| repo_name (string) | path (string) | copies (string) | size (string) | content (string) | license (string) | hash (int64) | line_mean (float64) | line_max (int64) | alpha_frac (float64) | autogenerated (bool) | ratio (float64) | config_test (bool) | has_no_keywords (bool) | few_assignments (bool) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
fresskarma/tinyos-1.x
|
tools/python/pytos/util/RoutingMessages.py
|
1
|
7725
|
# "Copyright (c) 2000-2003 The Regents of the University of California.
# All rights reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose, without fee, and without written agreement
# is hereby granted, provided that the above copyright notice, the following
# two paragraphs and the author appear in all copies of this software.
#
# IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR
# DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT
# OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF THE UNIVERSITY
# OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
# ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATION TO
# PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS."
#
# @author Kamin Whitehouse
#
"""\
RoutingMessages: This is a package of classes that have some functionality
commonly used by routing messages, e.g. Rpc and RamSymbol.
This class is not intended to be used on its own.
"""
import sys, string, time, types
import pytos.util.nescDecls as nescDecls
import pytos.Comm as Comm
import pytos.tools.Drip as Drip
import pytos.tools.Drain as Drain
from copy import deepcopy
class RoutingMessage( nescDecls.TosMsg ) :
def __init__(self, parent, amType, *structArgs) :
#store the parent
self.parent = parent
#initialize the default call parameters to none (ie, use the parent's defaults)
for (callParam,default) in self.parent.defaultCallParams :
self.__dict__[callParam] = None
nescDecls.TosMsg.__init__(self, parent.app.enums.AM_RPCCOMMANDMSG, *structArgs)
def _assignParam(self, field, param, paramId) :
"""assign a call parameter to the correct field (checking types)"""
if type(field) == nescDecls.nescType and (
type(param) == int or type(param) == long or
type(param) == float or type(param) == str or
type(param) == unicode ) :
field.value = param
elif type(field) == type(param) :
field = param
else :
raise Exception("Illegal parameter type for param #%s. Requires type %s." % (
str(paramId), str(type(field))) )
def _send(self, address, *posArgs, **nameArgs) :
commArgs = ()
#posArgs and nameArgs now contain only field values.
#now assign them to the appropriate RoutingMessage fields.
#create a temporary RoutingMessage to hold the call-time parameters
thisCall = deepcopy(self)
for i in range(len(posArgs)) :
thisCall._assignParam(thisCall.value[thisCall.fields[i+1]["name"]], posArgs[i], i)
for key in nameArgs.keys() :
if not thisCall.value.has_key(key) :
raise Exception("parameter name %s non-existent" % key)
thisCall._assignParam(thisCall.value[key], nameArgs[key], key)
thisCall.parent.sendComm.send(address, thisCall, *commArgs)
def parseCallParams(self, nameArgs) :
callParams = self.getCallParams()
#parse any call-time call parameters
for param in nameArgs.keys() :
if callParams.has_key(param) :
callParams[param] = nameArgs[param]
del nameArgs[param]
return callParams
def getCallParams(self) :
"""Use the default call parameters from the parent module, but if I have the same
field with a non-None value, use it instead"""
callParams = self.parent.getCallParams()
for param in callParams.keys() :
if self.__dict__.has_key(param) and self.__getattribute__(param) != None :
callParams[param] = self.__getattribute__(param)
return callParams
def __repr__(self) :
"""full function name"""
return "%s object at %s:\n\n%s" % (self.__class__, hex(id(self)), str(self))
def register(self, listener, comm=()) :
self.parent.receiveComm.register(self, listener, *comm)
def unregister(self, listener, comm=()) :
self.parent.receiveComm.unregister(self, listener, *comm)
class Shortcut (object):
"""used to allow multiple levels of indirection w/routing messages using dots;
ie., to allow something.module.interface.RoutingMessage()"""
def __init__(self, parent, name):
self.parent = parent
self.name = name
def __getattr__(self, name) :
name = self.name + "." + name
if self.parent._messages.has_key(name) :
return self.parent._messages.get(name)
else :
for message in self.parent._messages.values() :
if message.nescType.find(name+".") == 0 :
return Shortcut(self.parent,name)
raise Exception("Cannot find %s. Check spelling." % name)
def __repr__(self):
return "%s object at %s:\n\n%s" % (self.__class__, hex(id(self)), str(self))
def __str__(self):
string = ""
funcs = ()
messageNames = self.parent._messages.keys()
messageNames.sort()
for message in messageNames :
if message.find(self.name) == 0 :
string += str(self.parent._messages[message])
string = string.replace(self.name + "." , "" )
return string
class RoutingMessages(object) :
def __init__(self, app) :
self.app = app
self._messages = {}
## In this constructor, we connect to the routing layer as best as
## we can. This may mean creating new drip/drain instances,
## reusing old ones, reusing old Comm objects, or not connecting
## at all, depending...
if app.motecom == None:
return
#connect to sendComm: use localComm if user requested or if drip not compiled in.
self.address=app.enums.TOS_BCAST_ADDR
if app.localCommOnly==True or "AM_DRIPMSG" not in app.enums._enums:
self.sendComm = Comm.getCommObject(app, app.motecom)
else :
self.sendComm = Drip.getDripObject(app, app.motecom, app.enums.AM_RPCCOMMANDMSG)[0]
#connect to receiveComm: always use Drain unless not compiled in
if "AM_DRAINMSG" not in app.enums._enums:
self.receiveComm = Comm.getCommObject(app, app.motecom)
self.returnAddress = app.enums.TOS_BCAST_ADDR
else :
treeID = 0xfffe #can we set this automatically?
self.receiveComm = Drain.getDrainObject(app, app.motecom, treeID)[0]
if app.localCommOnly == False :
self.receiveComm.maintainTree()
if app.tosbase==True: #can we discover this like deluge?
self.returnAddress = treeID
else :
self.returnAddress = app.enums.TOS_UART_ADDR
def initializeCallParams(self, callParams) :
for (callParam,defaultVal) in self.defaultCallParams :
if callParams.has_key(callParam) :
self.__dict__[callParam] = callParams[callParam]
elif not self.__dict__.has_key(callParam):
self.__dict__[callParam] = defaultVal
def getCallParams(self) :
callParams = {}
for (callParam,default) in self.defaultCallParams :
callParams[callParam] = self.__dict__[callParam]
return callParams
def __getattr__(self, name) :
for function in self._messages.values() :
if function.nescType.find(name + ".") == 0 :
return Shortcut(self,name)
raise AttributeError("No such attribute %s" % name)
def __repr__(self) :
return "%s object at %s:\n\n%s" % (self.__class__, hex(id(self)), str(self))
def __str__(self) :
""" Print all available RoutingMessages."""
string = ""
keys = self._messages.keys()
keys.sort()
for name in keys :
string += str( self._messages[name])
return string
|
bsd-3-clause
| 4,245,047,028,988,728,300
| 36.139423
| 95
| 0.667961
| false
| 3.640434
| false
| false
| false
|
newsages/nQTrucks
|
ntrain/puertoct/truck2/prep.py
|
1
|
5615
|
#!/usr/bin/python
import os
from PIL import Image
import uuid
import shutil
import sys
WIDTH=52
HEIGHT=13
COUNTRY='trucks2'
#WIDTH=52
#HEIGHT=13
#COUNTRY='eu'
#constants
OPENCV_DIR= '/usr/bin'
SAMPLE_CREATOR = OPENCV_DIR + '/opencv_createsamples'
BASE_DIR = './'
OUTPUT_DIR = BASE_DIR + "out/"
INPUT_NEGATIVE_DIR = BASE_DIR + 'neg/'
INPUT_POSITIVE_DIR = BASE_DIR + COUNTRY + '/'
OUTPUT_NEGATIVE_DIR = BASE_DIR + 'negative/'
OUTPUT_POSITIVE_DIR = BASE_DIR + 'positive/'
POSITIVE_INFO_FILE = OUTPUT_POSITIVE_DIR + 'positive.txt'
NEGATIVE_INFO_FILE = OUTPUT_NEGATIVE_DIR + 'negative.txt'
VEC_FILE = OUTPUT_POSITIVE_DIR + 'vecfile.vec'
vector_arg = '-vec %s' % (VEC_FILE)
width_height_arg = '-w %d -h %d' % (WIDTH, HEIGHT)
def print_usage():
print "Usage: prep.py [Operation]"
print " -- Operations --"
print " neg -- Prepares the negative samples list"
print " pos -- Copies all the raw positive files to a opencv vector"
print " showpos -- Shows the positive samples that were created"
print " train -- Outputs the command for the Cascade Training algorithm"
def file_len(fname):
with open(fname) as f:
for i, l in enumerate(f):
pass
return i + 1
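# Illustrative invocation (an assumed typical workflow, not part of the original
# script; it simply exercises the operations documented in print_usage above):
#   python prep.py neg       # build negative.txt from the files in ./neg/
#   python prep.py pos       # copy positives and pack them into vecfile.vec
#   python prep.py train     # print the opencv_traincascade command to run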
command=""
if command != "":
pass
elif len(sys.argv) != 2:
print_usage()
exit()
else:
command = sys.argv[1]
if command == "neg":
print "Neg"
# Get rid of any spaces
for neg_file in os.listdir(INPUT_NEGATIVE_DIR):
if " " in neg_file:
fileName, fileExtension = os.path.splitext(neg_file)
newfilename = str(uuid.uuid4()) + fileExtension
#print "renaming: " + files + " to "+ root_dir + "/" + str(uuid.uuid4()) + fileExtension
os.rename(INPUT_NEGATIVE_DIR + neg_file, INPUT_NEGATIVE_DIR + newfilename)  # keep the renamed file in the negative directory
f = open(NEGATIVE_INFO_FILE,'w')
## Write a list of all the negative files
for neg_file in os.listdir(INPUT_NEGATIVE_DIR):
if os.path.isdir(INPUT_NEGATIVE_DIR + neg_file):
continue
shutil.copy2(INPUT_NEGATIVE_DIR + neg_file, OUTPUT_NEGATIVE_DIR + neg_file )
f.write(neg_file + "\n")
f.close()
elif command == "pos":
print "Pos"
info_arg = '-info %s' % (POSITIVE_INFO_FILE)
# Copy all files in the raw directory and build an info file
## Remove all files in the output positive directory
for old_file in os.listdir(OUTPUT_POSITIVE_DIR):
os.unlink(OUTPUT_POSITIVE_DIR + old_file)
## First, prep the sample filenames (make sure they have no spaces)
for files in os.listdir(INPUT_POSITIVE_DIR):
if os.path.isdir(INPUT_POSITIVE_DIR + files):
continue
# Rename the file if it has a space in it
newfilename = files
if " " in files:
fileName, fileExtension = os.path.splitext(files)
newfilename = str(uuid.uuid4()) + fileExtension
#print "renaming: " + files + " to "+ root_dir + "/" + str(uuid.uuid4()) + fileExtension
os.rename(INPUT_POSITIVE_DIR + files, INPUT_POSITIVE_DIR + newfilename)
# Copy from the raw directory to the positive directory
shutil.copy2(INPUT_POSITIVE_DIR + newfilename, OUTPUT_POSITIVE_DIR + newfilename )
total_pics = 0
## Create the positive.txt input file
f = open(POSITIVE_INFO_FILE,'w')
for filename in os.listdir(OUTPUT_POSITIVE_DIR):
if os.path.isdir(OUTPUT_POSITIVE_DIR + filename):
continue
if filename.endswith(".txt"):
continue
try:
img = Image.open(OUTPUT_POSITIVE_DIR + filename)
# get the image's width and height in pixels
width, height = img.size
f.write(filename + " 1 0 0 " + str(width) + " " + str(height) + '\n')
total_pics = total_pics + 1
except IOError:
print "Exception reading image file: " + filename
f.close()
# Collapse the samples into a vector file
execStr = '%s/opencv_createsamples %s %s %s -num %d' % (OPENCV_DIR, vector_arg, width_height_arg, info_arg, total_pics )
print execStr
os.system(execStr)
#opencv_createsamples -info ./positive.txt -vec ../positive/vecfile.vec -w 120 -h 60 -bg ../negative/PentagonCityParkingGarage21.jpg -num 100
elif command == "showpos":
print "SHOW"
execStr = '%s/opencv_createsamples -vec %s -w %d -h %d' % (OPENCV_DIR, VEC_FILE, WIDTH, HEIGHT )
print execStr
os.system(execStr)
#opencv_createsamples -vec ../positive/vecfile.vec -w 120 -h 60
elif command == "train":
print "TRAIN"
data_arg = '-data %s/' % (OUTPUT_DIR)
bg_arg = '-bg %s' % (NEGATIVE_INFO_FILE)
try:
num_pos_samples = file_len(POSITIVE_INFO_FILE)
except:
num_pos_samples = -1
num_neg_samples = file_len(NEGATIVE_INFO_FILE)
execStr = '%s/opencv_traincascade %s %s %s %s -numPos %d -numNeg %d -maxFalseAlarmRate 0.45 -featureType LBP -numStages 13' % (OPENCV_DIR, data_arg, vector_arg, bg_arg, width_height_arg, num_pos_samples, num_neg_samples )
print "Execute the following command to start training:"
print execStr
#opencv_traincascade -data ./out/ -vec ./positive/vecfile.vec -bg ./negative/negative.txt -w 120 -h 60 -numPos 99 -numNeg 5 -featureType LBP -numStages 8
#opencv_traincascade -data ./out/ -vec ./positive/vecfile.vec -bg ./negative/negative.txt -w 120 -h 60 -numPos 99 -numNeg 5 -featureType LBP -numStages 20
elif command == "SDFLSDFSDFSDF":
root_dir = '/home/mhill/projects/anpr/AlprPlus/samples/svm/raw-pos'
outputfilename = "positive.txt"
else:
print_usage()
exit()
|
gpl-3.0
| -3,035,221,647,513,750,500
| 28.708995
| 225
| 0.636331
| false
| 3.201254
| false
| false
| false
|
marnnie/Cable-buenaventura
|
plugin.video.genesis/resources/lib/sources/mvsnap_mv_tv.py
|
1
|
4444
|
# -*- coding: utf-8 -*-
'''
Genesis Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urlparse,json
from resources.lib.libraries import client
from resources.lib import resolvers
class source:
def __init__(self):
self.base_link = 'http://mvsnap.com'
self.search_link = '/v1/api/search?query=%s'
def get_movie(self, imdb, title, year):
try:
query = self.search_link % imdb
query = urlparse.urljoin(self.base_link, query)
result = client.source(query)
result = json.loads(result)
result = result['movies'][0]['slug']
url = '/movies/%s' % result
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
return url
except:
return
def get_show(self, imdb, tvdb, tvshowtitle, year):
try:
url = '%s (%s)' % (tvshowtitle, year)
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
return url
except:
return
def get_episode(self, url, imdb, tvdb, title, date, season, episode):
try:
query = self.search_link % imdb
query = urlparse.urljoin(self.base_link, query)
result = client.source(query)
result = json.loads(result)
result = result['movies']
season = '%02d' % int(season)
episode = '%02d' % int(episode)
result = [(i['slug'], i['long_title']) for i in result]
result = [(i[0], re.compile('(\d*)$').findall(i[1])) for i in result]
result = [(i[0], i[1][0]) for i in result if len(i[1]) > 0]
result = [i[0] for i in result if season == i[1]][0]
url = '/tv-shows/%s?S%sE%s' % (result, season, episode)
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
return url
except:
return
def get_sources(self, url, hosthdDict, hostDict, locDict):
try:
sources = []
if url == None: return sources
query = urlparse.urlparse(url).query
try: query = '%02d' % int(re.compile('E(\d*)$').findall(query)[0])
except: query = ''
url = urlparse.urljoin(self.base_link, url)
result = client.source(url)
result = client.parseDOM(result, 'select', attrs = {'id': 'myDropdown'})[0]
result = zip(client.parseDOM(result, 'option', ret='value'), client.parseDOM(result, 'option'))
result = [i[0] for i in result if i[1].endswith(query) or query == ''][0]
direct = re.compile('(.+)[|](.+?)[,]').findall(result)
if len(direct) > 0:
quality = 'HD' if 'hd' in direct[0][0].lower() else 'SD'
sources.append({'source': 'GVideo', 'quality': quality, 'provider': 'MVsnap', 'url': direct[0][1]})
return sources
url = urlparse.urljoin(self.base_link, result)
url = client.source(url, output='geturl')
if not 'google' in url: raise Exception()
url = url.split('get_video_info')[0]
url = resolvers.request(url)
for i in url: sources.append({'source': 'GVideo', 'quality': i['quality'], 'provider': 'MVsnap', 'url': i['url']})
return sources
except:
return sources
def resolve(self, url):
try:
if url.startswith('stack://'): return url
url = client.request(url, output='geturl')
if 'requiressl=yes' in url: url = url.replace('http://', 'https://')
else: url = url.replace('https://', 'http://')
return url
except:
return
|
gpl-2.0
| 2,883,124,037,299,441,000
| 31.437956
| 126
| 0.54928
| false
| 3.932743
| false
| false
| false
|
metabrainz/botbot-web
|
botbot/apps/plugins/core/logger.py
|
1
|
2002
|
import re
from botbot.apps.logs.models import Log
from botbot_plugins.base import BasePlugin
import botbot_plugins.config as config
class Config(config.BaseConfig):
ignore_prefixes = config.Field(
default=["!-"],
required=False,
help_text="""
Specify a list of regular expressions which match
the start of messages to be ignored (excluded from the logs)
"""
)
def should_ignore_text(text, ignore_prefixes):
return any(
(
prefix and
re.match(prefix, text, flags=re.IGNORECASE) is not None
)
for prefix in ignore_prefixes
)
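# Hedged examples of should_ignore_text with the default ignore_prefixes
# (["!-"]); the message strings are illustrative values, not from the original module:
#   should_ignore_text("!-private note", ["!-"])   # -> True, dropped from the logs
#   should_ignore_text("hello channel", ["!-"])    # -> False, logged as usual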
class Plugin(BasePlugin):
"""
Logs all activity.
I keep extensive logs on all the activity in `{{ channel.name }}`.
You can read and search them at {{ SITE }}{{ channel.get_absolute_url }}.
"""
config_class = Config
def logit(self, line):
"""Log a message to the database"""
# If the channel does not start with "#" that means the message
# is part of a /query
if line._channel_name.startswith("#"):
ignore_prefixes = self.config['ignore_prefixes']
if ignore_prefixes:
if not isinstance(ignore_prefixes, list):
ignore_prefixes = [ignore_prefixes]
else:
ignore_prefixes = []
# Delete ACTION prefix created by /me
text = line.text
if text.startswith("ACTION "):
text = text[7:]
if not should_ignore_text(text, ignore_prefixes):
Log.objects.create(
channel_id=line._channel.pk,
timestamp=line._received,
nick=line.user,
text=line.full_text,
room=line._channel,
host=line._host,
command=line._command,
raw=line._raw)
logit.route_rule = ('firehose', ur'(.*)')
|
mit
| 801,682,193,017,813,800
| 28.880597
| 77
| 0.545455
| false
| 4.46875
| true
| false
| false
|
evanmiltenburg/Dutch-corpora
|
overheid/scripts/make_xml_plain.py
|
1
|
1350
|
from bs4 import BeautifulSoup
import nltk.data
from nltk.tokenize import word_tokenize
import glob
import gzip
import sys
tokenizer = nltk.data.load('tokenizers/punkt/dutch.pickle')
def good_sentence(s):
if len(s) < 4 or s.count(',') > 4:
return False
else:
digits = filter(lambda x:x.isdigit(),s)
if len(digits) > (float(len(s))/2):
return False
else:
return True
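# Illustrative behaviour of good_sentence (a sketch based on the checks above;
# the token lists are made-up examples):
#   good_sentence(['Dit', 'is', 'een', 'goede', 'zin', '.'])  # True: >= 4 tokens, few commas/digits
#   good_sentence(['12', '34', '56', '78'])                   # False: digit tokens exceed half the length
#   good_sentence(['Te', 'kort'])                             # False: fewer than 4 tokens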
def sentences_for_file(filename):
with open(filename) as f:
soup = BeautifulSoup(f)
pars = filter(lambda p: not p == None,
map(lambda x:x.get_text(), soup.find_all('al')))
sentences = [word_tokenize(sentence) for x in pars
for sentence in tokenizer.tokenize(x)]
return [' '.join(s).encode('utf-8') for s in filter(good_sentence, sentences)]
def main(ftype):
with gzip.open('../corpus/' + ftype + '_plain.txt.gz','w') as f:
for filename in glob.glob('../data/' + ftype + '/*/*.xml'):
f.write('\n'.join(sentences_for_file(filename)))
if __name__ == "__main__":
ftypes = {'kst', 'trb', 'stb', 'ag', 'ah', 'stcrt', 'kv', 'h', 'blg', 'nds'}
ftype = sys.argv[1]
if ftype in ftypes:
main(ftype)
else:
raise KeyError('No known folder of that type. (You entered: '+ftype + ')')
|
apache-2.0
| 3,827,033,118,878,343,000
| 31.926829
| 86
| 0.568148
| false
| 3.452685
| false
| false
| false
|
erikdejonge/newsrivr
|
daemons/oauth.py
|
1
|
23551
|
"""
The MIT License
Copyright (c) 2007 Leah Culver
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import int
from future import standard_library
standard_library.install_aliases()
from builtins import str
from builtins import range
from builtins import object
import cgi
import urllib.request, urllib.parse, urllib.error
import time
import random
import urllib.parse
import hmac
import binascii
VERSION = '1.0' # Hi Blaine!
HTTP_METHOD = 'GET'
SIGNATURE_METHOD = 'PLAINTEXT'
class OAuthError(RuntimeError):
"""Generic exception class."""
def __init__(self, message='OAuth error occurred.'):
self.message = message
def build_authenticate_header(realm=''):
"""Optional WWW-Authenticate header (401 error)"""
return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
def escape(s):
"""Escape a URL including any /."""
return urllib.parse.quote(s, safe='~')
def _utf8_str(s):
"""Convert unicode to utf-8."""
if isinstance(s, str):
return s.encode("utf-8")
else:
return str(s)
def generate_timestamp():
"""Get seconds since epoch (UTC)."""
return int(time.time())
def generate_nonce(length=8):
"""Generate pseudorandom number."""
return ''.join([str(random.randint(0, 9)) for i in range(length)])
def generate_verifier(length=8):
"""Generate pseudorandom number."""
return ''.join([str(random.randint(0, 9)) for i in range(length)])
class OAuthConsumer(object):
"""Consumer of OAuth authentication.
OAuthConsumer is a data type that represents the identity of the Consumer
via its shared secret with the Service Provider.
"""
key = None
secret = None
def __init__(self, key, secret):
self.key = key
self.secret = secret
class OAuthToken(object):
"""OAuthToken is a data type that represents an End User via either an access
or request token.
key -- the token
secret -- the token secret
"""
key = None
secret = None
callback = None
callback_confirmed = None
verifier = None
def __init__(self, key, secret):
self.key = key
self.secret = secret
def set_callback(self, callback):
self.callback = callback
self.callback_confirmed = 'true'
def set_verifier(self, verifier=None):
if verifier is not None:
self.verifier = verifier
else:
self.verifier = generate_verifier()
def get_callback_url(self):
if self.callback and self.verifier:
# Append the oauth_verifier.
parts = urllib.parse.urlparse(self.callback)
scheme, netloc, path, params, query, fragment = parts[:6]
if query:
query = '%s&oauth_verifier=%s' % (query, self.verifier)
else:
query = 'oauth_verifier=%s' % self.verifier
return urllib.parse.urlunparse((scheme, netloc, path, params,
query, fragment))
return self.callback
def to_string(self):
data = {
'oauth_token': self.key,
'oauth_token_secret': self.secret,
}
if self.callback_confirmed is not None:
data['oauth_callback_confirmed'] = self.callback_confirmed
return urllib.parse.urlencode(data)
def from_string(s):
""" Returns a token from something like:
oauth_token_secret=xxx&oauth_token=xxx
"""
params = cgi.parse_qs(s, keep_blank_values=False)
key = params['oauth_token'][0]
secret = params['oauth_token_secret'][0]
token = OAuthToken(key, secret)
try:
token.callback_confirmed = params['oauth_callback_confirmed'][0]
except KeyError:
pass # 1.0, no callback confirmed.
return token
from_string = staticmethod(from_string)
def __str__(self):
return self.to_string()
class OAuthRequest(object):
"""OAuthRequest represents the request and can be serialized.
OAuth parameters:
- oauth_consumer_key
- oauth_token
- oauth_signature_method
- oauth_signature
- oauth_timestamp
- oauth_nonce
- oauth_version
- oauth_verifier
... any additional parameters, as defined by the Service Provider.
"""
parameters = None # OAuth parameters.
http_method = HTTP_METHOD
http_url = None
version = VERSION
def __init__(self, http_method=HTTP_METHOD, http_url=None, parameters=None):
self.http_method = http_method
self.http_url = http_url
self.parameters = parameters or {}
def set_parameter(self, parameter, value):
self.parameters[parameter] = value
def get_parameter(self, parameter):
try:
return self.parameters[parameter]
except:
raise OAuthError('Parameter not found: %s' % parameter)
def _get_timestamp_nonce(self):
return self.get_parameter('oauth_timestamp'), self.get_parameter(
'oauth_nonce')
def get_nonoauth_parameters(self):
"""Get any non-OAuth parameters."""
parameters = {}
for k, v in self.parameters.items():
# Ignore oauth parameters.
if k.find('oauth_') < 0:
parameters[k] = v
return parameters
def to_header(self, realm=''):
"""Serialize as a header for an HTTPAuth request."""
auth_header = 'OAuth realm="%s"' % realm
# Add the oauth parameters.
if self.parameters:
for k, v in self.parameters.items():
if k[:6] == 'oauth_':
auth_header += ', %s="%s"' % (k, escape(str(v)))
return {'Authorization': auth_header}
def to_postdata(self):
"""Serialize as post data for a POST request."""
return '&'.join(['%s=%s' % (escape(str(k)), escape(str(v))) \
for k, v in self.parameters.items()])
def to_url(self):
"""Serialize as a URL for a GET request."""
return '%s?%s' % (self.get_normalized_http_url(), self.to_postdata())
def get_normalized_parameters(self):
"""Return a string that contains the parameters that must be signed."""
params = self.parameters
try:
# Exclude the signature if it exists.
del params['oauth_signature']
except:
pass
# Escape key values before sorting.
key_values = [(escape(_utf8_str(k)), escape(_utf8_str(v))) \
for k,v in list(params.items())]
# Sort lexicographically, first after key, then after value.
key_values.sort()
# Combine key value pairs into a string.
return '&'.join(['%s=%s' % (k, v) for k, v in key_values])
def get_normalized_http_method(self):
"""Uppercases the http method."""
return self.http_method.upper()
def get_normalized_http_url(self):
"""Parses the URL and rebuilds it to be scheme://host/path."""
parts = urllib.parse.urlparse(self.http_url)
scheme, netloc, path = parts[:3]
# Exclude default port numbers.
if scheme == 'http' and netloc[-3:] == ':80':
netloc = netloc[:-3]
elif scheme == 'https' and netloc[-4:] == ':443':
netloc = netloc[:-4]
return '%s://%s%s' % (scheme, netloc, path)
def sign_request(self, signature_method, consumer, token):
"""Set the signature parameter to the result of build_signature."""
# Set the signature method.
self.set_parameter('oauth_signature_method',
signature_method.get_name())
# Set the signature.
self.set_parameter('oauth_signature',
self.build_signature(signature_method, consumer, token))
def build_signature(self, signature_method, consumer, token):
"""Calls the build signature method within the signature method."""
return signature_method.build_signature(self, consumer, token)
def from_request(http_method, http_url, headers=None, parameters=None,
query_string=None):
"""Combines multiple parameter sources."""
if parameters is None:
parameters = {}
# Headers
if headers and 'Authorization' in headers:
auth_header = headers['Authorization']
# Check that the authorization header is OAuth.
if auth_header[:6] == 'OAuth ':
auth_header = auth_header[6:]
try:
# Get the parameters from the header.
header_params = OAuthRequest._split_header(auth_header)
parameters.update(header_params)
except:
raise OAuthError('Unable to parse OAuth parameters from '
'Authorization header.')
# GET or POST query string.
if query_string:
query_params = OAuthRequest._split_url_string(query_string)
parameters.update(query_params)
# URL parameters.
param_str = urllib.parse.urlparse(http_url)[4] # query
url_params = OAuthRequest._split_url_string(param_str)
parameters.update(url_params)
if parameters:
return OAuthRequest(http_method, http_url, parameters)
return None
from_request = staticmethod(from_request)
def from_consumer_and_token(oauth_consumer, token=None,
callback=None, verifier=None, http_method=HTTP_METHOD,
http_url=None, parameters=None):
if not parameters:
parameters = {}
defaults = {
'oauth_consumer_key': oauth_consumer.key,
'oauth_timestamp': generate_timestamp(),
'oauth_nonce': generate_nonce(),
'oauth_version': OAuthRequest.version,
}
defaults.update(parameters)
parameters = defaults
if token:
parameters['oauth_token'] = token.key
if token.callback:
parameters['oauth_callback'] = token.callback
# 1.0a support for verifier.
if verifier:
parameters['oauth_verifier'] = verifier
elif callback:
# 1.0a support for callback in the request token request.
parameters['oauth_callback'] = callback
return OAuthRequest(http_method, http_url, parameters)
from_consumer_and_token = staticmethod(from_consumer_and_token)
def from_token_and_callback(token, callback=None, http_method=HTTP_METHOD,
http_url=None, parameters=None):
if not parameters:
parameters = {}
parameters['oauth_token'] = token.key
if callback:
parameters['oauth_callback'] = callback
return OAuthRequest(http_method, http_url, parameters)
from_token_and_callback = staticmethod(from_token_and_callback)
def _split_header(header):
"""Turn Authorization: header into parameters."""
params = {}
parts = header.split(',')
for param in parts:
# Ignore realm parameter.
if param.find('realm') > -1:
continue
# Remove whitespace.
param = param.strip()
# Split key-value.
param_parts = param.split('=', 1)
# Remove quotes and unescape the value.
params[param_parts[0]] = urllib.parse.unquote(param_parts[1].strip('\"'))
return params
_split_header = staticmethod(_split_header)
def _split_url_string(param_str):
"""Turn URL string into parameters."""
parameters = cgi.parse_qs(param_str, keep_blank_values=False)
for k, v in parameters.items():
parameters[k] = urllib.parse.unquote(v[0])
return parameters
_split_url_string = staticmethod(_split_url_string)
class OAuthServer(object):
"""A worker to check the validity of a request against a data store."""
timestamp_threshold = 300 # In seconds, five minutes.
version = VERSION
signature_methods = None
data_store = None
def __init__(self, data_store=None, signature_methods=None):
self.data_store = data_store
self.signature_methods = signature_methods or {}
def set_data_store(self, data_store):
self.data_store = data_store
def get_data_store(self):
return self.data_store
def add_signature_method(self, signature_method):
self.signature_methods[signature_method.get_name()] = signature_method
return self.signature_methods
def fetch_request_token(self, oauth_request):
"""Processes a request_token request and returns the
request token on success.
"""
try:
# Get the request token for authorization.
token = self._get_token(oauth_request, 'request')
except OAuthError:
# No token required for the initial token request.
version = self._get_version(oauth_request)
consumer = self._get_consumer(oauth_request)
try:
callback = self.get_callback(oauth_request)
except OAuthError:
callback = None # 1.0, no callback specified.
self._check_signature(oauth_request, consumer, None)
# Fetch a new token.
token = self.data_store.fetch_request_token(consumer, callback)
return token
def fetch_access_token(self, oauth_request):
"""Processes an access_token request and returns the
access token on success.
"""
version = self._get_version(oauth_request)
consumer = self._get_consumer(oauth_request)
try:
verifier = self._get_verifier(oauth_request)
except OAuthError:
verifier = None
# Get the request token.
token = self._get_token(oauth_request, 'request')
self._check_signature(oauth_request, consumer, token)
new_token = self.data_store.fetch_access_token(consumer, token, verifier)
return new_token
def verify_request(self, oauth_request):
"""Verifies an api call and checks all the parameters."""
# -> consumer and token
version = self._get_version(oauth_request)
consumer = self._get_consumer(oauth_request)
# Get the access token.
token = self._get_token(oauth_request, 'access')
self._check_signature(oauth_request, consumer, token)
parameters = oauth_request.get_nonoauth_parameters()
return consumer, token, parameters
def authorize_token(self, token, user):
"""Authorize a request token."""
return self.data_store.authorize_request_token(token, user)
def get_callback(self, oauth_request):
"""Get the callback URL."""
return oauth_request.get_parameter('oauth_callback')
def build_authenticate_header(self, realm=''):
"""Optional support for the authenticate header."""
return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
def _get_version(self, oauth_request):
"""Verify the correct version request for this server."""
try:
version = oauth_request.get_parameter('oauth_version')
except:
version = VERSION
if version and version != self.version:
raise OAuthError('OAuth version %s not supported.' % str(version))
return version
def _get_signature_method(self, oauth_request):
"""Figure out the signature with some defaults."""
try:
signature_method = oauth_request.get_parameter(
'oauth_signature_method')
except:
signature_method = SIGNATURE_METHOD
try:
# Get the signature method object.
signature_method = self.signature_methods[signature_method]
except:
signature_method_names = ', '.join(list(self.signature_methods.keys()))
raise OAuthError('Signature method %s not supported try one of the '
'following: %s' % (signature_method, signature_method_names))
return signature_method
def _get_consumer(self, oauth_request):
consumer_key = oauth_request.get_parameter('oauth_consumer_key')
consumer = self.data_store.lookup_consumer(consumer_key)
if not consumer:
raise OAuthError('Invalid consumer.')
return consumer
def _get_token(self, oauth_request, token_type='access'):
"""Try to find the token for the provided request token key."""
token_field = oauth_request.get_parameter('oauth_token')
token = self.data_store.lookup_token(token_type, token_field)
if not token:
raise OAuthError('Invalid %s token: %s' % (token_type, token_field))
return token
def _get_verifier(self, oauth_request):
return oauth_request.get_parameter('oauth_verifier')
def _check_signature(self, oauth_request, consumer, token):
timestamp, nonce = oauth_request._get_timestamp_nonce()
self._check_timestamp(timestamp)
self._check_nonce(consumer, token, nonce)
signature_method = self._get_signature_method(oauth_request)
try:
signature = oauth_request.get_parameter('oauth_signature')
except:
raise OAuthError('Missing signature.')
# Validate the signature.
valid_sig = signature_method.check_signature(oauth_request, consumer,
token, signature)
if not valid_sig:
key, base = signature_method.build_signature_base_string(
oauth_request, consumer, token)
raise OAuthError('Invalid signature. Expected signature base '
'string: %s' % base)
built = signature_method.build_signature(oauth_request, consumer, token)
def _check_timestamp(self, timestamp):
"""Verify that timestamp is recentish."""
timestamp = int(timestamp)
now = int(time.time())
lapsed = abs(now - timestamp)
if lapsed > self.timestamp_threshold:
raise OAuthError('Expired timestamp: given %d and now %s has a '
'greater difference than threshold %d' %
(timestamp, now, self.timestamp_threshold))
def _check_nonce(self, consumer, token, nonce):
"""Verify that the nonce is uniqueish."""
nonce = self.data_store.lookup_nonce(consumer, token, nonce)
if nonce:
raise OAuthError('Nonce already used: %s' % str(nonce))
class OAuthClient(object):
"""OAuthClient is a worker to attempt to execute a request."""
consumer = None
token = None
def __init__(self, oauth_consumer, oauth_token):
self.consumer = oauth_consumer
self.token = oauth_token
def get_consumer(self):
return self.consumer
def get_token(self):
return self.token
def fetch_request_token(self, oauth_request):
"""-> OAuthToken."""
raise NotImplementedError
def fetch_access_token(self, oauth_request):
"""-> OAuthToken."""
raise NotImplementedError
def access_resource(self, oauth_request):
"""-> Some protected resource."""
raise NotImplementedError
class OAuthDataStore(object):
"""A database abstraction used to lookup consumers and tokens."""
def lookup_consumer(self, key):
"""-> OAuthConsumer."""
raise NotImplementedError
def lookup_token(self, oauth_consumer, token_type, token_token):
"""-> OAuthToken."""
raise NotImplementedError
def lookup_nonce(self, oauth_consumer, oauth_token, nonce):
"""-> OAuthToken."""
raise NotImplementedError
def fetch_request_token(self, oauth_consumer, oauth_callback):
"""-> OAuthToken."""
raise NotImplementedError
def fetch_access_token(self, oauth_consumer, oauth_token, oauth_verifier):
"""-> OAuthToken."""
raise NotImplementedError
def authorize_request_token(self, oauth_token, user):
"""-> OAuthToken."""
raise NotImplementedError
class OAuthSignatureMethod(object):
"""A strategy class that implements a signature method."""
def get_name(self):
"""-> str."""
raise NotImplementedError
def build_signature_base_string(self, oauth_request, oauth_consumer, oauth_token):
"""-> str key, str raw."""
raise NotImplementedError
def build_signature(self, oauth_request, oauth_consumer, oauth_token):
"""-> str."""
raise NotImplementedError
def check_signature(self, oauth_request, consumer, token, signature):
built = self.build_signature(oauth_request, consumer, token)
return built == signature
class OAuthSignatureMethod_HMAC_SHA1(OAuthSignatureMethod):
def get_name(self):
return 'HMAC-SHA1'
def build_signature_base_string(self, oauth_request, consumer, token):
sig = (
escape(oauth_request.get_normalized_http_method()),
escape(oauth_request.get_normalized_http_url()),
escape(oauth_request.get_normalized_parameters()),
)
key = '%s&' % escape(consumer.secret)
if token:
key += escape(token.secret)
raw = '&'.join(sig)
return key, raw
def build_signature(self, oauth_request, consumer, token):
"""Builds the base signature string."""
key, raw = self.build_signature_base_string(oauth_request, consumer,
token)
# HMAC object.
try:
import hashlib # 2.5
hashed = hmac.new(key, raw, hashlib.sha1)
except:
import sha # Deprecated
hashed = hmac.new(key, raw, sha)
# Calculate the digest base 64.
return binascii.b2a_base64(hashed.digest())[:-1]
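# Hedged signing sketch using the classes defined above (the key/secret/URL
# values are placeholders, not part of the original module):
#   consumer = OAuthConsumer('consumer-key', 'consumer-secret')
#   token = OAuthToken('token-key', 'token-secret')
#   request = OAuthRequest.from_consumer_and_token(
#       consumer, token=token, http_method='GET',
#       http_url='http://example.com/photos', parameters={'size': 'original'})
#   request.sign_request(OAuthSignatureMethod_HMAC_SHA1(), consumer, token)
#   request.to_url()  # the signed URL now carries the oauth_* parameters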
class OAuthSignatureMethod_PLAINTEXT(OAuthSignatureMethod):
def get_name(self):
return 'PLAINTEXT'
def build_signature_base_string(self, oauth_request, consumer, token):
"""Concatenates the consumer key and secret."""
sig = '%s&' % escape(consumer.secret)
if token:
sig = sig + escape(token.secret)
return sig, sig
def build_signature(self, oauth_request, consumer, token):
key, raw = self.build_signature_base_string(oauth_request, consumer,
token)
return key
|
gpl-2.0
| 6,105,287,884,939,560,000
| 34.416541
| 86
| 0.617893
| false
| 4.342799
| false
| false
| false
|
datapythonista/pandas
|
pandas/core/arrays/sparse/accessor.py
|
2
|
11479
|
"""Sparse accessor"""
import numpy as np
from pandas.compat._optional import import_optional_dependency
from pandas.core.dtypes.cast import find_common_type
from pandas.core.accessor import (
PandasDelegate,
delegate_names,
)
from pandas.core.arrays.sparse.array import SparseArray
from pandas.core.arrays.sparse.dtype import SparseDtype
class BaseAccessor:
_validation_msg = "Can only use the '.sparse' accessor with Sparse data."
def __init__(self, data=None):
self._parent = data
self._validate(data)
def _validate(self, data):
raise NotImplementedError
@delegate_names(
SparseArray, ["npoints", "density", "fill_value", "sp_values"], typ="property"
)
class SparseAccessor(BaseAccessor, PandasDelegate):
"""
Accessor for sparse data, with conversion from other sparse matrix data types.
"""
def _validate(self, data):
if not isinstance(data.dtype, SparseDtype):
raise AttributeError(self._validation_msg)
def _delegate_property_get(self, name, *args, **kwargs):
return getattr(self._parent.array, name)
def _delegate_method(self, name, *args, **kwargs):
if name == "from_coo":
return self.from_coo(*args, **kwargs)
elif name == "to_coo":
return self.to_coo(*args, **kwargs)
else:
raise ValueError
@classmethod
def from_coo(cls, A, dense_index=False):
"""
Create a Series with sparse values from a scipy.sparse.coo_matrix.
Parameters
----------
A : scipy.sparse.coo_matrix
dense_index : bool, default False
If False (default), the SparseSeries index consists of only the
coords of the non-null entries of the original coo_matrix.
If True, the SparseSeries index consists of the full sorted
(row, col) coordinates of the coo_matrix.
Returns
-------
s : Series
A Series with sparse values.
Examples
--------
>>> from scipy import sparse
>>> A = sparse.coo_matrix(
... ([3.0, 1.0, 2.0], ([1, 0, 0], [0, 2, 3])), shape=(3, 4)
... )
>>> A
<3x4 sparse matrix of type '<class 'numpy.float64'>'
with 3 stored elements in COOrdinate format>
>>> A.todense()
matrix([[0., 0., 1., 2.],
[3., 0., 0., 0.],
[0., 0., 0., 0.]])
>>> ss = pd.Series.sparse.from_coo(A)
>>> ss
0 2 1.0
3 2.0
1 0 3.0
dtype: Sparse[float64, nan]
"""
from pandas import Series
from pandas.core.arrays.sparse.scipy_sparse import coo_to_sparse_series
result = coo_to_sparse_series(A, dense_index=dense_index)
result = Series(result.array, index=result.index, copy=False)
return result
def to_coo(self, row_levels=(0,), column_levels=(1,), sort_labels=False):
"""
Create a scipy.sparse.coo_matrix from a Series with MultiIndex.
Use row_levels and column_levels to determine the row and column
coordinates respectively. row_levels and column_levels are the names
(labels) or numbers of the levels. {row_levels, column_levels} must be
a partition of the MultiIndex level names (or numbers).
Parameters
----------
row_levels : tuple/list
column_levels : tuple/list
sort_labels : bool, default False
Sort the row and column labels before forming the sparse matrix.
Returns
-------
y : scipy.sparse.coo_matrix
rows : list (row labels)
columns : list (column labels)
Examples
--------
>>> s = pd.Series([3.0, np.nan, 1.0, 3.0, np.nan, np.nan])
>>> s.index = pd.MultiIndex.from_tuples(
... [
... (1, 2, "a", 0),
... (1, 2, "a", 1),
... (1, 1, "b", 0),
... (1, 1, "b", 1),
... (2, 1, "b", 0),
... (2, 1, "b", 1)
... ],
... names=["A", "B", "C", "D"],
... )
>>> s
A B C D
1 2 a 0 3.0
1 NaN
1 b 0 1.0
1 3.0
2 1 b 0 NaN
1 NaN
dtype: float64
>>> ss = s.astype("Sparse")
>>> ss
A B C D
1 2 a 0 3.0
1 NaN
1 b 0 1.0
1 3.0
2 1 b 0 NaN
1 NaN
dtype: Sparse[float64, nan]
>>> A, rows, columns = ss.sparse.to_coo(
... row_levels=["A", "B"], column_levels=["C", "D"], sort_labels=True
... )
>>> A
<3x4 sparse matrix of type '<class 'numpy.float64'>'
with 3 stored elements in COOrdinate format>
>>> A.todense()
matrix([[0., 0., 1., 3.],
[3., 0., 0., 0.],
[0., 0., 0., 0.]])
>>> rows
[(1, 1), (1, 2), (2, 1)]
>>> columns
[('a', 0), ('a', 1), ('b', 0), ('b', 1)]
"""
from pandas.core.arrays.sparse.scipy_sparse import sparse_series_to_coo
A, rows, columns = sparse_series_to_coo(
self._parent, row_levels, column_levels, sort_labels=sort_labels
)
return A, rows, columns
def to_dense(self):
"""
Convert a Series from sparse values to dense.
.. versionadded:: 0.25.0
Returns
-------
Series:
A Series with the same values, stored as a dense array.
Examples
--------
>>> series = pd.Series(pd.arrays.SparseArray([0, 1, 0]))
>>> series
0 0
1 1
2 0
dtype: Sparse[int64, 0]
>>> series.sparse.to_dense()
0 0
1 1
2 0
dtype: int64
"""
from pandas import Series
return Series(
self._parent.array.to_dense(),
index=self._parent.index,
name=self._parent.name,
)
class SparseFrameAccessor(BaseAccessor, PandasDelegate):
"""
DataFrame accessor for sparse data.
.. versionadded:: 0.25.0
"""
def _validate(self, data):
dtypes = data.dtypes
if not all(isinstance(t, SparseDtype) for t in dtypes):
raise AttributeError(self._validation_msg)
@classmethod
def from_spmatrix(cls, data, index=None, columns=None):
"""
Create a new DataFrame from a scipy sparse matrix.
.. versionadded:: 0.25.0
Parameters
----------
data : scipy.sparse.spmatrix
Must be convertible to csc format.
index, columns : Index, optional
Row and column labels to use for the resulting DataFrame.
Defaults to a RangeIndex.
Returns
-------
DataFrame
Each column of the DataFrame is stored as a
:class:`arrays.SparseArray`.
Examples
--------
>>> import scipy.sparse
>>> mat = scipy.sparse.eye(3)
>>> pd.DataFrame.sparse.from_spmatrix(mat)
0 1 2
0 1.0 0.0 0.0
1 0.0 1.0 0.0
2 0.0 0.0 1.0
"""
from pandas._libs.sparse import IntIndex
from pandas import DataFrame
data = data.tocsc()
index, columns = cls._prep_index(data, index, columns)
n_rows, n_columns = data.shape
# We need to make sure indices are sorted, as we create
# IntIndex with no input validation (i.e. check_integrity=False ).
# Indices may already be sorted in scipy in which case this adds
# a small overhead.
data.sort_indices()
indices = data.indices
indptr = data.indptr
array_data = data.data
dtype = SparseDtype(array_data.dtype, 0)
arrays = []
for i in range(n_columns):
sl = slice(indptr[i], indptr[i + 1])
idx = IntIndex(n_rows, indices[sl], check_integrity=False)
arr = SparseArray._simple_new(array_data[sl], idx, dtype)
arrays.append(arr)
return DataFrame._from_arrays(
arrays, columns=columns, index=index, verify_integrity=False
)
def to_dense(self):
"""
Convert a DataFrame with sparse values to dense.
.. versionadded:: 0.25.0
Returns
-------
DataFrame
A DataFrame with the same values stored as dense arrays.
Examples
--------
>>> df = pd.DataFrame({"A": pd.arrays.SparseArray([0, 1, 0])})
>>> df.sparse.to_dense()
A
0 0
1 1
2 0
"""
from pandas import DataFrame
data = {k: v.array.to_dense() for k, v in self._parent.items()}
return DataFrame(data, index=self._parent.index, columns=self._parent.columns)
def to_coo(self):
"""
Return the contents of the frame as a sparse SciPy COO matrix.
.. versionadded:: 0.25.0
Returns
-------
coo_matrix : scipy.sparse.spmatrix
If the caller is heterogeneous and contains booleans or objects,
the result will be of dtype=object. See Notes.
Notes
-----
The dtype will be the lowest-common-denominator type (implicit
upcasting); that is to say if the dtypes (even of numeric types)
are mixed, the one that accommodates all will be chosen.
e.g. If the dtypes are float16 and float32, dtype will be upcast to
float32. By numpy.find_common_type convention, mixing int64 and
uint64 will result in a float64 dtype.
"""
import_optional_dependency("scipy")
from scipy.sparse import coo_matrix
dtype = find_common_type(self._parent.dtypes.to_list())
if isinstance(dtype, SparseDtype):
dtype = dtype.subtype
cols, rows, data = [], [], []
for col, name in enumerate(self._parent):
s = self._parent[name]
row = s.array.sp_index.to_int_index().indices
cols.append(np.repeat(col, len(row)))
rows.append(row)
data.append(s.array.sp_values.astype(dtype, copy=False))
cols = np.concatenate(cols)
rows = np.concatenate(rows)
data = np.concatenate(data)
return coo_matrix((data, (rows, cols)), shape=self._parent.shape)
@property
def density(self) -> float:
"""
Ratio of non-sparse points to total (dense) data points.
"""
tmp = np.mean([column.array.density for _, column in self._parent.items()])
return tmp
@staticmethod
def _prep_index(data, index, columns):
from pandas.core.indexes.api import ensure_index
import pandas.core.indexes.base as ibase
N, K = data.shape
if index is None:
index = ibase.default_index(N)
else:
index = ensure_index(index)
if columns is None:
columns = ibase.default_index(K)
else:
columns = ensure_index(columns)
if len(columns) != K:
raise ValueError(f"Column length mismatch: {len(columns)} vs. {K}")
if len(index) != N:
raise ValueError(f"Index length mismatch: {len(index)} vs. {N}")
return index, columns
|
bsd-3-clause
| -6,695,402,877,092,622,000
| 29.287599
| 86
| 0.531492
| false
| 3.959641
| false
| false
| false
|
nkalodimas/invenio
|
modules/bibsched/lib/bibtask.py
|
1
|
50380
|
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2007, 2008, 2009, 2010, 2011, 2012 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Invenio Bibliographic Task Class.
BibTask class.
A BibTask is an executable under CFG_BINDIR, whose name is stored in
bibtask_config.CFG_BIBTASK_VALID_TASKS.
A valid task must call the task_init function with the proper parameters.
Generic task related parameters (user, sleeptime, runtime, task_id, task_name
verbose)
go to _TASK_PARAMS global dictionary accessible through task_get_task_param.
Option specific to the particular BibTask go to _OPTIONS global dictionary
and are accessible via task_get_option/task_set_option.
In order to log something properly, just use write_message(s) with the desired
verbose level.
task_update_status and task_update_progress can be used to update the status
of the task (DONE, FAILED, DONE WITH ERRORS...) and its progress
(1 out of 100...) within the bibsched monitor.
It is possible to enqueue a BibTask via API call by means of
task_low_level_submission.
"""
__revision__ = "$Id$"
import getopt
import getpass
import marshal
import os
import pwd
import re
import signal
import sys
import time
import datetime
import traceback
import logging
import logging.handlers
import random
from socket import gethostname
from invenio.dbquery import run_sql, _db_login
from invenio.access_control_engine import acc_authorize_action
from invenio.config import CFG_PREFIX, CFG_BINDIR, CFG_LOGDIR, \
CFG_BIBSCHED_PROCESS_USER, CFG_TMPDIR, CFG_SITE_SUPPORT_EMAIL
from invenio.errorlib import register_exception
from invenio.access_control_config import CFG_EXTERNAL_AUTH_USING_SSO, \
CFG_EXTERNAL_AUTHENTICATION
from invenio.webuser import get_user_preferences, get_email
from invenio.bibtask_config import CFG_BIBTASK_VALID_TASKS, \
CFG_BIBTASK_DEFAULT_TASK_SETTINGS, CFG_BIBTASK_FIXEDTIMETASKS
from invenio.dateutils import parse_runtime_limit
from invenio.shellutils import escape_shell_arg
from invenio.mailutils import send_email
from invenio.bibsched import bibsched_set_host, \
bibsched_get_host
# Global _TASK_PARAMS dictionary.
_TASK_PARAMS = {
'version': '',
'task_stop_helper_fnc': None,
'task_name': os.path.basename(sys.argv[0]),
'task_specific_name': '',
'task_id': 0,
'user': '',
# If the task is not initialized (usually a developer debugging
# a single method), output all messages.
'verbose': 9,
'sleeptime': '',
'runtime': time.strftime("%Y-%m-%d %H:%M:%S"),
'priority': 0,
'runtime_limit': None,
'profile': [],
'post-process': [],
'sequence-id':None,
'stop_queue_on_error': False,
'fixed_time': False,
'email_logs_to': [],
}
# Global _OPTIONS dictionary.
_OPTIONS = {}
# Which tasks don't need to ask the user for authorization?
CFG_VALID_PROCESSES_NO_AUTH_NEEDED = ("bibupload", )
CFG_TASK_IS_NOT_A_DEAMON = ("bibupload", )
def fix_argv_paths(paths, argv=None):
"""Given the argv vector of cli parameters, and a list of path that
can be relative and may have been specified within argv,
it substitute all the occurencies of these paths in argv.
argv is changed in place and returned.
"""
if argv is None:
argv = sys.argv
for path in paths:
for count in xrange(len(argv)):
if path == argv[count]:
argv[count] = os.path.abspath(path)
return argv
def task_low_level_submission(name, user, *argv):
"""Let special lowlevel enqueuing of a task on the bibsche queue.
@param name: is the name of the bibtask. It must be a valid executable under
C{CFG_BINDIR}.
@type name: string
@param user: is a string that will appear as the "user" submitting the task.
Since tasks are submitted via API it makes sense to set the
user to the name of the module/function that called
task_low_level_submission.
@type user: string
@param argv: are all the additional CLI parameters that would have been
passed on the CLI (one parameter per variable).
e.g.:
>>> task_low_level_submission('bibupload', 'admin', '-a', '/tmp/z.xml')
@type: strings
@return: the task identifier when the task is correctly enqueued.
@rtype: int
@note: use absolute paths in argv
"""
def get_priority(argv):
"""Try to get the priority by analysing the arguments."""
priority = 0
argv = list(argv)
while True:
try:
opts, args = getopt.gnu_getopt(argv, 'P:', ['priority='])
except getopt.GetoptError, err:
## We remove one by one all the non recognized parameters
if len(err.opt) > 1:
argv = [arg for arg in argv if arg != '--%s' % err.opt and not arg.startswith('--%s=' % err.opt)]
else:
argv = [arg for arg in argv if not arg.startswith('-%s' % err.opt)]
else:
break
for opt in opts:
if opt[0] in ('-P', '--priority'):
try:
priority = int(opt[1])
except ValueError:
pass
return priority
def get_special_name(argv):
"""Try to get the special name by analysing the arguments."""
special_name = ''
argv = list(argv)
while True:
try:
opts, args = getopt.gnu_getopt(argv, 'N:', ['name='])
except getopt.GetoptError, err:
## We remove one by one all the non recognized parameters
if len(err.opt) > 1:
argv = [arg for arg in argv if arg != '--%s' % err.opt and not arg.startswith('--%s=' % err.opt)]
else:
argv = [arg for arg in argv if not arg.startswith('-%s' % err.opt)]
else:
break
for opt in opts:
if opt[0] in ('-N', '--name'):
special_name = opt[1]
return special_name
def get_runtime(argv):
"""Try to get the runtime by analysing the arguments."""
runtime = time.strftime("%Y-%m-%d %H:%M:%S")
argv = list(argv)
while True:
try:
opts, args = getopt.gnu_getopt(argv, 't:', ['runtime='])
except getopt.GetoptError, err:
## We remove one by one all the non recognized parameters
if len(err.opt) > 1:
argv = [arg for arg in argv if arg != '--%s' % err.opt and not arg.startswith('--%s=' % err.opt)]
else:
argv = [arg for arg in argv if not arg.startswith('-%s' % err.opt)]
else:
break
for opt in opts:
if opt[0] in ('-t', '--runtime'):
try:
runtime = get_datetime(opt[1])
except ValueError:
pass
return runtime
def get_sleeptime(argv):
"""Try to get the runtime by analysing the arguments."""
sleeptime = ""
argv = list(argv)
while True:
try:
opts, args = getopt.gnu_getopt(argv, 's:', ['sleeptime='])
except getopt.GetoptError, err:
## We remove one by one all the non recognized parameters
if len(err.opt) > 1:
argv = [arg for arg in argv if arg != '--%s' % err.opt and not arg.startswith('--%s=' % err.opt)]
else:
argv = [arg for arg in argv if not arg.startswith('-%s' % err.opt)]
else:
break
for opt in opts:
if opt[0] in ('-s', '--sleeptime'):
try:
sleeptime = opt[1]
except ValueError:
pass
return sleeptime
def get_sequenceid(argv):
"""Try to get the sequenceid by analysing the arguments."""
sequenceid = None
argv = list(argv)
while True:
try:
opts, args = getopt.gnu_getopt(argv, 'I:', ['sequence-id='])
except getopt.GetoptError, err:
## We remove one by one all the non recognized parameters
if len(err.opt) > 1:
argv = [arg for arg in argv if arg != '--%s' % err.opt and not arg.startswith('--%s=' % err.opt)]
else:
argv = [arg for arg in argv if not arg.startswith('-%s' % err.opt)]
else:
break
for opt in opts:
if opt[0] in ('-I', '--sequence-id'):
try:
sequenceid = opt[1]
except ValueError:
pass
return sequenceid
task_id = None
try:
if not name in CFG_BIBTASK_VALID_TASKS:
raise StandardError('%s is not a valid task name' % name)
new_argv = []
for arg in argv:
if isinstance(arg, unicode):
arg = arg.encode('utf8')
new_argv.append(arg)
argv = new_argv
priority = get_priority(argv)
special_name = get_special_name(argv)
runtime = get_runtime(argv)
sleeptime = get_sleeptime(argv)
sequenceid = get_sequenceid(argv)
argv = tuple([os.path.join(CFG_BINDIR, name)] + list(argv))
if special_name:
name = '%s:%s' % (name, special_name)
verbose_argv = 'Will execute: %s' % ' '.join([escape_shell_arg(str(arg)) for arg in argv])
## submit task:
task_id = run_sql("""INSERT INTO schTASK (proc,user,
runtime,sleeptime,status,progress,arguments,priority,sequenceid)
VALUES (%s,%s,%s,%s,'WAITING',%s,%s,%s,%s)""",
(name, user, runtime, sleeptime, verbose_argv, marshal.dumps(argv), priority, sequenceid))
except Exception:
register_exception(alert_admin=True)
if task_id:
run_sql("""DELETE FROM schTASK WHERE id=%s""", (task_id, ))
raise
return task_id
def bibtask_allocate_sequenceid(curdir=None):
"""
Returns an almost unique number to be used as a task sequence ID.
In WebSubmit functions, set C{curdir} to the curdir (!) to read
the shared sequence ID for all functions of this submission (reading
"access number").
@param curdir: in WebSubmit functions (ONLY) the value retrieved
from the curdir parameter of the function
@return: an integer for the sequence ID. 0 is returned if the
sequence ID could not be allocated
@rtype: int
"""
if curdir:
try:
fd = file(os.path.join(curdir, 'access'), "r")
access = fd.readline().strip()
fd.close()
return access.replace("_", "")[-9:]
except:
return 0
else:
return random.randrange(1, 4294967296)
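# Illustrative example (the access value below is hypothetical, not taken from
# the sources): with curdir set, the WebSubmit "access" file is read, underscores
# are stripped and the last nine characters are kept, so "1234567890_54321" would
# yield "789054321"; without curdir a random integer in [1, 2**32) is returned.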
def setup_loggers(task_id=None):
"""Sets up the logging system."""
logger = logging.getLogger()
for handler in logger.handlers:
## Let's clean the handlers in case some piece of code has already
## fired any write_message, i.e. any call to debug, info, etc.
## which triggered a call to logging.basicConfig()
logger.removeHandler(handler)
formatter = logging.Formatter('%(asctime)s --> %(message)s', '%Y-%m-%d %H:%M:%S')
if task_id is not None:
        err_logger = logging.handlers.RotatingFileHandler(os.path.join(CFG_LOGDIR, 'bibsched_task_%d.err' % task_id), 'a', 1*1024*1024, 10)
        log_logger = logging.handlers.RotatingFileHandler(os.path.join(CFG_LOGDIR, 'bibsched_task_%d.log' % task_id), 'a', 1*1024*1024, 10)
log_logger.setFormatter(formatter)
log_logger.setLevel(logging.DEBUG)
err_logger.setFormatter(formatter)
err_logger.setLevel(logging.WARNING)
logger.addHandler(err_logger)
logger.addHandler(log_logger)
stdout_logger = logging.StreamHandler(sys.stdout)
stdout_logger.setFormatter(formatter)
stdout_logger.setLevel(logging.DEBUG)
stderr_logger = logging.StreamHandler(sys.stderr)
stderr_logger.setFormatter(formatter)
stderr_logger.setLevel(logging.WARNING)
logger.addHandler(stderr_logger)
logger.addHandler(stdout_logger)
logger.setLevel(logging.INFO)
return logger
def task_init(
authorization_action="",
authorization_msg="",
description="",
help_specific_usage="",
version=__revision__,
specific_params=("", []),
task_stop_helper_fnc=None,
task_submit_elaborate_specific_parameter_fnc=None,
task_submit_check_options_fnc=None,
task_run_fnc=None):
""" Initialize a BibTask.
@param authorization_action: is the name of the authorization action
connected with this task;
@param authorization_msg: is the header printed when asking for an
authorization password;
@param description: is the generic description printed in the usage page;
@param help_specific_usage: is the specific parameter help
    @param task_stop_helper_fnc: is a function that will be called
whenever the task is stopped
@param task_submit_elaborate_specific_parameter_fnc: will be called passing
a key and a value, for parsing specific cli parameters. Must return True if
it has recognized the parameter. Must eventually update the options with
bibtask_set_option;
    @param task_submit_check_options_fnc: must check the validity of options (via
    bibtask_get_option) once all the options were parsed;
@param task_run_fnc: will be called as the main core function. Must return
False in case of errors.
"""
global _TASK_PARAMS, _OPTIONS
_TASK_PARAMS = {
"version" : version,
"task_stop_helper_fnc" : task_stop_helper_fnc,
"task_name" : os.path.basename(sys.argv[0]),
"task_specific_name" : '',
"user" : '',
"verbose" : 1,
"sleeptime" : '',
"runtime" : time.strftime("%Y-%m-%d %H:%M:%S"),
"priority" : 0,
"runtime_limit" : None,
"profile" : [],
"post-process": [],
"sequence-id": None,
"stop_queue_on_error": False,
"fixed_time": False,
}
to_be_submitted = True
if len(sys.argv) == 2 and sys.argv[1].isdigit():
_TASK_PARAMS['task_id'] = int(sys.argv[1])
argv = _task_get_options(_TASK_PARAMS['task_id'], _TASK_PARAMS['task_name'])
to_be_submitted = False
else:
argv = sys.argv
setup_loggers(_TASK_PARAMS.get('task_id'))
task_name = os.path.basename(sys.argv[0])
if task_name not in CFG_BIBTASK_VALID_TASKS or os.path.realpath(os.path.join(CFG_BINDIR, task_name)) != os.path.realpath(sys.argv[0]):
raise OSError("%s is not in the allowed modules" % sys.argv[0])
from invenio.errorlib import wrap_warn
wrap_warn()
if type(argv) is dict:
# FIXME: REMOVE AFTER MAJOR RELEASE 1.0
        # This is needed for old tasks submitted before CLI parameters
        # were stored in the DB and the _OPTIONS dictionary was stored instead.
_OPTIONS = argv
else:
try:
_task_build_params(_TASK_PARAMS['task_name'], argv, description,
help_specific_usage, version, specific_params,
task_submit_elaborate_specific_parameter_fnc,
task_submit_check_options_fnc)
except (SystemExit, Exception), err:
if not to_be_submitted:
register_exception(alert_admin=True)
write_message("Error in parsing the parameters: %s." % err, sys.stderr)
write_message("Exiting.", sys.stderr)
task_update_status("ERROR")
raise
write_message('argv=%s' % (argv, ), verbose=9)
write_message('_OPTIONS=%s' % (_OPTIONS, ), verbose=9)
write_message('_TASK_PARAMS=%s' % (_TASK_PARAMS, ), verbose=9)
if to_be_submitted:
_task_submit(argv, authorization_action, authorization_msg)
else:
try:
try:
if task_get_task_param('profile'):
try:
from cStringIO import StringIO
import pstats
filename = os.path.join(CFG_TMPDIR, 'bibsched_task_%s.pyprof' % _TASK_PARAMS['task_id'])
existing_sorts = pstats.Stats.sort_arg_dict_default.keys()
required_sorts = []
profile_dump = []
for sort in task_get_task_param('profile'):
if sort not in existing_sorts:
sort = 'cumulative'
if sort not in required_sorts:
required_sorts.append(sort)
if sys.hexversion < 0x02050000:
import hotshot
import hotshot.stats
pr = hotshot.Profile(filename)
ret = pr.runcall(_task_run, task_run_fnc)
for sort_type in required_sorts:
tmp_out = sys.stdout
sys.stdout = StringIO()
hotshot.stats.load(filename).strip_dirs().sort_stats(sort_type).print_stats()
# pylint: disable=E1103
# This is a hack. sys.stdout is a StringIO in this case.
profile_dump.append(sys.stdout.getvalue())
# pylint: enable=E1103
sys.stdout = tmp_out
else:
import cProfile
pr = cProfile.Profile()
ret = pr.runcall(_task_run, task_run_fnc)
pr.dump_stats(filename)
for sort_type in required_sorts:
strstream = StringIO()
pstats.Stats(filename, stream=strstream).strip_dirs().sort_stats(sort_type).print_stats()
profile_dump.append(strstream.getvalue())
profile_dump = '\n'.join(profile_dump)
profile_dump += '\nYou can use profile=%s' % existing_sorts
open(os.path.join(CFG_LOGDIR, 'bibsched_task_%d.log' % _TASK_PARAMS['task_id']), 'a').write("%s" % profile_dump)
os.remove(filename)
except ImportError:
ret = _task_run(task_run_fnc)
write_message("ERROR: The Python Profiler is not installed!", stream=sys.stderr)
else:
ret = _task_run(task_run_fnc)
if not ret:
write_message("Error occurred. Exiting.", sys.stderr)
except Exception, e:
register_exception(alert_admin=True)
write_message("Unexpected error occurred: %s." % e, sys.stderr)
write_message("Traceback is:", sys.stderr)
write_messages(''.join(traceback.format_tb(sys.exc_info()[2])), sys.stderr)
write_message("Exiting.", sys.stderr)
task_update_status("ERROR")
finally:
_task_email_logs()
logging.shutdown()
def _task_build_params(
task_name,
argv,
description="",
help_specific_usage="",
version=__revision__,
specific_params=("", []),
task_submit_elaborate_specific_parameter_fnc=None,
task_submit_check_options_fnc=None):
""" Build the BibTask params.
@param argv: a list of string as in sys.argv
@param description: is the generic description printed in the usage page;
@param help_specific_usage: is the specific parameter help
@param task_submit_elaborate_specific_parameter_fnc: will be called passing
a key and a value, for parsing specific cli parameters. Must return True if
it has recognized the parameter. Must eventually update the options with
bibtask_set_option;
    @param task_submit_check_options_fnc: must check the validity of options (via
    bibtask_get_option) once all the options were parsed;
"""
global _OPTIONS
_OPTIONS = {}
if task_name in CFG_BIBTASK_DEFAULT_TASK_SETTINGS:
_OPTIONS.update(CFG_BIBTASK_DEFAULT_TASK_SETTINGS[task_name])
# set user-defined options:
try:
(short_params, long_params) = specific_params
opts, args = getopt.gnu_getopt(argv[1:], "hVv:u:s:t:P:N:L:I:" +
short_params, [
"help",
"version",
"verbose=",
"user=",
"sleep=",
"runtime=",
"priority=",
"name=",
"limit=",
"profile=",
"post-process=",
"sequence-id=",
"stop-on-error",
"continue-on-error",
"fixed-time",
"email-logs-to="
] + long_params)
except getopt.GetoptError, err:
_usage(1, err, help_specific_usage=help_specific_usage, description=description)
try:
for opt in opts:
if opt[0] in ("-h", "--help"):
_usage(0, help_specific_usage=help_specific_usage, description=description)
elif opt[0] in ("-V", "--version"):
print _TASK_PARAMS["version"]
sys.exit(0)
elif opt[0] in ("-u", "--user"):
_TASK_PARAMS["user"] = opt[1]
elif opt[0] in ("-v", "--verbose"):
_TASK_PARAMS["verbose"] = int(opt[1])
elif opt[0] in ("-s", "--sleeptime"):
if task_name not in CFG_TASK_IS_NOT_A_DEAMON:
get_datetime(opt[1]) # see if it is a valid shift
_TASK_PARAMS["sleeptime"] = opt[1]
elif opt[0] in ("-t", "--runtime"):
_TASK_PARAMS["runtime"] = get_datetime(opt[1])
elif opt[0] in ("-P", "--priority"):
_TASK_PARAMS["priority"] = int(opt[1])
elif opt[0] in ("-N", "--name"):
_TASK_PARAMS["task_specific_name"] = opt[1]
elif opt[0] in ("-L", "--limit"):
_TASK_PARAMS["runtime_limit"] = parse_runtime_limit(opt[1])
elif opt[0] in ("--profile", ):
_TASK_PARAMS["profile"] += opt[1].split(',')
elif opt[0] in ("--post-process", ):
_TASK_PARAMS["post-process"] += [opt[1]]
elif opt[0] in ("-I","--sequence-id"):
_TASK_PARAMS["sequence-id"] = opt[1]
elif opt[0] in ("--stop-on-error", ):
_TASK_PARAMS["stop_queue_on_error"] = True
elif opt[0] in ("--continue-on-error", ):
_TASK_PARAMS["stop_queue_on_error"] = False
elif opt[0] in ("--fixed-time", ):
_TASK_PARAMS["fixed_time"] = True
elif opt[0] in ("--email-logs-to",):
_TASK_PARAMS["email_logs_to"] = opt[1].split(',')
elif not callable(task_submit_elaborate_specific_parameter_fnc) or \
not task_submit_elaborate_specific_parameter_fnc(opt[0],
opt[1], opts, args):
_usage(1, help_specific_usage=help_specific_usage, description=description)
except StandardError, e:
        _usage(1, e, help_specific_usage=help_specific_usage, description=description)
if callable(task_submit_check_options_fnc):
if not task_submit_check_options_fnc():
_usage(1, help_specific_usage=help_specific_usage, description=description)
def task_set_option(key, value):
"""Set an value to key in the option dictionary of the task"""
global _OPTIONS
try:
_OPTIONS[key] = value
except NameError:
_OPTIONS = {key : value}
def task_get_option(key, default=None):
"""Returns the value corresponding to key in the option dictionary of the task"""
try:
return _OPTIONS.get(key, default)
except NameError:
return default
def task_has_option(key):
"""Map the has_key query to _OPTIONS"""
try:
return _OPTIONS.has_key(key)
except NameError:
return False
def task_get_task_param(key, default=None):
"""Returns the value corresponding to the particular task param"""
try:
return _TASK_PARAMS.get(key, default)
except NameError:
return default
def task_set_task_param(key, value):
"""Set the value corresponding to the particular task param"""
global _TASK_PARAMS
try:
_TASK_PARAMS[key] = value
except NameError:
_TASK_PARAMS = {key : value}
def task_update_progress(msg):
"""Updates progress information in the BibSched task table."""
write_message("Updating task progress to %s." % msg, verbose=9)
if "task_id" in _TASK_PARAMS:
return run_sql("UPDATE schTASK SET progress=%s where id=%s",
(msg, _TASK_PARAMS["task_id"]))
def task_update_status(val):
"""Updates status information in the BibSched task table."""
write_message("Updating task status to %s." % val, verbose=9)
if "task_id" in _TASK_PARAMS:
return run_sql("UPDATE schTASK SET status=%s where id=%s",
(val, _TASK_PARAMS["task_id"]))
def task_read_status():
"""Read status information in the BibSched task table."""
res = run_sql("SELECT status FROM schTASK where id=%s",
(_TASK_PARAMS['task_id'],), 1)
try:
out = res[0][0]
except:
out = 'UNKNOWN'
return out
def write_messages(msgs, stream=None, verbose=1):
"""Write many messages through write_message"""
if stream is None:
stream = sys.stdout
for msg in msgs.split('\n'):
write_message(msg, stream, verbose)
def write_message(msg, stream=None, verbose=1):
"""Write message and flush output stream (may be sys.stdout or sys.stderr).
Useful for debugging stuff.
    @note: msg could be a callable with no parameters. In this case it is
    called in order to obtain the string to be printed.
"""
if stream is None:
stream = sys.stdout
if msg and _TASK_PARAMS['verbose'] >= verbose:
if callable(msg):
msg = msg()
if stream == sys.stdout:
logging.info(msg)
elif stream == sys.stderr:
logging.error(msg)
else:
sys.stderr.write("Unknown stream %s. [must be sys.stdout or sys.stderr]\n" % stream)
else:
logging.debug(msg)
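# Usage sketch (hypothetical call, not part of the original module): because msg
# may be a callable, expensive formatting can be deferred until the verbosity
# threshold is actually met, e.g.:
#   write_message(lambda: "parsed %d records" % len(records), verbose=9)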
_RE_SHIFT = re.compile(r"([-\+]{0,1})([\d]+)([dhms])")
def get_datetime(var, format_string="%Y-%m-%d %H:%M:%S", now=None):
"""Returns a date string according to the format string.
It can handle normal date strings and shifts with respect
to now."""
date = now or datetime.datetime.now()
factors = {"d": 24 * 3600, "h": 3600, "m": 60, "s": 1}
m = _RE_SHIFT.match(var)
if m:
sign = m.groups()[0] == "-" and -1 or 1
factor = factors[m.groups()[2]]
value = float(m.groups()[1])
delta = sign * factor * value
while delta > 0 and date < datetime.datetime.now():
date = date + datetime.timedelta(seconds=delta)
date = date.strftime(format_string)
else:
date = time.strptime(var, format_string)
date = time.strftime(format_string, date)
return date
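# Illustrative behaviour of get_datetime (examples are inferred from the code
# above, not taken from the original documentation):
#   get_datetime('2002-10-27 13:57:26') -> '2002-10-27 13:57:26' (parsed and reformatted)
#   get_datetime('+1d')                 -> roughly the current time plus one day
#   get_datetime('1h', now=old_runtime) -> old_runtime advanced in one-hour steps
#                                          until it passes the current time, which
#                                          is how periodic tasks avoid drifting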
def task_sleep_now_if_required(can_stop_too=False):
"""This function should be called during safe state of BibTask,
e.g. after flushing caches or outside of run_sql calls.
"""
status = task_read_status()
write_message('Entering task_sleep_now_if_required with status=%s' % status, verbose=9)
if status == 'ABOUT TO SLEEP':
write_message("sleeping...")
task_update_status("SLEEPING")
signal.signal(signal.SIGTSTP, _task_sig_dumb)
os.kill(os.getpid(), signal.SIGSTOP)
time.sleep(1)
if task_read_status() == 'NOW STOP':
if can_stop_too:
write_message("stopped")
task_update_status("STOPPED")
sys.exit(0)
else:
write_message("stopping as soon as possible...")
task_update_status('ABOUT TO STOP')
else:
write_message("... continuing...")
task_update_status("CONTINUING")
signal.signal(signal.SIGTSTP, _task_sig_sleep)
elif status == 'ABOUT TO STOP':
if can_stop_too:
write_message("stopped")
task_update_status("STOPPED")
sys.exit(0)
else:
## I am a capricious baby. At least I am going to sleep :-)
write_message("sleeping...")
task_update_status("SLEEPING")
signal.signal(signal.SIGTSTP, _task_sig_dumb)
os.kill(os.getpid(), signal.SIGSTOP)
time.sleep(1)
## Putting back the status to "ABOUT TO STOP"
write_message("... continuing...")
task_update_status("ABOUT TO STOP")
signal.signal(signal.SIGTSTP, _task_sig_sleep)
if can_stop_too:
runtime_limit = task_get_option("limit")
if runtime_limit is not None:
if not (runtime_limit[0] <= datetime.datetime.now() <= runtime_limit[1]):
write_message("stopped (outside runtime limit)")
task_update_status("STOPPED")
sys.exit(0)
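# Usage sketch (hypothetical task body, names are illustrative): long-running
# tasks are expected to call this between units of work so that bibsched can
# pause or stop them at a safe point, e.g.:
#   for i, chunk in enumerate(chunks):
#       process(chunk)
#       task_update_progress("Processed %d/%d chunks" % (i + 1, len(chunks)))
#       task_sleep_now_if_required(can_stop_too=True)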
def authenticate(user, authorization_action, authorization_msg=""):
"""Authenticate the user against the user database.
Check for its password, if it exists.
Check for authorization_action access rights.
Return user name upon authorization success,
do system exit upon authorization failure.
"""
# With SSO it's impossible to check for pwd
if CFG_EXTERNAL_AUTH_USING_SSO or os.path.basename(sys.argv[0]) in CFG_VALID_PROCESSES_NO_AUTH_NEEDED:
return user
if authorization_msg:
print authorization_msg
print "=" * len(authorization_msg)
if user == "":
print >> sys.stdout, "\rUsername: ",
try:
user = sys.stdin.readline().lower().strip()
except EOFError:
sys.stderr.write("\n")
sys.exit(1)
except KeyboardInterrupt:
sys.stderr.write("\n")
sys.exit(1)
else:
print >> sys.stdout, "\rUsername:", user
## first check user:
# p_un passed may be an email or a nickname:
res = run_sql("select id from user where email=%s", (user,), 1) + \
run_sql("select id from user where nickname=%s", (user,), 1)
if not res:
print "Sorry, %s does not exist." % user
sys.exit(1)
else:
uid = res[0][0]
ok = False
login_method = get_user_preferences(uid)['login_method']
if not CFG_EXTERNAL_AUTHENTICATION[login_method]:
#Local authentication, let's see if we want passwords.
res = run_sql("select id from user where id=%s "
"and password=AES_ENCRYPT(email,'')",
(uid,), 1)
if res:
ok = True
if not ok:
try:
password_entered = getpass.getpass()
except EOFError:
sys.stderr.write("\n")
sys.exit(1)
except KeyboardInterrupt:
sys.stderr.write("\n")
sys.exit(1)
if not CFG_EXTERNAL_AUTHENTICATION[login_method]:
res = run_sql("select id from user where id=%s "
"and password=AES_ENCRYPT(email, %s)",
(uid, password_entered), 1)
if res:
ok = True
else:
if CFG_EXTERNAL_AUTHENTICATION[login_method].auth_user(get_email(uid), password_entered):
ok = True
if not ok:
print "Sorry, wrong credentials for %s." % user
sys.exit(1)
else:
## secondly check authorization for the authorization_action:
(auth_code, auth_message) = acc_authorize_action(uid, authorization_action)
if auth_code != 0:
print auth_message
sys.exit(1)
return user
def _task_submit(argv, authorization_action, authorization_msg):
"""Submits task to the BibSched task queue. This is what people will
be invoking via command line."""
    ## check that we are running under the expected process user:
    check_running_process_user()
    ## authenticate user:
_TASK_PARAMS['user'] = authenticate(_TASK_PARAMS["user"], authorization_action, authorization_msg)
## submit task:
if _TASK_PARAMS['task_specific_name']:
task_name = '%s:%s' % (_TASK_PARAMS['task_name'], _TASK_PARAMS['task_specific_name'])
else:
task_name = _TASK_PARAMS['task_name']
write_message("storing task options %s\n" % argv, verbose=9)
verbose_argv = 'Will execute: %s' % ' '.join([escape_shell_arg(str(arg)) for arg in argv])
_TASK_PARAMS['task_id'] = run_sql("""INSERT INTO schTASK (proc,user,
runtime,sleeptime,status,progress,arguments,priority,sequenceid)
VALUES (%s,%s,%s,%s,'WAITING',%s,%s,%s,%s)""",
(task_name, _TASK_PARAMS['user'], _TASK_PARAMS["runtime"],
_TASK_PARAMS["sleeptime"], verbose_argv, marshal.dumps(argv), _TASK_PARAMS['priority'], _TASK_PARAMS['sequence-id']))
## update task number:
write_message("Task #%d submitted." % _TASK_PARAMS['task_id'])
return _TASK_PARAMS['task_id']
def _task_get_options(task_id, task_name):
"""Returns options for the task 'id' read from the BibSched task
queue table."""
out = {}
res = run_sql("SELECT arguments FROM schTASK WHERE id=%s AND proc LIKE %s",
(task_id, task_name+'%'))
try:
out = marshal.loads(res[0][0])
except:
write_message("Error: %s task %d does not seem to exist." \
% (task_name, task_id), sys.stderr)
task_update_status('ERROR')
sys.exit(1)
write_message('Options retrieved: %s' % (out, ), verbose=9)
return out
def _task_email_logs():
"""
In case this was requested, emails the logs.
"""
email_logs_to = task_get_task_param('email_logs_to')
if not email_logs_to:
return
status = task_read_status()
task_name = task_get_task_param('task_name')
task_specific_name = task_get_task_param('task_specific_name')
if task_specific_name:
task_name += ':' + task_specific_name
runtime = task_get_task_param('runtime')
title = "Execution of %s: %s" % (task_name, status)
body = """
Attached you can find the stdout and stderr logs of the execution of
name: %s
id: %s
runtime: %s
options: %s
status: %s
""" % (task_name, _TASK_PARAMS['task_id'], runtime, _OPTIONS, status)
err_file = os.path.join(CFG_LOGDIR, 'bibsched_task_%d.err' % _TASK_PARAMS['task_id'])
log_file = os.path.join(CFG_LOGDIR, 'bibsched_task_%d.log' % _TASK_PARAMS['task_id'])
return send_email(CFG_SITE_SUPPORT_EMAIL, email_logs_to, title, body, attachments=[(log_file, 'text/plain'), (err_file, 'text/plain')])
def _task_run(task_run_fnc):
"""Runs the task by fetching arguments from the BibSched task queue.
This is what BibSched will be invoking via daemon call.
@param task_run_fnc: will be called as the main core function. Must return
False in case of errors.
Return True in case of success and False in case of failure."""
from invenio.bibtasklet import _TASKLETS
## We prepare the pid file inside /prefix/var/run/taskname_id.pid
check_running_process_user()
try:
pidfile_name = os.path.join(CFG_PREFIX, 'var', 'run',
'bibsched_task_%d.pid' % _TASK_PARAMS['task_id'])
pidfile = open(pidfile_name, 'w')
pidfile.write(str(os.getpid()))
pidfile.close()
except OSError:
register_exception(alert_admin=True)
task_update_status("ERROR")
return False
## check task status:
task_status = task_read_status()
if task_status not in ("WAITING", "SCHEDULED"):
write_message("Error: The task #%d is %s. I expected WAITING or SCHEDULED." %
(_TASK_PARAMS['task_id'], task_status), sys.stderr)
return False
time_now = datetime.datetime.now()
if _TASK_PARAMS['runtime_limit'] is not None and os.environ.get('BIBSCHED_MODE', 'manual') != 'manual':
if not _TASK_PARAMS['runtime_limit'][0][0] <= time_now <= _TASK_PARAMS['runtime_limit'][0][1]:
if time_now <= _TASK_PARAMS['runtime_limit'][0][0]:
new_runtime = _TASK_PARAMS['runtime_limit'][0][0].strftime("%Y-%m-%d %H:%M:%S")
else:
new_runtime = _TASK_PARAMS['runtime_limit'][1][0].strftime("%Y-%m-%d %H:%M:%S")
progress = run_sql("SELECT progress FROM schTASK WHERE id=%s", (_TASK_PARAMS['task_id'], ))
if progress:
progress = progress[0][0]
else:
progress = ''
g = re.match(r'Postponed (\d+) time\(s\)', progress)
if g:
postponed_times = int(g.group(1))
else:
postponed_times = 0
if _TASK_PARAMS['sequence-id']:
## Also postponing other dependent tasks.
run_sql("UPDATE schTASK SET runtime=%s, progress=%s WHERE sequenceid=%s AND status='WAITING'", (new_runtime, 'Postponed as task %s' % _TASK_PARAMS['task_id'], _TASK_PARAMS['sequence-id'])) # kwalitee: disable=sql
run_sql("UPDATE schTASK SET runtime=%s, status='WAITING', progress=%s, host='' WHERE id=%s", (new_runtime, 'Postponed %d time(s)' % (postponed_times + 1), _TASK_PARAMS['task_id'])) # kwalitee: disable=sql
write_message("Task #%d postponed because outside of runtime limit" % _TASK_PARAMS['task_id'])
return True
# Make sure the host field is updated
# It will not be updated properly when we run
# a task from the cli (without using the bibsched monitor)
host = bibsched_get_host(_TASK_PARAMS['task_id'])
if host and host != gethostname():
write_message("Error: The task #%d is bound to %s." %
(_TASK_PARAMS['task_id'], host), sys.stderr)
return False
else:
bibsched_set_host(_TASK_PARAMS['task_id'], gethostname())
## initialize signal handler:
signal.signal(signal.SIGUSR2, signal.SIG_IGN)
signal.signal(signal.SIGTSTP, _task_sig_sleep)
signal.signal(signal.SIGTERM, _task_sig_stop)
signal.signal(signal.SIGQUIT, _task_sig_stop)
signal.signal(signal.SIGABRT, _task_sig_suicide)
signal.signal(signal.SIGINT, _task_sig_stop)
## we can run the task now:
write_message("Task #%d started." % _TASK_PARAMS['task_id'])
task_update_status("RUNNING")
## run the task:
_TASK_PARAMS['task_starting_time'] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
sleeptime = _TASK_PARAMS['sleeptime']
try:
try:
if callable(task_run_fnc) and task_run_fnc():
task_update_status("DONE")
else:
task_update_status("DONE WITH ERRORS")
except SystemExit:
pass
except:
write_message(traceback.format_exc()[:-1])
register_exception(alert_admin=True)
if task_get_task_param('stop_queue_on_error'):
task_update_status("ERROR")
else:
task_update_status("CERROR")
finally:
task_status = task_read_status()
if sleeptime:
argv = _task_get_options(_TASK_PARAMS['task_id'], _TASK_PARAMS['task_name'])
verbose_argv = 'Will execute: %s' % ' '.join([escape_shell_arg(str(arg)) for arg in argv])
            # Here we check whether the task can shift away or has to be run
            # at a fixed time
if task_get_task_param('fixed_time') or _TASK_PARAMS['task_name'] in CFG_BIBTASK_FIXEDTIMETASKS:
old_runtime = run_sql("SELECT runtime FROM schTASK WHERE id=%s", (_TASK_PARAMS['task_id'], ))[0][0]
else:
old_runtime = None
new_runtime = get_datetime(sleeptime, now=old_runtime)
## The task is a daemon. We resubmit it
if task_status == 'DONE':
## It has finished in a good way. We recycle the database row
run_sql("UPDATE schTASK SET runtime=%s, status='WAITING', progress=%s, host='' WHERE id=%s", (new_runtime, verbose_argv, _TASK_PARAMS['task_id']))
write_message("Task #%d finished and resubmitted." % _TASK_PARAMS['task_id'])
elif task_status == 'STOPPED':
run_sql("UPDATE schTASK SET status='WAITING', progress=%s, host='' WHERE id=%s", (verbose_argv, _TASK_PARAMS['task_id'], ))
write_message("Task #%d stopped and resubmitted." % _TASK_PARAMS['task_id'])
else:
## We keep the bad result and we resubmit with another id.
#res = run_sql('SELECT proc,user,sleeptime,arguments,priority FROM schTASK WHERE id=%s', (_TASK_PARAMS['task_id'], ))
#proc, user, sleeptime, arguments, priority = res[0]
#run_sql("""INSERT INTO schTASK (proc,user,
#runtime,sleeptime,status,arguments,priority)
#VALUES (%s,%s,%s,%s,'WAITING',%s, %s)""",
#(proc, user, new_runtime, sleeptime, arguments, priority))
write_message("Task #%d finished but not resubmitted. [%s]" % (_TASK_PARAMS['task_id'], task_status))
else:
## we are done:
write_message("Task #%d finished. [%s]" % (_TASK_PARAMS['task_id'], task_status))
## Removing the pid
os.remove(pidfile_name)
    # Let's call the post-process tasklets
if task_get_task_param("post-process"):
split = re.compile(r"(bst_.*)\[(.*)\]")
for tasklet in task_get_task_param("post-process"):
if not split.match(tasklet): # wrong syntax
_usage(1, "There is an error in the post processing option "
"for this task.")
aux_tasklet = split.match(tasklet)
_TASKLETS[aux_tasklet.group(1)](**eval("dict(%s)" % (aux_tasklet.group(2))))
return True
def _usage(exitcode=1, msg="", help_specific_usage="", description=""):
"""Prints usage info."""
if msg:
sys.stderr.write("Error: %s.\n" % msg)
sys.stderr.write("Usage: %s [options]\n" % sys.argv[0])
if help_specific_usage:
sys.stderr.write("Command options:\n")
sys.stderr.write(help_specific_usage)
sys.stderr.write(" Scheduling options:\n")
sys.stderr.write(" -u, --user=USER\tUser name under which to submit this"
" task.\n")
sys.stderr.write(" -t, --runtime=TIME\tTime to execute the task. [default=now]\n"
"\t\t\tExamples: +15s, 5m, 3h, 2002-10-27 13:57:26.\n")
sys.stderr.write(" -s, --sleeptime=SLEEP\tSleeping frequency after"
" which to repeat the task.\n"
"\t\t\tExamples: 30m, 2h, 1d. [default=no]\n")
sys.stderr.write(" --fixed-time\t\tAvoid drifting of execution time when using --sleeptime\n")
sys.stderr.write(" -I, --sequence-id=SEQUENCE-ID\tSequence Id of the current process\n")
sys.stderr.write(" -L --limit=LIMIT\tTime limit when it is"
" allowed to execute the task.\n"
"\t\t\tExamples: 22:00-03:00, Sunday 01:00-05:00.\n"
"\t\t\tSyntax: [Wee[kday]] [hh[:mm][-hh[:mm]]].\n")
sys.stderr.write(" -P, --priority=PRI\tTask priority (0=default, 1=higher, etc).\n")
sys.stderr.write(" -N, --name=NAME\tTask specific name (advanced option).\n\n")
sys.stderr.write(" General options:\n")
sys.stderr.write(" -h, --help\t\tPrint this help.\n")
sys.stderr.write(" -V, --version\t\tPrint version information.\n")
sys.stderr.write(" -v, --verbose=LEVEL\tVerbose level (0=min,"
" 1=default, 9=max).\n")
sys.stderr.write(" --profile=STATS\tPrint profile information. STATS is a comma-separated\n\t\t\tlist of desired output stats (calls, cumulative,\n\t\t\tfile, line, module, name, nfl, pcalls, stdname, time).\n")
sys.stderr.write(" --stop-on-error\tIn case of unrecoverable error stop the bibsched queue.\n")
sys.stderr.write(" --continue-on-error\tIn case of unrecoverable error don't stop the bibsched queue.\n")
sys.stderr.write(" --post-process=BIB_TASKLET_NAME[parameters]\tPostprocesses the specified\n\t\t\tbibtasklet with the given parameters between square\n\t\t\tbrackets.\n")
sys.stderr.write("\t\t\tExample:--post-process \"bst_send_email[fromaddr=\n\t\t\t'foo@xxx.com', toaddr='bar@xxx.com', subject='hello',\n\t\t\tcontent='help']\"\n")
sys.stderr.write(" --email-logs-to=EMAILS Sends an email with the results of the execution\n\t\t\tof the task, and attached the logs (EMAILS could be a comma-\n\t\t\tseparated lists of email addresses)\n")
if description:
sys.stderr.write(description)
sys.exit(exitcode)
def _task_sig_sleep(sig, frame):
"""Signal handler for the 'sleep' signal sent by BibSched."""
signal.signal(signal.SIGTSTP, signal.SIG_IGN)
write_message("task_sig_sleep(), got signal %s frame %s"
% (sig, frame), verbose=9)
write_message("sleeping as soon as possible...")
_db_login(relogin=1)
task_update_status("ABOUT TO SLEEP")
def _task_sig_stop(sig, frame):
"""Signal handler for the 'stop' signal sent by BibSched."""
write_message("task_sig_stop(), got signal %s frame %s"
% (sig, frame), verbose=9)
write_message("stopping as soon as possible...")
_db_login(relogin=1) # To avoid concurrency with an interrupted run_sql call
task_update_status("ABOUT TO STOP")
def _task_sig_suicide(sig, frame):
"""Signal handler for the 'suicide' signal sent by BibSched."""
write_message("task_sig_suicide(), got signal %s frame %s"
% (sig, frame), verbose=9)
write_message("suiciding myself now...")
task_update_status("SUICIDING")
write_message("suicided")
_db_login(relogin=1)
task_update_status("SUICIDED")
sys.exit(1)
def _task_sig_dumb(sig, frame):
"""Dumb signal handler."""
pass
_RE_PSLINE = re.compile(r'^\s*(\w+)\s+(\w+)')
def guess_apache_process_user_from_ps():
"""Guess Apache process user by parsing the list of running processes."""
apache_users = []
try:
# Tested on Linux, Sun and MacOS X
for line in os.popen('ps -A -o user,comm').readlines():
g = _RE_PSLINE.match(line)
if g:
username = g.group(1)
process = os.path.basename(g.group(2))
if process in ('apache', 'apache2', 'httpd') :
if username not in apache_users and username != 'root':
apache_users.append(username)
except Exception, e:
print >> sys.stderr, "WARNING: %s" % e
return tuple(apache_users)
def guess_apache_process_user():
"""
Return the possible name of the user running the Apache server process.
(Look at running OS processes or look at OS users defined in /etc/passwd.)
"""
apache_users = guess_apache_process_user_from_ps() + ('apache2', 'apache', 'www-data')
for username in apache_users:
try:
userline = pwd.getpwnam(username)
return userline[0]
except KeyError:
pass
print >> sys.stderr, "ERROR: Cannot detect Apache server process user. Please set the correct value in CFG_BIBSCHED_PROCESS_USER."
sys.exit(1)
def check_running_process_user():
"""
Check that the user running this program is the same as the user
configured in CFG_BIBSCHED_PROCESS_USER or as the user running the
Apache webserver process.
"""
running_as_user = pwd.getpwuid(os.getuid())[0]
if CFG_BIBSCHED_PROCESS_USER:
# We have the expected bibsched process user defined in config,
# so check against her, not against Apache.
if running_as_user != CFG_BIBSCHED_PROCESS_USER:
print >> sys.stderr, """ERROR: You must run "%(x_proc)s" as the user set up in your
CFG_BIBSCHED_PROCESS_USER (seems to be "%(x_user)s").
You may want to do "sudo -u %(x_user)s %(x_proc)s ..." to do so.
If you think this is not right, please set CFG_BIBSCHED_PROCESS_USER
appropriately and rerun "inveniocfg --update-config-py".""" % \
{'x_proc': os.path.basename(sys.argv[0]), 'x_user': CFG_BIBSCHED_PROCESS_USER}
sys.exit(1)
elif running_as_user != guess_apache_process_user(): # not defined in config, check against Apache
print >> sys.stderr, """ERROR: You must run "%(x_proc)s" as the same user that runs your Apache server
process (seems to be "%(x_user)s").
You may want to do "sudo -u %(x_user)s %(x_proc)s ..." to do so.
If you think this is not right, please set CFG_BIBSCHED_PROCESS_USER
appropriately and rerun "inveniocfg --update-config-py".""" % \
{'x_proc': os.path.basename(sys.argv[0]), 'x_user': guess_apache_process_user()}
sys.exit(1)
return
license: gpl-2.0
hash: 5,392,646,837,147,061,000
line_mean: 41.622673
line_max: 228
alpha_frac: 0.583426
autogenerated: false
ratio: 3.780296
config_test: false
has_no_keywords: false
few_assignments: false

repo_name: alanmcruickshank/superset-dev
path: superset/connectors/druid/models.py
copies: 1
size: 45334
# pylint: disable=invalid-unary-operand-type
from collections import OrderedDict
from copy import deepcopy
from datetime import datetime, timedelta
import json
import logging
from multiprocessing import Pool
from dateutil.parser import parse as dparse
from flask import escape, Markup
from flask_appbuilder import Model
from flask_appbuilder.models.decorators import renders
from flask_babel import lazy_gettext as _
from pydruid.client import PyDruid
from pydruid.utils.aggregators import count
from pydruid.utils.filters import Bound, Dimension, Filter
from pydruid.utils.having import Aggregation
from pydruid.utils.postaggregator import (
Const, Field, HyperUniqueCardinality, Postaggregator, Quantile, Quantiles,
)
import requests
from six import string_types
import sqlalchemy as sa
from sqlalchemy import (
Boolean, Column, DateTime, ForeignKey, Integer, or_, String, Text,
)
from sqlalchemy.orm import backref, relationship
from superset import conf, db, import_util, sm, utils
from superset.connectors.base.models import BaseColumn, BaseDatasource, BaseMetric
from superset.models.helpers import AuditMixinNullable, QueryResult, set_perm
from superset.utils import (
DimSelector, DTTM_ALIAS, flasher, MetricPermException,
)
DRUID_TZ = conf.get('DRUID_TZ')
# Function wrapper because bound methods cannot
# be passed to processes
def _fetch_metadata_for(datasource):
return datasource.latest_metadata()
class JavascriptPostAggregator(Postaggregator):
def __init__(self, name, field_names, function):
self.post_aggregator = {
'type': 'javascript',
'fieldNames': field_names,
'name': name,
'function': function,
}
self.name = name
class CustomPostAggregator(Postaggregator):
"""A way to allow users to specify completely custom PostAggregators"""
def __init__(self, name, post_aggregator):
self.name = name
self.post_aggregator = post_aggregator
class DruidCluster(Model, AuditMixinNullable):
"""ORM object referencing the Druid clusters"""
__tablename__ = 'clusters'
type = 'druid'
id = Column(Integer, primary_key=True)
verbose_name = Column(String(250), unique=True)
# short unique name, used in permissions
cluster_name = Column(String(250), unique=True)
coordinator_host = Column(String(255))
coordinator_port = Column(Integer, default=8081)
coordinator_endpoint = Column(
String(255), default='druid/coordinator/v1/metadata')
broker_host = Column(String(255))
broker_port = Column(Integer, default=8082)
broker_endpoint = Column(String(255), default='druid/v2')
metadata_last_refreshed = Column(DateTime)
cache_timeout = Column(Integer)
def __repr__(self):
return self.verbose_name if self.verbose_name else self.cluster_name
def get_pydruid_client(self):
cli = PyDruid(
'http://{0}:{1}/'.format(self.broker_host, self.broker_port),
self.broker_endpoint)
return cli
def get_datasources(self):
endpoint = (
'http://{obj.coordinator_host}:{obj.coordinator_port}/'
'{obj.coordinator_endpoint}/datasources'
).format(obj=self)
return json.loads(requests.get(endpoint).text)
def get_druid_version(self):
endpoint = (
'http://{obj.coordinator_host}:{obj.coordinator_port}/status'
).format(obj=self)
return json.loads(requests.get(endpoint).text)['version']
def refresh_datasources(
self,
datasource_name=None,
merge_flag=True,
refreshAll=True):
"""Refresh metadata of all datasources in the cluster
If ``datasource_name`` is specified, only that datasource is updated
"""
self.druid_version = self.get_druid_version()
ds_list = self.get_datasources()
blacklist = conf.get('DRUID_DATA_SOURCE_BLACKLIST', [])
ds_refresh = []
if not datasource_name:
ds_refresh = list(filter(lambda ds: ds not in blacklist, ds_list))
elif datasource_name not in blacklist and datasource_name in ds_list:
ds_refresh.append(datasource_name)
else:
return
self.refresh_async(ds_refresh, merge_flag, refreshAll)
def refresh_async(self, datasource_names, merge_flag, refreshAll):
"""
        Fetches metadata for the specified datasources and
        merges it into the Superset database
"""
session = db.session
ds_list = (
session.query(DruidDatasource)
.filter(or_(DruidDatasource.datasource_name == name
for name in datasource_names))
)
ds_map = {ds.name: ds for ds in ds_list}
for ds_name in datasource_names:
datasource = ds_map.get(ds_name, None)
if not datasource:
datasource = DruidDatasource(datasource_name=ds_name)
with session.no_autoflush:
session.add(datasource)
flasher(
'Adding new datasource [{}]'.format(ds_name), 'success')
ds_map[ds_name] = datasource
elif refreshAll:
flasher(
'Refreshing datasource [{}]'.format(ds_name), 'info')
else:
del ds_map[ds_name]
continue
datasource.cluster = self
datasource.merge_flag = merge_flag
session.flush()
        # Prepare multiprocess execution
pool = Pool()
ds_refresh = list(ds_map.values())
metadata = pool.map(_fetch_metadata_for, ds_refresh)
pool.close()
pool.join()
for i in range(0, len(ds_refresh)):
datasource = ds_refresh[i]
cols = metadata[i]
col_objs_list = (
session.query(DruidColumn)
.filter(DruidColumn.datasource_name == datasource.datasource_name)
.filter(or_(DruidColumn.column_name == col for col in cols))
)
col_objs = {col.column_name: col for col in col_objs_list}
for col in cols:
if col == '__time': # skip the time column
continue
col_obj = col_objs.get(col, None)
if not col_obj:
col_obj = DruidColumn(
datasource_name=datasource.datasource_name,
column_name=col)
with session.no_autoflush:
session.add(col_obj)
datatype = cols[col]['type']
if datatype == 'STRING':
col_obj.groupby = True
col_obj.filterable = True
if datatype == 'hyperUnique' or datatype == 'thetaSketch':
col_obj.count_distinct = True
# Allow sum/min/max for long or double
if datatype == 'LONG' or datatype == 'DOUBLE':
col_obj.sum = True
col_obj.min = True
col_obj.max = True
col_obj.type = datatype
col_obj.datasource = datasource
datasource.generate_metrics_for(col_objs_list)
session.commit()
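    # Flow summary (descriptive comment, no additional behaviour): the datasource
    # rows are created or looked up first, latest_metadata() is then fetched for
    # all of them in a multiprocessing Pool, and finally the column and metric
    # rows are merged into the session and committed in one go.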
@property
def perm(self):
return '[{obj.cluster_name}].(id:{obj.id})'.format(obj=self)
def get_perm(self):
return self.perm
@property
def name(self):
return self.verbose_name if self.verbose_name else self.cluster_name
@property
def unique_name(self):
return self.verbose_name if self.verbose_name else self.cluster_name
class DruidColumn(Model, BaseColumn):
"""ORM model for storing Druid datasource column metadata"""
__tablename__ = 'columns'
datasource_name = Column(
String(255),
ForeignKey('datasources.datasource_name'))
# Setting enable_typechecks=False disables polymorphic inheritance.
datasource = relationship(
'DruidDatasource',
backref=backref('columns', cascade='all, delete-orphan'),
enable_typechecks=False)
dimension_spec_json = Column(Text)
export_fields = (
'datasource_name', 'column_name', 'is_active', 'type', 'groupby',
'count_distinct', 'sum', 'avg', 'max', 'min', 'filterable',
'description', 'dimension_spec_json',
)
def __repr__(self):
return self.column_name
@property
def expression(self):
return self.dimension_spec_json
@property
def dimension_spec(self):
if self.dimension_spec_json:
return json.loads(self.dimension_spec_json)
def get_metrics(self):
metrics = {}
metrics['count'] = DruidMetric(
metric_name='count',
verbose_name='COUNT(*)',
metric_type='count',
json=json.dumps({'type': 'count', 'name': 'count'}),
)
# Somehow we need to reassign this for UDAFs
if self.type in ('DOUBLE', 'FLOAT'):
corrected_type = 'DOUBLE'
else:
corrected_type = self.type
if self.sum and self.is_num:
mt = corrected_type.lower() + 'Sum'
name = 'sum__' + self.column_name
metrics[name] = DruidMetric(
metric_name=name,
metric_type='sum',
verbose_name='SUM({})'.format(self.column_name),
json=json.dumps({
'type': mt, 'name': name, 'fieldName': self.column_name}),
)
if self.avg and self.is_num:
mt = corrected_type.lower() + 'Avg'
name = 'avg__' + self.column_name
metrics[name] = DruidMetric(
metric_name=name,
metric_type='avg',
verbose_name='AVG({})'.format(self.column_name),
json=json.dumps({
'type': mt, 'name': name, 'fieldName': self.column_name}),
)
if self.min and self.is_num:
mt = corrected_type.lower() + 'Min'
name = 'min__' + self.column_name
metrics[name] = DruidMetric(
metric_name=name,
metric_type='min',
verbose_name='MIN({})'.format(self.column_name),
json=json.dumps({
'type': mt, 'name': name, 'fieldName': self.column_name}),
)
if self.max and self.is_num:
mt = corrected_type.lower() + 'Max'
name = 'max__' + self.column_name
metrics[name] = DruidMetric(
metric_name=name,
metric_type='max',
verbose_name='MAX({})'.format(self.column_name),
json=json.dumps({
'type': mt, 'name': name, 'fieldName': self.column_name}),
)
if self.count_distinct:
name = 'count_distinct__' + self.column_name
if self.type == 'hyperUnique' or self.type == 'thetaSketch':
metrics[name] = DruidMetric(
metric_name=name,
verbose_name='COUNT(DISTINCT {})'.format(self.column_name),
metric_type=self.type,
json=json.dumps({
'type': self.type,
'name': name,
'fieldName': self.column_name,
}),
)
else:
metrics[name] = DruidMetric(
metric_name=name,
verbose_name='COUNT(DISTINCT {})'.format(self.column_name),
metric_type='count_distinct',
json=json.dumps({
'type': 'cardinality',
'name': name,
'fieldNames': [self.column_name]}),
)
return metrics
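    # Illustrative output of get_metrics() (the column is an assumed example):
    # for a DOUBLE column named "duration" with sum/min/max enabled, the returned
    # dict would hold 'count', 'sum__duration' (doubleSum), 'min__duration'
    # (doubleMin) and 'max__duration' (doubleMax), each a DruidMetric whose JSON
    # references 'fieldName': 'duration'.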
def generate_metrics(self):
"""Generate metrics based on the column metadata"""
metrics = self.get_metrics()
dbmetrics = (
db.session.query(DruidMetric)
.filter(DruidCluster.cluster_name == self.datasource.cluster_name)
.filter(DruidMetric.datasource_name == self.datasource_name)
.filter(or_(
DruidMetric.metric_name == m for m in metrics
))
)
dbmetrics = {metric.metric_name: metric for metric in dbmetrics}
for metric in metrics.values():
metric.datasource_name = self.datasource_name
if not dbmetrics.get(metric.metric_name, None):
db.session.add(metric)
@classmethod
def import_obj(cls, i_column):
def lookup_obj(lookup_column):
return db.session.query(DruidColumn).filter(
DruidColumn.datasource_name == lookup_column.datasource_name,
DruidColumn.column_name == lookup_column.column_name).first()
return import_util.import_simple_obj(db.session, i_column, lookup_obj)
class DruidMetric(Model, BaseMetric):
"""ORM object referencing Druid metrics for a datasource"""
__tablename__ = 'metrics'
datasource_name = Column(
String(255),
ForeignKey('datasources.datasource_name'))
# Setting enable_typechecks=False disables polymorphic inheritance.
datasource = relationship(
'DruidDatasource',
backref=backref('metrics', cascade='all, delete-orphan'),
enable_typechecks=False)
json = Column(Text)
export_fields = (
'metric_name', 'verbose_name', 'metric_type', 'datasource_name',
'json', 'description', 'is_restricted', 'd3format',
)
@property
def expression(self):
return self.json
@property
def json_obj(self):
try:
obj = json.loads(self.json)
except Exception:
obj = {}
return obj
@property
def perm(self):
return (
'{parent_name}.[{obj.metric_name}](id:{obj.id})'
).format(obj=self,
parent_name=self.datasource.full_name,
) if self.datasource else None
@classmethod
def import_obj(cls, i_metric):
def lookup_obj(lookup_metric):
return db.session.query(DruidMetric).filter(
DruidMetric.datasource_name == lookup_metric.datasource_name,
DruidMetric.metric_name == lookup_metric.metric_name).first()
return import_util.import_simple_obj(db.session, i_metric, lookup_obj)
class DruidDatasource(Model, BaseDatasource):
"""ORM object referencing Druid datasources (tables)"""
__tablename__ = 'datasources'
type = 'druid'
query_langtage = 'json'
cluster_class = DruidCluster
metric_class = DruidMetric
column_class = DruidColumn
baselink = 'druiddatasourcemodelview'
# Columns
datasource_name = Column(String(255), unique=True)
is_hidden = Column(Boolean, default=False)
fetch_values_from = Column(String(100))
cluster_name = Column(
String(250), ForeignKey('clusters.cluster_name'))
cluster = relationship(
'DruidCluster', backref='datasources', foreign_keys=[cluster_name])
user_id = Column(Integer, ForeignKey('ab_user.id'))
owner = relationship(
sm.user_model,
backref=backref('datasources', cascade='all, delete-orphan'),
foreign_keys=[user_id])
export_fields = (
'datasource_name', 'is_hidden', 'description', 'default_endpoint',
'cluster_name', 'offset', 'cache_timeout', 'params',
)
@property
def database(self):
return self.cluster
@property
def connection(self):
return str(self.database)
@property
def num_cols(self):
return [c.column_name for c in self.columns if c.is_num]
@property
def name(self):
return self.datasource_name
@property
def schema(self):
ds_name = self.datasource_name or ''
name_pieces = ds_name.split('.')
if len(name_pieces) > 1:
return name_pieces[0]
else:
return None
@property
def schema_perm(self):
"""Returns schema permission if present, cluster one otherwise."""
return utils.get_schema_perm(self.cluster, self.schema)
def get_perm(self):
return (
'[{obj.cluster_name}].[{obj.datasource_name}]'
'(id:{obj.id})').format(obj=self)
@property
def link(self):
name = escape(self.datasource_name)
return Markup('<a href="{self.url}">{name}</a>').format(**locals())
@property
def full_name(self):
return utils.get_datasource_full_name(
self.cluster_name, self.datasource_name)
@property
def time_column_grains(self):
return {
'time_columns': [
'all', '5 seconds', '30 seconds', '1 minute',
'5 minutes', '1 hour', '6 hour', '1 day', '7 days',
'week', 'week_starting_sunday', 'week_ending_saturday',
'month',
],
'time_grains': ['now'],
}
def __repr__(self):
return self.datasource_name
@renders('datasource_name')
def datasource_link(self):
url = '/superset/explore/{obj.type}/{obj.id}/'.format(obj=self)
name = escape(self.datasource_name)
return Markup('<a href="{url}">{name}</a>'.format(**locals()))
def get_metric_obj(self, metric_name):
return [
m.json_obj for m in self.metrics
if m.metric_name == metric_name
][0]
@classmethod
def import_obj(cls, i_datasource, import_time=None):
"""Imports the datasource from the object to the database.
Metrics and columns and datasource will be overridden if exists.
This function can be used to import/export dashboards between multiple
        superset instances. Audit metadata isn't copied over.
"""
def lookup_datasource(d):
return db.session.query(DruidDatasource).join(DruidCluster).filter(
DruidDatasource.datasource_name == d.datasource_name,
DruidCluster.cluster_name == d.cluster_name,
).first()
def lookup_cluster(d):
return db.session.query(DruidCluster).filter_by(
cluster_name=d.cluster_name).one()
return import_util.import_datasource(
db.session, i_datasource, lookup_cluster, lookup_datasource,
import_time)
@staticmethod
def version_higher(v1, v2):
"""is v1 higher than v2
>>> DruidDatasource.version_higher('0.8.2', '0.9.1')
False
>>> DruidDatasource.version_higher('0.8.2', '0.6.1')
True
>>> DruidDatasource.version_higher('0.8.2', '0.8.2')
False
>>> DruidDatasource.version_higher('0.8.2', '0.9.BETA')
False
>>> DruidDatasource.version_higher('0.8.2', '0.9')
False
"""
def int_or_0(v):
try:
v = int(v)
except (TypeError, ValueError):
v = 0
return v
v1nums = [int_or_0(n) for n in v1.split('.')]
v2nums = [int_or_0(n) for n in v2.split('.')]
v1nums = (v1nums + [0, 0, 0])[:3]
v2nums = (v2nums + [0, 0, 0])[:3]
return v1nums[0] > v2nums[0] or \
(v1nums[0] == v2nums[0] and v1nums[1] > v2nums[1]) or \
(v1nums[0] == v2nums[0] and v1nums[1] == v2nums[1] and v1nums[2] > v2nums[2])
def latest_metadata(self):
"""Returns segment metadata from the latest segment"""
logging.info('Syncing datasource [{}]'.format(self.datasource_name))
client = self.cluster.get_pydruid_client()
results = client.time_boundary(datasource=self.datasource_name)
if not results:
return
max_time = results[0]['result']['maxTime']
max_time = dparse(max_time)
# Query segmentMetadata for 7 days back. However, due to a bug,
# we need to set this interval to more than 1 day ago to exclude
# realtime segments, which triggered a bug (fixed in druid 0.8.2).
# https://groups.google.com/forum/#!topic/druid-user/gVCqqspHqOQ
lbound = (max_time - timedelta(days=7)).isoformat()
if not self.version_higher(self.cluster.druid_version, '0.8.2'):
rbound = (max_time - timedelta(1)).isoformat()
else:
rbound = max_time.isoformat()
segment_metadata = None
try:
segment_metadata = client.segment_metadata(
datasource=self.datasource_name,
intervals=lbound + '/' + rbound,
merge=self.merge_flag,
analysisTypes=[])
except Exception as e:
logging.warning('Failed first attempt to get latest segment')
logging.exception(e)
if not segment_metadata:
# if no segments in the past 7 days, look at all segments
lbound = datetime(1901, 1, 1).isoformat()[:10]
if not self.version_higher(self.cluster.druid_version, '0.8.2'):
rbound = datetime.now().isoformat()
else:
rbound = datetime(2050, 1, 1).isoformat()[:10]
try:
segment_metadata = client.segment_metadata(
datasource=self.datasource_name,
intervals=lbound + '/' + rbound,
merge=self.merge_flag,
analysisTypes=[])
except Exception as e:
logging.warning('Failed 2nd attempt to get latest segment')
logging.exception(e)
if segment_metadata:
return segment_metadata[-1]['columns']
def generate_metrics(self):
self.generate_metrics_for(self.columns)
def generate_metrics_for(self, columns):
metrics = {}
for col in columns:
metrics.update(col.get_metrics())
dbmetrics = (
db.session.query(DruidMetric)
.filter(DruidCluster.cluster_name == self.cluster_name)
.filter(DruidMetric.datasource_name == self.datasource_name)
.filter(or_(DruidMetric.metric_name == m for m in metrics))
)
dbmetrics = {metric.metric_name: metric for metric in dbmetrics}
for metric in metrics.values():
metric.datasource_name = self.datasource_name
if not dbmetrics.get(metric.metric_name, None):
with db.session.no_autoflush:
db.session.add(metric)
@classmethod
def sync_to_db_from_config(
cls,
druid_config,
user,
cluster,
refresh=True):
"""Merges the ds config from druid_config into one stored in the db."""
session = db.session
datasource = (
session.query(cls)
.filter_by(datasource_name=druid_config['name'])
.first()
)
# Create a new datasource.
if not datasource:
datasource = cls(
datasource_name=druid_config['name'],
cluster=cluster,
owner=user,
changed_by_fk=user.id,
created_by_fk=user.id,
)
session.add(datasource)
elif not refresh:
return
dimensions = druid_config['dimensions']
col_objs = (
session.query(DruidColumn)
.filter(DruidColumn.datasource_name == druid_config['name'])
.filter(or_(DruidColumn.column_name == dim for dim in dimensions))
)
col_objs = {col.column_name: col for col in col_objs}
for dim in dimensions:
col_obj = col_objs.get(dim, None)
if not col_obj:
col_obj = DruidColumn(
datasource_name=druid_config['name'],
column_name=dim,
groupby=True,
filterable=True,
# TODO: fetch type from Hive.
type='STRING',
datasource=datasource,
)
session.add(col_obj)
# Import Druid metrics
metric_objs = (
session.query(DruidMetric)
.filter(DruidMetric.datasource_name == druid_config['name'])
.filter(or_(DruidMetric.metric_name == spec['name']
for spec in druid_config['metrics_spec']))
)
metric_objs = {metric.metric_name: metric for metric in metric_objs}
for metric_spec in druid_config['metrics_spec']:
metric_name = metric_spec['name']
metric_type = metric_spec['type']
metric_json = json.dumps(metric_spec)
if metric_type == 'count':
metric_type = 'longSum'
metric_json = json.dumps({
'type': 'longSum',
'name': metric_name,
'fieldName': metric_name,
})
metric_obj = metric_objs.get(metric_name, None)
if not metric_obj:
metric_obj = DruidMetric(
metric_name=metric_name,
metric_type=metric_type,
verbose_name='%s(%s)' % (metric_type, metric_name),
datasource=datasource,
json=metric_json,
description=(
'Imported from the airolap config dir for %s' %
druid_config['name']),
)
session.add(metric_obj)
session.commit()
@staticmethod
def time_offset(granularity):
if granularity == 'week_ending_saturday':
return 6 * 24 * 3600 * 1000 # 6 days
return 0
# uses https://en.wikipedia.org/wiki/ISO_8601
# http://druid.io/docs/0.8.0/querying/granularities.html
# TODO: pass origin from the UI
@staticmethod
def granularity(period_name, timezone=None, origin=None):
if not period_name or period_name == 'all':
return 'all'
iso_8601_dict = {
'5 seconds': 'PT5S',
'30 seconds': 'PT30S',
'1 minute': 'PT1M',
'5 minutes': 'PT5M',
'1 hour': 'PT1H',
'6 hour': 'PT6H',
'one day': 'P1D',
'1 day': 'P1D',
'7 days': 'P7D',
'week': 'P1W',
'week_starting_sunday': 'P1W',
'week_ending_saturday': 'P1W',
'month': 'P1M',
}
granularity = {'type': 'period'}
if timezone:
granularity['timeZone'] = timezone
if origin:
dttm = utils.parse_human_datetime(origin)
granularity['origin'] = dttm.isoformat()
if period_name in iso_8601_dict:
granularity['period'] = iso_8601_dict[period_name]
if period_name in ('week_ending_saturday', 'week_starting_sunday'):
# use Sunday as start of the week
granularity['origin'] = '2016-01-03T00:00:00'
elif not isinstance(period_name, string_types):
granularity['type'] = 'duration'
granularity['duration'] = period_name
elif period_name.startswith('P'):
# identify if the string is the iso_8601 period
granularity['period'] = period_name
else:
granularity['type'] = 'duration'
granularity['duration'] = utils.parse_human_timedelta(
period_name).total_seconds() * 1000
return granularity
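    # Illustrative result (example arguments are assumptions, not from the code):
    #   DruidDatasource.granularity('1 day', timezone='UTC', origin='2016-01-01')
    #   -> {'type': 'period', 'timeZone': 'UTC',
    #       'origin': '2016-01-01T00:00:00', 'period': 'P1D'}
    # while a non-string such as 300000 falls through to a 'duration' granularity.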
@staticmethod
def _metrics_and_post_aggs(metrics, metrics_dict):
all_metrics = []
post_aggs = {}
def recursive_get_fields(_conf):
_type = _conf.get('type')
_field = _conf.get('field')
_fields = _conf.get('fields')
field_names = []
if _type in ['fieldAccess', 'hyperUniqueCardinality',
'quantile', 'quantiles']:
field_names.append(_conf.get('fieldName', ''))
if _field:
field_names += recursive_get_fields(_field)
if _fields:
for _f in _fields:
field_names += recursive_get_fields(_f)
return list(set(field_names))
for metric_name in metrics:
metric = metrics_dict[metric_name]
if metric.metric_type != 'postagg':
all_metrics.append(metric_name)
else:
mconf = metric.json_obj
all_metrics += recursive_get_fields(mconf)
all_metrics += mconf.get('fieldNames', [])
if mconf.get('type') == 'javascript':
post_aggs[metric_name] = JavascriptPostAggregator(
name=mconf.get('name', ''),
field_names=mconf.get('fieldNames', []),
function=mconf.get('function', ''))
elif mconf.get('type') == 'quantile':
post_aggs[metric_name] = Quantile(
mconf.get('name', ''),
mconf.get('probability', ''),
)
elif mconf.get('type') == 'quantiles':
post_aggs[metric_name] = Quantiles(
mconf.get('name', ''),
mconf.get('probabilities', ''),
)
elif mconf.get('type') == 'fieldAccess':
post_aggs[metric_name] = Field(mconf.get('name'))
elif mconf.get('type') == 'constant':
post_aggs[metric_name] = Const(
mconf.get('value'),
output_name=mconf.get('name', ''),
)
elif mconf.get('type') == 'hyperUniqueCardinality':
post_aggs[metric_name] = HyperUniqueCardinality(
mconf.get('name'),
)
elif mconf.get('type') == 'arithmetic':
post_aggs[metric_name] = Postaggregator(
mconf.get('fn', '/'),
mconf.get('fields', []),
mconf.get('name', ''))
else:
post_aggs[metric_name] = CustomPostAggregator(
mconf.get('name', ''),
mconf)
return all_metrics, post_aggs
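    # Sketch (the metric definition below is assumed, not from the codebase): a
    # stored metric with metric_type 'postagg' and JSON such as
    #   {"type": "arithmetic", "name": "ratio", "fn": "/",
    #    "fields": [{"type": "fieldAccess", "fieldName": "num"},
    #               {"type": "fieldAccess", "fieldName": "den"}]}
    # becomes a pydruid Postaggregator in post_aggs, while 'num' and 'den' are
    # collected into all_metrics so their aggregations are requested in the query.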
def values_for_column(self,
column_name,
limit=10000):
"""Retrieve some values for the given column"""
# TODO: Use Lexicographic TopNMetricSpec once supported by PyDruid
if self.fetch_values_from:
from_dttm = utils.parse_human_datetime(self.fetch_values_from)
else:
from_dttm = datetime(1970, 1, 1)
qry = dict(
datasource=self.datasource_name,
granularity='all',
intervals=from_dttm.isoformat() + '/' + datetime.now().isoformat(),
aggregations=dict(count=count('count')),
dimension=column_name,
metric='count',
threshold=limit,
)
client = self.cluster.get_pydruid_client()
client.topn(**qry)
df = client.export_pandas()
return [row[column_name] for row in df.to_records(index=False)]
def get_query_str(self, query_obj, phase=1, client=None):
return self.run_query(client=client, phase=phase, **query_obj)
def _add_filter_from_pre_query_data(self, df, dimensions, dim_filter):
ret = dim_filter
if df is not None and not df.empty:
new_filters = []
for unused, row in df.iterrows():
fields = []
for dim in dimensions:
f = Dimension(dim) == row[dim]
fields.append(f)
if len(fields) > 1:
term = Filter(type='and', fields=fields)
new_filters.append(term)
elif fields:
new_filters.append(fields[0])
if new_filters:
ff = Filter(type='or', fields=new_filters)
if not dim_filter:
ret = ff
else:
ret = Filter(type='and', fields=[ff, dim_filter])
return ret
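    # Sketch of the resulting filter (dimension values are illustrative): if the
    # phase-one dataframe contains rows for dimensions ['country', 'device'] such
    # as ('FR', 'mobile') and ('US', 'web'), the method builds
    #   (country == 'FR' AND device == 'mobile') OR (country == 'US' AND device == 'web')
    # and, when a dim_filter already exists, ANDs it with that OR expression.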
def run_query( # noqa / druid
self,
groupby, metrics,
granularity,
from_dttm, to_dttm,
filter=None, # noqa
is_timeseries=True,
timeseries_limit=None,
timeseries_limit_metric=None,
row_limit=None,
inner_from_dttm=None, inner_to_dttm=None,
orderby=None,
extras=None, # noqa
select=None, # noqa
columns=None, phase=2, client=None, form_data=None,
order_desc=True):
"""Runs a query against Druid and returns a dataframe.
"""
# TODO refactor into using a TBD Query object
client = client or self.cluster.get_pydruid_client()
if not is_timeseries:
granularity = 'all'
inner_from_dttm = inner_from_dttm or from_dttm
inner_to_dttm = inner_to_dttm or to_dttm
# add tzinfo to native datetime with config
from_dttm = from_dttm.replace(tzinfo=DRUID_TZ)
to_dttm = to_dttm.replace(tzinfo=DRUID_TZ)
timezone = from_dttm.tzname()
query_str = ''
metrics_dict = {m.metric_name: m for m in self.metrics}
columns_dict = {c.column_name: c for c in self.columns}
all_metrics, post_aggs = self._metrics_and_post_aggs(
metrics,
metrics_dict)
aggregations = OrderedDict()
for m in self.metrics:
if m.metric_name in all_metrics:
aggregations[m.metric_name] = m.json_obj
rejected_metrics = [
m.metric_name for m in self.metrics
if m.is_restricted and
m.metric_name in aggregations.keys() and
not sm.has_access('metric_access', m.perm)
]
if rejected_metrics:
raise MetricPermException(
'Access to the metrics denied: ' + ', '.join(rejected_metrics),
)
# the dimensions list with dimensionSpecs expanded
dimensions = []
groupby = [gb for gb in groupby if gb in columns_dict]
for column_name in groupby:
col = columns_dict.get(column_name)
dim_spec = col.dimension_spec
if dim_spec:
dimensions.append(dim_spec)
else:
dimensions.append(column_name)
qry = dict(
datasource=self.datasource_name,
dimensions=dimensions,
aggregations=aggregations,
granularity=DruidDatasource.granularity(
granularity,
timezone=timezone,
origin=extras.get('druid_time_origin'),
),
post_aggregations=post_aggs,
intervals=from_dttm.isoformat() + '/' + to_dttm.isoformat(),
)
filters = DruidDatasource.get_filters(filter, self.num_cols)
if filters:
qry['filter'] = filters
having_filters = self.get_having_filters(extras.get('having_druid'))
if having_filters:
qry['having'] = having_filters
order_direction = 'descending' if order_desc else 'ascending'
if len(groupby) == 0 and not having_filters:
del qry['dimensions']
client.timeseries(**qry)
if (
not having_filters and
len(groupby) == 1 and
order_desc and
not isinstance(list(qry.get('dimensions'))[0], dict)
):
dim = list(qry.get('dimensions'))[0]
if timeseries_limit_metric:
order_by = timeseries_limit_metric
else:
order_by = list(qry['aggregations'].keys())[0]
            # Limit the number of timeseries by doing a two-phase query
pre_qry = deepcopy(qry)
pre_qry['granularity'] = 'all'
pre_qry['threshold'] = min(row_limit,
timeseries_limit or row_limit)
pre_qry['metric'] = order_by
pre_qry['dimension'] = dim
del pre_qry['dimensions']
client.topn(**pre_qry)
query_str += '// Two phase query\n// Phase 1\n'
query_str += json.dumps(
client.query_builder.last_query.query_dict, indent=2)
query_str += '\n'
if phase == 1:
return query_str
query_str += (
"// Phase 2 (built based on phase one's results)\n")
df = client.export_pandas()
qry['filter'] = self._add_filter_from_pre_query_data(
df,
qry['dimensions'], filters)
qry['threshold'] = timeseries_limit or 1000
if row_limit and granularity == 'all':
qry['threshold'] = row_limit
qry['dimension'] = list(qry.get('dimensions'))[0]
qry['dimension'] = dim
del qry['dimensions']
qry['metric'] = list(qry['aggregations'].keys())[0]
client.topn(**qry)
elif len(groupby) > 1 or having_filters or not order_desc:
# If grouping on multiple fields or using a having filter
# we have to force a groupby query
if timeseries_limit and is_timeseries:
order_by = metrics[0] if metrics else self.metrics[0]
if timeseries_limit_metric:
order_by = timeseries_limit_metric
                # Limit the number of timeseries by doing a two-phase query
pre_qry = deepcopy(qry)
pre_qry['granularity'] = 'all'
pre_qry['limit_spec'] = {
'type': 'default',
'limit': min(timeseries_limit, row_limit),
'intervals': (
inner_from_dttm.isoformat() + '/' +
inner_to_dttm.isoformat()),
'columns': [{
'dimension': order_by,
'direction': order_direction,
}],
}
client.groupby(**pre_qry)
query_str += '// Two phase query\n// Phase 1\n'
query_str += json.dumps(
client.query_builder.last_query.query_dict, indent=2)
query_str += '\n'
if phase == 1:
return query_str
query_str += (
"// Phase 2 (built based on phase one's results)\n")
df = client.export_pandas()
qry['filter'] = self._add_filter_from_pre_query_data(
df,
qry['dimensions'],
filters,
)
qry['limit_spec'] = None
if row_limit:
qry['limit_spec'] = {
'type': 'default',
'limit': row_limit,
'columns': [{
'dimension': (
metrics[0] if metrics else self.metrics[0]),
'direction': order_direction,
}],
}
client.groupby(**qry)
query_str += json.dumps(
client.query_builder.last_query.query_dict, indent=2)
return query_str
def query(self, query_obj):
qry_start_dttm = datetime.now()
client = self.cluster.get_pydruid_client()
query_str = self.get_query_str(
client=client, query_obj=query_obj, phase=2)
df = client.export_pandas()
if df is None or df.size == 0:
raise Exception(_('No data was returned.'))
df.columns = [
DTTM_ALIAS if c == 'timestamp' else c for c in df.columns]
is_timeseries = query_obj['is_timeseries'] \
if 'is_timeseries' in query_obj else True
if (
not is_timeseries and
DTTM_ALIAS in df.columns):
del df[DTTM_ALIAS]
# Reordering columns
cols = []
if DTTM_ALIAS in df.columns:
cols += [DTTM_ALIAS]
cols += [col for col in query_obj['groupby'] if col in df.columns]
cols += [col for col in query_obj['metrics'] if col in df.columns]
df = df[cols]
time_offset = DruidDatasource.time_offset(query_obj['granularity'])
def increment_timestamp(ts):
dt = utils.parse_human_datetime(ts).replace(
tzinfo=DRUID_TZ)
return dt + timedelta(milliseconds=time_offset)
if DTTM_ALIAS in df.columns and time_offset:
df[DTTM_ALIAS] = df[DTTM_ALIAS].apply(increment_timestamp)
return QueryResult(
df=df,
query=query_str,
duration=datetime.now() - qry_start_dttm)
@staticmethod
def get_filters(raw_filters, num_cols): # noqa
filters = None
for flt in raw_filters:
if not all(f in flt for f in ['col', 'op', 'val']):
continue
col = flt['col']
op = flt['op']
eq = flt['val']
cond = None
if op in ('in', 'not in'):
eq = [
types.replace('"', '').strip()
if isinstance(types, string_types)
else types
for types in eq]
elif not isinstance(flt['val'], string_types):
eq = eq[0] if eq and len(eq) > 0 else ''
is_numeric_col = col in num_cols
if is_numeric_col:
if op in ('in', 'not in'):
eq = [utils.string_to_num(v) for v in eq]
else:
eq = utils.string_to_num(eq)
if op == '==':
cond = Dimension(col) == eq
elif op == '!=':
cond = Dimension(col) != eq
elif op in ('in', 'not in'):
fields = []
# ignore the filter if it has no value
if not len(eq):
continue
elif len(eq) == 1:
cond = Dimension(col) == eq[0]
else:
for s in eq:
fields.append(Dimension(col) == s)
cond = Filter(type='or', fields=fields)
if op == 'not in':
cond = ~cond
elif op == 'regex':
cond = Filter(type='regex', pattern=eq, dimension=col)
elif op == '>=':
cond = Bound(col, eq, None, alphaNumeric=is_numeric_col)
elif op == '<=':
cond = Bound(col, None, eq, alphaNumeric=is_numeric_col)
elif op == '>':
cond = Bound(
col, eq, None,
lowerStrict=True, alphaNumeric=is_numeric_col,
)
elif op == '<':
cond = Bound(
col, None, eq,
upperStrict=True, alphaNumeric=is_numeric_col,
)
if filters:
filters = Filter(type='and', fields=[
cond,
filters,
])
else:
filters = cond
return filters
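    # Rough usage sketch (hypothetical filter values): a raw filter such as
    #   {'col': 'country', 'op': 'in', 'val': ['US', 'FR']}
    # becomes Filter(type='or', fields=[Dimension('country') == 'US',
    # Dimension('country') == 'FR']), and successive raw filters are AND-ed
    # together into the single pydruid filter object returned here.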
def _get_having_obj(self, col, op, eq):
cond = None
if op == '==':
if col in self.column_names:
cond = DimSelector(dimension=col, value=eq)
else:
cond = Aggregation(col) == eq
elif op == '>':
cond = Aggregation(col) > eq
elif op == '<':
cond = Aggregation(col) < eq
return cond
def get_having_filters(self, raw_filters):
filters = None
reversed_op_map = {
'!=': '==',
'>=': '<',
'<=': '>',
}
for flt in raw_filters:
if not all(f in flt for f in ['col', 'op', 'val']):
continue
col = flt['col']
op = flt['op']
eq = flt['val']
cond = None
if op in ['==', '>', '<']:
cond = self._get_having_obj(col, op, eq)
elif op in reversed_op_map:
cond = ~self._get_having_obj(col, reversed_op_map[op], eq)
if filters:
filters = filters & cond
else:
filters = cond
return filters
@classmethod
def query_datasources_by_name(
cls, session, database, datasource_name, schema=None):
return (
session.query(cls)
.filter_by(cluster_name=database.id)
.filter_by(datasource_name=datasource_name)
.all()
)
sa.event.listen(DruidDatasource, 'after_insert', set_perm)
sa.event.listen(DruidDatasource, 'after_update', set_perm)
|
apache-2.0
| -5,881,308,706,200,281,000
| 35.677994
| 89
| 0.527088
| false
| 4.143118
| false
| false
| false
|
shsdev/khclass
|
khclarifai/khclarifai_predict.py
|
1
|
1302
|
#!/usr/bin/env python
# coding=UTF-8
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from math import floor
from clarifai.rest import ClarifaiApp
from config.configuration import data_path, test_set_id, clarifai_api_key, clarifai_model_name
def floored_percentage(val, digits):
"""Format float value as percentage string"""
val *= 10 ** (digits + 2)
return '{1:.{0}f}%'.format(digits, floor(val) / 10 ** digits)
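# Quick illustration (editorial example, not from the original file): the
# helper truncates rather than rounds, so
#   floored_percentage(0.66666, 2) -> '66.66%'
#   floored_percentage(0.5, 0)     -> '50%'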
def get_prediction_confidence(model, image_path):
"""Get the first value's float prediction value"""
print "Processing prediction for image: %s" % image_path
full_image_path = "%s/%s" % (data_path, image_path)
prediction_confidence = 0.0
result = model.predict_by_filename(full_image_path)
for o in result['outputs']:
concept_results = o['data']['concepts']
for concept_result in concept_results:
print concept_result['value']
prediction_confidence = float(concept_result['value'])
break
return prediction_confidence
if __name__ == '__main__':
app = ClarifaiApp(api_key=clarifai_api_key)
mdl = app.models.get(clarifai_model_name)
print floored_percentage(get_prediction_confidence(mdl, "images/test%s/%s" % (test_set_id, "EMK_1303.jpg")), 2)
|
gpl-3.0
| -7,671,868,139,313,991,000
| 34.189189
| 115
| 0.667435
| false
| 3.304569
| false
| false
| false
|
h2g2bob/ynmp-wikipedia-sync
|
chgparty_dot.py
|
1
|
5461
|
import csv
from collections import defaultdict
from collections import namedtuple
class Pty(object):
def __init__(self, ynmp, name, rank=3, color="white"):
self.ynmp = ynmp
self.name = name.replace('"', '').replace("'", "")
self.rank = rank
self.color = color
self.code = "".join(x for x in self.ynmp if x.isalpha())
def __hash__(self):
return hash(self.ynmp)
def __cmp__(self, other):
return cmp(self.ynmp, other)
parties = dict((x.ynmp, x) for x in (
Pty("Conservative Party", "Conservative", 0, "dodgerblue"),
Pty("Labour Party", "Labour", 0, "firebrick1"),
Pty("Liberal Democrats", "Lib Dem", 0, "orange"),
Pty("UK Independence Party (UKIP)", "UKIP", 1, "purple"),
Pty("Green Party", "Green", 1, "green"),
Pty("British National Party", "BNP"),
Pty("Christian Party \"Proclaiming Christ's Lordship\"", "Christian"),
Pty("English Democrats", "Eng Dem"),
Pty("Ulster Unionist Party", "UUP"),
Pty("Trade Unionist and Socialist Coalition", "TUSC"),
Pty("National Health Action Party", "NHA"),
))
party_others = Pty("Others", "Others")
def get_party(ynmp_name, args):
try:
party = parties[ynmp_name]
except KeyError:
if ynmp_name == "Independent":
party = Pty("Independent", "Independent", rank=0 if args.independent else 100)
else:
party = Pty(ynmp_name, ynmp_name)
if party.rank > 5 - args.hide_small:
party = party_others
return party
def format_name(name):
return name
def name_grouping_individual(l):
return [[x] for x in l]
def name_grouping_grouped(l):
return [l]
def print_digraph(by_parties, name_grouping, args):
print "digraph {"
for party in set(n for (n, _) in by_parties.keys()) | set(n for (_, n) in by_parties.keys()):
print "%s [label=\"%s\",style=filled,fillcolor=%s];" % (party.code, party.name, party.color if args.color else "white",)
for ((old, new), full_namelist) in by_parties.items():
for namelist in name_grouping(full_namelist):
print "%s -> %s [label=\"%s\", penwidth=%d, weight=%d, fontsize=10];" % (
old.code,
new.code,
"\\n".join(format_name(name) for name in namelist) if args.names else "",
len(namelist),
len(namelist))
print "}"
def main(args):
by_parties = defaultdict(list)
for _, name, old_name, new_name in csv.reader(open("chgparty.csv")):
old = get_party(old_name, args)
new = get_party(new_name, args)
by_parties[old, new].append(name)
if args.ignore_uup:
by_parties.pop(("Conservative and Unionist Party", "Ulster Unionist Party"), None) # pop with default avoids KeyError
if args.trim_parties:
by_parties = trim_parties(args, by_parties)
if not args.no_others:
by_parties = dict(((old, new), namelist) for ((old, new), namelist) in by_parties.items() if old != "Others" and new != "Others")
if not args.others_to_others:
by_parties = dict(((old, new), namelist) for ((old, new), namelist) in by_parties.items() if old != "Others" or new != "Others")
if args.trim:
by_parties = dict(((old, new), namelist) for ((old, new), namelist) in by_parties.items() if len(namelist) > args.trim or max((old.rank, new.rank)) < args.dont_trim_large)
print_digraph(by_parties, name_grouping_individual if args.single_line else name_grouping_grouped, args)
def trim_parties(args, by_parties):
counts = defaultdict(int)
for (old, new), namelist in by_parties.items():
counts[old] += len(namelist)
counts[new] += len(namelist)
to_trim = set(k for (k, v) in counts.items() if v <= args.trim_parties)
rtn = {}
for (old, new), namelist in by_parties.items():
if old in to_trim:
old = party_others
if new in to_trim:
new = party_others
rtn.setdefault((old, new), []).extend(namelist)
return rtn
if __name__=='__main__':
import argparse
import sys
parser = argparse.ArgumentParser()
parser.add_argument("-t", "--trim", action="count", default=0, help="Hide single defections (multiple times to hide less than N defections)")
parser.add_argument("-T", "--dont-trim-large", action="count", default=0, help="Do not hide single defections to/from large political parties")
parser.add_argument("-s", "--hide-small", action="count", default=0, help="Hide small parties (multiple times to hide more parties)")
parser.add_argument("-x", "--trim-parties", action="count", default=0, help="Trim parties with few defections")
parser.add_argument("-o", "--no-others", action="store_false", default=True, help="Hide the combined \"others\" for small parties")
parser.add_argument("-2", "--others-to-others", action="store_true", default=False, help="Show defections from \"others\" to itself")
parser.add_argument("-i", "--independent", action="store_true", default=False, help="Show independent and others as different")
parser.add_argument("-1", "--single-line", action="store_true", default=False, help="Show one line per candidate")
parser.add_argument("-c", "--no-color", action="store_false", dest="color", default=True, help="No color")
parser.add_argument("-n", "--no-names", action="store_false", dest="names", default=True, help="No names")
parser.add_argument("--no-ignore-uup", action="store_false", dest="ignore_uup", default=True, help="The UUP fielded a bunch of candidates jointly with the Conservative Party, using the name \"Conservative and Unionist Party\". The candidates were really UUP people, so this transition is boring.")
args = parser.parse_args()
if args.dont_trim_large and not args.trim:
raise ValueError("You can't use -T without -t")
main(args)
|
agpl-3.0
| -6,079,931,843,688,282,000
| 38.572464
| 298
| 0.680461
| false
| 2.942349
| false
| false
| false
|
underyx/TheMajorNews
|
main.py
|
1
|
2711
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import config
import requests
from requests_oauthlib import OAuth1
from base64 import b64encode
def get_access_token():
token = config.twitter_app_key + ':' + config.twitter_app_secret
h = {'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8',
'Authorization': b'Basic ' + b64encode(bytes(token, 'utf8'))}
print()
r = requests.post('https://api.twitter.com/oauth2/token',
data=b'grant_type=client_credentials', headers=h)
assert r.json()['token_type'] == 'bearer'
return r.json()['access_token']
def get_latest_tweet(token):
parameters = {'screen_name': 'TwoHeadlines',
'count': 1,
'trim_user': True}
headers = {'Authorization': 'Bearer ' + token}
r = requests.get('https://api.twitter.com/1.1/statuses/user_timeline.json',
params=parameters, headers=headers)
return r.json(encoding='utf8')[0]['text']
def do_translations(tweet, i=0):
i += 1
if i > config.run_limit:
return tweet
ko_parameters = {'q': tweet,
'format': 'text',
'target': 'ko',
'source': 'en',
'key': config.google_key}
ko_r = requests.get('https://www.googleapis.com/language/translate/v2',
params=ko_parameters)
ko_result = ko_r.json()['data']['translations'][0]['translatedText']
en_parameters = {'q': ko_result,
'format': 'text',
'target': 'en',
'source': 'ko',
'key': config.google_key}
en_r = requests.get('https://www.googleapis.com/language/translate/v2',
params=en_parameters)
en_result = en_r.json()['data']['translations'][0]['translatedText']
print('Translation #{} is: {}'.format(i, en_result))
return do_translations(en_result, i) if tweet != en_result else en_result
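# Hedged note on the recursion above: each pass sends the text English -> Korean
# -> English through the Google Translate API and stops either after
# config.run_limit passes or as soon as a round trip leaves the text unchanged,
# e.g. (hypothetical) do_translations("Dog bites man") might settle on
# "The dog bites the person" after a couple of iterations.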
def post_tweet(tweet):
if len(tweet) > 140:
tweet = tweet[:137] + "..."
auth = OAuth1(config.twitter_app_key, config.twitter_app_secret,
config.twitter_user_key, config.twitter_user_secret)
r = requests.post('https://api.twitter.com/1.1/statuses/update.json',
auth=auth, data={'status': tweet})
return r.json()
def main():
bearer_token = get_access_token()
latest_tweet = get_latest_tweet(bearer_token)
print('Latest Original is: ' + latest_tweet)
translation = do_translations(latest_tweet)
print('Translation is: ' + translation)
post_tweet(translation)
if __name__ == '__main__':
main()
|
mit
| 2,091,146,900,491,236,900
| 28.467391
| 79
| 0.569532
| false
| 3.683424
| true
| false
| false
|
hunse/vrep-python
|
dvs-play.py
|
1
|
1515
|
"""
Play DVS events in real time
TODO: deal with looping event times for recordings > 65 s
"""
import numpy as np
import matplotlib.pyplot as plt
import dvs
def close(a, b, atol=1e-8, rtol=1e-5):
return np.abs(a - b) < atol + rtol * b
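# Small sanity example (editorial, not in the original script): with the default
# tolerances, close(0.30000001, 0.3) is True while close(0.31, 0.3) is False,
# which is what lets the code below pick out events whose timestamps fall on a
# chosen frame time.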
def imshow(image, ax=None):
ax = plt.gca() if ax is None else ax
ax.imshow(image, vmin=-1, vmax=1, cmap='gray', interpolation=None)
def add_to_image(image, events):
for x, y, s, _ in events:
image[y, x] += 1 if s else -1
def as_image(events):
image = np.zeros((128, 128), dtype=float)
add_to_image(image, events)
return image
# filename = 'dvs.npz'
filename = 'dvs-ball-10ms.npz'
events = dvs.load(filename, dt_round=True)
udiffs = np.unique(np.diff(np.unique(events['t'])))
assert np.allclose(udiffs, 0.01)
plt.figure(1)
plt.clf()
times = [0.2, 0.3, 0.4, 0.5, 0.6, 0.7]
for i in range(6):
plt.subplot(2, 3, i+1)
imshow(as_image(events[close(events['t'], times[i])]))
plt.title("t = %0.3f" % times[i])
# plt.figure(1)
# plt.clf()
# image = np.zeros((128, 128), dtype=float)
# plt_image = plt.imshow(image, vmin=-1, vmax=1, cmap='gray', interpolation=None)
# plt.gca().invert_yaxis()
# while t0 < t_max:
# time.sleep(0.001)
# t1 = time.time() - t_world
# new_events = events[(ts > t0) & (ts < t1)]
# dt = t1 - t0
# image *= np.exp(-dt / 0.01)
# for x, y, s, _ in new_events:
# image[y, x] += 1 if s else -1
# plt_image.set_data(image)
# plt.draw()
# t0 = t1
plt.show()
|
gpl-2.0
| 5,992,263,968,722,449,000
| 21.279412
| 81
| 0.59538
| false
| 2.467427
| false
| false
| false
|
jasonmaier/CircularEconomyBlog
|
db_repository/versions/013_migration.py
|
1
|
1155
|
from sqlalchemy import *
from migrate import *
from migrate.changeset import schema
pre_meta = MetaData()
post_meta = MetaData()
tasks = Table('tasks', pre_meta,
Column('id', INTEGER, primary_key=True, nullable=False),
Column('priority', INTEGER, nullable=False),
Column('user_id', INTEGER),
Column('task', VARCHAR(length=140)),
)
tasks = Table('tasks', post_meta,
Column('id', Integer, primary_key=True, nullable=False),
Column('description', String(length=140)),
Column('priority', Integer),
Column('user_id', Integer),
)
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine; bind
# migrate_engine to your metadata
pre_meta.bind = migrate_engine
post_meta.bind = migrate_engine
pre_meta.tables['tasks'].columns['task'].drop()
post_meta.tables['tasks'].columns['description'].create()
def downgrade(migrate_engine):
# Operations to reverse the above upgrade go here.
pre_meta.bind = migrate_engine
post_meta.bind = migrate_engine
pre_meta.tables['tasks'].columns['task'].create()
post_meta.tables['tasks'].columns['description'].drop()
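# Editorial note (hedged): the upgrade path above drops the old `task` column
# and adds a new `description` String(140) column against the live engine, and
# downgrade reverses the two operations; no data is copied between the columns.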
|
bsd-3-clause
| 1,869,276,016,205,127,000
| 30.216216
| 68
| 0.688312
| false
| 3.799342
| false
| false
| false
|
dwaiter/django-filebrowser-old
|
filebrowser/base.py
|
1
|
5606
|
# coding: utf-8
# imports
import os, re, datetime
from time import gmtime, strftime
# django imports
from django.conf import settings
# filebrowser imports
from filebrowser.settings import *
from filebrowser.functions import get_file_type, url_join, is_selectable, get_version_path
# PIL import
if STRICT_PIL:
from PIL import Image
else:
try:
from PIL import Image
except ImportError:
import Image
class FileObject(object):
"""
The FileObject represents a File on the Server.
PATH has to be relative to MEDIA_ROOT.
"""
def __init__(self, path):
self.path = path
self.url_rel = path.replace("\\","/")
self.head = os.path.split(path)[0]
self.filename = os.path.split(path)[1]
self.filename_lower = self.filename.lower() # important for sorting
self.filetype = get_file_type(self.filename)
def _filesize(self):
"""
Filesize.
"""
if os.path.isfile(os.path.join(MEDIA_ROOT, self.path)) or os.path.isdir(os.path.join(MEDIA_ROOT, self.path)):
return os.path.getsize(os.path.join(MEDIA_ROOT, self.path))
return ""
filesize = property(_filesize)
def _date(self):
"""
Date.
"""
if os.path.isfile(os.path.join(MEDIA_ROOT, self.path)) or os.path.isdir(os.path.join(MEDIA_ROOT, self.path)):
return os.path.getmtime(os.path.join(MEDIA_ROOT, self.path))
return ""
date = property(_date)
def _datetime(self):
"""
Datetime Object.
"""
return datetime.datetime.fromtimestamp(self.date)
datetime = property(_datetime)
def _extension(self):
"""
Extension.
"""
return u"%s" % os.path.splitext(self.filename)[1]
extension = property(_extension)
def _filetype_checked(self):
if self.filetype == "Folder" and os.path.isdir(self.path_full):
return self.filetype
elif self.filetype != "Folder" and os.path.isfile(self.path_full):
return self.filetype
else:
return ""
filetype_checked = property(_filetype_checked)
def _path_full(self):
"""
Full server PATH including MEDIA_ROOT.
"""
return u"%s" % os.path.join(MEDIA_ROOT, self.path)
path_full = property(_path_full)
def _path_relative(self):
return self.path
path_relative = property(_path_relative)
def _path_relative_directory(self):
"""
Path relative to initial directory.
"""
directory_re = re.compile(r'^(%s)' % (DIRECTORY))
value = directory_re.sub('', self.path)
return u"%s" % value
path_relative_directory = property(_path_relative_directory)
def _folder(self):
directory_re = re.compile(r'^(%s)' % (DIRECTORY.rstrip('/')))
return u"%s/" % directory_re.sub('', self.head)
folder = property(_folder)
def _url_relative(self):
return self.url_rel
url_relative = property(_url_relative)
def _url_full(self):
"""
Full URL including MEDIA_URL.
"""
return u"%s" % url_join(MEDIA_URL, self.url_rel)
url_full = property(_url_full)
def _url_save(self):
"""
URL used for the filebrowsefield.
"""
if SAVE_FULL_URL:
return self.url_full
else:
return self.url_rel
url_save = property(_url_save)
def _url_thumbnail(self):
"""
Thumbnail URL.
"""
if self.filetype == "Image":
return u"%s" % url_join(MEDIA_URL, get_version_path(self.path, 'fb_thumb'))
else:
return ""
url_thumbnail = property(_url_thumbnail)
def url_admin(self):
if self.filetype_checked == "Folder":
directory_re = re.compile(r'^(%s)' % (DIRECTORY))
value = directory_re.sub('', self.path)
return u"%s" % value
else:
return u"%s" % url_join(MEDIA_URL, self.path)
def _dimensions(self):
"""
Image Dimensions.
"""
if self.filetype == 'Image':
try:
im = Image.open(os.path.join(MEDIA_ROOT, self.path))
return im.size
except:
pass
else:
return False
dimensions = property(_dimensions)
def _width(self):
"""
Image Width.
"""
return self.dimensions[0]
width = property(_width)
def _height(self):
"""
Image Height.
"""
return self.dimensions[1]
height = property(_height)
def _orientation(self):
"""
Image Orientation.
"""
if self.dimensions:
if self.dimensions[0] >= self.dimensions[1]:
return "Landscape"
else:
return "Portrait"
else:
return None
orientation = property(_orientation)
def _is_empty(self):
"""
True if Folder is empty, False if not.
"""
if os.path.isdir(self.path_full):
if not os.listdir(self.path_full):
return True
else:
return False
else:
return None
is_empty = property(_is_empty)
def __repr__(self):
return u"%s" % self.url_save
def __str__(self):
return u"%s" % self.url_save
def __unicode__(self):
return u"%s" % self.url_save
|
bsd-3-clause
| 1,552,659,356,722,771,000
| 26.082126
| 117
| 0.539422
| false
| 3.987198
| false
| false
| false
|
grengojbo/st2
|
st2client/st2client/models/core.py
|
1
|
11692
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
import logging
from functools import wraps
import six
from six.moves import urllib
from st2client.utils import httpclient
LOG = logging.getLogger(__name__)
def add_auth_token_to_kwargs_from_env(func):
@wraps(func)
def decorate(*args, **kwargs):
if not kwargs.get('token') and os.environ.get('ST2_AUTH_TOKEN', None):
kwargs['token'] = os.environ.get('ST2_AUTH_TOKEN')
return func(*args, **kwargs)
return decorate
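# Hedged usage sketch (endpoint URL is made up): any method decorated with
# add_auth_token_to_kwargs_from_env picks its token up from the environment,
# so with ST2_AUTH_TOKEN exported,
#   mgr = ResourceManager(Resource, 'https://st2.example.com/api')
#   mgr.get_all(limit=10)
# sends the request with token filled in without the caller passing it.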
class Resource(object):
# An alias to use for the resource if different than the class name.
_alias = None
# Display name of the resource. This may be different than its resource
# name specifically when the resource name is composed of multiple words.
_display_name = None
# URL path for the resource.
_url_path = None
# Plural form of the resource name. This will be used to build the
# latter part of the REST URL.
_plural = None
# Plural form of the resource display name.
_plural_display_name = None
# A list of class attributes which will be included in __repr__ return value
_repr_attributes = []
def __init__(self, *args, **kwargs):
for k, v in six.iteritems(kwargs):
setattr(self, k, v)
def to_dict(self, exclude_attributes=None):
"""
Return a dictionary representation of this object.
:param exclude_attributes: Optional list of attributes to exclude.
:type exclude_attributes: ``list``
:rtype: ``dict``
"""
exclude_attributes = exclude_attributes or []
attributes = self.__dict__.keys()
attributes = [attr for attr in attributes if not attr.startswith('__') and
attr not in exclude_attributes]
result = {}
for attribute in attributes:
value = getattr(self, attribute, None)
result[attribute] = value
return result
@classmethod
def get_alias(cls):
return cls._alias if cls._alias else cls.__name__
@classmethod
def get_display_name(cls):
return cls._display_name if cls._display_name else cls.__name__
@classmethod
def get_plural_name(cls):
if not cls._plural:
raise Exception('The %s class is missing class attributes '
'in its definition.' % cls.__name__)
return cls._plural
@classmethod
def get_plural_display_name(cls):
return (cls._plural_display_name
if cls._plural_display_name
else cls._plural)
@classmethod
def get_url_path_name(cls):
if cls._url_path:
return cls._url_path
return cls.get_plural_name().lower()
def serialize(self):
return dict((k, v)
for k, v in six.iteritems(self.__dict__)
if not k.startswith('_'))
@classmethod
def deserialize(cls, doc):
if type(doc) is not dict:
doc = json.loads(doc)
return cls(**doc)
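    # Illustrative round trip (example values only): serialize() drops any
    # attribute whose name starts with an underscore and deserialize() accepts
    # either a dict or a JSON string, so
    #   Resource.deserialize(Resource(name='x', _hidden=1).serialize())
    # yields a new Resource whose only attribute is name == 'x'.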
def __str__(self):
return str(self.__repr__())
def __repr__(self):
if not self._repr_attributes:
return super(Resource, self).__repr__()
attributes = []
for attribute in self._repr_attributes:
value = getattr(self, attribute, None)
attributes.append('%s=%s' % (attribute, value))
attributes = ','.join(attributes)
class_name = self.__class__.__name__
result = '<%s %s>' % (class_name, attributes)
return result
class ResourceManager(object):
def __init__(self, resource, endpoint, cacert=None, debug=False):
self.resource = resource
self.debug = debug
self.client = httpclient.HTTPClient(endpoint, cacert=cacert, debug=debug)
@staticmethod
def handle_error(response):
try:
content = response.json()
fault = content.get('faultstring', '') if content else ''
if fault:
response.reason += '\nMESSAGE: %s' % fault
except Exception as e:
response.reason += ('\nUnable to retrieve detailed message '
'from the HTTP response. %s\n' % str(e))
response.raise_for_status()
@add_auth_token_to_kwargs_from_env
def get_all(self, **kwargs):
# TODO: This is ugly, stop abusing kwargs
url = '/%s' % self.resource.get_url_path_name()
limit = kwargs.pop('limit', None)
pack = kwargs.pop('pack', None)
prefix = kwargs.pop('prefix', None)
params = {}
if limit and limit <= 0:
limit = None
if limit:
params['limit'] = limit
if pack:
params['pack'] = pack
if prefix:
params['prefix'] = prefix
response = self.client.get(url=url, params=params, **kwargs)
if response.status_code != 200:
self.handle_error(response)
return [self.resource.deserialize(item)
for item in response.json()]
@add_auth_token_to_kwargs_from_env
def get_by_id(self, id, **kwargs):
url = '/%s/%s' % (self.resource.get_url_path_name(), id)
response = self.client.get(url, **kwargs)
if response.status_code == 404:
return None
if response.status_code != 200:
self.handle_error(response)
return self.resource.deserialize(response.json())
@add_auth_token_to_kwargs_from_env
def get_property(self, id_, property_name, self_deserialize=True, **kwargs):
"""
Gets a property of a Resource.
id_ : Id of the resource
property_name: Name of the property
        self_deserialize: whether to deserialize each item with the deserialize method implemented by this resource.
"""
token = None
if kwargs:
token = kwargs.pop('token', None)
url = '/%s/%s/%s/?%s' % (self.resource.get_url_path_name(), id_, property_name,
urllib.parse.urlencode(kwargs))
else:
url = '/%s/%s/%s/' % (self.resource.get_url_path_name(), id_, property_name)
response = self.client.get(url, token=token) if token else self.client.get(url)
if response.status_code == 404:
return None
if response.status_code != 200:
self.handle_error(response)
if self_deserialize:
return [self.resource.deserialize(item) for item in response.json()]
else:
return response.json()
@add_auth_token_to_kwargs_from_env
def get_by_ref_or_id(self, ref_or_id, **kwargs):
return self.get_by_id(id=ref_or_id, **kwargs)
@add_auth_token_to_kwargs_from_env
def query(self, **kwargs):
if not kwargs:
raise Exception('Query parameter is not provided.')
if 'limit' in kwargs and kwargs.get('limit') <= 0:
kwargs.pop('limit')
token = kwargs.get('token', None)
params = {}
for k, v in six.iteritems(kwargs):
if k != 'token':
params[k] = v
url = '/%s/?%s' % (self.resource.get_url_path_name(),
urllib.parse.urlencode(params))
response = self.client.get(url, token=token) if token else self.client.get(url)
if response.status_code == 404:
return []
if response.status_code != 200:
self.handle_error(response)
items = response.json()
instances = [self.resource.deserialize(item) for item in items]
return instances
@add_auth_token_to_kwargs_from_env
def get_by_name(self, name_or_id, **kwargs):
instances = self.query(name=name_or_id, **kwargs)
if not instances:
return None
else:
if len(instances) > 1:
raise Exception('More than one %s named "%s" are found.' %
(self.resource.__name__.lower(), name_or_id))
return instances[0]
@add_auth_token_to_kwargs_from_env
def create(self, instance, **kwargs):
url = '/%s' % self.resource.get_url_path_name()
response = self.client.post(url, instance.serialize(), **kwargs)
if response.status_code != 200:
self.handle_error(response)
instance = self.resource.deserialize(response.json())
return instance
@add_auth_token_to_kwargs_from_env
def update(self, instance, **kwargs):
url = '/%s/%s' % (self.resource.get_url_path_name(), instance.id)
response = self.client.put(url, instance.serialize(), **kwargs)
if response.status_code != 200:
self.handle_error(response)
instance = self.resource.deserialize(response.json())
return instance
@add_auth_token_to_kwargs_from_env
def delete(self, instance, **kwargs):
url = '/%s/%s' % (self.resource.get_url_path_name(), instance.id)
response = self.client.delete(url, **kwargs)
if response.status_code not in [200, 204, 404]:
self.handle_error(response)
return False
return True
@add_auth_token_to_kwargs_from_env
def delete_by_id(self, instance_id, **kwargs):
url = '/%s/%s' % (self.resource.get_url_path_name(), instance_id)
response = self.client.delete(url, **kwargs)
if response.status_code not in [200, 204, 404]:
self.handle_error(response)
return False
try:
resp_json = response.json()
if resp_json:
return resp_json
except:
pass
return True
class ActionAliasResourceManager(ResourceManager):
def __init__(self, resource, endpoint, cacert=None, debug=False):
endpoint = endpoint.replace('v1', 'exp')
self.resource = resource
self.debug = debug
self.client = httpclient.HTTPClient(root=endpoint, cacert=cacert, debug=debug)
class LiveActionResourceManager(ResourceManager):
@add_auth_token_to_kwargs_from_env
def re_run(self, execution_id, parameters=None, **kwargs):
url = '/%s/%s/re_run' % (self.resource.get_url_path_name(), execution_id)
data = {}
if parameters:
data['parameters'] = parameters
response = self.client.post(url, data, **kwargs)
if response.status_code != 200:
self.handle_error(response)
instance = self.resource.deserialize(response.json())
return instance
class TriggerInstanceResourceManager(ResourceManager):
@add_auth_token_to_kwargs_from_env
def re_emit(self, trigger_instance_id):
url = '/%s/%s/re_emit' % (self.resource.get_url_path_name(), trigger_instance_id)
response = self.client.post(url, None)
if response.status_code != 200:
self.handle_error(response)
return response.json()
|
apache-2.0
| 1,653,460,699,954,385,700
| 33.187135
| 91
| 0.595621
| false
| 4
| false
| false
| false
|
lammps/lammps-packages
|
mingw-cross/cmake-win-on-linux.py
|
1
|
14980
|
#!/usr/bin/env python
# Script to build windows installer packages for LAMMPS
# (c) 2017,2018,2019,2020 Axel Kohlmeyer <akohlmey@gmail.com>
from __future__ import print_function
import sys,os,shutil,glob,re,subprocess,tarfile,gzip,time,inspect
try: from urllib.request import urlretrieve as geturl
except: from urllib import urlretrieve as geturl
try:
import multiprocessing
numcpus = multiprocessing.cpu_count()
except:
numcpus = 1
# helper functions
def error(str=None):
if not str: print(helpmsg)
else: print(sys.argv[0],"ERROR:",str)
sys.exit()
def getbool(arg,keyword):
if arg in ['yes','Yes','Y','y','on','1','True','true']:
return True
elif arg in ['no','No','N','n','off','0','False','false']:
return False
else:
error("Unknown %s option: %s" % (keyword,arg))
def fullpath(path):
return os.path.abspath(os.path.expanduser(path))
def getexe(url,name):
gzname = name + ".gz"
geturl(url,gzname)
with gzip.open(gzname,'rb') as gz_in:
with open(name,'wb') as f_out:
shutil.copyfileobj(gz_in,f_out)
gz_in.close()
f_out.close()
os.remove(gzname)
def system(cmd):
try:
txt = subprocess.check_output(cmd,stderr=subprocess.STDOUT,shell=True)
except subprocess.CalledProcessError as e:
print("Command '%s' returned non-zero exit status" % e.cmd)
error(e.output.decode('UTF-8'))
return txt.decode('UTF-8')
def which(program):
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
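# Hedged example: which('x86_64-w64-mingw32-gcc') returns the first matching
# executable on PATH (for instance '/usr/bin/x86_64-w64-mingw32-gcc' on a
# typical Fedora mingw install) or None when the cross compiler is missing.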
# record location and name of python script
homedir, exename = os.path.split(os.path.abspath(inspect.getsourcefile(lambda:0)))
# default settings help message and default settings
bitflag = '64'
parflag = 'no'
pythonflag = False
thrflag = 'omp'
revflag = 'stable'
verbose = False
gitdir = os.path.join(homedir,"lammps")
adminflag = True
msixflag = False
helpmsg = """
Usage: python %s -b <bits> -j <cpus> -p <mpi> -t <thread> -y <yes|no> -r <rev> -v <yes|no> -g <folder> -a <yes|no>
Flags (all flags are optional, defaults listed below):
-b : select Windows variant (default value: %s)
-b 32 : build for 32-bit Windows
-b 64 : build for 64-bit Windows
-j : set number of CPUs for parallel make (default value: %d)
-j <num> : set to any reasonable number or 1 for serial make
-p : select message passing parallel build (default value: %s)
-p mpi : build an MPI parallel version with MPICH2 v1.4.1p1
-p no : build a serial version using MPI STUBS library
-t : select thread support (default value: %s)
-t omp : build with threads via OpenMP enabled
-t no : build with thread support disabled
-y : select python support (default value: %s)
-y yes : build with python included
-y no : build without python
-r : select LAMMPS source revision to build (default value: %s)
-r stable : download and build the latest stable LAMMPS version
-r unstable : download and build the latest patch release LAMMPS version
-r master : download and build the latest development snapshot
-r patch_<date> : download and build a specific patch release
-r <sha256> : download and build a specific snapshot version
-v : select output verbosity
-v yes : print progress messages and output of make commands
-v no : print only progress messages
-g : select folder with git checkout of LAMMPS sources
-g <folder> : use LAMMPS checkout in <folder> (default value: %s)
-a : select admin level installation (default value: yes)
-a yes : the created installer requires to be run at admin level
and LAMMPS is installed to be accessible by all users
-a no : the created installer runs without admin privilege and
LAMMPS is installed into the current user's appdata folder
-a msix : same as "no" but adjust for creating an MSIX package
Example:
python %s -r unstable -t omp -p mpi
""" % (exename,bitflag,numcpus,parflag,thrflag,pythonflag,revflag,gitdir,exename)
# parse arguments
argv = sys.argv
argc = len(argv)
i = 1
while i < argc:
if i+1 >= argc:
print("\nMissing argument to flag:",argv[i])
error()
if argv[i] == '-b':
bitflag = argv[i+1]
elif argv[i] == '-j':
numcpus = int(argv[i+1])
elif argv[i] == '-p':
parflag = argv[i+1]
elif argv[i] == '-t':
thrflag = argv[i+1]
elif argv[i] == '-y':
pythonflag = getbool(argv[i+1],"python")
elif argv[i] == '-r':
revflag = argv[i+1]
elif argv[i] == '-v':
verbose = getbool(argv[i+1],"verbose")
elif argv[i] == '-a':
if argv[i+1] in ['msix','MSIX']:
adminflag = False
msixflag = True
else:
msixflag = False
adminflag = getbool(argv[i+1],"admin")
elif argv[i] == '-g':
gitdir = fullpath(argv[i+1])
else:
print("\nUnknown flag:",argv[i])
error()
i+=2
# checks
if bitflag != '32' and bitflag != '64':
error("Unsupported bitness flag %s" % bitflag)
if parflag != 'no' and parflag != 'mpi':
error("Unsupported parallel flag %s" % parflag)
if thrflag != 'no' and thrflag != 'omp':
error("Unsupported threading flag %s" % thrflag)
# test for valid revision name format: branch names, release tags, or commit hashes
rev1 = re.compile("^(stable|unstable|master)$")
rev2 = re.compile(r"^(patch|stable)_\d+(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\d{4}$")
rev3 = re.compile(r"^[a-f0-9]{40}$")
if not rev1.match(revflag) and not rev2.match(revflag) and not rev3.match(revflag):
error("Unsupported revision flag %s" % revflag)
# create working directory
if adminflag:
builddir = os.path.join(fullpath('.'),"tmp-%s-%s-%s-%s" % (bitflag,parflag,thrflag,revflag))
else:
if pythonflag:
builddir = os.path.join(fullpath('.'),"tmp-%s-%s-%s-%s-python" % (bitflag,parflag,thrflag,revflag))
elif msixflag:
builddir = os.path.join(fullpath('.'),"tmp-%s-%s-%s-%s-msix" % (bitflag,parflag,thrflag,revflag))
else:
builddir = os.path.join(fullpath('.'),"tmp-%s-%s-%s-%s-noadmin" % (bitflag,parflag,thrflag,revflag))
shutil.rmtree(builddir,True)
try:
os.mkdir(builddir)
except:
error("Cannot create temporary build folder: %s" % builddir)
# check for prerequisites and set up build environment
if bitflag == '32':
cc_cmd = which('i686-w64-mingw32-gcc')
cxx_cmd = which('i686-w64-mingw32-g++')
fc_cmd = which('i686-w64-mingw32-gfortran')
ar_cmd = which('i686-w64-mingw32-ar')
size_cmd = which('i686-w64-mingw32-size')
nsis_cmd = which('makensis')
lmp_size = 'smallsmall'
else:
cc_cmd = which('x86_64-w64-mingw32-gcc')
cxx_cmd = which('x86_64-w64-mingw32-g++')
fc_cmd = which('x86_64-w64-mingw32-gfortran')
ar_cmd = which('x86_64-w64-mingw32-ar')
size_cmd = which('x86_64-w64-mingw32-size')
nsis_cmd = which('makensis')
lmp_size = 'smallbig'
print("""
Settings: building LAMMPS revision %s for %s-bit Windows
Message passing : %s
Multi-threading : %s
Home folder : %s
Source folder : %s
Build folder : %s
C compiler : %s
C++ compiler : %s
Fortran compiler : %s
Library archiver : %s
""" % (revflag,bitflag,parflag,thrflag,homedir,gitdir,builddir,cc_cmd,cxx_cmd,fc_cmd,ar_cmd))
# create/update git checkout
if not os.path.exists(gitdir):
txt = system("git clone https://github.com/lammps/lammps.git %s" % gitdir)
if verbose: print(txt)
os.chdir(gitdir)
txt = system("git fetch origin")
if verbose: print(txt)
txt = system("git checkout %s" % revflag)
if verbose: print(txt)
if revflag == "master" or revflag == "stable" or revflag == "unstable":
txt = system("git pull")
if verbose: print(txt)
# switch to build folder
os.chdir(builddir)
# download what is not automatically downloaded by CMake
print("Downloading third party tools")
url='http://download.lammps.org/thirdparty'
print("FFMpeg")
getexe("%s/ffmpeg-win%s.exe.gz" % (url,bitflag),"ffmpeg.exe")
print("gzip")
getexe("%s/gzip.exe.gz" % url,"gzip.exe")
if parflag == "mpi":
mpiflag = "on"
else:
mpiflag = "off"
if thrflag == "omp":
ompflag = "on"
else:
ompflag = "off"
print("Configuring build with CMake")
cmd = "mingw%s-cmake -G Ninja -D CMAKE_BUILD_TYPE=Release" % bitflag
cmd += " -D ADD_PKG_CONFIG_PATH=%s/mingw%s-pkgconfig" % (homedir,bitflag)
cmd += " -C %s/mingw%s-pkgconfig/addpkg.cmake" % (homedir,bitflag)
cmd += " -C %s/cmake/presets/mingw-cross.cmake %s/cmake" % (gitdir,gitdir)
cmd += " -DBUILD_SHARED_LIBS=on -DBUILD_MPI=%s -DBUILD_OPENMP=%s" % (mpiflag,ompflag)
cmd += " -DWITH_GZIP=on -DWITH_FFMPEG=on -DLAMMPS_EXCEPTIONS=on"
cmd += " -DINTEL_LRT_MODE=c++11 -DBUILD_LAMMPS_SHELL=on"
cmd += " -DCMAKE_CXX_COMPILER_LAUNCHER=ccache"
if pythonflag: cmd += " -DPKG_PYTHON=yes"
print("Running: ",cmd)
txt = system(cmd)
if verbose: print(txt)
print("Compiling")
system("ninja")
print("Done")
print("Building PDF manual")
os.chdir(os.path.join(gitdir,"doc"))
txt = system("make pdf")
if verbose: print(txt)
shutil.move("Manual.pdf",os.path.join(builddir,"LAMMPS-Manual.pdf"))
print("Done")
# switch back to build folder and copy/process files for inclusion in installer
print("Collect and convert files for the Installer package")
os.chdir(builddir)
shutil.copytree(os.path.join(gitdir,"examples"),os.path.join(builddir,"examples"),symlinks=False)
shutil.copytree(os.path.join(gitdir,"bench"),os.path.join(builddir,"bench"),symlinks=False)
shutil.copytree(os.path.join(gitdir,"tools"),os.path.join(builddir,"tools"),symlinks=False)
shutil.copytree(os.path.join(gitdir,"python","lammps"),os.path.join(builddir,"python","lammps"),symlinks=False)
shutil.copytree(os.path.join(gitdir,"potentials"),os.path.join(builddir,"potentials"),symlinks=False)
shutil.copy(os.path.join(gitdir,"README"),os.path.join(builddir,"README.txt"))
shutil.copy(os.path.join(gitdir,"LICENSE"),os.path.join(builddir,"LICENSE.txt"))
shutil.copy(os.path.join(gitdir,"doc","src","PDF","colvars-refman-lammps.pdf"),os.path.join(builddir,"Colvars-Manual.pdf"))
shutil.copy(os.path.join(gitdir,"tools","createatoms","Manual.pdf"),os.path.join(builddir,"CreateAtoms-Manual.pdf"))
shutil.copy(os.path.join(gitdir,"doc","src","PDF","kspace.pdf"),os.path.join(builddir,"Kspace-Extra-Info.pdf"))
shutil.copy(os.path.join(gitdir,"doc","src","PDF","pair_gayberne_extra.pdf"),os.path.join(builddir,"PairGayBerne-Manual.pdf"))
shutil.copy(os.path.join(gitdir,"doc","src","PDF","pair_resquared_extra.pdf"),os.path.join(builddir,"PairReSquared-Manual.pdf"))
shutil.copy(os.path.join(gitdir,"doc","src","PDF","PDLammps_overview.pdf"),os.path.join(builddir,"PDLAMMPS-Overview.pdf"))
shutil.copy(os.path.join(gitdir,"doc","src","PDF","PDLammps_EPS.pdf"),os.path.join(builddir,"PDLAMMPS-EPS.pdf"))
shutil.copy(os.path.join(gitdir,"doc","src","PDF","PDLammps_VES.pdf"),os.path.join(builddir,"PDLAMMPS-VES.pdf"))
shutil.copy(os.path.join(gitdir,"doc","src","PDF","SPH_LAMMPS_userguide.pdf"),os.path.join(builddir,"SPH-Manual.pdf"))
shutil.copy(os.path.join(gitdir,"doc","src","PDF","SMD_LAMMPS_userguide.pdf"),os.path.join(builddir,"SMD-Manual.pdf"))
shutil.copy(os.path.join(gitdir,"doc","src","PDF","USER-CGDNA.pdf"),os.path.join(builddir,"CGDNA-Manual.pdf"))
# prune outdated inputs, too large files, or examples of packages we don't bundle
for d in ['accelerate','kim','mscg','USER/quip','USER/vtk']:
shutil.rmtree(os.path.join("examples",d),True)
for d in ['FERMI','KEPLER']:
shutil.rmtree(os.path.join("bench",d),True)
shutil.rmtree("tools/msi2lmp/test",True)
os.remove("potentials/C_10_10.mesocnt")
os.remove("potentials/TABTP_10_10.mesont")
os.remove("examples/USER/mesont/C_10_10.mesocnt")
os.remove("examples/USER/mesont/TABTP_10_10.mesont")
# convert text files to CR-LF conventions
txt = system("unix2dos LICENSE.txt README.txt tools/msi2lmp/README")
if verbose: print(txt)
txt = system("find bench examples potentials python tools/msi2lmp/frc_files -type f -print | xargs unix2dos")
if verbose: print(txt)
# mass rename README to README.txt
txt = system('for f in $(find tools bench examples potentials python -name README -print); do mv -v $f $f.txt; done')
if verbose: print(txt)
# mass rename in.<name> to in.<name>.lmp
txt = system('for f in $(find bench examples -name in.\* -print); do mv -v $f $f.lmp; done')
if verbose: print(txt)
print("Done")
print("Configuring and building installer")
os.chdir(builddir)
if pythonflag:
nsisfile = os.path.join(homedir,"installer","lammps-python.nsis")
elif adminflag:
nsisfile = os.path.join(homedir,"installer","lammps-admin.nsis")
else:
if msixflag:
nsisfile = os.path.join(homedir,"installer","lammps-msix.nsis")
else:
nsisfile = os.path.join(homedir,"installer","lammps-noadmin.nsis")
shutil.copy(nsisfile,os.path.join(builddir,"lammps.nsis"))
shutil.copy(os.path.join(homedir,"installer","FileAssociation.nsh"),os.path.join(builddir,"FileAssociation.nsh"))
shutil.copy(os.path.join(homedir,"installer","lammps.ico"),os.path.join(builddir,"lammps.ico"))
shutil.copy(os.path.join(homedir,"installer","lammps-text-logo-wide.bmp"),os.path.join(builddir,"lammps-text-logo-wide.bmp"))
shutil.copytree(os.path.join(homedir,"installer","envvar"),os.path.join(builddir,"envvar"),symlinks=False)
# define version flag of the installer:
# - use current timestamp, when pulling from master (for daily builds)
# - parse version from src/version.h when pulling from stable, unstable, or specific tag
# - otherwise use revflag, i.e. the commit hash
version = revflag
if revflag == 'stable' or revflag == 'unstable' or rev2.match(revflag):
with open(os.path.join(gitdir,"src","version.h"),'r') as v_file:
verexp = re.compile(r'^.*"(\w+) (\w+) (\w+)".*$')
vertxt = v_file.readline()
verseq = verexp.match(vertxt).groups()
version = "".join(verseq)
elif revflag == 'master':
version = time.strftime('%Y-%m-%d')
if bitflag == '32':
mingwdir = '/usr/i686-w64-mingw32/sys-root/mingw/bin/'
elif bitflag == '64':
mingwdir = '/usr/x86_64-w64-mingw32/sys-root/mingw/bin/'
if parflag == 'mpi':
txt = system("makensis -DMINGW=%s -DVERSION=%s-MPI -DBIT=%s -DLMPREV=%s lammps.nsis" % (mingwdir,version,bitflag,revflag))
if verbose: print(txt)
else:
txt = system("makensis -DMINGW=%s -DVERSION=%s -DBIT=%s -DLMPREV=%s lammps.nsis" % (mingwdir,version,bitflag,revflag))
if verbose: print(txt)
# clean up after successful build
os.chdir('..')
print("Cleaning up...")
shutil.rmtree(builddir,True)
print("Done.")
|
mit
| -3,950,353,748,060,748,000
| 38.21466
| 128
| 0.672029
| false
| 2.959889
| false
| false
| false
|
Kopachris/seshet
|
seshet/bot.py
|
1
|
18891
|
"""Implement SeshetBot as subclass of ircutils3.bot.SimpleBot."""
import logging
import os
from io import StringIO
from datetime import datetime
from ircutils3 import bot, client
from .utils import KVStore, Storage, IRCstr
class SeshetUser(object):
"""Represent one IRC user."""
def __init__(self, nick, user, host):
logging.debug("Building new SeshetUser, %s", nick)
self.nick = IRCstr(nick)
self.user = user
self.host = host
self.channels = []
def join(self, channel):
"""Add this user to the channel's user list and add the channel to this
user's list of joined channels.
"""
if channel not in self.channels:
channel.users.add(self.nick)
self.channels.append(channel)
def part(self, channel):
"""Remove this user from the channel's user list and remove the channel
from this user's list of joined channels.
"""
if channel in self.channels:
channel.users.remove(self.nick)
self.channels.remove(channel)
def quit(self):
"""Remove this user from all channels and reinitialize the user's list
of joined channels.
"""
for c in self.channels:
c.users.remove(self.nick)
self.channels = []
def change_nick(self, nick):
"""Update this user's nick in all joined channels."""
old_nick = self.nick
self.nick = IRCstr(nick)
for c in self.channels:
c.users.remove(old_nick)
c.users.add(self.nick)
def __str__(self):
return "{}!{}@{}".format(self.nick, self.user, self.host)
def __repr__(self):
temp = "<SeshetUser {}!{}@{} in channels {}>"
return temp.format(self.nick, self.user, self.host, self.channels)
class SeshetChannel(object):
"""Represent one IRC channel."""
def __init__(self, name, users, log_size=100):
self.name = IRCstr(name)
self.users = users
self.message_log = []
self._log_size = log_size
def log_message(self, user, message):
"""Log a channel message.
This log acts as a sort of cache so that recent activity can be searched
by the bot and command modules without querying the database.
"""
if isinstance(user, SeshetUser):
user = user.nick
elif not isinstance(user, IRCstr):
user = IRCstr(user)
time = datetime.utcnow()
self.message_log.append((time, user, message))
while len(self.message_log) > self._log_size:
del self.message_log[0]
def __str__(self):
return str(self.name)
def __repr__(self):
temp = "<SeshetChannel {} with {} users>"
return temp.format(self.name, len(self.users))
class SeshetBot(bot.SimpleBot):
"""Extend `ircutils3.bot.SimpleBot`.
Each instance represents one bot, connected to one IRC network.
Each instance should have its own database, but can make use of
any shared command modules. The modules may have to be added to
the bot's database if the bot wasn't created using the
`seshet --config` or `seshet --new` commands.
"""
def __init__(self, nick='Seshet', db=None, debug_file=None, verbosity=99):
"""Extend `ircutils3.bot.SimpleBot.__init__()`.
Keyword argument `db` is required for running commands other
than core commands and should be an instance of pydal.DAL.
"""
# initialize debug logging
if debug_file is None:
logging.basicConfig(level=verbosity)
else:
logging.basicConfig(filename=os.path.expanduser(debug_file),
level=verbosity
)
logging.debug("Running `SimpleBot.__init__`...")
bot.SimpleBot.__init__(self, nick, auto_handle=False)
# define defaults
self.session = Storage()
self.log_file = 'seshet.log'
self.log_formats = {}
self.locale = {}
self.channels = {}
self.users = {}
if db is None:
# no database connection, only log to file and run
# core command modules
logging.info("No db, IRC logging will be done to file")
self.log = self._log_to_file
self.run_modules = self._run_only_core
# dummy KV store since no db
self.storage = Storage()
else:
logging.info("Using database %s", db)
self.db = db
self.storage = KVStore(db)
# Add default handlers
logging.debug("Adding default handlers...")
self.events["any"].add_handler(client._update_client_info)
self.events["ctcp_version"].add_handler(client._reply_to_ctcp_version)
self.events["name_reply"].add_handler(_add_channel_names)
def log(self, etype, source, msg='', target='', hostmask='', params=''):
"""Log an event in the database.
Required:
`etype` - event type. One of 'PRIVMSG', 'QUIT', 'PART', 'ACTION',
'NICK', 'JOIN', 'MODE', 'KICK', 'CTCP', or 'ERROR'. Enforced
by database model.
`source` - source of the event. Usually a user. For NICK events,
the user's original nickname. For ERROR events, this should be
the exception name, the module name, and the line number. The
full traceback will be logged in `msg`.
Optional:
`msg` - a message associated with the event.
`target` - the target the message was directed to. For MODE and KICK
events, this will be the user the event was performed on. For
NICK events, this will be channel the event was seen in (an event
will be created for each channel the user is seen by the bot in).
`hostmask` - a hostmask associated with the event.
        `params` - any additional parameters associated with the event, such as
a new nickname (for NICK events), mode switches (for MODE events),
or a dump of local variables (for ERROR events).
"""
self.db.event_log.insert(event_type=etype,
event_time=datetime.utcnow(),
source=source,
target=target,
message=msg,
host=hostmask,
params=params,
)
self.db.commit()
def run_modules(self, e):
# grab local pointer to self.db for faster lookup
db = self.db
# get initial list of modules handling this event type
event_types = db.modules.event_types
mod_enabled = db.modules.enabled
init_mods = db(event_types.contains(e.command) & mod_enabled).select()
logging.debug(("Running modules for {} command. "
"Initial module list:\n{}").format(e.command, init_mods)
)
if e.command in ('PRIVMSG', 'CTCP_ACTION', 'NOTICE'):
# narrow down list of modules to run based on event parameters
# lowercase for non-caps comparisons
m_low = e.message.lower()
bot_n = self.nickname.lower()
bot_u = self.user.lower()
bot_r = self.real_name.lower()
# indicates whether or not name has already been stripped from
# original message
for_us = False
if e.target.startswith('#'):
chan_msg = True
chan_nicks = self.channels[e.target].users
else:
chan_msg = False
fin_mods = list() # final list of modules to run
for mod in init_mods:
if e.source in mod.whitelist:
fin_mods.append(mod)
elif e.source in mod.blacklist:
pass
if self.nickname in mod.enicks:
if e.target == self.nickname or for_us:
fin_mods.append(mod)
elif m_low.startswith(bot_n):
# strip nickname from original message so modules can
# process it correctly
e.message = e.message[len(bot_n):].lstrip(',: ')
fin_mods.append(mod)
for_us = True
elif m_low.startswith(bot_u):
e.message = e.message[len(bot_u):].lstrip(',: ')
fin_mods.append(mod)
for_us = True
elif m_low.startswith(bot_r):
e.message = e.message[len(bot_r):].lstrip(',: ')
fin_mods.append(mod)
for_us = True
if chan_msg:
if e.target in mod.dchannels:
pass
elif set(mod.dnicks) & chan_nicks:
pass
elif e.target in mod.echannels:
fin_mods.append(mod)
elif set(mod.enicks) & chan_nicks:
fin_mods.append(mod)
argv = m_low.split()
for mod in fin_mods:
# run each module
m = __import__(mod.name) # TODO: use importlib
# TODO: add authentication and rate limiting
for cmd, fun in m.commands.items():
if (mod.cmd_prefix + cmd) == argv[0]:
fun(self, e)
break
def get_unique_users(self, chan):
"""Get the set of users that are unique to the given channel (i.e. not
present in any other channel the bot is in).
"""
chan = IRCstr(chan)
these_users = self.channels[chan].users
other_users = set()
for c in self.channels.values():
if c.name != chan:
other_users |= c.users
return these_users - other_users
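    # Worked example (hypothetical channels): if the bot sits in #a with users
    # {alice, bob} and in #b with {bob, carol}, get_unique_users('#a') returns
    # {alice}, because bob is also visible in #b.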
def on_message(self, e):
self.log('privmsg',
source=e.source,
msg=e.message,
target=e.target,
)
if e.target in self.channels:
# TODO: move this to self.log() so we don't have to get time twice?
self.channels[e.target].log_message(e.source, e.message)
self.run_modules(e)
def on_join(self, e):
self.log('join',
source=e.source,
target=e.target,
hostmask=e.user+'@'+e.host,
)
chan = IRCstr(e.target)
nick = IRCstr(e.source)
if e.source != self.nickname:
if nick not in self.users:
self.users[nick] = SeshetUser(nick, e.user, e.host)
self.users[nick].join(self.channels[chan])
self.run_modules(e)
def on_part(self, e):
self.log('part',
source=e.source,
hostmask=e.user+'@'+e.host,
msg=' '.join(e.params[1:]),
target=e.target,
)
chan = IRCstr(e.target)
nick = IRCstr(e.source)
channel = self.channels[chan]
user = self.users[nick]
user.part(channel)
if nick == self.nickname:
# bot parted, remove that channel from all users and
# remove any users with empty channel lists
for u in self.users.values():
if channel in u.channels:
u.channels.remove(channel)
if len(u.channels) == 0:
del self.users[u.nick]
def on_quit(self, e):
nick = IRCstr(e.source)
for chan in self.channels.values():
if nick in chan.users:
self.log('quit',
source=e.source,
hostmask=e.user+'@'+e.host,
msg=' '.join(e.params),
target=chan.name,
)
self.users[nick].quit()
del self.users[nick]
def on_disconnect(self, e):
pass
def on_kick(self, e):
self.log('kick',
source=e.source,
target=e.target,
params=e.params[0],
msg=' '.join(e.params[1:]),
hostmask=e.user+'@'+e.host,
)
chan = IRCstr(e.target)
        # the kicked user is the first KICK parameter; e.source is the kicker
        nick = IRCstr(e.params[0])
channel = self.channels[chan]
user = self.users[nick]
user.part(channel)
if nick == self.nickname:
# bot parted, remove that channel from all users and
# remove any users with empty channel lists
for u in self.users.values():
if channel in u.channels:
u.channels.remove(channel)
if len(u.channels) == 0:
del self.users[u.nick]
def on_nick_change(self, e):
new_nick = IRCstr(e.target)
old_nick = IRCstr(e.source)
for chan in self.channels.values():
            if old_nick in chan.users:
self.log('nick',
source=e.source,
hostmask=e.user+'@'+e.host,
target=chan.name,
params=e.target,
)
self.users[old_nick].change_nick(new_nick)
self.users[new_nick] = self.users[old_nick]
del self.users[old_nick]
def on_ctcp_action(self, e):
self.log('action',
source=e.source,
target=e.target,
msg=' '.join(e.params),
)
def on_welcome(self, e):
pass
def on_mode(self, e):
self.log('mode',
source=e.source,
msg=' '.join(e.params),
target=e.target,
)
def before_poll(self):
"""Called each loop before polling sockets for I/O."""
pass
def after_poll(self):
"""Called each loop after polling sockets for I/O and
handling any queued events.
"""
pass
def connect(self, *args, **kwargs):
"""Extend `client.SimpleClient.connect()` with defaults"""
defaults = {}
for i, k in enumerate(('host', 'port', 'channel', 'use_ssl', 'password')):
if i < len(args):
defaults[k] = args[i]
elif k in kwargs:
defaults[k] = kwargs[k]
else:
def_k = 'default_' + k
defaults[k] = getattr(self, def_k, None)
if defaults['use_ssl'] is None:
defaults['use_ssl'] = False
if defaults['host'] is None:
raise TypeError("missing 1 required positional argument: 'host'")
logging.info("Connecting to %s:%s and joining channels %s",
defaults['host'],
defaults['port'],
defaults['channel'],
)
client.SimpleClient.connect(self, **defaults)
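    # Example usage (hypothetical host and attributes) of the fallback chain
    # above: positional args win, then keyword args, then default_* values
    # set on the instance:
    #
    #   bot.default_port = 6667
    #   bot.default_channel = '#seshet'
    #   bot.connect('irc.example.org')                       # port/channel from defaults
    #   bot.connect('irc.example.org', 6697, '#ops', True)   # everything explicit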
def start(self):
logging.debug("Beginning poll loop")
self._loop(self.conn._map)
    def _log_to_file(self, etype, source, msg='', target='', hostmask='', params=''):
        """Override `log()` if the bot is not initialized with a database
connection. Do not call this method directly.
"""
today = datetime.utcnow()
# TODO: Use self.locale['timezone'] for changing time
date = today.strftime(self.locale['date_fmt'])
time = today.strftime(self.locale['time_fmt'])
datetime_s = today.strftime(self.locale['short_datetime_fmt'])
datetime_l = today.strftime(self.locale['long_datetime_fmt'])
if target == self.nickname and etype in ('privmsg', 'action'):
target = source
if etype in self.log_formats:
file_path = os.path.expanduser(self.log_file.format(**locals()))
file_dir = os.path.dirname(file_path)
if not os.path.isdir(file_dir):
os.makedirs(file_dir)
line = self.log_formats[etype].format(**locals())
with open(file_path, 'a') as log:
log.write(line+'\n')
# else do nothing
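    # Sketch of the template-driven logging used above.  These format strings
    # are assumptions for illustration only; the real values come from the
    # instance's configuration:
    #
    #   self.log_file = '~/.seshet/logs/{target}/{date}.log'
    #   self.log_formats = {
    #       'privmsg': '[{time}] <{source}> {msg}',
    #       'join':    '[{time}] {source} ({hostmask}) joined {target}',
    #   }
    #
    # Because both templates are expanded with **locals(), they may reference
    # any local name bound in _log_to_file (date, time, source, msg, target...).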
    def _run_only_core(self, *args, **kwargs):
        """Override `run_modules()` if the bot is not initialized with a
        database connection. Do not call this method directly.
Rather than getting a list of enabled modules from the database,
Seshet will only run the commands defined by `core` in this package.
The bot will only run commands given in private message ("query")
by either an authenticated user defined in the instance's config file,
or by any user with the same hostmask if authentication isn't set up
in the instance's config file.
The `core` command module from this package can only be overridden if
the bot is initialized with a database connection and a new `core`
module is entered into the database.
"""
pass
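    # Hedged sketch of what a concrete override could look like; the helper
    # and module names below are assumptions, not part of Seshet:
    #
    #   if e.target == self.nickname:                     # private message only
    #       if self._is_authenticated(e.source, e.host):  # hypothetical helper
    #           core.run(self, e)                         # run built-in commands
    #
    # Channel messages and unauthenticated senders would simply be ignored.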
def _loop(self, map):
"""The main loop. Poll sockets for I/O and run any other functions
that need to be run every loop.
"""
try:
from asyncore import poll
except ImportError:
raise Exception("Couldn't find poll function. Cannot start bot.")
while map:
self.before_poll()
poll(timeout=30.0, map=map)
self.after_poll()
def _add_channel_names(client, e):
"""Add a new channel to self.channels and initialize its user list.
Called as event handler for RPL_NAMES events. Do not call directly.
"""
chan = IRCstr(e.channel)
names = set([IRCstr(n) for n in e.name_list])
client.channels[chan] = SeshetChannel(chan, names)
|
bsd-3-clause
| 1,934,334,764,152,155,100
| 34.984762
| 85
| 0.505214
| false
| 4.44285
| false
| false
| false
|
Naeka/vosae-app
|
www/organizer/api/resources/event.py
|
1
|
10316
|
# -*- coding:Utf-8 -*-
from django.conf.urls import url
from django.core.exceptions import ObjectDoesNotExist
from tastypie import fields as base_fields, http
from tastypie.utils import trailing_slash
from tastypie.validation import Validation
from tastypie_mongoengine import fields
from dateutil.parser import parse
from core.api.utils import TenantResource
from organizer.models import VosaeEvent, DATERANGE_FILTERS
from organizer.api.doc import HELP_TEXT
__all__ = (
'VosaeEventResource',
)
class EventValidation(Validation):
def is_valid(self, bundle, request=None):
from django.utils.timezone import is_naive
errors = {}
for field in ['start', 'end']:
data = bundle.data.get(field)
if not data.get('date', None) and not data.get('datetime', None):
errors['__all__'] = ["One of 'date' and 'datetime' must be set."]
elif data.get('date', None) and data.get('datetime', None):
errors['__all__'] = ["Only one of 'date' and 'datetime' must be set. The 'date' field is used for all-day events."]
elif data.get('datetime', None) and is_naive(parse(data.get('datetime'))) and not data.get('timezone', None):
errors['datetime'] = ["A timezone offset is required if not specified in the 'timezone' field"]
return errors
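# Illustrative request payloads for the rules above (hypothetical data, shown
# only to make the validation concrete):
#
#   valid, all-day event:   {"start": {"date": "2013-07-01"}, ...}
#   valid, timed event:     {"start": {"datetime": "2013-07-01T09:00:00+02:00"}, ...}
#   invalid, both set:      {"start": {"date": "2013-07-01", "datetime": "..."}}
#   invalid, naive datetime
#   without "timezone":     {"start": {"datetime": "2013-07-01T09:00:00"}}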
class VosaeEventResource(TenantResource):
status = base_fields.CharField(
attribute='status',
null=True,
blank=True,
help_text=HELP_TEXT['vosae_event']['status']
)
created_at = base_fields.DateTimeField(
attribute='created_at',
readonly=True,
help_text=HELP_TEXT['vosae_event']['created_at']
)
updated_at = base_fields.DateTimeField(
attribute='updated_at',
readonly=True,
help_text=HELP_TEXT['vosae_event']['updated_at']
)
summary = base_fields.CharField(
attribute='summary',
help_text=HELP_TEXT['vosae_event']['summary']
)
description = base_fields.CharField(
attribute='description',
null=True,
blank=True,
help_text=HELP_TEXT['vosae_event']['description']
)
location = base_fields.CharField(
attribute='location',
null=True,
blank=True,
help_text=HELP_TEXT['vosae_event']['location']
)
color = base_fields.CharField(
attribute='color',
null=True,
blank=True,
help_text=HELP_TEXT['vosae_event']['color']
)
start = fields.EmbeddedDocumentField(
embedded='organizer.api.resources.EventDateTimeResource',
attribute='start',
help_text=HELP_TEXT['vosae_event']['start']
)
end = fields.EmbeddedDocumentField(
embedded='organizer.api.resources.EventDateTimeResource',
attribute='end',
help_text=HELP_TEXT['vosae_event']['end']
)
recurrence = base_fields.CharField(
attribute='recurrence',
null=True,
blank=True,
help_text=HELP_TEXT['vosae_event']['recurrence']
)
original_start = fields.EmbeddedDocumentField(
embedded='organizer.api.resources.EventDateTimeResource',
attribute='original_start',
readonly=True,
help_text=HELP_TEXT['vosae_event']['original_start']
)
instance_id = base_fields.CharField(
attribute='instance_id',
readonly=True,
null=True,
blank=True,
help_text=HELP_TEXT['vosae_event']['instance_id']
)
transparency = base_fields.CharField(
attribute='transparency',
null=True,
blank=True,
help_text=HELP_TEXT['vosae_event']['transparency']
)
calendar = fields.ReferenceField(
to='organizer.api.resources.VosaeCalendarResource',
attribute='calendar',
help_text=HELP_TEXT['vosae_event']['calendar']
)
creator = fields.ReferenceField(
to='core.api.resources.VosaeUserResource',
attribute='creator',
readonly=True,
help_text=HELP_TEXT['vosae_event']['creator']
)
organizer = fields.ReferenceField(
to='core.api.resources.VosaeUserResource',
attribute='organizer',
readonly=True,
help_text=HELP_TEXT['vosae_event']['organizer']
)
attendees = fields.EmbeddedListField(
of='organizer.api.resources.AttendeeResource',
attribute='attendees',
null=True,
blank=True,
full=True,
help_text=HELP_TEXT['vosae_event']['attendees']
)
reminders = fields.EmbeddedDocumentField(
embedded='organizer.api.resources.ReminderSettingsResource',
attribute='reminders',
blank=True,
help_text=HELP_TEXT['vosae_event']['reminders']
)
class Meta(TenantResource.Meta):
resource_name = 'vosae_event'
queryset = VosaeEvent.objects.all()
excludes = ('tenant', 'occurrences', 'next_reminder', 'ical_uid', 'ical_data')
filtering = {
'start': ('exact', 'gt', 'gte'),
'end': ('exact', 'lt', 'lte'),
'calendar': ('exact')
}
validation = EventValidation()
def prepend_urls(self):
"""Add urls for resources actions."""
urls = super(VosaeEventResource, self).prepend_urls()
urls.extend((
url(r'^(?P<resource_name>%s)/(?P<pk>\w[\w/-]*)/instances%s$' % (self._meta.resource_name, trailing_slash()), self.wrap_view('event_instances'), name='api_vosae_event_instances'),
))
return urls
def build_filters(self, filters=None):
qs_filters = super(VosaeEventResource, self).build_filters(filters)
for filter_name, filter_value in qs_filters.iteritems():
if filter_name.endswith('__exact'):
new_name = filter_name[:filter_name.index('__exact')]
qs_filters[new_name] = filter_value
del qs_filters[filter_name]
filter_name = new_name
if filter_name in DATERANGE_FILTERS:
if isinstance(filter_value, basestring):
qs_filters[filter_name] = parse(filter_value)
return qs_filters
def get_object_list(self, request):
"""Filters events based on calendar accesses (extracted from request user)"""
from organizer.models import VosaeCalendar
object_list = super(VosaeEventResource, self).get_object_list(request)
principals = [request.vosae_user] + request.vosae_user.groups
calendars = VosaeCalendar.objects.filter(acl__read_list__in=principals, acl__negate_list__nin=principals)
return object_list.filter(calendar__in=list(calendars))
def apply_filters(self, request, applicable_filters):
object_list = super(VosaeEventResource, self).apply_filters(request, applicable_filters)
filters = request.GET
if 'single_events' in filters and filters['single_events'] in ['true', 'True', True]:
start = None
end = None
for filter_name, filter_value in filters.iteritems():
try:
if filter_name.startswith('start'):
start = parse(filter_value)
elif filter_name.startswith('end'):
end = parse(filter_value)
except:
pass
return object_list.with_instances(start, end)
return object_list
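    # Example request (hypothetical URL prefix) exercising both filter paths
    # above:
    #
    #   GET .../vosae_event/?start__gte=2013-07-01&end__lt=2013-08-01&single_events=true
    #
    # build_filters() parses the ISO dates for the DATERANGE_FILTERS, and
    # apply_filters() expands recurring events into individual occurrences
    # between those bounds via with_instances(start, end).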
def event_instances(self, request, **kwargs):
"""List all instances of the event"""
self.method_check(request, allowed=['get'])
self.is_authenticated(request)
self.throttle_check(request)
try:
bundle = self.build_bundle(request=request)
objects = self.obj_get_list(bundle, **self.remove_api_resource_names(kwargs)).with_instances()
except ObjectDoesNotExist:
return http.HttpNotFound()
if objects.count() < 2:
return http.HttpNotFound()
sorted_objects = self.apply_sorting(objects, options=request.GET)
first_objects_bundle = self.build_bundle(obj=objects[0], request=request)
instances_resource_uri = '%sinstances/' % self.get_resource_uri(first_objects_bundle)
paginator = self._meta.paginator_class(request.GET, sorted_objects, resource_uri=instances_resource_uri, limit=self._meta.limit)
to_be_serialized = paginator.page()
# Dehydrate the bundles in preparation for serialization.
bundles = [self.build_bundle(obj=obj, request=request) for obj in to_be_serialized['objects']]
to_be_serialized['objects'] = [self.full_dehydrate(b) for b in bundles]
to_be_serialized = self.alter_list_data_to_serialize(request, to_be_serialized)
return self.create_response(request, to_be_serialized)
def full_hydrate(self, bundle):
"""Set event's creator and organizer"""
bundle = super(VosaeEventResource, self).full_hydrate(bundle)
bundle.obj.creator = bundle.request.vosae_user
# Organizer should be the user owner of the calendar
try:
organizer = bundle.obj.calendar.acl.get_owner()
except:
organizer = bundle.request.vosae_user
bundle.obj.organizer = organizer
return bundle
def full_dehydrate(self, bundle, for_list=False):
bundle = super(VosaeEventResource, self).full_dehydrate(bundle, for_list=for_list)
if not bundle.data['instance_id']:
del bundle.data['instance_id']
return bundle
def dehydrate(self, bundle):
"""Dehydrates the appropriate CalendarList which differs according to user (extracted from request)"""
from organizer.models import CalendarList
from organizer.api.resources import CalendarListResource
bundle = super(VosaeEventResource, self).dehydrate(bundle)
calendar_list = CalendarList.objects.get(calendar=bundle.obj.calendar, vosae_user=bundle.request.vosae_user)
calendar_list_resource = CalendarListResource()
calendar_list_resource_bundle = calendar_list_resource.build_bundle(obj=calendar_list, request=bundle.request)
bundle.data['calendar_list'] = calendar_list_resource.get_resource_uri(calendar_list_resource_bundle)
return bundle
|
agpl-3.0
| -4,812,843,896,607,923,000
| 38.830116
| 190
| 0.630186
| false
| 3.926913
| false
| false
| false
|
m-tmatma/svnmailer
|
src/lib/svnmailer/settings.py
|
1
|
19703
|
# -*- coding: utf-8 -*-
#
# Copyright 2004-2006 André Malo or his licensors, as applicable
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Runtime settings for the svnmailer
==================================
This module defines one public class, called L{Settings}. This class is the
storage container for all settings used by the svnmailer. L{Settings} is an
abstract class. There is just one method that must be implemented --
L{Settings.init}. This method is responsible for filling the container
properly. An implementor of the L{Settings} class can be found in the
L{svnmailer.config} module.
This module further defines the Settings subcontainers
L{GroupSettingsContainer}, L{GeneralSettingsContainer} and
L{RuntimeSettingsContainer}, but you should not instantiate them directly --
L{Settings} provides methods that return instances of these containers.
"""
__author__ = "André Malo"
__docformat__ = "epytext en"
__all__ = ['Settings', 'modes']
# global imports
from svnmailer import typedstruct, struct_accessors
class _Tokens(object):
""" Generic token container
@ivar valid_tokens: The valid mode tokens (str, str, ...)
@type valid_tokens: C{tuple}
"""
valid_tokens = ()
def __init__(self, *args):
""" Initialization """
self.valid_tokens = args
for token in self.valid_tokens:
setattr(self, token.encode('us-ascii'), token)
modes = _Tokens('commit', 'propchange', 'lock', 'unlock')
xpath = _Tokens(u'yes', u'no', u'ignore')
showenc = _Tokens(u'yes', u'no', u'nondefault')
def groupMembers(space):
""" Define the members of the group settings
The following members are defined:
- C{_name}: Name of the group
- C{_def_for_repos}: default for_repos regex
- C{_def_for_paths}: default for_paths regex
- C{for_repos}: Repository regex
- C{for_paths}: Path regex (inside the repos)
- C{exclude_paths}: Exclude path regex to prevent for_paths from
being applied
- C{ignore_if_other_matches}: this group will be ignored if there
are any other groups selected for a particular path
- C{show_nonmatching_paths}: How to deal with paths that are not
matched by the group
- C{commit_subject_template}: Subject template for commit mail
- C{propchange_subject_template}: Subject template for revpropchanges
- C{lock_subject_template}: Subject template for locks
- C{unlock_subject_template}: Subject template for unlocks
- C{commit_subject_prefix}: Subject prefix for commit mail
- C{propchange_subject_prefix}: Subject prefix for revpropchanges
- C{lock_subject_prefix}: Subject prefix for locks
- C{unlock_subject_prefix}: Subject prefix for unlocks
- C{max_subject_length}: Maximum subject length
- C{from_addr}: C{From:} address format string
- C{to_addr}: C{To:} address format string
- C{to_fake}: C{To:} non-address format string
- C{bcc_addr}: C{Bcc:} address format string
- C{reply_to_addr}: C{Reply-To:} address format string
- C{diff_command}: The diff command to use
- C{generate_diffs}: List of actions for which diffs are generated
- C{browser_base_url}: type and format string of the repository
browser base url
- C{custom_header}: custom header name and format template
- C{to_newsgroup}: The newsgroup where the notification should be
posted to
- C{long_news_action}: The action to take on huge commit postings
- C{long_mail_action}: The action to take on huge commit mails
- C{mail_transfer_encoding}: Content-Transfer-Encoding for mails
- C{news_transfer_encoding}: Content-Transfer-Encoding for news
- C{mail_type}: The mail construction type
- C{extract_x509_author}: Treat author as x509 subject and try to
extract author's real name and email address
- C{cia_project_name}: The project name used for CIA notifications
- C{cia_project_module}: The project module used for CIA
notifications
- C{cia_project_branch}: The project branch used for CIA
notifications
- C{cia_project_submodule}: The project submodule used for CIA
notifications
- C{cia_project_path}: The project path, which will be stripped from
the absolute node path
- C{apply_charset_property}: Should svnmailer:content-charset
properties be recognized?
- C{show_applied_charset}: Show the encoding of the files in the
diff?
- C{viewcvs_base_url}: (I{deprecated}, use C{browser_base_url}
instead) format string for the viewcvs URL
@param space: The namespace to pollute
@type space: C{dict}
@return: The members definition
@rtype: C{dict}
"""
args = {
'space' : space,
'typemap' : struct_accessors.typemap,
'the_members': {
'_name' : 'unicode',
'_def_for_repos' : 'regex',
'_def_for_paths' : 'regex',
'for_repos' : ('regex', {'map': True}),
'for_paths' : ('regex', {'map': True}),
'exclude_paths' : ('regex', {'map': True}),
'ignore_if_other_matches' : 'humanbool',
'show_nonmatching_paths' : ('token',
{'map': True,
'allowed': xpath.valid_tokens}),
'commit_subject_template' : ('unicode', {'map': True}),
'propchange_subject_template': ('unicode', {'map': True}),
'lock_subject_template' : ('unicode', {'map': True}),
'unlock_subject_template' : ('unicode', {'map': True}),
'commit_subject_prefix' : ('unicode',
{'subst': True, 'map': True}),
'propchange_subject_prefix' : ('unicode',
{'subst': True, 'map': True}),
'lock_subject_prefix' : ('unicode',
{'subst': True, 'map': True}),
'unlock_subject_prefix' : ('unicode',
{'subst': True, 'map': True}),
'max_subject_length' : 'int',
'from_addr' : ('tokenlist',
{'subst': True, 'map': True}),
'to_addr' : ('tokenlist',
{'subst': True, 'map': True}),
'to_fake' : ('unicode',
{'subst': True, 'map': True}),
'bcc_addr' : ('tokenlist',
{'subst': True, 'map': True}),
'reply_to_addr' : ('unicode',
{'subst': True, 'map': True}),
'to_newsgroup' : ('tokenlist',
{'subst': True, 'map': True}),
'diff_command' : ('unicommand', {'map': True}),
'generate_diffs' : 'tokenlist',
'browser_base_url' : ('unicode',
{'subst': True, 'map': True}),
'long_mail_action' : ('mailaction', {'map': True}),
'long_news_action' : ('mailaction', {'map': True}),
'mail_type' : ('unicode', {'map': True}),
'mail_transfer_encoding' : 'unicode',
'news_transfer_encoding' : 'unicode',
'custom_header' : ('unicode',
{'subst': True, 'map': True}),
'extract_x509_author' : 'humanbool',
'cia_rpc_server' : ('unicode', {'map': True}),
'cia_project_name' : ('unicode',
{'subst': True, 'map': True}),
'cia_project_module' : ('unicode',
{'subst': True, 'map': True}),
'cia_project_branch' : ('unicode',
{'subst': True, 'map': True}),
'cia_project_submodule' : ('unicode',
{'subst': True, 'map': True}),
'cia_project_path' : ('unicode',
{'subst': True, 'map': True}),
'apply_charset_property' : 'humanbool',
'show_applied_charset' : ('token',
{'allowed': showenc.valid_tokens}),
# deprecated
'viewcvs_base_url' : ('unicode',
{'subst': True, 'map': True}),
},
'aliases': {
'suppress_if_match' : 'ignore_if_other_matches',
'fallback' : 'ignore_if_other_matches',
'reply_to' : 'reply_to_addr',
'x509_author' : 'extract_x509_author',
'charset_property' : 'apply_charset_property',
'truncate_subject' : 'max_subject_length',
'subject_length' : 'max_subject_length',
'diff' : 'diff_command',
'nonmatching_paths' : 'show_nonmatching_paths',
'nongroup_paths' : 'show_nonmatching_paths',
'show_nongroup_paths': 'show_nonmatching_paths',
},
}
return typedstruct.members(**args)
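# Illustrative group section of an svnmailer config (assumed syntax, shown
# only to relate option names to the members above; aliases such as
# "diff" -> "diff_command" are resolved through the 'aliases' map):
#
#   [example-group]
#   for_repos             = /var/svn/(?P<project>[^/]+)
#   for_paths             = trunk/
#   to_addr               = commits@example.org
#   commit_subject_prefix = [SVN]
#   generate_diffs        = add copy modify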
def generalMembers(space):
""" Define the members of the general settings
The following members are defined:
- C{diff_command}: The diff command
- C{sendmail_command}: The sendmail compatible command
- C{ssl_mode}: ssl mode
- C{smtp_host}: The smtp host (C{host[:port]})
- C{smtp_user}: The smtp auth. user
- C{smtp_pass}: The smtp auth. password
- C{debug_all_mails_to}: All mails are sent to these addresses
(for debugging purposes)
- C{cia_rpc_server}: The XML-RPC server running the CIA tracker
- C{tempdir}: The directory to use for temporary files
@param space: The namespace to pollute
@type space: C{dict}
@return: The members definition
@rtype: C{dict}
"""
args = {
'space' : space,
'typemap' : struct_accessors.typemap,
'the_members': {
'sendmail_command' : ('unicommand', {'map': True}),
'ssl_mode' : ('unicode', {'map': True}),
'smtp_host' : ('unicode', {'map': True}),
'smtp_user' : ('quotedstr', {'map': True}),
'smtp_pass' : ('quotedstr', {'map': True}),
'nntp_host' : ('unicode', {'map': True}),
'nntp_user' : ('quotedstr', {'map': True}),
'nntp_pass' : ('quotedstr', {'map': True}),
'debug_all_mails_to': ('tokenlist', {'map': True}),
'tempdir' : ('filename', {'map': True}),
# deprecated
'cia_rpc_server' : ('unicode', {'map': True}),
'diff_command' : ('unicommand', {'map': True}),
},
'aliases' : {
'mail_command' : 'sendmail_command',
'smtp_hostname': 'smtp_host',
'diff' : 'diff_command',
},
}
return typedstruct.members(**args)
def runtimeMembers(space):
""" Define the members of the runtime settings
The following members are defined:
- C{_repos}: The repository object
- C{stdin}: The stdin, read once
- C{path_encoding}: The path-encoding parameter
- C{debug}: debug mode (True/False)
- C{revision}: committed revision number
- C{repository}: path to the repository
- C{config}: supplied config file name
- C{mode}: running mode (see L{modes})
- C{author}: Author of the commit or revpropchange
- C{propname}: Property changed (in revpropchange)
- C{action}: The revprop action (M, A, D)
@param space: The namespace to pollute
@type space: C{dict}
@return: The members definition
@rtype: C{dict}
"""
args = {
'space' : space,
'typemap' : struct_accessors.typemap,
'the_members': {
'_repos' : None, # internal usage (Repository object)
'stdin' : 'stdin',
'path_encoding': 'string',
'debug' : 'bool',
'revision' : 'int',
'repository' : 'filename',
'config' : 'filename',
'mode' : 'string',
'author' : 'unicode',
'propname' : 'unicode',
'action' : 'unicode', # >= svn 1.2
},
'aliases' : None,
}
return typedstruct.members(**args)
class GroupSettingsContainer(typedstruct.Struct):
""" Container for group settings
@see: L{groupMembers} for the actual member list
"""
__slots__ = groupMembers(locals())
def _compare(self, other):
""" compare some of the attributes
@note: It uses a list of attributes that are compared if two
of these types are tested for equality. Keep in mind that
            this comparison takes place when the decision is made
whether a mail for more than one group should be sent more
than once (if the groups are not equal). All attributes, but
the ones returned by L{_getIgnorableMembers} are compared.
@see: L{_getIgnorableMembers}
@param other: The object compared to
@type other: C{GroupSettingsContainer}
@return: Are the objects equal?
@rtype: C{bool}
"""
if type(self) != type(other):
return False
attrs = [name for name in self._members_
if name not in self._getIgnorableMembers()
]
for name in attrs:
if getattr(self, name) != getattr(other, name):
return False
return True
    def _getIgnorableMembers(self):
        """ Returns the list of member names that should be ignored in comparisons
        This method is called by L{_compare}. Override this method to modify
the list.
@return: The list
@rtype: C{list}
"""
return [
'_name', '_def_for_repos', '_def_for_paths',
'for_repos', 'for_paths', 'exclude_paths',
'ignore_if_other_matches', 'to_addr', 'from_addr',
'to_newsgroup', 'custom_header', 'cia_rpc_server',
'cia_project_name', 'cia_project_module', 'cia_project_branch',
'cia_project_submodule', 'cia_project_path',
]
class GeneralSettingsContainer(typedstruct.Struct):
""" Container for general settings
@see: L{generalMembers} for the actual member list
"""
__slots__ = generalMembers(locals())
class RuntimeSettingsContainer(typedstruct.Struct):
""" Container for runtime settings
@see: L{runtimeMembers} for the actual member list
"""
__slots__ = runtimeMembers(locals())
class Settings(object):
""" Settings management
@note: The C{init} method must be overridden to do the actual
initialization.
@ivar groups: group settings list
@type groups: C{list} of C{GroupSettingsContainer}
@ivar general: General settings
@type general: C{GeneralSettingsContainer}
    @ivar runtime: Runtime settings
@type runtime: C{RuntimeSettingsContainer}
@ivar debug: Debug state
@type debug: C{bool}
@ivar _charset_: The charset used for settings recoding
@type _charset_: C{str}
@ivar _maps_: The value mappers to use or C{None}
@type _maps_: C{dict}
"""
def __init__(self, *args, **kwargs):
""" Constructor
Don't override this one. Override C{init()} instead.
"""
# supply default values
self._charset_ = 'us-ascii'
self._fcharset_ = None
self._maps_ = None
self.groups = []
self.general = None
self.runtime = None
# parameter initialization
self.init(*args, **kwargs)
# sanity check
self._checkInitialization()
def _checkInitialization(self):
""" Checks if all containers are filled """
if not(self.general and self.runtime and self.groups):
raise RuntimeError("Settings are not completely initialized")
def init(self, *args, **kwargs):
""" Abstract initialization method """
raise NotImplementedError()
def _getArgs(self):
""" Returns the basic arguments for container initialization
@return: The args
@rtype: C{list}
"""
return [
self._maps_,
{'encoding': self._charset_, 'path_encoding': self._fcharset_}
]
def getGroupContainer(self, **kwargs):
""" Returns an initialized group settings container
@return: The container object
@rtype: C{GroupSettingsContainer}
"""
return GroupSettingsContainer(*self._getArgs(), **kwargs)
def getDefaultGroupContainer(self, **kwargs):
""" Returns an initialized default group settings container
@return: The container object
@rtype: C{GroupSettingsContainer}
"""
args = self._getArgs()
args[0] = None # no maps
return GroupSettingsContainer(*args, **kwargs)
def getGeneralContainer(self, **kwargs):
""" Returns an initialized general settings container
@return: The container object
@rtype: C{GeneralSettingsContainer}
"""
return GeneralSettingsContainer(*self._getArgs(), **kwargs)
def getRuntimeContainer(self, **kwargs):
""" Returns an initialized runtime settings container
Note that the runtime settings (from commandline)
are always assumed to be utf-8 encoded.
@return: The container object
@rtype: C{RuntimeSettingsContainer}
"""
args = self._getArgs()
args[0] = None
args[1]["encoding"] = "utf-8"
return RuntimeSettingsContainer(*args, **kwargs)
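# Illustrative sketch (not part of svnmailer): a minimal Settings implementor
# showing the shape of the required init() override.  The real implementor
# lives in svnmailer.config and reads these values from the config file.
class _ExampleSettings(Settings):
    """ Example: fill the three containers via the factory methods above """
    def init(self):
        """ Fill the general, runtime and group containers with defaults """
        self.general = self.getGeneralContainer()
        self.runtime = self.getRuntimeContainer()
        self.groups = [self.getDefaultGroupContainer()]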
|
apache-2.0
| 704,259,331,109,030,700
| 39.206122
| 81
| 0.526115
| false
| 4.425202
| false
| false
| false
|
polyaxon/polyaxon-api
|
polyaxon_lib/estimators/hooks/general_hooks.py
|
1
|
2631
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from collections import OrderedDict
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import evaluation
from polyaxon_lib.estimators.hooks.utils import can_run_hook
class GlobalStepWaiterHook(basic_session_run_hooks.GlobalStepWaiterHook):
"""Delay execution until global step reaches to wait_until_step.
(A mirror to tensorflow.python.training.basic_session_run_hooks GlobalStepWaiterHook.)
This hook delays execution until global step reaches to `wait_until_step`. It
is used to gradually start workers in distributed settings. One example usage
would be setting `wait_until_step=int(K*log(task_id+1))` assuming that
task_id=0 is the chief.
Args:
wait_until_step: an `int` shows until which global step should we wait.
"""
pass
class FinalOpsHook(basic_session_run_hooks.FinalOpsHook):
"""A run hook which evaluates `Tensors` at the end of a session.
    (A mirror to tensorflow.python.training.basic_session_run_hooks FinalOpsHook.)
Args:
final_ops: A single `Tensor`, a list of `Tensors` or a dictionary of names to `Tensors`.
final_ops_feed_dict: A feed dictionary to use when running `final_ops_dict`.
"""
pass
class StopAfterNEvalsHook(evaluation._StopAfterNEvalsHook): # pylint: disable=protected-access
"""Run hook used by the evaluation routines to run the `eval_ops` N times."""
pass
class NanTensorHook(basic_session_run_hooks.NanTensorHook):
"""NaN Loss monitor.
A modified version of tensorflow.python.training.basic_session_run_hooks NanTensorHook.
    Checks the context for `no_run_hooks_op` before calling the hook.
Monitors loss and stops training if loss is NaN.
Can either fail with exception or just stop training.
Args:
loss_tensor: `Tensor`, the loss tensor.
fail_on_nan_loss: `bool`, whether to raise exception when loss is NaN.
"""
def before_run(self, run_context): # pylint: disable=unused-argument
if can_run_hook(run_context):
return super(NanTensorHook, self).before_run(run_context)
return None
def after_run(self, run_context, run_values):
if can_run_hook(run_context):
return super(NanTensorHook, self).after_run(run_context, run_values)
GENERAL_HOOKS = OrderedDict([
('GlobalStepWaiterHook', GlobalStepWaiterHook),
('FinalOpsHook', FinalOpsHook),
('StopAfterNEvalsHook', StopAfterNEvalsHook),
('NanTensorHook', NanTensorHook)
])
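# Example usage (hypothetical tensors): hooks can be looked up by name and
# instantiated with their usual TensorFlow constructor arguments, e.g.
#
#   hook_cls = GENERAL_HOOKS['NanTensorHook']
#   nan_hook = hook_cls(loss_tensor=loss, fail_on_nan_loss=True)
#
# The resulting hook still honours `no_run_hooks_op` via can_run_hook()
# before delegating to the underlying basic_session_run_hooks implementation.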
|
mit
| -773,701,700,634,766,200
| 34.08
| 96
| 0.717598
| false
| 3.774749
| false
| false
| false
|
ShovanSarker/mango_office
|
actions/views.py
|
1
|
15639
|
from django.shortcuts import render, redirect
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from users.models import AllUsers, ACL
from status.models import Status
from task.models import Task
import datetime
from attendance.models import AttendanceInOffice, AttendanceInHome
from django.contrib.auth.models import User
# Create your views here.
@csrf_exempt
def login_page(request):
return render(request, 'login.html')
@csrf_exempt
def login_auth(request):
post_data = request.POST
print(post_data)
    if 'username' in post_data and 'password' in post_data:
print(post_data['username'])
print(post_data['password'])
user = authenticate(username=post_data['username'], password=post_data['password'])
if user is not None:
if user.is_active:
login(request, user)
request.session['user'] = post_data['username']
if user.is_superuser:
res = redirect('/admin')
else:
res = redirect('/')
else:
res = render(request, 'login.html',
{'wrong': True,
'text': 'The password is valid, but the account has been disabled!'})
else:
res = render(request, 'login.html',
{'wrong': True,
'text': 'The username and password you have entered is not correct. Please retry'})
else:
res = render(request, 'login.html', {'wrong': False})
res['Access-Control-Allow-Origin'] = "*"
res['Access-Control-Allow-Headers'] = "Origin, X-Requested-With, Content-Type, Accept"
res['Access-Control-Allow-Methods'] = "PUT, GET, POST, DELETE, OPTIONS"
return res
def logout_now(request):
logout(request)
return redirect('/login')
@login_required(login_url='/login/')
def home(request):
page_title = 'Home'
user = request.session['user']
if not AllUsers.objects.exists():
print(request.session['user'])
new_status = Status.objects.get(StatusKey='office')
new_user = AllUsers(username=user, Name=user, Email=user + '@inflack.com', Status=new_status)
new_user.save()
new_user_acl = ACL(user=new_user,
CanSeeOthersTaskList=True,
CanSeeOthersAttendance=True,
CanAddMoreEmployee=True,
CanSeeOthersDetails=True,
CanSeeOthersStatus=True)
new_user_acl.save()
if AllUsers.objects.filter(username__exact=user).exists():
this_user = AllUsers.objects.get(username__exact=user)
if this_user.Active:
all_status = Status.objects.all()
display = render(request, 'client_dashboard.html', {'login_user': this_user,
'can_see_others_status': this_user.acl.CanSeeOthersStatus,
'can_add_employee': this_user.acl.CanAddMoreEmployee,
'all_status': all_status,
'page_title': page_title})
else:
logout(request)
display = render(request, 'login.html',
{'wrong': True,
'text': 'You are not authorized to login. Please contact administrator for more details'})
else:
logout(request)
display = render(request, 'login.html',
{'wrong': True,
'text': 'Something went wrong. Please LOGIN again.'})
return display
@login_required(login_url='/login/')
def add_employee(request):
user = request.session['user']
post_data = request.POST
this_user = AllUsers.objects.get(username__exact=user)
# login_user = this_user.Name
# print(post_data['super-admin'])
if 'username' in post_data and 'csrfmiddlewaretoken' in post_data:
if AllUsers.objects.filter(username__exact=user).exists():
if this_user.Active and this_user.acl.CanAddMoreEmployee:
if AllUsers.objects.filter(username__exact=post_data['username']).exists() or \
post_data['username'] == 'admin':
# This username is already taken
print(post_data)
display = render(request, 'add_admin.html', {'page_title': 'Add Employee',
'login_user': this_user,
'can_see_others_status': this_user.acl.CanSeeOthersStatus,
'can_add_employee': this_user.acl.CanAddMoreEmployee,
'wrong': True,
                                                              'text': 'This USERNAME is already taken. '
'Please try with a different one'})
else:
if post_data['password'] == post_data['re-password']:
# password matches
print(post_data)
new_status = Status.objects.get(StatusKey='office')
new_user = AllUsers(username=post_data['username'],
Name=post_data['name'],
Designation=post_data['designation'],
Phone=post_data['phone'],
Email=post_data['email'],
Status=new_status)
new_user.save()
new_user_acl = ACL(user=new_user)
new_user_acl.save()
new_user_login = User.objects.create_user(post_data['username'],
post_data['email'],
post_data['password'])
new_user_login.save()
display = render(request, 'add_admin.html', {'page_title': 'Add Employee',
'login_user': this_user,
'can_see_others_status': this_user.acl.CanSeeOthersStatus,
'can_add_employee': this_user.acl.CanAddMoreEmployee,
'success': True,
'text': 'New employee has been '
'added successfully.'})
else:
display = render(request, 'add_admin.html', {'page_title': 'Add Employee',
'login_user': this_user,
'can_see_others_status': this_user.acl.CanSeeOthersStatus,
'can_add_employee': this_user.acl.CanAddMoreEmployee,
'wrong': True,
                                                                  'text': 'The passwords do not match. '
'Please try again'})
else:
logout(request)
display = render(request, 'login.html',
{'wrong': True,
'text': 'You are not authorized to login.'
' Please contact administrator for more details'})
else:
display = redirect('/')
else:
if this_user.acl.CanAddMoreEmployee:
display = render(request, 'add_admin.html', {'page_title': 'Add Employee',
'login_user': this_user,
'can_see_others_status': this_user.acl.CanSeeOthersStatus,
'can_add_employee': this_user.acl.CanAddMoreEmployee})
else:
display = render(request, 'access_denied.html', {'page_title': 'Add Employee',
'login_user': this_user,
'can_see_others_status': this_user.acl.CanSeeOthersStatus,
'can_add_employee': this_user.acl.CanAddMoreEmployee})
return display
@login_required(login_url='/login/')
def change_status(request):
user = request.session['user']
get_data = request.GET
if AllUsers.objects.filter(username__exact=user).exists():
new_status = Status.objects.get(StatusKey=get_data['to'])
this_user = AllUsers.objects.get(username__exact=user)
current_status = this_user.Status
print(current_status.StatusKey)
print(get_data['to'])
if ((get_data['to'] == 'office' or get_data['to'] == 'away' or
get_data['to'] == 'meeting' or get_data['to'] == 'out') and current_status.StatusKey != 'home') or \
get_data['to'] == 'home' and current_status.StatusKey == 'out' or \
get_data['to'] == 'out' and current_status.StatusKey == 'home':
if (get_data['to'] == 'office' or get_data['to'] == 'away' or get_data['to'] == 'meeting') \
and current_status.StatusKey == 'out':
new_office_attendance = AttendanceInOffice(User=this_user)
new_office_attendance.save()
            elif get_data['to'] == 'home' and current_status.StatusKey == 'out':
new_home_attendance = AttendanceInHome(User=this_user)
new_home_attendance.save()
            elif get_data['to'] == 'out' and current_status.StatusKey == 'home':
new_home_attendance = AttendanceInHome.objects.get(User=this_user, ExitTime=None)
print(datetime.datetime.now())
new_home_attendance.ExitTime = datetime.datetime.now()
new_home_attendance.save()
            elif get_data['to'] == 'out' and (current_status.StatusKey == 'office' or
current_status.StatusKey == 'away' or
current_status.StatusKey == 'meeting'):
new_office_attendance = AttendanceInOffice.objects.get(User=this_user, ExitTime=None)
print(datetime.datetime.now())
new_office_attendance.ExitTime = datetime.datetime.now()
new_office_attendance.save()
this_user.Status = new_status
this_user.save()
display = redirect('/')
else:
display = redirect('/logout')
return display
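# Attendance bookkeeping performed by change_status() above, derived from its
# conditions:
#
#   out -> office/away/meeting : opens an AttendanceInOffice record
#   out -> home                : opens an AttendanceInHome record
#   home -> out                : closes the open AttendanceInHome record
#   office/away/meeting -> out : closes the open AttendanceInOffice record
#
# Any other combination (e.g. home -> office) is not accepted by the guard
# condition, so no attendance record is touched.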
@login_required(login_url='/login/')
def employee_list(request):
user = request.session['user']
if AllUsers.objects.filter(username__exact=user).exists():
this_user = AllUsers.objects.get(username__exact=user)
if this_user.acl.CanSeeOthersStatus:
all_employees = AllUsers.objects.all()
display = render(request, 'admin_list.html', {'page_title': 'Add Employee',
'login_user': this_user,
'all_employees': all_employees,
'can_see_others_status': this_user.acl.CanSeeOthersStatus,
'can_add_employee': this_user.acl.CanAddMoreEmployee})
else:
display = render(request, 'access_denied.html', {'page_title': 'Add Employee',
'login_user': this_user,
'can_see_others_status': this_user.acl.CanSeeOthersStatus,
'can_add_employee': this_user.acl.CanAddMoreEmployee})
else:
display = redirect('/logout')
return display
@login_required(login_url='/login/')
def all_task(request):
user = request.session['user']
if AllUsers.objects.filter(username__exact=user).exists():
this_user = AllUsers.objects.get(username__exact=user)
# if this_user.acl.CanSeeOthersStatus:
all_tasks = Task.objects.filter(AssignedTo=this_user)
assigned_tasks = Task.objects.filter(AssignedBy=this_user)
display = render(request, 'all_task.html', {'page_title': 'Task List',
'login_user': this_user,
'all_tasks': all_tasks,
'assigned_tasks': assigned_tasks,
'can_see_others_status': this_user.acl.CanSeeOthersStatus,
'can_add_employee': this_user.acl.CanAddMoreEmployee})
else:
display = redirect('/logout')
return display
@login_required(login_url='/login/')
def attendance(request):
user = request.session['user']
if AllUsers.objects.filter(username__exact=user).exists():
this_user = AllUsers.objects.get(username__exact=user)
# if this_user.acl.CanSeeOthersStatus:
office_work = AttendanceInOffice.objects.filter(User=this_user)
home_work = AttendanceInHome.objects.filter(User=this_user)
display = render(request, 'attendance.html', {'page_title': 'Attendance',
'login_user': this_user,
'office_work': office_work,
'home_work': home_work,
'can_see_others_status': this_user.acl.CanSeeOthersStatus,
'can_add_employee': this_user.acl.CanAddMoreEmployee})
else:
display = redirect('/logout')
return display
@login_required(login_url='/login/')
def profile(request):
user = request.session['user']
if AllUsers.objects.filter(username__exact=user).exists():
this_user = AllUsers.objects.get(username__exact=user)
display = render(request, 'profile.html', {'page_title': 'Profile',
'login_user': this_user,
'this_user': this_user,
'can_see_others_status': this_user.acl.CanSeeOthersStatus,
'can_add_employee': this_user.acl.CanAddMoreEmployee})
else:
display = redirect('/logout')
return display
|
gpl-2.0
| -5,272,535,817,592,566,000
| 53.114187
| 127
| 0.475478
| false
| 4.843295
| false
| false
| false
|
qingtech/weibome
|
weibome/settings.py
|
1
|
5454
|
# Django settings for weibome project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
import os
if 'SERVER_SOFTWARE' in os.environ:
from sae.const import (
MYSQL_HOST, MYSQL_PORT, MYSQL_USER, MYSQL_PASS, MYSQL_DB
)
else:
    # Make `python manage.py syncdb` work happily when running locally.
MYSQL_HOST = 'localhost'
MYSQL_PORT = '3306'
MYSQL_USER = 'root'
MYSQL_PASS = '123'
MYSQL_DB = 'weibome'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': MYSQL_DB,
'USER': MYSQL_USER,
'PASSWORD': MYSQL_PASS,
'HOST': MYSQL_HOST,
'PORT': MYSQL_PORT,
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = 'static/'
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '#7-*s-)n!pnjrlv@f%f4&pn+#lr8)3o!5j-d-(is2accw!9x5p'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'weibome.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'weibome.wsgi.application'
import os.path
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(os.path.dirname(__file__), 'templates'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'weime',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
gpl-2.0
| 3,294,211,815,736,853,500
| 30.526012
| 88
| 0.688302
| false
| 3.530097
| false
| false
| false
|
vbraun/SageUI
|
src/sageui/view/trac_window.py
|
1
|
13199
|
"""
Window showing a Trac Ticket
"""
##############################################################################
# SageUI: A graphical user interface to Sage, Trac, and Git.
# Copyright (C) 2013 Volker Braun <vbraun.name@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
import logging
import gtk
import gobject
import pango
from gtksourceview2 import View as GtkSourceView
from buildable import Buildable
from window import Window
from terminal_widget import TerminalWidget
class TracWindowUpdater(object):
def __init__(self, trac_window, timeout=1):
self.trac_window = trac_window
self.counter = 0
gobject.timeout_add_seconds(timeout, self.callback)
def callback(self):
self.counter += 1
#print 'updating trac window', str(self.counter)
if not self.trac_window.window.get_visible():
return False
self.trac_window.update_ticket_age()
return True
class TracWindow(Buildable, Window):
def __init__(self, presenter, glade_file):
self.presenter = presenter
Buildable.__init__(self, ['trac_window', 'trac_menubar', 'trac_toolbar',
'trac_tool_web', 'trac_tool_git', 'trac_tool_refresh',
'trac_tool_git_icon',
'trac_ticketlist_store', 'trac_ticketlist_view',
'trac_search_entry',
'trac_comments',
'trac_comment_text', 'trac_comment_buffer'])
builder = self.get_builder(glade_file)
Window.__init__(self, builder, 'trac_window')
self.menu = builder.get_object('trac_menubar')
self.toolbar = builder.get_object('trac_toolbar')
self.search_entry = builder.get_object('trac_search_entry')
self.ticketlist_store = builder.get_object('trac_ticketlist_store')
self.ticketlist_view = builder.get_object('trac_ticketlist_view')
self._init_ticketlist(self.ticketlist_view)
self.comments = builder.get_object('trac_comments')
self._init_comments(self.comments)
self.comment_text = builder.get_object('trac_comment_text')
self.comment_buffer = builder.get_object('trac_comment_buffer')
self.toolbar_web = builder.get_object('trac_tool_web')
self.toolbar_refresh = builder.get_object('trac_tool_refresh')
self.toolbar_git = builder.get_object('trac_tool_git')
builder.connect_signals(self)
self.ticket_list = None
self.current_ticket = None
def _init_ticketlist(self, listview):
listview.get_selection().set_mode(gtk.SELECTION_BROWSE)
# add two columns
self.col_title = gtk.TreeViewColumn('Description')
self.col_time = gtk.TreeViewColumn('Last seen')
listview.append_column(self.col_title)
listview.append_column(self.col_time)
# create a CellRenderers to render the data
self.cell_title = gtk.CellRendererText()
self.cell_title.set_property('ellipsize', pango.ELLIPSIZE_END)
self.cell_time = gtk.CellRendererText()
# add the cells to the columns - 2 in the first
self.col_title.pack_start(self.cell_title, True)
self.col_title.set_attributes(self.cell_title, markup=1)
self.col_title.set_resizable(True)
self.col_title.set_expand(True)
self.col_time.pack_end(self.cell_time, True)
self.col_time.set_attributes(self.cell_time, markup=2)
#self.col_time.set_expand(True)
def _init_comments(self, comments):
color = gtk.gdk.color_parse('#F0EAD6')
comments.modify_base(gtk.STATE_NORMAL, color)
tag_table = comments.get_buffer().get_tag_table()
tag = gtk.TextTag('warning')
tag.set_property('foreground', 'red')
tag_table.add(tag)
tag = gtk.TextTag('label')
tag.set_property('foreground', 'blue')
tag.set_property('style', pango.STYLE_ITALIC)
tag_table.add(tag)
tag = gtk.TextTag('description')
tag.set_property('foreground', 'black')
tag.set_property('family', 'monospace')
tag.set_property('wrap-mode', gtk.WRAP_WORD)
tag_table.add(tag)
tag = gtk.TextTag('trac_field')
tag.set_property('foreground', 'black')
tag.set_property('family', 'monospace')
tag.set_property('weight', pango.WEIGHT_SEMIBOLD)
tag_table.add(tag)
tag = gtk.TextTag('comment')
tag.set_property('foreground', 'black')
tag.set_property('family', 'monospace')
tag.set_property('wrap-mode', gtk.WRAP_WORD)
tag_table.add(tag)
tag = gtk.TextTag('title')
tag.set_property('foreground', 'black')
tag.set_property('weight', pango.WEIGHT_BOLD)
tag.set_property('scale', pango.SCALE_X_LARGE)
tag_table.add(tag)
tag = gtk.TextTag('debug')
tag.set_property('wrap-mode', gtk.WRAP_WORD)
tag_table.add(tag)
def show(self):
super(TracWindow, self).show()
TracWindowUpdater(self)
def set_ticket_list(self, ticket_list, current_ticket=None):
assert (current_ticket is None) or (current_ticket in ticket_list)
self.ticket_list = ticket_list
self.ticketlist_store.clear()
for ticket in ticket_list:
n = ticket.get_number()
row = [n,
'<b>#'+str(n)+'</b> '+ticket.get_title(),
str(ticket.get_pretty_last_viewed_time())]
self.ticketlist_store.append(row)
self.set_current_ticket(current_ticket)
def get_ticket_numbers(self):
result = []
store = self.ticketlist_store
iter = store.get_iter_first()
while iter is not None:
            result.append(store.get_value(iter, 0))
            iter = store.iter_next(iter)  # advance, otherwise this loop never ends
return tuple(result)
def set_current_ticket(self, ticket):
"""
Select ``ticket`` in the ticket list.
Also, updates the "Last seen" field since it probably changed to right now.
"""
self.current_ticket = ticket
sel = self.ticketlist_view.get_selection()
if ticket is None:
sel.unselect_all()
self.toolbar_refresh.set_sensitive(False)
self.toolbar_web.set_sensitive(False)
self.toolbar_git.set_sensitive(False)
return
assert ticket in self.ticket_list
ticket_number = ticket.get_number()
store = self.ticketlist_store
iter = store.get_iter_first()
while (iter is not None) and (store.get_value(iter, 0) != ticket_number):
iter = store.iter_next(iter)
        assert iter is not None
sel.select_iter(iter)
self.toolbar_refresh.set_sensitive(True)
self.toolbar_web.set_sensitive(True)
self.toolbar_git.set_sensitive(ticket.get_branch() is not None)
self.update_ticket_age([ticket])
def update_ticket_age(self, tickets=None):
if tickets is None:
tickets = self.ticket_list
if tickets is None:
return
ticket_by_number = dict()
for ticket in self.ticket_list:
ticket_by_number[ticket.get_number()] = ticket
store = self.ticketlist_store
iter = store.get_iter_first()
while iter is not None:
n = store.get_value(iter, 0)
ticket = ticket_by_number[n]
store.set(iter, 2, str(ticket.get_pretty_last_viewed_time()))
iter = store.iter_next(iter)
def on_trac_ticketlist_view_cursor_changed(self, widget, data=None):
model, iter = self.ticketlist_view.get_selection().get_selected()
if not iter:
return
ticket_number = model.get_value(iter, 0)
logging.info('trac ticket cursor changed to #%s', ticket_number)
self.presenter.ticket_selected(ticket_number)
def display_ticket(self, ticket):
buf = self.comments.get_buffer()
buf.set_text('')
if ticket is None:
return
def append(*args):
buf.insert_with_tags(buf.get_end_iter(), *args)
tag_table = buf.get_tag_table()
warn_tag = tag_table.lookup('warning')
title_tag = tag_table.lookup('title')
label_tag = tag_table.lookup('label')
trac_field_tag = tag_table.lookup('trac_field')
description_tag = tag_table.lookup('description')
comment_tag = tag_table.lookup('comment')
debug_tag = tag_table.lookup('debug')
append('Trac #'+str(ticket.get_number())+': '+ticket.get_title(), title_tag)
append('\n\n')
branch = ticket.get_branch()
if branch is not None:
append('Branch: ', label_tag)
append(branch, trac_field_tag)
append('\n')
deps = ticket.get_dependencies()
if deps is not None:
append('Dependencies: ', label_tag)
append(deps, trac_field_tag)
append('\n')
append('Description:\n', label_tag)
append(ticket.get_description().strip(), description_tag)
for comment in ticket.comment_iter():
append('\n\n')
author = comment.get_author()
time = comment.get_ctime().ctime()
append('Comment (by {0} on {1}):\n'.format(author, time), label_tag)
append(comment.get_comment().strip(), comment_tag)
append('\n\n')
append('Created: ', label_tag)
append(ticket.get_ctime().ctime(), trac_field_tag)
append('\t Last modified: ', label_tag)
append(ticket.get_mtime().ctime(), trac_field_tag)
append('\n\n')
append(str(ticket._data), debug_tag)
append('\n')
for log in ticket._change_log:
append(str(log) + '\n', debug_tag)
def on_trac_window_delete_event(self, widget, data=None):
self.presenter.hide_trac_window()
return True
def on_trac_menu_close_activate(self, widget, data=None):
self.presenter.hide_trac_window()
def on_trac_window_map(self, widget, data=None):
        logging.info('trac window map')
def on_trac_menu_new_activate(self, widget, data=None):
self.presenter.show_notification(self, "todo: trac new ticket")
def on_trac_menu_open_activate(self, widget, data=None):
self.presenter.show_notification(self, "todo: trac open ticket")
def on_trac_menu_about_activate(self, widget, data=None):
self.presenter.show_about_dialog()
def on_trac_menu_cut_activate(self, widget, data=None):
self.presenter.show_notification(self, "todo: trac cut")
def on_trac_menu_copy_activate(self, widget, data=None):
self.presenter.show_notification(self, "todo: trac copy")
def on_trac_menu_paste_activate(self, widget, data=None):
self.presenter.show_notification(self, "todo: trac paste")
def on_trac_menu_delete_activate(self, widget, data=None):
self.presenter.show_notification(self, "todo: trac delete")
def on_trac_menu_preferences_activate(self, widget, data=None):
self.presenter.show_preferences_dialog()
def on_trac_tool_new_clicked(self, widget, data=None):
self.presenter.show_notification(self, "todo: trac new ticket")
def on_trac_tool_web_clicked(self, widget, data=None):
url = 'http://trac.sagemath.org/{0}'.format(self.current_ticket.get_number())
self.presenter.xdg_open(url)
def on_trac_tool_git_clicked(self, widget, data=None):
branch = self.current_ticket.get_branch()
assert branch is not None # button should have been disabled
number = self.current_ticket.get_number()
logging.info('git button for %s %s', branch, number)
self.presenter.checkout_branch(branch, number)
self.presenter.show_git_window()
def on_trac_tool_refresh_clicked(self, widget, data=None):
self.presenter.load_ticket(self.current_ticket)
def on_trac_search_entry_activate(self, widget, data=None):
entry = self.search_entry.get_buffer().get_text()
entry = entry.strip('# ')
logging.info('searching trac for %s', entry)
try:
ticket_number = int(entry)
self.presenter.load_ticket(ticket_number)
except ValueError:
self.presenter.show_error(self, 'Invalid ticket number', 'Expected integer, got: '+entry)
|
gpl-3.0
| 6,258,722,187,542,107,000
| 39.48773
| 101
| 0.608152
| false
| 3.746523
| false
| false
| false
|
berkeley-stat159/project-lambda
|
code/stat159lambda/utils/tests/test_parse_demographics.py
|
1
|
1388
|
from __future__ import absolute_import
from stat159lambda.utils import parse_demographics
import os
import csv
def prepare_for_tests():
with open('demographics.csv', 'w') as csvfile:
file_writer = csv.writer(csvfile, delimiter=',', quotechar='"')
file_writer.writerow(['id', 'gender', 'age', 'forrest_seen_count'])
file_writer.writerow(['1', 'm', '30-35', '5'])
file_writer.writerow(['2', 'm', '30-35', '1'])
test_object = parse_demographics.parse_csv('demographics.csv')
return test_object
def test_seen_most_times():
test_subjects = prepare_for_tests()
seen_count = parse_demographics.seen_most_times(test_subjects)
assert seen_count[0] == 5
assert seen_count[1] == 1
delete_file()
def test_seen_least_times():
test_subjects = prepare_for_tests()
seen_count = parse_demographics.seen_least_times(test_subjects)
assert seen_count[0] == 1
assert seen_count[1] == 2
delete_file()
def test_find_id_by_gender():
test_subjects = prepare_for_tests()
id_list = parse_demographics.find_id_by_gender(test_subjects, 'm')
assert len(id_list) == 2
assert id_list[0] == 'm'
assert id_list[1] == 'm'
delete_file()
def test_find_count_by_id():
test_subjects = prepare_for_tests()
count = parse_demographics.find_count_by_id(test_subjects, 1)
assert count == 5
delete_file()
def delete_file():
os.remove('demographics.csv')
|
bsd-3-clause
| -6,484,319,558,559,641,000
| 26.215686
| 75
| 0.676513
| false
| 2.922105
| true
| false
| false
|
trabucayre/gnuradio
|
gr-audio/examples/python/dial_tone_daemon.py
|
1
|
1411
|
#!/usr/bin/env python
#
# Copyright 2004,2005,2007,2008,2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
from gnuradio import gr, gru
from gnuradio import audio
from gnuradio.eng_arg import eng_float
from argparse import ArgumentParser
import os
import sys
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
class my_top_block(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
parser = ArgumentParser()
parser.add_argument("-O", "--audio-output", default="",
help="pcm output device name. E.g., hw:0,0 or /dev/dsp")
parser.add_argument("-r", "--sample-rate", type=eng_float, default=48000,
help="set sample rate to RATE (%(default)r)")
args = parser.parse_args()
sample_rate = int(args.sample_rate)
ampl = 0.1
src0 = analog.sig_source_f(sample_rate, analog.GR_SIN_WAVE, 350, ampl)
src1 = analog.sig_source_f(sample_rate, analog.GR_SIN_WAVE, 440, ampl)
dst = audio.sink(sample_rate, args.audio_output)
self.connect(src0, (dst, 0))
self.connect(src1, (dst, 1))
if __name__ == '__main__':
pid = gru.daemonize()
print("To stop this program, enter 'kill %d'" % pid)
my_top_block().run()
|
gpl-3.0
| -8,665,632,935,623,739,000
| 29.673913
| 83
| 0.61871
| false
| 3.273782
| false
| false
| false
|
mtl/svg2mod
|
svg2mod/svg2mod.py
|
1
|
39409
|
#!/usr/bin/python
from __future__ import absolute_import
import argparse
import datetime
import os
from pprint import pformat, pprint
import re
import svg2mod.svg as svg
import sys
#----------------------------------------------------------------------------
DEFAULT_DPI = 96 # 96 as of Inkscape 0.92
def main():
args, parser = get_arguments()
pretty = args.format == 'pretty'
use_mm = args.units == 'mm'
if pretty:
if not use_mm:
print( "Error: decimil units only allowed with legacy output type" )
sys.exit( -1 )
#if args.include_reverse:
#print(
#"Warning: reverse footprint not supported or required for" +
#" pretty output format"
#)
# Import the SVG:
imported = Svg2ModImport(
args.input_file_name,
args.module_name,
args.module_value
)
# Pick an output file name if none was provided:
if args.output_file_name is None:
args.output_file_name = os.path.splitext(
os.path.basename( args.input_file_name )
)[ 0 ]
# Append the correct file name extension if needed:
if pretty:
extension = ".kicad_mod"
else:
extension = ".mod"
if args.output_file_name[ - len( extension ) : ] != extension:
args.output_file_name += extension
# Create an exporter:
if pretty:
exported = Svg2ModExportPretty(
imported,
args.output_file_name,
args.scale_factor,
args.precision,
args.dpi,
)
else:
# If the module file exists, try to read it:
exported = None
if os.path.isfile( args.output_file_name ):
try:
exported = Svg2ModExportLegacyUpdater(
imported,
args.output_file_name,
args.scale_factor,
args.precision,
args.dpi,
include_reverse = not args.front_only,
)
except Exception as e:
raise e
#print( e.message )
#exported = None
# Write the module file:
if exported is None:
exported = Svg2ModExportLegacy(
imported,
args.output_file_name,
args.scale_factor,
args.precision,
use_mm = use_mm,
dpi = args.dpi,
include_reverse = not args.front_only,
)
# Export the footprint:
exported.write()
#----------------------------------------------------------------------------
class LineSegment( object ):
#------------------------------------------------------------------------
@staticmethod
def _on_segment( p, q, r ):
""" Given three colinear points p, q, and r, check if
point q lies on line segment pr. """
if (
q.x <= max( p.x, r.x ) and
q.x >= min( p.x, r.x ) and
q.y <= max( p.y, r.y ) and
q.y >= min( p.y, r.y )
):
return True
return False
#------------------------------------------------------------------------
@staticmethod
def _orientation( p, q, r ):
""" Find orientation of ordered triplet (p, q, r).
Returns following values
0 --> p, q and r are colinear
1 --> Clockwise
2 --> Counterclockwise
"""
val = (
( q.y - p.y ) * ( r.x - q.x ) -
( q.x - p.x ) * ( r.y - q.y )
)
if val == 0: return 0
if val > 0: return 1
return 2
#------------------------------------------------------------------------
def __init__( self, p = None, q = None ):
self.p = p
self.q = q
#------------------------------------------------------------------------
def connects( self, segment ):
if self.q.x == segment.p.x and self.q.y == segment.p.y: return True
if self.q.x == segment.q.x and self.q.y == segment.q.y: return True
if self.p.x == segment.p.x and self.p.y == segment.p.y: return True
if self.p.x == segment.q.x and self.p.y == segment.q.y: return True
return False
#------------------------------------------------------------------------
def intersects( self, segment ):
""" Return true if line segments 'p1q1' and 'p2q2' intersect.
Adapted from:
http://www.geeksforgeeks.org/check-if-two-given-line-segments-intersect/
"""
# Find the four orientations needed for general and special cases:
o1 = self._orientation( self.p, self.q, segment.p )
o2 = self._orientation( self.p, self.q, segment.q )
o3 = self._orientation( segment.p, segment.q, self.p )
o4 = self._orientation( segment.p, segment.q, self.q )
return (
# General case:
( o1 != o2 and o3 != o4 )
or
# p1, q1 and p2 are colinear and p2 lies on segment p1q1:
( o1 == 0 and self._on_segment( self.p, segment.p, self.q ) )
or
# p1, q1 and p2 are colinear and q2 lies on segment p1q1:
( o2 == 0 and self._on_segment( self.p, segment.q, self.q ) )
or
# p2, q2 and p1 are colinear and p1 lies on segment p2q2:
( o3 == 0 and self._on_segment( segment.p, self.p, segment.q ) )
or
# p2, q2 and q1 are colinear and q1 lies on segment p2q2:
( o4 == 0 and self._on_segment( segment.p, self.q, segment.q ) )
)
#------------------------------------------------------------------------
def q_next( self, q ):
self.p = self.q
self.q = q
#------------------------------------------------------------------------
#----------------------------------------------------------------------------
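# Illustrative sketch (not called anywhere): exercising the orientation-based
# intersection test implemented by LineSegment above. Coordinates are made up;
# svg.Point is used with two numeric arguments, as elsewhere in this module.
def _demo_line_segment_intersection():
    a = LineSegment( svg.Point( 0.0, 0.0 ), svg.Point( 10.0, 10.0 ) )
    b = LineSegment( svg.Point( 0.0, 10.0 ), svg.Point( 10.0, 0.0 ) )  # crosses a at (5, 5)
    c = LineSegment( svg.Point( 20.0, 0.0 ), svg.Point( 30.0, 0.0 ) )  # disjoint from a
    return a.intersects( b ), a.intersects( c )   # expected: (True, False)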
class PolygonSegment( object ):
#------------------------------------------------------------------------
def __init__( self, points ):
self.points = points
if len( points ) < 3:
print(
"Warning:"
" Path segment has only {} points (not a polygon?)".format(
len( points )
)
)
#------------------------------------------------------------------------
# KiCad will not "pick up the pen" when moving between a polygon outline
# and holes within it, so we search for a pair of points connecting the
# outline (self) to the hole such that the connecting segment will not
# cross the visible inner space within any hole.
def _find_insertion_point( self, hole, holes ):
#print( " Finding insertion point. {} holes".format( len( holes ) ) )
# Try the next point on the container:
for cp in range( len( self.points ) ):
container_point = self.points[ cp ]
#print( " Trying container point {}".format( cp ) )
# Try the next point on the hole:
for hp in range( len( hole.points ) - 1 ):
hole_point = hole.points[ hp ]
#print( " Trying hole point {}".format( cp ) )
bridge = LineSegment( container_point, hole_point )
# Check for intersection with each other hole:
for other_hole in holes:
#print( " Trying other hole. Check = {}".format( hole == other_hole ) )
# If the other hole intersects, don't bother checking
# remaining holes:
if other_hole.intersects(
bridge,
check_connects = (
other_hole == hole or other_hole == self
)
): break
#print( " Hole does not intersect." )
else:
print( " Found insertion point: {}, {}".format( cp, hp ) )
# No other holes intersected, so this insertion point
# is acceptable:
return ( cp, hole.points_starting_on_index( hp ) )
print(
"Could not insert segment without overlapping other segments"
)
#------------------------------------------------------------------------
# Return the list of ordered points starting on the given index, ensuring
# that the first and last points are the same.
def points_starting_on_index( self, index ):
points = self.points
if index > 0:
# Strip off end point, which is a duplicate of the start point:
points = points[ : -1 ]
points = points[ index : ] + points[ : index ]
points.append(
svg.Point( points[ 0 ].x, points[ 0 ].y )
)
return points
#------------------------------------------------------------------------
# Return a list of points with the given polygon segments (paths) inlined.
def inline( self, segments ):
if len( segments ) < 1:
return self.points
print( " Inlining {} segments...".format( len( segments ) ) )
all_segments = segments[ : ] + [ self ]
insertions = []
# Find the insertion point for each hole:
for hole in segments:
insertion = self._find_insertion_point(
hole, all_segments
)
if insertion is not None:
insertions.append( insertion )
insertions.sort( key = lambda i: i[ 0 ] )
inlined = [ self.points[ 0 ] ]
ip = 1
points = self.points
for insertion in insertions:
while ip <= insertion[ 0 ]:
inlined.append( points[ ip ] )
ip += 1
if (
inlined[ -1 ].x == insertion[ 1 ][ 0 ].x and
inlined[ -1 ].y == insertion[ 1 ][ 0 ].y
):
inlined += insertion[ 1 ][ 1 : -1 ]
else:
inlined += insertion[ 1 ]
inlined.append( svg.Point(
points[ ip - 1 ].x,
points[ ip - 1 ].y,
) )
while ip < len( points ):
inlined.append( points[ ip ] )
ip += 1
return inlined
#------------------------------------------------------------------------
def intersects( self, line_segment, check_connects ):
hole_segment = LineSegment()
# Check each segment of other hole for intersection:
for point in self.points:
hole_segment.q_next( point )
if hole_segment.p is not None:
if (
check_connects and
line_segment.connects( hole_segment )
): continue
if line_segment.intersects( hole_segment ):
#print( "Intersection detected." )
return True
return False
#------------------------------------------------------------------------
# Apply all transformations and rounding, then remove duplicate
# consecutive points along the path.
def process( self, transformer, flip ):
points = []
for point in self.points:
point = transformer.transform_point( point, flip )
if (
len( points ) < 1 or
point.x != points[ -1 ].x or
point.y != points[ -1 ].y
):
points.append( point )
if (
points[ 0 ].x != points[ -1 ].x or
points[ 0 ].y != points[ -1 ].y
):
#print( "Warning: Closing polygon. start=({}, {}) end=({}, {})".format(
#points[ 0 ].x, points[ 0 ].y,
#points[ -1 ].x, points[ -1 ].y,
#) )
points.append( svg.Point(
points[ 0 ].x,
points[ 0 ].y,
) )
#else:
#print( "Polygon closed: start=({}, {}) end=({}, {})".format(
#points[ 0 ].x, points[ 0 ].y,
#points[ -1 ].x, points[ -1 ].y,
#) )
self.points = points
#------------------------------------------------------------------------
#----------------------------------------------------------------------------
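# Illustrative sketch (not called anywhere): the point-reordering idea used by
# PolygonSegment.points_starting_on_index above (for index > 0), shown on a
# plain list of (x, y) tuples instead of svg.Point objects.
def _demo_rotate_closed_path():
    # A closed path repeats its first point at the end.
    pts = [ (0, 0), (1, 0), (1, 1), (0, 1), (0, 0) ]
    index = 2
    rotated = pts[ : -1 ]                          # drop the duplicated end point
    rotated = rotated[ index : ] + rotated[ : index ]
    rotated.append( rotated[ 0 ] )                 # re-close the path
    return rotated   # [(1, 1), (0, 1), (0, 0), (1, 0), (1, 1)]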
class Svg2ModImport( object ):
#------------------------------------------------------------------------
def __init__( self, file_name, module_name, module_value ):
self.file_name = file_name
self.module_name = module_name
self.module_value = module_value
print( "Parsing SVG..." )
self.svg = svg.parse( file_name )
#------------------------------------------------------------------------
#----------------------------------------------------------------------------
class Svg2ModExport( object ):
#------------------------------------------------------------------------
@staticmethod
def _convert_decimil_to_mm( decimil ):
return float( decimil ) * 0.00254
#------------------------------------------------------------------------
@staticmethod
def _convert_mm_to_decimil( mm ):
return int( round( mm * 393.700787 ) )
#------------------------------------------------------------------------
def _get_fill_stroke( self, item ):
fill = True
stroke = True
stroke_width = 0.0
if item.style is not None and item.style != "":
for property in item.style.split( ";" ):
nv = property.split( ":" )
name = nv[ 0 ].strip()
value = nv[ 1 ].strip()
if name == "fill" and value == "none":
fill = False
elif name == "stroke" and value == "none":
stroke = False
elif name == "stroke-width":
value = value.replace( "px", "" )
stroke_width = float( value ) * 25.4 / float(self.dpi)
if not stroke:
stroke_width = 0.0
elif stroke_width is None:
# Give a default stroke width?
stroke_width = self._convert_decimil_to_mm( 1 )
return fill, stroke, stroke_width
#------------------------------------------------------------------------
def __init__(
self,
svg2mod_import,
file_name,
scale_factor = 1.0,
precision = 20.0,
use_mm = True,
dpi = DEFAULT_DPI,
):
if use_mm:
# 25.4 mm/in;
scale_factor *= 25.4 / float(dpi)
use_mm = True
else:
# PCBNew uses "decimil" (10K DPI);
scale_factor *= 10000.0 / float(dpi)
self.imported = svg2mod_import
self.file_name = file_name
self.scale_factor = scale_factor
self.precision = precision
self.use_mm = use_mm
self.dpi = dpi
#------------------------------------------------------------------------
def _calculate_translation( self ):
min_point, max_point = self.imported.svg.bbox()
# Center the drawing:
adjust_x = min_point.x + ( max_point.x - min_point.x ) / 2.0
adjust_y = min_point.y + ( max_point.y - min_point.y ) / 2.0
self.translation = svg.Point(
0.0 - adjust_x,
0.0 - adjust_y,
)
#------------------------------------------------------------------------
# Find and keep only the layers of interest.
def _prune( self, items = None ):
if items is None:
self.layers = {}
for name in self.layer_map.iterkeys():
self.layers[ name ] = None
items = self.imported.svg.items
self.imported.svg.items = []
for item in items:
if not isinstance( item, svg.Group ):
continue
for name in self.layers.iterkeys():
#if re.search( name, item.name, re.I ):
if name == item.name:
print( "Found SVG layer: {}".format( item.name ) )
self.imported.svg.items.append( item )
self.layers[ name ] = item
break
else:
self._prune( item.items )
#------------------------------------------------------------------------
def _write_items( self, items, layer, flip = False ):
for item in items:
if isinstance( item, svg.Group ):
self._write_items( item.items, layer, flip )
continue
elif isinstance( item, svg.Path ):
segments = [
PolygonSegment( segment )
for segment in item.segments(
precision = self.precision
)
]
for segment in segments:
segment.process( self, flip )
if len( segments ) > 1:
points = segments[ 0 ].inline( segments[ 1 : ] )
elif len( segments ) > 0:
points = segments[ 0 ].points
else:
# No segments in this path; nothing to write for it.
continue
fill, stroke, stroke_width = self._get_fill_stroke( item )
if not self.use_mm:
stroke_width = self._convert_mm_to_decimil(
stroke_width
)
print( " Writing polygon with {} points".format(
len( points ) )
)
self._write_polygon(
points, layer, fill, stroke, stroke_width
)
else:
print( "Unsupported SVG element: {}".format(
item.__class__.__name__
) )
#------------------------------------------------------------------------
def _write_module( self, front ):
module_name = self._get_module_name( front )
min_point, max_point = self.imported.svg.bbox()
min_point = self.transform_point( min_point, flip = False )
max_point = self.transform_point( max_point, flip = False )
label_offset = 1200
label_size = 600
label_pen = 120
if self.use_mm:
label_size = self._convert_decimil_to_mm( label_size )
label_pen = self._convert_decimil_to_mm( label_pen )
reference_y = min_point.y - self._convert_decimil_to_mm( label_offset )
value_y = max_point.y + self._convert_decimil_to_mm( label_offset )
else:
reference_y = min_point.y - label_offset
value_y = max_point.y + label_offset
self._write_module_header(
label_size, label_pen,
reference_y, value_y,
front,
)
for name, group in self.layers.iteritems():
if group is None: continue
layer = self._get_layer_name( name, front )
#print( " Writing layer: {}".format( name ) )
self._write_items( group.items, layer, not front )
self._write_module_footer( front )
#------------------------------------------------------------------------
def _write_polygon_filled( self, points, layer, stroke_width = 0.0 ):
self._write_polygon_header( points, layer )
for point in points:
self._write_polygon_point( point )
self._write_polygon_footer( layer, stroke_width )
#------------------------------------------------------------------------
def _write_polygon_outline( self, points, layer, stroke_width ):
prior_point = None
for point in points:
if prior_point is not None:
self._write_polygon_segment(
prior_point, point, layer, stroke_width
)
prior_point = point
#------------------------------------------------------------------------
def transform_point( self, point, flip = False ):
transformed_point = svg.Point(
( point.x + self.translation.x ) * self.scale_factor,
( point.y + self.translation.y ) * self.scale_factor,
)
if flip:
transformed_point.x *= -1
if self.use_mm:
transformed_point.x = round( transformed_point.x, 12 )
transformed_point.y = round( transformed_point.y, 12 )
else:
transformed_point.x = int( round( transformed_point.x ) )
transformed_point.y = int( round( transformed_point.y ) )
return transformed_point
#------------------------------------------------------------------------
def write( self ):
self._prune()
# Must come after pruning:
self._calculate_translation()
print( "Writing module file: {}".format( self.file_name ) )
self.output_file = open( self.file_name, 'w' )
self._write_library_intro()
self._write_modules()
self.output_file.close()
self.output_file = None
#------------------------------------------------------------------------
#----------------------------------------------------------------------------
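# Illustrative sketch (not called anywhere): the unit handling used by the
# exporters below. A "decimil" is 1/10000 of an inch, hence the 0.00254 mm and
# 393.700787 per-mm factors in Svg2ModExport; the DPI value here is just an example.
def _demo_unit_scaling():
    dpi = 96.0
    mm_per_svg_unit = 25.4 / dpi            # ~0.2646 mm per SVG user unit
    decimil_per_svg_unit = 10000.0 / dpi    # ~104.2 decimil per SVG user unit
    one_decimil_in_mm = 1 * 0.00254         # 0.00254 mm
    one_mm_in_decimil = int( round( 1 * 393.700787 ) )   # 394 decimil
    return mm_per_svg_unit, decimil_per_svg_unit, one_decimil_in_mm, one_mm_in_decimil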
class Svg2ModExportLegacy( Svg2ModExport ):
layer_map = {
#'inkscape-name' : [ kicad-front, kicad-back ],
'Cu' : [ 15, 0 ],
'Adhes' : [ 17, 16 ],
'Paste' : [ 19, 18 ],
'SilkS' : [ 21, 20 ],
'Mask' : [ 23, 22 ],
'Dwgs.User' : [ 24, 24 ],
'Cmts.User' : [ 25, 25 ],
'Eco1.User' : [ 26, 26 ],
'Eco2.User' : [ 27, 27 ],
'Edge.Cuts' : [ 28, 28 ],
}
#------------------------------------------------------------------------
def __init__(
self,
svg2mod_import,
file_name,
scale_factor = 1.0,
precision = 20.0,
use_mm = True,
dpi = DEFAULT_DPI,
include_reverse = True,
):
super( Svg2ModExportLegacy, self ).__init__(
svg2mod_import,
file_name,
scale_factor,
precision,
use_mm,
dpi,
)
self.include_reverse = include_reverse
#------------------------------------------------------------------------
def _get_layer_name( self, name, front ):
layer_info = self.layer_map[ name ]
layer = layer_info[ 0 ]
if not front and layer_info[ 1 ] is not None:
layer = layer_info[ 1 ]
return layer
#------------------------------------------------------------------------
def _get_module_name( self, front = None ):
if self.include_reverse and not front:
return self.imported.module_name + "-rev"
return self.imported.module_name
#------------------------------------------------------------------------
def _write_library_intro( self ):
modules_list = self._get_module_name( front = True )
if self.include_reverse:
modules_list += (
"\n" +
self._get_module_name( front = False )
)
units = ""
if self.use_mm:
units = "\nUnits mm"
self.output_file.write( """PCBNEW-LibModule-V1 {0}{1}
$INDEX
{2}
$EndINDEX
#
# {3}
#
""".format(
datetime.datetime.now().strftime( "%a %d %b %Y %I:%M:%S %p %Z" ),
units,
modules_list,
self.imported.file_name,
)
)
#------------------------------------------------------------------------
def _write_module_header(
self,
label_size,
label_pen,
reference_y,
value_y,
front,
):
self.output_file.write( """$MODULE {0}
Po 0 0 0 {6} 00000000 00000000 ~~
Li {0}
T0 0 {1} {2} {2} 0 {3} N I 21 "{0}"
T1 0 {5} {2} {2} 0 {3} N I 21 "{4}"
""".format(
self._get_module_name( front ),
reference_y,
label_size,
label_pen,
self.imported.module_value,
value_y,
15, # Seems necessary
)
)
#------------------------------------------------------------------------
def _write_module_footer( self, front ):
self.output_file.write(
"$EndMODULE {0}\n".format( self._get_module_name( front ) )
)
#------------------------------------------------------------------------
def _write_modules( self ):
self._write_module( front = True )
if self.include_reverse:
self._write_module( front = False )
self.output_file.write( "$EndLIBRARY" )
#------------------------------------------------------------------------
def _write_polygon( self, points, layer, fill, stroke, stroke_width ):
if fill:
self._write_polygon_filled(
points, layer
)
if stroke:
self._write_polygon_outline(
points, layer, stroke_width
)
#------------------------------------------------------------------------
def _write_polygon_footer( self, layer, stroke_width ):
pass
#------------------------------------------------------------------------
def _write_polygon_header( self, points, layer ):
pen = 1
if self.use_mm:
pen = self._convert_decimil_to_mm( pen )
self.output_file.write( "DP 0 0 0 0 {} {} {}\n".format(
len( points ),
pen,
layer
) )
#------------------------------------------------------------------------
def _write_polygon_point( self, point ):
self.output_file.write(
"Dl {} {}\n".format( point.x, point.y )
)
#------------------------------------------------------------------------
def _write_polygon_segment( self, p, q, layer, stroke_width ):
self.output_file.write( "DS {} {} {} {} {} {}\n".format(
p.x, p.y,
q.x, q.y,
stroke_width,
layer
) )
#------------------------------------------------------------------------
#----------------------------------------------------------------------------
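# Illustrative only: a minimal sketch of the legacy library skeleton that the
# legacy exporter above emits and that the updater below parses back
# ($INDEX, $MODULE ... $EndMODULE, $EndLIBRARY). Module name, date, and the
# elided module body are hypothetical.
_EXAMPLE_LEGACY_SKELETON = """PCBNEW-LibModule-V1 Mon 01 Jan 2018 12:00:00 AM
Units mm
$INDEX
svg2mod
$EndINDEX
$MODULE svg2mod
...
$EndMODULE svg2mod
$EndLIBRARY"""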
class Svg2ModExportLegacyUpdater( Svg2ModExportLegacy ):
#------------------------------------------------------------------------
def __init__(
self,
svg2mod_import,
file_name,
scale_factor = 1.0,
precision = 20.0,
dpi = DEFAULT_DPI,
include_reverse = True,
):
self.file_name = file_name
use_mm = self._parse_output_file()
super( Svg2ModExportLegacyUpdater, self ).__init__(
svg2mod_import,
file_name,
scale_factor,
precision,
use_mm,
dpi,
include_reverse,
)
#------------------------------------------------------------------------
def _parse_output_file( self ):
print( "Parsing module file: {}".format( self.file_name ) )
module_file = open( self.file_name, 'r' )
lines = module_file.readlines()
module_file.close()
self.loaded_modules = {}
self.post_index = []
self.pre_index = []
use_mm = False
index = 0
# Find the start of the index:
while index < len( lines ):
line = lines[ index ]
index += 1
self.pre_index.append( line )
if line[ : 6 ] == "$INDEX":
break
m = re.match( "Units[\s]+mm[\s]*", line )
if m is not None:
print( " Use mm detected" )
use_mm = True
# Read the index:
while index < len( lines ):
line = lines[ index ]
if line[ : 9 ] == "$EndINDEX":
break
index += 1
self.loaded_modules[ line.strip() ] = []
# Read up until the first module:
while index < len( lines ):
line = lines[ index ]
if line[ : 7 ] == "$MODULE":
break
index += 1
self.post_index.append( line )
# Read modules:
while index < len( lines ):
line = lines[ index ]
if line[ : 7 ] == "$MODULE":
module_name, module_lines, index = self._read_module( lines, index )
if module_name is not None:
self.loaded_modules[ module_name ] = module_lines
elif line[ : 11 ] == "$EndLIBRARY":
break
else:
raise Exception(
"Expected $EndLIBRARY: [{}]".format( line )
)
#print( "Pre-index:" )
#pprint( self.pre_index )
#print( "Post-index:" )
#pprint( self.post_index )
#print( "Loaded modules:" )
#pprint( self.loaded_modules )
return use_mm
#------------------------------------------------------------------------
def _read_module( self, lines, index ):
# Read module name:
m = re.match( r'\$MODULE[\s]+([^\s]+)[\s]*', lines[ index ] )
module_name = m.group( 1 )
print( " Reading module {}".format( module_name ) )
index += 1
module_lines = []
while index < len( lines ):
line = lines[ index ]
index += 1
m = re.match(
r'\$EndMODULE[\s]+' + module_name + r'[\s]*', line
)
if m is not None:
return module_name, module_lines, index
module_lines.append( line )
raise Exception(
"Could not find end of module '{}'".format( module_name )
)
#------------------------------------------------------------------------
def _write_library_intro( self ):
# Write pre-index:
self.output_file.writelines( self.pre_index )
self.loaded_modules[ self._get_module_name( front = True ) ] = None
if self.include_reverse:
self.loaded_modules[
self._get_module_name( front = False )
] = None
# Write index:
for module_name in sorted(
self.loaded_modules.iterkeys(),
key = str.lower
):
self.output_file.write( module_name + "\n" )
# Write post-index:
self.output_file.writelines( self.post_index )
#------------------------------------------------------------------------
def _write_preserved_modules( self, up_to = None ):
if up_to is not None:
up_to = up_to.lower()
for module_name in sorted(
self.loaded_modules.iterkeys(),
key = str.lower
):
if up_to is not None and module_name.lower() >= up_to:
continue
module_lines = self.loaded_modules[ module_name ]
if module_lines is not None:
self.output_file.write(
"$MODULE {}\n".format( module_name )
)
self.output_file.writelines( module_lines )
self.output_file.write(
"$EndMODULE {}\n".format( module_name )
)
self.loaded_modules[ module_name ] = None
#------------------------------------------------------------------------
def _write_module_footer( self, front ):
super( Svg2ModExportLegacyUpdater, self )._write_module_footer(
front,
)
# Write remaining modules:
if not front:
self._write_preserved_modules()
#------------------------------------------------------------------------
def _write_module_header(
self,
label_size,
label_pen,
reference_y,
value_y,
front,
):
self._write_preserved_modules(
up_to = self._get_module_name( front )
)
super( Svg2ModExportLegacyUpdater, self )._write_module_header(
label_size,
label_pen,
reference_y,
value_y,
front,
)
#------------------------------------------------------------------------
#----------------------------------------------------------------------------
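# Illustrative sketch (not called anywhere): how the pretty exporter below maps
# an Inkscape layer name to a KiCad layer name for the front or back side.
def _demo_pretty_layer_name():
    template = "{}.Cu"   # from Svg2ModExportPretty.layer_map['Cu']
    return template.format( "F" ), template.format( "B" )   # ("F.Cu", "B.Cu")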
class Svg2ModExportPretty( Svg2ModExport ):
layer_map = {
#'inkscape-name' : kicad-name,
'Cu' : "{}.Cu",
'Adhes' : "{}.Adhes",
'Paste' : "{}.Paste",
'SilkS' : "{}.SilkS",
'Mask' : "{}.Mask",
'CrtYd' : "{}.CrtYd",
'Fab' : "{}.Fab",
'Edge.Cuts' : "Edge.Cuts"
}
#------------------------------------------------------------------------
def _get_layer_name( self, name, front ):
if front:
return self.layer_map[ name ].format("F")
else:
return self.layer_map[ name ].format("B")
#------------------------------------------------------------------------
def _get_module_name( self, front = None ):
return self.imported.module_name
#------------------------------------------------------------------------
def _write_library_intro( self ):
self.output_file.write( """(module {0} (layer F.Cu) (tedit {1:8X})
(attr smd)
(descr "{2}")
(tags {3})
""".format(
self.imported.module_name, #0
int( round( os.path.getctime( #1
self.imported.file_name
) ) ),
"Imported from {}".format( self.imported.file_name ), #2
"svg2mod", #3
)
)
#------------------------------------------------------------------------
def _write_module_footer( self, front ):
self.output_file.write( "\n)" )
#------------------------------------------------------------------------
def _write_module_header(
self,
label_size,
label_pen,
reference_y,
value_y,
front,
):
if front:
side = "F"
else:
side = "B"
self.output_file.write(
""" (fp_text reference {0} (at 0 {1}) (layer {2}.SilkS) hide
(effects (font (size {3} {3}) (thickness {4})))
)
(fp_text value {5} (at 0 {6}) (layer {2}.SilkS) hide
(effects (font (size {3} {3}) (thickness {4})))
)""".format(
self._get_module_name(), #0
reference_y, #1
side, #2
label_size, #3
label_pen, #4
self.imported.module_value, #5
value_y, #6
)
)
#------------------------------------------------------------------------
def _write_modules( self ):
self._write_module( front = True )
#------------------------------------------------------------------------
def _write_polygon( self, points, layer, fill, stroke, stroke_width ):
if fill:
self._write_polygon_filled(
points, layer, stroke_width
)
# Polygons with a fill and stroke are drawn with the filled polygon
# above:
if stroke and not fill:
self._write_polygon_outline(
points, layer, stroke_width
)
#------------------------------------------------------------------------
def _write_polygon_footer( self, layer, stroke_width ):
self.output_file.write(
" )\n (layer {})\n (width {})\n )".format(
layer, stroke_width
)
)
#------------------------------------------------------------------------
def _write_polygon_header( self, points, layer ):
self.output_file.write( "\n (fp_poly\n (pts \n" )
#------------------------------------------------------------------------
def _write_polygon_point( self, point ):
self.output_file.write(
" (xy {} {})\n".format( point.x, point.y )
)
#------------------------------------------------------------------------
def _write_polygon_segment( self, p, q, layer, stroke_width ):
self.output_file.write(
"""\n (fp_line
(start {} {})
(end {} {})
(layer {})
(width {})
)""".format(
p.x, p.y,
q.x, q.y,
layer,
stroke_width,
)
)
#------------------------------------------------------------------------
#----------------------------------------------------------------------------
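# Example invocation (illustrative; file and module names are hypothetical, and
# the exact entry point depends on how the package is installed, e.g. a
# "svg2mod" console script or "python -m svg2mod.svg2mod"). The flags are
# defined by the parser below:
#
#   svg2mod -i drawing.svg -o drawing.kicad_mod --name MYPART --format pretty --dpi 96
#
# With --format legacy, the --units (decimil|mm) and --front-only options also apply.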
def get_arguments():
parser = argparse.ArgumentParser(
description = (
'Convert Inkscape SVG drawings to KiCad footprint modules.'
)
)
#------------------------------------------------------------------------
parser.add_argument(
'-i', '--input-file',
type = str,
dest = 'input_file_name',
metavar = 'FILENAME',
help = "name of the SVG file",
required = True,
)
parser.add_argument(
'-o', '--output-file',
type = str,
dest = 'output_file_name',
metavar = 'FILENAME',
help = "name of the module file",
)
parser.add_argument(
'--name', '--module-name',
type = str,
dest = 'module_name',
metavar = 'NAME',
help = "base name of the module",
default = "svg2mod",
)
parser.add_argument(
'--value', '--module-value',
type = str,
dest = 'module_value',
metavar = 'VALUE',
help = "value of the module",
default = "G***",
)
parser.add_argument(
'-f', '--factor',
type = float,
dest = 'scale_factor',
metavar = 'FACTOR',
help = "scale paths by this factor",
default = 1.0,
)
parser.add_argument(
'-p', '--precision',
type = float,
dest = 'precision',
metavar = 'PRECISION',
help = "smoothness for approximating curves with line segments (float)",
default = 10.0,
)
parser.add_argument(
'--front-only',
dest = 'front_only',
action = 'store_const',
const = True,
help = "omit output of back module (legacy output format)",
default = False,
)
parser.add_argument(
'--format',
type = str,
dest = 'format',
metavar = 'FORMAT',
choices = [ 'legacy', 'pretty' ],
help = "output module file format (legacy|pretty)",
default = 'pretty',
)
parser.add_argument(
'--units',
type = str,
dest = 'units',
metavar = 'UNITS',
choices = [ 'decimil', 'mm' ],
help = "output units, if output format is legacy (decimil|mm)",
default = 'mm',
)
parser.add_argument(
'-d', '--dpi',
type = int,
dest = 'dpi',
metavar = 'DPI',
help = "DPI of the SVG file (int)",
default = DEFAULT_DPI,
)
return parser.parse_args(), parser
#------------------------------------------------------------------------
#----------------------------------------------------------------------------
main()
#----------------------------------------------------------------------------
# vi: set et sts=4 sw=4 ts=4:
|
cc0-1.0
| 9,195,613,608,947,256,000
| 26.103851
| 97
| 0.414905
| false
| 4.459545
| false
| false
| false
|
mikehulluk/morphforge
|
src/morphforge/constants/ions.py
|
1
|
1668
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Copyright (c) 2012 Michael Hull.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------
class ChlIon(object):
Na = 'na'
Ks = 'ks'
Kf = 'kf'
Ca = 'ca'
Lk = 'lk'
Chls = [Na, Ks, Kf, Ca, Lk]
|
bsd-2-clause
| 4,506,075,776,726,834,700
| 37.790698
| 72
| 0.67446
| false
| 4.448
| false
| false
| false
|
paninetworks/neutron
|
neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py
|
1
|
46084
|
#!/usr/bin/env python
# Copyright 2012 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
#
# Performs per host Linux Bridge configuration for Neutron.
# Based on the structure of the OpenVSwitch agent in the
# Neutron OpenVSwitch Plugin.
import os
import sys
import time
import eventlet
eventlet.monkey_patch()
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_service import loopingcall
from oslo_service import service
from six import moves
from neutron.agent.linux import bridge_lib
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.agent import rpc as agent_rpc
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.common import config as common_config
from neutron.common import constants
from neutron.common import exceptions
from neutron.common import topics
from neutron.common import utils as n_utils
from neutron import context
from neutron.i18n import _LE, _LI, _LW
from neutron.plugins.common import constants as p_const
from neutron.plugins.ml2.drivers.l2pop.rpc_manager \
import l2population_rpc as l2pop_rpc
from neutron.plugins.ml2.drivers.linuxbridge.agent import arp_protect
from neutron.plugins.ml2.drivers.linuxbridge.agent.common import config # noqa
from neutron.plugins.ml2.drivers.linuxbridge.agent.common \
import constants as lconst
LOG = logging.getLogger(__name__)
BRIDGE_NAME_PREFIX = "brq"
# NOTE(toabctl): Don't use /sys/devices/virtual/net here because not all tap
# devices are listed here (i.e. when using Xen)
BRIDGE_FS = "/sys/class/net/"
BRIDGE_NAME_PLACEHOLDER = "bridge_name"
BRIDGE_INTERFACES_FS = BRIDGE_FS + BRIDGE_NAME_PLACEHOLDER + "/brif/"
DEVICE_NAME_PLACEHOLDER = "device_name"
BRIDGE_PORT_FS_FOR_DEVICE = BRIDGE_FS + DEVICE_NAME_PLACEHOLDER + "/brport"
VXLAN_INTERFACE_PREFIX = "vxlan-"
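# Illustrative sketch (not called anywhere): how LinuxBridgeManager below
# derives device names from Neutron UUIDs. The UUIDs and VNI are hypothetical;
# constants.TAP_DEVICE_PREFIX is typically "tap".
def _example_device_names():
    network_id = "3f8a2b44-9c1e-4a77-b1d2-0123456789ab"    # hypothetical network UUID
    port_id = "c01df00d-aaaa-bbbb-cccc-ddddeeeeffff"       # hypothetical port UUID
    bridge_name = BRIDGE_NAME_PREFIX + network_id[0:11]    # "brq" + first 11 chars
    tap_name = constants.TAP_DEVICE_PREFIX + port_id[0:11]
    vxlan_name = VXLAN_INTERFACE_PREFIX + str(1001)        # "vxlan-1001"
    return bridge_name, tap_name, vxlan_name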
class NetworkSegment(object):
def __init__(self, network_type, physical_network, segmentation_id):
self.network_type = network_type
self.physical_network = physical_network
self.segmentation_id = segmentation_id
class LinuxBridgeManager(object):
def __init__(self, interface_mappings):
self.interface_mappings = interface_mappings
self.ip = ip_lib.IPWrapper()
# VXLAN related parameters:
self.local_ip = cfg.CONF.VXLAN.local_ip
self.vxlan_mode = lconst.VXLAN_NONE
if cfg.CONF.VXLAN.enable_vxlan:
device = self.ip.get_device_by_ip(self.local_ip)
if device:
self.local_int = device.name
self.check_vxlan_support()
else:
self.local_int = None
LOG.warning(_LW('VXLAN is enabled, a valid local_ip '
'must be provided'))
# Store network mapping to segments
self.network_map = {}
def interface_exists_on_bridge(self, bridge, interface):
directory = '/sys/class/net/%s/brif' % bridge
for filename in os.listdir(directory):
if filename == interface:
return True
return False
def get_bridge_name(self, network_id):
if not network_id:
LOG.warning(_LW("Invalid Network ID, will lead to incorrect "
"bridge name"))
bridge_name = BRIDGE_NAME_PREFIX + network_id[0:11]
return bridge_name
def get_subinterface_name(self, physical_interface, vlan_id):
if not vlan_id:
LOG.warning(_LW("Invalid VLAN ID, will lead to incorrect "
"subinterface name"))
subinterface_name = '%s.%s' % (physical_interface, vlan_id)
return subinterface_name
def get_tap_device_name(self, interface_id):
if not interface_id:
LOG.warning(_LW("Invalid Interface ID, will lead to incorrect "
"tap device name"))
tap_device_name = constants.TAP_DEVICE_PREFIX + interface_id[0:11]
return tap_device_name
def get_vxlan_device_name(self, segmentation_id):
if 0 <= int(segmentation_id) <= p_const.MAX_VXLAN_VNI:
return VXLAN_INTERFACE_PREFIX + str(segmentation_id)
else:
LOG.warning(_LW("Invalid Segmentation ID: %s, will lead to "
"incorrect vxlan device name"), segmentation_id)
def get_all_neutron_bridges(self):
neutron_bridge_list = []
bridge_list = os.listdir(BRIDGE_FS)
for bridge in bridge_list:
if bridge.startswith(BRIDGE_NAME_PREFIX):
neutron_bridge_list.append(bridge)
return neutron_bridge_list
def get_interfaces_on_bridge(self, bridge_name):
if ip_lib.device_exists(bridge_name):
bridge_interface_path = BRIDGE_INTERFACES_FS.replace(
BRIDGE_NAME_PLACEHOLDER, bridge_name)
return os.listdir(bridge_interface_path)
else:
return []
def get_tap_devices_count(self, bridge_name):
bridge_interface_path = BRIDGE_INTERFACES_FS.replace(
BRIDGE_NAME_PLACEHOLDER, bridge_name)
try:
if_list = os.listdir(bridge_interface_path)
return len([interface for interface in if_list if
interface.startswith(constants.TAP_DEVICE_PREFIX)])
except OSError:
return 0
def get_bridge_for_tap_device(self, tap_device_name):
bridges = self.get_all_neutron_bridges()
for bridge in bridges:
interfaces = self.get_interfaces_on_bridge(bridge)
if tap_device_name in interfaces:
return bridge
return None
def is_device_on_bridge(self, device_name):
if not device_name:
return False
else:
bridge_port_path = BRIDGE_PORT_FS_FOR_DEVICE.replace(
DEVICE_NAME_PLACEHOLDER, device_name)
return os.path.exists(bridge_port_path)
def ensure_vlan_bridge(self, network_id, physical_interface, vlan_id):
"""Create a vlan and bridge unless they already exist."""
interface = self.ensure_vlan(physical_interface, vlan_id)
bridge_name = self.get_bridge_name(network_id)
ips, gateway = self.get_interface_details(interface)
if self.ensure_bridge(bridge_name, interface, ips, gateway):
return interface
def ensure_vxlan_bridge(self, network_id, segmentation_id):
"""Create a vxlan and bridge unless they already exist."""
interface = self.ensure_vxlan(segmentation_id)
if not interface:
LOG.error(_LE("Failed creating vxlan interface for "
"%(segmentation_id)s"),
{'segmentation_id': segmentation_id})
return
bridge_name = self.get_bridge_name(network_id)
self.ensure_bridge(bridge_name, interface)
return interface
def get_interface_details(self, interface):
device = self.ip.device(interface)
ips = device.addr.list(scope='global')
# Update default gateway if necessary
gateway = device.route.get_gateway(scope='global')
return ips, gateway
def ensure_flat_bridge(self, network_id, physical_interface):
"""Create a non-vlan bridge unless it already exists."""
bridge_name = self.get_bridge_name(network_id)
ips, gateway = self.get_interface_details(physical_interface)
if self.ensure_bridge(bridge_name, physical_interface, ips, gateway):
return physical_interface
def ensure_local_bridge(self, network_id):
"""Create a local bridge unless it already exists."""
bridge_name = self.get_bridge_name(network_id)
return self.ensure_bridge(bridge_name)
def ensure_vlan(self, physical_interface, vlan_id):
"""Create a vlan unless it already exists."""
interface = self.get_subinterface_name(physical_interface, vlan_id)
if not ip_lib.device_exists(interface):
LOG.debug("Creating subinterface %(interface)s for "
"VLAN %(vlan_id)s on interface "
"%(physical_interface)s",
{'interface': interface, 'vlan_id': vlan_id,
'physical_interface': physical_interface})
if utils.execute(['ip', 'link', 'add', 'link',
physical_interface,
'name', interface, 'type', 'vlan', 'id',
vlan_id], run_as_root=True):
return
if utils.execute(['ip', 'link', 'set',
interface, 'up'], run_as_root=True):
return
LOG.debug("Done creating subinterface %s", interface)
return interface
def ensure_vxlan(self, segmentation_id):
"""Create a vxlan unless it already exists."""
interface = self.get_vxlan_device_name(segmentation_id)
if not ip_lib.device_exists(interface):
LOG.debug("Creating vxlan interface %(interface)s for "
"VNI %(segmentation_id)s",
{'interface': interface,
'segmentation_id': segmentation_id})
args = {'dev': self.local_int}
if self.vxlan_mode == lconst.VXLAN_MCAST:
args['group'] = cfg.CONF.VXLAN.vxlan_group
if cfg.CONF.VXLAN.ttl:
args['ttl'] = cfg.CONF.VXLAN.ttl
if cfg.CONF.VXLAN.tos:
args['tos'] = cfg.CONF.VXLAN.tos
if cfg.CONF.VXLAN.l2_population:
args['proxy'] = True
int_vxlan = self.ip.add_vxlan(interface, segmentation_id, **args)
int_vxlan.link.set_up()
LOG.debug("Done creating vxlan interface %s", interface)
return interface
def update_interface_ip_details(self, destination, source, ips,
gateway):
if ips or gateway:
dst_device = self.ip.device(destination)
src_device = self.ip.device(source)
# Append IP's to bridge if necessary
if ips:
for ip in ips:
dst_device.addr.add(cidr=ip['cidr'])
if gateway:
# Ensure that the gateway can be updated by changing the metric
metric = 100
if 'metric' in gateway:
metric = gateway['metric'] - 1
dst_device.route.add_gateway(gateway=gateway['gateway'],
metric=metric)
src_device.route.delete_gateway(gateway=gateway['gateway'])
# Remove IP's from interface
if ips:
for ip in ips:
src_device.addr.delete(cidr=ip['cidr'])
def _bridge_exists_and_ensure_up(self, bridge_name):
"""Check if the bridge exists and make sure it is up."""
br = ip_lib.IPDevice(bridge_name)
br.set_log_fail_as_error(False)
try:
# If the device doesn't exist this will throw a RuntimeError
br.link.set_up()
except RuntimeError:
return False
return True
def ensure_bridge(self, bridge_name, interface=None, ips=None,
gateway=None):
"""Create a bridge unless it already exists."""
# _bridge_exists_and_ensure_up instead of device_exists is used here
# because there are cases where the bridge exists but it's not UP,
# for example:
# 1) A greenthread was executing this function and had not yet executed
# "ip link set bridge_name up" before eventlet switched to this
# thread running the same function
# 2) The Nova VIF driver was running concurrently and had just created
# the bridge, but had not yet put it UP
if not self._bridge_exists_and_ensure_up(bridge_name):
LOG.debug("Starting bridge %(bridge_name)s for subinterface "
"%(interface)s",
{'bridge_name': bridge_name, 'interface': interface})
bridge_device = bridge_lib.BridgeDevice.addbr(bridge_name)
if bridge_device.setfd(0):
return
if bridge_device.disable_stp():
return
if bridge_device.link.set_up():
return
LOG.debug("Done starting bridge %(bridge_name)s for "
"subinterface %(interface)s",
{'bridge_name': bridge_name, 'interface': interface})
else:
bridge_device = bridge_lib.BridgeDevice(bridge_name)
if not interface:
return bridge_name
# Update IP info if necessary
self.update_interface_ip_details(bridge_name, interface, ips, gateway)
# Check if the interface is part of the bridge
if not self.interface_exists_on_bridge(bridge_name, interface):
try:
# Check if the interface is not enslaved in another bridge
if self.is_device_on_bridge(interface):
bridge = self.get_bridge_for_tap_device(interface)
bridge_lib.BridgeDevice(bridge).delif(interface)
bridge_device.addif(interface)
except Exception as e:
LOG.error(_LE("Unable to add %(interface)s to %(bridge_name)s"
"! Exception: %(e)s"),
{'interface': interface, 'bridge_name': bridge_name,
'e': e})
return
return bridge_name
def ensure_physical_in_bridge(self, network_id,
network_type,
physical_network,
segmentation_id):
if network_type == p_const.TYPE_VXLAN:
if self.vxlan_mode == lconst.VXLAN_NONE:
LOG.error(_LE("Unable to add vxlan interface for network %s"),
network_id)
return
return self.ensure_vxlan_bridge(network_id, segmentation_id)
physical_interface = self.interface_mappings.get(physical_network)
if not physical_interface:
LOG.error(_LE("No mapping for physical network %s"),
physical_network)
return
if network_type == p_const.TYPE_FLAT:
return self.ensure_flat_bridge(network_id, physical_interface)
elif network_type == p_const.TYPE_VLAN:
return self.ensure_vlan_bridge(network_id, physical_interface,
segmentation_id)
else:
LOG.error(_LE("Unknown network_type %(network_type)s for network "
"%(network_id)s."), {network_type: network_type,
network_id: network_id})
def add_tap_interface(self, network_id, network_type, physical_network,
segmentation_id, tap_device_name):
"""Add tap interface.
If a VIF has been plugged into a network, this function will
add the corresponding tap device to the relevant bridge.
"""
if not ip_lib.device_exists(tap_device_name):
LOG.debug("Tap device: %s does not exist on "
"this host, skipped", tap_device_name)
return False
bridge_name = self.get_bridge_name(network_id)
if network_type == p_const.TYPE_LOCAL:
self.ensure_local_bridge(network_id)
else:
phy_dev_name = self.ensure_physical_in_bridge(network_id,
network_type,
physical_network,
segmentation_id)
if not phy_dev_name:
return False
self.ensure_tap_mtu(tap_device_name, phy_dev_name)
# Check if device needs to be added to bridge
tap_device_in_bridge = self.get_bridge_for_tap_device(tap_device_name)
if not tap_device_in_bridge:
data = {'tap_device_name': tap_device_name,
'bridge_name': bridge_name}
LOG.debug("Adding device %(tap_device_name)s to bridge "
"%(bridge_name)s", data)
if bridge_lib.BridgeDevice(bridge_name).addif(tap_device_name):
return False
else:
data = {'tap_device_name': tap_device_name,
'bridge_name': bridge_name}
LOG.debug("%(tap_device_name)s already exists on bridge "
"%(bridge_name)s", data)
return True
def ensure_tap_mtu(self, tap_dev_name, phy_dev_name):
"""Ensure the MTU on the tap is the same as the physical device."""
phy_dev_mtu = ip_lib.IPDevice(phy_dev_name).link.mtu
ip_lib.IPDevice(tap_dev_name).link.set_mtu(phy_dev_mtu)
def add_interface(self, network_id, network_type, physical_network,
segmentation_id, port_id):
self.network_map[network_id] = NetworkSegment(network_type,
physical_network,
segmentation_id)
tap_device_name = self.get_tap_device_name(port_id)
return self.add_tap_interface(network_id, network_type,
physical_network, segmentation_id,
tap_device_name)
def delete_vlan_bridge(self, bridge_name):
if ip_lib.device_exists(bridge_name):
interfaces_on_bridge = self.get_interfaces_on_bridge(bridge_name)
for interface in interfaces_on_bridge:
self.remove_interface(bridge_name, interface)
if interface.startswith(VXLAN_INTERFACE_PREFIX):
self.delete_vxlan(interface)
continue
for physical_interface in self.interface_mappings.values():
if (interface.startswith(physical_interface)):
ips, gateway = self.get_interface_details(bridge_name)
if ips:
# This is a flat network or a VLAN interface that
# was setup outside of neutron => return IP's from
# bridge to interface
self.update_interface_ip_details(interface,
bridge_name,
ips, gateway)
elif physical_interface != interface:
self.delete_vlan(interface)
LOG.debug("Deleting bridge %s", bridge_name)
bridge_device = bridge_lib.BridgeDevice(bridge_name)
if bridge_device.link.set_down():
return
if bridge_device.delbr():
return
LOG.debug("Done deleting bridge %s", bridge_name)
else:
LOG.error(_LE("Cannot delete bridge %s, does not exist"),
bridge_name)
def remove_empty_bridges(self):
for network_id in list(self.network_map.keys()):
bridge_name = self.get_bridge_name(network_id)
if not self.get_tap_devices_count(bridge_name):
self.delete_vlan_bridge(bridge_name)
del self.network_map[network_id]
def remove_interface(self, bridge_name, interface_name):
if ip_lib.device_exists(bridge_name):
if not self.is_device_on_bridge(interface_name):
return True
LOG.debug("Removing device %(interface_name)s from bridge "
"%(bridge_name)s",
{'interface_name': interface_name,
'bridge_name': bridge_name})
if bridge_lib.BridgeDevice(bridge_name).delif(interface_name):
return False
LOG.debug("Done removing device %(interface_name)s from bridge "
"%(bridge_name)s",
{'interface_name': interface_name,
'bridge_name': bridge_name})
return True
else:
LOG.debug("Cannot remove device %(interface_name)s bridge "
"%(bridge_name)s does not exist",
{'interface_name': interface_name,
'bridge_name': bridge_name})
return False
def delete_vlan(self, interface):
if ip_lib.device_exists(interface):
LOG.debug("Deleting subinterface %s for vlan", interface)
if utils.execute(['ip', 'link', 'set', interface, 'down'],
run_as_root=True):
return
if utils.execute(['ip', 'link', 'delete', interface],
run_as_root=True):
return
LOG.debug("Done deleting subinterface %s", interface)
def delete_vxlan(self, interface):
if ip_lib.device_exists(interface):
LOG.debug("Deleting vxlan interface %s for vlan",
interface)
int_vxlan = self.ip.device(interface)
int_vxlan.link.set_down()
int_vxlan.link.delete()
LOG.debug("Done deleting vxlan interface %s", interface)
def get_tap_devices(self):
devices = set()
for device in os.listdir(BRIDGE_FS):
if device.startswith(constants.TAP_DEVICE_PREFIX):
devices.add(device)
return devices
def vxlan_ucast_supported(self):
if not cfg.CONF.VXLAN.l2_population:
return False
if not ip_lib.iproute_arg_supported(
['bridge', 'fdb'], 'append'):
LOG.warning(_LW('Option "%(option)s" must be supported by command '
'"%(command)s" to enable %(mode)s mode'),
{'option': 'append',
'command': 'bridge fdb',
'mode': 'VXLAN UCAST'})
return False
test_iface = None
for seg_id in moves.range(1, p_const.MAX_VXLAN_VNI + 1):
if not ip_lib.device_exists(
self.get_vxlan_device_name(seg_id)):
test_iface = self.ensure_vxlan(seg_id)
break
else:
LOG.error(_LE('No valid Segmentation ID to perform UCAST test.'))
return False
try:
utils.execute(
cmd=['bridge', 'fdb', 'append', constants.FLOODING_ENTRY[0],
'dev', test_iface, 'dst', '1.1.1.1'],
run_as_root=True, log_fail_as_error=False)
return True
except RuntimeError:
return False
finally:
self.delete_vxlan(test_iface)
def vxlan_mcast_supported(self):
if not cfg.CONF.VXLAN.vxlan_group:
LOG.warning(_LW('VXLAN multicast group must be provided in '
'vxlan_group option to enable VXLAN MCAST mode'))
return False
if not ip_lib.iproute_arg_supported(
['ip', 'link', 'add', 'type', 'vxlan'],
'proxy'):
LOG.warning(_LW('Option "%(option)s" must be supported by command '
'"%(command)s" to enable %(mode)s mode'),
{'option': 'proxy',
'command': 'ip link add type vxlan',
'mode': 'VXLAN MCAST'})
return False
return True
def check_vxlan_support(self):
self.vxlan_mode = lconst.VXLAN_NONE
if self.vxlan_ucast_supported():
self.vxlan_mode = lconst.VXLAN_UCAST
elif self.vxlan_mcast_supported():
self.vxlan_mode = lconst.VXLAN_MCAST
else:
raise exceptions.VxlanNetworkUnsupported()
LOG.debug('Using %s VXLAN mode', self.vxlan_mode)
def fdb_ip_entry_exists(self, mac, ip, interface):
entries = utils.execute(['ip', 'neigh', 'show', 'to', ip,
'dev', interface],
run_as_root=True)
return mac in entries
def fdb_bridge_entry_exists(self, mac, interface, agent_ip=None):
entries = utils.execute(['bridge', 'fdb', 'show', 'dev', interface],
run_as_root=True)
if not agent_ip:
return mac in entries
return (agent_ip in entries and mac in entries)
def add_fdb_ip_entry(self, mac, ip, interface):
utils.execute(['ip', 'neigh', 'replace', ip, 'lladdr', mac,
'dev', interface, 'nud', 'permanent'],
run_as_root=True,
check_exit_code=False)
def remove_fdb_ip_entry(self, mac, ip, interface):
utils.execute(['ip', 'neigh', 'del', ip, 'lladdr', mac,
'dev', interface],
run_as_root=True,
check_exit_code=False)
def add_fdb_bridge_entry(self, mac, agent_ip, interface, operation="add"):
utils.execute(['bridge', 'fdb', operation, mac, 'dev', interface,
'dst', agent_ip],
run_as_root=True,
check_exit_code=False)
def remove_fdb_bridge_entry(self, mac, agent_ip, interface):
utils.execute(['bridge', 'fdb', 'del', mac, 'dev', interface,
'dst', agent_ip],
run_as_root=True,
check_exit_code=False)
def add_fdb_entries(self, agent_ip, ports, interface):
for mac, ip in ports:
if mac != constants.FLOODING_ENTRY[0]:
self.add_fdb_ip_entry(mac, ip, interface)
self.add_fdb_bridge_entry(mac, agent_ip, interface,
operation="replace")
elif self.vxlan_mode == lconst.VXLAN_UCAST:
if self.fdb_bridge_entry_exists(mac, interface):
self.add_fdb_bridge_entry(mac, agent_ip, interface,
"append")
else:
self.add_fdb_bridge_entry(mac, agent_ip, interface)
def remove_fdb_entries(self, agent_ip, ports, interface):
for mac, ip in ports:
if mac != constants.FLOODING_ENTRY[0]:
self.remove_fdb_ip_entry(mac, ip, interface)
self.remove_fdb_bridge_entry(mac, agent_ip, interface)
elif self.vxlan_mode == lconst.VXLAN_UCAST:
self.remove_fdb_bridge_entry(mac, agent_ip, interface)
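# Illustrative sketch (not called anywhere): the command argument lists that the
# FDB helpers above build for a unicast entry of a hypothetical remote VTEP.
# The MAC, IPs and interface name are made up.
def _example_fdb_commands():
    mac, ip, agent_ip, interface = ('fa:16:3e:aa:bb:cc', '10.0.0.5',
                                    '192.168.2.10', 'vxlan-1001')
    # add_fdb_ip_entry() runs:
    neigh_cmd = ['ip', 'neigh', 'replace', ip, 'lladdr', mac,
                 'dev', interface, 'nud', 'permanent']
    # add_fdb_bridge_entry(..., operation="replace") runs:
    fdb_cmd = ['bridge', 'fdb', 'replace', mac, 'dev', interface,
               'dst', agent_ip]
    return neigh_cmd, fdb_cmd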
class LinuxBridgeRpcCallbacks(sg_rpc.SecurityGroupAgentRpcCallbackMixin,
l2pop_rpc.L2populationRpcCallBackMixin):
# Set RPC API version to 1.0 by default.
# history
# 1.1 Support Security Group RPC
# 1.3 Added param devices_to_update to security_groups_provider_updated
target = oslo_messaging.Target(version='1.3')
def __init__(self, context, agent, sg_agent):
super(LinuxBridgeRpcCallbacks, self).__init__()
self.context = context
self.agent = agent
self.sg_agent = sg_agent
def network_delete(self, context, **kwargs):
LOG.debug("network_delete received")
network_id = kwargs.get('network_id')
bridge_name = self.agent.br_mgr.get_bridge_name(network_id)
LOG.debug("Delete %s", bridge_name)
self.agent.br_mgr.delete_vlan_bridge(bridge_name)
def port_update(self, context, **kwargs):
port_id = kwargs['port']['id']
tap_name = self.agent.br_mgr.get_tap_device_name(port_id)
# Put the tap name in the updated_devices set.
# Do not store port details, as if they're used for processing
# notifications there is no guarantee the notifications are
# processed in the same order as the relevant API requests.
self.agent.updated_devices.add(tap_name)
LOG.debug("port_update RPC received for port: %s", port_id)
def fdb_add(self, context, fdb_entries):
LOG.debug("fdb_add received")
for network_id, values in fdb_entries.items():
segment = self.agent.br_mgr.network_map.get(network_id)
if not segment:
return
if segment.network_type != p_const.TYPE_VXLAN:
return
interface = self.agent.br_mgr.get_vxlan_device_name(
segment.segmentation_id)
agent_ports = values.get('ports')
for agent_ip, ports in agent_ports.items():
if agent_ip == self.agent.br_mgr.local_ip:
continue
self.agent.br_mgr.add_fdb_entries(agent_ip,
ports,
interface)
def fdb_remove(self, context, fdb_entries):
LOG.debug("fdb_remove received")
for network_id, values in fdb_entries.items():
segment = self.agent.br_mgr.network_map.get(network_id)
if not segment:
return
if segment.network_type != p_const.TYPE_VXLAN:
return
interface = self.agent.br_mgr.get_vxlan_device_name(
segment.segmentation_id)
agent_ports = values.get('ports')
for agent_ip, ports in agent_ports.items():
if agent_ip == self.agent.br_mgr.local_ip:
continue
self.agent.br_mgr.remove_fdb_entries(agent_ip,
ports,
interface)
def _fdb_chg_ip(self, context, fdb_entries):
LOG.debug("update chg_ip received")
for network_id, agent_ports in fdb_entries.items():
segment = self.agent.br_mgr.network_map.get(network_id)
if not segment:
return
if segment.network_type != p_const.TYPE_VXLAN:
return
interface = self.agent.br_mgr.get_vxlan_device_name(
segment.segmentation_id)
for agent_ip, state in agent_ports.items():
if agent_ip == self.agent.br_mgr.local_ip:
continue
after = state.get('after', [])
for mac, ip in after:
self.agent.br_mgr.add_fdb_ip_entry(mac, ip, interface)
before = state.get('before', [])
for mac, ip in before:
self.agent.br_mgr.remove_fdb_ip_entry(mac, ip, interface)
def fdb_update(self, context, fdb_entries):
LOG.debug("fdb_update received")
for action, values in fdb_entries.items():
method = '_fdb_' + action
if not hasattr(self, method):
raise NotImplementedError()
getattr(self, method)(context, values)
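# A hedged sketch (not from the original module) of the payload shape that
# fdb_update() above dispatches on: each top-level key names an action, and
# 'chg_ip' is routed to _fdb_chg_ip(). The network id, agent IP and MAC/IP
# pairs below are made-up placeholder values.
EXAMPLE_FDB_UPDATE_ENTRIES = {
    'chg_ip': {
        'net-uuid-1': {                                    # network id
            '192.0.2.10': {                                # remote agent IP
                'before': [('fa:16:3e:aa:bb:cc', '10.0.0.5')],
                'after': [('fa:16:3e:aa:bb:cc', '10.0.0.6')],
            },
        },
    },
}
# fdb_update(context, EXAMPLE_FDB_UPDATE_ENTRIES) would end up calling
# _fdb_chg_ip(context, EXAMPLE_FDB_UPDATE_ENTRIES['chg_ip']).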
class LinuxBridgeNeutronAgentRPC(service.Service):
def __init__(self, interface_mappings, polling_interval,
quitting_rpc_timeout):
"""Constructor.
:param interface_mappings: dict mapping physical_networks to
physical_interfaces.
:param polling_interval: interval (secs) to poll DB.
:param quitting_rpc_timeout: timeout in seconds for rpc calls after
stop is called.
"""
super(LinuxBridgeNeutronAgentRPC, self).__init__()
self.interface_mappings = interface_mappings
self.polling_interval = polling_interval
self.quitting_rpc_timeout = quitting_rpc_timeout
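    # A hedged usage sketch (not from the original module) matching the
    # constructor docstring above; the mapping and the intervals are made up:
    #
    #   agent = LinuxBridgeNeutronAgentRPC({'physnet1': 'eth1'},  # interface_mappings
    #                                      polling_interval=2,
    #                                      quitting_rpc_timeout=10)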
def start(self):
self.prevent_arp_spoofing = cfg.CONF.AGENT.prevent_arp_spoofing
self.setup_linux_bridge(self.interface_mappings)
configurations = {'interface_mappings': self.interface_mappings}
if self.br_mgr.vxlan_mode != lconst.VXLAN_NONE:
configurations['tunneling_ip'] = self.br_mgr.local_ip
configurations['tunnel_types'] = [p_const.TYPE_VXLAN]
configurations['l2_population'] = cfg.CONF.VXLAN.l2_population
self.agent_state = {
'binary': 'neutron-linuxbridge-agent',
'host': cfg.CONF.host,
'topic': constants.L2_AGENT_TOPIC,
'configurations': configurations,
'agent_type': constants.AGENT_TYPE_LINUXBRIDGE,
'start_flag': True}
# stores received port_updates for processing by the main loop
self.updated_devices = set()
self.context = context.get_admin_context_without_session()
self.plugin_rpc = agent_rpc.PluginApi(topics.PLUGIN)
self.sg_plugin_rpc = sg_rpc.SecurityGroupServerRpcApi(topics.PLUGIN)
self.sg_agent = sg_rpc.SecurityGroupAgentRpc(self.context,
self.sg_plugin_rpc, defer_refresh_firewall=True)
self.setup_rpc(self.interface_mappings.values())
self.daemon_loop()
def stop(self, graceful=True):
LOG.info(_LI("Stopping linuxbridge agent."))
if graceful and self.quitting_rpc_timeout:
self.set_rpc_timeout(self.quitting_rpc_timeout)
super(LinuxBridgeNeutronAgentRPC, self).stop(graceful)
def reset(self):
common_config.setup_logging()
def _report_state(self):
try:
devices = len(self.br_mgr.get_tap_devices())
self.agent_state.get('configurations')['devices'] = devices
self.state_rpc.report_state(self.context,
self.agent_state)
self.agent_state.pop('start_flag', None)
except Exception:
LOG.exception(_LE("Failed reporting state!"))
def setup_rpc(self, physical_interfaces):
if physical_interfaces:
mac = utils.get_interface_mac(physical_interfaces[0])
else:
devices = ip_lib.IPWrapper().get_devices(True)
if devices:
mac = utils.get_interface_mac(devices[0].name)
else:
LOG.error(_LE("Unable to obtain MAC address for unique ID. "
"Agent terminated!"))
exit(1)
self.agent_id = '%s%s' % ('lb', (mac.replace(":", "")))
LOG.info(_LI("RPC agent_id: %s"), self.agent_id)
self.topic = topics.AGENT
self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN)
# RPC network init
# Handle updates from service
self.endpoints = [LinuxBridgeRpcCallbacks(self.context, self,
self.sg_agent)]
# Define the listening consumers for the agent
consumers = [[topics.PORT, topics.UPDATE],
[topics.NETWORK, topics.DELETE],
[topics.SECURITY_GROUP, topics.UPDATE]]
if cfg.CONF.VXLAN.l2_population:
consumers.append([topics.L2POPULATION,
topics.UPDATE, cfg.CONF.host])
self.connection = agent_rpc.create_consumers(self.endpoints,
self.topic,
consumers)
report_interval = cfg.CONF.AGENT.report_interval
if report_interval:
heartbeat = loopingcall.FixedIntervalLoopingCall(
self._report_state)
heartbeat.start(interval=report_interval)
def setup_linux_bridge(self, interface_mappings):
self.br_mgr = LinuxBridgeManager(interface_mappings)
def remove_port_binding(self, network_id, interface_id):
bridge_name = self.br_mgr.get_bridge_name(network_id)
tap_device_name = self.br_mgr.get_tap_device_name(interface_id)
return self.br_mgr.remove_interface(bridge_name, tap_device_name)
def process_network_devices(self, device_info):
resync_a = False
resync_b = False
self.sg_agent.setup_port_filters(device_info.get('added'),
device_info.get('updated'))
# Updated devices are processed the same as new ones, as their
# admin_state_up may have changed. The set union prevents duplicating
# work when a device is new and updated in the same polling iteration.
devices_added_updated = (set(device_info.get('added'))
| set(device_info.get('updated')))
if devices_added_updated:
resync_a = self.treat_devices_added_updated(devices_added_updated)
if device_info.get('removed'):
resync_b = self.treat_devices_removed(device_info['removed'])
# If one of the above operations fails => resync with plugin
return (resync_a | resync_b)
def treat_devices_added_updated(self, devices):
try:
devices_details_list = self.plugin_rpc.get_devices_details_list(
self.context, devices, self.agent_id)
except Exception as e:
LOG.debug("Unable to get port details for "
"%(devices)s: %(e)s",
{'devices': devices, 'e': e})
# resync is needed
return True
for device_details in devices_details_list:
device = device_details['device']
LOG.debug("Port %s added", device)
if 'port_id' in device_details:
LOG.info(_LI("Port %(device)s updated. Details: %(details)s"),
{'device': device, 'details': device_details})
if self.prevent_arp_spoofing:
port = self.br_mgr.get_tap_device_name(
device_details['port_id'])
arp_protect.setup_arp_spoofing_protection(port,
device_details)
if device_details['admin_state_up']:
# create the networking for the port
network_type = device_details.get('network_type')
if network_type:
segmentation_id = device_details.get('segmentation_id')
else:
# compatibility with pre-Havana RPC vlan_id encoding
vlan_id = device_details.get('vlan_id')
(network_type,
segmentation_id) = lconst.interpret_vlan_id(vlan_id)
if self.br_mgr.add_interface(
device_details['network_id'],
network_type,
device_details['physical_network'],
segmentation_id,
device_details['port_id']):
# update plugin about port status
self.plugin_rpc.update_device_up(self.context,
device,
self.agent_id,
cfg.CONF.host)
else:
self.plugin_rpc.update_device_down(self.context,
device,
self.agent_id,
cfg.CONF.host)
else:
self.remove_port_binding(device_details['network_id'],
device_details['port_id'])
else:
LOG.info(_LI("Device %s not defined on plugin"), device)
return False
def treat_devices_removed(self, devices):
resync = False
self.sg_agent.remove_devices_filter(devices)
for device in devices:
LOG.info(_LI("Attachment %s removed"), device)
details = None
try:
details = self.plugin_rpc.update_device_down(self.context,
device,
self.agent_id,
cfg.CONF.host)
except Exception as e:
LOG.debug("port_removed failed for %(device)s: %(e)s",
{'device': device, 'e': e})
resync = True
if details and details['exists']:
LOG.info(_LI("Port %s updated."), device)
else:
LOG.debug("Device %s not defined on plugin", device)
if self.prevent_arp_spoofing:
arp_protect.delete_arp_spoofing_protection(devices)
return resync
def scan_devices(self, previous, sync):
device_info = {}
# Save and reinitialise the set variable that the port_update RPC uses.
# This should be thread-safe as the greenthread should not yield
# between these two statements.
updated_devices = self.updated_devices
self.updated_devices = set()
current_devices = self.br_mgr.get_tap_devices()
device_info['current'] = current_devices
if previous is None:
# This is the first iteration of daemon_loop().
previous = {'added': set(),
'current': set(),
'updated': set(),
'removed': set()}
# clear any orphaned ARP spoofing rules (e.g. interface was
# manually deleted)
if self.prevent_arp_spoofing:
arp_protect.delete_unreferenced_arp_protection(current_devices)
if sync:
# This is the first iteration, or the previous one had a problem.
# Re-add all existing devices.
device_info['added'] = current_devices
# Retry cleaning devices that may not have been cleaned properly.
# And clean any that disappeared since the previous iteration.
device_info['removed'] = (previous['removed'] | previous['current']
- current_devices)
# Retry updating devices that may not have been updated properly.
# And any that were updated since the previous iteration.
# Only update devices that currently exist.
device_info['updated'] = (previous['updated'] | updated_devices
& current_devices)
else:
device_info['added'] = current_devices - previous['current']
device_info['removed'] = previous['current'] - current_devices
device_info['updated'] = updated_devices & current_devices
return device_info
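    # A hedged worked example (not from the original module) of the set algebra
    # used by scan_devices() above, with made-up tap device names. '-' and '&'
    # bind tighter than '|' in Python, so the expressions group as parenthesised
    # here.
    #
    #   previous = {'current': {'tapA', 'tapB'}, 'removed': {'tapX'},
    #               'updated': {'tapB'}, 'added': {'tapA', 'tapB'}}
    #   current_devices = {'tapB', 'tapC'}
    #   updated_devices = {'tapC'}
    #
    #   sync run:    added   = {'tapB', 'tapC'}                                  (all current devices)
    #                removed = {'tapX'} | ({'tapA', 'tapB'} - {'tapB', 'tapC'}) = {'tapX', 'tapA'}
    #                updated = {'tapB'} | ({'tapC'} & {'tapB', 'tapC'})         = {'tapB', 'tapC'}
    #   normal run:  added = {'tapC'}, removed = {'tapA'}, updated = {'tapC'}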
def _device_info_has_changes(self, device_info):
return (device_info.get('added')
or device_info.get('updated')
or device_info.get('removed'))
def daemon_loop(self):
LOG.info(_LI("LinuxBridge Agent RPC Daemon Started!"))
device_info = None
sync = True
while True:
start = time.time()
device_info = self.scan_devices(previous=device_info, sync=sync)
if sync:
LOG.info(_LI("Agent out of sync with plugin!"))
sync = False
if (self._device_info_has_changes(device_info)
or self.sg_agent.firewall_refresh_needed()):
LOG.debug("Agent loop found changes! %s", device_info)
try:
sync = self.process_network_devices(device_info)
except Exception:
LOG.exception(_LE("Error in agent loop. Devices info: %s"),
device_info)
sync = True
# sleep till end of polling interval
elapsed = (time.time() - start)
if (elapsed < self.polling_interval):
time.sleep(self.polling_interval - elapsed)
else:
LOG.debug("Loop iteration exceeded interval "
"(%(polling_interval)s vs. %(elapsed)s)!",
{'polling_interval': self.polling_interval,
'elapsed': elapsed})
def set_rpc_timeout(self, timeout):
for rpc_api in (self.plugin_rpc, self.sg_plugin_rpc,
self.state_rpc):
rpc_api.client.timeout = timeout
def main():
common_config.init(sys.argv[1:])
common_config.setup_logging()
try:
interface_mappings = n_utils.parse_mappings(
cfg.CONF.LINUX_BRIDGE.physical_interface_mappings)
except ValueError as e:
LOG.error(_LE("Parsing physical_interface_mappings failed: %s. "
"Agent terminated!"), e)
sys.exit(1)
LOG.info(_LI("Interface mappings: %s"), interface_mappings)
polling_interval = cfg.CONF.AGENT.polling_interval
quitting_rpc_timeout = cfg.CONF.AGENT.quitting_rpc_timeout
agent = LinuxBridgeNeutronAgentRPC(interface_mappings,
polling_interval,
quitting_rpc_timeout)
LOG.info(_LI("Agent initialized successfully, now running... "))
launcher = service.launch(cfg.CONF, agent)
launcher.wait()
if __name__ == "__main__":
main()
|
apache-2.0
| -1,381,419,549,410,301,700
| 42.230769
| 79
| 0.552578
| false
| 4.317002
| false
| false
| false
|
pyrochlore/cycles
|
src/blender/addon/__init__.py
|
1
|
3388
|
#
# Copyright 2011-2013 Blender Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
#
# <pep8 compliant>
bl_info = {
"name": "Cycles Render Engine",
"author": "",
"blender": (2, 70, 0),
"location": "Info header, render engine menu",
"description": "Cycles Render Engine integration",
"warning": "",
"wiki_url": "http://wiki.blender.org/index.php/Doc:2.6/Manual/Render/Cycles",
"tracker_url": "",
"support": 'OFFICIAL',
"category": "Render"}
import bpy
from . import engine
from . import version_update
class CyclesRender(bpy.types.RenderEngine):
bl_idname = 'CYCLES'
bl_label = "Cycles Render"
bl_use_shading_nodes = True
bl_use_preview = True
bl_use_exclude_layers = True
bl_use_save_buffers = True
def __init__(self):
self.session = None
def __del__(self):
engine.free(self)
# final render
def update(self, data, scene):
if self.is_preview:
if not self.session:
cscene = bpy.context.scene.cycles
use_osl = cscene.shading_system and cscene.device == 'CPU'
engine.create(self, data, scene,
None, None, None, use_osl)
else:
if not self.session:
engine.create(self, data, scene)
else:
engine.reset(self, data, scene)
engine.update(self, data, scene)
def render(self, scene):
engine.render(self)
def bake(self, scene, obj, pass_type, pixel_array, num_pixels, depth, result):
engine.bake(self, obj, pass_type, pixel_array, num_pixels, depth, result)
# viewport render
def view_update(self, context):
if not self.session:
engine.create(self, context.blend_data, context.scene,
context.region, context.space_data, context.region_data)
engine.update(self, context.blend_data, context.scene)
def view_draw(self, context):
engine.draw(self, context.region, context.space_data, context.region_data)
def update_script_node(self, node):
if engine.with_osl():
from . import osl
osl.update_script_node(node, self.report)
else:
self.report({'ERROR'}, "OSL support disabled in this build.")
def register():
from . import ui
from . import properties
from . import presets
engine.init()
properties.register()
ui.register()
presets.register()
bpy.utils.register_module(__name__)
bpy.app.handlers.version_update.append(version_update.do_versions)
def unregister():
from . import ui
from . import properties
from . import presets
bpy.app.handlers.version_update.remove(version_update.do_versions)
ui.unregister()
properties.unregister()
presets.unregister()
bpy.utils.unregister_module(__name__)
|
apache-2.0
| -5,977,137,211,679,149,000
| 27.957265
| 82
| 0.634002
| false
| 3.789709
| false
| false
| false
|
leppa/home-assistant
|
homeassistant/components/minio/__init__.py
|
1
|
8056
|
"""Minio component."""
import logging
import os
from queue import Queue
import threading
from typing import List
import voluptuous as vol
from homeassistant.const import EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP
import homeassistant.helpers.config_validation as cv
from .minio_helper import MinioEventThread, create_minio_client
_LOGGER = logging.getLogger(__name__)
DOMAIN = "minio"
CONF_HOST = "host"
CONF_PORT = "port"
CONF_ACCESS_KEY = "access_key"
CONF_SECRET_KEY = "secret_key"
CONF_SECURE = "secure"
CONF_LISTEN = "listen"
CONF_LISTEN_BUCKET = "bucket"
CONF_LISTEN_PREFIX = "prefix"
CONF_LISTEN_SUFFIX = "suffix"
CONF_LISTEN_EVENTS = "events"
ATTR_BUCKET = "bucket"
ATTR_KEY = "key"
ATTR_FILE_PATH = "file_path"
DEFAULT_LISTEN_PREFIX = ""
DEFAULT_LISTEN_SUFFIX = ".*"
DEFAULT_LISTEN_EVENTS = "s3:ObjectCreated:*"
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_PORT): cv.port,
vol.Required(CONF_ACCESS_KEY): cv.string,
vol.Required(CONF_SECRET_KEY): cv.string,
vol.Required(CONF_SECURE): cv.boolean,
vol.Optional(CONF_LISTEN, default=[]): vol.All(
cv.ensure_list,
[
vol.Schema(
{
vol.Required(CONF_LISTEN_BUCKET): cv.string,
vol.Optional(
CONF_LISTEN_PREFIX, default=DEFAULT_LISTEN_PREFIX
): cv.string,
vol.Optional(
CONF_LISTEN_SUFFIX, default=DEFAULT_LISTEN_SUFFIX
): cv.string,
vol.Optional(
CONF_LISTEN_EVENTS, default=DEFAULT_LISTEN_EVENTS
): cv.string,
}
)
],
),
}
)
},
extra=vol.ALLOW_EXTRA,
)
BUCKET_KEY_SCHEMA = vol.Schema(
{vol.Required(ATTR_BUCKET): cv.template, vol.Required(ATTR_KEY): cv.template}
)
BUCKET_KEY_FILE_SCHEMA = BUCKET_KEY_SCHEMA.extend(
{vol.Required(ATTR_FILE_PATH): cv.template}
)
def setup(hass, config):
"""Set up MinioClient and event listeners."""
conf = config[DOMAIN]
host = conf[CONF_HOST]
port = conf[CONF_PORT]
access_key = conf[CONF_ACCESS_KEY]
secret_key = conf[CONF_SECRET_KEY]
secure = conf[CONF_SECURE]
queue_listener = QueueListener(hass)
queue = queue_listener.queue
hass.bus.listen_once(EVENT_HOMEASSISTANT_START, queue_listener.start_handler)
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, queue_listener.stop_handler)
def _setup_listener(listener_conf):
bucket = listener_conf[CONF_LISTEN_BUCKET]
prefix = listener_conf[CONF_LISTEN_PREFIX]
suffix = listener_conf[CONF_LISTEN_SUFFIX]
events = listener_conf[CONF_LISTEN_EVENTS]
minio_listener = MinioListener(
queue,
get_minio_endpoint(host, port),
access_key,
secret_key,
secure,
bucket,
prefix,
suffix,
events,
)
hass.bus.listen_once(EVENT_HOMEASSISTANT_START, minio_listener.start_handler)
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, minio_listener.stop_handler)
for listen_conf in conf[CONF_LISTEN]:
_setup_listener(listen_conf)
minio_client = create_minio_client(
get_minio_endpoint(host, port), access_key, secret_key, secure
)
def _render_service_value(service, key):
value = service.data[key]
value.hass = hass
return value.async_render()
def put_file(service):
"""Upload file service."""
bucket = _render_service_value(service, ATTR_BUCKET)
key = _render_service_value(service, ATTR_KEY)
file_path = _render_service_value(service, ATTR_FILE_PATH)
if not hass.config.is_allowed_path(file_path):
_LOGGER.error("Invalid file_path %s", file_path)
return
minio_client.fput_object(bucket, key, file_path)
def get_file(service):
"""Download file service."""
bucket = _render_service_value(service, ATTR_BUCKET)
key = _render_service_value(service, ATTR_KEY)
file_path = _render_service_value(service, ATTR_FILE_PATH)
if not hass.config.is_allowed_path(file_path):
_LOGGER.error("Invalid file_path %s", file_path)
return
minio_client.fget_object(bucket, key, file_path)
def remove_file(service):
"""Delete file service."""
bucket = _render_service_value(service, ATTR_BUCKET)
key = _render_service_value(service, ATTR_KEY)
minio_client.remove_object(bucket, key)
hass.services.register(DOMAIN, "put", put_file, schema=BUCKET_KEY_FILE_SCHEMA)
hass.services.register(DOMAIN, "get", get_file, schema=BUCKET_KEY_FILE_SCHEMA)
hass.services.register(DOMAIN, "remove", remove_file, schema=BUCKET_KEY_SCHEMA)
return True
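# A minimal configuration sketch (not from the original module) that the
# CONFIG_SCHEMA above would accept; the endpoint and keys are made-up
# placeholder values, and the listener's prefix/suffix/events fall back to
# their defaults.
EXAMPLE_MINIO_CONFIG = {
    DOMAIN: {
        CONF_HOST: "localhost",
        CONF_PORT: 9000,
        CONF_ACCESS_KEY: "EXAMPLE_ACCESS_KEY",
        CONF_SECRET_KEY: "EXAMPLE_SECRET_KEY",
        CONF_SECURE: False,
        CONF_LISTEN: [{CONF_LISTEN_BUCKET: "camera-uploads"}],
    }
}
# CONFIG_SCHEMA(EXAMPLE_MINIO_CONFIG) returns the validated dict with the
# optional listener fields filled in from the defaults.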
def get_minio_endpoint(host: str, port: int) -> str:
"""Create minio endpoint from host and port."""
return f"{host}:{port}"
class QueueListener(threading.Thread):
"""Forward events from queue into HASS event bus."""
def __init__(self, hass):
"""Create queue."""
super().__init__()
self._hass = hass
self._queue = Queue()
def run(self):
"""Listen to queue events, and forward them to HASS event bus."""
_LOGGER.info("Running QueueListener")
while True:
event = self._queue.get()
if event is None:
break
_, file_name = os.path.split(event[ATTR_KEY])
_LOGGER.debug(
"Sending event %s, %s, %s",
event["event_name"],
event[ATTR_BUCKET],
event[ATTR_KEY],
)
self._hass.bus.fire(DOMAIN, {"file_name": file_name, **event})
@property
def queue(self):
"""Return wrapped queue."""
return self._queue
def stop(self):
"""Stop run by putting None into queue and join the thread."""
_LOGGER.info("Stopping QueueListener")
self._queue.put(None)
self.join()
_LOGGER.info("Stopped QueueListener")
def start_handler(self, _):
"""Start handler helper method."""
self.start()
def stop_handler(self, _):
"""Stop handler helper method."""
self.stop()
class MinioListener:
"""MinioEventThread wrapper with helper methods."""
def __init__(
self,
queue: Queue,
endpoint: str,
access_key: str,
secret_key: str,
secure: bool,
bucket_name: str,
prefix: str,
suffix: str,
events: List[str],
):
"""Create Listener."""
self._queue = queue
self._endpoint = endpoint
self._access_key = access_key
self._secret_key = secret_key
self._secure = secure
self._bucket_name = bucket_name
self._prefix = prefix
self._suffix = suffix
self._events = events
self._minio_event_thread = None
def start_handler(self, _):
"""Create and start the event thread."""
self._minio_event_thread = MinioEventThread(
self._queue,
self._endpoint,
self._access_key,
self._secret_key,
self._secure,
self._bucket_name,
self._prefix,
self._suffix,
self._events,
)
self._minio_event_thread.start()
def stop_handler(self, _):
"""Issue stop and wait for thread to join."""
if self._minio_event_thread is not None:
self._minio_event_thread.stop()
|
apache-2.0
| 4,996,086,234,179,014,000
| 29.4
| 85
| 0.566783
| false
| 3.925926
| true
| false
| false
|
matejsuchanek/pywikibot-scripts
|
fix_qualifiers.py
|
1
|
4360
|
#!/usr/bin/python
"""This script is obsolete!"""
import pywikibot
from pywikibot import pagegenerators
from .query_store import QueryStore
from .wikidata import WikidataEntityBot
class QualifiersFixingBot(WikidataEntityBot):
blacklist = frozenset(['P143', 'P248', 'P459', 'P518', 'P577', 'P805',
'P972', 'P1065', 'P1135', 'P1480', 'P1545', 'P1932',
'P2315', 'P2701', 'P3274', ])
whitelist = frozenset(['P17', 'P21', 'P39', 'P155', 'P156', 'P281', 'P580',
'P582', 'P585', 'P669', 'P708', 'P969', 'P1355',
'P1356', ])
good_item = 'Q15720608'
use_from_page = False
def __init__(self, **kwargs):
kwargs.update({
'bad_cache': kwargs.get('bad_cache', []) + list(self.blacklist),
'good_cache': kwargs.get('good_cache', []) + list(self.whitelist),
})
super().__init__(**kwargs)
self.store = QueryStore()
def filterProperty(self, prop_page):
if prop_page.type == 'external-id':
return False
prop_page.get()
if 'P31' not in prop_page.claims:
pywikibot.warning('%s is not classified' % prop_page.getID())
return False
for claim in prop_page.claims['P31']:
if claim.target_equals(self.good_item):
return True
return False
@property
def generator(self):
query = self.store.build_query(
'qualifiers', item=self.good_item,
good=', wd:'.join(self.whitelist),
bad=', wd:'.join(self.blacklist))
return pagegenerators.PreloadingItemGenerator(
pagegenerators.WikidataSPARQLPageGenerator(query, site=self.repo))
def treat_page_and_item(self, page, item):
for prop in item.claims.keys():
for claim in item.claims[prop]:
moved = set()
json = claim.toJSON()
i = -1
for source in claim.sources:
i += 1
for ref_prop in filter(self.checkProperty, source.keys()):
for snak in source[ref_prop]:
json.setdefault('qualifiers', {}).setdefault(ref_prop, [])
for qual in (pywikibot.Claim.qualifierFromJSON(self.repo, q)
for q in json['qualifiers'][ref_prop]):
if qual.target_equals(snak.getTarget()):
break
else:
snak.isReference = False
snak.isQualifier = True
json['qualifiers'][ref_prop].append(snak.toJSON())
json['references'][i]['snaks'][ref_prop].pop(0)
if len(json['references'][i]['snaks'][ref_prop]) == 0:
json['references'][i]['snaks'].pop(ref_prop)
if len(json['references'][i]['snaks']) == 0:
json['references'].pop(i)
i -= 1
moved.add(ref_prop)
if len(moved) > 0:
data = {'claims': [json]}
self.user_edit_entity(item, data, summary=self.makeSummary(prop, moved),
asynchronous=True)
def makeSummary(self, prop, props):
props = ['[[Property:P%s]]' % pid for pid in sorted(
int(pid[1:]) for pid in props)]
return '[[Property:%s]]: moving misplaced reference%s %s to qualifiers' % (
prop, 's' if len(props) > 1 else '', '%s and %s' % (
', '.join(props[:-1]), props[-1]) if len(props) > 1 else props[0])
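# A hedged worked example (not part of the original script) of the summary
# built by makeSummary() above, with made-up property ids:
#
#   makeSummary('P580', {'P39', 'P17'})
#   -> '[[Property:P580]]: moving misplaced references [[Property:P17]] and [[Property:P39]] to qualifiers'
#
# A single moved property drops the plural 's' and the 'and' join.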
def main(*args):
options = {}
for arg in pywikibot.handle_args(args):
if arg.startswith('-'):
arg, sep, value = arg.partition(':')
if value != '':
options[arg[1:]] = value if not value.isdigit() else int(value)
else:
options[arg[1:]] = True
site = pywikibot.Site('wikidata', 'wikidata')
bot = QualifiersFixingBot(site=site, **options)
bot.run()
if __name__ == '__main__':
main()
|
gpl-2.0
| 4,387,339,838,632,818,700
| 38.636364
| 92
| 0.482569
| false
| 4.052045
| false
| false
| false
|
mediatum/mediatum
|
workflow/addtofolder.py
|
1
|
4475
|
# -*- coding: utf-8 -*-
"""
mediatum - a multimedia content repository
Copyright (C) 2011 Arne Seifert <arne.seifert@tum.de>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from .upload import WorkflowStep
from .workflow import registerStep
from core.translation import t, addLabels
from utils.utils import isNumeric
from core import Node
from core import db
import json
from schema.schema import Metafield
from contenttypes.container import Directory
q = db.query
def register():
registerStep("workflowstep_addtofolder")
addLabels(WorkflowStep_AddToFolder.getLabels())
class WorkflowStep_AddToFolder(WorkflowStep):
"""
    Workflow step that adds an item to a selectable subfolder.
attributes:
- destination: list of node ids ;-separated
        - subfolder: list of subfolders below destination; the item is added to the first subfolder that exists and the
          remaining subfolders are ignored
subfolders are specified as json-string and can contain metadata from the item, like:
["{faculty}/Prüfungsarbeiten/{type}en/", "{faculty}/Prüfungsarbeiten/Weitere Prüfungsarbeiten/"]
"""
def show_workflow_node(self, node, req):
return self.forwardAndShow(node, True, req)
def getFolder(self, node, destNode, subfolder):
"""
search the subfolder below destNode
:param node: node which should be placed in the subfolder, parts of the node attributes may be specified
in subfolder
:param destNode: destination Node under which the subfolder is searched
:param subfolder: directorypath to the subfolder below destNode like: "{faculty}/Prüfungsarbeiten/{type}en/"
        :return: the node if the subfolder is found, otherwise None (see the illustration after this method)
"""
subfolderNode = destNode
for subdir in subfolder.format(**node.attrs).split("/"):
if not subdir:
continue
subfolderNode = subfolderNode.children.filter_by(name=subdir).scalar()
if not subfolderNode:
return None
return subfolderNode
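    # A hedged illustration (not part of the original module) of how a pattern
    # from the class docstring is expanded before getFolder() walks it; the
    # attribute values are made up:
    #
    #   node.attrs = {"faculty": "Informatik", "type": "Dissertation"}
    #   "{faculty}/Prüfungsarbeiten/{type}en/".format(**node.attrs)
    #       -> "Informatik/Prüfungsarbeiten/Dissertationen/"
    #
    # getFolder() then descends destNode -> "Informatik" -> "Prüfungsarbeiten"
    # -> "Dissertationen" and returns None as soon as one segment is missing.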
def runAction(self, node, op=""):
subfolders = json.loads(self.get('destination_subfolder'))
for nid in self.get('destination').split(";"):
if not nid:
continue
destNode = q(Node).get(nid)
if not destNode:
continue
for subfolder in subfolders:
subfolderNode = self.getFolder(node, destNode, subfolder)
if not subfolderNode:
continue
subfolderNode.children.append(node)
db.session.commit()
break
def metaFields(self, lang=None):
ret = []
field = Metafield("destination")
field.set("label", t(lang, "admin_wfstep_addtofolder_destination"))
field.set("type", "treeselect")
ret.append(field)
field = Metafield("destination_subfolder")
field.set("label", t(lang, "admin_wfstep_addtofolder_destination_subfolder"))
field.set("type", "text")
ret.append(field)
return ret
@staticmethod
def getLabels():
return {"de":
[
("workflowstep-addtofolder", "Zu einem Verzeichnis hinzufügen"),
("admin_wfstep_addtofolder_destination", "Zielknoten-ID"),
("admin_wfstep_addtofolder_destination_subfolder", "Unterverzeichnis"),
],
"en":
[
("workflowstep-addtofolder", "add to folder"),
("admin_wfstep_addtofolder_destination", "ID of destination node"),
("admin_wfstep_addtofolder_destination_subfolder", "sub folder"),
]
}
|
gpl-3.0
| 92,644,555,041,265,800
| 36.25
| 121
| 0.625056
| false
| 4.365234
| false
| false
| false
|
greggian/TapdIn
|
django/core/handlers/base.py
|
1
|
9267
|
import sys
from django import http
from django.core import signals
from django.utils.encoding import force_unicode
from django.utils.importlib import import_module
class BaseHandler(object):
# Changes that are always applied to a response (in this order).
response_fixes = [
http.fix_location_header,
http.conditional_content_removal,
http.fix_IE_for_attach,
http.fix_IE_for_vary,
]
def __init__(self):
self._request_middleware = self._view_middleware = self._response_middleware = self._exception_middleware = None
def load_middleware(self):
"""
Populate middleware lists from settings.MIDDLEWARE_CLASSES.
Must be called after the environment is fixed (see __call__).
"""
from django.conf import settings
from django.core import exceptions
self._view_middleware = []
self._response_middleware = []
self._exception_middleware = []
request_middleware = []
for middleware_path in settings.MIDDLEWARE_CLASSES:
try:
dot = middleware_path.rindex('.')
except ValueError:
raise exceptions.ImproperlyConfigured, '%s isn\'t a middleware module' % middleware_path
mw_module, mw_classname = middleware_path[:dot], middleware_path[dot+1:]
try:
mod = import_module(mw_module)
except ImportError, e:
raise exceptions.ImproperlyConfigured, 'Error importing middleware %s: "%s"' % (mw_module, e)
try:
mw_class = getattr(mod, mw_classname)
except AttributeError:
raise exceptions.ImproperlyConfigured, 'Middleware module "%s" does not define a "%s" class' % (mw_module, mw_classname)
try:
mw_instance = mw_class()
except exceptions.MiddlewareNotUsed:
continue
if hasattr(mw_instance, 'process_request'):
request_middleware.append(mw_instance.process_request)
if hasattr(mw_instance, 'process_view'):
self._view_middleware.append(mw_instance.process_view)
if hasattr(mw_instance, 'process_response'):
self._response_middleware.insert(0, mw_instance.process_response)
if hasattr(mw_instance, 'process_exception'):
self._exception_middleware.insert(0, mw_instance.process_exception)
# We only assign to this when initialization is complete as it is used
# as a flag for initialization being complete.
self._request_middleware = request_middleware
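    # A hedged sketch (not part of the original module) of what load_middleware()
    # consumes: each entry in settings.MIDDLEWARE_CLASSES is a dotted path split
    # on its last '.' into module and class. The second entry and the class below
    # are hypothetical; only the hook names decide which internal list the
    # instance ends up in.
    #
    #   MIDDLEWARE_CLASSES = (
    #       'django.middleware.common.CommonMiddleware',
    #       'myproject.middleware.TimingMiddleware',
    #   )
    #
    #   class TimingMiddleware(object):
    #       def process_request(self, request):               # -> _request_middleware
    #           request._start_time = time.time()
    #       def process_response(self, request, response):    # -> _response_middleware (inserted at 0)
    #           response['X-Elapsed'] = str(time.time() - request._start_time)
    #           return response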
def get_response(self, request):
"Returns an HttpResponse object for the given HttpRequest"
from django.core import exceptions, urlresolvers
from django.conf import settings
# Apply request middleware
for middleware_method in self._request_middleware:
response = middleware_method(request)
if response:
return response
# Get urlconf from request object, if available. Otherwise use default.
urlconf = getattr(request, "urlconf", settings.ROOT_URLCONF)
resolver = urlresolvers.RegexURLResolver(r'^/', urlconf)
try:
callback, callback_args, callback_kwargs = resolver.resolve(
request.path_info)
# Apply view middleware
for middleware_method in self._view_middleware:
response = middleware_method(request, callback, callback_args, callback_kwargs)
if response:
return response
try:
response = callback(request, *callback_args, **callback_kwargs)
except Exception, e:
# If the view raised an exception, run it through exception
# middleware, and if the exception middleware returns a
# response, use that. Otherwise, reraise the exception.
for middleware_method in self._exception_middleware:
response = middleware_method(request, e)
if response:
return response
raise
# Complain if the view returned None (a common error).
if response is None:
try:
view_name = callback.func_name # If it's a function
except AttributeError:
view_name = callback.__class__.__name__ + '.__call__' # If it's a class
raise ValueError, "The view %s.%s didn't return an HttpResponse object." % (callback.__module__, view_name)
return response
except http.Http404, e:
if settings.DEBUG:
from django.views import debug
return debug.technical_404_response(request, e)
else:
try:
callback, param_dict = resolver.resolve404()
return callback(request, **param_dict)
except:
try:
return self.handle_uncaught_exception(request, resolver, sys.exc_info())
finally:
receivers = signals.got_request_exception.send(sender=self.__class__, request=request)
except exceptions.PermissionDenied:
return http.HttpResponseForbidden('<h1>Permission denied</h1>')
except SystemExit:
# Allow sys.exit() to actually exit. See tickets #1023 and #4701
raise
except: # Handle everything else, including SuspiciousOperation, etc.
# Get the exception info now, in case another exception is thrown later.
exc_info = sys.exc_info()
receivers = signals.got_request_exception.send(sender=self.__class__, request=request)
return self.handle_uncaught_exception(request, resolver, exc_info)
def handle_uncaught_exception(self, request, resolver, exc_info):
"""
Processing for any otherwise uncaught exceptions (those that will
generate HTTP 500 responses). Can be overridden by subclasses who want
customised 500 handling.
Be *very* careful when overriding this because the error could be
caused by anything, so assuming something like the database is always
available would be an error.
"""
from django.conf import settings
from django.core.mail import mail_admins
if settings.DEBUG_PROPAGATE_EXCEPTIONS:
raise
if settings.DEBUG:
from django.views import debug
return debug.technical_500_response(request, *exc_info)
# When DEBUG is False, send an error message to the admins.
subject = 'Error (%s IP): %s' % ((request.META.get('REMOTE_ADDR') in settings.INTERNAL_IPS and 'internal' or 'EXTERNAL'), request.path)
try:
request_repr = repr(request)
except:
request_repr = "Request repr() unavailable"
message = "%s\n\n%s" % (self._get_traceback(exc_info), request_repr)
mail_admins(subject, message, fail_silently=True)
# Return an HttpResponse that displays a friendly error message.
callback, param_dict = resolver.resolve500()
return callback(request, **param_dict)
def _get_traceback(self, exc_info=None):
"Helper function to return the traceback as a string"
import traceback
return '\n'.join(traceback.format_exception(*(exc_info or sys.exc_info())))
def apply_response_fixes(self, request, response):
"""
Applies each of the functions in self.response_fixes to the request and
response, modifying the response in the process. Returns the new
response.
"""
for func in self.response_fixes:
response = func(request, response)
return response
def get_script_name(environ):
"""
Returns the equivalent of the HTTP request's SCRIPT_NAME environment
variable. If Apache mod_rewrite has been used, returns what would have been
the script name prior to any rewriting (so it's the script name as seen
from the client's perspective), unless DJANGO_USE_POST_REWRITE is set (to
anything).
"""
from django.conf import settings
if settings.FORCE_SCRIPT_NAME is not None:
return force_unicode(settings.FORCE_SCRIPT_NAME)
# If Apache's mod_rewrite had a whack at the URL, Apache set either
# SCRIPT_URL or REDIRECT_URL to the full resource URL before applying any
# rewrites. Unfortunately not every webserver (lighttpd!) passes this
# information through all the time, so FORCE_SCRIPT_NAME, above, is still
# needed.
script_url = environ.get('SCRIPT_URL', u'')
if not script_url:
script_url = environ.get('REDIRECT_URL', u'')
if script_url:
return force_unicode(script_url[:-len(environ.get('PATH_INFO', ''))])
return force_unicode(environ.get('SCRIPT_NAME', u''))
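# A hedged worked example (not part of the original module) of the mod_rewrite
# branch above, with a made-up environ:
#
#   environ = {'SCRIPT_URL': u'/mysite/admin/', 'PATH_INFO': u'/admin/'}
#   script_url[:-len(environ['PATH_INFO'])]  ->  u'/mysite'
#
# With neither SCRIPT_URL nor REDIRECT_URL set, the plain SCRIPT_NAME value is
# returned instead.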
|
apache-2.0
| -8,968,178,709,805,447,000
| 42.985437
| 143
| 0.604726
| false
| 4.811526
| false
| false
| false
|
luksan/kodos
|
modules/urlDialog.py
|
1
|
1874
|
# -*- coding: utf-8; mode: python; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; truncate-lines: 0 -*-
# vi: set fileencoding=utf-8 filetype=python expandtab tabstop=4 shiftwidth=4 softtabstop=4 cindent:
# :mode=python:indentSize=4:tabSize=4:noTabs=true:
#-----------------------------------------------------------------------------#
# Built-in modules
import urllib
#-----------------------------------------------------------------------------#
# Installed modules
from PyQt4 import QtGui, QtCore
#-----------------------------------------------------------------------------#
# Kodos modules
from .urlDialogBA import Ui_URLDialogBA
from . import help
#-----------------------------------------------------------------------------#
class URLDialog(QtGui.QDialog, Ui_URLDialogBA):
urlImported = QtCore.pyqtSignal(str, str)
def __init__(self, url=None, parent=None, f=QtCore.Qt.WindowFlags()):
QtGui.QDialog.__init__(self, parent, f)
self.setupUi(self)
if url:
self.URLTextEdit.setPlainText(url)
self.show()
return
def help_slot(self):
self.helpWindow = help.Help(self, "importURL.html")
return
def ok_slot(self):
url = str(self.URLTextEdit.toPlainText())
try:
fp = urllib.urlopen(url)
lines = fp.readlines()
except Exception as e:
QtGui.QMessageBox.information(
None,
"Failed to open URL",
"Could not open the specified URL. Please check to ensure \
that you have entered the correct URL.\n\n{0}".format(str(e))
)
return
html = ''.join(lines)
self.urlImported.emit(html, url)
self.accept()
return
#-----------------------------------------------------------------------------#
|
gpl-2.0
| -2,844,676,749,710,994,000
| 28.28125
| 112
| 0.47492
| false
| 4.419811
| false
| false
| false
|
luzhuomi/collamine-client-python
|
scrapybot/scrapybot/spiders/hwz.py
|
1
|
1627
|
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.selector import HtmlXPathSelector
from scrapybot.items import ScrapybotItem
from scrapybot.utils import normalizeFriendlyDate
import datetime
from dateutil.parser import parse
from django.utils import timezone
import django.db.utils
class HwzSpider(CrawlSpider):
name = "hwz"
allowed_domains = ["hardwarezone.com.sg"]
domain = "www.hardwarezone.com" # for collamine upload
start_urls = [
"http://forums.hardwarezone.com.sg/current-affairs-lounge-17/",
"http://forums.hardwarezone.com.sg/money-mind-210/"
]
rules = (
Rule(SgmlLinkExtractor(allow=('current\-affairs\-lounge\-17/.*\.html', )), callback='parse_item', follow=True),
Rule(SgmlLinkExtractor(allow=('money\-mind\-210/.*\.html', )), callback='parse_item', follow=True),
)
"""
When writing crawl spider rules, avoid using parse as callback, since the CrawlSpider uses the parse method itself to implement its logic. So if you override the parse method, the crawl spider will no longer work.
"""
def parse_item(self, response):
source="original"
if ((response.flags) and ("collamine" in response.flags)):
source="collamine"
i = ScrapybotItem(url=response.url,
domain=self.allowed_domains[0],
source=source,
content=response.body.decode(response.encoding),
crawled_date=timezone.now())
try:
i.save()
except django.db.utils.IntegrityError:
print "url exists"
|
apache-2.0
| -3,216,291,927,608,126,000
| 35.977273
| 217
| 0.690227
| false
| 3.70615
| false
| false
| false
|
oudalab/phyllo
|
phyllo/extractors/germanicusDB.py
|
1
|
3002
|
import sqlite3
import urllib
import re
from urllib.request import urlopen
from bs4 import BeautifulSoup
def main():
# The collection URL below.
collURL = 'http://www.thelatinlibrary.com/germanicus.html'
collOpen = urllib.request.urlopen(collURL)
collSOUP = BeautifulSoup(collOpen, 'html5lib')
author = collSOUP.title.string.split(":")[0].strip()
colltitle = collSOUP.p.string.split(":")[0].strip()
date = "no date found"
textsURL = [collURL]
with sqlite3.connect('texts.db') as db:
c = db.cursor()
c.execute(
'CREATE TABLE IF NOT EXISTS texts (id INTEGER PRIMARY KEY, title TEXT, book TEXT,'
' language TEXT, author TEXT, date TEXT, chapter TEXT, verse TEXT, passage TEXT,'
' link TEXT, documentType TEXT)')
c.execute("DELETE FROM texts WHERE author = 'Germanicus'")
for url in textsURL:
chapter = "-1"
verse = 0
title = collSOUP.title.string.split(":")[1].strip()
openurl = urllib.request.urlopen(url)
textsoup = BeautifulSoup(openurl, 'html5lib')
getp = textsoup.find_all('p')
for p in getp:
# make sure it's not a paragraph without the main text
try:
if p['class'][0].lower() in ['border', 'pagehead', 'shortborder', 'smallboarder', 'margin',
                                             'internal_navigation']:  # these are not part of the main text
continue
except:
pass
txtstr = p.get_text().strip()
if txtstr.startswith("The"):
continue
brtags = p.findAll('br')
verses = []
try:
firstline = brtags[0].previous_sibling.previous_sibling.strip()
except:
firstline = brtags[0].previous_sibling.strip()
verses.append(firstline)
for br in brtags:
try:
text = br.next_sibling.next_sibling.strip()
except:
text = br.next_sibling.strip()
if text is None or text == '' or text.isspace():
continue
# remove in-text line numbers
                if re.search(r'[0-9]+$', text):
                    text = re.sub(r'[0-9]+$', '', text).strip()
verses.append(text)
for v in verses:
# verse number assignment.
verse += 1
c.execute("INSERT INTO texts VALUES (?,?,?,?,?,?,?, ?, ?, ?, ?)",
(None, colltitle, title, 'Latin', author, date, chapter,
verse, v, url, 'poetry'))
if __name__ == '__main__':
main()
|
apache-2.0
| -921,874,890,846,832,400
| 34.738095
| 111
| 0.476682
| false
| 4.494012
| false
| false
| false
|
kubernetes-client/python
|
kubernetes/client/models/v1_job_spec.py
|
1
|
13937
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.18
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1JobSpec(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'active_deadline_seconds': 'int',
'backoff_limit': 'int',
'completions': 'int',
'manual_selector': 'bool',
'parallelism': 'int',
'selector': 'V1LabelSelector',
'template': 'V1PodTemplateSpec',
'ttl_seconds_after_finished': 'int'
}
attribute_map = {
'active_deadline_seconds': 'activeDeadlineSeconds',
'backoff_limit': 'backoffLimit',
'completions': 'completions',
'manual_selector': 'manualSelector',
'parallelism': 'parallelism',
'selector': 'selector',
'template': 'template',
'ttl_seconds_after_finished': 'ttlSecondsAfterFinished'
}
def __init__(self, active_deadline_seconds=None, backoff_limit=None, completions=None, manual_selector=None, parallelism=None, selector=None, template=None, ttl_seconds_after_finished=None, local_vars_configuration=None): # noqa: E501
"""V1JobSpec - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._active_deadline_seconds = None
self._backoff_limit = None
self._completions = None
self._manual_selector = None
self._parallelism = None
self._selector = None
self._template = None
self._ttl_seconds_after_finished = None
self.discriminator = None
if active_deadline_seconds is not None:
self.active_deadline_seconds = active_deadline_seconds
if backoff_limit is not None:
self.backoff_limit = backoff_limit
if completions is not None:
self.completions = completions
if manual_selector is not None:
self.manual_selector = manual_selector
if parallelism is not None:
self.parallelism = parallelism
if selector is not None:
self.selector = selector
self.template = template
if ttl_seconds_after_finished is not None:
self.ttl_seconds_after_finished = ttl_seconds_after_finished
@property
def active_deadline_seconds(self):
"""Gets the active_deadline_seconds of this V1JobSpec. # noqa: E501
Specifies the duration in seconds relative to the startTime that the job may be active before the system tries to terminate it; value must be positive integer # noqa: E501
:return: The active_deadline_seconds of this V1JobSpec. # noqa: E501
:rtype: int
"""
return self._active_deadline_seconds
@active_deadline_seconds.setter
def active_deadline_seconds(self, active_deadline_seconds):
"""Sets the active_deadline_seconds of this V1JobSpec.
Specifies the duration in seconds relative to the startTime that the job may be active before the system tries to terminate it; value must be positive integer # noqa: E501
:param active_deadline_seconds: The active_deadline_seconds of this V1JobSpec. # noqa: E501
:type: int
"""
self._active_deadline_seconds = active_deadline_seconds
@property
def backoff_limit(self):
"""Gets the backoff_limit of this V1JobSpec. # noqa: E501
Specifies the number of retries before marking this job failed. Defaults to 6 # noqa: E501
:return: The backoff_limit of this V1JobSpec. # noqa: E501
:rtype: int
"""
return self._backoff_limit
@backoff_limit.setter
def backoff_limit(self, backoff_limit):
"""Sets the backoff_limit of this V1JobSpec.
Specifies the number of retries before marking this job failed. Defaults to 6 # noqa: E501
:param backoff_limit: The backoff_limit of this V1JobSpec. # noqa: E501
:type: int
"""
self._backoff_limit = backoff_limit
@property
def completions(self):
"""Gets the completions of this V1JobSpec. # noqa: E501
Specifies the desired number of successfully finished pods the job should be run with. Setting to nil means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ # noqa: E501
:return: The completions of this V1JobSpec. # noqa: E501
:rtype: int
"""
return self._completions
@completions.setter
def completions(self, completions):
"""Sets the completions of this V1JobSpec.
Specifies the desired number of successfully finished pods the job should be run with. Setting to nil means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ # noqa: E501
:param completions: The completions of this V1JobSpec. # noqa: E501
:type: int
"""
self._completions = completions
@property
def manual_selector(self):
"""Gets the manual_selector of this V1JobSpec. # noqa: E501
manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector # noqa: E501
:return: The manual_selector of this V1JobSpec. # noqa: E501
:rtype: bool
"""
return self._manual_selector
@manual_selector.setter
def manual_selector(self, manual_selector):
"""Sets the manual_selector of this V1JobSpec.
manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector # noqa: E501
:param manual_selector: The manual_selector of this V1JobSpec. # noqa: E501
:type: bool
"""
self._manual_selector = manual_selector
@property
def parallelism(self):
"""Gets the parallelism of this V1JobSpec. # noqa: E501
Specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ # noqa: E501
:return: The parallelism of this V1JobSpec. # noqa: E501
:rtype: int
"""
return self._parallelism
@parallelism.setter
def parallelism(self, parallelism):
"""Sets the parallelism of this V1JobSpec.
Specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ # noqa: E501
:param parallelism: The parallelism of this V1JobSpec. # noqa: E501
:type: int
"""
self._parallelism = parallelism
@property
def selector(self):
"""Gets the selector of this V1JobSpec. # noqa: E501
:return: The selector of this V1JobSpec. # noqa: E501
:rtype: V1LabelSelector
"""
return self._selector
@selector.setter
def selector(self, selector):
"""Sets the selector of this V1JobSpec.
:param selector: The selector of this V1JobSpec. # noqa: E501
:type: V1LabelSelector
"""
self._selector = selector
@property
def template(self):
"""Gets the template of this V1JobSpec. # noqa: E501
:return: The template of this V1JobSpec. # noqa: E501
:rtype: V1PodTemplateSpec
"""
return self._template
@template.setter
def template(self, template):
"""Sets the template of this V1JobSpec.
:param template: The template of this V1JobSpec. # noqa: E501
:type: V1PodTemplateSpec
"""
if self.local_vars_configuration.client_side_validation and template is None: # noqa: E501
raise ValueError("Invalid value for `template`, must not be `None`") # noqa: E501
self._template = template
@property
def ttl_seconds_after_finished(self):
"""Gets the ttl_seconds_after_finished of this V1JobSpec. # noqa: E501
ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes. This field is alpha-level and is only honored by servers that enable the TTLAfterFinished feature. # noqa: E501
:return: The ttl_seconds_after_finished of this V1JobSpec. # noqa: E501
:rtype: int
"""
return self._ttl_seconds_after_finished
@ttl_seconds_after_finished.setter
def ttl_seconds_after_finished(self, ttl_seconds_after_finished):
"""Sets the ttl_seconds_after_finished of this V1JobSpec.
ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes. This field is alpha-level and is only honored by servers that enable the TTLAfterFinished feature. # noqa: E501
:param ttl_seconds_after_finished: The ttl_seconds_after_finished of this V1JobSpec. # noqa: E501
:type: int
"""
self._ttl_seconds_after_finished = ttl_seconds_after_finished
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1JobSpec):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1JobSpec):
return True
return self.to_dict() != other.to_dict()
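# A minimal usage sketch (not part of the generated module); the pod template
# classes come from the same generated client package and the container values
# are made up.
def _example_job_spec():
    from kubernetes.client import (V1Container, V1ObjectMeta, V1PodSpec,
                                   V1PodTemplateSpec)
    template = V1PodTemplateSpec(
        metadata=V1ObjectMeta(labels={"app": "demo"}),
        spec=V1PodSpec(restart_policy="Never",
                       containers=[V1Container(name="demo", image="busybox")]),
    )
    # template is the only argument the setter above refuses to leave as None
    return V1JobSpec(template=template, backoff_limit=4, completions=1,
                     parallelism=1)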
|
apache-2.0
| 6,172,444,438,612,771,000
| 43.244444
| 685
| 0.664705
| false
| 4.175255
| true
| false
| false
|
andrewjpage/gubbins
|
python/scripts/gubbins_drawer.py
|
1
|
26242
|
#!/usr/bin/env python3
#################################
# Import some necessary modules #
#################################
import argparse
import pkg_resources
from Bio.Nexus import Trees, Nodes
from Bio.Graphics.GenomeDiagram._Colors import ColorTranslator
from Bio.GenBank import Scanner
from Bio.GenBank import _FeatureConsumer
from Bio.GenBank.utils import FeatureValueCleaner
from reportlab.lib.units import inch
from reportlab.lib import pagesizes
from reportlab.graphics.shapes import *
from reportlab.pdfgen.canvas import Canvas
from reportlab.graphics import renderPDF
################################
# Get the command line options #
################################
def main():
parser = argparse.ArgumentParser(description='Gubbins Drawer creates a PDF with a tree on one side and the recombination regions plotted on the reference space on the other side. An interactive version can be found at https://sanger-pathogens.github.io/phandango/', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('tree', help='Tree in Newick format, such as XXXX.final_tree.tre')
parser.add_argument('embl_file', help='EMBL file, such as XXXX.recombination_predictions.embl')
parser.add_argument( '-o', '--outputfile', help='Output PDF filename', default = 'gubbins_recombinations.pdf')
return parser.parse_args()
##########################################################
# Function to read an alignment whichever format it's in #
##########################################################
def tab_parser(handle, quiet=False):
def Drawer_parse_tab_features(object, skip=False):
features = []
line = object.line
while True:
            if not line:
                # Reached end of file: stop parsing the feature table here
                # instead of raising "Premature end of line during features table".
                break
if line[:object.HEADER_WIDTH].rstrip() in object.SEQUENCE_HEADERS:
if object.debug : print("Found start of sequence")
break
line = line.rstrip()
if line == "//":
raise ValueError("Premature end of features table, marker '//' found")
if line in object.FEATURE_END_MARKERS:
if object.debug : print("Found end of features")
line = object.handle.readline()
break
if line[2:object.FEATURE_QUALIFIER_INDENT].strip() == "":
print(line[2:object.FEATURE_QUALIFIER_INDENT].strip())
raise ValueError("Expected a feature qualifier in line '%s'" % line)
if skip:
line = object.handle.readline()
while line[:object.FEATURE_QUALIFIER_INDENT] == object.FEATURE_QUALIFIER_SPACER:
line = object.handle.readline()
else:
#Build up a list of the lines making up this feature:
feature_key = line[2:object.FEATURE_QUALIFIER_INDENT].strip()
feature_lines = [line[object.FEATURE_QUALIFIER_INDENT:]]
line = object.handle.readline()
while line[:object.FEATURE_QUALIFIER_INDENT] == object.FEATURE_QUALIFIER_SPACER or line.rstrip() == "" : # cope with blank lines in the midst of a feature
feature_lines.append(line[object.FEATURE_QUALIFIER_INDENT:].rstrip())
line = object.handle.readline()
if len(line)==0:
break#EOF
feature_lines.append('/seq="N"')
sys.stdout.flush()
features.append(object.parse_feature(feature_key, feature_lines))
object.line = line
return features
def Drawer_feed(object, handle, consumer, do_features=True):
if do_features:
object._feed_feature_table(consumer, Drawer_parse_tab_features(object,skip=False))
else:
Drawer_parse_tab_features(object,skip=True) # ignore the data
sequence_string="N"
consumer.sequence(sequence_string)
consumer.record_end("//")
length=0
for record in consumer.data.features:
if record.location.nofuzzy_end>length:
length=record.location.nofuzzy_end
consumer.data.seq="N"*length
return True
myscanner=Scanner.InsdcScanner()
myscanner.set_handle(handle)
myscanner.line=myscanner.handle.readline()
myscanner.FEATURE_QUALIFIER_INDENT=21
myscanner.FEATURE_QUALIFIER_SPACER = "FT" + " " * (myscanner.FEATURE_QUALIFIER_INDENT-2)
myscanner.debug=True
consumer = _FeatureConsumer(use_fuzziness = 1, feature_cleaner = FeatureValueCleaner())
Drawer_feed(myscanner, handle, consumer)
return consumer.data
####################################################
# Function to round floats to n significant digits #
####################################################
def round_to_n(x, n):
if n < 1:
raise ValueError("number of significant digits must be >= 1")
# Use %e format to get the n most significant digits, as a string.
format = "%." + str(n-1) + "e"
as_string = format % x
if x>=10 or x<=-10:
return int(float(as_string))
else:
return float(as_string)
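# Worked example (values are illustrative, not taken from the script): with n=2,
# round_to_n(123.456, 2) formats as "1.2e+02" and, since |x| >= 10, returns the int 120;
# round_to_n(0.012345, 2) formats as "1.2e-02" and returns the float 0.012.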
##############################################################################################################
# Function to convert features with subfeatures (e.g. pseudogenes) to a list of locations of the subfeatures #
##############################################################################################################
def iterate_subfeatures(feature, locations):
if len(feature.sub_features)>0:
for subfeature in feature.sub_features:
locations=iterate_subfeatures(subfeature, locations)
else:
locations.append((feature.location.start.position, feature.location.end.position))
return locations
####################################################
# Function to get the pixel width of a text string #
####################################################
def get_text_width(font, size, text):
	# Only the font metrics are needed here; nothing is drawn or saved, so the canvas
	# file name is never written to disk (the original passed an undefined variable).
	c = Canvas("text_width_scratch.pdf", pagesize=pagesize)
	length = c.stringWidth(str(text), font, size)
	return length
#####################################################################################
# Function to add an embl file to multiple tracks split using the qualifiers option #
#####################################################################################
def add_ordered_embl_to_diagram(record, incfeatures=["CDS", "feature"], emblfile=True):
incfeatures= [x.lower() for x in incfeatures]
new_tracks={}
print(len(record.features), "features found for", record.name)
if len(record.seq)>500000:
scale_largetick_interval=int(round((len(record.seq)/10),-5))
scale_smalltick_interval=int(round((len(record.seq)/10),-5)/5)
else:
scale_largetick_interval=len(record.seq)
scale_smalltick_interval=len(record.seq)/5
for x, feature in enumerate(record.features):
		# The original condition ended with "(feature.location.nofuzzy_start>-1 and -1!=-1)",
		# which can never be true, so only the two live tests are kept.
		if feature.type.lower() not in incfeatures or feature.location.nofuzzy_end<0:
continue
if "colour" in feature.qualifiers:
colourline=feature.qualifiers["colour"][0]
elif "color" in feature.qualifiers:
colourline=feature.qualifiers["color"][0]
else:
colourline = "5"
if len(colourline.split())==1:
colour=translator.artemis_color(colourline)
elif len(colourline.split())==3:
colour=translator.int255_color((int(colourline.split()[0]),int(colourline.split()[1]),int(colourline.split()[2])))
else:
print("Can't understand colour code!")
print(colourline)
sys.exit()
locations=[]
locations.append((feature.location.start, feature.location.end))
if "taxa" in feature.qualifiers:
qualifiernames=feature.qualifiers["taxa"][0].replace(", "," ").split()
for taxonname in qualifiernames:
taxonname=taxonname.strip()
if not taxonname in new_tracks:
newtrack = Track()
newtrack.name=taxonname
new_tracks[taxonname]=newtrack
arrows=0
new_tracks[taxonname].add_feature(locations, fillcolour=colour, strokecolour=colour, arrows=arrows)
else:
if not record.name in new_tracks:
newtrack = Track()
newtrack.name=record.name
new_tracks[record.name]=newtrack
arrows=0
new_tracks[record.name].add_feature(locations, fillcolour=colour, strokecolour=colour, arrows=arrows)
if len(new_tracks)>1 and record.name in new_tracks:
del new_tracks[record.name]
return new_tracks
###################################################################################
# Function to add a tab file to multiple tracks split using the qualifiers option #
###################################################################################
def add_ordered_tab_to_diagram(filename):
features={"":[]}
featurename=""
names_to_add_feature_to=[]
try:
record=tab_parser(open(filename,"r"))
except IOError:
print("Cannot find file", filename)
sys.exit()
record.name=filename
new_tracks=add_ordered_embl_to_diagram(record, incfeatures=["i", "d", "li", "del", "snp", "misc_feature", "core", "cds", "insertion", "deletion", "recombination", "feature", "blastn_hit", "fasta_record", "variation"], emblfile=False)
return new_tracks
def add_empty_track(existing_tracks, track_name):
newtrack = Track()
newtrack.name=track_name
newtrack.beginning=0
newtrack.track_height=1
existing_tracks[track_name] = newtrack
existing_tracks[track_name].add_feature(locations=[(0,0)], fillcolour=translator.artemis_color(2), strokecolour=translator.artemis_color(2), arrows=0)
return existing_tracks
#############################
# Function to draw the tree #
#############################
def drawtree(treeObject, treeheight, treewidth, xoffset, yoffset, name_offset=5):
def get_max_branch_depth():
terminals=treeObject.get_terminals()
maxbrlen=0.0
for terminal in terminals:
if treeObject.sum_branchlength(node=terminal)>maxbrlen:
maxbrlen=treeObject.sum_branchlength(node=terminal)
return maxbrlen
def draw_scale():
if vertical_scaling_factor<5:
linewidth=0.5
else:
linewidth=1.0
branchlength=round_to_n(max_branch_depth/10, 2)*horizontal_scaling_factor
horizontalpos=xoffset+round_to_n(max_branch_depth/10, 2)*horizontal_scaling_factor
vertpos=treebase-fontsize
scalestring = str(round_to_n(max_branch_depth/10, 2))
scalefontsize=fontsize
if scalefontsize<6:
scalefontsize=6
d.add(Line(horizontalpos, vertpos, horizontalpos+branchlength, vertpos, strokeWidth=linewidth))
d.add(String(horizontalpos+(float(branchlength)/2), vertpos-(scalefontsize+1), scalestring, textAnchor='middle', fontSize=scalefontsize, fontName='Helvetica'))
def get_node_vertical_positions():
def get_node_vertical_position(node):
for daughter in treeObject.node(node).succ:
get_node_vertical_position(daughter)
if not treeObject.is_terminal(node):
daughters=treeObject.node(node).succ
if treeObject.node(node).data.comment==None:
treeObject.node(node).data.comment={}
treeObject.node(node).data.comment["vertpos"]=float(treeObject.node(daughters[0]).data.comment["vertpos"]+treeObject.node(daughters[-1]).data.comment["vertpos"])/2
node=treeObject.root
get_node_vertical_position(node)
def drawbranch(node,horizontalpos):
vertpos=treeObject.node(node).data.comment["vertpos"]+yoffset
horizontalpos+=xoffset
branchlength=treeObject.node(node).data.branchlength*horizontal_scaling_factor
if vertical_scaling_factor<5:
linewidth=0.5
else:
linewidth=1.0
if treeObject.node(node).data.comment and "branch_colour" in treeObject.node(node).data.comment:
r,g,b=treeObject.node(node).data.comment["branch_colour"]
branch_colour=colors.Color(float(r)/255,float(g)/255,float(b)/255)
else:
branch_colour=colors.black
if branchlength<linewidth:
branchlength=linewidth
d.add(Line(horizontalpos-(linewidth/2), vertpos, (horizontalpos-(linewidth/2))+branchlength, vertpos, strokeWidth=linewidth, strokeColor=branch_colour))
if node!=treeObject.root:
parentnode=treeObject.node(node).prev
sisters=treeObject.node(parentnode).succ
parentvertpos=treeObject.node(parentnode).data.comment["vertpos"]+yoffset
d.add(Line(horizontalpos, vertpos, horizontalpos, parentvertpos, strokeWidth=linewidth, strokeColor=branch_colour))
if treeObject.is_terminal(node):
if treeObject.node(node).data.comment and "name_colour" in treeObject.node(node).data.comment:
name_colours=[]
for x in range(0,len(treeObject.node(node).data.comment["name_colour"])):
r,g,b= treeObject.node(node).data.comment["name_colour"][x]
name_colours.append(colors.Color(float(r)/255,float(g)/255,float(b)/255))
else:
name_colours=[colors.black]
gubbins_length=0.0
colpos=0
namewidth=get_text_width('Helvetica', fontsize, treeObject.node(node).data.taxon)+name_offset
gubbins_length += namewidth
colpos=1
for x in range(colpos,len(name_colours)):
gubbins_length += block_length
if x!=0:
gubbins_length += vertical_scaling_factor
#Add the taxon names
d.add(String(treewidth+xoffset+(max_name_width-gubbins_length)+(fontsize/2), vertpos-(fontsize/3), treeObject.node(node).data.taxon, textAnchor='start', fontSize=fontsize, fillColor=name_colours[0], fontName='Helvetica'))
block_xpos=treewidth+xoffset+(max_name_width-gubbins_length)+(fontsize/2)+namewidth
# draw dashed lines
d.add(Line(horizontalpos+branchlength, vertpos, treewidth+xoffset+(max_name_width-gubbins_length), vertpos, strokeDashArray=[1, 2], strokeWidth=linewidth/2, strokeColor=name_colours[0]))
def recurse_subtree(node, horizontalpos):
daughters=treeObject.node(node).succ
daughterhorizontalpos=horizontalpos+(treeObject.node(node).data.branchlength*horizontal_scaling_factor)
drawbranch(node,horizontalpos)
for daughter in daughters:
recurse_subtree(daughter,daughterhorizontalpos)
def get_max_name_width(name_offset, fontsize):
max_width=0.0
for taxon in treeObject.get_terminals():
curwidth= get_text_width("Helvetica", fontsize, treeObject.node(taxon).data.taxon)
if curwidth>max_width:
max_width=curwidth
return max_width
fontsize=vertical_scaling_factor
if fontsize>12:
fontsize=12
while get_max_name_width(name_offset, fontsize)+name_offset>treewidth/3:
fontsize-=0.2
max_name_width=get_max_name_width(name_offset, fontsize)+name_offset
colblockstart=1
block_length=0
treewidth-=(max_name_width+(fontsize/2))
max_branch_depth=get_max_branch_depth()
horizontal_scaling_factor=float(treewidth)/max_branch_depth
get_node_vertical_positions()
recurse_subtree(treeObject.root, 0)
treebase=treeObject.node(treeObject.get_terminals()[-1]).data.comment["vertpos"]+yoffset
draw_scale()
return
#################
# Drawing class #
#################
class Figure:
def __init__(self, beginning, end):
		self.beginning=beginning
		self.end=end
###############
# Track class #
###############
class Track:
def __init__(self, track_position=[-1,-1], track_height=0, track_length=0, track_draw_proportion=0.75, scale=False, tick_marks=True, tick_mark_number=5, tick_mark_labels=True, minor_tick_marks=True, minor_tick_mark_number=3, features=[], beginning=0, end=-1):
self.track_position=track_position#horizontal and vertical position of centre of track
self.track_height=track_height#height of space allocated for track
self.track_length=track_length
self.track_draw_proportion=track_draw_proportion#proportion of the track that should be used for drawing
self.scale=scale
self.scale_position="middle"
self.tick_marks=tick_marks
self.tick_mark_number=tick_mark_number
self.tick_mark_labels=tick_mark_labels
self.tick_mark_label_font="Helvetica"
self.tick_mark_label_size=8
self.tick_mark_label_angle=45
self.minor_tick_marks=minor_tick_marks
self.minor_tick_mark_number=minor_tick_mark_number
self.features=features[:]
self.scaled_features=features[:]
self.draw_feature_labels=False
self.feature_label_size=8
self.feature_label_angle=0
self.feature_label_font="Helvetica"
self.greytrack=False
self.grey_track_colour=colors.Color(0.25,0.25,0.25)
self.grey_track_opacity_percent=10
self.max_feature_length=-1
self.beginning=0
self.end=-1
self.track_number=-1
self.plots=[]
self.fragments=1
self.name=""
self.show_name=False
self.name_font="Helvetica"
self.name_size=10
self.name_length=0
self.is_key=False
self.key_data=[]
def get_max_feature_length(self):
max_feature_length=0
for feature in self.features:
for location in feature.feature_locations:
if location[0]>max_feature_length:
max_feature_length=location[0]
if location[1]>max_feature_length:
max_feature_length=location[1]
return max_feature_length
def scale_feature_positions(self):
self.scaled_features=[]
if self.end!=-1:
length=float(self.end-self.beginning)
else:
length=float(self.max_feature_length-self.beginning)
for feature in self.features:
newfeature=Feature()
newfeature.fillcolour=feature.fillcolour
newfeature.strokecolour=feature.strokecolour
newfeature.strokeweight=feature.strokeweight
newfeature.strand=feature.strand
newfeature.label=feature.label
newfeature.arrows=feature.arrows
scaledlocations=[]
for location in feature.feature_locations:
start=location[0]
finish=location[1]
if self.beginning!=0:
if start<self.beginning and finish>self.beginning:
start=self.beginning
if self.end!=-1:
if start<self.end and finish>self.end:
finish=self.end
start-=self.beginning
finish-=self.beginning
scaledlocations.append(((float(start)/length)*self.track_length,(float(finish)/length)*self.track_length))
newfeature.feature_locations=scaledlocations
self.scaled_features.append(newfeature)
def draw_features(self):
if self.max_feature_length==-1:
return
else:
self.scale_feature_positions()
featuresort=[]
for x, feature in enumerate(self.scaled_features):
featuresort.append([feature.feature_locations[0][0], x])
joins=[]
for featurenum in featuresort[::-1]:
feature=self.scaled_features[featurenum[1]]
#if the feature is white, outline it in black so we can see it
if feature.strokecolour==colors.Color(1,1,1,1):
feature.strokecolour=colors.Color(0,0,0,1)
subfeaturesort=[]
for x, subfeature in enumerate(feature.feature_locations):
subfeaturesort.append([subfeature[0], x])
subfeaturesort.sort()
subfeature_locations=[]
for subfeaturenum in subfeaturesort:
subfeature_locations.append(feature.feature_locations[subfeaturenum[1]])
for x, location in enumerate(subfeature_locations):
if (location[0]>0 and location[0]<=self.track_length) or (location[1]>0 and location[1]<=self.track_length):
y=self.track_position[1]-((float(self.track_height)/4)*self.track_draw_proportion)
height=(float(self.track_height)*self.track_draw_proportion)/2
y1=self.track_position[1]
y2=self.track_position[1]+((float(self.track_height)/8)*self.track_draw_proportion)
if feature.arrows==0:
d.add(Rect(self.track_position[0]+location[0], y, location[1]-location[0], height, fillColor=feature.fillcolour, strokeColor=feature.strokecolour, strokeWidth=feature.strokeweight))
if len(subfeature_locations)>x+1 and subfeature_locations[x+1][0]<=self.track_length:
if subfeature_locations[x+1][0]<location[1]:
joinheight=y1
elif y2>y1:
if (y2-y1)>(float(subfeature_locations[x+1][0]-location[1])/2):
joinheight=y1+(float(subfeature_locations[x+1][0]-location[1])/2)
else:
joinheight=y2
else:
if (y1-y2)>(float(subfeature_locations[x+1][0]-location[1])/2):
joinheight=y1-(float(subfeature_locations[x+1][0]-location[1])/2)
else:
joinheight=y2
joins.append(Line(self.track_position[0]+location[1], y1, self.track_position[0]+location[1]+(float(subfeature_locations[x+1][0]-location[1])/2), joinheight, strokeDashArray=[0.5, 1], strokeWidth=0.5))
joins.append(Line(self.track_position[0]+((location[1]+subfeature_locations[x+1][0])/2), joinheight, self.track_position[0]+location[1]+(float(subfeature_locations[x+1][0]-location[1])), y1, strokeDashArray=[0.5, 1], strokeWidth=0.5))
for join in joins:
d.add(join)
self.scaled_features=[]
def draw_track(self):
self.draw_features()
def add_feature(self,locations=[(-1,-1)], fillcolour=colors.white, strokecolour=colors.black, strokeweight=0, label="", strand=0, arrows=0):
newfeature=Feature()
feature_locations=[]
for location in locations:
if location[0]>location[1]:
feature_locations.append((location[1],location[0]))
else:
feature_locations.append((location[0],location[1]))
newfeature.feature_locations=feature_locations
newfeature.fillcolour=fillcolour
newfeature.strokecolour=strokecolour
newfeature.strokeweight=strokeweight
newfeature.strand=strand
newfeature.label=label
newfeature.arrows=arrows
self.features.append(newfeature)
def sort_features_by_length(self):
featurelist=[]
ordered_features=[]
for x, feature in enumerate(self.features):
featurelist.append([feature.feature_locations[-1][1]-feature.feature_locations[0][0], x])
featurelist.sort()
#featurelist.reverse()
for feature in featurelist:
ordered_features.append(self.features[feature[1]])
self.features=ordered_features[:]
#################
# Feature class #
#################
class Feature:
def __init__(self):
self.feature_locations=[(-1,-1)]
self.strand=0
self.arrows=0
self.label=""
self.fillcolour=colors.blue
self.strokecolour=colors.black
self.strokeweight=0
################
# Main program #
################
if __name__ == "__main__":
options = main()
pagesize=pagesizes.A4
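	# pagesizes.A4 is (width, height); unpacking it the other way round below lays the
	# drawing out on a landscape page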
height, width = pagesize
if len(options.embl_file)==0:
print("Found nothing to draw")
sys.exit()
d = Drawing(width, height)
margin=0.5*inch
metadatanames={}
namecolours={}
colour_dict=[]
my_tracks={}
#create translator object for translating artemis colours to GenomeDiagram colours
translator = ColorTranslator()
track_count=0
tree_positions=[]
track_names={}
input_order=[]
new_tracks=add_ordered_tab_to_diagram(options.embl_file)
for track in new_tracks:
newtrack=new_tracks[track]
newtrack.beginning=0
newtrack.name=new_tracks[track].name
name=newtrack.name
x=1
while name in my_tracks:
name=newtrack.name+"_"+str(x)
x+=1
if not newtrack.name in track_names:
track_names[newtrack.name]=[]
input_order.append(name)
track_names[newtrack.name].append(name)
track_count+=1
newtrack.track_height=1
my_tracks[name]=newtrack
treenames=[]
tree_name_to_node={}
listnames=[]
if options.tree!="":
if not os.path.isfile(options.tree):
print("Cannot find file:", options.tree)
options.tree=""
else:
			treestring=open(options.tree,"r").read().strip()
tree=Trees.Tree(treestring, rooted=True)
tree.root
treeterminals=tree.get_terminals()
totalbr=0.0
for terminal_node in treeterminals:
terminal=tree.node(terminal_node).data.taxon
treenames.append(terminal)
if not terminal in track_names:
track_count+=1
tree_name_to_node[terminal]=terminal_node
tree.node(terminal_node).data.comment={}
tree.node(terminal_node).data.comment["name_colour"]=[(0,0,0)]
#from this we can work out a constant for the height of a track which takes into account the height of the page and margin sizes
vertical_scaling_factor=float(height-(margin*2))/(track_count)
#to make sure names can be printed in the space of a track, we can scale the name to the same size as the vertical scaling factor, but limit it to 12pt so it doesn't get crazily big
name_font_size=vertical_scaling_factor
if name_font_size>12:
name_font_size=12
left_proportion=0.3
treetrack=0
output_order=treenames[::-1]
for name in input_order[::-1]:
if not name in treenames:
output_order.append(name)
track_number=0
for track in output_order:
if(track not in my_tracks):
my_tracks = add_empty_track(my_tracks, track)
track_height=my_tracks[track].track_height
my_tracks[track].track_draw_proportion=0.8
my_tracks[track].track_height=track_height*vertical_scaling_factor
if left_proportion==1:
my_tracks[track].track_length=(width-margin)-((width-(margin*2))*0.2+margin)
my_tracks[track].track_position=[(width-(margin*2))*0.2+margin, margin+((track_number)*vertical_scaling_factor)+float((my_tracks[track].track_height)/2)]
else:
my_tracks[track].track_length=(width-margin)-((width-(margin*2))*left_proportion+margin)
my_tracks[track].track_position=[(width-(margin*2))*left_proportion+margin, margin+((track_number)*vertical_scaling_factor)+float((my_tracks[track].track_height)/2)]
my_tracks[track].track_number=track_number
if track in treenames:
tree.node(tree_name_to_node[track]).data.comment["vertpos"]=margin+((track_number)*vertical_scaling_factor)+float((my_tracks[track].track_height)/2)
my_tracks[track].grey_track_colour=colors.Color(0,0,0)
track_number+=track_height
#find the maximum feature endpoint to scale by
max_feature_length=0
for track in my_tracks:
max_track_feature_length=my_tracks[track].get_max_feature_length()
if max_track_feature_length>max_feature_length:
max_feature_length=max_track_feature_length
for plot in my_tracks[track].plots:
for data in plot.xdata:
if data[-1]>max_feature_length:
max_feature_length=data[-1]
#tell each track what the max feature length is
for track in my_tracks:
if my_tracks[track].max_feature_length<max_feature_length:
my_tracks[track].max_feature_length=max_feature_length
beginning=0
end=max_feature_length
for track in output_order:
	# No track created by this script ever has is_key set, and the original test also
	# referenced an undefined 'fragment' variable, so only the live conditions remain.
	if not track in my_tracks or my_tracks[track].track_length==0:
continue
my_tracks[track].beginning=beginning
my_tracks[track].end=end
my_tracks[track].track_position[1]=margin+(((my_tracks[track].track_number)*vertical_scaling_factor)+(my_tracks[track].track_height)/2)
my_tracks[track].sort_features_by_length()
my_tracks[track].draw_track()
if options.tree!="":
drawtree(tree, height-(margin*2), (width-(margin*2))*left_proportion, margin, 0, 5)
renderPDF.drawToFile(d, options.outputfile)
class DrawerError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
|
gpl-2.0
| -7,902,293,239,757,884,000
| 31.198773
| 323
| 0.685657
| false
| 3.227401
| false
| false
| false
|
jupyter/jupyterlab
|
examples/cell/main.py
|
4
|
2644
|
"""
An example demonstrating a stand-alone "notebook".
Copyright (c) Jupyter Development Team.
Distributed under the terms of the Modified BSD License.
Example
-------
To run the example, see the instructions in the README to build it. Then
run ``python main.py``.
"""
import os
import json
from jupyterlab_server import LabServerApp
from jupyter_server.base.handlers import JupyterHandler
from jupyter_server.extension.handler import ExtensionHandlerMixin, ExtensionHandlerJinjaMixin
from jupyter_server.utils import url_path_join as ujoin
HERE = os.path.dirname(__file__)
with open(os.path.join(HERE, 'package.json')) as fid:
version = json.load(fid)['version']
def _jupyter_server_extension_points():
return [
{
'module': __name__,
'app': ExampleApp
}
]
class ExampleHandler(
ExtensionHandlerJinjaMixin,
ExtensionHandlerMixin,
JupyterHandler
):
"""Handle requests between the main app page and notebook server."""
def get(self):
"""Get the main page for the application's interface."""
config_data = {
# Use camelCase here, since that's what the lab components expect
"appVersion": version,
'baseUrl': self.base_url,
'token': self.settings['token'],
'fullStaticUrl': ujoin(self.base_url, 'static', self.name),
'frontendUrl': ujoin(self.base_url, 'example/'),
}
return self.write(
self.render_template(
'index.html',
static=self.static_url,
base_url=self.base_url,
token=self.settings['token'],
page_config=config_data
)
)
class ExampleApp(LabServerApp):
extension_url = '/example'
default_url = '/example'
app_url = "/example"
name = __name__
load_other_extensions = False
app_name = 'JupyterLab Example Cell'
static_dir = os.path.join(HERE, 'build')
templates_dir = os.path.join(HERE, 'templates')
app_version = version
app_settings_dir = os.path.join(HERE, 'build', 'application_settings')
schemas_dir = os.path.join(HERE, 'build', 'schemas')
themes_dir = os.path.join(HERE, 'build', 'themes')
user_settings_dir = os.path.join(HERE, 'build', 'user_settings')
workspaces_dir = os.path.join(HERE, 'build', 'workspaces')
def initialize_handlers(self):
"""Add example handler to Lab Server's handler list.
"""
self.handlers.append(
('/example', ExampleHandler)
)
if __name__ == '__main__':
ExampleApp.launch_instance()
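# Running "python main.py" (after the build step described in the module docstring) starts a
# Jupyter server whose single-cell example UI is served from the /example endpoint above.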
|
bsd-3-clause
| -5,810,901,252,969,317,000
| 28.707865
| 94
| 0.621785
| false
| 3.820809
| false
| false
| false
|
cjvogl/finite_volume_seismic_model
|
3d/setplot_pwaves.py
|
1
|
3701
|
"""
Set up the plot figures, axes, and items to be done for each frame.
This module is imported by the plotting routines and then the
function setplot is called to set the plot parameters.
"""
import numpy as np
import os, shutil
from mapping import Mapping
import dtopotools_horiz_okada_and_1d as dtopotools
length_scale = 1.0e-3 # m to km
xlimits = [-150.0e3*length_scale,200.0e3*length_scale]
zlimits = [-175.0e3*length_scale,0.0]
#--------------------------
def setplot(plotdata):
#--------------------------
"""
Specify what is to be plotted at each frame.
Input: plotdata, an instance of clawpack.visclaw.data.ClawPlotData.
Output: a modified version of plotdata.
"""
slice_number = 3
tmpdir = os.path.abspath(os.curdir)
os.chdir(plotdata.outdir)
for filename in os.listdir('.'):
if (filename.startswith('slice_%d' % slice_number)):
shutil.copyfile(filename,filename.replace('slice_%d' % slice_number,'fort',1))
fault = dtopotools.Fault()
fault.read('fault.data')
os.chdir(tmpdir)
mapping = Mapping(fault)
xp1 = mapping.xp1*length_scale
xp2 = mapping.xp2*length_scale
zp1 = mapping.zp1*length_scale
zp2 = mapping.zp2*length_scale
xcenter = mapping.xcenter
ycenter = mapping.ycenter
def mapc2p(xc,yc):
xp,yp = mapping.mapc2p_xz(xc,yc)
return xp*length_scale,yp*length_scale
def plot_fault(current_data):
from pylab import linspace, plot, xlabel, ylabel, tick_params
xl = linspace(xp1,xp2,100)
zl = linspace(zp1,zp2,100)
plot(xl,zl,'g',linewidth=3)
tick_params(labelsize=25)
xlabel('kilometers',fontsize=25)
ylabel('kilometers',fontsize=25)
from clawpack.visclaw import colormaps
plotdata.clearfigures() # clear any old figures,axes,items data
#plotdata.format = 'binary'
def sigmatr(current_data):
# return -trace(sigma)
q = current_data.q
return -(q[0,:,:] + q[1,:,:] + q[2,:,:])
# Figure for trace(sigma)
plotfigure = plotdata.new_plotfigure(name='fault', figno=1)
plotfigure.kwargs = {'figsize':(11,6)}
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes()
plotaxes.xlimits = xlimits
plotaxes.ylimits = zlimits
plotaxes.title_with_t = False
plotaxes.title = ''
plotaxes.scaled = True
plotaxes.afteraxes = plot_fault
# Set up for item on these axes:
plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')
plotitem.plot_var = sigmatr
plotitem.pcolor_cmap = colormaps.blue_white_red
plotitem.pcolor_cmin = -1e6
plotitem.pcolor_cmax = 1e6
plotitem.add_colorbar = False
plotitem.amr_celledges_show = [0]
plotitem.amr_patchedges_show = [0]
plotitem.MappedGrid = True
plotitem.mapc2p = mapc2p
# Parameters used only when creating html and/or latex hardcopy
# e.g., via clawpack.visclaw.frametools.printframes:
plotdata.printfigs = True # print figures
plotdata.print_format = 'png' # file format
plotdata.print_framenos = 'all' # list of frames to print
plotdata.print_fignos = 'all' # list of figures to print
plotdata.html = True # create html files of plots?
plotdata.html_homelink = '../README.html' # pointer for top of index
plotdata.latex = True # create latex file of plots?
plotdata.latex_figsperline = 2 # layout of plots
plotdata.latex_framesperline = 1 # layout of plots
plotdata.latex_makepdf = False # also run pdflatex?
# plotdata.parallel = True
return plotdata
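# Typical use (a sketch only; paths and names depend on the local Clawpack setup): the
# plotting driver imports this module and calls setplot(plotdata), for example via
# "make .plots" or clawpack.visclaw.plotclaw.plotclaw(outdir="_output", setplot="setplot_pwaves.py").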
|
gpl-3.0
| -4,647,494,930,394,782,000
| 31.464912
| 90
| 0.640367
| false
| 3.272325
| false
| false
| false
|
VitorHugoAguiar/ProBot
|
ProBot_BeagleBone/PIDControllersFile.py
|
1
|
4310
|
#!/usr/bin/python
# Python Standard Library Imports
import time
# Local files
import ProBotConstantsFile
# Initialization of classes from local files
Pconst = ProBotConstantsFile.Constants()
# PID functions
class PIDControllersClass():
# Build a constructor
def __init__(self):
self.error = 0
self.SaberTooth_KpV = 280
self.SaberTooth_KiV = 0.6
self.SaberTooth_KdV = 12
self.SaberTooth_KpA = 18
self.SaberTooth_KiA = 2.2
self.SaberTooth_KdA = -2
self.PWM_KpV = 75
self.PWM_KiV = 0.6
self.PWM_KdV = 0.2
self.PWM_KpA = 9
self.PWM_KiA = 3
self.PWM_KdA = -0.001
self.limitV = 800
self.limitA = 1000
self.integrated_error_V1 = 0
self.integrated_error_V2 = 0
self.integrated_error_A1 = 0
self.integrated_error_A2 = 0
self.last_error_V1 = 0
self.last_error_V2 = 0
self.last_error_A1 = 0
self.last_error_A2 = 0
def standardPID(self, reference, measured, type, userChoice):
self.error = float(reference - measured)
# Load the right values for the controllers, depending on if we are using Sabertooth of PWM controller
if userChoice=='1':
KpV = self.SaberTooth_KpV
KiV = self.SaberTooth_KiV
KdV = self.SaberTooth_KdV
KpA = self.SaberTooth_KpA
KiA = self.SaberTooth_KiA
KdA = self.SaberTooth_KdA
if userChoice=='2':
KpV = self.PWM_KpV
KiV = self.PWM_KiV
KdV = self.PWM_KdV
KpA = self.PWM_KpA
KiA = self.PWM_KiA
KdA = self.PWM_KdA
# Loading the variables for the controllers
typeController = {
'Velocity1': [KpV, KiV, KdV, self.limitV, self.integrated_error_V1, self.last_error_V1],
'Velocity2': [KpV, KiV, KdV, self.limitV, self.integrated_error_V2, self.last_error_V2],
'Angle1': [KpA, KiA, KdA, self.limitA, self.integrated_error_A1, self.last_error_A1],
'Angle2': [KpA, KiA, KdA, self.limitA, self.integrated_error_A2, self.last_error_A2]}
controllerVar = typeController[type]
# Code for the PID controllers
pTerm = float(controllerVar[0] * self.error)
controllerVar[4] += float(self.error)
# Limiting the integrated error, avoiding windup
controllerVar[4] = max(-controllerVar[3], min(controllerVar[4], controllerVar[3]))
iTerm = float(controllerVar[1] * controllerVar[4])
dTerm = float(controllerVar[2] * (self.error - controllerVar[5]))
controllerVar[5] = self.error
PID_result = float(pTerm + iTerm + dTerm)
# Updating the integrated error and the last error for the next loop
        if type == 'Velocity1':
            self.integrated_error_V1 = controllerVar[4]
            self.last_error_V1 = controllerVar[5]
        if type == 'Velocity2':
            self.integrated_error_V2 = controllerVar[4]
            self.last_error_V2 = controllerVar[5]
        if type == 'Angle1':
            self.integrated_error_A1 = controllerVar[4]
            self.last_error_A1 = controllerVar[5]
        if type == 'Angle2':
            self.integrated_error_A2 = controllerVar[4]
            self.last_error_A2 = controllerVar[5]
        # Limiting the output: the Sabertooth controller accepts -127..127 and the PWM
        # controller a percentage (-100..100). The duplicate clamp that sat inside the
        # 'Angle1' branch was redundant, so only this final clamp is kept.
        if userChoice == '1':
            PID_result = max(-127, min(PID_result, 127))
        if userChoice == '2':
            PID_result = max(-100, min(PID_result, 100))
return -PID_result
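# Minimal usage sketch (not part of the original ProBot code; the measurement names are
# illustrative only):
#
#   pid = PIDControllersClass()
#   angle_out = pid.standardPID(0.0, imu_pitch, 'Angle1', userChoice='2')
#   speed_out = pid.standardPID(target_speed, wheel_speed, 'Velocity1', userChoice='2')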
|
agpl-3.0
| -3,683,972,053,555,202,600
| 39.280374
| 163
| 0.547796
| false
| 3.420635
| false
| false
| false
|
FarzanHajian/CreateSwap
|
src/createswap2.py
|
1
|
4347
|
#!/usr/bin/env python
# encoding: utf-8
'''
create_swap.py
A Python 2 script for creating and removing Linux swap files.
Copyright (C) 2016 Farzan Hajian
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software Foundation,
Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
@author: Farzan Hajian
@copyright: 2016. All rights reserved.
@license: GPL3
@contact: farzan.hajian@gmail.com
NOTE:
THIS SCRIPT WORKS ONLY WITH PYTHON VERSION 2.
FOR PYTHON 3, USE "createswap.py".
'''
import sys
import os
import argparse
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--file', help='the file full name on which the swap space is going to be built (must be used with --size option)')
parser.add_argument('-s', '--size', help='size of the swap space in megabytes (must be used with --file option)', type=int)
parser.add_argument('-o', '--off', help='removes the swap file and disables its swap space', metavar='FILE')
parser.add_argument('--verbose', help='executes in the verbose mode (useful for tracking errors)', action='store_true')
args = parser.parse_args()
try:
if not args.file and not args.size and not args.off:
if not args.verbose:
parser.print_help()
raise Exception()
else:
raise Exception("--verbose option cannot be used alone")
if(args.file and not args.size) or (not args.file and args.size):
raise Exception("--file and --size options must be used together")
if args.off and (args.file or args.size):
raise Exception("--off option cannot be used with other options")
except Exception as ex:
show_error(ex, 3)
return args
def is_verbose():
return args.verbose
def print_header():
os.system('clear')
print('-'*50)
print('createswap.py v 2.0 (Python 2)\n')
print('This program is published under GPL v3 license')
print('You can contact me at farzan.hajian@gmail.com')
print('-'*50)
def print_step(message):
if is_verbose():
print ("")
print '%-40.40s'%message
else:
print '%-40.40s'%message,
def print_status(is_failed=False):
status = ('Failed' if is_failed else 'OK')
print('[%s]'%status)
def show_error(exception, exit_code):
print('\n%s'%exception)
sys.exit(exit_code)
def sudo():
os.system('sudo id > /dev/null')
def exec_step(message, command, arg_tuple=None):
print_step(message)
command = 'sudo ' + command
if not is_verbose(): command += ' > /dev/null 2>&1'
if arg_tuple != None:
exit_code = os.system(command.format(*arg_tuple))
else:
exit_code = os.system(command)
if exit_code == 0:
print_status()
else:
print_status(True)
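# Example (illustrative only; this is essentially what create_swap() below does for a
# 1024 MB file): exec_step('Creating the file', 'dd if=/dev/zero of={} bs=1M count={}',
# ('/swapfile', 1024)) ends up running "sudo dd if=/dev/zero of=/swapfile bs=1M count=1024".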
def create_swap(filename, size):
try:
tuple1 = (filename, size)
tuple2 = (filename,)
exec_step('Creating the file', 'dd if=/dev/zero of={} bs=1M count={}', tuple1)
exec_step('Setting the file access mode', 'chmod 600 {}', tuple2)
exec_step('Setting up the swap space', 'mkswap {}', tuple2)
exec_step('Enabling the swap space', 'swapon {}', tuple2)
except Exception as ex:
show_error(ex, 2)
def drop_swap(filename):
try:
tuple1 = (filename,)
exec_step('Disabling the swap space', 'swapoff {}', tuple1)
exec_step('Removing the file', 'rm {}', tuple1)
except Exception as ex:
show_error(ex, 2)
# program entry point
print_header()
args = parse_args()
sudo()
if args.file:
create_swap(args.file, args.size)
elif args.off:
drop_swap(args.off)
print("")
|
gpl-3.0
| -7,813,304,307,696,668,000
| 30.5
| 145
| 0.635381
| false
| 3.72813
| false
| false
| false
|
hbldh/sudokuextract
|
sudokuextract/imgproc/binary.py
|
1
|
1736
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
:mod:`binary`
==================
Created by hbldh <henrik.blidh@nedomkull.com>
Created on 2016-01-26
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import numpy as np
from skimage.transform import resize
from skimage.filters import threshold_otsu
from skimage.filters import gaussian_filter, threshold_adaptive
def to_binary_otsu(img, invert=False):
if img.dtype == np.bool:
img = np.array(img, 'uint8')
if img.max() == img.min():
if img.min() == 1:
return np.array(img * 255, 'uint8')
else:
return np.array(img, 'uint8')
else:
t = threshold_otsu(img)
img[img <= t] = 255 if invert else 0
img[img > t] = 0 if invert else 255
return np.array(img, 'uint8')
def to_binary_adaptive(img):
sigma = 1.0
m = max(img.shape)
if m > 2000:
block_size = 80
elif m > 1500:
block_size = 50
elif m > 1000:
block_size = 35
else:
block_size = 20
bimg = gaussian_filter(img, sigma=sigma)
bimg = threshold_adaptive(bimg, block_size, offset=2 / 255)
bimg = np.array(bimg, 'uint8') * 255
return bimg
def add_border(img, size=(28, 28), border_size=0, background_value=255):
img = resize(img, (size[0] - border_size * 2,
size[1] - border_size * 2))
img = np.array(img * 255, 'uint8')
output_img = np.ones(size, 'uint8') * background_value
if border_size == 0:
output_img[:, :] = img
else:
output_img[border_size:-border_size, border_size:-border_size] = img
return output_img
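# Minimal usage sketch (not part of the original module; requires numpy and scikit-image,
# which are already imported above): binarise a synthetic greyscale image and pad a crop
# of it to the default 28x28 cell produced by add_border().
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    img = (rng.rand(100, 100) * 255).astype('uint8')
    bw = to_binary_otsu(img)
    cell = add_border(bw[:40, :40], size=(28, 28), border_size=2)
    print(bw.shape, cell.shape, cell.dtype)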
|
mit
| 2,918,771,141,372,004,400
| 24.910448
| 76
| 0.597926
| false
| 3.197053
| false
| false
| false
|
GerbenJavado/LinkFinder
|
linkfinder.py
|
1
|
13951
|
#!/usr/bin/env python
# Python 3
# LinkFinder
# By Gerben_Javado
# Fix webbrowser bug for MacOS
import os
os.environ["BROWSER"] = "open"
# Import libraries
import re, sys, glob, html, argparse, jsbeautifier, webbrowser, subprocess, base64, ssl, zlib, xml.etree.ElementTree
from gzip import GzipFile
from string import Template
try:
from StringIO import StringIO
readBytesCustom = StringIO
except ImportError:
from io import BytesIO
readBytesCustom = BytesIO
try:
from urllib.request import Request, urlopen
except ImportError:
from urllib2 import Request, urlopen
# Regex used
regex_str = r"""
(?:"|') # Start newline delimiter
(
((?:[a-zA-Z]{1,10}://|//) # Match a scheme [a-Z]*1-10 or //
[^"'/]{1,}\. # Match a domainname (any character + dot)
[a-zA-Z]{2,}[^"']{0,}) # The domainextension and/or path
|
((?:/|\.\./|\./) # Start with /,../,./
[^"'><,;| *()(%%$^/\\\[\]] # Next character can't be...
[^"'><,;|()]{1,}) # Rest of the characters can't be
|
([a-zA-Z0-9_\-/]{1,}/ # Relative endpoint with /
[a-zA-Z0-9_\-/]{1,} # Resource name
\.(?:[a-zA-Z]{1,4}|action) # Rest + extension (length 1-4 or action)
(?:[\?|#][^"|']{0,}|)) # ? or # mark with parameters
|
([a-zA-Z0-9_\-/]{1,}/ # REST API (no extension) with /
[a-zA-Z0-9_\-/]{3,} # Proper REST endpoints usually have 3+ chars
(?:[\?|#][^"|']{0,}|)) # ? or # mark with parameters
|
([a-zA-Z0-9_\-]{1,} # filename
\.(?:php|asp|aspx|jsp|json|
action|html|js|txt|xml) # . + extension
(?:[\?|#][^"|']{0,}|)) # ? or # mark with parameters
)
(?:"|') # End newline delimiter
"""
context_delimiter_str = "\n"
def parser_error(errmsg):
'''
Error Messages
'''
print("Usage: python %s [Options] use -h for help" % sys.argv[0])
print("Error: %s" % errmsg)
sys.exit()
def parser_input(input):
'''
Parse Input
'''
# Method 1 - URL
if input.startswith(('http://', 'https://',
'file://', 'ftp://', 'ftps://')):
return [input]
# Method 2 - URL Inspector Firefox
if input.startswith('view-source:'):
return [input[12:]]
# Method 3 - Burp file
if args.burp:
jsfiles = []
items = xml.etree.ElementTree.fromstring(open(args.input, "r").read())
for item in items:
jsfiles.append({"js":base64.b64decode(item.find('response').text).decode('utf-8',"replace"), "url":item.find('url').text})
return jsfiles
# Method 4 - Folder with a wildcard
if "*" in input:
paths = glob.glob(os.path.abspath(input))
for index, path in enumerate(paths):
paths[index] = "file://%s" % path
return (paths if len(paths) > 0 else parser_error('Input with wildcard does \
not match any files.'))
# Method 5 - Local file
path = "file://%s" % os.path.abspath(input)
return [path if os.path.exists(input) else parser_error("file could not \
be found (maybe you forgot to add http/https).")]
def send_request(url):
'''
Send requests with Requests
'''
q = Request(url)
q.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) \
AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36')
q.add_header('Accept', 'text/html,\
application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8')
q.add_header('Accept-Language', 'en-US,en;q=0.8')
q.add_header('Accept-Encoding', 'gzip')
q.add_header('Cookie', args.cookies)
try:
sslcontext = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
response = urlopen(q, timeout=args.timeout, context=sslcontext)
except:
sslcontext = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
response = urlopen(q, timeout=args.timeout, context=sslcontext)
if response.info().get('Content-Encoding') == 'gzip':
data = GzipFile(fileobj=readBytesCustom(response.read())).read()
    elif response.info().get('Content-Encoding') == 'deflate':
        # the original called .read() twice on the response; decompress the payload
        # instead (assumes a raw DEFLATE stream, hence the negative window bits)
        data = zlib.decompress(response.read(), -zlib.MAX_WBITS)
else:
data = response.read()
return data.decode('utf-8', 'replace')
def getContext(list_matches, content, include_delimiter=0, context_delimiter_str="\n"):
'''
Parse Input
list_matches: list of tuple (link, start_index, end_index)
content: content to search for the context
include_delimiter Set 1 to include delimiter in context
'''
items = []
for m in list_matches:
match_str = m[0]
match_start = m[1]
match_end = m[2]
context_start_index = match_start
context_end_index = match_end
delimiter_len = len(context_delimiter_str)
content_max_index = len(content) - 1
while content[context_start_index] != context_delimiter_str and context_start_index > 0:
context_start_index = context_start_index - 1
while content[context_end_index] != context_delimiter_str and context_end_index < content_max_index:
context_end_index = context_end_index + 1
if include_delimiter:
context = content[context_start_index: context_end_index]
else:
context = content[context_start_index + delimiter_len: context_end_index]
item = {
"link": match_str,
"context": context
}
items.append(item)
return items
def parser_file(content, regex_str, mode=1, more_regex=None, no_dup=1):
'''
Parse Input
content: string of content to be searched
regex_str: string of regex (The link should be in the group(1))
mode: mode of parsing. Set 1 to include surrounding contexts in the result
more_regex: string of regex to filter the result
no_dup: remove duplicated link (context is NOT counted)
Return the list of ["link": link, "context": context]
The context is optional if mode=1 is provided.
'''
global context_delimiter_str
if mode == 1:
# Beautify
if len(content) > 1000000:
content = content.replace(";",";\r\n").replace(",",",\r\n")
else:
content = jsbeautifier.beautify(content)
regex = re.compile(regex_str, re.VERBOSE)
if mode == 1:
all_matches = [(m.group(1), m.start(0), m.end(0)) for m in re.finditer(regex, content)]
items = getContext(all_matches, content, context_delimiter_str=context_delimiter_str)
else:
items = [{"link": m.group(1)} for m in re.finditer(regex, content)]
if no_dup:
# Remove duplication
all_links = set()
no_dup_items = []
for item in items:
if item["link"] not in all_links:
all_links.add(item["link"])
no_dup_items.append(item)
items = no_dup_items
# Match Regex
filtered_items = []
for item in items:
# Remove other capture groups from regex results
if more_regex:
if re.search(more_regex, item["link"]):
filtered_items.append(item)
else:
filtered_items.append(item)
return filtered_items
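# Quick sketch of how the function behaves (the JS snippet is made up):
# parser_file('var a = "/api/items";', regex_str, mode=0) returns [{"link": "/api/items"}];
# with mode=1 each item also carries the surrounding line of source as its "context".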
def cli_output(endpoints):
'''
Output to CLI
'''
for endpoint in endpoints:
print(html.escape(endpoint["link"]).encode(
'ascii', 'ignore').decode('utf8'))
def html_save(html):
'''
Save as HTML file and open in the browser
'''
hide = os.dup(1)
os.close(1)
os.open(os.devnull, os.O_RDWR)
try:
s = Template(open('%s/template.html' % sys.path[0], 'r').read())
text_file = open(args.output, "wb")
text_file.write(s.substitute(content=html).encode('utf8'))
text_file.close()
print("URL to access output: file://%s" % os.path.abspath(args.output))
file = "file:///%s" % os.path.abspath(args.output)
if sys.platform == 'linux' or sys.platform == 'linux2':
subprocess.call(["xdg-open", file])
else:
webbrowser.open(file)
except Exception as e:
print("Output can't be saved in %s \
due to exception: %s" % (args.output, e))
finally:
os.dup2(hide, 1)
def check_url(url):
nopelist = ["node_modules", "jquery.js"]
if url[-3:] == ".js":
words = url.split("/")
for word in words:
if word in nopelist:
return False
if url[:2] == "//":
url = "https:" + url
if url[:4] != "http":
if url[:1] == "/":
url = args.input + url
else:
url = args.input + "/" + url
return url
else:
return False
if __name__ == "__main__":
# Parse command line
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--domain",
help="Input a domain to recursively parse all javascript located in a page",
action="store_true")
parser.add_argument("-i", "--input",
help="Input a: URL, file or folder. \
For folders a wildcard can be used (e.g. '/*.js').",
required="True", action="store")
parser.add_argument("-o", "--output",
help="Where to save the file, \
including file name. Default: output.html",
action="store", default="output.html")
parser.add_argument("-r", "--regex",
help="RegEx for filtering purposes \
against found endpoint (e.g. ^/api/)",
action="store")
parser.add_argument("-b", "--burp",
help="",
action="store_true")
parser.add_argument("-c", "--cookies",
help="Add cookies for authenticated JS files",
action="store", default="")
default_timeout = 10
parser.add_argument("-t", "--timeout",
help="How many seconds to wait for the server to send data before giving up (default: " + str(default_timeout) + " seconds)",
default=default_timeout, type=int, metavar="<seconds>")
args = parser.parse_args()
if args.input[-1:] == "/":
args.input = args.input[:-1]
mode = 1
if args.output == "cli":
mode = 0
# Convert input to URLs or JS files
urls = parser_input(args.input)
# Convert URLs to JS
output = ''
for url in urls:
if not args.burp:
try:
file = send_request(url)
except Exception as e:
parser_error("invalid input defined or SSL error: %s" % e)
else:
file = url['js']
url = url['url']
endpoints = parser_file(file, regex_str, mode, args.regex)
if args.domain:
for endpoint in endpoints:
endpoint = html.escape(endpoint["link"]).encode('ascii', 'ignore').decode('utf8')
endpoint = check_url(endpoint)
if endpoint is False:
continue
print("Running against: " + endpoint)
print("")
try:
file = send_request(endpoint)
new_endpoints = parser_file(file, regex_str, mode, args.regex)
if args.output == 'cli':
cli_output(new_endpoints)
else:
output += '''
<h1>File: <a href="%s" target="_blank" rel="nofollow noopener noreferrer">%s</a></h1>
''' % (html.escape(endpoint), html.escape(endpoint))
for endpoint2 in new_endpoints:
url = html.escape(endpoint2["link"])
header = "<div><a href='%s' class='text'>%s" % (
html.escape(url),
html.escape(url)
)
body = "</a><div class='container'>%s</div></div>" % html.escape(
endpoint2["context"]
)
body = body.replace(
html.escape(endpoint2["link"]),
"<span style='background-color:yellow'>%s</span>" %
html.escape(endpoint2["link"])
)
output += header + body
except Exception as e:
print("Invalid input defined or SSL error for: " + endpoint)
continue
if args.output == 'cli':
cli_output(endpoints)
else:
output += '''
<h1>File: <a href="%s" target="_blank" rel="nofollow noopener noreferrer">%s</a></h1>
''' % (html.escape(url), html.escape(url))
for endpoint in endpoints:
url = html.escape(endpoint["link"])
header = "<div><a href='%s' class='text'>%s" % (
html.escape(url),
html.escape(url)
)
body = "</a><div class='container'>%s</div></div>" % html.escape(
endpoint["context"]
)
body = body.replace(
html.escape(endpoint["link"]),
"<span style='background-color:yellow'>%s</span>" %
html.escape(endpoint["link"])
)
output += header + body
if args.output != 'cli':
html_save(output)
|
mit
| -3,957,937,958,894,511,600
| 33.70398
| 149
| 0.514587
| false
| 3.932074
| false
| false
| false
|
ownport/ansiblite
|
src/ansiblite/utils/path.py
|
1
|
2926
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from errno import EEXIST
from ansiblite.errors import AnsibleError
from ansiblite.utils._text import to_bytes, to_native, to_text
__all__ = ['unfrackpath', 'makedirs_safe']
def unfrackpath(path, follow=True):
'''
Returns a path that is free of symlinks (if follow=True), environment variables, relative path traversals and symbols (~)
:arg path: A byte or text string representing a path to be canonicalized
    :arg follow: A boolean to indicate whether symlinks should be resolved or not
:raises UnicodeDecodeError: If the canonicalized version of the path
contains non-utf8 byte sequences.
    :rtype: A text string (unicode on python2, str on python3).
:returns: An absolute path with symlinks, environment variables, and tilde
expanded. Note that this does not check whether a path exists.
example::
'$HOME/../../var/mail' becomes '/var/spool/mail'
'''
if follow:
final_path = os.path.normpath(os.path.realpath(os.path.expanduser(os.path.expandvars(to_bytes(path, errors='surrogate_or_strict')))))
else:
final_path = os.path.normpath(os.path.abspath(os.path.expanduser(os.path.expandvars(to_bytes(path, errors='surrogate_or_strict')))))
return to_text(final_path, errors='surrogate_or_strict')
def makedirs_safe(path, mode=None):
    '''Safe way to create dirs in multiprocess/thread environments.
:arg path: A byte or text string representing a directory to be created
:kwarg mode: If given, the mode to set the directory to
    :raises AnsibleError: If the directory cannot be created and does not already exist.
:raises UnicodeDecodeError: if the path is not decodable in the utf-8 encoding.
'''
rpath = unfrackpath(path)
b_rpath = to_bytes(rpath)
if not os.path.exists(b_rpath):
try:
if mode:
os.makedirs(b_rpath, mode)
else:
os.makedirs(b_rpath)
except OSError as e:
if e.errno != EEXIST:
raise AnsibleError("Unable to create local directories(%s): %s" % (to_native(rpath), to_native(e)))
|
gpl-3.0
| -6,000,778,393,953,600,000
| 40.211268
| 141
| 0.698906
| false
| 3.880637
| false
| false
| false
|
rigdenlab/SIMBAD
|
simbad/command_line/simbad_full.py
|
1
|
6513
|
#!/usr/bin/env python
__author__ = "Adam Simpkin, and Felix Simkovic"
__contributing_authors__ = "Jens Thomas, and Ronan Keegan"
__credits__ = "Daniel Rigden, William Shepard, Charles Ballard, Villi Uski, and Andrey Lebedev"
__date__ = "05 May 2017"
__email__ = "hlasimpk@liv.ac.uk"
__version__ = "0.1"
import argparse
import os
import sys
from pyjob.stopwatch import StopWatch
import simbad.command_line
import simbad.exit
import simbad.util
import simbad.util.logging_util
import simbad.util.pyrvapi_results
logger = None
def simbad_argparse():
"""Create the argparse options"""
p = argparse.ArgumentParser(
description="SIMBAD: Sequence Independent Molecular replacement Based on Available Database",
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
simbad.command_line._argparse_core_options(p)
simbad.command_line._argparse_job_submission_options(p)
simbad.command_line._argparse_contaminant_options(p)
simbad.command_line._argparse_morda_options(p)
simbad.command_line._argparse_lattice_options(p)
simbad.command_line._argparse_rot_options(p)
simbad.command_line._argparse_mr_options(p)
simbad.command_line._argparse_mtz_options(p)
p.add_argument('mtz', help="The path to the input mtz file")
return p
def main():
"""Main SIMBAD routine"""
args = simbad_argparse().parse_args()
args.work_dir = simbad.command_line.get_work_dir(
args.run_dir, work_dir=args.work_dir, ccp4_jobid=args.ccp4_jobid, ccp4i2_xml=args.ccp4i2_xml
)
log_file = os.path.join(args.work_dir, 'simbad.log')
debug_log_file = os.path.join(args.work_dir, 'debug.log')
global logger
logger = simbad.util.logging_util.setup_logging(args.debug_lvl, logfile=log_file, debugfile=debug_log_file)
if not os.path.isfile(args.amore_exe):
raise OSError("amore executable not found")
gui = simbad.util.pyrvapi_results.SimbadOutput(
args.rvapi_document, args.webserver_uri, args.display_gui, log_file, args.work_dir, ccp4i2_xml=args.ccp4i2_xml, tab_prefix=args.tab_prefix
)
simbad.command_line.print_header()
logger.info("Running in directory: %s\n", args.work_dir)
stopwatch = StopWatch()
stopwatch.start()
end_of_cycle, solution_found, all_results = False, False, {}
while not (solution_found or end_of_cycle):
# =====================================================================================
# Perform the lattice search
solution_found = simbad.command_line._simbad_lattice_search(args)
logger.info("Lattice search completed in %d days, %d hours, %d minutes, and %d seconds",
*stopwatch.lap.time_pretty)
if solution_found and not args.process_all:
logger.info(
"Lucky you! SIMBAD worked its charm and found a lattice match for you.")
continue
elif solution_found and args.process_all:
logger.info(
"SIMBAD thinks it has found a solution however process_all is set, continuing to contaminant search")
else:
logger.info("No results found - lattice search was unsuccessful")
if args.output_pdb and args.output_mtz:
csv = os.path.join(args.work_dir, 'latt/lattice_mr.csv')
all_results['latt'] = simbad.util.result_by_score_from_csv(csv, 'final_r_free', ascending=True)
gui.display_results(False, args.results_to_display)
# =====================================================================================
# Perform the contaminant search
solution_found = simbad.command_line._simbad_contaminant_search(args)
logger.info("Contaminant search completed in %d days, %d hours, %d minutes, and %d seconds",
*stopwatch.lap.time_pretty)
if solution_found and not args.process_all:
logger.info(
"Check you out, crystallizing contaminants! But don't worry, SIMBAD figured it out and found a solution.")
continue
elif solution_found and args.process_all:
logger.info(
"SIMBAD thinks it has found a solution however process_all is set, continuing to morda search")
else:
logger.info(
"No results found - contaminant search was unsuccessful")
if args.output_pdb and args.output_mtz:
csv = os.path.join(args.work_dir, 'cont/cont_mr.csv')
all_results['cont'] = simbad.util.result_by_score_from_csv(csv, 'final_r_free', ascending=True)
gui.display_results(False, args.results_to_display)
# =====================================================================================
# Perform the morda search
solution_found = simbad.command_line._simbad_morda_search(args)
logger.info("Full MoRDa domain search completed in %d days, %d hours, %d minutes, and %d seconds",
*stopwatch.lap.time_pretty)
if solution_found:
logger.info("... and SIMBAD worked once again. Get in!")
continue
else:
logger.info("No results found - full search was unsuccessful")
if args.output_pdb and args.output_mtz:
csv = os.path.join(args.work_dir, 'morda/morda_mr.csv')
all_results['morda'] = simbad.util.result_by_score_from_csv(csv, 'final_r_free', ascending=True)
gui.display_results(False, args.results_to_display)
# =====================================================================================
# Make sure we only run the loop once for now
end_of_cycle = True
if len(all_results) >= 1:
if sys.version_info.major == 3:
sorted_results = sorted(all_results.items(), key=lambda kv: (kv[1], kv))
else:
sorted_results = sorted(all_results.iteritems(), key=lambda kv: (kv[1], kv))
result = sorted_results[0][1]
simbad.util.output_files(args.work_dir, result, args.output_pdb, args.output_mtz)
stopwatch.stop()
logger.info("All processing completed in %d days, %d hours, %d minutes, and %d seconds",
*stopwatch.time_pretty)
gui.display_results(True, args.results_to_display)
if args.rvapi_document:
gui.save_document()
if __name__ == "__main__":
import logging
logging.basicConfig(level=logging.NOTSET)
try:
main()
except Exception:
simbad.exit.exit_error(*sys.exc_info())
|
bsd-3-clause
| 6,700,612,757,377,132,000
| 39.70625
| 146
| 0.614617
| false
| 3.560962
| false
| false
| false
|
mdavidsaver/spicetools
|
spicetools/bench/fileframe.py
|
1
|
2613
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2014 Michael Davidsaver
License is GPL3+, see file LICENSE for details
"""
import logging
_log=logging.getLogger(__name__)
import os, os.path
from PyQt4 import QtCore, QtGui
from PyQt4.QtCore import Qt
from .fileframe_ui import Ui_FileFrame
class FileFrame(QtGui.QFrame):
fileChanged = QtCore.pyqtSignal(QtCore.QString)
typeChanged = QtCore.pyqtSignal(bool)
def __init__(self, parent):
super(FileFrame, self).__init__(parent)
self.ui = Ui_FileFrame()
self.ui.setupUi(self)
self.dia = QtGui.QFileDialog(self, "Select Net of Schem.",
os.getcwd(),
"Net/Schem. (*.net *.sch);;All (*)")
self.dia.fileSelected.connect(self.setFile)
self.ui.fileBox.activated.connect(self._fileChange)
self.ui.typeBox.currentIndexChanged.connect(self._typeChanged)
self.ui.fileBtn.clicked.connect(self._select_existing)
A = QtGui.QAction("&Create file", self.ui.fileBtn)
self.ui.fileBtn.addAction(A)
A.activated.connect(self._select_new)
A = QtGui.QAction("S&elect file", self.ui.fileBtn)
A.activated.connect(self._select_existing)
self.ui.fileBtn.addAction(A)
def _select_existing(self):
self.dia.setFileMode(self.dia.ExistingFile)
self.dia.setAcceptMode(self.dia.AcceptOpen)
self.dia.exec_()
def _select_new(self):
self.dia.setFileMode(self.dia.AnyFile)
self.dia.setAcceptMode(self.dia.AcceptSave)
R = self.dia.exec_()
if not R:
return
F = str(self.dia.selectedFiles()[0])
_log.info("Create %s", F)
        with open(F, 'w'):
            pass  # just create an empty file; no need to rebind F to the handle
def clear(self):
self.setFile('')
self.setType(True)
def _fileChange(self):
self.fileChanged.emit(self.ui.fileBox.currentText())
def _typeChanged(self, i):
self.typeChanged.emit(i==1)
def setFile(self, fname):
self.dia.selectFile(fname)
self.ui.fileBox.setEditText(fname)
self.fileChanged.emit(fname)
def setType(self, B):
self.ui.typeBox.setCurrentIndex(1 if B else 0)
def file(self):
return self.ui.fileBox.currentText()
def type(self):
return self.ui.typeBox.currentIndex()==1
file = QtCore.pyqtProperty(QtCore.QString, file, setFile,
notify=fileChanged)
type = QtCore.pyqtProperty(bool, type, setType,
notify=typeChanged)
|
gpl-3.0
| -8,681,363,298,104,596,000
| 28.359551
| 73
| 0.606965
| false
| 3.589286
| false
| false
| false
|
zathras777/pywind
|
pywind/ofgem/objects.py
|
1
|
8218
|
# coding=utf-8
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>
from datetime import datetime
from pprint import pprint
from pywind.utils import map_xml_to_dict
class OfgemObjectBase(object):
XML_MAPPING = None
def __init__(self, node):
""" Extract information from the supplied XML node.
The factor figure is MWh per certificate.
"""
if self.XML_MAPPING is None:
raise NotImplementedError("Child classes should define their XML_MAPPING")
self.attrs = map_xml_to_dict(node, self.XML_MAPPING)
# pprint(self.attrs)
def __getattr__(self, item):
if item in self.attrs:
return self.attrs[item]
raise AttributeError(item)
def as_row(self):
"""
Return the information in correct format for :func:`rows()` usage
:returns: Formatted attribute dict
:rtype: dict
"""
return {'@{}'.format(key): self.attrs[key] for key in self.attrs.keys()}
class Certificates(OfgemObjectBase):
""" Certificate Number Fact Sheet
https://www.ofgem.gov.uk/sites/default/files/docs/roc_identifier_fact_sheet_dec_2015.pdf
"""
XML_MAPPING = (
('textbox4', 'generator_id'),
('textbox13', 'name'),
('textbox5', 'scheme'),
('textbox19', 'capacity', 'float', 0.0),
('textbox12', 'country'),
('textbox15', 'technology'),
('textbox31', 'generation_type'),
('textbox18', 'period'),
('textbox21', 'certs', 'int', 0),
('textbox24', 'start_no'),
('textbox27', 'finish_no'),
('textbox37', 'factor', 'float', 0.0),
('textbox30', 'issue_dt', 'date'),
('textbox33', 'status'),
('textbox36', 'status_dt', 'date'),
('textbox39', 'current_holder'),
('textbox45', 'reg_no')
)
def __init__(self, node):
OfgemObjectBase.__init__(self, node)
if self.attrs['period'].startswith("01"):
dt = datetime.strptime(self.attrs['period'][:10], '%d/%m/%Y')
self.attrs['period'] = dt.strftime("%b-%Y")
def __str__(self):
return " {} {} {:5d} {}".format(self.issue_dt.strftime("%Y %b %d"), self.start_no,
self.certs, self.current_holder)
@property
def digits(self):
""" Number of digits that store the certificate number.
:rtype: int
"""
return 10 if self.scheme == 'REGO' else 6
@property
def certificates(self):
""" Number of certificates covered by this object.
:rtype: int
"""
return self.finish - self.start + 1
@property
def start(self):
""" Return the numeric start number for the certificates.
Each certificate number contains the station, period and the number of the certificate,
so this function extracts the numeric part.
:returns: Start number of the certificates referenced
:rtype: int
"""
return int(self.start_no[10:10 + self.digits])
@property
def finish(self):
""" Return the numeric finish number for the certificates.
Each certificate number contains the station, period and the number of the certificate,
so this function extracts the numeric part.
:returns: Finish number of the certificates referenced
:rtype: integer
"""
return int(self.finish_no[10:10 + self.digits])
def output_summary(self):
""" Return a string with the output for the certificates.
:rtype: str
"""
perc = (float(self.certs) / self.capacity) * 100
return "%s: %s %s vs %s => %.02f%%" % (self.period, self.name, self.certs,
                                       self.capacity, perc)
def station_details(self):
""" Get a dict object with the station information for these certificates.
:returns: Dict with just information relevant to identifying the station
:rtype: dict
"""
rv_dict = {fld: self.attrs[fld] for fld in ['generator_id',
'name',
'scheme',
'capacity',
'country',
'technology']}
rv_dict['output'] = self.output
return rv_dict
@property
def output(self):
""" Calculate the output based on the number of certs issued and factor.
:returns: Numeric output or 0
:rtype: float
"""
return self.certs / self.factor if self.factor else 0
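# Illustrative sketch of how the properties above fit together; the helper
# below is only an example and assumes `cert` is a parsed Certificates row.
def _certificate_range_summary(cert):
    return "{0}-{1}: {2} certificate(s), {3}-digit counter".format(
        cert.start, cert.finish, cert.certificates, cert.digits)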
class Station(OfgemObjectBase):
"""
Store details of a single station using data from Ofgem.
The exposed object makes the individual pieces of data available by \
acting as a dict, i.e.
.. :code::
name = station['name']
The convenience function :func:`as_string` will return a full list of the data \
formatted for display in a terminal.
"""
XML_MAPPING = (
('GeneratorID', 'generator_id'),
('StatusName', 'status'),
('GeneratorName', 'name'),
('SchemeName', 'scheme'),
('Capacity', '', 'float'),
('Country',),
('TechnologyName', 'technology'),
('OutputType', 'output'),
('AccreditationDate', 'accreditation_dt', 'date'),
('CommissionDate', 'commission_dt', 'date'),
('textbox6', 'developer'),
('textbox61', 'developer_address', 'address'),
('textbox65', 'address', 'address'),
('FaxNumber', 'fax')
)
def __init__(self, node):
OfgemObjectBase.__init__(self, node)
# catch/correct some odd results I have observed...
if self.attrs['technology'] is not None and '\n' in self.attrs['technology']:
self.attrs['technology'] = self.attrs['technology'].split('\n')[0]
class CertificateStation(object):
""" We are normally interested in knowing about certificates issued to
a station, so this class attempts to simplify this process.
Once issued all certificates will be accounted for, but the final
owner and status may change. This class attempts to take a bunch of
Certificate objects and simplify them into a final set, with ownership
and status correctly attributed.
"""
def __init__(self, name, g_id, capacity, scheme):
self.name = name
self.generator_id = g_id
self.scheme = scheme
self.capacity = capacity
self.certs = []
def __len__(self):
return len(self.certs)
def __iter__(self):
for c in self.certs:
yield c
def add_cert(self, cert):
self.certs.append(cert)
def as_row(self):
return [cert.as_row() for cert in self.certs]
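# A minimal sketch of the intended workflow for CertificateStation; the
# single-certificate input below is illustrative only.
def _example_station_bucket(cert):
    station = CertificateStation(cert.name, cert.generator_id,
                                 cert.capacity, cert.scheme)
    station.add_cert(cert)
    return station.as_row()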
|
unlicense
| 3,872,703,660,219,905,000
| 33.970213
| 100
| 0.586152
| false
| 4.258031
| false
| false
| false
|
robertnishihara/ray
|
python/ray/autoscaler/_private/aws/utils.py
|
1
|
4590
|
from collections import defaultdict
from ray.autoscaler._private.cli_logger import cli_logger
import colorful as cf
class LazyDefaultDict(defaultdict):
"""
LazyDefaultDict(default_factory[, ...]) --> dict with default factory
The default factory is called with the key argument to produce
a new value when a key is not present, in __getitem__ only.
A LazyDefaultDict compares equal to a dict with the same items.
All remaining arguments are treated the same as if they were
passed to the dict constructor, including keyword arguments.
"""
def __missing__(self, key):
"""
__missing__(key) # Called by __getitem__ for missing key; pseudo-code:
if self.default_factory is None: raise KeyError((key,))
self[key] = value = self.default_factory(key)
return value
"""
self[key] = self.default_factory(key)
return self[key]
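# Minimal usage sketch: unlike collections.defaultdict, the factory receives
# the missing key. The factory below is illustrative only.
def _lazy_default_dict_example():
    groups = LazyDefaultDict(lambda key: "sg-{}".format(key))
    return groups["head-node"]  # -> "sg-head-node"; the value is now cached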
def handle_boto_error(exc, msg, *args, **kwargs):
if cli_logger.old_style:
# old-style logging doesn't do anything here
# so we exit early
return
error_code = None
error_info = None
# todo: not sure if these exceptions always have response
if hasattr(exc, "response"):
error_info = exc.response.get("Error", None)
if error_info is not None:
error_code = error_info.get("Code", None)
generic_message_args = [
"{}\n"
"Error code: {}",
msg.format(*args, **kwargs),
cf.bold(error_code)
]
# apparently
# ExpiredTokenException
# ExpiredToken
# RequestExpired
# are all the same pretty much
credentials_expiration_codes = [
"ExpiredTokenException", "ExpiredToken", "RequestExpired"
]
if error_code in credentials_expiration_codes:
# "An error occurred (ExpiredToken) when calling the
# GetInstanceProfile operation: The security token
# included in the request is expired"
# "An error occurred (RequestExpired) when calling the
# DescribeKeyPairs operation: Request has expired."
token_command = (
"aws sts get-session-token "
"--serial-number arn:aws:iam::" + cf.underlined("ROOT_ACCOUNT_ID")
+ ":mfa/" + cf.underlined("AWS_USERNAME") + " --token-code " +
cf.underlined("TWO_FACTOR_AUTH_CODE"))
secret_key_var = (
"export AWS_SECRET_ACCESS_KEY = " + cf.underlined("REPLACE_ME") +
" # found at Credentials.SecretAccessKey")
session_token_var = (
"export AWS_SESSION_TOKEN = " + cf.underlined("REPLACE_ME") +
" # found at Credentials.SessionToken")
access_key_id_var = (
"export AWS_ACCESS_KEY_ID = " + cf.underlined("REPLACE_ME") +
" # found at Credentials.AccessKeyId")
# fixme: replace with a Github URL that points
# to our repo
aws_session_script_url = ("https://gist.github.com/maximsmol/"
"a0284e1d97b25d417bd9ae02e5f450cf")
cli_logger.verbose_error(*generic_message_args)
cli_logger.verbose(vars(exc))
cli_logger.panic("Your AWS session has expired.")
cli_logger.newline()
cli_logger.panic("You can request a new one using")
cli_logger.panic(cf.bold(token_command))
cli_logger.panic("then expose it to Ray by setting")
cli_logger.panic(cf.bold(secret_key_var))
cli_logger.panic(cf.bold(session_token_var))
cli_logger.panic(cf.bold(access_key_id_var))
cli_logger.newline()
cli_logger.panic("You can find a script that automates this at:")
cli_logger.panic(cf.underlined(aws_session_script_url))
# Do not re-raise the exception here because it looks awful
# and we already print all the info in verbose
cli_logger.abort()
# todo: any other errors that we should catch separately?
cli_logger.panic(*generic_message_args)
cli_logger.newline()
with cli_logger.verbatim_error_ctx("Boto3 error:"):
cli_logger.verbose("{}", str(vars(exc)))
cli_logger.panic("{}", str(exc))
cli_logger.abort()
def boto_exception_handler(msg, *args, **kwargs):
# todo: implement timer
class ExceptionHandlerContextManager():
def __enter__(self):
pass
def __exit__(self, type, value, tb):
import botocore
if type is botocore.exceptions.ClientError:
handle_boto_error(value, msg, *args, **kwargs)
return ExceptionHandlerContextManager()
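# A minimal sketch of how the context manager above is meant to wrap boto
# calls; the client argument and the message are illustrative, not part of
# this module.
def _describe_instances_example(ec2):
    with boto_exception_handler("Failed to describe EC2 instances"):
        return ec2.describe_instances()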
|
apache-2.0
| 369,375,581,428,346,560
| 34.859375
| 78
| 0.621133
| false
| 3.919727
| false
| false
| false
|
zaneveld/picrust
|
picrust/util.py
|
1
|
16630
|
#!/usr/bin/env python
# File created on 23 Nov 2011
from __future__ import division
__author__ = "Greg Caporaso"
__copyright__ = "Copyright 2015, The PICRUSt Project"
__credits__ = ["Greg Caporaso", "Morgan Langille", "Daniel McDonald"]
__license__ = "GPL"
__version__ = "1.1.0"
__maintainer__ = "Greg Caporaso"
__email__ = "gregcaporaso@gmail.com"
__status__ = "Development"
from json import dumps
from os.path import abspath, dirname, isdir
from os import makedirs
from cogent.core.tree import PhyloNode, TreeError
from numpy import array, asarray, atleast_1d
from biom import Table, parse_table
from biom.table import vlen_list_of_str_formatter
from biom.util import biom_open, HAVE_H5PY
from subprocess import Popen, PIPE
import StringIO
def make_sample_transformer(scaling_factors):
def transform_sample(sample_value,sample_id,sample_metadata):
scaling_factor = scaling_factors[sample_id]
new_val = sample_value * scaling_factor
return new_val
return transform_sample
def scale_metagenomes(metagenome_table,scaling_factors):
""" scale metagenomes from metagenome table and scaling factors
"""
transform_sample_f = make_sample_transformer(scaling_factors)
new_metagenome_table = metagenome_table.transform(transform_sample_f)
return new_metagenome_table
def convert_precalc_to_biom(precalc_in, ids_to_load=None,transpose=True,md_prefix='metadata_'):
"""Loads PICRUSTs tab-delimited version of the precalc file and outputs a BIOM object"""
#if given a string convert to a filehandle
if type(precalc_in) ==str or type(precalc_in) == unicode:
fh = StringIO.StringIO(precalc_in)
else:
fh=precalc_in
#first line has to be header
header_ids=fh.readline().strip().split('\t')
col_meta_locs={}
for idx,col_id in enumerate(header_ids):
if col_id.startswith(md_prefix):
col_meta_locs[col_id[len(md_prefix):]]=idx
end_of_data=len(header_ids)-len(col_meta_locs)
trait_ids = header_ids[1:end_of_data]
col_meta=[]
row_meta=[{} for i in trait_ids]
if ids_to_load is not None and len(ids_to_load) > 0:
ids_to_load=set(ids_to_load)
load_all_ids=False
else:
load_all_ids=True
matching=[]
otu_ids=[]
for line in fh:
fields = line.strip().split('\t')
row_id=fields[0]
if(row_id.startswith(md_prefix)):
#handle metadata
#determine type of metadata (this may not be perfect)
metadata_type=determine_metadata_type(line)
for idx,trait_name in enumerate(trait_ids):
row_meta[idx][row_id[len(md_prefix):]]=parse_metadata_field(fields[idx+1],metadata_type)
elif load_all_ids or (row_id in set(ids_to_load)):
otu_ids.append(row_id)
matching.append(map(float,fields[1:end_of_data]))
#add metadata
col_meta_dict={}
for meta_name in col_meta_locs:
col_meta_dict[meta_name]=fields[col_meta_locs[meta_name]]
col_meta.append(col_meta_dict)
if not load_all_ids:
ids_to_load.remove(row_id)
if not otu_ids:
raise ValueError,"No OTUs match identifiers in precalculated file. PICRUSt requires an OTU table reference/closed picked against GreenGenes.\nExample of the first 5 OTU ids from your table: {0}".format(', '.join(list(ids_to_load)[:5]))
if ids_to_load:
raise ValueError,"One or more OTU ids were not found in the precalculated file!\nAre you using the correct --gg_version?\nExample of (the {0}) unknown OTU ids: {1}".format(len(ids_to_load),', '.join(list(ids_to_load)[:5]))
#note that we transpose the data before making biom obj
matching = asarray(matching)
if transpose:
return Table(matching.T, trait_ids, otu_ids, row_meta, col_meta,
type='Gene table')
else:
return Table(matching, otu_ids, trait_ids, col_meta, row_meta,
type='Gene table')
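# A minimal sketch of the tab-delimited precalc layout consumed above; the
# trait ids, OTU id and values are made up purely for illustration.
def _example_precalc_to_biom():
    precalc = ("#OTU_IDs\tK00001\tK00002\tmetadata_NSTI\n"
               "1111886\t0.0\t2.0\t0.03\n"
               "metadata_KEGG_Description\tdesc one\tdesc two\n")
    return convert_precalc_to_biom(precalc)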
def convert_biom_to_precalc(biom_table):
"""Converts a biom table into a PICRUSt precalculated tab-delimited file """
col_ids = biom_table.ids(axis='observation')
row_ids = biom_table.ids()
lines = []
header = ['#OTU_IDs'] + list(col_ids)
col_metadata_names = []
# peek at metadata for Samples (e.g. NSTI) so we can set the header
if biom_table.metadata():
col_metadata_names = biom_table.metadata()[0].keys()
#add the metadata names to the header
for col_metadata_name in col_metadata_names:
header.append('metadata_' + col_metadata_name)
lines.append(map(str, header))
row_metadata_names = []
# peek at metadata for observations (e.g. KEGG_Pathways)
if biom_table.metadata(axis='observation'):
row_metadata_names = biom_table.metadata(axis='observation')[0].keys()
for metadata_name in row_metadata_names:
metadata_line = ['metadata_' + metadata_name]
# do the observation metadata now
for col_id in col_ids:
metadata = biom_table.metadata(axis='observation')[biom_table.index(col_id, axis='observation')]
metadata_line.append(biom_meta_to_string(metadata[metadata_name]))
lines.append(map(str, metadata_line))
# transpose the actual count data
transposed_table = biom_table._data.T
for idx, count in enumerate(transposed_table.toarray()):
line = [row_ids[idx]] + map(str, count)
# add the metadata values to the end of the row now
for meta_name in col_metadata_names:
line.append(biom_table.metadata()[idx][meta_name])
lines.append(line)
return "\n".join("\t".join(map(str, x)) for x in lines)
def determine_metadata_type(line):
if ';' in line:
if '|' in line:
return 'list_of_lists'
else:
return 'list'
else:
return 'string'
def parse_metadata_field(metadata_str,metadata_format='string'):
if metadata_format == 'string':
return metadata_str
elif metadata_format == 'list':
return [e.strip() for e in metadata_str.split(';')]
elif metadata_format == 'list_of_lists':
return [[e.strip() for e in y.split(';')] for y in metadata_str.split('|')]
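# Illustrative behaviour of the two helpers above (values are made up):
#   >>> determine_metadata_type('metadata_KEGG\tA;B|C;D\n')
#   'list_of_lists'
#   >>> parse_metadata_field('a; b', 'list')
#   ['a', 'b']
#   >>> parse_metadata_field('a;b|c;d', 'list_of_lists')
#   [['a', 'b'], ['c', 'd']]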
def biom_meta_to_string(metadata):
""" Determine which format the metadata is and then convert to a string"""
#Note that since ';' and '|' are used as seperators we must replace them if they exist
if type(metadata) ==str or type(metadata)==unicode:
return metadata.replace(';',':')
elif type(metadata) == list:
if type(metadata[0]) == list:
return "|".join(";".join([y.replace(';',':').replace('|',':') for y in x]) for x in metadata)
else:
return ";".join(x.replace(';',':') for x in metadata)
def system_call(cmd, shell=True):
"""Call cmd and return (stdout, stderr, return_value).
cmd can be either a string containing the command to be run, or a sequence
of strings that are the tokens of the command.
Please see Python's subprocess.Popen for a description of the shell
parameter and how cmd is interpreted differently based on its value.
This code was copied from QIIME's qiime_system_call() (util.py) function on June 3rd, 2013.
"""
proc = Popen(cmd, shell=shell, universal_newlines=True, stdout=PIPE,
stderr=PIPE)
# communicate pulls all stdout/stderr from the PIPEs to
# avoid blocking -- don't remove this line!
stdout, stderr = proc.communicate()
return_value = proc.returncode
return stdout, stderr, return_value
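# Minimal usage sketch; the command is illustrative.
def _system_call_example():
    stdout, stderr, return_value = system_call("echo hello")
    return return_value == 0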
def file_contains_nulls(file):
"""Checks given file for null characters. These are sometimes created on SGE clusters when system IO is overloaded."""
return '\x00' in open(file,'rb').read()
def parse_table_to_biom(table_lines, table_format="tab-delimited",\
biom_format = 'otu table'):
"""Read the lines of an open trait table file, and output a .biom table object
The trait table must be either a biom file, or a picrust tab-delimited file
table_format -- must be either 'tab-delimited' or 'biom'
"""
return parse_table(table_lines)
def get_picrust_project_dir():
""" Returns the top-level PICRUST directory
"""
# Get the full path of util.py
current_file_path = abspath(__file__)
# Get the directory containing util.py
current_dir_path = dirname(current_file_path)
# Return the directory containing the directory containing util.py
return dirname(current_dir_path)
def transpose_trait_table_fields(data_fields,header,id_row_idx=0,\
input_header_delimiter="\t",output_delimiter="\t"):
"""Transpose the fields of a trait table, returning new data_fields,header
data_fields: list of lists for data fields
header: a string describing the header_line
id_row_idx: index of row labels. Almost always 0 but included for
but included for completeness
input_header_delimiter: delimiter for fields in the header string
output_delimiter: use this delimiter to join header fields
NOTE: typically the header and data fields are generated
by parse_trait_table in picrust.parse
"""
header_fields = header.split(input_header_delimiter)
# ensure no trailing newlines
old_header_fields = [h.strip() for h in header_fields]
new_header_fields = [old_header_fields[0]] + \
[df[id_row_idx].strip() for df in data_fields]
non_label_data_fields = []
for row in data_fields:
non_label_fields = [e for i, e in enumerate(row) if i != id_row_idx]
non_label_data_fields.append(non_label_fields)
data_array = array(non_label_data_fields)
new_data_array = data_array.T
new_rows = []
for i,row in enumerate(new_data_array):
label = old_header_fields[i+1]
# this is i+1 not i because i is the blank/meaningless
# upper left corner entry.
new_row = [label] + list(row)
new_rows.append(new_row)
new_header = output_delimiter.join(new_header_fields)
return new_header + "\n", new_rows
def make_output_dir_for_file(filepath):
"""Create sub-directories for a new file if they don't already exist"""
dirpath = dirname(filepath)
if not isdir(dirpath) and not dirpath == '':
makedirs(dirpath)
def write_biom_table(biom_table, biom_table_fp, compress=True,
write_hdf5=HAVE_H5PY, format_fs=None):
"""Writes a BIOM table to the specified filepath
Parameters
----------
biom_table : biom.Table
The table object to write out
biom_table_fp : str
The path to the output file
compress : bool, optional
Defaults to ``True``. If True, built-in compression on the output HDF5
file will be enabled. This option is only relevant if ``write_hdf5`` is
``True``.
write_hdf5 : bool, optional
Defaults to ``True`` if H5PY is installed and to ``False`` if H5PY is
not installed. If ``True`` the output biom table will be written as an
HDF5 binary file, otherwise it will be a JSON string.
format_fs : dict, optional
Formatting functions to be passed to `Table.to_hdf5`
Notes
-----
This code was adapted from QIIME 1.9
"""
generated_by = "PICRUSt " + __version__
if write_hdf5:
with biom_open(biom_table_fp, 'w') as biom_file:
biom_table.to_hdf5(biom_file, generated_by, compress,
format_fs=format_fs)
else:
with open(biom_table_fp, 'w') as biom_file:
biom_table.to_json(generated_by, biom_file)
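# Minimal usage sketch (the table variable and output path are illustrative):
#   write_biom_table(predicted_table, 'predicted_metagenomes.biom')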
def make_output_dir(dirpath, strict=False):
"""Make an output directory if it doesn't exist
Returns the path to the directory
dirpath -- a string describing the path to the directory
strict -- if True, raise an exception if dir already
exists
"""
dirpath = abspath(dirpath)
#Check if directory already exists
if isdir(dirpath):
if strict == True:
err_str = "Directory '%s' already exists" % dirpath
raise IOError(err_str)
return dirpath
try:
makedirs(dirpath)
except IOError,e:
err_str = "Could not create directory '%s'. Are permissions set correctly? Got error: '%s'" %e
raise IOError(err_str)
return dirpath
class PicrustNode(PhyloNode):
def multifurcating(self, num, eps=None, constructor=None):
"""Return a new tree with every node having num or few children
num : the number of children a node can have max
eps : default branch length to set if self or constructor is of
PhyloNode type
constructor : a TreeNode or subclass constructor. If None, uses self
"""
if num < 2:
raise TreeError, "Minimum number of children must be >= 2"
if eps is None:
eps = 0.0
if constructor is None:
constructor = self.__class__
if hasattr(constructor, 'Length'):
set_branchlength = True
else:
set_branchlength = False
new_tree = self.copy()
for n in new_tree.preorder(include_self=True):
while len(n.Children) > num:
new_node = constructor(Children=n.Children[-num:])
if set_branchlength:
new_node.Length = eps
n.append(new_node)
return new_tree
def bifurcating(self, eps=None, constructor=None):
"""Wrap multifurcating with a num of 2"""
return self.multifurcating(2, eps, constructor)
def nameUnnamedNodes(self):
"""sets the Data property of unnamed nodes to an arbitrary value
Internal nodes are often unnamed and so this function assigns a
value for referencing.
Note*: This method is faster than pycogent nameUnnamedNodes()
because it uses a dict instead of an array. Also, we traverse
only over internal nodes (and not including tips)
"""
#make a list of the names that are already in the tree
names_in_use = {}
for node in self.iterNontips(include_self=True):
if node.Name:
names_in_use[node.Name]=1
#assign unique names to the Data property of nodes where Data = None
name_index = 1
for node in self.iterNontips(include_self=True):
#if (not node.Name) or re.match('edge',node.Name):
if not node.Name:
new_name = 'node' + str(name_index)
#choose a new name if name is already in tree
while new_name in names_in_use:
name_index += 1
new_name = 'node' + str(name_index)
node.Name = new_name
names_in_use[node.Name]=1
name_index += 1
def getSubTree(self,names):
"""return a new subtree with just the tips in names
assumes names is a set
assumes all names in names are present as tips in tree
"""
tcopy = self.deepcopy()
while len(tcopy.tips()) != len(names):
# for each tip, remove it if we do not want to keep it
for n in tcopy.tips():
if n.Name not in names:
n.Parent.removeNode(n)
# reduce single-child nodes
tcopy.prune()
return tcopy
def list_of_list_of_str_formatter(grp, header, md, compression):
"""Serialize [[str]] into a BIOM hdf5 compatible form
Parameters
----------
grp : h5py.Group
This is ignored. Provided for passthrough
header : str
The key in each dict to pull out
md : list of dict
The axis metadata
compression : bool
Whether to enable dataset compression. This is ignored, provided for
passthrough
Returns
-------
grp : h5py.Group
The h5py.Group
header : str
The key in each dict to pull out
md : list of dict
The modified metadata that can be formatted in hdf5
compression : bool
Whether to enable dataset compression.
Notes
-----
This method is intended to be a "passthrough" to BIOM's
vlen_list_of_str_formatter method. It is a transform method.
"""
new_md = [{header: atleast_1d(asarray(dumps(m[header])))} for m in md]
return (grp, header, new_md, compression)
def picrust_formatter(*args):
"""Transform, and format"""
return vlen_list_of_str_formatter(*list_of_list_of_str_formatter(*args))
|
gpl-3.0
| 4,356,464,400,882,689,000
| 33.936975
| 243
| 0.632111
| false
| 3.734561
| false
| false
| false
|
certik/chess
|
common/appenginepatch/appenginepatcher/patch.py
|
1
|
9983
|
# -*- coding: utf-8 -*-
from google.appengine.ext import db
import logging, os, sys
def patch_all():
patch_python()
patch_app_engine()
patch_django()
setup_logging()
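# Typical usage (illustrative): the patches must be applied before any module
# imports Django models or django.contrib.auth.models, e.g. at the very top
# of the project's entry point:
#   from appenginepatcher.patch import patch_all
#   patch_all()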
def patch_python():
# Remove modules that we want to override
for module in ('httplib', 'urllib', 'urllib2', 'memcache',):
if module in sys.modules:
del sys.modules[module]
# For some reason the imp module can't be replaced via sys.path
from appenginepatcher import have_appserver
if have_appserver:
from appenginepatcher import imp
sys.modules['imp'] = imp
# Add fake error and gaierror to socket module. Required for boto support.
import socket
class error(Exception):
pass
class gaierror(Exception):
pass
socket.error = error
socket.gaierror = gaierror
if have_appserver:
def unlink(_):
raise NotImplementedError('App Engine does not support FS writes!')
os.unlink = unlink
def patch_app_engine():
# This allows for using Paginator on a Query object. We limit the number
# of results to 301, so there won't be any timeouts (301 because you can
# say "more than 300 results").
def __len__(self):
return self.count(301)
db.Query.__len__ = __len__
# Add "model" property to Query (needed by generic views)
class ModelProperty(object):
def __get__(self, query, unused):
try:
return query._Query__model_class
except:
return query._model_class
db.Query.model = ModelProperty()
# Add a few Model methods that are needed for serialization
def _get_pk_val(self):
return unicode(self.key())
db.Model._get_pk_val = _get_pk_val
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self._get_pk_val() == other._get_pk_val()
db.Model.__eq__ = __eq__
def __ne__(self, other):
return not self.__eq__(other)
db.Model.__ne__ = __ne__
# Make Property more Django-like (needed for serialization)
db.Property.serialize = True
db.Property.rel = None
class Relation(object):
field_name = 'key_name'
db.ReferenceProperty.rel = Relation
# Add repr to make debugging a little bit easier
def __repr__(self):
d = dict([(k, getattr(self, k)) for k in self.properties()])
return '%s(**%s)' % (self.__class__.__name__, repr(d))
db.Model.__repr__ = __repr__
# Replace save() method with one that calls put(), so a monkey-patched
# put() will also work if someone uses save()
def save(self):
return self.put()
db.Model.save = save
# Add _meta to Model, so porting code becomes easier (generic views,
# xheaders, and serialization depend on it).
class _meta(object):
many_to_many = []
class pk:
name = 'key_name'
def __init__(self, model):
self.app_label = model.__module__.split('.')[-2]
self.object_name = model.__name__
self.module_name = self.object_name.lower()
self.verbose_name = self.object_name.lower()
self.verbose_name_plural = None
self.abstract = False
self.model = model
def __str__(self):
return '%s.%s' % (self.app_label, self.module_name)
@property
def local_fields(self):
return self.model.properties().values()
# Register models with Django
old_init = db.PropertiedClass.__init__
def __init__(cls, name, bases, attrs):
"""Creates a combined appengine and Django model.
The resulting model will be known to both the appengine libraries and
Django.
"""
cls._meta = _meta(cls)
cls._default_manager = cls
old_init(cls, name, bases, attrs)
from django.db.models.loading import register_models
register_models(cls._meta.app_label, cls)
db.PropertiedClass.__init__ = __init__
def log_exception(*args, **kwargs):
logging.exception('Exception in request:')
def patch_django():
# In order to speed things up and consume less memory we lazily replace
# modules if possible. This requires some __path__ magic. :)
# Add fake 'appengine' DB backend
# This also creates a separate datastore for each project.
from appenginepatcher.db_backends import appengine
sys.modules['django.db.backends.appengine'] = appengine
base_path = os.path.abspath(os.path.dirname(__file__))
# Replace generic views
from django.views import generic
generic.__path__.insert(0, os.path.join(base_path, 'generic_views'))
# Replace db session backend and tests
from django.contrib import sessions
sessions.__path__.insert(0, os.path.join(base_path, 'sessions'))
from django.contrib.sessions import backends
backends.__path__.insert(0, os.path.join(base_path, 'session_backends'))
# Replace the dispatchers.
from django.core import signals
# Log errors.
signals.got_request_exception.connect(log_exception)
# Unregister the rollback event handler.
import django.db
signals.got_request_exception.disconnect(django.db._rollback_on_exception)
# Replace auth models
# This MUST happen before any other modules import User or they'll
# get Django's original User model!!!
from appenginepatcher.auth import models
sys.modules['django.contrib.auth.models'] = models
# Replace rest of auth app
from django.contrib import auth
auth.__path__.insert(0, os.path.join(base_path, 'auth'))
# Replace ModelForm
# This MUST happen as early as possible, but after User got replaced!
from google.appengine.ext.db import djangoforms as aeforms
from django import forms
from django.forms import models as modelforms
forms.ModelForm = modelforms.ModelForm = aeforms.ModelForm
forms.ModelFormMetaclass = aeforms.ModelFormMetaclass
modelforms.ModelFormMetaclass = aeforms.ModelFormMetaclass
# Fix handling of verbose_name. Google resolves lazy translation objects
# immediately, which of course breaks translation support.
from django.utils.text import capfirst
def get_form_field(self, form_class=forms.CharField, **kwargs):
defaults = {'required': self.required}
if self.verbose_name:
defaults['label'] = capfirst(self.verbose_name)
if self.choices:
choices = []
if not self.required or (self.default is None and
'initial' not in kwargs):
choices.append(('', '---------'))
for choice in self.choices:
choices.append((str(choice), unicode(choice)))
defaults['widget'] = forms.Select(choices=choices)
if self.default is not None:
defaults['initial'] = self.default
defaults.update(kwargs)
return form_class(**defaults)
db.Property.get_form_field = get_form_field
# Extend ModelForm with support for EmailProperty
def get_form_field(self, **kwargs):
"""Return a Django form field appropriate for an email property."""
defaults = {'form_class': forms.EmailField}
defaults.update(kwargs)
return super(db.EmailProperty, self).get_form_field(**defaults)
db.EmailProperty.get_form_field = get_form_field
# Fix default value of UserProperty (Google resolves the user too early)
def get_form_field(self, **kwargs):
from django.contrib.auth.models import User
from django.utils.functional import lazy
from google.appengine.api import users
defaults = {'initial': lazy(users.GetCurrentUser, User)}
defaults.update(kwargs)
return super(db.UserProperty, self).get_form_field(**defaults)
db.UserProperty.get_form_field = get_form_field
# Replace mail backend
from appenginepatcher import mail as gmail
from django.core import mail
mail.SMTPConnection = gmail.GoogleSMTPConnection
mail.mail_admins = gmail.mail_admins
mail.mail_managers = gmail.mail_managers
# Fix translation support if we're in a zip file. We change the path
# of the django.conf module, so the translation code tries to load
# Django's translations from the common/django-locale/locale folder.
from django import conf
from aecmd import COMMON_DIR
if '.zip' + os.sep in conf.__file__:
conf.__file__ = os.path.join(COMMON_DIR, 'django-locale', 'fake.py')
# Patch login_required if using Google Accounts
from django.conf import settings
if 'ragendja.auth.middleware.GoogleAuthenticationMiddleware' in \
settings.MIDDLEWARE_CLASSES:
from ragendja.auth.decorators import google_login_required, \
redirect_to_google_login
from django.contrib.auth import decorators, views
decorators.login_required = google_login_required
views.redirect_to_login = redirect_to_google_login
# Activate ragendja's GLOBALTAGS support (automatically done on import)
from ragendja import template
# Patch auth forms
from appenginepatcher import auth_forms_patch
# Add XML serializer
if not hasattr(settings, 'SERIALIZATION_MODULES'):
settings.SERIALIZATION_MODULES = {}
for name in ('xml', 'python', 'json', 'yaml'):
settings.SERIALIZATION_MODULES[name] = 'appenginepatcher.serializers.' \
+ name
# Patch DeserializedObject
from django.core.serializers import base
class DeserializedObject(base.DeserializedObject):
def save(self, save_m2m=True):
self.object.save()
self.object._parent = None
base.DeserializedObject = DeserializedObject
def setup_logging():
from django.conf import settings
if settings.DEBUG:
logging.getLogger().setLevel(logging.DEBUG)
else:
logging.getLogger().setLevel(logging.INFO)
|
mit
| 7,800,223,992,368,336,000
| 36.389513
| 80
| 0.650005
| false
| 4.1218
| false
| false
| false
|
ronin13/pyvolume
|
pyvolume/sshfs.py
|
1
|
4118
|
# -*- coding: utf-8 -*-
""" Module providing SSHFileSystem implementation."""
from __future__ import unicode_literals
from __future__ import absolute_import
import logging
import os
import os.path
from plumbum import ProcessExecutionError
from plumbum.cmd import sshfs
from plumbum.cmd import sudo
from plumbum.cmd import umount
from pyvolume.exceptions import NeedOptionsException
log = logging.getLogger(__name__)
class SSHFileSystem(object):
"""
Mounts an external directory pointed to by `remote_path`
onto `base` (/mnt by default) and passes it to Docker
to use as a volume. Uses vol_dict to keep track of
different volumes.
"""
def __init__(self, base):
self.base = base
self.sshfs_options = [
"-o",
"reconnect,cache_timeout=60,allow_other,uid=1000,gid=1000,intr",
]
self.vol_dict = {}
def create(self, volname, options):
""" Creates the directories but does not mount it yet."""
if "remote_path" not in options:
raise NeedOptionsException("remote_path is a required option for sshfs")
remote_path = options["remote_path"]
local_path = os.path.join(self.base, volname)
log.info("Creating directory " + local_path)
os.mkdir(local_path)
cmdline = []
if "ssh_config" in options:
cmdline += ["-F", options["ssh_config"]]
if "sshfs_options" in options:
sshfs_options = [options["sshfs_options"]]
else:
sshfs_options = self.sshfs_options
cmdline += [remote_path]
cmdline += [local_path]
cmdline += sshfs_options
self.vol_dict[volname] = {
"Local": local_path,
"Remote": remote_path,
"cmdline": cmdline,
"mounted": False,
}
def list(self):
""" Lists the existing volumes being managed."""
vol_list = []
for volumes in self.vol_dict:
vol_list += [volumes]
return vol_list
def mount_check(self, volname):
"""Check if the volume is already mounted.
If mounted, return its path.
"""
if not self.vol_dict[volname]["mounted"]:
log.error("Volume {0} is not mounted".format(volname))
return None
return self.vol_dict[volname]["Local"]
def path(self, volname):
"""Check if the volume is already mounted.
If mounted, return its path.
"""
if not self.mount_check(volname):
return None
return self.vol_dict[volname]["Local"]
def remove(self, volname):
"""
Removes the volume.
It unmounts the remote if necessary, tolerates
if already unmounted.
After which, it removes the mounted directory.
"""
local_path = self.vol_dict[volname]["Local"]
try:
self.umount(volname)
except ProcessExecutionError as e:
if e.retcode != 1:
raise
log.info("Removing local path " + local_path)
if os.path.exists(local_path):
os.rmdir(local_path)
return True
def mount(self, volname):
""" Mount the remote onto local for volname. """
check = self.mount_check(volname)
if check:
return check
cmdline = self.vol_dict[volname]["cmdline"]
mount_cmd = sshfs[cmdline]
mount_cmd()
self.vol_dict[volname]["mounted"] = True
return self.vol_dict[volname]["Local"]
def umount(self, volname):
if not self.mount_check(volname):
return None
local_path = self.vol_dict[volname]["Local"]
umount_cmd = sudo[umount[local_path]]
umount_cmd()
self.vol_dict[volname]["mounted"] = False
return True
def cleanup(self):
""" Unmounts and removes mount paths when shutting down."""
for volume in self.vol_dict:
self.remove(volume)
def scope(self):
""" Returns scope of this - global."""
return "global"
|
mit
| -1,407,974,807,998,446,800
| 29.279412
| 84
| 0.578679
| false
| 4.089374
| false
| false
| false
|
PhilLidar-DAD/geonode
|
geonode/eula/models.py
|
1
|
2090
|
from django.db import models
from geonode.layers.models import Layer
from geonode.documents.models import Document
from datetime import datetime
from django.utils.translation import ugettext_lazy as _
from geonode.base.models import ResourceBase
from geonode.people.models import OrganizationType
from django_enumfield import enum
try:
from django.conf import settings
User = settings.AUTH_USER_MODEL
except ImportError:
from django.contrib.auth.models import User
from geonode.datarequests.models import LipadOrgType
# Create your models here.
class EULALayerDownload(models.Model):
date_time = models.DateTimeField(default=datetime.now)
user = models.ForeignKey(User, null=False, blank=False)
layer = models.ForeignKey(Layer, null=False, blank=False)
def __unicode__(self):
return "{0}:{1}".format(self.user.username, self.layer.title)
class AnonDownloader(models.Model):
ORG_TYPE_CHOICES = LipadOrgType.objects.values_list('val', 'display_val')
date = models.DateTimeField(auto_now=True)
anon_first_name = models.CharField(_('First Name'), max_length=100)
anon_last_name = models.CharField(_('Last Name'), max_length=100)
anon_email = models.EmailField(_('Email'), max_length=50)
anon_organization = models.CharField(_('Organization'), max_length=100)
anon_purpose = models.CharField(_('Purpose'), max_length=100)
anon_layer = models.CharField(_('Layer Name'), max_length=100, null=True, blank=True,)
anon_orgtype = models.CharField(
_('Organization Type'),
max_length=100,
choices=ORG_TYPE_CHOICES,
default="Other",
help_text='Organization type based on Phil-LiDAR1 Data Distribution Policy'
)
anon_orgother = models.CharField(
_('If Other, please specify'),
max_length=255,
blank=True,
null=True,
)
# anon_resourcebase = models.ForeignKey(ResourceBase, null=True, blank=True, related_name='anon_resourcebase')
anon_document = models.CharField(_('Document Name'), max_length=100, null=True, blank=True,)
|
gpl-3.0
| 5,668,980,210,110,807,000
| 42.541667
| 114
| 0.710526
| false
| 3.813869
| false
| false
| false
|
Griger/Intel-CervicalCancer-KaggleCompetition
|
featureHOG.py
|
1
|
1456
|
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
import numpy as np
from math import pi
from keras.preprocessing.image import ImageDataGenerator
import cv2
from sklearn.cluster import KMeans
import sklearn.preprocessing as prepro
# Generate new examples (data augmentation)
'''
datagen = ImageDataGenerator(
rotation_range=180,
shear_range=pi,
fill_mode='nearest')
train_data = np.load('Datos/train244all.npy')
train_labels = np.load('Datos/train_target244all.npy')
datagen.fit(train_data,rounds=2)
i = 0
nuevas_imagenes = []
tam = 1
for batch in datagen.flow(train_data,train_labels,batch_size = (len(train_data))):
i += 1
if i > tam:
break
nuevas_imagenes.append(batch[0])
nuevas_imagenes = np.array(nuevas_imagenes)
nuevas_imagenes = np.reshape(nuevas_imagenes, (len(train_data)*tam,244,244,3))
np.save('Datos/extraRotations.npy', nuevas_imagenes, allow_pickle=True, fix_imports=True)
'''
train_data = np.load('Datos/train244all.npy')
test_data = np.load('Datos/test244.npy')
hog = cv2.HOGDescriptor()
def getHist(image):
image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
image = image * 255
image = image.astype('uint8')
return hog.compute(image)
histograms = [getHist(img) for img in train_data]
if __name__ == '__main__':
    # Save the histograms (the output path below is an assumption)
    np.save('Datos/histogramsHOG.npy', np.array(histograms))
|
gpl-3.0
| 4,660,950,177,554,977,000
| 21.483871
| 94
| 0.665522
| false
| 2.95935
| false
| false
| false
|
m3z/HT
|
openstack_dashboard/api/swift.py
|
1
|
9568
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import swiftclient
from django.conf import settings
from django.utils.translation import ugettext as _
from horizon import exceptions
from openstack_dashboard.api.base import url_for, APIDictWrapper
LOG = logging.getLogger(__name__)
FOLDER_DELIMITER = "/"
class Container(APIDictWrapper):
pass
class StorageObject(APIDictWrapper):
def __init__(self, apidict, container_name, orig_name=None, data=None):
super(StorageObject, self).__init__(apidict)
self.container_name = container_name
self.orig_name = orig_name
self.data = data
class PseudoFolder(APIDictWrapper):
"""
Wrapper to smooth out discrepancies between swift "subdir" items
and swift pseudo-folder objects.
"""
def __init__(self, apidict, container_name):
super(PseudoFolder, self).__init__(apidict)
self.container_name = container_name
def _has_content_type(self):
content_type = self._apidict.get("content_type", None)
return content_type == "application/directory"
@property
def name(self):
if self._has_content_type():
return self._apidict['name']
return self.subdir.rstrip(FOLDER_DELIMITER)
@property
def bytes(self):
if self._has_content_type():
return self._apidict['bytes']
return None
@property
def content_type(self):
return "application/directory"
def _objectify(items, container_name):
""" Splits a listing of objects into their appropriate wrapper classes. """
objects = {}
subdir_markers = []
# Deal with objects and object pseudo-folders first, save subdirs for later
for item in items:
if item.get("content_type", None) == "application/directory":
objects[item['name']] = PseudoFolder(item, container_name)
elif item.get("subdir", None) is not None:
subdir_markers.append(PseudoFolder(item, container_name))
else:
objects[item['name']] = StorageObject(item, container_name)
# Revisit subdirs to see if we have any non-duplicates
for item in subdir_markers:
if item.name not in objects.keys():
objects[item.name] = item
return objects.values()
def swift_api(request):
endpoint = url_for(request, 'object-store')
LOG.debug('Swift connection created using token "%s" and url "%s"'
% (request.user.token.id, endpoint))
return swiftclient.client.Connection(None,
request.user.username,
None,
preauthtoken=request.user.token.id,
preauthurl=endpoint,
auth_version="2.0")
def swift_container_exists(request, container_name):
try:
swift_api(request).head_container(container_name)
return True
except swiftclient.client.ClientException:
return False
def swift_object_exists(request, container_name, object_name):
try:
swift_api(request).head_object(container_name, object_name)
return True
except swiftclient.client.ClientException:
return False
def swift_get_containers(request, marker=None):
limit = getattr(settings, 'API_RESULT_LIMIT', 1000)
headers, containers = swift_api(request).get_account(limit=limit + 1,
marker=marker,
full_listing=True)
container_objs = [Container(c) for c in containers]
if(len(container_objs) > limit):
return (container_objs[0:-1], True)
else:
return (container_objs, False)
def swift_create_container(request, name):
if swift_container_exists(request, name):
raise exceptions.AlreadyExists(name, 'container')
swift_api(request).put_container(name)
return Container({'name': name})
def swift_delete_container(request, name):
swift_api(request).delete_container(name)
return True
def swift_get_objects(request, container_name, prefix=None, marker=None,
limit=None):
limit = limit or getattr(settings, 'API_RESULT_LIMIT', 1000)
kwargs = dict(prefix=prefix,
marker=marker,
limit=limit + 1,
delimiter=FOLDER_DELIMITER,
full_listing=True)
headers, objects = swift_api(request).get_container(container_name,
**kwargs)
object_objs = _objectify(objects, container_name)
if(len(object_objs) > limit):
return (object_objs[0:-1], True)
else:
return (object_objs, False)
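# Both listing helpers above fetch limit + 1 entries so callers can tell
# whether another page exists; a typical pagination step (illustrative) is:
#   objects, has_more = swift_get_objects(request, container_name, marker=marker)
#   if has_more:
#       marker = objects[-1].name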
def swift_filter_objects(request, filter_string, container_name, prefix=None,
marker=None):
# FIXME(kewu): Swift currently has no real filtering API, thus the marker
# parameter here won't actually help the pagination. For now I am just
# getting the largest number of objects from a container and filtering
# based on those objects.
limit = 9999
objects = swift_get_objects(request,
container_name,
prefix=prefix,
marker=marker,
limit=limit)
filter_string_list = filter_string.lower().strip().split(' ')
def matches_filter(obj):
return all(wildcard_search(obj.name.lower(), q) for q in filter_string_list)
return filter(matches_filter, objects[0])
def wildcard_search(string, q):
q_list = q.split('*')
if all(map(lambda x: x == '', q_list)):
return True
elif q_list[0] not in string:
return False
else:
if q_list[0] == '':
tail = string
else:
head, delimiter, tail = string.partition(q_list[0])
return wildcard_search(tail, '*'.join(q_list[1:]))
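# Illustrative matches for the recursive helper above (names are made up):
#   wildcard_search('backup-2012.tar', 'backup*tar')  -> True
#   wildcard_search('backup-2012.tar', 'snapshot*')   -> False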
def swift_copy_object(request, orig_container_name, orig_object_name,
new_container_name, new_object_name):
try:
# FIXME(gabriel): The swift currently fails at unicode in the
# copy_to method, so to provide a better experience we check for
# unicode here and pre-empt with an error message rather than
# letting the call fail.
str(orig_container_name)
str(orig_object_name)
str(new_container_name)
str(new_object_name)
except UnicodeEncodeError:
raise exceptions.HorizonException(_("Unicode is not currently "
"supported for object copy."))
if swift_object_exists(request, new_container_name, new_object_name):
raise exceptions.AlreadyExists(new_object_name, 'object')
headers = {"X-Copy-From": FOLDER_DELIMITER.join([orig_container_name,
orig_object_name])}
return swift_api(request).put_object(new_container_name,
new_object_name,
None,
headers=headers)
def swift_create_subfolder(request, container_name, folder_name):
headers = {'content-type': 'application/directory',
'content-length': 0}
etag = swift_api(request).put_object(container_name,
folder_name,
None,
headers=headers)
obj_info = {'subdir': folder_name, 'etag': etag}
return PseudoFolder(obj_info, container_name)
def swift_upload_object(request, container_name, object_name, object_file):
headers = {}
headers['X-Object-Meta-Orig-Filename'] = object_file.name
etag = swift_api(request).put_object(container_name,
object_name,
object_file,
headers=headers)
obj_info = {'name': object_name, 'bytes': object_file.size, 'etag': etag}
return StorageObject(obj_info, container_name)
def swift_delete_object(request, container_name, object_name):
swift_api(request).delete_object(container_name, object_name)
return True
def swift_get_object(request, container_name, object_name):
headers, data = swift_api(request).get_object(container_name, object_name)
orig_name = headers.get("x-object-meta-orig-filename")
obj_info = {'name': object_name, 'bytes': len(data)}
return StorageObject(obj_info,
container_name,
orig_name=orig_name,
data=data)
|
apache-2.0
| -1,708,836,969,548,699,400
| 35.10566
| 79
| 0.599289
| false
| 4.248668
| false
| false
| false
|
350dotorg/Django
|
django/core/mail/message.py
|
1
|
10976
|
import mimetypes
import os
import random
import time
from email import Charset, Encoders
from email.MIMEText import MIMEText
from email.MIMEMultipart import MIMEMultipart
from email.MIMEBase import MIMEBase
from email.Header import Header
from email.Utils import formatdate, getaddresses, formataddr
from django.conf import settings
from django.core.mail.utils import DNS_NAME
from django.utils.encoding import smart_str, force_unicode
# Don't BASE64-encode UTF-8 messages so that we avoid unwanted attention from
# some spam filters.
Charset.add_charset('utf-8', Charset.SHORTEST, Charset.QP, 'utf-8')
# Default MIME type to use on attachments (if it is not explicitly given
# and cannot be guessed).
DEFAULT_ATTACHMENT_MIME_TYPE = 'application/octet-stream'
class BadHeaderError(ValueError):
pass
# Copied from Python standard library, with the following modifications:
# * Used cached hostname for performance.
# * Added try/except to support lack of getpid() in Jython (#5496).
def make_msgid(idstring=None):
"""Returns a string suitable for RFC 2822 compliant Message-ID, e.g:
<20020201195627.33539.96671@nightshade.la.mastaler.com>
Optional idstring if given is a string used to strengthen the
uniqueness of the message id.
"""
timeval = time.time()
utcdate = time.strftime('%Y%m%d%H%M%S', time.gmtime(timeval))
try:
pid = os.getpid()
except AttributeError:
# No getpid() in Jython, for example.
pid = 1
randint = random.randrange(100000)
if idstring is None:
idstring = ''
else:
idstring = '.' + idstring
idhost = DNS_NAME
msgid = '<%s.%s.%s%s@%s>' % (utcdate, pid, randint, idstring, idhost)
return msgid
def forbid_multi_line_headers(name, val, encoding):
"""Forbids multi-line headers, to prevent header injection."""
encoding = encoding or settings.DEFAULT_CHARSET
val = force_unicode(val)
if '\n' in val or '\r' in val:
raise BadHeaderError("Header values can't contain newlines (got %r for header %r)" % (val, name))
try:
val = val.encode('ascii')
except UnicodeEncodeError:
if name.lower() in ('to', 'from', 'cc'):
result = []
for nm, addr in getaddresses((val,)):
nm = str(Header(nm.encode(encoding), encoding))
try:
addr = addr.encode('ascii')
except UnicodeEncodeError: # IDN
addr = str(Header(addr.encode(encoding), encoding))
result.append(formataddr((nm, addr)))
val = ', '.join(result)
else:
val = Header(val.encode(encoding), encoding)
else:
if name.lower() == 'subject':
val = Header(val)
return name, val
class SafeMIMEText(MIMEText):
def __init__(self, text, subtype, charset):
self.encoding = charset
MIMEText.__init__(self, text, subtype, charset)
def __setitem__(self, name, val):
name, val = forbid_multi_line_headers(name, val, self.encoding)
MIMEText.__setitem__(self, name, val)
class SafeMIMEMultipart(MIMEMultipart):
def __init__(self, _subtype='mixed', boundary=None, _subparts=None, encoding=None, **_params):
self.encoding = encoding
MIMEMultipart.__init__(self, _subtype, boundary, _subparts, **_params)
def __setitem__(self, name, val):
name, val = forbid_multi_line_headers(name, val, self.encoding)
MIMEMultipart.__setitem__(self, name, val)
class EmailMessage(object):
"""
A container for email information.
"""
content_subtype = 'plain'
mixed_subtype = 'mixed'
encoding = None # None => use settings default
def __init__(self, subject='', body='', from_email=None, to=None, bcc=None,
connection=None, attachments=None, headers=None):
"""
Initialize a single email message (which can be sent to multiple
recipients).
All strings used to create the message can be unicode strings
(or UTF-8 bytestrings). The SafeMIMEText class will handle any
necessary encoding conversions.
"""
if to:
assert not isinstance(to, basestring), '"to" argument must be a list or tuple'
self.to = list(to)
else:
self.to = []
if bcc:
assert not isinstance(bcc, basestring), '"bcc" argument must be a list or tuple'
self.bcc = list(bcc)
else:
self.bcc = []
self.from_email = from_email or settings.DEFAULT_FROM_EMAIL
self.subject = subject
self.body = body
self.attachments = attachments or []
self.extra_headers = headers or {}
self.connection = connection
def get_connection(self, fail_silently=False):
from django.core.mail import get_connection
if not self.connection:
self.connection = get_connection(fail_silently=fail_silently)
return self.connection
def message(self):
encoding = self.encoding or settings.DEFAULT_CHARSET
msg = SafeMIMEText(smart_str(self.body, encoding),
self.content_subtype, encoding)
msg = self._create_message(msg)
msg['Subject'] = self.subject
msg['From'] = self.extra_headers.get('From', self.from_email)
msg['To'] = self.extra_headers.get('To', ', '.join(self.to))
# Email header names are case-insensitive (RFC 2045), so we have to
# accommodate that when doing comparisons.
header_names = [key.lower() for key in self.extra_headers]
if 'date' not in header_names:
msg['Date'] = formatdate()
if 'message-id' not in header_names:
msg['Message-ID'] = make_msgid()
for name, value in self.extra_headers.items():
if name.lower() in ('from', 'to'): # From and To are already handled
continue
msg[name] = value
return msg
def recipients(self):
"""
Returns a list of all recipients of the email (includes direct
addressees as well as Bcc entries).
"""
return self.to + self.bcc
def send(self, fail_silently=False):
"""Sends the email message."""
if not self.recipients():
# Don't bother creating the network connection if there's nobody to
# send to.
return 0
return self.get_connection(fail_silently).send_messages([self])
def attach(self, filename=None, content=None, mimetype=None):
"""
Attaches a file with the given filename and content. The filename can
be omitted and the mimetype is guessed, if not provided.
If the first parameter is a MIMEBase subclass it is inserted directly
into the resulting message attachments.
"""
if isinstance(filename, MIMEBase):
assert content == mimetype == None
self.attachments.append(filename)
else:
assert content is not None
self.attachments.append((filename, content, mimetype))
def attach_file(self, path, mimetype=None):
"""Attaches a file from the filesystem."""
filename = os.path.basename(path)
content = open(path, 'rb').read()
self.attach(filename, content, mimetype)
def _create_message(self, msg):
return self._create_attachments(msg)
def _create_attachments(self, msg):
if self.attachments:
encoding = self.encoding or settings.DEFAULT_CHARSET
body_msg = msg
msg = SafeMIMEMultipart(_subtype=self.mixed_subtype, encoding=encoding)
if self.body:
msg.attach(body_msg)
for attachment in self.attachments:
if isinstance(attachment, MIMEBase):
msg.attach(attachment)
else:
msg.attach(self._create_attachment(*attachment))
return msg
def _create_mime_attachment(self, content, mimetype):
"""
Converts the content, mimetype pair into a MIME attachment object.
"""
basetype, subtype = mimetype.split('/', 1)
if basetype == 'text':
encoding = self.encoding or settings.DEFAULT_CHARSET
attachment = SafeMIMEText(smart_str(content, encoding), subtype, encoding)
else:
# Encode non-text attachments with base64.
attachment = MIMEBase(basetype, subtype)
attachment.set_payload(content)
Encoders.encode_base64(attachment)
return attachment
def _create_attachment(self, filename, content, mimetype=None):
"""
Converts the filename, content, mimetype triple into a MIME attachment
object.
"""
if mimetype is None:
mimetype, _ = mimetypes.guess_type(filename)
if mimetype is None:
mimetype = DEFAULT_ATTACHMENT_MIME_TYPE
attachment = self._create_mime_attachment(content, mimetype)
if filename:
attachment.add_header('Content-Disposition', 'attachment',
filename=filename)
return attachment
class EmailMultiAlternatives(EmailMessage):
"""
A version of EmailMessage that makes it easy to send multipart/alternative
messages. For example, including text and HTML versions of the text is
made easier.
"""
alternative_subtype = 'alternative'
def __init__(self, subject='', body='', from_email=None, to=None, bcc=None,
connection=None, attachments=None, headers=None, alternatives=None):
"""
Initialize a single email message (which can be sent to multiple
recipients).
All strings used to create the message can be unicode strings (or UTF-8
bytestrings). The SafeMIMEText class will handle any necessary encoding
conversions.
"""
super(EmailMultiAlternatives, self).__init__(subject, body, from_email, to, bcc, connection, attachments, headers)
        self.alternatives = alternatives or []
def attach_alternative(self, content, mimetype):
"""Attach an alternative content representation."""
assert content is not None
assert mimetype is not None
self.alternatives.append((content, mimetype))
def _create_message(self, msg):
return self._create_attachments(self._create_alternatives(msg))
def _create_alternatives(self, msg):
encoding = self.encoding or settings.DEFAULT_CHARSET
if self.alternatives:
body_msg = msg
msg = SafeMIMEMultipart(_subtype=self.alternative_subtype, encoding=encoding)
if self.body:
msg.attach(body_msg)
for alternative in self.alternatives:
msg.attach(self._create_mime_attachment(*alternative))
return msg
|
bsd-3-clause
| 5,907,717,041,566,670,000
| 36.979239
| 122
| 0.622449
| false
| 4.31277
| false
| false
| false
|
erichaase/topcoder-python
|
topcoder/knights_tour.py
|
1
|
2012
|
"""
`KnightsTour <http://community.topcoder.com/stat?c=problem_statement&pm=10577>`__
"""
def solution (board):
b, n = Board(board), 1
while b.update(): n += 1
return n
class Board:
def __init__ (self, board):
self.board = [list(row) for row in board]
def update (self):
k, t = self.next_move()
if k and t:
self.board[k[0]][k[1]] = "*"
self.board[t[0]][t[1]] = "K"
return True
else:
return False
def next_move (self):
k = self.knight()
m = self.moves(k)
m.sort(key = lambda p: p[1])
m.sort(key = lambda p: p[0])
m.sort(key = lambda p: len(self.moves(p)))
t = None
if len(m) > 0:
t = m[0]
return k, t
def knight (self):
for x, row in enumerate(self.board):
for y, cell in enumerate(row):
if cell == "K":
return x, y
return None, None
def moves (self, p):
x, y = p[0], p[1]
targets = [
[x - 2, y - 1],
[x - 2, y + 1],
[x - 1, y + 2],
[x + 1, y + 2],
[x + 2, y - 1],
[x + 2, y + 1],
[x - 1, y - 2],
[x + 1, y - 2],
]
m = []
for target in targets:
if self.valid(target):
m.append(target)
return m
def valid (self, p):
x, y = p[0], p[1]
if x < 0:
return False
if x >= len(self.board):
return False
if y < 0:
return False
if y >= len(self.board[0]):
return False
c = self.board[x][y]
if c == "*":
return False
if c == "K":
return False
if c == ".":
return True
return False
def __str__ (self):
s = ""
for row in self.board:
s += "".join(row)
s += "\n"
return s
|
mit
| -7,622,723,604,843,863,000
| 21.863636
| 81
| 0.394135
| false
| 3.451115
| false
| false
| false
|
fupadev/FuME
|
fume/threads/DownloadProcessor.py
|
1
|
5680
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# --------------------------------------------------------------------------
# FuME FuPa Match Explorer Copyright (c) 2017 Andreas Feldl <fume@afeldl.de>
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 3 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# The full license of the GNU General Public License is in the file LICENCE,
# distributed with this software; if not, see http://www.gnu.org/licenses/.
# --------------------------------------------------------------------------
import sqlite3
import lxml.html
import requests
from PyQt5 import QtCore
class DownloadProcessor(QtCore.QThread):
loggerSignal = QtCore.pyqtSignal(str)
statusBarSignal = QtCore.pyqtSignal(str)
def __init__(self, options):
super(DownloadProcessor, self).__init__(options['parent'])
self.region = options['region']
self.date_from = options['date-from']
self.date_to = options['date-to']
self.dbPath = options['database-path']
# def __del__(self):
# self.wait()
def download(self, date):
uAStr = 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36'
headers = {'User-Agent': uAStr}
url = 'https://www.fupa.net/index.php?page=kalender&site_linkurl=%s&date=%s' % (self.region, date)
r = requests.get(url, headers=headers)
doc = lxml.html.fromstring(r.content)
path = '/html/body//table[@class]//tr/td/a[not(contains(@class, "spielbericht_icon"))]//text() | ' \
'/html/body//table[@class]//tr/td//img/@src | ' \
'/html/body//table[@class]//th//text() | ' \
'/html/body//table[@class]//th/a/@href | ' \
'/html/body//table[@class]//tr/td[@style]/a/@href'
raw = doc.xpath(path)
# replacing '-Live-' with '-:-'
raw = [i.replace('https://www.fupa.net/fupa/images/buttons/tipp_live.jpg', '-:-') for i in raw]
# From
# ['/liga/bezirksliga-west-31261.html', 'Bezirksliga West', '19:15 Uhr', 'TSV Abensberg', '-:-',
# '/spielberichte/tsv-abensberg-spvgg-mariaposching-3679861.html', 'SpVgg Mariaposching',
# To
# [['Bezirksliga West', '19:15 Uhr', 'TSV Abensberg', '-:-', '3679861', 'SpVgg Mariaposching'],
matches = []
for i, d in enumerate(raw):
if 'Relegation' in d:
league = 'Relegation'
elif '/liga/' in d:
league = raw[i + 1]
elif 'Test' in d:
league = raw[i]
if 'Uhr' in d:
# print(i)
current = [league]
for i in raw[i:i + 5]:
if '/spielberichte/' in i:
i = i.split('.')[0].split('-')[-1]
                    if '/spielberichte/' in i:  # bug in Fupa: URL = '/spielberichte/.html'
i = ''
current.append(i)
matches.append(current)
# rearrange
# ['3679861', 'Bezirksliga West', '19:15 Uhr', 'TSV Abensberg', 'SpVgg Mariaposching', '-:-']
tmp = []
for spiel in matches:
order = [4, 0, 1, 2, 5, 3]
spiel = [spiel[i] for i in order]
spiel[2] = date + ' ' + spiel[2][0:5]
tmp.append(spiel)
data = tmp
connection = sqlite3.connect(self.dbPath)
cursor = connection.cursor()
for p in data:
format_str = """INSERT OR IGNORE INTO calendar(match_id, league, match_date, home, guest, result, region)
VALUES ("{match_id}", "{league}", "{match_date}", "{home}", "{guest}", "{result}", "{region}");"""
sql_command = format_str.format(match_id=p[0], league=p[1], match_date=p[2],
home=p[3], guest=p[4], result=p[5], region=self.region)
try:
cursor.execute(sql_command)
except:
self.loggerSignal.emit('Folgendes Spiel wurde nicht hinzugefügt: %s' % p)
update_str = """UPDATE calendar
SET match_date="{match_date}", result="{result}", league="{league}" WHERE match_id = "{match_id}";"""
sql_command = update_str.format(match_id=p[0], match_date=p[2], league=p[1], result=p[5])
try:
cursor.execute(sql_command)
except:
self.loggerSignal.emit('Folgendes Spiel wurde nicht hinzugefügt: %s' % p)
connection.commit()
connection.close()
return len(data)
def run(self):
self.statusBarSignal.emit("Download")
date_from = self.date_from
date_to = self.date_to.addDays(1)
counter = 0
while date_from != date_to:
try:
counter += self.download(date_from.toString("yyyy-MM-dd"))
except Exception as e:
self.loggerSignal.emit('Fehler beim importieren: %s' % e)
return
date_from = date_from.addDays(1)
self.statusBarSignal.emit("Download: #%s Spiele" % counter)
self.loggerSignal.emit('%s Spiele erfolgreich hinzugefügt' % counter)
self.statusBarSignal.emit("Bereit")
|
gpl-3.0
| -4,301,365,850,361,170,000
| 38.423611
| 121
| 0.543773
| false
| 3.457369
| false
| false
| false
|
ibc/MediaSoup
|
worker/deps/catch/projects/TestScripts/testRandomOrder.py
|
1
|
2135
|
#!/usr/bin/env python3
"""
This test script verifies that the random ordering of tests inside
Catch2 is invariant in regards to subsetting. This is done by running
the binary 3 times, once with all tests selected, and twice with smaller
subsets of tests selected, and verifying that the selected tests are in
the same relative order.
"""
import subprocess
import sys
import random
def list_tests(self_test_exe, tags, rng_seed):
cmd = [self_test_exe, '--list-test-names-only', '--order', 'rand',
'--rng-seed', str(rng_seed)]
tags_arg = ','.join('[{}]'.format(t) for t in tags)
if tags_arg:
cmd.append(tags_arg + '~[.]')
process = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
    if stderr:
        raise RuntimeError("Unexpected error output:\n" + stderr.decode())
result = stdout.split(b'\n')
result = [s for s in result if s]
if len(result) < 2:
raise RuntimeError("Unexpectedly few tests listed (got {})".format(
len(result)))
return result
def check_is_sublist_of(shorter, longer):
assert len(shorter) < len(longer)
assert len(set(longer)) == len(longer)
indexes_in_longer = {s: i for i, s in enumerate(longer)}
for s1, s2 in zip(shorter, shorter[1:]):
assert indexes_in_longer[s1] < indexes_in_longer[s2], (
'{} comes before {} in longer list.\n'
'Longer: {}\nShorter: {}'.format(s2, s1, longer, shorter))
def main():
self_test_exe, = sys.argv[1:]
# We want a random seed for the test, but want to avoid 0,
# because it has special meaning
seed = random.randint(1, 2 ** 32 - 1)
list_one_tag = list_tests(self_test_exe, ['generators'], seed)
list_two_tags = list_tests(self_test_exe, ['generators', 'matchers'], seed)
list_all = list_tests(self_test_exe, [], seed)
# First, verify that restricting to a subset yields the same order
check_is_sublist_of(list_two_tags, list_all)
check_is_sublist_of(list_one_tag, list_two_tags)
if __name__ == '__main__':
sys.exit(main())
|
isc
| 6,595,939,224,350,894,000
| 35.186441
| 79
| 0.640749
| false
| 3.405104
| true
| false
| false
|
arpitprogressive/arpittest
|
intergration_test/banner.py
|
1
|
18254
|
# -*- coding: utf-8 -*-
"""
banner
Description goes here...
:copyright: (c) 2014 by Openlabs Technologies & Consulting (P) Limited
:license: BSD, see LICENSE for more details.
"""
import unittest
from selenium.webdriver.common.by import By
from selenium.common.exceptions import NoSuchElementException, \
NoAlertPresentException
from base import Selenium2OnSauce
class Banner(Selenium2OnSauce):
def test_advanced_skills(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/foundation-advance-skills-development/advanced-skills/")
self.assertTrue(self.is_element_present(By.ID, "wfmis"))
def test_advanced_skills_epp(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/ssc-nasscom-training-programs/software-products/advanced-skills/")
def test_advanced_skills_erd(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/ssc-nasscom-training-programs/erd/advanced-skills/")
def test_bpm(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/ssc-nasscom-training-programs/bpm/")
    def test_central_government(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/research/government-research/central-government/")
self.assertTrue(self.is_element_present(By.CSS_SELECTOR, "div.mid-box-flip"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[2]/div"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[3]/div"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[4]/div"))
def test_company_research(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/research/company-research/")
def test_company_training_provider(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/company-training-programs/")
def test_courseware(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/foundation-advance-skills-development/foundation-skills/courseware/")
self.assertTrue(self.is_element_present(By.CSS_SELECTOR, "div.mid-box-flip"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[2]/div"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[3]/div"))
def test_developing_tomorrow(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/")
def test_download(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/foundation-advance-skills-development/foundation-skills/courseware/download/")
def test_epp(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/ssc-nasscom-training-programs/erd/foundation-skills/epp/")
self.assertTrue(self.is_element_present(By.CSS_SELECTOR, "div.mid-box-flip"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[2]/div"))
def test_erd(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/ssc-nasscom-training-programs/erd/")
def test_event(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/event-workforce-enablement/")
def test_executive_summary(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/foundation-advance-skills-development/foundation-skills/courseware/read-only/executive-summary/")
def test_foundation_advance_skills_devlopment(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/foundation-advance-skills-development/")
def test_foundation_convocation_banner(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/")
self.assertTrue(self.is_element_present(By.XPATH, "(//a[contains(text(),'Know More')])[3]"))
driver.get("http://pursuite.openlabs.us/about-us/ssc-nasscom/vision-mission/")
def test_foundation_skills_bpm(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/ssc-nasscom-training-programs/bpm/foundation-skills/")
def test_foundation_skills_ed(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/ssc-nasscom-training-programs/erd/foundation-skills/")
self.assertTrue(self.is_element_present(By.CSS_SELECTOR, "div.mid-box-flip"))
def test_foundation_skills_epp(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/ssc-nasscom-training-programs/software-products/foundation-skills/")
def test_full_course(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/foundation-advance-skills-development/foundation-skills/courseware/read-only/full-course/")
def test_gbfs_bpm(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/ssc-nasscom-training-programs/bpm/foundation-skills/gbfs/")
self.assertTrue(self.is_element_present(By.CSS_SELECTOR, "span.filetitle"))
def test_government(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/research/government-research/")
self.assertTrue(self.is_element_present(By.CSS_SELECTOR, "div.mid-box-flip"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[2]/div"))
def test_government_research(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/research/government-research/")
self.assertTrue(self.is_element_present(By.CSS_SELECTOR, "div.mid-box-flip"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[2]/div"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[3]/div"))
def test_government_training_program(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/government-training-programs/")
    def test_help_you_choose(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/")
self.assertTrue(self.is_element_present(By.LINK_TEXT, "Know More"))
def test_ict_academy_tamilnadu(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/private-sector-training-programs/ict-academy-tamilnadu/")
def test_il_fs(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resougvrces/private-sector-training-programs/ilfs/")
def test_implementation_cycle_bpm(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/ssc-nasscom-training-programs/bpm/foundation-skills/gbfs/implementation-cycle/")
def test_interactive_tools(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/interactive-tools/")
def test_it_initiative(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/")
self.assertTrue(self.is_element_present(By.CSS_SELECTOR, "div.mid-box-flip"))
def test_it_ites(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/it-ites-initiativesprograms/")
self.assertTrue(self.is_element_present(By.CSS_SELECTOR, "div.mid-box-flip"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[2]/div"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[7]/div"))
def test_listining_of_programs(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/event-workforce-enablement/listing-programs/")
def test_nasscom_research(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/research/nasscom-research/")
def test_niit(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/private-sector-training-programs/niit/")
def test_obf_bpm(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/ssc-nasscom-training-programs/bpm/foundation-skills/gbfs/outcome-based-framework-gbfs/")
self.assertTrue(self.is_element_present(By.CSS_SELECTOR, "span.filetitle"))
def test_other_bodies_government(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/government-training-programs/other-bodies/")
self.assertTrue(self.is_element_present(By.CSS_SELECTOR, "div.mid-box-flip"))
def test_other_bodies(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/research/government-research/other-bodies/")
self.assertTrue(self.is_element_present(By.CSS_SELECTOR, "div.mid-box-flip"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[2]/div"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[3]/div"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[4]/div"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[5]/div"))
def test_other_publication(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/foundation-advance-skills-development/foundation-skills/courseware/other-publication/")
def test_policy_development(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/policy-development/")
self.assertTrue(self.is_element_present(By.CSS_SELECTOR, "div.mid-box-flip"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[2]/div"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[3]/div"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[4]/div"))
def test_private_sector_training_programs(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/private-sector-training-programs/")
def test_program_registration(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/event-workforce-enablement/program-registration/")
def test_promotion_marketing(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/promotion-marketing/")
def test_read_only(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/foundation-advance-skills-development/foundation-skills/courseware/read-only/")
self.assertTrue(self.is_element_present(By.CSS_SELECTOR, "div.mid-box-flip"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[2]/div"))
def test_research(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/research/")
self.assertTrue(self.is_element_present(By.CSS_SELECTOR, "div.mid-box-flip"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[2]/div"))
def test_skills_academy(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/private-sector-training-programs/skills-academy/")
def test_software_products(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/ssc-nasscom-training-programs/software-products/")
def test_ssc_training_programs(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/ssc-nasscom-training-programs/")
def test_state_government(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/research/government-research/state-government/")
self.assertTrue(self.is_element_present(By.CSS_SELECTOR, "div.mid-box-flip"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[2]/div"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[3]/div"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[4]/div"))
def test_talent_sprint(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/private-sector-training-programs/talent-sprint/")
def test_training_materials(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/training-materials/")
self.assertTrue(self.is_element_present(By.CSS_SELECTOR, "div.mid-box-flip"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[2]/div"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[3]/div"))
def test_training_that_helps_you(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/")
self.assertTrue(self.is_element_present(By.XPATH, "(//a[contains(text(),'Know More')])[2]"))
def test_training_tools(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/training-tools/")
def is_element_present(self, how, what):
try: self.driver.find_element(by=how, value=what)
except NoSuchElementException, e: return False
return True
def is_alert_present(self):
try: self.driver.switch_to_alert()
except NoAlertPresentException, e: return False
return True
def close_alert_and_get_its_text(self):
try:
alert = self.driver.switch_to_alert()
alert_text = alert.text
if self.accept_next_alert:
alert.accept()
else:
alert.dismiss()
return alert_text
finally: self.accept_next_alert = True
if __name__ == "__main__":
unittest.main()
|
bsd-3-clause
| 7,783,938,493,523,269,000
| 60.461279
| 238
| 0.7165
| false
| 3.1713
| true
| false
| false
|
alfa-addon/addon
|
plugin.video.alfa/channels/bloghorror.py
|
1
|
5917
|
# -*- coding: utf-8 -*-
# -*- Channel BlogHorror -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import sys
PY3 = False
if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
import os
import re
from bs4 import BeautifulSoup
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger, subtitletools
from channelselector import get_thumb
host = 'http://bloghorror.com/'
fanart = 'http://bloghorror.com/wp-content/uploads/2015/04/bloghorror-2017-x.jpg'
def create_soup(url, referer=None, unescape=False):
logger.info()
if referer:
data = httptools.downloadpage(url, headers={'Referer': referer}).data
else:
data = httptools.downloadpage(url).data
if unescape:
data = scrapertools.unescape(data)
soup = BeautifulSoup(data, "html5lib", from_encoding="utf-8")
return soup
def mainlist(item):
logger.info()
itemlist = list()
itemlist.append(Item(channel=item.channel, fanart=fanart, title="Todas", action="list_all",
url=host+'/category/terror', thumbnail=get_thumb('all', auto=True)))
itemlist.append(Item(channel=item.channel, fanart=fanart, title="Asiaticas", action="list_all",
url=host+'/category/asiatico', thumbnail=get_thumb('asiaticas', auto=True)))
itemlist.append(Item(channel=item.channel, fanart=fanart, title = 'Buscar', action="search", url=host + '?s=', pages=3,
thumbnail=get_thumb('search', auto=True)))
return itemlist
def list_all(item):
logger.info()
itemlist = list()
soup = create_soup(item.url)
matches = soup.find(id="primary").find_all("article")
for elem in matches:
cat = elem.find("a", class_="covernews-categories")["alt"]
if cat in ["View all posts in Las Mejores Peliculas de Terror", "View all posts in Editoriales"]:
continue
        title_data = elem.find("h3", class_="article-title").text.strip()
        if "(" in title_data:
            title = title_data.replace(")", "").split(" (")
        elif "[" in title_data:
            title = title_data.replace("]", "").split(" [")
        else:
            # no year in the scraped title, keep it whole
            title = [title_data]
        url = elem.find("h3", class_="article-title").a["href"]
thumb = elem.find("div", class_="data-bg-hover")["data-background"]
try:
year = title[1]
except:
year = "-"
if "serie" in url:
continue
itemlist.append(Item(channel=item.channel, title=title[0], url=url, contentTitle=title[0], thumbnail=thumb,
action="findvideos", infoLabels={"year": year}))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    # Pagination
if itemlist:
try:
next_page = soup.find("div", class_="navigation").find("a", class_="next")["href"]
if next_page != '':
itemlist.append(Item(channel=item.channel, fanart=fanart, action="list_all", title='Siguiente >>>',
url=next_page))
except:
pass
return itemlist
def findvideos(item):
logger.info()
itemlist = list()
soup = create_soup(item.url).find("div", class_="entry-content-wrap")
quality = scrapertools.find_single_match(soup.text, r"Calidad: ([^\n]+)\n").split("+")
urls_list = soup.find_all("a", {"data-wpel-link": True, "href": re.compile("magnet|torrent")})
try:
sub_url = soup.find("a", {"data-wpel-link": True, "href": re.compile("subdivx")})["href"]
except:
sub_url = ""
qlty_cnt = 0
for url in urls_list:
url = url["href"]
if not sub_url:
lang = 'VO'
else:
lang = 'VOSE'
try:
qlty = quality[qlty_cnt]
qlty_cnt += 1
except:
qlty = "SD"
itemlist.append(Item(channel=item.channel, title="[%s][%s][%s]", url=url, action="play", quality=qlty,
language=lang, subtitle=sub_url, infoLabels=item.infoLabels))
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % (i.server, i.language, i.quality))
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(Item(channel=item.channel,
title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
url=item.url,
action="add_pelicula_to_library",
extra="findvideos",
contentTitle=item.contentTitle
))
return itemlist
def play(item):
logger.info()
if item.subtitle:
sub = subtitletools.get_from_subdivx(item.subtitle)
return [item.clone(subtitle=sub)]
else:
return [item]
def search(item, texto):
logger.info()
try:
texto = texto.replace(" ", "+")
item.url = item.url + texto
if texto != '':
return list_all(item)
else:
return []
    # Catch the exception so a failing channel does not break the global search
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria in ['peliculas', 'terror', 'torrent']:
item.url = host
itemlist = list_all(item)
if itemlist[-1].title == 'Siguiente >>>':
itemlist.pop()
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
|
gpl-3.0
| 5,249,670,684,535,636,000
| 28.575
| 123
| 0.572612
| false
| 3.70382
| false
| false
| false
|
NicholasHoCode/Galaxy
|
assets/code/GF.py
|
1
|
2040
|
from PIL import Image
import numpy as np
import math
from scipy import signal
def boxfilter(n):
assert (n%2 != 0),"Dimension must be odd"
a = np.empty((n, n))
a.fill(1/(n*n))
return a
def gauss1d(sigma):
arr_length = 6*sigma
if arr_length % 2 == 0:
val = ((arr_length)/2)+1
elif arr_length.is_integer() == False:
arr_length = np.ceil(arr_length)
val = (arr_length + 1)/2
if arr_length % 2 == 0:
arr_length = arr_length + 1
val = arr_length - 1
        elif arr_length % 2 != 0:
            val = (arr_length + 1)/2
    else:
        # 6*sigma is already an odd integer (e.g. sigma = 1.5), so use it directly
        val = (arr_length + 1)/2
lst = list(range(int(val)))
neg_lst = [-x for x in lst]
neg_lst.remove(0)
neg_lst.reverse()
a_val = neg_lst + lst
a_val = [math.exp(- (abs(x)*abs(x)) / (2*sigma*sigma)) for x in a_val]
sum_aval = sum(a_val)
a_aval = [(1/sum_aval)*x for x in a_val]
return np.asarray(a_aval)
def gauss2d(sigma):
f = gauss1d(sigma)
return signal.convolve2d(f[np.newaxis], np.transpose(f[np.newaxis]))
def gaussconvolve2d(array,sigma):
assert (array.ndim == 2),"Array must be 2D"
filter = gauss2d(sigma)
result = signal.convolve2d(array, filter, 'same')
return result
# signal.convolve2d and signal.correlate2d produce different results when the filter is not symmetric,
# because convolution flips the kernel before sliding it over the image. Convolution (rather than
# correlation) is also what allows several symmetric filters to be pre-convolved into a single filter,
# since convolution is associative.
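# Minimal illustration of the point above (an added sketch, not part of the original assignment):
# a cross-shaped kernel is unchanged by a 180-degree flip, so convolution and correlation agree on it,
# while an asymmetric kernel makes the two operations differ.
_demo_img = np.arange(16, dtype=float).reshape(4, 4)
_sym_kernel = np.array([[0., 1., 0.],
                        [1., 4., 1.],
                        [0., 1., 0.]])
_asym_kernel = np.array([[0., 1., 0.],
                         [0., 2., 0.],
                         [0., 0., 3.]])
# symmetric kernel: identical results
assert np.allclose(signal.convolve2d(_demo_img, _sym_kernel, 'same'),
                   signal.correlate2d(_demo_img, _sym_kernel, 'same'))
# asymmetric kernel: the flipped and unflipped kernels weight the neighbourhood differently
assert not np.allclose(signal.convolve2d(_demo_img, _asym_kernel, 'same'),
                       signal.correlate2d(_demo_img, _asym_kernel, 'same'))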
im = Image.open('bb.jpg')
im.show()
im = im.convert('L')
im_arr = np.asarray(im)
nim = gaussconvolve2d(im_arr, 3)
fim = Image.fromarray(nim)
if fim.mode != 'L':
fim = fim.convert('L')
fim.save('bb_filtered.jpg')
# Since convolution with a Gaussian is separable, a 2D Gaussian filter can be obtained as the outer
# product of two 1D Gaussian filters. A more efficient implementation therefore convolves each row with
# a 1D filter and then each column with a 1D filter, which costs O(n) per pixel instead of O(n^2) for an
# n-tap kernel.
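# Minimal sketch of the separable implementation described above (an added illustration, not part of
# the original assignment): convolve the rows with a 1D Gaussian, then the columns, and check on a small
# random array that the result matches the full 2D convolution up to floating-point error.
_demo = np.random.rand(32, 32)
_g1 = gauss1d(3)
_row_pass = signal.convolve2d(_demo, _g1[np.newaxis], 'same')       # filter every row
_separable = signal.convolve2d(_row_pass, _g1[np.newaxis].T, 'same')  # then every column
_full = signal.convolve2d(_demo, gauss2d(3), 'same')                 # direct 2D convolution
assert np.allclose(_separable, _full)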
|
mit
| -6,833,917,436,678,237,000
| 34.172414
| 208
| 0.659314
| false
| 3.044776
| false
| false
| false
|
Azure/azure-sdk-for-python
|
common/smoketest/key_vault_keys_async.py
|
1
|
1475
|
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import uuid
from azure.keyvault.keys.aio import KeyClient
from key_vault_base_async import KeyVaultBaseAsync
class KeyVaultKeys(KeyVaultBaseAsync):
def __init__(self):
args = self.get_client_args()
self.key_client = KeyClient(**args)
self.key_name = "key-name-" + uuid.uuid1().hex
async def create_rsa_key(self):
print("Creating an RSA key...")
await self.key_client.create_rsa_key(name=self.key_name, size=2048)
print("\tdone")
async def get_key(self):
print("Getting a key...")
key = await self.key_client.get_key(name=self.key_name)
print("\tdone, key: {}.".format(key.name))
async def delete_key(self):
print("Deleting a key...")
deleted_key = await self.key_client.delete_key(name=self.key_name)
print("\tdone: " + deleted_key.name)
async def run(self):
print("")
print("------------------------")
print("Key Vault - Keys\nIdentity - Credential")
print("------------------------")
print("1) Create a key")
print("2) Get that key")
print("3) Delete that key (Clean up the resource)")
print("")
try:
await self.create_rsa_key()
await self.get_key()
finally:
await self.delete_key()
|
mit
| 5,781,894,766,305,510,000
| 31.086957
| 75
| 0.541695
| false
| 3.891821
| false
| false
| false
|
pytroll/pygac
|
pygac/tests/test_calibrate_pod.py
|
1
|
5529
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2014, 2015 Martin Raspaud
# Author(s):
# Martin Raspaud <martin.raspaud@smhi.se>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Test function for the POD calibration.
"""
import unittest
try:
import mock
except ImportError:
from unittest import mock
import numpy as np
from pygac.calibration import Calibrator, calibrate_solar, calibrate_thermal
class TestGenericCalibration(unittest.TestCase):
def test_calibration_vis(self):
counts = np.array([[0, 0, 0, 0, 0,
512, 512, 512, 512, 512,
1023, 1023, 1023, 1023, 1023],
[41, 41, 41, 41, 41,
150, 150, 150, 150, 150,
700, 700, 700, 700, 700]])
year = 1997
jday = 196
spacecraft_id = "noaa14"
cal = Calibrator(spacecraft_id)
corr = 1
channel = 0
ref1 = calibrate_solar(counts[:, channel::5], channel, year, jday, cal, corr)
channel = 1
ref2 = calibrate_solar(counts[:, channel::5], channel, year, jday, cal, corr)
channel = 2
data = np.ma.array(counts[:, channel::5], mask=True)
ref3 = calibrate_solar(data, channel, year, jday, cal, corr)
expected = (np.array([[np.nan, 60.891074, 126.953364],
[0., 14.091565, 85.195791]]),
np.array([[np.nan, 72.98262, 152.16334],
[0., 16.889821, 102.113687]]),
np.array([[-32001., -32001., -32001.],
[-32001., -32001., -32001.]]))
np.testing.assert_allclose(ref1, expected[0])
np.testing.assert_allclose(ref2, expected[1])
np.testing.assert_allclose(ref3.filled(-32001), expected[2])
def test_calibration_ir(self):
counts = np.array([[0, 0, 612, 0, 0,
512, 512, 487, 512, 512,
923, 923, 687, 923, 923],
[41, 41, 634, 41, 41,
150, 150, 461, 150, 150,
700, 700, 670, 700, 700],
[241, 241, 656, 241, 241,
350, 350, 490, 350, 350,
600, 600, 475, 600, 600]])
prt_counts = np.array([0, 230, 230])
ict_counts = np.array([[745.3, 397.9, 377.8],
[744.8, 398.1, 378.4],
[745.7, 398., 378.3]])
space_counts = np.array([[987.3, 992.5, 989.4],
[986.9, 992.8, 989.6],
[986.3, 992.3, 988.9]])
spacecraft_id = "noaa14"
cal = Calibrator(spacecraft_id)
ch3 = calibrate_thermal(counts[:, 2::5],
prt_counts,
ict_counts[:, 0],
space_counts[:, 0],
line_numbers=np.array([1, 2, 3]),
channel=3,
cal=cal)
expected_ch3 = np.array([[298.28466, 305.167571, 293.16182],
[296.878502, 306.414234, 294.410224],
[295.396779, 305.020259, 305.749526]])
np.testing.assert_allclose(expected_ch3, ch3)
ch4 = calibrate_thermal(counts[:, 3::5],
prt_counts,
ict_counts[:, 1],
space_counts[:, 1],
line_numbers=np.array([1, 2, 3]),
channel=4,
cal=cal)
expected_ch4 = np.array([[325.828062, 275.414804, 196.214709],
[322.359517, 312.785057, 249.380649],
[304.326806, 293.490822, 264.148021]])
np.testing.assert_allclose(expected_ch4, ch4)
ch5 = calibrate_thermal(counts[:, 4::5],
prt_counts,
ict_counts[:, 2],
space_counts[:, 2],
line_numbers=np.array([1, 2, 3]),
channel=5,
cal=cal)
expected_ch5 = np.array([[326.460316, 272.146547, 187.434456],
[322.717606, 312.388155, 244.241633],
[303.267012, 291.590832, 260.05426]])
np.testing.assert_allclose(expected_ch5, ch5)
def suite():
"""The suite for test_slerp
"""
loader = unittest.TestLoader()
mysuite = unittest.TestSuite()
mysuite.addTest(loader.loadTestsFromTestCase(TestGenericCalibration))
return mysuite
if __name__ == '__main__':
unittest.main()
|
gpl-3.0
| 4,917,036,265,671,069,000
| 35.375
| 85
| 0.477302
| false
| 3.748475
| true
| false
| false
|
mind1master/aiohttp
|
tests/test_client_session.py
|
1
|
14280
|
import asyncio
import contextlib
import gc
import http.cookies
import re
import types
from unittest import mock
import pytest
from multidict import CIMultiDict, MultiDict
import aiohttp
from aiohttp import web
from aiohttp.client import ClientSession
from aiohttp.connector import BaseConnector, TCPConnector
@pytest.fixture
def connector(loop):
conn = BaseConnector(loop=loop)
transp = mock.Mock()
conn._conns['a'] = [(transp, 'proto', 123)]
return conn
@pytest.yield_fixture
def create_session(loop):
session = None
def maker(*args, **kwargs):
nonlocal session
session = ClientSession(*args, loop=loop, **kwargs)
return session
yield maker
if session is not None:
session.close()
@pytest.fixture
def session(create_session):
return create_session()
@pytest.fixture
def params():
return dict(
headers={"Authorization": "Basic ..."},
max_redirects=2,
encoding="latin1",
version=aiohttp.HttpVersion10,
compress="deflate",
chunked=True,
expect100=True,
read_until_eof=False)
def test_init_headers_simple_dict(create_session):
session = create_session(headers={"h1": "header1",
"h2": "header2"})
assert (sorted(session._default_headers.items()) ==
([("H1", "header1"), ("H2", "header2")]))
def test_init_headers_list_of_tuples(create_session):
session = create_session(headers=[("h1", "header1"),
("h2", "header2"),
("h3", "header3")])
assert (session._default_headers ==
CIMultiDict([("h1", "header1"),
("h2", "header2"),
("h3", "header3")]))
def test_init_headers_MultiDict(create_session):
session = create_session(headers=MultiDict([("h1", "header1"),
("h2", "header2"),
("h3", "header3")]))
assert (session._default_headers ==
CIMultiDict([("H1", "header1"),
("H2", "header2"),
("H3", "header3")]))
def test_init_headers_list_of_tuples_with_duplicates(create_session):
session = create_session(headers=[("h1", "header11"),
("h2", "header21"),
("h1", "header12")])
assert (session._default_headers ==
CIMultiDict([("H1", "header11"),
("H2", "header21"),
("H1", "header12")]))
def test_init_cookies_with_simple_dict(create_session):
session = create_session(cookies={"c1": "cookie1",
"c2": "cookie2"})
assert set(session.cookies) == {'c1', 'c2'}
assert session.cookies['c1'].value == 'cookie1'
assert session.cookies['c2'].value == 'cookie2'
def test_init_cookies_with_list_of_tuples(create_session):
session = create_session(cookies=[("c1", "cookie1"),
("c2", "cookie2")])
assert set(session.cookies) == {'c1', 'c2'}
assert session.cookies['c1'].value == 'cookie1'
assert session.cookies['c2'].value == 'cookie2'
def test_merge_headers(create_session):
# Check incoming simple dict
session = create_session(headers={"h1": "header1",
"h2": "header2"})
headers = session._prepare_headers({"h1": "h1"})
assert isinstance(headers, CIMultiDict)
assert headers == CIMultiDict([("h2", "header2"),
("h1", "h1")])
def test_merge_headers_with_multi_dict(create_session):
session = create_session(headers={"h1": "header1",
"h2": "header2"})
headers = session._prepare_headers(MultiDict([("h1", "h1")]))
assert isinstance(headers, CIMultiDict)
assert headers == CIMultiDict([("h2", "header2"),
("h1", "h1")])
def test_merge_headers_with_list_of_tuples(create_session):
session = create_session(headers={"h1": "header1",
"h2": "header2"})
headers = session._prepare_headers([("h1", "h1")])
assert isinstance(headers, CIMultiDict)
assert headers == CIMultiDict([("h2", "header2"),
("h1", "h1")])
def test_merge_headers_with_list_of_tuples_duplicated_names(create_session):
session = create_session(headers={"h1": "header1",
"h2": "header2"})
headers = session._prepare_headers([("h1", "v1"),
("h1", "v2")])
assert isinstance(headers, CIMultiDict)
assert headers == CIMultiDict([("H2", "header2"),
("H1", "v1"),
("H1", "v2")])
def test_http_GET(session, params):
with mock.patch("aiohttp.client.ClientSession._request") as patched:
session.get("http://test.example.com",
params={"x": 1},
**params)
assert patched.called, "`ClientSession._request` not called"
assert list(patched.call_args) == [("GET", "http://test.example.com",),
dict(
params={"x": 1},
allow_redirects=True,
**params)]
def test_http_OPTIONS(session, params):
with mock.patch("aiohttp.client.ClientSession._request") as patched:
session.options("http://opt.example.com",
params={"x": 2},
**params)
assert patched.called, "`ClientSession._request` not called"
assert list(patched.call_args) == [("OPTIONS", "http://opt.example.com",),
dict(
params={"x": 2},
allow_redirects=True,
**params)]
def test_http_HEAD(session, params):
with mock.patch("aiohttp.client.ClientSession._request") as patched:
session.head("http://head.example.com",
params={"x": 2},
**params)
assert patched.called, "`ClientSession._request` not called"
assert list(patched.call_args) == [("HEAD", "http://head.example.com",),
dict(
params={"x": 2},
allow_redirects=False,
**params)]
def test_http_POST(session, params):
with mock.patch("aiohttp.client.ClientSession._request") as patched:
session.post("http://post.example.com",
params={"x": 2},
data="Some_data",
**params)
assert patched.called, "`ClientSession._request` not called"
assert list(patched.call_args) == [("POST", "http://post.example.com",),
dict(
params={"x": 2},
data="Some_data",
**params)]
def test_http_PUT(session, params):
with mock.patch("aiohttp.client.ClientSession._request") as patched:
session.put("http://put.example.com",
params={"x": 2},
data="Some_data",
**params)
assert patched.called, "`ClientSession._request` not called"
assert list(patched.call_args) == [("PUT", "http://put.example.com",),
dict(
params={"x": 2},
data="Some_data",
**params)]
def test_http_PATCH(session, params):
with mock.patch("aiohttp.client.ClientSession._request") as patched:
session.patch("http://patch.example.com",
params={"x": 2},
data="Some_data",
**params)
assert patched.called, "`ClientSession._request` not called"
assert list(patched.call_args) == [("PATCH", "http://patch.example.com",),
dict(
params={"x": 2},
data="Some_data",
**params)]
def test_http_DELETE(session, params):
with mock.patch("aiohttp.client.ClientSession._request") as patched:
session.delete("http://delete.example.com",
params={"x": 2},
**params)
assert patched.called, "`ClientSession._request` not called"
assert list(patched.call_args) == [("DELETE",
"http://delete.example.com",),
dict(
params={"x": 2},
**params)]
def test_close(create_session, connector):
session = create_session(connector=connector)
session.close()
assert session.connector is None
assert connector.closed
def test_closed(session):
assert not session.closed
session.close()
assert session.closed
def test_connector(create_session, loop):
connector = TCPConnector(loop=loop)
session = create_session(connector=connector)
assert session.connector is connector
def test_connector_loop(loop):
with contextlib.ExitStack() as stack:
another_loop = asyncio.new_event_loop()
stack.enter_context(contextlib.closing(another_loop))
connector = TCPConnector(loop=another_loop)
stack.enter_context(contextlib.closing(connector))
with pytest.raises(ValueError) as ctx:
ClientSession(connector=connector, loop=loop)
assert re.match("loop argument must agree with connector",
str(ctx.value))
def test_cookies_are_readonly(session):
with pytest.raises(AttributeError):
session.cookies = 123
def test_detach(session):
conn = session.connector
try:
assert not conn.closed
session.detach()
assert session.connector is None
assert session.closed
assert not conn.closed
finally:
conn.close()
@pytest.mark.run_loop
def test_request_closed_session(session):
session.close()
with pytest.raises(RuntimeError):
yield from session.request('get', '/')
def test_close_flag_for_closed_connector(session):
conn = session.connector
assert not session.closed
conn.close()
assert session.closed
def test_double_close(connector, create_session):
session = create_session(connector=connector)
session.close()
assert session.connector is None
session.close()
assert session.closed
assert connector.closed
def test_del(connector, loop, warning):
# N.B. don't use session fixture, it stores extra reference internally
session = ClientSession(connector=connector, loop=loop)
loop.set_exception_handler(lambda loop, ctx: None)
with warning(ResourceWarning):
del session
gc.collect()
def test_context_manager(connector, loop):
with ClientSession(loop=loop, connector=connector) as session:
pass
assert session.closed
def test_borrow_connector_loop(connector, create_session, loop):
session = ClientSession(connector=connector, loop=None)
try:
assert session._loop, loop
finally:
session.close()
@pytest.mark.run_loop
def test_reraise_os_error(create_session):
err = OSError(1, "permission error")
req = mock.Mock()
req_factory = mock.Mock(return_value=req)
req.send = mock.Mock(side_effect=err)
session = create_session(request_class=req_factory)
@asyncio.coroutine
def create_connection(req):
# return self.transport, self.protocol
return mock.Mock(), mock.Mock()
session._connector._create_connection = create_connection
with pytest.raises(aiohttp.ClientOSError) as ctx:
yield from session.request('get', 'http://example.com')
e = ctx.value
assert e.errno == err.errno
assert e.strerror == err.strerror
@pytest.mark.run_loop
def test_request_ctx_manager_props(loop):
yield from asyncio.sleep(0, loop=loop) # to make it a task
with aiohttp.ClientSession(loop=loop) as client:
ctx_mgr = client.get('http://example.com')
next(ctx_mgr)
assert isinstance(ctx_mgr.gi_frame, types.FrameType)
assert not ctx_mgr.gi_running
assert isinstance(ctx_mgr.gi_code, types.CodeType)
@pytest.mark.run_loop
def test_cookie_jar_usage(create_app_and_client):
req_url = None
jar = mock.Mock()
jar.filter_cookies.return_value = None
@asyncio.coroutine
def handler(request):
nonlocal req_url
req_url = "http://%s/" % request.host
resp = web.Response()
resp.set_cookie("response", "resp_value")
return resp
app, client = yield from create_app_and_client(
client_params={"cookies": {"request": "req_value"},
"cookie_jar": jar}
)
app.router.add_route('GET', '/', handler)
# Updating the cookie jar with initial user defined cookies
jar.update_cookies.assert_called_with({"request": "req_value"})
jar.update_cookies.reset_mock()
yield from client.get("/")
# Filtering the cookie jar before sending the request,
# getting the request URL as only parameter
jar.filter_cookies.assert_called_with(req_url)
# Updating the cookie jar with the response cookies
assert jar.update_cookies.called
resp_cookies = jar.update_cookies.call_args[0][0]
assert isinstance(resp_cookies, http.cookies.SimpleCookie)
assert "response" in resp_cookies
assert resp_cookies["response"].value == "resp_value"
def test_session_default_version(loop):
session = aiohttp.ClientSession(loop=loop)
assert session.version == aiohttp.HttpVersion11
|
apache-2.0
| 8,333,107,715,438,681,000
| 33
| 78
| 0.55112
| false
| 4.27673
| true
| false
| false
|
dnanexus/rseqc
|
rseqc/scripts/read_distribution.py
|
1
|
11931
|
#!/usr/bin/env python
'''-------------------------------------------------------------------------------------------------
Check reads distribution over exon, intron, UTR, intergenic ... etc
-------------------------------------------------------------------------------------------------'''
#import built-in modules
import os,sys
if sys.version_info[0] != 2 or sys.version_info[1] != 7:
print >>sys.stderr, "\nYou are using python" + str(sys.version_info[0]) + '.' + str(sys.version_info[1]) + " RSeQC needs python2.7!\n"
sys.exit()
import re
import string
from optparse import OptionParser
import warnings
import string
import collections
import math
import sets
#import third-party modules
from bx.bitset import *
from bx.bitset_builders import *
from bx.intervals import *
from bx.binned_array import BinnedArray
from bx_extras.fpconst import isNaN
from bx.bitset_utils import *
#import my own modules
from qcmodule import BED
from qcmodule import SAM
from qcmodule import bam_cigar
__author__ = "Liguo Wang"
__copyright__ = "Copyright 2012. All rights reserved."
__credits__ = []
__license__ = "GPL"
__version__="2.3.3"
__maintainer__ = "Liguo Wang"
__email__ = "wang.liguo@mayo.edu"
__status__ = "Production"
def cal_size(list):
    '''calculate bed list total size'''
size=0
for l in list:
size += l[2] - l[1]
return size
def foundone(chrom,ranges, st, end):
found = 0
if chrom in ranges:
found = len(ranges[chrom].find(st,end))
return found
def build_bitsets(list):
'''build intevalTree from list'''
ranges={}
for l in list:
chrom =l[0].upper()
st = int(l[1])
end = int(l[2])
if chrom not in ranges:
ranges[chrom] = Intersecter()
ranges[chrom].add_interval( Interval( st, end ) )
return ranges
def process_gene_model(gene_model):
print >>sys.stderr, "processing " + gene_model + ' ...',
obj = BED.ParseBED(gene_model)
utr_3 = obj.getUTR(utr=3)
utr_5 = obj.getUTR(utr=5)
cds_exon = obj.getCDSExon()
intron = obj.getIntron()
intron = BED.unionBed3(intron)
cds_exon=BED.unionBed3(cds_exon)
utr_5 = BED.unionBed3(utr_5)
utr_3 = BED.unionBed3(utr_3)
utr_5 = BED.subtractBed3(utr_5,cds_exon)
utr_3 = BED.subtractBed3(utr_3,cds_exon)
intron = BED.subtractBed3(intron,cds_exon)
intron = BED.subtractBed3(intron,utr_5)
intron = BED.subtractBed3(intron,utr_3)
intergenic_up_1kb = obj.getIntergenic(direction="up",size=1000)
intergenic_down_1kb = obj.getIntergenic(direction="down",size=1000)
intergenic_up_5kb = obj.getIntergenic(direction="up",size=5000)
intergenic_down_5kb = obj.getIntergenic(direction="down",size=5000)
intergenic_up_10kb = obj.getIntergenic(direction="up",size=10000)
intergenic_down_10kb = obj.getIntergenic(direction="down",size=10000)
    # merge intergenic region
intergenic_up_1kb=BED.unionBed3(intergenic_up_1kb)
intergenic_up_5kb=BED.unionBed3(intergenic_up_5kb)
intergenic_up_10kb=BED.unionBed3(intergenic_up_10kb)
intergenic_down_1kb=BED.unionBed3(intergenic_down_1kb)
intergenic_down_5kb=BED.unionBed3(intergenic_down_5kb)
intergenic_down_10kb=BED.unionBed3(intergenic_down_10kb)
#purify intergenic region
intergenic_up_1kb=BED.subtractBed3(intergenic_up_1kb,cds_exon)
intergenic_up_1kb=BED.subtractBed3(intergenic_up_1kb,utr_5)
intergenic_up_1kb=BED.subtractBed3(intergenic_up_1kb,utr_3)
intergenic_up_1kb=BED.subtractBed3(intergenic_up_1kb,intron)
intergenic_down_1kb=BED.subtractBed3(intergenic_down_1kb,cds_exon)
intergenic_down_1kb=BED.subtractBed3(intergenic_down_1kb,utr_5)
intergenic_down_1kb=BED.subtractBed3(intergenic_down_1kb,utr_3)
intergenic_down_1kb=BED.subtractBed3(intergenic_down_1kb,intron)
#purify intergenic region
intergenic_up_5kb=BED.subtractBed3(intergenic_up_5kb,cds_exon)
intergenic_up_5kb=BED.subtractBed3(intergenic_up_5kb,utr_5)
intergenic_up_5kb=BED.subtractBed3(intergenic_up_5kb,utr_3)
intergenic_up_5kb=BED.subtractBed3(intergenic_up_5kb,intron)
intergenic_down_5kb=BED.subtractBed3(intergenic_down_5kb,cds_exon)
intergenic_down_5kb=BED.subtractBed3(intergenic_down_5kb,utr_5)
intergenic_down_5kb=BED.subtractBed3(intergenic_down_5kb,utr_3)
intergenic_down_5kb=BED.subtractBed3(intergenic_down_5kb,intron)
#purify intergenic region
intergenic_up_10kb=BED.subtractBed3(intergenic_up_10kb,cds_exon)
intergenic_up_10kb=BED.subtractBed3(intergenic_up_10kb,utr_5)
intergenic_up_10kb=BED.subtractBed3(intergenic_up_10kb,utr_3)
intergenic_up_10kb=BED.subtractBed3(intergenic_up_10kb,intron)
intergenic_down_10kb=BED.subtractBed3(intergenic_down_10kb,cds_exon)
intergenic_down_10kb=BED.subtractBed3(intergenic_down_10kb,utr_5)
intergenic_down_10kb=BED.subtractBed3(intergenic_down_10kb,utr_3)
intergenic_down_10kb=BED.subtractBed3(intergenic_down_10kb,intron)
#build intervalTree
cds_exon_ranges = build_bitsets(cds_exon)
utr_5_ranges = build_bitsets(utr_5)
utr_3_ranges = build_bitsets(utr_3)
intron_ranges = build_bitsets(intron)
interg_ranges_up_1kb_ranges = build_bitsets(intergenic_up_1kb)
interg_ranges_up_5kb_ranges = build_bitsets(intergenic_up_5kb)
interg_ranges_up_10kb_ranges = build_bitsets(intergenic_up_10kb)
interg_ranges_down_1kb_ranges = build_bitsets(intergenic_down_1kb)
interg_ranges_down_5kb_ranges = build_bitsets(intergenic_down_5kb)
interg_ranges_down_10kb_ranges = build_bitsets(intergenic_down_10kb)
exon_size = cal_size(cds_exon)
intron_size = cal_size(intron)
utr3_size = cal_size(utr_3)
utr5_size = cal_size(utr_5)
int_up1k_size = cal_size(intergenic_up_1kb)
int_up5k_size = cal_size(intergenic_up_5kb)
int_up10k_size = cal_size(intergenic_up_10kb)
int_down1k_size = cal_size(intergenic_down_1kb)
int_down5k_size = cal_size(intergenic_down_5kb)
int_down10k_size = cal_size(intergenic_down_10kb)
print >>sys.stderr, "Done"
return (cds_exon_ranges,intron_ranges,utr_5_ranges,utr_3_ranges,\
interg_ranges_up_1kb_ranges,interg_ranges_up_5kb_ranges,interg_ranges_up_10kb_ranges,\
interg_ranges_down_1kb_ranges,interg_ranges_down_5kb_ranges,interg_ranges_down_10kb_ranges,\
exon_size,intron_size,utr5_size,utr3_size,\
int_up1k_size,int_up5k_size,int_up10k_size,\
int_down1k_size,int_down5k_size,int_down10k_size)
def main():
usage="%prog [options]" + '\n' + __doc__ + "\n"
parser = OptionParser(usage,version="%prog " + __version__)
parser.add_option("-i","--input-file",action="store",type="string",dest="input_file",help="Alignment file in BAM or SAM format.")
parser.add_option("-r","--refgene",action="store",type="string",dest="ref_gene_model",help="Reference gene model in bed format.")
(options,args)=parser.parse_args()
if not (options.input_file and options.ref_gene_model):
parser.print_help()
sys.exit(0)
if not os.path.exists(options.ref_gene_model):
print >>sys.stderr, '\n\n' + options.ref_gene_model + " does NOT exists" + '\n'
#parser.print_help()
sys.exit(0)
if not os.path.exists(options.input_file):
print >>sys.stderr, '\n\n' + options.input_file + " does NOT exists" + '\n'
sys.exit(0)
#build bitset
(cds_exon_r, intron_r, utr_5_r, utr_3_r,\
intergenic_up_1kb_r,intergenic_up_5kb_r,intergenic_up_10kb_r,\
intergenic_down_1kb_r,intergenic_down_5kb_r,intergenic_down_10kb_r,\
cds_exon_base,intron_base,utr_5_base,utr_3_base,\
intergenic_up1kb_base,intergenic_up5kb_base,intergenic_up10kb_base,\
intergenic_down1kb_base,intergenic_down5kb_base,intergenic_down10kb_base) = process_gene_model(options.ref_gene_model)
intron_read=0
cds_exon_read=0
utr_5_read=0
utr_3_read=0
intergenic_up1kb_read=0
intergenic_down1kb_read=0
intergenic_up5kb_read=0
intergenic_down5kb_read=0
intergenic_up10kb_read=0
intergenic_down10kb_read=0
totalReads=0
totalFrags=0
unAssignFrags=0
obj = SAM.ParseBAM(options.input_file)
R_qc_fail=0
R_duplicate=0
R_nonprimary=0
R_unmap=0
print >>sys.stderr, "processing " + options.input_file + " ...",
try:
while(1):
aligned_read = obj.samfile.next()
if aligned_read.is_qcfail: #skip QC fail read
R_qc_fail +=1
continue
if aligned_read.is_duplicate: #skip duplicate read
R_duplicate +=1
continue
if aligned_read.is_secondary: #skip non primary hit
R_nonprimary +=1
continue
if aligned_read.is_unmapped: #skip unmap read
R_unmap +=1
continue
totalReads +=1
chrom = obj.samfile.getrname(aligned_read.tid)
chrom=chrom.upper()
exons = bam_cigar.fetch_exon(chrom, aligned_read.pos, aligned_read.cigar)
totalFrags += len(exons)
for exn in exons:
#print chrom + '\t' + str(exn[1]) + '\t' + str(exn[2])
mid = int(exn[1]) + int((int(exn[2]) - int(exn[1]))/2)
if foundone(chrom,cds_exon_r,mid,mid) > 0:
cds_exon_read += 1
continue
elif foundone(chrom,utr_5_r,mid,mid) >0 and foundone(chrom,utr_3_r,mid,mid) == 0:
utr_5_read += 1
continue
elif foundone(chrom,utr_3_r,mid,mid) >0 and foundone(chrom,utr_5_r,mid,mid) == 0:
utr_3_read += 1
continue
elif foundone(chrom,utr_3_r,mid,mid) >0 and foundone(chrom,utr_5_r,mid,mid) > 0:
unAssignFrags +=1
continue
elif foundone(chrom,intron_r,mid,mid) > 0:
intron_read += 1
continue
elif foundone(chrom,intergenic_up_10kb_r,mid,mid) >0 and foundone(chrom,intergenic_down_10kb_r,mid,mid) > 0:
unAssignFrags +=1
continue
elif foundone(chrom,intergenic_up_1kb_r,mid,mid) >0:
intergenic_up1kb_read += 1
intergenic_up5kb_read += 1
intergenic_up10kb_read += 1
elif foundone(chrom,intergenic_up_5kb_r,mid,mid) >0:
intergenic_up5kb_read += 1
intergenic_up10kb_read += 1
elif foundone(chrom,intergenic_up_10kb_r,mid,mid) >0:
intergenic_up10kb_read += 1
elif foundone(chrom,intergenic_down_1kb_r,mid,mid) >0:
intergenic_down1kb_read += 1
intergenic_down5kb_read += 1
intergenic_down10kb_read += 1
elif foundone(chrom,intergenic_down_5kb_r,mid,mid) >0:
intergenic_down5kb_read += 1
intergenic_down10kb_read += 1
elif foundone(chrom,intergenic_down_10kb_r,mid,mid) >0:
intergenic_down10kb_read += 1
else:
unAssignFrags +=1
except StopIteration:
print >>sys.stderr, "Finished\n"
print "%-30s%d" % ("Total Reads",totalReads)
print "%-30s%d" % ("Total Tags",totalFrags)
print "%-30s%d" % ("Total Assigned Tags",totalFrags-unAssignFrags)
print "====================================================================="
print "%-20s%-20s%-20s%-20s" % ('Group','Total_bases','Tag_count','Tags/Kb')
print "%-20s%-20d%-20d%-18.2f" % ('CDS_Exons',cds_exon_base,cds_exon_read,cds_exon_read*1000.0/(cds_exon_base+1))
print "%-20s%-20d%-20d%-18.2f" % ("5'UTR_Exons",utr_5_base,utr_5_read, utr_5_read*1000.0/(utr_5_base+1))
print "%-20s%-20d%-20d%-18.2f" % ("3'UTR_Exons",utr_3_base,utr_3_read, utr_3_read*1000.0/(utr_3_base+1))
print "%-20s%-20d%-20d%-18.2f" % ("Introns",intron_base,intron_read,intron_read*1000.0/(intron_base+1))
print "%-20s%-20d%-20d%-18.2f" % ("TSS_up_1kb",intergenic_up1kb_base, intergenic_up1kb_read, intergenic_up1kb_read*1000.0/(intergenic_up1kb_base+1))
print "%-20s%-20d%-20d%-18.2f" % ("TSS_up_5kb",intergenic_up5kb_base, intergenic_up5kb_read, intergenic_up5kb_read*1000.0/(intergenic_up5kb_base+1))
print "%-20s%-20d%-20d%-18.2f" % ("TSS_up_10kb",intergenic_up10kb_base, intergenic_up10kb_read, intergenic_up10kb_read*1000.0/(intergenic_up10kb_base+1))
print "%-20s%-20d%-20d%-18.2f" % ("TES_down_1kb",intergenic_down1kb_base, intergenic_down1kb_read, intergenic_down1kb_read*1000.0/(intergenic_down1kb_base+1))
print "%-20s%-20d%-20d%-18.2f" % ("TES_down_5kb",intergenic_down5kb_base, intergenic_down5kb_read, intergenic_down5kb_read*1000.0/(intergenic_down5kb_base+1))
print "%-20s%-20d%-20d%-18.2f" % ("TES_down_10kb",intergenic_down10kb_base, intergenic_down10kb_read, intergenic_down10kb_read*1000.0/(intergenic_down10kb_base+1))
print "====================================================================="
if __name__ == '__main__':
main()
|
gpl-3.0
| -9,165,150,322,009,244,000
| 39.036913
| 165
| 0.693068
| false
| 2.34816
| false
| false
| false
|
nickname456/pbots
|
poker.py
|
1
|
5255
|
#-----------------------------------------------------------#
# Heads Up Omaha Challange - Starter Bot #
#===========================================================#
# #
# Last update: 22 May, 2014 #
# #
# @author Jackie <jackie@starapple.nl> #
# @version 1.0 #
# @license MIT License (http://opensource.org/licenses/MIT) #
#-----------------------------------------------------------#
class Card(object):
'''
Card class
'''
def __init__(self, suit, value):
self.suit = suit
self.value = value
self.number = '23456789TJQKA'.find(value)
def __repr__(self):
return self.value+self.suit
def __cmp__(self,other):
n_cmp = cmp(self.number,other.number)
if n_cmp!=0:
return n_cmp
return cmp(self.suit,other.suit)
class Pocket(object):
'''
Pocket class
'''
def __init__(self, cards):
self.cards = cards
def __iter__(self):
return iter(self.cards)
class Table(object):
'''
Table class
'''
def __init__(self, cards):
self.cards = cards
class Hand(object):
'''
Hand class
'''
def __init__(self, cards):
self.cards = cards
self.rank = Ranker.rank_five_cards(cards)
def __gt__(self, hand):
return self.rank > hand.rank
def __ge__(self, hand):
return self.rank >= hand.rank
def __lt__(self, hand):
return self.rank < hand.rank
def __le__(self, hand):
return self.rank <= hand.rank
def __eq__(self, hand):
return self.rank == hand.rank
def __repr__(self):
return "Hand:"+str(self.cards)+" rank"+str(self.rank)
# TODO: cache the below?
def is_flush_draw(self):
return Ranker.is_flush_draw(self.cards)
def is_straight_draw(self):
        return Ranker.is_straight_draw(self.cards)
class Ranker(object):
'''
Ranker class
'''
@staticmethod
def rank_five_cards(cards):
# List of all card values
values = sorted(['23456789TJQKA'.find(card.value) for card in cards])
# Checks if hand is a straight
is_straight = all([values[i] == values[0] + i for i in range(5)])
# Additional straight check
if not is_straight:
# Wheel
is_straight = all(values[i] == values[0] + i for i in range(4)) \
and values[4] == 12 \
and values[0] == 0
# Rotate values as the ace is weakest in this case
values = values[1:] + values[:1]
# Checks if hand is a flush
is_flush = all([card.suit == cards[0].suit for card in cards])
# Get card value counts
value_count = {value: values.count(value) for value in values}
        # Sort value counts by most occurring
sorted_value_count = sorted([(count, value) for value, count in value_count.items()], reverse = True)
# Get all kinds (e.g. four of a kind, three of a kind, pair)
kinds = [value_count[0] for value_count in sorted_value_count]
# Get values for kinds
kind_values = [value_count[1] for value_count in sorted_value_count]
# Royal flush
if is_straight and is_flush and values[0] == 8:
return ['9'] + values
# Straight flush
if is_straight and is_flush:
return ['8'] + kind_values
# Four of a kind
if kinds[0] == 4:
return ['7'] + kind_values
# Full house
if kinds[0] == 3 and kinds[1] == 2:
return ['6'] + kind_values
# Flush
if is_flush:
return ['5'] + kind_values
# Straight
if is_straight:
return ['4'] + kind_values
# Three of a kind
if kinds[0] == 3:
return ['3'] + kind_values
# Two pair
if kinds[0] == 2 and kinds[1] == 2:
return ['2'] + kind_values
# Pair
if kinds[0] == 2:
return ['1'] + kind_values
# No pair
return ['0'] + kind_values
@staticmethod
def is_flush_draw(cards):
for i in range(0,5):
cards_ = cards[0:i]+cards[(i+1):]
same_suit = all([c.suit == cards_[0].suit for c in cards_])
if same_suit:
return True
return False
    @staticmethod
    def is_straight_draw(cards):
        # A draw means that dropping one card leaves four consecutive values
        # (or an ace-low run toward the wheel, A-2-3-4).
        for i in range(0, 5):
            cards_ = cards[0:i] + cards[(i + 1):]
            # List of the remaining four card values
            values = sorted(['23456789TJQKA'.find(card.value) for card in cards_])
            is_straight = all(values[j] == values[0] + j for j in range(4))
            if not is_straight:
                # Wheel draw: A,2,3,4 sorts as [0, 1, 2, 12]
                is_straight = all(values[j] == j for j in range(3)) and values[3] == 12
            if is_straight:
                return True
        return False
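# Minimal usage sketch (not part of the original starter bot): build a five-card
# hand and print its computed rank. The suit letter 's' is an arbitrary example.
if __name__ == '__main__':
    royal = Hand([Card('s', v) for v in 'AKQJT'])
    print(royal)  # rank list starts with '9', i.e. a royal flush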
|
mit
| 5,522,409,518,547,737,000
| 27.873626
| 109
| 0.479163
| false
| 3.881093
| false
| false
| false
|
cmbclh/vnpy1.7
|
vnpy/trader/app/login/uiLoginWidget.py
|
1
|
7055
|
# encoding: UTF-8
'''
GUI control components for the login module
'''
import sys
sys.path.append('../')
#sys.path.append('D:\\tr\\vnpy-master\\vn.trader\\DAO')
sys.path.append('D:\\tr\\vnpy-1.7\\vnpy\\DAO')
sys.path.append('D:\\tr\\vnpy-1.7\\vnpy\\common')
import vnpy.DAO
import vnpy.common
from vnpy.DAO import *
import pandas as pd
import Tkinter
#from Tkinter import messagebox
from vnpy.trader.app.login.language import text
from vnpy.trader.uiBasicWidget import QtWidgets
TBUSER_COLUMNS = ['user_id','user_name','status','password','branch_no','open_date','cancel_date','passwd_date','op_group','op_rights','reserve1','dep_id','last_logon_date','last_logon_time','last_ip_address','fail_times','fail_date','reserve2','last_fail_ip']
########################################################################
class LoginSpinBox(QtWidgets.QLineEdit):#.QSpinBox):
"""调整参数用的数值框"""
#----------------------------------------------------------------------
def __init__(self, value):
"""Constructor"""
super(LoginSpinBox, self).__init__()
#self.setMinimum(0)
#self.setMaximum(1000000)
self.setText(value)
########################################################################
class LoginLine(QtWidgets.QFrame):
"""水平分割线"""
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
super(LoginLine, self).__init__()
self.setFrameShape(self.HLine)
self.setFrameShadow(self.Sunken)
########################################################################
class LoginEngineManager(QtWidgets.QWidget):
"""风控引擎的管理组件"""
#----------------------------------------------------------------------
def __init__(self, loginEngine, eventEngine, parent=None):
"""Constructor"""
super(LoginEngineManager, self).__init__(parent)
self.loginEngine = loginEngine
self.eventEngine = eventEngine
self.initUi()
#----------------------------------------------------------------------
def initUi(self):
"""初始化界面"""
print self
self.setWindowTitle(text.LOGIN_MANAGER)
        # Build the UI
self.userId = LoginSpinBox(self.loginEngine.userId)
self.password = LoginSpinBox(self.loginEngine.password)
buttonLogin = QtWidgets.QPushButton(text.LOGIN)
buttonLogout = QtWidgets.QPushButton(text.LOGOUT)
buttonSubmit = QtWidgets.QPushButton(text.SUBMIT)
Label = QtWidgets.QLabel
grid = QtWidgets.QGridLayout()
grid.addWidget(Label(text.USERID), 2, 0)
grid.addWidget(self.userId, 2, 1)
grid.addWidget(Label(text.PASSWORD), 3, 0)
grid.addWidget(self.password, 3, 1)
grid.addWidget(LoginLine(), 4, 0, 1, 2)
hbox = QtWidgets.QHBoxLayout()
hbox.addStretch()
hbox.addWidget(buttonSubmit)
hbox.addWidget(buttonLogin)
vbox = QtWidgets.QVBoxLayout()
vbox.addLayout(grid)
vbox.addLayout(hbox)
self.setLayout(vbox)
        # Connect widget signals
buttonSubmit.clicked.connect(self.submit)
buttonLogin.clicked.connect(self.login)
        # Make the window a fixed size
self.setFixedSize(self.sizeHint())
# ----------------------------------------------------------------------
def login(self):
        print (u'Starting login validation: self.userId=%s, self.password=%s' % (self.userId, self.password))
userId = str(self.userId.text())
password = str(self.password.text())
        print (u'Starting login validation: userId=%s, password=%s' % (userId, password))
        # The query below returns at most one record for a valid user
sql = ' SELECT *' \
' from tbuser where user_id = \'%s\' and password = \'%s\' and status = 0 ' % (userId, password)
try:
ret = vnpy.DAO.getDataBySQL('vnpy', sql)
if ret.empty :
                print (u'Login validation failed: user does not exist or password is incorrect')
                #QtWidgets.QMessageBox.information(self, "Login failed", "User does not exist or password is incorrect, please retry!", QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No)
QtWidgets.QMessageBox.information(self, text.LOGINERROR,text.LOGINERRORINFO,
QtWidgets.QMessageBox.Retry)
                #Tkinter.messagebox.showinfo('Login validation failed: user does not exist or password is incorrect')
else:
                print (u'Login validation succeeded')
QtWidgets.QMessageBox.information(self, text.LOGINSUSS, text.LOGINSUSSINFO, QtWidgets.QMessageBox.Ok)
self.close()
                #Tkinter.messagebox.showinfo('Welcome')
except Exception as e:
print e
# ----------------------------------------------------------------------
def logout(self):
pass
# ----------------------------------------------------------------------
def submit(self):
userId = str(self.userId.text())
password = str(self.password.text())
        print (u'Starting registration check: userId=%s, password=%s' % (userId, password))
        # The query below returns at most one record for a valid user
sql = ' SELECT user_id,status' \
' from tbuser where user_id = \'%s\' ' % (userId)
try:
ret = vnpy.DAO.getDataBySQL('vnpy', sql)
            #If the user does not exist in the system, insert the registration record directly
if ret.empty:
                print (u'No record for this user; registration can proceed')
userData = [userId, userId, 0, password, '', 0, 0, 0, '', ' ', ' ', '', 0, 0, '', 0, 0, ' ', '']
d = pd.DataFrame([userData], columns=TBUSER_COLUMNS)
try:
print("开始写入TBUSER中")
vnpy.DAO.writeData('vnpy', 'tbuser', d)
                    print (u'Registration succeeded')
QtWidgets.QMessageBox.information(self, text.SUBMIT, text.SUBMITSUSS, QtWidgets.QMessageBox.Ok)
self.close()
except Exception as e1:
                    print (u'Registration failed')
QtWidgets.QMessageBox.information(self, text.SUBMIT, text.SUBMITFAIL, QtWidgets.QMessageBox.Retry)
print e1
            # If the user already exists in the system, update the status and password to reactivate the account
else:
                #Placeholder for now
QtWidgets.QMessageBox.information(self, text.SUBMIT, text.SUBMITFAIL, QtWidgets.QMessageBox.Ok)
self.close()
except Exception as e:
print e
#QtWidgets.QMessageBox.information(self, text.SUBMIT, text.SUBMITSUSS, QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No)
# ----------------------------------------------------------------------
def closeLoginEngineManager(self):
self.close()
pass
|
mit
| 3,855,137,239,608,203,000
| 36.542857
| 260
| 0.507992
| false
| 3.635307
| false
| false
| false
|
fast90/christian
|
modules/hq.py
|
1
|
1507
|
from datetime import datetime
class HQ(object):
def __init__(self):
self.people_in_hq = 0
self.keys_in_hq = 0
self.joined_users = []
self.hq_status = 'unknown'
self.status_since = datetime.now().strftime('%Y-%m-%d %H:%M')
self.is_clean = True
self.joined_keys = []
def update_time(self):
self.status_since = datetime.now().strftime('%Y-%m-%d %H:%M')
def hq_open(self):
self.hq_status = 'open'
self.update_time()
def hq_close(self):
self.hq_status = 'closed'
self.update_time()
self.people_in_hq = 0
del(self.joined_users[:])
del(self.joined_keys[:])
def hq_private(self):
self.hq_status = 'private'
self.update_time()
def hq_clean(self):
self.is_clean = True
def hq_dirty(self):
self.is_clean = False
def hq_join(self,user):
self.people_in_hq +=1
self.joined_users.append(user)
def hq_leave(self,user):
self.people_in_hq -=1
self.joined_users.remove(user)
def hq_keyjoin(self,user):
self.keys_in_hq +=1
self.joined_keys.append(user)
def hq_keyleave(self,user):
self.keys_in_hq -=1
self.joined_keys.remove(user)
def get_hq_status(self):
return ('HQ is {} since {}. {} Members are here'
.format(self.hq_status, self.status_since, self.people_in_hq))
def get_hq_clean(self):
return self.is_clean
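# Minimal usage sketch (illustrative only, not part of the module): open the HQ,
# let one member join, and print the resulting status line.
if __name__ == '__main__':
    hq = HQ()
    hq.hq_open()
    hq.hq_join('alice')
    print(hq.get_hq_status())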
|
gpl-3.0
| 6,626,186,680,984,100,000
| 24.542373
| 78
| 0.558062
| false
| 3.24086
| false
| false
| false
|
micbou/YouCompleteMe
|
python/ycm/client/completer_available_request.py
|
2
|
1716
|
# Copyright (C) 2013 Google Inc.
#
# This file is part of YouCompleteMe.
#
# YouCompleteMe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# YouCompleteMe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with YouCompleteMe. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
# Not installing aliases from python-future; it's unreliable and slow.
from builtins import * # noqa
from ycm.client.base_request import BaseRequest, BuildRequestData
class CompleterAvailableRequest( BaseRequest ):
def __init__( self, filetypes ):
super( CompleterAvailableRequest, self ).__init__()
self.filetypes = filetypes
self._response = None
def Start( self ):
request_data = BuildRequestData()
request_data.update( { 'filetypes': self.filetypes } )
self._response = self.PostDataToHandler( request_data,
'semantic_completion_available' )
def Response( self ):
return self._response
def SendCompleterAvailableRequest( filetypes ):
request = CompleterAvailableRequest( filetypes )
# This is a blocking call.
request.Start()
return request.Response()
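# Illustrative usage sketch (not part of YouCompleteMe): ask the running ycmd
# server whether semantic completion is available for Python buffers. Assumes
# BaseRequest is already configured to reach a live ycmd instance; the 'python'
# filetype is just an example value.
def ExampleSemanticAvailabilityCheck():
  return SendCompleterAvailableRequest( [ 'python' ] )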
|
gpl-3.0
| -4,551,042,928,777,884,700
| 33.32
| 78
| 0.726107
| false
| 4.115108
| false
| false
| false
|
vivisect/synapse
|
synapse/cryotank.py
|
1
|
49542
|
import os
import types
import shutil
import struct
import logging
import threading
import contextlib
from functools import partial, wraps
from collections import defaultdict
import lmdb # type: ignore
import synapse.lib.cell as s_cell
import synapse.lib.lmdb as s_lmdb
import synapse.lib.queue as s_queue
import synapse.lib.config as s_config
import synapse.lib.msgpack as s_msgpack
import synapse.lib.threads as s_threads
import synapse.lib.datapath as s_datapath
import synapse.exc as s_exc
import synapse.glob as s_glob
import synapse.common as s_common
import synapse.eventbus as s_eventbus
import synapse.datamodel as s_datamodel
logger = logging.getLogger(__name__)
class CryoTank(s_config.Config):
'''
A CryoTank implements a stream of structured data.
'''
def __init__(self, dirn, conf=None):
s_config.Config.__init__(self, conf)
self.path = s_common.gendir(dirn)
path = s_common.gendir(self.path, 'cryo.lmdb')
mapsize = self.getConfOpt('mapsize')
self.lmdb = lmdb.open(path, writemap=True, max_dbs=128)
self.lmdb.set_mapsize(mapsize)
self.lmdb_items = self.lmdb.open_db(b'items')
self.lmdb_metrics = self.lmdb.open_db(b'metrics')
noindex = self.getConfOpt('noindex')
self.indexer = None if noindex else CryoTankIndexer(self)
with self.lmdb.begin() as xact:
self.items_indx = xact.stat(self.lmdb_items)['entries']
self.metrics_indx = xact.stat(self.lmdb_metrics)['entries']
def fini():
self.lmdb.sync()
self.lmdb.close()
self.onfini(fini)
@staticmethod
@s_config.confdef(name='cryotank')
def _cryotank_confdefs():
defs = (
('mapsize', {'type': 'int', 'doc': 'LMDB mapsize value', 'defval': s_lmdb.DEFAULT_MAP_SIZE}),
('noindex', {'type': 'bool', 'doc': 'Disable indexing', 'defval': 0}),
)
return defs
def last(self):
'''
Return the last item stored in this CryoTank.
'''
with self.lmdb.begin() as xact:
with xact.cursor(db=self.lmdb_items) as curs:
if not curs.last():
return None
indx = struct.unpack('>Q', curs.key())[0]
return indx, s_msgpack.un(curs.value())
def puts(self, items):
'''
Add the structured data from items to the CryoTank.
Args:
items (list): A list of objects to store in the CryoTank.
Returns:
int: The index that the item storage began at.
'''
itembyts = [s_msgpack.en(i) for i in items]
tick = s_common.now()
bytesize = sum([len(b) for b in itembyts])
with self.lmdb.begin(db=self.lmdb_items, write=True) as xact:
retn = self.items_indx
todo = []
for byts in itembyts:
todo.append((struct.pack('>Q', self.items_indx), byts))
self.items_indx += 1
with xact.cursor() as curs:
curs.putmulti(todo, append=True)
took = s_common.now() - tick
with xact.cursor(db=self.lmdb_metrics) as curs:
lkey = struct.pack('>Q', self.metrics_indx)
self.metrics_indx += 1
info = {'time': tick, 'count': len(items), 'size': bytesize, 'took': took}
curs.put(lkey, s_msgpack.en(info), append=True)
self.fire('cryotank:puts', numrecords=len(itembyts))
return retn
def metrics(self, offs, size=None):
'''
Yield metrics rows starting at offset.
Args:
offs (int): The index offset.
size (int): The maximum number of records to yield.
Yields:
((int, dict)): An index offset, info tuple for metrics.
'''
mink = struct.pack('>Q', offs)
with self.lmdb.begin() as xact:
with xact.cursor(db=self.lmdb_metrics) as curs:
if not curs.set_range(mink):
return
for i, (lkey, lval) in enumerate(curs):
if size is not None and i >= size:
return
indx = struct.unpack('>Q', lkey)[0]
item = s_msgpack.un(lval)
yield indx, item
def slice(self, offs, size):
'''
Yield a number of items from the CryoTank starting at a given offset.
Args:
offs (int): The index of the desired datum (starts at 0)
size (int): The max number of items to yield.
Notes:
This API performs msgpack unpacking on the bytes, and could be
slow to call remotely.
Yields:
((index, object)): Index and item values.
'''
lmin = struct.pack('>Q', offs)
with self.lmdb.begin() as xact:
with xact.cursor(db=self.lmdb_items) as curs:
if not curs.set_range(lmin):
return
for i, (lkey, lval) in enumerate(curs):
if i >= size:
return
indx = struct.unpack('>Q', lkey)[0]
yield indx, s_msgpack.un(lval)
def rows(self, offs, size):
'''
Yield a number of raw items from the CryoTank starting at a given offset.
Args:
offs (int): The index of the desired datum (starts at 0)
size (int): The max number of items to yield.
Yields:
((indx, bytes)): Index and msgpacked bytes.
'''
lmin = struct.pack('>Q', offs)
imax = offs + size
# time slice the items from the cryo tank
with self.lmdb.begin() as xact:
with xact.cursor(db=self.lmdb_items) as curs:
if not curs.set_range(lmin):
return
for lkey, lval in curs:
indx = struct.unpack('>Q', lkey)[0]
if indx >= imax:
break
yield indx, lval
def info(self):
'''
Returns information about the CryoTank instance.
Returns:
dict: A dict containing items and metrics indexes.
'''
return {'indx': self.items_indx, 'metrics': self.metrics_indx, 'stat': self.lmdb.stat()}
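# Illustrative sketch (not part of the original API): round-trip a few records
# through a CryoTank. The directory path is supplied by the caller and is an
# assumption of this example.
def _example_roundtrip(dirn):  # pragma: no cover
    '''
    Write a couple of msgpack-able records into a CryoTank under *dirn*,
    read them back by offset, and tear the tank down.
    '''
    tank = CryoTank(dirn, conf={'noindex': 1})
    tank.puts([{'foo': 'bar'}, {'baz': 10}])
    items = list(tank.slice(0, 10))
    tank.fini()
    return items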
class CryoCell(s_cell.Cell):
def postCell(self):
'''
CryoCell initialization routines.
'''
self.names = self.getCellDict('cryo:names')
self.confs = self.getCellDict('cryo:confs')
self.tanks = s_eventbus.BusRef()
for name, iden in self.names.items():
logger.info('Bringing tank [%s][%s] online', name, iden)
path = self.getCellPath('tanks', iden)
conf = self.confs.get(name)
tank = CryoTank(path, conf)
self.tanks.put(name, tank)
def initConfDefs(self):
super().initConfDefs()
self.addConfDefs((
('defvals', {'defval': {},
'ex': '{"mapsize": 1000000000}',
'doc': 'Default settings for cryotanks created by the cell.',
'asloc': 'tank_defaults'}),
))
def finiCell(self):
'''
Fini handlers for the CryoCell
'''
self.tanks.fini()
def handlers(self):
'''
CryoCell message handlers.
'''
cryo_handlers = {
'cryo:init': self._onCryoInit,
'cryo:list': self._onCryoList,
'cryo:puts': self._onCryoPuts,
'cryo:dele': self._onCryoDele,
'cryo:last': partial(self._onGeneric, CryoTank.last),
'cryo:rows': partial(self._onGeneric, CryoTank.rows),
'cryo:slice': partial(self._onGeneric, CryoTank.slice),
'cryo:metrics': partial(self._onGeneric, CryoTank.metrics),
}
indexer_calls = {
'cryo:indx:add': CryoTankIndexer.addIndex,
'cryo:indx:del': CryoTankIndexer.delIndex,
'cryo:indx:pause': CryoTankIndexer.pauseIndex,
'cryo:indx:resume': CryoTankIndexer.resumeIndex,
'cryo:indx:stat': CryoTankIndexer.getIndices,
'cryo:indx:querynormvalu': CryoTankIndexer.queryNormValu,
'cryo:indx:querynormrecords': CryoTankIndexer.queryNormRecords,
'cryo:indx:queryrows': CryoTankIndexer.queryRows
}
cryo_handlers.update({k: partial(self._onCryoIndex, v) for k, v in indexer_calls.items()})
return cryo_handlers
def _standard_return(self, chan, subfunc, *args, **kwargs):
'''
Calls a function and returns the return value or exception back through the channel
'''
try:
rv = subfunc(*args, **kwargs)
except Exception as e:
retn = s_common.getexcfo(e)
return chan.tx((False, retn))
if isinstance(rv, types.GeneratorType):
chan.setq()
chan.tx((True, True))
genr = s_common.chunks(rv, 1000)
chan.txwind(genr, 100, timeout=30)
return
return chan.tx((True, rv))
@s_glob.inpool
def _onGeneric(self, method, chan, mesg):
'''
Generic handler that looks up tank in name field and passes it to method of cryotank
'''
cmdstr, kwargs = mesg
name = kwargs.pop('name')
tank = self.tanks.get(name)
with chan:
if tank is None:
return chan.tx((False, ('NoSuchName', {'name': name})))
return self._standard_return(chan, method, tank, **kwargs)
@s_glob.inpool
def _onCryoIndex(self, subfunc, chan, mesg):
cmdstr, kwargs = mesg
name = kwargs.pop('name')
tank = self.tanks.get(name)
with chan:
if tank is None:
return chan.tx((False, ('NoSuchName', {'name': name})))
indexer = tank.indexer
if indexer is None:
return chan.tx((False, ('IndexingDisabled', {'name': name})))
return self._standard_return(chan, subfunc, indexer, **kwargs)
def genCryoTank(self, name, conf=None):
'''
        Generate a new CryoTank with a given name or get a reference to an existing CryoTank.
Args:
name (str): Name of the CryoTank.
Returns:
CryoTank: A CryoTank instance.
'''
tank = self.tanks.get(name)
if tank is not None:
return tank
iden = s_common.guid()
logger.info('Creating new tank: %s', name)
path = self.getCellPath('tanks', iden)
mergeconf = self.tank_defaults.copy()
if conf is not None:
mergeconf.update(conf)
tank = CryoTank(path, mergeconf)
self.names.set(name, iden)
self.confs.set(name, conf)
self.tanks.put(name, tank)
return tank
def getCryoList(self):
'''
Get a list of (name, info) tuples for the CryoTanks.
Returns:
list: A list of tufos.
'''
return [(name, tank.info()) for (name, tank) in self.tanks.items()]
def _onCryoList(self, chan, mesg):
chan.txfini((True, self.getCryoList()))
@s_glob.inpool
def _onCryoDele(self, chan, mesg):
name = mesg[1].get('name')
logger.info('Deleting tank: %s' % (name,))
with chan:
tank = self.tanks.pop(name) # type: CryoTank
if tank is None:
return chan.tx((True, False))
self.names.pop(name)
tank.fini()
shutil.rmtree(tank.path, ignore_errors=True)
return chan.tx((True, True))
@s_glob.inpool
def _onCryoPuts(self, chan, mesg):
name = mesg[1].get('name')
chan.setq()
chan.tx(True)
with chan:
size = 0
tank = self.genCryoTank(name)
for items in chan.rxwind(timeout=30):
tank.puts(items)
size += len(items)
chan.txok(size)
@s_glob.inpool
def _onCryoInit(self, chan, mesg):
with chan:
tank = self.tanks.get(mesg[1].get('name'))
if tank:
return chan.tx((True, False))
return self._standard_return(chan, lambda **kwargs: bool(self.genCryoTank(**kwargs)), **mesg[1])
class CryoClient:
'''
Client-side helper for interacting with a CryoCell which hosts CryoTanks.
Args:
        sess: An established synapse session used to reach the remote CryoCell.
'''
_chunksize = 10000
def _remotecall(self, name, cmd_str, timeout=None, **kwargs):
'''
Handles all non-generator remote calls
'''
kwargs['name'] = name
ok, retn = self.sess.call((cmd_str, kwargs), timeout=timeout)
return s_common.reqok(ok, retn)
def _genremotecall(self, name, cmd_str, timeout=None, **kwargs):
'''
Handles all generator function remote calls
'''
kwargs['name'] = name
with self.sess.task((cmd_str, kwargs), timeout=timeout) as chan:
ok, retn = chan.next(timeout=timeout)
s_common.reqok(ok, retn)
for bloc in chan.rxwind(timeout=timeout):
for item in bloc:
yield item
def __init__(self, sess):
self.sess = sess
def puts(self, name, items, timeout=None):
'''
Add data to the named remote CryoTank by consuming from items.
Args:
name (str): The name of the remote CryoTank.
items (iter): An iterable of data items to load.
timeout (float/int): The maximum timeout for an ack.
Returns:
None
'''
with self.sess.task(('cryo:puts', {'name': name})) as chan:
if not chan.next(timeout=timeout):
return False
genr = s_common.chunks(items, self._chunksize)
chan.txwind(genr, 100, timeout=timeout)
return chan.next(timeout=timeout)
def last(self, name, timeout=None):
'''
Return the last entry in the named CryoTank.
Args:
name (str): The name of the remote CryoTank.
timeout (int): Request timeout
Returns:
((int, object)): The last entry index and object from the CryoTank.
'''
return self._remotecall(name, cmd_str='cryo:last', timeout=timeout)
def delete(self, name, timeout=None):
'''
Delete a named CryoTank.
Args:
name (str): The name of the remote CryoTank.
timeout (int): Request timeout
Returns:
bool: True if the CryoTank was deleted, False if it was not deleted.
'''
return self._remotecall(name, cmd_str='cryo:dele', timeout=timeout)
def list(self, timeout=None):
'''
Get a list of the remote CryoTanks.
Args:
timeout (int): Request timeout
Returns:
tuple: A tuple containing name, info tufos for the remote CryoTanks.
'''
ok, retn = self.sess.call(('cryo:list', {}), timeout=timeout)
return s_common.reqok(ok, retn)
def slice(self, name, offs, size, timeout=None):
'''
Slice and return a section from the named CryoTank.
Args:
name (str): The name of the remote CryoTank.
offs (int): The offset to begin the slice.
size (int): The number of records to slice.
timeout (int): Request timeout
Yields:
(int, obj): (indx, item) tuples for the sliced range.
'''
return self._genremotecall(name, offs=offs, size=size, cmd_str='cryo:slice', timeout=timeout)
def rows(self, name, offs, size, timeout=None):
'''
Retrieve raw rows from a section of the named CryoTank.
Args:
name (str): The name of the remote CryoTank.
offs (int): The offset to begin the row retrieval from.
size (int): The number of records to retrieve.
timeout (int): Request timeout.
Notes:
This returns msgpack encoded records. It is the callers
responsibility to decode them.
Yields:
(int, bytes): (indx, bytes) tuples for the rows in range.
'''
return self._genremotecall(name, offs=offs, size=size, cmd_str='cryo:rows', timeout=timeout)
def metrics(self, name, offs, size=None, timeout=None):
'''
Carve a slice of metrics data from the named CryoTank.
Args:
name (str): The name of the remote CryoTank.
offs (int): The index offset.
timeout (int): Request timeout
Returns:
tuple: A tuple containing metrics tufos for the named CryoTank.
'''
return self._genremotecall(name, offs=offs, size=size, cmd_str='cryo:metrics', timeout=timeout)
def init(self, name, conf=None, timeout=None):
'''
Create a new named Cryotank.
Args:
name (str): Name of the Cryotank to make.
conf (dict): Additional configable options for the Cryotank.
timeout (int): Request timeout
Returns:
True if the tank was created, False if the tank existed or
there was an error during CryoTank creation.
'''
return self._remotecall(name, conf=conf, cmd_str='cryo:init', timeout=timeout)
def addIndex(self, name, prop, syntype, datapaths, timeout=None):
'''
Add an index to the cryotank
Args:
name (str): name of the Cryotank.
prop (str): the name of the property this will be stored as in the normalized record
syntype (str): the synapse type this will be interpreted as
datapaths(Iterable[str]): datapath specs against which the raw record is run to extract a single field
that is passed to the type normalizer. These will be tried in order until one succeeds. At least one
must be present.
timeout (Optional[float]): the maximum timeout for an ack
Returns:
None
Note:
Additional datapaths will only be tried if prior datapaths are not present, and *not* if
the normalization fails.
'''
if not len(datapaths):
raise s_exc.BadOperArg(mesg='datapaths must have at least one entry')
return self._remotecall(name, prop=prop, syntype=syntype, datapaths=datapaths, cmd_str='cryo:indx:add',
timeout=timeout)
def delIndex(self, name, prop, timeout=None):
'''
Delete an index
Args:
name (str): name of the Cryotank
prop (str): the (normalized) property name
timeout (Optional[float]): the maximum timeout for an ack
Returns:
None
'''
return self._remotecall(name, prop=prop, cmd_str='cryo:indx:del', timeout=timeout)
def pauseIndex(self, name, prop=None, timeout=None):
'''
Temporarily stop indexing one or all indices
Args:
name (str): name of the Cryotank
prop: (Optional[str]): the index to stop indexing, or if None, indicate to stop all indices
timeout (Optional[float]): the maximum timeout for an ack
Returns:
None
Note:
Pausing is not persistent. Restarting the process will resume indexing.
'''
return self._remotecall(name, prop=prop, cmd_str='cryo:indx:pause', timeout=timeout)
def resumeIndex(self, name, prop=None, timeout=None):
'''
Undo a pauseIndex
Args:
name (str): name of the Cryotank
prop (Optional[str]): the index to start indexing, or if None, indicate to resume all indices
timeout (Optional[float]): the maximum timeout for an ack
Returns:
None
'''
return self._remotecall(name, prop=prop, cmd_str='cryo:indx:resume', timeout=timeout)
def getIndices(self, name, timeout=None):
'''
Get information about all the indices
Args:
name (str): name of the Cryotank
timeout (Optional[float]): the maximum timeout for an ack
Returns:
List[Dict[str: Any]]: all the indices with progress and statistics
'''
return self._remotecall(name, cmd_str='cryo:indx:stat', timeout=timeout)
def queryNormValu(self, name, prop, valu=None, exact=False, timeout=None):
'''
Query for normalized individual property values
Args:
name (str): name of the Cryotank
prop (str): The name of the indexed property
valu (Optional[Union[int, str]]): The normalized value. If not present, all records with prop present,
sorted by prop will be returned. It will be considered a prefix if exact is False.
exact (bool): Indicates that the result must match exactly. Conversely, if False, indicates a prefix match.
timeout (Optional[float]): the maximum timeout for an ack
Returns:
Iterable[Tuple[int, Union[str, int]]]: A generator of offset, normalized value tuples.
'''
return self._genremotecall(name, prop=prop, valu=valu, exact=exact, cmd_str='cryo:indx:querynormvalu',
timeout=timeout)
def queryNormRecords(self, name, prop, valu=None, exact=False, timeout=None):
'''
Query for normalized property values grouped together in dicts
Args:
name (str): name of the Cryotank
prop (str): The name of the indexed property
valu (Optional[Union[int, str]]): The normalized value. If not present, all records with prop present,
sorted by prop will be returned. It will be considered a prefix if exact is False.
exact (bool): Indicates that the result must match exactly. Conversely, if False, indicates a prefix match.
timeout (Optional[float]): the maximum timeout for an ack
Returns:
Iterable[Tuple[int, Dict[str, Union[str, int]]]]: A generator of offset, dictionary tuples
'''
return self._genremotecall(name, prop=prop, valu=valu, exact=exact, cmd_str='cryo:indx:querynormrecords',
timeout=timeout)
def queryRows(self, name, prop, valu=None, exact=False, timeout=None):
'''
Query for raw (i.e. from the cryotank itself) records
Args:
name (str): name of the Cryotank
prop (str): The name of the indexed property
valu (Optional[Union[int, str]]): The normalized value. If not present, all records with prop present,
sorted by prop will be returned. It will be considered a prefix if exact is False.
exact (bool): Indicates that the result must match exactly. Conversely, if False, indicates a prefix match.
timeout (Optional[float]): The maximum timeout for an ack
Returns:
Iterable[Tuple[int, bytes]]: A generator of tuple (offset, messagepack encoded) raw records
'''
return self._genremotecall(name, prop=prop, valu=valu, exact=exact, cmd_str='cryo:indx:queryrows',
timeout=timeout)
# TODO: what to do with subprops returned from getTypeNorm
class _MetaEntry:
''' Describes a single CryoTank index in the system. '''
def __init__(self, propname: str, syntype: str, datapaths) -> None:
'''
Makes a MetaEntry
Args:
propname: The name of the key in the normalized dictionary
syntype: The synapse type name against which the data will be normalized
datapath (Iterable[str]) One or more datapath strings that will be used to find the field in a raw record
'''
self.propname = propname
self.syntype = syntype
self.datapaths = tuple(s_datapath.DataPath(d) for d in datapaths)
def en(self):
'''
Encodes a MetaEntry for storage
'''
return s_msgpack.en(self.asdict())
def asdict(self):
'''
Returns a MetaEntry as a dictionary
'''
return {'propname': self.propname,
'syntype': self.syntype,
'datapaths': tuple(d.path for d in self.datapaths)}
# Big-endian 64-bit integer encoder
_Int64be = struct.Struct('>Q')
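# Illustrative sanity check (not part of the original module): LMDB compares
# keys as raw bytes, and big-endian packing keeps byte order consistent with
# numeric order, which is what the offset keys in this module rely on.
def _example_key_order():  # pragma: no cover
    assert _Int64be.pack(1) < _Int64be.pack(255) < _Int64be.pack(2 ** 32)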
class _IndexMeta:
'''
Manages persistence of CryoTank index metadata with an in-memory copy
"Schema":
b'indices' key has msgpack encoded dict of
{ 'present': [8238483: {'propname': 'foo:bar', 'syntype': type, 'datapaths': (datapath, datapath2)}, ...],
'deleting': [8238483, ...]
}
b'progress' key has mesgpack encoded dict of
{ 8328483: {nextoffset, ngood, nnormfail}, ...
_present_ contains the encoding information about the current indices
_deleting_ contains the indices currently being deleted (but aren't done)
_progress_ contains how far each index has gotten, how many successful props were indexed (which might be different
because of missing properties), and how many normalizations failed. It is separate because it gets updated a lot
more.
'''
def __init__(self, dbenv: lmdb.Environment) -> None:
'''
Creates metadata for all the indices.
Args:
dbenv (lmdb.Environment): the lmdb instance in which to store the metadata.
Returns:
None
'''
self._dbenv = dbenv
# The table in the database file (N.B. in LMDB speak, this is called a database)
self._metatbl = dbenv.open_db(b'meta')
is_new_db = False
with dbenv.begin(db=self._metatbl, buffers=True) as txn:
indices_enc = txn.get(b'indices')
progress_enc = txn.get(b'progress')
if indices_enc is None or progress_enc is None:
if indices_enc is None and progress_enc is None:
is_new_db = True
indices_enc = s_msgpack.en({'present': {}, 'deleting': []})
progress_enc = s_msgpack.en({})
else:
raise s_exc.CorruptDatabase('missing meta information in index meta') # pragma: no cover
indices = s_msgpack.un(indices_enc)
# The details about what the indices are actually indexing: the datapath and type.
self.indices = {k: _MetaEntry(**s_msgpack.un(v)) for k, v in indices.get('present', {}).items()}
self.deleting = list(indices.get('deleting', ()))
# Keeps track (non-persistently) of which indices have been paused
self.asleep = defaultdict(bool) # type: ignore
# How far each index has progressed as well as statistics
self.progresses = s_msgpack.un(progress_enc)
if not all(p in self.indices for p in self.deleting):
raise s_exc.CorruptDatabase(
'index meta table: deleting entry with unrecognized property name') # pragma: no cover
if not all(p in self.indices for p in self.progresses):
raise s_exc.CorruptDatabase(
'index meta table: progress entry with unrecognized property name') # pragma: no cover
if is_new_db:
self.persist()
def persist(self, progressonly=False, txn=None):
'''
Persists the index info to the database
Args:
progressonly (bool): if True, only persists the progress (i.e. more dynamic) information
txn (Optional[lmdb.Transaction]): if not None, will use that transaction to record data. txn is
not committed.
Returns:
None
'''
        d = {'deleting': self.deleting,
'present': {k: metaentry.en() for k, metaentry in self.indices.items()}}
with contextlib.ExitStack() as stack:
if txn is None:
txn = stack.enter_context(self._dbenv.begin(db=self._metatbl, buffers=True, write=True))
if not progressonly:
txn.put(b'indices', s_msgpack.en(d), db=self._metatbl)
txn.put(b'progress', s_msgpack.en(self.progresses), db=self._metatbl)
def lowestProgress(self):
'''
Returns:
int: The next offset that should be indexed, based on active indices.
'''
nextoffsets = [p['nextoffset'] for iid, p in self.progresses.items() if not self.asleep[iid]]
return min(nextoffsets) if nextoffsets else s_lmdb.MAX_INT_VAL
def iidFromProp(self, prop):
'''
Retrieve the random index ID from the property name
Args:
prop (str) The name of the indexed property
Returns:
int: the index id for the propname, None if not found
'''
return next((k for k, idx in self.indices.items() if idx.propname == prop), None)
def addIndex(self, prop, syntype, datapaths):
'''
Add an index to the cryotank
Args:
prop (str): the name of the property this will be stored as in the normalized record
syntype (str): the synapse type this will be interpreted as
datapaths (Iterable[str]): datapaths that will be tried in order.
Returns:
None
Note:
Additional datapaths will only be tried if prior datapaths are not present, and *not* if
the normalization fails.
'''
if self.iidFromProp(prop) is not None:
raise s_exc.DupIndx(mesg='Index already exists', index=prop)
if not len(datapaths):
raise s_exc.BadOperArg(mesg='datapaths must have at least one entry')
s_datamodel.tlib.reqDataType(syntype)
iid = int.from_bytes(os.urandom(8), 'little')
self.indices[iid] = _MetaEntry(propname=prop, syntype=syntype, datapaths=datapaths)
self.progresses[iid] = {'nextoffset': 0, 'ngood': 0, 'nnormfail': 0}
self.persist()
def delIndex(self, prop):
'''
Delete an index
Args:
prop (str): the (normalized) property name
Returns:
None
'''
iid = self.iidFromProp(prop)
if iid is None:
raise s_exc.NoSuchIndx(mesg='No such index', index=prop)
del self.indices[iid]
self.deleting.append(iid)
# remove the progress entry in case a new index with the same propname gets added later
del self.progresses[iid]
self.persist()
def pauseIndex(self, prop):
'''
Temporarily stop indexing one or all indices
Args:
prop: (Optional[str]): the index to stop indexing, or if None, indicate to stop all indices
Returns:
None
Note:
Pausing is not persistent. Restarting the process will resume indexing.
'''
for iid, idx in self.indices.items():
if prop is None or prop == idx.propname:
self.asleep[iid] = True
def resumeIndex(self, prop):
'''
Undo a pauseIndex
Args:
prop (Optional[str]): the index to start indexing, or if None, indicate to resume all indices
Returns:
None
'''
for iid, idx in self.indices.items():
if prop is None or prop == idx.propname:
self.asleep[iid] = False
def markDeleteComplete(self, iid):
'''
Indicates that deletion of a single index is complete
Args:
iid (int): The index ID to mark as deleted
'''
self.deleting.remove(iid)
self.persist()
_Int64le = struct.Struct('<Q')
def _iid_en(iid):
'''
Encode a little endian 64-bit integer
'''
return _Int64le.pack(iid)
def _iid_un(iid):
'''
Decode a little endian 64-bit integer
'''
return _Int64le.unpack(iid)[0]
def _inWorker(callback):
'''
    Queue the decorated function to the indexing worker to run in its thread
Args:
callback: the function to wrap
Returns:
the wrapped function
(Just like inpool for the worker)
'''
@wraps(callback)
def wrap(self, *args, **kwargs):
with s_threads.RetnWait() as retn:
self._workq.put((retn, callback, (self, ) + args, kwargs))
succ, rv = retn.wait(timeout=self.MAX_WAIT_S)
if succ:
if isinstance(rv, Exception):
raise rv
return rv
raise s_exc.TimeOut()
return wrap
class CryoTankIndexer:
'''
Manages indexing of a single cryotank's records
This implements a lazy indexer that indexes a cryotank in a separate thread.
Cryotank entries are msgpack-encoded values. An index consists of a property name, one or more datapaths (i.e.
what field out of the entry), and a synapse type. The type specifies the function that normalizes the output of
the datapath query into a string or integer.
Indices can be added and deleted asynchronously from the indexing thread via CryotankIndexer.addIndex and
CryotankIndexer.delIndex.
Indexes can be queried with queryNormValu, queryNormRecords, queryRows.
To harmonize with LMDB requirements, writing only occurs on a singular indexing thread. Reading indices takes
place in the caller's thread. Both reading and writing index metadata (that is, information about which indices
are running) take place on the indexer's thread.
Note:
The indexer cannot detect when a type has changed from underneath itself. Operators must explicitly delete
and re-add the index to avoid mixed normalized data.
'''
MAX_WAIT_S = 10
def __init__(self, cryotank):
'''
Create an indexer
Args:
cryotank: the cryotank to index
Returns:
None
'''
self.cryotank = cryotank
ebus = cryotank
self._worker = threading.Thread(target=self._workerloop, name='CryoTankIndexer')
path = s_common.gendir(cryotank.path, 'cryo_index.lmdb')
cryotank_map_size = cryotank.lmdb.info()['map_size']
self._dbenv = lmdb.open(path, writemap=True, metasync=False, max_readers=8, max_dbs=4,
map_size=cryotank_map_size)
# iid, v -> offset table
self._idxtbl = self._dbenv.open_db(b'indices', dupsort=True)
# offset, iid -> normalized prop
self._normtbl = self._dbenv.open_db(b'norms')
self._to_delete = {} # type: Dict[str, int]
self._workq = s_queue.Queue()
# A dict of propname -> MetaEntry
self._meta = _IndexMeta(self._dbenv)
self._next_offset = self._meta.lowestProgress()
self._chunk_sz = 1000 # < How many records to read at a time
self._remove_chunk_sz = 1000 # < How many index entries to remove at a time
ebus.on('cryotank:puts', self._onData)
self._worker.start()
def _onfini():
self._workq.done()
self._worker.join(self.MAX_WAIT_S)
self._dbenv.close()
ebus.onfini(_onfini)
def _onData(self, unused):
'''
Wake up the index worker if he already doesn't have a reason to be awake
'''
if 0 == len(self._workq):
self._workq.put((None, lambda: None, None, None))
def _removeSome(self):
'''
Make some progress on removing deleted indices
'''
left = self._remove_chunk_sz
for iid in self._meta.deleting:
if not left:
break
iid_enc = _iid_en(iid)
with self._dbenv.begin(db=self._idxtbl, buffers=True, write=True) as txn, txn.cursor() as curs:
if curs.set_range(iid_enc):
for k, offset_enc in curs.iternext():
if k[:len(iid_enc)] != iid_enc:
break
if not curs.delete():
raise s_exc.CorruptDatabase('delete failure') # pragma: no cover
txn.delete(offset_enc, iid_enc, db=self._normtbl)
left -= 1
if not left:
break
if not left:
break
self._meta.markDeleteComplete(iid)
def _normalize_records(self, raw_records):
'''
Yield stream of normalized fields
Args:
raw_records(Iterable[Tuple[int, Dict[int, str]]]) generator of tuples of offset/decoded raw cryotank
record
Returns:
Iterable[Tuple[int, int, Union[str, int]]]: generator of tuples of offset, index ID, normalized property
value
'''
for offset, record in raw_records:
self._next_offset = offset + 1
dp = s_datapath.initelem(s_msgpack.un(record))
for iid, idx in ((k, v) for k, v in self._meta.indices.items() if not self._meta.asleep[k]):
if self._meta.progresses[iid]['nextoffset'] > offset:
continue
try:
self._meta.progresses[iid]['nextoffset'] = offset + 1
for datapath in idx.datapaths:
field = dp.valu(datapath)
if field is None:
continue
# TODO : what to do with subprops?
break
else:
# logger.debug('Datapaths %s yield nothing for offset %d',
# [d.path for d in idx.datapaths], offset)
continue
normval, _ = s_datamodel.getTypeNorm(idx.syntype, field)
except (s_exc.NoSuchType, s_exc.BadTypeValu):
# logger.debug('Norm fail', exc_info=True)
self._meta.progresses[iid]['nnormfail'] += 1
continue
self._meta.progresses[iid]['ngood'] += 1
yield offset, iid, normval
def _writeIndices(self, rows):
'''
Persist actual indexing to disk
Args:
rows(Iterable[Tuple[int, int, Union[str, int]]]): generators of tuples of offset, index ID, normalized
property value
Returns:
int: the next cryotank offset that should be indexed
'''
count = -1
with self._dbenv.begin(db=self._idxtbl, buffers=True, write=True) as txn:
for count, (offset, iid, normval) in enumerate(rows):
offset_enc = _Int64be.pack(offset)
iid_enc = _iid_en(iid)
valkey_enc = s_lmdb.encodeValAsKey(normval)
txn.put(iid_enc + valkey_enc, offset_enc)
txn.put(offset_enc + iid_enc, s_msgpack.en(normval), db=self._normtbl)
self._meta.persist(progressonly=True, txn=txn)
return count + 1
def _workerloop(self):
'''
Actually do the indexing
Runs as separate thread.
'''
stillworktodo = True
last_callback = 'None'
while True:
# Run the outstanding commands
recalc = False
while True:
try:
job = self._workq.get(timeout=0 if stillworktodo else None)
stillworktodo = True
retn, callback, args, kwargs = job
try:
if retn is not None:
last_callback = callback.__name__
retn.retn(callback(*args, **kwargs))
recalc = True
except Exception as e:
if retn is None:
raise
else:
# Not using errx because I want the exception object itself
retn.retn(e)
except s_exc.IsFini:
return
except s_exc.TimeOut:
break
if recalc:
# Recalculate the next offset to index, since we may have a new index
self._next_offset = self._meta.lowestProgress()
record_tuples = self.cryotank.rows(self._next_offset, self._chunk_sz)
norm_gen = self._normalize_records(record_tuples)
rowcount = self._writeIndices(norm_gen)
self._removeSome()
if not rowcount and not self._meta.deleting:
if stillworktodo is True:
self.cryotank.fire('cryotank:indexer:noworkleft:' + last_callback)
last_callback = 'None'
stillworktodo = False
else:
stillworktodo = True
@_inWorker
def addIndex(self, prop, syntype, datapaths):
'''
Add an index to the cryotank
Args:
prop (str): the name of the property this will be stored as in the normalized record
syntype (str): the synapse type this will be interpreted as
datapaths(Iterable[str]): datapath specs against which the raw record is run to extract a single field
that is passed to the type normalizer. These will be tried in order until one succeeds. At least one
must be present.
Returns:
None
Note:
Additional datapaths will only be tried if prior datapaths are not present, and *not* if
the normalization fails.
'''
return self._meta.addIndex(prop, syntype, datapaths)
@_inWorker
def delIndex(self, prop):
'''
Delete an index
Args:
prop (str): the (normalized) property name
Returns:
None
'''
return self._meta.delIndex(prop)
@_inWorker
def pauseIndex(self, prop=None):
'''
Temporarily stop indexing one or all indices.
Args:
prop: (Optional[str]): the index to stop indexing, or if None, indicate to stop all indices
Returns:
None
Note:
Pausing is not persistent. Restarting the process will resume indexing.
'''
return self._meta.pauseIndex(prop)
@_inWorker
def resumeIndex(self, prop=None):
'''
Undo a pauseIndex
Args:
prop: (Optional[str]): the index to start indexing, or if None, indicate to resume all indices
Returns:
None
'''
return self._meta.resumeIndex(prop)
@_inWorker
def getIndices(self):
'''
Get information about all the indices
Args:
None
Returns:
List[Dict[str: Any]]: all the indices with progress and statistics
'''
idxs = {iid: dict(metaentry.asdict()) for iid, metaentry in self._meta.indices.items()}
for iid in idxs:
idxs[iid].update(self._meta.progresses.get(iid, {}))
return list(idxs.values())
def _iterrows(self, prop, valu, exact=False):
'''
Query against an index.
Args:
prop (str): The name of the indexed property
valu (Optional[Union[int, str]]): The normalized value. If not present, all records with prop present,
                sorted by prop will be returned. It will be considered a prefix if exact is False.
            exact (bool): Indicates that the result must match exactly. Conversely, if False, indicates a prefix match.
Returns:
Iterable[Tuple[int, bytes, bytes, lmdb.Transaction]: a generator of a Tuple of the offset, the encoded
offset, the encoded index ID, and the LMDB read transaction.
Note:
Ordering of Tuples disregard everything after the first 128 bytes of a property.
'''
iid = self._meta.iidFromProp(prop)
if iid is None:
raise s_exc.NoSuchIndx(mesg='No such index', index=prop)
iidenc = _iid_en(iid)
islarge = valu is not None and isinstance(valu, str) and len(valu) >= s_lmdb.LARGE_STRING_SIZE
if islarge and not exact:
raise s_exc.BadOperArg(mesg='prefix search valu cannot exceed 128 characters')
if islarge and exact:
key = iidenc + s_lmdb.encodeValAsKey(valu)
elif valu is None:
key = iidenc
else:
key = iidenc + s_lmdb.encodeValAsKey(valu, isprefix=not exact)
with self._dbenv.begin(db=self._idxtbl, buffers=True) as txn, txn.cursor() as curs:
if exact:
rv = curs.set_key(key)
else:
rv = curs.set_range(key)
if not rv:
return
while True:
rv = []
curkey, offset_enc = curs.item()
if (not exact and not curkey[:len(key)] == key) or (exact and curkey != key):
return
offset = _Int64be.unpack(offset_enc)[0]
yield (offset, offset_enc, iidenc, txn)
if not curs.next():
return
def queryNormValu(self, prop, valu=None, exact=False):
'''
Query for normalized individual property values
Args:
prop (str): The name of the indexed property
valu (Optional[Union[int, str]]): The normalized value. If not present, all records with prop present,
sorted by prop will be returned. It will be considered a prefix if exact is False.
exact (bool): Indicates that the result must match exactly. Conversely, if False, indicates a prefix match.
Returns:
Iterable[Tuple[int, Union[str, int]]]: A generator of offset, normalized value tuples.
'''
if not exact and valu is not None and isinstance(valu, str) and len(valu) >= s_lmdb.LARGE_STRING_SIZE:
raise s_exc.BadOperArg(mesg='prefix search valu cannot exceed 128 characters')
for (offset, offset_enc, iidenc, txn) in self._iterrows(prop, valu, exact):
rv = txn.get(bytes(offset_enc) + iidenc, None, db=self._normtbl)
if rv is None:
raise s_exc.CorruptDatabase('Missing normalized record') # pragma: no cover
yield offset, s_msgpack.un(rv)
def queryNormRecords(self, prop, valu=None, exact=False):
'''
Query for normalized property values grouped together in dicts
Args:
prop (str): The name of the indexed property
valu (Optional[Union[int, str]]): The normalized value. If not present, all records with prop present,
sorted by prop will be returned. It will be considered a prefix if exact is False.
exact (bool): Indicates that the result must match exactly. Conversely, if False, indicates a prefix match.
Returns:
Iterable[Tuple[int, Dict[str, Union[str, int]]]]: A generator of offset, dictionary tuples
'''
if not exact and valu is not None and isinstance(valu, str) and len(valu) >= s_lmdb.LARGE_STRING_SIZE:
raise s_exc.BadOperArg(mesg='prefix search valu cannot exceed 128 characters')
for offset, offset_enc, _, txn in self._iterrows(prop, valu, exact):
norm = {}
olen = len(offset_enc)
with txn.cursor(db=self._normtbl) as curs:
if not curs.set_range(offset_enc):
raise s_exc.CorruptDatabase('Missing normalized record') # pragma: no cover
while True:
curkey, norm_enc = curs.item()
if curkey[:olen] != offset_enc:
break
iid = _iid_un(curkey[olen:])
# this is racy with the worker, but it is still safe
idx = self._meta.indices.get(iid)
if idx is not None:
norm[idx.propname] = s_msgpack.un(norm_enc)
if not curs.next():
break
yield offset, norm
def queryRows(self, prop, valu=None, exact=False):
'''
Query for raw (i.e. from the cryotank itself) records
Args:
prop (str): The name of the indexed property
valu (Optional[Union[int, str]]): The normalized value. If not present, all records with prop present,
sorted by prop will be returned. It will be considered a prefix if exact is False.
exact (bool): Indicates that the result must match exactly. Conversely, if False, indicates a prefix match.
Returns:
Iterable[Tuple[int, bytes]]: A generator of tuple (offset, messagepack encoded) raw records
'''
if not exact and valu is not None and isinstance(valu, str) and len(valu) >= s_lmdb.LARGE_STRING_SIZE:
raise s_exc.BadOperArg(mesg='prefix search valu cannot exceed 128 characters')
for offset, _, _, txn in self._iterrows(prop, valu, exact):
yield next(self.cryotank.rows(offset, 1))
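# Illustrative sketch (not part of the original module): add an index to an
# existing CryoTank and read back normalized values. The property name
# 'foo:bar', the 'str' type and the 'foo' datapath are made-up example values,
# and indexing runs on a background thread, so results may lag the addIndex call.
def _example_index_query(tank):  # pragma: no cover
    idxr = tank.indexer
    idxr.addIndex('foo:bar', 'str', ('foo',))
    return list(idxr.queryNormValu('foo:bar'))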
|
apache-2.0
| -7,920,170,919,440,636,000
| 35.214912
| 120
| 0.568669
| false
| 3.987605
| false
| false
| false
|
Tufin/pytos
|
pytos/securetrack/xml_objects/rest/nat_rules.py
|
1
|
13490
|
import logging
from pytos.common.base_types import XML_List, XML_Object_Base, Comparable
from pytos.common.definitions.xml_tags import Elements
from pytos.common.logging.definitions import XML_LOGGER_NAME
from pytos.common.functions import get_xml_int_value, get_xml_text_value, get_xml_node, create_tagless_xml_objects_list, str_to_bool
from pytos.securetrack.xml_objects.base_types import Base_Object
logger = logging.getLogger(XML_LOGGER_NAME)
class NatRules(XML_List):
def __init__(self, nat_rules):
super().__init__(Elements.NAT_RULES, nat_rules)
@classmethod
def from_xml_node(cls, xml_node):
rules = []
for nat_rule in xml_node.iter(tag=Elements.NAT_RULE):
rules.append(NatRule.from_xml_node(nat_rule))
return cls(rules)
class NatRule(XML_Object_Base, Comparable):
    def __init__(self, binding, num_id, order, uid, auto_nat, disabled, dst_nat_method, enable_net4tonet6, enable_route_lookup,
orig_dst_network, orig_service, orig_src_network, egress_interface, rule_number, service_nat_method,
src_nat_method, translated_service, translated_dst_network, translated_src_network, nat_type):
self.binding = binding
self.id = num_id
self.order = order
self.uid = uid
self.autoNat = auto_nat
self.disabled = disabled
self.dstNatMethod = dst_nat_method
self.enable_net4tonet6 = enable_net4tonet6
self.enable_route_lookup = enable_route_lookup
self.orig_dst_network = orig_dst_network
self.orig_service = orig_service
self.orig_src_network = orig_src_network
self.egress_interface = egress_interface
self.ruleNumber = rule_number
self.serviceNatMethod = service_nat_method
self.srcNatMethod = src_nat_method
self.translated_service = translated_service
self.translated_dst_network = translated_dst_network
self.translated_src_network = translated_src_network
self.type = nat_type
super().__init__(Elements.NAT_RULE)
def _key(self):
hash_keys = [self.id, self.uid]
if self.binding:
try:
hash_keys.append(self.binding.uid)
except AttributeError:
pass
return tuple(hash_keys)
def __str__(self):
return "ORIGINAL: (src={} dst={} srv={}); TRANSLATED: (src={} dst={} srv={})".format(
self.orig_src_network,
self.orig_dst_network,
self.orig_service,
self.translated_src_network,
self.translated_dst_network,
self.translated_service
)
def is_enabled(self):
return str_to_bool(self.disabled)
@classmethod
def from_xml_node(cls, xml_node):
num_id = get_xml_int_value(xml_node, Elements.ID)
order = get_xml_text_value(xml_node, Elements.ORDER)
uid = get_xml_text_value(xml_node, Elements.UID)
auto_nat = get_xml_text_value(xml_node, Elements.AUTONAT)
disabled = get_xml_text_value(xml_node, Elements.DISABLED)
dst_nat_method = get_xml_text_value(xml_node, Elements.DST_NAT_METHOD)
enable_net4tonet6 = get_xml_text_value(xml_node, Elements.ENABLE_NET_4_TO_NET_6)
enable_route_lookup = get_xml_text_value(xml_node, Elements.ENABLE_ROUTE_LOOKUP)
rule_number = get_xml_text_value(xml_node, Elements.RULENUMBER)
service_nat_method = get_xml_text_value(xml_node, Elements.SERVICENATMETHOD)
src_nat_method = get_xml_text_value(xml_node, Elements.SRCNATMETHOD)
nat_type = get_xml_text_value(xml_node, Elements.TYPE)
binding = create_tagless_xml_objects_list(xml_node, Elements.BINDING, NatRuleBinding)[0]
orig_dst_network = create_tagless_xml_objects_list(xml_node, Elements.ORIG_DST_NETWORK, OrigDstNetwork)[0]
orig_service = create_tagless_xml_objects_list(xml_node, Elements.ORIG_SERVICE, OrigService)[0]
orig_src_network = create_tagless_xml_objects_list(xml_node, Elements.ORIG_SRC_NETWORK, OrigSrcNetwork)[0]
egress_interface_node = get_xml_node(xml_node, Elements.ENGRESS_INTERFACE)
egress_interface = EgressInterface.from_xml_node(egress_interface_node) if egress_interface_node else None
translated_service = create_tagless_xml_objects_list(xml_node, Elements.TRANSLATED_SERVICE, TranslatedService)[0]
translated_dst_network = create_tagless_xml_objects_list(xml_node, Elements.TRANSLATED_DST_NETWORK, TranslatedDstNetwork)[0]
translated_src_network = create_tagless_xml_objects_list(xml_node, Elements.TRANSLATED_SRC_NETWORK, TranslatedSrcNetwork)[0]
return cls(binding, num_id, order, uid, auto_nat, disabled, dst_nat_method, enable_net4tonet6, enable_route_lookup,
orig_dst_network, orig_service, orig_src_network, egress_interface, rule_number, service_nat_method,
src_nat_method, translated_service, translated_dst_network, translated_src_network, nat_type)
class NatRuleBinding(XML_Object_Base):
def __init__(self, default, postnat_iface, prenat_iface, rule_count, security_rule_count, uid):
self.default = default
self.postnat_iface = postnat_iface
self.prenat_iface = prenat_iface
self.rule_count = rule_count
self.security_rule_count = security_rule_count
self.uid = uid
super().__init__(Elements.BINDING)
@classmethod
def from_xml_node(cls, xml_node):
"""
Initialize the object from a XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
default = get_xml_text_value(xml_node, Elements.DEFAULT)
postnat_iface = get_xml_text_value(xml_node, Elements.POSTNAT_IFACE)
prenat_iface = get_xml_text_value(xml_node, Elements.PRENAT_IFACE)
rule_count = get_xml_text_value(xml_node, Elements.RULE_COUNT)
security_rule_count = get_xml_text_value(xml_node, Elements.SECURITY_RULE_COUNT)
uid = get_xml_text_value(xml_node, Elements.UID)
return cls(default, postnat_iface, prenat_iface, rule_count, security_rule_count, uid)
class OrigDstNetwork(Base_Object):
def __init__(self, id, uid, display_name, name):
super().__init__(Elements.ORIG_DST_NETWORK, name, display_name, id, uid)
@classmethod
def from_xml_node(cls, xml_node):
"""
Initialize the object from a XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
id = get_xml_int_value(xml_node, Elements.ID)
uid = get_xml_text_value(xml_node, Elements.UID)
display_name = get_xml_text_value(xml_node, Elements.DISPLAY_NAME)
name = get_xml_text_value(xml_node, Elements.NAME)
return cls(id, uid, display_name, name)
class OrigService(Base_Object):
def __init__(self, id, uid, display_name, name):
super().__init__(Elements.DST_SERVICE, name, display_name, id, uid)
@classmethod
def from_xml_node(cls, xml_node):
id = get_xml_int_value(xml_node, Elements.ID)
uid = get_xml_text_value(xml_node, Elements.UID)
display_name = get_xml_text_value(xml_node, Elements.DISPLAY_NAME)
name = get_xml_text_value(xml_node, Elements.NAME)
return cls(id, uid, display_name, name)
class OrigSrcNetwork(Base_Object):
def __init__(self, id, uid, display_name, name):
super().__init__(Elements.ORIG_SRC_NETWORK, name, display_name, id, uid)
@classmethod
def from_xml_node(cls, xml_node):
"""
Initialize the object from a XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
id = get_xml_int_value(xml_node, Elements.ID)
uid = get_xml_text_value(xml_node, Elements.UID)
display_name = get_xml_text_value(xml_node, Elements.DISPLAY_NAME)
name = get_xml_text_value(xml_node, Elements.NAME)
return cls(id, uid, display_name, name)
class TranslatedService(Base_Object):
def __init__(self, id, uid, display_name, name):
super().__init__(Elements.TRANSLATED_SERVICE, name, display_name, id, uid)
@classmethod
def from_xml_node(cls, xml_node):
"""
Initialize the object from a XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
id = get_xml_int_value(xml_node, Elements.ID)
uid = get_xml_text_value(xml_node, Elements.UID)
display_name = get_xml_text_value(xml_node, Elements.DISPLAY_NAME)
name = get_xml_text_value(xml_node, Elements.NAME)
return cls(id, uid, display_name, name)
class TranslatedSrcNetwork(Base_Object):
def __init__(self, id, uid, display_name, name):
super().__init__(Elements.TRANSLATED_SRC_NETWORK, name, display_name, id, uid)
@classmethod
def from_xml_node(cls, xml_node):
"""
Initialize the object from a XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
id = get_xml_int_value(xml_node, Elements.ID)
uid = get_xml_text_value(xml_node, Elements.UID)
display_name = get_xml_text_value(xml_node, Elements.DISPLAY_NAME)
name = get_xml_text_value(xml_node, Elements.NAME)
return cls(id, uid, display_name, name)
class TranslatedDstNetwork(Base_Object):
def __init__(self, id, uid, display_name, name, dm_inline_members):
super().__init__(Elements.TRANSLATED_DST_NETWORK, name, display_name, id, uid)
if dm_inline_members is not None:
self.dm_inline_members = dm_inline_members
@classmethod
def from_xml_node(cls, xml_node):
"""
Initialize the object from a XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
id = get_xml_int_value(xml_node, Elements.ID)
uid = get_xml_text_value(xml_node, Elements.UID)
display_name = get_xml_text_value(xml_node, Elements.DISPLAY_NAME)
name = get_xml_text_value(xml_node, Elements.NAME)
dm_inline_members_node = get_xml_node(xml_node, Elements.DM_INLINE_MEMBRES, True)
if dm_inline_members_node:
dm_inline_members = XML_List.from_xml_node_by_tags(xml_node, Elements.DM_INLINE_MEMBRES, Elements.MEMBER,
DmInlineMember)
else:
dm_inline_members = None
return cls(id, uid, display_name, name, dm_inline_members)
class DmInlineMember(Base_Object):
def __init__(self, id, uid, display_name, name):
super().__init__(Elements.MEMBER, name, display_name, id, uid)
@classmethod
def from_xml_node(cls, xml_node):
"""
Initialize the object from a XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
id = get_xml_int_value(xml_node, Elements.ID)
uid = get_xml_text_value(xml_node, Elements.UID)
display_name = get_xml_text_value(xml_node, Elements.DISPLAY_NAME)
name = get_xml_text_value(xml_node, Elements.NAME)
return cls(id, uid, display_name, name)
class EgressInterface(XML_Object_Base):
def __init__(self, name, id, direction, device_id, acl_name, is_global, interface_ips):
self.name = name
self.id = id
self.direction = direction
self.device_id = device_id
self.acl_name = acl_name
self.is_global = is_global
self.interface_ips = interface_ips
super().__init__(Elements.ENGRESS_INTERFACE)
@classmethod
def from_xml_node(cls, xml_node):
"""
Initialize the object from a XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
name = get_xml_text_value(xml_node, Elements.NAME)
id = get_xml_int_value(xml_node, Elements.ID)
direction = get_xml_text_value(xml_node, Elements.DIRECTION)
device_id = get_xml_text_value(xml_node, Elements.DEVICE_ID)
acl_name = get_xml_text_value(xml_node, Elements.ACL_NAME)
is_global = get_xml_text_value(xml_node, Elements.GLOBAL)
interface_ips_node = get_xml_node(xml_node, Elements.INTERFACE_IPS, True)
if interface_ips_node:
interface_ips = XML_List.from_xml_node_by_tags(xml_node, Elements.INTERFACE_IPS, Elements.INTERFACE_IP,
NatInterfaceIP)
else:
interface_ips = None
return cls(name, id, direction, device_id, acl_name, is_global, interface_ips)
class NatInterfaceIP(XML_Object_Base):
def __init__(self, ip, netmask):
self.ip = ip
self.netmask = netmask
super().__init__(Elements.INTERFACE_IP)
@classmethod
def from_xml_node(cls, xml_node):
ip = get_xml_text_value(xml_node, Elements.IP)
netmask = get_xml_text_value(xml_node, Elements.NETMASK)
return cls(ip, netmask)
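# --- Added illustrative sketch (not part of the original module) ---
# Given an ElementTree node for a <nat_rules> element (the XML string and the
# variable names below are assumptions based on the tags used above), parsing
# could look roughly like:
#
#     import xml.etree.ElementTree as ET
#     xml_node = ET.fromstring(nat_rules_xml_string)
#     nat_rules = NatRules.from_xml_node(xml_node)   # XML_List of NatRule objects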
|
apache-2.0
| -5,468,314,753,821,417,000
| 43.820598
| 132
| 0.646256
| false
| 3.437819
| false
| false
| false
|
brandonw/photo-album
|
photo_album/rotate_and_thumbs.py
|
1
|
1245
|
import os, sys
from PIL import Image, ExifTags
size = (128, 128)
for infile in os.listdir(sys.argv[1]):
inpath = os.path.join(sys.argv[1], infile)
pieces = os.path.splitext(inpath)
outpath = pieces[0] + ".thumb" + pieces[1]
if (inpath != outpath and not os.path.exists(outpath) and
'thumb' not in infile):
try:
image = Image.open(inpath)
for orientation in ExifTags.TAGS.keys():
if ExifTags.TAGS[orientation] == 'Orientation':
break
e = image._getexif()
if e is not None:
exif = dict(e.items())
if orientation in exif:
if exif[orientation] == 3:
image=image.transpose(Image.ROTATE_180)
elif exif[orientation] == 6:
image = image.transpose(Image.ROTATE_270)
elif exif[orientation] == 8:
image = image.transpose(Image.ROTATE_90)
image.save(inpath)
image.thumbnail(size, Image.ANTIALIAS)
image.save(outpath, 'JPEG')
except IOError as ex:
print('cannot create thumbnail for ' + infile + ' -- ' + ex.strerror)
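# --- Added usage note (assumption, not from the original script) ---
# The directory to process is taken from the first command line argument, e.g.:
#
#     python rotate_and_thumbs.py /path/to/album
#
# Each image is rotated according to its EXIF orientation tag and a 128x128
# thumbnail is written next to it (e.g. photo.jpg -> photo.thumb.jpg).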
|
bsd-3-clause
| 8,823,710,597,784,660,000
| 36.727273
| 81
| 0.526104
| false
| 3.939873
| false
| false
| false
|
innovation-cat/DeepLearningBook
|
cifar10 classification/py3/softmax.py
|
1
|
4468
|
# coding: utf-8
#
# softmax.py
#
# Author: Huang Anbu
# Date: 2017.3
#
# Description: Implementation of softmax classification
#
# Copyright©2017. All Rights Reserved.
# ===============================================================================================
from __future__ import print_function, division
from basiclib import *
# Build the model
class SoftmaxLayer:
def __init__ (self, input, n_input, n_output):
self.input = input
self.n_input = n_input
self.n_output = n_output
# Initialize the weight parameters
self.W = theano.shared(
value = numpy.zeros(shape=(n_input, n_output)).astype(theano.config.floatX), name = "W", borrow = True
)
self.b = theano.shared(
value = numpy.zeros(shape=(n_output, )).astype(theano.config.floatX), name = 'b', borrow = True
)
self.params = [self.W, self.b]
# Output probability matrix
self.p_y_given_x = T.nnet.softmax(T.dot(self.input, self.W)+self.b)
# Predicted class labels
self.p_pred = T.argmax(self.p_y_given_x, axis=1)
def cross_entropy(self, y):
# Cross-entropy loss function
return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])
def get_cost_updates(self, y, lr, reg, optimizer_fun):
cost = self.cross_entropy(y) + 0.5*reg*((self.W**2).sum())
try:
updates = optimizer_fun(cost, self.params, lr)
except:
print("Error: no optimizer function")
else:
return (cost, updates)
def error_rate(self, y):
# Error rate
return T.mean(T.neq(self.p_pred, y))
if __name__ == "__main__":
# Load the input data
train_x, train_y = load_cifar10_dataset(r"./dataset/cifar-10-batches-py/data_batch_*")
test_x, test_y = load_cifar10_dataset(r"./dataset/cifar-10-batches-py/test_batch")
train_x = train_x / 255.0
test_x = test_x / 255.0
train_set_size, col = train_x.shape
test_set_size, _ = test_x.shape
# Define the tensor variables
x = T.matrix('x').astype(theano.config.floatX)
y = T.ivector('y')
index = T.iscalar('index')
lr = T.scalar('lr', dtype=theano.config.floatX)
reg = T.scalar('reg', dtype=theano.config.floatX)
batch_size = options['batch_size']
n_train_batch = train_set_size//batch_size
n_test_batch = test_set_size//batch_size
model = SoftmaxLayer(x, col, options['n_output'])
cost, updates = model.get_cost_updates(y, lr, reg, optimizer[options["optimizer"]])
# Build the training function
train_model = theano.function(inputs = [x, y, lr, reg], outputs = cost, updates = updates)
# Build the evaluation functions
train_err = theano.function(inputs = [x, y, lr, reg], outputs = model.error_rate(y), on_unused_input = 'ignore')
test_err = theano.function(inputs = [x, y, lr, reg], outputs = model.error_rate(y), on_unused_input = 'ignore')
idx = numpy.arange(train_set_size)
train_num = 0
best_err = 1.0
error_output = open("softmax.txt", "w")
with open("model_softmax.npz", "wb") as fout:
for epoch in range(options["n_epoch"]):
numpy.random.shuffle(idx)
new_train_x = [train_x[i] for i in idx]
new_train_y = [train_y[i] for i in idx]
for n_batch_index in range(n_train_batch):
c = train_model(
new_train_x[n_batch_index*batch_size:(n_batch_index+1)*batch_size],
new_train_y[n_batch_index*batch_size:(n_batch_index+1)*batch_size],
0.0001, 0.0
)
train_num = train_num + 1
if train_num%options["print_freq"]==0:
print("train num: %d, cost: %lf"%(train_num, c))
if train_num%options["valid_freq"]==0:
train_errors = [train_err(train_x[n_train_index*batch_size:(n_train_index+1)*batch_size], train_y[n_train_index*batch_size:(n_train_index+1)*batch_size], 0.00000001, 0.0) for n_train_index in range(n_train_batch)]
test_errors = [test_err(test_x[n_test_index*batch_size:(n_test_index+1)*batch_size], test_y[n_test_index*batch_size:(n_test_index+1)*batch_size], 0.00000001, 0.0) for n_test_index in range(n_test_batch)]
if numpy.mean(test_errors) < best_err:
best_err = numpy.mean(test_errors)
params = dict([(p.name, p.get_value()) for p in model.params])
numpy.savez(fout, params)
print("train num: %d, best train error: %lf, best test error: %lf"%(train_num, numpy.mean(train_errors), numpy.mean(test_errors)))
print("epoch %d end" % epoch)
test_errors = [test_err(test_x[n_test_index*batch_size:(n_test_index+1)*batch_size], test_y[n_test_index*batch_size:(n_test_index+1)*batch_size], 0.00000001, 0.0) for n_test_index in range(n_test_batch)]
print("%lf" % numpy.mean(test_errors), file=error_output)
|
mit
| -4,965,939,120,042,824,000
| 33.393701
| 218
| 0.63499
| false
| 2.549329
| true
| false
| false
|
sthesing/Podstatty
|
db.py
|
1
|
4858
|
# -*- coding: utf8 -*-
## Copyright (c) 2013 Stefan Thesing
##
##This file is part of Podstatty.
##
##Podstatty is free software: you can redistribute it and/or modify
##it under the terms of the GNU General Public License as published by
##the Free Software Foundation, either version 3 of the License, or
##(at your option) any later version.
##
##Podstatty is distributed in the hope that it will be useful,
##but WITHOUT ANY WARRANTY; without even the implied warranty of
##MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
##GNU General Public License for more details.
##
##You should have received a copy of the GNU General Public License
##along with Podstatty. If not, see http://www.gnu.org/licenses/.
from storm.locals import Storm, Int, Unicode, ReferenceSet
import requests
class Db:
"""
A class intended to provide handy control over the database.
"""
def __init__(self, store, base_url):
self.store = store
self.base_url = base_url
def add_file(self, filename, exclude_strings):
"""
Processes a prepared logfile and stores the data into the
database.
"""
log = open(filename)
date = filename.split("access_log_")[1]
date = date.replace("_filtered.txt", "")
if self.store.find(Stats, Stats.date_time_string == unicode(date)).count():
print "A logfile for this date has already been processed."
return None
stats =[]
for line in log:
# In the settings file, users can specify strings that are
# used as filter criteria. If the line contains this string,
# it won't be processed.
# In the beginning, we assume the line will be processed.
line_shall_be_processed = True
# 'exclude_strings' is a list of the filter criteria.
# If the line contains one of those strings, the line will
# not be processed.
for string in exclude_strings:
if string in line:
line_shall_be_processed = False
if line_shall_be_processed:
split_line = line.split()
stat = Stats(unicode(split_line[0]), int(split_line[1]), unicode(date))
stats.append(stat)
urls = []
for stat in stats:
if not stat.url in urls:
urls.append(stat.url)
for url in urls:
new_stat = Stats(url, 0, unicode(date))
for stat in stats:
if stat.url == url:
new_stat.traffic = new_stat.traffic+stat.traffic
self.store.add(new_stat)
#check if all URLs are already in table "filesizes", if not,
#get the filesize and write it into that table
self.check_url(url)
self.store.flush()
self.store.commit()
def check_url(self, url):
"""
Checks if the filesize of the file found behind this url is
already stored in the database. If not, it tries to retrieve
the filesize by making a http HEAD request and stores it into
the database.
"""
#if the url is not yet in the "Filesizes" table
if not self.store.find(Filesizes, Filesizes.url == url).count():
# Get the filesize from the server
# TODO Implement error routine
r = requests.head(self.base_url + url)
# Files no longer present on the server are removed, for now.
# TODO Maybe add an "else"-condition here and ask the user what to do?
# What about files that are no longer there but you still want to
# have them in your statistics?
if not (r.status_code == 404):
size = int(r.headers['Content-Length'])
# Write the URL and it's filesize to database
self.store.add(Filesizes(url, size))
class Stats(Storm):
"""
The table containing the actual numbers
'CREATE TABLE stats (id INTEGER PRIMARY KEY, url VARCHAR,
traffic INTEGER, date_time_string VARCHAR)'
"""
__storm_table__ = "stats"
id = Int(primary=True)
url = Unicode()
traffic = Int()
date_time_string = Unicode()
def __init__(self, url, traffic, date_time_string):
self.url = url
self.traffic = traffic
self.date_time_string = date_time_string
class Filesizes(Storm):
"""
The table containing the filesizes for each URL
'CREATE TABLE filesizes (url VARCHAR PRIMARY KEY, filesize INTEGER)'
"""
__storm_table__ = "filesizes"
url = Unicode(primary=True)
filesize = Int()
def __init__(self, url, filesize):
self.url = url
self.filesize = filesize
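# --- Added usage sketch (illustrative; the store setup below follows the Storm
# ORM documentation and is not part of this module) ---
#
#     from storm.locals import create_database, Store
#     database = create_database("sqlite:podstatty.db")
#     store = Store(database)
#     db = Db(store, "http://example.org/podcast/")
#     db.add_file("access_log_2013-05-01_filtered.txt", exclude_strings=["bot"])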
|
gpl-3.0
| -6,791,800,694,305,659,000
| 37.251969
| 87
| 0.594072
| false
| 4.299115
| false
| false
| false
|
SabaFar/plc
|
examples/plc-ccphy-example.py
|
1
|
4082
|
# -*- Mode:Python; -*-
# /*
# * Copyright (c) 2010 INRIA
# *
# * This program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License version 2 as
# * published by the Free Software Foundation;
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; if not, write to the Free Software
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# *
# * Authors: Alexander Schloegl <alexander.schloegl@gmx.de>
# */
# Chase combining phy example
import ns.plc
import ns.core
import ns.spectrum
import ns.network
def startTx(phy,p):
phy.StartTx(p)
def sendRedundancy(phy):
phy.SendRedundancy()
def receiveSuccess(packet):
print "\n*** Packet received ***\n"
def main(dummy_argv):
## Enable logging
ns.core.LogComponentEnableAll(ns.core.LOG_PREFIX_TIME)
ns.core.LogComponentEnable('PLC_Phy', ns.core.LOG_LEVEL_FUNCTION)
# ns.core.LogComponentEnable('PLC_LinkPerformanceModel', ns.core.LOG_LEVEL_LOGIC)
# ns.core.LogComponentEnable('PLC_Interference', ns.core.LOG_LEVEL_LOGIC)
## Enable packet printing
ns.network.Packet.EnablePrinting()
## Define spectrum model
sm = ns.plc.PLC_SpectrumModelHelper().GetG3SpectrumModel()
## Define time model, mains frequency: 60Hz, OFDM symbol duration: 2240us
ns.plc.PLC_Time.SetTimeModel(60, ns.core.MicroSeconds(2240))
## Define transmit power spectral density
txPsd = ns.spectrum.SpectrumValue(sm)
txPsd += 1e-8;
## Create nodes
n1 = ns.plc.PLC_Node()
n2 = ns.plc.PLC_Node()
n1.SetPosition(0,0,0)
n2.SetPosition(1000,0,0)
n1.SetName('Node1')
n2.SetName('Node2')
nodes = [n1,n2]
## Create cable type
cable = ns.plc.PLC_NAYY50SE_Cable(sm)
## Link nodes
ns.plc.PLC_Line(cable,n1,n2)
## Setup channel
channelHelper = ns.plc.PLC_ChannelHelper()
channelHelper.Install(nodes)
channel = channelHelper.GetChannel()
## Create outlets
o1 = ns.plc.PLC_Outlet(n1)
o2 = ns.plc.PLC_Outlet(n2)
## Create PHYs
phy1 = ns.plc.PLC_ChaseCombiningPhy()
phy2 = ns.plc.PLC_ChaseCombiningPhy()
## Define RX/TX impedances
txImp = ns.plc.PLC_ConstImpedance(sm, 50)
rxImp = ns.plc.PLC_ConstImpedance(sm, 50)
## Create interfaces
phy1.CreateInterfaces(o1, txPsd, txImp, rxImp)
phy2.CreateInterfaces(o2, txPsd, txImp, rxImp)
## Set background noise
noiseFloor = ns.plc.PLC_ColoredNoiseFloor(-140,38.75,-0.72,sm).GetNoisePsd()
noiseFloor += 1e-7
phy1.SetNoiseFloor(noiseFloor)
phy2.SetNoiseFloor(noiseFloor)
## Set modulation and coding scheme
phy1.SetHeaderModulationAndCodingScheme(ns.plc.BPSK_1_2)
phy2.SetHeaderModulationAndCodingScheme(ns.plc.BPSK_1_2)
phy1.SetPayloadModulationAndCodingScheme(ns.plc.QAM64_16_21)
phy2.SetPayloadModulationAndCodingScheme(ns.plc.QAM64_16_21)
## Aggregate RX-Interfaces to ns3 nodes
phy1.GetRxInterface().AggregateObject(ns.network.Node())
phy2.GetRxInterface().AggregateObject(ns.network.Node())
## Set the function to be called after successful packet reception by phy2
phy2.SetReceiveSuccessCallback(receiveSuccess)
## Calculate channels
channel.InitTransmissionChannels()
channel.CalcTransmissionChannels()
## Create packet to send
p = ns.network.Packet(128)
## Schedule chase combining transmissions
ns.core.Simulator.Schedule(ns.core.Seconds(1), startTx, phy1, p)
for i in range(1,11):
ns.core.Simulator.Schedule(ns.core.Seconds(i), sendRedundancy, phy1)
## Start simulation
ns.core.Simulator.Run()
## Cleanup simulation
ns.core.Simulator.Destroy()
if __name__ == '__main__':
import sys
main(sys.argv)
|
gpl-3.0
| -16,086,139,063,123,222
| 29.691729
| 84
| 0.695492
| false
| 3.104183
| false
| false
| false
|
ayepezv/GAD_ERP
|
addons/website_sale/models/product.py
|
1
|
8355
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, tools, _
class ProductStyle(models.Model):
_name = "product.style"
name = fields.Char(string='Style Name', required=True)
html_class = fields.Char(string='HTML Classes')
class ProductPricelist(models.Model):
_inherit = "product.pricelist"
code = fields.Char(string='E-commerce Promotional Code')
class ProductPublicCategory(models.Model):
_name = "product.public.category"
_inherit = ["website.seo.metadata"]
_description = "Website Product Category"
_order = "sequence, name"
name = fields.Char(required=True, translate=True)
parent_id = fields.Many2one('product.public.category', string='Parent Category', index=True)
child_id = fields.One2many('product.public.category', 'parent_id', string='Children Categories')
sequence = fields.Integer(help="Gives the sequence order when displaying a list of product categories.")
# NOTE: there is no 'default image', because by default we don't show
# thumbnails for categories. However if we have a thumbnail for at least one
# category, then we display a default image on the other, so that the
# buttons have consistent styling.
# In this case, the default image is set by the js code.
image = fields.Binary(attachment=True, help="This field holds the image used as image for the category, limited to 1024x1024px.")
image_medium = fields.Binary(string='Medium-sized image', attachment=True,
help="Medium-sized image of the category. It is automatically "
"resized as a 128x128px image, with aspect ratio preserved. "
"Use this field in form views or some kanban views.")
image_small = fields.Binary(string='Small-sized image', attachment=True,
help="Small-sized image of the category. It is automatically "
"resized as a 64x64px image, with aspect ratio preserved. "
"Use this field anywhere a small image is required.")
@api.model
def create(self, vals):
tools.image_resize_images(vals)
return super(ProductPublicCategory, self).create(vals)
@api.multi
def write(self, vals):
tools.image_resize_images(vals)
return super(ProductPublicCategory, self).write(vals)
@api.constrains('parent_id')
def check_parent_id(self):
if not self._check_recursion():
raise ValueError(_('Error ! You cannot create recursive categories.'))
@api.multi
def name_get(self):
res = []
for category in self:
names = [category.name]
parent_category = category.parent_id
while parent_category:
names.append(parent_category.name)
parent_category = parent_category.parent_id
res.append((category.id, ' / '.join(reversed(names))))
return res
class ProductTemplate(models.Model):
_inherit = ["product.template", "website.seo.metadata", 'website.published.mixin', 'rating.mixin']
_order = 'website_published desc, website_sequence desc, name'
_name = 'product.template'
_mail_post_access = 'read'
website_message_ids = fields.One2many(
'mail.message', 'res_id',
domain=lambda self: ['&', ('model', '=', self._name), ('message_type', '=', 'comment')],
string='Website Comments',
)
website_description = fields.Html('Description for the website', sanitize=False, translate=True)
alternative_product_ids = fields.Many2many('product.template', 'product_alternative_rel', 'src_id', 'dest_id',
string='Suggested Products', help='Appear on the product page')
accessory_product_ids = fields.Many2many('product.product', 'product_accessory_rel', 'src_id', 'dest_id',
string='Accessory Products', help='Appear on the shopping cart')
website_size_x = fields.Integer('Size X', default=1)
website_size_y = fields.Integer('Size Y', default=1)
website_style_ids = fields.Many2many('product.style', string='Styles')
website_sequence = fields.Integer('Website Sequence', help="Determine the display order in the Website E-commerce",
default=lambda self: self._default_website_sequence())
public_categ_ids = fields.Many2many('product.public.category', string='Website Product Category',
help="Those categories are used to group similar products for e-commerce.")
availability = fields.Selection([
('empty', 'Display Nothing'),
('in_stock', 'In Stock'),
('warning', 'Warning'),
], "Availability", default='empty', help="This field is used to display a availability banner with a message on the ecommerce")
availability_warning = fields.Text("Availability Warning", translate=True)
def _default_website_sequence(self):
self._cr.execute("SELECT MIN(website_sequence) FROM %s" % self._table)
min_sequence = self._cr.fetchone()[0]
return min_sequence and min_sequence - 1 or 10
def set_sequence_top(self):
self.website_sequence = self.sudo().search([], order='website_sequence desc', limit=1).website_sequence + 1
def set_sequence_bottom(self):
self.website_sequence = self.sudo().search([], order='website_sequence', limit=1).website_sequence - 1
def set_sequence_up(self):
previous_product_tmpl = self.sudo().search(
[('website_sequence', '>', self.website_sequence), ('website_published', '=', self.website_published)],
order='website_sequence', limit=1)
if previous_product_tmpl:
previous_product_tmpl.website_sequence, self.website_sequence = self.website_sequence, previous_product_tmpl.website_sequence
else:
self.set_sequence_top()
def set_sequence_down(self):
next_prodcut_tmpl = self.search([('website_sequence', '<', self.website_sequence), ('website_published', '=', self.website_published)], order='website_sequence desc', limit=1)
if next_prodcut_tmpl:
next_prodcut_tmpl.website_sequence, self.website_sequence = self.website_sequence, next_prodcut_tmpl.website_sequence
else:
return self.set_sequence_bottom()
@api.multi
def _compute_website_url(self):
super(ProductTemplate, self)._compute_website_url()
for product in self:
product.website_url = "/shop/product/%s" % (product.id,)
@api.multi
def display_price(self, pricelist, qty=1, public=False, **kw):
self.ensure_one()
return self.product_variant_ids and self.product_variant_ids[0].display_price(pricelist, qty=qty, public=public) or 0
class Product(models.Model):
_inherit = "product.product"
@api.multi
def website_publish_button(self):
self.ensure_one()
return self.product_tmpl_id.website_publish_button()
@api.multi
def display_price(self, pricelist, qty=1, public=False, **kw):
self.ensure_one()
partner = self.env.user.partner_id
context = {
'pricelist': pricelist.id,
'quantity': qty,
'partner': partner
}
ret = self.env.user.has_group('sale.group_show_price_subtotal') and 'total_excluded' or 'total_included'
taxes = partner.property_account_position_id.map_tax(self.taxes_id)
return taxes.compute_all(public and self.lst_price or self.with_context(context).price, pricelist.currency_id, qty, product=self, partner=partner)[ret]
class ProductAttribute(models.Model):
_inherit = "product.attribute"
type = fields.Selection([('radio', 'Radio'), ('select', 'Select'), ('color', 'Color'), ('hidden', 'Hidden')], default='radio')
class ProductAttributeValue(models.Model):
_inherit = "product.attribute.value"
html_color = fields.Char(string='HTML Color Index', oldname='color', help="Here you can set a "
"specific HTML color index (e.g. #ff0000) to display the color on the website if the "
"attibute type is 'Color'.")
|
gpl-3.0
| 7,746,958,657,846,388,000
| 47.017241
| 183
| 0.641412
| false
| 4.07959
| false
| false
| false
|
poldracklab/cogat
|
cognitive/apps/atlas/forms.py
|
1
|
18517
|
from django import forms
from django.core.exceptions import ValidationError
from django.urls import reverse
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Div, Field, HTML, Layout, Reset, Submit
from cognitive.apps.atlas.query import Assertion, Disorder, Task, Battery, ConceptClass, Concept
import cognitive.apps.atlas.query as query
def set_field_html_name(cls, new_name):
"""
This creates wrapper around the normal widget rendering,
allowing for a custom field name (new_name).
"""
old_render = cls.widget.render
def _widget_render_wrapper(name, value, attrs=None):
return old_render(new_name, value, attrs)
cls.widget.render = _widget_render_wrapper
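# --- Added illustrative note (not part of the original module) ---
# After set_field_html_name(field, new_name), the widget renders its <input>
# with name=new_name instead of the name Django derives from the form field.
# WeightForm below uses this so each weight input is posted under its
# condition id, e.g. (the id is made up):
#
#     form = WeightForm('cnd_12345', 'Weight for condition X')
#     # the rendered weight input's name attribute is 'cnd_12345'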
class TaskForm(forms.Form):
term_name = forms.CharField(required=True)
definition_text = forms.CharField(required=True)
class ConceptForm(forms.Form):
name = forms.CharField(required=True, label="Term:")
definition_text = forms.CharField(required=True, widget=forms.Textarea(),
label="Your Definition:")
concept_class = ConceptClass()
choices = [(x['id'], "-yes- " + str(x['name']))
for x in concept_class.all()]
choices.insert(0, (None, "-no-"))
cc_label = "In your opinion, does this concept belong to a larger class of concepts?"
concept_class = forms.ChoiceField(
choices=choices, label=cc_label, required=False)
def __init__(self, concept_id, *args, **kwargs):
if not args or not args[0].get('submit'):
concept = Concept()
con_class = concept.get_relation(concept_id, "CLASSIFIEDUNDER",
label="concept_class")
if con_class:
args[0]['concept_class'] = con_class[0]['id']
super(ConceptForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_action = reverse(
'update_concept', kwargs={'uid': concept_id, })
self.helper.layout = Layout(
Div(
Field('name'),
Field('definition_text'),
Field('concept_class'),
Submit('submit', 'Submit'),
Reset('concept-cancel', 'Cancel', type="reset"),
css_class="formline",
)
)
class ContrastForm(forms.Form):
name = forms.CharField(required=True)
description = forms.CharField(required=True)
class ConditionForm(forms.Form):
condition_text = forms.CharField(required=True)
condition_description = forms.CharField(required=True)
class WeightForm(forms.Form):
weight = forms.FloatField()
def __init__(self, cond_id, label, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_tag = False
self.weight_name = cond_id
self.fields['weight'].label = label
set_field_html_name(self.fields['weight'], self.weight_name)
def clean_weight(self):
data = self.data['weight']
if not data:
raise ValidationError('Missing input')
return data
class ImplementationForm(forms.Form):
implementation_uri = forms.URLField(required=True)
implementation_name = forms.CharField(required=True)
implementation_description = forms.CharField(required=True)
def __init__(self, *args, **kwargs):
super(ImplementationForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_tag = False
self.helper.add_input(Submit('submit', 'Submit'))
self.helper.add_input(Reset('implementation-cancel', 'Cancel'))
class ExternalDatasetForm(forms.Form):
dataset_name = forms.CharField(required=True)
dataset_uri = forms.URLField(required=True)
def __init__(self, *args, **kwargs):
super(ExternalDatasetForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_tag = False
self.helper.add_input(Submit('submit', 'Submit'))
self.helper.add_input(Reset('dataset-cancel', 'Cancel'))
class IndicatorForm(forms.Form):
type = forms.CharField(required=True)
def __init__(self, *args, **kwargs):
super(IndicatorForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_tag = False
self.helper.add_input(Submit('submit', 'Submit'))
self.helper.add_input(Reset('indicator-cancel', 'Cancel'))
class CitationForm(forms.Form):
citation_url = forms.URLField(required=True)
citation_comment = forms.CharField(required=False)
citation_desc = forms.CharField(required=True)
citation_authors = forms.CharField(required=False)
citation_type = forms.CharField(required=False)
citation_pubname = forms.CharField(required=False)
citation_pubdate = forms.CharField(required=False)
citation_pmid = forms.CharField(required=False)
citation_source = forms.CharField(required=False)
doi = forms.CharField(required=False)
def __init__(self, *args, **kwargs):
super(CitationForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_tag = False
self.helper.add_input(Submit('submit', 'Submit'))
self.helper.add_input(Reset('citation-cancel', 'Cancel'))
class DisorderForm(forms.Form):
name = forms.CharField(required=True)
definition = forms.CharField(required=True, widget=forms.Textarea())
def __init__(self, *args, **kwargs):
super(DisorderForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_tag = False
self.helper.add_input(Submit('submit', 'Submit'))
self.helper.add_input(Reset('disorder-cancel', 'Cancel'))
class TheoryAssertionForm(forms.Form):
def __init__(self, *args, **kwargs):
super(TheoryAssertionForm, self).__init__(*args, **kwargs)
assertions = Assertion()
choices = [(x['id'], x['name']) for x in assertions.all()]
self.fields['assertions'] = forms.ChoiceField(choices=choices)
self.helper = FormHelper()
self.helper.form_tag = False
self.helper.add_input(Submit('submit', 'Submit'))
self.helper.add_input(Reset('theory-assertion-cancel', 'Cancel'))
class TaskDisorderForm(forms.Form):
def __init__(self, task_id, *args, **kwargs):
super(TaskDisorderForm, self).__init__(*args, **kwargs)
disorders = Disorder()
behaviors = query.Behavior()
traits = query.Trait()
tasks = Task()
contrasts = tasks.get_relation(task_id, "HASCONTRAST")
cont_choices = [(x['id'], x['name']) for x in contrasts]
self.fields['contrasts'] = forms.ChoiceField(choices=cont_choices)
pheno_choices = []
pheno_choices.extend(
[(x['id'], ''.join([x['name'], " (Disorder)"])) for x in disorders.all()])
pheno_choices.extend(
[(x['id'], ''.join([x['name'], " (Behavior)"])) for x in behaviors.all()])
pheno_choices.extend(
[(x['id'], ''.join([x['name'], " (Trait)"])) for x in traits.all()])
self.fields['disorders'] = forms.ChoiceField(choices=pheno_choices)
self.helper = FormHelper()
self.helper.form_tag = False
self.helper.add_input(Submit('submit', 'Submit'))
self.helper.add_input(Reset('task-disorder-cancel', 'Cancel'))
class TaskConceptForm(forms.Form):
def __init__(self, task_id, *args, **kwargs):
super(TaskConceptForm, self).__init__(*args, **kwargs)
concept = Concept()
tasks = Task()
contrasts = tasks.get_relation(task_id, "HASCONTRAST")
cont_choices = [(x['id'], x['name']) for x in contrasts]
self.fields['concept-contrasts'] = forms.ChoiceField(
choices=cont_choices)
concept_choices = [(x['id'], x['name']) for x in concept.all()]
self.fields['concept'] = forms.ChoiceField(choices=concept_choices)
self.helper = FormHelper()
self.helper.attrs = {'id': 'concept-form'}
self.helper.form_class = "hidden"
self.helper.form_action = reverse('add_task_concept',
kwargs={'uid': task_id})
self.helper.add_input(Submit('submit', 'Submit'))
self.helper.add_input(Reset('task-concept-cancel', 'Cancel'))
class TheoryForm(forms.Form):
label = "Enter the name of the theory collection you wish to add: "
name = forms.CharField(required=True, label=label)
def __init__(self, *args, **kwargs):
super(TheoryForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.attrs = {'id': 'theory-form'}
self.helper.form_class = "hidden"
self.helper.form_action = reverse('add_theory')
self.helper.add_input(Submit('submit', 'Submit'))
self.helper.add_input(Reset('theory-cancel', 'Cancel'))
class BatteryForm(forms.Form):
label = "Enter the name of the task collection you wish to add: "
name = forms.CharField(required=True, label=label)
def __init__(self, *args, **kwargs):
super(BatteryForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.attrs = {'id': 'battery-form'}
self.helper.form_class = "hidden"
self.helper.form_action = reverse('add_battery')
self.helper.add_input(Submit('submit', 'Submit'))
self.helper.add_input(Reset('battery-cancel', 'Cancel', type="button"))
class ConceptTaskForm(forms.Form):
def __init__(self, *args, **kwargs):
super(ConceptTaskForm, self).__init__(*args, **kwargs)
tasks = Task()
choices = [(x['id'], x['name']) for x in tasks.all()]
self.fields['tasks'] = forms.ChoiceField(choices=choices)
self.helper = FormHelper()
self.helper.form_class = "hidden"
self.helper.add_input(Submit('submit', 'Submit'))
self.helper.add_input(Reset('battery-cancel', 'Cancel', type="button"))
class BatteryBatteryForm(forms.Form):
def __init__(self, *args, **kwargs):
super(BatteryBatteryForm, self).__init__(*args, **kwargs)
batteries = Battery()
choices = [(x['id'], x['name']) for x in batteries.all()]
self.fields['batteries'] = forms.ChoiceField(choices=choices)
self.helper = FormHelper()
self.helper.form_tag = False
self.helper.add_input(Submit('submit', 'Submit'))
self.helper.add_input(Reset('battery-cancel', 'Cancel', type="button"))
class BatteryTaskForm(forms.Form):
def __init__(self, *args, **kwargs):
super(BatteryTaskForm, self).__init__(*args, **kwargs)
tasks = Task()
choices = [(x['id'], x['name']) for x in tasks.all()]
self.fields['tasks'] = forms.ChoiceField(choices=choices)
self.helper = FormHelper()
self.helper.form_tag = False
self.helper.add_input(Submit('submit', 'Submit'))
self.helper.add_input(Reset('battery-task-cancel', 'Cancel',
type="button"))
class ConceptContrastForm(forms.Form):
def __init__(self, task_id, concept_id, *args, **kwargs):
super(ConceptContrastForm, self).__init__(*args, **kwargs)
tasks = Task()
contrasts = tasks.get_relation(task_id, "HASCONTRAST")
choices = [(x['id'], x['name']) for x in contrasts]
self.fields['contrasts'] = forms.ChoiceField(choices=choices)
self.helper = FormHelper()
self.helper.add_input(Submit('submit', 'Submit'))
self.helper.add_input(Reset('battery-cancel', 'Cancel', type="button"))
self.helper.form_action = reverse('add_concept_contrast',
kwargs={'uid': concept_id, 'tid': task_id})
class DisorderDisorderForm(forms.Form):
''' form for relating disorders to themselves '''
type = forms.ChoiceField(
choices=[('parent', 'Parent'), ('child', 'Child')])
def __init__(self, name=None, *args, **kwargs):
super(DisorderDisorderForm, self).__init__(*args, **kwargs)
name = (name if name is not None else '')
disorders = Disorder()
type_choices = [
('parent', '{} is a kind of <selected disorder>'.format(name)),
('child', '<selected disorder> is a kind of {}'.format(name))
]
dis_choices = [(x['id'], x['name']) for x in disorders.all()]
self.fields['type'] = forms.ChoiceField(choices=type_choices)
self.fields['disorders'] = forms.ChoiceField(choices=dis_choices)
self.helper = FormHelper()
self.helper.form_tag = False
self.helper.add_input(Submit('submit', 'Submit'))
self.helper.add_input(Reset('disorder-disorder-cancel', 'Cancel'))
class ExternalLinkForm(forms.Form):
''' an external link for a node. For disorders this link may describe the
disorder in more detail'''
uri = forms.URLField(
required=True, label="Enter the full URL for the link")
def __init__(self, *args, **kwargs):
super(ExternalLinkForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_tag = False
self.helper.add_input(Submit('submit', 'Submit'))
self.helper.add_input(Reset('link-cancel', 'Cancel'))
class ConceptClassForm(forms.Form):
name = forms.CharField()
def __init__(self, *args, **kwargs):
super(ConceptClassForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.add_input(Submit('submit', 'Submit'))
self.helper.add_input(Reset('concept-class-cancel', 'Cancel'))
self.helper.form_action = reverse('add_concept_class')
class DisambiguationForm(forms.Form):
term1_name = forms.CharField(label="")
term1_name_ext = forms.CharField(label="")
term1_definition = forms.CharField(required=True, widget=forms.Textarea(),
label="Original Term Description")
term2_name = forms.CharField(label="")
term2_name_ext = forms.CharField(label="")
term2_definition = forms.CharField(required=True, widget=forms.Textarea(),
label="New Term Description")
def __init__(self, label, uid, term=None, *args, **kwargs):
super(DisambiguationForm, self).__init__(*args, **kwargs)
if term is not None:
self.initial = {
'term1_name': term['name'],
'term2_name': term['name'],
'term1_definition': term['definition_text']
}
self.helper = FormHelper()
self.helper.add_input(Reset('disambiguate_cancel_button', 'Cancel'))
self.helper.add_input(Submit('submit', 'Submit'))
self.helper.form_action = reverse('add_disambiguation',
kwargs={'label': label, 'uid': uid})
self.helper.layout = Layout(
Div(
Div(
Field('term1_name', css_class='disam-name'),
HTML('('),
Field('term1_name_ext', css_class='disam-name-ext'),
HTML(')'),
css_class='name-ext-inputs'
),
Field('term1_definition', css_class='disam-def'),
Div(
Field('term2_name', css_class='disam-name'),
HTML('('),
Field('term2_name_ext', css_class='disam-name-ext'),
HTML(')'),
css_class='name-ext-inputs'
),
Field('term2_definition', css_class='disam-def'),
css_class='popstar',
)
)
class PhenotypeForm(forms.Form):
name = forms.CharField(required=True, label="Phenotype Name:")
definition = forms.CharField(required=True, widget=forms.Textarea(),
label="Description:")
choices = (("disorder", "Disorder"),
("trait", "Trait"), ("behavior", "Behavior"))
type = forms.ChoiceField(
choices=choices, label="Phenotype classification", required=False)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.add_input(Submit('submit', 'Submit'))
self.helper.add_input(Reset('phenotype-cancel-button', 'Cancel'))
self.helper.form_action = reverse('add_phenotype')
class TraitForm(forms.Form):
name = forms.CharField(required=True, label="Phenotype Name:")
definition = forms.CharField(required=True, widget=forms.Textarea(),
label="Description:")
def __init__(self, uid, trait=None, *args, **kwargs):
super().__init__(*args, **kwargs)
if trait is not None:
self.initial = {
'name': trait['name'],
'definition': trait['definition']
}
self.helper = FormHelper()
self.helper.add_input(Submit('submit', 'Submit'))
self.helper.add_input(Reset('trait_cancel_button', 'Cancel'))
self.helper.form_action = reverse('update_trait', kwargs={'uid': uid})
class BehaviorForm(forms.Form):
name = forms.CharField(required=True, label="Phenotype Name:")
definition = forms.CharField(required=True, widget=forms.Textarea(),
label="Description:")
def __init__(self, uid, behavior=None, *args, **kwargs):
super().__init__(*args, **kwargs)
if behavior is not None:
self.initial = {
'name': behavior['name'],
'definition': behavior['definition']
}
self.helper = FormHelper()
self.helper.add_input(Submit('submit', 'Submit'))
self.helper.add_input(Reset('behavior_cancel_button', 'Cancel'))
self.helper.form_action = reverse(
'update_behavior', kwargs={'uid': uid})
class DoiForm(forms.Form):
doi = forms.CharField(required=True, label="DOI:")
def __init__(self, uid, label, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.add_input(Submit('submit', 'Submit'))
self.helper.add_input(Reset('doi-cancel-button', 'Cancel'))
self.helper.form_action = reverse('add_citation_doi',
kwargs={'label': label, 'uid': uid})
|
mit
| -3,327,605,317,850,539,000
| 38.736052
| 96
| 0.596155
| false
| 3.850489
| false
| false
| false
|
CSAILVision/sceneparsing
|
evaluationCode/utils_eval.py
|
1
|
1826
|
import numpy as np
# This function takes the prediction and label of a single image, returns intersection and union areas for each class
# To compute over many images do:
# for i in range(Nimages):
# (area_intersection[:,i], area_union[:,i]) = intersectionAndUnion(imPred[i], imLab[i])
# IoU = 1.0 * np.sum(area_intersection, axis=1) / np.sum(np.spacing(1)+area_union, axis=1)
def intersectionAndUnion(imPred, imLab, numClass):
imPred = np.asarray(imPred)
imLab = np.asarray(imLab)
# Remove classes from unlabeled pixels in gt image.
# We should not penalize detections in unlabeled portions of the image.
imPred = imPred * (imLab>0)
# Compute area intersection:
intersection = imPred * (imPred==imLab)
(area_intersection,_) = np.histogram(intersection, bins=numClass, range=(1, numClass))
# Compute area union:
(area_pred,_) = np.histogram(imPred, bins=numClass, range=(1, numClass))
(area_lab,_) = np.histogram(imLab, bins=numClass, range=(1, numClass))
area_union = area_pred + area_lab - area_intersection
return (area_intersection, area_union)
# This function takes the prediction and label of a single image, returns pixel-wise accuracy
# To compute over many images do:
# for i in range(Nimages):
# (pixel_accuracy[i], pixel_correct[i], pixel_labeled[i]) = pixelAccuracy(imPred[i], imLab[i])
# mean_pixel_accuracy = 1.0 * np.sum(pixel_correct) / (np.spacing(1) + np.sum(pixel_labeled))
def pixelAccuracy(imPred, imLab):
imPred = np.asarray(imPred)
imLab = np.asarray(imLab)
# Remove classes from unlabeled pixels in gt image.
# We should not penalize detections in unlabeled portions of the image.
pixel_labeled = np.sum(imLab>0)
pixel_correct = np.sum((imPred==imLab)*(imLab>0))
pixel_accuracy = 1.0 * pixel_correct / pixel_labeled
return (pixel_accuracy, pixel_correct, pixel_labeled)
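# --- Added minimal self-check (illustrative only; the arrays are made up) ---
if __name__ == '__main__':
    imPred = np.array([[1, 2], [2, 3]])
    imLab = np.array([[1, 2], [0, 3]])   # 0 marks an unlabeled pixel
    area_intersection, area_union = intersectionAndUnion(imPred, imLab, numClass=3)
    pixel_accuracy, pixel_correct, pixel_labeled = pixelAccuracy(imPred, imLab)
    print(area_intersection, area_union, pixel_accuracy)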
|
bsd-3-clause
| -2,552,586,764,081,443,000
| 42.47619
| 117
| 0.727273
| false
| 2.98366
| false
| false
| false
|
hexid/WordGenerator
|
GenerateChain.py
|
1
|
1975
|
#!/bin/env python
# usage: depth, inDictionary [, outJSON]
def generateChain(depth, inFile):
import collections, re
numChar, endChar = '#', '.'
regexWord = re.compile('^[a-z]+$')
depthRange = range(depth - 1)
padStr = ' ' * (depth - 1)
chars = collections.deque(maxlen = depth) # limit to depth chars
def NestedDict(): return collections.defaultdict(NestedDict)
rootNode = NestedDict() # create a tree of dictionaries
rootNode['depth'] = depth # set the depth of the chain
curNode, curChar = None, None
with open(inFile, 'r') as f:
for word in f.read().split():
if regexWord.match(word):
chars.extend(padStr) # reset chars for the new word
for curChar in "%s%s" % (word, endChar):
chars.append(curChar) # add the next character
curNode = rootNode # start at the root of the tree
for n in depthRange: # traverse down the tree
curNode = curNode[chars[n]]
# increment the total for the leaves on the branch
curNode[numChar] = curNode.get(numChar, 0) + 1
# increment the total for the current leaf
curNode[curChar] = curNode.get(curChar, 0) + 1
return rootNode
def writeToFile(chain, outFile):
with open(outFile, 'w') as f:
import json # write the json data to outFile
# the json data will be sorted and compressed to save space
f.write(json.dumps(chain, sort_keys=True, separators=(',',':')))
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('depth', metavar='depth', type=int, help='The length of any given chain')
parser.add_argument('inFile', type=str, help='Input dictionary file')
parser.add_argument('outFile', type=str, nargs='?', default='_markov.json', help='Output JSON file (default = _markov.json)')
(args, unknown) = parser.parse_known_args()
chain = generateChain(args.depth, args.inFile)
writeToFile(chain, args.outFile)
if __name__ == "__main__":
main()
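# --- Added usage sketch (file names are assumptions) ---
#
#     python GenerateChain.py 3 /usr/share/dict/words chain.json
#
# builds a depth-3 character chain from the word list and writes the sorted,
# compact JSON tree to chain.json (the default output file is _markov.json).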
|
mit
| -1,638,003,219,230,873,600
| 36.980769
| 127
| 0.658734
| false
| 3.604015
| false
| false
| false
|
smartforceplus/SmartForceplus
|
.local/share/Odoo/addons/8.0/builder/models/demo/base.py
|
1
|
5437
|
import json
import pickle
import os
import random
from openerp import models, api, fields, _
class GeneratorInterface(models.AbstractModel):
_name = 'builder.ir.model.demo.generator.base'
_description = 'Generator Interface'
@api.multi
def get_generator(self, field):
raise NotImplementedError
@api.multi
def action_save(self):
return {'type': 'ir.actions.act_window_close'}
_demo_data = {}
@api.model
def get_demo_data(self, filename=None, dataFormat='json'):
if filename is None:
filename = "{name}.json".format(name=self.subclass_model)
if filename not in self._demo_data:
fullname = os.path.abspath(os.path.join(os.path.dirname(__file__), 'data', filename))
if os.path.exists(fullname):
try:
if dataFormat == 'json':
self._demo_data[filename] = json.loads(open(fullname).read())
else:
self._demo_data[filename] = open(fullname).read()
except Exception, e:
return {}
return self._demo_data.get(filename, {})
class Generator(models.Model):
_name = 'builder.ir.model.demo.generator'
_description = 'Generic Generator'
_inherit = ['ir.mixin.polymorphism.superclass', 'builder.ir.model.demo.generator.base']
_order = 'module_id asc, model_id asc'
_target_type = 'char'
model_id = fields.Many2one('builder.ir.model', ondelete='cascade')
module_id = fields.Many2one('builder.ir.module.module', 'Module', related='model_id.module_id', ondelete='cascade',
store=True)
type = fields.Char('Type', compute='_compute_type')
target_fields_type = fields.Char('Target Fields Type', compute='_compute_target_fields_type')
field_ids = fields.Many2many(
comodel_name='builder.ir.model.fields',
relation='builder_model_demo_fields_rel',
column1='generator_id',
column2='field_id',
string='Fields',
)
field_names = fields.Char('Field Names', compute='_compute_field_names', store=True)
allow_nulls = fields.Boolean('Allow Null Values', help='If the field is not required allow to generate null values for them.')
_defaults = {
'subclass_model': lambda s, c, u, cxt=None: s._name
}
@api.multi
def generate_null_values(self, field):
if self.allow_nulls and not field.required:
return random.random() <= (1.0 / (self.model_id.demo_records + 1))
return False
@api.one
@api.depends('subclass_model')
def _compute_type(self):
data = dict(self.get_generators())
self.type = data.get(self.subclass_model, _('Unknown'))
@api.one
@api.depends('field_ids.name')
def _compute_field_names(self):
self.field_names = ', '.join([field.name for field in self.field_ids])
@api.one
@api.depends('subclass_model')
def _compute_target_fields_type(self):
self.target_fields_type = self.env[self.subclass_model]._model._target_type
@api.model
def get_generators(self):
ms = self.env['ir.model'].search([
('model', 'ilike', 'builder.ir.model.demo.generator.%'),
('model', 'not in', ['builder.ir.model.demo.generator.base', 'builder.ir.model.demo.generator'])
])
return [
(model.model, model.name)
for model in ms
]
@api.one
def get_generator(self, field):
return self.get_instance().get_generator(field)
@api.multi
def action_open_view(self):
model = self._model
action = model.get_formview_action(self.env.cr, self.env.uid, self.ids, self.env.context)
action.update({'target': 'new'})
return action
class IrModel(models.Model):
_name = 'builder.ir.model'
_inherit = ['builder.ir.model']
demo_records = fields.Integer('Demo Records')
demo_data_ids = fields.One2many(
comodel_name='builder.ir.model.demo.generator',
inverse_name='model_id',
string='Demo Data',
copy=True,
)
demo_xml_id_sample = fields.Text(compute='_compute_demo_xml_id_sample', store=True)
@api.one
@api.depends('demo_records', 'model')
def _compute_demo_xml_id_sample(self):
tmpl = '{model}_'.format(model=self.model.lower().replace('.', '_')) + '{id}' if self.model else 'model_'
self.demo_xml_id_sample = pickle.dumps([tmpl.format(id=i) for i in xrange(self.demo_records)])
@api.multi
def demo_xml_id(self, index):
return pickle.loads(self.demo_xml_id_sample)[index]
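# --- Added illustrative note (the model name below is an example, not from this module) ---
# demo_xml_id_sample pickles the list of generated XML ids: for a model named
# 'res.partner' with demo_records = 3 the sample is
# ['res_partner_0', 'res_partner_1', 'res_partner_2'], so demo_xml_id(1)
# returns 'res_partner_1'.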
_field_generators = None
@property
def field_generators(self, reload=False):
if not self._field_generators or reload:
result = {}
for generator in self.demo_data_ids:
for field in generator.field_ids:
if field.name not in result:
result[field.name] = generator.instance.get_generator(field)
self._field_generators = result
return self._field_generators
class IrModule(models.Model):
_name = 'builder.ir.module.module'
_inherit = ['builder.ir.module.module']
demo_data_ids = fields.One2many(
comodel_name='builder.ir.model.demo.generator',
inverse_name='module_id',
string='Demo Data',
copy=True,
)
|
agpl-3.0
| 1,007,184,603,457,756,800
| 32.98125
| 130
| 0.606401
| false
| 3.693614
| false
| false
| false
|
marmarek/qubes-core-admin
|
qubes/vm/adminvm.py
|
1
|
9608
|
#
# The Qubes OS Project, https://www.qubes-os.org/
#
# Copyright (C) 2010-2015 Joanna Rutkowska <joanna@invisiblethingslab.com>
# Copyright (C) 2013-2015 Marek Marczykowski-Górecki
# <marmarek@invisiblethingslab.com>
# Copyright (C) 2014-2015 Wojtek Porczyk <woju@invisiblethingslab.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, see <https://www.gnu.org/licenses/>.
#
''' This module contains the AdminVM implementation '''
import asyncio
import subprocess
import libvirt
import qubes
import qubes.exc
import qubes.vm
from qubes.vm.qubesvm import _setter_kbd_layout
class AdminVM(qubes.vm.BaseVM):
'''Dom0'''
dir_path = None
name = qubes.property('name',
default='dom0', setter=qubes.property.forbidden)
qid = qubes.property('qid',
default=0, type=int, setter=qubes.property.forbidden)
uuid = qubes.property('uuid',
default='00000000-0000-0000-0000-000000000000',
setter=qubes.property.forbidden)
default_dispvm = qubes.VMProperty('default_dispvm',
load_stage=4,
allow_none=True,
default=(lambda self: self.app.default_dispvm),
doc='Default VM to be used as Disposable VM for service calls.')
include_in_backups = qubes.property('include_in_backups',
default=True, type=bool,
doc='If this domain is to be included in default backup.')
updateable = qubes.property('updateable',
default=True,
type=bool,
setter=qubes.property.forbidden,
doc='True if this machine may be updated on its own.')
# for changes in keyboard_layout, see also the same property in QubesVM
keyboard_layout = qubes.property(
'keyboard_layout',
type=str,
setter=_setter_kbd_layout,
default='us++',
doc='Keyboard layout for this VM')
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._qdb_connection = None
self._libvirt_domain = None
if not self.app.vmm.offline_mode:
self.start_qdb_watch()
def __str__(self):
return self.name
def __lt__(self, other):
# order dom0 before anything
return self.name != other.name
@property
def attached_volumes(self):
return []
@property
def xid(self):
'''Always ``0``.
.. seealso:
:py:attr:`qubes.vm.qubesvm.QubesVM.xid`
'''
return 0
@qubes.stateless_property
def icon(self): # pylint: disable=no-self-use
"""freedesktop icon name, suitable for use in
:py:meth:`PyQt4.QtGui.QIcon.fromTheme`"""
return 'adminvm-black'
@property
def libvirt_domain(self):
'''Libvirt object for dom0.
.. seealso:
:py:attr:`qubes.vm.qubesvm.QubesVM.libvirt_domain`
'''
if self._libvirt_domain is None:
self._libvirt_domain = self.app.vmm.libvirt_conn.lookupByID(0)
return self._libvirt_domain
@staticmethod
def is_running():
'''Always :py:obj:`True`.
.. seealso:
:py:meth:`qubes.vm.qubesvm.QubesVM.is_running`
'''
return True
@staticmethod
def is_halted():
'''Always :py:obj:`False`.
.. seealso:
:py:meth:`qubes.vm.qubesvm.QubesVM.is_halted`
'''
return False
@staticmethod
def get_power_state():
'''Always ``'Running'``.
.. seealso:
:py:meth:`qubes.vm.qubesvm.QubesVM.get_power_state`
'''
return 'Running'
@staticmethod
def get_mem():
'''Get current memory usage of Dom0.
Unit is KiB.
.. seealso:
:py:meth:`qubes.vm.qubesvm.QubesVM.get_mem`
'''
# return psutil.virtual_memory().total/1024
with open('/proc/meminfo') as file:
for line in file:
if line.startswith('MemTotal:'):
return int(line.split(':')[1].strip().split()[0])
raise NotImplementedError()
def get_mem_static_max(self):
'''Get maximum memory available to Dom0.
.. seealso:
:py:meth:`qubes.vm.qubesvm.QubesVM.get_mem_static_max`
'''
if self.app.vmm.offline_mode:
# default value passed on xen cmdline
return 4096
try:
return self.app.vmm.libvirt_conn.getInfo()[1]
except libvirt.libvirtError as e:
self.log.warning('Failed to get memory limit for dom0: %s', e)
return 4096
def get_cputime(self):
'''Get total CPU time burned by Dom0 since start.
.. seealso:
:py:meth:`qubes.vm.qubesvm.QubesVM.get_cputime`
'''
try:
return self.libvirt_domain.info()[4]
except libvirt.libvirtError as e:
self.log.warning('Failed to get CPU time for dom0: %s', e)
return 0
def verify_files(self):
'''Always :py:obj:`True`
.. seealso:
:py:meth:`qubes.vm.qubesvm.QubesVM.verify_files`
''' # pylint: disable=no-self-use
return True
def start(self, start_guid=True, notify_function=None,
mem_required=None):
'''Always raises an exception.
.. seealso:
:py:meth:`qubes.vm.qubesvm.QubesVM.start`
''' # pylint: disable=unused-argument,arguments-differ
raise qubes.exc.QubesVMNotHaltedError(
self, 'Cannot start Dom0 fake domain!')
def suspend(self):
'''Does nothing.
.. seealso:
:py:meth:`qubes.vm.qubesvm.QubesVM.suspend`
'''
raise qubes.exc.QubesVMError(self, 'Cannot suspend Dom0 fake domain!')
def shutdown(self):
'''Does nothing.
.. seealso:
:py:meth:`qubes.vm.qubesvm.QubesVM.shutdown`
'''
raise qubes.exc.QubesVMError(self, 'Cannot shutdown Dom0 fake domain!')
def kill(self):
'''Does nothing.
.. seealso:
:py:meth:`qubes.vm.qubesvm.QubesVM.kill`
'''
raise qubes.exc.QubesVMError(self, 'Cannot kill Dom0 fake domain!')
@property
def untrusted_qdb(self):
'''QubesDB handle for this domain.'''
if self._qdb_connection is None:
import qubesdb # pylint: disable=import-error
self._qdb_connection = qubesdb.QubesDB(self.name)
return self._qdb_connection
async def run_service(self, service, source=None, user=None,
filter_esc=False, autostart=False, gui=False, **kwargs):
'''Run service on this VM
:param str service: service name
:param qubes.vm.qubesvm.QubesVM source: source domain as presented to
this VM
:param str user: username to run service as
:param bool filter_esc: filter escape sequences to protect terminal \
emulator
:param bool autostart: if :py:obj:`True`, machine will be started if \
it is not running
:param bool gui: when autostarting, also start gui daemon
:rtype: asyncio.subprocess.Process
.. note::
User ``root`` is redefined to ``SYSTEM`` in the Windows agent code
'''
# pylint: disable=unused-argument
source = 'dom0' if source is None else self.app.domains[source].name
if filter_esc:
raise NotImplementedError(
'filter_esc=True not supported on calls to dom0')
if user is None:
user = 'root'
await self.fire_event_async('domain-cmd-pre-run', pre_event=True,
start_guid=gui)
if user != 'root':
cmd = ['runuser', '-u', user, '--']
else:
cmd = []
cmd.extend([
qubes.config.system_path['qrexec_rpc_multiplexer'],
service,
source,
'name',
self.name,
])
return (await asyncio.create_subprocess_exec(
*cmd,
**kwargs))
async def run_service_for_stdio(self, *args, input=None, **kwargs):
'''Run a service, pass an optional input and return (stdout, stderr).
Raises an exception if return code != 0.
*args* and *kwargs* are passed verbatim to :py:meth:`run_service`.
.. warning::
            There are some combinations of stdio-related *kwargs*, which are
not filtered for problems originating between the keyboard and the
chair.
''' # pylint: disable=redefined-builtin
kwargs.setdefault('stdin', subprocess.PIPE)
kwargs.setdefault('stdout', subprocess.PIPE)
kwargs.setdefault('stderr', subprocess.PIPE)
p = await self.run_service(*args, **kwargs)
# this one is actually a tuple, but there is no need to unpack it
stdouterr = await p.communicate(input=input)
if p.returncode:
raise subprocess.CalledProcessError(p.returncode,
args[0], *stdouterr)
return stdouterr
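    # Illustrative usage sketch (not part of the original module): from a
    # coroutine that already holds the dom0 object, a call could look like
    #
    #     stdout, stderr = await dom0.run_service_for_stdio(
    #         'qubes.SomeService', input=b'payload')
    #
    # 'qubes.SomeService' is a placeholder service name, not a qrexec service
    # defined by this file.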
|
lgpl-2.1
| 412,797,591,439,523,840
| 29.791667
| 79
| 0.596128
| false
| 3.80626
| false
| false
| false
|
danielsunzhongyuan/my_leetcode_in_python
|
lowest_common_ancestor_of_a_binary_tree_236.py
|
1
|
1928
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def lowestCommonAncestor(self, root, p, q):
"""
:type root: TreeNode
:type p: TreeNode
:type q: TreeNode
:rtype: TreeNode
"""
# Solution One: 99ms
# stack = [root]
# parent = {root: None}
# while p not in parent or q not in parent:
# node = stack.pop()
# if node.left:
# parent[node.left] = node
# stack.append(node.left)
# if node.right:
# parent[node.right] = node
# stack.append(node.right)
# ancestor_of_p = []
# while p:
# ancestor_of_p.append(p)
# p = parent[p]
# while q not in ancestor_of_p:
# q = parent[q]
# return q
# Solution Two:
if root in (None, p, q):
return root
left, right = (self.lowestCommonAncestor(kid, p, q) for kid in (root.left, root.right))
return root if left and right else left or right
def lowestCommonAncestor2(self, root, p, q):
"""
:type root: TreeNode
:type p: TreeNode
:type q: TreeNode
:rtype: TreeNode
"""
stack = [root]
parent = {root: None}
while p not in parent or q not in parent:
node = stack.pop()
if node.left:
parent[node.left] = node
stack.append(node.left)
if node.right:
parent[node.right] = node
stack.append(node.right)
ancestor_of_p = []
while p:
ancestor_of_p.append(p)
p = parent[p]
while q not in ancestor_of_p:
q = parent[q]
return q
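# Illustrative usage sketch (not part of the original solution file); it
# assumes the TreeNode class from the commented-out definition above:
#
#     root = TreeNode(3)
#     root.left, root.right = TreeNode(5), TreeNode(1)
#     sol = Solution()
#     print(sol.lowestCommonAncestor(root, root.left, root.right).val)  # 3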
|
apache-2.0
| -4,629,820,916,250,762,000
| 28.661538
| 95
| 0.48029
| false
| 3.750973
| false
| false
| false
|
beyondvalence/scratch
|
chp01.py
|
1
|
1697
|
# !/usr/bin/env python27
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 2 16:15:29 2016
@author: waynetliu
"""
#%%
from __future__ import division
users = [
{ "id": 0, "name": "Hero" },
{ "id": 1, "name": "Dunn" },
{ "id": 2, "name": "Sue" },
{ "id": 3, "name": "Chi" },
{ "id": 4, "name": "Thor" },
{ "id": 5, "name": "Clive" },
{ "id": 6, "name": "Hicks" },
{ "id": 7, "name": "Devin" },
{ "id": 8, "name": "Kate" },
{ "id": 9, "name": "Klein" }
]
print users
#%%
friendships = [(0, 1), (0, 2), (1, 2), (1, 3), (2, 3), (3, 4),
(4, 5), (5, 6), (5, 7), (6, 8), (7, 8), (8, 9)]
#%%
for user in users:
user["friends"]=[]
#%% appends together, not separately
for i, j in friendships:
users[i]["friends"].append(users[j]) # adds j as a friend of i
users[j]["friends"].append(users[i]) # adds i as a friend of j
print "\n", "users", "\n\n", users
#%%
# for first two friendship tuples
# hero - dunn, sue
# dunn - hero
# sue - hero (dunn)
def number_of_friends(user):
"""how many friends does user have?"""
    return len(user["friends"]) # length of friends_id list
total_connections = sum(number_of_friends(user) for user in users) # 24
print "total connections: ", total_connections
num_users = len(users)
avg_connections = total_connections / num_users  # 2.4
# create a list of (ids, number of friends)
num_friends_by_id = [(user["id"], number_of_friends(user))
for user in users]
sorted(num_friends_by_id,
key=lambda (user_id, num_friends): num_friends, reverse=True)
print "\n", num_friends_by_id
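# Quick sanity check (illustrative, not in the original script): the
# friendship list above holds 12 pairs, i.e. 24 directed connections,
# so avg_connections works out to 24 / 10 = 2.4 friends per user.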
|
bsd-2-clause
| 5,344,159,609,228,425,000
| 27.283333
| 71
| 0.524455
| false
| 2.809603
| false
| false
| false
|
arcyfelix/ML-DL-AI
|
Supervised Learning/GANs/GAN.py
|
1
|
3364
|
# -*- coding: utf-8 -*-
""" GAN Example
Use a generative adversarial network (GAN) to generate digit images from a
noise distribution.
References:
- Generative adversarial nets. I Goodfellow, J Pouget-Abadie, M Mirza,
B Xu, D Warde-Farley, S Ozair, Y. Bengio. Advances in neural information
processing systems, 2672-2680.
Links:
- [GAN Paper](https://arxiv.org/pdf/1406.2661.pdf).
"""
from __future__ import division, print_function, absolute_import
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import tflearn
# Data loading and preprocessing
import tflearn.datasets.mnist as mnist
X, Y, testX, testY = mnist.load_data()
image_dim = 784 # 28*28 pixels
z_dim = 200 # Noise data points
total_samples = len(X)
# Generator
def generator(x, reuse=False):
with tf.variable_scope('Generator', reuse=reuse):
x = tflearn.fully_connected(x, 256, activation='relu')
x = tflearn.fully_connected(x, image_dim, activation='sigmoid')
return x
# Discriminator
def discriminator(x, reuse=False):
with tf.variable_scope('Discriminator', reuse=reuse):
x = tflearn.fully_connected(x, 256, activation='relu')
x = tflearn.fully_connected(x, 1, activation='sigmoid')
return x
# Build Networks
gen_input = tflearn.input_data(shape=[None, z_dim], name='input_noise')
disc_input = tflearn.input_data(shape=[None, 784], name='disc_input')
gen_sample = generator(gen_input)
disc_real = discriminator(disc_input)
disc_fake = discriminator(gen_sample, reuse=True)
# Define Loss
disc_loss = -tf.reduce_mean(tf.log(disc_real) + tf.log(1. - disc_fake))
gen_loss = -tf.reduce_mean(tf.log(disc_fake))
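# The two losses above encode the standard GAN minimax objective: the
# discriminator maximizes log D(x) + log(1 - D(G(z))) (hence the negated mean
# is minimized), while the generator uses the non-saturating variant and
# maximizes log D(G(z)) instead of minimizing log(1 - D(G(z))).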
# Build Training Ops for both Generator and Discriminator.
# Each network optimization should only update its own variable, thus we need
# to retrieve each network variables (with get_layer_variables_by_scope) and set
# 'placeholder=None' because we do not need to feed any target.
gen_vars = tflearn.get_layer_variables_by_scope('Generator')
gen_model = tflearn.regression(gen_sample, placeholder=None, optimizer='adam',
loss=gen_loss, trainable_vars=gen_vars,
batch_size=64, name='target_gen', op_name='GEN')
disc_vars = tflearn.get_layer_variables_by_scope('Discriminator')
disc_model = tflearn.regression(disc_real, placeholder=None, optimizer='adam',
loss=disc_loss, trainable_vars=disc_vars,
batch_size=64, name='target_disc', op_name='DISC')
# Define GAN model, that output the generated images.
gan = tflearn.DNN(gen_model)
# Training
# Generate noise to feed to the generator
z = np.random.uniform(-1., 1., size=[total_samples, z_dim])
# Start training, feed both noise and real images.
gan.fit(X_inputs={gen_input: z, disc_input: X},
Y_targets=None,
n_epoch=100)
# Generate images from noise, using the generator network.
f, a = plt.subplots(2, 10, figsize=(10, 4))
for i in range(10):
for j in range(2):
# Noise input.
z = np.random.uniform(-1., 1., size=[1, z_dim])
# Generate image from noise. Extend to 3 channels for matplot figure.
temp = [[ii, ii, ii] for ii in list(gan.predict([z])[0])]
a[j][i].imshow(np.reshape(temp, (28, 28, 3)))
f.show()
plt.draw()
plt.waitforbuttonpress()
|
apache-2.0
| 2,824,382,926,397,684,000
| 36.388889
| 82
| 0.678062
| false
| 3.250242
| false
| false
| false
|
TransportLayer/mc-id2name
|
id2name.py
|
1
|
45678
|
###############################################################################
# Minecraft ID to Friendly Name #
# Copyright (C) 2016 TransportLayer #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
###############################################################################
items = {
'minecraft': {
'__VERSION__': 1.10,
'__LANGUAGE__': 'en_US',
'stone': {
'id': 1,
'category': 'Building Blocks',
0: 'Stone',
1: 'Granite',
2: 'Polished Granite',
3: 'Diorite',
4: 'Polished Diorite',
5: 'Andesite',
6: 'Polished Andesite'
},
'grass': {
'id': 2,
'category': 'Building Blocks',
0: 'Grass Block'
},
'dirt': {
'id': 3,
'category': 'Building Blocks',
0: 'Dirt',
1: 'Coarse Dirt',
2: 'Podzol'
},
'cobblestone': {
'id': 4,
'category': 'Building Blocks',
0: 'Cobblestone'
},
'planks': {
'id': 5,
'category': 'Building Blocks',
0: 'Oak Wood Planks',
1: 'Spruce Wood Planks',
2: 'Birch Wood Planks',
3: 'Jungle Wood Planks',
4: 'Acacia Wood Planks',
5: 'Dark Oak Wood Planks'
},
'sapling': {
'id': 6,
'category': 'Decoration Blocks',
0: 'Oak Sapling',
1: 'Spruce Sapling',
2: 'Birch Sapling',
3: 'Jungle Sapling',
4: 'Acacia Sapling',
5: 'Dark Oak Sapling'
},
'bedrock': {
'id': 7,
'category': 'Building Blocks',
0: 'Bedrock'
},
# No item 8?
# No item 9?
# No item 10?
# No item 11?
'sand': {
'id': 12,
'category': 'Building Blocks',
0: 'Sand',
1: 'Red Sand'
},
'gravel': {
'id': 13,
'category': 'Building Blocks',
0: 'Gravel'
},
'gold_ore': {
'id': 14,
'category': 'Building Blocks',
0: 'Gold Ore'
},
'iron_ore': {
'id': 15,
'category': 'Building Blocks',
0: 'Iron Ore'
},
'coal_ore': {
'id': 16,
'category': 'Building Blocks',
0: 'Coal Ore'
},
'log': {
'id': 17,
'category': 'Building Blocks',
0: 'Oak Wood',
1: 'Spruce Wood',
2: 'Birch Wood',
3: 'Jungle Wood'
},
'leaves': {
'id': 18,
'category': 'Decoration Blocks',
0: 'Oak Leaves',
1: 'Spruce Leaves',
2: 'Birch Leaves',
3: 'Jungle Leaves'
},
'sponge': {
'id': 19,
'category': 'Building Blocks',
0: 'Sponge',
1: 'Wet Sponge'
},
'glass': {
'id': 20,
'category': 'Building Blocks',
0: 'Glass'
},
'lapis_ore': {
'id': 21,
'category': 'Building Blocks',
0: 'Lapis Lazuli Ore'
},
'lapis_block': {
'id': 22,
'category': 'Building Blocks',
0: 'Lapis Lazuli Block'
},
'dispenser': {
'id': 23,
'category': 'Redstone',
0: 'Dispenser'
},
'sandstone': {
'id': 24,
'category': 'Building Blocks',
0: 'Sandstone',
1: 'Chiseled Sandstone',
2: 'Smooth Sandstone'
},
'noteblock': {
'id': 25,
'category': 'Redstone',
0: 'Note Block'
},
# No item 26?
'golden_rail': {
'id': 27,
'category': 'Transportation',
0: 'Powered Rail'
},
'detector_rail': {
'id': 28,
'category': 'Transportation',
0: 'Detector Rail'
},
'sticky_piston': {
'id': 29,
'category': 'Redstone',
0: 'Sticky Piston'
},
'web': {
'id': 30,
'category': 'Decoration Blocks',
0: 'Cobweb'
},
'tallgrass': {
'id': 31,
'category': 'Decoration Blocks',
# Missing DV 0?
1: 'Grass',
2: 'Fern'
},
'deadbush': {
'id': 32,
'category': 'Decoration Blocks',
0: 'Dead Bush'
},
'piston': {
'id': 33,
'category': 'Redstone',
0: 'Piston'
},
# No item 34?
'wool': {
'id': 35,
'category': 'Building Blocks',
0: 'Wool',
1: 'Orange Wool',
2: 'Magenta Wool',
3: 'Light Blue Wool',
4: 'Yellow Wool',
5: 'Lime Wool',
6: 'Pink Wool',
7: 'Gray Wool',
8: 'Light Gray Wool',
9: 'Cyan Wool',
10: 'Purple Wool',
11: 'Blue Wool',
12: 'Brown Wool',
13: 'Green Wool',
14: 'Red Wool',
15: 'Black Wool'
},
# No item 36?
'yellow_flower': {
'id': 37,
'category': 'Decoration Blocks',
0: 'Dandelion'
# Marked for more DVs.
},
'red_flower': {
'id': 38,
'category': 'Decoration Blocks',
0: 'Poppy',
1: 'Blue Orchid', # Not red.
2: 'Allium', # Also not red.
3: 'Azure Bluet', # Still not red.
4: 'Red Tulip', # Wow, good job, this one's red.
5: 'Orange Tulip', # Closer to red...?
6: 'White Tulip', # Farther from red.
7: 'Pink Tulip', # Ah, there we go, back on track.
8: 'Oxeye Daisy' # I give up at this point.
},
'brown_mushroom': {
'id': 39,
'category': 'Decoration Blocks',
0: 'Mushroom'
},
'red_mushroom': {
'id': 40,
'category': 'Decoration Blocks',
0: 'Mushroom'
},
'gold_block': {
'id': 41,
'category': 'Building Blocks',
0: 'Block of Gold'
},
'iron_block': {
'id': 42,
'category': 'Building Blocks',
0: 'Block of Iron'
},
# No item 43?
'stone_slab': {
'id': 44,
'category': 'Building Blocks',
0: 'Stone Slab',
1: 'Sandstone Slab',
# No DV 2?
3: 'Cobblestone Slab',
4: 'Bricks Slab',
5: 'Stone Bricks Slab',
6: 'Nether Brick Slab',
7: 'Quartz Slab'
},
'brick_block': {
'id': 45,
'category': 'Building Blocks',
0: 'Bricks'
},
'tnt': {
'id': 46,
'category': 'Redstone',
0: 'TNT'
},
'bookshelf': {
'id': 47,
'category': 'Building Blocks',
0: 'Bookshelf'
},
'mossy_cobblestone': {
'id': 48,
'category': 'Building Blocks',
0: 'Moss Stone'
},
'obsidian': {
'id': 49,
'category': 'Building Blocks',
0: 'Obsidian'
},
'torch': {
'id': 50,
'category': 'Decoration Blocks',
0: 'Torch'
},
# No item 51?
# No item 52?
'oak_stairs': {
'id': 53,
'category': 'Building Blocks',
0: 'Oak Wood Stairs'
},
'chest': {
'id': 54,
'category': 'Decoration Blocks',
0: 'Chest'
},
# No item 55?
'diamond_ore': {
'id': 56,
'category': 'Building Blocks',
0: 'Diamond Ore'
},
'diamond_block': {
'id': 57,
'category': 'Building Blocks',
0: 'Block of Diamond'
},
'crafting_table': {
'id': 58,
'category': 'Decoration Blocks',
0: 'Crafting Table'
},
# No item 59?
# No item 60?
'furnace': {
'id': 61,
'category': 'Decoration Blocks',
0: 'Furnace'
},
# No item 62?
# No item 63?
# No item 64?
'ladder': {
'id': 65,
'category': 'Decoration Blocks',
0: 'Ladder'
},
'rail': {
'id': 66,
'category': 'Transportation',
0: 'Rail'
},
'stone_stairs': {
'id': 67,
'category': 'Building Blocks',
0: 'Cobblestone Stairs'
},
# No item 68?
'lever': {
'id': 69,
'category': 'Redstone',
0: 'Lever'
},
'stone_pressure_plate': {
'id': 70,
'category': 'Redstone',
0: 'Stone Pressure Plate'
},
# No item 71?
'wooden_pressure_plate': {
'id': 72,
'category': 'Redstone',
0: 'Wooden Pressure Plate'
},
'redstone_ore': {
'id': 73,
'category': 'Building Blocks',
0: 'Redstone Ore'
},
# No item 74?
# No item 75?
'redstone_torch': {
'id': 76,
'category': 'Redstone',
0: 'Redstone Torch'
},
'stone_button': {
'id': 77,
'category': 'Redstone',
0: 'Button'
},
'snow_layer': {
'id': 78,
'category': 'Decoration Blocks',
0: 'Snow'
# Marked for more DVs.
},
'ice': {
'id': 79,
'category': 'Building Blocks',
0: 'Ice'
},
'snow': {
'id': 80,
'category': 'Building Blocks',
0: 'Snow'
},
'cactus': {
'id': 81,
'category': 'Decoration Blocks',
0: 'Cactus'
},
'clay': {
'id': 82,
'category': 'Building Blocks',
0: 'Clay'
},
# No item 83?
'jukebox': {
'id': 84,
'category': 'Decoration Blocks',
0: 'Jukebox'
},
'fence': {
'id': 85,
'category': 'Decoration Blocks',
0: 'Oak Fence'
},
'pumpkin': {
'id': 86,
'category': 'Building Blocks',
0: 'Pumpkin'
},
'netherrack': {
'id': 87,
'category': 'Building Blocks',
0: 'Netherrack'
},
'soul_sand': {
'id': 88,
'category': 'Building Blocks',
0: 'Soul Sand'
},
'glowstone': {
'id': 89,
'category': 'Building Blocks',
0: 'Glowstone'
},
# No item 90?
'lit_pumpkin': {
'id': 91,
'category': 'Building Blocks',
0: 'Jack o\'Lantern'
},
# No item 92?
# No item 93?
# No item 94?
'stained_glass': {
'id': 95,
'category': 'Building Blocks',
0: 'White Stained Glass',
1: 'Orange Stained Glass',
2: 'Magenta Stained Glass',
3: 'Light Blue Stained Glass',
4: 'Yellow Stained Glass',
5: 'Lime Stained Glass',
6: 'Pink Stained Glass',
7: 'Gray Stained Glass',
8: 'Light Gray Stained Glass',
9: 'Cyan Stained Glass',
10: 'Purple Stained Glass',
11: 'Blue Stained Glass',
12: 'Brown Stained Glass',
13: 'Green Stained Glass',
14: 'Red Stained Glass',
15: 'Black Stained Glass'
},
'trapdoor': {
'id': 96,
'category': 'Redstone',
0: 'Wooden Trapdoor'
},
'monster_egg': {
'id': 97,
'category': 'Decoration Blocks',
0: 'Stone Monster Egg',
1: 'Cobblestone Monster Egg',
2: 'Stone Brick Monster Egg',
3: 'Mossy Stone Brick Monster Egg',
4: 'Cracked Stone Brick Monster Egg',
5: 'Chiseled Stone Brick Monster Egg'
},
'stonebrick': {
'id': 98,
'category': 'Building Blocks',
0: 'Stone Bricks',
1: 'Mossy Stone Bricks',
2: 'Cracked Stone Bricks',
3: 'Chiseled Stone Bricks'
},
# No item 99?
# No item 100?
'iron_bars': {
'id': 101,
'category': 'Decoration Blocks',
0: 'Iron Bars'
},
'glass_pane': {
'id': 102,
'category': 'Decoration Blocks',
0: 'Glass Pane'
},
'melon_block': {
'id': 103,
'category': 'Building Blocks',
0: 'Melon'
},
# No item 104?
# No item 105?
'vine': {
'id': 106,
'category': 'Decoration Blocks',
0: 'Vines'
},
'fence_gate': {
'id': 107,
'category': 'Redstone',
0: 'Oak Fence Gate'
},
'brick_stairs': {
'id': 108,
'category': 'Building Blocks',
0: 'Brick Stairs'
},
'stone_brick_stairs': {
'id': 109,
'category': 'Building Blocks',
0: 'Stone Brick Stairs'
},
'mycelium': {
'id': 110,
'category': 'Building Blocks',
0: 'Mycelium'
},
'waterlily': {
'id': 111,
'category': 'Decoration Blocks',
0: 'Lily Pad'
},
'nether_brick': {
'id': 112,
'category': 'Building Blocks',
0: 'Nether Brick'
},
'nether_brick_fence': {
'id': 113,
'category': 'Decoration Blocks',
0: 'Nether Brick Fence'
},
'nether_brick_stairs': {
'id': 114,
'category': 'Building Blocks',
0: 'Nether Brick Stairs'
},
# No item 115?
'enchanting_table': {
'id': 116,
'category': 'Decoration Blocks',
0: 'Enchantment Table'
},
# No item 117?
# No item 118?
# No item 119?
'end_portal_frame': {
'id': 120,
'category': 'Decoration Blocks',
0: 'End Portal'
},
'end_stone': {
'id': 121,
'category': 'Building Blocks',
0: 'End Stone'
},
'redstone_lamp': {
'id': 123,
'category': 'Redstone',
0: 'Redstone Lamp'
},
# No item 124?
# No item 125?
'wooden_slab': {
'id': 126,
'category': 'Building Blocks',
0: 'Oak Wood Slab',
1: 'Spruce Wood Slab',
2: 'Birch Wood Slab',
3: 'Jungle Wood Slab',
4: 'Acacia Wood Slab',
5: 'Dark Oak Wood Slab'
},
# No item 127?
'sandstone_stairs': {
'id': 128,
'category': 'Building Blocks',
0: 'Sandstone Stairs'
},
'emerald_ore': {
'id': 129,
'category': 'Building Blocks',
0: 'Emerald Ore'
},
'ender_chest': {
'id': 130,
'category': 'Decoration Blocks',
0: 'Ender Chest'
},
'tripwire_hook': {
'id': 131,
'category': 'Redstone',
0: 'Tripwire Hook'
},
# No item 132?
'emerald_block': {
'id': 133,
'category': 'Building Blocks',
0: 'Block of Emerald'
},
'spruce_stairs': {
'id': 134,
'category': 'Building Blocks',
0: 'Spruce Wood Stairs'
},
'birch_stairs': {
'id': 135,
'category': 'Building Blocks',
0: 'Birch Wood Stairs'
},
'jungle_stairs': {
'id': 136,
'category': 'Building Blocks',
0: 'Jungle Wood Stairs'
},
# No item 137?
'beacon': {
'id': 138,
'category': 'Miscellaneous',
0: 'Beacon'
},
'cobblestone_wall': {
'id': 139,
'category': 'Building Blocks',
0: 'Cobblestone Wall',
1: 'Mossy Cobblestone Wall'
},
# No item 140?
# No item 141?
# No item 142?
'wooden_button': {
'id': 143,
'category': 'Redstone',
0: 'Button'
},
# No item 144?
'anvil': {
'id': 145,
'category': 'Decoration Blocks',
0: 'Anvil',
1: 'Slightly Damaged Anvil',
2: 'Very Damaged Anvil'
},
'trapped_chest': {
'id': 146,
'category': 'Redstone',
0: 'Trapped Chest'
},
'light_weighted_pressure_plate': {
'id': 147,
'category': 'Redstone',
0: 'Weighted Pressure Plate (Light)'
},
'heavy_weighted_pressure_plate': {
'id': 148,
'category': 'Redstone',
0: 'Weighted Pressure Plate (Heavy)'
},
# No item 149?
# No item 150?
'daylight_detector': {
'id': 151,
'category': 'Redstone',
0: 'Daylight Sensor'
},
'redstone_block': {
'id': 152,
'category': 'Redstone',
0: 'Block of Redstone'
},
'quartz_ore': {
'id': 153,
'category': 'Building Blocks',
0: 'Nether Quartz Ore'
},
'hopper': {
'id': 154,
'category': 'Redstone',
0: 'Hopper'
},
'quartz_block': {
'id': 155,
'category': 'Building Blocks',
0: 'Block of Quartz',
1: 'Chiseled Quartz Block',
2: 'Pillar Quartz Block'
},
'quartz_stairs': {
'id': 156,
'category': 'Building Blocks',
0: 'Quartz Stairs'
},
'activator_rail': {
'id': 157,
'category': 'Transportation',
0: 'Activator Rail'
},
'dropper': {
'id': 158,
'category': 'Redstone',
0: 'Dropper'
},
'stained_hardened_clay': {
'id': 159,
'category': 'Building Blocks',
0: 'White Hardened Clay',
1: 'Orange Hardened Clay',
2: 'Magenta Hardened Clay',
3: 'Light Blue Hardened Clay',
4: 'Yellow Hardened Clay',
5: 'Lime Hardened Clay',
6: 'Pink Hardened Clay',
7: 'Gray Hardened Clay',
8: 'Light Gray Hardened Clay',
9: 'Cyan Hardened Clay',
10: 'Purple Hardened Clay',
11: 'Blue Hardened Clay',
12: 'Brown Hardened Clay',
13: 'Green Hardened Clay',
14: 'Red Hardened Clay',
15: 'Black Hardened Clay'
},
'stained_glass_pane': {
'id': 160,
'category': 'Decoration Blocks',
0: 'White Stained Glass Pane',
1: 'Orange Stained Glass Pane',
2: 'Magenta Stained Glass Pane',
3: 'Light Blue Stained Glass Pane',
4: 'Yellow Stained Glass Pane',
5: 'Lime Stained Glass Pane',
6: 'Pink Stained Glass Pane',
7: 'Gray Stained Glass Pane',
8: 'Light Gray Stained Glass Pane',
9: 'Cyan Stained Glass Pane',
10: 'Purple Stained Glass Pane',
11: 'Blue Stained Glass Pane',
12: 'Brown Stained Glass Pane',
13: 'Green Stained Glass Pane',
14: 'Red Stained Glass Pane',
15: 'Black Stained Glass Pane'
},
'leaves2': {
'id': 161,
'category': 'Decoration Blocks',
0: 'Acacia Leaves',
1: 'Dark Oak Leaves'
},
'log2': {
'id': 162,
'category': 'Building Blocks',
0: 'Acacia Wood',
1: 'Dark Oak Wood'
},
'acacia_stairs': {
'id': 163,
'category': 'Building Blocks',
0: 'Acacia Wood Stairs'
},
'dark_oak_stairs': {
'id': 164,
'category': 'Building Blocks',
0: 'Dark Oak Wood Stairs'
},
'slime': {
'id': 165,
'category': 'Decoration Blocks',
0: 'Slime Block'
},
'iron_trapdoor': {
'id': 167,
'category': 'Redstone',
0: 'Iron Trapdoor'
},
'prismarine': {
'id': 168,
'category': 'Building Blocks',
0: 'Prismarine',
1: 'Prismarine Bricks',
2: 'Dark Prismarine'
},
'sea_lantern': {
'id': 169,
'category': 'Building Blocks',
0: 'Sea Lantern'
},
'hay_block': {
'id': 170,
'category': 'Building Blocks',
0: 'Hay Bale'
},
'carpet': {
'id': 171,
'category': 'Decoration Blocks',
0: 'Carpet',
1: 'Orange Carpet',
2: 'Magenta Carpet',
3: 'Light Blue Carpet',
4: 'Yellow Carpet',
5: 'Lime Carpet',
6: 'Pink Carpet',
7: 'Gray Carpet',
8: 'Light Gray Carpet',
9: 'Cyan Carpet',
10: 'Purple Carpet',
11: 'Blue Carpet',
12: 'Brown Carpet',
13: 'Green Carpet',
14: 'Red Carpet',
15: 'Black Carpet'
},
'hardened_clay': {
'id': 172,
'category': 'Building Blocks',
0: 'Hardened Clay'
},
'coal_block': {
'id': 173,
'category': 'Building Blocks',
0: 'Block of Coal'
},
'packed_ice': {
'id': 174,
'category': 'Building Blocks',
0: 'Packed Ice'
},
'double_plant': {
'id': 175,
'category': 'Decoration Blocks',
0: 'Sunflower',
1: 'Lilac',
2: 'Double Tallgrass',
3: 'Large Fern',
4: 'Rose Bush',
5: 'Peony'
},
# No item 176?
# No item 177?
# No item 178?
'red_sandstone': {
'id': 179,
'category': 'Building Blocks',
0: 'Red Sandstone',
1: 'Chiseled Red Sandstone',
2: 'Smooth Red Sandstone'
},
'red_sandstone_stairs': {
'id': 180,
'category': 'Building Blocks',
0: 'Red Sandstone Stairs'
},
# No item 181?
'stone_slab2': {
'id': 182,
'category': 'Building Blocks',
0: 'Red Sandstone Slab'
# Marked for more DVs.
},
'spruce_fence_gate': {
'id': 183,
'category': 'Redstone',
0: 'Spruce Fence Gate'
},
'birch_fence_gate': {
'id': 184,
'category': 'Redstone',
0: 'Birch Fence Gate'
},
'jungle_fence_gate': {
'id': 185,
'category': 'Redstone',
0: 'Jungle Fence Gate'
},
'dark_oak_fence_gate': {
'id': 186,
'category': 'Redstone',
0: 'Dark Oak Fence Gate'
},
'acacia_fence_gate': {
'id': 187,
'category': 'Redstone',
0: 'Acacia Fence Gate'
},
'spruce_fence': {
'id': 188,
'category': 'Decoration Blocks',
0: 'Spruce Fence'
},
'birch_fence': {
'id': 189,
'category': 'Decoration Blocks',
0: 'Birch Fence'
},
'jungle_fence': {
'id': 190,
'category': 'Decoration Blocks',
0: 'Jungle Fence'
},
'dark_oak_fence': {
'id': 191,
'category': 'Decoration Blocks',
0: 'Dark Oak Fence'
},
'acacia_fence': {
'id': 192,
'category': 'Decoration Blocks',
0: 'Acacia Fence'
},
# No item 193?
# No item 194?
# No item 195?
# No item 196?
# No item 197?
'end_rod': {
'id': 198,
'category': 'Decoration Blocks',
0: 'End Rod'
},
'chorus_plant': {
'id': 199,
'category': 'Decoration Blocks',
0: 'Chorus Plant'
},
'chorus_flower': {
'id': 200,
'category': 'Decoration Blocks',
0: 'Chorus Flower'
},
'purpur_block': {
'id': 201,
'category': 'Building Blocks',
0: 'Purpur Block'
},
'purpur_pillar': {
'id': 202,
'category': 'Building Blocks',
0: 'Purpur Pillar'
},
'purpur_stairs': {
'id': 203,
'category': 'Building Blocks',
0: 'Purpur Stairs'
},
# No item 204?
'purpur_slab': {
'id': 205,
'category': 'Building Blocks',
0: 'Purpur Slab'
# Marked for more DVs.
},
'end_bricks': {
'id': 206,
'category': 'Building Blocks',
0: 'End Stone Bricks'
},
# No item 207?
# No item 208?
# No item 209?
# No item 210?
# No item 211?
# No item 212?
'magma': {
'id': 213,
'category': 'Building Blocks',
0: 'Magma Block'
},
'nether_wart_block': {
'id': 214,
'category': 'Building Blocks',
0: 'Nether Wart Block'
},
'red_nether_brick': {
'id': 215,
'category': 'Building Blocks',
0: 'Red Nether Brick'
},
'bone_block': {
'id': 216,
'category': 'Building Blocks',
0: 'Bone Block'
},
# No item...
# ...
# Start of 256 block.
'iron_shovel': {
'id': 256,
'category': 'Tools',
'name': 'Iron Shovel',
'uses': 251
},
'iron_pickaxe': {
'id': 257,
'category': 'Tools',
'name': 'Iron Pickaxe',
'uses': 251
},
'iron_axe': {
'id': 258,
'category': 'Tools',
'name': 'Iron Axe',
'uses': 251
},
'flint_and_steel': {
'id': 259,
'category': 'Tools',
'name': 'Flint and Steel',
'uses': 65
},
'apple': {
'id': 260,
'category': 'Foodstuffs',
0: 'Apple'
},
'bow': {
'id': 261,
'category': 'Combat',
'name': 'Bow',
'uses': 385
},
'arrow': {
'id': 262,
'category': 'Combat',
0: 'Arrow'
},
'coal': {
'id': 263,
'category': 'Materials',
0: 'Coal',
1: 'Charcoal'
},
'diamond': {
'id': 264,
'category': 'Materials',
0: 'Diamond'
},
'iron_ingot': {
'id': 265,
'category': 'Materials',
0: 'Iron Ingot'
},
'gold_ingot': {
'id': 266,
'category': 'Materials',
0: 'Gold Ingot'
},
'iron_sword': {
'id': 267,
'category': 'Combat',
'name': 'Iron Sword',
'uses': 251
},
'wooden_sword': {
'id': 268,
'category': 'Combat',
'name': 'Wooden Sword',
'uses': 60
},
'wooden_shovel': {
'id': 269,
'category': 'Tools',
'name': 'Wooden Shovel',
'uses': 60
},
'wooden_pickaxe': {
'id': 270,
'category': 'Tools',
'name': 'Wooden Pickaxe',
'uses': 60
},
'wooden_axe': {
'id': 271,
'category': 'Tools',
'name': 'Wooden Axe',
'uses': 60
},
'stone_sword': {
'id': 272,
'category': 'Combat',
'name': 'Stone Sword',
'uses': 132
},
'stone_shovel': {
'id': 273,
'category': 'Tools',
'name': 'Stone Shovel',
'uses': 132
},
'stone_pickaxe': {
'id': 274,
'category': 'Tools',
'name': 'Stone Pickaxe',
'uses': 132
},
'stone_axe': {
'id': 275,
'category': 'Tools',
'name': 'Stone Axe',
'uses': 132
},
'diamond_sword': {
'id': 276,
'category': 'Combat',
'name': 'Diamond Sword',
'uses': 1562
},
'diamond_shovel': {
'id': 277,
'category': 'Tools',
'name': 'Diamond Shovel',
'uses': 1562
},
'diamond_pickaxe': {
'id': 278,
'category': 'Tools',
'name': 'Diamond Pickaxe',
'uses': 1562
},
'diamond_axe': {
'id': 279,
'category': 'Tools',
'name': 'Diamond Axe',
'uses': 1562
},
'stick': {
'id': 280,
'category': 'Materials',
0: 'Stick'
},
'bowl': {
'id': 281,
'category': 'Materials',
0: 'Bowl'
},
'mushroom_stew': {
'id': 282,
'category': 'Foodstuffs',
0: 'Mushroom Stew'
},
'golden_sword': {
'id': 283,
'category': 'Combat',
'name': 'Golden Sword',
'uses': 33
},
'golden_shovel': {
'id': 284,
'category': 'Tools',
'name': 'Golden Shovel',
'uses': 33
},
'golden_pickaxe': {
'id': 285,
'category': 'Tools',
'name': 'Golden Pickaxe',
'uses': 33
},
'golden_axe': {
'id': 286,
'category': 'Tools',
'name': 'Golden Axe',
'uses': 33
},
'string': {
'id': 287,
'category': 'Materials',
0: 'String'
},
'feather': {
'id': 288,
'category': 'Materials',
0: 'Feather'
},
'gunpowder': {
'id': 289,
'category': 'Materials',
0: 'Gunpowder'
},
'wooden_hoe': {
'id': 290,
'category': 'Tools',
'name': 'Wooden Hoe',
'uses': 60
},
'stone_hoe': {
'id': 291,
'category': 'Tools',
'name': 'Stone Hoe',
'uses': 132
},
'iron_hoe': {
'id': 292,
'category': 'Tools',
'name': 'Iron Hoe',
'uses': 251
},
'diamond_hoe': {
'id': 293,
'category': 'Tools',
'name': 'Diamond Hoe',
'uses': 1562
},
'golden_hoe': {
'id': 294,
'category': 'Tools',
            'name': 'Golden Hoe',
'uses': 33
},
'wheat_seeds': {
'id': 295,
'category': 'Materials',
0: 'Seeds'
},
'wheat': {
'id': 296,
'category': 'Materials',
0: 'Wheat'
},
'bread': {
'id': 297,
'category': 'Foodstuffs',
0: 'Bread'
},
'leather_helmet': {
'id': 298,
'category': 'Combat',
'name': 'Leather Cap',
'uses': 56,
'armor': 1,
'toughness': 0
},
'leather_chestplate': {
'id': 299,
'category': 'Combat',
'name': 'Leather Tunic',
'uses': 81,
'armor': 3,
'toughness': 0
},
'leather_leggings': {
'id': 300,
'category': 'Combat',
'name': 'Leather Pants',
'uses': 76,
'armor': 2,
'toughness': 0
},
'leather_boots': {
'id': 301,
'category': 'Combat',
'name': 'Leather Boots',
'uses': 66,
'armor': 1,
'toughness': 0
},
'chainmail_helmet': {
'id': 302,
'category': 'Combat',
'name': 'Chain Helmet',
'uses': 166,
'armor': 2,
'toughness': 0
},
'chainmail_chestplate': {
'id': 303,
'category': 'Combat',
'name': 'Chain Chestplate',
'uses': 241,
'armor': 5,
'toughness': 0
},
'chainmail_leggings': {
'id': 304,
'category': 'Combat',
'name': 'Chain Leggings',
'uses': 226,
'armor': 4,
'toughness': 0
},
'chainmail_boots': {
'id': 305,
'category': 'Combat',
'name': 'Chain Boots',
'uses': 196,
'armor': 1,
'toughness': 0
},
'iron_helmet': {
'id': 306,
'category': 'Combat',
'name': 'Iron Helmet',
'uses': 166,
'armor': 2,
'toughness': 0
},
'iron_chestplate': {
'id': 307,
'category': 'Combat',
'name': 'Iron Chestplate',
'uses': 241,
'armor': 6,
'toughness': 0
},
'iron_leggings': {
'id': 308,
'category': 'Combat',
'name': 'Iron Leggings',
'uses': 226,
'armor': 5,
'toughness': 0
},
'iron_boots': {
'id': 309,
'category': 'Combat',
'name': 'Iron Boots',
'uses': 196,
'armor': 2,
'toughness': 0
},
'diamond_helmet': {
'id': 310,
'category': 'Combat',
'name': 'Diamond Helmet',
'uses': 364,
'armor': 3,
'toughness': 2
},
'diamond_chestplate': {
'id': 311,
'category': 'Combat',
'name': 'Diamond Chestplate',
'uses': 529,
'armor': 8,
'toughness': 2
},
'diamond_leggings': {
'id': 312,
'category': 'Combat',
'name': 'Diamond Leggings',
'uses': 496,
'armor': 6,
'toughness': 2
},
'diamond_boots': {
'id': 313,
'category': 'Combat',
'name': 'Diamond Boots',
'uses': 430,
'armor': 3,
'toughness': 2
},
'golden_helmet': {
'id': 314,
'category': 'Combat',
'name': 'Golden Helmet',
'uses': 78,
'armor': 2,
'toughness': 0
},
'golden_chestplate': {
'id': 315,
'category': 'Combat',
'name': 'Golden Chestplate',
'uses': 113,
'armor': 5,
'toughness': 0
},
'golden_leggings': {
'id': 316,
'category': 'Combat',
'name': 'Golden Leggings',
'uses': 106,
'armor': 3,
'toughness': 0
},
'golden_boots': {
'id': 317,
'category': 'Combat',
'name': 'Golden Boots',
'uses': 92,
'armor': 1,
'toughness': 0
},
'flint': {
'id': 318,
'category': 'Materials',
0: 'Flint'
},
'porkchop': {
'id': 319,
'category': 'Foodstuffs',
0: 'Raw Porkchop'
},
'cooked_porkchop': {
'id': 320,
'category': 'Foodstuffs',
0: 'Cooked Porkchop'
},
'painting': {
'id': 321,
'category': 'Decoration Blocks',
0: 'Painting'
},
'golden_apple': {
'id': 322,
'category': 'Foodstuffs',
0: 'Golden Apple', # Regular.
1: 'Golden Apple' # Notch Apple.
},
'sign': {
'id': 323,
'category': 'Decoration Blocks',
0: 'Sign'
},
'wooden_door': {
'id': 324,
'category': 'Redstone',
0: 'Oak Door'
},
'bucket': {
'id': 325,
'category': 'Miscellaneous',
0: 'Bucket'
},
'water_bucket': {
'id': 326,
'category': 'Miscellaneous',
0: 'Water Bucket'
},
'lava_bucket': {
'id': 327,
'category': 'Miscellaneous',
0: 'Lava Bucket'
},
'minecart': {
'id': 328,
'category': 'Transportation',
0: 'Minecart'
},
'saddle': {
'id': 329,
'category': 'Transportation',
0: 'Saddle'
},
'iron_door': {
'id': 330,
'category': 'Redstone',
0: 'Iron Door'
},
'redstone': {
'id': 331,
'category': 'Redstone',
0: 'Redstone'
},
'snowball': {
'id': 332,
'category': 'Miscellaneous',
0: 'Snowball'
},
'boat': {
'id': 333,
'category': 'Transportation',
0: 'Oak Boat'
},
'leather': {
'id': 334,
'category': 'Materials',
0: 'Leather'
},
'milk_bucket': {
'id': 335,
'category': 'Miscellaneous',
0: 'Milk'
},
'brick': {
'id': 336,
'category': 'Materials',
0: 'Brick'
},
'clay_ball': {
'id': 337,
'category': 'Materials',
0: 'Clay'
},
'reeds': {
'id': 338,
'category': 'Materials',
0: 'Sugar Canes'
},
'paper': {
'id': 339,
'category': 'Miscellaneous',
0: 'Paper'
},
'book': {
'id': 340,
'category': 'Miscellaneous',
0: 'Book'
},
'slime_ball': {
'id': 341,
'category': 'Miscellaneous',
0: 'Slimeball'
},
'chest_minecart': {
'id': 342,
'category': 'Transportation',
0: 'Minecart with Chest'
},
'furnace_minecart': {
'id': 343,
'category': 'Transportation',
0: 'Minecart with Furnace'
},
'egg': {
            'id': 344,
'category': 'Materials',
0: 'Egg'
},
'compass': {
'id': 345,
'category': 'Tools',
0: 'Compass'
},
'fishing_rod': {
'id': 346,
'category': 'Tools',
'name': 'Fishing Rod',
'uses': 65
},
'clock': {
'id': 347,
'category': 'Tools',
0: 'Clock'
},
'glowstone_dust': {
'id': 348,
'category': 'Materials',
0: 'Glowstone Dust'
},
'fish': {
'id': 349,
'category': 'Foodstuffs',
0: 'Raw Fish',
1: 'Raw Salmon',
2: 'Clownfish',
3: 'Pufferfish'
},
'cooked_fish': {
'id': 350,
'category': 'Foodstuffs',
0: 'Cooked Fish',
1: 'Cooked Salmon'
},
'dye': {
'id': 351,
'category': 'Materials',
0: 'Ink Sac',
1: 'Rose Red',
2: 'Cactus Green',
3: 'Cocoa Beans',
4: 'Lapis Lazuli',
5: 'Purple Dye',
6: 'Cyan Dye',
7: 'Light Gray Dye',
8: 'Gray Dye',
9: 'Pink Dye',
10: 'Lime Dye',
11: 'Dandelion Yellow',
12: 'Light Blue Dye',
13: 'Magenta Dye',
14: 'Orange Dye',
15: 'Bone Meal'
},
'bone': {
'id': 352,
'category': 'Miscellaneous',
0: 'Bone'
},
'sugar': {
'id': 353,
'category': 'Materials',
0: 'Sugar'
},
'cake': {
'id': 354,
'category': 'Foodstuffs',
0: 'Cake'
},
'bed': {
'id': 355,
'category': 'Decoration Blocks',
0: 'Bed'
},
'repeater': {
'id': 356,
'category': 'Redstone',
0: 'Redstone Repeater'
},
'cookie': {
'id': 357,
'category': 'Foodstuffs',
0: 'Cookie'
},
# No item 358?
'shears': {
'id': 359,
'category': 'Tools',
'name': 'Shears',
'uses': 238
},
'melon': {
'id': 360,
'category': 'Foodstuffs',
0: 'Melon'
},
'pumpkin_seeds': {
'id': 361,
'category': 'Materials',
0: 'Pumpkin Seeds'
},
'melon_seeds': {
'id': 362,
'category': 'Materials',
0: 'Melon Seeds'
},
'beef': {
'id': 363,
'category': 'Foodstuffs',
0: 'Raw Beef'
},
'cooked_beef': {
'id': 364,
'category': 'Foodstuffs',
0: 'Steak'
},
'chicken': {
'id': 365,
'category': 'Foodstuffs',
0: 'Raw Chicken'
},
'cooked_chicken': {
'id': 366,
'category': 'Foodstuffs',
0: 'Cooked Chicken'
},
'rotten_flesh': {
'id': 367,
'category': 'Foodstuffs',
0: 'Rotten Flesh'
},
'ender_pearl': {
'id': 368,
'category': 'Miscellaneous',
0: 'Ender Pearl'
},
'blaze_rod': {
'id': 369,
'category': 'Materials',
0: 'Blaze Rod'
},
'ghast_tear': {
'id': 370,
'category': 'Brewing',
0: 'Ghast Tear'
},
'gold_nugget': {
'id': 371,
'category': 'Materials',
0: 'Gold Nugget'
},
'nether_wart': {
'id': 372,
'category': 'Materials',
0: 'Nether Wart'
},
'potion': {
'id': 373,
'category': 'Brewing',
0: 'Potion' # Potions are stored as NBT data.
},
'glass_bottle': {
'id': 374,
'category': 'Brewing',
0: 'Glass Bottle'
},
'spider_eye': {
'id': 375,
'category': 'Foodstuffs',
0: 'Spider Eye'
},
'fermented_spider_eye': {
'id': 376,
'category': 'Brewing',
0: 'Fermented Spider Eye'
},
'blaze_powder': {
'id': 377,
'category': 'Brewing',
0: 'Blaze Powder'
},
'magma_cream': {
'id': 378,
'category': 'Brewing',
0: 'Magma Cream'
},
'brewing_stand': {
'id': 379,
'category': 'Brewing',
0: 'Brewing Stand'
},
'cauldron': {
'id': 380,
'category': 'Brewing',
0: 'Cauldron'
},
'ender_eye': {
'id': 381,
'category': 'Miscellaneous',
0: 'Eye of Ender'
},
'speckled_melon': {
'id': 382,
'category': 'Brewing',
0: 'Glistering Melon'
},
'spawn_egg': {
'id': 383,
'category': 'Miscellaneous',
0: 'Spawn Egg' # Entity data is stored as NBT data.
},
'experience_bottle': {
'id': 384,
'category': 'Miscellaneous',
0: 'Bottle o\' Enchanting'
},
'fire_charge': {
'id': 385,
'category': 'Miscellaneous',
0: 'Fire Charge'
},
'writable_book': {
'id': 386,
'category': 'Miscellaneous',
0: 'Book and Quill'
},
# No item 387?
'emerald': {
'id': 388,
'category': 'Materials',
0: 'Emerald'
},
'item_frame': {
'id': 389,
'category': 'Decoration Blocks',
0: 'Item Frame'
},
'flower_pot': {
'id': 390,
'category': 'Decoration Blocks',
0: 'Flower Pot'
},
'carrot': {
'id': 391,
            'category': 'Foodstuffs',
0: 'Carrot'
},
'potato': {
'id': 392,
            'category': 'Foodstuffs',
0: 'Potato'
},
'baked_potato': {
'id': 393,
'category': 'Foodstuffs',
0: 'Baked Potato'
},
'poisonous_potato': {
'id': 394,
'category': 'Foodstuffs',
0: 'Poisonous Potato'
},
'map': {
'id': 395,
'category': 'Miscellaneous',
0: 'Empty Map'
},
'golden_carrot': {
'id': 396,
'category': 'Brewing',
0: 'Golden Carrot'
},
'skull': {
'id': 397,
'category': 'Decoration Blocks',
0: 'Skeleton Skull',
1: 'Wither Skeleton Skull',
2: 'Zombie Head',
3: 'Head',
4: 'Creeper Head',
5: 'Dragon Head'
},
'carrot_on_a_stick': {
'id': 398,
'category': 'Transportation',
'name': 'Carrot on a Stick',
'uses': 26
},
'nether_star': {
'id': 399,
'category': 'Materials',
0: 'Nether Star'
},
'pumpkin_pie': {
'id': 400,
'category': 'Foodstuffs',
0: 'Pumpkin Pie'
},
# No item 401?
'firework_charge': {
'id': 402,
'category': 'Miscellaneous',
0: 'Firework Star'
},
'enchanted_book': {
'id': 403,
'category': 'Miscellaneous', # Category changes based on enchant.
0: 'Enchanted Book' # Enchant is stored as NBT data.
},
'comparator': {
'id': 404, # If you make a HTTP joke you will be slapped.
'category': 'Redstone',
0: 'Redstone Comparator'
},
'netherbrick': {
'id': 405,
'category': 'Materials',
0: 'Nether Brick'
},
'quartz': {
'id': 406,
'category': 'Materials',
0: 'Nether Quartz'
},
'tnt_minecart': {
'id': 407,
'category': 'Transportation',
0: 'Minecart with TNT'
},
'hopper_minecart': {
'id': 408,
'category': 'Transportation',
0: 'Minecart with Hopper'
},
'prismarine_shard': {
'id': 409,
'category': 'Materials',
0: 'Prismarine Shard'
},
'prismarine_crystals': {
'id': 410,
'category': 'Materials',
0: 'Prismarine Crystals'
},
'rabbit': {
'id': 411,
'category': 'Foodstuffs',
0: 'Raw Rabbit'
},
'cooked_rabbit': {
'id': 412,
'category': 'Foodstuffs',
0: 'Cooked Rabbit'
},
'rabbit_stew': {
'id': 413,
'category': 'Foodstuffs',
0: 'Rabbit Stew'
},
'rabbit_foot': {
'id': 414,
'category': 'Brewing',
0: 'Rabbit\'s Foot'
},
'rabbit_hide': {
'id': 415,
'category': 'Materials',
0: 'Rabbit Hide'
},
'armor_stand': {
'id': 416,
'category': 'Decoration Blocks',
0: 'Armor Stand'
},
'iron_horse_armor': {
'id': 417,
'category': 'Miscellaneous',
0: 'Iron Horse Armor'
},
'golden_horse_armor': {
'id': 418,
'category': 'Miscellaneous',
0: 'Gold Horse Armor'
},
'diamond_horse_armor': {
'id': 419,
'category': 'Miscellaneous',
0: 'Diamond Horse Armor'
},
'lead': {
'id': 420,
'category': 'Tools',
0: 'Lead'
},
'name_tag': {
'id': 421,
'category': 'Tools',
0: 'Name Tag'
},
# No item 422?
'mutton': {
'id': 423,
'category': 'Foodstuffs',
0: 'Raw Mutton'
},
'cooked_mutton': {
'id': 424,
'category': 'Foodstuffs',
0: 'Cooked Mutton'
},
'banner': {
'id': 425,
'category': 'Decoration Blocks',
0: 'Black Banner', # Colours are in reverse order...?
1: 'Red Banner',
2: 'Green Banner',
3: 'Brown Banner',
4: 'Blue Banner',
5: 'Purple Banner',
6: 'Cyan Banner',
7: 'Light Gray Banner',
8: 'Gray Banner',
9: 'Pink Banner',
10: 'Lime Banner',
11: 'Yellow Banner',
12: 'Light Blue Banner',
13: 'Magenta Banner',
14: 'Orange Banner',
15: 'White Banner'
},
'end_crystal': {
'id': 426,
'category': 'Decoration Blocks',
0: 'End Crystal'
},
'spruce_door': {
'id': 427,
'category': 'Redstone',
0: 'Spruce Door'
},
'birch_door': {
'id': 428,
'category': 'Redstone',
0: 'Birch Door'
},
'jungle_door': {
'id': 429,
'category': 'Redstone',
0: 'Jungle Door'
},
'acacia_door': {
'id': 430,
'category': 'Redstone',
0: 'Acacia Door'
},
'dark_oak_door': {
'id': 431,
'category': 'Redstone',
0: 'Dark Oak Door'
},
'chorus_fruit': {
'id': 432,
'category': 'Materials',
0: 'Chorus Fruit'
},
'chorus_fruit_popped': {
'id': 433,
'category': 'Materials',
0: 'Popped Chorus Fruit'
},
'beetroot': {
'id': 434,
'category': 'Foodstuffs',
0: 'Beetroot'
},
'beetroot_seeds': {
'id': 435,
'category': 'Materials',
0: 'Beetroot Seeds'
},
'beetroot_soup': {
'id': 436,
'category': 'Foodstuffs',
0: 'Beetroot Soup'
},
'dragon_breath': {
'id': 437,
'category': 'Brewing',
0: 'Dragon\'s Breath'
},
'splash_potion': {
'id': 438,
'category': 'Brewing',
0: 'Splash Potion' # Potion is stored as NBT data.
},
'spectral_arrow': {
'id': 439,
'category': 'Combat',
0: 'Spectral Arrow'
},
'tipped_arrow': {
'id': 440,
'category': 'Combat',
0: 'Tipped Arrow' # Arrow type is stored as NBT data.
},
'lingering_potion': {
'id': 441,
'category': 'Brewing',
0: 'Lingering Potion' # Potion is stored as NBT data.
},
'shield': {
'id': 442,
'category': 'Combat',
'name': 'Shield',
'uses': 337
},
'elytra': {
'id': 443,
'category': 'Transportation',
'name': 'Elytra',
'uses': 431
},
'spruce_boat': {
'id': 444,
'category': 'Transportation',
0: 'Spruce Boat'
},
'birch_boat': {
'id': 445,
'category': 'Transportation',
0: 'Birch Boat'
},
'jungle_boat': {
'id': 446,
'category': 'Transportation',
0: 'Jungle Boat'
},
'acacia_boat': {
'id': 447,
'category': 'Transportation',
0: 'Acacia Boat'
},
'dark_oak_boat': {
'id': 448,
'category': 'Transportation',
0: 'Dark Oak Boat'
},
# Missing item...
# ...
# Start of 2256 block.
'record_13': {
'id': 2256,
'category': 'Miscellaneous',
0: 'Music Disc'
},
'record_cat': {
'id': 2257,
'category': 'Miscellaneous',
0: 'Music Disc'
},
'record_blocks': {
'id': 2258,
'category': 'Miscellaneous',
0: 'Music Disc'
},
'record_chirp': {
'id': 2259,
'category': 'Miscellaneous',
0: 'Music Disc'
},
'record_far': {
'id': 2260,
'category': 'Miscellaneous',
0: 'Music Disc'
},
'record_mall': {
'id': 2261,
'category': 'Miscellaneous',
0: 'Music Disc'
},
'record_mellohi': {
'id': 2262,
'category': 'Miscellaneous',
0: 'Music Disc'
},
'record_stal': {
'id': 2263,
'category': 'Miscellaneous',
0: 'Music Disc'
},
'record_strad': {
'id': 2264,
'category': 'Miscellaneous',
0: 'Music Disc'
},
'record_ward': {
'id': 2265,
'category': 'Miscellaneous',
0: 'Music Disc'
},
'record_11': {
'id': 2266,
'category': 'Miscellaneous',
0: 'Music Disc'
},
'record_wait': {
'id': 2267,
'category': 'Miscellaneous',
0: 'Music Disc'
}
}
}
enchantments = {
'minecraft': {
'__VERSION__': 1.10,
'__LANGUAGE__': 'en_US',
# Begin Armour Block.
'protection': {
'id': 0,
'name': 'Protection'
},
'fire_protection': {
'id': 1,
'name': 'Fire Protection'
},
'feather_falling': {
'id': 2,
'name': 'Feather Falling'
},
'blast_protection': {
'id': 3,
'name': 'Blast Protection'
},
'projectile_protection': {
'id': 4,
'name': 'Projectile Protection'
},
'respiration': {
'id': 5,
'name': 'Respiration'
},
'aqua_affinity': {
'id': 6,
'name': 'Aqua Affinity'
},
'thorns': {
'id': 7,
'name': 'Thorns'
},
'depth_strider': {
'id': 8,
'name': 'Depth Strider'
},
'frost_walker': {
'id': 9,
'name': 'Frost Walker'
},
# End Armour Block.
# Begin Sword Block.
'sharpness': {
'id': 16,
'name': 'Sharpness'
},
'smite': {
'id': 17,
'name': 'Smite'
},
'bane_of_arthropods': {
'id': 18,
'name': 'Bane of Arthropods'
},
'knockback': {
'id': 19,
'name': 'Knockback'
},
'fire_aspect': {
'id': 20,
'name': 'Fire Aspect'
},
'looting': {
'id': 21,
'name': 'Looting'
},
# End Sword Block.
# Begin Tools Block.
'efficiency': {
'id': 32,
'name': 'Efficiency'
},
'silk_touch': {
'id': 33,
'name': 'Silk Touch'
},
'unbreaking': {
'id': 34,
'name': 'Unbreaking'
},
'fortune': {
'id': 35,
'name': 'Fortune'
},
# End Tools Block.
# Begin Bows Block.
'power': {
'id': 48,
'name': 'Power'
},
'punch': {
'id': 49,
'name': 'Punch'
},
'flame': {
'id': 50,
'name': 'Flame'
},
'infinity': {
'id': 51,
            'name': 'Infinity'
},
# End Bows Block.
# Begin Fishing Rods Block.
'luck_of_the_sea': {
'id': 61,
'name': 'Luck of the Sea'
},
'lure': {
'id': 62,
'name': 'Lure'
},
# End Fishing Rods Block.
# Begin Misc Block.
'mending': {
'id': 70,
'name': 'Mending'
}
# End Misc Block.
}
}
# Roman Numeral Conversion
# Inspired by: https://stackoverflow.com/a/28777781
romanNumerals = (
(1000, 'M'),
(900, 'CM'),
(500, 'D'),
(400, 'CD'),
(100, 'C'),
(90, 'XC'),
(50, 'L'),
(40, 'XL'),
(10, 'X'),
(9, 'IX'),
(5, 'V'),
(4, 'IV'),
(1, 'I')
)
def intToRoman(number):
romanString = ''
for romanTuple in romanNumerals:
div, number = divmod(number, romanTuple[0])
romanString += romanTuple[1] * div
return romanString
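# Example (illustrative): intToRoman(1987) returns 'MCMLXXXVII'.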
def lookupItem(item, damage=0):
mod, item = item.split(':')
result = [None, None, None, None]
if mod in items and item in items[mod]:
if damage in items[mod][item]:
result[0] = items[mod][item][damage]
elif 'name' in items[mod][item]:
result[0] = items[mod][item]['name']
else:
result[0] = '[Unknown Name]'
if 'uses' in items[mod][item]:
result[1] = '{:.1%}'.format((items[mod][item]['uses'] - damage) / float(items[mod][item]['uses']))
if 'armor' in items[mod][item]:
result[2] = items[mod][item]['armor']
if 'toughness' in items[mod][item]:
result[3] = items[mod][item]['toughness']
else:
result[0] = '[Item Not Found]'
return result
def lookupNumericItem(itemNumeric, damage=0):
print('WARNING: Item numeric IDs are deprecated. Please use text IDs.')
result = [None, None, None, None]
for mod in items.values():
for item in mod.values():
if type(item) is dict and item['id'] == itemNumeric:
if damage in item:
result[0] = item[damage]
elif 'name' in item:
result[0] = item['name']
else:
result[0] = '[Unknown Name]'
if 'uses' in item:
result[1] = '{:.1%}'.format((item['uses'] - damage) / float(item['uses']))
if 'armor' in item:
result[2] = item['armor']
if 'toughness' in item:
result[3] = item['toughness']
break
if not result[0]:
result[0] = '[Item Not Found]'
return result
def lookupEnchant(enchant, level=None):
mod, enchant = enchant.split(':')
result = [None, None]
if mod in enchantments and enchant in enchantments[mod]:
if 'name' in enchantments[mod][enchant]:
result[0] = enchantments[mod][enchant]['name']
else:
result[0] = '[Unknown Name]'
else:
result[0] = '[Enchantment Not Found]'
if level:
result[1] = intToRoman(level)
return result
def lookupNumericEnchant(enchantNumeric, level=None):
result = [None, None]
for mod in enchantments.values():
for enchant in mod.values():
if type(enchant) is dict and enchant['id'] == enchantNumeric:
if 'name' in enchant:
result[0] = enchant['name']
else:
result[0] = '[Unknown Name]'
break
if not result[0]:
result[0] = '[Enchantment Not Found]'
if level:
result[1] = intToRoman(level)
return result
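# Illustrative usage sketch (not part of the original data file); it only
# exercises the lookup helpers defined above.
if __name__ == '__main__':
    # Plain block item: friendly name only.
    print(lookupItem('minecraft:stone', 1))          # ['Granite', None, None, None]
    # Tool: name plus remaining durability as a percentage.
    print(lookupItem('minecraft:iron_pickaxe', 50))  # ['Iron Pickaxe', '80.1%', None, None]
    # Enchantment with its level rendered as a Roman numeral.
    print(lookupEnchant('minecraft:sharpness', 4))   # ['Sharpness', 'IV']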
|
gpl-3.0
| 3,909,941,655,184,969,700
| 18.160235
| 101
| 0.520557
| false
| 2.331938
| false
| false
| false
|
IZSVenezie/VetEpiGIS-Tool
|
plugin/xitem_dialog.py
|
1
|
1545
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'xitem_dialog_base.ui'
#
# Created by: PyQt5 UI code generator 5.5.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(400, 78)
self.gridLayout = QtWidgets.QGridLayout(Dialog)
self.gridLayout.setObjectName("gridLayout")
self.label = QtWidgets.QLabel(Dialog)
self.label.setObjectName("label")
self.gridLayout.addWidget(self.label, 0, 0, 1, 1)
self.lineEdit = QtWidgets.QLineEdit(Dialog)
self.lineEdit.setObjectName("lineEdit")
self.gridLayout.addWidget(self.lineEdit, 0, 1, 1, 1)
self.buttonBox = QtWidgets.QDialogButtonBox(Dialog)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Save)
self.buttonBox.setObjectName("buttonBox")
self.gridLayout.addWidget(self.buttonBox, 1, 0, 1, 2)
self.retranslateUi(Dialog)
self.buttonBox.accepted.connect(Dialog.accept)
self.buttonBox.rejected.connect(Dialog.reject)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
self.label.setText(_translate("Dialog", "Item:"))
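# Illustrative usage sketch (not produced by pyuic5): generated Ui_* classes
# are normally attached to a plain QDialog, e.g.
#
#     app = QtWidgets.QApplication([])
#     dialog = QtWidgets.QDialog()
#     ui = Ui_Dialog()
#     ui.setupUi(dialog)
#     dialog.exec_()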
|
gpl-2.0
| -5,246,522,193,695,220,000
| 39.657895
| 108
| 0.698382
| false
| 3.951407
| false
| false
| false
|
hfiguiere/abiword
|
tools/build_osx_release.py
|
2
|
12008
|
#!/usr/bin/env python
# Copyright (C) 2011 Fabiano Fidencio
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA.
from os import chdir, environ, getcwd, listdir, mkdir, path
from shutil import copy2, rmtree
from subprocess import PIPE, Popen
from sys import argv, exit
from argparse import ArgumentParser
contents_path = "abiword/AbiWord.app/Contents"
def environment_prepare():
abisource_path="/tmp/abisource"
mkdir(abisource_path, 0755)
path = getcwd()
chdir(abisource_path)
return path
def environment_clean(path):
chdir(path)
abisource_path="/tmp/abisource"
rmtree(abisource_path)
def _macports_source_get():
source = "https://distfiles.macports.org/MacPorts/MacPorts-2.0.0.tar.gz"
cmd = "curl -O %s" % source
p = Popen(cmd, shell=True)
p.wait()
def _macports_source_extract():
cmd = "tar xzvpf MacPorts-2.0.0.tar.gz"
p = Popen(cmd, shell=True)
p.wait()
def _macports_install():
current_dir = getcwd()
chdir("MacPorts-2.0.0")
cmd = "./configure --prefix=/tmp/abisource/macports \
&& make \
&& sudo make install"
p = Popen(cmd, shell=True)
p.wait()
chdir(current_dir)
def _macports_env():
macports_path = "/tmp/abisource/macports/"
envs = environ
env = "%s/bin:%s/sbin:%s" % (macports_path, macports_path, envs["PATH"])
return env
def _macports_sync():
envs = _macports_env()
cmd = "sudo port -v selfupdate"
p = Popen(cmd, shell=True, env={"PATH":envs})
p.wait()
def macports_install():
_macports_source_get()
_macports_source_extract()
_macports_install()
_macports_sync()
def dependencies_install():
envs = _macports_env()
pkgs = "cairo +quartz+no_x11 \
pango +quartz+no_x11 \
fribidi \
libgsf +no_gnome \
redland \
wv +no_x11 \
enchant \
boost"
cmd = "sudo port install %s" % pkgs
p = Popen(cmd, shell=True, env={"PATH":envs})
p.wait()
def _abiword_source_get():
cmd = "svn co http://svn.abisource.com/abiword/trunk abiword"
p = Popen(cmd, shell=True)
p.wait()
def _abiword_fix_macports_path():
cmd = "sed -i -e \
's/\\/opt\\/local/\\/tmp\\/abisource\\/macports/g' \
configure.in"
p = Popen(cmd, shell=True)
p.wait()
def _abiword_install():
envs = _macports_env()
current_dir = getcwd()
chdir("abiword")
_abiword_fix_macports_path()
cmd = "./autogen.sh \
--with-darwinports \
--enable-maintainer-mode \
--disable-static \
--enable-shared \
--enable-plugins=\"docbook epub latex openwriter openxml opml\" \
&& make && DESTDIR=`pwd` make install"
p = Popen(cmd, shell=True, env={"PATH":envs})
p.wait()
chdir(current_dir)
def abiword_install():
_abiword_source_get()
_abiword_install()
def _dep_list_get(lib):
#otool -L path
cmd = "otool -L %s " %lib
#get all .dylib from otool -L
cmd += "| grep macports | sed -e 's/.dylib.*$/.dylib/'"
#remove white spaces before and after the lib path/name
cmd += "| sed 's/^[ \t]*//;s/[ \t]*$//'"
p = Popen(cmd, shell=True, stdout=PIPE)
p.wait()
stdout = p.communicate()
return stdout[0].split('\n')[:-1]
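    # For reference (illustrative, not output produced by this script): a raw
    # `otool -L` dependency line looks roughly like
    #     <macports-prefix>/lib/libfoo.dylib (compatibility version 1.0.0, ...)
    # and the grep/sed pipeline above reduces it to just the library path,
    # one entry per line.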
def _rdeps_get():
libabiword = ""
libabiword_deps = []
for content in listdir(contents_path + "/Frameworks"):
if content.endswith(".dylib"):
libabiword = contents_path + "/Frameworks/" + content
libabiword_deps = _dep_list_get(libabiword)
break
plugins = []
plugins_deps = []
for content in listdir(contents_path + "/PlugIns"):
if content.endswith(".so"):
plugin = contents_path + "/PlugIns/" + content
plugins.append(plugin)
plugins_deps = _dep_list_get(plugin)
abiword = contents_path + "/MacOS/AbiWord"
abiword_deps = _dep_list_get(abiword)
rdeps = []
for lib in libabiword_deps:
rdeps.append(lib)
for lib in plugins_deps:
if lib not in rdeps:
rdeps.append(lib)
for lib in abiword_deps:
if lib not in rdeps:
rdeps.append(lib)
rdeps_deps = []
for lib in rdeps:
rdeps_deps += _dep_list_get(lib)
for lib in rdeps_deps:
if lib not in rdeps:
rdeps.append(lib)
return rdeps, libabiword, abiword, plugins
def _rdeps_copy(rdeps):
rdeps_path = contents_path + "/Frameworks/rdeps"
mkdir(rdeps_path, 0755)
n_rdeps = []
for dep in rdeps:
dep_path, dep_name = path.split(dep)
copy2(dep, rdeps_path)
d = "%s/%s" % (rdeps_path, dep_name)
cmd = "chmod 755 " + d
n_rdeps.append(d)
p = Popen(cmd, shell=True)
p.wait()
return n_rdeps
def _fix(lib, new):
dep_list = _dep_list_get(lib)
for d in dep_list:
d_path, d_name = path.split(d)
n = "@executable_path/../Frameworks/rdeps/" + d_name
cmd = "install_name_tool -change %s %s %s" % (d, n, lib)
p = Popen(cmd, shell=True)
p.wait()
lib_path, lib_name = path.split(lib)
cmd = "install_name_tool -id %s %s" % (new, lib)
p = Popen(cmd, shell=True)
p.wait()
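# Illustrative example of the rewrite performed by _fix() (paths are hypothetical).
# For a dependency /tmp/abisource/macports/lib/libpng.dylib referenced by some
# library <lib>, the generated commands look roughly like:
#   install_name_tool -change /tmp/abisource/macports/lib/libpng.dylib \
#       @executable_path/../Frameworks/rdeps/libpng.dylib <lib>
#   install_name_tool -id <new install name> <lib>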
def _rdeps_fix(rdeps):
for r in rdeps:
file_path, file_name = path.split(r)
new = "@executable_path/../Frameworks/rdeps/" + file_name
_fix(r, new)
def _libabiword_fix(libabiword):
file_path, file_name = path.split(libabiword)
new = "@executable_path/../Frameworks/" + file_name
_fix(libabiword, new)
def _abiword_fix(abiword):
file_path, file_name = path.split(abiword)
new = "@executable_path/" + file_name
_fix(abiword, new)
def _plugins_fix(plugins):
for p in plugins:
file_path, file_name = path.split(p)
new = "@executable_path/../PlugIns/" + file_name
_fix(p, new)
def do_app():
rdeps, libabiword, abiword, plugins = _rdeps_get()
n_rdeps = _rdeps_copy(rdeps)
_rdeps_fix(n_rdeps)
_libabiword_fix(libabiword)
_abiword_fix(abiword)
_plugins_fix(plugins)
def do_dmg():
mkdir("dmg", 0755)
cmd = "cp -a abiword/AbiWord.app dmg/"
p = Popen(cmd, shell = True)
p.wait()
cmd = "ln -s /Applications dmg/"
p = Popen(cmd, shell=True)
p.wait()
cmd = "hdiutil create \
-srcfolder \"dmg\" \
-volname \"AbiWord\" \
-fs HFS+ \
-fsargs \"-c c=64,a=16,e=16\" \
-format UDRW \"AbiWord.dmg\""
p = Popen(cmd, shell=True)
p.wait()
rmtree("dmg")
copy2("AbiWord.dmg", environ["HOME"] + "/Desktop/")
if __name__ == "__main__":
parser = ArgumentParser(description="Automated dmg generator")
parser.add_argument("--macports_path",
action="store",
dest="macports_path",
help="This option will use your current macports' \
installation from MACPORTS_PATH.\n\
ATTENTION: Without this option, macports will \
be downloaded and installed in: \
/tmp/abisource/macports")
parser.add_argument("--abisource_path",
action="store",
dest="abi_path",
default=False,
help="This option will consider that you have \
AbiWord's sources in your computer, located at \
ABISOURCE_PATH and want to build it and NOT a \
specific version from our SVN.")
parser.add_argument("--abisource_revision",
action="store",
dest="abi_rev",
help="This option will get a specific revision from \
AbiWord's SVN. \
ATTENTION: If this option isn't passed, SVN's \
trunk will be used.")
parser.add_argument("--abiword_version",
action="store",
dest="abi_version",
help="This option will get a specific version from \
AbiWord's SVN. \
ATTENTION: If this option isn't passed, SVN's \
trunk will be used.")
parser.add_argument("--no_deps",
action="store_true",
dest="no_deps",
default=False,
help="This option won't install AbiWord's \
dependencies in your computer. So, is YOUR \
WORK install all needed dependencies. Of \
course, you'll need to install macports before.")
parser.add_argument("--start_from_build",
action="store_true",
dest="start_from_build",
default=False,
help="This option will consider that you have \
macports and all AbiWord's dependencies \
installed. \
ATTENTION: This options will build AbiWord and \
create a dmg file. So, is REALLY NECESSARY \
that you pass --abisource_path option.")
parser.add_argument("--start_from_app",
action="store",
dest="start_from_app",
help="This option will use a generated .app file \
to fix all linkage and put all nedded libs \
into .app in a specific folder. After that a \
dmg file will be created(Don't put '/' at the end of .app package path). \
ATTENTION: Is REALLY NECESSARY that you pass \
--macports_path option. Eg: python build_script.py --start_from_app /Users/abi/Abiword.app")
parser.add_argument("--start_from_linkage_fixed",
action="store",
dest="start_from_linkage_fixed",
help="This option will use a generated .app file \
with linkage working properly to create a \
.dmg file.\
ATTENTION: Is REALLY NECESSARY that you pass \
--macports_path option.")
if len(argv) < 2:
parser.print_help()
exit()
else:
args = parser.parse_args()
# print args
current_dir = getcwd()
def printSuccess():
print "*****************************************************"
print "* AbiWord.dmg was created in your ~/Desktop. Enjoy! *"
print "*****************************************************"
def cleanAndPrint():
environment_clean(current_dir)
printSuccess()
dict_args=vars(args)
print dict_args
if dict_args['start_from_app'] != None:
contents_path = dict_args['start_from_app'] + "/Contents"
do_app()
do_dmg()
printSuccess()
exit()
else:
environment_prepare()
macports_install()
dependencies_install()
abiword_install()
do_app()
do_dmg()
cleanAndPrint()
|
gpl-2.0
| 9,047,993,009,526,091,000
| 31.193029
| 122
| 0.541889
| false
| 3.674419
| false
| false
| false
|
Valchris/IEEEXtreme_WorkingAsIntended
|
2012/AA_Alex.py
|
1
|
1107
|
__author__ = 'alexander'
import sys
initial_bunnies = long(sys.stdin.readline())
bunnies = dict()
bunnies['adults'] = initial_bunnies
bunnies['babies'] = long(0)
bunnies['juveniles'] = long(0)
bunnies['juveniles2'] = long(0)
for i in range(0,365,15):
if i % 2 == 0:
bunnies['babies'] = long(bunnies['babies'])*0.75 # Death to flu
bunnies['juveniles'] = long(bunnies['juveniles']*0.75) # Death to flu
bunnies['juveniles2'] = long(bunnies['juveniles2']*0.75) # Death to flu
bunnies['adults'] = long(bunnies['adults']*0.75) # Death to flu
bunnies['adults'] += long(bunnies['juveniles2']*0.70) # Forest migration
if i == 0:
continue
bunnies['juveniles2'] = bunnies['juveniles'] # Juveniles growing
bunnies['juveniles'] = long(bunnies['babies']) # Babies growing
bunnies['babies'] = long(bunnies['adults']*0.90) # Babies being born / 10% of babies die at birth
if bunnies['adults'] == 0 and bunnies['babies'] == 0 and bunnies['juveniles'] == 0:
break
print long(bunnies['adults'] + bunnies['babies'] + bunnies['juveniles'])
|
mit
| -221,296,799,781,316,100
| 33.625
| 101
| 0.633243
| false
| 2.427632
| false
| false
| false
|
lesina/Hack70
|
env/bin/painter.py
|
1
|
2129
|
#!/home/oleg/Web/Hack70/env/bin/python3
#
# The Python Imaging Library
# $Id$
#
# this demo script illustrates pasting into an already displayed
# photoimage. note that the current version of Tk updates the whole
# image every time we paste, so to get decent performance, we split
# the image into a set of tiles.
#
try:
from tkinter import Tk, Canvas, NW
except ImportError:
from Tkinter import Tk, Canvas, NW
from PIL import Image, ImageTk
import sys
#
# painter widget
class PaintCanvas(Canvas):
def __init__(self, master, image):
Canvas.__init__(self, master, width=image.size[0], height=image.size[1])
# fill the canvas
self.tile = {}
self.tilesize = tilesize = 32
xsize, ysize = image.size
for x in range(0, xsize, tilesize):
for y in range(0, ysize, tilesize):
box = x, y, min(xsize, x+tilesize), min(ysize, y+tilesize)
tile = ImageTk.PhotoImage(image.crop(box))
self.create_image(x, y, image=tile, anchor=NW)
self.tile[(x, y)] = box, tile
self.image = image
self.bind("<B1-Motion>", self.paint)
def paint(self, event):
xy = event.x - 10, event.y - 10, event.x + 10, event.y + 10
im = self.image.crop(xy)
# process the image in some fashion
im = im.convert("L")
self.image.paste(im, xy)
self.repair(xy)
def repair(self, box):
# update canvas
dx = box[0] % self.tilesize
dy = box[1] % self.tilesize
for x in range(box[0]-dx, box[2]+1, self.tilesize):
for y in range(box[1]-dy, box[3]+1, self.tilesize):
try:
xy, tile = self.tile[(x, y)]
tile.paste(self.image.crop(xy))
except KeyError:
pass # outside the image
self.update_idletasks()
#
# main
if len(sys.argv) != 2:
print("Usage: painter file")
sys.exit(1)
root = Tk()
im = Image.open(sys.argv[1])
if im.mode != "RGB":
im = im.convert("RGB")
PaintCanvas(root, im).pack()
root.mainloop()
|
gpl-3.0
| -5,233,558,321,454,570,000
| 24.963415
| 80
| 0.573978
| false
| 3.331768
| false
| false
| false
|
RRSCDS/douban-mining
|
src/Python/api-oauth/oauthx.py
|
1
|
2742
|
# -*- coding: utf8 -*-
import urllib, urllib2
import json
# The key and secret are obtained by creating a Douban app (no review required)
# http://developers.douban.com/apikey/
APIKEY = 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
SECRET = 'xxxxxxxxxxxxxxxx'
CALLBACK_URL = 'http://www.douban.com'
GETTOKEN_URL = 'https://www.douban.com/service/auth2/token'
def getToken(code):
postParams = {
'client_id': APIKEY,
'client_secret': SECRET,
'redirect_uri': CALLBACK_URL,
'grant_type': 'authorization_code',
'code': code
}
# headers may not be necessary
headers = {
'Host': 'www.douban.com',
'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:34.0) Gecko/20100101 Firefox/34.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Connection': 'keep-alive'
}
req = urllib2.Request(
url = GETTOKEN_URL,
data = urllib.urlencode(postParams),
headers = headers
)
# Get the access token
try:
r = urllib2.urlopen(req).read()
print r
return json.loads(r)['access_token']
# Get detailed error msg if 400 bad request occurs:
except urllib2.HTTPError as e:
print 'Error: ' + e.read()
raise SystemExit(e)
# Authorization code can be obtained manually using browser,
# see http://developers.douban.com/wiki/?title=oauth2 (the "obtaining authorization_code" section)
# Each code can only be used once to get an access token (?)
# Tokens are relatively long-lived - no need to get a code every time
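# A minimal sketch (defined but never called) of how the authorization URL for the
# manual browser step above could be built. The endpoint path '/service/auth2/auth'
# and the parameter names are assumptions based on the wiki linked above, not
# verified here.
def buildAuthorizeUrl():
    params = {
        'client_id': APIKEY,
        'redirect_uri': CALLBACK_URL,
        'response_type': 'code',
    }
    return 'https://www.douban.com/service/auth2/auth?' + urllib.urlencode(params)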
def apiTest(user, count=1, until_id=''):
# Use old token from file if there is one, otherwise get new token
f = open('token.txt', 'a+')
tok = f.read()
if len(tok) == 0:
tok = getToken(raw_input('input code here:')) # input code manually
f.write(tok)
f.close()
print 'Current token:', tok
# Request url and headers
url = 'https://api.douban.com/shuo/v2/statuses/user_timeline/'
url = url + user + '?count=%s&until_id=%s'%(count, until_id)
headers = {'Authorization': 'Bearer '+tok}
# Get data
try:
req2 = urllib2.Request(url=url, headers=headers)
resp2 = urllib2.urlopen(req2)
rj = resp2.read() # JSON-formatted data
print rj
r = json.loads(rj) # convert to a Python list; each status is a dict object
print '%s statuses loaded' % len(r)
except urllib2.HTTPError as e:
print 'Error: ' + e.read()
raise SystemExit(e)
if __name__ == "__main__":
apiTest('homeland', 5, '1605326442')
# Note that contrary to what douban api help says, until_id is NOT inclusive,
# i.e. only statuses with id < until_id will be loaded.
|
mit
| 1,631,758,914,176,409,900
| 29.193182
| 101
| 0.615587
| false
| 3.106433
| false
| false
| false
|
thesealion/writelightly
|
writelightly/main.py
|
1
|
5266
|
import curses
import datetime
import sys
from writelightly.calendar import Calendar
from writelightly.conf import Config
from writelightly.edit import edit_date, get_edits, clean_tmp, show_edits
from writelightly.metadata import Metadata
from writelightly.screen import ScreenManager, TextArea
from writelightly.tags import show_tags, show_tag
from writelightly.utils import entry_exists, parse_date, WLError, WLQuit
import locale
locale.setlocale(locale.LC_ALL, ('en_US', 'UTF-8'))
def show_calendar():
"""Show an interactive calendar.
Show the calendar on the left side of the screen and some metadata about
the selected date on the right. Any entry can be edited in an external editor.
"""
today = datetime.date.today()
year, month = today.year, today.month
cal = Calendar(year, month, today.day, entry_exists)
metadata = Metadata.get(year, month)
text_area = TextArea()
ScreenManager.draw_all()
d = cal.get_current_date()
text_area.show_text(metadata.text(d.day))
keys = Config.calendar_keys
while 1:
try:
kn = curses.keyname(cal.window.getch())
except KeyboardInterrupt:
break
except ValueError:
continue
if kn in Config.general_keys['quit']:
raise WLQuit
if kn in Config.general_keys['quit_mode']:
break
if kn == 'KEY_RESIZE':
ScreenManager.resize()
if cal.hidden:
continue
if kn in keys['left']:
moved = cal.move_left()
if not moved:
cal = cal.get_previous_calendar()
cal.draw()
metadata = Metadata.get(cal.year, cal.month)
text_area.show_text(metadata.text(cal.get_current_day()))
elif kn in keys['right']:
moved = cal.move_right()
if not moved:
cal = cal.get_next_calendar()
cal.draw()
metadata = Metadata.get(cal.year, cal.month)
text_area.show_text(metadata.text(cal.get_current_day()))
elif kn in keys['down']:
cal.move_down()
text_area.show_text(metadata.text(cal.get_current_day()))
elif kn in keys['up']:
cal.move_up()
text_area.show_text(metadata.text(cal.get_current_day()))
elif kn in keys['edit']:
date = cal.get_current_date()
edit_date(date)
metadata.load_day(date.day)
cal.set_active(entry_exists(date))
text_area.show_text(metadata.text(date.day))
elif kn in keys['tags']:
show_tags(cal.area_id, text_area)
ScreenManager.restore_area(cal.area_id)
cal.reinit()
text_area.set_title()
text_area.show_text(metadata.text(cal.get_current_day()))
elif kn in keys['edits']:
date = cal.get_current_date()
edits = get_edits(date)
if edits:
show_edits(date, edits, text_area.area_id)
ScreenManager.restore_area(text_area.area_id)
text_area.show_text(metadata.text(date.day))
elif kn in keys['prev_month']:
cal = cal.get_previous_calendar(cal.get_current_day())
cal.draw()
metadata = Metadata.get(cal.year, cal.month)
text_area.show_text(metadata.text(cal.get_current_day()))
elif kn in keys['next_month']:
cal = cal.get_next_calendar(cal.get_current_day())
cal.draw()
metadata = Metadata.get(cal.year, cal.month)
text_area.show_text(metadata.text(cal.get_current_day()))
Metadata.write_all()
clean_tmp()
def edit_single_date(date):
"""Edit a single entry in external editor without initializing screen."""
date = parse_date(date)
if not date:
raise WLError('Unrecognised date format\n')
edit_date(date)
metadata = Metadata(date.year, date.month)
metadata.load_day(date.day)
metadata.write()
usage = '''Usage:
%(name)s
%(name)s ( <date> | today | yesterday )
%(name)s -t [<tag>]
''' % {'name': sys.argv[0]}
def wrapper(func, with_screen=False):
if with_screen:
ScreenManager.init()
error = None
try:
func()
except WLQuit:
pass
except WLError as exc:
error = exc
finally:
if with_screen:
ScreenManager.quit()
if error is not None:
sys.stderr.write('%s\n' % error)
def main():
from getopt import getopt, GetoptError
from functools import partial
try:
options, args = getopt(sys.argv[1:], 'th', ['help'])
except GetoptError as exc:
sys.stderr.write('%s\nTry `%s -h` for help\n' % (exc, sys.argv[0]))
sys.exit(1)
init_screen = True
option_names = [o[0] for o in options]
if '-h' in option_names or '--help' in option_names:
print usage
sys.exit()
if options:
if args:
func = partial(show_tag, args[0])
else:
func = show_tags
else:
if args:
func = partial(edit_single_date, args[0])
init_screen = False
else:
func = show_calendar
wrapper(func, init_screen)
|
mit
| -884,814,466,088,044,500
| 32.75641
| 79
| 0.583175
| false
| 3.690259
| false
| false
| false
|
allofhercats/whiskey
|
proto/literal_print.py
|
1
|
1544
|
def get_n_digits(value, base):
n = 0
if value == 0:
return 1
else:
while value > 0:
value //= base
n += 1
return n
def literal_int_to_string(value, base, width = 0, pad = '0', prefix = True):
rtn = ""
if base == 2 and prefix:
rtn = "0b"
elif base == 8 and prefix:
rtn = "0"
elif base == 16 and prefix:
rtn = "0x"
n_digits = get_n_digits(value, base)
if width > 0 and width > n_digits:
i = 0
while i < width - n_digits:
rtn += pad
i += 1
fac = base ** (n_digits - 1)
i = 0
while i < n_digits:
digit = (value // fac) % base
if base == 2:
rtn += "01"[digit]
elif base == 8:
rtn += "01234567"[digit]
elif base == 10:
rtn += "0123456789"[digit]
elif base == 16:
rtn += "0123456789abcdef"[digit]
else:
raise NotImplementedError()
fac //= base
i += 1
return rtn
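# Worked example (traced from the code above): literal_int_to_string(10, 8)
# emits the "0" octal prefix, finds 2 digits, and returns "012";
# literal_int_to_string(255, 16) returns "0xff".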
def literal_float_to_string(value, precision = 5, strip = True):
if value < 0.0:
return "-" + literal_float_to_string(-value, precision, strip)
rtn = literal_int_to_string(int(value), 10)
rtn += "."
value1 = value - int(value)
n_stripped = 0
i = 0
while i < precision:
value1 *= 10.0
digit = int(value1) % 10
if digit == 0:
n_stripped += 1
else:
n_stripped = 0
if not strip:
rtn += "0123456789"[digit]
i += 1
if strip:
value1 = value - int(value)
i = 0
while i < precision - n_stripped:
value1 *= 10.0
digit = int(value1) % 10
rtn += "0123456789"[digit]
i += 1
return rtn
if __name__ == "__main__":
print(literal_float_to_string(3.1400000000001, 20))
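# A few extra illustrative checks (added as a sketch; the expected values were
# derived by tracing the functions above and assume ordinary float rounding):
print(literal_int_to_string(5, 2, width=8)) # expected "0b00000101"
print(literal_float_to_string(3.14, 2)) # expected "3.14"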
|
mit
| 2,214,180,999,662,640,600
| 17.60241
| 76
| 0.577073
| false
| 2.573333
| false
| false
| false
|
ngageoint/scale
|
scale/scale/settings.py
|
1
|
14209
|
"""
Django settings for scale_test project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
import os
import scale
import sys
import dj_database_url
def get_env_boolean(variable_name, default=False):
return os.getenv(variable_name, str(default)).lower() in ('yes', 'true', 't', '1')
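# Worked example: with PUBLIC_READ_API=true (or "yes", "t", "1", any case) in the
# environment, get_env_boolean('PUBLIC_READ_API') returns True; when the variable
# is unset, the default (False -> "false") is used and the call returns False.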
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Project version
VERSION = scale.__version__
DOCKER_VERSION = scale.__docker_version__
# Mesos connection information. Default for -m
# This can be something like "127.0.0.1:5050"
# or a zookeeper url like 'zk://host1:port1,host2:port2,.../path`
MESOS_MASTER = os.getenv('MESOS_MASTER', 'zk://leader.mesos:2181/mesos')
# By default, use the '*' role, meaning offers for all unreserved resources are received
MESOS_ROLE = os.getenv('MESOS_ROLE', '*')
# Used to set the user that Mesos tasks are launched as by Docker. This should NEVER be set to root
# and must be a user name NOT a Linux UID. Mesos chokes on UIDs.
CONTAINER_PROCESS_OWNER = os.getenv('CONTAINER_PROCESS_OWNER', 'nobody')
# By default, the accepted resources match reservations to the MESOS_ROLE
ACCEPTED_RESOURCE_ROLE = os.getenv('ACCEPTED_RESOURCE_ROLE', MESOS_ROLE)
# By default, all API calls require authentication.
PUBLIC_READ_API = get_env_boolean('PUBLIC_READ_API')
# Placeholder for service secret that will be overridden in local_settings_docker
SERVICE_SECRET = None
# Zookeeper URL for scheduler leader election. If this is None, only a single scheduler is used.
SCHEDULER_ZK = None
# The full name for the Scale Docker image (without version tag)
SCALE_DOCKER_IMAGE = 'geoint/scale'
# The location of the config file containing Docker credentials
# The URI value should point to an externally hosted location such as a webserver or hosted S3 bucket.
# The value will be an http URL such as 'http://static.mysite.com/foo/.dockercfg'
CONFIG_URI = None
# Directory for rotating metrics storage
METRICS_DIR = None
# fluentd warning levels, or -1 to disable warnings
FLUENTD_BUFFER_WARN = int(os.environ.get('FLUENTD_BUFFER_WARN', -1))
FLUENTD_BUFFER_SIZE_WARN = int(os.environ.get('FLUENTD_BUFFER_SIZE_WARN', -1))
# URL for fluentd, or None to disable fluentd
LOGGING_ADDRESS = os.environ.get('LOGGING_ADDRESS')
LOGGING_HEALTH_ADDRESS = os.environ.get('LOGGING_HEALTH_ADDRESS')
# Base URL of elasticsearch nodes
ELASTICSEARCH_URL = os.environ.get('ELASTICSEARCH_URL')
# Placeholder for elasticsearch version. Supplied in production by local_settings_docker.py
ELASTICSEARCH_VERSION = None
# Placeholder for Elasticsearch object. Needed for unit tests.
ELASTICSEARCH = None
DATABASE_URL = os.getenv('DATABASE_URL')
# Root URL for the Scale installation
SCALE_VHOST = os.getenv('SCALE_VHOST', 'localhost:8000')
# Broker URL for connection to messaging backend
BROKER_URL = 'amqp://guest:guest@localhost:5672//'
QUEUE_NAME = 'scale-command-messages'
MESSSAGE_QUEUE_DEPTH_WARN = int(os.environ.get('MESSSAGE_QUEUE_DEPTH_WARN', -1))
# Queue limit
SCHEDULER_QUEUE_LIMIT = int(os.environ.get('SCHEDULER_QUEUE_LIMIT', 500))
# The max number of times the scheduler will try to reconnect to
# mesos if disconnected.
SCHEDULER_MAX_RECONNECT = int(os.environ.get('SCHEDULER_MAX_RECONNECT', 3))
# Base URL of vault or DCOS secrets store, or None to disable secrets
SECRETS_URL = None
# Public token if DCOS secrets store, or privileged token for vault
SECRETS_TOKEN = None
# DCOS service account name, or None if not DCOS secrets store
DCOS_SERVICE_ACCOUNT = None
# Flag for raising SSL warnings associated with secrets transactions.
SECRETS_SSL_WARNINGS = True
# SECURITY WARNING: keep the secret key used in production secret!
INSECURE_DEFAULT_KEY = 'this-key-is-insecure-and-should-never-be-used-in-production'
SECRET_KEY = INSECURE_DEFAULT_KEY
# Used to write the superuser password
MESOS_SANDBOX = os.getenv('MESOS_SANDBOX')
# Security settings for production
SECURE_CONTENT_TYPE_NOSNIFF = True
SECURE_BROWSER_XSS_FILTER = True
SESSION_COOKIE_SECURE = get_env_boolean('SESSION_COOKIE_SECURE', True)
X_FRAME_OPTIONS = 'DENY'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
# authentication toggle, to be used for testing
AUTHENTICATION_ENABLED = get_env_boolean('AUTHENTICATION_ENABLED', True)
ALLOWED_HOSTS = ['localhost', '127.0.0.1']
# Used primarily by debug-toolbar to dictate which client URL has access
if os.environ.get('INTERNAL_IP'):
INTERNAL_IPS = [os.environ.get('INTERNAL_IP')]
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.gis',
'rest_framework',
'rest_framework.authtoken',
'debug_toolbar',
###############
# Social Auth #
###############
'oauth2_provider',
'social_django',
'rest_framework_social_oauth2',
# Scale apps
'accounts',
'batch',
'cli',
'data',
'diagnostic',
'error',
'ingest',
'job',
'mesos_api',
'messaging',
'metrics',
'node',
'product',
'queue',
'recipe',
'scheduler',
'shared_resource',
'source',
'storage',
'trigger',
'util',
'vault'
)
MIDDLEWARE = [
'debug_toolbar.middleware.DebugToolbarMiddleware',
'util.middleware.MultipleProxyMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'util.middleware.ExceptionLoggingMiddleware',
]
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'debug': False,
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
###############
# Social Auth #
###############
'social_django.context_processors.backends',
'social_django.context_processors.login_redirect',
],
},
},
]
AUTHENTICATION_BACKENDS = [
'django.contrib.auth.backends.ModelBackend',
]
REST_FRAMEWORK = {
'DEFAULT_FILTER_BACKENDS': (
'django_filters.rest_framework.DjangoFilterBackend',
),
'DEFAULT_PAGINATION_CLASS': 'util.rest.DefaultPagination',
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
'rest_framework.renderers.BrowsableAPIRenderer',
'rest_framework.renderers.AdminRenderer',
),
'ALLOWED_VERSIONS': ('v6', 'v7'),
'DEFAULT_VERSION': 'v6',
'DEFAULT_VERSIONING_CLASS': 'rest_framework.versioning.NamespaceVersioning',
}
if AUTHENTICATION_ENABLED:
REST_FRAMEWORK['DEFAULT_AUTHENTICATION_CLASSES'] = (
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.TokenAuthentication',
###############
# Social Auth #
###############
'oauth2_provider.contrib.rest_framework.OAuth2Authentication',
'rest_framework_social_oauth2.authentication.SocialAuthentication',
)
REST_FRAMEWORK['DEFAULT_PERMISSION_CLASSES'] = (
'util.rest.ScaleAPIPermissions',
)
else:
REST_FRAMEWORK['DEFAULT_AUTHENTICATION_CLASSES'] = ()
REST_FRAMEWORK['DEFAULT_PERMISSION_CLASSES'] = ()
REST_FRAMEWORK['UNAUTHENTICATED_USER'] = None
ROOT_URLCONF = 'scale.urls'
WSGI_APPLICATION = 'scale.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': dj_database_url.config(default='sqlite://%s' % os.path.join(BASE_DIR, 'db.sqlite3'))
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
LOGIN_REDIRECT_URL = '/'
#############################
# GEOAxIS specific settings #
#############################
SOCIAL_AUTH_NEW_USER_REDIRECT_URL = '/'
# Redirect after directly hitting login endpoint
SOCIAL_AUTH_LOGIN_REDIRECT_URL = '/'
DEFAULT_AUTH_PIPELINE = (
'social_core.pipeline.social_auth.social_details',
'social_core.pipeline.social_auth.social_uid',
'social_core.pipeline.social_auth.auth_allowed',
'social_core.pipeline.social_auth.social_user',
'social_core.pipeline.user.get_username',
'social_core.pipeline.mail.mail_validation',
'social_core.pipeline.social_auth.associate_by_email',
'social_core.pipeline.user.create_user',
'social_core.pipeline.social_auth.associate_user',
'social_core.pipeline.social_auth.load_extra_data',
'social_core.pipeline.user.user_details'
)
SOCIAL_AUTH_GEOAXIS_KEY = os.getenv('GEOAXIS_KEY')
SOCIAL_AUTH_GEOAXIS_SECRET = os.getenv('GEOAXIS_SECRET')
SOCIAL_AUTH_GEOAXIS_HOST = os.getenv('GEOAXIS_HOST', 'geoaxis.gxaccess.com')
OAUTH_GEOAXIS_USER_FIELDS = os.getenv(
'GEOAXIS_USER_FIELDS', 'username, email, last_name, first_name')
SOCIAL_AUTH_GEOAXIS_USER_FIELDS = map(
str.strip, OAUTH_GEOAXIS_USER_FIELDS.split(','))
OAUTH_GEOAXIS_SCOPES = os.getenv('GEOAXIS_SCOPES', 'UserProfile.me')
SOCIAL_AUTH_GEOAXIS_SCOPE = map(str.strip, OAUTH_GEOAXIS_SCOPES.split(','))
# GeoAxisOAuth2 will cause all login attempt to fail if
# SOCIAL_AUTH_GEOAXIS_HOST is None
GEOAXIS_ENABLED = False
if SOCIAL_AUTH_GEOAXIS_KEY and len(SOCIAL_AUTH_GEOAXIS_KEY) > 0:
GEOAXIS_ENABLED = True
AUTHENTICATION_BACKENDS += (
'django_geoaxis.backends.geoaxis.GeoAxisOAuth2',
)
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = 'static/'
STATICFILES_DIRS = ()
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# Logging configuration
LOG_DIR = os.path.join(BASE_DIR, 'logs')
if not os.path.exists(LOG_DIR):
os.makedirs(LOG_DIR)
LOG_NAME = 'scale'
LOG_FORMATTERS = {
'standard': {
'format': ('%(asctime)s %(levelname)s ' +
'[%(name)s(%(lineno)s)] %(message)s'),
'datefmt': '%Y-%m-%d %H:%M:%S',
},
'db-standard': {
'format': ('[%(name)s(%(lineno)s)] %(message)s'),
}
}
LOG_FILTERS = {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
},
'debug_info_only':{
'()':'scale.custom_logging.UserFilter',
}
}
LOG_HANDLERS = {
'null': {
'level': 'DEBUG',
'class': 'logging.NullHandler',
},
'mesoshttp' : {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'standard',
'stream': sys.stdout
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'standard',
'stream': sys.stdout
},
'console-stderr': {
'level': 'WARNING',
'class': 'logging.StreamHandler',
'formatter': 'standard',
'stream': sys.stderr
},
'console-stdout': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'standard',
'stream': sys.stdout,
'filters':['debug_info_only']
},
'log-db': {
'level': 'WARNING',
'class': 'error.handlers.DatabaseLogHandler',
'formatter': 'db-standard',
'model': 'error.models.LogEntry',
},
}
LOG_CONSOLE_DEBUG = {
'version': 1,
'formatters': LOG_FORMATTERS,
'filters': LOG_FILTERS,
'handlers': LOG_HANDLERS,
'loggers': {
'': {
'handlers': ['console-stdout','console-stderr'],
'level': 'DEBUG',
},
},
}
LOG_CONSOLE_INFO = {
'version': 1,
'formatters': LOG_FORMATTERS,
'filters': LOG_FILTERS,
'handlers': LOG_HANDLERS,
'loggers': {
'': {
'handlers': ['console-stdout','console-stderr'],
'level': 'INFO',
},
},
}
LOG_CONSOLE_ERROR = {
'version': 1,
'formatters': LOG_FORMATTERS,
'filters': LOG_FILTERS,
'handlers': LOG_HANDLERS,
'loggers': {
'': {
'handlers': ['console-stderr'],
'level': 'ERROR',
},
},
}
LOG_CONSOLE_WARNING = {
'version': 1,
'formatters': LOG_FORMATTERS,
'filters': LOG_FILTERS,
'handlers': LOG_HANDLERS,
'loggers': {
'': {
'handlers': ['console-stderr'],
'level': 'WARNING',
},
},
}
LOG_CONSOLE_CRITICAL = {
'version': 1,
'formatters': LOG_FORMATTERS,
'filters': LOG_FILTERS,
'handlers': LOG_HANDLERS,
'loggers': {
'': {
'handlers': ['console-stderr'],
'level': 'CRITICAL',
},
},
}
LOGGING = LOG_CONSOLE_DEBUG
# Hack to fix ISO8601 for datetime filters.
# This should be taken care of by a future Django fix, and might even be handled
# by a newer version of django-rest-framework. Unfortunately, both of these solutions
# will accept datetimes without timezone information, which we do not want to allow;
# see https://code.djangoproject.com/tickets/23448
# Solution modified from http://akinfold.blogspot.com/2012/12/datetimefield-doesnt-accept-iso-8601.html
from django.forms import fields
from util.parse import parse_datetime
fields.DateTimeField.strptime = lambda _self, datetime_string, _format: parse_datetime(datetime_string)
|
apache-2.0
| -3,697,003,481,016,563,700
| 29.622845
| 103
| 0.661553
| false
| 3.472385
| false
| false
| false
|
TeamADEA/Hunger_Games
|
HG_Code/Model.py
|
1
|
7064
|
import numpy as np
import copy
import time
from Kat import Kat
from Visualize import Visualizer
from SimManager import sim_manager
from hg_settings import *
from Hunger_Grid import hunger_grid
import sys
import os
STEP_SIZE = 10 # 0 = only last frame,
# 1 = every frame,
# N = every N frames
# -1 = don't show
tki_breakdown = np.zeros(NUM_OF_GENERATIONS*6).reshape(NUM_OF_GENERATIONS, 6)
full_graph = np.zeros(NUM_OF_SPECIES*NUM_OF_GENERATIONS).reshape(NUM_OF_SPECIES, NUM_OF_GENERATIONS)
full_graph_bk = np.zeros(NUM_OF_SPECIES*2).reshape(NUM_OF_SPECIES, 2)
def run_model(from_lava = .02, to_lava = .02, from_berry = .05, to_berry = .05\
, from_mut=10, to_mut=10, from_gen = 33, to_gen = 33, \
t_name = 'Default', frames = -1):
global STEP_SIZE
STEP_SIZE = frames
progenitor = Kat(0,0)
grid = hunger_grid()
vis = Visualizer(grid)
start_time = time.time()
# Calculate the seed settings for each specie. This is used to run multiple
# species in a row without needing to manually set it
def calc_steps(from_num, to_num):
array = np.arange(1, NUM_OF_SPECIES+1, dtype='float')
if(from_num == to_num): # If values match, fill with only one value
array[:] = from_num
else:
# fill with incremental steps
inc = (float(to_num) - from_num) / float(NUM_OF_SPECIES)
array = np.arange(from_num, to_num, inc, dtype='float')
return copy.deepcopy(array)
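# Worked example (a sketch assuming NUM_OF_SPECIES == 5): calc_steps(0.02, 0.12)
# computes inc = (0.12 - 0.02) / 5 = 0.02 and returns [0.02, 0.04, 0.06, 0.08, 0.10],
# while calc_steps(10, 10) simply returns [10.0, 10.0, 10.0, 10.0, 10.0].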
# Fill arrays for each species; these arrays determine the % of land for each species
lava_chance_array = calc_steps(from_lava, to_lava)
berry_chance_array = calc_steps(from_berry, to_berry)
mutate_chance_array = calc_steps(from_mut, to_mut)
generate_chance_array = calc_steps(from_gen, to_gen)
# Open the output file; it will be named after the given test name.
file_name = t_name + '.txt'
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
out_file = open(os.path.join(__location__,file_name), 'a')
print "\n", generate_chance_array
print mutate_chance_array
for i in range(NUM_OF_SPECIES): # MAIN LOOP OF SIMULATION RUNNING
mutation_var = [mutate_chance_array[i]]
mutation_var.append(generate_chance_array[i])
grid = hunger_grid(lava_chance_array[i], berry_chance_array[i])
full_graph[i] = model(progenitor, vis, grid, i, mutation_var,t_name, out_file)
full_graph_bk[i] = [grid.lava_chance, grid.berry_chance]
#close output file
out_file.close()
# DISPLAY VARIOUS GRAPHS AND PLOTS
tki_breakdown[:] /= NUM_OF_SPECIES
vis.graph(full_graph, full_graph_bk, t_name)
vis.ins_graph(tki_breakdown, t_name)
vis.chance_vs_fitness(full_graph, full_graph_bk, mutate_chance_array, generate_chance_array,t_name)
print("--- %s MODEL COMPLETE ---" % (t_name))
print("--- TIME TO COMPLETE MODEL: %s seconds ---" % (time.time() - start_time))
vis.show_plots()
def one_sim(seed_kat, grid, mut, gen, out_file, multi_cat=False):
"""Run one simulation for a number of time steps (default: 300).
First initialize a sim_manager with the first Kat agent,
then update it at each time step, finally taking the top
Kat and top fitness score and returning them.
"""
if not multi_cat:
sim_temp = sim_manager(seed_kat, grid, mut)
top_kat = seed_kat
else:
sim_temp = sim_manager(seed_kat, grid, mut, multi_cat=True)
top_kat = seed_kat[0]
for i in range(NUM_OF_INDIVIDUALS):
sim_temp.clear_grid(grid)
sim_temp.start_kat(i)
for j in range(STEPS_PER_SIM):
if(sim_temp.kats[i].dead == False):
sim_temp.update(i, j)
else:
break
avg_fitness = sim_temp.average_fitness()
top_kats = sim_temp.top_kats() # ARRAY FOR DISPLAYING FITNESS
tki_breakdown[gen] += sim_temp.tk_breakdown() # FOR BREAKDOWN OF INSTRUCTIONS
#file output
for k in top_kats:
out_file.write("\nFitness: ")
out_file.write(str(k.calculate_fitness()))
out_file.write(k.print_ins_1(False))
for kat in top_kats:
kat.reset()
kat_temp, score_temp = sim_temp.top_kat()
return copy.deepcopy(kat_temp), score_temp, sim_temp.return_playback(),\
avg_fitness, copy.deepcopy(top_kats)
def playback(vis, pb, best_kats, gen, specie, t_name):
if (STEP_SIZE == -1):
return
if (STEP_SIZE == 0):
vis.show(pb[-1], best_kats, gen, specie, t_name)
else:
for i in np.arange(0,len(pb), STEP_SIZE):
vis.show(pb[i], copy.deepcopy(best_kats), gen, specie, t_name)
def model(seed_kat, vis, grid, specie, mut,t_name, out_file):
"""Run multiple simulation of number of time steps each,
(default: 300 simulations).
In a loop, keep running each simulation of 300
number of time steps, append the top fitness score,
and after loops ended, graph the fitness score over
generations (simulations).
"""
top_kats = []
avg_kats = []
print "Species:",specie,"| Gen: 1"
seed_kat, fit_score, play, avg_fitness, seed_kats = one_sim(seed_kat, grid, mut, 0,out_file)
top_kats.append(fit_score)
avg_kats.append(avg_fitness)
playback(vis, play, seed_kat, 1, specie+1, t_name)
# file output
out_file.write("Species:")
out_file.write(str(specie))
out_file.write(" | Gen: 1\n")
if (NUM_OF_SPECIES > 1):
for i in np.arange(2, (NUM_OF_GENERATIONS+1)):
#file output
out_file.write("\nMODEL NAME: %s" % (t_name))
out_file.write("\n######### START: Species:")
out_file.write(str(specie+1))
out_file.write(" | Gen:")
out_file.write(str(i))
out_file.write("###########")
print "\nMODEL NAME: %s" % (t_name)
print "\n############### START: Species:",specie+1," OF ", NUM_OF_SPECIES ," | Gen:",i, "#######################\n"
temp_top = seed_kats
seed_kat, fit_score, play, avg_fitness, seed_kats = one_sim(seed_kats, grid, mut, (i-1),out_file, multi_cat=True)
if fit_score < top_kats[-1]:
seed_kats = temp_top
top_kats.append(top_kats[-1])
else:
top_kats.append(fit_score)
avg_kats.append(avg_fitness)
playback(vis, play,copy.deepcopy(seed_kats),i, specie+1, t_name)
print "\n############### END: Species:",specie+1," OF ", NUM_OF_SPECIES ," | Gen:",i, "#######################\n"
#file output
out_file.write("######### END: Species:")
out_file.write(str(specie+1))
out_file.write(" OF ")
out_file.write(str(NUM_OF_SPECIES))
out_file.write(" | Gen:")
out_file.write(str(i))
out_file.write("###########\n")
return copy.deepcopy(list(top_kats))
|
mit
| 4,120,856,940,087,549,000
| 37.601093
| 127
| 0.587061
| false
| 3.09553
| false
| false
| false
|
chapware/aircrack
|
scripts/airdrop-ng/install.py
|
1
|
3800
|
#!/usr/bin/env python
__version__ = "1.13.2010.21:00"
__author__ = "Bryan Chapman <bryanwchapman@gmail.com>"
'''
This is the installer file for airdrop-ng. It first checks for
different dependancies, such as make, svn, etc.
'''
import os, sys
from shutil import rmtree
if os.geteuid() != 0:
print "Installer must be root to run. \nPlease 'su' or 'sudo -i' and try again. \nExiting..."
sys.exit(1)
class checkDepend:
def __init__ (self):
clear = "\n" *100
print clear
print "Checking for dependancies used by the installer..."
self.a = 0
self.deps = ["make", "svn", "tar", "gcc"]
for depends in self.deps:
if (os.path.isfile("/usr/bin/" + depends) or os.path.isfile("/usr/sbin/" + depends) or os.path.isfile("/usr/local/bin/" + depends) or os.path.isfile("/usr/local/sbin/" + depends) or os.path.isfile ("/bin/" + depends) ) == True:
pass
else:
self.a = 1
print depends + " not installed."
if self.a == 0:
print "All dependancies installed! Continuing...\n"
print "#### NOTE: For Ubuntu based distro's, \npython2.6-dev must be installed. Please \nmake sure it is installed before continuing!\n"
else:
print "Please install dependancies. Exiting...\n\n"
exit()
class installAirdrop:
def __init__(self):
print "Welcome to the airdrop-ng installer!\nYou will be prompted for installing\nAirdrop-ng, lorcon, and pylorcon.\n"
yno = raw_input ("Continue with installer? (y/n): ")
if yno == "y":
pass
else:
print "Fine, be that way. Exiting..."
exit()
yno = raw_input ("Install airdrop-ng? (y/n): ")
if yno == "y":
self.install()
else:
print "airdrop-ng not installed. Continuing..."
pass
def install(self):
print "Build exist? "
if os.path.isdir("build"):
rmtree("build") # imported from shutil, or shutil.rmtree()
print "File exists. Cleaning it..."
os.mkdir ("build")
else:
os.mkdir ("build")
print "Didn't exist. Creating..."
# moves everything to build/. This is to keep everything clean,
# and not clutter up the directory.
os.system ("cp airdrop-ng.py build/ && cp -r lib build/ && cp docs/airdrop-ng.1 build/")
print "Files copied. Now, moving to directory..."
os.chdir ("build")
if os.path.isdir("/usr/lib/airdrop-ng") == True:
rmtree ("/usr/lib/airdrop-ng")
print "Moving airdrop-ng to /usr/bin, lib to \n/usr/lib/airdrop-ng, and installing man pages..."
os.system ("cp airdrop-ng.py /usr/bin/airdrop-ng && cp -r lib /usr/lib/airdrop-ng && cp airdrop-ng.1 /usr/share/man/man1/")
#os.chdir ("..")
print "airdrop-ng installed! =)"
class installLorcon:
def __init__(self):
yno = raw_input ("Would you like to install lorcon? (y/n): ")
if yno == "y":
print "Running svn co http://802.11ninja.net/svn/lorcon/branch/lorcon-old. This may take a while..."
os.system ("svn co http://802.11ninja.net/svn/lorcon/branch/lorcon-old")
os.chdir("lorcon-old")
os.system ("./configure && make && make install")
print "Creating symlinks..."
os.system ("ln -s /usr/local/lib/liborcon-1.0.0.so /usr/lib")
os.chdir("..")
else:
print "Lorcon wasn't installed. "
class installPylorcon:
def __init__(self):
yno = raw_input ("Would you like to install pylorcon? (y/n): ")
if yno == "y":
import urllib
urllib.urlretrieve("http://pylorcon.googlecode.com/files/pylorcon-3.tar.bz2", "pylorcon-3.tar.bz2")
os.system ("tar -xvf pylorcon-3.tar.bz2")
os.chdir ("pylorcon")
os.system ("python setup.py install")
os.chdir("..")
# What actually runs the classes
checkDepend()
installAirdrop()
installLorcon()
installPylorcon()
yno = raw_input ("Clean up? (y/n): ")
if yno == "y":
os.chdir("..")
if os.path.isdir("build") == True:
rmtree("build")
print "Operation(s) complete! May the source be with you. =) "
sys.exit()
|
gpl-2.0
| 3,112,824,450,673,922,600
| 30.147541
| 230
| 0.649211
| false
| 2.810651
| false
| false
| false
|
ppries/tensorflow
|
tensorflow/contrib/framework/python/ops/variables.py
|
1
|
25287
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Variable functions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from tensorflow.contrib.framework.python.ops import add_arg_scope as contrib_add_arg_scope
from tensorflow.contrib.framework.python.ops import gen_variable_ops
from tensorflow.contrib.util import loader
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.framework import device as tf_device
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.platform import resource_loader
from tensorflow.python.training import saver as tf_saver
from tensorflow.python.training import training_util
__all__ = ['add_model_variable',
'assert_global_step',
'assert_or_get_global_step',
'assign_from_checkpoint',
'assign_from_checkpoint_fn',
'assign_from_values',
'assign_from_values_fn',
'create_global_step',
'filter_variables',
'get_global_step',
'get_or_create_global_step',
'get_local_variables',
'get_model_variables',
'get_unique_variable',
'get_variables_by_name',
'get_variables_by_suffix',
'get_variables_to_restore',
'get_variables',
'local_variable',
'model_variable',
'variable',
'VariableDeviceChooser',
'zero_initializer']
def zero_initializer(ref, use_locking=True, name="zero_initializer"):
"""Initialize 'ref' with all zeros, ref tensor should be uninitialized.
If already initialized, you will get ValueError. This op is intended to
save memory during initialization.
Args:
ref: ref of the tensor need to be zero initialized.
name: optional name for this operation.
Returns:
ref that initialized.
Raises:
ValueError: If ref tensor is initialized.
"""
loader.load_op_library(
resource_loader.get_path_to_datafile("_variable_ops.so"))
return gen_variable_ops.zero_initializer(ref, name=name)
def assert_global_step(global_step_tensor):
training_util.assert_global_step(global_step_tensor)
def assert_or_get_global_step(graph=None, global_step_tensor=None):
"""Verifies that a global step tensor is valid or gets one if None is given.
If `global_step_tensor` is not None, check that it is a valid global step
tensor (using `assert_global_step`). Otherwise find a global step tensor using
`get_global_step` and return it.
Args:
graph: The graph to find the global step tensor for.
global_step_tensor: The tensor to check for suitability as a global step.
If None is given (the default), find a global step tensor.
Returns:
A tensor suitable as a global step, or `None` if none was provided and none
was found.
"""
if global_step_tensor is None:
# Get the global step tensor the same way the supervisor would.
global_step_tensor = get_global_step(graph)
else:
assert_global_step(global_step_tensor)
return global_step_tensor
def get_global_step(graph=None):
return training_util.get_global_step(graph)
def create_global_step(graph=None):
"""Create global step tensor in graph.
Args:
graph: The graph in which to create the global step. If missing, use default
graph.
Returns:
Global step tensor.
Raises:
ValueError: if global step key is already defined.
"""
graph = ops.get_default_graph() if graph is None else graph
if get_global_step(graph) is not None:
raise ValueError('"global_step" already exists.')
# Create in proper graph and base name_scope.
with graph.as_default() as g, g.name_scope(None):
collections = [ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.GLOBAL_STEP]
return variable(ops.GraphKeys.GLOBAL_STEP, shape=[], dtype=dtypes.int64,
initializer=init_ops.zeros_initializer, trainable=False,
collections=collections)
def get_or_create_global_step(graph=None):
"""Returns and create (if necessary) the global step variable.
Args:
graph: The graph in which to create the global step. If missing, use default
graph.
Returns:
the tensor representing the global step variable.
"""
graph = ops.get_default_graph() if graph is None else graph
globalstep = get_global_step(graph)
if globalstep is None:
globalstep = create_global_step(graph)
return globalstep
def local_variable(initial_value, validate_shape=True, name=None):
"""Create variable and add it to `GraphKeys.LOCAL_VARIABLES` collection.
Args:
initial_value: See variables.Variable.__init__.
validate_shape: See variables.Variable.__init__.
name: See variables.Variable.__init__.
Returns:
New variable.
"""
return variables.Variable(
initial_value, trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
validate_shape=validate_shape, name=name)
@contrib_add_arg_scope
def variable(name, shape=None, dtype=None, initializer=None,
regularizer=None, trainable=True, collections=None,
caching_device=None, device=None,
partitioner=None, custom_getter=None):
"""Gets an existing variable with these parameters or creates a new one.
Args:
name: the name of the new or existing variable.
shape: shape of the new or existing variable.
dtype: type of the new or existing variable (defaults to `DT_FLOAT`).
initializer: initializer for the variable if one is created.
regularizer: a (Tensor -> Tensor or None) function; the result of
applying it on a newly created variable will be added to the collection
GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
collections: A list of collection names to which the Variable will be added.
If None it would default to `tf.GraphKeys.GLOBAL_VARIABLES`.
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device.
device: Optional device to place the variable. It can be an string or a
function that is called to get the device for the variable.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and dtype of the `Variable` to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
custom_getter: Callable that allows overwriting the internal
get_variable method and has to have the same signature.
Returns:
The created or existing variable.
"""
collections = list(collections or [ops.GraphKeys.GLOBAL_VARIABLES])
# Remove duplicates
collections = set(collections)
getter = variable_scope.get_variable
if custom_getter is not None:
getter = custom_getter
with ops.device(device or ''):
return getter(name, shape=shape, dtype=dtype,
initializer=initializer,
regularizer=regularizer,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=partitioner)
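# Example usage (an illustrative sketch only; the variable name, shape and device
# below are made up for this comment):
#
#   weights = variable('weights', shape=[784, 10],
#                      initializer=init_ops.truncated_normal_initializer(),
#                      device='/cpu:0')
#
# which creates (or reuses) a variable in the GLOBAL_VARIABLES collection,
# pinned to the CPU.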
@contrib_add_arg_scope
def model_variable(name, shape=None, dtype=dtypes.float32, initializer=None,
regularizer=None, trainable=True, collections=None,
caching_device=None, device=None, partitioner=None,
custom_getter=None):
"""Gets an existing model variable with these parameters or creates a new one.
Args:
name: the name of the new or existing variable.
shape: shape of the new or existing variable.
dtype: type of the new or existing variable (defaults to `DT_FLOAT`).
initializer: initializer for the variable if one is created.
regularizer: a (Tensor -> Tensor or None) function; the result of
applying it on a newly created variable will be added to the collection
GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
collections: A list of collection names to which the Variable will be added.
Note that the variable is always also added to the
`GraphKeys.GLOBAL_VARIABLES` and `GraphKeys.MODEL_VARIABLES` collections.
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device.
device: Optional device to place the variable. It can be an string or a
function that is called to get the device for the variable.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and dtype of the `Variable` to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
custom_getter: Callable that allows overwriting the internal
get_variable method and has to have the same signature.
Returns:
The created or existing variable.
"""
collections = list(collections or [])
collections += [ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.MODEL_VARIABLES]
var = variable(name, shape=shape, dtype=dtype,
initializer=initializer, regularizer=regularizer,
trainable=trainable, collections=collections,
caching_device=caching_device, device=device,
partitioner=partitioner, custom_getter=custom_getter)
return var
def add_model_variable(var):
"""Adds a variable to the `GraphKeys.MODEL_VARIABLES` collection.
Args:
var: a variable.
"""
if var not in ops.get_collection(ops.GraphKeys.MODEL_VARIABLES):
ops.add_to_collection(ops.GraphKeys.MODEL_VARIABLES, var)
def get_variables(scope=None, suffix=None,
collection=ops.GraphKeys.GLOBAL_VARIABLES):
"""Gets the list of variables, filtered by scope and/or suffix.
Args:
scope: an optional scope for filtering the variables to return. Can be a
variable scope or a string.
suffix: an optional suffix for filtering the variables to return.
collection: in which collection search for. Defaults to
`GraphKeys.GLOBAL_VARIABLES`.
Returns:
a list of variables in collection with scope and suffix.
"""
if isinstance(scope, variable_scope.VariableScope):
scope = scope.name
if suffix is not None:
if ':' not in suffix:
suffix += ':'
scope = (scope or '') + '.*' + suffix
return ops.get_collection(collection, scope)
def get_model_variables(scope=None, suffix=None):
"""Gets the list of model variables, filtered by scope and/or suffix.
Args:
scope: an optional scope for filtering the variables to return.
suffix: an optional suffix for filtering the variables to return.
Returns:
a list of variables in collection with scope and suffix.
"""
return get_variables(scope, suffix, ops.GraphKeys.MODEL_VARIABLES)
def get_local_variables(scope=None, suffix=None):
"""Gets the list of local variables, filtered by scope and/or suffix.
Args:
scope: an optional scope for filtering the variables to return.
suffix: an optional suffix for filtering the variables to return.
Returns:
a list of variables in collection with scope and suffix.
"""
return get_variables(scope, suffix, ops.GraphKeys.LOCAL_VARIABLES)
def get_variables_to_restore(include=None, exclude=None):
"""Gets the list of the variables to restore.
Args:
include: an optional list/tuple of scope strings for filtering which
variables from the VARIABLES collection to include. None would include all
the variables.
exclude: an optional list/tuple of scope strings for filtering which
variables from the VARIABLES collection to exclude. If None, it would not
exclude any.
Returns:
a list of variables to restore.
Raises:
TypeError: include or exclude is provided but is not a list or a tuple.
"""
if include is None:
# Include all variables.
vars_to_include = get_variables()
else:
if not isinstance(include, (list, tuple)):
raise TypeError('include is provided but is not a list or a tuple.')
vars_to_include = []
for scope in include:
vars_to_include += get_variables(scope)
vars_to_exclude = set()
if exclude is not None:
if not isinstance(exclude, (list, tuple)):
raise TypeError('exclude is provided but is not a list or a tuple.')
for scope in exclude:
vars_to_exclude |= set(get_variables(scope))
# Exclude the variables in vars_to_exclude
return [v for v in vars_to_include if v not in vars_to_exclude]
def get_variables_by_suffix(suffix, scope=None):
"""Gets the list of variables that end with the given suffix.
Args:
suffix: suffix for filtering the variables to return.
scope: an optional scope for filtering the variables to return.
Returns:
a copied list of variables with the given name and prefix.
"""
return get_variables(scope=scope, suffix=suffix)
def get_variables_by_name(given_name, scope=None):
"""Gets the list of variables that were given that name.
Args:
given_name: name given to the variable without any scope.
scope: an optional scope for filtering the variables to return.
Returns:
a copied list of variables with the given name and scope.
"""
suffix = '/' + given_name + ':|^' + given_name + ':'
return get_variables(scope=scope, suffix=suffix)
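# Example (hypothetical variable names): get_variables_by_name('weights',
# scope='conv1') builds the suffix '/weights:|^weights:', so it matches a
# variable named 'conv1/weights:0' but not 'conv1/other_weights:0'.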
def get_unique_variable(var_op_name):
"""Gets the variable uniquely identified by that var_op_name.
Args:
var_op_name: the full name of the variable op, including the scope.
Returns:
a tensorflow variable.
Raises:
ValueError: if no variable uniquely identified by the name exists.
"""
candidates = get_variables(scope=var_op_name)
if not candidates:
raise ValueError("Couldn't find variable %s" % var_op_name)
for candidate in candidates:
if candidate.op.name == var_op_name:
return candidate
raise ValueError('Variable %s does not uniquely identify a variable' % var_op_name)
def assign_from_values(var_names_to_values):
"""Creates an assignment operation from a given mapping.
This function provides a mechanism for performing assignment of variables
to values in a way that does not fill the graph with large assignment values.
Args:
var_names_to_values: A map from variable names to values.
Returns:
assign_op: An `Operation` that assigns each of the given variables to the
requested values.
feed_dict: The feed dictionary to use when evaluating `assign_op`.
Raises:
ValueError: if any of the given variable names were not found.
"""
feed_dict = {}
assign_ops = []
for var_name in var_names_to_values:
var_value = var_names_to_values[var_name]
var = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES, var_name)
if not var:
raise ValueError("Variable %s wasn't found" % var_name)
elif len(var) > 1:
# tf.get_collection is just a filter on the prefix: find the exact match:
found = False
for v in var:
if v.op.name == var_name:
var = v
found = True
break
if not found:
raise ValueError("Variable %s doesn't uniquely identify a variable" % var_name)
else:
var = var[0]
# TODO(nsilberman): ensure placeholder and assign are on the same device.
# Assign a placeholder to the value that will be filled later.
placeholder_name = 'placeholder/' + var.op.name
placeholder_value = array_ops.placeholder(
dtype=var.dtype.base_dtype,
shape=var.get_shape(),
name=placeholder_name)
assign_ops.append(var.assign(placeholder_value))
feed_dict[placeholder_value] = var_value.reshape(var.get_shape())
assign_op = control_flow_ops.group(*assign_ops)
return assign_op, feed_dict
def assign_from_values_fn(var_names_to_values):
"""Returns a function that assigns specific variables from the given values.
This function provides a mechanism for performing assignment of variables
to values in a way that does not fill the graph with large assignment values.
Args:
var_names_to_values: A map from variable names to values.
Returns:
A function that takes a single argument, a `tf.Session`, that applies the
assignment operation.
Raises:
ValueError: if any of the given variable names were not found.
"""
assign_op, feed_dict = assign_from_values(var_names_to_values)
def callback(session):
return session.run(assign_op, feed_dict)
return callback
# TODO(nsilberman): add flag to load exponential moving averages instead
def assign_from_checkpoint(model_path, var_list):
"""Creates an operation to assign specific variables from a checkpoint.
Args:
model_path: The full path to the model checkpoint. To get latest checkpoint
use `model_path = tf.train.latest_checkpoint(checkpoint_dir)`
var_list: A list of `Variable` objects or a dictionary mapping names in the
checkpoint to the corresponding variables to initialize. If empty or
None, it would return no_op(), None.
Returns:
the restore_op and the feed_dict that need to be run to restore var_list.
Raises:
ValueError: If the checkpoint specified at `model_path` is missing one of
the variables in `var_list`.
"""
reader = pywrap_tensorflow.NewCheckpointReader(model_path)
if isinstance(var_list, (tuple, list)):
var_list = {var.op.name: var for var in var_list}
feed_dict = {}
assign_ops = []
for checkpoint_var_name in var_list:
var = var_list[checkpoint_var_name]
if not reader.has_tensor(checkpoint_var_name):
raise ValueError(
'Checkpoint is missing variable [%s]' % checkpoint_var_name)
var_value = reader.get_tensor(checkpoint_var_name)
placeholder_name = 'placeholder/' + var.op.name
placeholder_value = array_ops.placeholder(
dtype=var.dtype.base_dtype,
shape=var.get_shape(),
name=placeholder_name)
assign_ops.append(var.assign(placeholder_value))
if var.get_shape() != var_value.shape:
raise ValueError(
'Total size of new array must be unchanged for %s '
'lh_shape: [%s], rh_shape: [%s]'
% (checkpoint_var_name, str(var_value.shape), str(var.get_shape())))
feed_dict[placeholder_value] = var_value.reshape(var.get_shape())
assign_op = control_flow_ops.group(*assign_ops)
return assign_op, feed_dict
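# Illustrative sketch (added; not part of the original module): restore the
# given variables from a checkpoint. The default checkpoint path is
# hypothetical; `variables_to_restore` is a list of `Variable` objects or a
# dict mapping checkpoint names to variables.
def _example_assign_from_checkpoint(variables_to_restore,
                                    model_path='/tmp/model.ckpt'):
  import tensorflow as tf
  restore_op, restore_feed = assign_from_checkpoint(model_path,
                                                    variables_to_restore)
  with tf.Session() as sess:
    sess.run(restore_op, feed_dict=restore_feed)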
def assign_from_checkpoint_fn(model_path, var_list, ignore_missing_vars=False,
reshape_variables=False):
"""Returns a function that assigns specific variables from a checkpoint.
Args:
    model_path: The full path to the model checkpoint. To get the latest
      checkpoint, use `model_path = tf.train.latest_checkpoint(checkpoint_dir)`.
    var_list: A list of `Variable` objects or a dictionary mapping names in the
      checkpoint to the corresponding variables to initialize. If empty or None,
      it would return no_op(), None.
ignore_missing_vars: Boolean, if True it would ignore variables missing in
the checkpoint with a warning instead of failing.
reshape_variables: Boolean, if True it would automatically reshape variables
      which are of different shape than the ones stored in the checkpoint but
which have the same number of elements.
Returns:
    A function that takes a single argument, a `tf.Session`, and applies the
    assignment operation.
Raises:
ValueError: If the checkpoint specified at `model_path` is missing one of
the variables in `var_list`.
"""
if ignore_missing_vars:
reader = pywrap_tensorflow.NewCheckpointReader(model_path)
if isinstance(var_list, dict):
var_dict = var_list
else:
var_dict = {var.op.name: var for var in var_list}
available_vars = {}
for var in var_dict:
if reader.has_tensor(var):
available_vars[var] = var_dict[var]
else:
logging.warning(
'Variable %s missing in checkpoint %s', var, model_path)
var_list = available_vars
saver = tf_saver.Saver(var_list, reshape=reshape_variables)
def callback(session):
saver.restore(session, model_path)
return callback
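# Illustrative sketch (added; not part of the original module): the returned
# function restores `var_list` in one call; with ignore_missing_vars=True any
# variable absent from the (hypothetical) checkpoint is skipped with a warning.
def _example_assign_from_checkpoint_fn(variables_to_restore,
                                       model_path='/tmp/model.ckpt'):
  import tensorflow as tf
  init_fn = assign_from_checkpoint_fn(model_path, variables_to_restore,
                                      ignore_missing_vars=True)
  with tf.Session() as sess:
    init_fn(sess)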
class VariableDeviceChooser(object):
"""Device chooser for variables.
  When using a parameter server, it assigns variables to parameter-server tasks
  in a round-robin fashion. When not using a parameter server, it allows GPU or
  CPU placement.
"""
def __init__(self,
num_tasks=0,
job_name='ps',
device_type='CPU',
device_index=0):
"""Initialize VariableDeviceChooser.
Usage:
To use with 2 parameter servers:
VariableDeviceChooser(2)
To use without parameter servers:
VariableDeviceChooser()
VariableDeviceChooser(device_type='GPU') # For GPU placement
Args:
num_tasks: number of tasks.
job_name: String, a name for the parameter server job.
device_type: Optional device type string (e.g. "CPU" or "GPU")
device_index: int. Optional device index. If left
unspecified, device represents 'any' device_index.
"""
self._job_name = job_name
self._device_type = device_type
self._device_index = device_index
self._num_tasks = num_tasks
self._next_task_id = 0
def __call__(self, op):
device_spec = tf_device.DeviceSpec(device_type=self._device_type,
device_index=self._device_index)
if self._num_tasks > 0:
task_id = self._next_task_id
self._next_task_id = (self._next_task_id + 1) % self._num_tasks
device_spec.job = self._job_name
device_spec.task = task_id
return device_spec.to_string()
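# Illustrative sketch (added; not part of the original class): because the
# chooser is callable on an op, it can be used directly as a device function
# with `tf.device` in graph mode. The two-task parameter-server setup below is
# hypothetical.
def _example_variable_device_chooser():
  import tensorflow as tf
  chooser = VariableDeviceChooser(num_tasks=2)  # round-robin over '/job:ps'
  with tf.device(chooser):
    return tf.get_variable('example_w', shape=[4, 4])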
def filter_variables(var_list, include_patterns=None, exclude_patterns=None,
reg_search=True):
"""Filter a list of variables using regular expressions.
First includes variables according to the list of include_patterns.
Afterwards, eliminates variables according to the list of exclude_patterns.
For example, one can obtain a list of variables with the weights of all
convolutional layers (depending on the network definition) by:
```python
variables = tf.contrib.framework.get_model_variables()
conv_weight_variables = tf.contrib.framework.filter_variables(
variables,
include_patterns=['Conv'],
exclude_patterns=['biases', 'Logits'])
```
Args:
var_list: list of variables.
include_patterns: list of regular expressions to include. Defaults to None,
which means all variables are selected according to the include rules.
A variable is included if it matches any of the include_patterns.
exclude_patterns: list of regular expressions to exclude. Defaults to None,
which means all variables are selected according to the exclude rules.
A variable is excluded if it matches any of the exclude_patterns.
reg_search: boolean. If True (default), performs re.search to find matches
(i.e. pattern can match any substring of the variable name). If False,
performs re.match (i.e. regexp should match from the beginning of the
variable name).
Returns:
filtered list of variables.
"""
if reg_search:
reg_exp_func = re.search
else:
reg_exp_func = re.match
# First include variables.
if include_patterns is None:
included_variables = list(var_list)
else:
included_variables = []
for var in var_list:
if any(reg_exp_func(ptrn, var.name) for ptrn in include_patterns):
included_variables.append(var)
# Afterwards, exclude variables.
if exclude_patterns is None:
filtered_variables = included_variables
else:
filtered_variables = []
for var in included_variables:
if not any(reg_exp_func(ptrn, var.name) for ptrn in exclude_patterns):
filtered_variables.append(var)
return filtered_variables
|
apache-2.0
| -135,211,928,952,083,820
| 35.701016
| 90
| 0.693835
| false
| 4.16384
| false
| false
| false
|
Daniel-Brosnan-Blazquez/DIT-100
|
debugging/trajectory_planning_profiles/trapezoidal-profile.py
|
1
|
7690
|
import numpy
import time
from matplotlib import pyplot
def main (params):
angle = params['p0']
vel = params['v0']
sign = params['sign']
# Plan the trajectory if it is not planned
T = 0
Ta = 0
Td = 0
dt = params['dt']
if not params['trajectory']:
# Maximum acceleration and velocity values in degrees/s^2 and
# degrees/s respectively
amax = params['acc_limit_d']*sign*(-1)
vmax = params['vel_limit']*sign*(-1)
v0 = vel
h = angle
vlim = vmax
# Check if the trajectory is feasible
print "abs (amax*h) >= v0**2/2.0 = %s" % (abs (amax*h) >= v0**2/2.0)
if abs (amax*h) >= v0**2/2.0:
# The trajectory is feasible
# Check if the maximum value of velocity can be reached
if abs (h*amax) > vmax**2 - v0**2/2.0:
# The maximum value of velocity can be reached
Ta = (vmax - v0)/amax
Td = vmax/amax
term1 = abs (h/vmax)
term2 = (vmax/(2*amax)) * (1 - (v0/vmax))**2
term3 = (vmax/(2*amax))
T = term1 + term2 + term3
else:
# The maximum value of velocity can't be reached
vlim = ((abs (h * amax) + v0**2/2.0)**(1/2.0))*sign*(-1)
Ta = abs ((vlim - v0)/amax)
Td = abs (vlim/amax)
T = Ta + Td
# end if
# The time has to be positive
Ta = abs (Ta)
Td = abs (Td)
T = abs (T)
print "Ta = %s, Td = %s" % (Ta, Td)
params['trajectory'] = True
params['T'] = T
params['Ta'] = Ta
params['Td'] = Td
params['T_sign'] = sign*(-1)
params['vv'] = vlim
# if Ta > dt and Td > dt:
# params['trajectory'] = True
# params['T'] = T
# params['Ta'] = Ta
# params['Td'] = Td
# params['T_sign'] = sign*(-1)
# params['vv'] = vlim
# else:
# Ta = 0
# Td = 0
# T = 0
# end if
# end if
return
def plot (params):
t = 0
interval = params['dt']
# Sign
sign = params['T_sign']
# Maximum values
amax = params['acc_limit_d']*sign
vmax = params['vel_limit']*sign
# Buffers to store the motion
positions = []
vels = []
accs = []
# Initial values of the motion
v0 = params['v0']
p0 = params['p0']
vv = params['vv']
T = params['T']
Ta = params['Ta']
Td = params['Td']
# Acceleration phase
while t < Ta:
# Position
pos = p0 + v0*t + ((vv - v0)/(2*Ta))*t**2
positions.append (pos)
# Velocity
vel = v0 + ((vv - v0)/(Ta))*t
vels.append (vel)
# Acceleration
acc = (vv - v0)/Ta
accs.append (acc)
t += interval
# end while
# Constant velocity phase
while t < (T - Td):
# Position
pos = p0 + v0*(Ta/2.0) + vv*(t-(Ta/2.0))
positions.append (pos)
# Velocity
vel = vv
vels.append (vel)
# Acceleration
acc = 0
accs.append (acc)
t += interval
# end while
# Deceleration phase
while t < T:
# Position
pos = 0 - (vv/(2*Td))*(T-t)**2
positions.append (pos)
# Velocity
vel = (vv/Td)*(T-t)
vels.append (vel)
# Acceleration
acc = -(vv/Td)
accs.append (acc)
t += interval
# end while
fig = pyplot.figure (1, figsize = (20,10))
s = fig.add_subplot (311)
p, = s.plot(positions)
s.grid (True)
s.set_title ("position")
s = fig.add_subplot (312)
p, = s.plot(vels)
s.grid (True)
s.set_title ("velocity")
s = fig.add_subplot (313)
p, = s.plot(accs)
s.grid (True)
s.set_title ("acceleration")
pyplot.show ()
pyplot.close (1)
return
if __name__ == "__main__":
params = {}
# Period
params['dt'] = 0.015
# Flag to indicate if it is necessary to compute the trajectory
# (not needed here)
params['trajectory'] = False
# Velocity, acceleration and jerk limits in degrees/s^2
params['vel_limit'] = 150.0
rad_to_degrees = 180.0/numpy.pi
radius = 0.3
# m/s^2
params['acc_limit'] = 7.5
# degrees/s^2
params['acc_limit_d'] = (params['acc_limit']*rad_to_degrees)/radius
# # p0 = 0. Checked, trajectory unfeasible
# # p0
# params['p0'] = 0.0
# # v0
# params['v0'] = 100.0
# p0 > 50 v0 = 0. Checked, trajectory feasible
# p0
params['p0'] = 80.0
# v0
params['v0'] = 0.0
# # p0 > 50 v0 < limit. Checked, trajectory feasible
# # p0
# params['p0'] = 80.0
# # v0
# params['v0'] = 50.0
# # p0 > 50 v0 = limit. Checked, trajectory feasible
# # p0
# params['p0'] = 80.0
# # v0
# params['v0'] = 100.0
# # p0 > 50 v0 > limit. Checked, trajectory feasible
# # p0
# params['p0'] = 80.0
# # v0
# params['v0'] = -150.0
# # p0 < 50 p0 > 0 v0 = 0. Checked, trajectory feasible
# # p0
# params['p0'] = 20.0
# # v0
# params['v0'] = 0.0
# # p0 < 50 p0 > 0 v0 < limit. REVIEW IT!!!!!!!!!
# # p0
# params['p0'] = 20.0
# # v0
# params['v0'] = 50.0
# # p0 < 50 p0 > 0 v0 = limit. Checked, trajectory feasible
# # p0
# params['p0'] = 20.0
# # v0
# params['v0'] = 100.0
# # p0 < 50 p0 > 0 v0 > limit. Checked, trajectory feasible
# # p0
# params['p0'] = 20.0
# # v0
# params['v0'] = 150.0
# # p0 < -50 v0 = 0. Checked, trajectory feasible
# # p0
# params['p0'] = -80.0
# # v0
# params['v0'] = 0.0
# # p0 < -50 v0 < limit. Checked, trajectory feasible
# # p0
# params['p0'] = -80.0
# # v0
# params['v0'] = 50.0
# # p0 < -50 v0 = limit. Checked, trajectory feasible
# # p0
# params['p0'] = -80.0
# # v0
# params['v0'] = 100.0
# # p0 < -50 v0 > limit. Checked, trajectory feasible
# # p0
# params['p0'] = -80.0
# # v0
# params['v0'] = 150.0
# # p0 > -50 p0 < 0 v0 = 0. Checked, trajectory feasible
# # p0
# params['p0'] = -20.0
# # v0
# params['v0'] = 0.0
# # p0 > -50 p0 < 0 v0 < limit. Checked, trajectory feasible
# # p0
# params['p0'] = -20.0
# # v0
# params['v0'] = -50.0
# # p0 > -50 p0 < 0 v0 = limit. Checked, trajectory feasible
# # p0
# params['p0'] = -20.0
# # v0
# params['v0'] = 100.0
# # p0 > -50 p0 < 0 v0 > limit. Checked, trajectory feasible
# # p0
# params['p0'] = -20.0
# # v0
# params['v0'] = 150.0
# # p0 > -50 p0 < 0 v0 > limit. Checked, trajectory feasible
# # p0
# params['p0'] = -20.0
# # v0
# params['v0'] = 200.0
# sign
params['sign'] = 1
# params['sign'] = -1
# # p0
# params['p0'] = 11.0962258945
# # params['p0'] = 22.0
# # v0
# params['v0'] = 71.19
# # params['v0'] = 0.0
main(params)
print "Trajectory performed: %s" % params['trajectory']
if params['trajectory']:
T = params['T']
Ta = params['Ta']
Td = params['Td']
print "T = %s, Ta = %s, Td = %s" %(T, Ta, Td)
plot (params)
|
gpl-3.0
| -1,022,844,989,781,801,000
| 23.258675
| 76
| 0.447854
| false
| 3.067411
| false
| false
| false
|
Oreder/PythonSelfStudy
|
Exe_18.py
|
1
|
8002
|
# Creating class
#
# +++ Syntax +++
# class ClassName:
# 'Optional class documentation string'
# class_suite
#
class Employee:
'common base class for all employees'
empCount = 0
def __init__(self, name, salary):
self.name = name
self.salary = salary
Employee.empCount += 1
def displayCount(self):
print("Total Employee:", empCount)
def display(self):
print("Name:", self.name, "with Salary:", self.salary)
# Here,
# The variable empCount is a class variable whose value is shared among all
# instances of this class. It can be accessed as Employee.empCount from
# inside the class or outside the class.
# The first method, __init__(), is a special method called the class
# constructor or initialization method, which Python calls when you create a new
# instance of this class.
# You declare other class methods like normal functions with the exception that
# the first argument to each method is self. Python adds the self argument to
# the list for you; you do not need to include it when you call the methods.
#
# Creating Instance Objects
"This would create first object of Employee class"
emp1 = Employee("Zara", 2000)
"This would create second object of Employee class"
emp2 = Employee("Manni", 5000)
# Accessing Attributes
emp1.display()
emp2.display()
print("Total Employee:", Employee.empCount)
# We can add, remove, or modify attributes of classes and objects at any time
emp1.age = 7 # Add an 'age' attribute
print(emp1.age)
emp1.age = 8 # Modify 'age' attribute
print(emp1.age)
del emp1.age # Delete 'age' attribute
# Instead of using the normal statements to access attributes, we can use the
# following functions:
#
# The getattr(obj, name[, default]) : to access the attribute of object.
# The hasattr(obj,name) : to check if an attribute exists or not.
# The setattr(obj,name,value) : to set an attribute.
# If attribute does not exist, then it would be created.
# The delattr(obj, name) : to delete an attribute.
print(hasattr(emp1, 'age')) # Returns true if 'age' attribute exists
print(setattr(emp1, 'age', 8)) # Set attribute 'age' at 8
print(getattr(emp1, 'age')) # Returns value of 'age' attribute
print(delattr(emp1, 'age')) # Delete attribute 'age'
# +++ Built-In Class Attributes
# Every Python class keeps the following built-in attributes, and they can be
# accessed using the dot operator like any other attribute:
# __dict__: Dictionary containing the class's namespace.
# __doc__: Class documentation string or none, if undefined.
# __name__: Class name.
# __module__: Module name in which the class is defined. This attribute is
# "__main__" in interactive mode.
# __bases__: A possibly empty tuple containing the base classes, in the order
# of their occurrence in the base class list.
print("Employee.__doc__:", Employee.__doc__)
print("Employee.__name__:", Employee.__name__)
print("Employee.__module__:", Employee.__module__)
print("Employee.__bases__:", Employee.__bases__)
print("Employee.__dict__:", Employee.__dict__)
# +++ Destroying Objects (Garbage Collection)
# The __del__() destructor prints the class name of an instance that is about to be destroyed.
class Point:
def __init__(self, x = 0, y = 0):
self.x = x
self.y = y
def __del__(self):
class_name = self.__class__.__name__
print(class_name, "is destroyed!")
p1 = Point()
p2 = p1
p3 = p1
print("Id(P1):", id(p1))
print("Id(P2):", id(p2))
print("Id(P3):", id(p3))
del p1
del p2
del p3
# +++ Class Inheritance +++
# ---------------------------------------------------------
# Syntax
# class SubClassName (ParentClass1[, ParentClass2, ...]):
# 'Optional class documentation string'
# class_suite
# ---------------------------------------------------------
class Parent: # define parent class
parentAttr = 100
def __init__(self):
print("Calling parent constructor")
def parentMethod(self):
print('Calling parent method')
def setAttr(self, attr):
Parent.parentAttr = attr
def getAttr(self):
print("Parent attribute:", Parent.parentAttr)
class Child(Parent): # define child class
def __init__(self):
print("Calling child constructor")
def childMethod(self):
print('Calling child method')
c = Child() # instance of child
c.childMethod() # child calls its method
c.parentMethod() # calls parent's method
c.setAttr(200) # again call parent's method
c.getAttr() # again call parent's method
# In a similar way, we can derive a class from multiple parent classes as
# follows (a runnable sketch is added after the box below):
# -----------------------------------------------
# class A: # define class A |
# ..... |
# class B: # define class B |
# ..... |
# class C(A, B): # subclass of A and B |
# ..... |
# -----------------------------------------------
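# A small runnable illustration of multiple inheritance (added example; the
# class names A, B and C below are hypothetical, not part of the original
# lesson):
class A: # first parent
    def methodA(self):
        print("Calling method of class A")
class B: # second parent
    def methodB(self):
        print("Calling method of class B")
class C(A, B): # C inherits from both A and B
    pass
obj = C()
obj.methodA() # resolved via class A
obj.methodB() # resolved via class B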
# +++ Overriding Methods +++
class Parent:
def myMethod(self):
print("Calling parent method")
class Child(Parent):
def myMethod(self):
print("Calling child method")
c = Child()
c.myMethod()
# +++ Base Overloading Methods
# ===========================================================
# Sr. No. # Method, Description and Sample Call #
# ===========================================================
# # __init__ ( self [,args...] ) #
# 1 # Constructor (with any optional arguments) #
# # Sample Call : obj = className(args) #
# -----------------------------------------------------------
# # __del__( self ) #
# 2 # Destructor, deletes an object #
# # Sample Call : del obj #
# -----------------------------------------------------------
# # __repr__( self ) #
# 3 # Evaluatable string representation #
# # Sample Call : repr(obj) #
# -----------------------------------------------------------
# # __str__( self ) #
# 4 # Printable string representation #
# # Sample Call : str(obj) #
# -----------------------------------------------------------
# # __cmp__ ( self, x ) #
# 5 # Object comparison #
# # Sample Call : cmp(obj, x) #
# ===========================================================
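# A short added illustration (not from the original lesson) of __repr__ vs
# __str__; the Pair class below is hypothetical. Note that __cmp__ exists only
# in Python 2; Python 3 uses the rich comparison methods (__eq__, __lt__, ...).
class Pair:
    def __init__(self, x, y):
        self.x = x
        self.y = y
    def __repr__(self):
        return "Pair(%r, %r)" % (self.x, self.y) # evaluatable representation
    def __str__(self):
        return "(%s, %s)" % (self.x, self.y) # printable representation
p = Pair(1, 2)
print(repr(p)) # Pair(1, 2)
print(str(p)) # (1, 2)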
# +++ Overloading Operators: using __add__ method
class Vector:
def __init__(self, a, b):
self.a = a
self.b = b
def __str__(self):
return 'Vector (%d, %d)' % (self.a, self.b)
def __add__(self, other):
return Vector(self.a + other.a, self.b + other.b)
v1 = Vector(2, 10)
v2 = Vector(5, -2)
print(v1 + v2)
# Data Hiding
class JustCounter:
__secretCount = 0
def count(self):
self.__secretCount += 1
print(self.__secretCount)
counter = JustCounter()
counter.count()
counter.count()
print(counter.__secretCount) # Error!
# When the above code is executed, it produces the following result:
# 1
# 2
# Traceback (most recent call last):
# File "Exe_18.py", line 225, in <module>
# print counter.__secretCount
# AttributeError: JustCounter instance has no attribute '__secretCount'
#
# Python protects those members by internally changing the name to include the class
# name. We can access such attributes as
# object._className__attrName
# If we replace the last line as follows, then it works for us:
print(counter._JustCounter__secretCount) # Worked!
|
mit
| 4,672,089,774,815,598,000
| 32.241667
| 94
| 0.543369
| false
| 3.916544
| false
| false
| false
|