| repo_name (stringlengths 5–100) | path (stringlengths 4–231) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 6–947k) | score (float64 0–0.34) | prefix (stringlengths 0–8.16k) | middle (stringlengths 3–512) | suffix (stringlengths 0–8.17k) |
|---|---|---|---|---|---|---|---|---|
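The prefix, middle, and suffix columns follow the layout commonly used for fill-in-the-middle (FIM) code-completion data: concatenating the three fields reproduces a contiguous slice of the original file. As a minimal sketch under that assumption (the helper names, sentinel tokens, and truncated field values below are illustrative, not defined by this dataset), a row could be reassembled or turned into a FIM-style training string like this:

```python
# Sketch only: helper names, sentinel strings, and the truncated example
# values are assumptions for illustration, not part of the dataset spec.


def reassemble(row):
    """Recover the original source text from a prefix/middle/suffix row."""
    return row["prefix"] + row["middle"] + row["suffix"]


def to_fim_string(row, pre="<fim_prefix>", suf="<fim_suffix>", mid="<fim_middle>"):
    """Build a prefix-suffix-middle training string; the sentinels are placeholders."""
    return f"{pre}{row['prefix']}{suf}{row['suffix']}{mid}{row['middle']}"


if __name__ == "__main__":
    # Field names mirror the table header; the code snippets are truncated.
    example = {
        "repo_name": "codemeow5/vehiclenet-python",
        "path": "web.py",
        "language": "Python",
        "license": "gpl-2.0",
        "prefix": "#!/usr/bin/python\nimport sys, os\n",
        "middle": "import tornado.ioloop\n",
        "suffix": "import tornado.web\n",
    }
    print(reassemble(example))
    print(to_fim_string(example))
```

The prefix-suffix-middle ordering in the training string is one common convention; the table itself only stores the three raw fields.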
codemeow5/vehiclenet-python
|
web.py
|
Python
|
gpl-2.0
| 2,732
| 0.027086
|
#!/usr/bin/python
import sys, os
import tornado.ioloop
import tornado.web
import logging
import logging.handlers
import re
from urllib import unquote
import config
from vehiclenet import *
reload(sys)
sys.setdefaultencoding('utf8')
def deamon(chdir = False):
try:
if os.fork() > 0:
os._exit(0)
except OSError, e:
print 'fork #1 failed: %d (%s)' % (e.errno, e.strerror)
os._exit(1)
def init():
WeatherHandler.cache()
class DefaultHandler(tornado.web.RequestHandler):
def get(self):
self.write('VehicleNet Say Hello!')
class LogHandler(tornado.web.RequestHandler):
def get(self):
log_filename = 'logs/logging'
if not os.path.exists(log_filename):
self.write('The log file is empty.')
return
log_file = None
log_file_lines = None
try:
log_file = open(log_filename, 'r')
if log_file is None:
raise Exception('log_file is None')
log_file_lines = log_file.readlines()
if log_file_lines is None:
raise Exception('log_file_lines is None')
except Exception, e:
logger = logging.getLogger('web')
logger.error('Failed to read the log file (logs/logging), error: %s' % e)
finally:
if log_file is not None:
log_file.close()
if log_file_lines is None:
self.write('Failed to read the log file.')
line_limit = 500
for _ in log_file_lines[::-1]:
line_limit -= 1
if line_limit > 0:
self.write(unquote(_) + '<BR/>')
settings = {
"static_path": os.path.join(os.path.dirname(__file__), "static"),
}
routes = [
(r"/", DefaultHandler),
(r"/carlink/weather/findWeather.htm", WeatherHandler),
(r"/carlink/music/findMusic.htm", MusicSearchHandler)
|
,
(r"/carlink//music/findMusic.htm", MusicSearchHandler),
(r"/carlink/music/findMusicTop.htm", MusicTopHandler),
(r"/ca
|
rlink/music/findMusicLrc.htm", LrcSearchHandler),
(r"/carlink/news/findNews.htm", NewsHandler),
]
if config.Mode == 'DEBUG':
routes.append((r"/log", LogHandler))
application = tornado.web.Application(routes, **settings)
if __name__ == "__main__":
if '-d' in sys.argv:
deamon()
logdir = 'logs'
if not os.path.exists(logdir):
os.makedirs(logdir)
fmt = '%(asctime)s - %(filename)s:%(lineno)s - %(name)s - %(message)s'
formatter = logging.Formatter(fmt)
handler = logging.handlers.TimedRotatingFileHandler(
'%s/logging' % logdir, 'M', 20, 360)
handler.suffix = '%Y%m%d%H%M%S.log'
handler.extMatch = re.compile(r'^\d{4}\d{2}\d{2}\d{2}\d{2}\d{2}')
handler.setFormatter(formatter)
logger = logging.getLogger('web')
logger.addHandler(handler)
if config.Mode == 'DEBUG':
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.ERROR)
init()
application.listen(80)
print 'Server is running, listening on port 80....'
tornado.ioloop.IOLoop.instance().start()
|
yanjinbin/learnPython
|
chapter_7/chapter7.py
|
Python
|
gpl-3.0
| 3,243
| 0.000699
|
"""映射 集合 ... 高级数据结构类型"""
from string import Template
val_dict = {1: 'a', 2: 'b', 3: 'c'}
print(val_dict)
print(val_dict.keys())
print(val_dict.items())
print(val_dict.values())
factory_dict = dict((['x', 1], ['y', 2]))
print(factory_dict)
ddcit = {}.fromkeys(('x', 'y', 'z'), -24)
ddcit.update(val_dict) # new values overwrite old ones
print(ddcit)
print(ddcit.get("m", "no such key "))
print(ddcit.setdefault('x', "new value "))
print(type(ddcit.keys()))
for key in ddcit.keys():
s = Template("key is ${key} and value is ${value}")
# it errors out without passing key and value, but why?
print(s.substitute(key=key, value=ddcit[key]))
# the has_key method was removed; see the Python 3 docs https://docs.python.org/3.1/whatsnew/3.0.html#builtins
var_tuple = (1, 'acs')
var_list = [1, 2, 3]
strange_dict = {var_tuple: 11, 1: 'abcd'}
# key membership operations
print(1 in strange_dict)
# strange_dict = {var_tuple: 11, 1: 'abcd', var_list: 'acv'}
# syntactically fine, but it raises an unhashable type: 'list' error; every dict-based operation will then fail
# because dict checks whether the key is hashable
print(strange_dict[var_tuple])
# print(strange_dict[var_list])
# strange_dict.pop(var_list)
strange_dict.pop(var_tuple)
strange_dict.clear()
del strange_dict
val_dict1 = {1: 'a', '2': "v"}
val_dict2 = {1: 'v'}
# print(val_dict > val_dict2) no longer supported in Python 3
print(dict([['x', 1], ['z', 2]]))
# fixed: the zip() function; map() with a lambda expression is equivalent to zip
print(type(hash((1, 2, 3))))
print(hash((1, 2, 'a')))
# print(hash(([1, 23, 34], 'a')))
# a set guarantees unique elements: a mathematical set in the true sense (no duplicate elements),
# not merely a collection in the programming sense
print("------set-----")
var_set = set('aasn223wuerhe')
print(type(var_
|
set))
print(var_set)
print("frozensetr ")
var_frozen_set = frozenset('aaddk2u9m3pq40aiwoe27na')
print(var_frozen_set)
print('a' in var_set)
print('2' in var_frozen_set) # True, digits are treated as characters
print(2 in var_frozen_set) # False
# CRUD operations on a mutable set
var_set.update("anddipwq")
print(var_set)
var_se
|
t.discard("n")
print(var_set)
var_set.remove("a")
print(var_set)
var_set.pop()
print(var_set)
var_set.clear()
print(var_set)
var_set.add("$")
print(var_set)
var_set1 = set('rtyufghvb')
print(var_set1)
var_set2 = set('qwertyuiop')
print(var_set2)
var_set3 = set('qwertyuiop')
print(var_set3)
var_set4 = var_set1
print(var_set4)
var_set5 = set('qwert')
print(var_set5)
# set operations in the mathematical sense
print(var_set1 == var_set2)
print(var_set1 != var_set2)
print(var_set5 < var_set3)
print(var_set5.issubset(var_set3))
print(var_set1 <= var_set4)
print(var_set1.issuperset(var_set4))
print(var_set1 ^ var_set2) # everything outside the intersection of A and B, the symmetric difference A△B
print(var_set1.symmetric_difference(var_set2))
print(var_set1.union(var_set5))
print(var_set1 | var_set5)
print(var_set5 & var_set3)
print(var_set5.intersection(var_set3))
print(var_set3 - var_set5)
print(var_set3.difference(var_set5))
# mixed set-type operations: the left operand determines whether the result is mutable
immutable_set = frozenset("ansaskwke")
mutable_set = set("24m9sjwe")
immutable_set_1 = immutable_set | mutable_set
print(type(immutable_set_1))
# print(1 | 2) Python 3 actually supports this, surprisingly
|
welch/seasonal
|
tests/adjust_seasons_test.py
|
Python
|
mit
| 1,815
| 0.007163
|
# test seasonal.adjust_seasons() options handling
#
# adjust_seasons() handles a variety of optional arguments.
# verify that adjust_trend() is called correctly for different option combinations.
#
# No noise in this test set.
#
from __future__ import division
import numpy as np
from seasonal import fit_trend, adjust_seasons # pylint:disable=import-error
from seasonal.sequences import sine # pylint:disable=import-error
PERIOD = 25
CYCLES = 4
AMP = 1.0
TREND = AMP / PERIOD
LEVEL = 1000.0
SEASONS = sine(AMP, PERIOD, 1)
DATA = LEVEL + np.arange(PERIOD * CYCLES) * TREND + np.tile(SEASONS, CYCLES)
ZEROS = np.zeros(PERIOD * CYCLES)
def iszero(a):
return np.all(np.isclose(a, ZEROS))
def isseasons(a):
return np.all(np.isclose(a, SEASONS))
def test_auto():
adjusted = adjust_seasons(DATA)
assert adjusted.std() < DATA.std()
def test_trend_line():
adjusted = adjust_seaso
|
ns(DATA, trend="line")
assert adjusted.std() < DATA.std()
def test_explicit_trend():
trend = fit_trend(DATA, kind="line")
adjusted = adjust_seasons(DATA, trend=trend)
assert adjusted.std() < DATA.std()
def test_trend_period():
adjusted = adjust_seasons(DATA, trend="line", period=PERIOD)
assert adjusted.std() < DATA.std()
def
|
test_trend_seasons():
adjusted = adjust_seasons(DATA, trend="line", seasons=SEASONS)
assert adjusted.std() < DATA.std()
def test_trend_spline():
adjusted = adjust_seasons(DATA, trend="spline")
assert adjusted.std() < DATA.std()
def test_period():
adjusted = adjust_seasons(DATA, period=PERIOD)
assert adjusted.std() < DATA.std()
adjusted = adjust_seasons(DATA, period=PERIOD // 2) # no seasonality
assert adjusted is None
def test_seasons():
adjusted = adjust_seasons(DATA, seasons=SEASONS)
assert adjusted.std() < DATA.std()
|
foobarbazblarg/stayclean
|
stayclean-2020-july/serve-challenge-with-flask.py
|
Python
|
mit
| 12,690
| 0.003546
|
#!/usr/bin/env python
import subprocess
import praw
import datetime
import pyperclip
from hashlib import sha1
from flask import Flask
from flask import Response
from flask import request
from cStringIO import StringIO
from base64 import b64encode
from base64 import b64decode
from ConfigParser import ConfigParser
import OAuth2Util
import os
import markdown
import bleach
# encoding=utf8
import sys
from participantCollection import ParticipantCollection
reload(sys)
sys.setdefaultencoding('utf8')
# Edit Me!
challengePageSubmissionId = 'hjbcvx'
flaskport = 8936
thisMonthName = "July"
nextMonthName = "August"
readAllCommentsWhichCanBeSlower = False
sorryTooLateToSignUpReplyText = "Sorry, but the late signup grace period for " + thisMonthName + " is over, so you can't officially join this challenge. But feel free to follow along anyway, and comment all you want. And be sure to join us for the " + nextMonthName + " challenge. Signup posts for " + nextMonthName + " will begin during the last week of " + thisMonthName + "."
reinstatedReplyText = "OK, I've reinstated you. You should start showing up on the list again starting tomorrow."
app = Flask(__name__)
app.debug = True
commentHashesAndComments = {}
submission = None
def loginAndReturnRedditSession():
config = ConfigParser()
config.read("../reddit-password-credentials.cfg")
user = config.get("Reddit", "user")
password = config.get("Reddit", "password")
# TODO: password auth is going away, and we will soon need to do oauth.
redditSession = praw.Reddit(user_agent='Test Script by /u/foobarbazblarg')
redditSession.login(user, password, disable_warning=True)
# submissions = redditSession.get_subredd
|
it('pornfree').get_hot(limit=5)
# print [str(x) for x in submissions]
return redditSession
def loginOAuthAndReturnRedditSession():
redditSession = praw.Reddit(user_agent='Test Script by /u/foobarbazblarg')
# New version of praw does not require explicit use of the OAuth2Util object. Presumably because reddit now REQUIRES oauth.
# o = OAuth2Util.OAuth2Util(redditSession, print_log=True
|
, configfile="../reddit-oauth-credentials.cfg")
# TODO: Testing comment of refresh. We authenticate fresh every time, so presumably no need to do o.refresh().
# o.refresh(force=True)
return redditSession
def getSubmissionForRedditSession(redditSession):
# submission = redditSession.get_submission(submission_id=challengePageSubmissionId)
submission = redditSession.submission(id=challengePageSubmissionId)
if readAllCommentsWhichCanBeSlower:
submission.comments.replace_more(limit=None)
# submission.replace_more_comments(limit=None, threshold=0)
return submission
def getCommentsForSubmission(submission):
# return [comment for comment in praw.helpers.flatten_tree(submission.comments) if comment.__class__ == praw.models.Comment]
commentForest = submission.comments
# commentForest.replace_more(limit=None, threshold=0)
return [comment for comment in commentForest.list() if comment.__class__ == praw.models.Comment]
def retireCommentHash(commentHash):
with open("retiredcommenthashes.txt", "a") as commentHashFile:
commentHashFile.write(commentHash + '\n')
def retiredCommentHashes():
with open("retiredcommenthashes.txt", "r") as commentHashFile:
# return commentHashFile.readlines()
return commentHashFile.read().splitlines()
@app.route('/moderatechallenge.html')
def moderatechallenge():
currentDayOfMonthIndex = datetime.date.today().day
lateCheckinGracePeriodIsInEffect = currentDayOfMonthIndex <= 3
global commentHashesAndComments
global submission
commentHashesAndComments = {}
stringio = StringIO()
stringio.write('<html>\n<head>\n</head>\n\n')
# redditSession = loginAndReturnRedditSession()
redditSession = loginOAuthAndReturnRedditSession()
submission = getSubmissionForRedditSession(redditSession)
flat_comments = getCommentsForSubmission(submission)
retiredHashes = retiredCommentHashes()
i = 1
stringio.write('<iframe name="invisibleiframe" style="display:none;"></iframe>\n')
stringio.write("<h3>")
stringio.write(os.getcwd())
stringio.write("<br>\n")
stringio.write(submission.title)
stringio.write("</h3>\n\n")
stringio.write('<form action="copydisplaytoclipboard.html" method="post" target="invisibleiframe">')
stringio.write('<input type="submit" name="actiontotake" value="Copy display.py stdout to clipboard">')
stringio.write('<input type="submit" name="actiontotake" value="Automatically post display.py stdout">')
stringio.write('</form>')
stringio.write('<form action="updategooglechart.html" method="post" target="invisibleiframe">')
stringio.write('<input type="submit" value="update-google-chart.py">')
stringio.write('</form>')
for comment in flat_comments:
# print comment.is_root
# print comment.score
i += 1
commentHash = sha1()
commentHash.update(comment.fullname)
commentHash.update(comment.body.encode('utf-8'))
commentHash = commentHash.hexdigest()
if commentHash not in retiredHashes:
commentHashesAndComments[commentHash] = comment
authorName = str(comment.author) # can be None if author was deleted. So check for that and skip if it's None.
participant = ParticipantCollection().participantNamed(authorName)
stringio.write("<hr>\n")
stringio.write('<font color="blue"><b>')
stringio.write(authorName)
stringio.write('</b></font><br>')
if ParticipantCollection().hasParticipantNamed(authorName):
stringio.write(' <small><font color="green">(member)</font></small>')
if participant.isStillIn:
stringio.write(' <small><font color="green">(still in)</font></small>')
else:
stringio.write(' <small><font color="red">(out)</font></small>')
if participant.hasCheckedIn:
stringio.write(' <small><font color="green">(checked in)</font></small>')
else:
stringio.write(' <small><font color="orange">(not checked in)</font></small>')
if participant.hasRelapsed:
stringio.write(' <small><font color="red">(relapsed)</font></small>')
else:
stringio.write(' <small><font color="green">(not relapsed)</font></small>')
else:
stringio.write(' <small><font color="red">(not a member)</font></small>')
stringio.write('<form action="takeaction.html" method="post" target="invisibleiframe">')
if lateCheckinGracePeriodIsInEffect:
stringio.write('<input type="submit" name="actiontotake" value="Checkin">')
stringio.write('<input type="submit" name="actiontotake" value="Signup and checkin" style="color:white;background-color:green">')
else:
stringio.write('<input type="submit" name="actiontotake" value="Checkin" style="color:white;background-color:green">')
stringio.write('<input type="submit" name="actiontotake" value="Signup and checkin">')
stringio.write('<input type="submit" name="actiontotake" value="Relapse" style="color:white;background-color:red">')
stringio.write('<input type="submit" name="actiontotake" value="Reinstate with automatic comment">')
stringio.write('<input type="submit" name="actiontotake" value="Reply with sorry-too-late comment">')
stringio.write('<input type="submit" name="actiontotake" value="Skip comment">')
stringio.write('<input type="submit" name="actiontotake" value="Skip comment and don\'t upvote">')
stringio.write('<input type="hidden" name="username" value="' + b64encode(authorName) + '">')
stringio.write('<input type="hidden" name="bodyencodedformlcorpus" value="' + b64encode(comment.body.encode('utf-8')) + '">')
stringio.write('<input type="hidden" n
|
lutianming/leetcode
|
reverse_bits.py
|
Python
|
mit
| 321
| 0.003115
|
class Solution:
# @param n,
|
an integer
# @return an integer
def reverseBits(self, n):
reverse = 0
r = n
for i in range(32):
bit = r % 2
reverse += bit << (32-i-1)
r = r / 2
return reverse
s = So
|
lution()
r = s.reverseBits(43261596)
print(r)
|
shearern/rsync-usb
|
src/rsync_usb_tests/ChunkLocationTests.py
|
Python
|
gpl-2.0
| 4,596
| 0.001958
|
import unittest
from rsync_usb.ChunkLocation import ChunkLocation
class ChunkLocationTests(unittest.TestCase):
'''Test TargetHashesWriter and TargetHashesReader'''
def testProperties(self):
pos = ChunkLocation('dummy', 100, 10)
self.assertEqual(pos.path, 'dummy')
self.assertEqual(pos.start_pos, 100)
self.assertEqual(pos.data_len, 10)
def testEndPos(self):
pos = ChunkLocation('dummy', 100, 10)
self.assertEqual(pos.start_pos + pos.data_len - 1, pos.end_pos)
self.assertEqual(pos.end_pos, 109)
def testEqual(self):
pos_a = ChunkLocation('dummy', 100, 10)
pos_b = ChunkLocation('dummy', 100, 10)
self.assertEqual(pos_a, pos_b)
    # -- Overlapping chunk tests -----------------------------------------------
def assertOverlaping(self, pos_a, pos_b):
msg = "%s should overlap %s but did not"
self.assertTrue(pos_a.overlaps(pos_b), msg % (str(pos_a), str(pos_b)))
self.assertTrue(pos_b.overlaps(pos_a), msg % (str(pos_b), str(pos_a)))
def assertNotOverlaping(self, pos_a, pos_b):
msg = "%s should not overlap %s but does"
self.assertFalse(pos_a.overlaps(pos_b), msg % (str(pos_a), str(pos_b)))
self.assertFalse(pos_b.overlaps(pos_a), msg % (str(pos_b), str(pos_a)))
def testNoOverlapBefore(self):
pos_a = ChunkLocation('dummy', 10, 10)
pos_b = ChunkLocation('dummy', 100, 10)
self.assertNotOverlaping(pos_a, pos_b)
def testNoOverlapAfter(self):
pos_a = ChunkLocation('dummy', 1000, 10)
pos_b = ChunkLocation('dummy', 100, 10)
self.assertNotOverlaping(pos_a, pos_b)
def testNoOverlapDifferentPaths(self):
pos_a = ChunkLocation('dummy_a', 100, 10)
pos_b = ChunkLocation('dummy_b', 100, 10)
self.assertNotOverlaping(pos_a, pos_b)
def testOverlapEqual(self):
# 0000000000111111111112
# 0123456789001234567890
# A: ------|=======|-------
# B: ------|=======|-------
pos_a = ChunkLocation('dummy', 6, 9)
pos_b = ChunkLocation('dummy', 6, 9)
self.assertOverlaping(pos_a, pos_b)
def testOverlapStartsBefore(self):
# 0000000000111111111112
# 0123456789001234567890
# A: ----|=======|---------
# B: ------|=======|-------
pos_a = ChunkLocation('dummy', 4, 9)
pos_b = ChunkLocation('dummy', 6, 9)
self.assertOverlaping(pos_a, pos_b)
def testOverlapStartsBeforeAndEqual(self):
# 0000000000111111111112
# 0123456789001234567890
# A: ----|=========|-------
# B: ------|=======|-------
pos_a = ChunkLocation('dummy', 4, 11)
pos_b = ChunkLocation('dummy', 6, 9)
self.assertOverlaping(pos_a, pos_b)
def testOverlapInside(self):
# 0000000000111111111112
# 0123456789001234567890
# A: ------|=======|-------
# B: -----|=========|------
pos_a = ChunkLocation('dummy', 6, 9)
pos_b = ChunkLocation('dummy', 5, 11)
self.assertOverlaping(pos_a, pos_b)
def testOverlapInsideSameStart(self):
# 0000000000111111111112
# 0123456789001234567890
# A: ------|=======|-------
# B: ------|========|------
pos_a = ChunkLocation('dummy', 6, 9)
pos_b = ChunkL
|
ocation('dummy', 6, 10)
self.assertOverlaping(pos_a, pos_b)
def testOverlapInsideSameEnd(self):
# 0000000000111111111112
# 012345
|
6789001234567890
# A: ------|=======|-------
# B: -----|========|-------
pos_a = ChunkLocation('dummy', 6, 9)
pos_b = ChunkLocation('dummy', 5, 10)
self.assertOverlaping(pos_a, pos_b)
def testOverlapEndsAfter(self):
# 0000000000111111111112
# 0123456789001234567890
# A: -------|=======|------
# B: ------|=======|-------
pos_a = ChunkLocation('dummy', 7, 9)
pos_b = ChunkLocation('dummy', 6, 9)
self.assertOverlaping(pos_a, pos_b)
def testOverlapEndsAfterAndEqual(self):
# 0000000000111111111112
# 0123456789001234567890
# A: ------|=========|-----
# B: ------|=======|-------
pos_a = ChunkLocation('dummy', 6, 11)
pos_b = ChunkLocation('dummy', 6, 9)
self.assertOverlaping(pos_a, pos_b)
|
wenottingham/ansible
|
lib/ansible/plugins/lookup/subelements.py
|
Python
|
gpl-3.0
| 4,311
| 0.002784
|
# (c) 2013, Serge van Ginderachter <serge@vanginderachter.be>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.six import string_types
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
from ansible.utils.listify import listify_lookup_plugin_terms
from ansible.utils.boolean import boolean
FLAGS = ('skip_missing',)
class LookupModule(LookupBase):
def run(self, terms, variables, **kwargs):
def _raise_terms_error(msg=""):
raise AnsibleError(
"subelements lookup expects a list of two or three items, "
+ msg)
terms[0] = listify_lookup_plugin_terms(terms[0], templar=self._templar, loader=self._loader)
# check lookup terms - ch
|
eck number of terms
if not isinstance(terms, list) or not 2 <= len(terms) <= 3:
_raise_terms_error()
|
# first term should be a list (or dict), second a string holding the subkey
if not isinstance(terms[0], (list, dict)) or not isinstance(terms[1], string_types):
_raise_terms_error("first a dict or a list, second a string pointing to the subkey")
subelements = terms[1].split(".")
if isinstance(terms[0], dict): # convert to list:
if terms[0].get('skipped', False) is not False:
# the registered result was completely skipped
return []
elementlist = []
for key in terms[0].iterkeys():
elementlist.append(terms[0][key])
else:
elementlist = terms[0]
# check for optional flags in third term
flags = {}
if len(terms) == 3:
flags = terms[2]
if not isinstance(flags, dict) and not all([isinstance(key, string_types) and key in FLAGS for key in flags]):
_raise_terms_error("the optional third item must be a dict with flags %s" % FLAGS)
# build_items
ret = []
for item0 in elementlist:
if not isinstance(item0, dict):
raise AnsibleError("subelements lookup expects a dictionary, got '%s'" % item0)
if item0.get('skipped', False) is not False:
# this particular item is to be skipped
continue
skip_missing = boolean(flags.get('skip_missing', False))
subvalue = item0
lastsubkey = False
sublist = []
for subkey in subelements:
if subkey == subelements[-1]:
lastsubkey = True
if not subkey in subvalue:
if skip_missing:
continue
else:
raise AnsibleError("could not find '%s' key in iterated item '%s'" % (subkey, subvalue))
if not lastsubkey:
if not isinstance(subvalue[subkey], dict):
if skip_missing:
continue
else:
raise AnsibleError("the key %s should point to a dictionary, got '%s'" % (subkey, subvalue[subkey]))
else:
subvalue = subvalue[subkey]
else: # lastsubkey
if not isinstance(subvalue[subkey], list):
raise AnsibleError("the key %s should point to a list, got '%s'" % (subkey, subvalue[subkey]))
else:
sublist = subvalue.pop(subkey, [])
for item1 in sublist:
ret.append((item0, item1))
return ret
|
ntt-pf-lab/backup_keystone
|
keystone/middleware/remoteauth.py
|
Python
|
apache-2.0
| 4,006
| 0.00025
|
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agre
|
ed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Auth Middleware that handles auth for a service
This module can be installed as a filter in front of your service to validate
that requests are coming from a trusted component that has handled
authenticating the call. If a call c
|
omes from an untrusted source, it will
redirect it back to be properly authenticated. This is done by sending out a
305 proxy redirect response with the URL for the auth service.
The auth service settings are specified in the INI file (keystone.ini). The ini
file is passed in as the WSGI config file when starting the service. For this
proof of concept, the ini file is in echo/echo/echo.ini.
In the current implementation, a basic auth password is used to verify that the
request is coming from a valid auth component or service
Refer to: http://wiki.openstack.org/openstack-authn
HEADERS
-------
HTTP_ is a standard http header
HTTP_X is an extended http header
> Coming in from initial call
HTTP_X_AUTH_TOKEN : the client token being passed in
HTTP_X_STORAGE_TOKEN: the client token being passed in (legacy Rackspace use)
to support cloud files
> Used for communication between components
www-authenticate : only used if this component is being used remotely
HTTP_AUTHORIZATION : basic auth password used to validate the connection
> What we add to the request for use by the OpenStack service
HTTP_X_AUTHORIZATION: the client identity being passed in
"""
from webob.exc import HTTPUseProxy, HTTPUnauthorized
class RemoteAuth(object):
# app is the downstream WSGI component, usually the OpenStack service
#
# if app is not provided, the assumption is this filter is being run
# from a separate server.
def __init__(self, app, conf):
# app is the next app in WSGI chain - eventually the OpenStack service
self.app = app
self.conf = conf
# where to redirect untrusted requests to
self.proxy_location = conf.get('proxy_location')
# secret that will tell us a request is coming from a trusted auth
# component
self.remote_auth_pass = conf.get('remote_auth_pass')
print 'Starting Remote Auth middleware'
def __call__(self, env, start_response):
# Validate the request is trusted
# Authenticate the Auth component itself.
headers = [('www-authenticate', 'Basic realm="API Auth"')]
if 'HTTP_AUTHORIZATION' not in env:
# Redirect to proxy (auth component) and show that basic auth is
# required
return HTTPUseProxy(location=self.proxy_location,
headers=headers)(env, start_response)
else:
auth_type, encoded_creds = env['HTTP_AUTHORIZATION'].split(None, 1)
if encoded_creds != self.remote_auth_pass:
return HTTPUnauthorized(headers=headers)(env, start_response)
# Make sure that the user has been authenticated by the Auth Service
if 'HTTP_X_AUTHORIZATION' not in env:
return HTTPUnauthorized()(env, start_response)
return self.app(env, start_response)
def filter_factory(global_conf, **local_conf):
"""Returns a WSGI filter app for use with paste.deploy."""
conf = global_conf.copy()
conf.update(local_conf)
def auth_filter(app):
return RemoteAuth(app, conf)
return auth_filter
|
audantic/smartystreets.py
|
smartystreets/client.py
|
Python
|
bsd-3-clause
| 6,663
| 0.002852
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Client module for connecting to and interacting with SmartyStreets API
"""
import json
import numbers
import requests
from .data import Address, AddressCollection
from .exceptions import SmartyStreetsError, ERROR_CODES
def validate_args(f):
"""
Ensures that *args consist of a consistent type
:param f: any client method with *args parameter
:return: function f
"""
def wrapper(self, args):
arg_types = set([type(arg) for arg in args])
if len(arg_types) > 1:
raise TypeError("Mixed input types are not allowed")
elif list(arg_types)[0] not in (dict, str):
raise TypeError("Only dict and str types accepted")
return f(self, args)
return wrapper
def truncate_args(f):
"""
Ensures that *args do not exceed a set limit or are truncated to meet that limit
:param f: any Client method with *args parameter
:return: function f
"""
def wrapper(self, args):
if len(args) > 100:
if self.truncate_addresses:
args = args[:100]
else:
raise ValueError("This exceeds 100 address at a time SmartyStreets limit")
return f(self, args)
return wrapper
def stringify(data):
"""
Ensure all values in the dictionary are strings, except for the value for `candidate` which
should just be an integer.
:param data: a list of addresses in dictionary format
:return: the same list with all values except for `candidate` count as a string
"""
def serialize(k, v):
if k == "candidates":
return int(v)
if isinstance(v, numbers.Number):
if k == "zipcode":
# If values are presented as integers then leading digits may be cut off,
# and these are significant for the zipcode. Add them back.
return str(v).zfill(5)
return str(v)
return v
return [
{
k: serialize(k, v) for k, v in json_dict.items()
}
for json_dict in data
]
class Client(object):
"""
Client class for interacting with the SmartyStreets API
"""
BASE_URL = "https://api.smartystreets.com/"
def __init__(self, auth_id, auth_token, standardize=False, invalid=False, logging=True,
accept_keypair=False, truncate_addresses=False, timeout=None):
"""
Constructs the client
:param auth_id: authentication ID from SmartyStreets
:param auth_token: authentication token
:param standardize: boolean include addresses that match zip+4 in addition to DPV confirmed
addresses
:param invalid: boolean to include address candidates that may not be deliverable
:param logging: boolean to allow SmartyStreets to log requests
:param accept_keypair: boolean to toggle default keypair behavior
:param truncate_addresses: boolean to silently truncate address lists in excess of the
SmartyStreets maximum rather than raise an error.
:param timeout: optional timeout value in seconds for requests.
:return: the configured client object
"""
self.auth_id = auth_id
self.auth_token = auth_token
self.standardize = standardize
self.invalid = invalid
self.logging = logging
self.accept_keypair = accept_keypair
self.truncate_addresses = truncate_addresses
self.timeout = timeout
self.session = requests.Session()
self.session.mount(self.BASE_URL, requests.adapters.HTTPAdapter(max_retries=5))
def post(self, endpoint, data):
"""
Executes the HTTP POST request
:param endpoint: string indicating the URL component to call
:param data: the data to submit
:return: the dumped JSON response content
"""
headers = {
'Content-Type': 'application/json',
'Accept': 'application/json',
'x-standardize-only': 'true' if self.standardize else 'false',
'x-include-invalid': 'true' if self.invalid else 'false',
'x-accept-keypair': 'true' if self.accept_keypair else 'false',
}
if not self.logging:
headers['x-suppress-logging'] = 'true'
params = {'auth-id': self.auth_id, 'auth-token': self.auth_token}
url = self.BASE_URL + endpoint
response = self.session.post(url, json.dumps(stringify(data)),
params=params, headers=headers, timeout=self.timeout)
if response.status_code == 200:
return response.json()
raise ERROR_CODES.get(response.status_code, SmartyStreetsError)
@truncate_args
@validate_args
def street_addresses(self, addresses):
"""
API method for verifying street address and geolocating
Returns an AddressCollection always for consistency. In common usage it'd be simple and
sane to return an Address when only one address was searched, however this makes
populating search addresses from lists of unknown length problematic. If that list
returns only one address now the code has to check the type of return value to ensure
that it isn't applying behavior for an expected list type rather than a single dictionary.
>>> client.street_addresses(["100 Main St, Anywhere, USA"], ["6 S Blvd, Richmond, VA"])
>>> client.street_addresses([{"street": "100 Main St, anywhere USA"}, ... ])
:param addresses: 1 or more addresses in string or dict format
:return: an AddressCollection
"""
# While it's okay in theory to accept freeform addresses they do need to be submitted in
# a dictionary format.
if type(addresses[0]) != dict:
addresses = [{'street': arg} for arg in addresses]
r
|
eturn AddressCollection(self.post('street-address', data=addresses))
def street_address(self, address):
"""
Geocode one and only address, get a single Address object back
>>> client.street_address("100 Main St, Anywhere, USA")
>>> client.street_address({"street": "100 Main St, anywhere USA"})
:param address: string or dictionary with street addr
|
ess information
:return: an Address object or None for no match
"""
address = self.street_addresses([address])
if not len(address):
return None
return Address(address[0])
def zipcode(self, *args):
raise NotImplementedError("You cannot lookup zipcodes yet")
|
m45t3r/i3pystatus
|
i3pystatus/spotify.py
|
Python
|
mit
| 183
| 0
|
from i3pystatus.playerctl import Playerctl
class Spotify(Playerctl):
"""
Get Spotify info using playerct
|
l. Based
|
on `Playerctl`_ module.
"""
player_name = "spotify"
|
liuqr/edx-xiaodun
|
cms/djangoapps/models/settings/course_grading.py
|
Python
|
agpl-3.0
| 9,046
| 0.003869
|
from datetime import timedelta
from contentstore.utils import get_modulestore
from xmodule.modulestore.django import loc_mapper
from xblock.fields import Scope
class CourseGradingModel(object):
"""
Basical
|
ly a DAO and Model combo for CRUD operations pertaining to grading policy.
"""
# Within this class, allow access to protected members of client classes.
# This comes up when accessing kvs data and caches during kvs saves and modulestore writes.
def __init__(self, course_descriptor):
self.graders = [
CourseGradingModel
|
.jsonize_grader(i, grader) for i, grader in enumerate(course_descriptor.raw_grader)
] # weights transformed to ints [0..100]
self.grade_cutoffs = course_descriptor.grade_cutoffs
self.grace_period = CourseGradingModel.convert_set_grace_period(course_descriptor)
@classmethod
def fetch(cls, course_locator):
"""
Fetch the course grading policy for the given course from persistence and return a CourseGradingModel.
"""
course_old_location = loc_mapper().translate_locator_to_location(course_locator)
descriptor = get_modulestore(course_old_location).get_item(course_old_location)
model = cls(descriptor)
return model
@staticmethod
def fetch_grader(course_location, index):
"""
Fetch the course's nth grader
Returns an empty dict if there's no such grader.
"""
course_old_location = loc_mapper().translate_locator_to_location(course_location)
descriptor = get_modulestore(course_old_location).get_item(course_old_location)
index = int(index)
if len(descriptor.raw_grader) > index:
return CourseGradingModel.jsonize_grader(index, descriptor.raw_grader[index])
# return empty model
else:
return {"id": index,
"type": "",
"min_count": 0,
"drop_count": 0,
"short_label": None,
"weight": 0
}
@staticmethod
def update_from_json(course_locator, jsondict, user):
"""
Decode the json into CourseGradingModel and save any changes. Returns the modified model.
Probably not the usual path for updates as it's too coarse grained.
"""
course_old_location = loc_mapper().translate_locator_to_location(course_locator)
descriptor = get_modulestore(course_old_location).get_item(course_old_location)
graders_parsed = [CourseGradingModel.parse_grader(jsonele) for jsonele in jsondict['graders']]
descriptor.raw_grader = graders_parsed
descriptor.grade_cutoffs = jsondict['grade_cutoffs']
get_modulestore(course_old_location).update_item(descriptor, user.id)
CourseGradingModel.update_grace_period_from_json(course_locator, jsondict['grace_period'], user)
return CourseGradingModel.fetch(course_locator)
@staticmethod
def update_grader_from_json(course_location, grader, user):
"""
Create or update the grader of the given type (string key) for the given course. Returns the modified
grader which is a full model on the client but not on the server (just a dict)
"""
course_old_location = loc_mapper().translate_locator_to_location(course_location)
descriptor = get_modulestore(course_old_location).get_item(course_old_location)
# parse removes the id; so, grab it before parse
index = int(grader.get('id', len(descriptor.raw_grader)))
grader = CourseGradingModel.parse_grader(grader)
if index < len(descriptor.raw_grader):
descriptor.raw_grader[index] = grader
else:
descriptor.raw_grader.append(grader)
get_modulestore(course_old_location).update_item(descriptor, user.id)
return CourseGradingModel.jsonize_grader(index, descriptor.raw_grader[index])
@staticmethod
def update_cutoffs_from_json(course_location, cutoffs, user):
"""
Create or update the grade cutoffs for the given course. Returns sent in cutoffs (ie., no extra
db fetch).
"""
course_old_location = loc_mapper().translate_locator_to_location(course_location)
descriptor = get_modulestore(course_old_location).get_item(course_old_location)
descriptor.grade_cutoffs = cutoffs
get_modulestore(course_old_location).update_item(descriptor, user.id)
return cutoffs
@staticmethod
def update_grace_period_from_json(course_location, graceperiodjson, user):
"""
Update the course's default grace period. Incoming dict is {hours: h, minutes: m} possibly as a
grace_period entry in an enclosing dict. It is also safe to call this method with a value of
None for graceperiodjson.
"""
course_old_location = loc_mapper().translate_locator_to_location(course_location)
descriptor = get_modulestore(course_old_location).get_item(course_old_location)
# Before a graceperiod has ever been created, it will be None (once it has been
# created, it cannot be set back to None).
if graceperiodjson is not None:
if 'grace_period' in graceperiodjson:
graceperiodjson = graceperiodjson['grace_period']
grace_timedelta = timedelta(**graceperiodjson)
descriptor.graceperiod = grace_timedelta
get_modulestore(course_old_location).update_item(descriptor, user.id)
@staticmethod
def delete_grader(course_location, index, user):
"""
Delete the grader of the given type from the given course.
"""
course_old_location = loc_mapper().translate_locator_to_location(course_location)
descriptor = get_modulestore(course_old_location).get_item(course_old_location)
index = int(index)
if index < len(descriptor.raw_grader):
del descriptor.raw_grader[index]
# force propagation to definition
descriptor.raw_grader = descriptor.raw_grader
get_modulestore(course_old_location).update_item(descriptor, user.id)
@staticmethod
def delete_grace_period(course_location, user):
"""
Delete the course's grace period.
"""
course_old_location = loc_mapper().translate_locator_to_location(course_location)
descriptor = get_modulestore(course_old_location).get_item(course_old_location)
del descriptor.graceperiod
get_modulestore(course_old_location).update_item(descriptor, user.id)
@staticmethod
def get_section_grader_type(location):
old_location = loc_mapper().translate_locator_to_location(location)
descriptor = get_modulestore(old_location).get_item(old_location)
return {
"graderType": descriptor.format if descriptor.format is not None else 'notgraded',
"location": unicode(location),
}
@staticmethod
def update_section_grader_type(descriptor, grader_type, user):
if grader_type is not None and grader_type != u'notgraded':
descriptor.format = grader_type
descriptor.graded = True
else:
del descriptor.format
del descriptor.graded
get_modulestore(descriptor.location).update_item(descriptor, user.id)
return {'graderType': grader_type}
@staticmethod
def convert_set_grace_period(descriptor):
# 5 hours 59 minutes 59 seconds => converted to iso format
rawgrace = descriptor.graceperiod
if rawgrace:
hours_from_days = rawgrace.days * 24
seconds = rawgrace.seconds
hours_from_seconds = int(seconds / 3600)
hours = hours_from_days + hours_from_seconds
seconds -= hours_from_seconds * 3600
minutes = int(seconds / 60)
seconds -= minutes * 60
graceperiod = {'hours': 0, 'minutes': 0, 'seconds': 0}
if hours > 0:
graceperiod['hours'] = hours
if minutes > 0:
graceperiod['minutes'] =
|
hehongliang/tensorflow
|
tensorflow/python/util/tf_export_test.py
|
Python
|
apache-2.0
| 6,573
| 0.005021
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""tf_export tests."""
# pylint: disable=unused-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
from tensorflow.python.platform import test
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_export
def _test_function(unused_arg=0):
pass
def _test_function2(unused_arg=0):
pass
class TestClassA(object):
pass
class TestClassB(TestClassA):
pass
class ValidateExportTest(test.TestCase):
"""Tests for tf_export class."""
class MockModule(object):
def __init__(self, name):
self.__name__ = name
def setUp(self):
self._modules = []
def tearDown(self):
for name in self._modules:
del sys.modules[name]
self._modules = []
for symbol in [_test_function, _test_function, TestClassA, TestClassB]:
if hasattr(symbol, '_tf_api_names'):
del symbol._tf_api_names
if hasattr(symbol, '_tf_api_names_v1'):
del symbol._tf_api_names_v1
def _CreateMockModule(self, name):
mock_module = self.MockModule(name)
sys.modules[name] = mock_module
self._modules.append(name)
return mock_module
def testExportSingleFunction(self):
export_decorator = tf_export.tf_export('nameA', 'nameB')
decorated_function = export_decorator(_test_function)
self.assertEquals(decorated_function, _test_function)
self.assertEquals(('nameA', 'nameB'), decorated_function._tf_api_names)
def testExportMultipleFunctions(self):
export_decorator1 = tf_export.tf_export('nameA', 'nameB')
export_decorator2 = tf_export.tf_export('nameC', 'nameD')
decorated_function1 = export_decorator1(_test_function)
decorated_function2 = export_decorator2(_test_function2)
self.assertEquals(decorated_function1, _test_function)
self.assertEquals(decorated_function2, _test_function2)
self.assertEquals(('nameA', 'nameB'), decorated_function1._tf_api_names)
self.assertEquals(('nameC', 'nameD'), decorated_function2._tf_api_names)
def testExportClasses(self):
export_decorator_a = tf_export.tf_export('TestClassA1')
export_decorator_a(TestClassA)
self.assertEquals(('TestClassA1',), TestClassA._tf_api_names)
self.assertTrue('_tf_api_names' not in TestClassB.__dict__)
export_decorator_b = tf_export.tf_export('TestClassB1')
export_decorator_b(TestClassB)
self.assertEquals(('TestClassA1',), TestClassA._tf_api_names)
self.assertEquals(('TestClassB1',), TestClassB._tf_api_names)
def testExportSingleConstant(self):
module1 = self._CreateMockModule('module1')
export_decorator = tf_export.tf_export('NAME_A', 'NAME_B')
export_decorator.export_constant('module1', 'test_constant')
self.assertEquals([(('NAME_A', 'NAME_B'), 'test_constant')],
module1._tf_api_constants)
def testExportMultipleConstants(self):
module1 = self._CreateMockModule('module1')
module2 = self._CreateMockModule('module2')
test_constant1 = 123
test_constant2 = 'abc'
test_constant3 = 0.5
export_decorator1 = tf_export.tf_export('NAME_A', 'NAME_B')
export_decorator2 = tf_export.tf_export('NAME_C', 'NAME_D')
export_decorator3 = tf_export.tf_export('NAME_E', 'NAME_F')
export_decorator1.export_constant('module1', test_constant1)
export_decorator2.export_constant('module2', test_constant2)
export_decorator3.export_constant('module2', test_constant3)
self.assertEquals([(('NAME_A', 'NAME_B'), 123)],
module1._tf_api_constants)
self.assertEquals([(('NAME_C', 'NAME_D'), 'abc'),
(('NAME_E', 'NAME_F'), 0.5)],
module2._tf_api_constants)
def testRaisesExceptionIfAlreadyHasAPINames(self):
_test_function._tf_api_names = ['abc']
export_decorator = tf_export.tf_export('nameA', 'nameB')
with self.assertRaises(tf_export.SymbolAlreadyExposedError):
exp
|
ort_decorator(_test_function)
def testRaisesExceptionIfInvalidSymbolName(self):
# TensorFlow code is not allowed to export symbols under package
|
# tf.estimator
with self.assertRaises(tf_export.InvalidSymbolNameError):
tf_export.tf_export('estimator.invalid')
# All symbols exported by Estimator must be under tf.estimator package.
with self.assertRaises(tf_export.InvalidSymbolNameError):
tf_export.estimator_export('invalid')
with self.assertRaises(tf_export.InvalidSymbolNameError):
tf_export.estimator_export('Estimator.invalid')
with self.assertRaises(tf_export.InvalidSymbolNameError):
tf_export.estimator_export('invalid.estimator')
def testRaisesExceptionIfInvalidV1SymbolName(self):
with self.assertRaises(tf_export.InvalidSymbolNameError):
tf_export.tf_export('valid', v1=['estimator.invalid'])
with self.assertRaises(tf_export.InvalidSymbolNameError):
tf_export.estimator_export('estimator.valid', v1=['invalid'])
def testOverridesFunction(self):
_test_function2._tf_api_names = ['abc']
export_decorator = tf_export.tf_export(
'nameA', 'nameB', overrides=[_test_function2])
export_decorator(_test_function)
# _test_function overrides _test_function2. So, _tf_api_names
# should be removed from _test_function2.
self.assertFalse(hasattr(_test_function2, '_tf_api_names'))
def testMultipleDecorators(self):
def get_wrapper(func):
def wrapper(*unused_args, **unused_kwargs):
pass
return tf_decorator.make_decorator(func, wrapper)
decorated_function = get_wrapper(_test_function)
export_decorator = tf_export.tf_export('nameA', 'nameB')
exported_function = export_decorator(decorated_function)
self.assertEquals(decorated_function, exported_function)
self.assertEquals(('nameA', 'nameB'), _test_function._tf_api_names)
if __name__ == '__main__':
test.main()
|
Joccalor/PFunc
|
PFunc.py
|
Python
|
gpl-3.0
| 130,625
| 0.000322
|
#!/usr/bin/env python3
# sudo apt-get install python3-tk
# This file is part of PFunc. PFunc provides a set of simple tools for users
# to analyze preference functions and other function-valued traits.
#
# Copyright 2016-2022 Joseph Kilmer
#
# PFunc is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PFunc is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Import statements
from tkinter import *
from tkinter import filedialog
from tkinter import messagebox
import tkinter.font as tkFont
from sys import argv
from sys import platform
from os import getcwd
from os import environ
from os import listdir
from os import path
from math import log10
from math import ceil as ceiling
import shelve
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt # must come after matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg as FigureCanvas
from matplotlib.figure import Figure
from datetime import datetime
# If using matplotlib 2+, make it look like matplotlib 1.5.x
if int(matplotlib.__version__.split('.')[0]) >= 2:
matplotlib.style.use('classic')
# For opening the PDF help file:
if platform == 'win32':
from os import startfile
else:
import subprocess
# For finding R on the system:
try:
import rpy2.robjects as robjects # must come after matplotlib or numpy
environ['R_HOME']
except:
custom_path = '0'
if 'PFuncPath.txt' in listdir():
with open('PFuncPath.txt') as pathfile:
lines = pathfile.readlines()
for l in lines:
if l[0:11] == 'custom_path':
custom_path = str(l[12:-1])
break
if custom_path == '0':
if platform == 'win32' and 'R' in listdir('C:\\Program Files'):
r_versions = []
for d in listdir('C:\\Program Files\\R'):
if d[0:2] == 'R-':
r_versions.append(d)
custom_path = 'C:\\Program Files\\R\\' + r_versions[-1]
elif platform == 'darwin':
custom_path = '/Library/Frameworks/R.framework/Resources'
elif platform == 'linux':
custom_path = '/usr/bin'
environ['R_HOME'] = custom_path
environ['R_USER'] = path.dirname(path.realpath(argv[0]))
import rpy2.robjects as robjects
r = robjects.r
class PrefFunc():
'''This is the base-level data structure for the program. Each PrefFunc
object corresponds to an individual in the dataset. This is called when
opening a new file and when creating group-level splines.
As input, it takes a dataframe that originated in R, and the names of
a bunch of different variables that act as settings for generating splines.
'''
def __init__(self, r_data_frame, id_number, smoothing_value, current_sp,
sp_lim, sp_min, sp_max,
loc_peak, peak_min, peak_max,
tol_type, tol_drop, tol_absolute, tol_mode,
tol_floor, strength_mode, spline_type='individual'):
self.smoothing_value = smoothing_value
self.current_sp = current_sp
self.sp_lim = sp_lim
self.sp_min = sp_min
self.sp_max = sp_max
self.loc_peak = loc_peak
self.peak_min = peak_min
self.peak_max = peak_max
self.tol_type = tol_type
self.tol_drop = tol_drop
self.tol_absolute = tol_absolute
self.tol_mode = tol_mode
self.tol_floor = tol_floor
self.strength_mode = strength_mode
self.r_data_frame = r_data_frame
self.id_number = id_number
self.type = spline_type
self.sp_status = 'magenta' # magenta = default, cyan = adjusted
self.update()
self.name = r('names(%s)[2]' % self.r_data_frame.r_repr())[0]
self.data_x = r('curr.func$data.x')
self.data_y = r('curr.func$data.y')
self.page = ((self.id_number - 1) // 9) + 1
self.slot = ((self.id_number - 1) % 9) + 1
self.background = 'white'
if self.type == 'group':
self.constituents = r('mydf')
self.background = '#ffff99'
self.name = r('names(%s)[3]' % self.r_data_frame.r_repr())[0]
def update(self):
self.generate_spline()
self.populate_stats()
def generate_spline(self):
if self.tol_type.get() == 'relative':
instance_drop = self.tol_drop.get()
instance_floor = self.tol_floor.get()
elif self.tol_type.get() == 'absolute':
instance_drop = 1
instance_floor = self.tol_absolute.get()
if self.loc_peak.get() == 0:
instance_peak = '1'
elif self.loc_peak.get() == 1:
instance_peak = 'c(%s, %s)' % (self.peak_min.get(),
self.peak_max.get())
if self.sp_status == 'magenta':
self.reset_sp()
if self.type == 'group':
r("ind.data <- %s[2:3]" % self.r_data_frame.r_repr())
else:
r("ind.data <- %s" % self.r_data_frame.r_repr())
r("""curr.func <- PFunc(ind.data, 2, %s, peak.within = %s,
drop = %s, tol.mode = '%s',
sp.binding = %d, min.sp = %s, max.sp = %s,
graph.se = TRUE,
forgui = TRUE, tol.floor = %s
)""" % (self.smoothing_value.get(),
instance_peak, instance_drop, self.tol_mode.get(),
self.sp_lim.get(), self.sp_min.get(), self.sp_max.get(),
instance_floor))
r("master.gam.list[[%s]] <- curr.func$gam.object" % self.id_number)
def populate_stats(self):
self.spline_x = r('curr.func$stimulus')
self.spline_y = r('curr.func$response')
self.se = r('curr.func$se')
self.peak_pref = ('%s' % r('curr.func$peak.preference')).split()[1]
self.peak_resp = ('%s' % r('curr.func$peak.response')).split()[1]
self.broad_tolerance = ('%s' % r('curr.func$broad.tol')).split()[1]
self.strict_tolerance = ('%s' % r('
|
curr.func$strict.tol')).split()[1]
self.broad_tolerance_points = r('curr.func$broad.tol.points')
self.strict_tolerance_point
|
s = r('curr.func$strict.tol.points')
self.tolerance_height = ('%s' % r('curr.func$tol.height')).split()[1]
self.hd_strength = ('%s' % r('curr.func$hd.strength')).split()[1]
self.hi_strength = ('%s' % r('curr.func$hi.strength')).split()[1]
self.responsiveness = ('%s' % r('curr.func$responsiveness')).split()[1]
self.axes_ranges = r('range.bundle') # min.x, max.x, min.y, max.y
self.smoothing_value.set((
'%s' % r('curr.func$smoothing.parameter')).split()[1])
self.is_flat = r('curr.func$is.flat')
def stiffen(self):
'''Increase the smoothing parameter'''
self.smoothing_value.set(self.increment_sp(by=0.1))
self.sp_status = 'cyan'
self.update()
self.current_sp.set(self.smoothing_value.get())
def loosen(self):
'''Decrease the smoothing parameter'''
self.smoothing_value.set(self.increment_sp(by=-0.1))
self.sp_status = 'cyan'
self.update()
self.current_sp.set(self.smoothing_value.get())
def reset_sp(self):
'''Reset the smoothing parameter to the default value'''
self.smoothing_value.set('-1')
self.sp_status = 'none' # Protection against infinite loops in update
self.update()
self.sp_status = 'magenta'
def increment_sp(self, by):
'''Adjust the smoothing parameter by one step up or down.
|
datamade/dedupe
|
dedupe/labeler.py
|
Python
|
mit
| 15,590
| 0.000513
|
import random
from abc import ABC, abstractmethod
import logging
import numpy
import rlr
from typing import List
from typing_extensions import Protocol
import dedupe.sampling as sampling
import dedupe.core as core
import dedupe.training as training
import dedupe.datamodel as datamodel
from dedupe._typing import TrainingExample
logger = logging.getLogger(__name__)
class ActiveLearner(ABC):
@abstractmethod
def transform(self) -> None:
pass
@abstractmethod
def pop(self) -> TrainingExample:
pass
@abstractmethod
def mark(self) -> None:
pass
@abstractmethod
def __len__(self) -> int:
pass
class HasDataModel(Protocol):
data_model: datamodel.DataModel
class DedupeSampler(object):
def _sample(self: HasDataModel, data, blocked_proportion, sample_size) -> List[TrainingExample]:
blocked_sample_size = int(blocked_proportion * sample_size)
predicates = list(self.data_model.predicates(index_predicates=False))
data = sampling.randomDeque(data)
blocked_sample_keys = sampling.dedupeBlockedSample(blocked_sample_size,
predicates,
|
data)
random_sample_size = sample_size - len(blocked_sample_keys)
random_sample_keys = set(core.randomPairs(len(data),
random_sample_size))
data = dict(data)
return [(data[k1], data[k2])
for k1, k2
in blocked_sample_keys | random_sample_keys]
class RecordLinkSampler(object):
def _sample(self: HasDataModel, data_1, data_2, bl
|
ocked_proportion, sample_size) -> List[TrainingExample]:
offset = len(data_1)
blocked_sample_size = int(blocked_proportion * sample_size)
predicates = list(self.data_model.predicates(index_predicates=False))
deque_1 = sampling.randomDeque(data_1)
deque_2 = sampling.randomDeque(data_2)
blocked_sample_keys = sampling.linkBlockedSample(blocked_sample_size,
predicates,
deque_1,
deque_2)
random_sample_size = sample_size - len(blocked_sample_keys)
random_sample_keys = core.randomPairsMatch(len(deque_1),
len(deque_2),
random_sample_size)
unique_random_sample_keys = {(a, b + offset)
for a, b in random_sample_keys}
return [(data_1[k1], data_2[k2])
for k1, k2
in blocked_sample_keys | unique_random_sample_keys]
class RLRLearner(ActiveLearner, rlr.RegularizedLogisticRegression):
def __init__(self, data_model):
super().__init__(alpha=1)
self.data_model = data_model
self._candidates: List[TrainingExample]
@property
def candidates(self) -> List[TrainingExample]:
return self._candidates
@candidates.setter
def candidates(self, new_candidates):
self._candidates = new_candidates
self.distances = self.transform(self._candidates)
random_pair = random.choice(self._candidates)
exact_match = (random_pair[0], random_pair[0])
self.fit_transform([exact_match, random_pair],
[1, 0])
def transform(self, pairs):
return self.data_model.distances(pairs)
def fit(self, X, y):
self.y = numpy.array(y)
self.X = X
super().fit(self.X, self.y, cv=False)
def fit_transform(self, pairs, y):
self.fit(self.transform(pairs), y)
def pop(self) -> TrainingExample:
if not len(self.candidates):
raise IndexError("No more unlabeled examples to label")
target_uncertainty = self._bias()
probabilities = self.candidate_scores()
distance_to_target = numpy.abs(target_uncertainty - probabilities)
uncertain_index = distance_to_target.argmin()
self.distances = numpy.delete(self.distances, uncertain_index, axis=0)
uncertain_pair = self.candidates.pop(uncertain_index)
return uncertain_pair
def _remove(self, index):
self.distances = numpy.delete(self.distances, index, axis=0)
def mark(self, pairs, y):
self.y = numpy.concatenate([self.y, y])
self.X = numpy.vstack([self.X, self.transform(pairs)])
self.fit(self.X, self.y)
def _bias(self):
positive = numpy.sum(self.y == 1)
n_examples = len(self.y)
bias = 1 - (positive / n_examples if positive else 0)
# When we have just a few examples we are okay with getting
# examples where the model strongly believes the example is
# going to be positive or negative. As we get more examples,
# prefer to ask for labels of examples the model is more
# uncertain of.
uncertainty_weight = min(positive, n_examples - positive)
bias_weight = 10
weighted_bias = 0.5 * uncertainty_weight + bias * bias_weight
weighted_bias /= uncertainty_weight + bias_weight
return weighted_bias
def candidate_scores(self):
return self.predict_proba(self.distances)
def __len__(self):
return len(self.candidates)
class DedupeRLRLearner(DedupeSampler, RLRLearner):
def __init__(self, data_model, data, blocked_proportion, sample_size):
super().__init__(data_model)
self.candidates = self._sample(data, blocked_proportion, sample_size)
class RecordLinkRLRLearner(RecordLinkSampler, RLRLearner):
def __init__(self, data_model, data_1, data_2, blocked_proportion, sample_size):
        super().__init__(data_model)
self.candidates = self._sample(data_1, data_2, blocked_proportion, sample_size)
class BlockLearner(object):
def __init__(self, data_model, candidates, *args):
self.data_model = data_model
self.candidates = candidates
self.current_predicates = ()
self._cached_labels = None
self._old_dupes = []
self.block_learner: training.BlockLearner
def fit_transform(self, pairs, y):
dupes = [pair for label, pair in zip(y, pairs) if label]
new_dupes = [pair for pair in dupes if pair not in self._old_dupes]
new_uncovered = (not all(self.predict(new_dupes)))
if new_uncovered:
self.current_predicates = self.block_learner.learn(dupes,
recall=1.0)
self._cached_labels = None
self._old_dupes = dupes
def candidate_scores(self):
if self._cached_labels is None:
labels = self.predict(self.candidates)
self._cached_labels = numpy.array(labels).reshape(-1, 1)
return self._cached_labels
def predict(self, candidates):
labels = []
for record_1, record_2 in candidates:
for predicate in self.current_predicates:
keys = predicate(record_2, target=True)
if keys:
if set(predicate(record_1)) & set(keys):
labels.append(1)
break
else:
labels.append(0)
return labels
def _remove(self, index):
if self._cached_labels is not None:
self._cached_labels = numpy.delete(self._cached_labels,
index,
axis=0)
class DedupeBlockLearner(BlockLearner):
def __init__(self, data_model,
candidates,
data,
index_include):
super().__init__(data_model, candidates)
index_data = Sample(data, 50000)
sampled_records = Sample(index_data, 5000)
preds = self.data_model.predicates()
self.block_learner = training.DedupeBlockLearner(preds,
|
rbong/gimptools
|
preview.py
|
Python
|
gpl-2.0
| 2,246
| 0.01959
|
#!/usr/bin/env python2
from gimpfu import *
import time
import re
def preview (image, delay, loops, force_delay, ignore_hidden, restore_hide):
if not image:
raise "No image given."
layers = image.layers
nlayers = len (layers)
visible = []
length = []
i = 0
while i < nlayers:
visible += [pdb.gimp_item_get_visible (layers [i])]
if visible [i]:
pdb.gimp_item_set_visible (layers [i], False)
name = pdb.gimp_item_get_name (layers [i])
l = None
if not force_delay:
l = re.search ("\([0-9]+ms\)", name)
if l:
l = tuple (map (sum, zip (l.span (), tuple ([+1, -3]))))
l = name [slice (*l)]
if not l:
l = delay
length += [float (l) / 1000.0]
i += 1
j = 0
while j < loops:
while i > 0:
i -= 1
if (not ignore_hidden) or visible [i]:
pdb.gimp_item_set_visible (layers [i], True)
pdb.gimp_displays_flush ()
time.sleep (length [i])
j += 1
    # hide the frames again (they were shown cumulatively to handle optimized GIFs)
if j < loops:
while i < nlayers:
if (not ignore_hidden) or visible [i]:
pdb.gimp_item_set_visible (layers [i], False)
i += 1
else:
i = nlayers
i = nlayers
if restore_hide:
while i > 0:
i -= 1
if visible [i]:
pdb.gimp_item_set_visible (layers [i], True)
register(
"preview",
"preview",
"Preview the animation of a gif",
"Roger Bongers",
"Roger Bongers",
"2016",
"Preview...",
"*",
[
(PF_IMAGE, "image", "The image to modify", None),
    (PF_INT32, "delay", "The default length in ms of each frame", 100),
(PF_INT32, "loops", "The number of times to loop the animation", 1),
(PF_BOOL, "force-delay", "Force the default length on every frame", 0),
(PF_BOOL, "ignore-hidden", "Ignore currently hidden items", 0),
(PF_BOOL, "restore-hide", "Restore the hidden status after preview", 0),
],
[],
preview,
menu = "<I
|
mage>/Filters/Animation")
main()
|
LumPenPacK/NetworkExtractionFromImages
|
win_build/nefi2_win_amd64_msvc_2015/site-packages/networkx/algorithms/hybrid.py
|
Python
|
bsd-2-clause
| 6,084
| 0.010355
|
# coding: utf-8
"""
Provides functions for finding and testing for locally `(k, l)`-connected
graphs.
"""
__author__ = """Aric Hagberg (hagberg@lanl.gov)\nDan Schult (dschult@colgate.edu)"""
# Copyright (C) 2004-2015 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
__all__ = ['kl_connected_subgraph', 'is_kl_connected']
import copy
import networkx as nx
def kl_connected_subgraph(G, k, l, low_memory=False, same_as_graph=False):
"""Returns the maximum locally `(k, l)`-connected subgraph of ``G``.
A graph is locally `(k, l)`-connected if for each edge `(u, v)` in the
graph there are at least `l` edge-disjoint paths of length at most `k`
joining `u` to `v`.
Parameters
----------
G : NetworkX graph
The graph in which to find a maximum locally `(k, l)`-connected
subgraph.
k : integer
The maximum length of paths to consider. A higher number means a looser
connectivity requirement.
l : integer
The number of edge-disjoint paths. A higher number means a stricter
connectivity requirement.
low_memory : bool
If this is ``True``, this function uses an algorithm that uses slightly
more time but less memory.
same_as_graph : bool
If this is ``True`` then return a tuple of the form ``(H, is_same)``,
where ``H`` is the maximum locally `(k, l)`-connected subgraph and
``is_same`` is a Boolean representing whether ``G`` is locally `(k,
l)`-connected (and hence, whether ``H`` is simply a copy of the input
graph ``G``).
Returns
-------
NetworkX graph or two-tuple
If ``same_as_graph`` is ``True``, then this function returns a
two-tuple as described above. Otherwise, it returns only the maximum
locally `(k, l)`-connected subgraph.
See also
--------
is_kl_connected
References
----------
.. [1]: Chung, Fan and Linyuan Lu. "The Small World Phenomenon in Hybrid
Power Law Graphs." *Complex Networks*. Springer Berlin Heidelberg,
2004. 89--104.
"""
H=copy.deepcopy(G) # subgraph we construct by removing from G
graphOK=True
deleted_some=True # hack to start off the while loop
while deleted_some:
deleted_some=False
for edge in H.edges():
(u,v)=edge
### Get copy of graph needed for this search
if low_memory:
verts=set([u,v])
for i in range(k):
[verts.update(G.neighbors(w)) for w in verts.copy()]
G2=G.subgraph(list(verts))
else:
G2=copy.deepcopy(G)
###
path=[u,v]
cnt=0
accept=0
while path:
cnt += 1 # Found a path
if cnt>=l:
accept=1
break
                # record edges along this graph
prev=u
for w in path:
if prev!=w:
G2.remove_edge(prev,w)
prev=w
# path=shortest_path(G2,u,v,k) # ??? should "Cutoff" be k+1?
try:
path=nx.shortest_path(G2,u,v) # ??? should "Cutoff" be k+1?
except nx.NetworkXNoPath:
path = False
# No Other Paths
if accept==0:
H.remove_edge(u,v)
deleted_some=True
if graphOK: graphOK=False
# We looked through all edges and removed none of them.
# So, H is the maximal (k,l)-connected subgraph of G
if same_as_graph:
return (H,graphOK)
return H
def is_kl_connected(G, k, l, low_memory=False):
"""Returns ``True`` if and only if ``G`` is locally `(k, l)`-connected.
A graph is locally `(k, l)`-connected if for each edge `(u, v)` in the
graph there are at least `l` edge-disjoint paths of length at most `k`
joining `u` to `v`.
Parameters
----------
G : NetworkX graph
The graph to test for local `(k, l)`-connectedness.
k : integer
The maximum length of paths to consider. A higher number means a looser
connectivity requirement.
l : integer
The number of edge-disjoint paths. A higher number means a stricter
connectivity requirement.
low_memory : bool
If this is ``True``, this function uses an algorithm that uses slightly
more time but less memory.
Returns
-------
bool
        Whether the graph is locally `(k, l)`-connected.
See also
--------
kl_connected_subgraph
References
----------
.. [1]: Chung, Fan and Linyuan Lu. "The Small World Phenomenon in Hybrid
Power Law Graphs." *Complex Networks*. Springer Berlin Heidelberg,
2004. 89--104.
"""
graphOK=True
for edge in G.edges():
(u,v)=edge
### Get copy of graph needed for this search
if low_memory:
verts=set([u,v])
for i in range(k):
[verts.update(G.neighbors(w)) for w in verts.copy()]
G2=G.subgraph(verts)
else:
G2=copy.deepcopy(G)
###
path=[u,v]
cnt=0
accept=0
while path:
cnt += 1 # Found a path
if cnt>=l:
accept=1
break
# record edges along this graph
prev=u
for w in path:
if w!=prev:
G2.remove_edge(prev,w)
prev=w
# path=shortest_path(G2,u,v,k) # ??? should "Cutoff" be k+1?
try:
path=nx.shortest_path(G2,u,v) # ??? should "Cutoff" be k+1?
except nx.NetworkXNoPath:
path = False
# No Other Paths
if accept==0:
graphOK=False
break
# return status
return graphOK
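# Editor's usage sketch (illustrative, not part of the upstream module): in a
# complete graph on five nodes each edge has the direct connection plus three
# length-2 detours through the remaining nodes, so the graph is locally
# (2, 2)-connected and the maximal (2, 2)-connected subgraph is the whole graph.
if __name__ == '__main__':
    G = nx.complete_graph(5)
    assert is_kl_connected(G, 2, 2)
    H = kl_connected_subgraph(G, 2, 2)
    assert H.number_of_edges() == G.number_of_edges()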
|
mogui/pyorient
|
tests/test_raw_messages_2.py
|
Python
|
apache-2.0
| 12,897
| 0.016283
|
__author__ = 'Ostico <ostico@gmail.com>'
import sys
import os
import unittest
from pyorient.exceptions import *
from pyorient import OrientSocket
from pyorient.messages.database import *
from pyorient.messages.commands import *
from pyorient.messages.cluster import *
from pyorient.messages.records import *
from pyorient.messages.connection import *
from pyorient.constants import DB_TYPE_DOCUMENT, QUERY_SYNC, \
STORAGE_TYPE_PLOCAL, DB_TYPE_GRAPH, STORAGE_TYPE_MEMORY
os.environ['DEBUG'] = "0"
os.environ['DEBUG_VERBOSE'] = "0"
if os.path.realpath( '../' ) not in sys.path:
sys.path.insert( 0, os.path.realpath( '../' ) )
if os.path.realpath( '.' ) not in sys.path:
sys.path.insert( 0, os.path.realpath( '.' ) )
class RawMessages_2_TestCase(unittest.TestCase):
""" Command Test Case """
def test_record_object(self):
x = OrientRecord()
assert x._rid is None
assert x._version is None
assert x._class is None
def test_record_load(self):
connection = OrientSocket( "localhost", 2424 )
assert connection.session_id == -1
# ##################
msg = DbOpenMessage( connection )
db_name = "GratefulDeadConcerts"
cluster_info = msg.prepare(
(db_name, "admin", "admin", DB_TYPE_DOCUMENT, "")
).send().fetch_response()
assert len(cluster_info) != 0
def _test_callback(record):
assert record is not []
assert record._rid is not None # assert no exception
req_msg = RecordLoadMessage( connection )
res = req_msg.prepare( [ "#11:0", "*:2", _test_callback ] ) \
.send().fetch_response()
assert res._rid == "#11:0"
assert res._class == 'followed_by'
assert res._in != 0
assert res._out != 0
def test_record_count_with_no_opened_db(self):
connection = OrientSocket( "localhost", 2424 )
assert connection.session_id == -1
# ##################
conn_msg = ConnectMessage( connection )
session_id = conn_msg.prepare( ("root", "root") )\
.send().fetch_response()
assert session_id == connection.session_id
assert session_id != -1
try:
count_msg = DbCountRecordsMessage( connection )
res = count_msg.prepare().send().fetch_response()
assert False # we expect an exception because we need a db opened
except PyOrientDatabaseException:
assert True
def test_record_count(self):
connection = OrientSocket( "localhost", 2424 )
assert connection.session_id == -1
# ##################
msg = DbOpenMessage( connection )
db_name = "GratefulDeadConcerts"
cluster_info = msg.prepare(
(db_name, "admin", "admin", DB_TYPE_DOCUMENT, "")
).send().fetch_response()
assert len(cluster_info) != 0
session_id = connection.session_id
assert session_id != -1
count_msg = DbCountRecordsMessage( connection )
res = count_msg.prepare().send().fetch_response()
        assert res != 0
assert res > 0
def test_record_create_update(self):
connection = OrientSocket( "localhost", 2424 )
conn_msg = ConnectMessage( connection )
assert connection.protocol != -1
session_id = conn_msg.prepare( ("root", "root") ) \
.send().fetch_response()
assert session_id == connection.session_id
assert session_id != -1
# ##################
db_name = "my_little_test"
msg = DbExistsMessage( connection )
exists = msg.prepare( [db_name] ).send().fetch_response()
print("Before %r" % exists)
try:
( DbDropMessage( connection ) ).prepare([db_name]) \
.send().fetch_response()
assert True
except PyOrientCommandException as e:
print(str(e))
finally:
( DbCreateMessage( connection ) ).prepare(
(db_name, DB_TYPE_GRAPH, STORAGE_TYPE_MEMORY)
).send().fetch_response()
msg = DbOpenMessage( connection )
cluster_info = msg.prepare(
(db_name, "admin", "admin", DB_TYPE_GRAPH, "")
).send().fetch_response()
assert len(cluster_info) != 0
try:
create_class = CommandMessage(connection)
cluster = create_class.prepare((QUERY_CMD, "create class my_class "
"extends V"))\
.send().fetch_response()[0]
except PyOrientCommandException:
# class my_class already exists
pass
# classes are not allowed in record create/update/load
rec = { '@my_class': { 'alloggio': 'casa', 'lavoro': 'ufficio', 'vacanza': 'mare' } }
rec_position = ( RecordCreateMessage(connection) )\
.prepare( ( cluster, rec ) )\
.send().fetch_response()
print("New Rec Position: %s" % rec_position._rid)
assert rec_position._rid is not None
rec = { '@my_class': { 'alloggio': 'albergo', 'lavoro': 'ufficio', 'vacanza': 'montagna' } }
update_success = ( RecordUpdateMessage(connection) )\
.prepare( ( cluster, rec_position._rid, rec ) )\
.send().fetch_response()
assert update_success[0] != 0
if connection.protocol <= 21:
            return unittest.skip("Protocol {!r} does not work well".format(
                connection.protocol ))  # skip test
res = ( CommandMessage( connection ) )\
.prepare( [ QUERY_SYNC, "select from " + rec_position._rid ] )\
.send().fetch_response()
# res = [ ( RecordLoadMessage(connection) ).prepare(
# [ rec_position._rid ]
# ).send().fetch_response() ]
print("%r" % res[0]._rid)
print("%r" % res[0]._class)
print("%r" % res[0]._version)
print("%r" % res[0].alloggio)
print("%r" % res[0].lavoro)
print("%r" % res[0].vacanza)
assert res[0]._rid == '#11:0'
# assert res[0]._class == 'my_class'
assert res[0]._version >= 0
assert res[0].alloggio == 'albergo'
assert res[0].lavoro == 'ufficio'
assert res[0].vacanza == 'montagna'
sid = ( ConnectMessage( connection ) ).prepare( ("root", "root") ) \
.send().fetch_response()
# at the end drop the test database
( DbDropMessage( connection ) ).prepare([db_name]) \
.send().fetch_response()
def test_record_delete(self):
connection = OrientSocket( "localhost", 2424 )
conn_msg = ConnectMessage( connection )
assert connection.protocol != -1
session_id = conn_msg.prepare( ("root", "root") ) \
.send().fetch_response()
print("Sid: %s" % session_id)
assert session_id == connection.session_id
assert session_id != -1
db_name = "my_little_test"
msg = DbExistsMessage( connection )
exists = msg.prepare( [db_name] ).send().fetch_response()
print("Before %r" % exists)
try:
( DbDropMessage( connection ) ).prepare([db_name]) \
.send().fetch_response()
assert True
except PyOrientCommandException as e:
print(str(e))
finally:
( DbCreateMessage( connection ) ).prepare(
(db_name, DB_TYPE_DOCUMENT, STORAGE_TYPE_MEMORY)
).send().fetch_response()
msg = DbOpenMessage( connection )
cluster_info = msg.prepare(
(db_name, "admin", "admin", DB_TYPE_DOCUMENT, "")
).send().fetch_response()
assert len(cluster_info) != 0
rec = { 'alloggio': 'casa', 'lavoro': 'ufficio', 'vacanza': 'mare' }
rec_position = ( RecordCreateMessage(connection) )\
.prepare( ( 1, rec ) )\
.send().fetch_response()
print("New Rec Position: %s" % rec_position._rid)
assert rec_position._rid is not None
######################## Check Success
|
clarkperkins/stackdio
|
stackdio/api/formulas/exceptions.py
|
Python
|
apache-2.0
| 753
| 0
|
# -*- coding: utf-8 -*-
# Copyright 2017, Digital Reasoning
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import unicode_literals
class InvalidFormula(Exception):
pass
class InvalidFormulaComponent(InvalidFormula):
pass
|
facebookresearch/ParlAI
|
parlai/tasks/dialog_babi/build.py
|
Python
|
mit
| 1,182
| 0
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Download and build the data if it does not exist.
from parlai.core.build_data import DownloadableFile
import parlai.core.build_data as build_data
import os
RESOURCES = [
DownloadableFile(
'http://parl.ai/downloads/dialog_babi/dialog_babi.tar.gz',
'dialog_babi.tar.gz',
'bb36155ccd41eac91f806446c5728ee90374e5596156a9f7c1b86f8342cfc383',
)
]
def build(opt):
    dpath = os.path.join(opt['datapath'], 'dialog-bAbI')
version = None
if not build_data.built(dpath, version_string=version):
print('[building data: ' + dpath + ']')
if build_data.built(dpath):
# An older version exists, so remove these outdated files.
build_data.remove_dir(dpath)
build_data.make_dir(dpath)
# Download the data.
for downloadable_file in RESOURCES:
downloadable_file.download_file(dpath)
# Mark the data as built.
build_data.mark_done(dpath, version_string=version)
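# Editor's usage sketch: ParlAI normally invokes build(opt) itself when the
# dialog_babi teacher is first used, but the function can also be called
# directly with an opt dict carrying 'datapath' (the path below is a
# placeholder).
#
#   build({'datapath': '/tmp/ParlAI/data'})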
|
barrachri/epcon
|
microblog/views.py
|
Python
|
bsd-2-clause
| 7,631
| 0.003014
|
# -*- coding: UTF-8 -*-
from django.conf import settings as dsettings
from django.contrib.auth import models as authModels
from django.core.paginator import Paginator, InvalidPage, EmptyPage
from django.http import HttpResponse, Http404
from django.shortcuts import render, render_to_response, get_object_or_404
from django.template import RequestContext
from django.template.defaultfilters import slugify
from microblog import models, settings
from taggit.models import Tag, TaggedItem
from decorator import decorator
try:
import json
except ImportError:
import simplejson as json
def render_json(f):
"""
decoratore da applicare ad una vista per serializzare in json il risultato.
"""
if dsettings.DEBUG:
ct = 'text/plain'
j = lambda d: json.dumps(d, indent=2)
else:
ct = 'application/json'
j = json.dumps
def wrapper(func, *args, **kw):
try:
result = func(*args, **kw)
except Exception, e:
result = j(str(e))
status = 500
else:
if isinstance(result, HttpResponse):
return result
else:
result = j(result)
status = 200
return HttpResponse(content=result, content_type=ct, status=status)
return decorator(wrapper, f)
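# Editor's usage sketch (illustrative; `api_stats` is a hypothetical view, not
# part of this app): because the wrapper serializes whatever the view returns,
# a JSON endpoint can simply return a plain dict.
#
# @render_json
# def api_stats(request):
#     return {'posts': models.Post.objects.count()}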
def post_list(request):
return render(request, 'microblog/post_list.html', {})
def category(request, category):
category = get_object_or_404(models.Category, name=category)
return render_to_response(
'microblog/category.html',
{
'category': category,
},
context_instance=RequestContext(request)
)
def post_list_by_year(request, year, month=None):
return render_to_response(
'microblog/list_by_year.html',
{
'year': year,
'month': month,
},
context_instance=RequestContext(request)
)
def tag(request, tag):
tag = get_object_or_404(Tag, name=tag)
return render_to_response(
'microblog/tag.html',
{
'tag': tag,
},
context_instance=RequestContext(request)
)
def author(request, author):
user = [
u for u in authModels.User.objects.all()
if slugify('%s-%s' % (u.first_name, u.last_name)) == author
]
if not user:
raise Http404()
else:
user = user[0]
return render_to_response(
'microblog/author.html',
{
'author': user,
},
context_instance=RequestContext(request)
)
def _paginate_posts(post_list, request):
if settings.MICROBLOG_POST_LIST_PAGINATION:
paginator = Paginator(post_list, settings.MICROBLOG_POST_PER_PAGE)
try:
page = int(request.GET.get("page", "1"))
except ValueError:
page = 1
try:
posts = paginator.page(page)
        except (EmptyPage, InvalidPage):
posts = paginator.page(1)
else:
paginator = Paginator(post_list, len(post_list) or 1)
posts = paginator.page(1)
return posts
def _posts_list(request, featured=False):
if settings.MICROBLOG_LANGUAGE_FALLBACK_ON_POST_LIST:
lang = None
else:
lang = request.LANGUAGE_CODE
return models.Post.objects\
.byLanguage(lang)\
.byFeatured(featured)\
.published()
def _post_detail(request, content):
if not settings.MICROBLOG_POST_FILTER([content.post], request.user):
raise Http404()
return render_to_response(
'microblog/post_detail.html',
{
'post': content.post,
'content': content
},
context_instance=RequestContext(request)
)
def _trackback_ping(request, content):
def success():
x = ('<?xml version="1.0" encoding="utf-8"?>\n'
'<response><error>0</error></response>')
return HttpResponse(content=x, content_type='text/xml')
def failure(message=''):
x = ('<?xml version="1.0" encoding="utf-8"?>\n'
'<response><error>1</error><message>%s</message></response>') % message
return HttpResponse(content=x, content_type='text/xml', status=400)
if request.method != 'POST':
return failure('only POST method is supported')
if not request.POST.get('url'):
return failure('url argument is mandatory')
t = {
'url': request.POST['url'],
'blog_name': request.POST.get('blog_name', ''),
'title': request.POST.get('title', ''),
'excerpt': request.POST.get('excerpt', ''),
}
from microblog.moderation import moderate
if not moderate(request, 'trackback', t['title'], url=t['url']):
return failure('moderated')
content.new_trackback(**t)
return success()
@render_json
def _comment_count(request, content):
post = content.post
if settings.MICROBLOG_COMMENT == 'comment':
import django_comments as comments
from django.contrib.contenttypes.models import ContentType
model = comments.get_model()
q = model.objects.filter(
content_type=ContentType.objects.get_for_model(post),
object_pk=post.id,
is_public=True
)
return q.count()
else:
import httplib2
from urllib import quote
h = httplib2.Http()
params = {
'forum_api_key': settings.MICROBLOG_COMMENT_DISQUS_FORUM_KEY,
'url': content.get_url(),
}
args = '&'.join('%s=%s' % (k, quote(v)) for k, v in params.items())
url = settings.MICROBLOG_COMMENT_DISQUS_API_URL + 'get_thread_by_url?%s' % args
resp, page = h.request(url)
if resp.status != 200:
return -1
page = json.loads(page)
if not page['succeeded']:
return -1
elif page['message'] is None:
return 0
else:
return page['message']['num_comments']
def _post404(f):
def wrapper(*args, **kw):
try:
return f(*args, **kw)
except models.PostContent.DoesNotExist:
raise Http404()
return wrapper
if settings.MICROBLOG_URL_STYLE == 'date':
def _get(slug, year, month, day):
return models.PostContent.objects\
.select_related('post')\
.getBySlugAndDate(slug, year, month, day)
@_post404
def post_detail(request, year, month, day, slug):
return _post_detail(
request,
content=_get(slug, year, month, day)
)
@_post404
def trackback_ping(request, year, month, day, slug):
return _trackback_ping(
request,
content=_get(slug, year, month, day)
)
@_post404
def comment_count(request, year, month, day, slug):
return _comment_count(
request,
content = _get(slug, year, month, day)
)
elif settings.MICROBLOG_URL_STYLE == 'category':
def _get(slug, category):
return models.PostContent.objects\
.select_related('post')\
.getBySlugAndCategory(slug, category)
@_post404
def post_detail(request, category, slug):
return _post_detail(
request,
content=_get(slug, category),
)
@_post404
def trackback_ping(request, category, slug):
return _trackback_ping(
request,
content=_get(slug, category),
)
@_post404
def comment_count(request, category, slug):
return _comment_count(
request,
content=_get(slug, category),
)
|
nick-monto/SpeechRecog_CNN
|
create_spectrograms_16k.py
|
Python
|
mit
| 6,802
| 0.002205
|
#!/usr/bin/env python
import os
import numpy as np
import math
import fnmatch
from my_spectrogram import my_specgram
from collections import OrderedDict
from scipy.io import wavfile
import matplotlib.pylab as plt
from pylab import rcParams
from sklearn.model_selection import train_test_split
rcParams['figure.figsize'] = 6, 3
SCRIPT_DIR = os.getcwd()
INPUT_FOLDER = 'Input_audio_wav_16k/'
OUTPUT_FOLDER = 'Input_spectrogram_16k/'
languages = os.listdir(INPUT_FOLDER)
languages.sort()
audio_dict = OrderedDict()
for l in languages:
audio_dict[l] = sorted(os.listdir(INPUT_FOLDER + l))
def plot_spectrogram(audiopath, plotpath=None, NFFT_window=0.025,
noverlap_window=0.023, freq_min=None, freq_max=None,
axis='off'):
    fs, data = wavfile.read(audiopath)
data = data / data.max()
center = data.mean() * 0.2
data = data + np.random.normal(center, abs(center * 0.5), len(data))
NFFT = pow(2, int(math.log(int(fs*NFFT_window), 2) + 0.5)) # 25ms window, nearest power of 2
noverlap = int(fs*noverlap_window)
    fc = int(np.sqrt(freq_min*freq_max))
# Pxx is the segments x freqs array of instantaneous power, freqs is
# the frequency vector, bins are the centers of the time bins in which
# the power is computed, and im is the matplotlib.image.AxesImage
# instance
Pxx, freqs, bins, im = my_specgram(data, NFFT=NFFT, Fs=fs,
Fc=fc, detrend=None,
window=np.hanning(NFFT),
noverlap=noverlap, cmap='Greys',
xextent=None,
pad_to=None, sides='default',
scale_by_freq=None,
minfreq=freq_min, maxfreq=freq_max)
plt.axis(axis)
im.axes.axis('tight')
im.axes.get_xaxis().set_visible(False)
im.axes.get_yaxis().set_visible(False)
if plotpath:
plt.savefig(plotpath, bbox_inches='tight',
transparent=False, pad_inches=0, dpi=96)
else:
plt.show()
plt.clf()
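# Editor's note (illustrative call; the wav and jpeg paths are placeholders):
# a single spectrogram can be rendered with the same parameters the batch
# loops below pass for the training set.
#
#   plot_spectrogram('Input_audio_wav_16k/English/sample_001.wav',
#                    plotpath='sample_001.jpeg',
#                    NFFT_window=0.025, noverlap_window=0.023,
#                    freq_min=0, freq_max=5500)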
# same as training but no added noise
def plot_spectrogram_val(audiopath, plotpath=None, NFFT_window=0.025,
noverlap_window=0.023, freq_min=None, freq_max=None,
axis='off'):
fs, data = wavfile.read(audiopath)
data = data / data.max()
NFFT = pow(2, int(math.log(int(fs*NFFT_window), 2) + 0.5)) # 25ms window, nearest power of 2
noverlap = int(fs*noverlap_window)
fc = int(np.sqrt(freq_min*freq_max))
# Pxx is the segments x freqs array of instantaneous power, freqs is
# the frequency vector, bins are the centers of the time bins in which
# the power is computed, and im is the matplotlib.image.AxesImage
# instance
Pxx, freqs, bins, im = my_specgram(data, NFFT=NFFT, Fs=fs,
Fc=fc, detrend=None,
window=np.hanning(NFFT),
noverlap=noverlap, cmap='Greys',
xextent=None,
pad_to=None, sides='default',
scale_by_freq=None,
minfreq=freq_min, maxfreq=freq_max)
plt.axis(axis)
im.axes.axis('tight')
im.axes.get_xaxis().set_visible(False)
im.axes.get_yaxis().set_visible(False)
if plotpath:
plt.savefig(plotpath, bbox_inches='tight',
transparent=False, pad_inches=0, dpi=96)
else:
plt.show()
plt.clf()
# create spectrograms of randomly drawn samples from each language
def find(pattern, path):
result = []
for root, dirs, files in os.walk(path):
for name in files:
if fnmatch.fnmatch(name, pattern):
result.append(os.path.join(root, name))
return result[0]
random_wav = []
for key in audio_dict:
random_wav.append(sorted(np.random.choice(audio_dict[key], 500, replace=False)))
training_list = []
validation_list = []
for i in range(0, len(random_wav)):
x_train, x_val = train_test_split(random_wav[i],
test_size=0.4,
random_state=42)
training_list.append(x_train)
validation_list.append(x_val)
if not os.path.exists(OUTPUT_FOLDER + 'Training'):
os.makedirs(OUTPUT_FOLDER + 'Training')
print('Successfully created a training folder!')
print('Populating training folder with spectrograms...')
for i in range(0, len(training_list)):
if not os.path.exists(OUTPUT_FOLDER + 'Training/' + str(languages[i])):
os.makedirs(OUTPUT_FOLDER + 'Training/' + str(languages[i]))
print('Successfully created a {} training folder!'.format(languages[i]))
print('Populating {} training folder with spectrograms...'.format(languages[i]))
for j in range(0, len(training_list[i])):
for k in range(0, 3):
plot_spectrogram(find(training_list[i][j], INPUT_FOLDER),
plotpath=OUTPUT_FOLDER + 'Training/' +
str(languages[i]) + '/' +
str(training_list[i][j][:-4]) + '_' +
str(k) + '.jpeg',
NFFT_window=0.025, noverlap_window=0.023,
freq_min=0, freq_max=5500)
print('Done with {}.'.format(training_list[i][j][:-4]))
if not os.path.exists(OUTPUT_FOLDER + 'Validation'):
os.makedirs(OUTPUT_FOLDER + 'Validation')
print('Successfully created a validation folder!')
print('Populating validation folder with spectrograms...')
for i in range(0, len(validation_list)):
if not os.path.exists(OUTPUT_FOLDER + 'Validation/' + str(languages[i])):
os.makedirs(OUTPUT_FOLDER + 'Validation/' + str(languages[i]))
print('Successfully created a {} validation folder!'.format(languages[i]))
print('Populating {} validation folder with spectrograms...'.format(languages[i]))
for j in range(0, len(validation_list[i])):
for k in range(0, 1):
plot_spectrogram_val(find(validation_list[i][j], INPUT_FOLDER),
plotpath=OUTPUT_FOLDER + 'Validation/' +
str(languages[i]) + '/' +
str(validation_list[i][j][:-4]) + '_' +
str(k) + '.jpeg',
NFFT_window=0.025, noverlap_window=0.023,
freq_min=0, freq_max=5500)
print('Done with {}.'.format(validation_list[i][j][:-4]))
|
cypreess/csvkit
|
csvkit/cli.py
|
Python
|
mit
| 15,243
| 0.007479
|
#!/usr/bin/env python
import argparse
import bz2
import gzip
import os.path
import sys
from csvkit import CSVKitReader
from csvkit.exceptions import ColumnIdentifierError, RequiredHeaderError
def lazy_opener(fn):
def wrapped(self, *args, **kwargs):
self._lazy_open()
fn(*args, **kwargs)
return wrapped
class LazyFile(object):
"""
A proxy for a File object that delays opening it until
a read method is called.
Currently this implements only the minimum methods to be useful,
but it could easily be expanded.
"""
def __init__(self, init, *args, **kwargs):
self.init = init
self.f = None
self._is_lazy_opened = False
self._lazy_args = args
self._lazy_kwargs = kwargs
def __getattr__(self, name):
if not self._is_lazy_opened:
self.f = self.init(*self._lazy_args, **self._lazy_kwargs)
self._is_lazy_opened = True
return getattr(self.f, name)
def __iter__(self):
return self
def close(self):
self.f.close()
self.f = None
self._is_lazy_opened = False
def next(self):
if not self._is_lazy_opened:
self.f = self.init(*self._lazy_args, **self._lazy_kwargs)
self._is_lazy_opened = True
return self.f.next()
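# Editor's sketch (not part of csvkit; 'data.csv' is a placeholder path): the
# proxy defers the real open() until the first attribute access or read, which
# is what lets CSVFileType below hand out file objects for paths that may never
# actually be read.
#
#   lazy = LazyFile(open, 'data.csv', 'rb')   # nothing is opened yet
#   first = lazy.readline()                   # the file is opened here
#   lazy.close()                              # back to the unopened, lazy state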
class CSVFileType(object):
"""
An argument factory like argparse.FileType with compression support.
"""
def __init__(self, mode='rb'):
"""
Initialize the factory.
"""
self._mode = mode
def __call__(self, path):
"""
Build a file-like object from the specified path.
"""
if path == '-':
if 'r' in self._mode:
return sys.stdin
elif 'w' in self._mode:
return sys.stdout
else:
raise ValueError('Invalid path "-" with mode {0}'.format(self._mode))
else:
(_, extension) = os.path.splitext(path)
if extension == '.gz':
return LazyFile(gzip.open, path, self._mode)
if extension == '.bz2':
return LazyFile(bz2.BZ2File, path, self._mode)
else:
return LazyFile(open, path, self._mode)
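# Editor's sketch (illustrative, hypothetical file names): the factory is meant
# to be used as an argparse `type=`, and chooses the opener from the extension.
#
#   CSVFileType('rb')('-')            # sys.stdin
#   CSVFileType('rb')('rows.csv.gz')  # LazyFile wrapping gzip.open
#   CSVFileType('rb')('rows.csv')     # LazyFile wrapping open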
class CSVKitUtility(object):
description = ''
epilog = ''
override_flags = ''
def __init__(self, args=None, output_file=None):
"""
Perform argument processing and other setup for a CSVKitUtility.
"""
self._init_common_parser()
self.add_arguments()
self.args = self.argparser.parse_args(args)
self.reader_kwargs = self._extract_csv_reader_kwargs()
self.writer_kwargs = self._extract_csv_writer_kwargs()
self._install_exception_handler()
if output_file is None:
self.output_file = sys.stdout
else:
self.output_file = output_file
# Ensure SIGPIPE doesn't throw an exception
# Prevents [Errno 32] Broken pipe errors, e.g. when piping to 'head'
# To test from the shell:
# python -c "for i in range(5000): print 'a,b,c'" | csvlook | head
# Without this fix you will see at the end:
# [Errno 32] Broken pipe
# With this fix, there should be no error
# For details on Python and SIGPIPE, see http://bugs.python.org/issue1652
try:
import signal
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
except (ImportError, AttributeError):
#Do nothing on platforms that don't have signals or don't have SIGPIPE
pass
def add_arguments(self):
"""
Called upon initialization once the parser for common arguments has been constructed.
        Should be overridden by individual utilities.
"""
raise NotImplementedError('add_arguments must be provided by each subclass of CSVKitUtility.')
def main(self):
"""
Main loop of the utility.
        Should be overridden by individual utilities and explicitly called by the executing script.
        """
        raise NotImplementedError('main must be provided by each subclass of CSVKitUtility.')
def _init_common_parser(self):
"""
Prepare a base argparse argument parser so that flags are consistent across different shell command tools.
If you want to constrain which common args are present, you can pass a string for 'omitflags'. Any argument
whose single-letter form is contained in 'omitflags' will be left out of the configured parser. Use 'f' for
file.
"""
self.argparser = argparse.ArgumentParser(description=self.description, epilog=self.epilog)
# Input
if 'f' not in self.override_flags:
self.argparser.add_argument('file', metavar="FILE", nargs='?', type=CSVFileType(), default=sys.stdin,
help='The CSV file to operate on. If omitted, will accept input on STDIN.')
if 'd' not in self.override_flags:
self.argparser.add_argument('-d', '--delimiter', dest='delimiter',
help='Delimiting character of the input CSV file.')
if 't' not in self.override_flags:
self.argparser.add_argument('-t', '--tabs', dest='tabs', action='store_true',
help='Specifies that the input CSV file is delimited with tabs. Overrides "-d".')
if 'q' not in self.override_flags:
self.argparser.add_argument('-q', '--quotechar', dest='quotechar',
help='Character used to quote strings in the input CSV file.')
if 'u' not in self.override_flags:
self.argparser.add_argument('-u', '--quoting', dest='quoting', type=int, choices=[0,1,2,3],
help='Quoting style used in the input CSV file. 0 = Quote Minimal, 1 = Quote All, 2 = Quote Non-numeric, 3 = Quote None.')
if 'b' not in self.override_flags:
self.argparser.add_argument('-b', '--doublequote', dest='doublequote', action='store_true',
help='Whether or not double quotes are doubled in the input CSV file.')
if 'p' not in self.override_flags:
self.argparser.add_argument('-p', '--escapechar', dest='escapechar',
help='Character used to escape the delimiter if --quoting 3 ("Quote None") is specified and to escape the QUOTECHAR if --doublequote is not specified.')
if 'z' not in self.override_flags:
self.argparser.add_argument('-z', '--maxfieldsize', dest='maxfieldsize', type=int,
help='Maximum length of a single field in the input CSV file.')
if 'e' not in self.override_flags:
self.argparser.add_argument('-e', '--encoding', dest='encoding', default='utf-8',
                                        help='Specify the encoding of the input CSV file.')
if 'S' not in self.override_flags:
self.argparser.add_argument('-S', '--skipinitialspace', dest='skipinitialspace', default=False, action='store_true',
help='Ignore whitespace immediately following the delimiter.')
if 'H' not in self.override_flags:
self.argparser.add_argument('-H', '--no-header-row', dest='no_header_row', action='store_true',
help='Specifies that the input CSV file has no header row. Will create default headers.')
if 'v' not in self.override_flags:
self.argparser.add_argument('-v', '--verbose', dest='verbose', action='store_true',
help='Print detailed tracebacks when errors occur.')
# Output
if 'l' not in self.override_flags:
self.argparser.add_argument('-l', '--linenumbers', dest='line_numbers', action='store_true',
help='Insert a column of line numbers at the front of the output. Useful when piping to grep or as a simple primary key.')
# Input/Output
if 'zero' not in self.override_flags:
self.argpars
|
szaydel/psutil
|
test/_sunos.py
|
Python
|
bsd-3-clause
| 1,322
| 0.000756
|
#!/usr/bin/env python
# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Sun OS specific tests. These are implicitly run by test_psutil.py."""
import psutil
from test_psutil import *
class SunOSSpecificTestCase(unittest.TestCase):
def test_swap_memory(self):
out = sh('swap -l -k')
lines = out.strip().split('\n')[1:]
if not lines:
raise ValueError('no swap device(s) configured')
total = free = 0
        for line in lines:
line = line.split()
t, f = line[-2:]
t = t.replace('K', '')
f = f.replace('K', '')
total += int(int(t) * 1024)
free += int(int(f) * 1024)
used = total - free
psutil_swap = psutil.swap_memory()
self.assertEqual(psutil_swap.total, total)
self.assertEqual(psutil_swap.used, used)
self.assertEqual(psutil_swap.free, free)
def test_main():
test_suite = unittest.TestSuite()
test_suite.addTest(unittest.makeSuite(SunOSSpecificTestCase))
result = unittest.TextTestRunner(verbosity=2).run(test_suite)
return result.wasSuccessful()
if __name__ == '__main__':
if not test_main():
sys.exit(1)
|
akkana/pi-zero-w-book
|
ch2/blink-rpigpio.py
|
Python
|
gpl-2.0
| 468
| 0
|
#!/usr/bin/env python
# Blink an LED using the RPi.GPIO library.
import RPi.GPIO as GPIO
from time import sleep
# Use GPIO numbering:
GPIO.setmode(GPIO.BCM)
# Set pin GPIO 14 to be output:
GPIO.setup(14, GPIO.OUT)
try:
while True:
GPIO.output(14, GPIO.HIGH)
sleep(.5)
GPIO.output(14, GPIO.LOW)
sleep(.5)
# If we get a Ctrl-C, clean up so we don't get warnings from other programs:
except KeyboardInterrupt:
GPIO.cleanup()
|
RobLoach/lutris
|
tests/check_prefixes.py
|
Python
|
gpl-3.0
| 1,465
| 0.002048
|
#!/usr/bin/python3
import os
import sys
import subprocess
sys.path.insert(0,
                os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from lutris.util.wineregistry import WineRegistry
PREFIXES_PATH = os.path.expanduser("~/Games/wine/prefixes")
def get_registries():
registries = []
directories = os.listdir(PREFIXES_PATH)
directories.append(os.path.expanduser("~/.wine"))
for prefix in directories:
for path in os.listdir(os.path.join(PREFIXES_PATH, prefix)):
if path.endswith(".reg"):
registries.append(os.path.join(PREFIXES_PATH, prefix, path))
return registries
def check_registry(registry_path):
with open(registry_path, 'r') as registry_file:
original_content = registry_file.read()
try:
registry = WineRegistry(registry_path)
except:
sys.stderr.write("Error parsing {}\n".format(registry_path))
raise
content = registry.render()
if content != original_content:
wrong_path = os.path.join(os.path.dirname(__file__), 'error.reg')
with open(wrong_path, 'w') as wrong_reg:
wrong_reg.write(content)
print("Content of parsed registry doesn't match: {}".format(registry_path))
subprocess.call(["meld", registry_path, wrong_path])
sys.exit(2)
registries = get_registries()
for registry in registries:
check_registry(registry)
print("All {} registry files validated!".format(len(registries)))
|
zrhans/python
|
exemplos/Examples.lnk/bokeh/glyphs/dateaxis.py
|
Python
|
gpl-2.0
| 1,293
| 0
|
from __future__ import print_function
from numpy import pi, arange, sin
import numpy as np
import time
from bokeh.browserlib import view
from bokeh.document import Document
from bokeh.embed import file_html
from bokeh.models.glyphs import Circle
from bokeh.models import (
Plot, DataRange1d, DatetimeAxis,
ColumnDataSource, PanTool, WheelZoomTool
)
from bokeh.resources import INLINE
x = arange(-2 * pi, 2 * pi, 0.1)
y = sin(x)
# Create an array of times, starting at the current time, and extending
# for len(x) number of hours.
times = np.arange(len(x)) * 3600000 + time.time()
source = ColumnDataSource(
data=dict(x=x, y=y, times=times)
)
xdr = DataRange1d(sources=[source.columns("times")])
ydr = DataRange1d(sources=[source.columns("y")])
plot = Plot(x_range=xdr, y_range=ydr, min_border=80)
circle = Circle(x="times", y="y", fill_color="red", size=5, line_color="black")
plot.add_glyph(source, circle)
plot.add_layout(DatetimeAxis(), 'below')
plot.add_layout(DatetimeAxis(), 'left')
plot.add_tools(PanTool(), WheelZoomTool())
doc = Document()
doc.add(plot)
if __name__ == "__main__":
filename = "date
|
axis.html"
with open(filename, "w") as f:
f.write(file_html(doc, INLINE, "Date Axis
|
Example"))
print("Wrote %s" % filename)
view(filename)
|
joeymeyer/raspberryturk
|
raspberryturk/core/vision/chessboard_frame.py
|
Python
|
mit
| 371
| 0.002695
|
import numpy as np
from square import Square
from constants import SQUARE_SIZE, BOARD_SIZE
class ChessboardFrame():
    def __init__(self, img):
self.img = img
def square_at(self, i):
y = BOARD_SIZE - ((i / 8) % 8) * SQUARE_SIZE - SQUARE_SIZE
x = (i % 8) * SQUARE_SIZE
        return Square(i, self.img[y:y+SQUARE_SIZE, x:x+SQUARE_SIZE, :])
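# Editor's usage sketch (illustrative; the blank board image is a placeholder,
# and square_at relies on Python 2 integer division for the row index):
#
#   board_img = np.zeros((BOARD_SIZE, BOARD_SIZE, 3), dtype=np.uint8)
#   frame = ChessboardFrame(board_img)
#   sq0 = frame.square_at(0)   # crop taken from the bottom-left of the image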
|
Mlieou/leetcode_python
|
leetcode/python/ex_402.py
|
Python
|
mit
| 405
| 0.002469
|
class Solution(object):
def removeKdigits(self, num, k):
"""
:type num: str
:type k: int
:rtype: str
"""
stack = []
length = len(num) - k
for c in num:
while k and stack and stack[-1] > c:
stack.pop()
k -= 1
stack.append(c)
return ''.join(stack[:length]).lstrip('0') or '0'
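# Editor's worked example (standard LeetCode 402 cases), guarded so importing
# the module stays side-effect free:
if __name__ == '__main__':
    s = Solution()
    assert s.removeKdigits("1432219", 3) == "1219"  # greedily drops 4, 3, 2
    assert s.removeKdigits("10", 2) == "0"          # removing every digit yields "0"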
|
liulion/mayavi
|
tvtk/plugins/scene/scene_manager.py
|
Python
|
bsd-3-clause
| 2,756
| 0.002177
|
""" Manage the TVTK scenes. """
# Enthought library imports.
from tvtk.pyface.tvtk_scene import TVTKScene
from pyface.workbench.api import WorkbenchWindow
from traits.api import HasTraits, List, Instance, Property
from traits.api import implements, on_trait_change
from tvtk.plugins.scene.scene_editor import SceneEditor
# Local imports.
from i_scene_manager import ISceneManager
class SceneManager(HasTraits):
""" Manage the TVTK scenes. """
implements(ISceneManager)
#### 'SceneManager' interface #############################################
# The currently active scene (None, if no scene is active).
current_scene = Property(Instance(TVTKScene))
# A list of all open scenes.
scenes = List(TVTKScene)
# The workbench window that the manager is in (there is one scene manager
# per workbench window).
window = Instance(WorkbenchWindow)
#### Private interface ####################################################
# Shadow trait for the 'current_scene' property.
_current_scene = Instance(TVTKScene)
###########################################################################
# 'SceneManager' interface.
###########################################################################
#### Trait properties #####################################################
def _get_current_scene(self):
""" Property getter. """
scene_count = len(self.scenes)
if scene_count == 0:
scene = None
elif scene_count == 1:
scene = self.scenes[0]
else:
scene = self._current_scene
return scene
def _set_current_scene(self, scene):
""" Property setter. """
self._current_scene = scene
return
#### Trait change handlers ################################################
@on_trait_change('window:editor_opened')
def _on_editor_opened(self, obj, trait_name, old, new):
""" Dynamic trait change handler. """
if isinstance(new, SceneEditor):
self.scenes.append(new.scene)
return
@on_trait_change('window:editor_closing')
def _on_editor_closed(self, obj, trait_name, old, new):
""" Dynamic trait change handler. """
if isinstance(new, SceneEditor):
self.scenes.remove(new.scene)
return
@on_trait_change('window:active_editor')
def _on_active_editor_changed(self, obj, trait_name, old, new):
""" Dynamic trait change handler. """
if isinstance(new, SceneEditor):
self.current_scene = new.scene
else:
self.current_scene = None
return
#### EOF ######################################################################
|
vbeffara/Simulations
|
tools/massage-box.py
|
Python
|
gpl-3.0
| 341
| 0.01173
|
#! /usr/bin/env python
import sys
g = {}
n = {}
for line in sys.stdin:
    (n1, n2, p, q, t, tg, x) = line.strip().split(' ')
t = int(t)
x = float(x)
key = ' '.join((n1,n2,p,q))
if not key in n:
n[key] = 0
g[key] = 0
n[key] += t
g[key] += x*t
for key in n:
print key, n[key], g[key]/n[key]
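# Editor's note (illustrative data): each stdin line is "n1 n2 p q t tg x";
# lines sharing the key (n1, n2, p, q) are pooled, with t used as a weight and
# x as the measured value, so the script prints the weighted mean of x. For
# example the two lines
#   10 10 .5 .5 100 g 1.2
#   10 10 .5 .5 300 g 1.6
# collapse to "10 10 .5 .5 400 1.5".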
|
jsexauer/networkx_viewer
|
networkx_viewer/viewer.py
|
Python
|
gpl-3.0
| 17,151
| 0.005073
|
try:
# Python 3
import tkinter as tk
import tkinter.messagebox as tkm
import tkinter.simpledialog as tkd
except ImportError:
# Python 2
import Tkinter as tk
import tkMessageBox as tkm
import tkSimpleDialog as tkd
import networkx as nx
from networkx_viewer.graph_canvas import GraphCanvas
from networkx_viewer.tokens import TkPassthroughEdgeToken, TkPassthroughNodeToken
from networkx_viewer.autocomplete_entry import AutocompleteEntry
class ViewerApp(tk.Tk):
"""Example simple GUI to plot a NetworkX Graph"""
def __init__(self, graph, **kwargs):
"""Additional keyword arguments beyond graph are passed down to the
GraphCanvas. See it's docs for details"""
tk.Tk.__init__(self)
self.geometry('1000x600')
self.title('NetworkX Viewer')
bottom_row = 10
self.columnconfigure(0, weight=1)
self.rowconfigure(bottom_row, weight=1)
self.canvas = GraphCanvas(graph, width=400, height=400, **kwargs)
self.canvas.grid(row=0, column=0, rowspan=bottom_row+2, sticky='NESW')
self.canvas.onNodeSelected = self.onNodeSelected
self.canvas.onEdgeSelected = self.onEdgeSelected
r = 0 # Current row
tk.Label(self, text='Nodes:').grid(row=r, column=1, sticky='W')
self.node_entry = AutocompleteEntry(self.canvas.dataG.nodes)
self.node_entry.bind('<Return>',self.add_node, add='+')
self.node_entry.bind('<Control-Return>', self.buildNewShortcut, add='+')
self.node_entry.grid(row=r, column=2, columnspan=2, sticky='NESW', pady=2)
tk.Button(self, text='+', command=self.add_node, width=2).grid(
row=r, column=4,sticky=tk.NW,padx=2,pady=2)
r += 1
nlsb = tk.Scrollbar(self, orient=tk.VERTICAL)
self.node_list = tk.Listbox(self, yscrollcommand=nlsb.set, height=5)
self.node_list.grid(row=r, column=1, columnspan=3, sticky='NESW')
self.node_list.bind('<Delete>',lambda e: self.node_list.delete(tk.ANCHOR))
nlsb.config(command=self.node_list.yview)
nlsb.grid(row=r, column=4, sticky='NWS')
r += 1
tk.Label(self, text='Neighbors Levels:').grid(row=r, column=1,
columnspan=2, sticky=tk.NW)
self.level_entry = tk.Entry(self, width=4)
self.level_entry.insert(0,'1')
self.level_entry.grid(row=r, column=3, sticky=tk.NW, padx=5)
r += 1
tk.Button(self, text='Build New', command=self.onBuildNew).grid(
row=r, column=1)
tk.Button(self, text='Add to Existing', command=self.onAddToExisting
).grid(row=r, column=2, columnspan=2)
r += 1
line = tk.Canvas(self, height=15, width=200)
line.create_line(0,13,250,13)
line.create_line(0,15,250,15)
line.grid(row=r, column=1, columnspan=4, sticky='NESW')
r += 1
tk.Label(self, text='Filters:').grid(row=r, column=1, sticky=tk.W)
self.filter_entry = tk.Entry(self)
self.filter_entry.bind('<Return>',self.add_filter, add='+')
self.filter_entry.grid(row=r, column=2, columnspan=2, sticky='NESW', pady=2)
tk.Button(self, text='+', command=self.add_filter, width=2).grid(
row=r, column=4,sticky=tk.NW,padx=2,pady=2)
r += 1
flsb = tk.Scrollbar(self, orient=tk.VERTICAL)
self.filter_list = tk.Listbox(self, yscrollcommand=flsb.set, height=5)
self.filter_list.grid(row=r, column=1, columnspan=3, sticky='NESW')
self.filter_list.bind('<Delete>',self.remove_filter)
flsb.config(command=self.node_list.yview)
flsb.grid(row=r, column=4, sticky='NWS')
r += 1
tk.Button(self, text='Clear',command=self.remove_filter).grid(
row=r, column=1, sticky='W')
tk.Button(self, text='?', command=self.filter_help
).grid(row=r, column=4, stick='NESW', padx=2)
r += 1
line2 = tk.Canvas(self, height=15, width=200)
line2.create_line(0,13,250,13)
line2.create_line(0,15,250,15)
line2.grid(row=r, column=1, columnspan=4, sticky='NESW')
r += 1
self.lbl_attr = tk.Label(self, text='Attributes',
wraplength=200, anchor=tk.SW, justify=tk.LEFT)
self.lbl_attr.grid(row=r, column=1, columnspan=4, sticky='NW')
r += 1
self.tbl_attr = PropertyTable(self, {})
self.tbl_attr.grid(row=r, column=1, columnspan=4, sticky='NESW')
assert r == bottom_row, "Set bottom_row to %d" % r
self._build_menu()
def _build_menu(self):
self.menubar = tk.Menu(self)
self.config(menu=self.menubar)
view = tk.Menu(self.menubar, tearoff=0)
view.add_command(label='Undo', command=self.canvas.undo, accelerator="Ctrl+Z")
self.bind_all("<Control-z>", lambda e: self.canvas.undo()) # Implement accelerator
view.add_command(label='Redo', command=self.canvas.redo)
view.add_separator()
view.add_command(label='Center on node...', command=self.center_on_node)
view.add_separator()
view.add_command(label='Reset Node Marks', command=self.reset_node_markings)
view.add_command(label='Reset Edge Marks', command=self.reset_edge_markings)
view.add_command(label='Redraw Plot', command=self.canvas.replot)
view.add_separator()
view.add_command(label='Grow display one level...', command=self.grow_all)
self.menubar.add_cascade(label='View', menu=view)
def center_on_node(self):
node = NodeDialog(self, "Name of node to center on:").result
if node is None: return
self.canvas.center_on_node(node)
def reset_edge_markings(self):
for u,v,k,d in self.canvas.dispG.edges(data=True, keys=True):
token = d['token']
if token.is_marked:
self.canvas.mark_edge(u,v,k)
def reset_node_markings(self):
for u,d in self.canvas.dispG.nodes(data=True):
token = d['token']
if token.is_marked:
self.canvas.mark_node(u)
def add_node(self, event=None):
node = self.node_entry.get()
if node.isdigit() and self.canvas.dataG.has_node(int(node)):
node = int(node)
if self.canvas.dataG.has_node(node):
self.node_list.insert(tk.END, node)
self.node_entry.delete(0, tk.END)
else:
tkm.showerror("Node not found", "Node '%s' not in graph."%node)
def add_filter(self, event=None, filter_lambda=None):
if filter_lambda is None:
filter_lambda = self.filter_entry.get()
if self.canvas.add_filter(filter_lambda):
# We successfully added the filter; add to list and clear entry
self.filter_list.insert(tk.END, filter_lambda)
self.filter_entry.delete(0, tk.END)
def filter_help(self, event=None):
msg = ("Enter a lambda function which returns True if you wish\n"
"to show nodes with ONLY a given property.\n"
"Parameters are:\n"
" - u, the node's name, and \n"
" - d, the data dictionary.\n\n"
"Example: \n"
" d.get('color',None)=='red'\n"
"would show only red nodes.\n"
"Example 2:\n"
" str(u).is_digit()\n"
"would show only nodes which have a numerical name.\n\n"
"Multiple filters are ANDed together.")
tkm.showinfo("Filter Condition", msg)
def remove_filter(self, event=None):
all_items = self.filter_list.get(0, tk.END)
if event is None:
# When no event passed, this function was called via the "clear"
# button.
items = all_items
else:
# Remove currently selected item
items = (self.filter_list.get(tk.ANCHOR),)
for item in items:
self.canvas.remove_filter(item)
idx = all_items.index(item)
self.filter_list.delete(idx)
all_items = self.fi
|
open-mmlab/mmdetection
|
mmdet/datasets/coco_panoptic.py
|
Python
|
apache-2.0
| 24,271
| 0
|
# Copyright (c) OpenMMLab. All rights reserved.
import itertools
import os
from collections import defaultdict
import mmcv
import numpy as np
from mmcv.utils import print_log
from terminaltables import AsciiTable
from mmdet.core import INSTANCE_OFFSET
from .api_wrappers import COCO, pq_compute_multi_core
from .builder import DATASETS
from .coco import CocoDataset
try:
import panopticapi
from panopticapi.evaluation import VOID
from panopticapi.utils import id2rgb
except ImportError:
panopticapi = None
id2rgb = None
VOID = None
__all__ = ['CocoPanopticDataset']
class COCOPanoptic(COCO):
"""This wrapper is for loading the panoptic style annotation file.
The format is shown in the CocoPanopticDataset class.
Args:
annotation_file (str): Path of annotation file.
"""
def __init__(self, annotation_file=None):
if panopticapi is None:
raise RuntimeError(
'panopticapi is not installed, please install it by: '
'pip install git+https://github.com/cocodataset/'
'panopticapi.git.')
super(COCOPanoptic, self).__init__(annotation_file)
def createIndex(self):
# create index
print('creating index...')
# anns stores 'segment_id -> annotation'
anns, cats, imgs = {}, {}, {}
img_to_anns, cat_to_imgs = defaultdict(list), defaultdict(list)
if 'annotations' in self.dataset:
for ann, img_info in zip(self.dataset['annotations'],
self.dataset['images']):
img_info['segm_file'] = ann['file_name']
for seg_ann in ann['segments_info']:
# to match with instance.json
seg_ann['image_id'] = ann['image_id']
seg_ann['height'] = img_info['height']
seg_ann['width'] = img_info['width']
img_to_anns[ann['image_id']].append(seg_ann)
# segment_id is not unique in coco dataset orz...
if seg_ann['id'] in anns.keys():
anns[seg_ann['id']].append(seg_ann)
else:
anns[seg_ann['id']] = [seg_ann]
if 'images' in self.dataset:
for img in self.dataset['images']:
imgs[img['id']] = img
if 'categories' in self.dataset:
for cat in self.dataset['categories']:
cats[cat['id']] = cat
if 'annotations' in self.dataset and 'categories' in self.dataset:
for ann in self.dataset['annotations']:
for seg_ann in ann['segments_info']:
cat_to_imgs[seg_ann['category_id']].append(ann['image_id'])
print('index created!')
self.anns = anns
self.imgToAnns = img_to_anns
self.catToImgs = cat_to_imgs
self.imgs = imgs
self.cats = cats
def load_anns(self, ids=[]):
"""Load anns with the specified ids.
self.anns is a list of annotation lists instead of a
list of annotations.
Args:
ids (int array): integer ids specifying anns
Returns:
anns (object array): loaded ann objects
"""
anns = []
if hasattr(ids, '__iter__') and hasattr(ids, '__len__'):
# self.anns is a list of annotation lists instead of
# a list of annotations
for id in ids:
anns += self.anns[id]
return anns
elif type(ids) == int:
return self.anns[ids]
@DATASETS.register_module()
class CocoPanopticDataset(CocoDataset):
"""Coco dataset for Panoptic segmentation.
The annotation format is shown as follows. The `ann` field is optional
for testing.
.. code-block:: none
[
{
'filename': f'{image_id:012}.png',
'image_id':9
'segments_info': {
[
{
'id': 8345037, (segment_id in panoptic png,
convert from rgb)
'category_id': 51,
'iscrowd': 0,
'bbox': (x1, y1, w, h),
'area': 24315,
'segmentation': list,(coded mask)
},
...
}
}
},
...
]
"""
CLASSES = [
'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
        'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep',
'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella',
'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard',
'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard',
'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork',
'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange',
'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair',
'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv',
'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',
'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',
'scissors', 'teddy bear', 'hair drier', 'toothbrush', 'banner',
'blanket', 'bridge', 'cardboard', 'counter', 'curtain', 'door-stuff',
'floor-wood', 'flower', 'fruit', 'gravel', 'house', 'light',
'mirror-stuff', 'net', 'pillow', 'platform', 'playingfield',
'railroad', 'river', 'road', 'roof', 'sand', 'sea', 'shelf', 'snow',
'stairs', 'tent', 'towel', 'wall-brick', 'wall-stone', 'wall-tile',
'wall-wood', 'water-other', 'window-blind', 'window-other',
'tree-merged', 'fence-merged', 'ceiling-merged', 'sky-other-merged',
'cabinet-merged', 'table-merged', 'floor-other-merged',
'pavement-merged', 'mountain-merged', 'grass-merged', 'dirt-merged',
'paper-merged', 'food-other-merged', 'building-other-merged',
'rock-merged', 'wall-other-merged', 'rug-merged'
]
THING_CLASSES = [
'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep',
'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella',
'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard',
'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard',
'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork',
'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange',
'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair',
'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv',
'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',
'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',
'scissors', 'teddy bear', 'hair drier', 'toothbrush'
]
STUFF_CLASSES = [
'banner', 'blanket', 'bridge', 'cardboard', 'counter', 'curtain',
'door-stuff', 'floor-wood', 'flower', 'fruit', 'gravel', 'house',
'light', 'mirror-stuff', 'net', 'pillow', 'platform', 'playingfield',
'railroad', 'river', 'road', 'roof', 'sand', 'sea', 'shelf', 'snow',
'stairs', 'tent', 'towel', 'wall-brick', 'wall-stone', 'wall-tile',
'wall-wood', 'water-other', 'window-blind', 'window-other',
'tree-merged', 'fence-merged', 'ceiling-merged', 'sky-other-merged',
'cabinet-merged', 'table-merged', 'floor-other-merged',
'pavement-merged', 'mountain-merged', 'grass-merged', 'dirt-merged',
'paper-merged', 'food-other-merged', 'building-other-merged',
'rock-merged', 'wall-other-merged', 'rug-merged'
]
PALETTE = [(220, 20, 60
|
windskyer/k_nova
|
paxes_nova/compute/notify_messages.py
|
Python
|
apache-2.0
| 4,325
| 0
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# =================================================================
# =================================================================
# NOTE: notify message MUST follow these rules:
#
# - Messages must be wrappered with _() for translation
#
# - Replacement variables must be wrappered with brackets
#
# - Replacement variables must be from the following list:'
# {instance_id}
# {instance_name}
# {host_name}
# {source_host_name}
# {target_host_name}
# {error}
from paxes_nova import _
PAUSE_SUCCESS = (_("Pause of virtual machine {instance_name} on host "
"{host_name} was successful."))
PAUSE_ERROR = (_("Pause of virtual machine {instance_name} on host "
"{host_name} failed with exception: {error}"))
SUSPEND_SUCCESS = (_("Suspend of virtual machine {instance_name} on host "
"{host_name} was successful."))
SUSPEND_ERROR = (_("Suspend of virtual machine {instance_name} on host "
"{host_name} failed with exception: {error}"))
RESUME_SUCCESS = (_("Resume of virtual machine {instance_name} on host "
"{host_name} was successful."))
RESUME_ERROR = (_("Resume of virtual machine {instance_name} on host "
"{host_name} failed with exception: {error}"))
DEPLOY_SUCCESS = (_("Deploy of virtual machine {instance_name} on host "
"{host_name} was successful."))
DEPLOY_ERROR = (_("Deploy of virtual machine {instance_name} on host "
"{host_name} failed with exception: {error}"))
START_SUCCESS = (_("Start of virtual machine {instance_name} on host "
"{host_name} was successful."))
START_ERROR = (_("Start of virtual machine {instance_name} on host "
"{host_name
|
} failed with exception: {error}"))
STOP_SUCCESS = (_("Stop of virtual machine {instance_name} on host "
"{host_name} was successful."))
STOP_ERROR = (_("Stop of virtual machine {instance_name} on host "
"{host_name} failed with exception: {error}"))
RESTART_SUCCESS = (_("Restart of virtual machine {instance_name} on host "
"{host_name} was successful."))
RESTART_ERROR = (_("Restart of virtual machine {instance_name} on host "
"{host_name} failed with exception: {error}"))
LPM_SUCCESS = (_("Migration of virtual machine {instance_name} from host "
"{source_host_name} to host {target_host_name} was "
"successful."))
LPM_ERROR = (_("Migration of virtual machine {instance_name} to host "
"{target_host_name} failed with exception: {error}"))
LPM_ERROR_DEST = (_("Migration of virtual machine {instance_name} to host "
"{host_name} failed with exception: {error}"))
DELETE_ERROR = (_("Delete of virtual machine {instance_name} on host "
"{host_name} failed with exception: {error}"))
DELETE_SUCCESS = (_("Delete of virtual machine {instance_name} on host "
"{host_name} was successful. "))
RESIZE_ERROR = (_("Resize of virtual machine {instance_name} on host "
"{host_name} failed with exception: {error}"))
RESIZE_SUCCESS = (_("Resize of virtual machine {instance_name} on host "
"{host_name} was successful."))
CAPTURE_SUCCESS = (_("Capture of virtual machine {instance_name} on host "
"{host_name} was successful"))
CAPTURE_ERROR = (_("Capture of virtual machine {instance_name} on host "
"{host_name} failed with exception: {error}"))
ATTACH_SUCCESS = (_("Volume {volume_id} was successfully attached to "
"virtual machine {instance_name}."))
ATTACH_ERROR = (_("Volume {volume_id} could not be attached to "
"virtual machine {instance_name}. Error message: {error}"))
DETACH_SUCCESS = (_("Volume {volume_id} was successfully detached from "
"virtual machine {instance_name}."))
DETACH_ERROR = (_("Volume {volume_id} could not be detached from "
"virtual machine {instance_name}. Error message: {error}"))
|
codeaudit/ampcrowd
|
ampcrowd/crowd_server/wsgi.py
|
Python
|
apache-2.0
| 399
| 0.002506
|
"""
WSGI config for crowd_server project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "crowd_server.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
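For a quick local smoke test (not production use), the callable above can be served with the standard library's wsgiref server. A sketch, assuming the crowd_server settings module is importable:

# Illustrative only: throwaway development server around the WSGI callable above.
from wsgiref.simple_server import make_server

if __name__ == '__main__':
    make_server('127.0.0.1', 8000, application).serve_forever()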
|
Alfredx/django-db2charts
|
db2charts/router.py
|
Python
|
mit
| 619
| 0.003231
|
# -*- coding: utf-8 -*-
# author: Alfred
import os
import re
DB_MODULE_PATTERN = re.compile(r'db2charts_models\.(?P<module>.*)_models')
class DB2ChartsRouter(object):
def db_for_module(self, module):
        match = DB_MODULE_PATTERN.match(module)
if match:
return match.groupdict()['module']
return None
def db_for_read(self, model, **hints):
        return self.db_for_module(model.__module__)
def db_for_write(self, model, **hints):
return self.db_for_module(model.__module__)
def allow_migrate(self, db, app_label, model=None, **hints):
return False
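For a router like this to take effect, it is normally listed in Django's DATABASE_ROUTERS setting so the ORM consults it for reads and writes. A minimal sketch, assuming the dotted path implied by the file layout above:

# settings.py (sketch)
DATABASE_ROUTERS = ['db2charts.router.DB2ChartsRouter']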
|
SickGear/SickGear
|
lib/apprise/plugins/NotifyD7Networks.py
|
Python
|
gpl-3.0
| 16,906
| 0
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2019 Chris Caron <lead2gold@gmail.com>
# All rights reserved.
#
# This code is licensed under the MIT License.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files(the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions :
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# To use this service you will need a D7 Networks account from their website
# at https://d7networks.com/
#
# After you've established your account you can get your api login credentials
# (both user and password) from the API Details section from within your
# account profile area: https://d7networks.com/accounts/profile/
import re
import six
import requests
import base64
from json import dumps
from json import loads
from .NotifyBase import NotifyBase
from ..URLBase import PrivacyMode
from ..common import NotifyType
from ..utils import parse_list
from ..utils import parse_bool
from ..AppriseLocale import gettext_lazy as _
# Extend HTTP Error Messages
D7NETWORKS_HTTP_ERROR_MAP = {
401: 'Invalid Argument(s) Specified.',
403: 'Unauthorized - Authentication Failure.',
    412: 'A Routing Error Occurred',
    500: 'A Server-side Error Occurred Handling the Request.',
}
# Some Phone Number Detection
IS_PHONE_NO = re.compile(r'^\+?(?P<phone>[0-9\s)(+-]+)\s*$')
# Priorities
class D7SMSPriority(object):
"""
D7 Networks SMS Message Priority
"""
LOW = 0
MODERATE = 1
NORMAL = 2
HIGH = 3
D7NETWORK_SMS_PRIORITIES = (
D7SMSPriority.LOW,
D7SMSPriority.MODERATE,
D7SMSPriority.NORMAL,
D7SMSPriority.HIGH,
)
class NotifyD7Networks(NotifyBase):
"""
A wrapper for D7 Networks Notifications
"""
# The default descriptive name associated with the Notification
service_name = 'D7 Networks'
# The services URL
service_url = 'https://d7networks.com/'
# All notification requests are secure
secure_protocol = 'd7sms'
# Allow 300 requests per minute.
# 60/300 = 0.2
request_rate_per_sec = 0.20
# A URL that takes you to the setup/help of the specific protocol
setup_url = 'https://github.com/caronc/apprise/wiki/Notify_d7networks'
# D7 Networks batch notification URL
notify_batch_url = 'http://rest-api.d7networks.com/secure/sendbatch'
# D7 Networks single notification URL
notify_url = 'http://rest-api.d7networks.com/secure/send'
# The maximum length of the body
body_maxlen = 160
# A title can not be used for SMS Messages. Setting this to zero will
# cause any title (if defined) to get placed into the message body.
title_maxlen = 0
# Define object templates
templates = (
'{schema}://{user}:{password}@{targets}',
)
# Define our template tokens
    template_tokens = dict(NotifyBase.template_tokens, **{
'user': {
'name': _('Username'),
'type': 'string',
'required': True,
},
'password': {
'name': _('Password'),
|
'type': 'string',
'private': True,
'required': True,
},
'target_phone': {
'name': _('Target Phone No'),
'type': 'string',
'prefix': '+',
'regex': (r'^[0-9\s)(+-]+$', 'i'),
'map_to': 'targets',
},
'targets': {
'name': _('Targets'),
'type': 'list:string',
},
})
# Define our template arguments
template_args = dict(NotifyBase.template_args, **{
'priority': {
'name': _('Priority'),
'type': 'choice:int',
'min': D7SMSPriority.LOW,
'max': D7SMSPriority.HIGH,
'values': D7NETWORK_SMS_PRIORITIES,
# The website identifies that the default priority is low; so
# this plugin will honor that same default
'default': D7SMSPriority.LOW,
},
'batch': {
'name': _('Batch Mode'),
'type': 'bool',
'default': False,
},
'to': {
'alias_of': 'targets',
},
'source': {
            # Originating address. In cases where rewriting of the sender's
            # address is supported or permitted by the SMS-C, this number is
            # transmitted as the originating address; it is completely
            # optional.
'name': _('Originating Address'),
'type': 'string',
'map_to': 'source',
},
'from': {
'alias_of': 'source',
},
})
def __init__(self, targets=None, priority=None, source=None, batch=False,
**kwargs):
"""
Initialize D7 Networks Object
"""
super(NotifyD7Networks, self).__init__(**kwargs)
# The Priority of the message
if priority not in D7NETWORK_SMS_PRIORITIES:
self.priority = self.template_args['priority']['default']
else:
self.priority = priority
# Prepare Batch Mode Flag
self.batch = batch
# Setup our source address (if defined)
self.source = None \
if not isinstance(source, six.string_types) else source.strip()
# Parse our targets
self.targets = list()
for target in parse_list(targets):
# Validate targets and drop bad ones:
result = IS_PHONE_NO.match(target)
if result:
                # Further check our phone # for its digit count;
                # if it isn't between 11 and 14 digits, assume it's
                # a poorly specified phone no and emit a warning
result = ''.join(re.findall(r'\d+', result.group('phone')))
if len(result) < 11 or len(result) > 14:
self.logger.warning(
'Dropped invalid phone # '
'({}) specified.'.format(target),
)
continue
# store valid phone number
self.targets.append(result)
continue
self.logger.warning(
'Dropped invalid phone # ({}) specified.'.format(target))
if len(self.targets) == 0:
msg = 'There are no valid targets identified to notify.'
self.logger.warning(msg)
raise TypeError(msg)
return
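As a self-contained illustration of the target validation done in the constructor above (regex match first, then the same 11-14 digit-count bounds), here is a standalone helper; the function name is made up for the example:

import re

PHONE_RE = re.compile(r'^\+?(?P<phone>[0-9\s)(+-]+)\s*$')

def normalize_phone(target):
    # Return the bare digits of a plausible phone number, or None.
    match = PHONE_RE.match(target)
    if not match:
        return None
    digits = ''.join(re.findall(r'\d+', match.group('phone')))
    return digits if 11 <= len(digits) <= 14 else None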
def send(self, body, title='', notify_type=NotifyType.INFO, **kwargs):
"""
Depending on whether we are set to batch mode or single mode this
redirects to the appropriate handling
"""
# error tracking (used for function return)
has_error = False
auth = '{user}:{password}'.format(
user=self.user, password=self.password)
if six.PY3:
            # Python 3's version of b64encode() expects a byte array and not
            # a string. To accommodate this, we encode the content here
auth = auth.encode('utf-8')
# Prepare our headers
headers = {
'User-Agent': self.app_id,
'Accept': 'application/json',
'Authorization': 'Basic {}'.format(base64.b64encode(auth))
}
# Our URL varies depending if we're doing a batch m
|
schreiberx/sweet
|
mule/platforms/50_cheyenne_intel/JobPlatform.py
|
Python
|
mit
| 8,312
| 0.006136
|
import platform
import socket
import sys
import os
from mule_local.JobGeneration import *
from mule.JobPlatformResources import *
from . import JobPlatformAutodetect
def _whoami(depth=1):
"""
String of function name to recycle code
https://www.oreilly.com/library/view/python-cookbook/0596001673/ch14s08.html
Returns
-------
string
Return function name
"""
return sys._getframe(depth).f_code.co_name
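A tiny usage sketch of the frame-inspection trick above: called with the default depth, the helper returns the name of the function that invoked it (the function below is hypothetical):

def example_job():
    # Returns 'example_job' via sys._getframe(1).f_code.co_name.
    return _whoami()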
def p_gen_script_info(jg : JobGeneration):
return """#
# Generating function: """+_whoami(2)+"""
# Platform: """+get_platform_id()+"""
# Job id: """+jg.getUniqueID()+"""
#
"""
def get_platform_autodetect():
"""
Returns
-------
bool
True if current platform matches, otherwise False
"""
return JobPlatformAutodetect.autodetect()
def get_platform_id():
"""
Return platform ID
Returns
-------
string
unique ID of platform
"""
return "cheyenne_intel"
def get_platform_resources():
"""
Return information about hardware
"""
r = JobPlatformResources()
r.num_cores_per_node = 36
# Physical number of nodes, maybe the limit is different
r.num_nodes = 4032
r.num_cores_per_socket = 18
# 12h limit
r.max_wallclock_seconds = 60*60*12
return r
def jobscript_setup(jg : JobGeneration):
"""
Setup data to generate job script
"""
return
def jobscript_get_header(jg : JobGeneration):
"""
    These headers typically contain information on e.g. job execution, number of compute nodes, etc.
Returns
-------
string
multiline text for scripts
"""
job_id = jg.getUniqueID()
p = jg.parallelization
time_str = p.get_max_wallclock_seconds_hh_mm_ss()
# Available queues:
# premium (only use this in extreme cases)
# regular
# economy
queue = 'economy'
    # Use a higher-priority queue for larger jobs (premium for >= 32 nodes,
    # regular for >= 16); otherwise the job doesn't seem to get scheduled.
if p.num_nodes >= 32:
queue = 'premium'
elif p.num_nodes >= 16:
queue = 'regular'
#
# See https://www.lrz.de/services/compute/linux-cluster/batch_parallel/example_jobs/
#
content = """#! /bin/bash
#
## project code
#PBS -A NCIS0002
#PBS -q """+queue+"""
## wall-clock time (hrs:mins:secs)
#PBS -l walltime="""+time_str+"""
## select: number of nodes
## ncpus: number of CPUs per node
## mpiprocs: number of ranks per node
#PBS -l select="""+str(p.num_nodes)+""":ncpus="""+str(p.num_cores_per_node)+""":mpiprocs="""+str(p.num_ranks_per_node)+""":ompthreads="""+str(p.num_threads_per_rank)+"\n"
#"default": 2301000
#"turbo": 2301000
#"rated": 2300000
#"slow": 1200000
if p.force_turbo_off:
content += "#PBS -l select=cpufreq=2300000\n"
content += """#
#PBS -N """+job_id[0:100]+"""
#PBS -o """+jg.p_job_stdout_filepath+"""
#PBS -e """+jg.p_job_stderr_filepath+"""
#source /etc/profile.d/modules.sh
#module load openmpi
"""+("module load mkl" if jg.compile.mkl==True or jg.compile.mkl=='enable' else "")+"""
"""+p_gen_script_info(jg)+"""
echo
echo "hostname"
hostname
echo
echo
echo "lscpu -e"
lscpu -e
echo
echo
echo "CPU Frequencies (uniquely reduced):"
cat /sys/devices/system/cpu/cpu*/cpufreq/scaling_cur_freq | sort -u
echo
"""
if jg.compile.threading != 'off':
content += """
export OMP_NUM_THREADS="""+str(p.num_threads_per_rank)+"""
"""
# if jg.compile.sweet_mpi != 'enable':
if True:
#
# https://software.intel.com/en-us/node/522691
if p.core_oversubscription:
if p.core_affinity != None:
if p.core_affinity == 'compact':
content += "export KMP_AFFINITY=granularity=fine,compact\n"
elif p.core_affinity == 'scatter':
content += "export KMP_AFFINITY=granularity=fine,scatter\n"
else:
Exception("Affinity '"+str(p.core_affinity)+"' not supported")
else:
#raise Exception("Please specify core_affinity!")
content += "# No core affinity selected\n"
else:
if p.core_affinity != None:
content += "\necho \"Affnity: "+str(p.core_affinity)+"\"\n"
if p.core_affinity == 'compact':
content += "export KMP_AFFINITY=granularity=fine,compact,1,0\n"
elif p.core_affinity == 'scatter':
content += "export KMP_AFFINITY=granularity=fine,scatter\n"
else:
raise Exception("Affinity '"+str(
|
p.core_affinity)+"' not supported")
else:
#raise Exception("Please specify core_affinity!")
content += "# No core affinity selected\n"
if p.core_affinity != None:
content += "export KMP_AFFINITY=\"verbose,$KMP_AFFINITY\"\n"
return content
def jobscript_get_exec_prefix(jg : JobGeneration):
"""
Prefix before executable
Returns
-------
    string
multiline text for scripts
"""
content = ""
content += jg.runtime.get_jobscript_plan_exec_prefix(jg.compile, jg.runtime)
content += """
EXEC=\""""+jg.compile.getProgramPath()+"""\"
PARAMS=\""""+jg.runtime.getRuntimeOptions()+"""\"
"""
return content
def jobscript_get_exec_command(jg : JobGeneration):
"""
Prefix to executable command
Returns
-------
string
multiline text for scripts
"""
p = jg.parallelization
mpiexec = ""
#
# Only use MPI exec if we are allowed to do so
# We shouldn't use mpiexec for validation scripts
#
if not p.mpiexec_disabled:
# Use mpiexec_mpt for Intel MPI
#mpiexec = "mpiexec_mpt -n "+str(p.num_ranks)
# Use mpiexec for GNU
if jg.compile.sweet_mpi == 'enable':
mpiexec = "mpiexec_mpt -n "+str(p.num_ranks)
mpiexec += " omplace "
mpiexec += " -nt "+str(p.num_threads_per_rank)+" "
mpiexec += " -tm intel"
mpiexec += " -vv"
if mpiexec[-1] != ' ':
mpiexec += ' '
#
# Fix the mess on Cheyenne!
#
# We prefix the current LD_LIBRARY_PATH with the one from the shell where the job was submitted
# This is required since Cheyenne scripts mess around with the existing path in a way
# which results in e.g. the system-wide installed fftw to be loaded.
#
# What we basically accomplish here is to suggest to really first
# lookup the MULE local_software/local/lib directory, then the system libraries
#
sweet_ld_library_path = os.getenv('MULE_LD_LIBRARY_PATH')
if sweet_ld_library_path == None:
raise Exception("Environment variable MULE_LD_LIBRARY_PATH not found!")
content = """
# Make sure that MULE library path is really known
export LD_LIBRARY_PATH=\""""+sweet_ld_library_path+""":$LD_LIBRARY_PATH\"
echo
echo "LD_LIBRARY_PATH"
echo "${LD_LIBRARY_PATH}"
echo
echo
echo "ldd"
ldd $EXEC
echo
E=\""""+mpiexec+"""${EXEC} ${PARAMS}\"
echo
echo "Executing..."
echo "$E"
$E || exit 1
"""
return content
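The LD_LIBRARY_PATH handling above simply prepends the MULE library directory so it wins the lookup order over system-wide libraries. The same idea in isolation, with hypothetical paths:

import os

# Illustrative only; mirrors the prefixing performed in the generated job script.
mule_lib = os.getenv('MULE_LD_LIBRARY_PATH', '/path/to/mule/local_software/local/lib')
existing = os.environ.get('LD_LIBRARY_PATH', '')
os.environ['LD_LIBRARY_PATH'] = mule_lib + (':' + existing if existing else '')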
def jobscript_get_exec_suffix(jg : JobGeneration):
"""
Suffix before executable
Returns
-------
string
multiline text for scripts
"""
content = """
echo
echo "CPU Frequencies (uniquely reduced):"
cat /sys/devices/system/cpu/cpu*/cpufreq/scaling_cur_freq | sort -u
echo
"""
content += jg.runtime.get_jobscript_plan_exec_suffix(jg.compile, jg.runtime)
return content
def jobscript_get_footer(jg : JobGeneration):
"""
Footer at very end of job script
Returns
-------
string
multiline text for scripts
"""
content = ""
return content
def jobscript_get_compile_command(jg : JobGeneration):
"""
Compile command(s)
This is separated here to put it either
* into the job script (handy for workstations)
or
* into a separate compile file (handy for clusters)
Returns
-------
string
multiline text with compile command to generate executable
"""
|
sandervenema/netzpolitik
|
petitions/urls.py
|
Python
|
gpl-2.0
| 251
| 0
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^(?P<lang>[a-z]{2})?$', views.index, name='index'),
url(r'^sign/$', views.sign, name='sign'),
    url(r'^confirm/([0-9a-z]{64})/$', views.confirm, name='confirm'),
]
|
tkaitchuck/nupic
|
examples/opf/experiments/spatial_classification/base/description.py
|
Python
|
gpl-3.0
| 14,847
| 0.002694
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
"""
from nupic.frameworks.opf.expdescriptionapi import ExperimentDescriptionAPI
from nupic.frameworks.opf.expdescriptionhelpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer,
DeferredDictLookup)
from nupic.frameworks.opf.clamodelcallbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opfutils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opftaskdriver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
config = {
# Type of model that the rest of these parameters apply to.
'model': "CLA",
# Version that specifies the format of the config.
'version': 1,
# Intermediate variables used to compute fields in modelParams and also
# referenced from the control section.
'aggregationInfo': { 'days': 0,
'fields': [],
'hours': 0,
'microseconds': 0,
'milliseconds': 0,
'minutes': 0,
'months': 0,
'seconds': 0,
'weeks': 0,
'years': 0},
'predictAheadTime': None,
# Model parameter dictionary.
'modelParams': {
# The type of inference that this model will perform
'inferenceType': 'NontemporalClassification',
'sensorParams': {
# Sensor diagnostic output verbosity control;
# if > 0: sensor region will print out on screen what it's sensing
# at each step 0: silent; >=1: some info; >=2: more info;
# >=3: even more info (see compute() in py/regions/RecordSensor.py)
'verbosity' : 0,
# Example:
# dsEncoderSchema = [
# DeferredDictLookup('__field_name_encoder'),
# ],
#
# (value generated from DS_ENCODER_SCHEMA)
'encoders': {
u'field1': {
'fieldname': u'field1',
'n': 121,
'name': u'field1',
'type': 'SDRCategoryEncoder',
'w': 21},
u'classification': {
'classifierOnly': True,
'fieldname': u'classification',
'n': 121,
'name': u'classification',
'type': 'SDRCategoryEncoder',
'w': 21},
},
# A dictionary specifying the period for automatically-generated
# resets from a RecordSensor;
#
# None = disable automatically-generated resets (also disabled if
# all of the specified values evaluate to 0).
      # Valid keys are any desired combination of the following:
# days, hours, minutes, seconds, milliseconds, microseconds, weeks
#
# Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
#
# (value generated from SENSOR_AUTO_RESET)
'sensorAutoReset' : None,
},
'spEnable': False,
'spParams': {
# SP diagnostic output verbosity control;
# 0: silent; >=1: some info; >=2: more info;
'spVerbosity' : 0,
'globalInhibition': 1,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
'inputWidth': 0,
# SP inhibition control (absolute value);
# Maximum number of active columns in the SP region's output (when
# there are more, the weaker ones are suppressed)
'numActivePerInhArea': 40,
'seed': 1956,
# coincInputPoolPct
      # What percent of the column's receptive field is available
# for potential synapses. At initialization time, we will
# choose coincInputPoolPct * (2*coincInputRadius+1)^2
'coincInputPoolPct': 0.5,
# The default connected threshold. Any synapse whose
# permanence value is above the connected threshold is
# a "connected synapse", meaning it can contribute to the
# cell's firing. Typical value is 0.10. Cells whose activity
      # level before inhibition falls below minDutyCycleBeforeInh
# will have their own internal synPermConnectedCell
# threshold set below this default value.
# (This concept applies to both SP and TP and so 'cells'
# is correct here as opposed to 'columns')
'synPermConnected': 0.1,
'synPermActiveInc': 0.1,
'synPermInactiveDec': 0.01,
'randomSP': 0,
},
    # Controls whether TP is enabled or disabled;
# TP is necessary for making temporal predictions, such as predicting
# the next inputs. Without TP, the model is only capable of
# reconstructing missing sensor inputs (via SP).
'tpEnable' : False,
'tpParams': {
# TP diagnostic output ver
|
tensorflow/benchmarks
|
scripts/tf_cnn_benchmarks/variable_mgr_util.py
|
Python
|
apache-2.0
| 26,469
| 0.005743
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for VariableMgr."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as pycoll
import operator
import numpy as np
import tensorflow.compat.v1 as tf
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
PS_SHADOW_VAR_PREFIX = 'ps_var'
AutoLossScaleParams = pycoll.namedtuple(
'AutoLossScaleParams',
[
# If true, enable automatic loss scaling.
'enable_auto_loss_scale',
# The value to scale the loss before computing gradients.
'loss_scale',
# Number of normal steps with the current `loss_scale`.
'loss_scale_normal_steps',
# Increase loss scale every n steps.
'inc_loss_scale_every_n',
# If true, the current worker is chief. The current implementation
# relies on the chief to update loss_scale value, but in future, we
# might change this to ask the parameter server to update loss_scales
# for better performance.
# TODO(tanmingxing): remove this if loss_scale is updated in ps.
'is_chief',
])
def get_loss_scale_update_op(loss_scale, loss_scale_normal_steps,
inc_loss_scale_every_n):
"""Returns the update op for loss scaling variables.
We maintain the counter `loss_scale_normal_steps` to count the number of steps
we have been using the current `loss_scale`. In most cases, this function
increments `loss_scale_normal_steps`. However, if `loss_scale_normal_steps` is
greater than the threshold `inc_loss_scale_every_n`, we double `loss_scale`
and reset `loss_scale_normal_steps` to zero.
This op is only called if the gradients don't have any infs or nans. Instead,
  if infs or nans occur in the gradients, we immediately halve `loss_scale` and
reset `loss_scale_normal_steps` to zero.
Args:
    loss_scale: a tf.Variable representing the loss_scale value.
loss_scale_normal_steps: a tf.Variable representing the number of training
steps that have run since the loss_scale last changed.
inc_loss_scale_every_n: a Python integer threshold. `loss_scale` is
increased every `inc_loss_scale_every_n` steps, unless the gradients have
infs or nans.
Returns:
An op for updating `loss_scale` and `loss_scale_normal_steps`.
"""
def increment_loss_scale_normal_steps_func():
return tf.group(loss_scale_normal_steps.assign_add(1))
def increase_loss_scale_func():
return tf.group(
tf.assign(loss_scale_normal_steps, 0),
tf.assign(loss_scale, loss_scale * 2))
# true_fn and false_fn must have the same type.
return tf.cond(loss_scale_normal_steps < inc_loss_scale_every_n,
increment_loss_scale_normal_steps_func,
increase_loss_scale_func)
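The same policy can also be written as a framework-free sketch, which may make the counter logic easier to follow (the names below are made up and not part of this module):

def update_loss_scale(loss_scale, normal_steps, inc_every_n, grads_are_finite):
    # Halve on inf/nan; double after inc_every_n consecutive clean steps.
    if not grads_are_finite:
        return loss_scale / 2.0, 0
    if normal_steps < inc_every_n:
        return loss_scale, normal_steps + 1
    return loss_scale * 2.0, 0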
def append_gradients_with_loss_scale(training_ops, get_apply_gradients_ops_func,
loss_scale_params, grad_has_inf_nan):
"""Selectively appends gradients update ops with loss scaling.
Args:
training_ops: a list of training ops to be executed.
get_apply_gradients_ops_func: a function that returns a list of ops for
applying gradients. Here, we must pass a function instead of the actual
list of ops; otherwise, those ops would be executed unconditionally due to
the semantics of tf.cond.
loss_scale_params: An AutoLossScaleParams tuple.
grad_has_inf_nan: Boolean tensor indicating whether the gradients have infs
or nans.
"""
is_chief = loss_scale_params.is_chief
loss_scale = loss_scale_params.loss_scale
loss_scale_normal_steps = loss_scale_params.loss_scale_normal_steps
inc_loss_scale_every_n = loss_scale_params.inc_loss_scale_every_n
enable_auto_loss_scale = loss_scale_params.enable_auto_loss_scale
if loss_scale is None or not enable_auto_loss_scale or not is_chief:
training_ops.extend(get_apply_gradients_ops_func())
else:
# If nans/infs occurred, skip applying gradients and instead update
# loss_scale (halve loss_scale and reset loss_scale_normal_steps to zero).
def update_op_if_nan_or_inf():
"""Update loss_scale and discard gradients if nans/infs occurred."""
return tf.group(
tf.assign(loss_scale, loss_scale / 2.),
tf.assign(loss_scale_normal_steps, 0))
# Otherwise, apply gradients, and update loss_scale and
    # loss_scale_normal_steps.
def update_op_if_no_nan_or_inf():
"""Apply gradients, and update loss scaling."""
return tf.group(
get_loss_scale_update_op(loss_scale, loss_scale_normal_steps,
inc_loss_scale_every_n),
*get_apply_gradients_ops_func())
    # TODO(tanmingxing): Add support for independent and distributed all_reduce.
assert grad_has_inf_nan is not None
update_op = tf.cond(
grad_has_inf_nan,
update_op_if_nan_or_inf,
update_op_if_no_nan_or_inf,
name='cond_if_grad_has_inf_nan'
)
training_ops.append(update_op)
# To be used with custom_getter on tf.get_variable.
class OverrideCachingDevice(object):
"""Variable getter which caches variables on the least loaded device.
Variables smaller than a certain threshold are cached on a single specific
device, as specified in the constructor. All other variables are load balanced
across a pool of devices, by caching each variable on the least loaded device.
Note that variable creation only happen when building the model graph on the
first device (see how it sets the 'reuse' parameter in
VariableMgr.*.create_outer_variable_scope()). That means, for all other
devices, the variable scope will reuse the variables created before, which
requires that we set the caching_device correctly as otherwise it may not be
able to find the previously created variable and will create a new one. This
requires when building the model graph on different devices, variables with
the same name should have same size.
TODO(laigd): consider adding tests or verification logic to enforce this, or
refactor it.
"""
def __init__(self, devices, device_for_small_variables,
small_variable_size_threshold):
self.devices = devices
self.sizes = [0] * len(self.devices)
self.device_for_small_variables = device_for_small_variables
self.small_variable_size_threshold = small_variable_size_threshold
def __call__(self, getter, *args, **kwargs):
size = tf.TensorShape(kwargs['shape']).num_elements()
if size < self.small_variable_size_threshold:
device_name = self.device_for_small_variables
else:
device_index, _ = min(enumerate(self.sizes), key=operator.itemgetter(1))
device_name = self.devices[device_index]
self.sizes[device_index] += size
kwargs['caching_device'] = device_name
var = getter(*args, **kwargs)
return var
# To be used with custom_getter on tf.get_variable. Ensures the created variable
# is in LOCAL_VARIABLES and not GLOBAL_VARIABLES collection.
class OverrideToLocalVariableIfNotPsVar(object):
# args and kwargs come from the custom_getter interface for Tensorflow
# variables, and matches tf.get_variable's signature, with the addition of
# 'getter' at the beginning.
de
|
att-comdev/deckhand
|
deckhand/context.py
|
Python
|
apache-2.0
| 1,765
| 0
|
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
from oslo_context import context
CONF = cfg.CONF
class RequestContext(context.RequestContext):
"""User security context object
Stores information about the security context under which the user
accesses the system, as well as additional request information.
"""
def __init__(self, project=None, **kwargs):
if project:
kwargs['tenant'] = project
|
self.project = project
super(RequestContext, self).__init__(**kwargs)
|
def to_dict(self):
out_dict = super(RequestContext, self).to_dict()
out_dict['roles'] = self.roles
if out_dict.get('tenant'):
out_dict['project'] = out_dict['tenant']
out_dict.pop('tenant')
return out_dict
@classmethod
def from_dict(cls, values):
return cls(**values)
def get_context():
"""A helper method to get a blank context (useful for tests)."""
return RequestContext(user_id=None,
project_id=None,
roles=[],
is_admin=False,
overwrite=False)
|
UFRB/chdocente
|
cadastro/migrations/0001_initial.py
|
Python
|
agpl-3.0
| 11,352
| 0.007488
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Docente'
db.create_table('cadastro_docente', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('matricula', self.gf('django.db.models.fields.CharField')(max_length=7, unique=True)),
('nome', self.gf('django.db.models.fields.CharField')(max_length=100, unique=True)),
))
db.send_create_signal('cadastro', ['Docente'])
# Adding model 'Disciplina'
db.create_table('cadastro_disciplina', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('codigo', self.gf('django.db.models.fields.CharField')(max_length=7)),
('nivel', self.gf('django.db.models.fields.CharField')(max_length=11)),
('multicampia', self.gf('django.db.models.fields.BooleanField')(default=False)),
('tipo', self.gf('django.db.models.fields.CharField')(max_length=11)),
('cargahoraria', self.gf('django.db.models.fields.IntegerField')(max_length=3)),
('estudantes', self.gf('django.db.models.fields.IntegerField')(max_length=3)),
))
db.send_create_signal('cadastro', ['Disciplina'])
# Adding model 'Pesquisa'
db.create_table('cadastro_pesquisa', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('area', self.gf('django.db.models.fields.CharField')(max_length=20)),
('financiador', self.gf('django.db.models.fields.CharField')(max_length=20)),
('estudantes_graduacao', self.gf('django.db.models.fields.IntegerField')(max_length=2, blank=True)),
('estudantes_pos', self.gf('django.db.models.fields.IntegerField')(max_length=2, blank=True)),
('bolsistas_pibic', self.gf('django.db.models.fields.IntegerField')(max_length=2, blank=True)),
('bolsistas_ppq', self.gf('django.db.models.fields.IntegerField')(max_length=2, blank=True)),
('voluntarios', self.gf('django.db.models.fields.IntegerField')(max_length=2, blank=True)),
('parceria', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('parceria_inter', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
))
db.send_create_signal('cadastro', ['Pesquisa'])
# Adding model 'Extensao'
db.create_table('cadastro_extensao', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('area', self.gf('django.db.models.fields.CharField')(max_length=20)),
('financiador', self.gf('django.db.models.fields.CharField')(max_length=20)),
('estudantes_graduacao', self.gf('django.db.models.fields.IntegerField')(max_length=2, blank=True)),
('estudantes_pos', self.gf('django.db.models.fields.IntegerField')(max_length=2, blank=True)),
('bolsistas_pibex', self.gf('django.db.models.fields.IntegerField')(max_length=2, blank=True)),
('bolsistas_ppq', self.gf('django.db.models.fields.IntegerField')(max_length=2, blank=True)),
('voluntarios', self.gf('django.db.models.fields.IntegerField')(max_length=2, blank=True)),
('parceria', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('parceria_inter', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
))
db.send_create_signal('cadastro', ['Extensao'])
# Adding model 'Atividade'
db.create_table('cadastro_atividade', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('docente', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cadastro.Docente'])),
('afastamento', self.gf('django.db.models.fields.BooleanField')(default=True)),
('cargo', self.gf('django.db.models.fields.CharField')(max_length=100, blank=True)),
('comissoes', self.gf('django.db.models.fields.IntegerField')()),
|
('semestre', self.gf('django.db.models.fields.CharField')(max_length=6)),
))
db.send_create_signal('cadastro', ['Atividade'])
# Adding M2M table for field disciplinas on 'Atividade'
m2m_table_name = db.shorten_name('cadastro_atividade_disciplinas')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('atividade', models.ForeignKey(orm['cadastro.atividade'], null=False)),
('disciplina', models.ForeignKey(orm['cadastro.disciplina'], null=False))
))
db.create_unique(m2m_table_name, ['atividade_id', 'disciplina_id'])
# Adding M2M table for field pesquisa on 'Atividade'
m2m_table_name = db.shorten_name('cadastro_atividade_pesquisa')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('atividade', models.ForeignKey(orm['cadastro.atividade'], null=False)),
('pesquisa', models.ForeignKey(orm['cadastro.pesquisa'], null=False))
))
db.create_unique(m2m_table_name, ['atividade_id', 'pesquisa_id'])
# Adding M2M table for field extensao on 'Atividade'
m2m_table_name = db.shorten_name('cadastro_atividade_extensao')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('atividade', models.ForeignKey(orm['cadastro.atividade'], null=False)),
('extensao', models.ForeignKey(orm['cadastro.extensao'], null=False))
))
db.create_unique(m2m_table_name, ['atividade_id', 'extensao_id'])
def backwards(self, orm):
# Deleting model 'Docente'
db.delete_table('cadastro_docente')
# Deleting model 'Disciplina'
db.delete_table('cadastro_disciplina')
# Deleting model 'Pesquisa'
db.delete_table('cadastro_pesquisa')
# Deleting model 'Extensao'
db.delete_table('cadastro_extensao')
# Deleting model 'Atividade'
db.delete_table('cadastro_atividade')
# Removing M2M table for field disciplinas on 'Atividade'
db.delete_table(db.shorten_name('cadastro_atividade_disciplinas'))
# Removing M2M table for field pesquisa on 'Atividade'
db.delete_table(db.shorten_name('cadastro_atividade_pesquisa'))
# Removing M2M table for field extensao on 'Atividade'
db.delete_table(db.shorten_name('cadastro_atividade_extensao'))
models = {
'cadastro.atividade': {
'Meta': {'object_name': 'Atividade'},
'afastamento': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'cargo': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'comissoes': ('django.db.models.fields.IntegerField', [], {}),
'disciplinas': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cadastro.Disciplina']", 'symmetrical': 'False'}),
'docente': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cadastro.Docente']"}),
'extensao': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cadastro.Extensao']", 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pesquisa': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cadastro.Pesquisa']", 'symmetrical': 'False'}),
'semestre': ('django.db.models.fields.CharField', [], {'max_length': '6'})
},
'cadastro.disciplina': {
'Meta': {'object_name': 'Disciplina'},
'cargahoraria': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'codigo': ('django.db.models.fields.CharField', [], {'max_length': '7'})
|
GoogleCloudPlatform/ml-on-gcp
|
example_zoo/tensorflow/models/ncf_main/official/recommendation/ncf_main.py
|
Python
|
apache-2.0
| 17,012
| 0.006348
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""NCF framework to train and evaluate the NeuMF model.
The NeuMF model assembles both MF and MLP models under the NCF framework. Check
`neumf_model.py` for more details about the models.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import heapq
import json
import logging
import math
import multiprocessing
import os
import signal
import typing
# pylint: disable=g-bad-import-order
import numpy as np
from absl import app as absl_app
from absl import flags
flags.DEFINE_string(name="job-dir", default="/tmp", help="AI Platform Training passes this to the training script.")
import tensorflow as tf
# pylint: enable=g-bad-import-order
from tensorflow.contrib.compiler import xla
from official.datasets import movielens
from official.recommendation import constants as rconst
from official.recommendation import data_pipeline
from official.recommendation import data_preprocessing
from official.recommendation import neumf_model
from official.utils.flags import core as flags_core
from official.utils.logs import hooks_helper
from official.utils.logs import logger
from official.utils.logs import mlperf_helper
from official.utils.misc import distribution_utils
from official.utils.misc import model_helpers
FLAGS = flags.FLAGS
def construct_estimator(model_dir, params):
"""Construct either an Estimator or TPUEstimator for NCF.
Args:
model_dir: The model directory for the estimator
params: The params dict for the estimator
Returns:
An Estimator or TPUEstimator.
"""
if params["use_tpu"]:
# Some of the networking libraries are quite chatty.
for name in ["googleapiclient.discovery", "googleapiclient.discovery_cache",
"oauth2client.transport"]:
logging.getLogger(name).setLevel(logging.ERROR)
    tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
tpu=params["tpu"],
zone=params["tpu_zone"],
project=params["tpu_gcp_project"],
coordinator_name="coordinator"
)
tf.logging.info("Issuing reset command to TPU to ensure a clean state.")
    tf.Session.reset(tpu_cluster_resolver.get_master())
# Estimator looks at the master it connects to for MonitoredTrainingSession
# by reading the `TF_CONFIG` environment variable, and the coordinator
# is used by StreamingFilesDataset.
tf_config_env = {
"session_master": tpu_cluster_resolver.get_master(),
"eval_session_master": tpu_cluster_resolver.get_master(),
"coordinator": tpu_cluster_resolver.cluster_spec()
.as_dict()["coordinator"]
}
os.environ['TF_CONFIG'] = json.dumps(tf_config_env)
distribution = tf.contrib.distribute.TPUStrategy(
tpu_cluster_resolver, steps_per_run=100)
else:
distribution = distribution_utils.get_distribution_strategy(
num_gpus=params["num_gpus"])
run_config = tf.estimator.RunConfig(train_distribute=distribution,
eval_distribute=distribution)
model_fn = neumf_model.neumf_model_fn
if params["use_xla_for_gpu"]:
tf.logging.info("Using XLA for GPU for training and evaluation.")
model_fn = xla.estimator_model_fn(model_fn)
estimator = tf.estimator.Estimator(model_fn=model_fn, model_dir=model_dir,
config=run_config, params=params)
return estimator
def log_and_get_hooks(eval_batch_size):
"""Convenience function for hook and logger creation."""
# Create hooks that log information about the training and metric values
train_hooks = hooks_helper.get_train_hooks(
FLAGS.hooks,
model_dir=FLAGS.model_dir,
batch_size=FLAGS.batch_size, # for ExamplesPerSecondHook
tensors_to_log={"cross_entropy": "cross_entropy"}
)
run_params = {
"batch_size": FLAGS.batch_size,
"eval_batch_size": eval_batch_size,
"number_factors": FLAGS.num_factors,
"hr_threshold": FLAGS.hr_threshold,
"train_epochs": FLAGS.train_epochs,
}
benchmark_logger = logger.get_benchmark_logger()
benchmark_logger.log_run_info(
model_name="recommendation",
dataset_name=FLAGS.dataset,
run_params=run_params,
test_id=FLAGS.benchmark_test_id)
return benchmark_logger, train_hooks
def parse_flags(flags_obj):
"""Convenience function to turn flags into params."""
num_gpus = flags_core.get_num_gpus(flags_obj)
num_devices = FLAGS.num_tpu_shards if FLAGS.tpu else num_gpus or 1
batch_size = (flags_obj.batch_size + num_devices - 1) // num_devices
eval_divisor = (rconst.NUM_EVAL_NEGATIVES + 1) * num_devices
eval_batch_size = flags_obj.eval_batch_size or flags_obj.batch_size
eval_batch_size = ((eval_batch_size + eval_divisor - 1) //
eval_divisor * eval_divisor // num_devices)
return {
"train_epochs": flags_obj.train_epochs,
"batches_per_step": num_devices,
"use_seed": flags_obj.seed is not None,
"batch_size": batch_size,
"eval_batch_size": eval_batch_size,
"learning_rate": flags_obj.learning_rate,
"mf_dim": flags_obj.num_factors,
"model_layers": [int(layer) for layer in flags_obj.layers],
"mf_regularization": flags_obj.mf_regularization,
"mlp_reg_layers": [float(reg) for reg in flags_obj.mlp_regularization],
"num_neg": flags_obj.num_neg,
"num_gpus": num_gpus,
"use_tpu": flags_obj.tpu is not None,
"tpu": flags_obj.tpu,
"tpu_zone": flags_obj.tpu_zone,
"tpu_gcp_project": flags_obj.tpu_gcp_project,
"beta1": flags_obj.beta1,
"beta2": flags_obj.beta2,
"epsilon": flags_obj.epsilon,
"match_mlperf": flags_obj.ml_perf,
"use_xla_for_gpu": flags_obj.use_xla_for_gpu,
"epochs_between_evals": FLAGS.epochs_between_evals,
}
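The eval batch size above is rounded up to a multiple of eval_divisor before being split across devices. The core arithmetic, isolated with made-up numbers:

def round_up_to_multiple(x, m):
    # Smallest multiple of m that is >= x (ceiling division, then rescale).
    return (x + m - 1) // m * m

assert round_up_to_multiple(1000, 768) == 1536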
def main(_):
with logger.benchmark_context(FLAGS), \
mlperf_helper.LOGGER(FLAGS.output_ml_perf_compliance_logging):
mlperf_helper.set_ncf_root(os.path.split(os.path.abspath(__file__))[0])
run_ncf(FLAGS)
def run_ncf(_):
"""Run NCF training and eval loop."""
if FLAGS.download_if_missing and not FLAGS.use_synthetic_data:
movielens.download(FLAGS.dataset, FLAGS.data_dir)
if FLAGS.seed is not None:
np.random.seed(FLAGS.seed)
params = parse_flags(FLAGS)
total_training_cycle = FLAGS.train_epochs // FLAGS.epochs_between_evals
if FLAGS.use_synthetic_data:
producer = data_pipeline.DummyConstructor()
num_users, num_items = data_preprocessing.DATASET_TO_NUM_USERS_AND_ITEMS[
FLAGS.dataset]
num_train_steps = rconst.SYNTHETIC_BATCHES_PER_EPOCH
num_eval_steps = rconst.SYNTHETIC_BATCHES_PER_EPOCH
else:
num_users, num_items, producer = data_preprocessing.instantiate_pipeline(
dataset=FLAGS.dataset, data_dir=FLAGS.data_dir, params=params,
constructor_type=FLAGS.constructor_type,
deterministic=FLAGS.seed is not None)
num_train_steps = (producer.train_batches_per_epoch //
params["batches_per_step"])
num_eval_steps = (producer.eval_batches_per_epoch //
params["batches_per_step"])
assert not producer.train_batches_per_epoch % params["batches_per_step"]
assert not producer.eval_batches_per_epoch % params["batches_per_step"]
producer.start()
params["num_users"], params["num_items"] = num_users, num_items
model_helpers.apply_clean(flags.FLAGS)
estimator = construct_estimator(model_dir
|
eyeofhell/parabridge
|
parabridge/settings.py
|
Python
|
gpl-3.0
| 3,770
| 0.032891
|
#!/usr/bin/env python
# coding:utf-8 vi:et:ts=2
# parabridge persistent settings module.
# Copyright 2013 Grigory Petrov
# See LICENSE for details.
import xmlrpclib
import socket
import sqlite3
import uuid
import info
SQL_CREATE = """
CREATE TABLE IF NOT EXISTS task (
guid TEXT UNIQUE,
name TEXT UNIQUE,
src TEXT,
dst TEXT);
CREATE TABLE IF NOT EXISTS index_last (
guid TEXT,
file TEXT,
index_last INTEGER);
"""
SQL_TASK_ADD = """INSERT INTO task (guid, name, src, dst)
VALUES (:guid, :name, :src, :dst)"""
SQL_TASK_LIST = """SELECT * FROM task"""
SQL_TASK_DEL_BY_NAME = """DELETE FROM task WHERE name = :name"""
SQL_TASK_GUID_BY_NAME = """SELECT guid FROM task WHERE name = :name"""
SQL_INDEX_LAST_DEL = """DELETE FROM index_last WHERE guid = :guid"""
SQL_INDEX_LAST_UPDATE = """UPDATE index_last SET index_last = :index_last
WHERE guid = :guid AND file = :file"""
SQL_INDEX_LAST_ADD = """INSERT INTO index_last (guid, file, index_last)
VALUES (:guid, :file, :index_last)"""
SQL_INDEX_LAST_GET = """SELECT index_last FROM index_last WHERE
guid = :guid AND file = :file"""
class Settings( object ):
def __init__( self ):
self._init_f = False
self._notify_f = False
def init( self, f_notify = False ):
self._notify_f = f_notify
self._init_f = True
with sqlite3.connect( info.FILE_CFG ) as oConn:
oConn.executescript( SQL_CREATE )
## Notify daemon process so it can read updated settings.
def notifyIfNeeded( self ):
if not self._notify_f:
return
try:
xmlrpclib.ServerProxy( info.COMM_ADDR ).cfg_changed()
except socket.error:
pass
def taskAdd( self, s_name, s_src, s_dst ):
with sqlite3.connect( info.FILE_CFG ) as oConn:
try:
mValues = {
'guid': str( uuid.uuid4() ),
'name': s_name,
'src': s_src,
'dst': s_dst }
oConn.execute( SQL_TASK_ADD, mValues )
except sqlite3.IntegrityError:
## Name not unique.
return False
else:
return True
finally:
self.notifyIfNeeded()
def indexLastSet( self, s_guid, s_file, n_index ):
with sqlite3.connect( info.FILE_CFG ) as oConn:
mArgs = {
'guid': s_guid,
'file': s_file,
'index_last': n_index }
oRet = oConn.execute( SQL_INDEX_LAST_UPDATE, mArgs )
if oRet.rowcount > 0:
return
## No record for guid and name pair: add one.
oConn.execute( SQL_INDEX_LAST_ADD, mArgs )
def indexLastGet( self, s_guid, s_file ):
with sqlite3.connect( info.FILE_CFG ) as oConn:
oConn.row_factory = sqlite3.Row
mArgs = { 'guid': s_guid, 'file': s_file }
lRet = oConn.execute( SQL_INDEX_LAST_GET, mArgs ).fetchall()
if 0 == len( lRet ):
return None
if len( lRet ) > 1:
raise Exception( "Consistency error." )
return lRet[ 0 ][ 'index_last' ]
def taskDelByName( self, s_name ):
    with sqlite3.connect( info.FILE_CFG ) as oConn:
oConn.row_factory = sqlite3.Row
try:
mArgs = { 'name': s_name }
oRow = oConn.execute( SQL_TASK_GUID_BY_NAME, mArgs ).fetchone()
if oRow is None:
return False
mArgs[ 'guid' ] = oRow[ 'guid' ]
oRet = oConn.execute( SQL_TASK_DEL_BY_NAME, mArgs )
if 0 == oRet.rowcount:
raise Exception( "Consistency error" )
oConn.execute( SQL_INDEX_LAST_DEL, mArgs )
return True
|
finally:
self.notifyIfNeeded()
def taskList( self ):
with sqlite3.connect( info.FILE_CFG ) as oConn:
try:
oConn.row_factory = sqlite3.Row
return oConn.execute( SQL_TASK_LIST ).fetchall()
finally:
self.notifyIfNeeded()
instance = Settings()
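A minimal usage sketch of the module above; the import path assumes the package layout implied by the file path in the header, and the task values are placeholders:

from parabridge.settings import instance

instance.init(f_notify=False)                 # create the SQLite tables if missing
if instance.taskAdd('books', '~/src.db', '~/dst.sqlite'):
    for row in instance.taskList():
        print(dict(row))                      # each row is a sqlite3.Row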
|
stan-cap/bt_rssi
|
test/main_test.py
|
Python
|
mit
| 1,713
| 0.010508
|
from bt_proximity import BluetoothRSSI
import time
import sys
import datetime
#////////////////////////////////
BT_ADDR = 'xx:xx:xx:xx:xx:xx'#/// Enter your bluetooth address here!
#////////////////////////////////
# ----------------------- DO NOT EDIT ANYTHING BELOW THIS LINE --------------------------- #
def write(records, count):
f = open("test_records.txt", "a+") # open records for append. If not present create
for i in range(count): # write out each record
f.write(str(records[i][0]) + "," + str(records[i][1]) + '\n')
f.close()
def time_diff(start_time):
    current_time = datetime.datetime.now()          # get current time
diff = (current_time - start_time).total_seconds() # get difference of startime and current time
return str(round(diff,2))
def main(start_time):
records = [] # initialize array of records
count = 0 # initialize count
addr = BT_ADDR # assign BT_ADDR
num = 10 # amount of records to be recorded
while(count < num):
btrssi = BluetoothRSSI(addr=addr)
time_e = time_diff(start_time) # get seconds elapsed
record = (btrssi.get_rssi(), time_e) # create record
records.append(record) # add record to records array
count += 1
time.sleep(.5) # wait time to get next record
write(records, count) # write out records
if __name__ == '__main__':
    main(datetime.datetime.now())  # main() expects the recording start time
|
atvcaptain/enigma2
|
lib/python/Plugins/SystemPlugins/FrontprocessorUpgrade/plugin.py
|
Python
|
gpl-2.0
| 2,732
| 0.032211
|
from Screens.Screen import Screen
from Components.ActionMap import ActionMap
from Components.Label import Label
from Plugins.Plugin import PluginDescriptor
def getUpgradeVersion():
import os
try:
r = os.popen("fpupgrade --version").read()
except IOError:
return None
if r[:16] != "FP update tool v":
return None
else:
return int(r[16:17])
class FPUpgrade(Screen):
skin = """
<screen position="150,200" size="450,200" title="FP upgrade required" >
<widget name="text" position="0,0" size="550,50" font="Regular;20" />
<widget name="oldversion_label" position="10,100" size="290,25" font="Regular;20" />
<widget name="newversion_label" position="10,125" size="290,25" font="Regular;20" />
<widget name="oldversion" position="300,100" size="50,25" font="Regular;20" />
<widget name="newversion" position="300,125" size="50,25" font="Regular;20" />
</screen>"""
def __init__(self, session):
self.skin = FPUpgrade.skin
Screen.__init__(self, session)
from Tools.StbHardware import getFPVersion
version = str(getFPVersion() or "N/A")
newversion = str(getUpgradeVersion() or "N/A")
self["text"] = Label(_("Your frontprocessor firmware must be upgraded.\nPress OK to start upgrade."))
self["oldversion_label"] = Label(_("Current version:"))
self["newversion_label"] = Label(_("New version:"))
self["oldversion"] = Label(version)
self["newversion"] = Label(newversion)
self["actions"] = ActionMap(["OkCancelActions"],
{
"ok": self.ok,
"cancel": self.close,
})
def ok(self):
self.close(4)
class SystemMessage(Screen):
skin = """
<screen position="150,200" size="450,200" title="System Message" >
<widget source="text" position="0,0" size="450,200" font="Regular;20" halign="center" valign="center" render="Label" />
<ePixmap pixmap="icons/input_error.png" position="5,5" size="53,53" alphatest="on" />
</screen>"""
def __init__(self, session, message):
from Components.Sources.StaticText import StaticText
Screen.__init__(self, session)
self["text"] = StaticText(message)
self["actions"] = ActionMap(["OkCancelActions"],
{
"cancel": self.ok,
})
def ok(self):
self.close()
def Plugins(**kwargs):
from Tools.StbHardware import getFPVersion
version = getFPVersion()
newversion = getUpgradeVersion() or 0
list = []
if version is not None and version < newversion:
|
list.append(PluginDescriptor(name=_("FP Upgrade"), where = PluginDescriptor.WHERE_WIZARD, needsRestart = True, fnc=(8, FPUpgrade)))
try:
msg = open("/proc/stb/message").read()
list.append(PluginDescriptor(name=_("System Message Check"), where = PluginDescriptor.WHERE_WIZARD, needsRestart = True, fnc=(9, SystemMessage, msg)))
except:
pass
return list
|
schakrava/rockstor-core
|
src/rockstor/scripts/pwreset.py
|
Python
|
gpl-3.0
| 2,030
| 0.00197
|
"""
Copyright (c) 2012-2013 RockStor, Inc. <http://rockstor.com>
This file is part of RockStor.
RockStor is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 2 of the License,
or (at your option) any later version.
RockStor is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
import pwd
from django.db import transaction
from django.contrib.auth.models import User as DjangoUser
from storageadmin.models import User
from system import users
@transaction.atomic
def change_password(username, password):
try:
duser = DjangoUser.objects.get(username=username)
duser.set_password(password)
duser.save()
except:
sys.exit('username: %s does not exist in the admin database' %
username)
try:
User.objects.get(username=username)
except:
sys.exit('username: %s does not exist in the database' % username)
try:
pwd.getpwnam(username)
except KeyError:
sys.exit('username: %s does not exist in the system' % username)
try:
users.usermod(username, password)
users.smbpasswd(username, password)
except:
sys.exit('Low-level error occurred while changing password of user: %s'
% username)
def main():
if (len(sys.argv) < 3 or
(len(sys.argv) > 1 and sys.argv[1] == '-h')):
sys.exit('Usage: pwreset <username> <new_password>')
try:
change_password(sys.argv[1], sys.argv[2])
except:
sys.exit('Error changing password for user: %s. Check the username '
'and try again.' % sys.argv[1])
|
reinbach/django-machina
|
example_projects/demo/demo_project/urls.py
|
Python
|
bsd-3-clause
| 1,208
| 0
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include
from django.conf.urls import url
from django.contrib import admin
from django.views.i18n import JavaScriptCatalog
from demo.apps.app import application
js_info_dict = {
'packages': ('base', ),
}
urlpatterns = [
url(r'^jsi18n/$', JavaScriptCatalog.as_view(), name='javascript_catalog'),
# Admin
url(r'^' + settings.ADMIN_URL, admin.site.urls),
# Apps
url(r'', include(application.urls)),
]
if settings.DEBUG:
# Add the Debug Toolbar’s URLs to the project’s URLconf
import debug_toolbar
urlpatterns += [url(r'^__debug__/', include(debug_toolbar.urls)), ]
# In DEBUG mode, serve media files through Django.
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.views import static
urlpatterns += staticfiles_urlpatterns()
# Remove leading and trailing slashes so the regex matches.
media_url = settings.MEDIA_URL.lstrip('/').rstrip('/')
urlpatterns += [
url(r'^%s/(?P<path>.*)$' % media_url, static.serve,
{'document_root': settings.MEDIA_ROOT}),
]
|
persandstrom/home-assistant
|
homeassistant/components/switch/netio.py
|
Python
|
apache-2.0
| 5,530
| 0
|
"""
The Netio switch component.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/switch.netio/
"""
import logging
from collections import namedtuple
from datetime import timedelta
import voluptuous as vol
from homeassistant.core import callback
from homeassistant import util
from homeassistant.components.http import HomeAssistantView
from homeassistant.const import (
CONF_HOST, CONF_PORT, CONF_USERNAME, CONF_PASSWORD,
EVENT_HOMEASSISTANT_STOP, STATE_ON)
from homeassistant.components.switch import (SwitchDevice, PLATFORM_SCHEMA)
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['pynetio==0.1.6']
_LOGGER = logging.getLogger(__name__)
ATTR_START_DATE = 'start_date'
ATTR_TOTAL_CONSUMPTION_KWH = 'total_energy_kwh'
CONF_OUTLETS = 'outlets'
DEFAULT_PORT = 1234
DEFAULT_USERNAME = 'admin'
DEPENDENCIES = ['http']
Device = namedtuple('device', ['netio', 'entities'])
DEVICES = {}
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=10)
REQ_CONF = [CONF_HOST, CONF_OUTLETS]
URL_API_NETIO_EP = '/api/netio/{host}'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Required(CONF_USERNAME, default=DEFAULT_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_OUTLETS): {cv.string: cv.string},
})
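# Illustrative configuration sketch (added for clarity; the host and outlet
# names are made-up examples). The schema above would accept a
# configuration.yaml entry along these lines:
#
#   switch:
#     - platform: netio
#       host: 192.168.1.43
#       port: 1234
#       username: admin
#       password: secret
#       outlets:
#         1: "Desk lamp"
#         2: "3D printer"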
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Netio platform."""
from pynetio import Netio
host = config.get(CONF_HOST)
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
port = config.get(CONF_PORT)
if not DEVICES:
hass.http.register_view(NetioApiView)
dev = Netio(host, port, username, password)
DEVICES[host] = Device(dev, [])
# Throttle the update for all Netio switches of one Netio
dev.update = util.Throttle(MIN_TIME_BETWEEN_SCANS)(dev.update)
for key in config[CONF_OUTLETS]:
switch = NetioSwitch(
DEVICES[host].netio, key, config[CONF_OUTLETS][key])
DEVICES[host].entities.append(switch)
add_entities(DEVICES[host].entities)
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, dispose)
return True
def dispose(event):
"""Close connections to Netio Devices."""
for _, value in DEVICES.items():
value.netio.stop()
class NetioApiView(HomeAssistantView):
"""WSGI handler class."""
url = URL_API_NETIO_EP
name = 'api:netio'
@callback
def get(self, request, host):
"""Request handler."""
hass = request.app['hass']
data = request.query
states, consumptions, cumulated_consumptions, start_dates = \
[], [], [], []
for i in range(1, 5):
out = 'output%d' % i
states.append(data.get('%s_state' % out) == STATE_ON)
consumptions.append(float(data.get('%s_consumption' % out, 0)))
cumulated_consumptions.append(
float(data.get('%s_cumulatedConsumption' % out, 0)) / 1000)
start_dates.append(data.get('%s_consumptionStart' % out, ""))
_LOGGER.debug('%s: %s, %s, %s since %s', host, states,
consumptions, cumulated_consumptions, start_dates)
ndev = DEVICES[host].netio
ndev.consumptions = consumptions
ndev.cumulated_consumptions = cumulated_consumptions
ndev.states = states
ndev.start_dates = start_dates
for dev in DEVICES[host].entities:
hass.async_add_job(dev.async_update_ha_state())
return self.json(True)
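# Illustrative request sketch (added for clarity; values are made up). The
# handler above reads four sets of per-outlet query parameters, so a Netio
# device would push its state with a GET such as:
#
#   /api/netio/192.168.1.43?output1_state=on&output1_consumption=5.2
#       &output1_cumulatedConsumption=1234&output1_consumptionStart=2018-01-01
#       ...and the same four parameters for output2 through output4.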
class NetioSwitch(SwitchDevice):
"""Provide a Netio linked switch."""
def __init__(self, netio, outlet, name):
"""Initialize the Netio switch."""
self._name = name
self.outlet = outlet
self.netio = netio
@property
def name(self):
"""Return the device's name."""
return self._name
@property
def available(self):
"""Return true if entity is available."""
return not hasattr(self, 'telnet')
def turn_on(self, **kwargs):
"""Turn switch on."""
self._set(True)
def turn_off(self, **kwargs):
"""Turn switch off."""
self._set(False)
def _set(self, value):
val = list('uuuu')
val[self.outlet - 1] = '1' if value else '0'
self.netio.get('port list %s' % ''.join(val))
self.netio.states[self.outlet - 1] = value
self.schedule_update_ha_state()
@property
def is_on(self):
"""Return the switch's status."""
return self.netio.states[self.outlet - 1]
def update(self):
"""Update the state."""
self.netio.update()
@property
def state_attributes(self):
"""Return optional state attributes."""
return {
ATTR_TOTAL_CONSUMPTION_KWH: self.cumulated_consumption_kwh,
ATTR_START_DATE: self.start_date.split('|')[0]
}
@property
def current_power_w(self):
"""Return actual power."""
return self.netio.consumptions[self.outlet - 1]
@property
def cumulated_consumption_kwh(self):
"""Return the total enerygy consumption since start_date."""
return self.netio.cumulated_consumptions[self.outlet - 1]
@property
def start_date(self):
"""Point in time when the energy accumulation started."""
return self.netio.start_dates[self.outlet - 1]
|
MakersLab/Farm-server
|
server/main.py
|
Python
|
gpl-3.0
| 3,234
| 0.002474
|
#!/usr/bin/env python3.7
from multiprocessing import Process
import time
import os
from printerState import main as printerStateMain
from server import main as serverMain
from websocket import main as websocketServerMain
servicesTemplate = {
'server': {
'name': 'Server',
'run': serverMain,
'running': False
},
'printerState': {
'name': 'Printer State',
'run': printerStateMain,
'running': False
},
'websocketServer': {
'name': 'Websocket server',
'run': websocketServerMain,
'running': False
}
}
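# Illustrative sketch (added for clarity; 'logger' and loggerMain are
# hypothetical names). Every entry in servicesTemplate follows the same shape,
# so registering another service only needs one more dict:
#
#   'logger': {
#       'name': 'Logger',
#       'run': loggerMain,   # any zero-argument callable, run in its own process
#       'running': False
#   }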
class ServiceManager:
def __init__(self, services, autoStart=False):
self.log('Creating processes')
self.services = services
for serviceName in services:
newProcess = Process(target=self.services[serviceName]['run'])
newProcess.daemon = True
self.services[serviceName]['process'] = newProcess
if (autoStart):
newProcess.start()
self.log('Creating and starting process for {0} with pid {1}'.format(self.services[serviceName]['name'], newProcess.pid))
self.services[serviceName]['running'] = True
else:
self.log('Creating process for {0}'.format(self.services[serviceName]['name']))
self.services[serviceName]['running'] = False
def updateServiceState(self):
servicesRunning = []
servicesStopped = []
for serviceName in self.services:
self.services[serviceName]['running'] = self.services[serviceName]['process'].is_alive()
if(self.services[serviceName]['running']):
servicesRunning.append(self.services[serviceName]['name'])
else:
servicesStopped.append(self.services[serviceName]['name'])
if(len(servicesStopped) != 0):
self.log('Services stopped: {0}'.format(','.join(servicesStopped)))
def restartStoppedServices(self):
for serviceName in self.services:
if (not self.services[serviceName]['running']):
self.startService(serviceName)
def startService(self, serviceName):
if(self.services[serviceName]['running']):
self.log("Can't start service which is already running", 'warning')
else:
self.services[serviceName]['process'].terminate()
self.services[serviceName]['process'] = Process(target=self.services[serviceName]['run'])
self.services[serviceName]['process'].start()
self.log('Creating and starting process for {0} with pid {1}'.format(
self.services[serviceName]['name'],
self.services[serviceName]['process'].pid))
self.services[serviceName]['running'] = True
def loop(self):
while True:
self.updateServiceState()
self.restartStoppedServices()
time.sleep(4)
def log(self, message, level='info'):
print('{0}-[Service Manager][{2}] {1}'.format(round(time.time()), message, level))
def main():
services = ServiceManager(servicesTemplate, autoStart=True)
services.loop()
if __name__ == '__main__':
main()
|
DaVinAhn/EPubMaker
|
EPubMaker.py
|
Python
|
mit
| 16,500
| 0.030364
|
# -*- coding: utf-8 -*-
import sublime, sublime_plugin
import os
import shutil
import subprocess
import zipfile
import glob
import sys
import codecs
import re
import json
import xml.etree.ElementTree
###
### Global Value
###
PACKAGE_NAME = 'EPubMaker'
OPEN_COMMAND = 'epub_maker_open'
SAVE_COMMAND = 'epub_maker_save'
PREVIEW_COMMAND = 'epub_maker_preview'
WORKSPACES_PATH = None
SUMMARY_EXTENSION = 'sublime-epub-summary'
IDENTIFIER_EXTENSION = 'sublime-epub-identifier'
PROJECT_EXTENSION = 'sublime-project'
IGNORE_EXTENSIONS = [
SUMMARY_EXTENSION,
IDENTIFIER_EXTENSION,
PROJECT_EXTENSION,
'sublime-workspace'
]
PREVIEW_PREFIX = 'epub-preview-'
SETTINGS = {}
ST3 = sublime.version() >= '3000'
###
### EventListener
###
class EpubMakerEventListener(sublime_plugin.EventListener):
def on_load(self, view):
filename = view.file_name()
if is_valid_format(filename, [SUMMARY_EXTENSION]): # make summary files read-only
view.set_read_only(True)
elif not is_valid_format(filename): # check the epub extension
return
elif ST3: # check for Sublime Text 3
global WORKSPACES_PATH
if WORKSPACES_PATH is None: # check that workspaces has been initialized
return
else:
view.run_command(OPEN_COMMAND) # open the epub
def on_post_save(self, view):
if not get_setting('auto_save'):
return
view.run_command(SAVE_COMMAND) # save the epub
###
### TextCommand
###
class EpubMakerOpenCommand(sublime_plugin.TextCommand):
def is_enabled(self):
return is_valid_format(self.view.file_name())
def run(self, edit):
def extract(workpath, namelist):
os.makedirs(workpath)
for name in namelist:
filepath = os.path.join(workpath, name)
dirname = os.path.dirname(filepath)
if not os.path.exists(dirname): # if the directory does not exist yet
os.makedirs(dirname)
if os.path.isdir(filepath): # skip directories
continue
else:
with open(filepath, 'wb') as descriptor:
descriptor.write(epub.read(name))
def close_views(workpath, namelist):
activewindow = sublime.active_window()
activeview = activewindow.active_view()
for name in namelist:
if name.startswith(workpath): # already an absolute path
filepath = name
else:
filepath = os.path.join(workpath, name)
for window in sublime.windows():
for view in window.views():
if view.file_name() == filepath:
view.set_scratch(True)
window.focus_view(view)
window.run_command('close_file')
break
activewindow.focus_view(activeview)
def close_folders(workpath):
for window in sublime.windows():
for folder in window.folders():
if folder == workpath:
window.run_command('remove_folder', {'dirs': [folder]})
break
window.run_command('refresh_folder_list')
# extract the archive
epubpath = self.view.file_name()
try:
epub = zipfile.ZipFile(epubpath)
except Exception as e:
sublime.error_message('압축을 해제하는 중 오류가 발생했습니다')
print(PACKAGE_NAME + ':open: \'' + epubpath + '\'의 압축을 해제하는 중 오류가 발생했습니다')
return
# create the workspace
global WORKSPACES_PATH
workpath = os.path.join(WORKSPACES_PATH, os.path.splitext(os.path.basename(epubpath))[0])
namelist = epub.namelist()
close_views(workpath, namelist + [get_sumblime_project_path(workpath), get_epub_identifier_path(workpath), get_epub_summary_path(workpath), get_preview_path(workpath)])
close_folders(workpath)
if not os.path.exists(workpath):
extract(workpath, namelist)
elif not sublime.ok_cancel_dialog('이전에 작업하던 ePub입니다.\n이어서 작업하시겠습니까?'):
shutil.rmtree(workpath)
extract(workpath, namelist)
# create the project files
idpath = create_epub_identifier(workpath, epubpath)
projectpath = create_sublime_project(workpath)
summarypath = create_epub_summary(workpath, epubpath)
# close the epub view
view = self.view
window = view.window()
view.set_scratch(True)
window.focus_view(view)
window.run_command('close_file')
# open the generated project
if is_windows():
sumlpath = os.path.join(os.path.dirname(sublime.__file__), 'subl.exe')
else:
sumlpath = os.path.join(os.path.dirname(os.path.dirname(sublime.__file__)), 'SharedSupport', 'bin', 'subl')
cmd = '"' + sumlpath + '" --project "' + projectpath + '" --add "' + summarypath + '"'
if get_setting('new_window'):
cmd += ' --new-window'
subprocess.Popen(cmd, shell=True)
window.run_command('refresh_folder_list')
sublime.status_message('Opened ePub ' + epubpath)
print(PACKAGE_NAME + ':open: \'' + epubpath + '\' -> \'' + workpath + '\'')
class EpubMakerSaveCommand(sublime_plugin.TextCommand):
def run(self, edit):
workpath = get_work_path(self.view)
if workpath is None:
return
# find the epub-identifier file
idpath = get_epub_identifier_path(workpath)
if not os.path.exists(idpath):
sublime.error_message('\'' + idpath + '\'를 찾을 수 없습니다')
print(PACKAGE_NAME + ':save: \'' + idpath + '\'를 찾을 수 없습니다')
return
if get_setting('require_confirm_save'):
if not sublime.ok_cancel_dialog('변경된 내용을 ePub에도 반영 하시겠습니까?'):
return
# read the epub-identifier file
idfile = open(idpath, 'r')
epubid = json.loads(idfile.read())
idfile.close()
epubpath = None
if get_setting('overwite_original'):
epubpath = epubid['src_path']
if not epubpath is None and get_setting('backup_original'):
def backup(path):
try:
shutil.copy(path, set_extension(path, get_setting('backup_extension')))
except Exception as e:
sublime.error_message('\'' + epubpath + '\'을 백업하는 중 오류가 발생했습니다')
print(PACKAGE_NAME + ':save: \'' + epubpath + '\'을 백업하는 중 오류가 발생했습니다')
backup(epubpath)
if epubpath is None:
epubpath = set_extension(os.path.join(workpath, '..', os.path.basename(workpath)), 'epub')
epub = zipfile.ZipFile(epubpath, 'w')
# per the ePub OCF spec, the mimetype entry is stored first and uncompressed
epub.writestr('mimetype', 'application/epub+zip', zipfile.ZIP_STORED)
# then add the remaining directories and files
for root, dirs, files in os.walk(workpath):
|
if root == workpath:
continue
epub.write(root, root[len(workpath + os.sep):], zipfile.ZIP_STORED)
for f in files:
if is_ignore_file(f) or f == 'mimetype' or f.startswith(PREVIEW_PREFIX):
continue
f = os.path.join(root, f)
epub.write(f, f[len(workpath + os.sep):], zipfile.ZIP_DEFLATED)
epub.close()
sublime.status_message('Saved ePub ' + epubpath)
print(PACKAGE_NAME + ':save: \'' + epubpath + '\'')
class EpubMakerPreviewCommand(sublime_plugin.TextCommand):
def run(self, edit):
workpath = get_work_path(self.view)
if workpath is None:
return
filename = self.view.file_name()
if not is_valid_format(filename, ['html', 'htm', 'xhtml', 'xhtm']):
return
previewfile = open(get_resource_path('preview.html'), 'r')
preview = previewfile.read()
previewfile.close()
preview = preview.replace('#EPUB_NAME#', os.path.basename(workpath))
preview = preview.replace('#EPUB_SPINE_NAME#', os.path.basename(filename))
preview = preview.replace('#EPUB_SPINE_PATH#', filename.replace(workpath + os.sep, ''))
previewpath = get_preview_path(workpath)
with codecs.open(previewpath, 'w', 'utf-8') as html:
html.write(preview)
html.close()
sublime.active_window().run_command('side_bar_open_in_browser', {'browser': 'chromium', 'paths': [previewpath], 'type': 'testing'})
###
### Global Def (utility)
###
def get_platform_name():
return sublime.platform()
def is_windows():
return get_platform_name().startswith('windows')
def is_osx():
return get_platform_name().startswith('osx')
def natural_sort(l):
convert = lambda text: int(text) if text.isdigit() else text.lower()
alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]
return sorted(l, key = alphanum_key)
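# Illustrative example (added for clarity): natural_sort orders embedded numbers
# numerically rather than lexicographically, e.g.
#   natural_sort(['ch10.xhtml', 'ch2.xhtml', 'ch1.xhtml'])
#   -> ['ch1.xhtml', 'ch2.xhtml', 'ch10.xhtml']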
def set_extension(path=None, extension=None):
if path is None or extension is None:
return None
else:
return path + '.' + extension
def is_valid_format(filename=None, extensions=['epub']):
if filename is None or '.' not in filename:
return False
else:
return filename.rsplit('.', 1)[1] in extensions
def is_ignore_file(filename=None):
if filename is None:
return True
elif is_valid_format(filename, IGNORE_EXTENSIONS):
return True
else:
return False
def get_setting(key):
return SETTINGS[key];
def load_settings():
settings = sublime.load_settings(PACKAGE_NAME + '
|
BRAINSia/ITK
|
Modules/Core/Common/wrapping/test/itkVariableLengthVectorTest.py
|
Python
|
apache-2.0
| 1,870
| 0
|
# ==========================================================================
#
# Copyright NumFOCUS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==========================================================================*/
import itk
itk.auto_progress(2)
n_channels = 31
# Verify UC addition operation
vector_type = itk.VariableLengthVector[itk.UC]
vector1 = vector_type(n_channels)
vector2 = vector_type(n_channels)
assert len(vector1) == n_channels and len(vector2) == n_channels
vector1.Fill(16)
for idx in range(n_channels):
vector2[idx] = idx
sum = vector1 + vector2
print(f'UC sum: {sum}')
for idx in range(n_channels):
assert sum[idx] == 16 + idx, "Got unexpected result from vector sum"
# Verify float addition operation
vector_float_type = itk.VariableLengthVector[itk.F]
vector3 = vector_float_type(n_channels)
vector4 = vector_float_type(n_channels)
assert len(vector3) == n_channels and len(vector4) == n_channels
vector3.Fill(0.5)
for idx in range(n_channels):
vector4.SetElement(idx, 0.1 * idx)
float_sum = vector3 + vector4
print(f'float sum: {float_sum}')
tolerance = 1e-6
for idx in range(n_channels):
diff = abs(float_sum[idx] - (0.5 + 0.1 * idx))
print(f'float sum[{idx}]: {float_sum[idx]:0.9f} diff: {diff:0.2e}')
assert diff < tolerance, "Got unexpected result from vector float sum"
|
drewrobb/marathon-python
|
marathon/_compat.py
|
Python
|
mit
| 173
| 0
|
"""
Support for python 2 & 3, ripped pieces from six.py
"""
import sys
PY3 = sys.version_info[0] == 3
if PY3:
string_types = str,
else:
string_types = basestring,
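# Illustrative usage (added for clarity): callers can stay version-agnostic with
# isinstance(value, string_types), which matches str on Python 3 and
# basestring (str/unicode) on Python 2.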
|
DIT524-V17/group-7
|
TCP raspberry/server.py
|
Python
|
gpl-3.0
| 181
| 0.005525
|
# Author: Pontus Laestadius.
# Since: 2nd of March, 2017.
# Maintained since: 17th of April 2017.
from receiver import Receiver
print("Version 2.2")
Receiver("172.24.1.1", 9005)
|
KonradBreitsprecher/espresso
|
doc/tutorials/06-active_matter/SOLUTIONS/rectification_geometry.py
|
Python
|
gpl-3.0
| 5,253
| 0.008186
|
################################################################################
# #
# Copyright (C) 2010,2011,2012,2013,2014, 2015,2016 The ESPResSo project #
# #
# This file is part of ESPResSo. #
# #
# ESPResSo is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# ESPResSo is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
# #
# Active Matter: Rectification System Setup #
# #
################################################################################
from __future__ import print_function
from math import cos, pi, sin
import numpy as np
import os
import sys
import espressomd
from espressomd import assert_features, lb
from espressomd.lbboundaries import LBBoundary
from espressomd.shapes import Cylinder, Wall, HollowCone
assert_features(["LB_GPU","LB_BOUNDARIES_GPU"])
# Setup constants
outdir = "./RESULTS_RECTIFICATION_GEOMETRY/"
try:
os.makedirs(outdir)
except:
print("INFO: Directory \"{}\" exists".format(outdir))
# Setup the box (we pad the diameter to ensure that the LB boundaries
# and therefore the constraints, are away from the edge of the box)
length = 100
diameter = 20
dt = 0.01
# Setup the MD parameters
system = espressomd.System(box_l=[length, diameter+4, diameter+4])
system.cell_system.skin = 0.1
system.time_step = dt
system.min_global_cut = 0.5
# Setup LB parameters (these are irrelevant here) and fluid
agrid = 1
vskin = 0.1
frict = 20.0
visco = 1.0
densi = 1.0
lbf = lb.LBFluidGPU(agrid=agrid, dens=densi, visc=visco, tau=dt, fric=frict)
system.actors.add(lbf)
################################################################################
#
# Now we set up the three LB boundaries that form the rectifying geometry.
# The cylinder boundary/constraint is actually already capped, but we put
# in two planes for safety's sake. If you want to create a cylinder of
# 'infinite length' using the periodic boundaries, then the cylinder must
# extend over the boundary.
#
################################################################################
# Setup cylinder
cylinder = LBBoundary(shape=Cylinder(center=[length/2.0, (diameter+4)/2.0, (diameter+4)/2.0],
axis=[1,0,0],
radius=diameter/2.0,
length=length,
direction=-1))
system.lbboundaries.add(cylinder)
# Setup walls
wall = LBBoundary(shape=Wall(dist=2, normal=[1,0,0]))
system.lbboundaries.add(wall)
wall = LBBoundary(shape=Wall(dist=-(length - 2), normal=[-1,0,0]))
system.lbboundaries.add(wall)
# Setup cone
irad = 4.0
angle = pi/4.0
orad = (diameter - irad)/sin(angle)
shift = 0.25*orad*cos(angle)
hollow_cone = LBBoundary(shape=HollowCone(position_x=length/2.0 - shift,
position_y=(diameter+4)/2.0,
position_z=(diameter+4)/2.0,
orientation_x=1,
orientation_y=0,
orientation_z=0,
outer_radius=orad,
inner_radius=irad,
width=2.0,
opening_angle=angle,
direction=1))
system.lbboundaries.add(hollow_cone)
################################################################################
# Output the geometry
lbf.print_vtk_boundary("{}/boundary.vtk".format(outdir))
################################################################################
|
everyevery/programming_study
|
lgecodejam/2014-mar/c/c.py
|
Python
|
mit
| 1,284
| 0.010125
|
def process(target, other):
result = [[] for ch in target]
ret = []
for xi, xv in enumerate(target):
for yi, yv in enumerate(other):
if xv != yv:
result[xi].append(0)
elif 0 == xi or 0 == yi:
result[xi].append(1)
else:
result[xi].append(result[xi-1][yi-1]+1)
ret.append(max(result[xi]))
return ret
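# Illustrative example (added for clarity): process() returns, for each prefix
# position of `target`, the length of the longest common substring of `target`
# and `other` that ends at that position, e.g.
#   process("abc", "zbc") -> [0, 1, 2]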
def find_shortest(word_length, sub_map):
for l in range(1, word_length+1):
# print "LEN: ", l
for pos in range(l-1, word_length):
# print "POS: ", pos
flag = True
for other in sub_map:
# print l, other[pos]
if l <= other[pos]:
flag = False
break
if flag:
return l
def solve(n, word_list):
for (xi, xv) in enumerate(word_list):
result = []
for (yi, yv) in enumerate(word_list):
if (xv != yv):
result.append(process(xv, yv))
# print xv, len(xv), result
print find_shortest(len(xv), result)
if __name__ == '__main__':
N = int(raw_input())
WORD = []
for n in xrange(N):
WORD.append(raw_input().strip())
solve(N, WORD)
|
tobikausk/nest-simulator
|
pynest/nest/tests/test_sp/test_enable_multithread.py
|
Python
|
gpl-2.0
| 2,237
| 0
|
# -*- coding: utf-8 -*-
#
# test_enable_multithread.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
import nest
import unittest
__author__ = 'sdiaz'
# Structural plasticity currently does not work with multiple threads.
# An exception should be raised if structural plasticity is enabled
# and multiple threads are set, or if multiple threads are set and
# the enable_structural_plasticity function is called.
HAVE_OPENMP = nest.sli_func("is_threaded")
@unittest.skipIf(not HAVE_OPENMP, 'NEST was compiled without multi-threading')
class TestEnableMultithread(unittest.TestCase):
def setUp(self):
nest.ResetKernel()
nest.set_verbosity('M_ERROR')
def test_enable_multithread(self):
nest.ResetKernel()
nest.EnableStructuralPlasticity()
# Setting multiple threads when structural plasticity is enabled should
# throw an exception
with self.assertRaises(nest.NESTError):
nest.SetKernelStatus(
{
'local_num_threads': 2
}
)
def test_multithread_enable(self):
nest.ResetKernel()
nest.SetKernelStatus(
{
'local_num_threads': 2
}
)
# Setting multiple threads when structural plasticity is enabled should
# throw an exception
with self.assertRaises(nest.NESTError):
nest.EnableStructuralPlasticity()
def suite():
test_suite = unittest.makeSuite(TestEnableMultithread, 'test')
return test_suite
if __name__ == '__main__':
unittest.main()
|
cnvogelg/fs-uae-gles
|
launcher/fs_uae_launcher/editor/XMLControl.py
|
Python
|
gpl-2.0
| 2,828
| 0.001061
|
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import os
import xml.etree.ElementTree
from xml.etree.cElementTree import ElementTree, Element, SubElement
from xml.etree.cElementTree import fromstring, tostring
import fs_uae_launcher.fsui as fsui
from ..Config import Config
from ..Settings import Settings
from ..I18N import _, ngettext
class XMLControl(fsui.TextArea):
def __init__(self, parent):
fsui.TextArea.__init__(self, parent, horizontal_scroll=True)
self.path = ""
def connect_game(self, info):
tree = self.get_tree()
root = tree.getroot()
if not root.tag == "config":
return
game_node = self.find_or_create_node(root, "game")
game_node.set("uuid", info["uuid"])
game_name_node = self.find_or_create_node(game_node, "name")
game_name_node.text = info["name"]
self.set_tree(tree)
def find_or_create_node(self, element, name):
node = element.find(name)
if node is None:
node = SubElement(element, name)
return node
def set_path(self, path):
if not os.path.exists(path):
path = ""
self.path = path
if path:
self.load_xml(path)
else:
self.set_text("")
def get_tree(self):
text = self.get_text().strip()
try:
root = fromstring(text.encode("UTF-8"))
except Exception:
# FIXME: show message
import traceback
traceback.print_exc()
return
tree = ElementTree(root)
indent_tree(root)
return tree
def set_tree(self, tree):
data = tostring(tree.getroot(), encoding="UTF-8").decode("UTF-8")
std_decl = "<?xml version='1.0' encoding='UTF-8'?>"
if data.startswith(std_decl):
data = data[len(std_decl):].strip()
self.set_text(data)
def load_xml(self, path):
with open(path, "rb") as f:
data = f.read()
self.set_text(data)
def save(self):
if not self.path:
print("no path to save XML to")
return
self.save_xml(self.path)
def save_xml(self, path):
self.get_tree().write(path)
def indent_tree(elem, level=0):
i = "\n" + level*" "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
indent_tree(elem, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
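# Illustrative note (added for clarity): indent_tree() rewrites .text/.tail in
# place so that serializing the tree yields indented output, one space per
# nesting level, e.g. <config><game /></config> becomes
#   <config>
#    <game />
#   </config>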
|
gwq5210/litlib
|
thirdparty/sources/protobuf/python/google/protobuf/descriptor.py
|
Python
|
gpl-3.0
| 37,400
| 0.006364
|
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Descriptors essentially contain exactly the information found in a .proto
file, in types that make this information accessible in Python.
"""
__author__ = 'robinson@google.com (Will Robinson)'
import six
from google.protobuf.internal import api_implementation
_USE_C_DESCRIPTORS = False
if api_implementation.Type() == 'cpp':
# Used by MakeDescriptor in cpp mode
import os
import uuid
from google.protobuf.pyext import _message
_USE_C_DESCRIPTORS = getattr(_message, '_USE_C_DESCRIPTORS', False)
class Error(Exception):
"""Base error for this module."""
class TypeTransformationError(Error):
"""Error transforming between python proto type and corresponding C++ type."""
if _USE_C_DESCRIPTORS:
# This metaclass allows to override the behavior of code like
# isinstance(my_descriptor, FieldDescriptor)
# and make it return True when the descriptor is an instance of the extension
# type written in C++.
class DescriptorMetaclass(type):
def __instancecheck__(cls, obj):
if super(DescriptorMetaclass, cls).__instancecheck__(obj):
return True
if isinstance(obj, cls._C_DESCRIPTOR_CLASS):
return True
return False
else:
# The standard metaclass; nothing changes.
DescriptorMetaclass = type
class DescriptorBase(six.with_metaclass(DescriptorMetaclass)):
"""Descriptors base class.
This class is the base of all descriptor classes. It provides common options
related functionality.
Attributes:
has_options: True if the descriptor has non-default options. Usually it
is not necessary to read this -- just call GetOptions() which will
happily return the default instance. However, it's sometimes useful
for efficiency, and also useful inside the protobuf implementation to
avoid some bootstrapping issues.
"""
if _USE_C_DESCRIPTORS:
# The class, or tuple of classes, that are considered as "virtual
# subclasses" of this descriptor class.
_C_DESCRIPTOR_CLASS = ()
def __init__(self, options, options_class_name):
"""Initialize the descriptor given its options message and the name of the
class of the options message. The name of the class is required in case
the options message is None and has to be created.
"""
self._options = options
self._options_class_name = options_class_name
# Does this descriptor have non-default options?
self.has_options = options is not None
def _SetOptions(self, options, options_class_name):
"""Sets the descriptor's options
This function is used in generated proto2 files to update descriptor
options. It must not be used outside proto2.
"""
self._options = options
self._options_class_name = options_class_name
# Does this descriptor have non-default options?
self.has_options = options is not None
def GetOptions(self):
"""Retrieves descriptor options.
This method returns the options set or creates the default options for the
descriptor.
"""
if self._options:
return self._options
from google.protobuf import descriptor_pb2
try:
options_class = getattr(descriptor_pb2, self._options_class_name)
except AttributeError:
raise RuntimeError('Unknown options class name %s!' %
(self._options_class_name))
self._options = options_class()
return self._options
class _NestedDescriptorBase(DescriptorBase):
"""Common class for descriptors that can be nested."""
def __init__(self, options, options_class_name, name, full_name,
file, containing_type, serialized_start=None,
serialized_end=None):
"""Constructor.
Args:
options: Protocol message options or None
to use default message options.
options_class_name: (str) The class name of the above options.
name: (str) Name of this protocol message type.
full_name: (str) Fully-qualified name of this protocol message type,
which will include protocol "package" name and the name of any
enclosing types.
file: (FileDescriptor) Reference to file info.
containing_type: if provided, this is a nested descriptor, with this
descriptor as parent, otherwise None.
serialized_start: The start index (inclusive) in block in the
file.serialized_pb that describes this descriptor.
serialized_end: The end index (exclusive) in block in the
file.serialized_pb that describes this descriptor.
"""
super(_NestedDescriptorBase, self).__init__(
options, options_class_name)
self.name = name
# TODO(falk): Add function to calculate full_name instead of having it in
# memory?
self.full_name = full_name
self.file = file
self.containing_type = containing_type
self._serialized_start = serialized_start
self._serialized_end = serialized_end
def GetTopLevelContainingType(self):
"""Returns the root if this is a nested type, or itself if its the root."""
desc = self
while desc.containing_type is not None:
desc = desc.containing_type
return desc
def CopyToProto(self, proto):
"""Copies this to the matching proto in descriptor_pb2.
Args:
proto: An empty proto instance from descriptor_pb2.
Raises:
Error: If self couldn't be serialized, due to too few constructor arguments.
"""
if (self.file is not None and
self._serialized_start is not None and
self._serialized_end is not None):
proto.ParseFromString(self.file.serialized_pb[
self._serialized_start:self._serialized_end])
else:
raise Error('Descriptor does not contain serialization.')
class Descriptor(_NestedDescriptorBase):
"""Descriptor for a protocol message type.
A Descriptor instance has the following attributes:
name: (str) Name of this protocol message type.
full_name: (str) Fully-qualified name of this protocol message type,
which will include protocol "package" name and the name of any
enclosing types.
containing_type: (Descriptor) Reference to the descriptor of the
type containing us, or None if this is top-level.
fields: (list of FieldDescriptors) Field descriptors for all
fields in this type.
fields_by_number: (dict int -> FieldDescriptor) Same FieldDescriptor
objects as in |fields|, but indexed by "number" attribute in each
FieldDescriptor.
fields_by_name: (dict str -> FieldDescriptor) Same FieldDescriptor
objects as in |f
|
hlmnrmr/liveblog
|
server/liveblog/syndication/exceptions.py
|
Python
|
agpl-3.0
| 199
| 0
|
class APIConnectionError(Exception):
pass
class DownloadError(Exception):
pass
class ProducerAPIError(APIConnectionError):
pass
class ConsumerAPIError(APIConnectionError):
pass
|
libor-m/scrimer
|
scrimer/primer3_connector.py
|
Python
|
agpl-3.0
| 7,094
| 0.006061
|
"""
A Python interface to the primer3_core executable.
TODO: it is not possible to keep a persistent primer3 process
using subprocess module - communicate() terminates the input
stream and waits for the process to finish
Author: Libor Morkovsky 2012
"""
# This file is a part of Scrimer.
# See LICENSE.txt for details on licensing.
# Copyright (C) 2012, 2013 Libor Morkovsky
class BoulderIO:
"""Provides Python interface for ``BoulderIO`` format used by Primer3.
"""
@classmethod
def parse(self, string):
r"""Parse a BoulderIO string ``(KEY=VAL\n)``
return a list of records, where each record is a dictionary
end of the string implies a single ``'=\n'`` (record separator).
"""
record_strings = string.split("=\n")
return [dict(tuple(line.split("=", 1)) for line in record.split("\n") if len(line) > 3) for record in record_strings if len(record) > 3]
@classmethod
def deparse(self, records):
r"""Accepts a dict or a list of dicts, produces a BoulderIO string ``(KEY=VAL\n)``
with records separated by ``'=\n'``.
"""
# unify the input, create a list with single element
if type(records) == dict:
records = [records]
return "\n=\n".join("\n".join("=".join(kval) for kval in record.iteritems()) for record in records) + "\n=\n"
class Primer3:
"""Wraps Primer3 executable. `kwargs` are converted to strings and used as default parameters
for each call of primer3 binary.
"""
def __init__(self, p3path="primer3_core", **kwargs):
# store path to primer3
self.p3path = p3path
# add stringized versions of all kwargs to default args
self.default_params = {}
str_kw = dict((key, str(val)) for key, val in kwargs.iteritems())
self.default_params.update(str_kw)
def call(self, records):
"""Merge each of the records with `default_params`, the record taking precedence,
call the ``primer3`` binary,
parse the output and return a list of dictionaries,
``{RIGHT:[], LEFT:[], PAIR:[], INTERNAL:[]}`` for each input record
uppercase keys (in the result) are the original names from BoulderIO format,
lowercase keys have no direct equivalent in primer3 output (``position``, ``other-keys``)
"""
# merge the defaults with current query
full_records = [dict(self.default_params.items() + record.items()) for record in records]
# call primer3
import subprocess
self.child = subprocess.Popen([self.p3path, '-strict_tags'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = self.child.communicate(BoulderIO.deparse(full_records))
# simple check for errors in stderr
if len(err):
raise Exception(err)
results = BoulderIO.parse(out)
# parse the results to {RIGHT:[], LEFT:[], PAIR:[], INTERNAL:[]}
sides = ['RIGHT', 'LEFT', 'PAIR', 'INTERNAL']
primers = []
for result in results:
# primers for current result
res_primers = dict((side, []) for side in sides)
used_keys = []
for side in sides:
nret_key = 'PRIMER_%s_NUM_RETURNED' % side
nret = int(result.get(nret_key, 0))
used_keys.append(nret_key)
# extract the values for each single primer and put those to
# equivalent key
for num in xrange(nret):
template = 'PRIMER_%s_%d_' % (side, num)
primer_keys = filter(lambda k: template in k, result.iterkeys())
primer = dict((key[len(template):], result[key]) for key in primer_keys)
# extract the position, which itself has no extractible name in BoulderIO
# only 'PRIMER_LEFT_0'
if side != 'PAIR':
pos_key = template[:len(template)-1]
primer['position'] = result.get(pos_key, "#error!")
used_keys.append(pos_key)
# keep track of keys used in current record
used_keys.extend(primer_keys)
res_primers[side].append(primer)
# store all the unused keys for current result
res_primers['other-keys'] = dict((key, result[key]) for key in result.iterkeys() if key not in used_keys)
primers.append(res_primers)
return primers
if __name__ == "__main__":
print "Running tests"
import textwrap
record = BoulderIO.parse(textwrap.dedent(
"""
SEQUENCE_ID=example
SEQUENCE_TEMPLATE=GTAGTCAGTAGACGATGACTACTGACGATGCAGACNACACACACACACACAGCACACAGGTATTAGTGGGCCATTCGATCCCGACCCAAATCGATAGCTACGATGACG
SEQUENCE_TARGET=37,21
PRIMER_PICK_INTERNAL_OLIGO=0
PRIMER_OPT_SIZE=18
PRIMER_MIN_SIZE=15
PRIMER_MAX_SIZE=21
PRIMER_MAX_NS_ACCEPTED=3
PRIMER_PRODUCT_SIZE_RANGE=50-100
"""))
record_no_res = BoulderIO.parse(textwrap.dedent(
"""
SEQUENCE_ID=example
SEQUENCE_TEMPLATE=GTAGTCAGTAGACNATGACNACTGACGATGCAGACNACACACACACACACAGCACACAGGTATTAGTGGGCCATTCGATCCCGACCCAAATCGATAGCTACGATGACG
SEQUENCE_TARGET=37,21
PRIMER_TASK=pick_detection_primers
PRIMER_PICK_LEFT_PRIMER=1
PRIMER_PICK_INTERNAL_OLIGO=1
PRIMER_PICK_RIGHT_PRIMER=1
PRIMER_OPT_SIZE=18
PRIMER_MIN_SIZE=15
PRIMER_MAX_SIZE=21
PRIMER_MAX_NS_ACCEPTED=1
PRIMER_PRODUCT_SIZE_RANGE=75-100
SEQUENCE_INTERNAL_EXCLUDED_REGION=37,21
"""))
default_params = BoulderIO.parse(textwrap.dedent(
"""
PRIMER_THERMODYNAMIC_PARAMETERS_PATH=/opt/primer3/bin/primer3_config/
PRIMER_MAX_NS_ACCEPTED=0
PRIMER_EXPLAIN_FLAG=1
"""))[0]
print "Testing BoulderIO, single record:",
record_dp = BoulderIO.deparse(record)
record_reparsed = BoulderIO.parse(record_dp)
if record == record_reparsed:
print "OK
|
"
else:
print "Failed!"
print "Testing BoulderIO, two records:",
two_records = record + record_no_res
record_dp = BoulderIO.deparse(two_records)
record_reparsed = BoulderIO.parse(record_dp)
if two_records == record_reparsed:
print "OK"
else:
print "Failed!"
print "Testing
|
Primer3, single record:",
p3 = Primer3(**default_params)
# test for single record
res = p3.call(record)
if res[0]['RIGHT'][0]['SEQUENCE'] == 'GTCGGGATCGAATGGCCC':
print "OK"
else:
print "Failed!"
# test for multiple records
print "Testing Primer3, two records:",
res = p3.call(two_records)
# second record should produce no results
if len(res[1]['RIGHT']) == 0:
print "OK"
else:
print "Failed!"
# if no exception occurs, the test should be OK
print "Tests ran OK"
|
EricssonResearch/scott-eu
|
robot-emulator/main.py
|
Python
|
apache-2.0
| 3,749
| 0.005346
|
import paho.mqtt.client as mqtt
import os,binascii
import logging
import time
from enum import Enum
from threading import Timer
import json
import random
import math
ID_STRING = binascii.hexlify(os.urandom(15)).decode('utf-8')[:4]
CLIENT_ID = "robot-emulator-" + ID_STRING
BROKER_HOST = "mosquitto"
TOPIC_STATUS = "twin/%s/status" % ID_STRING
TOPIC_PLANS = "twin/%s/plans" % ID_STRING
TOPIC_REGISTRATION = "twins/registration/announce"
TOPIC_HANDSHAKE = "twins/registration/handshake"
class TwinStatus(Enum):
NOT_CONNECTED = 1
SEARCHING = 2
SELECTED = 3
CONNECTED = 4
DISCONNECTED = 5
status = TwinStatus.NOT_CONNECTED
timer = None
def main():
logging.info("Client '%s' is connecting...", CLIENT_ID)
# Client(client_id=””, clean_session=True, userdata=None, protocol=MQTTv311, transport=”tcp”)
client = mqtt.Client(CLIENT_ID)
client.on_connect = on_connect
client.on_message = on_message
try:
client.connect(BROKER_HOST)
logging.info("Client '%s' CONNECTED to '%s'", CLIENT_ID, BROKER_HOST)
except Exception as e:
logging.error("Failed to connect to the MQTT broker on host '%s' (CLIENT_ID='%s')", BROKER_HOST, CLIENT_ID)
logging.debug(e)
client.loop_forever()
def twin_search_timeout(client, n):
if not status == TwinStatus.CONNECTED:
logging.warning("Twin connection is not established (%s)", status)
request_twin(client)
schedule_reconnect(client, n+1)
def on_connect(client, userdata, flags, rc):
print("Connected with result code "+str(rc))
# Subscribing in on_connect() means that if we lose the connection and
# reconnect then subscriptions will be renewed.
# no need to sub to our own statuses
# sub(client, TOPIC_STATUS)
sub(client, TOPIC_PLANS)
sub(client, TOPIC_HANDSHAKE)
# client.publish(TOPIC_STATUS, "{'status': 'on'}")
request_twin(client)
schedule_reconnect(client, 1)
# TODO also publish some message on the 'registration' topic
def sub(client, topic):
client.subscribe(topic)
logging.info("Subscribed to %s", topic)
def schedule_reconnect(client, n):
delay = min(0.1 * 2 ** (n-1) + (random.randint(0, 200) / 1000), 10)
logging.debug("Next reconnection attempt in %fs", delay)
timer = Timer(delay, twin_search_timeout, [client, n])
timer.start()
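# Illustrative timing (added for clarity): the backoff above yields roughly
# 0.1 s, 0.2 s, 0.4 s, 0.8 s, ... for n = 1, 2, 3, 4, plus up to 0.2 s of
# random jitter, and is capped at 10 s from n = 8 onwards (0.1 * 2**7 = 12.8).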
def request_twin(client):
client.publish(TOPIC_REGISTRATION, json.dumps({'twin': ID_STRING, 'status': 'awaiting'}))
status = TwinStatus.SEARCHING
# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
logging.debug("New message '%s' (topic: '%s', QoS%d)", msg.payload, msg.topic, msg.qos)
if not msg.topic == TOPIC_STATUS:
client.publish(TOPIC_STATUS, json.dumps({'status': 'done'}))
if msg.topic == TOPIC_HANDSHAKE:
reg_reply = json.loads(msg.payload)
process_reg_reply(reg_reply, client, msg)
def process_reg_reply(reg_reply, client, msg):
if reg_reply["device"] != ID_STRING:
logging.debug("A registration message for another device received: %s", msg.payload)
else:
t = reg_reply["twin"]
logging.debug("Trying to select
|
the twin '%s'", t)
# TODO do we really need this status?
|
status = TwinStatus.SELECTED
register_with_twin(t)
def register_with_twin(t):
logging.warning("Not implemented yet")
status = TwinStatus.CONNECTED
twin = t
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
datefmt='%y-%m-%d %H:%M')
main()
logging.warning("Client '%s' is shutting down", CLIENT_ID)
|
auth0/auth0-python
|
auth0/v3/test/management/test_resource_servers.py
|
Python
|
mit
| 3,056
| 0.000327
|
import unittest
import mock
from ...management.resource_servers import ResourceServers
class TestResourceServers(unittest.TestCase):
def test_init_with_optionals(self):
t = ResourceServers(domain='domain', token='jwttoken', telemetry=False, timeout=(10, 2))
self.assertEqual(t.client.options.timeout, (10, 2))
telemetry_header = t.client.base_headers.get('Auth0-Client', None)
self.assertEqual(telemetry_header, None)
@mock.patch('auth0.v3.management.resource_servers.RestClient')
def test_create(self, mock_rc):
mock_instance = mock_rc.return_value
r = ResourceServers(domain='domain', token='jwttoken')
r.create({'name': 'TestApi', 'identifier': 'https://test.com/api'})
mock_instance.post.assert_called_with(
'https://domain/api/v2/resource-servers',
data={'name': 'TestApi', 'identifier': 'https://test.com/api'}
)
@mock.patch('auth0.v3.management.resource_servers.RestClient')
def test_get_all(self, mock_rc):
mock_instance = mock_rc.return_value
r = ResourceServers(domain='domain', token='jwttoken')
# with default params
r.get_all()
mock_instance.get.assert_called_with(
'https://domain/api/v2/resource-servers',
params={
'page': None,
'per_page': None,
'include_totals': 'false'
}
)
# with pagination params
r.get_all(page=3, per_page=27, include_totals=True)
mock_instance.get.assert_called_with(
'https://domain/api/v2/resource-servers',
params={
'page': 3,
'per_page': 27,
'include_totals': 'true'
}
)
@mock.patch('auth0.v3.management.resource_servers.RestClient')
def test_get(self, mock_rc):
mock_instance = mock_rc.return_value
r = ResourceServers(domain='domain', token='jwttoken')
r.get('some_id')
mock_instance.get.assert_called_with(
'https://domain/api/v2/resource-servers/some_id'
)
@mock.patch('auth0.v3.management.resource_servers.RestClient')
def test_delete(self, mock_rc):
mock_instance = mock_rc.return_value
r = ResourceServers(domain='domain', token='jwttoken')
r.delete('some_id')
mock_instance.delete.assert_called_with(
'https://domain/api/v2/resource-servers/some_id'
)
@mock.patch('auth0.v3.management.resource_servers.RestClient')
def test_update(self, mock_rc):
mock_instance = mock_rc.return_value
r = ResourceServers(domain='domain', token='jwttoken')
r.update('some_id', {'name': 'TestApi2',
'identifier': 'https://test.com/api2'})
mock_instance.patch.assert_called_with(
'https://domain/api/v2/resource-servers/some_id',
data={'name': 'TestApi2',
'identifier': 'https://test.com/api2'}
)
|
wandec/grr
|
client/client_actions/plist.py
|
Python
|
apache-2.0
| 2,452
| 0.008564
|
#!/usr/bin/env python
# Copyright 2012 Google Inc. All Rights Reserved.
"""Client actions related to plist files."""
import cStringIO
import types
from grr.client import actions
from grr.client import vfs
from grr.lib import plist as plist_lib
from grr.lib import rdfvalue
from grr.parsers import binplist
class PlistQuery(actions.ActionPlugin):
"""Parses the plist request specified and returns the results.
PlistQuery allows you to obtain data from a plist, optionally only if it
matches the given filter.
Querying for a plist is done in two steps. First, its contents are
retrieved.
For plists where the top level element is a dict, you can use the key
parameter of the PlistRequest to specify a path into the dict to retrieve.
When specifying a key, the requested key values are placed under a dictionary
key called "key".
Whether you've specified a key or not, the query parameter allows you to
filter based on the
"""
in_rdfvalue = rdfvalue.PlistRequest
out_rdfvalue = rdfvalue.RDFValueArray
MAX_PLIST_SIZE = 1024 * 1024 * 100 # 100 MB
def Run(self, args):
self.context = args.context
self.filter_query = args.query
with vfs.VFSOpen(args.pathspec, progress_callback=self.Progress) as fd:
data = fd.Read(self.MAX_PLIST_SIZE)
plist = binplist.readPlist(cStringIO.StringIO(data))
# Create the query parser
parser = plist_lib.PlistFilterParser(self.filter_query).Parse()
filter_imp = plist_lib.PlistFilterImplementation
matcher = parser.Compile(filter_imp)
if self.context:
# Obtain the values for the context using the value expander
value_expander = filter_imp.FILTERS["ValueExpander"]
iterator = value_expander().Expand(plist, self.context)
else:
# If we didn't get a context, the context is the whole plist
iterator = [plist]
reply = rdfvalue.RDFValueArray()
for item in iterator:
# As we're setting the context manually, we need to account for types
if isinstance(item, types.ListType):
for sub_item in item:
partial_plist = plist_lib.PlistValueToPlainValue(sub_item)
if matcher.Matches(partial_plist):
reply.Append(sub_item)
else:
partial_plist = plist_lib.PlistValueToPlainValue(item)
if matcher.Matches(partial_plist):
reply.Append(partial_plist)
self.SendReply(reply)
|
renalreg/radar
|
radar/models/patient_addresses.py
|
Python
|
agpl-3.0
| 8,540
| 0.000117
|
#! -*- coding: utf-8 -*-
from collections import OrderedDict
from sqlalchemy import Column, Date, ForeignKey, Index, String
from sqlalchemy import Integer
from sqlalchemy.orm import relationship
from radar.database import db
from radar.models.common import MetaModelMixin, patient_id_column, patient_relationship, uuid_pk_column
from radar.models.logs import log_changes
COUNTRIES = OrderedDict([
('AF', 'Afghanistan'),
('AX', 'Åland Islands'),
('AL', 'Albania'),
('DZ', 'Algeria'),
('AS', 'American Samoa'),
('AD', 'Andorra'),
('AO', 'Angola'),
('AI', 'Anguilla'),
('AQ', 'Antarctica'),
('AG', 'Antigua and Barbuda'),
('AR', 'Argentina'),
('AM', 'Armenia'),
('AW', 'Aruba'),
('AU', 'Australia'),
('AT', 'Austria'),
('AZ', 'Azerbaijan'),
('BS', 'Bahamas'),
('BH', 'Bahrain'),
('BD', 'Bangladesh'),
('BB', 'Barbados'),
('BY', 'Belarus'),
('BE', 'Belgium'),
('BZ', 'Belize'),
('BJ', 'Benin'),
('BM', 'Bermuda'),
('BT', 'Bhutan'),
('BO', 'Bolivia, Plurinational State of'),
('BQ', 'Bonaire, Sint Eustatius and Saba'),
('BA', 'Bosnia and Herzegovina'),
('BW', 'Botswana'),
('BV', 'Bouvet Island'),
('BR', 'Brazil'),
('IO', 'British Indian Ocean Territory'),
('BN', 'Brunei Darussalam'),
('BG', 'Bulgaria'),
('BF', 'Burkina Faso'),
('BI', 'Burundi'),
('KH', 'Cambodia'),
('CM', 'Cameroon'),
('CA', 'Canada'),
('CV', 'Cape Verde'),
('KY', 'Cayman Islands'),
('CF', 'Central African Republic'),
('TD', 'Chad'),
('CL', 'Chile'),
('CN', 'China'),
('CX', 'Christmas Island'),
('CC', 'Cocos (Keeling) Islands'),
('CO', 'Colombia'),
('KM', 'Comoros'),
('CG', 'Congo'),
('CD', 'Congo, the Democratic Republic of the'),
('CK', 'Cook Islands'),
('CR', 'Costa Rica'),
('CI', 'Côte d\'Ivoire'),
('HR', 'Croatia'),
('CU', 'Cuba'),
('CW', 'Curaçao'),
('CY', 'Cyprus'),
('CZ', 'Czech Republic'),
('DK', 'Denmark'),
('DJ', 'Djibouti'),
('DM', 'Dominica'),
('DO', 'Dominican Republic'),
('EC', 'Ecuador'),
('EG', 'Egypt'),
('SV', 'El Salvador'),
('GQ', 'Equatorial Guinea'),
('ER', 'Eritrea'),
('EE', 'Estonia'),
('ET', 'Ethiopia'),
('FK', 'Falkland Islands (Malvinas)'),
('FO', 'Faroe Islands'),
('FJ', 'Fiji'),
('FI', 'Finland'),
('FR', 'France'),
('GF', 'French Guiana'),
('PF', 'French Polynesia'),
('TF', 'French Southern Territories'),
('GA', 'Gabon'),
('GM', 'Gambia'),
('GE', 'Georgia'),
('DE', 'Germany'),
('GH', 'Ghana'),
('GI', 'Gibraltar'),
('GR', 'Greece'),
('GL', 'Greenland'),
('GD', 'Grenada'),
('GP', 'Guadeloupe'),
('GU', 'Guam'),
('GT', 'Guatemala'),
('GG', 'Guernsey'),
('GN', 'Guinea'),
('GW', 'Guinea-Bissau'),
('GY', 'Guyana'),
('HT', 'Haiti'),
('HM', 'Heard Island and McDonald Islands'),
('VA', 'Holy See (Vatican City State)'),
('HN', 'Honduras'),
('HK', 'Hong Kong'),
('HU', 'Hungary'),
('IS', 'Iceland'),
('IN', 'India'),
('ID', 'Indonesia'),
('IR', 'Iran, Islamic Republic of'),
('IQ', 'Iraq'),
('IE', 'Ireland'),
('IM', 'Isle of Man'),
('IL', 'Israel'),
('IT', 'Italy'),
('JM', 'Jamaica'),
('JP', 'Japan'),
('JE', 'Jersey'),
('JO', 'Jordan'),
('KZ', 'Kazakhstan'),
('KE', 'Kenya'),
('KI', 'Kiribati'),
('KP', 'Korea, Democratic People\'s Republic of'),
('KR', 'Korea, Republic of'),
('KW', 'Kuwait'),
('KG', 'Kyrgyzstan'),
('LA', 'Lao People\'s Democratic Republic'),
('LV', 'Latvia'),
('LB', 'Lebanon'),
('LS', 'Lesotho'),
('LR', 'Liberia'),
('LY', 'Libya'),
('LI', 'Liechtenstein'),
('LT', 'Lithuania'),
('LU', 'Luxembourg'),
('MO', 'Macao'),
('MK', 'Macedonia, the former Yugoslav Republic of'),
('MG', 'Madagascar'),
('MW', 'Malawi'),
('MY', 'Malaysia'),
('MV', 'Maldives'),
('ML', 'Mali'),
('MT', 'Malta'),
('MH', 'Marshall Islands'),
('MQ', 'Martinique'),
('MR', 'Mauritania'),
('MU', 'Mauritius'),
('YT', 'Mayotte'),
('MX', 'Mexico'),
('FM', 'Micronesia, Federated States of'),
('MD', 'Moldova, Republic of'),
('MC', 'Monaco'),
('MN', 'Mongolia'),
('ME', 'Montenegro'),
('MS', 'Montserrat'),
|
('MA', 'Morocco'),
('MZ', 'Mozambique'),
('MM', 'Myanmar'),
('NA', 'Namibia'),
('NR', 'Nauru'),
('NP', 'Nepal'),
('NL', 'Netherlands'),
('NC', 'New Caledonia'),
('NZ', 'New Zealand'),
('NI', 'Nicaragua'),
('NE', 'Niger'),
('NG', 'Nigeria'),
('NU', 'Niue'),
('NF', 'Norfolk Island'),
('MP', 'Northern Mariana Islands'),
('NO', 'Norway'),
('OM', 'Oman'),
('PK', 'Pakistan'),
('PW', 'Palau'),
('PS', 'Palestinian Territory, Occupied'),
('PA', 'Panama'),
('PG', 'Papua New Guinea'),
('PY', 'Paraguay'),
('PE', 'Peru'),
('PH', 'Philippines'),
('PN', 'Pitcairn'),
('PL', 'Poland'),
('PT', 'Portugal'),
('PR', 'Puerto Rico'),
('QA', 'Qatar'),
('RE', 'Réunion'),
('RO', 'Romania'),
('RU', 'Russian Federation'),
('RW', 'Rwanda'),
('BL', 'Saint Barthélemy'),
('SH', 'Saint Helena, Ascension and Tristan da Cunha'),
('KN', 'Saint Kitts and Nevis'),
('LC', 'Saint Lucia'),
('MF', 'Saint Martin (French part)'),
('PM', 'Saint Pierre and Miquelon'),
('VC', 'Saint Vincent and the Grenadines'),
('WS', 'Samoa'),
('SM', 'San Marino'),
('ST', 'Sao Tome and Principe'),
('SA', 'Saudi Arabia'),
('SN', 'Senegal'),
('RS', 'Serbia'),
('SC', 'Seychelles'),
('SL', 'Sierra Leone'),
('SG', 'Singapore'),
('SX', 'Sint Maarten (Dutch part)'),
('SK', 'Slovakia'),
('SI', 'Slovenia'),
('SB', 'Solomon Islands'),
('SO', 'Somalia'),
('ZA', 'South Africa'),
('GS', 'South Georgia and the South Sandwich Islands'),
('SS', 'South Sudan'),
('ES', 'Spain'),
('LK', 'Sri Lanka'),
('SD', 'Sudan'),
('SR', 'Suriname'),
('SJ', 'Svalbard and Jan Mayen'),
('SZ', 'Swaziland'),
('SE', 'Sweden'),
('CH', 'Switzerland'),
('SY', 'Syrian Arab Republic'),
('TW', 'Taiwan, Province of China'),
('TJ', 'Tajikistan'),
('TZ', 'Tanzania, United Republic of'),
('TH', 'Thailand'),
('TL', 'Timor-Leste'),
('TG', 'Togo'),
('TK', 'Tokelau'),
('TO', 'Tonga'),
('TT', 'Trinidad and Tobago'),
('TN', 'Tunisia'),
('TR', 'Turkey'),
('TM', 'Turkmenistan'),
('TC', 'Turks and Caicos Islands'),
('TV', 'Tuvalu'),
('UG', 'Uganda'),
('UA', 'Ukraine'),
('AE', 'United Arab Emirates'),
('GB', 'United Kingdom'),
('US', 'United States'),
('UM', 'United States Minor Outlying Islands'),
('UY', 'Uruguay'),
('UZ', 'Uzbekistan'),
('VU', 'Vanuatu'),
('VE', 'Venezuela, Bolivarian Republic of'),
('VN', 'Viet Nam'),
('VG', 'Virgin Islands, British'),
('VI', 'Virgin Islands, U.S.'),
('WF', 'Wallis and Futuna'),
('EH', 'Western Sahara'),
('YE', 'Yemen'),
('ZM', 'Zambia'),
('ZW', 'Zimbabwe'),
])
@log_changes
class PatientAddress(db.Model, MetaModelMixin):
__tablename__ = 'patient_addresses'
id = uuid_pk_column()
patient_id = patient_id_column()
patient = patient_relationship('patient_addresses')
source_group_id = Column(Integer, ForeignKey('groups.id'), nullable=False)
source_group = relationship('Group')
source_type = Column(String, nullable=False)
from_date = Column(Date)
to_date = Column(Date)
address1 = Column(String)
address2 = Column(String)
address3 = Column(String)
address4 = Column(String)
postcode = Column(String)
country = Column(String)
@property
def full_address(self):
parts = []
parts.extend([
self.address1,
self.address2,
self.address3,
self.address4,
self.postcode,
self.country,
])
return '\n'.join(x for x in parts if x)
@propert
|
erangre/Dioptas
|
dioptas/model/util/smooth_bruckner_python.py
|
Python
|
gpl-3.0
| 2,059
| 0.004371
|
# -*- coding: utf-8 -*-
# Dioptas - GUI program for fast processing of 2D X-ray diffraction data
# Principal author: Clemens Prescher (clemens.prescher@gmail.com)
# Copyright (C) 2014-2019 GSECARS, University of Chicago, USA
# Copyright (C) 2015-2018 Institute for Geology and Mineralogy, University of Cologne, Germany
# Copyright (C) 2019 DESY, Hamburg, Germany
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
def smooth_bruckner(y, smooth_points, iterations):
y_original = y
N_data = y.size
N = smooth_points
N_float = float(N)
y = np.empty(N_data + N + N)
y[0:N].fill(y_original[0])
y[N:N + N_data] = y_original[0:N_data]
y[N + N_data:N_data + N + N].fill(y_original[-1])
y_avg = np.average(y)
y_min = np.min(y)
y_c = y_avg + 2. * (y_avg - y_min)
y[y > y_c] = y_c
window_size = N_float*2+1
for j in range(0, iterations):
window_avg = np.average(y[0: 2*N + 1])
for i in range(N, N_data - 1 - N - 1):
if y[i]>window_avg:
y_new = window_avg
#updating central value in average (first bracket)
#and shifting average by one index (second bracket)
window_avg += ((window_avg-y[i]) + (y[i+N+1]-y[i - N]))/window_size
y[i] = y_new
else:
#shifting average by one index
window_avg += (y[i+N+1]-y[i - N])/window_size
return y[N:N + N_data]
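# A minimal usage sketch (illustrative only; the variable names below are
# hypothetical and not part of the original module):
#   import numpy as np
#   pattern = np.random.rand(500) + 10.0   # noisy 1D intensity pattern
#   background = smooth_bruckner(pattern, smooth_points=30, iterations=50)
#   corrected = pattern - background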
|
RedHatInsights/insights-core
|
examples/rules/stand_alone.py
|
Python
|
apache-2.0
| 2,746
| 0
|
#!/usr/bin/env python
"""
Standalone Rule
===============
This is a custom spec, parser and rule and can be run
against the local host using the following command::
$ insights-run -p examples.rules.stand_alone
or from the examples/rules directory::
$ ./stand_alone.py
"""
from __future__ import print_function
from collections import namedtuple
from insights import get_active_lines, parser, Parser
from insights import make_fail, make_pass, rule, run
from insights.core.spec_factory import SpecSet, simple_file
from insights.parsers.redhat_release import RedhatRelease
# Error key used in make_fail
ERROR_KEY = "TOO_MANY_HOSTS"
# jinja2 template displayed for rule responses
CONTENT = {
make_fail: """Too many hosts in /etc/hosts: {{num}}""",
make_pass: """Just right"""
}
class Specs(SpecSet):
""" Datasources for collection from local host """
hosts = simple_file("/etc/hosts")
@parser(Specs.hosts)
class HostParser(Parser):
"""
Parses the results of the ``hosts`` Specs
Attributes:
hosts (list): List of the namedtuple Host
which are the contents of the hosts file
including ``.ip``, ``.host``, and ``.aliases``.
"""
Host = namedtuple("Host", ["ip", "host", "aliases
|
"])
def parse_content(self, content):
"""
        Method to parse the contents of file ``/etc/hosts``
This method must be implemented by each parser.
Arguments:
content (list): List of strings that are the contents
of the /etc/hosts file.
"""
self.hosts = []
for line in get_active_lines(content):
# remove inline comments
line = line.partition("#")[0].strip()
# break the line into parts
parts = line.split()
ip, host = parts[:2]
aliases = parts[2:]
self.hosts.append(HostParser.Host(ip, host, aliases))
def __repr__(self):
""" str: Returns string representation of the class """
me = self.__class__.__name__
msg = "%s([" + ", ".join([str(d) for d in self.hosts]) + "])"
return msg % me
@rule(HostParser, RedhatRelease, content=CONTENT)
def report(hp, rhr):
"""
Rule reports a response if there is more than 1 host
entry defined in the /etc/hosts file.
Arguments:
hp (HostParser): Parser object for the custom parser in this
module.
rhr (RedhatRelease): Parser object for the /etc/redhat-release
file.
"""
if len(hp.hosts) > 1:
return make_fail("TOO_MANY_HOSTS", num=len(hp.hosts))
return make_pass("TOO_MANY_HOSTS", num=len(hp.hosts))
if __name__ == "__main__":
run(report, print_summary=True)
|
sdrogers/ms2ldaviz
|
ms2ldaviz/ms2ldaviz/views.py
|
Python
|
mit
| 936
| 0.013889
|
from django.shortcuts import render
from django.template.loader import render_to_string
def home(request):
context_dict = {}
    return render(request,'ms2ldaviz/index.html',context_dict)
def people(request):
context_dict = {}
return render(request,'ms2ldaviz/people.html',context_dict)
def api(request):
context_dict = {}
return render(request,'ms2ldaviz/api.html',context_dict)
def user_guide(request):
    markdown_str = render_to_string('markdowns/user_guide.md')
return render(request, 'markdowns/user_guide.html', {'markdown_str':markdown_str})
def disclaimer(request):
markdown_str = render_to_string('markdowns/disclaimer.md')
return render(request, 'markdowns/disclaimer.html', {'markdown_str':markdown_str})
def confidence(request):
markdown_str = render_to_string('markdowns/confidence.md')
return render(request, 'markdowns/confidence.html', {'markdown_str':markdown_str})
|
walterbender/speak
|
sleepy.py
|
Python
|
gpl-3.0
| 2,928
| 0.000683
|
# Speak.activity
# A simple front end to the espeak text-to-speech engine on the XO laptop
# http://wiki.laptop.org/go/Speak
#
# Copyright (C) 2008 Joshua Minor
# Copyright (C) 2014 Walter Bender
# This file is part of Speak.activity
#
# Parts of Speak.activity are based on code from Measure.activity
# Copyright (C) 2007 Arjun Sarwal - arjun@laptop.org
#
# Speak.activity is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Speak.activity is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Speak.activity. If not, see <http://www.gnu.org/licenses/>.
from gi.repository import Gdk
from gi.repository import GdkPixbuf
from eye import Eye
from utils import svg_str_to_pixbuf
class Sleepy(Eye):
def __init__(self, fill_color):
Eye.__init__(self, fill_color)
self._pixbuf = svg_str_to_pixbuf(eye_svg())
def draw(self, widget, cr):
bounds = self.get_allocation()
# background
cr.set_source_rgba(*self.fill_color.get_rgba())
cr.rectangle(0, 0, bounds.width, bounds.height)
cr.fill()
w = h = min(bounds.width, bounds.height)
x = int((bounds.width - w) // 2)
y = int((bounds.height - h) // 2)
|
pixbuf = self._pixbuf.scale_simple(w, h, GdkPixbuf.InterpType.BILINEAR)
cr.translate(x + w / 2., y + h / 2.)
cr.translate(-x - w / 2., -y - h / 2.)
Gdk.cairo_set_source_pixbuf(cr, pixbuf, x, y)
cr.rectangle(x, y, w, h)
cr.fill()
return True
def eye_svg():
return \
'<?xml version="1.0" encoding="UTF-8" standalone="no"?>\n' + \
'<svg\n' + \
' xmlns:svg="http://www.w3.org/2000/svg"\n' + \
        ' xmlns="http://www.w3.org/2000/svg"\n' + \
' version="1.1"\n' + \
' width="300"\n' + \
' height="300">\n' + \
' <path\n' + \
' d="m 260.26893,151.09803 c -6.07398,14.55176 -15.05894,27.89881 -26.27797,39.03563 -11.21904,11.13683 -24.66333,20.05466 -39.32004,26.08168 -14.65671,6.02702 -30.51431,9.15849 -46.37814,9.15849 -15.86384,0 -31.72144,-3.13147 -46.37815,-9.15849 C 87.257925,210.18832 73.813631,201.27049 62.594594,190.13366 51.375557,178.99684 42.3906,165.64979 36.316616,151.09803"\n' + \
' style="fill:none;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:13.18636799;stroke-linecap:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none" />\n' + \
'</svg>\n'
|
NicovincX2/Python-3.5
|
Algorithmique/Algorithme/Algorithme de tri/Tri par tas (Heapsort)/maxheap.py
|
Python
|
gpl-3.0
| 1,134
| 0.000882
|
# -*- coding: utf-8 -*-
from minheap import minheap
class maxheap(minheap):
"""
Heap class - made of keys and items
methods: build_heap, heappush, heappop
"""
MAX_HEAP = True
def __str__(self):
return "Max-heap with %s items" % (len(self.heap))
def heapify(self, i):
l = self.leftchild(i)
r = self.rightchild(i)
largest = i
if l < self.max_elements() and self.heap[l] > self.heap[largest]:
largest = l
if r < self.max_elements() and self.heap[r] > self.heap[largest]:
largest = r
if largest != i:
self.heap[i], self.heap[largest] = self.heap[largest], self.heap[i]
self.heapify(largest)
def heappush(self, x):
""" Adds a new item x in the heap"""
i = len(self.heap)
self.heap.append(x)
parent = self.parent(i)
        while parent != -1 and self.heap[int(i)] > self.heap[int(parent)]:
self.heap[int(i)], self.heap[int(parent)] = self.heap[
int(parent)], self.heap[int(i)]
|
i = parent
parent = self.parent(i)
|
vejmelkam/emotiv-reader
|
albow/grid_view.py
|
Python
|
gpl-3.0
| 1,254
| 0.039075
|
from pygame import Rect
from widget import Widget
class GridView(Widget):
# cell_size (width, height) size of each cell
#
# Abstract methods:
#
# num_rows() --> no. of rows
# num_cols() --> no. of columns
# draw_cell(surface, row, col, rect)
# click_cell(row, col, event)
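    #
    # A minimal subclass sketch (hypothetical names; illustrative only, not part
    # of the original file):
    #   class BoardView(GridView):
    #       def num_rows(self): return 3
    #       def num_cols(self): return 3
    #       def draw_cell(self, surface, row, col, rect): surface.fill((0, 0, 0), rect)
    #       def click_cell(self, row, col, event): pass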
def __init__(self, cell_size, nrows, ncols, **kwds):
"""nrows, ncols are for calculating initial size of widget"""
Widget.__init__(self, **kwds)
self.cell_size = cell_size
w, h = cell_size
d = 2 * self.margin
self.size = (w * ncols + d, h * nrows + d)
self.cell_size = cell_size
def draw(self, surface):
for row in xrange(self.num_rows()):
for col in xrange(self.num_cols()):
r = self.cell_rect(row, col)
|
                self.draw_cell(surface, row, col, r)
def cell_rect(self, row, col):
w, h = self.cell_size
d = self.margin
x = col * w + d
y = row * h + d
return Rect(x, y, w, h)
def draw_cell(self, surface, row, col, rect):
pass
def mouse_down(self, event):
x, y = event.local
w, h = self.cell_size
W, H = self.size
d = self.margin
if d <= x < W - d and d <= y < H - d:
row = (y - d) // h
col = (x - d) // w
self.click_cell(row, col, event)
def click_cell(self, row, col, event):
pass
|
zhaowenxiang/chisch
|
vod/views.py
|
Python
|
mit
| 446
| 0
|
# -*- coding: utf-8 -*-
import logging
from chisch.common.retwrapper import RetWrapper
import cores
logger = logging.getLogger('django')
def signature_url(request):
params_query_dict = request.GET
params = {k: v for k, v in params_query_dict.items()}
try:
url = cores.get_url()
except Exception, e:
return RetWrapper.wrap_and_return(e)
result = {'url': url}
    return RetWrapper.wrap_and_return(result)
|
AdrianGaudebert/socorro
|
webapp-django/crashstats/crashstats/urls.py
|
Python
|
mpl-2.0
| 3,962
| 0.000252
|
from django.conf.urls import patterns, url
from django.views.generic import RedirectView
from django.conf import settings
from . import views
products = r'/products/(?P<product>\w+)'
versions = r'/versions/(?P<versions>[;\w\.()]+)'
version = r'/versions/(?P<version>[;\w\.()]+)'
perm_legacy_redirect = settings.PERMANENT_LEGACY_REDIRECTS
|
urlpatterns = patterns(
'', # prefix
url('^robots\.txt$',
views.robots_txt,
        name='robots_txt'),
url(r'^status/json/$',
views.status_json,
name='status_json'),
url(r'^status/revision/$',
views.status_revision,
name='status_revision'),
url(r'^crontabber-state/$',
views.crontabber_state,
name='crontabber_state'),
url('^crashes-per-day/$',
views.crashes_per_day,
name='crashes_per_day'),
url(r'^exploitability/$',
views.exploitability_report,
name='exploitability_report'),
url(r'^report/index/(?P<crash_id>[\w-]+)$',
views.report_index,
name='report_index'),
url(r'^search/quick/$',
views.quick_search,
name='quick_search'),
url(r'^buginfo/bug', views.buginfo,
name='buginfo'),
url(r'^rawdumps/(?P<crash_id>[\w-]{36})-(?P<name>\w+)\.'
r'(?P<extension>json|dmp|json\.gz)$',
views.raw_data,
name='raw_data_named'),
url(r'^rawdumps/(?P<crash_id>[\w-]{36}).(?P<extension>json|dmp)$',
views.raw_data,
name='raw_data'),
url(r'^login/$',
views.login,
name='login'),
url(r'^graphics_report/$',
views.graphics_report,
name='graphics_report'),
url(r'^about/throttling/$',
views.about_throttling,
name='about_throttling'),
# if we do a permanent redirect, the browser will "cache" the redirect and
# it will make it very hard to ever change the DEFAULT_PRODUCT
url(r'^$',
RedirectView.as_view(
url='/home/product/%s' % settings.DEFAULT_PRODUCT,
permanent=False # this is not a legacy URL
)),
# redirect deceased Advanced Search URL to Super Search
url(r'^query/$',
RedirectView.as_view(
url='/search/',
query_string=True,
permanent=True
)),
# redirect deceased Report List URL to Signature report
url(r'^report/list$',
RedirectView.as_view(
pattern_name='signature:signature_report',
query_string=True,
permanent=True
)),
# redirect deceased Daily Crashes URL to Crasher per Day
url(r'^daily$',
RedirectView.as_view(
pattern_name='crashstats:crashes_per_day',
query_string=True,
permanent=True
)),
# Redirect old independant pages to the unified Profile page.
url(r'^your-crashes/$',
RedirectView.as_view(
url='/profile/',
permanent=perm_legacy_redirect
)),
url(r'^permissions/$',
RedirectView.as_view(
url='/profile/',
permanent=perm_legacy_redirect
)),
# Redirect deleted status page to monitoring page.
url(
r'^status/$',
RedirectView.as_view(
pattern_name='monitoring:index',
permanent=not settings.DEBUG,
),
name='status_redirect',
),
# handle old-style URLs
url(r'^products/(?P<product>\w+)/$',
RedirectView.as_view(
url='/home/products/%(product)s',
permanent=perm_legacy_redirect
)),
url(r'^products/(?P<product>\w+)/versions/(?P<versions>[;\w\.()]+)/$',
RedirectView.as_view(
url='/home/products/%(product)s/versions/%(versions)s',
permanent=perm_legacy_redirect
)),
url('^home' + products + '/versions/$',
RedirectView.as_view(
url='/home/products/%(product)s',
permanent=perm_legacy_redirect
)),
)
|
mindcube/mindcube-django-cookiecutter
|
{{cookiecutter.repo_name}}/project/apps/geo_locator/views.py
|
Python
|
mit
| 265
| 0.003774
|
"""Main view for geo locator application"""
from django.shortcuts import render
def index(request):
if request.location:
location = request.location
else:
|
location = None
return render(request, "homepage.html", {'location': location})
|
gcobos/rft
|
app/primitives/__init__.py
|
Python
|
agpl-3.0
| 1,163
| 0.029235
|
# Generated file. Do not edit
__author__="drone"
from Abs import Abs
from And import And
from Average import Average
from Ceil import Ceil
from Cube import Cube
from Divide import Divide
from Double import Double
from Equal import Equal
from Even import Even
from Floor import Floor
from Greaterorequal import Greaterorequal
from Greaterthan import Greaterthan
from Half import Half
from If import If
from Increment import Increment
from Lessorequal import Lessorequal
from Lessthan import Lessthan
from Max import Max
from Min import Min
from Module import Module
from Multiply import Multiply
from Negate import Negate
from Not import Not
from Odd import Odd
from One import One
from Positive import Positive
from Quadruple import Quadruple
from Sign import Sign
from Sub import Sub
from Sum import Sum
from Two import Two
from Zero import Zero
__all__ = ['Abs', 'And', 'Average', 'Ceil', 'Cube', 'Divide', 'Double', 'Equal', 'Even', 'Floor', 'Greaterorequal', 'Greaterthan', 'Half', 'If', 'Increment', 'Lessorequal', 'Lessthan', 'Max', 'Min', 'Module', 'Multiply', 'Negate', 'Not', 'Odd', 'One', 'Positive', 'Quadruple', 'Sign', 'Sub', 'Sum', 'Two', 'Zero']
|
sheagcraig/python-jss
|
jss/misc_endpoints.py
|
Python
|
gpl-3.0
| 13,525
| 0.000074
|
#!/usr/bin/env python
# Copyright (C) 2014-2017 Shea G Craig
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""misc_endpoints.py
Classes representing API endpoints that don't subclass JSSObject
"""
from __future__ import print_function
from __future__ import absolute_import
import mimetypes
import os
import sys
from xml.etree import ElementTree
from .exceptions import MethodNotAllowedError, PostError
from .tools import error_handler
__all__ = ('CommandFlush', 'FileUpload', 'LogFlush')
# Map Python 2 basestring type for Python 3.
if sys.version_info.major == 3:
basestring = str
# pylint: disable=missing-docstring
# pylint: disable=too-few-public-methods
class CommandFlush(object):
_endpoint_path = "commandflush"
can_get = False
can_put = False
can_post = False
def __init__(self, jss):
"""Initialize a new CommandFlush
Args:
jss: JSS object.
"""
self.jss = jss
@property
def url(self):
"""Return the path subcomponent of the url to this object."""
return self._endpoint_path
def command_flush_with_xml(self, data):
"""Flush commands for devices with a supplied xml string.
From the Casper API docs:
Status and devices specified in an XML file. Id lists may be
specified for <computers>, <computer_groups>, <mobile_devices>,
<mobile_device_groups>. Sample file:
<commandflush>
<status>Pending+Failed</status>
<mobile_devices>
<mobile_device>
<id>1</id>
</mobile_device>
<mobile_device>
<id>2</id>
</mobile_device>
</mobile_devices>
</commandflush>
Args:
data (string): XML string following the above structure or
an ElementTree/Element.
Raises:
DeleteError if provided url_path has a >= 400 response.
"""
if not isinstance(data, basestring):
data = ElementTree.tostring(data, encoding='UTF-8')
self.jss.delete(self.url, data)
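    # A minimal usage sketch (assumes an already configured JSS object named
    # `jss`; illustrative only, not part of the original module):
    #   CommandFlush(jss).command_flush_with_xml(
    #       '<commandflush><status>Pending</status>'
    #       '<computers><computer><id>1</id></computer></computers>'
    #       '</commandflush>')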
def command_flush_for(self, id_type, command_id, status):
"""Flush commands for an individual device.
Args:
id_type (str): One of 'computers', 'computergroups',
'mobiledevices', or 'mobiledevicegroups'.
            command_id (str, int, list): ID value(s) for the devices to
flush. More than one device should be passed as IDs
in a list or tuple.
status (str): One of 'Pending', 'Failed', 'Pending+Failed'.
Raises:
DeleteError if provided url_path has a >= 400 response.
"""
id_types = ('computers', 'computergroups', 'mobiledevices',
'mobiledevicegroups')
status_types = ('Pending', 'Failed', 'Pending+Failed')
if id_type not in id_types or status not in status_types:
raise ValueError("Invalid arguments.")
if isinstance(command_id, list):
command_id = ",".join(str(item) for item in command_id)
flush_url = "{}/{}/id/{}/status/{}".format(
self.url, id_type, command_id, status)
self.jss.delete(flush_url)
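    # A minimal usage sketch (assumes a configured JSS object named `jss`;
    # illustrative only):
    #   CommandFlush(jss).command_flush_for('mobiledevices', [1, 2], 'Pending+Failed')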
# pylint: disable=too-few-public-methods
class FileUpload(object):
"""FileUploads are a special case in the API. They allow you to add
file resources to a number of objects on the JSS.
To use, instantiate a new FileUpload object, then use the save()
method to upload.
Once the upload has been posted you may only interact with it
through the web interface. You cannot list/get it or delete it
through the API.
However, you can reuse the FileUpload object if you wish, by
changing the parameters, and issuing another save().
"""
_endpoint_path = "fileuploads"
allowed_kwargs = ('subset',)
def __init__(self, j, resource_type, id_type, _id, resource):
"""Prepare a new FileUpload.
Args:
j: A JSS object to POST the upload to.
resource_type:
String. Acceptable Values:
Attachments:
computers
mobiledevices
enrollmentprofiles
peripherals
mobiledeviceenrollmentprofiles
Icons:
policies
ebooks
mobiledeviceapplicationsicon
Mobile Device Application:
                        mobiledeviceapplicationsipa
Disk Encryption
diskencryptionconfigurations
diskencryptions (synonymous)
PPD
|
printers
id_type:
String of desired ID type:
id
name
_id: Int or String referencing the identity value of the
resource to add the FileUpload to.
resource: String path to the file to upload.
"""
resource_types = ["computers", "mobiledevices", "enrollmentprofiles",
"peripherals", "mobiledeviceenrollmentprofiles",
"policies", "ebooks", "mobiledeviceapplicationsicon",
"mobiledeviceapplicationsipa",
"diskencryptionconfigurations", "printers"]
id_types = ["id", "name"]
self.jss = j
# Do some basic error checking on parameters.
if resource_type in resource_types:
self.resource_type = resource_type
else:
raise TypeError(
"resource_type must be one of: %s" % ', '.join(resource_types))
if id_type in id_types:
self.id_type = id_type
else:
raise TypeError("id_type must be one of: %s" % ', '.join(id_types))
self._id = str(_id)
basename = os.path.basename(resource)
content_type = mimetypes.guess_type(basename)[0]
self.resource = {"name": (basename, open(resource, "rb"),
content_type)}
self._set_upload_url()
def _set_upload_url(self):
"""Generate the full URL for a POST."""
# pylint: disable=protected-access
self._upload_url = "/".join([
self.jss._url, self._endpoint_path, self.resource_type,
self.id_type, str(self._id)])
# pylint: enable=protected-access
def save(self):
"""POST the object to the JSS."""
try:
response = self.jss.session.post(
self._upload_url, files=self.resource)
except PostError as error:
if error.status_code == 409:
raise PostError(error)
else:
raise MethodNotAllowedError(self.__class__.__name__)
if response.status_code == 201:
if self.jss.verbose:
print("POST: Success")
print(response.content)
elif response.status_code >= 400:
error_handler(PostError, response)
class LogFlush(object):
_endpoint_path = "logflush"
def __init__(self, jss):
"""Initialize a new LogFlush
Args:
jss: JSS object.
"""
self.jss = jss
@property
def url(self):
"""Return the path subcomponent of the url to this object."""
return self._endpoint_path
def log_flush_with_xml(self, data):
|
CanonicalLtd/landscape-client
|
landscape/client/upgraders/tests/test_monitor.py
|
Python
|
gpl-2.0
| 317
| 0
|
from landscape.client.tests.helpers import LandscapeTest
from landscape.client.patch import UpgradeManager
from landscape.client.upgraders import monitor
class TestMonitorUpgraders(LandscapeTest):
def test_monitor_upgrade_manager(self):
        self.assertEqual(type(monitor.upgrade_manager), UpgradeManager)
|
stryder199/RyarkAssignments
|
Assignment2/web2py/gaehandler.py
|
Python
|
mit
| 3,279
| 0.005489
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
"""
##############################################################################
# Configuration parameters for Google App Engine
##############################################################################
KEEP_CACHED = False # request a dummy url every 10secs to force caching app
LOG_STATS = False # web2py level log statistics
APPSTATS = True # GAE level usage statistics and profiling
DEBUG = False # debug mode
AUTO_RETRY = True # force gae to retry commit on failure
#
# Read more about APPSTATS here
# http://googleappengine.blogspot.com/2010/03/easy-performance-profiling-with.html
# can be accessed from:
# http://localhost:8080/_ah/stats
##############################################################################
# All tricks in this file developed by Robin Bhattacharyya
##############################################################################
import time
import os
import sys
import logging
import cPickle
import pickle
import wsgiref.handlers
import datetime
path = os.path.dirname(os.path.abspath(__file__))
sys.path = [path]+[p for p in sys.path if not p==path]
sys.modules['cPickle'] = sys.modules['pickle']
from gluon.settings import global_settings
from google.appengine.api.labs import taskqueue
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
global_settings.web2py_runtime_gae = True
global_settings.db_sessions = True
if os.environ.get('SERVER_SOFTWARE', '').startswith('Devel'):
(global_settings.web2py_runtime, DEBUG) = \
('gae:development', True)
else:
(global_settings.web2py_runtime, DEBUG) = \
('gae:production', False)
import gluon.main
def log_stats(fun):
"""Function that will act as a decorator to make logging"""
def newfun(env, res):
"""Log the execution time of the passed function"""
timer = lambda t: (t.time(), t.clock())
(t0, c0) = timer(time)
executed_function = fun(env, res)
(t1, c1) = timer(time)
log_info = """**** Request: %.2fms/%.2fms (real time/cpu time)"""
log_info = log_info % ((t1 - t0) * 1000, (c1 - c0) * 1000)
logging.info(log_info)
return executed_function
return newfun
logging.basicConfig(level=logging.INFO)
def wsgiapp(env, res):
"""Return the wsgiapp"""
if env['PATH_INFO'] == '/_ah/queue/default':
if KEEP_CACHED:
delta = datetime.timedelta(seconds=10)
taskqueue.add(eta=datetime.datetime.now() + delta)
res('200 OK',[('Content-Type','text/plain')])
return ['']
env['PATH_INFO'] = env['PATH_INFO'].encode('utf8')
return gluon.main.wsgibase(env, res)
if LOG_STATS or DEBUG:
wsgiapp = log_stats(wsgiapp)
if AUTO_RETRY:
from gluon.contrib.gae_retry import autoretry_datastore_timeouts
autoretry_datastore_timeouts()
def main():
"""Run the wsgi app"""
if APPSTATS:
run_wsgi_app(wsgiapp)
else:
wsgiref.handlers.CGIHandler().run(wsgiapp)
if __name__ == '__main__':
main()
|
wolvespack/alcor
|
alcor/services/plots/velocity_clouds.py
|
Python
|
mit
| 7,567
| 0.000793
|
from typing import (Tuple,
List)
import matplotlib
# More info at
# http://matplotlib.org/faq/usage_faq.html#what-is-a-backend for details
# TODO: use this: https://stackoverflow.com/a/37605654/7851470
matplotlib.use('Agg')
from matplotlib import pyplot as plt
from matplotlib.patches import Ellipse
from matplotlib.axes import Axes
import numpy as np
import pandas as pd
from .utils import to_cartesian_from_equatorial
# Kinematic properties of the thin disk taken from the paper of
# N.Rowell and N.C.Hambly (mean motions are relative to the Sun):
# "White dwarfs in the SuperCOSMOS Sky Survey: the thin disc,
# thick disc and spheroid luminosity functions"
# Mon. Not. R. Astron. Soc. 417, 93–113 (2011)
# doi:10.1111/j.1365-2966.2011.18976.x
AVERAGE_POPULATION_VELOCITY_U = -8.62
AVERAGE_POPULATION_VELOCITY_V = -20.04
AVERAGE_POPULATION_VELOCITY_W = -7.1
STD_POPULATION_U = 32.4
STD_POPULATION_V = 23
STD_POPULATION_W = 18.1
def plot(stars: pd.DataFrame,
*,
filename: str = 'velocity_clouds.ps',
figure_size: Tuple[float, float] = (8, 12),
spacing: float = 0.25,
u_label: str = '$U(km/s)$',
v_label: str = '$V(km/s)$',
w_label: str = '$W(km/s)$',
u_limits: Tuple[float, float] = (-150, 150),
v_limits: Tuple[float, float] = (-150, 150),
w_limits: Tuple[float, float] = (-150, 150)) -> None:
figure, (uv_subplot,
uw_subplot,
vw_subplot) = plt.subplots(nrows=3,
figsize=figure_size)
draw_subplot(subplot=uv_subplot,
xlabel=u_label,
ylabel=v_label,
xlim=u_limits,
ylim=v_limits,
x=stars['u_velocity'],
y=stars['v_velocity'],
x_avg=AVERAGE_POPULATION_VELOCITY_U,
y_avg=AVERAGE_POPULATION_VELOCITY_V,
x_std=STD_POPULATION_U,
y_std=STD_POPULATION_V)
draw_subplot(subplot=uw_subplot,
xlabel=u_label,
ylabel=w_label,
xlim=u_limits,
ylim=w_limits,
x=stars['u_velocity'],
y=stars['w_velocity'],
x_avg=AVERAGE_POPULATION_VELOCITY_U,
y_avg=AVERAGE_POPULATION_VELOCITY_W,
x_std=STD_POPULATION_U,
y_std=STD_POPULATION_W)
draw_subplot(subplot=vw_subplot,
xlabel=v_label,
ylabel=w_label,
xlim=v_limits,
ylim=w_limits,
x=stars['v_velocity'],
y=stars['w_velocity'],
x_avg=AVERAGE_POPULATION_VELOCITY_V,
y_avg=AVERAGE_POPULATION_VELOCITY_W,
x_std=STD_POPULATION_V,
y_std=STD_POPULATION_W)
figure.subplots_adjust(hspace=spacing)
plt.savefig(filename)
def plot_lepine_case(stars: pd.DataFrame,
*,
filename: str = 'velocity_clouds.ps',
figure_size: Tuple[float, float] = (8, 12),
spacing: float = 0.25,
u_label: str = '$U(km/s)$',
v_label: str = '$V(km/s)$',
w_label: str = '$W(km/s)$',
u_limits: Tuple[float, float] = (-150, 150),
v_limits: Tuple[float, float] = (-150, 150),
w_limits: Tuple[float, float] = (-150, 150)) -> None:
x_coordinates, y_coordinates, z_coordinates = to_cartesian_from_equatorial(
stars)
highest_coordinates = np.maximum.reduce([np.abs(x_coordinates),
np.abs(y_coordinates),
np.abs(z_coordinates)])
uv_cloud_stars = stars[(highest_coordinates == z_coordinates)]
uw_cloud_stars = stars[(highest_coordinates == y_coordinates)]
vw_cloud_stars = stars[(highest_coordinates == x_coordinates)]
figure, (uv_subplot,
uw_subplot,
vw_subplot) = plt.subplots(nrows=3,
figsize=figure_size)
draw_subplot(subplot=uv_subplot,
xlabel=u_label,
ylabel=v_label,
xlim=u_limits,
ylim=v_limits,
x=uv_cloud_stars['u_velocity'],
y=uv_cloud_stars['v_velocity'],
x_avg=AVERAGE_POPULATION_VELOCITY_U,
y_avg=AVERAGE_POPULATION_VELOCITY_V,
x_std=STD_POPULATION_U,
y_std=STD_POPULATION_V)
draw_subplot(subplot=uw_subplot,
xlabel=u_label,
ylabel=w_label,
xlim=u_limits,
ylim=w_limits,
x=uw_cloud_stars['u_velocity'],
y=uw_cloud_stars['w_velocity'],
x_avg=AVERAGE_POPULATION_VELOCITY_U,
y_avg=AVERAGE_POPULATION_VELOCITY_W,
x_std=STD_POPULATION_U,
y_std=STD_POPULATION_W)
draw_subplot(subplot=vw_subplot,
xlabel=v_label,
ylabel=w_label,
xlim=v_limits,
|
ylim=w_limits,
x=vw_cloud_stars['v_velocity'],
y=vw_cloud_stars['w_velocity'],
x_avg=AVERAGE_POPULATION_VELOCITY_V,
y_avg=AVERAGE_POPULATION_VELOCITY_W,
x_std=STD_POPULATION_V,
y_std=STD_POPULATION_W)
|
figure.subplots_adjust(hspace=spacing)
plt.savefig(filename)
def draw_subplot(*,
subplot: Axes,
xlabel: str,
ylabel: str,
xlim: Tuple[float, float],
ylim: Tuple[float, float],
x: List[float],
y: List[float],
cloud_color: str = 'k',
point_size: float = 0.5,
x_avg: float,
y_avg: float,
x_std: float,
y_std: float,
ratio: float = 10 / 13) -> None:
subplot.set(xlabel=xlabel,
ylabel=ylabel,
xlim=xlim,
ylim=ylim)
subplot.scatter(x=x,
y=y,
color=cloud_color,
s=point_size)
plot_ellipses(subplot=subplot,
x_avg=x_avg,
y_avg=y_avg,
x_std=x_std,
y_std=y_std)
subplot.minorticks_on()
subplot.xaxis.set_ticks_position('both')
subplot.yaxis.set_ticks_position('both')
subplot.set_aspect(ratio / subplot.get_data_ratio())
def plot_ellipses(subplot: Axes,
x_avg: float,
y_avg: float,
x_std: float,
y_std: float,
ellipse_color: str = 'b') -> None:
std_ellipse = Ellipse(xy=(x_avg, y_avg),
width=x_std * 2,
height=y_std * 2,
fill=False,
edgecolor=ellipse_color,
linestyle='dashed')
double_std_ellipse = Ellipse(xy=(x_avg, y_avg),
width=x_std * 4,
height=y_std * 4,
fill=False,
edgecolor=ellipse_color)
subplot.add_artist(std_ellipse)
subplot.add_artist(double_std_ellipse)
|
pisskidney/leetcode
|
medium/16.py
|
Python
|
mit
| 1,070
| 0
|
#!/usr/bin/python
from typing import List, Optional
"""
16. 3Sum Closest
https://leetcode.com/problems/3sum-closest/
"""
def bsearch(nums, left, right, res, i, j, target):
while left <= right:
        middle = (left + right) // 2
|
candidate = nums[i] + nums[j] + nums[middle]
if res is None or abs(candidate - target) < abs(res - target):
res = candidate
if candidate == target:
return res
elif candidate > target:
right = middle - 1
else:
left = middle + 1
return res
class Solution:
def threeSumClosest(self, nums: List[int], target: int) -> Optional[int]:
res = None
nums = sorted(nums)
for i in range(len(nums)):
for j in range(i + 1, len(nums)):
res = bsearch(nums, j + 1, len(nums) - 1, res, i, j, target)
return res
def main():
sol = Solution()
print(sol.threeSumClosest([-111, -111, 3, 6, 7, 16, 17, 18, 19], 13))
return 0
if __name__ == '__main__':
raise SystemExit(main())
|
pwollstadt/IDTxl
|
test/generate_test_data.py
|
Python
|
gpl-3.0
| 8,187
| 0.000733
|
"""Generate test data for IDTxl network comparison unit and system tests.
Generate test data for IDTxl network comparison unit and system tests. Simulate
discrete and continous data from three correlated Gaussian data sets. Perform
network inference using bivariate/multivariate mutual information (MI)/transfer
entropy (TE) analysis. Results are saved used for unit and system testing of
network comparison (systemtest_network_comparison.py).
A coupling is simulated as a lagged, linear correlation between three Gaussian
variables and looks like this:
1 -> 2 -> 3 with a delay of 1 sample for each coupling
"""
import pickle
import numpy as np
from idtxl.multivariate_te import MultivariateTE
from idtxl.bivariate_te import BivariateTE
from idtxl.multivariate_mi import MultivariateMI
from idtxl.bivariate_mi import BivariateMI
from idtxl.estimators_jidt import JidtDiscreteCMI
from idtxl.data import Data
# path = os.path.join(os.path.dirname(__file__) + '/data/')
path = 'data/'
def analyse_mute_te_data():
# Generate example data: the following was ran once to generate example
# data, which is now in the data sub-folder of the test-folder.
data = Data()
data.generate_mute_data(100, 5)
# analysis settings
settings = {
'cmi_estimator': 'JidtKraskovCMI',
'n_perm_max_stat': 50,
'n_perm_min_stat': 50,
'n_perm_omnibus': 200,
'n_perm_max_seq': 50,
'max_lag_target': 5,
'max_lag_sources': 5,
'min_lag_sources': 1,
'permute_in_time': True
}
# network inference for individual data sets
nw_0 = MultivariateTE()
res_0 = nw_0.analyse_network(
settings, data, targets=[0, 1], sources='all')
pickle.dump(res_0, open(path + 'mute_results_0.p', 'wb'))
res_1 = nw_0.analyse_network(
settings, data, targets=[1, 2], sources='all')
pickle.dump(res_1, open(path + 'mute_results_1.p', 'wb'))
res_2 = nw_0.analyse_network(
settings, data, targets=[0, 2], sources='all')
pickle.dump(res_2, open(path + 'mute_results_2.p', 'wb'))
res_3 = nw_0.analyse_network(
settings, data, targets=[0, 1, 2], sources='all')
pickle.dump(res_3, open(path + 'mute_results_3.p', 'wb'))
res_4 = nw_0.analyse_network(
settings, data, targets=[1, 2], sources='all')
pickle.dump(res_4, open(path + 'mute_results_4.p', 'wb'))
res_5 = nw_0.analyse_network(settings, data)
pickle.dump(res_5, open(path + 'mute_results_full.p', 'wb'))
def generate_discrete_data(n_replications=1):
"""Generate Gaussian test data: 1 -> 2 -> 3, delay 1."""
d = generate_gauss_data(n_replications=n_replications, discrete=True)
data = Data(d, dim_order='psr', normalise=False)
return data
def generate_continuous_data(n_replications=1):
"""Generate Gaussian test data: 1 -> 2 -> 3, delay 1."""
d = generate_gauss_data(n_replications=n_replications, discrete=False)
data = Data(d, dim_order='psr', normalise=True)
return data
def generate_gauss_data(n_replications=1, discrete=False):
settings = {'discretise_method': 'equal',
'n_discrete_bins': 5}
est = JidtDiscreteCMI(settings)
covariance_1 = 0.4
covariance_2 = 0.3
n = 10000
delay = 1
if discrete:
d = np.zeros((3, n - 2*delay, n_replications), dtype=int)
else:
d = np.zeros((3, n - 2*delay, n_replications))
for r in range(n_replications):
proc_1 = np.random.normal(0, 1, size=n)
proc_2 = (covariance_1 * proc_1 + (1 - covariance_1) *
np.random.normal(0, 1, size=n))
proc_3 = (covariance_2 * proc_2 + (1 - covariance_2) *
np.random.normal(0, 1, size=n))
proc_1 = proc_1[(2*delay):]
proc_2 = proc_2[delay:-delay]
proc_3 = proc_3[:-(2*delay)]
if discrete: # discretise data
proc_1_dis, proc_2_dis = est._discretise_vars(
var1=proc_1, var2=proc_2)
proc_1_dis, proc_3_dis = est._discretise_vars(
var1=proc_1, var2=proc_3)
d[0, :, r] = proc_1_dis
d[1, :, r] = proc_2_dis
d[2, :, r] = proc_3_dis
else:
d[0, :, r] = proc_1
d[1, :, r] = proc_2
d[2, :, r] = proc_3
return d
def analyse_discrete_data():
"""Run network inference on discrete data."""
data = generate_discrete_data()
settings = {
'cmi_estimator': 'JidtDiscreteCMI',
'discretise_method': 'none',
'n_discrete_bins': 5, # alphabet size of the variables analysed
'min_lag_sources': 1,
'max_lag_sources': 3,
'max_lag_target': 1}
nw = MultivariateTE()
res = nw.analyse_network(settings=settings, data=data)
pickle.dump(res, open('{0}discrete_results_mte_{1}.p'.format(
path, settings['cmi_estimator']), 'wb'))
nw = BivariateTE()
res = nw.analyse_network(settings=settings, data=data)
pickle.dump(res, open('{0}discrete_results_bte_{1}.p'.format(
path, settings['cmi_estimator']), 'wb'))
nw = MultivariateMI()
res = nw.analyse_network(settings=settings, data=data)
pickle.dump(res, open('{0}discrete_results_mmi_{1}.p'.format(
path, settings['cmi_estimator']), 'wb'))
nw = BivariateMI()
res = nw.analyse_network(settings=settings, data=data)
pickle.dump(res, open('{0}discrete_results_bmi_{1}.p'.format(
path, settings['cmi_estimator']), 'wb'))
def analyse_continuous_data():
"""Run network inference on continuous data."""
data = generate_continuous_data()
settings = {
'min_lag_sources': 1,
'max_lag_sources': 3,
'max_lag_target': 1}
nw = MultivariateTE()
for estimator in ['JidtGaussianCMI', 'JidtKraskovCMI']:
settings['cmi_estimator'] = estimator
res = nw.analyse_network(settings=settings, data=data)
pickle.dump(res, open('{0}continuous_results_mte_{1}.p'.format(
path, estimator), 'wb'))
nw = BivariateTE()
for estimator in ['JidtGaussianCMI', 'JidtKraskovCMI']:
settings['cmi_estimator'] = estimator
res = nw.analyse_network(settings=settings, data=data)
pickle.dump(res, open('{0}continuous_results_bte_{1}.p'.format(
path, estimator), 'wb'))
nw = MultivariateMI()
for estimator in ['JidtGaussianCMI', 'JidtKraskovCMI']:
settings['cmi_estimator'] = estimator
res = nw.analyse_network(settings=settings, data=data)
pickle.dump(res, open('{0}continuous_results_mmi_{1}.p'.format(
path, estimator), 'wb'))
nw = BivariateMI()
for estimator in ['JidtGaussianCMI', 'JidtKraskovCMI']:
settings['cmi_estimator'] = estimator
res = nw.analyse_network(settings=settings, data=data)
pickle.dump(res, open('{0}continuous_results_bmi_{1}.p'.format(
path, estimator), 'wb'))
def assert_results():
for algo in ['mmi', 'mte', 'bmi', 'bte']:
# Test continuous data:
for estimator in ['JidtGaussianCMI', 'JidtKraskovCMI']:
res = pickle.load(open(
'data/continuous_results_{0}_{1}.p'.format(
algo, estimator), 'rb'))
print('\nInference algorithm: {0} (estimator: {1})'.format(
algo, estimator))
_print_result(res)
# Test discrete data:
estimator = 'JidtDiscreteCMI'
res = pickle.load(open(
'data/discrete_results_{0}_{1}.p'.format(
algo, estimator), 'rb'))
print('\nInference algorithm: {0} (estimator: {1})'.format(
algo, estimator))
_print_result(res)
def _print_result(res):
res.adjacency_matrix.print_matrix()
tp = 0
|
fp = 0
if res.adjacency_matrix._edge_matrix[0, 1] == True: tp += 1
if res.adjacency_matrix._edge_matrix[1, 2] == True: tp += 1
if res.adjacency_matrix._edge_matrix[0, 2] == True: fp += 1
fn = 2 - tp
print('TP: {0}, FP: {1}, FN: {2}'.format(tp, fp, fn))
if __name__ == '__main__':
analyse_discrete_data()
analyse_mute_te_data()
analyse_continuous_data()
    assert_results()
|
rusiv/BSScript
|
bsscript/bsscriptSblm/Spinner.py
|
Python
|
mit
| 784
| 0.034392
|
import sublime
from . import SblmCmmnFnctns
class Spinner:
SYMBOLS_ROW = u'←↑→↓'
SYMBOLS_BOX = u'⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏'
def __init__(self, symbols, view, startStr, endStr):
self.symbols = symbols
self.length = len(symbols)
self.position = 0
self.stopFlag = False
self.view = view
self.startStr = startStr
self.endStr = endStr
def __next__(self):
        self.position = self.position + 1
return self.startStr + self.symbols[self.position % self.length] + self.endStr
def start(self):
if not self.stopFlag:
self.view.set_status(SblmCmmnFnctns.SUBLIME_STATUS_SPINNER, self.__next__())
sublime.set_timeout(lambda: self.start(), 300)
def stop(self):
        self.view.erase_status(SblmCmmnFnctns.SUBLIME_STATUS_SPINNER)
self.stopFlag = True
|
uliss/quneiform
|
tests/py/lpod/rst2odt.py
|
Python
|
gpl-3.0
| 22,265
| 0.00265
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Copyright (c) 2009 Ars Aperta, Itaapy, Pierlis, Talend.
#
# Authors: David Versmisse <david.versmisse@itaapy.com>
#
# This file is part of Lpod (see: http://lpod-project.org).
# Lpod is free software; you can redistribute it and/or modify it under
# the terms of either:
#
# a) the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option)
# any later version.
# Lpod is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Lpod. If not, see <http://www.gnu.org/licenses/>.
#
# b) the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Import from the Standard Library
from sys import stdout
# Import from docutils
from docutils import nodes
from docutils.core import publish_doctree
# Import from imaging
from PIL import Image
# Import from lpod
from document import odf_new_document_from_type
from frame import odf_create_image_frame, odf_create_text_frame
from heading import odf_create_heading
from link import odf_create_link
from list import odf_create_list, odf_create_list_item
from note import odf_create_note
from paragraph import odf_create_paragraph, odf_create_line_break
from paragraph import odf_create_undividable_space
from span import odf_create_span
from scriptutils import printwarn
from style import odf_create_style
from table import odf_create_cell, odf_create_table, odf_create_row
from table import odf_create_column, odf_create_header_rows
from toc import odf_create_toc
DPI = 72
def convert_text(node, context):
context["top"].append(node.astext())
def convert_section(node, context):
# Inc the heading level
context["heading-level"] += 1
# Reset the top to body
context["top"] = context["body"]
# Convert
for child in node:
convert_node(child, context)
# Restore the heading level
context["heading-level"] -= 1
def convert_title(node, context):
level = context["heading-level"]
if level == 0:
# The document did not start with a section
level = 1
heading = odf_create_heading(level, node.astext(),
style='Heading_20_%s' % level)
context["body"].append(heading)
def convert_paragraph(node, context):
# Search for a default style
style = context['styles'].get('paragraph')
paragraph = odf_create_paragraph(style=style)
context["top"].append(paragraph)
# Save the current top
old_top = context["top"]
# Convert
context["top"] = paragraph
for child in node:
convert_node(child, context)
# And restore the top
context["top"] = old_top
def convert_list(node, context, list_type):
# Reuse template styles
if list_type == "enumerated":
style_name = "Numbering_20_1"
else:
style_name = "List_20_1"
odf_list = odf_create_list(style=style_name)
context["top"].append(odf_list)
# Save the current top
old_top = context["top"]
for item in node:
if item.tagname != "list_item":
printwarn("node not supported: %s" % item.tagname)
continue
# Create a new item
odf_item = odf_create_list_item()
odf_list.append(odf_item)
# A new top
context["top"] = odf_item
for child in item:
convert_node(child, context)
# And restore the top
context["top"] = old_top
def convert_list_enumerated(node, context):
return convert_list(node, context, "enumerated")
def convert_list_bullet(node, context):
return convert_list(node, context, "bullet")
def convert_topic(node, context):
# Reset the top to body
context["top"] = context["body"]
    # Yet another TOC?
if context["skip_toc"]:
return
if context["toc"] is not None:
printwarn("a TOC is already inserted")
return
title = node.next_node(condition=nodes.title).astext()
toc = odf_create_toc(title=title)
context["body"].append(toc)
context["toc"] = toc
def convert_footnote(node, context):
# XXX ids is a list ??
refid = node.get("ids")[0]
# Find the footnote
footnotes = context["footnotes"]
if refid not in footnotes:
printwarn('unknown footnote "%s"' % refid)
return
footnote_body = footnotes[refid].get_element("text:note-body")
# Save the current top
old_top = context["top"]
# Fill the note
context["top"] = footnote_body
for child in node:
# We skip the label (already added)
if child.tagname == "label":
continue
convert_node(child, context)
# And restore the top
context["top"] = old_top
def convert_footnote_reference(node, context):
refid = node.get("refid")
citation = node.astext()
footnote = odf_create_note(note_id=refid, citation=citation)
context["top"].append(footnote)
context["footnotes"][refid] = footnote
def _convert_style_like(node, context, style_name):
# Create the span
span = odf_create_span(style=style_name)
context["top"].append(span)
# Save the current top
old_top = context["top"]
# Convert
context["top"] = span
for child in node:
convert_node(child, context)
# And restore the top
context["top"] = old_top
def _get_emphasis_style(context):
styles = context['styles']
emphasis_style = styles.get('emphasis')
if emphasis_style is not None:
return emphasis_style
emphasis_style = odf_create_style("text", italic=True)
context['doc'].insert_style(emphasis_style, automatic=True)
styles['emphasis'] = emphasis_style
return emphasis_style
def convert_emphasis(node, context):
emphasis_style = _get_emphasis_style(context).get_style_name()
# Convert
_convert_style_like(node, context, emphasis_style)
def _get_strong_style(context):
styles = context['styles']
strong_style = styles.get('strong')
if strong_style is not None:
return strong_style
strong_style = odf_create_style("text", bold=True)
context['doc'].insert_style(strong_style, automatic=True)
styles['strong'] = strong_style
return strong_style
def convert_strong(node, context):
strong_style = _get_strong_style(context).get_style_name()
# Convert
_convert_style_like(node, context, strong_style)
def convert_literal(node, context):
# Convert
_convert_style_like(node, context, "Example")
def convert_literal_block(node, context):
paragraph = odf_create_paragraph(style="Preformatted_20_Text")
context["top"].append(paragraph)
# Convert
for child in node:
# Only text
if child.tagname != "#text":
printwarn('node "%s" not supported in literal block' % (
child.tagname))
continue
text = child.astext()
tmp = []
spaces = 0
for c in text:
if c == '\n':
if tmp:
tmp = u"".join(tmp)
paragraph.append(tmp)
tmp = []
spaces = 0
paragraph.append(odf_create_line_break())
elif c == '\r':
continue
elif c == ' ':
spaces += 1
elif c == '\t':
# Tab = 4 spaces
spaces += 4
else:
if spaces >= 2:
if tmp:
tmp = u"".join(tmp)
paragraph.append(tmp)
tmp = []
paragraph.append(
odf_create_undividable_space(spaces))
spaces = 0
elif spaces == 1:
|
lk-geimfari/elizabeth
|
mimesis/data/int/address.py
|
Python
|
mit
| 20,986
| 0
|
"""Provides all the generic data related to the address."""
COUNTRY_CODES = {
"a2": [
"AD",
"AE",
"AF",
"AG",
"AI",
"AL",
"AM",
"AN",
"AO",
"AQ",
"AR",
"AS",
"AT",
"AU",
"AW",
"AX",
"AZ",
"BA",
"BB",
"BD",
"BE",
"BF",
"BG",
"BH",
"BI",
"BJ",
"BL",
"BM",
"BN",
"BO",
"BR",
"BS",
"BT",
"BV",
"BW",
"BY",
"BZ",
"CA",
"CC",
"CD",
"CF",
"CG",
"CH",
"CI",
"CK",
"CL",
"CM",
"CN",
"CO",
"CR",
"CU",
"CV",
"CX",
"CY",
"CZ",
"DE",
"DJ",
"DK",
"DM",
"DO",
"DZ",
"EC",
"EE",
"EG",
"EH",
"ER",
"ES",
"ET",
"FI",
"FJ",
"FK",
"FM",
"FO",
"FR",
"GA",
"GB",
"GD",
"GE",
"GF",
"GG",
"GH",
"GI",
"GL",
"GM",
"GN",
"GP",
"GQ",
"GR",
"GS",
"GT",
"GU",
"GW",
"GY",
"HK",
"HM",
"HN",
"HR",
"HT",
"HU",
"ID",
"IE",
"IL",
"IM",
"IN",
"IO",
"IQ",
"IR",
"IS",
"IT",
"JE",
"JM",
"JO",
"JP",
"KE",
"KG",
"KH",
"KI",
"KM",
"KN",
"KP",
"KR",
"KW",
"KY",
"KZ",
"LA",
"LB",
"LC",
"LI",
"LK",
"LR",
"LS",
"LT",
"LU",
"LV",
"LY",
"MA",
"MC",
"MD",
"ME",
"MF",
"MG",
"MH",
"MK",
"ML",
"MM",
"MN",
"MO",
"MP",
"MQ",
"MR",
"MS",
"MT",
"MU",
"MV",
"MW",
"MX",
"MY",
"MZ",
"NA",
"NC",
"NE",
"NF",
"NG",
"NI",
"NL",
"NO",
"NP",
"NR",
"NU",
"NZ",
"OM",
"PA",
"PE",
"PF",
"PG",
"PH",
"PK",
"PL",
"PM",
"PN",
"PR",
"PS",
"PT",
"PW",
"PY",
"QA",
"RE",
"RO",
"RS",
"RU",
"RW",
"SA",
"SB",
"SC",
"SD",
"SE",
"SG",
"SH",
"SI",
"SJ",
"SK",
"SL",
"SM",
"SN",
"SO",
"SR",
"SS",
"ST",
"SV",
"SY",
"SZ",
"TC",
"TD",
"TF",
"TG",
"TH",
"TJ",
"TK",
"TL",
"TM",
"TN",
"TO",
"TR",
"TT",
"TV",
"TW",
"TZ",
"UA",
"UG",
"UM",
"US",
"UY",
"UZ",
"VA",
"VC",
"VE",
"VG",
"VI",
"VN",
"VU",
"WF",
"WS",
"YE",
"YT",
"ZA",
"ZM",
"ZW",
],
"a3": [
"AND",
"ARE",
"AFG",
"ATG",
"AIA",
"ALB",
"ARM",
"ANT",
"AGO",
"ATA",
"ARG",
"ASM",
"AUT",
"AUS",
"ABW",
"ALA",
"AZE",
"BIH",
"BRB",
"BGD",
"BEL",
"BFA",
"BGR",
"BHR",
"BDI",
"BEN",
"BLM",
"BMU",
"BRN",
"BOL",
"BRA",
"BHS",
"BTN",
"BVT",
"BWA",
"BLR",
"BLZ",
"CAN",
"CCK",
"COD",
"CAF",
"COG",
"CHE",
"CIV",
"COK",
"CHL",
"CMR",
"CHN",
"COL",
"CRI",
"CUB",
"CPV",
"CXR",
"CYP",
"CZE",
"DEU",
"DJI",
"DNK",
"DMA",
"DOM",
"DZA",
"ECU",
"EST",
"EGY",
"ESH",
"ERI",
"ESP",
"ETH",
"FIN",
"FJI",
"FLK",
"FSM",
"FRO",
"FRA",
"GAB",
"GBR",
"GRD",
"GEO",
"GUF",
"GGY",
"GHA",
"GIB",
"GRL",
"GMB",
"GIN",
"GLP",
"GNQ",
"GRC",
"SGS",
"GTM",
"GUM",
"GNB",
"GUY",
"HKG",
"HMD",
"HND",
"HRV",
"HTI",
"HUN",
"IDN",
"IRL",
"ISR",
"IMN",
"IND",
"IOT",
"IRQ",
"IRN",
"ISL",
"ITA",
"JEY",
"JAM",
"JOR",
"JPN",
"KEN",
"KGZ",
"KHM",
"KIR",
"COM",
"KNA",
"PRK",
"KOR",
"KWT",
"CYM",
"KAZ",
"LAO",
"LBN",
"LCA",
"LIE",
"LKA",
"LBR",
"LSO",
"LTU",
"LUX",
"LVA",
"LBY",
"MAR",
"MCO",
"MDA",
"MNE",
"MAF",
"MDG",
"MHL",
"MKD",
"MLI",
"MMR",
"MNG",
"MAC",
"MNP",
"MTQ",
"MRT",
"MSR",
"MLT",
"MUS",
"MDV",
"MWI",
"MEX",
"MYS",
"MOZ",
"NAM",
"NCL",
"NER",
"NFK",
"NGA",
"NIC",
"NLD",
"NOR",
"NPL",
"NRU",
"NIU",
"NZL",
"OMN",
"PAN",
"PER",
"PYF",
"PNG",
"PHL",
"PAK",
"POL",
"SPM",
"PCN",
"PRI",
"PSE",
"PRT",
"PLW",
"PRY",
"QAT",
"REU",
"ROU",
"SRB",
"RUS",
"RWA",
"SAU",
"SLB",
"SYC",
"SDN",
"SWE",
"SGP",
"SHN",
"SVN",
"SJM",
"SVK",
"SLE",
"SMR",
"SEN",
"SOM",
"SUR",
"SSD",
"STP",
"SLV",
"SYR",
"SWZ",
"TCA",
"TCD",
"ATF",
"TGO",
"THA",
"TJK",
"TKL",
"TLS",
"TKM",
"TUN",
"TON",
"TUR",
"TTO",
"TUV",
"TWN",
"TZA",
"UKR",
"UGA",
"UMI",
"USA",
"URY",
"UZB",
"VAT",
"VCT",
"VEN",
"VGB",
"VIR",
"VNM",
"VUT",
"WLF",
"WSM",
"YEM",
"MYT",
"ZAF",
"ZMB",
"ZWE",
],
"fifa": [
"AFG",
"AIA",
"ALB",
"ALG",
"AND",
"ANG",
"ARG",
"ARM",
"ARU",
"ARU",
"ASA",
"ATG",
"AUT",
"AZE",
"BAH",
"BAN",
|
"BDI",
"BEL",
"BEN",
"BER",
"BFA",
"BHR",
"BHU",
"BIH",
"BLR",
"BLZ",
"BOE",
"BOL",
"BOT",
"BRA",
"BRB",
"BRU",
"BUL",
"CAM",
"CAN",
"CAY",
"CGO",
"CHA",
"CHI",
"CHN",
"CIV",
"CMR",
"COD",
"COK",
"COL",
"COM",
"CPV",
"CRC",
"CRO",
"CTA"
|
,
"CUB",
"CUW",
"CYP",
"CZE",
"DEN",
"DJI",
"DMA",
"DOM",
"ECU",
|
a358003542/python-guide-book
|
codes/ch12/asyncio_get_poetry2.py
|
Python
|
gpl-2.0
| 1,899
| 0
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import datetime
import argparse
import asyncio
def parse_args():
usage = """usage: %prog [options] [hostname]:port ...
python3 select_get_poetry3.py port1 port2 port3 ...
"""
    parser = argparse.ArgumentParser(usage=usage)
parser.add_argument('port', nargs='+')
args = vars(parser.parse_args())
addresses = args['port']
if not addresses:
print(parser.format_help())
parser.exit()
def parse_address(addr):
if ':' not in addr:
host = '127.0.0.1'
port = addr
else:
host, port = addr.split(':', 1)
if not port.isdigit():
parser.error('Ports must be integers.')
return host, int(port)
return map(parse_address, addresses)
class PoetryClientProtocol(asyncio.Protocol):
def __init__(self, infile):
self.infile = infile
def connection_made(self, transport):
print(transport.get_extra_info('peername'))
        self.transport = transport
self.transport.write(b'poems')
def data_received(self, data):
if data:
print(data)
print('writing to {}'.format(self.infile.name))
self.infile.write(data)
            self.transport.write(b'poems')
def eof_received(self):
print('end of writing')
self.infile.close()
def main():
addresses = parse_args()
eventloop = asyncio.get_event_loop()
for address in addresses:
host, port = address
filename = str(port) + '.txt'
infile = open(filename, 'wb')
coro = eventloop.create_connection(
lambda: PoetryClientProtocol(infile), host, port)
t, p = eventloop.run_until_complete(coro)
print(t, p)
try:
eventloop.run_forever()
finally:
eventloop.close()
if __name__ == '__main__':
main()
|
samcoveney/GP_emu_UQSA
|
gp_emu_uqsa/design_inputs/__init__.py
|
Python
|
gpl-3.0
| 29
| 0
|
from .design_inputs import *
| |
dhermes/project-euler
|
python/complete/no121.py
|
Python
|
apache-2.0
| 2,079
| 0.000481
|
#!/usr/bin/env python
# A bag contains one red disc and one blue disc. In a game of chance a player
# takes a disc at random and its colour is noted. After each turn the disc is
# returned to the bag, an extra red disc is added, and another disc is
# taken at random.
# The player... wins if they have taken more blue discs than red discs at
# the end of the game.
# ------------------------------------------------------------------------
# P_n = prob(disc n is blue) = 1/(n + 1)
# For n discs, let C_1-C_2-...-C_n be the colors drawn, let i_1,...,i_k be the
# indices j such that disk i_j was drawn red. The probability of this event
# is (i_1 * ... * i_k)/factorial(n + 1)
# We can enumeratively define n_{j,k} to be the aggregate numerator
# of all possible draws with j blues drawn out of k draws
#
# The initial conditions are n_{0,1} = 1, n_{1,1} = 1
# The recurrence is defined by the fact that the n_{j + 1,k + 1} is
# can only have the (k + 1)'st element be blue or red, hence
# n_{j + 1,k + 1} = numer(blue)*n_{j,k} + numer(red)*n_{j + 1,k}
# = n_{j,k} + (k + 1)*n_{j + 1,k}
# except for the cases j = k, where n_{j,k} = numer(all blue) = 1
# except for the cases j = 0, where n_{0,k} = k!
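#
# Worked example (added for clarity, not in the original source): for k = 2,
#   n_{2,2} = 1, n_{0,2} = 2! = 2, n_{1,2} = n_{0,1} + 2 * n_{1,1} = 1 + 2 = 3,
# and 1 + 3 + 2 = 6 = 3!, i.e. the numerators over all 2-draw sequences sum to (n + 1)!.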
from math import factorial
from python.decorators import euler_timer
def iterative_numerator(n):
numerators = {}
for k in range(1, n + 1):
for j in range(k + 1):
if j == 0:
numerators[(j, k)] = factorial(k)
elif j == k:
numerators[(j, k)] = 1
else:
numerators[(j, k)] = (numerators[(j - 1, k - 1)] +
k * numerators[(j, k - 1)])
min_blue = (n / 2) + 1
count = 0
    for blue in range(min_blue, n + 1):
count += numerators[(blue, n)]
return count
def max_payout(n):
# Integer division precludes floor operation
return factorial(n + 1) / iterative_numerator(n)
def main(verbose=False):
    return max_payout(15)
if __name__ == '__main__':
print euler_timer(121)(main)(verbose=True)
|
CompassionCH/compassion-modules
|
message_center_compassion/tests/test_onramp_controller.py
|
Python
|
agpl-3.0
| 2,068
| 0.000484
|
##############################################################################
#
# Copyright (C) 2018 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Nicolas Bornand
#
# The licence is in the file __manifest__.py
#
##############################################################################
from mock import patch
from .onramp_base_test import TestOnramp
mock_oauth = (
"odoo.addons.message_center_compassion.models.ir_http.IrHTTP._oauth_validation"
)
class TestOnRampController(TestOnramp):
def setUp(self):
super().setUp()
def test_no_token(self):
""" Check we have an access denie
|
d if token is not provided
"""
del self.opener.headers["Authorization"]
response = self._send_post({"nothing": "nothing"})
self.assertEqual(response.status_code, 401)
error = response.json()
self.assertEqual(error["ErrorMethod"], "ValidateToken")
def test_bad_token(self):
""" Check we have an acc
|
ess denied if token is not valid
"""
self.opener.headers["Authorization"] = "Bearer notrealtoken"
response = self._send_post({"nothing": "nothing"})
self.assertEqual(response.status_code, 401)
@patch(mock_oauth)
def test_wrong_client_id(self, oauth_patch):
""" Check that if we get a token with unrecognized client_id,
access is denied. """
oauth_patch.return_value = "wrong_user"
response = self._send_post({"nothing": "nothing"})
self.assertEqual(response.status_code, 401)
@patch(mock_oauth)
def test_good_client_id(self, oauth_patch):
""" Check that if we connect with admin as client_id,
access is granted. """
oauth_patch.return_value = "admin"
response = self._send_post({"nothing": "nothing"})
json_result = response.json()
self.assertEqual(response.status_code, 200)
self.assertEqual(
json_result["Message"], "Unknown message type - not processed."
)
|
CNS-OIST/STEPS_Example
|
publication_models/API_2/Chen_FNeuroinf_2014/AD/AD_single.py
|
Python
|
gpl-2.0
| 2,125
| 0.004706
|
########################################################################
# #
# Anomalous Diffusion #
# #
########################################################################
import steps.interface
########################################################################
# Create Model
from steps.model import *
from steps.geom import *
from steps.rng import *
from steps.sim import *
from steps.saving import *
from steps.visual import *
import time
mdl = Model()
r = ReactionManager()
with mdl:
X = Species.Create()
vsys = VolumeSystem.Create()
with vsys:
dif_X = Diffusion.Create(X, 2e-09)
########################################################################
# Create Geometry
tetmesh = TetMesh.LoadAbaqus('2_20_0.7.inp', scale=1e-06, ebs=None, shadow_mesh="2_20_0.7_conf")
########################################################################
# Create Random number generator
rng = RNG('mt19937', 512, int(time.time()%4294967295))
########################################################################
# Initialize simulation
sim = Simulation('Tetexact', mdl, tetmesh, rng)
sim.injection.X.Count = 2000
########################################################################
# Visualization
rs = ResultSelector(sim)
# Create control
sc = SimControl(end_time = 1.0, upd_interval = 0.00001)
with sc:
with SimDisplay('Show Spine Species'):
# Static mesh element
ElementDisplay(rs.dend, color=[0, 0, 1, 0.2])
# Dynamic element
ElementDisplay(rs.LIST('dend'
|
, 'shaft').X, color=[1.0, 0.0, 0.
|
0, 1.0], spec_size=0.1)
with SimDisplay('Hide Spine Species'):
ElementDisplay(rs.dend, color=[0, 0, 1, 0.2])
ElementDisplay(rs.shaft.X, color=[1.0, 0.0, 0.0, 1.0], spec_size=0.1)
with PlotDisplay('Plots'):
SpatialPlot(rs.TETS(tetmesh.shaft.tets).X.Count, axis=[0, 0, 1], nbins=100)
# Enter visualization loop
sc.run()
|
Poom1997/GMan
|
sendMessageForm.py
|
Python
|
mit
| 2,434
| 0.012736
|
from PySide.QtCore import *
from PySide.QtGui import *
from PySide.QtUiTools import *
import plugin.databaseConnect as database
from datetime import datetime
class sendMessageUI(QMainWindow):
def __init__(self, id = None, bulk = None, parent = None):
QMainWindow.__init__(self,None)
self.setMinimumSize(626,380)
self.setWindowTitle("Message")
self.parent = parent
self.id = id
self.bulk = bulk
self.UIinit()
def UIinit(self):
loader = QUiLoader()
form = loader.load("resources/UI/sendMessage.ui",None)
self.setCentralWidget(form)
#QPushButton
self.send_button = form.findChild(QPushButton,"sendButton")
self.close_button = form.findChild(QPushButton,"closeButton")
#LineEdit
self.to_user = form.findChild(QLineEdit,"to")
self.message = form.findChild(QTextEdit,"message")
#Connect
self.send_button.clicked.connect(self.sendMes)
self.close_button.clicked.connect(self.closeWindow)
if(self.id != None):
self.to_user.setText(self.id)
def closeWindow(self):
self.close()
##Create message and send it to other user##
def sendMes(self):
db = database.databaseMessage()
toUser = self.to_user.text()
message = self.message.toPlainText()
time = datetime.now()
if(self.bulk == None):
data = self.parent.getCurrentUser()
fromUser = data.getID()
if(db.sendMessage(toUser, fromUser, message, time)):
db.disconnect()
|
self.parent.showOK("Message Sent", "The message has been sent to the user!")
self.closeWindow()
else:
                self.parent.showERROR("UserID Not Found", "The UserID you entered does not exist.")
else:
data = self.parent.parent.getCurrentUser()
fromUser = data.getID()
val = 0
for
|
id in self.bulk:
val = db.sendMessage(id, fromUser, message, time)
if (val):
db.disconnect()
                self.parent.parent.showOK("All Messages Sent", "The message has been sent to all users!")
self.closeWindow()
else:
self.parent.parent.showERROR("ERROR!", "Some Messages are not delivered.")
|
wiltonlazary/arangodb
|
3rdParty/iresearch/external/text/scripts/generate_unicode_break_tests.py
|
Python
|
apache-2.0
| 22,214
| 0.002206
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2020 T. Zachary Laine
#
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
prop_lookup_test_form = decls = '''\
// Copyright (C) 2020 T. Zachary Laine
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Warning! This file is autogenerated.
#include <boost/text/{0}_break.hpp>
#include <gtest/gtest.h>
TEST({0}, prop_lookups_{2})
{{{1}
}}
'''
prop_lookup_perf_test_form = decls = '''\
// Copyright (C) 2020 T. Zachary Laine
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Warning! This file is autogenerated.
#include <boost/text/{0}_break.hpp>
#include <benchmark/benchmark.h>
#include <iostream>
void BM_{0}_prop(benchmark::State & state)
{{
while (state.KeepRunning()) {{
{1}
}}
std::cout << "Divide result by {2} to get mean time.\\n";
}}
BENCHMARK(BM_{0}_prop);
BENCHMARK_MAIN()
'''
break_test_form = decls = '''\
// Copyright (C) 2020 T. Zachary Laine
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Warning! This file is autogenerated.
#include <boost/text/{0}_break.hpp>
#include <gtest/gtest.h>
#include <algorithm>
TEST({0}, breaks_{2})
{{{1}
}}
'''
grapheme_iterator_test_form = decls = '''\
// Copyright (C) 2020 T. Zachary Laine
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Warning! This file is autogenerated.
#include <boost/text/grapheme_iterator.hpp>
#include <boost/text/transcode_iterator.hpp>
#include <gtest/gtest.h>
#include <algorithm>
{0}
'''
bidi_test_file_form = decls = '''\
// Copyright (C) 2020 T. Zachary Laine
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Warning! This file is autogenerated.
#include <boost/text/bidirectional.hpp>
#include "bidi_tests.hpp"
#include <gtest/gtest.h>
#include <algorithm>
std::vector<int> expected_levels;
std::vector<int> expected_reordered_indices;
TEST(bidi, bidi_{1:03}_000)
{{
{0}
}}
'''
bidi_test_form = '''
{{
// {0} ('{5}') (line {3})
std::vector<uint32_t> const cps = {{ {1} }};
std::vector<int> const levels =
bidi_levels(cps.begin(), cps.end(), {4});
int i = 0;
for (int l : expected_levels) {{
if (0 <= l) {{
EXPECT_EQ(levels[i], l) << "i=" << i;
++i;
}}
}}
EXPECT_EQ((int)levels.size(), i);
std::vector<int> const reordered =
bidi_reordered_indices(cps.begin(), cps.end(), {4});
i = 0;
for (int idx : expected_reordered_indices) {{
// Skip FSI, LRI, RLI, and PDI.
if (cps[idx] < 0x2066 || 0x2069 < cps[idx]) {{
EXPECT_EQ(reordered[i], (int)cps[idx])
<< std::hex
<< " 0x" << reordered[i]
<< " 0x" << cps[idx]
<< std::dec << " i=" << i;
}}
++i;
}}
std::vector<int> reordered_2;
for (auto subrange :
boost::text::bidirectional_subranges(cps, {4})) {{
for (auto cp : subrange) {{
reordered_2.push_back(cp);
}}
}}
i = 0;
for (int idx : expected_reordered_indices) {{
if (cps[idx] < 0x2066 || 0x2069 < cps[idx]) {{
EXPECT_EQ(reordered_2[i], (int)cps[idx])
<< std::hex
<< " 0x" << reordered_2[i]
<< " 0x" << cps[idx]
<< std::dec << " i=" << i;
}}
++i;
}}
EXPECT_EQ(i, (int)reordered_2.size());
}}
'''
bidi_character_test_file_form = decls = '''\
// Copyright (C) 2020 T. Zachary Laine
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Warning! This file is autogenerated.
#include <boost/text/bidirectional.hpp>
#include "bidi_test
|
s.hpp"
#include <gtest/gtest.h>
#include <algorithm>
TEST(bidi_character, bidi_character_{1:03}_000)
{{
{0}
}}
'''
bidi_character_test_form = '''
{{
// line {4}
std::vector<uint32_t> cons
|
t cps = {{ {0} }};
std::vector<int> const expected_levels =
{{ {2} }};
std::vector<int> const levels =
bidi_levels(cps.begin(), cps.end(), {1});
int i = 0;
for (int l : expected_levels) {{
if (0 <= l) {{
EXPECT_EQ(levels[i], l) << "i=" << i;
++i;
}}
}}
EXPECT_EQ((int)levels.size(), i);
std::vector<uint32_t> const expected_reordered_indices =
{{ {3} }};
std::vector<int> const reordered =
bidi_reordered_indices(cps.begin(), cps.end(), {1});
i = 0;
for (int idx : expected_reordered_indices) {{
EXPECT_EQ(reordered[i], (int)cps[idx])
<< std::hex
<< " 0x" << reordered[i]
<< " 0x" << cps[idx]
<< std::dec << " i=" << i;
++i;
}}
}}
'''
def extract_cps_and_breaks(filename, batch_size = 50):
current_batch = []
retval = []
lines = open(filename, 'r').readlines()
num_lines = 0
for line in lines:
if num_lines == batch_size:
retval.append(current_batch)
current_batch = []
num_lines = 0
line = line[:-1]
if not line.startswith('#') and len(line) != 0:
comment_start = line.find('#')
comment = ''
if comment_start != -1:
comment = line[comment_start + 1:].strip()
line = line[:comment_start]
fields = line.split(' ')[1:-1]
cps = []
active_break = True
for i in range(len(fields)):
f = fields[i]
if f[0] in '0123456789ABCDEF':
cps.append((f, active_break))
else:
active_break = f == '÷'
current_batch.append((cps, line, comment))
num_lines += 1
if len(current_batch):
retval.append(current_batch)
return retval
def generate_prop_lookup_tests(cps_and_breaks, prop_, prop_names):
for i in range(len(cps_and_breaks)):
prop_lookup_tests = ''
chunk = cps_and_breaks[i]
for elem in chunk:
(cps, line, comment) = elem
comment_fields = comment.split(' ')
j = 0
for f in comment_fields:
if f.startswith('(') and f.endswith(')'):
prop_lookup_tests += \
'\n EXPECT_EQ(boost::text::{0}_prop(0x{1}), {2});'.format(
prop_, cps[j][0], prop_names[f[1:-1]]
)
j += 1
cpp_file = open('{}_prop_lookup_{:02}.cpp'.format(prop_, i), 'w')
cpp_file.write(prop_lookup_test_form.format(prop_, prop_lookup_tests, i))
def generate_prop_lookup_perf_tests(cps_and_breaks, prop_):
prop_lookup_perf_tests = ''
lines = 0
for i in range(len(cps_and_breaks)):
chunk = cps_and_breaks[i]
for elem in chunk:
(cps, line, comment) = elem
comment_fields = comment.split(' ')
j = 0
for f in comment_fields:
if f.startswith('(') and f.endswith(')'):
prop_lookup_perf_tests += \
' benchmark::DoNotOptimize(boost::text::{0}_prop(0x{1}));\n'.format(
|
linzhaolover/myansible
|
openstackfile/getgpulocked.py
|
Python
|
apache-2.0
| 696
| 0.027299
|
#!/usr/bin/env python
path="/var/lib/gpu/gpu_locked.txt"
import os,sys
import ast
import socket
def getHost():
return socket.gethostname()
def getlocked():
hostname=getHost()
#print path
fp=open(path, "r")
info=fp.read()
#print info
d=ast.literal_eval(info)
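    # NOTE: gpu_locked.txt is assumed (based on the lookups below) to contain a
    # Python dict literal such as {'nvidia0': {'available_count': 9998}, ...};
    # ast.literal_eval parses it safely without executing arbitrary code.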
#print len(d)
print "%s,nvidia0,%d" % (hostname, (9999 - d['nvidia0']['available_count']))
print "
|
%s,nvidia1,%d" % (hostname, (9999 - d['nvidia1']['available_count']))
print "%s,nvidia2,%d" % (hostname, (9999 - d['nvidia2']['available_count']))
print "%s,nvidia3,%d" % (hostname, (9999 - d['nvidia3']['available_count']))
fp.cl
|
ose()
if __name__ == "__main__":
getlocked()
|
AnsonShie/system_monitor
|
messages_monitor.py
|
Python
|
apache-2.0
| 2,645
| 0.003403
|
#!/usr/bin/python
__author__ = 'anson'
import optparse
import re
import sys
from utils.utils_cmd import execute_sys_cmd
from lib_monitor.monitor_default_format import nagios_state_to_id
class messages_check():
def __init__(self, rex, config, type):
self.rex = rex
self.config = config
self.type = type
def run(self):
result, infos = execute_sys_cmd('/usr/local/nagios/libexec/check_logfiles -f ' + self.config)
v_protocol = None
exit_state = 3
if len(infos) > 0:
state = infos[0].split()[0]
if state not in nagios_state_to_id.keys():
print infos
sys.exit(exit_state)
exit_state = nagios_state_to_id[state]
if nagios_state_to_id[state] > 0:
m_protocol = re.search(r'\(\d+ errors in ([^ ]+)\)', infos[0])
v_protocol = m_protocol.group(1) if m_protocol else None
else:
sys.exit(exit_state)
if v_protocol is not None:
rex_dict = []
with open(self.rex, buffering=
|
2000000) as rex_all:
for rex_split in rex_all:
rex_dict.append(rex_split)
with open('/tmp/' + v_protocol, buffering=2000000) as file_to_check:
for pa
|
rt in file_to_check:
for rex_rule in rex_dict:
m_iface = re.search(rex_rule, part)
v_dev = m_iface.group(1) if m_iface else 'none'
print v_dev
sys.exit(exit_state)
def main():
"""
messages_monitor.py
unit test example
python messages_monitor.py
"""
parser = optparse.OptionParser(
usage="%prog [options] [--parameter]",
description="To monitor system log file."
)
parser.add_option("--config",
dest="config",
help="Config file for error extraction",
type="string",
default="/usr/local/nagios/libexec/check_log.log"
)
parser.add_option("--type",
dest="type",
help="Event type",
type="string",
default="disk"
)
parser.add_option("--rex",
dest="rex",
help="Regular Expression",
type="string",
default="/usr/local/nagios/libexec/rule.conf"
)
(options, args) = parser.parse_args()
check = messages_check(options.rex, options.config, options.type)
check.run()
if __name__ == '__main__':
main()
|
C2SM-RCM/testsuite
|
tools/comp_table.py
|
Python
|
mit
| 5,041
| 0.026384
|
#!/usr/bin/env python2
"""
COSMO TECHNICAL TESTSUITE
General purpose script to compare two files containing tables
Only lines with given table pattern are considered
"""
# built-in modules
import os, sys, string
# information
__author__ = "Xavier Lapillonne"
__maintainer__ = "xavier.lapillonne@meteoswiss.ch"
def cmp_table(file1,file2,colpattern,minval,threshold,verbose=1,maxcompline=-1):
# General purpose script to compare two files containing tables
    # Only lines matching the given column pattern are considered. Columns to be compared are marked with c,
    # columns to discard with x
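    # Example invocation (hypothetical file names), comparing columns 2 and 3 of a
    # 4-column table with a relative threshold of 1e-12 (minval=0 skips pairs
    # where both values are zero):
    #   ./comp_table.py ref.txt new.txt xccx 0 1e-12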
#init
ncomp=0
nerror=0
lerror=False
epsilon=1e-16 #used to avoid division by zero in case minval is zero
# check file existence
if not(os.path.exists(file1)):
print('File %s does not exist' %(file1))
return -1
elif not(os.path.exists(file2)):
        print('File %s does not exist' %(file2))
return -1
# convert input
colpattern=[x=='c' for x in list(colpattern)]
threshold=float(threshold)
minval=float(minval)
# open file
data1=open(file1).readlines()
data2=open(file2).readlines()
# get max record
nd1=len(data1)
nd2=len(data2)
# check that files are not empty
if nd1==0:
print('file %s is empty!' %(file1))
return -1
if nd2==0:
print('file %s is empty!' %(file2))
return -1
if nd1!=nd2 and verb
|
ose>1:
        print('Warning: %s and %s have different size, comparing common set only \n' %(file1,file2))
ncdata=min(nd1,nd2)
if (maxcompline>0):
ncdata=min(ncdata,maxcomp
|
line)
# Iterates through the lines
for il in range(ncdata):
l1=data1[il].split()
l2=data2[il].split()
l1match=matchColPattern(l1,colpattern)
l2match=matchColPattern(l2,colpattern)
# compare values if both lines are compatible
if l1match and l2match:
for ic in range(len(colpattern)):
if colpattern[ic]:
v1=float(l1[ic])
v2=float(l2[ic])
val_abs_max=max(abs(v1),abs(v2))
if val_abs_max > minval:
ncomp+=1
diff=abs(v1-v2)/(val_abs_max+epsilon)
if diff>threshold:
nerror+=1
# Print error
if verbose>1:
                                print('Error %2.2e above %2.2e threshold at line %i, col %i' %(diff,threshold,il+1,ic+1))
print('> %s' %(file1))
print(data1[il])
print('< %s' %(file2))
print(data2[il])
#save line for first error
if not lerror:
differ=diff
linerr=il+1
colerr=ic+1
linerr1=data1[il]
linerr2=data2[il]
lerror=True
if ncomp==0:
        print('Warning: no line to compare')
nerror=-2
if lerror and verbose>0:
print('Compared values: %i, errors above threshold: %i ; %i %% ' %(ncomp,nerror,nerror*100./ncomp))
if verbose==1:
            print('First error %2.2e above %2.2e threshold at line %i, col %i' %(differ,threshold,linerr,colerr))
print('> %s' %(file1))
print(linerr1)
print('< %s' %(file2))
print(linerr2)
return nerror
#----------------------------------------------------------------------------
# Local functions
def matchColPattern(line,colpattern):
if len(line)!=len(colpattern):
return False
try:
for i in range(len(colpattern)):
if colpattern[i]: f=float(line[i])
except ValueError:
return False
return True
#-----------------------------------
#execute as a script
if __name__ == "__main__":
if len(sys.argv)==6:
cmp_table(sys.argv[1],sys.argv[2],sys.argv[3],sys.argv[4], \
sys.argv[5])
elif len(sys.argv)==7:
cmp_table(sys.argv[1],sys.argv[2],sys.argv[3],sys.argv[4], \
sys.argv[5],sys.argv[6])
elif len(sys.argv)==8:
cmp_table(sys.argv[1],sys.argv[2],sys.argv[3],sys.argv[4], \
sys.argv[5],sys.argv[6],sys.argv[7])
else:
print('''USAGE : ./comp_table file1 file2 colpattern minval threshold [verbose maxcompline]
General purpose script to compare two files containing tables
    Only lines matching the given column pattern are considered. Columns to be compared must be numbers and are marked with c,
    columns to discard with x
colpattern c for compare or x for ignore, ex: xccx discard first and last column of a 4 column table
''')
|
neilLasrado/erpnext
|
erpnext/accounts/doctype/sales_invoice/pos.py
|
Python
|
gpl-3.0
| 21,154
| 0.02496
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import json
import frappe
from erpnext.accounts.party import get_party_account_currency
from erpnext.controllers.accounts_controller import get_taxes_and_charges
from erpnext.setup.utils import get_exchange_rate
from erpnext.stock.get_item_details import get_pos_profile
from frappe import _
from frappe.core.doctype.communication.email import make
from frappe.utils import nowdate, cint
from six import string_types, iteritems
@frappe.whitelist()
def get_pos_data():
doc = frappe.new_doc('Sales Invoice')
doc.is_pos = 1
pos_profile = get_pos_profile(doc.company) or {}
if not pos_profile:
frappe.throw(_("POS Profile is required to use Point-of-Sale"))
if not doc.company:
doc.company = pos_profile.get('company')
doc.update_stock = pos_profile.get('update_stock')
if pos_profile.get('name'):
pos_profile = frappe.get_doc('POS Profile', pos_profile.get('name'))
pos_profile.validate()
company_data = get_company_data(doc.company)
update_pos_profile_data(doc, pos_profile, company_data)
update_multi_mode_option(doc, pos_profile)
default_print_format = pos_profile.get('print_format') or "Point of Sale"
print_template = frappe.db.get_value('Print Format', default_print_format, 'html')
items_list = get_items_list(pos_profile, doc.company)
customers = ge
|
t_custo
|
mers_list(pos_profile)
doc.plc_conversion_rate = update_plc_conversion_rate(doc, pos_profile)
return {
'doc': doc,
'default_customer': pos_profile.get('customer'),
'items': items_list,
'item_groups': get_item_groups(pos_profile),
'customers': customers,
'address': get_customers_address(customers),
'contacts': get_contacts(customers),
'serial_no_data': get_serial_no_data(pos_profile, doc.company),
'batch_no_data': get_batch_no_data(),
'barcode_data': get_barcode_data(items_list),
'tax_data': get_item_tax_data(),
'price_list_data': get_price_list_data(doc.selling_price_list, doc.plc_conversion_rate),
'customer_wise_price_list': get_customer_wise_price_list(),
'bin_data': get_bin_data(pos_profile),
'pricing_rules': get_pricing_rule_data(doc),
'print_template': print_template,
'pos_profile': pos_profile,
'meta': get_meta()
}
def update_plc_conversion_rate(doc, pos_profile):
conversion_rate = 1.0
price_list_currency = frappe.get_cached_value("Price List", doc.selling_price_list, "currency")
if pos_profile.get("currency") != price_list_currency:
conversion_rate = get_exchange_rate(price_list_currency,
pos_profile.get("currency"), nowdate(), args="for_selling") or 1.0
return conversion_rate
def get_meta():
doctype_meta = {
'customer': frappe.get_meta('Customer'),
'invoice': frappe.get_meta('Sales Invoice')
}
for row in frappe.get_all('DocField', fields=['fieldname', 'options'],
filters={'parent': 'Sales Invoice', 'fieldtype': 'Table'}):
doctype_meta[row.fieldname] = frappe.get_meta(row.options)
return doctype_meta
def get_company_data(company):
return frappe.get_all('Company', fields=["*"], filters={'name': company})[0]
def update_pos_profile_data(doc, pos_profile, company_data):
doc.campaign = pos_profile.get('campaign')
if pos_profile and not pos_profile.get('country'):
pos_profile.country = company_data.country
doc.write_off_account = pos_profile.get('write_off_account') or \
company_data.write_off_account
doc.change_amount_account = pos_profile.get('change_amount_account') or \
company_data.default_cash_account
doc.taxes_and_charges = pos_profile.get('taxes_and_charges')
if doc.taxes_and_charges:
update_tax_table(doc)
doc.currency = pos_profile.get('currency') or company_data.default_currency
doc.conversion_rate = 1.0
if doc.currency != company_data.default_currency:
doc.conversion_rate = get_exchange_rate(doc.currency, company_data.default_currency, doc.posting_date, args="for_selling")
doc.selling_price_list = pos_profile.get('selling_price_list') or \
frappe.db.get_value('Selling Settings', None, 'selling_price_list')
doc.naming_series = pos_profile.get('naming_series') or 'SINV-'
doc.letter_head = pos_profile.get('letter_head') or company_data.default_letter_head
doc.ignore_pricing_rule = pos_profile.get('ignore_pricing_rule') or 0
doc.apply_discount_on = pos_profile.get('apply_discount_on') or 'Grand Total'
doc.customer_group = pos_profile.get('customer_group') or get_root('Customer Group')
doc.territory = pos_profile.get('territory') or get_root('Territory')
doc.terms = frappe.db.get_value('Terms and Conditions', pos_profile.get('tc_name'), 'terms') or doc.terms or ''
doc.offline_pos_name = ''
def get_root(table):
root = frappe.db.sql(""" select name from `tab%(table)s` having
min(lft)""" % {'table': table}, as_dict=1)
return root[0].name
def update_multi_mode_option(doc, pos_profile):
from frappe.model import default_fields
if not pos_profile or not pos_profile.get('payments'):
for payment in get_mode_of_payment(doc):
payments = doc.append('payments', {})
payments.mode_of_payment = payment.parent
payments.account = payment.default_account
payments.type = payment.type
return
for payment_mode in pos_profile.payments:
payment_mode = payment_mode.as_dict()
for fieldname in default_fields:
if fieldname in payment_mode:
del payment_mode[fieldname]
doc.append('payments', payment_mode)
def get_mode_of_payment(doc):
return frappe.db.sql("""
select mpa.default_account, mpa.parent, mp.type as type
from `tabMode of Payment Account` mpa,`tabMode of Payment` mp
where mpa.parent = mp.name and mpa.company = %(company)s and mp.enabled = 1""",
{'company': doc.company}, as_dict=1)
def update_tax_table(doc):
taxes = get_taxes_and_charges('Sales Taxes and Charges Template', doc.taxes_and_charges)
for tax in taxes:
doc.append('taxes', tax)
def get_items_list(pos_profile, company):
cond = ""
args_list = []
if pos_profile.get('item_groups'):
# Get items based on the item groups defined in the POS profile
for d in pos_profile.get('item_groups'):
args_list.extend([d.name for d in get_child_nodes('Item Group', d.item_group)])
if args_list:
cond = "and i.item_group in (%s)" % (', '.join(['%s'] * len(args_list)))
return frappe.db.sql("""
select
i.name, i.item_code, i.item_name, i.description, i.item_group, i.has_batch_no,
i.has_serial_no, i.is_stock_item, i.brand, i.stock_uom, i.image,
id.expense_account, id.selling_cost_center, id.default_warehouse,
i.sales_uom, c.conversion_factor, it.item_tax_template, it.valid_from
from
`tabItem` i
left join `tabItem Default` id on id.parent = i.name and id.company = %s
left join `tabItem Tax` it on it.parent = i.name
left join `tabUOM Conversion Detail` c on i.name = c.parent and i.sales_uom = c.uom
where
i.disabled = 0 and i.has_variants = 0 and i.is_sales_item = 1
{cond}
group by i.item_code
""".format(cond=cond), tuple([company] + args_list), as_dict=1)
def get_item_groups(pos_profile):
item_group_dict = {}
item_groups = frappe.db.sql("""Select name,
lft, rgt from `tabItem Group` order by lft""", as_dict=1)
for data in item_groups:
item_group_dict[data.name] = [data.lft, data.rgt]
return item_group_dict
def get_customers_list(pos_profile={}):
cond = "1=1"
customer_groups = []
if pos_profile.get('customer_groups'):
# Get customers based on the customer groups defined in the POS profile
for d in pos_profile.get('customer_groups'):
customer_groups.extend([d.get('name') for d in get_child_nodes('Customer Group', d.get('customer_group'))])
cond = "customer_group in (%s)" % (', '.join(['%s'] * len(customer_groups)))
return frappe.db.sql(""" select name, customer_name, customer_group,
territory, customer_pos_id from tabCustomer where disabled = 0
and {cond}""".format(cond=cond), tuple(customer_groups), as_dict=1) or {}
def get_customers_address(customers):
customer_address = {}
if isinstance(customers, string_types):
customers = [frappe._dict({'name': customers})]
for data in customers:
address = frappe.db.sql(
|
JetChars/vim
|
vim/bundle/python-mode/pymode/autopep8.py
|
Python
|
apache-2.0
| 120,700
| 0.000033
|
#!/usr/bin/env python
# Copyright (C) 2010-2011 Hideo Hattori
# Copyright (C) 2011-2013 Hideo Hattori, Steven Myint
# Copyright (C) 2013-2015 Hideo Hattori, Steven Myint, Bill Wendling
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Automatically formats Python code to conform to the PEP 8 style guide.
Fixes that only need be done once can be added by adding a function of the form
"fix_<code>(source)" to this module. They should return the fixed source code.
These fixes are picked up by apply_global_fixes().
Fixes that depend on pep8 should be added as methods to FixPEP8. See the class
documentation for more information.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import codecs
import collections
import copy
import difflib
import fnmatch
import inspect
import io
import keyword
import locale
import os
import re
import signal
import sys
import textwrap
import token
import tokenize
import pep8
try:
unicode
except NameError:
unicode = str
__version__ = '1.2.1a0'
CR = '\r'
LF = '\n'
CRLF = '\r\n'
PYTHON_SHEBANG_REGEX = re.compile(r'^#!.*\bpython[23]?\b\s*$')
# For generating line shortening candidates.
SHORTEN_OPERATOR_GROUPS = frozenset([
frozenset([',']),
frozenset(['%']),
frozenset([',', '(', '[', '{']),
frozenset(['%', '(', '[', '{']),
frozenset([',', '(', '[', '{', '%', '+', '-', '*', '/', '//']),
frozenset(['%', '+', '-', '*', '/', '//']),
])
DEFAULT_IGNORE = 'E24'
DEFAULT_INDENT_SIZE = 4
# W602 is handled separately due to the need to avoid "with_traceback".
CODE_TO_2TO3 = {
'E231': ['ws_comma'],
'E721': ['idioms'],
'W601': ['has_key'],
'W603': ['ne'],
'W604': ['repr'],
'W690': ['apply',
'except',
'exitfunc',
'numliterals',
'operator',
'paren',
'reduce',
'renames',
'standarderror',
'sys_exc',
'throw',
'tuple_params',
'xreadlines']}
if sys.platform == 'win32': # pragma: no cover
DEFAULT_CONFIG = os.path.expanduser(r'~\.pep8')
else:
DEFAULT_CONFIG = os.path.join(os.getenv('XDG_CONFIG_HOME') or
os.path.expanduser('~/.config'), 'pep8')
PROJECT_CONFIG = ('setup.cfg', 'tox.ini', '.pep8')
def open_with_encoding(filename, encoding=None, mode='r'):
"""Return opened file with a specific encoding."""
if not encoding:
encoding = detect_encoding(filename)
return io
|
.op
|
en(filename, mode=mode, encoding=encoding,
newline='') # Preserve line endings
def detect_encoding(filename):
"""Return file encoding."""
try:
with open(filename, 'rb') as input_file:
from lib2to3.pgen2 import tokenize as lib2to3_tokenize
encoding = lib2to3_tokenize.detect_encoding(input_file.readline)[0]
# Check for correctness of encoding
with open_with_encoding(filename, encoding) as test_file:
test_file.read()
return encoding
except (LookupError, SyntaxError, UnicodeDecodeError):
return 'latin-1'
def readlines_from_file(filename):
"""Return contents of file."""
with open_with_encoding(filename) as input_file:
return input_file.readlines()
def extended_blank_lines(logical_line,
blank_lines,
blank_before,
indent_level,
previous_logical):
"""Check for missing blank lines after class declaration."""
if previous_logical.startswith('class '):
if logical_line.startswith(('def ', 'class ', '@')):
if indent_level and not blank_lines and not blank_before:
yield (0, 'E309 expected 1 blank line after class declaration')
elif previous_logical.startswith('def '):
if blank_lines and pep8.DOCSTRING_REGEX.match(logical_line):
yield (0, 'E303 too many blank lines ({0})'.format(blank_lines))
elif pep8.DOCSTRING_REGEX.match(previous_logical):
# Missing blank line between class docstring and method declaration.
if (
indent_level and
not blank_lines and
not blank_before and
logical_line.startswith(('def ')) and
'(self' in logical_line
):
yield (0, 'E301 expected 1 blank line, found 0')
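# For example (illustrative snippet, not part of the checks themselves), a body like
#
#     class Foo(object):
#         def bar(self):    # E309 expected 1 blank line after class declaration
#             pass
#
# is flagged because the method definition follows the class statement with no
# blank line in between.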
pep8.register_check(extended_blank_lines)
def continued_indentation(logical_line, tokens, indent_level, indent_char,
noqa):
"""Override pep8's function to provide indentation information."""
first_row = tokens[0][2][0]
nrows = 1 + tokens[-1][2][0] - first_row
if noqa or nrows == 1:
return
# indent_next tells us whether the next block is indented. Assuming
# that it is indented by 4 spaces, then we should not allow 4-space
# indents on the final continuation line. In turn, some other
# indents are allowed to have an extra 4 spaces.
indent_next = logical_line.endswith(':')
row = depth = 0
valid_hangs = (
(DEFAULT_INDENT_SIZE,)
if indent_char != '\t' else (DEFAULT_INDENT_SIZE,
2 * DEFAULT_INDENT_SIZE)
)
# Remember how many brackets were opened on each line.
parens = [0] * nrows
# Relative indents of physical lines.
rel_indent = [0] * nrows
# For each depth, collect a list of opening rows.
open_rows = [[0]]
# For each depth, memorize the hanging indentation.
hangs = [None]
# Visual indents.
indent_chances = {}
last_indent = tokens[0][2]
indent = [last_indent[1]]
last_token_multiline = None
line = None
last_line = ''
last_line_begins_with_multiline = False
for token_type, text, start, end, line in tokens:
newline = row < start[0] - first_row
if newline:
row = start[0] - first_row
newline = (not last_token_multiline and
token_type not in (tokenize.NL, tokenize.NEWLINE))
last_line_begins_with_multiline = last_token_multiline
if newline:
# This is the beginning of a continuation line.
last_indent = start
# Record the initial indent.
rel_indent[row] = pep8.expand_indent(line) - indent_level
# Identify closing bracket.
close_bracket = (token_type == tokenize.OP and text in ']})')
# Is the indent relative to an opening bracket line?
for open_row in reversed(open_rows[depth]):
hang = rel_indent[row] - rel_indent[open_row]
hanging_indent = hang in valid_hangs
if hanging_indent:
break
if hangs[depth]:
hanging_indent = (hang == hangs[depth])
visual_indent = (not close_bracket and hang > 0 and
indent_chances.get(start[1]))
if close_bracket and indent[depth]:
# Closing bracket for v
|
e-gob/plataforma-kioscos-autoatencion
|
scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/modules/cloud/amazon/rds.py
|
Python
|
bsd-3-clause
| 56,122
| 0.002441
|
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: rds
version_added: "1.3"
short_description: create, delete, or modify an Amazon rds instance
description:
- Creates, deletes, or modifies rds instances. When creating an instance it can be either a new instance or a read-only replica of an existing
instance. This module has a dependency on python-boto >= 2.5. The 'promote' command requires boto >= 2.18.0. Certain features such as tags rely
on boto.rds2 (boto >= 2.26.0)
options:
command:
description:
- Specifies the action to take. The 'reboot' option is available starting at version 2.0
required: true
choices: [ 'create', 'replicate', 'delete', 'facts', 'modify' , 'promote', 'snapshot', 'reboot', 'restore' ]
instance_name:
description:
- Database instance identifier. Required except when using command=facts or command=delete on just a snapshot
required: false
default: null
source_instance:
description:
- Name of the database to replicate. Used only when command=replicate.
required: false
default: null
db_engine:
description:
- The type of database. Used only when command=create.
- mariadb was added in version 2.2
required: false
default: null
choices: ['mariadb', 'MySQL', 'oracle-se1', 'oracle-se2', 'oracle-se', 'oracle-ee',
'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web', 'postgres', 'aurora']
size:
description:
- Size in gigabytes of the initial storage for the DB instance. Used only when command=create or command=modify.
required: false
default: null
instance_type:
description:
- The instance type of the database. Must be specified when command=create. Optional when command=replicate, command=modify or command=restore.
If not specified then the replica inherits the same instance type as the source instance.
required: false
default: null
username:
description:
- Master database username. Used only when command=create.
required: false
default: null
password:
description:
- Password for the master database username. Used only when command=create or command=modify.
required: false
default: null
region:
description:
- The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
required: true
aliases: [ 'aws_region', 'ec2_region' ]
db_name:
description:
- Name of a database to create within the instance. If not specified then no database is created. Used only when command=create.
required: false
default: null
engine_version:
description:
- Version number of the database engine to use. Used only when command=create. If not specified then the current Amazon RDS default engine version is used
required: false
default: null
parameter_group:
description:
- Name of the DB parameter group to associate with this instance. If omitted then the RDS default DBParameterGroup will be used. Used only
when command=create or command=modify.
required: false
default: null
license_model:
description:
- The license model for this DB instance. Used only when command=create or command=restore.
required: false
default: null
choices: [ 'license-included', 'bring-your-own-license', 'general-public-license', 'postgresql-license' ]
multi_zone:
description:
- Specifies if this is a Multi-availability-zone deployment. Can not be used in conjunction with zone parameter. Used only when command=create or
command=modify.
choices: [ "yes", "no" ]
required: false
default: null
iops:
description:
- Specifies the number of IOPS for the instance. Used only when command=create or command=modify. Must be an integer greater than 1000.
required: false
default: null
security_groups:
description:
- Comma separated list of one or more security groups. Used only when command=create or command=modify.
required: false
default: null
vpc_security_groups:
description:
- Comma separated list of one or more vpc security group ids. Also requires `subnet` to be specified. Used only when command=create or command=modify.
required: false
default: null
port:
description:
- Port number that the DB instance uses for connections. Used only when command=create or command=replicate.
- Prior to 2.0 it always defaults to null and the API would use 3306, it had to be set to other DB default values when not using MySql.
Starting at 2.0 it automatically defaults to what is expected for each C(db_engine).
required: false
default: 3306 for mysql, 1521 for Oracle, 1433 for SQL Server, 5432 for PostgreSQL.
upgrade:
description:
- Indicates that minor version upgrades should be applied automatically. Used only when command=create or command=replicate.
required: false
default: no
choices: [ "yes", "no" ]
option_group:
description:
- The name of the option group to use. If not specified then the default option group is used. Used only when command=create.
required: false
default: null
maint_window:
description:
- >
Maintenance window in format of ddd:hh24:mi-ddd:hh24:mi. (Example: Mon:22:00-Mon:23:15) If not specified then a random maintenance window is
assigned. Used only when command=create or command=modify.
required: false
default: null
backup_window:
description:
- Backup window in format of hh24:mi-hh24:mi. If not specified then a random backup window is assigned. Used only when command=create or command=modify.
required: false
default: null
backup_retention:
description:
- >
Number of days backups are retained. Set to 0 to disable backups. Default is 1 day. Valid range: 0-35. Used only wh
|
en command=create or
command=modify.
required: false
default: null
zone:
description:
- availability zone in which to launch the instance. Used only when command=create, command=replicate or command=restore.
|
required: false
default: null
aliases: ['aws_zone', 'ec2_zone']
subnet:
description:
- VPC subnet group. If specified then a VPC instance is created. Used only when command=create.
required: false
default: null
snapshot:
description:
- Name of snapshot to take. When command=delete, if no snapshot name is provided then no snapshot is taken. If used with command=delete with
no instance_name, the snapshot is deleted. Used with command=facts, command=delete or command=snapshot.
required: false
default: null
aws_secret_key:
description:
- AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
required: false
aliases: [ 'ec2_secret_key', 'secret_key' ]
aws_access_key:
description:
- AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_access_key', 'access_key' ]
wait:
description:
- When command=create, replicate, modify or restore then wait for the database to enter the 'available' state. When command=delete wait for
|
sauloal/PiCastPy
|
sqlalchemy/orm/interfaces.py
|
Python
|
mit
| 28,330
| 0.000671
|
# orm/interfaces.py
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
Contains various base classes used throughout the ORM.
Defines the now deprecated ORM extension classes as well
as ORM internals.
Other than the deprecated extensions, this module and the
classes within should be considered mostly private.
"""
from __future__ import absolute_import
from .. import exc as sa_exc, util, inspect
from ..sql import operators
from collections import deque
orm_util = util.importlater('sqlalchemy.orm', 'util')
collections = util.importlater('sqlalchemy.orm', 'collections')
__all__ = (
'AttributeExtension',
'EXT_CONTINUE',
'EXT_STOP',
'ExtensionOption',
'InstrumentationManager',
'LoaderStrategy',
'MapperExtension',
'MapperOption',
'MapperProperty',
'PropComparator',
'PropertyOption',
'SessionExtension',
'StrategizedOption',
'StrategizedProperty',
)
EXT_CONTINUE = util.symbol('EXT_CONTINUE')
EXT_STOP = util.symbol('EXT_STOP')
ONETOMANY = util.symbol('ONETOMANY')
MANYTOONE = util.symbol('MANYTOONE')
MANYTOMANY = util.symbol('MANYTOMANY')
from .deprecated_interfaces import AttributeExtension, \
SessionExtension, \
MapperExtension
NOT_EXTENSION = util.symbol('NOT_EXTENSION')
"""Symbol indicating an :class:`_InspectionAttr` that's
not part of sqlalchemy.ext.
Is assigned to the :attr:`._InspectionAttr.extension_type`
attribute.
"""
class _InspectionAttr(object):
"""A base class applied to all ORM objects that can be returned
by the :func:`.inspect` function.
The attributes defined here allow the usage of simple boolean
checks to test basic facts about the object returned.
While the boolean checks here are basically the same as using
the Python isinstance() function, the flags here can be used without
the need to import all of these classes, and also such that
the SQLAlchemy class system can change while leaving the flags
here intact for forwards-compatibility.
"""
is_selectable = False
"""Return True if this object is an instance of :class:`.Selectable`."""
is_aliased_class = False
"""True if this object is an instance of :class:`.AliasedClass`."""
is_instance = False
"""True if this object is an instance of :class:`.InstanceState`."""
is_mapper = False
"""True if this object is an instance of :class:`.Mapper`."""
is_property = False
"""True if this object is an instance of :class:`.MapperProperty`."""
is_attribute = False
"""True if this object is a Python :term:`descriptor`.
This can refer to one of many types. Usually a
    :class:`.QueryableAttribute` which handles attribute events on behalf
of a :class:`.MapperProperty`. But can also be an extension type
such as :class:`.AssociationProxy` or :class:`.hybrid_property`.
The :attr:`._InspectionAttr.extension_type` will refer to a constant
identifying the specific subtype.
.. seealso::
:attr:`.Mapper.all_orm_descriptors`
"""
is_clause_element = False
"""True if this object is an instance of :class:`.ClauseElement`."""
extension_type = NOT_EXTENSION
"""The extension type, if any.
Defaults to :data:`.interfaces.NOT_EXTENSION`
.. versionadded:: 0.8.0
.. seealso::
:data:`.HYBRID_METHOD`
:data:`.HYBRID_PROPERTY`
:data:`.ASSOCIATION_PROXY`
"""
class _MappedAttribute(object):
"""Mixin for attributes which should be replaced by mapper-assigned
attributes.
"""
class MapperProperty(_MappedAttribute, _InspectionAttr):
"""Manage the relationship of a ``Mapper`` to a single class
attribute, as well as that attribute as it appears on individual
instances of the class, including attribute instrumentation,
attribute access, loading behavior, and dependency calculations.
The most common occurrences of :class:`.MapperProperty` are the
mapped :class:`.Column`, which is represented in a mapping as
an instance of :class:`.ColumnProperty`,
and a reference to another class produced by :func:`.relationship`,
represented in the mapping as an instance of
:class:`.RelationshipProperty`.
"""
cascade = frozenset()
"""The set of 'cascade' attribute names.
This collection is checked before the 'cascade_iterator' method is called.
"""
is_property = True
def setup(self, context, entity, path, adapter, **kwargs):
"""Called by Query for the purposes of constructing a SQL statement.
Each MapperProperty associated with the target mapper processes the
statement referenced by the query context, adding columns and/or
criterion as appropriate.
"""
pass
def create_row_processor(self, context, path,
mapper, row, adapter):
"""Return a 3-tuple consisting of three row processing functions.
"""
return None, None, None
def cascade_iterator(self, type_, state, visited_instances=None,
halt_on=None):
"""Iterate through instances related to the given instance for
a particular 'cascade', starting with this MapperProperty.
        Return an iterator of 3-tuples (instance, mapper, state).
Note that the 'cascade' collection on this MapperProperty is
checked first for the given type before cascade_iterator is called.
See PropertyLoader for the related instance implementation.
"""
return iter(())
def set_parent(self, parent, init):
self.parent = parent
def instrument_class(self, mapper): # pragma: no-coverage
raise NotImplementedError()
@util.memoized_property
def info(self):
"""Info dictionary associated with the object, allowing user-defined
data to be associated with this :class:`.MapperProperty`.
The dictionary is generated when first accessed. Alternatively,
it can be specified as a constructor argument to the
:func:`.column_property`, :func:`.relationship`, or :func:`.composite`
functions.
.. versionadded:: 0.8 Added support for .info to all
:class:`.MapperProperty` subclasses.
.. seealso::
:attr:`.QueryableAttribute.info`
:attr:`.SchemaItem.info`
"""
return {}
_configure_started = False
_configure_finished = False
def init(self):
"""Called after all mappers are created to assemble
relationships between mappers and perform other post-mapper-creation
initialization steps.
"""
self._configure_started = True
self.do_init()
self._configure_finished = True
@property
def class_attribute(self):
"""Return the class-bound descriptor corresponding to this
MapperProperty."""
return getattr(self.parent.class_, self.key)
def do_init(self):
"""Perform subclass-specific initialization post-mapper-creation
steps.
This is a template method called by the ``MapperProperty``
object's init() method.
"""
pass
def post_instrument_class(self, mapper):
"""Perform instrumentation adjustments that need to occur
after init() has completed.
"""
pass
def is_primary(self):
"""Ret
|
urn True if this ``MapperProperty``'s mapper is the
primary mapper for its class.
This flag is used to indicate that the ``MapperProperty`` can
define attribute instrumentation for the class at the class
level (as opposed to the individual instance level).
"""
return not self.parent.non_primary
def merge(self, session, source_state, source_dict, dest_state,
dest_dict, load, _recursive):
"""Merge
|
the attribute represented by this ``MapperProperty``
from source to destination object"""
|
lahwaacz/tvnamer
|
tvnamer/renamer.py
|
Python
|
unlicense
| 4,157
| 0.001203
|
#!/usr/bin/env python
import os
import shutil
import logging
from unicode_helper import p
__all__ = ["Renamer"]
def log():
"""Returns the logger for current file
"""
return logging.getLogger(__name__)
def same_partition(f1, f2):
"""Returns True if both files or directories are on the same partition
"""
return os.stat(f1).st_dev == os.stat(f2).st_dev
def delete_file(fpath):
"""On OS X: Trashes a path using the Finder, via OS X's Scripting Bridge.
On other platforms: unlinks file.
"""
try:
from AppKit import NSURL
from ScriptingBridge import SBApplication
except ImportError:
p("Deleting %s" % fpath)
log().debug("Deleting %r" % fpath)
os.unlink(fpath)
else:
p("Trashing %s" % fpath)
log().debug("Trashing %r" % fpath)
targetfile = NSURL.fileURLWithPath_(fpath)
finder = SBApplication.applicationWithBundleIdentifier_("com.apple.Finder")
items = finder.items().objectAtLocation_(targetfile)
items.delete()
def rename_file(old, new):
"""Rename 'old' file to 'new'. Both files must be on the same partition.
Preserves access and modification time.
"""
p("Renaming %s to %s" % (old, new))
log().debug("Renaming %r to %r" % (old, new))
stat = os.stat(old)
os.rename(old, new)
os.utime(new, (stat.st_atime, stat.st_mtime))
def copy_file(old, new):
"""Copy 'old' file to 'new'.
"""
p("Copying %s to %s" % (old, new))
log().debug("Copying %r to %r" % (old, new))
|
shutil.copyfile(old, new)
shutil.copystat(old, new)
def symlink_file(target, name):
"""Create symbolic
|
link named 'name' pointing to 'target'.
"""
p("Creating symlink %s to %s" % (name, target))
log().debug("Creating symlink %r to %r" % (name, target))
os.symlink(target, name)
class Renamer(object):
"""Deals with renaming of files
"""
def __init__(self, filename):
self.filename = os.path.abspath(filename)
def rename(self, new_fullpath, force=False, always_copy=False, always_move=False, leave_symlink=False, create_dirs=True):
"""Moves the file to a new path.
If it is on the same partition, it will be moved (unless always_copy is True)
If it is on a different partition, it will be copied, and the original
only deleted if always_move is True.
If the target file already exists, it will raise OSError unless force is True.
If it was moved, a symlink will be left behind with the original name
pointing to the file's new destination if leave_symlink is True.
"""
new_dir = os.path.dirname(new_fullpath)
if create_dirs:
p("Creating directory %s" % new_dir)
try:
os.makedirs(new_dir)
except OSError, e:
if e.errno != 17:
raise
if os.path.exists(new_fullpath):
# If the destination exists, raise exception unless force is True
if not force:
raise OSError("File %s already exists, not forcefully moving %s" % (
new_fullpath, self.filename))
if same_partition(self.filename, new_dir):
if always_copy:
# Same partition, but forced to copy
copy_file(self.filename, new_fullpath)
else:
# Same partition, just rename the file to move it
rename_file(self.filename, new_fullpath)
# Leave a symlink behind if configured to do so
if leave_symlink:
symlink_file(new_fullpath, self.filename)
else:
# File is on different partition (different disc), copy it
copy_file(self.filename, new_fullpath)
if always_move:
# Forced to move file, we just trash old file
delete_file(self.filename)
# Leave a symlink behind if configured to do so
if leave_symlink:
symlink_file(new_fullpath, self.filename)
self.filename = new_fullpath
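# Minimal usage sketch (hypothetical paths):
#
#     renamer = Renamer("/downloads/show.s01e01.avi")
#     renamer.rename("/library/Show/Show - 01x01.avi", leave_symlink=True)
#
# The file is renamed in place on the same partition (or copied across partitions),
# and if it was moved a symlink with the old name is left pointing at the new path.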
|
badbytes/pymeg
|
gui/gtk/data_editor.py
|
Python
|
gpl-3.0
| 30,723
| 0.01494
|
#!/usr/bin/python2
#!/usr/bin/env python
#
# Copyright 2010 dan collins <danc@badbytes.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
'''
Requires the following...
srate,timeaxes,data,chanlabels,
'''
import sys,os
from gtk import gdk
from numpy import * #fromstring, arange, int16, float, log10
from matplotlib import rcParams
from meg import nearest
from pylab import xticks,ion
from matplotlib.backends.backend_gtkagg import FigureCanvasGTKAgg as \
FigureCanvas
import matplotlib.cm as
|
cm
from matplotlib.figure import Figure
from matplotlib.lines import Line2D
#from meg import megcontour_gtk
from pdf2py import pdf, readwrite
from gui.gtk
|
import contour as contour_gtk
from gui.gtk import meg_assistant,event_process#,offset_correct
try:
import pygtk
pygtk.require("2.0")
except:
pass
try:
import gtk
import gtk.glade
except:
print("GTK Not Availible")
sys.exit(1)
class setup_gui:
def __init__(self):
self.builder = gtk.Builder()
self.builder.add_from_file(os.path.splitext(__file__)[0]+".glade")
self.window = self.builder.get_object("window")
dic = {
"on_toolbutton_refresh_clicked" : self.generate_testdata,
"on_button1_clicked" : self.generate_testdata,
"on_vboxMain_button_press_event" : self.button_press_event,
"on_vboxMain_button_release_event" : self.button_release_event,
"on_vboxMain_drag" : self.drag_begin,
"on_vboxMain_motion_notify_event" : self.drag_begin,
"on_toolbar_clear_clicked" : self.clear_selections,
"on_toolbar_zoomin_clicked" : self.zoomin_time,
"on_toolbar_zoomout_clicked" : self.zoomout_time,
"on_go_back_clicked" : self.page_back,
"on_go_forward_clicked" : self.page_forward,
"on_toolbutton_setup_toggled" : self.preferences_open,
"on_button_channel_apply_clicked" : self.channel_selection_apply,
"set_channel_groups" : self.set_channel_groups,
"showpopupmenu" : self.showpopupmenu,
"on_toolbar_plot_clicked" : self.plot_contour,
"on_plot_contour_activate" : self.plot_contour,
"on_button_delete_selection_clicked" : self.event_selection_delete,
"gtk_widget_hide" : self.hideinsteadofdelete,
"on_button_display_apply_clicked": self.display_apply,
"on_go_up_clicked" : self.page_up,
"on_go_down_clicked" : self.page_down,
"on_toolbutton_load_clicked" : self.load_data,
"on_menu_offset_correct_clicked" : self.offset_correct,
"on_button_epoch_clicked" : self.add_selections_to_event_process,
"on_store_event_clicked" : self.store_event,
"on_menu_save_noise_activate" : self.store_noise,
"on_menu_save_event_activate" : self.store_event,
"on_key_press_event" : self.key_press_event,
}
self.builder.connect_signals(dic)
try: self.prefs = readwrite.readdata(os.getenv('HOME')+'/.pymeg.pym')
except IOError: self.prefs = {}; readwrite.writedata(self.prefs, os.getenv('HOME')+'/.pymeg')
try:
self.line_r,self.line_g,self.line_b = self.prefs['LineColor'][0],self.prefs['LineColor'][1],self.prefs['LineColor'][2]
self.back_r,self.back_g,self.back_b = self.prefs['BackColor'][0],self.prefs['BackColor'][1],self.prefs['BackColor'][2]
except:
self.line_r,self.line_g,self.line_b = 1.,1.,1.
self.back_r,self.back_g,self.back_b = .9,.9,.9
self.color = (self.line_r,self.line_g,self.line_b)
self.create_draw_frame('none')
self.create_spec_frame('none')
self.create_csd_frame('none')
self.space = 0
#self.generate_testdata(None)
self.preferences_open(None)
def printtest(self,widget):
print 'something'
def store_noise(self,widget):
print widget,'wid',widget.get_parent().get_name()
self.callback(widget)
def store_event(self,widget):
print widget,'wid',widget.get_parent().get_name()
self.callback(widget)
def create_draw_frame(self,widget):
self.fig = Figure(figsize=[100,100], dpi=40)
self.canvas = FigureCanvas(self.fig)
self.canvas.connect("scroll_event", self.scroll_event)
self.canvas.connect("key-press-event", self.key_press_event)
#self.canvas.connect('button_press_event', self.button_press_event)
self.canvas.show()
self.figure = self.canvas.figure
self.axes = self.fig.add_axes([0.045, 0.05, 0.93, 0.925], \
axisbg=(self.back_r,self.back_g,self.back_b))
#axisbg='#FFFFCC')
self.vb = self.builder.get_object("vbox3")
self.vb.pack_start(self.canvas, gtk.TRUE, gtk.TRUE)
self.vb.show()
def create_spec_frame(self,widget):
self.specfig = Figure(figsize=[10,10], dpi=40)
self.specfig.text(0.25,0.5,'Middle Click Channel for Specgram',\
fontsize=20)
self.speccanvas = FigureCanvas(self.specfig)
self.speccanvas.show()
self.specfigure = self.speccanvas.figure
self.specaxes = self.specfig.add_axes([0.045, 0.05, 0.93, 0.925], \
axisbg=(self.back_r,self.back_g,self.back_b))
#self.specaxes.axis('off')
self.vb2 = self.builder.get_object("vbox8")
self.vb2.pack_end(self.speccanvas, gtk.TRUE, gtk.TRUE)
self.vb2.show()
def create_csd_frame(self,widget):
self.csdfig = Figure(figsize=[10,10], dpi=40)
self.csdfig.text(0.25,0.5,'Middle Click Channel for CSD',fontsize=20)
self.csdcanvas = FigureCanvas(self.csdfig)
self.csdcanvas.show()
self.csdfigure = self.csdcanvas.figure
self.csdaxes = self.csdfig.add_axes([0.045, 0.05, 0.93, 0.925], \
axisbg=(self.back_r,self.back_g,self.back_b))
#self.csdaxes.axis('off')
self.vb3 = self.builder.get_object("vbox9")
self.vb3.pack_end(self.csdcanvas, gtk.TRUE, gtk.TRUE)
self.vb3.show()
def data_loaded_setup(self):
self.channel_tree(None)
self.builder.get_object("spinbutton1").set_range(0,self.numchannels)
self.builder.get_object("spinbutton1").set_value(self.numchannels)
self.builder.get_object("spinbutton2").set_range(self.t[0],self.t[-1])
self.builder.get_object("spinbutton2").set_value(self.t[0])
self.builder.get_object("spinbutton3").set_range(self.t[0],self.t[-1])
#if self.t[-1] - self.t[0] > 1: #alot of time, save time in plotting and set low
if len(self.t) > 1000:
self.builder.get_object("spinbutton3").set_value(self.t[1000])
print '.....reducing time var'
else:
print '.....showing all time'
self.builder.get_object("spinbutton3").set_value(self.t[-1])
#self.builder.get_object("spinbutton3").set_value(self.t[-1])
#self.builder.get_object("spinbutton5").set_value(self.scalefact)
self.builder.get_object("entry1").set_text(str(self.space))
self.builder.get_object("entry2").set_text(str(self.scalefact))
def preferences_open(self,widget):
self.win_prefs = self.builder.get_object("window_prefs")
#try: self.prefs = readwrite.readdata(os.getenv('HOME')+'/.pymeg.pym')
#except IOError: self.prefs = {}; readwrite.writedata(self.prefs,
|