repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
Lex0ne/trafaret_validator | tests/test_metaclass.py | Python | mit | 1,906 | 0 | from unittest import TestCase
import trafaret as t
from trafaret_validator import TrafaretValidator
class ValidatorForTest(TrafaretValidator):
    # ``t_value`` is a trafaret instance and is expected to be collected into
    # the class's ``_validators`` mapping by the metaclass; ``value`` is a
    # plain attribute and must be left out of it (asserted in TestMetaclass).
    t_value = t.Int()
    # NOTE(review): the trailing '|' below is a dataset column separator
    # spliced into this extract, not part of the original source.
    value = 5 |
class ValidatorForTest2(ValidatorForTest):
    # Subclass used to check that inherited validators (``t_value``) are
    # merged with newly declared ones (``test``).
    test = t.String()
class TestMetaclass(TestCase):
    """Checks the TrafaretValidator metaclass machinery.

    The metaclass should collect trafaret-typed class attributes into
    ``_validators``, build a combined ``_trafaret``, ignore plain attributes,
    and start with empty ``_data`` / ``_errors``.
    """

    def test_metaclass(self):
        """Validator collection and state on a directly-declared class."""
        self.assertIsInstance(ValidatorForTest._validators, dict,
                              'Value should be instance of dict')
        # NOTE(review): the statement below is garbled in this extract — a
        # '|' column separator was spliced between its arguments.
        self.assertIn('t_value', ValidatorForTest._validators,
                      | 'Value should be in _validators')
        self.assertNotIn('value', ValidatorForTest._validators,
                         'Value should not be in _validators')
        self.assertIsInstance(ValidatorForTest._trafaret, t.Trafaret,
                              'Value should be instance of Trafaret')
        self.assertFalse(ValidatorForTest._data,
                         '_data should be empty')
        self.assertFalse(ValidatorForTest._errors,
                         '_data should be empty')

    def test_inheritance(self):
        """Subclass sees both inherited and newly-declared validators."""
        self.assertIsInstance(ValidatorForTest2._validators, dict,
                              'Value should be instance of dict')
        self.assertIn('t_value', ValidatorForTest2._validators,
                      'Value should be in _validators')
        self.assertIn('test', ValidatorForTest2._validators,
                      'Value should be in _validators')
        self.assertNotIn('value', ValidatorForTest2._validators,
                         'Value should not be in _validators')
        self.assertIsInstance(ValidatorForTest2._trafaret, t.Trafaret,
                              'Value should be instance of Trafaret')
        self.assertFalse(ValidatorForTest2._data,
                         '_data should be empty')
        self.assertFalse(ValidatorForTest2._errors,
                         '_data should be empty')
|
adalmieres/scriptsIBMConnections | IBMConnectionsSocialGraph.py | Python | gpl-3.0 | 8,111 | 0.041435 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# dépendances
import requests
import xml.dom.minidom
import sys
import signal
import os
import getopt
from queue import Queue
from threading import Thread
import time
class SetQueue(Queue):
    """A FIFO queue that silently drops items it has ever seen before.

    Every item ever enqueued is remembered in ``self.all_items``; once an
    item has been queued it will never be queued again, even after it has
    been consumed.  Used here so the crawler processes each URL / userid
    exactly once.
    """

    def _init(self, maxsize):
        # Hook invoked by Queue.__init__ while the queue's mutex is held.
        super()._init(maxsize)
        self.all_items = set()

    def _put(self, item):
        # Enqueue only never-before-seen items; duplicates are dropped.
        if item in self.all_items:
            return
        self.all_items.add(item)
        super()._put(item)
def signal_handler(signum, frame):
    """Handle SIGINT (Ctrl+C) by reporting the interrupt and exiting cleanly.

    Registered via ``signal.signal`` in main(); the handler is invoked
    positionally, so the first parameter is renamed ``signum`` to stop it
    shadowing the imported ``signal`` module.
    """
    print('You pressed Ctrl+C!')
    sys.exit(0)
def usage():
    """Print command-line usage for this script to stdout."""
    # A space is required after argv[0]; the original concatenation printed
    # "script.py-h --help ..." with the program name glued to the first flag.
    print("usage : " + sys.argv[0] +
          " -h --help -s --server someurl.com -u --user login -p --password password")
def getAtomFeed(url, login, pwd):
    """Fetch ``http://<url>`` with HTTP basic auth and parse it as XML.

    Retries up to MAX_TRY times on request failure before giving up.

    :param url: server URL and path, without the ``http://`` scheme
    :param login: HTTP basic-auth user name
    :param pwd: HTTP basic-auth password
    :return: an ``xml.dom.minidom`` Document for the Atom feed
    :raises RuntimeError: when every attempt fails or the response body is
        not well-formed XML
    """
    MAX_TRY = 10
    essai = 0
    # Fetch the atom document, retrying on transient network errors only
    # (the original bare ``except`` also swallowed KeyboardInterrupt).
    while essai < MAX_TRY:
        try:
            r = requests.get('http://' + url, auth=(login, pwd), timeout=10)
        except requests.RequestException:
            essai += 1
            continue
        break
    else:
        # ``raise ('text')`` in the original raised a plain string, which is
        # itself a TypeError in Python 3 — raise a real exception instead.
        raise RuntimeError('Erreur lors de la requête')
    # Parse the atom document.
    try:
        dom = xml.dom.minidom.parseString(r.text)
    except Exception as exc:
        raise RuntimeError('Erreur lors du parsing du document Atom') from exc
    return dom
def getManagerInfo(atomFeed):
try:
entries = atomFeed.getElementsByTagName('entry')[1]
except:
return None
try:
managerId = entries.getElementsByTagName('snx:userid')[0]
return managerId.firstChild.data
except:
return None
def buildUrlSearchList(server, login, pwd, q):
    """Seed *q* with one profile-search URL per page of results.

    Runs one wildcard search per letter ("a*", "b*", ... with page size 250)
    against the Connections profiles API, reads opensearch:totalResults,
    and enqueues a URL for every result page.

    NOTE(review): the two lines marked below are garbled in this extract
    ("lo | gin" and "getElementsByT | agName" — a dataset column separator
    was spliced in); presumably they read ``getAtomFeed(url, login, pwd)``
    and ``getElementsByTagName`` — confirm against the original file.
    """
    # var
    alphabet = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']
    #alphabet = ['a']
    for i in alphabet:
        url = server + '/profiles/atom/search.do?search=' + i + '*&ps=250'
        # garbled line — see docstring note
        dom = getAtomFeed(url, lo | gin, pwd)
        # garbled line — see docstring note
        totalResult = dom.getElementsByT | agName('opensearch:totalResults')[0]
        totalResult = int(totalResult.firstChild.data)
        if totalResult > 250:
            # More than one page: enqueue "&page=1" .. "&page=nbPage-1".
            nbPage = int(float(totalResult) / 250) + 1
            for n in range(1,nbPage,1):
                item = url + "&page=" + str(n)
                q.put(item)
        else:
            nbPage = 1
            q.put(url)
def getUserIdsWorker(login, pwd, qin, qout):
    """Worker thread: resolve search-result URLs from *qin* into userids.

    Each <snx:userid> found in a fetched page is pushed onto *qout*.
    Terminates when it dequeues the ``None`` shutdown sentinel.

    :param login: HTTP basic-auth user for getAtomFeed
    :param pwd: HTTP basic-auth password
    :param qin: queue of search URLs (strings), ``None`` to stop
    :param qout: queue receiving userid strings
    """
    while True:
        url = qin.get()
        if url is None:
            break
        qin.task_done()
        try:
            dom = getAtomFeed(url, login, pwd)
        except Exception:
            # Fetch/parse failed even after retries: skip this URL.
            continue
        # The original iterated with an unused enumerate() index.
        for item in dom.getElementsByTagName('snx:userid'):
            qout.put(item.firstChild.data)
def _personInfo(element):
    """Extract {userid, name, email} from an <author>/<contributor> element.

    Missing <name>/<email> default to ""; a missing <snx:userid> raises
    IndexError, matching the original behaviour (the worker dies loudly).
    """
    try:
        name = element.getElementsByTagName('name')[0].firstChild.data
    except (IndexError, AttributeError):
        name = ""
    try:
        email = element.getElementsByTagName('email')[0].firstChild.data
    except (IndexError, AttributeError):
        email = ""
    userid = element.getElementsByTagName('snx:userid')[0].firstChild.data
    return {"userid": userid, "name": name, "email": email}


def getRelationsWorker(server, login, pwd, qin, qout, getManager, qmgmt):
    """Worker thread: fetch each userid's colleague relations.

    For every userid taken from *qin*, downloads the colleague-connection
    feed and pushes onto *qout*: the author's info dict, the contributor's
    info dict, and a CSV relation line ``"author","contrib","<(date,Infinity)>"``.
    When *getManager* is true, also resolves the user's direct manager and
    pushes ``"userid,managerid"`` onto *qmgmt*.  Stops on the ``None`` sentinel.
    """
    while True:
        userid = qin.get()
        if userid is None:
            break
        qin.task_done()
        url = (server + '/profiles/atom/connections.do?userid=' + userid +
               '&connectionType=colleague&ps=250')
        try:
            dom = getAtomFeed(url, login, pwd)
        except Exception:
            continue
        feed = dom.firstChild
        for entry in feed.getElementsByTagName('entry'):
            # Relation date, truncated to YYYY-MM-DD.
            dateRelation = entry.getElementsByTagName('updated')[0].firstChild.data[:10]
            # Author and contributor share the same parsing logic.
            authorInfo = _personInfo(entry.getElementsByTagName('author')[0])
            contribInfo = _personInfo(entry.getElementsByTagName('contributor')[0])
            relation = ('"' + authorInfo["userid"] + '","' + contribInfo["userid"] +
                        '","<(' + str(dateRelation) + ',Infinity)>"')
            qout.put(authorInfo)
            qout.put(contribInfo)
            qout.put(relation)
        # Optionally resolve the reporting chain for this user.
        if getManager:
            url = server + "/profiles/atom/reportingChain.do?userid=" + userid
            rc = getAtomFeed(url, login, pwd)
            managerId = getManagerInfo(rc)
            if managerId is not None:
                qmgmt.put(str(userid) + "," + str(managerId))
def printStatusThread(q0, q1, q2, q3):
    """Refresh a one-line queue-size status display once a second, forever.

    Intended to run as a daemon thread; it never returns.  The original
    assigned an unused ``strtime`` local, removed here.
    """
    while True:
        # '\r' plus ANSI clear-to-end-of-line redraws the status in place.
        sys.stdout.write('\r\x1b[K')
        sys.stdout.write("urls:" + str(q0.qsize()) + " | ")
        sys.stdout.write("userids:" + str(q1.qsize()) + " | ")
        sys.stdout.write("user infos:" + str(q2.qsize()) + " | ")
        sys.stdout.write("manager infos:" + str(q3.qsize()))
        sys.stdout.flush()
        time.sleep(1)
def writeFileThread(usersFilename, relationsFilename, qin):
    """Writer thread: stream user and relation records from *qin* to CSV.

    Dict items are user records, written (deduplicated) to
    ``<usersFilename>.csv``; string items are relation lines, appended to
    ``<relationsFilename>.csv``.  Stops on the ``None`` sentinel.
    """
    # ``with`` guarantees both files are flushed and closed even if the
    # thread dies with an exception (the original only closed them on the
    # sentinel path).
    with open(usersFilename + ".csv", "w") as u, \
         open(relationsFilename + ".csv", "w") as r:
        u.write("Id,Label,eMail\n")
        r.write("Source,Target,Time Interval\n")
        # Set gives O(1) dedup lookups; the original list was O(n) per item.
        doneUsers = set()
        while True:
            data = qin.get()
            if data is None:
                break
            if isinstance(data, dict):
                line = str(data["userid"]) + ',' + str(data["name"]) + ',' + str(data["email"])
                if line not in doneUsers:
                    u.write(line + "\n")
                    doneUsers.add(line)
            elif isinstance(data, str):
                r.write(str(data) + "\n")
            qin.task_done()
def writeManagerFileThread(managerFilename, qin):
    """Writer thread: stream ``userid,managerid`` lines to <managerFilename>.csv.

    Stops on the ``None`` sentinel.  Uses ``with`` so the file is flushed
    and closed on exit — the original leaked the handle when the sentinel
    arrived, risking lost buffered data.
    """
    with open(managerFilename + ".csv", "w") as m:
        m.write("Source,Target\n")
        while True:
            data = qin.get()
            if data is None:
                break
            m.write(str(data) + "\n")
            qin.task_done()
def main(argv):
    """Crawl an IBM Connections profiles service and dump its social graph.

    Pipeline (all stages run concurrently on daemon threads):
      1. buildUrlSearchList seeds urlQueue with search-result page URLs;
      2. 10 getUserIdsWorker threads turn pages into userids;
      3. 20 getRelationsWorker threads fetch each user's colleague
         relations (and, with -m, their manager);
      4. writer threads stream users.csv / relations.csv / manager.csv.
    """
    # global
    serverUrl = ""
    login = ""
    pwd = ""
    getManager = False
    # SetQueue deduplicates, so each URL / userid is processed at most once.
    urlQueue = SetQueue(maxsize=5000)
    userIdsQueue = SetQueue(maxsize=5000)
    userInfosQueue = Queue(maxsize=5000)
    userManagerQueue = Queue(maxsize=5000)
    # signal handler
    signal.signal(signal.SIGINT, signal_handler)
    # retrieve command-line arguments
    try:
        opts, args = getopt.getopt(argv, "hs:u:p:m", ["help", "server=", "user=", "password=", "manager"])
        for opt, arg in opts:
            if opt in ("-h", "--help"):
                usage()
                sys.exit()
            elif opt in ("-s", "--server"):
                serverUrl = arg
            elif opt in ("-u", "--user"):
                login = arg
            elif opt in ("-p", "--password"):
                pwd = arg
            elif opt in ("-m", "--manager"):
                getManager = True
    except:
        # NOTE(review): this bare except also catches the SystemExit raised
        # by the -h branch above, so usage() prints twice for -h;
        # getopt.GetoptError looks like the intended catch — confirm.
        usage()
        sys.exit()
    # threading get userinfo worker
    userIdWorker = []
    for i in range(10):
        w1 = Thread(target=getUserIdsWorker, args=(login, pwd, urlQueue, userIdsQueue,))
        w1.setDaemon(True)  # NOTE(review): deprecated alias for w1.daemon = True
        w1.start()
        userIdWorker.append(w1)
    # threading get relations worker
    userInfoWorker = []
    for i in range(20):
        w2 = Thread(target=getRelationsWorker, args=(serverUrl, login, pwd, userIdsQueue, userInfosQueue, getManager, userManagerQueue,))
        w2.setDaemon(True)
        w2.start()
        userInfoWorker.append(w2)
    # thread to print size of queue
    w3 = Thread(target=printStatusThread, args=(urlQueue, userIdsQueue, userInfosQueue, userManagerQueue,))
    w3.setDaemon(True)
    w3.start()
    # thread to write files
    w4 = Thread(target=writeFileThread, args=("users", "relations", userInfosQueue,))
    w4.setDaemon(True)
    w4.start()
    if getManager == True:
        w5 = Thread(target=writeManagerFileThread, args=("manager", userManagerQueue,))
        w5.setDaemon(True)
        w5.start()
    # build Queue url list (retried wholesale on any failure)
    MAX_TRY = 10
    essai = 0
    while essai < MAX_TRY:
        try:
            buildUrlSearchList(serverUrl, login, pwd, urlQueue)
        except KeyboardInterrupt:
            break
        except:
            essai += 1
            continue
        break
    # NOTE(review): busy-wait spins a full CPU core until the pipeline
    # drains; a short time.sleep() in the loop body would be kinder.
    while not (urlQueue.empty() and userIdsQueue.empty() and userInfosQueue.empty()):
        pass
    print ("end threads")
    urlQueue.put(None)
    userIdsQueue.put(None)
    userInfosQueue.put(None)
    # NOTE(review): one None sentinel stops only one of the 10 (resp. 20)
    # workers per queue, and SetQueue._put drops a repeated None, so the
    # join() calls below can block on the remaining workers — confirm the
    # intended shutdown semantics.
    # end of workers
    for i in userIdWorker:
        i.join()
    for i in userInfoWorker:
        i.join()
    time.sleep(5)
    sys.exit(0)
if __name__ == '__main__':
main(sys.argv[1:])
|
sparkslabs/kamaelia_ | Sketches/MPS/BugReports/FixTests/Kamaelia/Kamaelia/Support/Data/tests/test_Escape.py | Python | apache-2.0 | 7,515 | 0.006121 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may no | t use this file except in compliance with the License.
# You may obtain a copy of the License a | t
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import Kamaelia.Support.Data.Escape as Escape
class Escape_tests(unittest.TestCase):
    """Tests for Escape.escape().

    Contract under test: '%' is always encoded as '%25', and every character
    of the optional escape string is encoded as a '%xx' lowercase-hex pair.
    """

    def test_escape_emptyString(self):
        """Escaping the empty string yields the empty string."""
        message = ""
        expectResult = message
        result = Escape.escape(message)
        self.assertEqual(expectResult, result)

    def test_escape_nonEmptyStringNoEscapeNeeded(self):
        """Text with no '%' and no escape string passes through unchanged."""
        message = "XXXXXX"
        expectResult = message
        result = Escape.escape(message)
        self.assertEqual(expectResult, result)

    def test_escape_nonEmptyString_EscapePercent(self):
        """A literal '%' is encoded as '%25'."""
        message = "XXX%XXX"
        expectResult = "XXX%25XXX"
        result = Escape.escape(message)
        self.assertEqual(expectResult, result)

    def test_escape_LongString_ManyEscapePercents(self):
        """Every '%' in a long string is encoded independently."""
        message = "XXX%XXXXXXXXXXXXX%XXXXXXXXXXXXXXXX%XXXXXXXXXXXXXXXXXXX%XXXXXXXXXXXXXXXXX%XXXXXXXXXXXXXX"
        expectResult = "XXX%25XXXXXXXXXXXXX%25XXXXXXXXXXXXXXXX%25XXXXXXXXXXXXXXXXXXX%25XXXXXXXXXXXXXXXXX%25XXXXXXXXXXXXXX"
        result = Escape.escape(message)
        self.assertEqual(expectResult, result)

    def test_escape_LongString_EscapeSubStr(self):
        """Each character of the escape string is hex-encoded in the output."""
        message = "XXXXhelloXXXX"
        expectResult = "XXXX%68%65%6c%6c%6fXXXX"
        escape_string = "hello"
        result = Escape.escape(message,escape_string)
        self.assertEqual(expectResult, result)

    def test_escape_LongString_EscapeSubStr_MixedPercents(self):
        """'%' escaping and escape-string encoding do not interfere."""
        message = "X%X%X%XhelloX%X%X%X"
        expectResult = "X%25X%25X%25X%68%65%6c%6c%6fX%25X%25X%25X"
        escape_string = "hello"
        result = Escape.escape(message,escape_string)
        self.assertEqual(expectResult, result)

    def test_escape_LongString_EscapeSubStr_MixedPercents_ButtingUp(self):
        """Adjacent occurrences of the escape string are each encoded."""
        message = "X%X%helloX%XhelloX%X%X%X"
        escape_string = "hello"
        expectResult = "X%25X%25%68%65%6c%6c%6fX%25X%68%65%6c%6c%6fX%25X%25X%25X"
        result = Escape.escape(message,escape_string)
        self.assertEqual(expectResult, result)

    def test_escape_LongString_EscapeSubStr_PartialMatching(self):
        # We should not be able to find the escaped string earlier than it
        # was inserted into an escaped sequence.
        messages = [ " x", " x"]
        escape_string = "xxxx"
        encoded = [ Escape.escape(message,escape_string) for message in messages ]
        joined = escape_string + escape_string.join(encoded)
        self.assertEqual(joined.find(escape_string),0)
        # ``assert_`` was removed from unittest in Python 3.12; use assertTrue.
        self.assertTrue(joined.find(escape_string,1)>7)

    def test_escape_AllChars(self):
        """All possible chars can be escaped, without interfering with eachother."""
        escape_string = "".join([ chr(c) for c in range(0,256) ])
        message = "BLURBLE".join([ chr(c) for c in range(0,256) ]) + "BLURBLE"
        expected = "BLURBLE" + \
            "%00%01%02%03%04%05%06%07%08%09%0a%0b%0c%0d%0e%0f" + \
            "%10%11%12%13%14%15%16%17%18%19%1a%1b%1c%1d%1e%1f" + \
            "%20%21%22%23%24%25%26%27%28%29%2a%2b%2c%2d%2e%2f" + \
            "%30%31%32%33%34%35%36%37%38%39%3a%3b%3c%3d%3e%3f" + \
            "%40%41%42%43%44%45%46%47%48%49%4a%4b%4c%4d%4e%4f" + \
            "%50%51%52%53%54%55%56%57%58%59%5a%5b%5c%5d%5e%5f" + \
            "%60%61%62%63%64%65%66%67%68%69%6a%6b%6c%6d%6e%6f" + \
            "%70%71%72%73%74%75%76%77%78%79%7a%7b%7c%7d%7e%7f" + \
            "%80%81%82%83%84%85%86%87%88%89%8a%8b%8c%8d%8e%8f" + \
            "%90%91%92%93%94%95%96%97%98%99%9a%9b%9c%9d%9e%9f" + \
            "%a0%a1%a2%a3%a4%a5%a6%a7%a8%a9%aa%ab%ac%ad%ae%af" + \
            "%b0%b1%b2%b3%b4%b5%b6%b7%b8%b9%ba%bb%bc%bd%be%bf" + \
            "%c0%c1%c2%c3%c4%c5%c6%c7%c8%c9%ca%cb%cc%cd%ce%cf" + \
            "%d0%d1%d2%d3%d4%d5%d6%d7%d8%d9%da%db%dc%dd%de%df" + \
            "%e0%e1%e2%e3%e4%e5%e6%e7%e8%e9%ea%eb%ec%ed%ee%ef" + \
            "%f0%f1%f2%f3%f4%f5%f6%f7%f8%f9%fa%fb%fc%fd%fe%ff" + "BLURBLE"
        # expected = "BLURBLE".join([ "%" + hex(c).zfill(2) for c in range(0,256) ])
        encoded = Escape.escape(message, escape_string)
        self.assertEqual(encoded,expected)
class Unescape_tests(unittest.TestCase):
    """Tests for Escape.unescape(): the inverse of Escape.escape()."""

    def test_unescape_emptyString(self):
        """Unescaping the empty string yields the empty string."""
        message = ""
        expectResult = message
        result = Escape.unescape(message)
        self.assertEqual(expectResult, result)

    def test_unescape_nonEmptyStringNoEscapeNeeded(self):
        """Text containing no '%xx' sequences passes through unchanged."""
        message = "XXXXXX"
        expectResult = message
        result = Escape.unescape(message)
        self.assertEqual(expectResult, result)

    def test_unescape_nonEmptyString_UnEscapePercent(self):
        """'%25' decodes back to a literal '%'."""
        message = "XXX%25XXX"
        expectResult = "XXX%XXX"
        result = Escape.unescape(message)
        self.assertEqual(expectResult, result)

    def test_unescape_LongString_ManyUnEscapePercents(self):
        """Every '%25' in a long string is decoded independently."""
        message = "XXX%25XXXXXXXXXXXXX%25XXXXXXXXXXXXXXXX%25XXXXXXXXXXXXXXXXXXX%25XXXXXXXXXXXXXXXXX%25XXXXXXXXXXXXXX"
        expectResult = "XXX%XXXXXXXXXXXXX%XXXXXXXXXXXXXXXX%XXXXXXXXXXXXXXXXXXX%XXXXXXXXXXXXXXXXX%XXXXXXXXXXXXXX"
        result = Escape.unescape(message)
        self.assertEqual(expectResult, result)

    def test_unescape_LongString_UnEscapeSubStr(self):
        """Hex pairs decode back to the original escape-string characters."""
        message = "XXXX%68%65%6c%6c%6fXXXX"
        expectResult = "XXXXhelloXXXX"
        escape_string = "hello"
        result = Escape.unescape(message,escape_string)
        self.assertEqual(expectResult, result)

    def test_unescape_LongString_UnEscapeSubStr_MixedPercents(self):
        """'%25' and escape-string decoding do not interfere."""
        message = "X%25X%25X%25X%68%65%6c%6c%6fX%25X%25X%25X"
        expectResult = "X%X%X%XhelloX%X%X%X"
        escape_string = "hello"
        result = Escape.unescape(message,escape_string)
        self.assertEqual(expectResult, result)

    def test_unescape_LongString_UnEscapeSubStr_MixedPercents_ButtingUp(self):
        """Adjacent encoded substrings each decode correctly."""
        message = "X%25X%25%68%65%6c%6c%6fX%25X%68%65%6c%6c%6fX%25X%25X%25X"
        expectResult = "X%X%helloX%XhelloX%X%X%X"
        escape_string = "hello"
        result = Escape.unescape(message,escape_string)
        self.assertEqual(expectResult, result)

    def test_escape_LongString_UnEscapeSubStr_PartialMatch(self):
        # We should not be able to find the escaped string earlier than it
        # was inserted into an escaped sequence.
        messages = [ " x", " x"]
        escape_string = "xxxx"
        encoded = [ Escape.escape(message,escape_string) for message in messages ]
        decoded = [ Escape.unescape(message,escape_string) for message in encoded ]
        self.assertEqual(messages, decoded)
if __name__=="__main__":
unittest.main()
# RELEASE: MH, MPS
|
alephdata/ingestors | ingestors/support/package.py | Python | mit | 1,257 | 0 | import shutil
import logging
from followthemoney import model
from ingestors.support.temp import TempFileSupport
from ingestors.support.encoding import EncodingSupport
from ingestors.directory import DirectoryIngestor
log = logging.getLogger(__name__)
class PackageSupport(TempFileSupport, EncodingSupport):
def ensure_path(self, base_dir, name, encoding="utf-8"):
if isinstance(name, bytes):
name = name.decode(encoding, "ignore")
return self.make_work_file(name, prefix=base_dir)
def extract_member(self, base_dir, name, fh, encoding):
out_path = self.ensure_path(base_dir, name, encoding=encoding)
if out_path is | None or fh is None:
return
file_name = out_path.name
try:
log.debug("Unpack: %s", file_name)
with open(out_path, "wb") as out_fh:
shutil.copyfileobj(fh, out_fh)
finally:
fh.close()
def ingest(self, file_path, entity):
entity.schema = model.get("Package")
temp_dir = self.make_empty_directory()
self.unpack | (file_path, entity, temp_dir)
self.manager.delegate(DirectoryIngestor, temp_dir, entity)
def unpack(self, file_path, entity, temp_dir):
pass
|
hellais/luigi | test/central_planner_test.py | Python | apache-2.0 | 28,970 | 0.001277 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
from helpers import unittest
import luigi.notifications
from luigi.scheduler import DISABLED, DONE, FAILED, CentralPlannerScheduler
luigi.notifications.DEBUG = True
WORKER = 'myworker'
class CentralPlannerTest(unittest.TestCase):
def setUp(self):
super(CentralPlannerTest, self).setUp()
conf = self.get_scheduler_config()
self.sch = CentralPlannerScheduler(**conf)
self.time = time.time
def get_scheduler_config(self):
return {
'retry_delay': 100,
'remove_delay': 1000,
'worker_disconnect_delay': 10,
'disable_persist': 10,
'disable_window': 10,
'disable_failures': 3,
}
def tearDown(self):
super(CentralPlannerTest, self).tearDown()
if time.time != self.time:
time.time = self.time
def setTime(self, t):
time.time = lambda: t
def test_dep(self):
self.sch.add_task(WORKER, 'B', deps=('A',))
self.sch.add_task(WORKER, 'A')
self.assertEqual(self.sch.get_work(WORKER)['task_id'], 'A')
self.sch.add_task(WORKER, 'A', status=DONE)
self.assertEqual(self.sch.get_work(WORKER)['task_id'], 'B')
self.sch.add_task(WORKER, 'B', status=DONE)
self.assertEqual(self.sch.get_work(WORKER)['task_id'], None)
def test_failed_dep(self):
self.sch.add_task(WORKER, 'B', deps=('A',))
self.sch.add_task(WORKER, 'A')
self.assertEqual(self.sch.get_work(WORKER)['task_id'], 'A')
self.sch.add_task(WORKER, 'A', status=FAILED)
self.assertEqual(self.sch.get_work(WORKER)['task_id'], None) # can still wait and retry: TODO: do we want this?
self.sch.add_task(WORKER, 'A', DONE)
self.assertEqual(self.sch.get_work(WORKER)['task_id'], 'B')
self.sch.add_task(WORKER, 'B', DONE)
self.assertEqual(self.sch.get_work(WORKER)['task_id'], None)
def test_broken_dep(self):
self.sch.add_task(WORKER, 'B', deps=('A',))
self.sch.add_task(WORKER, 'A', runnable=False)
self.assertEqual(self.sch.get_work(WORKER)['task_id'], None) # can still wait and retry: TODO: do we want this?
self.sch.add_task(WORKER, 'A', DONE)
self.assertEqual(self.sch.get_work(WORKER)['task_id'], 'B')
self.sch.add_task(WORKER, 'B', DONE)
self.assertEqual(self.sch.get_work(WORKER)['task_id'], None)
def test_two_workers(self):
# Worker X wants to build A -> B
# Worker Y wants to build A -> C
self.sch.add_task(worker='X', task_id='A')
self.sch.add_task(worker='Y', task_id='A')
self.sch.add_task(task_id='B', deps=('A',), worker='X')
self.sch.add_task(task_id='C', deps=('A',), worker='Y')
self.assertEqual(self.sch.get_work(worker='X')['task_id'], 'A')
self.assertEqual(self.sch.get_work(worker='Y')['task_id'], None) # Worker Y is pending on A to be done
self.sch.add_task(worker='X', task_id='A', status=DONE)
self.assertEqual(self.sch.get_work(worker='Y')['task_id'], 'C')
self.assertEqual(self.sch.get_work(worker='X')['task_id'], 'B')
def test_retry(self):
# Try to build A but fails, will retry after 100s
self.setTime(0)
self.sch.add_task(WORKE | R, 'A')
| self.assertEqual(self.sch.get_work(WORKER)['task_id'], 'A')
self.sch.add_task(WORKER, 'A', FAILED)
for t in range(100):
self.setTime(t)
self.assertEqual(self.sch.get_work(WORKER)['task_id'], None)
self.sch.ping(WORKER)
if t % 10 == 0:
self.sch.prune()
self.setTime(101)
self.sch.prune()
self.assertEqual(self.sch.get_work(WORKER)['task_id'], 'A')
def test_disconnect_running(self):
# X and Y wants to run A.
# X starts but does not report back. Y does.
# After some timeout, Y will build it instead
self.setTime(0)
self.sch.add_task(task_id='A', worker='X')
self.sch.add_task(task_id='A', worker='Y')
self.assertEqual(self.sch.get_work(worker='X')['task_id'], 'A')
for t in range(200):
self.setTime(t)
self.sch.ping(worker='Y')
if t % 10 == 0:
self.sch.prune()
self.assertEqual(self.sch.get_work(worker='Y')['task_id'], 'A')
def test_remove_dep(self):
# X schedules A -> B, A is broken
# Y schedules C -> B: this should remove A as a dep of B
self.sch.add_task(task_id='A', worker='X', runnable=False)
self.sch.add_task(task_id='B', deps=('A',), worker='X')
# X can't build anything
self.assertEqual(self.sch.get_work(worker='X')['task_id'], None)
self.sch.add_task(task_id='B', deps=('C',), worker='Y') # should reset dependencies for A
self.sch.add_task(task_id='C', worker='Y', status=DONE)
self.assertEqual(self.sch.get_work(worker='Y')['task_id'], 'B')
def test_timeout(self):
# A bug that was earlier present when restarting the same flow
self.setTime(0)
self.sch.add_task(task_id='A', worker='X')
self.assertEqual(self.sch.get_work(worker='X')['task_id'], 'A')
self.setTime(10000)
self.sch.add_task(task_id='A', worker='Y') # Will timeout X but not schedule A for removal
for i in range(2000):
self.setTime(10000 + i)
self.sch.ping(worker='Y')
self.sch.add_task(task_id='A', status=DONE, worker='Y') # This used to raise an exception since A was removed
def test_disallowed_state_changes(self):
# Test that we can not schedule an already running task
t = 'A'
self.sch.add_task(task_id=t, worker='X')
self.assertEqual(self.sch.get_work(worker='X')['task_id'], t)
self.sch.add_task(task_id=t, worker='Y')
self.assertEqual(self.sch.get_work(worker='Y')['task_id'], None)
def test_two_worker_info(self):
# Make sure the scheduler returns info that some other worker is running task A
self.sch.add_task(worker='X', task_id='A')
self.sch.add_task(worker='Y', task_id='A')
self.assertEqual(self.sch.get_work(worker='X')['task_id'], 'A')
r = self.sch.get_work(worker='Y')
self.assertEqual(r['task_id'], None) # Worker Y is pending on A to be done
s = r['running_tasks'][0]
self.assertEqual(s['task_id'], 'A')
self.assertEqual(s['worker'], 'X')
def test_assistant_get_work(self):
self.sch.add_task(worker='X', task_id='A')
self.sch.add_worker('Y', [])
self.assertEqual(self.sch.get_work('Y', assistant=True)['task_id'], 'A')
# check that the scheduler recognizes tasks as running
running_tasks = self.sch.task_list('RUNNING', '')
self.assertEqual(len(running_tasks), 1)
self.assertEqual(list(running_tasks.keys()), ['A'])
self.assertEqual(running_tasks['A']['worker_running'], 'Y')
def test_assistant_get_work_external_task(self):
self.sch.add_task('X', task_id='A', runnable=False)
self.assertTrue(self.sch.get_work('Y', assistant=True)['task_id'] is None)
def test_task_fails_when_assistant_dies(self):
self.setTime(0)
self.sch.add_task(worker='X', task_id='A')
self.sch.add_worker('Y', [])
self.assertEqual(self.sch.get_work('Y', assistant=True)['task_id'], 'A')
self.assertEqual(list(self.sch.task_list('RUNNING', '').keys()), ['A'])
# Y dies for 50 seconds, X st |
timwee/emacs-starter-kit-mr-flip-forked | vendor/rope/ropetest/builtinstest.py | Python | gpl-3.0 | 20,183 | 0.00104 | import unittest
from rope.base import pyobjects, builtins
from ropetest import testutils
class BuiltinTypesTest(unittest.TestCase):
def setUp(self):
super(BuiltinTypesTest, self).setUp()
self.project = testutils.sample_project()
self.pycore = self.project.pycore
self.mod = testutils.create_module(self.project, 'mod')
def tearDown(self):
testutils.remove_project(self.project)
super(BuiltinTypesTest, self).tearDown()
def test_simple_case(self):
self.mod.write('l = []\n')
pymod = self.pycore.resource_to_pyobject(self.mod)
self.assertTrue('append' in pymod['l'].get_object())
def test_holding_type_information(self):
self.mod.write('class C(object):\n pass\nl = [C()]\na_var = l.pop()\n')
pymod = self.pycore.resource_to_pyobject(self.mod)
c_class = pymod['C'].get_object()
a_var = pymod['a_var'].get_object()
self.assertEquals(c_class, a_var.get_type())
def test_get_items(self):
self.mod.write('class C(object):\n def __getitem__(self, i):\n return C()\n'
'c = C()\na_var = c[0]')
pymod = self.pycore.resource_to_pyobject(self.mod)
c_class = pymod['C'].get_object()
a_var = pymod['a_var'].get_object()
self.assertEquals(c_class, a_var.get_type())
def test_get_items_for_lists(self):
self.mod.write('class C(object):\n pass\nl = [C()]\na_var = l[0]\n')
pymod = self.pycore.resource_to_pyobject(self.mod)
c_class = pymod['C'].get_object()
a_var = pymod['a_var'].get_object()
self.assertEquals(c_class, a_var.get_type())
def test_get_items_from_slices(self):
self.mod.write('class C(object):\n pass\nl = [C()]\na_var = l[:].pop()\n')
pymod = self.pycore.resource_to_pyobject(self.mod)
c_class = pymod['C'].get_object()
a_var = pymod['a_var'].get_object()
self.assertEquals(c_class, a_var.get_type())
def test_simple_for_loops(self):
self.mod.write('class C(object):\n pass\nl = [C()]\n'
'for c in l:\n a_var = c\n')
pymod = self.pycore.resource_to_pyobject(self.mod)
c_class = pymod['C'].get_object()
a_var = pymod['a_var'].get_object()
self.assertEquals(c_class, a_var.get_type())
def test_definition_location_for_loop_variables(self):
self.mod.write('class C(object):\n pass\nl = [C()]\n'
'for c in l:\n pass\n')
pymod = self.pycore.resource_to_pyobject(self.mod)
c_var = pymod['c']
self.assertEquals((pymod, 4), c_var.get_definition_location())
def test_simple_case_for_dicts(self):
self.mod.write('d = {}\n')
pymod = self.pycore.resource_to_pyobject(self.mod)
self.assertTrue('get' in pymod['d'].get_object())
def test_get_item_for_dicts(self):
self.mod.write('class C(object):\n pass\nd = {1: C()}\na_var = d[1]\n')
pymod = self.pycore.resource_to_pyobject(self.mod)
c_class = pymod['C'].get_object()
a_var = pymod['a_var'].get_object()
self.assertEquals(c_class, a_var.get_type())
def test_popping_dicts(self):
self.mod.write('class C(object):\n pass\nd = {1: C()}\na_var = d.pop(1)\n')
pymod = self.pycore.resource_to_pyobject(self.mod)
c_class = pymod['C'].get_object()
a_var = pymod['a_var'].get_object()
self.assertEquals(c_class, a_var.get_type())
def test_getting_keys_from_dicts(self):
self.mod.write('class C1(object):\n pass\nclass C2(object):\n pass\n'
'd = {C1(): C2()}\nfor c in d.keys():\n a_var = c\n')
pymod = self.pycore.resource_to_pyobject(self.mod)
c_class = pymod['C1'].get_object()
a_var = pymod['a_var'].get_object()
self.assertEquals(c_class, a_var.get_type())
def test_getting_values_from_dicts(self):
self.mod.write('class C1(object):\n pass\nclass C2(object):\n pass\n'
'd = {C1(): C2()}\nfor c in d.values():\n a_var = c\n')
pymod = self.pycore.resource_to_pyobject(self.mod)
c_class = pymod['C2'].get_object()
a_var = pymod['a_var'].get_object()
self.assertEquals(c_class, a_var.get_type())
def test_getting_iterkeys_from_dicts(self):
self.mod.write('class C1(object):\n pass\nclass C2(ob | ject):\n pass\n'
'd = {C1(): C2()}\nfor c in d.keys():\n a_var = c\n')
pymod = self.pycore.resource_to_pyobject(self.mod)
c_class = pymod['C1'].get_object()
a_var = pymod['a_var'].get_object()
self.assertEquals(c_class, a_var.get_type())
def test_getting_itervalues_from_dicts(self):
self.mod.write('class C1(object) | :\n pass\nclass C2(object):\n pass\n'
'd = {C1(): C2()}\nfor c in d.values():\n a_var = c\n')
pymod = self.pycore.resource_to_pyobject(self.mod)
c_class = pymod['C2'].get_object()
a_var = pymod['a_var'].get_object()
self.assertEquals(c_class, a_var.get_type())
def test_using_copy_for_dicts(self):
self.mod.write('class C1(object):\n pass\nclass C2(object):\n pass\n'
'd = {C1(): C2()}\nfor c in d.copy():\n a_var = c\n')
pymod = self.pycore.resource_to_pyobject(self.mod)
c_class = pymod['C1'].get_object()
a_var = pymod['a_var'].get_object()
self.assertEquals(c_class, a_var.get_type())
def test_tuple_assignments_for_items(self):
self.mod.write('class C1(object):\n pass\nclass C2(object):\n pass\n'
'd = {C1(): C2()}\nkey, value = d.items()[0]\n')
pymod = self.pycore.resource_to_pyobject(self.mod)
c1_class = pymod['C1'].get_object()
c2_class = pymod['C2'].get_object()
key = pymod['key'].get_object()
value = pymod['value'].get_object()
self.assertEquals(c1_class, key.get_type())
self.assertEquals(c2_class, value.get_type())
def test_tuple_assignment_for_lists(self):
self.mod.write('class C(object):\n pass\nl = [C(), C()]\na, b = l\n')
pymod = self.pycore.resource_to_pyobject(self.mod)
c_class = pymod['C'].get_object()
a_var = pymod['a'].get_object()
b_var = pymod['b'].get_object()
self.assertEquals(c_class, a_var.get_type())
self.assertEquals(c_class, b_var.get_type())
def test_tuple_assignments_for_iteritems_in_fors(self):
self.mod.write('class C1(object):\n pass\nclass C2(object):\n pass\n'
'd = {C1(): C2()}\nfor x, y in d.items():\n a = x;\n b = y\n')
pymod = self.pycore.resource_to_pyobject(self.mod)
c1_class = pymod['C1'].get_object()
c2_class = pymod['C2'].get_object()
a_var = pymod['a'].get_object()
b_var = pymod['b'].get_object()
self.assertEquals(c1_class, a_var.get_type())
self.assertEquals(c2_class, b_var.get_type())
def test_simple_tuple_assignments(self):
self.mod.write('class C1(object):\n pass\nclass C2(object):\n pass\n'
'a, b = C1(), C2()\n')
pymod = self.pycore.resource_to_pyobject(self.mod)
c1_class = pymod['C1'].get_object()
c2_class = pymod['C2'].get_object()
a_var = pymod['a'].get_object()
b_var = pymod['b'].get_object()
self.assertEquals(c1_class, a_var.get_type())
self.assertEquals(c2_class, b_var.get_type())
def test_overriding_builtin_names(self):
self.mod.write('class C(object):\n pass\nlist = C\n')
pymod = self.pycore.resource_to_pyobject(self.mod)
c_class = pymod['C'].get_object()
list_var = pymod['list'].get_object()
self.assertEquals(c_class, list_var)
def test_simple_builtin_scope_test(self):
self.mod.write('l = list()\n')
pymod = self.pycore.resource_to_pyobject(self.mod)
self.assertTrue('append' in pymod['l'].get_object())
def test_simple_sets(self):
|
improlabs/Banglish-Sentiment-Analysis | python3/senalysis-1.py | Python | gpl-3.0 | 530 | 0 | #!/usr/local/bin/python
from pyavrophonetic import avro
from textblob import TextBlob
from vade | rSentiment.vaderSentiment import SentimentIntensityAnalyzer
import sys
# Read romanised Bengali ("Banglish") text from the user.
b = input("Enter Banglish :")
# avro.parse transliterates Banglish into Bengali script; TextBlob then
# translates it to English (presumably via a network-backed translation
# service — TODO confirm connectivity requirements).
textBlobTrans = TextBlob(avro.parse(b))
c = str(textBlobTrans.translate(to='en'))
print(c)
# Sentiment of the English translation, scored two ways for comparison.
analyze = TextBlob(c)
print('By TextBlob Sentiment analysis')
print(analyze.sentiment)
print('By vaderSentiment analyzer')
# NOTE(review): the next line is garbled in this extract
# ("Sent | imentIntensityAnalyzer"); presumably it reads
# ``SentimentIntensityAnalyzer()`` — confirm against the original file.
analyzer = Sent | imentIntensityAnalyzer()
vs = analyzer.polarity_scores(c)
print(str(vs))
|
theithec/bookhelper | bookhelper/tests/test_init.py | Python | mit | 741 | 0.006748 | from unittest import TestCase
# from unittest import mock
from mwclient import | Site
from bookhelper import ExistingBook
from . import PageMock, SiteMock, TESTBOOK1_TXT
#Site.login=mock.MagicMock(return_value=True)
class TestInit(TestCase):
    """Smoke-tests ExistingBook construction against mocked mwclient objects."""

    def setUp(self):
        # SiteMock stands in for an authenticated mwclient.Site.
        self.site = SiteMock()

    def test_book(self):
        """Loading 'Testbook1' populates page text, title and TOC entries."""
        book = ExistingBook(self.site, "Testbook1", "live")
        self.assertEqual(book.errors, [])
        # NOTE(review): the next line is garbled in this extract
        # ("book.book_page | .friendly_title"); presumably it reads
        # ``book.book_page.friendly_title`` — confirm against the original.
        self.assertEqual(book.book_page | .friendly_title, "Testbook1")
        self.assertEqual(book.book_page.text.strip(), TESTBOOK1_TXT)
        self.assertEqual(book.toc[0].target,'Testbook1/Page1')
        self.assertEqual(book.toc[1].target,'Testbook1/Page2')
        self.assertEqual(book.toc[1].text,'Page2')
|
cgwire/zou | zou/app/blueprints/crud/search_filter.py | Python | agpl-3.0 | 370 | 0 | from zou.app.models.search_filter import Search | Filter
from .base import BaseModelResource, BaseModelsResource
class SearchFiltersResource(BaseModelsResource):
def __init__(self):
BaseModelsResource.__init__(self, SearchFilter)
class SearchFilterResource(BaseModelResource):
def _ | _init__(self):
BaseModelResource.__init__(self, SearchFilter)
|
jdthorpe/archiver | __main__.py | Python | mit | 13,106 | 0.011064 | # this is the interface for `python archiver`
import archiver
import appdirs
import os
import sys
import pickle
import json
from archiver.archiver import Archiver
from archiver.parser import parseArgs
args = parseArgs()
from edit import edit
# ==============================================
print args
# TODO: see http://stackoverflow.com/questions/13168083/python-raw-input-replacement-that-uses-a-configurable-text-editor
#-- import pdb
#-- pdb.set_trace()
# ---------------------------- | --------------------------------
# load the user data
# ------------------------------------------------------------
# get the user data directory
user_data_dir = appdirs.user_data_dir('FileArchiver', 'jdthorpe')
| if not os.path.exists(user_data_dir) :
os.makedirs(user_data_dir)
# LOAD THE INDEX NAMES AND ACTIVE INDEX
indexes_path = os.path.join(user_data_dir,'INDEXES.json')
if os.path.exists(indexes_path):
with open(indexes_path,'rb') as fh:
indexes = json.load(fh)
else:
indexes= {'active':None,'names':[]}
if not os.path.exists(user_data_dir):
os.makedirs(user_data_dir)
def dumpIndexes():
with open(indexes_path,'wb') as fh:
json.dump(indexes,fh)
# ------------------------------------------------------------
# ------------------------------------------------------------
def getActiveName():
# ACTIVE INDEX NUMER
activeIndex = indexes['active']
if activeIndex is None:
print "No active index. Use 'list -i' to list available indexies and 'use' to set an active index."
sys.exit()
# GET THE NAME OF THE INDEX
try:
activeIndexName = indexes['names'][indexes['active']]
except:
print "Invalid index number"
sys.exit()
return activeIndexName
# ------------------------------------------------------------
# READ-WRITE UTILITY FUNCTIONS
# ------------------------------------------------------------
# TODO: catch specific excepitons:
# except IOError:
# # no such file
# except ValueError as e:
# # invalid json file
def readSettings(name):
""" A utility function which loads the index settings from file
"""
try:
with open(os.path.join(user_data_dir,name+".settings"),'rb') as fh:
settings = json.load(fh)
except Exception as e:
print "Error reading index settings"
import pdb
pdb.set_trace()
sys.exit()
return settings
def readData(name):
""" A utility function which loads the index data from file
"""
try:
with open(os.path.join(user_data_dir,name+".data"),'rb') as fh: data = pickle.load(fh)
except Exception as e:
print "Error reading index data"
import pdb
pdb.set_trace()
sys.exit()
return data
def dumpSettings(settings,name):
""" A utility function which saves the index settings to file
"""
try:
with open(os.path.join(user_data_dir,name+".settings"),'wb') as fh:
json.dump(settings,fh)
except Exception as e:
print "Error writing index settings"
import pdb
pdb.set_trace()
sys.exit()
def dumpData(data,name):
""" A utility function which saves the index settings to file
"""
try:
with open(os.path.join(user_data_dir,name+".data"),'wb') as fh:
pickle.dump(data,fh)
except:
print "Error writing index data"
import pdb
pdb.set_trace()
sys.exit()
# ------------------------------------------------------------
# ------------------------------------------------------------
if args.command == 'add':
activeName = getActiveName()
settings = readSettings(activeName)
if args.source is not None:
source = os.path.abspath(args.source)
if not os.path.exists(source):
print 'WARNING: no such directory "%s"'%(source)
elif not os.path.isdir(source):
print 'ERROR: "%s" is not a directory'%(source)
sys.exit()
print 'Adding source directory: %s'%(source)
if not any(samefile(source,f) for f in settings['sourceDirectories']):
settings['sourceDirectories'].append(source)
elif args.exclusions is not None:
import re
try:
re.compile(args.exclusion)
except re.error:
print 'Invalid regular expression "%s"'%(args.exclusion)
sys.exit()
if args.noic:
settings['directoryExclusionPatterns'].append(args.exclusion)
else:
settings['directoryExclusionPatterns'].append((args.exclusion,2)) # re.I == 2
elif args.archive is not None:
raise NotImplementedError
if settings['archiveDirectory'] is not None:
print "Archive path has already been set use 'remove' to delete the archive path before setting a new archive path"
archiveDirectory = os.path.abspath(args.archive)
if not os.path.exists(archiveDirectory):
if args.create :
os.makedirs(archiveDirectory)
else:
print 'ERROR: no such directory "%s"'%(archiveDirectory)
sys.exit()
elif not os.path.isdir(archiveDirectory):
print '"%s" is not a directory'%(archiveDirectory)
sys.exit()
print 'Setting archive directory to: %s'%(archiveDirectory)
settings['archiveDirectory'] = args.archive
else:
raise NotImplementedError
print 'Error in Arg Parser'
sys.exit()
dumpSettings(settings,activeName)
elif args.command == 'list':
if args.sources:
for f in readSettings(getActiveName())['sourceDirectories']:
print f
elif args.exclusions:
for f in readSettings(getActiveName())['directoryExclusionPatterns']:
print f
elif args.archive:
print readSettings(getActiveName())['archiveDirectory']
elif args.files:
archiver = Archiver()
archiver.data = readData(getActiveName())
for f in archiver:
print f
elif args.indexes:
print 'Active Index: %s (*)'%(getActiveName())
print 'Index Names: '
for i,name in enumerate(indexes['names']):
print ' %s %i: %s'%(
(' ','*')[(i == indexes['active'])+0],
i+1,
name,
)
else:
print 'Error in Arg Parser'
elif args.command == 'remove':
activeName = getActiveName()
settings = readSettings(activeName)
if args.source is not None:
if not (1 <= args.source <= len(settings['sourceDirectories'])):
print 'Invalid index %i'%(args.source)
del settings['sourceDirectories'][args.source - 1]
elif args.exclusion is not None:
raise NotImplementedError
if not (1 <= args.exclusion <= len(settings['directoryExclusionPatterns'])):
print 'Invalid index %i'%(args.exclusion)
del settings['directoryExclusionPatterns'][args.exclusion - 1]
elif args.archive is not None:
raise NotImplementedError
settings['archiveDirectory'] = None
else:
raise NotImplementedError
print 'Error in Arg Parser'
sys.exit()
dumpSettings(settings,activeName)
elif args.command == 'update':
activeName = getActiveName()
settings = readSettings(activeName)
if not len(settings['sourceDirectories']):
print "Error: no source directories in the active index. Please add a source directory via 'add -s'"
archiver = Archiver(
settings = readSettings(activeName),
data = readData(activeName))
archiver.update()
dumpSettings(archiver.settings,activeName)
dumpData(archiver.data,activeName)
elif args.command == 'clean':
raise NotImplementedError
activeName = getActiveName()
archiver = Archiver(
settings = readSettings(activeName),
data = readData(activeName))
archiver.clean()
dumpSettings(archiver.settings,activeName)
dumpData(archiver.data,activeName)
elif args.command == 'copy': |
elastic/elasticsearch-py | elasticsearch/_sync/client/xpack.py | Python | apache-2.0 | 4,223 | 0.00071 | # Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import typing as t
from elastic_transport import ObjectApiResponse
from ._base import NamespacedClient
from .utils import _rewrite_parameters
class XPackClient(NamespacedClient):
def __getattr__(self, attr_name: str) -> t.Any:
return getattr(self.client, attr_name)
# AUTO-GENERATED-API-DEFINITIONS #
@_rewrite_parameters()
def info(
self,
*,
accept_enterprise: t.Optional[bool] = None,
categories: t.Optional[t.Union[t.List[str], t.Tuple[str, ...]]] = None,
error_trace: t.Optional[bool] = None,
filter_path: t.Optional[
t.Union[str, t.Union[t.List[str], t.Tuple[str, ...]]]
] = None,
human: t.Optional[bool] = None,
pretty: t.Optional[bool] = None,
) -> ObjectApiResponse[t.Any]:
"""
Retrieves information about the installed X-Pack features.
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/info-api.html>`_
:param accept_enterprise: If this param is used it must be set to true
:param categories: A comma-separated list of the information categories to include
in the response. For example, `build,license,features`.
"""
__path = "/_xpack"
__query: t.Dict[str, t.Any] = {}
if accept_enterprise is not None:
__query["accept_enterprise"] = accept_enterprise
if categories is not None:
__query["categories"] = categories
if error_trace is not None:
__query["error_trace"] = error_trace
if filter_path is not None:
__query["filter_path"] = filter_path
if human is not None:
__query["human"] = human
if pretty is not None:
__query["pretty"] = pretty
__headers = {"accept": "application/json"}
return self.perform_request( # type: ignore[return-value]
"GET", __path, params=__query, headers=__headers
)
@_rewrite_parameters()
def usage(
self,
*,
error_trace: t.Optional[bool] = None,
filter_path: t.Optional[
t.Union[str, t.Union[t.List[str], t.Tuple[str, ...]]]
] = None,
human: t.Optional[bool] = None,
master_timeout: t.Optional[t.Union[int, str]] = None,
pretty: t.Optional[bool] = None,
) -> ObjectApiResponse[t.Any]:
"""
Retrie | ves usage information about the installed X-Pack features.
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/usage-api.html>`_
| :param master_timeout: Period to wait for a connection to the master node. If
no response is received before the timeout expires, the request fails and
returns an error.
"""
__path = "/_xpack/usage"
__query: t.Dict[str, t.Any] = {}
if error_trace is not None:
__query["error_trace"] = error_trace
if filter_path is not None:
__query["filter_path"] = filter_path
if human is not None:
__query["human"] = human
if master_timeout is not None:
__query["master_timeout"] = master_timeout
if pretty is not None:
__query["pretty"] = pretty
__headers = {"accept": "application/json"}
return self.perform_request( # type: ignore[return-value]
"GET", __path, params=__query, headers=__headers
)
|
loic/django | django/core/validators.py | Python | bsd-3-clause | 17,864 | 0.002183 | from __future__ import unicode_literals
import os
import re
from django.core.exceptions import ValidationError
from django.utils import six
from django.utils.deconstruct import deconstructible
from django.utils.encoding import force_text
from django.utils.functional import SimpleLazyObject
from django.utils.ipv6 import is_valid_ipv6_address
from django.utils.six.moves.urllib.parse import urlsplit, urlunsplit
from django.utils.translation import ugettext_lazy as _, ungettext_lazy
# These values, if given to validate(), will trigger the self.required check.
EMPTY_VALUES = (None, '', [], (), {})
def _lazy_re_compile(regex, flags=0):
"""Lazily compile a regex with flags."""
def _compile():
# Compile the regex if it was not passed pre-compiled.
if isinstance(regex, six.string_types):
return re.compile(regex, flags)
else:
assert not flags, "flags must be empty if regex is passed pre-compiled"
return regex
return SimpleLazyObject(_compile)
@deconstructible
class RegexValidator(object):
regex = ''
message = _('Enter a valid value.')
code = 'invalid'
inverse_match = False
flags = 0
def __init__(self, regex=None, message=None, code=None, inverse_match=None, flags=None):
if regex is not None:
self.regex = regex
if message is not None:
self.message = message
if code is not None:
self.code = code
if inverse_match is not None:
self.inverse_match = inverse_match
if flags is not None:
self.flags = flags
if self.flags and not isinstance(self.regex, six.string_types):
raise TypeError("If the flags are set, regex must be a regular expression string.")
self.regex = _lazy_re_compile(self.regex, self.flags)
def __call__(self, value):
"""
Validates that the input matches the regular expression
if inverse_match is False, otherwise raises ValidationError.
"""
if not (self.inverse_match is not bool(self.regex.search(
force_text(value)))):
raise ValidationError(self.message, code=self.code)
def __eq__(self, other):
return (
isinstance(other, RegexValidator) and
self.regex.pattern == other.regex.pattern and
self.regex.flags == other.regex.flags and
(self.message == other.message) and
(self.code == other.code) and
(self.inverse_match == other.inverse_match)
)
def __ne__(self, other):
return not (self == other)
@deconstructible
class URLValidator(RegexValidator):
ul = '\u00a1-\uffff' # unicode letters range (must be a unicode string, not a raw string)
# IP patterns
ipv4_re = r'(?:25[0-5]|2[0-4]\d|[ | 0-1]?\d?\d)(?:\.(?:25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}'
ipv6_re = r'\[[0-9a-f:\.]+\]' # (simple regex, | validated later)
# Host patterns
hostname_re = r'[a-z' + ul + r'0-9](?:[a-z' + ul + r'0-9-]{0,61}[a-z' + ul + r'0-9])?'
# Max length for domain name labels is 63 characters per RFC 1034 sec. 3.1
domain_re = r'(?:\.(?!-)[a-z' + ul + r'0-9-]{1,63}(?<!-))*'
tld_re = (
'\.' # dot
'(?!-)' # can't start with a dash
'(?:[a-z' + ul + '-]{2,63}' # domain label
'|xn--[a-z0-9]{1,59})' # or punycode label
'(?<!-)' # can't end with a dash
'\.?' # may have a trailing dot
)
host_re = '(' + hostname_re + domain_re + tld_re + '|localhost)'
regex = _lazy_re_compile(
r'^(?:[a-z0-9\.\-\+]*)://' # scheme is validated separately
r'(?:\S+(?::\S*)?@)?' # user:pass authentication
r'(?:' + ipv4_re + '|' + ipv6_re + '|' + host_re + ')'
r'(?::\d{2,5})?' # port
r'(?:[/?#][^\s]*)?' # resource path
r'\Z', re.IGNORECASE)
message = _('Enter a valid URL.')
schemes = ['http', 'https', 'ftp', 'ftps']
def __init__(self, schemes=None, **kwargs):
super(URLValidator, self).__init__(**kwargs)
if schemes is not None:
self.schemes = schemes
def __call__(self, value):
value = force_text(value)
# Check first if the scheme is valid
scheme = value.split('://')[0].lower()
if scheme not in self.schemes:
raise ValidationError(self.message, code=self.code)
# Then check full URL
try:
super(URLValidator, self).__call__(value)
except ValidationError as e:
# Trivial case failed. Try for possible IDN domain
if value:
try:
scheme, netloc, path, query, fragment = urlsplit(value)
except ValueError: # for example, "Invalid IPv6 URL"
raise ValidationError(self.message, code=self.code)
try:
netloc = netloc.encode('idna').decode('ascii') # IDN -> ACE
except UnicodeError: # invalid domain part
raise e
url = urlunsplit((scheme, netloc, path, query, fragment))
super(URLValidator, self).__call__(url)
else:
raise
else:
# Now verify IPv6 in the netloc part
host_match = re.search(r'^\[(.+)\](?::\d{2,5})?$', urlsplit(value).netloc)
if host_match:
potential_ip = host_match.groups()[0]
try:
validate_ipv6_address(potential_ip)
except ValidationError:
raise ValidationError(self.message, code=self.code)
url = value
# The maximum length of a full host name is 253 characters per RFC 1034
# section 3.1. It's defined to be 255 bytes or less, but this includes
# one byte for the length of the name and one byte for the trailing dot
# that's used to indicate absolute names in DNS.
if len(urlsplit(value).netloc) > 253:
raise ValidationError(self.message, code=self.code)
integer_validator = RegexValidator(
_lazy_re_compile('^-?\d+\Z'),
message=_('Enter a valid integer.'),
code='invalid',
)
def validate_integer(value):
return integer_validator(value)
@deconstructible
class EmailValidator(object):
message = _('Enter a valid email address.')
code = 'invalid'
user_regex = _lazy_re_compile(
r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*\Z" # dot-atom
r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|\\[\001-\011\013\014\016-\177])*"\Z)', # quoted-string
re.IGNORECASE)
domain_regex = _lazy_re_compile(
# max length for domain name labels is 63 characters per RFC 1034
r'((?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+)(?:[A-Z0-9-]{2,63}(?<!-))\Z',
re.IGNORECASE)
literal_regex = _lazy_re_compile(
# literal form, ipv4 or ipv6 address (SMTP 4.1.3)
r'\[([A-f0-9:\.]+)\]\Z',
re.IGNORECASE)
domain_whitelist = ['localhost']
def __init__(self, message=None, code=None, whitelist=None):
if message is not None:
self.message = message
if code is not None:
self.code = code
if whitelist is not None:
self.domain_whitelist = whitelist
def __call__(self, value):
value = force_text(value)
if not value or '@' not in value:
raise ValidationError(self.message, code=self.code)
user_part, domain_part = value.rsplit('@', 1)
if not self.user_regex.match(user_part):
raise ValidationError(self.message, code=self.code)
if (domain_part not in self.domain_whitelist and
not self.validate_domain_part(domain_part)):
# Try for possible IDN domain-part
try:
domain_part = domain_part.encode('idna').decode('ascii')
if self.validate_domain_part(domain_part):
return
except UnicodeError:
|
pshchelo/ironic | ironic/tests/unit/api/test_root.py | Python | apache-2.0 | 3,515 | 0 | # Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ironic.api.controllers.v1 import versions
from ironic.tests.unit.api import base
class TestRoot(base.BaseApiTest):
def test_get_root(self):
response = self.get_json('/', path_prefix='')
# Check fields are not empty
[self.assertNotIn(f, ['', []]) for f in response]
self.assertEqual('OpenStack Ironic API', response['name'])
self.assertTrue(response['description'])
self.assertEqual([response['default_version']], response['versions'])
version1 = response['default_version']
self.assertEqual('v1', version1['id'])
self.assertEqual('CURRENT', version1['status'])
self.assertEqual(versions.MIN_VERSION_STRING, version1['min_version'])
self.assertEqual(versions.MAX_VERSION_STRING, version1['version'])
class TestV1Root(base.BaseApiTest):
def _test_get_root(self, headers=None, additional_expected_resources=None):
if headers is None:
headers = {}
if additional_expected_resources is None:
additional_expected_resources = []
data = self.get_json('/', headers=headers)
self.assertEqual('v1', data['id'])
# Check fields are not empty
for f in data:
self.assertNotIn(f, ['', []])
# Check if all known resources are present and there are no extra ones.
not_resources = ('id', 'links', 'media_types')
actual_resources = tuple(set(data.keys()) - set(not_resources))
expected_resources = (['chassis', 'drivers', 'nodes', 'ports'] +
additional_expected_resources)
self.assertEqual(sorted(expected_resources), sorted(actual_resources))
self.assertIn({'type': 'application/vnd.openstack.ironic.v1+json',
'base': 'application/json'}, data['media_types'])
def test_get_v1_root(self):
self._test_get_root()
def test_get_v1_22_root(self):
self._test_get_root(headers={'X-OpenStack-Ironic-API-Version': '1.22'},
additional_expected_resources=['heartbeat',
'lookup'])
def test_get_v1_23_root(self):
self._test_get_root(headers={'X-OpenStack-Ironic-API-Version': '1.23'},
additional_expected_resources=['heartbeat',
'lookup',
'portgroups'])
def test_get_v1_32_root(self):
self._test_get_root(headers={'X-OpenStack-Ir | onic-API-Version': '1.32'},
additional_expected_resources=['heartbeat',
'lookup',
'portgroups',
| 'volume'])
|
thomasbarillot/DAQ | eTOF/WetLab_NewportDelayStage.py | Python | mit | 1,437 | 0.020181 | # -*- coding: utf-8 -*-
"""
Created on Mon Apr 04 11:17:15 2016
@author: atto
Piezostage remote control (ie writingdelay into file read by humongium)
"""
import sys
# The CLR module provide functions for interacting with the underlying
# .NET runtime
import visa
import time
# Add reference to assembly and import names from namespace
import system
class DelayStage():
def __init__(self):
self.rm=visa.ResourceManager()
self.controller=self.rm.open_resource('GPIB0::1::INSTR', rea | d_termination='\r')
self.controller.timeout = 1000
| self.stagenum=1
def __del__(self):
self.controller.clear()
self.controller.close()
self.rm.close()
def MoveAbs(self,target):
#
cmd = str(self.stagenum)+'PA'+str(target)+';'+str(self.stagenum)+'WS\r'
status=self.controller.write(unicode(cmd,"utf-8"))
time.sleep(0.5)
return status
def MoveRel(self,step,pol):
if pol==1:
cmd = str(self.stagenum)+'PR+'+str(step)+';'+str(self.stagenum)+'WS\r'
elif pol==-1:
cmd = str(self.stagenum)+'PR-'+str(step)+';'+str(self.stagenum)+'WS\r'
status=self.controller.write(unicode(cmd,"utf-8"))
time.sleep(0.5)
return status
# def QueryMotion(self):
#
# return bool(self.controller.query_ascii_values('MD?'))
|
SuLab/PyBioC | src/bioc/bioc_collection.py | Python | bsd-2-clause | 1,302 | 0 | __all__ = ['BioCCollection']
from meta import _MetaInfons, _MetaIter
from compat import _Py2Next
class BioCCollection( | _Py2Next, _MetaInfons, _MetaIter):
def __init__(self, collection=None):
self.infons = dict()
self.source = ''
self.date = ''
self.key = ''
self.documents = list()
if collection is not None:
self.infons = collection.infons
self.source = collection.source
self.date = collection.date
self.key = collection.key
| self.documents = collection.documents
def __str__(self):
s = 'source: ' + self.source + '\n'
s += 'date: ' + self.date + '\n'
s += 'key: ' + self.key + '\n'
s += str(self.infons) + '\n'
s += str(self.documents) + '\n'
return s
def _iterdata(self):
return self.documents
def clear_documents(self):
self.documents = list()
def get_document(self, doc_idx):
return self.documents[doc_idx]
def add_document(self, document):
self.documents.append(document)
def remove_document(self, document):
if type(document) is int:
self.dcouments.remove(self.documents[document])
else:
self.documents.remove(document) # TBC
|
PMEAL/OpenPNM | openpnm/utils/__init__.py | Python | mit | 983 | 0.001017 | r"""
Utilities and helper classes/functions
==== | ==================================
This module contains two very important classes (Project and Workspace)
as well as a number of helper classes.
"""
import logging as logging
from .misc import *
from ._settings import *
from ._workspace import *
from ._project import *
# You can add info to the logger message by inserting the desired %(item)
# For a list | of available items see:
# https://docs.python.org/3/library/logging.html#logrecord-attributes
# NOTE: If the calling locations appears as 'root' it's because the logger
# was not given a name in a file somewhere. A good option is __name__.
log_format = \
'-' * 60 + '\n\
%(levelname)-11s: %(message)s \n\
SOURCE : %(name)s.%(funcName)s \n\
TIME STAMP : %(asctime)s\
\n' + '-' * 60
logging.basicConfig(level=logging.WARNING, format=log_format)
del log_format
def _get_version():
from openpnm.__version__ import __version__
return __version__.strip(".dev0")
|
jmcnamara/XlsxWriter | xlsxwriter/test/comparison/test_hyperlink28.py | Python | bsd-2-clause | 1,640 | 0 | ############################# | ##################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2022, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
| """
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('hyperlink28.xlsx')
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with hyperlinks."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
format = workbook.add_format({'hyperlink': True})
worksheet.write_url('A1', 'http://www.perl.org/', format)
workbook.close()
self.assertExcelEqual()
def test_create_file_with_workbook_format(self):
"""Test the creation of a simple XlsxWriter file with hyperlinks."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
format = workbook.get_default_url_format()
worksheet.write_url('A1', 'http://www.perl.org/', format)
workbook.close()
self.assertExcelEqual()
def test_create_file_with_default_format(self):
"""Test the creation of a simple XlsxWriter file with hyperlinks."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.write_url('A1', 'http://www.perl.org/')
workbook.close()
self.assertExcelEqual()
|
ddietze/FSRStools | rraman/__init__.py | Python | gpl-3.0 | 36,173 | 0.003594 | """
.. module: FSRStools.rraman
:platform: Windows
.. moduleauthor:: Daniel Dietze <daniel.dietze@berkeley.edu>
Resonance Raman excitation profile calculation based on the time-domain picture of resonance Raman. See Myers and Mathies in *Biological Applications of Raman Spectroscopy*, Vol. 2, pp. 1-58 (John Wiley and Sons, New York, 1987) for details (referred to as Myers in the following). The code is mainly based on Myers' Fortran 77 code (see Appendix of PhD Thesis of K. M. Spillane, 2011, UC Berkeley for source code).
**Changelog:**
*10-7-2015:*
- Added / modified functions for calculating fluorescence spectra.
- Added a convenience function to calculate Raman spectra from a set of excitation profiles.
- Added some more damping functions and phenomenological support for Stokes shift in simple homogeneous damping function.
*10-21-2015:*
- Some bug fixes concerning the prefactors and the normalization of the fluorescence spectra.
- Fixed a bug regarding the Raman overlaps.
**Example Code**
Here is a short example calculating Myers' *Gedankenmolecule* from Myers and Mathies::
import numpy as np
import FSRStools.rraman as rr
# parameters:
# -----------
# displacements
D = np.array([1.27, 0.3, 0.7, 0.53])
# ground state frequencies
RMg = np.array([1550.0, 1300.0, 1150.0, 1000.0])
# excited state frequencies
RMe = np.array([1550.0, 1300.0, 1150.0, 1000.0])
# electronic zero-zero energy
E0 = 20700.0
# homogeneous linewidth and shape parameter
Gamma = 200.0
halpha = 0
# inhomogeneous linewidth and shape parameter
sig = 400.0
ialpha = 1
# electronic transition dipole length
M = 0.8
# index of refraction of surrounding medium
IOR = 1.0
# time axis parameters for integrations
tmax = 5000
dt = 0.2
# just calculate fundamentals
nquanta = np.identity(len(RMg))
sshift = np.dot(nquanta, RMg)
# calculation part
# ----------------
# create axes
t, wn = rr.getAxes(tmax, dt)
# zero-zero energy and damping
# add here all time domain stuff
TDpart = rr.getHomogeneousDamping(t, Gamma, halpha)
# time dependent overlap integrals
OVLPS = rr.getOverlaps(t, D, RMg, RMe, nquanta)
# calculate cross-sections
sigmaA, sigmaR, kF = rr.getCrossSections(t, wn, E0, OVLPS, sshift, M, IOR, TDpart, sig, ialpha)
..
This file is part of the FSRStools python module.
The FSRStools python module is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
The FSRStools python module is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with the FSRStools python module. If not, see <http://www.gnu.org/licenses/>.
Copyright 2014, 2015 Daniel Dietze <daniel.dietze@berkeley.edu>.
"""
import numpy as np
# some constants
hbar = 5308.880986 #: Planck's constant over 2 pi, hbar, in `cm-1 fs`
c0 = 2.99792458e-5 #: speed of light in `cm / fs`
kB = 0.695 #: Boltzman's constant in `cm-1 / K`
# -------------------------------------------------------------------------------------------------------------------
# some useful functions
def radperfs2wn(w):
"""Angular frequency (rad / fs) to wavenumber (cm-1).
"""
return hbar * w
def wn2radperfs(e):
"""Wavenumber (cm-1) to angular frequency (rad / fs).
"""
return e / hbar
def wn2lambda(w):
"""Convert wavenumber (cm-1) to wavelength (nm).
"""
return 1e7 / w
def lambda2wn(w):
"""Convert wavelength (nm) to wavenumber (cm-1).
"""
return 1e7 / w
def getWnIndex(wn, wn0):
"""Get the index into an array of wavenumbers wn with wavenumber closest to wn0. Use this function for :py:func:`getRamanSpectrum`.
"""
if np.amin(wn) > wn0 or np.amax(wn) < wn0:
print "Warning: wn0 lies outside of wn."
return np.argmin(np.absolute(wn - wn0))
def getAxes(tmax, dt):
"""Create time and frequency axes for the resonance Raman calculations.
:param float tmax: Endpoint for time domain calculation (fs). This value should be high enough to capture the full dephasing.
:param float dt: Increment of time axis (fs). This value should be small enough to capture the highest vibronic feature in the excited state.
:returns: Time axis (fs) and frequency axis (cm-1).
"""
t = np.arange(0, tmax + dt, dt)
numPoints = len(t)
wn = n | p.arange(numPoints) / (c0 * dt * numPoints)
return t, wn
|
def molarExtinction2AbsCS(eSpctr, IOR):
"""Convert molar extinction (cm-1 / M) to molecular absorption cross section (A**2 / molec).
See McHale, Resonance Raman Spectroscopy, Wiley, (2002), p. 545 or Myers & Mathies for details. The absorption cross section in solution has to be scaled by index of refraction unless the molar extinction has not been corrected.
:param array eSpctr: Extinction spectrum in (cm-1 / M).
:param float IOR: Index of refraction of surrounding solvent / medium.
:returns: Absorption spectrum in units of (A**2 / molec.), same shape as eSpcrt.
"""
return 1e3 * np.log(10.0) * eSpctr / 6.0221e23 * 1e8 * 1e8 / IOR
def diff2absRamanCS(diffRaCS, rho):
"""Convert the differential Raman cross section (A**2/molec sr) to absolute Raman cross section in (A**2 / molec) for a given depolarization ratio rho.
:param float diffRaCS: Differential Raman cross section (A**2/molec sr).
:param float rho: Associated depolarization ratio of this Raman mode.
:returns: Absolute Raman cross section in (A**2 / molec).
"""
return 8.0 * np.pi / 3.0 * (1.0 + 2.0 * rho) / (1.0 + rho) * diffRaCS
def getRamanSpectrum(wn, iEL, RMg, nquanta, sigmaR, dw=10.0, alpha=0):
"""
Convenience function to calculate the Raman spectrum. The spectrum is scattered power per infinitesimal frequency normalized to incident power times molecular density (cm-3) times path length (cm). See Myers, *Chem. Phys.* **180**, 215 (1994), Eq. 7 for details.
:param array wn: Wavenumber axis (Stokes shift, not electronic).
:param int iEL: Index into sigmaR corresponding to the pump energy of the laser.
:param array RMg: Ground state Raman frequencies
:param array nquanta: M x N array containing the quanta of the N possible Raman modes for the M Raman lines to calculate. Use :py:func:`numpy.identity` to just calculate the fundamentals. Possible values are 0, 1, 2.
:param array sigmaR: Array of M Raman cross sections that have been calculated by :py:func:`getCrossSections` (in A**2 / molec).
:param float dw: Phenomenological FWHM linewidth of the Raman lines in cm-1 (default = 10 cm-1).
:param float alpha: Line shape parameter to be used for the Raman spectrum:
- 1 = Gaussian
- 0 = Lorentzian (default)
:returns: Calculated Raman spectrum (same shape as wn).
"""
spectrum = np.zeros(len(wn))
if iEL < 0 or iEL >= len(sigmaR[0]):
print "Error: iEL is out of range!"
return spectrum
# iterate over all M modes
for i, nM in enumerate(nquanta):
# get frequency of this mode
wR = np.sum(nM * RMg)
# add Lorentzian part of lineshape
spectrum = spectrum + (1.0 - alpha) * sigmaR[i][iEL] * 1e-16 * (dw / (2.0 * np.pi * ((wn - wR)**2 + dw**2 / 4.0)))
# add Gaussian part of lineshape
spectrum = spectrum + alpha * sigmaR[i][iEL] * 1e-16 * ((2.0 * np.sqrt(np.log(2) / np.pi)) / dw * np.exp(-4.0 * np.log(2.0) * (wn - wR)**2 / dw**2))
return spectrum
# ------------------------------- |
ttm/indicadores-participativos | puxaTweets.py | Python | lgpl-3.0 | 5,174 | 0.024966 | #-*- coding: utf8 -*-
import pymongo, time as T, threading as t, sys
from twython import TwythonStreamer
from twython import Twython
from maccess import TW, mdc
HTAG=["#aao0","#arenaNETmundial","#Participabr"]
HTAG_=[i.replace("#","H") for i in HTAG]
class MyStreamer(TwythonStreamer):
def on_success(self, data):
if 'text' in data:
self.C.insert(data)
print data['text'].encode('utf-8')
def on_error(self, status_code, data):
print status_code
class AcompanhaTweet(t.Thread):
def __init__(self,hashtag="#aao0",connection_specs=TW[0],mongo_uri=mdc.u1,collection=None):
t.Thread.__init__(self)
self.hashtag=hashtag
if collection==None:
collection=hashtag.replace("#","HH")
self.collection=collection
self.mongo_collection =self.connectMongo(mongo_uri,collection)
self.twitter_connection=self.connectAPI(connection_specs)
self.cs=connection_specs
self._stop = t.Event()
print "iniciado", self.hashtag
def stop(self):
self._stop.set()
self._client.disconnect()
def connectMongo(self,mongo_uri,collection):
client=pymongo.MongoClient(mongo_uri)
self._client=client
db = client['sna']
self.mdb=db
C=db[collection]
print "conectado ao mongo", self.hashtag
return C
def connectAPI(self,cs):
c = Twython(app_key=cs.tak,
app_secret=cs.taks,
oauth_token=cs.tat,
oauth_token_secret=cs.tats)
print "conectado ao twitter", self.hashtag
return c
def run(self):
if self.mongo_collection.count(): # caso a coleção já exista
self.syncExisting()
else: # caso a coleção não exista
self.syncNonExisting()
self.startStreaming()
def startStreaming(self):
print "iniciando streaming", self.hashtag
cs=self.cs
stream=MyStreamer(cs.tak,cs.taks,cs.tat,cs.tats)
stream.C=self.mongo_collection
print "estabelecendo hashtag para o streaming", self.hashtag
stream.statuses.filter(track=self.hashtag)
def syncNonExisting(self):
tweets= self.twitter_connection.search(q=self.hashtag,count=100,result_type="recent")["statuses"][::-1]
self.ttweets=tweets
primeira= tweets[0]["id"] # mais antiga
ultima= tweets[-1]["id"]
print "sync no bd não existente", self.hashtag
self.theSync(primeira,ultima)
def syncExisting(self):
quantos=self.mongo_collection.count()
#self.query=query=self.mongo_collection.find()
self.query=query=self.mongo_collection.find({},{"id":1,"_id":0}).sort("id",pymongo.ASCENDING).limit(1)
primeira= query[0]["id"] # mais antiga
self.query=query=self.mongo_collection.find({},{"id":1,"_id":0}).sort("id",pymongo.DESCENDING).limit(1)
ultima= query[0]["id"]
#ultima= query[quantos-1]["id"]
print "sync no bd existente ", self.hashtag
self.theSync(primeira,ultima)
def theSync(self,primeira,ultima):
tweets_antes =self.todosDeAntes(max_id=primeira)
tweets_depois=self.todosDeDepois(since_id=ultima)
if len(tweets_antes):
if self.mongo_collection.count():
self.ttweets=[i for i in self.query]
tweets=tweets_antes+self.ttweets+tweets_depois
self.mdb.drop_collection(self.collection)
self.mongo_collection.insert(tweets)
elif len(tweets_depois):
self.mongo_c | ollection.insert(tweets_depois)
elif 'ttweets' in dir(self):
self.mongo_collection.insert(self.ttweets)
print "sync"
def todosDeDepois(self,si | nce_id):
tweets=[]
tweets_=self.buscaProgressiva(since_id=since_id)[::-1]
while len(tweets_):
tweets+=tweets_[::-1]
T.sleep(20)
tweets_=self.buscaProgressiva(since_id=tweets[-1]["id"])
print "puxados todos os tweets mais recentes ",self.hashtag
return tweets
def todosDeAntes(self,max_id):
tweets=[]
tweets_=self.buscaRetroativa(max_id=max_id)
while len(tweets_):
tweets+=tweets_
T.sleep(60)
tweets_=self.buscaRetroativa(max_id=tweets[-1]["id"])
print "puxados todos os tweets de antes ",self.hashtag
return tweets[::-1]
def buscaRetroativa(self,max_id):
tweets=self.twitter_connection.search(q=self.hashtag,count=100,max_id=max_id-1,result_type="recent")["statuses"]
print "puxado retroativo",len(tweets),self.hashtag
return tweets
def buscaProgressiva(self,since_id):
tweets = self.twitter_connection.search(q=self.hashtag,count=100,since_id=since_id,result_type="recent")["statuses"]
print "puxado progressivo",len(tweets),self.hashtag
return tweets
aa=AcompanhaTweet()
aa.start()
bb=AcompanhaTweet("#arenaNETmundial",TW[1])
bb.start()
cc=AcompanhaTweet("#Participabr",TW[2])
cc.start()
def texit():
global aa, bb, cc
#global cc
aa.stop()
bb.stop()
cc.stop()
|
NSLS-II-XPD/ipython_ophyd | archived/profile_collection-dev/startup/99-subtracted-tiff-exporter.py | Python | bsd-2-clause | 1,827 | 0.002189 | from time import sleep
from bluesky.callbacks.broker import LiveTiffExporter
from databroker import process
from bluesky import Msg
from bluesky.plans import DeltaScanPlan, DeltaListScanPlan
RE = gs.RE # an alias
def take_dark():
print('closing shutter...')
shctl1.put(0) # close shutter
sleep(2)
print('taking dark frame....')
uid, = RE(Count([pe1c]))
print('opening shutter...')
shctl1.put(1)
sleep(2)
return uid
def run(motor, x, start, stop, num_steps, loops, *, exposure=1, **metadata):
print('moving %s to initial position' % motor.name)
subs = [LiveTable(['pe1_stats1_total', motor.name]),
LivePlot('pe1_stats1_total', motor.name)]
motor.move(x)
pe1c.images_per_set.put(exposure // 0.1)
dark_uid = take_dark()
step | s = loops * list(np.linspace(start, stop, num=num_steps, endpoint=True))
plan = DeltaListScanPlan([pe1c], motor, steps)
uid = RE(plan, subs, dark_frame=dark_uid, **metadata)
sleep(3)
process(db[uid], exporter)
class SubtractedTiffExporter(LiveTiffExporter):
"Intercept image | s before saving and subtract dark image"
def start(self, doc):
# The metadata refers to the scan uid of the dark scan.
if 'dark_frame' not in doc:
raise ValueError("No dark_frame was recorded.")
uid = doc['dark_frame']
dark_header = db[uid]
self.dark_img, = get_images(db[uid], 'pe1_image')
super().start(doc)
def event(self, doc):
img = doc['data'][self.field]
subtracted_img = img - self.dark_img
doc['data'][self.field] = subtracted_img
super().event(doc)
template = "/home/xf28id1/xpdUser/tiff_base/LaB6_EPICS/{start.sa_name}_{start.scan_id}_step{event.seq_num}.tif"
exporter = SubtractedTiffExporter('pe1_image', template)
|
rodo/tsung-gis | build_scenario.py | Python | gpl-3.0 | 3,719 | 0.003227 | #!/usr/bin/env python3
"""
Generate all the xml files for each actions, the module are
defined in the src directory
Example :
build_directory.py -m map
"""
import os
import sys
from optparse import OptionParser
__version__ = "0.0.2"
def arg_parse():
""" Parse command line arguments """
arg_list = "-m module [-o OUTPUT_DIRFILE]"
usage = "Usage: %prog " + arg_list
parser = OptionParser(usage, version=__version__)
parser.add_option("-o", "--output_dir", dest="outputdir",
help="output directory",
default='.')
parser.add_option("-m", "--module", dest="module",
help="module",
action="append",
default=None)
parser.add_option("-f", "--force", dest="force",
help="overwrite files",
action="store_true",
default=False)
return parser.parse_args()[0]
def main(options):
move = ['north', 'west', 'south', 'east', 'first', 'random']
action = ['random']
zoom = ['less', 'more', 'random']
skeleton = """
<!--
Module : {0}
Action : {1}
-->
<setdynvars sourcetype="erlang" callback="{0}:{1}">
<var name="list_url" />
</setdynvars>
<foreach name="element" in="list_url">
<request subst="true">
<http url="/%%_element%%.png" method="GET" version="1.1"/>
</request>
</foreach>
"""
entity = '<!ENTITY {0}_{1}_{2} SYSTEM "{0}_{1}_{2}.xml">'
ents = []
for module in options.module:
for elmt in move:
render(options, module, 'move', elmt, skeleton)
ents.append(entity.format(module, 'move', elmt))
for elmt in zoom:
render(options, module, 'zoom', elmt, skeleton)
ents.append(entity.format(module, 'zoom', elmt))
for elmt in action:
render(options, module, 'action', elmt, skeleton)
ents.append(entity.format(module, 'action', elmt))
mainfile(options, ents)
def mainfile(options, ents):
"""Create or update the main file
"""
fpath = os.path.join(options.outputdir, "tsung.xml")
if not os.path.exists(fpath) or options.force:
print ("write {}".format(fpath))
write_file(fpath,
xmlheader().format('\n'.join(ents)))
else:
print ("{} exists, add -f to overwrite it".format(fpath))
def write_file(fpath, datas):
"""Write a file with datas
"""
with open(fpath, 'w') as f:
f.write(datas)
f.close()
def xmlheader():
"""xml header use to generate the main file
Return : (string)
"""
return """<?xml version="1.0"?>
<!DOCTYPE tsung SYSTEM "/usr/share/tsung/tsung-1.0.dtd"
[
{}
]
>
<tsung loglevel="notice" version="1.0">
<!-- Client side setup -->
<clients>
<client host="localhost" use_controller_vm="true"/>
</clients>
<!-- Server side setup -->
<servers>
<server host="myserver" port="80" type="tcp"></server>
</servers>
&options;
| &load;
<sessions>
<session name="init">
</session>
</sessions>
</tsung>"""
def render(options, module, action, subaction, skeleton):
"""Write an entity file
"""
name = '{}_{}_{}.xml'.format(module, action, subaction)
fpath = os.path.join(options.outputdir, name)
print ("write {}".format(fpath))
write_file(fpath, skeleton.format(module, name))
if __name__ == '__main__':
opts = arg_parse()
if not os.path.isdir(opts.outputdir):
prin | t ('{} directory does not exists'.format(opts.outputdir))
sys.exit(1)
if opts.module is not None:
main(opts)
else:
print("please add at least one module with -m")
sys.exit(1)
|
shub0/algorithm-data-structure | python/PopSugar.py | Python | bsd-3-clause | 1,436 | 0.002786 | #! /usr/bin/python
class Seat:
# @param index, an integer
def __init__(self, index):
self.index = index
self.next = None
class Room:
# @param num_seat, an integer
def __init__(self, num_seat):
if num_seat < 1:
raise VauleError('invalid room config') |
seat = Seat(1)
constructor = seat
for index in range(2, num_seat + 1):
constructor.next = Seat(index)
constructor = constructor.next
constructor.next = | seat
self.token = seat
self.num_seat = num_seat
# @param start_index, an integer
def initial_token(self, start_index=1):
while self.token.next.index != start_index:
self.token = self.token.next
# @return a boolean
def remove_seat(self):
if self.num_seat == 1:
self.survivor = self.token.index
return False
self.token.next = self.token.next.next
self.num_seat -= 1
return True
def move_token(self):
self.token = self.token.next
# @param num_seat, an integer
# @return an integer
def play_game(num_seat, start_index):
# Initial a room with given number of seats
room = Room(num_seat)
# Put token at the given position
room.initial_token(start_index)
while room.remove_seat():
room.move_token()
return room.survivor
if __name__ == '__main__':
print play_game(100)
|
unnikrishnankgs/va | venv/lib/python3.5/site-packages/IPython/testing/tools.py | Python | bsd-2-clause | 13,960 | 0.003438 | """Generic testing tools.
Authors
-------
- Fernando Perez <Fernando.Perez@berkeley.edu>
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import os
import re
import sys
import tempfile
from contextlib import contextmanager
from io import StringIO
from subprocess import Popen, PIPE
from unittest.mock import patch
try:
# These tools are used by parts of the runtime, so we make the nose
# dependency optional at this point. Nose is a hard dependency to run the
# test suite, but NOT to use ipython itself.
import nose.tools as nt
has_nose = True
except ImportError:
has_nose = False
from traitlets.config.loader import Config
from IPython.utils.process import get_output_error_code
from IPython.utils.text import list_strings
from IPython.utils.io import temp_pyfile, Tee
from IPython.utils import py3compat
from IPython.utils.encoding import DEFAULT_ENCODING
from . import decorators as dec
from . import skipdoctest
# The docstring for full_path doctests differently on win32 (different path
# separator) so just skip the doctest there. The example remains informative.
doctest_deco = skipdoctest.skip_doctest if sys.platform == 'win32' else dec.null_deco
@doctest_deco
def full_path(startPath,files):
"""Make full paths for all the listed files, based on startPath.
Only the base part of startPath is kept, since this routine is typically
used with a script's ``__file__`` variable as startPath. The base of startPath
is then prepended to all the listed files, forming the output list.
Parameters
----------
startPath : string
Initial path to use as the base for the results. This path is split
using os.path.split() and only its first component is kept.
files : string or list
One or more files.
Examples
--------
>>> full_path('/foo/bar.py',['a.txt','b.txt'])
['/foo/ | a.txt', '/f | oo/b.txt']
>>> full_path('/foo',['a.txt','b.txt'])
['/a.txt', '/b.txt']
If a single file is given, the output is still a list::
>>> full_path('/foo','a.txt')
['/a.txt']
"""
files = list_strings(files)
base = os.path.split(startPath)[0]
return [ os.path.join(base,f) for f in files ]
def parse_test_output(txt):
"""Parse the output of a test run and return errors, failures.
Parameters
----------
txt : str
Text output of a test run, assumed to contain a line of one of the
following forms::
'FAILED (errors=1)'
'FAILED (failures=1)'
'FAILED (errors=1, failures=1)'
Returns
-------
nerr, nfail
number of errors and failures.
"""
err_m = re.search(r'^FAILED \(errors=(\d+)\)', txt, re.MULTILINE)
if err_m:
nerr = int(err_m.group(1))
nfail = 0
return nerr, nfail
fail_m = re.search(r'^FAILED \(failures=(\d+)\)', txt, re.MULTILINE)
if fail_m:
nerr = 0
nfail = int(fail_m.group(1))
return nerr, nfail
both_m = re.search(r'^FAILED \(errors=(\d+), failures=(\d+)\)', txt,
re.MULTILINE)
if both_m:
nerr = int(both_m.group(1))
nfail = int(both_m.group(2))
return nerr, nfail
# If the input didn't match any of these forms, assume no error/failures
return 0, 0
# So nose doesn't think this is a test
parse_test_output.__test__ = False
def default_argv():
"""Return a valid default argv for creating testing instances of ipython"""
return ['--quick', # so no config file is loaded
# Other defaults to minimize side effects on stdout
'--colors=NoColor', '--no-term-title','--no-banner',
'--autocall=0']
def default_config():
"""Return a config object with good defaults for testing."""
config = Config()
config.TerminalInteractiveShell.colors = 'NoColor'
config.TerminalTerminalInteractiveShell.term_title = False,
config.TerminalInteractiveShell.autocall = 0
f = tempfile.NamedTemporaryFile(suffix=u'test_hist.sqlite', delete=False)
config.HistoryManager.hist_file = f.name
f.close()
config.HistoryManager.db_cache_size = 10000
return config
def get_ipython_cmd(as_string=False):
"""
Return appropriate IPython command line name. By default, this will return
a list that can be used with subprocess.Popen, for example, but passing
`as_string=True` allows for returning the IPython command as a string.
Parameters
----------
as_string: bool
Flag to allow to return the command as a string.
"""
ipython_cmd = [sys.executable, "-m", "IPython"]
if as_string:
ipython_cmd = " ".join(ipython_cmd)
return ipython_cmd
def ipexec(fname, options=None, commands=()):
"""Utility to call 'ipython filename'.
Starts IPython with a minimal and safe configuration to make startup as fast
as possible.
Note that this starts IPython in a subprocess!
Parameters
----------
fname : str
Name of file to be executed (should have .py or .ipy extension).
options : optional, list
Extra command-line flags to be passed to IPython.
commands : optional, list
Commands to send in on stdin
Returns
-------
(stdout, stderr) of ipython subprocess.
"""
if options is None: options = []
cmdargs = default_argv() + options
test_dir = os.path.dirname(__file__)
ipython_cmd = get_ipython_cmd()
# Absolute path for filename
full_fname = os.path.join(test_dir, fname)
full_cmd = ipython_cmd + cmdargs + [full_fname]
env = os.environ.copy()
# FIXME: ignore all warnings in ipexec while we have shims
# should we keep suppressing warnings here, even after removing shims?
env['PYTHONWARNINGS'] = 'ignore'
# env.pop('PYTHONWARNINGS', None) # Avoid extraneous warnings appearing on stderr
for k, v in env.items():
# Debug a bizarre failure we've seen on Windows:
# TypeError: environment can only contain strings
if not isinstance(v, str):
print(k, v)
p = Popen(full_cmd, stdout=PIPE, stderr=PIPE, stdin=PIPE, env=env)
out, err = p.communicate(input=py3compat.str_to_bytes('\n'.join(commands)) or None)
out, err = py3compat.bytes_to_str(out), py3compat.bytes_to_str(err)
# `import readline` causes 'ESC[?1034h' to be output sometimes,
# so strip that out before doing comparisons
if out:
out = re.sub(r'\x1b\[[^h]+h', '', out)
return out, err
def ipexec_validate(fname, expected_out, expected_err='',
options=None, commands=()):
"""Utility to call 'ipython filename' and validate output/error.
This function raises an AssertionError if the validation fails.
Note that this starts IPython in a subprocess!
Parameters
----------
fname : str
Name of the file to be executed (should have .py or .ipy extension).
expected_out : str
Expected stdout of the process.
expected_err : optional, str
Expected stderr of the process.
options : optional, list
Extra command-line flags to be passed to IPython.
Returns
-------
None
"""
import nose.tools as nt
out, err = ipexec(fname, options, commands)
#print 'OUT', out # dbg
#print 'ERR', err # dbg
# If there are any errors, we must check those befor stdout, as they may be
# more informative than simply having an empty stdout.
if err:
if expected_err:
nt.assert_equal("\n".join(err.strip().splitlines()), "\n".join(expected_err.strip().splitlines()))
else:
raise ValueError('Running file %r produced error: %r' %
(fname, err))
# If no errors or output on stderr was expected, match stdout
nt.assert_equal("\n".join(out.strip().splitlines()), "\n".join(expected_out.strip().splitlines()))
class TempFileMixin(object):
"""Utility class to create temporary Python/IPython files.
Meant as a mixin class for test cases."""
def mktmp(self, src, ext='.py'):
"""Make a valid python temp |
listyque/TACTIC-Handler | thlib/ui_classes/ui_drop_plate_classes.py | Python | epl-1.0 | 18,599 | 0.001505 | # file ui_drop_plate_classes.py
import os
from thlib.side.Qt import QtWidgets as QtGui
from thlib.side.Qt import QtGui as Qt4Gui |
from thlib.side.Qt import QtC | ore
from thlib.environment import env_mode, env_inst, env_write_config, env_read_config
import thlib.global_functions as gf
import thlib.ui.checkin_out.ui_drop_plate as ui_drop_plate
import thlib.ui.checkin_out.ui_drop_plate_config as ui_drop_plate_config
from thlib.ui_classes.ui_custom_qwidgets import Ui_horizontalCollapsableWidget
#reload(ui_drop_plate)
#reload(ui_drop_plate_config)
class Ui_matchingTemplateConfigWidget(QtGui.QDialog, ui_drop_plate_config.Ui_matchingTemplateConfig):
def __init__(self, parent=None):
super(self.__class__, self).__init__(parent=parent)
self.current_templates_list = []
self.setupUi(self)
self.create_ui()
def create_ui(self):
self.setWindowTitle('Matching Template Config')
self.fill_templates()
self.templatesTreeWidget.resizeColumnToContents(0)
self.templatesTreeWidget.resizeColumnToContents(1)
self.templatesTreeWidget.resizeColumnToContents(2)
self.templatesTreeWidget.resizeColumnToContents(3)
self.create_drop_plate_config_widget()
self.readSettings()
def create_drop_plate_config_widget(self):
from thlib.ui_classes.ui_conf_classes import Ui_checkinOptionsPageWidget
self.drop_plate_config_widget = Ui_checkinOptionsPageWidget(self)
self.drop_plate_config_widget.snapshotsSavingOptionsGroupBox.setHidden(True)
self.drop_plate_config_widget.checkinMiscOptionsGroupBox.setHidden(True)
self.drop_plate_config_widget.defaultRepoPathsGroupBox.setHidden(True)
self.drop_plate_config_widget.customRepoPathsGroupBox.setHidden(True)
self.configGridLayout.addWidget(self.drop_plate_config_widget)
self.configGridLayout.setContentsMargins(0, 0, 0, 9)
def fill_templates(self):
templates = [
(True, '$FILENAME'),
(True, '$FILENAME.$EXT'),
(True, '$FILENAME.$FRAME.$EXT'),
(True, '$FILENAME_$UDIM.$EXT'),
(True, '$FILENAME_$UV.$EXT'),
(True, '$FILENAME.$FRAME_$UDIM.$EXT'),
(True, '$FILENAME.$FRAME_$UV.$EXT'),
(True, '$FILENAME_$UV.$FRAME.$EXT'),
(False, '$FILENAME_$LAYER.$EXT'),
(False, '$FILENAME.$LAYER.$EXT'),
(False, '$FILENAME_$LAYER.$FRAME.$EXT'),
(False, '$FILENAME.$LAYER.$FRAME.$EXT'),
(False, '$FILENAME.$LAYER_$UV.$EXT'),
(False, '$FILENAME.$LAYER.$FRAME_$UV.$EXT'),
(False, '$FILENAME.$LAYER_$UV.$FRAME.$EXT'),
(False, '$FILENAME.$LAYER_$UDIM.$EXT'),
(False, '$FILENAME.$LAYER.$FRAME_$UDIM.$EXT'),
(False, '$FILENAME.$LAYER_$UDIM.$FRAME.$EXT'),
(False, '$FILENAME_$LAYER.$FRAME_$UDIM.$EXT'),
]
# templates = [
# (True, '$FILENAME'),
# (True, '$FILENAME.$EXT'),
# (True, '$FILENAMEFrame$FRAME.$EXT'),
# ]
for enabled, template in templates:
tree_item = QtGui.QTreeWidgetItem()
if enabled:
tree_item.setCheckState(0, QtCore.Qt.Checked)
self.current_templates_list.append(template)
else:
tree_item.setCheckState(0, QtCore.Qt.Unchecked)
tree_item.setText(1, template)
match_template = gf.MatchTemplate([template], padding=self.get_min_padding())
tree_item.setText(2, match_template.get_preview_string())
tree_item.setText(3, match_template.get_type_string())
if template in ['$FILENAME', '$FILENAME.$EXT']:
tree_item.setDisabled(True)
self.templatesTreeWidget.addTopLevelItem(tree_item)
def get_min_padding(self):
return 3
# return int(self.minFramesPaddingSpinBox.value())
def get_templates_list(self):
return self.current_templates_list
def set_settings_from_dict(self, settings_dict=None):
if settings_dict:
self.move(settings_dict['pos'][0], settings_dict['pos'][1])
self.resize(settings_dict['size'][0], settings_dict['size'][1])
def get_settings_dict(self):
settings_dict = dict()
settings_dict['pos'] = self.pos().toTuple()
settings_dict['size'] = self.size().toTuple()
return settings_dict
def readSettings(self):
self.set_settings_from_dict(env_read_config(filename='ui_drop_plate', unique_id='ui_main', long_abs_path=True))
def writeSettings(self):
env_write_config(self.get_settings_dict(), filename='ui_drop_plate', unique_id='ui_main', long_abs_path=True)
def hideEvent(self, event):
self.writeSettings()
event.accept()
class Ui_dropPlateWidget(QtGui.QWidget, ui_drop_plate.Ui_dropPlate):
def __init__(self, parent=None):
super(self.__class__, self).__init__(parent=parent)
self.tree_items = []
self.setupUi(self)
self.setAcceptDrops(True)
self.create_ui()
self.create_config_widget()
self.controls_actions()
def threads_fill_items(self, kwargs, exec_after_added=None):
worker = env_inst.local_pool.add_task(self.get_files_objects, kwargs)
worker.result.connect(self.append_items_to_tree)
if exec_after_added:
worker.finished.connect(exec_after_added)
worker.error.connect(gf.error_handle)
worker.start()
def create_ui(self):
self.clearPushButton.setIcon(gf.get_icon('trash'))
self.configPushButton.setIcon(gf.get_icon('settings', icons_set='mdi'))
self.create_progress_bar_widget()
self.create_collapsable_toolbar()
self.setAcceptDrops(True)
if env_mode.get_mode() == 'standalone':
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
self.setSizePolicy(sizePolicy)
self.setMinimumWidth(300)
self.move_controls_to_collapsable_toolbar()
self.customize_ui()
def customize_ui(self):
self.dropTreeWidget.setSelectionMode(QtGui.QAbstractItemView.ExtendedSelection)
self.dropTreeWidget.setDragDropMode(QtGui.QAbstractItemView.DragOnly)
self.dropTreeWidget.setStyleSheet(gf.get_qtreeview_style())
def create_progress_bar_widget(self):
self.progressBar = QtGui.QProgressBar()
self.progressBar.setMaximum(100)
self.progressBarLayout.addWidget(self.progressBar)
self.progressBar.setTextVisible(True)
self.progressBar.setVisible(False)
def create_config_widget(self):
self.config_widget = Ui_matchingTemplateConfigWidget(self)
def create_collapsable_toolbar(self):
self.collapsable_toolbar = Ui_horizontalCollapsableWidget()
self.collapsable_toolbar.setText('Quick Config')
self.buttons_layout = QtGui.QHBoxLayout()
self.buttons_layout.setSpacing(0)
self.buttons_layout.setContentsMargins(0, 0, 0, 0)
self.collapsable_toolbar.setLayout(self.buttons_layout)
self.collapsable_toolbar.setCollapsed(True)
self.expandingLayout.addWidget(self.collapsable_toolbar)
def add_widget_to_collapsable_toolbar(self, widget):
self.buttons_layout.addWidget(widget)
def move_controls_to_collapsable_toolbar(self):
self.add_widget_to_collapsable_toolbar(self.groupCheckinCheckBox)
self.add_widget_to_collapsable_toolbar(self.keepFileNameCheckBox)
self.add_widget_to_collapsable_toolbar(self.includeSubfoldersCheckBox)
self.collapsable_toolbar.setCollapsed(False)
def controls_actions(self):
self.clearPushButton.clicked.connect(self.clear_tree_widget)
self.configPushButton.clicked.connect(self.config_widget.exec_)
# self.groupCheckinCheckBox.stateChanged.connect(self.enable_group_checkin)
self.create_files_tree_context_menu()
def clear_tree_widget(self):
self.dropTreeWidget.clear()
self.tree_ite |
pipex/gitbot | config.py | Python | apache-2.0 | 1,016 | 0.002953 | from __future__ import absolute_import
from __future__ import unicode_literals
class Config(object):
DEBUG = False
TESTING = False
# Host for the redis server
REDIS = 'redis'
# Do not push this to a public repo
SLACK_DEFAULT_CHANNEL = '#general'
SLACK_DEVELOPERS_CHANNEL = '#developers'
# Define the applica | tion directory
import os
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
# Log file (the directory must exist)
APPLICATION_LOG = os.path.j | oin(BASE_DIR, 'log', 'application.log')
# Secret key for flask sessions and CSRF protection
SECRET_KEY = "secret key that you need to change, seriously!"
class ProductionConfig(Config):
SLACK_TOKEN = 'here you put the slack token'
# Gitlab hook url
GITLAB_HOOK = '/hooks/bWxNGVQij55cCZigeKDlXf9P6L14bKc4AhdPmPL5mEc='
class DevelopmentConfig(Config):
DEBUG = True
class TestingConfig(Config):
TESTING = True
DEBUG = True
# Default configuration
default = DevelopmentConfig
|
Aerojspark/PyFR | pyfr/readers/gmsh.py | Python | bsd-3-clause | 6,856 | 0.000729 | # -*- coding: utf-8 -*-
from collections import defaultdict
import re
import numpy as np
from pyfr.readers import BaseReader, NodalMeshAssembler
from pyfr.readers.nodemaps import GmshNodeMaps
def msh_section(mshit, section):
endln = '$End{}\n'.format(section)
endix = int(next(mshit)) - 1
for i, l in enumerate(mshit):
if l == endln:
raise ValueError('Unexpected end of section $' + section)
yield l.strip()
if i == endix:
break
else:
raise ValueError('Unexpected EOF')
if next(mshit) != endln:
raise ValueError('Expected $End' + section)
class GmshReader(BaseReader):
# Supported file types and extensions
name = 'gmsh'
extn = ['.msh']
# Gmsh element types to PyFR type (petype) and node counts
_etype_map = {
1: ('line', 2), 8: ('line', 3), 26: ('line', 4), 27: ('line', 5),
2: ('tri', 3), 9: ('tri', 6), 21: ('tri', 10), 23: ('tri', 15),
3: ('quad', 4), 10: ('quad', 9), 36: ('quad', 16), 37: ('quad', 25),
4: ('tet', 4), 11: ('tet', 10), 29: ('tet', 20), 30: ('tet', 35),
5: ('hex', 8), 12: ('hex', 27), 92: ('hex', 64), 93: ('hex', 125),
6: ('pri', 6), 13: ('pri', 18), 90: ('pri', 40), 91: ('pri', 75),
7: ('pyr', 5), 14: ('pyr', 14), 118: ('pyr', 30), 119: ('pyr', 55)
}
# First-order node numbers associated with each element face
_petype_fnmap = {
'tri': {'line': [[0, 1], [1, 2], [2, 0]]},
'quad': {'line': [[0, 1], [1, 2], [2, 3], [3, 0]]},
'tet': {'tri': [[0, 1, 2], [0, 1, 3], [0, 2, 3], [1, 2, 3]]},
'hex': {'quad': [[0, 1, 2, 3], [0, 1, 4, 5], [1, 2, 5, 6],
[2, 3, 6, 7], [0, 3, 4, 7], [4, 5, 6, 7]]},
'pri': {'quad': [[0, 1, 3, 4], [1, 2, 4, 5], [0, 2, 3, 5]],
'tri': [[0, 1, 2], [3, 4, 5]]},
'pyr': {'quad': [[0, 1, 2, 3]],
'tri': [[0, 1, 4], [1, 2, 4], [2, 3, 4], [0, 3, 4]]}
}
# Mappings between the node ordering of PyFR and that of Gmsh
_nodemaps = GmshNodeMaps
def __init__(self, msh):
if | isinstance(msh, str):
msh = open(msh)
# Get an iterator over the lines of the mesh
mshit = iter(msh)
# Requir | ed section readers
sect_map = {'MeshFormat': self._read_mesh_format,
'Nodes': self._read_nodes,
'Elements': self._read_eles,
'PhysicalNames': self._read_phys_names}
req_sect = frozenset(sect_map)
# Seen sections
seen_sect = set()
for l in filter(lambda l: l != '\n', mshit):
# Ensure we have encountered a section
if not l.startswith('$'):
raise ValueError('Expected a mesh section')
# Strip the '$' and '\n' to get the section name
sect = l[1:-1]
# If the section is known then read it
if sect in sect_map:
sect_map[sect](mshit)
seen_sect.add(sect)
# Else skip over it
else:
endsect = '$End{}\n'.format(sect)
for el in mshit:
if el == endsect:
break
else:
raise ValueError('Expected $End' + sect)
# Check that all of the required sections are present
if seen_sect != req_sect:
missing = req_sect - seen_sect
raise ValueError('Required sections: {} not found'
.format(missing))
def _read_mesh_format(self, mshit):
ver, ftype, dsize = next(mshit).split()
if ver != '2.2':
raise ValueError('Invalid mesh version')
if ftype != '0':
raise ValueError('Invalid file type')
if dsize != '8':
raise ValueError('Invalid data size')
if next(mshit) != '$EndMeshFormat\n':
raise ValueError('Expected $EndMeshFormat')
def _read_phys_names(self, msh):
# Physical entities can be divided up into:
# - fluid elements ('the mesh')
# - boundary faces
# - periodic faces
self._felespent = None
self._bfacespents = {}
self._pfacespents = defaultdict(list)
# Seen physical names
seen = set()
# Extract the physical names
for l in msh_section(msh, 'PhysicalNames'):
m = re.match(r'(\d+) (\d+) "((?:[^"\\]|\\.)*)"$', l)
if not m:
raise ValueError('Malformed physical entity')
pent, name = int(m.group(2)), m.group(3).lower()
# Ensure we have not seen this name before
if name in seen:
raise ValueError('Duplicate physical name: {}'.format(name))
# Fluid elements
if name == 'fluid':
self._felespent = pent
# Periodic boundary faces
elif name.startswith('periodic'):
p = re.match(r'periodic[ _-]([a-z0-9]+)[ _-](l|r)$', name)
if not p:
raise ValueError('Invalid periodic boundary condition')
self._pfacespents[p.group(1)].append(pent)
# Other boundary faces
else:
self._bfacespents[name] = pent
seen.add(name)
if self._felespent is None:
raise ValueError('No fluid elements in mesh')
if any(len(pf) != 2 for pf in self._pfacespents.values()):
raise ValueError('Unpaired periodic boundary in mesh')
def _read_nodes(self, msh):
self._nodepts = nodepts = {}
for l in msh_section(msh, 'Nodes'):
nv = l.split()
nodepts[int(nv[0])] = np.array([float(x) for x in nv[1:]])
def _read_eles(self, msh):
elenodes = defaultdict(list)
for l in msh_section(msh, 'Elements'):
# Extract the raw element data
elei = [int(i) for i in l.split()]
enum, etype, entags = elei[:3]
etags, enodes = elei[3:3 + entags], elei[3 + entags:]
if etype not in self._etype_map:
raise ValueError('Unsupported element type {}'.format(etype))
# Physical entity type (used for BCs)
epent = etags[0]
elenodes[etype, epent].append(enodes)
self._elenodes = {k: np.array(v) for k, v in elenodes.items()}
def _to_raw_pyfrm(self):
# Assemble a nodal mesh
maps = self._etype_map, self._petype_fnmap, self._nodemaps
pents = self._felespent, self._bfacespents, self._pfacespents
mesh = NodalMeshAssembler(self._nodepts, self._elenodes, pents, maps)
rawm = {}
rawm.update(mesh.get_connectivity())
rawm.update(mesh.get_shape_points())
return rawm
|
tensorflow/lingvo | docs/apidoc/conf.py | Python | apache-2.0 | 6,969 | 0.001004 | # Lint as: python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Configuration file for the Sphinx documentation builder.
This file does only contain a selection of the most common options. For a
full list see the documentation:
http://www.sphinx-doc.org/en/master/config
"""
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, u'/tmp/lingvo/lingvo')
# -- Project information -----------------------------------------------------
project = u'Lingvo'
copyright = u'2018'
author = u''
# The short X.Y version
version = u''
# The full version, including alpha/beta/rc tags
release = u''
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.mathjax',
'sphinx.ext.napoleon', 'sphinx.ext.todo', 'sphinx.ext.viewcode'
]
autodoc_default_flags = [
'members', 'undoc-members', 'private-members', 'show-inheritance'
]
autodoc_member_order = 'bysource'
napoleon_google_docstring = True
default_role = 'py:obj'
intersphinx_mapping = {
'python': ('https://docs.python.org/3.7', None),
'numpy': ('http://numpy.org/doc/stable/', None),
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
from docutils.transforms import Transform
from recommonmark.parser import CommonMarkParser
source_parsers = {
'.md': CommonMarkParser,
}
source_suffix = ['.rst', '.md']
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'lingvodoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'lingvo.tex', u'Lingvo Documentation', u'', | 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, 'lingvo', u'Lingvo Documentation', [author], 1)]
# -- Options for Texinfo output ------------------------ | ----------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'lingvo', u'Lingvo Documentation', author, 'Lingvo',
'One line description of project.', 'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
class ResetFlags(Transform):
default_priority = 999
def apply(self):
from absl import flags # pylint: disable=g-import-not-at-top
for flag in list(flags.FLAGS):
if flag not in ('showprefixforinfo',):
delattr(flags.FLAGS, flag)
def setup(app):
app.add_transform(ResetFlags)
|
chnops/code | fastq_to_fasta.py | Python | mit | 177 | 0.00565 | import os
import sys
from Bio import SeqIO
f = open(sys.argv[1], 'rU')
out = open(sys.argv[2], 'w')
for | records in SeqIO.pars | e(f, 'fastq'):
SeqIO.write(records, out, 'fasta')
|
sboily/xivo-admin-ivr-plugins | xivo_ivr/plugins/home/load.py | Python | gpl-3.0 | 148 | 0.006757 | from xivo_admin import BasePlugi | n
from views import home
class XiVOHome(BasePlugin):
def load(self, app):
app.register_b | lueprint(home)
|
dgu-dna/DNA-Bot | apps/delete_memo.py | Python | mit | 1,080 | 0 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from apps.decorators import on_command
from apps.slackutils import isNumber
from time import localtime, strftime
import json
CACHE_DEFAULT_URL = './apps/memo_cache/memo_cache.json'
@on_command(['!메모삭제', '!ㅁㅁㅅㅈ', '!aatw'])
def run(robot, channel, tokens, user, command):
'''메모 지워줌'''
token_count = len(tokens)
user = str(user)
if token_count < 1:
return channel, '사용법) !메모삭제 <메모 번호>'
del_line = []
for num in tokens:
if(isNumber(num)):
del_line.append(int(num))
del_line.sort(reverse=True)
jdat = json.loads(open(CACHE_DEFAULT_URL).read())
if del_line[0] > len(jdat[user]):
return channel, '그건 안댐;'
for l | ine in del_line:
del jdat[user][l | ine - 1]
with open(CACHE_DEFAULT_URL, 'w') as fp:
json.dump(jdat, fp, indent=4)
del_line = map(lambda s: str(s), del_line)
msg = '<' + ', '.join(sorted(del_line)) + '> 메모를 삭제 했음.'
return channel, msg
|
ingadhoc/demo | demo_simple/__manifest__.py | Python | agpl-3.0 | 3,001 | 0 | ##############################################################################
#
# Copyright (C) 2015 ADHOC SA (http://www.adhoc.com.ar)
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either ve | rsion 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licens | es/>.
#
##############################################################################
{
'name': 'Demo Simple',
'version': '13.0.1.0.0',
'category': 'Tools',
'sequence': 14,
'summary': '',
'author': 'ADHOC SA',
'website': 'www.adhoc.com.ar',
'license': 'AGPL-3',
'images': [
],
'depends': [
'l10n_ar_demo',
# 'l10n_ar_stock',
# 'l10n_ar_account_tax_settlement',
# 'l10n_ar_account_reports',
'account_accountant',
# 'account_multic_fix',
# 'account_debt_management',
'base_currency_inverse_rate',
# 'l10n_ar_aeroo_purchase',
# 'l10n_ar_aeroo_sale',
# 'l10n_ar_aeroo_stock',
# 'l10n_ar_aeroo_payment_group',
'l10n_ar_bank',
# 'product_catalog_aeroo_report_public_categ',
# 'product_price_taxes_included',
'purchase_quotation_products',
'sale_quotation_products',
# 'project_description',
# 'l10n_ar_website_sale',
'account_transfer_unreconcile',
'purchase_subscription',
# 'payment_todopago',
'account_accountant_ux',
'account_ux',
'base_ux',
'helpdesk_timesheet_ux',
'hr_timesheet_ux',
'helpdesk_ux',
# 'hr_timesheet_attendance_ux',
# 'stock_ux',
'purchase_ux',
'project_ux',
# 'sale_stock_ux',
'sale_ux',
'product_ux',
'sale_subscription_ux',
# 'account_multicompany_ux',
'sale_timesheet_ux',
# 'purchase_multic_fix',
# 'sale_stock_multic_fix',
'web_decimal_numpad_dot',
# 'mail_internal',
# oca
# 'stock_picking_invoice_link',
'mail_tracking',
'mass_editing',
# 'web_advanced_search',
# 'web_search_with_and',
# 'stock_no_negative',
# odoo modules
'stock',
'hr_attendance',
'purchase',
'project',
],
'data': [
],
'demo': [
'users_data.xml',
],
'test': [
],
'installable': True,
'auto_install': False,
'application': False,
}
|
tdda/tdda | tdda/constraints/pdverify.py | Python | mit | 126 | 0.007937 | # -*- coding: utf-8 -*-
"""
Compatibility module for TDDA Pandas constraints.
"""
from | tdda.constraints.pd.verify import | *
|
gdanezis/rousseau-chain | hippiehug-package/tests/test_tree.py | Python | bsd-2-clause | 5,884 | 0.03416 | from hippiehug import RedisStore, Tree, Leaf, Branch
import pytest
## ============== TESTS ===================
def test_evidence():
t = Tree()
# Test positive case
t.add(b"Hello", b"Hello")
t.add(b"World", b"World")
root, E = t.evidence(b"World")
assert len(E) == 2
store = dict((e.identity(), e) for e in E)
t2 = Tree(store, root)
assert t2.is_in(b"World")
def test_store(rstore):
l = Leaf(b"Hello", b"Hello")
rstore[l.identity()] = l
assert rstore[l.identity()].identity() == l.identity()
def test_store_tree(rstore):
t = Tree(store=rstore)
from os import urandom
for _ in range(100):
item = urandom(32)
t.add(item, item)
assert t.is_in(item)
assert not t.is_in(urandom(32))
def test_leaf_isin():
l = Leaf(b"Hello", b"Hello")
store = {l.identity() : l}
b = l.add(store, b"Woitemrld", b"Woitemrld")
assert l.is_in(store, b"Hello", b"Hello")
def test_leaf_isin_map():
l = Leaf(item=b"Hello", key=b"World")
store = {l.identity() : l}
b = l.add(store, b"World", b"World")
assert l.is_in(store, item=b"Hello", key=b"World")
def test_Branch_isin():
l = Leaf(b"Hello", b"Hello")
store = {l.identity() : l}
b = l.add(store, b"World", b"World")
assert b.is_in(store, b"Hello", b"Hello")
assert b.is_in(store, b"World", b"World")
def test_Branch_isin_map():
l = Leaf(item=b"Hello", key=b"A")
store = {l.identity() : l}
b = l.add(store, item=b"World", key=b"B")
assert b.is_in(store, b"Hello", b"A")
assert b.is_in(store, b"World", b"B")
assert not b.is_in(store, b"World", b"C")
def test_Branch_multi():
l = Leaf(b"Hello", b"Hello")
store = {l.identity() : l}
b = l.multi_add(store, [b"B", b"C"], [b"B", b"C"])
b.check(store)
assert b.is_in(store, b"B", b"B")
assert b.is_in(store, b"C", b"C")
assert b.is_in(store, b"Hello", b"Hello")
def test_Branch_add():
l = Leaf(b"Hello", b"Hello")
store = {l.identity() : l}
b = l.add(store, b"World", b"World")
b2 = b.add(store, b"Doom", b"Doom")
assert isinstance(b2, Branch)
assert b2.left_branch in store
assert b2.right_branch in store
assert b2.identity() in store
b2.check(store)
def test_add_like_a_monkey():
root = Leaf(b"Hello",b"Hello")
store = {root.identity() : root}
from os import urandom
for _ in range(100):
item = urandom(32)
root = root.add(store, item, item)
root.check(store)
assert root.is_in(store, item, item)
def test_Leaf_add():
l = Leaf(b"Hello", b"Hello")
store = {l.identity() : l}
b = l.add(store, b"World", b"World")
assert isinstance(b, Branch)
assert b.left_branch in store
assert b.right_branch in store
assert b.identity() in store
assert store[b.left_branch].item <= b.pivot
assert store[b.right_branch].item > b.pivot
def test_Tree():
t = Tree()
def test_add_isin():
t = Tree()
# Test positive case
t.add(b"Hello")
assert t.is_in(b"Hello") == True
# Infix operator
assert b"Hello" in t
def test_fail_isin():
t = Tree()
# Test negative case
assert t.is_in(b"World") == False
def test_massive():
t = Tree()
from os import urandom
for _ in range(100):
item = urandom(32)
t.add(item)
assert t.is_in(item)
assert not t.is_in(urandom(32))
def test_multi_add():
t = Tree()
from os import urandom
X = [urandom(32) for _ in range(100)]
t.multi_add(X)
for x in X:
assert x in t
X = [urandom(32) for _ in range(100)]
t.multi_add(X)
for x in X:
assert x in t
Y = [urandom(32) for _ in range(100)]
for y in Y:
assert y not in t
def test_multi_small():
t = Tree()
t.multi_add([b"Hello", b"World"])
assert b"Hello" in t
assert b"World" in t
t.multi_add([b"A", b"B", b"C", b"D", b"E", b"F"])
assert b"E" in t
assert b"F" in t
def t | est_multi_test | ():
t = Tree()
t.multi_add([b"Hello", b"World"])
assert t.multi_is_in([b"Hello", b"World"]) == [True, True]
answer, head, evidence = t.multi_is_in([b"Hello", b"World"], evidence=True)
assert answer == [True, True]
e = dict((k.identity(), k) for k in evidence)
t2 = Tree(e, head)
assert t2.multi_is_in([b"Hello", b"World"]) == [True, True]
def test_lookup():
l = Leaf(item=b"Hello", key=b"A")
store = {l.identity() : l}
b = l.add(store, item=b"World", key=b"B")
assert b.is_in(store, b"Hello", b"A")
assert b.is_in(store, b"World", b"B")
assert not b.is_in(store, b"World", b"C")
assert b.lookup(store, b"B") == (b"B", b"World")
try:
b.lookup(store, b"B") == (b"B", b"World2")
assert False
except:
assert True
try:
b.lookup(store, b"C") == (b"B", b"World2")
assert False
except:
assert True
def test_double_add():
l = Leaf(item=b"Hello", key=b"A")
store = {l.identity() : l}
b = l.add(store, item=b"World", key=b"B")
assert b.is_in(store, b"Hello", b"A")
assert b.is_in(store, b"World", b"B")
assert not b.is_in(store, b"World", b"C")
b = b.add(store, item=b"World2", key=b"B")
assert b.lookup(store, b"B") == (b"B", b"World")
assert not b.lookup(store, b"B") == (b"B", b"World2")
def test_tree_default_store():
t = Tree()
t.multi_add([b"test"])
assert t.is_in(b"test")
t2 = Tree()
assert not t2.is_in(b"test")
def test_tree_empty_store():
store = {}
t = Tree(store)
t.multi_add([b"test"])
assert t.is_in(b"test")
t2 = Tree(store, root_hash=t.root())
assert t2.is_in(b"test")
|
razisayyed/django-ads | ads/templatetags/ads_tags.py | Python | apache-2.0 | 956 | 0 | from __future__ import unicode_literals
from django import template
from django.conf import settings
from django.utils import timezone
from ads.models import Ad
register = template.Library()
@register.inclusion_tag('ads/tags/render_ads_zone.html', takes_context=True)
def render_ads_zone(context, zone):
"""
Ret | urns an advertise for a ``zone``.
Tag usage:
{% load ads_tags %}
{% render_zone 'zone' %}
"""
context.update({
'google_adsense_client': settings.ADS_GOOGLE_ADSENSE_CLIENT,
'zone': zone,
'zone_info': settings.ADS_ZONES.get(zone, None)
})
return context
@register.simple_tag
def get_ads_count(zone):
"""
Returns ads count for ``zone``.
Tag usage:
{% load ads_tags %}
{% get_ads_count 'zone' as ads_ | count %}
{% get_ads_count 'zone1,zone2,zone3' as ads_count %}
"""
zone = zone.split(',')
return Ad.objects.public().filter(zone__in=zone).count()
|
xtao/flask-weixin | setup.py | Python | bsd-3-clause | 1,257 | 0 | #!/usr/bin/env python
# coding: utf-8
|
try:
# python setup.py test
import multiprocessing
except ImportError:
pass
import os
import re
from setuptools import setup
def fread(fname):
filepath = os.path.join(os.path.dirname(__file__), fname)
with open(filepath) as f:
return f.read()
content = fread('flask_weixin.py')
m = re.find | all(r'__version__\s*=\s*\'(.*)\'', content)
version = m[0]
setup(
name='Flask-Weixin',
version=version,
url='https://github.com/lepture/flask-weixin',
author='Hsiaoming Yang',
author_email='me@lepture.com',
description='Weixin for Flask.',
long_description=fread('README.rst'),
license='BSD',
py_modules=['flask_weixin'],
zip_safe=False,
platforms='any',
tests_require=['nose', 'Flask'],
test_suite='nose.collector',
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
|
westurner/pwd | setup.py | Python | bsd-3-clause | 1,441 | 0 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
| from setuptools import setup
except ImportError:
from distutils.core import setup
with open('README.rst') as readme_file:
| readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read().replace('.. :changelog:', '')
requirements = [
# TODO: put package requirements here
]
test_requirements = [
# TODO: put package test requirements here
]
setup(
name='pwd',
version='0.3.5',
description="pwd",
long_description=readme + '\n\n' + history,
author="Wes Turner",
author_email='wes@wrd.nu',
url='https://github.com/westurner/pwd',
packages=[
'pwd',
],
package_dir={'pwd':
'pwd'},
include_package_data=True,
install_requires=requirements,
license="BSD",
zip_safe=False,
keywords='pwd',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
test_suite='tests',
tests_require=test_requirements
)
|
lehmannro/translate | storage/wordfast.py | Python | gpl-2.0 | 16,175 | 0.006636 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2007 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in t | he hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with translate; if not, write to the Free Software
# Foundat | ion, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""Manage the Wordfast Translation Memory format
Wordfast TM format is the Translation Memory format used by the
U{Wordfast<http://www.wordfast.net/>} computer aided translation tool.
It is a bilingual base class derived format with L{WordfastTMFile}
and L{WordfastUnit} providing file and unit level access.
Wordfast tools
==============
Wordfast is a computer aided translation tool. It is an application
built on top of Microsoft Word and is implemented as a rather
sophisticated set of macros. Understanding that helps us understand
many of the seemingly strange choices around this format including:
encoding, escaping and file naming.
Implementation
==============
The implementation covers the full requirements of a Wordfast TM file.
The files are simple Tab Separated Value (TSV) files that can be read
by Microsoft Excel and other spreadsheet programs. They use the .txt
extension which does make it more difficult to automatically identify
such files.
The dialect of the TSV files is specified by L{WordfastDialect}.
Encoding
--------
The files are UTF-16 or ISO-8859-1 (Latin1) encoded. These choices
are most likely because Microsoft Word is the base editing tool for
Wordfast.
The format is tab separated so we are able to detect UTF-16 vs Latin-1
by searching for the occurance of a UTF-16 tab character and then
continuing with the parsing.
Timestamps
----------
L{WordfastTime} allows for the correct management of the Wordfast
YYYYMMDD~HHMMSS timestamps. However, timestamps on individual units are
not updated when edited.
Header
------
L{WordfastHeader} provides header management support. The header
functionality is fully implemented through observing the behaviour of the
files in real use cases, input from the Wordfast programmers and
public documentation.
Escaping
--------
Wordfast TM implements a form of escaping that covers two aspects:
1. Placeable: bold, formating, etc. These are left as is and ignored.
It is up to the editor and future placeable implementation to manage
these.
2. Escapes: items that may confuse Excel or translators are
escaped as &'XX;. These are fully implemented and are converted to
and from Unicode. By observing behaviour and reading documentation
we where able to observe all possible escapes. Unfortunately the
escaping differs slightly between Windows and Mac version. This
might cause errors in future.
Functions allow for L{conversion to Unicode<_wf_to_char>} and L{back to
Wordfast escapes<_char_to_wf>}.
Extended Attributes
-------------------
The last 4 columns allow users to define and manage extended attributes.
These are left as is and are not directly managed byour implemenation.
"""
import csv
import sys
import time
from translate.storage import base
WF_TIMEFORMAT = "%Y%m%d~%H%M%S"
"""Time format used by Wordfast"""
WF_FIELDNAMES_HEADER = ["date", "userlist", "tucount", "src-lang", "version", "target-lang", "license", "attr1list", "attr2list", "attr3list", "attr4list", "attr5list"]
"""Field names for the Wordfast header"""
WF_FIELDNAMES = ["date", "user", "reuse", "src-lang", "source", "target-lang", "target", "attr1", "attr2", "attr3", "attr4"]
"""Field names for a Wordfast TU"""
WF_FIELDNAMES_HEADER_DEFAULTS = {
"date": "%19000101~121212",
"userlist": "%User ID,TT,TT Translate-Toolkit",
"tucount": "%TU=00000001",
"src-lang": "%EN-US",
"version": "%Wordfast TM v.5.51w9/00",
"target-lang": "",
"license": "%---00000001",
"attr1list": "",
"attr2list": "",
"attr3list": "",
"attr4list": "" }
"""Default or minimum header entries for a Wordfast file"""
# TODO Needs validation. The following need to be checked against a WF TM file to ensure
# that the correct Unicode values have been chosen for the characters. For now these look
# correct and have been taken from Windows CP1252 and Macintosh code points found for
# the respective character sets on Linux.
WF_ESCAPE_MAP = (
("&'26;", u"\u0026"), # & - Ampersand (must be first to prevent escaping of escapes)
("&'82;", u"\u201A"), # ‚ - Single low-9 quotation mark
("&'85;", u"\u2026"), # … - Elippsis
("&'91;", u"\u2018"), # ‘ - left single quotation mark
("&'92;", u"\u2019"), # ’ - right single quotation mark
("&'93;", u"\u201C"), # “ - left double quotation mark
("&'94;", u"\u201D"), # ” - right double quotation mark
("&'96;", u"\u2013"), # – - en dash (validate)
("&'97;", u"\u2014"), # — - em dash (validate)
("&'99;", u"\u2122"), # ™ - Trade mark
# Windows only
("&'A0;", u"\u00A0"), # - Non breaking space
("&'A9;", u"\u00A9"), # © - Copyright
("&'AE;", u"\u00AE"), # ® - Registered
("&'BC;", u"\u00BC"), # ¼
("&'BD;", u"\u00BD"), # ½
("&'BE;", u"\u00BE"), # ¾
# Mac only
("&'A8;", u"\u00AE"), # ® - Registered
("&'AA;", u"\u2122"), # ™ - Trade mark
("&'C7;", u"\u00AB"), # « - Left-pointing double angle quotation mark
("&'C8;", u"\u00BB"), # » - Right-pointing double angle quotation mark
("&'C9;", u"\u2026"), # … - Horizontal Elippsis
("&'CA;", u"\u00A0"), # - Non breaking space
("&'D0;", u"\u2013"), # – - en dash (validate)
("&'D1;", u"\u2014"), # — - em dash (validate)
("&'D2;", u"\u201C"), # “ - left double quotation mark
("&'D3;", u"\u201D"), # ” - right double quotation mark
("&'D4;", u"\u2018"), # ‘ - left single quotation mark
("&'D5;", u"\u2019"), # ’ - right single quotation mark
("&'E2;", u"\u201A"), # ‚ - Single low-9 quotation mark
("&'E3;", u"\u201E"), # „ - Double low-9 quotation mark
# Other markers
#("&'B;", u"\n"), # Soft-break - XXX creates a problem with roundtripping could also be represented by \u2028
)
"""Mapping of Wordfast &'XX; escapes to correct Unicode characters"""
TAB_UTF16 = "\x00\x09"
"""The tab \\t character as it would appear in UTF-16 encoding"""
def _char_to_wf(string):
"""Char -> Wordfast &'XX; escapes
Full roundtripping is not possible because of the escaping of NEWLINE \\n
and TAB \\t"""
# FIXME there is no platform check to ensure that we use Mac encodings when running on a Mac
if string:
for code, char in WF_ESCAPE_MAP:
string = string.replace(char.encode('utf-8'), code)
string = string.replace("\n", "\\n").replace("\t", "\\t")
return string
def _wf_to_char(string):
"""Wordfast &'XX; escapes -> Char"""
if string:
for code, char in WF_ESCAPE_MAP:
string = string.replace(code, char.encode('utf-8'))
string = string.replace("\\n", "\n").replace("\\t", "\t")
return string
class WordfastDialect(csv.Dialect):
"""Describe the properties of a Wordfast generated TAB-delimited file."""
delimiter = "\t"
lineterminator = "\r\n"
quoting = csv.QUOTE_NONE
if sys.version_ |
cangencer/hazelcast-python-client | hazelcast/protocol/codec/list_is_empty_codec.py | Python | apache-2.0 | 1,038 | 0.000963 | from hazelcast.serialization.bits import *
from hazelcast.protocol.client_message import ClientMessage
from hazelcast.protocol.custom_codec import *
from hazelcast.util import ImmutableLazyDataList
from hazelcast.protocol.codec.list_message_type import *
REQUEST_TYPE = LIST | _ISEMPTY
RESPONSE_TYPE = 101
RETRYABLE = True
d | ef calculate_size(name):
""" Calculates the request payload size"""
data_size = 0
data_size += calculate_size_str(name)
return data_size
def encode_request(name):
""" Encode request into client_message"""
client_message = ClientMessage(payload_size=calculate_size(name))
client_message.set_message_type(REQUEST_TYPE)
client_message.set_retryable(RETRYABLE)
client_message.append_str(name)
client_message.update_frame_length()
return client_message
def decode_response(client_message, to_object=None):
""" Decode response from client message"""
parameters = dict(response=None)
parameters['response'] = client_message.read_bool()
return parameters
|
shanestockall/EECS-Grading-Rig | src/packages/EECS-211-Grader/acceptancetest_osx.py | Python | mit | 2,279 | 0.035542 | import hashlib
import shutil
import os
from datetime import datetime
list_of_paths_and_strings = [
["assignment1.cpp", "main()"]
]
def main():
abspath = os.path.abspath(__file__)
dname = os.path.dirname(sys.argv[0])
os.chdir(dname)
if acceptance_test():
make_txt_file()
zip_dir()
def get_md5_hash(file):
# opening file
file_to_hash = open(file)
read_file = file_to_hash.read()
# get hash of file
md5_hash = hashlib.md5(read_file)
md5_hash_output = md5_hash.hexdigest()
# print file name and hash
print "File Name: %s" % file
print "MD5 Hash: %r" % md5_hash_output
# return hash
return file, md5_hash_output
def get_current_time():
print "The current time is " + " datetime.today()"
return datetime.today()
def acceptance_test():
# for each list of the list of paths and strings
# make sure that a file with that name exists within the folder
for my_list in list_of_paths_and_strings:
path = my_list[0]
list_of_strings = my_list[1:]
try:
with open(path) as file:
for string in list_of_strings:
if string in file.read():
print "Found " + string + " in file."
else:
print string + "not found in file."
return False
file.close()
return True
except:
print 'File does not exist. Please make sure all necessary files are in the correct place.'
return False
def make_txt_file():
# writes a text | file with each of the hashes for each of the files using MD5
write_file = open("hash.txt", "w+")
write_file.write("Write time: " + str(get_current_time()) + '\n')
for file in os.listdir(os.getcwd()):
if "." in file:
f_name, file_hash = get_md5_hash(file)
write_file.write(f_name + '\n')
write_file.write(file_hash + '\n')
write_file.close()
def zip_dir():
# zips directory using shutil.make_archive()
zip_name = "submission"
directory_name = "./tmp"
os.mkdi | r("./tmp")
for file in os.listdir(os.getcwd()):
try:
if ".pdf" in file:
continue
elif "acceptancetest" in file:
continue
else:
shutil.copy(file, './tmp/')
except:
continue
shutil.make_archive(zip_name, 'zip', directory_name)
shutil.rmtree('./tmp')
if __name__ == '__main__':
main() |
jorisvandenbossche/DS-python-data-analysis | notebooks/_solutions/case2_biodiversity_analysis6.py | Python | bsd-3-clause | 123 | 0.01626 | survey_data_processed[survey_data_processed.duplicated(keep=False)].sort_valu | es(["eventDate", "verbatimLocality"]) | .head(10) |
jchristman/adventofcode | 13/win.py | Python | mit | 2,809 | 0.00712 | # ----------------------------------------------------------------------------------------------------------------------
# Advent of Code - Day 13
#
# The Python One-Liner challenge:
#
# Rules:
#
# - Reading input into a variable from another file or with an assignment is acceptable and does not count against
# your total for lines.
# - Your solution must take the form of 'print INSERT_CODE_HERE'
# - Formatting your print with a format string is acceptable so long as you are only substituting ONE value (multiple
# calculations are not allowed)
# - No global variables (outside of the input variable)
#
# ----------------------------------------------------------------------------------------------------------------------
import re, itertools
_input = { name: { re.findall(r'[A-Z][a-z]+', line)[1]: int(re.findall(r'[0-9]+', line)[0]) if len(re.findall(r'lose', line)) == 0 else -int(re.findall(r'[0-9]+', line)[0]) for line in open('input', 'r').read().splitlines() if re.findall(r'[A-Z][a-z]+', line)[0] == name } for name in set(re.findall(r'[A-Z][a-z]+', open('input', 'r').read())) }
# ----------------------------------------------------------------------------------------------------------------------
# Part 1 (Find maximum happiness at a table)
#
# Find the best combination of seats by iterating all permutations and calculating the happiness with a reduce func.
# ----------------------------------------------------------------------------------------------------------------------
print max((reduce(lambda first_total, second: (first_total[0] + [second], first_total[1] + _input[first_total[0][-1]][second] + _input[second][first_total[0][-1]]), [([arrangement[-1]], 0)] + list(arrangement)) for arrangement in itertools.permutations(_input.iterkeys())), key=lambda x: x[1])
# ----------------------------------------------------------------------------------------------------------------------
# Part 2 (Find maximum happiness at a table)
#
# Add myself into the table and print the best happiness
# ----------------------------------------------------------------------------------------------------------------------
_input = { name: { re.findall(r'[A-Z][a-z]+', line)[1]: int(re.findall(r'[0-9]+', line)[0]) if len(re.findall(r'lose', line)) | == 0 else -int(re.findall(r'[0-9]+', line)[0]) for line in open('input2', 'r').read().splitlines() if re.findall(r'[A-Z][a-z]+', line)[0] == n | ame } for name in set(re.findall(r'[A-Z][a-z]+', open('input2', 'r').read())) }
print max((reduce(lambda first_total, second: (first_total[0] + [second], first_total[1] + _input[first_total[0][-1]][second] + _input[second][first_total[0][-1]]), [([arrangement[-1]], 0)] + list(arrangement)) for arrangement in itertools.permutations(_input.iterkeys())), key=lambda x: x[1])
|
ajpocus/pizzeria | order/forms.py | Python | bsd-3-clause | 1,411 | 0.019135 | from django import forms
from or | der.models import Pizza, Bread, Customer
class PizzaForm(forms.ModelForm):
class Meta:
model = Pizza
fields = ('size', 'toppings', 'crust')
widgets = {
'size': forms.RadioSelect(),
'crust': forms.RadioSelect(),
'toppings': forms.CheckboxSelectMultiple(),
}
def process(self, order):
data = self.cleaned_data
size = data['size']
crust = data['crust']
toppings = data['toppings']
p | izza = Pizza.objects.create()
pizza.size = size
pizza.crust = crust
for topping in toppings:
pizza.toppings.add(topping)
pizza.save()
order.pizzas.add(pizza)
order.save()
class BreadForm(forms.ModelForm):
class Meta:
model = Bread
fields = ('flavor',)
widgets = {
'type': forms.RadioSelect(),
}
def process(self, order):
data = self.cleaned_data
flavor = data['flavor']
bread = Bread.objects.create(flavor=flavor)
order.breads.add(bread)
order.save()
class CustomerForm(forms.ModelForm):
class Meta:
model = Customer
def process(self, order):
data = self.cleaned_data
name = str(data['name'])
number = str(data['number'])
customer = Customer.objects.create(name=name, number=number)
order.customer = customer
order.save()
|
iticus/spotlipy | settings_default.py | Python | mit | 1,141 | 0.008764 | '''
Created on Jun 20, 2016
@author: ionut
'''
import logging
logging.basicConfig(level=logging.DEBUG,
format='[%(asctime)s] - %(name)s - %(levelname)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("u | rllib3").setLevel(logging.WARNING)
logging.getLogger("spotify.search").setLevel(logging.WARNING)
logging.getLogger("spotify.session").setLevel(logging.WARNING)
#Database connection - Postgres / Amazon RDS
DSN = "dbname='spotlipy' user='postgres' | host='127.0.0.1' password='password'"
#dogstarradio search URL
SEARCH_URL = 'http://www.dogstarradio.com/search_playlist.php'
JUNK_INDICATORS = ['@', '#', '.com', 'Hip Hop Nation', 'SiriusXM']
#for stations numbers and names see stations.txt
STATIONS = [
34, 44
]
#if MONTH or DATE are None we will use yesterday for searching
MONTH = None
DATE = None
#Spotify settings
SPOTIFY = {
'username': 'username',
'client_id': 'client_id',
'client_secret': 'client_secret',
'redirect_url': 'redirect_url',
'api_scopes': 'playlist-read-private playlist-modify-public playlist-modify-private'
}
|
dcramer/django-db-log | djangodblog/migrations/0004_fill_error_checksums.py | Python | bsd-3-clause | 3,710 | 0.007817 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
"Write your forwards methods here."
from djangodblog.helpers import construct_checksum
for e in orm.Error.objects.all():
orm.Error.objects.filter(pk=e.pk).update(checksum=construct_checksum(e))
for e in orm.ErrorBatch.objects.all():
orm.ErrorBatch.objects.filter(pk=e.pk).update(checksum=construct_checksum(e))
def backwards(self, orm):
"Write your backwards methods here."
models = {
'djangodblog.error': {
'Meta': {'object_name': 'Error'},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_index': 'True'}),
'class_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'dateti | me': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
' | id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'traceback': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'djangodblog.errorbatch': {
'Meta': {'unique_together': "(('logger', 'server_name', 'checksum'),)", 'object_name': 'ErrorBatch'},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'class_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_column': "'is_resolved'"}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'traceback': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['djangodblog']
|
tswast/google-cloud-python | asset/google/cloud/asset_v1/gapic/transports/asset_service_grpc_transport.py | Python | apache-2.0 | 7,999 | 0.001375 | # -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import google.api_core.grpc_helpers
import google.api_core.operations_v1
from google.cloud.asset_v1.proto import asset_service_pb2_grpc
class AssetServiceGrpcTransport(object):
"""gRPC transport class providing stubs for
google.cloud.asset.v1 AssetService API.
The transport provides access to the raw gRPC stubs,
which can be used to take advantage of advanced
features of gRPC.
"""
# The scopes needed to make gRPC calls to all of the methods defined
# in this service.
_OAUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",)
def __init__(
self, channel=None, credentials=None, address="cloudasset.googleapis.com:443"
):
"""Instantiate the transport class.
Args:
channel (grpc.Channel): A ``Channel`` instance through
which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
ad | dress (str): The address where the service is hosted.
"""
# If both `channel` and `credentials` are specified, raise an
# exception (channels come with credentials baked in already).
if channel is not None and credentials is not None:
raise ValueError(
"The `channel` and `credentials` arguments are mutually " "exclusive."
)
# Create the channel.
if channel i | s None:
channel = self.create_channel(
address=address,
credentials=credentials,
options={
"grpc.max_send_message_length": -1,
"grpc.max_receive_message_length": -1,
}.items(),
)
self._channel = channel
# gRPC uses objects called "stubs" that are bound to the
# channel and provide a basic method for each RPC.
self._stubs = {
"asset_service_stub": asset_service_pb2_grpc.AssetServiceStub(channel)
}
# Because this API includes a method that returns a
# long-running operation (proto: google.longrunning.Operation),
# instantiate an LRO client.
self._operations_client = google.api_core.operations_v1.OperationsClient(
channel
)
@classmethod
def create_channel(
cls, address="cloudasset.googleapis.com:443", credentials=None, **kwargs
):
"""Create and return a gRPC channel object.
Args:
address (str): The host for the channel to use.
credentials (~.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
kwargs (dict): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
"""
return google.api_core.grpc_helpers.create_channel(
address, credentials=credentials, scopes=cls._OAUTH_SCOPES, **kwargs
)
@property
def channel(self):
"""The gRPC channel used by the transport.
Returns:
grpc.Channel: A gRPC channel object.
"""
return self._channel
@property
def export_assets(self):
"""Return the gRPC stub for :meth:`AssetServiceClient.export_assets`.
Exports assets with time and resource types to a given Cloud Storage
location. The output format is newline-delimited JSON. This API
implements the ``google.longrunning.Operation`` API allowing you to keep
track of the export.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["asset_service_stub"].ExportAssets
@property
def batch_get_assets_history(self):
"""Return the gRPC stub for :meth:`AssetServiceClient.batch_get_assets_history`.
Batch gets the update history of assets that overlap a time window. For
RESOURCE content, this API outputs history with asset in both non-delete
or deleted status. For IAM\_POLICY content, this API outputs history
when the asset and its attached IAM POLICY both exist. This can create
gaps in the output history. If a specified asset does not exist, this
API returns an INVALID\_ARGUMENT error.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["asset_service_stub"].BatchGetAssetsHistory
@property
def create_feed(self):
"""Return the gRPC stub for :meth:`AssetServiceClient.create_feed`.
Creates a feed in a parent project/folder/organization to listen to its
asset updates.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["asset_service_stub"].CreateFeed
@property
def get_feed(self):
"""Return the gRPC stub for :meth:`AssetServiceClient.get_feed`.
Gets details about an asset feed.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["asset_service_stub"].GetFeed
@property
def list_feeds(self):
"""Return the gRPC stub for :meth:`AssetServiceClient.list_feeds`.
Lists all asset feeds in a parent project/folder/organization.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["asset_service_stub"].ListFeeds
@property
def update_feed(self):
"""Return the gRPC stub for :meth:`AssetServiceClient.update_feed`.
Updates an asset feed configuration.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["asset_service_stub"].UpdateFeed
@property
def delete_feed(self):
"""Return the gRPC stub for :meth:`AssetServiceClient.delete_feed`.
Deletes an asset feed.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["asset_service_stub"].DeleteFeed
|
axant/tgapp-mailtemplates | mailtemplates/model/ming/models.py | Python | mit | 997 | 0 | from ming.odm import FieldProperty
from ming.odm import ForeignIdProperty
from ming.odm import MappedClass
from ming.odm import RelationProperty
from mailtemplates.model import DBSession
from ming import schema as s
class MailModel(MappedClass):
class __mongometa__:
name = 'mailtemplates_mail_models'
session = DBSession
unique_indexes = [('name',)]
_id = FieldProperty(s.ObjectId)
name = FieldProperty(s.String, required=True)
| usage = FieldProperty(s.String, required=True)
template_translations = RelationProperty('TemplateTranslation')
class TemplateTranslation(MappedClass):
class __mongometa__:
nam | e = 'mailtemplates_template_translations'
session = DBSession
_id = FieldProperty(s.ObjectId)
mail_model_id = ForeignIdProperty('MailModel')
mail_model = RelationProperty('MailModel')
language = FieldProperty(s.String, required=True)
subject = FieldProperty(s.String)
body = FieldProperty(s.String)
|
petterreinholdtsen/cinelerra-hv | thirdparty/OpenCV-2.3.1/samples/python2/facedetect.py | Python | gpl-2.0 | 1,942 | 0.010299 | import numpy as np
import cv2
import cv2.cv as cv
from video import create_capture
from common import clock, draw_str
help_message = '''
USAGE: facedetect.py [--cascade <cascade_fn>] [--nested-cascade <cascade_fn>] [<video_source>]
'''
def detect(img, cascade):
rects = cascade.detectMultiScale(img, scaleFactor=1.3, minNeighbors=4, minSize=(30, 30), flags = cv.CV_HAAR_SCALE_IMAGE)
if len(rects) == 0:
return []
rects[:,2:] += rects[:,:2]
return rects
def draw_rects(img, rects, color):
for x1, y1, x2, y2 in rects:
cv2.rectangle(img, (x1, y1), (x2, y2), color, 2)
if __name__ == '__main__':
import sys, getopt
print help_message
args, video_src = getopt.getopt(sys.argv[1:], '', ['cascade=', 'nested-cascade='])
try: video_src = video_src[0]
except: video_src = 'synth:bg=../cpp/lena.jpg:noise=0.05'
args = dict(args)
cascade_fn = args.get('--cascade', "../../data/haarcascades/haarcascade_frontalface_alt.xml")
nested_fn = args.get('--nested-cascade', "../../data/haarcascades/haarcascade_eye.xml")
cascade = cv2.CascadeClassifier(cascade_fn)
nested = cv2.CascadeClassifier(nested_fn)
cam = create_capture(video_src)
while True:
ret, img = cam.read()
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = cv2.equalizeHist(gray)
t = clock()
rects = detect(gray, cascade)
vis = img.copy()
draw_rects(vis, rects, (0, 255, 0))
for x1, y1, x2, y2 in rects:
roi = gray[y1:y2, x1:x2]
vi | s_roi = vis[y1:y2, x1:x2]
subrects = detect(roi.copy(), nested)
draw_rects(vis_roi, subrects, (255, 0, 0))
dt = clock() - t
draw | _str(vis, (20, 20), 'time: %.1f ms' % (dt*1000))
cv2.imshow('facedetect', vis)
if cv2.waitKey(5) == 27:
break
|
tencentyun/cos-python-sdk | setup.py | Python | mit | 523 | 0.026769 | #!/usr/bin/env python
# coding=utf-8
import sys
if sys.version_info[0] != 2 or sys.version_info[1] < 6:
sys.exit('Sorry, only python 2.6 or 2.7 is | supported')
from setuptools import setup, find_packages
setup(
name = 'qcloud_cos',
version = '3.3',
description = 'python sdk for tencent qcloud cos',
| license = 'MIT License',
install_requires=['requests'],
author = 'chengwu',
author_email = 'chengwu@tencent.com',
packages = find_packages(),
)
|
rafalcieslinski/pgspecial | scripts/docparser.py | Python | bsd-3-clause | 1,456 | 0.001374 | from bs4 import BeautifulSoup
import sys
import os
import json
def get_secti | on(doc, section):
element = doc.find(section)
if element:
return element.get_text | ()
def get_description(doc):
text = get_section(doc, "refsect1")
if text:
lines = filter(lambda x: x.strip(), text.split("\n"))
if len(lines) > 1 and lines[0] == "Description":
return lines[0] + "\n" + lines[1]
def parse(file_name):
with open(file_name, "r") as file:
doc = BeautifulSoup(file.read(), "html.parser")
desc = get_description(doc)
synopsis = get_section(doc, "synopsis")
if desc and synopsis:
return {
"description": desc,
"synopsis": synopsis
}
if __name__ == "__main__":
if len(sys.argv) < 2:
print("Parse postgres SGML reference files into JSON")
print("Usage:")
print("echo -n \"commands = \" > command_help.py; python parser.py ref/ | python -mjson.tool | sed 's/\"\: null/\": None/g' >> command_help.py")
print("")
sys.exit(0)
dir = sys.argv[1]
docs = {}
for file_name in os.listdir(dir):
if file_name.endswith(".sgml"):
path = dir.rstrip("/") + "/" + file_name
command = file_name[:-5].replace("_", " ")
parsed = parse(path)
if parsed:
docs[command.upper()] = parsed
print(json.dumps(docs))
|
jonathanmorgan/django_reference_data | migrations/0001_initial.py | Python | gpl-3.0 | 4,641 | 0.008403 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Reference_Domain'
db.create_table(u'django_reference_data_reference_domain', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('domain_name', self.gf('django.db.models.fields.CharField')(max_length=255)),
('domain_path', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('long_name', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('description', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('source', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('source_details', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('domain_type', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('is_news', self.gf('django.db.models.fields.BooleanField')(default=False)),
('is_multimedia', self.gf('django.db.models.fields.BooleanField')(default=False)),
('rank', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('address', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('state', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('county', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('city', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('zip_code', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('create_date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('last_update', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
))
db.send_create_signal(u'django_reference_data', ['Reference_Domain'])
def backwards(self, orm):
# Deleting model 'Reference_Domain'
db.delete_table(u'django_reference_data_reference_domain')
models = {
u'django_reference_data.reference_domain': {
'Meta': {' | object_name': 'Reference_Domain'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'county': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'bla | nk': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'domain_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'domain_path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'domain_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_multimedia': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_news': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'long_name': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'rank': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'source_details': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['django_reference_data'] |
RaveYoda/Shellmen | src/core/mixins/StylesMixin.py | Python | gpl-2.0 | 2,111 | 0.001895 | # Python imports
# Lib imports
from PyInqui | rer import style_from_dict, Token
# Application imports
class StylesMixin:
"""
The StylesMixin has style methods that get called and
return their respective objects.
"""
def default(self):
retur | n style_from_dict({
Token.Separator: '#6C6C6C',
Token.QuestionMark: '#FF9D00 bold',
# Token.Selected: '', # default
Token.Selected: '#5F819D',
Token.Pointer: '#FF9D00 bold',
Token.Instruction: '', # default
Token.Answer: '#5F819D bold',
Token.Question: '',
})
def orange(self):
return style_from_dict({
Token.Pointer: '#6C6C6C bold',
Token.QuestionMark: '#FF9D00 bold',
Token.Separator: '#FF9D00',
Token.Selected: '#FF9D00',
Token.Instruction: '', # default
Token.Answer: '#FF9D00 bold',
Token.Question: '', # default
})
def red(self):
return style_from_dict({
Token.Pointer: '#c70e0e bold',
Token.QuestionMark: '#c70e0e bold',
Token.Separator: '#c70e0e',
Token.Selected: '#c70e0e',
Token.Instruction: '', # default
Token.Answer: '#c70e0e bold',
Token.Question: '', # default
})
def purple(self):
return style_from_dict({
Token.Pointer: '#673ab7 bold',
Token.QuestionMark: '#673ab7 bold',
Token.Selected: '#673ab7',
Token.Separator: '#673ab7',
Token.Instruction: '', # default
Token.Answer: '#673ab7 bold',
Token.Question: '', # default
})
def green(self):
return style_from_dict({
Token.Pointer: '#ffde00 bold',
Token.QuestionMark: '#29a116 bold',
Token.Selected: '#29a116',
Token.Separator: '#29a116',
Token.Instruction: '', # default
Token.Answer: '#29a116 bold',
Token.Question: '', # default
})
|
garoa/concorrente.py | tkinter/tkapp2.py | Python | cc0-1.0 | 7,362 | 0.003667 | import tkinter as tk
import asyncio
import threading
from tkevents import TkEventLoop
from HardWork import *
import concurrent.futures
@asyncio.coroutine
def async(it, *args):
return (yield from asyncio.get_event_loop().run_in_executor(None, it, *args))
class Application(tk.Frame):
def __init__(self, master=None):
tk.Frame.__init__(self, master, width=800, height=400)
self.grid(sticky=tk.N+tk.S+tk.W+tk.E)
self.grid_propagate(0)
self.clear = tk.Button(self, font='Consolas 18')
self.clear["text"] = "Clear status"
self.clear["command"] = self.clear_status
self.clear.grid(column=0, row=0, sticky=tk.E+tk.W)
buttons = [
("Count words sequentially", self.do_count_sequentially),
("Count words with callbacks", self.do_count_with_callbacks),
("Count words with futures", lambda: asyncio.async(self.do_count()))
]
for row, (text, command) in enumerate(buttons, 1):
button = tk.Button(self, font='Consolas 18')
button["text"] = text
button["command"] = command
button.grid(column=0, row=row, sticky=tk.E+tk.W)
self.QUIT = tk.Button(self, text="QUIT", fg="red", font='Consolas 18', command=root.destroy)
self.QUIT.grid(column=0, row=4)
header = tk.Label(self, font='Consolas 16')
header["text"] = "Status"
header.grid(column=1, row=0, sticky=tk.W)
self.status = []
for i in range(4):
self.status.append(tk.Label(self, font='Consolas 14'))
self.status[i].grid(column=1, row=1+i, sticky=tk.W)
def clear_status(self):
for label in self.status:
label["text"] = ""
def do_count_sequentially(self):
self.clear_status()
print('Loading words on ', threading.get_ident())
words = load_words("Holmes.txt")
print("{0} words loaded".format(len(words)))
self.status[0]["text"] = "{0} words loaded".format(len(words))
print('Cleaning on ', threading.get_ident())
self.status[1]["text"] = "Cleaning..."
words = clean_words(words)
print("{0} remain after cleaning".format(len(words)))
self.status[1]["text"] = "{0} remain after cleaning".format(len(words))
print('Counting on', threading.get_ident())
self.status[2]["text"] = "Counting..."
count = count_words(words)
print("{0} distinct words after counting".format(len(count)))
self.status[2]["text"] = "{0} distinct words after counting".format(len(count))
print('Sorting on thread ', threading.get_ident())
self.status[3]["text"] = "Sorting..."
most_common = get_most_common(count, 10)
print("The ten most common words: {0}".format(', '.join(most_common)))
self.status[3]["text"] = "The ten most common words: {0}".format(', '.join(most_common))
def do_count_with_callbacks(self):
self.clear_status()
executor = concurrent.futures.ThreadPoolExecutor(5)
print('Loading words on ', threading.get_ident())
future_words = executor.submit(load_words, "Holmes.txt")
def words_loaded(future_words):
print('Words loaded completed on thread', threading.get_ident())
words = future_words.result()
def words_loaded_update_ui():
print("{0} words loaded".format(len(words)))
self.status[0]["text"] = "{0} words loaded".format(len(words))
print('Cleaning on ', threading.get_ident())
self.status[1]["text"] = "Cleaning..."
def words_cleaned(future_clean_words):
def words_cleaned_update_ui():
words = future_clean_words.result()
print("{0} remain after cleaning".format(len(words)))
self.status[1]["text"] = "{0} remain after cleaning".format(len(words))
def words_counted(future_count):
def words_counted_update_ui():
count = future_count.result()
print("{0} distinct words after counting".format(len(count)))
self.status[2]["text"] = "{0} distinct word | s after counting".format(len(count))
def words_sorted(future_most_common):
def words_sorted_update_ui():
self.status[3]["text"] = "Sorting..."
most_common = future_most_common.result()
print("The ten most common words: {0}".format(', '.join(most_common)))
self.status[3][" | text"] = "The ten most common words: {0}".format(', '.join(most_common))
print('Sorting on thread ', threading.get_ident())
self.after(0, words_sorted_update_ui)
future_most_common = executor.submit(get_most_common, count, 10)
future_most_common.add_done_callback(words_sorted)
self.after(0, words_counted_update_ui)
future_count = executor.submit(count_words, words)
future_count.add_done_callback(words_counted)
self.after(0, words_cleaned_update_ui)
future_clean_words = executor.submit(clean_words, words)
future_clean_words.add_done_callback(words_cleaned)
self.after(0, words_loaded_update_ui)
future_words.add_done_callback(words_loaded)
@asyncio.coroutine
def do_count(self):
self.clear_status()
print('Loading words on ', threading.get_ident())
words = yield from async(load_words, "Holmes.txt")
print("{0} words loaded".format(len(words)))
self.status[0]["text"] = "{0} words loaded".format(len(words))
print('Cleaning on ', threading.get_ident())
self.status[1]["text"] = "Cleaning..."
words = yield from async(clean_words, words)
print("{0} remain after cleaning".format(len(words)))
self.status[1]["text"] = "{0} remain after cleaning".format(len(words))
print('Counting on', threading.get_ident())
self.status[2]["text"] = "Counting..."
count = yield from async(count_words, words)
print("{0} distinct words after counting".format(len(count)))
self.status[2]["text"] = "{0} distinct words after counting".format(len(count))
print('Sorting on thread ', threading.get_ident())
self.status[3]["text"] = "Sorting..."
most_common = yield from async(get_most_common, count, 10)
print("The ten most common words: {0}".format(', '.join(most_common)))
self.status[3]["text"] = "The ten most common words: {0}".format(', '.join(most_common))
root = tk.Tk()
app = Application(master=root)
print('Starting on thread', threading.get_ident())
#app.mainloop()
TkEventLoop(app).mainloop()
|
hkv-consultants/bowa | bowa/tools.py | Python | gpl-3.0 | 382 | 0 | # (c) Nelen & Schuurma | ns. GPL lice | nsed, see LICENSE.rst.
from __future__ import unicode_literals
def version():
"""
return version string
"""
from pkginfo.installed import Installed
import bowa
installed = Installed(bowa)
if installed.version:
return 'Versie april 2014 (%s)' % (installed.version)
else:
return 'Versie april 2014'
|
robotframework/mabot | src/mabot/settings/defaultsettings.py | Python | apache-2.0 | 846 | 0 | # Copyright 2008 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file e | xcept in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed | to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
default_message = "Not Executed!"
ask_tags_added_to_modified_tests_at_startup = False
tags_added_to_modified_tests = []
tags_allowed_only_once = []
always_load_old_data_from_xml = False
check_simultaneous_save = False
include = []
exclude = []
|
Pierre-Sassoulas/django-survey | survey/migrations/0008_translated_name_for_models.py | Python | agpl-3.0 | 6,920 | 0.002601 | # Generated by Django 1.11.13 on 2018-06-26 16:51
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("survey", "0007_auto_20180217_1515")]
operations = [
migrations.AlterModelOptions(
name="response",
options={"verbose_name": "Set of answers to surveys", "verbose_name_plural": "Sets of answers to surveys"},
),
migrations.AlterField(
model_name="answer", name="body", field=models.TextField(blank=True, null=True, verbose_name="Content")
),
migrations.AlterField(
model_name="answer",
name="created",
| field=models.DateTimeField(auto_now_add=True, verbose_name="Creation date"),
),
migrations.AlterField(
model_name="answer",
name="question",
field=mo | dels.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="answers",
to="survey.Question",
verbose_name="Question",
),
),
migrations.AlterField(
model_name="answer",
name="response",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="answers",
to="survey.Response",
verbose_name="Response",
),
),
migrations.AlterField(
model_name="answer", name="updated", field=models.DateTimeField(auto_now=True, verbose_name="Update date")
),
migrations.AlterField(
model_name="category",
name="description",
field=models.CharField(blank=True, max_length=2000, null=True, verbose_name="Description"),
),
migrations.AlterField(
model_name="category", name="name", field=models.CharField(max_length=400, verbose_name="Name")
),
migrations.AlterField(
model_name="category",
name="order",
field=models.IntegerField(blank=True, null=True, verbose_name="Display order"),
),
migrations.AlterField(
model_name="category",
name="survey",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="categories",
to="survey.Survey",
verbose_name="Survey",
),
),
migrations.AlterField(
model_name="question",
name="category",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="questions",
to="survey.Category",
verbose_name="Category",
),
),
migrations.AlterField(
model_name="question",
name="choices",
field=models.TextField(
blank=True,
help_text="""The choices field is only used if the question type
if the question type is 'radio', 'select', or
'select multiple' provide a comma-separated list of
options for this question.""",
null=True,
verbose_name="Choices",
),
),
migrations.AlterField(model_name="question", name="order", field=models.IntegerField(verbose_name="Order")),
migrations.AlterField(
model_name="question", name="required", field=models.BooleanField(verbose_name="Required")
),
migrations.AlterField(
model_name="question",
name="survey",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="questions",
to="survey.Survey",
verbose_name="Survey",
),
),
migrations.AlterField(model_name="question", name="text", field=models.TextField(verbose_name="Text")),
migrations.AlterField(
model_name="question",
name="type",
field=models.CharField(
choices=[
("text", "text (multiple line)"),
("short-text", "short text (one line)"),
("radio", "radio"),
("select", "select"),
("select-multiple", "Select Multiple"),
("select_image", "Select Image"),
("integer", "integer"),
],
default="text",
max_length=200,
verbose_name="Type",
),
),
migrations.AlterField(
model_name="response",
name="created",
field=models.DateTimeField(auto_now_add=True, verbose_name="Creation date"),
),
migrations.AlterField(
model_name="response",
name="survey",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="responses",
to="survey.Survey",
verbose_name="Survey",
),
),
migrations.AlterField(
model_name="response", name="updated", field=models.DateTimeField(auto_now=True, verbose_name="Update date")
),
migrations.AlterField(
model_name="response",
name="user",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to=settings.AUTH_USER_MODEL,
verbose_name="User",
),
),
migrations.AlterField(
model_name="survey", name="description", field=models.TextField(verbose_name="Description")
),
migrations.AlterField(
model_name="survey",
name="display_by_question",
field=models.BooleanField(verbose_name="Display by question"),
),
migrations.AlterField(
model_name="survey",
name="is_published",
field=models.BooleanField(verbose_name="Users can see it and answer it"),
),
migrations.AlterField(
model_name="survey", name="name", field=models.CharField(max_length=400, verbose_name="Name")
),
migrations.AlterField(
model_name="survey",
name="need_logged_user",
field=models.BooleanField(verbose_name="Only authenticated users can see it and answer it"),
),
migrations.AlterField(
model_name="survey",
name="template",
field=models.CharField(blank=True, max_length=255, null=True, verbose_name="Template"),
),
]
|
thica/ORCA-Remote | src/ORCA/utils/Platform/generic/generic_GetIPAddressV6.py | Python | gpl-3.0 | 2,158 | 0.008341 | # -*- coding: utf-8 -*-
"""
ORCA Open Remote Control Application
Copyright (C) 2013-2020 Carsten Thielepape
Please contact me by : http://www.orca-remote.org/
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
| the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This | program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from typing import List
from typing import Dict
from kivy import Logger
try:
import netifaces
except Exception as ex:
Logger.error("Can't load netifaces:"+str(ex))
__all__ = ['GetIPAddressV6']
def GetIPAddressV6() -> str:
    """Return an IPv6 address of this host as a string.

    Prefers the address of adapter 'eth0' when present, otherwise the
    last non-loopback address found; falls back to u'127.0.0.0' when
    nothing can be determined. A trailing zone index ('%eth0') is removed.
    """
    uPreferred: str = u'eth0'
    aCandidates: List[str] = []
    try:
        iFamily: int = getattr(netifaces, u'AF_INET6')
        for uInterface in netifaces.interfaces():
            aDetails = netifaces.ifaddresses(uInterface).get(iFamily)
            if not aDetails:
                continue
            uAddress: str = aDetails[0]["addr"]
            if uAddress == "::1":
                # skip the loopback address
                continue
            aCandidates.append(uAddress)
            if uInterface == uPreferred:
                # the preferred adapter wins outright
                aCandidates = [uAddress]
                break
    except Exception as e:
        Logger.error("Error on GetIPAddressV6:" + str(e))
    if aCandidates:
        # remove stuff like %eth0 that gets thrown on end of some addrs
        return aCandidates[-1].split('%')[0]
    return u'127.0.0.0'
nagyistoce/edx-platform | lms/djangoapps/courseware/masquerade.py | Python | agpl-3.0 | 9,747 | 0.004001 | '''
---------------------------------------- Masquerade ----------------------------------------
Allow course staff to see a student or staff view of courseware.
Which kind of view has been selected is stored in the session state.
'''
import logging
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.utils.translation import ugettext as _
from django.views.decorators.http import require_POST
from student.models import CourseEnrollment
from util.json_request import expect_json, JsonResponse
from opaque_keys.edx.keys import CourseKey
from xblock.fragment import Fragment
from xblock.runtime import KeyValueStore
log = logging.getLogger(__name__)
# The key used to store a user's course-level masquerade information in the Django session.
# The value is a dict from course keys to CourseMasquerade objects.
MASQUERADE_SETTINGS_KEY = 'masquerade_settings'
# The key used to store temporary XBlock field data in the Django session. This is where field
# data is stored to avoid modifying the state of the user we are masquerading as.
MASQUERADE_DATA_KEY = 'masquerade_data'
class CourseMasquerade(object):
    """
    Masquerade settings for a particular course.

    Plain value object recording which role -- and optionally which group
    or specific user -- staff is masquerading as for one course.
    """
    def __init__(self, course_key, role='student', user_partition_id=None, group_id=None, user_name=None):
        """Store the masquerade configuration for ``course_key``."""
        self.user_name = user_name
        self.group_id = group_id
        self.user_partition_id = user_partition_id
        self.role = role
        self.course_key = course_key
@require_POST
@login_required
@expect_json
def handle_ajax(request, course_key_string):
    """
    Handle AJAX posts to update the current user's masquerade for the specified course.

    The masquerade settings are stored in the Django session as a dict from course keys
    to CourseMasquerade objects.

    Expected JSON fields (all optional): ``role`` (defaults to 'student'),
    ``group_id``/``user_partition_id`` for group masquerade, and ``user_name``
    (username or email) for masquerading as a specific enrolled user.

    Returns a JsonResponse with ``success: True``, or ``success: False`` plus
    an ``error`` message when ``user_name`` matches no enrolled user.
    """
    course_key = CourseKey.from_string(course_key_string)
    masquerade_settings = request.session.get(MASQUERADE_SETTINGS_KEY, {})
    request_json = request.json
    role = request_json.get('role', 'student')
    group_id = request_json.get('group_id', None)
    # A user partition only makes sense together with a concrete group id.
    user_partition_id = request_json.get('user_partition_id', None) if group_id is not None else None
    user_name = request_json.get('user_name', None)
    if user_name:
        users_in_course = CourseEnrollment.objects.users_enrolled_in(course_key)
        try:
            # Accept either an email address or a username; normalize to username.
            if '@' in user_name:
                user_name = users_in_course.get(email=user_name).username
            else:
                users_in_course.get(username=user_name)
        except User.DoesNotExist:
            return JsonResponse({
                'success': False,
                'error': _(
                    'There is no user with the username or email address {user_name} '
                    'enrolled in this course.'
                ).format(user_name=user_name)
            })
    masquerade_settings[course_key] = CourseMasquerade(
        course_key,
        role=role,
        user_partition_id=user_partition_id,
        group_id=group_id,
        user_name=user_name,
    )
    # Reassign the key so the session notices the change and persists it.
    request.session[MASQUERADE_SETTINGS_KEY] = masquerade_settings
    return JsonResponse({'success': True})
def setup_masquerade(request, course_key, staff_access=False, reset_masquerade_data=False):
    """
    Sets up masquerading for the current user within the current request. The request's user is
    updated to have a 'masquerade_settings' attribute with the dict of all masqueraded settings if
    called from within a request context. The function then returns a pair (CourseMasquerade, User)
    with the masquerade settings for the specified course key or None if there isn't one, and the
    user we are masquerading as or request.user if masquerading as a specific user is not active.

    If the reset_masquerade_data flag is set, the field data stored in the session will be cleared.
    """
    # Masquerading is only available to staff, and only when the feature is enabled.
    if (
        request.user is None or
        not settings.FEATURES.get('ENABLE_MASQUERADE', False) or
        not staff_access
    ):
        return None, request.user
    if reset_masquerade_data:
        request.session.pop(MASQUERADE_DATA_KEY, None)
    masquerade_settings = request.session.setdefault(MASQUERADE_SETTINGS_KEY, {})
    # Store the masquerade settings on the user so it can be accessed without the request
    request.user.masquerade_settings = masquerade_settings
    course_masquerade = masquerade_settings.get(course_key, None)
    masquerade_user = None
    if course_masquerade and course_masquerade.user_name:
        try:
            masquerade_user = CourseEnrollment.objects.users_enrolled_in(course_key).get(
                username=course_masquerade.user_name
            )
        except User.DoesNotExist:
            # This can only happen if the user was unenrolled from the course since masquerading
            # was enabled. We silently reset the masquerading configuration in this case.
            course_masquerade = None
            del masquerade_settings[course_key]
            request.session.modified = True
        else:
            # Store the masquerading settings on the masquerade_user as well, since this user will
            # be used in some places instead of request.user.
            masquerade_user.masquerade_settings = request.user.masquerade_settings
            masquerade_user.real_user = request.user
    return course_masquerade, masquerade_user or request.user
def get_course_masquerade(user, course_key):
    """
    Return the CourseMasquerade installed for ``user`` on ``course_key``,
    or None when no masquerade is active for that course.
    """
    return getattr(user, 'masquerade_settings', {}).get(course_key)
def get_masquerade_role(user, course_key):
    """
    Return the role being masqueraded as, or None when no masquerade is active.
    """
    masquerade = get_course_masquerade(user, course_key)
    if not masquerade:
        return None
    return masquerade.role
def is_masquerading_as_student(user, course_key):
    """
    Return True when a staff member is masquerading as a student in the course.
    """
    masked_role = get_masquerade_role(user, course_key)
    return masked_role == 'student'
def is_masquerading_as_specific_student(user, course_key):  # pylint: disable=invalid-name
    """
    Return True when masquerading as one specific (named) student.
    """
    masquerade = get_course_masquerade(user, course_key)
    if masquerade is None:
        return False
    return bool(masquerade.user_name)
def get_masquerading_group_info(user, course_key):
    """
    Return the (group_id, user_partition_id) pair being masqueraded as,
    or (None, None) when no masquerade is active for the course.
    """
    masquerade = get_course_masquerade(user, course_key)
    if masquerade:
        return masquerade.group_id, masquerade.user_partition_id
    return None, None
# Sentinel object to mark deleted objects in the session cache
_DELETED_SENTINEL = object()
class MasqueradingKeyValueStore(KeyValueStore):
"""
A `KeyValueStore` to avoid affecting the user state when masquerading.
This `KeyValueStore` wraps an underlying `KeyValueStore`. Reads are forwarded to the underlying
store, but writes go to a Django session (or other dictionary-like object).
"""
def __init__(self, kvs, session):
"""
Arguments:
kvs: The KeyValueStore to wrap.
session: The Django session used to store temporary data in.
"""
self.kvs = kvs
self.session = session
self.session_data = session.setdefault(MASQUERADE_DATA_KEY, {})
def _serialize_key(self, key):
"""
Convert the key of Type KeyValueStore.Key to a string.
Keys are not JSON-serializable, so we can't use them as keys for the Django session.
The implementation is taken from cms/djangoapps/contentstore/views/session_kv_store.py.
|
endlessm/chromium-browser | chrome/android/monochrome/scripts/monochrome_apk_checker.py | Python | bsd-3-clause | 9,387 | 0.008735 | #!/usr/bin/env python2.7
#
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import re
import os
import posixpath
import StringIO
import sys
import subprocess
from contextlib import closing
def BuildFileMatchRegex(*file_matchers):
  """Compile the given regex fragments into one whole-string matcher.

  The alternation is wrapped in a non-capturing group so that '^' and '$'
  anchor *every* alternative. Without the group, '^a|b|c$' anchors only the
  first and last fragments, letting middle fragments match as substrings.
  """
  return re.compile('^(?:' + '|'.join(file_matchers) + ')$')
# Chrome specific files which are not in Monochrome.apk
CHROME_SPECIFIC = BuildFileMatchRegex(
r'lib/.*/libchrome\.so',
r'lib/.*/libchrome\.\d{4}\.\d{2,3}\.so', # libchrome placeholders
r'lib/.*/libchromium_android_linker\.so',
r'lib/.*/libchromeview\.so', # placeholder library
r'lib/.*/libchrome_crashpad_handler\.so',
r'lib/.*/crazy\.libchrome\.so',
r'lib/.*/crazy\.libchrome\.align',
r'lib/.*/gdbserver',
# Monochrome doesn't have any res directories whose api number is less
# than v24.
r'res/.*-v1\d/.*\.xml',
r'res/.*-v2[0-3]/.*\.xml',
r'META-INF/.*',
r'assets/metaresources.arsc',
r'assets/AndroidManifest.xml')
# WebView specific files which are not in Monochrome.apk
WEBVIEW_SPECIFIC = BuildFileMatchRegex(
r'lib/.*/libwebviewchromium\.so',
r'lib/.*/libchromium_android_linker\.so',
r'assets/webview_licenses.notice',
r'res/.*/icon_webview(.webp)?',
r'META-INF/.*',
# Monochrome doesn't have any res directories
# whose api level is less than v24.
r'res/.*-v1\d/.*\.xml',
r'res/.*-v2[0-3]/.*\.xml',
r'lib/.*/gdbserver')
# The files in Chrome are not same as those in Monochrome
CHROME_CHANGES = BuildFileMatchRegex(
r'AndroidManifest\.xml',
r'resources\.arsc',
r'classes\.dex',
r'classes2\.dex',
r'res/.*\.xml', # Resource id isn't same
r'assets/unwind_cfi_32', # Generated from apk's shared library
# All pak files except chrome_100_percent.pak are different
r'assets/resources\.pak',
r'assets/locales/am\.pak',
r'assets/locales/ar\.pak',
r'assets/locales/bg\.pak',
r'assets/locales/ca\.pak',
r'assets/locales/cs\.pak',
r'assets/locales/da\.pak',
r'assets/locales/de\.pak',
r'assets/locales/el\.pak',
r'assets/locales/en-GB\.pak',
r'assets/locales/en-US\.pak',
r'assets/locales/es-419\.pak',
r'assets/locales/es\.pak',
r'assets/locales/fa\.pak',
r'assets/locales/fi\.pak',
r'assets/locales/fil\.pak',
r'assets/locales/fr\.pak',
r'assets/locales/he\.pak',
r'assets/locales/hi\.pak',
r'assets/locales/hr\.pak',
r'assets/locales/hu\.pak',
r'assets/locales/id\.pak',
r'assets/locales/it\.pak',
r'assets/locales/ja\.pak',
r'assets/locales/ko\.pak',
r'assets/locales/lt\.pak',
r'assets/locales/lv\.pak',
r'assets/locales/nb\.pak',
r'assets/locales/nl\.pak',
r'assets/locales/pl\.pak',
r'assets/locales/pt-BR\.pak',
r'assets/locales/pt-PT\.pak',
r'assets/locales/ro\.pak',
r'assets/locales/ru\.pak',
r'assets/locales/sk\.pak',
r'assets/locales/sl\.pak',
r'assets/locales/sr\.pak',
r'assets/locales/sv\.pak',
r'assets/locales/sw\.pak',
r'assets/locales/th\.pak',
r'assets/locales/tr\.pak',
r'assets/locales/uk\.pak',
r'assets/locales/vi\.pak',
r'assets/locales/zh-CN\.pak',
r'assets/locales/zh-TW\.pak')
# The files in WebView are not same as those in Monochrome
WEBVIEW_CHANGES = BuildFileMatchRegex(
r'AndroidManifest\.xml',
r'resources\.arsc',
r'classes\.dex',
r'res/.*\.xml', # Resource id isn't same
r'assets/.*\.pak') # All pak files are not same as Monochrome
# Parse the output of unzip -lv, like
# 2384 Defl:N 807 66% 2001-01-01 00:00 2f2d9fce res/xml/privacy.xml
ZIP_ENTRY = re.compile(
"^ *[0-9]+ +\S+ +[0-9]+ +(?P<cmpr>[0-9]{1,2})% +\S+ +\S+ +"
"(?P<crc>[0-9a-fA-F]+) +(?P<name>\S+)"
)
class APKEntry:
  """One entry of an APK (zip) listing.

  Attributes:
    filename: path of the entry inside the archive.
    CRC: CRC checksum as the hex string printed by 'unzip -lv'.
    uncompressed: True if the entry is stored without compression.
  """

  def __init__(self, filename, crc, uncompressed):
    self.filename = filename
    self.CRC = crc
    self.uncompressed = uncompressed

  def __repr__(self):
    # Debug-friendly representation; not used for comparisons.
    return 'APKEntry(%r, %r, %r)' % (self.filename, self.CRC, self.uncompressed)
def DumpAPK(apk):
  """Run 'unzip -lv' on |apk| and parse the listing into APKEntry objects.

  Returns a list of APKEntry recording each entry's name, CRC string, and
  whether it is stored uncompressed (0% compression in the listing).
  """
  args = ['unzip', '-lv']
  args.append(apk)
  content = subprocess.check_output(args)
  apk_entries = []
  with closing(StringIO.StringIO(content)) as f:
    for line in f:
      match = ZIP_ENTRY.match(line)
      if match:
        # The 'cmpr' group is a *string*; compare against '0'. The previous
        # comparison with the integer 0 was always False, so no entry was
        # ever considered uncompressed.
        apk_entries.append(
            APKEntry(
                match.group('name'), match.group('crc'),
                match.group('cmpr') == '0'))
  return apk_entries
def VerifySameFile(monochrome_dict, apk, changes):
  """Check that files shared with Monochrome have identical content.

  Every entry of |apk| that also exists in |monochrome_dict| must have the
  same CRC, unless its name matches the |changes| regex of files that are
  expected to differ. Raises Exception on any unexpected difference.
  """
  mismatched = []
  for entry in apk:
    # The Monochrome counterpart may be absent (see exists_in_some_form()).
    counterpart = monochrome_dict.get(entry.filename)
    if not counterpart:
      continue
    if counterpart.CRC != entry.CRC and not changes.match(counterpart.filename):
      mismatched.append(entry.filename)
  if mismatched:
    raise Exception("The following files are not same as Monochrome:\n %s" %
                    '\n'.join(mismatched))
def VerifyUncompressed(monochrome, apk):
  """Verify uncompressed files in apk are a subset of those in monochrome.

  Every entry stored uncompressed in |apk| must also be stored uncompressed
  in the Monochrome APK; raises Exception listing the violating files.
  """
  # Use a set for the membership tests below; the previous list-based lookup
  # was O(len(apk) * len(monochrome)).
  monochrome_uncompressed = {i.filename for i in monochrome if i.uncompressed}
  compressed = [i.filename for i in apk
                if i.uncompressed and i.filename not in monochrome_uncompressed]
  if compressed:
    raise Exception("The following files are compressed in Monochrome:\n %s" %
                    '\n'.join(compressed))
def SuperSetOf(monochrome, apk):
  """Verify that every file of |apk| exists in |monochrome| in some form.

  Resources may live under a different qualifier directory in Monochrome
  because of its higher minSdkVersion (https://crbug.com/794438), so for
  'res/' entries a basename match in any res directory is accepted.
  """
  def exists_in_some_form(path_in_apk):
    if path_in_apk in monochrome:
      return True
    if not path_in_apk.startswith('res/'):
      return False
    # E.g. the apk may carry res/drawable/foo.png and res/drawable-v23/foo.png
    # while monochrome (minSdkVersion=24) needs only res/drawable-v23/foo.png.
    suffix = '/' + posixpath.basename(path_in_apk)
    return any(candidate.endswith(suffix) for candidate in monochrome)

  missing_files = [f for f in apk if not exists_in_some_form(f)]
  if missing_files:
    raise Exception('The following files are missing in Monochrome:\n %s' %
                    '\n'.join(missing_files))
def RemoveSpecific(apk_entries, specific):
  """Return |apk_entries| without the entries whose name matches |specific|."""
  kept = []
  for entry in apk_entries:
    if specific.search(entry.filename):
      continue
    kept.append(entry)
  return kept
def LoadPathmap(pathmap_path):
  """Load the pathmap of obfuscated resource paths.

  Returns: a dict mapping obfuscated paths back to original paths, or an
  empty dict when |pathmap_path| is None. Blank lines and '#' comments in
  the pathmap file are skipped.
  """
  if pathmap_path is None:
    return {}
  mapping = {}
  with open(pathmap_path, 'r') as pathmap_file:
    for raw_line in pathmap_file:
      entry = raw_line.strip()
      if not entry or entry.startswith('#'):
        continue
      original, renamed = entry.split(' -> ')
      mapping[renamed] = original
  return mapping
def DeobfuscateFilename(obfuscated_filename, pathmap):
  """Map an obfuscated resource path back to its original path.

  Names absent from |pathmap| are returned unchanged.
  """
  if obfuscated_filename in pathmap:
    return pathmap[obfuscated_filename]
  return obfuscated_filename
def ParseArgs(args):
  """Parses command line options.

  Returns:
    An argparse.Namespace with the parsed options.
  """
  arg_parser = argparse.ArgumentParser(prog='monochrome_apk_checker')
  arg_parser.add_argument('--monochrome-apk', required=True,
                          help='The monochrome APK path.')
  arg_parser.add_argument('--monochrome-pathmap',
                          help='The monochrome APK resources pathmap path.')
  arg_parser.add_argument('--chrome-apk', required=True,
                          help='The chrome APK path.')
  arg_parser.add_argument('--chrome-pathmap',
                          help='The chrome APK resources pathmap path.')
  arg_parser.add_argument('--system-webview-apk', required=True,
                          help='The system webview APK path.')
  arg_parser.add_argument('--system-webview-pathmap',
                          help='The system webview APK resources pathmap path.')
  return arg_parser.parse_args(args)
def main():
options = ParseArgs(sys.argv[1:])
monochrome = DumpAPK(options.monochrome_apk)
monochrome_pathmap = LoadPathmap(options.m |
krig/booth | test/runtests.py | Python | gpl-2.0 | 1,540 | 0.004545 | #!/usr/bin/python
import os
import re
import shutil
import sys
import tempfile
import time
import unittest
from clienttests import ClientConfigTests
from sitetests import SiteConfigTests
from arbtests import ArbitratorConfigTests
if __name__ == '__main__':
    # Refuse to run as root: the tests create scratch files under /tmp and
    # kill processes, which would be unsafe with root privileges.
    if os.geteuid() == 0:
        sys.stderr.write("Must be run non-root; aborting.\n")
        sys.exit(1)
    # Each run gets its own timestamped scratch directory under /tmp/booth-tests.
    tmp_path = '/tmp/booth-tests'
    if not os.path.exists(tmp_path):
        os.makedirs(tmp_path)
    test_run_path = tempfile.mkdtemp(prefix='%d.' % time.time(), dir=tmp_path)
    suite = unittest.TestSuite()
    testclasses = [
        SiteConfigTests,
        #ArbitratorConfigTests,
        ClientConfigTests,
    ]
    for testclass in testclasses:
        # Each test class reads its working directory from this attribute.
        testclass.test_run_path = test_run_path
        suite.addTests(unittest.TestLoader().loadTestsFromTestCase(testclass))
    runner_args = {
        #'verbosity' : 2,
    }
    major, minor, micro, releaselevel, serial = sys.version_info
    if major > 2 or (major == 2 and minor >= 7):
        # New in 2.7
        runner_args['buffer'] = True
        runner_args['failfast'] = True
        pass
    # not root anymore, so safe
    # needed because old instances might still use the UDP port.
    os.system("killall boothd")
    runner = unittest.TextTestRunner(**runner_args)
    result = runner.run(suite)
    # Keep the scratch directory on failure so it can be inspected.
    if result.wasSuccessful():
        shutil.rmtree(test_run_path)
        sys.exit(0)
    else:
        print "Left %s for debugging" % test_run_path
        sys.exit(1)
|
atvcaptain/enigma2 | lib/python/Tools/CopyFiles.py | Python | gpl-2.0 | 2,263 | 0.02563 | from __future__ import print_function
from Components.Task import PythonTask, Task, Job, job_manager as JobManager
from Tools.Directories import fileExists
from enigma import eTimer
from os import path
from shutil import rmtree, copy2, move
class DeleteFolderTask(PythonTask):
	"""Background task that removes a directory tree."""

	def openFiles(self, fileList):
		# Despite the name, fileList is the single path handed to rmtree().
		self.fileList = fileList

	def work(self):
		print("[DeleteFolderTask] files ", self.fileList)
		caught = []
		try:
			rmtree(self.fileList)
		except Exception as err:
			caught.append(err)
		if caught:
			raise caught[0]
class CopyFileJob(Job):
	"""Job wrapping an asynchronous 'cp -Rf' of a file or directory."""

	def __init__(self, srcfile, destfile, name):
		Job.__init__(self, _("Copying files"))
		command = 'cp -Rf "%s" "%s"' % (srcfile, destfile)
		AddFileProcessTask(self, command, srcfile, destfile, name)
class MoveFileJob(Job):
	"""Job wrapping an asynchronous 'mv -f' of a file or directory."""

	def __init__(self, srcfile, destfile, name):
		Job.__init__(self, _("Moving files"))
		command = 'mv -f "%s" "%s"' % (srcfile, destfile)
		AddFileProcessTask(self, command, srcfile, destfile, name)
class AddFileProcessTask(Task):
	"""Task that runs a copy/move shell command while polling progress.

	Progress is estimated every five seconds from the size of the
	destination file relative to the source size.
	"""

	def __init__(self, job, cmdline, srcfile, destfile, name):
		Task.__init__(self, job, name)
		self.setCmdline(cmdline)
		self.srcfile = srcfile
		self.destfile = destfile
		self.ProgressTimer = eTimer()
		self.ProgressTimer.callback.append(self.ProgressUpdate)

	def prepare(self):
		# The source size is the 100% reference for the progress estimate.
		if fileExists(self.srcfile, 'r'):
			self.srcsize = path.getsize(self.srcfile)
			self.ProgressTimer.start(5000, True)

	def ProgressUpdate(self):
		if self.srcsize <= 0 or not fileExists(self.destfile, 'r'):
			return
		fraction_done = path.getsize(self.destfile) / float(self.srcsize)
		self.setProgress(int(fraction_done * 100))
		self.ProgressTimer.start(5000, True)

	def afterRun(self):
		self.setProgress(100)
		self.ProgressTimer.stop()
def copyFiles(fileList, name):
	"""Copy each (src, dst) pair.

	Directories and files larger than ~100 MB are copied asynchronously via
	the job manager; smaller files are copied inline with shutil.copy2.
	"""
	for src, dst in fileList:
		if not path.isdir(src) and int(path.getsize(src))/1000/1000 <= 100:
			copy2(src, dst)
		else:
			JobManager.AddJob(CopyFileJob(src, dst, name))
def moveFiles(fileList, name):
	"""Move each (src, dst) pair.

	Directories and files larger than ~100 MB are moved asynchronously via
	the job manager; smaller files are moved inline with shutil.move.
	"""
	for src, dst in fileList:
		if not path.isdir(src) and int(path.getsize(src))/1000/1000 <= 100:
			move(src, dst)
		else:
			JobManager.AddJob(MoveFileJob(src, dst, name))
def deleteFiles(fileList, name):
	"""Queue an asynchronous delete of fileList via the job manager."""
	delete_job = Job(_("Deleting files"))
	delete_task = DeleteFolderTask(delete_job, name)
	delete_task.openFiles(fileList)
	JobManager.AddJob(delete_job)
|
hlzz/dotfiles | graphics/VTK-7.0.0/ThirdParty/Twisted/twisted/test/process_echoer.py | Python | bsd-3-clause | 225 | 0 | """Write back all data it receives."""
import sys
# Echo stdin to stdout one byte at a time, flushing after every byte so the
# parent process sees each byte immediately (presumably a helper for
# Twisted's process tests -- see the file path; confirm).
data = sys.stdin.read(1)
while data:
    sys.stdout.write(data)
    sys.stdout.flush()
    data = sys.stdin.read(1)
# Signal completion on stderr once stdin reaches EOF.
sys.stderr.write("byebye")
sys.stderr.flush()
|
raphonic/pomodorotimer | pomodoro.py | Python | mit | 3,109 | 0.00579 | import wx
import datetime
from time import strptime
from sqlalchemy import create_engine
from sqlalchemy import Column, Integer, Date
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
Engine = create_engine('sqlite:///pomodoros.db', echo=False)
Base = declarative_base()
def get_first_day_of_week(date):
    """Return the (Monday, Sunday) pair of the week containing ``date``.

    Args:
        date: a ``datetime.date``.

    Returns:
        Tuple of two ``datetime.date``: the Monday and Sunday of that week.
    """
    # date.weekday() already yields 0 (Monday) .. 6 (Sunday); the previous
    # round-trip through str()/strptime() was unnecessary.
    day_of_week = date.weekday()
    start = date - datetime.timedelta(days=day_of_week)
    end = date + datetime.timedelta(days=6 - day_of_week)
    return start, end
class PomodoroLog(Base):
    """One row per completed pomodoro, stamped with its completion date."""

    __tablename__ = "pomodorologs"
    id = Column(Integer, primary_key=True)
    # Pass the callable itself, not its result: with ``datetime.date.today()``
    # the default would be frozen to whatever date the module was first
    # imported, so long-running sessions would log wrong dates.
    completed_at = Column(Date, default=datetime.date.today)

    def __init__(self, completed_at=None):
        """Optionally override the completion date; otherwise the column default applies."""
        if completed_at is not None:
            self.completed_at = completed_at

    @staticmethod
    def get_logs():
        """Return [today's count, this week's count, all-time count]."""
        today = Session.query(PomodoroLog).filter(PomodoroLog.completed_at == datetime.date.today()).count()
        start, end = get_first_day_of_week(datetime.date.today())
        this_week = Session.query(PomodoroLog).filter(PomodoroLog.completed_at.between(start, end)).count()
        all_time = Session.query(PomodoroLog).count()
        return [today, this_week, all_time]
Base.metadata.create_all(Engine)
SessionMaker = sessionmaker(bind=Engine)
Session = SessionMaker()
class TimeDuration(wx.Timer):
    """Base countdown timer driven by wx timer events.

    ``Duration`` is the countdown length in seconds; subclasses override it
    and ``on_completed`` to implement the pomodoro and break phases.
    """

    Duration = 0  # seconds; overridden by subclasses

    def __init__(self, parent):
        wx.Timer.__init__(self, parent)
        self.parent = parent
        self.completed = False
        self.time_left = self.Duration
        self.parent.Bind(wx.EVT_TIMER, self.on_update, self)

    def decrement_one_sec(self):
        self.time_left -= 1

    def reset(self):
        """Stop the timer and restore the full duration."""
        self.Stop()
        self.time_left = self.Duration

    def mark_as_complete(self):
        self.completed = True

    def is_complete(self):
        return self.time_left == 0

    def formatted_time_left(self):
        """Return the remaining time as a zero-padded MM:SS string."""
        # time_left is in seconds, so this is minutes:seconds -- the old
        # locals were misleadingly named ``hours``/``mins``. divmod uses
        # integer division, which also keeps the output correct under
        # Python 3 where the previous ``/ 60`` would yield a float.
        minutes, seconds = divmod(self.time_left, 60)
        return "%02d:%02d" % (minutes, seconds)

    def on_update(self, event):
        """Timer tick: count down one second or fire completion."""
        if self.is_complete():
            self.on_completed()
        else:
            self.decrement_one_sec()
            self.parent.timer_label.SetLabel(self.formatted_time_left())

    def on_completed(self):
        self.reset()
        self.mark_as_complete()
class Pomodoro(TimeDuration):
    """25-minute work phase; hands control to the break timer when done."""

    Duration = 25 * 60

    def on_completed(self):
        TimeDuration.on_completed(self)
        app = self.parent
        app.break_timer.Start()
        app.current_timer = app.break_timer
        app.set_defaults()
        app.status_bar.SetStatusText("Break Time")
        app.media_player.Play()
class Break(TimeDuration):
    """5-minute break phase; logs the finished pomodoro when done."""

    Duration = 5 * 60

    def on_completed(self):
        TimeDuration.on_completed(self)
        app = self.parent
        app.current_timer = app.pomodoro_timer
        app.set_defaults()
        app.media_player.Play()
        # The break ending means a full pomodoro cycle finished -- persist it.
        Session.add(PomodoroLog())
        Session.commit()
|
GeorgiaTechDHLab/TOME | topics/migrations/0017_auto_20170630_1627.py | Python | bsd-3-clause | 916 | 0 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-06-30 16:27
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: add an integer ``rank`` field, defaulting to -1,
    to articletopicrank, topic, wordtopicrank and yeartopicrank."""
    dependencies = [
        ('topics', '0016_auto_20170627_1602'),
    ]
    operations = [
        migrations.AddField(
            model_name='articletopicrank',
            name='rank',
            field=models.IntegerField(default=-1),
        ),
        migrations.AddField(
            model_name='topic',
            name='rank',
            field=models.IntegerField(default=-1),
        ),
        migrations.AddField(
            model_name='wordtopicrank',
            name='rank',
            field=models.IntegerField(default=-1),
        ),
        migrations.AddField(
            model_name='yeartopicrank',
            name='rank',
            field=models.IntegerField(default=-1),
        ),
    ]
|
jgraef/pyAGE | age/descriptor.py | Python | lgpl-3.0 | 4,547 | 0.007259 | # age/descriptor.py
# pyAGE - A Python implementation of the Analog Genetic Encoding
# Copyright (C) 2010 Janosch Gräf
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
class Descriptor:
    """Configuration and validation container for an AGE setup.

    Holds the genome alphabet, the device/terminal/parameter tokens, the
    mutation possibilities, the alignment scoring matrix and population
    parameters, together with ``check_*`` validators for each of them.
    """

    def __init__(self, **params):
        self.alphabet = params.get("alphabet", None)
        self.devices = params.get("devices", None)
        self.terminal = params.get("terminal", None)
        self.parameter = params.get("parameter", None)
        self.possibilities = params.get("possibilities", None)
        self.scoring = params.get("scoring", None)
        # TODO rename scoring matrix in substitution/insert/delete matrix
        self.come_alpha = params.get("come_alpha", 1.0)
        # used in populations
        self.elitism = params.get("elitism", 0.2)

    def check_alphabet(self):
        """The alphabet must be a non-empty string without duplicate letters."""
        if type(self.alphabet) != str or self.alphabet == "":
            return False
        return all(self.alphabet.count(letter) == 1 for letter in self.alphabet)

    def check_devices(self):
        """All device tokens must be valid; an empty or missing list is invalid.

        Bug fix: the old loop returned after checking only the *first*
        device, and fell through (returning None) for an empty list.
        """
        if type(self.devices) != list or not self.devices:
            return False
        return all(self.check_token(d) for d in self.devices)

    def check_token(self, token):
        """A token is a non-empty string using only letters of the alphabet."""
        if type(token) != str or len(token) == 0:
            return False
        return all(t in self.alphabet for t in token)

    def check_possibilities(self):
        """Normalize the mutation-possibility dict.

        Missing keys default to 0.0 and present values are coerced to float
        in place (same side effect as before). Returns False when the value
        is not a dict or a value is not convertible to float.
        """
        keys = ["char_delete", "char_insert", "char_replace", "frag_delete", "frag_move", "frag_copy", "device_insert", "chromosome_delete", "chromosome_copy", "chromosome_crossover"]
        if self.possibilities is None:
            self.possibilities = {}
        elif type(self.possibilities) != dict:
            return False
        for p in keys:
            if p not in self.possibilities:
                self.possibilities[p] = 0.0
            else:
                try:
                    self.possibilities[p] = float(self.possibilities[p])
                except ValueError:
                    return False
        return True

    def check_scoring(self):
        """The scoring matrix must be a b*b tuple of floats (b = alphabet size).

        Bug fix: the old version fell off the end without ``return True``,
        so any *valid* non-None matrix made this check (and check()) fail.
        """
        if self.scoring is None:
            return True
        b = len(self.alphabet)
        if type(self.scoring) != tuple or len(self.scoring) != b:
            return False
        for row in self.scoring:
            if type(row) != tuple or len(row) != b:
                return False
            for cell in row:
                if type(cell) != float:
                    return False
        return True

    def check_come_alpha(self):
        """come_alpha must be (convertible to) a float in [0.0, 1.0]."""
        if type(self.come_alpha) != float:
            try:
                self.come_alpha = float(self.come_alpha)
            except ValueError:
                return False
        return 0.0 <= self.come_alpha <= 1.0

    def check_elitism(self):
        """elitism must lie in the half-open interval (0.0, 1.0]."""
        return 0.0 < self.elitism <= 1.0

    def check(self):
        # TODO raise ValueError when check goes wrong
        # Bug fix: ``self.check_elitism`` was referenced without calling it;
        # a bound method is always truthy, so bad elitism was never detected.
        return (self.check_alphabet()
                and self.check_devices()
                and self.check_token(self.terminal)
                and self.check_token(self.parameter)
                and self.check_possibilities()
                and self.check_scoring()
                and self.check_come_alpha()
                and self.check_elitism())

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        return "Descriptor(alphabet = "+repr(self.alphabet)+",\n" \
              +"           devices = "+repr(self.devices)+",\n" \
              +"           terminal = "+repr(self.terminal)+",\n" \
              +"           parameter = "+repr(self.parameter)+",\n" \
              +"           possibilities = "+repr(self.possibilities)+",\n" \
              +"           scoring = "+repr(self.scoring)+",\n" \
              +"           come_alpha = "+repr(self.come_alpha)+",\n" \
              +"           elitism = "+repr(self.elitism)+")"
__all__ = ["Descriptor"]
|
GerritCodeReview/gerrit | tools/maven/mvn.py | Python | apache-2.0 | 2,729 | 0.000366 | #!/usr/bin/env python3
# Copyright (C) 2013 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
from os import path, environ
from subprocess import check_output, CalledProcessError
from sys import stderr
parser = argparse.ArgumentParser()
parser.add_argument('--repository', help='maven repository id')
parser.add_argument('--url', help='maven repository url')
parser.add_argument('-o')
parser.add_argument('-a', help='action (valid actions are: install,deploy)')
parser.add_argument('-v', help='gerrit version')
parser.add_argument('-s', action='append', help='triplet of artifactId:type:path')
args = parser.parse_args()
if not args.v:
print('version is empty', file=stderr)
exit(1)
root = path.abspath(__file__)
while not path.exists(path.join(root, 'WORKSPACE')):
root = path.dirname(root)
if 'install' == args.a:
cmd = [
'mvn',
'install:install-file',
'-Dversion=%s' % args.v,
]
elif 'deploy' == ar | gs.a:
cmd = [
'mvn',
'gpg:sign-and-deploy-file',
'-Dversion=%s' % args.v,
'-DrepositoryId=%s' % args.repository,
'-Durl=%s' % args.url,
]
else:
print("unknown action -a %s" % args.a, file=stderr)
exit(1)
for spec in args.s:
artifact, packaging_type, src | = spec.split(':')
exe = cmd + [
'-DpomFile=%s' % path.join(root, 'tools', 'maven',
'%s_pom.xml' % artifact),
'-Dpackaging=%s' % packaging_type,
'-Dfile=%s' % src,
]
try:
if environ.get('VERBOSE'):
print(' '.join(exe), file=stderr)
check_output(exe)
except Exception as e:
print('%s command failed: %s\n%s' % (args.a, ' '.join(exe), e),
file=stderr)
if environ.get('VERBOSE') and isinstance(e, CalledProcessError):
print('Command output\n%s' % e.output, file=stderr)
exit(1)
out = stderr
if args.o:
out = open(args.o, 'w')
with out as fd:
if args.repository:
print('Repository: %s' % args.repository, file=fd)
if args.url:
print('URL: %s' % args.url, file=fd)
print('Version: %s' % args.v, file=fd)
|
grepory/tomboy | test/DBusSearchTest.py | Python | lgpl-2.1 | 859 | 0.023283 | #!/usr/bin/python
"""Usage : ./tomboy.py <True|False> <search-term> <search-term> ...
For Example:
./tomboy.py | True Hello Res
./tomboy.py True Hello
./tomboy.py True hlo
"""
import sys
import dbus
import gobject
import dbus.glib
# Get the D-Bus session bus
bus = | dbus.SessionBus()
# Access the Tomboy D-Bus object
obj = bus.get_object("org.gnome.Tomboy", "/org/gnome/Tomboy/RemoteControl")
# Access the Tomboy remote control interface
tomboy = dbus.Interface(obj, "org.gnome.Tomboy.RemoteControl")
def func(a):
if (len(a)<2):
print __doc__
elif (a[1][0].lower() =="t" or a[1][0].lower() == "f"):
#TODO: Fix passing of case sensitive flag (this will always look True)
for i in tomboy.SearchNotes(' '.join(a[2:]),a[1]):
print tomboy.GetNoteTitle(i)
def f(a):
print ' '.join(a[1:])
if __name__=='__main__':
sys.exit(func(sys.argv))
|
frougon/CondConfigParser | condconfigparser/__init__.py | Python | bsd-2-clause | 2,270 | 0 | # -*- coding: utf-8 -*-
# __init__.py --- Initialization of the condconfigparser package.
#
# Copyright (c) 2014, Florent Roug | on
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduc | e the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of the CondConfigParser Project.
"""A configuration file parser with Python-style variables and boolean \
expressions to define conditional sections.
"""
from .version import __version__, version_info
from .exceptions import error, \
ParseError, \
InvalidUsage, \
UndefinedVariablesInAssignmentOrPredicate, \
UndefinedVariablesInAssignment, \
UndefinedVariablesInPredicate, \
InTestTypeError
from .lexer import Lexer
from .parser import Parser
from .condconfig import RawConditionalConfig
|
alejob/mdanalysis | package/MDAnalysis/topology/tpr/setting.py | Python | gpl-2.0 | 8,002 | 0 | # -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding: utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
# TPR parser and tpr support module
# Copyright (c) 2011 Zhuyi Xue
# Released under the GNU Public Licence, v2
"""
TPRParser settings
==================
Definition of constants.
The currently read file format versions are defined in
:data:`SUPPORTED_VERSIONS`.
"""
from six.moves import range
#: Gromacs TPR file format versions that can be read by the TPRParser.
SUPPORTED_VERSIONS = (58, 73, 83, 100, 103, 110)
# Some constants
STRLEN = 4096
BIG_STRLEN = 1048576
DIM = 3
NR_RBDIHS = 6 # <gromacs-5.1-dir>/src/gromacs/topology/idef.h
NR_CBTDIHS = 6 # <gromacs-5.1-dir>/src/gromacs/topology/idef.h
NR_FOURDIHS = 4 # <gromacs-5.1-dir>/src/gromacs/topology/idef.h
egcNR = 10 # include/types/topolog.h
TPX_TAG_RELEASE = "release" # <gromacs-5.1-dir>/src/gromacs/fileio/tpxio.c
tpx_version = 103 # <gromacs-5.1-dir>/src/gromacs/fileio/tpxio.c
tpx_generation = 26 # <gromacs-5.1-dir>/src/gromacs/fileio/tpxio.c
tpxv_RestrictedBendingAndCombinedAngleTorsionPotentials = 98
#: Function types from ``<gromacs_dir>/include/types/idef.h``
(
F_BONDS, F_G96BONDS, F_MORSE, F_CUBICBONDS,
F_CONNBONDS, F_HARMONIC, F_FENEBONDS, F_TABBONDS,
F_TABBONDSNC, F_RESTRBONDS, F_ANGLES, F_G96ANGLES, F_RESTRANGLES,
F_LINEAR_ANGLES, F_CROSS_BOND_BONDS, F_CROSS_BOND_ANGLES, F_UREY_BRADLEY,
F_QUARTIC_ANGLES, F_TABANGLES, F_PDIHS, F_RBDIHS, F_RESTRDIHS, F_CBTDIHS,
F_FOURDIHS, F_IDIHS, F_PIDIHS, F_TABDIHS,
F_CMAP, F_GB12, F_GB13, F_GB14,
F_GBPOL, F_NPSOLVATION, F_LJ14, F_COUL14,
F_LJC14_Q, F_LJC_PAIRS_NB, F_LJ, F_BHAM,
F_LJ_LR, F_BHAM_LR, F_DISPCORR, F_COUL_SR,
F_COUL_LR, F_RF_EXCL, F_COUL_RECIP, F_LJ_RECIP, F_DPD,
F_POLARIZATION, F_WATER_POL, F_THOLE_POL, F_ANHARM_POL,
F_POSRES, F_FBPOSRES, F_DISRES, F_DISRESVIOL, F_ORIRES,
F_ORIRESDEV, F_ANGRES, F_ANGRESZ, F_DIHRES,
F_DIHRESVIOL, F_CONSTR, F_CONSTRNC, F_SETTLE,
F_VSITE2, F_VSITE3, F_VSITE3FD, F_VSITE3FAD,
F_VSITE3OUT, F_VSITE4FD, F_VSITE4FDN, F_VSITEN,
F_COM_PULL, F_EQM, F_EPOT, F_EKIN,
F_ETOT, F_ECONSERVED, F_TEMP, F_VTEMP_NOLONGERUSED,
F_PDISPCORR, F_PRES, F_DHDL_CON, F_DVDL,
F_DKDL, F_DVDL_COUL, F_DVDL_VDW, F_DVDL_BONDED,
F_DVDL_RESTRAINT, F_DVDL_TEMPERATURE, F_NRE) = list(range(92))
#: Function types from ``<gromacs_dir>/src/gmxlib/tpxio.c``
ftupd = [
(20, F_CUBICBONDS), (20, F_CONNBONDS), (20, F_HARMONIC), (34, F_FENEBONDS),
(43, F_TABBONDS), (43, F_TABBONDSNC), (70, F_RESTRBONDS),
(tpxv_RestrictedBendingAndCombinedAngleTorsionPotentials, F_RESTRANGLES),
(76, F_LINEAR_ANGLES), (30, F_CROSS_BOND_BONDS), (30, F_CROSS_BOND_ANGLES),
(30, F_UREY_BRADLEY), (34, F_QUARTIC_ANGLES), (43, F_TABANGLES),
(tpxv_RestrictedBendingAndCombinedAngleTorsi | onPotentials, F_RESTRDIHS),
(tpxv_RestrictedBendingAndCombinedAngleTorsionPotentials, F_CBTDIHS),
(26, F_FOURDIHS), (26, F_PIDIHS), (43, F_TABDIHS), (65, F_CMAP),
(60, F_GB12), (61, F_GB13), (61, F_GB14), (72, F_GBPOL),
(72, F_NPSOLVATION), (41, F_LJC14_Q), (41, F_LJC_PAIRS_NB),
(32, F_ | BHAM_LR), (32, F_RF_EXCL), (32, F_COUL_RECIP), (93, F_LJ_RECIP),
(46, F_DPD), (30, F_POLARIZATION), (36, F_THOLE_POL), (90, F_FBPOSRES),
(22, F_DISRESVIOL), (22, F_ORIRES), (22, F_ORIRESDEV),
(26, F_DIHRES), (26, F_DIHRESVIOL), (49, F_VSITE4FDN),
(50, F_VSITEN), (46, F_COM_PULL), (20, F_EQM),
(46, F_ECONSERVED), (69, F_VTEMP_NOLONGERUSED), (66, F_PDISPCORR),
(54, F_DHDL_CON), (76, F_ANHARM_POL), (79, F_DVDL_COUL),
(79, F_DVDL_VDW,), (79, F_DVDL_BONDED,), (79, F_DVDL_RESTRAINT),
(79, F_DVDL_TEMPERATURE),
]
#: Interaction types from ``<gromacs_dir>/gmxlib/ifunc.c``
interaction_types = [
("BONDS", "Bond", 2),
("G96BONDS", "G96Bond", 2),
("MORSE", "Morse", 2),
("CUBICBONDS", "Cubic Bonds", 2),
("CONNBONDS", "Connect Bonds", 2),
("HARMONIC", "Harmonic Pot.", 2),
("FENEBONDS", "FENE Bonds", 2),
("TABBONDS", "Tab. Bonds", 2),
("TABBONDSNC", "Tab. Bonds NC", 2),
("RESTRAINTPOT", "Restraint Pot.", 2),
("ANGLES", "Angle", 3),
("G96ANGLES", "G96Angle", 3),
("RESTRANGLES", "Restricted Angles", 3),
("LINEAR_ANGLES", "Lin. Angle", 3),
("CROSS_BOND_BOND", "Bond-Cross", 3),
("CROSS_BOND_ANGLE", "BA-Cross", 3),
("UREY_BRADLEY", "U-B", 3),
("QANGLES", "Quartic Angles", 3),
("TABANGLES", "Tab. Angles", 3),
("PDIHS", "Proper Dih.", 4),
("RBDIHS", "Ryckaert-Bell.", 4),
("RESTRDIHS", "Restricted Dih.", 4),
("CBTDIHS", "CBT Dih.", 4),
("FOURDIHS", "Fourier Dih.", 4),
("IDIHS", "Improper Dih.", 4),
("PIDIHS", "Improper Dih.", 4),
("TABDIHS", "Tab. Dih.", 4),
("CMAP", "CMAP Dih.", 5),
("GB12", "GB 1-2 Pol.", 2),
("GB13", "GB 1-3 Pol.", 2),
("GB14", "GB 1-4 Pol.", 2),
("GBPOL", "GB Polarization", None),
("NPSOLVATION", "Nonpolar Sol.", None),
("LJ14", "LJ-14", 2),
("COUL14", "Coulomb-14", None),
("LJC14_Q", "LJC-14 q", 2),
("LJC_NB", "LJC Pairs NB", 2),
("LJ_SR", "LJ (SR)", 2),
("BHAM", "Buck.ham (SR)", 2),
("LJ_LR", "LJ (LR)", None),
("BHAM_LR", "Buck.ham (LR)", None),
("DISPCORR", "Disper. corr.", None),
("COUL_SR", "Coulomb (SR)", None),
("COUL_LR", "Coulomb (LR)", None),
("RF_EXCL", "RF excl.", None),
("COUL_RECIP", "Coul. recip.", None),
("LJ_RECIP", "LJ recip.", None),
("DPD", "DPD", None),
("POLARIZATION", "Polarization", 2),
("WATERPOL", "Water Pol.", 5),
("THOLE", "Thole Pol.", 4),
("ANHARM_POL", "Anharm. Pol.", 2),
("POSRES", "Position Rest.", 1),
("FBPOSRES", "Flat-bottom posres", 1),
("DISRES", "Dis. Rest.", 2),
("DISRESVIOL", "D.R.Viol. (nm)", None),
("ORIRES", "Orient. Rest.", 2),
("ORDEV", "Ori. R. RMSD", None),
("ANGRES", "Angle Rest.", 4),
("ANGRESZ", "Angle Rest. Z", 2),
("DIHRES", "Dih. Rest.", 4),
("DIHRESVIOL", "Dih. Rest. Viol.", None),
("CONSTR", "Constraint", 2),
("CONSTRNC", "Constr. No Conn.", 2),
("SETTLE", "Settle", 3),
("VSITE2", "Virtual site 2", 3),
("VSITE3", "Virtual site 3", 4),
("VSITE3FD", "Virtual site 3fd", 4),
("VSITE3FAD", "Virtual site 3fad", 4),
("VSITE3OUT", "Virtual site 3out", 4),
("VSITE4FD", "Virtual site 4fd", 5),
("VSITE4FDN", "Virtual site 4fdn", 5),
("VSITEN", "Virtual site N", 2),
("COM_PULL", "COM Pull En.", None),
("EQM", "Quantum En.", None),
("EPOT", "Potential", None),
("EKIN", "Kinetic En.", None),
("ETOT", "Total Energy", None),
("ECONS", "Conserved En.", None),
("TEMP", "Temperature", None),
("VTEMP", "Vir. Temp. (not used)", None),
("PDISPCORR", "Pres. DC", None),
("PRES", "Pressure", None),
("DH/DL_CON", "dH/dl constr.", None),
("DV/DL", "dVremain/dl", None),
("DK/DL", "dEkin/dl", None),
("DVC/DL", "dVcoul/dl", None),
("DVV/DL", "dVvdw/dl", None),
("DVB/DL", "dVbonded/dl", None),
("DVR/DL", "dVrestraint/dl", None),
("DVT/DL", "dVtemperature/dl", None)
]
|
gnuworldman/site-analytics | src/site_analytics/wsgi.py | Python | gpl-3.0 | 223 | 0 | """WSGI config for | site_analytics project."""
import os
from django.core.wsgi import g | et_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "site_analytics.settings")
application = get_wsgi_application()
|
toranb/django-bower-registry | api/migrations/0001_initial.py | Python | mit | 1,703 | 0.007046 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Package'
db.create_table(u'api_package', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=500, db_index=True)),
('url', self.gf('django.db.models.fields.CharField')(uniqu | e=True, max_length=500)),
('created_at', self.gf('django.db.models.fields.DateField')(auto_now_add=True, blank=True)),
))
db.send_create_signal(u'api', ['Package'])
# Adding unique constraint on 'Package', fields ['name', 'url']
db.create_unique(u'api_package' | , ['name', 'url'])
def backwards(self, orm):
# Removing unique constraint on 'Package', fields ['name', 'url']
db.delete_unique(u'api_package', ['name', 'url'])
# Deleting model 'Package'
db.delete_table(u'api_package')
models = {
u'api.package': {
'Meta': {'unique_together': "(('name', 'url'),)", 'object_name': 'Package'},
'created_at': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '500', 'db_index': 'True'}),
'url': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '500'})
}
}
complete_apps = ['api'] |
mick-d/nipype | nipype/interfaces/brainsuite/tests/test_auto_Hemisplit.py | Python | bsd-3-clause | 1,610 | 0.023602 | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..brainsuite import Hemisplit
def test_Hemisplit_inputs():
input_map = dict(args=dict(argstr='%s',
),
environ=dict(nohash=True,
usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
inputHemisphereLabelFile=dict(argstr='-l %s',
mandatory=True,
),
inputSurfaceFile=dict(argstr='-i %s',
mandatory=True,
),
outputLeftHemisphere=dict(argstr='--left %s',
genfile=True,
),
outputLeftPialHemisphere=dict(argstr='-pl %s',
genfile=True,
),
outputRightHemisphere=dict(argstr='--right %s',
genfile=True,
),
outputRightPialHemisphere=dict(argstr='-pr %s',
genfile=True,
),
pialSurfaceFile=dict(argstr='-p %s',
),
terminal_output=dict(deprecated='1.0.0',
nohash=True,
),
timer=dict(argstr='--timer',
),
verbosity=dict(argstr='-v %d',
),
)
i | nputs = Hemisplit.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_Hemisplit_outputs():
output_map = dict(outputLeftHemisphere=dict(),
outputLeftPialHemisphere=dict(),
outputRightHemisphere=dict(),
outputRightPialHemisphere=dict(),
)
outputs = Hemispli | t.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
|
apmoore1/semeval | lstms/EarlyStoppingLSTM.py | Python | gpl-3.0 | 2,914 | 0.002059 | from semeval import helper as helper
from semeval.lstms.LSTMModel import LSTMModel
import numpy
from keras.models import Sequential
from keras.layers import Dense, Activation, Bidirectional, LSTM, Dropout
from keras.callbacks import EarlyStopping
class EarlyStoppingLSTM(LSTMModel):
'''Model that can train an LSTM and apply the trainned model to unseen
data. Inherits from LSTMModel.
Instance Arguments:
self._word2vec_model - gensim.models.Word2Vec required as an argument to __init__
self._max_length = 0
self._model = None
public methods:
train - trains a Bi-directional LSTM with dropout and early stopping on
the texts and sentiment values given.
test - Using the trained model saved at self._model will return a list of
sentiment values given the texts in the argument of the method.
'''
def __init__(self, word2vec_model):
super().__init__(word2vec_model)
def fit(self, train_texts, sentiment_values):
'''Given a list of Strings and a list of floats (sentiments) or numpy
array of floats. It will return a trained LSTM model and `save` the model to
self._model for future use using self.test(texts).
The model converts the list of strings into list of numpy matrixs
which | has the following dimensions:
length of the longest train text broken down into tokens
by
the vector size of the word2vec mo | del given in the constructor
e.g. 21, 300 if the word2vec model vector size if 300 and the length of
the longest train text in tokens is 21.
For more details on the layers use read the source or after training
visualise using visualise_model function.
'''
super().fit()
max_length = self._set_max_length(train_texts)
vector_length = self._word2vec_model.vector_size
train_vectors = self._text2vector(train_texts)
model = Sequential()
model.add(Dropout(0.5, input_shape=(max_length, vector_length)))
# Output of this layer is of max_length by max_length * 2 dimension
# instead of max_length, vector_length
model.add(Bidirectional(LSTM(max_length, activation='softsign',
return_sequences=True)))
model.add(Dropout(0.5))
model.add(Bidirectional(LSTM(max_length, activation='softsign')))
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('linear'))
model.compile(loss='mse',
optimizer='rmsprop',
metrics=['cosine_proximity'],
clipvalue=5)
early_stopping = EarlyStopping(monitor='val_loss', patience=10)
model.fit(train_vectors, sentiment_values, validation_split=0.1,
callbacks=[early_stopping] , nb_epoch=100)
return self._set_model(model)
|
leifos/tango_with_django_19 | code/tango_with_django_project/rango/forms.py | Python | apache-2.0 | 2,036 | 0.005894 | from django import forms
from rango.models import Page, Category, UserProfile
class CategoryForm(forms.ModelForm):
name = forms.CharField(max_length=128, help_text="Please enter the category name.")
views = forms.IntegerField(widget=forms.HiddenInput(), initial=0)
likes = forms.IntegerField(widget=forms.HiddenInput(), initial=0)
slug = forms.CharField(widget=forms.HiddenInput(), required=False)
# An inline class to provide additional information on the form.
class Meta:
# Provide an association between the ModelForm and a model
model = Category
fields = ('name',)
class PageForm(forms.ModelForm):
title = forms.CharField(max_length=128, help_text="Please enter the title of the page.")
url = forms.URLField(max_length=200, help_text="Please enter the URL of the page.")
views = forms.IntegerField(widget=forms.HiddenInput(), initial=0)
def clean(self):
cleaned_data = self.cleaned_data
url = cleaned_data.get('url')
# If url is not empty and doesn't start with 'http://', prepend 'http://'.
if url and not url.startswith('http://'):
url = 'http://' + url
cleaned_data['url'] = url
return cleaned_data
| class Meta:
# Provide an association between the ModelForm and a model
model = Page
# What fields do we want to include in our form?
# This way we don't need every field in the model present.
# Some fields may allow NULL values, so we may not want to include them...
# Here, we are hiding the foreign key.
# we can either exclude the category field from the form,
exclude = ('category',)
| #or specify the fields to include (i.e. not include the category field)
#fields = ('title', 'url', 'views')
class UserProfileForm(forms.ModelForm):
website = forms.URLField(required=False)
picture = forms.ImageField(required=False)
class Meta:
model = UserProfile
exclude = ('user',) |
Mashdon/RaspAquaLight | WebServ/WebController.py | Python | mit | 484 | 0.002066 | from threading import RLock
from Model import C_Model
class C_WebCont | roller:
instance_WebController = None
locker = RLock()
@staticmethod
def getInstance():
with C_WebController.locker:
if C_WebController.instance_WebController is None:
C_WebController.instance_WebController = C_WebController()
return C_WebController.instance_WebController
def __init__(self):
self.model = C_Model. | getInstance()
|
jammie080/Twitter-Bot | twitter/user.py | Python | bsd-2-clause | 5,841 | 0.012498 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
from settings import config
import scrape, logger
from datetime import datetime
class user(object):
def __init__(self,driver):
self.scrape = scrape.scrape(driver)
self.logger = logger.logger()
self.url = "https://www.twitter.com/"
def is_protected(self,user_id):
soup = self.scrape.filter_data(user_id,self.url+user_id)
is_protected = ""
try:
protected = soup.find('span',class_="Icon--protected")
protects = protected.find('span',class_="u-hiddenVisually")
if "Protected Tweets" in protects.text:
return True
except:
return False
def follow_back_score(self,user_id):
followers = self.followers(user_id)
following = self.following(user_id)
profile_pic = self.is_default_pic(user_id)
protected = self.is_protected(user_id)
last_tweet = self.last_tweeted_more_than(user_id,4)
days_ago = self.days_ago(user_id)
try:
ratio = float(followers.encode('utf-8')) / float(following.encode("utf-8"))
self.ratio = ratio
if (ratio > .60 and ratio < 1.20):
if (following >= followers or ratio < 1.20):
ratio *= 100
print "[+] Profile : Ratio : {0:000.0f}%".format(ratio)
if (profile_pic != True):
if (protected != True):
if (last_tweet != True):
print "[+] Profile : add to list \n"
self.logger.write_data(config.twitter['files']['follow-users'],user_id)
else:
print "[+] Profile : Don't follow \n"
self.logger.write_data(config.twitter['files']['dont-follow-users'],user_id)
else:
ratio *= 100
print "[+] Profile : Ratio : {0:000.0f}%".format(ratio)
print "[+] Profile : Don't follow \n"
self.logger.write_data(config.twitter['files']['dont-follow-users'],user_id)
except:
self.ratio *= 100
print "[+] Profile : Ratio : {0:000.0f}%".format(self.ratio)
print "[+] Profile : Don't follow \n"
self.logger.write_data(config.twitter['files']['dont-follow-users'],user_id)
def is_default_pic(self,user_id):
soup = self.scrape.filter_data(user_id,self.url+user_id)
profile_pic = soup.find('img',class_="ProfileAvatar-image")
if "default_profile_images" in profile_pic["src"]:
return True
else:
return False
def followers(self,user_id):
soup = self.scrape.filter_data(user_id,self.url+user_id)
try:
profile_nav = soup.find('li',class_="ProfileNav-item--followers")
profile_stats = profile_nav.find('a',class_="ProfileNav-stat")
followers = profile_stats.find('span',class_="ProfileNav-value")
if "K" in followers.text:
followers = profile_nav.find('a',{"class":"ProfileNav-stat"})['title']
followers = followers.encode('utf-8').replace(",","")
followers = followers.encode('utf-8').replace(" Followers","")
else:
followers = followers.text
followers = followers.encode('utf-8').replace(",","")
except:
followers = 0.0
return followers
def following(self,user_id):
soup = self.scrape.filter_data(user_id,self.url+user_id)
try:
profile_nav = soup.find('li',class_="ProfileNav-item--following")
profile_stats = profile_nav.find('a',class_="ProfileNav-stat")
followers = profile_stats.find('span',class_="ProfileNav-value")
if "K" in followers.text:
followers = profile_nav.find('a',{"class":"ProfileNav-stat"})['title']
followers = followers.encode('utf-8').replace(",","")
followers = followers.encode('utf-8').replace(" Following","")
else:
followers = followers.text
followers = followers.encode('utf-8').replace(",","")
except:
followers = 0.0
return followers
def last_tweet(self,user_id):
soup = self.scrape.filter_data(user_id,self.url+user_id)
last_tweet = soup.find("p",class_="tweet-text")
return last_tweet.text
def last_tweet_date(self,user_id):
soup = self.scrape.filter_data(user_id,self.url+user_id)
try:
date_time = soup.find('small',class_="time")
date = date_time.find('a',class_="tweet-timestamp")
date_a = date.find('span',class_="_timestamp")
timestamp = date_a['data-time']
except:
timestamp = 1453291262
return timestamp
def last_tweeted_more_than(self,user_id,days=3):
mytime = self.last_tweet_date(user_id)
date = datetime.fromtimestam | p(float(mytime))
if(datetime.today() - date).days > days:
return True
else:
return False
def days_since_last_tweet(self,user_id):
mytime | = self.last_tweet_date(user_id)
date = datetime.fromtimestamp(float(mytime))
today = datetime.today()
diff = today - date
days_ago = diff.days
return days_ago
def days_ago(self,user_id):
mytime = self.last_tweet_date(user_id)
date = datetime.fromtimestamp(float(mytime))
today = datetime.today()
diff = today - date
days_ago = diff.days
return days_ago |
esosn/euler | 55.py | Python | mit | 273 | 0.007326 | import time
from fu | nctions import islychrel
times = []
times.append(time.clock())
limit = 10000
count = 0
for i in range(10, limit):
if islychrel(i, 0, 50):
co | unt += 1
print(count)
times.append(time.clock())
print(times[-1] - times[-2]) |
vzhong/pystacks | pystacks/optimizer.py | Python | mit | 2,213 | 0.002711 | from theano import tensor as T, function, shared, config
import numpy as np
class Optimizer(object):
def update(self, param, grad, lr, transformer=None, grad_transformer=None):
if grad_transformer is not None:
grad = grad_transformer.transform(grad)
param_update, helper_update = self.update_(param, grad, lr)
if transformer is not None:
param, new_param = param_update
new_param = transformer.transform(new_param)
param_update = param, new_param
return [param_update] if helper_update is None else [param_update, helper_update]
def update_(self, param, grad, lr):
raise NotImplementedError
class SGD(Optimizer):
def update_(self, param, grad, lr):
r"""
.. math:: \theta := \theta - lr * \nabla_{\theta} J
"""
return (param, param - lr * grad), None
class Adagrad(Optimizer):
def update_(self, param, grad, lr):
r"""
.. math::
A := A + (\nabla_\theta J)^2
\theta := \theta - lr * \frac{\nabla_\theta J}{\sqrt A}
"""
helper = shared(value=np.zeros(param.get_value().shape, dtype=config.floatX) | )
new_he | lper = helper + grad**2
return (param, param - lr * grad / T.sqrt(1e-8 + new_helper)), (helper, new_helper)
class RMSProp(Optimizer):
def __init__(self, alpha=0.9, beta=0.1):
super(RMSProp, self).__init__()
self.alpha, self.beta = alpha, beta
def update_(self, param, grad, lr):
r"""
.. math::
A := \alpha A + \beta (\nabla_\theta J)^2
\theta := \theta - lr * \frac{\nabla_\theta J}{\sqrt A}
:param param: parameter to be updated
:param grad: gradient
:param lr: learning rate
:param alpha: cache mixing portion for the previous cache
:param beta: cache mixing portion for the new gradient
:return: updates for rmsprop
"""
helper = shared(value=np.zeros(param.get_value().shape, dtype=config.floatX))
new_helper = self.alpha * helper + self.beta * grad**2
return (param, param - lr * grad / T.sqrt(1e-8 + new_helper)), (helper, new_helper)
|
figueira/Orinoco-Urgencias | app_enfermedad/admin.py | Python | gpl-3.0 | 294 | 0.003401 | from django. | contrib import admin
from app_enfermedad.models import *
admin.site.register(ParteCuerpo)
admin.site.register(ZonaCuerpo)
admin.site.register(Aspecto)
admin.site.register(AspectoAtencion)
admin.site.register(Anomalia)
admin.site.register(Zo | naParte)
admin.site.register(ParteAspecto) |
google/makani | gs/monitor2/apps/plugins/layouts/debug_hover_layout.py | Python | apache-2.0 | 909 | 0.0033 | # Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under th | e License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Layout to monitor hover status through full telemetry."""
from makani.gs.monitor2.apps.plugins import common
from makani.gs.monitor2.apps.plugins.lay | outs import hover_template
class DebugHoverLayout(hover_template.HoverLayout):
"""The hover layout."""
_NAME = 'Debug (Hover)'
_MODE = common.FULL_COMMS_MODE
|
diogocs1/comps | web/addons/hr_payroll/wizard/__init__.py | Python | apache-2.0 | 1,159 | 0.001726 | #-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# d$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS F | OR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr_payroll_payslips_by_employee | s
import hr_payroll_contribution_register_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
open-mmlab/mmdetection | configs/res2net/mask_rcnn_r2_101_fpn_2x_coco.py | Python | apache-2.0 | 287 | 0 | _base_ = '../mask_rcnn/mask_rcnn_r50_fpn_2x_coco.py'
model = dict(
backbone=dict(
type='Res2Net',
depth=101,
scales=4,
base_width=26,
init_cfg=dict(
type='Pretrained',
c | heckpoint='open-mmlab://res2net10 | 1_v1d_26w_4s')))
|
cawka/ns-3-dev-ndnSIM | src/lr-wpan/bindings/modulegen__gcc_LP64.py | Python | gpl-2.0 | 385,494 | 0.014791 | from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
import pybindgen.settings
import warnings
class ErrorHandler(pybindgen.settings.ErrorHandler):
    """Downgrades pybindgen wrapper-generation failures to warnings."""

    def handle_error(self, wrapper, exception, traceback_):
        # Report which wrapper failed instead of aborting generation.
        message = "exception {!r} in wrapper {}".format(exception, wrapper)
        warnings.warn(message)
        # Returning True tells pybindgen the error was handled, so code
        # generation continues with the remaining wrappers.
        return True
pybindgen.settings.error_handler = ErrorHandler()
import sys
def module_init():
    """Create the root pybindgen Module for ns-3's lr-wpan Python bindings.

    All bindings below are registered under the C++ namespace ``::ns3``.
    """
    root_module = Module('ns.lr_wpan', cpp_namespace='::ns3')
    return root_module
def register_types(module):
root_module = module.get_root()
## lr-wpan-phy.h (module 'lr-wpan'): ns3::LrWpanPhyEnumeration [enumeration]
module.add_enum('LrWpanPhyEnumeration', ['IEEE_802_15_4_PHY_BUSY', 'IEEE_802_15_4_PHY_BUSY_RX', 'IEEE_802_15_4_PHY_BUSY_TX', 'IEEE_802_15_4_PHY_FORCE_TRX_OFF', 'IEEE_802_15_4_PHY_IDLE', 'IEEE_802_15_4_PHY_INVALID_PARAMETER', 'IEEE_802_15_4_PHY_RX_ON', 'IEEE_802_15_4_PHY_SUCCESS', 'IEEE_802_15_4_PHY_TRX_OFF', 'IEEE_802_15_4_PHY_TX_ON', 'IEEE_802_15_4_PHY_UNSUPPORTED_ATTRIBUTE', 'IEEE_802_15_4_PHY_READ_ONLY', 'IEEE_802_15_4_PHY_UNSPECIFIED'])
## lr-wpan-mac.h (module 'lr-wpan'): ns3::LrWpanTxOption [enumeration]
module.add_enum('LrWpanTxOption', ['TX_OPTION_NONE', 'TX_OPTION_ACK', 'TX_OPTION_GTS', 'TX_OPTION_INDIRECT'])
## lr-wpan-mac.h (module 'lr-wpan'): ns3::LrWpanMcpsDataConfirmStatus [enumeration]
module.add_enum('LrWpanMcpsDataConfirmStatus', ['IEEE_802_15_4_SUCCESS', 'IEEE_802_15_4_TRANSACTION_OVERFLOW', 'IEEE_802_15_4_TRANSACTION_EXPIRED', 'IEEE_802_15_4_CHANNEL_ACCESS_FAILURE', 'IEEE_802_15_4_INVALID_ADDRESS', 'IEEE_802_15_4_INVALID_GTS', 'IEEE_802_15_4_NO_ACK', 'IEEE_802_15_4_COUNTER_ERROR', 'IEEE_802_15_4_FRAME_TOO_LONG', 'IEEE_802_15_4_UNAVAILABLE_KEY', 'IEEE_802_15_4_UNSUPPORTED_SECURITY', 'IEEE_802_15_4_INVALID_PARAMETER'])
## lr-wpan-phy.h (module 'lr-wpan'): ns3::LrWpanPibAttributeIdentifier [enumeration]
module.add_enum('LrWpanPibAttributeIdentifier', ['phyCurrentChannel', 'phyChannelsSupported', 'phyTransmitPower', 'phyCCAMode', 'phyCurrentPage', 'phyMaxFrameDuration', 'phySHRDuration', 'phySymbolsPerOctet'])
## lr-wpan-mac.h (module 'lr-wpan'): ns3::LrWpanMacState [enumeration]
module.add_enum('LrWpanMacState', ['MAC_IDLE', 'MAC_CSMA', 'MAC_SENDING', 'MAC_ACK_PENDING', 'CHANNEL_ACCESS_FAILURE', 'CHANNEL_IDLE', 'SET_PHY_TX_ON'])
## lr-wpan-mac.h (module 'lr-wpan'): ns3::LrWpanAssociationStatus [enumeration]
module.add_enum('LrWpanAssociationStatus', ['ASSOCIATED', 'PAN_AT_CAPACITY', 'PAN_ACCESS_DENIED', 'ASSOCIATED_WITHOUT_ADDRESS', 'DISASSOCIATED'])
## lr-wpan-phy.h (module 'lr-wpan'): ns3::LrWpanPhyOption [enumeration]
module.add_enum('LrWpanPhyOption', ['IEEE_802_15_4_868MHZ_BPSK', 'IEEE_802_15_4_915MHZ_BPSK', 'IEEE_802_15_4_868MHZ_ASK', 'IEEE_802_15_4_915MHZ_ASK', 'IEEE_802_15_4_868MHZ_OQPSK', 'IEEE_802_15_4_915MHZ_OQPSK', 'IEEE_802_15_4_2_4GHZ_OQPSK', 'IEEE_802_15_4_INVALID_PHY_OPTION'])
## lr-wpan-mac.h (module 'lr-wpan'): ns3::LrWpanAddressMode [enumeration]
module.add_enum('LrWpanAddressMode', ['NO_PANID_ADDR', 'ADDR_MODE_RESERVED', 'SHORT_ADDR', 'EXT_ADDR'])
## address.h (module 'network'): ns3::Address [class]
module.add_class('Address', import_from_module='ns.network')
## address.h (module 'network'): ns3::Address::MaxSize_e [enumeration]
module.add_enum('MaxSize_e', ['MAX_SIZE'], outer_class=root_module['ns3::Address'], import_from_module='ns.network')
## trace-helper.h (module 'network'): ns3::AsciiTraceHelper [class]
module.add_class('AsciiTraceHelper', import_from_module='ns.network')
## trace-helper.h (module 'network'): ns3::AsciiTraceHelperForDevice [class]
module.add_class('AsciiTraceHelperForDevice', allow_subclassing=True, import_from_module='ns.network')
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class]
module.add_class('AttributeConstructionList', import_from_module='ns.core')
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct]
module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList'])
## buffer.h (module 'network'): ns3::Buffer [class]
module.add_class('Buffer', import_from_module='ns.network')
## buffer.h (module 'network'): ns3::Buffer::Iterator [class]
module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::Buffer'])
## packet.h (module 'network'): ns3::ByteTagIterator [class]
module.add_class('ByteTagIterator', import_from_module='ns.network')
## packet.h (module 'network'): ns3::ByteTagIterator::Item [class]
module.add_class('Item', imp | ort_from_module='ns.network', outer_class=root_module['ns3::ByteTagIterator'])
## byte-tag-list.h (module 'network'): ns3::ByteTagList [class]
module.add_class('ByteTagList', import_from_module='ns.network')
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator [class | ]
module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList'])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item [struct]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList::Iterator'])
## callback.h (module 'core'): ns3::CallbackBase [class]
module.add_class('CallbackBase', import_from_module='ns.core')
## event-id.h (module 'core'): ns3::EventId [class]
module.add_class('EventId', import_from_module='ns.core')
## hash.h (module 'core'): ns3::Hasher [class]
module.add_class('Hasher', import_from_module='ns.core')
## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
module.add_class('Ipv4Address', import_from_module='ns.network')
## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
root_module['ns3::Ipv4Address'].implicitly_converts_to(root_module['ns3::Address'])
## ipv4-address.h (module 'network'): ns3::Ipv4Mask [class]
module.add_class('Ipv4Mask', import_from_module='ns.network')
## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
module.add_class('Ipv6Address', import_from_module='ns.network')
## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
root_module['ns3::Ipv6Address'].implicitly_converts_to(root_module['ns3::Address'])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix [class]
module.add_class('Ipv6Prefix', import_from_module='ns.network')
## lr-wpan-phy.h (module 'lr-wpan'): ns3::LrWpanEdPower [struct]
module.add_class('LrWpanEdPower')
## lr-wpan-phy.h (module 'lr-wpan'): ns3::LrWpanPhyDataAndSymbolRates [struct]
module.add_class('LrWpanPhyDataAndSymbolRates')
## lr-wpan-phy.h (module 'lr-wpan'): ns3::LrWpanPhyPibAttributes [struct]
module.add_class('LrWpanPhyPibAttributes')
## lr-wpan-phy.h (module 'lr-wpan'): ns3::LrWpanPhyPpduHeaderSymbolNumber [struct]
module.add_class('LrWpanPhyPpduHeaderSymbolNumber')
## lr-wpan-spectrum-value-helper.h (module 'lr-wpan'): ns3::LrWpanSpectrumValueHelper [class]
module.add_class('LrWpanSpectrumValueHelper')
## mac16-address.h (module 'network'): ns3::Mac16Address [class]
module.add_class('Mac16Address', import_from_module='ns.network')
## mac16-address.h (module 'network'): ns3::Mac16Address [class]
root_module['ns3::Mac16Address'].implicitly_converts_to(root_module['ns3::Address'])
## mac64-address.h (module 'network'): ns3::Mac64Address [class]
module.add_class('Mac64Address', import_from_module='ns.network')
## mac64-address.h (module 'network'): ns3::Mac64Address [class]
root_module['ns3::Mac64Address'].implicitly_converts_to(root_module['ns3::Address'])
## lr-wpan-mac.h (module 'lr-wpan'): ns3::McpsDataConfirmParams [struct]
module.add_class('McpsDataConfirmParams')
## lr-wpan-mac.h (module 'lr-wpan'): ns3::McpsDataIndicationParams [struct]
module.add_class('McpsDataIndicationParams')
## lr-wpan-mac.h (module 'lr-wpan'): ns3::McpsDataRequestParams [struct]
module.add_class('McpsDataRequestParams')
## net-device-contain |
medialab/reanalyse | reanalyseapp/update.py | Python | lgpl-3.0 | 11,321 | 0.02297 | # -*- coding: utf-8 -*-
#
# Import script for .csv files.
# Note: manifest a strong printaholism.
#
import sys, os, csv, re
from optparse import OptionParser
# get path of the django project
path = ("/").join( sys.path[0].split("/")[:-1] )
ppath = ("/").join( sys.path[0].split("/")[:-2] )
if path not in sys.path:
sys.path.append(path)
if ppath not in sys.path:
sys.path.append(ppath)
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
# django specific import
from django.conf import settings
from reanalyseapp.models import Enquete, Texte, Tag
from datetime import datetime
from reanalyseapp.views import *
def update( textes, enquete, csvdict ):
print " %s documents found in enquete: \"%s\", id:%s" % ( textes.count(), enquete.name, enquete.id )
print
for (counter, row) in enumerate(csvdict):
# print row
if counter == 0:
print " keys: %s" % row.keys()
# normally, the second meta_documents csv file line is a field description header.
continue
print " %s." % counter
try:
texte_url = row['*file']
texte_name = row['*name']
locationgeo = re.sub( r'[^0-9\.,-]', '', row['*locationgeo'])
#researcher = row['*researcher']
article = row['*article']
if('/' in row['*date']):
dateFormat = "%d/%m/%y"
elif('_' in row['*date']) :
dateFormat = "%d_%m_%y"
elif('-' in row['*date']) :
dateFormat = "%d-%m-%y"
#print(row['*date'])
date = row['*date']#datetime.datetime.strptime(row['*date'], dateFormat) #"31-12-12"
#date = datetime.datetime.strptime(row['*date'], '%d/%m/%y').strftime(dateFormat)
except KeyError, e:
print " Field format is not valid: %s " % ( e )
break
# print row['*name']doc_name = row['*name']
try:
texte = Texte.objects.get( enquete=enquete, name=row['*name'], locationpath__regex=( ".?%s" % os.path.basename( texte_url ) ) )
except Texte.DoesNotExist, e:
print " No texte found with : \"%s\", %s " % ( texte_name, e )
foo=raw_input('\n Skip this line and go on ? [ Y / N ] : ')
if foo.upper() == 'N':
print " Script stopped !"
break
continue
except Texte.MultipleObjectsReturned, e:
print " More than one texte found with : \"%s\", %s, %s " % ( texte_name, os.path.basename( texte_url ), e )
foo=raw_input('\n Skip this line and go on ? [ Y / N ] : ')
if foo.upper() == 'N':
print " Script stopped !"
break
print " %s \"%s\": %s" % ( texte.id, texte_name, locationgeo )
# get or save tag
print " %s \"%s\": %s" % ( texte.id, texte_name, article )
try:
t = Tag.objects.get( type=Tag.ARTICLE, slug=article )
except | Tag.DoesNotExist, e:
print " %s \"%s\": creating tag [%s:%s]" % ( texte.id, texte_name, article, Tag.ARTICLE )
| t = Tag( type=Tag.ARTICLE, slug=article, name=article)
t.save()
# save location geo
texte.locationgeo = locationgeo
texte.tags.add( t )
texte.date = date
texte.save()
#try
def install( upload_path, enquete_path ) :
    """Install an enquete: validate both paths, then delegate the actual
    import to imexport.importEnqueteUsingMeta.

    Prints progress to stdout and always returns None; a missing folder
    aborts the installation early.
    """
    # Imported lazily -- presumably to avoid a circular import; confirm.
    from imexport import importEnqueteUsingMeta
    print " from upload path '%s'" % upload_path
    if not os.path.exists( upload_path ):
        print " upload_path folder '%s' does not exists or it is not readable !" % upload_path
        print
        return
    # NOTE(review): this message says "upload path" but reports the
    # *enquete* path -- looks like a copy/paste slip in the message text.
    print " from upload path '%s'" % enquete_path
    if not os.path.exists( enquete_path ):
        print " enquete_path folder '%s' does not exists or it is not readable !" % enquete_path
        print
        return
    print " call importEnqueteUsingMeta (please follow up in log file)"
    importEnqueteUsingMeta( upload_path, enquete_path )
    print " installation completed."
from reanalyseapp.models import *
def testTEIparse(texte_id):
    """Debug helper: re-run the TEI XML parser on a single Texte by id."""
    texte = Texte.objects.get(id=texte_id)
    parseXmlDocument(texte)
def testEnqueteImport(foldName):
    """Debug helper: import an uploaded enquete from REANALYSEUPLOADPATH.

    Scans <upload>/extracted/ for a subfolder containing a _meta/
    directory and feeds it to importEnqueteUsingMeta.
    """
    folname = foldName
    upPath = settings.REANALYSEUPLOADPATH+folname+"/"
    enqueterootpath=''
    # If several extracted subfolders contain _meta/, the last one wins.
    for f in os.listdir(upPath+"extracted/"):
        if os.path.exists(upPath+"extracted/"+f+"/_meta/"):
            enqueterootpath = upPath+"extracted/"+f+"/"
    e = importEnqueteUsingMeta(upPath,enqueterootpath)
    if(e != None):
        doFiestaToEnquete(e)
    else:
        # NOTE(review): 'ok' is printed when the import returned None --
        # looks inverted; confirm importEnqueteUsingMeta's return contract.
        print('ok')
def deleteSpeakers(enquete_id):
    """Delete every Speaker attached to the TEI documents of an enquete."""
    textes = Texte.objects.filter(enquete_id=enquete_id, doctype="TEI")
    for t in textes :
        # .filter() with no arguments selects all speakers of this texte.
        speakers = t.speaker_set.filter()
        print(speakers)
        for s in speakers :
            s.delete()
#
#CheckMetaDocuments
#Check if every file exists in MetaDocuments
#return False with error dictionnary or True
#
def isMetaDocOK(upload_path, enquete_path):
    """Verify that every file listed in ``_meta/meta_documents.csv`` exists.

    Each data row's ``*file`` column is resolved against ``upload_path``
    (both paths are expected to carry a trailing slash, matching the
    callers in this module).

    Returns:
        True when every referenced file exists;
        ``{'status': False, 'error_dict': {path: reason}}`` when at least
        one file is missing;
        None when ``enquete_path`` itself does not exist (preserves the
        original implicit-None behavior).
    """
    if not os.path.exists(enquete_path):
        return None
    print("=========== PARSING META_DOCUMENTS.CSV TO CHECK IF A FILE IS MISSING IF TRUE IMPORT IS CANCELLED")
    error_dict = {}
    # The csv file is now closed deterministically (the original leaked
    # the handle), as is every probed document file below.
    with open(enquete_path + '_meta/meta_documents.csv') as meta_file:
        for row in csv.DictReader(meta_file, delimiter='\t'):
            # The '*descr' row is a human-readable field-description
            # header -- skip it.
            if row['*id'] == '*descr':
                continue
            file_location = upload_path + row['*file']
            try:
                open(file_location).close()
            except IOError as e:
                # errno 2 == ENOENT; other IOErrors are silently
                # ignored, exactly as in the original implementation.
                if e.args[0] == 2:
                    error_dict[file_location] = e.args[1]
                    print(file_location)
    if error_dict:
        return {'status': False, 'error_dict': error_dict}
    return True
def commit_enquete( enquete_id ):
    """Synchronize the production database dump with the development one.

    WARNING(review): shells out via os.system with hard-coded hosts,
    users and file names; `enquete_id` is currently unused, and the last
    two steps exist only as comments.
    """
    #create dump of prod bdd
    os.system('pg_dump -C -h 10.36.1.15 -U app app | psql -h localhost -U reanalyse reanalyse > prod_db.dump')
    #create dump of dev bdd
    os.system('pg_dump -Ft -b reanalyse > dev_db.dump')
    #Create update sql file for production
    os.system('apgdiff prod_db.dump dev_db.dump > diff.sql')
    #change absolute path (/var/opt/reanalyse to /datas/www/app
    #transfert
def main( argv ):
print """
WELCOME TO APP UPDATER
-------------------------------
"""
parser = OptionParser( usage="\n\n%prog --enquete=34 --csv=/home/dgu/meta_documents.csv" )
parser.add_option("-c", "--csv", dest="csvfile", help="csv file absolute path", default="" )
parser.add_option("-e", "--enquete", dest="enquete_id", help="enquete identifier", default=0 )
parser.add_option("-p", "--upload_path", dest="upload_path", help="enquete upload path", default="" ) #use with --func=install
parser.add_option("-x", "--enquete_path", dest="enquete_path", help="enquete extracted path", default="" ) #use with --func=install
parser.add_option("-f", "--function", dest="func", help="update function", default="update" )
parser.add_option("-d", "--document_id", dest="document_id", help="document id (Texte)", default="" )
parser.add_option("-D", "--directory", dest="directory", help="upload directory study", default="" )
( options, argv ) = parser.parse_args()
if options.func == "isMetaDocOK" :
print(options.func)
# install the enquete
return isMetaDocOK( options.upload_path, options.enquete_path )
if options.func == "install" :
if options. |
openstack/neutron-lib | neutron_lib/tests/unit/db/test_sqlalchemytypes.py | Python | apache-2.0 | 9,414 | 0 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import netaddr
from oslo_db import exception
from oslo_db.sqlalchemy import enginefacade
from oslo_db.sqlalchemy import test_fixtures
from oslo_utils import timeutils
from oslo_utils import uuidutils
import sqlalchemy as sa
from neutron_lib import context
from neutron_lib.db import sqlalchemytypes
from neutron_lib.tests import _base as test_base
from neutron_lib.tests import tools
from neutron_lib.utils import net
class SqlAlchemyTypesBaseTestCase(test_fixtures.OpportunisticDBTestMixin,
                                  test_base.BaseTestCase,
                                  metaclass=abc.ABCMeta):
    """Common harness for testing custom SQLAlchemy column types.

    Concrete subclasses provide the table (with the typed column under
    test) via _get_test_table(); the helpers below run raw CRUD
    statements against it through the oslo.db engine facade.
    """
    def setUp(self):
        super(SqlAlchemyTypesBaseTestCase, self).setUp()
        self.engine = enginefacade.writer.get_engine()
        meta = sa.MetaData(bind=self.engine)
        self.test_table = self._get_test_table(meta)
        self.test_table.create()
        # Drop everything created on this metadata when the test ends.
        self.addCleanup(meta.drop_all)
        self.ctxt = context.get_admin_context()
    @abc.abstractmethod
    def _get_test_table(self, meta):
        """Returns a new sa.Table() object for this test case."""
    def _add_row(self, **kargs):
        # Insert one row; values pass through the custom type's bind
        # processor.
        self.engine.execute(self.test_table.insert().values(**kargs))
    def _get_all(self):
        rows_select = self.test_table.select()
        return self.engine.execute(rows_select).fetchall()
    def _update_row(self, **kargs):
        # Unconditional update: applies to every row of the test table.
        self.engine.execute(self.test_table.update().values(**kargs))
    def _delete_rows(self):
        self.engine.execute(self.test_table.delete())
    def _validate_crud(self, data_field_name, expected=None):
        # `expected` maps row id -> expected converted value; a falsy
        # value means the table must be empty.
        objs = self._get_all()
        self.assertEqual(len(expected) if expected else 0, len(objs))
        if expected:
            for obj in objs:
                name = obj['id']
                self.assertEqual(expected[name], obj[data_field_name])
class IPAddressTestCase(SqlAlchemyTypesBaseTestCase):
    """Exercises the sqlalchemytypes.IPAddress column type (v4 and v6)."""
    def _get_test_table(self, meta):
        return sa.Table(
            'fakeipaddressmodels',
            meta,
            sa.Column('id', sa.String(36), primary_key=True, nullable=False),
            sa.Column('ip', sqlalchemytypes.IPAddress))
    def _validate_ip_address(self, data_field_name, expected=None):
        objs = self._get_all()
        self.assertEqual(len(expected) if expected else 0, len(objs))
        if expected:
            for obj in objs:
                name = obj['id']
                self.assertEqual(expected[name], obj[data_field_name])
    def _test_crud(self, ip_addresses):
        ip = netaddr.IPAddress(ip_addresses[0])
        self._add_row(id='fake_id', ip=ip)
        self._validate_ip_address(data_field_name='ip',
                                  expected={'fake_id': ip})
        ip2 = netaddr.IPAddress(ip_addresses[1])
        self._update_row(ip=ip2)
        self._validate_ip_address(data_field_name='ip',
                                  expected={'fake_id': ip2})
        self._delete_rows()
        self._validate_ip_address(data_field_name='ip', expected=None)
    def test_crud(self):
        ip_addresses = ["10.0.0.1", "10.0.0.2"]
        self._test_crud(ip_addresses)
        ip_addresses = ["2210::ffff:ffff:ffff:ffff",
                        "2120::ffff:ffff:ffff:ffff"]
        self._test_crud(ip_addresses)
    def test_wrong_type(self):
        # Strings (even valid dotted quads) must be rejected: only
        # netaddr.IPAddress values are accepted by the bind processor.
        self.assertRaises(exception.DBError, self._add_row,
                          id='fake_id', ip="")
        self.assertRaises(exception.DBError, self._add_row,
                          id='fake_id', ip="10.0.0.5")
    def _test_multiple_create(self, entries):
        reference = {}
        for entry in entries:
            ip = netaddr.IPAddress(entry['ip'])
            name = entry['name']
            self._add_row(id=name, ip=ip)
            reference[name] = ip
        self._validate_ip_address(data_field_name='ip', expected=reference)
        self._delete_rows()
        self._validate_ip_address(data_field_name='ip', expected=None)
    def test_multiple_create(self):
        ip_addresses = [
            {'name': 'fake_id1', 'ip': "10.0.0.5"},
            {'name': 'fake_id2', 'ip': "10.0.0.1"},
            {'name': 'fake_id3',
             'ip': "2210::ffff:ffff:ffff:ffff"},
            {'name': 'fake_id4',
             'ip': "2120::ffff:ffff:ffff:ffff"}]
        self._test_multiple_create(ip_addresses)
class CIDRTestCase(SqlAlchemyTypesBaseTestCase):
    """Exercises the sqlalchemytypes.CIDR column type."""
    def _get_test_table(self, meta):
        # Table with a single CIDR-typed data column.
        return sa.Table(
            'fakecidrmodels',
            meta,
            sa.Column('id', sa.String(36), primary_key=True, nullable=False),
            sa.Column('cidr', sqlalchemytypes.CIDR)
        )
    def _get_one(self, value):
        # Fetch the first row whose cidr column equals `value`.
        row_select = self.test_table.select().\
            where(self.test_table.c.cidr == value)
        return self.engine.execute(row_select).first()
    def _update_row(self, key, cidr):
        # Overrides the base helper: updates only the row matching `key`.
        self.engine.execute(
            self.test_table.update().values(cidr=cidr).
            where(self.test_table.c.cidr == key))
    def test_crud(self):
        # Round-trips netaddr.IPNetwork values (v4 and v6) through
        # insert, select, update and delete.
        cidrs = ["10.0.0.0/24", "10.123.250.9/32", "2001:db8::/42",
                 "fe80::21e:67ff:fed0:56f0/64"]
        for cidr_str in cidrs:
            cidr = netaddr.IPNetwork(cidr_str)
            self._add_row(id=uuidutils.generate_uuid(), cidr=cidr)
            obj = self._get_one(cidr)
            self.assertEqual(cidr, obj['cidr'])
            random_cidr = netaddr.IPNetwork(tools.get_random_cidr())
            self._update_row(cidr, random_cidr)
            obj = self._get_one(random_cidr)
            self.assertEqual(random_cidr, obj['cidr'])
        objs = self._get_all()
        self.assertEqual(len(cidrs), len(objs))
        self._delete_rows()
        objs = self._get_all()
        self.assertEqual(0, len(objs))
    def test_wrong_cidr(self):
        # Each of these must be rejected at bind time with DBError.
        wrong_cidrs = ["10.500.5.0/24", "10.0.0.1/40", "10.0.0.10.0/24",
                       "cidr", "", '2001:db8:5000::/64', '2001:db8::/130']
        for cidr in wrong_cidrs:
            self.assertRaises(exception.DBError, self._add_row,
                              id=uuidutils.generate_uuid(), cidr=cidr)
class MACAddressTestCase(SqlAlchemyTypesBaseTestCase):
def _get_test_table(self, meta):
return sa.Table(
'fakemacaddressmodels',
meta,
sa.Column('id', sa.String(36), primary_key=True, nullable=False),
sa.Column('mac', sqlalchemytypes.MACAddress)
)
def _get_one(self, value):
row_select = self.test_table.select().\
where(self.test_table.c.mac == value)
return self.engine.execute(row_select).first()
def _get_all(self):
rows_select = self.test_table.select()
return self.engine.execute(rows_select).fetchall()
def _update_row(self, key, mac):
self.engine.execute(
self.test_table.update().values(mac=mac).
where(self.test_table.c.mac == key))
def _delete_row(self):
self.engine.execute(
self.test_table.delete())
def test_crud(self):
mac_addresses = ['FA:16:3E:00:00:01', 'FA:16:3E:00:00:02']
for mac in mac_addresses:
mac = netaddr.EUI(mac)
self._add_row(id=uuidutils.generate_uuid(), mac=mac)
obj = self._get_one(mac)
self.assertEqual(mac, obj['mac'])
random_mac = netaddr.EUI(net.get_random_mac(
['fe', '16', '3e', '00', '00', '00']))
self._update_row(mac, random_mac)
obj = self._get_one(random_mac)
self.assertEqual(random_mac, obj['mac'])
objs = s |
googleapis/python-recommender | google/cloud/recommender_v1/services/recommender/async_client.py | Python | apache-2.0 | 50,233 | 0.001553 | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import functools
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core.client_options import ClientOptions
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.cloud.recommender_v1.services.recommender import pagers
from google.cloud.recommender_v1.types import insight
from google.cloud.recommender_v1.types import recommendation
from google.cloud.recommender_v1.types import recommender_service
from google.protobuf import duration_pb2 # type: ignore
from google.protobuf import struct_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from .transports.base import RecommenderTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import RecommenderGrpcAsyncIOTransport
from .client import RecommenderClient
class RecommenderAsyncClient:
"""Provides insights and recommendations for cloud customers for
various categories like performance optimization, cost savings,
reliability, feature discovery, etc. Insights and
recommendations are generated automatically based on analysis of
user resources, configuration and monitoring metrics.
"""
_client: RecommenderClient
DEFAULT_ENDPOINT = RecommenderClient.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = RecommenderClient.DEFAULT_MTLS_ENDPOINT
insight_path = staticmethod(RecommenderClient.insight_path)
parse_insight_path = staticmethod(RecommenderClient.parse_insight_path)
insight_type_path = staticmethod(RecommenderClient.insight_type_path)
parse_insight_type_path = staticmethod(RecommenderClient.parse_insight_type_path)
recommendation_path = staticmethod(RecommenderClient.recommendation_path)
parse_recommendation_path = staticmethod(
RecommenderClient.parse_recommendation_path
)
recommender_path = staticmethod(RecommenderClient.recommender_path)
parse_recommender_path = staticmethod(RecommenderClient.parse_recommender_path)
common_billing_account_path = staticmethod(
RecommenderClient.common_billing_account_path
)
parse_common_billing_account_path = staticmethod(
RecommenderClient.parse_common_billing_account_path
)
common_folder_path = staticmethod(RecommenderClient.common_folder_path)
parse_common_folder_path = staticmethod(RecommenderClient.parse_common_folder_path)
common_organization_path = staticmethod(RecommenderClient.common_organization_path)
parse_common_organization_path = staticmethod(
RecommenderClient.parse_common_organization_path
)
common_project_path = staticmethod(RecommenderClient.common_project_path)
parse_common_project_path = staticmethod(
RecommenderClient.parse_common_project_path
)
common_location_path = staticmethod(RecommenderClient.common_location_path)
parse_common_location_path = staticmethod(
RecommenderClient.parse_common_location_path
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
RecommenderAsyncClient: The constructed client.
"""
return RecommenderClient.from_service_account_info.__func__(RecommenderAsyncClient, info, *args, **kwargs) # type: ignore
@classmethod
def from_service_account_file(cls, filename: str, *a | rgs, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments t | o pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
RecommenderAsyncClient: The constructed client.
"""
return RecommenderClient.from_service_account_file.__func__(RecommenderAsyncClient, filename, *args, **kwargs) # type: ignore
from_service_account_json = from_service_account_file
@classmethod
def get_mtls_endpoint_and_cert_source(
cls, client_options: Optional[ClientOptions] = None
):
"""Return the API endpoint and client cert source for mutual TLS.
The client cert source is determined in the following order:
(1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
client cert source is None.
(2) if `client_options.client_cert_source` is provided, use the provided one; if the
default client cert source exists, use the default one; otherwise the client cert
source is None.
The API endpoint is determined in the following order:
(1) if `client_options.api_endpoint` if provided, use the provided one.
(2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the
default mTLS endpoint; if the environment variabel is "never", use the default API
endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
use the default API endpoint.
More details can be found at https://google.aip.dev/auth/4114.
Args:
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. Only the `api_endpoint` and `client_cert_source` properties may be used
in this method.
Returns:
Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
client cert source to use.
Raises:
google.auth.exceptions.MutualTLSChannelError: If any errors happen.
"""
return RecommenderClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore
@property
def transport(self) -> RecommenderTransport:
"""Returns the transport used by the client instance.
Returns:
RecommenderTransport: The transport used by the client instance.
"""
return self._client.transport
get_transport_class = functools.partial(
type(RecommenderClient).get_transport_class, type(RecommenderClient)
)
def __init__(
self,
*,
credentials: ga_credentials.Credentials = None,
transport: Union[str, RecommenderTransport] = "grpc_asyncio",
client_options: ClientOptions = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the recommender client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
|
zhsso/ubunto-one | src/backends/db/__init__.py | Python | agpl-3.0 | 833 | 0 | # Copyright 2008-2015 Canonical
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received | a copy of the GNU Affero General P | ublic License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# For further info, check http://launchpad.net/filesync-server
"""Database schemas and tools for accessing and testing with the database."""
|
opesci/devito | devito/passes/iet/instrument.py | Python | mit | 2,261 | 0.000885 | from devito.ir.iet import MapNodes, Section, TimedList, Transformer
from devito.mpi.routines import (HaloUpdateCall, HaloWaitCall, MPICall, MPIList,
HaloUpdateList, HaloWaitList, RemainderCall)
from devito.passes.iet.engine import iet_pass
from devito.passes.iet.orchestration import BusyWait
from devito.types import Timer
__all__ = ['instrument']
def instrument(graph, **kwargs):
    """Attach profiling instrumentation to the Sections of `graph`.

    First registers custom sub-Sections (MPI calls, busy-waits) with the
    profiler, then wraps all sections with a freshly built Timer.
    """
    track_subsections(graph, **kwargs)
    # Build the Timer only after track_subsections -- presumably so it
    # also covers the sub-sections registered above; confirm against
    # Profiler.all_sections.
    prof = kwargs['profiler']
    instrument_sections(graph,
                        timer=Timer(prof.name, list(prof.all_sections)),
                        **kwargs)
@iet_pass
def track_subsections(iet, **kwargs):
    """Wrap trackable sub-nodes in their own profiling Sections.

    Custom Sections include:

        * MPI calls and lists (e.g., HaloUpdateCall and HaloWaitCall)
        * Busy-waiting on While(lock) (e.g., from host-device orchestration)

    Each wrapped node is registered with the profiler under a fresh name
    derived from its kind.
    """
    profiler = kwargs['profiler']
    sregistry = kwargs['sregistry']
    # Name prefix used for the generated Section of each node class.
    prefixes = {
        HaloUpdateCall: 'haloupdate',
        HaloWaitCall: 'halowait',
        RemainderCall: 'remainder',
        HaloUpdateList: 'haloupdate',
        HaloWaitList: 'halowait',
        BusyWait: 'busywait',
    }
    subs = {}
    for node_type in [MPIList, MPICall, BusyWait]:
        for section, nodes in MapNodes(Section, node_type).visit(iet).items():
            for node in nodes:
                if node in subs:
                    # Already wrapped while scanning another node type.
                    continue
                if not any(issubclass(node.__class__, cls)
                           for cls in profiler.trackable_subsections):
                    continue
                name = sregistry.make_name(prefix=prefixes[node.__class__])
                subs[node] = Section(name, body=node, is_subsection=True)
                profiler.track_subsection(section.name, name)
    return Transformer(subs).visit(iet), {}
@iet_pass
def instrument_sections(iet, **kwargs):
    """Let the profiler instrument the Sections of the input IET.

    Returns the (possibly rewritten) IET plus metadata: when anything was
    instrumented, the Timer becomes an extra argument and the timer
    start/stop macros are added as headers.
    """
    profiler = kwargs['profiler']
    instrumented = profiler.instrument(iet, kwargs['timer'])
    if instrumented is iet:
        # Nothing was timed -- no extra arguments or headers required.
        return iet, {}
    extra = {'args': kwargs['timer'],
             'headers': [TimedList._start_timer_header(),
                         TimedList._stop_timer_header()]}
    return instrumented, extra
|
ideascube/ideascube | ideascube/serveradmin/urls.py | Python | agpl-3.0 | 562 | 0 | from django.conf.urls import url
from . import views
app_name = 'server'
urlpatterns = [
    url(r'^settings/$', views.server_info, name='settings'),
    url(r'^power/$', views.power, name='power'),
    url(r'^backup/$', views.backup, name='backup'),
    url(r'^battery/$', views.battery, name='battery'),
    # Optional named group: both /wifi/ and /wifi/<ssid> resolve here.
    url(r'^wifi/(?P<ssid>.+)?$', views.wifi, name='wifi'),
    url(r'^wifi_history/$', views.wifi_history, name='wifi_history'),
    url(r'^home_page/$', views.home_page, name='home_page'),
    url(r'^languages/$', views.languages, name='languages'),
]
|
ermanno-pirotta/playground | google-python-exercises/copyspecial/copyspecial.py | Python | lgpl-3.0 | 2,723 | 0.012119 | #!/usr/bin/python
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
import sys
import re
import os
import shutil
import zipfile
"""Copy Special exercise
"""
""" The goal of this program is to copy all files that are contained in
the provided list of directories to anoth | er directory or zip them to a zip file"""
# +++your code here+++
# Write functions and modify main() to call them
# a special file is one with __w+__ pattern
def find_special_files(from_dirs):
special_files = []
for directory in from_dirs:
# skip non existing directories
if os.path.exists(directory):
filenames = os.listdir(directory)
for name in filenames:
special_file_match = re.search(r'\w*__\w+__.*', name)
if special_file_match:
special_files.append(os.path.join(directory, special_file_match.group(0)))
return special_files
def copy_to_dir(from_dirs, todir):
print("copying files from " + str(from_dirs) + "to " + str(todir))
if not os.path.exists(todir):
os.mkdir(todir)
files_to_copy = find_special_files(from_dirs)
for file_to_copy in files_to_copy:
print("processing file" + filename)
shutil.copy(file_to_copy, todir)
def zip_to_file(from_dirs, tozip):
print("zipping files from " + str(from_dirs) + "to " + str(tozip))
files_to_copy = find_special_files(from_dirs)
zip_file = zipfile.ZipFile(tozip,'w')
for filename in files_to_copy:
print("processing file" + filename)
zip_file.write(filename)
zip_file.close()
def main():
# This basic command line argument parsing code is provided.
# Add code to call your functions below.
# Make a list of command line arguments, omitting the [0] element
# which is the script itself.
args = sys.argv[1:]
if not args:
print("usage: [--todir dir][--tozip zipfile] dir [dir ...]");
sys.exit(1)
# todir and tozip are either set from command line
# or left as the empty string.
# The args array is left just containing the dirs.
todir = ''
if args[0] == '--todir':
todir = args[1]
del args[0:2]
tozip = ''
if args[0] == '--tozip':
tozip = args[1]
del args[0:2]
if len(args) == 0:
print("error: must specify one or more dirs")
sys.exit(1)
if todir:
copy_to_dir(args, todir)
elif tozip:
zip_to_file(args,tozip)
else:
print("error: must specify either --todir or --tozip arguments")
# +++your code here+++
# Call your functions
if __name__ == "__main__":
main()
|
subodhchhabra/airflow | airflow/hooks/samba_hook.py | Python | apache-2.0 | 1,807 | 0 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from smbclient import SambaClient
import os
from airflow.hooks.base_hook import BaseHook
class SambaHook(BaseHook):
    """
    Hook for interacting with a Samba (SMB) file share.

    Host, share and credentials are read from the Airflow connection
    identified by ``samba_conn_id``.
    """

    def __init__(self, samba_conn_id):
        self.conn = self.get_connection(samba_conn_id)

    def get_conn(self):
        """Build a fresh ``SambaClient`` from the stored connection."""
        return SambaClient(server=self.conn.host,
                           share=self.conn.schema,
                           username=self.conn.login,
                           ip=self.conn.host,
                           password=self.conn.password)

    def push_from_local(self, destination_filepath, local_filepath):
        """Upload a local file, replacing any existing file at the target."""
        client = self.get_conn()
        if not client.exists(destination_filepath):
            # Ensure the destination directory exists before uploading.
            parent_dir = os.path.dirname(destination_filepath)
            if not client.exists(parent_dir):
                client.mkdir(parent_dir)
        elif client.isfile(destination_filepath):
            # Overwrite semantics: remove the old file first.
            client.remove(destination_filepath)
        client.upload(local_filepath, destination_filepath)
|
opadron/girder | plugins/provenance/plugin_tests/provenance_test.py | Python | apache-2.0 | 17,716 | 0.000056 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright 2013, 2014 Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import json
from tests import base
from girder import events
from girder.constants import AccessType
from server import constants
def setUpModule():
base.enabledPlugins.append('provenance')
base.startServer()
def tearDownModule():
base.stopServer()
class ProvenanceTestCase(base.TestCase):
    def setUp(self):
        """Build the shared fixture: an admin and a regular user, a public
        collection containing a public folder (regular user has WRITE), one
        item in that folder, and provenance tracking enabled for folders
        and settings."""
        base.TestCase.setUp(self)
        # Create some test documents with an item
        admin = {
            'email': 'admin@email.com',
            'login': 'adminlogin',
            'firstName': 'Admin',
            'lastName': 'Last',
            'password': 'adminpassword',
            'admin': True
        }
        self.admin = self.model('user').createUser(**admin)
        user = {
            'email': 'good@email.com',
            'login': 'goodlogin',
            'firstName': 'First',
            'lastName': 'Last',
            'password': 'goodpassword',
            'admin': False
        }
        self.user = self.model('user').createUser(**user)
        # Track folder, item, and setting provenance initially
        self.model('setting').set(
            constants.PluginSettings.PROVENANCE_RESOURCES, 'folder,setting')
        coll1 = {
            'name': 'Test Collection',
            'description': 'test coll',
            'public': True,
            'creator': self.admin
        }
        self.coll1 = self.model('collection').createCollection(**coll1)
        folder1 = {
            'parent': self.coll1,
            'parentType': 'collection',
            'name': 'Public test folder',
            'creator': self.admin
        }
        self.folder1 = self.model('folder').createFolder(**folder1)
        self.model('folder').setUserAccess(
            self.folder1, self.user, level=AccessType.WRITE, save=False)
        self.model('folder').setPublic(self.folder1, True, save=True)
        item1 = {
            'name': 'Public object',
            'creator': self.admin,
            'folder': self.folder1
        }
        self.item1 = self.model('item').createItem(**item1)
    def _checkProvenance(self, resp, item, version, user, eventType,
                         matches=None, fileInfo=None, resource='item'):
        """Assert that the latest provenance record of `item` has the given
        version, event user and event type.

        :param resp: a provenance response to inspect, or None to fetch it
            via ``_getProvenance`` as `user`.
        :param matches: optional dict of top-level provenance keys that must
            equal the given values.
        :param fileInfo: optional (possibly nested) dict that must match the
            first entry of the provenance's ``file`` list.
        :param resource: resource type used when fetching ('item' by default).
        """
        if resp is None:
            resp = self._getProvenance(item, user, resource=resource)
        self.assertStatusOk(resp)
        itemProvenance = resp.json
        self.assertEqual(itemProvenance['resourceId'], str(item['_id']))
        provenance = itemProvenance['provenance']
        self.assertEqual(provenance['eventType'], eventType)
        self.assertEqual(provenance['version'], version)
        self.assertEqual(str(provenance['eventUser']), str(user['_id']))
        if matches:
            for key in matches:
                self.assertEqual(provenance[key], matches[key])
        if fileInfo:
            for key in fileInfo:
                if isinstance(fileInfo[key], dict):
                    # Nested dicts are compared key-by-key against the first
                    # file record rather than by whole-dict equality.
                    for subkey in fileInfo[key]:
                        self.assertEqual(provenance['file'][0][key][subkey],
                                         fileInfo[key][subkey])
                else:
                    self.assertEqual(provenance['file'][0][key], fileInfo[key])
def | _getProvenance(self, item, user, version=None, resource='item',
checkOk=True):
params = {}
if version is not None:
params = {'version': version}
resp = self.request(
path='/%s/%s/provenance' % (resource, item['_id']),
method='GET', user=user, type='application/json', params=params)
if checkOk:
self.assertStatusOk(resp)
return resp
def _getProvenanceAfterM | etadata(self, item, meta, user):
resp = self.request(path='/item/%s/metadata' % item['_id'],
method='PUT', user=user, body=json.dumps(meta),
type='application/json')
self.assertStatusOk(resp)
return self._getProvenance(item, user)
def testProvenanceItemMetadata(self):
"""
Test item provenance endpoint with metadata and basic changes
"""
item = self.item1
user = self.user
admin = self.admin
# check that the first version of the item exists
# ensure version 1, created by admin user, with creation event
self._checkProvenance(None, item, 1, admin, 'creation')
# update meta to {x:y}
metadata1 = {'x': 'y'}
resp = self._getProvenanceAfterMetadata(item, metadata1, admin)
# ensure version 2, updated by admin user, with update event, and meta
# in provenance matches
self._checkProvenance(resp, item, 2, admin, 'update',
{'new': {'meta': metadata1}})
# update meta to {} by regular user, we have to send in the key to
# remove it but check the saved metadata against {}
metadata2 = {'x': None}
resp = self._getProvenanceAfterMetadata(item, metadata2, user)
# ensure version 3, updated by regular user, with update event, and
# meta in provenance matches
self._checkProvenance(resp, item, 3, user, 'update',
{'old': {'meta': metadata1},
'new': {'meta': {}}})
# update meta to {x:y} by regular user
metadata3 = {'x': 'y'}
resp = self._getProvenanceAfterMetadata(item, metadata3, user)
# ensure version 4, updated by regular user, with update event, and
# meta in provenance matches
self._checkProvenance(resp, item, 4, user, 'update',
{'old': {'meta': {}},
'new': {'meta': metadata3}})
# update meta to {x:z} by regular user
metadata4 = {'x': 'z'}
resp = self._getProvenanceAfterMetadata(item, metadata4, user)
# ensure version 5, updated by regular user, with update event, and
# meta in provenance matches
self._checkProvenance(resp, item, 5, user, 'update',
{'old': {'meta': metadata3},
'new': {'meta': metadata4}})
# update meta to {x:z, q:u} by regular user
metadata5 = {'x': 'z', 'q': 'u'}
resp = self._getProvenanceAfterMetadata(item, metadata5, user)
# ensure version 6, updated by regular user, with update event, and
# meta in provenance matches
self._checkProvenance(resp, item, 6, user, 'update',
{'old': {'meta': metadata4},
'new': {'meta': metadata5}})
# update meta to {q:a} by regular user
metadata6 = {'x': None, 'q': 'a'}
resp = self._getProvenanceAfterMetadata(item, metadata6, user)
# ensure version 7, updated by regular user, with update event, and
# meta in provenance matches
self._checkProvenance(resp, item, 7, user, 'update',
{'old': {'meta': metadata5},
'new': {'meta': {'q': 'a'}}})
# Change the item name and description
params = {'name': 'Renamed object', 'description': 'New description'}
resp = self.request(path='/item/%s' % item['_id'], method='PUT',
user=admin, params=params)
self.assertStatusOk(resp)
params['lo |
jkossen/imposter | examples/frontend_fcgi.py | Python | bsd-2-clause | 149 | 0 | #!/usr/bin/env python
from flup.server.fcgi import WSGISe | rver
from frontend impo | rt app
WSGIServer(app, bindAddress=app.config['FCGI_SOCKET']).run()
|
Tanych/CodeTracking | 39-Combination-Sum/solution.py | Python | mit | 677 | 0.029542 | class Solution(object):
def combinationSum(self, candidates, target):
"""
:type candidates: List[int]
:type target: int
:rtype: List[List[int]]
"""
def recurhelper( | nums,res,path,target,start):
if target==0:
res.append(path)
return
if target<0:
return
if target>0:
for i in xrange(start,len(nums)):
if nums[i]<=target:
recurhelper(nums,res,path+[nums[i]],target-nums[i],i)
res=[]
candidates.sort()
recurhelper(candidates,res,[],target,0) |
return res |
mahmoud/womp | womp/stats.py | Python | gpl-3.0 | 2,081 | 0.001442 | def mean(vals):
if vals:
return sum(vals, 0.0) / len(vals)
else:
return 0.0
def trim(vals, trim=0.25):
    """Return `vals` with the fraction `trim` removed from each end.

    The number of elements dropped per side is ``int(len(vals) * trim)``.

    Bug fix: the original sliced ``vals[size_diff:-size_diff]``; when
    size_diff was 0 (any sequence shorter than 1/trim) that collapses to
    ``vals[0:0]`` because ``-0 == 0``, silently returning an empty list.
    Now the sequence is returned unchanged when nothing should be dropped.
    """
    if trim > 0.0:
        trim = float(trim)
        size = len(vals)
        size_diff = int(size * trim)
        if size_diff > 0:
            vals = vals[size_diff:size - size_diff]
    return vals
def median(vals):
    """Median of `vals`; 0 for an empty sequence.

    Even-length sequences return the mean of the two middle values.

    Bug fix: the original indexed with ``/`` (true division), which yields
    float indices on Python 3 and raises ``TypeError: list indices must be
    integers``; ``//`` restores integer indexing on both Python 2 and 3.
    """
    if not vals:
        return 0
    ordered = sorted(vals)
    size = len(ordered)
    mid = size // 2
    if size % 2 == 1:
        return ordered[mid]
    return (ordered[mid - 1] + ordered[mid]) / 2.0
def pow_diff(vals, power):
    """Each value's deviation from the mean, raised to `power`."""
    # Mean inlined (0.0 for an empty sequence, matching `mean`).
    m = sum(vals, 0.0) / len(vals) if vals else 0.0
    return [(v - m) ** power for v in vals]
def variance(vals):
    """Population variance: the mean of squared deviations from the mean."""
    squared_devs = pow_diff(vals, 2)
    return mean(squared_devs)
def std_dev(vals):
    """Population standard deviation (square root of the variance)."""
    return pow(variance(vals), 0.5)
def absolute_dev(vals, x):
    """Absolute deviation of every value in `vals` from `x`."""
    return [abs(v - x) for v in vals]
def median_abs_dev(vals):
    """Median absolute deviation: a robust measure of spread."""
    center = median(vals)
    return median(absolute_dev(vals, center))
def rel_std_dev(vals):
    """Relative standard deviation (std dev / mean); 0.0 when the mean is 0."""
    m = mean(vals)
    return std_dev(vals) / m if m else 0.0
def skewness(vals):
    """Sample skewness; 0.0 when undefined (fewer than 2 values or no spread)."""
    s = std_dev(vals)
    n = len(vals)
    if n < 2 or s <= 0:
        return 0.0
    return sum(pow_diff(vals, 3)) / float((n - 1) * s ** 3)
def kurtosis(vals):
    """Sample kurtosis; 0.0 when undefined (fewer than 2 values or no spread)."""
    s = std_dev(vals)
    n = len(vals)
    if n < 2 or s <= 0:
        return 0.0
    return sum(pow_diff(vals, 4)) / float((n - 1) * s ** 4)
def dist_stats(vals):
    """Return a dict of distribution statistics for `vals`.

    Covers central tendency (mean, median), spread (variance, std dev,
    median absolute deviation, relative std dev) and shape (skewness,
    kurtosis). The ``*_trimmed`` variants are computed on the 25%-trimmed
    sample produced by `trim`.
    """
    trimmed_vals = trim(vals)
    return {
        'mean': mean(vals),
        'mean_trimmed': mean(trimmed_vals),
        'median': median(vals),
        'median_abs_dev': median_abs_dev(vals),
        'variance': variance(vals),
        'std_dev': std_dev(vals),
        'std_dev_trimmed': std_dev(trimmed_vals),
        'rel_std_dev': rel_std_dev(vals),
        'skewness': skewness(vals),
        'skewness_trimmed': skewness(trimmed_vals),
        'kurtosis': kurtosis(vals),
        'kurtosis_trimmed': kurtosis(trimmed_vals),
        'count': len(vals) # used to be called size; sample/population size
    }
|
wheeler-microfluidics/serial_device | serial_device/threaded.py | Python | gpl-3.0 | 9,719 | 0 | import queue
import logging
import platform
import threading
import datetime as dt
import serial
import serial.threaded
import serial_device
from .or_event import OrEvent
logger = logging.getLogger(__name__)
# Flag to indicate whether queues should be polled.
# XXX Note that polling performance may vary by platform.
POLL_QUEUES = (platform.system() == 'Windows')
class EventProtocol(serial.threaded.Protocol):
    """pyserial threaded protocol that exposes connection state changes as
    `threading.Event` objects (``connected`` / ``disconnected``)."""
    def __init__(self):
        self.transport = None
        # Set while a serial connection is established.
        self.connected = threading.Event()
        # Set once the connection has been lost or closed.
        self.disconnected = threading.Event()
        self.port = None

    def connection_made(self, transport):
        """Called when reader thread is started"""
        self.port = transport.serial.port
        logger.debug('connection_made: `%s` `%s`', self.port, transport)
        self.transport = transport
        self.connected.set()
        self.disconnected.clear()

    def data_received(self, data):
        """Called with snippets received from the serial port"""
        # Subclasses must implement the actual payload handling.
        raise NotImplementedError

    def connection_lost(self, exception):
        """\
        Called when the serial port is closed or the reader loop terminated
        otherwise.
        """
        # `exception` is an Exception instance on abnormal loss, else None.
        if isinstance(exception, Exception):
            logger.debug('Connection to port `%s` lost: %s', self.port,
                         exception)
        else:
            logger.debug('Connection to port `%s` closed', self.port)
        self.connected.clear()
        self.disconnected.set()
class KeepAliveReader(threading.Thread):
'''
Keep a serial connection alive (as much as possible).
Parameters
----------
state : dict
State dictionary to share ``protocol`` object reference.
comport : str
Name of com port to connect to.
default_timeout_s : float, optional
Default time to wait for serial operation (e.g., connect).
By default, block (i.e., no time out).
**kwargs
Keyword arguments passed to ``serial_for_url`` function, e.g.,
``baudrate``, etc.
'''
    def __init__(self, protocol_class, comport, **kwargs):
        """See the class docstring for parameter descriptions; remaining
        kwargs are forwarded to ``serial.serial_for_url``."""
        super(KeepAliveReader, self).__init__()
        self.daemon = True
        self.protocol_class = protocol_class
        self.comport = comport
        self.kwargs = kwargs
        self.protocol = None
        # Popped here so it is not forwarded to `serial_for_url`.
        self.default_timeout_s = kwargs.pop('default_timeout_s', None)
        # Event to indicate serial connection has been established.
        self.connected = threading.Event()
        # Event to request a break from the run loop.
        self.close_request = threading.Event()
        # Event to indicate thread has been closed.
        self.closed = threading.Event()
        # Event to indicate an exception has occurred.
        self.error = threading.Event()
        # Event to indicate that the thread has connected to the specified port
        # **at least once**.
        self.has_connected = threading.Event()
    @property
    def alive(self):
        """True until the run loop has fully exited (``closed`` is set)."""
        return not self.closed.is_set()
def run(self):
# Verify requested serial port is available.
try:
if self.comport not in (serial_device
.comports(only_available=True).index):
raise NameError('Port `%s` not available. Available ports: '
'`%s`' % (self.comport,
', '.join(serial_device.comports()
| .index)))
except NameError as exception:
self.error.exception = exception
self.error.set()
self.closed.set()
return
| while True:
# Wait for requested serial port to become available.
while self.comport not in (serial_device
.comports(only_available=True).index):
# Assume serial port was disconnected temporarily. Wait and
# periodically check again.
self.close_request.wait(2)
if self.close_request.is_set():
# No connection is open, so nothing to close. Just quit.
self.closed.set()
return
try:
# Try to open serial device and monitor connection status.
logger.debug('Open `%s` and monitor connection status',
self.comport)
device = serial.serial_for_url(self.comport, **self.kwargs)
except serial.SerialException as exception:
self.error.exception = exception
self.error.set()
self.closed.set()
return
except Exception as exception:
self.error.exception = exception
self.error.set()
self.closed.set()
return
else:
with serial.threaded.ReaderThread(device, self
.protocol_class) as protocol:
self.protocol = protocol
connected_event = OrEvent(protocol.connected,
self.close_request)
disconnected_event = OrEvent(protocol.disconnected,
self.close_request)
# Wait for connection.
connected_event.wait(None if self.has_connected.is_set()
else self.default_timeout_s)
if self.close_request.is_set():
# Quit run loop. Serial connection will be closed by
# `ReaderThread` context manager.
self.closed.set()
return
self.connected.set()
self.has_connected.set()
# Wait for disconnection.
disconnected_event.wait()
if self.close_request.is_set():
# Quit run loop.
self.closed.set()
return
self.connected.clear()
# Loop to try to reconnect to serial device.
    def write(self, data, timeout_s=None):
        '''
        Write to serial port.

        Waits for serial connection to be established before writing.

        Parameters
        ----------
        data : str or bytes
            Data to write to serial port.
        timeout_s : float, optional
            Maximum number of seconds to wait for serial connection to be
            established.

            By default, block until serial connection is ready.
        '''
        # NOTE(review): the result of `wait` is not checked, so if the
        # timeout expires the write is still attempted (and may fail on a
        # protocol without a transport) -- confirm this is intended.
        self.connected.wait(timeout_s)
        self.protocol.transport.write(data)
    def request(self, response_queue, payload, timeout_s=None,
                poll=POLL_QUEUES):
        '''
        Send a request payload and wait for the response.

        Waits for the serial connection to be established before sending.

        Parameters
        ----------
        response_queue : Queue.Queue
            Queue to wait for response on.
        payload : str or bytes
            Payload to send.
        timeout_s : float, optional
            Maximum time to wait (in seconds) for response.

            By default, block until response is ready.
        poll : bool, optional
            If ``True``, poll response queue in a busy loop until response is
            ready (or timeout occurs).

            Polling is much more processor intensive, but (at least on Windows)
            results in faster response processing. On Windows, polling is
            enabled by default.
        '''
        self.connected.wait(timeout_s)
        # NOTE(review): delegates to a module-level `request` helper (not
        # visible here) -- presumably it writes `payload` and waits on
        # `response_queue`; confirm in module scope.
        return request(self, response_queue, payload, timeout_s=timeout_s,
                       poll=poll)
    def close(self):
        """Ask the run loop to exit; ``closed`` is set once it actually has."""
        self.close_request.set()
# - - context manager, returns protocol
def __enter__(self):
"""\
Enter context handler. May raise RuntimeError in case the connection
could not be created.
"""
self.start()
# Wait for protocol to connect.
ev |
alirizakeles/tendenci | tendenci/apps/invoices/reports.py | Python | gpl-3.0 | 2,731 | 0.002563 | from django.utils.translation import ugettext_lazy as _
from django.utils.html import mark_safe
from django.core.urlresolvers import reverse
from tendenci.libs.model_report.report import reports, ReportAdmin
from tendenci.libs.model_report.utils import (sum_column, us_date_format, date_label,
obj_type_format, date_from_datetime,
entity_format)
from tendenci.apps.invoices.models import Invoice
from tendenci.apps.site_settings.utils import get_setting
CURRENCY_SYMBOL = get_setting("site", "global", "currencysymbol")
def id_format(value, instance):
    """Render an invoice id as a link to its detail view."""
    url = reverse('invoice.view', args=[value])
    return mark_safe('<a href="%s">%s</a>' % (url, value))
def currency_format(value, instance):
    """Prefix a monetary value with the site-wide currency symbol."""
    return "%s%s" % (CURRENCY_SYMBOL, value)
class InvoiceReport(ReportAdmin):
    """model_report configuration for the invoice report.

    Purely declarative: each attribute below tells ReportAdmin how to
    render, filter, group, total and export Invoice rows.
    """
    # choose a title for your report for h1, title tag and report list
    title = _('Invoice Report')
    # specify your model
    model = Invoice
    # fields in the specified model to display in the report table
    fields = [
        'id',
        'bill_to',
        'create_dt',
        'status_detail',
        'object_type',
        'entity',
        'payments_credits',
        'balance',
        'total'
    ]
    # fields in the model to show filters for
    list_filter = ('status_detail', 'create_dt', 'object_type')
    # fields in the model to order results by
    list_order_by = ('create_dt', 'status_detail')
    # fields to group results by
    list_group_by = ('object_type', 'status_detail', 'entity', 'create_dt')
    # allowed export formats. default is excel and pdf
    exports = ('excel', 'pdf',)
    # type = report for report only, type = chart for report and charts. default is report.
    type = 'report'
    # override field formats by referencing a function
    override_field_formats = {
        'create_dt': us_date_format,
        'object_type': obj_type_format,
        'id': id_format,
        'balance': currency_format,
        'total': currency_format,
        'payments_credits': currency_format
    }
    # override the label for a field by referencing a function
    override_field_labels = {
        'create_dt': date_label
    }
    # override the value a row is grouped under for these fields
    override_group_value = {
        'create_dt': date_from_datetime,
        'entity': entity_format
    }
    # per-group subtotal columns
    group_totals = {
        'balance': sum_column,
        'total': sum_column,
        'payments_credits': sum_column
    }
    # whole-report total columns
    report_totals = {
        'balance': sum_column,
        'total': sum_column,
        'payments_credits': sum_column
    }

# register your report with the slug and name
reports.register('invoices', InvoiceReport)
|
USGSDenverPychron/pychron | pychron/entry/mass_spec_reverter.py | Python | apache-2.0 | 8,096 | 0.001729 | # ===============================================================================
# Copyright 2014 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
from __future__ import absolute_import
from pychron.core.ui import set_qt
from six.moves import range
from six.moves import zip
set_qt()
# ============= enthought library reverts =======================
from traits.api import Any, Str
# ============= standard library imports ========================
import os
import struct
from numpy import array
# ============= local library imports ==========================
from pychron.core.helpers.filetools import pathtolist
from pychron.loggable import Loggable
from pychron.core.helpers.logger_setup import logging_setup
from pychron.mass_spec.database.massspec_database_adapter import MassSpecDatabaseAdapter
from pychron.database.isotope_database_manager import IsotopeDatabaseManager
from pychron.experiment.utilities.identifier import (
convert_identifier_to_int,
strip_runid,
)
logging_setup("ms_reverter")
class MassSpecReverter(Loggable):
"""
use to revert data from Pychron to MassSpec.
uses the MassSpecDatabasereverter to do the actual work.
This class takes a list of run ids, extracts data from
the pychron database, prepares data for use with MassSpecDatabasereverter,
then writes to the MassSpec database
"""
source = Any
destination = Any
path = Str
def do_revert(self):
# if self._connect_to_source():
if self._connect_to_destination():
self._do_revert()
def do_reimport(self):
if self._connect_to_source():
if self._connect_to_destination():
self._do_reimport()
def setup_source(self):
src = IsotopeDatabaseManager(connect=False, bind=False)
db = src.db
db.trait_set(
name="pychrondata",
kind="mysql",
host=os.environ.get("HOST"),
username="root",
password=os.environ.get("DB_PWD"),
)
self.source = src
def setup_destination(self):
dest = MassSpecDatabaseAdapter()
dest.trait_set(
name="massspecdata_crow",
kind="mysql",
username="root",
password=os.environ.get("DB_PWD"),
)
self.destination = dest
def _connect_to_source(self):
return self.source.connect()
def _connect_to_destination(self):
return self.destination.connect()
def _load_runids(self):
runids = pathtolist(self.path)
return runids
def _do_reimport(self):
rids = self._load_runids()
for rid in rids:
self._reimport_rid(rid)
def _reimport_rid(self, rid):
self.debug("========= Reimport {} =========".format(rid))
dest = self.destination
src_an = self._get_analysis_from_source(rid)
if src_an is None:
self.warning("could not find {}".format(rid))
else:
dest_an = dest.get_analysis_rid(rid)
for iso in dest_an.isotopes:
pb, pbnc = self._generate_blobs(src_an, iso.Label)
pt = iso.peak_time_series[0]
pt.PeakTimeBlob = pb
pt.PeakNeverBslnCorBlob = pbnc
dest.commit()
def _generate_blobs(self, src, isok):
dbiso = next(
(
i
for i in src.isotopes
if i.molecular_weight.name == isok and i.kind == "signal"
),
None,
)
dbiso_bs = next(
(
i
for i in src.isotopes
if i.molecular_weight.name == isok and i.kind == "baseline"
),
None,
)
xs, ys = self._unpack_data(dbiso.signal.data)
bsxs, bsys = self._unpack_data(dbiso_bs.signal.data)
bs = bsys.mean()
cys = ys - bs
ncblob = "".join([struct.pack(">f", v) for v in ys])
cblob = "".join([struct.pack(">ff", y, x) for y, x in zip(cys, xs)])
return cblob, ncblob
def _unpack_data(self, blob):
endianness = ">"
sx, sy = list(
zip(
*[
struct.unpack("{}ff".format(endianness), blob[i : i + 8])
for i in range(0, len(blob), 8)
]
)
)
return array(sx), array(sy)
def _get_analysis_from_source(self, rid):
if rid.count("-") > 1:
args = rid.split("-")
step = None
lan = "-".join(args[:-1])
aliquot = args[-1]
else:
lan, aliquot, step = strip_runid(rid)
lan = convert_identifier_to_int(lan)
db = self.source.db
dban = db.get_unique_analysis(lan, aliquot, step)
return dban
def _do_revert(self):
rids = self._load_runids()
for rid in rids:
self._revert_rid(rid)
def _revert_rid(self, rid):
"""
rid: str. typical runid e.g 12345, 12345-01, 12345-01A
if rid lacks an aliquot revert all aliquots and steps for
this rid
"""
self.debug("reverting {}".format(rid))
if "-" in rid:
# this is a specific analysis
self._revert_analysis(rid)
else:
self._revert_analyses(rid)
def _revert_analyses(self, rid):
"""
rid: str. e.g 12345
revert all analyses with this labnumber
"""
def _revert_analysis(self, rid):
"""
rid: str. e.g 12345-01 or 12345-01A
only revert this specific analysis
"""
# l,a,s = strip_runid(rid)
# db = self.source.db
dest = self.destination
# with db.session_ctx():
self.debug("========= Revert {} =========".format(rid))
dest_an = dest.get_analysis_rid(rid)
for iso in dest_an.isotopes:
isol = iso.Label
self.debug("{} reverting isotope id = {}".format(isol, iso.IsotopeID))
# fix IsotopeTable.NumCnts
n = len(iso.peak_time_series[0].PeakTimeBlob) / 8
self.debug(
"{} fixing NumCnts. current={} new={}".format(isol, iso.NumCnts, n)
)
iso.NumCnts = n
nf = len(iso.peak_time_series)
if nf > 1:
self.debug("{} deleting {} refits".format(isol, nf - 1))
# delete peak time blobs
for i, pt in enumerate(iso.peak_time_series[1:]):
self.debug(
"{} A {:02d} deleting pt series {} | ".format(
isol, i + 1, pt.Count | er
)
)
dest.delete(pt)
# delete isotope results
for i, ir in enumerate(iso.results[1:]):
self.debug(
"{} B {:02d} deleting results {}".format(
isol, i + 1, ir.Counter
)
)
dest.delete(ir)
dest.commit()
if __name__ == "__main__":
m = MassSpecReverter(path="/Users/ross/Sandbox/crow_revert.txt")
m.setup_source()
m.setup_destination()
m.do_reimport()
# m.do_revert()
# ============= EOF =============================================
#
# def _get_analyses_from_source(self, labnumber):
# db = self.source.db
# with db.session_ctx():
# pass
|
benfinke/ns_python | nssrc/com/citrix/netscaler/nitro/resource/config/aaa/aaatacacsparams.py | Python | apache-2.0 | 8,097 | 0.037545 | #
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class aaatacacsparams(base_resource) :
""" Configuration for tacacs parameters resource. """
def __init__(self) :
self._serverip = ""
self._serverport = 0
self._authtimeout = 0
self._tacacssecret = ""
self._authorization = ""
self._accounting = ""
self._auditfailedcmds = ""
self._defaultauthenticationgroup = ""
@property
def serverip(self) :
ur"""IP address of your TACACS+ server.<br/>Minimum length = 1.
"""
try :
return self._serverip
except Exception as e:
raise e
@serverip.setter
def serverip(self, serverip) :
ur"""IP address of your TACACS+ server.<br/>Minimum length = 1
"""
try :
self._serverip = serverip
except Exception as e:
raise e
@property
def serverport(self) :
ur"""Port number on which the TACACS+ server listens for connections.<br/>Default value: 49<br/>Minimum length = 1.
"""
try :
return self._serverport
except Exception as e:
raise e
@serverport.setter
def serverport(self, serverport) :
ur"""Port number on which the TACACS+ server listens for connections.<br/>Default value: 49<br/>Minimum length = 1
"""
try :
self._serverport = serverport
except Exception as e:
raise e
@property
def authtimeout(self) :
ur"""Maximum number of seconds that the NetScaler appliance waits for a response from the TACACS+ server.<br/>Default value: 3<br/>Minimum length = 1.
"""
try :
return self._authtimeout
except Exception as e:
raise e
@authtimeout.setter
def authtimeout(self, authtimeout) :
ur"""Maximum number of seconds that the NetScaler appliance waits for a response from the TACACS+ server.<br/>Default value: 3<br/>Minimum length = 1
"""
try :
self._authtimeout = authtimeout
except Exception as e:
raise e
@property
def tacacssecret(self) :
ur"""Key shared between the TACACS+ server and clients. Required for allowing the NetScaler appliance to communicate with the TACACS+ server.<br/>Minimum length = 1.
"""
try :
return self._tacacssecret
except Exception as e:
raise e
@tacacssecret.setter
def tacacssecret(self, tacacssecret) :
ur"""Key shared between the TACACS+ server and clients. Required for allowing the NetScaler appliance to communicate with the TACACS+ server.<br/>Minimum length = 1
"""
try :
self._tacacssecret = tacacssecret
except Exception as e:
raise e
@property
def authorization(self) :
ur"""Use streaming authorization on the TACACS+ server.<br/>Possible values = ON, OFF.
"""
try :
return self._authorization
except Exception as e:
raise e
@authorization.setter
def authorization(self, authorization) :
ur"""Use streaming authorization on the TACACS+ server.<br/>Possible values = ON, OFF
"""
try :
self._authorization = authorization
except Exception as e:
raise e
@property
def accounting(self) :
ur"""Send accounting messages to the TACACS+ server.<br/>Possible values = ON, OFF.
"""
try :
return self._accounting
except Exception as e:
raise e
@accounting.setter
def accounting(self, accounting) :
ur"""Send accounting messages to the TACACS+ server.<br/>Possible values = ON, OFF
"""
try :
self._accounting = accounting
except Exception as e:
raise e
@property
def auditfailedcmds(self) :
ur"""The option for sending accounting messages to the TACACS+ server.<br/>Possible values = ON, OFF.
""" |
try :
return self._auditfailedcmds
except | Exception as e:
raise e
@auditfailedcmds.setter
def auditfailedcmds(self, auditfailedcmds) :
ur"""The option for sending accounting messages to the TACACS+ server.<br/>Possible values = ON, OFF
"""
try :
self._auditfailedcmds = auditfailedcmds
except Exception as e:
raise e
@property
def defaultauthenticationgroup(self):
    """This is the default group that is chosen when the authentication succeeds
    in addition to extracted groups.<br/>Maximum length = 64.
    """
    # No-op try/except (re-raise of the caught exception) removed.
    return self._defaultauthenticationgroup

@defaultauthenticationgroup.setter
def defaultauthenticationgroup(self, defaultauthenticationgroup):
    """Set the default authentication group.<br/>Maximum length = 64"""
    self._defaultauthenticationgroup = defaultauthenticationgroup
def _get_nitro_response(self, service, response) :
    """Convert a raw nitro response into an aaatacacsparams_response object and
    return the contained resource array (for GET requests).

    Raises nitro_exception when the response carries a non-zero error code,
    except for non-ERROR severities, which are returned as-is.
    """
    try :
        # Deserialize the wire response into the typed response wrapper.
        result = service.payload_formatter.string_to_resource(aaatacacsparams_response, response, self.__class__.__name__)
        if(result.errorcode != 0) :
            # errorcode 444 appears to indicate an expired session — the
            # local session is cleared before any error is raised.
            # TODO(review): confirm against the NITRO API documentation.
            if (result.errorcode == 444) :
                service.clear_session(self)
            if result.severity :
                # Only severity == "ERROR" is fatal; other severities
                # (e.g. warnings) deliberately fall through to the return.
                if (result.severity == "ERROR") :
                    raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
            else :
                # No severity reported at all: treat any non-zero errorcode
                # as fatal.
                raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
        return result.aaatacacsparams
    except Exception as e :
        raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
return 0
except Exception as e :
raise e
@classmethod
def update(cls, client, resource):
    """Use this API to update aaatacacsparams."""
    try:
        if type(resource) is not list:
            updateresource = aaatacacsparams()
            # Copy every configurable field from the caller's resource.
            # setattr routes through the same property setters as direct
            # attribute assignment, so behavior is unchanged.
            for field in (
                "serverip",
                "serverport",
                "authtimeout",
                "tacacssecret",
                "authorization",
                "accounting",
                "auditfailedcmds",
                "defaultauthenticationgroup",
            ):
                setattr(updateresource, field, getattr(resource, field))
            return updateresource.update_resource(client)
    except Exception as e:
        raise e
@classmethod
def unset(cls, client, resource, args):
    """Use this API to unset the properties of aaatacacsparams resource.
    Properties that need to be unset are specified in args array.
    """
    try:
        # Lists are not supported for this singleton-style resource; a list
        # argument falls through and returns None, as before.
        if type(resource) is list:
            return None
        return aaatacacsparams().unset_resource(client, args)
    except Exception as e:
        raise e
@classmethod
def get(cls, client, name="", option_=""):
    """Use this API to fetch all the aaatacacsparams resources that are
    configured on netscaler.
    """
    try:
        # A non-empty name is not applicable to this resource; mirror the
        # original behavior of returning None in that case.
        if name:
            return None
        return aaatacacsparams().get_resources(client, option_)
    except Exception as e:
        raise e
class Auditfailedcmds:
    """Allowed string values for the auditfailedcmds setting."""
    ON, OFF = "ON", "OFF"

class Authorization:
    """Allowed string values for the authorization setting."""
    ON, OFF = "ON", "OFF"

class Accounting:
    """Allowed string values for the accounting setting."""
    ON, OFF = "ON", "OFF"
class aaatacacsparams_response(base_response):
    """Nitro response wrapper carrying a list of aaatacacsparams objects
    plus the standard error/session fields.
    """
    def __init__(self, length=1):
        """Pre-allocate *length* empty aaatacacsparams payload entries.

        The original assigned ``self.aaatacacsparams = []`` and then
        immediately overwrote it with the list comprehension; the redundant
        first assignment has been removed.
        """
        self.errorcode = 0
        self.message = ""
        self.severity = ""
        self.sessionid = ""
        self.aaatacacsparams = [aaatacacsparams() for _ in range(length)]
|
partofthething/home-assistant | homeassistant/components/aemet/abstract_aemet_sensor.py | Python | apache-2.0 | 1752 | 0 | """Abstraction from AEMET OpenData sensors."""
from homeassistant.const import ATTR_ATTRIBUTION
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import ATTRIBUTION, SENSOR_DEVICE_CLASS, SENSOR_NAME, SENSOR_UNIT
from .weather_update_coordinator import WeatherUpdateCoordinator
class AbstractAemetSensor(CoordinatorEntity):
    """Abstract base class for an AEMET OpenData sensor entity.

    Two tokens in the source were corrupted by stray "|" separators
    (``SENSOR_DEVICE_CLAS | S`` and ``@propert | y``); both are repaired here.
    """

    def __init__(
        self,
        name,
        unique_id,
        sensor_type,
        sensor_configuration,
        coordinator: WeatherUpdateCoordinator,
    ):
        """Initialize the sensor from its static configuration mapping."""
        super().__init__(coordinator)
        self._name = name
        self._unique_id = unique_id
        self._sensor_type = sensor_type
        # SENSOR_NAME is a mandatory key; unit and device class are optional.
        self._sensor_name = sensor_configuration[SENSOR_NAME]
        self._unit_of_measurement = sensor_configuration.get(SENSOR_UNIT)
        self._device_class = sensor_configuration.get(SENSOR_DEVICE_CLASS)

    @property
    def name(self):
        """Return the name of the sensor."""
        return f"{self._name} {self._sensor_name}"

    @property
    def unique_id(self):
        """Return a unique_id for this entity."""
        return self._unique_id

    @property
    def attribution(self):
        """Return the attribution."""
        return ATTRIBUTION

    @property
    def device_class(self):
        """Return the device_class."""
        return self._device_class

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement of this entity, if any."""
        return self._unit_of_measurement

    @property
    def device_state_attributes(self):
        """Return the state attributes."""
        return {ATTR_ATTRIBUTION: ATTRIBUTION}
|
tvieira/did | examples/mr.bob/hooks.py | Python | gpl-2.0 | 706 | 0 | #!/usr/bin/env python
# coding: utf-8
# Author: "Chris Ward" <cward@redhat.com>
'''
Mr.Bob Hooks
src: http://mrbob.readthedocs.org/en/latest/api.html#module-mrbob.hooks
'''
def pre_render(configurator):
    """mr.bob hook invoked before the template is rendered; currently a no-op.

    The ``def`` line was corrupted by a stray "|" separator in the source
    (``def pre_re | nder``); repaired here.
    """
    pass
def post_render(configurator):
    """mr.bob hook invoked after rendering; currently a no-op.

    The ``def`` line was corrupted by a stray "|" separator in the source
    (``def | post_render``); repaired here.

    TODO: remove unnecessary __init__.py, hooks.py (original inline note;
    not implemented yet).
    """
    pass
def pre_ask_question(configurator, question):
    """mr.bob hook invoked before each question is asked; currently a no-op."""
    return None
def post_ask_question(configurator, question, answer):
    """mr.bob hook invoked after each question is answered; currently a no-op."""
    return None
def set_name_email(configurator, question, answer):
    """Combine the previously collected author name with *answer* (the email)
    into an ``"Full Name" <email@eg.com>`` string, store it under the
    ``author.name_email`` template variable, and return the answer unchanged.
    """
    full_name = configurator.variables['author.name']
    configurator.variables['author.name_email'] = '"%s" <%s>' % (full_name, answer)
    return answer
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.