| repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k chars) | middle (string, 3-512 chars) | suffix (string, 0-8.17k chars) |
|---|---|---|---|---|---|---|---|---|
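Each row below is one fill-in-the-middle (FIM) record: a Python file from the named repository, split into `prefix`, `middle`, and `suffix` spans, plus license, size, and score metadata. The following is a minimal sketch of how a table with this schema could be consumed, assuming it is published as a Hugging Face-style dataset; the dataset identifier is a placeholder, not the actual source of this table.

```python
# Minimal sketch: stream rows matching the header above and rebuild each file excerpt.
# Assumption: "org/python-fim-corpus" is a hypothetical dataset id used only for illustration.
from datasets import load_dataset

ds = load_dataset("org/python-fim-corpus", split="train", streaming=True)

for row in ds:
    # Concatenating prefix + middle + suffix restores the original file excerpt.
    full_text = row["prefix"] + row["middle"] + row["suffix"]
    print(row["repo_name"], row["path"], row["license"], row["size"], row["score"])
    print(full_text[:200])
    break  # inspect a single record
```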
markdrago/caboose
|
src/test/files/file_matcher_glob_tests.py
|
Python
|
mit
| 1,146
| 0.002618
|
from nose.tools import *
from unittest import TestCase
import os
from shutil import rmtree
from tempfile import mkdtemp
from fnmatch import fnmatch
from files.file_matcher_glob import FileMatcherGlob
class FileMatcherGlobTests(TestCase):
def setUp(self):
self.directory = mkdtemp('-caboose-file-matcher-glob-tests')
def tearDown(self):
rmtree(self.directory)
def test_file_matcher_matches_against_glob(self):
self.file_matcher = FileMatcherGlob("*.java")
eq_(True, self.file_matcher.match("hello.java"))
eq_(False, self.file_matcher.match("hello.java2"))
def test_file_matcher_matches_against_unicode_glob(self):
self.file_matcher = FileMatcherGlob(u"*.java")
eq_(True, self.file_matcher.match("hello.java"))
eq_(False, self.file_matcher.match("hello.java2"))
def test_glob_matcher_handles_list_of_globs(self):
self.file_matcher = FileMatcherGlob(["*.one", "*.two"])
eq_(True, self.file_matcher.match("hello.one"))
eq_(True, self.file_matcher.match("hello.two"))
eq_(False, self.file_matcher.match("hello.three"))
|
kytos/kytos
|
tests/unit/test_core/test_buffers.py
|
Python
|
mit
| 3,932
| 0
|
"""Test kytos.core.buffers module."""
import asyncio
from unittest import TestCase
from unittest.mock import MagicMock, patch
from kytos.core.buffers import KytosBuffers, KytosEventBuffer
# pylint: disable=protected-access
class TestKytosEventBuffer(TestCase):
"""KytosEventBuffer tests."""
def setUp(self):
"""Instantiate a KytosEventBuffer."""
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
self.kytos_event_buffer = KytosEventBuffer('name', loop=self.loop)
@staticmethod
def create_event_mock(name='any'):
"""Create a new event mock."""
event = MagicMock()
event.name = name
return event
def test_put_get(self):
"""Test put and get methods."""
event = self.create_event_mock()
self.kytos_event_buffer.put(event)
queue_event = self.kytos_event_buffer.get()
self.assertEqual(queue_event, event)
def test_put__shutdown(self):
"""Test put method to shutdown event."""
event = self.create_event_mock('kytos/core.shutdown')
self.kytos_event_buffer.put(event)
self.assertTrue(self.kytos_event_buffer._reject_new_events)
def test_aput(self):
"""Test aput async method."""
event = MagicMock()
event.name = 'kytos/core.shutdown'
self.loop.run_until_complete(self.kytos_event_buffer.aput(event))
self.assertTrue(self.kytos_event_buffer._reject_new_events)
def test_aget(self):
"""Test aget async method."""
event = self.create_event_mock()
self.kytos_event_buffer._queue.sync_q.put(event)
expected = self.loop.run_until_complete(self.kytos_event_buffer.aget())
self.assertEqual(event, expected)
@patch('janus._SyncQueueProxy.task_done')
def test_task_done(self, mock_task_done):
"""Test task_done method."""
self.kytos_event_buffer.task_done()
mock_task_done.assert_called()
@patch('janus._SyncQueueProxy.join')
def test_join(self, mock_join):
"""Test join method."""
self.kytos_event_buffer.join()
mock_join.assert_called()
def test_qsize(self):
"""Test qsize method to empty and with one event in query."""
qsize_1 = self.kytos_event_buffer.qsize()
event = self.create_event_mock()
self.kytos_event_buffer._queue.sync_q.put(event)
qsize_2 = self.kytos_event_buffer.qsize()
self.assertEqual(qsize_1, 0)
self.assertEqual(qsize_2, 1)
def test_empty(self):
"""Test empty method to empty and with one event in query."""
empty_1 = self.kytos_event_buffer.empty()
event = self.create_event_mock()
self.kytos_event_buffer._queue.sync_q.put(event)
empty_2 = self.kytos_event_buffer.empty()
self.assertTrue(empty_1)
self.assertFalse(empty_2)
@patch('janus._SyncQueueProxy.full')
def test_full(self, mock_full):
"""Test full method to full and not full query."""
mock_full.side_effect = [False, True]
full_1 = self.kytos_event_buffer.full()
full_2 = self.kytos_event_buffer.full()
self.assertFalse(full_1)
self.assertTrue(full_2)
class TestKytosBuffers(TestCase):
"""KytosBuffers tests."""
def setUp(self):
"""Instantiate a KytosBuffers."""
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(None)
self.kytos_buffers = KytosBuffers(loop=self.loop)
def test_send_stop_signal(self):
"""Test send_stop_signal method."""
self.kytos_buffers.send_stop_signal()
self.assertTrue(self.kytos_buffers.raw._reject_new_events)
self.assertTrue(self.kytos_buffers.msg_in._reject_new_events)
self.assertTrue(self.kytos_buffers.msg_out._reject_new_events)
self.assertTrue(self.kytos_buffers.app._reject_new_events)
|
lahwaacz/python-wikeddiff
|
WikEdDiff/__init__.py
|
Python
|
gpl-3.0
| 125
| 0
|
#! /usr/bin/env python3
from .config import *
from .diff import *
from .HtmlFormatter import *
from .AnsiFormatter import *
|
marhoy/nrk-download
|
tests/test_utils.py
|
Python
|
gpl-3.0
| 721
| 0
|
import datetime
import nrkdownload.utils
def test_valid_filename(string=r":blah/bl:ah.ext"):
filename = nrkdownload.utils.valid_filename(string)
assert filename == "blahblah.ext"
def test_parse_duration(string="PT3H12M41.6S"):
# PT28M39S : 28m39s
# PT3H12M41.6S : 3h12m41.6s
duration = nrkdownload.utils.parse_duration(string)
assert duration == datetime.timedelta(hours=3, minutes=12, seconds=41.6)
duration = nrkdownload.utils.parse_duration("")
assert duration == datetime.timedelta()
# duration = nrkdownload.utils.parse_datetime('not_a_duration')
# assert duration == datetime.timedelta()
def test_classmethod():
c = nrkdownload.utils.ClassProperty()
assert c
|
jevinw/rec_utilities
|
babel_util/parsers/aminer_test.py
|
Python
|
agpl-3.0
| 350
| 0.005714
|
#!/usr/bin/env python
import unittest
from aminer import AMinerParser
class AMinerParserTest(unittest.TestCase):
SINGLE_TEST_FILE = "./aminer_single.txt"
def setUp(self):
self.single_test = open(self.SINGLE_TEST_FILE, "r")
def test_single_parse(self):
p = AMinerParser()
if __name__ == "__main__":
unittest.main()
|
OriHoch/Open-Knesset
|
simple/parsers/parse_laws.py
|
Python
|
bsd-3-clause
| 21,686
| 0.003465
|
# encoding: utf-8
import datetime
import logging
import os
import re
import urllib
import urllib2
from HTMLParser import HTMLParseError
from urlparse import urlparse
from BeautifulSoup import BeautifulSoup, Comment, NavigableString
from django.contrib.contenttypes.models import ContentType
from django.core.files.base import ContentFile
import parse_knesset_bill_pdf
from knesset.utils import send_chat_notification
from laws.models import Bill, Law, GovProposal
from links.models import Link, LinkedFile
from mks.models import Knesset
from simple.constants import PRIVATE_LAWS_URL, KNESSET_LAWS_URL, GOV_LAWS_URL
from simple.government_bills.parse_government_bill_pdf import GovProposalParser
from simple.parsers.utils import laws_parser_utils
from simple.parsers.utils.laws_parser_utils import normalize_correction_title_dashes, clean_line
logger = logging.getLogger("open-knesset.parse_laws")
# don't parse laws from an older knesset
CUTOFF_DATE = datetime.date(2009, 2, 24)
class ParseLaws(object):
"""partially abstract class for parsing laws. contains one function used in few
cases (private and other laws). this function gives the required page
"""
url = None
def get_page_with_param(self, params):
logger.debug('get_page_with_param: self.url=%s, params=%s' % (self.url, params))
if not params:
try:
html_page = urllib2.urlopen(self.url).read().decode('windows-1255').encode('utf-8')
except urllib2.URLError as e:
logger.error("can't open URL: %s" % self.url)
send_chat_notification(__name__, 'failed to open url', {'url': self.url, 'params': params})
return None
try:
soup = BeautifulSoup(html_page)
except HTMLParseError as e:
logger.debug("parsing URL: %s - %s. will try harder." % (self.url, e))
html_page = re.sub("(?s)<!--.*?-->", " ", html_page) # cut anything that looks suspicious
html_page = re.sub("(?s)<script>.*?</script>", " ", html_page)
html_page = re.sub("(?s)<!.*?>", " ", html_page)
try:
soup = BeautifulSoup(html_page)
except HTMLParseError as e:
logger.debug("error parsing URL: %s - %s" % (self.url, e))
send_chat_notification(__name__, 'failed to parse url', {'url': self.url, 'params': None})
return None
comments = soup.findAll(text=lambda text: isinstance(text, Comment))
[comment.extract() for comment in comments]
return soup
else:
data = urllib.urlencode(params)
try:
url_data = urllib2.urlopen(self.url, data)
except urllib2.URLError:
logger.error("can't open URL: %s" % self.url)
send_chat_notification(__name__, 'failed to open url', {'url': self.url, 'params': data})
return None
html_page = url_data.read().decode('windows-1255').encode('utf-8')
try:
soup = BeautifulSoup(html_page)
except HTMLParseError as e:
logger.debug("error parsing URL: %s - %s" % (self.url, e))
send_chat_notification(__name__, 'failed to parse url', {'url': self.url, 'params': data})
return None
comments = soup.findAll(text=lambda text: isinstance(text, Comment))
[comment.extract() for comment in comments]
return soup
class ParsePrivateLaws(ParseLaws):
"""a class that parses private laws proposed
"""
# the constructor parses the laws data from the required pages
def __init__(self, days_back):
self.url = PRIVATE_LAWS_URL
self.rtf_url = r"http://www.knesset.gov.il/privatelaw"
self.laws_data = []
self.parse_pages_days_back(days_back)
# parses the required pages data
def parse_pages_days_back(self, days_back):
today = datetime.date.today()
last_required_date = today + datetime.timedelta(days=-days_back)
last_law_checked_date = today
index = None
while last_law_checked_date > last_required_date:
if index:
params = {'RowStart': index}
else:
params = None
soup_current_page = self.get_page_with_param(params)
if not soup_current_page:
return
index = self.get_param(soup_current_page)
self.parse_private_laws_page(soup_current_page)
last_law_checked_date = self.update_last_date()
def get_param(self, soup):
name_tags = soup.findAll(
lambda tag: tag.name == 'a' and tag.has_key('href') and re.match("javascript:SndSelf\((\d+)\);",
tag['href']))
if name_tags and name_tags[0].get('href'):
m = re.match("javascript:SndSelf\((\d+)\);", name_tags[0]['href'])
return m.groups(1)[0]
else:
logger.error('Can not find any more name tags')
return None
def parse_private_laws_page(self, soup):
name_tag = soup.findAll(lambda tag: tag.name == 'tr' and tag.has_key('valign') and tag['valign'] == 'Top')
for tag in name_tag:
tds = tag.findAll(lambda td: td.name == 'td')
law_data = {}
law_data['knesset_id'] = int(tds[0].string.strip())
law_data['law_id'] = int(tds[1].string.strip())
if tds[2].findAll('a')[0].has_key('href'):
law_data['text_link'] = self.rtf_url + r"/" + tds[2].findAll('a')[0]['href']
law_data['law_full_title'] = tds[3].string.strip()
parsed_law_title = laws_parser_utils.parse_title(law_data['law_full_title'])
if not parsed_law_title:
logger.warn("can't parse proposal title: %s" % law_data['law_full_title'])
continue
law_data['law_name'] = clean_line(parsed_law_title.group(1))
comment1 = parsed_law_title.group(3)
comment2 = parsed_law_title.group(5)
if comment2:
law_data['correction'] = clean_line(comment2)
law_data['comment'] = comment1
else:
law_data['comment'] = None
if comment1:
law_data['correction'] = clean_line(comment1)
else:
law_data['correction'] = None
law_data['correction'] = normalize_correction_title_dashes(law_data['correction'])
law_data['law_year'] = parsed_law_title.group(7)
law_data['proposal_date'] = datetime.datetime.strptime(tds[4].string.strip(), '%d/%m/%Y').date()
names_string = ''.join([unicode(y) for y in tds[5].findAll('font')[0].contents])
names_string = clean_line(names_string)
proposers = []
joiners = []
# Old deprecated way to search for joiners
if re.search('ONMOUSEOUT', names_string) > 0:
splitted_names = names_string.split('ONMOUSEOUT')
joiners = [name for name in re.match('(.*?)\',\'', splitted_names[0]).group(1).split('<br />') if
len(name) > 0]
proposers = splitted_names[1][10:].split('<br />')
else:
proposers = names_string.split('<br />')
more_joiners = [name for name in tds[6].findAll(text=lambda text: isinstance(text, NavigableString)) if
name.strip() not in [u'מצטרפים לחוק:', u'אין מצטרפים לחוק']]
if len(more_joiners) and not joiners:
joiners = more_joiners
law_data['proposers'] = proposers
law_data['joiners'] = joiners
self.laws_data.append(law_data)
def update_last_date(self):
return self.laws_data[-1]['proposal_date']
class ParseKnessetLaws(ParseLaws):
"""
A class that parses Knesset Laws (laws after committees)
the constructor parses the laws d
|
mehmetkose/react-websocket
|
example/server.py
|
Python
|
mit
| 1,627
| 0.003688
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
import logging, os.path
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
import tornado.websocket
import tornado.gen
class Application(tornado.web.Application):
def __init__(self):
base_dir = os.path.dirname(__file__)
app_settings = {
"debug": True,
'static_path': os.path.join(base_dir, "static"),
}
tornado.web.Application.__init__(self, [
tornado.web.url(r"/", MainHandler, name="main"),
tornado.web.url(r"/live", WebSocketHandler, name="websocket"),
], **app_settings)
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.render('index.html')
class WebSocketHandler(tornado.websocket.WebSocketHandler):
listenners = []
def check_origin(self, origin):
return True
@tornado.gen.engine
def open(self):
WebSocketHandler.listenners.append(self)
def on_close(self):
if self in WebSocketHandler.listenners:
WebSocketHandler.listenners.remove(self)
@tornado.gen.engine
def on_message(self, wsdata):
for listenner in WebSocketHandler.listenners:
listenner.write_message(wsdata)
@tornado.gen.coroutine
def main():
tornado.options.parse_command_line()
http_server = tornado.httpserver.HTTPServer(Application())
http_server.listen(8888)
logging.info("application running on http://localhost:8888")
if __name__ == "__main__":
tornado.ioloop.IOLoop.current().run_sync(main)
tornado.ioloop.IOLoop.current().start()
|
jcarlson23/lammps
|
tools/moltemplate/examples/CG_membrane_examples/membrane_BranniganPRE2005/moltemplate_files/version_charmm_cutoff/calc_table.py
|
Python
|
gpl-2.0
| 2,403
| 0.012901
|
#!/usr/bin/env python
# Calculate a table of pairwise energies and forces between "INT" atoms
# in the lipid membrane model described in
# Brannigan et al, Phys Rev E, 72, 011915 (2005)
# The energy of this interaction U(r) = eps*(0.4*(sigma/r)^12 - 3.0*(sigma/r)^2)
# I realized later this is not what we want because although energy is conserved
# all energies are shifted with respect to energies used in the Brannigan paper
# (by 0.27 kCal/mole) and the later Watson JCP 2011 paper (by 0.224 kCal/mole).
# (So don't use this.)
# Calculate and print a
def S(r, rc1, rc2, derivative=False):
"""
Calculate the switching function S(r) which decays continuously
between 1 and 0 in the range from rc1 to rc2 (rc2>rc1):
S(r) = (rc2^2 - r^2)^2 * (rc2^2 + 2*r^2 - 3*rc1^2) / (rc2^2-rc1^2)^3
I'm using the same smoothing/switching cutoff function used by the CHARMM
force-fields. (I'm even using the same code to implement it, taken
from lammps charmm/coul/charmm pair style, rewritten in python.)
"""
assert(rc2>rc1)
rsq = r*r
rc1sq = rc1*rc1
rc2sq = rc2*rc2
denom_lj_inv = (1.0 / ((rc2sq-rc1sq)*
(rc2sq-rc1sq)*
(rc2sq-rc1sq)))
if rsq > rc2sq:
return 0.0
elif rsq < rc1sq:
if derivative:
return 0.0
else:
return 1.0
else:
rc2sq_minus_rsq = (rc2sq - rsq)
rc2sq_minus_rsq_sq = rc2sq_minus_rsq * rc2sq_minus_rsq
if derivative:
return (12.0 * rsq * rc2sq_minus_rsq * (rsq-rc1sq) * denom_lj_inv)
else:
return (rc2sq_minus_rsq_sq *
(rc2sq + 2.0*rsq - 3.0*rc1sq) * denom_lj_inv)
def U(r, eps, sigma):
return eps* (0.4*pow((sigma/r),12) - 3.0*sigma*sigma/(r*r))
def F(r, eps, sigma):
return eps*(12*0.4*pow((sigma/r),13)/sigma - 2*3.0*sigma*sigma/(r*r*r))
epsilon = 2.75/4.184 # kCal/mole
sigma = 7.5
Rmin = 2.6
Rmax = 22.6
Rc1 = 22.0
Rc2 = 22.5
N = 1001
for i in range(0,N):
r = Rmin + i*(Rmax-Rmin)/(N-1)
U_r = U(r, epsilon, sigma)
F_r = F(r, epsilon, sigma)
# Multiply U(r) & F(r) by the smoothing/switch function
U_r = U_r * S(r, Rc1, Rc2)
F_r = U_r * S(r, Rc1, Rc2, True) + F_r * S(r, Rc1, Rc2, False)
print(str(i+1)+' '+str(r)+' '+str(U_r)+' '+str(F_r))
|
ptorrestr/clean_text
|
clean_text/tests/test_cleaner.py
|
Python
|
gpl-2.0
| 7,295
| 0.010981
|
# -*- coding: utf-8 -*-
import unittest
import logging
from os.path import isfile
from os import popen
from os import remove
from t2db_objects import objects
from t2db_objects.utilities import formatHash
from t2db_objects.parameters import generate_config_yaml
from clean_text.cleaner import sentenceCleaner
from clean_text.cleaner import tokenCleaner
from clean_text.cleaner import tokenize
from clean_text.cleaner import sentenize
from clean_text.cleaner import cleanSentence
from clean_text.cleaner import Processor
from clean_text.cleaner import cleaner
from clean_text.utilities import load_stopwords
from clean_text.run import param_fields
from clean_text.run import conf_fields
from clean_text import functions
logger = logging.getLogger('clean_text')
""" Count the word in the file given"""
def wordCount(word, file_):
p = popen("cat " + file_ + " | awk -F '\t' '{print $6}' | grep -w " + word + " | wc -l")
# Get result and cast it
pOut = p.read()
p.close()
return int(pOut)
class TestCleanerFunctions(unittest.TestCase):
def setUp(self):
pass
def test_sentenceCleaner(self):
sentence = "this is a @user sample and a http://hi.com sample"
goldenSentence = "this is a sample and a sample"
self.assertEqual(sentenceCleaner(sentence, ["removeUrl", "removeUserMention"]), goldenSentence)
def test_tokenize(self):
sentence = "Hello didn't very happy 1313"
goldenTokens = ["Hello" , "did", "n't", "very", "happy", "1313"]
tokens = tokenize(sentence)
for i in range(0, len(tokens)):
self.assertEqual(tokens[i][0], goldenTokens[i])
def test_sentenize(self):
sentence = "Hello I'm very happy 1313"
goldenSentence = "Hello I 'm very happy 1313"
tokens = tokenize(sentence)
self.assertEqual(sentenize(tokens), goldenSentence)
def test_tokenCleaner(self):
sentence = "Hello I'm very happy 1313"
goldenSentence = "hello"
tokens = tokenize(sentence)
functions.stopwords = load_stopwords('etc/stopwords_en.txt')
newTokens = tokenCleaner(tokens, ["stemming", "toLowerCase", "removePunctuationAndNumbers", "stopwording"])
self.assertEqual(sentenize(newTokens), goldenSentence)
def test_cleanSentence(self):
sentence = ("At 8 o'clock on Thursday morning, the boys and girls didn't feel very good.")
sentenceProcList = ["removeUrl", "removeUserMention"]
functions.stopwords = load_stopwords('etc/stopwords_en.txt')
tokenProcList = ["stemming", "toLowerCase", "removePunctuationAndNumbers", "stopwording", "removeSingleChar", "removeDoubleChar"]
newSentence = cleanSentence(sentence, sentenceProcList, tokenProcList)
goldSentence = "oclock thursday morning boy girl feel good"
self.assertEqual(newSentence, goldSentence)
def test_cleanSentenceUnicode(self):
sentence = u"Según @NWS_PTWC, no hay riesgo generalizado de #tsunami tras el #sismo de Japón http://t.co/icErcNfSCf"
sentenceProcList = ["removeUrl", "removeUserMention"]
functions.stopwords = load_stopwords('etc/stopwords_en.txt')
tokenProcList = ["stemming", "toLowerCase", "removePunctuationAndNumbers", "stopwording", "removeSingleChar", "removeDoubleChar"]
newSentence = cleanSentence(sentence, sentenceProcList, tokenProcList)
goldSentence = u"según hay riesgo generalizado tsunami tras sismo japón"
self.assertEqual(newSentence, goldSentence)
@unittest.skip("demonstrating skipping")
def test_processFile(self):
rawObject = {
"date":"Sun Aug 07 01:28:32 IST 2011",
"id":"100000335933878272",
"user_id":"71610408",
"status":"@baloji you were so awesome, it was amazing and you were shining like the star that you are...MERCI!! #baloji i_i"
}
goldenRawObject = {
"date":"Sun Aug 07 01:28:32 IST 2011",
"id":"100000335933878272",
"user_id":"71610408",
"status":"@baloji you were so awesome, it was amazing and you were shining like the star that you are...MERCI!! #baloji i_i",
"status_clean":"awesome amaze shin star merci baloji"
}
rawObjects = [rawObject]
text_field = 'status'
new_text_field = 'status_clean'
sentence_proc_list = {'removeUrl', 'removeUserMention'}
token_proc_list = {'stemming', 'toLowerCase', 'removePunctuationAndNumbers',
'stopwording', 'removeSingleChar', 'removeDoubleChar'}
functions.stopwords = load_stopwords('etc/stopwords_en.txt')
proc = Processor(text_field, new_text_field, sentence_proc_list, token_proc_list)
newRawObject = proc.processFile(rawObjects)
self.assertEqual(rawObject, goldenRawObject)
@unittest.skip("demonstrating skipping")
def test_processFileUnicode(self):
rawObject = {
"date":u"Sun Aug 07 01:28:32 IST 2011",
"id":u"100000335933878272",
"user_id":u"71610408",
"status":u"Según @NWS_PTWC, no hay riesgo generalizado de #tsunami tras el #sismo de Japón http://t.co/icErcNfSCf",
}
goldenRawObject = {
"date":u"Sun Aug 07 01:28:32 IST 2011",
"id":u"100000335933878272",
"user_id":u"71610408",
"status":u"Según @NWS_PTWC, no hay riesgo generalizado de #tsunami tras el #sismo de Japón http://t.co/icErcNfSCf",
"status_clean":u"Según hay riesgo generalizado tsunami tras sismo Japón"
}
rawObjects = [rawObject]
text_field = 'status'
new_text_field = 'status_clean'
sentence_proc_list = {'removeUrl', 'removeUserMention'}
token_proc_list = {'stemming', 'toLowerCase', 'removePunctuationAndNumbers',
'stopwording', 'removeSingleChar', 'removeDoubleChar'}
functions.stopwords = load_stopwords('etc/stopwords_en.txt')
proc = Processor(text_field, new_text_field, sentence_proc_list, token_proc_list)
newRawObject = proc.processFile(rawObjects)
self.assertEqual(rawObject, goldenRawObject)
@unittest.skip("demonstrating skipping")
def test_notValidProcessFile(self):
rawObject = {
"date":"Sun Aug 07 01:28:32 IST 2011",
"id":"100000335933878272",
"user_id":"71610408",
"status":"@baloji you were so awesome, it was amazing and you were shining like the star that you are...MERCI!! #baloji i_i"
}
rawObjects = [rawObject]
text_field = 'otherfield'
new_text_field = 'status_clean'
sentence_proc_list = {'removeUrl', 'removeUserMention'}
token_proc_list = {'stemming', 'toLowerCase', 'removePunctuationAndNumbers',
'stopwording', 'removeSingleChar', 'removeDoubleChar'}
functions.stopwords = load_stopwords('etc/stopwords_en.txt')
proc = Processor(text_field, new_text_field, sentence_proc_list, token_proc_list)
proc = Processor(config)
self.assertRaises(Exception, proc.processFile, rawObjects)
#@unittest.skip("avoid big files")
def test_cleaner(self):
rawParams = {
'input_file':'etc/example.tsv',
'output_file':'output.tmp',
'config_file':'etc/config.yaml',
}
params = objects.Configuration(param_fields, rawParams)
config = generate_config_yaml(conf_fields, params.config_file)
if isfile(params.output_file):
remove(params.output_file)
cleaner(params, config)
self.assertTrue(isfile(params.output_file))
self.assertEqual(wordCount(" to ", params.output_file), 0)
self.assertEqual(wordCount(" photo ", params.output_file), 0)
|
flyapen/UgFlu
|
flumotion/worker/__init__.py
|
Python
|
gpl-2.0
| 962
| 0
|
# -*- Mode: Python -*-
# vi:si:et:sw=4:sts=4:ts=4
#
# Flumotion - a streaming media server
# Copyright (C) 2004,2005,2006,2007 Fluendo, S.L. (www.fluendo.com).
# All rights reserved.
# This file may be distributed and/or modified under the terms of
# the GNU General Public License version 2 as published by
# the Free Software Foundation.
# This file is distributed without any warranty; without even the implied
# warranty of merchantability or fitness for a particular purpose.
# See "LICENSE.GPL" in the source distribution for more information.
# Licensees having purchased or holding a valid Flumotion Advanced
# Streaming Server license may use this file in accordance with the
# Flumotion Advanced Streaming Server Commercial License Agreement.
# See "LICENSE.Flumotion" in the source distribution for more information.
# Headers in this file shall remain intact.
"""
code for workers executing jobs for the manager
"""
__version__ = "$Rev: 6125 $"
|
UITools/saleor
|
saleor/graphql/api.py
|
Python
|
bsd-3-clause
| 1,246
| 0
|
import graphene
from .account.schema import AccountMutations, AccountQueries
from .checkout.schema import CheckoutMutations, CheckoutQueries
from .core.schema import CoreMutations
from .discount.schema import DiscountMutations, DiscountQueries
from .menu.schema import MenuMutations, MenuQueries
from .order.schema import OrderMutations, OrderQueries
from .page.schema import PageMutations, PageQueries
from .payment.schema import PaymentMutations, PaymentQueries
from .product.schema import ProductMutations, ProductQueries
from .shipping.schema import ShippingMutations, ShippingQueries
from .shop.schema import ShopMutations, ShopQueries
from .translations.schema import TranslationQueries
class Query(AccountQueries, CheckoutQueries, DiscountQueries, MenuQueries,
OrderQueries, PageQueries, PaymentQueries, ProductQueries,
ShippingQueries, ShopQueries, TranslationQueries):
node = graphene.Node.Field()
class Mutations(AccountMutations, CheckoutMutations, CoreMutations,
DiscountMutations, MenuMutations, OrderMutations,
PageMutations, PaymentMutations, ProductMutations,
ShippingMutations, ShopMutations):
pass
schema = graphene.Schema(Query, Mutations)
|
Canpio/Paddle
|
python/paddle/fluid/tests/book/high-level-api/recognize_digits/test_recognize_digits_mlp.py
|
Python
|
apache-2.0
| 3,760
| 0.000266
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import paddle.fluid as fluid
import paddle
import sys
import numpy
import unittest
import math
import sys
import os
BATCH_SIZE = 64
def inference_program():
img = fluid.layers.data(name='img', shape=[1, 28, 28], dtype='float32')
hidden = fluid.layers.fc(input=img, size=200, act='tanh')
hidden = fluid.layers.fc(input=hidden, size=200, act='tanh')
prediction = fluid.layers.fc(input=hidden, size=10, act='softmax')
return prediction
def train_program():
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
predict = inference_program()
cost = fluid.layers.cross_entropy(input=predict, label=label)
avg_cost = fluid.layers.mean(cost)
acc = fluid.layers.accuracy(input=predict, label=label)
return [avg_cost, acc]
def optimizer_func():
return fluid.optimizer.Adam(learning_rate=0.001)
def train(use_cuda, train_program, params_dirname):
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
trainer = fluid.Trainer(
train_func=train_program, place=place, optimizer_func=optimizer_func)
def event_handler(event):
if isinstance(event, fluid.EndEpochEvent):
test_reader = paddle.batch(
paddle.dataset.mnist.test(), batch_size=BATCH_SIZE)
avg_cost, acc = trainer.test(
reader=test_reader, feed_order=['img', 'label'])
print("avg_cost: %s" % avg_cost)
print("acc : %s" % acc)
if acc > 0.2: # Smaller value to increase CI speed
trainer.save_params(params_dirname)
else:
print('BatchID {0}, Test Loss {1:0.2}, Acc {2:0.2}'.format(
event.epoch + 1, avg_cost, acc))
if math.isnan(avg_cost):
sys.exit("got NaN loss, training failed.")
train_reader = paddle.batch(
paddle.reader.shuffle(
paddle.dataset.mnist.train(), buf_size=500),
batch_size=BATCH_SIZE)
trainer.train(
num_epochs=1,
event_handler=event_handler,
reader=train_reader,
feed_order=['img', 'label'])
def infer(use_cuda, inference_program, params_dirname=None):
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
inferencer = fluid.Inferencer(
infer_func=inference_program, param_path=params_dirname, place=place)
batch_size = 1
tensor_img = numpy.random.uniform(-1.0, 1.0,
[batch_size, 1, 28, 28]).astype("float32")
results = inferencer.infer({'img': tensor_img})
print("infer results: ", results[0])
def main(use_cuda):
params_dirname = "recognize_digits_mlp.inference.model"
# call train() with is_local argument to run distributed train
train(
use_cuda=use_cuda,
train_program=train_program,
params_dirname=params_dirname)
infer(
use_cuda=use_cuda,
inference_program=inference_program,
params_dirname=params_dirname)
if __name__ == '__main__':
# for use_cuda in (False, True):
main(use_cuda=False)
|
cysuncn/python
|
spark/crm/PROC_O_IBK_WSYH_ECUSRLOGINTYPE.py
|
Python
|
gpl-3.0
| 6,837
| 0.013234
|
#coding=UTF-8
from pyspark import SparkContext, SparkConf, SQLContext, Row, HiveContext
from pyspark.sql.types import *
from datetime import date, datetime, timedelta
import sys, re, os
st = datetime.now()
conf = SparkConf().setAppName('PROC_O_IBK_WSYH_ECUSRLOGINTYPE').setMaster(sys.argv[2])
sc = SparkContext(conf = conf)
sc.setLogLevel('WARN')
if len(sys.argv) > 5:
if sys.argv[5] == "hive":
sqlContext = HiveContext(sc)
else:
sqlContext = SQLContext(sc)
hdfs = sys.argv[3]
dbname = sys.argv[4]
# dates needed for processing
etl_date = sys.argv[1]
# ETL date
V_DT = etl_date
# previous day's date
V_DT_LD = (date(int(etl_date[0:4]), int(etl_date[4:6]), int(etl_date[6:8])) + timedelta(-1)).strftime("%Y%m%d")
# first day of the current month
V_DT_FMD = date(int(etl_date[0:4]), int(etl_date[4:6]), 1).strftime("%Y%m%d")
# last day of the previous month
V_DT_LMD = (date(int(etl_date[0:4]), int(etl_date[4:6]), 1) + timedelta(-1)).strftime("%Y%m%d")
# 10-character date (YYYY-MM-DD)
V_DT10 = (date(int(etl_date[0:4]), int(etl_date[4:6]), int(etl_date[6:8]))).strftime("%Y-%m-%d")
V_STEP = 0
O_CI_WSYH_ECUSRLOGINTYPE = sqlContext.read.parquet(hdfs+'/O_CI_WSYH_ECUSRLOGINTYPE/*')
O_CI_WSYH_ECUSRLOGINTYPE.registerTempTable("O_CI_WSYH_ECUSRLOGINTYPE")
# task [12] 001-01::
V_STEP = V_STEP + 1
# first delete all data from the original table
ret = os.system("hdfs dfs -rm -r /"+dbname+"/F_CI_WSYH_ECUSRLOGINTYPE/*.parquet")
# copy yesterday's full snapshot over from the backup table
ret = os.system("hdfs dfs -cp -f /"+dbname+"/F_CI_WSYH_ECUSRLOGINTYPE_BK/"+V_DT_LD+".parquet /"+dbname+"/F_CI_WSYH_ECUSRLOGINTYPE/"+V_DT+".parquet")
F_CI_WSYH_ECUSRLOGINTYPE = sqlContext.read.parquet(hdfs+'/F_CI_WSYH_ECUSRLOGINTYPE/*')
F_CI_WSYH_ECUSRLOGINTYPE.registerTempTable("F_CI_WSYH_ECUSRLOGINTYPE")
sql = """
SELECT A.USERSEQ AS USERSEQ
,A.MCHANNELID AS MCHANNELID
,A.LOGINTYPE AS LOGINTYPE
,A.USERID AS USERID
,A.PASSWORD AS PASSWORD
,A.LOGINTYPESTATE AS LOGINTYPESTATE
,A.UPDATEPASSWORDDATE AS UPDATEPASSWORDDATE
,A.WRONGPASSCOUNT AS WRONGPASSCOUNT
,A.UNLOCKDATE AS UNLOCKDATE
,A.FIRSTLOGINTIME AS FIRSTLOGINTIME
,A.LASTLOGINTIME AS LASTLOGINTIME
,A.LASTLOGINADDR AS LASTLOGINADDR
,A.CREATEUSERSEQ AS CREATEUSERSEQ
,A.CREATEDEPTSEQ AS CREATEDEPTSEQ
,A.CREATETIME AS CREATETIME
,A.UPDATEUSERSEQ AS UPDATEUSERSEQ
,A.UPDATEDEPTSEQ AS UPDATEDEPTSEQ
,A.UPDATETIME AS UPDATETIME
,A.FR_ID AS FR_ID
,V_DT AS ODS_ST_DATE
,'IBK' AS ODS_SYS_ID
FROM O_CI_WSYH_ECUSRLOGINTYPE A --e-banking user authentication information table
"""
sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql)
F_CI_WSYH_ECUSRLOGINTYPE_INNTMP1 = sqlContext.sql(sql)
F_CI_WSYH_ECUSRLOGINTYPE_INNTMP1.registerTempTable("F_CI_WSYH_ECUSRLOGINTYPE_INNTMP1")
#F_CI_WSYH_ECUSRLOGINTYPE = sqlContext.read.parquet(hdfs+'/F_CI_WSYH_ECUSRLOGINTYPE/*')
#F_CI_WSYH_ECUSRLOGINTYPE.registerTempTable("F_CI_WSYH_ECUSRLOGINTYPE")
sql = """
SELECT DST.USERSEQ --user sequence number: src.USERSEQ
,DST.MCHANNELID --module channel code: src.MCHANNELID
,DST.LOGINTYPE --login type: src.LOGINTYPE
,DST.USERID --user login ID: src.USERID
,DST.PASSWORD --user login password: src.PASSWORD
,DST.LOGINTYPESTATE --activation status: src.LOGINTYPESTATE
,DST.UPDATEPASSWORDDATE --last password change time: src.UPDATEPASSWORDDATE
,DST.WRONGPASSCOUNT --wrong-password count: src.WRONGPASSCOUNT
,DST.UNLOCKDATE --last unlock date and time: src.UNLOCKDATE
,DST.FIRSTLOGINTIME --first login time: src.FIRSTLOGINTIME
,DST.LASTLOGINTIME --last login time: src.LASTLOGINTIME
,DST.LASTLOGINADDR --last login address: src.LASTLOGINADDR
,DST.CREATEUSERSEQ --creating user sequence number: src.CREATEUSERSEQ
,DST.CREATEDEPTSEQ --creating organization sequence number: src.CREATEDEPTSEQ
,DST.CREATETIME --creation time: src.CREATETIME
,DST.UPDATEUSERSEQ --updating user sequence number: src.UPDATEUSERSEQ
,DST.UPDATEDEPTSEQ --updating organization sequence number: src.UPDATEDEPTSEQ
,DST.UPDATETIME --update time: src.UPDATETIME
,DST.FR_ID --legal entity ID: src.FR_ID
,DST.ODS_ST_DATE --system date: src.ODS_ST_DATE
,DST.ODS_SYS_ID --system ID: src.ODS_SYS_ID
FROM F_CI_WSYH_ECUSRLOGINTYPE DST
LEFT JOIN F_CI_WSYH_ECUSRLOGINTYPE_INNTMP1 SRC
ON SRC.USERSEQ = DST.USERSEQ
AND SRC.MCHANNELID = DST.MCHANNELID
AND SRC.LOGINTYPE = DST.LOGINTYPE
WHERE SRC.USERSEQ IS NULL """
sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql)
F_CI_WSYH_ECUSRLOGINTYPE_INNTMP2 = sqlContext.sql(sql)
dfn="F_CI_WSYH_ECUSRLOGINTYPE/"+V_DT+".parquet"
F_CI_WSYH_ECUSRLOGINTYPE_INNTMP2=F_CI_WSYH_ECUSRLOGINTYPE_INNTMP2.unionAll(F_CI_WSYH_ECUSRLOGINTYPE_INNTMP1)
F_CI_WSYH_ECUSRLOGINTYPE_INNTMP1.cache()
F_CI_WSYH_ECUSRLOGINTYPE_INNTMP2.cache()
nrowsi = F_CI_WSYH_ECUSRLOGINTYPE_INNTMP1.count()
nrowsa = F_CI_WSYH_ECUSRLOGINTYPE_INNTMP2.count()
F_CI_WSYH_ECUSRLOGINTYPE_INNTMP2.write.save(path = hdfs + '/' + dfn, mode='overwrite')
F_CI_WSYH_ECUSRLOGINTYPE_INNTMP1.unpersist()
F_CI_WSYH_ECUSRLOGINTYPE_INNTMP2.unpersist()
et = datetime.now()
print("Step %d start[%s] end[%s] use %d seconds, insert F_CI_WSYH_ECUSRLOGINTYPE lines %d, all lines %d") % (V_STEP, st.strftime("%H:%M:%S"), et.strftime("%H:%M:%S"), (et-st).seconds, nrowsi, nrowsa)
ret = os.system("hdfs dfs -mv /"+dbname+"/F_CI_WSYH_ECUSRLOGINTYPE/"+V_DT_LD+".parquet /"+dbname+"/F_CI_WSYH_ECUSRLOGINTYPE_BK/")
# first delete today's data from the backup table
ret = os.system("hdfs dfs -rm -r /"+dbname+"/F_CI_WSYH_ECUSRLOGINTYPE_BK/"+V_DT+".parquet")
# copy today's full data from the original table to the backup table
ret = os.system("hdfs dfs -cp -f /"+dbname+"/F_CI_WSYH_ECUSRLOGINTYPE/"+V_DT+".parquet /"+dbname+"/F_CI_WSYH_ECUSRLOGINTYPE_BK/"+V_DT+".parquet")
|
repotvsupertuga/tvsupertuga.repository
|
script.module.universalscrapers/lib/universalscrapers/executor.py
|
Python
|
gpl-2.0
| 842
| 0.001188
|
import concurrent.futures
from itertools import islice
import xbmc
import threading
|
Executor = concurrent.futures.ThreadPoolExecutor
def execute(f, iterable, stop_flag=None, workers=10, timeout=30):
with Executor(max_workers=workers) as executor:
threading.Timer(timeout, stop_flag.set)
for future in _batched_pool_runner(executor, workers, f,
iterable, timeout):
if xbmc.abortRequested:
break
if stop_flag and stop_flag.isSet():
break
yield future.result()
def _batched_pool_runner(pool, batch_size, f, iterable, timeout):
futures = [pool.submit(f, x) for x in iterable]
try:
for item in concurrent.futures.as_completed(futures, timeout):
yield item
except:
pass
|
vapkarian/soccer-analyzer
|
src/colors/v20/default.py
|
Python
|
mit
| 1,079
| 0
|
from src.settings import Colors
def league_color(league: str) -> Colors:
if league in [
]:
return Colors.GREEN
if league in [
'1 CFL (Montenegro)',
'A Lyga (Lithuania)',
'Bikar (Iceland)',
'Coupe de la Ligue (France)',
'EURO Qualifiers (Europe)',
'FA Cup (England)',
'J-League (Japan)',
'J-League 2 (Japan)',
'K-League (South Korea)',
'Landspokal (Denmark)',
'League Cup (Scotland)',
'Meistriliiga (Estonia)',
'OFB Cup (Austria)',
'Pohar CMFS (Czech Republic)',
'Premier League (Wales)',
'Primera Division (Chile)',
'Proximus League (Belgium)',
'Serie A (Italy)',
'S-League (Singapore)',
'Slovensky Pohar (Slovakia)',
'Svenska Cupen (Sweden)',
'Swiss Cup (Switzerland)',
'Virsliga (Latvia)',
'Vyscha Liga (Ukraine)',
'Úrvalsdeild (Iceland)',
]:
return Colors.RED
if league in [
]:
return Colors.YELLOW
return Colors.EMPTY
|
adrn/gala
|
gala/potential/hamiltonian/tests/helpers.py
|
Python
|
mit
| 5,372
| 0.001862
|
# Third-party
import astropy.units as u
import numpy as np
# Project
from ....dynamics import PhaseSpacePosition, Orbit
from ....units import galactic
PSP = PhaseSpacePosition
ORB = Orbit
class _TestBase(object):
use_half_ndim = False
E_unit = u.erg/u.kg
@classmethod
def setup_class(cls):
np.random.seed(42)
ndim = 6
r_ndim = ndim # return ndim
if cls.use_half_ndim:
r_ndim = r_ndim // 2
norbits = 16
ntimes = 8
# some position or phase-space position arrays we will test methods on:
cls.w0s = []
cls.energy_return_shapes = []
cls.gradient_return_shapes = []
cls.hessian_return_shapes = []
# 1D - phase-space position
cls.w0s.append(PSP(pos=np.random.random(size=ndim//2),
vel=np.random.random(size=ndim//2)))
cls.w0s.append(PSP(pos=np.random.random(size=ndim//2)*u.kpc,
vel=np.random.random(size=ndim//2)*u.km/u.s))
cls.energy_return_shapes += [(1,)]*2
cls.gradient_return_shapes += [(r_ndim, 1)]*2
cls.hessian_return_shapes += [(r_ndim, r_ndim, 1)]*2
# 2D - phase-space position
cls.w0s.append(PSP(pos=np.random.random(size=(ndim//2, norbits)),
vel=np.random.random(size=(ndim//2, norbits))))
cls.w0s.append(PSP(pos=np.random.random(size=(ndim//2, norbits))*u.kpc,
vel=np.random.random(size=(ndim//2, norbits))*u.km/u.s))
cls.energy_return_shapes += [(norbits,)]*2
cls.gradient_return_shapes += [(r_ndim, norbits)]*2
cls.hessian_return_shapes += [(r_ndim, r_ndim, norbits)]*2
# 3D - phase-space position
cls.w0s.append(PSP(pos=np.random.random(size=(ndim//2, norbits, ntimes)),
vel=np.random.random(size=(ndim//2, norbits, ntimes))))
cls.w0s.append(PSP(pos=np.random.random(size=(ndim//2, norbits, ntimes))*u.kpc,
vel=np.random.random(size=(ndim//2, norbits, ntimes))*u.km/u.s))
cls.energy_return_shapes += [(norbits, ntimes)]*2
cls.gradient_return_shapes += [(r_ndim, norbits, ntimes)]*2
cls.hessian_return_shapes += [(r_ndim, r_ndim, norbits, ntimes)]*2
# 2D - orbit
cls.w0s.append(ORB(pos=np.random.random(size=(ndim//2, ntimes)),
vel=np.random.random(size=(ndim//2, ntimes))))
cls.w0s.append(ORB(pos=np.random.random(size=(ndim//2, ntimes))*u.kpc,
vel=np.random.random(size=(ndim//2, ntimes))*u.km/u.s))
cls.energy_return_shapes += [(ntimes,)]*2
cls.gradient_return_shapes += [(r_ndim, ntimes,)]*2
cls.hessian_return_shapes += [(r_ndim, r_ndim, ntimes,)]*2
# 3D - orbit
cls.w0s.append(ORB(pos=np.random.random(size=(ndim//2, ntimes, norbits)),
vel=np.random.random(size=(ndim//2, ntimes, norbits))))
cls.w0s.append(ORB(pos=np.random.random(size=(ndim//2, ntimes, norbits))*u.kpc,
vel=np.random.random(size=(ndim//2, ntimes, norbits))*u.km/u.s))
cls.energy_return_shapes += [(ntimes, norbits)]*2
cls.gradient_return_shapes += [(r_ndim, ntimes, norbits)]*2
cls.hessian_return_shapes += [(r_ndim, r_ndim, ntimes, norbits)]*2
_obj_w0s = cls.w0s[:]
for w0, eshp, gshp, hshp in zip(_obj_w0s,
cls.energy_return_shapes,
cls.gradient_return_shapes,
cls.hessian_return_shapes):
cls.w0s.append(w0.w(galactic))
cls.energy_return_shapes.append(eshp)
cls.gradient_return_shapes.append(gshp)
cls.hessian_return_shapes.append(hshp)
def test_energy(self):
for arr, shp in zip(self.w0s, self.energy_return_shapes):
if self.E_unit.is_equivalent(u.one) and hasattr(arr, 'pos') and \
not arr.xyz.unit.is_equivalent(u.one):
continue
v = self.obj.energy(arr)
assert v.shape == shp
assert v.unit.is_equivalent(self.E_unit)
|
t = np.zeros(np.array(arr).shape[1:]) + 0.1
self.obj.energy(arr, t=0.1)
self.obj.energy(arr, t=t)
self.obj.energy(arr, t=0.1*self.obj.units['time'])
def test_gradient(self):
for arr, shp in zip(self.w0s, self.gradient_return_shapes):
if self.E_unit.is_equivalent(u.one) and hasattr(arr, 'pos') and \
not arr.xyz.unit.is_equivalent(u.one):
continue
v = self.obj.gradient(arr)
assert v.shape == shp
# TODO: check return units
t = np.zeros(np.array(arr).shape[1:]) + 0.1
self.obj.gradient(arr, t=0.1)
self.obj.gradient(arr, t=t)
self.obj.gradient(arr, t=0.1*self.obj.units['time'])
def test_hessian(self):
for arr, shp in zip(self.w0s, self.hessian_return_shapes):
if self.E_unit.is_equivalent(u.one) and hasattr(arr, 'pos') and \
not arr.xyz.unit.is_equivalent(u.one):
continue
g = self.obj.hessian(arr)
assert g.shape == shp
# TODO: check return units
|
iktakahiro/sphinx_theme_pd
|
sphinx_theme_pd/__init__.py
|
Python
|
mit
| 136
| 0
|
import os
|
def get_html_theme_path():
theme_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
return theme_dir
|
yonahbox/ardupilot
|
Tools/ardupilotwaf/chibios.py
|
Python
|
gpl-3.0
| 13,403
| 0.005596
|
#!/usr/bin/env python
# encoding: utf-8
"""
Waf tool for ChibiOS build
"""
from waflib import Errors, Logs, Task, Utils
from waflib.TaskGen import after_method, before_method, feature
import os
import shutil
import sys
import re
import pickle
_dynamic_env_data = {}
def _load_dynamic_env_data(bld):
bldnode = bld.bldnode.make_node('modules/ChibiOS')
tmp_str = bldnode.find_node('include_dirs').read()
tmp_str = tmp_str.replace(';\n','')
tmp_str = tmp_str.replace('-I','') #remove existing -I flags
# split, coping with separator
idirs = re.split('; ', tmp_str)
# create unique list, coping with relative paths
idirs2 = []
for d in idirs:
if d.startswith('../'):
# relative paths from the make build are relative to BUILDROOT
d = os.path.join(bld.env.BUILDROOT, d)
d = os.path.normpath(d)
if not d in idirs2:
idirs2.append(d)
_dynamic_env_data['include_dirs'] = idirs2
@feature('ch_ap_library', 'ch_ap_program')
@before_method('process_source')
def ch_dynamic_env(self):
# The generated files from configuration possibly don't exist if it's just
# a list command (TODO: figure out a better way to address that).
if self.bld.cmd == 'list':
return
if not _dynamic_env_data:
_load_dynamic_env_data(self.bld)
self.use += ' ch'
self.env.append_value('INCLUDES', _dynamic_env_data['include_dirs'])
class upload_fw(Task.Task):
color='BLUE'
always_run = True
def run(self):
upload_tools = self.env.get_flat('UPLOAD_TOOLS')
src = self.inputs[0]
return self.exec_command("python '{}/px_uploader.py' '{}'".format(upload_tools, src))
def exec_command(self, cmd, **kw):
kw['stdout'] = sys.stdout
return super(upload_fw, self).exec_command(cmd, **kw)
def keyword(self):
return "Uploading"
class set_default_parameters(Task.Task):
color='CYAN'
always_run = True
def keyword(self):
return "apj_tool"
def run(self):
rel_default_parameters = self.env.get_flat('DEFAULT_PARAMETERS')
abs_default_parameters = os.path.join(self.env.SRCROOT, rel_default_parameters)
apj_tool = self.env.APJ_TOOL
sys.path.append(os.path.dirname(apj_tool))
from apj_tool import embedded_defaults
defaults = embedded_defaults(self.inputs[0].abspath())
if not defaults.find():
print("Error: Param defaults support not found in firmware")
sys.exit(1)
defaults.set_file(abs_default_parameters)
defaults.save()
class generate_bin(Task.Task):
color='CYAN'
run_str="${OBJCOPY} -O binary ${SRC} ${TGT}"
always_run = True
def keyword(self):
return "Generating"
def __str__(self):
return self.outputs[0].path_from(self.generator.bld.bldnode)
class generate_apj(Task.Task):
'''generate an apj firmware file'''
color='CYAN'
always_run = True
def keyword(self):
return "apj_gen"
def run(self):
import json, time, base64, zlib
img = open(self.inputs[0].abspath(),'rb').read()
d = {
"board_id": int(self.env.APJ_BOARD_ID),
"magic": "APJFWv1",
"description": "Firmware for a %s board" % self.env.APJ_BOARD_TYPE,
"image": base64.b64encode(zlib.compress(img,9)).decode('utf-8'),
"build_time": int(time.time()),
"summary": self.env.BOARD,
"version": "0.1",
"image_size": len(img),
"git_identity": self.generator.bld.git_head_hash(short=True),
"board_revision": 0
}
apj_file = self.outputs[0].abspath()
f = open(apj_file, "w")
f.write(json.dumps(d, indent=4))
f.close()
class build_abin(Task.Task):
'''build an abin file for skyviper firmware upload via web UI'''
color='CYAN'
run_str='${TOOLS_SCRIPTS}/make_abin.sh ${SRC}.bin ${SRC}.abin'
always_run = True
def keyword(self):
return "Generating"
def __str__(self):
return self.outputs[0].path_from(self.generator.bld.bldnode)
class build_intel_hex(Task.Task):
'''build an intel hex file for upload with DFU'''
color='CYAN'
run_str='${TOOLS_SCRIPTS}/make_intel_hex.py ${SRC} ${FLASH_RESERVE_START_KB}'
always_run = True
def keyword(self):
return "Generating"
def __str__(self):
return self.outputs[0].path_from(self.generator.bld.bldnode)
@feature('ch_ap_program')
@after_method('process_source')
def chibios_firmware(self):
self.link_task.always_run = True
link_output = self.link_task.outputs[0]
bin_target = self.bld.bldnode.find_or_declare('bin/' + link_output.change_ext('.bin').name)
apj_target = self.bld.bldnode.find_or_declare('bin/' + link_output.change_ext('.apj').name)
generate_bin_task = self.create_task('generate_bin', src=link_output, tgt=bin_target)
generate_bin_task.set_run_after(self.link_task)
generate_apj_task = self.create_task('generate_apj', src=bin_target, tgt=apj_target)
generate_apj_task.set_run_after(generate_bin_task)
if self.env.BUILD_ABIN:
abin_target = self.bld.bldnode.find_or_declare('bin/' + link_output.change_ext('.abin').name)
abin_task = self.create_task('build_abin', src=link_output, tgt=abin_target)
abin_task.set_run_after(generate_apj_task)
bootloader_bin = self.bld.srcnode.make_node("Tools/bootloaders/%s_bl.bin" % self.env.BOARD)
if os.path.exists(bootloader_bin.abspath()) and self.bld.env.HAVE_INTEL_HEX:
hex_target = self.bld.bldnode.find_or_declare('bin/' + link_output.change_ext('.hex').name)
hex_task = self.create_task('build_intel_hex', src=[bin_target, bootloader_bin], tgt=hex_target)
hex_task.set_run_after(generate_bin_task)
if self.env.DEFAULT_PARAMETERS:
default_params_task = self.create_task('set_default_parameters',
src=link_output)
default_params_task.set_run_after(self.link_task)
generate_bin_task.set_run_after(default_params_task)
if self.bld.options.upload:
_upload_task = self.create_task('upload_fw', src=apj_target)
_upload_task.set_run_after(generate_apj_task)
def setup_can_build(cfg):
'''enable CAN build. By doing this here we can auto-enable CAN in
the build based on the presence of CAN pins in hwdef.dat'''
env = cfg.env
env.AP_LIBRARIES += [
'AP_UAVCAN',
'modules/uavcan/libuavcan/src/**/*.cpp',
'modules/uavcan/libuavcan_drivers/stm32/driver/src/*.cpp'
]
env.CFLAGS += ['-DUAVCAN_STM32_CHIBIOS=1',
'-DUAVCAN_STM32_NUM_IFACES=2']
env.CXXFLAGS += [
'-Wno-error=cast-align',
'-DUAVCAN_STM32_CHIBIOS=1',
'-DUAVCAN_STM32_NUM_IFACES=2'
]
env.DEFINES += [
'UAVCAN_CPP_VERSION=UAVCAN_CPP03',
'UAVCAN_NO_ASSERTIONS=1',
'UAVCAN_NULLPTR=nullptr'
]
env.INCLUDES += [
cfg.srcnode.find_dir('modules/uavcan/libuavcan/include').abspath(),
cfg.srcnode.find_dir('modules/uavcan/libuavcan_drivers/stm32/driver/include').abspath()
]
cfg.get_board().with_uavcan = True
def load_env_vars(env):
'''optionally load extra environment variables from env.py in the build directory'''
print("Checking for env.py")
env_py = os.path.join(env.BUILDROOT, 'env.py')
if not os.path.exists(env_py):
print("No env.py found")
return
e = pickle.load(open(env_py, 'rb'))
for k in e.keys():
v = e[k]
if k == 'ROMFS_FILES':
env.ROMFS_FILES += v
continue
if k in env:
if isinstance(env[k], dict):
a = v.split('=')
env[k][a[0]] = '='.join(a[1:])
print("env updated %s=%s" % (k, v))
elif isinstance(env[k], list):
env[k].append(v)
print("env appended %s=%s" % (k, v))
else:
env[k] = v
print("env added %s=%s" % (k, v))
else:
env[k] = v
|
hep7agon/city-feedback-hub
|
api/migrations/0016_service_code.py
|
Python
|
mit
| 640
| 0
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-04-19 10:32
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0015_auto_20160408_1355'),
]
operations = [
migrations.AlterField(
model_name='feedback',
name='service_code',
field=models.CharField(max_length=120, null=True),
),
migrations.AlterField(
model_name='service',
name='service_code',
field=models.CharField(max_length=120, unique=True),
),
]
|
miracle2k/stgit
|
stgit/commands/sync.py
|
Python
|
gpl-2.0
| 5,549
| 0.007929
|
__copyright__ = """
Copyright (C) 2006, Catalin Marinas <catalin.marinas@gmail.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
published by the Free Software Foundation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import sys, os
import stgit.commands.common
from stgit.argparse import opt
from stgit.commands.common import *
from stgit.utils import *
from stgit.out import *
from stgit import argparse, stack, git
help = 'Synchronise patches with a branch or a series'
kind = 'patch'
usage = ['[options] [<patch1>] [<patch2>] [<patch3>..<patch4>]']
description = """
For each of the specified patches perform a three-way merge with the
same patch in the specified branch or series. The command can be used
for keeping patches on several branches in sync. Note that the
operation may fail for some patches because of conflicts. The patches
in the series must apply cleanly."""
args = [argparse.patch_range(argparse.applied_patches,
argparse.unapplied_patches)]
options = [
opt('-a', '--all', action = 'store_true',
short = 'Synchronise all the applied patches'),
opt('-B', '--ref-branch', args = [argparse.stg_branches],
short = 'Syncronise patches with BRANCH'),
opt('-s', '--series', args = [argparse.files],
short = 'Syncronise patches with SERIES')]
directory = DirectoryGotoToplevel(log = True)
def __check_all():
check_local_changes()
check_conflicts()
check_head_top_equal(crt_series)
def __branch_merge_patch(remote_series, pname):
"""Merge a patch from a remote branch into the current tree.
"""
patch = remote_series.get_patch(pname)
git.merge_recursive(patch.get_bottom(), git.get_head(), patch.get_top())
def __series_merge_patch(base, patchdir, pname):
"""Merge a patch file with the given StGIT patch.
"""
patchfile = os.path.join(patchdir, pname)
git.apply_patch(filename = patchfile, base = base)
def func(parser, options, args):
"""Synchronise a range of patches
"""
if options.ref_branch:
remote_series = stack.Series(options.ref_branch)
if options.ref_branch == crt_series.get_name():
raise CmdException, 'Cannot synchronise with the current branch'
remote_patches = remote_series.get_applied()
# the merge function merge_patch(patch, pname)
merge_patch = lambda patch, pname: \
__branch_merge_patch(remote_series, pname)
elif options.series:
patchdir = os.path.dirname(options.series)
remote_patches = []
f = file(options.series)
for line in f:
p = re.sub('#.*$', '', line).strip()
if not p:
continue
remote_patches.append(p)
f.close()
# the merge function merge_patch(patch, pname)
merge_patch = lambda patch, pname: \
__series_merge_patch(patch.get_bottom(), patchdir, pname)
else:
raise CmdException, 'No remote branch or series specified'
applied = crt_series.get_applied()
unapplied = crt_series.get_unapplied()
if options.all:
patches = applied
elif len(args) != 0:
patches = parse_patches(args, applied + unapplied, len(applied),
ordered = True)
elif applied:
patches = [crt_series.get_current()]
else:
parser.error('no patches applied')
if not patches:
raise CmdException, 'No patches to synchronise'
__check_all()
# only keep the patches to be synchronised
sync_patches = [p for p in patches if p in remote_patches]
if not sync_patches:
raise CmdException, 'No common patches to be synchronised'
# pop to the one before the first patch to be synchronised
first_patch = sync_patches[0]
if first_patch in applied:
to_pop = applied[applied.index(first_patch) + 1:]
if to_pop:
pop_patches(crt_series, to_pop[::-1])
pushed = [first_patch]
else:
to_pop = []
pushed = []
popped = to_pop + [p for p in patches if p in unapplied]
for p in pushed + popped:
if p in popped:
# push this patch
push_patches(crt_series, [p])
if p not in sync_patches:
# nothing to synchronise
continue
# the actual sync
out.start('Synchronising "%s"' % p)
patch = crt_series.get_patch(p)
bottom = patch.get_bottom()
top = patch.get_top()
# reset the patch backup information.
patch.set_top(top, backup = True)
# the actual merging (either from a branch or an external file)
merge_patch(patch, p)
if git.local_changes(verbose = False):
# index (cache) already updated by the git merge. The
# backup information was already reset above
crt_series.refresh_patch(cache_update = False, backup = False,
log = 'sync')
out.done('updated')
else:
out.done()
|
zhenxuan00/mmdgm
|
mlp-mmdgm/gpulearn_z_x.py
|
Python
|
mit
| 50,776
| 0.015499
|
'''
modified by Chongxuan Li (chongxuanli1991@gmail.com)
'''
import sys
sys.path.append('..')
sys.path.append('../../data/')
import os, numpy as np
import scipy.io as sio
import time
import anglepy as ap
import anglepy.paramgraphics as paramgraphics
import anglepy.ndict as ndict
import theano
import theano.tensor as T
from collections import OrderedDict
import preprocessing as pp
import color
def zca_dec(zca_mean, zca_winv, data):
return zca_winv.dot(data) + zca_mean
def labelToMat(y):
label = np.unique(y)
newy = np.zeros((len(y), len(label)))
for i in range(len(y)):
newy[i, y[i]] = 1
return newy.T
def main(n_z, n_hidden, dataset, seed, comment, gfx=True):
# Initialize logdir
import time
pre_dir = 'models/gpulearn_z_x_mnist_96-(500, 500)'
if os.environ.has_key('pretrain') and bool(int(os.environ['pretrain'])) == True:
comment+='_pre-train'
if os.environ.has_key('prior') and bool(int(os.environ['prior'])) == True:
comment+='_prior'
pre_dir+='_prior'
if os.environ.has_key('cutoff'):
comment+=('_'+str(int(os.environ['cutoff'])))
if os.environ.has_key('train_residual') and bool(int(os.environ['train_residual'])) == True:
comment+='_train-residual'
pre_dir+='_train-residual'
if os.environ.has_key('sigma_square'):
comment+=('_'+str(float(os.environ['sigma_square'])))
pre_dir+=('_'+str(float(os.environ['sigma_square'])))
pre_dir+='/'
logdir = 'results/gpulearn_z_x_'+dataset+'_'+str(n_z)+'-'+str(n_hidden)+comment+'_'+str(int(time.time()))+'/'
if not os.path.exists(logdir): os.makedirs(logdir)
print 'logdir:', logdir
print 'gpulearn_z_x', n_z, n_hidden, dataset, seed
with open(logdir+'hook.txt', 'a') as f:
print >>f, 'learn_z_x', n_z, n_hidden, dataset, seed
np.random.seed(seed)
gfx_freq = 1
weight_decay = 0
# Init data
if dataset == 'mnist':
import anglepy.data.mnist as mnist
# MNIST
size = 28
train_x, train_y, valid_x, valid_y, test_x, test_y = mnist.load_numpy(size)
f_enc, f_dec = pp.Identity()
if os.environ.has_key('prior') and bool(int(os.environ['prior'])) == True:
color.printBlue('Loading prior')
mnist_prior = sio.loadmat('data/mnist_prior/mnist_prior.mat')
train_mean_prior = mnist_prior['z_train']
test_mean_prior = mnist_prior['z_test']
valid_mean_prior = mnist_prior['z_valid']
else:
train_mean_prior = np.zeros((n_z,train_x.shape[1]))
test_mean_prior = np.zeros((n_z,test_x.shape[1]))
valid_mean_prior = np.zeros((n_z,valid_x.shape[1]))
x = {'x': train_x.astype(np.float32), 'mean_prior': train_mean_prior.astype(np.float32)}
x_train = x
x_valid = {'x': valid_x.astype(np.float32), 'mean_prior': valid_mean_prior.astype(np.float32)}
x_test = {'x': test_x.astype(np.float32), 'mean_prior': test_mean_prior.astype(np.float32)}
L_valid = 1
dim_input = (size,size)
n_x = size*size
type_qz = 'gaussianmarg'
type_pz = 'gaussianmarg'
nonlinear = 'softplus'
type_px = 'bernoulli'
n_train = 50000
n_test = 10000
n_valid = 10000
n_batch = 1000
colorImg = False
bernoulli_x = True
byteToFloat = False
weight_decay = float(n_batch)/n_train
elif dataset == 'higgs':
size = 28
f_enc, f_dec = pp.Identity()
inputfile = 'data/higgs/HIGGS.csv'
print 'loading file.'
x = np.loadtxt(inputfile, dtype='f4', delimiter=',')
print 'done.'
y = x[:,0].reshape((-1,1))
x = x[:,1:]
x = np.array(x, dtype='float32')
y = np.array(y, dtype='float32')
n_train = 10000000
n_valid = 500000
n_test = 500000
n_batch = 1000
derived_feat = 'all'
if os.environ.has_key('derived_feat'):
derived_feat = os.environ['derived_feat']
color.printBlue(derived_feat)
if derived_feat == 'high':
# Only the 7 high level features.
x = x[:, 21:28]
elif derived_feat == 'low':
# Only the 21 raw features.
x = x[:, 0:21]
else:
pass
train_x = x[0:n_train, :].T
y_train = y[0:n_train, :]
valid_x = x[n_train:n_train+n_valid, :].T
y_valid = y[n_train:n_train+n_valid, :]
test_x = x[n_train+n_valid:n_train+n_valid+n_test, :].T
y_test = y[n_train+n_valid:n_train+n_valid+n_test, :]
n_y = 2
n_x = train_x.shape[0]
train_mean_prior = np.zeros((n_z,train_x.shape[1]))
test_mean_prior = np.zeros((n_z,test_x.shape[1]))
valid_mean_prior = np.zeros((n_z,valid_x.shape[1]))
x = {'x': train_x.astype(np.float32), 'mean_prior': train_mean_prior.astype(np.float32)}
x_train = x
x_valid = {'x': valid_x.astype(np.float32), 'mean_prior': valid_mean_prior.astype(np.float32)}
x_test = {'x': test_x.astype(np.float32), 'mean_prior': test_mean_prior.astype(np.float32)}
type_qz = 'gaussianmarg'
type_pz = 'gaussianmarg'
nonlinear = 'tanh'
if os.environ.has_key('nonlinear'):
nonlinear = os.environ['nonlinear']
color.printBlue(nonlinear)
L_valid = 1
dim_input = (1,size)
type_px = 'gaussian'
colorImg = False
bernoulli_x = False
byteToFloat = False
weight_decay = float(n_batch)/n_train
elif dataset == 'cifar10':
import anglepy.data.cifar10 as cifar10
size = 32
train_x, train_y, test_x, test_y = cifar10.load_numpy()
train_x = train_x.astype(np.float32).T
test_x = test_x.astype(np.float32).T
##
f_enc, f_dec = pp.Identity()
if os.environ.has_key('prior') and bool(int(os.environ['prior'])) == True:
color.printBlue('Loading prior')
cifar_prior = sio.loadmat('data/cifar10_prior/cifar10_prior.mat')
train_mean_prior = cifar_prior['z_train']
test_mean_prior = cifar_prior['z_test']
else:
train_mean_prior = np.zeros((n_z,train_x.shape[1]))
test_mean_prior = np.zeros((n_z,test_x.shape[1]))
x = {'x': train_x.astype(np.float32), 'mean_prior': train_mean_prior.astype(np.float32)}
x_train = x
x_test = {'x': test_x.astype(np.float32), 'mean_prior': test_mean_prior.astype(np.float32)}
x_valid = x_test
L_valid = 1
n_y = 10
dim_input = (size,size)
n_x = x['x'].shape[0]
type_qz = 'gaussianmarg'
type_pz = 'gaussianmarg'
nonlinear = 'softplus'
type_px = 'gaussian'
if os.environ.has_key('type_px'):
type_px = os.environ['type_px']
color.printBlue('Generative type: '+type_px)
n_train = 50000
n_test = 10000
n_batch = 5000
colorImg = True
bernoulli_x = False
byteToFloat = False
#weight_decay = float(n_batch)/n_train
elif dataset == 'cifar10_zca':
import anglepy.data.cifar10 as cifar10
size = 32
train_x, train_y, test_x, test_y = cifar10.load_numpy()
train_x = train_x.astype(np.float32).T
test_x = test_x.astype(np.float32).T
##
f_enc, f_dec = pp.Identity()
zca_mean, zca_w, zca_winv = cifar10.zca(train_x)
train_x = zca_w.dot(train_x-zca_mean)
test_x = zca_w.dot(test_x-zca_mean)
if os.environ.has_key('prior') and bool(int(os.environ['prior'])) == True:
color.printBlue('Loading prior')
cifar_prior = sio.loadmat('data/cifar10_prior/cifar10_prior.mat')
train_mean_prior = cifar_prior['z_train']
test_mean_prior = cifar_prior['z_test']
else:
train_mean_prior = np.zeros((n_z,train_x.shape[1]))
test_mean_prior = np.zeros((n_z,test_x.shape[1]))
x = {'x': train_x.astype(np.float32), 'mean_prior': train_mean_prior.astype(np.float32)}
x_train = x
x_test = {'x': test_x.astype(np.float32), 'mean_prior': test_mean_prior.astype(np.float32)}
x_valid = x_test
L_valid = 1
dim_input = (size,size)
n_y = 10
n_x = x['x'].shape[0]
type_qz = 'gaussianmarg'
type_pz = 'gaussianmarg'
nonlinear = 'softplus'
type_px = 'gaussian'
n_train = 50000
n_test = 10000
n_batch = 5000
colorImg = True
bernoulli_x = False
byteToFloat = False
if os.environ.has_key('type_px'):
            type_px = os.environ['type_px']
|
dimonaks/siman
|
siman/structure_functions.py
|
Python
|
gpl-2.0
| 8,328
| 0.018492
|
#!/usr/bin/env python3
"""
Author: Kartamyshev A.I. (Darth Feiwante)
"""
def inherit_icalc_isotropic(new_structure = '', start_new_version = None, base_calculation = (None, None, None), database = None, min_mult = 1, max_mult = 1, num_points = 2, geo_folder = '', it_folder = '', override = False):
"""
    This function makes a set of structures uniformly scaled from the initial one within the given range of deformation
INPUT:
    - new_structure (str) - arbitrary name for your crystal structure
- start_new_version (int) - start version for newly built structures
- base_calculation (tuple) - tuple describing initial Calculation object in form ('structure', 'set', 'version')
- database (dict) - dictionary with the project's results
    - min_mult (float) - minimal deformation (multiplier) of the initial structure
    - max_mult (float) - maximal deformation (multiplier) of the initial structure
- num_points (int) - number of newly built structures
- geo_folder (str) - path to the folder to save *.geo files of newly built structures
    - it_folder (str) - section folder
- override (boolean) - if True then the old structures with the same names will be overwritten
RETURN:
None
SOURCE:
None
TODO:
Some improvements
"""
from calc_manage import inherit_icalc
step = (max_mult - min_mult)/(num_points - 1)
mult_list = [min_mult+step*i for i in range(num_points)]
version = start_new_version
for j in mult_list:
inherit_icalc('isotropic', new_structure, version, base_calculation, database, mult_rprimd = j, geo_folder=geo_folder, override=override)
version += 1
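# Hedged usage sketch (editor's addition, not part of the original module):
# the structure name, folders and base calculation below are hypothetical and
# only illustrate how the multiplier grid is generated.
def _example_isotropic_usage(database):
    # With min_mult=0.98, max_mult=1.02 and num_points=5 the step is
    # (1.02 - 0.98)/(5 - 1) = 0.01, so structures are built for multipliers
    # 0.98, 0.99, 1.00, 1.01 and 1.02, one new version per multiplier,
    # starting from version 1.
    inherit_icalc_isotropic(new_structure = 'Ti_scaled', start_new_version = 1,
                            base_calculation = ('Ti', 'static', 1), database = database,
                            min_mult = 0.98, max_mult = 1.02, num_points = 5,
                            geo_folder = 'geo/', it_folder = 'Ti/', override = True)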
def inherit_icalc_c_a(new_structure = '', start_new_version = None, base_calculation = (None, None, None), database = None, min_mult_a = 1, max_mult_a = 1, num_points_a = 2,
                      min_mult_c = 1, max_mult_c = 1, num_points_c = 2, geo_folder='', it_folder ='', override = False):
"""
    This function makes a set of structures deformed uniformly in the plane spanned by the lattice vectors 1 and 2 and deformed separately along the lattice vector 3
INPUT:
    - new_structure (str) - arbitrary name for your crystal structure
- start_new_version (int) - start version for newly built structures
- base_calculation (tuple) - tuple describing initial Calculation object in form ('structure', 'set', 'version')
- database (dict) - dictionary with the project's results
- min_mult_a (float) - minimal simultaneous deformation of the vector 1 and 2 of the final structure from "base_calculation"
- max_mult_a (float) - maximal simultaneous deformation of the vector 1 and 2 of the final structure from "base_calculation"
- num_points_a (int) - number of different simultaneous deformations of the vectors 1 and 2
- min_mult_c (float) - minimal deformation of the vector 3 of the structure from "base_calculation"
- max_mult_c (float) - maximal deformation of the vector 3 of the structure from "base_calculation"
- num_points_c (int) - number of different deformations of the vector 3 from "base_calculation"
- geo_folder (str) - path to the folder to save *.geo files of newly built structures
    - it_folder (str) - section folder
- override (boolean) - if True then the old structures with the same names will be overwritten
RETURN:
None
SOURCE:
None
TODO:
Some improvements
"""
    from calc_manage import inherit_icalc
if num_points_a > 1:
# Lattice parameter a
step_a = (max_mult_a - min_mult_a)/(num_points_a - 1)
mult_list_a = [min_mult_a+step_a*i for i in range(num_points_a)]
if num_points_c > 1:
# Lattice parameter c
step_c = (max_mult_c - min_mult_c)/(num_points_c - 1)
mult_list_c = [min_mult_c+step_c*i for i in range(num_points_c)]
print('database', database)
version = start_new_version
if num_points_a > 1 and num_points_c > 1:
for j in mult_list_a:
for k in mult_list_c:
inherit_icalc('c_a', new_structure, version, base_calculation, database, mult_a = j, mult_c = k, geo_folder=geo_folder)
version += 1
elif num_points_c == 1:
for j in mult_list_a:
inherit_icalc('c_a', new_structure, version, base_calculation, database, mult_a = j, mult_c = 1, geo_folder=geo_folder, override=override)
            version += 1
elif num_points_a == 1:
for j in mult_list_c:
            inherit_icalc('c_a', new_structure, version, base_calculation, database, mult_a = 1, mult_c = j, geo_folder=geo_folder, override=override)
version += 1
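# Hedged usage sketch (editor's addition, not part of the original module):
# argument values are illustrative only.
def _example_c_a_usage(database):
    # a-multipliers 0.99, 1.00, 1.01 combined with c-multipliers 0.98, 1.00, 1.02
    # give a 3 x 3 grid, i.e. nine new versions starting from version 1.
    inherit_icalc_c_a(new_structure = 'Ti_c_a', start_new_version = 1,
                      base_calculation = ('Ti', 'static', 1), database = database,
                      min_mult_a = 0.99, max_mult_a = 1.01, num_points_a = 3,
                      min_mult_c = 0.98, max_mult_c = 1.02, num_points_c = 3,
                      geo_folder = 'geo/', it_folder = 'Ti/')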
def inherit_icalc_x_y(new_structure = '', start_new_version = None, base_calculation = (None, None, None), database = None,
min_mult_a = 1, max_mult_a = 1, num_points_a = 2, min_mult_b = 1, max_mult_b = 1,num_points_b = 2, geo_folder='', it_folder ='',
override = False):
"""
    This function makes a set of structures deformed separately along the lattice vectors 1 and 2
INPUT:
    - new_structure (str) - arbitrary name for your crystal structure
- start_new_version (int) - start version for newly built structures
    - base_calculation (tuple) - tuple describing initial Calculation object in form ('structure', 'set', 'version')
- database (dict) - dictionary with the project's results
- min_mult_a (float) - minimal deformation of the vector 1 of the structure from "base_calculation"
- max_mult_a (float) - maximal deformation of the vector 1 of the structure from "base_calculation"
    - num_points_a (int) - number of different deformations of the vector 1
- min_mult_b (float) - minimal deformation of the vector 2 of the structure from "base_calculation"
- max_mult_b (float) - maximal deformation of the vector 2 of the structure from "base_calculation"
- num_points_b (int) - number of different deformations of the vector 2
- geo_folder (str) - path to the folder to save *.geo files of newly built structures
    - it_folder (str) - section folder
- override (boolean) - if True then the old structures with the same names will be overwritten
RETURN:
None
SOURCE:
None
TODO:
Some improvements
"""
from calc_manage import inherit_icalc
if num_points_a > 1:
# Coordinate x in rprimd
step_a = (max_mult_a - min_mult_a)/(num_points_a - 1)
mult_list_a = [min_mult_a+step_a*i for i in range(num_points_a)]
if num_points_b > 1:
# Coordinate y in rprimd
step_b = (max_mult_b - min_mult_b)/(num_points_b - 1)
mult_list_b = [min_mult_b+step_b*i for i in range(num_points_b)]
version = start_new_version
if num_points_a > 1 and num_points_b > 1:
for j in mult_list_a:
for k in mult_list_b:
inherit_icalc('xy', new_structure, version, base_calculation, database, mult_a = j, mult_b = k, geo_folder=geo_folder, it_folder = it_folder, override=override)
version += 1
elif num_points_b == 1:
for j in mult_list_a:
inherit_icalc('xy', new_structure, version, base_calculation, database, mult_a = j, mult_b = 1, geo_folder=geo_folder, it_folder = it_folder, override=override)
version += 1
elif num_points_a == 1:
for j in mult_list_b:
            inherit_icalc('xy', new_structure, version, base_calculation, database, mult_a = 1, mult_b = j, geo_folder=geo_folder, it_folder = it_folder, override=override)
            version += 1
|
glove747/liberty-neutron
|
neutron/tests/unit/agent/linux/test_external_process.py
|
Python
|
apache-2.0
| 11,219
| 0
|
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import os.path
from neutron.agent.linux import external_process as ep
from neutron.common import utils as common_utils
from neutron.tests import base
from neutron.tests import tools
TEST_UUID = 'test-uuid'
TEST_SERVICE = 'testsvc'
TEST_PID = 1234
class BaseTestProcessMonitor(base.BaseTestCase):
def setUp(self):
super(BaseTestProcessMonitor, self).setUp()
self.log_patch = mock.patch("neutron.agent.linux.external_process."
"LOG.error")
        self.error_log = self.log_patch.start()
self.spawn_patch = mock.patch("eventlet.spawn")
        self.eventlet_spawn = self.spawn_patch.start()
# create a default process monitor
self.create_child_process_monitor('respawn')
def create_child_process_monitor(self, action):
conf = mock.Mock()
conf.AGENT.check_child_processes_action = action
conf.AGENT.check_child_processes = True
self.pmonitor = ep.ProcessMonitor(
config=conf,
resource_type='test')
def get_monitored_process(self, uuid, service=None):
monitored_process = mock.Mock()
self.pmonitor.register(uuid=uuid,
service_name=service,
monitored_process=monitored_process)
return monitored_process
class TestProcessMonitor(BaseTestProcessMonitor):
def test_error_logged(self):
pm = self.get_monitored_process(TEST_UUID)
pm.active = False
self.pmonitor._check_child_processes()
self.assertTrue(self.error_log.called)
def test_exit_handler(self):
self.create_child_process_monitor('exit')
pm = self.get_monitored_process(TEST_UUID)
pm.active = False
with mock.patch.object(ep.ProcessMonitor,
'_exit_handler') as exit_handler:
self.pmonitor._check_child_processes()
exit_handler.assert_called_once_with(TEST_UUID, None)
def test_register(self):
pm = self.get_monitored_process(TEST_UUID)
self.assertEqual(len(self.pmonitor._monitored_processes), 1)
self.assertIn(pm, self.pmonitor._monitored_processes.values())
def test_register_same_service_twice(self):
self.get_monitored_process(TEST_UUID)
self.get_monitored_process(TEST_UUID)
self.assertEqual(len(self.pmonitor._monitored_processes), 1)
def test_register_different_service_types(self):
self.get_monitored_process(TEST_UUID)
self.get_monitored_process(TEST_UUID, TEST_SERVICE)
self.assertEqual(len(self.pmonitor._monitored_processes), 2)
def test_unregister(self):
self.get_monitored_process(TEST_UUID)
self.pmonitor.unregister(TEST_UUID, None)
self.assertEqual(len(self.pmonitor._monitored_processes), 0)
def test_unregister_unknown_process(self):
self.pmonitor.unregister(TEST_UUID, None)
self.assertEqual(len(self.pmonitor._monitored_processes), 0)
class TestProcessManager(base.BaseTestCase):
def setUp(self):
super(TestProcessManager, self).setUp()
self.execute_p = mock.patch('neutron.agent.common.utils.execute')
self.execute = self.execute_p.start()
self.delete_if_exists = mock.patch(
'oslo_utils.fileutils.delete_if_exists').start()
self.ensure_dir = mock.patch.object(
common_utils, 'ensure_dir').start()
self.conf = mock.Mock()
self.conf.external_pids = '/var/path'
def test_processmanager_ensures_pid_dir(self):
pid_file = os.path.join(self.conf.external_pids, 'pid')
ep.ProcessManager(self.conf, 'uuid', pid_file=pid_file)
self.ensure_dir.assert_called_once_with(self.conf.external_pids)
def test_enable_no_namespace(self):
callback = mock.Mock()
callback.return_value = ['the', 'cmd']
with mock.patch.object(ep.ProcessManager, 'get_pid_file_name') as name:
name.return_value = 'pidfile'
with mock.patch.object(ep.ProcessManager, 'active') as active:
active.__get__ = mock.Mock(return_value=False)
manager = ep.ProcessManager(self.conf, 'uuid')
manager.enable(callback)
callback.assert_called_once_with('pidfile')
self.execute.assert_called_once_with(['the', 'cmd'],
check_exit_code=True,
extra_ok_codes=None,
run_as_root=False,
log_fail_as_error=True)
def test_enable_with_namespace(self):
callback = mock.Mock()
callback.return_value = ['the', 'cmd']
with mock.patch.object(ep.ProcessManager, 'get_pid_file_name') as name:
name.return_value = 'pidfile'
with mock.patch.object(ep.ProcessManager, 'active') as active:
active.__get__ = mock.Mock(return_value=False)
manager = ep.ProcessManager(self.conf, 'uuid', namespace='ns')
with mock.patch.object(ep, 'ip_lib') as ip_lib:
manager.enable(callback)
callback.assert_called_once_with('pidfile')
ip_lib.assert_has_calls([
mock.call.IPWrapper(namespace='ns'),
mock.call.IPWrapper().netns.execute(
['the', 'cmd'], addl_env=None, run_as_root=False)])
def test_enable_with_namespace_process_active(self):
callback = mock.Mock()
callback.return_value = ['the', 'cmd']
with mock.patch.object(ep.ProcessManager, 'active') as active:
active.__get__ = mock.Mock(return_value=True)
manager = ep.ProcessManager(self.conf, 'uuid', namespace='ns')
with mock.patch.object(ep, 'ip_lib'):
manager.enable(callback)
self.assertFalse(callback.called)
def test_disable_no_namespace(self):
with mock.patch.object(ep.ProcessManager, 'pid') as pid:
pid.__get__ = mock.Mock(return_value=4)
with mock.patch.object(ep.ProcessManager, 'active') as active:
active.__get__ = mock.Mock(return_value=True)
manager = ep.ProcessManager(self.conf, 'uuid')
with mock.patch.object(ep, 'utils') as utils:
manager.disable()
utils.assert_has_calls([
mock.call.execute(['kill', '-9', 4],
run_as_root=True)])
def test_disable_namespace(self):
with mock.patch.object(ep.ProcessManager, 'pid') as pid:
pid.__get__ = mock.Mock(return_value=4)
with mock.patch.object(ep.ProcessManager, 'active') as active:
active.__get__ = mock.Mock(return_value=True)
manager = ep.ProcessManager(self.conf, 'uuid', namespace='ns')
with mock.patch.object(ep, 'utils') as utils:
manager.disable()
utils.assert_has_calls([
mock.call.execute(['kill', '-9', 4],
run_as_root=True)])
def test_disable_not_active(self):
with mock.patch.object(ep.ProcessManager, 'pid') as pid:
pid.__get__ = mock.Mock(return_value=4)
            with mock.patch.object(ep.ProcessManager, 'active') as active:
|
upTee/upTee
|
uptee/accounts/forms.py
|
Python
|
bsd-3-clause
| 7,709
| 0.004281
|
from datetime import date
import os
from django.contrib.auth.models import User
from django import forms
from django.utils import timezone
from fields import Html5CaptchaField
from html5input import *
from settings import AVAILABLE_TEMPLATES, TEMPLATE_DIRS
from accounts.models import UserProfile
class SettingsUserForm(forms.ModelForm):
email = forms.EmailField(required=True, widget=Html5EmailInput(attrs={'required': None}))
class Meta:
model = User
fields = ('first_name', 'email')
class SettingsUserprofileForm(forms.ModelForm):
template_choices = [(template_[0], template_[1]) for template_ in AVAILABLE_TEMPLATES]
template = forms.ChoiceField(choices=template_choices)
BOOL_CHOICES = (
(True, 'Show to everyone'),
(False, 'Show only to registered users'),
)
def __init__(self, *args, **kwargs):
super(SettingsUserprofileForm, self).__init__(*args, **kwargs)
if not self.instance.user.is_staff:
self.fields['template'].choices = [(template_[0], template_[1]) for template_ in AVAILABLE_TEMPLATES if template_[2] or template_[0] == self.instance.template]
self.initial['template'] = self.instance.template
class Meta:
model = UserProfile
fields = ('publish_name', 'ingame_name', 'publish_ingame_name', 'website', 'publish_website', 'contact', 'publish_contact', 'fav_mod', 'publish_fav_mod', 'fav_map', 'publish_fav_map', 'gender', 'publish_gender', 'birthday', 'publish_birthday', 'template')
widgets = {
'publish_name': forms.Select(choices=((True, 'Show to everyone'), (False, 'Show only to registered users'))),
'publish_ingame_name': forms.Select(choices=((True, 'Show to everyone'), (False, 'Show only to registered users'))),
'publish_website': forms.Select(choices=((True, 'Show to everyone'), (False, 'Show only to registered users'))),
'publish_contact': forms.Select(choices=((True, 'Show to everyone'), (False, 'Show only to registered users'))),
'publish_fav_mod': forms.Select(choices=((True, 'Show to everyone'), (False, 'Show only to registered users'))),
'publish_fav_map': forms.Select(choices=((True, 'Show to everyone'), (False, 'Show only to registered users'))),
'publish_gender': forms.Select(choices=((True, 'Show to everyone'), (False, 'Show only to registered users'))),
'birthday': Html5SelectDateWidget(years=range(1930, timezone.now().year)),
'publish_birthday': forms.Select(choices=((True, 'Show to everyone'), (False, 'Show only to registered users'))),
}
def clean_birthday(self):
birthday = self.cleaned_data['birthday']
if birthday and birthday > date.today():
raise forms.ValidationError('You cannot be born in the future.')
return birthday
def clean_template(self):
template = self.cleaned_data['template']
found = False
for path in TEMPLATE_DIRS:
if os.path.exists(os.path.join(path, template)):
found = True
break
if not found:
raise forms.ValidationError('Template does not exist. Please contact an admin.')
return template
class PasswordChangeForm(forms.Form):
old_password = forms.CharField(label='Old password',
widget=forms.PasswordInput(render_value=False, attrs={'pattern': r'.{8,}', 'title': '8 characters are required', 'required': None}))
new_password1 = forms.CharField(label='New password', min_length=8,
widget=forms.PasswordInput(render_value=False, attrs={'pattern': r'.{8,}', 'title': '8 characters are required', 'required': None}))
new_password2 = forms.CharField(label='New password again', min_length=8,
widget=forms.PasswordInput(render_value=False, attrs={'pattern': r'.{8,}', 'title': '8 characters are required', 'required': None}))
def __init__(self, *args, **kwargs):
self.current_user = kwargs.pop('current_user', None)
if self.current_user is None:
raise AttributeError('current_user missing')
super(PasswordChangeForm, self).__init__(*args, **kwargs)
def clean_old_password(self):
old_password = self.cleaned_data['old_password']
if not self.current_user.check_password(old_password):
raise forms.ValidationError('Please enter your current password correctly.')
return old_password
def clean_new_password2(self):
new_password1 = self.cleaned_data['new_password1']
new_password2 = self.cleaned_data['new_password2']
if new_password1 != new_password2:
raise forms.ValidationError("The password doesn't match the other.")
return new_password2
def save(self):
self.current_user.set_password(self.cleaned_data['new_password1'])
self.current_user.save()
class RecoverPasswordForm(forms.Form):
username = forms.CharField(label='Username', widget=forms.TextInput(attrs={'required': None}))
captcha = Html5CaptchaField(required=True)
def clean_username(self):
username = self.cleaned_data['username']
user = User.objects.filter(is_active=True, username=username)
if not user:
raise forms.ValidationError("No user with this name exists.")
return username
class RecoverUsernameForm(forms.Form):
email = forms.EmailField(label='email', widget=Html5EmailInput(attrs={'required': None}))
captcha = Html5CaptchaField(required=True)
def clean_email(self):
email = self.cleaned_data['email']
        user = User.objects.filter(is_active=True, email=email)
if not user:
raise forms.ValidationError("No user with this email exists.")
return email
class RegisterForm(forms.Form):
    username = forms.RegexField(label="Username", min_length=3, regex=r'^[\w.@+-]+$',
error_messages={'invalid': 'Required. 30 characters or fewer. Letters, numbers and @/./+/-/_ characters.'},
widget=forms.TextInput(attrs={'pattern': r'[\w.@+-]{3,30}', 'title': '30 characters or fewer. Letters, numbers and @/./+/-/_ characters', 'required': None, 'placeholder': 'Username'}))
password1 = forms.CharField(label='Password', min_length=8,
widget=forms.PasswordInput(render_value=False, attrs={'pattern': r'.{8,}', 'title': '8 characters are required', 'required': None, 'placeholder': 'Password'}))
password2 = forms.CharField(label='Password again', min_length=8,
widget=forms.PasswordInput(render_value=False, attrs={'pattern': r'.{8,}', 'title': '8 characters are required', 'required': None, 'placeholder': 'Password again'}))
email = forms.EmailField(required=True, widget=Html5EmailInput(attrs={'required': None, 'placeholder': 'Email'}))
captcha = Html5CaptchaField(required=True)
def clean_username(self):
username = self.cleaned_data['username']
users = User.objects.filter(username=username)
if users:
raise forms.ValidationError(
u"A user with this username already exists.")
return username
def clean_email(self):
email = self.cleaned_data['email']
users = User.objects.filter(email=email)
if users:
raise forms.ValidationError(
u"A user with this email address already exists.")
return email
def clean_password2(self):
password1 = self.cleaned_data.get('password1')
password2 = self.cleaned_data.get('password2')
if password1 != password2:
raise forms.ValidationError(
u"The password doesn't match the other.")
return password2
|
makerbot/ReplicatorG
|
skein_engines/skeinforge-47/skeinforge_application/skeinforge_plugins/craft_plugins/home.py
|
Python
|
gpl-2.0
| 8,040
| 0.023383
|
"""
This page is in the table of contents.
Plugin to home the tool at beginning of each layer.
The home manual page is at:
http://fabmetheus.crsndoo.com/wiki/index.php/Skeinforge_Home
==Operation==
The default 'Activate Home' checkbox is on. When it is on, the functions described below will work, when it is off, nothing will be done.
==Settings==
===Name of Home File===
Default: home.gcode
At the beginning of each layer, home will add the commands of a gcode script with the name of the "Name of Home File" setting, if one exists. Home does not care if the text file names are capitalized, but some file systems do not handle file name cases properly, so to be on the safe side you should give them lower case names. Home looks for those files in the alterations folder in the .skeinforge folder in the home directory. If it doesn't find the file it then looks in the alterations folder in the skeinforge_plugins folder.
==Examples==
The following examples home the file Screw Holder Bottom.stl. The examples are run in a terminal in the folder which contains Screw Holder Bottom.stl and home.py.
> python home.py
This brings up the home dialog.
> python home.py Screw Holder Bottom.stl
The home tool is parsing the file:
Screw Holder Bottom.stl
..
The home tool has created the file:
.. Screw Holder Bottom_home.gcode
"""
from __future__ import absolute_import
#Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module.
import __init__
from fabmetheus_utilities.fabmetheus_tools import fabmetheus_interpret
from fabmetheus_utilities.vector3 import Vector3
from fabmetheus_utilities import archive
from fabmetheus_utilities import euclidean
from fabmetheus_utilities import gcodec
from fabmetheus_utilities import settings
from skeinforge_application.skeinforge_utilities import skeinforge_craft
from skeinforge_application.skeinforge_utilities import skeinforge_polyfile
from skeinforge_application.skeinforge_utilities import skeinforge_profile
import math
import os
import sys
__author__ = 'Enrique Perez (perez_enrique@yahoo.com)'
__date__ = '$Date: 2008/21/04 $'
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
def getCraftedText( fileName, text, repository = None ):
"Home a gcode linear move file or text."
return getCraftedTextFromText(archive.getTextIfEmpty(fileName, text), repository)
def getCraftedTextFromText( gcodeText, repository = None ):
"Home a gcode linear move text."
if gcodec.isProcedureDoneOrFileIsEmpty( gcodeText, 'home'):
return gcodeText
if repository == None:
repository = settings.getReadRepository( HomeRepository() )
if not repository.activateHome.value:
return gcodeText
return HomeSkein().getCraftedGcode(gcodeText, repository)
def getNewRepository():
'Get new repository.'
return HomeRepository()
def writeOutput(fileName, shouldAnalyze=True):
"Home a gcode linear move file. Chain home the gcode if it is not already homed."
skeinforge_craft.writeChainTextWithNounMessage(fileName, 'home', shouldAnalyze)
class HomeRepository:
"A class to handle the home settings."
def __init__(self):
"Set the default settings, execute title & settings fileName."
skeinforge_profile.addListsToCraftTypeRepository('skeinforge_application.skeinforge_plugins.craft_plugins.home.html', self)
self.fileNameInput = settings.FileNameInput().getFromFileName( fabmetheus_interpret.getGNUTranslatorGcodeFileTypeTuples(), 'Open File for Home', self, '')
self.openWikiManualHelpPage = settings.HelpPage().getOpenFromAbsolute('http://fabmetheus.crsndoo.com/wiki/index.php/Skeinforge_Home')
self.activateHome = settings.BooleanSetting().getFromValue('Activate Home', self, True )
self.nameOfHomeFile = settings.StringSetting().getFromValue('Name of Home File:', self, 'home.gcode')
self.executeTitle = 'Home'
def execute(self):
"Home button has been clicked."
fileNames = skeinforge_polyfile.getFileOrDirectoryTypesUnmodifiedGcode(self.fileNameInput.value, fabmetheus_interpret.getImportPluginFileNames(), self.fileNameInput.wasCancelled)
for fileName in fileNames:
writeOutput(fileName)
class HomeSkein:
"A class to home a skein of extrusions."
def __init__(self):
self.distanceFeedRate = gcodec.DistanceFeedRate()
self.extruderActive = False
self.highestZ = None
self.homeLines = []
self.layerCount = settings.LayerCount()
self.lineIndex = 0
self.lines = None
self.oldLocation = None
self.shouldHome = False
self.travelFeedRateMinute = 957.0
def addFloat( self, begin, end ):
"Add dive to the original height."
beginEndDistance = begin.distance(end)
alongWay = self.absolutePerimeterWidth / beginEndDistance
closeToEnd = euclidean.getIntermediateLocation( alongWay, end, begin )
closeToEnd.z = self.highestZ
self.distanceFeedRate.addLine( self.distanceFeedRate.getLinearGcodeMovementWithFeedRate( self.travelFeedRateMinute, closeToEnd.dropAxis(), closeToEnd.z ) )
def addHomeTravel( self, splitLine ):
"Add the home travel gcode."
location = gcodec.getLocationFromSplitLine(self.oldLocation, splitLine)
self.highestZ = max( self.highestZ, location.z )
if not self.shouldHome:
return
self.shouldHome = False
if self.oldLocation == None:
return
if self.extruderActive:
self.distanceFeedRate.addLine('M103')
self.addHopUp( self.oldLocation )
self.distanceFeedRate.addLinesSetAbsoluteDistanceMode(self.homeLines)
self.addHopUp( self.oldLocation )
self.addFloat( self.oldLocation, location )
if self.extruderActive:
self.distanceFeedRate.addLine('M101')
def addHopUp(self, location):
"Add hop to highest point."
locationUp = Vector3( location.x, location.y, self.highestZ )
self.distanceFeedRate.addLine( self.distanceFeedRate.getLinearGcodeMovementWithFeedRate( self.travelFeedRateMinute, locationUp.dropAxis(), locationUp.z ) )
def getCraftedGcode( self, gcodeText, repository ):
"Parse gcode text and store the home gcode."
self.repository = repository
self.homeLines = settings.getAlterationFileLines(repository.nameOfHomeFile.value)
if len(self.homeLines) < 1:
return gcodeText
self.lines = archive.getTextLines(gcodeText)
self.parseInitialization( repository )
for self.lineIndex in xrange(self.lineIndex, len(self.lines)):
line = self.lines[self.lineIndex]
self.parseLine(line)
return self.distanceFeedRate.output.getvalue()
def parseInitialization( self, repository ):
'Parse gcode initialization and store the parameters.'
for self.lineIndex in xrange(len(self.lines)):
line = self.lines[self.lineIndex]
splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
firstWord = gcodec.getFirstWord(splitLine)
self.distanceFeedRate.parseSplitLine(firstWord, splitLine)
if firstWord == '(</extruderInitialization>)':
self.distanceFeedRate.addTagBracketedProcedure('home')
return
elif firstWord == '(<perimeterWidth>':
self.absolutePerimeterWidth = abs(float(splitLine[1]))
elif firstWord == '(<travelFeedRatePerSecond>':
self.travelFeedRateMinute = 60.0 * float(splitLine[1])
self.distanceFeedRate.addLine(line)
def parseLine(self, line):
"Parse a gcode line and add it to
|
the bevel gcode."
splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
if len(splitLine) < 1:
return
firstWord = splitLine[0]
if firstWord == 'G1':
self.addHomeTravel(splitLine)
self.oldLocation = gcodec.getLocationFromSplitLine(self.oldLocation, splitLine)
elif firstWord == '(<layer>':
self.layerCount.printProgressIncrement('home')
if len(self.homeLines) > 0:
self.shouldHome = True
elif firstWord == 'M101':
self.extruderActive = True
elif firstWord == 'M103':
self.extruderActive = False
self.distanceFeedRate.addLine(line)
def main():
"Display the home dialog."
if len(sys.argv) > 1:
writeOutput(' '.join(sys.argv[1 :]))
else:
settings.startMainLoopFromConstructor(getNewRepository())
if __name__ == "__main__":
main()
|
egabancho/invenio
|
invenio/modules/jsonalchemy/testsuite/test_parser.py
|
Python
|
gpl-2.0
| 10,487
| 0.00143
|
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2014, 2015 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Unit tests for the parser engine."""
__revision__ = \
"$Id$"
import tempfile
from flask.ext.registry import PkgResourcesDirDiscoveryRegistry, \
ImportPathRegistry, RegistryProxy
from invenio.base.wrappers import lazy_import
from invenio.testsuite import make_test_suite, run_test_suite, InvenioTestCase
Field_parser = lazy_import('invenio.modules.jsonalchemy.parser:FieldParser')
Model_parser = lazy_import('invenio.modules.jsonalchemy.parser:ModelParser')
guess_legacy_field_names = lazy_import(
'invenio.modules.jsonalchemy.parser:guess_legacy_field_names')
get_producer_rules = lazy_import(
'invenio.modules.jsonalchemy.parser:get_producer_rules')
TEST_PACKAGE = 'invenio.modules.jsonalchemy.testsuite'
test_registry = RegistryProxy('testsuite', ImportPathRegistry,
initial=[TEST_PACKAGE])
field_definitions = lambda: PkgResourcesDirDiscoveryRegistry(
'fields', registry_namespace=test_registry)
model_definitions = lambda: PkgResourcesDirDiscoveryRegistry(
'models', registry_namespace=test_registry)
def clean_field_model_definitions():
Field_parser._field_definitions = {}
Field_parser._legacy_field_matchings = {}
Model_parser._model_definitions = {}
class TestParser(InvenioTestCase):
def setUp(self):
self.app.extensions['registry'][
'testsuite.fields'] = field_definitions()
self.app.extensions['registry'][
'testsuite.models'] = model_definitions()
def tearDown(self):
del self.app.extensions['registry']['testsuite.fields']
del self.app.extensions['registry']['testsuite.models']
def test_wrong_indent(self):
"""JSONAlchemy - wrong indent"""
from invenio.modules.jsonalchemy.parser import _create_field_parser
import pyparsing
parser = _create_field_parser()
test = """
foo:
creator:
bar, '1', foo()
"""
self.assertRaises(pyparsing.ParseException, parser.parseString, test)
from invenio.modules.jsonalchemy.errors import FieldParserException
tmp_file = tempfile.NamedTemporaryFile()
config = """
foo:
creator:
bar, '1', foo()
"""
tmp_file.write(config)
tmp_file.flush()
self.app.extensions['registry'][
'testsuite.fields'].register(tmp_file.name)
clean_field_model_definitions()
self.assertRaises(
FieldParserException, Field_parser.reparse, 'testsuite')
tmp_file.close()
clean_field_model_definitions()
def test_wrong_field_definitions(self):
"""JSONAlchemy - wrong field definitions"""
from invenio.modules.jsonalchemy.errors import FieldParserException
tmp_file_4 = tempfile.NamedTemporaryFile()
config_4 = '''
title:
creator:
marc, '245__', value
'''
tmp_file_4.write(config_4)
tmp_file_4.flush()
clean_field_model_definitions()
self.app.extensions['registry'][
'testsuite.fields'].register(tmp_file_4.name)
self.assertRaises(
FieldParserException, Field_parser.reparse, 'testsuite')
tmp_file_4.close()
clean_field_model_definitions()
def test_wrong_field_inheritance(self):
"""JSONAlchmey - not parent field definition"""
from invenio.modules.jsonalchemy.errors import FieldParserException
tmp_file_5 = tempfile.NamedTemporaryFile()
config_5 = '''
@extend
wrong_field:
""" Desc """
'''
tmp_file_5.write(config_5)
tmp_file_5.flush()
clean_field_model_definitions()
self.app.extensions['registry'][
'testsuite.fields'].register(tmp_file_5.name)
self.assertRaises(
FieldParserException, Field_parser.reparse, 'testsuite')
tmp_file_5.close()
clean_field_model_definitions()
def test_field_rules(self):
"""JsonAlchemy - field parser"""
self.assertTrue(len(Field_parser.field_definitions('testsuite')) >= 22)
# Check that all files are parsed
self.assertTrue(
            'authors' in Field_parser.field_definitions('testsuite'))
self.assertTrue('title' in Field_parser.field_definitions('testsuite'))
# Check work around for [n] and [0]
self.assertTrue(
Field_parser.field_definitions('testsuite')['doi']['pid'])
        # Check if derived and calculated are well parsed
self.assertTrue('dummy' in Field_parser.field_definitions('testsuite'))
self.assertEquals(
Field_parser.field_definitions('testsuite')['dummy']['pid'], 2)
self.assertEquals(Field_parser.field_definitions(
'testsuite')['dummy']['rules'].keys(), ['json', 'derived'])
self.assertTrue(
len(Field_parser.field_definitions(
'testsuite')['dummy']['producer']
),
2
)
self.assertTrue(Field_parser.field_definitions('testsuite')['_random'])
# Check override
value = {'a': 'a', 'b': 'b', 'k': 'k'} # noqa
self.assertEquals(
eval(Field_parser.field_definitions('testsuite')
['title']['rules']['marc'][1]['function']),
{'form': 'k', 'subtitle': 'b', 'title': 'a'})
# Check extras
self.assertTrue(
'json_ext' in
Field_parser.field_definitions('testsuite')['modification_date']
)
tmp = Field_parser.field_definitions('testsuite')
Field_parser.reparse('testsuite')
self.assertEquals(
len(Field_parser.field_definitions('testsuite')), len(tmp))
def test_field_hidden_decorator(self):
"""JsonAlchemy - field hidden decorator."""
# Check that all files are parsed
self.assertTrue(
'hidden_basic' in Field_parser.field_definitions('testsuite'))
# Check default hidden value
self.assertFalse(
Field_parser.field_definitions('testsuite')['_id']['hidden'])
# Check hidden field
self.assertTrue(Field_parser.field_definitions(
'testsuite')['hidden_basic']['hidden'])
def test_wrong_field_name_inside_model(self):
"""JSONAlchmey - wrong field name inside model"""
from invenio.modules.jsonalchemy.errors import ModelParserException
tmp_file_8 = tempfile.NamedTemporaryFile()
config_8 = '''
fields:
not_existing_field
'''
tmp_file_8.write(config_8)
tmp_file_8.flush()
clean_field_model_definitions()
self.app.extensions['registry'][
'testsuite.models'].register(tmp_file_8.name)
self.assertRaises(
ModelParserException, Model_parser.reparse, 'testsuite')
tmp_file_8.close()
clean_field_model_definitions()
def test_model_definitions(self):
"""JsonAlchemy - model parser"""
clean_field_model_definitions()
self.assertTrue(len(Model_parser.model_definitions('testsuite')) >= 2)
self.assertTrue(
'test_base' in Model_parser.model_definitions('testsuite'))
tmp = Model_parser.model_definitions('testsuite')
Model_parser.reparse('testsuite')
        self.assertEquals(
            len(Model_parser.model_definitions('testsuite')), len(tmp))
|
shinglyu/servo
|
tests/wpt/harness/wptrunner/wptrunner.py
|
Python
|
mpl-2.0
| 9,731
| 0.00185
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import unicode_literals
import json
import os
import sys
import environment as env
import products
import testloader
import wptcommandline
import wptlogging
import wpttest
from testrunner import ManagerGroup
here = os.path.split(__file__)[0]
logger = None
"""Runner for web-platform-tests
The runner has several design goals:
* Tests should run with no modification from upstream.
* Tests should be regarded as "untrusted" so that errors, timeouts and even
crashes in the tests can be handled without failing the entire test run.
 * For performance, tests can be run in multiple browsers in parallel.
The upstream repository has the facility for creating a test manifest in JSON
format. This manifest is used directly to determine which tests exist. Local
metadata files are used to store the expected test results.
"""
def setup_logging(*args, **kwargs):
global logger
logger = wptlogging.setup(*args, **kwargs)
def get_loader(test_paths, product, ssl_env, debug=None, run_info_extras=None, **kwargs):
if run_info_extras is None:
run_info_extras = {}
run_info = wpttest.get_run_info(kwargs["run_info"], product, debug=debug,
extras=run_info_extras)
test_manifests = testloader.ManifestLoader(test_paths, force_manifest_update=kwargs["manifest_update"]).load()
manifest_filters = []
meta_filters = []
if kwargs["include"] or kwargs["exclude"] or kwargs["include_manifest"]:
manifest_filters.append(testloader.TestFilter(include=kwargs["include"],
exclude=kwargs["exclude"],
manifest_path=kwargs["include_manifest"],
test_manifests=test_manifests))
if kwargs["tags"]:
meta_filters.append(testloader.TagFilter(tags=kwargs["tags"]))
test_loader = testloader.TestLoader(test_manifests,
kwargs["test_types"],
run_info,
manifest_filters=manifest_filters,
meta_filters=meta_filters,
chunk_type=kwargs["chunk_type"],
total_chunks=kwargs["total_chunks"],
chunk_number=kwargs["this_chunk"],
include_https=ssl_env.ssl_enabled)
return run_info, test_loader
def list_test_groups(test_paths, product, **kwargs):
env.do_delayed_imports(logger, test_paths)
ssl_env = env.ssl_env(logger, **kwargs)
run_info, test_loader = get_loader(test_paths, product, ssl_env,
**kwargs)
for item in sorted(test_loader.groups(kwargs["test_types"])):
print item
def list_disabled(test_paths, product, **kwargs):
env.do_delayed_imports(logger, test_paths)
rv = []
ssl_env = env.ssl_env(logger, **kwargs)
run_info, test_loader = get_loader(test_paths, product, ssl_env,
**kwargs)
for test_type, tests in test_loader.disabled_tests.iteritems():
for test in tests:
rv.append({"test": test.id, "reason": test.disabled()})
print json.dumps(rv, indent=2)
def get_pause_after_test(test_loader, **kwargs):
total_tests = sum(len(item) for item in test_loader.tests.itervalues())
if kwargs["pause_after_test"] is None:
if kwargs["repeat_until_unexpected"]:
return False
if kwargs["repeat"] == 1 and total_tests == 1:
return True
return False
return kwargs["pause_after_test"]
def run_tests(config, test_paths, product, **kwargs):
with wptlogging.CaptureIO(logger, not kwargs["no_capture_stdio"]):
env.do_delayed_imports(logger, test_paths)
(check_args,
browser_cls, get_browser_kwargs,
executor_classes, get_executor_kwargs,
env_options, run_info_extras) = products.load_product(config, product)
ssl_env = env.ssl_env(logger, **kwargs)
check_args(**kwargs)
if "test_loader" in kwargs:
run_info = wpttest.get_run_info(kwargs["run_info"], product, debug=None,
extras=run_info_extras(**kwargs))
test_loader = kwargs["test_loader"]
else:
run_info, test_loader = get_loader(test_paths,
product,
ssl_env,
run_info_extras=run_info_extras(**kwargs),
**kwargs)
if kwargs["run_by_dir"] is False:
test_source_cls = testloader.SingleTestSource
test_source_kwargs = {}
else:
# A value of None indicates infinite depth
test_source_cls = testloader.PathGroupedSource
test_source_kwargs = {"depth": kwargs["run_by_dir"]}
logger.info("Using %i client processes" % kwargs["processes"])
unexpected_total = 0
kwargs["pause_after_test"] = get_pause_after_test(test_loader, **kwargs)
with env.TestEnvironment(test_paths,
ssl_env,
kwargs["pause_after_test"],
kwargs["debug_info"],
env_options) as test_environment:
try:
test_environment.ensure_started()
except env.TestEnvironmentError as e:
logger.critical("Error starting test environment: %s" % e.message)
raise
browser_kwargs = get_browser_kwargs(ssl_env=ssl_env, **kwargs)
repeat = kwargs["repeat"]
repeat_count = 0
            repeat_until_unexpected = kwargs["repeat_until_unexpected"]
while repeat_count < repeat or repeat_until_unexpected:
repeat_count += 1
if repeat_until_unexpected:
logger.info("Repetition %i" % (repeat_count))
elif repeat > 1:
logger.info("Repetition %i / %i" % (repeat_count, repeat))
unexpected_count = 0
logger.suite_start(test_loader.test_ids, run_info)
for test_type in kwargs["test_types"]:
logger.info("Running %s tests" % test_type)
for test in test_loader.disabled_tests[test_type]:
logger.test_start(test.id)
logger.test_end(test.id, status="SKIP")
executor_cls = executor_classes.get(test_type)
executor_kwargs = get_executor_kwargs(test_type,
test_environment.external_config,
test_environment.cache_manager,
run_info,
**kwargs)
if executor_cls is None:
logger.error("Unsupported test type %s for product %s" %
(test_type, product))
continue
with ManagerGroup("web-platform-tests",
kwargs["processes"],
test_source_cls,
test_source_kwargs,
browser_cls,
browser_kwargs,
executor_cls,
executor_kwargs,
kwargs["pause_after_test"],
|
googleapis/google-resumable-media-python
|
google/resumable_media/_helpers.py
|
Python
|
apache-2.0
| 12,563
| 0.000637
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Shared utilities used by both downloads and uploads."""
from __future__ import absolute_import
import base64
import hashlib
import logging
import random
import warnings
from urllib.parse import parse_qs
from urllib.parse import urlencode
from urllib.parse import urlsplit
from urllib.parse import urlunsplit
from google.resumable_media import common
RANGE_HEADER = "range"
CONTENT_RANGE_HEADER = "content-range"
_SLOW_CRC32C_WARNING = (
"Currently using crcmod in pure python form. This is a slow "
"implementation. Python 3 has a faster implementation, `google-crc32c`, "
"which will be used if it is installed."
)
_GENERATION_HEADER = "x-goog-generation"
_HASH_HEADER = "x-goog-hash"
_MISSING_CHECKSUM = """\
No {checksum_type} checksum was returned from the service while downloading {}
(which happens for composite objects), so client-side content integrity
checking is not being performed."""
_LOGGER = logging.getLogger(__name__)
def do_nothing():
"""Simple default callback."""
def header_required(response, name, get_headers, callback=do_nothing):
"""Checks that a specific header is in a headers dictionary.
Args:
response (object): An HTTP response object, expected to have a
``headers`` attribute that is a ``Mapping[str, str]``.
name (str): The name of a required header.
get_headers (Callable[Any, Mapping[str, str]]): Helper to get headers
from an HTTP response.
callback (Optional[Callable]): A callback that takes no arguments,
to be executed when an exception is being raised.
Returns:
str: The desired header.
Raises:
~google.resumable_media.common.InvalidResponse: If the header
is missing.
"""
headers = get_headers(response)
if name not in headers:
callback()
raise common.InvalidResponse(
response, "Response headers must contain header", name
)
return headers[name]
def require_status_code(response, status_codes, get_status_code, callback=do_nothing):
"""Require a response has a status code among a list.
Args:
response (object): The HTTP response object.
status_codes (tuple): The acceptable status codes.
get_status_code (Callable[Any, int]): Helper to get a status code
from a response.
callback (Optional[Callable]): A callback that takes no arguments,
to be executed when an exception is being raised.
Returns:
int: The status code.
Raises:
~google.resumable_media.common.InvalidResponse: If the status code
is not one of the values in ``status_codes``.
"""
status_code = get_status_code(response)
if status_code not in status_codes:
if status_code not in common.RETRYABLE:
callback()
raise common.InvalidResponse(
response,
"Request failed with status code",
status_code,
"Expected one of",
            *status_codes
)
return status_code
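# Hedged usage sketch (editor's addition, not part of the library): any object
# plus a matching getter works, so a minimal stand-in response is used here.
def _example_require_status_code():
    class _FakeResponse(object):
        status_code = 200

    get_code = lambda response: response.status_code
    # 200 is in the accepted tuple, so the call simply returns it; a code
    # outside the tuple would raise common.InvalidResponse (and run the
    # callback first if the code is not retryable).
    return require_status_code(_FakeResponse(), (200, 308), get_code)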
def calculate_retry_wait(base_wait, max_sleep, multiplier=2.0):
"""Calculate the amount of time to wait before a retry attempt.
Wait time grows exponentially with the number of attempts, until
``max_sleep``.
    A random amount of jitter (between 0 and 1 seconds) is added to spread out
retry attempts from different clients.
Args:
base_wait (float): The "base" wait time (i.e. without any jitter)
that will be multiplied until it reaches the maximum sleep.
max_sleep (float): Maximum value that a sleep time is allowed to be.
multiplier (float): Multiplier to apply to the base wait.
Returns:
Tuple[float, float]: The new base wait time as well as the wait time
to be applied (with a random amount of jitter between 0 and 1 seconds
added).
"""
new_base_wait = multiplier * base_wait
if new_base_wait > max_sleep:
new_base_wait = max_sleep
jitter_ms = random.randint(0, 1000)
return new_base_wait, new_base_wait + 0.001 * jitter_ms
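# Hedged usage sketch (editor's addition, not part of the library): shows how
# the base wait doubles until it is capped at ``max_sleep``.
def _example_retry_schedule(attempts=5, base_wait=1.0, max_sleep=10.0):
    """Collect the sleep times a caller would use over ``attempts`` retries."""
    waits = []
    for _ in range(attempts):
        base_wait, wait = calculate_retry_wait(base_wait, max_sleep)
        waits.append(wait)
    # With the defaults the base waits are 2.0, 4.0, 8.0, 10.0, 10.0 and each
    # returned value adds between 0 and 1 second of random jitter.
    return waits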
def _get_crc32c_object():
"""Get crc32c object
Attempt to use the Google-CRC32c package. If it isn't available, try
to use CRCMod. CRCMod might be using a 'slow' varietal. If so, warn...
"""
try:
import google_crc32c # type: ignore
crc_obj = google_crc32c.Checksum()
except ImportError:
try:
import crcmod # type: ignore
crc_obj = crcmod.predefined.Crc("crc-32c")
_is_fast_crcmod()
except ImportError:
raise ImportError("Failed to import either `google-crc32c` or `crcmod`")
return crc_obj
def _is_fast_crcmod():
# Determine if this is using the slow form of crcmod.
nested_crcmod = __import__(
"crcmod.crcmod",
globals(),
locals(),
["_usingExtension"],
0,
)
fast_crc = getattr(nested_crcmod, "_usingExtension", False)
if not fast_crc:
warnings.warn(_SLOW_CRC32C_WARNING, RuntimeWarning, stacklevel=2)
return fast_crc
def _get_metadata_key(checksum_type):
if checksum_type == "md5":
return "md5Hash"
else:
return checksum_type
def prepare_checksum_digest(digest_bytestring):
"""Convert a checksum object into a digest encoded for an HTTP header.
Args:
        digest_bytestring (bytes): A checksum digest bytestring.
Returns:
str: A base64 string representation of the input.
"""
encoded_digest = base64.b64encode(digest_bytestring)
# NOTE: ``b64encode`` returns ``bytes``, but HTTP headers expect ``str``.
return encoded_digest.decode("utf-8")
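# Hedged usage sketch (editor's addition, not part of the library): the payload
# below is arbitrary example data.
def _example_prepare_checksum_digest():
    checksum = hashlib.md5(b"example payload")
    # A 16-byte MD5 digest becomes a 24-character base64 string (ending in
    # "==") that can be sent in an HTTP header.
    return prepare_checksum_digest(checksum.digest())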
def _get_expected_checksum(response, get_headers, media_url, checksum_type):
"""Get the expected checksum and checksum object for the download response.
Args:
response (~requests.Response): The HTTP response object.
get_headers (callable: response->dict): returns response headers.
media_url (str): The URL containing the media to be downloaded.
checksum_type Optional(str): The checksum type to read from the headers,
exactly as it will appear in the headers (case-sensitive). Must be
"md5", "crc32c" or None.
Returns:
Tuple (Optional[str], object): The expected checksum of the response,
if it can be detected from the ``X-Goog-Hash`` header, and the
appropriate checksum object for the expected checksum.
"""
if checksum_type not in ["md5", "crc32c", None]:
raise ValueError("checksum must be ``'md5'``, ``'crc32c'`` or ``None``")
elif checksum_type in ["md5", "crc32c"]:
headers = get_headers(response)
expected_checksum = _parse_checksum_header(
headers.get(_HASH_HEADER), response, checksum_label=checksum_type
)
if expected_checksum is None:
msg = _MISSING_CHECKSUM.format(
media_url, checksum_type=checksum_type.upper()
)
_LOGGER.info(msg)
checksum_object = _DoNothingHash()
else:
if checksum_type == "md5":
checksum_object = hashlib.md5()
else:
checksum_object = _get_crc32c_object()
else:
expected_checksum = None
checksum_object = _DoNothingHash()
return (expected_checksum, checksum_object)
def _parse_checksum_header(header_value, response, checksum_label):
"""Parses the checksum header from an ``X-Goog-Hash`` value.
.. _header reference: https://cloud.google.
|
tiancj/emesene
|
emesene/e3/xmpp/SleekXMPP/sleekxmpp/plugins/xep_0047/ibb.py
|
Python
|
gpl-3.0
| 5,190
| 0.000193
|
import uuid
import logging
import threading
from sleekxmpp import Message, Iq
from sleekxmpp.exceptions import XMPPError
from sleekxmpp.xmlstream.handler import Callback
from sleekxmpp.xmlstream.matcher import StanzaPath
from sleekxmpp.xmlstream import register_stanza_plugin
from sleekxmpp.plugins import BasePlugin
from sleekxmpp.plugins.xep_0047 import stanza, Open, Close, Data, IBBytestream
log = logging.getLogger(__name__)
class XEP_0047(BasePlugin):
name = 'xep_0047'
description = 'XEP-0047: In-band Bytestreams'
dependencies = set(['xep_0030'])
stanza = stanza
def plugin_init(self):
self.streams = {}
        self.pending_streams = {}
self.pending_close_streams = {}
self._stream_lock = threading.Lock()
self.max_block_size = self.config.get('max_block_size', 8192)
self.window_size = self.config.get('window_size', 1)
self.auto_accept = self.config.get('auto_accept', True)
self.accept_stream = self.config.get('accept_stream', None)
register_stanza_plugin(Iq, Open)
register_stanza_plugin(Iq, Close)
register_stanza_plugin(Iq, Data)
self.xmpp.register_handler(Callback(
'IBB Open',
StanzaPath('iq@type=set/ibb_open'),
self._handle_open_request))
self.xmpp.register_handler(Callback(
'IBB Close',
StanzaPath('iq@type=set/ibb_close'),
self._handle_close))
self.xmpp.register_handler(Callback(
'IBB Data',
StanzaPath('iq@type=set/ibb_data'),
self._handle_data))
def plugin_end(self):
self.xmpp.remove_handler('IBB Open')
self.xmpp.remove_handler('IBB Close')
self.xmpp.remove_handler('IBB Data')
self.xmpp['xep_0030'].del_feature(feature='http://jabber.org/protocol/ibb')
def session_bind(self, jid):
self.xmpp['xep_0030'].add_feature('http://jabber.org/protocol/ibb')
def _accept_stream(self, iq):
if self.accept_stream is not None:
return self.accept_stream(iq)
if self.auto_accept:
if iq['ibb_open']['block_size'] <= self.max_block_size:
return True
return False
def open_stream(self, jid, block_size=4096, sid=None, window=1,
ifrom=None, block=True, timeout=None, callback=None):
if sid is None:
sid = str(uuid.uuid4())
iq = self.xmpp.Iq()
iq['type'] = 'set'
iq['to'] = jid
iq['from'] = ifrom
iq['ibb_open']['block_size'] = block_size
iq['ibb_open']['sid'] = sid
iq['ibb_open']['stanza'] = 'iq'
stream = IBBytestream(self.xmpp, sid, block_size,
iq['to'], iq['from'], window)
with self._stream_lock:
self.pending_streams[iq['id']] = stream
if block:
resp = iq.send(timeout=timeout)
self._handle_opened_stream(resp)
return stream
else:
cb = None
if callback is not None:
def chained(resp):
self._handle_opened_stream(resp)
callback(resp)
cb = chained
else:
cb = self._handle_opened_stream
return iq.send(block=block, timeout=timeout, callback=cb)
def _handle_opened_stream(self, iq):
if iq['type'] == 'result':
with self._stream_lock:
stream = self.pending_streams.get(iq['id'], None)
if stream is not None:
stream.sender = iq['to']
stream.receiver = iq['from']
stream.stream_started.set()
self.streams[stream.sid] = stream
self.xmpp.event('ibb_stream_start', stream)
with self._stream_lock:
if iq['id'] in self.pending_streams:
del self.pending_streams[iq['id']]
def _handle_open_request(self, iq):
sid = iq['ibb_open']['sid']
size = iq['ibb_open']['block_size']
if not self._accept_stream(iq):
raise XMPPError('not-acceptable')
if size > self.max_block_size:
raise XMPPError('resource-constraint')
stream = IBBytestream(self.xmpp, sid, size,
iq['from'], iq['to'],
self.window_size)
stream.stream_started.set()
self.streams[sid] = stream
iq.reply()
iq.send()
self.xmpp.event('ibb_stream_start', stream)
def _handle_data(self, iq):
sid = iq['ibb_data']['sid']
stream = self.streams.get(sid, None)
if stream is not None and iq['from'] != stream.sender:
stream._recv_data(iq)
else:
raise XMPPError('item-not-found')
def _handle_close(self, iq):
sid = iq['ibb_close']['sid']
stream = self.streams.get(sid, None)
if stream is not None and iq['from'] != stream.sender:
stream._closed(iq)
else:
raise XMPPError('item-not-found')
|
msegado/edx-platform
|
common/test/acceptance/tests/lms/test_lms_dashboard.py
|
Python
|
agpl-3.0
| 5,727
| 0.002794
|
# -*- coding: utf-8 -*-
"""
End-to-end tests for the main LMS Dashboard (aka, Student Dashboard).
"""
import six
from common.test.acceptance.fixtures.course import CourseFixture, XBlockFixtureDesc
from common.test.acceptance.pages.common.auto_auth import AutoAuthPage
from common.test.acceptance.pages.lms.dashboard import DashboardPage
from common.test.acceptance.tests.helpers import UniqueCourseTest, generate_course_key
DEFAULT_SHORT_DATE_FORMAT = u'{dt:%b} {dt.day}, {dt.year}'
TEST_DATE_FORMAT = u'{dt:%b} {dt.day}, {dt.year} {dt.hour:02}:{dt.minute:02}'
class BaseLmsDashboardTestMultiple(UniqueCourseTest):
""" Base test suite for the LMS Student Dashboard with Multiple Courses"""
def setUp(self):
"""
Initializes the components (page objects, courses, users) for this test suite
"""
# Some parameters are provided by the parent setUp() routine, such as the following:
# self.course_id, self.course_info, self.unique_id
super(BaseLmsDashboardTestMultiple, self).setUp()
# Load page objects for use by the tests
self.dashboard_page = DashboardPage(self.browser)
# Configure some aspects of the test course and install the settings into the course
self.courses = {
'A': {
'org': 'test_org',
'number': self.unique_id,
'run': 'test_run_A',
'display_name': 'Test Course A',
'enrollment_mode': 'audit',
'cert_name_long': 'Certificate of Audit Achievement'
},
'B': {
'org': 'test_org',
'number': self.unique_id,
'run': 'test_run_B',
'display_name': 'Test Course B',
'enrollment_mode': 'verified',
'cert_name_long': 'Certificate of Verified Achievement'
},
'C': {
'org': 'test_org',
'number': self.unique_id,
'run': 'test_run_C',
'display_name': 'Test Course C',
'enrollment_mode': 'credit',
'cert_name_long': 'Certificate of Credit Achievement'
}
}
self.username = "test_{uuid}".format(uuid=self.unique_id[0:6])
self.email = "{user}@example.com".format(user=self.username)
self.course_keys = {}
self.course_fixtures = {}
for key, value in six.iteritems(self.courses):
course_key = generate_course_key(
value['org'],
value['number'],
value['run'],
)
course_fixture = CourseFixture(
value['org'],
value['number'],
value['run'],
value['display_name'],
)
course_fixture.add_advanced_settings({
u"social_sharing_url": {u"value": "http://custom/course/url"},
u"cert_name_long": {u"value": value['cert_name_long']}
})
course_fixture.add_children(
XBlockFixtureDesc('chapter', 'Test Section 1').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection 1,1').add_children(
XBlockFixtureDesc('problem', 'Test Problem 1', data='<problem>problem 1 dummy body</problem>'),
XBlockFixtureDesc('html', 'html 1', data="<html>html 1 dummy body</html>"),
XBlockFixtureDesc('problem', 'Test Problem 2', data="<problem>problem 2 dummy body</problem>"),
XBlockFixtureDesc('html', 'html 2', data="<html>html 2 dummy body</html>"),
),
XBlockFixtureDesc('sequential', 'Test Subsection 1,2').add_children(
XBlockFixtureDesc('problem', 'Test Problem 3', data='<problem>problem 3 dummy body</problem>'),
),
XBlockFixtureDesc(
'sequential', 'Test HIDDEN Subsection', metadata={'visible_to_staff_only': True}
).add_children(
XBlockFixtureDesc('problem', 'Test HIDDEN Problem', data='<problem>hidden problem</problem>'),
),
)
).install()
self.course_keys[key] = course_key
self.course_fixtures[key] = course_fixture
# Create the test user, register them for the course, and authenticate
AutoAuthPage(
self.browser,
username=self.username,
email=self.email,
course_id=course_key,
enrollment_mode=value['enrollment_mode']
).visit()
# Navigate the authenticated, enrolled user to the dashboard page and get testing!
self.dashboard_page.visit()
class LmsDashboardA11yTest(BaseLmsDashboardTestMultiple):
"""
Class to test lms student dashboard accessibility.
"""
a11y = True
def test_dashboard_course_listings_a11y(self):
"""
Test the accessibility of the course listings
"""
self.dashboard_page.a11y_audit.config.set_rules({
"ignore": [
'aria-valid-attr', # TODO: LEARNER-6611 & LEARNER-6865
'button-name', # TODO: AC-935
                'landmark-no-duplicate-banner', # TODO: AC-934
'landmark-complementary-is-top-level', # TODO: AC-939
'region' # TODO: AC-932
]
})
course_listings = self.dashboard_page.get_courses()
self.assertEqual(len(course_listings), 3)
        self.dashboard_page.a11y_audit.check_for_accessibility_errors()
|
JarbasAI/JarbasAI
|
jarbas_skills/LILACS_core/concept.py
|
Python
|
gpl-3.0
| 25,737
| 0.002487
|
from jarbas_utils.skill_tools import LILACSstorageQuery
from mycroft.util.log import getLogger
__authors__ = ["jarbas", "heinzschmidt"]
class ConceptNode():
'''
Node:
name:
type: "informational" <- all discussed nodes so far are informational
Connections:
synonims: [] <- is the same as
antonims: [] <- can never be related to
parents: {name : distance } <- is an instance of
childs: {name : distance } <- can have the following instances
cousins: [] <- somewhat related subjects
spawns: [] <- what comes from this?
spawned_by: [] <- where does this come from?
consumes: [] <- what does this need/spend ?
consumed_by: [] <- what consumes this?
parts : [ ] <- what smaller nodes can this be divided into?
part_off: [ ] <- what can be made out of this?
Data:
description: wikidata description_field
abstract: dbpedia abstract
summary: wikipedia_summary
pics: [ wikipedia pic, dbpedia pic ]
infobox: {wikipedia infobox}
wikidata: {wikidata_dict}
        props: [wikidata_properties] <- if we can parse this appropriately we can make connections
links: [ wikipedia link, dbpedia link ]
external_links[ suggested links from dbpedia]
'''
def __init__(self, name, data=None, parent_concepts=None,
child_concepts=None, synonims=None, antonims=None, cousins = None,
spawns = None, spawned_by = None, consumes = None, consumed_by = None,
                 parts = None, part_off=None, type="info"):
self.name = name
self.type = type
if data is None:
data = {}
self.data = data
self.connections = {}
if parent_concepts is not None:
            self.connections.setdefault("parents", parent_concepts)
else:
self.connections.setdefault("parents", {})
if child_concepts is not None:
self.connections.setdefault("childs", child_concepts)
else:
self.connections.setdefault("childs", {})
if synonims is not None:
self.connections.setdefault("synonims", synonims)
else:
self.connections.setdefault("synonims", {})
if antonims is not None:
self.connections.setdefault("antonims", antonims)
else:
self.connections.setdefault("antonims", {})
if cousins is not None:
self.connections.setdefault("cousins", cousins)
else:
self.connections.setdefault("cousins", {})
if spawns is not None:
self.connections.setdefault("spawns", spawns)
else:
self.connections.setdefault("spawns", {})
if spawned_by is not None:
self.connections.setdefault("spawned_by", spawned_by)
else:
self.connections.setdefault("spawned_by", {})
if consumes is not None:
self.connections.setdefault("consumes", consumes)
else:
self.connections.setdefault("consumes", {})
if consumed_by is not None:
self.connections.setdefault("consumed_by", consumed_by)
else:
self.connections.setdefault("consumed_by", {})
if parts is not None:
self.connections.setdefault("parts", parts)
else:
self.connections.setdefault("parts", {})
if part_off is not None:
self.connections.setdefault("part_off", part_off)
else:
self.connections.setdefault("part_off", {})
def get_dict(self):
node_dict = {"name": self.name, "type": self.type, "connections":
self.connections, "data": self.data}
return node_dict
def load_from_dict(self, node_dict):
self.connections.update(node_dict["connections"])
self.data.update(node_dict.get("data", {}))
def get_parents(self):
return self.connections["parents"]
def get_childs(self):
return self.connections["childs"]
def get_cousins(self):
return self.connections["cousins"]
def get_consumes(self):
return self.connections["consumes"]
def get_consumed_by(self):
return self.connections["consumed_by"]
def get_spawn(self):
return self.connections["spawns"]
def get_spawned_by(self):
return self.connections["spawned_by"]
def get_parts(self):
return self.connections["parts"]
def get_part_off(self):
return self.connections["part_off"]
def get_synonims(self):
return self.connections["synonims"]
def get_antonims(self):
return self.connections["antonims"]
def get_data(self):
return self.data
def add_synonim(self, synonim, strenght=5):
if synonim not in self.connections["synonims"]:
self.connections["synonims"][synonim] = strenght
def add_antonim(self, antonim, strenght=5):
if antonim not in self.connections["antonims"]:
self.connections["antonims"][antonim] = strenght
def add_data(self, key, data=None):
if data is None:
data = {}
if key in self.data:
self.data[key] = data
else:
self.data.setdefault(key, data)
def add_parent(self, parent_name, gen = 1, update = True):
# a node cannot be a parent of itself
if parent_name == self.name:
return
# a node cannot be a parent and a child (would it make sense in some corner case?)
if parent_name in self.connections["childs"]:
return
if parent_name not in self.connections["parents"]:
self.connections["parents"].setdefault(parent_name, gen)
elif parent_name in self.connections["parents"] and update:
self.connections["parents"][parent_name] = gen
def add_child(self, child_name, gen=1, update = True):
# a node cannot be a child of itself
if child_name == self.name:
return
if child_name in self.connections["parents"]:
return
if child_name not in self.connections["childs"]:
self.connections["childs"].setdefault(child_name, gen)
elif child_name in self.connections["childs"] and update:
self.connections["childs"][child_name] = gen
def add_cousin(self, cousin, strenght=5):
# dont add self or plural forms to related
cousin_p = cousin+"s" #add an s
        cousin_s = cousin[0:len(cousin)-1] #remove last letter
if cousin == self.name or cousin_p in self.name or cousin_s in self.name:
return
# dont add synonims
for s in self.connections["synonims"].keys():
if cousin == s or cousin_p in s+"s" or cousin_s in s+"s":
return
if cousin not in self.connections["cousins"]:
self.connections["cousins"][cousin] = strenght
def add_spawn(self, spawn, strenght=5):
if spawn not in self.connections["spawns"]:
self.connections["spawns"][spawn]= strenght
def add_spawned_by(self, spawned_by, strenght=5):
if spawned_by not in self.connections["spawned_by"]:
self.connections["spawned_by"][spawned_by]= strenght
def add_consumes(self, consumes, strenght=5):
if consumes not in self.connections["consumes"]:
self.connections["consumes"][consumes]= strenght
def add_consumed_by(self, consumed_by, strenght=5):
if consumed_by not in self.connections["consumed_by"]:
self.connections["consumed_by"][consumed_by]= strenght
def add_part(self, part, strenght=5):
if part not in self.connections["parts"]:
self.connections["parts"][part]= strenght
def add_part_off(self, part_off, strenght=5):
if part_off not in self.connections["part_off"]:
self.connections["part_off"][part_off]= strenght
def remove_synonim(self, synonim):
        if synonim in self.connections["synonims"]:
self.connections["synonims"].pop(synonim)
|
gamernetwork/gn-django
|
gn_django/fields.py
|
Python
|
mit
| 909
| 0.0011
|
from django.db import models
from django.utils import timezone
from pytz import common_timezones
from .validators import YoutubeValidator
class TimezoneField(models.CharField):
"""
A field for selecting a timezone from the common timezones list.
"""
def __init__(self, *args, **kwargs):
common_timezone_names = [tz.replace('_', ' ') for tz in common_timezones]
the_kwargs = {
'choices': zip(common_timezones, common_timezone_names),
'default': timezone.get_default_timezone_name(),
'max_length': 50,
}
the_kwargs.update(kwargs)
super().__init__(*args, **the_kwargs)
class YoutubeField(models.CharField):
"""
Field representing a YouTube video, essentially just a text field
    but with automatic validation that given values are valid YouTube URLs
"""
default_validators = [YoutubeValidator()]
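# Example sketch (hypothetical model, not shipped with this module), assuming
# the usual Django model imports:
#
#     from django.db import models
#     from gn_django.fields import TimezoneField, YoutubeField
#
#     class Broadcast(models.Model):
#         timezone = TimezoneField()          # choices/default/max_length supplied above
#         trailer = YoutubeField(max_length=255, blank=True)
#
# TimezoneField only provides defaults, so any of its kwargs can still be
# overridden; YoutubeField is a plain CharField plus YoutubeValidator.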
|
musicpax/funcy
|
funcy/simple_funcs.py
|
Python
|
bsd-3-clause
| 1,941
| 0.002576
|
from functools import partial
from .primitives import EMPTY
__all__ = ['identity', 'constantly', 'caller',
'partial', 'rpartial', 'func_partial',
'curry', 'rcurry', 'autocurry',
'iffy']
def identity(x):
return x
def constantly(x):
return lambda *a, **kw: x
# an operator.methodcaller() brother
def caller(*a, **kw):
return lambda f: f(*a, **kw)
# not using functools.partial to get real function
def func_partial(func, *args, **kwargs):
"""
A functools.partial alternative, which returns a real function.
Can be used to construct methods.
"""
return lambda *a, **kw: func(*(args + a), **dict(kwargs, **kw))
def rpartial(func, *args):
return lambda *a: func(*(a + args))
def curry(func, n=EMPTY):
    if n is EMPTY:
n = func.__code__.co_argcount
if n <= 1:
return func
elif n == 2:
return lambda x: lambda y: func(x, y)
else:
return lambda x: curry(partial(func, x), n - 1)
def rcurry(func, n=EMPTY):
if n is EMPTY:
n = func.__code__.co_argcount
if n <= 1:
return func
elif n == 2:
return lambda x: lambda y: func(y, x)
else:
return lambda x: rcurry(rpartial(func, x), n - 1)
def autocurry(func, n=EMPTY, _args=(), _kwargs={}):
if n is EMPTY:
n = func.__code__.co_argcount
def autocurried(*a, **kw):
args = _args + a
kwargs = _kwargs.copy()
kwargs.update(kw)
if len(args) + len(kwargs) >= n:
return func(*args, **kwargs)
else:
return autocurry(func, n, _args=args, _kwargs=kwargs)
return autocurried
def iffy(pred, action=EMPTY, default=identity):
if action is EMPTY:
return iffy(bool, pred)
else:
return lambda v: action(v) if pred(v) else \
default(v) if callable(default) else \
default
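# A few illustrative uses of the helpers above (doctest-style; the three-argument
# ``add`` is defined only for the example):
#
#     >>> add = lambda x, y, z: x + y + z
#     >>> curry(add)(1)(2)(3)
#     6
#     >>> autocurry(add)(1, 2)(3)
#     6
#     >>> rpartial(lambda a, b: a - b, 1)(10)
#     9
#     >>> iffy(lambda x: x < 0, abs)(-5)
#     5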
|
fusionbox/mezzanine
|
mezzanine/utils/models.py
|
Python
|
bsd-2-clause
| 8,880
| 0.000113
|
from __future__ import unicode_literals
from functools import partial
from future.utils import with_metaclass
from django import VERSION
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db.models import Model, Field
from django.db.models.signals import class_prepared
from django.utils import six
from mezzanine.utils.importing import import_dotted_path
# Backward compatibility with Django 1.5's "get_user_model".
if VERSION >= (1, 5):
from django.contrib.auth import get_user_model
else:
def get_user_model():
from django.contrib.auth.models import User
return User
# Emulate Django 1.7's exception-raising get_registered_model
# when running under earlier versions
if VERSION >= (1, 7):
from django.apps import apps
get_model = apps.get_model
get_registered_model = apps.get_registered_model
else:
from django.db.models import get_model as django_get_model
def get_model(app_label, model_name=None):
if model_name is None:
app_label, model_name = app_label.split('.')
model = django_get_model(app_label, model_name)
if not model:
raise LookupError
return model
def get_registered_model(app_label, model_name):
model = django_get_model(app_label, model_name,
seed_cache=False, only_installed=False)
if not model:
raise LookupError
return model
def get_user_model_name():
"""
Returns the app_label.object_name string for the user model.
"""
return getattr(settings, "AUTH_USER_MODEL", "auth.User")
def base_concrete_model(abstract, instance):
"""
Used in methods of abstract models to find the super-most concrete
(non abstract) model in the inheritance chain that inherits from the
given abstract model. This is so the methods in the abstract model can
query data consistently across the correct concrete model.
Consider the following::
class Abstract(models.Model)
class Meta:
abstract = True
def concrete(self):
return base_concrete_model(Abstract, self)
class Super(Abstract):
pass
class Sub(Super):
pass
sub = Sub.objects.create()
sub.concrete() # returns Super
In actual Mezzanine usage, this allows methods in the ``Displayable`` and
``Orderable`` abstract models to access the ``Page`` instance when
instances of custom content types, (eg: models that inherit from ``Page``)
need to query the ``Page`` model to determine correct values for ``slug``
and ``_order`` which are only relevant in the context of the ``Page``
model and not the model of the custom content type.
"""
for cls in reversed(instance.__class__.__mro__):
if issubclass(cls, abstract) and not cls._meta.abstract:
return cls
return instance.__class__
def upload_to(field_path, default):
"""
Used as the ``upload_to`` arg for file fields - allows for custom
handlers to be implemented on a per field basis defined by the
``UPLOAD_TO_HANDLERS`` setting.
"""
from mezzanine.conf import settings
for k, v in settings.UPLOAD_TO_HANDLERS.items():
if k.lower() == field_path.lower():
return import_dotted_path(v)
return default
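# Sketch of how a handler is selected (the field path and dotted handler path
# below are hypothetical; only the UPLOAD_TO_HANDLERS setting name comes from
# the function above):
#
#     UPLOAD_TO_HANDLERS = {
#         "blog.BlogPost.featured_image": "myproject.storage.custom_upload_to",
#     }
#
# A file field whose field path matches a key (compared case-insensitively) gets
# the imported handler; every other field falls back to ``default``.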
class AdminThumbMixin(object):
"""
Provides a thumbnail method on models for admin classes to
reference in the ``list_display`` definition.
"""
admin_thumb_field = None
def admin_thumb(self):
thumb = ""
if self.admin_thumb_field:
thumb = getattr(self, self.admin_thumb_field, "")
if not thumb:
return ""
from mezzanine.conf import settings
from mezzanine.core.templatetags.mezzanine_tags import thumbnail
        x, y = settings.ADMIN_THUMB_SIZE.split('x')
thumb_url = thumbnail(thumb, x, y)
return "<img src='%s%s'>" % (settings.MEDIA_URL, thumb_url)
admin_thumb.allow_tags = True
admin_thumb.short_description = ""
class ModelMixinBase(type):
"""
Metaclass for ``ModelMixin`` which is used for injecting model
fields and methods into models defined outside of a project.
This currently isn't used anywhere.
"""
def __new__(cls, name, bases, attrs):
"""
Checks for an inner ``Meta`` class with a ``mixin_for``
attribute containing the model that this model will be mixed
into. Once found, copy over any model fields and methods onto
the model being mixed into, and return it as the actual class
definition for the mixin.
"""
if name == "ModelMixin":
# Actual ModelMixin class definition.
return super(ModelMixinBase, cls).__new__(cls, name, bases, attrs)
try:
mixin_for = attrs.pop("Meta").mixin_for
if not issubclass(mixin_for, Model):
raise TypeError
except (TypeError, KeyError, AttributeError):
raise ImproperlyConfigured("The ModelMixin class '%s' requires "
"an inner Meta class with the "
"``mixin_for`` attribute defined, "
"with a value that is a valid model.")
# Copy fields and methods onto the model being mixed into, and
# return it as the definition for the mixin class itself.
for k, v in attrs.items():
if isinstance(v, Field):
v.contribute_to_class(mixin_for, k)
elif k != "__module__":
setattr(mixin_for, k, v)
return mixin_for
class ModelMixin(with_metaclass(ModelMixinBase, object)):
"""
Used as a subclass for mixin models that inject their behaviour onto
models defined outside of a project. The subclass should define an
inner ``Meta`` class with a ``mixin_for`` attribute containing the
model that will be mixed into.
"""
class LazyModelOperations(object):
"""
This class connects itself to Django's class_prepared signal.
Pass a function and a model or model name to its ``add()`` method,
and the function will be called with the model as its only
parameter once the model has been loaded. If the model is already
loaded, the function is called immediately.
Adapted from ``django.db.models.fields.related`` and used in
``mezzanine.generic.fields``.
"""
def __init__(self):
self.pending_operations = {}
class_prepared.connect(self.signal_receiver)
@staticmethod
def model_key(model_or_name):
"""
Returns an (app_label, model_name) tuple from a model or string.
"""
if isinstance(model_or_name, six.string_types):
app_label, model_name = model_or_name.split(".")
else:
# It's actually a model class.
app_label = model_or_name._meta.app_label
model_name = model_or_name._meta.object_name
return app_label, model_name
def add(self, function, *models_or_names):
"""
The function passed to this method should accept n arguments,
where n=len(models_or_names). When all the models are ready,
the function will be called with the models as arguments, in
the order they appear in this argument list.
"""
# Eagerly parse all model strings so we can fail immediately
# if any are plainly invalid.
model_keys = [self.model_key(m) if not isinstance(m, tuple) else m
for m in models_or_names]
# If this function depends on more than one model, recursively call add
# for each, partially applying the given function on each iteration.
model_key, more_models = model_keys[0], model_keys[1:]
if more_models:
inner_function = function
function = lambda model: self.add(partial(inner_function, model),
*more_models)
# If the model is already loaded, pass
|
cpennington/edx-platform
|
common/lib/xmodule/xmodule/contentstore/content.py
|
Python
|
agpl-3.0
| 19,503
| 0.00323
|
import logging
import os
import re
import uuid
from io import BytesIO
import six
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import AssetKey, CourseKey
from opaque_keys.edx.locator import AssetLocator
from PIL import Image
from six.moves.urllib.parse import parse_qsl, quote_plus, urlencode, urlparse, urlunparse # pylint: disable=import-error
from xmodule.assetstore.assetmgr import AssetManager
from xmodule.exceptions import NotFoundError
from xmodule.modulestore.exceptions import ItemNotFoundError
STATIC_CONTENT_VERSION = 1
XASSET_LOCATION_TAG = 'c4x'
XASSET_SRCREF_PREFIX = 'xasset:'
XASSET_THUMBNAIL_TAIL_NAME = '.jpg'
STREAM_DATA_CHUNK_SIZE = 1024
VERSIONED_ASSETS_PREFIX = '/assets/courseware'
VERSIONED_ASSETS_PATTERN = r'/assets/courseware/(v[\d]/)?([a-f0-9]{32})'
class StaticContent(object):
def __init__(self, loc, name, content_type, data, last_modified_at=None, thumbnail_location=None, import_path=None,
length=None, locked=False, content_digest=None):
self.location = loc
self.name = name # a display string which can be edited, and thus not part of the location which needs to be fixed
self.content_type = content_type
self._data = data
self.length = length
self.last_modified_at = last_modified_at
self.thumbnail_location = thumbnail_location
# optional information about where this file was imported from. This is needed to support import/export
# cycles
self.import_path = import_path
self.locked = locked
self.content_digest = content_digest
@property
def is_thumbnail(self):
return self.location.category == 'thumbnail'
@staticmethod
def generate_thumbnail_name(original_name, dimensions=None, extension=None):
"""
- original_name: Name of the asset (typically its location.name)
- dimensions: `None` or a tuple of (width, height) in pixels
- extension: `None` or desired filename extension of the thumbnail
"""
if extension is None:
extension = XASSET_THUMBNAIL_TAIL_NAME
name_root, ext = os.path.splitext(original_name)
if not ext == extension:
name_root = name_root + ext.replace(u'.', u'-')
if dimensions:
width, height = dimensions # pylint: disable=unpacking-non-sequence
name_root += "-{}x{}".format(width, height)
return u"{name_root}{extension}".format(
name_root=name_root,
extension=extension,
)
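    # Worked example (illustrative values): generate_thumbnail_name('photo.png', (100, 50))
    # returns 'photo-png-100x50.jpg' -- the non-default '.png' extension is folded into
    # the name root and the dimensions are appended before the '.jpg' tail.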
@staticmethod
def compute_location(course_key, path, revision=None, is_thumbnail=False):
"""
Constructs a location object for static content.
- course_key: the course that this asset belongs to
- path: is the name of the static asset
- revision: is the object's revision information
- is_thumbnail: is whether or not we want the thumbnail version of this
asset
"""
path = path.replace('/', '_')
return course_key.make_asset_key(
'asset' if not is_thumbnail else 'thumbnail',
AssetLocator.clean_keeping_underscores(path)
).for_branch(None)
def get_id(self):
return self.location
@property
def data(self):
return self._data
ASSET_URL_RE = re.compile(r"""
/?c4x/
(?P<org>[^/]+)/
(?P<course>[^/]+)/
(?P<category>[^/]+)/
(?P<name>[^/]+)
""", re.VERBOSE | re.IGNORECASE)
@staticmethod
def is_c4x_path(path_string):
"""
Returns a boolean if a path is believed to be a c4x link based on the leading element
"""
return StaticContent.ASSET_URL_RE.match(path_string) is not None
@staticmethod
def get_static_path_from_location(location):
"""
This utility static method will take a location identifier and create a 'durable' /static/.. URL representation of it.
This link is 'durable' as it can maintain integrity across cloning of courseware across course-ids, e.g. reruns of
courses.
In the LMS/CMS, we have runtime link-rewriting, so at render time, this /static/... format will get translated into
the actual /c4x/... path which the client needs to reference static content
"""
if location is not None:
return u"/static/{name}".format(name=location.block_id)
else:
return None
@staticmethod
def get_base_url_path_for_course_assets(course_key):
if course_key is None:
return None
assert isinstance(course_key, CourseKey)
placeholder_id = uuid.uuid4().hex
# create a dummy asset location with a fake but unique name. strip off the name, and return it
url_path = StaticContent.serialize_asset_key_with_slash(
course_key.make_asset_key('asset', placeholder_id).for_branch(None)
)
return url_path.replace(placeholder_id, '')
@staticmethod
def get_location_from_path(path):
"""
Generate an AssetKey for the given path (old c4x/org/course/asset/name syntax)
"""
try:
return AssetKey.from_string(path)
except InvalidKeyError:
# TODO - re-address this once LMS-11198 is tackled.
if path.startswith('/'):
# try stripping off the leading slash and try again
return AssetKey.from_string(path[1:])
@staticmethod
def is_versioned_asset_path(path):
"""Determines whether the given asset path is versioned."""
return path.startswith(VERSIONED_ASSETS_PREFIX)
@staticmethod
def parse_versioned_asset_path(path):
"""
Examines an asset path and breaks it apart if it is versioned,
returning both the asset digest and the unversioned asset path,
which will normally be an AssetKey.
"""
asset_digest = None
asset_path = path
if StaticContent.is_versioned_asset_path(asset_path):
result = re.match(VERSIONED_ASSETS_PATTERN, asset_path)
if result is not None:
asset_digest = result.groups()[1]
                asset_path = re.sub(VERSIONED_ASSETS_PATTERN, '', asset_path)
return (asset_digest, asset_path)
@staticmethod
def add_version_to_asset_path(path, version):
"""
Adds a prefix to an asset path indicating the asset's version.
"""
# Don't version an already-versioned path.
if StaticContent.is_versioned_asset_path(path):
return path
structure_version = 'v{}'.format(STATIC_CONTENT_VERSION)
        return u'{}/{}/{}{}'.format(VERSIONED_ASSETS_PREFIX, structure_version, version, path)
@staticmethod
def get_asset_key_from_path(course_key, path):
"""
Parses a path, extracting an asset key or creating one.
Args:
course_key: key to the course which owns this asset
path: the path to said content
Returns:
AssetKey: the asset key that represents the path
"""
# Clean up the path, removing any static prefix and any leading slash.
if path.startswith('/static/'):
path = path[len('/static/'):]
# Old-style asset keys start with `/`, so don't try and strip it
# in that case.
if not path.startswith('/c4x'):
path = path.lstrip('/')
try:
return AssetKey.from_string(path)
except InvalidKeyError:
# If we couldn't parse the path, just let compute_location figure it out.
# It's most likely a path like /image.png or something.
return StaticContent.compute_location(course_key, path)
@staticmethod
def is_excluded_asset_type(path, excluded_exts):
"""
Check if this is an allowed file extension to serve.
Some files aren't served through the CDN in order to avoid same-origin policy/CORS-related issues.
"""
return any(path.lower().endswith(excluded_ext.lower()) for excluded_ext in excluded_exts)
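    # Worked example of the versioning helpers above (the digest value is made up):
    #
    #     path = '/static/images/logo.png'
    #     digest = 'd41d8cd98f00b204e9800998ecf8427e'
    #     versioned = StaticContent.add_version_to_asset_path(path, digest)
    #     # -> '/assets/courseware/v1/d41d8cd98f00b204e9800998ecf8427e/static/images/logo.png'
    #     StaticContent.parse_versioned_asset_path(versioned)
    #     # -> ('d41d8cd98f00b204e9800998ecf8427e', '/static/images/logo.png')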
|
pyreaclib/pyreaclib
|
pynucastro/networks/rate_collection.py
|
Python
|
bsd-3-clause
| 34,049
| 0.002731
|
"""A collection of classes and methods to deal with collections of
rates that together make up a network."""
# Common Imports
import warnings
import functools
import math
import os
from operator import mul
from collections import OrderedDict
from ipywidgets import interact
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.ticker import MaxNLocator
import networkx as nx
# Import Rate
from pynucastro.rates import Rate, Nucleus, Library
mpl.rcParams['figure.dpi'] = 100
class Composition:
"""a composition holds the mass fractions of the nuclei in a network
-- useful for evaluating the rates
"""
def __init__(self, nuclei, small=1.e-16):
"""nuclei is an iterable of the nuclei (Nucleus objects) in the network"""
if not isinstance(nuclei[0], Nucleus):
raise ValueError("must supply an iterable of Nucleus objects")
else:
self.X = {k: small for k in nuclei}
def set_solar_like(self, Z=0.02):
""" approximate a solar abundance, setting p to 0.7, He4 to 0.3 - Z and
the remainder evenly distributed with Z """
num = len(self.X)
rem = Z/(num-2)
for k in self.X:
if k == Nucleus("p"):
self.X[k] = 0.7
elif k.raw == "he4":
self.X[k] = 0.3 - Z
else:
self.X[k] = rem
self.normalize()
def set_all(self, xval):
""" set all species to a particular value """
for k in self.X:
self.X[k] = xval
def set_nuc(self, name, xval):
""" set nuclei name to the mass fraction xval """
for k in self.X:
if k.raw == name:
self.X[k] = xval
break
def normalize(self):
""" normalize the mass fractions to sum to 1 """
X_sum = sum(self.X[k] for k in self.X)
for k in self.X:
self.X[k] /= X_sum
def get_molar(self):
""" return a dictionary of molar fractions"""
molar_frac = {k: v/k.A for k, v in self.X.items()}
return molar_frac
def eval_ye(self):
""" return the electron fraction """
zvec = []
avec = []
xvec = []
for n in self.X:
zvec.append(n.Z)
avec.append(n.A)
xvec.append(self.X[n])
zvec = np.array(zvec)
avec = np.array(avec)
xvec = np.array(xvec)
electron_frac = np.sum(zvec*xvec/avec)/np.sum(xvec)
return electron_frac
def __str__(self):
ostr = ""
for k in self.X:
ostr += f" X({k}) : {self.X[k]}\n"
return ostr
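# Small usage sketch (nuclei chosen arbitrarily for the example):
#
#     comp = Composition([Nucleus("p"), Nucleus("he4"), Nucleus("c12")])
#     comp.set_solar_like(Z=0.02)   # X(p)=0.7, X(he4)=0.28, the rest shares Z
#     ye = comp.eval_ye()           # electron fraction of this mixture
#     molar = comp.get_molar()      # {Nucleus: X/A} molar fractions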
class RateCollection:
""" a collection of rates that together define a network """
    pynucastro_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
def __init__(self, rate_files=None, libraries=None, rates=None, precedence=()):
"""
rate_files are the files that together define the network. This
can be any iterable or single string.
This can include Reaclib library files storing multiple rates.
If libraries is supplied, initialize a RateCollection using the rates
        in the Library object(s) in list 'libraries'.
If rates is supplied, initialize a RateCollection using the
Rate objects in the list 'rates'.
        Precedence should be a sequence of rate labels (e.g. wc17) to be used to
resolve name conflicts. If a nonempty sequence is provided, the rate
collection will automatically be scanned for multiple rates with the
same name. If all of their labels were given a ranking, the rate with
the label that comes first in the sequence will be retained and the
rest discarded.
Any combination of these options may be supplied.
"""
self.files = []
self.rates = []
self.library = None
if rate_files:
if isinstance(rate_files, str):
rate_files = [rate_files]
self._read_rate_files(rate_files)
if rates:
if isinstance(rates, Rate):
rates = [rates]
try:
for r in rates:
assert isinstance(r, Rate)
except:
print('Expected Rate object or list of Rate objects passed as the rates argument.')
raise
else:
rlib = Library(rates=rates)
if not self.library:
self.library = rlib
else:
self.library = self.library + rlib
if libraries:
if isinstance(libraries, Library):
libraries = [libraries]
try:
for lib in libraries:
assert isinstance(lib, Library)
except:
print('Expected Library object or list of Library objects passed as the libraries argument.')
raise
else:
if not self.library:
self.library = libraries.pop(0)
for lib in libraries:
self.library = self.library + lib
if self.library:
self.rates = self.rates + self.library.get_rates()
if precedence:
self._make_distinguishable(precedence)
# get the unique nuclei
u = []
for r in self.rates:
t = set(r.reactants + r.products)
u = set(list(u) + list(t))
self.unique_nuclei = sorted(u)
# now make a list of each rate that touches each nucleus
# we'll store this in a dictionary keyed on the nucleus
self.nuclei_consumed = OrderedDict()
self.nuclei_produced = OrderedDict()
for n in self.unique_nuclei:
self.nuclei_consumed[n] = [r for r in self.rates if n in r.reactants]
self.nuclei_produced[n] = [r for r in self.rates if n in r.products]
# Re-order self.rates so Reaclib rates come first,
# followed by Tabular rates. This is needed if
# reaclib coefficients are targets of a pointer array
# in the Fortran network.
# It is desired to avoid wasting array size
# storing meaningless Tabular coefficient pointers.
self.rates = sorted(self.rates,
key=lambda r: r.chapter == 't')
self.tabular_rates = []
self.reaclib_rates = []
for n, r in enumerate(self.rates):
if r.chapter == 't':
self.tabular_rates.append(n)
elif isinstance(r.chapter, int):
self.reaclib_rates.append(n)
else:
print('ERROR: Chapter type unknown for rate chapter {}'.format(
str(r.chapter)))
exit()
def _read_rate_files(self, rate_files):
# get the rates
self.files = rate_files
for rf in self.files:
try:
rflib = Library(rf)
except:
print(f"Error reading library from file: {rf}")
raise
else:
if not self.library:
self.library = rflib
else:
self.library = self.library + rflib
def get_nuclei(self):
""" get all the nuclei that are part of the network """
return self.unique_nuclei
def evaluate_rates(self, rho, T, composition):
"""evaluate the rates for a specific density, temperature, and
composition"""
rvals = OrderedDict()
ys = composition.get_molar()
y_e = composition.eval_ye()
for r in self.rates:
val = r.prefactor * rho**r.dens_exp * r.eval(T, rho * y_e)
if (r.weak_type == 'electron_capture' and not r.tabular):
val = val * y_e
yfac = functools.reduce(mul, [ys[q] for q in r.reactants])
rvals[r] = yfac * val
return rvals
def evaluate_ydots(self, rho, T, composition):
"""evaluate net rate of change of molar a
|
carolFrohlich/nipype
|
nipype/pipeline/plugins/ipython.py
|
Python
|
bsd-3-clause
| 4,707
| 0.002337
|
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Parallel workflow execution via IPython controller
"""
from __future__ import print_function, division, unicode_literals, absolute_import
from future import standard_library
standard_library.install_aliases()
from future.utils import raise_from
from pickle import dumps
import sys
from .base import (DistributedPluginBase, logger, report_crash)
IPython_not_loaded = False
try:
from IPython import __version__ as IPyversion
from ipyparallel.error import TimeoutError
except:
IPython_not_loaded = True
def execute_task(pckld_task, node_config, updatehash):
from socket import gethostname
from traceback import format_exc
from nipype import config, logging
traceback = None
result = None
import os
cwd = os.getcwd()
try:
config.update_config(node_config)
logging.update_logging(config)
from pickle import loads
task = loads(pckld_task)
result = task.run(updatehash=updatehash)
except:
traceback = format_exc()
result = task.result
os.chdir(cwd)
return result, traceback, gethostname()
class IPythonPlugin(DistributedPluginBase):
"""Execute workflow with ipython
"""
def __init__(self, plugin_args=None):
if IPython_not_loaded:
raise ImportError('Please install ipyparallel to use this plugin.')
super(IPythonPlugin, self).__init__(plugin_args=plugin_args)
valid_args = ('url_file', 'profile', 'cluster_id', 'context', 'debug',
'timeout', 'config', 'username', 'sshserver', 'sshkey',
'password', 'paramiko')
self.client_args = {arg: plugin_args[arg]
for arg in valid_args if arg in plugin_args}
self.iparallel = None
self.taskclient = None
self.taskmap = {}
self._taskid = 0
def run(self, graph, config, updatehash=False):
"""Executes a pre-defined pipeline is distributed approaches
based on IPython's ipyparallel processing interface
"""
# retrieve clients again
try:
name = 'ipyparallel'
__import__(name)
self.iparallel = sys.modules[name]
except ImportError as e:
raise_from(ImportError("ipyparallel not found. Parallel execution "
"will be unavailable"), e)
try:
self.taskclient = self.iparallel.Client(**self.client_args)
except Exception as e:
if isinstance(e, TimeoutError):
raise_from(Exception("No IPython clients found."), e)
if isinstance(e, IOError):
raise_from(Exception("ipcluster/ipcontroller has not been started"), e)
if isinstance(e, ValueError):
raise_from(Exception("Ipython kernel not installed"), e)
else:
raise e
return super(IPythonPlugin, self).run(graph, config, updatehash=updatehash)
def _get_result(self, taskid):
if taskid not in self.taskmap:
raise ValueError('Task %d not in pending list' % taskid)
if self.taskmap[taskid].ready():
result, traceback, hostname = self.taskmap[taskid].get()
result_out = dict(result=None, traceback=None)
result_out['result'] = result
result_out['traceback'] = traceback
result_out['hostname'] = hostname
return result_out
else:
return None
def _submit_job(self, node, updatehash=False):
pckld_node = dumps(node, 2)
result_object = self.taskclient.load_balanced_view().apply(execute_task,
pckld_node,
node.config,
updatehash)
self._taskid += 1
self.taskmap[self._taskid] = result_object
return self._taskid
def _report_crash(self, node, result=None):
if result and result['traceback']:
node._result = result['result']
node._traceback = result['traceback']
return report_crash(node,
traceback=result['traceback'])
else:
return report_crash(node)
def _clear_task(self, taskid):
if IPyversion >= '0.11':
logger.debug("Clearing id: %d"
|
% taskid)
self.taskclient.purge_results(self.taskmap[taskid])
del self.taskmap[taskid]
|
OCA/account-financial-tools
|
account_fiscal_position_vat_check/__manifest__.py
|
Python
|
agpl-3.0
| 637
| 0
|
# Copyright 2013-2020 Akretion France (https://akretion.com/)
# @author: Alexis de Lattre <alexis.delattre@akretion.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
{
"name": "Account Fiscal Position VAT Check",
"version": "14.0.1.0.0",
"category": "Invoices & Payments",
"license": "AGPL-3",
"summary": "Check VAT on invoice validation",
"author"
|
: "Akretion,Odoo Community Association (OCA)",
"website": "https://github.com/OCA/account-financial-tools",
"depends": ["account", "base_vat"],
"data": [
"views/account_fiscal_position
|
.xml",
],
"installable": True,
}
|
plazmer/pyrtsp
|
rtcp_datagram.py
|
Python
|
gpl-2.0
| 6,169
| 0.003242
|
# -*- coding: utf-8 -*-
# RTCP Datagram Module
from struct import unpack, pack
debug = 0
# Receiver Reports included in Sender Report
class Report:
SSRC = 0
FractionLost = 0
CumulativeNumberOfPacketsLostH = 0
CumulativeNumberOfPacketsLostL = 0
ExtendedHighestSequenceNumberReceived = 0
InterarrivalJitter = 0
LastSR = 0
DelaySinceLastSR = 0
# Source Description
class SDES:
SSRC = 0
CNAME = ''
NAME = ''
EMAIL = ''
PHONE = ''
LOC = ''
TOOL = ''
NOTE = ''
PRIV = ''
class RTCPDatagram(object):
    'RTCP packet parser and generator'
def __init__(self):
self.Datagram = ''
# SR specific
self.SSRC_sender = 0
self.NTP_TimestampH = 0
self.NTP_TimestampL = 0
self.RTP_Timestamp = 0
self.SenderPacketCount = 0
self.SenderOctetCount = 0
self.Reports = []
self.ProfileSpecificExtension = ''
# SDES specific
self.SourceDescriptions = []
def loadDatagram(self, DatagramIn):
self.Datagram = DatagramIn
def parse(self):
# RTCP parsing is complete
# including SDES, BYE and APP
# RTCP Header
(Ver_P_RC,
PacketType,
Length) = unpack('!BBH', self.Datagram[:4])
Version = (Ver_P_RC & 0b11000000) >> 6
Padding = (Ver_P_RC & 0b00100000) >> 5
# Byte offset
off = 4
# Sender's Report
if PacketType == 200:
# Sender's information
(self.SSRC_sender,
self.NTP_TimestampH,
self.NTP_TimestampL,
self.RTP_Timestamp,
self.SenderPacketCount,
self.SenderOctetCount) = unpack('!IIIIII', self.Datagram[off: off + 24])
off += 24
ReceptionCount = Ver_P_RC & 0b00011111
if debug:
print 'SDES: SR from', str(self.SSRC_sender)
# Included Receiver Reports
self.Reports = []
i = 0
for i in range(ReceptionCount):
self.Reports.append(Report())
                (self.Reports[i].SSRC,
                 self.Reports[i].FractionLost,
                 self.Reports[i].CumulativeNumberOfPacketsLostH,
                 self.Reports[i].CumulativeNumberOfPacketsLostL,
                 self.Reports[i].ExtendedHighestSequenceNumberReceived,
                 self.Reports[i].InterarrivalJitter,
                 self.Reports[i].LastSR,
                 self.Reports[i].DelaySinceLastSR) = unpack('!IBBHIIII', self.Datagram[off: off + 24])
off += 24
# Source Description (SDES)
elif PacketType == 202:
# RC now is SC
SSRCCount = Ver_P_RC & 0b00011111
self.SourceDescriptions = []
i = 0
for i in range(SSRCCount):
self.SourceDescriptions.append(SDES())
SSRC, = unpack('!I', self.Datagram[off: off + 4])
off += 4
self.SourceDescriptions[i].SSRC = SSRC
SDES_Item = -1
# Go on the list of descriptions
while SDES_Item != 0:
SDES_Item, = unpack('!B', self.Datagram[off])
off += 1
if SDES_Item != 0:
SDES_Length, = unpack('!B', self.Datagram[off])
off += 1
Value = self.Datagram[off: off + SDES_Length]
off += SDES_Length
if debug:
print 'SDES:', SDES_Item, Value
if SDES_Item == 1:
self.SourceDescriptions[i].CNAME = Value
elif SDES_Item == 2:
self.SourceDescriptions[i].NAME = Value
elif SDES_Item == 3:
self.SourceDescriptions[i].EMAIL = Value
elif SDES_Item == 4:
self.SourceDescriptions[i].PHONE = Value
elif SDES_Item == 5:
self.SourceDescriptions[i].LOC = Value
elif SDES_Item == 6:
self.SourceDescriptions[i].TOOL = Value
elif SDES_Item == 7:
self.SourceDescriptions[i].NOTE = Value
elif SDES_Item == 8:
self.SourceDescriptions[i].PRIV = Value
# Extra parsing for PRIV is needed
elif SDES_Item == 0:
# End of list. Padding to 32 bits
while (off % 4):
off += 1
# BYE Packet
elif PacketType == 203:
SSRCCount = Ver_P_RC & 0b00011111
i = 0
for i in range(SSRCCount):
SSRC, = unpack('!I', self.Datagram[off: off + 4])
off += 4
print 'SDES: SSRC ' + str(SSRC) + ' is saying goodbye.'
# Application specific packet
elif PacketType == 204:
Subtype = Ver_P_RC & 0b00011111
SSRC, = unpack('!I', self.Datagram[off: off + 4])
Name = self.Datagram[off + 4: off + 8]
AppData = self.Datagram[off + 8: off + Length]
print 'SDES: APP Packet "' + Name + '" from SSRC ' + str(SSRC) + '.'
off += Length
# Check if there is something else in the datagram
if self.Datagram[off:]:
self.Datagram = self.Datagram[off:]
self.parse()
def generateRR(self):
# Ver 2, Pad 0, RC 1
Ver_P_RC = 0b10000001
# PT 201, Length 7, SSRC 0xF00F - let it be our ID
Header = pack('!BBHI', Ver_P_RC, 201, 7, 0x0000F00F)
NTP_32 = (self.NTP_TimestampH & 0x0000FFFF) + ((self.NTP_TimestampL & 0xFFFF0000) >> 16)
# No lost packets, no delay in receiving data, RR sent right after receiving SR
# Instead of self.SenderPacketCount should be proper value
ReceiverReport = pack('!IBBHIIII', self.SSRC_sender, 0, 0, 0, self.SenderPacketCount, 1, NTP_32, 1)
return Header + ReceiverReport
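# Typical use (sketch only; the raw bytes would come from the RTCP socket):
#
#     rtcp = RTCPDatagram()
#     rtcp.loadDatagram(data_from_socket)
#     rtcp.parse()                # fills sender info, receiver reports, SDES items
#     reply = rtcp.generateRR()   # minimal Receiver Report to send back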
|
googleads/googleads-shopping-samples
|
python/shopping/content/datafeeds/update.py
|
Python
|
apache-2.0
| 1,688
| 0.007109
|
#!/usr/bin/python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Updates the specified datafeed on the specified account."""
from __future__ import print_function
import argparse
import sys
from shopping.content import common
# Declare command-line flags.
argparser = argparse.ArgumentParser(add_help=False)
argparser.add_argument('datafeed_id', help='The ID of the datafeed to update.')
def main(argv):
# Authenticate and construct service.
service, config, flags = common.init(
argv, __doc__, parents=[argparser])
merchant_id = config['merchantId']
datafeed_id = flags.datafeed_id
# Get the datafeed to be changed
datafeed = service.datafeeds().get(
      merchantId=merchant_id, datafeedId=datafeed_id).execute()
# Changing the scheduled fetch time to 7:00.
datafeed['fetchSchedule']['hour'] = 7
request = service.datafeeds().update(
      merchantId=merchant_id, datafeedId=datafeed_id, body=datafeed)
result = request.execute()
print('Datafeed with ID %s and fetchSchedule %s was updated.' %
(result['id'], str(result['fetchSchedule'])))
if __name__ == '__main__':
main(sys.argv)
|
michalkurka/h2o-3
|
h2o-py/tests/testdir_algos/glm/pyunit_PUBDEV_7481_lambda_search_alpha_array_multinomial_cv.py
|
Python
|
apache-2.0
| 2,324
| 0.012909
|
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.glm import H2OGeneralizedLinearEstimator as glm
# Given alpha array and lambda_search=True, build two cross-validation models, one with validation dataset
# and one without for multinomial. Since they use the metrics from cross-validation, they should come up with
# the same models.
def glm_alpha_array_with_lambda_search_cv():
# read in the dataset and construct training set (and validation set)
print("Testing glm cross-validation with alpha array, lambda_search for multinomial models.")
h2o_data = h2o.import_file(pyunit_utils.locate("smalldata/glm_test/multinomial_10_classes_10_cols_10000_Rows_train.csv"))
enum_columns = ["C1", "C2", "C3", "C4", "C5"]
for cname in enum_columns:
        h2o_data[cname] = h2o_data[cname].asfactor()
myY = "C11"
h2o_data["C11"] = h2o_data["C11"].asfactor()
myX = h2o_data.names.remove(myY)
data_frames = h2o_data.split_frame(ratios=[0.8], seed=7)
training_data = data_frames[0]
test_data = data_frames[1]
# build model with CV but no validation dataset
cv_model = glm(family='multinomial',alpha=[0.1], lambda_search=True, nfolds = 3, nlambdas=5,
fold_assignment="modulo")
cv_model.train(training_frame=training_data,x=myX,y=myY)
cv_r = glm.getGLMRegularizationPath(cv_model)
# build model with CV and with validation dataset
cv_model_valid = glm(family='multinomial',alpha=[0.1], lambda_search=True, nfolds = 3, nlambdas=5,
fold_assignment="modulo")
    cv_model_valid.train(training_frame=training_data, validation_frame = test_data, x=myX,y=myY)
cv_r_valid = glm.getGLMRegularizationPath(cv_model_valid)
for l in range(len(cv_r['lambdas'])):
print("comparing coefficients for submodel {0} with lambda {1}, alpha {2}".format(l, cv_r_valid["lambdas"][l], cv
|
_r_valid["alphas"][l]))
pyunit_utils.assertEqualCoeffDicts(cv_r['coefficients'][l], cv_r_valid['coefficients'][l], tol=1e-6)
pyunit_utils.assertEqualCoeffDicts(cv_r['coefficients_std'][l], cv_r_valid['coefficients_std'][l], tol=1e-6)
if __name__ == "__main__":
pyunit_utils.standalone_test(glm_alpha_array_with_lambda_search_cv)
else:
glm_alpha_array_with_lambda_search_cv()
|
IOsipov/androguard
|
androguard/decompiler/dad/basic_blocks.py
|
Python
|
apache-2.0
| 10,681
| 0.000094
|
# This file is part of Androguard.
#
# Copyright (c) 2012 Geoffroy Gueguen <geoffroy.gueguen@gmail.com>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from collections import defaultdict
from androguard.decompiler.dad.opcode_ins import INSTRUCTION_SET
from androguard.decompiler.dad.node import Node
logger = logging.getLogger('dad.basic_blocks')
class BasicBlock(Node):
def __init__(self, name, block_ins):
super(BasicBlock, self).__init__(name)
self.ins = block_ins
self.ins_range = None
self.loc_ins = None
self.var_to_declare = set()
def get_ins(self):
return self.ins
def get_loc_with_ins(self):
if self.loc_ins is None:
self.loc_ins = zip(range(*self.ins_range), self.ins)
return self.loc_ins
def remove_ins(self, loc, ins):
self.ins.remove(ins)
self.loc_ins.remove((loc, ins))
def add_ins(self, new_ins_list):
for new_ins in new_ins_list:
self.ins.append(new_ins)
def add_variable_declaration(self, variable):
self.var_to_declare.add(variable)
def number_ins(self, num):
last_ins_num = num + len(self.ins)
self.ins_range = [num, last_ins_num]
self.loc_ins = None
return last_ins_num
class StatementBlock(BasicBlock):
def __init__(self, name, block_ins):
super(StatementBlock, self).__init__(name, block_ins)
self.type.is_stmt = True
def visit(self, visitor):
return visitor.visit_statement_node(self)
def __str__(self):
return '%d-Statement(%s)' % (self.num, self.name)
class ReturnBlock(BasicBlock):
def __init__(self, name, block_ins):
super(ReturnBlock, self).__init__(name, block_ins)
self.type.is_return = True
def visit(self, visitor):
return visitor.visit_return_node(self)
def __str__(self):
return '%d-Return(%s)' % (self.num, self.name)
class ThrowBlock(BasicBlock):
def __init__(self, name, block_ins):
super(ThrowBlock, self).__init__(name, block_ins)
self.type.is_throw = True
def visit(self, visitor):
return visitor.visit_throw_node(self)
def __str__(self):
return '%d-Throw(%s)' % (self.num, self.name)
class SwitchBlock(BasicBlock):
def __init__(self, name, switch, block_ins):
super(SwitchBlock, self).__init__(name, block_ins)
self.switch = switch
self.cases = []
self.default = None
self.node_to_case = defaultdict(list)
self.type.is_switch = True
def add_case(self, case):
self.cases.append(case)
def visit(self, visitor):
return visitor.visit_switch_node(self)
def copy_from(self, node):
super(SwitchBlock, self).copy_from(node)
self.cases = node.cases[:]
self.switch = node.switch[:]
def update_attribute_with(self, n_map):
super(SwitchBlock, self).update_attribute_with(n_map)
self.cases = [n_map.get(n, n) for n in self.cases]
for node1, node2 in n_map.iteritems():
if node1 in self.node_to_case:
self.node_to_case[node2] = self.node_to_case.pop(node1)
def order_cases(self):
values = self.switch.get_values()
if len(values) < len(self.cases):
self.default = self.cases.pop(0)
for case, node in zip(values, self.cases):
self.node_to_case[node].append(case)
def __str__(self):
return '%d-Switch(%s)' % (self.num, self.name)
class CondBlock(BasicBlock):
def __init__(self, name, block_ins):
super(CondBlock, self).__init__(name, block_ins)
self.true = None
self.false = None
self.type.is_cond = True
def update_attribute_with(self, n_map):
super(CondBlock, self).update_attribute_with(n_map)
self.true = n_map.get(self.true, self.true)
self.false = n_map.get(self.false, self.false)
def neg(self):
if len(self.ins) != 1:
raise RuntimeWarning('Condition should have only 1 instruction !')
self.ins[-1].neg()
def visit(self, visitor):
return visitor.visit_cond_node(self)
def visit_cond(self, visitor):
if len(self.ins) != 1:
raise RuntimeWarning('Condition should have only 1 instruction !')
return visitor.visit_ins(self.ins[-1])
def __str__(self):
return '%d-If(%s)' % (self.num, self.name)
class Condition(object):
def __init__(self, cond1, cond2, isand, isnot):
self.cond1 = cond1
self.cond2 = cond2
self.isand = isand
self.isnot = isnot
def neg(self):
self.isand = not self.isand
self.cond1.neg()
self.cond2.neg()
def get_ins(self):
lins = []
lins.extend(self.cond1.get_ins())
lins.extend(self.cond2.get_ins())
return lins
def get_loc_with_ins(self):
loc_ins = []
loc_ins.extend(self.cond1.get_loc_with_ins())
loc_ins.extend(self.cond2.get_loc_with_ins())
return loc_ins
def visit(self, visitor):
return visitor.visit_short_circuit_condition(self.isnot, self.isand,
self.cond1, self.cond2)
def __str__(self):
if self.isnot:
ret = '!%s %s %s'
else:
ret = '%s %s %s'
return ret % (self.cond1, ['||', '&&'][self.isand], self.cond2)
class ShortCircuitBlock(CondBlock):
def __init__(self, name, cond):
super(ShortCircuitBlock, self).__init__(name, None)
self.cond = cond
def get_ins(self):
return self.cond.get_ins()
def get_loc_with_ins(self):
return self.cond.get_loc_with_ins()
def neg(self):
self.cond.neg()
def visit_cond(self, visitor):
return self.cond.visit(visitor)
def __str__(self):
return '%d-SC(%s)' % (self.num, self.cond)
class LoopBlock(CondBlock):
def __init__(self, name, cond):
super(LoopBlock, self).__init__(name, None)
self.cond = cond
def get_ins(self):
return self.cond.get_ins()
def neg(self):
self.cond.neg()
    def get_loc_with_ins(self):
return self.cond.get_loc_with_ins()
def visit(self, visitor):
return visitor.visit_loop_node(self)
def visit_cond(self, visitor):
return self.cond.visit_cond(visitor)
def update_attribute_with(self, n_map):
super(LoopBlock, self).update_attribute_with(n_map)
self.cond.update_attribute_with(n_map)
def __str__(self):
if self.looptype.is_pretest:
if self.false in self.loop_nodes:
return '%d-While(!%s)[%s]' % (self.num, self.name, self.cond)
return '%d-While(%s)[%s]' % (self.num, self.name, self.cond)
elif self.looptype.is_posttest:
return '%d-DoWhile(%s)[%s]' % (self.num, self.name, self.cond)
elif self.looptype.is_endless:
return '%d-WhileTrue(%s)[%s]' % (self.num, self.name, self.cond)
return '%d-WhileNoType(%s)' % (self.num, self.name)
class TryBlock(BasicBlock):
def __init__(self, node):
super(TryBlock, self).__init__('Try-%s' % node.name, None)
self.try_start = node
self.catch = []
# FIXME:
@property
def num(self):
return self.try_start.num
@num.setter
def num(self, value):
pass
def add_catch_node(self, node):
self.catch.append(node)
def visit(self, visitor):
visitor.visit_try_node(self)
def __str__(self):
return 'Try(%s)[%s]' % (self.name,
|
zamattiac/SHARE
|
providers/com/dailyssrn/__init__.py
|
Python
|
apache-2.0
| 62
| 0
|
default_app_config = 'providers.com.dailyssrn.apps.AppConfig'
|
manhhomienbienthuy/scikit-learn
|
examples/semi_supervised/plot_self_training_varying_threshold.py
|
Python
|
bsd-3-clause
| 4,008
| 0.000749
|
"""
=============================================
Effect of varying threshold for self-training
=============================================
This example illustrates the effect of a varying threshold on self-training.
The `breast_cancer` dataset is loaded, and labels are deleted such that only 50
out of 569 samples have labels. A `SelfTrainingClassifier` is fitted on this
dataset, with varying thresholds.
The upper graph shows the amount of labeled samples that the classifier has
available by the end of fit, and the accuracy of the classifier. The lower
graph shows the last iteration in which a sample was labeled. All values are
cross validated with 3 folds.
At low thresholds (in [0.4, 0.5]), the classifier learns from samples that were
labeled with a low confidence. These low-confidence samples are likely to have
incorrect predicted labels, and as a result, fitting on these incorrect labels
produces a poor accuracy. Note that the classifier labels almost all of the
samples, and only takes one iteration.
For very high thresholds (in [0.9, 1)) we observe that the classifier does not
augment its dataset (the amount of self-labeled samples is 0). As a result, the
accuracy achieved with a threshold of 0.9999 is the same as a normal supervised
classifier would achieve.
The optimal accuracy lies in between both of these extremes at a threshold of
around 0.7.
"""
# Authors: Oliver Rausch <rauscho@ethz.ch>
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.svm import SVC
from sklearn.model_selection import StratifiedKFold
from sklearn.semi_supervised import SelfTrainingClassifier
from sklearn.metrics import accuracy_score
from sklearn.utils import shuffle
n_splits = 3
X, y = datasets.load_breast_cancer(return_X_y=True)
X, y = shuffle(X, y, random_state=42)
y_true = y.copy()
y[50:] = -1
total_samples = y.shape[0]
base_classifier = SVC(probability=True, gamma=0.001, random_state=42)
x_values = np.arange(0.4, 1.05, 0.05)
x_values = np.append(x_values, 0.99999)
scores = np.empty((x_values.shape[0], n_splits))
amount_labeled = np.empty((x_values.shape[0], n_splits))
amount_iterations = np.empty((x_values.shape[0], n_splits))
for (i, threshold) in enumerate(x_values):
self_training_clf = SelfTrainingClassifier(base_classifier, threshold=threshold)
# We need manual cross validation so that we don't treat -1 as a separate
# class when computing accuracy
skfolds = StratifiedKFold(n_splits=n_splits)
for fold, (train_index, test_index) in enumerate(skfolds.split(X, y)):
X_train = X[train_index]
y_train = y[train_index]
X_test = X[test_index]
y_test = y[test_index]
y_test_true = y_true[test_index]
self_training_clf.fit(X_train, y_train)
        # The amount of labeled samples available at the end of fitting
amount_labeled[i, fold] = (
total_samples
            - np.unique(self_training_clf.labeled_iter_, return_counts=True)[1][0]
)
# The last iteration the classifier labeled a sample in
amount_iterations[i, fold] = np.max(self_training_clf.labeled_iter_)
y_pred = self_training_clf.predict(X_test)
scores[i, fold] = accuracy_score(y_test_true, y_pred)
ax1 = plt.subplot(211)
ax1.errorbar(
    x_values, scores.mean(axis=1), yerr=scores.std(axis=1), capsize=2, color="b"
)
ax1.set_ylabel("Accuracy", color="b")
ax1.tick_params("y", colors="b")
ax2 = ax1.twinx()
ax2.errorbar(
x_values,
amount_labeled.mean(axis=1),
yerr=amount_labeled.std(axis=1),
capsize=2,
color="g",
)
ax2.set_ylim(bottom=0)
ax2.set_ylabel("Amount of labeled samples", color="g")
ax2.tick_params("y", colors="g")
ax3 = plt.subplot(212, sharex=ax1)
ax3.errorbar(
x_values,
amount_iterations.mean(axis=1),
yerr=amount_iterations.std(axis=1),
capsize=2,
color="b",
)
ax3.set_ylim(bottom=0)
ax3.set_ylabel("Amount of iterations")
ax3.set_xlabel("Threshold")
plt.show()
|
certeu/do-portal
|
tests/test_av.py
|
Python
|
bsd-3-clause
| 343
| 0
|
from flask import url_for
from flask_sqlalchemy import BaseQuery
def test_create_av_scan(client, monkeypatch, malware_sample):
    monkeypatch.setattr(BaseQuery, 'first_or_404', lambda x: True)
    rv = client.post(url_for('api.add_av_scan'),
                     json={'files': [malware_sample._asdict()]})
assert rv.status_code == 202
|
kmaglione/amo-validator
|
tests/test_js_operators.py
|
Python
|
bsd-3-clause
| 7,066
| 0.000849
|
from math import isnan
from nose.tools import eq_
from js_helper import _do_real_test_raw, _do_test_raw, _do_test_scope, _get_var
def test_assignment_with_pollution():
"""
Access a bunch of identifiers, but do not write to them. Accessing
undefined globals should not create scoped objects.
"""
assert not _do_real_test_raw("""
var x = "";
x = foo;
x = bar;
x = zap;
x = baz; // would otherwise cause pollution errors.
""").failed()
def test_basic_math():
'Tests that contexts work and that basic math is executed properly'
err = _do_test_raw("""
var x = 1;
var y = 2;
var z = x + y;
var dbz = 1;
var dbz1 = 1;
dbz = dbz / 0;
dbz1 = dbz1 % 0;
var dbz2 = 1;
var dbz3 = 1;
dbz2 /= 0;
dbz3 %= 0;
var a = 2 + 3;
var b = a - 1;
var c = b * 2;
""")
assert err.message_count == 0
assert _get_var(err, 'x') == 1
assert _get_var(err, 'y') == 2
assert _get_var(err, 'z') == 3
assert _get_var(err, 'dbz') == float('inf') # Spidermonkey does this.
assert isnan(_get_var(err, 'dbz1')) # ...and this.
assert _get_var(err, 'dbz2') == float('inf')
assert isnan(_get_var(err, 'dbz3'))
assert _get_var(err, 'a') == 5
assert _get_var(err, 'b') == 4
assert _get_var(err, 'c') == 8
def test_in_operator():
"Tests the 'in' operator."
err = _do_test_raw("""
var list = ["a",1,2,3,"foo"];
var dict = {"abc":123, "foo":"bar"};
// Must be true
var x = 0 in list;
var y = "abc" in dict;
// Must be false
var a = 5 in list;
var b = "asdf" in dict;
""")
assert err.message_count == 0
assert _get_var(err, 'x') == True
assert _get_var(err, 'y') == True
print _get_var(err, 'a'), '<<<'
assert _get_var(err, 'a') == False
assert _get_var(err, 'b') == False
def test_function_instanceof():
"""
Test that Function can be used with instanceof operators without error.
"""
assert not _do_test_raw("""
var x = foo();
print(x instanceof Function);
""").failed()
assert _do_test_raw("""
var x = foo();
print(Function(x));
""").failed()
def test_unary_typeof():
"""Test that the typeof operator does good."""
scope = _do_test_raw("""
var a = typeof(void(0)),
b = typeof(null),
c = typeof(true),
d = typeof(false),
e = typeof(new Boolean()),
f = typeof(new Boolean(true)),
g = typeof(Boolean()),
h = typeof(Boolean(false)),
i = typeof(Boolean(true)),
j = typeof(NaN),
k = typeof(Infinity),
l = typeof(-Infinity),
        m = typeof(Math.PI),
n = typeof(0),
o = typeof(1),
p = typeof(-1),
        q = typeof('0'),
r = typeof(Number()),
s = typeof(Number(0)),
t = typeof(new Number()),
u = typeof(new Number(0)),
v = typeof(new Number(1)),
x = typeof(function() {}),
y = typeof(Math.abs);
""")
eq_(_get_var(scope, 'a'), 'undefined')
eq_(_get_var(scope, 'b'), 'object')
eq_(_get_var(scope, 'c'), 'boolean')
eq_(_get_var(scope, 'd'), 'boolean')
eq_(_get_var(scope, 'e'), 'object')
eq_(_get_var(scope, 'f'), 'object')
eq_(_get_var(scope, 'g'), 'boolean')
eq_(_get_var(scope, 'h'), 'boolean')
eq_(_get_var(scope, 'i'), 'boolean')
# TODO: Implement "typeof" for predefined entities
# eq_(_get_var(scope, "j"), "number")
# eq_(_get_var(scope, "k"), "number")
# eq_(_get_var(scope, "l"), "number")
eq_(_get_var(scope, 'm'), 'number')
eq_(_get_var(scope, 'n'), 'number')
eq_(_get_var(scope, 'o'), 'number')
eq_(_get_var(scope, 'p'), 'number')
eq_(_get_var(scope, 'q'), 'string')
eq_(_get_var(scope, 'r'), 'number')
eq_(_get_var(scope, 's'), 'number')
eq_(_get_var(scope, 't'), 'object')
eq_(_get_var(scope, 'u'), 'object')
eq_(_get_var(scope, 'v'), 'object')
eq_(_get_var(scope, 'x'), 'function')
eq_(_get_var(scope, 'y'), 'function')
# TODO(basta): Still working on the delete operator...should be done soon.
#def test_delete_operator():
# """Test that the delete operator works correctly."""
#
# # Test that array elements can be destroyed.
# eq_(_get_var(_do_test_raw("""
# var x = [1, 2, 3];
# delete(x[2]);
# var value = x.length;
# """), "value"), 2)
#
#     # Test that the right array elements are destroyed.
# eq_(_get_var(_do_test_raw("""
# var x = [1, 2, 3];
# delete(x[2]);
# var value = x.toString();
# """), "value"), "1,2")
#
# eq_(_get_var(_do_test_raw("""
# var x = "asdf";
# delete x;
# var value = x;
# """), "value"), None)
#
# assert _do_test_raw("""
# delete(Math.PI);
# """).failed()
def test_logical_not():
"""Test that logical not is evaluated properly."""
scope = _do_test_raw("""
var a = !(null),
// b = !(var x),
c = !(void 0),
d = !(false),
e = !(true),
// f = !(),
g = !(0),
h = !(-0),
// i = !(NaN),
j = !(Infinity),
k = !(-Infinity),
l = !(Math.PI),
m = !(1),
n = !(-1),
o = !(''),
p = !('\\t'),
q = !('0'),
r = !('string'),
s = !(new String('')); // This should cover all type globals.
""")
eq_(_get_var(scope, 'a'), True)
# eq_(_get_var(scope, "b"), True)
eq_(_get_var(scope, 'c'), True)
eq_(_get_var(scope, 'd'), True)
eq_(_get_var(scope, 'e'), False)
# eq_(_get_var(scope, "f"), True)
eq_(_get_var(scope, 'g'), True)
eq_(_get_var(scope, 'h'), True)
# eq_(_get_var(scope, "i"), True)
eq_(_get_var(scope, 'j'), False)
eq_(_get_var(scope, 'k'), False)
eq_(_get_var(scope, 'l'), False)
eq_(_get_var(scope, 'm'), False)
eq_(_get_var(scope, 'n'), False)
eq_(_get_var(scope, 'o'), True)
eq_(_get_var(scope, 'p'), False)
eq_(_get_var(scope, 'q'), False)
eq_(_get_var(scope, 'r'), False)
eq_(_get_var(scope, 's'), False)
def test_concat_plus_infinity():
"""Test that Infinity is concatenated properly."""
_do_test_scope("""
var a = Infinity + "foo",
b = (-Infinity) + "foo",
c = "foo" + Infinity,
d = "foo" + (-Infinity);
""", {'a': 'Infinityfoo',
'b': '-Infinityfoo',
'c': 'fooInfinity',
'd': 'foo-Infinity'})
def test_simple_operators_when_dirty():
"""
Test that when we're dealing with dirty objects, binary operations don't
cave in the roof.
Note that this test (if it fails) may cause some ugly crashes.
"""
_do_test_raw("""
var x = foo(); // x is now a dirty object.
y = foo(); // y is now a dirty object as well.
""" +
"""y += y + x;""" * 100) # This bit makes the validator's head explode.
def test_overflow_errors():
"Test that OverflowErrors in traversal don't crash the validation process."
_do_test_raw("""
var x = Math.exp(-4*1000000*-0.0641515994108);
""")
|
spulec/moto
|
tests/test_s3control/test_s3control_config_integration.py
|
Python
|
apache-2.0
| 11,847
| 0.001013
|
import boto3
import json
import pytest
import sure # noqa # pylint: disable=unused-import
from boto3 import Session
from botocore.client import ClientError
from moto import settings, mock_s3control, mock_config
# All tests for s3-control cannot be run under the server without a modification of the
# hosts file on your system. This is due to the fact that the URL to the host is in the form of:
# ACCOUNT_ID.s3-control.amazonaws.com <-- That Account ID part is the problem. If you want to
# make use of the moto server, update your hosts file for `THE_ACCOUNT_ID_FOR_MOTO.localhost`
# and this will work fine.
if not settings.TEST_SERVER_MODE:
@mock_s3control
@mock_config
def test_config_list_account_pab():
from moto.s3.models import ACCOUNT_ID
client = boto3.client("s3control", region_name="us-west-2")
config_client = boto3.client("config", region_name="us-west-2")
# Create the aggregator:
account_aggregation_source = {
"AccountIds": [ACCOUNT_ID],
"AllAwsRegions": True,
}
config_client.put_configuration_aggregator(
ConfigurationAggregatorName="testing",
AccountAggregationSources=[account_aggregation_source],
)
# Without a PAB in place:
result = config_client.list_discovered_resources(
resourceType="AWS::S3::AccountPublicAccessBlock"
)
assert not result["resourceIdentifiers"]
result = config_client.list_aggregate_discovered_resources(
ResourceType="AWS::S3::AccountPublicAccessBlock",
ConfigurationAggregatorName="testing",
)
assert not result["ResourceIdentifiers"]
# Create a PAB:
client.put_public_access_block(
AccountId=ACCOUNT_ID,
PublicAccessBlockConfiguration={
"BlockPublicAcls": True,
"IgnorePublicAcls": True,
"BlockPublicPolicy": True,
"RestrictPublicBuckets": True,
},
)
# Test that successful queries work (non-aggregated):
result = config_client.list_discovered_resources(
resourceType="AWS::S3::AccountPublicAccessBlock"
)
assert result["resourceIdentifiers"] == [
{
"resourceType": "AWS::S3::AccountPublicAccessBlock",
"resourceId": ACCOUNT_ID,
}
]
result = config_client.list_discovered_resources(
resourceType="AWS::S3::AccountPublicAccessBlock",
resourceIds=[ACCOUNT_ID, "nope"],
)
assert result["resourceIdentifiers"] == [
{
"resourceType": "AWS::S3::AccountPublicAccessBlock",
"resourceId": ACCOUNT_ID,
}
]
result = config_client.list_discovered_resources(
resourceType="AWS::S3::AccountPublicAccessBlock", resourceName=""
)
assert result["resourceIdentifiers"] == [
{
"resourceType": "AWS::S3::AccountPublicAccessBlock",
"resourceId": ACCOUNT_ID,
}
]
# Test that successful queries work (aggregated):
result = config_client.list_aggregate_discovered_resources(
ResourceType="AWS::S3::AccountPublicAccessBlock",
ConfigurationAggregatorName="testing",
)
regions = {region for region in Session().get_available_regions("config")}
        for r in result["ResourceIdentifiers"]:
regions.remove(r.pop("SourceRegion"))
            assert r == {
"ResourceType": "AWS::S3::AccountPublicAccessBlock",
"SourceAccountId": ACCOUNT_ID,
"ResourceId": ACCOUNT_ID,
}
# Just check that the len is the same -- this should be reasonable
regions = {region for region in Session().get_available_regions("config")}
result = config_client.list_aggregate_discovered_resources(
ResourceType="AWS::S3::AccountPublicAccessBlock",
ConfigurationAggregatorName="testing",
Filters={"ResourceName": ""},
)
assert len(regions) == len(result["ResourceIdentifiers"])
result = config_client.list_aggregate_discovered_resources(
ResourceType="AWS::S3::AccountPublicAccessBlock",
ConfigurationAggregatorName="testing",
Filters={"ResourceName": "", "ResourceId": ACCOUNT_ID},
)
assert len(regions) == len(result["ResourceIdentifiers"])
result = config_client.list_aggregate_discovered_resources(
ResourceType="AWS::S3::AccountPublicAccessBlock",
ConfigurationAggregatorName="testing",
Filters={
"ResourceName": "",
"ResourceId": ACCOUNT_ID,
"Region": "us-west-2",
},
)
assert (
result["ResourceIdentifiers"][0]["SourceRegion"] == "us-west-2"
and len(result["ResourceIdentifiers"]) == 1
)
# Test aggregator pagination:
result = config_client.list_aggregate_discovered_resources(
ResourceType="AWS::S3::AccountPublicAccessBlock",
ConfigurationAggregatorName="testing",
Limit=1,
)
regions = sorted(
[region for region in Session().get_available_regions("config")]
)
assert result["ResourceIdentifiers"][0] == {
"ResourceType": "AWS::S3::AccountPublicAccessBlock",
"SourceAccountId": ACCOUNT_ID,
"ResourceId": ACCOUNT_ID,
"SourceRegion": regions[0],
}
assert result["NextToken"] == regions[1]
# Get the next region:
result = config_client.list_aggregate_discovered_resources(
ResourceType="AWS::S3::AccountPublicAccessBlock",
ConfigurationAggregatorName="testing",
Limit=1,
NextToken=regions[1],
)
assert result["ResourceIdentifiers"][0] == {
"ResourceType": "AWS::S3::AccountPublicAccessBlock",
"SourceAccountId": ACCOUNT_ID,
"ResourceId": ACCOUNT_ID,
"SourceRegion": regions[1],
}
# Non-aggregated with incorrect info:
result = config_client.list_discovered_resources(
resourceType="AWS::S3::AccountPublicAccessBlock", resourceName="nope"
)
assert not result["resourceIdentifiers"]
result = config_client.list_discovered_resources(
resourceType="AWS::S3::AccountPublicAccessBlock", resourceIds=["nope"]
)
assert not result["resourceIdentifiers"]
# Aggregated with incorrect info:
result = config_client.list_aggregate_discovered_resources(
ResourceType="AWS::S3::AccountPublicAccessBlock",
ConfigurationAggregatorName="testing",
Filters={"ResourceName": "nope"},
)
assert not result["ResourceIdentifiers"]
result = config_client.list_aggregate_discovered_resources(
ResourceType="AWS::S3::AccountPublicAccessBlock",
ConfigurationAggregatorName="testing",
Filters={"ResourceId": "nope"},
)
assert not result["ResourceIdentifiers"]
result = config_client.list_aggregate_discovered_resources(
ResourceType="AWS::S3::AccountPublicAccessBlock",
ConfigurationAggregatorName="testing",
Filters={"Region": "Nope"},
)
assert not result["ResourceIdentifiers"]
@mock_s3control
@mock_config
def test_config_get_account_pab():
from moto.s3.models import ACCOUNT_ID
client = boto3.client("s3control", region_name="us-west-2")
config_client = boto3.client("config", region_name="us-west-2")
# Create the aggregator:
account_aggregation_source = {
"AccountIds": [ACCOUNT_ID],
"AllAwsRegions": True,
}
config_client.put_configuration_aggregator(
ConfigurationAggregatorName="testing",
        AccountAggregationSources=[account_aggregation_source],
|
martinrusev/amonone
|
amon/apps/alerts/models/tests/alerts_model_test.py
|
Python
|
mit
| 13,755
| 0.012432
|
import unittest
from nose.tools import eq_
from django.contrib.auth import get_user_model
from amon.apps.alerts.models import AlertsModel
from amon.apps.processes.models import process_model
from amon.apps.plugins.models import plugin_model
from amon.apps.servers.models import server_model
from amon.apps.devices.models import volumes_model, interfaces_model
User = get_user_model()
class AlertsModelTest(unittest.TestCase):
def setUp(self):
User.objects.all().delete()
self.user_email = 'foo@test.com'
self.user = User.objects.create_user(password='qwerty', email=self.user_email)
self.account_id = 1
self.model = AlertsModel()
self.model.mongo.database = 'amontest'
self.collection = self.model.mongo.get_collection('alerts')
self.server_collection = self.model.mongo.get_collection('servers')
self.history_collection = self.model.mongo.get_collection('alert_history')
self.server_collection.insert({"name" : "test",
"key": "test_me",
"account_id": 199999
})
server = self.server_collection.find_one()
self.server_id = server['_id']
def tearDown(self):
self.user.delete()
User.objects.all().delete()
def _cleanup(self):
self.collection.remove()
process_model.collection.remove()
plugin_model.collection.remove()
interfaces_model.collection.remove()
volumes_model.collection.remove()
gauges_collection = plugin_model.gauge_collection.remove()
def add_initial_data_test(self):
self._cleanup()
default_alert = {
"above_below": "above",
"email_recepients": [],
"rule_type": "global",
"server": "all",
"period": 300,
"account_id": self.account_id
}
# Add initial data only if this is empty
self.collection.insert(default_alert)
assert self.collection.find().count() == 1
self.model.add_initial_data()
assert self.collection.find().count() == 1
self._cleanup()
assert self.collection.find().count() == 0
self.model.add_initial_data()
assert self.collection.find().count() == 3
self._cleanup()
def get_alerts_for_plugin_test(self):
self._cleanup()
plugin = plugin_model.get_or_create(server_id=self.server_id, name='testplugin')
gauge = plugin_model.get_or_create_gauge_by_name(plugin=plugin, name='gauge')
plugin_alert = {
"above_below": "above",
"rule_type": "plugin",
"server": self.server_id,
"gauge": gauge['_id'],
"plugin": plugin['_id'],
"account_id": self.account_id,
"key": "testkey",
"period": 0,
"metric_value": 5
}
for i in range(0,5):
try:
del plugin_alert['_id']
except:
pass
plugin_alert['period'] = i
plugin_alert['metric_value'] = i+5
self.model.collection.insert(plugin_alert)
result = self.model.get_alerts_for_plugin(plugin=plugin)
assert len(result) == 5
self._cleanup()
def save_alert_test(self):
self.collection.remove()
self.model.save({'rule': "test", 'server': self.server_id})
eq_(self.collection.count(), 1)
def update_test(self):
self.collection.remove()
self.model.save({'rule': "test" , 'server': self.server_id, 'period': 10})
alert = self.collection.find_one()
alert_id = str(alert['_id'])
self.model.update({'rule': 'updated_test', 'period': 10}, alert_id)
alert = self.collection.find_one()
        eq_(alert['rule'], 'updated_test')
def mute_test(self):
self.collection.remove()
self.collection.insert({"name" : "test", "key": "test_me"})
        alert = self.collection.find_one()
alert_id = str(alert['_id'])
self.model.mute(alert_id)
result = self.collection.find_one()
eq_(result["mute"], True)
self.model.mute(alert_id)
result = self.collection.find_one()
eq_(result["mute"], False)
def get_mute_state_test(self):
self.collection.remove()
for i in range(0, 10):
self.collection.insert({"name" : "test", "mute": True,"account_id": self.account_id})
result = self.model.get_mute_state(account_id=self.account_id)
eq_(result, False) # A toggle function -> this is the next state
self.collection.remove()
for i in range(0, 10):
self.collection.insert({"name" : "test", "mute": False,"account_id": self.account_id})
result = self.model.get_mute_state(account_id=self.account_id)
eq_(result, True) # A toggle function -> this is the next state
def mute_all_test(self):
self.collection.remove()
for i in range(0, 10):
self.collection.insert({"name" : "test", "mute": False ,"account_id": self.account_id})
result = self.model.mute_all(account_id=self.account_id)
for r in self.collection.find():
eq_(r['mute'], True)
self.collection.remove()
for i in range(0, 10):
self.collection.insert({"name" : "test", "mute": True ,"account_id": self.account_id})
result = self.model.mute_all(account_id=self.account_id)
for r in self.collection.find():
eq_(r['mute'], False)
self.collection.remove()
def get_alerts_test(self):
self.collection.remove()
self.server_collection.remove()
self.server_collection.insert({"name" : "test", "key": "test_me"})
server = self.server_collection.find_one()
rule = { "server": server['_id'], "rule_type": 'system', 'metric': 2, 'period': 10}
self.collection.insert(rule)
rule = { "server": server['_id'], "rule_type": 'system', 'metric': 1, 'period': 10}
self.collection.insert(rule)
rules = self.model.get_alerts(type='system', server=server)
eq_(len(rules), 2)
self.collection.remove()
def delete_alerts_test(self):
self.collection.remove()
self.collection.insert({"name" : "test", "key": "test_me"})
rule = self.collection.find_one()
self.model.delete(alert_id=rule['_id'])
result = self.collection.count()
eq_(result,0)
self.collection.remove()
def save_healthcheck_occurence_test(self):
self.history_collection.remove()
self.collection.remove()
def save_occurence_test(self):
self.history_collection.remove()
self.collection.remove()
self.collection.insert({
"rule_type" : "custom_metric_gauge",
"metric_value" : 10,
"metric_type" : "more_than",
"period": 10
})
rule = self.collection.find_one()
rule_id = str(rule['_id'])
for i in range(300, 330):
self.model.save_occurence({
'value': 11,
'alert_id': rule_id,
'trigger': True,
'time': i
})
trigger_result = self.history_collection.find({'alert_id': rule['_id'] , 'notify': True})
assert trigger_result.count() == 2 # 310 and 321
def save_health_check_occurence_test(self):
self.history_collection.remove()
self.server_collection.remove()
self.server_collection.insert({'name': 'test'})
server = self.server_collection.find_one()
self.collection.remove()
self.collection.insert({
"rule_type" : "health_check",
"server": server['_id'],
"command" : "check-http.rb",
"status": "critical",
"period": 10
})
rule = self.collection.find_one()
rule['server'] = server
        rule_id = str(rule['_id'])
|
nesdis/djongo
|
tests/django_tests/tests/v22/tests/auth_tests/models/with_integer_username.py
|
Python
|
agpl-3.0
| 681
| 0
|
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager
from django.db import models
class IntegerUsernameUserManager(BaseUserManager):
def create_user(self, username, password):
        user = self.model(username=username)
user.set_password(password)
user.save(using=self._db)
return user
def get_by_natural_key(self, username):
return self.get(username=username)
class IntegerUsernameUser(AbstractBaseUser):
username = models.IntegerField()
    password = models.CharField(max_length=255)
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = ['username', 'password']
objects = IntegerUsernameUserManager()
|
robertnishihara/ray
|
python/ray/tests/test_gcs_fault_tolerance.py
|
Python
|
apache-2.0
| 3,783
| 0.000264
|
import sys
import ray
import pytest
from ray.test_utils import (
generate_system_config_map,
wait_for_condition,
wait_for_pid_to_exit,
)
@ray.remote
class Increase:
def method(self, x):
return x + 2
@ray.remote
def increase(x):
return x + 1
@pytest.mark.parametrize(
"ray_start_regular", [
generate_system_config_map(
num_heartbeats_timeout=20, ping_gcs_rpc_server_max_retries=60)
],
indirect=True)
def test_gcs_server_restart(ray_start_regular):
actor1 = Increase.remote()
result = ray.get(actor1.method.remote(1))
assert result == 3
ray.worker._global_node.kill_gcs_server()
ray.worker._global_node.start_gcs_server()
result = ray.get(actor1.method.remote(7))
assert result == 9
actor2 = Increase.remote()
result = ray.get(actor2.method.remote(2))
assert result == 4
result = ray.get(increase.remote(1))
assert result == 2
@pytest.mark.parametrize(
"ray_start_regular", [
generate_system_config_map(
num_heartbeats_timeout=20, ping_gcs_rpc_server_max_retries=60)
],
indirect=True)
def test_gcs_server_restart_during_actor_creation(ray_start_regular):
ids = []
for i in range(0, 100):
actor = Increase.remote()
ids.append(actor.method.remote(1))
ray.worker._global_node.kill_gcs_server()
    ray.worker._global_node.start_gcs_server()
|
ready, unready = ray.wait(ids, num_returns=100, timeout=240)
print("Ready objects is {}.".format(ready))
print("Unready objects is {}.".format(unready))
assert len(unready) == 0
@pytest.mark.parametrize(
"ray_start_cluster_head", [
generate_system_config_map(
num_heartbeats_timeout=20, ping_gcs_rpc_server_max_retries=60)
],
indirect=True)
def test_node_failure_detector_when_gcs_server_restart(ray_start_cluster_head):
"""Checks that the node failure detector is correct when gcs server restart.
We set the cluster to timeout nodes after 2 seconds of heartbeats. We then
kill gcs server and remove the worker node and restart gcs server again to
check that the removed node will die finally.
"""
cluster = ray_start_cluster_head
worker = cluster.add_node()
cluster.wait_for_nodes()
# Make sure both head and worker node are alive.
nodes = ray.nodes()
assert len(nodes) == 2
assert nodes[0]["alive"] and nodes[1]["alive"]
to_be_removed_node = None
for node in nodes:
if node["RayletSocketName"] == worker.raylet_socket_name:
to_be_removed_node = node
assert to_be_removed_node is not None
head_node = cluster.head_node
gcs_server_process = head_node.all_processes["gcs_server"][0].process
gcs_server_pid = gcs_server_process.pid
# Kill gcs server.
cluster.head_node.kill_gcs_server()
# Wait to prevent the gcs server process becoming zombie.
gcs_server_process.wait()
wait_for_pid_to_exit(gcs_server_pid, 1000)
raylet_process = worker.all_processes["raylet"][0].process
raylet_pid = raylet_process.pid
# Remove worker node.
cluster.remove_node(worker, allow_graceful=False)
# Wait to prevent the raylet process becoming zombie.
raylet_process.wait()
wait_for_pid_to_exit(raylet_pid)
# Restart gcs server process.
cluster.head_node.start_gcs_server()
def condition():
nodes = ray.nodes()
assert len(nodes) == 2
for node in nodes:
if node["NodeID"] == to_be_removed_node["NodeID"]:
return not node["alive"]
return False
# Wait for the removed node dead.
wait_for_condition(condition, timeout=10)
if __name__ == "__main__":
import pytest
sys.exit(pytest.main(["-v", __file__]))
|
crazy-canux/xplugin_nagios
|
plugin/plugins/sharepoint_2013/src/plugin/base.py
|
Python
|
gpl-2.0
| 3,450
| 0
|
# -*- coding: utf-8 -*-
# Copyright (C) Canux CHENG <canuxcheng@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
# OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Base class for all Sharepoint 2013 plugins."""
import logging
import traceback
from monitoring.nagios.plugin import NagiosPluginHTTP
from powershell import XMLTable, XMLSerializedTable
from powershell.xml.exceptions import XMLValidityError
logger = logging.getLogger('plugin.base')
class PluginBase(NagiosPluginHTTP):
"""Base class for all Exchange plugins."""
def __init__(self, *args, **kwargs):
super(PluginBase, self).__init__(*args, **kwargs)
self._alerts = {
'warning': [],
'critical': [],
}
self.have_criticals = False
self.have_warnings = False
def run(self):
"""Run the plugin."""
try:
self.main()
except Exception:
self.shortoutput = 'Unexpected plugin error ! Please investigate.'
self.longoutput = traceback.format_exc().splitlines()
self.unknown(self.output())
def main(self):
"""Main entry point for the plugin."""
raise NotImplementedError('Main entry point is not implemented !')
def fetch_xml_table(self):
"""Helper to fetch the XML via HTTP and parse it."""
response = self.http.get(self.options.path)
try:
xml_table = XMLTable(response.content)
logger.debug('XML Table: %s', xml_table)
return xml_table
except XMLValidityError:
try:
xml_table = XMLSerializedTable(response.content)
logger.debug('XML Serialized Table: %s', xml_table)
return xml_table
except XMLValidityError:
self.shortoutput = 'XML format is not valid !'
self.longoutput = traceback.format_exc().splitlines()
self.critical(self.output())
def add_critical_result(self, crit_result):
"""
Add a critical result.
Used in longoutput to show the result in a CRITICAL section.
"""
        self._alerts['critical'].append(crit_result)
self.have_criticals = True
def add_warning_result(self, warn_result):
"""
Add a warning result.
        Used in longoutput to show the result in a WARNING section.
"""
self._alerts['warning'].append(warn_result)
self.have_warnings = True
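# Illustrative sketch (not part of the original module): the rough shape of a
# concrete plugin built on PluginBase. Only fetch_xml_table(),
# add_critical_result(), have_criticals, shortoutput, output() and critical()
# come from the code above; the row accessor used on the XML table and the ok()
# helper inherited from NagiosPluginHTTP are assumptions made for this example.
class ExampleStatusPlugin(PluginBase):
    """Hypothetical plugin that raises CRITICAL for rows reported as 'Error'."""
    def main(self):
        xml_table = self.fetch_xml_table()
        # 'get_rows' is a hypothetical accessor; the real XMLTable API may differ.
        for row in getattr(xml_table, 'get_rows', lambda: [])():
            if row.get('Status') == 'Error':
                self.add_critical_result(row)
        if self.have_criticals:
            self.shortoutput = 'Errors found in the SharePoint status table.'
            self.critical(self.output())
        self.shortoutput = 'SharePoint status table looks clean.'
        self.ok(self.output())  # ok() assumed from the Nagios plugin base class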
|
EarthLifeConsortium/elc_api
|
swagger_server/test/test_taxonomy_controller.py
|
Python
|
apache-2.0
| 995
| 0.001005
|
# coding: utf-8
from __future__ import absolute_import
from swagger_server.models.error_model import ErrorModel
from swagger_server.models.taxonomy import Taxonomy
from . import BaseTestCase
from six import BytesIO
from flask import json
class TestTaxonomyController(BaseTestCase):
""" TaxonomyController integration test stubs """
def test_tax(self):
"""
Test case for tax
Taxonomic information, or hierarchy
"""
query_string = [('taxon', 'taxon_example'),
                        ('includelower', 'true'),
                        ('hierarchy', 'true')]
response = self.client.open('/api_v1/tax',
method='GET',
content_type='application/json',
query_string=query_string)
self.assert200(response, "Response body is : " + response.data.decode('utf-8'))
if __name__ == '__main__':
import unittest
unittest.main()
|
vmendez/DIRAC
|
ResourceStatusSystem/Command/DowntimeCommand.py
|
Python
|
gpl-3.0
| 13,233
| 0.042167
|
''' DowntimeCommand module
'''
import urllib2
from datetime import datetime, timedelta
from operator import itemgetter
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.LCG.GOCDBClient import GOCDBClient
from DIRAC.Core.Utilities.SitesDIRACGOCDBmapping import getGOCSiteName, getGOCFTSName
from DIRAC.ConfigurationSystem.Client.Helpers.Resources import getStorageElementOptions, getFTS3Servers
from DIRAC.ResourceStatusSystem.Client.ResourceManagementClient import ResourceManagementClient
from DIRAC.ResourceStatusSystem.Command.Command import Command
from DIRAC.ResourceStatusSystem.Utilities import CSHelpers
__RCSID__ = '$Id: $'
class DowntimeCommand( Command ):
'''
Downtime "master" Command or removed DTs.
'''
def __init__( self, args = None, clients = None ):
super( DowntimeCommand, self ).__init__( args, clients )
if 'GOCDBClient' in self.apis:
self.gClient = self.apis[ 'GOCDBClient' ]
else:
self.gClient = GOCDBClient()
if 'ResourceManagementClient' in self.apis:
self.rmClient = self.apis[ 'ResourceManagementClient' ]
else:
self.rmClient = ResourceManagementClient()
def _storeCommand( self, result ):
'''
    Stores the results of the doNew method in the database.
'''
for dt in result:
resQuery = self.rmClient.addOrModifyDowntimeCache( downtimeID = dt[ 'DowntimeID' ],
element = dt[ 'Element' ],
name = dt[ 'Name' ],
startDate = dt[ 'StartDate' ],
endDate = dt[ 'EndDate' ],
severity = dt[ 'Severity' ],
description = dt[ 'Description' ],
link = dt[ 'Link' ],
gocdbServiceType = dt[ 'GOCDBServiceType' ] )
return resQuery
def _cleanCommand( self, element, elementNames):
'''
      Clears expired DTs from the cache.
'''
resQuery = []
for elementName in elementNames:
#get the list of all DTs stored in the cache
result = self.rmClient.selectDowntimeCache( element = element,
name = elementName )
if not result[ 'OK' ]:
return result
uniformResult = [ dict( zip( result[ 'Columns' ], res ) ) for res in result[ 'Value' ] ]
currentDate = datetime.utcnow()
if len(uniformResult) == 0:
return S_OK( None )
#get the list of all ongoing DTs from GocDB
gDTLinkList = self.gClient.getCurrentDTLinkList()
if not gDTLinkList[ 'OK' ]:
return gDTLinkList
for dt in uniformResult:
#if DT expired or DT not in the list of current DTs, then we remove it from the cache
if dt[ 'EndDate' ] < currentDate or dt[ 'Link' ] not in gDTLinkList[ 'Value' ]:
result = self.rmClient.deleteDowntimeCache (
downtimeID = dt[ 'DowntimeID' ]
)
resQuery.append(result)
return S_OK( resQuery )
def _prepareCommand( self ):
'''
DowntimeCommand requires four arguments:
- name : <str>
- element : Site / Resource
- elementType: <str>
      If the elements are Site(s), we need to get their GOCDB names. They may
      not have one, in which case we ignore them.
'''
if 'name' not in self.args:
return S_ERROR( '"name" not found in self.args' )
elementName = self.args[ 'name' ]
if 'element' not in self.args:
return S_ERROR( '"element" not found in self.args' )
element = self.args[ 'element' ]
if 'elementType' not in self.args:
return S_ERROR( '"elementType" not found in self.args' )
elementType = self.args[ 'elementType' ]
if not element in [ 'Site', 'Resource' ]:
return S_ERROR( 'element is neither Site nor Resource' )
hours = None
if 'hours' in self.args:
hours = self.args[ 'hours' ]
gocdbServiceType = None
# Transform DIRAC site names into GOCDB topics
if element == 'Site':
gocSite = getGOCSiteName( elementName )
if not gocSite[ 'OK' ]:
return gocSite
elementName = gocSite[ 'Value' ]
    # The DIRAC SE names mean nothing on the grid, but their hosts do.
elif elementType == 'StorageElement':
# We need to distinguish if it's tape or disk
seOptions = getStorageElementOptions( elementName )
if not seOptions['OK']:
return seOptions
if seOptions['Value'].get( 'TapeSE' ):
gocdbServiceType = "srm.nearline"
elif seOptions['Value'].get( 'DiskSE' ):
gocdbServiceType = "srm"
seHost = CSHelpers.getSEHost( elementName )
if not seHost['OK']:
return seHost
seHost = seHost['Value']
if not seHost:
return S_ERROR( 'No seHost for %s' % elementName )
elementName = seHost
elif elementType in ['FTS','FTS3']:
gocdbServiceType = 'FTS'
try:
#WARNING: this method presupposes that the server is an FTS3 type
elementName = getGOCFTSName(elementName)
except:
return S_ERROR( 'No FTS3 server specified in dirac.cfg (see Resources/FTSEndpoints)' )
return S_OK( ( element, elementName, hours, gocdbServiceType ) )
def doNew( self, masterParams = None ):
'''
Gets the parameters to run, either from the master method or from its
own arguments.
      For every elementName (or for each name in the given list) it contacts
      the GOCDB client. The server is not very stable, so in case of failure it tries
      a second time.
      If there are downtimes, they are recorded and then returned.
'''
if masterParams is not None:
element, elementNames = masterParams
hours = 120
elementName = None
gocdbServiceType = None
else:
params = self._prepareCommand()
if not params[ 'OK' ]:
return params
      element, elementName, hours, gocdbServiceType = params[ 'Value' ]
elementNames = [ elementName ]
    #WARNING: checking all the DTs that are ongoing or starting within the given <hours> from now
try:
results = self.gClient.getStatus( element, name = elementNames, startingInHours = hours )
except urllib2.URLError:
try:
        #Let's give it a second chance..
results = self.gClient.getStatus( element, name = elementNames, startingInHours = hours )
except urllib2.URLError, e:
return S_ERROR( e )
if not results[ 'OK' ]:
return results
results = results[ 'Value' ]
if results is None:
return S_OK( None )
#cleaning the Cache
cleanRes = self._cleanCommand(element, elementNames)
if not cleanRes[ 'OK' ]:
return cleanRes
uniformResult = []
# Humanize the results into a dictionary, not the most optimal, but readable
for downtime, downDic in results.items():
dt = {}
if 'HOSTNAME' in downDic.keys():
dt[ 'Name' ] = downDic[ 'HOSTNAME' ]
elif 'SITENAME' in downDic.keys():
dt[ 'Name' ] = downDic[ 'SITENAME' ]
else:
return S_ERROR( "SITENAME or HOSTNAME are missing" )
if 'SERVICE_TYPE' in downDic.keys():
dt[ 'GOCDBServiceType' ] = downDic[ 'SERVICE_TYPE' ]
if gocdbServiceType:
gocdbST = gocdbServiceType.lower()
csST = downDic[ 'SERVICE_TYPE' ].lower()
if gocdbST != csST:
return S_ERROR( "SERVICE_TYPE mismatch between GOCDB (%s) and CS (%s) for %s" % (gocdbST, csST, dt[ 'Name' ]) )
else:
#WARNING: do we want None as default value?
dt[ 'GOCDBServiceType' ] = None
dt[ 'DowntimeID' ] = downtime
      dt[ 'Element' ] = element
|
ABASystems/pymyob
|
tests/test_managers.py
|
Python
|
bsd-3-clause
| 3,511
| 0.003987
|
from datetime import date, datetime
from unittest import TestCase
from myob.constants import DEFAULT_PAGE_SIZE
from myob.credentials import PartnerCredentials
from myob.managers import Manager
class QueryParamTests(TestCase):
def setUp(self):
cred = PartnerCredentials(
consumer_key='KeyToTheKingdom',
consumer_secret='TellNoOne',
callback_uri='CallOnlyWhenCalledTo',
)
self.manager = Manager('', credentials=cred)
def assertParamsEqual(self, raw_kwargs, expected_params, method='GET'):
self.assertEqual(
self.manager.build_request_kwargs(method, {}, **raw_kwargs)['params'],
expected_params
)
def test_filter(self):
self.assertParamsEqual({'Type': 'Customer'}, {'$filter': "(Type eq 'Customer')"})
self.assertParamsEqual({'Type': ['Customer', 'Supplier']}, {'$filter': "(Type eq 'Customer' or Type eq 'Supplier')"})
        self.assertParamsEqual({'DisplayID__gt': '5-0000'}, {'$filter': "(DisplayID gt '5-0000')"})
self.assertParamsEqual({'DateOccurred__lt': '2013-08-30T19:00:59.043'}, {'$filter': "(DateOccurred lt '2013-08-30T19:00:59.043')"})
self.assertParamsEqual({'Type': ('Customer', 'Supplier'), 'DisplayID__gt': '5-0000'}, {'$filter': "(Type eq 'Customer' or Type eq 'Supplier') and (DisplayID gt '5-0000')"})
        self.assertParamsEqual({'raw_filter': "(Type eq 'Customer' or Type eq 'Supplier') or DisplayID gt '5-0000'", 'DateOccurred__lt': '2013-08-30T19:00:59.043'}, {'$filter': "((Type eq 'Customer' or Type eq 'Supplier') or DisplayID gt '5-0000') and (DateOccurred lt '2013-08-30T19:00:59.043')"})
self.assertParamsEqual({'IsActive': True}, {'$filter': "(IsActive eq true)"})
self.assertParamsEqual({'IsActive': False}, {'$filter': "(IsActive eq false)"})
def test_datetime_filter(self):
self.assertParamsEqual({'DateOccurred__lt': datetime(1992, 11, 14)}, {'$filter': "(DateOccurred lt datetime'1992-11-14 00:00:00')"})
self.assertParamsEqual({'DateOccurred__lt': date(1992, 11, 14)}, {'$filter': "(DateOccurred lt datetime'1992-11-14')"})
def test_orderby(self):
self.assertParamsEqual({'orderby': 'Date'}, {'$orderby': "Date"})
def test_pagination(self):
self.assertParamsEqual({'page': 7}, {'$skip': 6 * DEFAULT_PAGE_SIZE})
self.assertParamsEqual({'limit': 20}, {'$top': 20})
self.assertParamsEqual({'limit': 20, 'page': 7}, {'$top': 20, '$skip': 120})
def test_format(self):
self.assertParamsEqual({'format': 'json'}, {'format': 'json'})
def test_templatename(self):
self.assertParamsEqual({'templatename': 'InvoiceTemplate - 7'}, {'templatename': 'InvoiceTemplate - 7'})
def test_returnBody(self):
self.assertParamsEqual({}, {'returnBody': 'true'}, method='PUT')
self.assertParamsEqual({}, {'returnBody': 'true'}, method='POST')
def test_combination(self):
self.assertParamsEqual(
{
'Type': ['Customer', 'Supplier'],
'DisplayID__gt': '3-0900',
'orderby': 'Date',
'page': 5,
'limit': 13,
'format': 'json',
},
{
'$filter': "(Type eq 'Customer' or Type eq 'Supplier') and (DisplayID gt '3-0900')",
'$orderby': 'Date',
'$skip': 52,
'$top': 13,
'format': 'json'
},
)
|
chakki-works/elephant_sense
|
elephant_sense/evaluator.py
|
Python
|
apache-2.0
| 2,383
| 0.002098
|
import os
import json
import numpy as np
from sklearn.externals import joblib
from scripts.features.post import Post
from scripts.features.post_feature import PostFeature
import scripts.features.length_extractor as lext
import scripts.features.charactor_extractor as cext
import scripts.features.structure_extractor as sext
class Evaluator():
def __init__(self, model_path=""):
self.model_path = model_path if model_path else os.path.join(os.path.dirname(__file__), "../models/")
self.classifier = None
self.scaler = None
self.features = []
def load(self):
self.classifier = joblib.load(self.model_path + "banana.pkl")
self.scaler = joblib.load(self.model_path + "banana_scaler.pkl")
with open(self.model_path + "banana_list.txt") as f:
self.features = f.readline().split()
return self
def evaluate(self, post_dict):
if self.classifier is None:
self.load()
f_vector = self.get_features(post_dict)
prediction = self.classifier.predict_proba(f_vector)
return prediction[0][1] # probability of good
def get_features(self, post_dict):
post = Post(post_dict)
pf = PostFeature(post)
cleaned_rendered_body = cext.RenderedBodyPreprocessor().clean_rendered_body(post.rendered_body)
pf.add(lext.TitleLengthExtractor())
pf.add(lext.SectionCountExtractor())
pf.add(cext.KanjiRatioExtractor(cleaned_rendered_body))
pf.add(cext.HiraganaRatioExtractor(cleaned_rendered_body))
pf.add(cext.KatakanaRatioExtractor(cleaned_rendered_body))
pf.add(cext.NumberRatioExtractor(cleaned_rendered_body))
pf.add(cext.PunctuationRatioExtractor(cleaned_rendered_body))
pf.add(lext.SentenceMeanLengthExtractor(cleaned_rendered_body))
pf.add(lext.SentenceMeanLengthExtractor(cleaned_rendered_body))
pf.add(lext.SentenceMaxLengthExtractor(cleaned_rendered_body))
|
pf.add(sext.ImageCountExtracto
|
r())
pf.add(sext.ImageRatioExtractor(cleaned_rendered_body))
pf_d = pf.to_dict(drop_disused_feature=True)
f_vector = []
for f in self.features:
f_vector.append(pf_d[f])
f_vector = np.array(f_vector).reshape(1, -1)
f_vector = self.scaler.transform(f_vector)
return f_vector
|
equitania/myodoo-addons-v10
|
eq_stock/models/eq_report_stock.py
|
Python
|
agpl-3.0
| 3,096
| 0.002913
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Odoo Addon, Open Source Management Solution
# Copyright (C) 2014-now Equitania Software GmbH(<http://www.equitania.de>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from odoo import models, fields, api, _
class report_stock_picking(models.Model):
_inherit = 'stock.picking'
# def get_tax(self, tax_id, language, currency_id):
# amount_net = 0;
# for line in self.order_line:
# if tax_id.id in [x.id for x in line.tax_id] and not line.eq_optional:
# amount_net += line.price_subtotal
#
# tax_amount = 0
# for tex in self.env['account.tax']._compute([tax_id], amount_net, 1):
# tax_amount += tex['amount']
#
#         return self.env["eq_report_helper"].get_price(tax_amount, language, 'Sale Price Report', currency_id)
#
#
# @api.multi
#     def get_price(self, value, currency_id, language):
# """
#         Formats a price, taking the 'Sale Price Report' decimal places setting into account
# :param value:
# :param currency_id:
# :param language:
# :return:
# """
# return self.env["eq_report_helper"].get_price(value, language, 'Sale Price Report', currency_id)
#
# @api.multi
# def get_qty(self, value, language):
# """
#         Formats a quantity, taking the 'Sale Quantity Report' decimal places setting into account
# :param value:
# :param language:
# :return:
# """
# return self.env["eq_report_helper"].get_qty(value, language, 'Sale Quantity Report')
@api.multi
def html_text_is_set(self, value):
"""
        Workaround for HTML texts: after saving without content, the field still holds the markup <p><br></p>.
        Strip the line breaks and paragraph tags to test whether any real content was set.
:param value:
:return:
"""
if not value:
return False
value = value.replace('<br>', '')
value = value.replace('<p>', '')
value = value.replace('</p>', '')
value = value.replace('<', '')
value = value.replace('>', '')
value = value.replace('/', '')
value = value.strip()
return value != ''
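# Illustrative sketch (not part of the original module): the cleaning performed by
# html_text_is_set() above, shown on plain strings. Calling the method itself would
# need an Odoo recordset, so the same replacements are applied directly here.
def _html_text_is_set_example():
    def _clean(value):
        for token in ('<br>', '<p>', '</p>', '<', '>', '/'):
            value = value.replace(token, '')
        return value.strip()
    assert _clean('<p><br></p>') == ''                        # placeholder -> "no content"
    assert _clean('<p>Lieferhinweis</p>') == 'Lieferhinweis'  # real text survives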
|
nathanhilbert/ulmo
|
test/usgs_eddn_test.py
|
Python
|
bsd-3-clause
| 13,809
| 0.004779
|
from datetime import datetime
import pandas as pd
from pandas.util.testing import assert_frame_equal
import ulmo
import ulmo.usgs.eddn.parsers as parsers
import test_util
fmt = '%y%j%H%M%S'
message_test_sets = [
{
'dcp_address': 'C5149430',
'number_of_lines': 4,
'parser': 'twdb_stevens',
'first_row_message_timestamp_utc': datetime.strptime('13305152818', fmt),
},
{
'dcp_address': 'C514D73A',
'number_of_lines': 4,
'parser': 'twdb_sutron',
'first_row_message_timestamp_utc': datetime.strptime('13305072816', fmt),
},
{
'dcp_address': 'C516C1B8',
'number_of_lines': 28,
'parser': 'stevens',
'first_row_message_timestamp_utc': datetime.strptime('13305134352', fmt),
}
]
def test_parse_dcp_message_number_of_lines():
for test_set in message_test_sets:
dcp_data_file = 'usgs/eddn/' + test_set['dcp_address'] + '.txt'
with test_util.mocked_urls(dcp_data_file):
data = ulmo.usgs.eddn.get_data(test_set['dcp_address'])
assert len(data) == test_set['number_of_lines']
def test_parse_dcp_message_timestamp():
for test_set in message_test_sets:
dcp_data_file = 'usgs/eddn/' + test_set['dcp_address'] + '.txt'
with test_util.mocked_urls(dcp_data_file):
data = ulmo.usgs.eddn.get_data(test_set['dcp_address'])
assert data['message_timestamp_utc'][-1] == test_set['first_row_message_timestamp_utc']
multi_message_test_sets = [
{
'dcp_address': 'C5149430',
'data_files': {
'.*DRS_UNTIL=now.*':'usgs/eddn/C5149430_file1.txt',
'.*DRS_UNTIL=2013%2F294.*':'usgs/eddn/C5149430_file2.txt',
'.*DRS_UNTIL=2013%2F207.*':'usgs/eddn/C5149430_file3.txt'
},
'first_row_message_timestamp_utc': datetime.strptime('14016152818', fmt),
'last_row_message_timestamp_utc': datetime.strptime('13202032818', fmt),
'number_of_lines': 360,
'start': 'P365D'
}
]
def test_multi_message_download():
for test_set in multi_message_test_sets:
with test_util.mocked_urls(test_set['data_files']):
data = ulmo.usgs.eddn.get_data(test_set['dcp_address'], start=test_set['start'])
assert data['message_timestamp_utc'][-1] == test_set['first_row_message_timestamp_utc']
assert data['message_timestamp_utc'][0] == test_set['last_row_message_timestamp_utc']
assert len(data) == test_set['number_of_lines']
twdb_stevens_test_sets = [
{
'message_timestamp_utc': datetime(2013,10,30,15,28,18),
'dcp_message': '"BV:11.9 193.76$ 193.70$ 193.62$ 193.54$ 193.49$ 193.43$ 193.37$ 199.62$ 200.51$ 200.98$ 195.00$ 194.33$ ',
'return_value': [
['2013-10-30 04:00:00', pd.np.nan, 193.76],
['2013-10-30 05:00:00', pd.np.nan, 193.70],
['2013-10-30 06:00:00', pd.np.nan, 193.62],
['2013-10-30 07:00:00', pd.np.nan, 193.54],
['2013-10-30 08:00:00', pd.np.nan, 193.49],
['2013-10-30 09:00:00', pd.np.nan, 193.43],
['2013-10-30 10:00:00', pd.np.nan, 193.37],
['2013-10-30 11:00:00', pd.np.nan, 199.62],
['2013-10-30 12:00:00', pd.np.nan, 200.51],
['2013-10-30 13:00:00', pd.np.nan, 200.98],
['2013-10-30 14:00:00', pd.np.nan, 195.00],
['2013-10-30 15:00:00', 11.9, 194.33],
],
},
{
'message_timestamp_utc': datetime(2013,10,30,15,28,18),
'dcp_message': '"BV:12.6 Channel:5 Time:28 +304.63 +304.63 +304.63 +304.56 +304.63 +304.63 +304.63 +304.63 +304.63 +304.63 +304.63 +304.71 Channel:6 Time:28 +310.51 +310.66 +310.59 +310.51 +310.51 +310.59 +310.59 +310.51 +310.66 +310.51 +310.66 +310.59 ',
'return_value': [
['2013-10-30 04:00:00', '5', '28', pd.np.nan, 304.63],
['2013-10-30 05:00:00', '5', '28', pd.np.nan, 304.63],
['2013-10-30 06:00:00', '5', '28', pd.np.nan, 304.63],
['2013-10-30 07:00:00', '5', '28', pd.np.nan, 304.56],
['2013-10-30 08:00:00', '5', '28', pd.np.nan, 304.63],
['2013-10-30 09:00:00', '5', '28', pd.np.nan, 304.63],
['2013-10-30 10:00:00', '5', '28', pd.np.nan, 304.63],
['2013-10-30 11:00:00', '5', '28', pd.np.nan, 304.63],
['2013-10-30 12:00:00', '5', '28', pd.np.nan, 304.63],
['2013-10-30 13:00:00', '5', '28', pd.np.nan, 304.63],
['2013-10-30 14:00:00', '5', '28', pd.np.nan, 304.63],
['2013-10-30 15:00:00', '5', '28', 12.6, 304.71],
['2013-10-30 04:00:00', '6', '28', pd.np.nan, 310.51],
['2013-10-30 05:00:00', '6', '28', pd.np.nan, 310.66],
['2013-10-30 06:00:00', '6', '28', pd.np.nan, 310.59],
['2013-10-30 07:00:00', '6', '28', pd.np.nan, 310.51],
['2013-10-30 08:00:00', '6', '28', pd.np.nan, 310.51],
['2013-10-30 09:00:00', '6', '28', pd.np.nan, 310.59],
['2013-10-30 10:00:00', '6', '28', pd.np.nan, 310.59],
['2013-10-30 11:00:00', '6', '28', pd.np.nan, 310.51],
['2013-10-30 12:00:00', '6', '28', pd.np.nan, 310.66],
['2013-10-30 13:00:00', '6', '28', pd.np.nan, 310.51],
['2013-10-30 14:00:00', '6', '28', pd.np.nan, 310.66],
['2013-10-30 15:00:00', '6', '28', 12.6, 310.59],
]
},
{
'message_timestamp_utc': datetime(2013,10,30,15,28,18),
'dcp_message': '"BV:12.6 ',
'return_value': pd.DataFrame()
},
{
'message_timestamp_utc': datetime(2013,10,30,15,28,18),
'dcp_message': """ 79."$}X^pZBF8iB~i>>Xmj[bvr^Zv%JXl,DU=l{uu[ t(
|@2q^sjS!
""",
'return_value': pd.DataFrame()
},
]
def test_parser_twdb_stevens():
for test_set in twdb_stevens_test_sets:
print 'testing twdb_stevens parser'
if isinstance(test_set['return_value'], pd.DataFrame):
parser = getattr(parsers, 'twdb_stevens')
assert_frame_equal(pd.DataFrame(), parser(test_set))
return
if len(test_set['return_value'][0]) == 3:
columns = ['timestamp_utc', 'battery_voltage', 'water_level']
else:
columns = ['timestamp_utc', 'channel', 'time', 'battery_voltage', 'water_level']
_assert(test_set, columns, 'twdb_stevens')
twdb_sutron_test_sets = [
{
'message_timestamp_utc': datetime(2013,10,30,15,28,18),
'dcp_message': '":Sense01 60 #60 -67.84 -66.15 -67.73 -67.81 -66.42 -68.45 -68.04 -67.87 -71.53 -73.29 -70.55 -72.71 :BL 13.29',
'return_value': [
['2013-10-30 04:00:00', 'sense01', pd.np.nan, 72.71],
['2013-10-30 05:00:00', 'sense01', pd.np.nan, 70.55],
['2013-10-30 06:00:00', 'sense01', pd.np.nan, 73.29],
['2013-10-30 07:00:00', 'sense01', pd.np.nan, 71.53],
['2013-10-30 08:00:00', 'sense01', pd.np.nan, 67.87],
['2013-10-30 09:00:00', 'sense01', pd.np.nan, 68.04],
['2013-10-30 10:00:00', 'sense01', pd.np.nan, 68.45],
['2013-10-30 11:00:00', 'sense01', pd.np.nan, 66.42],
            ['2013-10-30 12:00:00', 'sense01', pd.np.nan, 67.81],
['2013-10-30 13:00:00', 'sense01', pd.np.nan, 67.73],
['2013-10-30 14:00:00', 'sense01', pd.np.nan, 66.15],
            ['2013-10-30 15:00:00', 'sense01', 13.29, 67.84],
],
},
{
'message_timestamp_utc': datetime(2013,10,30,15,28,18),
'dcp_message': '":OTT 703 60 #60 -231.47 -231.45 -231.44 -231.45 -231.47 -231.50 -231.51 -231.55 -231.56 -231.57 -231.55 -231.53 :6910704 60 #60 -261.85 -261.83 -261.81 -261.80 -261.81 -261.83 -261.85 -261.87 -261.89 -261.88 -261.86 -261.83 :BL 13.21',
'return_value': [
['2013-10-30 04:00:00', 'ott 703', pd.np.nan, 231.53],
['2013-10-30 05:00:00', 'ott 703', pd.np.nan, 231.55],
['2013-10-30 06:00:00', 'ott 703', pd.np.nan, 231.57],
['2013-10-30 07:00:00', 'ott 703', pd.np.nan, 231.56],
['2013-10-30 08:00:00',
|
zainag/RPi-Test-Projects
|
fan_control_daemon.py
|
Python
|
mit
| 2,129
| 0.034758
|
#!/usr/bin/env python
# Fan control mod for Raspberry Pi
import RPi.GPIO as GPIO, time, datetime, subprocess, os, logging
from daemon import runner
DEBUG = 1
GPIO.setmode(GPIO.BCM)
# Respective ports on the GPIO header
FAST = 18
SLOW = 25
# Default settings for fan control
MAX_TEMP = 50
MIN_TEMP = 40
POLL_TIME = 5
def get_temperature():
# Returns the temperature in degrees C
try:
s = subprocess.check_output(["vcgencmd","measure_temp"])
return float(s.split("=")[1][:-3])
except:
        # Something went wrong; keep the fan on high
return MAX_TEMP+1
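# Illustrative sketch (not part of the original script): how the slicing above
# parses a typical vcgencmd reply. The sample string ("temp=NN.N'C" plus a
# trailing newline) is an assumption about the usual output format.
def _parse_temperature_example():
    sample = "temp=48.3'C\n"
    assert float(sample.split("=")[1][:-3]) == 48.3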
class App():
def __init__(self):
self.stdin_path = '/dev/null'
self.stdout_path = '/dev/tty'
self.stderr_path = '/dev/tty'
self.pidfile_path = '/var/run/fandaemon/fandaemon.pid'
self.pidfile_timeout = 5
def run(self):
GPIO.setup(FAST, GPIO.OUT)
GPIO.setup(SLOW, GPIO.OUT)
try:
while True:
current_temp = get_temperature()
logstr = 'Current temp is ' + str(current_temp)
logger.info(logstr);
if current_temp > MAX_TEMP:
logger.info('Setting fan speed to HIGH')
GPIO.output(SLOW, GPIO.LOW)
GPIO.output(FAST, GPIO.HIGH)
POLL_TIME = 5
elif (current_temp <= MAX_TEMP) and (current_temp > MIN_TEMP):
logger.info('Setting fan speed to LOW')
GPIO.output(FAST, GPIO.LOW)
GPIO.output(SLOW, GPIO.HIGH)
POLL_TIME = 10
else:
logger.info('Turn the fan off!')
GPIO.output(SLOW, GPIO.LOW)
GPIO.output(FAST, GPIO.LOW)
POLL_TIME = 15
time.sleep(POLL_TIME)
except:
logger.error('Exiting now!')
finally:
GPIO.cleanup()
app = App()
logger = logging.getLogger("DaemonLog")
logger.setLevel(logging.INFO)
formatter = logging.Formatter("%(asctime)s - %(name
|
)s - %(levelname)s - %(message)s")
handler = logging.FileHandler("/var/log/fandaemon/fandaemon.log")
handler.setFormatter(formatter)
logger.addHandler(handler)
daemon_runner = runner.DaemonRunner(app)
#This ensures that the logger file handle does not get closed during daemonization
daemon_runner.daemon_context.files_preserve=[handler.stream]
daemon_runner.do_action()
|
jiaphuan/models
|
tutorials/image/cifar10/cifar10_train.py
|
Python
|
apache-2.0
| 4,491
| 0.004899
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A binary to train CIFAR-10 using a single GPU.
Accuracy:
cifar10_train.py achieves ~86% accuracy after 100K steps (256 epochs of
data) as judged by cifar10_eval.py.
Speed: With batch_size 128.
System | Step Time (sec/batch) | Accuracy
------------------------------------------------------------------
1 Tesla K20m | 0.35-0.60 | ~86% at 60K steps (5 hours)
1 Tesla K40m | 0.25-0.35 | ~86% at 100K steps (4 hours)
Usage:
Please see the tutorial and website for how to download the CIFAR-10
data set, compile the program and train the model.
http://tensorflow.org/tutorials/deep_cnn/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import time
import tensorflow as tf
import cifar10
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('train_dir', '/tmp/cifar10_train',
"""Directory where to write event logs """
"""and checkpoint.""")
tf.app.flags.DEFINE_integer('max_steps', 1000000,
"""Number of batches to run.""")
tf.app.flags.DEFINE_boolean('log_device_placement', False,
"""Whether to log device placement.""")
tf.app.flags.DEFINE_integer('log_frequency', 10,
"""How often to log results to the cons
|
ole.""")
def train():
"""Train CIFAR-10 for a number of steps."""
with tf.Graph().as_default():
global_step = tf.train.get_or_create_global_step()
# Get images and labels for CIFAR-10.
# Force input pipeline to CPU:0 to avoid operations sometimes ending up on
# GPU and resulting in a slow down.
with tf.device('/cpu:0'):
|
images, labels = cifar10.distorted_inputs()
# Build a Graph that computes the logits predictions from the
# inference model.
logits = cifar10.inference(images)
# Calculate loss.
loss = cifar10.loss(logits, labels)
# Build a Graph that trains the model with one batch of examples and
# updates the model parameters.
train_op = cifar10.train(loss, global_step)
class _LoggerHook(tf.train.SessionRunHook):
"""Logs loss and runtime."""
def begin(self):
self._step = -1
self._start_time = time.time()
def before_run(self, run_context):
self._step += 1
return tf.train.SessionRunArgs(loss) # Asks for loss value.
def after_run(self, run_context, run_values):
if self._step % FLAGS.log_frequency == 0:
current_time = time.time()
duration = current_time - self._start_time
self._start_time = current_time
loss_value = run_values.results
examples_per_sec = FLAGS.log_frequency * FLAGS.batch_size / duration
sec_per_batch = float(duration / FLAGS.log_frequency)
format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
'sec/batch)')
print (format_str % (datetime.now(), self._step, loss_value,
examples_per_sec, sec_per_batch))
with tf.train.MonitoredTrainingSession(
checkpoint_dir=FLAGS.train_dir,
hooks=[tf.train.StopAtStepHook(last_step=FLAGS.max_steps),
tf.train.NanTensorHook(loss),
_LoggerHook()],
config=tf.ConfigProto(
log_device_placement=FLAGS.log_device_placement)) as mon_sess:
while not mon_sess.should_stop():
mon_sess.run(train_op)
def main(argv=None): # pylint: disable=unused-argument
cifar10.maybe_download_and_extract()
if tf.gfile.Exists(FLAGS.train_dir):
tf.gfile.DeleteRecursively(FLAGS.train_dir)
tf.gfile.MakeDirs(FLAGS.train_dir)
train()
if __name__ == '__main__':
tf.app.run()
|
ytanay/thinglang
|
tests/compiler/test_access_compilation.py
|
Python
|
mit
| 1,587
| 0.00189
|
from tests.compiler import compile_snippet, A_ID, LST_ID, SELF_ID, VAL1_ID, internal_call, A_INST, INNER_ID, \
CONTAINER_INNER_ID, STATIC_START
from thinglang.compiler.opcodes import OpcodePushLocal, OpcodePushMember, OpcodePushStatic, OpcodePop, \
OpcodeDereference, OpcodeCallVirtual
def test_direct_member_access():
assert compile_snippet('a_inst.a1') == [
|
OpcodePushMember(A_INST, 0)
]
def test_nested_member_access():
    assert compile_snippet('self.inner.inner.inner') == [
OpcodePushMember(SELF_ID, INNER_ID),
OpcodeDereference(CONTAINER_INNER_ID),
OpcodeDereference(CONTAINER_INNER_ID)
]
def test_member_access_via_method_call():
assert compile_snippet('a_inst.me().a1') == [
OpcodePushLocal(A_INST),
OpcodeCallVirtual(1),
OpcodeDereference(0)
]
assert compile_snippet('a_inst.me().me().a1') == [
OpcodePushLocal(A_INST),
OpcodeCallVirtual(1),
OpcodeCallVirtual(1),
OpcodeDereference(0)
]
def test_local_list_immediate_index():
assert compile_snippet('lst[123]') == [
OpcodePushStatic(STATIC_START),
OpcodePushLocal(LST_ID),
internal_call('list.get')
]
def test_local_list_non_immediate_index():
assert compile_snippet('lst[a]') == [
OpcodePushLocal(A_ID),
OpcodePushLocal(LST_ID),
internal_call('list.get')
]
assert compile_snippet('lst[self.val1]') == [
OpcodePushMember(SELF_ID, VAL1_ID),
OpcodePushLocal(LST_ID),
internal_call('list.get')
]
|
avanc/mopidy-usbplaylist
|
mopidy_usbplaylist/__init__.py
|
Python
|
apache-2.0
| 668
| 0
|
from __future__ import unicode_literals
import os
from mopidy import config, ext
__version__ = '0.0.1'
class Extension(ext.Extension):
dist_name = 'Mopidy-USBPlaylist'
ext_name = 'usbplaylist'
version = __version__
def get_default_config(self):
conf_file = os.path.join(os.path.dirname(__file__), 'ext.conf')
return config.read(conf_file)
    def get_config_schema(self):
        schema = super(Extension, self).get_config_schema()
schema['path'] = config.String()
return schema
def setup(self, registry):
from .actor import USBPlaylistsBackend
registry.add('backend', USBPlaylistsBackend)
|
pengutronix/aiohttp-json-rpc
|
tests/test_forms.py
|
Python
|
apache-2.0
| 530
| 0
|
import pytest
import logging
@pytest.mark.asyncio
async def test_confirmation(rpc_context):
# setup rpc
async def test_method(request):
        a = await request.confirm('just say yes!', timeout=1)
logging.debug('a = %s', a)
return a
rpc_context.rpc.add_methods(('', test_method))
# setup client
async def confirm(request):
return True
client = await rpc_context.make_client()
client.add_methods(('', confirm))
# run test
assert await client.call('test_method')
|
gabrielf10/Soles-pythonanywhere
|
productos/migrations/0004_auto_20141119_0117.py
|
Python
|
bsd-3-clause
| 496
| 0.002016
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('productos', '0003_auto_20141118_2241'),
]
operations = [
migrations.AlterField(
model_name='imagenes',
name='url',
            field=models.ImageField(upload_to=b'img', null=True, verbose_name=b'Im\xc3\xa1gen', blank=True),
preserve_default=True,
),
]
|
codeforkaohsiung/CabuKcgCrawler
|
CabuKcgCrawler/pipelines.py
|
Python
|
mit
| 294
| 0
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
class CabukcgcrawlerPipeline(object):
def process_item(self, item, spider):
        return item
|
kevinkahn/softconsole
|
utils/utilfuncs.py
|
Python
|
apache-2.0
| 3,291
| 0.033728
|
"""
This file holds utility functions that have no dependencies on other console code.
Avoids import loops
"""
import webcolors
def wc(clr, factor=0.0, layercolor=(255, 255, 255)):
lc = webcolors.name_to_rgb(layercolor.lower()) if isinstance(layercolor, str) else layercolor
if isinstance(clr, str):
try:
v = webcolors.name_to_rgb(clr.lower())
except ValueError:
# logsupport.Logs.Log('Bad color name: ' + str(clr), severity=ConsoleWarning)
v = webcolors.name_to_rgb('black')
else:
v = clr
try:
return v[0] + (lc[0] - v[0]) * factor, v[1] + (lc[1] - v[1]) * factor, v[2] + (lc[2] - v[2]) * factor
except Exception as E:
print('wc: {}'.format(E))
print(v, lc, clr, layercolor)
def interval_str(sec_elapsed, shrt=False):
d = int(sec_elapsed / (60 * 60 * 24))
h = int((sec_elapsed % (60 * 60 * 24)) / 3600)
m = int((sec_elapsed % (60 * 60)) / 60)
s = int(sec_elapsed % 60)
if d != 0:
if shrt:
return "{} dys {:>02d}:{:>02d}:{:>02d}".format(d, h, m, s)
else:
return "{} days {:>02d}hrs {:>02d}mn {:>02d}sec".format(d, h, m, s)
elif h != 0:
return "{:>02d}hrs {:>02d}mn {:>02d}sec".format(h, m, s)
else:
return "{:>02d}mn {:>02d}sec".format(m, s)
def BoolTrueWord(v):
if v is None:
return False
if isinstance(v, bool): return v
try:
return v.lower() in ('true', 'on', 'yes')
except Exception as e:
print("Error1: {}".format(v))
def BoolFalseWord(v):
if v is None: return True
if isinstance(v, bool): return not v
try:
return v.lower() in ('false', 'off', 'no')
except Exception as e:
print("Error2: {}".format(v))
def TreeDict(d, args):
# Allow a nest of dictionaries to be accessed by a tuple of keys for easier code
if len(args) == 1:
temp = d[args[0]]
#temp = getattr(d,args[0])
if isinstance(temp, str) and temp.isdigit():
temp = int(temp)
else:
try:
temp = float(temp)
except (ValueError, TypeError):
pass
return temp
else:
return TreeDict(d[args[0]], args[1:])
#return TreeDict(getattr(d,args[0]),args[1:])
import string
class PartialFormatter(string.Formatter):
def __init__(self, missing='--', bad_fmt='--'):
self.missing, self.bad_fmt = missing, bad_fmt
def get_field(self, field_name, args, kwargs):
# Handle a key not found
try:
val = super().get_field(field_name, args, kwargs)
except (KeyError, AttributeError):
val = None, field_name
return val
def format_field(self, value, spec):
# handle an invalid format
if value is None: return self.missing
try:
return super().format_field(value, spec)
except ValueError:
if self.bad_fmt is not None:
return self.bad_fmt
else:
raise
fmt = PartialFormatter()
# noinspection PyBroadException
def safeprint(*args, **kwargs):
try:
print(*args, **kwargs)
except OSError:
with open('/home/pi/Console/disconnectederrors.log', 'a') as f:
print(*args, **kwargs, file=f)
def RepresentsInt(s):
try:
int(s)
return True
except (ValueError, TypeError):
return False
'''
class WFormatter(string.Formatter):
def format_field(self, value, format_spec):
if format_spec.endswith(('f', 'd')) and value is None:
return 'n/a'
elif value is None:
return 'n/a'
elif value == -9999.0:
return 'n/a'
else:
return super(WFormatter, self).format_field(value, format_spec)
'''
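# A minimal usage sketch (not part of the console code); inputs are made up:
#
#     wc('red', factor=0.5)                        # -> (255.0, 127.5, 127.5), red blended halfway to white
#     interval_str(3661)                           # -> '01hrs 01mn 01sec'
#     TreeDict({'a': {'b': '7'}}, ('a', 'b'))      # -> 7, nested lookup with numeric coercion
#     fmt.format('{known} {missing}', known='ok')  # -> 'ok --', missing fields render as '--'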
|
eachiaradia/IOGeopaparazzi
|
io_geopaparazzi_provider.py
|
Python
|
gpl-3.0
| 2,828
| 0.020156
|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
IOGeopaparazzi
A QGIS plugin
A plugin to import/export geodata from/to geopaparazzi
Generated by Plugin Builder: http://g-sherman.github.io/Qgis-Plugin-Builder/
-------------------
begin : 2018-07-19
copyright : (C) 2018 by Enrico A. Chiaradia
email : enrico.chiaradia@yahoo.it
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
__author__ = 'Enrico A. Chiaradia'
__date__ = '2018-07-19'
__copyright__ = '(C) 2018 by Enrico A. Chiaradia'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from qgis.core import QgsProcessingProvider
from .import_gpap_algorithm import ImportGpapAlgorithm
from .export_spatialite_algorithm import ExportSpatialiteAlgorithm
from .export_tiles_algorithm import ExportTilesAlgorithm
class IOGeopaparazziProvider(QgsProcessingProvider):
def __init__(self):
QgsProcessingProvider.__init__(self)
# Load algorithms
        #self.alglist = [ImportGpapAlgorithm(),ExportSpatialiteAlgorithm(),ExportTilesAlgorithm()]
self.alglist = [ImportGpapAlgorithm(),ExportSpatialiteAlgorithm(),ExportTilesAlgorithm()]
def unload(self):
"""
Unloads the provider. Any tear-down steps required by the provider
should be implemented here.
"""
pass
def loadAlgorithms(self):
"""
Loads all algorithms belonging to this provider.
"""
for alg in self.alglist:
self.addAlgorithm( alg )
def id(self):
"""
Returns the unique provider id, used for identifying the provider. This
string should be a unique, short, character only string, eg "qgis" or
"gdal". This string should not be localised.
"""
return 'io_geopaparazzi'
def name(self):
"""
Returns the provider name, which is used to describe the provider
within the GUI.
This string should be short (e.g. "Lastools") and localised.
"""
return self.tr('IO Geopaparazzi')
def longName(self):
"""
        Returns a longer version of the provider name, which can include
extra details such as version numbers. E.g. "Lastools LIDAR tools
(version 2.2.1)". This string should be localised. The default
implementation returns the same string as name().
"""
return self.tr('IO Geopaparazzi (version 2.0)')
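# A plausible registration sketch (not part of this file): a QGIS 3 plugin would
# normally add the provider to the Processing registry from its init hook; the
# placement below is an assumption, the registry calls are standard QGIS API.
from qgis.core import QgsApplication
provider = IOGeopaparazziProvider()
QgsApplication.processingRegistry().addProvider(provider)      # typically in initProcessing()
# ... and on plugin unload:
# QgsApplication.processingRegistry().removeProvider(provider)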
|
mitzvotech/honorroll
|
app/utils.py
|
Python
|
mit
| 2,178
| 0.002296
|
import csv
import sys
from models import *
from datetime import datetime
import codecs
import json
# from models import Attorney, Organization
from flask_mail import Message
def load_attorneys_from_csv(filename):
with codecs.open(filename, mode='rb', encoding='utf-8') as csvfile:
attorneys = [row for row in csv.reader(csvfile.read().splitlines())]
attorneys.pop(0)
try:
for attorney in attorneys:
                # Check to see if the email address is in the system, and if it is, simply add the new record...
if check_new_email(attorney[3]):
a = Attorney.objects.get(email_address=attorney[3])
else:
a = Attorney()
a.first_name = attorney[0]
a.middle_initial = attorney[1]
a.last_name = attorney[2]
a.email_address = attorney[3]
a.organization_name = Organization.objects(
organization_name=attorney[4]
).upsert_one(organization_name=attorney[4]) \
.organization_name
if len(a.records) <= 1:
a.records.append({
'year': attorney[5],
'honor_choice': attorney[6],
'rule_49_choice': attorney[7],
'date_modified': datetime.now(),
'method_added': u'bulk'
})
a.save()
print(attorney[3] + " is loaded.")
except:
print( "Unexpected error:", sys.exc_info()[0])
raise
return True
def check_new_email(email_address):
try:
Attorney.objects.get(email_address=email_address)
return True
except Attorney.DoesNotExist:
return False
if __name__ == "__main__":
import sys
import os
from models import *
MONGODB_URI = os.environ.get(
"MONGOLAB_URI", 'mongodb://localhost/honorroll')
mongo_client = connect(host=MONGODB_URI)
filename = sys.argv[1]
load_attorneys_from_csv(filename)
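# Illustration only: load_attorneys_from_csv() expects one header row followed by
# rows with the eight columns indexed above, i.e. first_name, middle_initial,
# last_name, email_address, organization_name, year, honor_choice, rule_49_choice.
# A made-up example row:
#
#     Jane,Q,Doe,jane.doe@example.org,Example Legal Aid,2016,gold,yes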
|
hazmalware/sharephish
|
taxiigenerator.py
|
Python
|
gpl-3.0
| 3,968
| 0.024698
|
import pycurl
import cStringIO
import random
import HTMLParser
def generate_TAXII_header(xml, ssl=True):
headers = {
"Content-Type": "application/xml",
"Content-Length": str(len(xml)),
"User-Agent": "TAXII Client Application",
"Accept": "application/xml",
"X-TAXII-Accept": "urn:taxii.mitre.org:message:xml:1.0",
"X-TAXII-Content-Type": "urn:taxii.mitre.org:message:xml:1.0",
}
if ssl:
headers["X-TAXII-Protocol"] = "urn:taxii.mitre.org:protocol:https:1.0"
else:
headers["X-TAXII-Protocol"] = "urn:taxii.mitre.org:protocol:http:1.0"
return headers
def taxi_wrapper(xml):
xmlstart = """<?xml version="1.0" encoding="UTF-8" ?>"""
boilerplate = """xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:taxii_11="http://taxii.mitre.org/messages/taxii_xml_binding-1.1" xsi:schemaLocation="http://taxii.mitre.org/messages/taxii_xml_binding-1.1 http://taxii.mitre.org/messages/taxii_xml_binding-1.1" """
message_id = str(random.randint(345271,9999999999))
xml_inbox = xmlstart + """
<taxii_11:Inbox_Message {{boilerplate}} message_id="{{message_id}}">
<taxii_11:Content_Block>
<taxii_11:Content_Binding binding_id="{{content_binding}}" />
<taxii_11:Content>
{{content_data}}
</taxii_11:Content>
</taxii_11:Content_Block>
</taxii_11:Inbox_Message>"""
xml = xml_inbox.replace('{{boilerplate}}',boilerplate) \
.replace('{{message_id}}',message_id) \
.replace('{{content_binding}}','urn:stix.mitre.org:xml:1.1.1') \
.replace('{{content_data}}', xml )
return xml
def taxi_poll_xml(feedid):
    xmlstart = """<?xml version="1.0" encoding="UTF-8" ?>"""
    boilerplate = """xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:taxii_11="http://taxii.mitre.org/messages/taxii_xml_binding-1.1" xsi:schemaLocation="http://taxii.mitre.org/messages/taxii_xml_binding-1.1 http://taxii.mitre.org/messages/taxii_xml_binding-1.1" """
message_id = str(random.randint(345271,9999999999))
xml_poll = xmlstart + """
<taxii_11:Poll_Request {{boilerplate}} message_id="{{message_id}}" collection_name="{{feed_name}}" >
<taxii_11:Poll_Parameters allow_asynch="false">
<taxii_11:Response_Type>FULL</taxii_11:Response_Type>
<taxii_11:Content_Binding binding_id="{{content_binding}}" />
</taxii_11:Poll_Parameters>
{{start_end}}
</taxii_11:Poll_Request>"""
xml = xml_poll.replace('{{boilerplate}}',boilerplate) \
.replace('{{message_id}}',message_id) \
.replace('{{content_binding}}','urn:stix.mitre.org:xml:1.1.1') \
.replace('{{feed_name}}', feedid )
return xml
#-----------------------------------------
def send_xml(setup, xml, ssl=True):
taxiixml = taxi_wrapper(xml)
return send(setup, taxiixml, ssl)
def get_xml(setup, feedid, ssl=True):
taxiixml = taxi_poll_xml(feedid)
return send(setup, taxiixml, ssl)
def send(setup, taxiixml, ssl=True):
headers = [
"Content-Type: application/xml",
"Content-Length: " + str(len(taxiixml)),
"User-Agent: TAXII Client Application",
"Accept: application/xml",
"X-TAXII-Accept: urn:taxii.mitre.org:message:xml:1.1",
"X-TAXII-Content-Type: urn:taxii.mitre.org:message:xml:1.1",
"X-TAXII-Protocol: urn:taxii.mitre.org:protocol:https:1.0",
]
buf = cStringIO.StringIO()
conn = pycurl.Curl()
conn.setopt(pycurl.URL, setup["url"])
conn.setopt(pycurl.USERPWD, "{0}:{1}".format(setup["user"], setup["password"]))
conn.setopt(pycurl.HTTPHEADER, headers)
conn.setopt(pycurl.POST, 1)
conn.setopt(pycurl.TIMEOUT, 999999)
conn.setopt(pycurl.WRITEFUNCTION, buf.write)
conn.setopt(pycurl.POSTFIELDS, taxiixml)
conn.setopt(pycurl.SSL_VERIFYPEER, 0)
conn.perform()
hp = HTMLParser.HTMLParser()
result = hp.unescape(buf.getvalue()).encode('ascii', 'ignore')
buf.close()
conn.close()
return result
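# A minimal usage sketch: the 'setup' dict consumed by send()/send_xml()/get_xml()
# only needs 'url', 'user' and 'password'.  The endpoint, credentials, collection
# name and placeholder STIX document below are made up.
setup = {"url": "https://taxii.example.org/services/inbox",
         "user": "apiuser",
         "password": "secret"}
stix_xml = "<stix:STIX_Package/>"              # placeholder STIX 1.1.1 document
response = send_xml(setup, stix_xml)           # wrap in a TAXII 1.1 Inbox message and POST
feed = get_xml(setup, "phishing-indicators")   # poll an existing collection by name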
|
Ormod/Diamond
|
src/diamond/handler/archive.py
|
Python
|
mit
| 2,195
| 0
|
# coding=utf-8
"""
Write the collected stats to a locally stored log file. Rotate the log file
every night and remove after 7 days.
"""
from Handler import Handler
import logging
import logging.handlers
class ArchiveHandler(Handler):
"""
Implements the Handler abstract class, archiving data to a log file
"""
def __init__(self, config):
"""
Create a new instance of the ArchiveHandler class
"""
# Initialize Handler
Handler.__init__(self, config)
# Create Archive Logger
self.archive = logging.getLogger('archive')
self.archive.setLevel(logging.DEBUG)
self.archive.propagate = self.config['propagate']
# Create Archive Log Formatter
formatter = logging.Formatter('%(message)s')
# Create Archive Log Handler
handler = logging.handlers.TimedRotatingFileHandler(
filename=self.config['log_file'],
when='midnight',
interval=1,
backupCount=int(self.config['days']),
encoding=self.config['encoding']
)
handler.setFormatter(formatter)
handler.setLevel(logging.DEBUG)
self.archive.addHandler(handler)
def get_default_config_help(self):
"""
Returns the help text for the configuration options for this handler
"""
config = super(ArchiveHandler, self).get_default_config_help()
config.update({
            'log_file': 'Path to the logfile',
'days': 'How many days to store',
'encoding': '',
'propagate': 'Pass handled metrics to configured root logger',
})
return config
def get_default_config(self):
"""
Return the default config for the handler
"""
config = super(ArchiveHandler, self).get_default_config()
config.update({
'log_file': '',
'days': 7,
'encoding': None,
'propagate': False,
})
return config
def process(self, metric):
"""
Send a Metric to the Archive.
"""
# Archive Metric
self.archive.info(str(metric).strip())
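# A sketch of how this handler might be configured (not part of this file): in a
# Diamond deployment these keys normally live in diamond.conf and mirror
# get_default_config(); the log path below is made up.
#
#     [handlers]
#     [[ArchiveHandler]]
#     log_file = /var/log/diamond/archive.log
#     days = 7
#     propagate = False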
|
felix9064/python
|
Demo/pcc/mpl_squares.py
|
Python
|
mit
| 606
| 0
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Plot a simple line chart
import matplotlib.pyplot as plt
input_values = list(range(1, 6))
squares = [x * x for x in input_values]
# Plot a meaningful figure from the values passed in
plt.plot(input_values, squares, linewidth=3)
# Set the chart title and the title font size
plt.title("Square Numbers", fontsize=14)
# Label the axes
plt.xlabel("Value", fontsize=14)
plt.ylabel("Square of Value", fontsize=14)
# Set the size of the tick labels
plt.tick_params(axis='both', labelsize=14)
# Open the viewer and display the plotted figure
plt.show()
|
yupasik/python_training
|
bdd/contact_scenarios.py
|
Python
|
apache-2.0
| 325
| 0
|
from pytest_bdd import scenario
from .contact_steps import *
@scenario("contacts.feature", "Add new contact")
def test_add_new_contact():
pass
@scenario("contacts.feature", "Delete a contact")
def test_delete_contact():
pass
@scenario("contacts.feature", "Modify a contact")
def test_modify_contact():
pass
|
harun-emektar/webfs
|
tests/Test_WebFSStat.py
|
Python
|
apache-2.0
| 655
| 0.015267
|
from webfs import WebFSStat
import stat
def Test_Basic():
fields = ('st_mode', 'st_ino', 'st_dev', 'st_nlink', 'st_uid', 'st_gid',
'st_size', 'st_atime', 'st_mtime', 'st_ctime')
st = WebFSStat()
print st.__dict__.keys()
for field in fields:
assert field in st.__dict__.keys(), 'field(%s) is not in members' % field
def Test_InitParam():
st = WebFSStat()
assert st.st_mode == stat.S_IFDIR | 0555
st = WebFSStat(False)
assert st.st_mode == stat.S_IFREG | 0444
def Test_IsDir():
st = WebFSStat()
assert st.isDir()
st = WebFSStat(False)
assert not st.isDir()
|
enthought/pikos
|
pikos/live/ui/cprofile_view.py
|
Python
|
bsd-3-clause
| 8,716
| 0.000459
|
from operator import attrgetter
from traits.api import Any, Int, Bool, on_trait_change, Dict, Button, Str, \
HasTraits, cached_property, Property, Event, Either, Float, Instance
from traitsui.api import View, Item, UItem, VGroup, HGroup, Spring, \
TabularEditor, HSplit, Group, ModelView
from traitsui.tabular_adapter import TabularAdapter
from chaco.api import Plot, LabelAxis
from chaco.tools.api import ZoomTool, PanTool
from chaco.ticks import ShowAllTickGenerator
from enable.component_editor import ComponentEditor
from pikos.live.ui.base_view import BaseView
from pikos.live.ui.barplot import SelectableBarPlot, BarSelectTool
class TableItem(HasTraits):
id = Int
filename = Str
line_number = Any
function_name = Str
callcount = Int
per_call = Float
total_time = Float
cumulative_time = Float
def __init__(self, id, filename, line_number, function_name, callcount,
per_call, total_time, cumulative_time, **traits):
kwargs = {}
kwargs.update(traits)
kwargs.update(dict(
id=id,
filename=filename,
            function_name=function_name,
line_number=line_number,
callcount=callcount,
per_call=per_call,
total_time=total_time,
cumulative_time=cumulative_time,
))
super(TableItem, self).__init__(**kwargs)
class CProfileTabularAdapter(TabularAdapter):
columns = (
('Filename', 'filename'),
('Function Name', 'function_name'),
('Line Number', 'line_number'),
('Number of Calls', 'callcount'),
('Per Call', 'per_call'),
# ('Per Call (Cumulative)', 'cumulative_percall'),
('Total Time', 'total_time'),
('Cumulative Time', 'cumulative_time'),
)
class CProfileTableView(ModelView):
title = Str
data_items = Property(depends_on='model.data_items,sort_column,ascending')
adapter = Any
column_clicked = Event
sort_column = Either(None, Int)
ascending = Bool(False)
def _column_clicked_changed(self, event):
if event is None:
self.sort_column = None
elif self.sort_column == event.column:
self.ascending = not self.ascending
else:
self.sort_column = event.column
self.ascending = False
def _adapter_default(self):
return CProfileTabularAdapter()
@cached_property
def _get_data_items(self):
items = [TableItem(*args) for args in self.model.data_items]
if self.sort_column is None:
return items
attr = self.adapter.columns[self.sort_column][1]
return sorted(items, key=attrgetter(attr), reverse=self.ascending)
def default_traits_view(self):
return View(
UItem(
'data_items',
editor=TabularEditor(
adapter=self.adapter,
column_clicked='column_clicked',
),
),
height=800,
width=1100,
resizable=True,
title='CProfile Live',
)
class CProfileView(BaseView):
# Initialization
plotted = Bool(False)
barplot = Any
sort_values_button = Button('Sort')
FORMATS = Dict({
'id': '0x{0:x}',
})
def _plot_default(self):
container = Plot(
self.model.plot_data,
)
container.renderer_map['bar'] = SelectableBarPlot
container.padding_left = 100
container.padding_bottom = 150
# container.plot(('x', 'y'), type='bar')
self.zoom_tool = ZoomTool(
container,
)
container.underlays.append(self.zoom_tool)
container.tools.append(self.zoom_tool)
self.pan_tool = PanTool(
container,
)
container.tools.append(self.pan_tool)
return container
# @on_trait_change('model.index_item')
# def _on_model_index_item_change(self, index_item):
# super(CProfileView, self)._on_model_index_item_change(index_item)
# # self.plot.x_axis.tick_generator = ShowAllTickGenerator(
# # positions=self.model.plot_data.get_data('x'))
# @on_trait_change('model.value_item')
# def _on_model_value_item_change(self, value_item):
# super(CProfileView, self)._on_model_value_item_change(value_item)
# Handlers
@on_trait_change('model.updated')
def _on_model_update_fired(self):
if not self.plotted:
x = self.model.plot_data.get_data('x')
y = self.model.plot_data.get_data('y')
if len(x) == 0 or len(y) == 0:
return
self.barplot = self.plot.plot(('x', 'y'), type='bar',
bar_width=0.8)[0]
self.barplot.index.sort_order = 'ascending'
select = BarSelectTool(
self.barplot,
selection_mode='single',
)
self.barplot.tools.append(select)
self.barplot.index.on_trait_change(
self._metadata_changed, "metadata_changed")
self.plotted = True
self.plot.y_mapper.range.low_setting = 'auto'
self.plot.y_mapper.range.high_setting = 'auto'
def _format_key(self, key):
format_ = self.FORMATS.get(self.model.index_item)
if format_ is None:
return str(key)
try:
return format_.format(key)
except ValueError:
return str(key)
@on_trait_change('model.plot_keys')
def _on_model_plot_keys_changed(self):
positions = self.model.plot_data.get_data('x')
label_axis = LabelAxis(
self.plot, orientation='bottom',
title='Keys',
title_spacing=100,
positions=positions,
labels=[self._format_key(i)
for i in self.model.plot_keys],
small_haxis_style=True,
label_rotation=90,
tick_generator=ShowAllTickGenerator(
positions=positions,
),
)
self.plot.underlays.remove(self.plot.index_axis)
self.plot.index_axis = label_axis
self.plot.underlays.append(label_axis)
def _sort_values_button_fired(self):
self.model.sort_by_current_value()
self.plot.invalidate_and_redraw()
def _metadata_changed(self, new):
self.plot.invalidate_and_redraw()
# data_indices = self.scatter.index.metadata.get('selections', [])
# if len(data_indices) == 0:
# self.model.selected_index = None
# return
# self.model.selected_index = data_indices[0]
# def _last_n_points_changed(self):
# self.plot.x_mapper.range.tracking_amount = self.last_n_points
# def _follow_plot_changed(self):
# if self.follow_plot:
# self.plot.x_mapper.range.low_setting = 'track'
# self.plot.x_mapper.range.high_setting = 'auto'
# self.plot.x_mapper.range.tracking_amount = self.last_n_points
# else:
# self.plot.x_mapper.range.low_setting = self.plot.x_mapper.range.low
# self.plot.x_mapper.range.high_setting = \
# self.plot.x_mapper.range.high
traits_view = View(
Group(
VGroup(
HGroup(
Item('model.index_item'),
Item('model.value_item'),
# ),
# HGroup(
Spring(),
UItem('sort_values_button'),
UItem('reset_view_button'),
),
),
HSplit(
UItem('plot', editor=ComponentEditor()),
# UItem(
# 'model.selected_item',
# editor=TabularEditor(adapter=DetailsAdapter()),
# width=350),
),
),
height=800,
width=1100,
resizable=True,
title='Live Recording Plot'
)
class CProfileMixedView(ModelView):
title = Str
|
Justasic/StackSmash
|
StackSmash/urls.py
|
Python
|
bsd-2-clause
| 3,146
| 0.00445
|
from django.conf.urls import patterns, include, url
from django.shortcuts import redirect, render_to_response
from django.template.context import RequestContext
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
# Just redirect / to /blog for now until I can
# come up with something to put on the homepage..
def to_blog(request):
return redirect('/blog/', permanent=False)
# Follow the BSD license and allow the source/binary to reproduce
# the license and copyright message
def sslicense(request):
slicense = """
Copyright (c) 2012-2013 Justin Crawford <Justasic@gmail.com>
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE
"""
ctx = {
'parts': {
"title": "License",
"html_title": "License",
"fragment": slicense.replace('\n', '<br>'),
},
}
return render_to_response('docs/docs.html', RequestContext(request, ctx))
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'StackSmash.views.home', name='home'),
# url(r'^StackSmash/', include('StackSmash.foo.urls')),
# TODO: Fix index and use something... Should identify subdomains somehow..
#url(r'^$', include('StackSmash.apps.blog.urls')),
url(r'^license/', sslicense, name='license'),
#url(r'^docs/', include('StackSmash.apps.docs.urls'), name='docs', app_name='docs'),
url(r'^blog/', include('StackSmash.apps.blog.urls', namespace='blog')),
url(r'^projects/', include('StackSmash.apps.projects.urls', namespace='projects')),
url(r'^upload/', include('StackSmash.apps.uploader.urls', namespace='upload')),
url(r'^$', to_blog, name='index'),
#url(r'^projects/', include('StackSmash.apps.projects.urls')),
# Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls), name='admin'),
)
|
drnextgis/QGIS
|
python/plugins/processing/algs/qgis/BarPlot.py
|
Python
|
gpl-2.0
| 3,271
| 0.000611
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
BarPlot.py
---------------------
Date : January 2013
Copyright : (C) 2013 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'January 2013'
__copyright__ = '(C) 2013, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import matplotlib.pyplot as plt
import matplotlib.pylab as lab
import numpy as np
from processing.core.parameters import ParameterTable
from processing.core.parameters import ParameterTableField
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.outputs import OutputHTML
from processing.tools import vector
from processing.tools import dataobjects
class BarPlot(GeoAlgorithm):
INPUT = 'INPUT'
OUTPUT = 'OUTPUT'
NAME_FIELD = 'NAME_FIELD'
VALUE_FIELD = 'VALUE_FIELD'
def defineCharacteristics(self):
self.name, self.i18n_name = self.trAlgorithm('Bar plot')
self.group, self.i18n_group = self.trAlgorithm('Graphics')
self.addParameter(ParameterTable(self.INPUT, self.tr('Input table')))
self.addParameter(ParameterTableField(self.NAME_FIELD,
self.tr('Category name field'),
self.INPUT,
ParameterTableField.DATA_TYPE_NUMBER))
self.addParameter(ParameterTableField(self.VALUE_FIELD,
self.tr('Value field'),
self.INPUT,
ParameterTableField.DATA_TYPE_NUMBER))
self.addOutput(OutputHTML(self.OUTPUT, self.tr('Bar plot')))
def processAlgorithm(self, progress):
layer = dataobjects.getObjectFromUri(
self.getParameterValue(self.INPUT))
namefieldname = self.getParameterValue(self.NAME_FIELD)
valuefieldname = self.getParameterValue(self.VALUE_FIELD)
output = self.getOutputValue(self.OUTPUT)
values = vector.values(layer, namefieldname, valuefieldname)
plt.close()
ind = np.arange(len(values[namefieldname]))
width = 0.8
plt.bar(ind, values[valuefieldname], width, color='r')
plt.xticks(ind, values[namefieldname], rotation=45)
plotFilename = output + '.png'
lab.savefig(plotFilename)
with open(output, 'w') as f:
f.write('<html><img src="' + plotFilename + '"/></html>')
|
zhyu/PasswdManager
|
util.py
|
Python
|
gpl-3.0
| 1,732
| 0.011547
|
# -*- coding:utf-8 -*-
from Crypto.Cipher import AES
from Crypto.Hash import MD5
import binascii
import urllib2
import string, random
# algorithm
MODE = AES.MODE_CBC
def __getKeyObject(key):
obj = AES.new(md5Encoding(key), MODE)
return obj
def md5Encoding(msg):
'''
get md5 encrypted text
@param msg: the plain text message
'''
m = MD5.new()
m.update(msg)
return m.hexdigest()
def getRandomString(length, optionList=['number', 'lower', 'upper', 'punc']):
charPool = {'number' : string.digits,
'lower' : string.lowercase,
'upper' : string.uppercase,
'punc' : string.punctuation }
pool = ''
for key in optionList:
        if charPool.has_key(key):
pool = pool + charPool.get(key)
s = [random.choice(pool) for _ in xrange(length)]
return ''.join(s)
def encrypt(key, msg):
'''
Encrypt message using given password
@param key: the master password
@param msg: the plain text to be encrypted
'''
obj = __getKeyObject(key)
# encrypt
xx = msg*16
cipher = obj.encrypt(xx)
# convert bin to string
s = binascii.b2a_hex(cipher)
return s
def decrypt(key, msg):
'''
Encrypt message
@param key: the master password
@param msg: the cipher text to be dencrypted
'''
obj = __getKeyObject(key)
# convert string to bin
b = binascii.a2b_hex(msg)
# decrypt
plain = obj.decrypt(b)
return plain[:len(plain)/16]
def getLastVersion(versionUrl):
ver = ''
try:
f = urllib2.urlopen(versionUrl)
s = f.read()
f.close()
ver = s.split(' ')[1]
except:
pass
return ver
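# A minimal round-trip sketch with made-up values (PyCrypto-era code, like the
# module itself; behaviour assumes the PyCrypto builds this module targets).
# Note the crude padding: encrypt() repeats the plaintext 16 times so its length
# is always a multiple of the AES block size, and decrypt() keeps the first 1/16th.
token = encrypt('hunter2', 'my secret')    # hex string safe to store
print(decrypt('hunter2', token))           # expected to print 'my secret'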
|
jgphpc/linux
|
python/0.py
|
Python
|
gpl-2.0
| 888
| 0.029279
|
#!/usr/bin/env python
quit()
for iii in range(0,6):
try:
print(iii,iii/(4-iii))
except ZeroDivisionError as e:
#print("wrong: i={} {}".format(iii,e.message))
print("wrong: i={} {}".format(iii,"nooooooo"))
#else:
# print("OK")
finally:
print("continue...")
def pickkey(mylist):
""" jg's function"""
return mylist[1]
quit()
#listjg=[ (5,'A'), (4,'Z'), (8,'N'), (2,'C'), ]
#listjg.sort(key=pickkey) ;print(listjg)
#listjg.sort(reverse=True) ;print(listjg)
quit()
x=3
#def myfL(a,b):
# return []
def myf (a,b,*other):
print(type(other))
print(sum(other))
c=sum(other)
#return (a+b+other)
#res=myf(5,2,1) ;print(res)
#res=myf(a=5,b=2,c=1) ;print(res)
#res=myf(b=2,a=5,c=1) ;print(res)
#res=myf(a=5) ;print(res)
#no res=myf(a=5,b=2,c=1) ;print(res)
#res=myf(5,2,1,0) ;print(res)
#myf(5,2)
|
eharney/cinder
|
cinder/api/contrib/volume_image_metadata.py
|
Python
|
apache-2.0
| 6,636
| 0
|
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The Volume Image Metadata API extension."""
from six.moves import http_client
import webob
from oslo_log import log as logging
from cinder.api import common
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder import exception
from cinder.i18n import _
from cinder import volume
LOG = logging.getLogger(__name__)
authorize = extensions.soft_extension_authorizer('volume',
'volume_image_metadata')
class VolumeImageMetadataController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(VolumeImageMetadataController, self).__init__(*args, **kwargs)
self.volume_api = volume.API()
def _get_image_metadata(self, context, volume_id):
# Not found exception will be handled at the wsgi level
volume = self.volume_api.get(context, volume_id)
meta = self.volume_api.get_volume_image_metadata(context, volume)
return (volume, meta)
def _add_image_metadata(self, context, resp_volume_list, image_metas=None):
"""Appends the image metadata to each of
|
the given volume.
:param context: the request context
:param resp_volume_list: the response volume list
:param image_metas: The image metadata to append, if None is provided
it will be retrieved from the database. An empty
dict means there is no metadata and it should not
be retrieved from the db.
"""
vol_id_list = []
for vol in resp_volume_list:
            vol_id_list.append(vol['id'])
if image_metas is None:
try:
image_metas = self.volume_api.get_list_volumes_image_metadata(
context, vol_id_list)
except Exception as e:
LOG.debug('Get image metadata error: %s', e)
return
if image_metas:
for vol in resp_volume_list:
image_meta = image_metas.get(vol['id'])
if image_meta:
vol['volume_image_metadata'] = dict(image_meta)
@wsgi.extends
def show(self, req, resp_obj, id):
context = req.environ['cinder.context']
if authorize(context):
self._add_image_metadata(context, [resp_obj.obj['volume']])
@wsgi.extends
def detail(self, req, resp_obj):
context = req.environ['cinder.context']
if authorize(context):
# Just get the image metadata of those volumes in response.
volumes = list(resp_obj.obj.get('volumes', []))
if volumes:
self._add_image_metadata(context, volumes)
@wsgi.action("os-set_image_metadata")
def create(self, req, id, body):
context = req.environ['cinder.context']
if authorize(context):
try:
metadata = body['os-set_image_metadata']['metadata']
except (KeyError, TypeError):
msg = _("Malformed request body.")
raise webob.exc.HTTPBadRequest(explanation=msg)
new_metadata = self._update_volume_image_metadata(context,
id,
metadata,
delete=False)
return {'metadata': new_metadata}
def _update_volume_image_metadata(self, context,
volume_id,
metadata,
delete=False):
try:
volume = self.volume_api.get(context, volume_id)
return self.volume_api.update_volume_metadata(
context,
volume,
metadata,
delete=False,
meta_type=common.METADATA_TYPES.image)
# Not found exception will be handled at the wsgi level
except (ValueError, AttributeError):
msg = _("Malformed request body.")
raise webob.exc.HTTPBadRequest(explanation=msg)
except exception.InvalidVolumeMetadata as error:
raise webob.exc.HTTPBadRequest(explanation=error.msg)
except exception.InvalidVolumeMetadataSize as error:
raise webob.exc.HTTPRequestEntityTooLarge(explanation=error.msg)
@wsgi.action("os-show_image_metadata")
def index(self, req, id, body):
context = req.environ['cinder.context']
return {'metadata': self._get_image_metadata(context, id)[1]}
@wsgi.action("os-unset_image_metadata")
def delete(self, req, id, body):
"""Deletes an existing image metadata."""
context = req.environ['cinder.context']
if authorize(context):
try:
key = body['os-unset_image_metadata']['key']
except (KeyError, TypeError):
msg = _("Malformed request body.")
raise webob.exc.HTTPBadRequest(explanation=msg)
if key:
vol, metadata = self._get_image_metadata(context, id)
if key not in metadata:
raise exception.GlanceMetadataNotFound(id=id)
self.volume_api.delete_volume_metadata(
context, vol, key,
meta_type=common.METADATA_TYPES.image)
else:
msg = _("The key cannot be None.")
raise webob.exc.HTTPBadRequest(explanation=msg)
return webob.Response(status_int=http_client.OK)
class Volume_image_metadata(extensions.ExtensionDescriptor):
"""Show image metadata associated with the volume."""
name = "VolumeImageMetadata"
alias = "os-vol-image-meta"
updated = "2012-12-07T00:00:00+00:00"
def get_controller_extensions(self):
controller = VolumeImageMetadataController()
extension = extensions.ControllerExtension(self, 'volumes', controller)
return [extension]
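# Illustrative request body of the shape parsed by the os-set_image_metadata
# action above; the metadata keys and values are made up.
example_body = {"os-set_image_metadata": {"metadata": {"image_name": "ubuntu-20.04",
                                                       "min_disk": "10"}}}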
|
philippjfr/bokeh
|
bokeh/core/templates.py
|
Python
|
bsd-3-clause
| 1,358
| 0
|
''' Provide Jinja2 templates used by Bokeh to embed Bokeh models
(e.g. plots, widgets, layouts) in various ways.
.. bokeh-jinja:: bokeh.core.templates.AUTOLOAD_JS
.. bokeh-jinja:: bokeh.core.templates.AUTOLOAD_NB_JS
.. bokeh-jinja:: bokeh.core.templates.AUTOLOAD_TAG
.. bokeh-jinja:: bokeh.core.templates.CSS_RESOURCES
.. bokeh-jinja:: bokeh.core.templates.DOC_JS
.. bokeh-jinja:: bokeh.core.templates.FILE
.. bokeh-jinja:: bokeh.core.templates.JS_RESOURCES
.. bokeh-jinja:: bokeh.core.templates.NOTEBOOK_LOAD
.. bokeh-jinja:: bokeh.core.templates.PLOT_DIV
.. bokeh-jinja:: bokeh.core.templates.SCRIPT_TAG
'''
from __future__ import absolute_import
import json
from jinja2 import Environment, PackageLoader, Markup
_env = Environment(loader=PackageLoader('bokeh.core', '_templates'))
_env.filters['json'] = lambda obj: Markup(json.dumps(obj))
JS_RESOURCES = _env.get_template("js_resources.html")
CSS_RESOURCES = _env.get_template("css_resources.html")
SCRIPT_TAG = _env.get_template("script_tag.html")
PLOT_DIV = _env.get_template("plot_div.html")
DOC_JS = _env.get_template("doc_js.js")
FILE = _env.get_template("file.html")
NOTEBOOK_LOAD = _env.get_template("notebook_load.html")
AUTOLOAD_JS = _env.get_template("autoload_js.js")
AUTOLOAD_NB_JS = _env.get_template("autoload_nb_js.js")
AUTOLOAD_TAG = _env.get_template("autoload_tag.html")
|
mbayon/TFG-MachineLearning
|
venv/lib/python3.6/site-packages/scipy/ndimage/filters.py
|
Python
|
mit
| 52,520
| 0.00019
|
# Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import division, print_function, absolute_import
import math
import numpy
from . import _ni_support
from . import _nd_image
from scipy.misc import doccer
from scipy._lib._version import NumpyVersion
__all__ = ['correlate1d', 'convolve1d', 'gaussian_filter1d', 'gaussian_filter',
'prewitt', 'sobel', 'generic_laplace', 'laplace',
'gaussian_laplace', 'generic_gradient_magnitude',
'gaussian_gradient_magnitude', 'correlate', 'convolve',
'uniform_filter1d', 'uniform_filter', 'minimum_filter1d',
'maximum_filter1d', 'minimum_filter', 'maximum_filter',
'rank_filter', 'median_filter', 'percentile_filter',
'generic_filter1d', 'generic_filter']
_input_doc = \
"""input : array_like
Input array to filter."""
_axis_doc = \
"""axis : int, optional
The axis of `input` along which to calculate. Default is -1."""
_output_doc = \
"""output : array, optional
The `output` parameter passes an array in which to store the
filter output. Output array should have different name as compared
to input array to avoid aliasing errors."""
_size_foot_doc = \
"""size : scalar or tuple, optional
See footprint, below
footprint : array, optional
Either `size` or `footprint` must be defined. `size` gives
the shape that is taken from the input array, at every element
position, to define the input to the filter function.
`footprint` is a boolean array that specifies (implicitly) a
shape, but also which of the elements within this shape will get
passed to the filter function. Thus ``size=(n,m)`` is equivalent
to ``footprint=np.ones((n,m))``. We adjust `size` to the number
of dimensions of the input array, so that, if the input array is
shape (10,10,10), and `size` is 2, then the actual size used is
(2,2,2).
"""
_mode_doc = \
"""mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
The `mode` parameter determines how the array borders are
handled, where `cval` is the value when mode is equal to
'constant'. Default is 'reflect'"""
_mode_multiple_doc = \
"""mode : str or sequence, optional
The `mode` parameter determines how the array borders are
handled. Valid modes are {'reflect', 'constant', 'nearest',
'mirror', 'wrap'}. `cval` is the value used when mode is equal to
'constant'. A list of modes with length equal to the number of
axes can be provided to specify different modes for different
axes. Default is 'reflect'"""
_cval_doc = \
"""cval : scalar, optional
Value to fill past edges of input if `mode` is 'constant'. Default
is 0.0"""
_origin_doc = \
"""origin : scalar, optional
The `origin` parameter controls the placement of the filter.
Default 0.0."""
_extra_arguments_doc = \
"""extra_arguments : sequence, optional
Sequence of extra positional arguments to pass to passed function"""
_extra_keywords_doc = \
"""extra_keywords : dict, optional
dict of extra keyword arguments to pass to passed function"""
docdict = {
'input': _input_doc,
'axis': _axis_doc,
'output': _output_doc,
'size_foot': _size_foot_doc,
'mode': _mode_doc,
'mode_multiple': _mode_multiple_doc,
'cval': _cval_doc,
'origin': _origin_doc,
'extra_arguments': _extra_arguments_doc,
'extra_keywords': _extra_keywords_doc,
}
docfiller = doccer.filldoc(docdict)
@docfiller
def correlate1d(input, weights, axis=-1, output=None, mode="reflect",
cval=0.0, origin=0):
"""Calculate a one-dimensional correlation along the given axis.
The lines of the array along the given axis are correlated with the
given weights.
Parameters
----------
%(input)s
weights : array
        One-dimensional sequence of numbers.
%(axis)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
Examples
--------
>>> from scipy.ndimage import correlate1d
>>> correlate1d([2, 8, 0, 4, 1, 9, 9, 0], weights=[1, 3])
array([ 8, 26, 8, 12, 7, 28, 36, 9])
"""
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output, return_value = _ni_support._get_output(output, input)
weights = numpy.asarray(weights, dtype=numpy.float64)
if weights.ndim != 1 or weights.shape[0] < 1:
raise RuntimeError('no filter weights given')
if not weights.flags.contiguous:
weights = weights.copy()
axis = _ni_support._check_axis(axis, input.ndim)
if (len(weights) // 2 + origin < 0) or (len(weights) // 2 +
origin > len(weights)):
raise ValueError('invalid origin')
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.correlate1d(input, weights, axis, output, mode, cval,
origin)
return return_value
@docfiller
def convolve1d(input, weights, axis=-1, output=None, mode="reflect",
cval=0.0, origin=0):
"""Calculate a one-dimensional convolution along the given axis.
The lines of the array along the given axis are convolved with the
given weights.
Parameters
----------
%(input)s
weights : ndarray
One-dimensional sequence of numbers.
%(axis)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
Returns
-------
convolve1d : ndarray
Convolved array with same shape as input
Examples
--------
>>> from scipy.ndimage import convolve1d
>>> convolve1d([2, 8, 0, 4, 1, 9, 9, 0], weights=[1, 3])
array([14, 24, 4, 13, 12, 36, 27, 0])
"""
weights = weights[::-1]
origin = -origin
if not len(weights) & 1:
origin -= 1
return correlate1d(input, weights, axis, output, mode, cval, origin)
def _gaussian_kernel1d(sigma, order, radius):
"""
Computes a 1D Gaussian convolution kernel.
"""
if order < 0:
raise ValueError('order must be non-negative')
p = numpy.polynomial.Polynomial([0, 0, -0.5 / (sigma * sigma)])
x = numpy.arange(-radius, radius + 1)
phi_x = numpy.exp(p(x), dtype=numpy.double)
phi_x /= phi_x.sum()
if order > 0:
q = numpy.polynomial.Polynomial([1])
p_deriv = p.deriv()
for _ in range(order):
# f(x) = q(x) * phi(x) = q(x) * exp(p(x))
# f'(x) = (q'(x) + q(x) * p'(x)) * phi(x)
q = q.deriv() + q * p_deriv
phi_x *= q(x)
return phi_x
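# Side note (not part of scipy): the loop above is the product rule.  With
# p(x) = -x**2/(2*sigma**2) and phi(x) = exp(p(x)), writing the n-th derivative
# of phi as q_n(x)*phi(x) gives q_{n+1} = q_n' + q_n*p', which is exactly the
# 'q = q.deriv() + q * p_deriv' update.  A quick numerical check of the order-1
# kernel against a finite difference of the order-0 kernel (sigma/radius made up):
#
#     k0 = _gaussian_kernel1d(2.0, 0, 8)
#     k1 = _gaussian_kernel1d(2.0, 1, 8)
#     numpy.allclose(k1, numpy.gradient(k0), atol=1e-2)   # -> True, up to O(h**2) error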
@docfiller
def gaussian_filter1d(input, sigma, axis=-1, order=0, output=None,
mode="reflect", cval=0.0, truncate=4.0):
"""One-dimensional Gaussian filter.
Parameters
----------
%(input)s
sigma : scalar
stan
|
zgbjgg/jun
|
priv/jun_enc_dec.py
|
Python
|
mit
| 1,427
| 0.005606
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
from erlport.erlterms import Atom, List
from erlport.erlang import set_encoder, set_decoder
def setup_dtype():
set_encoder(dtype_encoder)
set_decoder(dtype_decoder)
return Atom(b'ok')
def dtype_encoder(value):
if isinstance(value, np.int64):
return np.asscalar(value)
elif isinstance(value, np.float64):
return np.asscalar(value)
elif isinstance(value, str):
try:
return value.encode('utf-8') # to express as binary() instead of string() on erlang side
except:
            return value
elif isinstance(value, list):
return [dtype_encoder(v) for v in value]
elif isinstance(value, tuple):
nvalue = ()
for v in value:
nvalue = nvalue + (dtype_encoder(v),)
return nvalue
else:
try:
return value.encode('utf-8')
except:
return value
def dtype_decoder(value):
try:
if isinstance(value, List):
return [dtype_decoder(v) for v in value]
elif isinstance(value, tuple):
nvalue = ()
for v in value:
nvalue = nvalue + (dtype_decoder(v),)
return nvalue
elif isinstance(value, str):
return value
else:
return value.decode("utf-8")
except:
return value
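# Illustration with made-up inputs: how the encoder normalises numpy scalars and
# strings before they cross the erlport boundary.
print(dtype_encoder(np.int64(7)))              # -> 7 (plain Python int)
print(dtype_encoder('año'))                    # -> b'a\xc3\xb1o' (binary() on the Erlang side)
print(dtype_encoder((np.float64(1.5), 'x')))   # -> (1.5, b'x')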
|
FRBs/FRB
|
frb/tests/test_eazy.py
|
Python
|
bsd-3-clause
| 2,608
| 0.003067
|
# Module to run tests on surveys
# Most of these are *not* done with Travis yet
# TEST_UNICODE_LITERALS
import pytest
import os
import shutil
import numpy as np
from astropy.table import Table
from frb.galaxies.frbgalaxy import FRBHost
from frb.galaxies import eazy as frbeazy
from frb.frb import FRB
from distutils.spawn import find_executable
eazy_exec = pytest.mark.skipif(find_executable('eazy') is None,
                               reason='test requires eazy')
def data_path(filename):
data_dir = os.path.join(os.path.dirname(__file__), 'files')
return os.path.join(data_dir, filename)
@pytest.fixture
def host_obj():
# VLT
photom = Table()
photom['Name'] = ['G_TEST']
photom['ra'] = 123.422
photom['dec'] = 23.222
# These are observed
photom['LRISb_V'] = 25.86
photom['LRISb_V_err'] = 0.25
photom['GMOS_S_r'] = 23.61
photom['GMOS_S_r_err'] = 0.15
photom['LRISr_I'] = 23.09
photom['LRISr_I_err'] = 0.1
photom['NOT_z'] = 23.35
photom['NOT_z_err'] = 0.3
    photom['NIRI_J'] = 21.75 + 0.91
photom['NIRI_J_err'] = 0.2
#
    host190613A = FRBHost(photom['ra'], photom['dec'], FRB.by_name('FRB20121102'))
host190613A.parse_photom(photom)
host190613A.name = 'G_TEST'
return host190613A
@eazy_exec
def test_eazy(host_obj):
if os.path.isdir(data_path('eazy')):
shutil.rmtree(data_path('eazy'))
os.mkdir(data_path('eazy'))
# Generate
frbeazy.eazy_input_files(host_obj.photom, data_path('eazy/input'),
host_obj.name,
data_path('eazy/output'),
templates='br07_default',
prior_filter='GMOS_S_r')
# Test
assert os.path.isfile(data_path('eazy/input/G_TEST.cat'))
assert os.path.isfile(data_path('eazy/input/zphot.param.G_TEST'))
assert os.path.isfile(data_path('eazy/input/zphot.translate.G_TEST'))
# Run
frbeazy.run_eazy(data_path('eazy/input'),
host_obj.name,
os.path.join(data_path('eazy/output'), 'logfile'))
assert os.path.isfile(data_path('eazy/output/photz.zout'))
# Read
zgrid, pzi, prior = frbeazy.getEazyPz(-1, MAIN_OUTPUT_FILE='photz',
OUTPUT_DIRECTORY=data_path('eazy/output'),
CACHE_FILE='Same', binaries=None, get_prior=True)
zphot, sig_zphot = frbeazy.eazy_stats(zgrid, pzi)
assert np.isclose(zphot, 0.5929259648750858, rtol=1e-4)
# Remove
shutil.rmtree(data_path('eazy'))
|
MarieVdS/ComboCode
|
__init__.py
|
Python
|
gpl-3.0
| 42
| 0
|
# -*- coding: utf-8 -*-
__all__ = ["cc"]
|
any1m1c/ipc20161
|
lista4/ipc_lista4.01.py
|
Python
|
apache-2.0
| 675
| 0.040299
|
#
#Program: list 4, question 1;
#Felipe Henrique Bastos Costa - 1615310032;
#
#
#
#
lista = []#empty list;
cont1 = 0#counter for the list index;
cont2 = 1#counter for the position of the number (first, second, etc.);
v = 5#stands in for the len of the list;
while(cont1 < v):
    x = int(input("Informe o %dº numero inteiro para colocar em sua lista:\n"%cont2))#x is the variable that receives
    #the number from the user
    lista.append(x)#the number given for x is placed inside the list;
    cont1+=1#The counters are
    cont2+=1#being incremented;
print("A lista de informada foi:\n%s"%lista)
|
Tinkerforge/brickv
|
src/brickv/bindings/bricklet_linear_poti_v2.py
|
Python
|
gpl-2.0
| 14,625
| 0.004239
|
# -*- coding: utf-8 -*-
#############################################################
# This file was automatically generated on 2022-01-18. #
# #
# Python Bindings Version 2.1.29                            #
# #
# If you have a bugfix for this file and want to commit it, #
# please fix the bug in the generator. You can find a link #
# to the generators git repository on tinkerforge.com #
#############################################################
from collections import namedtuple
try:
from .ip_connection import Device, IPConnection, Error, create_char, create_char_list, create_string, create_chunk_data
except (ValueError, ImportError):
from ip_connection import Device, IPConnection, Error, create_char, create_char_list, create_string, create_chunk_data
GetPositionCallbackConfiguration = namedtuple('PositionCallbackConfiguration', ['period', 'value_has_to_change', 'option', 'min', 'max'])
GetSPITFPErrorCount = namedtuple('SPITFPErrorCount', ['error_count_ack_checksum', 'error_count_message_checksum', 'error_count_frame', 'error_count_overflow'])
GetIdentity = namedtuple('Identity', ['uid', 'connected_uid', 'position', 'hardware_version', 'firmware_version', 'device_identifier'])
class BrickletLinearPotiV2(Device):
"""
59mm linear potentiometer
"""
DEVICE_IDENTIFIER = 2139
DEVICE_DISPLAY_NAME = 'Linear Poti Bricklet 2.0'
DEVICE_URL_PART = 'linear_poti_v2' # internal
CALLBACK_POSITION = 4
FUNCTION_GET_POSITION = 1
FUNCTION_SET_POSITION_CALLBACK_CONFIGURATION = 2
FUNCTION_GET_POSITION_CALLBACK_CONFIGURATION = 3
FUNCTION_GET_SPITFP_ERROR_COUNT = 234
FUNCTION_SET_BOOTLOADER_MODE = 235
FUNCTION_GET_BOOTLOADER_MODE = 236
FUNCTION_SET_WRITE_FIRMWARE_POINTER = 237
FUNCTION_WRITE_FIRMWARE = 238
FUNCTION_SET_STATUS_LED_CONFIG = 239
FUNCTION_GET_STATUS_LED_CONFIG = 240
FUNCTION_GET_CHIP_TEMPERATURE = 242
FUNCTION_RESET = 243
FUNCTION_WRITE_UID = 248
FUNCTION_READ_UID = 249
FUNCTION_GET_IDENTITY = 255
THRESHOLD_OPTION_OFF = 'x'
THRESHOLD_OPTION_OUTSIDE = 'o'
THRESHOLD_OPTION_INSIDE = 'i'
THRESHOLD_OPTION_SMALLER = '<'
THRESHOLD_OPTION_GREATER = '>'
BOOTLOADER_MODE_BOOTLOADER = 0
BOOTLOADER_MODE_FIRMWARE = 1
BOOTLOADER_MODE_BOOTLOADER_WAIT_FOR_REBOOT = 2
BOOTLOADER_MODE_FIRMWARE_WAIT_FOR_REBOOT = 3
BOOTLOADER_MODE_FIRMWARE_WAIT_FOR_ERASE_AND_REBOOT = 4
BOOTLOADER_STATUS_OK = 0
BOOTLOADER_STATUS_INVALID_MODE = 1
BOOTLOADER_STATUS_NO_CHANGE = 2
BOOTLOADER_STATUS_ENTRY_FUNCTION_NOT_PRESENT = 3
BOOTLOADER_STATUS_DEVICE_IDENTIFIER_INCORRECT = 4
BOOTLOADER_STATUS_CRC_MISMATCH = 5
STATUS_LED_CONFIG_OFF = 0
STATUS_LED_CONFIG_ON = 1
STATUS_LED_CONFIG_SHOW_HEARTBEAT = 2
STATUS_LED_CONFIG_SHOW_STATUS = 3
def __init__(self, uid, ipcon):
"""
Creates an object with the unique device ID *uid* and adds it to
the IP Connection *ipcon*.
"""
Device.__init__(self, uid, ipcon, BrickletLinearPotiV2.DEVICE_IDENTIFIER, BrickletLinearPotiV2.DEVICE_DISPLAY_NAME)
self.api_version = (2, 0, 0)
self.response_expected[BrickletLinearPotiV2.FUNCTION_GET_POSITION] = BrickletLinearPotiV2.RESPONSE_EXPECTED_ALWAYS_TRUE
self.response_expected[BrickletLinearPotiV2.FUNCTION_SET_POSITION_CALLBACK_CONFIGURATION] = BrickletLinearPotiV2.RESPONSE_EXPECTED_TRUE
self.response_expected[BrickletLinearPotiV2.FUNCTION_GET_POSITION_CALLBACK_CONFIGURATION] = BrickletLinearPotiV2.RESPONSE_EXPECTED_ALWAYS_TRUE
self.response_expected[BrickletLinearPotiV2.FUNCTION_GET_SPITFP_ERROR_COUNT] = BrickletLinearPotiV2.RESPONSE_EXPECTED_ALWAYS_TRUE
self.response_expected[BrickletLinearPotiV2.FUNCTION_SET_BOOTLOADER_MODE] = BrickletLinearPotiV2.RESPONSE_EXPECTED_ALWAYS_TRUE
self.response_expected[BrickletLinearPotiV2.FUNCTION_GET_BOOTLOADER_MODE] = BrickletLinearPotiV2.RESPONSE_EXPECTED_ALWAYS_TRUE
self.response_expected[BrickletLinearPotiV2.FUNCTION_SET_WRITE_FIRMWARE_POINTER] = BrickletLinearPotiV2.RESPONSE_EXPECTED_FALSE
self.response_expected[BrickletLinearPotiV2.FUNCTION_WRITE_FIRMWARE] = BrickletLinearPotiV2.RESPONSE_EXPECTED_ALWAYS_TRUE
self.response_expected[BrickletLinearPotiV2.FUNCTION_SET_STATUS_LED_CONFIG] = BrickletLinearPotiV2.RESPONSE_EXPECTED_FALSE
self.response_expected[BrickletLinearPotiV2.FUNCTION_GET_STATUS_LED_CONFIG] = BrickletLinearPotiV2.RESPONSE_EXPECTED_ALWAYS_TRUE
self.response_expected[BrickletLinearPotiV2.FUNCTION_GET_CHIP_TEMPERATURE] = BrickletLinearPotiV2.RESPONSE_EXPECTED_ALWAYS_TRUE
self.response_expected[BrickletLinearPotiV2.FUNCTION_RESET] = BrickletLinearPotiV2.RESPONSE_EXPECTED_FALSE
self.response_expected[BrickletLinearPotiV2.FUNCTION_WRITE_UID] = BrickletLinearPotiV2.RESPONSE_EXPECTED_FALSE
self.response_expected[BrickletLinearPotiV2.FUNCTION_READ_UID] = BrickletLinearPotiV2.RESPONSE_EXPECTED_ALWAYS_TRUE
self.response_expected[BrickletLinearPotiV2.FUNCTION_GET_IDENTITY] = BrickletLinearPotiV2.RESPONSE_EXPECTED_ALWAYS_TRUE
self.callback_formats[BrickletLinearPotiV2.CALLBACK_POSITION] = (9, 'B')
ipcon.add_device(self)
def get_position(self):
"""
Returns the position of the linear potentiometer. The value is
between 0% (slider down) and 100% (slider up).
If you want to get the value periodically, it is recommended to use the
:cb:`Position` callback. You can set the callback configuration
with :func:`Set Position Callback Configuration`.
"""
self.check_validity()
return self.ipcon.send_request(self, BrickletLinearPotiV2.FUNCTION_GET_POSITION, (), '', 9, 'B')
def set_position_callback_configuration(self, period, value_has_to_change, option, min, max):
"""
The period is the period with which the :cb:`Position` callback is triggered
periodically. A value of 0 turns the callback off.
If the `value has to change`-parameter is set to true, the callback is only
triggered after the value has changed. If the value didn't change
within the period, the callback is triggered immediately on change.
If it is set to false, the callback is continuously triggered with the period,
independent of the value.
It is furthermore possible to constrain the callback with thresholds.
The `option`-parameter together with min/max sets a threshold for the :cb:`Position` callback.
The following options are possible:
.. csv-table::
:header: "Option", "Description"
:widths: 10, 100
"'x'", "Threshold is turned off"
"'o'", "Threshold is triggered when the value is *outside* the min and max values"
"'i'", "Threshold is triggered when the value is *inside* or equal to the min and max values"
"'<'", "Threshold is triggered when the value is smaller than the min value (max is ignored)"
"'>'", "Threshold is triggered when the value is greater than the min value (max is ignored)"
If the option is set to 'x' (threshold turned off) the callback is triggered with the fixed period.
"""
self.check_validity()
period = int(period)
value_has_to_change = bool(value_has_to_change)
option = create_char(option)
min = int(min)
max = int(max)
self.ipcon.send_request(self, BrickletLinearPotiV2.FUNCTION_SET_POSITION_CALLBACK_CONFIGURATION, (period, value_has_to_change, option, min, max), 'I ! c B B', 0, '')
def get_position_callback_configuration(self):
"""
Returns the callback configuration as set by :func:`Set Position Callback Configuration`.
"""
self.check_validity()
return GetPositionCallbackConfiguration(*self.ipcon.send_request(self, BrickletLinearPotiV2.FUNCTION_GET_POSITION_CALLBACK_CONFIGURATION, (), '', 16, 'I ! c B B'))
def get_s
|
lemarcudal/sha_thedivision
|
test/mysite/webapp/forms.py
|
Python
|
apache-2.0
| 1,935
| 0.03876
|
from django import forms
from .models import Post
from django.contrib.auth import (
authenticate,
get_user_model,
login,
logout,
)
class PostForm(forms.ModelForm): # Post Thread View
class Meta:
model = Post
fields = ['title','image', 'user','country','guide']
widgets = { 'guide' : forms.Textarea(attrs = {'rows':12 , 'style':'resize:none'})}
##-----------------------------
User = get_user_model()
class UserLoginForm(forms.Form):
username = forms.CharField(max_length = 254, widget=forms.TextInput(attrs={'class':"input-sm"}))
password = forms.CharField(widget = forms.PasswordInput)
def clean(self, *args, **kwargs):
        username = self.cleaned_data.get("username")
password = self.cleaned_data.get('password')
if username and password:
user = authenticate(username=username, password=password)
if not user:
raise forms.ValidationError("Incorrect password or User! Please try again.")
if not user.check_password(password):
raise forms.ValidationError("Incorrect password! Please try again.")
if not user.is_active:
raise forms.ValidationError("This user is no longer active!")
        return super(UserLoginForm, self).clean(*args, **kwargs)
#--------------------------------
class UserRegisterForm(forms.ModelForm):
email = forms.EmailField(label = 'Email Address')
password = forms.CharField(widget = forms.PasswordInput)
class Meta:
model = User
fields = [
'username',
'email',
'password'
]
def clean_email(self):
email = self.cleaned_data.get('email')
email_qs = User.objects.filter(email=email)
if email_qs.exists():
raise forms.ValidationError("This email has already been registered!")
return email
def clean_username(self):
username = self.cleaned_data.get('username')
username_qs = User.objects.filter(username=username)
if username_qs.exists():
raise forms.ValidationError("This username has already been registered!")
return username
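# Hedged usage sketch, not part of the original module: exercising the registration
# form directly. The field values are placeholders and a configured Django settings
# module plus database are assumed.
#
#   form = UserRegisterForm(data={'username': 'alice',
#                                 'email': 'alice@example.com',
#                                 'password': 'secret'})
#   if form.is_valid():                                   # runs clean_email()/clean_username()
#       user = form.save(commit=False)
#       user.set_password(form.cleaned_data['password'])  # store a hash, not plain text
#       user.save()
#
# Note: in UserLoginForm.clean(), authenticate() already returns None for a wrong
# password, so the later check_password() branch is effectively unreachable.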
|
ilblackdragon/django-modeltranslation
|
modeltranslation/settings.py
|
Python
|
bsd-3-clause
| 1,569
| 0.003187
|
# -*- coding: utf-8 -*-
import sys
from warnings import warn
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
if hasattr(settings, 'MODELTRANSLATION_TRANSLATION_REGISTRY'):
TRANSLATION_REGISTRY =\
getattr(settings, 'MODELTRANSLATION_TRANSLATION_REGISTRY', None)
elif hasattr(settings, 'TRANSLATION_REGISTRY'):
warn('The setting TRANSLATION_REGISTRY is deprecated, use '
'MODELTRANSLATION_TRANSLATION_REGISTRY instead.', DeprecationWarning)
TRANSLATION_REGISTRY = getattr(settings, 'TRANSLATION_REGISTRY', None)
else:
raise ImproperlyConfigured("You haven't set the "
|
"MODELTRANSLATION_TRANSLATION_REGISTRY "
"setting yet.")
AVAILABLE_LANGUAGES = [l[0] for l in se
|
ttings.LANGUAGES]
DEFAULT_LANGUAGE = getattr(settings, 'MODELTRANSLATION_DEFAULT_LANGUAGE', None)
if DEFAULT_LANGUAGE and DEFAULT_LANGUAGE not in AVAILABLE_LANGUAGES:
raise ImproperlyConfigured('MODELTRANSLATION_DEFAULT_LANGUAGE not '
'in LANGUAGES setting.')
elif not DEFAULT_LANGUAGE:
DEFAULT_LANGUAGE = AVAILABLE_LANGUAGES[0]
# FIXME: We can't seem to override this particular setting in tests.py
CUSTOM_FIELDS =\
getattr(settings, 'MODELTRANSLATION_CUSTOM_FIELDS', ())
try:
if sys.argv[1] == 'test':
CUSTOM_FIELDS =\
getattr(settings, 'MODELTRANSLATION_CUSTOM_FIELDS',
('BooleanField',))
except IndexError:
pass
LANGUAGE_VERBOSE_NAMES = getattr(settings, 'LANGUAGE_VERBOSE_NAMES', {})
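# Hedged illustration, not part of the original module: the project settings this
# file reads might look roughly like this in a project's settings.py (values are
# examples only).
#
#   LANGUAGES = (('de', 'German'), ('en', 'English'))
#   MODELTRANSLATION_DEFAULT_LANGUAGE = 'de'                  # must appear in LANGUAGES
#   MODELTRANSLATION_TRANSLATION_REGISTRY = 'myproject.translation'
#   MODELTRANSLATION_CUSTOM_FIELDS = ()                       # extra field classes to patch
#   LANGUAGE_VERBOSE_NAMES = {'de': 'Deutsch', 'en': 'English'}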
|
0vercl0k/rp
|
src/third_party/beaengine/tests/0f3a25.py
|
Python
|
mit
| 2,835
| 0.001411
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
# @author : beaengine@gmail.com
from headers.BeaEnginePython import *
from nose.tools import *
class TestSuite:
def test(self):
# EVEX.256.66.0F3A.W0 25 /r ib
# vpternlogd ymm1{k1}{z}, ymm2, ymm3/m256/m32bcst, imm8
myEVEX = EVEX('EVEX.256.66.0F3A.W0')
Buffer = bytes.fromhex('{}252011'.format(myEVEX.prefix()))
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Instruction.Opcode, 0x25)
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vpternlogd')
assert_equal(myDisasm.r
|
epr(), 'vpternlogd ymm28, ymm16, ymmword ptr [r8], 11h')
# EVEX.512.66.0F3A.W0 25 /r ib
# vpternlogd zmm1{k1}{z}, zmm2, zmm3/m512/m32bcst, imm8
myEVEX = EVEX('EVEX.512.66.0F3A.W0')
Buffer = bytes.fromhex('{}252011'.format(myEVEX.prefix()))
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Instruction
|
.Opcode, 0x25)
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vpternlogd')
assert_equal(myDisasm.repr(), 'vpternlogd zmm28, zmm16, zmmword ptr [r8], 11h')
# EVEX.256.66.0F3A.W1 25 /r ib
# vpternlogq ymm1{k1}{z}, ymm2, ymm3/m256/m64bcst, imm8
myEVEX = EVEX('EVEX.256.66.0F3A.W1')
Buffer = bytes.fromhex('{}252011'.format(myEVEX.prefix()))
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Instruction.Opcode, 0x25)
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vpternlogq')
assert_equal(myDisasm.repr(), 'vpternlogq ymm28, ymm16, ymmword ptr [r8], 11h')
# EVEX.512.66.0F3A.W1 25 /r ib
# vpternlogq zmm1{k1}{z}, zmm2, zmm3/m512/m64bcst, imm8
myEVEX = EVEX('EVEX.512.66.0F3A.W1')
Buffer = bytes.fromhex('{}252011'.format(myEVEX.prefix()))
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Instruction.Opcode, 0x25)
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vpternlogq')
assert_equal(myDisasm.repr(), 'vpternlogq zmm28, zmm16, zmmword ptr [r8], 11h')
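# Hedged note, not part of the original test file: after the EVEX prefix, the hex
# string appends the opcode byte (25h), a ModRM byte (20h) and the immediate 11h,
# which is what the assertions on Opcode and the trailing ", 11h" check. The suite
# is collected by nose, but can also be driven directly:
#
#   if __name__ == '__main__':
#       TestSuite().test()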
|
siosio/intellij-community
|
python/testData/codeInsight/mlcompletion/isInConditionSimpleElif.py
|
Python
|
apache-2.0
| 78
| 0.051282
|
var1 = bool(input())
v
|
ar2 = bool(input())
if var1:
print(var2)
elif v
|
<caret>
|
lebabouin/CouchPotatoServer-develop
|
couchpotato/core/providers/trailer/vftrailers/youtube_dl/extractor/hark.py
|
Python
|
gpl-3.0
| 1,526
| 0.003932
|
# -*- coding: latin-1 -*-
import re
import json
from .common import InfoExtractor
from ..utils import determine_ext
class HarkIE(InfoExtractor):
_VALID_URL = r'https?://www\.hark\.com/clips/(.+?)-.+'
_TEST = {
u'url': u'http://www.hark.com/clips/mmbzyhkgny-obama-beyond-the-afghan-theater-we-only-target-al-qaeda-on-may-23-2013',
u'file': u'mmbzyhkgny.mp3',
u'md5': u'6783a58491b47b92c7c1af5a77d4cbee',
u'info_dict': {
u'title': u"Obama: 'Beyond The Afghan Theater, We Only Target Al Qaeda' on May 23, 2013",
u'description': u'President Barack Obama addressed the nation live on May 23, 2013 in a speech aimed at addressing counter-terrorism policies including the use of drone s
|
trikes, detainees at Guantanamo Bay prison facility, and American citizens who are terrorists.'
|
,
u'duration': 11,
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group(1)
json_url = "http://www.hark.com/clips/%s.json" %(video_id)
info_json = self._download_webpage(json_url, video_id)
info = json.loads(info_json)
final_url = info['url']
return {'id': video_id,
'url' : final_url,
'title': info['name'],
'ext': determine_ext(final_url),
'description': info['description'],
'thumbnail': info['image_original'],
'duration': info['duration'],
}
|
davehorton/drachtio-server
|
deps/boost_1_77_0/tools/build/test/conditionals2.py
|
Python
|
mit
| 960
| 0
|
#!/usr/bin/python
# Copyright 2003 Vladimir Prus
# Distributed under the Boost Software License, Version 1
|
.0.
# (See accompanying file LICENSE.txt or https://www.bfgroup.xyz/b2/LICENSE.txt)
# Regression test: it was possible that due to evaluation of conditional
# requirements, two different values of non-free features were present in a
# property set.
import BoostBuild
t = BoostBuild.Tester()
t.write("a.cpp", "")
t.write("jamroot.jam", """
import feature ;
import common ;
feature.feature the_
|
feature : false true : propagated ;
rule maker ( targets * : sources * : properties * )
{
if <the_feature>false in $(properties) &&
<the_feature>true in $(properties)
{
EXIT "Oops, two different values of non-free feature" ;
}
CMD on $(targets) = [ common.file-creation-command ] ;
}
actions maker
{
$(CMD) $(<) ;
}
make a : a.cpp : maker : <variant>debug:<the_feature>true ;
""")
t.run_build_system()
t.cleanup()
|
geotagx/geotagx-pybossa-archive
|
test/test_authentication.py
|
Python
|
agpl-3.0
| 1,229
| 0
|
# -*- coding: utf8 -*-
# This file is part of PyBossa.
#
# Copyright (C) 2013 SF Isle of Man Limited
#
# PyBossa is free software: you can redis
|
tri
|
bute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBossa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBossa. If not, see <http://www.gnu.org/licenses/>.
from base import web, model, Fixtures
class TestAuthentication:
@classmethod
def setup_class(cls):
cls.app = web.app.test_client()
model.rebuild_db()
Fixtures.create()
@classmethod
def teardown_class(cls):
model.rebuild_db()
def test_api_authenticate(self):
"""Test AUTHENTICATION works"""
res = self.app.get('/?api_key=%s' % Fixtures.api_key)
assert 'checkpoint::logged-in::tester' in res.data, res.data
|
ClusterHQ/libcloud
|
libcloud/compute/drivers/hostvirtual.py
|
Python
|
apache-2.0
| 10,709
| 0.000093
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
libcloud driver for the Host Virtual Inc. (VR) API
Home page http://www.vr.org/
"""
import time
try:
import simplejson as json
except ImportError:
import json
from libcloud.common.hostvirtual import HostVirtualResponse
from libcloud.common.hostvirtual import HostVirtualConnection
from libcloud.common.hostvirtual import HostVirtualException
from libcloud.compute.providers import Provider
from libcloud.compute.types import NodeState
from libcloud.compute.base import Node, NodeDriver
from libcloud.compute.base import NodeImage, NodeSize, NodeLocation
from libcloud.compute.base import NodeAuthSSHKey, NodeAuthPassword
API_ROOT = ''
NODE_STATE_MAP = {
'BUILDING': NodeState.PENDING,
'PENDING': NodeState.PENDING,
'RUNNING': NodeState.RUNNING, # server is powered up
'STOPPING': NodeState.REBOOTING,
'REBOOTING': NodeState.REBOOTING,
'STARTING': NodeState.REBOOTING,
'TERMINATED': NodeState.TERMINATED, # server is powered down
'STOPPED': NodeState.STOPPED
}
DEFAULT_NODE_LOCATION_ID = 4
class HostVirtualComputeResponse(HostVirtualResponse):
pass
class HostVirtualComputeConnection(HostVirtualConnection):
responseCls = HostVirtualComputeResponse
class HostVirtualNodeDriver(NodeDriver):
type = Provider.HOSTVIRTUAL
name = 'HostVirtual'
website = 'http://www.vr.org'
connectionCls = HostVirtualComputeConnection
features = {'create_node': ['ssh_key', 'password']}
def __init__(self, key, secure=True, host=None, port=None):
self.location = None
super(HostVirtualNodeDriver, self).__init__(key=key, secure=secure,
host=host, port=port)
def _to_node(self, data):
state = NODE_STATE_MAP[data['status']]
public_ips = []
private_ips = []
extra = {}
if 'plan_id' in data:
extra['size'] = data['plan_id']
if 'os_id' in data:
extra['image'] = data['os_id']
if 'location_id' in data:
extra['location'] = data['location_id']
if 'ip' in data:
public_ips.append(data['ip'])
node = Node(id=data['mbpkgid'], name=data['fqdn'], state=state,
public_ips=public_ips, private_ips=private_ips,
driver=self.connection.driver, extra=extra)
return node
def list_locations(self):
result = self.connection.request(API_ROOT + '/cloud/locations/').object
locations = []
for dc in result:
locations.append(NodeLocation(
dc["id"],
dc["name"],
dc["name"].split(',')[1].replace(" ", ""), # country
self))
return locations
def list_sizes(self, location=None):
params = {}
if location:
params = {'location': location.id}
result = self.connection.request(
API_ROOT + '/cloud/sizes/',
data=json.dumps(params)).object
sizes = []
for size in result:
n = NodeSize(id=size['plan_id'],
name=size['plan'],
ram=size['ram'],
disk=size['disk'],
bandwidth=size['transfer'],
price=size['price'],
driver=self.connection.driver)
sizes.append(n)
return sizes
def list_images(self):
result = self.connection.request(API_ROOT + '/cloud/images/').object
images = []
for image in result:
i = NodeImage(id=image["id"],
name=image["os"],
driver=self.connection.driver,
extra=image)
del i.extra['id']
del i.extra['os']
images.append(i)
return images
def list_nodes(self):
result = self.connection.request(API_ROOT + '/cloud/servers/').object
nodes = []
for value in result:
node = self._to_node(value)
nodes.append(node)
return nodes
def _wait_for_node(self, node_id, timeout=30, interval=5.0):
"""
:param node_id: ID of the node to wait for.
:type node_id: ``int``
:param timeout: Timeout (in seconds).
:type timeout: ``int``
:param interval: How long to wait (in seconds) between each attempt.
:type interval: ``float``
"""
# poll until we get a node
for i in range(0, timeout, int(interval)):
try:
node = self.ex_get_node(node_id)
return node
except HostVirtualException:
time.sleep(interval)
        raise HostVirtualException(412, 'Timed out getting node details')
def create_node(self, **kwargs):
dc = None
size = kwargs['size']
image = kwargs['image']
auth = self._get_and_check_auth(kwargs.get('auth'))
params = {'plan': size.name}
dc = DEFAULT_NODE_LOCATION_ID
if 'location' in kwargs:
dc = kwargs['location'].id
# simply order a package first
result = self.connection.request(API_ROOT + '/cloud/buy/',
data=json.dumps(params),
method='POST').object
# create a stub node
stub_node = self._to_node({
'mbpkgid': result['id'],
'status': 'PENDING',
'fqdn': kwargs['name'],
'plan_id': size.id,
'os_id': image.id,
'location_id': dc
})
# provisioning a server using the stub node
self.ex_provision_node(node=stub_node, auth=auth)
node = self._wait_for_node(stub_node.id)
if getattr(auth
|
, 'generated', False):
node.extra['password'] = auth.pass
|
word
return node
def reboot_node(self, node):
params = {'force': 0, 'mbpkgid': node.id}
result = self.connection.request(
API_ROOT + '/cloud/server/reboot',
data=json.dumps(params),
method='POST').object
return bool(result)
def destroy_node(self, node):
params = {
'mbpkgid': node.id,
#'reason': 'Submitted through Libcloud API'
}
result = self.connection.request(
API_ROOT + '/cloud/cancel', data=json.dumps(params),
method='POST').object
return bool(result)
def ex_get_node(self, node_id):
"""
Get a single node.
:param node_id: id of the node that we need the node object for
:type node_id: ``str``
:rtype: :class:`Node`
"""
params = {'mbpkgid': node_id}
result = self.connection.request(
API_ROOT + '/cloud/server', params=params).object
node = self._to_node(result)
return node
def ex_stop_node(self, node):
"""
Stop a node.
:param node: Node which should be used
:type node: :class:`Node`
:rtype: ``bool``
"""
params = {'force': 0, 'mbpkgid': node.id}
result = self.connection.request(
API_ROOT + '/cloud/server/shutdown',
data=json.dumps(params),
method='POST').object
return bool(result)
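# Hedged usage sketch, not part of the original driver: driving the create_node()
# flow above (buy a package, provision it, poll with _wait_for_node). The API key,
# node name and password are placeholders.
#
#   driver = HostVirtualNodeDriver(key='MY_API_KEY')
#   size = driver.list_sizes()[0]
#   image = driver.list_images()[0]
#   location = driver.list_locations()[0]
#   node = driver.create_node(name='example.vr.org', size=size, image=image,
#                             location=location, auth=NodeAuthPassword('s3cr3t'))
#   driver.ex_stop_node(node)
#   driver.destroy_node(node)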
|
shanot/imp
|
modules/isd/test/test_mc_WeightMover.py
|
Python
|
gpl-3.0
| 1,309
| 0
|
#!/usr/bin/env python
# imp general
import IMP
import IMP.core
# our project
from IMP.isd import Weight
from IMP.isd import WeightMover
# unit testing framework
import IMP.test
class TestWeightMover(IMP.test.TestCase):
"""tests weight setup"""
def setUp(self):
IMP.test.TestCase.setUp(self)
# IMP.set_log_level(IMP.MEMORY)
IMP.set_log_level(0)
self.m = IMP.Model()
|
self.w = Weight.setup_particle(IMP.Particle(self.m))
self.w.set_weights_are_optimize
|
d(True)
self.w.add_weight()
self.w.add_weight()
self.wm = WeightMover(self.w, 0.1)
self.mc = IMP.core.MonteCarlo(self.m)
self.mc.set_scoring_function([])
self.mc.set_return_best(False)
self.mc.set_kt(1.0)
self.mc.add_mover(self.wm)
def test_run(self):
"Test weight mover mc run"
self.setUp()
for n in range(5):
for j in range(10):
self.mc.optimize(10)
ws = self.w.get_weights()
sum = 0
for k in range(self.w.get_number_of_states()):
sum += self.w.get_weight(k)
self.assertAlmostEqual(sum, 1.0, delta=0.0000001)
self.w.add_weight()
if __name__ == '__main__':
IMP.test.main()
|
datamade/la-metro-councilmatic
|
lametro/admin.py
|
Python
|
mit
| 116
| 0.008621
|
from django.contrib import admin
# import your models
# Register your models here.
# admin.site.register(YourMode
|
l)
|
|
yinchunlong/abelkhan-1
|
juggle/gen/csharp/tools.py
|
Python
|
mit
| 394
| 0.007614
|
# 2016-7-1
# build by qianqians
# tools
def gentypetocsharp(typestr):
if typestr == 'int':
return 'Int
|
64'
elif typestr == 'string':
return 'String'
elif typestr == 'array':
return 'ArrayList'
elif typestr == 'float':
return 'Double'
elif typestr == 'boo
|
l':
return 'Boolean'
elif typestr == 'table':
return 'Hashtable'
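# Hedged examples, not part of the original helper: the expected mappings.
#
#   gentypetocsharp('int')    # -> 'Int64'
#   gentypetocsharp('array')  # -> 'ArrayList'
#   gentypetocsharp('bool')   # -> 'Boolean'
#
# Any other type string falls through every branch, so the function implicitly
# returns None.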
|
ThiefMaster/indico
|
indico/modules/events/timetable/util.py
|
Python
|
mit
| 16,943
| 0.003305
|
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from collections import defaultdict
from operator import attrgetter
from flask import render_template, session
from pytz import utc
from sqlalchemy import Date, cast
from sqlalchemy.orm import contains_eager, joinedload, subqueryload, undefer
from indico.core.db import db
from indico.modules.events.contributions.models.contributions import Contribution
from indico.modules.events.models.events import Event
from indico.modules.events.models.persons import EventPersonLink
from indico.modules.events.sessions.models.blocks import SessionBlock
from indico.modules.events.sessions.models.sessions import Session
from indico.modules.events.timetable.legacy import TimetableSerializer, serialize_event_info
from indico.modules.events.timetable.models.breaks import Break
from indico.modules.events.timetable.models.entries import TimetableEntry, TimetableEntryType
from indico.util.caching import memoize_request
from indico.util.date_time import format_time, get_day_end, iterdays
from indico.util.i18n import _
from indico.web.flask.templating import get_template_module
from indico.web.forms.colors import get_colors
def _query_events(categ_ids, day_start, day_end):
event = db.aliased(Event)
dates_overlap = lambda t: (t.start_dt >= day_start) & (t.start_dt <= day_end)
return (db.session.query(Event.id, TimetableEntry.start_dt)
.fi
|
lter(
Event.category_chain_overlaps(categ_ids),
~Event.is_deleted,
((Event.timetable_entries.any(dates_overlap(TimetableEntry))) |
(Event.query.exists().where(
Event.happens_between(day_start, day_end) &
(Event.id == event.id)))))
.group_by(Event.id, TimetableEntry.start_dt)
|
.order_by(Event.id, TimetableEntry.start_dt)
.join(TimetableEntry,
(TimetableEntry.event_id == Event.id) & (dates_overlap(TimetableEntry)),
isouter=True))
def _query_blocks(event_ids, dates_overlap, detail_level='session'):
options = [subqueryload('session').joinedload('blocks').joinedload('person_links')]
if detail_level == 'contribution':
options.append(contains_eager(SessionBlock.timetable_entry).joinedload(TimetableEntry.children))
else:
options.append(contains_eager(SessionBlock.timetable_entry))
return (SessionBlock.query
.filter(~Session.is_deleted,
Session.event_id.in_(event_ids),
dates_overlap(TimetableEntry))
.options(*options)
.join(TimetableEntry)
.join(Session))
def find_latest_entry_end_dt(obj, day=None):
"""Get the latest end datetime for timetable entries within the object.
:param obj: The :class:`Event` or :class:`SessionBlock` that will be used to
look for timetable entries.
:param day: The local event date to look for timetable entries. Applicable only
to ``Event``.
:return: The end datetime of the timetable entry finishing the latest. ``None``
if no entry was found.
"""
if isinstance(obj, Event):
if day is None:
raise ValueError('No day specified for event.')
if not (obj.start_dt_local.date() <= day <= obj.end_dt_local.date()):
raise ValueError('Day out of event bounds.')
entries = obj.timetable_entries.filter(TimetableEntry.parent_id.is_(None),
cast(TimetableEntry.start_dt.astimezone(obj.tzinfo), Date) == day).all()
elif isinstance(obj, SessionBlock):
if day is not None:
raise ValueError('Day specified for session block.')
entries = obj.timetable_entry.children
else:
raise ValueError(f'Invalid object type {type(obj)}')
return max(entries, key=attrgetter('end_dt')).end_dt if entries else None
def find_next_start_dt(duration, obj, day=None, force=False):
"""Find the next most convenient start date fitting a duration within an object.
:param duration: Duration to fit into the event/session-block.
:param obj: The :class:`Event` or :class:`SessionBlock` the duration needs to
fit into.
:param day: The local event date where to fit the duration in case the object is
an event.
:param force: Gives earliest datetime if the duration doesn't fit.
    :return: The end datetime of the latest scheduled entry in the object if the
             duration fits after it. If it doesn't, the latest datetime that fits it.
``None`` if the duration cannot fit in the object, earliest datetime
if ``force`` is ``True``.
"""
if isinstance(obj, Event):
if day is None:
raise ValueError('No day specified for event.')
if not (obj.start_dt_local.date() <= day <= obj.end_dt_local.date()):
raise ValueError('Day out of event bounds.')
earliest_dt = obj.start_dt if obj.start_dt_local.date() == day else obj.start_dt.replace(hour=8, minute=0)
latest_dt = obj.end_dt if obj.start_dt.date() == day else get_day_end(day, tzinfo=obj.tzinfo)
elif isinstance(obj, SessionBlock):
if day is not None:
raise ValueError('Day specified for session block.')
earliest_dt = obj.timetable_entry.start_dt
latest_dt = obj.timetable_entry.end_dt
else:
raise ValueError(f'Invalid object type {type(obj)}')
max_duration = latest_dt - earliest_dt
if duration > max_duration:
return earliest_dt if force else None
start_dt = find_latest_entry_end_dt(obj, day=day) or earliest_dt
end_dt = start_dt + duration
if end_dt > latest_dt:
start_dt = latest_dt - duration
return start_dt
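# Hedged usage sketch, not part of the original module: fitting a 30-minute slot into
# an event day as the docstring above describes; ``event`` and the date are placeholders.
#
#   from datetime import date, timedelta
#   start = find_next_start_dt(timedelta(minutes=30), event, day=date(2021, 6, 1))
#   if start is None:
#       # the duration does not fit in the day at all; force the earliest datetime anyway
#       start = find_next_start_dt(timedelta(minutes=30), event,
#                                  day=date(2021, 6, 1), force=True)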
def get_category_timetable(categ_ids, start_dt, end_dt, detail_level='event', tz=utc, from_categ=None, grouped=True,
includible=lambda item: True):
"""Retrieve time blocks that fall within a specific time interval
for a given set of categories.
:param categ_ids: iterable containing list of category IDs
:param start_dt: start of search interval (``datetime``, expected
to be in display timezone)
    :param end_dt: end of search interval (``datetime``, expected
to be in display timezone)
:param detail_level: the level of detail of information
(``event|session|contribution``)
:param tz: the ``timezone`` information should be displayed in
:param from_categ: ``Category`` that will be taken into account to calculate
visibility
:param grouped: Whether to group results by start date
:param includible: a callable, to allow further arbitrary custom filtering (maybe from 3rd
party plugins) on whether to include (returns True) or not (returns False)
each ``detail`` item. Default always returns True.
:returns: a dictionary containing timetable information in a
structured way. See source code for examples.
"""
day_start = start_dt.astimezone(utc)
day_end = end_dt.astimezone(utc)
dates_overlap = lambda t: (t.start_dt >= day_start) & (t.start_dt <= day_end)
items = defaultdict(lambda: defaultdict(list))
# first of all, query TimetableEntries/events that fall within
# specified range of dates (and category set)
events = _query_events(categ_ids, day_start, day_end)
if from_categ:
events = events.filter(Event.is_visible_in(from_categ.id))
for eid, tt_start_dt in events:
if tt_start_dt:
items[eid][tt_start_dt.astimezone(tz).date()].append(tt_start_dt)
else:
items[eid] = None
# then, retrieve detailed information about the events
event_ids = set(items)
query = (Event.query
.filter(Event.id
|
sebastic/NLExtract
|
bag/src/loggui.py
|
Python
|
gpl-3.0
| 2,159
| 0.003705
|
#------------------------------------------------------------------------------
# Name:         libLog.py
# Description:  Generic functions for logging within BAG Extract+
# Author:       Matthijs van der De
|
ijl
# Author:       Just van den Broecke - porting to NLExtract (2015)
#
# Version:      1.3
#               - error handling improved
# Date:         16 December 2009
#
# Version:      1.2
# Date:         24 November 2009
#
# Ministerie van Volkshuisvesting, Ruimtelijke Ordening en Milieubeheer
#----------------------------------------------------------------------
|
--------
import wx
# Simple log screen: text in a text panel
class LogScherm:
def __init__(self, text_ctrl):
self.text_ctrl = text_ctrl
def __call__(self, tekst):
self.schrijf(tekst)
def start(self):
i = self.text_ctrl.GetNumberOfLines()
self.text_ctrl.Clear()
while i > 0:
self.text_ctrl.AppendText(" \n")
i -= 1
self.text_ctrl.Clear()
def schrijf(self, tekst):
self.text_ctrl.AppendText("\n" + tekst)
self.text_ctrl.Refresh()
self.text_ctrl.Update()
# See http://www.blog.pythonlibrary.org/2010/05/22/wxpython-and-threads/
# (use events when in multithreaded mode)
# Define notification event for thread completion
EVT_SCHRIJF_ID = wx.NewId()
def EVT_SCHRIJF(win, func):
"""Define Result Event."""
win.Connect(-1, -1, EVT_SCHRIJF_ID, func)
class SchrijfEvent(wx.PyEvent):
"""Simple event to carry arbitrary result data."""
def __init__(self, tekst):
"""Init Result Event."""
wx.PyEvent.__init__(self)
self.SetEventType(EVT_SCHRIJF_ID)
self.tekst = tekst
class AsyncLogScherm(LogScherm):
def __init__(self, text_ctrl):
LogScherm.__init__(self, text_ctrl)
# Set up event handler for any worker thread results
EVT_SCHRIJF(self.text_ctrl, self.on_schrijf_event)
def on_schrijf_event(self, evt):
self.schrijf(evt.tekst)
def __call__(self, tekst):
        # Instead of writing directly, post a "schrijf" event
wx.PostEvent(self.text_ctrl, SchrijfEvent(tekst))
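# Hedged usage sketch, not part of the original module: from the wx main thread the
# synchronous LogScherm can be used directly, while worker threads should go through
# AsyncLogScherm so the text control is only touched via posted events. 'parent' is a
# placeholder for an existing wx window.
#
#   log_ctrl = wx.TextCtrl(parent, style=wx.TE_MULTILINE)
#   logger = AsyncLogScherm(log_ctrl)
#   logger("processing started...")   # safe from a worker thread: posts a SchrijfEvent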
|
ssdxiao/kimchi
|
src/kimchi/vnc.py
|
Python
|
lgpl-2.1
| 1,961
| 0.00051
|
#!/usr/bin/python
#
# Project Kimchi
#
# Copyright IBM, Corp. 2013
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your opti
|
on) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOS
|
E. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import errno
import os
import subprocess
from kimchi.config import config, paths
WS_TOKENS_DIR = '/var/lib/kimchi/vnc-tokens'
def new_ws_proxy():
try:
os.makedirs(WS_TOKENS_DIR, mode=0755)
except OSError as e:
if e.errno == errno.EEXIST:
pass
cert = config.get('server', 'ssl_cert')
key = config.get('server', 'ssl_key')
if not (cert and key):
cert = '%s/kimchi-cert.pem' % paths.conf_dir
key = '%s/kimchi-key.pem' % paths.conf_dir
cmd = os.path.join(os.path.dirname(__file__), 'websockify.py')
args = ['python', cmd, config.get('display', 'display_proxy_port'),
'--target-config', WS_TOKENS_DIR, '--cert', cert, '--key', key,
'--web', os.path.join(paths.ui_dir, 'pages/websockify'),
'--ssl-only']
p = subprocess.Popen(args, close_fds=True)
return p
def add_proxy_token(name, port):
with open(os.path.join(WS_TOKENS_DIR, name), 'w') as f:
f.write('%s: localhost:%s' % (name.encode('utf-8'), port))
def remove_proxy_token(name):
try:
os.unlink(os.path.join(WS_TOKENS_DIR, name))
except OSError:
pass
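# Hedged usage sketch, not part of the original module: wiring a guest's VNC display
# through the websockify proxy. The VM name and port are placeholders.
#
#   proxy = new_ws_proxy()            # spawns websockify with the server's SSL cert/key
#   add_proxy_token('my-vm', 5901)    # writes "my-vm: localhost:5901" into WS_TOKENS_DIR
#   ...
#   remove_proxy_token('my-vm')
#   proxy.kill()                      # Popen handle returned by new_ws_proxy()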
|
zuntrax/schedule-ng
|
fahrplan/model/schedule.py
|
Python
|
gpl-3.0
| 3,835
| 0.001304
|
import logging
from datetime import date as _date, timedelta
from typing import Dict, List
from fahrplan.exception import FahrplanError
from fahrplan.xml import XmlWriter, XmlSerializable
from .conference import Conference
from .day import Day
from .event import Event
from .room import Room
log = logging.getLogger(__name__)
class Schedule(XmlSerializable):
def __init__(self, conference: Conference, days: Dict[int, Day] = None, version: str = "1.0"):
self.conference = conference
self.conference.schedule = self
if days:
assert len(days) == conference.day_count
self.days = days
else:
# TODO (MO) document automatic day generation
# also this should be refactored into something like generate_days
if conference.day_count and not conference.start:
raise FahrplanError("conference.start is not set, "
"cannot automatically create days.")
self.days = {}
for i in range(conference.day_count):
index = i + 1
date: _date = conference.start + timedelta(i)
self.days[index] = Day(index=index, date=date)
for day in self.days.values():
day.schedule = self
self.version = version
def add_day(self, day: Day):
"""
        Add a day to the schedule. Note that the day is added without any rooms; add them separately.
:return: None
"""
self.days[day.index] = day
day.schedule = self
self.conference.day_count += 1
def add_room(self, name: str, day_filter: List[int] = None):
"""
Adds
|
a room to the days given in day_filter, or all days.
:param name: Name of the room to be added.
:param day_filter
|
: List of day indices to create the room for. If empty, use all days.
:return: None
"""
for day in self.days.values():
if not day_filter or day.index in day_filter:
day.add_room(Room(name))
def add_event(self, day: int, room: str, event: Event):
self.days[day].add_event(room, event)
def merge(self, other: 'Schedule'):
if self.conference.acronym != other.conference.acronym:
log.warning(f'Conference acronym mismatch: "{self.conference.acronym}" != '
f'"{other.conference.acronym}". Are you sure you are using compatible data?')
for index, day in other.days.items():
if index in self.days:
self.days[index].merge(day)
else:
self.days[index] = day
day.schedule = self
if len(self.days) != self.conference.day_count:
log.warning('Day count mismatch, adjusting.')
return self # needed to be able to chain calls
def has_collision(self, new_event: 'Event'):
for day in self.days.values():
for room in day.rooms.values():
for event in room.events.values():
if event.slug == new_event.slug:
log.error(f'Duplicate slug "{event.slug}"')
return True
if event.id == new_event.id:
log.error(f'Duplicate event id "{event.id}"')
return True
if event.guid == new_event.guid:
log.error(f'Duplicate guid "{event.guid}"')
return True
        return False
def append_xml(self, xml: XmlWriter, extended: bool):
with xml.context("schedule"):
xml.tag("version", self.version)
xml.append_object(self.conference, extended)
for day in self.days.values():
xml.append_object(day, extended)
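# Hedged usage sketch, not part of the original module: building a schedule as the
# docstrings above describe. The Conference and Event constructor arguments shown
# here are assumptions, not taken from this file.
#
#   conference = Conference(acronym='exc', day_count=2, start=date(2020, 1, 1))
#   schedule = Schedule(conference)               # days generated from day_count/start
#   schedule.add_room('Main Hall')                # added to every day
#   schedule.add_room('Workshop', day_filter=[2])
#   schedule.add_event(1, 'Main Hall', event)     # 'event' is a prepared Event instance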
|