| text (string, 6-947k chars) | repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (1 class) | license (15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) |
|---|---|---|---|---|---|---|
from nose.tools import *
from unittest import TestCase
import os
from shutil import rmtree
from tempfile import mkdtemp
from fnmatch import fnmatch
from files.file_matcher_glob import FileMatcherGlob
class FileMatcherGlobTests(TestCase):
def setUp(self):
self.directory = mkdtemp('-caboose-file-matcher-glob-tests')
def tearDown(self):
rmtree(self.directory)
def test_file_matcher_matches_against_glob(self):
self.file_matcher = FileMatcherGlob("*.java")
eq_(True, self.file_matcher.match("hello.java"))
eq_(False, self.file_matcher.match("hello.java2"))
def test_file_matcher_matches_against_unicode_glob(self):
self.file_matcher = FileMatcherGlob(u"*.java")
eq_(True, self.file_matcher.match("hello.java"))
eq_(False, self.file_matcher.match("hello.java2"))
def test_glob_matcher_handles_list_of_globs(self):
self.file_matcher = FileMatcherGlob(["*.one", "*.two"])
eq_(True, self.file_matcher.match("hello.one"))
eq_(True, self.file_matcher.match("hello.two"))
eq_(False, self.file_matcher.match("hello.three"))
|
markdrago/caboose
|
src/test/files/file_matcher_glob_tests.py
|
Python
|
mit
| 1,146
| 0.002618
|
"""Test kytos.core.buffers module."""
import asyncio
from unittest import TestCase
from unittest.mock import MagicMock, patch
from kytos.core.buffers import KytosBuffers, KytosEventBuffer
# pylint: disable=protected-access
class TestKytosEventBuffer(TestCase):
"""KytosEventBuffer tests."""
def setUp(self):
"""Instantiate a KytosEventBuffer."""
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
self.kytos_event_buffer = KytosEventBuffer('name', loop=self.loop)
@staticmethod
def create_event_mock(name='any'):
"""Create a new event mock."""
event = MagicMock()
event.name = name
return event
def test_put_get(self):
"""Test put and get methods."""
event = self.create_event_mock()
self.kytos_event_buffer.put(event)
queue_event = self.kytos_event_buffer.get()
self.assertEqual(queue_event, event)
def test_put__shutdown(self):
"""Test put method to shutdown event."""
event = self.create_event_mock('kytos/core.shutdown')
self.kytos_event_buffer.put(event)
self.assertTrue(self.kytos_event_buffer._reject_new_events)
def test_aput(self):
"""Test aput async method."""
event = MagicMock()
event.name = 'kytos/core.shutdown'
self.loop.run_until_complete(self.kytos_event_buffer.aput(event))
self.assertTrue(self.kytos_event_buffer._reject_new_events)
def test_aget(self):
"""Test aget async method."""
event = self.create_event_mock()
self.kytos_event_buffer._queue.sync_q.put(event)
expected = self.loop.run_until_complete(self.kytos_event_buffer.aget())
self.assertEqual(event, expected)
@patch('janus._SyncQueueProxy.task_done')
def test_task_done(self, mock_task_done):
"""Test task_done method."""
self.kytos_event_buffer.task_done()
mock_task_done.assert_called()
@patch('janus._SyncQueueProxy.join')
def test_join(self, mock_join):
"""Test join method."""
self.kytos_event_buffer.join()
mock_join.assert_called()
def test_qsize(self):
"""Test qsize method to empty and with one event in query."""
qsize_1 = self.kytos_event_buffer.qsize()
event = self.create_event_mock()
self.kytos_event_buffer._queue.sync_q.put(event)
qsize_2 = self.kytos_event_buffer.qsize()
self.assertEqual(qsize_1, 0)
self.assertEqual(qsize_2, 1)
def test_empty(self):
"""Test empty method to empty and with one event in query."""
empty_1 = self.kytos_event_buffer.empty()
event = self.create_event_mock()
self.kytos_event_buffer._queue.sync_q.put(event)
empty_2 = self.kytos_event_buffer.empty()
self.assertTrue(empty_1)
self.assertFalse(empty_2)
@patch('janus._SyncQueueProxy.full')
def test_full(self, mock_full):
"""Test full method to full and not full query."""
mock_full.side_effect = [False, True]
full_1 = self.kytos_event_buffer.full()
full_2 = self.kytos_event_buffer.full()
self.assertFalse(full_1)
self.assertTrue(full_2)
class TestKytosBuffers(TestCase):
"""KytosBuffers tests."""
def setUp(self):
"""Instantiate a KytosBuffers."""
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(None)
self.kytos_buffers = KytosBuffers(loop=self.loop)
def test_send_stop_signal(self):
"""Test send_stop_signal method."""
self.kytos_buffers.send_stop_signal()
self.assertTrue(self.kytos_buffers.raw._reject_new_events)
self.assertTrue(self.kytos_buffers.msg_in._reject_new_events)
self.assertTrue(self.kytos_buffers.msg_out._reject_new_events)
self.assertTrue(self.kytos_buffers.app._reject_new_events)
|
kytos/kytos
|
tests/unit/test_core/test_buffers.py
|
Python
|
mit
| 3,932
| 0
|
#! /usr/bin/env python3
from .config import *
from .diff import *
from .HtmlFormatter import *
from .AnsiFormatter import *
|
lahwaacz/python-wikeddiff
|
WikEdDiff/__init__.py
|
Python
|
gpl-3.0
| 125
| 0
|
import datetime
import nrkdownload.utils
def test_valid_filename(string=r":blah/bl:ah.ext"):
filename = nrkdownload.utils.valid_filename(string)
assert filename == "blahblah.ext"
def test_parse_duration(string="PT3H12M41.6S"):
# PT28M39S : 28m39s
# PT3H12M41.6S : 3h12m41.6s
duration = nrkdownload.utils.parse_duration(string)
assert duration == datetime.timedelta(hours=3, minutes=12, seconds=41.6)
duration = nrkdownload.utils.parse_duration("")
assert duration == datetime.timedelta()
# duration = nrkdownload.utils.parse_datetime('not_a_duration')
# assert duration == datetime.timedelta()
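# The PT...H...M...S strings above are ISO-8601 durations. As a rough, hedged
# illustration (not nrkdownload's actual implementation), such a string could
# be parsed with a small regex helper like this hypothetical one:
#
#   import re
#   from datetime import timedelta
#
#   def _parse_iso_duration(s):
#       m = re.match(r"PT(?:(\d+)H)?(?:(\d+)M)?(?:([\d.]+)S)?$", s or "")
#       if not m:
#           return timedelta()
#       hours, minutes, seconds = (float(x) if x else 0 for x in m.groups())
#       return timedelta(hours=hours, minutes=minutes, seconds=seconds)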
def test_classmethod():
c = nrkdownload.utils.ClassProperty()
assert c
|
marhoy/nrk-download
|
tests/test_utils.py
|
Python
|
gpl-3.0
| 721
| 0
|
#!/usr/bin/env python
import unittest
from aminer import AMinerParser
class AMinerParserTest(unittest.TestCase):
SINGLE_TEST_FILE = "./aminer_single.txt"
def setUp(self):
self.single_test = open(self.SINGLE_TEST_FILE, "r")
def test_single_parse(self):
p = AMinerParser()
if __name__ == "__main__":
unittest.main()
|
jevinw/rec_utilities
|
babel_util/parsers/aminer_test.py
|
Python
|
agpl-3.0
| 350
| 0.005714
|
# encoding: utf-8
import datetime
import logging
import os
import re
import urllib
import urllib2
from HTMLParser import HTMLParseError
from urlparse import urlparse
from BeautifulSoup import BeautifulSoup, Comment, NavigableString
from django.contrib.contenttypes.models import ContentType
from django.core.files.base import ContentFile
import parse_knesset_bill_pdf
from knesset.utils import send_chat_notification
from laws.models import Bill, Law, GovProposal
from links.models import Link, LinkedFile
from mks.models import Knesset
from simple.constants import PRIVATE_LAWS_URL, KNESSET_LAWS_URL, GOV_LAWS_URL
from simple.government_bills.parse_government_bill_pdf import GovProposalParser
from simple.parsers.utils import laws_parser_utils
from simple.parsers.utils.laws_parser_utils import normalize_correction_title_dashes, clean_line
logger = logging.getLogger("open-knesset.parse_laws")
# don't parse laws from an older knesset
CUTOFF_DATE = datetime.date(2009, 2, 24)
class ParseLaws(object):
"""partially abstract class for parsing laws. contains one function used in few
cases (private and other laws). this function gives the required page
"""
url = None
def get_page_with_param(self, params):
logger.debug('get_page_with_param: self.url=%s, params=%s' % (self.url, params))
if not params:
try:
html_page = urllib2.urlopen(self.url).read().decode('windows-1255').encode('utf-8')
except urllib2.URLError as e:
logger.error("can't open URL: %s" % self.url)
send_chat_notification(__name__, 'failed to open url', {'url': self.url, 'params': params})
return None
try:
soup = BeautifulSoup(html_page)
except HTMLParseError as e:
logger.debug("parsing URL: %s - %s. will try harder." % (self.url, e))
html_page = re.sub("(?s)<!--.*?-->", " ", html_page) # cut anything that looks suspicious
html_page = re.sub("(?s)<script>.*?</script>", " ", html_page)
html_page = re.sub("(?s)<!.*?>", " ", html_page)
try:
soup = BeautifulSoup(html_page)
except HTMLParseError as e:
logger.debug("error parsing URL: %s - %s" % (self.url, e))
send_chat_notification(__name__, 'failed to parse url', {'url': self.url, 'params': None})
return None
comments = soup.findAll(text=lambda text: isinstance(text, Comment))
[comment.extract() for comment in comments]
return soup
else:
data = urllib.urlencode(params)
try:
url_data = urllib2.urlopen(self.url, data)
except urllib2.URLError:
logger.error("can't open URL: %s" % self.url)
send_chat_notification(__name__, 'failed to open url', {'url': self.url, 'params': data})
return None
html_page = url_data.read().decode('windows-1255').encode('utf-8')
try:
soup = BeautifulSoup(html_page)
except HTMLParseError as e:
logger.debug("error parsing URL: %s - %s" % (self.url, e))
send_chat_notification(__name__, 'failed to parse url', {'url': self.url, 'params': data})
return None
comments = soup.findAll(text=lambda text: isinstance(text, Comment))
[comment.extract() for comment in comments]
return soup
class ParsePrivateLaws(ParseLaws):
"""a class that parses private laws proposed
"""
# the constructor parses the laws data from the required pages
def __init__(self, days_back):
self.url = PRIVATE_LAWS_URL
self.rtf_url = r"http://www.knesset.gov.il/privatelaw"
self.laws_data = []
self.parse_pages_days_back(days_back)
# parses the required pages data
def parse_pages_days_back(self, days_back):
today = datetime.date.today()
last_required_date = today + datetime.timedelta(days=-days_back)
last_law_checked_date = today
index = None
while last_law_checked_date > last_required_date:
if index:
params = {'RowStart': index}
else:
params = None
soup_current_page = self.get_page_with_param(params)
if not soup_current_page:
return
index = self.get_param(soup_current_page)
self.parse_private_laws_page(soup_current_page)
last_law_checked_date = self.update_last_date()
def get_param(self, soup):
name_tags = soup.findAll(
lambda tag: tag.name == 'a' and tag.has_key('href') and re.match("javascript:SndSelf\((\d+)\);",
tag['href']))
if name_tags and name_tags[0].get('href'):
m = re.match("javascript:SndSelf\((\d+)\);", name_tags[0]['href'])
return m.groups(1)[0]
else:
logger.error('Can not find any more name tags')
return None
def parse_private_laws_page(self, soup):
name_tag = soup.findAll(lambda tag: tag.name == 'tr' and tag.has_key('valign') and tag['valign'] == 'Top')
for tag in name_tag:
tds = tag.findAll(lambda td: td.name == 'td')
law_data = {}
law_data['knesset_id'] = int(tds[0].string.strip())
law_data['law_id'] = int(tds[1].string.strip())
if tds[2].findAll('a')[0].has_key('href'):
law_data['text_link'] = self.rtf_url + r"/" + tds[2].findAll('a')[0]['href']
law_data['law_full_title'] = tds[3].string.strip()
parsed_law_title = laws_parser_utils.parse_title(law_data['law_full_title'])
if not parsed_law_title:
logger.warn("can't parse proposal title: %s" % law_data['law_full_title'])
continue
law_data['law_name'] = clean_line(parsed_law_title.group(1))
comment1 = parsed_law_title.group(3)
comment2 = parsed_law_title.group(5)
if comment2:
law_data['correction'] = clean_line(comment2)
law_data['comment'] = comment1
else:
law_data['comment'] = None
if comment1:
law_data['correction'] = clean_line(comment1)
else:
law_data['correction'] = None
law_data['correction'] = normalize_correction_title_dashes(law_data['correction'])
law_data['law_year'] = parsed_law_title.group(7)
law_data['proposal_date'] = datetime.datetime.strptime(tds[4].string.strip(), '%d/%m/%Y').date()
names_string = ''.join([unicode(y) for y in tds[5].findAll('font')[0].contents])
names_string = clean_line(names_string)
proposers = []
joiners = []
# Old deprecated way to search for joiners
if re.search('ONMOUSEOUT', names_string):
splitted_names = names_string.split('ONMOUSEOUT')
joiners = [name for name in re.match('(.*?)\',\'', splitted_names[0]).group(1).split('<br />') if
len(name) > 0]
proposers = splitted_names[1][10:].split('<br />')
else:
proposers = names_string.split('<br />')
more_joiners = [name for name in tds[6].findAll(text=lambda text: isinstance(text, NavigableString)) if
name.strip() not in [u'מצטרפים לחוק:', u'אין מצטרפים לחוק']]
if len(more_joiners) and not joiners:
joiners = more_joiners
law_data['proposers'] = proposers
law_data['joiners'] = joiners
self.laws_data.append(law_data)
def update_last_date(self):
return self.laws_data[-1]['proposal_date']
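# Illustrative usage of the scraper above (Python 2, requires network access;
# not part of the parsing pipeline itself):
#
#   parser = ParsePrivateLaws(days_back=7)
#   for law in parser.laws_data:
#       print law['law_name'], law['proposal_date']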
class ParseKnessetLaws(ParseLaws):
"""
A class that parses Knesset Laws (laws after committees)
the constructor parses the laws data from the required pages
"""
def __init__(self, min_booklet):
self.url = KNESSET_LAWS_URL
self.pdf_url = r"http://www.knesset.gov.il"
self.laws_data = []
self.min_booklet = min_booklet
self.parse_pages_booklet()
def parse_pages_booklet(self):
full_page_parsed = True
index = None
while full_page_parsed:
if index:
params = {'First': index[0], 'Start': index[1]}
else:
params = None
soup_current_page = self.get_page_with_param(params)
index = self.get_param(soup_current_page)
full_page_parsed = self.parse_laws_page(soup_current_page)
def get_param(self, soup):
name_tags = soup.findAll(
lambda tag: tag.name == 'a' and tag.has_key('href') and re.match("javascript:SndSelf\((\d+),(\d+)\);",
tag['href']))
if name_tags and name_tags[0] and name_tags[0].get('href'):
m = re.match("javascript:SndSelf\((\d+),(\d+)\);", name_tags[0]['href'])
return m.groups()
else:
if not name_tags:
logger.info('Failed to find name tags')
elif not name_tags[0].get('href'):
logger.error('First name tag missing href %s' % name_tags[0])
return None
def parse_pdf(self, pdf_url):
return parse_knesset_bill_pdf.parse(pdf_url)
def parse_laws_page(self, soup):
name_tags = soup.findAll(lambda tag: tag.name == 'a' and tag.has_key('href') and tag['href'].find(".pdf") >= 0)
for tag in name_tags:
pdf_link = self.pdf_url + tag['href']
booklet = re.search(r"/(\d+)/", tag['href']).groups(1)[0]
if int(booklet) <= self.min_booklet:
return False
pdf_data = self.parse_pdf(pdf_link) or []
for j in range(len(pdf_data)):  # sometimes there is more than one law in a PDF
title = pdf_data[j]['title']
m = re.findall('[^\(\)]*\((.*?)\)[^\(\)]', title)
try:
comment = m[-1].strip().replace('\n', '').replace(' ', ' ')
law = title[:title.find(comment) - 1]
except:
comment = None
law = title.replace(',', '')
try:
correction = m[-2].strip().replace('\n', '').replace(' ', ' ')
law = title[:title.find(correction) - 1]
except:
correction = None
correction = normalize_correction_title_dashes(correction)
law = law.strip().replace('\n', '').replace(' ', ' ')
if law.find("הצעת ".decode("utf8")) == 0:
law = law[5:]
law_data = {'booklet': booklet, 'link': pdf_link, 'law': law, 'correction': correction,
'comment': comment, 'date': pdf_data[j]['date']}
if 'original_ids' in pdf_data[j]:
law_data['original_ids'] = pdf_data[j]['original_ids']
if 'bill' in pdf_data[j]:
law_data['bill'] = pdf_data[j]['bill']
self.laws_data.append(law_data)
return True
def update_booklet(self):
return int(self.laws_data[-1]['booklet'])
class ParseGovLaws(ParseKnessetLaws):
def __init__(self, min_booklet):
self.url = GOV_LAWS_URL
self.pdf_url = r"http://www.knesset.gov.il"
self.laws_data = []
self.min_booklet = min_booklet
def parse_gov_laws(self):
""" entry point to start parsing """
self.parse_pages_booklet()
def parse_pdf(self, pdf_url):
""" Grab a single pdf url, using cache via LinkedFile
"""
existing_count = Link.objects.filter(url=pdf_url).count()
if existing_count >= 1:
if existing_count > 1:
logger.warn("found two objects with the url %s. Taking the first" % pdf_url)
link = Link.objects.filter(url=pdf_url).first()
filename = None
if existing_count > 0:
files = [f for f in link.linkedfile_set.order_by('last_updated') if f.link_file.name != '']
if len(files) > 0:
link_file = files[0]
filename = link_file.link_file.path
logger.debug('trying reusing %s from %s' % (pdf_url, filename))
if not os.path.exists(filename):
# for some reason the file can't be found, we'll just d/l
# it again
filename = None
logger.debug('not reusing because file not found')
if not filename:
logger.debug('getting %s' % pdf_url)
contents = urllib2.urlopen(pdf_url).read()
link_file = LinkedFile()
saved_filename = os.path.basename(urlparse(pdf_url).path)
link_file.link_file.save(saved_filename, ContentFile(contents))
filename = link_file.link_file.path
try:
prop = GovProposalParser(filename)
except Exception:
logger.exception('Gov proposal exception %s' % pdf_url)
return None
# TODO: check if parsing handles more than 1 prop in a booklet
x = {'title': prop.get_title(),
'date': prop.get_date(),
# 'bill':prop,
'link_file': link_file}
return [x]
def update_single_bill(self, pdf_link, booklet=None, alt_title=None):
gp = None
if booklet is None:
# get booklet from existing bill
gps = GovProposal.objects.filter(source_url=pdf_link)
if gps.count() < 1:
logger.error('no existing object with given pdf link and no '
'booklet given. pdf_link = %s' % pdf_link)
return
gp = gps[0]
booklet = gp.booklet_number
pdf_data = self.parse_pdf(pdf_link)
if pdf_data is None:
return
for j in range(len(pdf_data)):  # sometimes there is more than one gov
# bill in a PDF
if alt_title: # just use the given title
title = alt_title
else: # get the title from the PDF file itself.
# doesn't work so well
title = pdf_data[j]['title']
m = re.findall('[^\(\)]*\((.*?)\)[^\(\)]', title)
try:
comment = m[-1].strip().replace('\n', '').replace(
' ', ' ')
law = title[:title.find(comment) - 1]
except:
comment = None
law = title.replace(',', '')
try:
correction = m[-2].strip().replace('\n', '').replace(
' ', ' ')
law = title[:title.find(correction) - 1]
except:
correction = None
correction = normalize_correction_title_dashes(correction)
law = law.strip().replace('\n', '').replace(' ', ' ')
if law.find("הצעת ".decode("utf8")) == 0:
law = law[5:]
law_data = {'booklet': booklet, 'link': pdf_link,
'law': law, 'correction': correction,
'comment': comment, 'date': pdf_data[j]['date']}
if 'original_ids' in pdf_data[j]:
law_data['original_ids'] = pdf_data[j]['original_ids']
if 'bill' in pdf_data[j]:
law_data['bill'] = pdf_data[j]['bill']
self.laws_data.append(law_data)
self.create_or_update_single_bill(
data=law_data,
pdf_link=pdf_link,
link_file=pdf_data[j]['link_file'],
gp=gp)
def create_or_update_single_bill(self, data, pdf_link, link_file, gp=None):
"""
data - a dict of data for this gov proposal
pdf_link - the source url from which the bill is taken
link_file - a cached version of the pdf
gp - an existing GovProposal objects. if this is given, it will be
updated, instead of creating a new object
"""
if not (data['date']) or CUTOFF_DATE and data['date'] < CUTOFF_DATE:
return
law_name = data['law']
try:
law, created = Law.objects.get_or_create(title=law_name)
except Law.MultipleObjectsReturned:
created = False
try:
law = Law.objects.filter(title=law_name, merged_into=None).last()
except Law.MultipleObjectsReturned: # How is this possible? probably another bug somewhere
law = Law.objects.filter(title=law_name).last()
if created:
law.save()
if law.merged_into:
law = law.merged_into
title = u''
if data['correction']:
title += data['correction']
if data['comment']:
title += ' ' + data['comment']
if len(title) <= 1:
title = u'חוק חדש'
k_id = Knesset.objects.get_knesset_by_date(data['date']).pk
if gp is None: # create new GovProposal, or look for an identical one
(gp, created) = GovProposal.objects.get_or_create(
booklet_number=data['booklet'],
source_url=data['link'],
title=title,
law=law,
date=data['date'], defaults={'knesset_id': k_id})
if created:
gp.save()
logger.debug("created GovProposal id = %d" % gp.id)
# look for similar bills
bill_params = dict(law=law, title=title, stage='3',
stage_date=data['date'])
similar_bills = Bill.objects.filter(**bill_params).order_by('id')
if len(similar_bills) >= 1:
b = similar_bills[0]
if len(similar_bills) > 1:
logger.debug("multiple bills detected")
for bill in similar_bills:
if bill.id == b.id:
logger.debug("bill being used now: %d" % bill.id)
else:
logger.debug("bill with same fields: %d" % bill.id)
else: # create a bill
b = Bill(**bill_params)
b.save()
logger.debug("created bill %d" % b.id)
# see if the found bill is already linked to a gov proposal
try:
bill_gp_id = b.gov_proposal.id
except GovProposal.DoesNotExist:
bill_gp_id = None
if (bill_gp_id is None) or (gp.id == b.gov_proposal.id):
# b is not linked to gp, or linked to the current gp
gp.bill = b
gp.save()
else:
logger.debug("processing gp %d - matching bill (%d) already has gp"
" (%d)" % (gp.id, b.id, b.gov_proposal.id))
else: # update a given GovProposal
# TODO: move to a classmethod
gp.booklet_number = data['booklet']
gp.knesset_id = k_id
gp.source_url = data['link']
gp.title = title
gp.law = law
gp.date = data['date']
gp.save()
gp.bill.title = title
gp.bill.law = law
gp.bill.save()
b = gp.bill
if (link_file is not None) and (link_file.link is None):
link = Link(title=pdf_link, url=pdf_link,
content_type=ContentType.objects.get_for_model(gp),
object_pk=str(gp.id))
link.save()
link_file.link = link
link_file.save()
logger.debug("check updated %s" % b.get_absolute_url())
def parse_laws_page(self, soup):
# Fall back to regex, because these pages are too broken to get the
# <td> element we need with BS
u = unicode(soup)
pairs = []
curr_href = None
for line in u.split('\n'):
# This relies on the PDF link always appearing on its own line, followed by the actual title; otherwise it would raise errors
curr_title = None
if '.pdf' in line:
curr_href = re.search('href="(.*?)"', line).group(1)
if 'LawText1">' in line:
try:
curr_title = re.search('LawText1">(.*?)</', line).group(1)
except AttributeError:
curr_title = re.search('LawText1">(.*?)\r', line).group(1)
pairs.append((curr_title, curr_href))
if not pairs:
return False
for title, href in pairs:
try:
pdf_link = self.pdf_url + href
booklet = re.search(r"/(\d+)/", href).groups(1)[0]
if int(booklet) <= self.min_booklet:
return False
self.update_single_bill(pdf_link, booklet=booklet, alt_title=title)
except TypeError:
logger.exception('law scraping exception pdf_url: %s href %s' % (self.pdf_url, href))
return True
#############
# Main #
#############
if __name__ == '__main__':
m = ParsePrivateLaws(15)
|
OriHoch/Open-Knesset
|
simple/parsers/parse_laws.py
|
Python
|
bsd-3-clause
| 21,686
| 0.003465
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
import logging, os.path
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
import tornado.websocket
import tornado.gen
class Application(tornado.web.Application):
def __init__(self):
base_dir = os.path.dirname(__file__)
app_settings = {
"debug": True,
'static_path': os.path.join(base_dir, "static"),
}
tornado.web.Application.__init__(self, [
tornado.web.url(r"/", MainHandler, name="main"),
tornado.web.url(r"/live", WebSocketHandler, name="websocket"),
], **app_settings)
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.render('index.html')
class WebSocketHandler(tornado.websocket.WebSocketHandler):
listenners = []
def check_origin(self, origin):
return True
@tornado.gen.engine
def open(self):
WebSocketHandler.listenners.append(self)
def on_close(self):
if self in WebSocketHandler.listenners:
WebSocketHandler.listenners.remove(self)
@tornado.gen.engine
def on_message(self, wsdata):
for listenner in WebSocketHandler.listenners:
listenner.write_message(wsdata)
@tornado.gen.coroutine
def main():
tornado.options.parse_command_line()
http_server = tornado.httpserver.HTTPServer(Application())
http_server.listen(8888)
logging.info("application running on http://localhost:8888")
if __name__ == "__main__":
tornado.ioloop.IOLoop.current().run_sync(main)
tornado.ioloop.IOLoop.current().start()
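# Illustrative client for the /live broadcast endpoint above (a sketch using
# Tornado's own client helpers; run it in a separate process while the server
# is up):
#
#   from tornado import gen, ioloop, websocket
#
#   @gen.coroutine
#   def client():
#       conn = yield websocket.websocket_connect("ws://localhost:8888/live")
#       conn.write_message("hello")
#       msg = yield conn.read_message()
#       print(msg)  # the server echoes the broadcast back to every listener
#
#   # ioloop.IOLoop.current().run_sync(client)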
|
mehmetkose/react-websocket
|
example/server.py
|
Python
|
mit
| 1,627
| 0.003688
|
#!/usr/bin/env python
# Calculate a table of pairwise energies and forces between "INT" atoms
# in the lipid membrane model described in
# Brannigan et al, Phys Rev E, 72, 011915 (2005)
# The energy of this interaction U(r) = eps*(0.4*(sigma/r)^12 - 3.0*(sigma/r)^2)
# I realized later this is not what we want because although energy is conserved
# all energies are shifted with respect to energies used in the Brannigan paper
# (by 0.27 kCal/mole) and the later Watson JCP 2011 paper (by 0.224 kCal/mole).
# (So don't use this.)
# Calculate and print a table of r, U(r), and F(r).
def S(r, rc1, rc2, derivative=False):
"""
Calculate the switching function S(r) which decays continuously
between 1 and 0 in the range from rc1 to rc2 (rc2>rc1):
S(r) = (rc2^2 - r^2)^2 * (rc2^2 + 2*r^2 - 3*rc1^2) / (rc2^2-rc1^2)^3
I'm using the same smoothing/switching cutoff function used by the CHARMM
force-fields. (I'm even using the same code to implement it, taken
from lammps charmm/coul/charmm pair style, rewritten in python.)
"""
assert(rc2>rc1)
rsq = r*r
rc1sq = rc1*rc1
rc2sq = rc2*rc2
denom_lj_inv = (1.0 / ((rc2sq-rc1sq)*
(rc2sq-rc1sq)*
(rc2sq-rc1sq)))
if rsq > rc2sq:
return 0.0
elif rsq < rc1sq:
if derivative:
return 0.0
else:
return 1.0
else:
rc2sq_minus_rsq = (rc2sq - rsq)
rc2sq_minus_rsq_sq = rc2sq_minus_rsq * rc2sq_minus_rsq
if derivative:
return (12.0 * rsq * rc2sq_minus_rsq * (rsq-rc1sq) * denom_lj_inv)
else:
return (rc2sq_minus_rsq_sq *
(rc2sq + 2.0*rsq - 3.0*rc1sq) * denom_lj_inv)
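# Quick sanity check of the switching behaviour described in the docstring
# (illustrative only, using the Rc1/Rc2 values defined further below):
#
#   assert S(1.0, rc1=22.0, rc2=22.5) == 1.0      # well inside rc1 -> no damping
#   assert S(23.0, rc1=22.0, rc2=22.5) == 0.0     # beyond rc2 -> fully switched off
#   assert 0.0 < S(22.25, rc1=22.0, rc2=22.5) < 1.0  # decays smoothly in between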
def U(r, eps, sigma):
return eps* (0.4*pow((sigma/r),12) - 3.0*sigma*sigma/(r*r))
def F(r, eps, sigma):
return eps*(12*0.4*pow((sigma/r),13)/sigma - 2*3.0*sigma*sigma/(r*r*r))
epsilon = 2.75/4.184 # kCal/mole
sigma = 7.5
Rmin = 2.6
Rmax = 22.6
Rc1 = 22.0
Rc2 = 22.5
N = 1001
for i in range(0,N):
r = Rmin + i*(Rmax-Rmin)/(N-1)
U_r = U(r, epsilon, sigma)
F_r = F(r, epsilon, sigma)
# Multiply U(r) & F(r) by the smoothing/switch function
# (compute F(r) first so the product rule uses the unsmoothed U(r))
F_r = U_r * S(r, Rc1, Rc2, True) + F_r * S(r, Rc1, Rc2, False)
U_r = U_r * S(r, Rc1, Rc2)
print(str(i+1)+' '+str(r)+' '+str(U_r)+' '+str(F_r))
|
jcarlson23/lammps
|
tools/moltemplate/examples/CG_membrane_examples/membrane_BranniganPRE2005/moltemplate_files/version_charmm_cutoff/calc_table.py
|
Python
|
gpl-2.0
| 2,403
| 0.012901
|
# -*- coding: utf-8 -*-
import unittest
import logging
from os.path import isfile
from os import popen
from os import remove
from t2db_objects import objects
from t2db_objects.utilities import formatHash
from t2db_objects.parameters import generate_config_yaml
from clean_text.cleaner import sentenceCleaner
from clean_text.cleaner import tokenCleaner
from clean_text.cleaner import tokenize
from clean_text.cleaner import sentenize
from clean_text.cleaner import cleanSentence
from clean_text.cleaner import Processor
from clean_text.cleaner import cleaner
from clean_text.utilities import load_stopwords
from clean_text.run import param_fields
from clean_text.run import conf_fields
from clean_text import functions
logger = logging.getLogger('clean_text')
""" Count the word in the file given"""
def wordCount(word, file_):
p = popen("cat " + file_ + " | awk -F '\t' '{print $6}' | grep -w " + word + " | wc -l")
# Get result and cast it
pOut = p.read()
p.close()
return int(pOut)
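# A rough pure-Python approximation of the shell pipeline above (illustrative
# only; assumes the same tab-separated layout with the cleaned text in column 6):
#
#   def wordCountPy(word, file_):
#       count = 0
#       with open(file_) as fh:
#           for row in fh:
#               cols = row.rstrip('\n').split('\t')
#               if len(cols) > 5 and word.strip() in cols[5].split():
#                   count += 1
#       return count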
class TestCleanerFunctions(unittest.TestCase):
def setUp(self):
pass
def test_sentenceCleaner(self):
sentence = "this is a @user sample and a http://hi.com sample"
goldenSentence = "this is a sample and a sample"
self.assertEqual(sentenceCleaner(sentence, ["removeUrl", "removeUserMention"]), goldenSentence)
def test_tokenize(self):
sentence = "Hello didn't very happy 1313"
goldenTokens = ["Hello" , "did", "n't", "very", "happy", "1313"]
tokens = tokenize(sentence)
for i in range(0, len(tokens)):
self.assertEqual(tokens[i][0], goldenTokens[i])
def test_sentenize(self):
sentence = "Hello I'm very happy 1313"
goldenSentence = "Hello I 'm very happy 1313"
tokens = tokenize(sentence)
self.assertEqual(sentenize(tokens), goldenSentence)
def test_tokenCleaner(self):
sentence = "Hello I'm very happy 1313"
goldenSentence = "hello"
tokens = tokenize(sentence)
functions.stopwords = load_stopwords('etc/stopwords_en.txt')
newTokens = tokenCleaner(tokens, ["stemming", "toLowerCase", "removePunctuationAndNumbers", "stopwording"])
self.assertEqual(sentenize(newTokens), goldenSentence)
def test_cleanSentence(self):
sentence = ("At 8 o'clock on Thursday morning, the boys and girls didn't feel very good.")
sentenceProcList = ["removeUrl", "removeUserMention"]
functions.stopwords = load_stopwords('etc/stopwords_en.txt')
tokenProcList = ["stemming", "toLowerCase", "removePunctuationAndNumbers", "stopwording", "removeSingleChar", "removeDoubleChar"]
newSentence = cleanSentence(sentence, sentenceProcList, tokenProcList)
goldSentence = "oclock thursday morning boy girl feel good"
self.assertEqual(newSentence, goldSentence)
def test_cleanSentenceUnicode(self):
sentence = u"Según @NWS_PTWC, no hay riesgo generalizado de #tsunami tras el #sismo de Japón http://t.co/icErcNfSCf"
sentenceProcList = ["removeUrl", "removeUserMention"]
functions.stopwords = load_stopwords('etc/stopwords_en.txt')
tokenProcList = ["stemming", "toLowerCase", "removePunctuationAndNumbers", "stopwording", "removeSingleChar", "removeDoubleChar"]
newSentence = cleanSentence(sentence, sentenceProcList, tokenProcList)
goldSentence = u"según hay riesgo generalizado tsunami tras sismo japón"
self.assertEqual(newSentence, goldSentence)
@unittest.skip("demonstrating skipping")
def test_processFile(self):
rawObject = {
"date":"Sun Aug 07 01:28:32 IST 2011",
"id":"100000335933878272",
"user_id":"71610408",
"status":"@baloji you were so awesome, it was amazing and you were shining like the star that you are...MERCI!! #baloji i_i"
}
goldenRawObject = {
"date":"Sun Aug 07 01:28:32 IST 2011",
"id":"100000335933878272",
"user_id":"71610408",
"status":"@baloji you were so awesome, it was amazing and you were shining like the star that you are...MERCI!! #baloji i_i",
"status_clean":"awesome amaze shin star merci baloji"
}
rawObjects = [rawObject]
text_field = 'status'
new_text_field = 'status_clean'
sentence_proc_list = {'removeUrl', 'removeUserMention'}
token_proc_list = {'stemming', 'toLowerCase', 'removePunctuationAndNumbers',
'stopwording', 'removeSingleChar', 'removeDoubleChar'}
functions.stopwords = load_stopwords('etc/stopwords_en.txt')
proc = Processor(text_field, new_text_field, sentence_proc_list, token_proc_list)
newRawObject = proc.processFile(rawObjects)
self.assertEqual(rawObject, goldenRawObject)
@unittest.skip("demonstrating skipping")
def test_processFileUnicode(self):
rawObject = {
"date":u"Sun Aug 07 01:28:32 IST 2011",
"id":u"100000335933878272",
"user_id":u"71610408",
"status":u"Según @NWS_PTWC, no hay riesgo generalizado de #tsunami tras el #sismo de Japón http://t.co/icErcNfSCf",
}
goldenRawObject = {
"date":u"Sun Aug 07 01:28:32 IST 2011",
"id":u"100000335933878272",
"user_id":u"71610408",
"status":u"Según @NWS_PTWC, no hay riesgo generalizado de #tsunami tras el #sismo de Japón http://t.co/icErcNfSCf",
"status_clean":u"Según hay riesgo generalizado tsunami tras sismo Japón"
}
rawObjects = [rawObject]
text_field = 'status'
new_text_field = 'status_clean'
sentence_proc_list = {'removeUrl', 'removeUserMention'}
token_proc_list = {'stemming', 'toLowerCase', 'removePunctuationAndNumbers',
'stopwording', 'removeSingleChar', 'removeDoubleChar'}
functions.stopwords = load_stopwords('etc/stopwords_en.txt')
proc = Processor(text_field, new_text_field, sentence_proc_list, token_proc_list)
newRawObject = proc.processFile(rawObjects)
self.assertEqual(rawObject, goldenRawObject)
@unittest.skip("demonstrating skipping")
def test_notValidProcessFile(self):
rawObject = {
"date":"Sun Aug 07 01:28:32 IST 2011",
"id":"100000335933878272",
"user_id":"71610408",
"status":"@baloji you were so awesome, it was amazing and you were shining like the star that you are...MERCI!! #baloji i_i"
}
rawObjects = [rawObject]
text_field = 'otherfield'
new_text_field = 'status_clean'
sentence_proc_list = {'removeUrl', 'removeUserMention'}
token_proc_list = {'stemming', 'toLowerCase', 'removePunctuationAndNumbers',
'stopwording', 'removeSingleChar', 'removeDoubleChar'}
functions.stopwords = load_stopwords('etc/stopwords_en.txt')
proc = Processor(text_field, new_text_field, sentence_proc_list, token_proc_list)
self.assertRaises(Exception, proc.processFile, rawObjects)
#@unittest.skip("avoid big files")
def test_cleaner(self):
rawParams = {
'input_file':'etc/example.tsv',
'output_file':'output.tmp',
'config_file':'etc/config.yaml',
}
params = objects.Configuration(param_fields, rawParams)
config = generate_config_yaml(conf_fields, params.config_file)
if isfile(params.output_file):
remove(params.output_file)
cleaner(params, config)
self.assertTrue(isfile(params.output_file))
self.assertEqual(wordCount(" to ", params.output_file), 0)
self.assertEqual(wordCount(" photo ", params.output_file), 0)
|
ptorrestr/clean_text
|
clean_text/tests/test_cleaner.py
|
Python
|
gpl-2.0
| 7,295
| 0.010981
|
# -*- Mode: Python -*-
# vi:si:et:sw=4:sts=4:ts=4
#
# Flumotion - a streaming media server
# Copyright (C) 2004,2005,2006,2007 Fluendo, S.L. (www.fluendo.com).
# All rights reserved.
# This file may be distributed and/or modified under the terms of
# the GNU General Public License version 2 as published by
# the Free Software Foundation.
# This file is distributed without any warranty; without even the implied
# warranty of merchantability or fitness for a particular purpose.
# See "LICENSE.GPL" in the source distribution for more information.
# Licensees having purchased or holding a valid Flumotion Advanced
# Streaming Server license may use this file in accordance with the
# Flumotion Advanced Streaming Server Commercial License Agreement.
# See "LICENSE.Flumotion" in the source distribution for more information.
# Headers in this file shall remain intact.
"""
code for workers executing jobs for the manager
"""
__version__ = "$Rev: 6125 $"
|
flyapen/UgFlu
|
flumotion/worker/__init__.py
|
Python
|
gpl-2.0
| 962
| 0
|
import graphene
from .account.schema import AccountMutations, AccountQueries
from .checkout.schema import CheckoutMutations, CheckoutQueries
from .core.schema import CoreMutations
from .discount.schema import DiscountMutations, DiscountQueries
from .menu.schema import MenuMutations, MenuQueries
from .order.schema import OrderMutations, OrderQueries
from .page.schema import PageMutations, PageQueries
from .payment.schema import PaymentMutations, PaymentQueries
from .product.schema import ProductMutations, ProductQueries
from .shipping.schema import ShippingMutations, ShippingQueries
from .shop.schema import ShopMutations, ShopQueries
from .translations.schema import TranslationQueries
class Query(AccountQueries, CheckoutQueries, DiscountQueries, MenuQueries,
OrderQueries, PageQueries, PaymentQueries, ProductQueries,
ShippingQueries, ShopQueries, TranslationQueries):
node = graphene.Node.Field()
class Mutations(AccountMutations, CheckoutMutations, CoreMutations,
DiscountMutations, MenuMutations, OrderMutations,
PageMutations, PaymentMutations, ProductMutations,
ShippingMutations, ShopMutations):
pass
schema = graphene.Schema(Query, Mutations)
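# Illustrative only: a graphene schema composed this way can be queried
# directly, e.g. with an introspection query (assumes Django is configured):
#
#   result = schema.execute('{ __schema { queryType { name } } }')
#   print(result.data)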
|
UITools/saleor
|
saleor/graphql/api.py
|
Python
|
bsd-3-clause
| 1,246
| 0
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import paddle.fluid as fluid
import paddle
import sys
import numpy
import unittest
import math
import sys
import os
BATCH_SIZE = 64
def inference_program():
img = fluid.layers.data(name='img', shape=[1, 28, 28], dtype='float32')
hidden = fluid.layers.fc(input=img, size=200, act='tanh')
hidden = fluid.layers.fc(input=hidden, size=200, act='tanh')
prediction = fluid.layers.fc(input=hidden, size=10, act='softmax')
return prediction
def train_program():
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
predict = inference_program()
cost = fluid.layers.cross_entropy(input=predict, label=label)
avg_cost = fluid.layers.mean(cost)
acc = fluid.layers.accuracy(input=predict, label=label)
return [avg_cost, acc]
def optimizer_func():
return fluid.optimizer.Adam(learning_rate=0.001)
def train(use_cuda, train_program, params_dirname):
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
trainer = fluid.Trainer(
train_func=train_program, place=place, optimizer_func=optimizer_func)
def event_handler(event):
if isinstance(event, fluid.EndEpochEvent):
test_reader = paddle.batch(
paddle.dataset.mnist.test(), batch_size=BATCH_SIZE)
avg_cost, acc = trainer.test(
reader=test_reader, feed_order=['img', 'label'])
print("avg_cost: %s" % avg_cost)
print("acc : %s" % acc)
if acc > 0.2: # Smaller value to increase CI speed
trainer.save_params(params_dirname)
else:
print('BatchID {0}, Test Loss {1:0.2}, Acc {2:0.2}'.format(
event.epoch + 1, avg_cost, acc))
if math.isnan(avg_cost):
sys.exit("got NaN loss, training failed.")
train_reader = paddle.batch(
paddle.reader.shuffle(
paddle.dataset.mnist.train(), buf_size=500),
batch_size=BATCH_SIZE)
trainer.train(
num_epochs=1,
event_handler=event_handler,
reader=train_reader,
feed_order=['img', 'label'])
def infer(use_cuda, inference_program, params_dirname=None):
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
inferencer = fluid.Inferencer(
infer_func=inference_program, param_path=params_dirname, place=place)
batch_size = 1
tensor_img = numpy.random.uniform(-1.0, 1.0,
[batch_size, 1, 28, 28]).astype("float32")
results = inferencer.infer({'img': tensor_img})
print("infer results: ", results[0])
def main(use_cuda):
params_dirname = "recognize_digits_mlp.inference.model"
# call train() with is_local argument to run distributed train
train(
use_cuda=use_cuda,
train_program=train_program,
params_dirname=params_dirname)
infer(
use_cuda=use_cuda,
inference_program=inference_program,
params_dirname=params_dirname)
if __name__ == '__main__':
# for use_cuda in (False, True):
main(use_cuda=False)
|
Canpio/Paddle
|
python/paddle/fluid/tests/book/high-level-api/recognize_digits/test_recognize_digits_mlp.py
|
Python
|
apache-2.0
| 3,760
| 0.000266
|
#coding=UTF-8
from pyspark import SparkContext, SparkConf, SQLContext, Row, HiveContext
from pyspark.sql.types import *
from datetime import date, datetime, timedelta
import sys, re, os
st = datetime.now()
conf = SparkConf().setAppName('PROC_O_IBK_WSYH_ECUSRLOGINTYPE').setMaster(sys.argv[2])
sc = SparkContext(conf = conf)
sc.setLogLevel('WARN')
if len(sys.argv) > 5:
if sys.argv[5] == "hive":
sqlContext = HiveContext(sc)
else:
sqlContext = SQLContext(sc)
hdfs = sys.argv[3]
dbname = sys.argv[4]
# Dates needed for processing
etl_date = sys.argv[1]
# ETL date
V_DT = etl_date
# previous day
V_DT_LD = (date(int(etl_date[0:4]), int(etl_date[4:6]), int(etl_date[6:8])) + timedelta(-1)).strftime("%Y%m%d")
# first day of the current month
V_DT_FMD = date(int(etl_date[0:4]), int(etl_date[4:6]), 1).strftime("%Y%m%d")
# last day of the previous month
V_DT_LMD = (date(int(etl_date[0:4]), int(etl_date[4:6]), 1) + timedelta(-1)).strftime("%Y%m%d")
# 10-character date (YYYY-MM-DD)
V_DT10 = (date(int(etl_date[0:4]), int(etl_date[4:6]), int(etl_date[6:8]))).strftime("%Y-%m-%d")
V_STEP = 0
O_CI_WSYH_ECUSRLOGINTYPE = sqlContext.read.parquet(hdfs+'/O_CI_WSYH_ECUSRLOGINTYPE/*')
O_CI_WSYH_ECUSRLOGINTYPE.registerTempTable("O_CI_WSYH_ECUSRLOGINTYPE")
# Task [12] 001-01::
V_STEP = V_STEP + 1
# First delete all existing data from the target table
ret = os.system("hdfs dfs -rm -r /"+dbname+"/F_CI_WSYH_ECUSRLOGINTYPE/*.parquet")
# Copy a full snapshot over from yesterday's backup table
ret = os.system("hdfs dfs -cp -f /"+dbname+"/F_CI_WSYH_ECUSRLOGINTYPE_BK/"+V_DT_LD+".parquet /"+dbname+"/F_CI_WSYH_ECUSRLOGINTYPE/"+V_DT+".parquet")
F_CI_WSYH_ECUSRLOGINTYPE = sqlContext.read.parquet(hdfs+'/F_CI_WSYH_ECUSRLOGINTYPE/*')
F_CI_WSYH_ECUSRLOGINTYPE.registerTempTable("F_CI_WSYH_ECUSRLOGINTYPE")
sql = """
SELECT A.USERSEQ AS USERSEQ
,A.MCHANNELID AS MCHANNELID
,A.LOGINTYPE AS LOGINTYPE
,A.USERID AS USERID
,A.PASSWORD AS PASSWORD
,A.LOGINTYPESTATE AS LOGINTYPESTATE
,A.UPDATEPASSWORDDATE AS UPDATEPASSWORDDATE
,A.WRONGPASSCOUNT AS WRONGPASSCOUNT
,A.UNLOCKDATE AS UNLOCKDATE
,A.FIRSTLOGINTIME AS FIRSTLOGINTIME
,A.LASTLOGINTIME AS LASTLOGINTIME
,A.LASTLOGINADDR AS LASTLOGINADDR
,A.CREATEUSERSEQ AS CREATEUSERSEQ
,A.CREATEDEPTSEQ AS CREATEDEPTSEQ
,A.CREATETIME AS CREATETIME
,A.UPDATEUSERSEQ AS UPDATEUSERSEQ
,A.UPDATEDEPTSEQ AS UPDATEDEPTSEQ
,A.UPDATETIME AS UPDATETIME
,A.FR_ID AS FR_ID
,V_DT AS ODS_ST_DATE
,'IBK' AS ODS_SYS_ID
FROM O_CI_WSYH_ECUSRLOGINTYPE A --e-banking user authentication info table
"""
sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql)
F_CI_WSYH_ECUSRLOGINTYPE_INNTMP1 = sqlContext.sql(sql)
F_CI_WSYH_ECUSRLOGINTYPE_INNTMP1.registerTempTable("F_CI_WSYH_ECUSRLOGINTYPE_INNTMP1")
#F_CI_WSYH_ECUSRLOGINTYPE = sqlContext.read.parquet(hdfs+'/F_CI_WSYH_ECUSRLOGINTYPE/*')
#F_CI_WSYH_ECUSRLOGINTYPE.registerTempTable("F_CI_WSYH_ECUSRLOGINTYPE")
sql = """
SELECT DST.USERSEQ --user sequence number:src.USERSEQ
,DST.MCHANNELID --module channel code:src.MCHANNELID
,DST.LOGINTYPE --login type:src.LOGINTYPE
,DST.USERID --user login ID:src.USERID
,DST.PASSWORD --user login password:src.PASSWORD
,DST.LOGINTYPESTATE --activation status:src.LOGINTYPESTATE
,DST.UPDATEPASSWORDDATE --last password change time:src.UPDATEPASSWORDDATE
,DST.WRONGPASSCOUNT --wrong password count:src.WRONGPASSCOUNT
,DST.UNLOCKDATE --last unlock date/time:src.UNLOCKDATE
,DST.FIRSTLOGINTIME --first login time:src.FIRSTLOGINTIME
,DST.LASTLOGINTIME --last login time:src.LASTLOGINTIME
,DST.LASTLOGINADDR --last login address:src.LASTLOGINADDR
,DST.CREATEUSERSEQ --creating user sequence number:src.CREATEUSERSEQ
,DST.CREATEDEPTSEQ --creating organization sequence number:src.CREATEDEPTSEQ
,DST.CREATETIME --creation time:src.CREATETIME
,DST.UPDATEUSERSEQ --updating user sequence number:src.UPDATEUSERSEQ
,DST.UPDATEDEPTSEQ --updating organization sequence number:src.UPDATEDEPTSEQ
,DST.UPDATETIME --update time:src.UPDATETIME
,DST.FR_ID --legal entity ID:src.FR_ID
,DST.ODS_ST_DATE --system date:src.ODS_ST_DATE
,DST.ODS_SYS_ID --system flag:src.ODS_SYS_ID
FROM F_CI_WSYH_ECUSRLOGINTYPE DST
LEFT JOIN F_CI_WSYH_ECUSRLOGINTYPE_INNTMP1 SRC
ON SRC.USERSEQ = DST.USERSEQ
AND SRC.MCHANNELID = DST.MCHANNELID
AND SRC.LOGINTYPE = DST.LOGINTYPE
WHERE SRC.USERSEQ IS NULL """
sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql)
F_CI_WSYH_ECUSRLOGINTYPE_INNTMP2 = sqlContext.sql(sql)
dfn="F_CI_WSYH_ECUSRLOGINTYPE/"+V_DT+".parquet"
F_CI_WSYH_ECUSRLOGINTYPE_INNTMP2=F_CI_WSYH_ECUSRLOGINTYPE_INNTMP2.unionAll(F_CI_WSYH_ECUSRLOGINTYPE_INNTMP1)
F_CI_WSYH_ECUSRLOGINTYPE_INNTMP1.cache()
F_CI_WSYH_ECUSRLOGINTYPE_INNTMP2.cache()
nrowsi = F_CI_WSYH_ECUSRLOGINTYPE_INNTMP1.count()
nrowsa = F_CI_WSYH_ECUSRLOGINTYPE_INNTMP2.count()
F_CI_WSYH_ECUSRLOGINTYPE_INNTMP2.write.save(path = hdfs + '/' + dfn, mode='overwrite')
F_CI_WSYH_ECUSRLOGINTYPE_INNTMP1.unpersist()
F_CI_WSYH_ECUSRLOGINTYPE_INNTMP2.unpersist()
et = datetime.now()
print("Step %d start[%s] end[%s] use %d seconds, insert F_CI_WSYH_ECUSRLOGINTYPE lines %d, all lines %d") % (V_STEP, st.strftime("%H:%M:%S"), et.strftime("%H:%M:%S"), (et-st).seconds, nrowsi, nrowsa)
ret = os.system("hdfs dfs -mv /"+dbname+"/F_CI_WSYH_ECUSRLOGINTYPE/"+V_DT_LD+".parquet /"+dbname+"/F_CI_WSYH_ECUSRLOGINTYPE_BK/")
# First delete today's data from the backup table
ret = os.system("hdfs dfs -rm -r /"+dbname+"/F_CI_WSYH_ECUSRLOGINTYPE_BK/"+V_DT+".parquet")
# Copy a full snapshot of today's data from the main table to the backup table
ret = os.system("hdfs dfs -cp -f /"+dbname+"/F_CI_WSYH_ECUSRLOGINTYPE/"+V_DT+".parquet /"+dbname+"/F_CI_WSYH_ECUSRLOGINTYPE_BK/"+V_DT+".parquet")
|
cysuncn/python
|
spark/crm/PROC_O_IBK_WSYH_ECUSRLOGINTYPE.py
|
Python
|
gpl-3.0
| 6,837
| 0.013234
|
import concurrent.futures
from itertools import islice
import xbmc
import threading
Executor = concurrent.futures.ThreadPoolExecutor
def execute(f, iterable, stop_flag=None, workers=10, timeout=30):
with Executor(max_workers=workers) as executor:
if stop_flag is not None: threading.Timer(timeout, stop_flag.set).start()  # start the timeout timer; guard against stop_flag being None
for future in _batched_pool_runner(executor, workers, f,
iterable, timeout):
if xbmc.abortRequested:
break
if stop_flag and stop_flag.isSet():
break
yield future.result()
def _batched_pool_runner(pool, batch_size, f, iterable, timeout):
futures = [pool.submit(f, x) for x in iterable]
try:
for item in concurrent.futures.as_completed(futures, timeout):
yield item
except:
pass
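# Example use (illustrative sketch; importing this module requires Kodi's xbmc):
#
#   import threading
#   stop = threading.Event()
#   for result in execute(lambda x: x * 2, range(5), stop_flag=stop, timeout=5):
#       print(result)  # results arrive in completion order, not input order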
|
repotvsupertuga/tvsupertuga.repository
|
script.module.universalscrapers/lib/universalscrapers/executor.py
|
Python
|
gpl-2.0
| 842
| 0.001188
|
from src.settings import Colors
def league_color(league: str) -> Colors:
if league in [
]:
return Colors.GREEN
if league in [
'1 CFL (Montenegro)',
'A Lyga (Lithuania)',
'Bikar (Iceland)',
'Coupe de la Ligue (France)',
'EURO Qualifiers (Europe)',
'FA Cup (England)',
'J-League (Japan)',
'J-League 2 (Japan)',
'K-League (South Korea)',
'Landspokal (Denmark)',
'League Cup (Scotland)',
'Meistriliiga (Estonia)',
'OFB Cup (Austria)',
'Pohar CMFS (Czech Republic)',
'Premier League (Wales)',
'Primera Division (Chile)',
'Proximus League (Belgium)',
'Serie A (Italy)',
'S-League (Singapore)',
'Slovensky Pohar (Slovakia)',
'Svenska Cupen (Sweden)',
'Swiss Cup (Switzerland)',
'Virsliga (Latvia)',
'Vyscha Liga (Ukraine)',
'Úrvalsdeild (Iceland)',
]:
return Colors.RED
if league in [
]:
return Colors.YELLOW
return Colors.EMPTY
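# Illustrative usage (assumes src.settings.Colors as imported above):
#
#   assert league_color('Serie A (Italy)') == Colors.RED
#   assert league_color('Some Unknown League') == Colors.EMPTY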
|
vapkarian/soccer-analyzer
|
src/colors/v20/default.py
|
Python
|
mit
| 1,079
| 0
|
# Third-party
import astropy.units as u
import numpy as np
# Project
from ....dynamics import PhaseSpacePosition, Orbit
from ....units import galactic
PSP = PhaseSpacePosition
ORB = Orbit
class _TestBase(object):
use_half_ndim = False
E_unit = u.erg/u.kg
@classmethod
def setup_class(cls):
np.random.seed(42)
ndim = 6
r_ndim = ndim # return ndim
if cls.use_half_ndim:
r_ndim = r_ndim // 2
norbits = 16
ntimes = 8
# some position or phase-space position arrays we will test methods on:
cls.w0s = []
cls.energy_return_shapes = []
cls.gradient_return_shapes = []
cls.hessian_return_shapes = []
# 1D - phase-space position
cls.w0s.append(PSP(pos=np.random.random(size=ndim//2),
vel=np.random.random(size=ndim//2)))
cls.w0s.append(PSP(pos=np.random.random(size=ndim//2)*u.kpc,
vel=np.random.random(size=ndim//2)*u.km/u.s))
cls.energy_return_shapes += [(1,)]*2
cls.gradient_return_shapes += [(r_ndim, 1)]*2
cls.hessian_return_shapes += [(r_ndim, r_ndim, 1)]*2
# 2D - phase-space position
cls.w0s.append(PSP(pos=np.random.random(size=(ndim//2, norbits)),
vel=np.random.random(size=(ndim//2, norbits))))
cls.w0s.append(PSP(pos=np.random.random(size=(ndim//2, norbits))*u.kpc,
vel=np.random.random(size=(ndim//2, norbits))*u.km/u.s))
cls.energy_return_shapes += [(norbits,)]*2
cls.gradient_return_shapes += [(r_ndim, norbits)]*2
cls.hessian_return_shapes += [(r_ndim, r_ndim, norbits)]*2
# 3D - phase-space position
cls.w0s.append(PSP(pos=np.random.random(size=(ndim//2, norbits, ntimes)),
vel=np.random.random(size=(ndim//2, norbits, ntimes))))
cls.w0s.append(PSP(pos=np.random.random(size=(ndim//2, norbits, ntimes))*u.kpc,
vel=np.random.random(size=(ndim//2, norbits, ntimes))*u.km/u.s))
cls.energy_return_shapes += [(norbits, ntimes)]*2
cls.gradient_return_shapes += [(r_ndim, norbits, ntimes)]*2
cls.hessian_return_shapes += [(r_ndim, r_ndim, norbits, ntimes)]*2
# 2D - orbit
cls.w0s.append(ORB(pos=np.random.random(size=(ndim//2, ntimes)),
vel=np.random.random(size=(ndim//2, ntimes))))
cls.w0s.append(ORB(pos=np.random.random(size=(ndim//2, ntimes))*u.kpc,
vel=np.random.random(size=(ndim//2, ntimes))*u.km/u.s))
cls.energy_return_shapes += [(ntimes,)]*2
cls.gradient_return_shapes += [(r_ndim, ntimes,)]*2
cls.hessian_return_shapes += [(r_ndim, r_ndim, ntimes,)]*2
# 3D - orbit
cls.w0s.append(ORB(pos=np.random.random(size=(ndim//2, ntimes, norbits)),
vel=np.random.random(size=(ndim//2, ntimes, norbits))))
cls.w0s.append(ORB(pos=np.random.random(size=(ndim//2, ntimes, norbits))*u.kpc,
vel=np.random.random(size=(ndim//2, ntimes, norbits))*u.km/u.s))
cls.energy_return_shapes += [(ntimes, norbits)]*2
cls.gradient_return_shapes += [(r_ndim, ntimes, norbits)]*2
cls.hessian_return_shapes += [(r_ndim, r_ndim, ntimes, norbits)]*2
_obj_w0s = cls.w0s[:]
for w0, eshp, gshp, hshp in zip(_obj_w0s,
cls.energy_return_shapes,
cls.gradient_return_shapes,
cls.hessian_return_shapes):
cls.w0s.append(w0.w(galactic))
cls.energy_return_shapes.append(eshp)
cls.gradient_return_shapes.append(gshp)
cls.hessian_return_shapes.append(hshp)
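# Shape bookkeeping recap (illustrative, from the values set up above): with
# use_half_ndim == False, a 2D PhaseSpacePosition with pos.shape == (3, 16)
# should give energy shape (16,), gradient shape (6, 16) and hessian shape
# (6, 6, 16); with use_half_ndim == True the leading 6s become 3s.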
def test_energy(self):
for arr, shp in zip(self.w0s, self.energy_return_shapes):
if self.E_unit.is_equivalent(u.one) and hasattr(arr, 'pos') and \
not arr.xyz.unit.is_equivalent(u.one):
continue
v = self.obj.energy(arr)
assert v.shape == shp
assert v.unit.is_equivalent(self.E_unit)
t = np.zeros(np.array(arr).shape[1:]) + 0.1
self.obj.energy(arr, t=0.1)
self.obj.energy(arr, t=t)
self.obj.energy(arr, t=0.1*self.obj.units['time'])
def test_gradient(self):
for arr, shp in zip(self.w0s, self.gradient_return_shapes):
if self.E_unit.is_equivalent(u.one) and hasattr(arr, 'pos') and \
not arr.xyz.unit.is_equivalent(u.one):
continue
v = self.obj.gradient(arr)
assert v.shape == shp
# TODO: check return units
t = np.zeros(np.array(arr).shape[1:]) + 0.1
self.obj.gradient(arr, t=0.1)
self.obj.gradient(arr, t=t)
self.obj.gradient(arr, t=0.1*self.obj.units['time'])
def test_hessian(self):
for arr, shp in zip(self.w0s, self.hessian_return_shapes):
if self.E_unit.is_equivalent(u.one) and hasattr(arr, 'pos') and \
not arr.xyz.unit.is_equivalent(u.one):
continue
g = self.obj.hessian(arr)
assert g.shape == shp
# TODO: check return units
|
adrn/gala
|
gala/potential/hamiltonian/tests/helpers.py
|
Python
|
mit
| 5,372
| 0.001862
|
import os
def get_html_theme_path():
theme_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
return theme_dir
|
iktakahiro/sphinx_theme_pd
|
sphinx_theme_pd/__init__.py
|
Python
|
mit
| 136
| 0
|
#!/usr/bin/env python
# encoding: utf-8
"""
Waf tool for ChibiOS build
"""
from waflib import Errors, Logs, Task, Utils
from waflib.TaskGen import after_method, before_method, feature
import os
import shutil
import sys
import re
import pickle
_dynamic_env_data = {}
def _load_dynamic_env_data(bld):
bldnode = bld.bldnode.make_node('modules/ChibiOS')
tmp_str = bldnode.find_node('include_dirs').read()
tmp_str = tmp_str.replace(';\n','')
tmp_str = tmp_str.replace('-I','') #remove existing -I flags
# split, coping with separator
idirs = re.split('; ', tmp_str)
# create unique list, coping with relative paths
idirs2 = []
for d in idirs:
if d.startswith('../'):
# relative paths from the make build are relative to BUILDROOT
d = os.path.join(bld.env.BUILDROOT, d)
d = os.path.normpath(d)
if not d in idirs2:
idirs2.append(d)
_dynamic_env_data['include_dirs'] = idirs2
@feature('ch_ap_library', 'ch_ap_program')
@before_method('process_source')
def ch_dynamic_env(self):
# The generated files from configuration possibly don't exist if it's just
# a list command (TODO: figure out a better way to address that).
if self.bld.cmd == 'list':
return
if not _dynamic_env_data:
_load_dynamic_env_data(self.bld)
self.use += ' ch'
self.env.append_value('INCLUDES', _dynamic_env_data['include_dirs'])
class upload_fw(Task.Task):
color='BLUE'
always_run = True
def run(self):
upload_tools = self.env.get_flat('UPLOAD_TOOLS')
src = self.inputs[0]
return self.exec_command("python '{}/px_uploader.py' '{}'".format(upload_tools, src))
def exec_command(self, cmd, **kw):
kw['stdout'] = sys.stdout
return super(upload_fw, self).exec_command(cmd, **kw)
def keyword(self):
return "Uploading"
class set_default_parameters(Task.Task):
color='CYAN'
always_run = True
def keyword(self):
return "apj_tool"
def run(self):
rel_default_parameters = self.env.get_flat('DEFAULT_PARAMETERS')
abs_default_parameters = os.path.join(self.env.SRCROOT, rel_default_parameters)
apj_tool = self.env.APJ_TOOL
sys.path.append(os.path.dirname(apj_tool))
from apj_tool import embedded_defaults
defaults = embedded_defaults(self.inputs[0].abspath())
if not defaults.find():
print("Error: Param defaults support not found in firmware")
sys.exit(1)
defaults.set_file(abs_default_parameters)
defaults.save()
class generate_bin(Task.Task):
color='CYAN'
run_str="${OBJCOPY} -O binary ${SRC} ${TGT}"
always_run = True
def keyword(self):
return "Generating"
def __str__(self):
return self.outputs[0].path_from(self.generator.bld.bldnode)
class generate_apj(Task.Task):
'''generate an apj firmware file'''
color='CYAN'
always_run = True
def keyword(self):
return "apj_gen"
def run(self):
import json, time, base64, zlib
img = open(self.inputs[0].abspath(),'rb').read()
d = {
"board_id": int(self.env.APJ_BOARD_ID),
"magic": "APJFWv1",
"description": "Firmware for a %s board" % self.env.APJ_BOARD_TYPE,
"image": base64.b64encode(zlib.compress(img,9)).decode('utf-8'),
"build_time": int(time.time()),
"summary": self.env.BOARD,
"version": "0.1",
"image_size": len(img),
"git_identity": self.generator.bld.git_head_hash(short=True),
"board_revision": 0
}
apj_file = self.outputs[0].abspath()
f = open(apj_file, "w")
f.write(json.dumps(d, indent=4))
f.close()
class build_abin(Task.Task):
'''build an abin file for skyviper firmware upload via web UI'''
color='CYAN'
run_str='${TOOLS_SCRIPTS}/make_abin.sh ${SRC}.bin ${SRC}.abin'
always_run = True
def keyword(self):
return "Generating"
def __str__(self):
return self.outputs[0].path_from(self.generator.bld.bldnode)
class build_intel_hex(Task.Task):
'''build an intel hex file for upload with DFU'''
color='CYAN'
run_str='${TOOLS_SCRIPTS}/make_intel_hex.py ${SRC} ${FLASH_RESERVE_START_KB}'
always_run = True
def keyword(self):
return "Generating"
def __str__(self):
return self.outputs[0].path_from(self.generator.bld.bldnode)
@feature('ch_ap_program')
@after_method('process_source')
def chibios_firmware(self):
self.link_task.always_run = True
link_output = self.link_task.outputs[0]
bin_target = self.bld.bldnode.find_or_declare('bin/' + link_output.change_ext('.bin').name)
apj_target = self.bld.bldnode.find_or_declare('bin/' + link_output.change_ext('.apj').name)
generate_bin_task = self.create_task('generate_bin', src=link_output, tgt=bin_target)
generate_bin_task.set_run_after(self.link_task)
generate_apj_task = self.create_task('generate_apj', src=bin_target, tgt=apj_target)
generate_apj_task.set_run_after(generate_bin_task)
if self.env.BUILD_ABIN:
abin_target = self.bld.bldnode.find_or_declare('bin/' + link_output.change_ext('.abin').name)
abin_task = self.create_task('build_abin', src=link_output, tgt=abin_target)
abin_task.set_run_after(generate_apj_task)
bootloader_bin = self.bld.srcnode.make_node("Tools/bootloaders/%s_bl.bin" % self.env.BOARD)
if os.path.exists(bootloader_bin.abspath()) and self.bld.env.HAVE_INTEL_HEX:
hex_target = self.bld.bldnode.find_or_declare('bin/' + link_output.change_ext('.hex').name)
hex_task = self.create_task('build_intel_hex', src=[bin_target, bootloader_bin], tgt=hex_target)
hex_task.set_run_after(generate_bin_task)
if self.env.DEFAULT_PARAMETERS:
default_params_task = self.create_task('set_default_parameters',
src=link_output)
default_params_task.set_run_after(self.link_task)
generate_bin_task.set_run_after(default_params_task)
if self.bld.options.upload:
_upload_task = self.create_task('upload_fw', src=apj_target)
_upload_task.set_run_after(generate_apj_task)
def setup_can_build(cfg):
'''enable CAN build. By doing this here we can auto-enable CAN in
the build based on the presence of CAN pins in hwdef.dat'''
env = cfg.env
env.AP_LIBRARIES += [
'AP_UAVCAN',
'modules/uavcan/libuavcan/src/**/*.cpp',
'modules/uavcan/libuavcan_drivers/stm32/driver/src/*.cpp'
]
env.CFLAGS += ['-DUAVCAN_STM32_CHIBIOS=1',
'-DUAVCAN_STM32_NUM_IFACES=2']
env.CXXFLAGS += [
'-Wno-error=cast-align',
'-DUAVCAN_STM32_CHIBIOS=1',
'-DUAVCAN_STM32_NUM_IFACES=2'
]
env.DEFINES += [
'UAVCAN_CPP_VERSION=UAVCAN_CPP03',
'UAVCAN_NO_ASSERTIONS=1',
'UAVCAN_NULLPTR=nullptr'
]
env.INCLUDES += [
cfg.srcnode.find_dir('modules/uavcan/libuavcan/include').abspath(),
cfg.srcnode.find_dir('modules/uavcan/libuavcan_drivers/stm32/driver/include').abspath()
]
cfg.get_board().with_uavcan = True
def load_env_vars(env):
'''optionally load extra environment variables from env.py in the build directory'''
print("Checking for env.py")
env_py = os.path.join(env.BUILDROOT, 'env.py')
if not os.path.exists(env_py):
print("No env.py found")
return
e = pickle.load(open(env_py, 'rb'))
for k in e.keys():
v = e[k]
if k == 'ROMFS_FILES':
env.ROMFS_FILES += v
continue
if k in env:
if isinstance(env[k], dict):
a = v.split('=')
env[k][a[0]] = '='.join(a[1:])
print("env updated %s=%s" % (k, v))
elif isinstance(env[k], list):
env[k].append(v)
print("env appended %s=%s" % (k, v))
else:
env[k] = v
print("env added %s=%s" % (k, v))
else:
env[k] = v
print("env set %s=%s" % (k, v))
def configure(cfg):
cfg.find_program('make', var='MAKE')
#cfg.objcopy = cfg.find_program('%s-%s'%(cfg.env.TOOLCHAIN,'objcopy'), var='OBJCOPY', mandatory=True)
cfg.find_program('arm-none-eabi-objcopy', var='OBJCOPY')
env = cfg.env
bldnode = cfg.bldnode.make_node(cfg.variant)
def srcpath(path):
return cfg.srcnode.make_node(path).abspath()
def bldpath(path):
return bldnode.make_node(path).abspath()
env.AP_PROGRAM_FEATURES += ['ch_ap_program']
kw = env.AP_LIBRARIES_OBJECTS_KW
kw['features'] = Utils.to_list(kw.get('features', [])) + ['ch_ap_library']
env.CH_ROOT = srcpath('modules/ChibiOS')
env.AP_HAL_ROOT = srcpath('libraries/AP_HAL_ChibiOS')
env.BUILDDIR = bldpath('modules/ChibiOS')
env.BUILDROOT = bldpath('')
env.SRCROOT = srcpath('')
env.PT_DIR = srcpath('Tools/ardupilotwaf/chibios/image')
env.UPLOAD_TOOLS = srcpath('Tools/ardupilotwaf')
env.CHIBIOS_SCRIPTS = srcpath('libraries/AP_HAL_ChibiOS/hwdef/scripts')
env.TOOLS_SCRIPTS = srcpath('Tools/scripts')
env.APJ_TOOL = srcpath('Tools/scripts/apj_tool.py')
env.SERIAL_PORT = srcpath('/dev/serial/by-id/*_STLink*')
# relative paths to pass to make, relative to directory that make is run from
env.CH_ROOT_REL = os.path.relpath(env.CH_ROOT, env.BUILDROOT)
env.AP_HAL_REL = os.path.relpath(env.AP_HAL_ROOT, env.BUILDROOT)
env.BUILDDIR_REL = os.path.relpath(env.BUILDDIR, env.BUILDROOT)
mk_custom = srcpath('libraries/AP_HAL_ChibiOS/hwdef/%s/chibios_board.mk' % env.BOARD)
mk_common = srcpath('libraries/AP_HAL_ChibiOS/hwdef/common/chibios_board.mk')
# see if there is a board specific make file
if os.path.exists(mk_custom):
env.BOARD_MK = mk_custom
else:
env.BOARD_MK = mk_common
if cfg.options.default_parameters:
cfg.msg('Default parameters', cfg.options.default_parameters, color='YELLOW')
env.DEFAULT_PARAMETERS = srcpath(cfg.options.default_parameters)
# we need to run chibios_hwdef.py at configure stage to generate the ldscript.ld
# that is needed by the remaining configure checks
import subprocess
if env.BOOTLOADER:
env.HWDEF = srcpath('libraries/AP_HAL_ChibiOS/hwdef/%s/hwdef-bl.dat' % env.BOARD)
env.BOOTLOADER_OPTION="--bootloader"
else:
env.HWDEF = srcpath('libraries/AP_HAL_ChibiOS/hwdef/%s/hwdef.dat' % env.BOARD)
env.BOOTLOADER_OPTION=""
hwdef_script = srcpath('libraries/AP_HAL_ChibiOS/hwdef/scripts/chibios_hwdef.py')
hwdef_out = env.BUILDROOT
if not os.path.exists(hwdef_out):
os.mkdir(hwdef_out)
try:
cmd = "python '{0}' -D '{1}' '{2}' {3}".format(hwdef_script, hwdef_out, env.HWDEF, env.BOOTLOADER_OPTION)
ret = subprocess.call(cmd, shell=True)
except Exception:
cfg.fatal("Failed to process hwdef.dat")
if ret != 0:
cfg.fatal("Failed to process hwdef.dat ret=%d" % ret)
load_env_vars(cfg.env)
if env.HAL_WITH_UAVCAN:
setup_can_build(cfg)
def pre_build(bld):
'''pre-build hook to change dynamic sources'''
load_env_vars(bld.env)
if bld.env.HAL_WITH_UAVCAN:
bld.get_board().with_uavcan = True
def build(bld):
bld(
# build hwdef.h from hwdef.dat. This is needed after a waf clean
source=bld.path.ant_glob(bld.env.HWDEF),
rule="python '${AP_HAL_ROOT}/hwdef/scripts/chibios_hwdef.py' -D '${BUILDROOT}' '%s' %s" % (bld.env.HWDEF, bld.env.BOOTLOADER_OPTION),
group='dynamic_sources',
target=[bld.bldnode.find_or_declare('hwdef.h'),
bld.bldnode.find_or_declare('ldscript.ld')]
)
bld(
# create the file modules/ChibiOS/include_dirs
rule="touch Makefile && BUILDDIR=${BUILDDIR_REL} CHIBIOS=${CH_ROOT_REL} AP_HAL=${AP_HAL_REL} ${CHIBIOS_BUILD_FLAGS} ${CHIBIOS_BOARD_NAME} ${MAKE} pass -f '${BOARD_MK}'",
group='dynamic_sources',
target=bld.bldnode.find_or_declare('modules/ChibiOS/include_dirs')
)
common_src = [bld.bldnode.find_or_declare('hwdef.h'),
bld.bldnode.find_or_declare('modules/ChibiOS/include_dirs')]
common_src += bld.path.ant_glob('libraries/AP_HAL_ChibiOS/hwdef/common/*.[ch]')
common_src += bld.path.ant_glob('libraries/AP_HAL_ChibiOS/hwdef/common/*.mk')
common_src += bld.path.ant_glob('modules/ChibiOS/os/hal/**/*.[ch]')
common_src += bld.path.ant_glob('modules/ChibiOS/os/hal/**/*.mk')
if bld.env.ROMFS_FILES:
common_src += [bld.bldnode.find_or_declare('ap_romfs_embedded.h')]
ch_task = bld(
# build libch.a from ChibiOS sources and hwdef.h
rule="BUILDDIR='${BUILDDIR_REL}' CHIBIOS='${CH_ROOT_REL}' AP_HAL=${AP_HAL_REL} ${CHIBIOS_BUILD_FLAGS} ${CHIBIOS_BOARD_NAME} '${MAKE}' lib -f '${BOARD_MK}'",
group='dynamic_sources',
source=common_src,
target=bld.bldnode.find_or_declare('modules/ChibiOS/libch.a')
)
ch_task.name = "ChibiOS_lib"
bld.env.LIB += ['ch']
bld.env.LIBPATH += ['modules/ChibiOS/']
wraplist = ['strerror_r', 'fclose', 'freopen', 'fread']
for w in wraplist:
bld.env.LINKFLAGS += ['-Wl,--wrap,%s' % w]
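# GNU ld's --wrap,<symbol> redirects calls to <symbol> to __wrap_<symbol>, so the
# build can substitute its own implementations of these libc functions at link time.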
| yonahbox/ardupilot | Tools/ardupilotwaf/chibios.py | Python | gpl-3.0 | 13,403 | 0.005596 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-04-19 10:32
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0015_auto_20160408_1355'),
]
operations = [
migrations.AlterField(
model_name='feedback',
name='service_code',
field=models.CharField(max_length=120, null=True),
),
migrations.AlterField(
model_name='service',
name='service_code',
field=models.CharField(max_length=120, unique=True),
),
]
| hep7agon/city-feedback-hub | api/migrations/0016_service_code.py | Python | mit | 640 | 0 |
__copyright__ = """
Copyright (C) 2006, Catalin Marinas <catalin.marinas@gmail.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
published by the Free Software Foundation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import sys, os
import stgit.commands.common
from stgit.argparse import opt
from stgit.commands.common import *
from stgit.utils import *
from stgit.out import *
from stgit import argparse, stack, git
help = 'Synchronise patches with a branch or a series'
kind = 'patch'
usage = ['[options] [<patch1>] [<patch2>] [<patch3>..<patch4>]']
description = """
For each of the specified patches perform a three-way merge with the
same patch in the specified branch or series. The command can be used
for keeping patches on several branches in sync. Note that the
operation may fail for some patches because of conflicts. The patches
in the series must apply cleanly."""
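# A typical invocation (branch and patch names are examples only) would be
#   stg sync -B devel patch-a patch-b..patch-e
# which three-way merges each named patch with its counterpart on branch 'devel'.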
args = [argparse.patch_range(argparse.applied_patches,
argparse.unapplied_patches)]
options = [
opt('-a', '--all', action = 'store_true',
short = 'Synchronise all the applied patches'),
opt('-B', '--ref-branch', args = [argparse.stg_branches],
short = 'Synchronise patches with BRANCH'),
opt('-s', '--series', args = [argparse.files],
short = 'Synchronise patches with SERIES')]
directory = DirectoryGotoToplevel(log = True)
def __check_all():
check_local_changes()
check_conflicts()
check_head_top_equal(crt_series)
def __branch_merge_patch(remote_series, pname):
"""Merge a patch from a remote branch into the current tree.
"""
patch = remote_series.get_patch(pname)
git.merge_recursive(patch.get_bottom(), git.get_head(), patch.get_top())
def __series_merge_patch(base, patchdir, pname):
"""Merge a patch file with the given StGIT patch.
"""
patchfile = os.path.join(patchdir, pname)
git.apply_patch(filename = patchfile, base = base)
def func(parser, options, args):
"""Synchronise a range of patches
"""
if options.ref_branch:
remote_series = stack.Series(options.ref_branch)
if options.ref_branch == crt_series.get_name():
raise CmdException, 'Cannot synchronise with the current branch'
remote_patches = remote_series.get_applied()
# the merge function merge_patch(patch, pname)
merge_patch = lambda patch, pname: \
__branch_merge_patch(remote_series, pname)
elif options.series:
patchdir = os.path.dirname(options.series)
remote_patches = []
f = file(options.series)
for line in f:
p = re.sub('#.*$', '', line).strip()
if not p:
continue
remote_patches.append(p)
f.close()
# the merge function merge_patch(patch, pname)
merge_patch = lambda patch, pname: \
__series_merge_patch(patch.get_bottom(), patchdir, pname)
else:
raise CmdException, 'No remote branch or series specified'
applied = crt_series.get_applied()
unapplied = crt_series.get_unapplied()
if options.all:
patches = applied
elif len(args) != 0:
patches = parse_patches(args, applied + unapplied, len(applied),
ordered = True)
elif applied:
patches = [crt_series.get_current()]
else:
parser.error('no patches applied')
if not patches:
raise CmdException, 'No patches to synchronise'
__check_all()
# only keep the patches to be synchronised
sync_patches = [p for p in patches if p in remote_patches]
if not sync_patches:
raise CmdException, 'No common patches to be synchronised'
# pop to the one before the first patch to be synchronised
first_patch = sync_patches[0]
if first_patch in applied:
to_pop = applied[applied.index(first_patch) + 1:]
if to_pop:
pop_patches(crt_series, to_pop[::-1])
pushed = [first_patch]
else:
to_pop = []
pushed = []
popped = to_pop + [p for p in patches if p in unapplied]
for p in pushed + popped:
if p in popped:
# push this patch
push_patches(crt_series, [p])
if p not in sync_patches:
# nothing to synchronise
continue
# the actual sync
out.start('Synchronising "%s"' % p)
patch = crt_series.get_patch(p)
bottom = patch.get_bottom()
top = patch.get_top()
# reset the patch backup information.
patch.set_top(top, backup = True)
# the actual merging (either from a branch or an external file)
merge_patch(patch, p)
if git.local_changes(verbose = False):
# index (cache) already updated by the git merge. The
# backup information was already reset above
crt_series.refresh_patch(cache_update = False, backup = False,
log = 'sync')
out.done('updated')
else:
out.done()
| miracle2k/stgit | stgit/commands/sync.py | Python | gpl-2.0 | 5,549 | 0.007929 |
'''
modified by Chongxuan Li (chongxuanli1991@gmail.com)
'''
import sys
sys.path.append('..')
sys.path.append('../../data/')
import os, numpy as np
import scipy.io as sio
import time
import anglepy as ap
import anglepy.paramgraphics as paramgraphics
import anglepy.ndict as ndict
import theano
import theano.tensor as T
from collections import OrderedDict
import preprocessing as pp
import color
def zca_dec(zca_mean, zca_winv, data):
return zca_winv.dot(data) + zca_mean
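# zca_dec() above inverts ZCA whitening: it maps whitened column vectors back to
# (approximately) the original data space, and is used below when visualising
# weights and samples for the ZCA-preprocessed datasets.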
def labelToMat(y):
label = np.unique(y)
newy = np.zeros((len(y), len(label)))
for i in range(len(y)):
newy[i, y[i]] = 1
return newy.T
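# labelToMat() above one-hot encodes integer labels into an (n_classes, n_samples)
# matrix, e.g. labels [0, 2, 1] give a 3x3 matrix with ones at (0,0), (2,1), (1,2).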
def main(n_z, n_hidden, dataset, seed, comment, gfx=True):
# Initialize logdir
import time
pre_dir = 'models/gpulearn_z_x_mnist_96-(500, 500)'
if os.environ.has_key('pretrain') and bool(int(os.environ['pretrain'])) == True:
comment+='_pre-train'
if os.environ.has_key('prior') and bool(int(os.environ['prior'])) == True:
comment+='_prior'
pre_dir+='_prior'
if os.environ.has_key('cutoff'):
comment+=('_'+str(int(os.environ['cutoff'])))
if os.environ.has_key('train_residual') and bool(int(os.environ['train_residual'])) == True:
comment+='_train-residual'
pre_dir+='_train-residual'
if os.environ.has_key('sigma_square'):
comment+=('_'+str(float(os.environ['sigma_square'])))
pre_dir+=('_'+str(float(os.environ['sigma_square'])))
pre_dir+='/'
logdir = 'results/gpulearn_z_x_'+dataset+'_'+str(n_z)+'-'+str(n_hidden)+comment+'_'+str(int(time.time()))+'/'
if not os.path.exists(logdir): os.makedirs(logdir)
print 'logdir:', logdir
print 'gpulearn_z_x', n_z, n_hidden, dataset, seed
with open(logdir+'hook.txt', 'a') as f:
print >>f, 'learn_z_x', n_z, n_hidden, dataset, seed
np.random.seed(seed)
gfx_freq = 1
weight_decay = 0
# Init data
if dataset == 'mnist':
import anglepy.data.mnist as mnist
# MNIST
size = 28
train_x, train_y, valid_x, valid_y, test_x, test_y = mnist.load_numpy(size)
f_enc, f_dec = pp.Identity()
if os.environ.has_key('prior') and bool(int(os.environ['prior'])) == True:
color.printBlue('Loading prior')
mnist_prior = sio.loadmat('data/mnist_prior/mnist_prior.mat')
train_mean_prior = mnist_prior['z_train']
test_mean_prior = mnist_prior['z_test']
valid_mean_prior = mnist_prior['z_valid']
else:
train_mean_prior = np.zeros((n_z,train_x.shape[1]))
test_mean_prior = np.zeros((n_z,test_x.shape[1]))
valid_mean_prior = np.zeros((n_z,valid_x.shape[1]))
x = {'x': train_x.astype(np.float32), 'mean_prior': train_mean_prior.astype(np.float32)}
x_train = x
x_valid = {'x': valid_x.astype(np.float32), 'mean_prior': valid_mean_prior.astype(np.float32)}
x_test = {'x': test_x.astype(np.float32), 'mean_prior': test_mean_prior.astype(np.float32)}
L_valid = 1
dim_input = (size,size)
n_x = size*size
type_qz = 'gaussianmarg'
type_pz = 'gaussianmarg'
nonlinear = 'softplus'
type_px = 'bernoulli'
n_train = 50000
n_test = 10000
n_valid = 10000
n_batch = 1000
colorImg = False
bernoulli_x = True
byteToFloat = False
weight_decay = float(n_batch)/n_train
elif dataset == 'higgs':
size = 28
f_enc, f_dec = pp.Identity()
inputfile = 'data/higgs/HIGGS.csv'
print 'loading file.'
x = np.loadtxt(inputfile, dtype='f4', delimiter=',')
print 'done.'
y = x[:,0].reshape((-1,1))
x = x[:,1:]
x = np.array(x, dtype='float32')
y = np.array(y, dtype='float32')
n_train = 10000000
n_valid = 500000
n_test = 500000
n_batch = 1000
derived_feat = 'all'
if os.environ.has_key('derived_feat'):
derived_feat = os.environ['derived_feat']
color.printBlue(derived_feat)
if derived_feat == 'high':
# Only the 7 high level features.
x = x[:, 21:28]
elif derived_feat == 'low':
# Only the 21 raw features.
x = x[:, 0:21]
else:
pass
train_x = x[0:n_train, :].T
y_train = y[0:n_train, :]
valid_x = x[n_train:n_train+n_valid, :].T
y_valid = y[n_train:n_train+n_valid, :]
test_x = x[n_train+n_valid:n_train+n_valid+n_test, :].T
y_test = y[n_train+n_valid:n_train+n_valid+n_test, :]
n_y = 2
n_x = train_x.shape[0]
train_mean_prior = np.zeros((n_z,train_x.shape[1]))
test_mean_prior = np.zeros((n_z,test_x.shape[1]))
valid_mean_prior = np.zeros((n_z,valid_x.shape[1]))
x = {'x': train_x.astype(np.float32), 'mean_prior': train_mean_prior.astype(np.float32)}
x_train = x
x_valid = {'x': valid_x.astype(np.float32), 'mean_prior': valid_mean_prior.astype(np.float32)}
x_test = {'x': test_x.astype(np.float32), 'mean_prior': test_mean_prior.astype(np.float32)}
type_qz = 'gaussianmarg'
type_pz = 'gaussianmarg'
nonlinear = 'tanh'
if os.environ.has_key('nonlinear'):
nonlinear = os.environ['nonlinear']
color.printBlue(nonlinear)
L_valid = 1
dim_input = (1,size)
type_px = 'gaussian'
colorImg = False
bernoulli_x = False
byteToFloat = False
weight_decay = float(n_batch)/n_train
elif dataset == 'cifar10':
import anglepy.data.cifar10 as cifar10
size = 32
train_x, train_y, test_x, test_y = cifar10.load_numpy()
train_x = train_x.astype(np.float32).T
test_x = test_x.astype(np.float32).T
##
f_enc, f_dec = pp.Identity()
if os.environ.has_key('prior') and bool(int(os.environ['prior'])) == True:
color.printBlue('Loading prior')
cifar_prior = sio.loadmat('data/cifar10_prior/cifar10_prior.mat')
train_mean_prior = cifar_prior['z_train']
test_mean_prior = cifar_prior['z_test']
else:
train_mean_prior = np.zeros((n_z,train_x.shape[1]))
test_mean_prior = np.zeros((n_z,test_x.shape[1]))
x = {'x': train_x.astype(np.float32), 'mean_prior': train_mean_prior.astype(np.float32)}
x_train = x
x_test = {'x': test_x.astype(np.float32), 'mean_prior': test_mean_prior.astype(np.float32)}
x_valid = x_test
L_valid = 1
n_y = 10
dim_input = (size,size)
n_x = x['x'].shape[0]
type_qz = 'gaussianmarg'
type_pz = 'gaussianmarg'
nonlinear = 'softplus'
type_px = 'gaussian'
if os.environ.has_key('type_px'):
type_px = os.environ['type_px']
color.printBlue('Generative type: '+type_px)
n_train = 50000
n_test = 10000
n_batch = 5000
colorImg = True
bernoulli_x = False
byteToFloat = False
#weight_decay = float(n_batch)/n_train
elif dataset == 'cifar10_zca':
import anglepy.data.cifar10 as cifar10
size = 32
train_x, train_y, test_x, test_y = cifar10.load_numpy()
train_x = train_x.astype(np.float32).T
test_x = test_x.astype(np.float32).T
##
f_enc, f_dec = pp.Identity()
zca_mean, zca_w, zca_winv = cifar10.zca(train_x)
train_x = zca_w.dot(train_x-zca_mean)
test_x = zca_w.dot(test_x-zca_mean)
if os.environ.has_key('prior') and bool(int(os.environ['prior'])) == True:
color.printBlue('Loading prior')
cifar_prior = sio.loadmat('data/cifar10_prior/cifar10_prior.mat')
train_mean_prior = cifar_prior['z_train']
test_mean_prior = cifar_prior['z_test']
else:
train_mean_prior = np.zeros((n_z,train_x.shape[1]))
test_mean_prior = np.zeros((n_z,test_x.shape[1]))
x = {'x': train_x.astype(np.float32), 'mean_prior': train_mean_prior.astype(np.float32)}
x_train = x
x_test = {'x': test_x.astype(np.float32), 'mean_prior': test_mean_prior.astype(np.float32)}
x_valid = x_test
L_valid = 1
dim_input = (size,size)
n_y = 10
n_x = x['x'].shape[0]
type_qz = 'gaussianmarg'
type_pz = 'gaussianmarg'
nonlinear = 'softplus'
type_px = 'gaussian'
n_train = 50000
n_test = 10000
n_batch = 5000
colorImg = True
bernoulli_x = False
byteToFloat = False
if os.environ.has_key('type_px'):
type_px = os.environ['type_px']
color.printBlue('Generative type: '+type_px)
nonlinear = 'softplus'
elif dataset == 'mnist_basic':
# MNIST
size = 28
data_dir = os.environ['ML_DATA_PATH']+'/mnist_variations/'+'mnist_'
tmp = sio.loadmat(data_dir+'train.mat')
#color.printRed(data_dir+'train.mat')
train_x = tmp['x_train'].T
train_y = tmp['t_train'].T.astype(np.int32)
# validation 2000
valid_x = train_x[:,10000:]
valid_y = train_y[10000:]
train_x = train_x[:,:10000]
train_y = train_y[:10000]
tmp = sio.loadmat(data_dir+'test.mat')
test_x = tmp['x_test'].T
test_y = tmp['t_test'].T.astype(np.int32)
print train_x.shape
print train_y.shape
print test_x.shape
print test_y.shape
f_enc, f_dec = pp.Identity()
train_mean_prior = np.zeros((n_z,train_x.shape[1]))
test_mean_prior = np.zeros((n_z,test_x.shape[1]))
valid_mean_prior = np.zeros((n_z,valid_x.shape[1]))
'''
x = {'x': train_x.astype(np.float32), 'y': labelToMat(train_y).astype(np.float32)}
x_train = x
x_valid = {'x': valid_x.astype(np.float32), 'y': labelToMat(valid_y).astype(np.float32)}
x_test = {'x': test_x.astype(np.float32), 'y': labelToMat(test_y).astype(np.float32)}
'''
x = {'x': train_x.astype(np.float32), 'mean_prior': train_mean_prior.astype(np.float32)}
x_train = x
x_valid = {'x': valid_x.astype(np.float32), 'mean_prior': valid_mean_prior.astype(np.float32)}
x_test = {'x': test_x.astype(np.float32), 'mean_prior': test_mean_prior.astype(np.float32)}
L_valid = 1
dim_input = (size,size)
n_x = size*size
n_y = 10
type_qz = 'gaussianmarg'
type_pz = 'gaussianmarg'
nonlinear = 'softplus'
type_px = 'bernoulli'
n_train = 10000
n_valid = 2000
n_test = 50000
n_batch = 200
colorImg = False
bernoulli_x = True
byteToFloat = False
weight_decay = float(n_batch)/n_train
elif dataset == 'rectangle':
# MNIST
size = 28
data_dir = os.environ['ML_DATA_PATH']+'/mnist_variations/'+'rectangles_'
tmp = sio.loadmat(data_dir+'train.mat')
color.printRed(data_dir+'train.mat')
train_x = tmp['x_train'].T
train_y = tmp['t_train'].T.astype(np.int32)
# validation 200
valid_x = train_x[:,1000:]
valid_y = train_y[1000:]
train_x = train_x[:,:1000]
train_y = train_y[:1000]
tmp = sio.loadmat(data_dir+'test.mat')
test_x = tmp['x_test'].T
test_y = tmp['t_test'].T.astype(np.int32)
print train_x.shape
print train_y.shape
print test_x.shape
print test_y.shape
f_enc, f_dec = pp.Identity()
train_mean_prior = np.zeros((n_z,train_x.shape[1]))
test_mean_prior = np.zeros((n_z,test_x.shape[1]))
valid_mean_prior = np.zeros((n_z,valid_x.shape[1]))
'''
x = {'x': train_x.astype(np.float32), 'y': labelToMat(train_y).astype(np.float32)}
x_train = x
x_valid = {'x': valid_x.astype(np.float32), 'y': labelToMat(valid_y).astype(np.float32)}
x_test = {'x': test_x.astype(np.float32), 'y': labelToMat(test_y).astype(np.float32)}
'''
x = {'x': train_x.astype(np.float32), 'mean_prior': train_mean_prior.astype(np.float32)}
x_train = x
x_valid = {'x': valid_x.astype(np.float32), 'mean_prior': valid_mean_prior.astype(np.float32)}
x_test = {'x': test_x.astype(np.float32), 'mean_prior': test_mean_prior.astype(np.float32)}
L_valid = 1
dim_input = (size,size)
n_x = size*size
n_y = 2
type_qz = 'gaussianmarg'
type_pz = 'gaussianmarg'
nonlinear = 'softplus'
type_px = 'bernoulli'
n_train = 1000
n_valid = 200
n_test = 50000
n_batch = 500
colorImg = False
bernoulli_x = True
byteToFloat = False
weight_decay = float(n_batch)/n_train
#print '3', n_x
elif dataset == 'convex':
# MNIST
size = 28
data_dir = os.environ['ML_DATA_PATH']+'/mnist_variations/'+'convex_'
tmp = sio.loadmat(data_dir+'train.mat')
train_x = tmp['x_train'].T
train_y = tmp['t_train'].T.astype(np.int32)
# validation 2000
valid_x = train_x[:,6000:]
valid_y = train_y[6000:]
train_x = train_x[:,:6000]
train_y = train_y[:6000]
tmp = sio.loadmat(data_dir+'test.mat')
test_x = tmp['x_test'].T
test_y = tmp['t_test'].T.astype(np.int32)
print train_x.shape
print train_y.shape
print test_x.shape
print test_y.shape
f_enc, f_dec = pp.Identity()
train_mean_prior = np.zeros((n_z,train_x.shape[1]))
test_mean_prior = np.zeros((n_z,test_x.shape[1]))
valid_mean_prior = np.zeros((n_z,valid_x.shape[1]))
'''
x = {'x': train_x.astype(np.float32), 'y': labelToMat(train_y).astype(np.float32)}
x_train = x
x_valid = {'x': valid_x.astype(np.float32), 'y': labelToMat(valid_y).astype(np.float32)}
x_test = {'x': test_x.astype(np.float32), 'y': labelToMat(test_y).astype(np.float32)}
'''
x = {'x': train_x.astype(np.float32), 'mean_prior': train_mean_prior.astype(np.float32)}
x_train = x
x_valid = {'x': valid_x.astype(np.float32), 'mean_prior': valid_mean_prior.astype(np.float32)}
x_test = {'x': test_x.astype(np.float32), 'mean_prior': test_mean_prior.astype(np.float32)}
L_valid = 1
dim_input = (size,size)
n_x = size*size
n_y = 2
type_qz = 'gaussianmarg'
type_pz = 'gaussianmarg'
nonlinear = 'softplus'
type_px = 'bernoulli'
n_train = 6000
n_valid = 2000
n_test = 50000
n_batch = 120
colorImg = False
bernoulli_x = True
byteToFloat = False
weight_decay = float(n_batch)/n_train
elif dataset == 'rectangle_image':
# MNIST
size = 28
data_dir = os.environ['ML_DATA_PATH']+'/mnist_variations/'+'rectangles_im_'
tmp = sio.loadmat(data_dir+'train.mat')
train_x = tmp['x_train'].T
train_y = tmp['t_train'].T.astype(np.int32)
# validation 2000
valid_x = train_x[:,10000:]
valid_y = train_y[10000:]
train_x = train_x[:,:10000]
train_y = train_y[:10000]
tmp = sio.loadmat(data_dir+'test.mat')
test_x = tmp['x_test'].T
test_y = tmp['t_test'].T.astype(np.int32)
print train_x.shape
print train_y.shape
print test_x.shape
print test_y.shape
f_enc, f_dec = pp.Identity()
train_mean_prior = np.zeros((n_z,train_x.shape[1]))
test_mean_prior = np.zeros((n_z,test_x.shape[1]))
valid_mean_prior = np.zeros((n_z,valid_x.shape[1]))
'''
x = {'x': train_x.astype(np.float32), 'y': labelToMat(train_y).astype(np.float32)}
x_train = x
x_valid = {'x': valid_x.astype(np.float32), 'y': labelToMat(valid_y).astype(np.float32)}
x_test = {'x': test_x.astype(np.float32), 'y': labelToMat(test_y).astype(np.float32)}
'''
x = {'x': train_x.astype(np.float32), 'mean_prior': train_mean_prior.astype(np.float32)}
x_train = x
x_valid = {'x': valid_x.astype(np.float32), 'mean_prior': valid_mean_prior.astype(np.float32)}
x_test = {'x': test_x.astype(np.float32), 'mean_prior': test_mean_prior.astype(np.float32)}
L_valid = 1
dim_input = (size,size)
n_x = size*size
n_y = 2
type_qz = 'gaussianmarg'
type_pz = 'gaussianmarg'
nonlinear = 'softplus'
type_px = 'bernoulli'
n_train = 10000
n_valid = 2000
n_test = 50000
n_batch = 200
colorImg = False
bernoulli_x = True
byteToFloat = False
weight_decay = float(n_batch)/n_train
elif dataset == 'mnist_rot':
# MNIST
size = 28
data_dir = os.environ['ML_DATA_PATH']+'/mnist_variations/'+'mnist_all_rotation_normalized_float_'
tmp = sio.loadmat(data_dir+'train.mat')
train_x = tmp['x_train'].T
train_y = tmp['t_train'].T.astype(np.int32)
# validation 2000
valid_x = train_x[:,10000:]
valid_y = train_y[10000:]
train_x = train_x[:,:10000]
train_y = train_y[:10000]
tmp = sio.loadmat(data_dir+'test.mat')
test_x = tmp['x_test'].T
test_y = tmp['t_test'].T.astype(np.int32)
print train_x.shape
print train_y.shape
print test_x.shape
print test_y.shape
train_mean_prior = np.zeros((n_z,train_x.shape[1]))
test_mean_prior = np.zeros((n_z,test_x.shape[1]))
valid_mean_prior = np.zeros((n_z,valid_x.shape[1]))
f_enc, f_dec = pp.Identity()
x = {'x': train_x.astype(np.float32), 'mean_prior': train_mean_prior.astype(np.float32)}
x_train = x
x_valid = {'x': valid_x.astype(np.float32), 'mean_prior': valid_mean_prior.astype(np.float32)}
x_test = {'x': test_x.astype(np.float32), 'mean_prior': test_mean_prior.astype(np.float32)}
L_valid = 1
dim_input = (size,size)
n_x = size*size
n_y = 10
type_qz = 'gaussianmarg'
type_pz = 'gaussianmarg'
nonlinear = 'softplus'
type_px = 'bernoulli'
n_train = 10000
n_valid = 2000
n_test = 50000
n_batch = 200
colorImg = False
bernoulli_x = True
byteToFloat = False
weight_decay = float(n_batch)/n_train
elif dataset == 'mnist_back_rand':
# MNIST
size = 28
data_dir = os.environ['ML_DATA_PATH']+'/mnist_variations/'+'mnist_background_random_'
tmp = sio.loadmat(data_dir+'train.mat')
train_x = tmp['x_train'].T
train_y = tmp['t_train'].T.astype(np.int32)
# validation 2000
valid_x = train_x[:,10000:]
valid_y = train_y[10000:]
train_x = train_x[:,:10000]
train_y = train_y[:10000]
tmp = sio.loadmat(data_dir+'test.mat')
test_x = tmp['x_test'].T
test_y = tmp['t_test'].T.astype(np.int32)
print train_x.shape
print train_y.shape
print test_x.shape
print test_y.shape
train_mean_prior = np.zeros((n_z,train_x.shape[1]))
test_mean_prior = np.zeros((n_z,test_x.shape[1]))
valid_mean_prior = np.zeros((n_z,valid_x.shape[1]))
f_enc, f_dec = pp.Identity()
x = {'x': train_x.astype(np.float32), 'mean_prior': train_mean_prior.astype(np.float32)}
x_train = x
x_valid = {'x': valid_x.astype(np.float32), 'mean_prior': valid_mean_prior.astype(np.float32)}
x_test = {'x': test_x.astype(np.float32), 'mean_prior': test_mean_prior.astype(np.float32)}
L_valid = 1
dim_input = (size,size)
n_x = size*size
n_y = 10
type_qz = 'gaussianmarg'
type_pz = 'gaussianmarg'
nonlinear = 'softplus'
type_px = 'bernoulli'
n_train = 10000
n_valid = 2000
n_test = 50000
n_batch = 200
colorImg = False
bernoulli_x = True
byteToFloat = False
weight_decay = float(n_batch)/n_train
elif dataset == 'mnist_back_image':
# MNIST
size = 28
data_dir = os.environ['ML_DATA_PATH']+'/mnist_variations/'+'mnist_background_images_'
tmp = sio.loadmat(data_dir+'train.mat')
train_x = tmp['x_train'].T
train_y = tmp['t_train'].T.astype(np.int32)
# validation 2000
valid_x = train_x[:,10000:]
valid_y = train_y[10000:]
train_x = train_x[:,:10000]
train_y = train_y[:10000]
tmp = sio.loadmat(data_dir+'test.mat')
test_x = tmp['x_test'].T
test_y = tmp['t_test'].T.astype(np.int32)
print train_x.shape
print train_y.shape
print test_x.shape
print test_y.shape
train_mean_prior = np.zeros((n_z,train_x.shape[1]))
test_mean_prior = np.zeros((n_z,test_x.shape[1]))
valid_mean_prior = np.zeros((n_z,valid_x.shape[1]))
f_enc, f_dec = pp.Identity()
x = {'x': train_x.astype(np.float32), 'mean_prior': train_mean_prior.astype(np.float32)}
x_train = x
x_valid = {'x': valid_x.astype(np.float32), 'mean_prior': valid_mean_prior.astype(np.float32)}
x_test = {'x': test_x.astype(np.float32), 'mean_prior': test_mean_prior.astype(np.float32)}
L_valid = 1
dim_input = (size,size)
n_x = size*size
n_y = 10
type_qz = 'gaussianmarg'
type_pz = 'gaussianmarg'
nonlinear = 'softplus'
type_px = 'bernoulli'
n_train = 10000
n_valid = 2000
n_test = 50000
n_batch = 200
colorImg = False
bernoulli_x = True
byteToFloat = False
weight_decay = float(n_batch)/n_train
elif dataset == 'mnist_back_image_rot':
# MNIST
size = 28
data_dir = os.environ['ML_DATA_PATH']+'/mnist_variations/'+'mnist_all_background_images_rotation_normalized_'
tmp = sio.loadmat(data_dir+'train.mat')
train_x = tmp['x_train'].T
train_y = tmp['t_train'].T.astype(np.int32)
# validation 2000
valid_x = train_x[:,10000:]
valid_y = train_y[10000:]
train_x = train_x[:,:10000]
train_y = train_y[:10000]
tmp = sio.loadmat(data_dir+'test.mat')
test_x = tmp['x_test'].T
test_y = tmp['t_test'].T.astype(np.int32)
print train_x.shape
print train_y.shape
print test_x.shape
print test_y.shape
train_mean_prior = np.zeros((n_z,train_x.shape[1]))
test_mean_prior = np.zeros((n_z,test_x.shape[1]))
valid_mean_prior = np.zeros((n_z,valid_x.shape[1]))
f_enc, f_dec = pp.Identity()
x = {'x': train_x.astype(np.float32), 'mean_prior': train_mean_prior.astype(np.float32)}
x_train = x
x_valid = {'x': valid_x.astype(np.float32), 'mean_prior': valid_mean_prior.astype(np.float32)}
x_test = {'x': test_x.astype(np.float32), 'mean_prior': test_mean_prior.astype(np.float32)}
L_valid = 1
dim_input = (size,size)
n_x = size*size
n_y = 10
type_qz = 'gaussianmarg'
type_pz = 'gaussianmarg'
nonlinear = 'softplus'
type_px = 'bernoulli'
n_train = 10000
n_valid = 2000
n_test = 50000
n_batch = 200
colorImg = False
bernoulli_x = True
byteToFloat = False
weight_decay = float(n_batch)/n_train
elif dataset == 'mnist_binarized':
#import anglepy.data.mnist_binarized as mnist_binarized
# MNIST
import anglepy.data.mnist as mnist
size = 28
data_dir = '/home/lichongxuan/regbayes2/data/mat_data/'+'binarized_mnist_'
tmp = sio.loadmat(data_dir+'train.mat')
train_x = tmp['x_train'].T
#train_y = tmp['t_train'].T.astype(np.int32)
tmp = sio.loadmat(data_dir+'test.mat')
test_x = tmp['x_test'].T
tmp = sio.loadmat(data_dir+'valid.mat')
#print tmp.keys()
valid_x = tmp['x_valid'].T
#test_y = tmp['t_test'].T.astype(np.int32)
f_enc, f_dec = pp.Identity()
train_mean_prior = np.zeros((n_z,train_x.shape[1]))
test_mean_prior = np.zeros((n_z,test_x.shape[1]))
valid_mean_prior = np.zeros((n_z,valid_x.shape[1]))
train_x = np.hstack((train_x, valid_x)).astype(np.float32)
train_mean_prior = np.hstack((train_mean_prior,valid_mean_prior)).astype(np.float32)
print train_mean_prior.shape
print train_x.shape
x = {'x': train_x.astype(np.float32), 'mean_prior':train_mean_prior.astype(np.float32)}
x_train = x
x_valid = {'x': test_x.astype(np.float32),'mean_prior':test_mean_prior.astype(np.float32)}
x_test = x_valid
L_valid = 1
dim_input = (28,28)
n_x = 28*28
n_y = 10
type_qz = 'gaussianmarg'
type_pz = 'gaussianmarg'
nonlinear = 'softplus'
type_px = 'bernoulli'
n_train = 60000
n_valid = 10000
n_batch = 1000
colorImg = False
bernoulli_x = False
byteToFloat = False
weight_decay = float(n_batch)/n_train
elif dataset == 'mnist_binarized_own':
#import anglepy.data.mnist_binarized as mnist_binarized
# MNIST
import anglepy.data.mnist as mnist
size = 28
data_dir = 'data/mnist_binarized_own/'+'binarized_mnist_'
tmp = sio.loadmat(data_dir+'train.mat')
train_x = tmp['train_x'].T
#train_y = tmp['t_train'].T.astype(np.int32)
tmp = sio.loadmat(data_dir+'test.mat')
test_x = tmp['test_x'].T
tmp = sio.loadmat(data_dir+'valid.mat')
#print tmp.keys()
valid_x = tmp['valid_x'].T
#test_y = tmp['t_test'].T.astype(np.int32)
f_enc, f_dec = pp.Identity()
train_mean_prior = np.zeros((n_z,train_x.shape[1]))
test_mean_prior = np.zeros((n_z,test_x.shape[1]))
valid_mean_prior = np.zeros((n_z,valid_x.shape[1]))
train_x = np.hstack((train_x, valid_x)).astype(np.float32)
train_mean_prior = np.hstack((train_mean_prior,valid_mean_prior)).astype(np.float32)
print train_mean_prior.shape
print train_x.shape
x = {'x': train_x.astype(np.float32), 'mean_prior':train_mean_prior.astype(np.float32)}
x_train = x
x_valid = {'x': test_x.astype(np.float32),'mean_prior':test_mean_prior.astype(np.float32)}
x_test = x_valid
L_valid = 1
dim_input = (28,28)
n_x = 28*28
n_y = 10
type_qz = 'gaussianmarg'
type_pz = 'gaussianmarg'
nonlinear = 'softplus'
type_px = 'bernoulli'
n_train = 60000
n_valid = 10000
n_batch = 1000
colorImg = False
bernoulli_x = False
byteToFloat = False
weight_decay = float(n_batch)/n_train
elif dataset == 'freyface':
# Frey's face
import anglepy.data.freyface as freyface
n_train = 1600
train_x = freyface.load_numpy()
np.random.shuffle(train_x)
x = {'x': train_x.T[:,0:n_train]}
x_valid = {'x': train_x.T[:,n_train:]}
L_valid = 1
dim_input = (28,20)
n_x = 20*28
type_qz = 'gaussianmarg'
type_pz = 'gaussianmarg'
type_px = 'bounded01'
nonlinear = 'tanh' #tanh works better with freyface #'softplus'
n_batch = 100
colorImg = False
bernoulli_x = False
byteToFloat = False
weight_decay = float(n_batch)/n_train
elif dataset == 'freyface_pca':
# Frey's face
import anglepy.data.freyface as freyface
n_train = 1600
train_x = freyface.load_numpy().T
np.random.shuffle(train_x.T)
f_enc, f_dec, _ = pp.PCA(train_x, 0.99)
train_x = f_enc(train_x)
x = {'x': train_x[:,0:n_train].astype(np.float32)}
x_valid = {'x': train_x[:,n_train:].astype(np.float32)}
L_valid = 1
dim_input = (28,20)
n_x = train_x.shape[0]
type_qz = 'gaussianmarg'
type_pz = 'gaussianmarg'
type_px = 'gaussian'
nonlinear = 'softplus'
n_batch = 100
colorImg = False
bernoulli_x = False
byteToFloat = False
elif dataset == 'freyface_bernoulli':
# Frey's face
import anglepy.data.freyface as freyface
n_train = 1600
train_x = freyface.load_numpy().T
np.random.shuffle(train_x.T)
x = {'x': train_x[:,0:n_train].astype(np.float32)}
x_valid = {'x': train_x[:,n_train:].astype(np.float32)}
L_valid = 1
dim_input = (28,20)
n_x = train_x.shape[0]
type_pz = 'gaussianmarg'
type_px = 'bernoulli'
nonlinear = 'softplus'
n_batch = 100
colorImg = False
bernoulli_x = False
byteToFloat = False
elif dataset == 'norb_48_24300_pca':
size = 48
train_x, train_y, test_x, test_y = np.load('data/norb/norb_48_24300.npy')
_x = {'x': train_x, 'y': train_y}
#ndict.shuffleCols(_x)
#train_x = _x['x']
#train_y = _x['y']
#print _x['x'][:,:10000].shape
# Do PCA
print 'pca'
f_enc, f_dec, pca_params = pp.PCA(_x['x'][:,:10000], cutoff=500, toFloat=False)
ndict.savez(pca_params, logdir+'pca_params')
print 'done'
train_mean_prior = np.zeros((n_z,train_x.shape[1]))
test_mean_prior = np.zeros((n_z,test_x.shape[1]))
x = {'x': f_enc(train_x).astype(np.float32), 'mean_prior' : train_mean_prior.astype(np.float32)}
x_valid = {'x': f_enc(test_x).astype(np.float32), 'mean_prior' : test_mean_prior.astype(np.float32)}
x_test = {'x': f_enc(test_x).astype(np.float32), 'mean_prior' : test_mean_prior.astype(np.float32)}
x_train = x
print x['x'].shape
print x['mean_prior'].shape
L_valid = 1
n_y = 5
n_x = x['x'].shape[0]
dim_input = (size,size)
type_qz = 'gaussianmarg'
type_pz = 'gaussianmarg'
type_px = 'gaussian'
nonlinear = 'softplus'
n_batch = 900 # 24300/900 = 27
colorImg = False
#binarize = False
bernoulli_x = False
byteToFloat = False
weight_decay= float(n_batch)/train_x.shape[1]
elif dataset == 'norb':
# small NORB dataset
import anglepy.data.norb as norb
size = 48
train_x, train_y, test_x, test_y = norb.load_resized(size, binarize_y=True)
x = {'x': train_x.astype(np.float32)}
x_valid = {'x': test_x.astype(np.float32)}
L_valid = 1
n_x = train_x.shape[0]
dim_input = (size,size)
type_qz = 'gaussianmarg'
type_pz = 'gaussianmarg'
type_px = 'gaussian'
nonlinear = 'softplus'
n_batch = 900 # 24300/900 = 27
colorImg = False
#binarize = False
byteToFloat = False
bernoulli_x = False
weight_decay= float(n_batch)/train_x.shape[1]
elif dataset == 'norb_pca':
# small NORB dataset
import anglepy.data.norb as norb
size = 48
train_x, train_y, test_x, test_y = norb.load_resized(size, binarize_y=True)
f_enc, f_dec, _ = pp.PCA(train_x, 0.999)
#f_enc, f_dec, _ = pp.normalize_random(train_x)
train_x = f_enc(train_x)
test_x = f_enc(test_x)
x = {'x': train_x.astype(np.float32)}
x_valid = {'x': test_x.astype(np.float32)}
L_valid = 1
n_x = train_x.shape[0]
dim_input = (size,size)
type_qz = 'gaussianmarg'
type_pz = 'gaussianmarg'
type_px = 'gaussian'
nonlinear = 'softplus'
n_batch = 900 # 24300/900 = 27
colorImg = False
#binarize = False
bernoulli_x = False
byteToFloat = False
weight_decay= float(n_batch)/train_x.shape[1]
elif dataset == 'norb_normalized':
# small NORB dataset
import anglepy.data.norb as norb
size = 48
train_x, train_y, test_x, test_y = norb.load_resized(size, binarize_y=True)
#f_enc, f_dec, _ = pp.PCA(train_x, 0.99)
#f_enc, f_dec, _ = pp.normalize_random(train_x)
f_enc, f_dec, _ = pp.normalize(train_x)
train_x = f_enc(train_x)
test_x = f_enc(test_x)
x = {'x': train_x.astype(np.float32)}
x_valid = {'x': test_x.astype(np.float32)}
L_valid = 1
n_x = train_x.shape[0]
dim_input = (size,size)
type_qz = 'gaussianmarg'
type_pz = 'gaussianmarg'
type_px = 'gaussian'
nonlinear = 'softplus'
n_batch = 900 # 24300/900 = 27
colorImg = False
#binarize = False
bernoulli_x = False
byteToFloat = False
weight_decay= float(n_batch)/train_x.shape[1]
elif dataset == 'svhn':
# SVHN dataset
#import anglepy.data.svhn as svhn
size = 32
train_x, train_y, test_x, test_y = np.load('data/svhn/svhn.npy')
#extra_x, extra_y = svhn.load_numpy_extra(False, binarize_y=True)
#x = {'x': np.hstack((train_x, extra_x)), 'y':np.hstack((train_y, extra_y))}
#ndict.shuffleCols(x)
x = {'x' : train_x, 'y': train_y}
print 'Performing PCA, can take a few minutes... '
cutoff = 300
if os.environ.has_key('cutoff'):
cutoff = int(os.environ['cutoff'])
color.printBlue('cutoff: '+str(cutoff))
f_enc, f_dec, pca_params = pp.PCA(x['x'][:,:10000], cutoff=cutoff, toFloat=True)
ndict.savez(pca_params, logdir+'pca_params')
print 'Done.'
n_y = 10
if os.environ.has_key('prior') and bool(int(os.environ['prior'])) == True:
color.printBlue('Loading prior')
train_mean_prior, train_y1, test_mean_prior, test_y1 = np.load('data/svhn/svhn_prior.npy')
print np.sum((train_y1 == train_y).astype(np.int32))
print np.sum((test_y1 == test_y).astype(np.int32))
else:
train_mean_prior = np.zeros((n_z,train_x.shape[1]))
test_mean_prior = np.zeros((n_z,test_x.shape[1]))
x = {'x': f_enc(x['x']).astype(np.float32), 'mean_prior':train_mean_prior.astype(np.float32)}
x_train = x
x_test = {'x': f_enc(test_x).astype(np.float32), 'mean_prior':test_mean_prior.astype(np.float32)}
x_valid = x_test
print x_train['x'].shape
print x_test['x'].shape
print train_y.shape
print test_y.shape
print x_train['mean_prior'].shape
print x_test['mean_prior'].shape
L_valid = 1
n_x = x['x'].shape[0]
dim_input = (size,size)
n_batch = 5000
n_train = 604388
n_valid = 26032
n_test = 26032
colorImg = True
bernoulli_x = False
byteToFloat = False
type_qz = 'gaussianmarg'
type_pz = 'gaussianmarg'
type_px = 'gaussian'
nonlinear = 'softplus'
else:
print 'invalid data set'
exit()
#print '2', n_x
# Construct model
from anglepy.models import GPUVAE_Z_X
learning_rate1 = 3e-4
if os.environ.has_key('stepsize'):
learning_rate1 = float(os.environ['stepsize'])
color.printBlue(str(learning_rate1))
if os.environ.has_key('preoption'):
pre = int(os.environ['preoption'])
if pre == 1:
updates = get_adam_optimizer(learning_rate=3e-4, decay1=0.9, decay2=0.999, weight_decay=0)
elif pre ==2:
updates = get_adam_optimizer(learning_rate=3e-4, decay1=0.9, decay2=0.999, weight_decay=weight_decay)
else:
raise Exception('Preoption unknown')
with open(logdir+'hook.txt', 'a') as f:
print >>f, 'preoption ' + str(pre)
else:
updates = get_adam_optimizer(learning_rate=learning_rate1, weight_decay=weight_decay)
#print '1', n_x
model = GPUVAE_Z_X(updates, n_x, n_hidden, n_z, n_hidden[::-1], nonlinear, nonlinear, type_px, type_qz=type_qz, type_pz=type_pz, prior_sd=100, init_sd=1e-3)
if os.environ.has_key('pretrain') and bool(int(os.environ['pretrain'])) == True:
#dir = '/Users/dpkingma/results/learn_z_x_mnist_binarized_50-(500, 500)_mog_1412689061/'
#dir = '/Users/dpkingma/results/learn_z_x_svhn_bernoulli_300-(1000, 1000)_l1l2_sharing_and_1000HU_1412676966/'
#dir = '/Users/dpkingma/results/learn_z_x_svhn_bernoulli_300-(1000, 1000)_l1l2_sharing_and_1000HU_1412695481/'
#dir = '/Users/dpkingma/results/learn_z_x_mnist_binarized_50-(500, 500)_mog_1412695455/'
#dir = '/Users/dpkingma/results/gpulearn_z_x_svhn_pca_300-(500, 500)__1413904756/'
if len(n_hidden) == 1:
color.printBlue('pre-training-1-layer')
layer_str = '-500'
elif len(n_hidden) == 2:
color.printBlue('pre-training-2-layers')
layer_str = '-(500, 500)'
else:
raise Exception()
pre_str = 'models/gpulearn_z_x_'
if dataset == 'mnist':
#dir = pre_str + 'mnist_'+str(n_z)+layer_str+'_longrun/'
dir = 'models/mnist_z_x_50-500-500_longrun/'
elif dataset == 'mnist_rot':
dir = pre_str + 'mnist_rot_'+str(n_z)+layer_str+'_longrun/'
elif dataset == 'mnist_back_rand':
dir = pre_str + 'mnist_back_rand_'+str(n_z)+layer_str+'_longrun/'
elif dataset == 'mnist_back_image':
dir = pre_str + 'mnist_back_image_'+str(n_z)+layer_str+'_longrun/'
elif dataset == 'mnist_back_image_rot':
dir = pre_str + 'mnist_back_image_rot_'+str(n_z)+layer_str+'_longrun/'
elif dataset == 'rectangle':
dir = pre_str + 'rectangle_'+str(n_z)+layer_str+'_longrun/'
elif dataset == 'rectangle_image':
dir = pre_str + 'rectangle_image_'+str(n_z)+layer_str+'_longrun/'
elif dataset == 'convex':
dir = pre_str + 'convex_'+str(n_z)+layer_str+'_longrun/'
elif dataset == 'mnist_basic':
dir = pre_str + 'mnist_basic_'+str(n_z)+layer_str+'_longrun/'
if dataset == 'svhn':
if (os.environ.has_key('prior') and bool(int(os.environ['prior'])) == True):
print 'prior-------------------'
pre_dir = 'results/gpulearn_z_x_svhn_'+str(n_z)+'-500-500_prior_'+str(cutoff)+'_longrun/'
else:
pre_dir = 'results/gpulearn_z_x_svhn_'+str(n_z)+'-500-500_'+str(cutoff)+'_longrun/'
color.printBlue(pre_dir)
w = ndict.loadz(pre_dir+'w_best.ndict.tar.gz')
v = ndict.loadz(pre_dir+'v_best.ndict.tar.gz')
elif n_z == 50:
print 'n_z = 50', dir
w = ndict.loadz(dir+'w_best.ndict.tar.gz')
v = ndict.loadz(dir+'v_best.ndict.tar.gz')
else:
print 'n_z != 50'
w = ndict.loadz(pre_dir+'w_best.ndict.tar.gz')
v = ndict.loadz(pre_dir+'v_best.ndict.tar.gz')
ndict.set_value2(model.w, w)
ndict.set_value2(model.v, v)
# Some statistics for optimization
ll_valid_stats = [-1e99, 0]
# Progress hook
def hook(epoch, t, ll):
if epoch%10 != 0: return
n_batch_n = n_batch
if n_batch_n > n_valid:
n_batch_n = n_valid
ll_valid, _ = model.est_loglik(x_valid, n_samples=L_valid, n_batch=n_batch_n, byteToFloat=byteToFloat)
ll_test = ll_valid
#if not dataset == 'mnist_binarized':
if not dataset == 'svhn':
ll_test, _ = model.est_loglik(x_test, n_samples=L_valid, n_batch=n_batch, byteToFloat=byteToFloat)
# Log
ndict.savez(ndict.get_value(model.v), logdir+'v')
ndict.savez(ndict.get_value(model.w), logdir+'w')
def infer(data, n_batch=1000):
#print '--', n_batch
size = data['x'].shape[1]
res = np.zeros((sum(n_hidden), size))
res1 = np.zeros((n_z,size))
res2 = np.zeros((n_hidden[-1],size))
res3 = np.zeros((n_z,size))
for i in range(0, size, n_batch):
idx_to = min(size, i+n_batch)
x_batch = ndict.getCols(data, i, idx_to)
# may have bugs
nn_batch = idx_to - i
_x, _z, _z_confab = model.gen_xz(x_batch, {}, nn_batch)
x_samples = _z_confab['x']
for (hi, hidden) in enumerate(_z_confab['hidden']):
res[sum(n_hidden[:hi]):sum(n_hidden[:hi+1]),i:i+nn_batch] = hidden
res1[:,i:i+nn_batch] = _z_confab['mean']
res2[:,i:i+nn_batch] = _z_confab['hidden'][-1]
res3[:,i:i+nn_batch] = _z_confab['logvar']
#print '--'
return res, res1, res2, res3
#print '..', n_batch
#if not dataset == 'mnist_binarized':
if not dataset == 'svhn':
z_test, z_test1, z_test2, vv_test = infer(x_test)
z_train, z_train1, z_train2, vv_train = infer(x_train)
if ll_valid > ll_valid_stats[0]:
ll_valid_stats[0] = ll_valid
ll_valid_stats[1] = 0
ndict.savez(ndict.get_value(model.v), logdir+'v_best')
ndict.savez(ndict.get_value(model.w), logdir+'w_best')
#if not dataset == 'mnist_binarized':
if dataset == 'svhn':
pass
#np.save(logdir+'full_latent', ('z_test': z_test, 'train_y':train_y, 'test_y':test_y, 'z_train': z_train))
#np.save(logdir+'last_latent', ('z_test': z_test2, 'train_y':train_y, 'test_y':test_y, 'z_train': z_train2))
else:
sio.savemat(logdir+'full_latent.mat', {'z_test': z_test, 'train_y':train_y, 'test_y':test_y, 'z_train': z_train})
sio.savemat(logdir+'mean_latent.mat', {'z_test': z_test1, 'train_y':train_y, 'test_y':test_y, 'z_train': z_train1})
sio.savemat(logdir+'last_latent.mat', {'z_test': z_test2, 'train_y':train_y, 'test_y':test_y, 'z_train': z_train2})
else:
ll_valid_stats[1] += 1
# Stop when validation performance has not improved for 1000 consecutive checks (the hook runs every 10 epochs)
if ll_valid_stats[1] > 1000:
print "Finished"
with open(logdir+'hook.txt', 'a') as f:
print >>f, "Finished"
exit()
print epoch, t, ll, ll_valid, ll_test, ll_valid_stats
with open(logdir+'hook.txt', 'a') as f:
print >>f, epoch, t, ll, ll_valid, ll_test, ll_valid_stats
'''
if dataset != 'svhn':
l_t, px_t, pz_t, qz_t = model.test(x_train, n_samples=1, n_batch=n_batch, byteToFloat=byteToFloat)
print 'Elogpx', px_t, 'Elogpz', pz_t, '-Elogqz', qz_t
#sigma_square = float(os.environ['sigma_square'])
print 'var', np.mean(np.exp(vv_train)), 'q', np.mean(np.abs(z_train1)), 'p', np.mean(np.abs(train_mean_prior)), 'd', np.mean(np.abs(z_train1-train_mean_prior))
with open(logdir+'hook.txt', 'a') as f:
print >>f, 'Elogpx', px_t, 'Elogpz', pz_t, '-Elogqz', qz_t
print >>f, 'var', np.mean(np.exp(vv_train)), 'q', np.mean(np.abs(z_train1)), 'p', np.mean(np.abs(train_mean_prior)), 'd', np.mean(np.abs(z_train1-train_mean_prior))
'''
# Graphics
if gfx and epoch%gfx_freq == 0:
#tail = '.png'
tail = '-'+str(epoch)+'.png'
v = {i: model.v[i].get_value() for i in model.v}
w = {i: model.w[i].get_value() for i in model.w}
if 'pca' not in dataset and 'random' not in dataset and 'normalized' not in dataset and 'zca' not in dataset:
if 'w0' in v:
image = paramgraphics.mat_to_img(f_dec(v['w0'][:].T), dim_input, True, colorImg=colorImg)
image.save(logdir+'q_w0'+tail, 'PNG')
image = paramgraphics.mat_to_img(f_dec(w['out_w'][:]), dim_input, True, colorImg=colorImg)
image.save(logdir+'out_w'+tail, 'PNG')
if 'out_unif' in w:
image = paramgraphics.mat_to_img(f_dec(w['out_unif'].reshape((-1,1))), dim_input, True, colorImg=colorImg)
image.save(logdir+'out_unif'+tail, 'PNG')
if n_z == 2:
n_width = 10
import scipy.stats
z = {'z':np.zeros((2,n_width**2))}
for i in range(0,n_width):
for j in range(0,n_width):
z['z'][0,n_width*i+j] = scipy.stats.norm.ppf(float(i)/n_width+0.5/n_width)
z['z'][1,n_width*i+j] = scipy.stats.norm.ppf(float(j)/n_width+0.5/n_width)
x, _, _z = model.gen_xz({}, z, n_width**2)
if dataset == 'mnist':
x = 1 - _z['x']
image = paramgraphics.mat_to_img(f_dec(_z['x']), dim_input)
image.save(logdir+'2dmanifold'+tail, 'PNG')
else:
if 'norb' in dataset or dataset=='svhn':
nn_batch_nn = 64
else:
nn_batch_nn = 144
if not(os.environ.has_key('train_residual') and bool(int(os.environ['train_residual'])) == True) and (os.environ.has_key('prior') and bool(int(os.environ['prior'])) == True):
mp_in = np.random.randint(0,x_train['mean_prior'].shape[1],nn_batch_nn)
m_p = x_train['mean_prior'][:,mp_in]
s_s = 1
if os.environ.has_key('sigma_square'):
s_s = float(os.environ['sigma_square'])
x_samples = model.gen_xz_prior({}, {}, m_p, s_s, n_batch=nn_batch_nn)
x_samples = x_samples['x']
m_p1 = (np.ones((n_z, nn_batch_nn)).T * np.mean(x_train['mean_prior'], axis = 1)).T
x_samples1 = model.gen_xz_prior({}, {}, m_p1.astype(np.float32), s_s, n_batch=nn_batch_nn)
image = paramgraphics.mat_to_img(f_dec(x_samples1['x']), dim_input, colorImg=colorImg)
image.save(logdir+'mean_samples-prior'+tail, 'PNG')
x_samples11 = model.gen_xz_prior11({}, {}, m_p, s_s, n_batch=nn_batch_nn)
image = paramgraphics.mat_to_img(f_dec(x_samples11['x']), dim_input, colorImg=colorImg)
image.save(logdir+'prior-image'+tail, 'PNG')
else:
_x, _, _z_confab = model.gen_xz({}, {}, n_batch=nn_batch_nn)
x_samples = _z_confab['x']
image = paramgraphics.mat_to_img(f_dec(x_samples), dim_input, colorImg=colorImg)
image.save(logdir+'samples-prior'+tail, 'PNG')
#x_samples = _x['x']
#image = paramgraphics.mat_to_img(x_samples, dim_input, colorImg=colorImg)
#image.save(logdir+'samples2'+tail, 'PNG')
else:
# Model with preprocessing
if 'w0' in v:
tmp = f_dec(v['w0'][:].T)
#print dim_input
#print tmp.shape
if 'zca' in dataset or dataset=='svhn':
tmp = zca_dec(zca_mean, zca_winv, tmp)
image = paramgraphics.mat_to_img(tmp, dim_input, True, colorImg=colorImg)
image.save(logdir+'q_w0'+tail, 'PNG')
tmp = f_dec(w['out_w'][:])
if 'zca' in dataset:
tmp = zca_dec(zca_mean, zca_winv, tmp)
image = paramgraphics.mat_to_img(tmp, dim_input, True, colorImg=colorImg)
image.save(logdir+'out_w'+tail, 'PNG')
if dataset == 'svhn':
nn_batch_nn = 64
else:
nn_batch_nn = 144
if not(os.environ.has_key('train_residual') and bool(int(os.environ['train_residual'])) == True) and (os.environ.has_key('prior') and bool(int(os.environ['prior'])) == True):
mp_in = np.random.randint(0,x_train['mean_prior'].shape[1],nn_batch_nn)
m_p = x_train['mean_prior'][:,mp_in]
s_s = 1
if os.environ.has_key('sigma_square'):
s_s = float(os.environ['sigma_square'])
x_samples = model.gen_xz_prior({}, {}, m_p, s_s, n_batch=nn_batch_nn)
x_samples = zca_dec(zca_mean, zca_winv,x_samples['x'])
x_samples = np.minimum(np.maximum(x_samples, 0), 1)
x_samples11 = model.gen_xz_prior11({}, {}, m_p, s_s, n_batch=nn_batch_nn)
x_samples11 = zca_dec(zca_mean,zca_winv,x_samples11['x'])
x_samples11 = np.minimum(np.maximum(x_samples11, 0), 1)
image = paramgraphics.mat_to_img(x_samples11, dim_input, colorImg=colorImg)
image.save(logdir+'prior-image'+tail, 'PNG')
else:
_x, _z, _z_confab = model.gen_xz({}, {}, n_batch=nn_batch_nn)
x_samples = f_dec(_z_confab['x'])
x_samples = np.minimum(np.maximum(x_samples, 0), 1)
image = paramgraphics.mat_to_img(x_samples, dim_input, colorImg=colorImg)
image.save(logdir+'samples'+tail, 'PNG')
'''
def infer(data, n_batch=1000):
#print '--', n_batch
size = data['x'].shape[1]
res = np.zeros((sum(n_hidden), size))
res1 = np.zeros((n_z,size))
res2 = np.zeros((n_hidden[-1],size))
res3 = np.zeros((n_z,size))
for i in range(0, size, n_batch):
idx_to = min(size, i+n_batch)
x_batch = ndict.getCols(data, i, idx_to)
# may have bugs
nn_batch = idx_to - i
_x, _z, _z_confab = model.gen_xz(x_batch, {}, nn_batch)
x_samples = _z_confab['x']
for (hi, hidden) in enumerate(_z_confab['hidden']):
res[sum(n_hidden[:hi]):sum(n_hidden[:hi+1]),i:i+nn_batch] = hidden
res1[:,i:i+nn_batch] = _z_confab['mean']
res2[:,i:i+nn_batch] = _z_confab['hidden'][-1]
res3[:,i:i+nn_batch] = _z_confab['logvar']
#
return res, res1, res2, res3
#print n_batch
#if not dataset == 'mnist_binarized':
z_test, z_test1, z_test2, vv_test = infer(x_test)
z_train, z_train1, z_train2, vv_train = infer(x_train)
l_t, px_t, pz_t, qz_t = model.test(x_train, n_samples=1, n_batch=n_batch, byteToFloat=byteToFloat)
print 'Elogpx', px_t, 'Elogpz', pz_t, '-Elogqz', qz_t
#sigma_square = float(os.environ['sigma_square'])
print 'var', np.mean(np.exp(vv_train)), 'q', np.mean(np.abs(z_train1)), 'p', np.mean(np.abs(train_mean_prior)), 'd', np.mean(np.abs(z_train1-train_mean_prior))
with open(logdir+'hook.txt', 'a') as f:
print >>f, 'Elogpx', px_t, 'Elogpz', pz_t, '-Elogqz', qz_t
print >>f, 'var', np.mean(np.exp(vv_train)), 'q', np.mean(np.abs(z_train1)), 'p', np.mean(np.abs(train_mean_prior)), 'd', np.mean(np.abs(z_train1-train_mean_prior))
#if not dataset == 'mnist_binarized':
sio.savemat(logdir+'full_latent.mat', {'z_test': z_test, 'train_y':train_y, 'test_y':test_y, 'z_train': z_train})
sio.savemat(logdir+'mean_latent.mat', {'z_test': z_test1, 'train_y':train_y, 'test_y':test_y, 'z_train': z_train1})
sio.savemat(logdir+'last_latent.mat', {'z_test': z_test2, 'train_y':train_y, 'test_y':test_y, 'z_train': z_train2})
'''
# Optimize
#SFO
dostep = epoch_vae_adam(model, x, n_batch=n_batch, bernoulli_x=bernoulli_x, byteToFloat=byteToFloat)
loop_va(dostep, hook)
pass
# Training loop for variational autoencoder
def loop_va(doEpoch, hook, n_epochs=1201):
t0 = time.time()
for t in xrange(1, n_epochs):
L = doEpoch()
hook(t, time.time() - t0, L)
print 'Optimization loop finished'
# Learning step for variational auto-encoder
def epoch_vae_adam(model, x, n_batch=100, convertImgs=False, bernoulli_x=False, byteToFloat=False):
print 'Variational Auto-Encoder', n_batch
def doEpoch():
from collections import OrderedDict
n_tot = x.itervalues().next().shape[1]
idx_from = 0
L = 0
while idx_from < n_tot:
idx_to = min(n_tot, idx_from+n_batch)
x_minibatch = ndict.getCols(x, idx_from, idx_to)
idx_from += n_batch
if byteToFloat: x_minibatch['x'] = x_minibatch['x'].astype(np.float32)/256.
if bernoulli_x: x_minibatch['x'] = np.random.binomial(n=1, p=x_minibatch['x']).astype(np.float32)
# Do gradient ascent step
L += model.evalAndUpdate(x_minibatch, {}).sum()
#model.profmode.print_summary()
L /= n_tot
return L
return doEpoch
def get_adam_optimizer(learning_rate=0.001, decay1=0.1, decay2=0.001, weight_decay=0.0):
print 'AdaM', learning_rate, decay1, decay2, weight_decay
def shared32(x, name=None, borrow=False):
return theano.shared(np.asarray(x, dtype='float32'), name=name, borrow=borrow)
def get_optimizer(w, g):
updates = OrderedDict()
it = shared32(0.)
updates[it] = it + 1.
fix1 = 1.-(1.-decay1)**(it+1.) # To make estimates unbiased
fix2 = 1.-(1.-decay2)**(it+1.) # To make estimates unbiased
lr_t = learning_rate * T.sqrt(fix2) / fix1
for i in w:
gi = g[i]
if weight_decay > 0:
gi -= weight_decay * w[i] #T.tanh(w[i])
# mean_squared_grad := E[g^2]_{t-1}
mom1 = shared32(w[i].get_value() * 0.)
mom2 = shared32(w[i].get_value() * 0.)
# Update moments
mom1_new = mom1 + decay1 * (gi - mom1)
mom2_new = mom2 + decay2 * (T.sqr(gi) - mom2)
# Compute the effective gradient and effective learning rate
effgrad = mom1_new / (T.sqrt(mom2_new) + 1e-10)
effstep_new = lr_t * effgrad
# Do update
w_new = w[i] + effstep_new
# Apply update
updates[w[i]] = w_new
updates[mom1] = mom1_new
updates[mom2] = mom2_new
return updates
return get_optimizer
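# For reference, the update applied above to each parameter w with gradient g
# (after the optional weight decay) is
#   m_t = (1-decay1)*m_{t-1} + decay1*g            # first moment estimate
#   v_t = (1-decay2)*v_{t-1} + decay2*g^2          # second moment estimate
#   lr_t = learning_rate * sqrt(1-(1-decay2)^t) / (1-(1-decay1)^t)
#   w_t = w_{t-1} + lr_t * m_t / (sqrt(v_t) + 1e-10)
# i.e. Adam with the bias corrections folded into the step size, applied as
# gradient ascent since the objective is a variational lower bound.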
| zhenxuan00/mmdgm | mlp-mmdgm/gpulearn_z_x.py | Python | mit | 50,776 | 0.015499 |
#!/usr/bin/env python3
"""
Author: Kartamyshev A.I. (Darth Feiwante)
"""
def inherit_icalc_isotropic(new_structure = '', start_new_version = None, base_calculation = (None, None, None), database = None, min_mult = 1, max_mult = 1, num_points = 2, geo_folder = '', it_folder = '', override = False):
"""
This function makes set of structures uniformly scaled from the initial one within the range of deformation
INPUT:
- new_structure (str) - arbitary name for your crystal structure
- start_new_version (int) - start version for newly built structures
- base_calculation (tuple) - tuple describing initial Calculation object in form ('structure', 'set', 'version')
- database (dict) - dictionary with the project's results
- min_mult (float) - minimal deformation (scaling multiplier) of the initial structure
- max_mult (float) - maximal deformation (scaling multiplier) of the initial structure
- num_points (int) - number of newly built structures
- geo_folder (str) - path to the folder to save *.geo files of newly built structures
- it_folder (str) - section folder
- override (boolean) - if True then the old structures with the same names will be overwritten
RETURN:
None
SOURCE:
None
TODO:
Some improvements
"""
from calc_manage import inherit_icalc
step = (max_mult - min_mult)/(num_points - 1)
mult_list = [min_mult+step*i for i in range(num_points)]
version = start_new_version
for j in mult_list:
inherit_icalc('isotropic', new_structure, version, base_calculation, database, mult_rprimd = j, geo_folder=geo_folder, override=override)
version += 1
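# Illustrative usage sketch; all structure names, folders and the database handle
# below are hypothetical examples:
#
#   inherit_icalc_isotropic(new_structure='Ti_hcp_scan', start_new_version=1,
#                           base_calculation=('Ti_hcp', 'static', 1), database=db,
#                           min_mult=0.98, max_mult=1.02, num_points=5,
#                           geo_folder='geo/', it_folder='Ti/scan')
#
# builds versions 1..5 with uniform lattice multipliers 0.98, 0.99, 1.00, 1.01, 1.02.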
def inherit_icalc_c_a(new_structure = '', start_new_version = None, base_calculation = (None, None, None), database = None, min_mult_a = 1, max_mult_a = 1, num_points_a = 2,
min_mult_c = 1, max_mult_c = 1, num_points_c = 2, geo_folder='', it_folder ='', override = False):
"""
This function makes set of structures deformed uniformly in the plane presented by the vectors 1 and 2 and separately deformed along the vector 3 of the lattice
INPUT:
- new_structure (str) - arbitrary name for your crystal structure
- start_new_version (int) - start version for newly built structures
- base_calculation (tuple) - tuple describing initial Calculation object in form ('structure', 'set', 'version')
- database (dict) - dictionary with the project's results
- min_mult_a (float) - minimal simultaneous deformation of the vector 1 and 2 of the final structure from "base_calculation"
- max_mult_a (float) - maximal simultaneous deformation of the vector 1 and 2 of the final structure from "base_calculation"
- num_points_a (int) - number of different simultaneous deformations of the vectors 1 and 2
- min_mult_c (float) - minimal deformation of the vector 3 of the structure from "base_calculation"
- max_mult_c (float) - maximal deformation of the vector 3 of the structure from "base_calculation"
- num_points_c (int) - number of different deformations of the vector 3 from "base_calculation"
- geo_folder (str) - path to the folder to save *.geo files of newly built structures
- it_folder (str) - section folder
- override (boolean) - if True then the old structures with the same names will be overwritten
RETURN:
None
SOURCE:
None
TODO:
Some improvements
"""
from calc_manage import inherit_icalc
if num_points_a > 1:
# Lattice parameter a
step_a = (max_mult_a - min_mult_a)/(num_points_a - 1)
mult_list_a = [min_mult_a+step_a*i for i in range(num_points_a)]
if num_points_c > 1:
# Lattice parameter c
        step_c = (max_mult_c - min_mult_c)/(num_points_c - 1)
        mult_list_c = [min_mult_c + step_c*i for i in range(num_points_c)]
print('database', database)
version = start_new_version
if num_points_a > 1 and num_points_c > 1:
for j in mult_list_a:
for k in mult_list_c:
                inherit_icalc('c_a', new_structure, version, base_calculation, database, mult_a = j, mult_c = k, geo_folder=geo_folder, override=override)
version += 1
elif num_points_c == 1:
for j in mult_list_a:
inherit_icalc('c_a', new_structure, version, base_calculation, database, mult_a = j, mult_c = 1, geo_folder=geo_folder, override=override)
version += 1
elif num_points_a == 1:
for j in mult_list_c:
inherit_icalc('c_a', new_structure, version, base_calculation, database, mult_a = 1, mult_c = j, geo_folder=geo_folder, override=override)
version += 1
def inherit_icalc_x_y(new_structure = '', start_new_version = None, base_calculation = (None, None, None), database = None,
min_mult_a = 1, max_mult_a = 1, num_points_a = 2, min_mult_b = 1, max_mult_b = 1,num_points_b = 2, geo_folder='', it_folder ='',
override = False):
"""
    This function makes a set of structures deformed separately along lattice vectors 1 and 2
INPUT:
    - new_structure (str) - arbitrary name for your crystal structure
- start_new_version (int) - start version for newly built structures
- base_calculation (tuple) - tuple describing initial Calculation object in form ('structure', 'set', version)
- database (dict) - dictionary with the project's results
- min_mult_a (float) - minimal deformation of the vector 1 of the structure from "base_calculation"
- max_mult_a (float) - maximal deformation of the vector 1 of the structure from "base_calculation"
    - num_points_a (int) - number of different deformations of the vector 1
- min_mult_b (float) - minimal deformation of the vector 2 of the structure from "base_calculation"
- max_mult_b (float) - maximal deformation of the vector 2 of the structure from "base_calculation"
- num_points_b (int) - number of different deformations of the vector 2
- geo_folder (str) - path to the folder to save *.geo files of newly built structures
    - it_folder (str) - section folder
- override (boolean) - if True then the old structures with the same names will be overwritten
RETURN:
None
SOURCE:
None
TODO:
Some improvements
"""
from calc_manage import inherit_icalc
if num_points_a > 1:
# Coordinate x in rprimd
step_a = (max_mult_a - min_mult_a)/(num_points_a - 1)
mult_list_a = [min_mult_a+step_a*i for i in range(num_points_a)]
if num_points_b > 1:
# Coordinate y in rprimd
step_b = (max_mult_b - min_mult_b)/(num_points_b - 1)
mult_list_b = [min_mult_b+step_b*i for i in range(num_points_b)]
version = start_new_version
if num_points_a > 1 and num_points_b > 1:
for j in mult_list_a:
for k in mult_list_b:
inherit_icalc('xy', new_structure, version, base_calculation, database, mult_a = j, mult_b = k, geo_folder=geo_folder, it_folder = it_folder, override=override)
version += 1
elif num_points_b == 1:
for j in mult_list_a:
inherit_icalc('xy', new_structure, version, base_calculation, database, mult_a = j, mult_b = 1, geo_folder=geo_folder, it_folder = it_folder, override=override)
version += 1
elif num_points_a == 1:
for j in mult_list_b:
inherit_icalc('xy', new_structure, version, base_calculation, database, mult_a = 1, mult_b = j, geo_folder=geo_folder, it_folder = it_folder, override=override)
version += 1
|
dimonaks/siman
|
siman/structure_functions.py
|
Python
|
gpl-2.0
| 8,328
| 0.018492
|
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import os.path
from neutron.agent.linux import external_process as ep
from neutron.common import utils as common_utils
from neutron.tests import base
from neutron.tests import tools
TEST_UUID = 'test-uuid'
TEST_SERVICE = 'testsvc'
TEST_PID = 1234
class BaseTestProcessMonitor(base.BaseTestCase):
def setUp(self):
super(BaseTestProcessMonitor, self).setUp()
self.log_patch = mock.patch("neutron.agent.linux.external_process."
"LOG.error")
self.error_log = self.log_patch.start()
self.spawn_patch = mock.patch("eventlet.spawn")
        self.eventlet_spawn = self.spawn_patch.start()
# create a default process monitor
self.create_child_process_monitor('respawn')
def create_child_process_monitor(self, action):
conf = mock.Mock()
conf.AGENT.check_child_processes_action = action
conf.AGENT.check_child_processes = True
self.pmonitor = ep.ProcessMonitor(
config=conf,
resource_type='test')
def get_monitored_process(self, uuid, service=None):
monitored_process = mock.Mock()
self.pmonitor.register(uuid=uuid,
service_name=service,
monitored_process=monitored_process)
return monitored_process
class TestProcessMonitor(BaseTestProcessMonitor):
def test_error_logged(self):
pm = self.get_monitored_process(TEST_UUID)
pm.active = False
self.pmonitor._check_child_processes()
self.assertTrue(self.error_log.called)
def test_exit_handler(self):
self.create_child_process_monitor('exit')
pm = self.get_monitored_process(TEST_UUID)
pm.active = False
with mock.patch.object(ep.ProcessMonitor,
'_exit_handler') as exit_handler:
self.pmonitor._check_child_processes()
exit_handler.assert_called_once_with(TEST_UUID, None)
def test_register(self):
pm = self.get_monitored_process(TEST_UUID)
self.assertEqual(len(self.pmonitor._monitored_processes), 1)
self.assertIn(pm, self.pmonitor._monitored_processes.values())
def test_register_same_service_twice(self):
self.get_monitored_process(TEST_UUID)
self.get_monitored_process(TEST_UUID)
self.assertEqual(len(self.pmonitor._monitored_processes), 1)
def test_register_different_service_types(self):
self.get_monitored_process(TEST_UUID)
self.get_monitored_process(TEST_UUID, TEST_SERVICE)
self.assertEqual(len(self.pmonitor._monitored_processes), 2)
def test_unregister(self):
self.get_monitored_process(TEST_UUID)
self.pmonitor.unregister(TEST_UUID, None)
self.assertEqual(len(self.pmonitor._monitored_processes), 0)
def test_unregister_unknown_process(self):
self.pmonitor.unregister(TEST_UUID, None)
self.assertEqual(len(self.pmonitor._monitored_processes), 0)
class TestProcessManager(base.BaseTestCase):
def setUp(self):
super(TestProcessManager, self).setUp()
self.execute_p = mock.patch('neutron.agent.common.utils.execute')
self.execute = self.execute_p.start()
self.delete_if_exists = mock.patch(
'oslo_utils.fileutils.delete_if_exists').start()
self.ensure_dir = mock.patch.object(
common_utils, 'ensure_dir').start()
self.conf = mock.Mock()
self.conf.external_pids = '/var/path'
def test_processmanager_ensures_pid_dir(self):
pid_file = os.path.join(self.conf.external_pids, 'pid')
ep.ProcessManager(self.conf, 'uuid', pid_file=pid_file)
self.ensure_dir.assert_called_once_with(self.conf.external_pids)
def test_enable_no_namespace(self):
callback = mock.Mock()
callback.return_value = ['the', 'cmd']
with mock.patch.object(ep.ProcessManager, 'get_pid_file_name') as name:
name.return_value = 'pidfile'
with mock.patch.object(ep.ProcessManager, 'active') as active:
active.__get__ = mock.Mock(return_value=False)
manager = ep.ProcessManager(self.conf, 'uuid')
manager.enable(callback)
callback.assert_called_once_with('pidfile')
self.execute.assert_called_once_with(['the', 'cmd'],
check_exit_code=True,
extra_ok_codes=None,
run_as_root=False,
log_fail_as_error=True)
def test_enable_with_namespace(self):
callback = mock.Mock()
callback.return_value = ['the', 'cmd']
with mock.patch.object(ep.ProcessManager, 'get_pid_file_name') as name:
name.return_value = 'pidfile'
with mock.patch.object(ep.ProcessManager, 'active') as active:
active.__get__ = mock.Mock(return_value=False)
manager = ep.ProcessManager(self.conf, 'uuid', namespace='ns')
with mock.patch.object(ep, 'ip_lib') as ip_lib:
manager.enable(callback)
callback.assert_called_once_with('pidfile')
ip_lib.assert_has_calls([
mock.call.IPWrapper(namespace='ns'),
mock.call.IPWrapper().netns.execute(
['the', 'cmd'], addl_env=None, run_as_root=False)])
def test_enable_with_namespace_process_active(self):
callback = mock.Mock()
callback.return_value = ['the', 'cmd']
with mock.patch.object(ep.ProcessManager, 'active') as active:
active.__get__ = mock.Mock(return_value=True)
manager = ep.ProcessManager(self.conf, 'uuid', namespace='ns')
with mock.patch.object(ep, 'ip_lib'):
manager.enable(callback)
self.assertFalse(callback.called)
def test_disable_no_namespace(self):
with mock.patch.object(ep.ProcessManager, 'pid') as pid:
pid.__get__ = mock.Mock(return_value=4)
with mock.patch.object(ep.ProcessManager, 'active') as active:
active.__get__ = mock.Mock(return_value=True)
manager = ep.ProcessManager(self.conf, 'uuid')
with mock.patch.object(ep, 'utils') as utils:
manager.disable()
utils.assert_has_calls([
mock.call.execute(['kill', '-9', 4],
run_as_root=True)])
def test_disable_namespace(self):
with mock.patch.object(ep.ProcessManager, 'pid') as pid:
pid.__get__ = mock.Mock(return_value=4)
with mock.patch.object(ep.ProcessManager, 'active') as active:
active.__get__ = mock.Mock(return_value=True)
manager = ep.ProcessManager(self.conf, 'uuid', namespace='ns')
with mock.patch.object(ep, 'utils') as utils:
manager.disable()
utils.assert_has_calls([
mock.call.execute(['kill', '-9', 4],
run_as_root=True)])
def test_disable_not_active(self):
with mock.patch.object(ep.ProcessManager, 'pid') as pid:
pid.__get__ = mock.Mock(return_value=4)
with mock.patch.object(ep.ProcessManager, 'active') as active:
active.__get__ = mock.Mock(return_value=False)
with mock.patch.object(ep.LOG, 'debug') as debug:
manager = ep.ProcessManager(self.conf, 'uuid')
manager.disable()
debug.assert_called_once_with(mock.ANY, mock.ANY)
def test_disable_no_pid(self):
with mock.patch.object(ep.ProcessManager, 'pid') as pid:
pid.__get__ = mock.Mock(return_value=None)
with mock.patch.object(ep.ProcessManager, 'active') as active:
active.__get__ = mock.Mock(return_value=False)
with mock.patch.object(ep.LOG, 'debug') as debug:
manager = ep.ProcessManager(self.conf, 'uuid')
manager.disable()
debug.assert_called_once_with(mock.ANY, mock.ANY)
def test_get_pid_file_name_default(self):
manager = ep.ProcessManager(self.conf, 'uuid')
retval = manager.get_pid_file_name()
self.assertEqual(retval, '/var/path/uuid.pid')
def test_pid(self):
self.useFixture(tools.OpenFixture('/var/path/uuid.pid', '5'))
manager = ep.ProcessManager(self.conf, 'uuid')
self.assertEqual(manager.pid, 5)
    def test_pid_not_an_int(self):
self.useFixture(tools.OpenFixture('/var/path/uuid.pid', 'foo'))
manager = ep.ProcessManager(self.conf, 'uuid')
self.assertIsNone(manager.pid)
def test_pid_invalid_file(self):
with mock.patch.object(ep.ProcessManager, 'get_pid_file_name') as name:
name.return_value = '.doesnotexist/pid'
manager = ep.ProcessManager(self.conf, 'uuid')
self.assertIsNone(manager.pid)
def test_active(self):
mock_open = self.useFixture(
tools.OpenFixture('/proc/4/cmdline', 'python foo --router_id=uuid')
).mock_open
with mock.patch.object(ep.ProcessManager, 'pid') as pid:
pid.__get__ = mock.Mock(return_value=4)
manager = ep.ProcessManager(self.conf, 'uuid')
self.assertTrue(manager.active)
mock_open.assert_called_once_with('/proc/4/cmdline', 'r')
def test_active_none(self):
dummy_cmd_line = 'python foo --router_id=uuid'
self.execute.return_value = dummy_cmd_line
with mock.patch.object(ep.ProcessManager, 'pid') as pid:
pid.__get__ = mock.Mock(return_value=None)
manager = ep.ProcessManager(self.conf, 'uuid')
self.assertFalse(manager.active)
def test_active_cmd_mismatch(self):
mock_open = self.useFixture(
tools.OpenFixture('/proc/4/cmdline',
'python foo --router_id=anotherid')
).mock_open
with mock.patch.object(ep.ProcessManager, 'pid') as pid:
pid.__get__ = mock.Mock(return_value=4)
manager = ep.ProcessManager(self.conf, 'uuid')
self.assertFalse(manager.active)
mock_open.assert_called_once_with('/proc/4/cmdline', 'r')
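# Hypothetical usage sketch (not part of this test module); the uuid, namespace
# and command below are assumptions for illustration only:
#
#   manager = ep.ProcessManager(conf, 'router-uuid', namespace='qrouter-ns')
#   manager.enable(lambda pid_file: ['dnsmasq', '--pid-file=%s' % pid_file])
#   ...
#   manager.disable()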
|
glove747/liberty-neutron
|
neutron/tests/unit/agent/linux/test_external_process.py
|
Python
|
apache-2.0
| 11,219
| 0
|
from datetime import date
import os
from django.contrib.auth.models import User
from django import forms
from django.utils import timezone
from fields import Html5CaptchaField
from html5input import *
from settings import AVAILABLE_TEMPLATES, TEMPLATE_DIRS
from accounts.models import UserProfile
class SettingsUserForm(forms.ModelForm):
email = forms.EmailField(required=True, widget=Html5EmailInput(attrs={'required': None}))
class Meta:
model = User
fields = ('first_name', 'email')
class SettingsUserprofileForm(forms.ModelForm):
template_choices = [(template_[0], template_[1]) for template_ in AVAILABLE_TEMPLATES]
template = forms.ChoiceField(choices=template_choices)
BOOL_CHOICES = (
(True, 'Show to everyone'),
(False, 'Show only to registered users'),
)
def __init__(self, *args, **kwargs):
super(SettingsUserprofileForm, self).__init__(*args, **kwargs)
if not self.instance.user.is_staff:
self.fields['template'].choices = [(template_[0], template_[1]) for template_ in AVAILABLE_TEMPLATES if template_[2] or template_[0] == self.instance.template]
self.initial['template'] = self.instance.template
class Meta:
model = UserProfile
fields = ('publish_name', 'ingame_name', 'publish_ingame_name', 'website', 'publish_website', 'contact', 'publish_contact', 'fav_mod', 'publish_fav_mod', 'fav_map', 'publish_fav_map', 'gender', 'publish_gender', 'birthday', 'publish_birthday', 'template')
widgets = {
'publish_name': forms.Select(choices=((True, 'Show to everyone'), (False, 'Show only to registered users'))),
'publish_ingame_name': forms.Select(choices=((True, 'Show to everyone'), (False, 'Show only to registered users'))),
'publish_website': forms.Select(choices=((True, 'Show to everyone'), (False, 'Show only to registered users'))),
'publish_contact': forms.Select(choices=((True, 'Show to everyone'), (False, 'Show only to registered users'))),
'publish_fav_mod': forms.Select(choices=((True, 'Show to everyone'), (False, 'Show only to registered users'))),
'publish_fav_map': forms.Select(choices=((True, 'Show to everyone'), (False, 'Show only to registered users'))),
'publish_gender': forms.Select(choices=((True, 'Show to everyone'), (False, 'Show only to registered users'))),
'birthday': Html5SelectDateWidget(years=range(1930, timezone.now().year)),
'publish_birthday': forms.Select(choices=((True, 'Show to everyone'), (False, 'Show only to registered users'))),
}
def clean_birthday(self):
birthday = self.cleaned_data['birthday']
if birthday and birthday > date.today():
raise forms.ValidationError('You cannot be born in the future.')
return birthday
def clean_template(self):
template = self.cleaned_data['template']
found = False
for path in TEMPLATE_DIRS:
if os.path.exists(os.path.join(path, template)):
found = True
break
if not found:
raise forms.ValidationError('Template does not exist. Please contact an admin.')
return template
class PasswordChangeForm(forms.Form):
old_password = forms.CharField(label='Old password',
widget=forms.PasswordInput(render_value=False, attrs={'pattern': r'.{8,}', 'title': '8 characters are required', 'required': None}))
new_password1 = forms.CharField(label='New password', min_length=8,
widget=forms.PasswordInput(render_value=False, attrs={'pattern': r'.{8,}', 'title': '8 characters are required', 'required': None}))
new_password2 = forms.CharField(label='New password again', min_length=8,
widget=forms.PasswordInput(render_value=False, attrs={'pattern': r'.{8,}', 'title': '8 characters are required', 'required': None}))
def __init__(self, *args, **kwargs):
self.current_user = kwargs.pop('current_user', None)
if self.current_user is None:
raise AttributeError('current_user missing')
super(PasswordChangeForm, self).__init__(*args, **kwargs)
def clean_old_password(self):
old_password = self.cleaned_data['old_password']
if not self.current_user.check_password(old_password):
raise forms.ValidationError('Please enter your current password correctly.')
return old_password
def clean_new_password2(self):
new_password1 = self.cleaned_data['new_password1']
new_password2 = self.cleaned_data['new_password2']
if new_password1 != new_password2:
raise forms.ValidationError("The password doesn't match the other.")
return new_password2
def save(self):
self.current_user.set_password(self.cleaned_data['new_password1'])
self.current_user.save()
class RecoverPasswordForm(forms.Form):
username = forms.CharField(label='Username', widget=forms.TextInput(attrs={'required': None}))
captcha = Html5CaptchaField(required=True)
def clean_username(self):
username = self.cleaned_data['username']
user = User.objects.filter(is_active=True, username=username)
if not user:
raise forms.ValidationError("No user with this name exists.")
return username
class RecoverUsernameForm(forms.Form):
email = forms.EmailField(label='email', widget=Html5EmailInput(attrs={'required': None}))
captcha = Html5CaptchaField(required=True)
def clean_email(self):
email = self.cleaned_data['email']
user = User.objects.filter(is_active=True, email=email)
if not user:
raise forms.ValidationError("No user with this email exists.")
return email
class RegisterForm(forms.Form):
username = forms.RegexField(label="Username", min_length=3, regex=r'^[\w.@+-]+$',
error_messages={'invalid': 'Required. 30 characters or fewer. Letters, numbers and @/./+/-/_ characters.'},
widget=forms.TextInput(attrs={'pattern': r'[\w.@+-]{3,30}', 'title': '30 characters or fewer. Letters, numbers and @/./+/-/_ characters', 'required': None, 'placeholder': 'Username'}))
password1 = forms.CharField(label='Password', min_length=8,
widget=forms.PasswordInput(render_value=False, attrs={'pattern': r'.{8,}', 'title': '8 characters are required', 'required': None, 'placeholder': 'Password'}))
password2 = forms.CharField(label='Password again', min_length=8,
widget=forms.PasswordInput(render_value=False, attrs={'pattern': r'.{8,}', 'title': '8 characters are required', 'required': None, 'placeholder': 'Password again'}))
email = forms.EmailField(required=True, widget=Html5EmailInput(attrs={'required': None, 'placeholder': 'Email'}))
captcha = Html5CaptchaField(required=True)
def clean_username(self):
username = self.cleaned_data['username']
users = User.objects.filter(username=username)
if users:
raise forms.ValidationError(
u"A user with this username already exists.")
return username
def clean_email(self):
email = self.cleaned_data['email']
users = User.objects.filter(email=email)
if users:
raise forms.ValidationError(
u"A user with this email address already exists.")
return email
def clean_password2(self):
password1 = self.cleaned_data.get('password1')
password2 = self.cleaned_data.get('password2')
if password1 != password2:
raise forms.ValidationError(
u"The password doesn't match the other.")
return password2
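# Hypothetical usage sketch (not part of this module); the view wiring below is
# an assumption for illustration only:
#
#   form = PasswordChangeForm(request.POST, current_user=request.user)
#   if form.is_valid():
#       form.save()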
|
upTee/upTee
|
uptee/accounts/forms.py
|
Python
|
bsd-3-clause
| 7,709
| 0.004281
|
"""
This page is in the table of contents.
Plugin to home the tool at beginning of each layer.
The home manual page is at:
http://fabmetheus.crsndoo.com/wiki/index.php/Skeinforge_Home
==Operation==
The default 'Activate Home' checkbox is on. When it is on, the functions described below will work, when it is off, nothing will be done.
==Settings==
===Name of Home File===
Default: home.gcode
At the beginning of each layer, home will add the commands of a gcode script with the name of the "Name of Home File" setting, if one exists. Home does not care if the text file names are capitalized, but some file systems do not handle file name cases properly, so to be on the safe side you should give them lower case names. Home looks for those files in the alterations folder in the .skeinforge folder in the home directory. If it doesn't find the file, it then looks in the alterations folder in the skeinforge_plugins folder.
==Examples==
The following examples home the file Screw Holder Bottom.stl. The examples are run in a terminal in the folder which contains Screw Holder Bottom.stl and home.py.
> python home.py
This brings up the home dialog.
> python home.py Screw Holder Bottom.stl
The home tool is parsing the file:
Screw Holder Bottom.stl
..
The home tool has created the file:
.. Screw Holder Bottom_home.gcode
"""
from __future__ import absolute_import
#Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module.
import __init__
from fabmetheus_utilities.fabmetheus_tools import fabmetheus_interpret
from fabmetheus_utilities.vector3 import Vector3
from fabmetheus_utilities import archive
from fabmetheus_utilities import euclidean
from fabmetheus_utilities import gcodec
from fabmetheus_utilities import settings
from skeinforge_application.skeinforge_utilities import skeinforge_craft
from skeinforge_application.skeinforge_utilities import skeinforge_polyfile
from skeinforge_application.skeinforge_utilities import skeinforge_profile
import math
import os
import sys
__author__ = 'Enrique Perez (perez_enrique@yahoo.com)'
__date__ = '$Date: 2008/21/04 $'
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
def getCraftedText( fileName, text, repository = None ):
"Home a gcode linear move file or text."
return getCraftedTextFromText(archive.getTextIfEmpty(fileName, text), repository)
def getCraftedTextFromText( gcodeText, repository = None ):
"Home a gcode linear move text."
if gcodec.isProcedureDoneOrFileIsEmpty( gcodeText, 'home'):
return gcodeText
if repository == None:
repository = settings.getReadRepository( HomeRepository() )
if not repository.activateHome.value:
return gcodeText
return HomeSkein().getCraftedGcode(gcodeText, repository)
def getNewRepository():
'Get new repository.'
return HomeRepository()
def writeOutput(fileName, shouldAnalyze=True):
"Home a gcode linear move file. Chain home the gcode if it is not already homed."
skeinforge_craft.writeChainTextWithNounMessage(fileName, 'home', shouldAnalyze)
class HomeRepository:
"A class to handle the home settings."
def __init__(self):
"Set the default settings, execute title & settings fileName."
skeinforge_profile.addListsToCraftTypeRepository('skeinforge_application.skeinforge_plugins.craft_plugins.home.html', self)
self.fileNameInput = settings.FileNameInput().getFromFileName( fabmetheus_interpret.getGNUTranslatorGcodeFileTypeTuples(), 'Open File for Home', self, '')
self.openWikiManualHelpPage = settings.HelpPage().getOpenFromAbsolute('http://fabmetheus.crsndoo.com/wiki/index.php/Skeinforge_Home')
self.activateHome = settings.BooleanSetting().getFromValue('Activate Home', self, True )
self.nameOfHomeFile = settings.StringSetting().getFromValue('Name of Home File:', self, 'home.gcode')
self.executeTitle = 'Home'
def execute(self):
"Home button has been clicked."
fileNames = skeinforge_polyfile.getFileOrDirectoryTypesUnmodifiedGcode(self.fileNameInput.value, fabmetheus_interpret.getImportPluginFileNames(), self.fileNameInput.wasCancelled)
for fileName in fileNames:
writeOutput(fileName)
class HomeSkein:
"A class to home a skein of extrusions."
def __init__(self):
self.distanceFeedRate = gcodec.DistanceFeedRate()
self.extruderActive = False
self.highestZ = None
self.homeLines = []
self.layerCount = settings.LayerCount()
self.lineIndex = 0
self.lines = None
self.oldLocation = None
self.shouldHome = False
self.travelFeedRateMinute = 957.0
def addFloat( self, begin, end ):
"Add dive to the original height."
beginEndDistance = begin.distance(end)
alongWay = self.absolutePerimeterWidth / beginEndDistance
closeToEnd = euclidean.getIntermediateLocation( alongWay, end, begin )
closeToEnd.z = self.highestZ
self.distanceFeedRate.addLine( self.distanceFeedRate.getLinearGcodeMovementWithFeedRate( self.travelFeedRateMinute, closeToEnd.dropAxis(), closeToEnd.z ) )
def addHomeTravel( self, splitLine ):
"Add the home travel gcode."
location = gcodec.getLocationFromSplitLine(self.oldLocation, splitLine)
self.highestZ = max( self.highestZ, location.z )
if not self.shouldHome:
return
self.shouldHome = False
if self.oldLocation == None:
return
if self.extruderActive:
self.distanceFeedRate.addLine('M103')
self.addHopUp( self.oldLocation )
self.distanceFeedRate.addLinesSetAbsoluteDistanceMode(self.homeLines)
self.addHopUp( self.oldLocation )
self.addFloat( self.oldLocation, location )
if self.extruderActive:
self.distanceFeedRate.addLine('M101')
def addHopUp(self, location):
"Add hop to highest point."
locationUp = Vector3( location.x, location.y, self.highestZ )
self.distanceFeedRate.addLine( self.distanceFeedRate.getLinearGcodeMovementWithFeedRate( self.travelFeedRateMinute, locationUp.dropAxis(), locationUp.z ) )
def getCraftedGcode( self, gcodeText, repository ):
"Parse gcode text and store the home gcode."
self.repository = repository
self.homeLines = settings.getAlterationFileLines(repository.nameOfHomeFile.value)
if len(self.homeLines) < 1:
return gcodeText
self.lines = archive.getTextLines(gcodeText)
self.parseInitialization( repository )
for self.lineIndex in xrange(self.lineIndex, len(self.lines)):
line = self.lines[self.lineIndex]
self.parseLine(line)
return self.distanceFeedRate.output.getvalue()
def parseInitialization( self, repository ):
'Parse gcode initialization and store the parameters.'
for self.lineIndex in xrange(len(self.lines)):
line = self.lines[self.lineIndex]
splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
firstWord = gcodec.getFirstWord(splitLine)
self.distanceFeedRate.parseSplitLine(firstWord, splitLine)
if firstWord == '(</extruderInitialization>)':
self.distanceFeedRate.addTagBracketedProcedure('home')
return
elif firstWord == '(<perimeterWidth>':
self.absolutePerimeterWidth = abs(float(splitLine[1]))
elif firstWord == '(<travelFeedRatePerSecond>':
self.travelFeedRateMinute = 60.0 * float(splitLine[1])
self.distanceFeedRate.addLine(line)
def parseLine(self, line):
"Parse a gcode line and add it to the bevel gcode."
splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
if len(splitLine) < 1:
return
firstWord = splitLine[0]
if firstWord == 'G1':
self.addHomeTravel(splitLine)
self.oldLocation = gcodec.getLocationFromSplitLine(self.oldLocation, splitLine)
elif firstWord == '(<layer>':
self.layerCount.printProgressIncrement('home')
if len(self.homeLines) > 0:
self.shouldHome = True
elif firstWord == 'M101':
self.extruderActive = True
elif firstWord == 'M103':
self.extruderActive = False
self.distanceFeedRate.addLine(line)
def main():
"Display the home dialog."
if len(sys.argv) > 1:
writeOutput(' '.join(sys.argv[1 :]))
else:
settings.startMainLoopFromConstructor(getNewRepository())
if __name__ == "__main__":
main()
|
makerbot/ReplicatorG
|
skein_engines/skeinforge-47/skeinforge_application/skeinforge_plugins/craft_plugins/home.py
|
Python
|
gpl-2.0
| 8,040
| 0.023383
|
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2014, 2015 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Unit tests for the parser engine."""
__revision__ = \
"$Id$"
import tempfile
from flask.ext.registry import PkgResourcesDirDiscoveryRegistry, \
ImportPathRegistry, RegistryProxy
from invenio.base.wrappers import lazy_import
from invenio.testsuite import make_test_suite, run_test_suite, InvenioTestCase
Field_parser = lazy_import('invenio.modules.jsonalchemy.parser:FieldParser')
Model_parser = lazy_import('invenio.modules.jsonalchemy.parser:ModelParser')
guess_legacy_field_names = lazy_import(
'invenio.modules.jsonalchemy.parser:guess_legacy_field_names')
get_producer_rules = lazy_import(
'invenio.modules.jsonalchemy.parser:get_producer_rules')
TEST_PACKAGE = 'invenio.modules.jsonalchemy.testsuite'
test_registry = RegistryProxy('testsuite', ImportPathRegistry,
initial=[TEST_PACKAGE])
field_definitions = lambda: PkgResourcesDirDiscoveryRegistry(
'fields', registry_namespace=test_registry)
model_definitions = lambda: PkgResourcesDirDiscoveryRegistry(
'models', registry_namespace=test_registry)
def clean_field_model_definitions():
Field_parser._field_definitions = {}
Field_parser._legacy_field_matchings = {}
Model_parser._model_definitions = {}
class TestParser(InvenioTestCase):
def setUp(self):
self.app.extensions['registry'][
'testsuite.fields'] = field_definitions()
self.app.extensions['registry'][
'testsuite.models'] = model_definitions()
def tearDown(self):
del self.app.extensions['registry']['testsuite.fields']
del self.app.extensions['registry']['testsuite.models']
def test_wrong_indent(self):
"""JSONAlchemy - wrong indent"""
from invenio.modules.jsonalchemy.parser import _create_field_parser
import pyparsing
parser = _create_field_parser()
test = """
foo:
creator:
bar, '1', foo()
"""
self.assertRaises(pyparsing.ParseException, parser.parseString, test)
from invenio.modules.jsonalchemy.errors import FieldParserException
tmp_file = tempfile.NamedTemporaryFile()
config = """
foo:
creator:
bar, '1', foo()
"""
tmp_file.write(config)
tmp_file.flush()
self.app.extensions['registry'][
'testsuite.fields'].register(tmp_file.name)
clean_field_model_definitions()
self.assertRaises(
FieldParserException, Field_parser.reparse, 'testsuite')
tmp_file.close()
clean_field_model_definitions()
def test_wrong_field_definitions(self):
"""JSONAlchemy - wrong field definitions"""
from invenio.modules.jsonalchemy.errors import FieldParserException
tmp_file_4 = tempfile.NamedTemporaryFile()
config_4 = '''
title:
creator:
marc, '245__', value
'''
tmp_file_4.write(config_4)
tmp_file_4.flush()
clean_field_model_definitions()
self.app.extensions['registry'][
'testsuite.fields'].register(tmp_file_4.name)
self.assertRaises(
FieldParserException, Field_parser.reparse, 'testsuite')
tmp_file_4.close()
clean_field_model_definitions()
def test_wrong_field_inheritance(self):
"""JSONAlchmey - not parent field definition"""
from invenio.modules.jsonalchemy.errors import FieldParserException
tmp_file_5 = tempfile.NamedTemporaryFile()
config_5 = '''
@extend
wrong_field:
""" Desc """
'''
tmp_file_5.write(config_5)
tmp_file_5.flush()
clean_field_model_definitions()
self.app.extensions['registry'][
'testsuite.fields'].register(tmp_file_5.name)
self.assertRaises(
FieldParserException, Field_parser.reparse, 'testsuite')
tmp_file_5.close()
clean_field_model_definitions()
def test_field_rules(self):
"""JsonAlchemy - field parser"""
self.assertTrue(len(Field_parser.field_definitions('testsuite')) >= 22)
# Check that all files are parsed
self.assertTrue(
'authors' in Field_parser.field_definitions('testsuite'))
self.assertTrue('title' in Field_parser.field_definitions('testsuite'))
# Check work around for [n] and [0]
self.assertTrue(
Field_parser.field_definitions('testsuite')['doi']['pid'])
        # Check if derived and calculated fields are well parsed
self.assertTrue('dummy' in Field_parser.field_definitions('testsuite'))
self.assertEquals(
Field_parser.field_definitions('testsuite')['dummy']['pid'], 2)
self.assertEquals(Field_parser.field_definitions(
'testsuite')['dummy']['rules'].keys(), ['json', 'derived'])
        self.assertEquals(
            len(Field_parser.field_definitions(
                'testsuite')['dummy']['producer']),
            2
        )
self.assertTrue(Field_parser.field_definitions('testsuite')['_random'])
# Check override
value = {'a': 'a', 'b': 'b', 'k': 'k'} # noqa
self.assertEquals(
eval(Field_parser.field_definitions('testsuite')
['title']['rules']['marc'][1]['function']),
{'form': 'k', 'subtitle': 'b', 'title': 'a'})
# Check extras
self.assertTrue(
'json_ext' in
Field_parser.field_definitions('testsuite')['modification_date']
)
tmp = Field_parser.field_definitions('testsuite')
Field_parser.reparse('testsuite')
self.assertEquals(
len(Field_parser.field_definitions('testsuite')), len(tmp))
def test_field_hidden_decorator(self):
"""JsonAlchemy - field hidden decorator."""
# Check that all files are parsed
self.assertTrue(
'hidden_basic' in Field_parser.field_definitions('testsuite'))
# Check default hidden value
self.assertFalse(
Field_parser.field_definitions('testsuite')['_id']['hidden'])
# Check hidden field
self.assertTrue(Field_parser.field_definitions(
'testsuite')['hidden_basic']['hidden'])
def test_wrong_field_name_inside_model(self):
"""JSONAlchmey - wrong field name inside model"""
from invenio.modules.jsonalchemy.errors import ModelParserException
tmp_file_8 = tempfile.NamedTemporaryFile()
config_8 = '''
fields:
not_existing_field
'''
tmp_file_8.write(config_8)
tmp_file_8.flush()
clean_field_model_definitions()
self.app.extensions['registry'][
'testsuite.models'].register(tmp_file_8.name)
self.assertRaises(
ModelParserException, Model_parser.reparse, 'testsuite')
tmp_file_8.close()
clean_field_model_definitions()
def test_model_definitions(self):
"""JsonAlchemy - model parser"""
clean_field_model_definitions()
self.assertTrue(len(Model_parser.model_definitions('testsuite')) >= 2)
self.assertTrue(
'test_base' in Model_parser.model_definitions('testsuite'))
tmp = Model_parser.model_definitions('testsuite')
Model_parser.reparse('testsuite')
self.assertEquals(
len(Model_parser.model_definitions('testsuite')), len(tmp))
clean_field_model_definitions()
def test_resolve_several_models(self):
"""JSONAlchemy - test resolve several models"""
test_model = Model_parser.model_definitions('testsuite')['test_model']
clean_field_model_definitions()
self.assertEquals(
Model_parser.resolve_models('test_model', 'testsuite')['fields'],
test_model['fields'])
self.assertEquals(
Model_parser.resolve_models(
['test_base', 'test_model'], 'testsuite')['fields'],
test_model['fields'])
clean_field_model_definitions()
def test_field_name_model_based(self):
"""JSONAlchemy - field name model based"""
clean_field_model_definitions()
field_model_def = Field_parser.field_definition_model_based(
'title', 'test_model', 'testsuite')
field_def = Field_parser.field_definitions('testsuite')['title_title']
value = {'a': 'Awesome title', 'b': 'sub title', 'k': 'form'}
from invenio.base.utils import try_to_eval
self.assertEqual(
try_to_eval(field_model_def['rules'][
'marc'][0]['function'], value=value),
try_to_eval(field_def['rules']['marc'][0]['function'],
value=value))
clean_field_model_definitions()
def test_guess_legacy_field_names(self):
"""JsonAlchemy - check legacy field names"""
self.assertEquals(
guess_legacy_field_names(('100__a', '245'), 'marc', 'testsuite'),
{'100__a': ['_first_author.full_name'], '245': ['title']})
self.assertEquals(
guess_legacy_field_names('foo', 'bar', 'baz'), {'foo': []})
def test_get_producer_rules(self):
"""JsonAlchemy - check producer rules"""
clean_field_model_definitions()
self.assertEquals(
len(get_producer_rules('keywords', 'json_for_marc', 'testsuite')),
1
)
self.assertRaises(
KeyError,
lambda: get_producer_rules('foo', 'json_for_marc', 'testsuite'))
clean_field_model_definitions()
TEST_SUITE = make_test_suite(TestParser)
if __name__ == '__main__':
run_test_suite(TEST_SUITE)
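# Hypothetical usage sketch (not part of this test module); it assumes an
# application context with the 'testsuite' registries populated as in setUp():
#
#   definitions = Field_parser.field_definitions('testsuite')
#   rules = get_producer_rules('keywords', 'json_for_marc', 'testsuite')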
|
egabancho/invenio
|
invenio/modules/jsonalchemy/testsuite/test_parser.py
|
Python
|
gpl-2.0
| 10,487
| 0.00143
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import unicode_literals
import json
import os
import sys
import environment as env
import products
import testloader
import wptcommandline
import wptlogging
import wpttest
from testrunner import ManagerGroup
here = os.path.split(__file__)[0]
logger = None
"""Runner for web-platform-tests
The runner has several design goals:
* Tests should run with no modification from upstream.
* Tests should be regarded as "untrusted" so that errors, timeouts and even
crashes in the tests can be handled without failing the entire test run.
* For performance, tests can be run in multiple browsers in parallel.
The upstream repository has the facility for creating a test manifest in JSON
format. This manifest is used directly to determine which tests exist. Local
metadata files are used to store the expected test results.
"""
def setup_logging(*args, **kwargs):
global logger
logger = wptlogging.setup(*args, **kwargs)
def get_loader(test_paths, product, ssl_env, debug=None, run_info_extras=None, **kwargs):
if run_info_extras is None:
run_info_extras = {}
run_info = wpttest.get_run_info(kwargs["run_info"], product, debug=debug,
extras=run_info_extras)
test_manifests = testloader.ManifestLoader(test_paths, force_manifest_update=kwargs["manifest_update"]).load()
manifest_filters = []
meta_filters = []
if kwargs["include"] or kwargs["exclude"] or kwargs["include_manifest"]:
manifest_filters.append(testloader.TestFilter(include=kwargs["include"],
exclude=kwargs["exclude"],
manifest_path=kwargs["include_manifest"],
test_manifests=test_manifests))
if kwargs["tags"]:
meta_filters.append(testloader.TagFilter(tags=kwargs["tags"]))
test_loader = testloader.TestLoader(test_manifests,
kwargs["test_types"],
run_info,
manifest_filters=manifest_filters,
meta_filters=meta_filters,
chunk_type=kwargs["chunk_type"],
total_chunks=kwargs["total_chunks"],
chunk_number=kwargs["this_chunk"],
include_https=ssl_env.ssl_enabled)
return run_info, test_loader
def list_test_groups(test_paths, product, **kwargs):
env.do_delayed_imports(logger, test_paths)
ssl_env = env.ssl_env(logger, **kwargs)
run_info, test_loader = get_loader(test_paths, product, ssl_env,
**kwargs)
for item in sorted(test_loader.groups(kwargs["test_types"])):
print item
def list_disabled(test_paths, product, **kwargs):
env.do_delayed_imports(logger, test_paths)
rv = []
ssl_env = env.ssl_env(logger, **kwargs)
run_info, test_loader = get_loader(test_paths, product, ssl_env,
**kwargs)
for test_type, tests in test_loader.disabled_tests.iteritems():
for test in tests:
rv.append({"test": test.id, "reason": test.disabled()})
print json.dumps(rv, indent=2)
def get_pause_after_test(test_loader, **kwargs):
total_tests = sum(len(item) for item in test_loader.tests.itervalues())
if kwargs["pause_after_test"] is None:
if kwargs["repeat_until_unexpected"]:
return False
if kwargs["repeat"] == 1 and total_tests == 1:
return True
return False
return kwargs["pause_after_test"]
def run_tests(config, test_paths, product, **kwargs):
with wptlogging.CaptureIO(logger, not kwargs["no_capture_stdio"]):
env.do_delayed_imports(logger, test_paths)
(check_args,
browser_cls, get_browser_kwargs,
executor_classes, get_executor_kwargs,
env_options, run_info_extras) = products.load_product(config, product)
ssl_env = env.ssl_env(logger, **kwargs)
check_args(**kwargs)
if "test_loader" in kwargs:
run_info = wpttest.get_run_info(kwargs["run_info"], product, debug=None,
extras=run_info_extras(**kwargs))
test_loader = kwargs["test_loader"]
else:
run_info, test_loader = get_loader(test_paths,
product,
ssl_env,
run_info_extras=run_info_extras(**kwargs),
**kwargs)
if kwargs["run_by_dir"] is False:
test_source_cls = testloader.SingleTestSource
test_source_kwargs = {}
else:
# A value of None indicates infinite depth
test_source_cls = testloader.PathGroupedSource
test_source_kwargs = {"depth": kwargs["run_by_dir"]}
logger.info("Using %i client processes" % kwargs["processes"])
unexpected_total = 0
kwargs["pause_after_test"] = get_pause_after_test(test_loader, **kwargs)
with env.TestEnvironment(test_paths,
ssl_env,
kwargs["pause_after_test"],
kwargs["debug_info"],
env_options) as test_environment:
try:
test_environment.ensure_started()
except env.TestEnvironmentError as e:
logger.critical("Error starting test environment: %s" % e.message)
raise
browser_kwargs = get_browser_kwargs(ssl_env=ssl_env, **kwargs)
repeat = kwargs["repeat"]
repeat_count = 0
repeat_until_unexpected = kwargs["repeat_until_unexpected"]
while repeat_count < repeat or repeat_until_unexpected:
repeat_count += 1
if repeat_until_unexpected:
logger.info("Repetition %i" % (repeat_count))
elif repeat > 1:
logger.info("Repetition %i / %i" % (repeat_count, repeat))
unexpected_count = 0
logger.suite_start(test_loader.test_ids, run_info)
for test_type in kwargs["test_types"]:
logger.info("Running %s tests" % test_type)
for test in test_loader.disabled_tests[test_type]:
logger.test_start(test.id)
logger.test_end(test.id, status="SKIP")
executor_cls = executor_classes.get(test_type)
executor_kwargs = get_executor_kwargs(test_type,
test_environment.external_config,
test_environment.cache_manager,
run_info,
**kwargs)
if executor_cls is None:
logger.error("Unsupported test type %s for product %s" %
(test_type, product))
continue
with ManagerGroup("web-platform-tests",
kwargs["processes"],
test_source_cls,
test_source_kwargs,
browser_cls,
browser_kwargs,
executor_cls,
executor_kwargs,
kwargs["pause_after_test"],
kwargs["pause_on_unexpected"],
kwargs["restart_on_unexpected"],
kwargs["debug_info"]) as manager_group:
try:
manager_group.run(test_type, test_loader.tests)
except KeyboardInterrupt:
logger.critical("Main thread got signal")
manager_group.stop()
raise
unexpected_count += manager_group.unexpected_count()
unexpected_total += unexpected_count
logger.info("Got %i unexpected results" % unexpected_count)
if repeat_until_unexpected and unexpected_total > 0:
break
logger.suite_end()
return unexpected_total == 0
def main():
"""Main entry point when calling from the command line"""
kwargs = wptcommandline.parse_args()
try:
if kwargs["prefs_root"] is None:
kwargs["prefs_root"] = os.path.abspath(os.path.join(here, "prefs"))
setup_logging(kwargs, {"raw": sys.stdout})
if kwargs["list_test_groups"]:
list_test_groups(**kwargs)
elif kwargs["list_disabled"]:
list_disabled(**kwargs)
else:
return not run_tests(**kwargs)
except Exception:
if kwargs["pdb"]:
import pdb, traceback
print traceback.format_exc()
pdb.post_mortem()
else:
raise
|
shinglyu/servo
|
tests/wpt/harness/wptrunner/wptrunner.py
|
Python
|
mpl-2.0
| 9,731
| 0.00185
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Shared utilities used by both downloads and uploads."""
from __future__ import absolute_import
import base64
import hashlib
import logging
import random
import warnings
from urllib.parse import parse_qs
from urllib.parse import urlencode
from urllib.parse import urlsplit
from urllib.parse import urlunsplit
from google.resumable_media import common
RANGE_HEADER = "range"
CONTENT_RANGE_HEADER = "content-range"
_SLOW_CRC32C_WARNING = (
"Currently using crcmod in pure python form. This is a slow "
"implementation. Python 3 has a faster implementation, `google-crc32c`, "
"which will be used if it is installed."
)
_GENERATION_HEADER = "x-goog-generation"
_HASH_HEADER = "x-goog-hash"
_MISSING_CHECKSUM = """\
No {checksum_type} checksum was returned from the service while downloading {}
(which happens for composite objects), so client-side content integrity
checking is not being performed."""
_LOGGER = logging.getLogger(__name__)
def do_nothing():
"""Simple default callback."""
def header_required(response, name, get_headers, callback=do_nothing):
"""Checks that a specific header is in a headers dictionary.
Args:
response (object): An HTTP response object, expected to have a
``headers`` attribute that is a ``Mapping[str, str]``.
name (str): The name of a required header.
get_headers (Callable[Any, Mapping[str, str]]): Helper to get headers
from an HTTP response.
callback (Optional[Callable]): A callback that takes no arguments,
to be executed when an exception is being raised.
Returns:
str: The desired header.
Raises:
~google.resumable_media.common.InvalidResponse: If the header
is missing.
"""
headers = get_headers(response)
if name not in headers:
callback()
raise common.InvalidResponse(
response, "Response headers must contain header", name
)
return headers[name]
def require_status_code(response, status_codes, get_status_code, callback=do_nothing):
"""Require a response has a status code among a list.
Args:
response (object): The HTTP response object.
status_codes (tuple): The acceptable status codes.
get_status_code (Callable[Any, int]): Helper to get a status code
from a response.
callback (Optional[Callable]): A callback that takes no arguments,
to be executed when an exception is being raised.
Returns:
int: The status code.
Raises:
~google.resumable_media.common.InvalidResponse: If the status code
is not one of the values in ``status_codes``.
"""
status_code = get_status_code(response)
if status_code not in status_codes:
if status_code not in common.RETRYABLE:
callback()
raise common.InvalidResponse(
response,
"Request failed with status code",
status_code,
"Expected one of",
*status_codes
)
return status_code
def calculate_retry_wait(base_wait, max_sleep, multiplier=2.0):
"""Calculate the amount of time to wait before a retry attempt.
Wait time grows exponentially with the number of attempts, until
``max_sleep``.
A random amount of jitter (between 0 and 1 seconds) is added to spread out
retry attempts from different clients.
Args:
base_wait (float): The "base" wait time (i.e. without any jitter)
that will be multiplied until it reaches the maximum sleep.
max_sleep (float): Maximum value that a sleep time is allowed to be.
multiplier (float): Multiplier to apply to the base wait.
Returns:
Tuple[float, float]: The new base wait time as well as the wait time
to be applied (with a random amount of jitter between 0 and 1 seconds
added).
"""
new_base_wait = multiplier * base_wait
if new_base_wait > max_sleep:
new_base_wait = max_sleep
jitter_ms = random.randint(0, 1000)
return new_base_wait, new_base_wait + 0.001 * jitter_ms
def _get_crc32c_object():
"""Get crc32c object
Attempt to use the Google-CRC32c package. If it isn't available, try
to use CRCMod. CRCMod might be using a 'slow' varietal. If so, warn...
"""
try:
import google_crc32c # type: ignore
crc_obj = google_crc32c.Checksum()
except ImportError:
try:
import crcmod # type: ignore
crc_obj = crcmod.predefined.Crc("crc-32c")
_is_fast_crcmod()
except ImportError:
raise ImportError("Failed to import either `google-crc32c` or `crcmod`")
return crc_obj
def _is_fast_crcmod():
# Determine if this is using the slow form of crcmod.
nested_crcmod = __import__(
"crcmod.crcmod",
globals(),
locals(),
["_usingExtension"],
0,
)
fast_crc = getattr(nested_crcmod, "_usingExtension", False)
if not fast_crc:
warnings.warn(_SLOW_CRC32C_WARNING, RuntimeWarning, stacklevel=2)
return fast_crc
def _get_metadata_key(checksum_type):
if checksum_type == "md5":
return "md5Hash"
else:
return checksum_type
def prepare_checksum_digest(digest_bytestring):
"""Convert a checksum object into a digest encoded for an HTTP header.
Args:
        digest_bytestring (bytes): A checksum digest bytestring.
Returns:
str: A base64 string representation of the input.
"""
encoded_digest = base64.b64encode(digest_bytestring)
# NOTE: ``b64encode`` returns ``bytes``, but HTTP headers expect ``str``.
return encoded_digest.decode("utf-8")
def _get_expected_checksum(response, get_headers, media_url, checksum_type):
"""Get the expected checksum and checksum object for the download response.
Args:
response (~requests.Response): The HTTP response object.
get_headers (callable: response->dict): returns response headers.
media_url (str): The URL containing the media to be downloaded.
checksum_type Optional(str): The checksum type to read from the headers,
exactly as it will appear in the headers (case-sensitive). Must be
"md5", "crc32c" or None.
Returns:
Tuple (Optional[str], object): The expected checksum of the response,
if it can be detected from the ``X-Goog-Hash`` header, and the
appropriate checksum object for the expected checksum.
"""
if checksum_type not in ["md5", "crc32c", None]:
raise ValueError("checksum must be ``'md5'``, ``'crc32c'`` or ``None``")
elif checksum_type in ["md5", "crc32c"]:
headers = get_headers(response)
expected_checksum = _parse_checksum_header(
headers.get(_HASH_HEADER), response, checksum_label=checksum_type
)
if expected_checksum is None:
msg = _MISSING_CHECKSUM.format(
media_url, checksum_type=checksum_type.upper()
)
_LOGGER.info(msg)
checksum_object = _DoNothingHash()
else:
if checksum_type == "md5":
checksum_object = hashlib.md5()
else:
checksum_object = _get_crc32c_object()
else:
expected_checksum = None
checksum_object = _DoNothingHash()
return (expected_checksum, checksum_object)
def _parse_checksum_header(header_value, response, checksum_label):
"""Parses the checksum header from an ``X-Goog-Hash`` value.
.. _header reference: https://cloud.google.com/storage/docs/\
xml-api/reference-headers#xgooghash
Expects ``header_value`` (if not :data:`None`) to be in one of the three
following formats:
* ``crc32c=n03x6A==``
* ``md5=Ojk9c3dhfxgoKVVHYwFbHQ==``
* ``crc32c=n03x6A==,md5=Ojk9c3dhfxgoKVVHYwFbHQ==``
See the `header reference`_ for more information.
Args:
header_value (Optional[str]): The ``X-Goog-Hash`` header from
a download response.
response (~requests.Response): The HTTP response object.
checksum_label (str): The label of the header value to read, as in the
examples above. Typically "md5" or "crc32c"
Returns:
Optional[str]: The expected checksum of the response, if it
can be detected from the ``X-Goog-Hash`` header; otherwise, None.
Raises:
~google.resumable_media.common.InvalidResponse: If there are
multiple checksums of the requested type in ``header_value``.
"""
if header_value is None:
return None
matches = []
for checksum in header_value.split(","):
name, value = checksum.split("=", 1)
        # Official docs say "," is the separator, but ", " has been seen in real-world responses
if name.lstrip() == checksum_label:
matches.append(value)
if len(matches) == 0:
return None
elif len(matches) == 1:
return matches[0]
else:
raise common.InvalidResponse(
response,
"X-Goog-Hash header had multiple ``{}`` values.".format(checksum_label),
header_value,
matches,
)
def _get_checksum_object(checksum_type):
"""Respond with a checksum object for a supported type, if not None.
Raises ValueError if checksum_type is unsupported.
"""
if checksum_type == "md5":
return hashlib.md5()
elif checksum_type == "crc32c":
return _get_crc32c_object()
elif checksum_type is None:
return None
else:
raise ValueError("checksum must be ``'md5'``, ``'crc32c'`` or ``None``")
def _parse_generation_header(response, get_headers):
"""Parses the generation header from an ``X-Goog-Generation`` value.
Args:
response (~requests.Response): The HTTP response object.
get_headers (callable: response->dict): returns response headers.
Returns:
Optional[long]: The object generation from the response, if it
can be detected from the ``X-Goog-Generation`` header; otherwise, None.
"""
headers = get_headers(response)
object_generation = headers.get(_GENERATION_HEADER, None)
if object_generation is None:
return None
else:
return int(object_generation)
def _get_generation_from_url(media_url):
"""Retrieve the object generation query param specified in the media url.
Args:
media_url (str): The URL containing the media to be downloaded.
Returns:
long: The object generation from the media url if exists; otherwise, None.
"""
_, _, _, query, _ = urlsplit(media_url)
query_params = parse_qs(query)
object_generation = query_params.get("generation", None)
if object_generation is None:
return None
else:
return int(object_generation[0])
def add_query_parameters(media_url, query_params):
"""Add query parameters to a base url.
Args:
media_url (str): The URL containing the media to be downloaded.
query_params (dict): Names and values of the query parameters to add.
Returns:
str: URL with additional query strings appended.
"""
if len(query_params) == 0:
return media_url
scheme, netloc, path, query, frag = urlsplit(media_url)
params = parse_qs(query)
new_params = {**params, **query_params}
query = urlencode(new_params, doseq=True)
return urlunsplit((scheme, netloc, path, query, frag))
class _DoNothingHash(object):
"""Do-nothing hash object.
Intended as a stand-in for ``hashlib.md5`` or a crc32c checksum
implementation in cases where it isn't necessary to compute the hash.
"""
def update(self, unused_chunk):
"""Do-nothing ``update`` method.
Intended to match the interface of ``hashlib.md5`` and other checksums.
Args:
unused_chunk (bytes): A chunk of data.
"""
|
googleapis/google-resumable-media-python
|
google/resumable_media/_helpers.py
|
Python
|
apache-2.0
| 12,563
| 0.000637
|
import uuid
import logging
import threading
from sleekxmpp import Message, Iq
from sleekxmpp.exceptions import XMPPError
from sleekxmpp.xmlstream.handler import Callback
from sleekxmpp.xmlstream.matcher import StanzaPath
from sleekxmpp.xmlstream import register_stanza_plugin
from sleekxmpp.plugins import BasePlugin
from sleekxmpp.plugins.xep_0047 import stanza, Open, Close, Data, IBBytestream
log = logging.getLogger(__name__)
class XEP_0047(BasePlugin):
name = 'xep_0047'
description = 'XEP-0047: In-band Bytestreams'
dependencies = set(['xep_0030'])
stanza = stanza
def plugin_init(self):
self.streams = {}
        self.pending_streams = {}
self.pending_close_streams = {}
self._stream_lock = threading.Lock()
self.max_block_size = self.config.get('max_block_size', 8192)
self.window_size = self.config.get('window_size', 1)
self.auto_accept = self.config.get('auto_accept', True)
self.accept_stream = self.config.get('accept_stream', None)
register_stanza_plugin(Iq, Open)
register_stanza_plugin(Iq, Close)
register_stanza_plugin(Iq, Data)
self.xmpp.register_handler(Callback(
'IBB Open',
StanzaPath('iq@type=set/ibb_open'),
self._handle_open_request))
self.xmpp.register_handler(Callback(
'IBB Close',
StanzaPath('iq@type=set/ibb_close'),
self._handle_close))
self.xmpp.register_handler(Callback(
'IBB Data',
StanzaPath('iq@type=set/ibb_data'),
self._handle_data))
def plugin_end(self):
self.xmpp.remove_handler('IBB Open')
self.xmpp.remove_handler('IBB Close')
self.xmpp.remove_handler('IBB Data')
self.xmpp['xep_0030'].del_feature(feature='http://jabber.org/protocol/ibb')
def session_bind(self, jid):
self.xmpp['xep_0030'].add_feature('http://jabber.org/protocol/ibb')
def _accept_stream(self, iq):
if self.accept_stream is not None:
return self.accept_stream(iq)
if self.auto_accept:
if iq['ibb_open']['block_size'] <= self.max_block_size:
return True
return False
def open_stream(self, jid, block_size=4096, sid=None, window=1,
ifrom=None, block=True, timeout=None, callback=None):
if sid is None:
sid = str(uuid.uuid4())
iq = self.xmpp.Iq()
iq['type'] = 'set'
iq['to'] = jid
iq['from'] = ifrom
iq['ibb_open']['block_size'] = block_size
iq['ibb_open']['sid'] = sid
iq['ibb_open']['stanza'] = 'iq'
stream = IBBytestream(self.xmpp, sid, block_size,
iq['to'], iq['from'], window)
        with self._stream_lock:
            self.pending_streams[iq['id']] = stream
if block:
resp = iq.send(timeout=timeout)
self._handle_opened_stream(resp)
return stream
else:
cb = None
if callback is not None:
def chained(resp):
self._handle_opened_stream(resp)
callback(resp)
cb = chained
else:
cb = self._handle_opened_stream
return iq.send(block=block, timeout=timeout, callback=cb)
def _handle_opened_stream(self, iq):
if iq['type'] == 'result':
with self._stream_lock:
stream = self.pending_streams.get(iq['id'], None)
if stream is not None:
stream.sender = iq['to']
stream.receiver = iq['from']
stream.stream_started.set()
self.streams[stream.sid] = stream
self.xmpp.event('ibb_stream_start', stream)
with self._stream_lock:
if iq['id'] in self.pending_streams:
del self.pending_streams[iq['id']]
def _handle_open_request(self, iq):
sid = iq['ibb_open']['sid']
size = iq['ibb_open']['block_size']
if not self._accept_stream(iq):
raise XMPPError('not-acceptable')
if size > self.max_block_size:
raise XMPPError('resource-constraint')
stream = IBBytestream(self.xmpp, sid, size,
iq['from'], iq['to'],
self.window_size)
stream.stream_started.set()
self.streams[sid] = stream
iq.reply()
iq.send()
self.xmpp.event('ibb_stream_start', stream)
def _handle_data(self, iq):
sid = iq['ibb_data']['sid']
stream = self.streams.get(sid, None)
if stream is not None and iq['from'] != stream.sender:
stream._recv_data(iq)
else:
raise XMPPError('item-not-found')
def _handle_close(self, iq):
sid = iq['ibb_close']['sid']
stream = self.streams.get(sid, None)
if stream is not None and iq['from'] != stream.sender:
stream._closed(iq)
else:
raise XMPPError('item-not-found')
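# Illustrative usage sketch; not part of the original module. The JID, the
# payload, and the assumption that the stream object exposes sendall()/close()
# (as SleekXMPP's IBBytestream does) are illustrative only.
#
#     xmpp.register_plugin('xep_0047')          # enable In-band Bytestreams
#
#     def on_ibb_stream(stream):
#         # fires for both inbound and outbound streams (see 'ibb_stream_start' above)
#         stream.sendall(b'hello over IBB')
#         stream.close()
#
#     xmpp.add_event_handler('ibb_stream_start', on_ibb_stream)
#
#     # once the session is established, open an outbound stream:
#     stream = xmpp['xep_0047'].open_stream('peer@example.com/resource', block_size=4096)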
|
tiancj/emesene
|
emesene/e3/xmpp/SleekXMPP/sleekxmpp/plugins/xep_0047/ibb.py
|
Python
|
gpl-3.0
| 5,190
| 0.000193
|
# -*- coding: utf-8 -*-
"""
End-to-end tests for the main LMS Dashboard (aka, Student Dashboard).
"""
import six
from common.test.acceptance.fixtures.course import CourseFixture, XBlockFixtureDesc
from common.test.acceptance.pages.common.auto_auth import AutoAuthPage
from common.test.acceptance.pages.lms.dashboard import DashboardPage
from common.test.acceptance.tests.helpers import UniqueCourseTest, generate_course_key
DEFAULT_SHORT_DATE_FORMAT = u'{dt:%b} {dt.day}, {dt.year}'
TEST_DATE_FORMAT = u'{dt:%b} {dt.day}, {dt.year} {dt.hour:02}:{dt.minute:02}'
class BaseLmsDashboardTestMultiple(UniqueCourseTest):
""" Base test suite for the LMS Student Dashboard with Multiple Courses"""
def setUp(self):
"""
Initializes the components (page objects, courses, users) for this test suite
"""
# Some parameters are provided by the parent setUp() routine, such as the following:
# self.course_id, self.course_info, self.unique_id
super(BaseLmsDashboardTestMultiple, self).setUp()
# Load page objects for use by the tests
self.dashboard_page = DashboardPage(self.browser)
# Configure some aspects of the test course and install the settings into the course
self.courses = {
'A': {
'org': 'test_org',
'number': self.unique_id,
'run': 'test_run_A',
'display_name': 'Test Course A',
'enrollment_mode': 'audit',
'cert_name_long': 'Certificate of Audit Achievement'
},
'B': {
'org': 'test_org',
'number': self.unique_id,
'run': 'test_run_B',
'display_name': 'Test Course B',
'enrollment_mode': 'verified',
'cert_name_long': 'Certificate of Verified Achievement'
},
'C': {
'org': 'test_org',
'number': self.unique_id,
'run': 'test_run_C',
'display_name': 'Test Course C',
'enrollment_mode': 'credit',
'cert_name_long': 'Certificate of Credit Achievement'
}
}
self.username = "test_{uuid}".format(uuid=self.unique_id[0:6])
self.email = "{user}@example.com".format(user=self.username)
self.course_keys = {}
self.course_fixtures = {}
for key, value in six.iteritems(self.courses):
course_key = generate_course_key(
value['org'],
value['number'],
value['run'],
)
course_fixture = CourseFixture(
value['org'],
value['number'],
value['run'],
value['display_name'],
)
course_fixture.add_advanced_settings({
u"social_sharing_url": {u"value": "http://custom/course/url"},
u"cert_name_long": {u"value": value['cert_name_long']}
})
course_fixture.add_children(
XBlockFixtureDesc('chapter', 'Test Section 1').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection 1,1').add_children(
XBlockFixtureDesc('problem', 'Test Problem 1', data='<problem>problem 1 dummy body</problem>'),
XBlockFixtureDesc('html', 'html 1', data="<html>html 1 dummy body</html>"),
XBlockFixtureDesc('problem', 'Test Problem 2', data="<problem>problem 2 dummy body</problem>"),
XBlockFixtureDesc('html', 'html 2', data="<html>html 2 dummy body</html>"),
),
XBlockFixtureDesc('sequential', 'Test Subsection 1,2').add_children(
XBlockFixtureDesc('problem', 'Test Problem 3', data='<problem>problem 3 dummy body</problem>'),
),
XBlockFixtureDesc(
'sequential', 'Test HIDDEN Subsection', metadata={'visible_to_staff_only': True}
).add_children(
XBlockFixtureDesc('problem', 'Test HIDDEN Problem', data='<problem>hidden problem</problem>'),
),
)
).install()
self.course_keys[key] = course_key
self.course_fixtures[key] = course_fixture
# Create the test user, register them for the course, and authenticate
AutoAuthPage(
self.browser,
username=self.username,
email=self.email,
course_id=course_key,
enrollment_mode=value['enrollment_mode']
).visit()
# Navigate the authenticated, enrolled user to the dashboard page and get testing!
self.dashboard_page.visit()
class LmsDashboardA11yTest(BaseLmsDashboardTestMultiple):
"""
Class to test lms student dashboard accessibility.
"""
a11y = True
def test_dashboard_course_listings_a11y(self):
"""
Test the accessibility of the course listings
"""
self.dashboard_page.a11y_audit.config.set_rules({
"ignore": [
'aria-valid-attr', # TODO: LEARNER-6611 & LEARNER-6865
'button-name', # TODO: AC-935
'landmark-no-duplicate-banner', # TODO: AC-934
'landmark-complementary-is-top-level', # TODO: AC-939
'region' # TODO: AC-932
]
})
course_listings = self.dashboard_page.get_courses()
self.assertEqual(len(course_listings), 3)
self.dashboard_page.a11y_audit.check_for_accessibility_errors()
|
msegado/edx-platform
|
common/test/acceptance/tests/lms/test_lms_dashboard.py
|
Python
|
agpl-3.0
| 5,727
| 0.002794
|
from jarbas_utils.skill_tools import LILACSstorageQuery
from mycroft.util.log import getLogger
__authors__ = ["jarbas", "heinzschmidt"]
class ConceptNode():
'''
Node:
name:
type: "informational" <- all discussed nodes so far are informational
Connections:
synonims: [] <- is the same as
antonims: [] <- can never be related to
parents: {name : distance } <- is an instance of
childs: {name : distance } <- can have the following instances
cousins: [] <- somewhat related subjects
spawns: [] <- what comes from this?
spawned_by: [] <- where does this come from?
consumes: [] <- what does this need/spend ?
consumed_by: [] <- what consumes this?
parts : [ ] <- what smaller nodes can this be divided into?
part_off: [ ] <- what can be made out of this?
Data:
description: wikidata description_field
abstract: dbpedia abstract
summary: wikipedia_summary
pics: [ wikipedia pic, dbpedia pic ]
infobox: {wikipedia infobox}
wikidata: {wikidata_dict}
            props: [wikidata_properties] <- if we can parse this appropriately we can make connections
links: [ wikipedia link, dbpedia link ]
external_links[ suggested links from dbpedia]
'''
def __init__(self, name, data=None, parent_concepts=None,
child_concepts=None, synonims=None, antonims=None, cousins = None,
spawns = None, spawned_by = None, consumes = None, consumed_by = None,
parts = None, part_off=None, type="info"):
self.name = name
self.type = type
if data is None:
data = {}
self.data = data
self.connections = {}
if parent_concepts is not None:
self.connections.setdefault("parents", parent_concepts)
else:
self.connections.setdefault("parents", {})
if child_concepts is not None:
self.connections.setdefault("childs", child_concepts)
else:
self.connections.setdefault("childs", {})
if synonims is not None:
self.connections.setdefault("synonims", synonims)
else:
self.connections.setdefault("synonims", {})
if antonims is not None:
self.connections.setdefault("antonims", antonims)
else:
self.connections.setdefault("antonims", {})
if cousins is not None:
self.connections.setdefault("cousins", cousins)
else:
self.connections.setdefault("cousins", {})
if spawns is not None:
self.connections.setdefault("spawns", spawns)
else:
self.connections.setdefault("spawns", {})
if spawned_by is not None:
self.connections.setdefault("spawned_by", spawned_by)
else:
self.connections.setdefault("spawned_by", {})
if consumes is not None:
self.connections.setdefault("consumes", consumes)
else:
self.connections.setdefault("consumes", {})
if consumed_by is not None:
self.connections.setdefault("consumed_by", consumed_by)
else:
self.connections.setdefault("consumed_by", {})
if parts is not None:
self.connections.setdefault("parts", parts)
else:
self.connections.setdefault("parts", {})
if part_off is not None:
self.connections.setdefault("part_off", part_off)
else:
self.connections.setdefault("part_off", {})
def get_dict(self):
node_dict = {"name": self.name, "type": self.type, "connections":
self.connections, "data": self.data}
return node_dict
def load_from_dict(self, node_dict):
self.connections.update(node_dict["connections"])
self.data.update(node_dict.get("data", {}))
def get_parents(self):
return self.connections["parents"]
def get_childs(self):
return self.connections["childs"]
def get_cousins(self):
return self.connections["cousins"]
def get_consumes(self):
return self.connections["consumes"]
def get_consumed_by(self):
return self.connections["consumed_by"]
def get_spawn(self):
return self.connections["spawns"]
def get_spawned_by(self):
return self.connections["spawned_by"]
def get_parts(self):
return self.connections["parts"]
def get_part_off(self):
return self.connections["part_off"]
def get_synonims(self):
return self.connections["synonims"]
def get_antonims(self):
return self.connections["antonims"]
def get_data(self):
return self.data
def add_synonim(self, synonim, strenght=5):
if synonim not in self.connections["synonims"]:
self.connections["synonims"][synonim] = strenght
def add_antonim(self, antonim, strenght=5):
if antonim not in self.connections["antonims"]:
self.connections["antonims"][antonim] = strenght
def add_data(self, key, data=None):
if data is None:
data = {}
if key in self.data:
self.data[key] = data
else:
self.data.setdefault(key, data)
def add_parent(self, parent_name, gen = 1, update = True):
# a node cannot be a parent of itself
if parent_name == self.name:
return
# a node cannot be a parent and a child (would it make sense in some corner case?)
if parent_name in self.connections["childs"]:
return
if parent_name not in self.connections["parents"]:
self.connections["parents"].setdefault(parent_name, gen)
elif parent_name in self.connections["parents"] and update:
self.connections["parents"][parent_name] = gen
def add_child(self, child_name, gen=1, update = True):
# a node cannot be a child of itself
if child_name == self.name:
return
if child_name in self.connections["parents"]:
return
if child_name not in self.connections["childs"]:
self.connections["childs"].setdefault(child_name, gen)
elif child_name in self.connections["childs"] and update:
self.connections["childs"][child_name] = gen
def add_cousin(self, cousin, strenght=5):
# dont add self or plural forms to related
        cousin_p = cousin + "s"  # add an s
        cousin_s = cousin[:-1]  # remove the last letter
if cousin == self.name or cousin_p in self.name or cousin_s in self.name:
return
# dont add synonims
for s in self.connections["synonims"].keys():
if cousin == s or cousin_p in s+"s" or cousin_s in s+"s":
return
if cousin not in self.connections["cousins"]:
self.connections["cousins"][cousin] = strenght
def add_spawn(self, spawn, strenght=5):
if spawn not in self.connections["spawns"]:
self.connections["spawns"][spawn]= strenght
def add_spawned_by(self, spawned_by, strenght=5):
if spawned_by not in self.connections["spawned_by"]:
self.connections["spawned_by"][spawned_by]= strenght
def add_consumes(self, consumes, strenght=5):
if consumes not in self.connections["consumes"]:
self.connections["consumes"][consumes]= strenght
def add_consumed_by(self, consumed_by, strenght=5):
if consumed_by not in self.connections["consumed_by"]:
self.connections["consumed_by"][consumed_by]= strenght
def add_part(self, part, strenght=5):
if part not in self.connections["parts"]:
self.connections["parts"][part]= strenght
def add_part_off(self, part_off, strenght=5):
if part_off not in self.connections["part_off"]:
self.connections["part_off"][part_off]= strenght
def remove_synonim(self, synonim):
        if synonim in self.connections["synonims"]:
self.connections["synonims"].pop(synonim)
def remove_antonim(self, antonim):
if antonim in self.connections["antonims"]:
self.connections["antonims"].pop(antonim)
def remove_cousin(self, cousin):
if cousin in self.connections["cousins"]:
self.connections["cousins"].pop(cousin)
def remove_part(self, part):
if part in self.connections["parts"]:
self.connections["parts"].pop(part)
def remove_part_off(self, part_off):
if part_off in self.connections["part_off"]:
self.connections["part_off"].pop(part_off)
def remove_consumes(self, consumes):
if consumes in self.connections["consumes"]:
self.connections["consumes"].pop(consumes)
def remove_consumed_by(self, consumed_by):
if consumed_by in self.connections["consumed_by"]:
self.connections["consumed_by"].pop(consumed_by)
def remove_spawns(self, spawn):
if spawn in self.connections["spawns"]:
self.connections["spawns"].pop(spawn)
def remove_spawned_by(self, spawned_by):
if spawned_by in self.connections["spawned_by"]:
self.connections["spawned_by"].pop(spawned_by)
def remove_data(self, key):
if key in self.data:
self.data.pop(key)
def remove_parent(self, parent_name):
if parent_name in self.connections["parents"]:
self.connections["parents"].pop(parent_name)
def remove_child(self, child_name):
if child_name in self.connections["childs"]:
self.connections["childs"].pop(child_name)
class ConceptConnector():
def __init__(self, concepts = None, emitter=None, user_concepts=None):
if concepts is None:
concepts = {}
if user_concepts is None:
user_concepts = {}
self.name = "LILACS_Concept_Connector"
self.concepts = concepts
self.user_concepts = user_concepts
self.logger = getLogger("ConceptConnector")
self.emitter = emitter
self.emitter.on("new_node", self.new_node)
self.storage = LILACSstorageQuery(self.name, self.emitter, self.logger)
self.saved = []
self.curiosity_save = False
self.curiosity_load = False
def new_node(self, message):
node_name = message.data["name"]
node_dict = message.data
save = message.data.get("save", self.curiosity_save)
load = message.data.get("load", self.curiosity_load)
# create node signaled from outside
if load:
self.logger.info("Loading into memory externally signaled node from " + message.context.get("source", "unknown source"))
self.create_concept(new_concept_name=node_name)
if node_dict.get("type", "info") == "info":
self.concepts[node_name].load_from_dict(node_dict)
elif node_dict.get("type", "info") == "user":
self.user_concepts[node_name].load_from_dict(node_dict)
else:
# TODO handle this
return
self.logger.info("Node loaded: " + node_name)
if save:
self.logger.info("Updating storage")
self.save_concept(node_name)
def get_concept_names(self, type="info"):
concepts = []
if type == "info":
for name in self.concepts:
concepts.append(name)
elif type == "user":
for name in self.user_concepts:
concepts.append(name)
return concepts
def get_concepts(self):
return self.concepts
def add_concept(self, concept_name, concept, type="info"):
if type == "info":
if concept_name in self.concepts:
self.logger.info("concept exists, merging fields")
# merge fields
concept_dict = self.concepts[concept_name].get_dict()
new_dict = concept.get_dict()
for key in new_dict:
if key == "connections":
cons = new_dict[key]
for con in cons:
# check if there is any data for this connection
if cons[con] != {}:
# update each node individually
for node in cons[con]:
concept_dict["connections"][con][node] = cons[con][node]
else:
concept_dict[key] = new_dict[key]
self.concepts[concept_name].load_from_dict(concept_dict)
else:
self.logger.info("adding concept to connector")
self.concepts.setdefault(concept_name, concept)
elif type == "user":
if concept_name in self.user_concepts:
self.logger.info("concept exists, merging fields")
# merge fields
concept_dict = self.user_concepts[concept_name].get_dict()
new_dict = concept.get_dict()
for key in new_dict:
if key == "connections":
cons = new_dict[key]
for con in cons:
# check if there is any data for this connection
if cons[con] != {}:
# update each node individually
for node in cons[con]:
concept_dict["connections"][con][node] = cons[con][node]
else:
concept_dict[key] = new_dict[key]
self.user_concepts[concept_name].load_from_dict(concept_dict)
else:
self.logger.info("adding concept to connector")
self.user_concepts.setdefault(concept_name, concept)
def remove_concept(self, concept_name):
if concept_name in self.concepts:
self.concepts.pop(concept_name)
if concept_name in self.user_concepts:
self.user_concepts.pop(concept_name)
def get_data(self, concept_name):
if concept_name in self.concepts:
return self.concepts[concept_name].get_data()
if concept_name in self.user_concepts:
return self.user_concepts[concept_name].get_data()
def add_data(self, concept_name, key, data=None):
if concept_name in self.concepts:
self.concepts[concept_name].add_data(key, data)
if concept_name in self.user_concepts:
self.user_concepts[concept_name].add_data(key, data)
def get_childs(self, concept_name):
if concept_name in self.concepts:
c = self.concepts[concept_name].get_childs()
else:
c = {}
return c
def add_child(self, concept_name, child):
if concept_name in self.concepts:
self.concepts[concept_name].add_child(child)
def get_parents(self, concept_name):
if concept_name in self.concepts:
p = self.concepts[concept_name].get_parents()
else:
p = {}
return p
def add_parent(self, concept_name, parent):
if concept_name in self.concepts:
self.concepts[concept_name].add_parent(parent)
def get_antonims(self, concept_name):
if concept_name in self.concepts:
return self.concepts[concept_name].get_antonims()
def add_antonim(self, concept_name, antonim):
if concept_name in self.concepts:
self.concepts[concept_name].add_antonim(antonim)
def get_synonims(self, concept_name):
if concept_name in self.concepts:
return self.concepts[concept_name].get_synonims()
def add_synonim(self, concept_name, synonim):
if concept_name in self.concepts:
self.concepts[concept_name].add_synonim(synonim)
def get_cousins(self, concept_name):
if concept_name in self.concepts:
return self.concepts[concept_name].get_cousins()
def add_cousin(self, concept_name, cousin):
self.logger.info("adding cousin: " + cousin + " to concept: " + concept_name)
if concept_name in self.concepts:
self.concepts[concept_name].add_cousin(cousin)
def get_parts(self, concept_name):
if concept_name in self.concepts:
return self.concepts[concept_name].get_parts()
def add_part(self, concept_name, part):
if concept_name in self.concepts:
self.concepts[concept_name].add_part(part)
def get_part_off(self, concept_name):
if concept_name in self.concepts:
return self.concepts[concept_name].get_part_off()
def add_part_off(self, concept_name, part_off):
if concept_name in self.concepts:
self.concepts[concept_name].add_part_off(part_off)
def get_spawn(self, concept_name):
if concept_name in self.concepts:
return self.concepts[concept_name].get_spawn()
def add_spawn(self, concept_name, spawn):
if concept_name in self.concepts:
self.concepts[concept_name].add_spawn(spawn)
def get_spawned_by(self, concept_name):
if concept_name in self.concepts:
return self.concepts[concept_name].get_spawned_by()
def add_spawned_by(self, concept_name, spawned_by):
if concept_name in self.concepts:
self.concepts[concept_name].add_spawned_by(spawned_by)
def get_consumes(self, concept_name):
if concept_name in self.concepts:
return self.concepts[concept_name].get_consumes()
def add_consumes(self, concept_name, consumes):
if concept_name in self.concepts:
self.concepts[concept_name].add_consumes(consumes)
def get_consumed_by(self, concept_name):
if concept_name in self.concepts:
return self.concepts[concept_name].get_consumed_by()
def add_consumed_by(self, concept_name, consumed_by):
if concept_name in self.concepts:
self.concepts[concept_name].add_consumed_by(consumed_by)
def create_concept(self, new_concept_name, data=None,
child_concepts=None, parent_concepts=None,
synonims=None,
antonims=None, type="info"):
if data is None:
data = {}
if child_concepts is None:
child_concepts = {}
if parent_concepts is None:
parent_concepts = {}
if synonims is None:
synonims = {}
if antonims is None:
antonims = {}
        # safety checks: a node cannot be its own parent or child
self.logger.info("checking for invalid data")
if new_concept_name in parent_concepts:
parent_concepts.pop(new_concept_name)
if new_concept_name in child_concepts:
child_concepts.pop(new_concept_name)
self.logger.info("creating concept node")
# handle new concept
concept = ConceptNode(name=new_concept_name, data=data, child_concepts=child_concepts, parent_concepts=parent_concepts,
synonims=synonims, antonims=antonims, type=type)
if new_concept_name not in self.concepts:
self.logger.info("Trying to load concept json " + new_concept_name)
loaded_concept = self.storage.load(new_concept_name)
if loaded_concept["sucess"]:
self.logger.info("loading concept data " + new_concept_name)
node_dict = loaded_concept["node"]
concept.load_from_dict(node_dict)
self.logger.info("loaded concept into memory")
else:
self.logger.info("updating concept")
self.add_concept(new_concept_name, concept, type)
# handle parent concepts
for concept_name in parent_concepts:
self.logger.info("checking if parent node exists: " + concept_name)
gen = parent_concepts[concept_name]
            # create parent if it doesn't exist
if concept_name not in self.concepts:
self.logger.info("creating node: " + concept_name )
concept = ConceptNode(concept_name)
self.add_concept(concept_name, concept, type)
# add child to parent
self.logger.info("adding child: " + new_concept_name + " to parent: " + concept_name)
if type == "info":
self.concepts[concept_name].add_child(new_concept_name, gen=gen)
elif type == "user":
self.user_concepts[concept_name].add_child(new_concept_name,
gen=gen)
# handle child concepts
for concept_name in child_concepts:
self.logger.info("checking if child node exists: " + concept_name)
gen = child_concepts[concept_name]
            # create child if it doesn't exist
if concept_name not in self.concepts:
self.logger.info("creating node: " + concept_name)
concept = ConceptNode(concept_name)
self.add_concept(concept_name, concept, type)
            #self.save_concept(name=new_concept_name)
#add parent to child
self.logger.info("adding parent: " + new_concept_name + " to child: " + concept_name)
if type == "info":
self.concepts[concept_name].add_parent(new_concept_name, gen=gen)
elif type == "user":
self.user_concepts[concept_name].add_parent(new_concept_name,
gen=gen)
# handle synonims
for concept_name in synonims:
self.logger.info("checking if synonim exists: " + concept_name)
            # create synonim if it doesn't exist
if concept_name not in self.concepts:
self.logger.info("creating node: " + concept_name)
concept = ConceptNode(concept_name)
self.add_concept(concept_name, concept, type)
#self.save_concept(name=new_concept_name)
# add synonim to synonim
self.logger.info("adding synonim: " + new_concept_name + " to concept: " + concept_name)
if type == "info":
self.concepts[concept_name].add_synonim(new_concept_name)
elif type == "user":
self.user_concepts[concept_name].add_synonim(new_concept_name)
# handle antonims
for concept_name in antonims:
self.logger.info("checking if antonim exists: " + concept_name)
            # create antonim if it doesn't exist
if concept_name not in self.concepts:
self.logger.info("creating node: " + concept_name)
concept = ConceptNode(concept_name)
self.add_concept(concept_name, concept, type)
#self.save_concept(name=new_concept_name)
# add antonim to antonim
self.logger.info("adding antonim: " + new_concept_name + " to concept: " + concept_name)
if type == "info":
self.concepts[concept_name].add_antonim(new_concept_name)
elif type == "user":
self.user_concepts[concept_name].add_antonim(new_concept_name)
#self.save_concept(concept_name)
def save_concept(self, name, type="info"):
if name is None or name == "" or name == " ":
self.logger.info("no node to save")
return
self.logger.info("saving: " + name)
self.saved.append(name)
if type == "info":
if name not in self.concepts.keys():
self.logger.error("Can not save this node because it doesnt exist in memory yet")
return
node_dict = self.concepts[name].get_dict()
elif type == "user":
if name not in self.user_concepts.keys():
self.logger.error("Can not save this node because it doesnt exist in memory yet")
return
node_dict = self.user_concepts[name].get_dict()
else:
self.logger.error("invalid node type: " + str(type))
return
        # TODO check hash before loading to see if file changed
self.storage.save(node_dict)
return
def load_concept(self, name):
if name is None or name == "" or name == " ":
self.logger.info("no node to load " + name)
return False
loaded = self.storage.load(name)
if not loaded.get("sucess", False):
self.logger.info("no node to load " + name)
return False
node_dict = loaded["data"]
type = node_dict.get("type", "info")
self.logger.info("creating concept in memory: " + name)
if type == "info":
self.create_concept(name)
self.concepts[name].load_from_dict(node_dict)
self.logger.info("created info concept in memory: " + name)
elif type == "user":
self.create_concept(name, type="user")
self.logger.info("created user concept in memory: " + name)
self.user_concepts[name].load_from_dict(node_dict)
else:
self.logger.error("invalid node type: " + str(type))
return False
self.logger.info("loaded node_data: " + str(node_dict))
return True
def reset_connector(self):
self.concepts = {}
self.user_concepts = {}
self.saved = []
|
JarbasAI/JarbasAI
|
jarbas_skills/LILACS_core/concept.py
|
Python
|
gpl-3.0
| 25,737
| 0.002487
|
from django.db import models
from django.utils import timezone
from pytz import common_timezones
from .validators import YoutubeValidator
class TimezoneField(models.CharField):
"""
A field for selecting a timezone from the common timezones list.
"""
def __init__(self, *args, **kwargs):
common_timezone_names = [tz.replace('_', ' ') for tz in common_timezones]
the_kwargs = {
'choices': zip(common_timezones, common_timezone_names),
'default': timezone.get_default_timezone_name(),
'max_length': 50,
}
the_kwargs.update(kwargs)
super().__init__(*args, **the_kwargs)
class YoutubeField(models.CharField):
"""
Field representing a YouTube video, essentially just a text field
but with automatic validation that given values are valid YouTube URLs
"""
default_validators = [YoutubeValidator()]
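# Illustrative usage sketch; not part of this module. The ``Event`` model and
# its app are hypothetical; a normal Django project setup is assumed.
#
#     from django.db import models
#     from gn_django.fields import TimezoneField, YoutubeField
#
#     class Event(models.Model):
#         timezone = TimezoneField()   # choices derived from pytz common_timezones
#         trailer = YoutubeField(max_length=255, blank=True)  # validated as a YouTube URL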
|
gamernetwork/gn-django
|
gn_django/fields.py
|
Python
|
mit
| 909
| 0.0011
|
from functools import partial
from .primitives import EMPTY
__all__ = ['identity', 'constantly', 'caller',
'partial', 'rpartial', 'func_partial',
'curry', 'rcurry', 'autocurry',
'iffy']
def identity(x):
return x
def constantly(x):
return lambda *a, **kw: x
# an operator.methodcaller() brother
def caller(*a, **kw):
return lambda f: f(*a, **kw)
# not using functools.partial to get real function
def func_partial(func, *args, **kwargs):
"""
A functools.partial alternative, which returns a real function.
Can be used to construct methods.
"""
return lambda *a, **kw: func(*(args + a), **dict(kwargs, **kw))
def rpartial(func, *args):
return lambda *a: func(*(a + args))
def curry(func, n=EMPTY):
if n is EMPTY:
n = func.__code__.co_argcount
if n <= 1:
return func
elif n == 2:
return lambda x: lambda y: func(x, y)
else:
return lambda x: curry(partial(func, x), n - 1)
def rcurry(func, n=EMPTY):
if n is EMPTY:
n = func.__code__.co_argcount
if n <= 1:
return func
elif n == 2:
return lambda x: lambda y: func(y, x)
else:
return lambda x: rcurry(rpartial(func, x), n - 1)
def autocurry(func, n=EMPTY, _args=(), _kwargs={}):
if n is EMPTY:
n = func.__code__.co_argcount
def autocurried(*a, **kw):
args = _args + a
kwargs = _kwargs.copy()
kwargs.update(kw)
if len(args) + len(kwargs) >= n:
return func(*args, **kwargs)
else:
return autocurry(func, n, _args=args, _kwargs=kwargs)
return autocurried
def iffy(pred, action=EMPTY, default=identity):
if action is EMPTY:
return iffy(bool, pred)
else:
return lambda v: action(v) if pred(v) else \
default(v) if callable(default) else \
default
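# Illustrative examples; not part of the original module. They only use the
# helpers defined above:
#
#     add = lambda x, y, z: x + y + z
#     curry(add)(1)(2)(3)       # -> 6, one argument per call
#     rcurry(add)(1)(2)(3)      # -> 6, arguments bound right-to-left
#     autocurry(add)(1, 2)(3)   # -> 6, accepts any grouping of arguments
#
#     inc_evens = iffy(lambda x: x % 2 == 0, lambda x: x + 1)
#     [inc_evens(x) for x in (1, 2, 3, 4)]   # -> [1, 3, 3, 5]
#
#     class Greeter(object):
#         greet = func_partial(lambda self, name: "hi, " + name)
#     Greeter().greet("bob")    # -> 'hi, bob' (func_partial returns a real function)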
|
musicpax/funcy
|
funcy/simple_funcs.py
|
Python
|
bsd-3-clause
| 1,941
| 0.002576
|
from __future__ import unicode_literals
from functools import partial
from future.utils import with_metaclass
from django import VERSION
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db.models import Model, Field
from django.db.models.signals import class_prepared
from django.utils import six
from mezzanine.utils.importing import import_dotted_path
# Backward compatibility with Django 1.5's "get_user_model".
if VERSION >= (1, 5):
from django.contrib.auth import get_user_model
else:
def get_user_model():
from django.contrib.auth.models import User
return User
# Emulate Django 1.7's exception-raising get_registered_model
# when running under earlier versions
if VERSION >= (1, 7):
from django.apps import apps
get_model = apps.get_model
get_registered_model = apps.get_registered_model
else:
from django.db.models import get_model as django_get_model
def get_model(app_label, model_name=None):
if model_name is None:
app_label, model_name = app_label.split('.')
model = django_get_model(app_label, model_name)
if not model:
raise LookupError
return model
def get_registered_model(app_label, model_name):
model = django_get_model(app_label, model_name,
seed_cache=False, only_installed=False)
if not model:
raise LookupError
return model
def get_user_model_name():
"""
Returns the app_label.object_name string for the user model.
"""
return getattr(settings, "AUTH_USER_MODEL", "auth.User")
def base_concrete_model(abstract, instance):
"""
Used in methods of abstract models to find the super-most concrete
(non abstract) model in the inheritance chain that inherits from the
given abstract model. This is so the methods in the abstract model can
query data consistently across the correct concrete model.
Consider the following::
class Abstract(models.Model)
class Meta:
abstract = True
def concrete(self):
return base_concrete_model(Abstract, self)
class Super(Abstract):
pass
class Sub(Super):
pass
sub = Sub.objects.create()
sub.concrete() # returns Super
In actual Mezzanine usage, this allows methods in the ``Displayable`` and
``Orderable`` abstract models to access the ``Page`` instance when
instances of custom content types, (eg: models that inherit from ``Page``)
need to query the ``Page`` model to determine correct values for ``slug``
and ``_order`` which are only relevant in the context of the ``Page``
model and not the model of the custom content type.
"""
for cls in reversed(instance.__class__.__mro__):
if issubclass(cls, abstract) and not cls._meta.abstract:
return cls
return instance.__class__
def upload_to(field_path, default):
"""
Used as the ``upload_to`` arg for file fields - allows for custom
handlers to be implemented on a per field basis defined by the
``UPLOAD_TO_HANDLERS`` setting.
"""
from mezzanine.conf import settings
for k, v in settings.UPLOAD_TO_HANDLERS.items():
if k.lower() == field_path.lower():
return import_dotted_path(v)
return default
class AdminThumbMixin(object):
"""
Provides a thumbnail method on models for admin classes to
reference in the ``list_display`` definition.
"""
admin_thumb_field = None
def admin_thumb(self):
thumb = ""
if self.admin_thumb_field:
thumb = getattr(self, self.admin_thumb_field, "")
if not thumb:
return ""
from mezzanine.conf import settings
from mezzanine.core.templatetags.mezzanine_tags import thumbnail
x, y = settings.ADMIN_THUMB_SIZE.split('x')
thumb_url = thumbnail(thumb, x, y)
return "<img src='%s%s'>" % (settings.MEDIA_URL, thumb_url)
admin_thumb.allow_tags = True
admin_thumb.short_description = ""
class ModelMixinBase(type):
"""
Metaclass for ``ModelMixin`` which is used for injecting model
fields and methods into models defined outside of a project.
This currently isn't used anywhere.
"""
def __new__(cls, name, bases, attrs):
"""
Checks for an inner ``Meta`` class with a ``mixin_for``
attribute containing the model that this model will be mixed
into. Once found, copy over any model fields and methods onto
the model being mixed into, and return it as the actual class
definition for the mixin.
"""
if name == "ModelMixin":
# Actual ModelMixin class definition.
return super(ModelMixinBase, cls).__new__(cls, name, bases, attrs)
try:
mixin_for = attrs.pop("Meta").mixin_for
if not issubclass(mixin_for, Model):
raise TypeError
except (TypeError, KeyError, AttributeError):
            raise ImproperlyConfigured("The ModelMixin class '%s' requires "
                                       "an inner Meta class with the "
                                       "``mixin_for`` attribute defined, "
                                       "with a value that is a valid model."
                                       % name)
# Copy fields and methods onto the model being mixed into, and
# return it as the definition for the mixin class itself.
for k, v in attrs.items():
if isinstance(v, Field):
v.contribute_to_class(mixin_for, k)
elif k != "__module__":
setattr(mixin_for, k, v)
return mixin_for
class ModelMixin(with_metaclass(ModelMixinBase, object)):
"""
Used as a subclass for mixin models that inject their behaviour onto
models defined outside of a project. The subclass should define an
inner ``Meta`` class with a ``mixin_for`` attribute containing the
model that will be mixed into.
"""
class LazyModelOperations(object):
"""
This class connects itself to Django's class_prepared signal.
Pass a function and a model or model name to its ``add()`` method,
and the function will be called with the model as its only
parameter once the model has been loaded. If the model is already
loaded, the function is called immediately.
Adapted from ``django.db.models.fields.related`` and used in
``mezzanine.generic.fields``.
"""
def __init__(self):
self.pending_operations = {}
class_prepared.connect(self.signal_receiver)
@staticmethod
def model_key(model_or_name):
"""
Returns an (app_label, model_name) tuple from a model or string.
"""
if isinstance(model_or_name, six.string_types):
app_label, model_name = model_or_name.split(".")
else:
# It's actually a model class.
app_label = model_or_name._meta.app_label
model_name = model_or_name._meta.object_name
return app_label, model_name
def add(self, function, *models_or_names):
"""
The function passed to this method should accept n arguments,
where n=len(models_or_names). When all the models are ready,
the function will be called with the models as arguments, in
the order they appear in this argument list.
"""
# Eagerly parse all model strings so we can fail immediately
# if any are plainly invalid.
model_keys = [self.model_key(m) if not isinstance(m, tuple) else m
for m in models_or_names]
# If this function depends on more than one model, recursively call add
# for each, partially applying the given function on each iteration.
model_key, more_models = model_keys[0], model_keys[1:]
if more_models:
inner_function = function
function = lambda model: self.add(partial(inner_function, model),
*more_models)
# If the model is already loaded, pass it to the function immediately.
# Otherwise, delay execution until the class is prepared.
try:
model_class = get_registered_model(*model_key)
except LookupError:
self.pending_operations.setdefault(model_key, []).append(function)
else:
function(model_class)
def signal_receiver(self, sender, **kwargs):
"""
Receive ``class_prepared``, and pass the freshly prepared
model to each function waiting for it.
"""
key = (sender._meta.app_label, sender.__name__)
for function in self.pending_operations.pop(key, []):
function(sender)
lazy_model_ops = LazyModelOperations()
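# Illustrative usage sketch; not part of the original module. The model label
# and callback below are hypothetical.
#
#     def setup_blog_post(model):
#         # runs immediately if "blog.BlogPost" is already registered, otherwise
#         # as soon as its class_prepared signal fires
#         print("model ready:", model)
#
#     lazy_model_ops.add(setup_blog_post, "blog.BlogPost")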
|
fusionbox/mezzanine
|
mezzanine/utils/models.py
|
Python
|
bsd-2-clause
| 8,880
| 0.000113
|
import logging
import os
import re
import uuid
from io import BytesIO
import six
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import AssetKey, CourseKey
from opaque_keys.edx.locator import AssetLocator
from PIL import Image
from six.moves.urllib.parse import parse_qsl, quote_plus, urlencode, urlparse, urlunparse # pylint: disable=import-error
from xmodule.assetstore.assetmgr import AssetManager
from xmodule.exceptions import NotFoundError
from xmodule.modulestore.exceptions import ItemNotFoundError
STATIC_CONTENT_VERSION = 1
XASSET_LOCATION_TAG = 'c4x'
XASSET_SRCREF_PREFIX = 'xasset:'
XASSET_THUMBNAIL_TAIL_NAME = '.jpg'
STREAM_DATA_CHUNK_SIZE = 1024
VERSIONED_ASSETS_PREFIX = '/assets/courseware'
VERSIONED_ASSETS_PATTERN = r'/assets/courseware/(v[\d]/)?([a-f0-9]{32})'
class StaticContent(object):
def __init__(self, loc, name, content_type, data, last_modified_at=None, thumbnail_location=None, import_path=None,
length=None, locked=False, content_digest=None):
self.location = loc
self.name = name # a display string which can be edited, and thus not part of the location which needs to be fixed
self.content_type = content_type
self._data = data
self.length = length
self.last_modified_at = last_modified_at
self.thumbnail_location = thumbnail_location
# optional information about where this file was imported from. This is needed to support import/export
# cycles
self.import_path = import_path
self.locked = locked
self.content_digest = content_digest
@property
def is_thumbnail(self):
return self.location.category == 'thumbnail'
@staticmethod
def generate_thumbnail_name(original_name, dimensions=None, extension=None):
"""
- original_name: Name of the asset (typically its location.name)
- dimensions: `None` or a tuple of (width, height) in pixels
- extension: `None` or desired filename extension of the thumbnail
"""
if extension is None:
extension = XASSET_THUMBNAIL_TAIL_NAME
name_root, ext = os.path.splitext(original_name)
if not ext == extension:
name_root = name_root + ext.replace(u'.', u'-')
if dimensions:
width, height = dimensions # pylint: disable=unpacking-non-sequence
name_root += "-{}x{}".format(width, height)
return u"{name_root}{extension}".format(
name_root=name_root,
extension=extension,
)
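    # Illustrative examples of the naming convention above; the file names are
    # hypothetical. With the default '.jpg' extension:
    #   StaticContent.generate_thumbnail_name('photo.png')                      -> 'photo-png.jpg'
    #   StaticContent.generate_thumbnail_name('photo.png', dimensions=(128, 128))
    #                                                                            -> 'photo-png-128x128.jpg'
    #   StaticContent.generate_thumbnail_name('diagram.svg', extension='.svg')  -> 'diagram.svg'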
@staticmethod
def compute_location(course_key, path, revision=None, is_thumbnail=False):
"""
Constructs a location object for static content.
- course_key: the course that this asset belongs to
- path: is the name of the static asset
- revision: is the object's revision information
- is_thumbnail: is whether or not we want the thumbnail version of this
asset
"""
path = path.replace('/', '_')
return course_key.make_asset_key(
'asset' if not is_thumbnail else 'thumbnail',
AssetLocator.clean_keeping_underscores(path)
).for_branch(None)
def get_id(self):
return self.location
@property
def data(self):
return self._data
ASSET_URL_RE = re.compile(r"""
/?c4x/
(?P<org>[^/]+)/
(?P<course>[^/]+)/
(?P<category>[^/]+)/
(?P<name>[^/]+)
""", re.VERBOSE | re.IGNORECASE)
@staticmethod
def is_c4x_path(path_string):
"""
Returns a boolean if a path is believed to be a c4x link based on the leading element
"""
return StaticContent.ASSET_URL_RE.match(path_string) is not None
@staticmethod
def get_static_path_from_location(location):
"""
This utility static method will take a location identifier and create a 'durable' /static/.. URL representation of it.
This link is 'durable' as it can maintain integrity across cloning of courseware across course-ids, e.g. reruns of
courses.
In the LMS/CMS, we have runtime link-rewriting, so at render time, this /static/... format will get translated into
the actual /c4x/... path which the client needs to reference static content
"""
if location is not None:
return u"/static/{name}".format(name=location.block_id)
else:
return None
@staticmethod
def get_base_url_path_for_course_assets(course_key):
if course_key is None:
return None
assert isinstance(course_key, CourseKey)
placeholder_id = uuid.uuid4().hex
# create a dummy asset location with a fake but unique name. strip off the name, and return it
url_path = StaticContent.serialize_asset_key_with_slash(
course_key.make_asset_key('asset', placeholder_id).for_branch(None)
)
return url_path.replace(placeholder_id, '')
@staticmethod
def get_location_from_path(path):
"""
Generate an AssetKey for the given path (old c4x/org/course/asset/name syntax)
"""
try:
return AssetKey.from_string(path)
except InvalidKeyError:
# TODO - re-address this once LMS-11198 is tackled.
if path.startswith('/'):
# try stripping off the leading slash and try again
return AssetKey.from_string(path[1:])
@staticmethod
def is_versioned_asset_path(path):
"""Determines whether the given asset path is versioned."""
return path.startswith(VERSIONED_ASSETS_PREFIX)
@staticmethod
def parse_versioned_asset_path(path):
"""
Examines an asset path and breaks it apart if it is versioned,
returning both the asset digest and the unversioned asset path,
which will normally be an AssetKey.
"""
asset_digest = None
asset_path = path
if StaticContent.is_versioned_asset_path(asset_path):
result = re.match(VERSIONED_ASSETS_PATTERN, asset_path)
if result is not None:
asset_digest = result.groups()[1]
asset_path = re.sub(VERSIONED_ASSETS_PATTERN, '', asset_path)
return (asset_digest, asset_path)
@staticmethod
def add_version_to_asset_path(path, version):
"""
Adds a prefix to an asset path indicating the asset's version.
"""
# Don't version an already-versioned path.
if StaticContent.is_versioned_asset_path(path):
return path
structure_version = 'v{}'.format(STATIC_CONTENT_VERSION)
return u'{}/{}/{}{}'.format(VERSIONED_ASSETS_PREFIX, structure_version, version, path)
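    # Illustrative example of the versioned path format; the digest is a made-up
    # md5 value and the asset path is hypothetical:
    #   StaticContent.add_version_to_asset_path(
    #       '/c4x/org/course/asset/logo.png', 'd41d8cd98f00b204e9800998ecf8427e')
    #   -> '/assets/courseware/v1/d41d8cd98f00b204e9800998ecf8427e/c4x/org/course/asset/logo.png'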
@staticmethod
def get_asset_key_from_path(course_key, path):
"""
Parses a path, extracting an asset key or creating one.
Args:
course_key: key to the course which owns this asset
path: the path to said content
Returns:
AssetKey: the asset key that represents the path
"""
# Clean up the path, removing any static prefix and any leading slash.
if path.startswith('/static/'):
path = path[len('/static/'):]
# Old-style asset keys start with `/`, so don't try and strip it
# in that case.
if not path.startswith('/c4x'):
path = path.lstrip('/')
try:
return AssetKey.from_string(path)
except InvalidKeyError:
# If we couldn't parse the path, just let compute_location figure it out.
# It's most likely a path like /image.png or something.
return StaticContent.compute_location(course_key, path)
@staticmethod
def is_excluded_asset_type(path, excluded_exts):
"""
Check if this is an allowed file extension to serve.
Some files aren't served through the CDN in order to avoid same-origin policy/CORS-related issues.
"""
return any(path.lower().endswith(excluded_ext.lower()) for excluded_ext in excluded_exts)
@staticmethod
def get_canonicalized_asset_path(course_key, path, base_url, excluded_exts, encode=True):
"""
Returns a fully-qualified path to a piece of static content.
If a static asset CDN is configured, this path will include it.
Otherwise, the path will simply be relative.
Args:
course_key: key to the course which owns this asset
path: the path to said content
Returns:
string: fully-qualified path to asset
"""
# Break down the input path.
_, _, relative_path, params, query_string, _ = urlparse(path)
# Convert our path to an asset key if it isn't one already.
asset_key = StaticContent.get_asset_key_from_path(course_key, relative_path)
# Check the status of the asset to see if this can be served via CDN aka publicly.
serve_from_cdn = False
content_digest = None
try:
content = AssetManager.find(asset_key, as_stream=True)
serve_from_cdn = not getattr(content, "locked", True)
content_digest = getattr(content, "content_digest", None)
except (ItemNotFoundError, NotFoundError):
# If we can't find the item, just treat it as if it's locked.
serve_from_cdn = False
# Do a generic check to see if anything about this asset disqualifies it from being CDN'd.
is_excluded = False
if StaticContent.is_excluded_asset_type(relative_path, excluded_exts):
serve_from_cdn = False
is_excluded = True
# Update any query parameter values that have asset paths in them. This is for assets that
# require their own after-the-fact values, like a Flash file that needs the path of a config
# file passed to it e.g. /static/visualization.swf?configFile=/static/visualization.xml
query_params = parse_qsl(query_string)
updated_query_params = []
for query_name, query_val in query_params:
if query_val.startswith("/static/"):
new_val = StaticContent.get_canonicalized_asset_path(
course_key, query_val, base_url, excluded_exts, encode=False)
updated_query_params.append((query_name, new_val.encode('utf-8')))
else:
# Make sure we're encoding Unicode strings down to their byte string
# representation so that `urlencode` can handle it.
updated_query_params.append((query_name, query_val.encode('utf-8')))
serialized_asset_key = StaticContent.serialize_asset_key_with_slash(asset_key)
base_url = base_url if serve_from_cdn else ''
asset_path = serialized_asset_key
# If the content has a digest (i.e. md5sum) value specified, create a versioned path to the asset using it.
if not is_excluded and content_digest:
asset_path = StaticContent.add_version_to_asset_path(serialized_asset_key, content_digest)
# Only encode this if told to. Important so that we don't double encode
# when working with paths that are in query parameters.
if encode:
asset_path = asset_path.encode('utf-8')
asset_path = quote_plus(asset_path, '/:+@')
return urlunparse(('', base_url, asset_path, params, urlencode(updated_query_params), ''))
def stream_data(self):
yield self._data
@staticmethod
def serialize_asset_key_with_slash(asset_key):
"""
Legacy code expects the serialized asset key to start w/ a slash; so, do that in one place
:param asset_key:
"""
url = six.text_type(asset_key)
if not url.startswith('/'):
url = '/' + url # TODO - re-address this once LMS-11198 is tackled.
return url
class StaticContentStream(StaticContent):
def __init__(self, loc, name, content_type, stream, last_modified_at=None, thumbnail_location=None, import_path=None,
length=None, locked=False, content_digest=None):
super(StaticContentStream, self).__init__(loc, name, content_type, None, last_modified_at=last_modified_at,
thumbnail_location=thumbnail_location, import_path=import_path,
length=length, locked=locked, content_digest=content_digest)
self._stream = stream
def stream_data(self):
while True:
chunk = self._stream.read(STREAM_DATA_CHUNK_SIZE)
if len(chunk) == 0:
break
yield chunk
def stream_data_in_range(self, first_byte, last_byte):
"""
Stream the data between first_byte and last_byte (included)
"""
self._stream.seek(first_byte)
position = first_byte
while True:
if last_byte < position + STREAM_DATA_CHUNK_SIZE - 1:
chunk = self._stream.read(last_byte - position + 1)
yield chunk
break
chunk = self._stream.read(STREAM_DATA_CHUNK_SIZE)
position += STREAM_DATA_CHUNK_SIZE
yield chunk
def close(self):
self._stream.close()
def copy_to_in_mem(self):
self._stream.seek(0)
content = StaticContent(self.location, self.name, self.content_type, self._stream.read(),
last_modified_at=self.last_modified_at, thumbnail_location=self.thumbnail_location,
import_path=self.import_path, length=self.length, locked=self.locked,
content_digest=self.content_digest)
return content
class ContentStore(object):
'''
Abstraction for all ContentStore providers (e.g. MongoDB)
'''
def save(self, content):
raise NotImplementedError
def find(self, filename):
raise NotImplementedError
def get_all_content_for_course(self, course_key, start=0, maxresults=-1, sort=None, filter_params=None):
'''
Returns a list of static assets for a course, followed by the total number of assets.
By default all assets are returned, but start and maxresults can be provided to limit the query.
The return format is a list of asset data dictionaries.
The asset data dictionaries have the following keys:
asset_key (:class:`opaque_keys.edx.AssetKey`): The key of the asset
displayname: The human-readable name of the asset
uploadDate (datetime.datetime): The date and time that the file was uploadDate
contentType: The mimetype string of the asset
md5: An md5 hash of the asset content
'''
raise NotImplementedError
def delete_all_course_assets(self, course_key):
"""
Delete all of the assets which use this course_key as an identifier
:param course_key:
"""
raise NotImplementedError
def copy_all_course_assets(self, source_course_key, dest_course_key):
"""
Copy all the course assets from source_course_key to dest_course_key
"""
raise NotImplementedError
def generate_thumbnail(self, content, tempfile_path=None, dimensions=None):
"""Create a thumbnail for a given image.
Returns a tuple of (StaticContent, AssetKey)
`content` is the StaticContent representing the image you want to make a
thumbnail out of.
`tempfile_path` is a string path to the location of a file to read from
in order to grab the image data, instead of relying on `content.data`
`dimensions` is an optional param that represents (width, height) in
pixels. It defaults to None.
"""
thumbnail_content = None
is_svg = content.content_type == 'image/svg+xml'
# use a naming convention to associate originals with the thumbnail
thumbnail_name = StaticContent.generate_thumbnail_name(
content.location.block_id, dimensions=dimensions, extension='.svg' if is_svg else None
)
thumbnail_file_location = StaticContent.compute_location(
content.location.course_key, thumbnail_name, is_thumbnail=True
)
# if we're uploading an image, then let's generate a thumbnail so that we can
# serve it up when needed without having to rescale on the fly
try:
if is_svg:
# for svg simply store the provided svg file, since vector graphics should be good enough
# for downscaling client-side
if tempfile_path is None:
thumbnail_file = BytesIO(content.data)
else:
                    with open(tempfile_path, 'rb') as f:
                        thumbnail_file = BytesIO(f.read())
thumbnail_content = StaticContent(thumbnail_file_location, thumbnail_name,
'image/svg+xml', thumbnail_file)
self.save(thumbnail_content)
elif content.content_type is not None and content.content_type.split('/')[0] == 'image':
# use PIL to do the thumbnail generation (http://www.pythonware.com/products/pil/)
# My understanding is that PIL will maintain aspect ratios while restricting
# the max-height/width to be whatever you pass in as 'size'
# @todo: move the thumbnail size to a configuration setting?!?
if tempfile_path is None:
source = BytesIO(content.data)
else:
source = tempfile_path
# We use the context manager here to avoid leaking the inner file descriptor
# of the Image object -- this way it gets closed after we're done with using it.
thumbnail_file = BytesIO()
with Image.open(source) as image:
# I've seen some exceptions from the PIL library when trying to save palletted
# PNG files to JPEG. Per the google-universe, they suggest converting to RGB first.
thumbnail_image = image.convert('RGB')
if not dimensions:
dimensions = (128, 128)
thumbnail_image.thumbnail(dimensions, Image.ANTIALIAS)
thumbnail_image.save(thumbnail_file, 'JPEG')
thumbnail_file.seek(0)
# store this thumbnail as any other piece of content
thumbnail_content = StaticContent(thumbnail_file_location, thumbnail_name,
'image/jpeg', thumbnail_file)
self.save(thumbnail_content)
except Exception as exc: # pylint: disable=broad-except
# log and continue as thumbnails are generally considered as optional
logging.exception(
u"Failed to generate thumbnail for {0}. Exception: {1}".format(content.location, str(exc))
)
return thumbnail_content, thumbnail_file_location
def ensure_indexes(self):
"""
Ensure that all appropriate indexes are created that are needed by this modulestore, or raise
an exception if unable to.
"""
pass
|
cpennington/edx-platform
|
common/lib/xmodule/xmodule/contentstore/content.py
|
Python
|
agpl-3.0
| 19,503
| 0.00323
|
"""A collection of classes and methods to deal with collections of
rates that together make up a network."""
# Common Imports
import warnings
import functools
import math
import os
from operator import mul
from collections import OrderedDict
from ipywidgets import interact
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.ticker import MaxNLocator
import networkx as nx
# Import Rate
from pynucastro.rates import Rate, Nucleus, Library
mpl.rcParams['figure.dpi'] = 100
class Composition:
"""a composition holds the mass fractions of the nuclei in a network
-- useful for evaluating the rates
"""
def __init__(self, nuclei, small=1.e-16):
"""nuclei is an iterable of the nuclei (Nucleus objects) in the network"""
if not isinstance(nuclei[0], Nucleus):
raise ValueError("must supply an iterable of Nucleus objects")
else:
self.X = {k: small for k in nuclei}
def set_solar_like(self, Z=0.02):
""" approximate a solar abundance, setting p to 0.7, He4 to 0.3 - Z and
the remainder evenly distributed with Z """
num = len(self.X)
rem = Z/(num-2)
for k in self.X:
if k == Nucleus("p"):
self.X[k] = 0.7
elif k.raw == "he4":
self.X[k] = 0.3 - Z
else:
self.X[k] = rem
self.normalize()
def set_all(self, xval):
""" set all species to a particular value """
for k in self.X:
self.X[k] = xval
def set_nuc(self, name, xval):
""" set nuclei name to the mass fraction xval """
for k in self.X:
if k.raw == name:
self.X[k] = xval
break
def normalize(self):
""" normalize the mass fractions to sum to 1 """
X_sum = sum(self.X[k] for k in self.X)
for k in self.X:
self.X[k] /= X_sum
def get_molar(self):
""" return a dictionary of molar fractions"""
molar_frac = {k: v/k.A for k, v in self.X.items()}
return molar_frac
def eval_ye(self):
""" return the electron fraction """
zvec = []
avec = []
xvec = []
for n in self.X:
zvec.append(n.Z)
avec.append(n.A)
xvec.append(self.X[n])
zvec = np.array(zvec)
avec = np.array(avec)
xvec = np.array(xvec)
electron_frac = np.sum(zvec*xvec/avec)/np.sum(xvec)
return electron_frac
def __str__(self):
ostr = ""
for k in self.X:
ostr += f" X({k}) : {self.X[k]}\n"
return ostr
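# Illustrative usage sketch; not part of the original module. The nuclei chosen
# here are arbitrary.
#
#     nuclei = [Nucleus("p"), Nucleus("he4"), Nucleus("c12")]
#     comp = Composition(nuclei)
#     comp.set_solar_like(Z=0.02)   # X(p)=0.7, X(he4)=0.28, the rest shares Z
#     comp.eval_ye()                # electron fraction of this mixture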
class RateCollection:
""" a collection of rates that together define a network """
pynucastro_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
def __init__(self, rate_files=None, libraries=None, rates=None, precedence=()):
"""
rate_files are the files that together define the network. This
can be any iterable or single string.
This can include Reaclib library files storing multiple rates.
If libraries is supplied, initialize a RateCollection using the rates
in the Library object(s) in list 'libraries'.
If rates is supplied, initialize a RateCollection using the
Rate objects in the list 'rates'.
Precedence should be sequence of rate labels (e.g. wc17) to be used to
resolve name conflicts. If a nonempty sequence is provided, the rate
collection will automatically be scanned for multiple rates with the
same name. If all of their labels were given a ranking, the rate with
the label that comes first in the sequence will be retained and the
rest discarded.
Any combination of these options may be supplied.
"""
self.files = []
self.rates = []
self.library = None
if rate_files:
if isinstance(rate_files, str):
rate_files = [rate_files]
self._read_rate_files(rate_files)
if rates:
if isinstance(rates, Rate):
rates = [rates]
try:
for r in rates:
assert isinstance(r, Rate)
except:
print('Expected Rate object or list of Rate objects passed as the rates argument.')
raise
else:
rlib = Library(rates=rates)
if not self.library:
self.library = rlib
else:
self.library = self.library + rlib
if libraries:
if isinstance(libraries, Library):
libraries = [libraries]
try:
for lib in libraries:
assert isinstance(lib, Library)
except:
print('Expected Library object or list of Library objects passed as the libraries argument.')
raise
else:
if not self.library:
self.library = libraries.pop(0)
for lib in libraries:
self.library = self.library + lib
if self.library:
self.rates = self.rates + self.library.get_rates()
if precedence:
self._make_distinguishable(precedence)
# get the unique nuclei
u = []
for r in self.rates:
t = set(r.reactants + r.products)
u = set(list(u) + list(t))
self.unique_nuclei = sorted(u)
# now make a list of each rate that touches each nucleus
# we'll store this in a dictionary keyed on the nucleus
self.nuclei_consumed = OrderedDict()
self.nuclei_produced = OrderedDict()
for n in self.unique_nuclei:
self.nuclei_consumed[n] = [r for r in self.rates if n in r.reactants]
self.nuclei_produced[n] = [r for r in self.rates if n in r.products]
# Re-order self.rates so Reaclib rates come first,
# followed by Tabular rates. This is needed if
# reaclib coefficients are targets of a pointer array
# in the Fortran network.
# It is desired to avoid wasting array size
# storing meaningless Tabular coefficient pointers.
self.rates = sorted(self.rates,
key=lambda r: r.chapter == 't')
self.tabular_rates = []
self.reaclib_rates = []
for n, r in enumerate(self.rates):
if r.chapter == 't':
self.tabular_rates.append(n)
elif isinstance(r.chapter, int):
self.reaclib_rates.append(n)
else:
print('ERROR: Chapter type unknown for rate chapter {}'.format(
str(r.chapter)))
exit()
def _read_rate_files(self, rate_files):
# get the rates
self.files = rate_files
for rf in self.files:
try:
rflib = Library(rf)
except:
print(f"Error reading library from file: {rf}")
raise
else:
if not self.library:
self.library = rflib
else:
self.library = self.library + rflib
def get_nuclei(self):
""" get all the nuclei that are part of the network """
return self.unique_nuclei
def evaluate_rates(self, rho, T, composition):
"""evaluate the rates for a specific density, temperature, and
composition"""
rvals = OrderedDict()
ys = composition.get_molar()
y_e = composition.eval_ye()
for r in self.rates:
val = r.prefactor * rho**r.dens_exp * r.eval(T, rho * y_e)
if (r.weak_type == 'electron_capture' and not r.tabular):
val = val * y_e
yfac = functools.reduce(mul, [ys[q] for q in r.reactants])
rvals[r] = yfac * val
return rvals
def evaluate_ydots(self, rho, T, composition):
"""evaluate net rate of change of molar abundance for each nucleus
for a specific density, temperature, and composition"""
rvals = self.evaluate_rates(rho, T, composition)
ydots = dict()
for nuc in self.unique_nuclei:
# Rates that consume / produce nuc
consuming_rates = self.nuclei_consumed[nuc]
producing_rates = self.nuclei_produced[nuc]
# Number of nuclei consumed / produced
nconsumed = (r.reactants.count(nuc) for r in consuming_rates)
nproduced = (r.products.count(nuc) for r in producing_rates)
# Multiply each rate by the count
consumed = (c * rvals[r] for c, r in zip(nconsumed, consuming_rates))
produced = (c * rvals[r] for c, r in zip(nproduced, producing_rates))
# Net change is difference between produced and consumed
ydots[nuc] = sum(produced) - sum(consumed)
return ydots
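    # Worked example of the bookkeeping above (illustrative only, not part of the
    # original module): for a hypothetical rate with reactants [A, A] and product
    # [B], rvals[r] is the molar reaction rate, so it is subtracted twice from
    # ydots[A] (reactants.count(A) == 2) and added once to ydots[B]
    # (products.count(B) == 1).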
def evaluate_activity(self, rho, T, composition):
"""sum over all of the terms contributing to ydot,
neglecting sign"""
rvals = self.evaluate_rates(rho, T, composition)
act = dict()
for nuc in self.unique_nuclei:
# Rates that consume / produce nuc
consuming_rates = self.nuclei_consumed[nuc]
producing_rates = self.nuclei_produced[nuc]
# Number of nuclei consumed / produced
nconsumed = (r.reactants.count(nuc) for r in consuming_rates)
nproduced = (r.products.count(nuc) for r in producing_rates)
# Multiply each rate by the count
consumed = (c * rvals[r] for c, r in zip(nconsumed, consuming_rates))
produced = (c * rvals[r] for c, r in zip(nproduced, producing_rates))
# Net activity is sum of produced and consumed
act[nuc] = sum(produced) + sum(consumed)
return act
def network_overview(self):
""" return a verbose network overview """
ostr = ""
for n in self.unique_nuclei:
ostr += f"{n}\n"
ostr += " consumed by:\n"
for r in self.nuclei_consumed[n]:
ostr += f" {r.string}\n"
ostr += " produced by:\n"
for r in self.nuclei_produced[n]:
ostr += f" {r.string}\n"
ostr += "\n"
return ostr
def get_screening_map(self):
"""a screening map is just a list of tuples containing the information
        about nuclei pairs for screening: (descriptive name of the nuclei pair,
        nucleus 1, nucleus 2, list of rates needing this screening,
        list of 1-based indices of those rates)
"""
screening_map = []
for k, r in enumerate(self.rates):
if r.ion_screen:
nucs = "_".join([str(q) for q in r.ion_screen])
in_map = False
for h, _, _, mrates, krates in screening_map:
if h == nucs:
# if we already have the reactants, then we
# will already be doing the screening factors,
# so just append this new rate to the list we
# are keeping of the rates where this
# screening is needed
in_map = True
mrates.append(r)
krates.append(k+1)
break
if not in_map:
# we handle 3-alpha specially -- we actually need 2 screening factors for it
if nucs == "he4_he4_he4":
# he4 + he4
screening_map.append((nucs, r.ion_screen[0], r.ion_screen[1],
[r], [k+1]))
# he4 + be8
be8 = Nucleus("Be8", dummy=True)
screening_map.append((nucs+"_dummy", r.ion_screen[2], be8,
[r], [k+1]))
else:
screening_map.append((nucs, r.ion_screen[0], r.ion_screen[1],
[r], [k+1]))
return screening_map
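    # Shape of the returned entries (illustrative sketch; the nuclei and index
    # values are hypothetical, not taken from a real network):
    #   ("he4_c12", Nucleus("he4"), Nucleus("c12"), [<he4 + c12 rate>], [3])
    # For triple-alpha, two entries are produced, ("he4_he4_he4", he4, he4, ...)
    # and ("he4_he4_he4_dummy", he4, Be8, ...), since that rate needs two
    # screening factors.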
def write_network(self, *args, **kwargs):
"""Before writing the network, check to make sure the rates
are distinguishable by name."""
assert self._distinguishable_rates(), "ERROR: Rates not uniquely identified by Rate.fname"
self._write_network(*args, **kwargs)
def _distinguishable_rates(self):
"""Every Rate in this RateCollection should have a unique Rate.fname,
as the network writers distinguish the rates on this basis."""
names = [r.fname for r in self.rates]
for n, r in zip(names, self.rates):
k = names.count(n)
if k > 1:
print(f'Found rate {r} named {n} with {k} entries in the RateCollection.')
print(f'Rate {r} has the original source:\n{r.original_source}')
print(f'Rate {r} is in chapter {r.chapter}')
return len(set(names)) == len(self.rates)
def _make_distinguishable(self, precedence):
"""If multiple rates have the same name, eliminate the extraneous ones according to their
labels' positions in the precedence list. Only do this if all of the labels have
rankings in the list."""
nameset = {r.fname for r in self.rates}
precedence = {lab: i for i, lab in enumerate(precedence)}
def sorting_key(i): return precedence[self.rates[i].label]
for n in nameset:
# Count instances of name, and cycle if there is only one
ind = [i for i, r in enumerate(self.rates) if r.fname == n]
k = len(ind)
if k <= 1: continue
# If there were multiple instances, use the precedence settings to delete extraneous
# rates
labels = [self.rates[i].label for i in ind]
if all(lab in precedence for lab in labels):
sorted_ind = sorted(ind, key=sorting_key)
r = self.rates[sorted_ind[0]]
for i in sorted(sorted_ind[1:], reverse=True): del self.rates[i]
print(f'Found rate {r} named {n} with {k} entries in the RateCollection.')
print(f'Kept only entry with label {r.label} out of {labels}.')
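    # Example of the precedence mechanism (hypothetical labels): with
    # precedence=("wc17", "ffn"), if two rates share the same fname and carry the
    # labels "ffn" and "wc17", only the "wc17" rate is kept. If any duplicate's
    # label is missing from the precedence sequence, that group is left untouched.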
def _write_network(self, *args, **kwargs):
"""A stub for function to output the network -- this is implementation
dependent."""
print('To create network integration source code, use a class that implements a specific network type.')
return
def plot(self, outfile=None, rho=None, T=None, comp=None,
size=(800, 600), dpi=100, title=None,
ydot_cutoff_value=None,
node_size=1000, node_font_size=13, node_color="#A0CBE2", node_shape="o",
N_range=None, Z_range=None, rotated=False,
always_show_p=False, always_show_alpha=False, hide_xalpha=False, filter_function=None):
"""Make a plot of the network structure showing the links between
        nuclei. If a full set of thermodynamic conditions are
provided (rho, T, comp), then the links are colored by rate
strength.
parameters
----------
outfile: output name of the plot -- extension determines the type
rho: density to evaluate rates with
T: temperature to evaluate rates with
comp: composition to evaluate rates with
size: tuple giving width x height of the plot in inches
dpi: pixels per inch used by matplotlib in rendering bitmap
title: title to display on the plot
ydot_cutoff_value: rate threshold below which we do not show a
line corresponding to a rate
node_size: size of a node
node_font_size: size of the font used to write the isotope in the node
node_color: color to make the nodes
node_shape: shape of the node (using matplotlib marker names)
N_range: range of neutron number to zoom in on
Z_range: range of proton number to zoom in on
        rotated: if True, we plot A - 2Z vs. Z instead of the default Z vs. N
always_show_p: include p as a node on the plot even if we
don't have p+p reactions
always_show_alpha: include He4 as a node on the plot even if we don't have 3-alpha
        hide_xalpha: don't connect the links to alpha for heavy
nuclei reactions of the form A(alpha,X)B or A(X,alpha)B, except if alpha
is the heaviest product.
filter_function: name of a custom function that takes the list
of nuclei and returns a new list with the nuclei to be shown
as nodes.
"""
G = nx.MultiDiGraph()
G.position = {}
G.labels = {}
fig, ax = plt.subplots()
#divider = make_axes_locatable(ax)
#cax = divider.append_axes('right', size='15%', pad=0.05)
#ax.plot([0, 0], [8, 8], 'b-')
# in general, we do not show p, n, alpha,
# unless we have p + p, 3-a, etc.
hidden_nuclei = ["n"]
if not always_show_p:
hidden_nuclei.append("p")
if not always_show_alpha:
hidden_nuclei.append("he4")
# nodes -- the node nuclei will be all of the heavies
# add all the nuclei into G.node
node_nuclei = []
for n in self.unique_nuclei:
if n.raw not in hidden_nuclei:
node_nuclei.append(n)
else:
for r in self.rates:
if r.reactants.count(n) > 1:
node_nuclei.append(n)
break
if filter_function is not None:
node_nuclei = list(filter(filter_function, node_nuclei))
for n in node_nuclei:
G.add_node(n)
if rotated:
G.position[n] = (n.Z, n.A - 2*n.Z)
else:
G.position[n] = (n.N, n.Z)
G.labels[n] = fr"${n.pretty}$"
# get the rates for each reaction
if rho is not None and T is not None and comp is not None:
ydots = self.evaluate_rates(rho, T, comp)
else:
ydots = None
# Do not show rates on the graph if their corresponding ydot is less than ydot_cutoff_value
invisible_rates = set()
if ydot_cutoff_value is not None:
for r in self.rates:
if ydots[r] < ydot_cutoff_value:
invisible_rates.add(r)
# edges
for n in node_nuclei:
for r in self.nuclei_consumed[n]:
for p in r.products:
if p in node_nuclei:
if hide_xalpha:
                            # first check if alpha is the heaviest nucleus on the RHS
rhs_heavy = sorted(r.products)[-1]
if not (rhs_heavy.Z == 2 and rhs_heavy.A == 4):
# for rates that are A (x, alpha) B, where A and B are heavy nuclei,
# don't show the connection of the nucleus to alpha, only show it to B
if p.Z == 2 and p.A == 4:
continue
# likewise, hide A (alpha, x) B, unless A itself is an alpha
c = r.reactants
n_alpha = 0
for nuc in c:
if nuc.Z == 2 and nuc.A == 4:
n_alpha += 1
# if there is only 1 alpha and we are working on the alpha node,
# then skip
if n_alpha == 1 and n.Z == 2 and n.A == 4:
continue
# networkx doesn't seem to keep the edges in
# any particular order, so we associate data
# to the edges here directly, in this case,
# the reaction rate, which will be used to
# color it
if ydots is None:
G.add_edges_from([(n, p)], weight=0.5)
else:
if r in invisible_rates:
continue
try:
rate_weight = math.log10(ydots[r])
except ValueError:
# if ydots[r] is zero, then set the weight
# to roughly the minimum exponent possible
# for python floats
rate_weight = -308
except:
raise
G.add_edges_from([(n, p)], weight=rate_weight)
        # It seems that networkx broke backward compatibility, and 'zorder' is no longer a valid
        # keyword argument. The 'linewidth' argument has also changed to 'linewidths'.
nx.draw_networkx_nodes(G, G.position, # plot the element at the correct position
node_color=node_color, alpha=1.0,
node_shape=node_shape, node_size=node_size, linewidths=2.0, ax=ax)
nx.draw_networkx_labels(G, G.position, G.labels, # label the name of element at the correct position
font_size=node_font_size, font_color="w", ax=ax)
# get the edges and weights coupled in the same order
edges, weights = zip(*nx.get_edge_attributes(G, 'weight').items())
edge_color=weights
ww = np.array(weights)
min_weight = ww.min()
max_weight = ww.max()
dw = (max_weight - min_weight)/4
widths = np.ones_like(ww)
widths[ww > min_weight + dw] = 1.5
widths[ww > min_weight + 2*dw] = 2.5
widths[ww > min_weight + 3*dw] = 4
edges_lc = nx.draw_networkx_edges(G, G.position, width=list(widths), # plot the arrow of reaction
edgelist=edges, edge_color=edge_color,
node_size=node_size,
edge_cmap=plt.cm.viridis, ax=ax)
# for networkx <= 2.0 draw_networkx_edges returns a
# LineCollection matplotlib type which we can use for the
# colorbar directly. For networkx >= 2.1, it is a collection
# of FancyArrowPatch-s, which we need to run through a
# PatchCollection. See:
# https://stackoverflow.com/questions/18658047/adding-a-matplotlib-colorbar-from-a-patchcollection
if ydots is not None:
pc = mpl.collections.PatchCollection(edges_lc, cmap=plt.cm.viridis)
pc.set_array(weights)
if not rotated:
plt.colorbar(pc, ax=ax, label="log10(rate)")
else:
plt.colorbar(pc, ax=ax, label="log10(rate)", orientation="horizontal", fraction=0.05)
Ns = [n.N for n in node_nuclei]
Zs = [n.Z for n in node_nuclei]
if not rotated:
ax.set_xlim(min(Ns)-1, max(Ns)+1)
else:
ax.set_xlim(min(Zs)-1, max(Zs)+1)
#plt.ylim(min(Zs)-1, max(Zs)+1)
if not rotated:
plt.xlabel(r"$N$", fontsize="large")
plt.ylabel(r"$Z$", fontsize="large")
else:
plt.xlabel(r"$Z$", fontsize="large")
plt.ylabel(r"$A - 2Z$", fontsize="large")
        ax.spines['right'].set_visible(False)
        ax.spines['top'].set_visible(False)
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
if Z_range is not None and N_range is not None:
if not rotated:
ax.set_xlim(N_range[0], N_range[1])
ax.set_ylim(Z_range[0], Z_range[1])
else:
ax.set_xlim(Z_range[0], Z_range[1])
if not rotated:
ax.set_aspect("equal", "datalim")
fig.set_size_inches(size[0]/dpi, size[1]/dpi)
if title is not None:
fig.suptitle(title)
if outfile is None:
plt.show()
else:
plt.tight_layout()
plt.savefig(outfile, dpi=dpi)
@staticmethod
def _safelog(arr, small):
arr = np.copy(arr)
if np.any(arr < 0.0):
raise ValueError("Negative values not allowed for logscale - try symlog instead.")
zeros = arr == 0.0
arr[zeros] = min(small, arr[~zeros].min() / 10)
return np.log10(arr)
@staticmethod
def _symlog(arr, linthresh=1.0):
assert linthresh >= 1.0
neg = arr < 0.0
arr = np.abs(arr)
needslog = arr > linthresh
arr[needslog] = np.log10(arr[needslog]) + linthresh
arr[neg] *= -1
return arr
@staticmethod
def _scale(arr, minval=None, maxval=None):
if minval is None: minval = arr.min()
if maxval is None: maxval = arr.max()
if minval != maxval:
scaled = (arr - minval) / (maxval - minval)
else:
scaled = np.zeros_like(arr)
scaled[scaled < 0.0] = 0.0
scaled[scaled > 1.0] = 1.0
return scaled
def gridplot(self, comp=None, color_field="X", rho=None, T=None, **kwargs):
"""
Plot nuclides as cells on a grid of Z vs. N, colored by *color_field*. If called
without a composition, the function will just plot the grid with no color field.
:param comp: Composition of the environment.
:param color_field: Field to color by. Must be one of 'X' (mass fraction),
'Y' (molar abundance), 'Xdot' (time derivative of X), 'Ydot' (time
derivative of Y), or 'activity' (sum of contributions to Ydot of
all rates, ignoring sign).
:param rho: Density to evaluate rates at. Needed for fields involving time
derivatives.
:param T: Temperature to evaluate rates at. Needed for fields involving time
derivatives.
:Keyword Arguments:
- *scale* -- One of 'linear', 'log', and 'symlog'. Linear by default.
- *small* -- If using logarithmic scaling, zeros will be replaced with
this value. 1e-30 by default.
- *linthresh* -- Linearity threshold for symlog scaling.
- *filter_function* -- A callable to filter Nucleus objects with. Should
return *True* if the nuclide should be plotted.
- *outfile* -- Output file to save the plot to. The plot will be shown if
not specified.
- *dpi* -- DPI to save the image file at.
- *cmap* -- Name of the matplotlib colormap to use. Default is 'magma'.
- *edgecolor* -- Color of grid cell edges.
- *area* -- Area of the figure without the colorbar, in square inches. 64
by default.
- *no_axes* -- Set to *True* to omit axis spines.
- *no_ticks* -- Set to *True* to omit tickmarks.
- *no_cbar* -- Set to *True* to omit colorbar.
- *cbar_label* -- Colorbar label.
- *cbar_bounds* -- Explicit colorbar bounds.
- *cbar_format* -- Format string or Formatter object for the colorbar ticks.
"""
# Process kwargs
outfile = kwargs.pop("outfile", None)
scale = kwargs.pop("scale", "linear")
cmap = kwargs.pop("cmap", "viridis")
edgecolor = kwargs.pop("edgecolor", "grey")
small = kwargs.pop("small", 1e-30)
area = kwargs.pop("area", 64)
no_axes = kwargs.pop("no_axes", False)
no_ticks = kwargs.pop("no_ticks", False)
no_cbar = kwargs.pop("no_cbar", False)
cbar_label = kwargs.pop("cbar_label", None)
cbar_format = kwargs.pop("cbar_format", None)
cbar_bounds = kwargs.pop("cbar_bounds", None)
filter_function = kwargs.pop("filter_function", None)
dpi = kwargs.pop("dpi", 100)
linthresh = kwargs.pop("linthresh", 1.0)
if kwargs: warnings.warn(f"Unrecognized keyword arguments: {kwargs.keys()}")
# Get figure, colormap
fig, ax = plt.subplots()
cmap = mpl.cm.get_cmap(cmap)
# Get nuclei and all 3 numbers
nuclei = self.unique_nuclei
if filter_function is not None:
nuclei = list(filter(filter_function, nuclei))
Ns = np.array([n.N for n in nuclei])
Zs = np.array([n.Z for n in nuclei])
As = Ns + Zs
# Compute weights
color_field = color_field.lower()
if color_field not in {"x", "y", "ydot", "xdot", "activity"}:
raise ValueError(f"Invalid color field: '{color_field}'")
if comp is None:
values = np.zeros(len(nuclei))
elif color_field == "x":
values = np.array([comp.X[nuc] for nuc in nuclei])
elif color_field == "y":
ys = comp.get_molar()
values = np.array([ys[nuc] for nuc in nuclei])
elif color_field in {"ydot", "xdot"}:
if rho is None or T is None:
raise ValueError("Need both rho and T to evaluate rates!")
ydots = self.evaluate_ydots(rho, T, comp)
values = np.array([ydots[nuc] for nuc in nuclei])
if color_field == "xdot": values *= As
elif color_field == "activity":
if rho is None or T is None:
raise ValueError("Need both rho and T to evaluate rates!")
act = self.evaluate_activity(rho, T, comp)
values = np.array([act[nuc] for nuc in nuclei])
if scale == "log": values = self._safelog(values, small)
elif scale == "symlog": values = self._symlog(values, linthresh)
if cbar_bounds is None:
cbar_bounds = values.min(), values.max()
weights = self._scale(values, *cbar_bounds)
# Plot a square for each nucleus
for nuc, weight in zip(nuclei, weights):
square = plt.Rectangle((nuc.N - 0.5, nuc.Z - 0.5), width=1, height=1,
facecolor=cmap(weight), edgecolor=edgecolor)
ax.add_patch(square)
# Set limits
maxN, minN = max(Ns), min(Ns)
maxZ, minZ = max(Zs), min(Zs)
plt.xlim(minN - 0.5, maxN + 0.6)
plt.ylim(minZ - 0.5, maxZ + 0.6)
# Set plot appearance
rat = (maxN - minN) / (maxZ - minZ)
width = np.sqrt(area * rat)
height = area / width
fig.set_size_inches(width, height)
plt.xlabel(r"N $\rightarrow$")
plt.ylabel(r"Z $\rightarrow$")
if no_axes or no_ticks:
plt.tick_params \
(
axis = 'both',
which = 'both',
bottom = False,
left = False,
labelbottom = False,
labelleft = False
)
else:
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
if no_axes:
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
# Colorbar stuff
if not no_cbar and comp is not None:
divider = make_axes_locatable(ax)
cax = divider.append_axes('right', size='3.5%', pad=0.1)
cbar_norm = mpl.colors.Normalize(*cbar_bounds)
smap = mpl.cm.ScalarMappable(norm=cbar_norm, cmap=cmap)
if not cbar_label:
capfield = color_field.capitalize()
if scale == "log":
cbar_label = f"log[{capfield}]"
elif scale == "symlog":
cbar_label = f"symlog[{capfield}]"
else:
cbar_label = capfield
fig.colorbar(smap, cax=cax, orientation="vertical",
label=cbar_label, format=cbar_format)
# Show or save
if outfile is None:
plt.show()
else:
plt.tight_layout()
plt.savefig(outfile, dpi=dpi)
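    # Usage sketch (hypothetical values, assuming `rc` is a RateCollection and
    # `comp` a Composition object covering its nuclei):
    #   rc.gridplot(comp=comp, color_field="Ydot", rho=1e6, T=5e8,
    #               scale="symlog", cmap="magma", outfile="grid.png")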
def __repr__(self):
string = ""
for r in self.rates:
string += f"{r.string}\n"
return string
class Explorer:
""" interactively explore a rate collection """
def __init__(self, rc, comp, size=(800, 600),
ydot_cutoff_value=None,
always_show_p=False, always_show_alpha=False):
""" take a RateCollection and a composition """
self.rc = rc
self.comp = comp
self.size = size
self.ydot_cutoff_value = ydot_cutoff_value
self.always_show_p = always_show_p
self.always_show_alpha = always_show_alpha
def _make_plot(self, logrho, logT):
self.rc.plot(rho=10.0**logrho, T=10.0**logT,
comp=self.comp, size=self.size,
ydot_cutoff_value=self.ydot_cutoff_value,
always_show_p=self.always_show_p,
always_show_alpha=self.always_show_alpha)
def explore(self, logrho=(2, 6, 0.1), logT=(7, 9, 0.1)):
"""Perform interactive exploration of the network structure."""
interact(self._make_plot, logrho=logrho, logT=logT)
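# Usage sketch for the classes above (illustrative; the file name and the
# thermodynamic values are hypothetical and must be replaced with real ones):
#   rc = RateCollection(rate_files=["some_reaclib_library_file"])
#   print(rc.network_overview())
#   rc.plot(rho=1e4, T=3e8, comp=some_composition, outfile="network.png")
#   Explorer(rc, some_composition).explore()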
|
pyreaclib/pyreaclib
|
pynucastro/networks/rate_collection.py
|
Python
|
bsd-3-clause
| 34,049
| 0.002731
|
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Parallel workflow execution via IPython controller
"""
from __future__ import print_function, division, unicode_literals, absolute_import
from future import standard_library
standard_library.install_aliases()
from future.utils import raise_from
from pickle import dumps
import sys
from .base import (DistributedPluginBase, logger, report_crash)
IPython_not_loaded = False
try:
from IPython import __version__ as IPyversion
from ipyparallel.error import TimeoutError
except:
IPython_not_loaded = True
def execute_task(pckld_task, node_config, updatehash):
from socket import gethostname
from traceback import format_exc
from nipype import config, logging
traceback = None
result = None
import os
cwd = os.getcwd()
try:
config.update_config(node_config)
logging.update_logging(config)
from pickle import loads
task = loads(pckld_task)
result = task.run(updatehash=updatehash)
except:
traceback = format_exc()
result = task.result
os.chdir(cwd)
return result, traceback, gethostname()
class IPythonPlugin(DistributedPluginBase):
"""Execute workflow with ipython
"""
def __init__(self, plugin_args=None):
if IPython_not_loaded:
raise ImportError('Please install ipyparallel to use this plugin.')
super(IPythonPlugin, self).__init__(plugin_args=plugin_args)
valid_args = ('url_file', 'profile', 'cluster_id', 'context', 'debug',
'timeout', 'config', 'username', 'sshserver', 'sshkey',
'password', 'paramiko')
self.client_args = {arg: plugin_args[arg]
for arg in valid_args if arg in plugin_args}
self.iparallel = None
self.taskclient = None
self.taskmap = {}
self._taskid = 0
def run(self, graph, config, updatehash=False):
"""Executes a pre-defined pipeline is distributed approaches
based on IPython's ipyparallel processing interface
"""
# retrieve clients again
try:
name = 'ipyparallel'
__import__(name)
self.iparallel = sys.modules[name]
except ImportError as e:
raise_from(ImportError("ipyparallel not found. Parallel execution "
"will be unavailable"), e)
try:
self.taskclient = self.iparallel.Client(**self.client_args)
except Exception as e:
if isinstance(e, TimeoutError):
raise_from(Exception("No IPython clients found."), e)
if isinstance(e, IOError):
raise_from(Exception("ipcluster/ipcontroller has not been started"), e)
if isinstance(e, ValueError):
raise_from(Exception("Ipython kernel not installed"), e)
else:
raise e
return super(IPythonPlugin, self).run(graph, config, updatehash=updatehash)
def _get_result(self, taskid):
if taskid not in self.taskmap:
raise ValueError('Task %d not in pending list' % taskid)
if self.taskmap[taskid].ready():
result, traceback, hostname = self.taskmap[taskid].get()
result_out = dict(result=None, traceback=None)
result_out['result'] = result
result_out['traceback'] = traceback
result_out['hostname'] = hostname
return result_out
else:
return None
def _submit_job(self, node, updatehash=False):
pckld_node = dumps(node, 2)
result_object = self.taskclient.load_balanced_view().apply(execute_task,
pckld_node,
node.config,
updatehash)
self._taskid += 1
self.taskmap[self._taskid] = result_object
return self._taskid
def _report_crash(self, node, result=None):
if result and result['traceback']:
node._result = result['result']
node._traceback = result['traceback']
return report_crash(node,
traceback=result['traceback'])
else:
return report_crash(node)
def _clear_task(self, taskid):
if IPyversion >= '0.11':
logger.debug("Clearing id: %d" % taskid)
self.taskclient.purge_results(self.taskmap[taskid])
del self.taskmap[taskid]
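# Usage sketch (assumption: this plugin is selected through Nipype's standard
# plugin mechanism; the workflow object and plugin arguments are hypothetical):
#   wf.run(plugin='IPython', plugin_args={'profile': 'default'})
# An ipcontroller/ipcluster must already be running, otherwise Client() above
# fails with the "ipcluster/ipcontroller has not been started" error.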
|
carolFrohlich/nipype
|
nipype/pipeline/plugins/ipython.py
|
Python
|
bsd-3-clause
| 4,707
| 0.002337
|
# Copyright 2013-2020 Akretion France (https://akretion.com/)
# @author: Alexis de Lattre <alexis.delattre@akretion.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
{
"name": "Account Fiscal Position VAT Check",
"version": "14.0.1.0.0",
"category": "Invoices & Payments",
"license": "AGPL-3",
"summary": "Check VAT on invoice validation",
"author": "Akretion,Odoo Community Association (OCA)",
"website": "https://github.com/OCA/account-financial-tools",
"depends": ["account", "base_vat"],
"data": [
"views/account_fiscal_position.xml",
],
"installable": True,
}
|
OCA/account-financial-tools
|
account_fiscal_position_vat_check/__manifest__.py
|
Python
|
agpl-3.0
| 637
| 0
|
# -*- coding: utf-8 -*-
# RTCP Datagram Module
from struct import unpack, pack
debug = 0
# Receiver Reports included in Sender Report
class Report:
SSRC = 0
FractionLost = 0
CumulativeNumberOfPacketsLostH = 0
CumulativeNumberOfPacketsLostL = 0
ExtendedHighestSequenceNumberReceived = 0
InterarrivalJitter = 0
LastSR = 0
DelaySinceLastSR = 0
# Source Description
class SDES:
SSRC = 0
CNAME = ''
NAME = ''
EMAIL = ''
PHONE = ''
LOC = ''
TOOL = ''
NOTE = ''
PRIV = ''
class RTCPDatagram(object):
    'RTCP packet parser and generator'
def __init__(self):
self.Datagram = ''
# SR specific
self.SSRC_sender = 0
self.NTP_TimestampH = 0
self.NTP_TimestampL = 0
self.RTP_Timestamp = 0
self.SenderPacketCount = 0
self.SenderOctetCount = 0
self.Reports = []
self.ProfileSpecificExtension = ''
# SDES specific
self.SourceDescriptions = []
def loadDatagram(self, DatagramIn):
self.Datagram = DatagramIn
def parse(self):
# RTCP parsing is complete
# including SDES, BYE and APP
# RTCP Header
(Ver_P_RC,
PacketType,
Length) = unpack('!BBH', self.Datagram[:4])
Version = (Ver_P_RC & 0b11000000) >> 6
Padding = (Ver_P_RC & 0b00100000) >> 5
# Byte offset
off = 4
# Sender's Report
if PacketType == 200:
# Sender's information
(self.SSRC_sender,
self.NTP_TimestampH,
self.NTP_TimestampL,
self.RTP_Timestamp,
self.SenderPacketCount,
self.SenderOctetCount) = unpack('!IIIIII', self.Datagram[off: off + 24])
off += 24
ReceptionCount = Ver_P_RC & 0b00011111
if debug:
print 'SDES: SR from', str(self.SSRC_sender)
# Included Receiver Reports
self.Reports = []
i = 0
for i in range(ReceptionCount):
self.Reports.append(Report())
                # unpack one 24-byte receiver-report block into the Report fields
                (self.Reports[i].SSRC,
                 self.Reports[i].FractionLost,
                 self.Reports[i].CumulativeNumberOfPacketsLostH,
                 self.Reports[i].CumulativeNumberOfPacketsLostL,
                 self.Reports[i].ExtendedHighestSequenceNumberReceived,
                 self.Reports[i].InterarrivalJitter,
                 self.Reports[i].LastSR,
                 self.Reports[i].DelaySinceLastSR) = unpack('!IBBHIIII', self.Datagram[off: off + 24])
off += 24
# Source Description (SDES)
elif PacketType == 202:
# RC now is SC
SSRCCount = Ver_P_RC & 0b00011111
self.SourceDescriptions = []
i = 0
for i in range(SSRCCount):
self.SourceDescriptions.append(SDES())
SSRC, = unpack('!I', self.Datagram[off: off + 4])
off += 4
self.SourceDescriptions[i].SSRC = SSRC
SDES_Item = -1
# Go on the list of descriptions
while SDES_Item != 0:
SDES_Item, = unpack('!B', self.Datagram[off])
off += 1
if SDES_Item != 0:
SDES_Length, = unpack('!B', self.Datagram[off])
off += 1
Value = self.Datagram[off: off + SDES_Length]
off += SDES_Length
if debug:
print 'SDES:', SDES_Item, Value
if SDES_Item == 1:
self.SourceDescriptions[i].CNAME = Value
elif SDES_Item == 2:
self.SourceDescriptions[i].NAME = Value
elif SDES_Item == 3:
self.SourceDescriptions[i].EMAIL = Value
elif SDES_Item == 4:
self.SourceDescriptions[i].PHONE = Value
elif SDES_Item == 5:
self.SourceDescriptions[i].LOC = Value
elif SDES_Item == 6:
self.SourceDescriptions[i].TOOL = Value
elif SDES_Item == 7:
self.SourceDescriptions[i].NOTE = Value
elif SDES_Item == 8:
self.SourceDescriptions[i].PRIV = Value
# Extra parsing for PRIV is needed
elif SDES_Item == 0:
# End of list. Padding to 32 bits
while (off % 4):
off += 1
# BYE Packet
elif PacketType == 203:
SSRCCount = Ver_P_RC & 0b00011111
i = 0
for i in range(SSRCCount):
SSRC, = unpack('!I', self.Datagram[off: off + 4])
off += 4
print 'SDES: SSRC ' + str(SSRC) + ' is saying goodbye.'
# Application specific packet
elif PacketType == 204:
Subtype = Ver_P_RC & 0b00011111
SSRC, = unpack('!I', self.Datagram[off: off + 4])
Name = self.Datagram[off + 4: off + 8]
AppData = self.Datagram[off + 8: off + Length]
print 'SDES: APP Packet "' + Name + '" from SSRC ' + str(SSRC) + '.'
off += Length
# Check if there is something else in the datagram
if self.Datagram[off:]:
self.Datagram = self.Datagram[off:]
self.parse()
def generateRR(self):
# Ver 2, Pad 0, RC 1
Ver_P_RC = 0b10000001
# PT 201, Length 7, SSRC 0xF00F - let it be our ID
Header = pack('!BBHI', Ver_P_RC, 201, 7, 0x0000F00F)
NTP_32 = (self.NTP_TimestampH & 0x0000FFFF) + ((self.NTP_TimestampL & 0xFFFF0000) >> 16)
# No lost packets, no delay in receiving data, RR sent right after receiving SR
# Instead of self.SenderPacketCount should be proper value
ReceiverReport = pack('!IBBHIIII', self.SSRC_sender, 0, 0, 0, self.SenderPacketCount, 1, NTP_32, 1)
return Header + ReceiverReport
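# Usage sketch (the UDP socket handling is hypothetical and not part of this module):
#   rtcp = RTCPDatagram()
#   rtcp.loadDatagram(data_received_from_rtcp_socket)
#   rtcp.parse()                 # fills in sender info / reports / SDES items
#   reply = rtcp.generateRR()    # minimal Receiver Report answering the last SR
#   rtcp_socket.sendto(reply, peer_address)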
|
plazmer/pyrtsp
|
rtcp_datagram.py
|
Python
|
gpl-2.0
| 6,169
| 0.003242
|
#!/usr/bin/python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Updates the specified datafeed on the specified account."""
from __future__ import print_function
import argparse
import sys
from shopping.content import common
# Declare command-line flags.
argparser = argparse.ArgumentParser(add_help=False)
argparser.add_argument('datafeed_id', help='The ID of the datafeed to update.')
def main(argv):
# Authenticate and construct service.
service, config, flags = common.init(
argv, __doc__, parents=[argparser])
merchant_id = config['merchantId']
datafeed_id = flags.datafeed_id
# Get the datafeed to be changed
datafeed = service.datafeeds().get(
merchantId=merchant_id, datafeedId=datafeed_id).execute()
# Changing the scheduled fetch time to 7:00.
datafeed['fetchSchedule']['hour'] = 7
request = service.datafeeds().update(
merchantId=merchant_id, datafeedId=datafeed_id, body=datafeed)
result = request.execute()
print('Datafeed with ID %s and fetchSchedule %s was updated.' %
(result['id'], str(result['fetchSchedule'])))
if __name__ == '__main__':
main(sys.argv)
|
googleads/googleads-shopping-samples
|
python/shopping/content/datafeeds/update.py
|
Python
|
apache-2.0
| 1,688
| 0.007109
|
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.glm import H2OGeneralizedLinearEstimator as glm
# Given alpha array and lambda_search=True, build two cross-validation models, one with validation dataset
# and one without for multinomial. Since they use the metrics from cross-validation, they should come up with
# the same models.
def glm_alpha_array_with_lambda_search_cv():
# read in the dataset and construct training set (and validation set)
print("Testing glm cross-validation with alpha array, lambda_search for multinomial models.")
h2o_data = h2o.import_file(pyunit_utils.locate("smalldata/glm_test/multinomial_10_classes_10_cols_10000_Rows_train.csv"))
enum_columns = ["C1", "C2", "C3", "C4", "C5"]
for cname in enum_columns:
        h2o_data[cname] = h2o_data[cname].asfactor()  # treat these columns as categorical (the bare self-assignment was a no-op)
myY = "C11"
h2o_data["C11"] = h2o_data["C11"].asfactor()
    myX = h2o_data.names
    myX.remove(myY)
data_frames = h2o_data.split_frame(ratios=[0.8], seed=7)
training_data = data_frames[0]
test_data = data_frames[1]
# build model with CV but no validation dataset
cv_model = glm(family='multinomial',alpha=[0.1], lambda_search=True, nfolds = 3, nlambdas=5,
fold_assignment="modulo")
cv_model.train(training_frame=training_data,x=myX,y=myY)
cv_r = glm.getGLMRegularizationPath(cv_model)
# build model with CV and with validation dataset
cv_model_valid = glm(family='multinomial',alpha=[0.1], lambda_search=True, nfolds = 3, nlambdas=5,
fold_assignment="modulo")
cv_model_valid.train(training_frame=training_data, validation_frame = test_data, x=myX,y=myY)
cv_r_valid = glm.getGLMRegularizationPath(cv_model_valid)
for l in range(len(cv_r['lambdas'])):
print("comparing coefficients for submodel {0} with lambda {1}, alpha {2}".format(l, cv_r_valid["lambdas"][l], cv_r_valid["alphas"][l]))
pyunit_utils.assertEqualCoeffDicts(cv_r['coefficients'][l], cv_r_valid['coefficients'][l], tol=1e-6)
pyunit_utils.assertEqualCoeffDicts(cv_r['coefficients_std'][l], cv_r_valid['coefficients_std'][l], tol=1e-6)
if __name__ == "__main__":
pyunit_utils.standalone_test(glm_alpha_array_with_lambda_search_cv)
else:
glm_alpha_array_with_lambda_search_cv()
|
michalkurka/h2o-3
|
h2o-py/tests/testdir_algos/glm/pyunit_PUBDEV_7481_lambda_search_alpha_array_multinomial_cv.py
|
Python
|
apache-2.0
| 2,324
| 0.012909
|
# This file is part of Androguard.
#
# Copyright (c) 2012 Geoffroy Gueguen <geoffroy.gueguen@gmail.com>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from collections import defaultdict
from androguard.decompiler.dad.opcode_ins import INSTRUCTION_SET
from androguard.decompiler.dad.node import Node
logger = logging.getLogger('dad.basic_blocks')
class BasicBlock(Node):
def __init__(self, name, block_ins):
super(BasicBlock, self).__init__(name)
self.ins = block_ins
self.ins_range = None
self.loc_ins = None
self.var_to_declare = set()
def get_ins(self):
return self.ins
def get_loc_with_ins(self):
if self.loc_ins is None:
self.loc_ins = zip(range(*self.ins_range), self.ins)
return self.loc_ins
def remove_ins(self, loc, ins):
self.ins.remove(ins)
self.loc_ins.remove((loc, ins))
def add_ins(self, new_ins_list):
for new_ins in new_ins_list:
self.ins.append(new_ins)
def add_variable_declaration(self, variable):
self.var_to_declare.add(variable)
def number_ins(self, num):
last_ins_num = num + len(self.ins)
self.ins_range = [num, last_ins_num]
self.loc_ins = None
return last_ins_num
class StatementBlock(BasicBlock):
def __init__(self, name, block_ins):
super(StatementBlock, self).__init__(name, block_ins)
self.type.is_stmt = True
def visit(self, visitor):
return visitor.visit_statement_node(self)
def __str__(self):
return '%d-Statement(%s)' % (self.num, self.name)
class ReturnBlock(BasicBlock):
def __init__(self, name, block_ins):
super(ReturnBlock, self).__init__(name, block_ins)
self.type.is_return = True
def visit(self, visitor):
return visitor.visit_return_node(self)
def __str__(self):
return '%d-Return(%s)' % (self.num, self.name)
class ThrowBlock(BasicBlock):
def __init__(self, name, block_ins):
super(ThrowBlock, self).__init__(name, block_ins)
self.type.is_throw = True
def visit(self, visitor):
return visitor.visit_throw_node(self)
def __str__(self):
return '%d-Throw(%s)' % (self.num, self.name)
class SwitchBlock(BasicBlock):
def __init__(self, name, switch, block_ins):
super(SwitchBlock, self).__init__(name, block_ins)
self.switch = switch
self.cases = []
self.default = None
self.node_to_case = defaultdict(list)
self.type.is_switch = True
def add_case(self, case):
self.cases.append(case)
def visit(self, visitor):
return visitor.visit_switch_node(self)
def copy_from(self, node):
super(SwitchBlock, self).copy_from(node)
self.cases = node.cases[:]
self.switch = node.switch[:]
def update_attribute_with(self, n_map):
super(SwitchBlock, self).update_attribute_with(n_map)
self.cases = [n_map.get(n, n) for n in self.cases]
for node1, node2 in n_map.iteritems():
if node1 in self.node_to_case:
self.node_to_case[node2] = self.node_to_case.pop(node1)
def order_cases(self):
values = self.switch.get_values()
if len(values) < len(self.cases):
self.default = self.cases.pop(0)
for case, node in zip(values, self.cases):
self.node_to_case[node].append(case)
def __str__(self):
return '%d-Switch(%s)' % (self.num, self.name)
class CondBlock(BasicBlock):
def __init__(self, name, block_ins):
super(CondBlock, self).__init__(name, block_ins)
self.true = None
self.false = None
self.type.is_cond = True
def update_attribute_with(self, n_map):
super(CondBlock, self).update_attribute_with(n_map)
self.true = n_map.get(self.true, self.true)
self.false = n_map.get(self.false, self.false)
def neg(self):
if len(self.ins) != 1:
raise RuntimeWarning('Condition should have only 1 instruction !')
self.ins[-1].neg()
def visit(self, visitor):
return visitor.visit_cond_node(self)
def visit_cond(self, visitor):
if len(self.ins) != 1:
raise RuntimeWarning('Condition should have only 1 instruction !')
return visitor.visit_ins(self.ins[-1])
def __str__(self):
return '%d-If(%s)' % (self.num, self.name)
class Condition(object):
def __init__(self, cond1, cond2, isand, isnot):
self.cond1 = cond1
self.cond2 = cond2
self.isand = isand
self.isnot = isnot
def neg(self):
self.isand = not self.isand
self.cond1.neg()
self.cond2.neg()
def get_ins(self):
lins = []
lins.extend(self.cond1.get_ins())
lins.extend(self.cond2.get_ins())
return lins
def get_loc_with_ins(self):
loc_ins = []
loc_ins.extend(self.cond1.get_loc_with_ins())
loc_ins.extend(self.cond2.get_loc_with_ins())
return loc_ins
def visit(self, visitor):
return visitor.visit_short_circuit_condition(self.isnot, self.isand,
self.cond1, self.cond2)
def __str__(self):
if self.isnot:
ret = '!%s %s %s'
else:
ret = '%s %s %s'
return ret % (self.cond1, ['||', '&&'][self.isand], self.cond2)
class ShortCircuitBlock(CondBlock):
def __init__(self, name, cond):
super(ShortCircuitBlock, self).__init__(name, None)
self.cond = cond
def get_ins(self):
return self.cond.get_ins()
def get_loc_with_ins(self):
return self.cond.get_loc_with_ins()
def neg(self):
self.cond.neg()
def visit_cond(self, visitor):
return self.cond.visit(visitor)
def __str__(self):
return '%d-SC(%s)' % (self.num, self.cond)
class LoopBlock(CondBlock):
def __init__(self, name, cond):
super(LoopBlock, self).__init__(name, None)
self.cond = cond
def get_ins(self):
return self.cond.get_ins()
def neg(self):
self.cond.neg()
def get_loc_with_ins(self):
return self.cond.get_loc_with_ins()
def visit(self, visitor):
return visitor.visit_loop_node(self)
def visit_cond(self, visitor):
return self.cond.visit_cond(visitor)
def update_attribute_with(self, n_map):
super(LoopBlock, self).update_attribute_with(n_map)
self.cond.update_attribute_with(n_map)
def __str__(self):
if self.looptype.is_pretest:
if self.false in self.loop_nodes:
return '%d-While(!%s)[%s]' % (self.num, self.name, self.cond)
return '%d-While(%s)[%s]' % (self.num, self.name, self.cond)
elif self.looptype.is_posttest:
return '%d-DoWhile(%s)[%s]' % (self.num, self.name, self.cond)
elif self.looptype.is_endless:
return '%d-WhileTrue(%s)[%s]' % (self.num, self.name, self.cond)
return '%d-WhileNoType(%s)' % (self.num, self.name)
class TryBlock(BasicBlock):
def __init__(self, node):
super(TryBlock, self).__init__('Try-%s' % node.name, None)
self.try_start = node
self.catch = []
# FIXME:
@property
def num(self):
return self.try_start.num
@num.setter
def num(self, value):
pass
def add_catch_node(self, node):
self.catch.append(node)
def visit(self, visitor):
visitor.visit_try_node(self)
def __str__(self):
return 'Try(%s)[%s]' % (self.name, self.catch)
class CatchBlock(BasicBlock):
def __init__(self, node):
self.exception = node.ins[0]
node.ins.pop(0)
super(CatchBlock, self).__init__('Catch-%s' % node.name, node.ins)
self.catch_start = node
def visit(self, visitor):
visitor.visit_catch_node(self)
def visit_exception(self, visitor):
visitor.visit_ins(self.exception)
def __str__(self):
return 'Catch(%s)' % self.name
def build_node_from_block(block, vmap, gen_ret, exception_type=None):
ins, lins = None, []
idx = block.get_start()
for ins in block.get_instructions():
opcode = ins.get_op_value()
if opcode == -1: # FIXME? or opcode in (0x0300, 0x0200, 0x0100):
idx += ins.get_length()
continue
try:
_ins = INSTRUCTION_SET[opcode]
except IndexError:
logger.error('Unknown instruction : %s.', ins.get_name().lower())
_ins = INSTRUCTION_SET[0]
# fill-array-data
if opcode == 0x26:
fillaray = block.get_special_ins(idx)
lins.append(_ins(ins, vmap, fillaray))
# invoke-kind[/range]
elif (0x6e <= opcode <= 0x72 or 0x74 <= opcode <= 0x78):
lins.append(_ins(ins, vmap, gen_ret))
# filled-new-array[/range]
elif 0x24 <= opcode <= 0x25:
lins.append(_ins(ins, vmap, gen_ret.new()))
# move-result*
elif 0xa <= opcode <= 0xc:
lins.append(_ins(ins, vmap, gen_ret.last()))
# move-exception
elif opcode == 0xd:
lins.append(_ins(ins, vmap, exception_type))
# monitor-{enter,exit}
elif 0x1d <= opcode <= 0x1e:
idx += ins.get_length()
continue
else:
lins.append(_ins(ins, vmap))
idx += ins.get_length()
name = block.get_name()
# return*
if 0xe <= opcode <= 0x11:
node = ReturnBlock(name, lins)
# {packed,sparse}-switch
elif 0x2b <= opcode <= 0x2c:
idx -= ins.get_length()
values = block.get_special_ins(idx)
node = SwitchBlock(name, values, lins)
# if-test[z]
elif 0x32 <= opcode <= 0x3d:
node = CondBlock(name, lins)
node.off_last_ins = ins.get_ref_off()
# throw
elif opcode == 0x27:
node = ThrowBlock(name, lins)
else:
# goto*
if 0x28 <= opcode <= 0x2a:
lins.pop()
node = StatementBlock(name, lins)
return node
|
IOsipov/androguard
|
androguard/decompiler/dad/basic_blocks.py
|
Python
|
apache-2.0
| 10,681
| 0.000094
|
default_app_config = 'providers.com.dailyssrn.apps.AppConfig'
|
zamattiac/SHARE
|
providers/com/dailyssrn/__init__.py
|
Python
|
apache-2.0
| 62
| 0
|
"""
=============================================
Effect of varying threshold for self-training
=============================================
This example illustrates the effect of a varying threshold on self-training.
The `breast_cancer` dataset is loaded, and labels are deleted such that only 50
out of 569 samples have labels. A `SelfTrainingClassifier` is fitted on this
dataset, with varying thresholds.
The upper graph shows the amount of labeled samples that the classifier has
available by the end of fit, and the accuracy of the classifier. The lower
graph shows the last iteration in which a sample was labeled. All values are
cross validated with 3 folds.
At low thresholds (in [0.4, 0.5]), the classifier learns from samples that were
labeled with a low confidence. These low-confidence samples are likely to have
incorrect predicted labels, and as a result, fitting on these incorrect labels
produces a poor accuracy. Note that the classifier labels almost all of the
samples, and only takes one iteration.
For very high thresholds (in [0.9, 1)) we observe that the classifier does not
augment its dataset (the amount of self-labeled samples is 0). As a result, the
accuracy achieved with a threshold of 0.9999 is the same as a normal supervised
classifier would achieve.
The optimal accuracy lies in between both of these extremes at a threshold of
around 0.7.
"""
# Authors: Oliver Rausch <rauscho@ethz.ch>
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.svm import SVC
from sklearn.model_selection import StratifiedKFold
from sklearn.semi_supervised import SelfTrainingClassifier
from sklearn.metrics import accuracy_score
from sklearn.utils import shuffle
n_splits = 3
X, y = datasets.load_breast_cancer(return_X_y=True)
X, y = shuffle(X, y, random_state=42)
y_true = y.copy()
y[50:] = -1
total_samples = y.shape[0]
base_classifier = SVC(probability=True, gamma=0.001, random_state=42)
x_values = np.arange(0.4, 1.05, 0.05)
x_values = np.append(x_values, 0.99999)
scores = np.empty((x_values.shape[0], n_splits))
amount_labeled = np.empty((x_values.shape[0], n_splits))
amount_iterations = np.empty((x_values.shape[0], n_splits))
for (i, threshold) in enumerate(x_values):
self_training_clf = SelfTrainingClassifier(base_classifier, threshold=threshold)
# We need manual cross validation so that we don't treat -1 as a separate
# class when computing accuracy
skfolds = StratifiedKFold(n_splits=n_splits)
for fold, (train_index, test_index) in enumerate(skfolds.split(X, y)):
X_train = X[train_index]
y_train = y[train_index]
X_test = X[test_index]
y_test = y[test_index]
y_test_true = y_true[test_index]
self_training_clf.fit(X_train, y_train)
        # The number of labeled samples available at the end of fitting
amount_labeled[i, fold] = (
total_samples
- np.unique(self_training_clf.labeled_iter_, return_counts=True)[1][0]
)
# The last iteration the classifier labeled a sample in
amount_iterations[i, fold] = np.max(self_training_clf.labeled_iter_)
y_pred = self_training_clf.predict(X_test)
scores[i, fold] = accuracy_score(y_test_true, y_pred)
ax1 = plt.subplot(211)
ax1.errorbar(
x_values, scores.mean(axis=1), yerr=scores.std(axis=1), capsize=2, color="b"
)
ax1.set_ylabel("Accuracy", color="b")
ax1.tick_params("y", colors="b")
ax2 = ax1.twinx()
ax2.errorbar(
x_values,
amount_labeled.mean(axis=1),
yerr=amount_labeled.std(axis=1),
capsize=2,
color="g",
)
ax2.set_ylim(bottom=0)
ax2.set_ylabel("Amount of labeled samples", color="g")
ax2.tick_params("y", colors="g")
ax3 = plt.subplot(212, sharex=ax1)
ax3.errorbar(
x_values,
amount_iterations.mean(axis=1),
yerr=amount_iterations.std(axis=1),
capsize=2,
color="b",
)
ax3.set_ylim(bottom=0)
ax3.set_ylabel("Amount of iterations")
ax3.set_xlabel("Threshold")
plt.show()
|
manhhomienbienthuy/scikit-learn
|
examples/semi_supervised/plot_self_training_varying_threshold.py
|
Python
|
bsd-3-clause
| 4,008
| 0.000749
|
from flask import url_for
from flask_sqlalchemy import BaseQuery
def test_create_av_scan(client, monkeypatch, malware_sample):
monkeypatch.setattr(BaseQuery, 'first_or_404', lambda x: True)
rv = client.post(url_for('api.add_av_scan'),
json={'files': [malware_sample._asdict()]})
assert rv.status_code == 202
|
certeu/do-portal
|
tests/test_av.py
|
Python
|
bsd-3-clause
| 343
| 0
|
from math import isnan
from nose.tools import eq_
from js_helper import _do_real_test_raw, _do_test_raw, _do_test_scope, _get_var
def test_assignment_with_pollution():
"""
Access a bunch of identifiers, but do not write to them. Accessing
undefined globals should not create scoped objects.
"""
assert not _do_real_test_raw("""
var x = "";
x = foo;
x = bar;
x = zap;
x = baz; // would otherwise cause pollution errors.
""").failed()
def test_basic_math():
'Tests that contexts work and that basic math is executed properly'
err = _do_test_raw("""
var x = 1;
var y = 2;
var z = x + y;
var dbz = 1;
var dbz1 = 1;
dbz = dbz / 0;
dbz1 = dbz1 % 0;
var dbz2 = 1;
var dbz3 = 1;
dbz2 /= 0;
dbz3 %= 0;
var a = 2 + 3;
var b = a - 1;
var c = b * 2;
""")
assert err.message_count == 0
assert _get_var(err, 'x') == 1
assert _get_var(err, 'y') == 2
assert _get_var(err, 'z') == 3
assert _get_var(err, 'dbz') == float('inf') # Spidermonkey does this.
assert isnan(_get_var(err, 'dbz1')) # ...and this.
assert _get_var(err, 'dbz2') == float('inf')
assert isnan(_get_var(err, 'dbz3'))
assert _get_var(err, 'a') == 5
assert _get_var(err, 'b') == 4
assert _get_var(err, 'c') == 8
def test_in_operator():
"Tests the 'in' operator."
err = _do_test_raw("""
var list = ["a",1,2,3,"foo"];
var dict = {"abc":123, "foo":"bar"};
// Must be true
var x = 0 in list;
var y = "abc" in dict;
// Must be false
var a = 5 in list;
var b = "asdf" in dict;
""")
assert err.message_count == 0
assert _get_var(err, 'x') == True
assert _get_var(err, 'y') == True
print _get_var(err, 'a'), '<<<'
assert _get_var(err, 'a') == False
assert _get_var(err, 'b') == False
def test_function_instanceof():
"""
Test that Function can be used with instanceof operators without error.
"""
assert not _do_test_raw("""
var x = foo();
print(x instanceof Function);
""").failed()
assert _do_test_raw("""
var x = foo();
print(Function(x));
""").failed()
def test_unary_typeof():
"""Test that the typeof operator does good."""
scope = _do_test_raw("""
var a = typeof(void(0)),
b = typeof(null),
c = typeof(true),
d = typeof(false),
e = typeof(new Boolean()),
f = typeof(new Boolean(true)),
g = typeof(Boolean()),
h = typeof(Boolean(false)),
i = typeof(Boolean(true)),
j = typeof(NaN),
k = typeof(Infinity),
l = typeof(-Infinity),
m = typeof(Math.PI),
n = typeof(0),
o = typeof(1),
p = typeof(-1),
q = typeof('0'),
r = typeof(Number()),
s = typeof(Number(0)),
t = typeof(new Number()),
u = typeof(new Number(0)),
v = typeof(new Number(1)),
x = typeof(function() {}),
y = typeof(Math.abs);
""")
eq_(_get_var(scope, 'a'), 'undefined')
eq_(_get_var(scope, 'b'), 'object')
eq_(_get_var(scope, 'c'), 'boolean')
eq_(_get_var(scope, 'd'), 'boolean')
eq_(_get_var(scope, 'e'), 'object')
eq_(_get_var(scope, 'f'), 'object')
eq_(_get_var(scope, 'g'), 'boolean')
eq_(_get_var(scope, 'h'), 'boolean')
eq_(_get_var(scope, 'i'), 'boolean')
# TODO: Implement "typeof" for predefined entities
# eq_(_get_var(scope, "j"), "number")
# eq_(_get_var(scope, "k"), "number")
# eq_(_get_var(scope, "l"), "number")
eq_(_get_var(scope, 'm'), 'number')
eq_(_get_var(scope, 'n'), 'number')
eq_(_get_var(scope, 'o'), 'number')
eq_(_get_var(scope, 'p'), 'number')
eq_(_get_var(scope, 'q'), 'string')
eq_(_get_var(scope, 'r'), 'number')
eq_(_get_var(scope, 's'), 'number')
eq_(_get_var(scope, 't'), 'object')
eq_(_get_var(scope, 'u'), 'object')
eq_(_get_var(scope, 'v'), 'object')
eq_(_get_var(scope, 'x'), 'function')
eq_(_get_var(scope, 'y'), 'function')
# TODO(basta): Still working on the delete operator...should be done soon.
#def test_delete_operator():
# """Test that the delete operator works correctly."""
#
# # Test that array elements can be destroyed.
# eq_(_get_var(_do_test_raw("""
# var x = [1, 2, 3];
# delete(x[2]);
# var value = x.length;
# """), "value"), 2)
#
#    # Test that the right array elements are destroyed.
# eq_(_get_var(_do_test_raw("""
# var x = [1, 2, 3];
# delete(x[2]);
# var value = x.toString();
# """), "value"), "1,2")
#
# eq_(_get_var(_do_test_raw("""
# var x = "asdf";
# delete x;
# var value = x;
# """), "value"), None)
#
# assert _do_test_raw("""
# delete(Math.PI);
# """).failed()
def test_logical_not():
"""Test that logical not is evaluated properly."""
scope = _do_test_raw("""
var a = !(null),
// b = !(var x),
c = !(void 0),
d = !(false),
e = !(true),
// f = !(),
g = !(0),
h = !(-0),
// i = !(NaN),
j = !(Infinity),
k = !(-Infinity),
l = !(Math.PI),
m = !(1),
n = !(-1),
o = !(''),
p = !('\\t'),
q = !('0'),
r = !('string'),
s = !(new String('')); // This should cover all type globals.
""")
eq_(_get_var(scope, 'a'), True)
# eq_(_get_var(scope, "b"), True)
eq_(_get_var(scope, 'c'), True)
eq_(_get_var(scope, 'd'), True)
eq_(_get_var(scope, 'e'), False)
# eq_(_get_var(scope, "f"), True)
eq_(_get_var(scope, 'g'), True)
eq_(_get_var(scope, 'h'), True)
# eq_(_get_var(scope, "i"), True)
eq_(_get_var(scope, 'j'), False)
eq_(_get_var(scope, 'k'), False)
eq_(_get_var(scope, 'l'), False)
eq_(_get_var(scope, 'm'), False)
eq_(_get_var(scope, 'n'), False)
eq_(_get_var(scope, 'o'), True)
eq_(_get_var(scope, 'p'), False)
eq_(_get_var(scope, 'q'), False)
eq_(_get_var(scope, 'r'), False)
eq_(_get_var(scope, 's'), False)
def test_concat_plus_infinity():
"""Test that Infinity is concatenated properly."""
_do_test_scope("""
var a = Infinity + "foo",
b = (-Infinity) + "foo",
c = "foo" + Infinity,
d = "foo" + (-Infinity);
""", {'a': 'Infinityfoo',
'b': '-Infinityfoo',
'c': 'fooInfinity',
'd': 'foo-Infinity'})
def test_simple_operators_when_dirty():
"""
Test that when we're dealing with dirty objects, binary operations don't
cave in the roof.
Note that this test (if it fails) may cause some ugly crashes.
"""
_do_test_raw("""
var x = foo(); // x is now a dirty object.
y = foo(); // y is now a dirty object as well.
""" +
"""y += y + x;""" * 100) # This bit makes the validator's head explode.
def test_overflow_errors():
"Test that OverflowErrors in traversal don't crash the validation process."
_do_test_raw("""
var x = Math.exp(-4*1000000*-0.0641515994108);
""")
|
kmaglione/amo-validator
|
tests/test_js_operators.py
|
Python
|
bsd-3-clause
| 7,066
| 0.000849
|
import boto3
import json
import pytest
import sure # noqa # pylint: disable=unused-import
from boto3 import Session
from botocore.client import ClientError
from moto import settings, mock_s3control, mock_config
# All tests for s3-control cannot be run under the server without a modification of the
# hosts file on your system. This is due to the fact that the URL to the host is in the form of:
# ACCOUNT_ID.s3-control.amazonaws.com <-- That Account ID part is the problem. If you want to
# make use of the moto server, update your hosts file for `THE_ACCOUNT_ID_FOR_MOTO.localhost`
# and this will work fine.
if not settings.TEST_SERVER_MODE:
@mock_s3control
@mock_config
def test_config_list_account_pab():
from moto.s3.models import ACCOUNT_ID
client = boto3.client("s3control", region_name="us-west-2")
config_client = boto3.client("config", region_name="us-west-2")
# Create the aggregator:
account_aggregation_source = {
"AccountIds": [ACCOUNT_ID],
"AllAwsRegions": True,
}
config_client.put_configuration_aggregator(
ConfigurationAggregatorName="testing",
AccountAggregationSources=[account_aggregation_source],
)
# Without a PAB in place:
result = config_client.list_discovered_resources(
resourceType="AWS::S3::AccountPublicAccessBlock"
)
assert not result["resourceIdentifiers"]
result = config_client.list_aggregate_discovered_resources(
ResourceType="AWS::S3::AccountPublicAccessBlock",
ConfigurationAggregatorName="testing",
)
assert not result["ResourceIdentifiers"]
# Create a PAB:
client.put_public_access_block(
AccountId=ACCOUNT_ID,
PublicAccessBlockConfiguration={
"BlockPublicAcls": True,
"IgnorePublicAcls": True,
"BlockPublicPolicy": True,
"RestrictPublicBuckets": True,
},
)
# Test that successful queries work (non-aggregated):
result = config_client.list_discovered_resources(
resourceType="AWS::S3::AccountPublicAccessBlock"
)
assert result["resourceIdentifiers"] == [
{
"resourceType": "AWS::S3::AccountPublicAccessBlock",
"resourceId": ACCOUNT_ID,
}
]
result = config_client.list_discovered_resources(
resourceType="AWS::S3::AccountPublicAccessBlock",
resourceIds=[ACCOUNT_ID, "nope"],
)
assert result["resourceIdentifiers"] == [
{
"resourceType": "AWS::S3::AccountPublicAccessBlock",
"resourceId": ACCOUNT_ID,
}
]
result = config_client.list_discovered_resources(
resourceType="AWS::S3::AccountPublicAccessBlock", resourceName=""
)
assert result["resourceIdentifiers"] == [
{
"resourceType": "AWS::S3::AccountPublicAccessBlock",
"resourceId": ACCOUNT_ID,
}
]
# Test that successful queries work (aggregated):
result = config_client.list_aggregate_discovered_resources(
ResourceType="AWS::S3::AccountPublicAccessBlock",
ConfigurationAggregatorName="testing",
)
regions = {region for region in Session().get_available_regions("config")}
for r in result["ResourceIdentifiers"]:
regions.remove(r.pop("SourceRegion"))
assert r == {
"ResourceType": "AWS::S3::AccountPublicAccessBlock",
"SourceAccountId": ACCOUNT_ID,
"ResourceId": ACCOUNT_ID,
}
# Just check that the len is the same -- this should be reasonable
regions = {region for region in Session().get_available_regions("config")}
result = config_client.list_aggregate_discovered_resources(
ResourceType="AWS::S3::AccountPublicAccessBlock",
ConfigurationAggregatorName="testing",
Filters={"ResourceName": ""},
)
assert len(regions) == len(result["ResourceIdentifiers"])
result = config_client.list_aggregate_discovered_resources(
ResourceType="AWS::S3::AccountPublicAccessBlock",
ConfigurationAggregatorName="testing",
Filters={"ResourceName": "", "ResourceId": ACCOUNT_ID},
)
assert len(regions) == len(result["ResourceIdentifiers"])
result = config_client.list_aggregate_discovered_resources(
ResourceType="AWS::S3::AccountPublicAccessBlock",
ConfigurationAggregatorName="testing",
Filters={
"ResourceName": "",
"ResourceId": ACCOUNT_ID,
"Region": "us-west-2",
},
)
assert (
result["ResourceIdentifiers"][0]["SourceRegion"] == "us-west-2"
and len(result["ResourceIdentifiers"]) == 1
)
# Test aggregator pagination:
result = config_client.list_aggregate_discovered_resources(
ResourceType="AWS::S3::AccountPublicAccessBlock",
ConfigurationAggregatorName="testing",
Limit=1,
)
regions = sorted(
[region for region in Session().get_available_regions("config")]
)
assert result["ResourceIdentifiers"][0] == {
"ResourceType": "AWS::S3::AccountPublicAccessBlock",
"SourceAccountId": ACCOUNT_ID,
"ResourceId": ACCOUNT_ID,
"SourceRegion": regions[0],
}
assert result["NextToken"] == regions[1]
# Get the next region:
result = config_client.list_aggregate_discovered_resources(
ResourceType="AWS::S3::AccountPublicAccessBlock",
ConfigurationAggregatorName="testing",
Limit=1,
NextToken=regions[1],
)
assert result["ResourceIdentifiers"][0] == {
"ResourceType": "AWS::S3::AccountPublicAccessBlock",
"SourceAccountId": ACCOUNT_ID,
"ResourceId": ACCOUNT_ID,
"SourceRegion": regions[1],
}
# Non-aggregated with incorrect info:
result = config_client.list_discovered_resources(
resourceType="AWS::S3::AccountPublicAccessBlock", resourceName="nope"
)
assert not result["resourceIdentifiers"]
result = config_client.list_discovered_resources(
resourceType="AWS::S3::AccountPublicAccessBlock", resourceIds=["nope"]
)
assert not result["resourceIdentifiers"]
# Aggregated with incorrect info:
result = config_client.list_aggregate_discovered_resources(
ResourceType="AWS::S3::AccountPublicAccessBlock",
ConfigurationAggregatorName="testing",
Filters={"ResourceName": "nope"},
)
assert not result["ResourceIdentifiers"]
result = config_client.list_aggregate_discovered_resources(
ResourceType="AWS::S3::AccountPublicAccessBlock",
ConfigurationAggregatorName="testing",
Filters={"ResourceId": "nope"},
)
assert not result["ResourceIdentifiers"]
result = config_client.list_aggregate_discovered_resources(
ResourceType="AWS::S3::AccountPublicAccessBlock",
ConfigurationAggregatorName="testing",
Filters={"Region": "Nope"},
)
assert not result["ResourceIdentifiers"]
@mock_s3control
@mock_config
def test_config_get_account_pab():
from moto.s3.models import ACCOUNT_ID
client = boto3.client("s3control", region_name="us-west-2")
config_client = boto3.client("config", region_name="us-west-2")
# Create the aggregator:
account_aggregation_source = {
"AccountIds": [ACCOUNT_ID],
"AllAwsRegions": True,
}
config_client.put_configuration_aggregator(
ConfigurationAggregatorName="testing",
AccountAggregationSources=[account_aggregation_source],
)
# Without a PAB in place:
with pytest.raises(ClientError) as ce:
config_client.get_resource_config_history(
resourceType="AWS::S3::AccountPublicAccessBlock", resourceId=ACCOUNT_ID
)
assert ce.value.response["Error"]["Code"] == "ResourceNotDiscoveredException"
    # Batch queries (non-aggregated and aggregated) should also return nothing yet:
result = config_client.batch_get_resource_config(
resourceKeys=[
{
"resourceType": "AWS::S3::AccountPublicAccessBlock",
"resourceId": "ACCOUNT_ID",
}
]
)
assert not result["baseConfigurationItems"]
result = config_client.batch_get_aggregate_resource_config(
ConfigurationAggregatorName="testing",
ResourceIdentifiers=[
{
"SourceAccountId": ACCOUNT_ID,
"SourceRegion": "us-west-2",
"ResourceId": ACCOUNT_ID,
"ResourceType": "AWS::S3::AccountPublicAccessBlock",
"ResourceName": "",
}
],
)
assert not result["BaseConfigurationItems"]
# Create a PAB:
client.put_public_access_block(
AccountId=ACCOUNT_ID,
PublicAccessBlockConfiguration={
"BlockPublicAcls": True,
"IgnorePublicAcls": True,
"BlockPublicPolicy": True,
"RestrictPublicBuckets": True,
},
)
# Get the proper config:
proper_config = {
"blockPublicAcls": True,
"ignorePublicAcls": True,
"blockPublicPolicy": True,
"restrictPublicBuckets": True,
}
result = config_client.get_resource_config_history(
resourceType="AWS::S3::AccountPublicAccessBlock", resourceId=ACCOUNT_ID
)
assert (
json.loads(result["configurationItems"][0]["configuration"])
== proper_config
)
assert (
result["configurationItems"][0]["accountId"]
== result["configurationItems"][0]["resourceId"]
== ACCOUNT_ID
)
result = config_client.batch_get_resource_config(
resourceKeys=[
{
"resourceType": "AWS::S3::AccountPublicAccessBlock",
"resourceId": ACCOUNT_ID,
}
]
)
assert len(result["baseConfigurationItems"]) == 1
assert (
json.loads(result["baseConfigurationItems"][0]["configuration"])
== proper_config
)
assert (
result["baseConfigurationItems"][0]["accountId"]
== result["baseConfigurationItems"][0]["resourceId"]
== ACCOUNT_ID
)
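    # The PAB is account-wide, so the aggregated lookup should return the same
    # configuration from every s3control region: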
for region in Session().get_available_regions("s3control"):
result = config_client.batch_get_aggregate_resource_config(
ConfigurationAggregatorName="testing",
ResourceIdentifiers=[
{
"SourceAccountId": ACCOUNT_ID,
"SourceRegion": region,
"ResourceId": ACCOUNT_ID,
"ResourceType": "AWS::S3::AccountPublicAccessBlock",
"ResourceName": "",
}
],
)
assert len(result["BaseConfigurationItems"]) == 1
assert (
json.loads(result["BaseConfigurationItems"][0]["configuration"])
== proper_config
)
|
spulec/moto
|
tests/test_s3control/test_s3control_config_integration.py
|
Python
|
apache-2.0
| 11,847
| 0.001013
|
import unittest
from nose.tools import eq_
from django.contrib.auth import get_user_model
from amon.apps.alerts.models import AlertsModel
from amon.apps.processes.models import process_model
from amon.apps.plugins.models import plugin_model
from amon.apps.servers.models import server_model
from amon.apps.devices.models import volumes_model, interfaces_model
User = get_user_model()
class AlertsModelTest(unittest.TestCase):
def setUp(self):
User.objects.all().delete()
self.user_email = 'foo@test.com'
self.user = User.objects.create_user(password='qwerty', email=self.user_email)
self.account_id = 1
self.model = AlertsModel()
self.model.mongo.database = 'amontest'
self.collection = self.model.mongo.get_collection('alerts')
self.server_collection = self.model.mongo.get_collection('servers')
self.history_collection = self.model.mongo.get_collection('alert_history')
self.server_collection.insert({"name" : "test",
"key": "test_me",
"account_id": 199999
})
server = self.server_collection.find_one()
self.server_id = server['_id']
def tearDown(self):
self.user.delete()
User.objects.all().delete()
def _cleanup(self):
self.collection.remove()
process_model.collection.remove()
plugin_model.collection.remove()
interfaces_model.collection.remove()
volumes_model.collection.remove()
gauges_collection = plugin_model.gauge_collection.remove()
def add_initial_data_test(self):
self._cleanup()
default_alert = {
"above_below": "above",
"email_recepients": [],
"rule_type": "global",
"server": "all",
"period": 300,
"account_id": self.account_id
}
# Add initial data only if this is empty
self.collection.insert(default_alert)
assert self.collection.find().count() == 1
self.model.add_initial_data()
assert self.collection.find().count() == 1
self._cleanup()
assert self.collection.find().count() == 0
self.model.add_initial_data()
assert self.collection.find().count() == 3
self._cleanup()
def get_alerts_for_plugin_test(self):
self._cleanup()
plugin = plugin_model.get_or_create(server_id=self.server_id, name='testplugin')
gauge = plugin_model.get_or_create_gauge_by_name(plugin=plugin, name='gauge')
plugin_alert = {
"above_below": "above",
"rule_type": "plugin",
"server": self.server_id,
"gauge": gauge['_id'],
"plugin": plugin['_id'],
"account_id": self.account_id,
"key": "testkey",
"period": 0,
"metric_value": 5
}
for i in range(0,5):
try:
del plugin_alert['_id']
except:
pass
plugin_alert['period'] = i
plugin_alert['metric_value'] = i+5
self.model.collection.insert(plugin_alert)
result = self.model.get_alerts_for_plugin(plugin=plugin)
assert len(result) == 5
self._cleanup()
def save_alert_test(self):
self.collection.remove()
self.model.save({'rule': "test", 'server': self.server_id})
eq_(self.collection.count(), 1)
def update_test(self):
self.collection.remove()
self.model.save({'rule': "test" , 'server': self.server_id, 'period': 10})
alert = self.collection.find_one()
alert_id = str(alert['_id'])
self.model.update({'rule': 'updated_test', 'period': 10}, alert_id)
alert = self.collection.find_one()
eq_(alert['rule'], 'updated_test')
def mute_test(self):
self.collection.remove()
self.collection.insert({"name" : "test", "key": "test_me"})
alert = self.collection.find_one()
alert_id = str(alert['_id'])
self.model.mute(alert_id)
result = self.collection.find_one()
eq_(result["mute"], True)
self.model.mute(alert_id)
result = self.collection.find_one()
eq_(result["mute"], False)
def get_mute_state_test(self):
self.collection.remove()
for i in range(0, 10):
self.collection.insert({"name" : "test", "mute": True,"account_id": self.account_id})
result = self.model.get_mute_state(account_id=self.account_id)
eq_(result, False) # A toggle function -> this is the next state
self.collection.remove()
for i in range(0, 10):
self.collection.insert({"name" : "test", "mute": False,"account_id": self.account_id})
result = self.model.get_mute_state(account_id=self.account_id)
eq_(result, True) # A toggle function -> this is the next state
def mute_all_test(self):
self.collection.remove()
for i in range(0, 10):
self.collection.insert({"name" : "test", "mute": False ,"account_id": self.account_id})
result = self.model.mute_all(account_id=self.account_id)
for r in self.collection.find():
eq_(r['mute'], True)
self.collection.remove()
for i in range(0, 10):
self.collection.insert({"name" : "test", "mute": True ,"account_id": self.account_id})
result = self.model.mute_all(account_id=self.account_id)
for r in self.collection.find():
eq_(r['mute'], False)
self.collection.remove()
def get_alerts_test(self):
self.collection.remove()
self.server_collection.remove()
self.server_collection.insert({"name" : "test", "key": "test_me"})
server = self.server_collection.find_one()
rule = { "server": server['_id'], "rule_type": 'system', 'metric': 2, 'period': 10}
self.collection.insert(rule)
rule = { "server": server['_id'], "rule_type": 'system', 'metric': 1, 'period': 10}
self.collection.insert(rule)
rules = self.model.get_alerts(type='system', server=server)
eq_(len(rules), 2)
self.collection.remove()
def delete_alerts_test(self):
self.collection.remove()
self.collection.insert({"name" : "test", "key": "test_me"})
rule = self.collection.find_one()
self.model.delete(alert_id=rule['_id'])
result = self.collection.count()
eq_(result,0)
self.collection.remove()
def save_healthcheck_occurence_test(self):
self.history_collection.remove()
self.collection.remove()
def save_occurence_test(self):
self.history_collection.remove()
self.collection.remove()
self.collection.insert({
"rule_type" : "custom_metric_gauge",
"metric_value" : 10,
"metric_type" : "more_than",
"period": 10
})
rule = self.collection.find_one()
rule_id = str(rule['_id'])
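        # Trigger every second from t=300 to t=329; with a 10-second period the model
        # should notify only twice -- at t=310 and t=321 (checked below).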
for i in range(300, 330):
self.model.save_occurence({
'value': 11,
'alert_id': rule_id,
'trigger': True,
'time': i
})
trigger_result = self.history_collection.find({'alert_id': rule['_id'] , 'notify': True})
assert trigger_result.count() == 2 # 310 and 321
def save_health_check_occurence_test(self):
self.history_collection.remove()
self.server_collection.remove()
self.server_collection.insert({'name': 'test'})
server = self.server_collection.find_one()
self.collection.remove()
self.collection.insert({
"rule_type" : "health_check",
"server": server['_id'],
"command" : "check-http.rb",
"status": "critical",
"period": 10
})
rule = self.collection.find_one()
rule['server'] = server
rule_id = str(rule['_id'])
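        # 110 consecutive triggering seconds with a 10-second period should yield
        # exactly 10 notifications, at the from/time pairs asserted below.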
for i in range(0, 110):
trigger_dict = {
'value': 1,
'alert_id': rule_id,
'trigger': True,
'time': i,
'health_checks_data_id': 'test'
}
self.model.save_healtcheck_occurence(trigger=trigger_dict, server_id=server['_id'])
trigger_result = self.history_collection.find({'alert_id': rule['_id'] , 'notify': True})
eq_(trigger_result.count(), 10)
for r in trigger_result.clone():
assert r['from'] in [0, 11, 22, 33, 44, 55, 66, 77, 88, 99]
assert r['time'] in [10, 21, 32, 43, 54, 65, 76, 87, 98, 109]
assert r['health_checks_data_id']
self.history_collection.remove()
# Second test with some of the triggers set to False
for i in range(300, 400):
trigger = True
if i % 2 == 1:
trigger = False
trigger_dict = {
'value': 1,
'alert_id': rule_id,
'trigger': trigger,
'time': i,
'health_checks_data_id': 'test'
}
self.model.save_healtcheck_occurence(trigger=trigger_dict, server_id=server['_id'])
trigger_result = self.history_collection.find({'alert_id': rule['_id'] , 'notify': True})
eq_(trigger_result.count(), 0)
self.history_collection.remove()
def save_system_occurence_test(self):
self.history_collection.remove()
self.server_collection.remove()
self.server_collection.insert({'name': 'test'})
server = self.server_collection.find_one()
self.collection.remove()
self.collection.insert({
"rule_type" : "system",
"server": server['_id'],
"metric_type_value" : "%",
"metric_value" : "10",
"metric_type" : "more_than",
"metric" : "CPU",
"period": 10
})
rule = self.collection.find_one()
rule_id = str(rule['_id'])
server_id = rule['server']
for i in range(300, 320):
self.model.save_system_occurence({'cpu':
[{
'value': 11,
'rule': rule_id,
'trigger': True,
'server_id': server_id,
'time': i
}]}, server_id=server_id)
trigger_result = self.history_collection.find({'alert_id': rule['_id'] , 'notify': True})
        eq_(trigger_result.count(), 1)  # Only one notification, at t=310
for r in trigger_result.clone():
eq_(r['time'], 310)
eq_(r['from'], 300)
self.history_collection.remove()
# Second test with some of the triggers set to False
for i in range(300, 400):
trigger = True
if i % 2 == 1:
trigger = False
self.model.save_system_occurence({'cpu':
[{
'value': 11,
'rule': rule_id,
'trigger': trigger,
'server': server['_id'],
'time': i
}]}, server_id=server_id)
trigger_result = self.history_collection.find({'alert_id': rule['_id'] , 'notify': True})
eq_(trigger_result.count(), 0)
self.history_collection.remove()
# Try with bigger range and multiple triggers
for i in range(300, 333):
self.model.save_system_occurence({'cpu':
[{
'value': 11,
'rule': rule_id,
'trigger': True,
'server': server['_id'],
'time': i
}]}, server_id=server_id)
trigger_result = self.history_collection.find({'alert_id': rule['_id'] , 'notify': True})
eq_(trigger_result.count(), 3)
for r in trigger_result.clone():
time_list = [310, 321, 332]
eq_(r['time'] in time_list, True)
self.history_collection.remove()
self.server_collection.remove()
def delete_server_alerts_test(self):
server_model.collection.remove()
self.collection.remove()
server_id = server_model.add('testserver')
self.collection.insert({"rule_type" : "process",})
self.collection.insert({"rule_type" : "system",})
self.collection.insert({"rule_type" : "log", "server": server_id})
self.collection.insert({"rule_type" : "dummy", "server":server_id})
self.collection.insert({"rule_type" : "dummy", "server": server_id})
self.model.delete_server_alerts(server_id)
eq_(self.collection.count(), 2)
self.collection.remove()
def get_by_id_test(self):
self.collection.remove()
server_model.collection.remove()
plugin_model.collection.remove()
server_id = server_model.add('testserver')
plugin = plugin_model.get_or_create(name='testplugin', server_id=server_id)
self.collection.insert({
"rule_type" : "process",
"server": server_id,
"plugin": plugin['_id'],
'sms_recepients': [],
'email_recepients': [],
'webhooks': []}
)
alert = self.collection.find_one()
alert_from_model = self.model.get_by_id(alert['_id'])
assert alert_from_model['plugin'] == plugin['_id']
|
martinrusev/amonone
|
amon/apps/alerts/models/tests/alerts_model_test.py
|
Python
|
mit
| 13,755
| 0.012432
|
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager
from django.db import models
class IntegerUsernameUserManager(BaseUserManager):
def create_user(self, username, password):
user = self.model(username=username)
user.set_password(password)
user.save(using=self._db)
return user
def get_by_natural_key(self, username):
return self.get(username=username)
class IntegerUsernameUser(AbstractBaseUser):
username = models.IntegerField()
password = models.CharField(max_length=255)
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = ['username', 'password']
objects = IntegerUsernameUserManager()
|
nesdis/djongo
|
tests/django_tests/tests/v22/tests/auth_tests/models/with_integer_username.py
|
Python
|
agpl-3.0
| 681
| 0
|
import sys
import ray
import pytest
from ray.test_utils import (
generate_system_config_map,
wait_for_condition,
wait_for_pid_to_exit,
)
@ray.remote
class Increase:
def method(self, x):
return x + 2
@ray.remote
def increase(x):
return x + 1
@pytest.mark.parametrize(
"ray_start_regular", [
generate_system_config_map(
num_heartbeats_timeout=20, ping_gcs_rpc_server_max_retries=60)
],
indirect=True)
def test_gcs_server_restart(ray_start_regular):
actor1 = Increase.remote()
result = ray.get(actor1.method.remote(1))
assert result == 3
ray.worker._global_node.kill_gcs_server()
ray.worker._global_node.start_gcs_server()
result = ray.get(actor1.method.remote(7))
assert result == 9
actor2 = Increase.remote()
result = ray.get(actor2.method.remote(2))
assert result == 4
result = ray.get(increase.remote(1))
assert result == 2
@pytest.mark.parametrize(
"ray_start_regular", [
generate_system_config_map(
num_heartbeats_timeout=20, ping_gcs_rpc_server_max_retries=60)
],
indirect=True)
def test_gcs_server_restart_during_actor_creation(ray_start_regular):
ids = []
for i in range(0, 100):
actor = Increase.remote()
ids.append(actor.method.remote(1))
ray.worker._global_node.kill_gcs_server()
ray.worker._global_node.start_gcs_server()
ready, unready = ray.wait(ids, num_returns=100, timeout=240)
print("Ready objects is {}.".format(ready))
print("Unready objects is {}.".format(unready))
assert len(unready) == 0
@pytest.mark.parametrize(
"ray_start_cluster_head", [
generate_system_config_map(
num_heartbeats_timeout=20, ping_gcs_rpc_server_max_retries=60)
],
indirect=True)
def test_node_failure_detector_when_gcs_server_restart(ray_start_cluster_head):
"""Checks that the node failure detector is correct when gcs server restart.
We set the cluster to timeout nodes after 2 seconds of heartbeats. We then
kill gcs server and remove the worker node and restart gcs server again to
check that the removed node will die finally.
"""
cluster = ray_start_cluster_head
worker = cluster.add_node()
cluster.wait_for_nodes()
# Make sure both head and worker node are alive.
nodes = ray.nodes()
assert len(nodes) == 2
assert nodes[0]["alive"] and nodes[1]["alive"]
to_be_removed_node = None
for node in nodes:
if node["RayletSocketName"] == worker.raylet_socket_name:
to_be_removed_node = node
assert to_be_removed_node is not None
head_node = cluster.head_node
gcs_server_process = head_node.all_processes["gcs_server"][0].process
gcs_server_pid = gcs_server_process.pid
# Kill gcs server.
cluster.head_node.kill_gcs_server()
# Wait to prevent the gcs server process becoming zombie.
gcs_server_process.wait()
wait_for_pid_to_exit(gcs_server_pid, 1000)
raylet_process = worker.all_processes["raylet"][0].process
raylet_pid = raylet_process.pid
# Remove worker node.
cluster.remove_node(worker, allow_graceful=False)
# Wait to prevent the raylet process becoming zombie.
raylet_process.wait()
wait_for_pid_to_exit(raylet_pid)
# Restart gcs server process.
cluster.head_node.start_gcs_server()
def condition():
nodes = ray.nodes()
assert len(nodes) == 2
for node in nodes:
if node["NodeID"] == to_be_removed_node["NodeID"]:
return not node["alive"]
return False
# Wait for the removed node dead.
wait_for_condition(condition, timeout=10)
if __name__ == "__main__":
import pytest
sys.exit(pytest.main(["-v", __file__]))
|
robertnishihara/ray
|
python/ray/tests/test_gcs_fault_tolerance.py
|
Python
|
apache-2.0
| 3,783
| 0.000264
|
# -*- coding: utf-8 -*-
# Copyright (C) Canux CHENG <canuxcheng@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
# OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Base class for all Sharepoint 2013 plugins."""
import logging
import traceback
from monitoring.nagios.plugin import NagiosPluginHTTP
from powershell import XMLTable, XMLSerializedTable
from powershell.xml.exceptions import XMLValidityError
logger = logging.getLogger('plugin.base')
class PluginBase(NagiosPluginHTTP):
"""Base class for all Exchange plugins."""
def __init__(self, *args, **kwargs):
super(PluginBase, self).__init__(*args, **kwargs)
self._alerts = {
'warning': [],
'critical': [],
}
self.have_criticals = False
self.have_warnings = False
def run(self):
"""Run the plugin."""
try:
self.main()
except Exception:
self.shortoutput = 'Unexpected plugin error ! Please investigate.'
self.longoutput = traceback.format_exc().splitlines()
self.unknown(self.output())
def main(self):
"""Main entry point for the plugin."""
raise NotImplementedError('Main entry point is not implemented !')
def fetch_xml_table(self):
"""Helper to fetch the XML via HTTP and parse it."""
response = self.http.get(self.options.path)
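        # Try the plain XML table format first; if it is not valid, fall back to the
        # serialized table format before reporting a CRITICAL state.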
try:
xml_table = XMLTable(response.content)
logger.debug('XML Table: %s', xml_table)
return xml_table
except XMLValidityError:
try:
xml_table = XMLSerializedTable(response.content)
logger.debug('XML Serialized Table: %s', xml_table)
return xml_table
except XMLValidityError:
self.shortoutput = 'XML format is not valid !'
self.longoutput = traceback.format_exc().splitlines()
self.critical(self.output())
def add_critical_result(self, crit_result):
"""
Add a critical result.
Used in longoutput to show the result in a CRITICAL section.
"""
self._alerts['critical'].append(crit_result)
self.have_criticals = True
def add_warning_result(self, warn_result):
"""
Add a warning result.
Used in longoutput to show the result in a WARNING section.
"""
self._alerts['warning'].append(warn_result)
self.have_warnings = True
|
crazy-canux/xplugin_nagios
|
plugin/plugins/sharepoint_2013/src/plugin/base.py
|
Python
|
gpl-2.0
| 3,450
| 0
|
# coding: utf-8
from __future__ import absolute_import
from swagger_server.models.error_model import ErrorModel
from swagger_server.models.taxonomy import Taxonomy
from . import BaseTestCase
from six import BytesIO
from flask import json
class TestTaxonomyController(BaseTestCase):
""" TaxonomyController integration test stubs """
def test_tax(self):
"""
Test case for tax
Taxonomic information, or hierarchy
"""
query_string = [('taxon', 'taxon_example'),
                        ('includelower', 'true'),
                        ('hierarchy', 'true')]
response = self.client.open('/api_v1/tax',
method='GET',
content_type='application/json',
query_string=query_string)
self.assert200(response, "Response body is : " + response.data.decode('utf-8'))
if __name__ == '__main__':
import unittest
unittest.main()
|
EarthLifeConsortium/elc_api
|
swagger_server/test/test_taxonomy_controller.py
|
Python
|
apache-2.0
| 995
| 0.001005
|
''' DowntimeCommand module
'''
import urllib2
from datetime import datetime, timedelta
from operator import itemgetter
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.LCG.GOCDBClient import GOCDBClient
from DIRAC.Core.Utilities.SitesDIRACGOCDBmapping import getGOCSiteName, getGOCFTSName
from DIRAC.ConfigurationSystem.Client.Helpers.Resources import getStorageElementOptions, getFTS3Servers
from DIRAC.ResourceStatusSystem.Client.ResourceManagementClient import ResourceManagementClient
from DIRAC.ResourceStatusSystem.Command.Command import Command
from DIRAC.ResourceStatusSystem.Utilities import CSHelpers
__RCSID__ = '$Id: $'
class DowntimeCommand( Command ):
'''
Downtime "master" Command or removed DTs.
'''
def __init__( self, args = None, clients = None ):
super( DowntimeCommand, self ).__init__( args, clients )
if 'GOCDBClient' in self.apis:
self.gClient = self.apis[ 'GOCDBClient' ]
else:
self.gClient = GOCDBClient()
if 'ResourceManagementClient' in self.apis:
self.rmClient = self.apis[ 'ResourceManagementClient' ]
else:
self.rmClient = ResourceManagementClient()
def _storeCommand( self, result ):
'''
Stores the results of doNew method on the database.
'''
for dt in result:
resQuery = self.rmClient.addOrModifyDowntimeCache( downtimeID = dt[ 'DowntimeID' ],
element = dt[ 'Element' ],
name = dt[ 'Name' ],
startDate = dt[ 'StartDate' ],
endDate = dt[ 'EndDate' ],
severity = dt[ 'Severity' ],
description = dt[ 'Description' ],
link = dt[ 'Link' ],
gocdbServiceType = dt[ 'GOCDBServiceType' ] )
return resQuery
def _cleanCommand( self, element, elementNames):
'''
Clear Cache from expired DT.
'''
resQuery = []
for elementName in elementNames:
#get the list of all DTs stored in the cache
result = self.rmClient.selectDowntimeCache( element = element,
name = elementName )
if not result[ 'OK' ]:
return result
uniformResult = [ dict( zip( result[ 'Columns' ], res ) ) for res in result[ 'Value' ] ]
currentDate = datetime.utcnow()
if len(uniformResult) == 0:
return S_OK( None )
#get the list of all ongoing DTs from GocDB
gDTLinkList = self.gClient.getCurrentDTLinkList()
if not gDTLinkList[ 'OK' ]:
return gDTLinkList
for dt in uniformResult:
#if DT expired or DT not in the list of current DTs, then we remove it from the cache
if dt[ 'EndDate' ] < currentDate or dt[ 'Link' ] not in gDTLinkList[ 'Value' ]:
result = self.rmClient.deleteDowntimeCache (
downtimeID = dt[ 'DowntimeID' ]
)
resQuery.append(result)
return S_OK( resQuery )
def _prepareCommand( self ):
'''
      DowntimeCommand requires three arguments:
      - name : <str>
      - element : Site / Resource
      - elementType: <str>
      and accepts an optional 'hours' argument.
      If the elements are Site(s), we need to get their GOCDB names. They may
      not have one, in which case they are ignored.
'''
if 'name' not in self.args:
return S_ERROR( '"name" not found in self.args' )
elementName = self.args[ 'name' ]
if 'element' not in self.args:
return S_ERROR( '"element" not found in self.args' )
element = self.args[ 'element' ]
if 'elementType' not in self.args:
return S_ERROR( '"elementType" not found in self.args' )
elementType = self.args[ 'elementType' ]
if not element in [ 'Site', 'Resource' ]:
return S_ERROR( 'element is neither Site nor Resource' )
hours = None
if 'hours' in self.args:
hours = self.args[ 'hours' ]
gocdbServiceType = None
# Transform DIRAC site names into GOCDB topics
if element == 'Site':
gocSite = getGOCSiteName( elementName )
if not gocSite[ 'OK' ]:
return gocSite
elementName = gocSite[ 'Value' ]
    # The DIRAC SE names mean nothing on the grid, but their hosts do.
elif elementType == 'StorageElement':
# We need to distinguish if it's tape or disk
seOptions = getStorageElementOptions( elementName )
if not seOptions['OK']:
return seOptions
if seOptions['Value'].get( 'TapeSE' ):
gocdbServiceType = "srm.nearline"
elif seOptions['Value'].get( 'DiskSE' ):
gocdbServiceType = "srm"
seHost = CSHelpers.getSEHost( elementName )
if not seHost['OK']:
return seHost
seHost = seHost['Value']
if not seHost:
return S_ERROR( 'No seHost for %s' % elementName )
elementName = seHost
elif elementType in ['FTS','FTS3']:
gocdbServiceType = 'FTS'
try:
#WARNING: this method presupposes that the server is an FTS3 type
elementName = getGOCFTSName(elementName)
except:
return S_ERROR( 'No FTS3 server specified in dirac.cfg (see Resources/FTSEndpoints)' )
return S_OK( ( element, elementName, hours, gocdbServiceType ) )
def doNew( self, masterParams = None ):
'''
Gets the parameters to run, either from the master method or from its
own arguments.
      For every elementName (or list of names, when called from the master method)
      it contacts the GOCDB client. The server is not very stable, so in case of
      failure it tries a second time.
      Any downtimes found are recorded and then returned.
'''
if masterParams is not None:
element, elementNames = masterParams
hours = 120
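      # 120 hours = a 5-day look-ahead window when running from the master method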
elementName = None
gocdbServiceType = None
else:
params = self._prepareCommand()
if not params[ 'OK' ]:
return params
element, elementName, hours, gocdbServiceType = params[ 'Value' ]
elementNames = [ elementName ]
#WARNING: checking all the DT that are ongoing or starting in given <hours> from now
try:
results = self.gClient.getStatus( element, name = elementNames, startingInHours = hours )
except urllib2.URLError:
try:
#Let's give it a second chance..
results = self.gClient.getStatus( element, name = elementNames, startingInHours = hours )
except urllib2.URLError, e:
return S_ERROR( e )
if not results[ 'OK' ]:
return results
results = results[ 'Value' ]
if results is None:
return S_OK( None )
#cleaning the Cache
cleanRes = self._cleanCommand(element, elementNames)
if not cleanRes[ 'OK' ]:
return cleanRes
uniformResult = []
# Humanize the results into a dictionary, not the most optimal, but readable
for downtime, downDic in results.items():
dt = {}
if 'HOSTNAME' in downDic.keys():
dt[ 'Name' ] = downDic[ 'HOSTNAME' ]
elif 'SITENAME' in downDic.keys():
dt[ 'Name' ] = downDic[ 'SITENAME' ]
else:
return S_ERROR( "SITENAME or HOSTNAME are missing" )
if 'SERVICE_TYPE' in downDic.keys():
dt[ 'GOCDBServiceType' ] = downDic[ 'SERVICE_TYPE' ]
if gocdbServiceType:
gocdbST = gocdbServiceType.lower()
csST = downDic[ 'SERVICE_TYPE' ].lower()
if gocdbST != csST:
return S_ERROR( "SERVICE_TYPE mismatch between GOCDB (%s) and CS (%s) for %s" % (gocdbST, csST, dt[ 'Name' ]) )
else:
#WARNING: do we want None as default value?
dt[ 'GOCDBServiceType' ] = None
dt[ 'DowntimeID' ] = downtime
dt[ 'Element' ] = element
dt[ 'StartDate' ] = downDic[ 'FORMATED_START_DATE' ]
dt[ 'EndDate' ] = downDic[ 'FORMATED_END_DATE' ]
dt[ 'Severity' ] = downDic[ 'SEVERITY' ]
dt[ 'Description' ] = downDic[ 'DESCRIPTION' ].replace( '\'', '' )
dt[ 'Link' ] = downDic[ 'GOCDB_PORTAL_URL' ]
uniformResult.append( dt )
storeRes = self._storeCommand( uniformResult )
if not storeRes[ 'OK' ]:
return storeRes
return S_OK()
def doCache( self ):
'''
Method that reads the cache table and tries to read from it. It will
return a list with one dictionary describing the DT if there are results.
'''
params = self._prepareCommand()
if not params[ 'OK' ]:
return params
element, elementName, hours, gocdbServiceType = params[ 'Value' ]
result = self.rmClient.selectDowntimeCache( element = element, name = elementName,
gocdbServiceType = gocdbServiceType )
if not result[ 'OK' ]:
return result
uniformResult = [ dict( zip( result[ 'Columns' ], res ) ) for res in result[ 'Value' ] ]
#'targetDate' can be either now or some 'hours' later in the future
targetDate = datetime.utcnow()
#dtOverlapping is a buffer to assure only one dt is returned
#when there are overlapping outage/warning dt for same element
#on top of the buffer we put the most recent outages
#while at the bottom the most recent warnings,
#assumption: uniformResult list is already ordered by resource/site name, severity, startdate
dtOverlapping = []
if hours is not None:
#IN THE FUTURE
targetDate = targetDate + timedelta( hours = hours )
#sorting by 'StartDate' b/c if we look for DTs in the future
#then we are interested in the earliest DTs
uniformResult.sort(key=itemgetter('Name','Severity','StartDate'))
for dt in uniformResult:
if ( dt[ 'StartDate' ] < targetDate ) and ( dt[ 'EndDate' ] > targetDate ):
#the list is already ordered in a way that outages come first over warnings
#and the earliest outages are on top of other outages and warnings
#while the earliest warnings are on top of the other warnings
#so what ever comes first in the list is also what we are looking for
dtOverlapping = [dt]
break
else:
#IN THE PRESENT
#sorting by 'EndDate' b/c if we look for DTs in the present
#then we are interested in those DTs that last longer
uniformResult.sort(key=itemgetter('Name','Severity','EndDate'))
for dt in uniformResult:
if ( dt[ 'StartDate' ] < targetDate ) and ( dt[ 'EndDate' ] > targetDate ):
#if outage, we put it on top of the overlapping buffer
#i.e. the latest ending outage is on top
if dt['Severity'].upper() == 'OUTAGE':
dtOverlapping = [dt] + dtOverlapping
#if warning, we put it at the bottom of the overlapping buffer
#i.e. the latest ending warning is at the bottom
elif dt['Severity'].upper() == 'WARNING':
dtOverlapping.append(dt)
result = None
if len(dtOverlapping) > 0:
dtTop = dtOverlapping[0]
dtBottom = dtOverlapping[-1]
if dtTop['Severity'].upper() == 'OUTAGE':
result = dtTop
else:
result = dtBottom
return S_OK( result )
def doMaster( self ):
    ''' Master method, which looks a little bit like spaghetti code, sorry!
- It gets all sites and transforms them into gocSites.
- It gets all the storage elements and transforms them into their hosts
        - It gets the CEs (FTS and file catalogs will come).
'''
gocSites = CSHelpers.getGOCSites()
if not gocSites[ 'OK' ]:
return gocSites
gocSites = gocSites[ 'Value' ]
sesHosts = CSHelpers.getStorageElementsHosts()
if not sesHosts[ 'OK' ]:
return sesHosts
sesHosts = sesHosts[ 'Value' ]
resources = sesHosts
ftsServer = getFTS3Servers()
if ftsServer[ 'OK' ]:
resources.extend( ftsServer[ 'Value' ] )
#TODO: file catalogs need also to use their hosts
#fc = CSHelpers.getFileCatalogs()
#if fc[ 'OK' ]:
# resources = resources + fc[ 'Value' ]
ce = CSHelpers.getComputingElements()
if ce[ 'OK' ]:
resources.extend( ce[ 'Value' ] )
self.log.verbose( 'Processing Sites: %s' % ', '.join( gocSites ) )
siteRes = self.doNew( ( 'Site', gocSites ) )
if not siteRes[ 'OK' ]:
self.metrics[ 'failed' ].append( siteRes[ 'Message' ] )
self.log.verbose( 'Processing Resources: %s' % ', '.join( resources ) )
resourceRes = self.doNew( ( 'Resource', resources ) )
if not resourceRes[ 'OK' ]:
self.metrics[ 'failed' ].append( resourceRes[ 'Message' ] )
return S_OK( self.metrics )
################################################################################
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
|
vmendez/DIRAC
|
ResourceStatusSystem/Command/DowntimeCommand.py
|
Python
|
gpl-3.0
| 13,233
| 0.042167
|
from datetime import date, datetime
from unittest import TestCase
from myob.constants import DEFAULT_PAGE_SIZE
from myob.credentials import PartnerCredentials
from myob.managers import Manager
class QueryParamTests(TestCase):
def setUp(self):
cred = PartnerCredentials(
consumer_key='KeyToTheKingdom',
consumer_secret='TellNoOne',
callback_uri='CallOnlyWhenCalledTo',
)
self.manager = Manager('', credentials=cred)
def assertParamsEqual(self, raw_kwargs, expected_params, method='GET'):
self.assertEqual(
self.manager.build_request_kwargs(method, {}, **raw_kwargs)['params'],
expected_params
)
def test_filter(self):
self.assertParamsEqual({'Type': 'Customer'}, {'$filter': "(Type eq 'Customer')"})
self.assertParamsEqual({'Type': ['Customer', 'Supplier']}, {'$filter': "(Type eq 'Customer' or Type eq 'Supplier')"})
self.assertParamsEqual({'DisplayID__gt': '5-0000'}, {'$filter': "(DisplayID gt '5-0000')"})
self.assertParamsEqual({'DateOccurred__lt': '2013-08-30T19:00:59.043'}, {'$filter': "(DateOccurred lt '2013-08-30T19:00:59.043')"})
self.assertParamsEqual({'Type': ('Customer', 'Supplier'), 'DisplayID__gt': '5-0000'}, {'$filter': "(Type eq 'Customer' or Type eq 'Supplier') and (DisplayID gt '5-0000')"})
self.assertParamsEqual({'raw_filter': "(Type eq 'Customer' or Type eq 'Supplier') or DisplayID gt '5-0000'", 'DateOccurred__lt': '2013-08-30T19:00:59.043'}, {'$filter': "((Type eq 'Customer' or Type eq 'Supplier') or DisplayID gt '5-0000') and (DateOccurred lt '2013-08-30T19:00:59.043')"})
self.assertParamsEqual({'IsActive': True}, {'$filter': "(IsActive eq true)"})
self.assertParamsEqual({'IsActive': False}, {'$filter': "(IsActive eq false)"})
def test_datetime_filter(self):
self.assertParamsEqual({'DateOccurred__lt': datetime(1992, 11, 14)}, {'$filter': "(DateOccurred lt datetime'1992-11-14 00:00:00')"})
self.assertParamsEqual({'DateOccurred__lt': date(1992, 11, 14)}, {'$filter': "(DateOccurred lt datetime'1992-11-14')"})
def test_orderby(self):
self.assertParamsEqual({'orderby': 'Date'}, {'$orderby': "Date"})
def test_pagination(self):
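        # $skip is (page - 1) * page size, where the page size is DEFAULT_PAGE_SIZE
        # unless an explicit limit is given (e.g. 6 * 20 = 120 below).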
self.assertParamsEqual({'page': 7}, {'$skip': 6 * DEFAULT_PAGE_SIZE})
self.assertParamsEqual({'limit': 20}, {'$top': 20})
self.assertParamsEqual({'limit': 20, 'page': 7}, {'$top': 20, '$skip': 120})
def test_format(self):
self.assertParamsEqual({'format': 'json'}, {'format': 'json'})
def test_templatename(self):
self.assertParamsEqual({'templatename': 'InvoiceTemplate - 7'}, {'templatename': 'InvoiceTemplate - 7'})
def test_returnBody(self):
self.assertParamsEqual({}, {'returnBody': 'true'}, method='PUT')
self.assertParamsEqual({}, {'returnBody': 'true'}, method='POST')
def test_combination(self):
self.assertParamsEqual(
{
'Type': ['Customer', 'Supplier'],
'DisplayID__gt': '3-0900',
'orderby': 'Date',
'page': 5,
'limit': 13,
'format': 'json',
},
{
'$filter': "(Type eq 'Customer' or Type eq 'Supplier') and (DisplayID gt '3-0900')",
'$orderby': 'Date',
'$skip': 52,
'$top': 13,
'format': 'json'
},
)
|
ABASystems/pymyob
|
tests/test_managers.py
|
Python
|
bsd-3-clause
| 3,511
| 0.003987
|
import os
import json
import numpy as np
from sklearn.externals import joblib
from scripts.features.post import Post
from scripts.features.post_feature import PostFeature
import scripts.features.length_extractor as lext
import scripts.features.charactor_extractor as cext
import scripts.features.structure_extractor as sext
class Evaluator():
def __init__(self, model_path=""):
self.model_path = model_path if model_path else os.path.join(os.path.dirname(__file__), "../models/")
self.classifier = None
self.scaler = None
self.features = []
def load(self):
self.classifier = joblib.load(self.model_path + "banana.pkl")
self.scaler = joblib.load(self.model_path + "banana_scaler.pkl")
with open(self.model_path + "banana_list.txt") as f:
self.features = f.readline().split()
return self
def evaluate(self, post_dict):
if self.classifier is None:
self.load()
f_vector = self.get_features(post_dict)
prediction = self.classifier.predict_proba(f_vector)
return prediction[0][1] # probability of good
def get_features(self, post_dict):
post = Post(post_dict)
pf = PostFeature(post)
cleaned_rendered_body = cext.RenderedBodyPreprocessor().clean_rendered_body(post.rendered_body)
pf.add(lext.TitleLengthExtractor())
pf.add(lext.SectionCountExtractor())
pf.add(cext.KanjiRatioExtractor(cleaned_rendered_body))
pf.add(cext.HiraganaRatioExtractor(cleaned_rendered_body))
pf.add(cext.KatakanaRatioExtractor(cleaned_rendered_body))
pf.add(cext.NumberRatioExtractor(cleaned_rendered_body))
pf.add(cext.PunctuationRatioExtractor(cleaned_rendered_body))
pf.add(lext.SentenceMeanLengthExtractor(cleaned_rendered_body))
pf.add(lext.SentenceMeanLengthExtractor(cleaned_rendered_body))
pf.add(lext.SentenceMaxLengthExtractor(cleaned_rendered_body))
pf.add(sext.ImageCountExtractor())
pf.add(sext.ImageRatioExtractor(cleaned_rendered_body))
pf_d = pf.to_dict(drop_disused_feature=True)
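        # Assemble the feature vector in the exact order listed in banana_list.txt,
        # then apply the persisted scaler before handing it to the classifier.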
f_vector = []
for f in self.features:
f_vector.append(pf_d[f])
f_vector = np.array(f_vector).reshape(1, -1)
f_vector = self.scaler.transform(f_vector)
return f_vector
|
chakki-works/elephant_sense
|
elephant_sense/evaluator.py
|
Python
|
apache-2.0
| 2,383
| 0.002098
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Odoo Addon, Open Source Management Solution
# Copyright (C) 2014-now Equitania Software GmbH(<http://www.equitania.de>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from odoo import models, fields, api, _
class report_stock_picking(models.Model):
_inherit = 'stock.picking'
# def get_tax(self, tax_id, language, currency_id):
# amount_net = 0;
# for line in self.order_line:
# if tax_id.id in [x.id for x in line.tax_id] and not line.eq_optional:
# amount_net += line.price_subtotal
#
# tax_amount = 0
# for tex in self.env['account.tax']._compute([tax_id], amount_net, 1):
# tax_amount += tex['amount']
#
# return self.env["eq_report_helper"].get_price(tax_amount, language, 'Sale Price Report', currency_id)
#
#
# @api.multi
# def get_price(self, value, currency_id, language):
# """
    #         Price formatting that takes the "Sale Price Report" decimal precision setting into account
# :param value:
# :param currency_id:
# :param language:
# :return:
# """
# return self.env["eq_report_helper"].get_price(value, language, 'Sale Price Report', currency_id)
#
# @api.multi
# def get_qty(self, value, language):
# """
    #         Quantity formatting that takes the "Sale Quantity Report" decimal precision setting into account
# :param value:
# :param language:
# :return:
# """
# return self.env["eq_report_helper"].get_qty(value, language, 'Sale Quantity Report')
@api.multi
def html_text_is_set(self, value):
"""
        Workaround for HTML texts: after saving with no content, Odoo automatically stores <p><br></p>.
        Strip the line breaks and paragraph tags to check whether any real content was set.
:param value:
:return:
"""
if not value:
return False
value = value.replace('<br>', '')
value = value.replace('<p>', '')
value = value.replace('</p>', '')
value = value.replace('<', '')
value = value.replace('>', '')
value = value.replace('/', '')
value = value.strip()
return value != ''
|
equitania/myodoo-addons-v10
|
eq_stock/models/eq_report_stock.py
|
Python
|
agpl-3.0
| 3,096
| 0.002913
|
from datetime import datetime
import pandas as pd
from pandas.util.testing import assert_frame_equal
import ulmo
import ulmo.usgs.eddn.parsers as parsers
import test_util
fmt = '%y%j%H%M%S'
message_test_sets = [
{
'dcp_address': 'C5149430',
'number_of_lines': 4,
'parser': 'twdb_stevens',
'first_row_message_timestamp_utc': datetime.strptime('13305152818', fmt),
},
{
'dcp_address': 'C514D73A',
'number_of_lines': 4,
'parser': 'twdb_sutron',
'first_row_message_timestamp_utc': datetime.strptime('13305072816', fmt),
},
{
'dcp_address': 'C516C1B8',
'number_of_lines': 28,
'parser': 'stevens',
'first_row_message_timestamp_utc': datetime.strptime('13305134352', fmt),
}
]
def test_parse_dcp_message_number_of_lines():
for test_set in message_test_sets:
dcp_data_file = 'usgs/eddn/' + test_set['dcp_address'] + '.txt'
with test_util.mocked_urls(dcp_data_file):
data = ulmo.usgs.eddn.get_data(test_set['dcp_address'])
assert len(data) == test_set['number_of_lines']
def test_parse_dcp_message_timestamp():
for test_set in message_test_sets:
dcp_data_file = 'usgs/eddn/' + test_set['dcp_address'] + '.txt'
with test_util.mocked_urls(dcp_data_file):
data = ulmo.usgs.eddn.get_data(test_set['dcp_address'])
assert data['message_timestamp_utc'][-1] == test_set['first_row_message_timestamp_utc']
multi_message_test_sets = [
{
'dcp_address': 'C5149430',
'data_files': {
'.*DRS_UNTIL=now.*':'usgs/eddn/C5149430_file1.txt',
'.*DRS_UNTIL=2013%2F294.*':'usgs/eddn/C5149430_file2.txt',
'.*DRS_UNTIL=2013%2F207.*':'usgs/eddn/C5149430_file3.txt'
},
'first_row_message_timestamp_utc': datetime.strptime('14016152818', fmt),
'last_row_message_timestamp_utc': datetime.strptime('13202032818', fmt),
'number_of_lines': 360,
'start': 'P365D'
}
]
def test_multi_message_download():
for test_set in multi_message_test_sets:
with test_util.mocked_urls(test_set['data_files']):
data = ulmo.usgs.eddn.get_data(test_set['dcp_address'], start=test_set['start'])
assert data['message_timestamp_utc'][-1] == test_set['first_row_message_timestamp_utc']
assert data['message_timestamp_utc'][0] == test_set['last_row_message_timestamp_utc']
assert len(data) == test_set['number_of_lines']
twdb_stevens_test_sets = [
{
'message_timestamp_utc': datetime(2013,10,30,15,28,18),
'dcp_message': '"BV:11.9 193.76$ 193.70$ 193.62$ 193.54$ 193.49$ 193.43$ 193.37$ 199.62$ 200.51$ 200.98$ 195.00$ 194.33$ ',
'return_value': [
['2013-10-30 04:00:00', pd.np.nan, 193.76],
['2013-10-30 05:00:00', pd.np.nan, 193.70],
['2013-10-30 06:00:00', pd.np.nan, 193.62],
['2013-10-30 07:00:00', pd.np.nan, 193.54],
['2013-10-30 08:00:00', pd.np.nan, 193.49],
['2013-10-30 09:00:00', pd.np.nan, 193.43],
['2013-10-30 10:00:00', pd.np.nan, 193.37],
['2013-10-30 11:00:00', pd.np.nan, 199.62],
['2013-10-30 12:00:00', pd.np.nan, 200.51],
['2013-10-30 13:00:00', pd.np.nan, 200.98],
['2013-10-30 14:00:00', pd.np.nan, 195.00],
['2013-10-30 15:00:00', 11.9, 194.33],
],
},
{
'message_timestamp_utc': datetime(2013,10,30,15,28,18),
'dcp_message': '"BV:12.6 Channel:5 Time:28 +304.63 +304.63 +304.63 +304.56 +304.63 +304.63 +304.63 +304.63 +304.63 +304.63 +304.63 +304.71 Channel:6 Time:28 +310.51 +310.66 +310.59 +310.51 +310.51 +310.59 +310.59 +310.51 +310.66 +310.51 +310.66 +310.59 ',
'return_value': [
['2013-10-30 04:00:00', '5', '28', pd.np.nan, 304.63],
['2013-10-30 05:00:00', '5', '28', pd.np.nan, 304.63],
['2013-10-30 06:00:00', '5', '28', pd.np.nan, 304.63],
['2013-10-30 07:00:00', '5', '28', pd.np.nan, 304.56],
['2013-10-30 08:00:00', '5', '28', pd.np.nan, 304.63],
['2013-10-30 09:00:00', '5', '28', pd.np.nan, 304.63],
['2013-10-30 10:00:00', '5', '28', pd.np.nan, 304.63],
['2013-10-30 11:00:00', '5', '28', pd.np.nan, 304.63],
['2013-10-30 12:00:00', '5', '28', pd.np.nan, 304.63],
['2013-10-30 13:00:00', '5', '28', pd.np.nan, 304.63],
['2013-10-30 14:00:00', '5', '28', pd.np.nan, 304.63],
['2013-10-30 15:00:00', '5', '28', 12.6, 304.71],
['2013-10-30 04:00:00', '6', '28', pd.np.nan, 310.51],
['2013-10-30 05:00:00', '6', '28', pd.np.nan, 310.66],
['2013-10-30 06:00:00', '6', '28', pd.np.nan, 310.59],
['2013-10-30 07:00:00', '6', '28', pd.np.nan, 310.51],
['2013-10-30 08:00:00', '6', '28', pd.np.nan, 310.51],
['2013-10-30 09:00:00', '6', '28', pd.np.nan, 310.59],
['2013-10-30 10:00:00', '6', '28', pd.np.nan, 310.59],
['2013-10-30 11:00:00', '6', '28', pd.np.nan, 310.51],
['2013-10-30 12:00:00', '6', '28', pd.np.nan, 310.66],
['2013-10-30 13:00:00', '6', '28', pd.np.nan, 310.51],
['2013-10-30 14:00:00', '6', '28', pd.np.nan, 310.66],
['2013-10-30 15:00:00', '6', '28', 12.6, 310.59],
]
},
{
'message_timestamp_utc': datetime(2013,10,30,15,28,18),
'dcp_message': '"BV:12.6 ',
'return_value': pd.DataFrame()
},
{
'message_timestamp_utc': datetime(2013,10,30,15,28,18),
'dcp_message': """ 79."$}X^pZBF8iB~i>>Xmj[bvr^Zv%JXl,DU=l{uu[ t(
|@2q^sjS!
""",
'return_value': pd.DataFrame()
},
]
def test_parser_twdb_stevens():
for test_set in twdb_stevens_test_sets:
print 'testing twdb_stevens parser'
if isinstance(test_set['return_value'], pd.DataFrame):
parser = getattr(parsers, 'twdb_stevens')
assert_frame_equal(pd.DataFrame(), parser(test_set))
return
if len(test_set['return_value'][0]) == 3:
columns = ['timestamp_utc', 'battery_voltage', 'water_level']
else:
columns = ['timestamp_utc', 'channel', 'time', 'battery_voltage', 'water_level']
_assert(test_set, columns, 'twdb_stevens')
twdb_sutron_test_sets = [
{
'message_timestamp_utc': datetime(2013,10,30,15,28,18),
'dcp_message': '":Sense01 60 #60 -67.84 -66.15 -67.73 -67.81 -66.42 -68.45 -68.04 -67.87 -71.53 -73.29 -70.55 -72.71 :BL 13.29',
'return_value': [
['2013-10-30 04:00:00', 'sense01', pd.np.nan, 72.71],
['2013-10-30 05:00:00', 'sense01', pd.np.nan, 70.55],
['2013-10-30 06:00:00', 'sense01', pd.np.nan, 73.29],
['2013-10-30 07:00:00', 'sense01', pd.np.nan, 71.53],
['2013-10-30 08:00:00', 'sense01', pd.np.nan, 67.87],
['2013-10-30 09:00:00', 'sense01', pd.np.nan, 68.04],
['2013-10-30 10:00:00', 'sense01', pd.np.nan, 68.45],
['2013-10-30 11:00:00', 'sense01', pd.np.nan, 66.42],
['2013-10-30 12:00:00', 'sense01', pd.np.nan, 67.81],
['2013-10-30 13:00:00', 'sense01', pd.np.nan, 67.73],
['2013-10-30 14:00:00', 'sense01', pd.np.nan, 66.15],
['2013-10-30 15:00:00', 'sense01', 13.29, 67.84],
],
},
{
'message_timestamp_utc': datetime(2013,10,30,15,28,18),
'dcp_message': '":OTT 703 60 #60 -231.47 -231.45 -231.44 -231.45 -231.47 -231.50 -231.51 -231.55 -231.56 -231.57 -231.55 -231.53 :6910704 60 #60 -261.85 -261.83 -261.81 -261.80 -261.81 -261.83 -261.85 -261.87 -261.89 -261.88 -261.86 -261.83 :BL 13.21',
'return_value': [
['2013-10-30 04:00:00', 'ott 703', pd.np.nan, 231.53],
['2013-10-30 05:00:00', 'ott 703', pd.np.nan, 231.55],
['2013-10-30 06:00:00', 'ott 703', pd.np.nan, 231.57],
['2013-10-30 07:00:00', 'ott 703', pd.np.nan, 231.56],
['2013-10-30 08:00:00', 'ott 703', pd.np.nan, 231.55],
['2013-10-30 09:00:00', 'ott 703', pd.np.nan, 231.51],
['2013-10-30 10:00:00', 'ott 703', pd.np.nan, 231.50],
['2013-10-30 11:00:00', 'ott 703', pd.np.nan, 231.47],
['2013-10-30 12:00:00', 'ott 703', pd.np.nan, 231.45],
['2013-10-30 13:00:00', 'ott 703', pd.np.nan, 231.44],
['2013-10-30 14:00:00', 'ott 703', pd.np.nan, 231.45],
['2013-10-30 15:00:00', 'ott 703', 13.21, 231.47],
['2013-10-30 04:00:00', '6910704', pd.np.nan, 261.83],
['2013-10-30 05:00:00', '6910704', pd.np.nan, 261.86],
['2013-10-30 06:00:00', '6910704', pd.np.nan, 261.88],
['2013-10-30 07:00:00', '6910704', pd.np.nan, 261.89],
['2013-10-30 08:00:00', '6910704', pd.np.nan, 261.87],
['2013-10-30 09:00:00', '6910704', pd.np.nan, 261.85],
['2013-10-30 10:00:00', '6910704', pd.np.nan, 261.83],
['2013-10-30 11:00:00', '6910704', pd.np.nan, 261.81],
['2013-10-30 12:00:00', '6910704', pd.np.nan, 261.80],
['2013-10-30 13:00:00', '6910704', pd.np.nan, 261.81],
['2013-10-30 14:00:00', '6910704', pd.np.nan, 261.83],
['2013-10-30 15:00:00', '6910704', 13.21, 261.85],
]
},
{
'message_timestamp_utc': datetime(2013,10,30,15,28,18),
'dcp_message': '"\r\n// \r\n// \r\n// \r\n// \r\n// \r\n-199.88 \r\n-199.92 \r\n-199.96 \r\n-199.98 \r\n-200.05 \r\n-200.09 \r\n-200.15',
'return_value': [
['2013-10-30 04:00:00', pd.np.nan, 200.15],
['2013-10-30 05:00:00', pd.np.nan, 200.09],
['2013-10-30 06:00:00', pd.np.nan, 200.05],
['2013-10-30 07:00:00', pd.np.nan, 199.98],
['2013-10-30 08:00:00', pd.np.nan, 199.96],
['2013-10-30 09:00:00', pd.np.nan, 199.92],
['2013-10-30 10:00:00', pd.np.nan, 199.88],
['2013-10-30 11:00:00', pd.np.nan, pd.np.nan],
['2013-10-30 12:00:00', pd.np.nan, pd.np.nan],
['2013-10-30 13:00:00', pd.np.nan, pd.np.nan],
['2013-10-30 14:00:00', pd.np.nan, pd.np.nan],
['2013-10-30 15:00:00', pd.np.nan, pd.np.nan],
],
},
]
def test_parser_twdb_sutron():
for test_set in twdb_sutron_test_sets:
print 'testing twdb_sutron parser'
if len(test_set['return_value'][0]) == 3:
columns = ['timestamp_utc', 'battery_voltage', 'water_level']
else:
columns = ['timestamp_utc', 'channel', 'battery_voltage', 'water_level']
_assert(test_set, columns, 'twdb_sutron')
twdb_texuni_test_sets = [
{
'message_timestamp_utc': datetime(2013,10,30,15,28,18),
'dcp_message': ' \r\n+0.000,-109.8,\r\n+0.000,-109.8,\r\n+0.000,-109.8,\r\n+0.000,-109.8,\r\n+0.000,-109.8,\r\n+0.000,-109.9,\r\n+0.000,-109.9,\r\n+0.000,-109.9,\r\n+0.000,-109.9,\r\n+0.000,-109.9,\r\n+0.000,-110.0,\r\n+0.000,-110.0,\r\n+0.000,-109.9,\r\n+0.000,-109.9,\r\n+0.000,-109.9,\r\n+0.000,-109.9,\r\n+0.000,-110.0,\r\n+0.000,-110.0,\r\n+0.000,-110.0,\r\n+0.000,-110.1,\r\n+0.000,-110.1,\r\n+0.000,-110.1,\r\n+0.000,-110.1,\r\n+0.000,-110.1,\r\n+340.0,+2013.,+307.0,+1400.,+12.07,+0.000,-109.9,-109.8,-110.1,+30.57,',
'return_value': [
['2013-10-29 16:00:00', pd.np.nan, 109.8],
['2013-10-29 17:00:00', pd.np.nan, 109.8],
['2013-10-29 18:00:00', pd.np.nan, 109.8],
['2013-10-29 19:00:00', pd.np.nan, 109.8],
['2013-10-29 20:00:00', pd.np.nan, 109.8],
['2013-10-29 21:00:00', pd.np.nan, 109.9],
['2013-10-29 22:00:00', pd.np.nan, 109.9],
['2013-10-29 23:00:00', pd.np.nan, 109.9],
['2013-10-30 00:00:00', pd.np.nan, 109.9],
['2013-10-30 01:00:00', pd.np.nan, 109.9],
['2013-10-30 02:00:00', pd.np.nan, 110.0],
['2013-10-30 03:00:00', pd.np.nan, 110.0],
['2013-10-30 04:00:00', pd.np.nan, 109.9],
['2013-10-30 05:00:00', pd.np.nan, 109.9],
['2013-10-30 06:00:00', pd.np.nan, 109.9],
['2013-10-30 07:00:00', pd.np.nan, 109.9],
['2013-10-30 08:00:00', pd.np.nan, 110.0],
['2013-10-30 09:00:00', pd.np.nan, 110.0],
['2013-10-30 10:00:00', pd.np.nan, 110.0],
['2013-10-30 11:00:00', pd.np.nan, 110.1],
['2013-10-30 12:00:00', pd.np.nan, 110.1],
['2013-10-30 13:00:00', pd.np.nan, 110.1],
['2013-10-30 14:00:00', pd.np.nan, 110.1],
['2013-10-30 15:00:00', pd.np.nan, 110.1],
]
},
]
def test_parser_twdb_texuni():
for test_set in twdb_texuni_test_sets:
print 'testing twdb_texuni parser'
columns = ['timestamp_utc', 'battery_voltage', 'water_level']
_assert(test_set, columns, 'twdb_texuni')
def _assert(test_set, columns, parser):
expected = pd.DataFrame(test_set['return_value'], columns=columns)
expected.index = pd.to_datetime(expected['timestamp_utc'])
del expected['timestamp_utc']
parser = getattr(parsers, parser)
df = parser(test_set)
# to compare pandas dataframes, columns must be in same order
if 'channel' in df.columns:
for channel in pd.np.unique(df['channel']):
df_c = df[df['channel']==channel]
expected_c = expected[expected['channel']==channel]
assert_frame_equal(df_c.sort(axis=1).sort(axis=0), expected_c.sort(axis=1).sort(axis=0))
else:
assert_frame_equal(df.sort(axis=1).sort(axis=0), expected.sort(axis=1).sort(axis=0))
|
nathanhilbert/ulmo
|
test/usgs_eddn_test.py
|
Python
|
bsd-3-clause
| 13,809
| 0.004779
|
#!/usr/bin/env python
# Fan control mod for Raspberry Pi
import RPi.GPIO as GPIO, time, datetime, subprocess, os, logging
from daemon import runner
DEBUG = 1
GPIO.setmode(GPIO.BCM)
# Respective ports on the GPIO header
FAST = 18
SLOW = 25
# Default settings for fan control
MAX_TEMP = 50
MIN_TEMP = 40
POLL_TIME = 5
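# The fan runs at full speed above MAX_TEMP, at low speed between MIN_TEMP and
# MAX_TEMP, and is switched off below MIN_TEMP; the poll interval is stretched
# as the Pi gets cooler (see App.run below).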
def get_temperature():
# Returns the temperature in degrees C
try:
s = subprocess.check_output(["vcgencmd","measure_temp"])
return float(s.split("=")[1][:-3])
except:
# Something went wrong keep the fan on high
return MAX_TEMP+1
class App():
def __init__(self):
self.stdin_path = '/dev/null'
self.stdout_path = '/dev/tty'
self.stderr_path = '/dev/tty'
self.pidfile_path = '/var/run/fandaemon/fandaemon.pid'
self.pidfile_timeout = 5
def run(self):
GPIO.setup(FAST, GPIO.OUT)
GPIO.setup(SLOW, GPIO.OUT)
try:
while True:
current_temp = get_temperature()
logstr = 'Current temp is ' + str(current_temp)
logger.info(logstr);
if current_temp > MAX_TEMP:
logger.info('Setting fan speed to HIGH')
GPIO.output(SLOW, GPIO.LOW)
GPIO.output(FAST, GPIO.HIGH)
POLL_TIME = 5
elif (current_temp <= MAX_TEMP) and (current_temp > MIN_TEMP):
logger.info('Setting fan speed to LOW')
GPIO.output(FAST, GPIO.LOW)
GPIO.output(SLOW, GPIO.HIGH)
POLL_TIME = 10
else:
logger.info('Turn the fan off!')
GPIO.output(SLOW, GPIO.LOW)
GPIO.output(FAST, GPIO.LOW)
POLL_TIME = 15
time.sleep(POLL_TIME)
except:
logger.error('Exiting now!')
finally:
GPIO.cleanup()
app = App()
logger = logging.getLogger("DaemonLog")
logger.setLevel(logging.INFO)
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
handler = logging.FileHandler("/var/log/fandaemon/fandaemon.log")
handler.setFormatter(formatter)
logger.addHandler(handler)
daemon_runner = runner.DaemonRunner(app)
#This ensures that the logger file handle does not get closed during daemonization
daemon_runner.daemon_context.files_preserve=[handler.stream]
daemon_runner.do_action()
|
zainag/RPi-Test-Projects
|
fan_control_daemon.py
|
Python
|
mit
| 2,129
| 0.034758
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A binary to train CIFAR-10 using a single GPU.
Accuracy:
cifar10_train.py achieves ~86% accuracy after 100K steps (256 epochs of
data) as judged by cifar10_eval.py.
Speed: With batch_size 128.
System | Step Time (sec/batch) | Accuracy
------------------------------------------------------------------
1 Tesla K20m | 0.35-0.60 | ~86% at 60K steps (5 hours)
1 Tesla K40m | 0.25-0.35 | ~86% at 100K steps (4 hours)
Usage:
Please see the tutorial and website for how to download the CIFAR-10
data set, compile the program and train the model.
http://tensorflow.org/tutorials/deep_cnn/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import time
import tensorflow as tf
import cifar10
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('train_dir', '/tmp/cifar10_train',
"""Directory where to write event logs """
"""and checkpoint.""")
tf.app.flags.DEFINE_integer('max_steps', 1000000,
"""Number of batches to run.""")
tf.app.flags.DEFINE_boolean('log_device_placement', False,
"""Whether to log device placement.""")
tf.app.flags.DEFINE_integer('log_frequency', 10,
"""How often to log results to the console.""")
def train():
"""Train CIFAR-10 for a number of steps."""
with tf.Graph().as_default():
global_step = tf.train.get_or_create_global_step()
# Get images and labels for CIFAR-10.
# Force input pipeline to CPU:0 to avoid operations sometimes ending up on
# GPU and resulting in a slow down.
with tf.device('/cpu:0'):
images, labels = cifar10.distorted_inputs()
# Build a Graph that computes the logits predictions from the
# inference model.
logits = cifar10.inference(images)
# Calculate loss.
loss = cifar10.loss(logits, labels)
# Build a Graph that trains the model with one batch of examples and
# updates the model parameters.
train_op = cifar10.train(loss, global_step)
class _LoggerHook(tf.train.SessionRunHook):
"""Logs loss and runtime."""
def begin(self):
self._step = -1
self._start_time = time.time()
def before_run(self, run_context):
self._step += 1
return tf.train.SessionRunArgs(loss) # Asks for loss value.
def after_run(self, run_context, run_values):
if self._step % FLAGS.log_frequency == 0:
current_time = time.time()
duration = current_time - self._start_time
self._start_time = current_time
loss_value = run_values.results
examples_per_sec = FLAGS.log_frequency * FLAGS.batch_size / duration
sec_per_batch = float(duration / FLAGS.log_frequency)
format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
'sec/batch)')
print (format_str % (datetime.now(), self._step, loss_value,
examples_per_sec, sec_per_batch))
with tf.train.MonitoredTrainingSession(
checkpoint_dir=FLAGS.train_dir,
hooks=[tf.train.StopAtStepHook(last_step=FLAGS.max_steps),
tf.train.NanTensorHook(loss),
_LoggerHook()],
config=tf.ConfigProto(
log_device_placement=FLAGS.log_device_placement)) as mon_sess:
while not mon_sess.should_stop():
mon_sess.run(train_op)
def main(argv=None): # pylint: disable=unused-argument
cifar10.maybe_download_and_extract()
if tf.gfile.Exists(FLAGS.train_dir):
tf.gfile.DeleteRecursively(FLAGS.train_dir)
tf.gfile.MakeDirs(FLAGS.train_dir)
train()
if __name__ == '__main__':
tf.app.run()
|
jiaphuan/models
|
tutorials/image/cifar10/cifar10_train.py
|
Python
|
apache-2.0
| 4,491
| 0.004899
|
from tests.compiler import compile_snippet, A_ID, LST_ID, SELF_ID, VAL1_ID, internal_call, A_INST, INNER_ID, \
CONTAINER_INNER_ID, STATIC_START
from thinglang.compiler.opcodes import OpcodePushLocal, OpcodePushMember, OpcodePushStatic, OpcodePop, \
OpcodeDereference, OpcodeCallVirtual
def test_direct_member_access():
assert compile_snippet('a_inst.a1') == [
OpcodePushMember(A_INST, 0)
]
def test_nested_member_access():
assert compile_snippet('self.inner.inner.inner') == [
OpcodePushMember(SELF_ID, INNER_ID),
OpcodeDereference(CONTAINER_INNER_ID),
OpcodeDereference(CONTAINER_INNER_ID)
]
def test_member_access_via_method_call():
assert compile_snippet('a_inst.me().a1') == [
OpcodePushLocal(A_INST),
OpcodeCallVirtual(1),
OpcodeDereference(0)
]
assert compile_snippet('a_inst.me().me().a1') == [
OpcodePushLocal(A_INST),
OpcodeCallVirtual(1),
OpcodeCallVirtual(1),
OpcodeDereference(0)
]
def test_local_list_immediate_index():
assert compile_snippet('lst[123]') == [
OpcodePushStatic(STATIC_START),
OpcodePushLocal(LST_ID),
internal_call('list.get')
]
def test_local_list_non_immediate_index():
assert compile_snippet('lst[a]') == [
OpcodePushLocal(A_ID),
OpcodePushLocal(LST_ID),
internal_call('list.get')
]
assert compile_snippet('lst[self.val1]') == [
OpcodePushMember(SELF_ID, VAL1_ID),
OpcodePushLocal(LST_ID),
internal_call('list.get')
]
|
ytanay/thinglang
|
tests/compiler/test_access_compilation.py
|
Python
|
mit
| 1,587
| 0.00189
|
from __future__ import unicode_literals
import os
from mopidy import config, ext
__version__ = '0.0.1'
class Extension(ext.Extension):
dist_name = 'Mopidy-USBPlaylist'
ext_name = 'usbplaylist'
version = __version__
def get_default_config(self):
conf_file = os.path.join(os.path.dirname(__file__), 'ext.conf')
return config.read(conf_file)
def get_config_schema(self):
schema = super(Extension, self).get_config_schema()
schema['path'] = config.String()
return schema
def setup(self, registry):
from .actor import USBPlaylistsBackend
registry.add('backend', USBPlaylistsBackend)
|
avanc/mopidy-usbplaylist
|
mopidy_usbplaylist/__init__.py
|
Python
|
apache-2.0
| 668
| 0
|
import pytest
import logging
@pytest.mark.asyncio
async def test_confirmation(rpc_context):
# setup rpc
async def test_method(request):
a = await request.confirm('just say yes!', timeout=1)
logging.debug('a = %s', a)
return a
rpc_context.rpc.add_methods(('', test_method))
# setup client
async def confirm(request):
return True
client = await rpc_context.make_client()
client.add_methods(('', confirm))
# run test
assert await client.call('test_method')
|
pengutronix/aiohttp-json-rpc
|
tests/test_forms.py
|
Python
|
apache-2.0
| 530
| 0
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('productos', '0003_auto_20141118_2241'),
]
operations = [
migrations.AlterField(
model_name='imagenes',
name='url',
field=models.ImageField(upload_to=b'img', null=True, verbose_name=b'Im\xc3\xa1gen', blank=True),
preserve_default=True,
),
]
|
gabrielf10/Soles-pythonanywhere
|
productos/migrations/0004_auto_20141119_0117.py
|
Python
|
bsd-3-clause
| 496
| 0.002016
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
class CabukcgcrawlerPipeline(object):
def process_item(self, item, spider):
return item
|
codeforkaohsiung/CabuKcgCrawler
|
CabuKcgCrawler/pipelines.py
|
Python
|
mit
| 294
| 0
|
"""
This file holds utility functions that have no dependencies on other console code.
Avoids import loops
"""
import webcolors
def wc(clr, factor=0.0, layercolor=(255, 255, 255)):
lc = webcolors.name_to_rgb(layercolor.lower()) if isinstance(layercolor, str) else layercolor
if isinstance(clr, str):
try:
v = webcolors.name_to_rgb(clr.lower())
except ValueError:
# logsupport.Logs.Log('Bad color name: ' + str(clr), severity=ConsoleWarning)
v = webcolors.name_to_rgb('black')
else:
v = clr
try:
return v[0] + (lc[0] - v[0]) * factor, v[1] + (lc[1] - v[1]) * factor, v[2] + (lc[2] - v[2]) * factor
except Exception as E:
print('wc: {}'.format(E))
print(v, lc, clr, layercolor)
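# Illustrative usage (not part of the original module): wc('red', 0.5) blends
# red toward the default white layer color, yielding (255.0, 127.5, 127.5).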
def interval_str(sec_elapsed, shrt=False):
d = int(sec_elapsed / (60 * 60 * 24))
h = int((sec_elapsed % (60 * 60 * 24)) / 3600)
m = int((sec_elapsed % (60 * 60)) / 60)
s = int(sec_elapsed % 60)
if d != 0:
if shrt:
return "{} dys {:>02d}:{:>02d}:{:>02d}".format(d, h, m, s)
else:
return "{} days {:>02d}hrs {:>02d}mn {:>02d}sec".format(d, h, m, s)
elif h != 0:
return "{:>02d}hrs {:>02d}mn {:>02d}sec".format(h, m, s)
else:
return "{:>02d}mn {:>02d}sec".format(m, s)
def BoolTrueWord(v):
if v is None: return False
if isinstance(v, bool): return v
try:
return v.lower() in ('true', 'on', 'yes')
except Exception as e:
print("Error1: {}".format(v))
def BoolFalseWord(v):
if v is None: return True
if isinstance(v, bool): return not v
try:
return v.lower() in ('false', 'off', 'no')
except Exception as e:
print("Error2: {}".format(v))
def TreeDict(d, args):
# Allow a nest of dictionaries to be accessed by a tuple of keys for easier code
if len(args) == 1:
temp = d[args[0]]
#temp = getattr(d,args[0])
if isinstance(temp, str) and temp.isdigit():
temp = int(temp)
else:
try:
temp = float(temp)
except (ValueError, TypeError):
pass
return temp
else:
return TreeDict(d[args[0]], args[1:])
#return TreeDict(getattr(d,args[0]),args[1:])
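# Illustrative usage (not part of the original module):
#   TreeDict({'a': {'b': '3'}}, ('a', 'b')) -> 3
# Nested keys are applied in order; digit strings become int, other numeric
# strings become float, and anything else is returned unchanged.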
import string
class PartialFormatter(string.Formatter):
def __init__(self, missing='--', bad_fmt='--'):
self.missing, self.bad_fmt = missing, bad_fmt
def get_field(self, field_name, args, kwargs):
# Handle a key not found
try:
val = super().get_field(field_name, args, kwargs)
except (KeyError, AttributeError):
val = None, field_name
return val
def format_field(self, value, spec):
# handle an invalid format
if value is None: return self.missing
try:
return super().format_field(value, spec)
except ValueError:
if self.bad_fmt is not None:
return self.bad_fmt
else:
raise
fmt = PartialFormatter()
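# Illustrative usage (not part of the original module):
#   fmt.format('{present} {absent}', present='ok') -> 'ok --'
# Missing fields and invalid format specs fall back to the configured placeholders.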
# noinspection PyBroadException
def safeprint(*args, **kwargs):
try:
print(*args, **kwargs)
except OSError:
with open('/home/pi/Console/disconnectederrors.log', 'a') as f:
print(*args, **kwargs, file=f)
def RepresentsInt(s):
try:
int(s)
return True
except (ValueError, TypeError):
return False
'''
class WFormatter(string.Formatter):
def format_field(self, value, format_spec):
if format_spec.endswith(('f', 'd')) and value is None:
return 'n/a'
elif value is None:
return 'n/a'
elif value == -9999.0:
return 'n/a'
else:
return super(WFormatter, self).format_field(value, format_spec)
'''
|
kevinkahn/softconsole
|
utils/utilfuncs.py
|
Python
|
apache-2.0
| 3,291
| 0.033728
|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
IOGeopaparazzi
A QGIS plugin
A plugin to import/export geodata from/to geopaparazzi
Generated by Plugin Builder: http://g-sherman.github.io/Qgis-Plugin-Builder/
-------------------
begin : 2018-07-19
copyright : (C) 2018 by Enrico A. Chiaradia
email : enrico.chiaradia@yahoo.it
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
__author__ = 'Enrico A. Chiaradia'
__date__ = '2018-07-19'
__copyright__ = '(C) 2018 by Enrico A. Chiaradia'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from qgis.core import QgsProcessingProvider
from .import_gpap_algorithm import ImportGpapAlgorithm
from .export_spatialite_algorithm import ExportSpatialiteAlgorithm
from .export_tiles_algorithm import ExportTilesAlgorithm
class IOGeopaparazziProvider(QgsProcessingProvider):
def __init__(self):
QgsProcessingProvider.__init__(self)
# Load algorithms
#self.alglist = [ImportGpapAlgorithm(),ExportSpatialiteAlgorithm(),ExportTilesAlgorithm()]
self.alglist = [ImportGpapAlgorithm(),ExportSpatialiteAlgorithm(),ExportTilesAlgorithm()]
def unload(self):
"""
Unloads the provider. Any tear-down steps required by the provider
should be implemented here.
"""
pass
def loadAlgorithms(self):
"""
Loads all algorithms belonging to this provider.
"""
for alg in self.alglist:
self.addAlgorithm( alg )
def id(self):
"""
Returns the unique provider id, used for identifying the provider. This
string should be a unique, short, character only string, eg "qgis" or
"gdal". This string should not be localised.
"""
return 'io_geopaparazzi'
def name(self):
"""
Returns the provider name, which is used to describe the provider
within the GUI.
This string should be short (e.g. "Lastools") and localised.
"""
return self.tr('IO Geopaparazzi')
def longName(self):
"""
        Returns a longer version of the provider name, which can include
extra details such as version numbers. E.g. "Lastools LIDAR tools
(version 2.2.1)". This string should be localised. The default
implementation returns the same string as name().
"""
return self.tr('IO Geopaparazzi (version 2.0)')
|
eachiaradia/IOGeopaparazzi
|
io_geopaparazzi_provider.py
|
Python
|
gpl-3.0
| 2,828
| 0.020156
|
import csv
import sys
from models import *
from datetime import datetime
import codecs
import json
# from models import Attorney, Organization
from flask_mail import Message
def load_attorneys_from_csv(filename):
with codecs.open(filename, mode='rb', encoding='utf-8') as csvfile:
attorneys = [row for row in csv.reader(csvfile.read().splitlines())]
attorneys.pop(0)
try:
for attorney in attorneys:
# Check to see if the email address is in the system, and if it is, simply add the new record...
if check_new_email(attorney[3]):
a = Attorney.objects.get(email_address=attorney[3])
else:
a = Attorney()
a.first_name = attorney[0]
a.middle_initial = attorney[1]
a.last_name = attorney[2]
a.email_address = attorney[3]
a.organization_name = Organization.objects(
organization_name=attorney[4]
).upsert_one(organization_name=attorney[4]) \
.organization_name
if len(a.records) <= 1:
a.records.append({
'year': attorney[5],
'honor_choice': attorney[6],
'rule_49_choice': attorney[7],
'date_modified': datetime.now(),
'method_added': u'bulk'
})
a.save()
print(attorney[3] + " is loaded.")
except:
print( "Unexpected error:", sys.exc_info()[0])
raise
return True
def check_new_email(email_address):
try:
Attorney.objects.get(email_address=email_address)
return True
except Attorney.DoesNotExist:
return False
if __name__ == "__main__":
import sys
import os
from models import *
MONGODB_URI = os.environ.get(
"MONGOLAB_URI", 'mongodb://localhost/honorroll')
mongo_client = connect(host=MONGODB_URI)
filename = sys.argv[1]
load_attorneys_from_csv(filename)
|
mitzvotech/honorroll
|
app/utils.py
|
Python
|
mit
| 2,178
| 0.002296
|
import pycurl
import cStringIO
import random
import HTMLParser
def generate_TAXII_header(xml, ssl=True):
headers = {
"Content-Type": "application/xml",
"Content-Length": str(len(xml)),
"User-Agent": "TAXII Client Application",
"Accept": "application/xml",
"X-TAXII-Accept": "urn:taxii.mitre.org:message:xml:1.0",
"X-TAXII-Content-Type": "urn:taxii.mitre.org:message:xml:1.0",
}
if ssl:
headers["X-TAXII-Protocol"] = "urn:taxii.mitre.org:protocol:https:1.0"
else:
headers["X-TAXII-Protocol"] = "urn:taxii.mitre.org:protocol:http:1.0"
return headers
def taxi_wrapper(xml):
xmlstart = """<?xml version="1.0" encoding="UTF-8" ?>"""
boilerplate = """xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:taxii_11="http://taxii.mitre.org/messages/taxii_xml_binding-1.1" xsi:schemaLocation="http://taxii.mitre.org/messages/taxii_xml_binding-1.1 http://taxii.mitre.org/messages/taxii_xml_binding-1.1" """
message_id = str(random.randint(345271,9999999999))
xml_inbox = xmlstart + """
<taxii_11:Inbox_Message {{boilerplate}} message_id="{{message_id}}">
<taxii_11:Content_Block>
<taxii_11:Content_Binding binding_id="{{content_binding}}" />
<taxii_11:Content>
{{content_data}}
</taxii_11:Content>
</taxii_11:Content_Block>
</taxii_11:Inbox_Message>"""
xml = xml_inbox.replace('{{boilerplate}}',boilerplate) \
.replace('{{message_id}}',message_id) \
.replace('{{content_binding}}','urn:stix.mitre.org:xml:1.1.1') \
.replace('{{content_data}}', xml )
return xml
def taxi_poll_xml(feedid):
xmlstart = """<?xml version="1.0" encoding="UTF-8" ?>"""
boilerplate = """xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:taxii_11="http://taxii.mitre.org/messages/taxii_xml_binding-1.1" xsi:schemaLocation="http://taxii.mitre.org/messages/taxii_xml_binding-1.1 http://taxii.mitre.org/messages/taxii_xml_binding-1.1" """
message_id = str(random.randint(345271,9999999999))
xml_poll = xmlstart + """
<taxii_11:Poll_Request {{boilerplate}} message_id="{{message_id}}" collection_name="{{feed_name}}" >
<taxii_11:Poll_Parameters allow_asynch="false">
<taxii_11:Response_Type>FULL</taxii_11:Response_Type>
<taxii_11:Content_Binding binding_id="{{content_binding}}" />
</taxii_11:Poll_Parameters>
{{start_end}}
</taxii_11:Poll_Request>"""
xml = xml_poll.replace('{{boilerplate}}',boilerplate) \
.replace('{{message_id}}',message_id) \
.replace('{{content_binding}}','urn:stix.mitre.org:xml:1.1.1') \
.replace('{{feed_name}}', feedid )
return xml
#-----------------------------------------
def send_xml(setup, xml, ssl=True):
taxiixml = taxi_wrapper(xml)
return send(setup, taxiixml, ssl)
def get_xml(setup, feedid, ssl=True):
taxiixml = taxi_poll_xml(feedid)
return send(setup, taxiixml, ssl)
def send(setup, taxiixml, ssl=True):
headers = [
"Content-Type: application/xml",
"Content-Length: " + str(len(taxiixml)),
"User-Agent: TAXII Client Application",
"Accept: application/xml",
"X-TAXII-Accept: urn:taxii.mitre.org:message:xml:1.1",
"X-TAXII-Content-Type: urn:taxii.mitre.org:message:xml:1.1",
"X-TAXII-Protocol: urn:taxii.mitre.org:protocol:https:1.0",
]
buf = cStringIO.StringIO()
conn = pycurl.Curl()
conn.setopt(pycurl.URL, setup["url"])
conn.setopt(pycurl.USERPWD, "{0}:{1}".format(setup["user"], setup["password"]))
conn.setopt(pycurl.HTTPHEADER, headers)
conn.setopt(pycurl.POST, 1)
conn.setopt(pycurl.TIMEOUT, 999999)
conn.setopt(pycurl.WRITEFUNCTION, buf.write)
conn.setopt(pycurl.POSTFIELDS, taxiixml)
conn.setopt(pycurl.SSL_VERIFYPEER, 0)
conn.perform()
hp = HTMLParser.HTMLParser()
result = hp.unescape(buf.getvalue()).encode('ascii', 'ignore')
buf.close()
conn.close()
return result
|
hazmalware/sharephish
|
taxiigenerator.py
|
Python
|
gpl-3.0
| 3,968
| 0.024698
|
# coding=utf-8
"""
Write the collected stats to a locally stored log file. Rotate the log file
every night and remove after 7 days.
"""
from Handler import Handler
import logging
import logging.handlers
class ArchiveHandler(Handler):
"""
Implements the Handler abstract class, archiving data to a log file
"""
def __init__(self, config):
"""
Create a new instance of the ArchiveHandler class
"""
# Initialize Handler
Handler.__init__(self, config)
# Create Archive Logger
self.archive = logging.getLogger('archive')
self.archive.setLevel(logging.DEBUG)
self.archive.propagate = self.config['propagate']
# Create Archive Log Formatter
formatter = logging.Formatter('%(message)s')
# Create Archive Log Handler
handler = logging.handlers.TimedRotatingFileHandler(
filename=self.config['log_file'],
when='midnight',
interval=1,
backupCount=int(self.config['days']),
encoding=self.config['encoding']
)
handler.setFormatter(formatter)
handler.setLevel(logging.DEBUG)
self.archive.addHandler(handler)
def get_default_config_help(self):
"""
Returns the help text for the configuration options for this handler
"""
config = super(ArchiveHandler, self).get_default_config_help()
config.update({
'log_file': 'Path to the logfile',
'days': 'How many days to store',
'encoding': '',
'propagate': 'Pass handled metrics to configured root logger',
})
return config
def get_default_config(self):
"""
Return the default config for the handler
"""
config = super(ArchiveHandler, self).get_default_config()
config.update({
'log_file': '',
'days': 7,
'encoding': None,
'propagate': False,
})
return config
def process(self, metric):
"""
Send a Metric to the Archive.
"""
# Archive Metric
self.archive.info(str(metric).strip())
|
Ormod/Diamond
|
src/diamond/handler/archive.py
|
Python
|
mit
| 2,195
| 0
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Draw a simple line chart
import matplotlib.pyplot as plt
input_values = list(range(1, 6))
squares = [x * x for x in input_values]
# Plot a meaningful figure from the passed data
plt.plot(input_values, squares, linewidth=3)
# Set the chart title and its font size
plt.title("Square Numbers", fontsize=14)
# Label the axes
plt.xlabel("Value", fontsize=14)
plt.ylabel("Square of Value", fontsize=14)
# Set the size of the tick labels
plt.tick_params(axis='both', labelsize=14)
# Open the viewer and display the plot
plt.show()
|
felix9064/python
|
Demo/pcc/mpl_squares.py
|
Python
|
mit
| 606
| 0
|
from pytest_bdd import scenario
from .contact_steps import *
@scenario("contacts.feature", "Add new contact")
def test_add_new_contact():
pass
@scenario("contacts.feature", "Delete a contact")
def test_delete_contact():
pass
@scenario("contacts.feature", "Modify a contact")
def test_modify_contact():
pass
|
yupasik/python_training
|
bdd/contact_scenarios.py
|
Python
|
apache-2.0
| 325
| 0
|
from webfs import WebFSStat
import stat
def Test_Basic():
fields = ('st_mode', 'st_ino', 'st_dev', 'st_nlink', 'st_uid', 'st_gid',
'st_size', 'st_atime', 'st_mtime', 'st_ctime')
st = WebFSStat()
print st.__dict__.keys()
for field in fields:
assert field in st.__dict__.keys(), 'field(%s) is not in members' % field
def Test_InitParam():
st = WebFSStat()
assert st.st_mode == stat.S_IFDIR | 0555
st = WebFSStat(False)
assert st.st_mode == stat.S_IFREG | 0444
def Test_IsDir():
st = WebFSStat()
assert st.isDir()
st = WebFSStat(False)
assert not st.isDir()
|
harun-emektar/webfs
|
tests/Test_WebFSStat.py
|
Python
|
apache-2.0
| 655
| 0.015267
|
from operator import attrgetter
from traits.api import Any, Int, Bool, on_trait_change, Dict, Button, Str, \
HasTraits, cached_property, Property, Event, Either, Float, Instance
from traitsui.api import View, Item, UItem, VGroup, HGroup, Spring, \
TabularEditor, HSplit, Group, ModelView
from traitsui.tabular_adapter import TabularAdapter
from chaco.api import Plot, LabelAxis
from chaco.tools.api import ZoomTool, PanTool
from chaco.ticks import ShowAllTickGenerator
from enable.component_editor import ComponentEditor
from pikos.live.ui.base_view import BaseView
from pikos.live.ui.barplot import SelectableBarPlot, BarSelectTool
class TableItem(HasTraits):
id = Int
filename = Str
line_number = Any
function_name = Str
callcount = Int
per_call = Float
total_time = Float
cumulative_time = Float
def __init__(self, id, filename, line_number, function_name, callcount,
per_call, total_time, cumulative_time, **traits):
kwargs = {}
kwargs.update(traits)
kwargs.update(dict(
id=id,
filename=filename,
function_name=function_name,
line_number=line_number,
callcount=callcount,
per_call=per_call,
total_time=total_time,
cumulative_time=cumulative_time,
))
super(TableItem, self).__init__(**kwargs)
class CProfileTabularAdapter(TabularAdapter):
columns = (
('Filename', 'filename'),
('Function Name', 'function_name'),
('Line Number', 'line_number'),
('Number of Calls', 'callcount'),
('Per Call', 'per_call'),
# ('Per Call (Cumulative)', 'cumulative_percall'),
('Total Time', 'total_time'),
('Cumulative Time', 'cumulative_time'),
)
class CProfileTableView(ModelView):
title = Str
data_items = Property(depends_on='model.data_items,sort_column,ascending')
adapter = Any
column_clicked = Event
sort_column = Either(None, Int)
ascending = Bool(False)
def _column_clicked_changed(self, event):
if event is None:
self.sort_column = None
elif self.sort_column == event.column:
self.ascending = not self.ascending
else:
self.sort_column = event.column
self.ascending = False
def _adapter_default(self):
return CProfileTabularAdapter()
@cached_property
def _get_data_items(self):
items = [TableItem(*args) for args in self.model.data_items]
if self.sort_column is None:
return items
attr = self.adapter.columns[self.sort_column][1]
return sorted(items, key=attrgetter(attr), reverse=self.ascending)
def default_traits_view(self):
return View(
UItem(
'data_items',
editor=TabularEditor(
adapter=self.adapter,
column_clicked='column_clicked',
),
),
height=800,
width=1100,
resizable=True,
title='CProfile Live',
)
class CProfileView(BaseView):
# Initialization
plotted = Bool(False)
barplot = Any
sort_values_button = Button('Sort')
FORMATS = Dict({
'id': '0x{0:x}',
})
def _plot_default(self):
container = Plot(
self.model.plot_data,
)
container.renderer_map['bar'] = SelectableBarPlot
container.padding_left = 100
container.padding_bottom = 150
# container.plot(('x', 'y'), type='bar')
self.zoom_tool = ZoomTool(
container,
)
container.underlays.append(self.zoom_tool)
container.tools.append(self.zoom_tool)
self.pan_tool = PanTool(
container,
)
container.tools.append(self.pan_tool)
return container
# @on_trait_change('model.index_item')
# def _on_model_index_item_change(self, index_item):
# super(CProfileView, self)._on_model_index_item_change(index_item)
# # self.plot.x_axis.tick_generator = ShowAllTickGenerator(
# # positions=self.model.plot_data.get_data('x'))
# @on_trait_change('model.value_item')
# def _on_model_value_item_change(self, value_item):
# super(CProfileView, self)._on_model_value_item_change(value_item)
# Handlers
@on_trait_change('model.updated')
def _on_model_update_fired(self):
if not self.plotted:
x = self.model.plot_data.get_data('x')
y = self.model.plot_data.get_data('y')
if len(x) == 0 or len(y) == 0:
return
self.barplot = self.plot.plot(('x', 'y'), type='bar',
bar_width=0.8)[0]
self.barplot.index.sort_order = 'ascending'
select = BarSelectTool(
self.barplot,
selection_mode='single',
)
self.barplot.tools.append(select)
self.barplot.index.on_trait_change(
self._metadata_changed, "metadata_changed")
self.plotted = True
self.plot.y_mapper.range.low_setting = 'auto'
self.plot.y_mapper.range.high_setting = 'auto'
def _format_key(self, key):
format_ = self.FORMATS.get(self.model.index_item)
if format_ is None:
return str(key)
try:
return format_.format(key)
except ValueError:
return str(key)
@on_trait_change('model.plot_keys')
def _on_model_plot_keys_changed(self):
positions = self.model.plot_data.get_data('x')
label_axis = LabelAxis(
self.plot, orientation='bottom',
title='Keys',
title_spacing=100,
positions=positions,
labels=[self._format_key(i)
for i in self.model.plot_keys],
small_haxis_style=True,
label_rotation=90,
tick_generator=ShowAllTickGenerator(
positions=positions,
),
)
self.plot.underlays.remove(self.plot.index_axis)
self.plot.index_axis = label_axis
self.plot.underlays.append(label_axis)
def _sort_values_button_fired(self):
self.model.sort_by_current_value()
self.plot.invalidate_and_redraw()
def _metadata_changed(self, new):
self.plot.invalidate_and_redraw()
# data_indices = self.scatter.index.metadata.get('selections', [])
# if len(data_indices) == 0:
# self.model.selected_index = None
# return
# self.model.selected_index = data_indices[0]
# def _last_n_points_changed(self):
# self.plot.x_mapper.range.tracking_amount = self.last_n_points
# def _follow_plot_changed(self):
# if self.follow_plot:
# self.plot.x_mapper.range.low_setting = 'track'
# self.plot.x_mapper.range.high_setting = 'auto'
# self.plot.x_mapper.range.tracking_amount = self.last_n_points
# else:
# self.plot.x_mapper.range.low_setting = self.plot.x_mapper.range.low
# self.plot.x_mapper.range.high_setting = \
# self.plot.x_mapper.range.high
traits_view = View(
Group(
VGroup(
HGroup(
Item('model.index_item'),
Item('model.value_item'),
# ),
# HGroup(
Spring(),
UItem('sort_values_button'),
UItem('reset_view_button'),
),
),
HSplit(
UItem('plot', editor=ComponentEditor()),
# UItem(
# 'model.selected_item',
# editor=TabularEditor(adapter=DetailsAdapter()),
# width=350),
),
),
height=800,
width=1100,
resizable=True,
title='Live Recording Plot'
)
class CProfileMixedView(ModelView):
title = Str
table_view = Instance(CProfileTableView)
plot_view = Instance(CProfileView)
def _table_view_default(self):
return CProfileTableView(title=self.title, model=self.model)
def _plot_view_default(self):
return CProfileView(title=self.title, model=self.model)
traits_view = View(
VGroup(
UItem('table_view', style='custom'),
UItem('plot_view', style='custom'),
),
height=800,
width=1100,
resizable=True,
title='Live CProfile',
)
|
enthought/pikos
|
pikos/live/ui/cprofile_view.py
|
Python
|
bsd-3-clause
| 8,716
| 0.000459
|
from django.conf.urls import patterns, include, url
from django.shortcuts import redirect, render_to_response
from django.template.context import RequestContext
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
# Just redirect / to /blog for now until I can
# come up with something to put on the homepage..
def to_blog(request):
return redirect('/blog/', permanent=False)
# Follow the BSD license and allow the source/binary to reproduce
# the license and copyright message
def sslicense(request):
slicense = """
Copyright (c) 2012-2013 Justin Crawford <Justasic@gmail.com>
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE
"""
ctx = {
'parts': {
"title": "License",
"html_title": "License",
"fragment": slicense.replace('\n', '<br>'),
},
}
return render_to_response('docs/docs.html', RequestContext(request, ctx))
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'StackSmash.views.home', name='home'),
# url(r'^StackSmash/', include('StackSmash.foo.urls')),
# TODO: Fix index and use something... Should identify subdomains somehow..
#url(r'^$', include('StackSmash.apps.blog.urls')),
url(r'^license/', sslicense, name='license'),
#url(r'^docs/', include('StackSmash.apps.docs.urls'), name='docs', app_name='docs'),
url(r'^blog/', include('StackSmash.apps.blog.urls', namespace='blog')),
url(r'^projects/', include('StackSmash.apps.projects.urls', namespace='projects')),
url(r'^upload/', include('StackSmash.apps.uploader.urls', namespace='upload')),
url(r'^$', to_blog, name='index'),
#url(r'^projects/', include('StackSmash.apps.projects.urls')),
# Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls), name='admin'),
)
|
Justasic/StackSmash
|
StackSmash/urls.py
|
Python
|
bsd-2-clause
| 3,146
| 0.00445
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
BarPlot.py
---------------------
Date : January 2013
Copyright : (C) 2013 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'January 2013'
__copyright__ = '(C) 2013, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import matplotlib.pyplot as plt
import matplotlib.pylab as lab
import numpy as np
from processing.core.parameters import ParameterTable
from processing.core.parameters import ParameterTableField
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.outputs import OutputHTML
from processing.tools import vector
from processing.tools import dataobjects
class BarPlot(GeoAlgorithm):
INPUT = 'INPUT'
OUTPUT = 'OUTPUT'
NAME_FIELD = 'NAME_FIELD'
VALUE_FIELD = 'VALUE_FIELD'
def defineCharacteristics(self):
self.name, self.i18n_name = self.trAlgorithm('Bar plot')
self.group, self.i18n_group = self.trAlgorithm('Graphics')
self.addParameter(ParameterTable(self.INPUT, self.tr('Input table')))
self.addParameter(ParameterTableField(self.NAME_FIELD,
self.tr('Category name field'),
self.INPUT,
ParameterTableField.DATA_TYPE_NUMBER))
self.addParameter(ParameterTableField(self.VALUE_FIELD,
self.tr('Value field'),
self.INPUT,
ParameterTableField.DATA_TYPE_NUMBER))
self.addOutput(OutputHTML(self.OUTPUT, self.tr('Bar plot')))
def processAlgorithm(self, progress):
layer = dataobjects.getObjectFromUri(
self.getParameterValue(self.INPUT))
namefieldname = self.getParameterValue(self.NAME_FIELD)
valuefieldname = self.getParameterValue(self.VALUE_FIELD)
output = self.getOutputValue(self.OUTPUT)
values = vector.values(layer, namefieldname, valuefieldname)
plt.close()
ind = np.arange(len(values[namefieldname]))
width = 0.8
plt.bar(ind, values[valuefieldname], width, color='r')
plt.xticks(ind, values[namefieldname], rotation=45)
plotFilename = output + '.png'
lab.savefig(plotFilename)
with open(output, 'w') as f:
f.write('<html><img src="' + plotFilename + '"/></html>')
|
drnextgis/QGIS
|
python/plugins/processing/algs/qgis/BarPlot.py
|
Python
|
gpl-2.0
| 3,271
| 0.000611
|
# -*- coding:utf-8 -*-
from Crypto.Cipher import AES
from Crypto.Hash import MD5
import binascii
import urllib2
import string, random
# algorithm
MODE = AES.MODE_CBC
def __getKeyObject(key):
obj = AES.new(md5Encoding(key), MODE)
return obj
def md5Encoding(msg):
'''
get md5 encrypted text
@param msg: the plain text message
'''
m = MD5.new()
m.update(msg)
return m.hexdigest()
def getRandomString(length, optionList=['number', 'lower', 'upper', 'punc']):
charPool = {'number' : string.digits,
'lower' : string.lowercase,
'upper' : string.uppercase,
'punc' : string.punctuation }
pool = ''
for key in optionList:
if charPool.has_key(key):
pool = pool + charPool.get(key)
s = [random.choice(pool) for _ in xrange(length)]
return ''.join(s)
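# Illustrative usage (not part of the original module):
#   getRandomString(8, ['number', 'lower']) -> an 8-character string drawn
#   from digits and lowercase letters, e.g. '4kz7q0ab'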
def encrypt(key, msg):
'''
Encrypt message using given password
@param key: the master password
@param msg: the plain text to be encrypted
'''
obj = __getKeyObject(key)
# encrypt
xx = msg*16
cipher = obj.encrypt(xx)
# convert bin to string
s = binascii.b2a_hex(cipher)
return s
def decrypt(key, msg):
'''
Encrypt message
@param key: the master password
@param msg: the cipher text to be dencrypted
'''
obj = __getKeyObject(key)
# convert string to bin
b = binascii.a2b_hex(msg)
# decrypt
plain = obj.decrypt(b)
return plain[:len(plain)/16]
def getLastVersion(versionUrl):
ver = ''
try:
f = urllib2.urlopen(versionUrl)
s = f.read()
f.close()
ver = s.split(' ')[1]
except:
pass
return ver
|
zhyu/PasswdManager
|
util.py
|
Python
|
gpl-3.0
| 1,732
| 0.011547
|
#!/usr/bin/env python
quit()
for iii in range(0,6):
try:
print(iii,iii/(4-iii))
except ZeroDivisionError as e:
#print("wrong: i={} {}".format(iii,e.message))
print("wrong: i={} {}".format(iii,"nooooooo"))
#else:
# print("OK")
finally:
print("continue...")
def pickkey(mylist):
""" jg's function"""
return mylist[1]
quit()
#listjg=[ (5,'A'), (4,'Z'), (8,'N'), (2,'C'), ]
#listjg.sort(key=pickkey) ;print(listjg)
#listjg.sort(reverse=True) ;print(listjg)
quit()
x=3
#def myfL(a,b):
# return []
def myf (a,b,*other):
print(type(other))
print(sum(other))
c=sum(other)
#return (a+b+other)
#res=myf(5,2,1) ;print(res)
#res=myf(a=5,b=2,c=1) ;print(res)
#res=myf(b=2,a=5,c=1) ;print(res)
#res=myf(a=5) ;print(res)
#no res=myf(a=5,b=2,c=1) ;print(res)
#res=myf(5,2,1,0) ;print(res)
#myf(5,2)
|
jgphpc/linux
|
python/0.py
|
Python
|
gpl-2.0
| 888
| 0.029279
|
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The Volume Image Metadata API extension."""
from six.moves import http_client
import webob
from oslo_log import log as logging
from cinder.api import common
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder import exception
from cinder.i18n import _
from cinder import volume
LOG = logging.getLogger(__name__)
authorize = extensions.soft_extension_authorizer('volume',
'volume_image_metadata')
class VolumeImageMetadataController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(VolumeImageMetadataController, self).__init__(*args, **kwargs)
self.volume_api = volume.API()
def _get_image_metadata(self, context, volume_id):
# Not found exception will be handled at the wsgi level
volume = self.volume_api.get(context, volume_id)
meta = self.volume_api.get_volume_image_metadata(context, volume)
return (volume, meta)
def _add_image_metadata(self, context, resp_volume_list, image_metas=None):
"""Appends the image metadata to each of the given volume.
:param context: the request context
:param resp_volume_list: the response volume list
:param image_metas: The image metadata to append, if None is provided
it will be retrieved from the database. An empty
dict means there is no metadata and it should not
be retrieved from the db.
"""
vol_id_list = []
for vol in resp_volume_list:
vol_id_list.append(vol['id'])
if image_metas is None:
try:
image_metas = self.volume_api.get_list_volumes_image_metadata(
context, vol_id_list)
except Exception as e:
LOG.debug('Get image metadata error: %s', e)
return
if image_metas:
for vol in resp_volume_list:
image_meta = image_metas.get(vol['id'])
if image_meta:
vol['volume_image_metadata'] = dict(image_meta)
@wsgi.extends
def show(self, req, resp_obj, id):
context = req.environ['cinder.context']
if authorize(context):
self._add_image_metadata(context, [resp_obj.obj['volume']])
@wsgi.extends
def detail(self, req, resp_obj):
context = req.environ['cinder.context']
if authorize(context):
# Just get the image metadata of those volumes in response.
volumes = list(resp_obj.obj.get('volumes', []))
if volumes:
self._add_image_metadata(context, volumes)
@wsgi.action("os-set_image_metadata")
def create(self, req, id, body):
context = req.environ['cinder.context']
if authorize(context):
try:
metadata = body['os-set_image_metadata']['metadata']
except (KeyError, TypeError):
msg = _("Malformed request body.")
raise webob.exc.HTTPBadRequest(explanation=msg)
new_metadata = self._update_volume_image_metadata(context,
id,
metadata,
delete=False)
return {'metadata': new_metadata}
def _update_volume_image_metadata(self, context,
volume_id,
metadata,
delete=False):
try:
volume = self.volume_api.get(context, volume_id)
return self.volume_api.update_volume_metadata(
context,
volume,
metadata,
delete=False,
meta_type=common.METADATA_TYPES.image)
# Not found exception will be handled at the wsgi level
except (ValueError, AttributeError):
msg = _("Malformed request body.")
raise webob.exc.HTTPBadRequest(explanation=msg)
except exception.InvalidVolumeMetadata as error:
raise webob.exc.HTTPBadRequest(explanation=error.msg)
except exception.InvalidVolumeMetadataSize as error:
raise webob.exc.HTTPRequestEntityTooLarge(explanation=error.msg)
@wsgi.action("os-show_image_metadata")
def index(self, req, id, body):
context = req.environ['cinder.context']
return {'metadata': self._get_image_metadata(context, id)[1]}
@wsgi.action("os-unset_image_metadata")
def delete(self, req, id, body):
"""Deletes an existing image metadata."""
context = req.environ['cinder.context']
if authorize(context):
try:
key = body['os-unset_image_metadata']['key']
except (KeyError, TypeError):
msg = _("Malformed request body.")
raise webob.exc.HTTPBadRequest(explanation=msg)
if key:
vol, metadata = self._get_image_metadata(context, id)
if key not in metadata:
raise exception.GlanceMetadataNotFound(id=id)
self.volume_api.delete_volume_metadata(
context, vol, key,
meta_type=common.METADATA_TYPES.image)
else:
msg = _("The key cannot be None.")
raise webob.exc.HTTPBadRequest(explanation=msg)
return webob.Response(status_int=http_client.OK)
class Volume_image_metadata(extensions.ExtensionDescriptor):
"""Show image metadata associated with the volume."""
name = "VolumeImageMetadata"
alias = "os-vol-image-meta"
updated = "2012-12-07T00:00:00+00:00"
def get_controller_extensions(self):
controller = VolumeImageMetadataController()
extension = extensions.ControllerExtension(self, 'volumes', controller)
return [extension]
|
eharney/cinder
|
cinder/api/contrib/volume_image_metadata.py
|
Python
|
apache-2.0
| 6,636
| 0
|
''' Provide Jinja2 templates used by Bokeh to embed Bokeh models
(e.g. plots, widgets, layouts) in various ways.
.. bokeh-jinja:: bokeh.core.templates.AUTOLOAD_JS
.. bokeh-jinja:: bokeh.core.templates.AUTOLOAD_NB_JS
.. bokeh-jinja:: bokeh.core.templates.AUTOLOAD_TAG
.. bokeh-jinja:: bokeh.core.templates.CSS_RESOURCES
.. bokeh-jinja:: bokeh.core.templates.DOC_JS
.. bokeh-jinja:: bokeh.core.templates.FILE
.. bokeh-jinja:: bokeh.core.templates.JS_RESOURCES
.. bokeh-jinja:: bokeh.core.templates.NOTEBOOK_LOAD
.. bokeh-jinja:: bokeh.core.templates.PLOT_DIV
.. bokeh-jinja:: bokeh.core.templates.SCRIPT_TAG
'''
from __future__ import absolute_import
import json
from jinja2 import Environment, PackageLoader, Markup
_env = Environment(loader=PackageLoader('bokeh.core', '_templates'))
_env.filters['json'] = lambda obj: Markup(json.dumps(obj))
JS_RESOURCES = _env.get_template("js_resources.html")
CSS_RESOURCES = _env.get_template("css_resources.html")
SCRIPT_TAG = _env.get_template("script_tag.html")
PLOT_DIV = _env.get_template("plot_div.html")
DOC_JS = _env.get_template("doc_js.js")
FILE = _env.get_template("file.html")
NOTEBOOK_LOAD = _env.get_template("notebook_load.html")
AUTOLOAD_JS = _env.get_template("autoload_js.js")
AUTOLOAD_NB_JS = _env.get_template("autoload_nb_js.js")
AUTOLOAD_TAG = _env.get_template("autoload_tag.html")
|
philippjfr/bokeh
|
bokeh/core/templates.py
|
Python
|
bsd-3-clause
| 1,358
| 0
|
# Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import division, print_function, absolute_import
import math
import numpy
from . import _ni_support
from . import _nd_image
from scipy.misc import doccer
from scipy._lib._version import NumpyVersion
__all__ = ['correlate1d', 'convolve1d', 'gaussian_filter1d', 'gaussian_filter',
'prewitt', 'sobel', 'generic_laplace', 'laplace',
'gaussian_laplace', 'generic_gradient_magnitude',
'gaussian_gradient_magnitude', 'correlate', 'convolve',
'uniform_filter1d', 'uniform_filter', 'minimum_filter1d',
'maximum_filter1d', 'minimum_filter', 'maximum_filter',
'rank_filter', 'median_filter', 'percentile_filter',
'generic_filter1d', 'generic_filter']
_input_doc = \
"""input : array_like
Input array to filter."""
_axis_doc = \
"""axis : int, optional
The axis of `input` along which to calculate. Default is -1."""
_output_doc = \
"""output : array, optional
The `output` parameter passes an array in which to store the
    filter output. The output array should have a different name than
    the input array to avoid aliasing errors."""
_size_foot_doc = \
"""size : scalar or tuple, optional
See footprint, below
footprint : array, optional
Either `size` or `footprint` must be defined. `size` gives
the shape that is taken from the input array, at every element
position, to define the input to the filter function.
`footprint` is a boolean array that specifies (implicitly) a
shape, but also which of the elements within this shape will get
passed to the filter function. Thus ``size=(n,m)`` is equivalent
to ``footprint=np.ones((n,m))``. We adjust `size` to the number
of dimensions of the input array, so that, if the input array is
shape (10,10,10), and `size` is 2, then the actual size used is
(2,2,2).
"""
_mode_doc = \
"""mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
The `mode` parameter determines how the array borders are
handled, where `cval` is the value when mode is equal to
'constant'. Default is 'reflect'"""
_mode_multiple_doc = \
"""mode : str or sequence, optional
The `mode` parameter determines how the array borders are
handled. Valid modes are {'reflect', 'constant', 'nearest',
'mirror', 'wrap'}. `cval` is the value used when mode is equal to
'constant'. A list of modes with length equal to the number of
axes can be provided to specify different modes for different
axes. Default is 'reflect'"""
_cval_doc = \
"""cval : scalar, optional
Value to fill past edges of input if `mode` is 'constant'. Default
is 0.0"""
_origin_doc = \
"""origin : scalar, optional
The `origin` parameter controls the placement of the filter.
Default 0.0."""
_extra_arguments_doc = \
"""extra_arguments : sequence, optional
Sequence of extra positional arguments to pass to passed function"""
_extra_keywords_doc = \
"""extra_keywords : dict, optional
dict of extra keyword arguments to pass to passed function"""
docdict = {
'input': _input_doc,
'axis': _axis_doc,
'output': _output_doc,
'size_foot': _size_foot_doc,
'mode': _mode_doc,
'mode_multiple': _mode_multiple_doc,
'cval': _cval_doc,
'origin': _origin_doc,
'extra_arguments': _extra_arguments_doc,
'extra_keywords': _extra_keywords_doc,
}
docfiller = doccer.filldoc(docdict)
@docfiller
def correlate1d(input, weights, axis=-1, output=None, mode="reflect",
cval=0.0, origin=0):
"""Calculate a one-dimensional correlation along the given axis.
The lines of the array along the given axis are correlated with the
given weights.
Parameters
----------
%(input)s
weights : array
One-dimensional sequence of numbers.
%(axis)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
Examples
--------
>>> from scipy.ndimage import correlate1d
>>> correlate1d([2, 8, 0, 4, 1, 9, 9, 0], weights=[1, 3])
array([ 8, 26, 8, 12, 7, 28, 36, 9])
"""
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output, return_value = _ni_support._get_output(output, input)
weights = numpy.asarray(weights, dtype=numpy.float64)
if weights.ndim != 1 or weights.shape[0] < 1:
raise RuntimeError('no filter weights given')
if not weights.flags.contiguous:
weights = weights.copy()
axis = _ni_support._check_axis(axis, input.ndim)
if (len(weights) // 2 + origin < 0) or (len(weights) // 2 +
origin > len(weights)):
raise ValueError('invalid origin')
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.correlate1d(input, weights, axis, output, mode, cval,
origin)
return return_value
@docfiller
def convolve1d(input, weights, axis=-1, output=None, mode="reflect",
cval=0.0, origin=0):
"""Calculate a one-dimensional convolution along the given axis.
The lines of the array along the given axis are convolved with the
given weights.
Parameters
----------
%(input)s
weights : ndarray
One-dimensional sequence of numbers.
%(axis)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
Returns
-------
convolve1d : ndarray
Convolved array with same shape as input
Examples
--------
>>> from scipy.ndimage import convolve1d
>>> convolve1d([2, 8, 0, 4, 1, 9, 9, 0], weights=[1, 3])
array([14, 24, 4, 13, 12, 36, 27, 0])
"""
weights = weights[::-1]
origin = -origin
if not len(weights) & 1:
origin -= 1
return correlate1d(input, weights, axis, output, mode, cval, origin)
def _gaussian_kernel1d(sigma, order, radius):
"""
Computes a 1D Gaussian convolution kernel.
"""
if order < 0:
raise ValueError('order must be non-negative')
p = numpy.polynomial.Polynomial([0, 0, -0.5 / (sigma * sigma)])
x = numpy.arange(-radius, radius + 1)
phi_x = numpy.exp(p(x), dtype=numpy.double)
phi_x /= phi_x.sum()
if order > 0:
q = numpy.polynomial.Polynomial([1])
p_deriv = p.deriv()
for _ in range(order):
# f(x) = q(x) * phi(x) = q(x) * exp(p(x))
# f'(x) = (q'(x) + q(x) * p'(x)) * phi(x)
q = q.deriv() + q * p_deriv
phi_x *= q(x)
return phi_x
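# Illustrative note (not part of the original module): _gaussian_kernel1d(1.0, 0, 4)
# returns a symmetric 9-element kernel (radius 4 on each side) that sums to 1;
# a positive `order` yields the corresponding derivative of that kernel.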
@docfiller
def gaussian_filter1d(input, sigma, axis=-1, order=0, output=None,
mode="reflect", cval=0.0, truncate=4.0):
"""One-dimensional Gaussian filter.
Parameters
----------
%(input)s
sigma : scalar
standard deviation for Gaussian kernel
%(axis)s
order : int, optional
An order of 0 corresponds to convolution with a Gaussian
kernel. A positive order corresponds to convolution with
that derivative of a Gaussian.
%(output)s
%(mode)s
%(cval)s
truncate : float, optional
Truncate the filter at this many standard deviations.
Default is 4.0.
Returns
-------
gaussian_filter1d : ndarray
Examples
--------
>>> from scipy.ndimage import gaussian_filter1d
>>> gaussian_filter1d([1.0, 2.0, 3.0, 4.0, 5.0], 1)
array([ 1.42704095, 2.06782203, 3. , 3.93217797, 4.57295905])
>>> gaussian_filter1d([1.0, 2.0, 3.0, 4.0, 5.0], 4)
array([ 2.91948343, 2.95023502, 3. , 3.04976498, 3.08051657])
>>> import matplotlib.pyplot as plt
>>> np.random.seed(280490)
>>> x = np.random.randn(101).cumsum()
>>> y3 = gaussian_filter1d(x, 3)
>>> y6 = gaussian_filter1d(x, 6)
>>> plt.plot(x, 'k', label='original data')
>>> plt.plot(y3, '--', label='filtered, sigma=3')
>>> plt.plot(y6, ':', label='filtered, sigma=6')
>>> plt.legend()
>>> plt.grid()
>>> plt.show()
"""
sd = float(sigma)
# make the radius of the filter equal to truncate standard deviations
lw = int(truncate * sd + 0.5)
# Since we are calling correlate, not convolve, revert the kernel
weights = _gaussian_kernel1d(sigma, order, lw)[::-1]
return correlate1d(input, weights, axis, output, mode, cval, 0)
@docfiller
def gaussian_filter(input, sigma, order=0, output=None,
mode="reflect", cval=0.0, truncate=4.0):
"""Multidimensional Gaussian filter.
Parameters
----------
%(input)s
sigma : scalar or sequence of scalars
Standard deviation for Gaussian kernel. The standard
deviations of the Gaussian filter are given for each axis as a
sequence, or as a single number, in which case it is equal for
all axes.
order : int or sequence of ints, optional
The order of the filter along each axis is given as a sequence
of integers, or as a single number. An order of 0 corresponds
to convolution with a Gaussian kernel. A positive order
corresponds to convolution with that derivative of a Gaussian.
%(output)s
%(mode_multiple)s
%(cval)s
truncate : float
Truncate the filter at this many standard deviations.
Default is 4.0.
Returns
-------
gaussian_filter : ndarray
Returned array of same shape as `input`.
Notes
-----
The multidimensional filter is implemented as a sequence of
one-dimensional convolution filters. The intermediate arrays are
stored in the same data type as the output. Therefore, for output
types with a limited precision, the results may be imprecise
because intermediate results may be stored with insufficient
precision.
Examples
--------
>>> from scipy.ndimage import gaussian_filter
>>> a = np.arange(50, step=2).reshape((5,5))
>>> a
array([[ 0, 2, 4, 6, 8],
[10, 12, 14, 16, 18],
[20, 22, 24, 26, 28],
[30, 32, 34, 36, 38],
[40, 42, 44, 46, 48]])
>>> gaussian_filter(a, sigma=1)
array([[ 4, 6, 8, 9, 11],
[10, 12, 14, 15, 17],
[20, 22, 24, 25, 27],
[29, 31, 33, 34, 36],
[35, 37, 39, 40, 42]])
>>> from scipy import misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = gaussian_filter(ascent, sigma=5)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
input = numpy.asarray(input)
output, return_value = _ni_support._get_output(output, input)
orders = _ni_support._normalize_sequence(order, input.ndim)
sigmas = _ni_support._normalize_sequence(sigma, input.ndim)
modes = _ni_support._normalize_sequence(mode, input.ndim)
axes = list(range(input.ndim))
axes = [(axes[ii], sigmas[ii], orders[ii], modes[ii])
for ii in range(len(axes)) if sigmas[ii] > 1e-15]
if len(axes) > 0:
for axis, sigma, order, mode in axes:
gaussian_filter1d(input, sigma, axis, order, output,
mode, cval, truncate)
input = output
else:
output[...] = input[...]
return return_value
@docfiller
def prewitt(input, axis=-1, output=None, mode="reflect", cval=0.0):
"""Calculate a Prewitt filter.
Parameters
----------
%(input)s
%(axis)s
%(output)s
%(mode_multiple)s
%(cval)s
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.prewitt(ascent)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
input = numpy.asarray(input)
axis = _ni_support._check_axis(axis, input.ndim)
output, return_value = _ni_support._get_output(output, input)
modes = _ni_support._normalize_sequence(mode, input.ndim)
correlate1d(input, [-1, 0, 1], axis, output, modes[axis], cval, 0)
axes = [ii for ii in range(input.ndim) if ii != axis]
for ii in axes:
correlate1d(output, [1, 1, 1], ii, output, modes[ii], cval, 0,)
return return_value
@docfiller
def sobel(input, axis=-1, output=None, mode="reflect", cval=0.0):
"""Calculate a Sobel filter.
Parameters
----------
%(input)s
%(axis)s
%(output)s
%(mode_multiple)s
%(cval)s
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.sobel(ascent)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
input = numpy.asarray(input)
axis = _ni_support._check_axis(axis, input.ndim)
output, return_value = _ni_support._get_output(output, input)
modes = _ni_support._normalize_sequence(mode, input.ndim)
correlate1d(input, [-1, 0, 1], axis, output, modes[axis], cval, 0)
axes = [ii for ii in range(input.ndim) if ii != axis]
for ii in axes:
correlate1d(output, [1, 2, 1], ii, output, modes[ii], cval, 0)
return return_value
@docfiller
def generic_laplace(input, derivative2, output=None, mode="reflect",
cval=0.0,
extra_arguments=(),
extra_keywords=None):
"""
N-dimensional Laplace filter using a provided second derivative function.
Parameters
----------
%(input)s
derivative2 : callable
Callable with the following signature::
derivative2(input, axis, output, mode, cval,
*extra_arguments, **extra_keywords)
See `extra_arguments`, `extra_keywords` below.
%(output)s
%(mode_multiple)s
%(cval)s
%(extra_keywords)s
%(extra_arguments)s
"""
if extra_keywords is None:
extra_keywords = {}
input = numpy.asarray(input)
output, return_value = _ni_support._get_output(output, input)
axes = list(range(input.ndim))
if len(axes) > 0:
modes = _ni_support._normalize_sequence(mode, len(axes))
derivative2(input, axes[0], output, modes[0], cval,
*extra_arguments, **extra_keywords)
for ii in range(1, len(axes)):
tmp = derivative2(input, axes[ii], output.dtype, modes[ii], cval,
*extra_arguments, **extra_keywords)
output += tmp
else:
output[...] = input[...]
return return_value
@docfiller
def laplace(input, output=None, mode="reflect", cval=0.0):
"""N-dimensional Laplace filter based on approximate second derivatives.
Parameters
----------
%(input)s
%(output)s
%(mode_multiple)s
%(cval)s
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.laplace(ascent)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
def derivative2(input, axis, output, mode, cval):
return correlate1d(input, [1, -2, 1], axis, output, mode, cval, 0)
return generic_laplace(input, derivative2, output, mode, cval)
@docfiller
def gaussian_laplace(input, sigma, output=None, mode="reflect",
cval=0.0, **kwargs):
"""Multidimensional Laplace filter using gaussian second derivatives.
Parameters
----------
%(input)s
sigma : scalar or sequence of scalars
The standard deviations of the Gaussian filter are given for
each axis as a sequence, or as a single number, in which case
it is equal for all axes.
%(output)s
%(mode_multiple)s
%(cval)s
Extra keyword arguments will be passed to gaussian_filter().
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> ascent = misc.ascent()
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> result = ndimage.gaussian_laplace(ascent, sigma=1)
>>> ax1.imshow(result)
>>> result = ndimage.gaussian_laplace(ascent, sigma=3)
>>> ax2.imshow(result)
>>> plt.show()
"""
input = numpy.asarray(input)
def derivative2(input, axis, output, mode, cval, sigma, **kwargs):
order = [0] * input.ndim
order[axis] = 2
return gaussian_filter(input, sigma, order, output, mode, cval,
**kwargs)
return generic_laplace(input, derivative2, output, mode, cval,
extra_arguments=(sigma,),
extra_keywords=kwargs)
@docfiller
def generic_gradient_magnitude(input, derivative, output=None,
mode="reflect", cval=0.0,
extra_arguments=(), extra_keywords=None):
"""Gradient magnitude using a provided gradient function.
Parameters
----------
%(input)s
derivative : callable
Callable with the following signature::
derivative(input, axis, output, mode, cval,
*extra_arguments, **extra_keywords)
See `extra_arguments`, `extra_keywords` below.
`derivative` can assume that `input` and `output` are ndarrays.
Note that the output from `derivative` is modified inplace;
be careful to copy important inputs before returning them.
%(output)s
%(mode_multiple)s
%(cval)s
%(extra_keywords)s
%(extra_arguments)s
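    Examples
    --------
    A minimal sketch: `sobel` has a compatible signature, so passing it as
    `derivative` gives a Sobel gradient magnitude.
    >>> from scipy import ndimage, misc
    >>> ascent = misc.ascent().astype('float64')
    >>> magnitude = ndimage.generic_gradient_magnitude(ascent, ndimage.sobel)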
"""
if extra_keywords is None:
extra_keywords = {}
input = numpy.asarray(input)
output, return_value = _ni_support._get_output(output, input)
axes = list(range(input.ndim))
if len(axes) > 0:
modes = _ni_support._normalize_sequence(mode, len(axes))
derivative(input, axes[0], output, modes[0], cval,
*extra_arguments, **extra_keywords)
numpy.multiply(output, output, output)
for ii in range(1, len(axes)):
tmp = derivative(input, axes[ii], output.dtype, modes[ii], cval,
*extra_arguments, **extra_keywords)
numpy.multiply(tmp, tmp, tmp)
output += tmp
# This allows the sqrt to work with a different default casting
numpy.sqrt(output, output, casting='unsafe')
else:
output[...] = input[...]
return return_value
@docfiller
def gaussian_gradient_magnitude(input, sigma, output=None,
mode="reflect", cval=0.0, **kwargs):
"""Multidimensional gradient magnitude using Gaussian derivatives.
Parameters
----------
%(input)s
sigma : scalar or sequence of scalars
The standard deviations of the Gaussian filter are given for
each axis as a sequence, or as a single number, in which case
        it is equal for all axes.
%(output)s
%(mode_multiple)s
%(cval)s
Extra keyword arguments will be passed to gaussian_filter().
Returns
-------
gaussian_gradient_magnitude : ndarray
Filtered array. Has the same shape as `input`.
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.gaussian_gradient_magnitude(ascent, sigma=5)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
input = numpy.asarray(input)
def derivative(input, axis, output, mode, cval, sigma, **kwargs):
order = [0] * input.ndim
order[axis] = 1
return gaussian_filter(input, sigma, order, output, mode,
cval, **kwargs)
return generic_gradient_magnitude(input, derivative, output, mode,
cval, extra_arguments=(sigma,),
extra_keywords=kwargs)
def _correlate_or_convolve(input, weights, output, mode, cval, origin,
convolution):
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
origins = _ni_support._normalize_sequence(origin, input.ndim)
weights = numpy.asarray(weights, dtype=numpy.float64)
wshape = [ii for ii in weights.shape if ii > 0]
if len(wshape) != input.ndim:
raise RuntimeError('filter weights array has incorrect shape.')
if convolution:
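        # Convolution is implemented as correlation with a reversed kernel;
        # the origins are mirrored as well, with an extra shift on even-sized
        # axes so that the kernel centre stays in the same place.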
weights = weights[tuple([slice(None, None, -1)] * weights.ndim)]
for ii in range(len(origins)):
origins[ii] = -origins[ii]
if not weights.shape[ii] & 1:
origins[ii] -= 1
for origin, lenw in zip(origins, wshape):
if (lenw // 2 + origin < 0) or (lenw // 2 + origin > lenw):
raise ValueError('invalid origin')
if not weights.flags.contiguous:
weights = weights.copy()
output, return_value = _ni_support._get_output(output, input)
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.correlate(input, weights, output, mode, cval, origins)
return return_value
@docfiller
def correlate(input, weights, output=None, mode='reflect', cval=0.0,
origin=0):
"""
Multi-dimensional correlation.
The array is correlated with the given kernel.
Parameters
----------
input : array-like
input array to filter
weights : ndarray
array of weights, same number of dimensions as input
output : array, optional
        The ``output`` parameter passes an array in which to store the
        filter output. The output array should have a different name than
        the input array, to avoid aliasing errors.
mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional
The ``mode`` parameter determines how the array borders are
handled, where ``cval`` is the value when mode is equal to
'constant'. Default is 'reflect'
cval : scalar, optional
Value to fill past edges of input if ``mode`` is 'constant'. Default
is 0.0
origin : scalar, optional
The ``origin`` parameter controls the placement of the filter.
Default 0
See Also
--------
convolve : Convolve an image with a kernel.
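    Examples
    --------
    A small sketch of the relationship with `convolve`: for an odd-sized
    kernel, correlation equals convolution with the reversed kernel.
    >>> import numpy as np
    >>> from scipy import ndimage
    >>> a = np.array([[1, 2, 0, 0],
    ...               [5, 3, 0, 4],
    ...               [0, 0, 0, 7],
    ...               [9, 3, 0, 0]])
    >>> k = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
    >>> np.array_equal(ndimage.correlate(a, k),
    ...                ndimage.convolve(a, k[::-1, ::-1]))
    True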
"""
return _correlate_or_convolve(input, weights, output, mode, cval,
origin, False)
@docfiller
def convolve(input, weights, output=None, mode='reflect', cval=0.0,
origin=0):
"""
Multidimensional convolution.
The array is convolved with the given kernel.
Parameters
----------
input : array_like
Input array to filter.
weights : array_like
Array of weights, same number of dimensions as input
output : ndarray, optional
        The `output` parameter passes an array in which to store the
        filter output. The output array should have a different name than
        the input array, to avoid aliasing errors.
mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional
the `mode` parameter determines how the array borders are
handled. For 'constant' mode, values beyond borders are set to be
`cval`. Default is 'reflect'.
cval : scalar, optional
Value to fill past edges of input if `mode` is 'constant'. Default
is 0.0
origin : array_like, optional
The `origin` parameter controls the placement of the filter,
relative to the centre of the current element of the input.
Default of 0 is equivalent to ``(0,)*input.ndim``.
Returns
-------
result : ndarray
The result of convolution of `input` with `weights`.
See Also
--------
correlate : Correlate an image with a kernel.
Notes
-----
Each value in result is :math:`C_i = \\sum_j{I_{i+k-j} W_j}`, where
W is the `weights` kernel,
j is the n-D spatial index over :math:`W`,
I is the `input` and k is the coordinate of the center of
W, specified by `origin` in the input parameters.
Examples
--------
    Perhaps the simplest case to understand is ``mode='constant', cval=0.0``,
    because in this case borders (i.e., where the `weights` kernel, centered
    on any one value, extends beyond an edge of `input`) are treated as zeros.
>>> a = np.array([[1, 2, 0, 0],
... [5, 3, 0, 4],
... [0, 0, 0, 7],
... [9, 3, 0, 0]])
>>> k = np.array([[1,1,1],[1,1,0],[1,0,0]])
>>> from scipy import ndimage
>>> ndimage.convolve(a, k, mode='constant', cval=0.0)
array([[11, 10, 7, 4],
[10, 3, 11, 11],
[15, 12, 14, 7],
[12, 3, 7, 0]])
Setting ``cval=1.0`` is equivalent to padding the outer edge of `input`
with 1.0's (and then extracting only the original region of the result).
>>> ndimage.convolve(a, k, mode='constant', cval=1.0)
array([[13, 11, 8, 7],
[11, 3, 11, 14],
[16, 12, 14, 10],
[15, 6, 10, 5]])
With ``mode='reflect'`` (the default), outer values are reflected at the
edge of `input` to fill in missing values.
>>> b = np.array([[2, 0, 0],
... [1, 0, 0],
... [0, 0, 0]])
>>> k = np.array([[0,1,0], [0,1,0], [0,1,0]])
>>> ndimage.convolve(b, k, mode='reflect')
array([[5, 0, 0],
[3, 0, 0],
[1, 0, 0]])
This includes diagonally at the corners.
>>> k = np.array([[1,0,0],[0,1,0],[0,0,1]])
>>> ndimage.convolve(b, k)
array([[4, 2, 0],
[3, 2, 0],
[1, 1, 0]])
    With ``mode='nearest'``, the single nearest value inside an edge of
    `input` is repeated as many times as needed to match the overlapping
    `weights`.
>>> c = np.array([[2, 0, 1],
... [1, 0, 0],
... [0, 0, 0]])
>>> k = np.array([[0, 1, 0],
... [0, 1, 0],
... [0, 1, 0],
... [0, 1, 0],
... [0, 1, 0]])
>>> ndimage.convolve(c, k, mode='nearest')
array([[7, 0, 3],
[5, 0, 2],
[3, 0, 1]])
"""
return _correlate_or_convolve(input, weights, output, mode, cval,
origin, True)
@docfiller
def uniform_filter1d(input, size, axis=-1, output=None,
mode="reflect", cval=0.0, origin=0):
"""Calculate a one-dimensional uniform filter along the given axis.
The lines of the array along the given axis are filtered with a
uniform filter of given size.
Parameters
----------
%(input)s
size : int
length of uniform filter
%(axis)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
Examples
--------
>>> from scipy.ndimage import uniform_filter1d
>>> uniform_filter1d([2, 8, 0, 4, 1, 9, 9, 0], size=3)
array([4, 3, 4, 1, 4, 6, 6, 3])
"""
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
axis = _ni_support._check_axis(axis, input.ndim)
if size < 1:
raise RuntimeError('incorrect filter size')
output, return_value = _ni_support._get_output(output, input)
if (size // 2 + origin < 0) or (size // 2 + origin >= size):
raise ValueError('invalid origin')
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.uniform_filter1d(input, size, axis, output, mode, cval,
origin)
return return_value
@docfiller
def uniform_filter(input, size=3, output=None, mode="reflect",
cval=0.0, origin=0):
"""Multi-dimensional uniform filter.
Parameters
----------
%(input)s
size : int or sequence of ints, optional
The sizes of the uniform filter are given for each axis as a
sequence, or as a single number, in which case the size is
equal for all axes.
%(output)s
%(mode_multiple)s
%(cval)s
%(origin)s
Returns
-------
uniform_filter : ndarray
Filtered array. Has the same shape as `input`.
Notes
-----
The multi-dimensional filter is implemented as a sequence of
one-dimensional uniform filters. The intermediate arrays are stored
in the same data type as the output. Therefore, for output types
with a limited precision, the results may be imprecise because
intermediate results may be stored with insufficient precision.
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.uniform_filter(ascent, size=20)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
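    As the Notes above mention, intermediate values are kept in the output
    dtype; requesting a floating point output is one way to avoid rounding
    of integer intermediates (a minimal sketch):
    >>> import numpy as np
    >>> result_float = ndimage.uniform_filter(ascent, size=20, output=np.float64)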
"""
input = numpy.asarray(input)
output, return_value = _ni_support._get_output(output, input)
sizes = _ni_support._normalize_sequence(size, input.ndim)
origins = _ni_support._normalize_sequence(origin, input.ndim)
modes = _ni_support._normalize_sequence(mode, input.ndim)
axes = list(range(input.ndim))
axes = [(axes[ii], sizes[ii], origins[ii], modes[ii])
for ii in range(len(axes)) if sizes[ii] > 1]
if len(axes) > 0:
for axis, size, origin, mode in axes:
uniform_filter1d(input, int(size), axis, output, mode,
cval, origin)
input = output
else:
output[...] = input[...]
return return_value
@docfiller
def minimum_filter1d(input, size, axis=-1, output=None,
mode="reflect", cval=0.0, origin=0):
"""Calculate a one-dimensional minimum filter along the given axis.
The lines of the array along the given axis are filtered with a
minimum filter of given size.
Parameters
----------
%(input)s
size : int
length along which to calculate 1D minimum
%(axis)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
Notes
-----
This function implements the MINLIST algorithm [1]_, as described by
Richard Harter [2]_, and has a guaranteed O(n) performance, `n` being
the `input` length, regardless of filter size.
References
----------
.. [1] http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.42.2777
.. [2] http://www.richardhartersworld.com/cri/2001/slidingmin.html
Examples
--------
>>> from scipy.ndimage import minimum_filter1d
>>> minimum_filter1d([2, 8, 0, 4, 1, 9, 9, 0], size=3)
array([2, 0, 0, 0, 1, 1, 0, 0])
"""
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
axis = _ni_support._check_axis(axis, input.ndim)
if size < 1:
raise RuntimeError('incorrect filter size')
output, return_value = _ni_support._get_output(output, input)
if (size // 2 + origin < 0) or (size // 2 + origin >= size):
raise ValueError('invalid origin')
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.min_or_max_filter1d(input, size, axis, output, mode, cval,
origin, 1)
return return_value
@docfiller
def maximum_filter1d(input, size, axis=-1, output=None,
mode="reflect", cval=0.0, origin=0):
"""Calculate a one-dimensional maximum filter along the given axis.
The lines of the array along the given axis are filtered with a
maximum filter of given size.
Parameters
----------
%(input)s
size : int
Length along which to calculate the 1-D maximum.
%(axis)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
Returns
-------
maximum1d : ndarray, None
Maximum-filtered array with same shape as input.
None if `output` is not None
Notes
-----
This function implements the MAXLIST algorithm [1]_, as described by
Richard Harter [2]_, and has a guaranteed O(n) performance, `n` being
the `input` length, regardless of filter size.
References
----------
.. [1] http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.42.2777
.. [2] http://www.richardhartersworld.com/cri/2001/slidingmin.html
Examples
--------
>>> from scipy.ndimage import maximum_filter1d
>>> maximum_filter1d([2, 8, 0, 4, 1, 9, 9, 0], size=3)
array([8, 8, 8, 4, 9, 9, 9, 9])
"""
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
axis = _ni_support._check_axis(axis, input.ndim)
if size < 1:
raise RuntimeError('incorrect filter size')
output, return_value = _ni_support._get_output(output, input)
if (size // 2 + origin < 0) or (size // 2 + origin >= size):
raise ValueError('invalid origin')
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.min_or_max_filter1d(input, size, axis, output, mode, cval,
origin, 0)
return return_value
def _min_or_max_filter(input, size, footprint, structure, output, mode,
cval, origin, minimum):
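    # A plain size (or an all-ones footprint) allows a separable
    # implementation as a sequence of 1-D filters; an irregular footprint or
    # a structure falls through to the general n-D implementation below.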
if structure is None:
if footprint is None:
if size is None:
raise RuntimeError("no footprint provided")
separable = True
else:
footprint = numpy.asarray(footprint, dtype=bool)
if not footprint.any():
raise ValueError("All-zero footprint is not supported.")
if footprint.all():
size = footprint.shape
footprint = None
separable = True
else:
separable = False
else:
structure = numpy.asarray(structure, dtype=numpy.float64)
separable = False
if footprint is None:
footprint = numpy.ones(structure.shape, bool)
else:
footprint = numpy.asarray(footprint, dtype=bool)
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output, return_value = _ni_support._get_output(output, input)
origins = _ni_support._normalize_sequence(origin, input.ndim)
if separable:
sizes = _ni_support._normalize_sequence(size, input.ndim)
modes = _ni_support._normalize_sequence(mode, input.ndim)
axes = list(range(input.ndim))
axes = [(axes[ii], sizes[ii], origins[ii], modes[ii])
for ii in range(len(axes)) if sizes[ii] > 1]
if minimum:
filter_ = minimum_filter1d
else:
filter_ = maximum_filter1d
if len(axes) > 0:
for axis, size, origin, mode in axes:
filter_(input, int(size), axis, output, mode, cval, origin)
input = output
else:
output[...] = input[...]
else:
fshape = [ii for ii in footprint.shape if ii > 0]
if len(fshape) != input.ndim:
raise RuntimeError('footprint array has incorrect shape.')
for origin, lenf in zip(origins, fshape):
if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf):
raise ValueError('invalid origin')
if not footprint.flags.contiguous:
footprint = footprint.copy()
if structure is not None:
if len(structure.shape) != input.ndim:
raise RuntimeError('structure array has incorrect shape')
if not structure.flags.contiguous:
structure = structure.copy()
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.min_or_max_filter(input, footprint, structure, output,
mode, cval, origins, minimum)
return return_value
@docfiller
def minimum_filter(input, size=None, footprint=None, output=None,
mode="reflect", cval=0.0, origin=0):
"""Calculate a multi-dimensional minimum filter.
Parameters
----------
%(input)s
%(size_foot)s
%(output)s
%(mode_multiple)s
%(cval)s
%(origin)s
Returns
-------
minimum_filter : ndarray
Filtered array. Has the same shape as `input`.
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.minimum_filter(ascent, size=20)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
return _min_or_max_filter(input, size, footprint, None, output, mode,
cval, origin, 1)
@docfiller
def maximum_filter(input, size=None, footprint=None, output=None,
mode="reflect", cval=0.0, origin=0):
"""Calculate a multi-dimensional maximum filter.
Parameters
----------
%(input)s
%(size_foot)s
%(output)s
%(mode_multiple)s
%(cval)s
%(origin)s
Returns
-------
maximum_filter : ndarray
Filtered array. Has the same shape as `input`.
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.maximum_filter(ascent, size=20)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
return _min_or_max_filter(input, size, footprint, None, output, mode,
cval, origin, 0)
@docfiller
def _rank_filter(input, rank, size=None, footprint=None, output=None,
mode="reflect", cval=0.0, origin=0, operation='rank'):
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
origins = _ni_support._normalize_sequence(origin, input.ndim)
if footprint is None:
if size is None:
raise RuntimeError("no footprint or filter size provided")
sizes = _ni_support._normalize_sequence(size, input.ndim)
footprint = numpy.ones(sizes, dtype=bool)
else:
footprint = numpy.asarray(footprint, dtype=bool)
fshape = [ii for ii in footprint.shape if ii > 0]
if len(fshape) != input.ndim:
raise RuntimeError('filter footprint array has incorrect shape.')
for origin, lenf in zip(origins, fshape):
if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf):
raise ValueError('invalid origin')
if not footprint.flags.contiguous:
footprint = footprint.copy()
filter_size = numpy.where(footprint, 1, 0).sum()
if operation == 'median':
rank = filter_size // 2
elif operation == 'percentile':
percentile = rank
if percentile < 0.0:
percentile += 100.0
if percentile < 0 or percentile > 100:
raise RuntimeError('invalid percentile')
if percentile == 100.0:
rank = filter_size - 1
else:
rank = int(float(filter_size) * percentile / 100.0)
if rank < 0:
rank += filter_size
if rank < 0 or rank >= filter_size:
raise RuntimeError('rank not within filter footprint size')
if rank == 0:
return minimum_filter(input, None, footprint, output, mode, cval,
origins)
elif rank == filter_size - 1:
return maximum_filter(input, None, footprint, output, mode, cval,
origins)
else:
output, return_value = _ni_support._get_output(output, input)
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.rank_filter(input, rank, footprint, output, mode, cval,
origins)
return return_value
@docfiller
def rank_filter(input, rank, size=None, footprint=None, output=None,
mode="reflect", cval=0.0, origin=0):
"""Calculate a multi-dimensional rank filter.
Parameters
----------
%(input)s
rank : int
        The rank parameter may be less than zero, i.e., rank = -1
indicates the largest element.
%(size_foot)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
Returns
-------
rank_filter : ndarray
Filtered array. Has the same shape as `input`.
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.rank_filter(ascent, rank=42, size=20)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
return _rank_filter(input, rank, size, footprint, output, mode, cval,
origin, 'rank')
@docfiller
def median_filter(input, size=None, footprint=None, output=None,
mode="reflect", cval=0.0, origin=0):
"""
Calculate a multidimensional median filter.
Parameters
----------
%(input)s
%(size_foot)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
Returns
-------
median_filter : ndarray
Filtered array. Has the same shape as `input`.
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.median_filter(ascent, size=20)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
return _rank_filter(input, 0, size, footprint, output, mode, cval,
origin, 'median')
@docfiller
def percentile_filter(input, percentile, size=None, footprint=None,
output=None, mode="reflect", cval=0.0, origin=0):
"""Calculate a multi-dimensional percentile filter.
Parameters
----------
%(input)s
percentile : scalar
        The percentile parameter may be less than zero, i.e.,
        percentile = -20 equals percentile = 80.
%(size_foot)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
Returns
-------
percentile_filter : ndarray
Filtered array. Has the same shape as `input`.
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.percentile_filter(ascent, percentile=20, size=20)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
return _rank_filter(input, percentile, size, footprint, output, mode,
cval, origin, 'percentile')
@docfiller
def generic_filter1d(input, function, filter_size, axis=-1,
output=None, mode="reflect", cval=0.0, origin=0,
extra_arguments=(), extra_keywords=None):
"""Calculate a one-dimensional filter along the given axis.
`generic_filter1d` iterates over the lines of the array, calling the
given function at each line. The arguments of the line are the
input line, and the output line. The input and output lines are 1D
double arrays. The input line is extended appropriately according
to the filter size and origin. The output line must be modified
in-place with the result.
Parameters
----------
%(input)s
function : {callable, scipy.LowLevelCallable}
Function to apply along given axis.
filter_size : scalar
Length of the filter.
%(axis)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
%(extra_arguments)s
%(extra_keywords)s
Notes
-----
This function also accepts low-level callback functions with one of
the following signatures and wrapped in `scipy.LowLevelCallable`:
.. code:: c
int function(double *input_line, npy_intp input_length,
double *output_line, npy_intp output_length,
void *user_data)
int function(double *input_line, intptr_t input_length,
double *output_line, intptr_t output_length,
void *user_data)
The calling function iterates over the lines of the input and output
arrays, calling the callback function at each line. The current line
is extended according to the border conditions set by the calling
function, and the result is copied into the array that is passed
through ``input_line``. The length of the input line (after extension)
is passed through ``input_length``. The callback function should apply
the filter and store the result in the array passed through
``output_line``. The length of the output line is passed through
``output_length``. ``user_data`` is the data pointer provided
to `scipy.LowLevelCallable` as-is.
The callback function must return an integer error status that is zero
if something went wrong and one otherwise. If an error occurs, you should
normally set the python error status with an informative message
before returning, otherwise a default error message is set by the
calling function.
In addition, some other low-level function pointer specifications
are accepted, but these are for backward compatibility only and should
not be used in new code.
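    Examples
    --------
    A minimal sketch of a Python callable: it receives the extended input
    line and the output line, and must fill the output line in place. The
    running mean below reproduces `uniform_filter1d`.
    >>> import numpy as np
    >>> from scipy import ndimage
    >>> def running_mean(input_line, output_line):
    ...     n = input_line.size - output_line.size + 1
    ...     for i in range(output_line.size):
    ...         output_line[i] = input_line[i:i + n].mean()
    >>> a = np.arange(12, dtype=float).reshape(3, 4)
    >>> out = ndimage.generic_filter1d(a, running_mean, filter_size=3)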
"""
if extra_keywords is None:
extra_keywords = {}
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output, return_value = _ni_support._get_output(output, input)
if filter_size < 1:
raise RuntimeError('invalid filter size')
axis = _ni_support._check_axis(axis, input.ndim)
if (filter_size // 2 + origin < 0) or (filter_size // 2 + origin >=
filter_size):
raise ValueError('invalid origin')
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.generic_filter1d(input, function, filter_size, axis, output,
mode, cval, origin, extra_arguments,
extra_keywords)
return return_value
@docfiller
def generic_filter(input, function, size=None, footprint=None,
output=None, mode="reflect", cval=0.0, origin=0,
extra_arguments=(), extra_keywords=None):
"""Calculate a multi-dimensional filter using the given function.
At each element the provided function is called. The input values
within the filter footprint at that element are passed to the function
as a 1D array of double values.
Parameters
----------
%(input)s
function : {callable, scipy.LowLevelCallable}
Function to apply at each element.
%(size_foot)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
%(extra_arguments)s
%(extra_keywords)s
Notes
-----
This function also accepts low-level callback functions with one of
the following signatures and wrapped in `scipy.LowLevelCallable`:
.. code:: c
int callback(double *buffer, npy_intp filter_size,
double *return_value, void *user_data)
int callback(double *buffer, intptr_t filter_size,
double *return_value, void *user_data)
The calling function iterates over the elements of the input and
output arrays, calling the callback function at each element. The
elements within the footprint of the filter at the current element are
passed through the ``buffer`` parameter, and the number of elements
within the footprint through ``filter_size``. The calculated value is
returned in ``return_value``. ``user_data`` is the data pointer provided
to `scipy.LowLevelCallable` as-is.
The callback function must return an integer error status that is zero
if something went wrong and one otherwise. If an error occurs, you should
normally set the python error status with an informative message
before returning, otherwise a default error message is set by the
calling function.
In addition, some other low-level function pointer specifications
are accepted, but these are for backward compatibility only and should
not be used in new code.
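    Examples
    --------
    A minimal sketch: passing ``numpy.amax`` as the callable reproduces
    `maximum_filter` over the same footprint.
    >>> import numpy as np
    >>> from scipy import ndimage
    >>> a = np.arange(25).reshape(5, 5)
    >>> np.array_equal(ndimage.generic_filter(a, np.amax, size=3),
    ...                ndimage.maximum_filter(a, size=3))
    True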
"""
if extra_keywords is None:
extra_keywords = {}
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
origins = _ni_support._normalize_sequence(origin, input.ndim)
if footprint is None:
if size is None:
raise RuntimeError("no footprint or filter size provided")
sizes = _ni_support._normalize_sequence(size, input.ndim)
footprint = numpy.ones(sizes, dtype=bool)
else:
footprint = numpy.asarray(footprint, dtype=bool)
fshape = [ii for ii in footprint.shape if ii > 0]
if len(fshape) != input.ndim:
raise RuntimeError('filter footprint array has incorrect shape.')
for origin, lenf in zip(origins, fshape):
if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf):
raise ValueError('invalid origin')
if not footprint.flags.contiguous:
footprint = footprint.copy()
output, return_value = _ni_support._get_output(output, input)
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.generic_filter(input, function, footprint, output, mode,
cval, origins, extra_arguments, extra_keywords)
return return_value
|
mbayon/TFG-MachineLearning
|
venv/lib/python3.6/site-packages/scipy/ndimage/filters.py
|
Python
|
mit
| 52,520
| 0.00019
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
from erlport.erlterms import Atom, List
from erlport.erlang import set_encoder, set_decoder
def setup_dtype():
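    # Register the encoder/decoder below with erlport so that values crossing
    # the Erlang/Python boundary are converted automatically.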
set_encoder(dtype_encoder)
set_decoder(dtype_decoder)
return Atom(b'ok')
def dtype_encoder(value):
if isinstance(value, np.int64):
return np.asscalar(value)
elif isinstance(value, np.float64):
return np.asscalar(value)
elif isinstance(value, str):
try:
return value.encode('utf-8') # to express as binary() instead of string() on erlang side
except:
return value
elif isinstance(value, list):
return [dtype_encoder(v) for v in value]
elif isinstance(value, tuple):
nvalue = ()
for v in value:
nvalue = nvalue + (dtype_encoder(v),)
return nvalue
else:
try:
return value.encode('utf-8')
except:
return value
def dtype_decoder(value):
try:
if isinstance(value, List):
return [dtype_decoder(v) for v in value]
elif isinstance(value, tuple):
nvalue = ()
for v in value:
nvalue = nvalue + (dtype_decoder(v),)
return nvalue
elif isinstance(value, str):
return value
else:
return value.decode("utf-8")
except:
return value
|
zgbjgg/jun
|
priv/jun_enc_dec.py
|
Python
|
mit
| 1,427
| 0.005606
|
# Module to run tests on surveys
# Most of these are *not* done with Travis yet
# TEST_UNICODE_LITERALS
import pytest
import os
import shutil
import numpy as np
from astropy.table import Table
from frb.galaxies.frbgalaxy import FRBHost
from frb.galaxies import eazy as frbeazy
from frb.frb import FRB
from distutils.spawn import find_executable
eazy_exec = pytest.mark.skipif(find_executable('eazy') is None,
                               reason='test requires eazy')
def data_path(filename):
data_dir = os.path.join(os.path.dirname(__file__), 'files')
return os.path.join(data_dir, filename)
@pytest.fixture
def host_obj():
# VLT
photom = Table()
photom['Name'] = ['G_TEST']
photom['ra'] = 123.422
photom['dec'] = 23.222
# These are observed
photom['LRISb_V'] = 25.86
photom['LRISb_V_err'] = 0.25
photom['GMOS_S_r'] = 23.61
photom['GMOS_S_r_err'] = 0.15
photom['LRISr_I'] = 23.09
photom['LRISr_I_err'] = 0.1
photom['NOT_z'] = 23.35
photom['NOT_z_err'] = 0.3
photom['NIRI_J'] = 21.75 + 0.91
photom['NIRI_J_err'] = 0.2
#
host190613A = FRBHost(photom['ra'], photom['dec'], FRB.by_name('FRB20121102'))
host190613A.parse_photom(photom)
host190613A.name = 'G_TEST'
return host190613A
@eazy_exec
def test_eazy(host_obj):
if os.path.isdir(data_path('eazy')):
shutil.rmtree(data_path('eazy'))
os.mkdir(data_path('eazy'))
# Generate
frbeazy.eazy_input_files(host_obj.photom, data_path('eazy/input'),
host_obj.name,
data_path('eazy/output'),
templates='br07_default',
prior_filter='GMOS_S_r')
# Test
assert os.path.isfile(data_path('eazy/input/G_TEST.cat'))
assert os.path.isfile(data_path('eazy/input/zphot.param.G_TEST'))
assert os.path.isfile(data_path('eazy/input/zphot.translate.G_TEST'))
# Run
frbeazy.run_eazy(data_path('eazy/input'),
host_obj.name,
os.path.join(data_path('eazy/output'), 'logfile'))
assert os.path.isfile(data_path('eazy/output/photz.zout'))
# Read
zgrid, pzi, prior = frbeazy.getEazyPz(-1, MAIN_OUTPUT_FILE='photz',
OUTPUT_DIRECTORY=data_path('eazy/output'),
CACHE_FILE='Same', binaries=None, get_prior=True)
zphot, sig_zphot = frbeazy.eazy_stats(zgrid, pzi)
assert np.isclose(zphot, 0.5929259648750858, rtol=1e-4)
# Remove
shutil.rmtree(data_path('eazy'))
|
FRBs/FRB
|
frb/tests/test_eazy.py
|
Python
|
bsd-3-clause
| 2,608
| 0.003067
|
# -*- coding: utf-8 -*-
__all__ = ["cc"]
|
MarieVdS/ComboCode
|
__init__.py
|
Python
|
gpl-3.0
| 42
| 0
|
#
# Program: List 4, question 1;
# Felipe Henrique Bastos Costa - 1615310032;
#
lista = []  # empty list;
cont1 = 0  # index counter;
cont2 = 1  # counter for the position of the number (first, second, etc.);
v = 5  # would represent the len of the list;
while cont1 < v:
    x = int(input("Enter integer number %d to put in your list:\n" % cont2))  # x receives the number entered by the user
    lista.append(x)  # the number stored in x is appended to the list;
    cont1 += 1  # both counters
    cont2 += 1  # are being incremented;
print("The list you entered was:\n%s" % lista)
|
any1m1c/ipc20161
|
lista4/ipc_lista4.01.py
|
Python
|
apache-2.0
| 675
| 0.040299
|
# -*- coding: utf-8 -*-
#############################################################
# This file was automatically generated on 2022-01-18. #
# #
# Python Bindings Version 2.1.29 #
# #
# If you have a bugfix for this file and want to commit it, #
# please fix the bug in the generator. You can find a link #
# to the generators git repository on tinkerforge.com #
#############################################################
from collections import namedtuple
try:
from .ip_connection import Device, IPConnection, Error, create_char, create_char_list, create_string, create_chunk_data
except (ValueError, ImportError):
from ip_connection import Device, IPConnection, Error, create_char, create_char_list, create_string, create_chunk_data
GetPositionCallbackConfiguration = namedtuple('PositionCallbackConfiguration', ['period', 'value_has_to_change', 'option', 'min', 'max'])
GetSPITFPErrorCount = namedtuple('SPITFPErrorCount', ['error_count_ack_checksum', 'error_count_message_checksum', 'error_count_frame', 'error_count_overflow'])
GetIdentity = namedtuple('Identity', ['uid', 'connected_uid', 'position', 'hardware_version', 'firmware_version', 'device_identifier'])
class BrickletLinearPotiV2(Device):
"""
59mm linear potentiometer
"""
DEVICE_IDENTIFIER = 2139
DEVICE_DISPLAY_NAME = 'Linear Poti Bricklet 2.0'
DEVICE_URL_PART = 'linear_poti_v2' # internal
CALLBACK_POSITION = 4
FUNCTION_GET_POSITION = 1
FUNCTION_SET_POSITION_CALLBACK_CONFIGURATION = 2
FUNCTION_GET_POSITION_CALLBACK_CONFIGURATION = 3
FUNCTION_GET_SPITFP_ERROR_COUNT = 234
FUNCTION_SET_BOOTLOADER_MODE = 235
FUNCTION_GET_BOOTLOADER_MODE = 236
FUNCTION_SET_WRITE_FIRMWARE_POINTER = 237
FUNCTION_WRITE_FIRMWARE = 238
FUNCTION_SET_STATUS_LED_CONFIG = 239
FUNCTION_GET_STATUS_LED_CONFIG = 240
FUNCTION_GET_CHIP_TEMPERATURE = 242
FUNCTION_RESET = 243
FUNCTION_WRITE_UID = 248
FUNCTION_READ_UID = 249
FUNCTION_GET_IDENTITY = 255
THRESHOLD_OPTION_OFF = 'x'
THRESHOLD_OPTION_OUTSIDE = 'o'
THRESHOLD_OPTION_INSIDE = 'i'
THRESHOLD_OPTION_SMALLER = '<'
THRESHOLD_OPTION_GREATER = '>'
BOOTLOADER_MODE_BOOTLOADER = 0
BOOTLOADER_MODE_FIRMWARE = 1
BOOTLOADER_MODE_BOOTLOADER_WAIT_FOR_REBOOT = 2
BOOTLOADER_MODE_FIRMWARE_WAIT_FOR_REBOOT = 3
BOOTLOADER_MODE_FIRMWARE_WAIT_FOR_ERASE_AND_REBOOT = 4
BOOTLOADER_STATUS_OK = 0
BOOTLOADER_STATUS_INVALID_MODE = 1
BOOTLOADER_STATUS_NO_CHANGE = 2
BOOTLOADER_STATUS_ENTRY_FUNCTION_NOT_PRESENT = 3
BOOTLOADER_STATUS_DEVICE_IDENTIFIER_INCORRECT = 4
BOOTLOADER_STATUS_CRC_MISMATCH = 5
STATUS_LED_CONFIG_OFF = 0
STATUS_LED_CONFIG_ON = 1
STATUS_LED_CONFIG_SHOW_HEARTBEAT = 2
STATUS_LED_CONFIG_SHOW_STATUS = 3
def __init__(self, uid, ipcon):
"""
Creates an object with the unique device ID *uid* and adds it to
the IP Connection *ipcon*.
"""
Device.__init__(self, uid, ipcon, BrickletLinearPotiV2.DEVICE_IDENTIFIER, BrickletLinearPotiV2.DEVICE_DISPLAY_NAME)
self.api_version = (2, 0, 0)
self.response_expected[BrickletLinearPotiV2.FUNCTION_GET_POSITION] = BrickletLinearPotiV2.RESPONSE_EXPECTED_ALWAYS_TRUE
self.response_expected[BrickletLinearPotiV2.FUNCTION_SET_POSITION_CALLBACK_CONFIGURATION] = BrickletLinearPotiV2.RESPONSE_EXPECTED_TRUE
self.response_expected[BrickletLinearPotiV2.FUNCTION_GET_POSITION_CALLBACK_CONFIGURATION] = BrickletLinearPotiV2.RESPONSE_EXPECTED_ALWAYS_TRUE
self.response_expected[BrickletLinearPotiV2.FUNCTION_GET_SPITFP_ERROR_COUNT] = BrickletLinearPotiV2.RESPONSE_EXPECTED_ALWAYS_TRUE
self.response_expected[BrickletLinearPotiV2.FUNCTION_SET_BOOTLOADER_MODE] = BrickletLinearPotiV2.RESPONSE_EXPECTED_ALWAYS_TRUE
self.response_expected[BrickletLinearPotiV2.FUNCTION_GET_BOOTLOADER_MODE] = BrickletLinearPotiV2.RESPONSE_EXPECTED_ALWAYS_TRUE
self.response_expected[BrickletLinearPotiV2.FUNCTION_SET_WRITE_FIRMWARE_POINTER] = BrickletLinearPotiV2.RESPONSE_EXPECTED_FALSE
self.response_expected[BrickletLinearPotiV2.FUNCTION_WRITE_FIRMWARE] = BrickletLinearPotiV2.RESPONSE_EXPECTED_ALWAYS_TRUE
self.response_expected[BrickletLinearPotiV2.FUNCTION_SET_STATUS_LED_CONFIG] = BrickletLinearPotiV2.RESPONSE_EXPECTED_FALSE
self.response_expected[BrickletLinearPotiV2.FUNCTION_GET_STATUS_LED_CONFIG] = BrickletLinearPotiV2.RESPONSE_EXPECTED_ALWAYS_TRUE
self.response_expected[BrickletLinearPotiV2.FUNCTION_GET_CHIP_TEMPERATURE] = BrickletLinearPotiV2.RESPONSE_EXPECTED_ALWAYS_TRUE
self.response_expected[BrickletLinearPotiV2.FUNCTION_RESET] = BrickletLinearPotiV2.RESPONSE_EXPECTED_FALSE
self.response_expected[BrickletLinearPotiV2.FUNCTION_WRITE_UID] = BrickletLinearPotiV2.RESPONSE_EXPECTED_FALSE
self.response_expected[BrickletLinearPotiV2.FUNCTION_READ_UID] = BrickletLinearPotiV2.RESPONSE_EXPECTED_ALWAYS_TRUE
self.response_expected[BrickletLinearPotiV2.FUNCTION_GET_IDENTITY] = BrickletLinearPotiV2.RESPONSE_EXPECTED_ALWAYS_TRUE
self.callback_formats[BrickletLinearPotiV2.CALLBACK_POSITION] = (9, 'B')
ipcon.add_device(self)
def get_position(self):
"""
Returns the position of the linear potentiometer. The value is
between 0% (slider down) and 100% (slider up).
If you want to get the value periodically, it is recommended to use the
:cb:`Position` callback. You can set the callback configuration
with :func:`Set Position Callback Configuration`.
"""
self.check_validity()
return self.ipcon.send_request(self, BrickletLinearPotiV2.FUNCTION_GET_POSITION, (), '', 9, 'B')
def set_position_callback_configuration(self, period, value_has_to_change, option, min, max):
"""
The period is the period with which the :cb:`Position` callback is triggered
periodically. A value of 0 turns the callback off.
If the `value has to change`-parameter is set to true, the callback is only
triggered after the value has changed. If the value didn't change
within the period, the callback is triggered immediately on change.
If it is set to false, the callback is continuously triggered with the period,
independent of the value.
It is furthermore possible to constrain the callback with thresholds.
The `option`-parameter together with min/max sets a threshold for the :cb:`Position` callback.
The following options are possible:
.. csv-table::
:header: "Option", "Description"
:widths: 10, 100
"'x'", "Threshold is turned off"
"'o'", "Threshold is triggered when the value is *outside* the min and max values"
"'i'", "Threshold is triggered when the value is *inside* or equal to the min and max values"
"'<'", "Threshold is triggered when the value is smaller than the min value (max is ignored)"
"'>'", "Threshold is triggered when the value is greater than the min value (max is ignored)"
If the option is set to 'x' (threshold turned off) the callback is triggered with the fixed period.
"""
self.check_validity()
period = int(period)
value_has_to_change = bool(value_has_to_change)
option = create_char(option)
min = int(min)
max = int(max)
self.ipcon.send_request(self, BrickletLinearPotiV2.FUNCTION_SET_POSITION_CALLBACK_CONFIGURATION, (period, value_has_to_change, option, min, max), 'I ! c B B', 0, '')
def get_position_callback_configuration(self):
"""
Returns the callback configuration as set by :func:`Set Position Callback Configuration`.
"""
self.check_validity()
return GetPositionCallbackConfiguration(*self.ipcon.send_request(self, BrickletLinearPotiV2.FUNCTION_GET_POSITION_CALLBACK_CONFIGURATION, (), '', 16, 'I ! c B B'))
def get_spitfp_error_count(self):
"""
Returns the error count for the communication between Brick and Bricklet.
The errors are divided into
* ACK checksum errors,
* message checksum errors,
* framing errors and
* overflow errors.
The errors counts are for errors that occur on the Bricklet side. All
Bricks have a similar function that returns the errors on the Brick side.
"""
self.check_validity()
return GetSPITFPErrorCount(*self.ipcon.send_request(self, BrickletLinearPotiV2.FUNCTION_GET_SPITFP_ERROR_COUNT, (), '', 24, 'I I I I'))
def set_bootloader_mode(self, mode):
"""
Sets the bootloader mode and returns the status after the requested
mode change was instigated.
You can change from bootloader mode to firmware mode and vice versa. A change
from bootloader mode to firmware mode will only take place if the entry function,
device identifier and CRC are present and correct.
This function is used by Brick Viewer during flashing. It should not be
necessary to call it in a normal user program.
"""
self.check_validity()
mode = int(mode)
return self.ipcon.send_request(self, BrickletLinearPotiV2.FUNCTION_SET_BOOTLOADER_MODE, (mode,), 'B', 9, 'B')
def get_bootloader_mode(self):
"""
Returns the current bootloader mode, see :func:`Set Bootloader Mode`.
"""
self.check_validity()
return self.ipcon.send_request(self, BrickletLinearPotiV2.FUNCTION_GET_BOOTLOADER_MODE, (), '', 9, 'B')
def set_write_firmware_pointer(self, pointer):
"""
Sets the firmware pointer for :func:`Write Firmware`. The pointer has
to be increased by chunks of size 64. The data is written to flash
every 4 chunks (which equals to one page of size 256).
This function is used by Brick Viewer during flashing. It should not be
necessary to call it in a normal user program.
"""
self.check_validity()
pointer = int(pointer)
self.ipcon.send_request(self, BrickletLinearPotiV2.FUNCTION_SET_WRITE_FIRMWARE_POINTER, (pointer,), 'I', 0, '')
def write_firmware(self, data):
"""
Writes 64 Bytes of firmware at the position as written by
:func:`Set Write Firmware Pointer` before. The firmware is written
to flash every 4 chunks.
You can only write firmware in bootloader mode.
This function is used by Brick Viewer during flashing. It should not be
necessary to call it in a normal user program.
"""
self.check_validity()
data = list(map(int, data))
return self.ipcon.send_request(self, BrickletLinearPotiV2.FUNCTION_WRITE_FIRMWARE, (data,), '64B', 9, 'B')
def set_status_led_config(self, config):
"""
Sets the status LED configuration. By default the LED shows
communication traffic between Brick and Bricklet, it flickers once
for every 10 received data packets.
You can also turn the LED permanently on/off or show a heartbeat.
        If the Bricklet is in bootloader mode, the LED will show a heartbeat by default.
"""
self.check_validity()
config = int(config)
self.ipcon.send_request(self, BrickletLinearPotiV2.FUNCTION_SET_STATUS_LED_CONFIG, (config,), 'B', 0, '')
def get_status_led_config(self):
"""
Returns the configuration as set by :func:`Set Status LED Config`
"""
self.check_validity()
return self.ipcon.send_request(self, BrickletLinearPotiV2.FUNCTION_GET_STATUS_LED_CONFIG, (), '', 9, 'B')
def get_chip_temperature(self):
"""
Returns the temperature as measured inside the microcontroller. The
value returned is not the ambient temperature!
        The temperature is only proportional to the real temperature and has
        poor accuracy, so in practice it is only useful as an indicator of
        temperature changes.
"""
self.check_validity()
return self.ipcon.send_request(self, BrickletLinearPotiV2.FUNCTION_GET_CHIP_TEMPERATURE, (), '', 10, 'h')
def reset(self):
"""
Calling this function will reset the Bricklet. All configurations
will be lost.
After a reset you have to create new device objects,
calling functions on the existing ones will result in
undefined behavior!
"""
self.check_validity()
self.ipcon.send_request(self, BrickletLinearPotiV2.FUNCTION_RESET, (), '', 0, '')
def write_uid(self, uid):
"""
Writes a new UID into flash. If you want to set a new UID
you have to decode the Base58 encoded UID string into an
integer first.
We recommend that you use Brick Viewer to change the UID.
"""
self.check_validity()
uid = int(uid)
self.ipcon.send_request(self, BrickletLinearPotiV2.FUNCTION_WRITE_UID, (uid,), 'I', 0, '')
def read_uid(self):
"""
Returns the current UID as an integer. Encode as
Base58 to get the usual string version.
"""
self.check_validity()
return self.ipcon.send_request(self, BrickletLinearPotiV2.FUNCTION_READ_UID, (), '', 12, 'I')
def get_identity(self):
"""
Returns the UID, the UID where the Bricklet is connected to,
the position, the hardware and firmware version as well as the
device identifier.
The position can be 'a', 'b', 'c', 'd', 'e', 'f', 'g' or 'h' (Bricklet Port).
A Bricklet connected to an :ref:`Isolator Bricklet <isolator_bricklet>` is always at
position 'z'.
The device identifier numbers can be found :ref:`here <device_identifier>`.
|device_identifier_constant|
"""
return GetIdentity(*self.ipcon.send_request(self, BrickletLinearPotiV2.FUNCTION_GET_IDENTITY, (), '', 33, '8s 8s c 3B 3B H'))
def register_callback(self, callback_id, function):
"""
Registers the given *function* with the given *callback_id*.
"""
if function is None:
self.registered_callbacks.pop(callback_id, None)
else:
self.registered_callbacks[callback_id] = function
LinearPotiV2 = BrickletLinearPotiV2 # for backward compatibility
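if __name__ == '__main__':
    # A minimal usage sketch, not part of the generated bindings; it assumes a
    # running brickd reachable at localhost:4223 and a Bricklet with UID 'XYZ'
    # (host, port and UID are placeholders to adapt).
    HOST = 'localhost'
    PORT = 4223
    UID = 'XYZ'
    def cb_position(position):
        print('Position: {0} %'.format(position))
    ipcon = IPConnection()
    lp = BrickletLinearPotiV2(UID, ipcon)
    ipcon.connect(HOST, PORT)
    print('Position: {0} %'.format(lp.get_position()))
    # Ask for the Position callback once per second; a real program would keep
    # running (e.g. sleep or wait for input) to actually receive callbacks.
    lp.register_callback(BrickletLinearPotiV2.CALLBACK_POSITION, cb_position)
    lp.set_position_callback_configuration(1000, False, 'x', 0, 0)
    ipcon.disconnect()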
|
Tinkerforge/brickv
|
src/brickv/bindings/bricklet_linear_poti_v2.py
|
Python
|
gpl-2.0
| 14,625
| 0.004239
|
from django import forms
from .models import Post
from django.contrib.auth import (
authenticate,
get_user_model,
login,
logout,
)
class PostForm(forms.ModelForm): # Post Thread View
class Meta:
model = Post
fields = ['title','image', 'user','country','guide']
        widgets = {'guide': forms.Textarea(attrs={'rows': 12, 'style': 'resize:none'})}
##-----------------------------
User = get_user_model()
class UserLoginForm(forms.Form):
username = forms.CharField(max_length = 254, widget=forms.TextInput(attrs={'class':"input-sm"}))
password = forms.CharField(widget = forms.PasswordInput)
def clean(self, *args, **kwargs):
username = self.cleaned_data.get("username")
password = self.cleaned_data.get('password')
if username and password:
user = authenticate(username=username, password=password)
if not user:
raise forms.ValidationError("Incorrect password or User! Please try again.")
if not user.check_password(password):
raise forms.ValidationError("Incorrect password! Please try again.")
if not user.is_active:
raise forms.ValidationError("This user is no longer active!")
return super(UserLoginForm, self).clean(*args, **kwargs)
#--------------------------------
class UserRegisterForm(forms.ModelForm):
email = forms.EmailField(label = 'Email Address')
password = forms.CharField(widget = forms.PasswordInput)
class Meta:
model = User
fields = [
'username',
'email',
'password'
]
def clean_email(self):
email = self.cleaned_data.get('email')
email_qs = User.objects.filter(email=email)
if email_qs.exists():
raise forms.ValidationError("This email has already been registered!")
return email
def clean_username(self):
username = self.cleaned_data.get('username')
username_qs = User.objects.filter(username=username)
if username_qs.exists():
raise forms.ValidationError("This username has already been registered!")
return username
|
lemarcudal/sha_thedivision
|
test/mysite/webapp/forms.py
|
Python
|
apache-2.0
| 1,935
| 0.03876
|
# -*- coding: utf-8 -*-
import sys
from warnings import warn
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
if hasattr(settings, 'MODELTRANSLATION_TRANSLATION_REGISTRY'):
TRANSLATION_REGISTRY =\
getattr(settings, 'MODELTRANSLATION_TRANSLATION_REGISTRY', None)
elif hasattr(settings, 'TRANSLATION_REGISTRY'):
warn('The setting TRANSLATION_REGISTRY is deprecated, use '
'MODELTRANSLATION_TRANSLATION_REGISTRY instead.', DeprecationWarning)
TRANSLATION_REGISTRY = getattr(settings, 'TRANSLATION_REGISTRY', None)
else:
raise ImproperlyConfigured("You haven't set the "
"MODELTRANSLATION_TRANSLATION_REGISTRY "
"setting yet.")
AVAILABLE_LANGUAGES = [l[0] for l in settings.LANGUAGES]
DEFAULT_LANGUAGE = getattr(settings, 'MODELTRANSLATION_DEFAULT_LANGUAGE', None)
if DEFAULT_LANGUAGE and DEFAULT_LANGUAGE not in AVAILABLE_LANGUAGES:
raise ImproperlyConfigured('MODELTRANSLATION_DEFAULT_LANGUAGE not '
'in LANGUAGES setting.')
elif not DEFAULT_LANGUAGE:
DEFAULT_LANGUAGE = AVAILABLE_LANGUAGES[0]
# FIXME: We can't seem to override this particular setting in tests.py
CUSTOM_FIELDS =\
getattr(settings, 'MODELTRANSLATION_CUSTOM_FIELDS', ())
try:
if sys.argv[1] == 'test':
CUSTOM_FIELDS =\
getattr(settings, 'MODELTRANSLATION_CUSTOM_FIELDS',
('BooleanField',))
except IndexError:
pass
LANGUAGE_VERBOSE_NAMES = getattr(settings, 'LANGUAGE_VERBOSE_NAMES', {})
|
ilblackdragon/django-modeltranslation
|
modeltranslation/settings.py
|
Python
|
bsd-3-clause
| 1,569
| 0.003187
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
# @author : beaengine@gmail.com
from headers.BeaEnginePython import *
from nose.tools import *
class TestSuite:
def test(self):
# EVEX.256.66.0F3A.W0 25 /r ib
# vpternlogd ymm1{k1}{z}, ymm2, ymm3/m256/m32bcst, imm8
myEVEX = EVEX('EVEX.256.66.0F3A.W0')
Buffer = bytes.fromhex('{}252011'.format(myEVEX.prefix()))
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Instruction.Opcode, 0x25)
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vpternlogd')
assert_equal(myDisasm.repr(), 'vpternlogd ymm28, ymm16, ymmword ptr [r8], 11h')
# EVEX.512.66.0F3A.W0 25 /r ib
# vpternlogd zmm1{k1}{z}, zmm2, zmm3/m512/m32bcst, imm8
myEVEX = EVEX('EVEX.512.66.0F3A.W0')
Buffer = bytes.fromhex('{}252011'.format(myEVEX.prefix()))
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Instruction.Opcode, 0x25)
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vpternlogd')
assert_equal(myDisasm.repr(), 'vpternlogd zmm28, zmm16, zmmword ptr [r8], 11h')
# EVEX.256.66.0F3A.W1 25 /r ib
# vpternlogq ymm1{k1}{z}, ymm2, ymm3/m256/m64bcst, imm8
myEVEX = EVEX('EVEX.256.66.0F3A.W1')
Buffer = bytes.fromhex('{}252011'.format(myEVEX.prefix()))
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Instruction.Opcode, 0x25)
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vpternlogq')
assert_equal(myDisasm.repr(), 'vpternlogq ymm28, ymm16, ymmword ptr [r8], 11h')
# EVEX.512.66.0F3A.W1 25 /r ib
# vpternlogq zmm1{k1}{z}, zmm2, zmm3/m512/m64bcst, imm8
myEVEX = EVEX('EVEX.512.66.0F3A.W1')
Buffer = bytes.fromhex('{}252011'.format(myEVEX.prefix()))
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Instruction.Opcode, 0x25)
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vpternlogq')
assert_equal(myDisasm.repr(), 'vpternlogq zmm28, zmm16, zmmword ptr [r8], 11h')
|
0vercl0k/rp
|
src/third_party/beaengine/tests/0f3a25.py
|
Python
|
mit
| 2,835
| 0.001411
|
var1 = bool(input())
var2 = bool(input())
if var1:
print(var2)
elif v<caret>
|
siosio/intellij-community
|
python/testData/codeInsight/mlcompletion/isInConditionSimpleElif.py
|
Python
|
apache-2.0
| 78
| 0.051282
|
# -*- coding: latin-1 -*-
import re
import json
from .common import InfoExtractor
from ..utils import determine_ext
class HarkIE(InfoExtractor):
_VALID_URL = r'https?://www\.hark\.com/clips/(.+?)-.+'
_TEST = {
u'url': u'http://www.hark.com/clips/mmbzyhkgny-obama-beyond-the-afghan-theater-we-only-target-al-qaeda-on-may-23-2013',
u'file': u'mmbzyhkgny.mp3',
u'md5': u'6783a58491b47b92c7c1af5a77d4cbee',
u'info_dict': {
u'title': u"Obama: 'Beyond The Afghan Theater, We Only Target Al Qaeda' on May 23, 2013",
u'description': u'President Barack Obama addressed the nation live on May 23, 2013 in a speech aimed at addressing counter-terrorism policies including the use of drone strikes, detainees at Guantanamo Bay prison facility, and American citizens who are terrorists.',
u'duration': 11,
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group(1)
json_url = "http://www.hark.com/clips/%s.json" %(video_id)
info_json = self._download_webpage(json_url, video_id)
info = json.loads(info_json)
final_url = info['url']
return {'id': video_id,
'url' : final_url,
'title': info['name'],
'ext': determine_ext(final_url),
'description': info['description'],
'thumbnail': info['image_original'],
'duration': info['duration'],
}
|
lebabouin/CouchPotatoServer-develop
|
couchpotato/core/providers/trailer/vftrailers/youtube_dl/extractor/hark.py
|
Python
|
gpl-3.0
| 1,526
| 0.003932
|
#!/usr/bin/python
# Copyright 2003 Vladimir Prus
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE.txt or https://www.bfgroup.xyz/b2/LICENSE.txt)
# Regression test: it was possible that due to evaluation of conditional
# requirements, two different values of non-free features were present in a
# property set.
import BoostBuild
t = BoostBuild.Tester()
t.write("a.cpp", "")
t.write("jamroot.jam", """
import feature ;
import common ;
feature.feature the_feature : false true : propagated ;
rule maker ( targets * : sources * : properties * )
{
if <the_feature>false in $(properties) &&
<the_feature>true in $(properties)
{
EXIT "Oops, two different values of non-free feature" ;
}
CMD on $(targets) = [ common.file-creation-command ] ;
}
actions maker
{
$(CMD) $(<) ;
}
make a : a.cpp : maker : <variant>debug:<the_feature>true ;
""")
t.run_build_system()
t.cleanup()
|
davehorton/drachtio-server
|
deps/boost_1_77_0/tools/build/test/conditionals2.py
|
Python
|
mit
| 960
| 0
|
# -*- coding: utf8 -*-
# This file is part of PyBossa.
#
# Copyright (C) 2013 SF Isle of Man Limited
#
# PyBossa is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBossa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBossa. If not, see <http://www.gnu.org/licenses/>.
from base import web, model, Fixtures
class TestAuthentication:
@classmethod
def setup_class(cls):
cls.app = web.app.test_client()
model.rebuild_db()
Fixtures.create()
@classmethod
def teardown_class(cls):
model.rebuild_db()
def test_api_authenticate(self):
"""Test AUTHENTICATION works"""
res = self.app.get('/?api_key=%s' % Fixtures.api_key)
assert 'checkpoint::logged-in::tester' in res.data, res.data
|
geotagx/geotagx-pybossa-archive
|
test/test_authentication.py
|
Python
|
agpl-3.0
| 1,229
| 0
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
libcloud driver for the Host Virtual Inc. (VR) API
Home page http://www.vr.org/
"""
import time
try:
import simplejson as json
except ImportError:
import json
from libcloud.common.hostvirtual import HostVirtualResponse
from libcloud.common.hostvirtual import HostVirtualConnection
from libcloud.common.hostvirtual import HostVirtualException
from libcloud.compute.providers import Provider
from libcloud.compute.types import NodeState
from libcloud.compute.base import Node, NodeDriver
from libcloud.compute.base import NodeImage, NodeSize, NodeLocation
from libcloud.compute.base import NodeAuthSSHKey, NodeAuthPassword
API_ROOT = ''
NODE_STATE_MAP = {
'BUILDING': NodeState.PENDING,
'PENDING': NodeState.PENDING,
'RUNNING': NodeState.RUNNING, # server is powered up
'STOPPING': NodeState.REBOOTING,
'REBOOTING': NodeState.REBOOTING,
'STARTING': NodeState.REBOOTING,
'TERMINATED': NodeState.TERMINATED, # server is powered down
'STOPPED': NodeState.STOPPED
}
DEFAULT_NODE_LOCATION_ID = 4
class HostVirtualComputeResponse(HostVirtualResponse):
pass
class HostVirtualComputeConnection(HostVirtualConnection):
responseCls = HostVirtualComputeResponse
class HostVirtualNodeDriver(NodeDriver):
type = Provider.HOSTVIRTUAL
name = 'HostVirtual'
website = 'http://www.vr.org'
connectionCls = HostVirtualComputeConnection
features = {'create_node': ['ssh_key', 'password']}
def __init__(self, key, secure=True, host=None, port=None):
self.location = None
super(HostVirtualNodeDriver, self).__init__(key=key, secure=secure,
host=host, port=port)
def _to_node(self, data):
state = NODE_STATE_MAP[data['status']]
public_ips = []
private_ips = []
extra = {}
if 'plan_id' in data:
extra['size'] = data['plan_id']
if 'os_id' in data:
extra['image'] = data['os_id']
if 'location_id' in data:
extra['location'] = data['location_id']
if 'ip' in data:
public_ips.append(data['ip'])
node = Node(id=data['mbpkgid'], name=data['fqdn'], state=state,
public_ips=public_ips, private_ips=private_ips,
driver=self.connection.driver, extra=extra)
return node
def list_locations(self):
result = self.connection.request(API_ROOT + '/cloud/locations/').object
locations = []
for dc in result:
locations.append(NodeLocation(
dc["id"],
dc["name"],
dc["name"].split(',')[1].replace(" ", ""), # country
self))
return locations
def list_sizes(self, location=None):
params = {}
if location:
params = {'location': location.id}
result = self.connection.request(
API_ROOT + '/cloud/sizes/',
data=json.dumps(params)).object
sizes = []
for size in result:
n = NodeSize(id=size['plan_id'],
name=size['plan'],
ram=size['ram'],
disk=size['disk'],
bandwidth=size['transfer'],
price=size['price'],
driver=self.connection.driver)
sizes.append(n)
return sizes
def list_images(self):
result = self.connection.request(API_ROOT + '/cloud/images/').object
images = []
for image in result:
i = NodeImage(id=image["id"],
name=image["os"],
driver=self.connection.driver,
extra=image)
del i.extra['id']
del i.extra['os']
images.append(i)
return images
def list_nodes(self):
result = self.connection.request(API_ROOT + '/cloud/servers/').object
nodes = []
for value in result:
node = self._to_node(value)
nodes.append(node)
return nodes
def _wait_for_node(self, node_id, timeout=30, interval=5.0):
"""
:param node_id: ID of the node to wait for.
:type node_id: ``int``
:param timeout: Timeout (in seconds).
:type timeout: ``int``
:param interval: How long to wait (in seconds) between each attempt.
:type interval: ``float``
"""
# poll until we get a node
for i in range(0, timeout, int(interval)):
try:
node = self.ex_get_node(node_id)
return node
except HostVirtualException:
time.sleep(interval)
        raise HostVirtualException(412, 'Timed out getting node details')
def create_node(self, **kwargs):
dc = None
size = kwargs['size']
image = kwargs['image']
auth = self._get_and_check_auth(kwargs.get('auth'))
params = {'plan': size.name}
dc = DEFAULT_NODE_LOCATION_ID
if 'location' in kwargs:
dc = kwargs['location'].id
# simply order a package first
result = self.connection.request(API_ROOT + '/cloud/buy/',
data=json.dumps(params),
method='POST').object
# create a stub node
stub_node = self._to_node({
'mbpkgid': result['id'],
'status': 'PENDING',
'fqdn': kwargs['name'],
'plan_id': size.id,
'os_id': image.id,
'location_id': dc
})
# provisioning a server using the stub node
self.ex_provision_node(node=stub_node, auth=auth)
node = self._wait_for_node(stub_node.id)
if getattr(auth, 'generated', False):
node.extra['password'] = auth.password
return node
def reboot_node(self, node):
params = {'force': 0, 'mbpkgid': node.id}
result = self.connection.request(
API_ROOT + '/cloud/server/reboot',
data=json.dumps(params),
method='POST').object
return bool(result)
def destroy_node(self, node):
params = {
'mbpkgid': node.id,
#'reason': 'Submitted through Libcloud API'
}
result = self.connection.request(
API_ROOT + '/cloud/cancel', data=json.dumps(params),
method='POST').object
return bool(result)
def ex_get_node(self, node_id):
"""
Get a single node.
:param node_id: id of the node that we need the node object for
:type node_id: ``str``
:rtype: :class:`Node`
"""
params = {'mbpkgid': node_id}
result = self.connection.request(
API_ROOT + '/cloud/server', params=params).object
node = self._to_node(result)
return node
def ex_stop_node(self, node):
"""
Stop a node.
:param node: Node which should be used
:type node: :class:`Node`
:rtype: ``bool``
"""
params = {'force': 0, 'mbpkgid': node.id}
result = self.connection.request(
API_ROOT + '/cloud/server/shutdown',
data=json.dumps(params),
method='POST').object
return bool(result)
def ex_start_node(self, node):
"""
Start a node.
:param node: Node which should be used
:type node: :class:`Node`
:rtype: ``bool``
"""
params = {'mbpkgid': node.id}
result = self.connection.request(
API_ROOT + '/cloud/server/start',
data=json.dumps(params),
method='POST').object
return bool(result)
def ex_provision_node(self, **kwargs):
"""
Provision a server on a VR package and get it booted
:keyword node: node which should be used
:type node: :class:`Node`
:keyword image: The distribution to deploy on your server (mandatory)
:type image: :class:`NodeImage`
:keyword auth: an SSH key or root password (mandatory)
:type auth: :class:`NodeAuthSSHKey` or :class:`NodeAuthPassword`
:keyword location: which datacenter to create the server in
:type location: :class:`NodeLocation`
:return: Node representing the newly built server
:rtype: :class:`Node`
"""
node = kwargs['node']
if 'image' in kwargs:
image = kwargs['image']
else:
image = node.extra['image']
params = {
'mbpkgid': node.id,
'image': image,
'fqdn': node.name,
'location': node.extra['location'],
}
auth = kwargs['auth']
ssh_key = None
password = None
if isinstance(auth, NodeAuthSSHKey):
ssh_key = auth.pubkey
params['ssh_key'] = ssh_key
elif isinstance(auth, NodeAuthPassword):
password = auth.password
params['password'] = password
if not ssh_key and not password:
raise HostVirtualException(500, "Need SSH key or Root password")
result = self.connection.request(API_ROOT + '/cloud/server/build',
data=json.dumps(params),
method='POST').object
return bool(result)
def ex_delete_node(self, node):
"""
Delete a node.
:param node: Node which should be used
:type node: :class:`Node`
:rtype: ``bool``
"""
params = {'mbpkgid': node.id}
result = self.connection.request(
API_ROOT + '/cloud/server/delete', data=json.dumps(params),
method='POST').object
return bool(result)
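# --- Hedged usage sketch (editorial addition, not part of the original driver) ---
# One plausible way to exercise the class above end to end: list the available
# plans, images and locations, then create a node with a root password.
# 'MY_API_KEY' and the FQDN are placeholders; a real API key and network access
# to the vr.org API are assumed.
def _example_create_node(api_key='MY_API_KEY'):
    driver = HostVirtualNodeDriver(api_key)
    size = driver.list_sizes()[0]          # first plan returned by the API
    image = driver.list_images()[0]        # first available OS image
    location = driver.list_locations()[0]  # first datacenter
    # create_node orders the package, provisions it and polls until it is ready
    node = driver.create_node(name='demo.example.com', size=size, image=image,
                              location=location,
                              auth=NodeAuthPassword('change-me'))
    return node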
|
ClusterHQ/libcloud
|
libcloud/compute/drivers/hostvirtual.py
|
Python
|
apache-2.0
| 10,709
| 0.000093
|
#!/usr/bin/env python
# imp general
import IMP
import IMP.core
# our project
from IMP.isd import Weight
from IMP.isd import WeightMover
# unit testing framework
import IMP.test
class TestWeightMover(IMP.test.TestCase):
"""tests weight setup"""
def setUp(self):
IMP.test.TestCase.setUp(self)
# IMP.set_log_level(IMP.MEMORY)
IMP.set_log_level(0)
self.m = IMP.Model()
self.w = Weight.setup_particle(IMP.Particle(self.m))
self.w.set_weights_are_optimized(True)
self.w.add_weight()
self.w.add_weight()
self.wm = WeightMover(self.w, 0.1)
self.mc = IMP.core.MonteCarlo(self.m)
self.mc.set_scoring_function([])
self.mc.set_return_best(False)
self.mc.set_kt(1.0)
self.mc.add_mover(self.wm)
def test_run(self):
"Test weight mover mc run"
self.setUp()
for n in range(5):
for j in range(10):
self.mc.optimize(10)
ws = self.w.get_weights()
sum = 0
for k in range(self.w.get_number_of_states()):
sum += self.w.get_weight(k)
self.assertAlmostEqual(sum, 1.0, delta=0.0000001)
self.w.add_weight()
if __name__ == '__main__':
IMP.test.main()
|
shanot/imp
|
modules/isd/test/test_mc_WeightMover.py
|
Python
|
gpl-3.0
| 1,309
| 0
|
from django.contrib import admin
# import your models
# Register your models here.
# admin.site.register(YourModel)
|
datamade/la-metro-councilmatic
|
lametro/admin.py
|
Python
|
mit
| 116
| 0.008621
|
# 2016-7-1
# build by qianqians
# tools
def gentypetocsharp(typestr):
if typestr == 'int':
return 'Int64'
elif typestr == 'string':
return 'String'
elif typestr == 'array':
return 'ArrayList'
elif typestr == 'float':
return 'Double'
elif typestr == 'bool':
return 'Boolean'
elif typestr == 'table':
return 'Hashtable'
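# Hedged usage note (editorial addition, not part of the original file): the
# mapping above covers the six juggle wire types; any other string falls
# through and the function returns None.
if __name__ == '__main__':
    assert gentypetocsharp('array') == 'ArrayList'
    assert gentypetocsharp('table') == 'Hashtable'
    assert gentypetocsharp('unknown') is None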
|
yinchunlong/abelkhan-1
|
juggle/gen/csharp/tools.py
|
Python
|
mit
| 394
| 0.007614
|
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from collections import defaultdict
from operator import attrgetter
from flask import render_template, session
from pytz import utc
from sqlalchemy import Date, cast
from sqlalchemy.orm import contains_eager, joinedload, subqueryload, undefer
from indico.core.db import db
from indico.modules.events.contributions.models.contributions import Contribution
from indico.modules.events.models.events import Event
from indico.modules.events.models.persons import EventPersonLink
from indico.modules.events.sessions.models.blocks import SessionBlock
from indico.modules.events.sessions.models.sessions import Session
from indico.modules.events.timetable.legacy import TimetableSerializer, serialize_event_info
from indico.modules.events.timetable.models.breaks import Break
from indico.modules.events.timetable.models.entries import TimetableEntry, TimetableEntryType
from indico.util.caching import memoize_request
from indico.util.date_time import format_time, get_day_end, iterdays
from indico.util.i18n import _
from indico.web.flask.templating import get_template_module
from indico.web.forms.colors import get_colors
def _query_events(categ_ids, day_start, day_end):
event = db.aliased(Event)
dates_overlap = lambda t: (t.start_dt >= day_start) & (t.start_dt <= day_end)
return (db.session.query(Event.id, TimetableEntry.start_dt)
.filter(
Event.category_chain_overlaps(categ_ids),
~Event.is_deleted,
((Event.timetable_entries.any(dates_overlap(TimetableEntry))) |
(Event.query.exists().where(
Event.happens_between(day_start, day_end) &
(Event.id == event.id)))))
.group_by(Event.id, TimetableEntry.start_dt)
.order_by(Event.id, TimetableEntry.start_dt)
.join(TimetableEntry,
(TimetableEntry.event_id == Event.id) & (dates_overlap(TimetableEntry)),
isouter=True))
def _query_blocks(event_ids, dates_overlap, detail_level='session'):
options = [subqueryload('session').joinedload('blocks').joinedload('person_links')]
if detail_level == 'contribution':
options.append(contains_eager(SessionBlock.timetable_entry).joinedload(TimetableEntry.children))
else:
options.append(contains_eager(SessionBlock.timetable_entry))
return (SessionBlock.query
.filter(~Session.is_deleted,
Session.event_id.in_(event_ids),
dates_overlap(TimetableEntry))
.options(*options)
.join(TimetableEntry)
.join(Session))
def find_latest_entry_end_dt(obj, day=None):
"""Get the latest end datetime for timetable entries within the object.
:param obj: The :class:`Event` or :class:`SessionBlock` that will be used to
look for timetable entries.
:param day: The local event date to look for timetable entries. Applicable only
to ``Event``.
:return: The end datetime of the timetable entry finishing the latest. ``None``
if no entry was found.
"""
if isinstance(obj, Event):
if day is None:
raise ValueError('No day specified for event.')
if not (obj.start_dt_local.date() <= day <= obj.end_dt_local.date()):
raise ValueError('Day out of event bounds.')
entries = obj.timetable_entries.filter(TimetableEntry.parent_id.is_(None),
cast(TimetableEntry.start_dt.astimezone(obj.tzinfo), Date) == day).all()
elif isinstance(obj, SessionBlock):
if day is not None:
raise ValueError('Day specified for session block.')
entries = obj.timetable_entry.children
else:
raise ValueError(f'Invalid object type {type(obj)}')
return max(entries, key=attrgetter('end_dt')).end_dt if entries else None
def find_next_start_dt(duration, obj, day=None, force=False):
"""Find the next most convenient start date fitting a duration within an object.
:param duration: Duration to fit into the event/session-block.
:param obj: The :class:`Event` or :class:`SessionBlock` the duration needs to
fit into.
:param day: The local event date where to fit the duration in case the object is
an event.
:param force: Gives earliest datetime if the duration doesn't fit.
:return: The end datetime of the latest scheduled entry in the object if the
             duration fits then. If it doesn't, the latest datetime that fits it.
``None`` if the duration cannot fit in the object, earliest datetime
if ``force`` is ``True``.
"""
if isinstance(obj, Event):
if day is None:
raise ValueError('No day specified for event.')
if not (obj.start_dt_local.date() <= day <= obj.end_dt_local.date()):
raise ValueError('Day out of event bounds.')
earliest_dt = obj.start_dt if obj.start_dt_local.date() == day else obj.start_dt.replace(hour=8, minute=0)
latest_dt = obj.end_dt if obj.start_dt.date() == day else get_day_end(day, tzinfo=obj.tzinfo)
elif isinstance(obj, SessionBlock):
if day is not None:
raise ValueError('Day specified for session block.')
earliest_dt = obj.timetable_entry.start_dt
latest_dt = obj.timetable_entry.end_dt
else:
raise ValueError(f'Invalid object type {type(obj)}')
max_duration = latest_dt - earliest_dt
if duration > max_duration:
return earliest_dt if force else None
start_dt = find_latest_entry_end_dt(obj, day=day) or earliest_dt
end_dt = start_dt + duration
if end_dt > latest_dt:
start_dt = latest_dt - duration
return start_dt
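# --- Hedged usage sketch (editorial addition, not part of the original module) ---
# Illustrates the contract described in the docstring above: try to fit a
# 30-minute slot on a given local event day, falling back to the earliest
# possible datetime when it does not fit. `event` is assumed to be an Indico
# `Event` instance and `day` a local date within its bounds.
def _example_next_30_minute_slot(event, day):
    from datetime import timedelta
    duration = timedelta(minutes=30)
    start_dt = find_next_start_dt(duration, event, day=day)
    if start_dt is None:
        # does not fit anywhere on that day; take the earliest slot anyway
        start_dt = find_next_start_dt(duration, event, day=day, force=True)
    return start_dt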
def get_category_timetable(categ_ids, start_dt, end_dt, detail_level='event', tz=utc, from_categ=None, grouped=True,
includible=lambda item: True):
"""Retrieve time blocks that fall within a specific time interval
for a given set of categories.
:param categ_ids: iterable containing list of category IDs
:param start_dt: start of search interval (``datetime``, expected
to be in display timezone)
    :param end_dt: end of search interval (``datetime``, expected
to be in display timezone)
:param detail_level: the level of detail of information
(``event|session|contribution``)
:param tz: the ``timezone`` information should be displayed in
:param from_categ: ``Category`` that will be taken into account to calculate
visibility
:param grouped: Whether to group results by start date
:param includible: a callable, to allow further arbitrary custom filtering (maybe from 3rd
party plugins) on whether to include (returns True) or not (returns False)
each ``detail`` item. Default always returns True.
:returns: a dictionary containing timetable information in a
structured way. See source code for examples.
"""
day_start = start_dt.astimezone(utc)
day_end = end_dt.astimezone(utc)
dates_overlap = lambda t: (t.start_dt >= day_start) & (t.start_dt <= day_end)
items = defaultdict(lambda: defaultdict(list))
# first of all, query TimetableEntries/events that fall within
# specified range of dates (and category set)
events = _query_events(categ_ids, day_start, day_end)
if from_categ:
events = events.filter(Event.is_visible_in(from_categ.id))
for eid, tt_start_dt in events:
if tt_start_dt:
items[eid][tt_start_dt.astimezone(tz).date()].append(tt_start_dt)
else:
items[eid] = None
# then, retrieve detailed information about the events
event_ids = set(items)
query = (Event.query
.filter(Event.id.in_(event_ids))
.options(subqueryload(Event.person_links).joinedload(EventPersonLink.person),
joinedload(Event.own_room).noload('owner'),
joinedload(Event.own_venue),
joinedload(Event.category).undefer('effective_icon_data'),
undefer('effective_protection_mode')))
scheduled_events = defaultdict(list)
ongoing_events = []
events = []
for e in query:
if not includible(e):
continue
if grouped:
local_start_dt = e.start_dt.astimezone(tz).date()
local_end_dt = e.end_dt.astimezone(tz).date()
if items[e.id] is None:
                # if there is no TimetableEntry, the event has no timetable in that interval
for day in iterdays(max(start_dt.date(), local_start_dt), min(end_dt.date(), local_end_dt)):
# if the event starts on this date, we've got a time slot
if day.date() == local_start_dt:
scheduled_events[day.date()].append((e.start_dt, e))
else:
ongoing_events.append(e)
else:
for start_d, start_dts in items[e.id].items():
scheduled_events[start_d].append((start_dts[0], e))
else:
events.append(e)
# result['events'][date(...)] -> [(datetime(....), Event(...))]
# result[event_id]['contribs'][date(...)] -> [(TimetableEntry(...), Contribution(...))]
# result['ongoing_events'] = [Event(...)]
if grouped:
result = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
else:
result = defaultdict(lambda: defaultdict(list))
result.update({
'events': scheduled_events if grouped else events,
'ongoing_events': ongoing_events
})
# according to detail level, ask for extra information from the DB
if detail_level != 'event':
query = _query_blocks(event_ids, dates_overlap, detail_level)
if grouped:
for b in query:
start_date = b.timetable_entry.start_dt.astimezone(tz).date()
result[b.session.event_id]['blocks'][start_date].append((b.timetable_entry, b))
else:
for b in query:
result[b.session.event_id]['blocks'].append(b)
if detail_level == 'contribution':
query = (Contribution.query
.filter(Contribution.event_id.in_(event_ids),
dates_overlap(TimetableEntry),
~Contribution.is_deleted)
.options(contains_eager(Contribution.timetable_entry),
joinedload(Contribution.person_links))
.join(TimetableEntry))
if grouped:
for c in query:
start_date = c.timetable_entry.start_dt.astimezone(tz).date()
result[c.event_id]['contribs'][start_date].append((c.timetable_entry, c))
else:
for c in query:
result[c.event_id]['contributions'].append(c)
query = (Break.query
.filter(TimetableEntry.event_id.in_(event_ids), dates_overlap(TimetableEntry))
.options(contains_eager(Break.timetable_entry))
.join(TimetableEntry))
if grouped:
for b in query:
start_date = b.timetable_entry.start_dt.astimezone(tz).date()
result[b.timetable_entry.event_id]['breaks'][start_date].append((b.timetable_entry, b))
else:
for b in query:
result[b.timetable_entry.event_id]['breaks'].append(b)
return result
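# --- Hedged usage sketch (editorial addition, not part of the original module) ---
# Fetches a week of events for two category ids, grouped by local day, mirroring
# the result layout documented in the comments above. The category ids, the date
# range and the display timezone are illustrative assumptions.
def _example_week_overview():
    from datetime import datetime
    from pytz import timezone
    tz = timezone('Europe/Zurich')
    start_dt = tz.localize(datetime(2021, 5, 3, 0, 0))
    end_dt = tz.localize(datetime(2021, 5, 9, 23, 59))
    timetable = get_category_timetable([1, 2], start_dt, end_dt, detail_level='event', tz=tz)
    # timetable['events'][date(...)] -> [(datetime(...), Event(...))]
    return timetable['events'], timetable['ongoing_events']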
def render_entry_info_balloon(entry, editable=False, sess=None, is_session_timetable=False):
if entry.break_:
return render_template('events/timetable/balloons/break.html', break_=entry.break_, editable=editable,
can_manage_event=entry.event.can_manage(session.user), color_list=get_colors(),
event_locked=entry.event.is_locked,
is_session_timetable=is_session_timetable)
elif entry.contribution:
return render_template('events/timetable/balloons/contribution.html', contrib=entry.contribution,
editable=editable,
can_manage_event=entry.event.can_manage(session.user),
can_manage_contributions=sess.can_manage_contributions(session.user) if sess else True,
event_locked=entry.event.is_locked)
elif entry.session_block:
return render_template('events/timetable/balloons/block.html', block=entry.session_block, editable=editable,
can_manage_session=sess.can_manage(session.user) if sess else True,
can_manage_blocks=sess.can_manage_blocks(session.user) if sess else True,
color_list=get_colors(), event_locked=entry.event.is_locked,
is_session_timetable=is_session_timetable)
else:
raise ValueError('Invalid entry')
def render_session_timetable(session, timetable_layout=None, management=False):
if not session.start_dt:
# no scheduled sessions present
return ''
timetable_data = TimetableSerializer(session.event).serialize_session_timetable(session, without_blocks=True,
strip_empty_days=True)
event_info = serialize_event_info(session.event)
tpl = get_template_module('events/timetable/_timetable.html')
return tpl.render_timetable(timetable_data, event_info, timetable_layout=timetable_layout, management=management)
def get_session_block_entries(event, day):
"""Return a list of event top-level session blocks for the given `day`."""
return (event.timetable_entries
.filter(db.cast(TimetableEntry.start_dt.astimezone(event.tzinfo), db.Date) == day.date(),
TimetableEntry.type == TimetableEntryType.SESSION_BLOCK)
.all())
def shift_following_entries(entry, shift, session_=None):
"""Reschedule entries starting after the given entry by the given shift."""
query = entry.siblings_query.filter(TimetableEntry.start_dt >= entry.end_dt)
if session_ and not entry.parent:
query.filter(TimetableEntry.type == TimetableEntryType.SESSION_BLOCK,
TimetableEntry.session_block.has(session_id=session_.id))
entries = query.all()
if not entries:
return []
for sibling in entries:
sibling.move(sibling.start_dt + shift)
def get_timetable_offline_pdf_generator(event):
from indico.legacy.pdfinterface.conference import TimetablePDFFormat, TimeTablePlain
pdf_format = TimetablePDFFormat()
return TimeTablePlain(event, session.user, sortingCrit=None, ttPDFFormat=pdf_format, pagesize='A4',
fontsize='normal')
def get_time_changes_notifications(changes, tzinfo, entry=None):
notifications = []
for obj, change in changes.items():
if entry:
if entry.object == obj:
continue
if not isinstance(obj, Event) and obj.timetable_entry in entry.children:
continue
msg = None
if isinstance(obj, Event):
if 'start_dt' in change:
new_time = change['start_dt'][1]
msg = _('Event start time changed to {}')
elif 'end_dt' in change:
new_time = change['end_dt'][1]
msg = _('Event end time changed to {}')
else:
raise ValueError('Invalid change in event.')
elif isinstance(obj, SessionBlock):
if 'start_dt' in change:
new_time = change['start_dt'][1]
msg = _('Session block start time changed to {}')
elif 'end_dt' in change:
new_time = change['end_dt'][1]
msg = _('Session block end time changed to {}')
else:
raise ValueError('Invalid change in session block.')
if msg:
notifications.append(msg.format(format_time(new_time, timezone=tzinfo)))
return notifications
@memoize_request
def get_top_level_entries(event):
return event.timetable_entries.filter_by(parent_id=None).all()
@memoize_request
def get_nested_entries(event):
entries = event.timetable_entries.filter(TimetableEntry.parent_id.isnot(None)).all()
result = defaultdict(list)
for entry in entries:
result[entry.parent_id].append(entry)
return result
|
ThiefMaster/indico
|
indico/modules/events/timetable/util.py
|
Python
|
mit
| 16,943
| 0.003305
|
#------------------------------------------------------------------------------
# Name: libLog.py
# Description: Generic logging functions within BAG Extract+
# Author: Matthijs van der Deijl
# Author: Just van den Broecke - port to NLExtract (2015)
#
# Version: 1.3
# - improved error handling
# Date: 16 December 2009
#
# Version: 1.2
# Date: 24 November 2009
#
# Ministerie van Volkshuisvesting, Ruimtelijke Ordening en Milieubeheer
#------------------------------------------------------------------------------
import wx
# Simple log window: writes text into a text panel
class LogScherm:
def __init__(self, text_ctrl):
self.text_ctrl = text_ctrl
def __call__(self, tekst):
self.schrijf(tekst)
def start(self):
i = self.text_ctrl.GetNumberOfLines()
self.text_ctrl.Clear()
while i > 0:
self.text_ctrl.AppendText(" \n")
i -= 1
self.text_ctrl.Clear()
def schrijf(self, tekst):
self.text_ctrl.AppendText("\n" + tekst)
self.text_ctrl.Refresh()
self.text_ctrl.Update()
# See http://www.blog.pythonlibrary.org/2010/05/22/wxpython-and-threads/
# (use events when in multithreaded mode)
# Define notification event for thread completion
EVT_SCHRIJF_ID = wx.NewId()
def EVT_SCHRIJF(win, func):
"""Define Result Event."""
win.Connect(-1, -1, EVT_SCHRIJF_ID, func)
class SchrijfEvent(wx.PyEvent):
"""Simple event to carry arbitrary result data."""
def __init__(self, tekst):
"""Init Result Event."""
wx.PyEvent.__init__(self)
self.SetEventType(EVT_SCHRIJF_ID)
self.tekst = tekst
class AsyncLogScherm(LogScherm):
def __init__(self, text_ctrl):
LogScherm.__init__(self, text_ctrl)
# Set up event handler for any worker thread results
EVT_SCHRIJF(self.text_ctrl, self.on_schrijf_event)
def on_schrijf_event(self, evt):
self.schrijf(evt.tekst)
def __call__(self, tekst):
        # Instead of writing directly, post a "schrijf" event
wx.PostEvent(self.text_ctrl, SchrijfEvent(tekst))
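# --- Hedged usage sketch (editorial addition, not part of the original module) ---
# One plausible way to wire AsyncLogScherm into a small window and log from a
# worker thread; the frame/text-control setup is illustrative and assumes the
# classic wxPython API used above.
def _example_log_window():
    import threading
    app = wx.App(False)
    frame = wx.Frame(None, title='Log')
    text_ctrl = wx.TextCtrl(frame, style=wx.TE_MULTILINE | wx.TE_READONLY)
    logger = AsyncLogScherm(text_ctrl)
    logger.start()
    # any worker thread may call the log object; it posts a SchrijfEvent
    # that is handled on the GUI thread
    threading.Thread(target=logger, args=('worker started',)).start()
    frame.Show()
    app.MainLoop()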
|
sebastic/NLExtract
|
bag/src/loggui.py
|
Python
|
gpl-3.0
| 2,159
| 0.003705
|
#!/usr/bin/python
#
# Project Kimchi
#
# Copyright IBM, Corp. 2013
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import errno
import os
import subprocess
from kimchi.config import config, paths
WS_TOKENS_DIR = '/var/lib/kimchi/vnc-tokens'
def new_ws_proxy():
try:
os.makedirs(WS_TOKENS_DIR, mode=0755)
except OSError as e:
if e.errno == errno.EEXIST:
pass
cert = config.get('server', 'ssl_cert')
key = config.get('server', 'ssl_key')
if not (cert and key):
cert = '%s/kimchi-cert.pem' % paths.conf_dir
key = '%s/kimchi-key.pem' % paths.conf_dir
cmd = os.path.join(os.path.dirname(__file__), 'websockify.py')
args = ['python', cmd, config.get('display', 'display_proxy_port'),
'--target-config', WS_TOKENS_DIR, '--cert', cert, '--key', key,
'--web', os.path.join(paths.ui_dir, 'pages/websockify'),
'--ssl-only']
p = subprocess.Popen(args, close_fds=True)
return p
def add_proxy_token(name, port):
with open(os.path.join(WS_TOKENS_DIR, name), 'w') as f:
f.write('%s: localhost:%s' % (name.encode('utf-8'), port))
def remove_proxy_token(name):
try:
os.unlink(os.path.join(WS_TOKENS_DIR, name))
except OSError:
pass
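# --- Hedged usage sketch (editorial addition, not part of the original module) ---
# Starts the websockify proxy and registers/removes a token for one guest's VNC
# display; the guest name and port below are illustrative.
def _example_proxy_lifecycle():
    proxy = new_ws_proxy()
    add_proxy_token('demo-guest', 5901)
    # ... serve the noVNC client while the guest console is being viewed ...
    remove_proxy_token('demo-guest')
    proxy.terminate()
    return proxy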
|
ssdxiao/kimchi
|
src/kimchi/vnc.py
|
Python
|
lgpl-2.1
| 1,961
| 0.00051
|
import logging
from datetime import date as _date, timedelta
from typing import Dict, List
from fahrplan.exception import FahrplanError
from fahrplan.xml import XmlWriter, XmlSerializable
from .conference import Conference
from .day import Day
from .event import Event
from .room import Room
log = logging.getLogger(__name__)
class Schedule(XmlSerializable):
def __init__(self, conference: Conference, days: Dict[int, Day] = None, version: str = "1.0"):
self.conference = conference
self.conference.schedule = self
if days:
assert len(days) == conference.day_count
self.days = days
else:
# TODO (MO) document automatic day generation
# also this should be refactored into something like generate_days
if conference.day_count and not conference.start:
raise FahrplanError("conference.start is not set, "
"cannot automatically create days.")
self.days = {}
for i in range(conference.day_count):
index = i + 1
date: _date = conference.start + timedelta(i)
self.days[index] = Day(index=index, date=date)
for day in self.days.values():
day.schedule = self
self.version = version
def add_day(self, day: Day):
"""
        Add a day to the schedule. Note that this day will not have any rooms added to it yet.
:return: None
"""
self.days[day.index] = day
day.schedule = self
self.conference.day_count += 1
def add_room(self, name: str, day_filter: List[int] = None):
"""
Adds a room to the days given in day_filter, or all days.
:param name: Name of the room to be added.
:param day_filter: List of day indices to create the room for. If empty, use all days.
:return: None
"""
for day in self.days.values():
if not day_filter or day.index in day_filter:
day.add_room(Room(name))
def add_event(self, day: int, room: str, event: Event):
self.days[day].add_event(room, event)
def merge(self, other: 'Schedule'):
if self.conference.acronym != other.conference.acronym:
log.warning(f'Conference acronym mismatch: "{self.conference.acronym}" != '
f'"{other.conference.acronym}". Are you sure you are using compatible data?')
for index, day in other.days.items():
if index in self.days:
self.days[index].merge(day)
else:
self.days[index] = day
day.schedule = self
if len(self.days) != self.conference.day_count:
log.warning('Day count mismatch, adjusting.')
return self # needed to be able to chain calls
def has_collision(self, new_event: 'Event'):
for day in self.days.values():
for room in day.rooms.values():
for event in room.events.values():
if event.slug == new_event.slug:
log.error(f'Duplicate slug "{event.slug}"')
return True
if event.id == new_event.id:
log.error(f'Duplicate event id "{event.id}"')
return True
if event.guid == new_event.guid:
log.error(f'Duplicate guid "{event.guid}"')
return True
else:
return False
def append_xml(self, xml: XmlWriter, extended: bool):
with xml.context("schedule"):
xml.tag("version", self.version)
xml.append_object(self.conference, extended)
for day in self.days.values():
xml.append_object(day, extended)
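# --- Hedged usage sketch (editorial addition, not part of the original module) ---
# Builds a minimal schedule around an already constructed Conference and Event
# (their constructors live elsewhere in this package and are assumed here),
# adds a room to day 1 and schedules the event there after a collision check.
def _example_build_schedule(conference: Conference, event: Event) -> Schedule:
    schedule = Schedule(conference)
    schedule.add_room('Main Hall', day_filter=[1])
    if not schedule.has_collision(event):
        schedule.add_event(day=1, room='Main Hall', event=event)
    return schedule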
|
zuntrax/schedule-ng
|
fahrplan/model/schedule.py
|
Python
|
gpl-3.0
| 3,835
| 0.001304
|