| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
# coding:utf-8
# Functional programming
def add(a, b):
return a + b
def fprintf(fmt, *args):
"""args is tuple"""
print fmt % args
print "{0:s}".format("ljinshuan")
fprintf("%s,%d,%s", "xxx", 1, "gerg")
def fprintf(fmt, **args):
"""args is map"""
print args
fprintf("{name} ,{age}", name="ljinshuan", age=10)
# Decorators
event_handers = {}
def eventhander(event):
def register_function(func):
event_handers[event] = func
return func
return register_function
@eventhander("click")  # returns a function whose argument is the handle_click(msg) function
def handle_click(msg):
print msg
event_handers["click"]("heheheh")
# just like:
temp = eventhander("click")
handle_click = temp(handle_click)
# Generators
def countdown(n):
print "Begin Counting"
while n > 0:
yield n
n -= 1
c = countdown(10)
for x in c:
print x,
# Coroutines
def receiver():
print "Begin receiver"
while True:
n = (yield)
print "Got %d" % n
r = receiver()
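# next() advances the coroutine to its first (yield) so that send() can deliver a value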
r.next()
r.send(1)
r.close()
# Return a value at the same time as receiving one
def line_splitter(delimiter=None):
print "Ready to split"
result = None
while True:
line = (yield result)
result = line.split(delimiter)
s = line_splitter(",")
s.next()
print s.send("A,B,C")
print s.send("100,200,300")
s.close()
# List comprehensions
nums = [1, 2, 3, 4, 5]
squares = [x ** 2 for x in nums if x % 2 != 0]
print squares
# Generator expressions
squares2 = (x ** 2 for x in nums if x % 2 != 0)
print squares2.next()
print eval("2 + 3")
exec("for x in xrange(1, 10):print x,")
| ljinshuan/LearningPython | base/Chapter6.py | Python | apache-2.0 | 1,591 |
# Copyright 2015-2017 ProfitBricks GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from profitbricks.client import ProfitBricksService, Datacenter, Volume
from profitbricks.errors import PBError, PBNotAuthorizedError, PBNotFoundError, PBValidationError
from helpers import configuration
from helpers.resources import resource
class TestErrors(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.resource = resource()
cls.client = ProfitBricksService(
username=configuration.USERNAME,
password=configuration.PASSWORD,
headers=configuration.HEADERS)
cls.datacenter = cls.client.create_datacenter(
datacenter=Datacenter(**cls.resource['datacenter']))
@classmethod
def tearDownClass(cls):
cls.client.delete_datacenter(datacenter_id=cls.datacenter['id'])
def test_pb_not_found(self):
try:
self.client.get_datacenter("fake_id")
except PBError as err:
self.assertTrue(isinstance(err, PBNotFoundError))
def test_pb_unauthorized_error(self):
try:
self.client = ProfitBricksService(
username=configuration.USERNAME + "1",
password=configuration.PASSWORD,
headers=configuration.HEADERS)
self.client.list_datacenters()
except PBError as err:
self.assertTrue(isinstance(err, PBNotAuthorizedError))
def test_pb_validation_error(self):
try:
i = Volume(
name='Explicitly created volume',
size=5,
disk_type='HDD',
image='fake_image_id',
bus='VIRTIO')
self.client.create_volume(datacenter_id=self.datacenter['id'], volume=i)
except PBError as err:
self.assertTrue(isinstance(err, PBValidationError))
if __name__ == '__main__':
unittest.main()
| StackPointCloud/profitbricks-sdk-python | tests/test_errors.py | Python | apache-2.0 | 2,455 |
"""Auto-generated file, do not edit by hand. NZ metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_NZ = PhoneMetadata(id='NZ', country_code=None, international_prefix=None,
general_desc=PhoneNumberDesc(national_number_pattern='[14]\\d{2,3}', possible_length=(3, 4)),
emergency=PhoneNumberDesc(national_number_pattern='111', example_number='111', possible_length=(3,)),
short_code=PhoneNumberDesc(national_number_pattern='111|4098', example_number='111', possible_length=(3, 4)),
sms_services=PhoneNumberDesc(national_number_pattern='4098', example_number='4098', possible_length=(4,)),
short_data=True)
| gencer/python-phonenumbers | python/phonenumbers/shortdata/region_NZ.py | Python | apache-2.0 | 673 |
# Copyright 2013-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test mongo using the synchronizer, i.e. as it would be used by an
user
"""
import os
import sys
import time
from bson import SON
from gridfs import GridFS
sys.path[0:0] = [""]
from mongo_connector.doc_managers.mongo_doc_manager import DocManager
from mongo_connector.connector import Connector
from mongo_connector.util import retry_until_ok
from mongo_connector.test_utils import (ReplicaSet,
Server,
connector_opts,
assert_soon,
close_client)
from tests import unittest
class MongoTestCase(unittest.TestCase):
use_single_meta_collection = False
@classmethod
def setUpClass(cls):
cls.standalone = Server().start()
cls.mongo_doc = DocManager(cls.standalone.uri)
cls.mongo_conn = cls.standalone.client()
cls.mongo = cls.mongo_conn['test']['test']
@classmethod
def tearDownClass(cls):
close_client(cls.mongo_conn)
cls.standalone.stop()
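# _search (below) yields every document replicated into test.test, then scans the
# __mongo_connector metadata collection and, for entries referencing a GridFS file,
# yields them with 'filename' and 'content' read back from GridFS.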
def _search(self, **kwargs):
for doc in self.mongo.find(**kwargs):
yield doc
fs = GridFS(self.mongo_conn['test'], 'test')
collection_name = 'test.test'
if self.use_single_meta_collection:
collection_name = '__oplog'
for doc in self.mongo_conn['__mongo_connector'][collection_name].find():
if doc.get('gridfs_id'):
for f in fs.find({'_id': doc['gridfs_id']}):
doc['filename'] = f.filename
doc['content'] = f.read()
yield doc
def _remove(self):
self.mongo_conn['test']['test'].drop()
self.mongo_conn['test']['test.files'].drop()
self.mongo_conn['test']['test.chunks'].drop()
class TestMongo(MongoTestCase):
""" Tests the mongo instance
"""
@classmethod
def setUpClass(cls):
MongoTestCase.setUpClass()
cls.repl_set = ReplicaSet().start()
cls.conn = cls.repl_set.client()
@classmethod
def tearDownClass(cls):
""" Kills cluster instance
"""
MongoTestCase.tearDownClass()
cls.repl_set.stop()
def tearDown(self):
self.connector.join()
def setUp(self):
try:
os.unlink("oplog.timestamp")
except OSError:
pass
self._remove()
self.connector = Connector(
mongo_address=self.repl_set.uri,
ns_set=['test.test'],
doc_managers=(self.mongo_doc,),
gridfs_set=['test.test'],
**connector_opts
)
self.conn.test.test.drop()
self.conn.test.test.files.drop()
self.conn.test.test.chunks.drop()
self.connector.start()
assert_soon(lambda: len(self.connector.shard_set) > 0)
assert_soon(lambda: sum(1 for _ in self._search()) == 0)
def test_insert(self):
"""Tests insert
"""
self.conn['test']['test'].insert_one({'name': 'paulie'})
assert_soon(lambda: sum(1 for _ in self._search()) == 1)
result_set_1 = self._search()
self.assertEqual(sum(1 for _ in result_set_1), 1)
result_set_2 = self.conn['test']['test'].find_one()
for item in result_set_1:
self.assertEqual(item['_id'], result_set_2['_id'])
self.assertEqual(item['name'], result_set_2['name'])
def test_remove(self):
"""Tests remove
"""
self.conn['test']['test'].insert_one({'name': 'paulie'})
assert_soon(lambda: sum(1 for _ in self._search()) == 1)
self.conn['test']['test'].delete_one({'name': 'paulie'})
assert_soon(lambda: sum(1 for _ in self._search()) != 1)
self.assertEqual(sum(1 for _ in self._search()), 0)
def test_insert_file(self):
"""Tests inserting a gridfs file
"""
fs = GridFS(self.conn['test'], 'test')
test_data = b"test_insert_file test file"
id = fs.put(test_data, filename="test.txt", encoding='utf8')
assert_soon(lambda: sum(1 for _ in self._search()) > 0)
res = list(self._search())
self.assertEqual(len(res), 1)
doc = res[0]
self.assertEqual(doc['filename'], 'test.txt')
self.assertEqual(doc['_id'], id)
self.assertEqual(doc['content'], test_data)
def test_remove_file(self):
fs = GridFS(self.conn['test'], 'test')
id = fs.put("test file", filename="test.txt", encoding='utf8')
assert_soon(lambda: sum(1 for _ in self._search()) == 1)
fs.delete(id)
assert_soon(lambda: sum(1 for _ in self._search()) == 0)
def test_update(self):
"""Test update operations."""
# Insert
self.conn.test.test.insert_one({"a": 0})
assert_soon(lambda: sum(1 for _ in self._search()) == 1)
def check_update(update_spec):
updated = self.conn.test.command(
SON([('findAndModify', 'test'),
('query', {"a": 0}),
('update', update_spec),
('new', True)]))['value']
# Allow some time for update to propagate
time.sleep(2)
replicated = self.mongo_doc.mongo.test.test.find_one({"a": 0})
self.assertEqual(replicated, updated)
# Update by adding a field
check_update({"$set": {"b": [{"c": 10}, {"d": 11}]}})
# Update by setting an attribute of a sub-document beyond end of array.
check_update({"$set": {"b.10.c": 42}})
# Update by changing a value within a sub-document (contains array)
check_update({"$inc": {"b.0.c": 1}})
# Update by changing the value within an array
check_update({"$inc": {"b.1.f": 12}})
# Update by adding new bucket to list
check_update({"$push": {"b": {"e": 12}}})
# Update by changing an entire sub-document
check_update({"$set": {"b.0": {"e": 4}}})
# Update by adding a sub-document
check_update({"$set": {"b": {"0": {"c": 100}}}})
# Update whole document
check_update({"a": 0, "b": {"1": {"d": 10000}}})
def test_rollback(self):
"""Tests rollback. We force a rollback by adding a doc, killing the
primary, adding another doc, killing the new primary, and then
restarting both.
"""
primary_conn = self.repl_set.primary.client()
self.conn['test']['test'].insert_one({'name': 'paul'})
condition = lambda: self.conn['test']['test'].find_one(
{'name': 'paul'}) is not None
assert_soon(condition)
assert_soon(lambda: sum(1 for _ in self._search()) == 1)
self.repl_set.primary.stop(destroy=False)
new_primary_conn = self.repl_set.secondary.client()
admin = new_primary_conn['admin']
condition = lambda: admin.command("isMaster")['ismaster']
assert_soon(lambda: retry_until_ok(condition))
retry_until_ok(self.conn.test.test.insert_one,
{'name': 'pauline'})
assert_soon(lambda: sum(1 for _ in self._search()) == 2)
result_set_1 = list(self._search())
result_set_2 = self.conn['test']['test'].find_one({'name': 'pauline'})
self.assertEqual(len(result_set_1), 2)
#make sure pauline is there
for item in result_set_1:
if item['name'] == 'pauline':
self.assertEqual(item['_id'], result_set_2['_id'])
self.repl_set.secondary.stop(destroy=False)
self.repl_set.primary.start()
assert_soon(
lambda: primary_conn['admin'].command("isMaster")['ismaster'])
self.repl_set.secondary.start()
time.sleep(2)
result_set_1 = list(self._search())
self.assertEqual(len(result_set_1), 1)
for item in result_set_1:
self.assertEqual(item['name'], 'paul')
find_cursor = retry_until_ok(self.conn['test']['test'].find)
self.assertEqual(retry_until_ok(find_cursor.count), 1)
if __name__ == '__main__':
unittest.main()
| agolo/mongo-connector | tests/test_mongo.py | Python | apache-2.0 | 8,734 |
def test_image_export_reference(exporters, state, bpy_image_default, gltf_image_default):
state['settings']['images_data_storage'] = 'REFERENCE'
gltf_image_default['uri'] = '../filepath.png'
output = exporters.ImageExporter.export(state, bpy_image_default)
assert output == gltf_image_default
def test_image_export_embed(exporters, state, bpy_image_default, gltf_image_default):
state['settings']['images_data_storage'] = 'EMBED'
gltf_image_default['uri'] = (
'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAACElEQVR42gMAAAAAAW'
'/dyZEAAAAASUVORK5CYII='
)
gltf_image_default['mimeType'] = 'image/png'
output = exporters.ImageExporter.export(state, bpy_image_default)
assert output == gltf_image_default
def test_image_export_embed_glb(exporters, state, bpy_image_default, gltf_image_default):
state['settings']['images_data_storage'] = 'EMBED'
state['settings']['gltf_export_binary'] = True
gltf_image_default['mimeType'] = 'image/png'
gltf_image_default['bufferView'] = 'bufferView_buffer_Image_0'
output = exporters.ImageExporter.export(state, bpy_image_default)
for ref in state['references']:
ref.source[ref.prop] = ref.blender_name
assert output == gltf_image_default
def test_image_to_data_uri(exporters, bpy_image_default):
image_data = (
b'\x89PNG\r\n\x1a\n\x00\x00\x00\r'
b'IHDR\x00\x00\x00\x01\x00\x00\x00\x01\x08\x06\x00\x00\x00\x1f\x15\xc4\x89\x00\x00\x00\x08'
b'IDATx\xda\x03\x00\x00\x00\x00\x01o\xdd\xc9\x91\x00\x00\x00\x00'
b'IEND\xaeB`\x82'
)
assert exporters.ImageExporter.image_to_data_uri(bpy_image_default) == image_data
def test_image_check(exporters, state, bpy_image_default):
assert exporters.ImageExporter.check(state, bpy_image_default)
def test_image_default(exporters, state, bpy_image_default):
assert exporters.ImageExporter.default(state, bpy_image_default) == {
'name': 'Image',
'uri': '',
}
def test_image_check_0_x(exporters, state, bpy_image_default):
bpy_image_default.size = [0, 1]
assert exporters.ImageExporter.check(state, bpy_image_default) is not True
def test_image_check_0_y(exporters, state, bpy_image_default):
bpy_image_default.size = [1, 0]
assert exporters.ImageExporter.check(state, bpy_image_default) is not True
def test_image_check_type(exporters, state, bpy_image_default):
bpy_image_default.type = 'NOT_IMAGE'
assert exporters.ImageExporter.check(state, bpy_image_default) is not True
| Kupoman/blendergltf | tests/unit/test_image.py | Python | apache-2.0 | 2,570 |
# -*- coding: utf-8 -*-
"""
Common design parameters for minimum order design methods
@author: Christian Muenker
"""
from __future__ import print_function, division, unicode_literals
#from importlib import import_module
#import filterbroker as fb
class min_order_common(object):
def __init__(self):
self.name = {'common':'Common filter params'}
# message for min. filter order response types:
msg_min = ("Enter the maximum pass band ripple, minimum stop band "
"attenuation and the corresponding corner frequencies.")
# VISIBLE widgets for all man. / min. filter order response types:
vis_min = ['fo','fspecs','aspecs'] # minimum filter order
# ENABLED widgets for all man. / min. filter order response types:
enb_min = ['fo','fspecs','aspecs'] # minimum filter order
# common parameters for all man. / min. filter order response types:
par_min = ['f_S', 'A_PB', 'A_SB'] # enabled widget for min. filt. order
# Common data for all man. / min. filter order response types:
# This data is merged with the entries for individual response types
# (common data comes first):
self.com = {"min":{"enb":enb_min, "msg":msg_min, "par": par_min}}
self.rt = {
"LP": {"min":{"par":['f_S','A_PB','A_SB','F_PB','F_SB']}},
"HP": {"min":{"par":['f_S','A_PB','A_SB','F_SB','F_PB']}},
"BP": {"min":{"par":['f_S','A_PB','A_SB','A_SB2',
'F_SB','F_PB','F_PB2','F_SB2']}},
"BS": {"min":{"par":['f_S','A_PB','A_SB','A_PB2',
'F_PB','F_SB','F_SB2','F_PB2']}}
# "HIL": {"man":{"par":['F_SB', 'F_PB', 'F_PB2', 'F_SB2','A_SB','A_PB','A_SB2']}}
#"DIFF":
}
| honahursey/pyFDA | work/min_order_common.py | Python | apache-2.0 | 1,902 |
import re
from django import forms
from django.core.urlresolvers import reverse
from django.contrib.formtools.wizard import FormWizard
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
from django.utils.translation import ugettext_lazy as _
from google.appengine.ext.db import djangoforms
from google.appengine.api.labs import taskqueue
from uni_form.helpers import FormHelper, Submit
from uni_form.helpers import Layout, Fieldset, Row
from models import Developer
from users.utils import LocationField
from country.models import COUNTRIES
class DeveloperForm(djangoforms.ModelForm):
#geolocation = LocationField()
about_me = forms.CharField(widget=forms.Textarea,
help_text="accepts textile markup (<a target='_new' href='http://textile.thresholdstate.com/'>reference</a>)",
required=False)
location = LocationField()
country = forms.ChoiceField(choices=COUNTRIES)
tags = forms.CharField(help_text=_("space separated"))
class Meta:
model = Developer
#fields = ['alias', 'email_contact', 'first_name', 'last_name', 'location', 'photo']
exclude = ('_class', 'user', 'last_login', 'sign_up_date', 'location_geocells', 'photo')
def clean_tags(self):
tags = self.cleaned_data['tags']
if "," in tags:
tags = [tag.strip().replace(" ","-") for tag in tags.split(",")]
else:
tags = tags.split(" ")
return filter(len, map(lambda x: x.strip().lower(), tags))
def clean_alias(self):
alias = self.cleaned_data["alias"].strip()
if re.match("^([\w\d_]+)$", alias) is None:
raise forms.ValidationError(_("alias not valid, use only letters, numbers or underscores"))
if self.instance and self.instance.alias == alias:
return alias
if Developer.all(keys_only=True).filter("alias =", alias).get():
raise forms.ValidationError(_("alias not available"))
return alias
# Attach a formHelper to your forms class.
helper = FormHelper()
# create the layout object
layout = Layout(
# first fieldset shows the company
Fieldset('Basic', 'alias',
Row('first_name','last_name'),
'about_me',
'python_sdk',
'java_sdk',
'tags', css_class="developer_basic"),
# second fieldset shows the contact info
Fieldset('Contact details',
'public_contact_information',
'email_contact',
'phone',
'personal_blog',
'personal_page',
css_class="developer_contact_details"),
Fieldset('Location',
'country',
'location_description',
'location',
css_class="developer_contact_details")
)
helper.add_layout(layout)
submit = Submit('save',_('Save'))
helper.add_input(submit)
class SignUpStep1Form(forms.Form):
alias = forms.CharField()
first_name = forms.CharField()
last_name = forms.CharField()
def clean_alias(self):
alias = self.cleaned_data["alias"].strip()
if re.match("^([\w\d_]+)$", alias) is None:
raise forms.ValidationError(_("alias not valid, use only letters, numbers or underscores"))
if Developer.all(keys_only=True).filter("alias =", alias).get():
raise forms.ValidationError(_("alias not available"))
return alias
class SignUpStep2Form(forms.Form):
SDK_CHOICES = (
('python', 'python'),
('java', 'java')
)
email_contact = forms.CharField(help_text="protected with reCAPTCHA Mailhide", required=False)
phone = forms.CharField(required=False)
personal_blog = forms.URLField(required=False)
personal_page = forms.URLField(required=False)
about_me = forms.CharField(widget=forms.Textarea,
help_text="accepts textile markup (<a target='_new' href='http://textile.thresholdstate.com/'>reference</a>)",
required=False)
sdks = forms.MultipleChoiceField(widget=forms.CheckboxSelectMultiple, choices=SDK_CHOICES, help_text="which SDKs do you use?", required=False)
tags = forms.CharField(help_text=_("space separated"), required=False)
def clean_tags(self):
tags = self.cleaned_data['tags']
if "," in tags:
tags = [tag.strip().replace(" ","-") for tag in tags.split(",")]
else:
tags = tags.split(" ")
return filter(len, map(lambda x: x.strip().lower(), tags))
class SignUpStep3Form(forms.Form):
location = LocationField()
location_description = forms.CharField()
country = forms.ChoiceField(choices=COUNTRIES)
class SignUpWizard(FormWizard):
def get_template(self, step):
return 'users/sign_up_%s.html' % step
def done(self, request, form_list):
cleaned_data = {}
[cleaned_data.update(form.cleaned_data) for form in form_list]
import logging
logging.info(cleaned_data)
form = DeveloperForm(cleaned_data)
developer = Developer(
user = request.user,
alias = cleaned_data['alias'],
email_contact = cleaned_data['email_contact'] or None,
first_name = cleaned_data['first_name'],
last_name = cleaned_data['last_name'],
location = cleaned_data['location'] or None,
location_description = cleaned_data['location_description'] or None,
country = cleaned_data['country'],
phone = cleaned_data['phone'] or None,
personal_blog = cleaned_data['personal_blog'] or None,
personal_page = cleaned_data['personal_page'] or None,
public_contact_information = True,
about_me = cleaned_data['about_me'] or None,
python_sdk = "python" in cleaned_data['sdks'],
java_sdk = "java" in cleaned_data['sdks'],
tags = cleaned_data['tags'] or []
)
developer.put()
taskqueue.add(url=reverse("users_fusiontable_insert", args=[str(developer.key())]))
taskqueue.add(url=reverse("country_update_country", kwargs={'country_code': developer.country}))
request.flash['message'] = unicode(_("Welcome!"))
request.flash['severity'] = "success"
return HttpResponseRedirect(reverse('users_avatar_change'))
| devsar/ae-people | apps/users/forms.py | Python | apache-2.0 | 6,966 |
# -*- coding: utf-8 -*-
# pylint: disable=invalid-name
# Copyright 2017 IBM RESEARCH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""
local_qiskit_simulator command to snapshot the quantum state.
"""
from qiskit import CompositeGate
from qiskit import Gate
from qiskit import QuantumCircuit
from qiskit._instructionset import InstructionSet
from qiskit._quantumregister import QuantumRegister
from qiskit.qasm import _node as node
class SnapshotGate(Gate):
"""Simulator snapshot operation."""
def __init__(self, m, qubit, circ=None):
"""Create new snapshot gate."""
super().__init__("snapshot", [m], [qubit], circ)
def qasm(self):
"""Return OPENQASM string."""
qubit = self.arg[0]
m = self.param[0]
return self._qasmif("snapshot(%d) %s[%d];" % (m,
qubit[0].name,
qubit[1]))
def inverse(self):
"""Invert this gate."""
return self # self-inverse
def reapply(self, circ):
"""Reapply this gate to corresponding qubits in circ."""
self._modifiers(circ.snapshot(self.param[0], self.arg[0]))
def snapshot(self, m, q):
"""Cache the quantum state of local_qiskit_simulator."""
if isinstance(q, QuantumRegister):
gs = InstructionSet()
for j in range(q.size):
gs.add(self.snapshot(m, (q, j)))
return gs
self._check_qubit(q)
return self._attach(SnapshotGate(m, q, self))
# Add to QuantumCircuit and CompositeGate classes
QuantumCircuit.snapshot = snapshot
CompositeGate.snapshot = snapshot
# cache quantum state (identity)
QuantumCircuit.definitions["snapshot"] = {
"print": True,
"opaque": False,
"n_args": 1,
"n_bits": 1,
"args": ["m"],
"bits": ["a"],
# gate snapshot(m) a { }
"body": node.GateBody([])
}
| atilag/qiskit-sdk-py | qiskit/extensions/qasm_simulator_cpp/snapshot.py | Python | apache-2.0 | 2,505 |
import datetime
from optparse import make_option
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from six import print_
import bigbro
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--store',
dest='store',
help='Watch log subdirectory'),
)
help = 'Log employee RFID check-ins.'
def handle(self, *args, **options):
print_('Note: RFID scanner must be set up for keyboard input (see README).')
print_('Waiting for RFID input. Press Ctrl+C to quit.')
date_today = datetime.datetime.now().strftime('%Y-%m-%d')
log_location = bigbro.log_location(options['store'], date_today, 'checkin')
with open(log_location, 'a') as outf:
while True:
try:
rfid = raw_input()
time_f = datetime.datetime.now().strftime(settings.LOG_TIME_FMT)
print_(time_f, rfid, sep='\t', file=outf)
except KeyboardInterrupt:
print_('')
print_('Quitting...')
break
| mikepii/retail_store_foot_traffic_monitor | tracking/management/commands/checkin.py | Python | apache-2.0 | 1,191 |
'''
Created on May 6, 2014
@author: cmills
The idea here is to provide client-side functions to interact with the TASR
repo. We use the requests package here. We provide both stand-alone functions
and a class with methods. The class is easier if you are using non-default
values for the host or port.
'''
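# Example usage (illustrative host/port values, not from this module's config):
#   client = TASRLegacyClient(host='tasr.example.com', port=8080)
#   latest = client.get_latest_schema('my.topic.name')
#   by_version = client.get_schema_version('my.topic.name', 3)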
import requests
import tasr.app
from tasr.registered_schema import RegisteredAvroSchema
from tasr.headers import SubjectHeaderBot, SchemaHeaderBot
from tasr.client import TASRError, reg_schema_from_url
APP = tasr.app.TASR_APP
APP.set_config_mode('local')
TASR_HOST = APP.config.host
TASR_PORT = APP.config.port
TIMEOUT = 2 # seconds
def get_active_topics(host=TASR_HOST, port=TASR_PORT, timeout=TIMEOUT):
''' GET /tasr/active_topics
Retrieves available metadata for active topics (i.e. -- groups) with
registered schemas. A dict of <topic name>:<topic metadata> is returned.
'''
url = 'http://%s:%s/tasr/active_topics' % (host, port)
resp = requests.get(url, timeout=timeout)
if resp is None:
raise TASRError('Timeout for request to %s' % url)
if resp.status_code != 200:
raise TASRError('Failed request to %s (status code: %s)' %
(url, resp.status_code))
topic_metas = SubjectHeaderBot.extract_metadata(resp)
return topic_metas
def get_all_topics(host=TASR_HOST, port=TASR_PORT, timeout=TIMEOUT):
''' GET /tasr/topic
Retrieves available metadata for all the topics (i.e. -- groups) with
registered schemas. A dict of <topic name>:<topic metadata> is returned.
'''
url = 'http://%s:%s/tasr/topic' % (host, port)
resp = requests.get(url, timeout=timeout)
if resp is None:
raise TASRError('Timeout for request to %s' % url)
if resp.status_code != 200:
raise TASRError('Failed request to %s (status code: %s)' %
(url, resp.status_code))
topic_metas = SubjectHeaderBot.extract_metadata(resp)
return topic_metas
def register_schema(topic_name, schema_str, host=TASR_HOST,
port=TASR_PORT, timeout=TIMEOUT):
''' PUT /tasr/topic/<topic name>
Register a schema string for a topic. Returns a SchemaMetadata object
with the topic-version, topic-timestamp and ID metadata.
'''
url = 'http://%s:%s/tasr/topic/%s' % (host, port, topic_name)
headers = {'content-type': 'application/json; charset=utf8', }
rs = reg_schema_from_url(url, method='PUT', data=schema_str,
headers=headers, timeout=timeout)
return rs
def get_latest_schema(topic_name, host=TASR_HOST,
port=TASR_PORT, timeout=TIMEOUT):
''' GET /tasr/topic/<topic name>
Retrieve the latest schema registered for the given topic name. Returns a
RegisteredSchema object back.
'''
return get_schema_version(topic_name, None, host, port, timeout)
def get_schema_version(topic_name, version, host=TASR_HOST,
port=TASR_PORT, timeout=TIMEOUT):
''' GET /tasr/topic/<topic name>/version/<version>
Retrieve a specific schema registered for the given topic name identified
by a version (a positive integer). Returns a RegisteredSchema object.
'''
url = ('http://%s:%s/tasr/topic/%s/version/%s' %
(host, port, topic_name, version))
return reg_schema_from_url(url, timeout=timeout,
err_404='No such version.')
def schema_for_id_str(id_str, host=TASR_HOST,
port=TASR_PORT, timeout=TIMEOUT):
''' GET /tasr/id/<ID string>
Retrieves a schema that has been registered for at least one topic name as
identified by a hash-based ID string. The ID string is a base64 encoded
byte sequence, starting with a 1-byte ID type and followed by fingerprint
bytes for the ID type. For example, with an SHA256-based ID, a fingerprint
is 32 bytes in length, so there would be 33 ID bytes, which would produce
an ID string of length 44 once base64-encoded. The MD5-based IDs are 17
bytes (1 + 16), producing ID strings of length 24. A RegisteredSchema
object is returned.
'''
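# Illustrative arithmetic for the lengths described above: base64 encodes each
# 3-byte group as 4 characters, so 33 SHA256 ID bytes -> 44 characters and
# 17 MD5 ID bytes (with padding) -> 24 characters.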
url = 'http://%s:%s/tasr/id/%s' % (host, port, id_str)
return reg_schema_from_url(url, timeout=timeout,
err_404='No schema for id.')
def schema_for_schema_str(schema_str, object_on_miss=False,
host=TASR_HOST, port=TASR_PORT, timeout=TIMEOUT):
''' POST /tasr/schema
In essence this is very similar to the schema_for_id_str, but with the
calculation of the ID string being moved to the server. That is, the
client POSTs the schema JSON itself, the server canonicalizes it, then
calculates the SHA256-based ID string for what was sent, then looks for
a matching schema based on that ID string. This allows clients that do not
know how to canonicalize or hash the schemas to find the metadata (is it
registered, what version does it have for a topic) with what they have.
A RegisteredSchema object is returned if the schema string POSTed has been
registered for one or more topics.
If the schema string POSTed has yet to be registered for a topic and the
object_on_miss flag is True, a RegisteredSchema calculated for the POSTed
schema string is returned (it will have no topic-versions as there are
none). This provides an easy way for a client to get the ID strings to
use for subsequent requests.
If the object_on_miss flag is False (the default), then a request for a
previously unregistered schema will raise a TASRError.
'''
url = 'http://%s:%s/tasr/schema' % (host, port)
headers = {'content-type': 'application/json; charset=utf8', }
resp = requests.post(url, data=schema_str, headers=headers,
timeout=timeout)
if resp is None:
raise TASRError('Timeout for request to %s' % url)
if resp.status_code == 200:
# success -- return a normal reg schema
ras = RegisteredAvroSchema()
ras.schema_str = resp.content
schema_meta = SchemaHeaderBot.extract_metadata(resp)
ras.update_from_schema_metadata(schema_meta)
return ras
elif resp.status_code == 404 and object_on_miss:
ras = RegisteredAvroSchema()
ras.schema_str = schema_str
schema_meta = SchemaHeaderBot.extract_metadata(resp)
ras.update_from_schema_metadata(schema_meta)
return ras
raise TASRError('Schema not registered to any topics.')
#############################################################################
# Wrapped in a class
#############################################################################
class TASRLegacyClient(object):
'''An object means you only need to specify the host settings once.
'''
def __init__(self, host=TASR_HOST, port=TASR_PORT, timeout=TIMEOUT):
self.host = host
self.port = port
self.timeout = timeout
# topic calls
def get_active_topics(self):
'''Returns a dict of <topic name>:<metadata> for active topics.'''
return get_active_topics(self.host, self.port, self.timeout)
def get_all_topics(self):
'''Returns a dict of <topic name>:<metadata> for all topics.'''
return get_all_topics(self.host, self.port, self.timeout)
# schema calls
def register_schema(self, topic_name, schema_str):
'''Register a schema for a topic'''
return register_schema(topic_name, schema_str,
self.host, self.port, self.timeout)
def get_latest_schema(self, topic_name):
'''Get the latest schema registered for a topic'''
return get_latest_schema(topic_name,
self.host, self.port, self.timeout)
def get_schema_version(self, topic_name, version=None):
'''Get a schema by version for the topic'''
return get_schema_version(topic_name, version,
self.host, self.port, self.timeout)
def schema_for_id_str(self, id_str):
'''Get a schema identified by an ID str.'''
return schema_for_id_str(id_str,
self.host, self.port, self.timeout)
def schema_for_schema_str(self, schema_str):
'''Get a schema object using a (non-canonical) schema string.'''
return schema_for_schema_str(schema_str,
self.host, self.port, self.timeout)
| ifwe/tasr | src/py/tasr/client_legacy.py | Python | apache-2.0 | 8,458 |
from app import app
from flask import render_template
@app.route('/')
def index():
return render_template('index.html')
@app.route('/story/')
def story():
return render_template('story.html')
@app.route('/bio/')
def bio():
return render_template('bio.html')
@app.route('/contact/')
def contact():
return render_template('contact.html')
# @app.route('/fun/')
# def fun():
# return render_template('fun.html')
# @app.route('/portfolio/')
# def portfolio():
# return render_template('portfolio.html')
# @app.route('/boot_index/')
# def boot_index():
# return render_template('bootstrap_index.html')
| jacburge/wewillremember | app/views.py | Python | apache-2.0 | 610 |
# Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library functions for ContextRCNN."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
import tf_slim as slim
# The negative value used in padding the invalid weights.
_NEGATIVE_PADDING_VALUE = -100000
def filter_weight_value(weights, values, valid_mask):
"""Filters weights and values based on valid_mask.
_NEGATIVE_PADDING_VALUE will be added to invalid elements in the weights to
avoid their contribution in softmax. 0 will be set for the invalid elements in
the values.
Args:
weights: A float Tensor of shape [batch_size, input_size, context_size].
values: A float Tensor of shape [batch_size, context_size,
projected_dimension].
valid_mask: A boolean Tensor of shape [batch_size, context_size]. True means
valid and False means invalid.
Returns:
weights: A float Tensor of shape [batch_size, input_size, context_size].
values: A float Tensor of shape [batch_size, context_size,
projected_dimension].
Raises:
ValueError: If the input shapes don't match.
"""
w_batch_size, _, w_context_size = weights.shape
v_batch_size, v_context_size, _ = values.shape
m_batch_size, m_context_size = valid_mask.shape
if w_batch_size != v_batch_size or v_batch_size != m_batch_size:
raise ValueError("Please make sure the first dimension of the input"
" tensors are the same.")
if w_context_size != v_context_size:
raise ValueError("Please make sure the third dimension of weights matches"
" the second dimension of values.")
if w_context_size != m_context_size:
raise ValueError("Please make sure the third dimension of the weights"
" matches the second dimension of the valid_mask.")
valid_mask = valid_mask[..., tf.newaxis]
# Force the invalid weights to be very negative so it won't contribute to
# the softmax.
weights += tf.transpose(
tf.cast(tf.math.logical_not(valid_mask), weights.dtype) *
_NEGATIVE_PADDING_VALUE,
perm=[0, 2, 1])
# Force the invalid values to be 0.
values *= tf.cast(valid_mask, values.dtype)
return weights, values
def compute_valid_mask(num_valid_elements, num_elements):
"""Computes mask of valid entries within padded context feature.
Args:
num_valid_elements: A int32 Tensor of shape [batch_size].
num_elements: An int32 Tensor.
Returns:
A boolean Tensor of the shape [batch_size, num_elements]. True means
valid and False means invalid.
"""
batch_size = num_valid_elements.shape[0]
element_idxs = tf.range(num_elements, dtype=tf.int32)
batch_element_idxs = tf.tile(element_idxs[tf.newaxis, ...], [batch_size, 1])
num_valid_elements = num_valid_elements[..., tf.newaxis]
valid_mask = tf.less(batch_element_idxs, num_valid_elements)
return valid_mask
def project_features(features, projection_dimension, is_training, normalize):
"""Projects features to another feature space.
Args:
features: A float Tensor of shape [batch_size, features_size,
num_features].
projection_dimension: A int32 Tensor.
is_training: A boolean Tensor (affecting batch normalization).
normalize: A boolean Tensor. If true, the output features will be l2
normalized on the last dimension.
Returns:
A float Tensor of shape [batch, features_size, projection_dimension].
"""
# TODO(guanhangwu) Figure out a better way of specifying the batch norm
# params.
batch_norm_params = {
"is_training": is_training,
"decay": 0.97,
"epsilon": 0.001,
"center": True,
"scale": True
}
batch_size, _, num_features = features.shape
features = tf.reshape(features, [-1, num_features])
projected_features = slim.fully_connected(
features,
num_outputs=projection_dimension,
activation_fn=tf.nn.relu6,
normalizer_fn=slim.batch_norm,
normalizer_params=batch_norm_params)
projected_features = tf.reshape(projected_features,
[batch_size, -1, projection_dimension])
if normalize:
projected_features = tf.math.l2_normalize(projected_features, axis=-1)
return projected_features
def attention_block(input_features, context_features, bottleneck_dimension,
output_dimension, attention_temperature, valid_mask,
is_training):
"""Generic attention block.
Args:
input_features: A float Tensor of shape [batch_size, input_size,
num_input_features].
context_features: A float Tensor of shape [batch_size, context_size,
num_context_features].
bottleneck_dimension: A int32 Tensor representing the bottleneck dimension
for intermediate projections.
output_dimension: A int32 Tensor representing the last dimension of the
output feature.
attention_temperature: A float Tensor. It controls the temperature of the
softmax for weights calculation. The formula for calculation as follows:
weights = exp(weights / temperature) / sum(exp(weights / temperature))
valid_mask: A boolean Tensor of shape [batch_size, context_size].
is_training: A boolean Tensor (affecting batch normalization).
Returns:
A float Tensor of shape [batch_size, input_size, output_dimension].
"""
with tf.variable_scope("AttentionBlock"):
queries = project_features(
input_features, bottleneck_dimension, is_training, normalize=True)
keys = project_features(
context_features, bottleneck_dimension, is_training, normalize=True)
values = project_features(
context_features, bottleneck_dimension, is_training, normalize=True)
weights = tf.matmul(queries, keys, transpose_b=True)
weights, values = filter_weight_value(weights, values, valid_mask)
weights = tf.nn.softmax(weights / attention_temperature)
features = tf.matmul(weights, values)
output_features = project_features(
features, output_dimension, is_training, normalize=False)
return output_features
def compute_box_context_attention(box_features, context_features,
valid_context_size, bottleneck_dimension,
attention_temperature, is_training):
"""Computes the attention feature from the context given a batch of box.
Args:
box_features: A float Tensor of shape [batch_size, max_num_proposals,
height, width, channels]. It is pooled features from first stage
proposals.
context_features: A float Tensor of shape [batch_size, context_size,
num_context_features].
valid_context_size: A int32 Tensor of shape [batch_size].
bottleneck_dimension: A int32 Tensor representing the bottleneck dimension
for intermediate projections.
attention_temperature: A float Tensor. It controls the temperature of the
softmax for weights calculation. The formula for calculation as follows:
weights = exp(weights / temperature) / sum(exp(weights / temperature))
is_training: A boolean Tensor (affecting batch normalization).
Returns:
A float Tensor of shape [batch_size, max_num_proposals, 1, 1, channels].
"""
_, context_size, _ = context_features.shape
valid_mask = compute_valid_mask(valid_context_size, context_size)
channels = box_features.shape[-1]
# Average pools over height and width dimension so that the shape of
# box_features becomes [batch_size, max_num_proposals, channels].
box_features = tf.reduce_mean(box_features, [2, 3])
output_features = attention_block(box_features, context_features,
bottleneck_dimension, channels.value,
attention_temperature, valid_mask,
is_training)
# Expands the dimension back to match with the original feature map.
output_features = output_features[:, :, tf.newaxis, tf.newaxis, :]
return output_features
| tombstone/models | research/object_detection/meta_architectures/context_rcnn_lib.py | Python | apache-2.0 | 8,672 |
# coding=utf-8
# Copyright 2022 The Trax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for trax.shapes."""
from absl.testing import absltest
import numpy as np
from trax import shapes
from trax.shapes import ShapeDtype
class ShapesTest(absltest.TestCase):
def test_constructor_and_read_properties(self):
sd = ShapeDtype((2, 3), np.int32)
self.assertEqual(sd.shape, (2, 3))
self.assertEqual(sd.dtype, np.int32)
def test_default_dtype_is_float32(self):
sd = ShapeDtype((2, 3))
self.assertEqual(sd.shape, (2, 3))
self.assertEqual(sd.dtype, np.float32)
def test_signature_on_ndarray(self):
array = np.array([[2, 3, 5, 7],
[11, 13, 17, 19]],
dtype=np.int16)
sd = shapes.signature(array)
self.assertEqual(sd.shape, (2, 4))
self.assertEqual(sd.dtype, np.int16)
def test_shape_dtype_repr(self):
sd = ShapeDtype((2, 3))
repr_string = '{}'.format(sd)
self.assertEqual(repr_string,
"ShapeDtype{shape:(2, 3), dtype:<class 'numpy.float32'>}")
def test_splice_signatures(self):
sd1 = ShapeDtype((1,))
sd2 = ShapeDtype((2,))
sd3 = ShapeDtype((3,))
sd4 = ShapeDtype((4,))
sd5 = ShapeDtype((5,))
# Signatures can be ShapeDtype instances, tuples of 2+ ShapeDtype instances,
# or empty tuples.
sig1 = sd1
sig2 = (sd2, sd3, sd4)
sig3 = ()
sig4 = sd5
spliced = shapes.splice_signatures(sig1, sig2, sig3, sig4)
self.assertEqual(spliced, (sd1, sd2, sd3, sd4, sd5))
def test_len_signature(self):
"""Signatures of all sizes should give correct length when asked."""
x1 = np.array([1, 2, 3])
x2 = np.array([10, 20, 30])
inputs0 = ()
inputs1 = x1 # NOT in a tuple
inputs2 = (x1, x2)
sig0 = shapes.signature(inputs0)
sig1 = shapes.signature(inputs1)
sig2 = shapes.signature(inputs2)
# pylint: disable=g-generic-assert
self.assertEqual(len(sig0), 0)
self.assertEqual(len(sig1), 1)
self.assertEqual(len(sig2), 2)
# pylint: enable=g-generic-assert
if __name__ == '__main__':
absltest.main()
| google/trax | trax/shapes_test.py | Python | apache-2.0 | 2,632 |
"""Auto-generated file, do not edit by hand. IS metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_IS = PhoneMetadata(id='IS', country_code=None, international_prefix=None,
general_desc=PhoneNumberDesc(national_number_pattern='1\\d{2,5}', possible_length=(3, 4, 6)),
toll_free=PhoneNumberDesc(national_number_pattern='1717', example_number='1717', possible_length=(4,)),
premium_rate=PhoneNumberDesc(national_number_pattern='1848', example_number='1848', possible_length=(4,)),
emergency=PhoneNumberDesc(national_number_pattern='112', example_number='112', possible_length=(3,)),
short_code=PhoneNumberDesc(national_number_pattern='1(?:1(?:[28]|6(?:1(?:23|16)))|4(?:00|1[145]|4[0146])|55|7(?:00|17|7[07-9])|8(?:0[08]|1[016-9]|20|48|8[018])|900)', example_number='112', possible_length=(3, 4, 6)),
carrier_specific=PhoneNumberDesc(national_number_pattern='1441', example_number='1441', possible_length=(4,)),
sms_services=PhoneNumberDesc(national_number_pattern='1(?:415|848|900)', example_number='1415', possible_length=(4,)),
short_data=True)
| gencer/python-phonenumbers | python/phonenumbers/shortdata/region_IS.py | Python | apache-2.0 | 1,126 |
# -*- coding: utf-8 -*-
# Lumina User Guide documentation build configuration file.
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import six
import string
import sys
import time
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.append(os.path.abspath('/extensions'))
sys.path.insert(0, os.path.abspath('./extensions'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
# 'globalindex',
# 'sphinx.ext.ifconfig',
# 'sphinxcontrib.httpdomain'
]
# -- Options for automatic Figure numbering
numfig = True
numfig_secnum_depth = (2)
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Lumina® Handbook'
copyright = u'2011-2017, iXsystems'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
#Option to add text substitutions to all .rst files being built.
rst_prolog = """
.. |freenas| replace:: FreeNAS\ :sup:`"""u'\u00AE'"""`
.. |truenas| replace:: TrueNAS\ :sup:`"""u'\u00AE'"""`
.. |pcbsd| replace:: PC-BSD\ :sup:`"""u'\u00AE'"""`
.. |trueos| replace:: TrueOS\ :sup:`"""u'\u00AE'"""`
.. |appcafe| replace:: AppCafe\ :sup:`"""u'\u00AE'"""`
.. |lumina| replace:: Lumina\ :sup:`"""u'\u00AE'"""`
.. |sysadm| replace:: SysAdm\ :sup:`"""u'\u2122'"""`
.. |trpi| replace:: TrueOS Pico\ :sup:`"""u'\u00AE'"""`
.. |pise| replace:: Pico Server\ :sup:`"""u'\u00AE'"""`
.. |picl| replace:: Pico Client\ :sup:`"""u'\u00AE'"""`
"""
# -- Option to change :menuselection: arrow -----------------------------
from docutils import nodes, utils
from docutils.parsers.rst import roles
from sphinx.roles import _amp_re
def patched_menusel_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
text = utils.unescape(text)
if typ == 'menuselection':
text = text.replace('-->', u'\N{RIGHTWARDS ARROW}') # Here is the patch
spans = _amp_re.split(text)
node = nodes.literal(rawtext=rawtext)
for i, span in enumerate(spans):
span = span.replace('&&', '&')
if i == 0:
if len(span) > 0:
textnode = nodes.Text(span)
node += textnode
continue
accel_node = nodes.inline()
letter_node = nodes.Text(span[0])
accel_node += letter_node
accel_node['classes'].append('accelerator')
node += accel_node
textnode = nodes.Text(span[1:])
node += textnode
node['classes'].append(typ)
return [node], []
# Use 'patched_menusel_role' function for processing the 'menuselection' role
roles.register_local_role("menuselection", patched_menusel_role)
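# For example, ":menuselection:`File --> Open`" now renders the "-->" separator
# as a rightwards arrow (U+2192) instead of the literal characters.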
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'trueos_style'
html_theme_path = ["themes",]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = 'Lumina Handbook'
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "lumina.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "lumina.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Lumina'
# -- Options for translations
#locale_dirs = ['_build/locale-po/']
# -- Options for LaTeX output ---------------------------------------------
if six.PY3:
texproject = project.replace('®', r'''\textsuperscript{\textregistered}''')
else:
texproject = project.replace(u'®', r'''\textsuperscript{\textregistered}''')
PREAMBLE = r'''\def\docname{''' + texproject + '}'
PREAMBLE = (PREAMBLE
+ r'''\def\docdate{'''
+ time.strftime("%B %Y")
+ ' Edition}')
# define custom title page
PREAMBLE = PREAMBLE + r'''
% FreeNAS/TrueNAS LaTeX preamble
\usepackage[default,scale=0.95]{opensans}
\usepackage[T1]{fontenc}
\usepackage{color}
\usepackage{tikz}
\usetikzlibrary{calc}
%for ragged right tables
\usepackage{array,ragged2e}
\definecolor{ixblue}{cmyk}{0.85,0.24,0,0}
\newenvironment{widemargins}{%
\begin{list}{}{%
\setlength{\leftmargin}{-0.5in}%
\setlength{\rightmargin}{-0.5in}%
}\item}%
{\end{list}%
}
\makeatletter
\renewcommand{\maketitle}{%
\begin{titlepage}%
\newlength{\thistitlewidth}%
\begin{widemargins}%
\usefont{T1}{fos}{l}{n}%
\vspace*{-6mm}%
\fontsize{32}{36}\selectfont%
\docname\par%
\vspace*{-4.5mm}%
\settowidth{\thistitlewidth}{\docname}%
{\color{ixblue}\rule{\thistitlewidth}{1.5pt}}\par%
\vspace*{4.5mm}%
\fontsize{18}{22}\fontseries{sbc}\selectfont%
\docdate\par%
\end{widemargins}%
\begin{tikzpicture}[remember picture,overlay]
\fill [ixblue] (current page.south west) rectangle ($(current page.south east) + (0, 2in)$);
\end{tikzpicture}
\end{titlepage}
}
\makeatother
% a plain page style for front matter
\fancypagestyle{frontmatter}{%
\fancyhf{}
\fancyhf[FCO,FCE]{}
\fancyhf[FLE,FRO]{\textbf{\thepage}}
\fancyhf[FLO,FRE]{}
}
'''
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Disable Index Generation.
#'printindex': '',
# Additional stuff for the LaTeX preamble.
'preamble': PREAMBLE,
# remove blank pages
'classoptions': ',openany',
'babel': r'''\usepackage[english]{babel}''',
# strict positioning of figures
'figure_align': 'H'
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'LuminaHandbook.tex', u'Lumina Desktop Environment Documentation',
u'Lumina Users', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
latex_show_pagerefs = True
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'Lumina Handbook'
epub_author = u'iXsystems'
epub_publisher = u'iXsystems'
epub_copyright = u'2011-2016, iXsystems'
# The basename for the epub file. It defaults to the project name.
epub_basename = u'lumina_handbook'
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
epub_scheme = 'URL'
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = 'lumina.org'
# A unique identification for the text.
epub_uid = '1.0'
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the PIL.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
epub_show_urls = 'no'
# If false, no index is generated.
#epub_use_index = True
# -- Epilogue additions ----------------------------------------------------------
# .. |keyword| replace:: text to replace it with
# .. |copy| unicode:: U+000A9 .. COPYRIGHT SIGN
#rst_epilog = """
#.. |cop| unicode:: U+000A9
#.. |reg| unicode:: U+000AE
#"""
|
a-stjohn/lumina-docs
|
conf.py
|
Python
|
bsd-2-clause
| 13,283
|
#!/usr/bin/env python
# coding: utf-8
#
# License: BSD; see LICENSE for more details.
from pygments.lexer import RegexLexer, include, bygroups
import pygments.token as t
class SnortLexer(RegexLexer):
name = 'Snort'
aliases = ['snort', 'hog']
filenames = ['*.rules']
tokens = {
'root': [
(r'#.*$', t.Comment),
(r'(\$\w+)', t.Name.Variable),
(r'\b(any|(\d{1,3}\.){3}\d{1,3}(/\d+)?)', t.Name.Variable),
(r'^\s*(log|pass|alert|activate|dynamic|drop|reject|sdrop|'
r'ruletype|var|portvar|ipvar)',
t.Keyword.Type),
(r'\b(metadata)(?:\s*:)', t.Keyword, 'metadata'),
(r'\b(reference)(?:\s*:)', t.Keyword, 'reference'),
(r'\b(msg|reference|gid|sid|rev|classtype|priority|metadata|'
r'content|http_encode|uricontent|urilen|isdataat|pcre|pkt_data|'
r'file_data|base64_decode|base64_data|byte_test|byte_jump|'
r'byte_extract|ftp_bounce|pcre|asn1|cvs|dce_iface|dce_opnum|'
r'dce_stub_data|sip_method|sip_stat_code|sip_header|sip_body|'
r'gtp_type|gtp_info|gtp_version|ssl_version|ssl_state|nocase|'
r'rawbytes|depth|offset|distance|within|http_client_body|'
r'http_cookie|http_raw_cookie|http_header|http_raw_header|'
r'http_method|http_uri|http_raw_uri|http_stat_code|'
r'http_stat_msg|fast_pattern|fragoffset|fragbits|'
r'ttl|tos|id|ipopts|dsize|flags|flow|flowbits|seq|ack|window|'
r'itype|icode|icmp_id|icmp_seq|rpc|ip_proto|sameip|'
r'stream_reassemble|stream_size|logto|session|resp|react|tag|'
             r'activates|activated_by|replace|detection_filter|threshold)'
r'(?:\s*:)',
t.Keyword),
(r'\b(tcp|udp|icmp|ip)', t.Keyword.Constant),
(r'\b(hex|dec|oct|string|type|output|any|engine|soid|service|'
r'norm|raw|relative|bytes|big|little|align|invalid-entry|'
r'enable|disable|client|server|both|either|printable|binary|'
r'all|session|host|packets|seconds|bytes|src|dst|track|by_src|'
r'by_dst|uri|header|cookie|utf8|double_encode|non_ascii|'
r'uencode|bare_byte|ascii|iis_encode|bitstring_overflow|'
r'double_overflow|oversize_length|absolute_offset|'
r'relative_offset|rr|eol|nop|ts|sec|esec|lsrr|lsrre|'
r'ssrr|satid|to_client|to_server|from_client|from_server|'
r'established|not_established|stateless|no_stream|only_stream|'
r'no_frag|only_frag|set|setx|unset|toggle|isset|isnotset|'
             r'noalert|limit|threshold|count|str_offset|str_depth|tagged)',
t.Name.Attribute),
(r'(<-|->|<>)', t.Operator),
(ur'”', t.String, 'fancy-string'),
(ur'“', t.String, 'fancy-string'),
(r'"', t.String, 'dq-string'),
(r'\'', t.String, 'sq-string'),
(r'(\d+)', t.Number),
(r';', t.Punctuation),
(r'\\', t.String.Escape),
(r'\s+', t.Whitespace),
],
'hex': [
(r'\|([a-fA-F0-9 ]+)\|', t.Number.Hex),
],
'dq-string': [
include('hex'),
(r'([^"])', t.String),
(r'"', t.String, '#pop')
],
'sq-string': [
include('hex'),
(r'([^\'])', t.String),
(r'\'', t.String, '#pop')
],
'fancy-string': [
include('hex'),
(ur'([^”])', t.String),
(ur'”', t.String, '#pop')
],
'metadata': [
(r'\s', t.Whitespace),
(r'([\w_-]+)(\s+)([\w_-]+)',
bygroups(t.Name.Variable, t.Whitespace, t.Name.Attribute)),
(r';', t.Punctuation, '#pop'),
],
'reference': [
(r'(\w+)(,)(?:\s*)([^;]+)',
bygroups(t.Name.Variable, t.Punctuation, t.Name.Attribute)),
(r';', t.Punctuation, '#pop')
]
}
if __name__ == '__main__':
from pygments import highlight
from pygments.formatters import Terminal256Formatter
from sys import argv
if len(argv) > 1:
import io
for arg in argv[1:]:
input = io.open(arg, 'r')
code = input.read(-1)
print("Highlighting " + arg)
print(highlight(code, SnortLexer(encoding='chardet'),
Terminal256Formatter(encoding='utf-8')))
else:
code = """
alert tcp $HOME_NET any -> 192.168.1.0/24 111 (content:"|00 01 86 a5|"; msg: "mountd access";)
alert tcp any any -> any 21 (content:"site exec"; content:"%"; msg:"site exec buffer overflow attempt";)
alert tcp !192.168.1.0/24 any -> 192.168.1.0/24 111 (content: "|00 01 86 a5|"; msg: "external mountd access";)
"""
print(highlight(code, SnortLexer(), Terminal256Formatter()))
|
yaunj/hogments
|
hogments/hog.py
|
Python
|
bsd-2-clause
| 4,940
|
"""
Utilities for parsing SExtractor files.
H. Ferguson - revised 4/23/03 to promote ints to floats if a value
with a decimal point appears somewhere in the column originally thought
to be integers
version::
v2.1 - fails gracefully when the catalog has no sources
v3.0 - added gettypes to return column types
      - create new column names when they are not explicitly in the header
v4.0 - added gettypes to return column types
v4.1 - uses numarray by default
v4.2 - delete attribute 'l' (input lines from catalog) before returning
v4.3 - 1/11/06 Added less-offensive alias se_catalog() == sextractor()
v4.4hf-1/21/06 Fixed bug in creating extra column names when last is a vector
v4.4vl - V. Laidler added new methods:
__len__ returns number of objects in catalog
__iter__ returns the index of the next row in the catalog
line(self,i) returns a constructed string containing the ith line
buildheader returns a constructed header from the hdict
Added new attribute self.header: contains the header as read in
from the catalog.
Lines that start with '#' but are not followed by an integer are
now assumed to be comment lines, which are added to the
header but otherwise skipped.
v4.5 - V. Laidler removed Numeric dependence
v4.6 - V. Laidler converted to numpy
v5.0 - 7/5/07 Numpy conversion
v6.0 - V. Laidler: added rw_catalog class, reworked internals to avoid
column name clashes
v7.0 - S.-M. Niemi: some modifications
v7.1 - S.-M. Niemi: now supports string columns
"""
__version__ = '7.1'
__author = 'Henry C. Ferguson, STScI'
import string
import numpy as N
import os, sys
class se_catalog(object):
"""
Read a SExtractor-style catalog.
Usage: c=se_catalog(catalog,readfile=True,preserve_case=False)
Will read the catalog and return an object c, whose attributes are
arrays containing the data. For example, c.mag_auto contains the
mag_auto values.
Arguments:
catalog -- The input SExtractor catalog.
readfile -- True means read the data. False means return the
object without reading the data. The lines from the catalog
are returned as a list of ascii strings c.l. Useful if you want
to do some special parsing of some sort.
preserve_case -- default (False) converts column names to lower case
The input catalog MUST have a header with the SExtractor format:
# 1 ID comment
    # 2 ALPHA_J2000 another comment
That is, first column is the comment symbol #, second column is
the column number, third column is the column name, and the rest
of the line is a comment. SExtractor allows "vectors" to be identified
only by the first column...e.g.
# 12 FLUX_APER
# 20 FLUXERR_APER
the missing columns are all aperture fluxes through different
apertures. These will be read into attributes:
c.flux_aper # The first one
c.flux_aper_1 # the second one, and so on
The case of aperture radii is a bit nasty, since these only
appear in the SExtractor configuration file. Use parseconfig()
to read that file.
"""
def __init__(self, cfile, readfile=True, preserve_case=False):
(self._d, self._l, self._ncolumns, self._header) = initcat(cfile,
preserve_case=preserve_case)
self._fname = cfile
if readfile:
self._colentries = range(len(self._l))
for i in range(len(self._l)):
self._colentries[i] = self._l[i].split()
#SMN: added
if min(self._d.values()) == 0:
for key in self._d: self._d[key] += 1
self.gettypes()
for k in self._d.keys():
contents = getcolvalues(self._d[k],
self._type[k],
self._colentries)
colname = self._okname(k)
setattr(self, colname, contents)
delattr(self, '_l')
def __len__(self):
return len(self._colentries)
def __iter__(self):
return range(len(self._colentries)).__iter__()
def _okname(self, k):
try:
#Munge column name if it conflicts
test = self.__getattribute__(k)
newkey = 'c_' + k
print "--Column '%s' changed to '%s' to avoid conflicts" % (k, newkey)
self._d[newkey] = self._d[k]
del self._d[k]
return newkey
except AttributeError:
return k
def line(self, i):
"""
Returns an assembled line of this catalog suitable for writing.
Except it doesn't really, if we modified the individual columns
"""
ans = ' '.join(self._colentries[i]) + '\n'
return ans
def buildheader(self):
"""
Reconstruct the header from the header dictionary.
This might be useful if only a few columns were selected
from the file; otherwise just use the 'header' attribute.
"""
lines = {}
for k in self._d:
lines[self._d[k]] = '# %d %s' % (self._d[k], k.upper())
#sort the new keys
nkeys = lines.keys()
nkeys.sort()
#join them together with newlines
ans = ''
for k in nkeys:
ans = ans + "%s\n" % lines[k]
return ans
def getcol(self, col, offset=0):
column = self._d[col]
return getcol(column + offset, self._l)
def getcols(self, *args):
ret = []
for i in range(len(args)):
ret = ret + [getcol(self._d[args[i]], self._l)]
return ret
def gettypes(self):
self._type = {}
for k in self._d.keys():
#this line may require changing
if len(self._l) > 1000000:
every = 500
elif len(self._l) > 10000:
every = 20
else:
every = 10
ret = getcol(self._d[k], self._l[::every])
t = type(ret)
if t == type(N.array([1])):
if ret.dtype.char == 'i' or ret.dtype.char == 'l':
t = type(1)
elif ret.dtype.char == 'd':
t = type(1.e99)
else:
t = type('string')
#print k, t
self._type[k] = t
class sextractor(se_catalog): # Just an alias for class se_catalog
""" Read SExtractor catalog...just an alias for se_catalog """
pass
class rw_catalog(se_catalog):
""" Extend the se_catalog class to support adding new columns,
and writing out the new version."""
def __init__(self, fname):
self._modflag = False #this flag will be set by add_column routines
self._fname = fname
self._colnames = []
se_catalog.__init__(self, fname,
readfile=True, preserve_case=False)
coldict = invert_dict(self._d)
for k in coldict:
self._colnames.append(coldict[k])
def addcolumn(self, input_colname, coldata):
""" coldata must be a 1d numarray of the correct length"""
if len(coldata) != len(self):
raise ValueError, "Column length must match catalog length"
colname = self._okname(input_colname)
#Most of the bookkeeping is the same as for an empty column
self.addemptycolumn(colname, coldata.dtype)
#and then we reset the column to contain the actual data
setattr(self, colname, coldata)
def addemptycolumn(self, input_colname, coltype):
""" Defines a new column & updates all the bookkeeping, but
does not actually fill in the data. """
colname = self._okname(input_colname)
setattr(self, colname, N.zeros((len(self),), coltype))
self._modflag = True
self._type[colname] = coltype
#Looks strange here because we count columns from 1 but
#Python counts them from 0
self._ncolumns += 1
self._d[colname] = self._ncolumns
self._colnames.append(colname)
self._header += '# %d %s\n' % (self._ncolumns, colname)
def line(self, rownum):
""" Construct a new line as to be printed out """
if not self._modflag:
return se_catalog.line(self, rownum)
else:
linelist = []
for c in self._colnames:
col = getattr(self, c)
linelist.append(str(col[rownum]))
line = ' '.join(linelist) + '\n'
return line
def writeto(self, outname, clobber=False):
if not clobber:
if os.path.isfile(outname):
raise ValueError, """File already exists.
Use .writeto(fname, clobber=True) to overwrite. """
out = open(outname, 'w')
out.write(self._header)
for k in range(len(self)):
out.write(self.line(k))
out.close()
def printme(self):
""" Like writeto, but for sys.stdout """
sys.stdout.write(self._header)
for k in range(len(self)):
sys.stdout.write(self.line(k))
def invert_dict(d):
""" Generate a new dictionary with the key/value relationship inverted """
newd = {}
for k in d:
newd[d[k]] = k
return newd
def parseconfig_se(cfile):
""" parseconfig -- read a SExtractor .sex file and return a dictionary
of options & values. Comments are ignored.
"""
cdict = {}
f = open(cfile, 'r')
lines = f.readlines()
for l in lines:
a = string.split(l)
if len(a) > 0:
if a[0][0] != '#':
maxi = len(a)
for i in range(1, len(a)):
if a[i][0] == '#':
maxi = i
break
# Turn comma-separated lists into python lists
entry = []
for e in a[1:maxi]:
                    if string.find(e, ',') >= 0:
entry = entry + string.split(e, ',')
else:
entry = entry + [e]
cdict[a[0]] = entry
return cdict
def initcat(catfile, preserve_case=False):
""" parseheader -- reads the header of a SExtractor catalog file and
returns a dictionary of parameter names and column numbers.
Also returns a list of lines containing the data.
"""
hdict = {}
header = []
f = open(catfile, 'r')
lines = f.readlines()
f.close()
first = 1
firstdata = 0
i = 0
previous_column = 0
previous_key = ""
for l in lines:
if l.startswith('#'): #this is a header line
header.append(l)
a = (l.replace('#', '# ')).split() #Guard against "#10 colname"
try:
col = int(a[1])
# If the column numbers skip, create new column names for
# columns not named explicitly in the header
if col != previous_column + 1:
for c in range(previous_column + 1, col):
column_name = previous_key + "_%d" % (c - previous_column)
hdict[column_name] = c
# Update this column in the dictionary
if (preserve_case):
column_name = a[2]
else:
column_name = a[2].lower()
hdict[column_name] = col
firstdata = i + 1
previous_column = col
previous_key = column_name
except (ValueError, IndexError):
#it's a comment line with no column number,
#or an entirely blank comment line: skip
pass
else: # This is where the data start
if previous_column == 0:
raise ValueError("No valid header found in %s" % catfile)
a = string.split(l)
if len(a) > 0:
if first:
firstdata = i
first = 0
# Check if there are extra columns
if len(a) > previous_column:
# If so, add keys for the last entry
for c in range(previous_column + 1, len(a)):
column_name = previous_key + "_%d" % (c - previous_column)
                        # hdict maps column name -> 1-based column number; the
                        # requested case was already applied via previous_key,
                        # so just store the number (an int has no .lower()).
                        hdict[column_name] = c
ncolumns = len(a)
i = i + 1
return(hdict, lines[firstdata:], ncolumns, ''.join(header))
def getcol(col, lines):
""" Get a column from a SExtractor catalog. Determine the type
(integer, float, string) and return either an array of that
type (Int32, Float64) or a list of strings """
i = col - 1 # Columns start at 1, arrays start at 0
nlines = len(lines)
if len(lines) == 0:
values = N.array([])
return values
a = string.split(lines[0])
if string.find(a[i], '.') < 0:
try:
x = int(a[i])
except:
values = range(nlines)
getstrings(col, lines, values)
else:
values = N.zeros((nlines), N.int32)
if type(getints(col, lines, values)) == type(-1):
values = N.zeros((nlines), N.float64)
getfloats(col, lines, values)
else:
try:
x = float(a[i])
except:
values = range(nlines)
getstrings(col, lines, values)
else:
values = N.zeros((nlines), N.float64)
getfloats(col, lines, values)
return values
def getcolvalues(col, coltype, colentries, colzero=False):
""" Get a column from a SExtractor catalog. Determine the type
(integer, float, string) and return either an array of that
type (Int32, Float64) or a list of strings """
i = col - 1 # Columns start at 1, arrays start at 0
nlines = len(colentries)
if len(colentries) == 0:
values = N.array([])
return values
if coltype == type('string'):
values = range(nlines)
for j in range(nlines):
values[j] = colentries[j][i]
if coltype == type(1.0): # Convert floats
values = N.zeros((nlines), N.float64)
for j in range(nlines):
values[j] = float(colentries[j][i])
if coltype == type(1): # Convert Ints
values = N.zeros((nlines), N.int32)
for j in range(nlines):
values[j] = int(colentries[j][i])
return values
def getstrings(col, lines, values):
n = 0
for l in lines:
a = string.split(l)
values[n] = a[col - 1]
n = n + 1
def getints(col, lines, values):
n = 0
for l in lines:
a = string.split(l)
        if string.find(a[col - 1], '.') >= 0:
return -1
else:
values[n] = int(a[col - 1])
n = n + 1
return values
def getfloats(col, lines, values):
n = 0
for l in lines:
a = string.split(l)
values[n] = float(a[col - 1])
n = n + 1
def getcols(d, l, *args):
""" Get multiple columns from SExtractor list using getcol() """
ret = []
for i in range(len(args)):
ret = ret + [getcol(d[args[i]], l)]
return ret
def writeheader(fh, colnames):
""" Write an SExtractor-style header to an open file handle.
:param fh: file handle
:type fh: file
:param colnames: list of column names
:type colnames: list
:todo: add space checking to colnames
:todo: permit passing a filename?
:todo: handle comments
"""
for i in range(len(colnames)):
fh.write('# %d %s\n' % (i + 1, colnames[i]))
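# --- Editor's usage sketch, not part of the original module ---
# A minimal round trip, assuming numpy is available and the current directory
# is writable; the catalog file name below is arbitrary.
if __name__ == '__main__':
    _demo_name = 'demo_sextutils.cat'
    _demo = open(_demo_name, 'w')
    writeheader(_demo, ['NUMBER', 'MAG_AUTO'])
    _demo.write('1 21.3\n')
    _demo.write('2 19.8\n')
    _demo.close()
    # Column names from the header become lower-case attributes holding arrays
    _cat = se_catalog(_demo_name)
    print _cat.number, _cat.mag_auto
    print len(_cat), 'objects'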
|
sniemi/SamPy
|
smnIO/sextutils.py
|
Python
|
bsd-2-clause
| 15,984
|
#
# run all the regression tests
#
# [or, you can request specific ones by giving their names on the command line]
#
import sys
import compile
import os
import subprocess
import context
def run_test (cmd, *args):
cmd = PJ ('tests', cmd)
p = subprocess.Popen ([cmd] + list(args), stdout=subprocess.PIPE)
out = p.stdout.read()
return out
def test_t17():
lines = run_test ('t17').split ('\n')
# we can't say anything about the address returned by malloc
# but we should expect to read this number
assert (lines[1] == '3141')
# and the sizeof (pxll_int) is random too, since even on the same
# platform we might compile 32 or 64 bit, so just ignore it.
def test_t_dump_image():
# generate the output
run_test ('t_dump_image')
# load the image and run it
exp0 = run_test ('t_dump_image','-l')
exp1 = open ('tests/t_dump_image.exp').read()
assert (exp0 == exp1)
def test_t21():
out = run_test ('t21')
exp = open ('gc.c').read()
# make sure the first part matches the contents of gc.c
assert (out[:len(exp)] == exp)
    # the chars are too hard to test for, and unlikely to be wrong.
# should really make a separate char test.
def test_t22():
out = run_test ('t22')
lines = out.split ('\n')
assert (lines[0].count ('<closure pc=') == 5)
r6 = [ str(x) for x in range (6) ]
assert (lines[1:] == (r6 + r6 + ['#u', '']))
def test_t_lex():
out = run_test ('t_lex')
assert (out.split('\n')[-4:] == ['{u0 NUMBER "42"}', '{u0 NEWLINE "\\0x0a"}', '"done"', ''])
def test_t_vm():
out = run_test ('t_vm', 'vm/tests/t11.byc')
assert (out.split()[-2:] == ['7', '#u'])
PJ = os.path.join
if len(sys.argv) > 1:
# run only these specific tests
files = [x + '.scm' for x in sys.argv[1:]]
else:
files = os.listdir ('tests')
# When looking for things that are broken, I prefer to work with the smallest
# test that reproduces a problem. Thus, run the tests in source-size order...
files = [ (os.stat(PJ ('tests', x)).st_size, x) for x in files ]
files.sort()
# tests that need special handling
special = [x[5:] for x in dir() if x.startswith ('test_')]
failed = []
# nobody wants to wait for a non-optimized tak20
optimize = ['t_vm']
succeeded = 0
for size, file in files:
if file.endswith ('.scm'):
base, ext = os.path.splitext (file)
path = os.path.join ('tests', file)
print 'compiling', path
fail = file.startswith ('f')
c = context.context()
c.verbose = False
c.typetype = True
if base in optimize:
c.optimize = True
try:
compile.compile_file (open (path, 'rb'), path, c)
except:
if not fail:
#raise
failed.append ((base, "compile failed"))
else:
succeeded += 1
else:
if fail:
failed.append ((base, 'compile did not fail like expected'))
#raise ValueError ("oops - expected compilation to fail")
if base not in special:
out = run_test (base)
exp_path = PJ ('tests', base + '.exp')
if os.path.isfile (exp_path):
exp = open (exp_path).read()
if out != exp:
failed.append ((base, 'did not match expected output'))
#raise ValueError ("oops - output didn't match on test '%s'" % (base,))
else:
succeeded += 1
else:
succeeded += 1
else:
# tests that require special handling for whatever reason.
try:
eval ('test_%s()' % (base,))
except:
failed.append ((base, 'assertion failed'))
else:
succeeded += 1
print '%d tests passed' % succeeded
if len(failed):
print '%d tests failed!!' % (len(failed))
for base, reason in failed:
print base, reason
|
samrushing/irken-compiler
|
util/run_tests_py.py
|
Python
|
bsd-2-clause
| 4,100
|
"""
Basic framework for acquiring a roach measurement that includes both sweep(s) and stream(s).
Acquire
-Initialize equipment.
-Initialize roach: preload frequencies, if necessary.
-Create state dictionary containing state from all equipment, including temperatures, if possible.
-Run a coarse sweep, if necessary: create a SweepArray and extract resonance frequencies.
-Run fine sweeps to map out resonance frequencies carefully.
If desired, we can combine the data from coarse and fine sweeps into a single SweepArray.
All streams in these sweeps are created with the same roach state, which should not change during the sweeps.
The sweep(s) are created with the experiment state, which should also not change.
Acquire streams:
-Initialize equipment for stream(s).
-Initialize roach for stream(s).
-Create experiment state dictionary.
-Acquire a StreamArray.
-Repeat the stream acquisition as needed
-Instantiate the final measurement with all data, and save it to disk.
-Clean up equipment.
If instead we want to save data as it is collected, we can do that by writing a blank final measurement to disk, then
writing the sub-measurements as they are acquired.
"""
from __future__ import division
import os
import sys
import time
import inspect
import subprocess
import logging
import numpy as np
from kid_readout import settings
from kid_readout.utils import log
from kid_readout.measurement import core, basic
from kid_readout.measurement.io import nc, npy
logger = logging.getLogger(__name__)
# Frequency sweep
def load_baseband_sweep_tones(ri, tone_banks, num_tone_samples):
return ri.set_tone_freqs(freqs=np.vstack(tone_banks), nsamp=num_tone_samples)
def load_heterodyne_sweep_tones(ri, tone_banks, num_tone_samples):
return ri.set_tone_freqs(freqs=np.vstack(tone_banks), nsamp=num_tone_samples)
def run_sweep(ri, tone_banks, num_tone_samples, length_seconds=0, state=None, description='', verbose=False,
wait_for_sync=0.1, **kwargs):
"""
Return a SweepArray acquired using the given tone banks.
Parameters
----------
ri : RoachInterface
An instance of a subclass.
tone_banks : iterable of ndarray (float)
An iterable of arrays (or a 2-D array) of frequencies to use for the sweep.
num_tone_samples : int
The number of samples in the playback buffer; must be a power of two.
length_seconds : float
The duration of each data stream; the default of 0 means the minimum unit of data that can be read out in the
current configuration.
state : dict
The non-roach state to pass to the SweepArray.
description : str
A human-readable description of the measurement.
verbose : bool
If true, print progress messages.
wait_for_sync : float
Sleep for this time in seconds to let the ROACH sync finish.
kwargs
Keyword arguments passed to ri.get_measurement().
Returns
-------
SweepArray
"""
stream_arrays = core.MeasurementList()
if verbose:
print("Measuring bank")
for n, tone_bank in enumerate(tone_banks):
if verbose:
print n,
sys.stdout.flush()
ri.set_tone_freqs(tone_bank, nsamp=num_tone_samples)
ri.select_fft_bins(np.arange(tone_bank.size))
        # Wait briefly to let the ROACH2 sync catch up; the exact delay required is still being determined.
time.sleep(wait_for_sync)
stream_arrays.append(ri.get_measurement(num_seconds=length_seconds, **kwargs))
return basic.SweepArray(stream_arrays, state=state, description=description)
def run_loaded_sweep(ri, length_seconds=0, state=None, description='', tone_bank_indices=None, bin_indices=None,
verbose=False, **kwargs):
"""
Return a SweepArray acquired using previously-loaded tones.
Parameters
----------
ri : RoachInterface
An instance of a subclass.
length_seconds : float
The duration of each data stream; the default of 0 means the minimum unit of data that can be read out in the
current configuration.
state : dict
The non-roach state to pass to the SweepArray.
description : str
A human-readable description of the measurement.
tone_bank_indices : numpy.ndarray[int]
The indices of the tone banks to use in the sweep; the default is to use all existing.
bin_indices : numpy.ndarray[int]
The indices of the filterbank bins to read out; the default is to read out all bins.
verbose : bool
If true, print progress messages.
kwargs
Keyword arguments passed to ri.get_measurement().
Returns
-------
SweepArray
"""
if tone_bank_indices is None:
tone_bank_indices = np.arange(ri.tone_bins.shape[0])
if bin_indices is None:
bin_indices = np.arange(ri.tone_bins.shape[1])
stream_arrays = core.MeasurementList()
if verbose:
print "Measuring bank:",
for tone_bank_index in tone_bank_indices:
if verbose:
print tone_bank_index,
sys.stdout.flush()
ri.select_bank(tone_bank_index)
ri.select_fft_bins(bin_indices)
stream_arrays.append(ri.get_measurement(num_seconds=length_seconds, **kwargs))
return basic.SweepArray(stream_arrays, state=state, description=description)
def run_multipart_sweep(ri, length_seconds=0, state=None, description='', num_tones_read_at_once=32, verbose=False,
**kwargs):
num_tones = ri.tone_bins.shape[1]
num_steps = num_tones // num_tones_read_at_once
if num_steps == 0:
num_steps = 1
indices_to_read = range(num_tones)
parts = []
for step in range(num_steps):
if verbose:
print("running sweep step {} of {}.".format(step,num_steps))
parts.append(run_loaded_sweep(ri, length_seconds=length_seconds, state=state, description=description,
bin_indices=indices_to_read[step::num_steps], **kwargs))
stream_arrays = core.MeasurementList()
for part in parts:
stream_arrays.extend(list(part.stream_arrays))
return basic.SweepArray(stream_arrays, state=state, description=description)
# Metadata
def script_code():
"""
Return the source code of a module running as '__main__'. Acquisition scripts can use this to save their code.
If attempting to load the source code raises an exception, return a string representation of the exception.
Returns
-------
str
The code, with lines separated by newline characters.
"""
try:
return inspect.getsource(sys.modules['__main__'])
except Exception as e:
return str(e)
def git_log():
import kid_readout
kid_readout_directory = os.path.dirname(os.path.abspath(kid_readout.__file__))
try:
return subprocess.check_output(("cd {}; git log -1".format(kid_readout_directory)), shell=True)
except Exception as e:
return str(e)
def git_status():
import kid_readout
kid_readout_directory = os.path.dirname(os.path.abspath(kid_readout.__file__))
try:
return subprocess.check_output(("cd {}; git status --porcelain".format(kid_readout_directory)), shell=True)
except Exception as e:
return str(e)
def all_metadata():
meta = {'script_code': script_code(),
'git_log': git_log(),
'git_status': git_status(),
'cryostat': settings.CRYOSTAT,
'cooldown': settings.COOLDOWN}
return meta
# IO object creation
def new_nc_file(suffix='', directory=settings.BASE_DATA_DIR, metadata=None):
if suffix and not suffix.startswith('_'):
suffix = '_' + suffix
if metadata is None:
metadata = all_metadata()
root_path = os.path.join(directory, time.strftime('%Y-%m-%d_%H%M%S') + suffix + nc.NCFile.EXTENSION)
logger.debug("Creating new NCFile with path %s" % root_path)
return nc.NCFile(root_path, metadata=metadata)
def new_npy_directory(suffix='', directory=settings.BASE_DATA_DIR, metadata=None):
if suffix and not suffix.startswith('_'):
suffix = '_' + suffix
if metadata is None:
metadata = all_metadata()
root_path = os.path.join(directory, time.strftime('%Y-%m-%d_%H%M%S') + suffix + npy.NumpyDirectory.EXTENSION)
logger.debug("Creating new NumpyDirectory with path %s" % root_path)
return npy.NumpyDirectory(root_path, metadata=metadata)
# Interactive checks to be used at the beginning of scripts
def show_settings():
print("cryostat: {}".format(settings.CRYOSTAT))
for k, v in settings.COOLDOWN.items():
print("{}: {}".format(k, v))
raw_input("Press enter to continue or ctrl-C to quit.")
def show_git_status():
print("git status:")
print(git_status())
raw_input("Press enter to continue or ctrl-C to quit.")
# Logging
def get_script_logger(name, level=logging.INFO):
script_logger = logging.getLogger('kid_readout')
script_logger.setLevel(logging.DEBUG)
if log.default_handler not in script_logger.handlers:
stream_handler = log.default_handler
stream_handler.setLevel(level)
script_logger.addHandler(stream_handler)
script_logger.addHandler(log.file_handler(name))
return script_logger
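# --- Editor's sketch, not part of the original module ---
# Illustrates the sweep flow described in the module docstring. Nothing here
# runs on import; 'ri' is assumed to be an already-configured RoachInterface
# subclass instance, and the tone frequencies and sample count are arbitrary
# placeholder values chosen for illustration only.
def _example_sweep(ri):
    tone_banks = [np.array([100.0, 110.0, 120.0]) + offset
                  for offset in np.linspace(-0.05, 0.05, 8)]
    return run_sweep(ri, tone_banks=tone_banks, num_tone_samples=2 ** 16,
                     length_seconds=0.1, state=all_metadata(),
                     description='example sweep (editor sketch)')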
|
ColumbiaCMB/kid_readout
|
kid_readout/measurement/acquire.py
|
Python
|
bsd-2-clause
| 9,283
|
import os
from flask import Flask, request, render_template
from flask.ext.sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.debug = True
app.threaded = True
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DATABASE_URL', 'sqlite:////tmp/test.db')
db = SQLAlchemy(app)
class Email(db.Model):
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String(255))
@app.route('/', methods=['GET', 'POST'])
def main():
if request.method == 'POST':
        email = Email(email=request.form['email'])  # form data lives in request.form; request has no post() method
db.session.add(email)
db.session.commit()
return 'ok'
return render_template('index.html')
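# --- Editor's sketch, not part of the original app ---
# Exercises the POST handler above with Flask's built-in test client. It is
# never called here; the address is a placeholder, and it assumes the
# configured database is reachable and that this legacy Flask-SQLAlchemy
# version (app bound at construction) lets create_all() run without an
# explicit app context.
def _example_post():
    db.create_all()
    with app.test_client() as client:
        response = client.post('/', data={'email': 'someone@example.com'})
        print(response.status_code, response.data)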
if __name__ == '__main__':
port = int(os.environ.get('PORT', 5000))
app.run(host='0.0.0.0', port=port)
|
mjtamlyn/oxford-team
|
old-flask/app.py
|
Python
|
bsd-2-clause
| 767
|
# Author: Leland McInnes <leland.mcinnes@gmail.com>
# Enough simple sparse operations in numba to enable sparse UMAP
#
# License: BSD 3 clause
from __future__ import print_function
import locale
import numpy as np
import numba
from pynndescent.utils import (
tau_rand_int,
make_heap,
new_build_candidates,
deheap_sort,
checked_flagged_heap_push,
apply_graph_updates_high_memory,
apply_graph_updates_low_memory,
)
from pynndescent.sparse import sparse_euclidean
locale.setlocale(locale.LC_NUMERIC, "C")
EMPTY_GRAPH = make_heap(1, 1)
@numba.njit(parallel=True, cache=True)
def generate_leaf_updates(leaf_block, dist_thresholds, inds, indptr, data, dist):
updates = [[(-1, -1, np.inf)] for i in range(leaf_block.shape[0])]
for n in numba.prange(leaf_block.shape[0]):
for i in range(leaf_block.shape[1]):
p = leaf_block[n, i]
if p < 0:
break
for j in range(i + 1, leaf_block.shape[1]):
q = leaf_block[n, j]
if q < 0:
break
from_inds = inds[indptr[p] : indptr[p + 1]]
from_data = data[indptr[p] : indptr[p + 1]]
to_inds = inds[indptr[q] : indptr[q + 1]]
to_data = data[indptr[q] : indptr[q + 1]]
d = dist(from_inds, from_data, to_inds, to_data)
if d < dist_thresholds[p] or d < dist_thresholds[q]:
updates[n].append((p, q, d))
return updates
@numba.njit(locals={"d": numba.float32, "p": numba.int32, "q": numba.int32}, cache=True)
def init_rp_tree(inds, indptr, data, dist, current_graph, leaf_array):
n_leaves = leaf_array.shape[0]
block_size = 65536
n_blocks = n_leaves // block_size
for i in range(n_blocks + 1):
block_start = i * block_size
block_end = min(n_leaves, (i + 1) * block_size)
leaf_block = leaf_array[block_start:block_end]
dist_thresholds = current_graph[1][:, 0]
updates = generate_leaf_updates(
leaf_block, dist_thresholds, inds, indptr, data, dist
)
for j in range(len(updates)):
for k in range(len(updates[j])):
p, q, d = updates[j][k]
if p == -1 or q == -1:
continue
checked_flagged_heap_push(
current_graph[1][p],
current_graph[0][p],
current_graph[2][p],
d,
q,
np.uint8(1),
)
checked_flagged_heap_push(
current_graph[1][q],
current_graph[0][q],
current_graph[2][q],
d,
p,
np.uint8(1),
)
@numba.njit(
fastmath=True,
locals={"d": numba.float32, "i": numba.int32, "idx": numba.int32},
cache=True,
)
def init_random(n_neighbors, inds, indptr, data, heap, dist, rng_state):
n_samples = indptr.shape[0] - 1
for i in range(n_samples):
if heap[0][i, 0] < 0.0:
for j in range(n_neighbors - np.sum(heap[0][i] >= 0.0)):
idx = np.abs(tau_rand_int(rng_state)) % n_samples
from_inds = inds[indptr[idx] : indptr[idx + 1]]
from_data = data[indptr[idx] : indptr[idx + 1]]
to_inds = inds[indptr[i] : indptr[i + 1]]
to_data = data[indptr[i] : indptr[i + 1]]
d = dist(from_inds, from_data, to_inds, to_data)
checked_flagged_heap_push(
heap[1][i], heap[0][i], heap[2][i], d, idx, np.uint8(1)
)
return
@numba.njit(parallel=True, cache=True)
def generate_graph_updates(
new_candidate_block, old_candidate_block, dist_thresholds, inds, indptr, data, dist
):
block_size = new_candidate_block.shape[0]
updates = [[(-1, -1, np.inf)] for i in range(block_size)]
max_candidates = new_candidate_block.shape[1]
for i in numba.prange(block_size):
for j in range(max_candidates):
p = int(new_candidate_block[i, j])
if p < 0:
continue
for k in range(j, max_candidates):
q = int(new_candidate_block[i, k])
if q < 0:
continue
from_inds = inds[indptr[p] : indptr[p + 1]]
from_data = data[indptr[p] : indptr[p + 1]]
to_inds = inds[indptr[q] : indptr[q + 1]]
to_data = data[indptr[q] : indptr[q + 1]]
d = dist(from_inds, from_data, to_inds, to_data)
if d <= dist_thresholds[p] or d <= dist_thresholds[q]:
updates[i].append((p, q, d))
for k in range(max_candidates):
q = int(old_candidate_block[i, k])
if q < 0:
continue
from_inds = inds[indptr[p] : indptr[p + 1]]
from_data = data[indptr[p] : indptr[p + 1]]
to_inds = inds[indptr[q] : indptr[q + 1]]
to_data = data[indptr[q] : indptr[q + 1]]
d = dist(from_inds, from_data, to_inds, to_data)
if d <= dist_thresholds[p] or d <= dist_thresholds[q]:
updates[i].append((p, q, d))
return updates
@numba.njit()
def nn_descent_internal_low_memory_parallel(
current_graph,
inds,
indptr,
data,
n_neighbors,
rng_state,
max_candidates=50,
dist=sparse_euclidean,
n_iters=10,
delta=0.001,
verbose=False,
):
n_vertices = indptr.shape[0] - 1
block_size = 16384
n_blocks = n_vertices // block_size
n_threads = numba.get_num_threads()
for n in range(n_iters):
if verbose:
print("\t", n + 1, " / ", n_iters)
(new_candidate_neighbors, old_candidate_neighbors) = new_build_candidates(
current_graph, max_candidates, rng_state, n_threads
)
c = 0
for i in range(n_blocks + 1):
block_start = i * block_size
block_end = min(n_vertices, (i + 1) * block_size)
new_candidate_block = new_candidate_neighbors[block_start:block_end]
old_candidate_block = old_candidate_neighbors[block_start:block_end]
dist_thresholds = current_graph[1][:, 0]
updates = generate_graph_updates(
new_candidate_block,
old_candidate_block,
dist_thresholds,
inds,
indptr,
data,
dist,
)
c += apply_graph_updates_low_memory(current_graph, updates, n_threads)
if c <= delta * n_neighbors * n_vertices:
if verbose:
print("\tStopping threshold met -- exiting after", n + 1, "iterations")
return
@numba.njit()
def nn_descent_internal_high_memory_parallel(
current_graph,
inds,
indptr,
data,
n_neighbors,
rng_state,
max_candidates=50,
dist=sparse_euclidean,
n_iters=10,
delta=0.001,
verbose=False,
):
n_vertices = indptr.shape[0] - 1
block_size = 16384
n_blocks = n_vertices // block_size
n_threads = numba.get_num_threads()
in_graph = [
set(current_graph[0][i].astype(np.int64))
for i in range(current_graph[0].shape[0])
]
for n in range(n_iters):
if verbose:
print("\t", n + 1, " / ", n_iters)
(new_candidate_neighbors, old_candidate_neighbors) = new_build_candidates(
current_graph, max_candidates, rng_state, n_threads
)
c = 0
for i in range(n_blocks + 1):
block_start = i * block_size
block_end = min(n_vertices, (i + 1) * block_size)
new_candidate_block = new_candidate_neighbors[block_start:block_end]
old_candidate_block = old_candidate_neighbors[block_start:block_end]
dist_thresholds = current_graph[1][:, 0]
updates = generate_graph_updates(
new_candidate_block,
old_candidate_block,
dist_thresholds,
inds,
indptr,
data,
dist,
)
c += apply_graph_updates_high_memory(current_graph, updates, in_graph)
if c <= delta * n_neighbors * n_vertices:
if verbose:
print("\tStopping threshold met -- exiting after", n + 1, "iterations")
return
@numba.njit()
def nn_descent(
inds,
indptr,
data,
n_neighbors,
rng_state,
max_candidates=50,
dist=sparse_euclidean,
n_iters=10,
delta=0.001,
init_graph=EMPTY_GRAPH,
rp_tree_init=True,
leaf_array=None,
low_memory=False,
verbose=False,
):
n_samples = indptr.shape[0] - 1
if init_graph[0].shape[0] == 1: # EMPTY_GRAPH
current_graph = make_heap(n_samples, n_neighbors)
if rp_tree_init:
init_rp_tree(inds, indptr, data, dist, current_graph, leaf_array)
init_random(n_neighbors, inds, indptr, data, current_graph, dist, rng_state)
elif init_graph[0].shape[0] == n_samples and init_graph[0].shape[1] == n_neighbors:
current_graph = init_graph
else:
raise ValueError("Invalid initial graph specified!")
if low_memory:
nn_descent_internal_low_memory_parallel(
current_graph,
inds,
indptr,
data,
n_neighbors,
rng_state,
max_candidates=max_candidates,
dist=dist,
n_iters=n_iters,
delta=delta,
verbose=verbose,
)
else:
nn_descent_internal_high_memory_parallel(
current_graph,
inds,
indptr,
data,
n_neighbors,
rng_state,
max_candidates=max_candidates,
dist=dist,
n_iters=n_iters,
delta=delta,
verbose=verbose,
)
return deheap_sort(current_graph[0], current_graph[1])
|
lmcinnes/pynndescent
|
pynndescent/sparse_nndescent.py
|
Python
|
bsd-2-clause
| 10,204
|
#!/usr/bin/python
import sys
import osxdaemons
COL_RED = "\033[91m"
COL_GRN = "\033[92m"
COL_END = "\033[0m"
load_actions = ["up", "on", "load", "start"]
unload_actions = ["down", "off", "unload", "stop"]
ACTIONS = {act:"load" for act in load_actions}
ACTIONS.update({act:"unload" for act in unload_actions})
def usage():
sname = str(sys.argv[0])
print("Lists or starts/stops macports related services.")
print("Usage: ./" + sname + " [<service name> <verb>] ")
print("Valid verbs: ")
def match_service(sname):
matches = [daemon for daemon in osxdaemons.get_all_daemons() if sname in daemon]
    if not matches:
        print("No services matched: " + sname)
        return None
    if len(matches) > 1:
        print("Matched too many services:\n")
        for match in matches:
            print("> " + match + "\n")
        return None
    #print("Found service: " + matches[0] + "\n")
    return matches[0]
def match_action(action):
if action in ACTIONS:
action = ACTIONS[action]
return action
else:
return None
def service_action(service, action):
    verb = match_action(action)
    if service is None:
        return -1
    if verb:
        print(verb.title() + "ing service: " + service)
        return osxdaemons.do(service, verb)
    else:
        # keep the original string so the message names the unknown verb
        print("Wtf I don't know how to " + action + ".")
        usage()
        return -1
def print_services():
running_daemons = osxdaemons.get_running_daemons()
for daemon in osxdaemons.get_all_daemons():
outs = daemon + " "*(60-len(daemon))
if daemon in running_daemons:
col = COL_GRN
status = "RUNNING"
else:
col = COL_RED
status = "NOT RUNNING"
print(outs + col + status + COL_END)
def main():
if len(sys.argv) == 1:
print_services()
return 0
elif len(sys.argv) == 3:
sname, action = sys.argv[1:3]
sname = match_service(sname)
return service_action(sname, action)
else:
usage()
return 0
if __name__ == "__main__":
sys.exit(main())
|
ZachAnders/MPServices
|
mpservices.py
|
Python
|
bsd-2-clause
| 1,761
|
#! /usr/bin/env python
from peyotl.api import OTI
from peyotl.test.support.pathmap import get_test_ot_service_domains
from peyotl.utility import get_logger
import unittest
import os
_LOG = get_logger(__name__)
@unittest.skipIf('RUN_WEB_SERVICE_TESTS' not in os.environ,
'RUN_WEB_SERVICE_TESTS is not in your environment, so tests that use ' \
'Open Tree of Life web services are disabled.')
class TestOTI(unittest.TestCase):
def setUp(self):
d = get_test_ot_service_domains()
self.oti = OTI(d)
def testFindAllStudies(self):
x = self.oti.find_all_studies(verbose=True)
self.assertTrue(len(x) > 0)
self.assertTrue('ot:studyId' in x[0])
def testStudyTerms(self):
t_set = self.oti.study_search_term_set
self.assertTrue(bool(t_set))
r = self.oti.find_studies({'ot:studyPublication': '10.1073/pnas.0709121104'})
self.assertTrue(len(r) > 0)
def testNodeTerms(self):
if self.oti.use_v1:
t_set = self.oti.node_search_term_set
self.assertTrue('ot:ottId' in t_set)
nl = self.oti.find_nodes(ottId=990437)
self.assertTrue(len(nl) > 0)
f = nl[0]
self.assertTrue('matched_trees' in f)
t = f['matched_trees']
self.assertTrue(len(t) > 0)
tr = t[0]
self.assertTrue('matched_nodes' in tr)
n = tr['matched_nodes']
self.assertTrue(len(n) > 0)
def testBadNodeTerms(self):
if self.oti.use_v1:
qd = {'bogus key': 'Aponogeoton ulvaceus 1 2'}
self.assertRaises(ValueError, self.oti.find_nodes, qd)
def testTreeTerms(self):
qd = {'ot:ottTaxonName': 'Aponogeton ulvaceus'}
if self.oti.use_v1:
nl = self.oti.find_trees(qd)
self.assertTrue(len(nl) > 0)
f = nl[0]
self.assertTrue('matched_trees' in f)
t = f['matched_trees']
self.assertTrue(len(t) > 0)
def testBadTreeTerms(self):
qd = {'bogus key': 'Aponogeoton ulvaceus 1 2'}
self.assertRaises(ValueError, self.oti.find_trees, qd)
if __name__ == "__main__":
unittest.main(verbosity=5)
|
rvosa/peyotl
|
peyotl/test/test_oti.py
|
Python
|
bsd-2-clause
| 2,232
|
from setuptools import setup, find_packages
import sys, os
setup(name='nanoweb',
version="1.0",
description="The nano web framework",
long_description="""\
The nano framework provides some glue for Webob and Routes.""",
classifiers=[],
keywords='WSGI',
author='Eric Moritz',
author_email='eric@themoritzfamily.com',
url='https://github.com/ericmoritz/nanoweb/',
license='BSD',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=True,
zip_safe=False,
install_requires=[
# -*- Extra requirements: -*-
"routes",
"webob",
"json-schema-validator",
],
entry_points="""
# -*- Entry points: -*-
""",
)
|
ericmoritz/nanoweb
|
setup.py
|
Python
|
bsd-2-clause
| 774
|
#!/usr/bin/python
"""nrvr.util.ipaddress - Utilities regarding IP addresses
Class provided by this module is IPAddress.
Works in Linux and Windows.
Idea and first implementation - Leo Baschy <srguiwiz12 AT nrvr DOT com>
Contributor - Nora Baschy
Public repository - https://github.com/srguiwiz/nrvr-commander
Copyright (c) Nirvana Research 2006-2015.
Simplified BSD License"""
import re
class IPAddress(object):
"""Methods for multiple machines on one subnet.
As implemented only supports IPv4."""
octetsRegex = re.compile(r"^\s*([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})\s*$")
@classmethod
def asList(cls, ipaddress, rangeCheck=False):
"""For ipaddress="10.123.45.67" return mutable [10, 123, 45, 67].
If already a list, a copy is made and returned."""
if isinstance(ipaddress, basestring):
octetsMatch = IPAddress.octetsRegex.search(ipaddress)
if not octetsMatch:
raise Exception("won't recognize as IP address: {0}".format(ipaddress))
octets = [octetsMatch.group(1),
octetsMatch.group(2),
octetsMatch.group(3),
octetsMatch.group(4)]
for index, octet in enumerate(octets):
octet = int(octet)
if rangeCheck and octet > 255:
raise Exception("won't recognize as IP address because > 255: {0}".format(ipaddress))
octets[index] = octet
return octets
elif isinstance(ipaddress, (int, long)):
octets = []
while ipaddress:
octets.append(ipaddress % 256)
ipaddress /= 256
octets += [0 for i in range(max(4 - len(octets), 0))]
octets.reverse()
return octets
else:
# force making a copy
return list(ipaddress)
@classmethod
def asTuple(cls, ipaddress):
"""For ipaddress="10.123.45.67" return immutable (10, 123, 45, 67)."""
if isinstance(ipaddress, tuple):
return ipaddress
elif isinstance(ipaddress, list):
return tuple(ipaddress)
else:
return tuple(cls.asList(ipaddress))
@classmethod
def asString(cls, ipaddress):
"""For ipaddress=[10, 123, 45, 67] return "10.123.45.67"."""
if isinstance(ipaddress, basestring):
return ipaddress
if isinstance(ipaddress, (int, long)):
ipaddress = cls.asList(ipaddress)
return ".".join(map(str, ipaddress))
@classmethod
def asInteger(cls, ipaddress):
"""For ipaddress=[10, 123, 45, 67] return 175844675.
At the time of this writing, such an integer however is
not accepted as input by other methods of this class."""
octets = cls.asList(ipaddress) # must make a copy
integer = 0
while octets:
integer = 256 * integer + octets.pop(0)
return integer
@classmethod
def bitAnd(cls, one, other):
if not isinstance(one, (list, tuple)):
one = cls.asList(one)
if not isinstance(other, (list, tuple)):
other = cls.asList(other)
octets = []
for oneOctet, otherOctet in zip(one, other):
octets.append(oneOctet & otherOctet)
return octets
@classmethod
def bitOr(cls, one, other):
if not isinstance(one, (list, tuple)):
one = cls.asList(one)
if not isinstance(other, (list, tuple)):
other = cls.asList(other)
octets = []
for oneOctet, otherOctet in zip(one, other):
octets.append(oneOctet | otherOctet)
return octets
@classmethod
def bitNot(cls, one):
if not isinstance(one, (list, tuple)):
one = cls.asList(one)
octets = []
for oneOctet in one:
octets.append(~oneOctet & 255)
return octets
@classmethod
def nameWithNumber(cls, stem, ipaddress, octets=1, separator="-"):
"""For stem="example" and ipaddress="10.123.45.67" return "example-067".
If octets=2 return "example-045-067"."""
name = stem
ipaddress = IPAddress.asTuple(ipaddress)
if not separator:
# empty string instead of e.g. None
separator = ""
for index in range(-octets, 0):
# create leading zeros, e.g. from "19" to "019"
name += separator + "%03d" % ipaddress[index]
return name
@classmethod
def numberWithinSubnet(cls, oneInSubnet, otherNumber, netmask="255.255.255.0"):
"""For oneInSubnet="10.123.45.67" and otherNumber="89" return [10, 123, 45, 89].
For oneInSubnet="10.123.45.67" and otherNumber="89.34" and netmask="255.255.0.0" return [10, 123, 89, 34]."""
if not isinstance(oneInSubnet, (list, tuple)):
oneInSubnet = cls.asList(oneInSubnet)
# less than stellar decoding of otherNumber, but it works in actual use cases
if isinstance(otherNumber, int):
# in theory handling more than 16 bits' 65536 would be desirable,
# practically handling up to 16 bits' 65535 is enough
if otherNumber <= 255:
otherNumber = [otherNumber]
else:
otherNumber = [otherNumber >> 8, otherNumber & 255]
if not isinstance(otherNumber, (list, tuple)):
otherNumber = otherNumber.split(".")
otherNumber = map(int, otherNumber)
if not isinstance(netmask, (list, tuple)):
netmask = cls.asList(netmask)
complementOfNetmask = cls.bitNot(netmask)
contributedBySubnet = cls.bitAnd(oneInSubnet, netmask)
otherNumber = [0] * (len(contributedBySubnet) - len(otherNumber)) + otherNumber
contributedByNumber = cls.bitAnd(otherNumber, complementOfNetmask)
result = cls.bitOr(contributedBySubnet, contributedByNumber)
return result
if __name__ == "__main__":
print IPAddress.asList("10.123.45.67")
print IPAddress.asList((192, 168, 95, 17))
print IPAddress.asList([192, 168, 95, 17])
print IPAddress.asList(175844675)
print IPAddress.asTuple("10.123.45.67")
print IPAddress.asTuple([192, 168, 95, 17])
print IPAddress.asTuple((192, 168, 95, 17))
print IPAddress.asTuple(175844675)
print IPAddress.asString([192, 168, 95, 17])
print IPAddress.asString((192, 168, 95, 17))
print IPAddress.asString("10.123.45.67")
print IPAddress.asString(175844675)
print IPAddress.asInteger("10.123.45.67")
print IPAddress.asInteger([10,123,45,67])
print IPAddress.bitAnd("10.123.45.67", "255.255.255.0")
print IPAddress.bitOr(IPAddress.bitAnd("10.123.45.67", "255.255.255.0"), "0.0.0.1")
print IPAddress.bitNot("1.2.3.4")
print IPAddress.nameWithNumber("example", "10.123.45.67")
print IPAddress.nameWithNumber("example", "10.123.45.67", octets=2)
print IPAddress.nameWithNumber("example", "10.123.45.67", octets=3)
print IPAddress.nameWithNumber("example", "10.123.45.67", octets=4)
print IPAddress.numberWithinSubnet("10.123.45.67", "89")
print IPAddress.numberWithinSubnet("10.123.45.67", 89)
print IPAddress.numberWithinSubnet("10.123.45.67", "89.34", netmask="255.255.0.0")
print IPAddress.numberWithinSubnet("10.123.45.67", 22818, netmask="255.255.0.0")
|
srguiwiz/nrvr-commander
|
src/nrvr/util/ipaddress.py
|
Python
|
bsd-2-clause
| 7,449
|
from ..errors import ErrorFolderNotFound, ErrorInvalidOperation, ErrorNoPublicFolderReplicaAvailable
from ..util import MNS, create_element
from .common import EWSAccountService, folder_ids_element, parse_folder_elem, shape_element
class GetFolder(EWSAccountService):
"""MSDN: https://docs.microsoft.com/en-us/exchange/client-developer/web-service-reference/getfolder-operation"""
SERVICE_NAME = "GetFolder"
element_container_name = f"{{{MNS}}}Folders"
ERRORS_TO_CATCH_IN_RESPONSE = EWSAccountService.ERRORS_TO_CATCH_IN_RESPONSE + (
ErrorFolderNotFound,
ErrorNoPublicFolderReplicaAvailable,
ErrorInvalidOperation,
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.folders = [] # A hack to communicate parsing args to _elems_to_objs()
def call(self, folders, additional_fields, shape):
"""Take a folder ID and returns the full information for that folder.
:param folders: a list of Folder objects
:param additional_fields: the extra fields that should be returned with the folder, as FieldPath objects
:param shape: The set of attributes to return
:return: XML elements for the folders, in stable order
"""
# We can't easily find the correct folder class from the returned XML. Instead, return objects with the same
# class as the folder instance it was requested with.
self.folders = list(folders) # Convert to a list, in case 'folders' is a generator. We're iterating twice.
return self._elems_to_objs(
self._chunked_get_elements(
self.get_payload,
items=self.folders,
additional_fields=additional_fields,
shape=shape,
)
)
def _elems_to_objs(self, elems):
for folder, elem in zip(self.folders, elems):
if isinstance(elem, Exception):
yield elem
continue
yield parse_folder_elem(elem=elem, folder=folder, account=self.account)
def get_payload(self, folders, additional_fields, shape):
payload = create_element(f"m:{self.SERVICE_NAME}")
payload.append(
shape_element(
tag="m:FolderShape", shape=shape, additional_fields=additional_fields, version=self.account.version
)
)
payload.append(folder_ids_element(folders=folders, version=self.account.version))
return payload
|
ecederstrand/exchangelib
|
exchangelib/services/get_folder.py
|
Python
|
bsd-2-clause
| 2,503
|
"""Base settings shared by all environments.
This is a reusable basic settings file.
"""
from django.conf.global_settings import *
import os
import sys
import re
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
TIME_ZONE = 'GB'
USE_TZ = True
USE_I18N = True
USE_L10N = True
LANGUAGE_CODE = 'en-GB'
LANGUAGES = (
('en-GB', 'British English'),
)
SITE_ID = 1
LOGIN_URL = '/login/'
LOGOUT_URL = '/logout/'
LOGIN_REDIRECT_URL = '/'
STATIC_URL = '/static/'
MEDIA_URL = '/uploads/'
ADMINS = (
('David Seddon', 'david@seddonym.me'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
}
}
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format' : "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s",
'datefmt' : "%d/%b/%Y %H:%M:%S"
},
},
'handlers': {
'error': {
'level':'ERROR',
'class':'logging.handlers.RotatingFileHandler',
# 'filename': ERROR_LOG_PATH, - filled in by handler
'maxBytes': 50000,
'backupCount': 2,
'formatter': 'standard',
},
'debug': {
'level':'DEBUG',
'class':'logging.handlers.RotatingFileHandler',
# 'filename': DEBUG_LOG_PATH, - filled in by handler
'maxBytes': 50000,
'backupCount': 2,
'formatter': 'standard',
},
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler',
'include_html': True,
},
},
'loggers': {
'django': {
'handlers':['error'],
'propagate': True,
'level':'DEBUG',
},
'django.request': {
'handlers': ['mail_admins', 'error'],
'level': 'ERROR',
'propagate': False,
},
'project': {
'handlers':['debug'],
'propagate': True,
'level':'DEBUG',
},
}
}
TEMPLATE_CONTEXT_PROCESSORS += (
'django.core.context_processors.request',
)
ROOT_URLCONF = 'urls'
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
|
seddonym/bobsleigh-seddonym
|
bobsleigh_seddonym/settings/base.py
|
Python
|
bsd-2-clause
| 2,877
|
"""
Application for testing syncing algorithm
(c) 2013-2014 by Mega Limited, Wellsford, New Zealand
This file is part of the MEGA SDK - Client Access Engine.
Applications using the MEGA API must present a valid application key
and comply with the rules set forth in the Terms of Service.
The MEGA SDK is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
@copyright Simplified (2-clause) BSD License.
You should have received a copy of the license along with this
program.
"""
import sys
import os
import time
import shutil
import unittest
import xmlrunner
import subprocess
import re
from sync_test_app import SyncTestApp
from sync_test import SyncTest
import logging
import argparse
class SyncTestMegaCliApp(SyncTestApp):
"""
operates with megacli application
"""
def __init__(self, local_mount_in, local_mount_out, delete_tmp_files=True, use_large_files=True, check_if_alive=True):
"""
local_mount_in: local upsync folder
local_mount_out: local downsync folder
"""
self.work_dir = os.path.join(".", "work_dir")
SyncTestApp.__init__(self, local_mount_in, local_mount_out, self.work_dir, delete_tmp_files, use_large_files)
self.check_if_alive = check_if_alive
def sync(self):
time.sleep(5)
def start(self):
# try to create work dir
return True
def finish(self):
try:
shutil.rmtree(self.work_dir)
except OSError, e:
logging.error("Failed to remove dir: %s (%s)" % (self.work_dir, e))
def is_alive(self):
"""
return True if application instance is running
"""
if not self.check_if_alive:
return True
s = subprocess.Popen(["ps", "axw"], stdout=subprocess.PIPE)
for x in s.stdout:
if re.search("megacli", x):
return True
return False
def pause(self):
"""
pause application
"""
# TODO: implement this !
raise NotImplementedError("Not Implemented !")
def unpause(self):
"""
unpause application
"""
# TODO: implement this !
raise NotImplementedError("Not Implemented !")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--test1", help="test_create_delete_files", action="store_true")
parser.add_argument("--test2", help="test_create_rename_delete_files", action="store_true")
parser.add_argument("--test3", help="test_create_delete_dirs", action="store_true")
parser.add_argument("--test4", help="test_create_rename_delete_dirs", action="store_true")
parser.add_argument("--test5", help="test_sync_files_write", action="store_true")
parser.add_argument("--test6", help="test_local_operations", action="store_true")
parser.add_argument("--test7", help="test_update_mtime", action="store_true")
parser.add_argument("--test8", help="test_create_rename_delete_unicode_files_dirs", action="store_true")
parser.add_argument("-a", "--all", help="run all tests", action="store_true")
parser.add_argument("-b", "--basic", help="run basic, stable tests", action="store_true")
parser.add_argument("-d", "--debug", help="use debug output", action="store_true")
parser.add_argument("-l", "--large", help="use large files for testing", action="store_true")
parser.add_argument("-n", "--nodelete", help="Do not delete work files", action="store_false")
parser.add_argument("-c", "--check", help="Do not check if megacli is running (useful, if other application is used for testing)", action="store_false")
parser.add_argument("upsync_dir", help="local upsync directory")
parser.add_argument("downsync_dir", help="local downsync directory")
args = parser.parse_args()
if args.debug:
lvl = logging.DEBUG
else:
lvl = logging.INFO
if args.all:
args.test1 = args.test2 = args.test3 = args.test4 = args.test5 = args.test6 = args.test7 = args.test8 = True
if args.basic:
args.test1 = args.test2 = args.test3 = args.test4 = True
logging.StreamHandler(sys.stdout)
logging.basicConfig(format='[%(asctime)s] %(message)s', datefmt='%Y-%m-%d %H:%M:%S', level=lvl)
logging.info("")
logging.info("1) Start the first [megacli] and run the following command: sync " + args.upsync_dir + " [remote folder]")
logging.info("2) Start the second [megacli] and run the following command: sync " + args.downsync_dir + " [remote folder]")
logging.info("3) Wait for both folders get fully synced")
logging.info("4) Run: python %s", sys.argv[0])
logging.info("")
time.sleep(5)
with SyncTestMegaCliApp(args.upsync_dir, args.downsync_dir, args.nodelete, args.large, args.check) as app:
suite = unittest.TestSuite()
if args.test1:
suite.addTest(SyncTest("test_create_delete_files", app))
if args.test2:
suite.addTest(SyncTest("test_create_rename_delete_files", app))
if args.test3:
suite.addTest(SyncTest("test_create_delete_dirs", app, ))
if args.test4:
suite.addTest(SyncTest("test_create_rename_delete_dirs", app))
if args.test5:
suite.addTest(SyncTest("test_sync_files_write", app))
if args.test6:
suite.addTest(SyncTest("test_local_operations", app))
if args.test7:
suite.addTest(SyncTest("test_update_mtime", app))
if args.test8:
suite.addTest(SyncTest("test_create_rename_delete_unicode_files_dirs", app))
testRunner = xmlrunner.XMLTestRunner(output='test-reports')
testRunner.run(suite)
|
wizzard/sdk
|
tests/sync_test_megacli.py
|
Python
|
bsd-2-clause
| 5,819
|
from __future__ import print_function, absolute_import
import weakref
class PDroneCreator(object):
def __init__(self, mainwindow, clipboard, title="drones"):
self._mainwindow = mainwindow
self._clipboard = clipboard
self._subwin = mainwindow.newSubWindow(title)
from . import PTree
self._tree = PTree(self._subwin.wrapwidget(), self._select_drone)
self._subwin.setWidget(self._tree.widget())
def _select_drone(self, dronetype):
dronetype = ".".join(dronetype)
self._clipboard.set_dragboard_value("drone", dronetype)
def append(self, dronename):
key = tuple(dronename.split("."))
self._tree.append(key)
def remove(self, dronename):
key = tuple(dronename.split("."))
self._tree.remove(key)
|
agoose77/hivesystem
|
hiveguilib/PGui/PDroneCreator.py
|
Python
|
bsd-2-clause
| 807
|
#!/usr/bin/env python
import os
from setuptools import setup
def read(fname):
with open(os.path.join(os.path.dirname(__file__), fname)) as f:
return f.read()
setup(name='django-darkknight',
version='0.9.0',
license="BSD",
description="He's a silent guardian, a watchful protector",
long_description=read('README.rst'),
author="Fusionbox, Inc",
author_email="programmers@fusionbox.com",
url='http://github.com/fusionbox/django-darkknight',
packages=['darkknight', 'darkknight_gpg'],
install_requires=[
'django-dotenv',
'Django>=1.5',
'pyOpenSSL',
'django-localflavor',
'django-countries',
],
    extras_require={
'gpg': ['gnupg>=2.0.2,<3', 'django-apptemplates'],
},
classifiers=[
"Development Status :: 4 - Beta",
"Framework :: Django",
"Intended Audience :: Developers",
"Intended Audience :: Information Technology",
"License :: OSI Approved :: BSD License",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content",
"Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
"Topic :: Security :: Cryptography",
],
)
|
fusionbox/django-darkknight
|
setup.py
|
Python
|
bsd-2-clause
| 1,503
|
#!/usr/bin/env python
"""Dictionary-based password generator.
Usage: pass.py [options]
Options:
-h --help Show this help text
-d --dictionary=<path> Specify a non-default dictionary
-n --length=N Specify number of words to use [default: 4]
-v --verbose Print entropy estimate
--complex Bypass complexity requirements
--truncate=SIZE Truncate dictionary to specified size
--uncontrolled Generate a naively-random password from the list
The default mode ensures words are spread throughout the list, slightly
reducing absolute entropy but generally improving password memorability if the
dictionary is ordered by frequency.
"""
import math
import os
from docopt import docopt
from secrets import SystemRandom
def main():
# Normalize arguments
args = docopt(__doc__)
word_count = int(args['--length'])
# Read and transform dictionary file
if args['--dictionary']:
dict_path = args['--dictionary']
else:
dict_path = os.path.join(os.path.dirname(__file__), 'words.txt')
dictionary = [w for w in [l.strip() for l in open(dict_path)] if w]
if args['--truncate']:
dictionary = dictionary[:int(args['--truncate'])]
elif not args['--dictionary']:
# Default truncation for built-in dictionary
dictionary = dictionary[:8192]
# Basic entropy calculation
if args['--uncontrolled']:
entropy = math.log(math.pow(len(dictionary), word_count), 2)
else:
batch_size = len(dictionary) // word_count
entropy = math.log(math.pow(batch_size, word_count) *
math.factorial(word_count), 2)
if args['--verbose']:
print("Pessimistic password entropy: %.1f bits" % entropy)
print("Approximate time to crack at 20k/s: %.1f days" %
(math.pow(2, entropy) / 20000 / 60 / 60 / 24))
# Generate password
rng = SystemRandom()
if args['--uncontrolled']:
# Select random words
words = [rng.choice(dictionary) for i in range(word_count)]
else:
# Generate batches in random order
batches = [dictionary[i*batch_size:(i+1)*batch_size]
for i in range(word_count)]
rng.shuffle(batches)
# Select word from each batch
words = [rng.choice(batches[i]) for i in range(word_count)]
# Reveal to user
print(" ".join(words))
if args['--complex']:
print("Complexified: %s1." % "".join(words).capitalize())
if __name__ == '__main__':
main()
|
drakedevel/pass
|
passgen/__init__.py
|
Python
|
bsd-2-clause
| 2,565
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
PATH = os.path.dirname(os.path.abspath(__file__))
|
damonchen/chan
|
chan/core/templates/config.py
|
Python
|
bsd-2-clause
| 109
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2016, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'hyperlink15.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with hyperlinks.This example doesn't have any link formatting and tests the relationshiplinkage code."""
workbook = Workbook(self.got_filename)
# Turn off default URL format for testing.
workbook.default_url_format = None
worksheet = workbook.add_worksheet()
worksheet.write_url('B2', 'external:subdir/blank.xlsx')
workbook.close()
self.assertExcelEqual()
|
jkyeung/XlsxWriter
|
xlsxwriter/test/comparison/test_hyperlink15.py
|
Python
|
bsd-2-clause
| 1,235
|
"""
ROC Analysis Widget
-------------------
"""
import operator
from functools import reduce, wraps
from collections import namedtuple, deque
import numpy
import sklearn.metrics as skl_metrics
from PyQt4 import QtGui
from PyQt4.QtGui import QColor, QPen, QBrush
from PyQt4.QtCore import Qt
import pyqtgraph as pg
import Orange
from Orange.widgets import widget, gui, settings
from Orange.widgets.utils import colorpalette, colorbrewer
#: Points on a ROC curve
ROCPoints = namedtuple(
"ROCPoints",
["fpr", # (N,) array of false positive rate coordinates (ascending)
"tpr", # (N,) array of true positive rate coordinates
"thresholds" # (N,) array of thresholds (in descending order)
]
)
ROCPoints.is_valid = property(lambda self: self.fpr.size > 0)
#: ROC Curve and its convex hull
ROCCurve = namedtuple(
"ROCCurve",
["points", # ROCPoints
"hull" # ROCPoints of the convex hull
]
)
ROCCurve.is_valid = property(lambda self: self.points.is_valid)
#: A ROC Curve averaged vertically
ROCAveragedVert = namedtuple(
"ROCAveragedVert",
["points", # ROCPoints sampled by fpr
"hull", # ROCPoints of the convex hull
"tpr_std", # array standard deviation of tpr at each fpr point
]
)
ROCAveragedVert.is_valid = property(lambda self: self.points.is_valid)
#: A ROC Curve averaged by thresholds
ROCAveragedThresh = namedtuple(
"ROCAveragedThresh",
["points", # ROCPoints sampled by threshold
"hull", # ROCPoints of the convex hull
"tpr_std", # array standard deviations of tpr at each threshold
"fpr_std" # array standard deviations of fpr at each threshold
]
)
ROCAveragedThresh.is_valid = property(lambda self: self.points.is_valid)
#: Combined data for a ROC curve of a single algorithm
ROCData = namedtuple(
"ROCData",
["merged", # ROCCurve merged over all folds
"folds", # ROCCurve list, one for each fold
"avg_vertical", # ROCAveragedVert
"avg_threshold", # ROCAveragedThresh
]
)
def ROCData_from_results(results, clf_index, target):
"""
Compute ROC Curve(s) from evaluation results.
:param Orange.evaluation.Results results:
Evaluation results.
:param int clf_index:
Learner index in the `results`.
:param int target:
Target class index (i.e. positive class).
    :rtype: ROCData
        An instance holding the computed curves.
"""
merged = roc_curve_for_fold(results, slice(0, -1), clf_index, target)
merged_curve = ROCCurve(ROCPoints(*merged),
ROCPoints(*roc_curve_convex_hull(merged)))
folds = results.folds if results.folds is not None else [slice(0, -1)]
fold_curves = []
for fold in folds:
# TODO: Check for no FP or no TP
points = roc_curve_for_fold(results, fold, clf_index, target)
hull = roc_curve_convex_hull(points)
c = ROCCurve(ROCPoints(*points), ROCPoints(*hull))
fold_curves.append(c)
curves = [fold.points for fold in fold_curves
if fold.is_valid]
fpr, tpr, std = roc_curve_vertical_average(curves)
thresh = numpy.zeros_like(fpr) * numpy.nan
hull = roc_curve_convex_hull((fpr, tpr, thresh))
v_avg = ROCAveragedVert(
ROCPoints(fpr, tpr, thresh),
ROCPoints(*hull),
std
)
all_thresh = numpy.hstack([t for _, _, t in curves])
all_thresh = numpy.clip(all_thresh, 0.0 - 1e-10, 1.0 + 1e-10)
all_thresh = numpy.unique(all_thresh)[::-1]
thresh = all_thresh[::max(all_thresh.size // 10, 1)]
(fpr, fpr_std), (tpr, tpr_std) = \
roc_curve_threshold_average(curves, thresh)
hull = roc_curve_convex_hull((fpr, tpr, thresh))
t_avg = ROCAveragedThresh(
ROCPoints(fpr, tpr, thresh),
ROCPoints(*hull),
tpr_std,
fpr_std
)
return ROCData(merged_curve, fold_curves, v_avg, t_avg)
ROCData.from_results = staticmethod(ROCData_from_results)
#: A curve item to be displayed in a plot
PlotCurve = namedtuple(
"PlotCurve",
["curve", # ROCCurve source curve
"curve_item", # pg.PlotDataItem main curve
"hull_item" # pg.PlotDataItem curve's convex hull
]
)
def plot_curve(curve, pen=None, shadow_pen=None, symbol="+",
symbol_size=3, name=None):
"""
Construct a `PlotCurve` for the given `ROCCurve`.
:param ROCCurve curve:
Source curve.
The other parameters are passed to pg.PlotDataItem
:rtype: PlotCurve
"""
def extend_to_origin(points):
"Extend ROCPoints to include coordinate origin if not already present"
if points.tpr.size and (points.tpr[0] > 0 or points.fpr[0] > 0):
points = ROCPoints(
numpy.r_[0, points.fpr], numpy.r_[0, points.tpr],
numpy.r_[points.thresholds[0] + 1, points.thresholds]
)
return points
points = extend_to_origin(curve.points)
item = pg.PlotCurveItem(
points.fpr, points.tpr, pen=pen, shadowPen=shadow_pen,
name=name, antialias=True
)
sp = pg.ScatterPlotItem(
curve.points.fpr, curve.points.tpr, symbol=symbol,
size=symbol_size, pen=shadow_pen,
name=name
)
sp.setParentItem(item)
hull = extend_to_origin(curve.hull)
hull_item = pg.PlotDataItem(
hull.fpr, hull.tpr, pen=pen, antialias=True
)
return PlotCurve(curve, item, hull_item)
PlotCurve.from_roc_curve = staticmethod(plot_curve)
#: A curve displayed in a plot with error bars
PlotAvgCurve = namedtuple(
"PlotAvgCurve",
["curve", # ROCCurve
"curve_item", # pg.PlotDataItem
"hull_item", # pg.PlotDataItem
"confint_item", # pg.ErrorBarItem
]
)
def plot_avg_curve(curve, pen=None, shadow_pen=None, symbol="+",
symbol_size=4, name=None):
"""
Construct a `PlotAvgCurve` for the given `curve`.
:param curve: Source curve.
:type curve: ROCAveragedVert or ROCAveragedThresh
The other parameters are passed to pg.PlotDataItem
:rtype: PlotAvgCurve
"""
pc = plot_curve(curve, pen=pen, shadow_pen=shadow_pen, symbol=symbol,
symbol_size=symbol_size, name=name)
points = curve.points
if isinstance(curve, ROCAveragedVert):
tpr_std = curve.tpr_std
error_item = pg.ErrorBarItem(
x=points.fpr[1:-1], y=points.tpr[1:-1],
height=2 * tpr_std[1:-1],
pen=pen, beam=0.025,
antialias=True,
)
elif isinstance(curve, ROCAveragedThresh):
tpr_std, fpr_std = curve.tpr_std, curve.fpr_std
error_item = pg.ErrorBarItem(
x=points.fpr[1:-1], y=points.tpr[1:-1],
height=2 * tpr_std[1:-1], width=2 * fpr_std[1:-1],
pen=pen, beam=0.025,
antialias=True,
)
return PlotAvgCurve(curve, pc.curve_item, pc.hull_item, error_item)
PlotAvgCurve.from_roc_curve = staticmethod(plot_avg_curve)
Some = namedtuple("Some", ["val"])
def once(f):
"""
    Return a function that will be called only once, and its result cached.
"""
cached = None
    @wraps(f)
    def wrapped():
        nonlocal cached
        if cached is None:
            cached = Some(f())
        return cached.val
    return wrapped
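# Illustrative use of `once` (hypothetical helper name): the decorated callable
# runs at most one time and later calls return the cached value, even if that
# value happens to be None.
#   @once
#   def curves():
#       return build_plot_items()   # hypothetical expensive factory
#   curves(); curves()              # second call reuses the cached result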
plot_curves = namedtuple(
"plot_curves",
["merge", # :: () -> PlotCurve
"folds", # :: () -> [PlotCurve]
"avg_vertical", # :: () -> PlotAvgCurve
"avg_threshold", # :: () -> PlotAvgCurve
]
)
class InfiniteLine(pg.InfiniteLine):
"""pyqtgraph.InfiniteLine extended to support antialiasing.
"""
def __init__(self, pos=None, angle=90, pen=None, movable=False,
bounds=None, antialias=False):
super().__init__(pos, angle, pen, movable, bounds)
self.antialias = antialias
def paint(self, painter, *args):
if self.antialias:
painter.setRenderHint(QtGui.QPainter.Antialiasing, True)
super().paint(painter, *args)
class OWROCAnalysis(widget.OWWidget):
name = "ROC Analysis"
description = ("Displays Receiver Operating Characteristics curve " +
"based on evaluation of classifiers.")
icon = "icons/ROCAnalysis.svg"
priority = 1010
inputs = [
{"name": "Evaluation Results",
"type": Orange.evaluation.Results,
"handler": "set_results"}
]
target_index = settings.Setting(0)
selected_classifiers = []
display_perf_line = settings.Setting(True)
display_def_threshold = settings.Setting(True)
fp_cost = settings.Setting(500)
fn_cost = settings.Setting(500)
target_prior = settings.Setting(50.0)
#: ROC Averaging Types
Merge, Vertical, Threshold, NoAveraging = 0, 1, 2, 3
roc_averaging = settings.Setting(Merge)
display_convex_hull = settings.Setting(False)
display_convex_curve = settings.Setting(False)
def __init__(self, parent=None):
super().__init__(parent)
self.results = None
self.classifier_names = []
self.perf_line = None
self.colors = []
self._curve_data = {}
self._plot_curves = {}
self._rocch = None
self._perf_line = None
box = gui.widgetBox(self.controlArea, "Plot")
tbox = gui.widgetBox(box, "Target Class")
tbox.setFlat(True)
self.target_cb = gui.comboBox(
tbox, self, "target_index", callback=self._on_target_changed)
cbox = gui.widgetBox(box, "Classifiers")
cbox.setFlat(True)
self.classifiers_list_box = gui.listBox(
cbox, self, "selected_classifiers", "classifier_names",
selectionMode=QtGui.QListView.MultiSelection,
callback=self._on_classifiers_changed)
abox = gui.widgetBox(box, "Combine ROC Curves From Folds")
abox.setFlat(True)
gui.comboBox(abox, self, "roc_averaging",
items=["Merge predictions from folds", "Mean TP rate",
"Mean TP and FP at threshold", "Show individual curves"],
callback=self._replot)
hbox = gui.widgetBox(box, "ROC Convex Hull")
hbox.setFlat(True)
gui.checkBox(hbox, self, "display_convex_curve",
"Show convex ROC curves", callback=self._replot)
gui.checkBox(hbox, self, "display_convex_hull",
"Show ROC convex hull", callback=self._replot)
box = gui.widgetBox(self.controlArea, "Analysis")
gui.checkBox(box, self, "display_def_threshold",
"Default threshold (0.5) point",
callback=self._on_display_def_threshold_changed)
gui.checkBox(box, self, "display_perf_line", "Show performance line",
callback=self._on_display_perf_line_changed)
grid = QtGui.QGridLayout()
ibox = gui.indentedBox(box, orientation=grid)
sp = gui.spin(box, self, "fp_cost", 1, 1000, 10,
callback=self._on_display_perf_line_changed)
grid.addWidget(QtGui.QLabel("FP Cost"), 0, 0)
grid.addWidget(sp, 0, 1)
sp = gui.spin(box, self, "fn_cost", 1, 1000, 10,
callback=self._on_display_perf_line_changed)
grid.addWidget(QtGui.QLabel("FN Cost"))
grid.addWidget(sp, 1, 1)
sp = gui.spin(box, self, "target_prior", 1, 99,
callback=self._on_display_perf_line_changed)
sp.setSuffix("%")
sp.addAction(QtGui.QAction("Auto", sp))
grid.addWidget(QtGui.QLabel("Prior target class probability"))
grid.addWidget(sp, 2, 1)
self.plotview = pg.GraphicsView(background="w")
self.plotview.setFrameStyle(QtGui.QFrame.StyledPanel)
self.plot = pg.PlotItem()
self.plot.getViewBox().setMenuEnabled(False)
self.plot.getViewBox().setMouseEnabled(False, False)
pen = QPen(self.palette().color(QtGui.QPalette.Text))
tickfont = QtGui.QFont(self.font())
tickfont.setPixelSize(max(int(tickfont.pixelSize() * 2 // 3), 11))
axis = self.plot.getAxis("bottom")
axis.setTickFont(tickfont)
axis.setPen(pen)
axis.setLabel("FP Rate (1-Specificity)")
axis = self.plot.getAxis("left")
axis.setTickFont(tickfont)
axis.setPen(pen)
axis.setLabel("TP Rate (Sensitivity)")
self.plot.showGrid(True, True, alpha=0.1)
self.plot.setRange(xRange=(0.0, 1.0), yRange=(0.0, 1.0))
self.plotview.setCentralItem(self.plot)
self.mainArea.layout().addWidget(self.plotview)
def set_results(self, results):
"""Set the input evaluation results."""
self.clear()
self.error(0)
if results is not None:
if results.data is None:
self.error(0, "Give me data!!")
results = None
elif not isinstance(results.data.domain.class_var,
Orange.data.DiscreteVariable):
self.error(0, "Need discrete class variable")
results = None
self.results = results
if results is not None:
self._initialize(results)
self._setup_plot()
def clear(self):
"""Clear the widget state."""
self.results = None
self.plot.clear()
self.classifier_names = []
self.selected_classifiers = []
self.target_cb.clear()
self.target_index = 0
self.colors = []
self._curve_data = {}
self._plot_curves = {}
self._rocch = None
self._perf_line = None
def _initialize(self, results):
names = getattr(results, "learner_names", None)
if names is None:
names = ["#{}".format(i + 1)
for i in range(len(results.predicted))]
self.colors = colorpalette.ColorPaletteGenerator(
len(names), colorbrewer.colorSchemes["qualitative"]["Dark2"])
self.classifier_names = names
self.selected_classifiers = list(range(len(names)))
for i in range(len(names)):
listitem = self.classifiers_list_box.item(i)
listitem.setIcon(colorpalette.ColorPixmap(self.colors[i]))
class_var = results.data.domain.class_var
self.target_cb.addItems(class_var.values)
def curve_data(self, target, clf_idx):
"""Return `ROCData' for the given target and classifier."""
if (target, clf_idx) not in self._curve_data:
data = ROCData.from_results(self.results, clf_idx, target)
self._curve_data[target, clf_idx] = data
return self._curve_data[target, clf_idx]
def plot_curves(self, target, clf_idx):
"""Return a set of functions `plot_curves` generating plot curves."""
def generate_pens(basecolor):
pen = QPen(basecolor, 1)
pen.setCosmetic(True)
shadow_pen = QPen(pen.color().lighter(160), 2.5)
shadow_pen.setCosmetic(True)
return pen, shadow_pen
data = self.curve_data(target, clf_idx)
if (target, clf_idx) not in self._plot_curves:
pen, shadow_pen = generate_pens(self.colors[clf_idx])
name = self.classifier_names[clf_idx]
@once
def merged():
return plot_curve(
data.merged, pen=pen, shadow_pen=shadow_pen, name=name)
@once
def folds():
return [plot_curve(fold, pen=pen, shadow_pen=shadow_pen)
for fold in data.folds]
@once
def avg_vert():
return plot_avg_curve(data.avg_vertical, pen=pen,
shadow_pen=shadow_pen, name=name)
@once
def avg_thres():
return plot_avg_curve(data.avg_threshold, pen=pen,
shadow_pen=shadow_pen, name=name)
self._plot_curves[target, clf_idx] = plot_curves(
merge=merged, folds=folds,
avg_vertical=avg_vert, avg_threshold=avg_thres
)
return self._plot_curves[target, clf_idx]
def _setup_plot(self):
target = self.target_index
selected = self.selected_classifiers
curves = [self.plot_curves(target, i) for i in selected]
selected = [self.curve_data(target, i) for i in selected]
if self.roc_averaging == OWROCAnalysis.Merge:
for curve in curves:
graphics = curve.merge()
curve = graphics.curve
self.plot.addItem(graphics.curve_item)
if self.display_convex_curve:
self.plot.addItem(graphics.hull_item)
if self.display_def_threshold:
points = curve.points
ind = numpy.argmin(numpy.abs(points.thresholds - 0.5))
item = pg.TextItem(
text="{:.3f}".format(points.thresholds[ind]),
)
item.setPos(points.fpr[ind], points.tpr[ind])
self.plot.addItem(item)
hull_curves = [curve.merged.hull for curve in selected]
if hull_curves:
self._rocch = convex_hull(hull_curves)
iso_pen = QPen(QColor(Qt.black), 1)
iso_pen.setCosmetic(True)
self._perf_line = InfiniteLine(pen=iso_pen, antialias=True)
self.plot.addItem(self._perf_line)
elif self.roc_averaging == OWROCAnalysis.Vertical:
for curve in curves:
graphics = curve.avg_vertical()
self.plot.addItem(graphics.curve_item)
self.plot.addItem(graphics.confint_item)
hull_curves = [curve.avg_vertical.hull for curve in selected]
elif self.roc_averaging == OWROCAnalysis.Threshold:
for curve in curves:
graphics = curve.avg_threshold()
self.plot.addItem(graphics.curve_item)
self.plot.addItem(graphics.confint_item)
hull_curves = [curve.avg_threshold.hull for curve in selected]
elif self.roc_averaging == OWROCAnalysis.NoAveraging:
for curve in curves:
graphics = curve.folds()
for fold in graphics:
self.plot.addItem(fold.curve_item)
if self.display_convex_curve:
self.plot.addItem(fold.hull_item)
hull_curves = [fold.hull for curve in selected for fold in curve.folds]
if self.display_convex_hull and hull_curves:
hull = convex_hull(hull_curves)
hull_pen = QPen(QColor(200, 200, 200, 100), 2)
hull_pen.setCosmetic(True)
item = self.plot.plot(
hull.fpr, hull.tpr,
pen=hull_pen,
brush=QBrush(QColor(200, 200, 200, 50)),
fillLevel=0)
item.setZValue(-10000)
pen = QPen(QColor(100, 100, 100, 100), 1, Qt.DashLine)
pen.setCosmetic(True)
self.plot.plot([0, 1], [0, 1], pen=pen, antialias=True)
if self.roc_averaging == OWROCAnalysis.Merge:
self._update_perf_line()
def _on_target_changed(self):
self.plot.clear()
self._setup_plot()
def _on_classifiers_changed(self):
self.plot.clear()
if self.results is not None:
self._setup_plot()
def _on_display_perf_line_changed(self):
if self.roc_averaging == OWROCAnalysis.Merge:
self._update_perf_line()
if self.perf_line is not None:
self.perf_line.setVisible(self.display_perf_line)
def _on_display_def_threshold_changed(self):
self._replot()
def _replot(self):
self.plot.clear()
if self.results is not None:
self._setup_plot()
def _update_perf_line(self):
if self._perf_line is None:
return
self._perf_line.setVisible(self.display_perf_line)
if self.display_perf_line:
m = roc_iso_performance_slope(
self.fp_cost, self.fn_cost, self.target_prior / 100.0)
hull = self._rocch
ind = roc_iso_performance_line(m, hull)
angle = numpy.arctan2(m, 1) # in radians
self._perf_line.setAngle(angle * 180 / numpy.pi)
self._perf_line.setPos((hull.fpr[ind[0]], hull.tpr[ind[0]]))
def onDeleteWidget(self):
self.clear()
def interp(x, xp, fp, left=None, right=None):
"""
Like numpy.interp except for handling of running sequences of
same values in `xp`.
"""
x = numpy.asanyarray(x)
xp = numpy.asanyarray(xp)
fp = numpy.asanyarray(fp)
if xp.shape != fp.shape:
raise ValueError("xp and fp must have the same shape")
ind = numpy.searchsorted(xp, x, side="right")
fx = numpy.zeros(len(x))
under = ind == 0
over = ind == len(xp)
between = ~under & ~over
fx[under] = left if left is not None else fp[0]
fx[over] = right if right is not None else fp[-1]
if right is not None:
# Fix points exactly on the right boundary.
fx[x == xp[-1]] = fp[-1]
ind = ind[between]
df = (fp[ind] - fp[ind - 1]) / (xp[ind] - xp[ind - 1])
fx[between] = df * (x[between] - xp[ind]) + fp[ind]
return fx
def roc_curve_for_fold(res, fold, clf_idx, target):
fold_actual = res.actual[fold]
P = numpy.sum(fold_actual == target)
N = fold_actual.size - P
if P == 0 or N == 0:
# Undefined TP and FP rate
return numpy.array([]), numpy.array([]), numpy.array([])
fold_probs = res.probabilities[clf_idx][fold][:, target]
return skl_metrics.roc_curve(
fold_actual, fold_probs, pos_label=target
)
def roc_curve_vertical_average(curves, samples=10):
fpr_sample = numpy.linspace(0.0, 1.0, samples)
tpr_samples = []
for fpr, tpr, _ in curves:
tpr_samples.append(interp(fpr_sample, fpr, tpr, left=0, right=1))
tpr_samples = numpy.array(tpr_samples)
return fpr_sample, tpr_samples.mean(axis=0), tpr_samples.std(axis=0)
def roc_curve_threshold_average(curves, thresh_samples):
fpr_samples, tpr_samples = [], []
for fpr, tpr, thresh in curves:
ind = numpy.searchsorted(thresh[::-1], thresh_samples, side="left")
ind = ind[::-1]
ind = numpy.clip(ind, 0, len(thresh) - 1)
fpr_samples.append(fpr[ind])
tpr_samples.append(tpr[ind])
fpr_samples = numpy.array(fpr_samples)
tpr_samples = numpy.array(tpr_samples)
return ((fpr_samples.mean(axis=0), fpr_samples.std(axis=0)),
            (tpr_samples.mean(axis=0), tpr_samples.std(axis=0)))
def roc_curve_threshold_average_interp(curves, thresh_samples):
fpr_samples, tpr_samples = [], []
for fpr, tpr, thresh in curves:
thresh = thresh[::-1]
fpr = interp(thresh_samples, thresh, fpr[::-1], left=1.0, right=0.0)
tpr = interp(thresh_samples, thresh, tpr[::-1], left=1.0, right=0.0)
fpr_samples.append(fpr)
tpr_samples.append(tpr)
fpr_samples = numpy.array(fpr_samples)
tpr_samples = numpy.array(tpr_samples)
return ((fpr_samples.mean(axis=0), fpr_samples.std(axis=0)),
            (tpr_samples.mean(axis=0), tpr_samples.std(axis=0)))
roc_point = namedtuple("roc_point", ["fpr", "tpr", "threshold"])
def roc_curve_convex_hull(curve):
def slope(p1, p2):
x1, y1, _ = p1
x2, y2, _ = p2
if x1 != x2:
return (y2 - y1) / (x2 - x1)
else:
return numpy.inf
fpr, _, _ = curve
if len(fpr) <= 2:
return curve
points = map(roc_point._make, zip(*curve))
hull = deque([next(points)])
for point in points:
while True:
if len(hull) < 2:
hull.append(point)
break
else:
last = hull[-1]
if point.fpr != last.fpr and \
slope(hull[-2], last) > slope(last, point):
hull.append(point)
break
else:
hull.pop()
fpr = numpy.array([p.fpr for p in hull])
tpr = numpy.array([p.tpr for p in hull])
thres = numpy.array([p.threshold for p in hull])
return (fpr, tpr, thres)
def convex_hull(curves):
def slope(p1, p2):
x1, y1, *_ = p1
x2, y2, *_ = p2
if x1 != x2:
return (y2 - y1) / (x2 - x1)
else:
return numpy.inf
curves = [list(map(roc_point._make, zip(*curve))) for curve in curves]
merged_points = reduce(operator.iadd, curves, [])
merged_points = sorted(merged_points)
if len(merged_points) == 0:
return ROCPoints(numpy.array([]), numpy.array([]), numpy.array([]))
if len(merged_points) <= 2:
return ROCPoints._make(map(numpy.array, zip(*merged_points)))
points = iter(merged_points)
hull = deque([next(points)])
for point in points:
while True:
if len(hull) < 2:
hull.append(point)
break
else:
last = hull[-1]
if point[0] != last[0] and \
slope(hull[-2], last) > slope(last, point):
hull.append(point)
break
else:
hull.pop()
return ROCPoints._make(map(numpy.array, zip(*hull)))
def roc_iso_performance_line(slope, hull, tol=1e-5):
"""
Return the indices where a line with `slope` touches the ROC convex hull.
"""
fpr, tpr, *_ = hull
# Compute the distance of each point to a reference iso line
# going through point (0, 1). The point(s) with the minimum
# distance are our result
# y = m * x + 1
    # m * x - y + 1 = 0
a, b, c = slope, -1, 1
dist = distance_to_line(a, b, c, fpr, tpr)
mindist = numpy.min(dist)
return numpy.flatnonzero((dist - mindist) <= tol)
def distance_to_line(a, b, c, x0, y0):
"""
Return the distance to a line ax + by + c = 0
"""
assert a != 0 or b != 0
return numpy.abs(a * x0 + b * y0 + c) / numpy.sqrt(a ** 2 + b ** 2)
def roc_iso_performance_slope(fp_cost, fn_cost, p):
assert 0 <= p <= 1
if fn_cost * p == 0:
return numpy.inf
else:
return (fp_cost * (1. - p)) / (fn_cost * p)
def main():
import gc
import sip
from PyQt4.QtGui import QApplication
from Orange.classification import (LogisticRegressionLearner, SVMLearner,
NuSVMLearner)
app = QApplication([])
w = OWROCAnalysis()
w.show()
w.raise_()
# data = Orange.data.Table("iris")
data = Orange.data.Table("ionosphere")
results = Orange.evaluation.CrossValidation(
data,
[LogisticRegressionLearner(),
LogisticRegressionLearner(penalty="l1"),
SVMLearner(probability=True),
NuSVMLearner(probability=True)],
k=5,
store_data=True,
)
results.learner_names = ["Logistic", "Logistic (L1 reg.)", "SVM", "NuSVM"]
w.set_results(results)
rval = app.exec_()
w.deleteLater()
sip.delete(w)
del w
app.processEvents()
sip.delete(app)
del app
gc.collect()
return rval
if __name__ == "__main__":
import sys
sys.exit(main())
|
qusp/orange3
|
Orange/widgets/evaluate/owrocanalysis.py
|
Python
|
bsd-2-clause
| 27,533
|
'''
Created on 21.04.2015
@author: marscher
'''
from __future__ import absolute_import
"""Miscellaneous classes/functions/etc."""
import os
import struct
import ctypes
if os.name != 'nt':
import fcntl
import termios
else:
import ctypes.wintypes
DEFAULT_TERMINAL_WIDTH = None
class _WindowsCSBI(object):
"""Interfaces with Windows CONSOLE_SCREEN_BUFFER_INFO API/DLL calls. Gets info for stderr and stdout.
References:
https://code.google.com/p/colorama/issues/detail?id=47.
pytest's py project: py/_io/terminalwriter.py.
Class variables:
CSBI -- ConsoleScreenBufferInfo class/struct (not instance, the class definition itself) defined in _define_csbi().
HANDLE_STDERR -- GetStdHandle() return integer for stderr.
HANDLE_STDOUT -- GetStdHandle() return integer for stdout.
WINDLL -- my own loaded instance of ctypes.WinDLL.
"""
CSBI = None
HANDLE_STDERR = None
HANDLE_STDOUT = None
WINDLL = ctypes.LibraryLoader(getattr(ctypes, 'WinDLL', None))
@staticmethod
def _define_csbi():
"""Defines structs and populates _WindowsCSBI.CSBI."""
if _WindowsCSBI.CSBI is not None:
return
class COORD(ctypes.Structure):
"""Windows COORD structure. http://msdn.microsoft.com/en-us/library/windows/desktop/ms682119"""
_fields_ = [('X', ctypes.c_short), ('Y', ctypes.c_short)]
class SmallRECT(ctypes.Structure):
"""Windows SMALL_RECT structure. http://msdn.microsoft.com/en-us/library/windows/desktop/ms686311"""
_fields_ = [('Left', ctypes.c_short), ('Top', ctypes.c_short), ('Right', ctypes.c_short),
('Bottom', ctypes.c_short)]
class ConsoleScreenBufferInfo(ctypes.Structure):
"""Windows CONSOLE_SCREEN_BUFFER_INFO structure.
http://msdn.microsoft.com/en-us/library/windows/desktop/ms682093
"""
_fields_ = [
('dwSize', COORD),
('dwCursorPosition', COORD),
('wAttributes', ctypes.wintypes.WORD),
('srWindow', SmallRECT),
('dwMaximumWindowSize', COORD)
]
_WindowsCSBI.CSBI = ConsoleScreenBufferInfo
@staticmethod
def initialize():
"""Initializes the WINDLL resource and populated the CSBI class variable."""
_WindowsCSBI._define_csbi()
_WindowsCSBI.HANDLE_STDERR = _WindowsCSBI.HANDLE_STDERR or _WindowsCSBI.WINDLL.kernel32.GetStdHandle(-12)
_WindowsCSBI.HANDLE_STDOUT = _WindowsCSBI.HANDLE_STDOUT or _WindowsCSBI.WINDLL.kernel32.GetStdHandle(-11)
if _WindowsCSBI.WINDLL.kernel32.GetConsoleScreenBufferInfo.argtypes:
return
_WindowsCSBI.WINDLL.kernel32.GetStdHandle.argtypes = [ctypes.wintypes.DWORD]
_WindowsCSBI.WINDLL.kernel32.GetStdHandle.restype = ctypes.wintypes.HANDLE
_WindowsCSBI.WINDLL.kernel32.GetConsoleScreenBufferInfo.restype = ctypes.wintypes.BOOL
_WindowsCSBI.WINDLL.kernel32.GetConsoleScreenBufferInfo.argtypes = [
ctypes.wintypes.HANDLE, ctypes.POINTER(_WindowsCSBI.CSBI)
]
@staticmethod
def get_info(handle):
"""Get information about this current console window (for Microsoft Windows only).
Raises IOError if attempt to get information fails (if there is no console window).
Don't forget to call _WindowsCSBI.initialize() once in your application before calling this method.
Positional arguments:
handle -- either _WindowsCSBI.HANDLE_STDERR or _WindowsCSBI.HANDLE_STDOUT.
Returns:
Dictionary with different integer values. Keys are:
buffer_width -- width of the buffer (Screen Buffer Size in cmd.exe layout tab).
buffer_height -- height of the buffer (Screen Buffer Size in cmd.exe layout tab).
terminal_width -- width of the terminal window.
terminal_height -- height of the terminal window.
bg_color -- current background color (http://msdn.microsoft.com/en-us/library/windows/desktop/ms682088).
fg_color -- current text color code.
"""
# Query Win32 API.
csbi = _WindowsCSBI.CSBI()
try:
if not _WindowsCSBI.WINDLL.kernel32.GetConsoleScreenBufferInfo(handle, ctypes.byref(csbi)):
raise IOError('Unable to get console screen buffer info from win32 API.')
except ctypes.ArgumentError:
raise IOError('Unable to get console screen buffer info from win32 API.')
# Parse data.
result = dict(
buffer_width=int(csbi.dwSize.X - 1),
buffer_height=int(csbi.dwSize.Y),
terminal_width=int(csbi.srWindow.Right - csbi.srWindow.Left),
terminal_height=int(csbi.srWindow.Bottom - csbi.srWindow.Top),
bg_color=int(csbi.wAttributes & 240),
fg_color=int(csbi.wAttributes % 16),
)
return result
def terminal_width():
"""Returns the terminal's width (number of character columns)."""
try:
if os.name == 'nt':
_WindowsCSBI.initialize()
return _WindowsCSBI.get_info(_WindowsCSBI.HANDLE_STDOUT)['terminal_width']
return struct.unpack('hhhh', fcntl.ioctl(0, termios.TIOCGWINSZ, '\000' * 8))[1]
except IOError:
return 80
|
trendelkampschroer/PyEMMA
|
pyemma/_base/progress/bar/misc.py
|
Python
|
bsd-2-clause
| 5,371
|
#!/usr/bin/env python
import functools
import itertools
import contextlib
import weakref
import logging
l = logging.getLogger("angr.sim_state")
import claripy
import ana
from archinfo import arch_from_id
from .misc.ux import deprecated
def arch_overrideable(f):
@functools.wraps(f)
def wrapped_f(self, *args, **kwargs):
if hasattr(self.arch, f.__name__):
arch_f = getattr(self.arch, f.__name__)
return arch_f(self, *args, **kwargs)
else:
return f(self, *args, **kwargs)
return wrapped_f
from .state_plugins import default_plugins
# This is a counter for the state-merging symbolic variables
merge_counter = itertools.count()
class SimState(ana.Storable): # pylint: disable=R0904
"""
The SimState represents the state of a program, including its memory, registers, and so forth.
:ivar regs: A convenient view of the state's registers, where each register is a property
:ivar mem: A convenient view of the state's memory, a :class:`angr.state_plugins.view.SimMemView`
:ivar registers: The state's register file as a flat memory region
:ivar memory: The state's memory as a flat memory region
:ivar solver: The symbolic solver and variable manager for this state
:ivar inspect: The breakpoint manager, a :class:`angr.state_plugins.inspect.SimInspector`
:ivar log: Information about the state's history
:ivar scratch: Information about the current execution step
:ivar posix: MISNOMER: information about the operating system or environment model
:ivar libc: Information about the standard library we are emulating
:ivar cgc: Information about the cgc environment
:ivar uc_manager: Control of under-constrained symbolic execution
:ivar unicorn: Control of the Unicorn Engine
"""
def __init__(self, project=None, arch=None, plugins=None, memory_backer=None, permissions_backer=None, mode=None, options=None,
add_options=None, remove_options=None, special_memory_filler=None, os_name=None):
self.project = project
self.arch = arch if arch is not None else project.arch.copy() if project is not None else None
if type(self.arch) is str:
self.arch = arch_from_id(self.arch)
# the options
if options is None:
if mode is None:
l.warning("SimState defaulting to symbolic mode.")
mode = "symbolic"
options = o.modes[mode]
options = set(options)
if add_options is not None:
options |= add_options
if remove_options is not None:
options -= remove_options
self.options = options
self.mode = mode
# plugins
self.plugins = { }
if plugins is not None:
for n,p in plugins.iteritems():
self.register_plugin(n, p)
if not self.has_plugin('memory'):
# we don't set the memory endness because, unlike registers, it's hard to understand
            # with which endness the data should be read
if o.ABSTRACT_MEMORY in self.options:
# We use SimAbstractMemory in static mode
# Convert memory_backer into 'global' region
if memory_backer is not None:
memory_backer = {'global': memory_backer}
# TODO: support permissions backer in SimAbstractMemory
self.register_plugin('memory', SimAbstractMemory(memory_backer=memory_backer, memory_id="mem"))
elif o.FAST_MEMORY in self.options:
self.register_plugin('memory', SimFastMemory(memory_backer=memory_backer, memory_id="mem"))
else:
self.register_plugin('memory', SimSymbolicMemory(memory_backer=memory_backer, permissions_backer=permissions_backer, memory_id="mem"))
if not self.has_plugin('registers'):
if o.FAST_REGISTERS in self.options:
self.register_plugin('registers', SimFastMemory(memory_id="reg", endness=self.arch.register_endness))
else:
self.register_plugin('registers', SimSymbolicMemory(memory_id="reg", endness=self.arch.register_endness))
# OS name
self.os_name = os_name
# This is used in static mode as we don't have any constraints there
self._satisfiable = True
# states are big, so let's give them UUIDs for ANA right away to avoid
# extra pickling
self.make_uuid()
self.uninitialized_access_handler = None
self._special_memory_filler = special_memory_filler
# this is a global condition, applied to all added constraints, memory reads, etc
self._global_condition = None
self.ip_constraints = []
def _ana_getstate(self):
s = dict(ana.Storable._ana_getstate(self))
s['plugins'] = { k:v for k,v in s['plugins'].iteritems() if k not in ('inspector', 'regs', 'mem') }
return s
def _ana_setstate(self, s):
ana.Storable._ana_setstate(self, s)
for p in self.plugins.values():
p.set_state(self._get_weakref() if not isinstance(p, SimAbstractMemory) else self)
if p.STRONGREF_STATE:
p.set_strongref_state(self)
def _get_weakref(self):
return weakref.proxy(self)
def _get_strongref(self):
return self
def __repr__(self):
try:
ip_str = "%#x" % self.addr
except (SimValueError, SimSolverModeError):
ip_str = repr(self.regs.ip)
return "<SimState @ %s>" % ip_str
#
# Easier access to some properties
#
@property
def ip(self):
"""
Get the instruction pointer expression, trigger SimInspect breakpoints, and generate SimActions.
Use ``_ip`` to not trigger breakpoints or generate actions.
:return: an expression
"""
return self.regs.ip
@ip.setter
def ip(self, val):
self.regs.ip = val
@property
def _ip(self):
"""
Get the instruction pointer expression without triggering SimInspect breakpoints or generating SimActions.
:return: an expression
"""
return self.regs._ip
@_ip.setter
def _ip(self, val):
"""
Set the instruction pointer without triggering SimInspect breakpoints or generating SimActions.
:param val: The new instruction pointer.
:return: None
"""
self.regs._ip = val
@property
def addr(self):
"""
Get the concrete address of the instruction pointer, without triggering SimInspect breakpoints or generating
SimActions. An integer is returned, or an exception is raised if the instruction pointer is symbolic.
:return: an int
"""
return self.se.eval_one(self.regs._ip)
#
# Plugin accessors
#
def __getattr__(self, v):
try:
return self.get_plugin(v)
except KeyError:
raise AttributeError(v)
@property
def memory(self):
return self.get_plugin('memory')
@property
def registers(self):
return self.get_plugin('registers')
@property
def se(self):
return self.get_plugin('solver_engine')
@property
def solver(self):
return self.get_plugin('solver_engine')
@property
def inspect(self):
return self.get_plugin('inspector')
@property
def log(self):
return self.get_plugin('log')
@property
def scratch(self):
return self.get_plugin('scratch')
@property
def history(self):
return self.get_plugin('history')
@property
def posix(self):
return self.get_plugin('posix')
@property
def libc(self):
return self.get_plugin('libc')
@property
def cgc(self):
return self.get_plugin('cgc')
@property
def regs(self):
return self.get_plugin('regs')
@property
def mem(self):
return self.get_plugin('mem')
@property
def gdb(self):
return self.get_plugin('gdb')
@property
def globals(self):
return self.get_plugin('globals')
@property
def uc_manager(self):
return self.get_plugin('uc_manager')
@property
def unicorn(self):
return self.get_plugin('unicorn')
@property
def preconstrainer(self):
return self.get_plugin('preconstrainer')
@property
def callstack(self):
return self.get_plugin('callstack')
def _inspect(self, *args, **kwargs):
if self.has_plugin('inspector'):
self.inspect.action(*args, **kwargs)
def _inspect_getattr(self, attr, default_value):
if self.has_plugin('inspector'):
if hasattr(self.inspect, attr):
return getattr(self.inspect, attr)
return default_value
#
# Plugins
#
def has_plugin(self, name):
return name in self.plugins
def get_plugin(self, name):
if name not in self.plugins:
p = default_plugins[name]()
self.register_plugin(name, p)
return p
return self.plugins[name]
def register_plugin(self, name, plugin):
#l.debug("Adding plugin %s of type %s", name, plugin.__class__.__name__)
plugin.set_state(self._get_weakref() if not isinstance(plugin, SimAbstractMemory) else self)
if plugin.STRONGREF_STATE:
plugin.set_strongref_state(self)
self.plugins[name] = plugin
plugin.init_state()
return plugin
def release_plugin(self, name):
if name in self.plugins:
del self.plugins[name]
#
# Constraint pass-throughs
#
def simplify(self, *args):
"""
Simplify this state's constraints.
"""
return self.se.simplify(*args)
def add_constraints(self, *args, **kwargs):
"""
Add some constraints to the state.
You may pass in any number of symbolic booleans as variadic positional arguments.
"""
if len(args) > 0 and isinstance(args[0], (list, tuple)):
raise Exception("Tuple or list passed to add_constraints!")
if o.TRACK_CONSTRAINTS in self.options and len(args) > 0:
if o.SIMPLIFY_CONSTRAINTS in self.options:
constraints = [ self.simplify(a) for a in args ]
else:
constraints = args
self._inspect('constraints', BP_BEFORE, added_constraints=constraints)
constraints = self._inspect_getattr("added_constraints", constraints)
added = self.se.add(*constraints)
self._inspect('constraints', BP_AFTER)
# add actions for the added constraints
if o.TRACK_CONSTRAINT_ACTIONS in self.options:
for c in added:
sac = SimActionConstraint(self, c)
self.history.add_action(sac)
else:
# preserve the old action logic for when we don't track constraints (why?)
if (
'action' in kwargs and kwargs['action'] and
o.TRACK_CONSTRAINT_ACTIONS in self.options and len(args) > 0
):
for arg in args:
if self.se.symbolic(arg):
sac = SimActionConstraint(self, arg)
self.history.add_action(sac)
if o.ABSTRACT_SOLVER in self.options and len(args) > 0:
for arg in args:
if self.se.is_false(arg):
self._satisfiable = False
return
if self.se.is_true(arg):
continue
# `is_true` and `is_false` does not use VSABackend currently (see commits 97a75366 and 2dfba73e in
# claripy). There is a chance that VSA backend can in fact handle it.
# Therefore we try to resolve it with VSABackend again
if claripy.backends.vsa.is_false(arg):
self._satisfiable = False
return
if claripy.backends.vsa.is_true(arg):
continue
# It's neither True or False. Let's try to apply the condition
# We take the argument, extract a list of constrained SIs out of it (if we could, of course), and
# then replace each original SI the intersection of original SI and the constrained one.
_, converted = self.se.constraint_to_si(arg)
for original_expr, constrained_si in converted:
if not original_expr.variables:
l.error('Incorrect original_expression to replace in add_constraints(). ' +
'This is due to defects in VSA logics inside claripy. Please report ' +
'to Fish and he will fix it if he\'s free.')
continue
new_expr = constrained_si
self.registers.replace_all(original_expr, new_expr)
for _, region in self.memory.regions.items():
region.memory.replace_all(original_expr, new_expr)
l.debug("SimState.add_constraints: Applied to final state.")
elif o.SYMBOLIC not in self.options and len(args) > 0:
for arg in args:
if self.se.is_false(arg):
self._satisfiable = False
return
def satisfiable(self, **kwargs):
"""
Whether the state's constraints are satisfiable
"""
if o.ABSTRACT_SOLVER in self.options or o.SYMBOLIC not in self.options:
extra_constraints = kwargs.pop('extra_constraints', ())
for e in extra_constraints:
if self.se.is_false(e):
return False
return self._satisfiable
else:
return self.se.satisfiable(**kwargs)
def downsize(self):
"""
Clean up after the solver engine. Calling this when a state no longer needs to be solved on will reduce memory
usage.
"""
if 'solver_engine' in self.plugins:
self.se.downsize()
#
# State branching operations
#
def step(self, **kwargs):
"""
Perform a step of symbolic execution using this state.
Any arguments to `AngrObjectFactory.successors` can be passed to this.
:return: A SimSuccessors object categorizing the results of the step.
"""
return self.project.factory.successors(self, **kwargs)
def block(self, *args, **kwargs):
"""
Represent the basic block at this state's instruction pointer.
Any arguments to `AngrObjectFactory.block` can ba passed to this.
:return: A Block object describing the basic block of code at this point.
"""
if not args and 'addr' not in kwargs:
kwargs['addr'] = self.addr
return self.project.factory.block(*args, backup_state=self, **kwargs)
# Returns a dict that is a copy of all the state's plugins
def _copy_plugins(self):
memo = {}
out = {}
for n, p in self.plugins.iteritems():
if id(p) in memo:
out[n] = memo[id(p)]
else:
out[n] = p.copy()
memo[id(p)] = out[n]
return out
def copy(self):
"""
Returns a copy of the state.
"""
if self._global_condition is not None:
raise SimStateError("global condition was not cleared before state.copy().")
c_plugins = self._copy_plugins()
state = SimState(project=self.project, arch=self.arch, plugins=c_plugins, options=self.options, mode=self.mode, os_name=self.os_name)
state.uninitialized_access_handler = self.uninitialized_access_handler
state._special_memory_filler = self._special_memory_filler
state.ip_constraints = self.ip_constraints
return state
def merge(self, *others, **kwargs):
"""
        Merges this state with the other states. Returns the merged state, the merge conditions, and a flag indicating whether any merging occurred.
:param states: the states to merge
:param merge_conditions: a tuple of the conditions under which each state holds
:param common_ancestor: a state that represents the common history between the states being merged. Usually it
is only available when EFFICIENT_STATE_MERGING is enabled, otherwise weak-refed states
might be dropped from state history instances.
:param plugin_whitelist: a list of plugin names that will be merged. If this option is given and is not None,
any plugin that is not inside this list will not be merged, and will be created as a
fresh instance in the new state.
:param common_ancestor_history:
a SimStateHistory instance that represents the common history between the states being
merged. This is to allow optimal state merging when EFFICIENT_STATE_MERGING is
disabled.
        :return: (merged state, merge conditions, a bool indicating if any merging occurred)
"""
merge_conditions = kwargs.pop('merge_conditions', None)
common_ancestor = kwargs.pop('common_ancestor', None)
plugin_whitelist = kwargs.pop('plugin_whitelist', None)
common_ancestor_history = kwargs.pop('common_ancestor_history', None)
if len(kwargs) != 0:
raise ValueError("invalid arguments: %s" % kwargs.keys())
if merge_conditions is None:
# TODO: maybe make the length of this smaller? Maybe: math.ceil(math.log(len(others)+1, 2))
merge_flag = self.se.BVS("state_merge_%d" % merge_counter.next(), 16)
merge_values = range(len(others)+1)
merge_conditions = [ merge_flag == b for b in merge_values ]
else:
merge_conditions = [
(self.se.true if len(mc) == 0 else self.se.And(*mc)) for mc in merge_conditions
]
if len(set(o.arch.name for o in others)) != 1:
raise SimMergeError("Unable to merge due to different architectures.")
all_plugins = set(self.plugins.keys()) | set.union(*(set(o.plugins.keys()) for o in others))
if plugin_whitelist is not None:
all_plugins = all_plugins.intersection(set(plugin_whitelist))
merged = self.copy()
merging_occurred = False
# fix parent
merged.history.parent = self.history
# plugins
for p in all_plugins:
our_plugin = merged.plugins[p] if p in merged.plugins else None
their_plugins = [ (pl.plugins[p] if p in pl.plugins else None) for pl in others ]
plugin_classes = (
set([our_plugin.__class__]) | set(pl.__class__ for pl in their_plugins)
) - set([None.__class__])
if len(plugin_classes) != 1:
raise SimMergeError(
"There are differing plugin classes (%s) for plugin %s" % (plugin_classes, p)
)
plugin_class = plugin_classes.pop()
our_filled_plugin = our_plugin if our_plugin is not None else merged.register_plugin(
p, plugin_class()
)
their_filled_plugins = [
(tp if tp is not None else t.register_plugin(p, plugin_class()))
for t,tp in zip(others, their_plugins)
]
plugin_common_ancestor = (
common_ancestor.plugins[p] if
(common_ancestor is not None and p in common_ancestor.plugins) else
None
)
if plugin_common_ancestor is None and \
plugin_class is SimStateHistory and \
common_ancestor_history is not None:
plugin_common_ancestor = common_ancestor_history
plugin_state_merged = our_filled_plugin.merge(
their_filled_plugins, merge_conditions, common_ancestor=plugin_common_ancestor,
)
if plugin_state_merged:
l.debug('Merging occurred in %s', p)
merging_occurred = True
merged.add_constraints(merged.se.Or(*merge_conditions))
return merged, merge_conditions, merging_occurred
def widen(self, *others):
"""
Perform a widening between self and other states
:param others:
:return:
"""
if len(set(frozenset(o.plugins.keys()) for o in others)) != 1:
raise SimMergeError("Unable to widen due to different sets of plugins.")
if len(set(o.arch.name for o in others)) != 1:
raise SimMergeError("Unable to widen due to different architectures.")
widened = self.copy()
widening_occurred = False
# plugins
for p in self.plugins:
if p in ('solver_engine', 'unicorn'):
continue
plugin_state_widened = widened.plugins[p].widen([_.plugins[p] for _ in others])
if plugin_state_widened:
                l.debug('Widening occurred in %s', p)
widening_occurred = True
return widened, widening_occurred
#############################################
### Accessors for tmps, registers, memory ###
#############################################
def reg_concrete(self, *args, **kwargs):
"""
Returns the contents of a register but, if that register is symbolic,
raises a SimValueError.
"""
e = self.registers.load(*args, **kwargs)
if self.se.symbolic(e):
raise SimValueError("target of reg_concrete is symbolic!")
return self.se.eval(e)
def mem_concrete(self, *args, **kwargs):
"""
Returns the contents of a memory but, if the contents are symbolic,
raises a SimValueError.
"""
e = self.memory.load(*args, **kwargs)
if self.se.symbolic(e):
raise SimValueError("target of mem_concrete is symbolic!")
return self.se.eval(e)
###############################
### Stack operation helpers ###
###############################
@arch_overrideable
def stack_push(self, thing):
"""
Push 'thing' to the stack, writing the thing to memory and adjusting the stack pointer.
"""
# increment sp
sp = self.regs.sp + self.arch.stack_change
self.regs.sp = sp
return self.memory.store(sp, thing, endness=self.arch.memory_endness)
@arch_overrideable
def stack_pop(self):
"""
Pops from the stack and returns the popped thing. The length will be the architecture word size.
"""
sp = self.regs.sp
self.regs.sp = sp - self.arch.stack_change
return self.memory.load(sp, self.arch.bits / 8, endness=self.arch.memory_endness)
@arch_overrideable
def stack_read(self, offset, length, bp=False):
"""
Reads length bytes, at an offset into the stack.
:param offset: The offset from the stack pointer.
:param length: The number of bytes to read.
:param bp: If True, offset from the BP instead of the SP. Default: False.
"""
sp = self.regs.bp if bp else self.regs.sp
return self.memory.load(sp+offset, length, endness=self.arch.memory_endness)
###############################
### Other helpful functions ###
###############################
def make_concrete_int(self, expr):
if isinstance(expr, (int, long)):
return expr
if not self.se.symbolic(expr):
return self.se.eval(expr)
v = self.se.eval(expr)
self.add_constraints(expr == v)
return v
# This handles the preparation of concrete function launches from abstract functions.
@arch_overrideable
def prepare_callsite(self, retval, args, cc='wtf'):
#TODO
pass
def _stack_values_to_string(self, stack_values):
"""
Convert each stack value to a string
:param stack_values: A list of values
:return: The converted string
"""
strings = [ ]
for stack_value in stack_values:
if self.se.symbolic(stack_value):
concretized_value = "SYMBOLIC - %s" % repr(stack_value)
else:
if len(self.se.eval_upto(stack_value, 2)) == 2:
concretized_value = repr(stack_value)
else:
                    concretized_value = "0x%08x" % self.se.eval(stack_value)
strings.append(concretized_value)
return " .. ".join(strings)
def dbg_print_stack(self, depth=None, sp=None):
"""
Only used for debugging purposes.
Return the current stack info in formatted string. If depth is None, the
current stack frame (from sp to bp) will be printed out.
"""
var_size = self.arch.bits / 8
sp_sim = self.regs._sp
bp_sim = self.regs._bp
if self.se.symbolic(sp_sim) and sp is None:
result = "SP is SYMBOLIC"
elif self.se.symbolic(bp_sim) and depth is None:
result = "BP is SYMBOLIC"
else:
sp_value = sp if sp is not None else self.se.eval(sp_sim)
if self.se.symbolic(bp_sim):
result = "SP = 0x%08x, BP is symbolic\n" % (sp_value)
bp_value = None
else:
bp_value = self.se.eval(bp_sim)
result = "SP = 0x%08x, BP = 0x%08x\n" % (sp_value, bp_value)
if depth is None:
# bp_value cannot be None here
depth = (bp_value - sp_value) / var_size + 1 # Print one more value
pointer_value = sp_value
for i in xrange(depth):
# For AbstractMemory, we wanna utilize more information from VSA
stack_values = [ ]
if o.ABSTRACT_MEMORY in self.options:
sp = self.regs._sp
segment_sizes = self.memory.get_segments(sp + i * var_size, var_size)
pos = i * var_size
for segment_size in segment_sizes:
stack_values.append(self.stack_read(pos, segment_size, bp=False))
pos += segment_size
else:
stack_values.append(self.stack_read(i * var_size, var_size, bp=False))
# Convert it into a big string!
val = self._stack_values_to_string(stack_values)
if pointer_value == sp_value:
line = "(sp)% 16x | %s" % (pointer_value, val)
elif pointer_value == bp_value:
line = "(bp)% 16x | %s" % (pointer_value, val)
else:
line = "% 20x | %s" % (pointer_value, val)
pointer_value += var_size
result += line + "\n"
return result
#
# Other helper methods
#
def set_mode(self, mode):
self.mode = mode
self.options = set(o.modes[mode])
@property
def thumb(self):
if not self.arch.name.startswith('ARM'):
return False
if self.regs.ip.symbolic:
# return True when IP can *only* be odd
new_state = self.copy()
new_state.add_constraints(new_state.regs.ip % 2 == 1, new_state.regs.ip % 2 != 0)
return new_state.satisfiable()
else:
concrete_ip = self.se.eval(self.regs.ip)
return concrete_ip % 2 == 1
#
# Some pretty fancy global condition stuff!
#
@property
def with_condition(self):
@contextlib.contextmanager
def ctx(c):
old_condition = self._global_condition
try:
new_condition = c if old_condition is None else self.se.And(old_condition, c)
self._global_condition = new_condition
yield
finally:
self._global_condition = old_condition
return ctx
def _adjust_condition(self, c):
if self._global_condition is None:
return c
elif c is None:
return self._global_condition
else:
return self.se.And(self._global_condition, c)
def _adjust_condition_list(self, conditions):
if self._global_condition is None:
return conditions
elif len(conditions) == 0:
return conditions.__class__((self._global_condition,))
else:
return conditions.__class__((self._adjust_condition(self.se.And(*conditions)),))
#
# Compatibility layer
#
@property
def state(self):
return self
@property
def length(self):
return self.history.block_count
@property
def jumpkind(self):
return self.scratch.jumpkind
@property
def last_actions(self):
return self.history.recent_actions
@property
def history_iterator(self):
return self.history.lineage
@property
def addr_trace(self):
return self.history.addr_trace
@property
def trace(self):
return self.history.trace
@property
def targets(self):
return self.history.jump_targets
@property
def guards(self):
return self.history.jump_guards
@property
def jumpkinds(self):
return self.history.jumpkinds
@property
def events(self):
return self.history.events
@property
def actions(self):
return self.history.actions
@property
def reachable(self):
return self.history.reachable()
@deprecated
def trim_history(self):
self.history.trim()
from .state_plugins.symbolic_memory import SimSymbolicMemory
from .state_plugins.fast_memory import SimFastMemory
from .state_plugins.abstract_memory import SimAbstractMemory
from .state_plugins.history import SimStateHistory
from .errors import SimMergeError, SimValueError, SimStateError, SimSolverModeError
from .state_plugins.inspect import BP_AFTER, BP_BEFORE
from .state_plugins.sim_action import SimActionConstraint
from . import sim_options as o
|
f-prettyland/angr
|
angr/sim_state.py
|
Python
|
bsd-2-clause
| 30,608
|
import calendar
from django.template import Library
from molo.core.templatetags.core_tags import load_sections
from molo.profiles.models import UserProfilesSettings
from nurseconnect.utils import get_survey_results_for_user
register = Library()
@register.filter('fieldtype')
def fieldtype(field):
return field.field.widget.__class__.__name__
@register.inclusion_tag("core/tags/footerlink.html", takes_context=True)
def footer_link(context, id):
request = context["request"]
locale = context.get("locale_code")
terms = UserProfilesSettings.for_site(request.site).terms_and_conditions
return {
"id": id,
"terms": terms,
"request": context["request"],
"locale_code": locale,
}
@register.inclusion_tag(
"core/tags/section_listing_menu.html",
takes_context=True
)
def section_listing_menu(context):
locale_code = context.get("locale_code")
return {
"sections": load_sections(context),
"request": context["request"],
"locale_code": locale_code,
}
@register.assignment_tag()
def convert_month(value):
if value:
return calendar.month_name[value]
else:
return ""
@register.assignment_tag()
def get_next_article(page):
if page.get_next_sibling():
return page.get_next_sibling().specific
return None
@register.inclusion_tag("surveys/embedded_survey.html",
takes_context=True)
def embedded_survey_tag(context, page):
'''
Display the child survey of a page
If a user has not submitted they will see the survey form
If a user has already submitted an answer they see their results
NOTE: This currently only works for Radio Buttons with True/False
    and uses a hack where the survey thank-you text stores true/false
    string values separated by commas. I apologise
if you are responsible for maintaining this in the future.
'''
user = context['request'].user
survey = page.get_children().first().specific
survey_results = get_survey_results_for_user(survey, user)
if survey_results:
if page.get_next_sibling():
next_article = page.get_next_sibling().specific
else:
next_article = None
return {
"survey_answered": True,
"answers": survey_results,
"next_article": next_article,
}
else:
return {
"survey_answered": False,
"survey": survey
}
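# Hypothetical template usage (illustration only; the load name comes from this
# file's module name and the tag name from the registration above):
#
#     {% load nurseconnect_tags %}
#     {% embedded_survey_tag page %}
#
# This renders "surveys/embedded_survey.html" with either the survey form or
# the stored results, as described in the docstring of embedded_survey_tag.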
|
praekelt/nurseconnect
|
nurseconnect/templatetags/nurseconnect_tags.py
|
Python
|
bsd-2-clause
| 2,516
|
#!/usr/bin/env python
# encoding: utf-8
import os
from setuptools import setup
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name="cubehelix",
version="0.1.0",
author="James Davenport",
# author_email="",
description="Cubehelix colormaps for matplotlib",
long_description=read('README.md'),
# license="BSD",
py_modules=['cubehelix'],
classifiers=[
"Development Status :: 3 - Alpha",
"Topic :: Scientific/Engineering :: Visualization",
# "License :: OSI Approved :: BSD License",
]
)
|
jradavenport/cubehelix
|
setup.py
|
Python
|
bsd-2-clause
| 605
|
import unittest
from petsc4py import PETSc
# --------------------------------------------------------------------
class TestVersion(unittest.TestCase):
def testGetVersion(self):
version = PETSc.Sys.getVersion()
self.assertTrue(version > (0, 0, 0))
v, patch = PETSc.Sys.getVersion(patch=True)
self.assertTrue(version == v)
self.assertTrue(patch >= 0)
v, date = PETSc.Sys.getVersion(date=True)
self.assertTrue(version == v)
self.assertTrue(isinstance(date, str))
v, author = PETSc.Sys.getVersion(author=True)
self.assertTrue(version == v)
self.assertTrue(isinstance(author, (list,tuple)))
def testGetVersionInfo(self):
version = PETSc.Sys.getVersion()
info = PETSc.Sys.getVersionInfo()
self.assertEqual(version,
(info['major'],
info['minor'],
info['subminor'],))
self.assertTrue(isinstance(info['release'], bool))
_, patch = PETSc.Sys.getVersion(patch=True)
self.assertEqual(patch, info['patch'])
v, date = PETSc.Sys.getVersion(date=True)
self.assertEqual(date, info['date'])
def testGetSetDefaultComm(self):
c = PETSc.Sys.getDefaultComm()
self.assertEqual(c, PETSc.COMM_WORLD)
PETSc.Sys.setDefaultComm(PETSc.COMM_SELF)
c = PETSc.Sys.getDefaultComm()
self.assertEqual(c, PETSc.COMM_SELF)
PETSc.Sys.setDefaultComm(PETSc.COMM_WORLD)
c = PETSc.Sys.getDefaultComm()
self.assertEqual(c, PETSc.COMM_WORLD)
f = lambda : PETSc.Sys.setDefaultComm(PETSc.COMM_NULL)
self.assertRaises(ValueError, f)
# --------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()
# --------------------------------------------------------------------
|
zonca/petsc4py
|
test/test_sys.py
|
Python
|
bsd-2-clause
| 1,904
|
#! /usr/bin/env python
# ______________________________________________________________________
'''test_filter2d
Test the filter2d() example from the PyCon'12 slide deck.
'''
# ______________________________________________________________________
import numpy
from numba import *
from numba.decorators import jit
import sys
import unittest
# ______________________________________________________________________
def filter2d(image, filt):
M, N = image.shape
Mf, Nf = filt.shape
Mf2 = Mf // 2
Nf2 = Nf // 2
result = numpy.zeros_like(image)
for i in range(Mf2, M - Mf2):
for j in range(Nf2, N - Nf2):
num = 0.0
for ii in range(Mf):
for jj in range(Nf):
num += (filt[Mf-1-ii, Nf-1-jj] * image[i-Mf2+ii, j-Nf2+jj])
result[i, j] = num
return result
# ______________________________________________________________________
class TestFilter2d(unittest.TestCase):
def test_vectorized_filter2d(self):
ufilter2d = jit(argtypes=[double[:,:], double[:,:]],
restype=double[:,:])(filter2d)
image = numpy.random.random((50, 50))
filt = numpy.random.random((5, 5))
filt /= filt.sum()
plain_old_result = filter2d(image, filt)
hot_new_result = ufilter2d(image, filt)
self.assertTrue((abs(plain_old_result - hot_new_result) < 1e-9).all())
# ______________________________________________________________________
@autojit
def func():
return numpy.empty(10)
if __name__ == "__main__":
# func()
# TestFilter2d('test_vectorized_filter2d').debug()
unittest.main(*sys.argv[1:])
# ______________________________________________________________________
# End of test_filter2d.py
|
shiquanwang/numba
|
numba/tests/test_filter2d.py
|
Python
|
bsd-2-clause
| 1,782
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2013 ASMlover. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
#    the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import zmq
if __name__ == '__main__':
ctx = zmq.Context()
sinker = ctx.socket(zmq.PULL)
sinker.bind('tcp://*:6666')
print 'sender server init success ...'
msg = sinker.recv()
print '\t%s' % msg
while True:
try:
msg = sinker.recv()
print msg
except KeyboardInterrupt:
break
sinker.close()
|
ASMlover/study
|
zeroMQ/python/push-pull/sinker.py
|
Python
|
bsd-2-clause
| 1,726
|
from tastypie.authentication import ApiKeyAuthentication
from tastypie.authorization import Authorization
from tastypie.resources import NamespacedModelResource, fields, ALL, ALL_WITH_RELATIONS
from django.contrib.auth.models import User #BUG: Import the correct user object from settings.py
from .models import Incident, Status
import logging
logger = logging.getLogger(__name__)
class ReadOnlyFieldNamespacedModelResource(NamespacedModelResource):
""" Allows you to add a 'readonly_fields' setting on a ModelResource """
def __init__(self, **kwargs):
super(ReadOnlyFieldNamespacedModelResource, self).__init__(**kwargs)
for fld in getattr(self.Meta, 'readonly_fields', []):
self.fields[fld].readonly = True
class StatusResource(ReadOnlyFieldNamespacedModelResource):
class Meta:
detail_uri_name = 'name'
queryset = Status.objects.all()
allowed_methods = ['get']
resource_name = 'status'
authentication = ApiKeyAuthentication()
authorization = Authorization()
class IncidentResource(ReadOnlyFieldNamespacedModelResource):
status = fields.ForeignKey(StatusResource, 'status', full=True, null=True, blank=True)
#TODO: We need to include the related user object at some point
def hydrate(self, bundle):
u = User.objects.get(username=bundle.request.GET['username'])
bundle.obj.user = u
return bundle
class Meta:
readonly_fields = ['created', 'updated']
queryset = Incident.objects.all()
allowed_methods = ['get', 'post', 'delete']
resource_name = 'incident'
authentication = ApiKeyAuthentication()
authorization = Authorization()
always_return_data = True
filtering = {
'created': ALL,
'updates': ALL,
'status': ALL_WITH_RELATIONS,
}
|
darkpixel/statuspage
|
status/api.py
|
Python
|
bsd-3-clause
| 1,878
|
# -*- coding: utf-8 -*-
from gensim.models import word2vec
from gensim import models
import jieba
import codecs
import io
from collections import Counter
import operator
import numpy
f = codecs.open("target_article.txt",'r','utf8')
content = f.readlines()
article = []
jieba.set_dictionary('jieba_dict/dict.txt.big')
model = models.Word2Vec.load_word2vec_format('med250.model.bin',binary=True)
# import stopword
stopwordset = set()
with io.open('jieba_dict/stopwords.txt','r',encoding='utf-8') as sw:
for line in sw:
stopwordset.add(line.strip('\n'))
# Cut The Words , Output: short words in article
for line in content:
seg_list = jieba.cut(line)
for gg in seg_list:
if gg not in stopwordset:
article.append(gg)
# Count frequency
raw_data = Counter(article)
raw_data = { key:raw_data[key] for key in raw_data if key in model.vocab}
low_level = 0
for key in raw_data:
low_level += raw_data[key]
low_level = int(round(low_level*0.01))
# Initial accumulation
words = []
acc_data = dict()
map_words = []
related_word = dict()
for keys in raw_data:
words.append(keys)
# acc_data[keys] = 0
# Pick up the Friends
for word_1 in words:
cand_words = []
for word_2 in words:
if model.similarity(word_1, word_2) >= 0.6:
cand_words.append(word_2)
map_words.append(cand_words)
for i in range(len(map_words)):
friend_list = map_words[i]
value = 0.0
for friend_1 in friend_list:
for friend_2 in friend_list:
if friend_1 == friend_2:
continue
value += model.similarity(friend_1, friend_2)
leng = len(friend_list)
related_word[words[i]] = value/float(leng*leng)
s_imp_words = sorted(related_word.items(), key=operator.itemgetter(1), reverse=True)
for i in s_imp_words[:20]:
print i[0]
print "-----------------------"
#print s_imp_words
# for value in output:
# if value[1] == 0.0:
# continue
# print value[0], value[1]
# print "-----------------------"
keywords = []
fg = numpy.zeros(len(s_imp_words))
for i in range(len(s_imp_words)):
if fg[i] == 1:
continue
for j in range(i+1,len(s_imp_words)):
if fg[j] != 1:
if model.similarity(s_imp_words[i][0], s_imp_words[j][0]) >= 0.7:
fg[j] = 1
keywords.append(s_imp_words[i])
#print s_imp_words[i][0]
for i in keywords[:10]:
print i[0]
# with io.open("target_keywords.txt",'w',encoding='utf-8') as output:
# for text in keywords:
# output.write(text + '\n')
|
chunchih/article-matching
|
experiment/find_key_relation.py
|
Python
|
bsd-3-clause
| 2,408
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
cookiecutter.main
-----------------
Main entry point for the `cookiecutter` command.
The code in this module is also a good example of how to use Cookiecutter as a
library rather than a script.
"""
from __future__ import unicode_literals
import logging
import os
from datetime import datetime
from . import __version__ as cookiecutter_version
from .config import get_user_config, USER_CONFIG_PATH
from .prompt import prompt_for_config
from .generate import generate_context, generate_files
from .vcs import clone
from .compat import PY3
logger = logging.getLogger(__name__)
builtin_abbreviations = {
'gh': 'https://github.com/{0}.git',
'bb': 'https://bitbucket.org/{0}',
}
def expand_abbreviations(template, config_dict):
"""
Expand abbreviations in a template name.
:param template: The project template name.
:param config_dict: The user config, which will contain abbreviation
definitions.
"""
abbreviations = builtin_abbreviations.copy()
abbreviations.update(config_dict.get('abbreviations', {}))
if template in abbreviations:
return abbreviations[template]
# Split on colon. If there is no colon, rest will be empty
# and prefix will be the whole template
prefix, sep, rest = template.partition(':')
if prefix in abbreviations:
return abbreviations[prefix].format(rest)
return template
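# Illustrative sketch (not part of the original module): with the builtin
# abbreviations above, a spec such as 'gh:audreyr/cookiecutter-pypackage'
# partitions into prefix 'gh' and rest 'audreyr/cookiecutter-pypackage', so
# expand_abbreviations('gh:audreyr/cookiecutter-pypackage', {}) returns
# 'https://github.com/audreyr/cookiecutter-pypackage.git'.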
def cookiecutter(template, checkout=None, no_input=False, extra_context=None,
extra_globals=None, rc_file=USER_CONFIG_PATH):
"""
API equivalent to using Cookiecutter at the command line.
:param template: A directory containing a project template directory,
or a URL to a git repository.
:param checkout: The branch, tag or commit ID to checkout after clone.
:param no_input: Prompt the user at command line for manual configuration?
:param extra_context: A dictionary of context that overrides default
and user configuration.
:param extra_globals: A dictionary of values added to the Jinja2 context,
e.g. custom filters.
:param rc_file: Path to the user configuration file
"""
# Get user config from ~/.cookiecutterrc or equivalent
# If no config file, sensible defaults from config.DEFAULT_CONFIG are used
config_dict = get_user_config(rc_file)
template = expand_abbreviations(template, config_dict)
# TODO: find a better way to tell if it's a repo URL
if 'git@' in template or 'https://' in template:
repo_dir = clone(
repo_url=template,
checkout=checkout,
clone_to_dir=config_dict['cookiecutters_dir'],
no_input=no_input
)
else:
# If it's a local repo, no need to clone or copy to your
# cookiecutters_dir
repo_dir = template
context_file = os.path.join(repo_dir, 'cookiecutter.json')
logging.debug('context_file is {0}'.format(context_file))
context = generate_context(
context_file=context_file,
default_context=config_dict['default_context'],
extra_context=extra_context,
)
# prompt the user to manually configure at the command line.
# except when 'no-input' flag is set
context['cookiecutter'] = prompt_for_config(context, no_input)
# Add some system values, especially for use by hook scripts
now = datetime.now()
context.update(extra_globals or {})
context.update(dict(
version=cookiecutter_version,
repo_dir=os.path.abspath(repo_dir),
context_file=os.path.abspath(context_file),
current_year=now.year,
current_date=now.ctime(),
current_date_iso=now.isoformat(b' ' if not PY3 else u' '),
))
# Create project from local context and project template.
generate_files(
repo_dir=repo_dir,
context=context
)
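# Library-style usage sketch (the argument values here are hypothetical):
#
#     from cookiecutter.main import cookiecutter
#     cookiecutter('gh:audreyr/cookiecutter-pypackage', no_input=True,
#                  extra_context={'full_name': 'Jane Doe'})
#
# This resolves the abbreviation, clones the template, merges the user config
# with extra_context, and generates the project without prompting.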
|
jhermann/cookiecutter
|
cookiecutter/main.py
|
Python
|
bsd-3-clause
| 3,903
|
# -*- coding: utf-8 -*-
from decouple import config
from mongrey.web.settings import Test as BaseTest
class Test(BaseTest):
DB_SETTINGS = {
'host': config('MONGREY_DB', 'sqlite:///../mongrey_test.db'),
}
|
radical-software/mongrey
|
mongrey/tests/storage/sql/flask_settings.py
|
Python
|
bsd-3-clause
| 233
|
# Copyright 2021 Google LLC.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
r"""Experimental multi-sample make_examples for DeepVariant.
This is a prototype for experimentation with multiple samples in DeepVariant, a
proof of concept enabled by a refactoring to join together DeepVariant and
DeepTrio, generalizing the functionality of make_examples to work with multiple
samples.
The output of this script is not compatible with any of DeepVariant's public
models, and the DeepVariant team does not intend to provide support for users
of this script.
Example usage:
multisample_make_examples \
--mode calling \
--ref "reference.fa" \
--reads "sample1.bam;sample2.bam;sample3.bam;sample4.bam;sample5.bam" \
--sample_names "sample1;sample2;sample3;sample4;sample5" \
--examples "examples.tfrecord.gz" \
--pileup_image_heights "20;20;20;20;20" \ # optional
--downsample_fractions "0.5;0.5;0.5;0.5;0.5" # optional
"""
import os
from absl import app
from absl import flags
from deepvariant import logging_level
from deepvariant import make_examples_core
from deepvariant import make_examples_options
from deepvariant.protos import deepvariant_pb2
from third_party.nucleus.io.python import hts_verbose
from third_party.nucleus.util import errors
from third_party.nucleus.util import proto_utils
MAIN_SAMPLE_INDEX = 0 # This is the first sample listed in --reads.
FLAGS = flags.FLAGS
# Adopt more general flags from make_examples_options.
flags.adopt_module_key_flags(make_examples_options)
# Define flags specific to multi-sample make_examples.
flags.DEFINE_string(
'reads', None, 'Required. A list of BAM/CRAM files, with different '
'samples separated by semi-colons. '
'At least one aligned, sorted, indexed BAM/CRAM file is required for '
'each sample. '
'All must be aligned to the same reference genome compatible with --ref. '
'Can provide multiple BAMs (comma-separated) for each sample. '
'Format is, for example: sample1;sample2_BAM1,sample2_BAM2;sample3 ')
flags.DEFINE_string(
'sample_names', 'DEFAULT',
'Sample names corresponding to the samples (must match order and length of '
'samples in --reads). Separate names for each sample with semi-colons, '
'e.g. "sample1;sample2;sample3". '
    'If not specified (i.e. "sample1;;sample3" or even ";;"), '
    'any sample without a sample name from this flag will have its name inferred from '
'the header information from --reads.')
flags.DEFINE_string(
'downsample_fractions', 'DEFAULT',
    'If not an empty string (""), must be a value between 0.0 and 1.0. '
'Reads will be kept (randomly) with a probability of downsample_fraction '
'from the input sample BAMs. This argument makes it easy to create '
'examples as though the input BAM had less coverage. '
'Similar to --reads and --sample_name, supply different '
'values for each sample by separating them with semi-colons, '
'where the order of samples is the same as in --reads.')
flags.DEFINE_string(
'pileup_image_heights', 'DEFAULT',
'Height for the part of the pileup image showing reads from each sample. '
'By default, use a height of 100 for all samples. '
'Similar to --reads and --sample_name, supply different '
'values for each sample by separating them with semi-colons, '
'where the order of samples is the same as in --reads.')
def n_samples_from_flags(add_flags=True, flags_obj=None):
"""Collects sample-related options into a list of samples."""
n_reads = flags_obj.reads.split(';')
num_samples = len(n_reads)
flags_organized = {}
for flag_name in [
'reads', 'sample_names', 'downsample_fractions', 'pileup_image_heights'
]:
if flags_obj[flag_name].value != 'DEFAULT':
flags_organized[flag_name] = flags_obj[flag_name].value.split(';')
if len(flags_organized[flag_name]) != num_samples:
raise ValueError(f'--{flag_name} has {len(flags_organized[flag_name])} '
f'samples, but it should be matching the number of '
f'samples in --reads, which was {num_samples}.')
else:
flags_organized[flag_name] = [''] * num_samples
n_sample_options = []
for i in range(num_samples):
sample_name = make_examples_core.assign_sample_name(
sample_name_flag=flags_organized['sample_names'][i],
reads_filenames=flags_organized['reads'][i])
n_sample_options.append(
deepvariant_pb2.SampleOptions(
role=str(i),
name=sample_name,
variant_caller_options=make_examples_core.make_vc_options(
sample_name=sample_name, flags_obj=flags_obj),
order=range(num_samples),
pileup_height=100))
if add_flags:
for i in range(num_samples):
n_sample_options[i].reads_filenames.extend(
flags_organized['reads'][i].split(','))
if flags_organized['downsample_fractions'][i]:
n_sample_options[i].downsample_fraction = float(
flags_organized['downsample_fractions'][i])
if flags_organized['pileup_image_heights'][i]:
n_sample_options[i].pileup_height = int(
flags_organized['pileup_image_heights'][i])
# Ordering here determines the default order of samples, and when a sample
# above has a custom .order, then this is the list those indices refer to.
samples_in_order = n_sample_options
sample_role_to_train = '0'
return samples_in_order, sample_role_to_train
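# Illustrative sketch (not part of the original file): with
#     --reads "a.bam;b.bam" --sample_names "s1;s2"
# this helper returns two SampleOptions protos with roles "0" and "1", named
# "s1" and "s2", each listing its own BAM file, and reports role "0" as the
# sample role to train on.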
def default_options(add_flags=True, flags_obj=None):
"""Creates a MakeExamplesOptions proto populated with reasonable defaults.
Args:
add_flags: bool. defaults to True. If True, we will push the value of
certain FLAGS into our options. If False, those option fields are left
uninitialized.
flags_obj: object. If not None, use as the source of flags, else use global
FLAGS.
Returns:
deepvariant_pb2.MakeExamplesOptions protobuf.
Raises:
ValueError: If we observe invalid flag values.
"""
if not flags_obj:
flags_obj = FLAGS
samples_in_order, sample_role_to_train = n_samples_from_flags(
add_flags=add_flags, flags_obj=flags_obj)
options = make_examples_options.shared_flags_to_options(
add_flags=add_flags,
flags_obj=flags_obj,
samples_in_order=samples_in_order,
sample_role_to_train=sample_role_to_train,
main_sample_index=MAIN_SAMPLE_INDEX)
if add_flags:
options.bam_fname = '|'.join(
[os.path.basename(x) for x in flags_obj.reads.split(';')])
return options
def check_options_are_valid(options):
"""Checks that all the options chosen make sense together."""
# Check for general flags (shared for DeepVariant and DeepTrio).
make_examples_options.check_options_are_valid(
options, main_sample_index=MAIN_SAMPLE_INDEX)
sample_names = [s.name for s in options.sample_options]
if len(sample_names) != len(set(sample_names)):
raise ValueError('--sample_names cannot contain duplicate names.')
def main(argv=()):
with errors.clean_commandline_error_exit():
if len(argv) > 1:
errors.log_and_raise(
'Command line parsing failure: make_examples does not accept '
'positional arguments but some are present on the command line: '
'"{}".'.format(str(argv)), errors.CommandLineError)
del argv # Unused.
proto_utils.uses_fast_cpp_protos_or_die()
logging_level.set_from_flag()
hts_verbose.set(hts_verbose.htsLogLevel[FLAGS.hts_logging_level])
# Set up options; may do I/O.
options = default_options(add_flags=True, flags_obj=FLAGS)
check_options_are_valid(options)
# Run!
make_examples_core.make_examples_runner(options)
if __name__ == '__main__':
flags.mark_flags_as_required([
'examples',
'mode',
'reads',
'ref',
])
app.run(main)
|
google/deepvariant
|
deepvariant/multisample_make_examples.py
|
Python
|
bsd-3-clause
| 9,287
|
from django.test import TestCase
from django.conf import settings
from django.contrib.sites.models import Site
from django.db.models.query import QuerySet
from preferences import preferences
from music.models import TrackContributor, Credit, Track, Album, CreditOption
from music.utils import wikipedia, lastfm
class ScraperTestCase(TestCase):
@classmethod
def setUpClass(cls):
# Disable scraping
settings.JMBO_MUSIC['scrapers'] = []
# Bootstrap music preferences
prefs = preferences.MusicPreferences
prefs.save()
creditoption = CreditOption.objects.create(
music_preferences=prefs, role_type='artist', role_name='Artist',
role_priority=1
)
# Legitimate entries
artist = TrackContributor.objects.create(title="Oasis")
album = Album.objects.create(title="What's the story morning glory")
track = Track.objects.create(title="Don't look back in anger")
track.create_credit("Oasis", "artist")
track.album.add(album.id)
track.save()
cls.wikipedia_artist = artist
cls.wikipedia_album = album
cls.wikipedia_track = track
artist = TrackContributor.objects.create(title="Foo Fighters")
album = Album.objects.create(title="One By One")
track = Track.objects.create(title="All My Life")
track.create_credit("Foo Fighters", "artist")
track.album.add(album.id)
track.save()
cls.lastfm_artist = artist
cls.lastfm_album = album
cls.lastfm_track = track
# Illegitimate entries
artist = TrackContributor.objects.create(title="vgnfdnvnvfnsncfd")
album = Album.objects.create(title="tggbfbvfvf")
track = Track.objects.create(title="grfgrgeagteg")
track.create_credit("vgnfdnvnvfnsncfd", "artist")
track.album = [album]
track.save()
cls.iartist = artist
cls.ialbum = album
cls.itrack = track
def test_wikipedia(self):
settings.JMBO_MUSIC['scrapers'] = ['wikipedia']
wikipedia(self.wikipedia_artist)
wikipedia(self.wikipedia_album)
wikipedia(self.wikipedia_track)
wikipedia(self.iartist)
wikipedia(self.ialbum)
wikipedia(self.itrack)
self.failUnless(self.wikipedia_artist.image)
self.failUnless(self.wikipedia_album.image)
self.failUnless(self.wikipedia_track.image)
self.failIf(self.iartist.image)
self.failIf(self.ialbum.image)
# Track is exempt because it always gets a default image
def test_lastfm(self):
# Abort test if no API key was set
try:
dc = settings.JMBO_MUSIC['lastfm_api_key']
dc = settings.JMBO_MUSIC['lastfm_api_secret']
except KeyError:
return
settings.JMBO_MUSIC['scrapers'] = ['lastfm']
lastfm(self.lastfm_artist)
lastfm(self.lastfm_album)
lastfm(self.lastfm_track)
lastfm(self.iartist)
lastfm(self.ialbum)
lastfm(self.itrack)
self.failUnless(self.lastfm_artist.image)
self.failUnless(self.lastfm_album.image)
self.failUnless(self.lastfm_track.image)
self.failIf(self.iartist.image)
self.failIf(self.ialbum.image)
# Track is exempt because it always gets a default image
|
praekelt/jmbo-music
|
music/tests/__init__.py
|
Python
|
bsd-3-clause
| 3,379
|
from django.conf import settings
def mask_toggle(number_to_mask_or_unmask):
return int(number_to_mask_or_unmask) ^ settings.MASKING_KEY
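# Worked example (illustrative only; the real MASKING_KEY comes from Django
# settings): XOR with a fixed key is its own inverse, so if MASKING_KEY were
# 12345 then
#     mask_toggle(7)               == 12345 ^ 7 == 12350
#     mask_toggle(mask_toggle(7))  == 7
# i.e. the same function both masks and unmasks a number.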
|
shafiquejamal/socialassistanceregistry
|
nr/nr/formulas.py
|
Python
|
bsd-3-clause
| 137
|
# -*- coding: utf-8 -*-
"""SIP Flask Master Device package."""
import logging
__subsystem__ = 'TangoControl'
__service_name__ = 'FlaskMaster'
__version_info__ = (1, 3, 0)
__version__ = '.'.join(map(str, __version_info__))
__service_id__ = ':'.join(map(str, (__subsystem__,
__service_name__,
__version__)))
LOG = logging.getLogger('sip.tc.flask_master')
__all__ = [
'__subsystem__',
'__service_name__',
'__version__',
'__service_id__',
'LOG'
]
|
SKA-ScienceDataProcessor/integration-prototype
|
sip/tango_control/flask_master/app/release.py
|
Python
|
bsd-3-clause
| 535
|
#!/usr/bin/env python3
"""Read migrated cache file."""
import argparse
import logging
import sys
import rss2irc
def main():
"""Try to read given cache file."""
args = parse_args()
logger = logging.getLogger('read-migrated-cache')
cache = rss2irc.read_cache(logger, args.cache)
assert isinstance(cache, rss2irc.CachedData)
assert len(cache.items)
sys.exit(0)
def parse_args() -> argparse.Namespace:
"""Return parsed CLI args."""
parser = argparse.ArgumentParser()
parser.add_argument(
'--cache',
dest='cache', type=str, default=None,
help='File which contains cache.'
)
return parser.parse_args()
if __name__ == '__main__':
main()
|
zstyblik/rss2irc
|
migrations/tests/files/read_migrated_cache.py
|
Python
|
bsd-3-clause
| 714
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
from telemetry import timeline_model
def Import(data):
trace = json.loads(data) # pylint: disable=W0612
model = timeline_model.TimelineModel()
# TODO(nduca): Actually import things.
return model
|
nacl-webkit/chrome_deps
|
tools/telemetry/telemetry/trace_event_importer.py
|
Python
|
bsd-3-clause
| 386
|
import h5py
import sys
import os.path as op
from fos import *
import numpy as np
a=np.loadtxt(op.join(op.dirname(__file__), "data", "rat-basal-forebrain.swc") )
pos = a[:,2:5].astype( np.float32 )
radius = a[:,5].astype( np.float32 ) * 4
# extract parent connectivity and create full connectivity
parents = a[1:,6] - 1
parents = parents.astype(np.uint32).T
connectivity = np.vstack( (parents, np.arange(1, len(parents)+1) ) ).T.astype(np.uint32)
colors = np.random.random( ( (len(connectivity)/2, 4)) )
colors[:,3] = 1.0
w = Window()
scene = Scene( scenename = "Main" )
act = Skeleton( name = "Neuron",
vertices = pos,
connectivity = connectivity,
connectivity_colors=colors) #, radius = radius)
scene.add_actor( act )
w.add_scene( scene )
w.refocus_camera()
|
fos/fos
|
examples/neuron.py
|
Python
|
bsd-3-clause
| 815
|
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Integration'] , ['MovingAverage'] , ['Seasonal_WeekOfYear'] , ['AR'] );
|
antoinecarme/pyaf
|
tests/model_control/detailed/transf_Integration/model_control_one_enabled_Integration_MovingAverage_Seasonal_WeekOfYear_AR.py
|
Python
|
bsd-3-clause
| 167
|
# -*-coding:Utf-8 -*
# Copyright (c) 2014 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant le module secondaire diligence."""
from abstraits.module import *
from primaires.format.fonctions import format_nb
from secondaires.diligence import commandes
from secondaires.diligence.diligence import DiligenceMaudite
from secondaires.diligence import editeurs
class Module(BaseModule):
"""Module proposant des zones aléatoires.
Ce module est appelé "diligence", car la diligence maudite est
le premier type de zone semi-aléatoire développée sur ce MUD.
L'idée est de définir une zone modèle et de créer des salles
répliques pour des diligences à déplacement semi-aléatoire. les
salles définies dans le modèle proposent les titres, descriptions,
détails et scripts pour les salles dupliquées.
"""
def __init__(self, importeur):
"""Constructeur du module"""
BaseModule.__init__(self, importeur, "diligence", "secondaire")
self.commandes = []
self.diligences = {}
self.logger = self.importeur.man_logs.creer_logger(
"diligence", "diligence")
def config(self):
"""Configuration du module."""
self.importeur.scripting.a_charger.append(self)
BaseModule.config(self)
def init(self):
"""Chargement des objets du module."""
diligences = self.importeur.supenr.charger_groupe(DiligenceMaudite)
for diligence in diligences:
self.ajouter_diligence(diligence)
self.logger.info(format_nb(len(diligences),
"{nb} diligence{s} maudite{s} récupérée{s}", fem=True))
BaseModule.init(self)
def ajouter_commandes(self):
"""Ajout des commandes dans l'interpréteur"""
self.commandes = [
commandes.diligence.CmdDiligence(),
]
for cmd in self.commandes:
self.importeur.interpreteur.ajouter_commande(cmd)
        # Add the editors
self.importeur.interpreteur.ajouter_editeur(
editeurs.diledit.EdtDiledit)
@property
def zones(self):
"""Retourne toutes les zones des diligences (actives)."""
cles = [cle + "_" for cle in self.diligences.keys()]
zones = []
for zone in importeur.salle.zones.values():
if any(zone.cle.startswith(c) for c in cles):
zones.append(zone)
return zones
def creer_diligence(self, cle):
"""Crée une diligence."""
if cle in self.diligences:
raise ValueError("la diligence {} existe déjà".format(
repr(cle)))
diligence = DiligenceMaudite(cle)
self.ajouter_diligence(diligence)
return diligence
def ajouter_diligence(self, diligence):
"""Ajoute le diligence."""
if diligence.cle in self.diligences:
raise ValueError("la diligence de clé {} est " \
"déjà définie".format(repr(diligence.cle)))
self.diligences[diligence.cle] = diligence
def supprimer_diligence(self, cle):
"""Supprime une diligence."""
if cle not in self.diligences:
raise ValueError("la diligence {} n'existe pas".format(
repr(cle)))
self.diligences.pop(cle).detruire()
|
stormi/tsunami
|
src/secondaires/diligence/__init__.py
|
Python
|
bsd-3-clause
| 4,815
|
#!/usr/bin/env python
#
# Copyright (c) 2015 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# Hongjuan, Wang<hongjuanx.wang@intel.com>
# Yun, Liu<yunx.liu@intel.com>
import os
import sys
import commands
import shutil
import urllib2
SCRIPT_PATH = os.path.realpath(__file__)
ConstPath = os.path.dirname(SCRIPT_PATH)
def setUp():
global device, XwalkPath, crosswalkVersion, PackTools, ARCH, cachedir
#device = "E6OKCY411012"
device = os.environ.get('DEVICE_ID')
cachedir = os.environ.get('CROSSWALK_APP_TOOLS_CACHE_DIR')
if not device:
print ("Get env error\n")
sys.exit(1)
fp = open(ConstPath + "/../arch.txt", 'r')
if fp.read().strip("\n\t") != "x86":
ARCH = "arm"
else:
ARCH = "x86"
fp.close()
vp = open(ConstPath + "/../version.txt", 'r')
crosswalkVersion = vp.read().strip("\n\t")
vp.close()
PackTools = ConstPath + "/../tools/crosswalk-app-tools/src/"
XwalkPath = ConstPath + "/../tools/"
if "crosswalk-app-tools" not in os.listdir(XwalkPath):
print "Please check if the crosswalk-app-tools exists in " + ConstPath + "/../tools/"
sys.exit(1)
elif "crosswalk-app-tools" in os.listdir(XwalkPath) and len(os.listdir(XwalkPath)) < 2:
print "Please check if the Crosswalk Binary exists in " + ConstPath + "/../tools/"
sys.exit(1)
def clear(pkg):
os.chdir(XwalkPath)
if os.path.exists(ConstPath + "/../tools/" + pkg):
try:
shutil.rmtree(XwalkPath + pkg)
except Exception as e:
os.system("rm -rf " + XwalkPath + pkg + " &>/dev/null")
def create(self):
clear("org.xwalk.test")
setUp()
os.chdir(XwalkPath)
cmd = PackTools + \
"crosswalk-app create org.xwalk.test --android-crosswalk=" + \
crosswalkVersion
packstatus = commands.getstatusoutput(cmd)
self.assertEquals(packstatus[0], 0)
self.assertIn("org.xwalk.test", os.listdir(os.getcwd()))
def build(self, cmd):
buildstatus = commands.getstatusoutput(cmd)
self.assertEquals(buildstatus[0], 0)
self.assertIn("pkg", os.listdir(XwalkPath + "org.xwalk.test"))
os.chdir('pkg')
apks = os.listdir(os.getcwd())
self.assertNotEquals(len(apks), 0)
for i in range(len(apks)):
self.assertTrue(apks[i].endswith(".apk"))
if "x86" in apks[i]:
self.assertIn("x86", apks[i])
if i < len(os.listdir(os.getcwd())):
self.assertIn("arm", apks[i - 1])
else:
self.assertIn("arm", apks[i + 1])
elif "arm" in apks[i]:
self.assertIn("arm", apks[i])
if i < len(os.listdir(os.getcwd())):
self.assertIn("x86", apks[i - 1])
else:
self.assertIn("x86", apks[i + 1])
def update(self, cmd):
updatestatus = commands.getstatusoutput(cmd)
self.assertEquals(updatestatus[0], 0)
self.assertNotIn("ERROR:", updatestatus[1])
version = updatestatus[1].split('\n')[-1].split(' ')[-1][1:-1]
if not cachedir:
namelist = os.listdir(os.getcwd())
else:
newcachedir = os.environ.get('CROSSWALK_APP_TOOLS_CACHE_DIR')
os.chdir(newcachedir)
namelist = os.listdir(os.getcwd())
os.chdir(XwalkPath + 'org.xwalk.test')
crosswalk = 'crosswalk-{}.zip'.format(version)
self.assertIn(crosswalk, namelist)
return version
def run(self):
setUp()
apks = os.listdir(os.getcwd())
for apk in apks:
if ARCH in apk:
inststatus = commands.getstatusoutput(
'adb -s ' +
device +
' install -r ' +
os.getcwd() +
'/' +
apk)
# print inststatus
self.assertEquals(inststatus[0], 0)
self.assertIn("Success", inststatus[1])
pmstatus = commands.getstatusoutput(
'adb -s ' +
device +
' shell pm list package |grep org.xwalk.test')
self.assertEquals(pmstatus[0], 0)
launstatus = commands.getstatusoutput(
'adb -s ' +
device +
' shell am start -n org.xwalk.test/.TestActivity')
self.assertEquals(launstatus[0], 0)
stopstatus = commands.getstatusoutput(
'adb -s ' +
device +
' shell am force-stop org.xwalk.test')
self.assertEquals(stopstatus[0], 0)
uninstatus = commands.getstatusoutput(
'adb -s ' +
device +
' uninstall org.xwalk.test')
self.assertEquals(uninstatus[0], 0)
def channel(self, channel):
createcmd = PackTools + \
"crosswalk-app create org.xwalk.test --android-crosswalk=" + channel
packstatus = commands.getstatusoutput(createcmd)
self.assertEquals(packstatus[0], 0)
self.assertIn(channel, packstatus[1])
crosswalklist = urllib2.urlopen(
'https://download.01.org/crosswalk/releases/crosswalk/android/' +
channel +
'/').read()
fp = open('test', 'w')
fp.write(crosswalklist)
fp.close()
line = commands.getstatusoutput(
"cat test|sed -n '/src\=\"\/icons\/folder.gif\"/=' |sed -n '$p'")[1].strip()
cmd = "cat test |sed -n '%dp' |awk -F 'href=' '{print $2}' |awk -F '\"|/' '{print $2}'" % int(
line)
version = commands.getstatusoutput(cmd)[1]
if not '.' in version:
line = commands.getstatusoutput(
"tac test|sed -n '/src\=\"\/icons\/folder.gif\"/=' |sed -n '2p'")[1].strip()
cmd = "tac test |sed -n '%dp' |awk -F 'href=' '{print $2}' |awk -F '\"|/' '{print $2}'" % int(
line)
version = commands.getstatusoutput(cmd)[1]
commands.getstatusoutput("rm -rf test")
crosswalk = 'crosswalk-{}.zip'.format(version)
namelist = os.listdir(os.getcwd())
self.assertIn(crosswalk, namelist)
|
jiajiax/crosswalk-test-suite
|
apptools/apptools-android-tests/apptools/comm.py
|
Python
|
bsd-3-clause
| 7,398
|
# -*- coding: utf-8 -*-
from ooni.utils import log
from twisted.python import usage
from twisted.internet import defer
from ooni.templates import dnst
class UsageOptions(usage.Options):
optParameters = [
['target', 't', None, 'Specify a single hostname to query.'],
        ['expected', 'e', None, 'Specify file containing expected lookup results'],
]
class DNSLookup(dnst.DNSTest):
name = "DNSLookupTest"
version = 0.1
usageOptions = UsageOptions
def setUp(self):
self.expected_results = []
self.dns_servers = []
if self.input:
self.hostname = self.input
elif self.localOptions['target']:
self.hostname = self.localOptions['target']
else:
self.hostname = "torproject.org"
if self.localOptions['expected']:
with open (self.localOptions['expected']) as file:
for line in file:
self.expected_results.append(line.strip())
else:
self.expected_results = [
'154.35.132.70',
'38.229.72.14',
'38.229.72.16',
'82.195.75.101',
'86.59.30.40',
'93.95.227.222'
]
self.report['expected_results'] = self.expected_results
with open('/etc/resolv.conf') as f:
for line in f:
if line.startswith('nameserver'):
self.dns_servers.append(line.split(' ')[1].strip())
self.report['dns_servers'] = self.dns_servers
def verify_results(self, results):
for result in results:
if result not in self.expected_results:
return False
return True
@defer.inlineCallbacks
def test_dns_comparison(self):
"""
        Performs an A lookup on the specified host and matches the results
        against a set of expected results. When not specified, the host and
        expected results default to "torproject.org" and
        ['154.35.132.70', '38.229.72.14', '38.229.72.16', '82.195.75.101', '86.59.30.40', '93.95.227.222'].
"""
for s in self.dns_servers:
dnsServer = (s, 53)
results = yield self.performALookup(self.hostname, dnsServer)
if results:
if self.verify_results(results):
self.report['TestStatus'] = 'OK'
else:
self.report['TestStatus'] = 'FAILED'
self.report['TestException'] = 'unexpected results'
@defer.inlineCallbacks
def test_control_results(self):
"""
        Google's 8.8.8.8 server is queried in order to generate
control data.
"""
results = yield self.performALookup(self.hostname, ("8.8.8.8", 53) )
if results:
self.report['control_results'] = results
|
frankcash/censorship-analyser
|
dnscompare.py
|
Python
|
bsd-3-clause
| 3,151
|
"""`Factory of Factories` pattern."""
from dependency_injector import containers, providers
class SqlAlchemyDatabaseService:
def __init__(self, session, base_class):
self.session = session
self.base_class = base_class
class TokensService:
def __init__(self, id_generator, database):
self.id_generator = id_generator
self.database = database
class Token:
...
class UsersService:
def __init__(self, id_generator, database):
self.id_generator = id_generator
self.database = database
class User:
...
# Sample objects
session = object()
id_generator = object()
class Container(containers.DeclarativeContainer):
database_factory = providers.Factory(
providers.Factory,
SqlAlchemyDatabaseService,
session=session,
)
token_service = providers.Factory(
TokensService,
id_generator=id_generator,
database=database_factory(base_class=Token),
)
user_service = providers.Factory(
UsersService,
id_generator=id_generator,
database=database_factory(base_class=User),
)
if __name__ == '__main__':
container = Container()
token_service = container.token_service()
assert token_service.database.base_class is Token
user_service = container.user_service()
assert user_service.database.base_class is User
|
rmk135/objects
|
examples/miniapps/factory-patterns/factory_of_factories.py
|
Python
|
bsd-3-clause
| 1,394
|
from django.test import TestCase
class CollectionTests(TestCase):
pass
|
takeplace/django-composite
|
composite/tests/urls.py
|
Python
|
bsd-3-clause
| 77
|
# -*- coding: utf8 -*-
"""
.. module:: burpui.api.misc
:platform: Unix
:synopsis: Burp-UI misc api module.
.. moduleauthor:: Ziirish <hi+burpui@ziirish.me>
"""
from . import api, cache_key, force_refresh
from ..engines.server import BUIServer # noqa
from .custom import fields, Resource
from .client import ClientLabels
from ..filter import mask
from ..exceptions import BUIserverException
from ..decorators import browser_cache
from ..ext.cache import cache
from ..ext.i18n import LANGUAGES
from flask import flash, get_flashed_messages, url_for, current_app, session
from flask_login import current_user
import random
import re
bui = current_app # type: BUIServer
ns = api.namespace("misc", "Misc methods")
def clear_cache(pattern=None):
"""Clear the cache, you can also provide a pattern to only clean matching keys"""
if pattern is None:
cache.clear()
else:
if hasattr(cache.cache, "_client") and hasattr(cache.cache._client, "keys"):
if hasattr(cache.cache, "key_prefix") and cache.cache.key_prefix:
pattern = cache.cache.key_prefix + pattern
keys = cache.cache._client.keys(pattern)
cache.cache._client.delete(keys)
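# Usage sketch (assumes a redis-like backend, which is an assumption and not
# part of the original module): the pattern is a key glob, prefixed with the
# cache's key_prefix when one is configured, so clear_cache() drops everything
# while something like clear_cache('*counters*') would only drop matching keys.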
counters_fields = ns.model(
"Counters",
{
"phase": fields.String(description="Backup phase"),
"Total": fields.List(
fields.Integer,
description="new/deleted/scanned/unchanged/total",
attribute="total",
),
"Files": fields.List(
fields.Integer,
description="new/deleted/scanned/unchanged/total",
attribute="files",
),
"Files (encrypted)": fields.List(
fields.Integer,
description="new/deleted/scanned/unchanged/total",
attribute="files_encrypted",
),
"Meta data": fields.List(
fields.Integer,
description="new/deleted/scanned/unchanged/total",
attribute="meta_data",
),
"Meta data (enc)": fields.List(
fields.Integer,
description="new/deleted/scanned/unchanged/total",
attribute="meta_data_encrypted",
),
"Directories": fields.List(
fields.Integer,
description="new/deleted/scanned/unchanged/total",
attribute="directories",
),
"Soft links": fields.List(
fields.Integer,
description="new/deleted/scanned/unchanged/total",
attribute="soft_links",
),
"Hard links": fields.List(
fields.Integer,
description="new/deleted/scanned/unchanged/total",
attribute="hard_links",
),
"Special files": fields.List(
fields.Integer,
description="new/deleted/scanned/unchanged/total",
attribute="special_files",
),
"VSS headers": fields.List(
fields.Integer,
description="new/deleted/scanned/unchanged/total",
attribute="vss_headers",
),
"VSS headers (enc)": fields.List(
fields.Integer,
description="new/deleted/scanned/unchanged/total",
attribute="vss_headers_encrypted",
),
"VSS footers": fields.List(
fields.Integer,
description="new/deleted/scanned/unchanged/total",
attribute="vss_footers",
),
"VSS footers (enc)": fields.List(
fields.Integer,
description="new/deleted/scanned/unchanged/total",
attribute="vss_footers_encrypted",
),
"Grand total": fields.List(
fields.Integer,
description="new/deleted/scanned/unchanged/total",
attribute="grand_total",
),
"warning": fields.Integer(description="Number of warnings so far"),
"estimated_bytes": fields.Integer(description="Estimated Bytes in backup"),
"bytes": fields.Integer(description="Bytes in backup"),
"bytes_in": fields.Integer(description="Bytes received since backup started"),
"bytes_out": fields.Integer(description="Bytes sent since backup started"),
"start": fields.String(description="Timestamp of the start date of the backup"),
"speed": fields.Integer(description="Backup speed", default=-1),
"timeleft": fields.Integer(description="Estimated time left"),
"percent": fields.Integer(required=True, description="Percentage done"),
"path": fields.String(description="File that is currently treated by burp"),
},
)
@ns.route(
"/counters",
"/<server>/counters",
"/counters/<client>",
"/<server>/counters/<client>",
endpoint="counters",
)
@ns.doc(
params={
"server": "Which server to collect data from when in multi-agent mode",
"client": "Client name",
},
)
class Counters(Resource):
"""The :class:`burpui.api.misc.Counters` resource allows you to
render the *live view* template of a given client.
This resource is part of the :mod:`burpui.api.api` module.
An optional ``GET`` parameter called ``serverName`` is supported when running
in multi-agent mode.
A mandatory ``GET`` parameter called ``clientName`` is used to know what client we
are working on.
"""
parser = ns.parser()
parser.add_argument(
"serverName", help="Which server to collect data from when in multi-agent mode"
)
parser.add_argument("clientName", help="Client name")
monitor_fields = ns.model(
"Monitor",
{
"client": fields.String(required=True, description="Client name"),
"agent": fields.String(description="Server (agent) name"),
"counters": fields.Nested(
counters_fields,
description="Various statistics about the running backup",
),
"labels": fields.List(fields.String, description="List of labels"),
},
)
@ns.marshal_with(monitor_fields, code=200, description="Success")
@ns.expect(parser)
@ns.doc(
responses={
400: "Missing argument",
403: "Insufficient permissions",
404: "Client not found in the running clients list",
},
)
def get(self, server=None, client=None):
"""Returns counters for a given client
**GET** method provided by the webservice.
:param name: the client name if any. You can also use the GET parameter
'name' to achieve the same thing
:returns: Counters
"""
args = self.parser.parse_args()
server = server or args["serverName"]
client = client or args["clientName"]
# Check params
if not client:
self.abort(400, "No client name provided")
# Manage ACL
if (
not current_user.is_anonymous
and not current_user.acl.is_admin()
and not current_user.acl.is_client_allowed(client, server)
):
self.abort(403, "Not allowed to view '{}' counters".format(client))
running = bui.client.is_one_backup_running()
if isinstance(running, dict):
if server and client not in running[server]:
self.abort(
404,
"'{}' not found in the list of running clients for '{}'".format(
client, server
),
)
else:
found = False
for (_, cls) in running.items():
if client in cls:
found = True
break
if not found:
                    self.abort(404, "'{}' not found in running clients".format(client))
else:
if client not in running:
self.abort(404, "'{}' not found in running clients".format(client))
try:
counters = bui.client.get_counters(client, agent=server)
except BUIserverException:
counters = {}
res = {}
res["client"] = client
res["agent"] = server
res["counters"] = counters
try:
res["labels"] = ClientLabels._get_labels(client, server)
except BUIserverException as exp:
self.abort(500, str(exp))
return res
@ns.route("/monitor", "/<server>/monitor", endpoint="live")
@ns.doc(
params={
"server": "Which server to collect data from when in multi-agent mode",
},
)
class Live(Resource):
"""The :class:`burpui.api.misc.Live` resource allows you to
retrieve a list of servers that are currently *alive*.
This resource is part of the :mod:`burpui.api.misc` module.
An optional ``GET`` parameter called ``serverName`` is supported when running
in multi-agent mode.
"""
parser = ns.parser()
parser.add_argument(
"serverName", help="Which server to collect data from when in multi-agent mode"
)
live_fields = ns.model(
"Live",
{
"client": fields.String(required=True, description="Client name"),
"agent": fields.String(description="Server (agent) name"),
"counters": fields.Nested(
counters_fields,
description="Various statistics about the running backup",
),
"labels": fields.List(fields.String, description="List of labels"),
},
)
@ns.marshal_list_with(live_fields, code=200, description="Success")
@ns.expect(parser)
def get(self, server=None):
"""Returns a list of clients that are currently running a backup
**GET** method provided by the webservice.
The *JSON* returned is:
::
[
{
'client': 'client1',
'agent': 'burp1',
'counters': {
'phase': 2,
'path': '/etc/some/configuration',
'...': '...'
},
'labels': [
'...'
]
},
{
'client': 'client12',
'agent': 'burp2',
'counters': {
'phase': 3,
'path': '/etc/some/other/configuration',
'...': '...'
},
'labels': [
'...'
]
}
]
        The output is filtered by the :mod:`burpui.misc.acl` module so that
        you only see stats for the clients you are allowed to access.
:param server: Which server to collect data from when in multi-agent mode
:type server: str
:returns: The *JSON* described above
"""
args = self.parser.parse_args()
server = server or args["serverName"]
res = []
is_admin = True
has_acl = not current_user.is_anonymous
if has_acl:
is_admin = current_user.acl.is_admin()
# ACL
if (
has_acl
and not is_admin
and server
and not current_user.acl.is_server_allowed(server)
):
self.abort(403, "You are not allowed to view stats of this server")
if server:
running = bui.client.is_one_backup_running(server)
# ACL
if mask.has_filters(current_user):
running = [
x
for x in running
if mask.is_client_allowed(current_user, x, server)
]
else:
running = bui.client.is_one_backup_running()
if isinstance(running, dict):
for (serv, clients) in running.items():
for client in clients:
# ACL
if mask.has_filters(current_user) and not mask.is_client_allowed(
current_user, client, serv
):
continue
data = {}
data["client"] = client
data["agent"] = serv
try:
data["counters"] = bui.client.get_counters(client, agent=serv)
except BUIserverException:
data["counters"] = {}
try:
data["labels"] = ClientLabels._get_labels(client, serv)
except BUIserverException:
data["labels"] = []
res.append(data)
else:
for client in running:
# ACL
if mask.has_filters(current_user) and not mask.is_client_allowed(
current_user, client, server
):
continue
data = {}
data["client"] = client
try:
data["counters"] = bui.client.get_counters(client, agent=server)
except BUIserverException:
data["counters"] = {}
try:
data["labels"] = ClientLabels._get_labels(client)
except BUIserverException:
data["labels"] = []
res.append(data)
return res
@ns.route("/alert", endpoint="alert")
class Alert(Resource):
"""The :class:`burpui.api.misc.Alert` resource allows you to propagate a
message to the next screen.
This resource is part of the :mod:`burpui.api.misc` module.
"""
parser = ns.parser()
parser.add_argument("message", required=True, help="Message to display")
parser.add_argument(
"level",
help="Alert level",
choices=("danger", "warning", "info", "success", "0", "1", "2", "3"),
default="danger",
)
@ns.expect(parser)
@ns.doc(
responses={
201: "Success",
},
)
def post(self):
"""Propagate a message to the next screen (or whatever reads the session)"""
def translate(level):
levels = ["danger", "warning", "info", "success"]
convert = {"0": "success", "1": "warning", "2": "error", "3": "info"}
if not level:
return "danger"
# return the converted value or the one we already had
new = convert.get(level, level)
# if the level is not handled, assume 'danger'
if new not in levels:
return "danger"
return new
        # retrieve last flashed messages so we don't lose anything
for level, message in get_flashed_messages(with_categories=True):
flash(message, level)
args = self.parser.parse_args()
message = args["message"]
level = translate(args["level"])
flash(message, level)
return {"message": message, "level": level}, 201
@ns.route("/languages", endpoint="languages")
class Languages(Resource):
"""The :class:`burpui.api.misc.Languages` resource allows you to retrieve
a list of supported languages.
This resource is part of the :mod:`burpui.api.misc` module.
"""
wild = fields.Wildcard(fields.String, description="Supported languages")
languages_fields = ns.model(
"Languages",
{
"*": wild,
},
)
@cache.cached(timeout=3600, key_prefix=cache_key, unless=force_refresh)
@ns.marshal_with(languages_fields, code=200, description="Success")
@browser_cache(3600)
def get(self):
"""Returns a list of supported languages
**GET** method provided by the webservice.
The *JSON* returned is:
::
{
"en": "English",
"fr": "Français"
}
:returns: The *JSON* described above.
"""
return LANGUAGES
@ns.route("/about", "/<server>/about", endpoint="about")
@ns.doc(
params={
"server": "Which server to collect data from when in multi-agent mode",
},
)
class About(Resource):
"""The :class:`burpui.api.misc.About` resource allows you to retrieve
    various information about ``Burp-UI``
An optional ``GET`` parameter called ``serverName`` is supported when running
in multi-agent mode.
"""
# Login not required on this view
login_required = False
parser = ns.parser()
parser.add_argument(
"serverName", help="Which server to collect data from when in multi-agent mode"
)
burp_fields = ns.model(
"Burp",
{
"name": fields.String(
required=True, description="Instance name", default="Burp"
),
"client": fields.String(description="Burp client version"),
"server": fields.String(description="Burp server version"),
},
)
about_fields = ns.model(
"About",
{
"version": fields.String(required=True, description="Burp-UI version"),
"release": fields.String(description="Burp-UI release (commit number)"),
"api": fields.String(description="Burp-UI API documentation URL"),
"burp": fields.Nested(
burp_fields, as_list=True, description="Burp version"
),
},
)
@cache.cached(timeout=3600, key_prefix=cache_key, unless=force_refresh)
@ns.marshal_with(about_fields, code=200, description="Success")
@ns.expect(parser)
@browser_cache(3600)
def get(self, server=None):
"""Returns various informations about Burp-UI"""
args = self.parser.parse_args()
res = {}
server = server or args["serverName"]
res["version"] = api.version
res["release"] = api.release
res["api"] = url_for("api.doc")
res["burp"] = []
cli = bui.client.get_client_version(server)
srv = bui.client.get_server_version(server)
multi = {}
if isinstance(cli, dict):
for (name, val) in cli.items():
multi[name] = {"client": val}
if isinstance(srv, dict):
for (name, val) in srv.items():
multi[name]["server"] = val
if not multi:
res["burp"].append({"client": cli, "server": srv})
else:
for (name, val) in multi.items():
tmp = val
tmp.update({"name": name})
res["burp"].append(tmp)
return res
@ns.route("/ping", endpoint="ping")
class Ping(Resource):
"""The :class:`burpui.api.misc.Ping` resource allows you to ping the API.
It is actually a Dummy endpoint that does nothing"""
# Login not required on this view
login_required = False
ping_fields = ns.model(
"Ping",
{
"alive": fields.Boolean(required=True, description="API alive?"),
},
)
@ns.marshal_list_with(ping_fields, code=200, description="Success")
@ns.doc(
responses={
200: "Success",
403: "Insufficient permissions",
},
)
def get(self):
"""Tells if the API is alive"""
return {"alive": True}
@ns.route(
"/history",
"/history/<client>",
"/<server>/history",
"/<server>/history/<client>",
endpoint="history",
)
@ns.doc(
params={
"server": "Which server to collect data from when in multi-agent mode",
"client": "Client name",
},
)
class History(Resource):
"""The :class:`burpui.api.misc.History` resource allows you to retrieve
    a history of the backups
An optional ``GET`` parameter called ``serverName`` is supported when
running in multi-agent mode and ``clientName`` is also allowed to filter
by client.
::
$('#calendar').fullCalendar({
eventSources: [
// your event source
{
events: [ // put the array in the `events` property
{
title : 'event1',
start : '2010-01-01'
},
{
title : 'event2',
start : '2010-01-05',
end : '2010-01-07'
},
{
title : 'event3',
start : '2010-01-09T12:30:00',
}
],
color: 'black', // an option!
textColor: 'yellow' // an option!
}
// any other event sources...
]
});
"""
parser = ns.parser()
parser.add_argument(
"serverName", help="Which server to collect data from when in multi-agent mode"
)
parser.add_argument("clientName", help="Which client to collect data from")
parser.add_argument("start", help="Return events after this date")
parser.add_argument("end", help="Return events before this date")
event_fields = ns.model(
"Event",
{
"title": fields.String(required=True, description="Event name"),
"start": fields.DateTime(
dt_format="iso8601",
description="Start time of the event",
attribute="date",
),
"end": fields.DateTime(
dt_format="iso8601", description="End time of the event"
),
"name": fields.String(description="Client name"),
"backup": fields.BackupNumber(
description="Backup number", attribute="number"
),
"url": fields.String(description="Callback URL"),
},
)
history_fields = ns.model(
"History",
{
"events": fields.Nested(
event_fields, as_list=True, description="Events list"
),
"color": fields.String(description="Background color"),
"textColor": fields.String(description="Text color"),
"name": fields.String(description="Feed name"),
},
)
@cache.cached(timeout=1800, key_prefix=cache_key, unless=force_refresh)
@ns.marshal_list_with(history_fields, code=200, description="Success")
@ns.expect(parser)
@ns.doc(
responses={
200: "Success",
403: "Insufficient permissions",
},
)
@browser_cache(1800)
def get(self, client=None, server=None):
"""Returns a list of calendars describing the backups that have been
completed so far
**GET** method provided by the webservice.
The *JSON* returned is:
::
[
{
"color": "#7C6F44",
"events": [
{
"backup": "0000001",
"end": "2015-01-25 13:32:04+01:00",
"name": "toto-test",
"start": "2015-01-25 13:32:00+01:00",
"title": "Client: toto-test, Backup n°0000001",
"url": "/client/toto-test"
}
],
"name": "toto-test",
"textColor": "white"
}
]
The output is filtered by the :mod:`burpui.misc.acl` module so that you
        only see stats about the clients you are authorized to see.
:param server: Which server to collect data from when in multi-agent mode
:type server: str
:param client: Which client to collect data from
:type client: str
:returns: The *JSON* described above
"""
self._check_acl(client, server)
return self._get_backup_history(client, server)
def _check_acl(self, client=None, server=None):
args = self.parser.parse_args()
client = client or args["clientName"]
server = server or args["serverName"]
if (
server
and mask.has_filters(current_user)
and not mask.is_server_allowed(current_user, server)
):
self.abort(403, "You are not allowed to view this server infos")
if (
client
and mask.has_filters(current_user)
and not mask.is_client_allowed(current_user, client, server)
):
self.abort(403, "You are not allowed to view this client infos")
def _get_backup_history(self, client=None, server=None, data=None):
import arrow
ret = []
args = self.parser.parse_args()
client = client or args["clientName"]
server = server or args["serverName"]
moments = {"start": None, "end": None}
has_filters = mask.has_filters(current_user)
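        # Parse the optional 'start'/'end' query parameters into UNIX timestamps;
        # values that arrow cannot parse are silently ignored.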
for moment in moments.keys():
if moment in args:
try:
if args[moment] is not None:
moments[moment] = arrow.get(args[moment]).int_timestamp
except arrow.parser.ParserError:
pass
if client:
(color, text) = self.gen_colors(client, server)
feed = {
"color": color,
"textColor": text,
"events": self.gen_events(client, moments, server, data),
}
name = client
if server:
name += " on {}".format(server)
feed["name"] = name
ret.append(feed)
return ret
elif server:
if data and server in data:
clients = [{"name": x} for x in data[server].keys()]
else:
clients = bui.client.get_all_clients(agent=server, last_attempt=False)
# manage ACL
if has_filters:
clients = [
x
for x in clients
if mask.is_client_allowed(current_user, x["name"], server)
]
for cl in clients:
(color, text) = self.gen_colors(cl["name"], server)
feed = {
"events": self.gen_events(cl["name"], moments, server, data),
"textColor": text,
"color": color,
"name": "{} on {}".format(cl["name"], server),
}
ret.append(feed)
return ret
if bui.config["STANDALONE"]:
if data:
clients_list = data.keys()
else:
try:
clients_list = [
x["name"]
for x in bui.client.get_all_clients(last_attempt=False)
]
except BUIserverException:
clients_list = []
if has_filters:
clients_list = [
x
for x in clients_list
if mask.is_client_allowed(current_user, x)
]
for cl in clients_list:
(color, text) = self.gen_colors(cl)
feed = {
"events": self.gen_events(cl, moments, data=data),
"textColor": text,
"color": color,
"name": cl,
}
ret.append(feed)
return ret
else:
grants = {}
for serv in bui.client.servers:
if has_filters:
try:
all_clients = [
x["name"]
for x in bui.client.get_all_clients(
serv, last_attempt=False
)
]
except BUIserverException:
all_clients = []
grants[serv] = [
x
for x in all_clients
if mask.is_client_allowed(current_user, x, serv)
]
else:
grants[serv] = "all"
for (serv, clients) in grants.items():
if not isinstance(clients, list):
if data and serv in data:
clients = data[serv].keys()
else:
clients = [
x["name"]
for x in bui.client.get_all_clients(
agent=serv, last_attempt=False
)
]
for cl in clients:
(color, text) = self.gen_colors(cl, serv)
feed = {
"events": self.gen_events(cl, moments, serv, data),
"textColor": text,
"color": color,
"name": "{} on {}".format(cl, serv),
}
ret.append(feed)
return ret
def gen_colors(self, client=None, agent=None):
"""Generates color for an events feed"""
cache = self._get_color_session(client, agent)
if cache:
return (cache["color"], cache["text"])
labels = bui.client.get_client_labels(client, agent)
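        # HTML_COLOR recognizes three forms and exposes them as named groups:
        #   <hex>   '#abc' or '#aabbcc' (channels in <red_hex>/<green_hex>/<blue_hex>)
        #   <rgb>   'rgb(123, 42, 9)'   (channels in <red>/<green>/<blue>)
        #   <plain> a bare color keyword such as 'black'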
HTML_COLOR = r"((?P<hex>#(?P<red_hex>[0-9a-f]{1,2})(?P<green_hex>[0-9a-f]{1,2})(?P<blue_hex>[0-9a-f]{1,2}))|(?P<rgb>rgb\s*\(\s*(?P<red>2[0-5]{2}|2[0-4]\d|[0-1]?\d\d?)\s*,\s*(?P<green>2[0-5]{2}|2[0-4]\d|[0-1]?\d\d?)\s*,\s*(?P<blue>2[0-5]{2}|2[0-4]\d|[0-1]?\d\d?)\s*\))|(?P<plain>[\w-]+$))"
color_found = False
color = None
text = None
for label in labels:
# We are looking for labels starting with "color:" or "text:"
if re.search(r"^color:", label, re.IGNORECASE):
search = re.search(
r"^color:\s*{}".format(HTML_COLOR), label, re.IGNORECASE
)
# we allow various color forms. For instance:
# hex: #fa12e6
# rgb: rgb (123, 42, 9)
# plain: black
if search.group("hex"):
red = search.group("red_hex")
green = search.group("green_hex")
blue = search.group("blue_hex")
                    # ensure the hex part is of the form XX
red = red + red if len(red) == 1 else red
green = green + green if len(green) == 1 else green
blue = blue + blue if len(blue) == 1 else blue
# Now convert the hex to an int
red = int(red, 16)
green = int(green, 16)
blue = int(blue, 16)
elif search.group("rgb"):
red = int(search.group("red"))
green = int(search.group("green"))
blue = int(search.group("blue"))
elif search.group("plain"):
# if plain color is provided, we cannot guess the adapted
# text color, so we assume white (unless text is specified)
red = 0
green = 0
blue = 0
color = search.group("plain")
else:
continue
color = color or "#{:02X}{:02X}{:02X}".format(red, green, blue)
color_found = True
if re.search(r"^text:", label, re.IGNORECASE):
search = re.search(
r"^text:\s*{}".format(HTML_COLOR), label, re.IGNORECASE
)
# if we don't find anything, we'll generate a color based on
# the value of the red, green and blue variables
text = (
search.group("hex") or search.group("rgb") or search.group("plain")
)
if color and text:
break
if not color_found:
def rand():
return random.randint(0, 255)
red = rand()
green = rand()
blue = rand()
text = text or self._get_text_color(red, green, blue)
color = color or "#{:02X}{:02X}{:02X}".format(red, green, blue)
self._set_color_session(color, text, client, agent)
return (color, text)
def _get_text_color(self, red=0, green=0, blue=0):
"""Generates the text color for a given color"""
yiq = ((red * 299) + (green * 587) + (blue * 114)) / 1000
return "black" if yiq >= 128 else "white"
def _get_color_session(self, client, agent=None):
"""Since we can *paginate* the rendering, we need to store the already
generated colors
        This method allows retrieving already generated colors, if any
"""
sess = session._get_current_object()
if "colors" in sess:
colors = sess["colors"]
if agent and agent in colors:
return colors[agent].get(client)
elif not agent:
return colors.get(client)
return None
def _set_color_session(self, color, text, client, agent=None):
"""Since we can *paginate* the rendering, we need to store the already
generated colors
        This method allows storing already generated colors in the session
"""
sess = session._get_current_object()
dic = {}
if agent:
if "colors" in sess and agent in sess["colors"]:
dic[agent] = sess["colors"][agent]
else:
dic[agent] = {}
dic[agent][client] = {"color": color, "text": text}
else:
dic[client] = {"color": color, "text": text}
if "colors" in sess:
sess["colors"].update(dic)
else:
sess["colors"] = dic
def gen_events(self, client, moments, server=None, data=None):
"""Creates events for a given client"""
events = []
filtered = False
if data:
if bui.config["STANDALONE"]:
events = data.get(client, [None])
else:
events = data.get(server, {}).get(client, [None])
if not events:
events = bui.client.get_client_filtered(
client, start=moments["start"], end=moments["end"], agent=server
)
filtered = True
ret = []
for ev in events:
if not ev:
continue
if data and not filtered:
# events are sorted by date DESC
if moments["start"] and ev["date"] < moments["start"]:
continue
if moments["end"] and ev["date"] > moments["end"]:
continue
ev["title"] = "Client: {0}, Backup n°{1:07d}".format(
client, int(ev["number"])
)
if server:
ev["title"] += ", Server: {0}".format(server)
ev["name"] = client
ev["url"] = url_for(
"view.backup_report",
name=client,
server=server,
backup=int(ev["number"]),
)
ret.append(ev)
return ret
|
ziirish/burp-ui
|
burpui/api/misc.py
|
Python
|
bsd-3-clause
| 35,390
|
# -*- coding: utf-8 -*-
"""
Network Plugin
Network usage and connections
"""
import os, netifaces, psutil, time
from pkm import utils, SHAREDIR
from pkm.decorators import never_raise, threaded_method
from pkm.plugin import BasePlugin, BaseConfig
from pkm.filters import register_filter
NAME = 'Network'
DEFAULT_IGNORES = 'lxc tun'
class Plugin(BasePlugin):
DEFAULT_INTERVAL = 1
@threaded_method
def enable(self):
self.nics = {}
self.ignores = self.pkmeter.config.get(self.namespace, 'ignores', '')
self.ignores = list(filter(None, self.ignores.split(' ')))
super(Plugin, self).enable()
@never_raise
def update(self):
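        # Refresh per-interface counters: skip loopback, keep interfaces that have an
        # IPv4 address and are not ignored, and drop tracked ones that no longer qualify.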
for iface, newio in psutil.net_io_counters(True).items():
if not iface.startswith('lo'):
netinfo = netifaces.ifaddresses(iface)
if netinfo.get(netifaces.AF_INET) and not self._is_ignored(iface):
newio = self._net_io_counters(newio)
newio['iface'] = iface
newio.update(netinfo[netifaces.AF_INET][0])
self._deltas(self.nics.get(iface,{}), newio)
self.nics[iface] = newio
elif iface in self.nics:
del self.nics[iface]
self.data['nics'] = sorted(self.nics.values(), key=lambda n:n['iface'])
self.data['total'] = self._deltas(self.data.get('total',{}), self._net_io_counters())
super(Plugin, self).update()
def _is_ignored(self, iface):
if self.ignores:
for ignore in self.ignores:
if iface.startswith(ignore):
return True
return False
def _net_io_counters(self, io=None):
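        # Snapshot the cumulative psutil I/O counters as a plain dict
        # (per-interface counters are passed in via `io`, system-wide otherwise).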
io = io or psutil.net_io_counters()
return {
'bytes_sent': io.bytes_sent,
'bytes_recv': io.bytes_recv,
'packets_sent': io.packets_sent,
'packets_recv': io.packets_recv,
'errin': io.errin,
'errout': io.errout,
'dropin': io.dropin,
'dropout': io.dropout,
}
def _deltas(self, previo, newio):
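        # Derive per-second send/receive rates from two successive counter snapshots;
        # on the first call `previo` is empty, so the large tdelta yields rates near zero.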
now = time.time()
tdelta = now - previo.get('updated',0)
for key in ['bytes_sent', 'bytes_recv']:
newio['%s_per_sec' % key] = int((newio[key] - previo.get(key,0)) / tdelta)
newio['updated'] = now
return newio
class Config(BaseConfig):
TEMPLATE = os.path.join(SHAREDIR, 'templates', 'network_config.html')
FIELDS = utils.Bunch(BaseConfig.FIELDS,
ignores = {'default':DEFAULT_IGNORES}
)
@register_filter()
def network_friendly_iface(iface):
iface = iface.replace('eth', 'Ethernet ')
iface = iface.replace('wlan', 'Wireless ')
iface = iface.replace(' 0', '')
return iface
|
mjs7231/pkmeter
|
pkm/plugins/network.py
|
Python
|
bsd-3-clause
| 2,803
|
from parameter import *
from parse_digit import *
from os import system
cmd = "make -C tools >/dev/null 2>/dev/null;mkdir log model 2>/dev/null"
system(cmd)
# remove any method/data set whose results you are not interested in
methodlist = ['random-forest','gbdt']
data = ['MQ2007','MQ2008','MSLR','YAHOO_SET1','YAHOO_SET2','MQ2007-list','MQ2008-list']
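# The loop below emits a LaTeX tabular comparing training time, pairwise accuracy
# and NDCG per method and data set; missing log files are produced by running the
# corresponding experiment first.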
print "\\begin{tabular}{l"+"|rrr"*len(methodlist) +"}"
if 'random-forest' in methodlist:
print "& \\multicolumn{3}{|c}{Random forests}",
if 'gbdt' in methodlist:
print "& \\multicolumn{3}{|c}{GBDT}",
print "\\\\"
print "& Training & Pairwise & "*len(methodlist), "\\\\"
print "Data set "+"& time (s) & accuracy & NDCG "*len(methodlist) +"\\\\"
print "\\hline"
for d in data:
o = []
for method in methodlist:
dp = log_path + d + '.' + method+ '.fewtrees.log'
try:
tmp_data = open(dp,'r').readlines()
except:
traindata = path + data_path[d]
testdata = path + test_path[d]
if method == 'random-forest':
cmd = "%s -f %s -F -z -p %s -k %s -t %s %s %s ./tmp_file >> %s 2>/dev/null"%(tree_exe,num_feature[d],num_processors, num_sampled_feature[d], tree_num_few[method],traindata,testdata,dp)
elif method == 'gbdt':
model = model_path + d + '.' + method + '.' + 'fewtrees.model'
cmd = "mpirun -np %s %s %s %s %s 4 100 0.1 -m >%s 2>> %s"%(8,gbrt_exe,traindata,num_instance[d],num_feature[d]+1,model,dp)
system('echo \'%s\' >> %s'%(cmd, dp))
system(cmd)
cmd = "cat %s|python %s ./tmp_exe"%(model,gbrt_compile_test)
system('echo \'%s\' >> %s'%(cmd, dp))
system(cmd)
cmd = "cat %s|./tmp_exe > ./tmp_file"%testdata
system('echo \'%s\' >> %s'%(cmd, dp))
system(cmd)
cmd = "tools/eval ./tmp_file %s >> %s;rm -f tmp_file ./tmp_exe*"%(testdata, dp)
system('echo \'%s\' >> %s'%(cmd, dp))
system(cmd)
tmp_data = open(dp,'r').readlines()
for l in tmp_data:
if 'time' in l:
time = l.split(' ')[-1].strip()
digit = FormatWithCommas("%5.1f",float(time))
digit = "$"+digit+"$"
o.append(digit)
if 'accuracy' in l:
acc = l.split(' ')[-2].strip().strip('%')
digit = "$%5.2f$"%float(acc)+"\\%"
o.append(digit)
if d == 'YAHOO_SET1' or d == 'YAHOO_SET2':
if '(YAHOO)' in l:
ndcg = l.split(' ')[-2].strip()
digit = "$%1.4f$"%float(ndcg)
o.append(digit)
else:
if 'Mean' in l:
if d == 'MQ2007-list' or d == 'MQ2008-list':
digit = "NA"
else:
ndcg = l.split(' ')[-2].strip()
digit = "$%1.4f$"%float(ndcg)
o.append(digit)
print output_name[d],
for l in o:
print "& %s "%l,
print "\\\\"
print "\\end{tabular}"
|
JasonWyse/FacRankSvm_c
|
table6.py
|
Python
|
bsd-3-clause
| 2,609
|
"""
Prepare Sparse Matrix for Sparse Affinity Propagation Clustering (SAP)
"""
# Authors: Huojun Cao <bioinfocao at gmail.com>
# License: BSD 3 clause
import numpy as np
import pandas as pd
import sparseAP_cy # cython for calculation
############################################################################################
#
def copySym(rowBased_row_array,rowBased_col_array,rowBased_data_array,singleRowInds):
"""
    For single-column or single-row items, copy the minimal symmetric value.
    For example, if for sample 'A' the only datapoint of [s(A,A),s(A,B),s(A,C)...] is s(A,B),
    then we copy the minimal value of [s(A,A),s(C,A),s(D,A)...] (except s(B,A), because if we copied s(B,A), 'A' would still only have one datapoint).
"""
copy_row_array,copy_col_array,copy_data_array=sparseAP_cy.copySingleRows(rowBased_row_array,rowBased_col_array,rowBased_data_array,singleRowInds)
#if symCopy=='all':
#rowBased_row_array=np.concatenate((rowBased_row_array,copy_col_array))
#rowBased_col_array=np.concatenate((rowBased_col_array,copy_row_array))
#rowBased_data_array=np.concatenate((rowBased_data_array,copy_data_array))
#else:# symCopy=='min' or others will be treated as 'min'
df = pd.DataFrame(zip(copy_row_array,copy_col_array,copy_data_array), columns=['row', 'col', 'data'])
copy_row_list,copy_col_list,copy_data_list=[],[],[]
for ind in singleRowInds:
copyData=df[(df.col==ind) & (df.row!=ind)].sort_values(['data']).copy()
copyData_min=copyData[0:1]
copy_row_list+=list(copyData_min.col)
copy_col_list+=list(copyData_min.row)
copy_data_list+=list(copyData_min.data)
rowBased_row_array=np.concatenate((rowBased_row_array,copy_row_list))
rowBased_col_array=np.concatenate((rowBased_col_array,copy_col_list))
rowBased_data_array=np.concatenate((rowBased_data_array,copy_data_list))
return rowBased_row_array,rowBased_col_array,rowBased_data_array
def rmSingleSamples(rowBased_row_array,rowBased_col_array,rowBased_data_array,nSamplesOri):
"""
    The affinity/similarity matrix does not need to be symmetric, that is, s(A,B) does not need to equal s(B,A).
    Also, since the affinity/similarity matrix is sparse, s(A,B) may exist while s(B,A) does not.
    For FSAPC to work, specifically in the computation of the R and A matrices, each row/column of the affinity/similarity matrix must have at least two datapoints.
    So in FSAPC we first remove samples that have no affinity/similarity with other samples, that is, samples that only have affinity/similarity with themselves.
    We also remove samples that only have one symmetric datapoint, for example when for sample 'B' only s(B,C) exists and for sample 'C' only s(C,B) exists.
    In these two cases the samples are removed from the FSAPC computation and their exemplars are set to themselves.
    For samples that only have one datapoint (affinity/similarity) with others, for example if for sample 'A' the only datapoint of [s(A,A),s(A,B),s(A,C)...] is s(A,B),
    and there exists at least one value in [s(A,A),s(C,A),s(D,A)...] (except s(B,A), because if we copied s(B,A), 'A' would still only have one datapoint),
    then we copy the minimal value of [s(A,A),s(C,A),s(D,A)...].
    nSamplesOri is the number of samples in the original input data.
"""
# find rows and cols that only have one datapoint
singleRowInds=set(sparseAP_cy.singleItems(rowBased_row_array))
singleColInds=set(sparseAP_cy.singleItems(rowBased_col_array))
    # samples with only one datapoint in both their row and their column are samples that only have affinity/similarity with themselves
singleSampleInds=singleRowInds & singleColInds
# in case every col/row have more than one datapoint, just return original data
if len(singleRowInds)==0 and len(singleColInds)==0:
return rowBased_row_array,rowBased_col_array,rowBased_data_array,None,None,nSamplesOri
    # remove samples that only have affinity/similarity with themselves,
    # or that only have one symmetric datapoint, e.g. for sample 'B' only s(B,C) exists and for sample 'C' only s(C,B) exists;
    # in these two cases the samples are removed from the FSAPC computation and their exemplars are set to themselves.
if len(singleSampleInds)>0:
        # row indexes that are left after removing single samples
rowLeft=sorted(list(set(range(nSamplesOri))-singleSampleInds))
        # map of original row index to current row index (after removing rows/cols that only have a single item)
rowOriLeftDict={ori:left for left,ori in enumerate(rowLeft)}
rowLeftOriDict={left:ori for ori,left in rowOriLeftDict.items()}
rowBased_row_array,rowBased_col_array,rowBased_data_array=sparseAP_cy.removeSingleSamples(rowBased_row_array,rowBased_col_array,rowBased_data_array,singleSampleInds)
else: # no samples are removed
rowLeftOriDict=None
#if len(singleSampleInds)>0:
#rowBased_row_array,rowBased_col_array,rowBased_data_array=sparseAP_cy.removeSingleSamples(rowBased_row_array,rowBased_col_array,rowBased_data_array,singleSampleInds)
    # for samples that need a minimal value copied so they have at least two datapoints in their row/column:
    # for samples whose row has a single datapoint, copy the minimal value from this sample's column
singleRowInds=singleRowInds-singleSampleInds
if len(singleRowInds)>0:
rowBased_row_array,rowBased_col_array,rowBased_data_array=copySym(rowBased_row_array.astype(np.int),rowBased_col_array.astype(np.int),rowBased_data_array,singleRowInds)
    # for samples whose column has a single datapoint, copy the minimal value from this sample's row
singleColInds=singleColInds-singleSampleInds
if len(singleColInds)>0:
rowBased_col_array,rowBased_row_array,rowBased_data_array=copySym(rowBased_col_array.astype(np.int),rowBased_row_array.astype(np.int),rowBased_data_array,singleColInds)
# change row, col index if there is any sample removed
if len(singleSampleInds)>0:
changeIndV=np.vectorize(lambda x:rowOriLeftDict[x])
rowBased_row_array=changeIndV(rowBased_row_array)
rowBased_col_array=changeIndV(rowBased_col_array)
#rearrange based on new row index and new col index, print ('{0}, sort by row,col'.format(datetime.now()))
sortedLeftOriInd = np.lexsort((rowBased_col_array,rowBased_row_array)).astype(np.int)
rowBased_row_array=sparseAP_cy.npArrRearrange_int_para(rowBased_row_array.astype(np.int),sortedLeftOriInd)
rowBased_col_array=sparseAP_cy.npArrRearrange_int_para(rowBased_col_array.astype(np.int),sortedLeftOriInd)
rowBased_data_array=sparseAP_cy.npArrRearrange_float_para(rowBased_data_array,sortedLeftOriInd)
return rowBased_row_array,rowBased_col_array,rowBased_data_array,rowLeftOriDict,singleSampleInds,nSamplesOri-len(singleSampleInds)
def preCompute(rowBased_row_array,rowBased_col_array,S_rowBased_data_array):
"""
format affinity/similarity matrix
"""
# Get parameters
data_len=len(S_rowBased_data_array)
row_indptr=sparseAP_cy.getIndptr(rowBased_row_array)
if row_indptr[-1]!=data_len: row_indptr=np.concatenate((row_indptr,np.array([data_len])))
row_to_col_ind_arr=np.lexsort((rowBased_row_array,rowBased_col_array))
colBased_row_array=sparseAP_cy.npArrRearrange_int_para(rowBased_row_array,row_to_col_ind_arr)
colBased_col_array=sparseAP_cy.npArrRearrange_int_para(rowBased_col_array,row_to_col_ind_arr)
col_to_row_ind_arr=np.lexsort((colBased_col_array,colBased_row_array))
col_indptr=sparseAP_cy.getIndptr(colBased_col_array)
if col_indptr[-1]!=data_len: col_indptr=np.concatenate((col_indptr,np.array([data_len])))
kk_col_index=sparseAP_cy.getKKIndex(colBased_row_array,colBased_col_array)
#Initialize matrix A, R
A_rowbased_data_array=np.array([0.0]*data_len)
R_rowbased_data_array=np.array([0.0]*data_len)
    # Add a small random value to remove degeneracies
random_state=np.random.RandomState(0)
S_rowBased_data_array+=1e-12*random_state.randn(data_len)*(np.amax(S_rowBased_data_array)-np.amin(S_rowBased_data_array))
#Convert row_to_col_ind_arr/col_to_row_ind_arr data type to np.int datatype so it is compatible with cython code
row_to_col_ind_arr=row_to_col_ind_arr.astype(np.int)
col_to_row_ind_arr=col_to_row_ind_arr.astype(np.int)
return S_rowBased_data_array, A_rowbased_data_array, R_rowbased_data_array,col_indptr,row_indptr,row_to_col_ind_arr,col_to_row_ind_arr,kk_col_index
|
bioinfocao/pysapc
|
pysapc/sparseMatrixPrepare.py
|
Python
|
bsd-3-clause
| 8,514
|
import cPickle
import numpy as np
import sys
from collections import OrderedDict
def format_chars(chars_sent_ls):
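    # Zero-pad every character-index list to the length of the longest word in the
    # sentence, splitting the padding evenly between the left and right sides.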
max_leng = max([len(l) for l in chars_sent_ls])
to_pads = [max_leng - len(l) for l in chars_sent_ls]
for i, to_pad in enumerate(to_pads):
if to_pad % 2 == 0:
chars_sent_ls[i] = [0] * (to_pad / 2) + chars_sent_ls[i] + [0] * (to_pad / 2)
else:
chars_sent_ls[i] = [0] * (1 + (to_pad / 2)) + chars_sent_ls[i] + [0] * (to_pad / 2)
return chars_sent_ls
def load_bin_vec(fname, vocab):
"""
Loads word vecs from word2vec bin file
"""
word_vecs = OrderedDict()
with open(fname, "rb") as f:
header = f.readline()
vocab_size, layer1_size = map(int, header.split())
binary_len = np.dtype('float32').itemsize * layer1_size
for line in xrange(vocab_size):
word = []
while True:
ch = f.read(1)
if ch == ' ':
word = ''.join(word)
break
if ch != '\n':
word.append(ch)
if word in vocab:
idx = vocab[word]
word_vecs[idx] = np.fromstring(f.read(binary_len), dtype='float32')
else:
f.read(binary_len)
return word_vecs
def add_unknown_words(word_vecs, vocab, min_df=1, k=200):
"""
For words that occur in at least min_df documents, create a separate word vector.
    0.25 is chosen so the unknown vectors have (approximately) the same variance as the pre-trained ones
"""
for word in vocab:
if word not in word_vecs:
idx = vocab[word]
word_vecs[idx] = np.random.uniform(-0.25,0.25,k)
def is_number(s):
try:
float(s)
return True
except ValueError:
return False
def is_user(s):
if len(s)>1 and s[0] == "@":
return True
else:
return False
def is_url(s):
if len(s)>4 and s[:5] == "http:":
return True
else:
return False
def digits(n):
digit_str = ''
for i in range(n):
digit_str = digit_str + 'DIGIT'
return digit_str
def establishdic(fname, gname, hname, binfile):
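    # Build word/tag/character -> index dictionaries from the train, validation and
    # test files, then attach pre-trained (or random) word vectors keyed by word index.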
data = open(fname, "rb").readlines() + open(gname, "rb").readlines() + open(hname, "rb").readlines()
char_dict = OrderedDict()
vocab_dict = OrderedDict()
tag_dict = OrderedDict()
char_count = 0
vocab_count = 0
tag_count = 0
for line in data:
line = line.replace('\n', '').replace('\r', '')
line = line.split("\t")
if line == ['', ''] or line == [''] or line[0].isdigit() != True:
continue
vocab = line[1]
tag = line[3]
if is_number(vocab): # check if the term is a number
vocab = digits(len(vocab))
if is_url(vocab):
vocab = "URL"
if is_user(vocab):
vocab = "USR"
if vocab not in vocab_dict:
vocab_dict[vocab] = vocab_count
vocab_count += 1
if tag not in tag_dict:
tag_dict[tag] = tag_count
tag_count += 1
# generate char dictionary
chars = list(vocab)
for char in chars:
if char not in char_dict:
char_dict[char] = char_count
char_count += 1
pos_dictionary = OrderedDict()
pos_dictionary['words2idx'] = vocab_dict
pos_dictionary['labels2idx'] = tag_dict
pos_dictionary['chars2idx'] = char_dict
wordvec_dict = load_bin_vec(binfile, vocab_dict)
add_unknown_words(wordvec_dict, vocab_dict)
pos_dictionary['idx2vec'] = wordvec_dict
return pos_dictionary
def sepdata(fname, gname, hname, pos_dictionary):
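    # Convert each file into parallel per-sentence arrays of word, character and tag
    # indexes, returned as separate train/valid/test sets.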
vocab_dict = pos_dictionary['words2idx']
tag_dict = pos_dictionary['labels2idx']
char_dict = pos_dictionary['chars2idx']
# of all sets
dataset_words = []
dataset_labels = []
dataset_chars = []
for f in [fname, gname, hname]:
data = open(f, "rb").readlines()
# of a whole set
words_set = []
tag_labels_set = []
chars_set = []
# of a whole sentence
example_words = []
example_tag_labels = []
example_char = []
count = 0
for line in data:
line = line.replace('\n', '').replace('\r', '')
line = line.split("\t")
if (not line[0].isdigit()) and (line != ['']):
continue # this is the heading line
# this means a example finishes
if (line == ['', ''] or line == ['']) and (len(example_words) > 0):
words_set.append(np.array(example_words, dtype = "int32"))
tag_labels_set.append(np.array(example_tag_labels, dtype = "int32"))
chars_set.append(np.array(example_char, dtype = "int32"))
# restart a new example after one finishes
example_words = []
example_tag_labels = []
example_char = []
count += 1
else: # part of an example
vocab = line[1]
tag = line[3]
if is_number(vocab): # check if the term is a number
vocab = digits(len(vocab))
if is_url(vocab):
vocab = "URL"
if is_user(vocab):
vocab = "USR"
example_words.append(vocab_dict[vocab])
example_tag_labels.append(tag_dict[tag])
char_word_list = map(lambda u: char_dict[u], list(vocab))
example_char.append(char_word_list)
example_char = format_chars(example_char)
# for each example do a padding
dataset_words.append(words_set)
dataset_labels.append(tag_labels_set)
dataset_chars.append(chars_set)
train_pos= [dataset_words[0], dataset_chars[0], dataset_labels[0]]
valid_pos = [dataset_words[1], dataset_chars[1], dataset_labels[1]]
test_pos = [dataset_words[2], dataset_chars[2], dataset_labels[2]]
assert len(dataset_words[0]+dataset_words[1]+dataset_words[2]) == len(train_pos[0]) + len(valid_pos[0]) + len(test_pos[0])
return train_pos, valid_pos, test_pos
def main():
if len(sys.argv) != 6:
sys.exit("file paths not specified")
binfile = sys.argv[1]
fname = sys.argv[2] # train file
gname = sys.argv[3] # validation file
hname = sys.argv[4] # test file
outfilename = sys.argv[5]
pos_dictionary = establishdic(fname, gname, hname, binfile)
train_pos, valid_pos, test_pos = sepdata(fname, gname, hname, pos_dictionary)
print "train pos examples", len(train_pos[0])
print "valid pos examples", len(valid_pos[0])
print "test pos examples", len(test_pos[0])
with open(outfilename + ".pkl", "wb") as f:
cPickle.dump([train_pos, valid_pos, test_pos, pos_dictionary], f)
print "data %s is generated." % (outfilename + ".pkl")
if __name__ == '__main__':
main()
|
cosmozhang/NCRF-AE
|
process_data.py
|
Python
|
bsd-3-clause
| 7,056
|
from ..module import get_introspection_module
from ..overrides import override
import warnings
GElektra = get_introspection_module('GElektra')
def __func_alias(klass, old, new):
func = getattr(klass, old)
setattr(klass, new, func)
def __func_rename(klass, old, new):
__func_alias(klass, old, new)
delattr(klass, old)
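# __func_alias exposes a GI method under a second name; __func_rename additionally
# removes the original attribute so only the Python-friendly name remains.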
__all__ = []
## make the enums global
for n in GElektra.KeySwitch.__dict__:
if n.isupper():
globals()['KEY_' + n] = getattr(GElektra.KeySwitch, n)
__all__.append('KEY_' + n)
for n in GElektra.KdbOptions.__dict__:
if n.isupper():
globals()['KDB_O_' + n] = getattr(GElektra.KdbOptions, n)
__all__.append('KDB_O_' + n)
KS_END = None
__all__.append('KS_END')
# exceptions
class Exception(Exception):
def __init__(self, args = "Exception thrown by Elektra"):
super().__init__(args)
class KeyException(Exception):
def __init__(self, args = "Exception thrown by a Key, typically "
"because you called a method on a null key. "
"Make sure to check this with !key first"):
super().__init__(args)
class KeyInvalidName(KeyException):
def __init__(self, args = "Invalid Keyname: keyname needs to start "
"with user/ or system/"):
super().__init__(args)
__all__.extend([ 'Exception', 'KeyException', 'KeyInvalidName' ])
## Key
# rename gi-specific functions
__func_rename(GElektra.Key, 'gi_init', '_init')
__func_rename(GElektra.Key, 'gi_make', '_make')
__func_rename(GElektra.Key, 'gi_getstring', '_getstring')
__func_rename(GElektra.Key, 'gi_getbinary', '_getbinary')
# Python API convenience
__func_rename(GElektra.Key, 'cmp', '__cmp__')
__func_rename(GElektra.Key, 'setname', '_setname')
__func_rename(GElektra.Key, 'setbasename', '_setbasename')
__func_rename(GElektra.Key, 'getnamesize', '_getnamesize')
__func_rename(GElektra.Key, 'getbasenamesize', '_getbasenamesize')
__func_rename(GElektra.Key, 'getfullnamesize', '_getfullnamesize')
__func_rename(GElektra.Key, 'setstring', '_setstring')
__func_rename(GElektra.Key, 'setbinary', '_setbinary')
__func_rename(GElektra.Key, 'getvaluesize', '_getvaluesize')
__func_rename(GElektra.Key, 'rewindmeta', '_rewindmeta')
__func_rename(GElektra.Key, 'nextmeta', '_nextmeta')
__func_rename(GElektra.Key, 'currentmeta', '_currentmeta')
class Key(GElektra.Key):
def __new__(cls, *args):
# copy constructor
if len(args) == 1 and isinstance(args[0], cls):
return super()._make(args[0])
return super().__new__(cls, args)
def __init__(self, *args):
super().__init__()
if len(args) == 0:
return
arg0, *args = args
# copy constructor has been used, no init needed
if isinstance(arg0, self.__class__):
return
flags = 0
value = None
meta = {}
args = iter(args)
for arg in args:
if arg == KEY_END:
break
elif arg == KEY_SIZE:
# ignore value
next(args)
elif arg == KEY_VALUE:
value = next(args)
elif arg == KEY_FUNC:
raise TypeError("Unsupported meta type")
elif arg == KEY_FLAGS:
flags = next(args)
elif arg == KEY_META:
k = next(args)
meta[k] = next(args)
elif isinstance(arg, GElektra.KeySwitch):
warnings.warn("Deprecated option in keyNew: {0}".format(arg),
DeprecationWarning)
flags |= arg
else:
warnings.warn("Unknown option in keyNew: {0}".format(arg),
RuntimeWarning)
# _init clears our key
if isinstance(value, bytes):
super()._init(arg0, flags | KEY_BINARY, None, value)
else:
super()._init(arg0, flags & ~KEY_BINARY, value, None)
for k in meta:
self.setmeta(k, meta[k])
def _setname(self, name):
ret = super()._setname(name)
if ret < 0:
raise KeyInvalidName()
return ret
def _setbasename(self, name):
ret = super()._setbasename(name)
if ret < 0:
raise KeyInvalidName()
return ret
def addbasename(self, name):
ret = super().addbasename(name)
if ret < 0:
raise KeyInvalidName()
return ret
def get(self):
"""returns the keys value"""
if self.isbinary():
return self._getbinary()
return self._getstring()
def set(self, value):
"""set the keys value. Can be either string or binary"""
if isinstance(value, bytes):
return self._setbinary(value)
return self._setstring(str(value))
def getmeta(self, name = None):
"""returns a metakey given by name. Name can be either string or Key.
If no metakey is found None is returned.
If name is omitted an iterator object is returned.
"""
if name is not None:
meta = super().getmeta(name)
return meta if meta else None
return self.__metaIter()
def setmeta(self, name, value):
"""set a new metakey consisting of name and value"""
if isinstance(value, str):
return super().setmeta(name, value)
raise TypeError("Unsupported value type")
def __metaIter(self):
self._rewindmeta()
meta = self._nextmeta()
while meta:
yield meta
meta = self._nextmeta()
def __str__(self):
return self.name
def __bool__(self):
        return not self.isnull()
def __eq__(self, o):
return self.__cmp__(o) == 0
def __ne__(self, o):
return self.__cmp__(o) != 0
def __gt__(self, o):
return self.__cmp__(o) > 0
def __ge__(self, o):
return self.__cmp__(o) >= 0
def __lt__(self, o):
return self.__cmp__(o) < 0
def __le__(self, o):
return self.__cmp__(o) <= 0
name = property(lambda self: self.get_property('name'), _setname)
value = property(get, set, None, "Key value")
basename = property(lambda self: self.get_property('basename'), _setbasename)
fullname = property(lambda self: self.get_property('fullname'))
Key = override(Key)
__all__.append('Key')
## KeySet
# rename gi-specific functions
__func_rename(GElektra.KeySet, 'gi_append', 'append')
__func_rename(GElektra.KeySet, 'gi_append_keyset', '_append_keyset')
# Python API convenience
__func_rename(GElektra.KeySet, 'len', '__len__')
__func_rename(GElektra.KeySet, 'lookup_byname', '_lookup_byname')
__func_rename(GElektra.KeySet, 'rewind', '_rewind')
__func_rename(GElektra.KeySet, 'next', '_next')
__func_rename(GElektra.KeySet, 'current', '_current')
__func_rename(GElektra.KeySet, 'atcursor', '_atcursor')
class KeySet(GElektra.KeySet):
def __new__(cls, *args):
if len(args) == 1 and isinstance(args[0], __class__):
return super().dup(args[0])
return super().__new__(cls, args)
def __init__(self, *args):
super().__init__()
if len(args) == 0:
return
arg0, *args = args
if isinstance(arg0, __class__):
return
self.resize(arg0)
for arg in args:
if arg is KS_END:
break
self.append(arg)
def lookup(self, name):
"""Lookup a key by name. Name can be either string, Key or indexes.
If index is negative, search starts at the end.
Returns None if no key is found.
"""
if isinstance(name, Key):
key = super().lookup(name, KDB_O_NONE)
elif isinstance(name, str):
key = self._lookup_byname(name, KDB_O_NONE)
elif isinstance(name, int):
key = self._atcursor(name)
else:
raise TypeError("Unsupported type")
return key if key else None
def append(self, data):
if isinstance(data, __class__):
return self._append_keyset(data)
return super().append(data)
def __getitem__(self, key):
"""See lookup(...) for details.
Slices and negative indexes are supported as well.
"""
if isinstance(key, slice):
return [ self[k] for k in range(*key.indices(len(self))) ]
elif isinstance(key, ( int )):
item = self.lookup(key)
if item is None:
raise IndexError("index out of range")
return item
elif isinstance(key, ( str, Key )):
item = self.lookup(key)
if item is None:
raise KeyError(str(key))
return item
raise TypeError("Invalid argument type")
def __contains__(self, item):
"""See lookup(...) for details"""
if isinstance(item, ( str, Key )):
key = self.lookup(item)
return True if key else False
raise TypeError("Invalid argument type")
def __iter__(self):
i = 0
key = self.lookup(i)
while key:
yield key
i = i + 1
key = self.lookup(i)
KeySet = override(KeySet)
__all__.append('KeySet')
## Kdb
# rename gi-specific functions
__func_rename(GElektra.Kdb, 'gi_open', 'open')
class Kdb(GElektra.Kdb):
def __init__(self, *args):
super().__init__()
self.open(args[0] if len(args) else Key())
def get(self, ks, parent):
if isinstance(parent, str):
parent = Key(parent)
return super().get(ks, parent)
def set(self, ks, parent):
if isinstance(parent, str):
parent = Key(parent)
super().set(ks, parent)
def __enter__(self):
"""Internal method for usage with context managers"""
return self
def __exit__(self, type, value, tb):
"""Internal method for usage with context managers.
Closes the database.
"""
try:
self.close(Key())
except:
pass
Kdb = override(Kdb)
KDB = Kdb
__all__.extend([ 'Kdb', 'KDB' ])
|
e1528532/libelektra
|
src/bindings/gi/python/gi/overrides/GElektra.py
|
Python
|
bsd-3-clause
| 8,767
|
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test initalization and other aspects of Angle and subclasses"""
import threading
import warnings
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_array_equal
import astropy.units as u
from astropy.coordinates.angles import Longitude, Latitude, Angle
from astropy.coordinates.errors import (
IllegalSecondError, IllegalMinuteError, IllegalHourError,
IllegalSecondWarning, IllegalMinuteWarning)
from astropy.utils.exceptions import AstropyDeprecationWarning
def test_create_angles():
"""
Tests creating and accessing Angle objects
"""
''' The "angle" is a fundamental object. The internal
representation is stored in radians, but this is transparent to the user.
Units *must* be specified rather than a default value be assumed. This is
as much for self-documenting code as anything else.
Angle objects simply represent a single angular coordinate. More specific
angular coordinates (e.g. Longitude, Latitude) are subclasses of Angle.'''
a1 = Angle(54.12412, unit=u.degree)
a2 = Angle("54.12412", unit=u.degree)
a3 = Angle("54:07:26.832", unit=u.degree)
a4 = Angle("54.12412 deg")
a5 = Angle("54.12412 degrees")
a6 = Angle("54.12412°") # because we like Unicode
a7 = Angle((54, 7, 26.832), unit=u.degree)
a8 = Angle("54°07'26.832\"")
# (deg,min,sec) *tuples* are acceptable, but lists/arrays are *not*
# because of the need to eventually support arrays of coordinates
a9 = Angle([54, 7, 26.832], unit=u.degree)
assert_allclose(a9.value, [54, 7, 26.832])
assert a9.unit is u.degree
a10 = Angle(3.60827466667, unit=u.hour)
a11 = Angle("3:36:29.7888000120", unit=u.hour)
a12 = Angle((3, 36, 29.7888000120), unit=u.hour) # *must* be a tuple
# Regression test for #5001
a13 = Angle((3, 36, 29.7888000120), unit='hour')
Angle(0.944644098745, unit=u.radian)
with pytest.raises(u.UnitsError):
Angle(54.12412)
# raises an exception because this is ambiguous
with pytest.raises(u.UnitsError):
Angle(54.12412, unit=u.m)
with pytest.raises(ValueError):
Angle(12.34, unit="not a unit")
a14 = Angle("03h36m29.7888000120") # no trailing 's', but unambiguous
a15 = Angle("5h4m3s") # single digits, no decimal
assert a15.unit == u.hourangle
a16 = Angle("1 d")
a17 = Angle("1 degree")
assert a16.degree == 1
assert a17.degree == 1
a18 = Angle("54 07.4472", unit=u.degree)
a19 = Angle("54:07.4472", unit=u.degree)
a20 = Angle("54d07.4472m", unit=u.degree)
a21 = Angle("3h36m", unit=u.hour)
a22 = Angle("3.6h", unit=u.hour)
a23 = Angle("- 3h", unit=u.hour)
a24 = Angle("+ 3h", unit=u.hour)
# ensure the above angles that should match do
assert a1 == a2 == a3 == a4 == a5 == a6 == a7 == a8 == a18 == a19 == a20
assert_allclose(a1.radian, a2.radian)
assert_allclose(a2.degree, a3.degree)
assert_allclose(a3.radian, a4.radian)
assert_allclose(a4.radian, a5.radian)
assert_allclose(a5.radian, a6.radian)
assert_allclose(a6.radian, a7.radian)
assert_allclose(a10.degree, a11.degree)
assert a11 == a12 == a13 == a14
assert a21 == a22
assert a23 == -a24
# check for illegal ranges / values
with pytest.raises(IllegalSecondError):
a = Angle("12 32 99", unit=u.degree)
with pytest.raises(IllegalMinuteError):
a = Angle("12 99 23", unit=u.degree)
with pytest.raises(IllegalSecondError):
a = Angle("12 32 99", unit=u.hour)
with pytest.raises(IllegalMinuteError):
a = Angle("12 99 23", unit=u.hour)
with pytest.raises(IllegalHourError):
a = Angle("99 25 51.0", unit=u.hour)
with pytest.raises(ValueError):
a = Angle("12 25 51.0xxx", unit=u.hour)
with pytest.raises(ValueError):
a = Angle("12h34321m32.2s")
assert a1 is not None
def test_angle_from_view():
q = np.arange(3.) * u.deg
a = q.view(Angle)
assert type(a) is Angle
assert a.unit is q.unit
assert np.all(a == q)
q2 = np.arange(4) * u.m
with pytest.raises(u.UnitTypeError):
q2.view(Angle)
def test_angle_ops():
"""
Tests operations on Angle objects
"""
# Angles can be added and subtracted. Multiplication and division by a
# scalar is also permitted. A negative operator is also valid. All of
# these operate in a single dimension. Attempting to multiply or divide two
# Angle objects will return a quantity. An exception will be raised if it
# is attempted to store output with a non-angular unit in an Angle [#2718].
a1 = Angle(3.60827466667, unit=u.hour)
a2 = Angle("54:07:26.832", unit=u.degree)
a1 + a2 # creates new Angle object
a1 - a2
-a1
assert_allclose((a1 * 2).hour, 2 * 3.6082746666700003)
assert abs((a1 / 3.123456).hour - 3.60827466667 / 3.123456) < 1e-10
# commutativity
assert (2 * a1).hour == (a1 * 2).hour
a3 = Angle(a1) # makes a *copy* of the object, but identical content as a1
assert_allclose(a1.radian, a3.radian)
assert a1 is not a3
a4 = abs(-a1)
assert a4.radian == a1.radian
a5 = Angle(5.0, unit=u.hour)
assert a5 > a1
assert a5 >= a1
assert a1 < a5
assert a1 <= a5
# check operations with non-angular result give Quantity.
a6 = Angle(45., u.degree)
a7 = a6 * a5
assert type(a7) is u.Quantity
# but those with angular result yield Angle.
# (a9 is regression test for #5327)
a8 = a1 + 1.*u.deg
assert type(a8) is Angle
a9 = 1.*u.deg + a1
assert type(a9) is Angle
with pytest.raises(TypeError):
a6 *= a5
with pytest.raises(TypeError):
a6 *= u.m
with pytest.raises(TypeError):
np.sin(a6, out=a6)
def test_angle_methods():
# Most methods tested as part of the Quantity tests.
# A few tests here which caused problems before: #8368
a = Angle([0., 2.], 'deg')
a_mean = a.mean()
assert type(a_mean) is Angle
assert a_mean == 1. * u.degree
a_std = a.std()
assert type(a_std) is Angle
assert a_std == 1. * u.degree
a_var = a.var()
assert type(a_var) is u.Quantity
assert a_var == 1. * u.degree ** 2
a_ptp = a.ptp()
assert type(a_ptp) is Angle
assert a_ptp == 2. * u.degree
a_max = a.max()
assert type(a_max) is Angle
assert a_max == 2. * u.degree
a_min = a.min()
assert type(a_min) is Angle
assert a_min == 0. * u.degree
def test_angle_convert():
"""
Test unit conversion of Angle objects
"""
angle = Angle("54.12412", unit=u.degree)
assert_allclose(angle.hour, 3.60827466667)
assert_allclose(angle.radian, 0.944644098745)
assert_allclose(angle.degree, 54.12412)
assert len(angle.hms) == 3
assert isinstance(angle.hms, tuple)
assert angle.hms[0] == 3
assert angle.hms[1] == 36
assert_allclose(angle.hms[2], 29.78879999999947)
# also check that the namedtuple attribute-style access works:
assert angle.hms.h == 3
assert angle.hms.m == 36
assert_allclose(angle.hms.s, 29.78879999999947)
assert len(angle.dms) == 3
assert isinstance(angle.dms, tuple)
assert angle.dms[0] == 54
assert angle.dms[1] == 7
assert_allclose(angle.dms[2], 26.831999999992036)
# also check that the namedtuple attribute-style access works:
assert angle.dms.d == 54
assert angle.dms.m == 7
assert_allclose(angle.dms.s, 26.831999999992036)
assert isinstance(angle.dms[0], float)
assert isinstance(angle.hms[0], float)
# now make sure dms and signed_dms work right for negative angles
negangle = Angle("-54.12412", unit=u.degree)
assert negangle.dms.d == -54
assert negangle.dms.m == -7
assert_allclose(negangle.dms.s, -26.831999999992036)
assert negangle.signed_dms.sign == -1
assert negangle.signed_dms.d == 54
assert negangle.signed_dms.m == 7
assert_allclose(negangle.signed_dms.s, 26.831999999992036)
def test_angle_formatting():
"""
Tests string formatting for Angle objects
"""
'''
The string method of Angle has this signature:
def string(self, unit=DEGREE, decimal=False, sep=" ", precision=5,
pad=False):
The "decimal" parameter defaults to False since if you need to print the
Angle as a decimal, there's no need to use the "format" method (see
above).
'''
angle = Angle("54.12412", unit=u.degree)
# __str__ is the default `format`
assert str(angle) == angle.to_string()
res = 'Angle as HMS: 3h36m29.7888s'
assert f"Angle as HMS: {angle.to_string(unit=u.hour)}" == res
res = 'Angle as HMS: 3:36:29.7888'
assert f"Angle as HMS: {angle.to_string(unit=u.hour, sep=':')}" == res
res = 'Angle as HMS: 3:36:29.79'
assert f"Angle as HMS: {angle.to_string(unit=u.hour, sep=':', precision=2)}" == res
# Note that you can provide one, two, or three separators passed as a
# tuple or list
res = 'Angle as HMS: 3h36m29.7888s'
assert "Angle as HMS: {}".format(angle.to_string(unit=u.hour,
sep=("h", "m", "s"),
precision=4)) == res
res = 'Angle as HMS: 3-36|29.7888'
assert "Angle as HMS: {}".format(angle.to_string(unit=u.hour, sep=["-", "|"],
precision=4)) == res
res = 'Angle as HMS: 3-36-29.7888'
assert f"Angle as HMS: {angle.to_string(unit=u.hour, sep='-', precision=4)}" == res
res = 'Angle as HMS: 03h36m29.7888s'
assert f"Angle as HMS: {angle.to_string(unit=u.hour, precision=4, pad=True)}" == res
# Same as above, in degrees
angle = Angle("3 36 29.78880", unit=u.degree)
res = 'Angle as DMS: 3d36m29.7888s'
assert f"Angle as DMS: {angle.to_string(unit=u.degree)}" == res
res = 'Angle as DMS: 3:36:29.7888'
assert f"Angle as DMS: {angle.to_string(unit=u.degree, sep=':')}" == res
res = 'Angle as DMS: 3:36:29.79'
assert "Angle as DMS: {}".format(angle.to_string(unit=u.degree, sep=":",
precision=2)) == res
# Note that you can provide one, two, or three separators passed as a
# tuple or list
res = 'Angle as DMS: 3d36m29.7888s'
assert "Angle as DMS: {}".format(angle.to_string(unit=u.degree,
sep=("d", "m", "s"),
precision=4)) == res
res = 'Angle as DMS: 3-36|29.7888'
assert "Angle as DMS: {}".format(angle.to_string(unit=u.degree, sep=["-", "|"],
precision=4)) == res
res = 'Angle as DMS: 3-36-29.7888'
assert "Angle as DMS: {}".format(angle.to_string(unit=u.degree, sep="-",
precision=4)) == res
res = 'Angle as DMS: 03d36m29.7888s'
assert "Angle as DMS: {}".format(angle.to_string(unit=u.degree, precision=4,
pad=True)) == res
res = 'Angle as rad: 0.0629763rad'
assert f"Angle as rad: {angle.to_string(unit=u.radian)}" == res
res = 'Angle as rad decimal: 0.0629763'
assert f"Angle as rad decimal: {angle.to_string(unit=u.radian, decimal=True)}" == res
# check negative angles
angle = Angle(-1.23456789, unit=u.degree)
angle2 = Angle(-1.23456789, unit=u.hour)
assert angle.to_string() == '-1d14m04.444404s'
assert angle.to_string(pad=True) == '-01d14m04.444404s'
assert angle.to_string(unit=u.hour) == '-0h04m56.2962936s'
assert angle2.to_string(unit=u.hour, pad=True) == '-01h14m04.444404s'
assert angle.to_string(unit=u.radian, decimal=True) == '-0.0215473'
def test_to_string_vector():
# Regression test for the fact that vectorize doesn't work with Numpy 1.6
assert Angle([1./7., 1./7.], unit='deg').to_string()[0] == "0d08m34.28571429s"
assert Angle([1./7.], unit='deg').to_string()[0] == "0d08m34.28571429s"
assert Angle(1./7., unit='deg').to_string() == "0d08m34.28571429s"
def test_angle_format_roundtripping():
"""
Ensures that the string representation of an angle can be used to create a
new valid Angle.
"""
a1 = Angle(0, unit=u.radian)
a2 = Angle(10, unit=u.degree)
a3 = Angle(0.543, unit=u.degree)
a4 = Angle('1d2m3.4s')
assert Angle(str(a1)).degree == a1.degree
assert Angle(str(a2)).degree == a2.degree
assert Angle(str(a3)).degree == a3.degree
assert Angle(str(a4)).degree == a4.degree
# also check Longitude/Latitude
ra = Longitude('1h2m3.4s')
dec = Latitude('1d2m3.4s')
assert_allclose(Angle(str(ra)).degree, ra.degree)
assert_allclose(Angle(str(dec)).degree, dec.degree)
def test_radec():
"""
Tests creation/operations of Longitude and Latitude objects
"""
'''
Longitude and Latitude are objects that are subclassed from Angle. As with Angle, Longitude
and Latitude can parse any unambiguous format (tuples, formatted strings, etc.).
The intention is not to create an Angle subclass for every possible
coordinate object (e.g. galactic l, galactic b). However, equatorial Longitude/Latitude
are so prevalent in astronomy that it's worth creating ones for these
    units. They will be noted as "special" in the docs, and just the
    Angle class should be used for other coordinate systems.
'''
with pytest.raises(u.UnitsError):
ra = Longitude("4:08:15.162342") # error - hours or degrees?
with pytest.raises(u.UnitsError):
ra = Longitude("-4:08:15.162342")
# the "smart" initializer allows >24 to automatically do degrees, but the
# Angle-based one does not
# TODO: adjust in 0.3 for whatever behavior is decided on
# ra = Longitude("26:34:15.345634") # unambiguous b/c hours don't go past 24
# assert_allclose(ra.degree, 26.570929342)
with pytest.raises(u.UnitsError):
ra = Longitude("26:34:15.345634")
# ra = Longitude(68)
with pytest.raises(u.UnitsError):
ra = Longitude(68)
with pytest.raises(u.UnitsError):
ra = Longitude(12)
with pytest.raises(ValueError):
ra = Longitude("garbage containing a d and no units")
ra = Longitude("12h43m23s")
assert_allclose(ra.hour, 12.7230555556)
ra = Longitude((56, 14, 52.52), unit=u.degree) # can accept tuples
# TODO: again, fix based on >24 behavior
# ra = Longitude((56,14,52.52))
with pytest.raises(u.UnitsError):
ra = Longitude((56, 14, 52.52))
with pytest.raises(u.UnitsError):
ra = Longitude((12, 14, 52)) # ambiguous w/o units
ra = Longitude((12, 14, 52), unit=u.hour)
ra = Longitude([56, 64, 52.2], unit=u.degree) # ...but not arrays (yet)
# Units can be specified
ra = Longitude("4:08:15.162342", unit=u.hour)
# TODO: this was the "smart" initializer behavior - adjust in 0.3 appropriately
# Where Longitude values are commonly found in hours or degrees, declination is
# nearly always specified in degrees, so this is the default.
# dec = Latitude("-41:08:15.162342")
with pytest.raises(u.UnitsError):
dec = Latitude("-41:08:15.162342")
dec = Latitude("-41:08:15.162342", unit=u.degree) # same as above
def test_negative_zero_dms():
# Test for DMS parser
a = Angle('-00:00:10', u.deg)
assert_allclose(a.degree, -10. / 3600.)
# Unicode minus
a = Angle('−00:00:10', u.deg)
assert_allclose(a.degree, -10. / 3600.)
def test_negative_zero_dm():
# Test for DM parser
a = Angle('-00:10', u.deg)
assert_allclose(a.degree, -10. / 60.)
def test_negative_zero_hms():
# Test for HMS parser
a = Angle('-00:00:10', u.hour)
assert_allclose(a.hour, -10. / 3600.)
def test_negative_zero_hm():
# Test for HM parser
a = Angle('-00:10', u.hour)
assert_allclose(a.hour, -10. / 60.)
def test_negative_sixty_hm():
# Test for HM parser
with pytest.warns(IllegalMinuteWarning):
a = Angle('-00:60', u.hour)
assert_allclose(a.hour, -1.)
def test_plus_sixty_hm():
# Test for HM parser
with pytest.warns(IllegalMinuteWarning):
a = Angle('00:60', u.hour)
assert_allclose(a.hour, 1.)
def test_negative_fifty_nine_sixty_dms():
# Test for DMS parser
with pytest.warns(IllegalSecondWarning):
a = Angle('-00:59:60', u.deg)
assert_allclose(a.degree, -1.)
def test_plus_fifty_nine_sixty_dms():
# Test for DMS parser
with pytest.warns(IllegalSecondWarning):
a = Angle('+00:59:60', u.deg)
assert_allclose(a.degree, 1.)
def test_negative_sixty_dms():
# Test for DMS parser
with pytest.warns(IllegalSecondWarning):
a = Angle('-00:00:60', u.deg)
assert_allclose(a.degree, -1. / 60.)
def test_plus_sixty_dms():
# Test for DMS parser
with pytest.warns(IllegalSecondWarning):
a = Angle('+00:00:60', u.deg)
assert_allclose(a.degree, 1. / 60.)
def test_angle_to_is_angle():
with pytest.warns(IllegalSecondWarning):
a = Angle('00:00:60', u.deg)
assert isinstance(a, Angle)
assert isinstance(a.to(u.rad), Angle)
def test_angle_to_quantity():
with pytest.warns(IllegalSecondWarning):
a = Angle('00:00:60', u.deg)
q = u.Quantity(a)
assert isinstance(q, u.Quantity)
assert q.unit is u.deg
def test_quantity_to_angle():
a = Angle(1.0*u.deg)
assert isinstance(a, Angle)
with pytest.raises(u.UnitsError):
Angle(1.0*u.meter)
a = Angle(1.0*u.hour)
assert isinstance(a, Angle)
assert a.unit is u.hourangle
with pytest.raises(u.UnitsError):
Angle(1.0*u.min)
def test_angle_string():
with pytest.warns(IllegalSecondWarning):
a = Angle('00:00:60', u.deg)
assert str(a) == '0d01m00s'
a = Angle('00:00:59S', u.deg)
assert str(a) == '-0d00m59s'
a = Angle('00:00:59N', u.deg)
assert str(a) == '0d00m59s'
a = Angle('00:00:59E', u.deg)
assert str(a) == '0d00m59s'
a = Angle('00:00:59W', u.deg)
assert str(a) == '-0d00m59s'
a = Angle('-00:00:10', u.hour)
assert str(a) == '-0h00m10s'
a = Angle('00:00:59E', u.hour)
assert str(a) == '0h00m59s'
a = Angle('00:00:59W', u.hour)
assert str(a) == '-0h00m59s'
a = Angle(3.2, u.radian)
assert str(a) == '3.2rad'
a = Angle(4.2, u.microarcsecond)
assert str(a) == '4.2uarcsec'
a = Angle('1.0uarcsec')
assert a.value == 1.0
assert a.unit == u.microarcsecond
a = Angle('1.0uarcsecN')
assert a.value == 1.0
assert a.unit == u.microarcsecond
a = Angle('1.0uarcsecS')
assert a.value == -1.0
assert a.unit == u.microarcsecond
a = Angle('1.0uarcsecE')
assert a.value == 1.0
assert a.unit == u.microarcsecond
a = Angle('1.0uarcsecW')
assert a.value == -1.0
assert a.unit == u.microarcsecond
a = Angle("3d")
assert_allclose(a.value, 3.0)
assert a.unit == u.degree
a = Angle("3dN")
assert str(a) == "3d00m00s"
assert a.unit == u.degree
a = Angle("3dS")
assert str(a) == "-3d00m00s"
assert a.unit == u.degree
a = Angle("3dE")
assert str(a) == "3d00m00s"
assert a.unit == u.degree
a = Angle("3dW")
assert str(a) == "-3d00m00s"
assert a.unit == u.degree
a = Angle('10"')
assert_allclose(a.value, 10.0)
assert a.unit == u.arcsecond
a = Angle("10'N")
assert_allclose(a.value, 10.0)
assert a.unit == u.arcminute
a = Angle("10'S")
assert_allclose(a.value, -10.0)
assert a.unit == u.arcminute
a = Angle("10'E")
assert_allclose(a.value, 10.0)
assert a.unit == u.arcminute
a = Angle("10'W")
assert_allclose(a.value, -10.0)
assert a.unit == u.arcminute
a = Angle('45°55′12″N')
assert str(a) == '45d55m12s'
assert_allclose(a.value, 45.92)
assert a.unit == u.deg
a = Angle('45°55′12″S')
assert str(a) == '-45d55m12s'
assert_allclose(a.value, -45.92)
assert a.unit == u.deg
a = Angle('45°55′12″E')
assert str(a) == '45d55m12s'
assert_allclose(a.value, 45.92)
assert a.unit == u.deg
a = Angle('45°55′12″W')
assert str(a) == '-45d55m12s'
assert_allclose(a.value, -45.92)
assert a.unit == u.deg
with pytest.raises(ValueError):
Angle('00h00m10sN')
with pytest.raises(ValueError):
Angle('45°55′12″NS')
def test_angle_repr():
assert 'Angle' in repr(Angle(0, u.deg))
assert 'Longitude' in repr(Longitude(0, u.deg))
assert 'Latitude' in repr(Latitude(0, u.deg))
a = Angle(0, u.deg)
repr(a)
def test_large_angle_representation():
"""Test that angles above 360 degrees can be output as strings,
in repr, str, and to_string. (regression test for #1413)"""
a = Angle(350, u.deg) + Angle(350, u.deg)
a.to_string()
a.to_string(u.hourangle)
repr(a)
repr(a.to(u.hourangle))
str(a)
str(a.to(u.hourangle))
def test_wrap_at_inplace():
a = Angle([-20, 150, 350, 360] * u.deg)
out = a.wrap_at('180d', inplace=True)
assert out is None
assert np.all(a.degree == np.array([-20., 150., -10., 0.]))
def test_latitude():
with pytest.raises(ValueError):
lat = Latitude(['91d', '89d'])
with pytest.raises(ValueError):
lat = Latitude('-91d')
lat = Latitude(['90d', '89d'])
# check that one can get items
assert lat[0] == 90 * u.deg
assert lat[1] == 89 * u.deg
# and that comparison with angles works
assert np.all(lat == Angle(['90d', '89d']))
# check setitem works
lat[1] = 45. * u.deg
assert np.all(lat == Angle(['90d', '45d']))
# but not with values out of range
with pytest.raises(ValueError):
lat[0] = 90.001 * u.deg
with pytest.raises(ValueError):
lat[0] = -90.001 * u.deg
# these should also not destroy input (#1851)
assert np.all(lat == Angle(['90d', '45d']))
# conserve type on unit change (closes #1423)
angle = lat.to('radian')
assert type(angle) is Latitude
# but not on calculations
angle = lat - 190 * u.deg
assert type(angle) is Angle
assert angle[0] == -100 * u.deg
lat = Latitude('80d')
angle = lat / 2.
assert type(angle) is Angle
assert angle == 40 * u.deg
angle = lat * 2.
assert type(angle) is Angle
assert angle == 160 * u.deg
angle = -lat
assert type(angle) is Angle
assert angle == -80 * u.deg
# Test errors when trying to interoperate with longitudes.
with pytest.raises(TypeError) as excinfo:
lon = Longitude(10, 'deg')
lat = Latitude(lon)
assert "A Latitude angle cannot be created from a Longitude angle" in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
lon = Longitude(10, 'deg')
lat = Latitude([20], 'deg')
lat[0] = lon
assert "A Longitude angle cannot be assigned to a Latitude angle" in str(excinfo.value)
# Check we can work around the Lat vs Long checks by casting explicitly to Angle.
lon = Longitude(10, 'deg')
lat = Latitude(Angle(lon))
assert lat.value == 10.0
# Check setitem.
lon = Longitude(10, 'deg')
lat = Latitude([20], 'deg')
lat[0] = Angle(lon)
assert lat.value[0] == 10.0
def test_longitude():
# Default wrapping at 360d with an array input
lon = Longitude(['370d', '88d'])
assert np.all(lon == Longitude(['10d', '88d']))
assert np.all(lon == Angle(['10d', '88d']))
# conserve type on unit change and keep wrap_angle (closes #1423)
angle = lon.to('hourangle')
assert type(angle) is Longitude
assert angle.wrap_angle == lon.wrap_angle
angle = lon[0]
assert type(angle) is Longitude
assert angle.wrap_angle == lon.wrap_angle
angle = lon[1:]
assert type(angle) is Longitude
assert angle.wrap_angle == lon.wrap_angle
# but not on calculations
angle = lon / 2.
assert np.all(angle == Angle(['5d', '44d']))
assert type(angle) is Angle
assert not hasattr(angle, 'wrap_angle')
angle = lon * 2. + 400 * u.deg
assert np.all(angle == Angle(['420d', '576d']))
assert type(angle) is Angle
# Test setting a mutable value and having it wrap
lon[1] = -10 * u.deg
assert np.all(lon == Angle(['10d', '350d']))
# Test wrapping and try hitting some edge cases
lon = Longitude(np.array([0, 0.5, 1.0, 1.5, 2.0]) * np.pi, unit=u.radian)
assert np.all(lon.degree == np.array([0., 90, 180, 270, 0]))
lon = Longitude(np.array([0, 0.5, 1.0, 1.5, 2.0]) * np.pi, unit=u.radian, wrap_angle='180d')
assert np.all(lon.degree == np.array([0., 90, -180, -90, 0]))
# Wrap on setting wrap_angle property (also test auto-conversion of wrap_angle to an Angle)
lon = Longitude(np.array([0, 0.5, 1.0, 1.5, 2.0]) * np.pi, unit=u.radian)
lon.wrap_angle = '180d'
assert np.all(lon.degree == np.array([0., 90, -180, -90, 0]))
lon = Longitude('460d')
assert lon == Angle('100d')
lon.wrap_angle = '90d'
assert lon == Angle('-260d')
# check that if we initialize a longitude with another longitude,
# wrap_angle is kept by default
lon2 = Longitude(lon)
assert lon2.wrap_angle == lon.wrap_angle
# but not if we explicitly set it
lon3 = Longitude(lon, wrap_angle='180d')
assert lon3.wrap_angle == 180 * u.deg
# check that wrap_angle is always an Angle
lon = Longitude(lon, wrap_angle=Longitude(180 * u.deg))
assert lon.wrap_angle == 180 * u.deg
assert lon.wrap_angle.__class__ is Angle
# check that wrap_angle is not copied
wrap_angle=180 * u.deg
lon = Longitude(lon, wrap_angle=wrap_angle)
assert lon.wrap_angle == 180 * u.deg
assert np.may_share_memory(lon.wrap_angle, wrap_angle)
# check for problem reported in #2037 about Longitude initializing to -0
lon = Longitude(0, u.deg)
lonstr = lon.to_string()
assert not lonstr.startswith('-')
# also make sure dtype is correctly conserved
assert Longitude(0, u.deg, dtype=float).dtype == np.dtype(float)
assert Longitude(0, u.deg, dtype=int).dtype == np.dtype(int)
# Test errors when trying to interoperate with latitudes.
with pytest.raises(TypeError) as excinfo:
lat = Latitude(10, 'deg')
lon = Longitude(lat)
assert "A Longitude angle cannot be created from a Latitude angle" in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
lat = Latitude(10, 'deg')
lon = Longitude([20], 'deg')
lon[0] = lat
assert "A Latitude angle cannot be assigned to a Longitude angle" in str(excinfo.value)
# Check we can work around the Lat vs Long checks by casting explicitly to Angle.
lat = Latitude(10, 'deg')
lon = Longitude(Angle(lat))
assert lon.value == 10.0
# Check setitem.
lat = Latitude(10, 'deg')
lon = Longitude([20], 'deg')
lon[0] = Angle(lat)
assert lon.value[0] == 10.0
def test_wrap_at():
a = Angle([-20, 150, 350, 360] * u.deg)
assert np.all(a.wrap_at(360 * u.deg).degree == np.array([340., 150., 350., 0.]))
assert np.all(a.wrap_at(Angle(360, unit=u.deg)).degree == np.array([340., 150., 350., 0.]))
assert np.all(a.wrap_at('360d').degree == np.array([340., 150., 350., 0.]))
assert np.all(a.wrap_at('180d').degree == np.array([-20., 150., -10., 0.]))
assert np.all(a.wrap_at(np.pi * u.rad).degree == np.array([-20., 150., -10., 0.]))
# Test wrapping a scalar Angle
a = Angle('190d')
assert a.wrap_at('180d') == Angle('-170d')
a = Angle(np.arange(-1000.0, 1000.0, 0.125), unit=u.deg)
for wrap_angle in (270, 0.2, 0.0, 360.0, 500, -2000.125):
aw = a.wrap_at(wrap_angle * u.deg)
assert np.all(aw.degree >= wrap_angle - 360.0)
assert np.all(aw.degree < wrap_angle)
aw = a.to(u.rad).wrap_at(wrap_angle * u.deg)
assert np.all(aw.degree >= wrap_angle - 360.0)
assert np.all(aw.degree < wrap_angle)
def test_is_within_bounds():
a = Angle([-20, 150, 350] * u.deg)
assert a.is_within_bounds('0d', '360d') is False
assert a.is_within_bounds(None, '360d') is True
assert a.is_within_bounds(-30 * u.deg, None) is True
a = Angle('-20d')
assert a.is_within_bounds('0d', '360d') is False
assert a.is_within_bounds(None, '360d') is True
assert a.is_within_bounds(-30 * u.deg, None) is True
def test_angle_mismatched_unit():
a = Angle('+6h7m8s', unit=u.degree)
assert_allclose(a.value, 91.78333333333332)
def test_regression_formatting_negative():
# Regression test for a bug that caused:
#
# >>> Angle(-1., unit='deg').to_string()
# '-1d00m-0s'
assert Angle(-0., unit='deg').to_string() == '-0d00m00s'
assert Angle(-1., unit='deg').to_string() == '-1d00m00s'
assert Angle(-0., unit='hour').to_string() == '-0h00m00s'
assert Angle(-1., unit='hour').to_string() == '-1h00m00s'
def test_regression_formatting_default_precision():
# Regression test for issue #11140
assert Angle('10:20:30.12345678d').to_string() == '10d20m30.12345678s'
assert Angle('10d20m30.123456784564s').to_string() == '10d20m30.12345678s'
assert Angle('10d20m30.123s').to_string() == '10d20m30.123s'
def test_empty_sep():
a = Angle('05h04m31.93830s')
assert a.to_string(sep='', precision=2, pad=True) == '050431.94'
def test_create_tuple():
"""
Tests creation of an angle with a (d,m,s) or (h,m,s) tuple
"""
a1 = Angle((1, 30, 0), unit=u.degree)
assert a1.value == 1.5
a1 = Angle((1, 30, 0), unit=u.hourangle)
assert a1.value == 1.5
def test_list_of_quantities():
a1 = Angle([1*u.deg, 1*u.hourangle])
assert a1.unit == u.deg
assert_allclose(a1.value, [1, 15])
a2 = Angle([1*u.hourangle, 1*u.deg], u.deg)
assert a2.unit == u.deg
assert_allclose(a2.value, [15, 1])
def test_multiply_divide():
# Issue #2273
a1 = Angle([1, 2, 3], u.deg)
a2 = Angle([4, 5, 6], u.deg)
a3 = a1 * a2
assert_allclose(a3.value, [4, 10, 18])
assert a3.unit == (u.deg * u.deg)
a3 = a1 / a2
assert_allclose(a3.value, [.25, .4, .5])
assert a3.unit == u.dimensionless_unscaled
def test_mixed_string_and_quantity():
a1 = Angle(['1d', 1. * u.deg])
assert_array_equal(a1.value, [1., 1.])
assert a1.unit == u.deg
a2 = Angle(['1d', 1 * u.rad * np.pi, '3d'])
assert_array_equal(a2.value, [1., 180., 3.])
assert a2.unit == u.deg
def test_array_angle_tostring():
aobj = Angle([1, 2], u.deg)
assert aobj.to_string().dtype.kind == 'U'
assert np.all(aobj.to_string() == ['1d00m00s', '2d00m00s'])
def test_wrap_at_without_new():
"""
Regression test for subtle bugs from situations where an Angle is
created via numpy channels that don't do the standard __new__ but instead
depend on array_finalize to set state. Longitude is used because the
bug was in its _wrap_angle not getting initialized correctly
"""
l1 = Longitude([1]*u.deg)
l2 = Longitude([2]*u.deg)
l = np.concatenate([l1, l2])
assert l._wrap_angle is not None
def test__str__():
"""
Check the __str__ method used in printing the Angle
"""
# scalar angle
scangle = Angle('10.2345d')
strscangle = scangle.__str__()
assert strscangle == '10d14m04.2s'
# non-scalar array angles
arrangle = Angle(['10.2345d', '-20d'])
strarrangle = arrangle.__str__()
assert strarrangle == '[10d14m04.2s -20d00m00s]'
# summarizing for large arrays, ... should appear
bigarrangle = Angle(np.ones(10000), u.deg)
assert '...' in bigarrangle.__str__()
def test_repr_latex():
"""
Check the _repr_latex_ method, used primarily by IPython notebooks
"""
# try with both scalar
scangle = Angle(2.1, u.deg)
rlscangle = scangle._repr_latex_()
# and array angles
arrangle = Angle([1, 2.1], u.deg)
rlarrangle = arrangle._repr_latex_()
assert rlscangle == r'$2^\circ06{}^\prime00{}^{\prime\prime}$'
assert rlscangle.split('$')[1] in rlarrangle
# make sure the ... appears for large arrays
bigarrangle = Angle(np.ones(50000)/50000., u.deg)
assert '...' in bigarrangle._repr_latex_()
def test_angle_with_cds_units_enabled():
"""Regression test for #5350
Especially the example in
https://github.com/astropy/astropy/issues/5350#issuecomment-248770151
"""
from astropy.units import cds
# the problem is with the parser, so remove it temporarily
from astropy.coordinates.angle_formats import _AngleParser
del _AngleParser._thread_local._parser
with cds.enable():
Angle('5d')
del _AngleParser._thread_local._parser
Angle('5d')
def test_longitude_nan():
# Check that passing a NaN to Longitude doesn't raise a warning
Longitude([0, np.nan, 1] * u.deg)
def test_latitude_nan():
# Check that passing a NaN to Latitude doesn't raise a warning
Latitude([0, np.nan, 1] * u.deg)
def test_angle_wrap_at_nan():
# Check that passing a NaN to Latitude doesn't raise a warning
Angle([0, np.nan, 1] * u.deg).wrap_at(180*u.deg)
def test_angle_multithreading():
"""
Regression test for issue #7168
"""
angles = ['00:00:00']*10000
def parse_test(i=0):
Angle(angles, unit='hour')
for i in range(10):
threading.Thread(target=parse_test, args=(i,)).start()
@pytest.mark.parametrize("cls", [Angle, Longitude, Latitude])
@pytest.mark.parametrize("input, expstr, exprepr",
[(np.nan*u.deg,
"nan",
"nan deg"),
([np.nan, 5, 0]*u.deg,
"[nan 5d00m00s 0d00m00s]",
"[nan, 5., 0.] deg"),
([6, np.nan, 0]*u.deg,
"[6d00m00s nan 0d00m00s]",
"[6., nan, 0.] deg"),
([np.nan, np.nan, np.nan]*u.deg,
"[nan nan nan]",
"[nan, nan, nan] deg"),
(np.nan*u.hour,
"nan",
"nan hourangle"),
([np.nan, 5, 0]*u.hour,
"[nan 5h00m00s 0h00m00s]",
"[nan, 5., 0.] hourangle"),
([6, np.nan, 0]*u.hour,
"[6h00m00s nan 0h00m00s]",
"[6., nan, 0.] hourangle"),
([np.nan, np.nan, np.nan]*u.hour,
"[nan nan nan]",
"[nan, nan, nan] hourangle"),
(np.nan*u.rad,
"nan",
"nan rad"),
([np.nan, 1, 0]*u.rad,
"[nan 1rad 0rad]",
"[nan, 1., 0.] rad"),
([1.50, np.nan, 0]*u.rad,
"[1.5rad nan 0rad]",
"[1.5, nan, 0.] rad"),
([np.nan, np.nan, np.nan]*u.rad,
"[nan nan nan]",
"[nan, nan, nan] rad")])
def test_str_repr_angles_nan(cls, input, expstr, exprepr):
"""
Regression test for issue #11473
"""
q = cls(input)
assert str(q) == expstr
# Deleting whitespaces since repr appears to be adding them for some values
# making the test fail.
assert repr(q).replace(" ", "") == f'<{cls.__name__}{exprepr}>'.replace(" ","")
|
lpsinger/astropy
|
astropy/coordinates/tests/test_angles.py
|
Python
|
bsd-3-clause
| 35,759
|
import os
PACKAGE_NAME = 'pyflux'
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration(PACKAGE_NAME, parent_package, top_path)
config.add_subpackage('__check_build')
config.add_subpackage('arma')
config.add_subpackage('ensembles')
config.add_subpackage('families')
config.add_subpackage('garch')
config.add_subpackage('gas')
config.add_subpackage('gpnarx')
config.add_subpackage('inference')
config.add_subpackage('output')
config.add_subpackage('ssm')
config.add_subpackage('tests')
config.add_subpackage('var')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
|
RJT1990/pyflux
|
pyflux/setup.py
|
Python
|
bsd-3-clause
| 792
|
import uuid
from couchdbkit import ResourceConflict
from couchdbkit.exceptions import ResourceNotFound
from django.test import TestCase
from toggle.shortcuts import update_toggle_cache, namespaced_item, clear_toggle_cache
from .models import generate_toggle_id, Toggle
from .shortcuts import toggle_enabled, set_toggle
class ToggleTestCase(TestCase):
def setUp(self):
super(ToggleTestCase, self).setUp()
self.slug = uuid.uuid4().hex
def tearDown(self):
try:
toggle = Toggle.get(self.slug)
except ResourceNotFound:
pass
else:
toggle.delete()
super(ToggleTestCase, self).tearDown()
def test_generate_id(self):
self.assertEqual('hqFeatureToggle-sluggy', generate_toggle_id('sluggy'))
def test_save_and_get_id(self):
users = ['bruce', 'alfred']
toggle = Toggle(slug=self.slug, enabled_users=users)
toggle.save()
self.assertEqual(generate_toggle_id(self.slug), toggle._id)
for id in (toggle._id, self.slug):
fromdb = Toggle.get(id)
self.assertEqual(self.slug, fromdb.slug)
self.assertEqual(users, fromdb.enabled_users)
def test_no_overwrite(self):
existing = Toggle(slug=self.slug)
existing.save()
conflict = Toggle(slug=self.slug)
try:
conflict.save()
self.fail('saving a toggle on top of an existing document should not be allowed')
except ResourceConflict:
pass
def test_toggle_enabled(self):
users = ['prof', 'logan']
toggle = Toggle(slug=self.slug, enabled_users=users)
toggle.save()
self.assertTrue(toggle_enabled(self.slug, 'prof'))
self.assertTrue(toggle_enabled(self.slug, 'logan'))
self.assertFalse(toggle_enabled(self.slug, 'richard'))
self.assertFalse(toggle_enabled('gotham', 'prof'))
def test_add_remove(self):
toggle = Toggle(slug=self.slug, enabled_users=['petyr', 'jon'])
toggle.save()
rev = toggle._rev
self.assertTrue('jon' in toggle.enabled_users)
self.assertTrue('petyr' in toggle.enabled_users)
# removing someone who doesn't exist shouldn't do anything
toggle.remove('robert')
self.assertEqual(rev, toggle._rev)
# removing someone should save it and update toggle
toggle.remove('jon')
next_rev = toggle._rev
self.assertNotEqual(rev, next_rev)
self.assertFalse('jon' in toggle.enabled_users)
self.assertTrue('petyr' in toggle.enabled_users)
# adding someone who already exists should do nothing
toggle.add('petyr')
self.assertEqual(next_rev, toggle._rev)
# adding someone should save it and update toggle
toggle.add('ned')
self.assertNotEqual(next_rev, toggle._rev)
self.assertTrue('ned' in toggle.enabled_users)
self.assertTrue('petyr' in toggle.enabled_users)
self.assertFalse('jon' in toggle.enabled_users)
def test_set_toggle(self):
toggle = Toggle(slug=self.slug, enabled_users=['benjen', 'aemon'])
toggle.save()
self.assertTrue(toggle_enabled(self.slug, 'benjen'))
self.assertTrue(toggle_enabled(self.slug, 'aemon'))
set_toggle(self.slug, 'benjen', False)
self.assertFalse(toggle_enabled(self.slug, 'benjen'))
self.assertTrue(toggle_enabled(self.slug, 'aemon'))
set_toggle(self.slug, 'jon', True)
self.assertTrue(toggle_enabled(self.slug, 'jon'))
self.assertFalse(toggle_enabled(self.slug, 'benjen'))
self.assertTrue(toggle_enabled(self.slug, 'aemon'))
def test_toggle_cache(self):
ns = 'ns'
toggle = Toggle(slug=self.slug, enabled_users=['mojer', namespaced_item('fizbod', ns)])
toggle.save()
self.assertTrue(toggle_enabled(self.slug, 'mojer'))
self.assertFalse(toggle_enabled(self.slug, 'fizbod'))
self.assertTrue(toggle_enabled(self.slug, 'fizbod', namespace=ns))
update_toggle_cache(self.slug, 'mojer', False)
update_toggle_cache(self.slug, 'fizbod', False, namespace=ns)
self.assertFalse(toggle_enabled(self.slug, 'mojer'))
self.assertFalse(toggle_enabled(self.slug, 'fizbod', namespace=ns))
clear_toggle_cache(self.slug, 'mojer')
clear_toggle_cache(self.slug, 'fizbod', namespace=ns)
self.assertTrue(toggle_enabled(self.slug, 'mojer'))
self.assertTrue(toggle_enabled(self.slug, 'fizbod', namespace=ns))
|
qedsoftware/commcare-hq
|
corehq/ex-submodules/toggle/tests.py
|
Python
|
bsd-3-clause
| 4,576
|
#
# Copyright (c) 2014, Arista Networks, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# Neither the name of Arista Networks nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ARISTA NETWORKS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import os
import unittest
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../lib'))
from testlib import random_string
from systestlib import DutSystemTest
class TestApiSystem(DutSystemTest):
def test_get(self):
for dut in self.duts:
dut.config('default hostname')
resp = dut.api('system').get()
keys = ['hostname', 'iprouting', 'banner_motd', 'banner_login']
self.assertEqual(sorted(keys), sorted(resp.keys()))
def test_get_with_period(self):
for dut in self.duts:
dut.config('hostname host.domain.net')
response = dut.api('system').get()
self.assertEqual(response['hostname'], 'host.domain.net')
def test_get_check_hostname(self):
for dut in self.duts:
dut.config('hostname teststring')
response = dut.api('system').get()
self.assertEqual(response['hostname'], 'teststring')
def test_get_check_banners(self):
for dut in self.duts:
motd_banner_value = random_string() + "\n"
login_banner_value = random_string() + "\n"
dut.config([dict(cmd="banner motd", input=motd_banner_value)])
dut.config([dict(cmd="banner login", input=login_banner_value)])
resp = dut.api('system').get()
self.assertEqual(resp['banner_login'], login_banner_value.rstrip())
self.assertEqual(resp['banner_motd'], motd_banner_value.rstrip())
def test_get_banner_with_EOF(self):
for dut in self.duts:
motd_banner_value = '!!!newlinebaner\nSecondLIneEOF!!!newlinebanner\n'
dut.config([dict(cmd="banner motd", input=motd_banner_value)])
resp = dut.api('system').get()
self.assertEqual(resp['banner_motd'], motd_banner_value.rstrip())
def test_set_hostname_with_value(self):
for dut in self.duts:
dut.config('default hostname')
value = random_string()
response = dut.api('system').set_hostname(value)
self.assertTrue(response, 'dut=%s' % dut)
value = 'hostname %s' % value
self.assertIn(value, dut.running_config)
def test_set_hostname_with_no_value(self):
for dut in self.duts:
dut.config('hostname test')
response = dut.api('system').set_hostname(disable=True)
self.assertTrue(response, 'dut=%s' % dut)
value = 'no hostname'
self.assertIn(value, dut.running_config)
def test_set_hostname_with_default(self):
for dut in self.duts:
dut.config('hostname test')
response = dut.api('system').set_hostname(default=True)
self.assertTrue(response, 'dut=%s' % dut)
value = 'no hostname'
self.assertIn(value, dut.running_config)
def test_set_hostname_default_over_value(self):
for dut in self.duts:
dut.config('hostname test')
response = dut.api('system').set_hostname(value='foo', default=True)
self.assertTrue(response, 'dut=%s' % dut)
value = 'no hostname'
self.assertIn(value, dut.running_config)
def test_set_iprouting_to_true(self):
for dut in self.duts:
dut.config('no ip routing')
resp = dut.api('system').set_iprouting(True)
self.assertTrue(resp, 'dut=%s' % dut)
            self.assertNotIn('no ip routing', dut.running_config)
def test_set_iprouting_to_false(self):
for dut in self.duts:
dut.config('ip routing')
resp = dut.api('system').set_iprouting(False)
self.assertTrue(resp, 'dut=%s' % dut)
self.assertIn('no ip routing', dut.running_config)
def test_set_iprouting_to_no(self):
for dut in self.duts:
dut.config('ip routing')
resp = dut.api('system').set_iprouting(disable=True)
self.assertTrue(resp, 'dut=%s' % dut)
self.assertIn('no ip routing', dut.running_config)
def test_set_iprouting_to_default(self):
for dut in self.duts:
dut.config('ip routing')
resp = dut.api('system').set_iprouting(default=True)
self.assertTrue(resp, 'dut=%s' % dut)
self.assertIn('no ip routing', dut.running_config)
def test_set_hostname_with_period(self):
for dut in self.duts:
dut.config('hostname localhost')
response = dut.api('system').set_hostname(value='host.domain.net')
self.assertTrue(response, 'dut=%s' % dut)
value = 'hostname host.domain.net'
self.assertIn(value, dut.running_config)
def test_set_banner_motd(self):
for dut in self.duts:
banner_value = random_string()
dut.config([dict(cmd="banner motd",
input=banner_value)])
self.assertIn(banner_value, dut.running_config)
banner_api_value = random_string()
resp = dut.api('system').set_banner("motd", banner_api_value)
self.assertTrue(resp, 'dut=%s' % dut)
self.assertIn(banner_api_value, dut.running_config)
def test_set_banner_motd_donkey(self):
for dut in self.duts:
donkey_chicken = r"""
/\ /\
( \\ // )
\ \\ // /
\_\\||||//_/
\/ _ _ \
\/|(o)(O)|
\/ | |
___________________\/ \ /
// // |____| Cluck cluck cluck!
// || / \
//| \| \ 0 0 /
// \ ) V / \____/
// \ / ( /
"" \ /_________| |_/
/ /\ / | ||
/ / / / \ ||
| | | | | ||
| | | | | ||
|_| |_| |_||
\_\ \_\ \_\\
"""
resp = dut.api('system').set_banner("motd", donkey_chicken)
self.assertTrue(resp, 'dut=%s' % dut)
self.assertIn(donkey_chicken, dut.running_config)
def test_set_banner_motd_default(self):
for dut in self.duts:
dut.config([dict(cmd="banner motd",
input="!!!!REMOVE BANNER TEST!!!!")])
dut.api('system').set_banner('motd', None, True)
self.assertIn('no banner motd', dut.running_config)
def test_set_banner_login(self):
for dut in self.duts:
banner_value = random_string()
dut.config([dict(cmd="banner login",
input=banner_value)])
self.assertIn(banner_value, dut.running_config)
banner_api_value = random_string()
resp = dut.api('system').set_banner("login", banner_api_value)
self.assertTrue(resp, 'dut=%s' % dut)
self.assertIn(banner_api_value, dut.running_config)
config_login_banner = dut.api('system').get()['banner_login']
            self.assertEqual(config_login_banner, banner_api_value.strip())
def test_set_banner_login_default(self):
for dut in self.duts:
dut.config([dict(cmd="banner login",
input="!!!!REMOVE LOGIN BANNER TEST!!!!")])
dut.api('system').set_banner('login', None, True)
self.assertIn('no banner login', dut.running_config)
def test_set_banner_login_negate(self):
for dut in self.duts:
dut.config([dict(cmd="banner login",
input="!!!!REMOVE LOGIN BANNER TEST!!!!")])
dut.api('system').set_banner('login', None, False, True)
self.assertIn('no banner login', dut.running_config)
if __name__ == '__main__':
unittest.main()
|
arista-eosplus/pyeapi
|
test/system/test_api_system.py
|
Python
|
bsd-3-clause
| 9,596
|
from __future__ import absolute_import, unicode_literals
from django.conf.urls import url
from django.core.urlresolvers import RegexURLResolver
from django.http import Http404
from wagtail.wagtailcore.models import Page
from wagtail.wagtailcore.url_routing import RouteResult
_creation_counter = 0
def route(pattern, name=None):
def decorator(view_func):
global _creation_counter
_creation_counter += 1
# Make sure page has _routablepage_routes attribute
if not hasattr(view_func, '_routablepage_routes'):
view_func._routablepage_routes = []
# Add new route to view
view_func._routablepage_routes.append((
url(pattern, view_func, name=(name or view_func.__name__)),
_creation_counter,
))
return view_func
return decorator
class RoutablePageMixin(object):
"""
This class can be mixed in to a Page model, allowing extra routes to be
added to it.
"""
@classmethod
def get_subpage_urls(cls):
routes = []
for attr in dir(cls):
val = getattr(cls, attr, None)
if hasattr(val, '_routablepage_routes'):
routes.extend(val._routablepage_routes)
return tuple([
route[0]
for route in sorted(routes, key=lambda route: route[1])
])
@classmethod
def get_resolver(cls):
if '_routablepage_urlresolver' not in cls.__dict__:
subpage_urls = cls.get_subpage_urls()
cls._routablepage_urlresolver = RegexURLResolver(r'^/', subpage_urls)
return cls._routablepage_urlresolver
def reverse_subpage(self, name, args=None, kwargs=None):
"""
This method takes a route name/arguments and returns a URL path.
"""
args = args or []
kwargs = kwargs or {}
return self.get_resolver().reverse(name, *args, **kwargs)
def resolve_subpage(self, path):
"""
This method takes a URL path and finds the view to call.
"""
view, args, kwargs = self.get_resolver().resolve(path)
# Bind the method
view = view.__get__(self, type(self))
return view, args, kwargs
def route(self, request, path_components):
"""
This hooks the subpage URLs into Wagtail's routing.
"""
if self.live:
try:
path = '/'
if path_components:
path += '/'.join(path_components) + '/'
view, args, kwargs = self.resolve_subpage(path)
return RouteResult(self, args=(view, args, kwargs))
except Http404:
pass
return super(RoutablePageMixin, self).route(request, path_components)
def serve(self, request, view=None, args=None, kwargs=None):
if args is None:
args = []
if kwargs is None:
kwargs = {}
if view is None:
return super(RoutablePageMixin, self).serve(request, *args, **kwargs)
return view(request, *args, **kwargs)
def serve_preview(self, request, mode_name):
view, args, kwargs = self.resolve_subpage('/')
request.is_preview = True
return view(request, *args, **kwargs)
class RoutablePage(RoutablePageMixin, Page):
"""
This class extends Page by adding methods which allows extra routes to be
added to it.
"""
class Meta:
abstract = True
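# Usage sketch (added for illustration; "EventPage" and its routes are hypothetical,
# not part of this module): a concrete page mixes RoutablePageMixin into a Page
# subclass and registers extra views with the @route decorator defined above.
#
#     class EventPage(RoutablePageMixin, Page):
#         @route(r'^$')
#         def current_events(self, request):
#             return Page.serve(self, request)
#
#         @route(r'^year/(\d+)/$', name='events_for_year')
#         def events_for_year(self, request, year):
#             return Page.serve(self, request)
#
# page.reverse_subpage('events_for_year', args=('2015',)) then builds the relative
# URL for that route, and RoutablePageMixin.route() dispatches matching requests.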
|
Toshakins/wagtail
|
wagtail/contrib/wagtailroutablepage/models.py
|
Python
|
bsd-3-clause
| 3,477
|
# -*- coding: utf-8 -*-
from decimal import Decimal
from django.http import HttpResponse, HttpRequest
from django.conf import settings
from django.conf.urls import patterns, include, url
from django.core.urlresolvers import reverse
from django.test import TestCase, Client
from ....cart.app import cart_app
from ....cart.models import CART_SESSION_KEY
from ....cart.tests import TestCart
from ....contrib.delivery.simplepost.models import PostShippingType
from ....order import handler as order_handler
from ....order.models import Order
from ....payment import ConfirmationFormNeeded
from ....payment.tests import TestPaymentProvider
from ....product.tests import DeadParrot
from ..common.decorators import require_order
from ..common.views import prepare_order, reactivate_order
from . import views
urlpatterns = patterns('',
url(r'^cart/', include(cart_app.urls)),
url(r'^checkout/', include('satchless.contrib.checkout.multistep.urls')),
url(r'^order/', include('satchless.order.urls')),
)
class TestPaymentProviderWithConfirmation(TestPaymentProvider):
def confirm(self, order):
raise ConfirmationFormNeeded(action='http://test.payment.gateway.example.com')
class CheckoutTest(TestCase):
urls = 'satchless.contrib.checkout.multistep.tests'
def _setup_settings(self, custom_settings):
original_settings = {}
for setting_name, value in custom_settings.items():
if hasattr(settings, setting_name):
original_settings[setting_name] = getattr(settings, setting_name)
setattr(settings, setting_name, value)
return original_settings
def _teardown_settings(self, original_settings, custom_settings=None):
custom_settings = custom_settings or {}
for setting_name, value in custom_settings.items():
if setting_name in original_settings:
setattr(settings, setting_name, value)
else:
delattr(settings, setting_name)
def setUp(self):
self.macaw = DeadParrot.objects.create(slug='macaw',
species="Hyacinth Macaw")
self.cockatoo = DeadParrot.objects.create(slug='cockatoo',
species="White Cockatoo")
self.macaw_blue = self.macaw.variants.create(color='blue', looks_alive=False)
self.macaw_blue_fake = self.macaw.variants.create(color='blue', looks_alive=True)
self.cockatoo_white_a = self.cockatoo.variants.create(color='white', looks_alive=True)
self.cockatoo_white_d = self.cockatoo.variants.create(color='white', looks_alive=False)
self.cockatoo_blue_a = self.cockatoo.variants.create(color='blue', looks_alive=True)
self.cockatoo_blue_d = self.cockatoo.variants.create(color='blue', looks_alive=False)
self.custom_settings = {
'SATCHLESS_DELIVERY_PROVIDERS': ['satchless.contrib.delivery.simplepost.PostDeliveryProvider'],
'SATCHLESS_ORDER_PARTITIONERS': ['satchless.contrib.order.partitioner.simple'],
'SATCHLESS_PAYMENT_PROVIDERS': [TestPaymentProviderWithConfirmation],
'SATCHLESS_DJANGO_PAYMENT_TYPES': ['dummy'],
'PAYMENT_VARIANTS': {'dummy': ('payments.dummy.DummyProvider', {'url': '/', })},
}
self.original_settings = self._setup_settings(self.custom_settings)
order_handler.init_queues()
self.anon_client = Client()
PostShippingType.objects.create(price=12, typ='polecony', name='list polecony')
PostShippingType.objects.create(price=20, typ='list', name='List zwykly')
def tearDown(self):
self._teardown_settings(self.original_settings, self.custom_settings)
order_handler.init_queues()
def _test_status(self, url, method='get', *args, **kwargs):
status_code = kwargs.pop('status_code', 200)
client = kwargs.pop('client_instance', Client())
data = kwargs.pop('data', {})
response = getattr(client, method)(url, data=data, follow=False)
self.assertEqual(response.status_code, status_code,
'Incorrect status code for: %s, (%s, %s)! Expected: %s, received: %s. HTML:\n\n%s' % (
url.decode('utf-8'), args, kwargs, status_code, response.status_code,
response.content.decode('utf-8')))
return response
def _get_or_create_cart_for_client(self, client, typ='satchless_cart'):
self._test_status(reverse('satchless-cart-view'), client_instance=self.anon_client)
return TestCart.objects.get(pk=self.anon_client.session[CART_SESSION_KEY % typ], typ=typ)
def _get_order_from_session(self, session):
order_pk = session.get('satchless_order', None)
if order_pk:
return Order.objects.get(pk=order_pk)
return None
def _get_order_items(self, order):
order_items = set()
for group in order.groups.all():
order_items.update(group.items.values_list('product_variant', 'quantity'))
return order_items
def test_order_from_cart_view_creates_proper_order(self):
cart = self._get_or_create_cart_for_client(self.anon_client)
cart.replace_item(self.macaw_blue, 1)
cart.replace_item(self.macaw_blue_fake, Decimal('2.45'))
cart.replace_item(self.cockatoo_white_a, Decimal('2.45'))
self._test_status(reverse(prepare_order), method='post',
client_instance=self.anon_client, status_code=302)
order = self._get_order_from_session(self.anon_client.session)
self.assertNotEqual(order, None)
order_items = self._get_order_items(order)
self.assertEqual(set(cart.items.values_list('variant', 'quantity')), order_items)
def test_order_is_updated_after_cart_changes(self):
cart = self._get_or_create_cart_for_client(self.anon_client)
cart.replace_item(self.macaw_blue, 1)
cart.replace_item(self.macaw_blue_fake, Decimal('2.45'))
cart.replace_item(self.cockatoo_white_a, Decimal('2.45'))
self._test_status(reverse(prepare_order), method='post',
client_instance=self.anon_client, status_code=302)
order = self._get_order_from_session(self.anon_client.session)
order_items = self._get_order_items(order)
# compare cart and order
self.assertEqual(set(cart.items.values_list('variant', 'quantity')), order_items)
# update cart
cart.add_item(self.macaw_blue, 100)
cart.add_item(self.macaw_blue_fake, 100)
self._test_status(reverse(prepare_order), method='post',
client_instance=self.anon_client, status_code=302)
old_order = order
order = self._get_order_from_session(self.anon_client.session)
# order should be reused
self.assertEqual(old_order.pk, order.pk)
self.assertNotEqual(order, None)
order_items = self._get_order_items(order)
# compare cart and order
self.assertEqual(set(cart.items.values_list('variant', 'quantity')), order_items)
def test_prepare_order_creates_order_and_redirects_to_checkout_when_cart_is_not_empty(self):
cart = self._get_or_create_cart_for_client(self.anon_client)
cart.replace_item(self.macaw_blue, 1)
response = self._test_status(reverse(prepare_order), method='post',
client_instance=self.anon_client, status_code=302)
order_pk = self.anon_client.session.get('satchless_order', None)
order = Order.objects.get(pk=order_pk)
self.assertRedirects(response, reverse(views.checkout,
kwargs={'order_token':
order.token}))
def test_prepare_order_redirects_to_cart_when_cart_is_empty(self):
self._get_or_create_cart_for_client(self.anon_client)
response = self._test_status(reverse(prepare_order), method='post',
client_instance=self.anon_client, status_code=302)
# 'satchless_cart' is taken from multistep/urls.py:
# url(r'^prepare-order/$', prepare_order, {'typ': 'satchless_cart'}...)
self.assertRedirects(response, reverse('satchless-cart-view'))
def test_prepare_order_redirects_to_checkout_when_order_exists(self):
order = self._create_order(self.anon_client)
response = self._test_status(reverse(prepare_order), method='post',
client_instance=self.anon_client, status_code=302)
self.assertRedirects(response, reverse(views.checkout,
kwargs={'order_token':
order.token}))
def _create_cart(self, client):
cart = self._get_or_create_cart_for_client(client)
cart.replace_item(self.macaw_blue, 1)
cart.replace_item(self.macaw_blue_fake, Decimal('2.45'))
cart.replace_item(self.cockatoo_white_a, Decimal('2.45'))
return cart
def _create_order(self, client):
self._create_cart(client)
self._test_status(reverse(prepare_order), method='post',
client_instance=client, status_code=302)
return self._get_order_from_session(client.session)
def test_order_is_deleted_when_all_cart_items_are_deleted(self):
order = self._create_order(self.anon_client)
for cart_item in order.cart.items.all():
self.assertTrue(Order.objects.filter(pk=order.pk).exists())
order.cart.replace_item(cart_item.variant, 0)
self.assertFalse(Order.objects.filter(pk=order.pk).exists())
def test_checkout_view(self):
order = self._create_order(self.anon_client)
response = self._test_status(reverse(views.checkout,
kwargs={'order_token':
order.token}),
client_instance=self.anon_client,
status_code=200)
group = order.groups.get()
dtypes = order_handler.get_delivery_types(group)
dtype = dtypes[0][0]
df = response.context['delivery_formset']
data = {'billing_first_name': 'First',
'billing_last_name': 'Last',
'billing_street_address_1': 'Via Rodeo 1',
'billing_city': 'Beverly Hills',
'billing_country': 'US',
'billing_country_area': 'AZ',
'billing_phone': '555-555-5555',
'billing_postal_code': '90210'}
data[df.add_prefix('INITIAL_FORMS')] = '1'
data[df.add_prefix('MAX_NUM_FORMS')] = ''
data[df.add_prefix('TOTAL_FORMS')] = '1'
for form in df.forms:
data[form.add_prefix('delivery_type')] = dtype
data[form.add_prefix('id')] = group.id
self._test_status(reverse(views.checkout, kwargs={'order_token':
order.token}),
data=data, status_code=302,
client_instance=self.anon_client, method='post')
self.assertEqual(order.groups.get().delivery_type, dtype)
def test_delivery_details_view(self):
order = self._create_order(self.anon_client)
group = order.groups.get()
dtypes = order_handler.get_delivery_types(group)
group.delivery_type = dtypes[0][0]
group.save()
self._test_status(reverse(views.delivery_details,
kwargs={'order_token': order.token}),
client_instance=self.anon_client, method='get')
def test_delivery_details_view_redirects_to_checkout_when_delivery_type_is_missing(self):
order = self._create_order(self.anon_client)
response = self._test_status(reverse(views.delivery_details,
kwargs={'order_token':
order.token}),
status_code=302,
client_instance=self.anon_client,
method='get')
self.assertRedirects(response, reverse(views.checkout,
kwargs={'order_token':
order.token}))
def test_payment_view_redirects_to_payment_choice_view_when_payment_type_is_missing(self):
order = self._create_order(self.anon_client)
response = self._test_status(reverse(views.payment_details,
kwargs={'order_token':
order.token}),
status_code=302,
client_instance=self.anon_client,
method='get')
self.assertRedirects(response, reverse(views.payment_choice,
kwargs={'order_token':
order.token}))
def test_checkout_views_redirects_to_confirmation_page_when_order_has_payment_pending_status(self):
order = self._create_order(self.anon_client)
order.set_status('payment-pending')
self._test_status(reverse(views.payment_details,
kwargs={'order_token':
order.token}),
status_code=302,
client_instance=self.anon_client,
method='get')
def test_reactive_order_view_changes_order_status_to_checkout(self):
order = self._create_order(self.anon_client)
order.set_status('payment-failed')
self._test_status(reverse(reactivate_order,
kwargs={'order_token':
order.token}),
status_code=302,
client_instance=self.anon_client,
method='post')
self.assertEqual(Order.objects.get(pk=order.pk).status, 'checkout')
def test_reactive_order_view_redirects_to_checkout_for_correct_order(self):
order = self._create_order(self.anon_client)
order.set_status('payment-failed')
response = self._test_status(reverse(reactivate_order,
kwargs={'order_token':
order.token}),
status_code=302,
client_instance=self.anon_client,
method='post')
self.assertRedirects(response, reverse('satchless-checkout', args=(order.token,)))
def test_require_order_decorator(self):
def assertRedirects(response, path):
self.assertEqual(response.status_code, 302)
self.assertEqual(response['Location'], path)
def view_factory(status):
@require_order(status=status)
def view(request, order_token):
return HttpResponse()
return view
request = HttpRequest()
order = self._create_order(self.anon_client)
# decorator should not redirect if status is correct
for status, name in Order.STATUS_CHOICES:
view = view_factory(status)
order.set_status(status)
self.assertTrue(view(request, order_token=order.token).status_code, 200)
view = view_factory('non-existing-status')
order.set_status('payment-pending')
assertRedirects(view(request, order_token=order.token),
reverse('satchless-checkout-confirmation', args=(order.token,)))
order.set_status('checkout')
assertRedirects(view(request, order_token=order.token),
reverse('satchless-checkout', args=(order.token,)))
for status in ('payment-failed', 'delivery', 'payment-complete', 'cancelled'):
order.set_status(status)
assertRedirects(view(request, order_token=order.token),
reverse('satchless-order-view', args=(order.token,)))
assertRedirects(view(request, order_token='non-existing-order-token'),
reverse('satchless-cart-view'))
|
fusionbox/satchless
|
satchless/contrib/checkout/multistep/tests.py
|
Python
|
bsd-3-clause
| 16,546
|
# -*- coding:utf-8 -*-
import logging
import warnings
from flypwd.config import config
with warnings.catch_warnings():
warnings.simplefilter("ignore")
from Crypto.PublicKey import RSA
from Crypto.Cipher import PKCS1_v1_5
log = logging.getLogger(__name__)
def check_key(keyfile):
"""
checks the RSA key file
raises ValueError if not valid
"""
with open(keyfile, 'r') as f:
return RSA.importKey(f.read(), passphrase="")
def gen_key():
return RSA.generate(config.getint('keys', 'dimension'))
def encrypt_with_pub(pwd, pub):
cipher = PKCS1_v1_5.new(pub)
return cipher.encrypt(pwd.encode('utf-8'))
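# Usage sketch (added for illustration, not part of the original module): generate a
# key of the configured dimension and encrypt a password with its public half using
# the helpers defined above.
if __name__ == '__main__':
    example_key = gen_key()
    example_cipher = encrypt_with_pub("s3cret", example_key.publickey())
    log.info("encrypted example password to %d bytes", len(example_cipher))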
|
giupo/flypwd
|
flypwd/keys.py
|
Python
|
bsd-3-clause
| 658
|
import os
import shutil
import unittest
import copy
from nvpy.nvpy import Config
from nvpy.notes_db import NotesDB
notes = {
'1': {
'modifydate': 1111111222,
'tags': [],
'createdate': 1111111111,
'syncdate': 0,
'content': 'active note 1',
'savedate': 0,
},
'2': {
'modifydate': 1111111222,
'tags': [],
'createdate': 1111111111,
'syncdate': 0,
'content': 'active note 2',
'savedate': 0,
},
'3': {
'modifydate': 1111111222,
'tags': ['foo'],
'createdate': 1111111111,
'syncdate': 0,
'content': 'active note 3',
'savedate': 0,
},
'4': {
'modifydate': 1111111222,
'tags': [],
'createdate': 1111111111,
'syncdate': 0,
'content': 'deleted note',
'savedate': 0,
'deleted': True,
}
}
class FilterGstyle(unittest.TestCase):
BASE_DIR = '/tmp/.nvpyUnitTests'
def setUp(self):
if os.path.isdir(self.BASE_DIR):
shutil.rmtree(self.BASE_DIR)
def __mock_config(self, notes_as_txt=False):
app_dir = os.path.abspath('nvpy')
mockConfig = Config(app_dir, [])
mockConfig.sn_username = ''
mockConfig.sn_password = ''
mockConfig.db_path = self.BASE_DIR
mockConfig.txt_path = self.BASE_DIR + '/notes'
mockConfig.simplenote_sync = 0
mockConfig.notes_as_txt = notes_as_txt
return mockConfig
def test_search_by_none_or_empty(self):
db = NotesDB(self.__mock_config())
db.notes = copy.deepcopy(notes)
filtered_notes, match_regexp, active_notes = db.filter_notes_gstyle()
self.assertEqual(len(filtered_notes), 3)
self.assertEqual(match_regexp, '')
self.assertEqual(active_notes, 3)
filtered_notes, match_regexp, active_notes = db.filter_notes_gstyle('')
self.assertEqual(len(filtered_notes), 3)
self.assertEqual(match_regexp, '')
self.assertEqual(active_notes, 3)
def test_search_by_tag(self):
db = NotesDB(self.__mock_config())
db.notes = copy.deepcopy(notes)
filtered_notes, match_regexp, active_notes = db.filter_notes_gstyle('tag:foo')
self.assertEqual(len(filtered_notes), 1)
self.assertEqual(filtered_notes[0].note['content'], notes['3']['content'])
self.assertEqual(match_regexp, '') # Should ignore for tag pattern
self.assertEqual(active_notes, 3)
def test_search_by_single_words(self):
db = NotesDB(self.__mock_config())
db.notes = copy.deepcopy(notes)
filtered_notes, match_regexp, active_notes = db.filter_notes_gstyle('note 1 active')
self.assertEqual(len(filtered_notes), 1)
self.assertEqual(filtered_notes[0].note['content'], notes['1']['content'])
        self.assertEqual(match_regexp, 'note|1|active')  # words joined into the highlight regexp
self.assertEqual(active_notes, 3)
def test_search_by_multi_word(self):
db = NotesDB(self.__mock_config())
db.notes = copy.deepcopy(notes)
filtered_notes, match_regexp, active_notes = db.filter_notes_gstyle('"note 1" active')
self.assertEqual(len(filtered_notes), 1)
self.assertEqual(filtered_notes[0].note['content'], notes['1']['content'])
        self.assertEqual(match_regexp, r'note\ 1|active')  # quoted phrase escaped as a single term
self.assertEqual(active_notes, 3)
class FilterRegexp(unittest.TestCase):
BASE_DIR = '/tmp/.nvpyUnitTests'
def setUp(self):
if os.path.isdir(self.BASE_DIR):
shutil.rmtree(self.BASE_DIR)
def __mock_config(self, notes_as_txt=False):
app_dir = os.path.abspath('nvpy')
mockConfig = Config(app_dir, [])
mockConfig.sn_username = ''
mockConfig.sn_password = ''
mockConfig.db_path = self.BASE_DIR
mockConfig.txt_path = self.BASE_DIR + '/notes'
mockConfig.simplenote_sync = 0
mockConfig.notes_as_txt = notes_as_txt
return mockConfig
def test_search_by_none_or_empty(self):
db = NotesDB(self.__mock_config())
db.notes = copy.deepcopy(notes)
filtered_notes, match_regexp, active_notes = db.filter_notes_regexp()
self.assertEqual(len(filtered_notes), 3)
self.assertEqual(match_regexp, '')
self.assertEqual(active_notes, 3)
filtered_notes, match_regexp, active_notes = db.filter_notes_regexp('')
self.assertEqual(len(filtered_notes), 3)
self.assertEqual(match_regexp, '')
self.assertEqual(active_notes, 3)
def test_search_by_invalid_regexp(self):
db = NotesDB(self.__mock_config())
db.notes = copy.deepcopy(notes)
filtered_notes, match_regexp, active_notes = db.filter_notes_regexp('(deleted')
self.assertEqual(len(filtered_notes), 3)
self.assertEqual(match_regexp, '')
self.assertEqual(active_notes, 3)
def test_search_by_valid_regexp(self):
db = NotesDB(self.__mock_config())
db.notes = copy.deepcopy(notes)
filtered_notes, match_regexp, active_notes = db.filter_notes_regexp('foo| [12]')
self.assertEqual(len(filtered_notes), 3)
self.assertEqual(match_regexp, 'foo| [12]')
self.assertEqual(active_notes, 3)
|
yuuki0xff/nvpy
|
tests/notes_db/filter.py
|
Python
|
bsd-3-clause
| 5,345
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "MovingMedian", cycle_length = 12, transform = "None", sigma = 0.0, exog_count = 100, ar_order = 12);
|
antoinecarme/pyaf
|
tests/artificial/transf_None/trend_MovingMedian/cycle_12/ar_12/test_artificial_128_None_MovingMedian_12_12_100.py
|
Python
|
bsd-3-clause
| 265
|
import tests.periodicities.period_test as per
per.buildModel((120 , 'S' , 25));
|
antoinecarme/pyaf
|
tests/periodicities/Second/Cycle_Second_25_S_120.py
|
Python
|
bsd-3-clause
| 82
|
from nose.plugins.skip import SkipTest
from oyster.conf import settings
from oyster.core import Kernel
from oyster.storage.gridfs import GridFSStorage
from oyster.storage.dummy import DummyStorage
def _simple_storage_test(StorageCls):
kernel = Kernel(mongo_db='oyster_test')
kernel.doc_classes['default'] = {}
storage = StorageCls(kernel)
# ensure the class has a storage_type attribute
assert hasattr(storage, 'storage_type')
doc = {'_id': 'aabbccddeeff', 'url': 'http://localhost:8000/#test',
'doc_class': 'default', 'metadata': {} }
storage_id = storage.put(doc, 'hello oyster', 'text/plain')
assert storage_id
assert storage.get(storage_id) == 'hello oyster'
def test_s3():
if not hasattr(settings, 'AWS_BUCKET'):
raise SkipTest('S3 not configured')
from oyster.storage.s3 import S3Storage
_simple_storage_test(S3Storage)
def test_gridfs():
_simple_storage_test(GridFSStorage)
def test_dummy():
_simple_storage_test(DummyStorage)
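# Minimal sketch (added for illustration, not part of oyster) of the storage
# interface exercised by _simple_storage_test above: a backend exposes a
# storage_type attribute plus put() and get().
class _InMemoryStorage(object):
    storage_type = 'memory'
    def __init__(self, kernel):
        self._blobs = {}
    def put(self, tracked_doc, data, content_type):
        self._blobs[tracked_doc['_id']] = data
        return tracked_doc['_id']
    def get(self, storage_id):
        return self._blobs[storage_id]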
|
jamesturk/oyster
|
oyster/tests/test_storage.py
|
Python
|
bsd-3-clause
| 1,021
|
#!/usr/bin/env python
from flexbe_core import EventState, Logger
import rospy
from flexbe_core.proxy import ProxyPublisher
from geometry_msgs.msg import Twist
"""Created on June. 21, 2017
@author: Alireza Hosseini
"""
class PublishTwistState(EventState):
"""
Publishes a velocity command from userdata.
-- topic string Topic to which the velocity command will be published.
># twist Twist Velocity command to be published.
    <= done Velocity command has been published.
"""
def __init__(self, topic):
"""Constructor"""
super(PublishTwistState, self).__init__(outcomes=['done'],
input_keys=['twist'])
self._topic = topic
self._pub = ProxyPublisher({self._topic: Twist})
def execute(self, userdata):
return 'done'
def on_enter(self, userdata):
self._pub.publish(self._topic, userdata.twist)
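# Usage sketch (added for illustration; the topic name and values are hypothetical):
# a behavior typically adds PublishTwistState(topic='/cmd_vel') and supplies the
# 'twist' userdata key with a geometry_msgs/Twist built along these lines.
def _example_twist(linear_x=0.2, angular_z=0.1):
    twist = Twist()
    twist.linear.x = linear_x
    twist.angular.z = angular_z
    return twist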
|
FlexBE/generic_flexbe_states
|
flexbe_utility_states/src/flexbe_utility_states/publish_twist_state.py
|
Python
|
bsd-3-clause
| 850
|
#! /usr/bin/python
"""
Runs GenomeMapper on single-end or paired-end data.
"""
import optparse, os, sys, tempfile
def stop_err( msg ):
sys.stderr.write( "%s\n" % msg )
sys.exit()
def __main__():
#Parse Command Line
parser = optparse.OptionParser()
parser.add_option('', '--threads', dest='threads', help='The number of threads to run')
parser.add_option('', '--input1', dest='input1', help='The (forward or single-end) reads file in Sanger FASTQ format')
parser.add_option('', '--input2', dest='input2', help='The reverse reads file in Sanger FASTQ format')
parser.add_option('', '--output', dest='output', help='The output file')
parser.add_option('', '--paired', dest='paired', help='Whether the data is single- or paired-end')
parser.add_option('', '--genomeSource', dest='genomeSource', help='The type of reference provided')
parser.add_option('', '--ref', dest='ref', help='The reference genome to use or index')
parser.add_option('', '--indexSettings', dest='index_settings', help='Whether or not indexing options are to be set')
parser.add_option('', '--params', dest='params', help='Whether to use default or specified parameters')
parser.add_option('', '--seedlength', dest='seedlength', help='GenomeMapper Index Seed Length')
parser.add_option('', '--alignseedlength', dest='alignseedlength', help='GenomeMapper Alignment Seed Length')
parser.add_option('', '--format', dest='format', help='Output format (bed or shore)')
parser.add_option('', '--maxmismatches', dest='maxmismatches', help='Maximal number of mismatches')
parser.add_option('', '--maxgaps', dest='maxgaps', help='Maximal number of gaps')
parser.add_option('', '--maxedits', dest='maxedits', help='Maximal number of edit operations')
parser.add_option('', '--reportall', dest='reportall', help='Report all hits')
(options, args) = parser.parse_args()
# index if necessary
if options.genomeSource == 'history':
# set up commands
if options.index_settings =='index_pre_set':
indexing_cmds = ''
else:
try:
indexing_cmds = '%s ' % \
(('','-s %s'%options.seedlength)[options.seedlength!='None' and options.seedlength>=1])
except ValueError:
indexing_cmds = ''
# make temp directory for placement of indices and copy reference file there
tmp_dir = tempfile.gettempdir()
try:
os.system('cp %s %s' % (options.ref, tmp_dir))
except Exception, erf:
stop_err('Error creating temp directory for indexing purposes\n' + str(erf))
options.ref = os.path.join(tmp_dir, os.path.split(options.ref)[1])
cmd1 = 'gmindex -v -i %s %s' % (options.ref, indexing_cmds)
try:
os.system(cmd1)
except Exception, erf:
stop_err('Error indexing reference sequence\n' + str(erf))
if options.params == 'pre_set':
aligning_cmds = '-v '
else:
try:
print options
aligning_cmds = '%s %s %s %s %s %s -v ' % \
(('','-f %s' % options.format)[options.format!='None'],
('','-a')[options.reportall!='None'],
('','-M %s' % options.maxmismatches)[options.maxmismatches!='None'],
('','-G %s' % options.maxgaps)[options.maxgaps!='None'],
('','-E %s' % options.maxedits)[options.maxedits!='None'],
('','-l %s' % options.alignseedlength)[options.alignseedlength!='None'])
except ValueError, erf:
stop_err('Something is wrong with the alignment parameters and the alignment could not be run\n' + str(erf))
# prepare actual aligning commands
if options.paired == 'paired':
print "Sorry, paired end alignments are not implemented yet"
return
#cmd2 = 'genomemapper %s %s -1 %s -2 %s > %s ' % (options.ref, options.input1, options.input2, options.output)
else:
cmd2 = 'genomemapper %s -i %s -q %s -o %s ' % (aligning_cmds, options.ref, options.input1, options.output)
# align
try:
print cmd2
os.system(cmd2)
except Exception, erf:
stop_err("Error aligning sequence\n" + str(erf))
if __name__=="__main__": __main__()
|
vipints/oqtans
|
oqtans_tools/PALMapper/0.5/galaxy/genomemapper_wrapper.py
|
Python
|
bsd-3-clause
| 4,421
|
#!/usr/bin/env python
r"""Browse raw data.
This uses :func:`mne.io.read_raw` so it supports the same formats
(without keyword arguments).
Examples
--------
.. code-block:: console
$ mne browse_raw sample_audvis_raw.fif \
--proj sample_audvis_ecg-proj.fif \
--eve sample_audvis_raw-eve.fif
"""
# Authors : Eric Larson, PhD
import sys
import mne
def run():
"""Run command."""
import matplotlib.pyplot as plt
from mne.commands.utils import get_optparser, _add_verbose_flag
from mne.viz import _RAW_CLIP_DEF
parser = get_optparser(__file__, usage='usage: %prog raw [options]')
parser.add_option("--raw", dest="raw_in",
help="Input raw FIF file (can also be specified "
"directly as an argument without the --raw prefix)",
metavar="FILE")
parser.add_option("--proj", dest="proj_in",
help="Projector file", metavar="FILE",
default='')
parser.add_option("--eve", dest="eve_in",
help="Events file", metavar="FILE",
default='')
parser.add_option("-d", "--duration", dest="duration", type="float",
help="Time window for plotting (sec)",
default=10.0)
parser.add_option("-t", "--start", dest="start", type="float",
help="Initial start time for plotting",
default=0.0)
parser.add_option("-n", "--n_channels", dest="n_channels", type="int",
help="Number of channels to plot at a time",
default=20)
parser.add_option("-o", "--order", dest="group_by",
help="Order to use for grouping during plotting "
"('type' or 'original')", default='type')
parser.add_option("-p", "--preload", dest="preload",
help="Preload raw data (for faster navigaton)",
default=False, action="store_true")
parser.add_option("-s", "--show_options", dest="show_options",
help="Show projection options dialog",
default=False)
parser.add_option("--allowmaxshield", dest="maxshield",
help="Allow loading MaxShield processed data",
action="store_true")
parser.add_option("--highpass", dest="highpass", type="float",
help="Display high-pass filter corner frequency",
default=-1)
parser.add_option("--lowpass", dest="lowpass", type="float",
help="Display low-pass filter corner frequency",
default=-1)
parser.add_option("--filtorder", dest="filtorder", type="int",
help="Display filtering IIR order (or 0 to use FIR)",
default=4)
parser.add_option("--clipping", dest="clipping",
help="Enable trace clipping mode, either 'clamp' or "
"'transparent'", default=_RAW_CLIP_DEF)
parser.add_option("--filterchpi", dest="filterchpi",
help="Enable filtering cHPI signals.", default=None,
action="store_true")
_add_verbose_flag(parser)
options, args = parser.parse_args()
if len(args):
raw_in = args[0]
else:
raw_in = options.raw_in
duration = options.duration
start = options.start
n_channels = options.n_channels
group_by = options.group_by
preload = options.preload
show_options = options.show_options
proj_in = options.proj_in
eve_in = options.eve_in
maxshield = options.maxshield
highpass = options.highpass
lowpass = options.lowpass
filtorder = options.filtorder
clipping = options.clipping
if isinstance(clipping, str):
if clipping.lower() == 'none':
clipping = None
else:
try:
clipping = float(clipping) # allow float and convert it
except ValueError:
pass
filterchpi = options.filterchpi
verbose = options.verbose
if raw_in is None:
parser.print_help()
sys.exit(1)
kwargs = dict(preload=preload)
if maxshield:
kwargs.update(allow_maxshield='yes')
raw = mne.io.read_raw(raw_in, **kwargs)
if len(proj_in) > 0:
projs = mne.read_proj(proj_in)
raw.info['projs'] = projs
if len(eve_in) > 0:
events = mne.read_events(eve_in)
else:
events = None
if filterchpi:
if not preload:
raise RuntimeError(
'Raw data must be preloaded for chpi, use --preload')
raw = mne.chpi.filter_chpi(raw)
highpass = None if highpass < 0 or filtorder < 0 else highpass
lowpass = None if lowpass < 0 or filtorder < 0 else lowpass
raw.plot(duration=duration, start=start, n_channels=n_channels,
group_by=group_by, show_options=show_options, events=events,
highpass=highpass, lowpass=lowpass, filtorder=filtorder,
clipping=clipping, verbose=verbose)
plt.show(block=True)
mne.utils.run_command_if_main()
|
kambysese/mne-python
|
mne/commands/mne_browse_raw.py
|
Python
|
bsd-3-clause
| 5,212
|
"""
Module for parsing & validation of EPICS database definition (dbd) files.
Copyright 2012 Australian National University. Licensed under the new BSD License, as specified in the LICENSE file.
"""
from pyparsing import *
from parserutils import *
import os
import re
import dbparser
import pickle
import traceback
name = Word(alphanums+"_") # valid name format for DBD names (also library symbol names)
## Menu definitions
choice_value = Group( Literal("choice").suppress()
- parentheses( comma_delimited(name, dblQuotedString )) )
choice_list = curly_braces( Group( ZeroOrMore(choice_value) ))
menu = Literal("menu").suppress() - parentheses(name) + choice_list
class Menu:
def __init__(self, name, choices, loc, s):
self.name = name
self.choices = choices
self.lineno = lineno(loc,s)
def _process_menu(s,loc,toks):
choices = dict([(value.strip('"'),name) for name,value in toks[1]])
return [ Menu(toks[0], choices, loc, s) ]
menu.setParseAction(_process_menu)
## recordtype definitions
field_param = Group( name - parentheses(dblQuotedString|name) )
field_param_list = Group( curly_braces( ZeroOrMore(field_param) ) )
field_value = ( Literal("field").suppress()
- parentheses(comma_delimited(name, name))
+ field_param_list )
class Field:
def __init__(self, name, field_type, params, loc, s):
self.name = name
self.field_type = field_type
self.params = params
def verify_field(self, record, record_type, value):
t = self.field_type
if t == "DBF_STRING":
return validate_string(self.params, value)
elif t == "DBF_CHAR":
return validate_int(value, True, 8)
elif t == "DBF_UCHAR":
return validate_int(value, False, 8)
elif t == "DBF_SHORT":
return validate_int(value, True, 16)
elif t == "DBF_USHORT":
return validate_int(value, False, 16)
elif t == "DBF_LONG":
return validate_int(value, True, 32)
elif t == "DBF_ULONG":
return validate_int(value, False, 32)
elif t in ( "DBF_FLOAT", "DBF_DOUBLE" ):
            return validate_double(self.params, value) # ignore precision
elif t == "DBF_ENUM":
return validate_enum(record, value)
elif t == "DBF_MENU":
return validate_menu(self.params, value)
elif t == "DBF_DEVICE":
return validate_device(record, record_type.device_types, value)
elif t in ("DBF_INLINK", "DBF_OUTLINK", "DBF_FWDLINK"):
return validate_link(value)
elif t == "DBF_NOACCESS":
raise DbdFieldError("Field is type NOACCESS, not settable in database")
else:
raise DbdFieldError("Got unexpected field type '%s' for field '%s'" % (t, self.name))
def _process_field(s,loc,toks):
params = dict([ (name,value) for name,value in toks[2] ])
return [ Field(toks[0], toks[1], params, loc, s) ]
field_value.setParseAction(_process_field)
field_list = Group( curly_braces( ZeroOrMore(field_value) ))
record_type = Literal("recordtype").suppress() - parentheses(name) + field_list
class RecordType:
def __init__(self, name, fields, loc, s):
self.name = name
self.fields = dict([ (f.name,f) for f in fields ])
self.device_types = {}
def _process_record_type(s,loc,toks):
return [ RecordType(toks[0], toks[1], loc, s) ]
record_type.setParseAction(_process_record_type)
device = Literal("device").suppress() - parentheses( comma_delimited(name, name, name, dblQuotedString))
class Device:
def __init__(self, rec_type, dev_type, dev_name, dev_label):
self.rec_type = rec_type
self.dev_type = dev_type
self.dev_name = dev_name
self.dev_label = dev_label.strip('"')
def _process_device(s,loc,toks):
return [ Device(*toks) ]
device.setParseAction(_process_device)
# ignore all other dbd directives, mean nothing here
driver = Literal("driver") - parentheses(name)
registrar = Literal("registrar") - parentheses(name)
variable = Literal("variable") - parentheses(comma_delimited(name,name))
function = Literal("function") - parentheses(name)
breaktable = Literal("breaktable") - parentheses(name) + Regex("{.*?}")
ignore = (driver|registrar|variable|function|breaktable).suppress()
dbd_content = ZeroOrMore(menu|record_type|device|ignore) + StringEnd()
def parse_dbd(dbd_file, dbd_cache_path=None):
try:
result = try_read_cache(dbd_file, dbd_cache_path)
if result:
return result
raw = dbd_content.parseString(dbd_file.read())
# temporary dict to keep track of menu names
menus = {}
# result, a dict of record types w/ embedded menus & device types
record_types = {}
for item in raw:
if isinstance(item, Menu):
menus[item.name] = item
elif isinstance(item, Device):
record_types[item.rec_type].device_types[item.dev_label] = item
elif isinstance(item, RecordType):
record_types[item.name] = item
for field in item.fields.values():
if "menu" in field.params: # instead of just menu name, also assign actual menu
field.params["menu_values"] = menus[field.params["menu"]]
update_cache(dbd_file, record_types, dbd_cache_path)
return record_types
except ParseBaseException as err:
raise dbparser.DatabaseInnerParseException(dbd_file.name, err)
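# A minimal usage sketch (file names are hypothetical, not from this module):
# parse_dbd() expects an open file object and returns a dict mapping record
# type names to RecordType objects, with menus and device types attached.
#
#   with open("softIoc.dbd") as f:
#       record_types = parse_dbd(f, dbd_cache_path="softIoc.dbd.cache")
#   print(sorted(record_types.keys()))       # e.g. ['ai', 'ao', 'calc', ...]
#   print(record_types["ai"].fields.keys())  # field names defined for 'ai'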
## Cache methods
def try_read_cache(dbd_file, dbd_cache_path):
"""
Try to read a cached dbd file from the given path,
return the dbd contents or None if failed or out of date.
"""
try:
with open(dbd_cache_path, "rb") as f:
size,mtime = pickle.load(f)
stat = os.fstat(dbd_file.fileno())
if stat.st_size == size and stat.st_mtime == mtime:
return pickle.load(f)
except (TypeError, IOError):
        return None # path was null, or the file didn't exist or was not readable
except (EOFError, pickle.PickleError):
pass # something went wrong while reading the file
return None
def update_cache(dbd_file, dbd_contents, dbd_cache_path):
if not dbd_cache_path:
return
with open(dbd_cache_path, "wb") as f:
stat = os.fstat(dbd_file.fileno())
pickle.dump((stat.st_size,stat.st_mtime), f)
pickle.dump(dbd_contents, f)
# validation methods
class DbdFieldError(Exception):
pass
def verify_record(dbd, record, s,loc):
"""
Verify all fields in record 'record' against dbd 'dbd'
This is called as part of a pyparsing parse run, so parsing context s & loc are supplied to allow Parser exceptions w/ locations
"""
try:
rtype = dbd[record.rtype]
except KeyError:
raise ParseFatalException(s,loc,"Record type '%s' not found in dbd" %
self.rtype)
for name,value in record.fields:
try:
rtype.fields[name].verify_field(record, rtype, value)
except KeyError:
raise ParseFatalException(s,loc,"Record '%s' - type '%s' does not define a field named '%s'" % (record.name, rtype.name, name))
except DbdFieldError as err:
raise ParseFatalException(s,loc,"Record '%s' - invalid field '%s': %s" %
(record.name, name, err))
except Exception as err:
traceback.print_exc()
raise ParseFatalException(s,loc,"Failed to verify field '%s' against dbd: %s" % (name, err))
def validate_string(params, value):
size = int(params["size"])
if len(value) > size:
raise DbdFieldError("Value '%s' exceeds maximum length %d" % (value,size))
def validate_int(value, signed, bits):
if value == "":
return # empty string is OK as a standin for zero
if not re.match("^-?[0-9]*$", value) and not re.match("^-?0x[0-9A-F]*$", value):
raise DbdFieldError("Numeric value '%s' is not a valid number" % value)
if value.startswith("-") and not signed:
raise DbdFieldError("Unsigned field contains a negative number")
try:
intval = eval(value)
if not isinstance(intval, int):
raise SyntaxError
except SyntaxError:
raise DbdFieldError("Numeric value '%s' is not a valid number" % value)
if not signed and intval >= pow(2,bits):
raise DbdFieldError("Field value %d overflows %d-bit unsigned field" % (value,
bits))
if signed and abs(intval) >= pow(2,bits-1):
raise DbdFieldError("Field value %d overflows %d-bit signed field" % (value, bits))
def validate_double(params, value):
try:
float(value)
except:
raise DbdFieldError("Field value '%s' is not a valid floating-point number", value)
def validate_enum(record, value):
if record.rtype in ( "bi", "bo" ) and not value in ["0", "1"]:
raise DbdFieldError("Field value '%s' is invalid for a boolean enum (valid is 0,1")
if record.rtype in ( "mbbi", "mbbo" ):
try:
intval = int(value)
if intval < 0:
raise DbdFieldError("Enum field values cannot be negative")
number = ['ZRVL', 'ONVL', 'TWVL', 'THVL', 'FRVL', 'FVVL', 'SXVL', 'SVVL', 'EIVL', 'NIVL', 'TEVL', 'ELVL', 'TVVL', 'TTVL', 'FTVL', 'FFVL'][intval]
matching = [ (name,value) for name,value in record.fields if name == number ]
if len(matching) == 0:
raise DbdFieldError("Field value '%s' is invalid, record contains no field '%s'" % (value, number))
except IndexError:
raise DbdFieldError("Field value '%s' is out of range for record type %s'" % (value, record.rtype))
except ValueError:
raise DbdFieldError("Field value '%s' is not a valid integer" % (value))
def validate_menu(params, value):
if not "menu" in params:
raise DbdFieldError("Menu field has no menu definition in dbd file")
try:
choices = params["menu_values"].choices
intval = int(value)
if intval < 0 or intval >= len(choices):
raise DbdFieldError("Menu field index '%s' is out of range, menu only has %d choices" % (value, len(choices)))
except KeyError:
raise DbdFieldError("Menu field '%s' has no list of valid choices" % params["menu"])
except ValueError:
# not a valid integer, try as a string
if not value in choices:
raise DbdFieldError("'%s' is not a valid choice for menu %s" % (value, params["menu"]))
def validate_device(record, device_types, value):
if not value in device_types:
raise DbdFieldError("'%s' is not a known device type for record type %s" % (value, record.rtype))
def validate_link(value):
if value.startswith("@"):
return # TODO: verify @asyn masks look alright
parts = value.strip().split(" ")
PROCESS = ( "NPP", "PP", "CA", "CP", "CPP" )
MAXIMIZE = ( "NMS", "MS" )
if ( len(parts) == 2 and not parts[1] in PROCESS+MAXIMIZE ) or \
       ( len(parts) == 3 and not (parts[1] in PROCESS and parts[2] in MAXIMIZE) ):
raise DbdFieldError("'%s' is not a valid link format" % value)
|
anunuclear/dbpreproc
|
dbdparser.py
|
Python
|
bsd-3-clause
| 11,369
|
from django.http import HttpResponse
def hello_world(request):
return HttpResponse("Hello, world.")
|
xyloeric/pi
|
piExp/pi/views.py
|
Python
|
bsd-3-clause
| 101
|
import os
from django.core.urlresolvers import reverse
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from easy_thumbnails.fields import ThumbnailerImageField
class VisibilityModel(models.Model):
is_active = models.BooleanField(_('is active'), default=True)
is_public = models.BooleanField(_('is public'), default=False)
class Meta:
abstract = True
class AlbumManager(models.Manager):
def active(self):
return self.filter(is_active=True)
def public(self):
return self.filter(is_active=True, is_public=True)
class Album(VisibilityModel):
created_on = models.DateTimeField(_('created on'), default=timezone.now)
date = models.DateField(_('date'), default=timezone.now)
title = models.CharField(_('title'), max_length=200)
slug = models.SlugField(_('slug'), max_length=200, unique=True)
objects = AlbumManager()
class Meta:
get_latest_by = 'date'
ordering = ['-date']
verbose_name = _('album')
verbose_name_plural = _('albums')
def __unicode__(self):
return self.title
def get_absolute_url(self):
return reverse('chet_album_detail', kwargs={
'year': self.date.strftime('%Y'),
'slug': self.slug,
})
class PhotoManager(models.Manager):
def active(self):
return self.filter(
is_active=True,
album__is_active=True,
)
def public(self):
return self.filter(
is_active=True,
album__is_active=True,
is_public=True,
album__is_public=True,
)
class Photo(VisibilityModel):
created_on = models.DateTimeField(_('created on'), default=timezone.now)
file = ThumbnailerImageField(_('file'), upload_to='chet/photos/%Y/%m/')
album = models.ForeignKey(
Album, verbose_name=_('album'), related_name='photos')
shot_on = models.DateTimeField(_('shot on'), default=timezone.now)
title = models.CharField(_('title'), max_length=200, blank=True)
is_dark = models.BooleanField(
_('is dark'), default=False,
help_text=_('Dark images are shown on a light background.'))
objects = PhotoManager()
class Meta:
get_latest_by = 'shot_on'
ordering = ['shot_on']
verbose_name = _('photo')
verbose_name_plural = _('photos')
def __unicode__(self):
return self.title or os.path.basename(self.file.name)
def get_absolute_url(self):
return reverse('chet_photo_detail', kwargs={
'year': self.album.date.strftime('%Y'),
'slug': self.album.slug,
'photo': self.pk,
})
|
matthiask/django-chet
|
chet/models.py
|
Python
|
bsd-3-clause
| 2,741
|
# Copyright (C) 2018 Henrique Pereira Coutada Miranda, Alejandro Molina Sanchez, Alexandre Morlet, Fulvio Paleari
#
# All rights reserved.
#
# This file is part of yambopy
#
#
import os
from operator import itemgetter
from collections import OrderedDict
from yambopy import *
#
# by Henrique Miranda.
#
def pack_files_in_folder(folder,save_folder=None,mask='',verbose=True):
"""
Pack the output files in a folder to json files
"""
if not save_folder: save_folder = folder
#pack the files in .json files
for dirpath,dirnames,filenames in os.walk(folder):
#check if the folder fits the mask
if mask in dirpath:
#check if there are some output files in the folder
if ([ f for f in filenames if 'o-' in f ]):
if verbose: print(dirpath)
y = YamboOut(dirpath,save_folder=save_folder)
y.pack()
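# Hedged usage sketch (the folder name is illustrative): pack every o-* output
# file found under 'gw_conv/' into .json files alongside the originals.
#
#   pack_files_in_folder('gw_conv')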
#
# by Alejandro Molina-Sanchez
#
def breaking_symmetries(efield1,efield2=[0,0,0],folder='.',RmTimeRev=True):
"""
Breaks the symmetries for a given field.
Second field used in circular polarized pump configuration
RmTimeRev : Remove time symmetry is set True by default
"""
os.system('mkdir -p %s'%folder)
os.system('cp -r database/SAVE %s'%folder)
os.system('cd %s; yambo'%folder)
ypp = YamboIn.from_runlevel('-y -V all',executable='ypp',folder=folder,filename='ypp.in')
ypp['Efield1'] = efield1 # Field in the X-direction
    ypp['Efield2'] = efield2 # Second field (used for circularly polarized pump configurations)
if RmTimeRev:
ypp.arguments.append('RmTimeRev') # Remove Time Symmetry
ypp.write('%s/ypp.in'%folder)
os.system('cd %s ; ypp_ph -F ypp.in'%folder )
os.system('cd %s ; cd FixSymm; yambo '%folder )
os.system('rm -r %s/SAVE'%folder)
os.system('mv %s/FixSymm/SAVE %s/'%(folder,folder))
os.system('rm -r %s/FixSymm'%folder)
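# Hedged usage sketch (values are illustrative): break the symmetries for a
# field polarized along x, rebuilding the SAVE database inside 'rt/'.
#
#   breaking_symmetries([1, 0, 0], folder='rt')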
#
# by Alexandre Morlet & Henrique Miranda
#
def analyse_gw(folder,var,bandc,kpointc,bandv,kpointv,pack,text,draw,verbose=False):
"""
Study the convergence of GW calculations by looking at the change in band-gap value.
The script reads from <folder> all results from <variable> calculations and display them.
Use the band and k-point options (or change default values) according to the size of your k-grid and
the location of the band extrema.
"""
print(' K-point Band')
print('Conduction state %6d %6d'%(kpointc, bandc))
print(' Valence state %6d %6d'%(kpointv, bandv))
#find all ndb.QP files in the folder
io = OrderedDict()
for root, dirs, files in os.walk(folder):
#get starting name of folder
basename = os.path.basename(root)
#look into folders starting with var or reference
if any( [basename.startswith(v) for v in [var,'reference']] ):
for filename in files:
if filename != 'ndb.QP': continue
#get ndb.QP file in folder
io[basename] = ( YamboIn.from_file(folder=folder,filename="%s.in"%basename),
YamboQPDB.from_db(folder=root,filename=filename) )
#consistency check
#TODO
convergence_data = []
for basename, (inp,out) in io.items():
#get input
value, unit = inp[var]
#get qp value
# Be careful because the array of eigenvalues is defined now in another way
eigenvalues_dft, eigenvalues_qp, lifetimes, z = out.get_qps()
#save result
qp_gap = eigenvalues_qp[kpointc-out.min_kpoint,bandc-out.min_band] - eigenvalues_qp[kpointv-out.min_kpoint,bandv-out.min_band]
#check type of variable
if isinstance(value,list): value = value[1]
convergence_data.append([value,qp_gap])
convergence_data = np.array(sorted(convergence_data))
if convergence_data.dtype == 'object': raise ValueError('Unknown type of variable')
if text:
output_folder = 'analyse_%s'%folder
if not os.path.isdir(output_folder): os.mkdir(output_folder)
outname = os.path.join(output_folder,'%s_%s.dat'%(folder,var))
header = var+' ('+str(unit)+')'
np.savetxt(outname,convergence_data,delimiter='\t',header=header)
if draw:
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot(convergence_data[:,0],convergence_data[:,1],'o-')
ax.set_xlabel(var+' ('+unit+')')
        ax.set_ylabel(r'E_gw = E_lda + $\Delta$E')
fig.savefig('%s.png'%var)
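# Hedged usage sketch (folder, variable and band/k-point indices are
# illustrative): track the gap between the given valence and conduction states
# while converging FFTGvecs, writing a .dat file and a .png.
#
#   analyse_gw('gw_conv', 'FFTGvecs', bandc=5, kpointc=7, bandv=4, kpointv=7,
#              pack=False, text=True, draw=True)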
#
# by Alexandre Morlet
#
def analyse_bse(folder,var,numbexc,intexc,degenexc,maxexc,text,draw,verbose=False):
"""
Using ypp, you can study the convergence of BSE calculations in 2 ways:
Create a .png of all absorption spectra relevant to the variable you study
Look at the eigenvalues of the first n "bright" excitons (given a threshold intensity)
The script reads from <folder> all results from <variable> calculations for processing.
The resulting pictures and data files are saved in the ./analyse_<folder>/ folder.
Arguments:
folder -> Folder containing SAVE and convergence runs.
var -> Variable tested (e.g. FFTGvecs)
numbexc -> Number of excitons to read beyond threshold (default=2)
intexc -> Minimum intensity for excitons to be considered bright (default=0.05)
degenexc -> Energy threshold under which different peaks are merged (eV) (default=0.01)
maxexc -> Energy threshold after which excitons are not read anymore (eV) (default=8.0)
text -> Skips writing the .dat file (default: True)
draw -> Skips drawing (plotting) the abs spectra (default: True)
"""
#find the save folder
lat = YamboSaveDB.from_db_file(os.path.join(folder,'SAVE'))
#find all ndb.BS_diago_Q01 files in the folder
io = OrderedDict()
for root, dirs, files in os.walk(folder):
#get starting name of folder
basename = os.path.basename(root)
#look into folders starting with var or reference
if any( [basename.startswith(v) for v in [var,'reference']] ):
for filename in files:
if filename != 'ndb.BS_diago_Q01': continue
#get ndb.BS_diago_Q01 file in folder
io[basename] = ( YamboIn.from_file(folder=folder,filename="%s.in"%basename),
YamboExcitonDB.from_db_file(lat,folder=root,filename=filename) )
#TODO consistency check
exciton_energies = []
exciton_spectras = []
for basename, (inp,out) in io.items():
#get input
value, unit = inp[var]
#get exiton energies
exciton_energy = out.eigenvalues.real
#get excitonic spectra
exciton_spectra = out.get_chi()
#check type of variable
if isinstance(value,list): value = value[1]
exciton_energies.append([value,exciton_energy])
exciton_spectras.append([value,exciton_spectra])
exciton_spectras = sorted(exciton_spectras,key=lambda x: x[0])
exciton_energies = sorted(exciton_energies,key=lambda x: x[0])
    #save a file with the exciton energies
output_folder = 'analyse_%s'%folder
if not os.path.isdir(output_folder): os.mkdir(output_folder)
output_file = '%s_exciton_energies.dat'%var
with open(os.path.join(output_folder,output_file),'w') as f:
header = "%s (%s)\n"%(var,unit) if unit else "%s\n"%var
f.write(header)
for value,energies in exciton_energies:
f.write("{} ".format(value)+("%10.6lf "*numbexc)%tuple(energies[:numbexc])+"\n")
import matplotlib.pyplot as plt
## Exciton spectra plots
filename = 'exciton_spectra.png'
fig = plt.figure(figsize=(6,5))
ax = fig.add_subplot(1,1,1)
#plot the spectra
cmap = plt.get_cmap('viridis')
nspectra = len(exciton_spectras)
for i,(value,(w,spectra)) in enumerate(exciton_spectras):
plt.plot(w,spectra.imag,c=cmap(i/nspectra),label="{} = {} {}".format(var,value,unit))
## Spectra plots
ax.set_xlabel('$\omega$ (eV)')
ax.set_ylabel('Im($\epsilon_M$)')
ax.legend(frameon=False)
output_file = '%s_exciton_spectra.pdf'%var
fig.savefig(os.path.join(output_folder,output_file))
if draw: plt.show()
#
# by Fulvio Paleari & Henrique Miranda
#
def merge_qp(output,files,verbose=False):
"""
Merge the quasiparticle databases produced by yambo
"""
#read all the files and display main info in each of them
print("=========input=========")
filenames = [ f.name for f in files]
datasets = [ Dataset(filename) for filename in filenames]
QP_table, QP_kpts, QP_E_E0_Z = [], [], []
for d,filename in zip(datasets,filenames):
_, nkpoints, nqps, _, nstrings = list(map(int,d['PARS'][:]))
print("filename: ", filename)
if verbose:
print("description:")
for i in range(1,nstrings+1):
print(''.join(d['DESC_strings_%05d'%i][0]))
else:
print("description:", ''.join(d['DESC_strings_%05d'%(nstrings)][0]))
print()
QP_table.append( d['QP_table'][:].T )
QP_kpts.append( d['QP_kpts'][:].T )
QP_E_E0_Z.append( d['QP_E_Eo_Z'][:] )
# create the QP_table
QP_table_save = np.vstack(QP_table)
# create the kpoints table
#create a list with the bigger size of QP_table
nkpoints = int(max(QP_table_save[:,2]))
QP_kpts_save = np.zeros([nkpoints,3])
#iterate over the QP's and store the corresponding kpoint
for qp_file,kpts in zip(QP_table,QP_kpts):
#iterate over the kpoints and save the coordinates on the list
for qp in qp_file:
n1,n2,nk = list(map(int,qp))
QP_kpts_save[nk-1] = kpts[nk-1]
# create the QPs energies table
QP_E_E0_Z_save = np.concatenate(QP_E_E0_Z,axis=1)
#create reference file from one of the files
netcdf_format = datasets[0].data_model
fin = datasets[0]
fout = Dataset(output,'w',format=netcdf_format)
variables_update = ['QP_table', 'QP_kpts', 'QP_E_Eo_Z']
variables_save = [QP_table_save.T, QP_kpts_save.T, QP_E_E0_Z_save]
variables_dict = dict(list(zip(variables_update,variables_save)))
PARS_save = fin['PARS'][:]
PARS_save[1:3] = nkpoints,len(QP_table_save)
#create the description string
kmin,kmax = np.amin(QP_table_save[:,2]),np.amax(QP_table_save[:,2])
bmin,bmax = np.amin(QP_table_save[:,1]),np.amax(QP_table_save[:,1])
description = "QP @ K %03d - %03d : b %03d - %03d"%(kmin,kmax,bmin,bmax)
description_save = np.array([i for i in " %s"%description])
#output data
print("========output=========")
print("filename: ", output)
print("description: ", description)
#copy dimensions
for dname, the_dim in list(fin.dimensions.items()):
fout.createDimension(dname, len(the_dim) if not the_dim.isunlimited() else None)
#get dimensions
def dimensions(array):
return tuple([ 'D_%010d'%d for d in array.shape ])
#create missing dimensions
for v in variables_save:
for dname,d in zip( dimensions(v),v.shape ):
if dname not in list(fout.dimensions.keys()):
fout.createDimension(dname, d)
#copy variables
for v_name, varin in list(fin.variables.items()):
if v_name in variables_update:
#get the variable
merged = variables_dict[v_name]
# create the variable
outVar = fout.createVariable(v_name, varin.datatype, dimensions(merged))
# Copy variable attributes
outVar.setncatts({k: varin.getncattr(k) for k in varin.ncattrs()})
#save outvar
outVar[:] = merged
else:
# create the variable
outVar = fout.createVariable(v_name, varin.datatype, varin.dimensions)
# Copy variable attributes
outVar.setncatts({k: varin.getncattr(k) for k in varin.ncattrs()})
if v_name=='PARS':
outVar[:] = PARS_save[:]
elif v_name=='DESC_strings_%05d'%(nstrings):
outVar[:] = varin[:]
outVar[:,:len(description_save)] = description_save.T
else:
outVar[:] = varin[:]
fout.close()
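# Hedged usage sketch (paths are illustrative): merge_qp() expects open file
# objects (only their .name attribute is used) and writes one merged ndb.QP.
#
#   files = [open('run1/ndb.QP', 'rb'), open('run2/ndb.QP', 'rb')]
#   merge_qp('ndb_merged.QP', files)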
#
# by Alexandre Morlet, Fulvio Paleari & Henrique Miranda
#
def add_qp(output,add=[],substract=[],addimg=[],verbose=False):
"""
Add quasiparticle lifetimes from multiple files
"""
# Define filenames
addf=[f.name for f in add]
subf=[f.name for f in substract]
addimgf=[f.name for f in addimg]
filenames = addf+subf+addimgf
    if len(filenames) == 0:
raise ValueError('No files passed to function.')
# Init empty lists and dics
sizes=[] # contains the various 'PARS'
QP_table, QP_kpts, QP_E_E0_Z = {},{},{} # read value for each file
qpdic = {} # used to calculate the final E (real part)
qpdici = {} # used to calculate the final E (img part)
# Read the files
datasets = [ Dataset(filename) for filename in filenames]
print("\n Reading input files\n")
for d,f in zip(datasets,filenames):
print("filename: %s"%f)
# read sizes
_, nkpoints, nqps, _, nstrings = list(map(int,d['PARS'][:]))
sizes.append((f,(nkpoints,nqps,nstrings)))
# Check if the number of kpoints is consistent
# (Don't forget to break symmetries on every file for RT)
if nkpoints!=sizes[0][1][0]:
raise ValueError('File %s does not have the same number of kpoints'%f)
# printing the description string
# (breaking the symmetries doesn't update the descr)
if verbose:
print("description:")
for i in range(1,nstrings+1):
print(''.join(d['DESC_strings_%05d'%i][0]))
else:
print("description:", ''.join(d['DESC_strings_%05d'%(nstrings)][0]))
# fill dictionaries with data for all files
QP_table[f] = d['QP_table'][:].T
QP_kpts[f] = d['QP_kpts'][:].T
QP_E_E0_Z[f]= d['QP_E_Eo_Z'][:]
# Init qpdic & qpdici (going through each file in case the number of bands is different)
# For qpdici, we assume Im(Eo)=0
for (n1,n2,k),(E,Eo,Z) in zip(QP_table[f],QP_E_E0_Z[f][0]):
qpdic[(n1,n2,k)]=Eo
qpdici[(n1,n2,k)]=0
print("Number of k points: %s\n"%nkpoints)
# keys are sorted in the order yambo usually writes DBs
qpkeys = sorted(list(qpdic.keys()),key=itemgetter(2,1))
# For E, [0,:,:] is real part and [1,:,:] is img part
QP_E_E0_Z_save = np.zeros((2,len(qpkeys),3))
QP_table_save = np.zeros((len(qpkeys),3))
# create and init the QPs energies table
# The E0 is simply written in the real part (is 0 in the img part)
# and Z = 1 (since we merge different calculation types)
for i,(n1,n2,k) in enumerate(qpkeys):
QP_E_E0_Z_save[0,i,1] = qpdic[(n1,n2,k)]
QP_E_E0_Z_save[0,:,2] = 1
QP_E_E0_Z_save[1,:,1] = 0
QP_E_E0_Z_save[1,:,2] = 1
# Add corrections in real part (-a files)
for f in addf:
print('Add E corr for real part : %s'%f)
for (n1,n2,k),(E,Eo,Z) in zip(QP_table[f],QP_E_E0_Z[f][0]):
qpdic[(n1,n2,k)]+=E-Eo
# Sub corrections in real part (-s files)
for f in subf:
print('Sub E corr for real part : %s'%f)
for (n1,n2,k),(E,Eo,Z) in zip(QP_table[f],QP_E_E0_Z[f][0]):
qpdic[(n1,n2,k)]-=E-Eo
# Add corrections in img part (-ai files)
for f in addimgf:
print('Add E corr for img part : %s'%f)
for (n1,n2,k),(E,Eo,Z) in zip(QP_table[f],QP_E_E0_Z[f][1]):
qpdici[(n1,n2,k)]+=E-Eo
# create the kpoints table
# We put the restriction to have the same number of k points (same grid), so any file fits
QP_kpts_save = QP_kpts[filenames[0]]
# Filling the E column
for i,(n1,n2,k) in enumerate(qpkeys):
QP_table_save[i]=[n1,n2,k]
QP_E_E0_Z_save[0,i,0]+=qpdic[(n1,n2,k)]
QP_E_E0_Z_save[1,i,0]+=qpdici[(n1,n2,k)]
## Output file
#create reference file from one of the files
netcdf_format = datasets[0].data_model
fin = datasets[0]
fout = Dataset(output,'w',format=netcdf_format)
variables_update = ['QP_table', 'QP_kpts', 'QP_E_Eo_Z']
variables_save = [QP_table_save.T, QP_kpts_save.T, QP_E_E0_Z_save]
variables_dict = dict(list(zip(variables_update,variables_save)))
PARS_save = fin['PARS'][:]
PARS_save[1:3] = sizes[0][1][0],len(QP_table_save)
#create the description string
kmin,kmax = np.amin(QP_table_save[:,2]),np.amax(QP_table_save[:,2])
bmin,bmax = np.amin(QP_table_save[:,1]),np.amax(QP_table_save[:,1])
description = "QP @ K %03d - %03d : b %03d - %03d"%(kmin,kmax,bmin,bmax)
description_save = np.array([i for i in " %s"%description])
#output data
print("\n Producing output file\n")
print("filename: ", output)
print("description: ", description)
#copy dimensions
for dname, the_dim in list(fin.dimensions.items()):
fout.createDimension(dname, len(the_dim) if not the_dim.isunlimited() else None)
#get dimensions
def dimensions(array):
return tuple([ 'D_%010d'%d for d in array.shape ])
#create missing dimensions
for v in variables_save:
for dname,d in zip( dimensions(v),v.shape ):
if dname not in list(fout.dimensions.keys()):
fout.createDimension(dname, d)
#copy variables
for v_name, varin in list(fin.variables.items()):
if v_name in variables_update:
#get the variable
merged = variables_dict[v_name]
# create the variable
outVar = fout.createVariable(v_name, varin.datatype, dimensions(merged))
# Copy variable attributes
outVar.setncatts({k: varin.getncattr(k) for k in varin.ncattrs()})
#save outvar
outVar[:] = merged
else:
# create the variable
outVar = fout.createVariable(v_name, varin.datatype, varin.dimensions)
# Copy variable attributes
outVar.setncatts({k: varin.getncattr(k) for k in varin.ncattrs()})
if v_name=='PARS':
outVar[:] = PARS_save[:]
elif v_name=='DESC_strings_%05d'%(nstrings):
outVar[:] = varin[:]
outVar[:,:len(description_save)] = description_save.T
else:
outVar[:] = varin[:]
fout.close()
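# Hedged usage sketch (paths are illustrative): add the real-part corrections
# from one ndb.QP and subtract those of another; arguments are open files.
#
#   add_qp('ndb_summed.QP', add=[open('gw/ndb.QP', 'rb')],
#          substract=[open('old/ndb.QP', 'rb')])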
#
# by Henrique Miranda
#
def plot_excitons(filename,cut=0.2,size=20):
    from math import ceil, sqrt
    import json
    import matplotlib.pyplot as plt
def get_var(dictionary,variables):
"""
To have compatibility with different versions of yambo
We provide a list of different possible tags
"""
for var in variables:
if var in dictionary:
return dictionary[var]
raise ValueError( 'Could not find the variables %s in the output file'%str(variables) )
#
# read file
#
f = open(filename)
data = json.load(f)
f.close()
#
# plot the absorption spectra
#
nexcitons = len(data['excitons'])
print("nexitons", nexcitons)
plt.plot(get_var(data,['E/ev','E/ev[1]']), get_var(data,['EPS-Im[2]' ]),label='BSE',lw=2)
plt.plot(get_var(data,['E/ev','E/ev[1]']), get_var(data,['EPSo-Im[4]']),label='IP',lw=2)
for n,exciton in enumerate(data['excitons']):
plt.axvline(exciton['energy'])
plt.xlabel('$\\omega$ (eV)')
plt.ylabel('Intensity arb. units')
plt.legend(frameon=False)
plt.draw()
#
# plot excitons
#
#dimensions
nx = int(ceil(sqrt(nexcitons)))
ny = int(ceil(nexcitons*1.0/nx))
print("cols:",nx)
print("rows:",ny)
cmap = plt.get_cmap("gist_heat_r")
fig = plt.figure(figsize=(nx*3,ny*3))
sorted_excitons = sorted(data['excitons'],key=lambda x: x['energy'])
for n,exciton in enumerate(sorted_excitons):
#get data
w = np.array(exciton['weights'])
qpt = np.array(exciton['qpts'])
#plot
ax = plt.subplot(ny,nx,n+1)
ax.scatter(qpt[:,0], qpt[:,1], s=size, c=w, marker='H', cmap=cmap, lw=0, label="%5.2lf (eV)"%exciton['energy'])
ax.text(-cut*.9,-cut*.9,"%5.2lf (eV)"%exciton['energy'])
# axis
plt.xlim([-cut,cut])
plt.ylim([-cut,cut])
ax.yaxis.set_major_locator(plt.NullLocator())
ax.xaxis.set_major_locator(plt.NullLocator())
ax.set_aspect('equal')
plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.01, hspace=0.01)
#remove extension from file
figure_filename = os.path.splitext(filename)[0]
plt.savefig('%s.png'%figure_filename)
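# Hedged usage sketch (the json file name is illustrative): plot the absorption
# spectrum and the exciton weights stored in a ypp-generated json file.
#
#   plot_excitons('o-yambo.exc_weights.json', cut=0.2, size=20)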
|
henriquemiranda/yambo-py
|
yambopy/recipes.py
|
Python
|
bsd-3-clause
| 20,837
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "PolyTrend", cycle_length = 12, transform = "Quantization", sigma = 0.0, exog_count = 0, ar_order = 12);
|
antoinecarme/pyaf
|
tests/artificial/transf_Quantization/trend_PolyTrend/cycle_12/ar_12/test_artificial_32_Quantization_PolyTrend_12_12_0.py
|
Python
|
bsd-3-clause
| 267
|
from cms.plugin_pool import plugin_pool
from cms.plugin_base import CMSPluginBase
from django.template import loader
from django.template.loader import select_template
from django.utils.translation import ugettext_lazy as _
from . import models
from .conf import settings
from filer.models.imagemodels import Image
class FilerFolderPlugin(CMSPluginBase):
module = 'Filer'
model = models.FilerFolder
name = _("Folder")
TEMPLATE_NAME = 'cmsplugin_filer_folder/plugins/folder/%s.html'
render_template = TEMPLATE_NAME % 'default'
text_enabled = False
admin_preview = False
fieldsets = (
(None, {'fields': ['title', 'folder']}),
)
if settings.CMSPLUGIN_FILER_FOLDER_STYLE_CHOICES:
fieldsets[0][1]['fields'].append('style')
def get_folder_files(self, folder, user):
qs_files = folder.files.filter(image__isnull=True)
if user.is_staff:
return qs_files
else:
return qs_files.filter(is_public=True)
def get_folder_images(self, folder, user):
qs_files = folder.files.instance_of(Image)
if user.is_staff:
return qs_files
else:
return qs_files.filter(is_public=True)
def get_children(self, folder):
return folder.get_children()
def render(self, context, instance, placeholder):
self.render_template = select_template((
'cmsplugin_filer_folder/folder.html', # backwards compatibility. deprecated!
self.TEMPLATE_NAME % instance.style,
self.TEMPLATE_NAME % 'default')
).template
folder_files = self.get_folder_files(instance.folder,
context['request'].user)
folder_images = self.get_folder_images(instance.folder,
context['request'].user)
folder_folders = self.get_children(instance.folder)
context.update({
'object': instance,
'folder_files': sorted(folder_files),
'folder_images': sorted(folder_images),
'folder_folders': folder_folders,
'placeholder': placeholder
})
return context
plugin_pool.register_plugin(FilerFolderPlugin)
|
douwevandermeij/cmsplugin-filer
|
cmsplugin_filer_folder/cms_plugins.py
|
Python
|
bsd-3-clause
| 2,258
|
###############################
# Author : septicmk
# Date : 2015/07/25 16:14:09
# FileName : main.py
################################
from lambdaimage import preprocess as prep
from lambdaimage import registration as reg
from lambdaimage import fusion as fus
from pyspark import SparkContext, SparkConf
from lambdaimage import lambdaimageContext
from lambdaimage.utils.tool import exeTime, log, showsize
from parseXML import load_xml_file, get_function
import numpy as np
conf = SparkConf().setAppName('test').setMaster('local[1]').set('spark.executor.memory','2g').set('spark.driver.maxResultSize','6g').set('spark.driver.memory','8g').set('spark.local.dir','/dev/shm').set('spark.storage.memoryFraction','0.2').set('spark.default.parallelism','10')
tsc=lambdaimageContext.start(conf=conf)
result = load_xml_file("./lambdaimage.xml")
log('info')('tiff load start...')
rddA = tsc.loadImages('/home/wb/data/1-L/*.tif', inputFormat='tif-stack')
rddB = tsc.loadImages('/home/wb/data/1-R/*.tif', inputFormat='tif-stack')
log('info')('tiff load over...')
log('info')('intensity normalization start ...')
rddA = prep.intensity_normalization(rddA)
rddB = prep.intensity_normalization(rddB)
rddB = prep.flip(rddB)
_rddA = prep.intensity_normalization(rddA,8)
_rddB = prep.intensity_normalization(rddB,8)
log('info')('intensity normalization over ...')
log('info')('registration start ...')
vec0 = [0,0,0,1,1,0,0]
#vec = reg.c_powell(_rddA.get(4), _rddB.get(4), vec0)
vec = eval(get_function("reg",result))(_rddA.get(4), _rddB.get(4), vec0)
rddB = reg.execute(rddB, vec)
log('info')('registration over ...')
log('info')('fusion start ...')
L_img_stack = rddA.collectValuesAsArray()
R_img_stack = rddB.collectValuesAsArray()
img_stack = zip(L_img_stack, R_img_stack)
rdd = tsc.loadImagesFromArray(img_stack)
#fused_img = fus.wavelet_fusion(rdd)
fused_img = eval(get_function("fus", result))(rdd)
fused_img = tsc.loadImagesFromArray(fused_img)
log('info')('fusion over ...')
log('info')('saving ...')
fused_img.exportAsTiffs('/home/wb/data/lambdaimage/fusion',overwrite = True)
#fused_img = np.squeeze(np.array(fused_img.values().collect()))
log('info')('subtract background start ...')
sb_img = prep.subtract_Background(fused_img)
log('info')('subtract background over ... ')
log('info')('saving ...')
sb_img.exportAsTiffs('/home/wb/data/lambdaimage/subtract',overwrite = True)
|
genialwang/lambda-image
|
script/mehi_local.py
|
Python
|
bsd-3-clause
| 2,385
|
import os
import io
import shutil
import tempfile
import unittest
from functools import partial
from pathlib import Path
from nbformat import validate
try:
from unittest.mock import patch
except ImportError:
from mock import patch
from .. import engines
from ..log import logger
from ..iorw import load_notebook_node
from ..utils import chdir
from ..execute import execute_notebook
from ..exceptions import PapermillExecutionError
from . import get_notebook_path, kernel_name
execute_notebook = partial(execute_notebook, kernel_name=kernel_name)
class TestNotebookHelpers(unittest.TestCase):
def setUp(self):
self.test_dir = tempfile.mkdtemp()
self.notebook_name = 'simple_execute.ipynb'
self.notebook_path = get_notebook_path(self.notebook_name)
self.nb_test_executed_fname = os.path.join(
self.test_dir, 'output_{}'.format(self.notebook_name)
)
def tearDown(self):
shutil.rmtree(self.test_dir)
@patch(engines.__name__ + '.PapermillNotebookClient')
def test_start_timeout(self, preproc_mock):
execute_notebook(self.notebook_path, self.nb_test_executed_fname, start_timeout=123)
args, kwargs = preproc_mock.call_args
expected = [
('timeout', None),
('startup_timeout', 123),
('kernel_name', kernel_name),
('log', logger),
]
actual = set([(key, kwargs[key]) for key in kwargs])
self.assertTrue(
set(expected).issubset(actual),
msg='Expected arguments {} are not a subset of actual {}'.format(expected, actual),
)
@patch(engines.__name__ + '.PapermillNotebookClient')
def test_default_start_timeout(self, preproc_mock):
execute_notebook(self.notebook_path, self.nb_test_executed_fname)
args, kwargs = preproc_mock.call_args
expected = [
('timeout', None),
('startup_timeout', 60),
('kernel_name', kernel_name),
('log', logger),
]
actual = set([(key, kwargs[key]) for key in kwargs])
self.assertTrue(
set(expected).issubset(actual),
msg='Expected arguments {} are not a subset of actual {}'.format(expected, actual),
)
def test_cell_insertion(self):
execute_notebook(self.notebook_path, self.nb_test_executed_fname, {'msg': 'Hello'})
test_nb = load_notebook_node(self.nb_test_executed_fname)
self.assertListEqual(
test_nb.cells[1].get('source').split('\n'), ['# Parameters', 'msg = "Hello"', '']
)
self.assertEqual(test_nb.metadata.papermill.parameters, {'msg': 'Hello'})
def test_no_tags(self):
notebook_name = 'no_parameters.ipynb'
nb_test_executed_fname = os.path.join(self.test_dir, 'output_{}'.format(notebook_name))
execute_notebook(get_notebook_path(notebook_name), nb_test_executed_fname, {'msg': 'Hello'})
test_nb = load_notebook_node(nb_test_executed_fname)
self.assertListEqual(
test_nb.cells[0].get('source').split('\n'), ['# Parameters', 'msg = "Hello"', '']
)
self.assertEqual(test_nb.metadata.papermill.parameters, {'msg': 'Hello'})
def test_quoted_params(self):
execute_notebook(self.notebook_path, self.nb_test_executed_fname, {'msg': '"Hello"'})
test_nb = load_notebook_node(self.nb_test_executed_fname)
self.assertListEqual(
test_nb.cells[1].get('source').split('\n'), ['# Parameters', r'msg = "\"Hello\""', '']
)
self.assertEqual(test_nb.metadata.papermill.parameters, {'msg': '"Hello"'})
def test_backslash_params(self):
execute_notebook(
self.notebook_path, self.nb_test_executed_fname, {'foo': r'do\ not\ crash'}
)
test_nb = load_notebook_node(self.nb_test_executed_fname)
self.assertListEqual(
test_nb.cells[1].get('source').split('\n'),
['# Parameters', r'foo = "do\\ not\\ crash"', ''],
)
self.assertEqual(test_nb.metadata.papermill.parameters, {'foo': r'do\ not\ crash'})
def test_backslash_quote_params(self):
execute_notebook(self.notebook_path, self.nb_test_executed_fname, {'foo': r'bar=\"baz\"'})
test_nb = load_notebook_node(self.nb_test_executed_fname)
self.assertListEqual(
test_nb.cells[1].get('source').split('\n'),
['# Parameters', r'foo = "bar=\\\"baz\\\""', ''],
)
self.assertEqual(test_nb.metadata.papermill.parameters, {'foo': r'bar=\"baz\"'})
def test_double_backslash_quote_params(self):
execute_notebook(self.notebook_path, self.nb_test_executed_fname, {'foo': r'\\"bar\\"'})
test_nb = load_notebook_node(self.nb_test_executed_fname)
self.assertListEqual(
test_nb.cells[1].get('source').split('\n'),
['# Parameters', r'foo = "\\\\\"bar\\\\\""', ''],
)
self.assertEqual(test_nb.metadata.papermill.parameters, {'foo': r'\\"bar\\"'})
def test_prepare_only(self):
for example in ['broken1.ipynb', 'keyboard_interrupt.ipynb']:
path = get_notebook_path(example)
result_path = os.path.join(self.test_dir, example)
# Should not raise as we don't execute the notebook at all
execute_notebook(path, result_path, {'foo': r'do\ not\ crash'}, prepare_only=True)
nb = load_notebook_node(result_path)
self.assertEqual(nb.cells[0].cell_type, "code")
self.assertEqual(
nb.cells[0].get('source').split('\n'),
['# Parameters', r'foo = "do\\ not\\ crash"', ''],
)
class TestBrokenNotebook1(unittest.TestCase):
def setUp(self):
self.test_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.test_dir)
def test(self):
path = get_notebook_path('broken1.ipynb')
# check that the notebook has two existing marker cells, so that this test is sure to be
        # validating the removal logic (the markers are simulating an error in the first code cell
# that has since been fixed)
original_nb = load_notebook_node(path)
self.assertEqual(original_nb.cells[0].metadata["tags"], ["papermill-error-cell-tag"])
self.assertIn("In [1]", original_nb.cells[0].source)
self.assertEqual(original_nb.cells[2].metadata["tags"], ["papermill-error-cell-tag"])
result_path = os.path.join(self.test_dir, 'broken1.ipynb')
with self.assertRaises(PapermillExecutionError):
execute_notebook(path, result_path)
nb = load_notebook_node(result_path)
self.assertEqual(nb.cells[0].cell_type, "markdown")
self.assertRegex(
nb.cells[0].source, r'^<span .*<a href="#papermill-error-cell".*In \[2\].*</span>$'
)
self.assertEqual(nb.cells[0].metadata["tags"], ["papermill-error-cell-tag"])
self.assertEqual(nb.cells[1].cell_type, "markdown")
self.assertEqual(nb.cells[2].execution_count, 1)
self.assertEqual(nb.cells[3].cell_type, "markdown")
self.assertEqual(nb.cells[4].cell_type, "markdown")
self.assertEqual(nb.cells[5].cell_type, "markdown")
self.assertRegex(nb.cells[5].source, '<span id="papermill-error-cell" .*</span>')
self.assertEqual(nb.cells[5].metadata["tags"], ["papermill-error-cell-tag"])
self.assertEqual(nb.cells[6].execution_count, 2)
self.assertEqual(nb.cells[6].outputs[0].output_type, 'error')
self.assertEqual(nb.cells[7].execution_count, None)
# double check the removal (the new cells above should be the only two tagged ones)
self.assertEqual(
sum("papermill-error-cell-tag" in cell.metadata.get("tags", []) for cell in nb.cells), 2
)
class TestBrokenNotebook2(unittest.TestCase):
def setUp(self):
self.test_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.test_dir)
def test(self):
path = get_notebook_path('broken2.ipynb')
result_path = os.path.join(self.test_dir, 'broken2.ipynb')
with self.assertRaises(PapermillExecutionError):
execute_notebook(path, result_path)
nb = load_notebook_node(result_path)
self.assertEqual(nb.cells[0].cell_type, "markdown")
self.assertRegex(
nb.cells[0].source, r'^<span .*<a href="#papermill-error-cell">.*In \[2\].*</span>$'
)
self.assertEqual(nb.cells[1].execution_count, 1)
self.assertEqual(nb.cells[2].cell_type, "markdown")
self.assertRegex(nb.cells[2].source, '<span id="papermill-error-cell" .*</span>')
self.assertEqual(nb.cells[3].execution_count, 2)
self.assertEqual(nb.cells[3].outputs[0].output_type, 'display_data')
self.assertEqual(nb.cells[3].outputs[1].output_type, 'error')
self.assertEqual(nb.cells[4].execution_count, None)
class TestReportMode(unittest.TestCase):
def setUp(self):
self.test_dir = tempfile.mkdtemp()
self.notebook_name = 'report_mode_test.ipynb'
self.notebook_path = get_notebook_path(self.notebook_name)
self.nb_test_executed_fname = os.path.join(
self.test_dir, 'output_{}'.format(self.notebook_name)
)
def tearDown(self):
shutil.rmtree(self.test_dir)
def test_report_mode(self):
nb = execute_notebook(
self.notebook_path, self.nb_test_executed_fname, {'a': 0}, report_mode=True
)
for cell in nb.cells:
if cell.cell_type == 'code':
self.assertEqual(cell.metadata.get('jupyter', {}).get('source_hidden'), True)
class TestCWD(unittest.TestCase):
def setUp(self):
self.test_dir = tempfile.mkdtemp()
self.base_test_dir = tempfile.mkdtemp()
self.check_notebook_name = 'read_check.ipynb'
self.check_notebook_path = os.path.join(self.base_test_dir, 'read_check.ipynb')
# Setup read paths so base_test_dir has check_notebook_name
shutil.copyfile(get_notebook_path(self.check_notebook_name), self.check_notebook_path)
with io.open(os.path.join(self.test_dir, 'check.txt'), 'w', encoding='utf-8') as f:
# Needed for read_check to pass
f.write(u'exists')
self.simple_notebook_name = 'simple_execute.ipynb'
self.simple_notebook_path = os.path.join(self.base_test_dir, 'simple_execute.ipynb')
# Setup read paths so base_test_dir has simple_notebook_name
shutil.copyfile(get_notebook_path(self.simple_notebook_name), self.simple_notebook_path)
self.nb_test_executed_fname = 'test_output.ipynb'
def tearDown(self):
shutil.rmtree(self.test_dir)
shutil.rmtree(self.base_test_dir)
def test_local_save_ignores_cwd_assignment(self):
with chdir(self.base_test_dir):
# Both paths are relative
execute_notebook(
self.simple_notebook_name, self.nb_test_executed_fname, cwd=self.test_dir
)
self.assertTrue(
os.path.isfile(os.path.join(self.base_test_dir, self.nb_test_executed_fname))
)
def test_execution_respects_cwd_assignment(self):
with chdir(self.base_test_dir):
# Both paths are relative
execute_notebook(
self.check_notebook_name, self.nb_test_executed_fname, cwd=self.test_dir
)
self.assertTrue(
os.path.isfile(os.path.join(self.base_test_dir, self.nb_test_executed_fname))
)
def test_pathlib_paths(self):
# Copy of test_execution_respects_cwd_assignment but with `Path`s
with chdir(self.base_test_dir):
execute_notebook(
Path(self.check_notebook_name),
Path(self.nb_test_executed_fname),
cwd=Path(self.test_dir),
)
self.assertTrue(Path(self.base_test_dir).joinpath(self.nb_test_executed_fname).exists())
class TestSysExit(unittest.TestCase):
def setUp(self):
self.test_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.test_dir)
def test_sys_exit(self):
notebook_name = 'sysexit.ipynb'
result_path = os.path.join(self.test_dir, 'output_{}'.format(notebook_name))
execute_notebook(get_notebook_path(notebook_name), result_path)
nb = load_notebook_node(result_path)
self.assertEqual(nb.cells[0].cell_type, "code")
self.assertEqual(nb.cells[0].execution_count, 1)
self.assertEqual(nb.cells[1].execution_count, 2)
self.assertEqual(nb.cells[1].outputs[0].output_type, 'error')
self.assertEqual(nb.cells[1].outputs[0].ename, 'SystemExit')
self.assertEqual(nb.cells[1].outputs[0].evalue, '')
self.assertEqual(nb.cells[2].execution_count, None)
def test_sys_exit0(self):
notebook_name = 'sysexit0.ipynb'
result_path = os.path.join(self.test_dir, 'output_{}'.format(notebook_name))
execute_notebook(get_notebook_path(notebook_name), result_path)
nb = load_notebook_node(result_path)
self.assertEqual(nb.cells[0].cell_type, "code")
self.assertEqual(nb.cells[0].execution_count, 1)
self.assertEqual(nb.cells[1].execution_count, 2)
self.assertEqual(nb.cells[1].outputs[0].output_type, 'error')
self.assertEqual(nb.cells[1].outputs[0].ename, 'SystemExit')
self.assertEqual(nb.cells[1].outputs[0].evalue, '0')
self.assertEqual(nb.cells[2].execution_count, None)
def test_sys_exit1(self):
notebook_name = 'sysexit1.ipynb'
result_path = os.path.join(self.test_dir, 'output_{}'.format(notebook_name))
with self.assertRaises(PapermillExecutionError):
execute_notebook(get_notebook_path(notebook_name), result_path)
nb = load_notebook_node(result_path)
self.assertEqual(nb.cells[0].cell_type, "markdown")
self.assertRegex(
nb.cells[0].source, r'^<span .*<a href="#papermill-error-cell".*In \[2\].*</span>$'
)
self.assertEqual(nb.cells[1].execution_count, 1)
self.assertEqual(nb.cells[2].cell_type, "markdown")
self.assertRegex(nb.cells[2].source, '<span id="papermill-error-cell" .*</span>')
self.assertEqual(nb.cells[3].execution_count, 2)
self.assertEqual(nb.cells[3].outputs[0].output_type, 'error')
self.assertEqual(nb.cells[4].execution_count, None)
def test_system_exit(self):
notebook_name = 'systemexit.ipynb'
result_path = os.path.join(self.test_dir, 'output_{}'.format(notebook_name))
execute_notebook(get_notebook_path(notebook_name), result_path)
nb = load_notebook_node(result_path)
self.assertEqual(nb.cells[0].cell_type, "code")
self.assertEqual(nb.cells[0].execution_count, 1)
self.assertEqual(nb.cells[1].execution_count, 2)
self.assertEqual(nb.cells[1].outputs[0].output_type, 'error')
self.assertEqual(nb.cells[1].outputs[0].ename, 'SystemExit')
self.assertEqual(nb.cells[1].outputs[0].evalue, '')
self.assertEqual(nb.cells[2].execution_count, None)
class TestNotebookValidation(unittest.TestCase):
def setUp(self):
self.test_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.test_dir)
def test_from_version_4_4_upgrades(self):
notebook_name = 'nb_version_4.4.ipynb'
result_path = os.path.join(self.test_dir, 'output_{}'.format(notebook_name))
execute_notebook(get_notebook_path(notebook_name), result_path, {'var': 'It works'})
nb = load_notebook_node(result_path)
validate(nb)
class TestMinimalNotebook(unittest.TestCase):
def setUp(self):
self.test_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.test_dir)
def test_no_v3_language_backport(self):
notebook_name = 'blank-vscode.ipynb'
result_path = os.path.join(self.test_dir, 'output_{}'.format(notebook_name))
execute_notebook(get_notebook_path(notebook_name), result_path, {'var': 'It works'})
nb = load_notebook_node(result_path)
validate(nb)
|
nteract/papermill
|
papermill/tests/test_execute.py
|
Python
|
bsd-3-clause
| 16,273
|
#!/usr/bin/env python
"""
This file contains the sample code for XXX. Search for the string CONTENT to
skip directly to it.
Executing this file will begin an enhanced interactive Python session. You
can step through each slide of sample code and explore the results. If your
explorations hose the environment, just quit, restart and jump directly to the
slide where you left off.
The block of code below sets up the interactive session. Skip over it to view
the sample code.
"""
##############################################################################
#
# sliderepl - 0.12
# Copyright (c) Jason Kirtland <jek@discorporate.us>
# sliderepl lives at http://discorporate.us/projects/sliderepl
# sliderepl is licensed under the MIT License:
# http://www.opensource.org/licenses/mit-license.php
#
# sliderepl may be textually included in a file that also contains its input
# data. The input data may be under a different license. sliderepl's MIT
# License covers only this block of sliderepl code, extensions and other
# derivative works.
#
# Looking for the sample code?
# _ _ _
# ___ ___ _ __ ___ | | | __| | _____ ___ __
# / __|/ __| '__/ _ \| | | / _` |/ _ \ \ /\ / / '_ \
# \__ \ (__| | | (_) | | | | (_| | (_) \ V V /| | | |
# |___/\___|_| \___/|_|_| \__,_|\___/ \_/\_/ |_| |_|
#
# This bootstrapping code loads the sample code below into an interactive
# Python session.
environ = globals().copy()
import code, inspect, itertools, logging, re, sys, traceback
try:
import rlcompleter, readline
except ImportError:
readline = None
class Deck(object):
expose = ('commands', 'next', 'goto', 'show', 'info', 'quick')
def __init__(self, path=None):
self.path = path or '<no file>'
self.slides = []
self.current = 0
self.enter_advances = False
def start(self):
pass
def next(self):
"""Advance to the next slide."""
if self.current >= len(self.slides):
print "% The slideshow is over."
return
slide = self.slides[self.current]
self.current += 1
print "%%\n%% Running slide %s\n%%" % (self.current)
if (slide.name and
not (slide.name.isdigit() and int(slide.name) == self.current)):
print "%% %s" % slide.name
slide.run()
def slide_actor(fn):
def decorated(self, slide_number):
if isinstance(slide_number, str) and not slide_number.isdigit():
print "%% Usage: %s slide_number" % fn.__name__
return
num = int(slide_number)
if num < 1 or num > len(self.slides):
print "%% Slide #%s is out of range (1 - %s)." % (
num, len(self.slides))
else:
return fn(self, num)
decorated.__doc__ = fn.__doc__
return decorated
def show(self, slide_number):
"""show NUM, display a slide without executing it."""
print str(self.slides[slide_number - 1]).strip()
show = slide_actor(show)
def goto(self, slide_number):
"""goto NUM, skip forward to another slide."""
if slide_number <= self.current:
print "% Cowardly refusing to re-run slides."
else:
for _ in range(slide_number - self.current):
self.next()
goto = slide_actor(goto)
def info(self):
"""Display information about this slide deck."""
print "%% Now at slide %s of %s from deck %s" % (
self.current, len(self.slides), self.path)
def commands(self):
"""Display this help message."""
for cmd in self.expose:
print "% " + cmd
print "%\t" + getattr(self, cmd).__doc__
def quick(self, toggle):
"""quick on|off, type enter to advance to the next slide."""
if toggle not in ('on', 'off'):
print 'usage: quick on|off'
else:
self.enter_advances = (toggle == 'on')
print "%% Quick mode %s (enter will advance to the next slide)" % (
toggle)
del slide_actor
class Slide(object):
def __init__(self, name=None):
self.name = name
self.codeblocks = []
self.lines = []
self._stack = []
self._level = None
def run(self):
for display, co in self.codeblocks:
if not getattr(self, 'no_echo', False):
shown = [getattr(sys, 'ps1', '>>> ') + display[0]]
shown.extend([getattr(sys, 'ps2', '... ') + l
for l in display[1:]])
Deck._add_history(''.join(display).rstrip())
print ''.join(shown).rstrip()
try:
exec co in environ
except:
traceback.print_exc()
def __str__(self):
return ''.join(self.lines)
def _append(self, line):
self.lines.append(line)
if not self._stack and line.isspace():
return
indent = len(line) - len(line.lstrip())
if not self._stack:
self._level = indent
elif indent <= self._level:
try:
co = self._compile()
if co:
self.codeblocks.append((self._pop(), co))
except SyntaxError:
pass
self._stack.append(line)
def _close(self):
if self._stack:
co = self._compile()
assert co
self.codeblocks.append((self._pop(), co))
def _compile(self):
style = getattr(self, 'no_return', False) and 'exec' or 'single'
return code.compile_command(''.join(self._stack), '<input>', style)
def _pop(self):
self._stack.reverse()
lines = list(itertools.dropwhile(str.isspace, self._stack))
lines.reverse()
self._stack = []
return lines
def run(cls, path=None):
"""Run an interactive session for this Deck and exit when complete.
If '--run-all' is first on the command line, all slides are executed
immediately and the script will exit. Useful for sanity testing.
"""
if path is None:
path = sys.argv[0]
deck = cls.from_path(path)
if not deck:
sys.stderr.write("Aborting: no slides!\n")
sys.exit(-1)
deck.start()
if sys.argv[1:] and sys.argv[1] == '--run-all':
deck.goto(len(deck.slides))
sys.exit(0)
console = code.InteractiveConsole()
global environ
environ = console.locals
console.raw_input = deck.readfunc
if readline:
readline.parse_and_bind('tab: complete')
readline.set_completer(rlcompleter.Completer(environ).complete)
console.interact(deck.banner)
sys.exit(0)
run = classmethod(run)
def from_path(cls, path):
"""Create a Deck from slides embedded in a file at path."""
sl_re = re.compile(r'### +slide::\s*')
a_re = re.compile(r',\s*')
fh, deck, slide = open(path), cls(path), None
for line in fh:
if not sl_re.match(line):
if slide:
slide._append(line)
continue
if slide:
slide._close()
deck.slides.append(slide)
metadata = sl_re.split(line, 2)[1].split('-*-', 2)
name = metadata[0].strip()
if name == 'end':
break
slide = cls.Slide(name=name or None)
if len(metadata) >= 2:
for option in (metadata[1] and a_re.split(metadata[1]) or []):
setattr(slide, option.strip(), True)
fh.close()
return deck.slides and deck or None
from_path = classmethod(from_path)
def banner(self):
return """\
%% This is an interactive Python prompt.
%% Extra commands: %s
%% Type "next" to start the presentation.""" % ', '.join(self.expose)
banner = property(banner)
def readfunc(self, prompt=''):
line = raw_input(prompt)
if prompt == getattr(sys, 'ps1', '>>> '):
tokens = line.split()
if line == '' and self.enter_advances:
tokens = ('next',)
if tokens and tokens[0] in self.expose:
fn = getattr(self, tokens[0])
if len(tokens) != len(inspect.getargspec(fn)[0]):
print "usage: %s %s" % (
tokens[0], ' '.join(inspect.getargspec(fn)[0][1:]))
else:
self._add_history(line)
fn(*tokens[1:])
return ''
return line
def _add_history(cls, line):
if readline and line:
readline.add_history(line)
_add_history = classmethod(_add_history)
# end of sliderepl
#
##############################################################################
Deck.run()
# ____ ___ _ _ _____ _____ _ _ _____ _
# / ___/ _ \| \ | |_ _| ____| \ | |_ _| |
# | | | | | | \| | | | | _| | \| | | | | |
# | |__| |_| | |\ | | | | |___| |\ | | | |_|
# \____\___/|_| \_| |_| |_____|_| \_| |_| (_)
#
# Slide CONTENT starts here.
### slide::
welcome = ['Welcome to the demo slide content.',
'Type "next" to advance to the next slide.']
print '\n'.join(welcome)
### slide::
msg = ('sliderepl was written to provide an interactive accompaniment '
'to an in-person presentation. Slides of sample code '
'corresponded to sample code shown on screen. With sliderepl, '
'participants could run the code themselves without retyping or '
'copy-and-pasting from a text file.')
more = ('The >>> prompt is a regular interactive session, with tab '
        'completion enabled if the host supports it. '
'Type "show 2" to see the source for this slide.')
### slide::
# A basic deck with sliderepl embedded will read its own source file
# for slide definitions.
#
# Just include sliderepl before your slides and call Deck.run().
### slide::
s = 234
s
### slide::
print s
# the execution environment is preserved from slide to slide.
### slide::
try:
Deck
assert False
except NameError:
print "Slides are executed in an isolated Python environment."
print "sliderepl and its imports will not be visible to "
print "the code in your slides."
### slide::
s = """\
Slides are delimited with ### slide:: markers in the text.
They must start at the beginning of a line. If you like, you
can number them for folks reading the slide source:
### slide:: 12
The last slide must be marked:
### slide:: end
sliderepl ignores everything in the slide source file except the lines
between the first slide and the last.
"""
### slide:: -*- no_echo -*-
print """\
There are a couple of additional tricks available when defining slides.
If you want to do a purely messaging slide, like this one, you can mark
the slide -*- no_echo -*- and its code won't be shown: just the output.
Type "show 8" to see the source for this slide.
"""
### slide::
# but most of the time you'll probably just comment to illustrate a point
stuff = [i ** 2 for i in range(1, 10)]
### slide:: -*- no_echo -*-
print """\
This is about walking through code interactively, not projecting
onto a screen. There are other tools that combine a Python prompt
with "bullet points and graphics" style presentation.
That said, this deck will keep acting like a presentation anyways.
Consider it a kind of doctest.
"""
### slide::
a = "Does your terminal have readline support?"
b = "Each bit of code is added to the readline history as if you typed it."
### slide:: -*- no_return -*-
"Adding -*- no_return -*- suppresses the auto-repr of a statement's result."
"Both options can be used in combination, -*- no_echo, no_return -*-"
### slide:: -*- no_echo -*-
print """\
If you execute this script with --run-all, it will run all the slides
and exit. Useful for testing that the code still works.
"""
### slide:: -*- no_echo, no_return -*-
print """\
You can extend Deck to add custom initialization and commands.
A Deck calls .start() as its first action. In start() you can do
things like run commands (self.quick('on')) and ensure that the
user has the correct versions of required libraries installed.
To add commands, just provide an extended .expose in your subclass and
add the command as a method. It's simple, see Deck for examples.
sliderepl has been tested with Python 2.3, 2.4, 2.5, and 2.6a.
"""
### slide:: -*- no_echo -*-
print """That's it!
Test cases follow."""
### slide::
class Foo(object):
def __init__(self):
self._bar = 123
def bar(self):
return self._bar
f = Foo()
### slide::
assert f.bar() == 123
### slide:: end
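
# Illustrative sketch, not part of the original deck: the slides above
# describe extending Deck with custom commands via .expose and .start().
# A minimal subclass might look like the following; the "hello" command and
# the version check are hypothetical examples. Because this sits after the
# "### slide:: end" marker, the slide parser ignores it, and Deck.run()
# above exits before this code would ever execute.
class _ExampleDeck(Deck):
    expose = tuple(Deck.expose) + ('hello',)

    def start(self):
        # verify the host environment before presenting
        import sys
        assert sys.version_info[:2] >= (2, 3), "Python 2.3 or later required"

    def hello(self):
        print "hello from a custom sliderepl command"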
|
dcolish/Presentations
|
pdxpython/sliderepl.py
|
Python
|
bsd-3-clause
| 13,115
|
################################
# Author : septicmk
# Date : 2015/07/24 19:41:26
# FileName : test_utils.py
################################
import shutil
import tempfile
import unittest
from numpy import vstack
from pyspark import SparkContext
class PySparkTestCase(unittest.TestCase):
def setUp(self):
class_name = self.__class__.__name__
self.sc = SparkContext('local', class_name)
self.sc._jvm.System.setProperty("spark.ui.showConsoleProgress", "false")
log4j = self.sc._jvm.org.apache.log4j
log4j.LogManager.getRootLogger().setLevel(log4j.Level.FATAL)
def tearDown(self):
self.sc.stop()
# To avoid Akka rebinding to the same port, since it doesn't unbind
# immediately on shutdown
self.sc._jvm.System.clearProperty("spark.driver.port")
class PySparkTestCaseWithOutputDir(PySparkTestCase):
def setUp(self):
super(PySparkTestCaseWithOutputDir, self).setUp()
self.outputdir = tempfile.mkdtemp()
def tearDown(self):
super(PySparkTestCaseWithOutputDir, self).tearDown()
shutil.rmtree(self.outputdir)
class LocalTestCase(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
class LocalTestCaseWithOutputDir(LocalTestCase):
def setUp(self):
super(LocalTestCaseWithOutputDir, self).setUp()
        self.outputdir = tempfile.mkdtemp()
def tearDown(self):
super(LocalTestCaseWithOutputDir, self).tearDown()
shutil.rmtree(self.outputdir)
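
# Illustrative sketch, not part of the original module: a concrete test case
# built on PySparkTestCaseWithOutputDir gets a SparkContext in self.sc and a
# scratch directory in self.outputdir, both torn down automatically. The test
# body below is a hypothetical example.
class ExampleSparkUsage(PySparkTestCaseWithOutputDir):
    def test_parallelize_roundtrip(self):
        rdd = self.sc.parallelize([1, 2, 3, 4])
        self.assertEqual(rdd.count(), 4)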
|
septicmk/MEHI
|
test/test_utils.py
|
Python
|
bsd-3-clause
| 1,542
|
import factory
from questionnaire.models import Theme
class ThemeFactory(factory.DjangoModelFactory):
class Meta:
model = Theme
name = "A title"
description = 'Description'
|
eJRF/ejrf
|
questionnaire/tests/factories/theme_factory.py
|
Python
|
bsd-3-clause
| 196
|
def extractTranslasiSanusiMe(item):
'''
Parser for 'translasi.sanusi.me'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
fake-name/ReadableWebProxy
|
WebMirror/management/rss_parser_funcs/feed_parse_extractTranslasiSanusiMe.py
|
Python
|
bsd-3-clause
| 550
|
__author__ = 'frank'
# Setup our test environment
import os
os.environ['NETKI_ENV'] = 'test'
from unittest import TestCase
from netki.api.domain import *
from mock import patch, Mock
class TestWalletLookup(TestCase):
# This is the open wallet name lookup API
def setUp(self):
self.patcher1 = patch("netki.api.domain.InputValidation")
self.patcher2 = patch("netki.api.domain.create_json_response")
self.patcher3 = patch("netki.api.domain.WalletNameResolver")
self.patcher4 = patch("netki.api.domain.requests")
self.mockInputValidation = self.patcher1.start()
self.mockCreateJSONResponse = self.patcher2.start()
self.mockWalletNameResolver = self.patcher3.start()
self.mockRequests = self.patcher4.start()
config.namecoin.enabled = True
self.mockRequests.get.return_value.json.return_value = {'success': True, 'wallet_address': '1walletaddy'}
    def tearDown(self):
        self.patcher1.stop()
        self.patcher2.stop()
        self.patcher3.stop()
        self.patcher4.stop()
def get_json_call(self):
# Utility function to get JSON call_args_list cleaning up assertions in below tests
return self.mockCreateJSONResponse.call_args_list[0][1].get('data')
def test_invalid_wallet_name_field(self):
# Used to simulate failure in validation for each iteration [iteration 1, iteration 2, etc.]
self.mockInputValidation.is_valid_field.side_effect = [False]
api_wallet_lookup('wallet.frankcontreras.me', 'btc')
self.assertEqual(self.mockInputValidation.is_valid_field.call_count, 1)
self.assertFalse(self.mockWalletNameResolver.called)
self.assertEqual(self.mockCreateJSONResponse.call_count, 1)
self.assertFalse(self.mockCreateJSONResponse.call_args_list[0][1].get('success'))
self.assertEqual(self.mockCreateJSONResponse.call_args_list[0][1].get('message'), 'Invalid Parameters')
def test_invalid_currency_field(self):
# Used to simulate failure in validation for each iteration [iteration 1, iteration 2, etc.]
self.mockInputValidation.is_valid_field.side_effect = [True, False]
api_wallet_lookup('wallet.frankcontreras.me', 'btc')
self.assertEqual(self.mockInputValidation.is_valid_field.call_count, 2)
self.assertFalse(self.mockWalletNameResolver.called)
self.assertEqual(self.mockCreateJSONResponse.call_count, 1)
self.assertFalse(self.mockCreateJSONResponse.call_args_list[0][1].get('success'))
self.assertEqual(self.mockCreateJSONResponse.call_args_list[0][1].get('message'), 'Invalid Parameters')
def test_invalid_wallet_name_field_no_dot(self):
api_wallet_lookup('walletfrankcontrerasme', 'btc')
self.assertEqual(self.mockInputValidation.is_valid_field.call_count, 2)
self.assertFalse(self.mockWalletNameResolver.called)
self.assertEqual(self.mockCreateJSONResponse.call_count, 1)
self.assertFalse(self.mockCreateJSONResponse.call_args_list[0][1].get('success'))
self.assertEqual(self.mockCreateJSONResponse.call_args_list[0][1].get('message'), 'Invalid Parameters')
def test_wallet_address_returned_success(self):
self.mockWalletNameResolver.return_value.resolve_wallet_name.return_value = '1djskfaklasdjflkasdf'
api_wallet_lookup('wallet.frankcontreras.me', 'btc')
self.assertEqual(self.mockInputValidation.is_valid_field.call_count, 2)
self.assertEqual(self.mockRequests.get.call_count, 0)
self.assertEqual(self.mockWalletNameResolver.return_value.resolve_wallet_name.call_count, 1)
self.assertEqual(self.mockWalletNameResolver.return_value.set_namecoin_options.call_count, 1)
self.assertEqual(self.mockCreateJSONResponse.call_count, 1)
self.assertTrue(self.mockCreateJSONResponse.call_args_list[0][1].get('success'))
self.assertEqual(self.mockCreateJSONResponse.call_args_list[0][1].get('message'), '')
# Returned Data Validation
call_dict = self.get_json_call()
self.assertEqual(call_dict.get('wallet_name'), 'wallet.frankcontreras.me')
self.assertEqual(call_dict.get('currency'), 'btc')
self.assertEqual(call_dict.get('wallet_address'), '1djskfaklasdjflkasdf')
def test_namecoin_config_disabled(self):
self.mockWalletNameResolver.return_value.resolve_wallet_name.return_value = '1djskfaklasdjflkasdf'
config.namecoin.enabled = False
api_wallet_lookup('wallet.frankcontreras.me', 'btc')
self.assertEqual(self.mockInputValidation.is_valid_field.call_count, 2)
self.assertEqual(self.mockRequests.get.call_count, 0)
self.assertEqual(self.mockWalletNameResolver.return_value.resolve_wallet_name.call_count, 1)
self.assertEqual(self.mockWalletNameResolver.return_value.set_namecoin_options.call_count, 0)
self.assertEqual(self.mockCreateJSONResponse.call_count, 1)
self.assertTrue(self.mockCreateJSONResponse.call_args_list[0][1].get('success'))
self.assertEqual(self.mockCreateJSONResponse.call_args_list[0][1].get('message'), '')
# Returned Data Validation
call_dict = self.get_json_call()
self.assertEqual(call_dict.get('wallet_name'), 'wallet.frankcontreras.me')
self.assertEqual(call_dict.get('currency'), 'btc')
self.assertEqual(call_dict.get('wallet_address'), '1djskfaklasdjflkasdf')
def test_namecoin_use_api_returned_success(self):
config.namecoin.use_api = True
api_wallet_lookup('wallet.frankcontreras.bit', 'btc')
self.assertEqual(self.mockInputValidation.is_valid_field.call_count, 2)
self.assertEqual(self.mockRequests.get.call_count, 1)
self.assertEqual(self.mockWalletNameResolver.return_value.resolve_wallet_name.call_count, 0)
self.assertEqual(self.mockWalletNameResolver.return_value.set_namecoin_options.call_count, 0)
self.assertEqual(self.mockCreateJSONResponse.call_count, 1)
self.assertTrue(self.mockCreateJSONResponse.call_args_list[0][1].get('success'))
self.assertEqual(self.mockCreateJSONResponse.call_args_list[0][1].get('message'), '')
# Returned Data Validation
call_dict = self.get_json_call()
self.assertEqual(call_dict.get('wallet_name'), 'wallet.frankcontreras.bit')
self.assertEqual(call_dict.get('currency'), 'btc')
self.assertEqual(call_dict.get('wallet_address'), '1walletaddy')
def test_namecoin_use_api_returned_failure(self):
config.namecoin.use_api = True
self.mockRequests.get.return_value.json.return_value['success'] = False
api_wallet_lookup('wallet.frankcontreras.bit', 'btc')
self.assertEqual(self.mockInputValidation.is_valid_field.call_count, 2)
self.assertEqual(self.mockRequests.get.call_count, 1)
self.assertEqual(self.mockWalletNameResolver.return_value.resolve_wallet_name.call_count, 0)
self.assertEqual(self.mockWalletNameResolver.return_value.set_namecoin_options.call_count, 0)
self.assertEqual(self.mockCreateJSONResponse.call_count, 1)
# Returned Data Validation
self.assertFalse(self.mockCreateJSONResponse.call_args_list[0][1].get('success'))
self.assertEqual(self.mockCreateJSONResponse.call_args_list[0][1].get('message'), 'Wallet Name does not exist')
self.assertEqual(self.mockCreateJSONResponse.call_args_list[0][1].get('data'), {})
def test_wallet_lookup_returned_insecure_error(self):
self.mockInputValidation.is_valid_field.return_value = True
self.mockWalletNameResolver.return_value.resolve_wallet_name.side_effect = WalletNameLookupInsecureError()
api_wallet_lookup('wallet.frankcontreras.me', 'btc')
self.assertEqual(self.mockInputValidation.is_valid_field.call_count, 2)
self.assertEqual(self.mockRequests.get.call_count, 0)
self.assertEqual(self.mockWalletNameResolver.return_value.resolve_wallet_name.call_count, 1)
self.assertEqual(self.mockCreateJSONResponse.call_count, 1)
self.assertFalse(self.mockCreateJSONResponse.call_args_list[0][1].get('success'))
self.assertEqual(self.mockCreateJSONResponse.call_args_list[0][1].get('message'), 'Wallet Name Lookup is Insecure')
self.assertEqual(self.mockCreateJSONResponse.call_args_list[0][1].get('data'), {})
def test_wallet_lookup_returned_does_not_exist(self):
self.mockInputValidation.is_valid_field.return_value = True
self.mockWalletNameResolver.return_value.resolve_wallet_name.side_effect = WalletNameLookupError()
api_wallet_lookup('wallet.frankcontreras.me', 'btc')
self.assertEqual(self.mockInputValidation.is_valid_field.call_count, 2)
self.assertEqual(self.mockRequests.get.call_count, 0)
self.assertEqual(self.mockWalletNameResolver.return_value.resolve_wallet_name.call_count, 1)
self.assertEqual(self.mockCreateJSONResponse.call_count, 1)
self.assertFalse(self.mockCreateJSONResponse.call_args_list[0][1].get('success'))
self.assertEqual(self.mockCreateJSONResponse.call_args_list[0][1].get('message'), 'Wallet Name does not exist')
self.assertEqual(self.mockCreateJSONResponse.call_args_list[0][1].get('data'), {})
def test_wallet_lookup_returned_empty_currency_list(self):
self.mockInputValidation.is_valid_field.return_value = True
self.mockWalletNameResolver.return_value.resolve_wallet_name.side_effect = WalletNameUnavailableError()
api_wallet_lookup('wallet.frankcontreras.me', 'btc')
self.assertEqual(self.mockInputValidation.is_valid_field.call_count, 2)
self.assertEqual(self.mockRequests.get.call_count, 0)
self.assertEqual(self.mockWalletNameResolver.return_value.resolve_wallet_name.call_count, 1)
self.assertEqual(self.mockCreateJSONResponse.call_count, 1)
self.assertFalse(self.mockCreateJSONResponse.call_args_list[0][1].get('success'))
self.assertEqual(self.mockCreateJSONResponse.call_args_list[0][1].get('message'), 'Wallet Name does not exist')
self.assertEqual(self.mockCreateJSONResponse.call_args_list[0][1].get('data'), {})
def test_wallet_lookup_returned_currency_unavailable(self):
self.mockInputValidation.is_valid_field.return_value = True
self.mockWalletNameResolver.return_value.resolve_wallet_name.side_effect = WalletNameCurrencyUnavailableError()
api_wallet_lookup('wallet.frankcontreras.me', 'btc')
self.assertEqual(self.mockInputValidation.is_valid_field.call_count, 2)
self.assertEqual(self.mockRequests.get.call_count, 0)
self.assertEqual(self.mockWalletNameResolver.return_value.resolve_wallet_name.call_count, 1)
self.assertEqual(self.mockCreateJSONResponse.call_count, 1)
self.assertFalse(self.mockCreateJSONResponse.call_args_list[0][1].get('success'))
self.assertEqual(self.mockCreateJSONResponse.call_args_list[0][1].get('message'), 'Wallet Name Does Not Contain Requested Currency')
self.assertEqual(self.mockCreateJSONResponse.call_args_list[0][1].get('data'), {})
def test_wallet_lookup_exception(self):
self.mockWalletNameResolver.return_value.resolve_wallet_name.side_effect = Exception('Raising Exception for testing')
api_wallet_lookup('wallet.frankcontreras.me', 'btc')
self.assertEqual(self.mockInputValidation.is_valid_field.call_count, 2)
self.assertEqual(self.mockRequests.get.call_count, 0)
self.assertEqual(self.mockWalletNameResolver.return_value.resolve_wallet_name.call_count, 1)
self.assertEqual(self.mockCreateJSONResponse.call_count, 1)
self.assertFalse(self.mockCreateJSONResponse.call_args_list[0][1].get('success'))
self.assertEqual(self.mockCreateJSONResponse.call_args_list[0][1].get('message'), 'General Wallet Lookup Failure')
def test_uppercase_currency_and_wallet_name_to_lowercase(self):
api_wallet_lookup('Wallet.FrankContreras.Me', 'BTC')
# Validate call to resolve has values in lowercase
call_args = self.mockWalletNameResolver.return_value.resolve_wallet_name.call_args_list[0][0]
self.assertEqual('wallet.frankcontreras.me', call_args[0])
self.assertEqual('btc', call_args[1])
def test_dogecoin_transform(self):
api_wallet_lookup('wallet.frankContreras.me', 'doge')
# Validate call to resolve has values in lowercase
call_args = self.mockWalletNameResolver.return_value.resolve_wallet_name.call_args_list[0][0]
self.assertEqual('wallet.frankcontreras.me', call_args[0])
self.assertEqual('dgc', call_args[1])
class TestWalletnameCurrencyLookup(TestCase):
def setUp(self):
self.patcher1 = patch("netki.api.domain.InputValidation")
self.patcher2 = patch("netki.api.domain.create_json_response")
self.patcher3 = patch("netki.api.domain.WalletNameResolver")
self.patcher4 = patch("netki.api.domain.requests")
self.mockInputValidation = self.patcher1.start()
self.mockCreateJSONResponse = self.patcher2.start()
self.mockWalletNameResolver = self.patcher3.start()
self.mockRequests = self.patcher4.start()
self.mockWalletNameResolver.return_value.resolve_available_currencies.return_value = ['btc','ltc']
self.mockRequests.get.return_value.json.return_value = {'success': True, 'available_currencies': ['btc','ltc']}
    def tearDown(self):
        self.patcher1.stop()
        self.patcher2.stop()
        self.patcher3.stop()
        self.patcher4.stop()
def get_json_call(self):
# Utility function to get JSON call_args_list cleaning up assertions in below tests
return self.mockCreateJSONResponse.call_args_list[0][1].get('data')
def test_invalid_wallet_name_field(self):
# Used to simulate failure in validation for each iteration [iteration 1, iteration 2, etc.]
self.mockInputValidation.is_valid_field.side_effect = [False]
walletname_currency_lookup('wallet.frankcontreras.me')
self.assertEqual(self.mockInputValidation.is_valid_field.call_count, 1)
self.assertEqual(self.mockRequests.get.call_count, 0)
self.assertEqual(self.mockCreateJSONResponse.call_count, 1)
self.assertFalse(self.mockCreateJSONResponse.call_args_list[0][1].get('success'))
self.assertEqual(self.mockCreateJSONResponse.call_args_list[0][1].get('message'), 'Invalid Parameters')
self.assertFalse(self.mockWalletNameResolver.called)
def test_invalid_wallet_name_field_no_dot(self):
walletname_currency_lookup('walletfrankcontrerasme')
self.assertEqual(self.mockInputValidation.is_valid_field.call_count, 1)
self.assertEqual(self.mockRequests.get.call_count, 0)
self.assertEqual(self.mockCreateJSONResponse.call_count, 1)
self.assertFalse(self.mockCreateJSONResponse.call_args_list[0][1].get('success'))
self.assertEqual(self.mockCreateJSONResponse.call_args_list[0][1].get('message'), 'Invalid Parameters')
self.assertFalse(self.mockWalletNameResolver.called)
def test_wallet_address_returned_success(self):
walletname_currency_lookup('wallet.frankcontreras.me')
self.assertEqual(self.mockInputValidation.is_valid_field.call_count, 1)
self.assertEqual(self.mockRequests.get.call_count, 0)
self.assertEqual(self.mockCreateJSONResponse.call_count, 1)
self.assertTrue(self.mockCreateJSONResponse.call_args_list[0][1].get('success'))
self.assertEqual(self.mockCreateJSONResponse.call_args_list[0][1].get('message'), '')
self.assertEqual(1, self.mockWalletNameResolver.return_value.resolve_available_currencies.call_count)
self.assertEqual('wallet.frankcontreras.me', self.mockWalletNameResolver.return_value.resolve_available_currencies.call_args[0][0])
# Returned Data Validation
call_dict = self.get_json_call()
self.assertEqual(call_dict.get('wallet_name'), 'wallet.frankcontreras.me')
self.assertEqual(call_dict.get('available_currencies'), ['btc','ltc'])
def test_wallet_address_namecoin_use_api_returned_success(self):
config.namecoin.use_api = True
walletname_currency_lookup('wallet.frankcontreras.bit')
self.assertEqual(self.mockInputValidation.is_valid_field.call_count, 1)
self.assertEqual(self.mockRequests.get.call_count, 1)
self.assertEqual(self.mockCreateJSONResponse.call_count, 1)
self.assertTrue(self.mockCreateJSONResponse.call_args_list[0][1].get('success'))
self.assertEqual(self.mockCreateJSONResponse.call_args_list[0][1].get('message'), '')
self.assertEqual(0, self.mockWalletNameResolver.return_value.resolve_available_currencies.call_count)
# Returned Data Validation
call_dict = self.get_json_call()
self.assertEqual(call_dict.get('wallet_name'), 'wallet.frankcontreras.bit')
self.assertEqual(call_dict.get('available_currencies'), ['btc','ltc'])
def test_wallet_address_namecoin_use_api_returned_failure(self):
config.namecoin.use_api = True
self.mockRequests.get.return_value.json.return_value['success'] = False
walletname_currency_lookup('wallet.frankcontreras.bit')
self.assertEqual(self.mockInputValidation.is_valid_field.call_count, 1)
self.assertEqual(self.mockRequests.get.call_count, 1)
self.assertEqual(self.mockCreateJSONResponse.call_count, 1)
self.assertFalse(self.mockCreateJSONResponse.call_args_list[0][1].get('success'))
self.assertEqual(self.mockCreateJSONResponse.call_args_list[0][1].get('message'), 'Wallet Name Does Not Exist')
self.assertEqual(self.mockCreateJSONResponse.call_args_list[0][1].get('data'), {})
self.assertEqual(0, self.mockWalletNameResolver.return_value.resolve_available_currencies.call_count)
def test_wallet_lookup_returned_error(self):
self.mockInputValidation.is_valid_field.return_value = True
self.mockWalletNameResolver.return_value.resolve_available_currencies.side_effect = WalletNameUnavailableError()
walletname_currency_lookup('wallet.frankcontreras.me')
self.assertEqual(self.mockInputValidation.is_valid_field.call_count, 1)
self.assertEqual(self.mockRequests.get.call_count, 0)
self.assertEqual(1, self.mockWalletNameResolver.return_value.resolve_available_currencies.call_count)
self.assertEqual('wallet.frankcontreras.me', self.mockWalletNameResolver.return_value.resolve_available_currencies.call_args[0][0])
self.assertEqual(self.mockCreateJSONResponse.call_count, 1)
self.assertFalse(self.mockCreateJSONResponse.call_args_list[0][1].get('success'))
self.assertEqual(self.mockCreateJSONResponse.call_args_list[0][1].get('message'),'Wallet Name Does Not Exist')
self.assertEqual(self.mockCreateJSONResponse.call_args_list[0][1].get('data'), {})
def test_wallet_lookup_returned_insecure(self):
self.mockInputValidation.is_valid_field.return_value = True
self.mockWalletNameResolver.return_value.resolve_available_currencies.side_effect = WalletNameLookupInsecureError()
walletname_currency_lookup('wallet.frankcontreras.me')
self.assertEqual(self.mockInputValidation.is_valid_field.call_count, 1)
self.assertEqual(self.mockRequests.get.call_count, 0)
self.assertEqual(1, self.mockWalletNameResolver.return_value.resolve_available_currencies.call_count)
self.assertEqual('wallet.frankcontreras.me', self.mockWalletNameResolver.return_value.resolve_available_currencies.call_args[0][0])
self.assertEqual(self.mockCreateJSONResponse.call_count, 1)
self.assertFalse(self.mockCreateJSONResponse.call_args_list[0][1].get('success'))
self.assertEqual(self.mockCreateJSONResponse.call_args_list[0][1].get('message'),'Wallet Name Lookup is Insecure')
self.assertEqual(self.mockCreateJSONResponse.call_args_list[0][1].get('data'), {})
def test_wallet_lookup_returned_currency_unavailable(self):
self.mockInputValidation.is_valid_field.return_value = True
self.mockWalletNameResolver.return_value.resolve_available_currencies.side_effect = WalletNameCurrencyUnavailableError()
walletname_currency_lookup('wallet.frankcontreras.me')
self.assertEqual(self.mockInputValidation.is_valid_field.call_count, 1)
self.assertEqual(self.mockRequests.get.call_count, 0)
self.assertEqual(1, self.mockWalletNameResolver.return_value.resolve_available_currencies.call_count)
self.assertEqual('wallet.frankcontreras.me', self.mockWalletNameResolver.return_value.resolve_available_currencies.call_args[0][0])
self.assertEqual(self.mockCreateJSONResponse.call_count, 1)
self.assertFalse(self.mockCreateJSONResponse.call_args_list[0][1].get('success'))
self.assertEqual(self.mockCreateJSONResponse.call_args_list[0][1].get('message'),'Requested Currency Unavailable')
self.assertEqual(self.mockCreateJSONResponse.call_args_list[0][1].get('data'), {})
def test_wallet_lookup_returned_currency_namecoin_unavailable(self):
self.mockInputValidation.is_valid_field.return_value = True
self.mockWalletNameResolver.return_value.resolve_available_currencies.side_effect = WalletNameNamecoinUnavailable()
walletname_currency_lookup('wallet.frankcontreras.me')
self.assertEqual(self.mockInputValidation.is_valid_field.call_count, 1)
self.assertEqual(self.mockRequests.get.call_count, 0)
self.assertEqual(1, self.mockWalletNameResolver.return_value.resolve_available_currencies.call_count)
self.assertEqual('wallet.frankcontreras.me', self.mockWalletNameResolver.return_value.resolve_available_currencies.call_args[0][0])
self.assertEqual(self.mockCreateJSONResponse.call_count, 1)
self.assertFalse(self.mockCreateJSONResponse.call_args_list[0][1].get('success'))
self.assertEqual(self.mockCreateJSONResponse.call_args_list[0][1].get('message'),'Namecoin-based Wallet Name Lookup Unavailable')
self.assertEqual(self.mockCreateJSONResponse.call_args_list[0][1].get('data'), {})
def test_wallet_lookup_failed(self):
self.mockInputValidation.is_valid_field.return_value = True
self.mockWalletNameResolver.return_value.resolve_available_currencies.return_value = None
walletname_currency_lookup('wallet.frankcontreras.me')
self.assertEqual(self.mockInputValidation.is_valid_field.call_count, 1)
self.assertEqual(self.mockRequests.get.call_count, 0)
self.assertEqual(1, self.mockWalletNameResolver.return_value.resolve_available_currencies.call_count)
self.assertEqual('wallet.frankcontreras.me', self.mockWalletNameResolver.return_value.resolve_available_currencies.call_args[0][0])
self.assertEqual(self.mockCreateJSONResponse.call_count, 1)
self.assertFalse(self.mockCreateJSONResponse.call_args_list[0][1].get('success'))
self.assertEqual(self.mockCreateJSONResponse.call_args_list[0][1].get('message'), 'LOOKUP_FAILURE')
self.assertEqual(self.mockCreateJSONResponse.call_args_list[0][1].get('data').get('wallet_name'), 'wallet.frankcontreras.me')
def test_wallet_lookup_exception(self):
self.mockWalletNameResolver.return_value.resolve_available_currencies.side_effect = Exception()
walletname_currency_lookup('wallet.frankcontreras.me')
self.assertEqual(self.mockInputValidation.is_valid_field.call_count, 1)
self.assertEqual(self.mockRequests.get.call_count, 0)
self.assertEqual(1, self.mockWalletNameResolver.return_value.resolve_available_currencies.call_count)
self.assertEqual('wallet.frankcontreras.me', self.mockWalletNameResolver.return_value.resolve_available_currencies.call_args[0][0])
self.assertEqual(self.mockCreateJSONResponse.call_count, 1)
self.assertFalse(self.mockCreateJSONResponse.call_args_list[0][1].get('success'))
self.assertEqual(self.mockCreateJSONResponse.call_args_list[0][1].get('message'), 'General Wallet Lookup Failure')
def test_uppercase_currency_and_wallet_name_to_lowercase(self):
walletname_currency_lookup('wallet.frankcontreras.me')
# Validate call to resolve has values in lowercase
self.assertEqual('wallet.frankcontreras.me', self.mockWalletNameResolver.return_value.resolve_available_currencies.call_args[0][0])
if __name__ == "__main__":
import unittest
unittest.main()
|
netkicorp/wns-api-server
|
netki/api/test_domain.py
|
Python
|
bsd-3-clause
| 24,663
|
import base64
import re
import os
URL_FINDER = re.compile(r'url\(.*?\)')
STRIP_URL = re.compile(r'url\(|\)|\'|"')
file_extensions_to_types = {
"png": "image/png",
"jpg": "image/jpg",
"gif": "image/gif",
}
def _extract_image_urls_from_css(css):
return URL_FINDER.findall(css)
def _extract_image_urls_from_css_file(css_path):
with open(css_path, 'r') as css_file:
return _extract_image_urls_from_css(css_file.read())
def _get_image_path_from_css_url(css_image_url):
"""
>>> _get_image_path_from_css_url('url(../test/nice.png)')
'../test/nice.png'
>>> _get_image_path_from_css_url('url("../test/nice.png")')
'../test/nice.png'
>>> _get_image_path_from_css_url('url(\'../test/nice.png\')')
'../test/nice.png'
"""
return ''.join(STRIP_URL.split(css_image_url))
def _parse_image_url_into_data_uris(css_path, image_url):
return convert_file_to_data_uri(os.path.join(
os.path.dirname(css_path),
image_url))
def add_data_uris_to_css_file(css_path):
css_file_content = ''
with open(css_path, 'r') as css_file:
css_file_content = css_file.read()
image_urls = _extract_image_urls_from_css(css_file_content)
for css_image_url in image_urls:
try:
image_url = _get_image_path_from_css_url(css_image_url)
data_uri = _parse_image_url_into_data_uris(css_path, image_url)
css_file_content = css_file_content.replace(image_url, data_uri)
except KeyError:
pass
except IOError:
pass
with open(css_path, 'w') as css_file:
css_file.write(css_file_content)
def _get_file_type(path):
return file_extensions_to_types[_get_file_extension_from_path(path)]
def _get_file_extension_from_path(path):
"""
    >>> _get_file_extension_from_path('/test/nice/png.png')
    'png'
    >>> _get_file_extension_from_path('nice.png')
    'png'
    >>> _get_file_extension_from_path('/nice.JPG')
    'jpg'
"""
return path.split('.')[-1].lower()
def convert_file_to_data_uri(path):
    with open(path, 'rb') as image:
return 'data:%s;base64,%s' % (
_get_file_type(path),
base64.b64encode(image.read()))
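
# Illustrative usage sketch, not part of the original module: inline every
# image referenced by a stylesheet, then embed a single file directly. The
# file paths below are hypothetical examples.
if __name__ == '__main__':
    add_data_uris_to_css_file('static/css/site.css')
    print(convert_file_to_data_uri('static/img/logo.png'))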
|
eroh92/asset-manager
|
asset_manager/datauris.py
|
Python
|
bsd-3-clause
| 2,268
|
import os
from setuptools import setup
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme:
README = readme.read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='django-faq-views',
version='0.1',
packages=['faq'],
include_package_data=True,
license='BSD License',
description='A simple Django app to list frequently asked questions.',
long_description=README,
#url='http://www.example.com',
author='Donny Davis',
author_email='donnywdavis@icloud.com',
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
)
|
donnywdavis/Django-faq-views
|
setup.py
|
Python
|
bsd-3-clause
| 1,185
|
#!/usr/bin/env python
import re
import os
import time
import sys
import unittest
import ConfigParser
from setuptools import setup, Command
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
class SQLiteTest(Command):
"""
Run the tests on SQLite
"""
description = "Run tests on SQLite"
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
if self.distribution.tests_require:
self.distribution.fetch_build_eggs(self.distribution.tests_require)
os.environ['TRYTOND_DATABASE_URI'] = 'sqlite://'
os.environ['DB_NAME'] = ':memory:'
from tests import suite
test_result = unittest.TextTestRunner(verbosity=3).run(suite())
if test_result.wasSuccessful():
sys.exit(0)
sys.exit(-1)
class PostgresTest(Command):
"""
Run the tests on Postgres.
"""
description = "Run tests on Postgresql"
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
if self.distribution.tests_require:
self.distribution.fetch_build_eggs(self.distribution.tests_require)
os.environ['TRYTOND_DATABASE_URI'] = 'postgresql://'
os.environ['DB_NAME'] = 'test_' + str(int(time.time()))
from tests import suite
test_result = unittest.TextTestRunner(verbosity=3).run(suite())
if test_result.wasSuccessful():
sys.exit(0)
sys.exit(-1)
config = ConfigParser.ConfigParser()
config.readfp(open('tryton.cfg'))
info = dict(config.items('tryton'))
for key in ('depends', 'extras_depend', 'xml'):
if key in info:
info[key] = info[key].strip().splitlines()
major_version, minor_version, _ = info.get('version', '0.0.1').split('.', 2)
major_version = int(major_version)
minor_version = int(minor_version)
requires = []
MODULE2PREFIX = {
'report_webkit': 'openlabs'
}
MODULE = "waiting_customer_shipment_report"
PREFIX = "fio"
for dep in info.get('depends', []):
if not re.match(r'(ir|res|webdav)(\W|$)', dep):
requires.append(
'%s_%s >= %s.%s, < %s.%s' % (
MODULE2PREFIX.get(dep, 'trytond'), dep,
major_version, minor_version, major_version,
minor_version + 1
)
)
requires.append(
'trytond >= %s.%s, < %s.%s' % (
major_version, minor_version, major_version, minor_version + 1
)
)
setup(
name='%s_%s' % (PREFIX, MODULE),
version=info.get('version', '0.0.1'),
description="",
author="Fulfil.IO Inc., Openlabs Technologies and Consulting (P) Ltd.",
author_email='info@fulfil.io',
url='http://www.fulfil.io/',
package_dir={'trytond.modules.%s' % MODULE: '.'},
packages=[
'trytond.modules.%s' % MODULE,
'trytond.modules.%s.tests' % MODULE,
],
package_data={
'trytond.modules.%s' % MODULE: info.get('xml', []) +
info.get('translation', []) +
['tryton.cfg', 'locale/*.po', 'tests/*.rst', 'reports/*.odt'] +
['view/*.xml', 'reports/*.html', 'reports/css/bootstrap/css/*'] +
['reports/css/bootstrap/fonts/*', 'reports/css/font-awesome/css/*'] +
['reports/css/font-awesome/fonts/*', 'reports/js/*.js']
},
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Plugins',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Framework :: Tryton',
'Topic :: Office/Business',
],
long_description=open('README.rst').read(),
license='BSD',
install_requires=requires,
zip_safe=False,
entry_points="""
[trytond.modules]
%s = trytond.modules.%s
""" % (MODULE, MODULE),
test_suite='tests',
test_loader='trytond.test_loader:Loader',
cmdclass={
'test': SQLiteTest,
'test_on_postgres': PostgresTest,
}
)
|
fulfilio/trytond-waiting-customer-shipment-report
|
setup.py
|
Python
|
bsd-3-clause
| 4,152
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "MovingAverage", cycle_length = 30, transform = "RelativeDifference", sigma = 0.0, exog_count = 0, ar_order = 12);
|
antoinecarme/pyaf
|
tests/artificial/transf_RelativeDifference/trend_MovingAverage/cycle_30/ar_12/test_artificial_1024_RelativeDifference_MovingAverage_30_12_0.py
|
Python
|
bsd-3-clause
| 279
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from preggy import expect
import click
from click.testing import CliRunner
from terrible.run import compile_template
from tests.base import TestCase
import os
class CompileTemplateTestCase(TestCase):
def test_compile_template(self):
base_dir = os.path.dirname(os.path.realpath(__file__)) + "/../"
template_path = "%stests_resources/" % base_dir
template = "ansible-inventory.j2"
tfstate = "%stests_resources/terraform.tfstate" % base_dir
inventory_output = "%stests_resources/test_output" % base_dir
# Empty any previous test output
open(inventory_output, 'w').close()
runner = CliRunner()
result = runner.invoke(compile_template, [
'--template-path', template_path,
'--template', template,
'--tfstate', tfstate,
'--inventory-output', inventory_output])
expect(hasattr(runner, 'exception')).to_equal(False)
expect(result.exit_code).to_equal(0)
output = open(inventory_output).read()
expect(output).to_include("1.2.3.4")
def test_missing_required_params(self):
base_dir = os.path.dirname(os.path.realpath(__file__)) + "/../"
template_path = "%stests_resources/" % base_dir
template = "ansible-inventory.j2"
tfstate = "%stests_resources/terraform.tfstate" % base_dir
inventory_output = "%stests_resources/test_output" % base_dir
runner = CliRunner()
# Missing --template-path arg
result = runner.invoke(compile_template, [
'--template', template,
'--tfstate', tfstate,
'--inventory-output', inventory_output])
expect(result.exit_code).to_be_greater_than(0)
# Missing --template arg
result = runner.invoke(compile_template, [
'--template-path', template_path,
'--tfstate', tfstate,
'--inventory-output', inventory_output])
expect(result.exit_code).to_be_greater_than(0)
# Missing --tfstate arg
result = runner.invoke(compile_template, [
'--template-path', template_path,
'--template', template,
'--inventory-output', inventory_output])
expect(result.exit_code).to_be_greater_than(0)
# Missing --inventory-output arg
result = runner.invoke(compile_template, [
'--template-path', template_path,
'--template', template,
'--tfstate', tfstate])
expect(result.exit_code).to_be_greater_than(0)
# Give a file instead of a directory for template path
result = runner.invoke(compile_template, [
'--template-path', tfstate])
expect(result.exit_code).to_be_greater_than(0)
        # Give a path instead of an actual template for --template
result = runner.invoke(compile_template, [
'--template-path', template_path,
'--template', template_path])
expect(result.exit_code).to_be_greater_than(0)
        # Give an invalid path for tfstate
result = runner.invoke(compile_template, [
'--template-path', template_path,
'--template', template,
'--tfstate', tfstate + "blahblahdoesnotexist",
'--inventory-output', inventory_output])
expect(result.exit_code).to_be_greater_than(0)
|
RobotsAndPencils/terrible
|
tests/test_run.py
|
Python
|
bsd-3-clause
| 3,415
|
# -*- coding: utf-8 -*-
# Copyright (C) 2015-2017 by Brendt Wohlberg <brendt@ieee.org>
# All rights reserved. BSD 3-clause License.
# This file is part of the SPORCO package. Details of the copyright
# and user license can be found in the 'LICENSE.txt' file distributed
# with the package.
"""ADMM algorithm for the CMOD problem"""
from __future__ import division
from __future__ import absolute_import
import copy
import numpy as np
from scipy import linalg
from sporco.admm import admm
import sporco.linalg as sl
__author__ = """Brendt Wohlberg <brendt@ieee.org>"""
class CnstrMOD(admm.ADMMEqual):
r"""**Class inheritance structure**
.. inheritance-diagram:: CnstrMOD
:parts: 2
|
ADMM algorithm for a constrained variant of the Method of Optimal
Directions (MOD) :cite:`engan-1999-method` problem, referred to here
as Constrained MOD (CMOD).
Solve the optimisation problem
.. math::
\mathrm{argmin}_D \| D X - S \|_2^2 \quad \text{such that}
\quad \| \mathbf{d}_m \|_2 = 1 \;\;,
where :math:`\mathbf{d}_m` is column :math:`m` of matrix :math:`D`,
via the ADMM problem
.. math::
\mathrm{argmin}_D \| D X - S \|_2^2 + \iota_C(G) \quad
\text{such that} \quad D = G \;\;,
where :math:`\iota_C(\cdot)` is the indicator function of feasible
set :math:`C` consisting of matrices with unit-norm columns.
After termination of the :meth:`solve` method, attribute :attr:`itstat` is
a list of tuples representing statistics of each iteration. The
fields of the named tuple ``IterationStats`` are:
``Iter`` : Iteration number
``DFid`` : Value of data fidelity term :math:`(1/2) \| D X - S
\|_2^2`
``Cnstr`` : Constraint violation measure
``PrimalRsdl`` : Norm of primal residual
``DualRsdl`` : Norm of dual residual
``EpsPrimal`` : Primal residual stopping tolerance
:math:`\epsilon_{\mathrm{pri}}`
``EpsDual`` : Dual residual stopping tolerance
:math:`\epsilon_{\mathrm{dua}}`
``Rho`` : Penalty parameter
``Time`` : Cumulative run time
"""
class Options(admm.ADMMEqual.Options):
"""CMOD algorithm options
Options include all of those defined in
:class:`sporco.admm.admm.ADMMEqual.Options`, together with
additional options:
``AuxVarObj`` : Flag indicating whether the objective
function should be evaluated using variable X (``False``) or
Y (``True``) as its argument. Setting this flag to ``True``
often gives a better estimate of the objective function
``ZeroMean`` : Flag indicating whether the solution
dictionary :math:`D` should have zero-mean components.
"""
defaults = copy.deepcopy(admm.ADMMEqual.Options.defaults)
# Warning: although __setitem__ below takes care of setting
# 'fEvalX' and 'gEvalY' from the value of 'AuxVarObj', this
# cannot be relied upon for initialisation since the order of
# initialisation of the dictionary keys is not deterministic;
# if 'AuxVarObj' is initialised first, the other two keys are
# correctly set, but this setting is overwritten when 'fEvalX'
# and 'gEvalY' are themselves initialised
defaults.update({'AuxVarObj': True, 'fEvalX': False,
'gEvalY': True, 'ReturnX': False,
'RelaxParam': 1.8, 'ZeroMean': False})
defaults['AutoRho'].update({'Enabled': True})
def __init__(self, opt=None):
"""Initialise CMOD algorithm options object."""
if opt is None:
opt = {}
admm.ADMMEqual.Options.__init__(self, opt)
if self['AutoRho', 'RsdlTarget'] is None:
self['AutoRho', 'RsdlTarget'] = 1.0
def __setitem__(self, key, value):
"""Set options 'fEvalX' and 'gEvalY' appropriately when option
'AuxVarObj' is set.
"""
admm.ADMMEqual.Options.__setitem__(self, key, value)
if key == 'AuxVarObj':
if value is True:
self['fEvalX'] = False
self['gEvalY'] = True
else:
self['fEvalX'] = True
self['gEvalY'] = False
itstat_fields_objfn = ('DFid', 'Cnstr')
hdrtxt_objfn = ('DFid', 'Cnstr')
hdrval_objfun = {'DFid': 'DFid', 'Cnstr': 'Cnstr'}
def __init__(self, Z, S, dsz=None, opt=None):
"""
Initialise a CnstrMOD object with problem parameters.
|
**Call graph**
.. image:: _static/jonga/cmod_init.svg
:width: 20%
:target: _static/jonga/cmod_init.svg
|
Parameters
----------
Z : array_like, shape (M, K)
Sparse representation coefficient matrix
S : array_like, shape (N, K)
Signal vector or matrix
dsz : tuple
Dictionary size
opt : :class:`CnstrMOD.Options` object
Algorithm options
"""
if opt is None:
opt = CnstrMOD.Options()
Nc = S.shape[0]
# If Z not specified, get dictionary size from dsz
if Z is None:
Nm = dsz[0]
else:
Nm = Z.shape[0]
super(CnstrMOD, self).__init__((Nc, Nm), S.dtype, opt)
# Set penalty parameter
self.set_attr('rho', opt['rho'], dval=S.shape[1] / 500.0,
dtype=self.dtype)
self.S = np.asarray(S, dtype=self.dtype)
# Create constraint set projection function
self.Pcn = getPcn(opt['ZeroMean'])
if Z is not None:
self.setcoef(Z)
def uinit(self, ushape):
"""Return initialiser for working variable U"""
if self.opt['Y0'] is None:
return np.zeros(ushape, dtype=self.dtype)
else:
# If initial Y is non-zero, initial U is chosen so that
# the relevant dual optimality criterion (see (3.10) in
# boyd-2010-distributed) is satisfied.
return self.Y
def setcoef(self, Z):
"""Set coefficient array."""
self.Z = np.asarray(Z, dtype=self.dtype)
self.SZT = self.S.dot(Z.T)
# Factorise dictionary for efficient solves
self.lu, self.piv = sl.lu_factor(Z, self.rho)
self.lu = np.asarray(self.lu, dtype=self.dtype)
def getdict(self):
"""Get final dictionary."""
return self.Y
def xstep(self):
r"""Minimise Augmented Lagrangian with respect to
:math:`\mathbf{x}`.
"""
self.X = np.asarray(sl.lu_solve_AATI(self.Z, self.rho, self.SZT +
self.rho*(self.Y - self.U), self.lu, self.piv,),
dtype=self.dtype)
def ystep(self):
r"""Minimise Augmented Lagrangian with respect to
:math:`\mathbf{y}`.
"""
self.Y = self.Pcn(self.AX + self.U)
def eval_objfn(self):
"""Compute components of objective function as well as total
contribution to objective function.
"""
dfd = self.obfn_dfd()
cns = self.obfn_cns()
return (dfd, cns)
def obfn_dfd(self):
r"""Compute data fidelity term :math:`(1/2) \| D \mathbf{x} -
\mathbf{s} \|_2^2`.
"""
return 0.5*linalg.norm((self.obfn_fvar().dot(self.Z) - self.S))**2
def obfn_cns(self):
r"""Compute constraint violation measure :math:`\| P(\mathbf{y}) -
\mathbf{y}\|_2`.
"""
return linalg.norm((self.Pcn(self.obfn_gvar()) - self.obfn_gvar()))
def rhochange(self):
"""Re-factorise matrix when rho changes"""
self.lu, self.piv = sl.lu_factor(self.Z, self.rho)
self.lu = np.asarray(self.lu, dtype=self.dtype)
def getPcn(zm):
"""Construct constraint set projection function.
Parameters
----------
zm : bool
Flag indicating whether the projection function should include
column mean subtraction
Returns
-------
fn : function
Constraint set projection function
"""
if zm:
return lambda x: normalise(zeromean(x))
else:
return normalise
def zeromean(v):
"""Subtract mean of each column of matrix.
Parameters
----------
v : array_like
Input dictionary array
Returns
-------
vz : ndarray
Dictionary array with column means subtracted
"""
return v - np.mean(v, 0)
def normalise(v):
"""Normalise columns of matrix.
Parameters
----------
v : array_like
Array with columns to be normalised
Returns
-------
vnrm : ndarray
Normalised array
"""
vn = np.sqrt(np.sum(v**2, 0))
vn[vn == 0] = 1.0
return np.asarray(v / vn, dtype=v.dtype)
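
# Illustrative usage sketch, not part of the original module: learn a
# dictionary D for a random signal matrix S given a fixed coefficient matrix
# Z. The shapes, seed, and iteration count are arbitrary example values.
if __name__ == '__main__':
    np.random.seed(0)
    S = np.random.randn(64, 256)   # N x K signal matrix
    Z = np.random.randn(32, 256)   # M x K coefficient matrix
    opt = CnstrMOD.Options({'MaxMainIter': 50})
    c = CnstrMOD(Z, S, opt=opt)
    c.solve()
    D = c.getdict()
    print('learned dictionary shape: %s' % (D.shape,))
    print('data fidelity: %.3e' % c.obfn_dfd())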
|
alphacsc/alphacsc
|
alphacsc/other/sporco/sporco/admm/cmod.py
|
Python
|
bsd-3-clause
| 8,925
|
# -*- coding: utf-8 -*-
"""Model unit tests."""
import datetime as dt
import pytest
from phonedusk.user.models import User, Role
from .factories import UserFactory
@pytest.mark.usefixtures('db')
class TestUser:
def test_get_by_id(self):
user = User('foo', 'foo@bar.com')
user.save()
retrieved = User.get_by_id(user.id)
assert retrieved == user
def test_created_at_defaults_to_datetime(self):
user = User(username='foo', email='foo@bar.com')
user.save()
assert bool(user.created_at)
assert isinstance(user.created_at, dt.datetime)
def test_password_is_nullable(self):
user = User(username='foo', email='foo@bar.com')
user.save()
assert user.password is None
def test_factory(self, db):
user = UserFactory(password="myprecious")
db.session.commit()
assert bool(user.username)
assert bool(user.email)
assert bool(user.created_at)
assert user.is_admin is False
assert user.active is True
assert user.check_password('myprecious')
def test_check_password(self):
user = User.create(username="foo", email="foo@bar.com",
password="foobarbaz123")
assert user.check_password('foobarbaz123') is True
assert user.check_password("barfoobaz") is False
def test_full_name(self):
user = UserFactory(first_name="Foo", last_name="Bar")
assert user.full_name == "Foo Bar"
def test_roles(self):
role = Role(name='admin')
role.save()
u = UserFactory()
u.roles.append(role)
u.save()
assert role in u.roles
|
kevana/phonedusk-server
|
tests/test_models.py
|
Python
|
bsd-3-clause
| 1,682
|
from robofab.pens.pointPen import BasePointToSegmentPen
from ufoLib.pointPen import AbstractPointPen
"""
Printing pens print their data. Useful for demos and debugging.
"""
__all__ = ["PrintingPointPen", "PrintingSegmentPen", "SegmentPrintingPointPen"]
class PrintingPointPen(AbstractPointPen):
"""A PointPen that prints every step.
"""
def __init__(self):
self.havePath = False
def beginPath(self):
self.havePath = True
print("pen.beginPath()")
def endPath(self):
self.havePath = False
print("pen.endPath()")
def addPoint(self, pt, segmentType=None, smooth=False, name=None, **kwargs):
assert self.havePath
args = ["(%s, %s)" % (pt[0], pt[1])]
if segmentType is not None:
args.append("segmentType=%r" % segmentType)
if smooth:
args.append("smooth=True")
if name is not None:
args.append("name=%r" % name)
if kwargs:
args.append("**%s" % kwargs)
print("pen.addPoint(%s)" % ", ".join(args))
def addComponent(self, baseGlyphName, transformation):
assert not self.havePath
print("pen.addComponent(%r, %s)" % (baseGlyphName, tuple(transformation)))
from fontTools.pens.basePen import AbstractPen
class PrintingSegmentPen(AbstractPen):
"""A SegmentPen that prints every step.
"""
def moveTo(self, pt):
print("pen.moveTo(%s)" % (pt,))
def lineTo(self, pt):
print("pen.lineTo(%s)" % (pt,))
def curveTo(self, *pts):
print("pen.curveTo%s" % (pts,))
def qCurveTo(self, *pts):
print("pen.qCurveTo%s" % (pts,))
def closePath(self):
print("pen.closePath()")
def endPath(self):
print("pen.endPath()")
def addComponent(self, baseGlyphName, transformation):
print("pen.addComponent(%r, %s)" % (baseGlyphName, tuple(transformation)))
class SegmentPrintingPointPen(BasePointToSegmentPen):
"""A SegmentPen that pprints every step.
"""
def _flushContour(self, segments):
from pprint import pprint
pprint(segments)
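
# Illustrative sketch, not part of the original module: driving a
# PrintingSegmentPen by hand prints each pen call as it is made. The
# coordinates are arbitrary example values.
def _demo_printing_segment_pen():
    pen = PrintingSegmentPen()
    pen.moveTo((0, 0))
    pen.lineTo((100, 0))
    pen.curveTo((100, 50), (50, 100), (0, 50))
    pen.closePath()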
if __name__ == "__main__":
p = SegmentPrintingPointPen()
from robofab.test.test_pens import TestShapes
TestShapes.onCurveLessQuadShape(p)
|
adrientetar/robofab
|
Lib/robofab/pens/printingPens.py
|
Python
|
bsd-3-clause
| 2,042
|
"""
LICENCE
-------
Copyright 2013 by Kitware, Inc. All Rights Reserved. Please refer to
KITWARE_LICENSE.TXT for licensing information, or contact General Counsel,
Kitware, Inc., 28 Corporate Drive, Clifton Park, NY 12065.
"""
import flask
from functools import wraps
from flask import Blueprint, redirect, render_template, request, session, flash, url_for, current_app
#from flask_openid import OpenID
#from flask_oauth import OAuth
mod = Blueprint('login', __name__)
#oid = OpenID()
# Load users
import os
thispath = os.path.dirname(os.path.abspath(__file__))
from WebUI import app
fin = open(os.path.join(app.config['ETC_DIR'], 'users.json'))
import json
USERS = json.loads(fin.read())
# Decorator for urls that require login
def login_required(f):
"""Checks whether user is logged in or redirects to login"""
@wraps(f)
def decorator(*args, **kwargs):
if not 'user' in flask.session:
flask.flash("Login required !", "error")
return flask.redirect(url_for("login.login") + "?next=" + flask.request.url)
else:
return f(*args, **kwargs)
return decorator
# Decorator for urls that require specific role
def role_required(role):
"""Checks whether user is logged in or redirects to login"""
def decorator(f):
@wraps(f)
def decorated_function(*args, **kwargs):
if not 'user' in flask.session:
                flask.flash("Login required!", "error")
                return flask.redirect(url_for("login.login") + "?next=" + flask.request.url)
else:
if 'roles' in flask.session["user"]:
if role in flask.session["user"]["roles"]:
# flask.flash("Found access for \"" + role + "\" group :(", "success")
return f(*args, **kwargs)
                flask.flash("Access restricted to members of the \"" + role + "\" group :(", "error")
return flask.redirect(url_for("home"))
return decorated_function
return decorator
@mod.route('/login', methods=["get"])
def login():
return render_template("login.html", next=flask.request.args.get("next","/home"))
@mod.route('/login.passwd', methods=['post'])
def login_passwd():
# Try to find the user
userid = request.form["login"]
app = flask.current_app
if userid in USERS:
# Load user
user = USERS[userid]
if user["passwd"] != request.form['passwd']:
flash('Authentication Error for: ' + userid, "error")
return redirect('/login')
flask.flash("Loading user: "+userid, "success")
return do_user_login(user, next=flask.request.form["next"])
else:
flash('Unknown user: ' + request.form['login'], "error")
return redirect('/login')
def do_user_login(user, next="/home"):
session['user'] = {
'fullname': user["fullname"],
'roles' : user["roles"],
}
flash('Successfully logged in user: ' + user["fullname"], 'success')
return redirect(next)
@mod.route('/logout', methods=['GET', 'POST'])
def logout():
    """Clear the session and log the user out, returning to the home page."""
# if we are already logged in, go back to were we came from
flask.g.logged_in = False
session.clear()
return redirect(url_for('home'))
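
# Illustrative sketch, not part of the original module: how the decorators
# above are meant to be combined with a view function. The URL and the
# 'admin' role are hypothetical examples.
@mod.route('/admin/example')
@role_required('admin')
def admin_example():
    return "Only visible to users whose session carries the 'admin' role."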
|
anguoyang/SMQTK
|
OLD_ROOT/WebUI/login.py
|
Python
|
bsd-3-clause
| 3,379
|