| repo_name | path | language | license | size |
|---|---|---|---|---|
import utils
from flask import render_template, redirect, request, session, url_for, json, jsonify
from . import murmurbp
from .User import User
# User Views
@murmurbp.route("/users", methods = ['GET'])
def get_all_users():
u = User()
ul = utils.obj_to_dict(u.get_all())
data = [{'UserId': k, 'UserName': v} for k, v in ul.iteritems()]
resp = jsonify(users=data)
return resp, 200
@murmurbp.route("/users/<int:id>", methods = ['GET'])
def get_user(id):
u = User()
data = utils.obj_to_dict(u.get(id))
resp = jsonify(data)
return resp, 200
@murmurbp.route("/users", methods = ['POST'])
def add_user():
u = User()
user = json.loads('{"UserName": "TestUser7"}')
new_user = u.add(user)
data = utils.obj_to_dict(new_user)
resp = jsonify(data)
return resp, 200
@murmurbp.route("/users/<int:id>", methods = ['DELETE'])
def delete_user(id):
u = User()
u.delete(id)
return jsonify(), 201
from .Channel import Channel
# Channel Views
@murmurbp.route("/channels", methods = ['GET'])
def get_all_channels():
c = Channel()
cl = utils.obj_to_dict(c.get_all())
data = [ v for k, v in cl.iteritems()]
resp = jsonify(channels=data)
return resp, 200
@murmurbp.route("/channels", methods = ['POST'])
def add_channel():
c = Channel()
name = request.form['channelName']
parent = request.form['parent']
new_channel = c.add_channel(name, parent)
data = utils.obj_to_dict(new_channel)
resp = jsonify(data)
return resp, 200
@murmurbp.route("/channels/<int:id>", methods = ['DELETE'])
def delete_channel(id):
c = Channel()
c.delete(id)
return jsonify(), 201
from .ACLGroup import ACL, Group
# ACL and Group Views
@murmurbp.route("/acls/<int:channel_id>", methods = ['GET'])
def get_all_acls(channel_id):
a = ACL()
data = utils.obj_to_dict(a.get_all(channel_id))
resp = jsonify(acls=data)
return resp, 200
@murmurbp.route("/groups/<int:channel_id>", methods = ['GET'])
def get_all_groups(channel_id):
g = Group()
data = utils.obj_to_dict(g.get_all(channel_id))
resp = jsonify(groups=data)
return resp, 200
@murmurbp.route("/acls/<int:channel_id>", methods = ['POST'])
def add_acl_to_channel(channel_id):
# TODO: load json object
a = ACL()
acl = json.loads('{"applyHere": true,"applySubs": true,"userid": 1,"group": "admin","allow": 1024,"deny": 0}')
data = a.add(channel_id, acl)
resp = jsonify(data)
return resp, 200
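
# --- Hedged usage sketch (not part of the original file) ---
# A minimal way to exercise these endpoints with Flask's test client; it
# assumes `murmurbp` gets registered on an app elsewhere (the app factory is
# not shown in this file):
#
#     from flask import Flask
#     app = Flask(__name__)
#     app.register_blueprint(murmurbp)
#     client = app.test_client()
#     print(client.get("/users").get_json())
#     print(client.post("/channels",
#                       data={"channelName": "Lobby", "parent": "0"}).get_json())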
| aqisnotliquid/minder2 | app/murmur/views.py | Python | mit | 2,492 |
from collections import OrderedDict
import astropy.coordinates as coord
import astropy.units as u
import matplotlib.pyplot as plt
#import mpl_toolkits.basemap as bm
import numpy as np
import spherical_geometry.polygon as sp
from astropy.table import Table
import astropy.time as time
from .gbm_detector import BGO0, BGO1
from .gbm_detector import NaI0, NaI1, NaI2, NaI3, NaI4, NaI5
from .gbm_detector import NaI6, NaI7, NaI8, NaI9, NaIA, NaIB
from .gbm_frame import GBMFrame
from gbmgeometry.utils.gbm_time import GBMTime
import seaborn as sns
_det_color_cycle = np.linspace(0, 1, 12)
class GBM(object):
def __init__(self, quaternion, sc_pos=None, gbm_time=None):
"""
Parameters
----------
quaternion : Fermi GBM quarternion array
"""
if gbm_time is not None:
if isinstance(gbm_time, str):
self._gbm_time = GBMTime.from_UTC_fits(gbm_time)
else:
# assuming MET
self._gbm_time = GBMTime.from_MET(gbm_time)
else:
self._gbm_time = None
if self._gbm_time is not None:
self.n0 = NaI0(quaternion, sc_pos, self._gbm_time.time)
self.n1 = NaI1(quaternion, sc_pos, self._gbm_time.time)
self.n2 = NaI2(quaternion, sc_pos, self._gbm_time.time)
self.n3 = NaI3(quaternion, sc_pos, self._gbm_time.time)
self.n4 = NaI4(quaternion, sc_pos, self._gbm_time.time)
self.n5 = NaI5(quaternion, sc_pos, self._gbm_time.time)
self.n6 = NaI6(quaternion, sc_pos, self._gbm_time.time)
self.n7 = NaI7(quaternion, sc_pos, self._gbm_time.time)
self.n8 = NaI8(quaternion, sc_pos, self._gbm_time.time)
self.n9 = NaI9(quaternion, sc_pos, self._gbm_time.time)
self.na = NaIA(quaternion, sc_pos, self._gbm_time.time)
self.nb = NaIB(quaternion, sc_pos, self._gbm_time.time)
self.b0 = BGO0(quaternion, sc_pos, self._gbm_time.time)
self.b1 = BGO1(quaternion, sc_pos, self._gbm_time.time)
else:
self.n0 = NaI0(quaternion, sc_pos, None)
self.n1 = NaI1(quaternion, sc_pos, None)
self.n2 = NaI2(quaternion, sc_pos, None)
self.n3 = NaI3(quaternion, sc_pos, None)
self.n4 = NaI4(quaternion, sc_pos, None)
self.n5 = NaI5(quaternion, sc_pos, None)
self.n6 = NaI6(quaternion, sc_pos, None)
self.n7 = NaI7(quaternion, sc_pos, None)
self.n8 = NaI8(quaternion, sc_pos, None)
self.n9 = NaI9(quaternion, sc_pos, None)
self.na = NaIA(quaternion, sc_pos, None)
self.nb = NaIB(quaternion, sc_pos, None)
self.b0 = BGO0(quaternion, sc_pos, None)
self.b1 = BGO1(quaternion, sc_pos, None)
self._detectors = OrderedDict(n0=self.n0,
n1=self.n1,
n2=self.n2,
n3=self.n3,
n4=self.n4,
n5=self.n5,
n6=self.n6,
n7=self.n7,
n8=self.n8,
n9=self.n9,
na=self.na,
nb=self.nb,
b0=self.b0,
b1=self.b1)
self._quaternion = quaternion
self._sc_pos = sc_pos
def set_quaternion(self, quaternion):
"""
Parameters
----------
quaternion
"""
for key in self._detectors.keys():
self._detectors[key].set_quaternion(quaternion)
self._quaternion = quaternion
def set_sc_pos(self, sc_pos):
"""
Parameters
----------
sc_pos
"""
for key in self._detectors.keys():
self._detectors[key].set_sc_pos(sc_pos)
self._sc_pos = sc_pos
def get_good_detectors(self, point, fov):
"""
Returns a list of detectors containing the point in the FOV
Parameters
----------
point
fov
Returns
-------
"""
good_detectors = self._contains_point(point, fov)
return good_detectors
def get_fov(self, radius, fermi_frame=False):
"""
Parameters
----------
fermi_frame
radius
"""
polys = []
for key in self._detectors.keys():
if key[0] == 'b':
this_rad = 90
else:
this_rad = radius
polys.append(self._detectors[key].get_fov(this_rad, fermi_frame))
polys = np.array(polys)
return polys
def get_good_fov(self, point, radius, fermi_frame=False):
"""
Returns the detectors that contain the given point
for the given angular radius
Parameters
----------
point
radius
"""
good_detectors = self._contains_point(point, radius)
polys = []
for key in good_detectors:
polys.append(self._detectors[key].get_fov(radius, fermi_frame))
return [polys, good_detectors]
def get_sun_angle(self, keys=None):
"""
Returns
-------
"""
angles = []
if keys is None:
for key in self._detectors.keys():
angles.append(self._detectors[key].sun_angle)
else:
for key in keys:
angles.append(self._detectors[key].sun_angle)
return angles
def get_centers(self, keys=None):
"""
Returns
-------
"""
centers = []
if keys is None:
for key in self._detectors.keys():
centers.append(self._detectors[key].get_center())
else:
for key in keys:
centers.append(self._detectors[key].get_center())
return centers
def get_separation(self, source):
"""
        Get the angular separation of the detectors from a point
Parameters
----------
source
Returns
-------
"""
tab = Table(names=["Detector", "Separation"], dtype=["|S2", np.float64])
for key in self._detectors.keys():
sep = self._detectors[key].get_center().separation(source)
tab.add_row([key, sep])
tab['Separation'].unit = u.degree
tab.sort("Separation")
return tab
def get_earth_points(self, fermi_frame=False):
"""
Returns
-------
"""
if self._sc_pos is not None:
self._calc_earth_points(fermi_frame)
return self._earth_points
else:
print("No spacecraft position set")
def _calc_earth_points(self, fermi_frame):
xyz_position = coord.SkyCoord(x=self._sc_pos[0],
y=self._sc_pos[1],
z=self._sc_pos[2],
frame='icrs',
representation='cartesian')
earth_radius = 6371. * u.km
fermi_radius = np.sqrt((self._sc_pos ** 2).sum())
horizon_angle = 90 - np.rad2deg(np.arccos((earth_radius / fermi_radius).to(u.dimensionless_unscaled)).value)
horizon_angle = (180 - horizon_angle) * u.degree
num_points = 300
ra_grid_tmp = np.linspace(0, 360, num_points)
dec_range = [-90, 90]
cosdec_min = np.cos(np.deg2rad(90.0 + dec_range[0]))
cosdec_max = np.cos(np.deg2rad(90.0 + dec_range[1]))
v = np.linspace(cosdec_min, cosdec_max, num_points)
v = np.arccos(v)
v = np.rad2deg(v)
v -= 90.
dec_grid_tmp = v
ra_grid = np.zeros(num_points ** 2)
dec_grid = np.zeros(num_points ** 2)
itr = 0
for ra in ra_grid_tmp:
for dec in dec_grid_tmp:
ra_grid[itr] = ra
dec_grid[itr] = dec
itr += 1
if fermi_frame:
all_sky = coord.SkyCoord(Az=ra_grid, Zen=dec_grid, frame=GBMFrame(quaternion=self._quaternion), unit='deg')
else:
all_sky = coord.SkyCoord(ra=ra_grid, dec=dec_grid, frame='icrs', unit='deg')
condition = all_sky.separation(xyz_position) > horizon_angle
# self.seps = all_sky.separation(xyz_position)
self._earth_points = all_sky[condition]
@property
def detectors(self):
return self._detectors
def _contains_point(self, point, radius):
"""
returns detectors that contain a points
"""
condition = []
steps = 500
for key in self._detectors.keys():
if key[0] == 'b':
this_rad = 90
else:
this_rad = radius
j2000 = self._detectors[key]._center.icrs
poly = sp.SphericalPolygon.from_cone(j2000.ra.value,
j2000.dec.value,
this_rad,
steps=steps)
if poly.contains_point(point.cartesian.xyz.value):
condition.append(key)
return condition
def get_legal_pairs():
"""
Plots the legal pairs of detectors for GBM observations
Returns
-------
"""
dlp = np.array([[0, 274, 39, 171, 12, 29, 0, 5, 1, 6, 1, 0],
[258, 0, 233, 55, 4, 100, 2, 1, 1, 12, 27, 0],
[55, 437, 0, 2, 2, 311, 0, 1, 1, 13, 235, 0],
[215, 80, 3, 0, 330, 107, 4, 8, 19, 2, 1, 0],
[13, 4, 8, 508, 0, 269, 2, 29, 236, 0, 1, 0],
[44, 188, 337, 166, 279, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 2, 2, 0, 0, 238, 46, 180, 12, 33],
[0, 2, 0, 18, 35, 0, 222, 0, 221, 61, 3, 109],
[0, 0, 1, 16, 215, 0, 51, 399, 0, 4, 2, 303],
[3, 18, 21, 4, 0, 0, 190, 82, 1, 0, 324, 110],
[1, 25, 191, 0, 0, 0, 16, 6, 4, 516, 0, 293],
[0, 0, 0, 0, 0, 0, 32, 147, 297, 138, 263, 0]])
sns.heatmap(dlp, annot=True, fmt='d', cmap="YlGnBu")
plt.ylabel("NaI")
plt.xlabel("NaI")
| drJfunk/gbmgeometry | gbmgeometry/gbm.py | Python | mit | 10,575 |
from django.db import models
from django.template.defaultfilters import truncatechars
class Setting(models.Model):
name = models.CharField(max_length=100, unique=True, db_index=True)
value = models.TextField(blank=True, default='')
value_type = models.CharField(max_length=1, choices=(('s', 'string'), ('i', 'integer'), ('f', 'float'), ('b', 'boolean')))
hide_value_in_list = models.BooleanField(default=False)
    def __str__(self):
        return "%s = %s (%s)" % (self.name, "**hidden**" if self.hide_value_in_list else truncatechars(self.value, 150), self.get_value_type_display())
def get_value(self):
val = self.value
types = {'s': str, 'i': int, 'b': (lambda v: v.lower() == "true"), 'f': float}
return types[self.value_type](val)
    class Meta:
        verbose_name = 'Setting'
        verbose_name_plural = 'Settings'
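
# --- Hedged usage sketch (not part of the original file) ---
# How get_value() casts the stored text according to value_type; the setting
# below is made up for illustration:
#
#     s = Setting(name='MAX_ITEMS', value='25', value_type='i')
#     s.get_value()                       # -> 25 (int)
#     s.value, s.value_type = 'True', 'b'
#     s.get_value()                       # -> True (via the lower() == "true" check)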
| moodpulse/l2 | appconf/models.py | Python | mit | 902 |
# Prerequisite: first run scripts/jobslave-nodatabase.py (the remote job slave this master delegates to)
import os
os.environ["SEAMLESS_COMMUNION_ID"] = "simple-remote"
os.environ["SEAMLESS_COMMUNION_INCOMING"] = "localhost:8602"
import seamless
seamless.set_ncores(0)
from seamless import communion_server
communion_server.configure_master(
buffer=True,
transformation_job=True,
transformation_status=True,
)
from seamless.core import context, cell, transformer, unilink
ctx = context(toplevel=True)
ctx.cell1 = cell().set(1)
ctx.cell2 = cell().set(2)
ctx.result = cell()
ctx.tf = transformer({
"a": "input",
"b": "input",
"c": "output"
})
ctx.cell1_unilink = unilink(ctx.cell1)
ctx.cell1_unilink.connect(ctx.tf.a)
ctx.cell2.connect(ctx.tf.b)
ctx.code = cell("transformer").set("c = a + b")
ctx.code.connect(ctx.tf.code)
ctx.result_unilink = unilink(ctx.result)
ctx.tf.c.connect(ctx.result_unilink)
ctx.result_copy = cell()
ctx.result.connect(ctx.result_copy)
ctx.compute(0.1)
print(ctx.cell1.value)
print(ctx.code.value)
ctx.compute()
print(ctx.result.value, ctx.status)
print(ctx.tf.exception)
ctx.cell1.set(10)
ctx.compute()
print(ctx.result.value, ctx.status)
ctx.code.set("c = a + b + 1000")
ctx.compute()
print(ctx.result.value, ctx.status)
print("Introduce delay...")
ctx.code.set("import time; time.sleep(2); c = -(a + b)")
ctx.compute(1.0)
print("after 1.0 sec...")
print(ctx.result.value, ctx.status)
print("...")
ctx.compute()
print(ctx.result.value, ctx.status)
| sjdv1982/seamless | tests/lowlevel/simple-remote.py | Python | mit | 1,438 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import io
import os
import sys
import re
from setuptools import setup
if sys.argv[-1] == "publish":
os.system("python setup.py sdist upload")
sys.exit()
packages = [
"the_big_username_blacklist"
]
# Handle requirements
install_requires = []
tests_requires = [
"pytest==3.0.5",
]
# Convert markdown to rst for the PyPI long description
try:
    from pypandoc import convert
    long_description = convert("README.md", "rst")
except Exception:
    # pypandoc (or pandoc itself) is unavailable; fall back to an empty description
    long_description = ""
version = ''
with io.open('the_big_username_blacklist/__init__.py', 'r', encoding='utf-8') as fd:
version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
fd.read(), re.MULTILINE).group(1)
setup(
name="the_big_username_blacklist",
version=version,
description="Validate usernames against a blacklist", # NOQA
long_description=long_description,
author="Martin Sandström",
author_email="martin@marteinn.se",
url="https://github.com/marteinn/the-big-username-blacklist-python",
packages=packages,
package_data={"": ["LICENSE", ], "the_big_username_blacklist": ["*.txt"]},
package_dir={"the_big_username_blacklist": "the_big_username_blacklist"},
include_package_data=True,
install_requires=install_requires,
license="MIT",
zip_safe=False,
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Natural Language :: English",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: Implementation :: PyPy"
],
)
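
# --- Hedged illustration (not part of the original file) ---
# The __version__-extraction idiom used above, shown standalone on made-up
# module text:
#
#     import re
#     text = '__version__ = "1.4.0"'
#     re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
#               text, re.MULTILINE).group(1)   # -> '1.4.0'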
| marteinn/The-Big-Username-Blacklist-Python | setup.py | Python | mit | 1,848 |
#-*- coding: utf-8 -*-
""" This script contains the abstract animation object that must be implemented
by all animation extension.
"""
class AbstractAnimation(object):
""" An abstract animation that defines method(s) that must be implemented
by animation extensions.
"""
def __init__(self, driver):
self.driver = driver
    def animate(self, message):
        """ This method is called by the thread that pop()s messages from the
        MessageQueue.
        It's in this method that there is a "discussion" with the driver to
        tell it how to display the message in a beautiful way (or not, you
        decide it after all!).
        Keyword Arguments:
        message - The message popped from the queue.
        """
raise NotImplementedError()
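
# --- Hedged example (not part of the original file) ---
# A minimal concrete animation; the driver call used here (write()) is an
# assumed API for illustration, not necessarily the real lcddaemon driver:
#
#     class StaticAnimation(AbstractAnimation):
#         def animate(self, message):
#             self.driver.write(message)  # hypothetical driver method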
| juliendelplanque/lcddaemon | animations/abstractanimation.py | Python | mit | 814 |
#!/usr/bin/python
# Author: Jon Trulson <jtrulson@ics.com>
# Copyright (c) 2016 Intel Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import time, sys, signal, atexit
import pyupm_bmp280 as sensorObj
# Instantiate a BMP280 instance using default i2c bus and address
sensor = sensorObj.BMP280()
# For SPI, bus 0, you would pass -1 as the address, and a valid pin for CS:
# BMP280(0, -1, 10);
## Exit handlers ##
# This function stops python from printing a stacktrace when you hit control-C
def SIGINTHandler(signum, frame):
raise SystemExit
# This function lets you run code on exit
def exitHandler():
print "Exiting"
sys.exit(0)
# Register exit handlers
atexit.register(exitHandler)
signal.signal(signal.SIGINT, SIGINTHandler)
while (1):
sensor.update()
print "Compensation Temperature:", sensor.getTemperature(), "C /",
print sensor.getTemperature(True), "F"
print "Pressure: ", sensor.getPressure(), "Pa"
print "Computed Altitude:", sensor.getAltitude(), "m"
print
time.sleep(1)
| andreivasiliu2211/upm | examples/python/bmp280.py | Python | mit | 2,069 |
from django.db.backends import BaseDatabaseIntrospection
class DatabaseIntrospection(BaseDatabaseIntrospection):
def get_table_list(self, cursor):
"Returns a list of table names in the current database."
cursor.execute("SHOW TABLES")
return [row[0] for row in cursor.fetchall()]
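
# --- Hedged usage sketch (not part of the original file) ---
# Exercising the introspection class, assuming a Django connection configured
# with this Impala backend; Django's own entry point would be
# connection.introspection.table_names():
#
#     from django.db import connection
#     with connection.cursor() as cursor:
#         print(DatabaseIntrospection(connection).get_table_list(cursor))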
| ikeikeikeike/django-impala-backend | impala/introspection.py | Python | mit | 310 |
import csv
import loremipsum
import random
import re
from encoded.loadxl import *
class Anonymizer(object):
"""Change email addresses and names consistently
"""
# From Colander. Not exhaustive, will not match .museum etc.
email_re = re.compile(r'(?i)[A-Z0-9._%+-]+@[A-Z0-9.-]+\.[A-Z]{2,4}')
random_words = loremipsum._generator.words
def __init__(self):
self.mapped_emails = {}
self.mapped_names = {}
self.generated_emails = set()
self.generated_names = set()
def replace_emails(self, dictrows):
for row in dictrows:
for k, v in list(row.items()):
if v is None:
continue
new_value, num_subs = self.email_re.subn(
self._replace_emails, v)
row[k] = new_value
yield row
def replace_non_pi_names(self, dictrows):
for row in dictrows:
if row.get('job_title') != 'PI':
if 'first_name' in row:
row['first_name'] = random.choice(self.random_words).capitalize()
if 'last_name' in row:
row['last_name'] = self._random_name()
yield row
def _random_email(self):
for _ in range(1000):
generated = "%s.%s@%s.%s" % \
tuple(random.choice(self.random_words) for n in range(4))
if generated not in self.generated_emails:
self.generated_emails.add(generated)
return generated
raise AssertionError("Unable to find random email")
def _replace_emails(self, matchobj):
found = matchobj.group(0)
new, original = self.mapped_emails.get(found.lower(), (None, None))
if new is not None:
if found != original:
raise ValueError(
"Case mismatch for %s, %s" % (found, original))
return new
new = self._random_email()
self.mapped_emails[found.lower()] = (new, found)
return new
def _random_name(self):
for _ in range(1000):
if random.choice(range(4)):
generated = random.choice(self.random_words).capitalize()
else:
generated = "%s-%s" % \
tuple(random.choice(self.random_words).capitalize()
for n in range(2))
if generated not in self.generated_names:
self.generated_names.add(generated)
return generated
raise AssertionError("Unable to find random name")
def set_existing_key_value(**kw):
def component(dictrows):
for row in dictrows:
for k, v in kw.items():
if k in row:
row[k] = v
yield row
return component
def drop_rows_with_all_key_value(**kw):
def component(dictrows):
for row in dictrows:
if not all(row[k] == v if k in row else False for k, v in kw.items()):
yield row
return component
def extract_pipeline():
return [
skip_rows_with_all_falsey_value('test'),
skip_rows_with_all_key_value(test='skip'),
skip_rows_with_all_falsey_value('test'),
skip_rows_missing_all_keys('uuid'),
drop_rows_with_all_key_value(_skip=True),
]
def anon_pipeline():
anonymizer = Anonymizer()
return extract_pipeline() + [
set_existing_key_value(
fax='000-000-0000',
phone1='000-000-0000',
phone2='000-000-0000',
skype='skype',
google='google',
),
anonymizer.replace_emails,
anonymizer.replace_non_pi_names,
]
def run(pipeline, inpath, outpath):
for item_type in ORDER:
source = read_single_sheet(inpath, item_type)
fieldnames = [k for k in source.fieldnames if ':ignore' not in k]
with open(os.path.join(outpath, item_type + '.tsv'), 'wb') as out:
writer = csv.DictWriter(out, fieldnames, dialect='excel-tab', extrasaction='ignore')
writer.writeheader()
writer.writerows(combine(source, pipeline))
def main():
import argparse
parser = argparse.ArgumentParser(description='Extract test data set.')
parser.add_argument('--anonymize', '-a', action="store_true",
help="anonymize the data.")
parser.add_argument('inpath',
help="input zip file of excel sheets.")
parser.add_argument('outpath',
help="directory to write filtered tsv files to.")
args = parser.parse_args()
pipeline = anon_pipeline() if args.anonymize else extract_pipeline()
import pdb
import sys
import traceback
try:
run(pipeline, args.inpath, args.outpath)
except:
type, value, tb = sys.exc_info()
traceback.print_exc()
pdb.post_mortem(tb)
if __name__ == '__main__':
main()
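
# --- Hedged usage sketch (not part of the original file) ---
# How the Anonymizer components consume iterables of dict rows; the row below
# is made up for illustration:
#
#     anon = Anonymizer()
#     rows = [{'email': 'jane.doe@example.org', 'job_title': 'scientist'}]
#     rows = anon.replace_emails(rows)
#     rows = anon.replace_non_pi_names(rows)
#     print(list(rows))  # same keys; the email is consistently replaced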
| ENCODE-DCC/encoded | src/encoded/commands/extract_test_data.py | Python | mit | 4,920 |
from cast.analysers import log, mainframe
class EmptyParagraphEndOfSection(mainframe.Extension):
def __init__(self):
self.program = None
def start_program(self, program):
self.program = program
def end_program(self, _):
self.program = None
def start_section(self, section):
last_paragraph = section.get_children()[-1]
if 'paragraph' == last_paragraph.get_kind():
children = last_paragraph.get_children()
if len(children) > 1:
# violation test_ko2
self.program.save_violation('MyCompany_COBOL_Rules.sectionEndParagraph', section.get_position())
elif len(children) == 1:
kind = children[0].get_kind()
if kind not in ['exit', 'stop_run', 'goback']:
self.program.save_violation('MyCompany_COBOL_Rules.sectionEndParagraph', section.get_position())
else:
# violation test_ko1
self.program.save_violation('MyCompany_COBOL_Rules.sectionEndParagraph', section.get_position())
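
# --- Hedged illustration (not part of the original file) ---
# The shape of COBOL this rule flags: a section whose last paragraph is empty,
# or whose last paragraph does not reduce to a single EXIT / STOP RUN / GOBACK.
# Sketch only, not a real test case from the SDK:
#
#     MAIN-LOGIC SECTION.
#     DO-WORK.
#         PERFORM COMPUTE-TOTALS.
#     END-PARA.
#         *> empty last paragraph -> sectionEndParagraph violation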
| CAST-projects/Extension-SDK | samples/analyzer_level/mainframe/mainframe.quality_rule/empty_paragraph_end.py | Python | mit | 1,192 |
# -*- coding: utf-8 -*-
"""This module contains some functions for EM analysis.
"""
__author__ = 'Wenzhi Mao'
__all__ = ['genPvalue', 'calcPcutoff', 'showPcutoff', 'transCylinder',
'showMRCConnection', 'showMRCConnectionEach', 'gaussian3D']
def interpolationball(matrix, index, step, r, **kwargs):
    """Interpolate the value within a ball of radius `r`.
    Inverse distance weighting is used to weight each grid value."""
from numpy import array, arange, floor, ceil
position = index * step
w = []
v = []
for i in arange(ceil(index[0] - (r / step[0])) // 1, floor(index[0] + (r / step[0])) // 1 + 1):
for j in arange(ceil(index[1] - (r / step[1])) // 1, floor(index[1] + (r / step[1])) // 1 + 1):
for k in arange(ceil(index[2] - (r / step[2])) // 1, floor(index[2] + (r / step[2])) // 1 + 1):
if (((index[0] - i) * step[0]) ** 2 + ((index[1] - j) * step[1]) ** 2 + ((index[2] - k) * step[2]) ** 2) <= r ** 2:
w.append(1.0 / ((((index[0] - i) * step[0]) ** 2 + (
(index[1] - j) * step[1]) ** 2 + ((index[2] - k) * step[2]) ** 2) ** 2) ** .5)
                    v.append(matrix[int(i), int(j), int(k)])
w = array(w)
v = array(v)
w = w / w.sum()
return (w * v).sum()
def interpolationcube(m, p, way, **kwargs):
    """Interpolate the value from the smallest enclosing 2x2x2 box.
    Inverse distance weighting or trilinear interpolation is
    used to weight each value."""
from numpy import array
if way == 'idw':
tt = array([[[0, 0], [0, 0]], [[0, 0], [0, 0]]], dtype=float)
tt[0, :, :] += p[0] ** 2
tt[1, :, :] += (1 - p[0]) ** 2
tt[:, 0, :] += p[1] ** 2
tt[:, 1, :] += (1 - p[1]) ** 2
tt[:, :, 0] += p[2] ** 2
tt[:, :, 1] += (1 - p[2]) ** 2
tt = tt ** .5
tt = 1. / tt
tt = tt / tt.sum()
elif way == 'interpolation':
tt = array([[[1, 1], [1, 1]], [[1, 1], [1, 1]]], dtype=float)
tt[0, :, :] *= 1 - p[0]
tt[1, :, :] *= p[0]
tt[:, 0, :] *= 1 - p[1]
tt[:, 1, :] *= p[1]
tt[:, :, 0] *= 1 - p[2]
tt[:, :, 1] *= p[2]
if m.shape != (2, 2, 2):
return -999999999999
else:
return (tt * m).sum()
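
# --- Hedged illustration (not part of the original file) ---
# Sanity check of the trilinear weights built above: for a fractional position
# p inside the unit cube they are non-negative and sum to 1.
#
#     import numpy as np
#     p = np.array([0.25, 0.5, 0.75])
#     tt = np.ones((2, 2, 2))
#     tt[0, :, :] *= 1 - p[0]; tt[1, :, :] *= p[0]
#     tt[:, 0, :] *= 1 - p[1]; tt[:, 1, :] *= p[1]
#     tt[:, :, 0] *= 1 - p[2]; tt[:, :, 1] *= p[2]
#     assert abs(tt.sum() - 1.0) < 1e-12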
def genPvalue(
pdb, mrc, sample=None, method=('cube', 'interpolation'), sampleradius=3.0,
assumenorm=False, **kwargs):
"""This function assign p-value for pdb structure in mrc. sample is used to get the population values.
`method` must be a tuple or list.
There are 2 methods now: `cube` and `ball`.
For the `cube` method, you should provide either ('cube','interpolation') or ('cube','idw').
`idw` stand for `Inverse distance weighting`, and it is the default option.
For the `ball` method, you should provide the radius(A) like ('ball',3).
`sample` should be a `prody.AtomGroup` or a `numpy` n*3 array or `None` to indicate all data to sample.
`assumenorm` is set to be `False` in default which will get the sample p-value. If it is set
to `True`, the p-value will be the norm-p-value.
The p-value will be set to the beta in the pdb.
"""
from ..IO.output import printError, printInfo
from .mrc import MRC as MRCclass
from ..Application.algorithm import binarySearch
from prody import AtomGroup as pdbclass
from prody.atomic.selection import Selection as selectionclass
from numpy import ndarray, zeros_like, array, floor, ceil, rint
from scipy.stats import norm
if isinstance(method, (tuple, list)):
if len(method) == 0:
printError("The method is not valid.")
return None
if method[0].lower() == 'cube':
if len(method) == 1:
way = 'idw'
method = 'cube'
elif method[1].lower() in ['interpolation', 'idw']:
way = method[1].lower()
method = 'cube'
else:
printError("The method[1] is not valid.")
printError(
"Only 'idw' or 'interpolation' supported for 'cube'.")
return None
elif method[0].lower() == 'ball':
            if len(method) < 2:
                printError("A radius must be provided for the `ball` method.")
                return None
try:
way = float(eval(str(method[1])))
except:
                printError(
                    "Only numbers are supported as the second option for `ball`.")
return None
if way <= 0:
printError("Radius must be positive.")
return None
else:
method = 'ball'
elif isinstance(method, (str)):
if method.lower() == 'cube':
method = 'cube'
way = 'idw'
else:
printError("Only `cube` support no option format.")
return None
else:
printError("The method must be tuple or list")
return None
if not isinstance(mrc, MRCclass):
printError("Only mbio.MRC class supported for `mrc`.")
return None
if not isinstance(pdb, (pdbclass, selectionclass)):
printError("Only prody.AtomGroup class supported for `pdb`.")
return None
if type(sample) == type(None):
sample = None
elif isinstance(sample, ndarray):
if not (sample.shape == (3,) or (len(sample.shape) == 2 and sample.shape[1] == 3)):
printError("The sample coordinates must has 3 columns.")
return None
if sample.shape == (3,):
sample = array([sample])
elif isinstance(sample, pdbclass):
sample = sample.getCoords()
printInfo("Getting the sample set.")
mark = zeros_like(mrc.data)
grid = array(mrc.getGridCoords())
gridstart = array([grid[0, 0], grid[1, 0], grid[2, 0]])
step = mrc.getGridSteps()
if type(sample) == type(None):
findset = mrc.data.flatten()
else:
tempindex = array(
rint(array(((sample - grid[:, 0]) / step), dtype=float)), dtype=int)
ballindex = ([], [], [])
for i in xrange(int(floor(-sampleradius / step[0])), int(ceil(sampleradius / step[0]) + 1)):
for j in xrange(int(floor(-sampleradius / step[1])), int(ceil(sampleradius / step[1]) + 1)):
for k in xrange(int(floor(-sampleradius / step[2])), int(ceil(sampleradius / step[2]) + 1)):
if (i * step[0]) ** 2 + (j * step[1]) ** 2 + (k * step[2]) ** 2 <= sampleradius ** 2:
ballindex[0].append(i)
ballindex[1].append(j)
ballindex[2].append(k)
ballindex = [array(i, dtype=int) for i in ballindex]
k = array([[len(grid[0])], [len(grid[1])], [len(grid[2])]])
for i in xrange(len(sample)):
t = array([ballindex[0] + tempindex[i][0], ballindex[1] +
tempindex[i][1], ballindex[2] + tempindex[i][2]])
            t = t[:, (t >= 0).all(0) & (t < k).all(0)]
mark[(t[0], t[1], t[2])] = 1
findset = mrc.data[mark != 0]
printInfo("Sorting the sample set.")
findset.sort(kind='quicksort')
findsetlength = len(findset)
if assumenorm:
mu = findset.mean()
sigma = (
((findset - findset.mean()) ** 2).sum() / (findsetlength - 1)) ** .5
else:
mu = sigma = 0.
printInfo("Interpolating the data and assigning p-value.")
beta = pdb.getBetas()
coor = pdb.getCoords()
if method == 'ball':
index = (coor - gridstart) / step
for i in xrange(len(coor)):
beta[i] = interpolationball(mrc.data, index[i], step, r=way)
if assumenorm:
beta[i] = norm.cdf(-(beta[i] - mu) / sigma)
else:
beta[i] = 1. - \
binarySearch(findset, beta[i]) * 1.0 / findsetlength
elif method == 'cube':
        index = ((coor - gridstart) // step).astype(int)
for i in xrange(len(coor)):
beta[i] = interpolationcube(mrc.data[index[i][0]:index[i][
0] + 2, index[i][1]:index[i][1] + 2, index[i][2]:index[i][2] + 2], coor[i] % array(step) / array(step), way)
if assumenorm:
beta[i] = norm.cdf(-(beta[i] - mu) / sigma)
else:
beta[i] = 1. - \
binarySearch(findset, beta[i]) * 1.0 / findsetlength
pdb.setBetas(beta)
return pdb
def chainsort(x, y):
"""Chain id sort function. A-Z then number."""
if x == y:
return cmp(0, 0)
elif x.isdigit() == y.isdigit() == True:
return cmp(float(x), float(y))
elif x.isdigit() == y.isdigit() == False:
return cmp(x, y)
elif x.isdigit():
return cmp(2, 1)
else:
return cmp(1, 2)
def calcPcutoff(data, scale=5.0, **kwargs):
"""This is a function to calculate the cutoff for high p-values.
`data` could be a `prody.AtomGroup` with p-values in the Beta, the
backbone average is calculated to perform analysis. It could also be raw
number array.
A linear regression is performed by the first half data and the sigma
is calculated.
    Cutoff is set to be the first one accepted by `scale`*sigma in the
    tail of data.
    We suggest setting the cutoff for each chain: select the atoms first and
    then use this function."""
from ..IO.output import printError
from prody import AtomGroup as pdbclass
from prody.atomic.selection import Selection as selectionclass
from numpy import ndarray, array, arange
if isinstance(data, (pdbclass, selectionclass)):
data = [i for i in data.getHierView().iterResidues()]
# data.sort(cmp=lambda x,y:chainsort(x.getChid(),y.getChid()) if
# x.getChid()!=y.getChid() else cmp(x.getResnum(),y.getResnum()))
data = array([i.select('backbone').getBetas().mean() for i in data])
data.sort()
elif isinstance(data, ndarray):
data.sort()
else:
printError(
"The data format is not supported.(`prody.AtomGroup` or `numpy.ndarray`)")
return None
index = arange(len(data))
firsthalfdata = array(data[:len(data) // 2])
firsthalfindex = arange(len(firsthalfdata))
n = len(firsthalfdata)
# Regression the line
beta1 = ((firsthalfindex * firsthalfdata).sum() - n * firsthalfindex.mean() * firsthalfdata.mean()) / \
((firsthalfindex * firsthalfindex).sum() - n *
firsthalfindex.mean() * firsthalfindex.mean())
beta0 = firsthalfdata.mean() - beta1 * firsthalfindex.mean()
# Determine the RMSE
rmse = (
((firsthalfindex * beta1 + beta0 - firsthalfdata) ** 2).sum() / (n - 1)) ** .5
# Test the second half and get cutoff
tvalue = abs(index * beta1 + beta0 - data)
tbigset = (tvalue <= scale * rmse).nonzero()[0]
return data[max(tbigset)]
def showPcutoff(data, plot, scale=5.0, color=None, detail=False, **kwargs):
"""This is a function to plot the p-value cutoff.
`data` must be a `prody.AtomGroup` with p-values in the Beta, the
backbone average is calculated to perform analysis.
`color` could set to draw in specific color.
`detail` could be used to plot more detail information.
1 to plot the error bar. Provide color list.
2 to also plot sidechain information. Provide 2 colors.
A linear regression is performed by the first half data and the sigma
is calculated.
Cutoff is set to be the first one accepted by `scale`*sigma in the
tail of data.
    We suggest setting the cutoff for each chain: select the atoms first and
    then use this function."""
from ..IO.output import printError, printInfo
from prody import AtomGroup as pdbclass
from prody.atomic.selection import Selection as selectionclass
from matplotlib.axes import Axes
from numpy import ndarray, array, arange
if isinstance(data, (pdbclass, selectionclass)):
data = [i for i in data.getHierView().iterResidues()]
data.sort(cmp=lambda x, y: chainsort(x.getChid(), y.getChid()) if x.getChid(
) != y.getChid() else cmp(x.getResnum(), y.getResnum()))
labelindex = array([i.getResnum() for i in data])
else:
printError("The data format is not supported.(`prody.AtomGroup`)")
return None
data1 = array([i.select('backbone').getBetas().mean() for i in data])
data1.sort()
index = arange(len(data1))
firsthalfdata = array(data1[:len(data1) // 2])
firsthalfindex = arange(len(firsthalfdata))
n = len(firsthalfdata)
# Regression the line
beta1 = ((firsthalfindex * firsthalfdata).sum() - n * firsthalfindex.mean() * firsthalfdata.mean()) / \
((firsthalfindex * firsthalfindex).sum() - n *
firsthalfindex.mean() * firsthalfindex.mean())
beta0 = firsthalfdata.mean() - beta1 * firsthalfindex.mean()
# Determine the RMSE
rmse = (
((firsthalfindex * beta1 + beta0 - firsthalfdata) ** 2).sum() / (n - 1)) ** .5
# Test the second half and get cutoff
tvalue = abs(index * beta1 + beta0 - data1)
tbigset = (tvalue <= scale * rmse).nonzero()[0]
cutoff = data1[max(tbigset)]
if isinstance(plot, Axes):
if detail <= 0:
if type(color) != type(None):
plot.plot(labelindex, array(
[i.select('backbone').getBetas().mean() for i in data]), '-', c=color, zorder=10)
else:
plot.plot(labelindex, array(
[i.select('backbone').getBetas().mean() for i in data]), '-', zorder=10)
plot.plot(
list(plot.get_xlim()), [cutoff, cutoff], '--', c='grey', alpha=0.8, zorder=5)
dd = array([i.select('backbone').getBetas().mean() for i in data])
for i in xrange(len(dd) - 2):
if (dd[i:i + 3] > cutoff).all():
plot.plot(
labelindex[i:i + 3], dd[i:i + 3], '.-', c='red', zorder=11)
elif dd[i] > cutoff:
plot.plot(
labelindex[i:i + 1], dd[i:i + 1], '.-', c='red', zorder=11)
x = plot.get_xlim()
y = plot.get_ylim()
if detail != -1:
plot.text(
x[1] - 0.05 * (x[1] - x[0]), y[1] - 0.05 * (y[1] - y[0]),
"cutoff=%.3f" % (cutoff), va='top', multialignment='left', ha='right')
elif detail > 0:
if detail == 2:
dd = array([i.select('not backbone').getBetas().mean()
for i in data])
yerr3 = dd - \
array([i.select('not backbone').getBetas().min()
for i in data])
yerr4 = array(
[i.select('not backbone').getBetas().max() for i in data]) - dd
if type(color) != type(None):
plot.plot(
labelindex, dd, '-', c=color[1], zorder=10, alpha=0.5)
plot.errorbar(labelindex, dd, yerr=[
yerr3, yerr4], capsize=0, elinewidth=0.2, c=color[1], zorder=1)
else:
plot.plot(labelindex, dd, '-', zorder=10, alpha=0.5)
plot.errorbar(
labelindex, dd, yerr=[yerr3, yerr4], capsize=0, elinewidth=0.1, zorder=1)
if type(color) != type(None):
plot.plot(labelindex, array(
[i.select('backbone').getBetas().mean() for i in data]), '-', c=color[0], zorder=10)
else:
plot.plot(labelindex, array(
[i.select('backbone').getBetas().mean() for i in data]), '-', zorder=10)
plot.plot(
list(plot.get_xlim()), [cutoff, cutoff], '--', c='grey', alpha=0.8, zorder=5)
dd = array([i.select('backbone').getBetas().mean() for i in data])
for i in xrange(len(dd) - 2):
if (dd[i:i + 3] > cutoff).all():
plot.plot(
labelindex[i:i + 3], dd[i:i + 3], '.-', c='red', zorder=11)
elif dd[i] > cutoff:
plot.plot(
labelindex[i:i + 1], dd[i:i + 1], '.-', c='red', zorder=11)
yerr1 = dd - \
array([i.select('backbone').getBetas().min() for i in data])
yerr2 = array([i.select('backbone').getBetas().max()
for i in data]) - dd
if type(color) != type(None):
plot.errorbar(labelindex, dd, yerr=[
yerr1, yerr2], capsize=0, elinewidth=0.2, c=color[0], zorder=2)
else:
plot.errorbar(
labelindex, dd, yerr=[yerr1, yerr2], capsize=0, elinewidth=0.2, zorder=2)
x = plot.get_xlim()
y = plot.get_ylim()
plot.text(x[1] - 0.05 * (x[1] - x[0]), y[1] - 0.05 * (y[1] - y[0]),
"cutoff=%.3f" % (cutoff), va='top', multialignment='left', ha='right')
else:
try:
if detail <= 0:
if type(color) != type(None):
plot[0].plot(labelindex, array(
[i.select('backbone').getBetas().mean() for i in data]), '-', c=color, zorder=10)
plot[1].plot(data1, '-', c=color, zorder=10)
else:
plot[0].plot(labelindex, array(
[i.select('backbone').getBetas().mean() for i in data]), '-', zorder=10)
plot[1].plot(data1, '-', zorder=10)
plot[0].plot(
list(plot[0].get_xlim()), [cutoff, cutoff], '--', c='grey', alpha=0.8, zorder=5)
dd = array([i.select('backbone').getBetas().mean()
for i in data])
for i in xrange(len(dd) - 2):
if (dd[i:i + 3] > cutoff).all():
plot[0].plot(
labelindex[i:i + 3], dd[i:i + 3], '.-', c='red', zorder=11)
elif dd[i] > cutoff:
plot[0].plot(
labelindex[i:i + 1], dd[i:i + 1], '.-', c='red', zorder=11)
plot[1].plot(
plot[1].get_xlim(), [cutoff, cutoff], '--', c='grey', alpha=0.8, zorder=5)
plot[1].set_ylim(plot[0].get_ylim())
x = plot[1].get_xlim()
y = plot[1].get_ylim()
if detail != -1:
plot[1].text(
x[1] - 0.05 * (x[1] - x[0]), y[
1] - 0.05 * (y[1] - y[0]),
"cutoff=%.3f" % (cutoff), va='top', multialignment='left', ha='right')
elif detail > 0:
if detail == 2:
dd = array(
[i.select('not backbone').getBetas().mean() for i in data])
yerr3 = dd - \
array([i.select('not backbone').getBetas().min()
for i in data])
yerr4 = array(
[i.select('not backbone').getBetas().max() for i in data]) - dd
if type(color) != type(None):
plot[0].plot(
labelindex, dd, '-', c=color[1], zorder=10, alpha=0.5)
plot[0].errorbar(labelindex, dd, yerr=[
yerr3, yerr4], capsize=0, elinewidth=0.2, c=color[1], zorder=1)
else:
plot[0].plot(labelindex, dd, '-', zorder=10, alpha=0.5)
plot[0].errorbar(
labelindex, dd, yerr=[yerr3, yerr4], capsize=0, elinewidth=0.1, zorder=1)
if type(color) != type(None):
plot[0].plot(labelindex, array(
[i.select('backbone').getBetas().mean() for i in data]), '-', c=color[0], zorder=10)
plot[1].plot(data1, '-', c=color[0], zorder=10)
else:
plot[0].plot(labelindex, array(
[i.select('backbone').getBetas().mean() for i in data]), '-', zorder=10)
plot[1].plot(data1, '-', zorder=10)
plot[0].plot(
list(plot[0].get_xlim()), [cutoff, cutoff], '--', c='grey', alpha=0.8, zorder=5)
dd = array([i.select('backbone').getBetas().mean()
for i in data])
for i in xrange(len(dd) - 2):
if (dd[i:i + 3] > cutoff).all():
plot[0].plot(
labelindex[i:i + 3], dd[i:i + 3], '.-', c='red', zorder=11)
elif dd[i] > cutoff:
plot[0].plot(
labelindex[i:i + 1], dd[i:i + 1], '.-', c='red', zorder=11)
yerr1 = dd - \
array([i.select('backbone').getBetas().min()
for i in data])
yerr2 = array([i.select('backbone').getBetas().max()
for i in data]) - dd
if type(color) != type(None):
plot[0].errorbar(labelindex, dd, yerr=[
yerr1, yerr2], capsize=0, elinewidth=0.2, c=color[0], zorder=2)
else:
plot[0].errorbar(
labelindex, dd, yerr=[yerr1, yerr2], capsize=0, elinewidth=0.2, zorder=2)
plot[1].plot(
plot[1].get_xlim(), [cutoff, cutoff], '--', c='grey', alpha=0.8, zorder=5)
plot[1].set_ylim(plot[0].get_ylim())
x = plot[1].get_xlim()
y = plot[1].get_ylim()
plot[1].text(
x[1] - 0.05 * (x[1] - x[0]), y[1] - 0.05 * (y[1] - y[0]),
"cutoff=%.3f" % (cutoff), va='top', multialignment='left', ha='right')
except:
printError(
"The plot type wrong. Must be 1 or 2 `matplotlib.axes.Axes`.")
return None
def genPvalueSample(mrc, sample=None, sampleradius=3.0, **kwargs):
"""Given the `mrc` and a sample structure, return the sample set around the sample
structure with radius `sampleradius`.
"""
from ..IO.output import printError, printInfo
from .mrc import MRC as MRCclass
from prody import AtomGroup as pdbclass
from numpy import ndarray, zeros_like, array, floor, ceil, rint
if not isinstance(mrc, MRCclass):
printError("Only mbio.MRC class supported for `mrc`.")
return None
if type(sample) == type(None):
sample = None
elif isinstance(sample, ndarray):
if not (sample.shape == (3,) or (len(sample.shape) == 2 and sample.shape[1] == 3)):
printError("The sample coordinates must has 3 columns.")
return None
if sample.shape == (3,):
sample = array([sample])
elif isinstance(sample, pdbclass):
sample = sample.getCoords()
printInfo("Getting the sample set.")
mark = zeros_like(mrc.data)
grid = array(mrc.getGridCoords())
gridstart = array([grid[0, 0], grid[1, 0], grid[2, 0]])
step = mrc.getGridSteps()
if type(sample) == type(None):
findset = mrc.data.flatten()
else:
tempindex = array(
rint(array(((sample - grid[:, 0]) / step), dtype=float)), dtype=int)
ballindex = ([], [], [])
for i in xrange(int(floor(-sampleradius / step[0])), int(ceil(sampleradius / step[0]) + 1)):
for j in xrange(int(floor(-sampleradius / step[1])), int(ceil(sampleradius / step[1]) + 1)):
for k in xrange(int(floor(-sampleradius / step[2])), int(ceil(sampleradius / step[2]) + 1)):
if (i * step[0]) ** 2 + (j * step[1]) ** 2 + (k * step[2]) ** 2 <= sampleradius ** 2:
ballindex[0].append(i)
ballindex[1].append(j)
ballindex[2].append(k)
ballindex = [array(i, dtype=int) for i in ballindex]
k = array([[len(grid[0])], [len(grid[1])], [len(grid[2])]])
for i in xrange(len(sample)):
t = array([ballindex[0] + tempindex[i][0], ballindex[1] +
tempindex[i][1], ballindex[2] + tempindex[i][2]])
            t = t[:, (t >= 0).all(0) & (t < k).all(0)]
mark[(t[0], t[1], t[2])] = 1
findset = mrc.data[mark != 0]
printInfo("Sorting the sample set.")
findset.sort(kind='quicksort')
return findset
def transCylinder(pdb, **kwargs):
"""Transfer the PDB fragment to Cylinder.
Given the PDB, extract the CA C N and generate the center, 3 directions of the cube
and the length."""
from ..IO.output import printInfo
from numpy.linalg import svd
from numpy import cross, array, ndarray
if not isinstance(pdb, ndarray):
p1 = pdb.select("name CA C N").copy()
if p1 is None:
printError("The pdb has no CA C or N in the backbone.")
return None
if p1.numAtoms() < 15:
printError("The atom number({0}) is not enough to perform calculation.".format(
p1.numAtoms()))
printError("The result is not reliable.")
data = p1.getCoords()
else:
data = pdb
datamean = data.mean(axis=0)
uu, dd, vv = svd(data - datamean)
cent = datamean
dirc1 = vv[0]
if abs((dirc1 ** 2).sum() ** .5 - 1) > 1e-10:
raise ValueError(
"length of dirc is not 1, is {0}".format((dirc1 ** 2).sum() ** .5))
if (data[-1] - data[0]).dot(dirc1) < 0:
dirc1 = -dirc1
rank = (data - cent).dot(dirc1)
rankrange = [rank.min(), rank.max()]
dirc2 = cent - cent.dot(dirc1) * dirc1
dirc2 = dirc2 / ((dirc2 ** 2).sum() ** .5)
dirc3 = cross(dirc1, dirc2, axis=0)
dirc3 = dirc3 / ((dirc3 ** 2).sum() ** .5)
dirc = array((dirc1, dirc2, dirc3))
return cent, dirc, rankrange
def showMRCConnection(mrc, cutoff=2, **kwargs):
"""Plot 3D plot of connected parts in different color for MRC."""
from matplotlib import pyplot as plt
from matplotlib import use as matplotlibuse
import mpl_toolkits.mplot3d.axes3d as p3
from mpl_toolkits.mplot3d import Axes3D
from ..Application.setting import getMatplotlibDisplay
from ..Application.plotting import setAxesEqual
from numpy import array
try:
if not getMatplotlibDisplay():
matplotlibuse('Agg')
except:
pass
fig = plt.figure(figsize=(6, 6), facecolor='white')
ax = p3.Axes3D(fig, aspect=1)
ax.w_xaxis.set_pane_color((0, 0, 0))
ax.w_yaxis.set_pane_color((0, 0, 0))
ax.w_zaxis.set_pane_color((0, 0, 0))
ax.w_xaxis.line.set_lw(0)
ax.w_yaxis.line.set_lw(0)
ax.w_zaxis.line.set_lw(0)
classes = {}
step = mrc.getGridSteps()
cutoff = cutoff**2
for i, j, k in zip(*mrc.data.nonzero()):
if mrc.data[i, j, k] == 0:
continue
if mrc.data[i, j, k] not in classes.keys():
classes[mrc.data[i, j, k]] = [[i, j, k]]
else:
classes[mrc.data[i, j, k]].append([i, j, k])
for ty, i in zip(classes.keys(), xrange(len(classes))):
color = plt.cm.gist_ncar(i * 1. / len(classes) * .9)
pos = array(classes[ty])
ax.scatter(pos[:, 0] * step[0] + mrc.origin[0],
pos[:, 1] * step[1] + mrc.origin[1],
pos[:, 2] * step[2] + mrc.origin[2], lw=0, c=color, zorder=10)
if cutoff > 0:
for j in xrange(len(pos)):
for k in xrange(j):
if (((pos[j] - pos[k]) * step)**2).sum() <= cutoff:
ax.plot(pos[[j, k], 0] * step[0] + mrc.origin[0],
pos[[j, k], 1] * step[1] + mrc.origin[1],
pos[[j, k], 2] * step[2] + mrc.origin[2], lw=3, c=color, zorder=10)
del pos
ax.set_xlabel('X', fontsize=15)
ax.set_ylabel('Y', fontsize=15)
ax.set_zlabel('Z', fontsize=15)
setAxesEqual(ax)
del classes
del ax
# plt.ion()
# try:
# if getMatplotlibDisplay():
# plt.show()
# except:
# pass
return fig
def showMRCConnectionEach(mrc, cutoff=2, path=None, **kwargs):
"""Plot 3D plot of connected parts in different color for MRC."""
from matplotlib import pyplot as plt
from matplotlib import use as matplotlibuse
import mpl_toolkits.mplot3d.axes3d as p3
from mpl_toolkits.mplot3d import Axes3D
from ..Application.setting import getMatplotlibDisplay
from ..Application.plotting import setAxesEqual
from numpy import array
import os
try:
if not getMatplotlibDisplay():
matplotlibuse('Agg')
except:
pass
if path is None:
path = os.getcwd()
fig = plt.figure(figsize=(6, 6), facecolor='white')
ax = p3.Axes3D(fig, aspect=1)
ax.w_xaxis.set_pane_color((0, 0, 0))
ax.w_yaxis.set_pane_color((0, 0, 0))
ax.w_zaxis.set_pane_color((0, 0, 0))
ax.w_xaxis.line.set_lw(0)
ax.w_yaxis.line.set_lw(0)
ax.w_zaxis.line.set_lw(0)
classes = {}
step = mrc.getGridSteps()
grid = mrc.getGridCoords()
cutoff = cutoff**2
for i, j, k in zip(*mrc.data.nonzero()):
if mrc.data[i, j, k] == 0:
continue
if mrc.data[i, j, k] not in classes.keys():
classes[mrc.data[i, j, k]] = [[i, j, k]]
else:
classes[mrc.data[i, j, k]].append([i, j, k])
sca = ax.scatter([60, 240], [60, 240], [60, 240], lw=0, zorder=10)
plt.ion()
ax.set_xlabel('X', fontsize=15)
ax.set_ylabel('Y', fontsize=15)
ax.set_zlabel('Z', fontsize=15)
setAxesEqual(ax)
for ty, i in zip(classes.keys(), xrange(len(classes))):
color = plt.cm.gist_ncar(i * 1. / len(classes) * .9)
pos = array(classes[ty])
sca._offsets3d = pos[:, 0] * step[0] + mrc.origin[0], pos[:, 1] * \
step[1] + mrc.origin[1], pos[:, 2] * step[2] + mrc.origin[2]
sca._facecolor3d = color
del pos
plt.savefig(os.path.join(path, str(i) + '.png'))
del classes
del ax
return fig
def testfit(pos, step):
from numpy import array, diag
from numpy.linalg import svd
data = array(pos) * step
datamean = data.mean(axis=0)
uu, dd, vv = svd(data - datamean, full_matrices=False)
d = dd**2
dd[0] = 0
if (((uu.dot(diag(dd)).dot(vv))**2).sum(1)**.5 > 4).any():
return 2
elif d[0] / d.sum() > .6:
return 1
else:
return 0
# Old 3
# if (((uu.dot(diag(dd)).dot(vv))**2).sum(1)**.5<6).sum()*1./data.shape[0]>.9:
# return 1
# else:
# return 2
# Old 2
# if d[0]/d.sum() <.8:
# return 2
# else:
# return 1
# Old 1
# if len(pos)>30:
# return 2
# else:
# return 1
def mrcSegment(mrc, percentage=0.001, cutoff=3, autostop=False, **kwargs):
"""Segment the MRC with the top `percentage` points.
Only two points closer than the cutoff will be taken as connected."""
from numpy import floor, ceil, argsort, zeros, zeros_like, array, unique
from ..IO.output import printUpdateInfo, finishUpdate, printInfo
from .mrc import MRC
maxnum = int(percentage * mrc.data.size)
args = argsort(mrc.data.ravel())[:mrc.data.size - maxnum - 1:-1]
pos = zeros((maxnum, 3), dtype=int)
pos[:, 0] = args // (mrc.data.shape[1] * mrc.data.shape[2])
pos[:, 1] = args % (mrc.data.shape[1] *
mrc.data.shape[2]) // mrc.data.shape[2]
pos[:, 2] = args % (mrc.data.shape[2])
data = mrc.data.ravel()[args]
save = zeros_like(mrc.data, dtype=int)
    save[pos[:, 0], pos[:, 1], pos[:, 2]] = -1
save1 = zeros_like(mrc.data, dtype=float)
origin = mrc.origin
grid = mrc.getGridSteps()
step = cutoff / grid
ranges = [xrange(int(floor(-step[i])), int(ceil(step[i]) +
int(step[i].is_integer()))) for i in xrange(3)]
cutoff2 = cutoff**2
classnum = 0
save1count = 0
classmp = {}
classmpreverse = {}
classcount = {}
classpos = {}
statuscount = [0, 0, 0]
for posnum in xrange(maxnum):
if posnum % 1000 == 0:
printUpdateInfo("Building {:10d}/{:10d}".format(posnum, maxnum))
temp = pos[posnum]
closeset = []
closetype = []
closenumber = 0
for i in ranges[0]:
for j in ranges[1]:
for k in ranges[2]:
if save[temp[0] + i, temp[1] + j, temp[2] + k] > 0 and (i * grid[0])**2 + (j * grid[1])**2 + (k * grid[2])**2 <= cutoff2:
closeset.append([temp + array([i, j, k])])
closetype.append(
classmp[save[temp[0] + i, temp[1] + j, temp[2] + k]])
closenumber += 1
if closenumber == 0:
classnum += 1
save[temp[0], temp[1], temp[2]] = classnum
classcount[classnum] = [1, 0]
statuscount[0] += 1
classmp[classnum] = classnum
classmpreverse[classnum] = [classnum]
classpos[classnum] = [pos[posnum]]
elif len(unique(closetype)) == 1:
typeclass = closetype[0]
save[temp[0], temp[1], temp[2]] = typeclass
orilen = classcount[typeclass][0]
classcount[typeclass][0] += 1
classpos[typeclass].append(pos[posnum])
if classcount[typeclass][1] == 0:
if classcount[typeclass][0] >= 10:
statuscount[0] -= 1
classcount[typeclass][1] = testfit(
classpos[typeclass], grid)
statuscount[classcount[typeclass][1]] += 1
elif classcount[typeclass][1] == 1:
statuscount[1] -= 1
classcount[typeclass][1] = testfit(classpos[typeclass], grid)
statuscount[classcount[typeclass][1]] += 1
if classcount[typeclass][1] == 2:
save1count += 1
tempposlist = classpos[typeclass]
for i in xrange(orilen):
save1[tempposlist[i][0], tempposlist[i]
[1], tempposlist[i][2]] = save1count
del tempposlist
else:
pass
del typeclass
else:
closetypesort = unique(closetype)
typeclass = closetypesort[0]
save[temp[0], temp[1], temp[2]] = typeclass
orilen = classcount[typeclass][0]
classcount[typeclass][0] += 1
classpos[typeclass].append(pos[posnum])
hasnocylinder = False
for i in closetypesort[1:]:
if classcount[i][1] == 2:
hasnocylinder = True
classcount[typeclass][0] += classcount[i][0]
classpos[typeclass] += classpos[i]
classmp[i] = typeclass
for j in classmpreverse[i]:
classmp[j] = typeclass
classmpreverse[typeclass] += classmpreverse[i]
classmpreverse.pop(i)
if classcount[typeclass][1] == 0:
if classcount[typeclass][0] >= 10:
statuscount[0] -= 1
classcount[typeclass][1] = testfit(
classpos[typeclass], grid) if not hasnocylinder else 2
statuscount[classcount[typeclass][1]] += 1
if classcount[typeclass][1] == 2:
for i in closetypesort[1:]:
if classcount[i][1] == 1:
save1count += 1
tempposlist = classpos[i]
for i in xrange(len(classpos[i])):
save1[tempposlist[i][0], tempposlist[i][
1], tempposlist[i][2]] = save1count
del tempposlist
elif classcount[typeclass][1] == 1:
statuscount[1] -= 1
classcount[typeclass][1] = testfit(
classpos[typeclass], grid) if not hasnocylinder else 2
statuscount[classcount[typeclass][1]] += 1
if classcount[typeclass][1] == 2:
for i in closetypesort[1:]:
if classcount[i][1] == 1:
save1count += 1
tempposlist = classpos[i]
for i in xrange(len(classpos[i])):
save1[tempposlist[i][0], tempposlist[i]
[1], tempposlist[i][2]] = save1count
del tempposlist
save1count += 1
tempposlist = classpos[typeclass]
for i in xrange(orilen):
save1[tempposlist[i][0], tempposlist[i]
[1], tempposlist[i][2]] = save1count
del tempposlist
else:
pass
for i in closetypesort[1:]:
statuscount[classcount[i][1]] -= 1
classcount.pop(i)
classpos.pop(i)
del typeclass, closetypesort
del temp, closeset, closetype, closenumber
if autostop:
if statuscount[0] == 0 and statuscount[1] == 0:
finishUpdate()
printInfo('Autostop')
break
if statuscount[2] != 0 and statuscount[1] == 0:
finishUpdate()
printInfo('Autostop')
break
for i in classcount:
if classcount[i][1] == 1:
save1count += 1
tempposlist = classpos[i]
for i in xrange(len(tempposlist)):
save1[tempposlist[i][0], tempposlist[i]
[1], tempposlist[i][2]] = save1count
del classnum, save1count, classmp, classmpreverse, classcount, classpos
finishUpdate()
mrc1 = MRC()
for i in mrc.header.__dict__:
setattr(mrc1.header, i, getattr(mrc.header, i))
mrc1.data = save1
mrc1.update()
return mrc1
def gaussian3D(matrix, sigma, *args, **kwargs):
"""Gaussian 3D filter with specific sigma. The filter is ignored after 4*sigma.
The program is written in C(OpenMP) to perform quicker calculation than `scipy.ndimage`.
The boundary condition using wrap mode in scipy.
wrap:
7 8 9|1 2 3 4 5 6 7 8 9|1 2 3
"""
from ..IO.output import printError
from numpy import zeros_like
from .Cmrc_analysis_p import Cgaussian
    if not matrix.flags.contiguous:
matrix = matrix.copy()
result = zeros_like(matrix)
result = Cgaussian(matrix=matrix, sigma=sigma, result=result)
if isinstance(result, tuple):
if result[0] is None:
printError(result[1])
else:
printError("Get wrong return from C function.")
return None
return result
| wzmao/mbio | mbio/EM/analysis.py | Python | mit | 39,422 |
# Link: https://leetcode.com/problems/longest-common-prefix/
class Solution:
# @param {string[]} strs
# @return {string}
def longestCommonPrefix(self, strs):
        if not strs:
return ''
if len(strs) == 1:
return strs[0]
ret = []
for i in range(0, len(strs[0])):
for j in range(1, len(strs)):
if len(strs[j]) == i or strs[j][i] != strs[0][i]:
return ''.join(ret)
ret.append(strs[0][i])
return ''.join(ret)
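
# --- Usage examples (not part of the original file) ---
#     Solution().longestCommonPrefix(["flower", "flow", "flight"])  # -> 'fl'
#     Solution().longestCommonPrefix(["dog", "racecar", "car"])     # -> ''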
| ibigbug/leetcode | longest-common-prefix.py | Python | mit | 542 |
from __future__ import absolute_import
import numpy as np
from keras import backend as K
from .utils import utils
def negate(grads):
"""Negates the gradients.
Args:
grads: A numpy array of grads to use.
Returns:
The negated gradients.
"""
return -grads
def absolute(grads):
"""Computes absolute gradients.
Args:
grads: A numpy array of grads to use.
Returns:
The absolute gradients.
"""
return np.abs(grads)
def invert(grads):
"""Inverts the gradients.
Args:
grads: A numpy array of grads to use.
Returns:
The inverted gradients.
"""
return 1. / (grads + K.epsilon())
def relu(grads):
"""Clips negative gradient values.
Args:
grads: A numpy array of grads to use.
Returns:
The rectified gradients.
"""
    grads[grads < 0.] = 0.  # note: modifies the input array in place
    return grads
def small_values(grads):
"""Can be used to highlight small gradient values.
Args:
grads: A numpy array of grads to use.
Returns:
The modified gradients that highlight small values.
"""
return absolute(invert(grads))
def get(identifier):
return utils.get_identifier(identifier, globals(), __name__)
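
# --- Hedged usage sketch (not part of the original file) ---
#     import numpy as np
#     g = np.array([1.0, -2.0, 0.5])
#     negate(g)        # -> array([-1. ,  2. , -0.5])
#     small_values(g)  # largest where |g| is smallest: absolute(invert(g))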
| raghakot/keras-vis | vis/grad_modifiers.py | Python | mit | 1,247 |
import parser
import logging
def test(code):
log = logging.getLogger()
parser.parser.parse(code, tracking=True)
print "Programa con 1 var y 1 asignacion bien: "
s = "program id; var beto: int; { id = 1234; }"
test(s)
print "Original: \n{0}".format(s)
print "\n"
print "Programa con 1 var mal: "
s = "program ; var beto: int; { id = 1234; }"
test(s)
print "Original: \n{0}".format(s)
print "\n"
print "Programa sin vars bien: "
s = "program id; { id = 1234; }"
test(s)
print "Original: \n{0}".format(s)
print "\n"
print "Programa con var mal: "
s = "program id; var beto int; { id = 1234; }"
test(s)
print "Original: \n{0}".format(s)
print "\n"
print "Programa con var mal: "
s = "program id; var beto: int { id = 1234; }"
test(s);
print "Original: \n{0}".format(s)
print "\n"
print "Programa con var mal: "
s = "program id; beto: int; { id = 1234; }"
test(s)
print "Original: \n{0}".format(s)
print "\n"
print "Programa con bloque vacio bien: "
s = "program id; var beto: int; { }"
test(s)
print "Original: \n{0}".format(s)
print "\n"
print "Programa con bloque lleno y estatuto mal: "
s = "program id; var beto: int; { id = 1234; id2 = 12345 }"
test(s)
print "Original: \n{0}".format(s)
print "\n"
print "Programa con bloque lleno y condicion mal: "
s = "program id; var beto: int; { id = 1234; if ( 8 > 3 ) { id3 = 34234; } else { } }"
test(s)
print "\n"
print "Original: \n{0}".format(s)
| betoesquivel/PLYpractice | testingParser.py | Python | mit | 1,412 |
# -*- coding: utf-8 -*-
#
# CoderDojo Twin Cities Python for Minecraft documentation build configuration file, created by
# sphinx-quickstart on Fri Oct 24 00:52:04 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.todo']
todo_include_todos = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'contents'
# General information about the project.
project = u'CoderDojo Twin Cities Python for Minecraft'
copyright = u'by multiple <a href="https://github.com/CoderDojoTC/python-minecraft/graphs/contributors">contributors</a>'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'CoderDojoTwinCitiesPythonforMinecraftdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'CoderDojoTwinCitiesPythonforMinecraft.tex', u'CoderDojo Twin Cities Python for Minecraft Documentation',
u'Mike McCallister', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'coderdojotwincitiespythonforminecraft', u'CoderDojo Twin Cities Python for Minecraft Documentation',
[u'Mike McCallister'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'CoderDojoTwinCitiesPythonforMinecraft', u'CoderDojo Twin Cities Python for Minecraft Documentation',
u'Mike McCallister', 'CoderDojoTwinCitiesPythonforMinecraft', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
CoderDojoTC/python-minecraft
|
docs/conf.py
|
Python
|
mit
| 8,611
|
# -*- coding: utf-8 -*-
import unittest
from wechatpy.constants import WeChatErrorCode
class WeChatErrorCodeTestCase(unittest.TestCase):
"""ensure python compatibility"""
def test_error_code(self):
self.assertEqual(-1000, WeChatErrorCode.SYSTEM_ERROR.value)
self.assertEqual(42001, WeChatErrorCode.EXPIRED_ACCESS_TOKEN.value)
self.assertEqual(48001, WeChatErrorCode.UNAUTHORIZED_API.value)
def test_enum(self):
self.assertEqual(WeChatErrorCode.SYSTEM_BUSY, WeChatErrorCode(-1))
|
wechatpy/wechatpy
|
tests/test_constants.py
|
Python
|
mit
| 527
|
from pudzu.charts import *
from pudzu.sandbox.bamboo import *
flags = pd.read_csv("../dataviz/datasets/countries.csv").filter_rows("organisations >> un").split_columns('country', "|").split_rows('country').set_index('country').drop_duplicates(subset='flag', keep='first')
def flag_image(c):
return Image.from_url_with_cache(flags['flag'][c]).convert("RGBA").remove_transparency("white").convert("RGB")
def average_image(imgs, size, weights=None):
if weights is None: weights = [1 for _ in imgs]
average = ImageColor.from_linear(sum(ImageColor.to_linear(np.array(img.resize(size))) * w for img,w in zip(imgs, weights)) / sum(weights))
return Image.fromarray(np.uint8(average))
def average_flag(df, size, weights=None):
if callable(weights): weights = weights(df)
flags = [flag_image(i) for i in df.index]
return average_image(flags, (size[0]-2,size[1]-2), weights).pad(1, "black")
continents = flags.groupby("continent").count().index
continentlabels = [ Image.from_text(continent.upper(), calibri(60, bold=True), "black", "white") for continent in continents ]
world = average_flag(flags, (1200,800))
world_weighted = average_flag(flags, (1200,800), lambda df: df.population)
continent = Image.from_array([continentlabels, [average_flag(flags[flags.continent == continent], (600, 400)) for continent in continents]], padding=5, bg="white")
continent_weighted = Image.from_array([continentlabels, [average_flag(flags[flags.continent == continent], (600, 400), lambda df: df.population) for continent in continents]], padding=5, bg="white")
os.makedirs("output/averageflags", exist_ok=True)
world.save("output/averageflags/world.png")
world_weighted.save("output/averageflags/world_weighted.png")
continent.save("output/averageflags/continents.png")
continent_weighted.save("output/averageflags/continents_weighted.png")
# quick and dirty scrape of some area data: will add to the country dataset at some point
df = pd.read_html("https://en.wikipedia.org/wiki/List_of_countries_and_dependencies_by_area")[0]
df = df.rename(columns=df.iloc[0])[1:].fillna("0")
df = df.assign_rows(country=lambda d: d[next(c for c in df.columns if "state" in c)].split(" (")[0].split(" !")[-1].strip(" \xa0"),
area=lambda d: d[next(c for c in df.columns if "Total" in c)].split(" (")[0].split(chr(9824))[1].replace(",","").replace("<","")).set_index("country")
flags = flags.assign_rows(area=lambda d,c: df["area"][c]).apply(pd.to_numeric, errors='ignore')
world_area = average_flag(flags, (1200,800), lambda df: df.area)
world_area.save("output/averageflags/world_area.png")
world_density = average_flag(flags, (1200,800), lambda df: df.population / df.area)
world_density.save("output/averageflags/world_density.png")
continent_area = Image.from_array([continentlabels, [average_flag(flags[flags.continent == continent], (600, 400), lambda df: df.area) for continent in continents]], padding=5, bg="white")
continent_area.save("output/averageflags/continents_area.png")
continent_density = Image.from_array([continentlabels, [average_flag(flags[flags.continent == continent], (600, 400), lambda df: df.population / df.area) for continent in continents]], padding=5, bg="white")
continent_density.save("output/averageflags/continents_density.png")
|
Udzu/pudzu
|
photosets/averageflags.py
|
Python
|
mit
| 3,300
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from rcr.robots.dagucar.DaguCar import DaguCar
def main():
car = DaguCar( "/dev/rfcomm1", 500 )
car.MoveForward( 15 )
car.Pause( 1000 )
car.MoveBackward( 15 )
car.Pause( 1000 )
car.MoveLeft( 15 )
car.Pause( 1000 )
car.MoveRight( 15 )
car.Pause( 1000 )
car.MoveForwardLeft( 15 )
car.Pause( 1000 )
car.MoveForwardRight( 15 )
car.Pause( 1000 )
car.MoveBackwardLeft( 15 )
car.Pause( 1000 )
car.MoveBackwardRight( 15 )
car.Pause( 1000 )
car.Stop()
car.Close()
###
main()
|
titos-carrasco/DaguCar
|
Python/TestDaguCar.py
|
Python
|
mit
| 591
|
from django.conf.urls import url
from cats.views.cat import (
CatList,
CatDetail
)
from cats.views.breed import (
BreedList,
BreedDetail
)
urlpatterns = [
# Cats URL's
url(r'^cats/$', CatList.as_view(), name='list'),
url(r'^cats/(?P<pk>\d+)/$', CatDetail.as_view(), name='detail'),
# Breeds URL's
url(r'^breeds/$', BreedList.as_view(), name='list_breeds'),
url(r'^breeds/(?P<pk>\d+)/$', BreedDetail.as_view(), name='detail_breed'),
]
|
OscaRoa/api-cats
|
cats/urls.py
|
Python
|
mit
| 475
|
import os
import sys
import yaml
from etllib.conf import Conf
from etllib.yaml_helper import YAMLHelper
from plugins import PluginEngine
class RulesEngine(list):
def __init__(self):
self.rules_path = os.path.dirname(os.path.realpath(__file__))
self.conf = Conf()
self.load()
self.filter_recursion()
self.pe = PluginEngine()
def parse_rule_file(self, file_path):
yaml_data = YAMLHelper(file_path).read()
yaml_data['rule_name'] = os.path.split(file_path)[1]
if yaml_data['rule_type'] == 'group':
# Group Rule, i.e. with child rules
pass
else:
# Single Rule, i.e. with no child rules
# Get Data Nodes parameters from Config file
src = yaml_data['source_node']
dst = yaml_data['destination_node']
yaml_data['source_node'] = self.conf.get_data_nodes(src)
yaml_data['destination_node'] = self.conf.get_data_nodes(dst)
return yaml_data
def load(self):
rule_files = [os.path.join(self.rules_path, f)
for f in os.listdir(self.rules_path)
if os.path.isfile(os.path.join(self.rules_path, f))
and f.endswith('.yml')
]
for rule_file in rule_files:
self.append(self.parse_rule_file(rule_file))
def filter_recursion(self):
# Filter out group rules with members of type groups
for rule in self:
if rule['rule_type'] == 'group':
rule_members = [
child for child in rule['members']
if self.get_rule_by_name(child)['rule_type'] == 'single'
]
rule['members'] = rule_members
def get_rule_by_name(self, rule_name):
for rule in self:
if rule['rule_name'] == rule_name:
return rule
#print 'rule not found'
def expand_action(self, action):
if isinstance(action, str):
if action.startswith('$rule:'):
_, subrule_name, subrule_field = action.strip().split(':')
subrule = self.get_rule_by_name(subrule_name)
return self.apply_rule_ingress(subrule)[subrule_field]
else:
return action
elif isinstance(action, dict):
for key, val in action.iteritems():
action[key] = self.expand_action(val)
return action
else:
return action
def apply_rule_ingress(self, rule):
ingress_plugin_name = rule['ingress_plugin']
ingress_plugin_runnable = self.pe[ingress_plugin_name].init(rule)
data = ingress_plugin_runnable.run(rule, None)
ingress_plugin_runnable.exit()
return data
def apply_rule_egress(self, rule, data):
egress_plugin_name = rule['egress_plugin']
egress_plugin_runnable = self.pe[egress_plugin_name].init(rule)
egress_plugin_runnable.run(rule, data)
egress_plugin_runnable.exit()
def apply_data_processors(self, rule, data):
if not rule.get('data_processors', False):
return data
if type(rule['data_processors']) is str:
data_processors = [rule['data_processors']]
else:
data_processors = rule['data_processors']
for processor_plugin_name in data_processors:
processor_plugin_runnable = self.pe[processor_plugin_name].init(rule)
data = processor_plugin_runnable.run(rule, data)
processor_plugin_runnable.exit()
return data
def apply_rule(self, rule):
print 'Applying {0}'.format(rule['rule_name'])
if rule['rule_type'] == 'single':
rule['action'] = self.expand_action(rule['action'])
data = self.apply_rule_ingress(rule)
data = self.apply_data_processors(rule, data)
self.apply_rule_egress(rule, data)
else:
for child_rule_name in rule['members']:
self.apply_rule_by_name(child_rule_name)
def apply_rule_by_name(self, rule_name):
for rule in self:
if rule['rule_name'] == rule_name:
self.apply_rule(rule)
break
else:
sys.exit('Error! Rule not found')
def apply_rules(self):
for rule in self:
if rule['active']:
self.apply_rule(rule)
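# A hypothetical rule-file sketch for reference; the field names follow
# parse_rule_file/apply_rule above, the values are illustrative only:
#
#   rule_type: single
#   active: true
#   source_node: db_prod              # resolved via Conf.get_data_nodes
#   destination_node: archive_node
#   ingress_plugin: reader_plugin
#   egress_plugin: writer_plugin
#   data_processors: cleanup_plugin
#   action:
#       key: "$rule:lookup.yml:field"   # expanded by expand_action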
|
gr33ndata/rivellino
|
ruleset/__init__.py
|
Python
|
mit
| 4,470
|
import py
from rpython.rlib.signature import signature, finishsigs, FieldSpec, ClassSpec
from rpython.rlib import types
from rpython.annotator import model
from rpython.rtyper.llannotation import SomePtr
from rpython.annotator.signature import SignatureError
from rpython.translator.translator import TranslationContext, graphof
from rpython.rtyper.lltypesystem import rstr
from rpython.rtyper.annlowlevel import LowLevelAnnotatorPolicy
def annotate_at(f, policy=None):
t = TranslationContext()
t.config.translation.check_str_without_nul = True
a = t.buildannotator(policy=policy)
a.annotate_helper(f, [model.s_ImpossibleValue]*f.func_code.co_argcount, policy=policy)
return a
def sigof(a, f):
# returns [param1, param2, ..., ret]
g = graphof(a.translator, f)
return [a.binding(v) for v in g.startblock.inputargs] + [a.binding(g.getreturnvar())]
def getsig(f, policy=None):
a = annotate_at(f, policy=policy)
return sigof(a, f)
def check_annotator_fails(caller):
exc = py.test.raises(model.AnnotatorError, annotate_at, caller).value
assert caller.func_name in str(exc)
def test_bookkeeping():
@signature('x', 'y', returns='z')
def f(a, b):
return a + len(b)
f.foo = 'foo'
assert f._signature_ == (('x', 'y'), 'z')
assert f.func_name == 'f'
assert f.foo == 'foo'
assert f(1, 'hello') == 6
def test_basic():
@signature(types.int(), types.str(), returns=types.char())
def f(a, b):
return b[a]
assert getsig(f) == [model.SomeInteger(), model.SomeString(), model.SomeChar()]
def test_arg_errors():
@signature(types.int(), types.str(), returns=types.int())
def f(a, b):
return a + len(b)
@check_annotator_fails
def ok_for_body(): # would give no error without signature
f(2.0, 'b')
@check_annotator_fails
def bad_for_body(): # would give error inside 'f' body, instead errors at call
f('a', 'b')
def test_return():
@signature(returns=types.str())
def f():
return 'a'
assert getsig(f) == [model.SomeString()]
@signature(types.str(), returns=types.str())
def f(x):
return x
def g():
return f('a')
a = annotate_at(g)
assert sigof(a, f) == [model.SomeString(), model.SomeString()]
def test_return_errors():
@check_annotator_fails
@signature(returns=types.int())
def int_not_char():
return 'a'
@check_annotator_fails
@signature(types.str(), returns=types.int())
def str_to_int(s):
return s
@signature(returns=types.str())
def str_not_None():
return None
@check_annotator_fails
def caller_of_str_not_None():
return str_not_None()
@py.test.mark.xfail
def test_return_errors_xfail():
@check_annotator_fails
@signature(returns=types.str())
def str_not_None():
return None
def test_none():
@signature(returns=types.none())
def f():
pass
assert getsig(f) == [model.s_None]
def test_float():
@signature(types.longfloat(), types.singlefloat(), returns=types.float())
def f(a, b):
return 3.0
assert getsig(f) == [model.SomeLongFloat(), model.SomeSingleFloat(), model.SomeFloat()]
def test_unicode():
@signature(types.unicode(), returns=types.int())
def f(u):
return len(u)
assert getsig(f) == [model.SomeUnicodeString(), model.SomeInteger()]
def test_str0():
@signature(types.unicode0(), returns=types.str0())
def f(u):
return 'str'
assert getsig(f) == [model.SomeUnicodeString(no_nul=True),
model.SomeString(no_nul=True)]
def test_ptr():
policy = LowLevelAnnotatorPolicy()
@signature(types.ptr(rstr.STR), returns=types.none())
def f(buf):
pass
argtype = getsig(f, policy=policy)[0]
assert isinstance(argtype, SomePtr)
assert argtype.ll_ptrtype.TO == rstr.STR
def g():
f(rstr.mallocstr(10))
getsig(g, policy=policy)
def test_list():
@signature(types.list(types.int()), returns=types.int())
def f(a):
return len(a)
argtype = getsig(f)[0]
assert isinstance(argtype, model.SomeList)
item = argtype.listdef.listitem
assert item.s_value == model.SomeInteger()
assert item.resized == True
@check_annotator_fails
def ok_for_body():
f(['a'])
@check_annotator_fails
def bad_for_body():
f('a')
@signature(returns=types.list(types.char()))
def ff():
return ['a']
@check_annotator_fails
def mutate_broader():
ff()[0] = 'abc'
@check_annotator_fails
def mutate_unrelated():
ff()[0] = 1
@check_annotator_fails
@signature(types.list(types.char()), returns=types.int())
def mutate_in_body(l):
l[0] = 'abc'
return len(l)
def can_append():
l = ff()
l.append('b')
getsig(can_append)
def test_array():
@signature(returns=types.array(types.int()))
def f():
return [1]
rettype = getsig(f)[0]
assert isinstance(rettype, model.SomeList)
item = rettype.listdef.listitem
assert item.s_value == model.SomeInteger()
assert item.resized == False
def try_append():
l = f()
l.append(2)
check_annotator_fails(try_append)
def test_dict():
@signature(returns=types.dict(types.str(), types.int()))
def f():
return {'a': 1, 'b': 2}
rettype = getsig(f)[0]
assert isinstance(rettype, model.SomeDict)
assert rettype.dictdef.dictkey.s_value == model.SomeString()
assert rettype.dictdef.dictvalue.s_value == model.SomeInteger()
def test_instance():
class C1(object):
pass
class C2(C1):
pass
class C3(C2):
pass
@signature(types.instance(C3), returns=types.instance(C2))
def f(x):
assert isinstance(x, C2)
return x
argtype, rettype = getsig(f)
assert isinstance(argtype, model.SomeInstance)
assert argtype.classdef.classdesc.pyobj == C3
assert isinstance(rettype, model.SomeInstance)
assert rettype.classdef.classdesc.pyobj == C2
@check_annotator_fails
def ok_for_body():
f(C2())
@check_annotator_fails
def bad_for_body():
f(C1())
@check_annotator_fails
def ok_for_body():
f(None)
def test_instance_or_none():
class C1(object):
pass
class C2(C1):
pass
class C3(C2):
pass
@signature(types.instance(C3, can_be_None=True), returns=types.instance(C2, can_be_None=True))
def f(x):
assert isinstance(x, C2) or x is None
return x
argtype, rettype = getsig(f)
assert isinstance(argtype, model.SomeInstance)
assert argtype.classdef.classdesc.pyobj == C3
assert argtype.can_be_None
assert isinstance(rettype, model.SomeInstance)
assert rettype.classdef.classdesc.pyobj == C2
assert rettype.can_be_None
@check_annotator_fails
def ok_for_body():
f(C2())
@check_annotator_fails
def bad_for_body():
f(C1())
def test_self():
@finishsigs
class C(object):
@signature(types.self(), types.self(), returns=types.none())
def f(self, other):
pass
class D1(C):
pass
class D2(C):
pass
def g():
D1().f(D2())
a = annotate_at(g)
argtype = sigof(a, C.__dict__['f'])[0]
assert isinstance(argtype, model.SomeInstance)
assert argtype.classdef.classdesc.pyobj == C
def test_self_error():
class C(object):
@signature(types.self(), returns=types.none())
def incomplete_sig_meth(self):
pass
exc = py.test.raises(SignatureError, annotate_at, C.incomplete_sig_meth).value
assert 'incomplete_sig_meth' in str(exc)
assert 'finishsigs' in str(exc)
def test_any_as_argument():
@signature(types.any(), types.int(), returns=types.float())
def f(x, y):
return x + y
@signature(types.int(), returns=types.float())
def g(x):
return f(x, x)
sig = getsig(g)
assert sig == [model.SomeInteger(), model.SomeFloat()]
@signature(types.float(), returns=types.float())
def g(x):
return f(x, 4)
sig = getsig(g)
assert sig == [model.SomeFloat(), model.SomeFloat()]
@signature(types.str(), returns=types.int())
def cannot_add_string(x):
return f(x, 2)
exc = py.test.raises(model.AnnotatorError, annotate_at, cannot_add_string).value
assert 'Blocked block' in str(exc)
def test_return_any():
@signature(types.int(), returns=types.any())
def f(x):
return x
sig = getsig(f)
assert sig == [model.SomeInteger(), model.SomeInteger()]
@signature(types.str(), returns=types.any())
def cannot_add_string(x):
return f(3) + x
exc = py.test.raises(model.AnnotatorError, annotate_at, cannot_add_string).value
assert 'Blocked block' in str(exc)
assert 'cannot_add_string' in str(exc)
@py.test.mark.xfail
def test_class_basic():
class C(object):
_fields_ = ClassSpec({'x': FieldSpec(types.int)})
def wrong_type():
c = C()
c.x = 'a'
check_annotator_fails(wrong_type)
def bad_field():
c = C()
c.y = 3
check_annotator_fails(bad_field)
@py.test.mark.xfail
def test_class_shorthand():
class C1(object):
_fields_ = {'x': FieldSpec(types.int)}
def wrong_type_1():
c = C1()
c.x = 'a'
check_annotator_fails(wrong_type_1)
class C2(object):
_fields_ = ClassSpec({'x': types.int})
def wrong_type_2():
c = C2()
c.x = 'a'
check_annotator_fails(wrong_type_1)
@py.test.mark.xfail
def test_class_inherit():
class C(object):
_fields_ = ClassSpec({'x': FieldSpec(types.int)})
class C1(object):
_fields_ = ClassSpec({'y': FieldSpec(types.int)})
class C2(object):
_fields_ = ClassSpec({'y': FieldSpec(types.int)}, inherit=True)
def no_inherit():
c = C1()
c.x = 3
check_annotator_fails(no_inherit)
def good():
c = C2()
c.x = 3
annotate_at(good)
def wrong_type():
c = C2()
c.x = 'a'
check_annotator_fails(wrong_type)
|
oblique-labs/pyVM
|
rpython/rlib/test/test_signature.py
|
Python
|
mit
| 10,192
|
from fabric.api import task, local, run
from fabric.context_managers import lcd
import settings
@task(default=True)
def build():
"""
(Default) Build Sphinx HTML documentation
"""
with lcd('docs'):
local('make html')
@task()
def deploy():
"""
Upload docs to server
"""
build()
destination = '/usr/share/nginx/localhost/mysite/docs/build/html'
if settings.environment == 'vagrant':
local("rsync -avz --rsync-path='sudo rsync' -e 'ssh -p 2222 -i .vagrant/machines/web/virtualbox/private_key -o StrictHostKeyChecking=no' docs/build/html/ %s@%s:%s " % ('vagrant', 'localhost', destination))
elif settings.environment == 'ci':
local("rsync -avz --rsync-path='sudo rsync' -e 'ssh -p 2222 -i /var/go/id_rsa_web -o StrictHostKeyChecking=no' docs/build/html/ %s@%s:%s " % ('vagrant', '192.168.10.10', destination))
|
brady-vitrano/full-stack-django-kit
|
fabfile/docs.py
|
Python
|
mit
| 877
|
import threading
import upnp
import nupnp
class DiscoveryThread(threading.Thread):
def __init__(self, bridges):
super(DiscoveryThread, self).__init__()
self.bridges = bridges
self.upnp_thread = upnp.UPnPDiscoveryThread(self.bridges)
self.nupnp_thread = nupnp.NUPnPDiscoveryThread(self.bridges)
def run(self):
self.upnp_thread.start()
self.nupnp_thread.start()
self.upnp_thread.join()
self.nupnp_thread.join()
def discover():
bridges = set()
discovery_thread = DiscoveryThread(bridges)
discovery_thread.start()
discovery_thread.join()
return bridges
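# Minimal usage sketch; the result depends on what the UPnP/N-UPnP threads
# find on the local network and may be empty:
if __name__ == '__main__':
    for bridge in discover():
        print(bridge)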
|
mpolednik/reddit-button-hue
|
app/discovery/bridges.py
|
Python
|
mit
| 649
|
"""
This module is responsible for doing all the authentication.
Adapted from the Google API Documentation.
"""
from __future__ import print_function
import os
import httplib2
import apiclient
import oauth2client
try:
import argparse
flags = argparse.ArgumentParser(
parents=[oauth2client.tools.argparser]).parse_args()
except ImportError:
flags = None
SCOPES = 'https://www.googleapis.com/auth/drive'
CLIENT_SECRET_FILE = 'client_secret.json'
# Enter your project name here!!
APPLICATION_NAME = 'API Project'
def get_credentials():
"""Gets valid user credentials from storage.
If nothing has been stored, or if the stored credentials are invalid,
the OAuth2 flow is completed to obtain the new credentials.
Returns:
Credentials, the obtained credential.
"""
home_dir = os.path.expanduser('~')
credential_dir = os.path.join(home_dir, '.gdrive-credentials-cache')
if not os.path.exists(credential_dir):
os.makedirs(credential_dir)
credential_path = os.path.join(credential_dir,
'gdrive-credentials.json')
store = oauth2client.file.Storage(credential_path)
credentials = store.get()
if not credentials or credentials.invalid:
flow = oauth2client.client.flow_from_clientsecrets(
CLIENT_SECRET_FILE, SCOPES)
flow.user_agent = APPLICATION_NAME
if flags:
credentials = oauth2client.tools.run_flow(flow, store, flags)
else: # Needed only for compatibility with Python 2.6
credentials = oauth2client.tools.run(flow, store)
print('Storing credentials to ' + credential_path)
return credentials
credentials = get_credentials()
http = credentials.authorize(httplib2.Http())
file_service = apiclient.discovery.build('drive', 'v3', http=http).files()
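# A hedged usage sketch with the `file_service` built above; the pageSize and
# fields values are illustrative:
#
#   results = file_service.list(pageSize=5, fields="files(id, name)").execute()
#   for f in results.get('files', []):
#       print(f['name'], f['id'])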
|
Anmol-Singh-Jaggi/gDrive-auto-sync
|
gDrive-auto-sync/api_boilerplate.py
|
Python
|
mit
| 1,850
|
import numpy as np
import pandas as pd
from ElectionsTools.Seats_assignation import DHondt_assignation
from previous_elections_spain_parser import *
import os
pathfiles = '../data/spain_previous_elections_results/provincia/'
pathfiles = '/'.join(os.path.realpath(__file__).split('/')[:-1]+[pathfiles])
fles = [pathfiles+'PROV_02_197706_1.xlsx',
pathfiles+'PROV_02_197903_1.xlsx',
pathfiles+'PROV_02_198210_1.xlsx',
pathfiles+'PROV_02_198606_1.xlsx',
pathfiles+'PROV_02_198910_1.xlsx',
pathfiles+'PROV_02_199306_1.xlsx',
pathfiles+'PROV_02_199603_1.xlsx',
pathfiles+'PROV_02_200003_1.xlsx',
pathfiles+'PROV_02_200403_1.xlsx',
pathfiles+'PROV_02_200803_1.xlsx',
pathfiles+'PROV_02_201111_1.xlsx']
years = [1977, 1979, 1982, 1986, 1989, 1993, 1996, 2000, 2004, 2008, 2011]
def compute_diputes_DHont(filename):
## 1. Parse
circ, parties, votes, diputes = parse_data_elecciones_esp(filename)
circ_com, votes_com, dips_com = collapse_by_col(circ, votes, diputes, 0)
circ_sp, votes_sp, dips_sp = collapse_by_col(circ, votes, diputes, None)
votes_sp = votes_sp.reshape(1,len(parties))
## 2. Assignation objects
assign = DHondt_assignation(diputes.sum(1))
assign1 = DHondt_assignation(dips_com.sum(1))
assign2 = DHondt_assignation(np.array([dips_sp.sum(0)]))
## 3. Compute assignations
d, price = assign.assignation(pd.DataFrame(votes, columns=parties))
d1, price1 = assign1.assignation(pd.DataFrame(votes_com, columns=parties))
d2, price2 = assign2.assignation(pd.DataFrame(votes_sp, columns=parties))
return d, d1, d2, parties
def prepare2export(d, d1, d2, parties):
logi = np.logical_or(np.logical_or(d.sum(0)>0, d1.sum(0)>0), d2.sum(0)>0)
parties = [parties[i] for i in np.where(logi)[0]]
d, d1, d2 = d[:, logi].sum(0), d1[:, logi].sum(0), d2[:, logi].sum(0)
return d, d1, d2, parties
def compute_all_year(year):
filename = fles[years.index(year)]
d, d1, d2, parties = compute_diputes_DHont(filename)
exp_d, exp_d1, exp_d2, exp_parties = prepare2export(d, d1, d2, parties)
return exp_d, exp_d1, exp_d2, exp_parties
def compute_table_all_years(year):
d1, d2, d3, cols = compute_all_year(year)
d1, d2, d3 = pd.DataFrame(d1), pd.DataFrame(d2), pd.DataFrame(d3)
ind = ['Dhont_estado', 'Dhont_comunidad', 'Dhont_provincia']
exp = pd.concat([d1.T, d2.T, d3.T], axis=0)
exp.columns = cols
exp.index = ind
return exp
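# Minimal usage sketch (assumes the provincial Excel files listed in `fles`
# are available on disk):
#
#   table = compute_table_all_years(2011)
#   print(table)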
|
tgquintela/ElectionsTools
|
ElectionsTools/cases/previous_elections_spain_analysis.py
|
Python
|
mit
| 2,527
|
from django.utils.translation import ugettext_lazy as _
# Legend Position
def get_legend_class(position):
return 'legend-' + str(position)
class LEGEND_POSITIONS:
BOTTOM = _('bottom')
TOP = _('top')
LEFT = _('left')
RIGHT = _('right')
get_choices = ((get_legend_class(BOTTOM), BOTTOM),
(get_legend_class(TOP), TOP),
(get_legend_class(LEFT), LEFT),
(get_legend_class(RIGHT), RIGHT),)
def get_chart_position_class(position):
return 'chart-' + str(position)
class CHART_POSITIONS:
CENTER = _('center')
LEFT = _('left')
RIGHT = _('right')
get_choices = ((get_chart_position_class(CENTER), CENTER),
(get_chart_position_class(LEFT), LEFT),
(get_chart_position_class(RIGHT), RIGHT),)
|
mcldev/DjangoCMS_Charts
|
djangocms_charts/base/consts.py
|
Python
|
mit
| 824
|
from enum import Enum
from typing import List, Union
import logging
import math
try:
from flask_babel import _
except ModuleNotFoundError:
pass
class VehicleType(Enum):
CAR = 1
TRUCK_UPTO_4 = 2
PICKUP_UPTO_4 = 3
TRUCK_4_TO_10 = 4
TRUCK_12_TO_16 = 5
TRUCK_16_TO_34 = 6
TRUCK_ABOVE_34 = 7
MOTORCYCLE_UPTO_50 = 8
MOTORCYCLE_50_TO_250 = 9
MOTORCYCLE_250_TO_500 = 10
BUS = 11
TAXI = 12
WORK = 13
TRACTOR = 14
BIKE = 15
TRAIN = 16
OTHER_AND_UNKNOWN = 17
MINIBUS = 18
MOTORCYCLE_ABOVE_500 = 19
ELECTRIC_SCOOTER = 21
MOBILITY_SCOOTER = 22
ELECTRIC_BIKE = 23
TRUCK_3_5_TO_10 = 24
TRUCK_10_TO_12 = 25
    def get_categories(self) -> List["VehicleCategory"]:
        res = []
        for t in list(VehicleCategory):
            # category values are plain ints, so membership is checked
            # against the category's list of vehicle type codes
            if self.value in t.get_codes():
                res.append(t)
        return res
def get_english_display_name(self):
english_vehicle_type_display_names = {
VehicleType.CAR: "private car",
VehicleType.TRUCK_UPTO_4: "truck upto 4 tons",
VehicleType.PICKUP_UPTO_4: "pickup upto 4 tons",
VehicleType.TRUCK_4_TO_10: "truck 4 to 10 tons",
VehicleType.TRUCK_12_TO_16: "truck 12 to 16 tons",
VehicleType.TRUCK_16_TO_34: "truck 16 to 34 tons",
VehicleType.TRUCK_ABOVE_34: "truck above 34 tons",
VehicleType.MOTORCYCLE_UPTO_50: "motorcycle upto 50 cc",
VehicleType.MOTORCYCLE_50_TO_250: "motorcycle 50 to 250 cc",
VehicleType.MOTORCYCLE_250_TO_500: "motorcycle 250 to 500 cc",
VehicleType.BUS: "bus",
VehicleType.TAXI: "taxi",
VehicleType.WORK: "work vehicle",
VehicleType.TRACTOR: "tractor",
VehicleType.BIKE: "bike",
VehicleType.TRAIN: "train",
VehicleType.OTHER_AND_UNKNOWN: "other and unknown",
VehicleType.MINIBUS: "minibus",
VehicleType.MOTORCYCLE_ABOVE_500: "motorcycle above 500 cc",
VehicleType.ELECTRIC_SCOOTER: "electric scooter",
VehicleType.MOBILITY_SCOOTER: "mobility scooter",
VehicleType.ELECTRIC_BIKE: "electric bike",
VehicleType.TRUCK_3_5_TO_10: "truck 3.5 to 10 tons",
VehicleType.TRUCK_10_TO_12: "truck 10 to 12 tons",
}
try:
return english_vehicle_type_display_names[self]
except (KeyError, TypeError):
logging.exception(f"VehicleType.get_display_name: {self}: no display string defined")
return "no display name defined"
@staticmethod
def to_type_code(db_val: Union[float, int]) -> int:
"""Values read from DB may arrive as float, and empty values come as nan"""
if isinstance(db_val, float):
if math.isnan(db_val):
return VehicleType.OTHER_AND_UNKNOWN.value
else:
return int(db_val)
elif isinstance(db_val, int):
return db_val
else:
logging.error(
f"VehicleType.fo_type_code: unknown value: {db_val}({type(db_val)})"
". returning OTHER_AND_UNKNOWN"
)
return VehicleType.OTHER_AND_UNKNOWN.value
VT = VehicleType
class VehicleCategory(Enum):
PROFESSIONAL_DRIVER = 1
PRIVATE_DRIVER = 2
LIGHT_ELECTRIC = 3
CAR = 4
LARGE = 5
MOTORCYCLE = 6
BICYCLE_AND_SMALL_MOTOR = 7
OTHER = 8
def get_codes(self) -> List[int]:
"""returns VehicleType codes of category"""
category_vehicle_types = {
VehicleCategory.PROFESSIONAL_DRIVER: [
VehicleType.TRUCK_UPTO_4,
VehicleType.PICKUP_UPTO_4,
VehicleType.TRUCK_4_TO_10,
VehicleType.TRUCK_12_TO_16,
VehicleType.TRUCK_16_TO_34,
VehicleType.TRUCK_ABOVE_34,
VehicleType.BUS,
VehicleType.TAXI,
VehicleType.WORK,
VehicleType.TRACTOR,
VehicleType.MINIBUS,
VehicleType.TRUCK_3_5_TO_10,
VehicleType.TRUCK_10_TO_12,
],
VehicleCategory.PRIVATE_DRIVER: [
VehicleType.CAR,
VehicleType.MOTORCYCLE_UPTO_50,
VehicleType.MOTORCYCLE_50_TO_250,
VehicleType.MOTORCYCLE_250_TO_500,
VehicleType.MOTORCYCLE_ABOVE_500,
],
VehicleCategory.LIGHT_ELECTRIC: [
VehicleType.ELECTRIC_SCOOTER,
VehicleType.MOBILITY_SCOOTER,
VehicleType.ELECTRIC_BIKE,
],
VehicleCategory.CAR: [VehicleType.CAR, VehicleType.TAXI],
VehicleCategory.LARGE: [
VehicleType.TRUCK_UPTO_4,
VehicleType.PICKUP_UPTO_4,
VehicleType.TRUCK_4_TO_10,
VehicleType.TRUCK_12_TO_16,
VehicleType.TRUCK_16_TO_34,
VehicleType.TRUCK_ABOVE_34,
VehicleType.BUS,
VehicleType.WORK,
VehicleType.TRACTOR,
VehicleType.MINIBUS,
VehicleType.TRUCK_3_5_TO_10,
VehicleType.TRUCK_10_TO_12,
],
VehicleCategory.MOTORCYCLE: [
VehicleType.MOTORCYCLE_UPTO_50,
VehicleType.MOTORCYCLE_50_TO_250,
VehicleType.MOTORCYCLE_250_TO_500,
VehicleType.MOTORCYCLE_ABOVE_500,
],
VehicleCategory.BICYCLE_AND_SMALL_MOTOR: [
VehicleType.BIKE,
VehicleType.ELECTRIC_SCOOTER,
VehicleType.ELECTRIC_BIKE,
],
VehicleCategory.OTHER: [
VehicleType.BIKE,
VehicleType.TRAIN,
VehicleType.OTHER_AND_UNKNOWN,
],
}
return list(map(lambda x: x.value, category_vehicle_types[self]))
def contains(self, vt_code: int) -> bool:
        if not isinstance(vt_code, int):
            logging.warning(f"VehicleCategory.contains: {vt_code}:{type(vt_code)}: not int")
return False
return vt_code in self.get_codes()
def get_english_display_name(self):
english_vehicle_type_display_names = {
VehicleCategory.PROFESSIONAL_DRIVER: "professional driver",
VehicleCategory.PRIVATE_DRIVER: "private driver",
VehicleCategory.LIGHT_ELECTRIC: "light electric vehicles",
VehicleCategory.CAR: "private car",
VehicleCategory.LARGE: "large vehicle",
VehicleCategory.MOTORCYCLE: "motorcycle",
VehicleCategory.BICYCLE_AND_SMALL_MOTOR: "bicycle and small motor vehicles",
VehicleCategory.OTHER: "other vehicle",
}
try:
return english_vehicle_type_display_names[self]
except (KeyError, TypeError):
logging.exception(f"VehicleType.get_display_name: {self}: no display string defined")
return "no display name defined"
_("professional driver")
_("private driver")
_("light electric vehicles")
_("private car")
_("large vehicle")
_("motorcycle")
_("bicycle and small motor vehicles")
_("other vehicle")
|
hasadna/anyway
|
anyway/vehicle_type.py
|
Python
|
mit
| 7,301
|
#!/usr/bin/env python
def get_secret_for_user(user, ipparam):
print("Looking up user %s with ipparam %s" % (user, ipparam))
return "user_secret"
def allowed_address_hook(ip):
return True
def chap_check_hook():
return True
def ip_up_notifier(ifname, localip, remoteip):
print("ip_up_notifier")
def ip_down_notifier(arg):
print("ip_down_notifier")
def auth_up_notifier(arg):
print("auth_up_notifier")
def link_down_notifier(arg):
print("link_down_notifier")
|
metricube/pppd_pyhook
|
hooks.py
|
Python
|
mit
| 495
|
import numpy as np
import matplotlib.pyplot as plt
x = np.arange(0, 5, 0.1)
y = np.sin(x)
plt.plot(x, y)
plt.show()
|
kantel/python-schulung
|
sources/hallowelt/hallomatplotlib01.py
|
Python
|
mit
| 117
|
#!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "test/Configure/ConfigureDryRunError.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
"""
Verify the ConfigureDryRunError.
"""
import os
import TestSCons
_obj = TestSCons._obj
test = TestSCons.TestSCons()
lib = test.Configure_lib
NCR = test.NCR # non-cached rebuild
CR = test.CR # cached rebuild (up to date)
NCF = test.NCF # non-cached build failure
CF = test.CF # cached build failure
SConstruct_path = test.workpath('SConstruct')
test.write(SConstruct_path, """
env = Environment()
import os
env.AppendENVPath('PATH', os.environ['PATH'])
conf = Configure(env)
r1 = conf.CheckLib('%s') # will pass
r2 = conf.CheckLib('hopefullynolib') # will fail
env = conf.Finish()
if not (r1 and not r2):
Exit(1)
""" % (lib))
expect = """
scons: *** Cannot create configure directory ".sconf_temp" within a dry-run.
""" + test.python_file_line(SConstruct_path, 5)
test.run(arguments='-n', status=2, stderr=expect)
test.must_not_exist('config.log')
test.subdir('.sconf_temp')
conftest_0_c = os.path.join(".sconf_temp", "conftest_0.c")
SConstruct_file_line = test.python_file_line(SConstruct_path, 6)[:-1]
expect = """
scons: *** Cannot update configure test "%(conftest_0_c)s" within a dry-run.
%(SConstruct_file_line)s
""" % locals()
test.run(arguments='-n', status=2, stderr=expect)
test.run()
test.checkLogAndStdout( ["Checking for C library %s... " % lib,
"Checking for C library hopefullynolib... "],
["yes", "no"],
[[((".c", NCR), (_obj, NCR))],
[((".c", NCR), (_obj, NCF))]],
"config.log", ".sconf_temp", "SConstruct")
oldLog = test.read(test.workpath('config.log'))
test.run(arguments='-n')
test.checkLogAndStdout( ["Checking for C library %s... " % lib,
"Checking for C library hopefullynolib... "],
["yes", "no"],
[[((".c", CR), (_obj, CR))],
[((".c", CR), (_obj, CF))]],
"config.log", ".sconf_temp", "SConstruct",
doCheckLog=0)
newLog = test.read(test.workpath('config.log'))
if newLog != oldLog:
print "Unexpected update of log file within a dry run"
test.fail_test()
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
EmanueleCannizzaro/scons
|
test/Configure/ConfigureDryRunError.py
|
Python
|
mit
| 3,514
|
# -*- coding: utf-8 -*-
# Copyright © 2012-2016 Roberto Alsina and others.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Define and register a listing directive using the existing CodeBlock."""
from __future__ import unicode_literals
import io
import os
import uuid
try:
from urlparse import urlunsplit
except ImportError:
from urllib.parse import urlunsplit # NOQA
import docutils.parsers.rst.directives.body
import docutils.parsers.rst.directives.misc
from docutils import core
from docutils import nodes
from docutils.parsers.rst import Directive, directives
from docutils.parsers.rst.roles import set_classes
from docutils.parsers.rst.directives.misc import Include
from pygments.lexers import get_lexer_by_name
import pygments
import pygments.util
from nikola import utils
from nikola.plugin_categories import RestExtension
# A sanitized version of docutils.parsers.rst.directives.body.CodeBlock.
class CodeBlock(Directive):
"""Parse and mark up content of a code block."""
optional_arguments = 1
option_spec = {'class': directives.class_option,
'name': directives.unchanged,
'number-lines': directives.unchanged, # integer or None
'linenos': directives.unchanged,
'tab-width': directives.nonnegative_int}
has_content = True
def run(self):
"""Run code block directive."""
self.assert_has_content()
if 'linenos' in self.options:
self.options['number-lines'] = self.options['linenos']
if 'tab-width' in self.options:
self.content = [x.replace('\t', ' ' * self.options['tab-width']) for x in self.content]
if self.arguments:
language = self.arguments[0]
else:
language = 'text'
set_classes(self.options)
classes = ['code']
if language:
classes.append(language)
if 'classes' in self.options:
classes.extend(self.options['classes'])
code = '\n'.join(self.content)
try:
lexer = get_lexer_by_name(language)
except pygments.util.ClassNotFound:
raise self.error('Cannot find pygments lexer for language "{0}"'.format(language))
if 'number-lines' in self.options:
linenos = 'table'
# optional argument `startline`, defaults to 1
try:
linenostart = int(self.options['number-lines'] or 1)
except ValueError:
raise self.error(':number-lines: with non-integer start value')
else:
linenos = False
linenostart = 1 # actually unused
if self.site.invariant: # for testing purposes
anchor_ref = 'rest_code_' + 'fixedvaluethatisnotauuid'
else:
anchor_ref = 'rest_code_' + uuid.uuid4().hex
formatter = utils.NikolaPygmentsHTML(anchor_ref=anchor_ref, classes=classes, linenos=linenos, linenostart=linenostart)
out = pygments.highlight(code, lexer, formatter)
node = nodes.raw('', out, format='html')
self.add_name(node)
# if called from "include", set the source
if 'source' in self.options:
node.attributes['source'] = self.options['source']
return [node]
# Monkey-patch: replace insane docutils CodeBlock with our implementation.
docutils.parsers.rst.directives.body.CodeBlock = CodeBlock
docutils.parsers.rst.directives.misc.CodeBlock = CodeBlock
class Plugin(RestExtension):
"""Plugin for listing directive."""
name = "rest_listing"
def set_site(self, site):
"""Set Nikola site."""
self.site = site
# Even though listings don't use CodeBlock anymore, I am
# leaving these to make the code directive work with
# docutils < 0.9
CodeBlock.site = site
Listing.site = site
directives.register_directive('code', CodeBlock)
directives.register_directive('code-block', CodeBlock)
directives.register_directive('sourcecode', CodeBlock)
directives.register_directive('listing', Listing)
Listing.folders = site.config['LISTINGS_FOLDERS']
return super(Plugin, self).set_site(site)
# Add sphinx compatibility option
listing_spec = Include.option_spec
listing_spec['linenos'] = directives.unchanged
class Listing(Include):
"""Create a highlighted block of code from a file in listings/.
Usage:
.. listing:: nikola.py python
:number-lines:
"""
has_content = False
required_arguments = 1
optional_arguments = 1
option_spec = listing_spec
def run(self):
"""Run listing directive."""
_fname = self.arguments.pop(0)
fname = _fname.replace('/', os.sep)
try:
lang = self.arguments.pop(0)
self.options['code'] = lang
except IndexError:
self.options['literal'] = True
if len(self.folders) == 1:
listings_folder = next(iter(self.folders.keys()))
if fname.startswith(listings_folder):
fpath = os.path.join(fname) # new syntax: specify folder name
else:
fpath = os.path.join(listings_folder, fname) # old syntax: don't specify folder name
else:
fpath = os.path.join(fname) # must be new syntax: specify folder name
self.arguments.insert(0, fpath)
if 'linenos' in self.options:
self.options['number-lines'] = self.options['linenos']
with io.open(fpath, 'r+', encoding='utf8') as fileobject:
self.content = fileobject.read().splitlines()
self.state.document.settings.record_dependencies.add(fpath)
target = urlunsplit(("link", 'listing', fpath.replace('\\', '/'), '', ''))
src_target = urlunsplit(("link", 'listing_source', fpath.replace('\\', '/'), '', ''))
src_label = self.site.MESSAGES('Source')
generated_nodes = (
[core.publish_doctree('`{0} <{1}>`_ `({2}) <{3}>`_' .format(
_fname, target, src_label, src_target))[0]])
generated_nodes += self.get_code_from_file(fileobject)
return generated_nodes
def get_code_from_file(self, data):
"""Create CodeBlock nodes from file object content."""
return super(Listing, self).run()
def assert_has_content(self):
"""Listing has no content, override check from superclass."""
pass
|
wcmckee/nikola
|
nikola/plugins/compile/rest/listing.py
|
Python
|
mit
| 7,496
|
""" Python expresses functional and modular scope for variables.
"""
# Global to the module, not global in the builtin sense.
x = 5
def f1():
"""If not local, reference global.
"""
return x
def f2():
"""Local references global.
"""
global x
x = 3
return x
# Should print 5.
print f1()
# Should print 3.
print f2()
# Should print 3.
print x
# When done, open the python interpreter and import this module.
# Note the output when importing.
# Note that our "global" x is only available via reference of scope.x.
|
jeremyosborne/python
|
scope/scope.py
|
Python
|
mit
| 548
|
#!/usr/bin/env python3
# python setup.py sdist --format=zip,gztar
from setuptools import setup
import os
import sys
import platform
import imp
import argparse
version = imp.load_source('version', 'lib/version.py')
if sys.version_info[:3] < (3, 4, 0):
sys.exit("Error: Electrum requires Python version >= 3.4.0...")
data_files = []
if platform.system() in ['Linux', 'FreeBSD', 'DragonFly']:
parser = argparse.ArgumentParser()
parser.add_argument('--root=', dest='root_path', metavar='dir', default='/')
opts, _ = parser.parse_known_args(sys.argv[1:])
usr_share = os.path.join(sys.prefix, "share")
if not os.access(opts.root_path + usr_share, os.W_OK) and \
not os.access(opts.root_path, os.W_OK):
if 'XDG_DATA_HOME' in os.environ.keys():
usr_share = os.environ['XDG_DATA_HOME']
else:
usr_share = os.path.expanduser('~/.local/share')
data_files += [
(os.path.join(usr_share, 'applications/'), ['electrum.desktop']),
(os.path.join(usr_share, 'pixmaps/'), ['icons/electrum.png'])
]
setup(
name="Electrum",
version=version.ELECTRUM_VERSION,
install_requires=[
'pyaes>=0.1a1',
'ecdsa>=0.9',
'pbkdf2',
'requests',
'qrcode',
'protobuf',
'dnspython',
'jsonrpclib-pelix',
'PySocks>=1.6.6',
],
packages=[
'electrum',
'electrum_gui',
'electrum_gui.qt',
'electrum_plugins',
'electrum_plugins.audio_modem',
'electrum_plugins.cosigner_pool',
'electrum_plugins.email_requests',
'electrum_plugins.greenaddress_instant',
'electrum_plugins.hw_wallet',
'electrum_plugins.keepkey',
'electrum_plugins.labels',
'electrum_plugins.ledger',
'electrum_plugins.trezor',
'electrum_plugins.digitalbitbox',
'electrum_plugins.trustedcoin',
'electrum_plugins.virtualkeyboard',
],
package_dir={
'electrum': 'lib',
'electrum_gui': 'gui',
'electrum_plugins': 'plugins',
},
package_data={
'electrum': [
'servers.json',
'servers_testnet.json',
'currencies.json',
'checkpoints.json',
'www/index.html',
'wordlist/*.txt',
'locale/*/LC_MESSAGES/electrum.mo',
]
},
scripts=['electrum'],
data_files=data_files,
description="Lightweight Bitcoin Wallet",
author="Thomas Voegtlin",
author_email="thomasv@electrum.org",
license="MIT Licence",
url="https://electrum.org",
long_description="""Lightweight Bitcoin Wallet"""
)
|
digitalbitbox/electrum
|
setup.py
|
Python
|
mit
| 2,673
|
from django.conf.urls.defaults import patterns, url
from django.contrib.auth.decorators import login_required
from views import PollDetailView, PollListView, PollVoteView
urlpatterns = patterns('',
url(r'^$', PollListView.as_view(), name='list'),
url(r'^(?P<pk>\d+)/$', PollDetailView.as_view(), name='detail'),
url(r'^(?P<pk>\d+)/vote/$', login_required(PollVoteView.as_view()), name='vote'),
)
|
Mercy-Nekesa/sokoapp
|
sokoapp/polls/urls.py
|
Python
|
mit
| 411
|
from django.db import transaction
from .base import BaseForm
from .composite import CompositeForm
from .formset import FormSet
class BaseModelForm(BaseForm):
def save(self, commit=True):
retval = []
with transaction.atomic():
for form in self._subforms:
if form.empty_permitted and not form.has_changed():
continue
retval.append(form.save(commit=commit))
return retval
class CompositeModelForm(BaseModelForm, CompositeForm):
def __init__(self, *args, **kwargs):
super(CompositeModelForm, self).__init__(*args, **kwargs)
if not all(hasattr(obj, 'save') for obj in self._subforms):
raise ValueError('all form instance must have save method (model form)')
class ModelFormSet(FormSet, BaseModelForm):
def __init__(self, data=None, files=None, form_class=None, repeat=1, instances=None, **kwargs):
if not hasattr(form_class, 'save'):
raise ValueError('form_class must be ModelForm')
self._instances = instances
super(ModelFormSet, self).__init__(data, files, form_class, repeat, **kwargs)
def _update_kwargs(self, kwargs, i):
try:
kwargs['instance'] = self._instances[i]
except (IndexError, TypeError):
pass
return kwargs
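# A hedged usage sketch; ArticleForm is a made-up ModelForm, and is_valid()
# is assumed to come from the underlying form machinery:
#
#   formset = ModelFormSet(request.POST, form_class=ArticleForm, repeat=3)
#   if formset.is_valid():
#       articles = formset.save()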
|
lovasb/django-smart-forms
|
smartforms/models.py
|
Python
|
mit
| 1,341
|
HTBRootQdisc = """\
tc qdisc add dev {interface!s} root handle 1: \
htb default {default_class!s}\
"""
HTBQdisc = """\
tc qdisc add dev {interface!s} parent {parent!s} handle {handle!s} \
htb default {default_class!s}\
"""
NetemDelayQdisc = """\
tc qdisc add dev {interface!s} parent {parent!s} handle {handle!s} \
netem delay {delay!s}ms\
"""
IngressQdisc = "tc qdisc add dev {interface!s} ingress"
PRIOQdisc = "tc qdisc add dev {interface!s} root handle 1: prio"
pfifoQdisc = "tc qdisc add dev {interface!s} root handle 1: pfifo"
|
praus/shapy
|
shapy/framework/commands/qdisc.py
|
Python
|
mit
| 537
|
from flask import request, abort, session
from functools import wraps
import logging
import urllib.request as urllib2
import numpy as np
import cv2
import random
from annotator_supreme.views import error_views
from io import StringIO
from PIL import Image
from annotator_supreme import app
import os
import base64
def read_image_from_stream(stream):
try:
arr = np.asarray(bytearray(stream.read()), dtype=np.uint8)
image = cv2.imdecode(arr, cv2.IMREAD_COLOR)
height, width = image.shape[:2]
if height <= 0 or width <= 0:
raise Exception('Invalid image file from stream')
except:
raise Exception('Invalid image file from stream')
return image
def read_image_from_url(url):
req = urllib2.Request(url, headers={'User-Agent' : "VirtualMakeup-API"})
res = urllib2.urlopen(req)
if res.getcode() != 200:
raise Exception('Invalid status code '+str(res.getcode())+' from image url')
else:
return read_image_from_stream(res)
def read_image_b64(base64_string):
dec = base64.b64decode(base64_string)
    npimg = np.frombuffer(dec, dtype=np.uint8)
cvimg = cv2.imdecode(npimg, cv2.IMREAD_COLOR)
return cvimg
def image_to_dict(image):
anno_vec = []
for bb in image.bboxes:
curr_anno = {}
curr_anno['labels'] = bb.labels
curr_anno['left'] = bb.left
curr_anno['top'] = bb.top
curr_anno['right'] = bb.right
curr_anno['bottom'] = bb.bottom
curr_anno['ignore'] = bb.ignore
anno_vec.append(curr_anno)
image_dict = {'anno': anno_vec}
image_dict['dataset_name'] = image.dataset_name
image_dict['name'] = image.name
image_dict['phash'] = image.phash
image_dict['category'] = image.category
image_dict['partition'] = image.partition
image_dict['fold'] = image.fold
image_dict['last_modified'] = image.last_modified
return image_dict
def parse_content_type(request):
"""
This function is used to extract the content type from the header.
"""
try:
content_type = request.headers['content-type']
except:
raise error_views.InvalidParametersError('No Content-Type provided')
json_type = 'application/json'
data_type = 'multipart/form-data'
lower_content_type = content_type.lower()
if lower_content_type.find(json_type) >= 0:
return json_type
elif lower_content_type.find(data_type) >= 0:
return data_type
else:
raise error_views.InvalidParametersError('Invalid Content-Type')
def get_param_from_request(request, label):
"""
This function is used to extract a field from a POST or GET request.
Returns a tuple with (ok:boolean, error:string, value)
"""
if request.method == 'POST':
content_type = parse_content_type(request)
if content_type == "multipart/form-data":
if label in request.form:
return (True, "", request.form[label])
else:
return (False, "No "+label+" provided in form-data request", None)
elif content_type == 'application/json':
try:
input_params = request.get_json(True)
except:
return (False, 'No valid JSON present', None)
if label in input_params:
return (True, "", input_params[label])
else:
return (False, "No "+label+" provided in json payload", None)
elif request.method == 'GET':
if request.args.get(label) == None:
return (False, "No "+label+" in GET params", None)
else:
return (True, "", request.args.get(label))
else:
return (False, "Invalid request method", None)
def get_image_from_request(request):
"""
This function is used to extract the image from a POST or GET request.
Usually it is a url of the image and, in case of the POST is possible
to send it as a multi-part data.
Returns a tuple with (ok:boolean, error:string, image:ndarray)
"""
if request.method == 'POST':
content_type = parse_content_type(request)
if content_type == "multipart/form-data":
if 'image' in request.files:
try:
image = read_image_from_stream(request.files['image'])
return (True, '', image)
except:
return (False, "Unable to read uploaded file", None)
else:
return (False, "No image provided in form-data request", None)
elif content_type == 'application/json':
try:
input_params = request.get_json(True)
except:
return (False, 'No valid JSON present', None)
if 'imageUrl' in input_params:
image_url = input_params['imageUrl']
try:
image = read_image_from_url(image_url)
return (True, '', image)
except:
return (False, 'Unable to read image from url', None)
elif 'imageB64' in input_params:
image_b64 = input_params['imageB64']
try:
image = read_image_b64(image_b64)
return (True, '', image)
except:
return (False, 'Unable to read base 64 image', None)
else:
return (False, 'Image url or base 64 string not informed', None)
elif request.method == 'GET':
if request.args.get('imageUrl') == None:
return (False, 'Image url not informed', None)
else:
image_url = request.args.get('imageUrl')
try:
image = read_image_from_url(image_url)
return (True, '', image)
except:
return (False, 'Unable to read image from url', None)
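# A hedged usage sketch inside a Flask view; the route and handler names are
# made up:
#
#   @app.route('/images', methods=['POST'])
#   def add_image():
#       ok, error, image = get_image_from_request(request)
#       if not ok:
#           raise error_views.InvalidParametersError(error)
#       # ... store or process `image` (an OpenCV ndarray) ...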
|
meerkat-cv/annotator-supreme
|
annotator_supreme/views/view_tools.py
|
Python
|
mit
| 5,904
|
# by Art FY Poon, 2012
# modified by Rosemary McCloskey, 2014
from Bio import Phylo
from numpy import zeros
import math
import multiprocessing as mp
class PhyloKernel:
def __init__(self,
kmat=None,
rotate='ladder',
rotate2='none',
subtree=False,
normalize='mean',
sigma=1,
gaussFactor=1,
withLengths=True,
decayFactor=0.1,
verbose=False,
resolve_poly=False):
"""
requires a list of Phylo.Tree objects
can cast iterator returned by Phylo.parse() as list
"""
        self.resolve_poly = resolve_poly
        self.normalize = normalize
        self.trees = []
        self.kmat = []
        self.is_kmat_computed = False
        # using **kwargs would probably make this cleaner
        self.rotate = rotate
        self.rotate2 = rotate2
        self.subtree = subtree
        self.sigma = sigma
        self.gaussFactor = gaussFactor
        self.withLengths = withLengths
        self.decayFactor = decayFactor
        self.verbose = verbose
        self.pcache = {}
        self.subtrees = {}  # used for matching polytomies
        #self.cache_productions()
        if self.verbose:
            print('creating PhyloKernel with settings')
            print('sigma = {}'.format(self.sigma))
            print('gaussFactor = {}'.format(self.gaussFactor))
            print('decayFactor = {}'.format(self.decayFactor))
@property
def ntrees (self):
return len(self.trees)
def load_trees_from_file (self, handle):
"""
Parse a file containing Newick tree strings
"""
self.trees = []
tree_iter = Phylo.parse(handle, 'newick')
for t in tree_iter:
            if self.rotate == 'ladder':
                t.ladderize()
            elif self.rotate == 'random':
                scramble(t)
            else:
                pass
            if self.rotate2 == 'none':
                pass
            else:
                gravitate(t, subtree=self.subtree, mode=self.rotate2)
if self.normalize != 'none': self.normalize_tree(t, mode=self.normalize)
if self.resolve_poly:
collapse_polytomies(t)
self.annotate_tree(t)
self.trees.append(t)
self.kmat = zeros( (self.ntrees, self.ntrees) )
self.is_kmat_computed = False
self.delta_values = {}
def normalize_tree (self, t, mode='median'):
"""
Normalize branch lengths in tree by mean branch length.
This helps us compare trees of different overall size.
Ignore the root as its branch length is meaningless.
"""
# compute number of branches in tree
branches = t.get_nonterminals() + t.get_terminals()
nbranches = len(branches) - 1
if mode == 'mean':
tree_length = t.total_branch_length() - t.root.branch_length
mean_branch_length = tree_length / nbranches
for branch in branches[int(not t.rooted):]:
branch.branch_length /= mean_branch_length
elif mode == 'median':
branch_lengths = [branch.branch_length for branch in branches[int(not t.rooted):]]
branch_lengths.sort()
            if nbranches % 2 == 0:
                median_branch_length = (branch_lengths[(nbranches // 2) - 1] +
                                        branch_lengths[nbranches // 2]) / 2.
            else:
                median_branch_length = branch_lengths[nbranches // 2]
for branch in branches[int(not t.rooted):]:
branch.branch_length /= median_branch_length
def annotate_tree(self, t):
"""
Add annotations to Clade objects in place
"""
for tip in t.get_terminals():
tip.production = 0
for i, node in enumerate(t.get_nonterminals(order='postorder')):
children = node.clades
nterms = sum( [c.production == 0 for c in children] )
node.production = nterms + 1
node.index = i
branch_lengths = [c.branch_length for c in node.clades]
node.sqbl = sum([bl**2 for bl in branch_lengths])
def compute_matrix(self):
for i in range(self.ntrees):
for j in range(i, self.ntrees):
self.kmat[i,j] = self.kmat[j,i] = self.kernel(self.trees[i], self.trees[j])
if self.verbose:
print('%d\t%d\t%f' % (i, j, self.kmat[i,j]))
self.is_kmat_computed = True
def kernel(self, t1, t2, myrank=None, nprocs=None, output=None):
"""
Recursive function for computing tree convolution
kernel. Adapted from Moschitti (2006) Making tree kernels
practical for natural language learning. Proceedings of the
11th Conference of the European Chapter of the Association
for Computational Linguistics.
"""
nodes1 = t1.get_nonterminals(order='postorder')
nodes2 = t2.get_nonterminals(order='postorder')
k = 0
if not hasattr(nodes1[0], 'production'):
self.annotate_tree(t1)
if not hasattr(nodes2[0], 'production'):
self.annotate_tree(t2)
dp_matrix = [[0 for n2 in nodes2] for n1 in nodes1]
# iterate over non-terminals, visiting children before parents
for ni, n1 in enumerate(nodes1):
if myrank is not None and nprocs and ni % nprocs != myrank:
continue
for n2 in nodes2:
if n1.production == n2.production:
bl1 = [c1.branch_length for c1 in n1.clades]
bl2 = [c2.branch_length for c2 in n2.clades]
                    res = self.decayFactor * math.exp(-1. / self.gaussFactor
                        * (n1.sqbl + n2.sqbl - 2 * sum(bl1[i] * bl2[i] for i in range(len(bl1)))))
for cn1 in range(2):
c1 = n1.clades[cn1]
c2 = n2.clades[cn1]
if c1.production != c2.production:
continue
if c1.production == 0:
# branches are terminal
res *= self.sigma + self.decayFactor
else:
res *= self.sigma + dp_matrix[c1.index][c2.index]
dp_matrix[n1.index][n2.index] = res
k += res
if output is None:
return k
output.put(k)
def kernel_parallel(self, t1, t2, nthreads):
"""
Wrapper around kernel().
Attempt to use Python multiprocessing module to speed up computation.
Borrowing code snippets from:
http://sebastianraschka.com/Articles/2014_multiprocessing_intro.html
:param t1: first Phylo.Tree to be compared
:param t2: second Phylo.Tree to be compared
:param nthreads: number of threads in pool
:return: kernel score (double)
"""
# FIXME: this gives the wrong answer
output = mp.Queue()
processes = [mp.Process(target=self.kernel, args=(t1, t2, i, nthreads, output))
for i in range(nthreads)]
for p in processes:
p.start()
# exit completed processes
for p in processes:
p.join()
# collect results and calculate sum
k = sum([output.get() for p in processes])
return k
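# --- Usage sketch (not part of the original module) ---
# Assumes a Newick file 'trees.nwk' containing at least two trees; the file
# name and the settings below are illustrative only.
#
# pk = PhyloKernel(rotate='ladder', normalize='mean', decayFactor=0.1)
# with open('trees.nwk') as handle:
#     pk.load_trees_from_file(handle)
# pk.compute_matrix()
# print(pk.kmat)  # ntrees x ntrees symmetric kernel matrix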
|
sdwfrost/pangea-round2
|
villagetraining/phyloK2.py
|
Python
|
mit
| 8,205
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import List
import torch
from torch import nn
from fairseq.modules.quant_noise import quant_noise
class AdaptiveInput(nn.Module):
def __init__(
self,
vocab_size: int,
padding_idx: int,
initial_dim: int,
factor: float,
output_dim: int,
cutoff: List[int],
q_noise: float = 0,
qn_block_size: int = 8,
):
super().__init__()
if vocab_size > cutoff[-1]:
cutoff = cutoff + [vocab_size]
else:
assert (
vocab_size == cutoff[-1]
), "cannot specify cutoff larger than vocab size"
self.cutoff = cutoff
self.embedding_dim = output_dim
self.padding_idx = padding_idx
self.embeddings = nn.ModuleList()
for i in range(len(self.cutoff)):
prev = self.cutoff[i - 1] if i > 0 else 0
size = self.cutoff[i] - prev
dim = int(initial_dim // (factor**i))
seq = nn.Sequential(
nn.Embedding(size, dim, self.padding_idx),
quant_noise(
nn.Linear(dim, output_dim, bias=False), q_noise, qn_block_size
),
)
self.embeddings.append(seq)
        self.padding_idx = padding_idx
def init_weights(m):
if isinstance(m, nn.Embedding):
nn.init.normal_(m.weight, mean=0, std=m.weight.shape[1] ** -0.5)
nn.init.constant_(m.weight[padding_idx], 0)
elif hasattr(m, "weight"):
nn.init.xavier_uniform_(m.weight)
self.apply(init_weights)
self.register_buffer("_float_tensor", torch.FloatTensor(1))
def weights_for_band(self, band: int):
return self.embeddings[band][0].weight, self.embeddings[band][1].weight
def forward(self, input: torch.Tensor):
result = self._float_tensor.new(input.shape + (self.embedding_dim,))
for i in range(len(self.cutoff)):
mask = input.lt(self.cutoff[i])
if i > 0:
mask.mul_(input.ge(self.cutoff[i - 1]))
chunk_input = input[mask] - self.cutoff[i - 1]
else:
chunk_input = input[mask]
if mask.any():
result[mask] = self.embeddings[i](chunk_input)
return result
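# --- Usage sketch (not part of fairseq) ---
# A minimal self-contained check of the adaptive input, assuming fairseq and
# torch are importable: a 30-token vocabulary split into two bands at cutoff
# 20, so rare tokens get half the embedding dimension before projection back
# to output_dim. All sizes here are illustrative assumptions.
if __name__ == "__main__":
    emb = AdaptiveInput(
        vocab_size=30, padding_idx=0, initial_dim=16,
        factor=2.0, output_dim=16, cutoff=[20, 30],
    )
    tokens = torch.tensor([[1, 5, 25], [0, 21, 29]])
    print(emb(tokens).shape)  # expected: torch.Size([2, 3, 16])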
|
pytorch/fairseq
|
fairseq/modules/adaptive_input.py
|
Python
|
mit
| 2,565
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import argparse
import numpy
import math
class Image:
    def __init__(self, matrix=None, width=0, height=0, depth=0):
        self.matrix = matrix if matrix is not None else [[]]
self.width = width
self.height = height
self.depth = depth
def set_width_and_height(self, width, height):
self.width = width
self.height = height
        self.matrix = [[0 for j in xrange(width)] for i in xrange(height)]
def multiply_matrices(matrixU, matrixS, matrixVt, kmin, kmax, depth, rescale=False, contrast=False):
matrixScopy = matrixS.copy()
# when kmax is not 0 use the provided kmax
if kmax > 0:
i = 0
contrast_factor = (1.0 + (1 - (math.log(kmax, 2) / 10)))
for t in numpy.nditer(matrixScopy, op_flags=["readwrite"]):
if i < kmin or i >= kmax:
t[...] = 0
else:
if contrast:
t[...] = t * contrast_factor #* math.pi / 2
i += 1
# when kmax is 0 then drop eigen values less than 1.0E-14
else:
for t in numpy.nditer(matrixScopy, op_flags=["readwrite"]):
if round(t, 14) <= 0:
t[...] = 0
# recompose the trimmed SVD matrices back into matrix matrixComposed
matrixComposed = numpy.dot(numpy.dot(matrixU, numpy.diag(matrixScopy)), matrixVt)
# attempt the handle out of range values (TODO: pull out to own function)
if rescale:
curMin = 0
curMax = 0
# find min and max values
for n in numpy.nditer(matrixComposed):
if int(round(n)) < curMin:
curMin = int(round(n))
if int(round(n)) > curMax:
curMax = int(round(n))
# shift values up
if curMax < depth and curMin < 0:
shiftVal = depth - curMax
for t in numpy.nditer(matrixComposed, op_flags=["readwrite"]):
t[...] = int(round(t + shiftVal))
if t > depth:
t[...] = depth
elif t < 0:
t[...] = 0
# shift values down
elif curMax > depth and curMin > 0:
shiftVal = curMin
for t in numpy.nditer(matrixComposed, op_flags=["readwrite"]):
t[...] = int(round(t - shiftVal))
if t > depth:
t[...] = depth
elif t < 0:
t[...] = 0
# no chance to shift, just chop (TODO: perform some sort of scaling)
else:
for t in numpy.nditer(matrixComposed, op_flags=["readwrite"]):
t[...] = int(round(t))
if t > depth:
t[...] = depth
elif t < 0:
t[...] = 0
if contrast:
depth_limit = depth # int(depth - (depth * .01))
for t in numpy.nditer(matrixComposed, op_flags=["readwrite"]):
if t < depth_limit:
t[...] = 0
return matrixComposed
def write_matrices_to_file(matrixU, matrixS, matrixVt, kmin, kmax, file_handle, width, height, depth, rescale=False, contrast=False):
"""
Write a decomposed matrix to file uncompressed as it would show compressed
Keyword Arguments:
matrixU -- the U portion of the SVD
matrixS -- the S (sigma) portion of the SVD
matrixVt -- the V transpose portion of the SVD
kmin -- the minimum k value to use for compresion (ignored if kmax = 0)
kmax -- the maximum kvalue to use for compresion (find optimal if zero)
    file_handle -- the open file object to write to
width -- the image width
height -- the image height
depth -- the maximum grey scale value (normally 255)
    rescale -- True to shift resulting image into 0 < n < depth bounds
    contrast -- True to boost the contrast of the retained singular values
    """
A = multiply_matrices(matrixU, matrixS, matrixVt, kmin, kmax, depth, rescale, contrast)
pixelate_count = 4
for x in xrange(1, pixelate_count):
U, s, Vt = numpy.linalg.svd(A, full_matrices=True)
A = multiply_matrices(U, s, Vt, kmin, kmax, depth, rescale, contrast)
file_handle.write("P2\n")
file_handle.write("# Generated by Stoll \n")
file_handle.write(str(width))
file_handle.write(" ")
file_handle.write(str(height))
file_handle.write("\n")
file_handle.write(str(depth))
file_handle.write("\n")
for n in numpy.nditer(A):
file_handle.write(str(int(round(n))))
file_handle.write(" ")
file_handle.write("\n")
def read_matrix_from_file(file_handle):
"""
Read an ASCII PGM file and create an Image object from it
"""
row = 0
col = 0
rownull = True
image = Image()
for line in file_handle:
if line[0] == '#':
pass
elif line[0] == 'P' and line[1] == '2':
pass
elif image.width == 0 and image.height == 0:
x = 0
y = 0
x, y = [int(n) for n in line.split()]
image.set_width_and_height(x, y)
elif image.depth == 0:
image.depth = int(line)
else:
for value in line.split():
                if col >= image.width:
                    # rows which are all black become all white
                    if rownull:
                        for x in xrange(0, image.width):
                            image.matrix[row][x] = image.depth
                    row += 1
                    col = 0
                    rownull = True
                image.matrix[row][col] = value
                if int(value) != 0:
                    rownull = False
                col += 1
# columns which are all black become all white
for x in xrange(0, image.width):
colnull = True
for y in xrange(0, image.height):
if int(image.matrix[y][x]) != 0:
colnull = False
if colnull:
for y in xrange(0, image.height):
image.matrix[y][x] = image.depth
return image
def process_svd(source_file_a, source_file_b, destination_file, kmin, kmax, rescale, contrast):
"""
Read from file provided on the command line or from stdin
then save uncompressed representations of the SVD compressed version
"""
"""
imagea = read_matrix_from_file(source_file_a)
Ma = numpy.asmatrix(imagea.matrix)
U, s, Vt = numpy.linalg.svd(Ma, full_matrices=True)
"""
pixelate_count = 2 + int(kmax / 2)
imagea = read_matrix_from_file(source_file_a)
Ma = numpy.asmatrix(imagea.matrix)
# for x in xrange(1, pixelate_count):
# Ua, sa, Vta = numpy.linalg.svd(Ma, full_matrices=True)
# Ma = multiply_matrices(Ua, sa, Vta, kmin, kmax, imagea.depth, rescale, contrast)
Ua, sa, Vta = numpy.linalg.svd(Ma, full_matrices=True)
imageb = read_matrix_from_file(source_file_b)
Mb = numpy.asmatrix(imageb.matrix)
for x in xrange(1, pixelate_count):
Ub, sb, Vtb = numpy.linalg.svd(Mb, full_matrices=True)
Mb = multiply_matrices(Ub, sb, Vtb, kmin, kmax, imageb.depth, rescale, contrast)
    # Blend the two decompositions elementwise with a fixed 5:1 weighting in
    # favour of image A (other ratios tried during development: 1:1, 2:1,
    # 3:1, 4:1, 6:1, 7:1 and a golden-ratio mix).
    U = (Ua * 5 + Ub) / 6.0
    s = (sa * 5 + sb) / 6.0
    Vt = (Vta * 5 + Vtb) / 6.0
write_matrices_to_file(U, s, Vt, kmin, kmax, destination_file, imagea.width, imagea.height, imagea.depth, rescale, contrast)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("infile1", nargs='?', help="The source ASCII PGM file", type=argparse.FileType('r'), default=sys.stdin)
parser.add_argument("infile2", nargs='?', help="The source ASCII PGM file", type=argparse.FileType('r'), default=sys.stdin)
parser.add_argument("outfile", nargs='?', help="The destination ASCII PGM file", type=argparse.FileType('w'), default=sys.stdout)
parser.add_argument("-j", "--kmin", help="The number of high k values to exlude", type=int, default=0)
parser.add_argument("-k", "--kmax", help="The number k values to use", type=int, default=0)
parser.add_argument("-s", "--scale", help="Fit resulting image depth into '0 < n < depth' bounds", action="store_true")
parser.add_argument("-c", "--contrast", help="Improve high contrast images", action="store_true")
args = parser.parse_args()
try:
process_svd(args.infile1, args.infile2, args.outfile, args.kmin, args.kmax, args.scale, args.contrast)
except KeyboardInterrupt:
exit(0)
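# --- Usage sketch (shell; file names are illustrative) ---
#   python svd-pgm-avg.py face1.pgm face2.pgm blend.pgm -k 20 -s
# reads two ASCII PGM images, blends their SVD factors with the fixed 5:1
# weighting above, and writes the rank-limited recomposition to blend.pgm.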
|
stollcri/Research-Matrices
|
pgm/svd-pgm-avg.py
|
Python
|
mit
| 8,300
|
"""Tests for mongodb backend
Authors:
* Min RK
"""
#-------------------------------------------------------------------------
# Copyright (C) 2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
# Imports
#-------------------------------------------------------------------------
import os
from unittest import TestCase
from nose import SkipTest
from pymongo import Connection
from IPython.parallel.controller.mongodb import MongoDB
from . import test_db
conn_kwargs = {}
if 'DB_IP' in os.environ:
conn_kwargs['host'] = os.environ['DB_IP']
if 'DBA_MONGODB_ADMIN_URI' in os.environ:
# On ShiningPanda, we need a username and password to connect. They are
# passed in a mongodb:// URI.
conn_kwargs['host'] = os.environ['DBA_MONGODB_ADMIN_URI']
if 'DB_PORT' in os.environ:
conn_kwargs['port'] = int(os.environ['DB_PORT'])
try:
c = Connection(**conn_kwargs)
except Exception:
c = None
class TestMongoBackend(test_db.TaskDBTest, TestCase):
"""MongoDB backend tests"""
def create_db(self):
try:
return MongoDB(database='iptestdb', _connection=c)
except Exception:
raise SkipTest("Couldn't connect to mongodb")
def teardown(self):
if c is not None:
c.drop_database('iptestdb')
|
mattvonrocketstein/smash
|
smashlib/ipy3x/parallel/tests/test_mongodb.py
|
Python
|
mit
| 1,544
|
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def ExtendedElementDescription(vim, *args, **kwargs):
''''''
obj = vim.client.factory.create('ns0:ExtendedElementDescription')
# do some validation checking...
    if (len(args) + len(kwargs)) < 4:
        raise IndexError('Expected at least 4 arguments got: %d' % (len(args) + len(kwargs)))
required = [ 'messageCatalogKeyPrefix', 'key', 'label', 'summary' ]
optional = [ 'messageArg', 'dynamicProperty', 'dynamicType' ]
for name, arg in zip(required+optional, args):
setattr(obj, name, arg)
for name, value in kwargs.items():
if name in required + optional:
setattr(obj, name, value)
else:
raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))
return obj
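# --- Usage sketch (not part of the generated module) ---
# 'vim' must be a connected pyvisdk service instance; the values below are
# illustrative assumptions.
#
# desc = ExtendedElementDescription(vim, 'com.example.msgs.', 'key1',
#                                   'Label text', 'Summary text')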
|
xuru/pyvisdk
|
pyvisdk/do/extended_element_description.py
|
Python
|
mit
| 1,021
|
#!/usr/bin/env python3
#coding:utf-8
__author__ = 'zhuzhezhe'
'''
Features: post a weibo status and fetch the latest timeline from the command line
'''
from weibo import Client
import getopt
import sys
import configparser
versions = '0.1.5'
# Write user credentials to config.ini
def write_data(uname, pwd):
conf = configparser.ConfigParser()
conf['LOGIN'] = {}
conf['LOGIN']['username'] = uname
conf['LOGIN']['password'] = pwd
with open('config.ini', 'w') as configfile:
conf.write(configfile)
    print('Credentials saved')
# Read user credentials
config = configparser.ConfigParser()
config.read('config.ini')
username = ''
password = ''
if 'LOGIN' in config:
username = config['LOGIN']['username']
password = config['LOGIN']['password']
else:
    print('Make sure you are logged in. Please set your username and password.')
# Sina API credentials
api_key = '3842240593'
api_secret = '93f0c80150239e02c52011c858b20ce6'
# Default OAuth callback URL
redirect_url = 'https://api.weibo.com/oauth2/default.html'
# Log in and authenticate
c = Client(api_key=api_key,
api_secret=api_secret,
redirect_uri=redirect_url,
username=username,
password=password)
# Fetch the latest statuses
def new_weibo():
    try:
        data = c.get('statuses/friends_timeline')["statuses"]
        for i in range(len(data)):
            print("User: " + data[i]["user"]["screen_name"])
            print("Status: " + data[i]["text"])
            print("\n")
    except Exception as err:
        print(err)
        print('Make sure you are logged in. Please set your username and password.')
# Post a status
def add_weibo(words):
    try:
        c.post('statuses/update', status=words)
        print("Posted successfully!")
    except Exception as err:
        print(err)
        print('Make sure you are logged in. Please set your username and password.')
# Usage help
def usage():
    text = '-------- weibobash help --------\n' \
           '-h <--help>: show this help message\n' \
           '-u <--user>: enter username and password\n' \
           '-n <--new>: show the 20 latest statuses\n' \
           '-a <--add>: post a status\n'
    print(text)
# Main entry point
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "hna:vu", ["help", "new", "add=", "user"])
except getopt.GetoptError as err:
print(err)
sys.exit(2)
for o, a in opts:
if o == "-v":
print(versions)
elif o in ("-h", "--help"):
usage()
sys.exit()
elif o in ("-n", "--new"):
new_weibo()
elif o in ("-a", "--add"):
add_weibo(a)
elif o in ("-u", "--user"):
            user = input("Enter username: ")
            pwd = input("Enter password: ")
write_data(user, pwd)
else:
assert False, "unhandled option"
if __name__ == "__main__":
main()
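# --- Usage sketch (shell; mirrors the usage() text above) ---
#   python3 weibo_bash.py -u             # store username/password in config.ini
#   python3 weibo_bash.py -n             # print the 20 latest statuses
#   python3 weibo_bash.py -a "hello"     # post a status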
|
zhuzhezhe/weibobash
|
weibo_bash/weibo_bash.py
|
Python
|
mit
| 2,831
|
"""
Copyright (c) 2015 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the MIT license. See the LICENSE file for details.
"""
import logging
def setup_logging(name="cct", level=logging.DEBUG):
# create logger
logger = logging.getLogger(name)
logger.handlers = []
logger.setLevel(level)
# create console handler and set level to debug
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
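# --- Usage sketch (not part of the original module; names illustrative) ---
# setup_logging("cct", logging.INFO)
# logging.getLogger("cct").info("logger configured")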
|
goldmann/cct
|
cct/__init__.py
|
Python
|
mit
| 696
|
from datetime import datetime
import structlog
from flask import Blueprint, request
from conditional.util.ldap import ldap_get_intro_members
from conditional.models.models import FreshmanCommitteeAttendance
from conditional.models.models import CommitteeMeeting
from conditional.models.models import FreshmanAccount
from conditional.models.models import FreshmanEvalData
from conditional.models.models import FreshmanHouseMeetingAttendance
from conditional.models.models import FreshmanSeminarAttendance
from conditional.models.models import MemberHouseMeetingAttendance
from conditional.models.models import MemberSeminarAttendance
from conditional.models.models import HouseMeeting
from conditional.models.models import TechnicalSeminar
from conditional.util.flask import render_template
from conditional.util.member import get_cm, get_hm
from conditional import start_of_year
intro_evals_bp = Blueprint('intro_evals_bp', __name__)
logger = structlog.get_logger()
@intro_evals_bp.route('/intro_evals/')
def display_intro_evals(internal=False):
log = logger.new(request=request)
log.info('Display Intro Evals Listing')
# get user data
def get_fid_cm_count(member_id):
return len([a for a in FreshmanCommitteeAttendance.query.filter(
FreshmanCommitteeAttendance.fid == member_id)
if CommitteeMeeting.query.filter(CommitteeMeeting.id == a.meeting_id).first().approved])
user_name = None
if not internal:
user_name = request.headers.get('x-webauth-user')
members = [account for account in ldap_get_intro_members()]
ie_members = []
# freshmen who don't have accounts
fids = [f for f in FreshmanAccount.query.filter(
FreshmanAccount.eval_date > start_of_year(),
FreshmanAccount.eval_date > datetime.now())]
for fid in fids:
h_meetings = [m.meeting_id for m in
FreshmanHouseMeetingAttendance.query.filter(
FreshmanHouseMeetingAttendance.fid == fid.id
).filter(
FreshmanHouseMeetingAttendance.attendance_status == "Absent"
)]
if fid.signatures_missed is None:
signatures_missed = -1
else:
signatures_missed = fid.signatures_missed
freshman = {
'name': fid.name,
'uid': fid.id,
'eval_date': fid.eval_date.strftime("%Y-%m-%d"),
'signatures_missed': signatures_missed,
'committee_meetings': get_fid_cm_count(fid.id),
'committee_meetings_passed': get_fid_cm_count(fid.id) >= 10,
'house_meetings_missed':
[
{
"date": m.date.strftime("%Y-%m-%d"),
"reason":
FreshmanHouseMeetingAttendance.query.filter(
FreshmanHouseMeetingAttendance.fid == fid.id).filter(
FreshmanHouseMeetingAttendance.meeting_id == m.id).first().excuse
}
for m in HouseMeeting.query.filter(
HouseMeeting.id.in_(h_meetings)
)
],
'technical_seminars':
[s.name for s in TechnicalSeminar.query.filter(
TechnicalSeminar.id.in_(
[a.seminar_id for a in FreshmanSeminarAttendance.query.filter(
FreshmanSeminarAttendance.fid == fid.id)
if TechnicalSeminar.query.filter(TechnicalSeminar.id == a.seminar_id).first().approved]
))
],
'social_events': '',
'freshman_project': "Pending",
'comments': "",
'ldap_account': False,
'status': "Pending"
}
ie_members.append(freshman)
# freshmen who have accounts
for member in members:
uid = member.uid
name = member.cn
freshman_data = FreshmanEvalData.query.filter(
FreshmanEvalData.eval_date > start_of_year(),
FreshmanEvalData.uid == uid).first()
if freshman_data is None:
continue
elif freshman_data.freshman_eval_result != "Pending" and internal:
continue
h_meetings = [m.meeting_id for m in get_hm(member)]
member_info = {
'name': name,
'uid': uid,
'eval_date': freshman_data.eval_date.strftime("%Y-%m-%d"),
'signatures_missed': freshman_data.signatures_missed,
'committee_meetings': len(get_cm(member)),
'committee_meetings_passed': len(get_cm(member)) >= 10,
'house_meetings_missed':
[
{
"date": m.date.strftime("%Y-%m-%d"),
"reason":
MemberHouseMeetingAttendance.query.filter(
MemberHouseMeetingAttendance.uid == uid,
MemberHouseMeetingAttendance.meeting_id == m.id).first().excuse
}
for m in HouseMeeting.query.filter(
HouseMeeting.id.in_(h_meetings)
)
],
'technical_seminars':
[s.name for s in TechnicalSeminar.query.filter(
TechnicalSeminar.id.in_(
[a.seminar_id for a in MemberSeminarAttendance.query.filter(
MemberSeminarAttendance.uid == uid)
if TechnicalSeminar.query.filter(
TechnicalSeminar.id == a.seminar_id,
TechnicalSeminar.timestamp > start_of_year()).first().approved]
))
],
'social_events': freshman_data.social_events,
'freshman_project': freshman_data.freshman_project,
'comments': freshman_data.other_notes,
'ldap_account': True,
'status': freshman_data.freshman_eval_result
}
ie_members.append(member_info)
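    # Stable multi-pass sort: the last key applied wins, so the final order is
    # status (Pending before Passed), then fewest signatures missed, then most
    # committee meetings, then fewest house meetings missed, then freshman
    # project (Pending before Passed).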
ie_members.sort(key=lambda x: x['freshman_project'] == "Passed")
ie_members.sort(key=lambda x: len(x['house_meetings_missed']))
ie_members.sort(key=lambda x: x['committee_meetings'], reverse=True)
ie_members.sort(key=lambda x: x['signatures_missed'])
ie_members.sort(key=lambda x: x['status'] == "Passed")
if internal:
return ie_members
    # render the listing page for the authenticated webauth user
return render_template(request,
'intro_evals.html',
username=user_name,
members=ie_members)
|
RamZallan/conditional
|
conditional/blueprints/intro_evals.py
|
Python
|
mit
| 6,796
|
"""
Django settings for jstest project.
Generated by 'django-admin startproject' using Django 1.10.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '(n=5&yvpo-9!=db58cbix!za-$30^osiq1i42o42xh8)9j81i1'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'samplepage',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'jstest.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'jstest.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Seoul'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'samplepage/statics'),
)
|
motobyus/moto
|
module_django/jstest/jstest/settings.py
|
Python
|
mit
| 3,255
|
from itertools import count
from typing import Union
from dataclasses import dataclass, field
from OnePy.constants import ActionType, OrderType
from OnePy.sys_module.components.exceptions import (OrderConflictError,
PctRangeError)
from OnePy.sys_module.metabase_env import OnePyEnvBase
@dataclass
class Signal(OnePyEnvBase):
counter = count(1)
strategy_name: str
action_type: ActionType
size: int
ticker: str
takeprofit: float = None
takeprofit_pct: float = None
stoploss: float = None
stoploss_pct: float = None
trailingstop: float = None
trailingstop_pct: float = None
price: float = None
price_pct: float = None
signal_id: int = None
datetime: str = field(init=False)
def __post_init__(self):
self.datetime = self.env.sys_date
self.next_datetime = self.env.feeds[self.ticker].next_ohlc['date']
self.signal_id = next(self.counter)
self._check_all_conflict()
self._save_signals()
def _save_signals(self):
self.env.signals_normal_cur.append(self)
if self.env.is_save_original:
self.env.signals_normal.append(self)
def _check_all_conflict(self):
self._check_size()
self._check_conflict(self.price, self.price_pct, name='price')
self._check_conflict(
self.takeprofit, self.takeprofit_pct, name='takeprofit')
self._check_conflict(self.stoploss, self.stoploss_pct, name='stoploss')
self._check_conflict(
self.trailingstop, self.trailingstop_pct, name='trailingstop')
def _check_size(self):
if self.size <= 0:
raise Exception("size should be Positive")
@staticmethod
def _check_conflict(obj: float, obj_pct: float, name: str):
if obj and obj_pct:
raise OrderConflictError("$ and pct can't be set together")
if obj_pct:
if not -1 < obj_pct < 1:
raise PctRangeError("pct should be -1 < pct < 1")
if name != 'price':
if obj:
if obj <= 0:
raise ValueError(f"{name.upper()} should be Positive")
if obj_pct:
if obj_pct <= 0:
raise ValueError(f"{name.upper()} should be Positive")
def get(self, name: str):
return getattr(self, name)
def set(self, name: str, value: float):
setattr(self, name, value)
@dataclass
class SignalForPending(Signal):
price: float = None
price_pct: float = None
def _save_signals(self):
self.env.signals_pending_cur.append(self)
if self.env.is_save_original:
self.env.signals_pending.append(self)
@dataclass
class SignalByTrigger(SignalForPending):
counter = count(1)
order_type: OrderType = None
mkt_id: int = None
trigger_key: str = None
    execute_price: float = None  # used to decide whether the order must fill; used for pending orders
    first_cur_price: float = None  # records the price at the moment the pending-order signal was generated
    parent_order: str = None  # not actually a str but an order object
def _save_signals(self):
self.env.signals_trigger_cur.append(self)
if self.env.is_save_original:
self.env.signals_trigger.append(self)
@dataclass
class SignalCancelBase(OnePyEnvBase):
counter = None
action_type: ActionType
strategy_name: str
ticker: str
long_or_short: str
def __post_init__(self):
self.datetime = self.env.sys_date
self.signal_id = next(self.counter)
self._check_all_conflict()
self._save_signals()
def _save_signals(self):
self.env.signals_cancel_cur.append(self)
if self.env.is_save_original:
self.env.signals_cancel.append(self)
def _check_all_conflict(self):
raise NotImplementedError
@dataclass
class SignalCancelTST(SignalCancelBase):
counter = count(1)
takeprofit: bool
stoploss: bool
trailingstop: bool
def _check_all_conflict(self):
pass
@dataclass
class SignalCancelPending(SignalCancelBase):
counter = count(1)
below_price: float = None
above_price: float = None
def _check_all_conflict(self):
if self.below_price is not None and self.above_price is not None:
raise ValueError(f"below and above price can't be set together!")
|
Chandlercjy/OnePy
|
OnePy/sys_module/models/signals.py
|
Python
|
mit
| 4,404
|
#!/usr/bin/env python
import json
import sys
import web
from coloredcoinlib import BlockchainState, ColorDefinition
blockchainstate = BlockchainState.from_url(None, True)
urls = (
'/tx', 'Tx',
'/prefetch', 'Prefetch',
)
class ErrorThrowingRequestProcessor:
def require(self, data, key, message):
value = data.get(key)
if not value:
raise web.HTTPError("400 Bad request",
{"content-type": "text/plain"},
message)
class Tx(ErrorThrowingRequestProcessor):
def POST(self):
# data is sent in as json
data = json.loads(web.input().keys()[0])
self.require(data, 'txhash', "TX requires txhash")
txhash = data.get('txhash')
return blockchainstate.get_raw(txhash)
class Prefetch(ErrorThrowingRequestProcessor):
def POST(self):
# data is sent in as json
data = json.loads(web.input().keys()[0])
self.require(data, 'txhash', "Prefetch requires txhash")
self.require(data, 'output_set', "Prefetch requires output_set")
self.require(data, 'color_desc', "Prefetch requires color_desc")
txhash = data.get('txhash')
output_set = data.get('output_set')
color_desc = data.get('color_desc')
limit = data.get('limit')
color_def = ColorDefinition.from_color_desc(17, color_desc)
tx_lookup = {}
def process(current_txhash, current_outindex):
"""For any tx out, process the colorvalues of the affecting
inputs first and then scan that tx.
"""
if limit and len(tx_lookup) > limit:
return
if tx_lookup.get(current_txhash):
return
current_tx = blockchainstate.get_tx(current_txhash)
if not current_tx:
return
tx_lookup[current_txhash] = blockchainstate.get_raw(current_txhash)
# note a genesis tx will simply have 0 affecting inputs
inputs = set()
inputs = inputs.union(
color_def.get_affecting_inputs(current_tx,
[current_outindex]))
for i in inputs:
process(i.prevout.hash, i.prevout.n)
for oi in output_set:
process(txhash, oi)
return tx_lookup
if __name__ == "__main__":
app = web.application(urls, globals())
app.run()
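# --- Usage sketch (shell; payload shape inferred from Tx.POST above) ---
# The server reads the whole request body as a single form key holding JSON:
#   curl -X POST http://localhost:8080/tx -d '{"txhash": "<hex txid>"}'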
|
elkingtowa/alphacoin
|
Bitcoin/ngcccbase-master/server/run.py
|
Python
|
mit
| 2,469
|
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self, plotly_name="color", parent_name="heatmapgl.hoverlabel.font", **kwargs
):
super(ColorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "none"),
**kwargs
)
|
plotly/plotly.py
|
packages/python/plotly/plotly/validators/heatmapgl/hoverlabel/font/_color.py
|
Python
|
mit
| 472
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.cm
fig = plt.figure()
ax = {}
ax["DropOut"] = fig.add_subplot(121)
ax["NoDropOut"] = fig.add_subplot(122)
dList = {}
dList["DropOut"] = ["DropOut1","DropOut2","DropOut3"]
dList["NoDropOut"] = ["NoDropOut1","NoDropOut2"]
def myPlot(ax,dName):
cList = ["black","blue","red","green","cyan"]
for i,dfile in enumerate(dName):
print dfile
d = pd.read_csv("Output_DropTest/%s/output.dat"%dfile)
dTrain = d[d["mode"]=="Train"]
dTest = d[d["mode"]=="Test" ]
        ax.plot(dTrain.epoch, dTrain.accuracy*100., linestyle="-", color=cList[i], label=dfile)  # solid: training accuracy
        ax.plot(dTest.epoch, dTest.accuracy*100., linestyle="--", color=cList[i], label="")      # dashed: test accuracy
ax.set_xlim(0,50)
ax.set_ylim(0,100)
ax.legend(loc=4,fontsize=8)
ax.grid()
for k in dList:
myPlot(ax[k],dList[k])
plt.show()
|
ysasaki6023/NeuralNetworkStudy
|
cifar02/analysis_DropTest.py
|
Python
|
mit
| 925
|
from wordbook.domain.models import Translation
def test_translation_dto():
t = Translation(
id=1,
from_language='en',
into_language='pl',
word='apple',
ipa='ejpyl',
simplified='epyl',
translated='jabłko',
)
assert t.dto_autocomplete() == dict(
id=1,
word='apple',
translation='jabłko',
ipa='ejpyl',
simplified='epyl',
)
|
lizardschool/wordbook
|
tests/test_domain_translation.py
|
Python
|
mit
| 436
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import Cookie
import base64
import calendar
import datetime
import email.utils
import functools
import gzip
import hashlib
import hmac
import httplib
import logging
import mimetypes
import os.path
import re
import stat
import sys
import time
import types
import urllib
import urlparse
import uuid
from tornado import web
from tornado.web import HTTPError, utf8
from tld_name import tld_name
from tornado import escape
from tornado import locale
from tornado import stack_context
from tornado import template
def set_cookie(self, name, value, domain=None, expires=None, path='/',
expires_days=None, **kwargs):
"""Sets the given cookie name/value with the given options.
Additional keyword arguments are set on the Cookie.Morsel
directly.
See http://docs.python.org/library/cookie.html#morsel-objects
for available attributes.
"""
if domain is None:
domain = '.%s'%tld_name(self.request.host)
name = escape.native_str(name)
value = escape.native_str(value)
if re.search(r"[\x00-\x20]", name + value):
# Don't let us accidentally inject bad stuff
raise ValueError("Invalid cookie %r: %r" % (name, value))
if not hasattr(self, "_new_cookie"):
self._new_cookie = Cookie.SimpleCookie()
if name in self._new_cookie:
del self._new_cookie[name]
self._new_cookie[name] = value
morsel = self._new_cookie[name]
if domain:
morsel["domain"] = domain
if expires_days is not None and not expires:
expires = datetime.datetime.utcnow() + datetime.timedelta(
days=expires_days)
if expires:
if type(expires) is not str:
timestamp = calendar.timegm(expires.utctimetuple())
expires = email.utils.formatdate(
timestamp, localtime=False, usegmt=True
)
else:
expires = 'Tue, 01 Jan 2030 00:00:00 GMT'
morsel['expires'] = expires
if path:
morsel["path"] = path
for k, v in kwargs.iteritems():
if k == 'max_age':
k = 'max-age'
morsel[k] = v
web.RequestHandler.set_cookie = set_cookie
def clear_cookie(self, name, path='/', domain=None):
"""Deletes the cookie with the given name."""
expires = 'Tue, 01 Jun 2000 00:00:00 GMT'
self.set_cookie(name, value='', path=path, expires=expires, domain=domain)
web.RequestHandler.clear_cookie = clear_cookie
#from model._db import SQLSTORE, mc
from os import getpid
PID = str(getpid()).ljust(7)
#logging.warn("PID:%s", PID)
def _init(self, *args, **kwds):
pass
web.RequestHandler.init = _init
def redirect(self, url, permanent=False):
"""Sends a redirect to the given (optionally relative) URL."""
if self._headers_written:
raise Exception('Cannot redirect after headers have been written')
self.set_status(301 if permanent else 302)
self.set_header('Location', url)
self.finish()
web.RequestHandler.redirect = redirect
def xsrf_form_html(self):
return '<input type="hidden" name="_xsrf" value="%s">'%self.xsrf_token
web.RequestHandler.xsrf_form_html = property(xsrf_form_html)
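# --- Usage sketch (not part of the original module) ---
# Inside a RequestHandler subclass; the cookie name and lifetime below are
# illustrative assumptions.
#
# self.set_cookie('sid', session_id, expires_days=30, httponly=True)
# self.clear_cookie('sid')   # expires the cookie on the shared domain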
|
tonghuashuai/42qu-notepad
|
lib/_tornado.py
|
Python
|
mit
| 3,187
|
from setuptools import setup, find_packages
setup(
name="RaspberryRacer",
version="0.1",
description="Raspberry Racer",
author="Diez B. Roggisch",
author_email="deets@web.de",
entry_points= {
'console_scripts' : [
'rracer = rracer.main:main',
]},
install_requires = [
],
zip_safe=True,
packages=find_packages(),
classifiers = [
'Development Status :: 3 - Alpha',
'Operating System :: OS Independent',
'Programming Language :: Python',
],
)
|
deets/raspberry-racer
|
python/setup.py
|
Python
|
mit
| 559
|
#vim
import sqlite3
from flask import Flask, request, g, redirect, url_for, abort, \
render_template, flash, session
from wtforms import Form, TextField, validators
from model import QueueEntry
import os
from sqlobject import connectionForURI, sqlhub
#configuration
DATABASE = 'bifi.db'
DEBUG = True
SECRET_KEY = "CHANGEME"
app = Flask(__name__)
app.config.from_object(__name__)
db_filename = os.path.abspath(app.config['DATABASE'])
connection_string = 'sqlite:' + db_filename
connection = connectionForURI(connection_string)
sqlhub.processConnection = connection
class SubmitForm(Form):
link = TextField('Torrent- or Magnet-URL')
def connect_db():
return sqlite3.connect(app.config['DATABASE'])
@app.before_request
def before_request():
g.db = connect_db()
@app.teardown_request
def teardown_request(exception):
g.db.close()
@app.route('/')
def index():
torrents = QueueEntry.select().reversed()
return render_template('index.html', torrents=torrents)
@app.route('/add', methods=['GET', 'POST'])
def add():
form = SubmitForm(request.form)
if request.method == "POST" and form.validate():
print form.link
entry = QueueEntry(torrent=form.link.data)
        flash('About time')
return render_template('add.html', form=form)
if __name__ == '__main__':
app.run()
|
silsha/bifibits-web
|
bifibits.py
|
Python
|
mit
| 1,341
|
# -*- coding: utf-8 -*-
"""Test package."""
|
jayvdb/flake8-putty
|
tests/__init__.py
|
Python
|
mit
| 44
|
import cProfile
import unittest
import pstats
if __name__ == '__main__':
suite = unittest.TestLoader().discover('.')
def runtests():
# set verbosity to 2 to see each test
unittest.TextTestRunner(verbosity=1, buffer=True).run(suite)
cProfile.run(
'runtests()', filename='test_cprofile_results.log', sort='cumtime')
p = pstats.Stats('test_cprofile_results.log')
p.strip_dirs().sort_stats('cumulative').print_stats(100)
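# --- Usage note (assumption: run from the project root) ---
# `python test.py` discovers the test suite, profiles the run into
# test_cprofile_results.log, and prints the 100 entries with the highest
# cumulative time.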
|
wanqizhu/mtg-python-engine
|
test.py
|
Python
|
mit
| 466
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-16 16:41
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('campaign', '0005_auto_20160716_1624'),
]
operations = [
migrations.AddField(
model_name='charity',
name='gateway',
field=models.ManyToManyField(through='campaign.GatewayProperty', to='campaign.Gateway'),
),
migrations.AlterField(
model_name='campaign',
name='created',
field=models.DateTimeField(default=django.utils.timezone.now),
),
migrations.AlterField(
model_name='gatewayproperty',
name='value',
field=models.CharField(blank=True, max_length=255, null=True),
),
]
|
toast38coza/FlashGiving
|
campaign/migrations/0006_auto_20160716_1641.py
|
Python
|
mit
| 895
|
# vim: set fileencoding=utf-8 :
"""python-opscripts setup
"""
# Standard library
from __future__ import absolute_import, division, print_function
import os.path
import re
import site
import sys
import glob
# Third-party
from setuptools import find_packages, setup
setup_path = os.path.dirname(os.path.realpath(__file__))
re_info = re.compile(r"""
# Description docstring
^" " "(?P<description>.+)
^" " ".*
# Version variable
__version__\s*=\s*"(?P<version>[^"]+)".*
# Maintainer variable
__maintainer__\s*=\s*"(?P<maintainer>[^"]+)".*
# Maintainer_email variable
__maintainer_email__\s*=\s*"(?P<maintainer_email>[^"]+)".*
# URL variable
__url__\s*=\s*"(?P<url>[^"]+)".*
# License variable
__license__\s*=\s*"(?P<license>[^"]+)".*
""", re.DOTALL | re.MULTILINE | re.VERBOSE)
with open(os.path.join(setup_path, "opscripts/__init__.py"), "rb") as f:
results = re_info.search(f.read().decode("utf-8"))
metadata = results.groupdict()
with open(os.path.join(setup_path, "README.rst"), "rb") as f:
long_description = f.read().decode("utf-8")
install_requires = ["ConfigArgParse"]
classifiers = ["Environment :: Console",
"Intended Audience :: System Administrators",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: Implementation :: CPython",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: System :: Systems Administration"]
packages = find_packages()
# Install config file appropriately
docs_path = ""
examples_path = "examples"
if hasattr(sys, "real_prefix"):
docs_path = sys.prefix
elif "--user" in sys.argv:
docs_path = site.USER_BASE
examples_path = os.path.join(docs_path, examples_path)
examples = glob.glob(os.path.join(setup_path, "example*.py"))
docs = [os.path.join(setup_path, "README.rst"),
os.path.join(setup_path, "LICENSE")]
setup(name="OpScripts",
version=metadata["version"],
maintainer=metadata["maintainer"],
maintainer_email=metadata["maintainer_email"],
license=metadata["license"],
description=metadata["description"],
long_description=long_description,
url=metadata["url"],
packages=packages,
data_files=[(docs_path, docs), (examples_path, examples)],
keywords="CLI, DevOps, Ops, sysadmin, Systems administration",
classifiers=classifiers,
download_url="https://github.com/ClockworkNet/OpScripts/releases",
zip_safe=True)
|
ClockworkNet/OpScripts
|
setup.py
|
Python
|
mit
| 2,971
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-09-19 07:46
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('photos', '0002_auto_20160919_0737'),
]
operations = [
migrations.CreateModel(
name='Rover',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nasa_id', models.IntegerField(unique=True)),
('name', models.CharField(max_length=30)),
('landing_date', models.DateField()),
('max_date', models.DateField()),
('max_sol', models.IntegerField()),
('total_photos', models.IntegerField()),
],
),
]
|
WillWeatherford/mars-rover
|
photos/migrations/0003_rover.py
|
Python
|
mit
| 852
|
#!/usr/bin/env python
print "HANDLING IMPORTS...",
import os
import time
import random
import operator
import argparse
import numpy as np
import cv2
from sklearn.utils import shuffle
import itertools
import scipy.io.wavfile as wave
from scipy import interpolate
import python_speech_features as psf
from pydub import AudioSegment
import pickle
import theano
import theano.tensor as T
from lasagne import random as lasagne_random
from lasagne import layers as l
from lasagne import nonlinearities
from lasagne import init
from lasagne import objectives
from lasagne import updates
from lasagne import regularization
try:
from lasagne.layers.dnn import BatchNormDNNLayer as BatchNormLayer
except ImportError:
from lasagne.layers import BatchNormLayer
print "DONE!"
######################## CONFIG #########################
#Fixed random seed
RANDOM_SEED = 1337
RANDOM = np.random.RandomState(RANDOM_SEED)
lasagne_random.set_rng(RANDOM)
#Image params
IM_SIZE = (512, 256) #(width, height)
IM_DIM = 1
#General model params
MODEL_TYPE = 1
MULTI_LABEL = False
NONLINEARITY = nonlinearities.elu #nonlinearities.rectify
INIT_GAIN = 1.0 #1.0 if elu, sqrt(2) if rectify
#Pre-trained model params
MODEL_PATH = 'model/'
PRETRAINED_MODEL = 'birdCLEF_TUCMI_Run1_Model.pkl'
#We need to define the class labels our net has learned
#but we use another file for that
from birdCLEF_class_labels import CLASSES
################### ARGUMENT PARSER #####################
def parse_args():
parser = argparse.ArgumentParser(description='BirdCLEF bird sound classification')
parser.add_argument('--filename', dest='filename', help='path to sample wav file for testing', type=str, default='')
parser.add_argument('--overlap', dest='spec_overlap', help='spectrogram overlap in seconds', type=int, default=0)
parser.add_argument('--results', dest='num_results', help='number of results', type=int, default=5)
parser.add_argument('--confidence', dest='min_confidence', help='confidence threshold', type=float, default=0.01)
args = parser.parse_args()
return args
################ SPECTROGRAM EXTRACTION #################
#Change sample rate if not 44.1 kHz
def changeSampleRate(sig, rate):
duration = sig.shape[0] / rate
time_old = np.linspace(0, duration, sig.shape[0])
time_new = np.linspace(0, duration, int(sig.shape[0] * 44100 / rate))
interpolator = interpolate.interp1d(time_old, sig.T)
new_audio = interpolator(time_new).T
sig = np.round(new_audio).astype(sig.dtype)
return sig, 44100
#Get magnitude spec from signal split
def getMagSpec(sig, rate, winlen, winstep, NFFT):
#get frames
winfunc = lambda x:np.ones((x,))
frames = psf.sigproc.framesig(sig, winlen*rate, winstep*rate, winfunc)
#Magnitude Spectrogram
magspec = np.rot90(psf.sigproc.magspec(frames, NFFT))
return magspec
#Split signal into five-second chunks with configurable overlap (default 2 s) and minimum length of 1 second
#Use these settings for other chunk lengths:
#winlen, winstep, seconds
#0.05, 0.0097, 5s
#0.05, 0.0195, 10s
#0.05, 0.0585, 30s
def getMultiSpec(path, seconds=5, overlap=2, minlen=1, winlen=0.05, winstep=0.0097, NFFT=840):
#open wav file
(rate,sig) = wave.read(path)
#adjust to different sample rates
if rate != 44100:
sig, rate = changeSampleRate(sig, rate)
#split signal with overlap
sig_splits = []
for i in xrange(0, len(sig), int((seconds - overlap) * rate)):
split = sig[i:i + seconds * rate]
if len(split) >= minlen * rate:
sig_splits.append(split)
#is signal too short for segmentation?
if len(sig_splits) == 0:
sig_splits.append(sig)
#calculate spectrogram for every split
for sig in sig_splits:
#preemphasis
sig = psf.sigproc.preemphasis(sig, coeff=0.95)
#get spec
magspec = getMagSpec(sig, rate, winlen, winstep, NFFT)
#get rid of high frequencies
h, w = magspec.shape[:2]
magspec = magspec[h - 256:, :]
#normalize in [0, 1]
magspec -= magspec.min(axis=None)
magspec /= magspec.max(axis=None)
#fix shape to 512x256 pixels without distortion
magspec = magspec[:256, :512]
temp = np.zeros((256, 512), dtype="float32")
temp[:magspec.shape[0], :magspec.shape[1]] = magspec
magspec = temp.copy()
magspec = cv2.resize(magspec, (512, 256))
#DEBUG: show spec
#cv2.imshow('SPEC', magspec)
#cv2.waitKey(-1)
yield magspec
################## BUILDING THE MODEL ###################
def buildModel(mtype=1):
print "BUILDING MODEL TYPE", mtype, "..."
#default settings (Model 1)
filters = 64
first_stride = 2
last_filter_multiplier = 16
#specific model type settings (see working notes for details)
if mtype == 2:
first_stride = 1
elif mtype == 3:
filters = 32
last_filter_multiplier = 8
#input layer
net = l.InputLayer((None, IM_DIM, IM_SIZE[1], IM_SIZE[0]))
#conv layers
net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters, filter_size=7, pad='same', stride=first_stride, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
net = l.MaxPool2DLayer(net, pool_size=2)
if mtype == 2:
net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters, filter_size=5, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
net = l.MaxPool2DLayer(net, pool_size=2)
net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 2, filter_size=5, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
net = l.MaxPool2DLayer(net, pool_size=2)
net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 4, filter_size=3, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
net = l.MaxPool2DLayer(net, pool_size=2)
net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 8, filter_size=3, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
net = l.MaxPool2DLayer(net, pool_size=2)
net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * last_filter_multiplier, filter_size=3, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
net = l.MaxPool2DLayer(net, pool_size=2)
print "\tFINAL POOL OUT SHAPE:", l.get_output_shape(net)
#dense layers
net = l.batch_norm(l.DenseLayer(net, 512, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
net = l.batch_norm(l.DenseLayer(net, 512, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
#Classification Layer
if MULTI_LABEL:
net = l.DenseLayer(net, NUM_CLASSES, nonlinearity=nonlinearities.sigmoid, W=init.HeNormal(gain=1))
else:
net = l.DenseLayer(net, NUM_CLASSES, nonlinearity=nonlinearities.softmax, W=init.HeNormal(gain=1))
print "...DONE!"
#model stats
print "MODEL HAS", (sum(hasattr(layer, 'W') for layer in l.get_all_layers(net))), "WEIGHTED LAYERS"
print "MODEL HAS", l.count_params(net), "PARAMS"
return net
NUM_CLASSES = len(CLASSES)
NET = buildModel(MODEL_TYPE)
#################### MODEL LOAD ########################
def loadParams(epoch, filename=None):
print "IMPORTING MODEL PARAMS...",
net_filename = MODEL_PATH + filename
with open(net_filename, 'rb') as f:
params = pickle.load(f)
l.set_all_param_values(NET, params)
print "DONE!"
#load params of trained model
loadParams(-1, filename=PRETRAINED_MODEL)
################# PREDICTION FUNCTION ####################
def getPredictionFuntion(net):
net_output = l.get_output(net, deterministic=True)
print "COMPILING THEANO TEST FUNCTION...",
start = time.time()
test_net = theano.function([l.get_all_layers(NET)[0].input_var], net_output, allow_input_downcast=True)
print "DONE! (", int(time.time() - start), "s )"
return test_net
TEST_NET = getPredictionFuntion(NET)
################# PREDICTION POOLING ####################
def predictionPooling(p):
#You can test different prediction pooling strategies here
#We only use average pooling
p_pool = np.mean(p, axis=0)
return p_pool
####################### PREDICT #########################
def predict(img):
#transpose image if dim=3
try:
img = np.transpose(img, (2, 0, 1))
except:
pass
#reshape image
img = img.reshape(-1, IM_DIM, IM_SIZE[1], IM_SIZE[0])
#calling the test function returns the net output
prediction = TEST_NET(img)[0]
return prediction
####################### TESTING #########################
def testFile(path, spec_overlap=4, num_results=5, confidence_threshold=0.01):
#time
start = time.time()
#extract spectrograms from wav-file and process them
predictions = []
spec_cnt = 0
for spec in getMultiSpec(path, overlap=spec_overlap, minlen=1):
#make prediction
p = predict(spec)
spec_cnt += 1
#stack predictions
if len(predictions):
predictions = np.vstack([predictions, p])
else:
predictions = p
#prediction pooling
p_pool = predictionPooling(predictions)
#get class labels for predictions
p_labels = {}
for i in range(p_pool.shape[0]):
if p_pool[i] >= confidence_threshold:
p_labels[CLASSES[i]] = p_pool[i]
#sort by confidence and limit results (None returns all results)
p_sorted = sorted(p_labels.items(), key=operator.itemgetter(1), reverse=True)[:num_results]
#take time again
dur = time.time() - start
return p_sorted, spec_cnt, dur
#################### EXAMPLE USAGE ######################
if __name__ == "__main__":
#adjust config
args = parse_args()
#do testing
print 'TESTING:', args.filename
pred, cnt, dur = testFile(args.filename, args.spec_overlap, args.num_results, args.min_confidence)
print 'TOP PREDICTION(S):'
for p in pred:
print '\t', p[0], int(p[1] * 100), '%'
print 'PREDICTION FOR', cnt, 'SPECS TOOK', int(dur * 1000), 'ms (', int(dur / cnt * 1000) , 'ms/spec', ')'
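#################### USAGE SKETCH #######################
#Shell invocation (the wav file name is illustrative):
#  python birdCLEF_test.py --filename sample.wav --overlap 4 --results 5 --confidence 0.01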
|
kahst/BirdCLEF2017
|
birdCLEF_test.py
|
Python
|
mit
| 10,275
|
from exchanges import helpers
from exchanges import kraken
from decimal import Decimal
### Kraken opportunities
#### ARBITRAGE OPPORTUNITY 1
def opportunity_1():
sellLTCbuyEUR = kraken.get_current_bid_LTCEUR()
sellEURbuyXBT = kraken.get_current_ask_XBTEUR()
sellXBTbuyLTC = kraken.get_current_ask_XBTLTC()
    opport = 1-((sellLTCbuyEUR/sellEURbuyXBT)*sellXBTbuyLTC)
return Decimal(opport)
def opportunity_2():
sellEURbuyLTC = kraken.get_current_ask_LTCEUR()
sellLTCbuyXBT = kraken.get_current_ask_XBTLTC()
sellXBTbuyEUR = kraken.get_current_bid_XBTEUR()
opport = 1-(((1/sellEURbuyLTC)/sellLTCbuyXBT)*sellXBTbuyEUR)
return Decimal(opport)
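# --- Worked example (illustrative prices, not live data) ---
# With sellLTCbuyEUR = 60, sellEURbuyXBT = 30000 and sellXBTbuyLTC = 0.002:
#   opportunity_1() -> 1 - ((60 / 30000) * 0.002) = 0.999996
# The interpretation of the sign/threshold is left to the caller.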
|
Humantrashcan/prices
|
exchanges/opportunity_kraken.py
|
Python
|
mit
| 658
|
import six
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers
class EnumField(serializers.ChoiceField):
default_error_messages = {"invalid_choice": _('"{input}" is not a valid choice.')}
def __init__(self, enum, **kwargs):
self.enum = enum
choices = (
(self.get_choice_value(enum_value), enum_value.label)
for _, enum_value in enum.choices()
)
super(EnumField, self).__init__(choices, **kwargs)
def get_choice_value(self, enum_value):
return enum_value.value
def to_internal_value(self, data):
if isinstance(data, six.string_types) and data.isdigit():
data = int(data)
try:
value = self.enum.get(data).value
except AttributeError: # .get() returned None
if not self.required:
raise serializers.SkipField()
self.fail("invalid_choice", input=data)
return value
def to_representation(self, value):
enum_value = self.enum.get(value)
if enum_value is not None:
return self.get_choice_value(enum_value)
class NamedEnumField(EnumField):
def get_choice_value(self, enum_value):
return enum_value.name
class Meta:
swagger_schema_fields = {"type": "string"}
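# --- Usage sketch (not part of the original module) ---
# Assumes a django_enumfield Enum; the Status enum and serializer below are
# illustrative.
#
# class Status(Enum):
#     ACTIVE = 1
#     CLOSED = 2
#
# class TicketSerializer(serializers.Serializer):
#     status = EnumField(Status)         # accepts/returns the integer value
#     # status = NamedEnumField(Status)  # accepts/returns the name instead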
|
5monkeys/django-enumfield
|
django_enumfield/contrib/drf.py
|
Python
|
mit
| 1,336
|
import _plotly_utils.basevalidators
class TickformatValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self,
plotly_name="tickformat",
parent_name="scattercarpet.marker.colorbar",
**kwargs
):
super(TickformatValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
role=kwargs.pop("role", "style"),
**kwargs
)
|
plotly/python-api
|
packages/python/plotly/plotly/validators/scattercarpet/marker/colorbar/_tickformat.py
|
Python
|
mit
| 516
|
#! /usr/bin/python
from TOSSIM import *
from sets import Set
import sys
from optparse import OptionParser
parser = OptionParser(usage="usage: %prog [options] filename",
version="%prog 1.0")
parser.add_option("-g", "--gainfile",
action="store",
dest="gainfile",
default="topology.txt",
help="file containing gains between simulation nodes")
parser.add_option("-n", "--noisefile",
action="store",
dest="noise",
default="Noise/meyer-heavy-short.txt",
help="file containing gains between simulation nodes")
(options, args) = parser.parse_args()
options_dict = vars(options)
print options_dict['gainfile']
print "Simulation start"
from tinyos.tossim.TossimApp import *
n = NescApp()
vars = n.variables.variables()
t = Tossim(vars)
r = t.radio()
mac = t.mac()
# Topology configuration
gainfile = open(options_dict['gainfile'], "r")
nodes = Set([])
print "Simulation Topology:"
lines = gainfile.readlines()
for line in lines:
splitlines = line.split()
if (len(splitlines) > 0):
if (splitlines[0] == "gain"):
r.add(int(splitlines[1]), int(splitlines[2]), float(splitlines[3].replace(",",".")))
print "Source:", splitlines[1], "Destination:", splitlines[2], "Gain:", splitlines[3], "dBm";
nodes.add(int(splitlines[1]))
nodes.add(int(splitlines[2]))
print "Number of nodes: " + str(len(nodes)) + ", nodes' ids:", nodes
# Allocating debug outputs
energy_output = open("Simulation/Energy.txt", "w")
packet_output = open("Simulation/Packet.txt", "w")
t.addChannel("PacketState", packet_output)
t.addChannel("ENERGY_HANDLER", energy_output)
# Opening simulation result file
resultfile = open("Simulation/Result.txt", "w")
# Default noise trace
noise = open(options_dict['noise'], "r")
lines = noise.readlines()
for line in lines:
stripline = line.strip()
if (stripline != ""):
val = int(stripline)
for node in nodes:
t.getNode(node).addNoiseTraceReading(val)
for node in nodes:
print "Creating noise model for node " + str(node) + "."
t.getNode(node).createNoiseModel()
# Boot time spread
boot_time = 0
for node in nodes:
t.getNode(node).bootAtTime(0 + boot_time);
boot_time += 50000000 # equal to 5 ms
# This runs the network for 50 seconds:
time = t.time()
while (time + t.ticksPerSecond() * 50 > t.time()):
t.runNextEvent()
resultfile.write("%d\n" % (t.time()))
for node in nodes:
m = t.getNode(node)
v = m.getVariable("MacPerformanceC.received_packets")
received_packets = v.getData()
c = m.getVariable("MacPerformanceC.counter")
sent_packets = c.getData()
print "The node id", node, "has sent", sent_packets, "and received", received_packets, "in total.";
resultfile.write("%d,%d,%d\n" % (node, sent_packets, received_packets))
print "End of simulation."
|
w1lq/MacPerformance
|
Simulation.py
|
Python
|
mit
| 2,721
|
"""
XKCD plot generator
-------------------
Author: Jake Vanderplas
This is a script that will take any matplotlib line diagram, and convert it
to an XKCD-style plot. It will work for plots with line & text elements,
including axes labels and titles (but not axes tick labels).
The idea for this comes from work by Damon McDougall
http://www.mail-archive.com/matplotlib-users@lists.sourceforge.net/msg25499.html
from:
http://nbviewer.ipython.org/url/jakevdp.github.com/downloads/notebooks/XKCD_plots.ipynb
"""
import numpy as np
import pylab as pl
from scipy import interpolate, signal
import matplotlib.font_manager as fm
# We need a special font for the code below. It can be downloaded this way:
import os
import urllib2
#import urllib.request as urllib2
if not os.path.exists('Humor-Sans.ttf'):
fhandle = urllib2.urlopen('http://antiyawn.com/uploads/Humor-Sans-1.0.ttf')
open('Humor-Sans.ttf', 'wb').write(fhandle.read())
def xkcd_line(x, y, xlim=None, ylim=None,
mag=1.0, f1=30, f2=0.05, f3=15):
"""
Mimic a hand-drawn line from (x, y) data
Parameters
----------
x, y : array_like
arrays to be modified
xlim, ylim : data range
the assumed plot range for the modification. If not specified,
they will be guessed from the data
mag : float
magnitude of distortions
f1, f2, f3 : int, float, int
filtering parameters. f1 gives the size of the window, f2 gives
the high-frequency cutoff, f3 gives the size of the filter
Returns
-------
x, y : ndarrays
The modified lines
"""
x = np.asarray(x)
y = np.asarray(y)
# get limits for rescaling
if xlim is None:
xlim = (x.min(), x.max())
if ylim is None:
ylim = (y.min(), y.max())
if xlim[1] == xlim[0]:
xlim = ylim
if ylim[1] == ylim[0]:
ylim = xlim
# scale the data
x_scaled = (x - xlim[0]) * 1. / (xlim[1] - xlim[0])
y_scaled = (y - ylim[0]) * 1. / (ylim[1] - ylim[0])
# compute the total distance along the path
dx = x_scaled[1:] - x_scaled[:-1]
dy = y_scaled[1:] - y_scaled[:-1]
dist_tot = np.sum(np.sqrt(dx * dx + dy * dy))
# number of interpolated points is proportional to the distance
Nu = int(200 * dist_tot)
u = np.arange(-1, Nu + 1) * 1. / (Nu - 1)
# interpolate curve at sampled points
k = min(3, len(x) - 1)
res = interpolate.splprep([x_scaled, y_scaled], s=0, k=k)
x_int, y_int = interpolate.splev(u, res[0])
# we'll perturb perpendicular to the drawn line
dx = x_int[2:] - x_int[:-2]
dy = y_int[2:] - y_int[:-2]
dist = np.sqrt(dx * dx + dy * dy)
# create a filtered perturbation
coeffs = mag * np.random.normal(0, 0.01, len(x_int) - 2)
b = signal.firwin(f1, f2 * dist_tot, window=('kaiser', f3))
response = signal.lfilter(b, 1, coeffs)
x_int[1:-1] += response * dy / dist
y_int[1:-1] += response * dx / dist
# un-scale data
x_int = x_int[1:-1] * (xlim[1] - xlim[0]) + xlim[0]
y_int = y_int[1:-1] * (ylim[1] - ylim[0]) + ylim[0]
return x_int, y_int
def XKCDify(ax, mag=1.0,
f1=50, f2=0.01, f3=15,
bgcolor='w',
xaxis_loc=None,
yaxis_loc=None,
xaxis_arrow='+',
yaxis_arrow='+',
ax_extend=0.1,
expand_axes=False):
"""Make axis look hand-drawn
This adjusts all lines, text, legends, and axes in the figure to look
like xkcd plots. Other plot elements are not modified.
Parameters
----------
ax : Axes instance
the axes to be modified.
mag : float
the magnitude of the distortion
f1, f2, f3 : int, float, int
filtering parameters. f1 gives the size of the window, f2 gives
the high-frequency cutoff, f3 gives the size of the filter
xaxis_loc, yaxis_log : float
The locations to draw the x and y axes. If not specified, they
will be drawn from the bottom left of the plot
xaxis_arrow, yaxis_arrow : str
where to draw arrows on the x/y axes. Options are '+', '-', '+-', or ''
ax_extend : float
How far (fractionally) to extend the drawn axes beyond the original
axes limits
expand_axes : bool
if True, then expand axes to fill the figure (useful if there is only
a single axes in the figure)
"""
# Get axes aspect
ext = ax.get_window_extent().extents
aspect = (ext[3] - ext[1]) / (ext[2] - ext[0])
xlim = ax.get_xlim()
ylim = ax.get_ylim()
xspan = xlim[1] - xlim[0]
    yspan = ylim[1] - ylim[0]
xax_lim = (xlim[0] - ax_extend * xspan,
xlim[1] + ax_extend * xspan)
yax_lim = (ylim[0] - ax_extend * yspan,
ylim[1] + ax_extend * yspan)
if xaxis_loc is None:
xaxis_loc = ylim[0]
if yaxis_loc is None:
yaxis_loc = xlim[0]
# Draw axes
xaxis = pl.Line2D([xax_lim[0], xax_lim[1]], [xaxis_loc, xaxis_loc],
linestyle='-', color='k')
yaxis = pl.Line2D([yaxis_loc, yaxis_loc], [yax_lim[0], yax_lim[1]],
linestyle='-', color='k')
    # Label axes
ax.text(xax_lim[1], xaxis_loc - 0.02 * yspan, ax.get_xlabel(),
fontsize=14, ha='right', va='top', rotation=12)
ax.text(yaxis_loc - 0.02 * xspan, yax_lim[1], ax.get_ylabel(),
fontsize=14, ha='right', va='top', rotation=78)
ax.set_xlabel('')
ax.set_ylabel('')
# Add title
ax.text(0.5 * (xax_lim[1] + xax_lim[0]), yax_lim[1],
ax.get_title(),
ha='center', va='bottom', fontsize=16)
ax.set_title('')
Nlines = len(ax.lines)
lines = [xaxis, yaxis] + [ax.lines.pop(0) for i in range(Nlines)]
for line in lines:
x, y = line.get_data()
x_int, y_int = xkcd_line(x, y, xlim, ylim,
mag, f1, f2, f3)
# create foreground and background line
lw = line.get_linewidth()
line.set_linewidth(2 * lw)
line.set_data(x_int, y_int)
# don't add background line for axes
if (line is not xaxis) and (line is not yaxis):
line_bg = pl.Line2D(x_int, y_int, color=bgcolor,
linewidth=8 * lw)
ax.add_line(line_bg)
ax.add_line(line)
# Draw arrow-heads at the end of axes lines
arr1 = 0.03 * np.array([-1, 0, -1])
arr2 = 0.02 * np.array([-1, 0, 1])
arr1[::2] += np.random.normal(0, 0.005, 2)
arr2[::2] += np.random.normal(0, 0.005, 2)
x, y = xaxis.get_data()
if '+' in str(xaxis_arrow):
ax.plot(x[-1] + arr1 * xspan * aspect,
y[-1] + arr2 * yspan,
color='k', lw=2)
if '-' in str(xaxis_arrow):
ax.plot(x[0] - arr1 * xspan * aspect,
y[0] - arr2 * yspan,
color='k', lw=2)
x, y = yaxis.get_data()
if '+' in str(yaxis_arrow):
ax.plot(x[-1] + arr2 * xspan * aspect,
y[-1] + arr1 * yspan,
color='k', lw=2)
if '-' in str(yaxis_arrow):
ax.plot(x[0] - arr2 * xspan * aspect,
y[0] - arr1 * yspan,
color='k', lw=2)
# Change all the fonts to humor-sans.
prop = fm.FontProperties(fname='Humor-Sans.ttf', size=16)
for text in ax.texts:
text.set_fontproperties(prop)
# modify legend
leg = ax.get_legend()
if leg is not None:
leg.set_frame_on(False)
for child in leg.get_children():
if isinstance(child, pl.Line2D):
x, y = child.get_data()
child.set_data(xkcd_line(x, y, mag=10, f1=100, f2=0.001))
child.set_linewidth(2 * child.get_linewidth())
if isinstance(child, pl.Text):
child.set_fontproperties(prop)
# Set the axis limits
ax.set_xlim(xax_lim[0] - 0.1 * xspan,
xax_lim[1] + 0.1 * xspan)
ax.set_ylim(yax_lim[0] - 0.1 * yspan,
yax_lim[1] + 0.1 * yspan)
# adjust the axes
ax.set_xticks([])
ax.set_yticks([])
if expand_axes:
ax.figure.set_facecolor(bgcolor)
ax.set_axis_off()
ax.set_position([0, 0, 1, 1])
return ax
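# Example usage (illustrative sketch, not part of the original module):
# draw a plain line plot, then hand the axes to XKCDify. Everything below
# is standard pylab plus the functions defined above.
if __name__ == '__main__':
    np.random.seed(0)
    ax = pl.axes()
    x = np.linspace(0, 10, 100)
    ax.plot(x, np.sin(x) * np.exp(-0.1 * (x - 5) ** 2), 'b', label='damped sine')
    ax.set_title('An xkcd-style plot')
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.legend(loc='lower right')
    XKCDify(ax, expand_axes=True)
    pl.show()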
|
OpenBookProjects/ipynb
|
XKCD-style/xkcdplot.py
|
Python
|
mit
| 8,352
|
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
from kivy.properties import ObjectProperty
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.widget import Widget
from kivy.clock import Clock
import os
import dynamixel
import sys
import subprocess
import optparse
import yaml
import cinematica
import itertools
import collections
import time
import serial as arduinoSerial
import copy
import math
from robot import *
class Inverso(MastermindInvers):
def empezar(self):
"""
        Complements the empezar step of the inverse game:
        the robot thinks up a random code that the user has to guess
"""
self.codigo = [randint(0,4) for i in range(5)]
print 'ready'
print self.codigo
def continuar(self, guess):
"""
        Complements the continuar step, the next turn of the inverse game:
        computes the robot's reply to the code proposed by the user
"""
self.guess = copy.copy(guess)
self.codigoPrueba = copy.copy(self.codigo)
self.rojas = 0
self.blancas = 0
self.buscarRojas()
self.buscarBlancas()
return self.rojas, self.blancas
class Delta(MastermindDirecte, TabbedPanel):
text_input = ObjectProperty(None)
colors = ['Negro', 'Azul', 'Verde', 'Blanco', 'Rojo']
    #red = (255, 0, 0, 1) blue = (0, 100, 250, 1) yellow = (255, 255, 0, 1) white = (255, 255, 255, 1) black = (0, 0, 0, 1)
colorsrgba = [(0, 0, 0, 1),(0, 100, 250, 1),(255, 255, 0, 1),(255, 255, 255, 1),(255, 0, 0, 1)]
robot=Robot()
robot.buscarServos()
inv = Inverso()
def jugar_p_directo(self):
"""
        Starts the direct game with a first move
        If the program has a saved configuration, it first clears the board and then starts
        Inside the while loop:
        Plays one turn of the game; requires the user's answer via the push buttons
        Always saves the robot's last configuration for later restarts
"""
self.linea = 1
choices, holes = 5, 5
        self.pool = self.generate_initial_pool(choices, holes)#generates the list of all possible combinations, without repeats
self.guess = [0,0,1,1,1]
self.ids.textprueba.text = "Try this:"
huecoX = 'hueco'+str(self.linea)
for y in range(5):
huecoXY=huecoX+str(y)
self.ids[huecoXY].background_color = self.colorsrgba[self.guess[y]]
Clock.schedule_once(self.esperar, 1)
checkbox = self.ids.checkbox
checkbox.bind(active=self.on_checkbox_active)
print checkbox.active
try:
            ######################################## load the last saved configuration
ultimoCodigo = open('ultimoCodigo', 'r')
ultimo = ultimoCodigo.readline().split('|')
codigo = [int(i) for i in ultimo[:-1]]
huecos = ultimo[5].split(',')
huecos.reverse()
matrizOcupados = [int(i) for i in huecos]
for i in range(5):
for e in range(5):
p=matrizOcupados.pop()
self.robot.listaHuecosColores[i][e][3]=p
for i in range(5):
p=matrizOcupados.pop()
self.robot.listaHuecosRobot[i][3]=p
ultimoCodigo.close()
            #########################################the marble movement goes here!!!!
self.robot.quitar_bolitas(codigo, self.guess)
self.robot.poner_bolitas(self.guess, codigo)
self.robot.mover_robot([0, 0, -24])
except:
            print 'No ultimoCodigo file found = no marbles placed'
            #########################################the marble movement goes here!!!!
ultimo = [None, None, None, None, None]
self.robot.poner_bolitas(self.guess, ultimo)
self.robot.mover_robot([0, 0, -24])
        ######################################### Save the last combination and the matrix of holes
ultimoCodigo = open('ultimoCodigo', 'w')
s=''
for listaHuecosColor in self.robot.listaHuecosColores:
for listaHuecoColor in listaHuecosColor:
s+='{0},'.format(listaHuecoColor[3])
for listaHuecoRobot in self.robot.listaHuecosRobot:
s+='{0},'.format(listaHuecoRobot[3])
ultimoCodigo.write('{0}|{1}|{2}|{3}|{4}|{5}'.format(self.guess[0],self.guess[1],self.guess[2],self.guess[3],self.guess[4],s[:-1]))
ultimoCodigo.close()
"""
continuar = Clock.create_trigger(self.continuar_p_directo)
        continuar() #attempt 1
        continuar = Clock.create_trigger(self.continuar_p_directo)
        continuar() #attempt 2
        continuar = Clock.create_trigger(self.continuar_p_directo)
        continuar() #attempt 3
        continuar = Clock.create_trigger(self.continuar_p_directo)
        continuar() #attempt 4
        continuar = Clock.create_trigger(self.continuar_p_directo)
        continuar() #attempt 5
        continuar = Clock.create_trigger(self.continuar_p_directo)
        continuar() #attempt 6
"""
"""
while True:
self.continuar_p_directo()
"""
def continuar_p_directo(self):
        ######################################### arduino response
self.pulsadores = Arduino()
respuesta = self.pulsadores.codigo2()
correct = respuesta[0]
close = respuesta[1]
        ########################################## end of arduino response
if self.linea>7:
self.robot.perder()
return None
self.linea+=1
"""arduino.write('2')
self.respuesta=''
        Clock.schedule_interval(self.respuesta_d_arduino, 1)# arduino response
respuesta = self.respuesta_d
correct = respuesta[0]
close = respuesta[1]
"""
feedback = self.Feedback(correct, close)
if self.linea >1:
self.ids['textrojo'+str(self.linea-1)].text = str(correct)
self.ids['textblanco'+str(self.linea-1)].text = str(close)
if feedback.correct == 5:
print "\nHe ganado!!"
self.ids.textprueba.text = "He ganado! (juas) (juas)"
self.ids.jugar_p_directo.text='Reiniciar (pulsadores)'
self.ids.empezar_i_directo.text='Reiniciar (interfaz)'
self.robot.celebrar()
return None
#Clock.schedule_once(self.esperar, 1)
try:
initime = time.time()
self.previousPool = copy.copy(self.pool)
            self.pool = list(self.filter_pool(feedback)) #refresh the list of remaining possible combinations based on the user's feedback
print "{0} posibles opciones restantes. Pensando...\n".format(len(self.pool))
self.previousGuess = copy.copy(self.guess)
self.guess = list(self.make_guess(feedback, initime))
huecoX = 'hueco'+str(self.linea)
for y in range(5):
huecoXY=huecoX+str(y)
self.ids[huecoXY].background_color = self.colorsrgba[self.guess[y]]
            #########################################the marble movement goes here!!!!
print self.previousGuess
print self.guess
self.robot.quitar_bolitas(self.previousGuess, self.guess)
self.robot.poner_bolitas(self.guess, self.previousGuess)
self.robot.mover_robot([0, 0, -24])
            ######################################### Save the last combination and the matrix of holes
ultimoCodigo = open('ultimoCodigo', 'w')
s=''
for listaHuecosColor in self.robot.listaHuecosColores:
for listaHuecoColor in listaHuecosColor:
s+='{0},'.format(listaHuecoColor[3])
for listaHuecoRobot in self.robot.listaHuecosRobot:
s+='{0},'.format(listaHuecoRobot[3])
ultimoCodigo.write('{0}|{1}|{2}|{3}|{4}|{5}'.format(self.guess[0],self.guess[1],self.guess[2],self.guess[3],self.guess[4],s[:-1]))
ultimoCodigo.close()
except:
self.ids.textprueba.text = "Te has equivocado. Cambia tu respuesta y vuelve a intentarlo. Si persiste, reinicia."
self.ids.jugar_p_directo.text = 'Reiniciar (pulsadores)'
self.ids.empezar_i_directo.text='Reiniciar (interfaz)'
self.ids.continuar_i_directo.text ='No tocar (interfaz)'
self.pool = copy.copy(self.previousPool)
def empezar_i_directo(self):
self.linea = 1
choices, holes = 5, 5
        self.pool = self.generate_initial_pool(choices, holes)#generates the list of all possible combinations, without repeats
self.guess = []
for i in range(holes):
self.guess =[0,0,1,1,1]
print "Try this: {0}".format(self.codeToColor(self.guess))
self.ids.textprueba.text = "Try this:"#genera una combinacion cualquiera primera
huecoX = 'hueco'+str(self.linea)
for y in range(5):
huecoXY=huecoX+str(y)
self.ids[huecoXY].background_color = self.colorsrgba[self.guess[y]]
Clock.schedule_once(self.esperar, 1)
#checkbox = self.ids.checkbox
#checkbox.bind(active=self.on_checkbox_active)
try:
            ######################################## load the last saved configuration
ultimoCodigo = open('ultimoCodigo', 'r')
ultimo = ultimoCodigo.readline().split('|')
codigo = [int(i) for i in ultimo[:-1]]
huecos = ultimo[5].split(',')
huecos.reverse()
matrizOcupados = [int(i) for i in huecos]
for i in range(5):
for e in range(5):
p=matrizOcupados.pop()
self.robot.listaHuecosColores[i][e][3]=p
for i in range(5):
p=matrizOcupados.pop()
self.robot.listaHuecosRobot[i][3]=p
ultimoCodigo.close()
            #########################################the marble movement goes here!!!!
self.robot.quitar_bolitas(codigo, self.guess)
self.robot.poner_bolitas(self.guess, codigo)
self.robot.mover_robot([0, 0, -24])
except:
            print 'No ultimoCodigo file found = no marbles placed'
            #########################################the marble movement goes here!!!!
ultimo = [None, None, None, None, None]
self.robot.poner_bolitas(self.guess, ultimo)
self.robot.mover_robot([0, 0, -24])
        ######################################### Save the last combination and the matrix of holes
ultimoCodigo = open('ultimoCodigo', 'w')
s=''
for listaHuecosColor in self.robot.listaHuecosColores:
for listaHuecoColor in listaHuecosColor:
s+='{0},'.format(listaHuecoColor[3])
for listaHuecoRobot in self.robot.listaHuecosRobot:
s+='{0},'.format(listaHuecoRobot[3])
ultimoCodigo.write('{0}|{1}|{2}|{3}|{4}|{5}'.format(self.guess[0],self.guess[1],self.guess[2],self.guess[3],self.guess[4],s[:-1]))
ultimoCodigo.close()
def continuar_i_directo(self):
"""
        Plays one turn of the game; requires the user's answer via the interface text fields
        Always saves the robot's last configuration for later restarts
"""
if self.linea>7:
self.robot.perder()
return None
self.linea+=1
correct = int(self.ids.reds.text)
close = int(self.ids.whites.text)
feedback = self.Feedback(correct, close)
if self.linea >1:
self.ids['textrojo'+str(self.linea-1)].text = str(correct)
self.ids['textblanco'+str(self.linea-1)].text = str(close)
if feedback.correct == 5:
print "\nHe ganado!!"
self.ids.textprueba.text = "He ganado! (juas) (juas)"
self.ids.jugar_p_directo.text='Reiniciar (pulsadores)'
self.ids.empezar_i_directo.text='Reiniciar (interfaz)'
self.robot.celebrar()
return None
Clock.schedule_once(self.esperar, 1)
try:
initime = time.time()
self.previousPool = copy.copy(self.pool)
            self.pool = list(self.filter_pool(feedback)) #refresh the list of remaining possible combinations based on the user's feedback
print "{0} posibles opciones restantes. Pensando...\n".format(len(self.pool))
self.previousGuess = copy.copy(self.guess)
self.guess = list(self.make_guess(feedback, initime))
huecoX = 'hueco'+str(self.linea)
for y in range(5):
huecoXY=huecoX+str(y)
self.ids[huecoXY].background_color = self.colorsrgba[self.guess[y]]
            #########################################the marble movement goes here!!!!
print self.previousGuess
print self.guess
self.robot.quitar_bolitas(self.previousGuess, self.guess)
self.robot.poner_bolitas(self.guess, self.previousGuess)
self.robot.mover_robot([0, 0, -24])
            ######################################### Save the last combination and the matrix of holes
ultimoCodigo = open('ultimoCodigo', 'w')
s=''
for listaHuecosColor in self.robot.listaHuecosColores:
for listaHuecoColor in listaHuecosColor:
s+='{0},'.format(listaHuecoColor[3])
for listaHuecoRobot in self.robot.listaHuecosRobot:
s+='{0},'.format(listaHuecoRobot[3])
ultimoCodigo.write('{0}|{1}|{2}|{3}|{4}|{5}'.format(self.guess[0],self.guess[1],self.guess[2],self.guess[3],self.guess[4],s[:-1]))
ultimoCodigo.close()
except:
self.ids.textprueba.text = "Te has equivocado. Cambia tu respuesta y vuelve a intentarlo. Si persiste, reinicia."
self.ids.jugar_p_directo.text = 'Reiniciar (pulsadores)'
self.ids.empezar_i_directo.text='Reiniciar (interfaz)'
self.ids.continuar_i_directo.text ='Reintentar (interfaz)'
self.pool = copy.copy(self.previousPool)
def jugar_p_inverso(self):
"""
        Starts the inverse game with a first move
        Requires the user's answer via the push buttons
        Inside the while loop:
        Plays one turn of the game; requires the user's answer via the push buttons
"""
self.linea2 = 0
self.inv.empezar()
self.linea2+=1
"""
arduino.write('3')
        Clock.schedule_interval(self.respuesta_i_arduino, 1)# arduino response
guess2 = self.respuesta_i_arduino
print guess2
"""
        ######################################### arduino response
        print 'hello'
self.pulsadores = Arduino()
guess2 = self.pulsadores.codigo5()
print guess2
rojas2, blancas2 = self.inv.continuar(guess2)
print rojas2, blancas2
self.nuevas = [None, None, None, None, None]
for i in range(rojas2):
self.nuevas[i]=4
for i in range(blancas2):
self.nuevas[i+rojas2]=3
self.viejas = [None, None, None, None, None]
huecoX = '2hueco'+str(self.linea2)
for y in range(5):
huecoXY=huecoX+str(y)
print guess2[y]
self.ids[huecoXY].background_color = self.colorsrgba[guess2[y]]
Clock.schedule_once(self.esperar, 1)
#checkbox = self.ids.checkbox
#checkbox.bind(active=self.on_checkbox_active)
try:
            ######################################## load the last saved configuration
ultimoCodigo = open('ultimoCodigo', 'r')
ultimo = ultimoCodigo.readline().split('|')
codigo = [int(i) for i in ultimo[:-1]]
huecos = ultimo[5].split(',')
huecos.reverse()
matrizOcupados = [int(i) for i in huecos]
for i in range(5):
for e in range(5):
p=matrizOcupados.pop()
self.robot.listaHuecosColores[i][e][3]=p
for i in range(5):
p=matrizOcupados.pop()
self.robot.listaHuecosRobot[i][3]=p
ultimoCodigo.close()
            #########################################the marble movement goes here!!!!
self.robot.quitar_bolitas(codigo, self.nuevas)
self.robot.poner_bolitas(self.nuevas, codigo)
self.robot.mover_robot([0, 0, -24])
except:
            print 'No ultimoCodigo file found = no marbles placed'
            #########################################the marble movement goes here!!!!
ultimo = [None, None, None, None, None]
self.robot.poner_bolitas(self.nuevas, ultimo)
self.robot.mover_robot([0, 0, -24])
self.ids['2textrojo'+str(self.linea2)].text = str(rojas2)
self.ids['2textblanco'+str(self.linea2)].text = str(blancas2)
if rojas2 == 5:
self.ids.textprueba2.text = "Has ganado! (jo) (jo)"
self.ids.jugar_p_inverso.text='Reiniciar (pulsadores)'
self.ids.empezar_i_inverso.text='Reiniciar (interfaz)'
self.robot.perder()
return None
"""
continuar = Clock.create_trigger(self.continuar_p_inverso)
        continuar() #attempt 2
        continuar() #attempt 3
        continuar() #attempt 4
        continuar() #attempt 5
        continuar() #attempt 6
        continuar() #attempt 7
"""
"""
while True:
self.continuar_p_inverso()
"""
def continuar_p_inverso(self):
if self.linea2>7:
self.robot.celebrar()
return None
self.linea2+=1
"""
arduino.write('3')
        Clock.schedule_interval(self.respuesta_i_arduino, 1)# arduino response
guess2 = self.respuesta_i_arduino
print guess2
"""
        ######################################### arduino response
        print 'hello'
self.pulsadores = Arduino()
guess2 = self.pulsadores.codigo5()
print guess2
rojas2, blancas2 = self.inv.continuar(guess2)
print rojas2, blancas2
self.viejas = copy.copy(self.nuevas)
self.nuevas = [None, None, None, None, None]
for i in range(rojas2):
self.nuevas[i]=4
for i in range(blancas2):
self.nuevas[i+rojas2]=3
huecoX = '2hueco'+str(self.linea2)
for y in range(5):
huecoXY=huecoX+str(y)
print guess2[y]
self.ids[huecoXY].background_color = self.colorsrgba[guess2[y]]
Clock.schedule_once(self.esperar, 1)
        #########################################the marble movement goes here!!!!
print self.viejas
print self.nuevas
self.robot.quitar_bolitas(self.viejas, self.nuevas)
self.robot.poner_bolitas(self.nuevas, self.viejas)
self.robot.mover_robot([0, 0, -24])
        ######################################### Save the last combination and the matrix of holes
ultimoCodigo = open('ultimoCodigo', 'w')
s=''
for listaHuecosColor in self.robot.listaHuecosColores:
for listaHuecoColor in listaHuecosColor:
s+='{0},'.format(listaHuecoColor[3])
for listaHuecoRobot in self.robot.listaHuecosRobot:
s+='{0},'.format(listaHuecoRobot[3])
ultimoCodigo.write('{0}|{1}|{2}|{3}|{4}|{5}'.format(self.nuevas[0],self.nuevas[1],self.nuevas[2],self.nuevas[3],self.nuevas[4],s[:-1]))
ultimoCodigo.close()
self.ids['2textrojo'+str(self.linea2)].text = str(rojas2)
self.ids['2textblanco'+str(self.linea2)].text = str(blancas2)
if rojas2 == 5:
self.ids.textprueba2.text = "Has ganado! (jo) (jo)"
self.ids.jugar_p_inverso.text='Reiniciar (pulsadores)'
self.ids.empezar_i_inverso.text='Reiniciar (interfaz)'
self.robot.perder()
return None
def empezar_i_inverso(self):
"""
        Starts the inverse game with a first move
        Requires the user's answer via the interface text field
"""
self.linea2 = 0
self.inv.empezar()
self.linea2+=1
guess2 = self.ids.codigo.text
guess2 = guess2.split()
guess2 = [int(i) for i in guess2]
print guess2
rojas2, blancas2 = self.inv.continuar(guess2)
print rojas2, blancas2
self.nuevas = [None, None, None, None, None]
for i in range(rojas2):
self.nuevas[i]=4
for i in range(blancas2):
self.nuevas[i+rojas2]=3
self.viejas = [None, None, None, None, None]
huecoX = '2hueco'+str(self.linea2)
for y in range(5):
huecoXY=huecoX+str(y)
print guess2[y]
self.ids[huecoXY].background_color = self.colorsrgba[guess2[y]]
Clock.schedule_once(self.esperar, 1)
#checkbox = self.ids.checkbox
#checkbox.bind(active=self.on_checkbox_active)
try:
            ######################################## load the last saved configuration
ultimoCodigo = open('ultimoCodigo', 'r')
ultimo = ultimoCodigo.readline().split('|')
codigo = [int(i) for i in ultimo[:-1]]
huecos = ultimo[5].split(',')
huecos.reverse()
matrizOcupados = [int(i) for i in huecos]
for i in range(5):
for e in range(5):
p=matrizOcupados.pop()
self.robot.listaHuecosColores[i][e][3]=p
for i in range(5):
p=matrizOcupados.pop()
self.robot.listaHuecosRobot[i][3]=p
ultimoCodigo.close()
            #########################################the marble movement goes here!!!!
self.robot.quitar_bolitas(codigo, self.nuevas)
self.robot.poner_bolitas(self.nuevas, codigo)
self.robot.mover_robot([0, 0, -24])
except:
            print 'No ultimoCodigo file found = no marbles placed'
            #########################################the marble movement goes here!!!!
ultimo = [None, None, None, None, None]
self.robot.poner_bolitas(self.nuevas, ultimo)
self.robot.mover_robot([0, 0, -24])
self.ids['2textrojo'+str(self.linea2)].text = str(rojas2)
self.ids['2textblanco'+str(self.linea2)].text = str(blancas2)
if rojas2 == 5:
self.ids.textprueba2.text = "Has ganado! (jo) (jo)"
self.ids.jugar_p_inverso.text='Reiniciar (pulsadores)'
self.ids.empezar_i_inverso.text='Reiniciar (interfaz)'
self.robot.perder()
return None
def continuar_i_inverso(self):
"""
        Plays one turn of the game; requires the user's answer via the interface text field
"""
if self.linea2>7:
self.robot.celebrar()
return None
self.linea2+=1
guess2 = self.ids.codigo.text
guess2 = guess2.split()
guess2 = [int(i) for i in guess2]
print guess2
rojas2, blancas2 = self.inv.continuar(guess2)
print rojas2, blancas2
self.viejas = copy.copy(self.nuevas)
self.nuevas = [None, None, None, None, None]
for i in range(rojas2):
self.nuevas[i]=4
for i in range(blancas2):
self.nuevas[i+rojas2]=3
huecoX = '2hueco'+str(self.linea2)
for y in range(5):
huecoXY=huecoX+str(y)
print guess2[y]
self.ids[huecoXY].background_color = self.colorsrgba[guess2[y]]
Clock.schedule_once(self.esperar, 1)
        #########################################the marble movement goes here!!!!
print self.viejas
print self.nuevas
self.robot.quitar_bolitas(self.viejas, self.nuevas)
self.robot.poner_bolitas(self.nuevas, self.viejas)
self.robot.mover_robot([0, 0, -24])
        ######################################### Save the last combination and the matrix of holes
ultimoCodigo = open('ultimoCodigo', 'w')
s=''
for listaHuecosColor in self.robot.listaHuecosColores:
for listaHuecoColor in listaHuecosColor:
s+='{0},'.format(listaHuecoColor[3])
for listaHuecoRobot in self.robot.listaHuecosRobot:
s+='{0},'.format(listaHuecoRobot[3])
ultimoCodigo.write('{0}|{1}|{2}|{3}|{4}|{5}'.format(self.nuevas[0],self.nuevas[1],self.nuevas[2],self.nuevas[3],self.nuevas[4],s[:-1]))
ultimoCodigo.close()
self.ids['2textrojo'+str(self.linea2)].text = str(rojas2)
self.ids['2textblanco'+str(self.linea2)].text = str(blancas2)
if rojas2 == 5:
self.ids.textprueba2.text = "Has ganado! (jo) (jo)"
self.ids.jugar_p_inverso.text='Reiniciar (pulsadores)'
self.ids.empezar_i_inverso.text='Reiniciar (interfaz)'
self.robot.perder()
return None
    def respuesta_d_arduino(self, dt):
        self.respuesta += arduino.read()
        print self.respuesta
        if len(self.respuesta)==4:
            respuesta=[int(i) for i in self.respuesta.split('|')]
            self.respuesta_d = respuesta
            return False
def respuesta_i_arduino(self, dt):
respuesta = arduino.readline()
if len(respuesta)==11:
respuesta=[int(i) for i in respuesta[1:-1].split('|')]
self.respuesta_i = respuesta
return False
def esperar(self, dt):
return None
    def on_checkbox_active(self, checkbox, value):
        if value:
            return True
        else:
            return False
class DeltaApp(App):
def build(self):
juego = Delta()
return juego
if __name__ == '__main__':
try:
arduino = arduinoSerial.Serial('/dev/ttyACM0', 115200)
except:
print "No se encuentra Arduino"
exit()
DeltaApp().run()
arduino.close()
|
jandrikus/BAMM
|
deltaApp.py
|
Python
|
mit
| 26,395
|
from django.conf.urls import include, url
from article import views
urlpatterns = [
url(r'^$', views.articles, name='articles'),
url(r'^add/?$', views.add_articles, name='add-articles'),
]
|
anush7/django-article-project
|
article/urls.py
|
Python
|
mit
| 197
|
from __future__ import absolute_import, print_function, unicode_literals
from django.views.generic.base import TemplateView
class CoachToolsView(TemplateView):
template_name = "coach/coach.html"
|
aronasorman/kolibri
|
kolibri/plugins/coach/views.py
|
Python
|
mit
| 201
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
""" Log Config
"""
__author__ = 'Zagfai'
__date__ = '2018-06'
SANIC_LOGGING_CONFIG = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'default': {
'format':
'%(levelname)s [%(asctime)s %(name)s:%(lineno)d] %(message)s',
'datefmt': '%y%m%d %H:%M:%S',
},
"access": {
"format": "VISIT [%(asctime)s %(host)s]: " +
"%(request)s %(message)s %(status)d %(byte)d",
'datefmt': '%y%m%d %H:%M:%S',
},
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'formatter': 'default',
},
"access_console": {
"class": "logging.StreamHandler",
"formatter": "access",
},
},
'loggers': {
'': {
'level': 'INFO',
'handlers': ['console'],
'propagate': True
},
'sanic.access': {
'level': 'INFO',
'handlers': ['access_console'],
'propagate': False
},
}
}
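# Illustrative usage sketch (not part of the original module): the dict above
# follows the stdlib logging.config schema, so it can be installed directly;
# passing it to Sanic instead (e.g. Sanic(log_config=SANIC_LOGGING_CONFIG))
# is an assumption about the caller, not something this module does.
#
# import logging.config
# logging.config.dictConfig(SANIC_LOGGING_CONFIG)
# logging.getLogger(__name__).info('logging configured')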
|
zagfai/webtul
|
webtul/log.py
|
Python
|
mit
| 1,135
|
#! /usr/bin/env python
import os
import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
import django_openS3
with open(os.path.join(os.path.dirname(__file__), "README.rst")) as file:
README = file.read()
with open(os.path.join(os.path.dirname(__file__), 'LICENSE')) as file:
LICENSE = file.read()
class Tox(TestCommand):
"""Command to make python setup.py test run."""
def finalize_options(self):
super().finalize_options()
self.test_args = []
self.test_suite = True
def run_tests(self):
# Do this import here because tests_require isn't processed
# early enough to do a module-level import.
from tox._cmdline import main
sys.exit(main(self.test_args))
CLASSIFIERS = [
"Development Status :: 3 - Alpha",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3 :: Only",
"Topic :: Internet",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Utilities",
]
setup(name='django_openS3',
version=django_openS3.__version__,
author=django_openS3.__author__,
author_email=django_openS3.__email__,
maintainer=django_openS3.__author__,
maintainer_email=django_openS3.__email__,
url='http://github.com/logston/django_openS3',
description='An openS3 wrapper for use with Django',
long_description=README,
license=LICENSE,
classifiers=CLASSIFIERS,
packages=['django_openS3'],
include_package_data=True,
package_data={'': ['LICENSE', 'README.rst']},
install_requires=[
'Django>=1.6',
'openS3>=0.2.0'
],
tests_require=['tox'],
cmdclass={'test': Tox})
|
logston/django_openS3
|
setup.py
|
Python
|
mit
| 2,012
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Generator expressions -- compute values while looping
# List elements can be derived one by one during iteration,
# so there is no need to build the complete list, saving a lot of memory
from collections import Iterable
import array
# Method 1: change the outermost [] of a list comprehension into ()
# List comprehension
list_comp = [x * x for x in range(10)]
# Generator expression
list_gene = (x * x for x in range(10))
# A generator is an iterable object
print(isinstance(list_gene, Iterable))
# If a generator expression is the sole argument of a function call, no extra parentheses are needed around it
symbols = '$¢£¥€¤'
t = tuple(ord(symbol) for symbol in symbols)
print(t)
# If the generator expression is not the sole argument of the function, the surrounding parentheses are required
array.array('I', (ord(s) for s in symbols))
# A generator expression yields items one at a time; it never produces the whole list of 6 T-shirt styles at once
colors = ['black', 'white']
sizes = ['S', 'M', 'L']
for t_shirts in ('%s %s' % (c, s) for c in colors for s in sizes):
print(t_shirts)
# Implement the Fibonacci sequence with a plain loop function
def fibonacci1(num):
n, a, b = 0, 0, 1
while n < num:
print(b, end=' ')
a, b = b, a + b
n = n + 1
print('done')
return 'done'
fibonacci1(20)
# Method 2: if a function definition contains the yield keyword, it is no longer an ordinary function but a generator (generator function)
# Changing the function defined above slightly turns it into a generator
def fibonacci2(num):
n, a, b = 0, 0, 1
while n < num:
yield b
a, b = b, a + b
n = n + 1
print('done')
return 'done'
for g in fibonacci2(20):
print(g, end=' ')
# Exercise: print Yang Hui's triangle (Pascal's triangle)
def triangles(num):
n, list1 = 0, [1]
while n < num:
yield list1
i = len(list1) - 1
while i:
list1[i] = list1[i] + list1[i-1]
i -= 1
list1.append(1)
n = n + 1
# A more concise way to print Yang Hui's triangle
def triangles():
list2 = [1]
while True:
yield list2
list2 = [x + y for x, y in zip([0] + list2, list2 + [0])]
x = 0
results = []
for t in triangles():
print(t)
results.append(t)
x = x + 1
if x == 10:
break
if results == [
[1],
[1, 1],
[1, 2, 1],
[1, 3, 3, 1],
[1, 4, 6, 4, 1],
[1, 5, 10, 10, 5, 1],
[1, 6, 15, 20, 15, 6, 1],
[1, 7, 21, 35, 35, 21, 7, 1],
[1, 8, 28, 56, 70, 56, 28, 8, 1],
[1, 9, 36, 84, 126, 126, 84, 36, 9, 1]
]:
    print('Test passed!')
else:
    print('Test failed!')
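# Supplementary sketch (not in the original): the 'done' that fibonacci2
# returns is never yielded; it becomes the value attribute of the
# StopIteration raised once the generator is exhausted.
g = fibonacci2(3)
while True:
    try:
        print(next(g))
    except StopIteration as e:
        print('Generator return value:', e.value)
        break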
|
felix9064/python
|
Demo/liaoxf/do_generator.py
|
Python
|
mit
| 2,677
|
from helper import greeting
greeting('Hello')
|
MRaiti/cs3240-labdemo
|
hello.py
|
Python
|
mit
| 46
|
#----------------------------------------------------------------------
#This utility sets up the python configuration files so as to
#allow Python to find files in a specified directory, regardless
#of what directory the user is working from. This is typically
#used to create a directory where the user will put resources shared
#by many Python scripts, such as courseware modules
#
#----------------------------------------------------------------------
#Usage:
# (1) Put a copy of this file (setpath.py) in the directory
# you want to share
#
# (2) Execute setpath.py, either by opening it and running it
#         in Canopy, or from the command line by changing directory
#         to the directory you want to share and then typing
#              python setpath.py
# If you run it by opening it in the Canopy editor you need to
# select the directory popup menu item that tells Canopy to
# change the working directory to the Editor directory.
# in Canopy, the working directory always appears at the upper
# right corner of the Python interpreter window.
#
#----------------------------------------------------------------------
#Notes:
#
# This will create a startup file which will properly
# initialize ipython (whether used directly or via Enthought
# Canopy) to find your files, and will do that regardless
# of your operating system.
#
# If you are using a Linux or Mac OSX operating system, it
# will also edit your .cshrc and .bash_profile shell startup
# scripts to set the environment variable PYTHONPATH so that
#       any version of the python interpreter started from the
# command line (i.e. whether ipython or python) will find
# the shared files. This feature will not work on
#       Windows operating systems, so Windows users should either
#       start up python by clicking on the Canopy app, or start
#       ipython from the command line. It is possible
# to set the PYTHONPATH environment variable in Windows,
# but this script does not yet implement that feature.
#
# Note that it is also possible to manually set up a temporary
# shared path (for example /home/MyModules) in a given script
# by executing the lines:
#
# import sys
#       sys.path.append('/home/MyModules')
#
#       where you would replace '/home/MyModules' with the
# actual full path to the directory you want on your own
# system
#----------------------------------------------------------------------
import os,glob,platform
#Utility function to return an acceptable filename for the
#startup file
def makeFileName(startupDir):
files = glob.glob(os.path.join(startupDir,'*.py'))
#Make a startup filename that doesn't already exist
for i in range(10000):
if i<100:
fname = '%02d-startup.py'%i
else:
fname ='%04d-startup.py'%i
fname = os.path.join(startupDir,fname)
if not fname in files: break
return fname
#
#--------Main program starts here
#
#Get current path
curPath = os.getcwd()
#Get home directory
home = os.path.expanduser('~')
#
#If this is a Linux or Mac OS X system, edit the
#shell initialization files to set the PYTHONPATH environment
#variable
if ( (platform.system()=='Darwin') or ('inux' in platform.system())):
#We are on a Linux or Mac system. Edit Shell startup files
print 'This is a Linux or Mac system. Adding path to shell startup scripts'
#
#csh script: (Note, should also do this for .tcshrc if it exists)
cshFile = os.path.join(home,'.cshrc')
print 'csh family -- Editing '+cshFile
#Make backup copy of file
os.system('cp %s %s'%(cshFile,cshFile+'.setPathBackup'))
#Append line to set PYTHONPATH
outfile = open(cshFile,'a')
outfile.write('#Line added by setPath.py. Original in %s\n'%(cshFile+'.setPathBackup'))
#Note: the double quotes allow paths to contain spaces
outfile.write('setenv PYTHONPATH \"%s:$PYTHONPATH\"\n'%curPath)
outfile.close()
#
#bash script (ToDo: also edit .profile, for sh users)
bashFile = os.path.join(home,'.bash_profile')
print 'sh family -- Editing '+bashFile
#Make backup copy of file
os.system('cp %s %s'%(bashFile,bashFile+'.setPathBackup'))
#Append line to set PYTHONPATH
outfile = open(bashFile,'a')
outfile.write('#Line added by setPath.py. Original in %s\n'%(bashFile+'.setPathBackup'))
#Note: the double quotes allow paths to contain spaces
outfile.write('export PYTHONPATH=\"%s:$PYTHONPATH\"\n'%curPath)
outfile.close()
#
#
#Set paths for ipython startup. This takes care of starting up ipython from
#double-clicking the Canopy app on any operating system
#
profilepath = os.path.join(home,'.ipython/profile_default/startup')
if os.path.isdir(profilepath):
fname = makeFileName(profilepath)
else:
print "Could not find .ipython startup directory. Exiting."
exit(1)
#
#Write the startup file
contents = 'import sys \nsys.path.append(\'%s\')\n'%curPath
outfile = open(fname,'w')
outfile.write(contents)
outfile.close()
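# Note (added sketch, not part of the original utility): the startup file
# written above contains just these two lines, e.g. for a shared directory
# /home/me/MyModules:
#
#   import sys
#   sys.path.append('/home/me/MyModules')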
|
CommonClimate/teaching_notebooks
|
GEOL351/CoursewareModules/setpath.py
|
Python
|
mit
| 5,073
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-08 18:32
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('content', '0004_ediimages'),
]
operations = [
migrations.AlterModelOptions(
name='ediimages',
options={'ordering': ('sort_order',)},
),
migrations.AddField(
model_name='ediimages',
name='sort_order',
field=models.PositiveIntegerField(default=0),
),
]
|
CT-Data-Collaborative/edi-v2
|
edi/content/old_mig/0005_auto_20170808_1832.py
|
Python
|
mit
| 584
|
import importlib
from flask import render_template
import lib.es as es
def get(p):
# get data source definiton
query = 'name:{}'.format(p['nav'][3])
p['ds'] = es.list(p['host'], 'core_data', 'datasource', query)[0]
# load service
path = "web.modules.dataservice.services.{}".format(p['ds']['type'])
mod = importlib.import_module(path)
return mod.execute(p)
|
unkyulee/elastic-cms
|
src/web/modules/dataservice/controllers/json.py
|
Python
|
mit
| 392
|
#!/usr/bin/env python
from __future__ import print_function,division
from astropy.io import fits
import matplotlib.pyplot as plt
import numpy as np
import matplotlib
from pint.templates import lctemplate,lcprimitives,lcfitters
from pint.eventstats import z2m,sf_z2m, hm, sf_hm, sig2sigma
import sys
from astropy import log
import scipy.stats
def compute_fourier(phases,nh=10,pow_phase=False):
'''Compute Fourier amplitudes from an array of pulse phases
phases should be [0,1.0)
nh is the number of harmonics (1 = fundamental only)
Returns: cos and sin component arrays, unless pow_phase is True
then returns Fourier power (Leahy normalized) and phase arrays
DC bin is not computed or returned
'''
phis = 2.0*np.pi*phases # Convert phases to radians
n = len(phis)
c = np.asarray([(np.cos(k*phis)).sum() for k in range(1,nh+1)])/n
s = np.asarray([(np.sin(k*phis)).sum() for k in range(1,nh+1)])/n
c *= 2.0
s *= 2.0
if pow_phase:
# CHECK! There could be errors here!
# These should be Leahy normalized powers
fourier_pow = (n/2)*(c**2+s**2)
fourier_phases = np.arctan2(s,c)
return n,fourier_pow,fourier_phases
else:
return n,c,s
def evaluate_fourier(n,c,s,nbins,k=None):
# This should be updated to do a little integral over each bin.
# Currently evaluates the model at the center of each bin
model = np.zeros(nbins)+n/nbins
theta = 2.0*np.pi*np.arange(nbins,dtype=np.float)/nbins
theta += theta[1]/2.0
if k is not None:
model += (n/nbins)*(c[k]*np.cos((k+1)*theta) + s[k]*np.sin((k+1)*theta))
else:
for k in range(len(c)):
model += (n/nbins)*(c[k]*np.cos((k+1)*theta) + s[k]*np.sin((k+1)*theta))
return model
def evaluate_chi2(hist,model):
# Question here is whether error should be sqrt(data) or sqrt(model)
return ((hist-model)**2/model).sum()
def compute_phist(phases,nbins=200):
h, edges = np.histogram(phases,bins=np.linspace(0.0,1.0,nbins+1,endpoint=True))
return edges[:-1], h
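# Illustrative self-test sketch (not part of the original script): for phases
# drawn from a 1 + f*cos(2*pi*phi) density via rejection sampling, the
# fundamental's Leahy power should come out near n*f**2/2, well above the
# ~2.0 mean noise level. The function name and parameters below are ad hoc.
def _fourier_selftest(nphot=20000, frac=0.1, seed=42):
    rng = np.random.RandomState(seed)
    ph = rng.random_sample(nphot)
    accept = rng.random_sample(nphot) < (1.0 + frac * np.cos(2.0 * np.pi * ph)) / (1.0 + frac)
    n, fpow, fphase = compute_fourier(ph[accept], nh=4, pow_phase=True)
    print("n = {0}, fundamental Leahy power = {1:.1f} (expect ~{2:.1f})".format(
        n, fpow[0], n * frac ** 2 / 2.0))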
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description = "Fit a set of pulse phases to harmonics")
parser.add_argument("evname", help="Input event file (must have PULSE_PHASE column)")
parser.add_argument("--white",help = "Replace phases with white random numbers, for testing", action="store_true")
parser.add_argument("--txt",help = "Assume input file is .txt instead of FITS", action="store_true")
parser.add_argument("--showcomps",help = "Show individual components of harmonic fit on plot", action="store_true")
parser.add_argument("--noplot",help = "Don't show any plots", action="store_true")
parser.add_argument("--output",help = "Save figures with basename", default=None)
parser.add_argument("--numharm",help="Max harmonic to use in analysis (1=Fundamental only)",default=4,type=int)
parser.add_argument("--numbins",help="Number of bins for histograms",default=200,type=int)
parser.add_argument("--emin",help="Minimum energy to include (keV)",default=0.25,type=float)
parser.add_argument("--emax",help="Maximum energy to include (keV)",default=12.0,type=float)
args = parser.parse_args()
if args.txt:
exposure = None
ph,en = np.loadtxt(args.evname,unpack=True,usecols=(1,2),skiprows=3)
log.info("Read {0} phases from .txt file".format(len(ph)))
tstart = 0.0
else:
f = fits.open(args.evname)
en = f['events'].data.field('pi')
ph = f['events'].data.field('pulse_phase')
log.info("Read {0} phases from FITS file".format(len(ph)))
exposure = float(f['events'].header['EXPOSURE'])
tstart = float(f['events'].header['TSTART'])
log.info("Exposure = {0} s".format(exposure))
if args.white:
# Random phases uniform over [0,1)
ph = np.random.random_sample(len(en))
log.info("Replaced with {0} random phases".format(len(en)))
matplotlib.rcParams['font.family'] = "serif"
matplotlib.rcParams.update({'font.size': 13})
matplotlib.rc('axes', linewidth=1.5)
if args.output:
resultsfile = open("{0}_results.txt".format(args.output),"w")
print("{0:.6f}".format(tstart),file=resultsfile)
# Filter on energy
idx = np.where(np.logical_and(en > int(args.emin*100), en < int(args.emax*100) ))[0]
ph = ph[idx]
en = en[idx]
# Hack to manually split out a segment
#q = 3 # Use 0, 1, 2, 3
#qn = len(ph)//4
#ph = ph[q*qn:(q+1)*qn]
#en = en[q*qn:(q+1)*qn]
nbins = args.numbins
bins,phist = compute_phist(ph,nbins=nbins)
fig,axs = plt.subplots(nrows=2,ncols=1)
plt.subplots_adjust(left=0.15, bottom=0.1, right=0.97, top=0.94,hspace=0.001)
ax=axs[0]
ax.tick_params(direction='in', length=6, width=2, colors='k',top=True, right=True, labelbottom=False)
# ax.text(.5,.8,'PSR J0030+0451', horizontalalignment='center', transform=ax.transAxes)
# ax.text(.5,.8,'PSR J0437-4715', horizontalalignment='center', transform=ax.transAxes)
# ax.text(.2,.8,'PSR J1231-1411', horizontalalignment='center', transform=ax.transAxes)
# ax.text(.8,.8,'PSR J2124-3358', horizontalalignment='center', transform=ax.transAxes)
ax.step(np.concatenate((bins,np.ones(1))),np.concatenate((phist,phist[-1:])),color='k',where='post')
ax.set_xlim(0.0,1.0)
ax.set_ylabel('Counts per bin')
n,c,s = compute_fourier(ph,nh=args.numharm)
model = evaluate_fourier(n,c,s,nbins)
ax.plot(bins+bins[1]/2.0,model,color='r',lw=2)
if args.showcomps:
for k in range(len(c)):
ax.plot(np.linspace(0.0,1.0,nbins),evaluate_fourier(n,c,s,nbins,k=k),ls='--')
fn,fpow,fphase = compute_fourier(ph,nh=args.numharm,pow_phase=True)
i=1
log.info("Harm LeahyPower Phase(deg)")
for fp, fph in zip(fpow,fphase):
log.info("{0:2d} {1:12.3f} {2:9.3f} deg".format(i,fp,np.rad2deg(fph)))
if args.output:
print("{0:2d} {1:12.3f} {2:9.3f}".format(i,fp,np.rad2deg(fph)),file=resultsfile)
i+=1
pcounts = (model-model.min()).sum()
pcounts_err = np.sqrt(model.sum() + model.min()*len(model))
if exposure:
log.info("Pulsed counts = {0:.3f}, count rate = {1:.3f}+/-{2:.4f} c/s".format(pcounts, pcounts/exposure, pcounts_err/exposure))
log.info("Total rate = {0:.3f} c/s, Unpulsed rate = {1:.3f} c/s".format(n/exposure, n/exposure-pcounts/exposure))
ax = axs[1]
ax.tick_params(direction='in', length=6, width=2, colors='k',top=True, right=True)
ax.errorbar(np.linspace(0.0,1.0,nbins),phist-model,yerr=np.sqrt(phist),fmt='.',ecolor='k')
chisq = evaluate_chi2(phist,model)
nparams = 1 + 2*args.numharm # 1 for DC + 2 for each sinusoidal component
ax.set_xlim(0.0,1.0)
ax.set_xlabel('Pulse Phase')
ax.set_ylabel('Residuals (counts)')
ax.tick_params(direction='in', length=6, width=2, colors='k',top=True)
ndof = len(phist)-nparams
axs[0].set_title("NumHarm = {0}, Chisq = {1:.2f}, DOF = {2}".format(args.numharm,chisq,ndof))
ax.grid(1)
# ax.set_label("{0} Harmonic Fit to Profile".format(args.numharm))
plt.tight_layout()
if args.output:
fig.savefig("{0}_harmfit.pdf".format(args.output))
# Plot distribution of residuals to compare to a gaussian
fig,ax = plt.subplots()
ax.tick_params(direction='in', length=6, width=2, colors='k',top=True, right=True)
chi = (phist-model)/np.sqrt(model)
#x, y = np.histogram(chi,bins=np.linspace(-2.0,2.0,0.1))
x = np.linspace(-3.0,3.0,32,endpoint=True)
ax.hist(chi,bins=x,density=True)
ax.set_title('Histogram of residuals')
ax.plot(x,scipy.stats.norm.pdf(x))
plt.tight_layout()
# Plot histogram of phase differences to see if they are Poisson
fig,ax = plt.subplots()
ax.tick_params(direction='in', length=6, width=2, colors='k',top=True, right=True)
ph.sort()
pdiffs = (ph[1:]-ph[:-1])*1.0
x = np.linspace(0.0,50.0e-6,200,endpoint=True)
histn, histbins, histpatches = ax.hist(pdiffs,bins=x,density=True,log=True)
ax.set_title('Histogram of phase differences')
ax.set_xlabel('Phase diff')
ax.plot(x,np.exp(-len(pdiffs)*(x*1.0))*n)
plt.tight_layout()
# Compute number of significant harmonics
# First by plotting Leahy powers
fig,axs = plt.subplots(nrows=2,ncols=1)
ax = axs[0]
ax.tick_params(direction='in', length=6, width=2, colors='k',top=True, right=True)
n,pow,phases = compute_fourier(ph,nh=nbins//2,pow_phase=True)
ax.semilogy(np.arange(len(pow))+1,pow,marker='o')
# Leahy power of 5.99 corresponds to 2 sigma, I think
ax.axhline(5.99,color='r')
ax.axhline(2.0,color='b',ls='--')
#ax.xaxis.set_ticks(np.arange(1,len(pow)+1))
#ax.set_xlabel('Harmonic Number')
ax.set_ylabel('Leahy Power')
ax.set_title("Power Spectrum")
plt.tight_layout()
ax = axs[1]
ax.tick_params(direction='in', length=6, width=2, colors='k',top=True, right=True)
ax.plot(np.arange(len(pow))+1,pow,marker='o')
ax.axhline(5.99,color='r')
ax.axhline(2.0,color='b',ls='--')
#ax.xaxis.set_ticks(np.arange(1,len(pow)+1))
ax.set_ylim(0.0,10.0)
ax.text(1.0,7.0,'Mean power {0:.3f}'.format(pow.mean()))
ax.set_xlabel('Harmonic Number')
ax.set_ylabel('Leahy Power')
if args.output:
fig.savefig("{0}_leahy.pdf".format(args.output))
plt.tight_layout()
# Then by computing chisq as a function of number of harmonics in model
chisq = []
ndof = []
maxharms = np.arange(1,min(33,nbins//4+1))
n,c,s = compute_fourier(ph,nh=maxharms[-1])
for maxharm in maxharms:
model = evaluate_fourier(n,c[:maxharm],s[:maxharm],nbins)
chisq.append(evaluate_chi2(phist,model))
nparams = 1 + 2*maxharm # 1 for DC + 2 for each sinusoidal component
ndof.append(len(phist)-nparams)
chisq = np.asarray(chisq)
ndof = np.asarray(ndof)
fig,ax = plt.subplots()
ax.tick_params(direction='in', length=6, width=2, colors='k',top=True, right=True)
ax.plot(maxharms,chisq/ndof,'o',ls='-')
ax.set_ylim(0.5,3.0)
ax.axhline(1.0,color='r',ls='--')
ax.set_xlabel('Number of Harmonics')
ax.set_ylabel('Chisq')
ax.set_title("Chisq/DOF vs. Number of Harmonics")
#ax.xaxis.set_ticks(maxharms)
#ax.semilogy(maxharms,ndof)
plt.tight_layout()
if args.output:
fig.savefig("{0}_chisq.pdf".format(args.output))
# Then look at amplitudes and phases as a function of energy cuts
# Look at color oscillations
# Select photons above and below some energy cut and look at the ratio
ensplit = 55
softidx = np.where(en<ensplit)[0]
hardidx = np.where(en>=ensplit)[0]
colorbins = 32
softbins, softn = compute_phist(ph[softidx],nbins=colorbins)
hardbins, hardn = compute_phist(ph[hardidx],nbins=colorbins)
softn = np.asarray(softn,dtype=np.float)
hardn = np.asarray(hardn,dtype=np.float)
fig,ax = plt.subplots()
color = hardn/softn
# Propagate Poisson errors to get error in ratio
cerr = color*np.sqrt(1.0/softn + 1.0/hardn)
#ax.step(np.concatenate((softbins,np.ones(1))),np.concatenate((color,color[-1:])),color='C0',where='post')
ax.errorbar(softbins+0.5*softbins[1],color,yerr=cerr,color='k',fmt='.')
ax.set_xlim(0.0,1.0)
ax.set_xlabel('Pulse Phase')
ax.set_ylabel('Spectral Color')
if not args.noplot:
plt.show()
|
paulray/NICERsoft
|
scripts/fitharms.py
|
Python
|
mit
| 11,515
|
#! /usr/bin/env python
# example of for loop
words = ['this', 'is', 'an', 'ex', 'parrot']
for word in words:
print word,
print '\n'
# example of for loop in dictionary
d = {'x': 1, 'y': 2, 'z': 3}
for key in d:
print key, 'corresponds to', d[key]
# additional sequence unpacking in for loop
for key, value in d.items():
print key, 'to', value
|
IPVL/Tanvin-PythonWorks
|
chapter5/codes/forLoop.py
|
Python
|
mit
| 353
|
from __future__ import absolute_import
import codecs
import re
import types
import sys
from .constants import EOF, spaceCharacters, asciiLetters, asciiUppercase
from .constants import encodings, ReparseException
from . import utils
from io import StringIO
try:
from io import BytesIO
except ImportError:
BytesIO = StringIO
try:
from io import BufferedIOBase
except ImportError:
class BufferedIOBase(object):
pass
#Non-unicode versions of constants for use in the pre-parser
spaceCharactersBytes = frozenset([item.encode(u"ascii") for item in spaceCharacters])
asciiLettersBytes = frozenset([item.encode(u"ascii") for item in asciiLetters])
asciiUppercaseBytes = frozenset([item.encode(u"ascii") for item in asciiUppercase])
spacesAngleBrackets = spaceCharactersBytes | frozenset([">", "<"])
invalid_unicode_re = re.compile(u"[\u0001-\u0008\u000B\u000E-\u001F\u007F-\u009F\uD800-\uDFFF\uFDD0-\uFDEF\uFFFE\uFFFF\U0001FFFE\U0001FFFF\U0002FFFE\U0002FFFF\U0003FFFE\U0003FFFF\U0004FFFE\U0004FFFF\U0005FFFE\U0005FFFF\U0006FFFE\U0006FFFF\U0007FFFE\U0007FFFF\U0008FFFE\U0008FFFF\U0009FFFE\U0009FFFF\U000AFFFE\U000AFFFF\U000BFFFE\U000BFFFF\U000CFFFE\U000CFFFF\U000DFFFE\U000DFFFF\U000EFFFE\U000EFFFF\U000FFFFE\U000FFFFF\U0010FFFE\U0010FFFF]")
non_bmp_invalid_codepoints = set([0x1FFFE, 0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE,
0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE, 0x5FFFF,
0x6FFFE, 0x6FFFF, 0x7FFFE, 0x7FFFF, 0x8FFFE,
0x8FFFF, 0x9FFFE, 0x9FFFF, 0xAFFFE, 0xAFFFF,
0xBFFFE, 0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE,
0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE, 0xFFFFF,
0x10FFFE, 0x10FFFF])
ascii_punctuation_re = re.compile(u"[\u0009-\u000D\u0020-\u002F\u003A-\u0040\u005B-\u0060\u007B-\u007E]")
# Cache for charsUntil()
charsUntilRegEx = {}
class BufferedStream(object):
u"""Buffering for streams that do not have buffering of their own
The buffer is implemented as a list of chunks on the assumption that
joining many strings will be slow since it is O(n**2)
"""
def __init__(self, stream):
self.stream = stream
self.buffer = []
self.position = [-1,0] #chunk number, offset
__init__.func_annotations = {}
def tell(self):
pos = 0
for chunk in self.buffer[:self.position[0]]:
pos += len(chunk)
pos += self.position[1]
return pos
tell.func_annotations = {}
def seek(self, pos):
assert pos < self._bufferedBytes()
offset = pos
i = 0
while len(self.buffer[i]) < offset:
            offset -= len(self.buffer[i])
i += 1
self.position = [i, offset]
seek.func_annotations = {}
def read(self, str):
if not self.buffer:
return self._readStream(str)
elif (self.position[0] == len(self.buffer) and
self.position[1] == len(self.buffer[-1])):
return self._readStream(str)
else:
return self._readFromBuffer(str)
read.func_annotations = {}
def _bufferedBytes(self):
return sum([len(item) for item in self.buffer])
_bufferedBytes.func_annotations = {}
def _readStream(self, str):
data = self.stream.read(str)
self.buffer.append(data)
self.position[0] += 1
self.position[1] = len(data)
return data
_readStream.func_annotations = {}
def _readFromBuffer(self, str):
remainingBytes = str
rv = []
bufferIndex = self.position[0]
bufferOffset = self.position[1]
while bufferIndex < len(self.buffer) and remainingBytes != 0:
assert remainingBytes > 0
bufferedData = self.buffer[bufferIndex]
if remainingBytes <= len(bufferedData) - bufferOffset:
bytesToRead = remainingBytes
self.position = [bufferIndex, bufferOffset + bytesToRead]
else:
bytesToRead = len(bufferedData) - bufferOffset
self.position = [bufferIndex, len(bufferedData)]
bufferIndex += 1
            rv.append(bufferedData[bufferOffset:
                                   bufferOffset + bytesToRead])
remainingBytes -= bytesToRead
bufferOffset = 0
if remainingBytes:
rv.append(self._readStream(remainingBytes))
return u"".join(rv)
_readFromBuffer.func_annotations = {}
def HTMLInputStream(source, encoding=None, parseMeta=True, chardet=True):
if hasattr(source, u"read"):
isUnicode = isinstance(source.read(0), unicode)
else:
isUnicode = isinstance(source, unicode)
if isUnicode:
if encoding is not None:
raise TypeError(u"Cannot explicitly set an encoding with a unicode string")
return HTMLUnicodeInputStream(source)
else:
return HTMLBinaryInputStream(source, encoding, parseMeta, chardet)
HTMLInputStream.func_annotations = {}
class HTMLUnicodeInputStream(object):
u"""Provides a unicode stream of characters to the HTMLTokenizer.
This class takes care of character encoding and removing or replacing
incorrect byte-sequences and also provides column and line tracking.
"""
_defaultChunkSize = 10240
def __init__(self, source):
u"""Initialises the HTMLInputStream.
HTMLInputStream(source, [encoding]) -> Normalized stream from source
for use by html5lib.
source can be either a file-object, local filename or a string.
The optional encoding parameter must be a string that indicates
the encoding. If specified, that encoding will be used,
regardless of any BOM or later declaration (such as in a meta
element)
parseMeta - Look for a <meta> element containing encoding information
"""
#Craziness
if len(u"\U0010FFFF") == 1:
self.reportCharacterErrors = self.characterErrorsUCS4
self.replaceCharactersRegexp = re.compile(u"[\uD800-\uDFFF]")
else:
self.reportCharacterErrors = self.characterErrorsUCS2
self.replaceCharactersRegexp = re.compile(u"([\uD800-\uDBFF](?![\uDC00-\uDFFF])|(?<![\uD800-\uDBFF])[\uDC00-\uDFFF])")
# List of where new lines occur
self.newLines = [0]
self.charEncoding = (u"utf-8", u"certain")
self.dataStream = self.openStream(source)
self.reset()
__init__.func_annotations = {}
def reset(self):
self.chunk = u""
self.chunkSize = 0
self.chunkOffset = 0
self.errors = []
# number of (complete) lines in previous chunks
self.prevNumLines = 0
# number of columns in the last line of the previous chunk
self.prevNumCols = 0
#Deal with CR LF and surrogates split over chunk boundaries
self._bufferedCharacter = None
reset.func_annotations = {}
def openStream(self, source):
u"""Produces a file object from source.
source can be either a file object, local filename or a string.
"""
# Already a file object
if hasattr(source, u'read'):
stream = source
else:
stream = StringIO(source)
if (#not isinstance(stream, BufferedIOBase) and
not(hasattr(stream, u"tell") and
hasattr(stream, u"seek")) or
stream is sys.stdin):
stream = BufferedStream(stream)
return stream
openStream.func_annotations = {}
def _position(self, offset):
chunk = self.chunk
nLines = chunk.count(u'\n', 0, offset)
positionLine = self.prevNumLines + nLines
lastLinePos = chunk.rfind(u'\n', 0, offset)
if lastLinePos == -1:
positionColumn = self.prevNumCols + offset
else:
positionColumn = offset - (lastLinePos + 1)
return (positionLine, positionColumn)
_position.func_annotations = {}
def position(self):
u"""Returns (line, col) of the current position in the stream."""
line, col = self._position(self.chunkOffset)
return (line+1, col)
position.func_annotations = {}
def char(self):
u""" Read one character from the stream or queue if available. Return
EOF when EOF is reached.
"""
# Read a new chunk from the input stream if necessary
if self.chunkOffset >= self.chunkSize:
if not self.readChunk():
return EOF
chunkOffset = self.chunkOffset
char = self.chunk[chunkOffset]
self.chunkOffset = chunkOffset + 1
return char
char.func_annotations = {}
def readChunk(self, chunkSize=None):
if chunkSize is None:
chunkSize = self._defaultChunkSize
self.prevNumLines, self.prevNumCols = self._position(self.chunkSize)
self.chunk = u""
self.chunkSize = 0
self.chunkOffset = 0
data = self.dataStream.read(chunkSize)
#Deal with CR LF and surrogates broken across chunks
if self._bufferedCharacter:
data = self._bufferedCharacter + data
self._bufferedCharacter = None
elif not data:
# We have no more data, bye-bye stream
return False
if len(data) > 1:
lastv = ord(data[-1])
if lastv == 0x0D or 0xD800 <= lastv <= 0xDBFF:
self._bufferedCharacter = data[-1]
data = data[:-1]
self.reportCharacterErrors(data)
# Replace invalid characters
# Note U+0000 is dealt with in the tokenizer
data = self.replaceCharactersRegexp.sub(u"\ufffd", data)
data = data.replace(u"\r\n", u"\n")
data = data.replace(u"\r", u"\n")
self.chunk = data
self.chunkSize = len(data)
return True
readChunk.func_annotations = {}
def characterErrorsUCS4(self, data):
for i in xrange(len(invalid_unicode_re.findall(data))):
self.errors.append(u"invalid-codepoint")
characterErrorsUCS4.func_annotations = {}
def characterErrorsUCS2(self, data):
#Someone picked the wrong compile option
#You lose
        skip = False
        for match in invalid_unicode_re.finditer(data):
            if skip:
                skip = False
                continue
codepoint = ord(match.group())
pos = match.start()
#Pretty sure there should be endianness issues here
if utils.isSurrogatePair(data[pos:pos+2]):
#We have a surrogate pair!
char_val = utils.surrogatePairToCodepoint(data[pos:pos+2])
if char_val in non_bmp_invalid_codepoints:
self.errors.append(u"invalid-codepoint")
skip = True
elif (codepoint >= 0xD800 and codepoint <= 0xDFFF and
pos == len(data) - 1):
self.errors.append(u"invalid-codepoint")
else:
skip = False
self.errors.append(u"invalid-codepoint")
characterErrorsUCS2.func_annotations = {}
def charsUntil(self, characters, opposite = False):
u""" Returns a string of characters from the stream up to but not
including any character in 'characters' or EOF. 'characters' must be
a container that supports the 'in' method and iteration over its
characters.
"""
# Use a cache of regexps to find the required characters
try:
chars = charsUntilRegEx[(characters, opposite)]
except KeyError:
if __debug__:
for c in characters:
assert(ord(c) < 128)
regex = u"".join([u"\\x%02x" % ord(c) for c in characters])
if not opposite:
regex = u"^%s" % regex
chars = charsUntilRegEx[(characters, opposite)] = re.compile(u"[%s]+" % regex)
rv = []
while True:
# Find the longest matching prefix
m = chars.match(self.chunk, self.chunkOffset)
if m is None:
# If nothing matched, and it wasn't because we ran out of chunk,
# then stop
if self.chunkOffset != self.chunkSize:
break
else:
end = m.end()
# If not the whole chunk matched, return everything
# up to the part that didn't match
if end != self.chunkSize:
rv.append(self.chunk[self.chunkOffset:end])
self.chunkOffset = end
break
# If the whole remainder of the chunk matched,
# use it all and read the next chunk
rv.append(self.chunk[self.chunkOffset:])
if not self.readChunk():
# Reached EOF
break
r = u"".join(rv)
return r
charsUntil.func_annotations = {}
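    # Note (added): charsUntil compiles one character-class regex per
    # (characters, opposite) pair and caches it in charsUntilRegEx, so the
    # tokenizer's hot loop never recompiles patterns.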
def unget(self, char):
# Only one character is allowed to be ungotten at once - it must
# be consumed again before any further call to unget
if char is not None:
if self.chunkOffset == 0:
# unget is called quite rarely, so it's a good idea to do
# more work here if it saves a bit of work in the frequently
# called char and charsUntil.
# So, just prepend the ungotten character onto the current
# chunk:
self.chunk = char + self.chunk
self.chunkSize += 1
else:
self.chunkOffset -= 1
assert self.chunk[self.chunkOffset] == char
unget.func_annotations = {}
class HTMLBinaryInputStream(HTMLUnicodeInputStream):
u"""Provides a unicode stream of characters to the HTMLTokenizer.
This class takes care of character encoding and removing or replacing
incorrect byte-sequences and also provides column and line tracking.
"""
def __init__(self, source, encoding=None, parseMeta=True, chardet=True):
u"""Initialises the HTMLInputStream.
HTMLInputStream(source, [encoding]) -> Normalized stream from source
for use by html5lib.
source can be either a file-object, local filename or a string.
The optional encoding parameter must be a string that indicates
the encoding. If specified, that encoding will be used,
regardless of any BOM or later declaration (such as in a meta
element)
parseMeta - Look for a <meta> element containing encoding information
"""
# Raw Stream - for unicode objects this will encode to utf-8 and set
# self.charEncoding as appropriate
self.rawStream = self.openStream(source)
HTMLUnicodeInputStream.__init__(self, self.rawStream)
self.charEncoding = (codecName(encoding), u"certain")
# Encoding Information
#Number of bytes to use when looking for a meta element with
#encoding information
self.numBytesMeta = 512
#Number of bytes to use when using detecting encoding using chardet
self.numBytesChardet = 100
#Encoding to use if no other information can be found
self.defaultEncoding = u"windows-1252"
#Detect encoding iff no explicit "transport level" encoding is supplied
if (self.charEncoding[0] is None):
self.charEncoding = self.detectEncoding(parseMeta, chardet)
#Call superclass
self.reset()
__init__.func_annotations = {}
def reset(self):
self.dataStream = codecs.getreader(self.charEncoding[0])(self.rawStream,
u'replace')
HTMLUnicodeInputStream.reset(self)
reset.func_annotations = {}
def openStream(self, source):
u"""Produces a file object from source.
source can be either a file object, local filename or a string.
"""
# Already a file object
if hasattr(source, u'read'):
stream = source
else:
stream = BytesIO(source)
if (not(hasattr(stream, u"tell") and hasattr(stream, u"seek")) or
stream is sys.stdin):
stream = BufferedStream(stream)
return stream
openStream.func_annotations = {}
def detectEncoding(self, parseMeta=True, chardet=True):
#First look for a BOM
#This will also read past the BOM if present
encoding = self.detectBOM()
confidence = u"certain"
#If there is no BOM need to look for meta elements with encoding
#information
if encoding is None and parseMeta:
encoding = self.detectEncodingMeta()
confidence = u"tentative"
        #Guess with chardet, if available
if encoding is None and chardet:
confidence = u"tentative"
try:
from chardet.universaldetector import UniversalDetector
buffers = []
detector = UniversalDetector()
while not detector.done:
buffer = self.rawStream.read(self.numBytesChardet)
assert isinstance(buffer, str)
if not buffer:
break
buffers.append(buffer)
detector.feed(buffer)
detector.close()
encoding = detector.result[u'encoding']
self.rawStream.seek(0)
except ImportError:
pass
# If all else fails use the default encoding
if encoding is None:
confidence=u"tentative"
encoding = self.defaultEncoding
#Substitute for equivalent encodings:
encodingSub = {u"iso-8859-1":u"windows-1252"}
if encoding.lower() in encodingSub:
encoding = encodingSub[encoding.lower()]
return encoding, confidence
detectEncoding.func_annotations = {}
def changeEncoding(self, newEncoding):
assert self.charEncoding[1] != u"certain"
newEncoding = codecName(newEncoding)
if newEncoding in (u"utf-16", u"utf-16-be", u"utf-16-le"):
newEncoding = u"utf-8"
if newEncoding is None:
return
elif newEncoding == self.charEncoding[0]:
self.charEncoding = (self.charEncoding[0], u"certain")
else:
self.rawStream.seek(0)
self.reset()
self.charEncoding = (newEncoding, u"certain")
raise ReparseException(u"Encoding changed from %s to %s"%(self.charEncoding[0], newEncoding))
changeEncoding.func_annotations = {}
def detectBOM(self):
u"""Attempts to detect at BOM at the start of the stream. If
an encoding can be determined from the BOM return the name of the
encoding otherwise return None"""
bomDict = {
codecs.BOM_UTF8: u'utf-8',
codecs.BOM_UTF16_LE: u'utf-16-le', codecs.BOM_UTF16_BE: u'utf-16-be',
codecs.BOM_UTF32_LE: u'utf-32-le', codecs.BOM_UTF32_BE: u'utf-32-be'
}
# Go to beginning of file and read in 4 bytes
string = self.rawStream.read(4)
assert isinstance(string, str)
# Try detecting the BOM using bytes from the string
encoding = bomDict.get(string[:3]) # UTF-8
seek = 3
if not encoding:
# Need to detect UTF-32 before UTF-16
encoding = bomDict.get(string) # UTF-32
seek = 4
if not encoding:
encoding = bomDict.get(string[:2]) # UTF-16
seek = 2
# Set the read position past the BOM if one was found, otherwise
# set it to the start of the stream
self.rawStream.seek(encoding and seek or 0)
return encoding
detectBOM.func_annotations = {}
def detectEncodingMeta(self):
u"""Report the encoding declared by the meta element
"""
buffer = self.rawStream.read(self.numBytesMeta)
assert isinstance(buffer, str)
parser = EncodingParser(buffer)
self.rawStream.seek(0)
encoding = parser.getEncoding()
if encoding in (u"utf-16", u"utf-16-be", u"utf-16-le"):
encoding = u"utf-8"
return encoding
detectEncodingMeta.func_annotations = {}
class EncodingBytes(str):
u"""String-like object with an associated position and various extra methods
If the position is ever greater than the string length then an exception is
raised"""
def __new__(self, value):
assert isinstance(value, str)
return str.__new__(self, value.lower())
__new__.func_annotations = {}
def __init__(self, value):
self._position=-1
__init__.func_annotations = {}
def __iter__(self):
return self
__iter__.func_annotations = {}
def next(self):
p = self._position = self._position + 1
if p >= len(self):
raise StopIteration
elif p < 0:
raise TypeError
return self[p:p+1]
next.func_annotations = {}
def previous(self):
p = self._position
if p >= len(self):
raise StopIteration
elif p < 0:
raise TypeError
self._position = p = p - 1
return self[p:p+1]
previous.func_annotations = {}
def setPosition(self, position):
if self._position >= len(self):
raise StopIteration
self._position = position
setPosition.func_annotations = {}
def getPosition(self):
if self._position >= len(self):
raise StopIteration
if self._position >= 0:
return self._position
else:
return None
getPosition.func_annotations = {}
position = property(getPosition, setPosition)
def getCurrentByte(self):
return self[self.position:self.position+1]
getCurrentByte.func_annotations = {}
currentByte = property(getCurrentByte)
def skip(self, chars=spaceCharactersBytes):
u"""Skip past a list of characters"""
p = self.position # use property for the error-checking
while p < len(self):
c = self[p:p+1]
if c not in chars:
self._position = p
return c
p += 1
self._position = p
return None
skip.func_annotations = {}
def skipUntil(self, chars):
p = self.position
while p < len(self):
c = self[p:p+1]
if c in chars:
self._position = p
return c
p += 1
self._position = p
return None
skipUntil.func_annotations = {}
def matchBytes(self, str):
u"""Look for a sequence of bytes at the start of a string. If the bytes
are found return True and advance the position to the byte after the
match. Otherwise return False and leave the position alone"""
p = self.position
data = self[p:p+len(str)]
rv = data.startswith(str)
if rv:
self.position += len(str)
return rv
matchBytes.func_annotations = {}
def jumpTo(self, str):
u"""Look for the next sequence of bytes matching a given sequence. If
a match is found advance the position to the last byte of the match"""
newPosition = self[self.position:].find(str)
if newPosition > -1:
# XXX: This is ugly, but I can't see a nicer way to fix this.
if self._position == -1:
self._position = 0
self._position += (newPosition + len(str)-1)
return True
else:
raise StopIteration
jumpTo.func_annotations = {}
class EncodingParser(object):
u"""Mini parser for detecting character encoding from meta elements"""
def __init__(self, data):
u"""string - the data to work on for encoding detection"""
self.data = EncodingBytes(data)
self.encoding = None
__init__.func_annotations = {}
def getEncoding(self):
methodDispatch = (
("<!--",self.handleComment),
("<meta",self.handleMeta),
("</",self.handlePossibleEndTag),
("<!",self.handleOther),
("<?",self.handleOther),
("<",self.handlePossibleStartTag))
for byte in self.data:
keepParsing = True
for key, method in methodDispatch:
if self.data.matchBytes(key):
try:
keepParsing = method()
break
except StopIteration:
keepParsing=False
break
if not keepParsing:
break
return self.encoding
getEncoding.func_annotations = {}
def handleComment(self):
u"""Skip over comments"""
return self.data.jumpTo("-->")
handleComment.func_annotations = {}
def handleMeta(self):
if self.data.currentByte not in spaceCharactersBytes:
#if we have <meta not followed by a space so just keep going
return True
#We have a valid meta element we want to search for attributes
hasPragma = False
pendingEncoding = None
while True:
#Try to find the next attribute after the current position
attr = self.getAttribute()
if attr is None:
return True
else:
if attr[0] == "http-equiv":
hasPragma = attr[1] == "content-type"
if hasPragma and pendingEncoding is not None:
self.encoding = pendingEncoding
return False
elif attr[0] == "charset":
tentativeEncoding = attr[1]
codec = codecName(tentativeEncoding)
if codec is not None:
self.encoding = codec
return False
elif attr[0] == "content":
contentParser = ContentAttrParser(EncodingBytes(attr[1]))
tentativeEncoding = contentParser.parse()
if tentativeEncoding is not None:
codec = codecName(tentativeEncoding)
if codec is not None:
if hasPragma:
self.encoding = codec
return False
else:
pendingEncoding = codec
handleMeta.func_annotations = {}
def handlePossibleStartTag(self):
return self.handlePossibleTag(False)
handlePossibleStartTag.func_annotations = {}
def handlePossibleEndTag(self):
self.data.next()
return self.handlePossibleTag(True)
handlePossibleEndTag.func_annotations = {}
def handlePossibleTag(self, endTag):
data = self.data
if data.currentByte not in asciiLettersBytes:
#If the next byte is not an ascii letter either ignore this
#fragment (possible start tag case) or treat it according to
#handleOther
if endTag:
data.previous()
self.handleOther()
return True
c = data.skipUntil(spacesAngleBrackets)
if c == "<":
#return to the first step in the overall "two step" algorithm
#reprocessing the < byte
data.previous()
else:
#Read all attributes
attr = self.getAttribute()
while attr is not None:
attr = self.getAttribute()
return True
handlePossibleTag.func_annotations = {}
def handleOther(self):
return self.data.jumpTo(">")
handleOther.func_annotations = {}
def getAttribute(self):
u"""Return a name,value pair for the next attribute in the stream,
if one is found, or None"""
data = self.data
# Step 1 (skip chars)
c = data.skip(spaceCharactersBytes | frozenset(["/"]))
assert c is None or len(c) == 1
# Step 2
if c in (">", None):
return None
# Step 3
attrName = []
attrValue = []
#Step 4 attribute name
while True:
if c == "=" and attrName:
break
elif c in spaceCharactersBytes:
#Step 6!
c = data.skip()
break
elif c in ("/", ">"):
return "".join(attrName), ""
elif c in asciiUppercaseBytes:
attrName.append(c.lower())
            elif c is None:
return None
else:
attrName.append(c)
#Step 5
c = data.next()
#Step 7
if c != "=":
data.previous()
return "".join(attrName), ""
#Step 8
data.next()
#Step 9
c = data.skip()
#Step 10
if c in ("'", '"'):
#10.1
quoteChar = c
while True:
#10.2
c = data.next()
#10.3
if c == quoteChar:
data.next()
return "".join(attrName), "".join(attrValue)
#10.4
elif c in asciiUppercaseBytes:
attrValue.append(c.lower())
#10.5
else:
attrValue.append(c)
elif c == ">":
return "".join(attrName), ""
elif c in asciiUppercaseBytes:
attrValue.append(c.lower())
elif c is None:
return None
else:
attrValue.append(c)
# Step 11
while True:
c = data.next()
if c in spacesAngleBrackets:
return "".join(attrName), "".join(attrValue)
elif c in asciiUppercaseBytes:
attrValue.append(c.lower())
elif c is None:
return None
else:
attrValue.append(c)
getAttribute.func_annotations = {}
class ContentAttrParser(object):
def __init__(self, data):
assert isinstance(data, str)
self.data = data
__init__.func_annotations = {}
def parse(self):
try:
#Check if the attr name is charset
#otherwise return
self.data.jumpTo("charset")
self.data.position += 1
self.data.skip()
if not self.data.currentByte == "=":
#If there is no = sign keep looking for attrs
return None
self.data.position += 1
self.data.skip()
#Look for an encoding between matching quote marks
if self.data.currentByte in ('"', "'"):
quoteMark = self.data.currentByte
self.data.position += 1
oldPosition = self.data.position
if self.data.jumpTo(quoteMark):
return self.data[oldPosition:self.data.position]
else:
return None
else:
#Unquoted value
oldPosition = self.data.position
try:
self.data.skipUntil(spaceCharactersBytes)
return self.data[oldPosition:self.data.position]
except StopIteration:
#Return the whole remaining value
return self.data[oldPosition:]
except StopIteration:
return None
parse.func_annotations = {}
def codecName(encoding):
u"""Return the python codec name corresponding to an encoding or None if the
string doesn't correspond to a valid encoding."""
if isinstance(encoding, str):
try:
encoding = encoding.decode(u"ascii")
except UnicodeDecodeError:
return None
if encoding:
canonicalName = ascii_punctuation_re.sub(u"", encoding).lower()
return encodings.get(canonicalName, None)
else:
return None
codecName.func_annotations = {}
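if __name__ == "__main__":
    # Hedged demo (added; not part of the original module): sniff an
    # encoding declared via a <meta charset=...> element and read one
    # decoded character.  The expected values are inferred from
    # detectEncodingMeta and codecName above; treat them as illustrative.
    demo = HTMLInputStream("<meta charset='windows-1252'>hello")
    print demo.charEncoding  # likely (u'windows-1252', u'tentative')
    print demo.char()        # first decoded character: u'<'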
|
rcarmo/soup-strainer
|
html5lib/inputstream.py
|
Python
|
mit
| 32,655
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from nagu import (get_color, pack_color, pack, reset,
sequence, parse_line, html)
def test_get_color():
"""
>>> assert get_color(0) == ""
>>> assert get_color(13) == "color: #ff0;"
>>> assert get_color(64) == 'background-color: #ff0;'
>>> assert get_color(13 + 64) == 'color: #ff0;background-color: #ff0;'
"""
pass
def test_pack_color():
"""
>>> assert pack_color(0, 0) == 0
>>> assert pack_color(30, 0) == 14
>>> assert pack_color(44, 14) == 158
>>> assert pack_color(36, 158) == 148
>>> assert pack_color(39, 148) == 144
>>> assert pack_color(49, 144) == 0
"""
pass
def test_pack():
"""
>>> assert pack(0, 0) == 0
>>> assert pack(30, 0) == 14
>>> assert pack(1, 14) == 270
>>> assert pack(4, 270) == 1294
>>> assert pack(21, 1294) == 1038
>>> assert pack(39, 1038) == 1024
>>> assert pack(24, 1024) == 0
"""
pass
def test_reset():
"""
>>> assert reset() is None
>>> assert reset(1) == 1
"""
pass
def test_sequence():
"""
Must return nothing on init
>>> assert sequence(0) == ''
Set to something
>>> assert sequence(1) == '<span style="font-weight: bold;">'
Update
>>> assert sequence(93) == '</span><span style="font-weight: bold;color: #ff0;">'
Not enabled modifier changes nothing
>>> assert sequence(22) == '</span><span style="font-weight: bold;color: #ff0;">'
Disable one
>>> assert sequence(21) == '</span><span style="color: #ff0;">'
Disable
>>> assert sequence(0) == '</span>'
"""
pass
def test_parse_line():
"""
>>> string = "This is \033[1;33myellow\033[39m bold"
>>> result = 'This is <span style="font-weight: bold;color: #cc0;">'+ \
'yellow</span><span style="font-weight: bold;"> bold'
>>> line = ''.join([x for x in parse_line(string)])
>>> assert line == result
>>> reset()
"""
pass
html_text = '''This text is \033[4;34mblue \033[42mwith green background
have \033[1;39mtwo\033[21m lines\033[49m and still underlined\033[0m or not'''
def ts(s, r):
for i in range(0, len(s)):
if s[i] != r[i]:
print i, s[i], r[i]
def test_html():
"""
>>> result = 'This text is <span style="text-decoration: underline;'+ \
'color: #28f;">blue </span><span style="text-decoration: underline;'+ \
'color: #28f;background-color: #0c0;">with green background<br />'+ \
'have </span><span style="font-weight: bold;text-decoration: underline;'+ \
'background-color: #0c0;">two</span><span style="text-decoration: '+ \
'underline;background-color: #0c0;"> lines</span><span '+ \
'style="text-decoration: underline;"> and still underlined'+ \
'</span> or not'
>>> assert html(html_text) == result
>>> reset()
"""
pass
if __name__ == '__main__':
import doctest
doctest.testmod()
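    # Hedged demo (added; not part of the original tests): html() on a short
    # ANSI string; the expected markup is inferred from the doctests above.
    print html("a \033[1mb\033[0m c")
    reset()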
|
SirAnthony/nagu
|
tests.py
|
Python
|
mit
| 2,974
|
import io
import pytest
from contextlib import redirect_stdout
from mock import patch
from mythril.mythril import MythrilLevelDB, MythrilConfig
from mythril.exceptions import CriticalError
@patch("mythril.ethereum.interface.leveldb.client.EthLevelDB.search")
@patch("mythril.ethereum.interface.leveldb.client.ETH_DB", return_value=None)
@patch("mythril.ethereum.interface.leveldb.client.LevelDBReader", return_value=None)
@patch("mythril.ethereum.interface.leveldb.client.LevelDBWriter", return_value=None)
def test_leveldb_code_search(mock_leveldb, f1, f2, f3):
config = MythrilConfig()
config.set_api_leveldb("some path")
leveldb_search = MythrilLevelDB(leveldb=config.eth_db)
leveldb_search.search_db("code#PUSH#")
mock_leveldb.assert_called()
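# Note (added): stacked @patch decorators inject their mocks bottom-up, so
# the first test parameter corresponds to the bottom-most decorator
# (LevelDBWriter) and the last one to the top-most (EthLevelDB.search).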
@patch("mythril.ethereum.interface.leveldb.client.ETH_DB", return_value=None)
@patch("mythril.ethereum.interface.leveldb.client.LevelDBReader", return_value=None)
@patch("mythril.ethereum.interface.leveldb.client.LevelDBWriter", return_value=None)
def test_leveldb_hash_search_incorrect_input(f1, f2, f3):
config = MythrilConfig()
config.set_api_leveldb("some path")
leveldb_search = MythrilLevelDB(leveldb=config.eth_db)
with pytest.raises(CriticalError):
leveldb_search.contract_hash_to_address("0x23")
@patch(
"mythril.ethereum.interface.leveldb.client.EthLevelDB.contract_hash_to_address",
return_value="0xddbb615cb2ffaff7233d8a6f3601621de94795e1",
)
@patch("mythril.ethereum.interface.leveldb.client.ETH_DB", return_value=None)
@patch("mythril.ethereum.interface.leveldb.client.LevelDBReader", return_value=None)
@patch("mythril.ethereum.interface.leveldb.client.LevelDBWriter", return_value=None)
def test_leveldb_hash_search_correct_input(mock_hash_to_address, f1, f2, f3):
config = MythrilConfig()
config.set_api_leveldb("some path")
leveldb_search = MythrilLevelDB(leveldb=config.eth_db)
f = io.StringIO()
with redirect_stdout(f):
leveldb_search.contract_hash_to_address(
"0x0464e651bcc40de28fc7fcde269218d16850bac9689da5f4a6bd640fd3cdf6aa"
)
out = f.getvalue()
mock_hash_to_address.assert_called()
assert out == "0xddbb615cb2ffaff7233d8a6f3601621de94795e1\n"
|
b-mueller/mythril
|
tests/mythril/mythril_leveldb_test.py
|
Python
|
mit
| 2,235
|
#!python
import math
import fractions
import pygame
import argparse
import os.path
import sys
import subprocess
import time
from itertools import combinations,islice
from ntracer import NTracer,Material,ImageFormat,Channel,BlockingRenderer,CUBE
from ntracer.pygame_render import PygameRenderer
ROT_SENSITIVITY = 0.005
WHEEL_INCREMENT = 8
def excepthook(type,value,traceback):
if isinstance(value,Exception):
print('error: '+str(value),file=sys.stderr)
else:
sys.__excepthook__(type,value,traceback)
sys.excepthook = excepthook
def schlafli_component(x):
x = x.partition('/')
p = int(x[0],10)
if p < 3: raise argparse.ArgumentTypeError('a component cannot be less than 3')
if not x[2]: return fractions.Fraction(p)
s = int(x[2],10)
if s < 1: raise argparse.ArgumentTypeError('for component p/q: q cannot be less than 1')
if s >= p: raise argparse.ArgumentTypeError('for component p/q: q must be less than p')
if fractions.gcd(s,p) != 1: raise argparse.ArgumentTypeError('for component p/q: p and q must be co-prime')
return fractions.Fraction(p,s)
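# Hedged illustration (added; not part of the original script): Schlafli
# components parse to exact fractions, e.g. the pentagram symbol "5/2".
def _schlafli_component_examples():
    """
    >>> schlafli_component('5')
    Fraction(5, 1)
    >>> schlafli_component('5/2')
    Fraction(5, 2)
    """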
def positive_int(x):
x = int(x,10)
if x < 1: raise argparse.ArgumentTypeError('a positive number is required')
return x
def screen_size(x):
w,_,h = x.partition('x')
w = int(w,10)
h = int(h,10)
if w < 1 or h < 1: raise argparse.ArgumentTypeError('invalid screen size')
return w,h
def fov_type(x):
x = float(x)
if x <= 0 or x >= 180: raise argparse.ArgumentTypeError('fov must be between 0 and 180 degrees')
return x/180*math.pi
parser = argparse.ArgumentParser(
description='Display a regular polytope given its Schl\u00e4fli symbol.')
parser.add_argument('schlafli',metavar='N',type=schlafli_component,nargs='+',help='the Schl\u00e4fli symbol components')
parser.add_argument('-o','--output',metavar='PATH',help='save an animation to PATH instead of displaying the polytope')
parser.add_argument('-t','--type',metavar='TYPE',default='h264',
help='Specifies output type when --output is used. If TYPE is "png", the '+
'output is a series of PNG images. For any other value, it is used '+
'as the video codec for ffmpeg.')
parser.add_argument('-f','--frames',metavar='F',type=positive_int,default=160,help='when creating an animation or benchmarking, the number of frames to render')
parser.add_argument('-s','--screen',metavar='WIDTHxHEIGHT',type=screen_size,default=(800,600),help='screen size')
parser.add_argument('-a','--fov',metavar='FOV',type=fov_type,default=0.8,help='field of vision in degrees')
parser.add_argument('-d','--cam-dist',metavar='DIST',type=float,default=4,
help='How far the view-port is from the center of the polytope. The '+
        'value is a multiple of the outer radius of the polytope.')
parser.add_argument('--benchmark',action='store_true',help='measure the speed of rendering the scene')
parser.add_argument('--no-special',action='store_true',help='use the slower generic version of library even if a specialized version exists')
args = parser.parse_args()
material = Material((1,0.5,0.5))
nt = NTracer(max(len(args.schlafli)+1,3),force_generic=args.no_special)
def higher_dihedral_supplement(schlafli,ds):
a = math.pi*schlafli.denominator/schlafli.numerator
return 2*math.asin(math.sin(math.acos(1/(math.tan(ds/2)*math.tan(a))))*math.sin(a))
def almost_equal(a,b,threshold=0.001):
return (a-b).absolute() < threshold
def radial_vector(angle):
return nt.Vector.axis(0,math.sin(angle)) + nt.Vector.axis(1,math.cos(angle))
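# Note (added): radial_vector(angle) is the planar unit vector
# (sin(angle), cos(angle)) embedded in the first two axes of the
# n-dimensional space.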
class Instance:
def __init__(self,shape,position,orientation=nt.Matrix.identity()):
self.shape = shape
self.position = position
self.orientation = orientation
self.inv_orientation = orientation.inverse()
def translated(self,position=nt.Vector(),orientation=nt.Matrix.identity()):
return (
position + (orientation * self.position),
orientation * self.orientation)
def tesselate(self,*args):
return self.shape.tesselate(*self.translated(*args))
def tesselate_inner(self,*args):
return self.shape.tesselate_inner(*self.translated(*args))
def any_point(self,*args):
return self.shape.any_point(*self.translated(*args))
def contains(self,p):
return self.shape.contains(self.inv_orientation * (p - self.position))
def star_component(x):
return (x.numerator - 1) > x.denominator > 1
class LineSegment:
star = False
def __init__(self,index,convex_ds,polygon):
self.index = index
self.p = polygon
self.position = radial_vector(index*convex_ds)
def tesselate(self,position,orientation):
return [
orientation*self.p.base_points[self.index-1]+position,
orientation*self.p.base_points[self.index]+position]
class Polygon:
apothem = 1
def __init__(self,schlafli):
self.star = star_component(schlafli)
convex_ds = 2 * math.pi / schlafli.numerator
self.dihedral_s = convex_ds * schlafli.denominator
self.parts = [LineSegment(i,convex_ds,self) for i in range(schlafli.numerator)]
self._circumradius = 1/math.cos(convex_ds/2)
self.base_points = [self._circumradius * radial_vector((i+0.5) * convex_ds) for i in range(schlafli.numerator)]
if self.star:
self._circumradius = math.tan(convex_ds)*math.tan(convex_ds/2) + 1
self.outer_points = [self._circumradius * radial_vector(i * convex_ds) for i in range(schlafli.numerator)]
def points(self,position,orientation,pset=None):
if pset is None: pset = self.base_points
return (orientation * bp + position for bp in pset)
def tesselate_inner(self,position,orientation):
points = list(self.points(position,orientation))
r = [points[0:3]]
for i in range(len(points)-3):
r.append([points[0],points[i+2],points[i+3]])
return r
def tesselate(self,position,orientation):
if not self.star:
return self.tesselate_inner(position,orientation)
points = list(self.points(position,orientation))
opoints = list(self.points(position,orientation,self.outer_points))
return [[opoints[i],points[i-1],points[i]] for i in range(len(points))]
def any_point(self,position,orientation):
return next(self.points(position,orientation))
def contains(self,p):
return any(almost_equal(p,test_p) for test_p in self.base_points)
def hull(self,position=nt.Vector(),orientation=nt.Matrix.identity()):
tris = [nt.TrianglePrototype(tri,material) for tri in self.tesselate_inner(position,orientation)]
if self.star: tris.extend(nt.TrianglePrototype(tri,material) for tri in
self.tesselate(position,orientation))
return tris
def circumradius(self):
return self._circumradius
def circumradius_square(self):
return self._circumradius*self._circumradius
def line_apothem_square(self):
return 1
class Plane:
def __init__(self,nt,position):
self.normal = position.unit()
self.d = -position.absolute()
self._dot = nt.dot
def distance(self,point):
return self._dot(point,self.normal) + self.d
class Line:
def __init__(self,nt,p0,v,planes,outer=False):
self.p0 = p0
self.v = v
self.planes = set(planes)
self.outer = outer
self._dot = nt.dot
def point_at(self,t):
return self.p0 + self.v*t
def dist_square(self,point):
a = point - self.p0
b = self._dot(a,self.v)
return a.square() - b*b/self.v.square()
def __repr__(self):
return 'Line({0!r},{1!r})'.format(self.p0,self.v)
def plane_point_intersection(nt,planes):
assert nt.dimension == len(planes)
try:
return nt.Matrix(p.normal for p in planes).inverse()*nt.Vector(-p.d for p in planes)
except ValueError:
return None
def plane_line_intersection(nt,planes):
assert nt.dimension - 1 == len(planes)
v = nt.cross(p.normal for p in planes).unit()
return Line(
nt,
nt.Matrix([p.normal for p in planes] + [v]).inverse() * nt.Vector([-p.d for p in planes] + [0]),
v,
planes)
def line_intersection(nt,l1,l2):
d = nt.dot(l1.v,l2.v)
denom = 1 - d*d
if not denom: return None
id = 1/denom
a = nt.dot(l2.p0 - l1.p0,l1.v)
b = nt.dot(l1.p0 - l2.p0,l2.v)
t1 = id*(a + d*b)
t2 = id*(d*a + b)
p1 = l1.point_at(t1)
p2 = l2.point_at(t2)
if abs(p1-p2) > 0.01: return None
return (p1 + p2) * 0.5, t1, t2
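# Note (added): line_intersection finds the closest points of two lines
# using dot products alone.  A vanishing denominator (parallel lines) or
# closest points more than 0.01 apart (skew lines) yields None; otherwise
# the midpoint and both line parameters are returned.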
class Node:
def __init__(self,pos,planes,outer,alive=True):
self.pos = pos
self.planes = planes
self.outer = outer
self.neighbors = set() if alive else None
def detach(self):
for n in self.neighbors:
n.neighbors.remove(self)
self.neighbors = None
@property
def dead(self):
return self.neighbors is None
def find_cycles(self,length,sequence=None,exclude=None):
if sequence is None: sequence = [self]
if len(sequence) < length:
exclude = exclude.copy() if exclude is not None else set([self])
for n in self.neighbors:
if n not in exclude:
exclude.add(n)
for r in n.find_cycles(length,sequence + [n],exclude):
yield r
else:
for n in self.neighbors:
if n is sequence[0] and n.planes.intersection(*(sequence[i].planes for i in range(1,len(sequence)))):
yield sequence
def join(a,b):
if not (a.dead or b.dead):
a.neighbors.add(b)
b.neighbors.add(a)
class FuzzyGraph:
def __init__(self):
self.nodes = []
def add(self,pos,planes,outer):
for n in self.nodes:
if almost_equal(n.pos,pos):
n.planes |= planes
return n
n = Node(pos,planes,outer)
self.nodes.append(n)
return n
def remove_at(self,i):
self.nodes[i].detach()
if i+1 != len(self.nodes):
self.nodes[i] = self.nodes[-1]
del self.nodes[-1]
def remove(self,pos):
if isinstance(pos,Node):
if not pos.dead:
self.remove_at(self.nodes.index(pos))
else:
for i,n in enumerate(self.nodes):
if almost_equal(n.pos,pos):
self.remove_at(i)
break
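# Note (added): FuzzyGraph merges any vertex added within the almost_equal()
# tolerance of an existing one, absorbing floating-point noise when several
# face planes meet at what is logically a single vertex.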
# Cells are enlarged ever so slightly to prevent the view frustum from being
# wedged exactly between two adjacent primitives, which, due to limited
# precision, can cause that volume to appear to vanish.
fuzz_scale = nt.Matrix.scale(1.00001)
class PolyTope:
def __init__(self,dimension,schlafli,dihedral_s,face_apothem):
self.dimension = dimension
self.schlafli = schlafli
self.dihedral_s = dihedral_s
self.apothem = math.tan((math.pi - dihedral_s)/2) * face_apothem
self.star = star_component(schlafli)
self.parts = []
@property
def facet(self):
return self.parts[0].shape
def propogate_faces(self,potentials):
new_p = []
for instance,p in potentials:
dir = (instance.orientation * p.position).unit()
reflect = nt.Matrix.reflection(dir)
turn = nt.Matrix.rotation(
instance.position.unit(),
dir,
self.dihedral_s)
new_p += self.add_face(Instance(
instance.shape,
turn * instance.position,
fuzz_scale * turn * reflect * instance.orientation))
return new_p
def add_face(self,instance):
for p in self.parts:
if almost_equal(instance.position,p.position): return []
self.parts.append(instance)
return [(instance,p) for p in instance.shape.parts]
def star_tesselation(self):
t = getattr(self,'_star_tesselation',None)
if t is None:
co_nt = NTracer(self.dimension)
lines = []
planes = [Plane(co_nt,co_nt.Vector(islice(part.position,co_nt.dimension))) for part in self.parts]
las = self.line_apothem_square()
for pgroup in combinations(planes,co_nt.dimension-1):
try:
line = plane_line_intersection(co_nt,pgroup)
except ValueError:
pass
else:
if line:
for lineb in lines:
if almost_equal(line.p0,lineb.p0) and almost_equal(line.v,lineb.v):
lineb.planes |= line.planes
break
else:
outer_dist = line.dist_square(co_nt.Vector()) - las
if outer_dist < 0.1:
line.outer = outer_dist > -0.1
lines.append(line)
pmap = {}
for line in lines:
pmap[line] = {}
graph = FuzzyGraph()
maxr = self.circumradius_square() + 0.1
for l1,l2 in combinations(lines,2):
inter = line_intersection(co_nt,l1,l2)
if inter and inter[0].square() < maxr:
n = graph.add(inter[0],l1.planes | l2.planes,l1.outer or l2.outer)
pmap[l1][n] = inter[1]
pmap[l2][n] = inter[2]
for line,poss in pmap.items():
if len(poss) == 0: continue
if len(poss) == 1:
                graph.remove(next(iter(poss)))
continue
poss = sorted(poss.items(),key=(lambda x: x[1]))
if line.outer:
for i in range(len(poss)-1):
join(poss[i][0],poss[i+1][0])
elif len(poss) == 2:
join(poss[0][0],poss[1][0])
elif len(poss) > 3:
for i in range(2,len(poss)-2):
graph.remove(poss[i][0])
join(poss[0][0],poss[1][0])
join(poss[-1][0],poss[-2][0])
t = []
self._star_tesselation = t
for n in islice(graph.nodes,0,len(graph.nodes)-co_nt.dimension):
for cycle in n.find_cycles(co_nt.dimension):
t.append([nt.Vector(tuple(x.pos) + (0,) * (nt.dimension-co_nt.dimension)) for x in cycle] + [nt.Vector()])
n.detach()
return t
def tesselate(self,position,orientation):
if self.star or self.facet.star:
return [[orientation * p + position for p in tri] for tri in self.star_tesselation()]
return self.tesselate_inner(position,orientation)
def tesselate_inner(self,position,orientation):
tris = []
point1 = self.parts[0].any_point(position,orientation)
inv_orientation = orientation.inverse()
for part in self.parts[1:]:
if not part.contains(inv_orientation * (point1 - position)):
new_t = part.tesselate(position,orientation)
for t in new_t: t.append(point1)
tris += new_t
return tris
def hull(self,position=nt.Vector(),orientation=nt.Matrix.identity()):
tris = []
for p in self.parts:
tris += p.tesselate(position,orientation)
return [nt.TrianglePrototype(tri,material) for tri in tris]
def any_point(self,position,orientation):
return self.parts[0].any_point(position,orientation)
def contains(self,p):
return any(part.contains(p) for part in self.parts)
def circumradius_square(self):
return self.apothem*self.apothem + self.facet.circumradius_square()
def circumradius(self):
return math.sqrt(self.circumradius_square())
def line_apothem_square(self):
return self.apothem*self.apothem + self.facet.line_apothem_square()
def compose(part,order,schlafli):
if schlafli.numerator * (math.pi - part.dihedral_s) >= math.pi * 2 * schlafli.denominator:
exit("Component #{0} ({1}) is invalid because the angles of the parts add up to 360\u00b0 or\nmore and thus can't be folded inward".format(order,schlafli))
higher = PolyTope(
order+1,
schlafli,
higher_dihedral_supplement(schlafli,part.dihedral_s),
part.apothem)
potentials = higher.add_face(Instance(part,nt.Vector.axis(order,higher.apothem)))
while potentials:
potentials = higher.propogate_faces(potentials)
return higher
jitter = nt.Vector((0,0,0) + (0.0001,) * (nt.dimension-3))
def process_movement():
global x_move, y_move, w_move
if x_move or y_move or w_move:
h = math.sqrt(x_move*x_move + y_move*y_move + w_move*w_move)
a2 = camera.axes[0]*(x_move/h) + camera.axes[1]*(-y_move/h)
if w_move: a2 += camera.axes[3] * (w_move / h)
camera.transform(nt.Matrix.rotation(
camera.axes[2],
a2,
h * ROT_SENSITIVITY))
camera.normalize()
camera.origin = camera.axes[2] * cam_distance + jitter
scene.set_camera(camera)
x_move = 0
y_move = 0
w_move = 0
run()
def run():
global running
running = True
render.begin_render(screen,scene)
try:
timer = time.perf_counter
except AttributeError:
timer = time.clock
if args.benchmark and not sys.platform.startswith('win'):
print('''warning: on multi-core systems, Python\'s high-resolution timer may combine
time spent on all cores, making the reported time spent rendering much higher
than the actual time''',file=sys.stderr)
class RotatingCamera(object):
incr = 2 * math.pi / args.frames
h = 1/math.sqrt(nt.dimension-1)
_timer = staticmethod(timer if args.benchmark else (lambda: 0))
def __enter__(self):
self.frame = 0
self.total_time = 0
return self
def __exit__(self,type,value,tb):
if type is None and self.total_time:
print('''rendered {0} frame(s) in {1} seconds
time per frame: {2} seconds
frames per second: {3}'''.format(self.frame,self.total_time,self.total_time/self.frame,self.frame/self.total_time))
def start_timer(self):
self.t = self._timer()
def end_timer(self):
self.total_time += self._timer() - self.t
def advance_camera(self):
self.frame += 1
if self.frame >= args.frames: return False
a2 = camera.axes[0]*self.h + camera.axes[1]*self.h
for i in range(nt.dimension-3): a2 += camera.axes[i+3]*self.h
camera.transform(nt.Matrix.rotation(camera.axes[2],a2,self.incr))
camera.normalize()
camera.origin = camera.axes[2] * cam_distance
scene.set_camera(camera)
return True
if nt.dimension >= 3 and args.schlafli[0] == 4 and all(c == 3 for c in args.schlafli[1:]):
cam_distance = -math.sqrt(nt.dimension) * args.cam_dist
scene = nt.BoxScene()
else:
print('building geometry...')
timing = timer()
p = Polygon(args.schlafli[0])
for i,s in enumerate(args.schlafli[1:]):
p = compose(p,i+2,s)
hull = p.hull()
timing = timer() - timing
print('done in {0} seconds'.format(timing))
cam_distance = -math.sqrt(p.circumradius_square()) * args.cam_dist
print('partitioning scene...')
timing = timer()
scene = nt.build_composite_scene(hull)
timing = timer() - timing
print('done in {0} seconds'.format(timing))
del p
del hull
camera = nt.Camera()
camera.translate(nt.Vector.axis(2,cam_distance) + jitter)
scene.set_camera(camera)
scene.set_fov(args.fov)
if args.output is not None:
if args.type != 'png':
render = BlockingRenderer()
format = ImageFormat(
args.screen[0],
args.screen[1],
[Channel(16,1,0,0),
Channel(16,0,1,0),
Channel(16,0,0,1)])
surf = bytearray(args.screen[0]*args.screen[1]*format.bytes_per_pixel)
pipe = subprocess.Popen(['ffmpeg',
'-y',
'-f','rawvideo',
'-vcodec','rawvideo',
'-s','{0}x{1}'.format(*args.screen),
'-pix_fmt','rgb48be',
'-r','60',
'-i','-',
'-an',
'-vcodec',args.type,
'-crf','10',
args.output],
stdin=subprocess.PIPE)
try:
with RotatingCamera() as rc:
while True:
rc.start_timer()
render.render(surf,format,scene)
rc.end_timer()
                # write the raw rgb48be frame bytes to ffmpeg's stdin
                pipe.stdin.write(surf)
if not rc.advance_camera(): break
finally:
pipe.stdin.close()
r = pipe.wait()
sys.exit(r)
pygame.display.init()
render = PygameRenderer()
surf = pygame.Surface(args.screen,depth=24)
def announce_frame(frame):
print('drawing frame {0}/{1}'.format(frame+1,args.frames))
with RotatingCamera() as rc:
announce_frame(0)
rc.start_timer()
render.begin_render(surf,scene)
while True:
e = pygame.event.wait()
if e.type == pygame.USEREVENT:
rc.end_timer()
pygame.image.save(
surf,
os.path.join(args.output,'frame{0:04}.png'.format(rc.frame)))
if not rc.advance_camera(): break
announce_frame(rc.frame)
rc.start_timer()
render.begin_render(surf,scene)
elif e.type == pygame.QUIT:
render.abort_render()
break
else:
pygame.display.init()
render = PygameRenderer()
screen = pygame.display.set_mode(args.screen)
if args.benchmark:
with RotatingCamera() as rc:
rc.start_timer()
render.begin_render(screen,scene)
while True:
e = pygame.event.wait()
if e.type == pygame.USEREVENT:
rc.end_timer()
pygame.display.flip()
if not rc.advance_camera(): break
rc.start_timer()
render.begin_render(screen,scene)
elif e.type == pygame.QUIT:
render.abort_render()
break
else:
running = False
run()
x_move = 0
y_move = 0
w_move = 0
while True:
e = pygame.event.wait()
if e.type == pygame.MOUSEMOTION:
if e.buttons[0]:
x_move += e.rel[0]
y_move += e.rel[1]
if not running:
process_movement()
elif e.type == pygame.MOUSEBUTTONDOWN:
if nt.dimension > 3:
if e.button == 4 or e.button == 5:
if e.button == 4:
w_move += WHEEL_INCREMENT
else:
w_move -= WHEEL_INCREMENT
if not running:
process_movement()
elif e.type == pygame.USEREVENT:
running = False
pygame.display.flip()
process_movement()
elif e.type == pygame.KEYDOWN:
if e.key == pygame.K_c:
x,y = pygame.mouse.get_pos()
fovI = (2 * math.tan(scene.fov/2)) / screen.get_width()
print(camera.origin)
print((camera.axes[2] + camera.axes[0] * (fovI * (x - screen.get_width()/2)) - camera.axes[1] * (fovI * (y - screen.get_height()/2))).unit())
elif e.type == pygame.QUIT:
render.abort_render()
break
|
Rouslan/NTracer
|
scripts/polytope.py
|
Python
|
mit
| 24,145
|
from growler_guys import scrape_growler_guys
|
ryanpitts/growlerbot
|
scrapers/__init__.py
|
Python
|
mit
| 45
|
# coding: utf-8
from __future__ import absolute_import
from apscheduler.jobstores.base import BaseJobStore, JobLookupError, ConflictingIdError
from apscheduler.util import datetime_to_utc_timestamp
class MemoryJobStore(BaseJobStore):
"""
Stores jobs in an array in RAM. Provides no persistence support.
Plugin alias: ``memory``
"""
def __init__(self):
super(MemoryJobStore, self).__init__()
self._jobs = [] # list of (job, timestamp), sorted by next_run_time and job id (ascending)
self._jobs_index = {} # id -> (job, timestamp) lookup table
def lookup_job(self, job_id):
return self._jobs_index.get(job_id, (None, None))[0]
def get_due_jobs(self, now):
now_timestamp = datetime_to_utc_timestamp(now)
pending = []
for job, timestamp in self._jobs:
if timestamp is None or timestamp > now_timestamp:
break
pending.append(job)
return pending
def get_next_run_time(self):
return self._jobs[0][0].next_run_time if self._jobs else None
def get_all_jobs(self):
return [j[0] for j in self._jobs]
def add_job(self, job):
if job.id in self._jobs_index:
raise ConflictingIdError(job.id)
timestamp = datetime_to_utc_timestamp(job.next_run_time)
index = self._get_job_index(timestamp, job.id)
self._jobs.insert(index, (job, timestamp))
self._jobs_index[job.id] = (job, timestamp)
def update_job(self, job):
old_job, old_timestamp = self._jobs_index.get(job.id, (None, None))
if old_job is None:
raise JobLookupError(job.id)
# If the next run time has not changed, simply replace the job in its present index.
# Otherwise, reinsert the job to the list to preserve the ordering.
old_index = self._get_job_index(old_timestamp, old_job.id)
new_timestamp = datetime_to_utc_timestamp(job.next_run_time)
if old_timestamp == new_timestamp:
self._jobs[old_index] = (job, new_timestamp)
else:
del self._jobs[old_index]
new_index = self._get_job_index(new_timestamp, job.id)
self._jobs.insert(new_index, (job, new_timestamp))
self._jobs_index[old_job.id] = (job, new_timestamp)
def remove_job(self, job_id):
job, timestamp = self._jobs_index.get(job_id, (None, None))
if job is None:
raise JobLookupError(job_id)
index = self._get_job_index(timestamp, job_id)
del self._jobs[index]
del self._jobs_index[job.id]
def remove_all_jobs(self):
self._jobs = []
self._jobs_index = {}
def shutdown(self):
self.remove_all_jobs()
def _get_job_index(self, timestamp, job_id):
"""
Returns the index of the given job, or if it's not found, the index where the job should be inserted based on
the given timestamp.
:type timestamp: int
:type job_id: str
"""
lo, hi = 0, len(self._jobs)
timestamp = float('inf') if timestamp is None else timestamp
while lo < hi:
mid = (lo + hi) // 2
mid_job, mid_timestamp = self._jobs[mid]
mid_timestamp = float('inf') if mid_timestamp is None else mid_timestamp
if mid_timestamp > timestamp:
hi = mid
elif mid_timestamp < timestamp:
lo = mid + 1
elif mid_job.id > job_id:
hi = mid
elif mid_job.id < job_id:
lo = mid + 1
else:
return mid
return lo
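    # Note (added): the loop above is a hand-rolled bisect_left over the
    # composite key (timestamp, job id), with None timestamps mapped to
    # float('inf') so paused jobs sort last; this keeps self._jobs ordered
    # for get_due_jobs() and get_next_run_time().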
|
cychenyin/windmill
|
apscheduler/jobstores/memory.py
|
Python
|
mit
| 3,664
|
import pygame
from pygame.locals import *
class Application:
def __init__(self, screen_size, caption="PyGame"):
pygame.init()
self.display_surface = pygame.display.set_mode(screen_size)
pygame.display.set_caption(caption)
self.is_run = False
self.update_func = self.__update_stub
self.draw_func = self.__draw_stub
self.keyup_listeners = []
self.keydown_listeners = []
self.mouseup_listeners = []
self.mousedown_listeners = []
self.clock = pygame.time.Clock()
self.fps_limit = 60
    def __update_stub(self, dt):
        pass
    def __draw_stub(self, display_surface, dt):
        pass
def set_update(self, update):
self.update_func = update
def set_draw(self, draw):
self.draw_func = draw
def esc(self):
self.is_run = False
def append_keyup_listener(self, listener):
self.keyup_listeners.append(listener)
def remove_keyup_listener(self, listener):
self.keyup_listeners.remove(listener)
def append_keydown_listener(self, listener):
self.keydown_listeners.append(listener)
def remove_keydown_listener(self, listener):
self.keydown_listeners.remove(listener)
def append_mouseup_listener(self, listener):
self.mouseup_listeners.append(listener)
def remove_mouseup_listener(self, listener):
self.mouseup_listeners.remove(listener)
def append_mousedown_listener(self, listener):
self.mousedown_listeners.append(listener)
def remove_mousedown_listener(self, listener):
self.mousedown_listeners.remove(listener)
def __events(self):
for event in pygame.event.get():
if event.type == QUIT:
self.esc()
elif event.type == KEYUP:
for listener in self.keyup_listeners:
listener(event.type, event.key)
elif event.type == KEYDOWN:
for listener in self.keydown_listeners:
listener(event.type, event.key)
elif event.type == MOUSEBUTTONUP:
for listener in self.mouseup_listeners:
listener(event.type, event.button, event.pos)
elif event.type == MOUSEBUTTONDOWN:
for listener in self.mousedown_listeners:
listener(event.type, event.button, event.pos)
def __update(self, dt):
self.update_func(dt)
def __draw(self, dt):
self.draw_func(self.display_surface, dt)
def set_fps_limit(self, fps_limit):
self.fps_limit = fps_limit
def get_fps(self):
return self.clock.get_fps()
def set_caption(self, caption):
pygame.display.set_caption(caption)
def run(self):
dt = 0
self.is_run = True
while self.is_run:
self.__events()
self.__update(dt)
self.__draw(dt)
pygame.display.update()
dt = self.clock.tick(self.fps_limit)
pygame.quit()
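if __name__ == "__main__":
    # Hedged usage sketch (added; not part of the original module): open a
    # window, clear it every frame, and quit on any key press.  Callback
    # signatures follow __update, __draw and __events above.
    app = Application((640, 480), caption="Demo")
    app.set_update(lambda dt: None)
    app.set_draw(lambda surface, dt: surface.fill((0, 0, 0)))
    app.append_keydown_listener(lambda event_type, key: app.esc())
    app.run()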
|
Plambir/pyclicker
|
game/Application.py
|
Python
|
mit
| 3,037
|
from __future__ import absolute_import
from .activation_maximization import visualize_activation_with_losses
from .activation_maximization import visualize_activation
from .saliency import visualize_saliency_with_losses
from .saliency import visualize_saliency
from .saliency import visualize_cam_with_losses
from .saliency import visualize_cam
from keras import backend as K
def get_num_filters(layer):
"""Determines the number of filters within the given `layer`.
Args:
layer: The keras layer to use.
Returns:
Total number of filters within `layer`.
        For a `keras.layers.Dense` layer, this is the total number of outputs.
"""
# Handle layers with no channels.
if K.ndim(layer.output) == 2:
return K.int_shape(layer.output)[-1]
channel_idx = 1 if K.image_data_format() == 'channels_first' else -1
return K.int_shape(layer.output)[channel_idx]
def overlay(array1, array2, alpha=0.5):
"""Overlays `array1` onto `array2` with `alpha` blending.
Args:
array1: The first numpy array.
array2: The second numpy array.
        alpha: The alpha value of `array1` as overlaid onto `array2`. This value needs to be between [0, 1],
            with 0 being `array2` only and 1 being `array1` only (Default value = 0.5).
Returns:
        `array1` overlaid onto `array2` using `alpha` blending.
"""
if alpha < 0. or alpha > 1.:
raise ValueError("`alpha` needs to be between [0, 1]")
if array1.shape != array2.shape:
raise ValueError('`array1` and `array2` must have the same shapes')
return (array1 * alpha + array2 * (1. - alpha)).astype(array1.dtype)
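if __name__ == "__main__":
    # Hedged sanity check (added; not part of the original module):
    # overlaying an all-zeros array onto an all-ones array at alpha=0.7
    # should yield 0.3 everywhere, per the blending formula in overlay().
    import numpy as np
    a = np.zeros((2, 2), dtype=np.float32)
    b = np.ones((2, 2), dtype=np.float32)
    print(overlay(a, b, alpha=0.7))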
|
raghakot/keras-vis
|
vis/visualization/__init__.py
|
Python
|
mit
| 1,680
|
# -*- coding: UTF-8 -*-
"""
Package-wide constants.
"""
CALL = 'C'
PUT = 'P'
|
zzzoidberg/landscape
|
finance/consts.py
|
Python
|
mit
| 78
|
from distutils.core import setup
setup( name='dramatis',
version='0.1.1',
author='Steven Parkes',
author_email='smparkes@smparkes.net',
url='http://dramatis.mischance.net',
description="an actor library for ruby and python",
package_dir = {'':'lib'},
packages=[
'dramatis',
'dramatis.error',
'dramatis.future_value',
'dramatis.actor',
'dramatis.actor.name',
'dramatis.runtime',
'dramatis.runtime.actor',
'dramatis.runtime.continuation',
],
)
|
dramatis/dramatis
|
setup.py
|
Python
|
mit
| 633
|
#!/usr/bin/python
import cgi
from redis import Connection
from socket import gethostname
from navi import *
fields = cgi.FieldStorage()
title = "Message Box"
msg_prefix = 'custom.message.'
def insert_msg(cust, tm, msg):
conn = Connection(host=gethostname(),port=6379)
conn.send_command('set', msg_prefix+cust+'--'+tm, msg)
conn.disconnect()
def read_msg():
ret = ''
conn = Connection(host=gethostname(),port=6379)
conn.send_command('keys', msg_prefix+'*')
keys = conn.read_response()
vals = []
if len(keys) != 0:
conn.send_command('mget', *keys)
vals = conn.read_response()
ret += "<h2>" + "Message log" + "</h2>"
for k, v in zip(keys, vals):
ret += "<span>" + k.replace(msg_prefix, '').replace('--', ' ') + "</span>"
ret += "<pre readonly=\"true\">" + v + "</pre>"
conn.disconnect()
ret += "<br>"
return ret
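# Note (added): redis.Connection is the low-level protocol socket, so these
# helpers send raw commands with send_command() and, for reads, pull replies
# with read_response() instead of going through a high-level Redis client.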
def reply():
import time, os
ret = ""
ret += "Content-Type: text/html\n\n"
ret += "<!DOCTYPE html>"
ret += "<html>"
ret += default_head(title)
ret += default_navigator()
ret += "<body>"
ret += "<div class=\"content\">"
ret += "<h2>Welcome, " + os.environ["REMOTE_ADDR"] + "!</h2>"
ret += "<span>" + os.environ["HTTP_USER_AGENT"] + "</span><br><br>"
if fields.has_key('msgbox'):
insert_msg(os.environ["REMOTE_ADDR"], time.strftime(time.asctime()), fields['msgbox'].value)
ret += read_msg()
ret += "</div>"
ret += "</body>"
ret += "</html>"
print ret
reply()
|
Zex/Starter
|
cgi-bin/leave_message.py
|
Python
|
mit
| 1,575
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
from __future__ import division, absolute_import
import os
import sys
from textwrap import dedent
from twisted.trial import unittest
from twisted.persisted import sob
from twisted.python import components
from twisted.persisted.styles import Ephemeral
class Dummy(components.Componentized):
pass
objects = [
1,
"hello",
(1, "hello"),
[1, "hello"],
{1:"hello"},
]
class FakeModule(object):
pass
class PersistTests(unittest.TestCase):
def testStyles(self):
for o in objects:
p = sob.Persistent(o, '')
for style in 'source pickle'.split():
p.setStyle(style)
p.save(filename='persisttest.'+style)
o1 = sob.load('persisttest.'+style, style)
self.assertEqual(o, o1)
def testStylesBeingSet(self):
o = Dummy()
o.foo = 5
o.setComponent(sob.IPersistable, sob.Persistent(o, 'lala'))
for style in 'source pickle'.split():
sob.IPersistable(o).setStyle(style)
sob.IPersistable(o).save(filename='lala.'+style)
o1 = sob.load('lala.'+style, style)
self.assertEqual(o.foo, o1.foo)
self.assertEqual(sob.IPersistable(o1).style, style)
def testPassphraseError(self):
"""
Calling save() with a passphrase is an error.
"""
        p = sob.Persistent(None, 'object')
self.assertRaises(
TypeError, p.save, 'filename.pickle', passphrase='abc')
def testNames(self):
o = [1,2,3]
p = sob.Persistent(o, 'object')
for style in 'source pickle'.split():
p.setStyle(style)
p.save()
o1 = sob.load('object.ta'+style[0], style)
self.assertEqual(o, o1)
for tag in 'lala lolo'.split():
p.save(tag)
o1 = sob.load('object-'+tag+'.ta'+style[0], style)
self.assertEqual(o, o1)
def testPython(self):
with open("persisttest.python", 'w') as f:
f.write('foo=[1,2,3] ')
o = sob.loadValueFromFile('persisttest.python', 'foo')
self.assertEqual(o, [1,2,3])
def testTypeGuesser(self):
self.assertRaises(KeyError, sob.guessType, "file.blah")
self.assertEqual('python', sob.guessType("file.py"))
self.assertEqual('python', sob.guessType("file.tac"))
self.assertEqual('python', sob.guessType("file.etac"))
self.assertEqual('pickle', sob.guessType("file.tap"))
self.assertEqual('pickle', sob.guessType("file.etap"))
self.assertEqual('source', sob.guessType("file.tas"))
self.assertEqual('source', sob.guessType("file.etas"))
def testEverythingEphemeralGetattr(self):
"""
        L{_EverythingEphemeral.__getattr__} will proxy the __main__ module as an
L{Ephemeral} object, and during load will be transparent, but after
load will return L{Ephemeral} objects from any accessed attributes.
"""
self.fakeMain.testMainModGetattr = 1
dirname = self.mktemp()
os.mkdir(dirname)
filename = os.path.join(dirname, 'persisttest.ee_getattr')
global mainWhileLoading
mainWhileLoading = None
with open(filename, "w") as f:
f.write(dedent("""
app = []
import __main__
app.append(__main__.testMainModGetattr == 1)
try:
__main__.somethingElse
except AttributeError:
app.append(True)
else:
app.append(False)
from twisted.test import test_sob
test_sob.mainWhileLoading = __main__
"""))
loaded = sob.load(filename, 'source')
self.assertIsInstance(loaded, list)
self.assertTrue(loaded[0], "Expected attribute not set.")
self.assertTrue(loaded[1], "Unexpected attribute set.")
self.assertIsInstance(mainWhileLoading, Ephemeral)
self.assertIsInstance(mainWhileLoading.somethingElse, Ephemeral)
del mainWhileLoading
def testEverythingEphemeralSetattr(self):
"""
Verify that _EverythingEphemeral.__setattr__ won't affect __main__.
"""
self.fakeMain.testMainModSetattr = 1
dirname = self.mktemp()
os.mkdir(dirname)
filename = os.path.join(dirname, 'persisttest.ee_setattr')
with open(filename, 'w') as f:
f.write('import __main__\n')
f.write('__main__.testMainModSetattr = 2\n')
f.write('app = None\n')
sob.load(filename, 'source')
self.assertEqual(self.fakeMain.testMainModSetattr, 1)
def testEverythingEphemeralException(self):
"""
Test that an exception during load() won't cause _EE to mask __main__
"""
dirname = self.mktemp()
os.mkdir(dirname)
filename = os.path.join(dirname, 'persisttest.ee_exception')
with open(filename, 'w') as f:
f.write('raise ValueError\n')
self.assertRaises(ValueError, sob.load, filename, 'source')
self.assertEqual(type(sys.modules['__main__']), FakeModule)
def setUp(self):
"""
Replace the __main__ module with a fake one, so that it can be mutated
in tests
"""
self.realMain = sys.modules['__main__']
self.fakeMain = sys.modules['__main__'] = FakeModule()
def tearDown(self):
"""
Restore __main__ to its original value
"""
sys.modules['__main__'] = self.realMain
|
whitehorse-io/encarnia
|
pyenv/lib/python2.7/site-packages/twisted/test/test_sob.py
|
Python
|
mit
| 5,632
|
#! flask/bin/python
from os.path import abspath
from flask import current_app
from flask.ext.script import Manager
from flask.ext.assets import ManageAssets
from flask.ext.migrate import Migrate, MigrateCommand
from bluespot import create_app
from bluespot.extensions import db
app = create_app(mode='development')
manager = Manager(app)
manager.add_command('assets', ManageAssets())
migrate = Migrate(app, db)
manager.add_command('db', MigrateCommand)
if __name__ == '__main__':
    manager.run()
#app.run(host='0.0.0.0',debug = True)
|
unifispot/unifispot-free
|
manage.py
|
Python
|
mit
| 510
|
# coding=utf-8
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import re
from io import StringIO
from .strings import escape
EMBEDDED_NEWLINE_MATCHER = re.compile(r'[^\n]\n+[^\n]')
class PoFile(object):
def __init__(self):
self.header_fields = []
self._header_index = {}
self.entries = {}
def clone(self):
po_file = PoFile()
po_file.header_fields.extend(self.header_fields)
for msgid, entry in self.entries.items():
po_file.entries[msgid] = entry.clone()
return po_file
def add_header_field(self, field, value):
if field in self._header_index:
self.header_fields[self._header_index[field]] = (field, value)
else:
self._header_index[field] = len(self.header_fields)
self.header_fields.append((field, value))
def add_entry(self, message, plural=None, context=None):
msgid = get_msgid(message, context)
if msgid in self.entries:
entry = self.entries[msgid]
# Allow merging a non-plural entry with a plural entry
# If more than one plural entry only keep the first
if entry.plural is None:
entry.plural = plural
else:
entry = TranslationEntry(message, plural, context)
self.entries[msgid] = entry
return entry
def dump(self, fp, include_locations=True, prune_obsoletes=False):
needs_blank_line = False
if len(self.header_fields):
print('msgid ""', file=fp)
print('msgstr ""', file=fp)
for field, value in self.header_fields:
print(r'"{}: {}\n"'.format(field, value), file=fp)
needs_blank_line = True
nplurals = self.get_nplurals()
for entry in sorted(self.entries.values(), key=get_entry_sort_key):
if needs_blank_line:
print('', file=fp)
needs_blank_line = entry.dump(
fp, nplurals, include_locations=include_locations, prune_obsolete=prune_obsoletes)
def dumps(self, include_locations=True, prune_obsoletes=False):
string_file = StringIO()
self.dump(string_file, include_locations, prune_obsoletes)
return string_file.getvalue()
def get_catalog(self):
catalog = {}
for entry in self.entries.values():
entry.fill_catalog(catalog)
return catalog
def get_nplurals(self):
plural_field_index = self._header_index.get('Plural-Forms', -1)
if plural_field_index != -1:
field, value = self.header_fields[plural_field_index]
if field == 'Plural-Forms':
for pair in value.split(';'):
parts = pair.partition('=')
if parts[0].strip() == 'nplurals':
return int(parts[2].strip())
return None
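# Example: a file whose header was built with
#   add_header_field('Plural-Forms', 'nplurals=2; plural=(n != 1);')
# reports get_nplurals() == 2.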
class TranslationEntry(object):
MIN_NPLURALS = 2
def __init__(self, message, plural=None, context=None):
self.message = message
self.plural = plural
self.context = context
self.locations = []
self.translations = {}
def clone(self):
entry = TranslationEntry(self.message, self.plural, self.context)
entry.locations.extend(self.locations)
entry.translations = self.translations.copy()
return entry
def add_location(self, filename, lineno):
self.locations.append((filename, lineno))
def add_translation(self, translation):
self.add_plural_translation(0, translation)
def add_plural_translation(self, index, translation):
self.translations[index] = translation
def fill_catalog(self, catalog):
msgid = get_msgid(self.message, self.context)
if self.plural is not None:
for index, translation in self.translations.items():
if translation:
catalog[(msgid, index)] = translation
else:
translation = self.translations.get(0, '')
if translation:
catalog[msgid] = translation
def dump(self, fp, nplurals=None, include_locations=True, prune_obsolete=False):
"""
If plural, shows exactly 'nplurals' plurals if 'nplurals' is not None, else shows at least min_nplurals.
All plural index are ordered and consecutive, missing entries are displayed with an empty string.
"""
if not len(self.locations):
if prune_obsolete or all(translation == '' for index, translation in self.translations.items()):
return False
else:
print('#. obsolete entry', file=fp)
if include_locations and len(self.locations):
print('#: {}'.format(' '.join('{}:{}'.format(*location) for location in self.locations)), file=fp)
if self.context is not None:
print('msgctxt {}'.format(multiline_escape(self.context)), file=fp)
print('msgid {}'.format(multiline_escape(self.message)), file=fp)
if self.plural is not None:
print('msgid_plural {}'.format(multiline_escape(self.plural)), file=fp)
if nplurals is None:
nplurals = self.get_suggested_nplurals()
for index in range(nplurals):
print('msgstr[{}] {}'.format(index, multiline_escape(self.translations.get(index, ''))), file=fp)
else:
print('msgstr {}'.format(multiline_escape(self.translations.get(0, ''))), file=fp)
return True
def get_suggested_nplurals(self):
if len(self.translations) > 0:
return max(max(self.translations.keys()) + 1, self.MIN_NPLURALS)
else:
return self.MIN_NPLURALS
def multiline_escape(string):
if EMBEDDED_NEWLINE_MATCHER.search(string):
lines = string.split('\n')
return (
'""\n'
+ '\n'.join('"{}\\n"'.format(escape(line)) for line in lines[:-1])
+ ('\n"{}"'.format(escape(lines[-1])) if len(lines[-1]) else ""))
else:
return '"{}"'.format(escape(string))
def get_msgid(message, context=None):
if context is not None:
return '{}\x04{}'.format(context, message)
else:
return message
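# Example: get_msgid('Open', context='menu') returns 'menu\x04Open';
# '\x04' (EOT) is the conventional gettext context separator.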
def get_entry_sort_key(entry):
return entry.locations, entry.context if entry.context else '', entry.message
|
kmichel/po-localization
|
po_localization/po_file.py
|
Python
|
mit
| 6,436
|
import sys
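# Validates the input, then checks the study plan against the ordering
# constraints: any two courses from one constraint list that both appear
# in the plan must keep their relative order; prints YES or NO.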
try:
data = map(int, sys.stdin.readline().split())
except ValueError:
sys.stdout.write("NO" + '\n')
exit()
if not data:
sys.stdout.write("NO" + '\n')
exit()
if len(data) != 2:
sys.stdout.write("NO" + '\n')
exit()
if data[0] < 1 or data[0] > 1000:
sys.stdout.write("NO" + '\n')
exit()
if data[1] < 0 or data[1] > 100000:
sys.stdout.write("NO" + '\n')
exit()
if data[1] == 0:
sys.stdout.write("YES" + '\n')
exit()
constraints = []
try:
for i in range(data[1]):
constraints.append(map(int, sys.stdin.readline().split()))
if sum(1 for number in constraints[i] if number > data[0] or number < 1) > 0:
sys.stdout.write("NO" + '\n')
exit()
except ValueError:
sys.stdout.write("NO" + '\n')
exit()
studyplan = []
try:
studyplan = map(int, sys.stdin.readline().split())
if sum(1 for number in studyplan if number > data[0] or number < 1) > 0:
sys.stdout.write("NO" + '\n')
exit()
except ValueError:
sys.stdout.write("NO" + '\n')
exit()
for item in constraints:
for i in range(len(item)):
for j in range(i+1, len(item)):
if item[i] in studyplan and item[j] in studyplan:
if studyplan.index(item[i]) > studyplan.index(item[j]):
sys.stdout.write("NO" + '\n')
exit()
sys.stdout.write("YES" + '\n')
|
Arnukk/IEEE-XTREME-8.0-Problems
|
IEEEXTREME/rano.py
|
Python
|
mit
| 1,429
|
#!/usr/bin/env python
"""alerts.py Classes for sendings alerts
"""
__author__ = "Jean-Martin Archer"
__copyright__ = "Copyright 2013, MIT License."
import smtplib
from twilio.rest import TwilioRestClient
from vendors.pushbullet.pushbullet import PushBullet
import configuration
class Alerts(object):
"""<ac:image ac:thumbnail="true" ac:width="300">for alerts"""
def __init__(self, config_path='./config/'):
self.config = configuration.load(config_path)
self.register()
def register(self):
alerts = self.config['alerts']
alerts_list = []
        # Map each enabled channel in the config onto its alert class.
        if alerts['sms']['on']:
            alerts_list.append(AlertSMS(alerts['sms']))
        if alerts['pushbullet']['on']:
            alerts_list.append(AlertPushBullet(alerts['pushbullet']))
        if alerts['email']['on']:
            alerts_list.append(AlertEmail(alerts['email']))
self.alerts = alerts_list
def send(self, message):
for alert in self.alerts:
alert.send_notification(message)
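# Minimal usage sketch, assuming './config/' holds the settings that
# configuration.load() expects:
#   alerts = Alerts('./config/')
#   alerts.send('Temperature threshold exceeded')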
class BasicAlert(object):
"""<ac:image ac:thumbnail="true" ac:width="300">for BasicAlert class. This is more an interface/contract
than anything else"""
def __init__(self, config):
self.config = config
self.setup()
def setup(self):
raise NotImplementedError
def send_notification(self, message):
raise NotImplementedError
class AlertEmail(BasicAlert):
"""<ac:image ac:thumbnail="true" ac:width="300">for AlertEmail"""
def setup(self):
self.sender = self.config['email_sender']
self.receivers = self.config['email_receivers']
self.server = self.config['server']
    def send_notification(self, message):
        email_body = """From: Alert <%s>
To: Alert <%s>
Subject: %s

This is a test e-mail message.
""" % (self.sender, self.receivers, message)
try:
smtpObj = smtplib.SMTP(self.server)
smtpObj.sendmail(self.sender, self.receivers, email_body)
print "Successfully sent AlertEmail"
        except smtplib.SMTPException:
print "Error: unable to send AlertEmail"
class AlertPushBullet(BasicAlert):
"""<ac:image ac:thumbnail="true" ac:width="300">for AlertPushBullet. Get you api key from
https://www.PushBullet.com/account
Use the pyPushBullet API to know which deviceID to use.
"""
def setup(self):
self.push = PushBullet(self.config['apikey'])
def send_notification(self, message):
for device in self.config['device']:
self.push.pushNote(device, message, message)
def get_device_id(self):
print self.push.getDevices()
class AlertSMS(BasicAlert):
"""<ac:image ac:thumbnail="true" ac:width="300">for AlertSMS, uses your twilio.com account."""
def setup(self):
# Your Account Sid and Auth Token from twilio.com/user/account
account_sid = self.config['twilio_sid']
auth_token = self.config['twilio_auth_token']
self.client = TwilioRestClient(account_sid, auth_token)
        self.create = self.client.sms.messages.create
def send_notification(self, message):
message = self.create(body=message,
to=self.config['to_number'],
from_=self.config["from_number"])
|
j-martin/raspberry-gpio-zmq
|
raspzmq/alerts.py
|
Python
|
mit
| 3,371
|
# -*- coding: utf-8 -*-
from flask import Flask
from flask.views import http_method_funcs
from .auth import secure
from .config import config_autoapi
from .messages import message
from .operations import invalid_operation, login, logout, password, roles, user
from .views import get, post, delete, put, patch
class AutoApi(Flask):
def __init__(self, auth=False, cors=True, port=None):
super(AutoApi, self).__init__(self.__class__.__name__)
self.auth = auth
self.cors = cors
config_autoapi(self, cors=cors, force_port=port)
# AutoApi operation routes
self.prefix = 'AutoApi'
self.load_operations()
# Customize routes
self.prefix = self.__class__.__name__
self.load_more_routes()
# AutoApi rest routes
self.prefix = 'AutoApi'
self.load_api_rest()
def welcome(self):
return message('Welcome to AutoApi.')
def _name(self, view):
return '{prefix}.{name}'.format(prefix=self.prefix, name=view.__name__)
def add(
self, path, view, api=None, skip_auth=False,
method='POST', role=None, all_methods=False, no_api=False
):
"""" Bind path with view on AutoApi """
auth = self.auth and not skip_auth
params = dict(view=view, role=role, api=api, auth=auth, no_api=no_api)
self.add_url_rule(
path, endpoint=self._name(view), view_func=secure(self, **params),
methods=all_methods and list(http_method_funcs) or [method]
)
def route(self, path, **kwargs):
""" Decorator to bind path with view on AutoApi """
        def wrapper(view):
            self.add(path, view, **kwargs)
            return view
        return wrapper
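    # Hypothetical usage of the decorator above (names are illustrative):
    #   app = AutoApi()
    #   @app.route('/ping', method='GET', skip_auth=True)
    #   def ping():
    #       return message('pong')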
def load_operations(self):
""" Bind operations related with Authentication & Authorization """
skip_all_params = dict(skip_auth=True, all_methods=True, no_api=True)
# AutoApi welcome message
self.add('/', lambda: self.welcome(), **skip_all_params)
# Invalid operation message
self.add('/<api>', invalid_operation, **skip_all_params)
# AutoApi auth operations
if self.auth:
self.add('/login', login, skip_auth=True)
self.add('/logout', logout)
self.add('/user', user, role='admin')
self.add('/password', password)
self.add('/roles', roles, role='admin')
def load_api_rest(self):
""" Bind automatic API REST for AutoApi """
path = '/<api>/<path:path>'
self.add(path, get, method='GET', role='read')
self.add(path, post, method='POST', role='create')
self.add(path, delete, method='DELETE', role='delete')
self.add(path, put, method='PUT', role='update')
self.add(path, patch, method='PATCH', role='update')
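        # e.g. "GET /<api>/users" is dispatched to the `get` view; when
        # auth is enabled, secure() is expected to enforce the 'read' role.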
def load_more_routes(self):
""" Implement this method to add more routes """
pass
|
fvalverd/AutoApi
|
auto_api/__init__.py
|
Python
|
mit
| 2,932
|
from django.db import models
from django.contrib.auth.models import User
class IntegerRangeField(models.IntegerField):
def __init__(self, verbose_name=None, name=None, min_value=None, max_value=None, **kwargs):
self.min_value, self.max_value = min_value, max_value
models.IntegerField.__init__(self, verbose_name, name, **kwargs)
def formfield(self, **kwargs):
defaults = {'min_value': self.min_value, 'max_value':self.max_value}
defaults.update(kwargs)
return super(IntegerRangeField, self).formfield(**defaults)
# Create your models here.
class Bin(models.Model):
description = models.CharField(max_length=300, null=True)
long = models.DecimalField(decimal_places=7, max_digits=10)
lat = models.DecimalField(decimal_places=7, max_digits=10)
access = models.CharField(max_length=300, null=True)
image = models.URLField(null=True)
asset = models.CharField(null=True, max_length=300)
def __unicode__(self):
return 'ID:{0} {1}'.format(self.pk, self.description)
class Found(models.Model):
user = models.ForeignKey(User)
bin = models.ForeignKey(Bin)
date_added = models.DateField(auto_now_add=True)
difficulty = IntegerRangeField(min_value=1, max_value=5)
overflowing = models.BooleanField(default=False)
notes = models.CharField(max_length=140)
def __str__(self):
return '{0} found {1} on {2}'.format(self.user.username, self.bin.asset, self.date_added)
|
bath-hacker/binny
|
binny/db/models.py
|
Python
|
mit
| 1,478
|
from .viewsets import *
from rest_framework import routers
# Routers provide an easy way of automatically determining the URL conf.
router = routers.DefaultRouter()
router.register(r'person', PersonViewSet)
router.register(r'skill', SkillViewSet)
router.register(r'mycontent', MyContentViewSet)
router.register(r'job', JobViewSet)
router.register(r'course', CourseViewSet)
router.register(r'post', PostViewSet)
router.register(r'contact', ContactViewSet)
|
italomandara/mysite
|
myresume/routers.py
|
Python
|
mit
| 456
|
# encoding: utf8
from __future__ import print_function
import argparse
import os
import sys
import pokedex.cli.search
import pokedex.db
import pokedex.db.load
import pokedex.db.tables
import pokedex.lookup
from pokedex import defaults
def main(junk, *argv):
    if len(argv) <= 0:
        # Without arguments there is no parsed namespace to hand to
        # command_help(), so just print the usage summary directly.
        create_parser().print_help()
        return
parser = create_parser()
args = parser.parse_args(argv)
args.func(parser, args)
def setuptools_entry():
main(*sys.argv)
def create_parser():
"""Build and return an ArgumentParser.
"""
# Slightly clumsy workaround to make both `setup -v` and `-v setup` work
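    # e.g. "pokedex -v setup" and "pokedex setup -v" both set args.verbose.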
common_parser = argparse.ArgumentParser(add_help=False)
common_parser.add_argument(
'-e', '--engine', dest='engine_uri', default=None,
help=u'By default, all commands try to use a SQLite database '
u'in the pokedex install directory. Use this option (or '
u'a POKEDEX_DB_ENGINE environment variable) to specify an '
u'alternate database.',
)
common_parser.add_argument(
'-i', '--index', dest='index_dir', default=None,
help=u'By default, all commands try to put the lookup index in '
u'the pokedex install directory. Use this option (or a '
u'POKEDEX_INDEX_DIR environment variable) to specify an '
             u'alternate location.',
)
common_parser.add_argument(
'-q', '--quiet', dest='verbose', action='store_false',
help=u'Don\'t print system output. This is the default for '
'non-system commands and setup.',
)
common_parser.add_argument(
'-v', '--verbose', dest='verbose', default=False, action='store_true',
help=u'Print system output. This is the default for system '
u'commands, except setup.',
)
parser = argparse.ArgumentParser(
prog='pokedex', description=u'A command-line Pokédex interface',
parents=[common_parser],
)
cmds = parser.add_subparsers(title='Commands')
cmd_help = cmds.add_parser(
'help', help=u'Display this message',
parents=[common_parser])
cmd_help.set_defaults(func=command_help)
cmd_lookup = cmds.add_parser(
'lookup', help=u'Look up something in the Pokédex',
parents=[common_parser])
cmd_lookup.set_defaults(func=command_lookup)
cmd_lookup.add_argument('criteria', nargs='+')
cmd_search = cmds.add_parser(
'search', help=u'Find things by various criteria',
parents=[common_parser])
pokedex.cli.search.configure_parser(cmd_search)
cmd_load = cmds.add_parser(
'load', help=u'Load Pokédex data into a database from CSV files',
parents=[common_parser])
cmd_load.set_defaults(func=command_load, verbose=True)
# TODO get the actual default here
cmd_load.add_argument(
'-d', '--directory', dest='directory', default=None,
help="directory containing the CSV files to load")
cmd_load.add_argument(
'-D', '--drop-tables', dest='drop_tables', default=False, action='store_true',
help="drop all tables before loading data")
cmd_load.add_argument(
'-r', '--recursive', dest='recursive', default=False, action='store_true',
help="load and drop all dependent tables (default is to use exactly the given list)")
cmd_load.add_argument(
'-S', '--safe', dest='safe', default=False, action='store_true',
help="disable database-specific optimizations, such as Postgres's COPY FROM")
# TODO need a custom handler for splittin' all of these
cmd_load.add_argument(
'-l', '--langs', dest='langs', default=None,
help="comma-separated list of language codes to load, or 'none' (default: all)")
cmd_load.add_argument(
'tables', nargs='*',
help="list of database tables to load (default: all)")
cmd_dump = cmds.add_parser(
'dump', help=u'Dump Pokédex data from a database into CSV files',
parents=[common_parser])
cmd_dump.set_defaults(func=command_dump, verbose=True)
cmd_dump.add_argument(
'-d', '--directory', dest='directory', default=None,
help="directory to place the dumped CSV files")
cmd_dump.add_argument(
'-l', '--langs', dest='langs', default=None,
help="comma-separated list of language codes to load, 'none', or 'all' (default: en)")
cmd_dump.add_argument(
'tables', nargs='*',
help="list of database tables to load (default: all)")
cmd_reindex = cmds.add_parser(
'reindex', help=u'Rebuild the lookup index from the database',
parents=[common_parser])
cmd_reindex.set_defaults(func=command_reindex, verbose=True)
cmd_setup = cmds.add_parser(
'setup', help=u'Combine load and reindex',
parents=[common_parser])
cmd_setup.set_defaults(func=command_setup, verbose=False)
cmd_status = cmds.add_parser(
'status', help=u'Print which engine, index, and csv directory would be used for other commands',
parents=[common_parser])
cmd_status.set_defaults(func=command_status, verbose=True)
return parser
def get_session(args):
"""Given a parsed options object, connects to the database and returns a
session.
"""
engine_uri = args.engine_uri
got_from = 'command line'
if engine_uri is None:
engine_uri, got_from = defaults.get_default_db_uri_with_origin()
session = pokedex.db.connect(engine_uri)
if args.verbose:
print("Connected to database %(engine)s (from %(got_from)s)"
% dict(engine=session.bind.url, got_from=got_from))
return session
def get_lookup(args, session=None, recreate=False):
"""Given a parsed options object, opens the whoosh index and returns a
PokedexLookup object.
"""
if recreate and not session:
raise ValueError("get_lookup() needs an explicit session to regen the index")
index_dir = args.index_dir
got_from = 'command line'
if index_dir is None:
index_dir, got_from = defaults.get_default_index_dir_with_origin()
if args.verbose:
print("Opened lookup index %(index_dir)s (from %(got_from)s)"
% dict(index_dir=index_dir, got_from=got_from))
lookup = pokedex.lookup.PokedexLookup(index_dir, session=session)
if recreate:
lookup.rebuild_index()
return lookup
def get_csv_directory(args):
"""Prints and returns the csv directory we're about to use."""
if not args.verbose:
return
csvdir = args.directory
got_from = 'command line'
if csvdir is None:
csvdir, got_from = defaults.get_default_csv_dir_with_origin()
print("Using CSV directory %(csvdir)s (from %(got_from)s)"
% dict(csvdir=csvdir, got_from=got_from))
return csvdir
### Plumbing commands
def command_dump(parser, args):
session = get_session(args)
get_csv_directory(args)
if args.langs is not None:
langs = [l.strip() for l in args.langs.split(',')]
else:
langs = None
pokedex.db.load.dump(
session,
directory=args.directory,
tables=args.tables,
verbose=args.verbose,
langs=langs,
)
def command_load(parser, args):
if not args.engine_uri:
print("WARNING: You're reloading the default database, but not the lookup index. They")
print(" might get out of sync, and pokedex commands may not work correctly!")
print("To fix this, run `pokedex reindex` when this command finishes. Or, just use")
print("`pokedex setup` to do both at once.")
print()
if args.langs == 'none':
langs = []
elif args.langs is None:
langs = None
else:
langs = [l.strip() for l in args.langs.split(',')]
session = get_session(args)
get_csv_directory(args)
pokedex.db.load.load(
session,
directory=args.directory,
drop_tables=args.drop_tables,
tables=args.tables,
verbose=args.verbose,
safe=args.safe,
recursive=args.recursive,
langs=langs,
)
def command_reindex(parser, args):
session = get_session(args)
get_lookup(args, session=session, recreate=True)
print("Recreated lookup index.")
def command_setup(parser, args):
args.directory = None
session = get_session(args)
get_csv_directory(args)
pokedex.db.load.load(
session, directory=None, drop_tables=True,
verbose=args.verbose, safe=False)
get_lookup(args, session=session, recreate=True)
print("Recreated lookup index.")
def command_status(parser, args):
args.directory = None
# Database, and a lame check for whether it's been inited at least once
session = get_session(args)
print(" - OK! Connected successfully.")
if pokedex.db.tables.Pokemon.__table__.exists(session.bind):
print(" - OK! Database seems to contain some data.")
else:
print(" - WARNING: Database appears to be empty.")
# CSV; simple checks that the dir exists
csvdir = get_csv_directory(args)
if not os.path.exists(csvdir):
print(" - ERROR: No such directory!")
elif not os.path.isdir(csvdir):
print(" - ERROR: Not a directory!")
else:
print(" - OK! Directory exists.")
if os.access(csvdir, os.R_OK):
print(" - OK! Can read from directory.")
else:
print(" - ERROR: Can't read from directory!")
if os.access(csvdir, os.W_OK):
print(" - OK! Can write to directory.")
else:
print(" - WARNING: Can't write to directory! "
"`dump` will not work. You may need to sudo.")
# Index; the PokedexLookup constructor covers most tests and will
# cheerfully bomb if they fail
get_lookup(args, recreate=False)
print(" - OK! Opened successfully.")
### User-facing commands
def command_lookup(parser, args):
name = u' '.join(args.criteria)
session = get_session(args)
lookup = get_lookup(args, session=session, recreate=False)
results = lookup.lookup(name)
if not results:
print("No matches.")
elif results[0].exact:
print("Matched:")
else:
print("Fuzzy-matched:")
for result in results:
if hasattr(result.object, 'full_name'):
name = result.object.full_name
else:
name = result.object.name
print("%s: %s" % (result.object.__tablename__, name), end='')
if result.language:
print("(%s in %s)" % (result.name, result.language))
else:
print()
def command_help(parser, args):
parser.print_help()
if __name__ == '__main__':
main(*sys.argv)
|
mschex1/pokedex
|
pokedex/main.py
|
Python
|
mit
| 10,768
|
from django.apps import AppConfig
class ScrapperConfig(AppConfig):
name = 'scrapper'
|
shashank-sharma/mythical-learning
|
scrapper/apps.py
|
Python
|
mit
| 91
|
from setuptools import setup, find_packages
setup(
name = "FreeCite",
version = "0.1",
py_modules = ['freecite'],
#install requirements
install_requires = [
'requests==1.1.0'
],
#author details
author = "James Ravenscroft",
author_email = "ravenscroftj@gmail.com",
description = "A wrapper around the FreeCite REST API",
url = "http://wwww.github.com/ravenscroftj/freecite"
)
|
ravenscroftj/freecite
|
setup.py
|
Python
|
mit
| 446
|
from django.conf.urls import patterns, url, include
urlpatterns = patterns('',
('', include('imago.urls')),
url(r'^report/(?P<module_name>[a-z0-9_]+)/$', 'reports.views.report', name='report'),
url(r'^represent/(?P<module_name>[a-z0-9_]+)/$', 'reports.views.represent', name='represent'),
url(r'^warnings/$', 'reports.views.warnings', name='warnings'),
url(r'^$', 'reports.views.home', name='home'),
)
|
datamade/scrapers_ca_app
|
scrapers_ca_app/urls.py
|
Python
|
mit
| 423
|