text stringlengths 4 1.02M | meta dict |
|---|---|
"""Tests for the view with a list of projects."""
from tests import profile_utils
from tests import test_utils
from tests.utils import project_utils
class ProjectListTest(test_utils.GSoCDjangoTestCase):
  """Unit tests for the GSoC projects list page."""

  def setUp(self):
    self.init()

  def assertProjectTemplatesUsed(self, response):
    """Asserts that the base page and the project list templates rendered."""
    self.assertGSoCTemplatesUsed(response)
    for template in ('modules/gsoc/projects_list/base.html',
                     'modules/gsoc/projects_list/_project_list.html'):
      self.assertTemplateUsed(response, template)

  def testListProjects(self):
    """The list starts empty and contains a seeded project afterwards."""
    self.timeline_helper.studentsAnnounced()
    list_url = '/gsoc/projects/list/' + self.gsoc.key().name()

    page_response = self.get(list_url)
    self.assertResponseOK(page_response)
    self.assertProjectTemplatesUsed(page_response)

    # No project has been seeded yet, so the JSON list must be empty.
    list_response = self.getListResponse(list_url, 0)
    self.assertIsJsonResponse(list_response)
    self.assertEqual(0, len(list_response.context['data']['']))

    # Seed exactly one project; the list should now contain one row with
    # all the expected columns.
    student = profile_utils.seedSOCStudent(self.program)
    project_utils.seedProject(student, self.program.key(), org_key=self.org.key)

    list_response = self.getListResponse(list_url, 0)
    self.assertIsJsonResponse(list_response)
    rows = list_response.context['data']['']
    self.assertEqual(1, len(rows))
    for column in ('key', 'title', 'mentors', 'student', 'org'):
      self.assertIn(column, rows[0]['columns'])
| {
"content_hash": "bff571d27903073ed5a4f239cbb291d2",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 80,
"avg_line_length": 33.91304347826087,
"alnum_prop": 0.7121794871794872,
"repo_name": "rhyolight/nupic.son",
"id": "0d6803753b399d60cb7a0f3aaaefe83adbb1e2ca",
"size": "2143",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/app/soc/modules/gsoc/views/test_project_list.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "681301"
},
{
"name": "JavaScript",
"bytes": "392600"
},
{
"name": "PHP",
"bytes": "217376"
},
{
"name": "Python",
"bytes": "5162564"
}
],
"symlink_target": ""
} |
"""
_models_admin.py
"""
_models_admin_code = '''# coding: utf-8
"""
sql models
use: Flask-SQLAlchemy
-- http://flask-sqlalchemy.pocoo.org/2.1/
"""
from . import db, login_manager
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import UserMixin, AnonymousUserMixin, current_user
from wtforms.validators import Email
# permissions
class Permission:
"""
1. COMMENT: 0x01
2. MODERATE_COMMENTS: 0x02
3. ADMINISTER: 0x04
"""
COMMENT = 0x01
MODERATE_COMMENTS = 0x02
ADMINISTER = 0x04
# user roles
class Role(db.Model):
"""
1. User: COMMENT
2. Moderator: MODERATE_COMMENTS
3. Administrator: ADMINISTER
"""
__tablename__ = 'roles'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64), unique=True)
default = db.Column(db.Boolean, default=False, index=True)
permissions = db.Column(db.Integer)
users = db.relationship('User', backref='role', lazy='dynamic')
@staticmethod
def insert_roles():
roles = {
'User': (Permission.COMMENT, True),
'Moderator': (Permission.COMMENT |
Permission.MODERATE_COMMENTS, False),
'Administrator': (
Permission.COMMENT |
Permission.MODERATE_COMMENTS |
Permission.ADMINISTER,
False
)
}
for r in roles:
role = Role.query.filter_by(name=r).first()
if role is None:
role = Role(name=r)
role.permissions = roles[r][0]
role.default = roles[r][1]
db.session.add(role)
db.session.commit()
def __repr__(self):
return '<Role %r>' % self.name
class User(db.Model, UserMixin):
"""user"""
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(164), unique=True, index=True)
email = db.Column(db.String(164), info={'validator' : Email()})
role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))
password_hash = db.Column(db.String(164))
@property
def password(self):
raise AttributeError('password is not readable')
@password.setter
def password(self, password):
self.password_hash = generate_password_hash(password)
def verify_password(self, password):
return check_password_hash(self.password_hash, password)
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
def is_admin(self):
if self.role_id == 2:
return True
return False
def __repr__(self):
return "<User %r>" % self.username
class AnonymousUser(AnonymousUserMixin):
""" anonymous user """
def is_admin(self):
return False
login_manager.anonymous_user = AnonymousUser
# you can writing your models here:
'''
| {
"content_hash": "6e3ca1be7014c4d7742c8546b9e1ddee",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 73,
"avg_line_length": 25.86842105263158,
"alnum_prop": 0.6035944387928112,
"repo_name": "neo1218/mana",
"id": "8ca875e9558a6a4bc7020f9d1614e58644f75013",
"size": "2965",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mana/templates/models/_models_admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "31876"
}
],
"symlink_target": ""
} |
from django.core.handlers.wsgi import WSGIRequest
from django.test import TestCase
import mock
from nose.tools import eq_, ok_
from rest_framework.serializers import Serializer, ValidationError
from test_utils import RequestFactory
from mkt.users.models import UserProfile
from mkt.api.serializers import PotatoCaptchaSerializer, URLSerializerMixin
from mkt.site.fixtures import fixture
class TestPotatoCaptchaSerializer(TestCase):
    """Tests for the PotatoCaptchaSerializer honeypot captcha.

    For anonymous users the serializer is valid only when 'tuber' is empty
    and 'sprout' equals 'potato'; authenticated users validate even with
    empty data (see test_success_authenticated).
    """
    fixtures = fixture('user_999')

    def setUp(self):
        # Fake request with an anonymous user; the captcha applies to
        # anonymous submissions.
        self.request = mock.Mock()
        self.request.META = {}
        self.request.user = mock.Mock()
        self.context = {'request': self.request}
        self.request.user.is_authenticated = lambda: False
        # The "correct" captcha answer: empty honeypot, sprout == 'potato'.
        self.data = {'tuber': '', 'sprout': 'potato'}

    def test_success_authenticated(self):
        # Authenticated users pass validation even with no captcha data.
        self.request.user = UserProfile.objects.get(id=999)
        self.request.user.is_authenticated = lambda: True
        serializer = PotatoCaptchaSerializer(data={}, context=self.context)
        eq_(serializer.is_valid(), True)

    def test_success_anonymous(self):
        data = {'tuber': '', 'sprout': 'potato'}
        serializer = PotatoCaptchaSerializer(data=data, context=self.context)
        eq_(serializer.is_valid(), True)

    def test_no_context(self):
        # The serializer requires a request context to build the captcha.
        data = {'tuber': '', 'sprout': 'potato'}
        with self.assertRaises(ValidationError):
            PotatoCaptchaSerializer(data=data)

    def test_error_anonymous_bad_tuber(self):
        # A filled honeypot field ('tuber') marks the submission as a bot.
        data = {'tuber': 'HAMMMMMMMMMMMMM', 'sprout': 'potato'}
        serializer = PotatoCaptchaSerializer(data=data, context=self.context)
        eq_(serializer.is_valid(), False)

    def test_error_anonymous_bad_sprout(self):
        data = {'tuber': 'HAMMMMMMMMMMMMM', 'sprout': ''}
        serializer = PotatoCaptchaSerializer(data=data, context=self.context)
        eq_(serializer.is_valid(), False)

    def test_error_anonymous_bad_tuber_and_sprout(self):
        serializer = PotatoCaptchaSerializer(data={}, context=self.context)
        eq_(serializer.is_valid(), False)
class TestURLSerializerMixin(TestCase):
    """Tests for URLSerializerMixin's URL reversal in get_url()."""
    # Minimal serializer class mixing URLSerializerMixin into a plain
    # Serializer; its Meta is filled in per-test in setUp().
    SerializerClass = type('Potato', (URLSerializerMixin, Serializer),
                           {'Meta': None})
    Struct = type('Struct', (object,), {})
    url_basename = 'potato'

    def setUp(self):
        self.SerializerClass.Meta = type('Meta', (self.Struct,),
                                         {'model': UserProfile,
                                          'url_basename': self.url_basename})
        self.request = RequestFactory().get('/')
        self.request.API_VERSION = 1
        self.serializer = self.SerializerClass(context=
                                               {'request': self.request})
        # Bare object standing in for a model instance with a primary key.
        self.obj = self.Struct()
        self.obj.pk = 42

    @mock.patch('mkt.api.serializers.reverse')
    def test_get_url(self, mock_reverse):
        self.serializer.get_url(self.obj)
        reverse_args, reverse_kwargs = mock_reverse.call_args
        ok_(mock_reverse.called)
        # The reversed URL name is '<url_basename>-detail' and the object's
        # pk and the original request are forwarded to reverse().
        eq_(reverse_args[0], '%s-detail' % self.url_basename)
        eq_(type(reverse_kwargs['request']), WSGIRequest)
        eq_(reverse_kwargs['kwargs']['pk'], self.obj.pk)
| {
"content_hash": "0510ab655ae8a2201b53b4ec563fab85",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 77,
"avg_line_length": 38.925925925925924,
"alnum_prop": 0.6422454804947669,
"repo_name": "ngokevin/zamboni",
"id": "4ba0a5ee475ec8cd2fad86c9e159effc237ed5ff",
"size": "3177",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mkt/api/tests/test_serializer.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "356777"
},
{
"name": "JavaScript",
"bytes": "536388"
},
{
"name": "Python",
"bytes": "3883015"
},
{
"name": "Shell",
"bytes": "13597"
}
],
"symlink_target": ""
} |
# Game-surface dimensions in pixels.
WIDTH = 640
HEIGHT = 480


class Ball(ZRect): pass


#
# The ball is a red square halfway across the game screen
#
ball = Ball(0, 0, 30, 30)
ball.center = WIDTH / 2, HEIGHT / 2
ball.colour = "red"

#
# The ball moves one step right and one step down each tick
#
ball.direction = 1, 1

#
# The ball moves at a speed of 3 steps each tick
#
ball.speed = 3


class Bat(ZRect): pass


#
# The bat is a green oblong which starts just along the bottom
# of the screen and halfway across.
#
BAT_W = 150
BAT_H = 15
bat = Bat(WIDTH / 2, HEIGHT - BAT_H, BAT_W, BAT_H)
bat.colour = "green"
def draw():
    """Redraw the frame: clear the screen, then paint the ball and bat."""
    #
    # Clear the screen and place the ball at its current position
    #
    screen.clear()
    screen.draw.filled_rect(ball, ball.colour)
    screen.draw.filled_rect(bat, bat.colour)
def on_mouse_move(pos):
    """Keep the bat horizontally centred under the mouse pointer."""
    mouse_x, _mouse_y = pos
    bat.centerx = mouse_x
def update():
    """Advance the ball one tick and bounce it off the screen edges."""
    #
    # Move the ball along its current direction at its current speed
    #
    dx, dy = ball.direction
    ball.move_ip(ball.speed * dx, ball.speed * dy)

    #
    # Bounce the ball off the left or right walls
    #
    if ball.right >= WIDTH or ball.left <= 0:
        ball.direction = -dx, dy

    #
    # Bounce the ball off the top or bottom walls
    # (We'll remove this later when the bat and the
    # bricks are in place)
    #
    if ball.bottom >= HEIGHT or ball.top <= 0:
        ball.direction = dx, -dy
"content_hash": "d01c4d597f23ef69ba31c27b270f6fb6",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 68,
"avg_line_length": 23.578125,
"alnum_prop": 0.6096752816434725,
"repo_name": "westpark/wallball",
"id": "ffccbc7675126074baa9b3d3923d64b447d01836",
"size": "1509",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/steps/code/s2b.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
import sqlite3
import argparse
import boto.mturk.connection
TRIM_DIFFERENCE_MAX = 1.0
def main(video_db, mturk_db, sandbox, aws_access_key_id, aws_secret_access_key):
    """Command line tool to decide whether assignments are rejected.

    For each HIT in ``mturk_db`` whose status is 'pending_manual_approval',
    print how the worker's labels compare to the verified labels stored in
    ``video_db``, prompt the operator for a decision, submit the decision to
    MTurk and record 'approved'/'rejected' back into ``mturk_db``.

    Bug fix vs. the previous revision: answering 'n' at the prompt used to
    APPROVE the assignment because the boolean was inverted.
    """
    # TODO verify correct verification labels here
    sandbox_host = 'mechanicalturk.sandbox.amazonaws.com'
    real_host = 'mechanicalturk.amazonaws.com'
    host = sandbox_host if sandbox else real_host
    mturk = boto.mturk.connection.MTurkConnection(
        aws_access_key_id=aws_access_key_id,
        aws_secret_access_key=aws_secret_access_key,
        host=host,
        debug=1  # debug = 2 prints out all requests.
    )
    mturk_db = sqlite3.connect(mturk_db)
    video_db = sqlite3.connect(video_db)
    mturk_cur = mturk_db.cursor()
    db_cursor = video_db.cursor()
    try:
        # TODO make pending approval a separate table if we think that would be time-efficient
        mturk_cur.execute("SELECT assignment_id, hit_id, task FROM hits WHERE status='pending_manual_approval'")
    except sqlite3.Error as e:
        print(str(e))
        return
    query_result = mturk_cur.fetchall()
    # We need to loop through every assignment/hit set pending approval
    for result in query_result:
        assignment_id = str(result[0])
        hit_id = str(result[1])
        task = str(result[2])
        try:
            if task == "name":
                print("naming task!")
                # Compare the worker's noun/verb labels to the verified ones.
                mturk_cur.execute("SELECT id, action_noun, action_verb FROM name_verification_attempts WHERE hit_id=?", (hit_id,))
                for attempt_action_set in mturk_cur.fetchall():
                    db_cursor.execute("SELECT action_noun, action_verb FROM video_db WHERE id=?",
                                      (attempt_action_set[0],))
                    verified_action_set = db_cursor.fetchone()
                    if attempt_action_set[2] != verified_action_set[1]:
                        print("Video " + str(attempt_action_set[0]) + " label has verb "
                              + str(attempt_action_set[2])
                              + " but the verified had verb "
                              + str(verified_action_set[1]))
                        # NOTE(review): a verb mismatch stops checking further
                        # attempts while a noun mismatch does not; preserved
                        # from the original logic -- confirm this is intended.
                        break
                    if attempt_action_set[1] != verified_action_set[0]:
                        print("Video " + str(attempt_action_set[0]) + " label has noun "
                              + str(attempt_action_set[1])
                              + " but the verified had noun "
                              + str(verified_action_set[0]))
            else:  # ie. elif task == "trim":
                print(str(task) + "ming task!")
                # Compare the worker's trim times against the verified times,
                # allowing a tolerance of TRIM_DIFFERENCE_MAX seconds.
                mturk_cur.execute("SELECT id, start_time, end_time FROM trim_verification_attempts WHERE hit_id=?", (hit_id,))
                for attempt_times_set in mturk_cur.fetchall():
                    db_cursor.execute("SELECT start_time, end_time FROM video_db WHERE id=?",
                                      (attempt_times_set[0],))
                    verified_times_set = db_cursor.fetchone()
                    if abs(attempt_times_set[1] - verified_times_set[0]) > TRIM_DIFFERENCE_MAX:
                        print("Video " + str(attempt_times_set[0]) + " label has start time "
                              + str(attempt_times_set[1])
                              + " but the verified had start time "
                              + str(verified_times_set[0]))
                    if abs(attempt_times_set[2] - verified_times_set[1]) > TRIM_DIFFERENCE_MAX:
                        print("Video " + str(attempt_times_set[0]) + " label has end start time "
                              + str(attempt_times_set[2])
                              + " but the verified had end time "
                              + str(verified_times_set[1]))
                        break
        except sqlite3.Error as e:
            print(str(e))
            continue
        approval = str(raw_input("Approve? [Y/n]"))
        # Empty input (or anything without an 'n') approves; an answer
        # containing 'n'/'N' rejects.  (Previously this was inverted.)
        approved = not ('n' in approval or 'N' in approval)
        if approved:
            try:
                response = mturk.approve_assignment(assignment_id)
            except boto.mturk.connection.MTurkRequestError as e:
                print("MTurk verification rejected. Typically, this means the client's completion "
                      + "has not been completed on Amazon's end.")
                print(str(e))
                continue
            print(assignment_id + " approved. Amazon response: " + str(response))
            try:
                mturk_cur.execute('''UPDATE hits SET status='approved' WHERE hit_id=?''', (hit_id,))
                mturk_db.commit()
            except sqlite3.Error as e:
                print(str(e))
        else:
            try:
                response = mturk.reject_assignment(assignment_id)
            except boto.mturk.connection.MTurkRequestError as e:
                print("MTurk verification rejected. Typically, this means the client's completion "
                      + "has not been completed on Amazon's end.")
                print(str(e))
                continue
            print(assignment_id + " rejected. Amazon response: " + str(response))
            try:
                mturk_cur.execute('''UPDATE hits SET status='rejected' WHERE hit_id=?''', (hit_id,))
                mturk_db.commit()
            except sqlite3.Error as e:
                print(str(e))
def parse_args():
    """Define and parse the command-line options for the approval tool."""
    arg_parser = argparse.ArgumentParser(description='Accept or reject hits')
    arg_parser.add_argument('--video_db', dest='video_db', type=str,
                            default='video_db.db',
                            help='SQLite3 database with videos')
    arg_parser.add_argument('--mturk_db', dest='mturk_db', type=str,
                            default='mturk_db.db',
                            help='SQLite3 database with logs for mturk')
    arg_parser.add_argument('--sandbox', dest='sandbox', action='store_true',
                            default=False,
                            help='If this is a sandbox HIT (otherwise is a real one)')
    arg_parser.add_argument('--aws_key_id', dest='aws_access_key_id', type=str,
                            default='',
                            help='AWS Access Key ID')
    arg_parser.add_argument('--aws_key', dest='aws_secret_access_key', type=str,
                            default='',
                            help='AWS Secret Access Key')
    return arg_parser.parse_args()
def start_from_terminal():
    """Entry point: parse the CLI arguments and run the approval loop."""
    args = parse_args()
    main(args.video_db, args.mturk_db, args.sandbox, args.aws_access_key_id, args.aws_secret_access_key)


if __name__ == '__main__':
    start_from_terminal()
| {
"content_hash": "a415a11e2a74c5746cdf7139ea730c40",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 130,
"avg_line_length": 49.42758620689655,
"alnum_prop": 0.5265801590623692,
"repo_name": "happyharrycn/vatic_fpv",
"id": "357a5fe408e535bdf841d44929efaa983ce37fdd",
"size": "7167",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "simple_vatic/server/approve_assignments.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "21327"
},
{
"name": "HTML",
"bytes": "62958"
},
{
"name": "Python",
"bytes": "153174"
},
{
"name": "Shell",
"bytes": "472"
}
],
"symlink_target": ""
} |
# http://www.pythonchallenge.com/pc/return/5808.html
# If needed, use username and password from challenge 8
from os import path
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image, ImageOps
from utils import download_file
def solve11():
    """Solve Python Challenge #11 (cave.jpg).

    Downloads cave.jpg (HTTP basic auth credentials from challenge 8),
    prints a summary of the image, displays it, then displays the four
    pixel-parity sub-images (every combination of even/odd rows and
    columns) in one figure, and prints the magic word.
    """
    filepath = download_file(
        url='http://www.pythonchallenge.com/pc/return/cave.jpg',
        binf=True,
        username='huge',
        password='file'
    )
    filename = path.split(filepath)[1]

    try:
        if not path.exists(filepath):
            raise IOError('File does not exist')

        with Image.open(filepath, 'r') as img:
            width, height = img.size
            # Human-readable summary lines describing the downloaded image.
            template = (
                '{:<8}: {}'.format('Filename', filename),
                '{:<8}: {}'.format('Format', img.format),
                '{:<8}: {}'.format('Mode', img.mode),
                '{:<8}: {:,d} pixels'.format('Width', width),
                '{:<8}: {:,d} pixels'.format('Height', height),
                '{:<8}: {:,d} pixels'.format('Size', width * height)
            )
            pixels = np.asarray(img, dtype=np.uint8, order='F')
    except (IOError, OSError) as err:
        print('Cannot open:', filepath if filepath else '[not downloaded]')
        print(err.strerror if err.strerror else err)
    else:
        print('\n'.join(template), end='\n\n')
        del template, width, height

        # Show the full image first.
        plt.ioff()
        plt.figure(num=filename, frameon=False, clear=True)
        plt.imshow(pixels, interpolation=None, filternorm=1)
        plt.show()

        plt.ioff()
        plt.figure(num=filename, frameon=False, clear=True)

        # Even-row/even-column pixels, inverted and contrast-stretched.
        with Image.fromarray(pixels[0::2, 0::2]) as img:
            img.paste(ImageOps.invert(img))
            img.paste(ImageOps.autocontrast(img))
            part = np.asarray(img, dtype=np.uint8, order='F')

        # One subplot per row/column parity combination.
        plt.subplot(221)
        plt.axis('off')
        plt.imshow(part)

        plt.subplot(222)
        plt.axis('off')
        plt.imshow(pixels[1::2, 1::2])

        plt.subplot(223)
        plt.axis('off')
        plt.imshow(pixels[0::2, 1::2])

        plt.subplot(224)
        plt.axis('off')
        plt.imshow(pixels[1::2, 0::2])

        plt.show()
        print('Magic word: evil')


if __name__ == '__main__':
    solve11()
| {
"content_hash": "4e657a94b03f3074fd66af6fd3301c57",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 75,
"avg_line_length": 25.372340425531913,
"alnum_prop": 0.5220125786163522,
"repo_name": "f171a9a3497c8b/python_playground",
"id": "fc5a2e2c6217ef10398d127d8b3396fe71eb18e4",
"size": "2405",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Pythonchallenge solutions/pythonchallenge11.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "79279"
}
],
"symlink_target": ""
} |
from airflow.models import BaseOperator
from airflow.utils import apply_defaults
from airflow.exceptions import AirflowException
from airflow.contrib.hooks.emr_hook import EmrHook
class EmrTerminateJobFlowOperator(BaseOperator):
    """
    Operator to terminate EMR JobFlows.

    :param job_flow_id: id of the JobFlow to terminate
    :type job_flow_id: str
    :param aws_conn_id: aws connection to uses
    :type aws_conn_id: str
    """
    template_fields = ['job_flow_id']
    template_ext = ()
    ui_color = '#f9c915'

    @apply_defaults
    def __init__(
            self,
            job_flow_id,
            aws_conn_id='s3_default',
            *args, **kwargs):
        super(EmrTerminateJobFlowOperator, self).__init__(*args, **kwargs)
        self.job_flow_id = job_flow_id
        self.aws_conn_id = aws_conn_id

    def execute(self, context):
        """Terminate the configured JobFlow via the EMR API.

        :raises AirflowException: if EMR does not answer with HTTP 200.
        """
        emr = EmrHook(aws_conn_id=self.aws_conn_id).get_conn()

        self.log.info('Terminating JobFlow %s', self.job_flow_id)
        response = emr.terminate_job_flows(JobFlowIds=[self.job_flow_id])

        if response['ResponseMetadata']['HTTPStatusCode'] != 200:
            raise AirflowException('JobFlow termination failed: %s' % response)
        self.log.info('JobFlow with id %s terminated', self.job_flow_id)
| {
"content_hash": "3d0b95b08dfb12cbeb360a71c841fb61",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 79,
"avg_line_length": 33.666666666666664,
"alnum_prop": 0.6428027418126429,
"repo_name": "RealImpactAnalytics/airflow",
"id": "9d210922009e27fcb250fb8fc04e8b15fed4893b",
"size": "2126",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "airflow/contrib/operators/emr_terminate_job_flow_operator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "109698"
},
{
"name": "HTML",
"bytes": "270710"
},
{
"name": "JavaScript",
"bytes": "1988427"
},
{
"name": "Mako",
"bytes": "1284"
},
{
"name": "Python",
"bytes": "3765458"
},
{
"name": "Shell",
"bytes": "46923"
}
],
"symlink_target": ""
} |
"""
Support for monitoring energy usage using the DTE energy bridge.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.dte_energy_bridge/
"""
import logging
import voluptuous as vol
from homeassistant.helpers.entity import Entity
from homeassistant.components.sensor import PLATFORM_SCHEMA
import homeassistant.helpers.config_validation as cv
from homeassistant.const import CONF_NAME
_LOGGER = logging.getLogger(__name__)

# Configuration key for the bridge's IP address.
CONF_IP_ADDRESS = 'ip'

DEFAULT_NAME = 'Current Energy Usage'
ICON = 'mdi:flash'

# The bridge IP is required; the display name is optional.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_IP_ADDRESS): cv.string,
    vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
})
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Setup the DTE energy bridge sensor."""
    sensor = DteEnergyBridgeSensor(config.get(CONF_IP_ADDRESS),
                                   config.get(CONF_NAME))
    add_devices([sensor])
# pylint: disable=too-many-instance-attributes
class DteEnergyBridgeSensor(Entity):
    """Implementation of a DTE Energy Bridge sensor."""

    def __init__(self, ip_address, name):
        """Initialize the sensor with the bridge address and display name."""
        self._url = "http://{}/instantaneousdemand".format(ip_address)
        self._name = name
        self._unit_of_measurement = "kW"
        self._state = None

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def state(self):
        """Return the state of the sensor."""
        return self._state

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement of this entity, if any."""
        return self._unit_of_measurement

    @property
    def icon(self):
        """Icon to use in the frontend, if any."""
        return ICON

    def update(self):
        """Get the energy usage data from the DTE energy bridge."""
        import requests
        try:
            reply = requests.get(self._url, timeout=5)
        except (requests.exceptions.RequestException, ValueError):
            _LOGGER.warning(
                'Could not update status for DTE Energy Bridge (%s)',
                self._name)
            return

        if reply.status_code != 200:
            _LOGGER.warning(
                'Invalid status_code from DTE Energy Bridge: %s (%s)',
                reply.status_code, self._name)
            return

        # The bridge answers with exactly two whitespace-separated fields;
        # the first one is the numeric reading.
        fields = reply.text.split()
        if len(fields) != 2:
            _LOGGER.warning(
                'Invalid response from DTE Energy Bridge: "%s" (%s)',
                reply.text, self._name)
            return

        self._state = float(fields[0])
| {
"content_hash": "0d170ed37186e5295cb7bcbf58bbe5a8",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 74,
"avg_line_length": 29.05263157894737,
"alnum_prop": 0.6333333333333333,
"repo_name": "varunr047/homefile",
"id": "90b484f46dc52efa6fdaeabcd13c5435dbe1a8f2",
"size": "2760",
"binary": false,
"copies": "5",
"ref": "refs/heads/dev",
"path": "homeassistant/components/sensor/dte_energy_bridge.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1594834"
},
{
"name": "JavaScript",
"bytes": "1216"
},
{
"name": "Python",
"bytes": "3696131"
},
{
"name": "Ruby",
"bytes": "379"
},
{
"name": "Shell",
"bytes": "7255"
}
],
"symlink_target": ""
} |
"""Module for testing the add/del/show hub command."""
import unittest
if __name__ == "__main__":
import utils
utils.import_depends()
from brokertest import TestBrokerCommand
class TestHub(TestBrokerCommand):
    """Tests the add/del/show hub broker commands end to end."""

    def test_100_add_hub1_default_org(self):
        # No --organization given: the hub lands under the default org
        # (verified as "Organization ms" in test_130_verify_hub1).
        command = ["add", "hub", "--hub", "hub1", "--fullname",
                   "hub1 example", "--comments", "Some hub comments"]
        self.noouttest(command)

    def test_110_add_hubtest_org(self):
        command = ["add", "organization", "--organization", "hubtest",
                   "--fullname", "Hub Test, Inc"]
        self.noouttest(command)

    def test_115_add_hub2(self):
        # hub2 is attached to the explicitly created 'hubtest' organization.
        command = ["add", "hub", "--hub", "hub2", "--fullname", "hub2 example",
                   "--organization", "hubtest", "--comments", "Some other hub comments"]
        self.noouttest(command)

    def test_120_add_hk(self):
        self.noouttest(["add_hub", "--hub", "hk", "--organization", "ms",
                        "--fullname", "Non-Japan-Asia"])

    def test_120_add_ln(self):
        self.noouttest(["add_hub", "--hub", "ln", "--organization", "ms",
                        "--fullname", "Europa"])

    def test_120_add_ny(self):
        self.noouttest(["add_hub", "--hub", "ny", "--organization", "ms",
                        "--fullname", "Americas"])

    def test_130_verify_hub1(self):
        command = "show hub --hub hub1"
        out = self.commandtest(command.split(" "))
        self.matchoutput(out, "Hub: hub1", command)
        self.matchoutput(out, "  Fullname: hub1 example", command)
        self.matchoutput(out, "  Comments: Some hub comments", command)
        self.matchoutput(out, "  Location Parents: [Organization ms]", command)

    def test_130_verify_hub2(self):
        command = "show hub --hub hub2"
        out = self.commandtest(command.split(" "))
        self.matchoutput(out, "Hub: hub2", command)
        self.matchoutput(out, "  Fullname: hub2 example", command)
        self.matchoutput(out, "  Comments: Some other hub comments", command)
        self.matchoutput(out, "  Location Parents: [Organization hubtest]",
                         command)

    def test_130_show_all(self):
        command = "show hub --all"
        out = self.commandtest(command.split(" "))
        self.matchoutput(out, "Hub: hub1", command)
        self.matchoutput(out, "Hub: hub2", command)

    def test_200_add_hub1_net(self):
        # Attach a network to hub1 so the next test can exercise the
        # "cannot delete a hub that still has networks" error path.
        self.net.allocate_network(self, "hub1_net", 24, "unknown", "hub", "hub1",
                                  comments="Made-up network")

    def test_201_del_hub1_fail(self):
        command = "del hub --hub hub1"
        err = self.badrequesttest(command.split(" "))
        self.matchoutput(err,
                         "Bad Request: Could not delete hub hub1, networks "
                         "were found using this location.",
                         command)

    def test_202_cleanup_hub1_net(self):
        self.net.dispose_network(self, "hub1_net")

    def test_210_del_hub1(self):
        command = "del hub --hub hub1"
        self.noouttest(command.split(" "))

    def test_220_del_hub1_again(self):
        # Deleting an already-deleted hub must report "not found".
        command = "del hub --hub hub1"
        out = self.notfoundtest(command.split(" "))
        self.matchoutput(out, "Hub hub1 not found.", command)

    def test_230_del_hub2(self):
        command = "del hub --hub hub2"
        self.noouttest(command.split(" "))

    def test_240_del_hubtest_org(self):
        command = "del organization --organization hubtest"
        self.noouttest(command.split(" "))

    def test_250_add_hub_badname(self):
        # Hub names with spaces are rejected by input validation.
        command = ["add_hub", "--hub", "foo bar"]
        out = self.badrequesttest(command)
        self.matchoutput(out, "'foo bar' is not a valid value for Hub", command)

    def test_300_verify_hub1(self):
        command = "show hub --hub hub1"
        out = self.notfoundtest(command.split(" "))
        self.matchoutput(out, "Hub hub1 not found.", command)

    def test_300_verify_hub2(self):
        command = "show hub --hub hub2"
        out = self.notfoundtest(command.split(" "))
        self.matchoutput(out, "Hub hub2 not found.", command)

    def test_300_verify_all(self):
        command = "show hub --all"
        out = self.commandtest(command.split(" "))
        self.matchclean(out, "Hub: hub1", command)
        self.matchclean(out, "Hub: hub2", command)
if __name__ == '__main__':
    # Allow running this module's tests directly with verbose output.
    suite = unittest.TestLoader().loadTestsFromTestCase(TestHub)
    unittest.TextTestRunner(verbosity=2).run(suite)
| {
"content_hash": "99ba1128ca44dc12cce5e5ac57bc5259",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 88,
"avg_line_length": 37.833333333333336,
"alnum_prop": 0.5797356828193833,
"repo_name": "guillaume-philippon/aquilon",
"id": "3af3307cf4ea97a291170974b4c5f440b215b9c2",
"size": "5265",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/broker/test_hub.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "DIGITAL Command Language",
"bytes": "3791"
},
{
"name": "Makefile",
"bytes": "5024"
},
{
"name": "Mako",
"bytes": "3996"
},
{
"name": "PLSQL",
"bytes": "69088"
},
{
"name": "Perl",
"bytes": "5030"
},
{
"name": "Python",
"bytes": "4257490"
},
{
"name": "SQLPL",
"bytes": "869"
},
{
"name": "Shell",
"bytes": "22083"
}
],
"symlink_target": ""
} |
from wallace.db.redisdb.hash import RedisHash
from wallace.errors import ConfigError
class ExpiringRedisHash(RedisHash):
ttl = None # must explicitly set ttl
def __init__(self):
RedisHash.__init__(self)
if not isinstance(self.ttl, int) or self.ttl < 1:
raise ConfigError(801, 'int ttl >=1 required')
def write_to_db(self, state, _, pipe=None):
with self._pipe_manager(pipe) as pipe:
super(ExpiringRedisHash, self).write_to_db(state, _, pipe=pipe)
pipe.expire(self.key, self.ttl)
| {
"content_hash": "c4aa52cd3ced0b90f902fed572e248da",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 75,
"avg_line_length": 31,
"alnum_prop": 0.6415770609318996,
"repo_name": "csira/wallace",
"id": "abbcf7f99db8aa426a0c4df41116d788da7d7e3f",
"size": "558",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wallace/db/redisdb/expiring.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "73299"
},
{
"name": "Shell",
"bytes": "92"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import matplotlib
import numpy as np
import copy
import re
import itertools
from astropy import log
from astropy import units as u
from astropy.extern.six.moves import xrange
from ..config import mycfg
from ..config import ConfigDescriptor as cfgdec
from . import units
from . import models
from ..specwarnings import warn
from . import interactive
from . import history
from . import widgets
class Registry(object):
"""
This class is a simple wrapper to prevent fitter properties from being globals
"""
def __init__(self):
self.npars = {}
self.multifitters = {}
#to delete
self.peakbgfitters = {}
self.fitkeys = {}
self.associatedkeys = {}
self._interactive_help_message_root = """
'?' will print this help message again. The keys denoted by surrounding / / are
mnemonics.
1. Left-click or hit 'p' (/p/ick) with the cursor over the plot at both of the
two desired X-values to select a fitting range. You can e/x/clude parts of the
spectrum by hitting 'x' at two positions.
2. Then /m/iddle-click or hit 'm' twice to select (/m/ark) a peak and width -
the first mark should be on the peak of the line, the second should be at the
approximate half-max point on the curve.
3. When you're done, right-click or hit 'd' to perform the fit and disconnect
the mouse and keyboard (/d/isconnect because you're /d/one). Any time before
you're /d/one, you can select a different fitter (see below).
To /c/ancel or /c/lear all connections, press 'c'
'?' : get help (this message)
'c' : cancel / clear
'p','1' : pick / selection region for fitting
'm','2' : mark / identify a peak
'd','3' : done / do the fit, then disconnect the fitter
'i' : individual components / show each fitted component
You can select different fitters to use with the interactive fitting routine.
The default is gaussian ('g'), all options are listed below:
"""
self._make_interactive_help_message()
def __copy__(self):
# http://stackoverflow.com/questions/1500718/what-is-the-right-way-to-override-the-copy-deepcopy-operations-on-an-object-in-p
cls = self.__class__
result = cls.__new__(cls)
result.__dict__.update(self.__dict__)
return result
def __deepcopy__(self, memo):
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
for k, v in self.__dict__.items():
setattr(result, k, copy.deepcopy(v, memo))
return result
def add_fitter(self, name, function, npars, override=False, key=None,
multisingle=None):
'''
Register a fitter function.
Parameters
----------
name: string
The fit function name.
function: function
The fitter function. Single-fitters should take npars + 1 input
parameters, where the +1 is for a 0th order baseline fit. They
should accept an X-axis and data and standard fitting-function
inputs (see, e.g., gaussfitter). Multi-fitters should take N *
npars, but should also operate on X-axis and data arguments.
npars: int
How many parameters does the function being fit accept?
Other Parameters
----------------
override: True | False
Whether to override any existing type if already present.
key: char
Key to select the fitter in interactive mode
'''
if multisingle is not None:
warn("The 'multisingle' keyword is no longer required.",
DeprecationWarning)
if not name in self.peakbgfitters or override:
self.peakbgfitters[name] = function
if not name in self.multifitters or override:
self.multifitters[name] = function
if key is not None:
self.fitkeys[key] = name
self._make_interactive_help_message()
self.npars[name] = npars
self.associated_keys = dict(zip(self.fitkeys.values(),self.fitkeys.keys()))
def _make_interactive_help_message(self):
    """Rebuild the interactive help text from the registered fit keys."""
    key_lines = "\n".join("'%s' - select fitter %s" % (key, name)
                          for key, name in self.fitkeys.items())
    # trailing newline keeps the user's subsequent input on a fresh line
    self.interactive_help_message = (self._interactive_help_message_root
                                     + "\n" + key_lines + "\n")
# Declare the default registry built in for all spectra: every Spectrum
# gets these model fitters available by default.  Third arg is npars per
# component; `key` is the interactive-mode selection key.
default_Registry = Registry()
default_Registry.add_fitter('ammonia',models.ammonia_model(),6,key='a')
default_Registry.add_fitter('ammonia_tau',models.ammonia_model_vtau(),6)
# not implemented: default_Registry.add_fitter(Registry,'ammonia',models.ammonia_model( ),6, ,key='A')
default_Registry.add_fitter('formaldehyde',models.formaldehyde_fitter,3,key='F') # CAN'T USE f! reserved for fitting
# don't override: default_Registry.add_fitter('formaldehyde',models.formaldehyde_vheight_fitter,3)
default_Registry.add_fitter('gaussian',models.gaussian_fitter(),3,key='g')
default_Registry.add_fitter('vheightgaussian',models.gaussian_vheight_fitter(),4)
default_Registry.add_fitter('voigt',models.voigt_fitter(),4,key='v')
default_Registry.add_fitter('lorentzian',models.lorentzian_fitter(),3,key='L')
#default_Registry.add_fitter('hill5',models.hill5infall.hill5_fitter,5)
#default_Registry.add_fitter('hcn',models.hcn.hcn_vtau_fitter,4)
class Specfit(interactive.Interactive):
def __init__(self, Spectrum, Registry=None):
    """
    Initialize the fitting machinery attached to a Spectrum.

    Parameters
    ----------
    Spectrum : Spectrum
        The spectrum object this fitter operates on.
    Registry : Registry or None
        Registry of available model fitters.  If None, the module-level
        ``default_Registry`` is used.  (Previously, passing None -- the
        default -- crashed immediately with an AttributeError when
        ``Registry.interactive_help_message`` was accessed below.)
    """
    if Registry is None:
        Registry = default_Registry
    super(Specfit, self).__init__(Spectrum,
            interactive_help_message=Registry.interactive_help_message)
    # fit results (populated by multifit / peakbgfit)
    self.model = None
    self.parinfo = None
    self.modelpars = None
    self.modelerrs = None
    self.modelplot = []
    self.modelcomponents = None
    self._plotted_components = []
    self.npeaks = 0
    # interactive-mode state
    self.button2action = self.guesspeakwidth
    self.guesses = []
    self.click = 0
    self.fitkwargs = {}
    self.auto = False
    self.fitleg = None
    self.residuals = None
    self.setfitspec()
    self.fittype = 'gaussian'
    self.measurements = None
    self.vheight = False  # vheight must be a boolean, can't be None
    self._component_kwargs = {}
    self.Registry = Registry
    self.autoannotate = mycfg['autoannotate']
    self.EQW_plots = []
@cfgdec
def __call__(self, interactive=False, usemoments=True,
             clear_all_connections=True, debug=False, guesses='moments',
             parinfo=None, save=True, annotate=None, show_components=None,
             use_lmfit=False, verbose=True, clear=True,
             reset_selection=True,
             fit_plotted_area=True, use_window_limits=None, vheight=None,
             exclude=None, **kwargs):
    """
    Fit model functions to a spectrum

    Parameters
    ----------
    interactive : boolean
        The plotter window will go into interactive mode.  See
        self.interactive_help_message for details on how to use the
        interactive fitter.
    fittype : str
        [passed to fitting codes; defaults to gaussian]
        The model to use.  Model must be registered in self.Registry.
        gaussian, lorentzian, and voigt profiles are registered by default
    guesses : list or 'moments'
        A list of guesses.  Guesses must have length = n*number of
        parameters in model.  Guesses are *required* for multifit fits
        (there is no automated guessing for most models).
        EXAMPLE: for single-fit gaussian
        guesses = [height,amplitude,center,width]
        for multi-fit gaussian, it is [amplitude, center, width].
        You can also pass the keyword string 'moments' to have the moments
        be used to automatically determine the guesses for a *single* peak
    parinfo : `pyspeckit.spectrum.parinfo.ParinfoList`
        An alternative way to specify guesses.  Supercedes guesses.
    use_lmfit : boolean
        If lmfit-py (https://github.com/newville/lmfit-py) is installed,
        you can use it instead of the pure-python (but slow) mpfit.
    reset_selection : boolean
        Override any selections previously made using `fit_plotted_area`
        or other keywords?
    fit_plotted_area : boolean
        If no other limits are specified, the plotter's xmin/xmax will be
        used to define the fit region.  Only respects the x-axis limits,
        not the y-axis limits.
    use_window_limits : boolean
        If ``fit_plotted_area==True`` and no other limits are specified,
        will use the displayed window area (as set by the zoom tools) as
        the fitting range.  Only respects the x-axis limits, not the
        y-axis limits.
    exclude : None or list
        Passed to selectregion; specifies regions to exclude in xarr units

    Plotter-related Parameters
    --------------------------
    annotate : None or boolean
        If None, will use the default stored in self.annotate, otherwise
        overwrites.  Annotations will appear on the plot if a plotter
        exists.
    show_components : boolean
        Show the individual components of a multi-component fit (defaults
        to blue)
    clear : boolean
        Clear previous fitter plots before overplotting the fit?

    Advanced Parameters
    -------------------
    clear_all_connections : boolean
        Clear all of the interactive connections from a previous
        interactive session (e.g., a baseline fitting session) before
        continuing?
    usemoments : boolean
        Use the moments of the spectrum as input guesses.  Only works
        for gaussian and gaussian-like models.  Only works for single-fit
        mode (not multifit).  DEPRECATED
    debug : boolean
        Print debug statements?
    save : boolean
        Save best fits in the FITS header as keywords?  ONLY IMPLEMENTED
        FOR GAUSSIANS
    verbose : boolean
        Print out extra stuff
    vheight : None or boolean
        if None, defaults to self.vheight, otherwise overrides.
        Determines whether a 0th order baseline will be fit along with
        the line
    """
    if clear: self.clear()
    if reset_selection:
        self.selectregion(verbose=verbose, debug=debug,
                          fit_plotted_area=fit_plotted_area,
                          exclude=exclude,
                          use_window_limits=use_window_limits, **kwargs)
    # selection-related keywords must not leak into the fitter kwargs
    for arg in ['xmin','xmax','xtype','reset']:
        if arg in kwargs: kwargs.pop(arg)
    if 'multifit' in kwargs:
        kwargs.pop('multifit')
        # BUGFIX: this previously called log.warn(msg, DeprecationWarning);
        # logging treats the second argument as a %-format arg, which is
        # wrong.  Use warnings.warn with a category, matching the
        # deprecation warning issued in Registry.add_fitter.
        warn("The multifit keyword is no longer required.  All fits "
             "allow for multiple components.", DeprecationWarning)
    self.npeaks = 0
    self.fitkwargs = kwargs
    if interactive:
        if self.Spectrum.plotter.axis is None:
            raise Exception("Interactive fitting requires a plotter.")
        # reset button count & guesses on every __call__
        self.nclicks_b1 = 0
        self.nclicks_b2 = 0
        self.guesses = []
        self.start_interactive(clear_all_connections=clear_all_connections,
                               reset_selection=True,
                               debug=debug, **kwargs)
    elif (self.fittype in self.Registry.multifitters
          or guesses is not None
          or parinfo is not None):
        if guesses is None and parinfo is None:
            raise ValueError("You must input guesses when using multifit."
                             " Also, baseline (continuum fit) first!")
        elif parinfo is not None:
            # parinfo supersedes guesses
            self.guesses = parinfo.values
            self.parinfo = parinfo
            self.multifit(show_components=show_components, verbose=verbose,
                          debug=debug, use_lmfit=use_lmfit,
                          annotate=annotate, parinfo=parinfo,
                          guesses=guesses, **kwargs)
        elif guesses is not None:
            self.guesses = guesses
            self.multifit(show_components=show_components, verbose=verbose,
                          debug=debug, use_lmfit=use_lmfit,
                          guesses=guesses, annotate=annotate, **kwargs)
        else:
            raise ValueError("Guess and parinfo were somehow invalid.")
    else:
        print("Can't fit with given fittype {0}:"
              " it is not Registered as a fitter.".format(self.fittype))
        return
    if save: self.savefit()
def EQW(self, plot=False, plotcolor='g', fitted=True, continuum=None,
        components=False, annotate=False, alpha=0.5, loc='lower left',
        xmin=None, xmax=None, xunits='pixel', continuum_as_baseline=False,
        midpt_location='plot-center'):
    """
    Returns the equivalent width (integral of "baseline" or "continuum"
    minus the spectrum) over the selected range
    (the selected range defaults to self.xmin:self.xmax, so it may include
    multiple lines!)

    Parameters
    ----------
    plot : bool
        Plots a box indicating the EQW if plot==True (i.e., it will have a
        width equal to the equivalent width, and a height equal to the
        measured continuum)
    fitted : bool
        Use the fitted model?  If false, uses the data
    continuum : None or float
        Can specify a fixed continuum with this keyword, otherwise will use
        the fitted baseline.  WARNING: continuum=0 will still "work", but
        will give numerically invalid results.  Similarly, a negative
        continuum will work, but will yield results with questionable
        physical meaning.
    continuum_as_baseline : bool
        Replace the baseline with the specified continuum when computing
        the absorption depth of the line
    components : bool
        If your fit is multi-component, will attempt to acquire centroids
        for each component and print out individual EQWs
    xmin : float
    xmax : float
        The range over which to compute the EQW
    xunits : str
        The units of xmin/xmax
    midpt_location : 'fitted', 'plot-center'
        If 'plot' is set, this determines where the EQW will be drawn.  It
        can be the fitted centroid or the plot-center, i.e. (xmin+xmax)/2

    Returns
    -------
    Equivalent Width, or widths if components=True
    """
    if continuum is not None:
        # if continuum is specified, don't bother with checks
        # NOTE(review): the comment above and the condition look
        # contradictory -- these sanity checks on the *baseline* only run
        # when an explicit continuum IS given.  Confirm intent upstream.
        if np.median(self.Spectrum.baseline.basespec) == 0:
            raise ValueError("Baseline / continuum is zero: equivalent width is undefined.")
        elif np.median(self.Spectrum.baseline.basespec) < 0:
            if mycfg.WARN: warn( "WARNING: Baseline / continuum is negative: equivalent width is poorly defined." )
    # determine the pixel range to use: keyword values are converted from
    # `xunits` to pixels; None falls back to the current selection
    if xmin is None:
        xmin = self.xmin #self.Spectrum.xarr.x_to_pix(self.xmin)
    else:
        xmin = self.Spectrum.xarr.x_to_pix(xmin, xval_units=xunits)
    if xmax is None:
        xmax = self.xmax #self.Spectrum.xarr.x_to_pix(self.xmax)
    else:
        xmax = self.Spectrum.xarr.x_to_pix(xmax, xval_units=xunits)
    # channel width (approximate, in x-axis units) used to convert sums
    # into integrals
    dx = np.abs(self.Spectrum.xarr[xmin:xmax].cdelt(approx=True))
    log.debug("xmin={0} xmax={1} dx={2} continuum={3}"
              .format(xmin, xmax, dx, continuum))
    if components:
        # one EQW per fitted component, using per-component analytic
        # centroids and integrals
        centroids = self.fitter.analytic_centroids()
        integrals = self.fitter.component_integrals(self.Spectrum.xarr[xmin:xmax],dx=dx)
        eqw = []
        for cen,integ in zip(centroids,integrals):
            center_pix = self.Spectrum.xarr.x_to_pix(cen)
            if continuum is None:
                # NOTE(review): continuum is captured from the FIRST
                # component's centroid and then reused for all subsequent
                # components -- verify this is intended.
                continuum = self.Spectrum.baseline.basespec[center_pix]
            elif continuum_as_baseline:
                # NOTE(review): integrals[-1] always modifies the LAST
                # element, not the current component's -- possible bug?
                integrals[-1] += -(self.Spectrum.baseline.basespec[xmin:xmax] - continuum).sum() * dx
            eqw.append( -integ / continuum)
        if plot:
            plot = False
            if mycfg.WARN: warn( "Cannot plot multiple Equivalent Widths" )
    elif fitted:
        model = self.get_model(self.Spectrum.xarr[xmin:xmax],
                               add_baseline=False)
        # EQW is positive for absorption lines
        # fitted components are assumed to be continuum-subtracted
        integral = (-model).sum() * dx
        if continuum is None:
            # centroid in data units
            # (may fail if model has pos + neg values)
            center = (model*self.Spectrum.xarr[xmin:xmax]).sum()/model.sum()
            center_pix = self.Spectrum.xarr.x_to_pix(center)
            continuum = self.Spectrum.baseline.basespec[center_pix]
        elif continuum_as_baseline:
            # correct the integral for the difference between the fitted
            # baseline and the user-specified continuum level
            integral += -(self.Spectrum.baseline.basespec[xmin:xmax] - continuum).sum() * dx
        eqw = integral / continuum
    else:
        # data-based (non-fitted) EQW: integrate continuum-minus-data
        if continuum_as_baseline:
            diffspec = (continuum - self.Spectrum.data)
        elif self.Spectrum.baseline.subtracted is False:
            diffspec = (self.Spectrum.baseline.basespec - self.Spectrum.data)
        else:
            # baseline already subtracted: depth is just -data
            diffspec = -self.Spectrum.data
        sumofspec = diffspec[xmin:xmax].sum() * dx
        if continuum is None:
            continuum = np.median(self.Spectrum.baseline.basespec)
        eqw = sumofspec / continuum
    if plot and self.Spectrum.plotter.axis:
        # draw a rectangle of width `eqw` and height `continuum` centered
        # at the requested midpoint
        if midpt_location == 'plot-center':
            midpt_pixel = np.round((xmin+xmax)/2.0)
            midpt = self.Spectrum.xarr[midpt_pixel].value
        elif midpt_location == 'fitted':
            try:
                shifts = [self.Spectrum.specfit.parinfo[x].value
                          for x in self.Spectrum.specfit.parinfo.keys()
                          if 'SHIFT' in x]
            except AttributeError:
                raise AttributeError("Can only specify midpt_location="
                                     "fitted if there is a SHIFT parameter"
                                     "for the fitted model")
            # We choose to display the eqw fit at the center of the fitted
            # line set, closest to the passed window.
            # Note that this has the potential to show a eqw "rectangle"
            # centered on a fitted line other than the one measured for the
            # eqw call, if there are more than one fitted lines within the
            # window.
            midpt_pixel = (xmin+xmax)/2
            midval = self.Spectrum.xarr[midpt_pixel].value
            # pick the fitted SHIFT closest to the window center
            midpt_index = np.argmin([np.abs(x-midval) for x in shifts])
            midpt = shifts[midpt_index]
            midpt_pixel = self.Spectrum.xarr.x_to_pix(midpt)
        else:
            raise ValueError("midpt_location must be 'plot-center' or "
                             "fitted")
        if continuum_as_baseline:
            midpt_level = continuum
        else:
            midpt_level = self.Spectrum.baseline.basespec[midpt_pixel]
        log.debug("EQW plotting: midpt={0}, midpt_pixel={1}, "
                  "midpt_level={2}, eqw={3}".format(midpt, midpt_pixel,
                                                    midpt_level, eqw))
        self.EQW_plots.append(self.Spectrum.plotter.axis.fill_between(
            [midpt-eqw/2.0,midpt+eqw/2.0], [0,0],
            [midpt_level,midpt_level], color=plotcolor, alpha=alpha,
            label='EQW: %0.3g' % eqw))
        if annotate:
            self.Spectrum.plotter.axis.legend(
                [(matplotlib.collections.CircleCollection([0],facecolors=[plotcolor],edgecolors=[plotcolor]))],
                [('EQW: %0.3g' % eqw)],
                markerscale=0.01, borderpad=0.1, handlelength=0.1,
                handletextpad=0.1, loc=loc)
        if self.Spectrum.plotter.autorefresh:
            self.Spectrum.plotter.refresh()
    if hasattr(self.Spectrum,'header'):
        # record the measurement in the FITS header history
        history.write_history(self.Spectrum.header, "EQW for %s: %s" %
                              (self.fittype,eqw))
    return eqw
def register_fitter(self,*args,**kwargs):
    """
    Register a model fitter

    Thin wrapper around ``self.Registry.add_fitter``; see the appended
    documentation below for the accepted arguments.
    """
    self.Registry.add_fitter(*args,**kwargs)
# expose add_fitter's parameter documentation on the wrapper
register_fitter.__doc__ += Registry.add_fitter.__doc__
def seterrspec(self,usestd=None,useresiduals=True):
    """
    Simple wrapper function to set the error spectrum; will either use the
    input spectrum or determine the error using the RMS of the residuals,
    depending on whether the residuals exist.

    Parameters
    ----------
    usestd : None or bool
        If truthy, skip the stored error spectrum and fall back to the
        residual/data standard-deviation branches below.
    useresiduals : bool
        Prefer the RMS of the previous fit's residuals (when they exist)
        as a uniform per-channel error estimate.
    """
    if (self.Spectrum.error is not None) and not usestd:
        # an error spectrum exists and we were not told to ignore it
        if (self.Spectrum.error == 0).all():
            # all-zero errors are unusable; substitute a uniform estimate
            if self.residuals is not None and useresiduals:
                # uniform error = RMS of the previous fit's residuals
                self.errspec = np.ones(self.spectofit.shape[0]) * self.residuals.std()
            elif type(self.Spectrum.error) is np.ma.masked_array:
                # force errspec to be a non-masked array of ones
                self.errspec = self.Spectrum.error.data + 1
            else:
                # zeros + 1 -> uniform unit errors
                self.errspec = self.Spectrum.error + 1
        else:
            # this is the default behavior if spectrum.error is set
            self.errspec = self.Spectrum.error.copy()
    elif self.residuals is not None and useresiduals:
        # no usable error spectrum: fall back to residual RMS
        self.errspec = np.ones(self.spectofit.shape[0]) * self.residuals.std()
    else: self.errspec = np.ones(self.spectofit.shape[0]) * self.spectofit.std()
def setfitspec(self):
    """
    Set the spectrum that will be fit.  This is primarily to remove NANs
    from consideration: if you simply remove the data from both the X-axis
    and the Y-axis, it will not be considered for the fit, and a linear
    X-axis is not needed for fitting.

    However, it may be possible to do this using masked arrays instead of
    setting errors to be 1e10....
    """
    if self.Spectrum.data.sum() is np.ma.masked:
        # the entire spectrum is masked: nothing can be fit
        self.spectofit = np.zeros_like(self.Spectrum.data)
        self.errspec = np.zeros_like(self.Spectrum.data)
        self._valid = False
        return
    # see https://github.com/numpy/numpy/issues/3474
    self.spectofit = np.ma.copy(self.Spectrum.data)
    if hasattr(self.Spectrum.data, 'mask') and hasattr(self.spectofit,
                                                       'mask'):
        # sanity check: the copy must preserve the mask exactly
        assert np.all(self.Spectrum.data.mask == self.spectofit.mask)
    self._valid = True
    if hasattr(self.Spectrum,'baseline'):
        # fit baseline-subtracted data if a compatible, not-yet-subtracted
        # baseline exists
        if (self.Spectrum.baseline.subtracted is False
            and self.Spectrum.baseline.basespec is not None
            and len(self.spectofit) == len(self.Spectrum.baseline.basespec)):
            self.spectofit -= self.Spectrum.baseline.basespec
    # NaN != NaN, so this flags finite (OK) channels
    OKmask = (self.spectofit==self.spectofit)
    self.spectofit[~OKmask] = 0
    self.seterrspec()
    # effectively exclude NaN channels by giving them huge errors
    self.errspec[~OKmask] = 1e10
    if self.includemask is not None and (self.includemask.shape == self.errspec.shape):
        # likewise down-weight channels outside the fit selection
        self.errspec[~self.includemask] = 1e10*self.errspec.max()
@property
def mask(self):
    """Boolean exclusion mask over the full spectrum (True = excluded)."""
    data = self.spectofit
    # only trust an attached mask whose shape matches the data
    if hasattr(data, 'mask') and data.shape == data.mask.shape:
        return data.mask
    return np.zeros_like(data, dtype='bool')
@property
def mask_sliced(self):
    """Sliced (subset) Mask over [xmin:xmax]: True means "exclude" """
    return self.mask[self.xmin:self.xmax]
def multifit(self, fittype=None, renormalize='auto', annotate=None,
             show_components=None, verbose=True, color=None,
             guesses=None, parinfo=None, reset_fitspec=True,
             use_window_limits=None, use_lmfit=False, plot=True, **kwargs):
    """
    Fit multiple gaussians (or other profiles)

    Parameters
    ----------
    fittype : str
        What function will be fit?  fittype must have been Registryed in
        the peakbgfitters dict.  Uses default ('gaussian') if not specified
    renormalize : 'auto' or bool
        if 'auto' or True, will attempt to rescale small data (<1e-9) to be
        closer to 1 (scales by the median) so that the fit converges better
    parinfo : `~parinfo` structure
        Guess structure; supercedes ``guesses``
    guesses : list or 'moments'
        Either a list of guesses matching the number of parameters * the
        number of peaks for the model, or 'moments' to fit a single
        spectrum with the moments as guesses
    """
    if reset_fitspec:
        self.setfitspec()
    if not self._valid:
        raise ValueError("Data are invalid; cannot be fit.")
    if fittype is not None:
        self.fittype = fittype
    # these keywords are consumed here and must not reach the fitter
    bad_kws = ['fittype','plot']
    for kw in bad_kws:
        if kw in self.fitkwargs:
            del self.fitkwargs[kw]
    if guesses is None:
        guesses = self.guesses
    elif guesses in ('moment','moments'):
        guesses = self.moments(vheight=False, **kwargs)
    if parinfo is not None:
        # parinfo supersedes guesses
        guesses = parinfo.values
    if len(guesses) < self.Registry.npars[self.fittype]:
        raise ValueError("Too few parameters input.  Need at least %i for %s models" % (self.Registry.npars[self.fittype],self.fittype))
    # BUGFIX: use floor division; plain '/' yields a float under Python 3
    # true division, and npeaks must be an integer count
    self.npeaks = len(guesses) // self.Registry.npars[self.fittype]
    self.fitter = self.Registry.multifitters[self.fittype]
    self.vheight = False
    if self.fitter.vheight:
        # Need to reset the parinfo if vheight has previously been set,
        # otherwise npars will disagree, which causes problems if
        # renormalization happens
        self.fitter.vheight = False
        self.fitter.npeaks = self.npeaks
        self.fitter._make_parinfo(npeaks=self.npeaks)
    # add kwargs to fitkwargs
    self.fitkwargs.update(kwargs)
    if 'renormalize' in self.fitkwargs:
        del self.fitkwargs['renormalize']
    scalefactor = 1.0
    if renormalize in ('auto',True):
        datarange = self.spectofit[self.xmin:self.xmax].max() - self.spectofit[self.xmin:self.xmax].min()
        if abs(datarange) < 1e-9:
            # rescale tiny-amplitude data so the optimizer behaves
            scalefactor = np.median(np.abs(self.spectofit))
            log.info("Renormalizing data by factor %e to improve fitting procedure"
                     % scalefactor)
            self.spectofit /= scalefactor
            self.errspec /= scalefactor
    # this error should be unreachable, but is included as a sanity check
    if self.fitter.npeaks * self.fitter.npars != len(self.fitter.parinfo):
        raise ValueError("Length of parinfo doesn't agree with npeaks * npars = %i" %
                         (self.fitter.npeaks * self.fitter.npars))
    # zip guesses with parinfo: truncates parinfo if len(parinfo) > len(guesses)
    # actually not sure how/when/if this should happen; this might be a bad hack
    # revisit with tests!!
    for jj,(guess,par) in enumerate(zip(guesses,self.fitter.parinfo)):
        if par.scaleable:
            guesses[jj] /= scalefactor
    # restrict the fit to the selected, unmasked region
    xtofit = self.Spectrum.xarr[self.xmin:self.xmax][~self.mask_sliced]
    spectofit = self.spectofit[self.xmin:self.xmax][~self.mask_sliced]
    err = self.errspec[self.xmin:self.xmax][~self.mask_sliced]
    if parinfo is not None:
        self._validate_parinfo(parinfo, mode='fix')
    else:
        pinf, _ = self.fitter._make_parinfo(parvalues=guesses,
                                            npeaks=self.npeaks,
                                            **self.fitkwargs)
        new_guesses = self._validate_parinfo(pinf, 'guesses')
        if any((x!=y) for x,y in zip(guesses, new_guesses)):
            warn("Guesses have been changed from {0} to {1}"
                 .format(guesses, new_guesses))
        guesses = new_guesses
    mpp,model,mpperr,chi2 = self.fitter(xtofit, spectofit, err=err,
                                        npeaks=self.npeaks,
                                        parinfo=parinfo, # the user MUST be allowed to override parinfo.
                                        params=guesses,
                                        use_lmfit=use_lmfit,
                                        **self.fitkwargs)
    any_out_of_range = self._validate_parinfo(self.fitter.parinfo, mode='check')
    if any(any_out_of_range):
        warn("The fitter returned values that are outside the "
             "parameter limits.  DEBUG INFO: {0}".format(any_out_of_range))
    # undo the renormalization on the stored spectra
    self.spectofit *= scalefactor
    self.errspec *= scalefactor
    if hasattr(self.fitter.mp,'status'):
        self.mpfit_status = models.mpfit_messages[self.fitter.mp.status]
    if model is None:
        raise ValueError("Model was not set by fitter.  Examine your fitter.")
    self.chi2 = chi2
    self.model = model * scalefactor
    self.parinfo = self.fitter.parinfo
    # degrees of freedom: included - masked channels minus free parameters
    self.dof = (self.includemask.sum() - self.mask.sum() - self.npeaks *
                self.Registry.npars[self.fittype] +
                np.sum(self.parinfo.fixed))
    # rescale any scaleable parameters
    for par in self.parinfo:
        if par.scaleable:
            par.value = par.value * scalefactor
            par.error = par.error * scalefactor
    self.modelpars = self.parinfo.values
    self.modelerrs = self.parinfo.errors
    self.residuals = spectofit - self.model
    if self.Spectrum.plotter.axis is not None and plot:
        if color is not None:
            kwargs.update({'composite_fit_color':color})
        self.plot_fit(annotate=annotate,
                      show_components=show_components,
                      use_window_limits=use_window_limits,
                      **kwargs)
    # Re-organize modelerrs so that any parameters that are tied to others
    # inherit the errors of the params they are tied to
    if 'tied' in self.fitkwargs:
        for ii, element in enumerate(self.fitkwargs['tied']):
            if not element.strip(): continue
            if '[' in element and ']' in element:
                # mpfit-style tie: "p[3]" -> index 3
                i1 = element.index('[') + 1
                i2 = element.index(']')
                loc = int(element[i1:i2])
            else: # assume lmfit version
                varnames = re.compile('([a-zA-Z][a-zA-Z_0-9]*)').search(element).groups()
                if not varnames:
                    continue
                elif len(varnames) > 1:
                    warn("The 'tied' parameter {0} is not simple enough for error propagation".format(element))
                    continue
                else:
                    varname = varnames[0]
                    loc = self.parinfo.names.index(varname)
            self.modelerrs[ii] = self.modelerrs[loc]
    # make sure the full model is populated
    self._full_model()
    self.history_fitpars()
def refit(self, use_lmfit=False):
    """Redo the previous fit, seeding it with the current parinfo."""
    fit_kwargs = dict(parinfo=self.parinfo, use_lmfit=use_lmfit,
                      reset_fitspec=False)
    return self.multifit(**fit_kwargs)
def history_fitpars(self):
    """Record the fit type, chi^2/DOF, and every parameter in the header history."""
    if not hasattr(self.Spectrum, 'header'):
        # nothing to write history into
        return
    hdr = self.Spectrum.header
    history.write_history(hdr, "SPECFIT: Fitted "
                          "profile of type %s" % (self.fittype))
    history.write_history(hdr, "Chi^2: %g DOF: %i" %
                          (self.chi2, self.dof))
    for par in self.parinfo:
        history.write_history(hdr, str(par))
def peakbgfit(self, usemoments=True, annotate=None, vheight=True, height=0,
              negamp=None, fittype=None, renormalize='auto', color=None,
              use_lmfit=False, show_components=None, debug=False,
              use_window_limits=True, guesses=None,
              nsigcut_moments=None, plot=True, parinfo=None, **kwargs):
    """
    Fit a single peak (plus a background)

    Parameters
    ----------
    usemoments : bool
        The initial guess will be set by the fitter's 'moments' function
        (this overrides 'guesses')
    annotate : bool
        Make a legend?
    vheight : bool
        Fit a (constant) background as well as a peak?
    height : float
        initial guess for background
    negamp : bool
        If True, assumes amplitude is negative.  If False, assumes
        positive.  If None, can be either.
    fittype : bool
        What function will be fit?  fittype must have been Registryed in
        the peakbgfitters dict
    renormalize : 'auto' or bool
        if 'auto' or True, will attempt to rescale small data (<1e-9) to be
        closer to 1 (scales by the median) so that the fit converges better
    nsigcut_moments : bool
        pass to moment guesser; can do a sigma cut for moment guessing
    """
    self.npeaks = 1
    self.auto = True
    self.setfitspec()
    if fittype is not None:
        self.fittype = fittype
    NP = self.Registry.peakbgfitters[self.fittype].default_npars
    if guesses is not None:
        log.debug("Using user-specified guesses.")
        self.guesses = guesses
        if len(guesses) != NP + vheight:
            raise ValueError("Invalid guesses specified for single-fitter."
                             "Expected {0}, got {1}.  Perhaps you should "
                             "use the multifitter (multifit=True)?"
                             .format(NP+vheight, len(guesses)))
    elif usemoments: # this can be done within gaussfit but I want to save them
        # use this INDEPENDENT of fittype for now (voigt and gauss get same guesses)
        log.debug("Using moment-based guesses.")
        moments_f = self.Registry.peakbgfitters[self.fittype].moments
        self.guesses = moments_f(self.Spectrum.xarr[self.xmin:self.xmax],
                                 self.spectofit[self.xmin:self.xmax],
                                 vheight=vheight,
                                 negamp=negamp,
                                 nsigcut=nsigcut_moments,
                                 **kwargs)
    else:
        if negamp:
            self.guesses = [height,-1,0,1]
        else:
            self.guesses = [height,1,0,1]
    # If we're fitting anything but a simple Gaussian, we need the length
    # of guesses to be right so we pad with appended zeros
    # BUT, if the guesses from the moments have the right number of
    # parameters, we don't need to do this.
    if NP > len(self.guesses):
        # BUGFIX: was `xrange`, which does not exist on Python 3
        # (`range` behaves identically here on both 2 and 3)
        for ii in range(len(self.guesses), NP):
            self.guesses += [0.0]
    self.fitter = self.Registry.peakbgfitters[self.fittype]
    log.debug("n(guesses): %s  Guesses: %s  vheight: %s " %
              (len(self.guesses),self.guesses,vheight))
    scalefactor = 1.0
    if renormalize in ('auto',True):
        datarange = self.spectofit[self.xmin:self.xmax].max() - self.spectofit[self.xmin:self.xmax].min()
        if abs(datarange) < 1e-9:
            # rescale tiny-amplitude data so the optimizer behaves
            scalefactor = np.median(np.abs(self.spectofit))
            log.info("Renormalizing data by factor %e to improve fitting procedure"
                     % scalefactor)
            self.spectofit /= scalefactor
            self.errspec /= scalefactor
            self.guesses[0] /= scalefactor
            if vheight: self.guesses[1] /= scalefactor
    log.debug("Guesses before fit: {0}".format(self.guesses))
    if 'debug' in self.fitkwargs:
        debug = self.fitkwargs['debug']
        del self.fitkwargs['debug']
    mpp,model,mpperr,chi2 = self.fitter(
        self.Spectrum.xarr[self.xmin:self.xmax],
        self.spectofit[self.xmin:self.xmax],
        err=self.errspec[self.xmin:self.xmax],
        vheight=vheight,
        params=self.guesses,
        parinfo=parinfo,
        debug=debug,
        use_lmfit=use_lmfit,
        **self.fitkwargs)
    log.debug("1.  Guesses, fits after: {0}, {1}".format(self.guesses, mpp))
    # undo the renormalization on the stored spectra
    self.spectofit *= scalefactor
    self.errspec *= scalefactor
    if hasattr(self.fitter.mp,'status'):
        self.mpfit_status = models.mpfit_messages[self.fitter.mp.status]
    self.parinfo = self.fitter.parinfo
    if model is None:
        raise ValueError("Model was not set by fitter.  Examine your fitter.")
    self.chi2 = chi2
    self.dof = self.includemask.sum()-self.npeaks*self.Registry.npars[self.fittype]-vheight+np.sum(self.parinfo.fixed)
    self.vheight = vheight
    if vheight:
        # the 0th-order background becomes the spectrum's baseline
        self.Spectrum.baseline.order = 0
        self.Spectrum.baseline.baselinepars = [mpp[0]*scalefactor] # first item in list form
        self.Spectrum.baseline.basespec = self.Spectrum.data*0 + mpp[0]*scalefactor
        self.model = model*scalefactor - mpp[0]*scalefactor
        # I removed this recently for some reason, but more code depends on it being in place
        # Need to figure out *WHY* anything would want an extra parameter
        if len(mpp) == self.fitter.npars+1:
            mpp = mpp[1:]
    else: self.model = model*scalefactor
    # NOTE(review): self.model is already scaled above; the extra
    # `*scalefactor` here looks like a possible double-scaling -- confirm
    # before changing.
    self.residuals = self.spectofit[self.xmin:self.xmax] - self.model*scalefactor
    self.modelpars = mpp
    self.modelerrs = mpperr
    # rescale any scaleable parameters
    for par in self.parinfo:
        if par.scaleable:
            par.value = par.value * scalefactor
            par.error = par.error * scalefactor
    if self.Spectrum.plotter.axis is not None and plot:
        if color is not None:
            kwargs.update({'composite_fit_color':color})
        self.plot_fit(annotate=annotate,
                      use_window_limits=use_window_limits,
                      show_components=show_components,
                      **kwargs)
    # make sure the full model is populated
    self._full_model(debug=debug)
    log.debug("2.  Guesses, fits after vheight removal: {0},{1}"
              .format(self.guesses, mpp))
    self.history_fitpars()
def _full_model(self, debug=False, **kwargs):
    """
    Compute and cache the model over the whole spectrum, plus the
    full-spectrum residuals.
    """
    whole_model = self.get_full_model(debug=debug, **kwargs)
    self.fullmodel = whole_model
    self.fullresiduals = self.Spectrum.data - whole_model
def get_full_model(self, debug=False, **kwargs):
    """Evaluate the model over the spectrum's full x-axis."""
    full_axis = self.Spectrum.xarr
    return self.get_model(full_axis, debug=debug, **kwargs)
def get_model(self, xarr, pars=None, debug=False, add_baseline=None):
    """
    Compute the model over a given axis.

    If ``pars`` is None, the stored best-fit parinfo is used.
    """
    chosen_pars = self.parinfo if pars is None else pars
    return self.get_model_frompars(xarr=xarr, pars=chosen_pars,
                                   add_baseline=add_baseline, debug=debug)
def get_model_frompars(self, xarr, pars, debug=False, add_baseline=None):
    """
    Compute the model over a given axis from explicit parameters.

    The baseline is added unless ``add_baseline`` is False, or it is None
    and the baseline is already subtracted (or folded in via vheight).
    """
    model_vals = self.fitter.n_modelfunc(pars,
                                         **self.fitter.modelfunc_kwargs)(xarr)
    skip_baseline = (add_baseline is False or
                     (add_baseline is None and
                      (self.Spectrum.baseline.subtracted or self.vheight)))
    if skip_baseline:
        return model_vals
    return model_vals + self.Spectrum.baseline.get_model(np.arange(xarr.size))
def plot_model(self, pars, offset=0.0, annotate=False, clear=False, **kwargs):
    """
    Plot a model from specified input parameters
    (see plot_fit for kwarg specification)

    annotate is set to "false" because arbitrary annotations are not yet
    implemented
    """
    # really, plot_fit should be thin on top of plot_model, but that's
    # not how I wrote it, so it will have to wait for a refactor
    if clear:
        self.clear()
    return self.plot_fit(pars=pars, offset=offset, annotate=False, **kwargs)
#def assess_npeaks(self):
# """
# Attempt to determine whether any of the peaks are unnecessary
# """
# if self.npeaks <= 1:
# return
# npars = self.fitter.npars
# perpeakpars = [self.parinfo.values[ii*npars:(ii+1)*npars] for ii in
# range(self.npeaks)]
# parsets = [((x[0][0],x[1][0]),x[0][1]+x[1][1]) for x in
# itertools.combinations(perpeakpars, self.npeaks-1)]
# parsets = [x
# for y in itertools.combinations(perpeakpars, self.npeaks-1)
# for x in y]
# chi2_without = [(self.spectofit[self.xmin:self.xmax] -
# self.get_model_frompars(self.xarr, self.pars[ii*npars:
def plot_fit(self, xarr=None, annotate=None, show_components=None,
             composite_fit_color='red', lw=0.5,
             composite_lw=0.75, pars=None, offset=None,
             use_window_limits=None, show_hyperfine_components=None,
             plotkwargs={}, **kwargs):
    """
    Plot the fit.  Must have fitted something before calling this!

    It will be automatically called whenever a spectrum is fit (assuming
    an axis for plotting exists)

    kwargs are passed to the fitter's components attribute

    Parameters
    ----------
    xarr : None
        If none, will use the spectrum's xarr.  Otherwise, plot the
        specified xarr.  This is useful if you want to plot a well-sampled
        model when the input spectrum is undersampled
    annotate : None or bool
        Annotate the plot?  If not specified, defaults to
        self.autoannotate
    show_components : None or bool
    show_hyperfine_components : None or bool
        Show the individual gaussian components overlaid on the composite
        fit
    use_window_limits : None or bool
        If False, will reset the window to include the whole spectrum.  If
        True, leaves the window as is.  Defaults to self.use_window_limits
        if None.
    pars : parinfo
        A parinfo structure or list of model parameters.  If none, uses
        best-fit
    offset : None or float
        Y-offset.  If none, uses the default self.Spectrum.plotter offset,
        otherwise, uses the specified float.
    """
    #if self.Spectrum.baseline.subtracted is False and self.Spectrum.baseline.basespec is not None:
    #    # don't display baseline if it's included in the fit
    #    plot_offset = self.Spectrum.plotter.offset+(self.Spectrum.baseline.basespec * (~self.vheight))
    #else:
    if offset is None:
        plot_offset = self.Spectrum.plotter.offset
    else:
        plot_offset = offset
    if xarr is None:
        xarr = self.Spectrum.xarr
    if pars is not None:
        # explicit parameters: evaluate a fresh model
        model = self.get_model_frompars(xarr, pars)
    else:
        # use (and refresh) the cached full-spectrum best-fit model
        self._full_model()
        model = self.fullmodel
    self.modelplot += self.Spectrum.plotter.axis.plot(xarr,
                                                      model + plot_offset,
                                                      color=composite_fit_color,
                                                      linewidth=lw,
                                                      **plotkwargs)
    # Plot components
    if show_components or show_hyperfine_components:
        self.plot_components(xarr=xarr,
                             show_hyperfine_components=show_hyperfine_components,
                             pars=pars, plotkwargs=plotkwargs)
    uwl = use_window_limits if use_window_limits is not None else self.use_window_limits
    # plotter kwargs are kwargs for the Spectrum.plotter,
    # whereas plotkwargs are for the matplotlib plot command
    plotterkwargs = {}
    plotterkwargs.update(self.Spectrum.plotter.plotkwargs)
    plotterkwargs['use_window_limits'] = uwl
    self.Spectrum.plotter.reset_limits(**plotterkwargs)
    if self.Spectrum.plotter.autorefresh:
        self.Spectrum.plotter.refresh()
    if annotate or ((annotate is None) and self.autoannotate):
        self.annotate()
        if self.vheight: self.Spectrum.baseline.annotate()
def plot_components(self, xarr=None, show_hyperfine_components=None,
component_yoffset=0.0, component_lw=0.75, pars=None,
component_fit_color='blue', component_kwargs={},
add_baseline=False, plotkwargs={}, **kwargs):
"""
Overplot the individual components of a fit
Parameters
----------
xarr : None
If none, will use the spectrum's xarr. Otherwise, plot the
specified xarr. This is useful if you want to plot a well-sampled
model when the input spectrum is undersampled
show_hyperfine_components : None | bool
Keyword argument to pass to component codes; determines whether to return
individual (e.g., hyperfine) components of a composite model
component_yoffset : float
Vertical (y-direction) offset to add to the components when plotting
component_lw : float
Line width of component lines
component_fitcolor : color
Color of component lines
component_kwargs : dict
Keyword arguments to pass to the fitter.components method
add_baseline : bool
Add the fit to the components before plotting. Makes sense to use
if self.Spectrum.baseline.subtracted == False
pars : parinfo
A parinfo structure or list of model parameters. If none, uses
best-fit
"""
plot_offset = self.Spectrum.plotter.offset
if xarr is None:
xarr = self.Spectrum.xarr
if show_hyperfine_components is not None:
component_kwargs['return_hyperfine_components'] = show_hyperfine_components
self._component_kwargs = component_kwargs
if pars is None:
pars = self.modelpars
self.modelcomponents = self.fitter.components(xarr, pars, **component_kwargs)
yoffset = plot_offset + component_yoffset
if add_baseline:
yoffset += self.Spectrum.baseline.basespec
for data in self.modelcomponents:
# can have multidimensional components
if len(data.shape) > 1:
for d in data:
self._plotted_components += self.Spectrum.plotter.axis.plot(xarr,
d + yoffset,
color=component_fit_color, linewidth=component_lw, **plotkwargs)
else:
self._plotted_components += self.Spectrum.plotter.axis.plot(xarr,
data + yoffset,
color=component_fit_color, linewidth=component_lw, **plotkwargs)
def fullsizemodel(self):
"""
If the model was fit to a sub-region of the spectrum, expand it (with
zeros wherever the model was not defined) to fill the spectrum.
Examples
--------
>>> noise = np.random.randn(100)
>>> xarr = np.linspace(-50,50,100)
>>> signal = np.exp(-(xarr-5)**2/(2*3.**2))
>>> sp = pyspeckit.Spectrum(data=noise + signal, xarr=xarr, xarrkwargs={'units':'km/s'})
>>> sp.specfit(xmin=-25,xmax=25)
>>> sp.specfit.model.shape
(48,)
>>> sp.specfit.fullsizemodel()
>>> sp.specfit.model.shape
(100,)
"""
if self.model.shape != self.Spectrum.data.shape:
temp = np.zeros(self.Spectrum.data.shape)
temp[self.xmin:self.xmax] = self.model
self.model = temp
self.residuals = self.spectofit - self.model
self.selectregion(reset=True)
    def plotresiduals(self, fig=2, axis=None, clear=True, color='k',
                      linewidth=0.5, drawstyle='steps-mid', yoffset=0.0,
                      label=True, pars=None, zeroline=None,
                      set_limits=True, **kwargs):
        """
        Plot residuals of the fit.  Specify a figure or
        axis; defaults to figure(2).

        Parameters
        ----------
        fig : int
            Figure number.  Overridden by axis
        axis : axis
            The axis to plot on
        clear : bool
            Clear the target axis before plotting?
        color : str
            Matplotlib color of the residual line
        linewidth : float
            Line width of the residual line
        drawstyle : str
            Matplotlib draw style for the residual line
        yoffset : float
            Vertical offset added to the residuals before plotting
        label : bool
            Copy the x/y labels from the main plot and set a title?
        pars : None or parlist
            If set, the residuals will be computed for the input parameters
        zeroline : bool or None
            Plot the "zero" line through the center of the residuals.  If None,
            defaults to "True if yoffset!=0, False otherwise"
        set_limits : bool
            Match the residual axis limits to the main plotter's limits?

        kwargs are passed to matplotlib plot
        """
        # recompute the full model (and therefore self.fullresiduals),
        # optionally for a caller-specified parameter set
        self._full_model(pars=pars)
        if axis is None:
            # no axis given: draw into the numbered figure's current axis
            if isinstance(fig,int):
                fig=matplotlib.pyplot.figure(fig)
            self.residualaxis = matplotlib.pyplot.gca()
            if clear:
                self.residualaxis.clear()
        else:
            self.residualaxis = axis
            if clear:
                self.residualaxis.clear()
        self.residualplot = self.residualaxis.plot(self.Spectrum.xarr,
                                                   self.fullresiduals+yoffset,
                                                   drawstyle=drawstyle,
                                                   linewidth=linewidth,
                                                   color=color, **kwargs)
        # draw a dashed reference line at the (possibly offset) zero level
        if zeroline or (zeroline is None and yoffset != 0):
            self.residualplot += self.residualaxis.plot(self.Spectrum.xarr,
                                                        (np.zeros_like(self.Spectrum.xarr.value)+yoffset),
                                                        linestyle='--',
                                                        color='k',
                                                        alpha=0.5)
        # mirror the main plot's displayed window, when it has one
        if set_limits:
            if ((self.Spectrum.plotter.xmin is not None) and
                (self.Spectrum.plotter.xmax is not None)):
                self.residualaxis.set_xlim(self.Spectrum.plotter.xmin.value,
                                           self.Spectrum.plotter.xmax.value)
            if ((self.Spectrum.plotter.ymin is not None) and
                (self.Spectrum.plotter.ymax is not None)):
                self.residualaxis.set_ylim(self.Spectrum.plotter.ymin,
                                           self.Spectrum.plotter.ymax)
        if label:
            self.residualaxis.set_xlabel(self.Spectrum.plotter.xlabel)
            self.residualaxis.set_ylabel(self.Spectrum.plotter.ylabel)
            self.residualaxis.set_title("Residuals")
        if self.Spectrum.plotter.autorefresh:
            self.residualaxis.figure.canvas.draw()
def annotate(self,loc='upper right',labelspacing=0.25, markerscale=0.01,
borderpad=0.1, handlelength=0.1, handletextpad=0.1,
frameon=False, chi2=None, optimal_chi2_kwargs={}, **kwargs):
"""
Add a legend to the plot showing the fitted parameters
_clearlegend() will remove the legend
chi2 : {True or 'reduced' or 'optimal' or 'allthree'}
kwargs passed to legend
"""
self._clearlegend()
pl = matplotlib.collections.CircleCollection([0],edgecolors=['k'])
if hasattr(self.fitter,'annotations'):
self._annotation_labels = self.fitter.annotations()
else:
raise Exception("Fitter %s has no annotations." % self.fitter)
#xtypename = units.unit_type_dict[self.Spectrum.xarr.xtype]
xcharconv = units.SmartCaseNoSpaceDict({u.Hz.physical_type:'\\nu',
u.m.physical_type:'\\lambda',
(u.km/u.s).physical_type:'v',
'pixels':'x',
u.dimensionless_unscaled:'x',
'dimensionless':'x',
})
try:
xchar = xcharconv[self.Spectrum.xarr.unit.physical_type]
except AttributeError:
unit_key = self.Spectrum.xarr.unit
xchar = xcharconv[u.Unit(unit_key).physical_type]
self._annotation_labels = [L.replace('x',xchar) if L[1]=='x' else L for
L in self._annotation_labels]
if chi2 is not None:
chi2n_label = '$\\chi^2/\\nu = %0.2g$' % (self.chi2/self.dof)
chi2opt_label = '$\\chi^2_o/\\nu = %0.2g$' % self.optimal_chi2(**optimal_chi2_kwargs)
chi2_label = '$\\chi^2 = %0.2g$' % self.chi2
if chi2 == 'allthree':
self._annotation_labels.append("\n".join([chi2n_label,
chi2_label,
chi2opt_label]))
elif chi2 == 'reduced':
self._annotation_labels.append(chi2n_label)
elif chi2 == 'optimal':
self._annotation_labels.append(chi2opt_label)
else:
self._annotation_labels.append(chi2_label)
if self.Spectrum.plotter.axis:
self.fitleg = self.Spectrum.plotter.axis.legend(
tuple([pl]*len(self._annotation_labels)),
self._annotation_labels, loc=loc, markerscale=markerscale,
borderpad=borderpad, handlelength=handlelength,
handletextpad=handletextpad, labelspacing=labelspacing,
frameon=frameon, **kwargs)
self.Spectrum.plotter.axis.add_artist(self.fitleg)
self.fitleg.draggable(True)
if self.Spectrum.plotter.autorefresh:
self.Spectrum.plotter.refresh()
def print_fit(self, print_baseline=True, **kwargs):
"""
Print the best-fit parameters to the command line
"""
if self.Spectrum.baseline.baselinepars is not None and print_baseline:
print("Baseline: " + " + ".join(["%12g x^%i" % (x,i) for i,x in enumerate(self.Spectrum.baseline.baselinepars[::-1])]))
for i,p in enumerate(self.parinfo):
print("%15s: %12g +/- %12g" % (p['parname'],p['value'],p['error']))
def clear(self, legend=True, components=True):
"""
Remove the fitted model from the plot
Also removes the legend by default
"""
if self.Spectrum.plotter.axis is not None:
for p in self.modelplot:
p.set_visible(False)
if legend:
self._clearlegend()
if components:
self._clearcomponents()
if self.Spectrum.plotter.autorefresh:
self.Spectrum.plotter.refresh()
# Empty the modelplot array to free memory
self.modelplot = []
# remove residuals from self if they're there.
if hasattr(self,'residualplot'):
for L in self.residualplot:
if L in self.Spectrum.plotter.axis.lines:
self.Spectrum.plotter.axis.lines.remove(L)
def _clearcomponents(self):
for pc in self._plotted_components:
pc.set_visible(False)
if pc in self.Spectrum.plotter.axis.lines:
self.Spectrum.plotter.axis.lines.remove(pc)
if self.Spectrum.plotter.autorefresh:
self.Spectrum.plotter.refresh()
# Empty the plotted components array to free memory
self._plotted_components = []
def _clearlegend(self):
"""
Remove the legend from the plot window
"""
axis = self.Spectrum.plotter.axis
if axis and axis.legend_ == self.fitleg:
axis.legend_ = None
if axis and self.fitleg is not None:
# don't remove fitleg unless it's in the current axis
# self.fitleg.set_visible(False)
if self.fitleg in axis.artists:
axis.artists.remove(self.fitleg)
if self.Spectrum.plotter.autorefresh:
self.Spectrum.plotter.refresh()
def savefit(self):
"""
Save the fit parameters from a Gaussian fit to the FITS header
.. todo::
THESE SHOULD BE WRITTEN FOR EACH TYPE OF MODEL TO BE FIT
"""
if self.modelpars is not None and hasattr(self.Spectrum,'header'):
for ii,p in enumerate(self.modelpars):
try:
if ii % 3 == 0:
self.Spectrum.header['AMP%1i' % (ii/3)] = (p,"Gaussian best fit amplitude #%i" % (ii/3))
elif ii % 3 == 1:
self.Spectrum.header['CEN%1i' % (ii/3)] = (p,"Gaussian best fit center #%i" % (ii/3))
elif ii % 3 == 2:
self.Spectrum.header['WID%1i' % (ii/3)] = (p,"Gaussian best fit width #%i" % (ii/3))
except ValueError as ex:
log.info("Failed to save fit to header: {0}".format(ex))
def downsample(self,factor):
"""
Downsample the model spectrum (and the spectofit spectra)
This should only be done when Spectrum.smooth is called
"""
if self.model is not None:
self.model = self.model[::factor]
if self.residuals is not None:
self.residuals = self.residuals[::factor]
self.spectofit = self.spectofit[::factor]
self.errspec = self.errspec[::factor]
self.includemask = self.includemask[::factor]
def crop(self,x1pix,x2pix):
"""
When spectrum.crop is called, this must be too
"""
if self.model is not None:
self.model = self.model[x1pix:x2pix]
if hasattr(self,'fullmodel'):
self.fullmodel = self.fullmodel[x1pix:x2pix]
self.includemask = self.includemask[x1pix:x2pix]
self.setfitspec()
def integral(self, analytic=False, direct=False, threshold='auto',
integration_limits=None, integration_limit_units='pixels',
return_error=False, **kwargs):
"""
Return the integral of the fitted spectrum
Parameters
----------
analytic : bool
Return the analytic integral of the fitted function?
.. WARNING:: This approach is only implemented for some models
.. todo:: Implement error propagation for this approach
direct : bool
Return the integral of the *spectrum* (as opposed to the *fit*)
over a range defined by the `integration_limits` if specified or
`threshold` otherwise
threshold : 'auto' or 'error' or float
Determines what data to be included in the integral based off of where
the model is greater than this number
If 'auto', the threshold will be set to peak_fraction * the peak
model value.
If 'error', uses the error spectrum as the threshold
See `self.get_model_xlimits` for details
integration_limits : None or 2-tuple
Manually specify the limits in `integration_limit_units` units
return_error : bool
Return the error on the integral if set.
The error computed by
sigma = sqrt(sum(sigma_i^2)) * dx
kwargs :
passed to `self.fitter.integral` if ``not(direct)``
Returns
-------
np.scalar or np.ndarray with the integral or integral & error
"""
if analytic:
return self.fitter.analytic_integral(modelpars=self.parinfo.values)
xmin,xmax = self.get_model_xlimits(units='pixels', threshold=threshold)
if integration_limits is None:
integration_limits = [xmin,xmax]
integration_limits = [
self.Spectrum.xarr.x_to_pix(x,xval_units=integration_limit_units)
for x in integration_limits]
if xmax - xmin > 1: # can only get cdelt if there's more than 1 pixel
dx = self.Spectrum.xarr[xmin:xmax].cdelt()
else:
dx = None
if dx is None:
#dx = np.abs(np.concatenate([np.diff(self.Spectrum.xarr),[0]]))
#warn("Irregular X-axis. The last pixel is ignored.")
self.Spectrum.xarr.make_dxarr()
dx = self.Spectrum.xarr.dxarr
else:
# shouldn't shape be a 'property'?
dx = np.repeat(np.abs(dx), self.Spectrum.shape)
if direct:
integrand = self.Spectrum.data[xmin:xmax]
if not self.Spectrum.baseline.subtracted:
integrand -= self.Spectrum.baseline.basespec[xmin:xmax]
integ = (integrand * dx[xmin:xmax]).sum()
if return_error:
# compute error assuming a "known mean" (not a sample mean). If sample mean, multiply
# by sqrt(len(dx)/(len(dx)-1)) (which should be very near 1)
error = np.sqrt((dx[xmin:xmax] * self.Spectrum.error[xmin:xmax]**2).sum() / dx[xmin:xmax].sum())
return np.array([integ,error])
else:
return integ
#OK = np.abs( fullmodel ) > threshold
#integ = (self.spectofit[OK] * dx[OK]).sum()
#error = np.sqrt((self.errspec[OK]**2 * dx[OK]).sum()/dx[OK].sum())
else:
if not hasattr(self.fitter,'integral'):
raise AttributeError("The fitter %s does not have an integral implemented" % self.fittype)
# the model considered here must NOT include the baseline!
# if it does, you'll get the integral of the continuum
fullmodel = self.get_full_model(add_baseline=False)
if self.Spectrum.xarr.cdelt() is not None:
dx = np.median(dx)
integ = self.fitter.integral(self.modelpars, dx=dx, **kwargs)
if return_error:
if mycfg.WARN: print("WARNING: The computation of the error "
"on the integral is not obviously "
"correct or robust... it's just a guess.")
OK = self.model_mask(threshold=threshold, add_baseline=False)
error = np.sqrt((self.errspec[OK]**2).sum()) * dx
#raise NotImplementedError("We haven't written up correct error estimation for integrals of fits")
else:
integ = 0
error = 0
warn("An analytic integal could not be computed because the X-axis is irregular. Try direct=True when integrating, or find a way to linearize the X-axis")
if return_error:
return integ,error
else:
return integ
    def model_mask(self, **kwargs):
        """
        Get a mask (boolean array) of the region where the fitted model is
        significant.  Thin wrapper around :meth:`_compare_to_threshold`.

        Parameters
        ----------
        threshold : 'auto' or 'error' or float
            The threshold to compare the model values to for selecting the mask
            region.

            * auto: uses `peak_fraction` times the model peak
            * error: use the spectrum error
            * float: any floating point number as an absolute threshold

        peak_fraction : float
            Parameter used if ``threshold=='auto'`` to determine fraction of
            model peak to set threshold at
        add_baseline : bool
            Add the fitted baseline to the model before comparing to threshold?

        Returns
        -------
        mask : `~numpy.ndarray`
            A boolean mask array with the same size as the spectrum, set to
            ``True`` where the fitted model has values above the specified
            threshold
        """
        return self._compare_to_threshold(**kwargs)
def _compare_to_threshold(self, threshold='auto', peak_fraction=0.01,
add_baseline=False):
"""
Identify pixels that are above some threshold
"""
model = self.get_full_model(add_baseline=add_baseline)
# auto-set threshold from some fraction of the model peak
if threshold=='auto':
threshold = peak_fraction * np.abs(model).max()
elif threshold=='error':
threshold = self.errspec
OK = np.abs(model) > threshold
return OK
def get_model_xlimits(self, threshold='auto', peak_fraction=0.01,
add_baseline=False, units='pixels'):
"""
Return the x positions of the first and last points at which the model
is above some threshold
Parameters
----------
threshold : 'auto' or 'error' or float
If 'auto', the threshold will be set to peak_fraction * the peak
model value.
If 'error', uses the error spectrum as the threshold
peak_fraction : float
ignored unless threshold == 'auto'
add_baseline : bool
Include the baseline when computing whether the model is above the
threshold? default FALSE. Passed to get_full_model.
units : str
A valid unit type, e.g. 'pixels' or 'angstroms'
"""
OK = self._compare_to_threshold(threshold=threshold,
peak_fraction=peak_fraction,
add_baseline=add_baseline)
# find the first & last "True" values
xpixmin = OK.argmax()
xpixmax = len(OK) - OK[::-1].argmax() - 1
if units == 'pixels':
return [xpixmin,xpixmax]
else:
return self.Spectrum.xarr[[xpixmin,xpixmax]].as_unit(units)
def shift_pars(self, frame=None):
"""
Shift the velocity / wavelength / frequency of the fitted parameters
into a different frame
Right now this only takes care of redshift and only if redshift is defined.
It should be extended to do other things later
"""
for ii,pi in enumerate(self.parinfo):
for partype in ('shift','offset','velo'):
if partype in str.lower(pi['parname']):
if frame is not None:
self.modelpars[ii] = self.Spectrum.xarr.x_in_frame(self.modelpars[ii], frame)
def moments(self, fittype=None, **kwargs):
"""
Return the moments
see the :mod:`~pyspeckit.spectrum.moments` module
Parameters
----------
fittype : None or str
The registered fit type to use for moment computation
"""
if fittype is None:
fittype = self.fittype
return self.Registry.multifitters[fittype].moments(
self.Spectrum.xarr[self.xmin:self.xmax],
self.spectofit[self.xmin:self.xmax], **kwargs)
    def button3action(self, event, debug=False, nwidths=1):
        """
        Disconnect the interactiveness
        Perform the fit (or die trying)
        Hide the guesses

        Parameters
        ----------
        event : matplotlib event
            The triggering mouse/key event (not used in the body)
        debug : bool
            Unused; retained for interface compatibility
        nwidths : int
            Number of width parameters per peak, so each peak contributes
            ``2 + nwidths`` guess values (amplitude, center, widths)
        """
        # stop listening for the interactive click/keypress callbacks
        self.Spectrum.plotter.figure.canvas.mpl_disconnect(self.click)
        self.Spectrum.plotter.figure.canvas.mpl_disconnect(self.keyclick)
        npars = 2+nwidths
        if self.npeaks > 0:
            log.info("{0} Guesses : {1} X channel range: {2}-{3}"
                     .format(len(self.guesses)/npars, self.guesses, self.xmin,
                             self.xmax))
            # only fit if we have a whole number of (amp, center, widths) sets
            if len(self.guesses) % npars == 0:
                self.multifit(use_window_limits=True)
                # hide the interactive guess markers now that the fit is done
                for p in self.button2plot + self.button1plot:
                    p.set_visible(False)
            else:
                log.error("Wrong # of parameters")
        # disconnect interactive window (and more importantly, reconnect to
        # original interactive cmds)
        self.clear_all_connections()
    def copy(self, parent=None):
        """
        Create a copy of the spectral fit - includes copies of the _full_model,
        the registry, the fitter, parinfo, modelpars, modelerrs, model, npeaks

        Parameters
        ----------
        parent : `~Spectrum` or None
            A `~Spectrum` instance that is the parent of the specfit
            instance.  This needs to be specified at some point, but defaults
            to None to prevent overwriting a previous plot.
        """
        newspecfit = Specfit(parent, copy.deepcopy(self.Registry))
        newspecfit.parinfo = copy.deepcopy(self.parinfo)
        if newspecfit.parinfo is None:
            newspecfit.modelpars = None
            newspecfit.modelerrs = None
        else:
            # keep modelpars/modelerrs consistent with the copied parinfo
            newspecfit.modelpars = newspecfit.parinfo.values
            newspecfit.modelerrs = newspecfit.parinfo.errors
        newspecfit.includemask = self.includemask.copy()
        # model is a shallow copy; deepcopy is unnecessary for a plain array
        newspecfit.model = copy.copy( self.model )
        newspecfit.npeaks = self.npeaks
        if hasattr(self,'fitter'):
            newspecfit.fitter = copy.deepcopy( self.fitter )
            # the copied fitter must point at the copied parinfo, not ours
            newspecfit.fitter.parinfo = newspecfit.parinfo
        if hasattr(self,'fullmodel'):
            # regenerate (rather than copy) the full-size model on the copy
            newspecfit._full_model()
        # this is ridiculous, absurd, and infuriating...
        newspecfit.button2action = newspecfit.guesspeakwidth
        # attach the parent's plotter (if any) so interactive use keeps working
        if parent is not None:
            newspecfit.Spectrum.plotter = parent.plotter
        else:
            newspecfit.Spectrum.plotter = None
        return newspecfit
    def __copy__(self):
        """Support :func:`copy.copy` by delegating to :meth:`copy`, using the
        current Spectrum as the parent."""
        return self.copy(parent=self.Spectrum)
def add_sliders(self, parlimitdict=None, **kwargs):
"""
Add a Sliders window in a new figure appropriately titled
Parameters
----------
parlimitdict: dict
Each parameter needs to have displayed limits; these are set in
min-max pairs. If this is left empty, the widget will try to guess
at reasonable limits, but the guessing is not very sophisticated
yet.
.. todo:: Add a button in the navbar that makes this window pop up
http://stackoverflow.com/questions/4740988/add-new-navigate-modes-in-matplotlib
"""
if parlimitdict is None:
# try to create a reasonable parlimit dict
parlimitdict = {}
for param in self.parinfo:
if not param.parname in parlimitdict:
if any( (x in param['parname'].lower() for x in ('shift','xoff')) ):
lower, upper = (self.Spectrum.xarr[self.includemask].min().value,
self.Spectrum.xarr[self.includemask].max().value)
elif any( (x in param['parname'].lower() for x in ('width','fwhm')) ):
xvalrange = (self.Spectrum.xarr[self.includemask].max().value -
self.Spectrum.xarr[self.includemask].min().value)
lower,upper = (0,xvalrange)
elif any( (x in param['parname'].lower() for x in ('amp','peak','height')) ):
datarange = self.spectofit.max() - self.spectofit.min()
lower,upper = (param['value']-datarange, param['value']+datarange)
else:
lower = param['value'] * 0.1
upper = param['value'] * 10
# override guesses with limits
if param.limited[0]:
# nextafter -> next representable float
lower = np.nextafter(param.limits[0], param.limits[0]+1)
if param.limited[1]:
upper = np.nextafter(param.limits[1], param.limits[1]-1)
parlimitdict[param.parname] = (lower,upper)
if hasattr(self,'fitter'):
self.SliderWidget = widgets.FitterSliders(self,
self.Spectrum.plotter.figure,
npars=self.fitter.npars,
parlimitdict=parlimitdict,
**kwargs)
else:
log.error("Must have a fitter instantiated before creating sliders")
def optimal_chi2(self, reduced=True, threshold='error', **kwargs):
"""
Compute an "optimal" :math:`\chi^2` statistic, i.e. one in which only pixels in
which the model is statistically significant are included
Parameters
----------
reduced : bool
Return the reduced :math:`\chi^2`
threshold : 'auto' or 'error' or float
If 'auto', the threshold will be set to peak_fraction * the peak
model value, where peak_fraction is a kwarg passed to
get_model_xlimits reflecting the fraction of the model peak
to consider significant
If 'error', uses the error spectrum as the threshold
kwargs : dict
passed to :meth:`get_model_xlimits`
Returns
-------
chi2 : float
:math:`\chi^2` statistic or reduced :math:`\chi^2` statistic (:math:`\chi^2/n`)
.. math::
\chi^2 = \sum( (d_i - m_i)^2 / e_i^2 )
"""
modelmask = self._compare_to_threshold(threshold=threshold, **kwargs)
chi2 = np.sum((self.fullresiduals[modelmask]/self.errspec[modelmask])**2)
if reduced:
# vheight included here or not? assuming it should be...
dof = (modelmask.sum() -
self.fitter.npars - self.vheight +
np.sum(self.parinfo.fixed))
return chi2/dof
else:
return chi2
def get_pymc(self, **kwargs):
"""
Create a pymc MCMC sampler from the current fitter. Defaults to 'uninformative' priors
`kwargs` are passed to the fitter's get_pymc method, with parameters defined below.
Parameters
----------
data : np.ndarray
error : np.ndarray
use_fitted_values : bool
Each parameter with a measured error will have a prior defined by
the Normal distribution with sigma = par.error and mean = par.value
Examples
--------
>>> x = pyspeckit.units.SpectroscopicAxis(np.linspace(-10,10,50), unit='km/s')
>>> e = np.random.randn(50)
>>> d = np.exp(-np.asarray(x)**2/2.)*5 + e
>>> sp = pyspeckit.Spectrum(data=d, xarr=x, error=np.ones(50)*e.std())
>>> sp.specfit(fittype='gaussian')
>>> MCuninformed = sp.specfit.get_pymc()
>>> MCwithpriors = sp.specfit.get_pymc(use_fitted_values=True)
>>> MCuninformed.sample(1000)
>>> MCuninformed.stats()['AMPLITUDE0']
>>> # WARNING: This will fail because width cannot be set <0, but it may randomly reach that...
>>> # How do you define a likelihood distribution with a lower limit?!
>>> MCwithpriors.sample(1000)
>>> MCwithpriors.stats()['AMPLITUDE0']
"""
if hasattr(self.fitter,'get_pymc'):
return self.fitter.get_pymc(self.Spectrum.xarr, self.spectofit,
self.errspec, **kwargs)
else:
raise AttributeError("Fitter %r does not have pymc implemented." % self.fitter)
def get_emcee(self, nwalkers=None, **kwargs):
"""
Get an emcee walker ensemble for the data & model using the current model type
Parameters
----------
data : np.ndarray
error : np.ndarray
nwalkers : int
Number of walkers to use. Defaults to 2 * self.fitters.npars
Examples
--------
>>> import pyspeckit
>>> x = pyspeckit.units.SpectroscopicAxis(np.linspace(-10,10,50), unit='km/s')
>>> e = np.random.randn(50)
>>> d = np.exp(-np.asarray(x)**2/2.)*5 + e
>>> sp = pyspeckit.Spectrum(data=d, xarr=x, error=np.ones(50)*e.std())
>>> sp.specfit(fittype='gaussian')
>>> emcee_ensemble = sp.specfit.get_emcee()
>>> p0 = emcee_ensemble.p0 * (np.random.randn(*emcee_ensemble.p0.shape) / 10. + 1.0)
>>> pos,logprob,state = emcee_ensemble.run_mcmc(p0,100)
"""
if hasattr(self.fitter,'get_emcee_ensemblesampler'):
nwalkers = (self.fitter.npars * self.fitter.npeaks + self.fitter.vheight) * 2
emc = self.fitter.get_emcee_ensemblesampler(self.Spectrum.xarr,
self.spectofit,
self.errspec, nwalkers)
emc.nwalkers = nwalkers
emc.p0 = np.array([self.parinfo.values] * emc.nwalkers)
return emc
def get_components(self, **kwargs):
"""
If a model has been fitted, return the components of the model
Parameters
----------
kwargs are passed to self.fitter.components
"""
if self.modelpars is not None:
self.modelcomponents = self.fitter.components(self.Spectrum.xarr,
self.modelpars, **kwargs)
return self.modelcomponents
    def measure_approximate_fwhm(self, threshold='error', emission=True,
                                 interpolate_factor=1, plot=False,
                                 grow_threshold=2, **kwargs):
        """
        Measure the FWHM of a fitted line

        This procedure is designed for multi-component *blended* lines; if the
        true FWHM is known (i.e., the line is well-represented by a single
        gauss/voigt/lorentz profile), use that instead.  Do not use this for
        multiple independently peaked profiles.

        This MUST be run AFTER a fit has been performed!

        Parameters
        ----------
        threshold : 'error' | float
            The threshold above which the spectrum will be interpreted as part
            of the line.  This threshold is applied to the *model*.  If it is
            'error', self.Spectrum.error will be used.
        emission : bool
            Is the line absorption or emission?
        interpolate_factor : integer
            Magnification factor for determining sub-pixel FWHM.  If used,
            "zooms-in" by using linear interpolation within the line region
        plot : bool
            Overplot a line at the FWHM indicating the FWHM.  kwargs
            are passed to matplotlib.plot
        grow_threshold : int
            Minimum number of valid points.  If the total # of points above the
            threshold is <= to this number, it will be grown by 1 pixel on each side

        Returns
        -------
        The approximated FWHM, if it can be computed
        If there are <= 2 valid pixels, a fwhm cannot be computed
        """
        if threshold == 'error':
            threshold = self.Spectrum.error
            # degenerate error spectrum (all zero): fall back to a small
            # fraction of the data peak as the threshold
            if np.all(self.Spectrum.error==0):
                threshold = 1e-3*self.Spectrum.data.max()
        # work on baseline-subtracted data; "* 1" forces a copy
        if self.Spectrum.baseline.subtracted is False:
            data = self.Spectrum.data - self.Spectrum.baseline.basespec
        else:
            data = self.Spectrum.data * 1
        model = self.get_full_model(add_baseline=False)
        if np.count_nonzero(model) == 0:
            raise ValueError("The model is all zeros. No FWHM can be "
                             "computed.")
        # can modify inplace because data is a copy of self.Spectrum.data
        if not emission:
            data *= -1
            model *= -1
        # pixels considered part of the line: model above threshold
        line_region = model > threshold
        if line_region.sum() == 0:
            raise ValueError("No valid data included in FWHM computation")
        if line_region.sum() <= grow_threshold:
            # too few pixels: widen the region by one pixel on each side
            # (argmax finds the first True; the reversed argmax the last)
            line_region[line_region.argmax()-1:line_region.argmax()+1] = True
            reverse_argmax = len(line_region) - line_region.argmax() - 1
            line_region[reverse_argmax-1:reverse_argmax+1] = True
            log.warn("Fewer than {0} pixels were identified as part of the fit."
                     " To enable statistical measurements, the range has been"
                     " expanded by 2 pixels including some regions below the"
                     " threshold.".format(grow_threshold))
        # determine peak (because data is neg if absorption, always use max)
        peak = data[line_region].max()
        xarr = self.Spectrum.xarr[line_region]
        xarr.make_dxarr()
        # smallest pixel spacing within the line region
        cd = xarr.dxarr.min()
        if interpolate_factor > 1:
            # resample the line region onto a finer grid so the half-max
            # positions can be located with sub-pixel precision
            newxarr = units.SpectroscopicAxis(np.arange(xarr.min().value-cd,
                                                        xarr.max().value+cd,
                                                        cd /
                                                        float(interpolate_factor)
                                                        ),
                                              unit=xarr.unit,
                                              equivalencies=xarr.equivalencies
                                             )
            # load the metadata from xarr
            # newxarr._update_from(xarr)
            data = np.interp(newxarr,xarr,data[line_region])
            xarr = newxarr
        else:
            data = data[line_region]
        # need the peak location so we can find left/right half-max locations
        peakloc = data.argmax()
        hm_left = np.argmin(np.abs(data[:peakloc]-peak/2.))
        hm_right = np.argmin(np.abs(data[peakloc:]-peak/2.)) + peakloc
        # FWHM = distance between the left and right half-max positions
        deltax = xarr[hm_right]-xarr[hm_left]
        if plot:
            # for plotting, use a negative if absorption
            sign = 1 if emission else -1
            # shift with baseline if baseline is plotted
            if not self.Spectrum.baseline.subtracted:
                basespec = self.Spectrum.baseline.get_model(xarr)
                yoffleft = self.Spectrum.plotter.offset + basespec[hm_left]
                yoffright = self.Spectrum.plotter.offset + basespec[hm_right]
            else:
                yoffleft = yoffright = self.Spectrum.plotter.offset
            log.debug("peak={2} yoffleft={0} yoffright={1}".format(yoffleft, yoffright, peak))
            log.debug("hm_left={0} hm_right={1} xarr[hm_left]={2} xarr[hm_right]={3}".format(hm_left, hm_right, xarr[hm_left], xarr[hm_right]))
            # draw a horizontal segment connecting the two half-max points
            self.Spectrum.plotter.axis.plot([xarr[hm_right].value,
                                             xarr[hm_left].value],
                                            np.array([sign*peak/2.+yoffleft,
                                                      sign*peak/2.+yoffright]),
                                            **kwargs)
            self.Spectrum.plotter.refresh()
        # debug print hm_left,hm_right,"FWHM: ",deltax
        # debug self.Spectrum.plotter.axis.plot(xarr,data,color='magenta')
        # debug self.Spectrum.plotter.refresh()
        # debug raise TheDead
        return deltax
def _validate_parinfo(self, parinfo, mode='fix'):
assert mode in ('fix','raise','check','guesses')
any_out_of_range = []
for param in parinfo:
if (param.limited[0] and (param.value < param.limits[0])):
if (np.allclose(param.value, param.limits[0])):
# nextafter -> next representable float
if mode in ('fix', 'guesses'):
warn("{0} is less than the lower limit {1}, but very close."
" Converting to {1}+ULP".format(param.value,
param.limits[0]))
param.value = np.nextafter(param.limits[0], param.limits[0]+1)
elif mode == 'raise':
raise ValueError("{0} is less than the lower limit {1}, but very close."
.format(param.value, param.limits[1]))
elif mode == 'check':
any_out_of_range.append("lt:close",)
else:
raise ValueError("{0} is less than the lower limit {1}"
.format(param.value, param.limits[0]))
elif mode == 'check':
any_out_of_range.append(False)
if (param.limited[1] and (param.value > param.limits[1])):
if (np.allclose(param.value, param.limits[1])):
if mode in ('fix', 'guesses'):
param.value = np.nextafter(param.limits[1], param.limits[1]-1)
warn("{0} is greater than the upper limit {1}, but very close."
" Converting to {1}-ULP".format(param.value,
param.limits[1]))
elif mode == 'raise':
raise ValueError("{0} is greater than the upper limit {1}, but very close."
.format(param.value, param.limits[1]))
elif mode == 'check':
any_out_of_range.append("gt:close")
else:
raise ValueError("{0} is greater than the upper limit {1}"
.format(param.value, param.limits[0]))
elif mode == 'check':
any_out_of_range.append(False)
if mode == 'guesses':
return parinfo.values
return any_out_of_range
| {
"content_hash": "fdd4ea92111cff1147f1204cc0c06853",
"timestamp": "",
"source": "github",
"line_count": 1999,
"max_line_length": 171,
"avg_line_length": 44.05702851425713,
"alnum_prop": 0.5583967298739639,
"repo_name": "mikelum/pyspeckit",
"id": "3450a94651b3566fbbdef85c08e3e8aa94d4dcc9",
"size": "88070",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyspeckit/spectrum/fitters.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "53"
},
{
"name": "Python",
"bytes": "1198082"
},
{
"name": "Shell",
"bytes": "313"
}
],
"symlink_target": ""
} |
from django.contrib.auth.models import User
from rhinocloud.utils import random_generator
def generate_username_from_email(sender, instance, **kwargs):
    """Assign a randomly generated, unique username derived from the user's
    email address (only acts on ``User`` instances)."""
    if not isinstance(instance, User):
        return
    candidate = random_generator(instance.email[:25])
    # regenerate until the candidate does not collide with an existing user
    while sender.objects.filter(username=candidate).exists():
        candidate = random_generator(candidate[:25])
    instance.username = candidate
def username_shorten(sender, instance, **kwargs):
    """Truncate ``instance.username`` to the 30-character field limit."""
    if sender == User and len(instance.username) > 30:
        instance.username = instance.username[:30]
def first_name_shorten(sender, instance, **kwargs):
    """Truncate ``instance.first_name`` to the 30-character field limit."""
    if sender == User and len(instance.first_name) > 30:
        instance.first_name = instance.first_name[:30]
def last_name_shorten(sender, instance, **kwargs):
    """Truncate ``instance.last_name`` to the 30-character field limit.

    BUGFIX: the length check previously inspected ``instance.username``
    instead of ``instance.last_name`` (copy-paste from username_shorten),
    so a long last name was only truncated when the username also happened
    to exceed 30 characters.
    """
    if sender == User:
        if len(instance.last_name) > 30:
            instance.last_name = instance.last_name[:30]
| {
"content_hash": "a2abb4a50a6e3c86af082fd42dd4d7f1",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 64,
"avg_line_length": 37.19230769230769,
"alnum_prop": 0.6463288521199586,
"repo_name": "allanlei/rhinocloud-utils",
"id": "6be41bcd6dd8fa15d119543ee2ff31162b5f3964",
"size": "967",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rhinocloud/contrib/auth/signals.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "104927"
}
],
"symlink_target": ""
} |
"""
Non-cython methods for getting counts and distributions from data.
"""
import numpy as np
__all__ = (
'counts_from_data',
'distribution_from_data',
'get_counts',
)
try: # cython
from .pycounts import counts_from_data, distribution_from_data
except ImportError: # no cython
from boltons.iterutils import windowed_iter
from collections import Counter, defaultdict
from itertools import product
from .. import modify_outcomes
from ..exceptions import ditException
    def counts_from_data(data, hLength, fLength, marginals=True, alphabet=None, standardize=True):
        """
        Returns conditional counts from `data`.

        To obtain counts for joint distribution only, use fLength=0.

        Parameters
        ----------
        data : NumPy array
            The data used to calculate morphs. Note: `data` cannot be a generator.
            Also, if standardize is True, then data can be any indexable iterable,
            such as a list or tuple.
        hLength : int
            The maximum history word length used to calculate morphs.
        fLength : int
            The length of future words that defines the morph.
        marginals : bool
            If True, then the morphs for all history words from L=0 to L=hLength
            are calculated. If False, only histories of length L=hLength are
            calculated.
        alphabet : list
            The alphabet to use when creating the morphs. If `None`, then one is
            obtained from `data`. If not `None`, then the provided alphabet
            supplements what appears in the data. So the data is always scanned
            through in order to get the proper alphabet.
        standardize : bool
            The algorithm requires that the symbols in data be standardized to
            a canonical alphabet consisting of integers from 0 to k-1, where k
            is the alphabet size. If `data` is already standard, then an extra
            pass through the data can be avoided by setting `standardize` to
            `False`, but note: if `standardize` is False, then data MUST be a
            NumPy array.

        Returns
        -------
        histories : list
            A list of observed histories, corresponding to the rows in `cCounts`.
        cCounts : NumPy array
            A NumPy array representing conditional counts. The rows correspond to
            the observed histories, so this is sparse. The number of rows in this
            array cannot be known in advance, but the number of columns will be
            equal to the alphabet size raised to the `fLength` power.
        hCounts : NumPy array
            A 1D array representing the count of each history word.
        alphabet : tuple
            The ordered tuple representing the alphabet of the data. If `None`,
            the one is created from the data.

        Notes
        -----
        This requires three complete passes through the data. One to obtain
        the full alphabet. Another to standardize the data. A final pass to
        obtain the counts.

        This is implemented densely. So during the course of the algorithm,
        we work with a large array containing a row for each possible history.
        Only the rows corresponding to observed histories are returned.

        NOTE(review): this pure-Python fallback never reads `marginals` or
        `standardize`; presumably only the cython implementation honors
        them — confirm against the cython module.
        """
        # Normalize each sample to a tuple so windows hash and sort
        # consistently; non-mappable data (TypeError) is used as-is.
        try:
            data = list(map(tuple, data))
        except TypeError:
            pass

        # Count every sliding window of length hLength + fLength, then split
        # each window into (history, future) and accumulate conditionals.
        counts = Counter(windowed_iter(data, hLength + fLength))
        cond_counts = defaultdict(lambda: defaultdict(int))
        for word, count in counts.items():
            cond_counts[word[:hLength]][word[hLength:]] += count

        # NOTE(review): `histories` here are the *full* windowed words, while
        # cond_counts is keyed by hLength-prefixes; the lookups below only
        # line up when fLength == 0 (which is how both in-file callers,
        # distribution_from_data and get_counts, invoke this) — confirm
        # before using fLength > 0 on this fallback path.
        histories = sorted(counts.keys())
        alphabet = set(alphabet) if alphabet is not None else set()
        alphabet = tuple(sorted(alphabet.union(*[set(hist) for hist in histories])))

        # Densely fill the conditional count matrix: one row per observed
        # history, one column per possible future word (alphabet^fLength).
        cCounts = np.empty((len(histories), len(alphabet)**fLength))
        for i, hist in enumerate(histories):
            for j, future in enumerate(product(alphabet, repeat=fLength)):
                cCounts[i, j] = cond_counts[hist][future]

        # Per-history totals are the row sums of the conditional counts.
        hCounts = cCounts.sum(axis=1)
        return histories, cCounts, hCounts, alphabet
    def distribution_from_data(d, L, trim=True, base=None):
        """
        Returns a distribution over words of length `L` from `d`.

        The returned distribution is the naive estimate of the distribution,
        which assigns probabilities equal to the number of times a particular
        word appeared in the data divided by the total number of times a word
        could have appeared in the data.

        Roughly, it corresponds to the stationary distribution of a maximum
        likelihood estimate of the transition matrix of an (L-1)th order Markov
        chain.

        Parameters
        ----------
        d : list
            A list of symbols to be converted into a distribution.
        L : integer
            The length of the words for the distribution.
        trim : bool
            If true, then words with zero probability are trimmed from the
            distribution.
        base : int or string
            The desired base of the returned distribution. If `None`, then the
            value of `dit.ditParams['base']` is used.
        """
        # Function-level import; presumably deferred to avoid a circular
        # import, since dit itself imports this subpackage — TODO confirm.
        from dit import ditParams, Distribution

        # Normalize samples to tuples (same convention as counts_from_data);
        # non-mappable data is used as-is.
        try:
            d = list(map(tuple, d))
        except TypeError:
            pass

        if base is None:
            base = ditParams['base']

        # fLength=0 yields joint counts over words of length L.
        words, _, counts, _ = counts_from_data(d, L, 0)

        # We turn the counts to probabilities
        pmf = counts / counts.sum()

        dist = Distribution(words, pmf, trim=trim)
        dist.set_base(base)
        if L == 1:
            # Unwrap length-1 tuples to bare symbols; tolerate outcomes that
            # cannot be modified this way.
            try:
                dist = modify_outcomes(dist, lambda o: o[0])
            except ditException:
                pass
        return dist
def get_counts(data, length):
    """
    Count the occurrences of all words of `length` in `data`.

    Parameters
    ----------
    data : iterable
        The sequence of samples.
    length : int
        The length to group samples into.

    Returns
    -------
    counts : np.array
        Array with the count values, one entry per observed word of the
        requested length.
    """
    # fLength=0 gives joint counts; the third return value holds the
    # per-word totals aligned with `histories`.
    histories, _, word_counts, _ = counts_from_data(data, length, 0)
    # Keep only the counts whose word really has the requested length.
    keep = np.fromiter(
        (len(word) == length for word in histories),
        dtype=bool, count=len(histories))
    return word_counts[keep]
| {
"content_hash": "d01f335ee4bd40f0b1e3790d7784ba77",
"timestamp": "",
"source": "github",
"line_count": 185,
"max_line_length": 98,
"avg_line_length": 34.556756756756755,
"alnum_prop": 0.6137963397465979,
"repo_name": "dit/dit",
"id": "904094329ff8325a20831b3a1e7275e3f63505a3",
"size": "6393",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dit/inference/counts.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "5938"
},
{
"name": "Cython",
"bytes": "16890"
},
{
"name": "HTML",
"bytes": "265"
},
{
"name": "PHP",
"bytes": "614"
},
{
"name": "Python",
"bytes": "1297093"
},
{
"name": "Shell",
"bytes": "152"
},
{
"name": "TeX",
"bytes": "6951"
}
],
"symlink_target": ""
} |
# Demonstration script (Jupyter "percent" cell format): download USGS NWIS
# daily streamflow with tsgettoolbox, then plot and aggregate it with
# tstoolbox. Each tsgettoolbox/tstoolbox call returns a pandas DataFrame.
from tsgettoolbox import tsgettoolbox

# %% [markdown]
# Let's say that I want flow (parameterCd=00060) for site '02325000'. All of the tsgettoolbox functions create a _pandas_ DataFrame.

# %%
# Daily values (dv) of discharge for one gauge, from 2000-01-01 onward.
df = tsgettoolbox.nwis_dv(sites="02325000", startDT="2000-01-01", parameterCd="00060")

# %%
df.head()  # The .head() function gives the first 5 values of the time-series

# %% [markdown]
# ## 'tstoolbox ...': Process data using 'tstoolbox'
# Now lets use "tstoolbox" to plot the time-series. The 'input_ts' option is used to read in the time-series from the DataFrame.

# %%
from tstoolbox import tstoolbox

# %%
tstoolbox.plot(input_ts=df, ofilename="plot_api.png")

# %% [markdown]
# ![](plot_api.png)

# %% [markdown]
# 'tstoolbox plot' has many options that can be used to modify the plot.

# %%
tstoolbox.plot(
    input_ts=df,
    ofilename="flow_api.png",
    ytitle="Flow (cfs)",
    title="02325000: FENHOLLOWAY RIVER NEAR PERRY, FLA",
    legend=False,
)

# %% [markdown]
# ![](flow_api.png)

# %%
# Aggregate the daily series to monthly means before plotting.
mdf = tstoolbox.aggregate(input_ts=df, agg_interval="M", statistic="mean")

# %%
tstoolbox.plot(input_ts=mdf, drawstyle="steps-pre", ofilename="flow_api_monthly.png")

# %% [markdown]
# ![](flow_api_monthly.png)

# %%
| {
"content_hash": "7f63f05ed7fc322900a65e173e8dff9d",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 133,
"avg_line_length": 24.959183673469386,
"alnum_prop": 0.6729354047424366,
"repo_name": "timcera/tsgettoolbox",
"id": "95e7d88e637c5635bc8385dcda4031ff21b7350b",
"size": "2061",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "notebooks/tsgettoolbox-nwis-api.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "49109"
},
{
"name": "Python",
"bytes": "2676079"
},
{
"name": "Shell",
"bytes": "2740"
}
],
"symlink_target": ""
} |
""" DB Reporting Wrapper
"""
from .database import Database
# Deprecated: prefer the Database class above
from .database import retrieve_data
from .database import insert_data
from .database import insert_data_many
from .database import run_command
| {
"content_hash": "6f58a840206bceaa25be523253f5b6f3",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 38,
"avg_line_length": 21.9,
"alnum_prop": 0.7899543378995434,
"repo_name": "aguinane/dbReportingWrapper",
"id": "08eda36ce1eed583fec852ca28979a091c21ef2d",
"size": "219",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "dbreportingwrapper/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20874"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import webnotes, json
from webnotes.utils import cint, now, cstr
from webnotes import _
class DocType:
    """Controller for the `Profile` doctype (a webnotes/ERPNext user account).

    The framework invokes the hook methods (`autoname`, `validate`,
    `on_update`, `on_trash`, `on_rename`); the remaining methods are
    helpers for password handling, role management and account e-mails.

    NOTE: this file uses Python 2 syntax and is not Python 3 compatible.
    """

    def __init__(self, doc, doclist):
        # `doc` is the Profile document; `doclist` additionally holds its
        # child rows (e.g. the UserRole table entries).
        self.doc = doc
        self.doclist = doclist

    def autoname(self):
        """set name as email id"""
        if self.doc.name not in ('Guest','Administrator'):
            self.doc.email = self.doc.email.strip()
            self.doc.name = self.doc.email

    def validate(self):
        """Framework validate hook: email format, user cap, role sanity."""
        # Remember whether this is a brand-new document; consumed later by
        # update_new_password to choose between welcome and reset mails.
        self.in_insert = self.doc.fields.get("__islocal")
        if self.doc.name not in ('Guest','Administrator'):
            self.validate_email_type(self.doc.email)
        self.validate_max_users()
        self.add_system_manager_role()
        # A brand-new profile must come with an initial password.
        if self.doc.fields.get('__islocal') and not self.doc.new_password:
            webnotes.msgprint("Password required while creating new doc", raise_exception=1)

    def check_enable_disable(self):
        """Guard the `enabled` flag and log out users who get disabled."""
        # do not allow disabling administrator/guest
        if not cint(self.doc.enabled) and self.doc.name in ["Administrator", "Guest"]:
            webnotes.msgprint("Hey! You cannot disable user: %s" % self.doc.name,
                raise_exception=1)

        # Disabling this account must not leave the site without any
        # System Manager.
        if not cint(self.doc.enabled):
            self.a_system_manager_should_exist()

        # clear sessions if disabled
        if not cint(self.doc.enabled) and getattr(webnotes, "login_manager", None):
            webnotes.login_manager.logout(user=self.doc.name)

    def validate_max_users(self):
        """don't allow more than max users if set in conf"""
        import conf
        # check only when enabling a user
        if hasattr(conf, 'max_users') and self.doc.enabled and \
                self.doc.name not in ["Administrator", "Guest"] and \
                cstr(self.doc.user_type).strip() in ("", "System User"):
            # Count other enabled "System User" profiles (partner/guest
            # accounts don't count toward the licensed cap).
            active_users = webnotes.conn.sql("""select count(*) from tabProfile
where ifnull(enabled, 0)=1 and docstatus<2
and ifnull(user_type, "System User") = "System User"
and name not in ('Administrator', 'Guest', %s)""", (self.doc.name,))[0][0]

            if active_users >= conf.max_users and conf.max_users:
                webnotes.msgprint("""
You already have <b>%(active_users)s</b> active users, \
which is the maximum number that you are currently allowed to add. <br /><br /> \
So, to add more users, you can:<br /> \
1. <b>Upgrade to the unlimited users plan</b>, or<br /> \
2. <b>Disable one or more of your existing users and try again</b>""" \
% {'active_users': active_users}, raise_exception=1)

    def add_system_manager_role(self):
        """Ensure at least one System Manager exists by auto-granting the
        role to this (enabled) profile when no other manager remains."""
        # if adding system manager, do nothing
        if not cint(self.doc.enabled) or ("System Manager" in [user_role.role for user_role in
                self.doclist.get({"parentfield": "user_roles"})]):
            return

        if not self.get_other_system_managers():
            webnotes.msgprint("""Adding System Manager Role as there must
be atleast one 'System Manager'.""")
            self.doclist.append({
                "doctype": "UserRole",
                "parentfield": "user_roles",
                "role": "System Manager"
            })

    def on_update(self):
        """Framework on_update hook."""
        # owner is always name
        webnotes.conn.set(self.doc, 'owner', self.doc.name)
        self.update_new_password()
        self.check_enable_disable()

    def update_new_password(self):
        """update new password if set"""
        if self.doc.new_password:
            from webnotes.auth import update_password
            update_password(self.doc.name, self.doc.new_password)

            if self.in_insert:
                # Fresh account: optionally send the welcome mail.
                webnotes.msgprint("New user created. - %s" % self.doc.name)
                if cint(self.doc.send_invite_email):
                    webnotes.msgprint("Sent welcome mail.")
                    self.send_welcome_mail(self.doc.new_password)
            else:
                # Existing account: always notify about the reset.
                self.password_reset_mail(self.doc.new_password)
                webnotes.msgprint("Password updated.")

            # Never persist the plaintext password on the document.
            webnotes.conn.set(self.doc, 'new_password', '')

    def get_other_system_managers(self):
        # Enabled System Managers other than this profile and Administrator.
        return webnotes.conn.sql("""select distinct parent from tabUserRole user_role
where role='System Manager' and docstatus<2
and parent not in ('Administrator', %s) and exists
(select * from `tabProfile` profile
where profile.name=user_role.parent and enabled=1)""", (self.doc.name,))

    def get_fullname(self):
        """get first_name space last_name"""
        return (self.doc.first_name or '') + \
            (self.doc.first_name and " " or '') + (self.doc.last_name or '')

    def password_reset_mail(self, password):
        """reset password"""
        # Markdown template interpolated in send_login_mail via `txt % args`.
        txt = """
## Password Reset
Dear %(first_name)s,
Your password has been reset. Your new password is:
password: %(password)s
To login to %(product)s, please go to:
%(login_url)s
Thank you,<br>
%(user_fullname)s
"""
        self.send_login_mail("Your ERPNext password has been reset", txt, password)

    def send_welcome_mail(self, password):
        """send welcome mail to user with password and login url"""
        import startup
        # Markdown template interpolated in send_login_mail via `txt % args`.
        txt = """
## %(company)s
Dear %(first_name)s,
A new account has been created for you, here are your details:
Login Id: %(user)s<br>
Password: %(password)s
To login to your new %(product)s account, please go to:
%(login_url)s
Thank you,<br>
%(user_fullname)s
"""
        self.send_login_mail("Welcome to " + startup.product_name, txt, password)

    def send_login_mail(self, subject, txt, password):
        """send mail with login details"""
        import os
        import startup
        from webnotes.utils.email_lib import sendmail_md
        from webnotes.profile import get_user_fullname
        from webnotes.utils import get_request_site_address
        # Values substituted into the markdown templates above.
        args = {
            'first_name': self.doc.first_name or self.doc.last_name or "user",
            'user': self.doc.name,
            'password': password,
            'company': webnotes.conn.get_default('company') or startup.product_name,
            'login_url': get_request_site_address(),
            'product': startup.product_name,
            'user_fullname': get_user_fullname(webnotes.session['user'])
        }
        # Send as the logged-in user unless it is a built-in account.
        sender = webnotes.session.user not in ("Administrator", "Guest") and webnotes.session.user or None
        sendmail_md(recipients=self.doc.email, sender=sender, subject=subject, msg=txt % args)

    def a_system_manager_should_exist(self):
        # Refuse operations that would leave zero System Managers.
        if not self.get_other_system_managers():
            webnotes.msgprint(_("""Hey! There should remain at least one System Manager"""),
                raise_exception=True)

    def on_trash(self):
        """Framework delete hook: block built-ins, then scrub user data."""
        if self.doc.name in ["Administrator", "Guest"]:
            # NOTE(review): `self.name` is not set anywhere on this
            # controller — this looks like it should be `self.doc.name`
            # and would raise AttributeError if this branch is reached;
            # confirm before relying on the message.
            webnotes.msgprint("""Hey! You cannot delete user: %s""" % (self.name, ),
                raise_exception=1)

        self.a_system_manager_should_exist()

        # disable the user and log him/her out
        self.doc.enabled = 0
        if getattr(webnotes, "login_manager", None):
            webnotes.login_manager.logout(user=self.doc.name)

        # delete their password
        webnotes.conn.sql("""delete from __Auth where user=%s""", self.doc.name)

        # delete todos
        webnotes.conn.sql("""delete from `tabToDo` where owner=%s""", self.doc.name)
        webnotes.conn.sql("""update tabToDo set assigned_by=null where assigned_by=%s""",
            self.doc.name)

        # delete events
        webnotes.conn.sql("""delete from `tabEvent` where owner=%s
and event_type='Private'""", self.doc.name)
        webnotes.conn.sql("""delete from `tabEvent User` where person=%s""", self.doc.name)

        # delete messages
        webnotes.conn.sql("""delete from `tabComment` where comment_doctype='Message'
and (comment_docname=%s or owner=%s)""", (self.doc.name, self.doc.name))

    def on_rename(self, newdn, olddn, merge=False):
        """Framework rename hook: rewrite ownership columns everywhere."""
        self.validate_rename(newdn, olddn)

        # Every table carries owner/modified_by audit columns; rewrite the
        # old user name in each of them.
        tables = webnotes.conn.sql("show tables")
        for tab in tables:
            desc = webnotes.conn.sql("desc `%s`" % tab[0], as_dict=1)
            has_fields = []
            for d in desc:
                if d.get('Field') in ['owner', 'modified_by']:
                    has_fields.append(d.get('Field'))
            for field in has_fields:
                webnotes.conn.sql("""\
update `%s` set `%s`=%s
where `%s`=%s""" % \
(tab[0], field, '%s', field, '%s'), (newdn, olddn))

        # set email
        webnotes.conn.sql("""\
update `tabProfile` set email=%s
where name=%s""", (newdn, newdn))

        # update __Auth table
        if not merge:
            webnotes.conn.sql("""update __Auth set user=%s where user=%s""", (newdn, olddn))

    def validate_rename(self, newdn, olddn):
        # do not allow renaming administrator and guest
        if olddn in ["Administrator", "Guest"]:
            webnotes.msgprint("""Hey! You are restricted from renaming the user: %s""" % \
                (olddn, ), raise_exception=1)

        self.validate_email_type(newdn)

    def validate_email_type(self, email):
        """Raise if `email` is not a syntactically valid e-mail address."""
        from webnotes.utils import validate_email_add

        email = email.strip()
        if not validate_email_add(email):
            webnotes.msgprint("%s is not a valid email id" % email)
            raise Exception
@webnotes.whitelist()
def get_all_roles(arg=None):
    """Return the names of all assignable roles, alphabetically ordered.

    The built-in Administrator, Guest and All roles are excluded.
    """
    rows = webnotes.conn.sql("""select name from tabRole
where name not in ('Administrator', 'Guest', 'All') order by name""")
    return [row[0] for row in rows]
@webnotes.whitelist()
def get_user_roles(arg=None):
    """Return the roles of the user given by the `uid` request parameter."""
    uid = webnotes.form_dict['uid']
    return webnotes.get_roles(uid)
@webnotes.whitelist()
def get_perm_info(arg=None):
    """Return the DocPerm rows for the role given by the `role` request
    parameter, ordered by parent doctype and permission level."""
    role = webnotes.form_dict['role']
    return webnotes.conn.sql("""select parent, permlevel, `read`, `write`, submit,
cancel, amend from tabDocPerm where role=%s
and docstatus<2 order by parent, permlevel""", role, as_dict=1)
@webnotes.whitelist()
def update_profile(fullname, password=None):
    """Update the current user's display name and, optionally, password.

    Returns a translated status message string.
    """
    # Guard: an empty display name is rejected outright.
    if not fullname:
        return _("Name is required")

    webnotes.conn.set_value("Profile", webnotes.session.user, "first_name", fullname)
    # Keep the cookie in sync so the UI shows the new name immediately.
    webnotes.add_cookies["full_name"] = fullname

    if password:
        from webnotes.auth import update_password
        update_password(webnotes.session.user, password)

    return _("Updated")
@webnotes.whitelist(allow_guest=True)
def sign_up(email, full_name):
    """Guest-facing self-registration endpoint.

    Returns a translated status message; creates a disabled-by-default
    Partner profile with a random password and mails the credentials.
    """
    profile = webnotes.conn.get("Profile", {"email": email})
    if profile:
        if profile.disabled:
            return _("Registered but disabled.")
        else:
            return _("Already Registered")
    else:
        # Crude abuse guard before creating a new profile.
        # NOTE(review): TIMEDIFF(%s, modified) > '1:00:00' counts profiles
        # modified *more* than an hour ago, which looks inverted for a
        # rate limit (likely intended `<`) — confirm the intent.
        if webnotes.conn.sql("""select count(*) from tabProfile where
TIMEDIFF(%s, modified) > '1:00:00' """, now())[0][0] > 200:
            # Python 2 raise syntax.
            raise Exception, "Too Many New Profiles"
        from webnotes.utils import random_string
        # send_invite_email=1 makes the controller mail the random password.
        profile = webnotes.bean({
            "doctype":"Profile",
            "email": email,
            "first_name": full_name,
            "enabled": 1,
            "new_password": random_string(10),
            "user_type": "Partner",
            "send_invite_email": 1
        })
        # Guests have no write permission on Profile; bypass the check for
        # this controlled insert.
        profile.ignore_permissions = True
        profile.insert()
        return _("Registration Details Emailed.")
| {
"content_hash": "5161b8d0c692648e16bf9249d6909592",
"timestamp": "",
"source": "github",
"line_count": 310,
"max_line_length": 100,
"avg_line_length": 32.806451612903224,
"alnum_prop": 0.6764995083579154,
"repo_name": "gangadhar-kadam/mic-wnframework",
"id": "f0ee9ffe9ee60f2d623799971452b0c90fa843c1",
"size": "11339",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/doctype/profile/profile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "2223249"
},
{
"name": "Python",
"bytes": "587618"
}
],
"symlink_target": ""
} |
"""
Module: mediahandler.types.tv
Module contains:
- |MHTv|
Child class of MHMediaType for the TV media type.
"""
import os
import logging
from re import escape, search, sub, IGNORECASE
import mediahandler.types
class MHTv(mediahandler.types.MHMediaType):
    """Child class of MHMediaType for the TV media type.

    Required arguments:
        - settings
            Dict or MHSettings object.
        - push
            MHPush object.

    Public method:
        - |add()|
            inherited from parent MHMediaType.
    """

    def __init__(self, settings, push):
        """Initialize the MHTv class.

        Required arguments:
            - settings
                Dict or MHSettings object.
            - push
                MHPush object.
        """
        # Set ptype and call super
        self.ptype = 'TV'
        super(MHTv, self).__init__(settings, push)
        # Run setup for video media types
        self._video_settings()
        # Set media type-specific filebot db
        self.cmd.db = 'thetvdb'

    def _process_output(self, output, file_path):
        """Parses response from _media_info() query.

        Returns good results and any skipped files.

        Extends MHMediaType function to specifically parse TV Show
        and episode information from Filebot output, turning file paths
        like ``<dst>/<show>/<season>/<name>.S01E02...`` into display
        titles like ``Show (Season 1, Episode 2)``.
        """
        info = super(MHTv, self)._process_output(output, file_path)
        (added_files, skipped_files) = info
        # Check for no new files
        if not added_files:
            return info
        # Set destination path for query (strip one trailing separator so
        # the regex below anchors cleanly).
        dst_path = self.dst_path
        if self.dst_path.endswith(os.path.sep):
            dst_path = self.dst_path[:-1]
        # Set search query: capture show folder, season folder and the
        # 2-3 digit episode number from the SxxExx file name token.
        epath = escape(dst_path)
        tv_find = r'{path}{s}(.*){s}(.*){s}.*\.S\d{{2,4}}E(\d{{2,3}})'.format(
            path=epath, s=escape(os.path.sep))
        logging.debug("Search query: %s", tv_find)
        # See what TV files were added
        new_added_files = []
        for added_file in added_files:
            # Extract info; files that don't match the pattern are dropped.
            ep_info = search(tv_find, added_file, IGNORECASE)
            if ep_info is None:
                continue
            # Episode: strip a single leading zero ('02' -> '2').
            ep_num = ep_info.group(3)
            ep_num_fix = sub('^0', '', ep_num)
            episode = "Episode %s" % ep_num_fix
            # Set title as "<show> (<season folder>, Episode N)"
            ep_title = "{0} ({1}, {2})".format(
                ep_info.group(1), ep_info.group(2), episode)
            # Append to new array
            new_added_files.append(ep_title)
        # Make sure we found episodes
        if not new_added_files:
            return self._match_error(', '.join(added_files))
        return new_added_files, skipped_files
| {
"content_hash": "ba964c1c01a6af056aae34b4a8b6e857",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 78,
"avg_line_length": 26.403846153846153,
"alnum_prop": 0.5509832483612528,
"repo_name": "ErinMorelli/em-media-handler",
"id": "5eb22d5276baf5c465ad5b6ea615f96c9e21ffb1",
"size": "3457",
"binary": false,
"copies": "1",
"ref": "refs/heads/branch-linting",
"path": "mediahandler/types/tv.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "705"
},
{
"name": "Python",
"bytes": "218593"
}
],
"symlink_target": ""
} |
import mxnet as mx
def mlp_layer(input_layer, n_hidden, activation=None, BN=False):
    """
    Build one fully-connected layer, optionally followed by an
    activation and batch normalization.

    :param input_layer: input sym
    :param n_hidden: # of hidden neurons
    :param activation: activation name (relu, sigmoid, tanh, softrelu)
        or None to leave the layer purely linear
    :param BN: when True, append a BatchNorm op after the activation
    :return: the symbol as output
    """
    out = mx.sym.FullyConnected(data=input_layer, num_hidden=n_hidden)
    if activation is not None:
        out = mx.sym.Activation(data=out, act_type=activation)
    return mx.sym.BatchNorm(out) if BN else out
def get_mlp_sym():
    """
    Build an MLP for MNIST: four hidden layers of 100 ReLU+BN units,
    a 10-way output layer and a softmax cross-entropy loss.

    :return: the mlp symbol
    """
    data = mx.sym.Variable("data")
    # Flatten the data from 4-D shape into 2-D (batch_size, num_channel*width*height)
    net = mx.sym.flatten(data=data)
    # Four identical hidden layers.
    for _ in range(4):
        net = mlp_layer(input_layer=net, n_hidden=100, activation="relu", BN=True)
    # MNIST has 10 classes
    net = mx.sym.FullyConnected(data=net, num_hidden=10)
    # Softmax with cross entropy loss
    return mx.sym.SoftmaxOutput(data=net, name='softmax')
def conv_layer(data_in, filter_size, pooling_size, is_conv,is_pooling):
    """
    :param data_in: input symbol
    :param filter_size: side length of the square convolution kernel
    :param pooling_size: side length of the square max-pooling window
    :param is_conv: when True, apply Conv(64 filters) + BatchNorm + ReLU
    :param is_pooling: when True, apply max pooling afterwards
    :return: a single convolution layer symbol
    """
    # NOTE(review): indentation reconstructed so that BatchNorm/Activation
    # sit inside `if is_conv:` — otherwise the is_conv=False calls in
    # inception_layer would hit an undefined `input_layer` (NameError).
    # Confirm against the original file.
    # input layer
    layer = data_in
    if is_conv:
        input_layer = mx.sym.Convolution(data = layer,
                                         kernel = (filter_size, filter_size),
                                         num_filter = 64,
                                         pad = (1, 1),
                                         stride = (1, 1)
                                         )
        # batchnorm
        batch_layer = mx.sym.BatchNorm(input_layer)
        # Activation layer
        layer = mx.sym.Activation(batch_layer,
                                  act_type='relu',
                                  )
    # pooling layer
    if is_pooling:
        layer = mx.sym.Pooling(layer,
                               kernel=(pooling_size,pooling_size),
                               pool_type='max'
                               )
    return layer
# Optional
def inception_layer(inputdata):
l1 = conv_layer(inputdata, 1, 1, True, False)
l21 = conv_layer(inputdata, 1, 1, True, False)
l2 = conv_layer(l21, 3, 2, True, False)
l31 = conv_layer(inputdata, 1, 1, True, False)
l3 = conv_layer(l31, 5, 2, True, False)
l41 = conv_layer(inputdata, 1, 3, False, True)
l4 = conv_layer(l41, 1, 1, True, False)
l = mx.sym.Concat(l1,l2,l3,l4)
return l
def get_conv_sym(n_layer):
    """
    Build a CNN of `n_layer` conv(3x3)+max-pool(2x2) blocks followed by
    a 10-way softmax output.

    :param n_layer: number of stacked conv+pool blocks
    :return: symbol of a convolutional neural network
    """
    data = mx.sym.Variable("data")
    net = mx.sym.flatten(data=data)
    # n_layer identical conv+pool blocks (first call folded into the loop).
    for _ in range(n_layer):
        net = conv_layer(net, 3, 2, True, True)
    net = mx.sym.Flatten(net)
    net = mx.sym.FullyConnected(net, num_hidden=10)
    # Softmax
    return mx.sym.SoftmaxOutput(data=net, name='softmax')
| {
"content_hash": "d2b7e346e8d97d1f46683d975228dc48",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 85,
"avg_line_length": 30.50467289719626,
"alnum_prop": 0.5612745098039216,
"repo_name": "amigao0502/AI",
"id": "4d479c9ded848acf5ce8ad8e471fb88fa141f26b",
"size": "3264",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mxnet-week1-part2/mlp_sym.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "682154"
},
{
"name": "Python",
"bytes": "33668"
},
{
"name": "Shell",
"bytes": "388"
}
],
"symlink_target": ""
} |
from strands_executive_msgs.msg import Task
import rospy
import json
import requests
from calendar import timegm
from dateutil import parser
from dateutil import tz
from datetime import datetime
from datetime import timedelta
from strands_executive_msgs.srv import CreateTask
from pprint import pprint
from threading import Thread
PKG = 'gcal_routine'


def rostime_str(rt):
    """Render a rospy time as 'YYYY-MM-DD HH:MM:SS <secs>' (local time)."""
    stamp = datetime.fromtimestamp(rt.secs)
    return '%s %s' % (stamp, rt.secs)
class GCal:
    """Polls a Google Calendar (or a local JSON dump of one) and mirrors
    its events as executive-framework Task messages.

    `add_cb(id, event)` / `remove_cb(id, event)` are invoked for events
    that appear in / disappear from the calendar between polls.

    NOTE: this module uses Python 2 syntax (``except Exception, e``,
    ``dict.has_key``) and is not Python 3 compatible.
    """

    def __init__(self, calendar, key, add_cb=None,
                 remove_cb=None, update_wait=60, minTimeDelta=None,
                 maxTimeDelta=None, file_name=None, time_critical=False):
        self.tz_utc = tz.gettz('UTC')
        # A file name takes precedence over the Google API URL; update()
        # branches on whether the uri starts with 'http'.
        if file_name is not None:
            self.uri = file_name
        else:
            self.uri = self._get_url(calendar, key)
        # Offset subtracted from all event times; set by shift_to_now().
        self.time_offset = rospy.Duration.from_sec(0)
        rospy.loginfo('using uri %s', self.uri)
        self.events = {}          # id+updated -> gcal event dict / Task
        self.gcal = {}            # raw parsed calendar JSON
        self.previous_events = {} # snapshot from the previous poll
        self.update_wait = update_wait          # seconds between polls
        self.add_cb = add_cb
        self.remove_cb = remove_cb
        self.minTimeDelta = minTimeDelta        # days before now (timeMin)
        self.maxTimeDelta = maxTimeDelta        # days after now (timeMax)
        self.time_critical = time_critical
        self.update_worker = Thread(target=self._update_run)

    def start_worker(self):
        """Start the background polling thread (runs _update_run)."""
        self.update_worker.start()

    def _get_url(self, calendar, key, max_results=2500):
        # Google Calendar API v3 events endpoint, expanded to single
        # events and ordered by start time.
        return 'https://www.googleapis.com/calendar/v3/calendars/' \
               '%s/events?key=%s&singleEvents=true&' \
               'orderBy=startTime&maxResults=%d' % (calendar,
                                                    key, max_results)

    def _update_run(self):
        """Polling loop: update() every `update_wait` seconds."""
        # make sure we can be killed here
        while not rospy.is_shutdown():
            added = []
            removed = []
            self.update(added, removed)
            # sleep until next check (1 s granularity so shutdown is fast)
            target = rospy.get_rostime()
            target.secs = target.secs + self.update_wait
            while rospy.get_rostime() < target and not rospy.is_shutdown():
                rospy.sleep(1)

    def shift_to_now(self):
        """Shift all event windows so the earliest one starts now.

        Sets self.time_offset so later task_from_gcal() calls are shifted
        consistently; mainly useful for replaying old calendars.
        """
        times = [s.start_after for s in self.events.values()]
        if len(times) < 1:
            return
        self.time_offset = min(times) - rospy.get_rostime()
        rospy.logdebug('now is %s', rostime_str(rospy.get_rostime()))
        for s in self.events.values():
            s.start_after = s.start_after - self.time_offset
            s.end_before = s.end_before - self.time_offset
            rospy.logdebug('new event times for %s: %s -> %s',
                           s.action,
                           rostime_str(s.start_after),
                           rostime_str(s.end_before))

    def update(self, added, removed):
        """Fetch the calendar, diff against the previous poll and fire
        callbacks. `added`/`removed` are output lists of event ids.

        Returns True when anything changed, False otherwise (or None on a
        fetch error).
        """
        self.previous_events = self.events.copy()
        if self.uri.lower().startswith('http'):
            try:
                # Constrain the query window around "now" if configured.
                uri = self.uri
                now = datetime.now()
                if self.minTimeDelta is not None:
                    mt = now - timedelta(days=self.minTimeDelta)
                    uri = "%s&timeMin=%sZ" % (uri, mt.isoformat())
                if self.maxTimeDelta is not None:
                    mt = now + timedelta(days=self.maxTimeDelta)
                    uri = "%s&timeMax=%sZ" % (uri, mt.isoformat())
                rospy.loginfo('updating from google calendar %s', uri)
                response = requests.get(uri)
                self.gcal = json.loads(response.text)
            except Exception, e:
                # Best-effort: keep the previous state on fetch failure.
                rospy.logerr('failed to get response from %s: %s',
                             self.uri, str(e))
                return
        else:
            # Local file mode (testing/replay).
            g = open(self.uri, 'rb')
            self.gcal = json.loads(g.read())
            g.close()
        self._to_task_list()
        if self._find_changes(added, removed):
            rospy.loginfo('changes in the calendar to process +%d -%d',
                          len(added), len(removed))
            # Replace the raw gcal dicts of new events with Task objects.
            for a in added:
                rospy.loginfo('instantiate %s' % a)
                self.events[a] = self.task_from_gcal(self.events[a])
            if self.add_cb is not None:
                for a in added:
                    self.add_cb(a, self.events[a])
            if self.remove_cb is not None:
                for r in removed:
                    self.remove_cb(r, self.previous_events[r])
            return True
        else:
            rospy.logdebug('no changes, keep watching')
            return False

    def get_task_list(self):
        """Return the current id -> event/Task mapping."""
        return self.events

    def _find_changes(self, added=[], removed=[]):
        """
        identifies the change set. Returns True when a change has been found

        NOTE(review): mutable default arguments are shared across calls;
        this is only safe because every caller in this file passes fresh
        lists — consider `added=None` defaults.
        """
        new_ids = set(self.events.keys())
        prev_ids = set(self.previous_events.keys())
        additions = new_ids.difference(prev_ids)
        deletions = prev_ids.difference(new_ids)
        if len(additions) > 0 or len(deletions) > 0:
            added.extend(additions)
            removed.extend(deletions)
            return True
        else:
            return False

    def task_from_gcal(self, gcal_event):
        """Convert one calendar event dict into a Task.

        Tries a per-action factory service '/<summary>_create' first;
        falls back to a default-constructed Task on any failure.
        """
        # Event times are normalized to UTC before conversion to ros time.
        start = parser.parse(gcal_event['start']['dateTime'])
        start_utc = start.astimezone(self.tz_utc)
        end = parser.parse(gcal_event['end']['dateTime'])
        end_utc = end.astimezone(self.tz_utc)
        action_name = gcal_event['summary']
        factory_name = '/' + action_name + "_create"
        try:
            factory = rospy.ServiceProxy(factory_name, CreateTask)
            # if 'description' in gcal_event:
            #     t = factory.call(gcal_event['description']).task
            # else:
            start_after = rospy.Time.from_sec(timegm(start_utc.timetuple())) \
                - self.time_offset
            end_before = rospy.Time.from_sec(timegm(end_utc.timetuple())) \
                - self.time_offset
            # Build a YAML fragment pre-populating the task fields for the
            # factory service.
            sa = "start_after: {secs: %d, nsecs: %d}" % \
                (start_after.secs, start_after.nsecs)
            eb = "end_before: {secs: %d, nsecs: %d}" % \
                (end_before.secs, end_before.nsecs)
            sn = "start_node_id: '%s'" % gcal_event['location']
            en = "end_node_id: '%s'" % gcal_event['location']
            if gcal_event.has_key('description'):
                ds = "description: '%s'" % gcal_event['description']
            else:
                ds = "description: "
            yaml = "{%s, %s, %s, %s, %s}" % (sa, eb, sn, en, ds)
            rospy.loginfo("calling with pre-populated yaml: %s" % yaml)
            t = factory.call(yaml).task
            rospy.loginfo("got the task back: %s" % str(t))
        except Exception as e:
            # Fallback: build a bare Task from the event fields directly.
            rospy.logwarn("Couldn't instantiate task from factory %s."
                          "error: %s."
                          "Using default constructor." %
                          (factory_name, str(e)))
            t = Task()
            t.action = gcal_event['summary']
            t.start_after = rospy.Time.from_sec(
                timegm(start_utc.timetuple())) \
                - self.time_offset
            t.end_before = rospy.Time.from_sec(timegm(end_utc.timetuple())) \
                - self.time_offset
        if 'location' in gcal_event:
            t.start_node_id = gcal_event['location']
            if len(t.end_node_id) == 0:
                t.end_node_id = gcal_event['location']
        if t.max_duration.secs == 0:
            t.max_duration = (t.end_before - t.start_after) / 2
        # if it's a time critical task, then the new
        # scheduler requires the task to have the same end
        # time as start time, to indicate time "criticalness".
        # Opportunistically, in this case we assume the
        # max duration to be the event length in calendar.
        if self.time_critical:
            t.max_duration = t.end_before - t.start_after
            t.max_duration.secs = t.max_duration.secs / 2
            t.end_before = t.start_after
        return t

    def _to_task_list(self):
        """Rebuild self.events from the raw calendar JSON items."""
        self.events = {}
        for gcal_event in self.gcal['items']:
            try:
                # Key on id+updated so an edited event shows up as a
                # removal plus an addition in _find_changes.
                k = gcal_event['id'] + gcal_event['updated']
                self.events[k] = gcal_event
            except Exception as e:
                rospy.logerr('failed to convert event from iCal to task: %s',
                             str(e))
if __name__ == '__main__':
    # Smoke test: construct and pretty-print an empty Task message.
    t = Task()
    pprint(t)
| {
"content_hash": "ebde624b4e6f918e6537bb9411ee56de",
"timestamp": "",
"source": "github",
"line_count": 218,
"max_line_length": 78,
"avg_line_length": 38.862385321100916,
"alnum_prop": 0.5308073654390935,
"repo_name": "bfalacerda/strands_executive",
"id": "9365f49b8e3ed576e25df0cad86541e12c13b27e",
"size": "8494",
"binary": false,
"copies": "2",
"ref": "refs/heads/kinetic-devel",
"path": "gcal_routine/src/gcal_routine/tools.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CMake",
"bytes": "27862"
},
{
"name": "Python",
"bytes": "436976"
},
{
"name": "Shell",
"bytes": "2693"
}
],
"symlink_target": ""
} |
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_storage_class import V1StorageClass
class TestV1StorageClass(unittest.TestCase):
    """Unit test stubs for the V1StorageClass model."""

    def setUp(self):
        # No fixtures are needed for these generated stubs.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testV1StorageClass(self):
        """Exercise construction of V1StorageClass."""
        # FIXME: construct object with mandatory attributes with example values
        # model = kubernetes.client.models.v1_storage_class.V1StorageClass()
        pass
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| {
"content_hash": "b76d4fd26ed6a92cb019cdb95d1d54e2",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 105,
"avg_line_length": 22.095238095238095,
"alnum_prop": 0.6875,
"repo_name": "mbohlool/client-python",
"id": "ff46a61db1c874e1cf050167b0bebeed24bea5f9",
"size": "945",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kubernetes/test/test_v1_storage_class.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "8417639"
},
{
"name": "Shell",
"bytes": "16830"
}
],
"symlink_target": ""
} |
import os
import re
# Separator between a package name and a message type, e.g. 'std_msgs/String'.
PACKAGE_NAME_MESSAGE_TYPE_SEPARATOR = '/'
# Everything after this character on a line is treated as a comment.
COMMENT_DELIMITER = '#'
# Separates a constant name from its value, e.g. 'int32 X=5'.
CONSTANT_SEPARATOR = '='
# Marks an array size / string length as an upper bound rather than exact.
ARRAY_UPPER_BOUND_TOKEN = '<='
STRING_UPPER_BOUND_TOKEN = '<='

# Line separating the request from the response section in a .srv file.
SERVICE_REQUEST_RESPONSE_SEPARATOR = '---'
# Suffixes appended to the service name for the implicit request/response
# message types.
SERVICE_REQUEST_MESSAGE_SUFFIX = '_Request'
SERVICE_RESPONSE_MESSAGE_SUFFIX = '_Response'

# Built-in (non-message) field types.
PRIMITIVE_TYPES = [
    'bool',
    'byte',
    'char',
    # TODO reconsider wchar
    'float32',
    'float64',
    'int8',
    'uint8',
    'int16',
    'uint16',
    'int32',
    'uint32',
    'int64',
    'uint64',
    'string',
    # TODO reconsider wstring / u16string / u32string
    # TODO duration and time
    'duration',  # for compatibility only
    'time',  # for compatibility only
]

# Lowercase alphanumerics with single underscores, no leading/trailing '_'.
VALID_PACKAGE_NAME_PATTERN = re.compile('^[a-z]([a-z0-9_]?[a-z0-9]+)*$')
VALID_FIELD_NAME_PATTERN = re.compile('^[a-z]([a-z0-9_]?[a-z0-9]+)*$')
# relaxed patterns used for compatibility with ROS 1 messages
# VALID_FIELD_NAME_PATTERN = re.compile('^[A-Za-z][A-Za-z0-9_]*$')
# CamelCase message names starting with an uppercase letter.
VALID_MESSAGE_NAME_PATTERN = re.compile('^[A-Z][A-Za-z0-9]*$')
# relaxed patterns used for compatibility with ROS 1 messages
# VALID_MESSAGE_NAME_PATTERN = re.compile('^[A-Za-z][A-Za-z0-9]*$')
# UPPER_SNAKE_CASE constant names, no leading/trailing '_'.
VALID_CONSTANT_NAME_PATTERN = re.compile('^[A-Z]([A-Z0-9_]?[A-Z0-9]+)*$')
class InvalidSpecification(Exception):
    """Base class for all interface specification parsing errors."""
    pass


class InvalidServiceSpecification(InvalidSpecification):
    """Raised when a .srv file is malformed."""
    pass


class InvalidResourceName(InvalidSpecification):
    """Raised when a package, message, field or constant name is invalid."""
    pass


class InvalidFieldDefinition(InvalidSpecification):
    """Raised when a field line cannot be parsed."""
    pass


class UnknownMessageType(InvalidSpecification):
    """Raised when a field references a message type that is not known."""
    pass
class InvalidValue(Exception):
    """Raised when a value string cannot be converted to a given type."""

    def __init__(self, type_, value_string, message_suffix=None):
        """Build the error message from the offending type and value.

        Args:
            type_: the target type the value was parsed against.
            value_string: the raw string that failed to convert.
            message_suffix: optional extra detail appended after a colon.
        """
        msg = "value '%s' can not be converted to type '%s'" % (
            value_string, type_)
        if message_suffix is not None:
            msg += ': %s' % message_suffix
        super(InvalidValue, self).__init__(msg)
def is_valid_package_name(name):
    """Return True if *name* is a valid package name.

    Raises:
        InvalidResourceName: if name is not a string.
    """
    try:
        match = VALID_PACKAGE_NAME_PATTERN.match(name)
    except TypeError:
        raise InvalidResourceName(name)
    if match is None:
        return False
    return match.group(0) == name
def is_valid_field_name(name):
    """Return True if *name* is a valid field name.

    Raises:
        InvalidResourceName: if name is not a string.
    """
    try:
        match = VALID_FIELD_NAME_PATTERN.match(name)
    except TypeError:
        raise InvalidResourceName(name)
    if match is None:
        return False
    return match.group(0) == name
def is_valid_message_name(name):
    """Return True if *name* is a valid message name.

    A leading 'Sample_' prefix and a trailing service request/response
    suffix are stripped before validation.

    Raises:
        InvalidResourceName: if name is not a string.
    """
    try:
        sample_prefix = 'Sample_'
        if name.startswith(sample_prefix):
            name = name[len(sample_prefix):]
        for service_suffix in ('_Request', '_Response'):
            if name.endswith(service_suffix):
                name = name[:-len(service_suffix)]
                break
        match = VALID_MESSAGE_NAME_PATTERN.match(name)
    except (AttributeError, TypeError):
        raise InvalidResourceName(name)
    if match is None:
        return False
    return match.group(0) == name
def is_valid_constant_name(name):
    """Return True if *name* is a valid constant name.

    Raises:
        InvalidResourceName: if name is not a string.
    """
    try:
        match = VALID_CONSTANT_NAME_PATTERN.match(name)
    except TypeError:
        raise InvalidResourceName(name)
    if match is None:
        return False
    return match.group(0) == name
class BaseType(object):
    """A message type without array information.

    A base type is either a primitive (possibly with a string upper bound)
    or a package-qualified message type.
    """

    __slots__ = ['pkg_name', 'type', 'string_upper_bound']

    def __init__(self, type_string, context_package_name=None):
        """Parse a type string such as 'int32', 'string<=10' or 'pkg/Msg'.

        Args:
            type_string: the textual type.
            context_package_name: package assumed for unqualified message
                types (e.g. 'Msg' instead of 'pkg/Msg').

        Raises:
            TypeError: if a string upper bound is not a positive integer.
            InvalidResourceName: if the package or message name is invalid.
        """
        # check for primitive types
        if type_string in PRIMITIVE_TYPES:
            self.pkg_name = None
            self.type = type_string
            self.string_upper_bound = None

        elif type_string.startswith('string%s' % STRING_UPPER_BOUND_TOKEN):
            # bounded string, e.g. 'string<=10'
            self.pkg_name = None
            self.type = 'string'
            upper_bound_string = type_string[len(self.type) +
                                             len(STRING_UPPER_BOUND_TOKEN):]

            ex = TypeError(("the upper bound of the string type '%s' must " +
                            "be a valid integer value > 0") % type_string)
            try:
                self.string_upper_bound = int(upper_bound_string)
            except ValueError:
                raise ex
            if self.string_upper_bound <= 0:
                raise ex

        else:
            # split non-primitive type information
            parts = type_string.split(PACKAGE_NAME_MESSAGE_TYPE_SEPARATOR)
            if not (len(parts) == 2 or
                    (len(parts) == 1 and context_package_name is not None)):
                raise InvalidResourceName(type_string)

            if len(parts) == 2:
                # either the type string contains the package name
                self.pkg_name = parts[0]
                self.type = parts[1]
            else:
                # or the package name is provided by context
                self.pkg_name = context_package_name
                self.type = type_string
            if not is_valid_package_name(self.pkg_name):
                raise InvalidResourceName(self.pkg_name)
            if not is_valid_message_name(self.type):
                raise InvalidResourceName(self.type)

            self.string_upper_bound = None

    def is_primitive_type(self):
        """Return True if this is a primitive (non-message) type."""
        return self.pkg_name is None

    def __eq__(self, other):
        if other is None or not isinstance(other, BaseType):
            return False
        return self.pkg_name == other.pkg_name and \
            self.type == other.type and \
            self.string_upper_bound == other.string_upper_bound

    def __hash__(self):
        # Hash via the canonical string form so equal types hash equally.
        return hash(str(self))

    def __str__(self):
        """Return the canonical textual form, e.g. 'pkg/Msg' or 'string<=10'."""
        if self.pkg_name is not None:
            return '%s/%s' % (self.pkg_name, self.type)

        s = self.type
        if self.string_upper_bound:
            s += '%s%u' % \
                (STRING_UPPER_BOUND_TOKEN, self.string_upper_bound)
        return s
class Type(BaseType):
    """A message type, optionally an array (fixed size or upper bounded)."""

    __slots__ = ['is_array', 'array_size', 'is_upper_bound']

    def __init__(self, type_string, context_package_name=None):
        """Parse a type string with optional trailing array brackets.

        Args:
            type_string: e.g. 'int32', 'int32[]', 'int32[5]', 'int32[<=5]'.
            context_package_name: package assumed for unqualified message
                types.

        Raises:
            TypeError: if the array brackets or array size are malformed.
        """
        # check for array brackets
        self.is_array = type_string[-1] == ']'

        self.array_size = None
        self.is_upper_bound = False
        if self.is_array:
            try:
                index = type_string.rindex('[')
            except ValueError:
                # BUGFIX: the original format string had no '%s' placeholder,
                # so applying '%' to it raised a secondary TypeError instead
                # of the intended message.
                raise TypeError(("the type '%s' ends with ']' but does not " +
                                 "contain a '['") % type_string)
            array_size_string = type_string[index + 1:-1]
            # get array limit
            if array_size_string != '':

                # check if the limit is an upper bound
                self.is_upper_bound = array_size_string.startswith(
                    ARRAY_UPPER_BOUND_TOKEN)
                if self.is_upper_bound:
                    array_size_string = array_size_string[
                        len(ARRAY_UPPER_BOUND_TOKEN):]

                # BUGFIX: the substitution arguments were swapped, naming the
                # upper-bound token as the array type and vice versa.
                ex = TypeError((
                    "the size of array type '%s' must be a valid integer " +
                    "value > 0 optionally prefixed with '%s' if it is only " +
                    "an upper bound") %
                    (type_string, ARRAY_UPPER_BOUND_TOKEN))
                try:
                    self.array_size = int(array_size_string)
                except ValueError:
                    raise ex
                # check valid range
                if self.array_size <= 0:
                    raise ex

            type_string = type_string[:index]

        super(Type, self).__init__(
            type_string,
            context_package_name=context_package_name)

    def is_dynamic_array(self):
        """Return True for unbounded or upper-bounded arrays."""
        return self.is_array and (not self.array_size or self.is_upper_bound)

    def is_fixed_size_array(self):
        """Return True for arrays with an exact size."""
        return self.is_array and self.array_size and not self.is_upper_bound

    def __eq__(self, other):
        if other is None or not isinstance(other, Type):
            return False
        return super(Type, self).__eq__(other) and \
            self.is_array == other.is_array and \
            self.array_size == other.array_size and \
            self.is_upper_bound == other.is_upper_bound

    def __hash__(self):
        # Hash via the canonical string form so equal types hash equally.
        return hash(str(self))

    def __str__(self):
        """Return the canonical textual form including array brackets."""
        s = super(Type, self).__str__()
        if self.is_array:
            s += '['
            if self.is_upper_bound:
                s += ARRAY_UPPER_BOUND_TOKEN
            if self.array_size is not None:
                s += '%u' % self.array_size
            s += ']'
        return s
class Constant(object):
    """A constant definition: a primitive type, a name and a parsed value."""

    __slots__ = ['type', 'name', 'value']

    def __init__(self, primitive_type, name, value_string):
        """Validate the pieces and parse the value string.

        Raises:
            TypeError: if the type is not primitive.
            NameError: if the name is not a valid constant name.
            ValueError: if no value string is given.
        """
        if primitive_type not in PRIMITIVE_TYPES:
            raise TypeError("the constant type '%s' must be a primitive type" %
                            primitive_type)
        if not is_valid_constant_name(name):
            raise NameError("the constant name '%s' is not valid" % name)
        if value_string is None:
            raise ValueError("the constant value must not be 'None'")
        self.type = primitive_type
        self.name = name
        self.value = parse_primitive_value_string(
            Type(primitive_type), value_string)

    def __eq__(self, other):
        if not isinstance(other, Constant):
            return False
        return (self.type, self.name, self.value) == \
            (other.type, other.name, other.value)

    def __str__(self):
        """Render as '<type> <NAME>=<value>', quoting string values."""
        if self.type == 'string':
            rendered = "'%s'" % self.value
        else:
            rendered = self.value
        return '%s %s=%s' % (self.type, self.name, rendered)
class Field(object):
    """A message field: a type, a name and an optional default value."""

    def __init__(self, type_, name, default_value_string=None):
        """Create a field, parsing the default value if one is given.

        Raises:
            TypeError: if type_ is not a Type instance.
            NameError: if the field name is not valid.
        """
        if not isinstance(type_, Type):
            raise TypeError(
                "the field type '%s' must be a 'Type' instance" % type_)
        if not is_valid_field_name(name):
            raise NameError("the field name '%s' is not valid" % name)
        self.type = type_
        self.name = name
        if default_value_string is None:
            self.default_value = None
        else:
            self.default_value = parse_value_string(
                type_, default_value_string)

    def __eq__(self, other):
        if not isinstance(other, Field):
            return False
        return (self.type, self.name, self.default_value) == \
            (other.type, other.name, other.default_value)

    def __str__(self):
        """Render as '<type> <name>' with the default appended if present."""
        result = '%s %s' % (str(self.type), self.name)
        if self.default_value is None:
            return result
        is_plain_string = (self.type.is_primitive_type() and
                           not self.type.is_array and
                           self.type.type == 'string')
        if is_plain_string:
            return result + " '%s'" % self.default_value
        return result + ' %s' % self.default_value
class MessageSpecification(object):
    """A parsed message: its base type plus its fields and constants."""

    def __init__(self, pkg_name, msg_name, fields, constants):
        """Validate and store the message content.

        Args:
            pkg_name: name of the package the message belongs to.
            msg_name: name of the message.
            fields: iterable of Field instances.
            constants: iterable of Constant instances.

        Raises:
            TypeError: if an element is not a Field / Constant instance.
            ValueError: on duplicate field or constant names.
            InvalidResourceName: if pkg_name/msg_name are invalid (raised
                by the BaseType constructor).
        """
        self.base_type = BaseType(
            pkg_name + PACKAGE_NAME_MESSAGE_TYPE_SEPARATOR + msg_name)
        self.msg_name = msg_name

        self.fields = []
        for index, field in enumerate(fields):
            if not isinstance(field, Field):
                raise TypeError("field %u must be a 'Field' instance" % index)
            self.fields.append(field)
        # ensure that there are no duplicate field names
        field_names = [f.name for f in self.fields]
        duplicate_field_names = set([n for n in field_names
                                     if field_names.count(n) > 1])
        if duplicate_field_names:
            raise ValueError(
                'the fields iterable contains duplicate names: %s' %
                ', '.join(sorted(duplicate_field_names)))

        self.constants = []
        for index, constant in enumerate(constants):
            if not isinstance(constant, Constant):
                raise TypeError("constant %u must be a 'Constant' instance" %
                                index)
            self.constants.append(constant)
        # ensure that there are no duplicate constant names
        constant_names = [c.name for c in self.constants]
        duplicate_constant_names = set([n for n in constant_names
                                        if constant_names.count(n) > 1])
        if duplicate_constant_names:
            raise ValueError(
                'the constants iterable contains duplicate names: %s' %
                ', '.join(sorted(duplicate_constant_names)))

    # NOTE(review): __eq__ is defined without __hash__, which makes instances
    # unhashable in Python 3 — fine as long as specs are never used as
    # dict/set keys; confirm before relying on that.
    def __eq__(self, other):
        if not other or not isinstance(other, MessageSpecification):
            return False
        return self.base_type == other.base_type and \
            len(self.fields) == len(other.fields) and \
            self.fields == other.fields and \
            len(self.constants) == len(other.constants) and \
            self.constants == other.constants
def parse_message_file(pkg_name, interface_filename):
    """Parse a .msg file and return its MessageSpecification.

    The message name is derived from the file name without extension.
    """
    msg_name = os.path.splitext(os.path.basename(interface_filename))[0]
    with open(interface_filename, 'r') as handle:
        return parse_message_string(pkg_name, msg_name, handle.read())
def parse_message_string(pkg_name, msg_name, message_string):
    """Parse the textual content of a .msg file.

    Each non-empty, non-comment line is either a field
    ('<type> <name> [default]') or a constant ('<type> <NAME>=<value>').

    Args:
        pkg_name: name of the package the message belongs to.
        msg_name: name of the message.
        message_string: the raw file content.

    Returns:
        MessageSpecification: the parsed message.

    Raises:
        InvalidFieldDefinition: if a line has a type but nothing after it.
    """
    fields = []
    constants = []

    lines = message_string.splitlines()
    for line in lines:
        # ignore whitespaces and comments
        line = line.strip()
        if not line:
            continue
        index = line.find(COMMENT_DELIMITER)
        if index == 0:
            continue
        if index != -1:
            # strip a trailing comment
            line = line[:index]
            line = line.rstrip()

        type_string, _, rest = line.partition(' ')
        rest = rest.lstrip()
        if not rest:
            print("Error with:", pkg_name, msg_name)
            raise InvalidFieldDefinition(line)

        index = rest.find(CONSTANT_SEPARATOR)
        if index == -1:
            # line contains a field
            field_name, _, default_value_string = rest.partition(' ')
            default_value_string = default_value_string.lstrip()
            if not default_value_string:
                default_value_string = None
            try:
                fields.append(Field(
                    Type(type_string, context_package_name=pkg_name),
                    field_name, default_value_string))
            except Exception as err:
                # re-raise with context so the offending line is identifiable
                print("Error processing '{line}' of '{pkg}/{msg}': '{err}'".format(
                    line=line, pkg=pkg_name, msg=msg_name, err=err,
                ))
                raise
        else:
            # line contains a constant
            name, _, value = rest.partition(CONSTANT_SEPARATOR)
            name = name.rstrip()
            value = value.lstrip()
            constants.append(Constant(type_string, name, value))

    return MessageSpecification(pkg_name, msg_name, fields, constants)
def parse_value_string(type_, value_string):
    """Parse a value string (scalar or array) into a Python value.

    Supported: non-array primitives and arrays of primitives other than
    string. Everything else raises NotImplementedError.

    Args:
        type_: the Type to parse against.
        value_string: the raw textual value.

    Returns:
        The parsed scalar, or a list of parsed elements for arrays.

    Raises:
        InvalidValue: if the value does not match the type constraints.
        NotImplementedError: for unsupported type/value combinations.
    """
    if type_.is_primitive_type() and not type_.is_array:
        return parse_primitive_value_string(type_, value_string)

    # TODO(mikaelarguedas) change this condition once string escape function is implemented
    if type_.is_primitive_type() and type_.is_array and type_.type != 'string':
        # check for array brackets
        if not value_string.startswith('[') or not value_string.endswith(']'):
            raise InvalidValue(
                type_, value_string,
                "array value must start with '[' and end with ']'")
        elements_string = value_string[1:-1]

        # split on separator and check size constraints
        value_strings = elements_string.split(',') if elements_string else []
        if type_.array_size:
            # check for exact size
            if not type_.is_upper_bound and \
                    len(value_strings) != type_.array_size:
                raise InvalidValue(
                    type_, value_string,
                    'array must have exactly %u elements, not %u' %
                    (type_.array_size, len(value_strings)))
            # check for upper bound
            if type_.is_upper_bound and len(value_strings) > type_.array_size:
                raise InvalidValue(
                    type_, value_string,
                    'array must have not more than %u elements, not %u' %
                    (type_.array_size, len(value_strings)))

        # parse all primitive values one by one
        values = []
        for index, element_string in enumerate(value_strings):
            element_string = element_string.strip()
            try:
                # drop the array part of the type for per-element parsing
                base_type = Type(BaseType.__str__(type_))
                value = parse_primitive_value_string(base_type, element_string)
            except InvalidValue as e:
                raise InvalidValue(
                    type_, value_string, 'element %u with %s' % (index, e))
            values.append(value)
        return values

    raise NotImplementedError(
        "parsing string values into type '%s' is not supported" % type_)
def parse_primitive_value_string(type_, value_string):
    """Parse a single primitive value string into a Python value.

    Args:
        type_: a non-array primitive Type.
        value_string: the raw textual value.

    Returns:
        bool, int, float or str depending on the type.

    Raises:
        ValueError: if type_ is not a non-array primitive type.
        InvalidValue: if the value does not fit the type's constraints.
    """
    if not type_.is_primitive_type() or type_.is_array:
        raise ValueError('the passed type must be a non-array primitive type')
    primitive_type = type_.type

    if primitive_type == 'bool':
        true_values = ['true', '1']
        false_values = ['false', '0']
        if value_string.lower() not in (true_values + false_values):
            raise InvalidValue(
                primitive_type, value_string,
                "must be either 'true' / '1' or 'false' / '0'")

        return value_string.lower() in true_values

    if primitive_type == 'byte':
        # same as uint8
        ex = InvalidValue(primitive_type, value_string,
                          'must be a valid integer value >= 0 and <= 255')
        try:
            value = int(value_string)
        except ValueError:
            raise ex
        if value < 0 or value > 255:
            raise ex
        return value

    if primitive_type == 'char':
        # same as int8
        ex = InvalidValue(primitive_type, value_string,
                          'must be a valid integer value >= -128 and <= 127')
        try:
            value = int(value_string)
        except ValueError:
            raise ex
        if value < -128 or value > 127:
            raise ex
        return value

    if primitive_type in ['float32', 'float64']:
        try:
            return float(value_string)
        except ValueError:
            raise InvalidValue(
                primitive_type, value_string,
                "must be a floating point number using '.' as the separator")

    if primitive_type in [
        'int8', 'uint8',
        'int16', 'uint16',
        'int32', 'uint32',
        'int64', 'uint64',
    ]:
        # determine lower and upper bound
        is_unsigned = primitive_type.startswith('u')
        bits = int(primitive_type[4 if is_unsigned else 3:])
        lower_bound = 0 if is_unsigned else -(2 ** (bits - 1))
        upper_bound = (2 ** (bits if is_unsigned else (bits - 1))) - 1

        ex = InvalidValue(primitive_type, value_string,
                          'must be a valid integer value >= %d and <= %u' %
                          (lower_bound, upper_bound))

        try:
            value = int(value_string)
        except ValueError:
            raise ex

        # check that value is in valid range
        if value < lower_bound or value > upper_bound:
            raise ex

        return value

    if primitive_type == 'string':
        # remove outer quotes to allow leading / trailing spaces in the string
        for quote in ['"', "'"]:
            if value_string.startswith(quote) and value_string.endswith(quote):
                value_string = value_string[1:-1]
                break

        # check that value is in valid range
        if type_.string_upper_bound and \
                len(value_string) > type_.string_upper_bound:
            base_type = Type(BaseType.__str__(type_))
            raise InvalidValue(
                base_type, value_string,
                'string must not exceed the maximum length of %u characters' %
                type_.string_upper_bound)

        return value_string

    # NOTE: 'duration' and 'time' pass the primitive-type check above but
    # have no parsing branch, so they end up here.
    assert False, "unknown primitive type '%s'" % primitive_type
def validate_field_types(spec, known_msg_types):
    """Verify all non-primitive field types of a spec are known messages.

    Args:
        spec: a MessageSpecification or ServiceSpecification.
        known_msg_types: collection of known BaseType instances.

    Raises:
        UnknownMessageType: if a field references an unknown message type.
    """
    if isinstance(spec, MessageSpecification):
        spec_type = 'Message'
        fields = spec.fields
    elif isinstance(spec, ServiceSpecification):
        spec_type = 'Service'
        fields = spec.request.fields + spec.response.fields
    else:
        assert False, 'Unknown specification type: %s' % type(spec)

    for field in fields:
        if field.type.is_primitive_type():
            continue
        # compare on the base type, ignoring any array part
        base_type = BaseType(BaseType.__str__(field.type))
        if base_type not in known_msg_types:
            raise UnknownMessageType(
                "%s interface '%s' contains an unknown field type: %s" %
                (spec_type, spec.base_type, field))
class ServiceSpecification(object):
    """A parsed service: its name plus request and response messages."""

    def __init__(self, pkg_name, srv_name, request_message, response_message):
        """Store the service parts.

        Args:
            pkg_name: name of the package the service belongs to.
            srv_name: name of the service.
            request_message: MessageSpecification of the request.
            response_message: MessageSpecification of the response.
        """
        self.pkg_name = pkg_name
        self.srv_name = srv_name
        self.request = request_message
        self.response = response_message
def parse_service_file(pkg_name, interface_filename):
    """Parse a .srv file and return its ServiceSpecification.

    The service name is derived from the file name without extension.
    """
    srv_name = os.path.splitext(os.path.basename(interface_filename))[0]
    with open(interface_filename, 'r') as handle:
        return parse_service_string(pkg_name, srv_name, handle.read())
def parse_service_string(pkg_name, srv_name, message_string):
    """Parse the textual content of a .srv file.

    The file must contain exactly one '---' separator line; the text before
    it is parsed as the request message, the text after as the response.

    Raises:
        InvalidServiceSpecification: if the separator is missing or appears
            more than once.
    """
    lines = message_string.splitlines()
    separator_indices = [
        i for i, line in enumerate(lines)
        if line == SERVICE_REQUEST_RESPONSE_SEPARATOR]
    if not separator_indices:
        raise InvalidServiceSpecification(
            "Could not find separator '%s' between request and response" %
            SERVICE_REQUEST_RESPONSE_SEPARATOR)
    if len(separator_indices) != 1:
        raise InvalidServiceSpecification(
            "Could not find unique separator '%s' between request and response" %
            SERVICE_REQUEST_RESPONSE_SEPARATOR)

    separator = separator_indices[0]
    request_message = parse_message_string(
        pkg_name, srv_name + SERVICE_REQUEST_MESSAGE_SUFFIX,
        '\n'.join(lines[:separator]))
    response_message = parse_message_string(
        pkg_name, srv_name + SERVICE_RESPONSE_MESSAGE_SUFFIX,
        '\n'.join(lines[separator + 1:]))
    return ServiceSpecification(
        pkg_name, srv_name, request_message, response_message)
| {
"content_hash": "e99ac17f0bd44b52b127515cc9bc1d65",
"timestamp": "",
"source": "github",
"line_count": 628,
"max_line_length": 91,
"avg_line_length": 35.68789808917197,
"alnum_prop": 0.5655006246653579,
"repo_name": "esteve/rosidl",
"id": "47a27656358bb49333dc0a0824b8c07d70e8f43b",
"size": "23019",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rosidl_parser/rosidl_parser/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "65487"
},
{
"name": "C++",
"bytes": "33967"
},
{
"name": "CMake",
"bytes": "77563"
},
{
"name": "Python",
"bytes": "99394"
}
],
"symlink_target": ""
} |
"""Windows UserAssist information collector."""
import codecs
import logging
from winregrc import data_format
from winregrc import errors
from winregrc import interface
class UserAssistEntry(object):
  """UserAssist entry.

  Attributes:
    guid (str): GUID.
    name (str): name.
    value_name (str): name of the Windows Registry value.
  """

  def __init__(self, guid=None, name=None, value_name=None):
    """Initializes an UserAssist entry.

    Args:
      guid (Optional[str]): GUID.
      name (Optional[str]): name.
      value_name (Optional[str]): name of the Windows Registry value.
    """
    super(UserAssistEntry, self).__init__()
    self.value_name = value_name
    self.name = name
    self.guid = guid
class UserAssistDataParser(data_format.BinaryDataFormat):
  """UserAssist data parser."""

  _DEFINITION_FILE = 'userassist.yaml'

  # pylint: disable=missing-type-doc

  def _DebugPrintEntry(self, format_version, user_assist_entry):
    """Prints UserAssist entry value debug information.

    Args:
      format_version (int): format version.
      user_assist_entry (user_assist_entry_v3|user_assist_entry_v5):
          UserAssist entry.
    """
    self._DebugPrintValue('Unknown1', f'0x{user_assist_entry.unknown1:08x}')

    self._DebugPrintDecimalValue(
        'Number of executions', user_assist_entry.number_of_executions)

    if format_version == 5:
      self._DebugPrintDecimalValue(
          'Application focus count',
          user_assist_entry.application_focus_count)

      self._DebugPrintDecimalValue(
          'Application focus duration',
          user_assist_entry.application_focus_duration)

      self._DebugPrintValue('Unknown2', f'{user_assist_entry.unknown2:.2f}')
      self._DebugPrintValue('Unknown3', f'{user_assist_entry.unknown3:.2f}')
      self._DebugPrintValue('Unknown4', f'{user_assist_entry.unknown4:.2f}')
      self._DebugPrintValue('Unknown5', f'{user_assist_entry.unknown5:.2f}')
      self._DebugPrintValue('Unknown6', f'{user_assist_entry.unknown6:.2f}')
      self._DebugPrintValue('Unknown7', f'{user_assist_entry.unknown7:.2f}')
      self._DebugPrintValue('Unknown8', f'{user_assist_entry.unknown8:.2f}')
      self._DebugPrintValue('Unknown9', f'{user_assist_entry.unknown9:.2f}')
      self._DebugPrintValue('Unknown10', f'{user_assist_entry.unknown10:.2f}')
      self._DebugPrintValue('Unknown11', f'{user_assist_entry.unknown11:.2f}')
      self._DebugPrintValue('Unknown12', f'0x{user_assist_entry.unknown12:08x}')

    self._DebugPrintFiletimeValue(
        'Last execution time', user_assist_entry.last_execution_time)

    if format_version == 5:
      # BUGFIX: the literal was missing the f-prefix, so the placeholder
      # text was printed verbatim instead of the formatted value.
      self._DebugPrintValue('Unknown13', f'0x{user_assist_entry.unknown13:08x}')

    self._DebugPrintText('\n')

  # pylint: disable=missing-return-type-doc
  def ParseEntry(self, format_version, entry_data):
    """Parses an UserAssist entry.

    Args:
      format_version (int): format version.
      entry_data (bytes): entry data.

    Returns:
      user_assist_entry_v3|user_assist_entry_v5: UserAssist entry.

    Raises:
      ParseError: if the value data could not be parsed.
    """
    if format_version == 3:
      data_type_map = self._GetDataTypeMap('user_assist_entry_v3')
      expected_entry_data_size = 16
    elif format_version == 5:
      data_type_map = self._GetDataTypeMap('user_assist_entry_v5')
      expected_entry_data_size = 72
    else:
      # BUGFIX: previously an unsupported version caused a NameError on
      # data_type_map instead of a proper parse error.
      raise errors.ParseError(
          f'Unsupported format version: {format_version!s}')

    if expected_entry_data_size != len(entry_data):
      entry_data_size = len(entry_data)
      raise errors.ParseError((
          f'Version: {format_version:d} size mismatch (calculated: '
          f'{expected_entry_data_size:d}, stored: {entry_data_size:d}).'))

    try:
      user_assist_entry = self._ReadStructureFromByteStream(
          entry_data, 0, data_type_map, 'UserAssist entry')
    except (ValueError, errors.ParseError) as exception:
      raise errors.ParseError(
          f'Unable to parse UserAssist entry value with error: {exception!s}')

    if self._debug:
      self._DebugPrintEntry(format_version, user_assist_entry)

    return user_assist_entry
class UserAssistCollector(interface.WindowsRegistryKeyCollector):
  """Windows UserAssist information collector.

  Attributes:
    user_assist_entries (list[UserAssistEntry]): UserAssist entries.
  """

  _USER_ASSIST_KEY = (
      'HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\'
      'Explorer\\UserAssist')

  def __init__(self, debug=False, output_writer=None):
    """Initializes a Windows UserAssist information collector.

    Args:
      debug (Optional[bool]): True if debug information should be printed.
      output_writer (Optional[OutputWriter]): output writer.
    """
    super(UserAssistCollector, self).__init__(debug=debug)
    self._output_writer = output_writer
    self._parser = UserAssistDataParser(
        debug=debug, output_writer=output_writer)

    self.user_assist_entries = []

  def _CollectUserAssistFromKey(self, guid_subkey):
    """Collects the UserAssist information from a GUID sub key.

    Args:
      guid_subkey (dfwinreg.WinRegistryKey): UserAssist GUID Registry key.
    """
    version_value = guid_subkey.GetValueByName('Version')
    if not version_value:
      logging.warning(f'Missing Version value in sub key: {guid_subkey.name:s}')
      return

    format_version = version_value.GetDataAsObject()

    if self._debug:
      self._output_writer.WriteValue('GUID', guid_subkey.name)
      self._output_writer.WriteIntegerValueAsDecimal(
          'Format version', format_version)
      self._output_writer.WriteText('\n')

    count_subkey = guid_subkey.GetSubkeyByName('Count')
    for value in count_subkey.GetValues():
      if self._debug:
        self._output_writer.WriteValue('Original name', value.name)

      # Value names are ROT-13 "encrypted"; decode character by character
      # when the whole name cannot be decoded at once.
      try:
        # Note that Python 2 codecs.decode() does not support keyword arguments
        # such as encodings='rot-13'.
        value_name = codecs.decode(value.name, 'rot-13')
      except UnicodeEncodeError:
        characters = []
        for character in value.name:
          if ord(character) < 128:
            try:
              character = codecs.decode(character, 'rot-13')
              characters.append(character)
            except UnicodeEncodeError:
              characters.append(character)
          else:
            characters.append(character)

        value_name = ''.join(characters)

      if self._debug:
        self._output_writer.WriteValue('Converted name', value_name)
        self._output_writer.WriteDebugData('Value data:', value.data)

      # UEME_CTLSESSION values hold session data, not application entries.
      if value_name != 'UEME_CTLSESSION':
        # NOTE(review): the parsed entry below is immediately overwritten;
        # the ParseEntry() call appears to be kept only for its debug-print
        # side effect — confirm before removing.
        user_assist_entry = self._parser.ParseEntry(format_version, value.data)

        user_assist_entry = UserAssistEntry(
            guid=guid_subkey.name, name=value_name, value_name=value.name)
        self.user_assist_entries.append(user_assist_entry)

  def Collect(self, registry):  # pylint: disable=arguments-differ
    """Collects the UserAssist information.

    Args:
      registry (dfwinreg.WinRegistry): Windows Registry.

    Returns:
      bool: True if the UserAssist key was found, False if not.
    """
    user_assist_key = registry.GetKeyByPath(self._USER_ASSIST_KEY)
    if not user_assist_key:
      return False

    for guid_subkey in user_assist_key.GetSubkeys():
      self._CollectUserAssistFromKey(guid_subkey)

    return True
| {
"content_hash": "d03704435f80ecdaa38d2d73df279d91",
"timestamp": "",
"source": "github",
"line_count": 216,
"max_line_length": 80,
"avg_line_length": 34.041666666666664,
"alnum_prop": 0.6716986264109888,
"repo_name": "libyal/winreg-kb",
"id": "bba66b74bdf36fdad84459584593765882f3bd5f",
"size": "7377",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "winregrc/userassist.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "122"
},
{
"name": "PowerShell",
"bytes": "1074"
},
{
"name": "Python",
"bytes": "403051"
},
{
"name": "Shell",
"bytes": "1186"
}
],
"symlink_target": ""
} |
"""Contains the logic for `aq update city`."""
from sqlalchemy.orm import with_polymorphic
from aquilon.exceptions_ import ArgumentError
from aquilon.aqdb.model import (City, Campus, HardwareEntity, Machine,
NetworkDevice, Cluster, Network)
from aquilon.worker.broker import BrokerCommand
from aquilon.worker.processes import DSDBRunner
from aquilon.worker.dbwrappers.location import update_location
from aquilon.worker.dbwrappers.change_management import ChangeManagement
class CommandUpdateCity(BrokerCommand):
    """Broker command implementing `aq update city`."""

    requires_plenaries = True

    required_parameters = ["city"]

    def render(self, session, logger, plenaries, city, timezone, fullname, campus,
               default_dns_domain, comments, user, justification, reason, **arguments):
        """Update a city's attributes and, optionally, move it to a campus.

        Moving to a different campus triggers change-management validation
        over all hardware, clusters and networks under the city, and an
        update of the city's campus in DSDB.
        """
        dbcity = City.get_unique(session, city, compel=True)
        plenaries.add(dbcity)

        if timezone is not None:
            dbcity.timezone = timezone

        update_location(dbcity, default_dns_domain=default_dns_domain,
                        fullname=fullname, comments=comments)

        prev_campus = None
        # NOTE(review): this None assignment is redundant — dsdb_runner is
        # unconditionally reassigned on the next line.
        dsdb_runner = None
        dsdb_runner = DSDBRunner(logger=logger)
        if campus is not None:
            dbcampus = Campus.get_unique(session, campus, compel=True)

            HWS = with_polymorphic(HardwareEntity, [Machine, NetworkDevice])
            q = session.query(HWS)
            # HW types which have plenary templates
            q = q.filter(HWS.hardware_type.in_(['machine', 'network_device']))
            q = q.filter(HWS.location_id.in_(dbcity.offspring_ids()))

            # This one would change the template's locations hence forbidden
            # FIXME: allow the change if there are no machines affected
            if dbcampus.hub != dbcity.hub:
                # Doing this both to reduce user error and to limit
                # testing required.
                raise ArgumentError("Cannot change campus. {0} is in {1:l}, "
                                    "while {2:l} is in {3:l}.".format(
                                        dbcampus, dbcampus.hub,
                                        dbcity, dbcity.hub))

            # Validate ChangeManagement
            cm = ChangeManagement(session, user, justification, reason, logger, self.command, **arguments)
            cm.consider(q)
            plenaries.add(q)

            q = session.query(Cluster)
            q = q.filter(Cluster.location_constraint_id.in_(dbcity.offspring_ids()))
            # Validate ChangeManagement
            # TO DO Either modify validate_prod_cluster method to accept queryset
            # or convert to a list in validate method
            cm.consider(q.all())
            plenaries.add(q)

            q = session.query(Network)
            q = q.filter(Network.location_id.in_(dbcity.offspring_ids()))
            # Validate ChangeManagement
            cm.consider(q)
            plenaries.add(q)

            cm.validate()

            # remember the old campus for the DSDB update below
            if dbcity.campus:
                prev_campus = dbcity.campus
            dbcity.update_parent(parent=dbcampus)

        session.flush()

        if campus is not None:
            if prev_campus:
                prev_name = prev_campus.name
            else:
                prev_name = None
            dsdb_runner.update_city(city, dbcampus.name, prev_name)

        # write plenaries and commit the DSDB change atomically
        with plenaries.transaction(verbose=True):
            dsdb_runner.commit_or_rollback()

        return
| {
"content_hash": "b24890504f9906cef85fc5433e3c3441",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 106,
"avg_line_length": 38.522222222222226,
"alnum_prop": 0.5985001442169022,
"repo_name": "quattor/aquilon",
"id": "37311bbc402840fb2ce14b7657fcc04a808dee44",
"size": "4195",
"binary": false,
"copies": "1",
"ref": "refs/heads/upstream",
"path": "lib/aquilon/worker/commands/update_city.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "DIGITAL Command Language",
"bytes": "1823"
},
{
"name": "Makefile",
"bytes": "5732"
},
{
"name": "Mako",
"bytes": "4178"
},
{
"name": "PLSQL",
"bytes": "102109"
},
{
"name": "PLpgSQL",
"bytes": "8091"
},
{
"name": "Pan",
"bytes": "1058"
},
{
"name": "Perl",
"bytes": "6057"
},
{
"name": "Python",
"bytes": "5884984"
},
{
"name": "SQLPL",
"bytes": "869"
},
{
"name": "Shell",
"bytes": "33547"
},
{
"name": "Smarty",
"bytes": "4603"
}
],
"symlink_target": ""
} |
# Streams rows from a MySQL table to the OpenDataSoft real-time push API.
# NOTE: this is a Python 2 script (print statements, MySQLdb).
import MySQLdb
import MySQLdb.cursors
import json
import datetime
import requests
import time

''' PUSH '''
# Target domain and push API key; the push URL embeds both.
DOMAIN='YOURDOMAIN.opendatasoft.com'
PUSH_OR_API_LEY='YOUR API KEY HERE'
push_base_url = 'http://%s/api/push/1.0/am/realtime/push/?pushkey=%s'%(DOMAIN,PUSH_OR_API_LEY)


# A specific datetime encoder for json.dumps
class DTEncoder(json.JSONEncoder):
    def default(self, o):
        # Serialize datetimes and dates as their string representation.
        if isinstance(o, datetime.datetime):
            return str(o)
        if isinstance(o, datetime.date):
            return str(o)
        return super(DTEncoder, self).default(o)


fields = None

db = MySQLdb.connect(host="localhost",  # your host, usually localhost
                     user="root",  # your username
                     passwd="",  # your password
                     db="YOURDB",  # name of the data base
                     cursorclass=MySQLdb.cursors.SSCursor)  # Streamed cursor

cur = db.cursor()
cur.execute("select * from X LEFT JOIN Y ON X.id = Y.id where X.zip = \"0\"")

cpt_total = 0
doc_list = list()  # Push buffer to increase push speed and optimize processing
cpt = 0  # Push buffer counter
now = datetime.datetime.now()
# Column names from the cursor description become the record keys.
fields = [i[0] for i in cur.description]

for row in cur:
    # build the dict / record
    record = dict()
    for i, e in enumerate(fields):
        record[e] = row[i]
    # append to the buffer list
    doc_list.append(record)
    cpt += 1
    cpt_total += 1
    # every 100 -> Push !
    if (cpt >= 100):
        ret = requests.post(push_base_url, data=json.dumps(doc_list, cls=DTEncoder))
        ret.raise_for_status()
        cpt = 0
        doc_list = list()
        delta = datetime.datetime.now() - now
        print "[Push status] %d documents pushed in %.2f minutes"%(cpt_total, delta.total_seconds() / 60)

# flush the remaining partial buffer
if cpt != 0:
    ret = requests.post(push_base_url, data=json.dumps(doc_list, cls=DTEncoder))
    ret.raise_for_status()

delta = datetime.datetime.now() - now
print "*** END ***"
print "[Total time] %d documents pushed in %.2f minutes"%(cpt_total, delta.total_seconds() / 60)
db.close()
| {
"content_hash": "eb24c803b5a4a42a8b902278832290ae",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 105,
"avg_line_length": 30.176470588235293,
"alnum_prop": 0.6276803118908382,
"repo_name": "opendatasoft/ods-cookbook",
"id": "2044cb0d8679e0f4ef9f8b3d5ba5f82b2fc66d58",
"size": "2070",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "push-api/mysql.to.ods/mysql_to_rt.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5309"
},
{
"name": "HTML",
"bytes": "54045"
},
{
"name": "JavaScript",
"bytes": "2642203"
},
{
"name": "Python",
"bytes": "18201"
}
],
"symlink_target": ""
} |
"""
Jumping rules for MCMC
======================
This module contains classes that implement jumping rules for MCMC.
"""
import numpy as np
class RandomWalk:
    """
    Random walk jumping function for MCMC.
    """
    def __init__(self, covmat):
        """
        Create a random walker with given covariance.

        Args:
            covmat: A square, 2D numpy.array that should be used as covariance
            matrix for the random walk.

        Raises:
            Exception: if covmat is not convertible to a numpy array, not
            2-dimensional, or not square.
        """
        # BUGFIX: the whole validation used to sit inside one try with a bare
        # except, so the specific dimensionality/squareness errors raised
        # below were swallowed and replaced by the generic "convertible"
        # message. Only the conversion itself is guarded now.
        try:
            self.covmat = np.asarray(covmat)
        except Exception:
            raise Exception("covmat argument must be convertible to a numpy array.")
        # covmat must be a matrix.
        if not self.covmat.ndim == 2:
            raise Exception("Covariance matrix must be 2 dimensional.")
        # covmat must be square.
        self.n = self.covmat.shape[0]
        if not self.n == self.covmat.shape[1]:
            raise Exception("Covariance matrix must be square.")

    def step(self, x):
        """
        Generates a new step x_new from the current position x.

        Args:
            x: The current position of the random walker.
        """
        return np.random.multivariate_normal(x.ravel(), self.covmat).reshape(x.shape)

    def update(self, hist):
        """
        Update covariance from a sequence of samples.

        This computes the covariance matrix of a given sequence of samples,
        scales it by (2.4 / sqrt(n))^2 and sets it as the covariance matrix
        to be used for the random walk.

        Args:
            hist: 2D numpy array with shape (m,n) where m is number of steps in
            the sequence and n is the number of dimensions of the parameter
            space.
        """
        if not hist.shape[1] == self.n:
            raise Exception("Provided array does not have the expected dimensions.")
        mean = np.mean(hist, axis=0, keepdims=True)
        d = hist - mean
        # biased (1/m) sample covariance of the history
        s = np.dot(np.transpose(d), d) / hist.shape[0]
        self.covmat = (2.4 / np.sqrt(self.n)) ** 2 * s
| {
"content_hash": "2513655e1ea614049d3062383e3cb001",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 85,
"avg_line_length": 31.921875,
"alnum_prop": 0.574645129711209,
"repo_name": "atmtools/typhon",
"id": "d8dcba93f950bc0a2b2d52d5b32a4507d670cecd",
"size": "2043",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "typhon/retrieval/mcmc/jumping_rules.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1368660"
},
{
"name": "Shell",
"bytes": "203"
},
{
"name": "TeX",
"bytes": "315"
}
],
"symlink_target": ""
} |
"""
Django settings for todolist project.
Generated by 'django-admin startproject' using Django 1.9.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; in production it
# should be loaded from the environment instead of hard-coded here.
SECRET_KEY = '9zl%^5l1vj=1yg^g#iju6j&_@sb4z#+@r)3_5$jfg)cmt(8v4@'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    # put contenttypes above auth so that python manage.py flush works
    # correctly: https://code.djangoproject.com/ticket/9207
    'django.contrib.contenttypes',
    'django.contrib.auth',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # third-party apps
    'django_extensions',
    'rest_framework',
    'rest_framework_swagger',
    # project apps
    'todolistapp',
]
MIDDLEWARE_CLASSES = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'todolist.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'todolist.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Logging: everything goes to the console; the django logger level can be
# overridden through the DJANGO_LOG_LEVEL environment variable.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'console': {
            'class': 'logging.StreamHandler',
        }
    },
    'loggers': {
        'django': {
            'handlers': ['console'],
            'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),
        },
        'todolistapp': {
            'handlers': ['console'],
            'level': 'DEBUG',
        }
    },
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
# for details regarding media vs static files:
# http://timmyomahony.com/blog/static-vs-media-and-root-vs-path-in-django/
# STATIC_ROOT is the absolute path to the folder within which static files will
# be collected by the staticfiles application
STATIC_ROOT = os.path.join(BASE_DIR, 'static/')
# STATIC_URL is the relative browser URL to be used when accessing static files
# from the browser
STATIC_URL = '/static/'
# MEDIA_ROOT is the absolute path to the folder that will hold user uploads
MEDIA_ROOT = os.path.join(BASE_DIR, 'media/')
# MEDIA_URL is the relative browser URL to be used when accessing media files
# from the browser
MEDIA_URL='/media/'
# NOTE: In production, change this to memcached
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
        'TIMEOUT': 300, # 5 minutes
        'OPTIONS': {
            'MAX_ENTRIES': 10000
        }
    }
}
REST_FRAMEWORK = {
    'DEFAULT_AUTHENTICATION_CLASSES': (
        #'todolistapp.auth.pkiauth.PkiAuthentication'
        'rest_framework.authentication.BasicAuthentication'
        #'rest_framework.authentication.SessionAuthentication',
        , ),
    # Use Django's standard `django.contrib.auth` permissions,
    # or allow read-only access for unauthenticated users.
    'DEFAULT_PERMISSION_CLASSES': (
        'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly',
    ),
    'DEFAULT_FILTER_BACKENDS': ('rest_framework.filters.DjangoFilterBackend',),
    'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',
    'DEFAULT_PARSER_CLASSES': (
        'rest_framework.parsers.JSONParser',
        'rest_framework.parsers.MultiPartParser',
        'rest_framework.parsers.FormParser',
    ),
    'DEFAULT_RENDERER_CLASSES': (
        'rest_framework.renderers.JSONRenderer',
        'rest_framework.renderers.BrowsableAPIRenderer',
        'rest_framework.renderers.StaticHTMLRenderer',
    )
}
# Used to store application settings (currently empty).
TODOLIST = {
}
| {
"content_hash": "c57ff5da38b922b575530c5cdd8c5aea",
"timestamp": "",
"source": "github",
"line_count": 203,
"max_line_length": 91,
"avg_line_length": 28.65024630541872,
"alnum_prop": 0.672971114167813,
"repo_name": "mannyrivera2010/todolist-py3-drf",
"id": "c56bc4a8ffea219030e6d6080846f38556a4f151",
"size": "5816",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "todolist/settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16734"
},
{
"name": "Shell",
"bytes": "1146"
}
],
"symlink_target": ""
} |
"""
.. module:: models
"""
import logging
import os
import uuid
from django.contrib.auth.models import User
from django.db import models
from django.db.models import F
from django.db.models import Q
from django.utils import timezone
logger = logging.getLogger('volontulo.models')
def upload_to_offers(_, filename):
    """
    Build the storage path for an offer image upload.

    The uploaded file's base name is discarded and replaced by a random
    UUID (so two uploads can never collide); only the original extension
    is kept.  Kept as a plain module-level function because Django
    migrations require the ``upload_to`` callable to be serializable.
    """
    extension = os.path.splitext(filename)[1]
    unique_name = '{}{}'.format(uuid.uuid4(), extension)
    return os.path.join('offers', unique_name)
class Organization(models.Model):
    """Model that handles organizations/institutions."""
    # db_index: organizations are looked up by name.
    name = models.CharField(max_length=150, db_index=True)
    address = models.CharField(max_length=150)
    description = models.TextField()
    def __str__(self):
        """Organization model string representation."""
        return self.name
class OffersManager(models.Manager):
    """Offers Manager: canned querysets over Offer."""
    def get_active(self):
        """Return published offers whose action and recruitment have not
        finished yet (NULL end dates count as open-ended)."""
        now = timezone.now()
        return self.filter(
            # that covers action_status__in=('ongoing', 'future'):
            Q(finished_at__isnull=True) | Q(finished_at__gte=now),
            # that covers recruitment_status__in=('ongoing', 'future'):
            Q(recruitment_end_date__isnull=True) |
            Q(recruitment_end_date__gte=now),
            offer_status='published',
        ).all()
    def get_for_administrator(self):
        """Return all offers for administrator to allow management."""
        return self.all()
    def get_weightened(self):
        """Return all published offers ordered by ascending weight
        (lowest weight first; see Offer.publish, which gives freshly
        published offers weight 0)."""
        return self.filter(offer_status='published').order_by('weight')
class Offer(models.Model):
    """Offer model.

    A volunteering offer belonging to an Organization; volunteers (Users)
    are attached through the ``volunteers`` m2m relation.
    """
    OFFER_STATUSES = (
        ('unpublished', 'Unpublished'),
        ('published', 'Published'),
        ('rejected', 'Rejected'),
    )
    objects = OffersManager()
    organization = models.ForeignKey(Organization)
    volunteers = models.ManyToManyField(User)
    description = models.TextField()
    requirements = models.TextField(blank=True, default='')
    time_commitment = models.TextField()
    benefits = models.TextField()
    location = models.CharField(max_length=150)
    title = models.CharField(max_length=150)
    started_at = models.DateTimeField(blank=True, null=True)
    finished_at = models.DateTimeField(blank=True, null=True)
    time_period = models.CharField(max_length=150, default='', blank=True)
    offer_status = models.CharField(
        max_length=16,
        choices=OFFER_STATUSES,
        default='unpublished',
    )
    # NOTE(review): BooleanField with an integer default (0) -- looks like it
    # was meant to be default=False, or an IntegerField vote counter.
    # Changing it requires a migration, so it is only flagged here.
    votes = models.BooleanField(default=0)
    recruitment_start_date = models.DateTimeField(blank=True, null=True)
    recruitment_end_date = models.DateTimeField(blank=True, null=True)
    reserve_recruitment = models.BooleanField(blank=True, default=True)
    reserve_recruitment_start_date = models.DateTimeField(
        blank=True,
        null=True
    )
    reserve_recruitment_end_date = models.DateTimeField(
        blank=True,
        null=True
    )
    action_ongoing = models.BooleanField(default=False, blank=True)
    constant_coop = models.BooleanField(default=False, blank=True)
    action_start_date = models.DateTimeField(blank=True, null=True)
    action_end_date = models.DateTimeField(blank=True, null=True)
    volunteers_limit = models.IntegerField(default=0, null=True, blank=True)
    reserve_volunteers_limit = models.IntegerField(
        default=0, null=True, blank=True)
    # Lower weight sorts first in OffersManager.get_weightened(); publish()
    # below keeps the most recently published offer at weight 0.
    weight = models.IntegerField(default=0, null=True, blank=True)
    image = models.ImageField(
        upload_to=upload_to_offers,
        null=True,
        blank=True
    )
    def __str__(self):
        """Offer string representation."""
        return self.title
    def create_new(self):
        """Set status while creating new offer.

        Only mutates the in-memory instance; the caller must save().
        """
        self.offer_status = 'unpublished'
    @property
    def action_status(self):
        """Determine action status ('future'/'finished'/'ongoing') by
        comparing the offer action dates against the current time."""
        now = timezone.now()
        if self.started_at and self.started_at > now:
            return 'future'
        if self.finished_at and self.finished_at < now:
            return 'finished'
        return 'ongoing'
    @property
    def recruitment_status(self):
        """Determine recruitment status ('future'/'finished'/'ongoing') by
        comparing the recruitment dates against the current time."""
        now = timezone.now()
        if self.recruitment_start_date and self.recruitment_start_date > now:
            return 'future'
        if self.recruitment_end_date and self.recruitment_end_date < now:
            return 'finished'
        return 'ongoing'
    def publish(self):
        """Publish offer.

        Bumps the weight of every existing offer by one and gives this
        offer weight 0, so newly published offers appear first in
        OffersManager.get_weightened().
        """
        self.offer_status = 'published'
        Offer.objects.all().update(weight=F('weight') + 1)
        self.weight = 0
        self.save()
        return self
class UserProfile(models.Model):
    """Model that handles users profiles."""
    user = models.OneToOneField(User)
    organizations = models.ManyToManyField(
        Organization,
        related_name='userprofiles',
    )
    is_administrator = models.BooleanField(default=False, blank=True)
    # NOTE(review): both null=True and default='' on a CharField means two
    # distinct "empty" states in the DB; Django convention is to avoid
    # null on text fields.  Changing it needs a migration, so only flagged.
    phone_no = models.CharField(
        max_length=32,
        blank=True,
        default='',
        null=True
    )
    # Stable external identifier, generated per profile.
    uuid = models.UUIDField(default=uuid.uuid4, unique=True)
    def is_in_organization(self):
        """Return True if current user is in any organization."""
        return self.organizations.exists()
    def __str__(self):
        return self.user.email
| {
"content_hash": "0d83c86150b1c8b4e7ad11ecb5966b9f",
"timestamp": "",
"source": "github",
"line_count": 181,
"max_line_length": 77,
"avg_line_length": 30.895027624309392,
"alnum_prop": 0.6421673819742489,
"repo_name": "w1stler/volontulo",
"id": "f4a85cf064d11d23114936e9acc7bf29c06b577d",
"size": "5617",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "backend/apps/volontulo/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "18114"
},
{
"name": "Dockerfile",
"bytes": "1761"
},
{
"name": "HTML",
"bytes": "102925"
},
{
"name": "JavaScript",
"bytes": "4192"
},
{
"name": "Makefile",
"bytes": "991"
},
{
"name": "Python",
"bytes": "205467"
},
{
"name": "Shell",
"bytes": "4436"
},
{
"name": "TypeScript",
"bytes": "152266"
}
],
"symlink_target": ""
} |
"""Models and helper functions for access to app's datastore metadata.
These entities cannot be created by users, but are created as results of
__namespace__, __kind__ and __property__ metadata queries such as
# Find all namespaces
q = db.GqlQuery("SELECT * FROM __namespace__")
for p in q.run():
print "namespace:", repr(p.namespace_name)
or
# Find all properties of A whose name starts with a lower-case letter
q = db.GqlQuery("SELECT __key__ from __property__ " +
"WHERE __key__ >= :1 AND __key__ < :2",
Property.key_for_property("A", "a"),
Property.key_for_property("A", chr(ord("z") + 1)))
for p in q.run():
print "%s: %s" % (Property.key_to_kind(p), Property.key_to_property(p))
or, using Query objects
# Find all kinds >= "a"
q = metadata.Kind().all()
q.filter("__key__ >=", metadata.Kind.key_for_kind("a"))
for p in q.run():
print "kind:", repr(p.kind_name)
"""
from google.appengine.api import datastore_types
from google.appengine.ext import db
class BaseMetadata(db.Model):
  """Base class for all metadata models."""

  # Overridden by each subclass with its reserved datastore kind name
  # (e.g. '__namespace__').
  KIND_NAME = '__BaseMetadata__'

  @classmethod
  def kind(cls):
    """Kind name override."""
    return cls.KIND_NAME
class Namespace(BaseMetadata):
  """Model for __namespace__ metadata query results."""

  KIND_NAME = '__namespace__'
  # Numeric key id reserved for the default (empty-string) namespace,
  # since '' is not a valid datastore key name.
  EMPTY_NAMESPACE_ID = datastore_types._EMPTY_NAMESPACE_ID

  @property
  def namespace_name(self):
    """Return the namespace name specified by this entity's key."""
    return self.key_to_namespace(self.key())

  @classmethod
  def key_for_namespace(cls, namespace):
    """Return the __namespace__ key for namespace.

    Args:
      namespace: namespace whose key is requested.

    Returns:
      The key for namespace.
    """
    if namespace:
      return db.Key.from_path(cls.KIND_NAME, namespace)
    else:
      return db.Key.from_path(cls.KIND_NAME, cls.EMPTY_NAMESPACE_ID)

  @classmethod
  def key_to_namespace(cls, key):
    """Return the namespace specified by a given __namespace__ key.

    Args:
      key: key whose name is requested.

    Returns:
      The namespace specified by key.
    """
    # Keys built with the numeric EMPTY_NAMESPACE_ID have no name, so
    # name() is None and the default namespace '' is returned.
    return key.name() or ''
class Kind(BaseMetadata):
  """Model for __kind__ metadata query results."""

  KIND_NAME = '__kind__'

  @property
  def kind_name(self):
    """Return the kind name specified by this entity's key."""
    return self.key_to_kind(self.key())

  @classmethod
  def key_for_kind(cls, kind):
    """Return the __kind__ key for kind.

    Args:
      kind: kind whose key is requested.

    Returns:
      The key for kind.
    """
    return db.Key.from_path(cls.KIND_NAME, kind)

  @classmethod
  def key_to_kind(cls, key):
    """Return the kind specified by a given __kind__ key.

    Args:
      key: key whose name is requested.

    Returns:
      The kind specified by key.
    """
    return key.name()
class Property(BaseMetadata):
  """Model for __property__ metadata query results.

  __property__ keys come in two shapes: a bare __kind__ key (kind only)
  or a __kind__ parent with a __property__ child (kind + property).
  """

  KIND_NAME = '__property__'

  @property
  def property_name(self):
    """Return the property name specified by this entity's key."""
    return self.key_to_property(self.key())

  @property
  def kind_name(self):
    """Return the kind name specified by this entity's key."""
    return self.key_to_kind(self.key())

  # List of representation names for this property's indexed values.
  property_representation = db.StringListProperty()

  @classmethod
  def key_for_kind(cls, kind):
    """Return the __property__ key for kind.

    Args:
      kind: kind whose key is requested.

    Returns:
      The parent key for __property__ keys of kind.
    """
    return db.Key.from_path(Kind.KIND_NAME, kind)

  @classmethod
  def key_for_property(cls, kind, property):
    """Return the __property__ key for property of kind.

    Note: the `property` parameter shadows the builtin of the same name;
    it is kept for backward compatibility with keyword callers.

    Args:
      kind: kind whose key is requested.
      property: property whose key is requested.

    Returns:
      The key for property of kind.
    """
    return db.Key.from_path(Kind.KIND_NAME, kind, Property.KIND_NAME, property)

  @classmethod
  def key_to_kind(cls, key):
    """Return the kind specified by a given __property__ key.

    Args:
      key: key whose kind name is requested.

    Returns:
      The kind specified by key.
    """
    # A kind-only key names the kind itself; otherwise the kind is the
    # parent of the __property__ child key.
    if key.kind() == Kind.KIND_NAME:
      return key.name()
    else:
      return key.parent().name()

  @classmethod
  def key_to_property(cls, key):
    """Return the property specified by a given __property__ key.

    Args:
      key: key whose property name is requested.

    Returns:
      property specified by key, or None if the key specified only a kind.
    """
    if key.kind() == Kind.KIND_NAME:
      return None
    else:
      return key.name()
def get_namespaces(start=None, end=None):
  """Return all namespaces in the specified range.

  Args:
    start: only return namespaces >= start if start is not None.
    end: only return namespaces < end if end is not None.

  Returns:
    A list of namespace names between the (optional) start and end values.
  """
  query = Namespace.all()
  # Apply each half-open bound only when the caller supplied it.
  for operator, bound in (('__key__ >=', start), ('__key__ <', end)):
    if bound is not None:
      query.filter(operator, Namespace.key_for_namespace(bound))
  return [entity.namespace_name for entity in query.run()]
def get_kinds(start=None, end=None):
  """Return all kinds in the specified range.

  Args:
    start: only return kinds >= start if start is not None.
    end: only return kinds < end if end is not None.

  Returns:
    A list of kind names between the (optional) start and end values.
  """
  # An empty-string upper bound excludes every kind.
  if end == '':
    return []
  query = Kind.all()
  # An empty-string lower bound adds no constraint, so skip it too.
  if start:
    query.filter('__key__ >=', Kind.key_for_kind(start))
  if end is not None:
    query.filter('__key__ <', Kind.key_for_kind(end))
  return [entity.kind_name for entity in query.run()]
def get_properties_of_kind(kind, start=None, end=None):
  """Return all properties of kind in the specified range.

  NOTE: This function does not return unindexed properties.

  Args:
    kind: name of kind whose properties you want.
    start: only return properties >= start if start is not None.
    end: only return properties < end if end is not None.

  Returns:
    A list of property names of kind between the (optional) start and end
    values.
  """
  # An empty-string upper bound excludes every property.
  if end == '':
    return []
  query = Property.all(keys_only=True)
  query.ancestor(Property.key_for_kind(kind))
  # An empty-string lower bound adds no constraint, so skip it too.
  if start:
    query.filter('__key__ >=', Property.key_for_property(kind, start))
  if end is not None:
    query.filter('__key__ <', Property.key_for_property(kind, end))
  return [Property.key_to_property(key) for key in query.run()]
def get_representations_of_kind(kind, start=None, end=None):
  """Return all representations of properties of kind in the specified range.

  NOTE: This function does not return unindexed properties.

  Args:
    kind: name of kind whose properties you want.
    start: only return properties >= start if start is not None.
    end: only return properties < end if end is not None.

  Returns:
    A dictionary mapping property names to its list of representations.
  """
  # An empty-string upper bound excludes every property.
  if end == '':
    return {}
  query = Property.all()
  query.ancestor(Property.key_for_kind(kind))
  # An empty-string lower bound adds no constraint, so skip it too.
  if start:
    query.filter('__key__ >=', Property.key_for_property(kind, start))
  if end is not None:
    query.filter('__key__ <', Property.key_for_property(kind, end))
  # `prop` rather than `property` to avoid shadowing the builtin.
  return dict((prop.property_name, prop.property_representation)
              for prop in query.run())
| {
"content_hash": "75efbcf987e7c21c22eb2b6e2ab36754",
"timestamp": "",
"source": "github",
"line_count": 291,
"max_line_length": 79,
"avg_line_length": 25.848797250859107,
"alnum_prop": 0.6430470619516087,
"repo_name": "adviti/melange",
"id": "a6b612a7d8006cf8cae7804ff4eb44888aaedd5e",
"size": "8127",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "thirdparty/google_appengine/google/appengine/ext/db/metadata.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
import inspect
import re
from lollipop.compat import DictMixin, iterkeys
import collections
def identity(value):
    """Return *value* unchanged (the identity function)."""
    return value
def constant(value):
    """Return a function that ignores all its arguments and always
    returns the given *value*."""
    def _constant(*_args, **_kwargs):
        return value
    return _constant
def is_sequence(value):
    """Return True if value supports the list interface; False otherwise.

    The abstract base classes moved to :mod:`collections.abc` in Python 3
    and the legacy ``collections.Sequence`` alias was removed in 3.10, so
    import from the new location first and fall back for Python 2.
    """
    try:
        from collections.abc import Sequence
    except ImportError:  # Python 2
        from collections import Sequence
    return isinstance(value, Sequence)
def is_mapping(value):
    """Return True if value supports the dict interface; False otherwise.

    The abstract base classes moved to :mod:`collections.abc` in Python 3
    and the legacy ``collections.Mapping`` alias was removed in 3.10, so
    import from the new location first and fall back for Python 2.
    """
    try:
        from collections.abc import Mapping
    except ImportError:  # Python 2
        from collections import Mapping
    return isinstance(value, Mapping)
# Backward compatibility: earlier releases exposed these helpers under
# container-specific names; keep the aliases so existing imports work.
is_list = is_sequence
is_dict = is_mapping
def make_context_aware(func, numargs):
    """
    Check if given function has no more arguments than given. If so, wrap it
    into another function that takes extra argument and drops it.

    Used to support user providing callback functions that are not context aware.

    Args:
        func: Callable (function, method, class or callable object).
        numargs: Number of non-context arguments the caller will pass.

    Returns:
        ``func`` itself if it already accepts a trailing context argument,
        otherwise a wrapper that drops the last (context) argument.
    """
    # inspect.getargspec() was removed in Python 3.11; prefer
    # getfullargspec() where available and fall back for Python 2.
    getargspec = getattr(inspect, 'getfullargspec', None) or inspect.getargspec
    try:
        if inspect.ismethod(func):
            # Bound methods receive `self` implicitly.
            arg_count = len(getargspec(func).args) - 1
        elif inspect.isfunction(func):
            arg_count = len(getargspec(func).args)
        elif inspect.isclass(func):
            arg_count = len(getargspec(func.__init__).args) - 1
        else:
            arg_count = len(getargspec(func.__call__).args) - 1
    except TypeError:
        # Non-introspectable callables (e.g. builtins): treat them as not
        # context-aware, so the context argument gets dropped below.
        arg_count = numargs

    if arg_count <= numargs:
        def normalized(*args):
            return func(*args[:-1])
        return normalized

    return func
def call_with_context(func, context, *args):
    """
    Invoke *func* with *args*, passing *context* as a trailing argument
    when the callable accepts it (it is silently dropped otherwise).
    """
    adapted = make_context_aware(func, len(args))
    return adapted(*(args + (context,)))
def to_snake_case(s):
    """Convert a camel-case identifier to snake-case."""
    def _insert_underscore(match):
        return match.group(1) + '_' + match.group(2).lower()
    # Match an uppercase letter preceded by anything that is neither an
    # underscore nor another uppercase letter.
    return re.sub('([^_A-Z])([A-Z])', _insert_underscore, s)
def to_camel_case(s):
    """Convert a snake-case identifier to camel-case."""
    def _capitalize(match):
        return match.group(1).upper()
    # Each underscore followed by a lowercase letter collapses into the
    # uppercased letter.
    return re.sub('_([a-z])', _capitalize, s)
# Sentinel distinguishing "no explicit fallback given" from a None fallback.
_default = object()


class DictWithDefault(DictMixin, object):
    """A dict-like mapping that returns a configurable default value for
    missing keys (without inserting them)."""

    def __init__(self, values=None, default=None):
        """
        Args:
            values: Optional initial mapping; a fresh dict is created when
                omitted.
            default: Value returned for keys that are not present.
        """
        super(DictWithDefault, self).__init__()
        # The previous signature used a mutable default argument
        # (``values={}``), so every instance constructed without explicit
        # values shared one dict and mutations leaked between them.
        self._values = {} if values is None else values
        self.default = default

    def __len__(self):
        return len(self._values)

    def get(self, key, default=_default):
        """Return the value for *key*, falling back to *default* or, when
        no explicit fallback is given, to the instance-wide default."""
        if key in self._values:
            return self._values[key]
        if default is _default:
            default = self.default
        return default

    def __getitem__(self, key):
        if key in self._values:
            return self._values[key]
        return self.default

    def __setitem__(self, key, value):
        self._values[key] = value

    def __delitem__(self, key):
        del self._values[key]

    def __iter__(self):
        for key in self._values:
            yield key

    def __contains__(self, key):
        return key in self._values

    def keys(self):
        return self._values.keys()

    def iterkeys(self):
        for k in iterkeys(self._values):
            yield k

    def iteritems(self):
        # dict.iteritems() does not exist on Python 3; items() works on
        # both (merely non-lazy on Python 2).
        for k, v in self._values.items():
            yield k, v
class OpenStruct(DictMixin):
    """A dictionary that also allows accessing values through object attributes."""

    def __init__(self, data=None):
        # Write through __dict__ directly so we do not recurse into our own
        # __setattr__, which stores into _data.
        self.__dict__.update({'_data': data or {}})

    def __getitem__(self, key):
        return self._data[key]

    def __setitem__(self, key, value):
        self._data[key] = value

    def __delitem__(self, key):
        del self._data[key]

    def __iter__(self):
        for key in self._data:
            yield key

    def __len__(self):
        return len(self._data)

    def __contains__(self, key):
        return key in self._data

    def keys(self):
        return self._data.keys()

    def iterkeys(self):
        for k in iterkeys(self._data):
            yield k

    def iteritems(self):
        # dict.iteritems() is Python 2 only; items() is portable and
        # keeps this method working on Python 3.
        for k, v in self._data.items():
            yield k, v

    def __hasattr__(self, name):
        # NOTE(review): __hasattr__ is not part of the Python data model --
        # hasattr() never calls it; attribute existence is governed by
        # __getattr__ below.  Kept for backward compatibility.
        return name in self._data

    def __getattr__(self, name):
        if name not in self._data:
            raise AttributeError(name)
        return self._data[name]

    def __setattr__(self, name, value):
        self._data[name] = value

    def __delattr__(self, name):
        if name not in self._data:
            raise AttributeError(name)
        del self._data[name]

    def __repr__(self):
        # items() instead of Python-2-only iteritems() so repr() no longer
        # raises AttributeError on Python 3.
        return '<%s %s>' % (
            self.__class__.__name__,
            ' '.join('%s=%s' % (k, repr(v)) for k, v in self._data.items()),
        )
| {
"content_hash": "29fa41562699850fc7334ece702c948c",
"timestamp": "",
"source": "github",
"line_count": 183,
"max_line_length": 89,
"avg_line_length": 25.81967213114754,
"alnum_prop": 0.5887830687830687,
"repo_name": "maximkulkin/lollipop",
"id": "d41fa2735d97672b2c6ca2eeb37fd3e8dc9d7de3",
"size": "4725",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lollipop/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "240179"
}
],
"symlink_target": ""
} |
"""Utils needed for Google Hangouts."""
from hangups import CredentialsPrompt, GoogleAuthError, RefreshTokenCache
class Google2FAError(GoogleAuthError):
    """Raised when Google asks for a two-factor verification code but
    none has been provided (see HangoutsCredentials.get_verification_code)."""
class HangoutsCredentials(CredentialsPrompt):
    """Google account credentials supplied up front as constructor
    parameters instead of interactive prompts."""

    def __init__(self, email, password, pin=None):
        """Store the Google account credentials.

        :param email: Google account email address.
        :param password: Google account password.
        :param pin: Optional Google account verification code (2FA).
        """
        self._email = email
        self._password = password
        self._pin = pin

    def get_email(self):
        """Return the Google account email address."""
        return self._email

    def get_password(self):
        """Return the Google account password."""
        return self._password

    def get_verification_code(self):
        """Return the Google account verification code.

        :raises Google2FAError: when no code has been provided yet.
        """
        if self._pin is None:
            raise Google2FAError()
        return self._pin

    def set_verification_code(self, pin):
        """Store *pin* as the Google account verification code."""
        self._pin = pin
class HangoutsRefreshToken(RefreshTokenCache):
    """In-memory cache for the refresh token; nothing is persisted."""

    def __init__(self, token):
        """Seed the cache with an initial refresh token.

        :param token: Initial refresh token.
        """
        # The base class expects a filename; pass an empty one because
        # this cache never touches the filesystem.
        super().__init__("")
        self._token = token

    def get(self):
        """Return the cached refresh token."""
        return self._token

    def set(self, refresh_token):
        """Replace the cached refresh token.

        :param refresh_token: Refresh token to cache.
        """
        self._token = refresh_token
| {
"content_hash": "08aa3bb60c58067f6502ce0b90b1b262",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 73,
"avg_line_length": 24.493827160493826,
"alnum_prop": 0.6008064516129032,
"repo_name": "HydrelioxGitHub/home-assistant",
"id": "9aff7730201ac9c615e890942100c133dc7397ca",
"size": "1984",
"binary": false,
"copies": "6",
"ref": "refs/heads/dev",
"path": "homeassistant/components/hangouts/hangups_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1081"
},
{
"name": "Python",
"bytes": "14330009"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17364"
}
],
"symlink_target": ""
} |
import setuptools
# Runtime dependencies.
# NOTE(review): 'argparse' is part of the stdlib on Python >= 2.7 --
# confirm whether 2.6 support is still required before dropping it.
install_requires = [
    'PrettyTable==0.7.2',
    'kazoo==1.00',
    'simplejson',
    'argparse',
    'requests',
    'kafka-python'
]

setuptools.setup(
    name = 'stormkafkamon',
    version = '0.1.1',
    license = 'Apache',
    description = '''Monitor offsets of a storm kafka spout.''',
    author = "Philip O'Toole",
    author_email = 'philipomailbox-github@yahoo.com',
    url = 'https://github.com/vivekrao1985/stormkafkamon',
    platforms = 'any',
    packages = ['stormkafkamon'],
    zip_safe = True,
    verbose = False,
    install_requires = install_requires,
    # NOTE(review): dependency_links is deprecated and ignored by modern
    # pip; the pinned kafka-python fork may no longer be installable.
    dependency_links = ['https://github.com/mumrah/kafka-python/tarball/0.7#egg=kafka-python-0.7.2-0'],
    # Installs the `skmon` console command.
    entry_points={
        'console_scripts': [
            'skmon = stormkafkamon.monitor:main'
        ]
    },
)
| {
"content_hash": "1acbb4e2de48d3c0bfaff04295f2d1f9",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 103,
"avg_line_length": 26.419354838709676,
"alnum_prop": 0.6105006105006106,
"repo_name": "vivekrao1985/stormkafkamon",
"id": "49f029b3fe2e4b602468f3c4818f8555288acfae",
"size": "842",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "9328"
}
],
"symlink_target": ""
} |
from django.http import HttpResponse
from django.contrib.auth import authenticate
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from piston.handler import BaseHandler as Handler
from piston.emitters import JSONEmitter, Emitter
from opps.api.models import ApiKey
class UncachedEmitter(JSONEmitter):
    """JSON emitter that marks every response as uncacheable.

    On websites running behind varnish or another HTTP cache, caching the
    API responses can mix up results and serve stale data, so this emitter
    injects explicit no-cache headers into each response.
    """

    def render(self, request):
        payload = super(UncachedEmitter, self).render(request)
        response = HttpResponse(payload)
        # Cache-Control for HTTP/1.1, Pragma for HTTP/1.0 proxies and
        # Expires=0 for anything even older.
        response['Cache-Control'] = 'no-cache, no-store, must-revalidate'
        response['Content-Type'] = 'application/json; charset=utf-8'
        response['Pragma'] = 'no-cache'
        response['Expires'] = 0
        return response


Emitter.register('json', UncachedEmitter, 'application/json; charset=utf-8')
class BaseHandler(Handler):
    """Piston base handler adding pagination metadata to API responses."""
    # Default page size when no 'paginate_limit' parameter is supplied.
    limit = 20
    # Query-string parameter used to override the page size.
    limit_arg = 'paginate_limit'
    # NOTE(review): class-level dict mutated in place by the methods below
    # (self.meta[...] = ... writes into this shared dict), so pagination
    # state leaks across instances and requests -- confirm intended.
    meta = {}
    # Meta keys (note 'start_index' appears twice in this list).
    blackfield = ['num_pages', 'page_range', 'total_objects', 'per_page',
                  'page', 'has_next', 'has_previous', 'has_other_pages',
                  'end_index', 'start_index', 'start_index']
    def include_meta(self, d):
        """Wrap payload *d* together with the pagination metadata."""
        obj = {'meta': self.meta, 'objects': d}
        return obj
    def paginate_queryset(self, queryset, request):
        """Paginate *queryset* according to the request parameters and
        record the paginator state in self.meta; returns the page."""
        limit = request.GET.get(self.limit_arg, self.meta.get(self.limit_arg))
        paginator = Paginator(queryset, limit or self.limit)
        self.meta['num_pages'] = paginator.num_pages
        self.meta['page_range'] = paginator.page_range
        self.meta['total_objects'] = paginator.count
        self.meta['per_page'] = paginator.per_page
        page = self.meta.get('page', request.GET.get('page', 1))
        try:
            results = paginator.page(page)
        except PageNotAnInteger:
            # Non-numeric page parameter: fall back to the first page.
            results = paginator.page(1)
        except EmptyPage:
            # Page out of range: deliver the last available page.
            results = paginator.page(paginator.num_pages)
        self.meta['has_next'] = results.has_next()
        self.meta['has_previous'] = results.has_previous()
        self.meta['has_other_pages'] = results.has_other_pages()
        self.meta['end_index'] = results.end_index()
        self.meta['start_index'] = results.start_index()
        self.meta['page_number'] = results.number
        return results
    def read(self, request):
        """Handle GET: filter the model queryset by the remaining
        query-string parameters (pagination params are popped first)."""
        base = self.model.objects
        if request.GET.items():
            items = request.GET.dict()
            self.meta[self.limit_arg] = items.pop(self.limit_arg, None)
            self.meta['page'] = items.pop('page', 1)
            qs = base.filter(**items)
        else:
            qs = base.all()
        self.meta['total_objects'] = qs.count()
        return qs
    def _limit(self, request):
        """Return the slice end index implied by page and limit params."""
        limit = request.GET.get(self.limit_arg, self.limit)
        return int(limit) * int(request.GET.get('page', 1))
    def _page(self, request):
        """Return the slice start offset for the requested page.

        NOTE(review): `limit * page - page` looks like it was meant to be
        `limit * (page - 1)`; verify against callers before changing.
        """
        page = int(request.GET.get('page', 1))
        if page == 1:
            return 0
        limit = int(request.GET.get(self.limit_arg, self.limit))
        return limit * page - page
def appendModel(Model, Filters):
    """Return a list with the attribute dict (``__dict__``) of every
    *Model* instance matching *Filters*."""
    matches = Model.objects.filter(**Filters)
    return [instance.__dict__ for instance in matches]
class ApiKeyAuthentication(object):
    """Piston authenticator validating requests against stored API keys.

    Requests whose HTTP verb is listed in *method* (GET by default) pass
    through unauthenticated; every other request must carry matching
    ``api_username``/``api_key`` parameters.
    """

    def __init__(self, auth_func=authenticate, method=('GET',)):
        """
        Args:
            auth_func: Authentication callable, kept for API compatibility
                (is_authenticated below does not call it).
            method: Iterable of HTTP methods that bypass authentication.
                The previous default was the mutable list ['GET'], which
                was shared between every instance; a tuple avoids that.
        """
        self.auth_func = auth_func
        self.method = method

    def is_authenticated(self, request):
        """Return True when the request is allowed to proceed."""
        if request.method == 'GET' and 'GET' in self.method:
            return True
        # Parameters live on the attribute named after the HTTP verb
        # (request.POST, request.PUT, ...); fall back to the GET params
        # when no such attribute exists.  This replaces a bare `except:`
        # that also swallowed KeyboardInterrupt/SystemExit.
        params = getattr(request, request.method, request.GET)
        try:
            ApiKey.objects.get(
                user__username=params.get('api_username'),
                key=params.get('api_key'))
        except ApiKey.DoesNotExist:
            return False
        return True

    def challenge(self):
        """Return the 401 response sent when authentication fails."""
        resp = HttpResponse("Authorization Required")
        resp.status_code = 401
        return resp
| {
"content_hash": "2c9d698ae73cb15b08783e20ee2f6bab",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 78,
"avg_line_length": 32.52755905511811,
"alnum_prop": 0.6051803437424352,
"repo_name": "YACOWS/opps",
"id": "d5f504a6b71be3bc732a5f88fe282dafbf910b32",
"size": "4177",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "opps/api/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "13741"
},
{
"name": "HTML",
"bytes": "57113"
},
{
"name": "JavaScript",
"bytes": "62514"
},
{
"name": "Makefile",
"bytes": "848"
},
{
"name": "Python",
"bytes": "1235961"
},
{
"name": "Shell",
"bytes": "661"
}
],
"symlink_target": ""
} |
import re
from itertools import chain
from .nodes import (
Argument,
Comment,
ExternalLink,
Heading,
HTMLEntity,
Node,
Tag,
Template,
Text,
Wikilink,
)
from .smart_list.list_proxy import ListProxy
from .string_mixin import StringMixIn
from .utils import parse_anything
__all__ = ["Wikicode"]
FLAGS = re.IGNORECASE | re.DOTALL | re.UNICODE
class Wikicode(StringMixIn):
    """A ``Wikicode`` is a container for nodes that operates like a string.

    Additionally, it contains methods that can be used to extract data from or
    modify the nodes, implemented in an interface similar to a list. For
    example, :meth:`index` can get the index of a node in the list, and
    :meth:`insert` can add a new node at that index. The :meth:`filter()
    <ifilter>` series of functions is very useful for extracting and iterating
    over, for example, all of the templates in the object.
    """

    # Sentinel for *recursive* arguments: recurse only into nodes that are
    # NOT instances of the requested forcetype.
    RECURSE_OTHERS = 2

    def __init__(self, nodes):
        super().__init__()
        self._nodes = nodes

    def __str__(self):
        # The string form is simply the concatenation of all node strings.
        return "".join([str(node) for node in self.nodes])

    @staticmethod
    def _get_children(node, contexts=False, restrict=None, parent=None):
        """Iterate over all child :class:`.Node`\\ s of a given *node*."""
        # Yield the node itself first; with *contexts* each item is a
        # (parent Wikicode, node) pair instead of a bare node.
        yield (parent, node) if contexts else node
        if restrict and isinstance(node, restrict):
            return
        for code in node.__children__():
            for child in code.nodes:
                sub = Wikicode._get_children(child, contexts, restrict, code)
                yield from sub

    @staticmethod
    def _slice_replace(code, index, old, new):
        """Replace the string *old* with *new* across *index* in *code*."""
        nodes = [str(node) for node in code.get(index)]
        substring = "".join(nodes).replace(old, new)
        # Re-parse the edited text so the slice becomes proper nodes again.
        code.nodes[index] = parse_anything(substring).nodes

    @staticmethod
    def _build_matcher(matches, flags):
        """Helper for :meth:`_indexed_ifilter` and others.

        If *matches* is a function, return it. If it's a regex, return a
        wrapper around it that can be called with a node to do a search. If
        it's ``None``, return a function that always returns ``True``.
        """
        if matches:
            if callable(matches):
                return matches
            return lambda obj: re.search(matches, str(obj), flags)
        return lambda obj: True

    def _indexed_ifilter(
        self, recursive=True, matches=None, flags=FLAGS, forcetype=None
    ):
        """Iterate over nodes and their corresponding indices in the node list.

        The arguments are interpreted as for :meth:`ifilter`. For each tuple
        ``(i, node)`` yielded by this method, ``self.index(node) == i``. Note
        that if *recursive* is ``True``, ``self.nodes[i]`` might not be the
        node itself, but will still contain it.
        """
        match = self._build_matcher(matches, flags)
        if recursive:
            restrict = forcetype if recursive == self.RECURSE_OTHERS else None

            def getter(i, node):
                # Descendants inherit the index of their top-level ancestor.
                for ch in self._get_children(node, restrict=restrict):
                    yield (i, ch)

            inodes = chain(*(getter(i, n) for i, n in enumerate(self.nodes)))
        else:
            inodes = enumerate(self.nodes)
        for i, node in inodes:
            if (not forcetype or isinstance(node, forcetype)) and match(node):
                yield (i, node)

    def _is_child_wikicode(self, obj, recursive=True):
        """Return whether the given :class:`.Wikicode` is a descendant."""

        def deref(nodes):
            # A ListProxy wraps a parent list; identity comparisons must be
            # made against that underlying parent.
            if isinstance(nodes, ListProxy):
                return nodes._parent  # pylint: disable=protected-access
            return nodes

        target = deref(obj.nodes)
        if target is deref(self.nodes):
            return True
        if recursive:
            todo = [self]
            while todo:
                code = todo.pop()
                if target is deref(code.nodes):
                    return True
                for node in code.nodes:
                    todo += list(node.__children__())
        return False

    def _do_strong_search(self, obj, recursive=True):
        """Search for the specific element *obj* within the node list.

        *obj* can be either a :class:`.Node` or a :class:`.Wikicode` object. If
        found, we return a tuple (*context*, *index*) where *context* is the
        :class:`.Wikicode` that contains *obj* and *index* is its index there,
        as a :class:`slice`. Note that if *recursive* is ``False``, *context*
        will always be ``self`` (since we only look for *obj* among immediate
        descendants), but if *recursive* is ``True``, then it could be any
        :class:`.Wikicode` contained by a node within ``self``. If *obj* is not
        found, :exc:`ValueError` is raised.
        """
        if isinstance(obj, Wikicode):
            if not self._is_child_wikicode(obj, recursive):
                raise ValueError(obj)
            return obj, slice(0, len(obj.nodes))

        if isinstance(obj, Node):
            mkslice = lambda i: slice(i, i + 1)
            if not recursive:
                return self, mkslice(self.index(obj))
            for node in self.nodes:
                for context, child in self._get_children(node, contexts=True):
                    if obj is child:
                        # A None context means the match is a direct child.
                        if not context:
                            context = self
                        return context, mkslice(context.index(child))
            raise ValueError(obj)

        raise TypeError(obj)

    def _do_weak_search(self, obj, recursive):
        """Search for an element that looks like *obj* within the node list.

        This follows the same rules as :meth:`_do_strong_search` with some
        differences. *obj* is treated as a string that might represent any
        :class:`.Node`, :class:`.Wikicode`, or combination of the two present
        in the node list. Thus, matching is weak (using string comparisons)
        rather than strong (using ``is``). Because multiple nodes can match
        *obj*, the result is a list of tuples instead of just one (however,
        :exc:`ValueError` is still raised if nothing is found). Individual
        matches will never overlap.

        The tuples contain a new first element, *exact*, which is ``True`` if
        we were able to match *obj* exactly to one or more adjacent nodes, or
        ``False`` if we found *obj* inside a node or incompletely spanning
        multiple nodes.
        """
        obj = parse_anything(obj)
        if not obj or obj not in self:
            raise ValueError(obj)
        results = []
        contexts = [self]
        while contexts:
            context = contexts.pop()
            # Scan right-to-left so index arithmetic stays valid as matches
            # are recorded.
            i = len(context.nodes) - 1
            while i >= 0:
                node = context.get(i)
                if obj.get(-1) == node:
                    # The last node matches; verify the preceding nodes too.
                    for j in range(-len(obj.nodes), -1):
                        if obj.get(j) != context.get(i + j + 1):
                            break
                    else:
                        i -= len(obj.nodes) - 1
                        index = slice(i, i + len(obj.nodes))
                        results.append((True, context, index))
                elif recursive and obj in node:
                    contexts.extend(node.__children__())
                i -= 1
        if not results:
            if not recursive:
                raise ValueError(obj)
            # Inexact match: obj exists somewhere inside our text.
            results.append((False, self, slice(0, len(self.nodes))))
        return results

    def _get_tree(self, code, lines, marker, indent):
        """Build a tree to illustrate the way the Wikicode object was parsed.

        The method that builds the actual tree is ``__showtree__`` of ``Node``
        objects. *code* is the ``Wikicode`` object to build a tree for. *lines*
        is the list to append the tree to, which is returned at the end of the
        method. *marker* is some object to be used to indicate that the builder
        should continue on from the last line instead of starting a new one; it
        should be any object that can be tested for with ``is``. *indent* is
        the starting indentation.
        """

        def write(*args):
            """Write a new line following the proper indentation rules."""
            if lines and lines[-1] is marker:  # Continue from the last line
                lines.pop()  # Remove the marker
                last = lines.pop()
                lines.append(last + " ".join(args))
            else:
                lines.append(" " * 6 * indent + " ".join(args))

        get = lambda code: self._get_tree(code, lines, marker, indent + 1)
        mark = lambda: lines.append(marker)
        for node in code.nodes:
            node.__showtree__(write, get, mark)
        return lines

    @classmethod
    def _build_filter_methods(cls, **meths):
        """Given Node types, build the corresponding i?filter shortcuts.

        The should be given as keys storing the method's base name paired with
        values storing the corresponding :class:`.Node` type. For example, the
        dict may contain the pair ``("templates", Template)``, which will
        produce the methods :meth:`ifilter_templates` and
        :meth:`filter_templates`, which are shortcuts for
        :meth:`ifilter(forcetype=Template) <ifilter>` and
        :meth:`filter(forcetype=Template) <filter>`, respectively. These
        shortcuts are added to the class itself, with an appropriate docstring.
        """
        doc = """Iterate over {0}.

        This is equivalent to :meth:`{1}` with *forcetype* set to
        :class:`~{2.__module__}.{2.__name__}`.
        """
        make_ifilter = lambda ftype: (
            lambda self, *a, **kw: self.ifilter(forcetype=ftype, *a, **kw)
        )
        make_filter = lambda ftype: (
            lambda self, *a, **kw: self.filter(forcetype=ftype, *a, **kw)
        )
        for name, ftype in meths.items():
            ifilt = make_ifilter(ftype)
            filt = make_filter(ftype)
            ifilt.__doc__ = doc.format(name, "ifilter", ftype)
            filt.__doc__ = doc.format(name, "filter", ftype)
            setattr(cls, "ifilter_" + name, ifilt)
            setattr(cls, "filter_" + name, filt)

    @property
    def nodes(self):
        """A list of :class:`.Node` objects.

        This is the internal data actually stored within a :class:`.Wikicode`
        object.
        """
        return self._nodes

    @nodes.setter
    def nodes(self, value):
        # Accept anything parseable, not just a ready-made node list.
        if not isinstance(value, list):
            value = parse_anything(value).nodes
        self._nodes = value

    def get(self, index):
        """Return the *index*\\ th node within the list of nodes."""
        return self.nodes[index]

    def set(self, index, value):
        """Set the ``Node`` at *index* to *value*.

        Raises :exc:`IndexError` if *index* is out of range, or
        :exc:`ValueError` if *value* cannot be coerced into one :class:`.Node`.
        To insert multiple nodes at an index, use :meth:`get` with either
        :meth:`remove` and :meth:`insert` or :meth:`replace`.
        """
        nodes = parse_anything(value).nodes
        if len(nodes) > 1:
            raise ValueError("Cannot coerce multiple nodes into one index")
        if index >= len(self.nodes) or -1 * index > len(self.nodes):
            raise IndexError("List assignment index out of range")
        if nodes:
            self.nodes[index] = nodes[0]
        else:
            # An empty value deletes the node at *index* entirely.
            self.nodes.pop(index)

    def contains(self, obj):
        """Return whether this Wikicode object contains *obj*.

        If *obj* is a :class:`.Node` or :class:`.Wikicode` object, then we
        search for it exactly among all of our children, recursively.
        Otherwise, this method just uses :meth:`.__contains__` on the string.
        """
        if not isinstance(obj, (Node, Wikicode)):
            return obj in self
        try:
            self._do_strong_search(obj, recursive=True)
        except ValueError:
            return False
        return True

    def index(self, obj, recursive=False):
        """Return the index of *obj* in the list of nodes.

        Raises :exc:`ValueError` if *obj* is not found. If *recursive* is
        ``True``, we will look in all nodes of ours and their descendants, and
        return the index of our direct descendant node within *our* list of
        nodes. Otherwise, the lookup is done only on direct descendants.
        """
        # Node arguments match by identity; other values match by equality.
        strict = isinstance(obj, Node)
        equivalent = (lambda o, n: o is n) if strict else (lambda o, n: o == n)
        for i, node in enumerate(self.nodes):
            if recursive:
                for child in self._get_children(node):
                    if equivalent(obj, child):
                        return i
            elif equivalent(obj, node):
                return i
        raise ValueError(obj)

    def get_ancestors(self, obj):
        """Return a list of all ancestor nodes of the :class:`.Node` *obj*.

        The list is ordered from the most shallow ancestor (greatest great-
        grandparent) to the direct parent. The node itself is not included in
        the list. For example::

            >>> text = "{{a|{{b|{{c|{{d}}}}}}}}"
            >>> code = mwparserfromhell.parse(text)
            >>> node = code.filter_templates(matches=lambda n: n == "{{d}}")[0]
            >>> code.get_ancestors(node)
            ['{{a|{{b|{{c|{{d}}}}}}}}', '{{b|{{c|{{d}}}}}}', '{{c|{{d}}}}']

        Will return an empty list if *obj* is at the top level of this Wikicode
        object. Will raise :exc:`ValueError` if it wasn't found.
        """

        def _get_ancestors(code, needle):
            for node in code.nodes:
                if node is needle:
                    return []
                for code in node.__children__():
                    ancestors = _get_ancestors(code, needle)
                    if ancestors is not None:
                        return [node] + ancestors
            return None

        if isinstance(obj, Wikicode):
            obj = obj.get(0)
        elif not isinstance(obj, Node):
            raise ValueError(obj)
        ancestors = _get_ancestors(self, obj)
        if ancestors is None:
            raise ValueError(obj)
        return ancestors

    def get_parent(self, obj):
        """Return the direct parent node of the :class:`.Node` *obj*.

        This function is equivalent to calling :meth:`.get_ancestors` and
        taking the last element of the resulting list. Will return None if
        the node exists but does not have a parent; i.e., it is at the top
        level of the Wikicode object.
        """
        ancestors = self.get_ancestors(obj)
        return ancestors[-1] if ancestors else None

    def insert(self, index, value):
        """Insert *value* at *index* in the list of nodes.

        *value* can be anything parsable by :func:`.parse_anything`, which
        includes strings or other :class:`.Wikicode` or :class:`.Node` objects.
        """
        nodes = parse_anything(value).nodes
        # Inserting in reverse preserves the original order of *nodes*.
        for node in reversed(nodes):
            self.nodes.insert(index, node)

    def insert_before(self, obj, value, recursive=True):
        """Insert *value* immediately before *obj*.

        *obj* can be either a string, a :class:`.Node`, or another
        :class:`.Wikicode` object (as created by :meth:`get_sections`, for
        example). If *obj* is a string, we will operate on all instances of
        that string within the code, otherwise only on the specific instance
        given. *value* can be anything parsable by :func:`.parse_anything`. If
        *recursive* is ``True``, we will try to find *obj* within our child
        nodes even if it is not a direct descendant of this :class:`.Wikicode`
        object. If *obj* is not found, :exc:`ValueError` is raised.
        """
        if isinstance(obj, (Node, Wikicode)):
            context, index = self._do_strong_search(obj, recursive)
            context.insert(index.start, value)
        else:
            for exact, context, index in self._do_weak_search(obj, recursive):
                if exact:
                    context.insert(index.start, value)
                else:
                    # Inexact match: splice the value in via text replacement.
                    obj = str(obj)
                    self._slice_replace(context, index, obj, str(value) + obj)

    def insert_after(self, obj, value, recursive=True):
        """Insert *value* immediately after *obj*.

        *obj* can be either a string, a :class:`.Node`, or another
        :class:`.Wikicode` object (as created by :meth:`get_sections`, for
        example). If *obj* is a string, we will operate on all instances of
        that string within the code, otherwise only on the specific instance
        given. *value* can be anything parsable by :func:`.parse_anything`. If
        *recursive* is ``True``, we will try to find *obj* within our child
        nodes even if it is not a direct descendant of this :class:`.Wikicode`
        object. If *obj* is not found, :exc:`ValueError` is raised.
        """
        if isinstance(obj, (Node, Wikicode)):
            context, index = self._do_strong_search(obj, recursive)
            context.insert(index.stop, value)
        else:
            for exact, context, index in self._do_weak_search(obj, recursive):
                if exact:
                    context.insert(index.stop, value)
                else:
                    obj = str(obj)
                    self._slice_replace(context, index, obj, obj + str(value))

    def replace(self, obj, value, recursive=True):
        """Replace *obj* with *value*.

        *obj* can be either a string, a :class:`.Node`, or another
        :class:`.Wikicode` object (as created by :meth:`get_sections`, for
        example). If *obj* is a string, we will operate on all instances of
        that string within the code, otherwise only on the specific instance
        given. *value* can be anything parsable by :func:`.parse_anything`.

        If *recursive* is ``True``, we will try to find *obj* within our child
        nodes even if it is not a direct descendant of this :class:`.Wikicode`
        object. If *obj* is not found, :exc:`ValueError` is raised.
        """
        if isinstance(obj, (Node, Wikicode)):
            context, index = self._do_strong_search(obj, recursive)
            for _ in range(index.start, index.stop):
                context.nodes.pop(index.start)
            context.insert(index.start, value)
        else:
            for exact, context, index in self._do_weak_search(obj, recursive):
                if exact:
                    for _ in range(index.start, index.stop):
                        context.nodes.pop(index.start)
                    context.insert(index.start, value)
                else:
                    self._slice_replace(context, index, str(obj), str(value))

    def append(self, value):
        """Insert *value* at the end of the list of nodes.

        *value* can be anything parsable by :func:`.parse_anything`.
        """
        nodes = parse_anything(value).nodes
        for node in nodes:
            self.nodes.append(node)

    def remove(self, obj, recursive=True):
        """Remove *obj* from the list of nodes.

        *obj* can be either a string, a :class:`.Node`, or another
        :class:`.Wikicode` object (as created by :meth:`get_sections`, for
        example). If *obj* is a string, we will operate on all instances of
        that string within the code, otherwise only on the specific instance
        given. If *recursive* is ``True``, we will try to find *obj* within our
        child nodes even if it is not a direct descendant of this
        :class:`.Wikicode` object. If *obj* is not found, :exc:`ValueError` is
        raised.
        """
        if isinstance(obj, (Node, Wikicode)):
            context, index = self._do_strong_search(obj, recursive)
            for _ in range(index.start, index.stop):
                context.nodes.pop(index.start)
        else:
            for exact, context, index in self._do_weak_search(obj, recursive):
                if exact:
                    for _ in range(index.start, index.stop):
                        context.nodes.pop(index.start)
                else:
                    self._slice_replace(context, index, str(obj), "")

    def matches(self, other):
        """Do a loose equivalency test suitable for comparing page names.

        *other* can be any string-like object, including :class:`.Wikicode`, or
        an iterable of these. This operation is symmetric; both sides are
        adjusted. Specifically, whitespace and markup is stripped and the first
        letter's case is normalized. Typical usage is
        ``if template.name.matches("stub"): ...``.
        """
        normalize = lambda s: (s[0].upper() + s[1:]).replace("_", " ") if s else s
        this = normalize(self.strip_code().strip())
        if isinstance(other, (str, bytes, Wikicode, Node)):
            that = parse_anything(other).strip_code().strip()
            return this == normalize(that)
        # *other* is an iterable of candidates; match any of them.
        for obj in other:
            that = parse_anything(obj).strip_code().strip()
            if this == normalize(that):
                return True
        return False

    def ifilter(self, recursive=True, matches=None, flags=FLAGS, forcetype=None):
        """Iterate over nodes in our list matching certain conditions.

        If *forcetype* is given, only nodes that are instances of this type (or
        tuple of types) are yielded. Setting *recursive* to ``True`` will
        iterate over all children and their descendants. ``RECURSE_OTHERS``
        will only iterate over children that are not the instances of
        *forcetype*. ``False`` will only iterate over immediate children.

        ``RECURSE_OTHERS`` can be used to iterate over all un-nested templates,
        even if they are inside of HTML tags, like so:

            >>> code = mwparserfromhell.parse("{{foo}}<b>{{foo|{{bar}}}}</b>")
            >>> code.filter_templates(code.RECURSE_OTHERS)
            ["{{foo}}", "{{foo|{{bar}}}}"]

        *matches* can be used to further restrict the nodes, either as a
        function (taking a single :class:`.Node` and returning a boolean) or a
        regular expression (matched against the node's string representation
        with :func:`re.search`). If *matches* is a regex, the flags passed to
        :func:`re.search` are :const:`re.IGNORECASE`, :const:`re.DOTALL`, and
        :const:`re.UNICODE`, but custom flags can be specified by passing
        *flags*.
        """
        gen = self._indexed_ifilter(recursive, matches, flags, forcetype)
        return (node for i, node in gen)

    def filter(self, *args, **kwargs):
        """Return a list of nodes within our list matching certain conditions.

        This is equivalent to calling :func:`list` on :meth:`ifilter`.
        """
        return list(self.ifilter(*args, **kwargs))

    def get_sections(
        self,
        levels=None,
        matches=None,
        flags=FLAGS,
        flat=False,
        include_lead=None,
        include_headings=True,
    ):
        """Return a list of sections within the page.

        Sections are returned as :class:`.Wikicode` objects with a shared node
        list (implemented using :class:`.SmartList`) so that changes to
        sections are reflected in the parent Wikicode object.

        Each section contains all of its subsections, unless *flat* is
        ``True``. If *levels* is given, it should be a iterable of integers;
        only sections whose heading levels are within it will be returned. If
        *matches* is given, it should be either a function or a regex; only
        sections whose headings match it (without the surrounding equal signs)
        will be included. *flags* can be used to override the default regex
        flags (see :meth:`ifilter`) if a regex *matches* is used.

        If *include_lead* is ``True``, the first, lead section (without a
        heading) will be included in the list; ``False`` will not include it;
        the default will include it only if no specific *levels* were given. If
        *include_headings* is ``True``, the section's beginning
        :class:`.Heading` object will be included; otherwise, this is skipped.
        """
        title_matcher = self._build_matcher(matches, flags)
        matcher = lambda heading: (
            title_matcher(heading.title) and (not levels or heading.level in levels)
        )
        iheadings = self._indexed_ifilter(recursive=False, forcetype=Heading)
        sections = []  # Tuples of (index_of_first_node, section)
        # Tuples of (index, heading), where index and heading.level are both
        # monotonically increasing
        open_headings = []

        # Add the lead section if appropriate:
        if include_lead or not (include_lead is not None or matches or levels):
            itr = self._indexed_ifilter(recursive=False, forcetype=Heading)
            try:
                first = next(itr)[0]
                sections.append((0, Wikicode(self.nodes[:first])))
            except StopIteration:  # No headings in page
                sections.append((0, Wikicode(self.nodes[:])))

        # Iterate over headings, adding sections to the list as they end:
        for i, heading in iheadings:
            if flat:  # With flat, all sections close at the next heading
                newly_closed, open_headings = open_headings, []
            else:  # Otherwise, figure out which sections have closed, if any
                closed_start_index = len(open_headings)
                for j, (start, last_heading) in enumerate(open_headings):
                    if heading.level <= last_heading.level:
                        closed_start_index = j
                        break
                newly_closed = open_headings[closed_start_index:]
                del open_headings[closed_start_index:]
            for start, closed_heading in newly_closed:
                if matcher(closed_heading):
                    sections.append((start, Wikicode(self.nodes[start:i])))
            start = i if include_headings else (i + 1)
            open_headings.append((start, heading))

        # Add any remaining open headings to the list of sections:
        for start, heading in open_headings:
            if matcher(heading):
                sections.append((start, Wikicode(self.nodes[start:])))

        # Ensure that earlier sections are earlier in the returned list:
        return [section for i, section in sorted(sections)]

    def strip_code(self, normalize=True, collapse=True, keep_template_params=False):
        """Return a rendered string without unprintable code such as templates.

        The way a node is stripped is handled by the
        :meth:`~.Node.__strip__` method of :class:`.Node` objects, which
        generally return a subset of their nodes or ``None``. For example,
        templates and tags are removed completely, links are stripped to just
        their display part, headings are stripped to just their title.

        If *normalize* is ``True``, various things may be done to strip code
        further, such as converting HTML entities like ``&Sigma;``, ``&#931;``,
        and ``&#x3a3;`` to ``Σ``. If *collapse* is ``True``, we will try to
        remove excess whitespace as well (three or more newlines are converted
        to two, for example). If *keep_template_params* is ``True``, then
        template parameters will be preserved in the output (normally, they are
        removed completely).
        """
        kwargs = {
            "normalize": normalize,
            "collapse": collapse,
            "keep_template_params": keep_template_params,
        }
        nodes = []
        for node in self.nodes:
            stripped = node.__strip__(**kwargs)
            if stripped:
                nodes.append(str(stripped))
        if collapse:
            stripped = "".join(nodes).strip("\n")
            while "\n\n\n" in stripped:
                stripped = stripped.replace("\n\n\n", "\n\n")
            return stripped
        return "".join(nodes)

    def get_tree(self):
        """Return a hierarchical tree representation of the object.

        The representation is a string makes the most sense printed. It is
        built by calling :meth:`_get_tree` on the :class:`.Wikicode` object and
        its children recursively. The end result may look something like the
        following::

            >>> text = "Lorem ipsum {{foo|bar|{{baz}}|spam=eggs}}"
            >>> print(mwparserfromhell.parse(text).get_tree())
            Lorem ipsum
            {{
                  foo
                | 1
                = bar
                | 2
                = {{
                        baz
                  }}
                | spam
                = eggs
            }}
        """
        marker = object()  # Random object we can find with certainty in a list
        return "\n".join(self._get_tree(self, [], marker, 0))
# Attach the ifilter_*/filter_* shortcut methods (e.g. filter_templates) to
# Wikicode, one pair per node type, at import time.
Wikicode._build_filter_methods(
    arguments=Argument,
    comments=Comment,
    external_links=ExternalLink,
    headings=Heading,
    html_entities=HTMLEntity,
    tags=Tag,
    templates=Template,
    text=Text,
    wikilinks=Wikilink,
)
| {
"content_hash": "29a429ac153be4681d83f0e8fccb1eb1",
"timestamp": "",
"source": "github",
"line_count": 694,
"max_line_length": 84,
"avg_line_length": 42.46685878962536,
"alnum_prop": 0.5816707383279045,
"repo_name": "earwig/mwparserfromhell",
"id": "4d4f9b30486dbfb32a3f9a5af76522e2c16d10cb",
"size": "30595",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/mwparserfromhell/wikicode.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1976"
},
{
"name": "C",
"bytes": "190783"
},
{
"name": "Python",
"bytes": "394172"
},
{
"name": "Shell",
"bytes": "4855"
}
],
"symlink_target": ""
} |
from . import AWSObject, AWSProperty, Tags
from .validators import boolean
def attribute_type_validator(x):
    """Validate a DynamoDB attribute type, one of "S", "N", or "B"."""
    allowed = ["S", "N", "B"]
    if x in allowed:
        return x
    raise ValueError("AttributeType must be one of: %s" %
                     ", ".join(allowed))
def key_type_validator(x):
    """Validate a DynamoDB key type, either "HASH" or "RANGE"."""
    allowed = ["HASH", "RANGE"]
    if x in allowed:
        return x
    raise ValueError("KeyType must be one of: %s" % ", ".join(allowed))
def projection_type_validator(x):
    """Validate a DynamoDB index projection type."""
    allowed = ["KEYS_ONLY", "INCLUDE", "ALL"]
    if x in allowed:
        return x
    raise ValueError("ProjectionType must be one of: %s" %
                     ", ".join(allowed))
class AttributeDefinition(AWSProperty):
    """An attribute name paired with its "S"/"N"/"B" type."""
    # props maps property name -> (expected type or validator, required?).
    props = {
        "AttributeName": (basestring, True),
        "AttributeType": (attribute_type_validator, True),
    }
class KeySchema(AWSProperty):
    """A key schema element: attribute name plus "HASH" or "RANGE" role."""
    props = {
        "AttributeName": (basestring, True),
        "KeyType": (key_type_validator, True)
    }
class Key(KeySchema):
    """ For backwards compatibility. """
    # Deprecated alias; new code should use KeySchema directly.
    pass
class ProvisionedThroughput(AWSProperty):
    """Read/write capacity units for a table or global secondary index."""
    props = {
        "ReadCapacityUnits": (int, True),
        "WriteCapacityUnits": (int, True),
    }
class Projection(AWSProperty):
    """Which attributes an index projects (KEYS_ONLY, INCLUDE, or ALL)."""
    props = {
        "NonKeyAttributes": ([basestring], False),
        "ProjectionType": (projection_type_validator, False)
    }
class GlobalSecondaryIndex(AWSProperty):
    """A global secondary index; unlike a local one, it carries its own
    provisioned throughput."""
    props = {
        "IndexName": (basestring, True),
        "KeySchema": ([KeySchema], True),
        "Projection": (Projection, True),
        "ProvisionedThroughput": (ProvisionedThroughput, True)
    }
class LocalSecondaryIndex(AWSProperty):
    """A local secondary index (no separate throughput, unlike a GSI)."""
    props = {
        "IndexName": (basestring, True),
        "KeySchema": ([KeySchema], True),
        "Projection": (Projection, True),
    }
class StreamSpecification(AWSProperty):
    """DynamoDB Streams configuration for the table."""
    props = {
        'StreamViewType': (basestring, True),
    }
class TimeToLiveSpecification(AWSProperty):
    """TTL configuration: which attribute holds the expiry, and on/off."""
    props = {
        'AttributeName': (basestring, True),
        'Enabled': (boolean, True),
    }
class Table(AWSObject):
    """The AWS::DynamoDB::Table CloudFormation resource."""

    resource_type = "AWS::DynamoDB::Table"

    # props maps property name -> (expected type or validator, required?).
    props = {
        'AttributeDefinitions': ([AttributeDefinition], True),
        'GlobalSecondaryIndexes': ([GlobalSecondaryIndex], False),
        'KeySchema': ([KeySchema], True),
        'LocalSecondaryIndexes': ([LocalSecondaryIndex], False),
        'ProvisionedThroughput': (ProvisionedThroughput, True),
        'StreamSpecification': (StreamSpecification, False),
        'TableName': (basestring, False),
        'Tags': (Tags, False),
        'TimeToLiveSpecification': (TimeToLiveSpecification, False),
    }
| {
"content_hash": "a8bf3d8fbb22b9d6125448210801a0e7",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 79,
"avg_line_length": 26.307692307692307,
"alnum_prop": 0.6085526315789473,
"repo_name": "7digital/troposphere",
"id": "978a4691a915e9b7d02a2e719a5e88107a5e736d",
"size": "2852",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "troposphere/dynamodb.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "579"
},
{
"name": "Python",
"bytes": "356311"
},
{
"name": "Shell",
"bytes": "60"
}
],
"symlink_target": ""
} |
import numpy as np
from numpy.testing import *
class TestArrayRepr(object):
    """Tests for the textual repr of ndarrays."""

    def test_nan_inf(self):
        # NOTE(review): this pins the historical repr spacing
        # 'array([ nan,  inf])'-style alignment; modern NumPy releases print
        # 'array([nan, inf])' -- expected to run against the bundled NumPy.
        x = np.array([np.nan, np.inf])
        assert_equal(repr(x), 'array([ nan, inf])')
# Allow running this test file directly via numpy.testing's module runner.
if __name__ == "__main__":
    run_module_suite()
| {
"content_hash": "7c21f3044bea418c1d3f4c7e42ece640",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 52,
"avg_line_length": 24.8,
"alnum_prop": 0.592741935483871,
"repo_name": "dagss/numpy_svn",
"id": "954869727eda905a12cd32c7521b375fd407e2bf",
"size": "248",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "numpy/core/tests/test_arrayprint.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import sys
import unittest
import libsbml
class TestSpeciesReference_newSetters(unittest.TestCase):
global sr
sr = None
def setUp(self):
self.sr = libsbml.SpeciesReference(2,4)
if (self.sr == None):
pass
pass
def tearDown(self):
_dummyList = [ self.sr ]; _dummyList[:] = []; del _dummyList
pass
def test_SpeciesReference_setDenominator1(self):
i = self.sr.setDenominator(2)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( self.sr.getDenominator() == 2 )
pass
def test_SpeciesReference_setDenominator2(self):
c = libsbml.SpeciesReference(2,2)
i = c.setDenominator(4)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( c.getDenominator() == 4 )
_dummyList = [ c ]; _dummyList[:] = []; del _dummyList
pass
def test_SpeciesReference_setId1(self):
i = self.sr.setId( "cell")
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assertEqual( True, self.sr.isSetId() )
self.assert_(( "cell" == self.sr.getId() ))
pass
def test_SpeciesReference_setId2(self):
i = self.sr.setId( "1cell")
self.assert_( i == libsbml.LIBSBML_INVALID_ATTRIBUTE_VALUE )
self.assertEqual( False, self.sr.isSetId() )
pass
def test_SpeciesReference_setId3(self):
c = libsbml.SpeciesReference(2,1)
i = c.setId( "cell")
_dummyList = [ c ]; _dummyList[:] = []; del _dummyList
pass
def test_SpeciesReference_setId4(self):
i = self.sr.setId( "cell")
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assertEqual( True, self.sr.isSetId() )
self.assert_(( "cell" == self.sr.getId() ))
i = self.sr.setId("")
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assertEqual( False, self.sr.isSetId() )
pass
def test_SpeciesReference_setName1(self):
i = self.sr.setName( "cell")
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assertEqual( True, self.sr.isSetName() )
i = self.sr.unsetName()
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assertEqual( False, self.sr.isSetName() )
pass
def test_SpeciesReference_setName2(self):
i = self.sr.setName( "1cell")
self.assert_( i == libsbml.LIBSBML_INVALID_ATTRIBUTE_VALUE )
self.assertEqual( False, self.sr.isSetName() )
i = self.sr.unsetName()
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assertEqual( False, self.sr.isSetName() )
pass
def test_SpeciesReference_setName3(self):
c = libsbml.SpeciesReference(2,1)
i = c.setName( "cell")
self.assert_( i == libsbml.LIBSBML_UNEXPECTED_ATTRIBUTE )
self.assertEqual( False, c.isSetName() )
_dummyList = [ c ]; _dummyList[:] = []; del _dummyList
pass
def test_SpeciesReference_setName4(self):
i = self.sr.setName( "cell")
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assertEqual( True, self.sr.isSetName() )
i = self.sr.setName("")
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assertEqual( False, self.sr.isSetName() )
pass
def test_SpeciesReference_setSpecies1(self):
i = self.sr.setSpecies( "mm")
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assertEqual( True, self.sr.isSetSpecies() )
pass
def test_SpeciesReference_setSpecies2(self):
c = libsbml.SpeciesReference(2,2)
i = c.setSpecies( "1cell")
self.assert_( i == libsbml.LIBSBML_INVALID_ATTRIBUTE_VALUE )
self.assertEqual( False, c.isSetSpecies() )
_dummyList = [ c ]; _dummyList[:] = []; del _dummyList
pass
def test_SpeciesReference_setSpecies3(self):
c = libsbml.SpeciesReference(2,2)
i = c.setSpecies( "mole")
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_(( "mole" == c.getSpecies() ))
self.assertEqual( True, c.isSetSpecies() )
_dummyList = [ c ]; _dummyList[:] = []; del _dummyList
pass
def test_SpeciesReference_setSpecies4(self):
i = self.sr.setSpecies( "mm")
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assertEqual( True, self.sr.isSetSpecies() )
i = self.sr.setSpecies("")
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assertEqual( False, self.sr.isSetSpecies() )
pass
def test_SpeciesReference_setStoichiometry1(self):
i = self.sr.setStoichiometry(2.0)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( self.sr.getStoichiometry() == 2.0 )
pass
def test_SpeciesReference_setStoichiometry2(self):
c = libsbml.SpeciesReference(2,2)
i = c.setStoichiometry(4)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( c.getStoichiometry() == 4.0 )
_dummyList = [ c ]; _dummyList[:] = []; del _dummyList
pass
def test_SpeciesReference_setStoichiometryMath1(self):
sm = libsbml.StoichiometryMath(2,4)
math = libsbml.ASTNode(libsbml.AST_TIMES)
a = libsbml.ASTNode()
b = libsbml.ASTNode()
a.setName( "a")
b.setName( "b")
math.addChild(a)
math.addChild(b)
sm.setMath(math)
i = self.sr.setStoichiometryMath(sm)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assertEqual( True, self.sr.isSetStoichiometryMath() )
self.assert_( self.sr.getStoichiometry() == 1 )
i = self.sr.unsetStoichiometryMath()
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assertEqual( False, self.sr.isSetStoichiometryMath() )
_dummyList = [ sm ]; _dummyList[:] = []; del _dummyList
pass
def test_SpeciesReference_setStoichiometryMath2(self):
sm = libsbml.StoichiometryMath(2,4)
math = libsbml.ASTNode(libsbml.AST_TIMES)
a = libsbml.ASTNode()
a.setName( "a")
math.addChild(a)
sm.setMath(math)
i = self.sr.setStoichiometryMath(sm)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assertEqual( True, self.sr.isSetStoichiometryMath() )
_dummyList = [ sm ]; _dummyList[:] = []; del _dummyList
pass
def test_SpeciesReference_setStoichiometryMath3(self):
i = self.sr.setStoichiometryMath(None)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assertEqual( False, self.sr.isSetStoichiometryMath() )
i = self.sr.unsetStoichiometryMath()
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assertEqual( False, self.sr.isSetStoichiometryMath() )
pass
def test_SpeciesReference_setStoichiometryMath4(self):
sm = libsbml.StoichiometryMath(2,4)
math = None
sm.setMath(math)
i = self.sr.setStoichiometryMath(sm)
self.assert_( i == libsbml.LIBSBML_INVALID_OBJECT )
self.assertEqual( False, self.sr.isSetStoichiometryMath() )
self.assert_( self.sr.getStoichiometry() == 1 )
i = self.sr.unsetStoichiometryMath()
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assertEqual( False, self.sr.isSetStoichiometryMath() )
_dummyList = [ sm ]; _dummyList[:] = []; del _dummyList
pass
def test_SpeciesReference_setStoichiometryMath5(self):
sr1 = libsbml.SpeciesReference(1,2)
sm = libsbml.StoichiometryMath(2,4)
math = libsbml.ASTNode(libsbml.AST_TIMES)
a = libsbml.ASTNode()
b = libsbml.ASTNode()
a.setName( "a")
b.setName( "b")
math.addChild(a)
math.addChild(b)
sm.setMath(math)
i = sr1.setStoichiometryMath(sm)
self.assert_( i == libsbml.LIBSBML_UNEXPECTED_ATTRIBUTE )
self.assertEqual( False, sr1.isSetStoichiometryMath() )
_dummyList = [ sm ]; _dummyList[:] = []; del _dummyList
_dummyList = [ sr1 ]; _dummyList[:] = []; del _dummyList
pass
def test_SpeciesReference_setStoichiometryMath6(self):
sm = libsbml.StoichiometryMath(2,1)
sm.setMath(libsbml.parseFormula("1 + 1"))
i = self.sr.setStoichiometryMath(sm)
self.assert_( i == libsbml.LIBSBML_VERSION_MISMATCH )
self.assertEqual( False, self.sr.isSetStoichiometryMath() )
_dummyList = [ sm ]; _dummyList[:] = []; del _dummyList
pass
def test_SpeciesReference_setStoichiometryMath7(self):
sr1 = libsbml.SpeciesReference(1,2)
i = sr1.unsetStoichiometryMath()
self.assert_( i == libsbml.LIBSBML_UNEXPECTED_ATTRIBUTE )
_dummyList = [ sr1 ]; _dummyList[:] = []; del _dummyList
pass
def suite():
  """Build the test suite for TestSpeciesReference_newSetters.

  unittest.makeSuite was deprecated and removed in Python 3.13; the
  documented replacement is TestLoader.loadTestsFromTestCase.
  """
  suite = unittest.TestSuite()
  suite.addTest(
    unittest.TestLoader().loadTestsFromTestCase(TestSpeciesReference_newSetters))
  return suite
if __name__ == "__main__":
  # Run the suite and mirror its success/failure in the process exit code.
  outcome = unittest.TextTestRunner(verbosity=1).run(suite())
  sys.exit(0 if outcome.wasSuccessful() else 1)
| {
"content_hash": "71791766f3b5846dd94ef87fd0cb1c3c",
"timestamp": "",
"source": "github",
"line_count": 245,
"max_line_length": 72,
"avg_line_length": 35.06122448979592,
"alnum_prop": 0.6626309662398138,
"repo_name": "TheCoSMoCompany/biopredyn",
"id": "ba5cdd25dcddcb0068a734eb8d694548221ced21",
"size": "10027",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Prototype/src/libsbml-5.10.0/src/bindings/python/test/sbml/TestSpeciesReference_newSetters.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "3535918"
},
{
"name": "C++",
"bytes": "26120778"
},
{
"name": "CMake",
"bytes": "455400"
},
{
"name": "CSS",
"bytes": "49020"
},
{
"name": "Gnuplot",
"bytes": "206"
},
{
"name": "HTML",
"bytes": "193068"
},
{
"name": "Java",
"bytes": "66517"
},
{
"name": "JavaScript",
"bytes": "3847"
},
{
"name": "Makefile",
"bytes": "30905"
},
{
"name": "Perl",
"bytes": "3018"
},
{
"name": "Python",
"bytes": "7891301"
},
{
"name": "Shell",
"bytes": "247654"
},
{
"name": "TeX",
"bytes": "22566"
},
{
"name": "XSLT",
"bytes": "55564"
}
],
"symlink_target": ""
} |
import asyncio
import gc
import inspect
import io
import re
import unittest
import unittest.mock
import urllib.parse
import zlib
from http.cookies import SimpleCookie
from multidict import CIMultiDict, CIMultiDictProxy, upstr
import pytest
import aiohttp
from aiohttp import BaseConnector
from aiohttp.client_reqrep import ClientRequest, ClientResponse
import os.path
@pytest.fixture
def make_request(loop):
    """Factory fixture building a ClientRequest bound to the test loop.

    NOTE(review): pytest.yield_fixture is deprecated; a plain
    pytest.fixture has supported ``yield`` teardown since pytest 3.0.
    """
    request = None

    def maker(*args, **kwargs):
        # Only the most recent request is tracked for teardown; each
        # test here creates at most one request per fixture instance.
        nonlocal request
        request = ClientRequest(*args, loop=loop, **kwargs)
        return request

    yield maker
    if request is not None:
        loop.run_until_complete(request.close())
def test_method1(make_request):
    """A lower-case verb is upper-cased by ClientRequest."""
    request = make_request('get', 'http://python.org/')
    assert request.method == 'GET'

def test_method2(make_request):
    """'head' is normalized to 'HEAD'."""
    request = make_request('head', 'http://python.org/')
    assert request.method == 'HEAD'

def test_method3(make_request):
    """An already upper-case verb is preserved."""
    request = make_request('HEAD', 'http://python.org/')
    assert request.method == 'HEAD'

def test_version_1_0(make_request):
    """An explicit '1.0' string is parsed into the (1, 0) tuple."""
    request = make_request('get', 'http://python.org/', version='1.0')
    assert request.version == (1, 0)

def test_version_default(make_request):
    """HTTP/1.1 is the default protocol version."""
    request = make_request('get', 'http://python.org/')
    assert request.version == (1, 1)

def test_version_err(make_request):
    """A malformed version string raises ValueError."""
    with pytest.raises(ValueError):
        make_request('get', 'http://python.org/', version='1.c')
def test_host_port_default_http(make_request):
    """http:// defaults to port 80 without SSL."""
    request = make_request('get', 'http://python.org/')
    assert request.host == 'python.org'
    assert request.port == 80
    assert not request.ssl

def test_host_port_default_https(make_request):
    """https:// defaults to port 443 with SSL."""
    request = make_request('get', 'https://python.org/')
    assert request.host == 'python.org'
    assert request.port == 443
    assert request.ssl

def test_host_port_nondefault_http(make_request):
    """An explicit port in an http URL is honoured."""
    request = make_request('get', 'http://python.org:960/')
    assert request.host == 'python.org'
    assert request.port == 960
    assert not request.ssl

def test_host_port_nondefault_https(make_request):
    """An explicit port in an https URL is honoured."""
    request = make_request('get', 'https://python.org:960/')
    assert request.host == 'python.org'
    assert request.port == 960
    assert request.ssl

def test_host_port_default_ws(make_request):
    """ws:// behaves like http:// for port/SSL defaults."""
    request = make_request('get', 'ws://python.org/')
    assert request.host == 'python.org'
    assert request.port == 80
    assert not request.ssl

def test_host_port_default_wss(make_request):
    """wss:// behaves like https:// for port/SSL defaults."""
    request = make_request('get', 'wss://python.org/')
    assert request.host == 'python.org'
    assert request.port == 443
    assert request.ssl

def test_host_port_nondefault_ws(make_request):
    """An explicit port in a ws URL is honoured."""
    request = make_request('get', 'ws://python.org:960/')
    assert request.host == 'python.org'
    assert request.port == 960
    assert not request.ssl

def test_host_port_nondefault_wss(make_request):
    """An explicit port in a wss URL is honoured."""
    request = make_request('get', 'wss://python.org:960/')
    assert request.host == 'python.org'
    assert request.port == 960
    assert request.ssl

def test_host_port_err(make_request):
    """A non-numeric port raises ValueError."""
    with pytest.raises(ValueError):
        make_request('get', 'http://python.org:123e/')

def test_hostname_err(make_request):
    """An empty hostname raises ValueError."""
    with pytest.raises(ValueError):
        make_request('get', 'http://:8080/')
def test_host_header_host_without_port(make_request):
    """Host header omits the port when the URL has none."""
    request = make_request('get', 'http://python.org/')
    assert request.headers['HOST'] == 'python.org'

def test_host_header_host_with_default_port(make_request):
    """An explicit default port is still echoed in the Host header."""
    request = make_request('get', 'http://python.org:80/')
    assert request.headers['HOST'] == 'python.org:80'

def test_host_header_host_with_nondefault_port(make_request):
    """A non-default port appears in the Host header."""
    request = make_request('get', 'http://python.org:99/')
    assert request.headers['HOST'] == 'python.org:99'

def test_host_header_explicit_host(make_request):
    """A caller-supplied Host header overrides the URL's host."""
    request = make_request('get', 'http://python.org/',
                           headers={'host': 'example.com'})
    assert request.headers['HOST'] == 'example.com'

def test_host_header_explicit_host_with_port(make_request):
    """A caller-supplied host:port Host header is passed through."""
    request = make_request('get', 'http://python.org/',
                           headers={'host': 'example.com:99'})
    assert request.headers['HOST'] == 'example.com:99'
def test_default_loop(loop):
    """Without an explicit loop, ClientRequest picks up the current one."""
    asyncio.set_event_loop(loop)
    request = ClientRequest('get', 'http://python.org/')
    assert request.loop is loop

def test_default_headers_useragent(make_request):
    """A User-Agent header is added by default; Server never is."""
    request = make_request('get', 'http://python.org/')
    assert 'SERVER' not in request.headers
    assert 'USER-AGENT' in request.headers

def test_default_headers_useragent_custom(make_request):
    """A caller-supplied User-Agent replaces the default one."""
    request = make_request('get', 'http://python.org/',
                           headers={'user-agent': 'my custom agent'})
    assert 'USER-Agent' in request.headers
    assert 'my custom agent' == request.headers['User-Agent']

def test_skip_default_useragent_header(make_request):
    """skip_auto_headers suppresses the default User-Agent."""
    request = make_request('get', 'http://python.org/',
                           skip_auto_headers=set([upstr('user-agent')]))
    assert 'User-Agent' not in request.headers
def test_headers(make_request):
    """Caller headers are kept and Accept-Encoding is auto-added."""
    request = make_request('get', 'http://python.org/',
                           headers={'Content-Type': 'text/plain'})
    assert 'CONTENT-TYPE' in request.headers
    assert request.headers['CONTENT-TYPE'] == 'text/plain'
    assert request.headers['ACCEPT-ENCODING'] == 'gzip, deflate'

def test_headers_list(make_request):
    """Headers may be supplied as a list of pairs."""
    request = make_request('get', 'http://python.org/',
                           headers=[('Content-Type', 'text/plain')])
    assert 'CONTENT-TYPE' in request.headers
    assert request.headers['CONTENT-TYPE'] == 'text/plain'

def test_headers_default(make_request):
    """An explicit Accept-Encoding suppresses the default value."""
    request = make_request('get', 'http://python.org/',
                           headers={'ACCEPT-ENCODING': 'deflate'})
    assert request.headers['ACCEPT-ENCODING'] == 'deflate'

def test_invalid_url(make_request):
    """A string with no scheme/host raises ValueError."""
    with pytest.raises(ValueError):
        make_request('get', 'hiwpefhipowhefopw')

def test_invalid_idna(make_request):
    """A hostname that fails IDNA encoding raises ValueError."""
    with pytest.raises(ValueError):
        make_request('get', 'http://\u2061owhefopw.com')

def test_no_path(make_request):
    """A URL without a path defaults to '/'."""
    request = make_request('get', 'http://python.org')
    assert '/' == request.path
def test_ipv6_default_http_port(make_request):
    """Bracketed IPv6 literals are unwrapped; http defaults to port 80."""
    request = make_request('get', 'http://[2001:db8::1]/')
    assert request.host == '2001:db8::1'
    assert request.port == 80
    assert not request.ssl

def test_ipv6_default_https_port(make_request):
    """Bracketed IPv6 literals with https default to port 443."""
    request = make_request('get', 'https://[2001:db8::1]/')
    assert request.host == '2001:db8::1'
    assert request.port == 443
    assert request.ssl

def test_ipv6_nondefault_http_port(make_request):
    """An explicit port after an IPv6 literal is honoured (http)."""
    request = make_request('get', 'http://[2001:db8::1]:960/')
    assert request.host == '2001:db8::1'
    assert request.port == 960
    assert not request.ssl

def test_ipv6_nondefault_https_port(make_request):
    """An explicit port after an IPv6 literal is honoured (https)."""
    request = make_request('get', 'https://[2001:db8::1]:960/')
    assert request.host == '2001:db8::1'
    assert request.port == 960
    assert request.ssl
def test_basic_auth(make_request):
    """BasicAuth credentials become an Authorization header."""
    request = make_request('get', 'http://python.org',
                           auth=aiohttp.helpers.BasicAuth('nkim', '1234'))
    assert 'AUTHORIZATION' in request.headers
    assert 'Basic bmtpbToxMjM0' == request.headers['AUTHORIZATION']

def test_basic_auth_utf8(make_request):
    """Non-ASCII credentials are encoded with the given charset."""
    request = make_request('get', 'http://python.org',
                           auth=aiohttp.helpers.BasicAuth('nkim', 'секрет',
                                                          'utf-8'))
    assert 'AUTHORIZATION' in request.headers
    assert 'Basic bmtpbTrRgdC10LrRgNC10YI=' == request.headers['AUTHORIZATION']

def test_basic_auth_tuple_deprecated(make_request, warning):
    """A plain (user, password) tuple still works but warns."""
    with warning(DeprecationWarning):
        request = make_request('get', 'http://python.org',
                               auth=('nkim', '1234'))
    assert 'AUTHORIZATION' in request.headers
    assert 'Basic bmtpbToxMjM0' == request.headers['AUTHORIZATION']

def test_basic_auth_from_url(make_request):
    """user:password@ in the URL is turned into an Authorization header."""
    request = make_request('get', 'http://nkim:1234@python.org')
    assert 'AUTHORIZATION' in request.headers
    assert 'Basic bmtpbToxMjM0' == request.headers['AUTHORIZATION']
    assert 'python.org' == request.netloc

def test_basic_auth_from_url_overriden(make_request):
    """An explicit auth= argument wins over URL credentials."""
    request = make_request('get', 'http://garbage@python.org',
                           auth=aiohttp.BasicAuth('nkim', '1234'))
    assert 'AUTHORIZATION' in request.headers
    assert 'Basic bmtpbToxMjM0' == request.headers['AUTHORIZATION']
    assert 'python.org' == request.netloc
def test_path_is_not_double_encoded1(make_request):
    """A raw space in the path is percent-encoded once."""
    request = make_request('get', "http://0.0.0.0/get/test case")
    assert request.path == "/get/test%20case"

def test_path_is_not_double_encoded2(make_request):
    """An already-encoded %2f is not re-encoded."""
    request = make_request('get', "http://0.0.0.0/get/test%2fcase")
    assert request.path == "/get/test%2fcase"

def test_path_is_not_double_encoded3(make_request):
    """An already-encoded %20 is not re-encoded."""
    request = make_request('get', "http://0.0.0.0/get/test%20case")
    assert request.path == "/get/test%20case"

def test_path_safe_chars_preserved(make_request):
    """URL-safe characters %, :, = survive untouched."""
    request = make_request('get', "http://0.0.0.0/get/%:=")
    assert request.path == "/get/%:="

def test_params_are_added_before_fragment1(make_request):
    """Query params are inserted before the #fragment."""
    request = make_request('GET', "http://example.com/path#fragment",
                           params={"a": "b"})
    assert request.path == "/path?a=b#fragment"

def test_params_are_added_before_fragment2(make_request):
    """Extra params are appended to an existing query, before the fragment."""
    request = make_request('GET', "http://example.com/path?key=value#fragment",
                           params={"a": "b"})
    assert request.path == "/path?key=value&a=b#fragment"
def test_cookies(make_request):
    """A cookies= dict is serialized into a Cookie header."""
    request = make_request('get', 'http://test.com/path',
                           cookies={'cookie1': 'val1'})
    assert 'COOKIE' in request.headers
    assert 'cookie1=val1' == request.headers['COOKIE']

def test_cookies_merge_with_headers(make_request):
    """cookies= values are merged with a caller-supplied Cookie header."""
    request = make_request('get', 'http://test.com/path',
                           headers={'cookie': 'cookie1=val1'},
                           cookies={'cookie2': 'val2'})
    assert 'cookie1=val1; cookie2=val2' == request.headers['COOKIE']

def test_unicode_get1(make_request):
    """Non-ASCII param values are UTF-8 percent-encoded."""
    request = make_request('get', 'http://python.org',
                           params={'foo': 'f\xf8\xf8'})
    assert '/?foo=f%C3%B8%C3%B8' == request.path

def test_unicode_get2(make_request):
    """Non-ASCII param keys are UTF-8 percent-encoded too."""
    request = make_request('', 'http://python.org',
                           params={'f\xf8\xf8': 'f\xf8\xf8'})
    assert '/?f%C3%B8%C3%B8=f%C3%B8%C3%B8' == request.path

def test_unicode_get3(make_request):
    """ASCII params pass through unchanged."""
    request = make_request('', 'http://python.org', params={'foo': 'foo'})
    assert '/?foo=foo' == request.path

def test_unicode_get4(make_request):
    """A non-ASCII path segment is percent-encoded alongside params."""
    def join(*suffix):
        return urllib.parse.urljoin('http://python.org/', '/'.join(suffix))
    request = make_request('', join('\xf8'), params={'foo': 'foo'})
    assert '/%C3%B8?foo=foo' == request.path
def test_query_multivalued_param(make_request):
    """Repeated keys in a pair sequence all land in the query string."""
    for method in ClientRequest.ALL_METHODS:
        request = make_request(
            method, 'http://python.org',
            params=(('test', 'foo'), ('test', 'baz')))
        assert request.path == '/?test=foo&test=baz'

def test_query_str_param(make_request):
    """A pre-built query string is used verbatim."""
    for method in ClientRequest.ALL_METHODS:
        request = make_request(method, 'http://python.org', params='test=foo')
        assert request.path == '/?test=foo'

def test_query_bytes_param_raises(make_request):
    """bytes is not an accepted params type."""
    for method in ClientRequest.ALL_METHODS:
        with pytest.raises(TypeError) as ctx:
            make_request(method, 'http://python.org', params=b'test=foo')
        assert re.match('not a valid non-string.*or mapping', str(ctx.value))

def test_query_str_param_is_not_encoded(make_request):
    """A string params value is not re-encoded (the '+' survives)."""
    for method in ClientRequest.ALL_METHODS:
        request = make_request(method, 'http://python.org', params='test=f+oo')
        assert request.path == '/?test=f+oo'

def test_params_update_path_and_url(make_request):
    """params= updates both .path and the full .url."""
    request = make_request('get', 'http://python.org',
                           params=(('test', 'foo'), ('test', 'baz')))
    assert request.path == '/?test=foo&test=baz'
    assert request.url == 'http://python.org/?test=foo&test=baz'
class TestClientRequest(unittest.TestCase):
    """Legacy unittest-style tests for ClientRequest send/close behaviour.

    Each test runs on a private event loop with mocked transport and
    protocol objects; tearDown drains pending callbacks and closes the
    loop so nothing leaks between tests.

    Fix: ``asyncio.async(...)`` became a SyntaxError once ``async``
    turned into a keyword (Python 3.7); all call sites now use
    ``asyncio.ensure_future`` (available since Python 3.4.4).
    """

    def setUp(self):
        self.loop = asyncio.new_event_loop()
        asyncio.set_event_loop(None)
        self.transport = unittest.mock.Mock()
        self.connection = unittest.mock.Mock()
        self.protocol = unittest.mock.Mock()
        self.protocol.writer.drain.return_value = ()
        self.stream = aiohttp.StreamParser(loop=self.loop)
        self.connector = BaseConnector(loop=self.loop)

    def tearDown(self):
        self.connector.close()
        try:
            # Let any callbacks scheduled by the test run to completion.
            self.loop.stop()
            self.loop.run_forever()
        except RuntimeError:  # loop is already closed
            pass
        self.loop.close()
        gc.collect()

    def test_no_content_length(self):
        """GET without a body gets Content-Length: 0."""
        req = ClientRequest('get', 'http://python.org', loop=self.loop)
        resp = req.send(self.transport, self.protocol)
        self.assertEqual('0', req.headers.get('CONTENT-LENGTH'))
        self.loop.run_until_complete(req.close())
        resp.close()

    def test_no_content_length2(self):
        """HEAD without a body gets Content-Length: 0."""
        req = ClientRequest('head', 'http://python.org', loop=self.loop)
        resp = req.send(self.transport, self.protocol)
        self.assertEqual('0', req.headers.get('CONTENT-LENGTH'))
        self.loop.run_until_complete(req.close())
        resp.close()

    def test_content_type_auto_header_get(self):
        """No Content-Type is auto-added for a body-less GET."""
        req = ClientRequest('get', 'http://python.org', loop=self.loop)
        resp = req.send(self.transport, self.protocol)
        self.assertNotIn('CONTENT-TYPE', req.headers)
        resp.close()

    def test_content_type_auto_header_form(self):
        """A dict body implies form-urlencoded Content-Type."""
        req = ClientRequest('post', 'http://python.org', data={'hey': 'you'},
                            loop=self.loop)
        resp = req.send(self.transport, self.protocol)
        self.assertEqual('application/x-www-form-urlencoded',
                         req.headers.get('CONTENT-TYPE'))
        resp.close()

    def test_content_type_auto_header_bytes(self):
        """A bytes body implies octet-stream Content-Type."""
        req = ClientRequest('post', 'http://python.org', data=b'hey you',
                            loop=self.loop)
        resp = req.send(self.transport, self.protocol)
        self.assertEqual('application/octet-stream',
                         req.headers.get('CONTENT-TYPE'))
        resp.close()

    def test_content_type_skip_auto_header_bytes(self):
        """skip_auto_headers suppresses the auto Content-Type (bytes)."""
        req = ClientRequest('post', 'http://python.org', data=b'hey you',
                            skip_auto_headers={'CONTENT-TYPE'},
                            loop=self.loop)
        resp = req.send(self.transport, self.protocol)
        self.assertNotIn('CONTENT-TYPE', req.headers)
        resp.close()

    def test_content_type_skip_auto_header_form(self):
        """skip_auto_headers suppresses the auto Content-Type (form)."""
        req = ClientRequest('post', 'http://python.org', data={'hey': 'you'},
                            loop=self.loop, skip_auto_headers={'CONTENT-TYPE'})
        resp = req.send(self.transport, self.protocol)
        self.assertNotIn('CONTENT-TYPE', req.headers)
        resp.close()

    def test_content_type_auto_header_content_length_no_skip(self):
        """Content-Length is still derived from a file-like body."""
        req = ClientRequest('get', 'http://python.org',
                            data=io.BytesIO(b'hey'),
                            skip_auto_headers={'CONTENT-LENGTH'},
                            loop=self.loop)
        resp = req.send(self.transport, self.protocol)
        self.assertEqual(req.headers.get('CONTENT-LENGTH'), '3')
        resp.close()

    def test_post_data(self):
        """POST-family methods urlencode a dict body."""
        for meth in ClientRequest.POST_METHODS:
            req = ClientRequest(
                meth, 'http://python.org/',
                data={'life': '42'}, loop=self.loop)
            resp = req.send(self.transport, self.protocol)
            self.assertEqual('/', req.path)
            self.assertEqual(b'life=42', req.body)
            self.assertEqual('application/x-www-form-urlencoded',
                             req.headers['CONTENT-TYPE'])
            self.loop.run_until_complete(req.close())
            resp.close()

    @unittest.mock.patch(
        'aiohttp.client_reqrep.ClientRequest.update_body_from_data')
    def test_pass_falsy_data(self, _):
        """A falsy-but-not-None body ({}) is still forwarded."""
        req = ClientRequest(
            'post', 'http://python.org/',
            data={}, loop=self.loop)
        req.update_body_from_data.assert_called_once_with({}, frozenset())
        self.loop.run_until_complete(req.close())

    def test_get_with_data(self):
        """GET-family methods also accept a dict body."""
        for meth in ClientRequest.GET_METHODS:
            req = ClientRequest(
                meth, 'http://python.org/', data={'life': '42'},
                loop=self.loop)
            self.assertEqual('/', req.path)
            self.assertEqual(b'life=42', req.body)
            self.loop.run_until_complete(req.close())

    def test_bytes_data(self):
        """POST-family methods pass a bytes body through unchanged."""
        for meth in ClientRequest.POST_METHODS:
            req = ClientRequest(
                meth, 'http://python.org/',
                data=b'binary data', loop=self.loop)
            resp = req.send(self.transport, self.protocol)
            self.assertEqual('/', req.path)
            self.assertEqual(b'binary data', req.body)
            self.assertEqual('application/octet-stream',
                             req.headers['CONTENT-TYPE'])
            self.loop.run_until_complete(req.close())
            resp.close()

    @unittest.mock.patch('aiohttp.client_reqrep.aiohttp')
    def test_content_encoding(self, m_http):
        """compress= enables chunking plus a compression filter."""
        req = ClientRequest('get', 'http://python.org/',
                            compress='deflate', loop=self.loop)
        resp = req.send(self.transport, self.protocol)
        self.assertEqual(req.headers['TRANSFER-ENCODING'], 'chunked')
        self.assertEqual(req.headers['CONTENT-ENCODING'], 'deflate')
        m_http.Request.return_value\
            .add_compression_filter.assert_called_with('deflate')
        self.loop.run_until_complete(req.close())
        resp.close()

    @unittest.mock.patch('aiohttp.client_reqrep.aiohttp')
    def test_content_encoding_header(self, m_http):
        """An explicit Content-Encoding header also triggers the filters."""
        req = ClientRequest(
            'get', 'http://python.org/',
            headers={'Content-Encoding': 'deflate'}, loop=self.loop)
        resp = req.send(self.transport, self.protocol)
        self.assertEqual(req.headers['TRANSFER-ENCODING'], 'chunked')
        self.assertEqual(req.headers['CONTENT-ENCODING'], 'deflate')
        m_http.Request.return_value\
            .add_compression_filter.assert_called_with('deflate')
        m_http.Request.return_value\
            .add_chunking_filter.assert_called_with(8192)
        self.loop.run_until_complete(req.close())
        resp.close()

    def test_chunked(self):
        """A caller-supplied Transfer-Encoding header is preserved."""
        req = ClientRequest(
            'get', 'http://python.org/',
            headers={'TRANSFER-ENCODING': 'gzip'}, loop=self.loop)
        resp = req.send(self.transport, self.protocol)
        self.assertEqual('gzip', req.headers['TRANSFER-ENCODING'])
        self.loop.run_until_complete(req.close())
        resp.close()

        req = ClientRequest(
            'get', 'http://python.org/',
            headers={'Transfer-encoding': 'chunked'}, loop=self.loop)
        resp = req.send(self.transport, self.protocol)
        self.assertEqual('chunked', req.headers['TRANSFER-ENCODING'])
        self.loop.run_until_complete(req.close())
        resp.close()

    @unittest.mock.patch('aiohttp.client_reqrep.aiohttp')
    def test_chunked_explicit(self, m_http):
        """chunked=True uses the default 8192-byte chunk size."""
        req = ClientRequest(
            'get', 'http://python.org/', chunked=True, loop=self.loop)
        resp = req.send(self.transport, self.protocol)

        self.assertEqual('chunked', req.headers['TRANSFER-ENCODING'])
        m_http.Request.return_value\
            .add_chunking_filter.assert_called_with(8192)
        self.loop.run_until_complete(req.close())
        resp.close()

    @unittest.mock.patch('aiohttp.client_reqrep.aiohttp')
    def test_chunked_explicit_size(self, m_http):
        """An integer chunked= value sets the chunk size."""
        req = ClientRequest(
            'get', 'http://python.org/', chunked=1024, loop=self.loop)
        resp = req.send(self.transport, self.protocol)
        self.assertEqual('chunked', req.headers['TRANSFER-ENCODING'])
        m_http.Request.return_value\
            .add_chunking_filter.assert_called_with(1024)
        self.loop.run_until_complete(req.close())
        resp.close()

    def test_chunked_length(self):
        """Chunking removes any caller-supplied Content-Length."""
        req = ClientRequest(
            'get', 'http://python.org/',
            headers={'CONTENT-LENGTH': '1000'}, chunked=1024, loop=self.loop)
        resp = req.send(self.transport, self.protocol)
        self.assertEqual(req.headers['TRANSFER-ENCODING'], 'chunked')
        self.assertNotIn('CONTENT-LENGTH', req.headers)
        self.loop.run_until_complete(req.close())
        resp.close()

    def test_file_upload_not_chunked(self):
        """A seekable file body gets an exact Content-Length, no chunking."""
        here = os.path.dirname(__file__)
        fname = os.path.join(here, 'sample.key')
        with open(fname, 'rb') as f:
            req = ClientRequest(
                'post', 'http://python.org/',
                data=f,
                loop=self.loop)
            self.assertFalse(req.chunked)
            self.assertEqual(req.headers['CONTENT-LENGTH'],
                             str(os.path.getsize(fname)))
            self.loop.run_until_complete(req.close())

    def test_precompressed_data_stays_intact(self):
        """compress=False leaves pre-compressed bytes untouched."""
        data = zlib.compress(b'foobar')
        req = ClientRequest(
            'post', 'http://python.org/',
            data=data,
            headers={'CONTENT-ENCODING': 'deflate'},
            compress=False,
            loop=self.loop)
        self.assertFalse(req.compress)
        self.assertFalse(req.chunked)
        self.assertEqual(req.headers['CONTENT-ENCODING'],
                         'deflate')
        self.loop.run_until_complete(req.close())

    def test_file_upload_not_chunked_seek(self):
        """Content-Length accounts for the file's current seek position."""
        here = os.path.dirname(__file__)
        fname = os.path.join(here, 'sample.key')
        with open(fname, 'rb') as f:
            f.seek(100)
            req = ClientRequest(
                'post', 'http://python.org/',
                data=f,
                loop=self.loop)
            self.assertEqual(req.headers['CONTENT-LENGTH'],
                             str(os.path.getsize(fname) - 100))
            self.loop.run_until_complete(req.close())

    def test_file_upload_force_chunked(self):
        """chunked=True overrides the file-length heuristic."""
        here = os.path.dirname(__file__)
        fname = os.path.join(here, 'sample.key')
        with open(fname, 'rb') as f:
            req = ClientRequest(
                'post', 'http://python.org/',
                data=f,
                chunked=True,
                loop=self.loop)
            self.assertTrue(req.chunked)
            self.assertNotIn('CONTENT-LENGTH', req.headers)
            self.loop.run_until_complete(req.close())

    def test_expect100(self):
        """expect100=True adds the header and arms the _continue future."""
        req = ClientRequest('get', 'http://python.org/',
                            expect100=True, loop=self.loop)
        resp = req.send(self.transport, self.protocol)
        self.assertEqual('100-continue', req.headers['EXPECT'])
        self.assertIsNotNone(req._continue)
        req.terminate()
        resp.close()

    def test_expect_100_continue_header(self):
        """An explicit Expect: 100-continue header behaves like expect100."""
        req = ClientRequest('get', 'http://python.org/',
                            headers={'expect': '100-continue'}, loop=self.loop)
        resp = req.send(self.transport, self.protocol)
        self.assertEqual('100-continue', req.headers['EXPECT'])
        self.assertIsNotNone(req._continue)
        req.terminate()
        resp.close()

    def test_data_stream(self):
        """A generator body streams chunked; its return value is appended."""
        def gen():
            yield b'binary data'
            return b' result'

        req = ClientRequest(
            'POST', 'http://python.org/', data=gen(), loop=self.loop)
        self.assertTrue(req.chunked)
        self.assertTrue(inspect.isgenerator(req.body))
        self.assertEqual(req.headers['TRANSFER-ENCODING'], 'chunked')

        resp = req.send(self.transport, self.protocol)
        self.assertIsInstance(req._writer, asyncio.Future)
        self.loop.run_until_complete(resp.wait_for_close())
        self.assertIsNone(req._writer)
        self.assertEqual(
            self.transport.write.mock_calls[-2:],
            [unittest.mock.call(b'12\r\nbinary data result\r\n'),
             unittest.mock.call(b'0\r\n\r\n')])
        self.loop.run_until_complete(req.close())

    def test_data_file(self):
        """A buffered file-like body is streamed chunked."""
        req = ClientRequest(
            'POST', 'http://python.org/',
            data=io.BufferedReader(io.BytesIO(b'*' * 2)),
            loop=self.loop)
        self.assertTrue(req.chunked)
        self.assertTrue(isinstance(req.body, io.IOBase))
        self.assertEqual(req.headers['TRANSFER-ENCODING'], 'chunked')

        resp = req.send(self.transport, self.protocol)
        self.assertIsInstance(req._writer, asyncio.Future)
        self.loop.run_until_complete(resp.wait_for_close())
        self.assertIsNone(req._writer)
        self.assertEqual(
            self.transport.write.mock_calls[-2:],
            [unittest.mock.call(b'2\r\n' + b'*' * 2 + b'\r\n'),
             unittest.mock.call(b'0\r\n\r\n')])
        self.loop.run_until_complete(req.close())

    def test_data_stream_exc(self):
        """An exception raised inside the body generator closes the conn."""
        fut = asyncio.Future(loop=self.loop)

        def gen():
            yield b'binary data'
            yield from fut

        req = ClientRequest(
            'POST', 'http://python.org/', data=gen(), loop=self.loop)
        self.assertTrue(req.chunked)
        self.assertTrue(inspect.isgenerator(req.body))
        self.assertEqual(req.headers['TRANSFER-ENCODING'], 'chunked')

        @asyncio.coroutine
        def exc():
            yield from asyncio.sleep(0.01, loop=self.loop)
            fut.set_exception(ValueError)

        # asyncio.async() is a SyntaxError on Python 3.7+ ('async' is a
        # keyword); ensure_future is the equivalent spelling (3.4.4+).
        asyncio.ensure_future(exc(), loop=self.loop)

        resp = req.send(self.transport, self.protocol)
        resp._connection = self.connection
        self.loop.run_until_complete(req._writer)
        self.assertTrue(self.connection.close.called)
        self.assertTrue(self.protocol.set_exception.called)
        self.loop.run_until_complete(req.close())

    def test_data_stream_not_bytes(self):
        """Yielding a non-bytes object sets an exception on the protocol."""
        @asyncio.coroutine
        def gen():
            yield object()

        req = ClientRequest(
            'POST', 'http://python.org/', data=gen(), loop=self.loop)
        resp = req.send(self.transport, self.protocol)
        self.loop.run_until_complete(req._writer)
        self.assertTrue(self.protocol.set_exception.called)
        self.loop.run_until_complete(req.close())
        resp.close()

    def test_data_stream_exc_chain(self):
        """The generator's exception is chained into ClientRequestError."""
        fut = asyncio.Future(loop=self.loop)

        def gen():
            yield from fut

        req = ClientRequest(
            'POST', 'http://python.org/', data=gen(), loop=self.loop)

        inner_exc = ValueError()

        @asyncio.coroutine
        def exc():
            yield from asyncio.sleep(0.01, loop=self.loop)
            fut.set_exception(inner_exc)

        # was asyncio.async(); see test_data_stream_exc for rationale
        asyncio.ensure_future(exc(), loop=self.loop)

        resp = req.send(self.transport, self.protocol)
        resp._connection = self.connection
        self.loop.run_until_complete(req._writer)
        self.assertTrue(self.connection.close.called)
        self.assertTrue(self.protocol.set_exception.called)
        outer_exc = self.protocol.set_exception.call_args[0][0]
        self.assertIsInstance(outer_exc, aiohttp.ClientRequestError)
        self.assertIs(inner_exc, outer_exc.__context__)
        self.assertIs(inner_exc, outer_exc.__cause__)
        self.loop.run_until_complete(req.close())

    def test_data_stream_continue(self):
        """Streaming waits for the 100-continue future before writing."""
        def gen():
            yield b'binary data'
            return b' result'

        req = ClientRequest(
            'POST', 'http://python.org/', data=gen(),
            expect100=True, loop=self.loop)
        self.assertTrue(req.chunked)
        self.assertTrue(inspect.isgenerator(req.body))

        def coro():
            yield from asyncio.sleep(0.0001, loop=self.loop)
            req._continue.set_result(1)

        # was asyncio.async(); see test_data_stream_exc for rationale
        asyncio.ensure_future(coro(), loop=self.loop)

        resp = req.send(self.transport, self.protocol)
        self.loop.run_until_complete(req._writer)
        self.assertEqual(
            self.transport.write.mock_calls[-2:],
            [unittest.mock.call(b'12\r\nbinary data result\r\n'),
             unittest.mock.call(b'0\r\n\r\n')])
        self.loop.run_until_complete(req.close())
        resp.close()

    def test_data_continue(self):
        """A plain body is held back until 100-continue is resolved."""
        req = ClientRequest(
            'POST', 'http://python.org/', data=b'data',
            expect100=True, loop=self.loop)

        def coro():
            yield from asyncio.sleep(0.0001, loop=self.loop)
            req._continue.set_result(1)

        # was asyncio.async(); see test_data_stream_exc for rationale
        asyncio.ensure_future(coro(), loop=self.loop)

        resp = req.send(self.transport, self.protocol)
        # only the headers have been written so far
        self.assertEqual(1, len(self.transport.write.mock_calls))

        self.loop.run_until_complete(req._writer)
        self.assertEqual(
            self.transport.write.mock_calls[-1],
            unittest.mock.call(b'data'))
        self.loop.run_until_complete(req.close())
        resp.close()

    def test_close(self):
        """close() drains a pending coroutine body to completion."""
        @asyncio.coroutine
        def gen():
            yield from asyncio.sleep(0.00001, loop=self.loop)
            return b'result'

        req = ClientRequest(
            'POST', 'http://python.org/', data=gen(), loop=self.loop)
        resp = req.send(self.transport, self.protocol)
        self.loop.run_until_complete(req.close())
        self.assertEqual(
            self.transport.write.mock_calls[-2:],
            [unittest.mock.call(b'6\r\nresult\r\n'),
             unittest.mock.call(b'0\r\n\r\n')])
        self.loop.run_until_complete(req.close())
        resp.close()

    def test_custom_response_class(self):
        """response_class= substitutes the response object built by send()."""
        class CustomResponse(ClientResponse):
            def read(self, decode=False):
                return 'customized!'

        req = ClientRequest(
            'GET', 'http://python.org/', response_class=CustomResponse,
            loop=self.loop)
        resp = req.send(self.transport, self.protocol)
        self.assertEqual('customized!', resp.read())
        self.loop.run_until_complete(req.close())
        resp.close()

    def test_terminate(self):
        """terminate() cancels and clears the body writer task."""
        req = ClientRequest('get', 'http://python.org', loop=self.loop)
        resp = req.send(self.transport, self.protocol)
        self.assertIsNotNone(req._writer)
        writer = req._writer = unittest.mock.Mock()

        req.terminate()
        self.assertIsNone(req._writer)
        writer.cancel.assert_called_with()
        resp.close()

    def test_terminate_with_closed_loop(self):
        """terminate() skips cancelling when the loop is already closed."""
        if not hasattr(self.loop, 'is_closed'):
            self.skipTest("Required asyncio 3.4.2+")
        req = ClientRequest('get', 'http://python.org', loop=self.loop)
        resp = req.send(self.transport, self.protocol)
        self.assertIsNotNone(req._writer)
        writer = req._writer = unittest.mock.Mock()

        self.loop.close()
        req.terminate()
        self.assertIsNone(req._writer)
        self.assertFalse(writer.cancel.called)
        resp.close()

    def test_terminate_without_writer(self):
        """terminate() is a no-op when no writer task exists."""
        req = ClientRequest('get', 'http://python.org', loop=self.loop)
        self.assertIsNone(req._writer)

        req.terminate()
        self.assertIsNone(req._writer)

    def test_custom_req_rep(self):
        """request_class/response_class plug into aiohttp.request()."""
        @asyncio.coroutine
        def go():
            conn = None

            class CustomResponse(ClientResponse):
                @asyncio.coroutine
                def start(self, connection, read_until_eof=False):
                    nonlocal conn
                    conn = connection
                    self.status = 123
                    self.reason = 'Test OK'
                    self.headers = CIMultiDictProxy(CIMultiDict())
                    self.cookies = SimpleCookie()
                    return

            called = False

            class CustomRequest(ClientRequest):

                def send(self, writer, reader):
                    resp = self.response_class(self.method,
                                               self.url,
                                               self.host,
                                               writer=self._writer,
                                               continue100=self._continue)
                    resp._post_init(self.loop)
                    self.response = resp
                    nonlocal called
                    called = True
                    return resp

            @asyncio.coroutine
            def create_connection(req):
                self.assertIsInstance(req, CustomRequest)
                return self.transport, self.protocol
            self.connector._create_connection = create_connection

            resp = yield from aiohttp.request('get',
                                              'http://example.com/path/to',
                                              request_class=CustomRequest,
                                              response_class=CustomResponse,
                                              connector=self.connector,
                                              loop=self.loop)
            self.assertIsInstance(resp, CustomResponse)
            self.assertTrue(called)
            resp.close()
            conn.close()

        self.loop.run_until_complete(go())
| {
"content_hash": "250058a015809f68d1db620be4517df5",
"timestamp": "",
"source": "github",
"line_count": 943,
"max_line_length": 79,
"avg_line_length": 35.3711558854719,
"alnum_prop": 0.5900164892819667,
"repo_name": "elastic-coders/aiohttp",
"id": "a4f41a66f55b554eee3f382b0c0ec768062efed3",
"size": "33378",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_client_request.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "838"
},
{
"name": "CSS",
"bytes": "112"
},
{
"name": "HTML",
"bytes": "4885"
},
{
"name": "Makefile",
"bytes": "2376"
},
{
"name": "PLpgSQL",
"bytes": "765"
},
{
"name": "Python",
"bytes": "981759"
},
{
"name": "Shell",
"bytes": "550"
}
],
"symlink_target": ""
} |
"""Create the application database and place it under SQLAlchemy-migrate
version control (creating the migration repository on first run)."""
import os.path
from migrate.versioning import api
from config import SQLALCHEMY_DATABASE_URI
from config import SQLALCHEMY_MIGRATE_REPO
from app import db

# Materialize every table declared on the SQLAlchemy models.
db.create_all()

if os.path.exists(SQLALCHEMY_MIGRATE_REPO):
    # Repository already exists: (re-)attach the database to it at the
    # repository's current version.
    api.version_control(
        SQLALCHEMY_DATABASE_URI,
        SQLALCHEMY_MIGRATE_REPO,
        api.version(SQLALCHEMY_MIGRATE_REPO)
    )
else:
    # First run: create the repository, then start version tracking.
    api.create(SQLALCHEMY_MIGRATE_REPO, 'database repository')
    api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
"content_hash": "1b8c3d7ef75e0514cef2a5fbb49549ae",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 73,
"avg_line_length": 26.68421052631579,
"alnum_prop": 0.7514792899408284,
"repo_name": "Napchat/microblog",
"id": "0745429b76d5ae73eed3d0f971e450e17c1d3d7b",
"size": "526",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "db_create.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "10470"
},
{
"name": "JavaScript",
"bytes": "128944"
},
{
"name": "Python",
"bytes": "35229"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
# Django >= 1.5 supports swappable user models; fall back to the builtin
# auth User on older versions where get_user_model() does not exist.
try:
    from django.contrib.auth import get_user_model
except ImportError:  # django < 1.5
    from django.contrib.auth.models import User
else:
    User = get_user_model()

# "app_label.ObjectName" and "app_label.modelname" labels used below to
# reference the (possibly swapped) user model in the frozen ORM.
user_orm_label = '%s.%s' % (User._meta.app_label, User._meta.object_name)
user_model_label = '%s.%s' % (User._meta.app_label, User._meta.module_name)
class Migration(SchemaMigration):
    """Initial South migration: creates the widgy_blog Blog and BlogLayout
    tables (and drops them on rollback).

    The ``models`` attribute is South's frozen ORM snapshot — a declarative
    description of every model this migration may touch; it is data, not
    executable schema changes.
    """

    def forwards(self, orm):
        # Adding model 'Blog'
        db.create_table(u'widgy_blog_blog', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('content', self.gf('widgy.db.fields.VersionedWidgyField')(to=orm['widgy.VersionTracker'], on_delete=models.PROTECT)),
        ))
        db.send_create_signal(u'widgy_blog', ['Blog'])

        # Adding model 'BlogLayout'
        db.create_table(u'widgy_blog_bloglayout', (
            (u'defaultlayout_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['page_builder.DefaultLayout'], unique=True, primary_key=True)),
            ('title', self.gf('django.db.models.fields.CharField')(max_length=1023)),
            ('slug', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
            ('date', self.gf('django.db.models.fields.DateField')(default=datetime.datetime.now)),
            ('author', self.gf('django.db.models.fields.related.ForeignKey')(related_name='blog_bloglayout_set', to=orm[user_orm_label])),
            ('image', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='+', null=True, on_delete=models.PROTECT, to=orm['filer.File'])),
            ('summary', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
        ))
        db.send_create_signal(u'widgy_blog', ['BlogLayout'])

    def backwards(self, orm):
        # Deleting model 'Blog'
        db.delete_table(u'widgy_blog_blog')

        # Deleting model 'BlogLayout'
        db.delete_table(u'widgy_blog_bloglayout')

    # Frozen ORM snapshot (auto-generated by South; keyed by the
    # user_model_label computed above so custom user models work).
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        user_model_label: {
            'Meta': {'ordering': "[u'name', u'email']", 'object_name': User.__name__, 'db_table': "'%s'" % User._meta.db_table},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'filer.file': {
            'Meta': {'object_name': 'File'},
            '_file_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'file': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'all_files'", 'null': 'True', 'to': "orm['filer.Folder']"}),
            'has_all_mandatory_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
            'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'owned_files'", 'null': 'True', 'to': u"orm['%s']" % user_orm_label}),
            'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_filer.file_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
            'sha1': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40', 'blank': 'True'}),
            'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
        },
        'filer.folder': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('parent', 'name'),)", 'object_name': 'Folder'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'filer_owned_folders'", 'null': 'True', 'to': u"orm['%s']" % user_orm_label}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['filer.Folder']"}),
            'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
        },
        u'page_builder.defaultlayout': {
            'Meta': {'object_name': 'DefaultLayout'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'widgy.node': {
            'Meta': {'unique_together': "[('content_type', 'content_id')]", 'object_name': 'Node'},
            'content_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            'depth': ('django.db.models.fields.PositiveIntegerField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_frozen': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'numchild': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
        },
        'widgy.versioncommit': {
            'Meta': {'object_name': 'VersionCommit'},
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['%s']" % user_orm_label, 'null': 'True', 'on_delete': 'models.SET_NULL'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'message': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['widgy.VersionCommit']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
            'publish_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'root_node': ('widgy.db.fields.WidgyField', [], {'to': "orm['widgy.Node']", 'on_delete': 'models.PROTECT'}),
            'tracker': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'commits'", 'to': "orm['widgy.VersionTracker']"})
        },
        'widgy.versiontracker': {
            'Meta': {'object_name': 'VersionTracker'},
            'head': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['widgy.VersionCommit']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.PROTECT'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'working_copy': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['widgy.Node']", 'unique': 'True', 'on_delete': 'models.PROTECT'})
        },
        u'widgy_blog.blog': {
            'Meta': {'object_name': 'Blog'},
            'content': ('widgy.db.fields.VersionedWidgyField', [], {'to': "orm['widgy.VersionTracker']", 'on_delete': 'models.PROTECT'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        u'widgy_blog.bloglayout': {
            'Meta': {'ordering': "['-date']", 'object_name': 'BlogLayout', '_ormbases': [u'page_builder.DefaultLayout']},
            'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'blog_bloglayout_set'", 'to': u"orm['%s']" % user_orm_label}),
            'date': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime.now'}),
            u'defaultlayout_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['page_builder.DefaultLayout']", 'unique': 'True', 'primary_key': 'True'}),
            'image': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': "orm['filer.File']"}),
            'slug': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'summary': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '1023'})
        }
    }

    complete_apps = ['widgy_blog']
| {
"content_hash": "8777012f7fb0818fc1c2b95f5238dcdb",
"timestamp": "",
"source": "github",
"line_count": 156,
"max_line_length": 192,
"avg_line_length": 74.82051282051282,
"alnum_prop": 0.5670836189170665,
"repo_name": "fusionbox/django-widgy-blog",
"id": "0c9b86a6c18e8d592ed2b9835605c6796ce2dac2",
"size": "11696",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "widgy_blog/south_migrations/0001_initial.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "1903"
},
{
"name": "Python",
"bytes": "67436"
}
],
"symlink_target": ""
} |
""" unit testing code for MOE-type descriptors with EStates
"""
import os
import unittest
from rdkit import Chem
from rdkit.Chem.EState import EState_VSA
class TestCase(unittest.TestCase):
    """Check EState_VSA descriptor values against the stored reference CSV."""

    @staticmethod
    def referenceData():
        """Generator over the reference CSV: first yields the
        {descriptor-name: function} map built from the header, then one
        (smiles, mol, expected-values) tuple per data row."""
        filename = os.sep.join(
            [os.path.dirname(os.path.abspath(__file__)), 'test_data', 'EState_VSA.csv'])
        with open(filename) as fin:
            header = fin.readline()
            header = [s.strip() for s in header.split(',')][1:]
            funcEstates = {name: getattr(EState_VSA, name) for name in header}
            yield funcEstates
            for row in fin:
                fields = [s.strip() for s in row.split(',')]
                smiles = fields.pop(0)
                mol = Chem.MolFromSmiles(smiles)
                expected = {k: float(v) for k, v in zip(header, fields)}
                yield smiles, mol, expected

    def test1(self):
        rows = self.referenceData()
        funcEstates = next(rows)
        for smiles, mol, expected in rows:
            for name, descriptor in funcEstates.items():
                calc = descriptor(mol)
                exp = expected[name]
                self.assertAlmostEqual(
                    calc, exp, delta=1e-4,
                    msg='{0}: {1:.4f}!={2:.4f}'.format(smiles, calc, exp))
# Allow running this test module directly as a script.
if __name__ == '__main__':  # pragma: nocover
    unittest.main()
| {
"content_hash": "46e6e829c8b73f06e5984b5e8fcb1bef",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 85,
"avg_line_length": 27.711111111111112,
"alnum_prop": 0.6086607858861267,
"repo_name": "bp-kelley/rdkit",
"id": "650dad5638f0cda3f75138fe824f413c8201d4d6",
"size": "1529",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "rdkit/Chem/EState/UnitTestVSA.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1593408"
},
{
"name": "C#",
"bytes": "10167"
},
{
"name": "C++",
"bytes": "13831236"
},
{
"name": "CMake",
"bytes": "761688"
},
{
"name": "Dockerfile",
"bytes": "2590"
},
{
"name": "Fortran",
"bytes": "7590"
},
{
"name": "HTML",
"bytes": "43059702"
},
{
"name": "Java",
"bytes": "369342"
},
{
"name": "JavaScript",
"bytes": "52043"
},
{
"name": "Jupyter Notebook",
"bytes": "498341"
},
{
"name": "LLVM",
"bytes": "40048"
},
{
"name": "Lex",
"bytes": "4508"
},
{
"name": "Makefile",
"bytes": "10862"
},
{
"name": "Python",
"bytes": "4156873"
},
{
"name": "QMake",
"bytes": "389"
},
{
"name": "SMT",
"bytes": "3010"
},
{
"name": "SWIG",
"bytes": "342569"
},
{
"name": "Shell",
"bytes": "3822"
},
{
"name": "Smarty",
"bytes": "5864"
},
{
"name": "Yacc",
"bytes": "61432"
}
],
"symlink_target": ""
} |
"""Model constructor for Tensorflow state-level models."""
from typing import Dict, List
import numpy as np
import tensorflow as tf
from covid_epidemiology.src import constants
from covid_epidemiology.src.models import generic_seir_model_constructor
from covid_epidemiology.src.models import losses
from covid_epidemiology.src.models.shared import model_utils
class StateModelConstructor(generic_seir_model_constructor.ModelConstructor):
"""Constructs a state Tensorflow model, to be used in tf_seir."""
def __init__(self, model_spec, random_seed=0):
  """Initializes the state-level constructor; the model has 17 compartments."""
  super().__init__(model_spec, random_seed)
  self.num_states = 17
def extract_prediction(self, all_states):
  """Extract the death and confirmed predictions.

  Args:
    all_states: iterable of stacked compartment-state tensors, one per
      timestep (same unstack order as produced by `seir_dynamics`).

  Returns:
    Dict with per-timestep lists under "confirmed" and "death".
  """
  confirmed_all = []
  death_all = []
  for state_t in all_states:
    compartments = tf.unstack(state_t)
    infected_d_t = compartments[1]
    recovered_d_t = compartments[3]
    hospitalized_t = compartments[5]
    icu_t = compartments[8]
    ventilator_t = compartments[9]
    death_t = compartments[10]
    reinfectable_d_t = compartments[12]
    # Confirmed includes ICU and ventilator since they are separate
    # compartments.
    confirmed_t = (
        infected_d_t + recovered_d_t + hospitalized_t + icu_t + ventilator_t +
        death_t + reinfectable_d_t)
    confirmed_all.append(confirmed_t)
    death_all.append(death_t)
  return {"confirmed": confirmed_all, "death": death_all}
def compute_coef(self,
                 ground_truth_timeseries,
                 ground_truth_state,
                 num_train_steps,
                 num_known_steps,
                 power=2.0):
  """Compute train/valid coefficients for loss computation.

  Each compartment's coefficient is the ratio of the death base to that
  compartment's base, raised to `power`, so losses are scaled relative to
  the death compartment.

  Args:
    ground_truth_timeseries: ground truth compartments
    ground_truth_state: ground truth state level compartments (unused here)
    num_train_steps: number of timesteps for training
    num_known_steps: number of known timesteps
    power: 2 for MSE and 1 for MAE

  Returns:
    train_coefs: training coefficients for each compartment
    valid_coefs: valid coefficients for each compartment
  """
  (_, gt_list, gt_indicator, _, _) = ground_truth_timeseries

  # Normalization bases per compartment over the train and valid windows.
  compartments = [
      "recovered", "death", "confirmed", "hospitalized",
      "hospitalized_cumulative", "icu", "ventilator"
  ]
  base_train = {}
  base_valid = {}
  for name in compartments:
    base_train[name], base_valid[name] = model_utils.compartment_base(
        gt_list[name], gt_indicator[name], num_train_steps, num_known_steps)

  def death_relative(base):
    # Order matters: [infected (unused), recovered, death, confirmed,
    # hospitalized, hospitalized_cumulative, icu, ventilator].
    return [
        0, (base["death"] / base["recovered"])**power, 1,
        (base["death"] / base["confirmed"])**power,
        (base["death"] / base["hospitalized"])**power,
        (base["death"] / base["hospitalized_cumulative"])**power,
        (base["death"] / base["icu"])**power,
        (base["death"] / base["ventilator"])**power
    ]

  train_coefs = np.nan_to_num(death_relative(base_train)).tolist()
  valid_coefs = np.nan_to_num(death_relative(base_valid)).tolist()

  return train_coefs, valid_coefs
def seir_dynamics(self, current_state, seir_variables):
  """Model dynamics.

  Computes the time-derivative of every compartment given the current
  compartment values and the encoded rate/contact variables.

  Args:
    current_state: stacked tensor of the 17 compartment states.
    seir_variables: tuple of the 19 rate/contact variables.

  Returns:
    Stacked tensor of per-compartment time-derivatives (same order as
    `current_state`).
  """
  (first_dose_vaccine_ratio_per_day, second_dose_vaccine_ratio_per_day,
   average_contact_id, average_contact_iud, reinfectable_rate, alpha,
   diagnosis_rate, recovery_rate_id, recovery_rate_iud, recovery_rate_h,
   recovery_rate_i, recovery_rate_v, hospitalization_rate, icu_rate,
   ventilator_rate, death_rate_id, death_rate_h, death_rate_i,
   death_rate_v) = seir_variables

  # pylint: disable=unused-variable
  (exposed_t, infected_d_t, infected_ud_t, recovered_d_t, recovered_ud_t,
   hospitalized_t, hospitalized_cumulative_t, hospitalized_increase_t, icu_t,
   ventilator_t, death_t, population_t, reinfectable_d_t, reinfectable_ud_t,
   reinfectable_vaccine_t, vaccine_immuned_t,
   infected_ud_increase_t) = tf.unstack(current_state)

  # Setting the susceptible so that the population adds up to a constant.
  normalized_susceptible_t = 1.0 - (
      exposed_t + infected_d_t + infected_ud_t + recovered_d_t +
      recovered_ud_t + hospitalized_t + icu_t + ventilator_t + death_t +
      vaccine_immuned_t) / population_t
  # Clamp at zero so the contact terms below can never go negative.
  normalized_susceptible_t = tf.nn.relu(normalized_susceptible_t)

  # Differential change on vaccine immuned.
  d_vaccine_immuned_dt = (
      first_dose_vaccine_ratio_per_day * population_t +
      second_dose_vaccine_ratio_per_day * population_t -
      reinfectable_vaccine_t - vaccine_immuned_t)

  # Differential change on reinfectable after vaccination.
  d_reinfectable_vaccine_dt = vaccine_immuned_t * 1.0 / constants.VACCINE_IMMUNITY_DURATION

  # Differential change on exposed
  d_exposed_dt = (average_contact_id * infected_d_t +
                  average_contact_iud * infected_ud_t
                 ) * normalized_susceptible_t - alpha * exposed_t

  # Differential change on infected, documented and undocumented
  d_infected_d_dt = (
      diagnosis_rate * infected_ud_t - recovery_rate_id * infected_d_t -
      death_rate_id * infected_d_t - hospitalization_rate * infected_d_t)
  d_infected_ud_dt = (
      alpha * exposed_t - diagnosis_rate * infected_ud_t -
      recovery_rate_iud * infected_ud_t)
  d_infected_ud_increase_dt = alpha * exposed_t - infected_ud_increase_t

  # Differential change on recovered, documented and undocumented
  d_recovered_d_dt = (
      recovery_rate_id * infected_d_t + recovery_rate_h * hospitalized_t -
      reinfectable_rate * recovered_d_t)
  d_recovered_ud_dt = (
      recovery_rate_iud * infected_ud_t - reinfectable_rate * recovered_ud_t)

  # Differential change on hospitalized
  d_hospitalized_d_dt = (
      hospitalization_rate * infected_d_t -
      (death_rate_h + recovery_rate_h + icu_rate) * hospitalized_t +
      recovery_rate_i * icu_t)
  d_hospitalized_cumulative_d_dt = (hospitalization_rate * infected_d_t)
  d_hospitalized_increase_d_dt = (
      hospitalization_rate * infected_d_t - hospitalized_increase_t)

  # Differential change on icu
  d_icu_d_dt = (
      icu_rate * hospitalized_t -
      (death_rate_i + recovery_rate_i + ventilator_rate) * icu_t +
      recovery_rate_v * ventilator_t)

  # Differential change on ventilator
  d_ventilator_d_dt = (
      ventilator_rate * icu_t -
      (death_rate_v + recovery_rate_v) * ventilator_t)

  # Differential change on death, documented
  d_death_d_dt = (
      death_rate_id * infected_d_t + death_rate_h * hospitalized_t +
      death_rate_i * icu_t + death_rate_v * ventilator_t)

  # Differential change on recovered, who may get the disease again.
  d_reinfectable_d_dt = reinfectable_rate * recovered_d_t
  d_reinfectable_ud_dt = reinfectable_rate * recovered_ud_t

  # Note: the population derivative is -d_death_d_dt so that deaths are
  # removed from the living population.
  all_state_derivatives = [
      d_exposed_dt, d_infected_d_dt, d_infected_ud_dt, d_recovered_d_dt,
      d_recovered_ud_dt, d_hospitalized_d_dt, d_hospitalized_cumulative_d_dt,
      d_hospitalized_increase_d_dt, d_icu_d_dt, d_ventilator_d_dt,
      d_death_d_dt, -d_death_d_dt, d_reinfectable_d_dt, d_reinfectable_ud_dt,
      d_reinfectable_vaccine_dt, d_vaccine_immuned_dt,
      d_infected_ud_increase_dt
  ]

  return tf.stack(all_state_derivatives)
def compute_losses(self,
                   hparams,
                   train_coefs,
                   valid_coefs,
                   propagated_states,
                   ground_truth_timeseries,
                   r_eff,
                   train_start_index,
                   train_end_index,
                   valid_start_index,
                   valid_end_index,
                   num_forecast_steps,
                   quantiles=None):
  """Computes overall training and validation losses.

  Combines per-compartment losses — point-estimate losses when `quantiles`
  is None, weighted-interval + CRPS losses otherwise — scaled by
  `train_coefs`/`valid_coefs` and the hparam loss coefficients, plus
  regularization penalties (R_eff/acceleration penalties for point
  estimates; cumulative quantile-violation penalties for quantile runs).

  Args:
    hparams: dict of hyperparameters (loss coefficients and weights).
    train_coefs: per-compartment training coefficients (see `compute_coef`).
    valid_coefs: per-compartment validation coefficients.
    propagated_states: propagated compartment states; axis 1 indexes the
      compartment (unstacked below).
    ground_truth_timeseries: ground truth compartments tuple.
    r_eff: effective-reproduction-number tensor, may be None.
    train_start_index: first timestep of the training window.
    train_end_index: end timestep of the training window.
    valid_start_index: first timestep of the validation window.
    valid_end_index: end timestep of the validation window.
    num_forecast_steps: number of forecasting steps.
    quantiles: optional quantile levels; switches to quantile losses.

  Returns:
    Tuple of (train_loss_overall, valid_loss_overall).
  """
  train_loss_coefs = hparams["train_loss_coefs"]
  valid_loss_coefs = hparams["valid_loss_coefs"]
  time_scale_weight = hparams["time_scale_weight"]
  width_coef_train = hparams["width_coef_train"]
  width_coef_valid = hparams["width_coef_valid"]
  quantile_cum_viol_coef = hparams["quantile_cum_viol_coef"]
  increment_loss_weight = hparams["increment_loss_weight"]
  train_crps_weight = hparams["train_crps_weight"]
  valid_crps_weight = hparams["valid_crps_weight"]

  (_, gt_list, gt_indicator, _, _) = ground_truth_timeseries

  # Slice the per-compartment predictions out of the propagated states.
  unstacked_propagated_states = tf.unstack(propagated_states, axis=1)
  pred_infected = unstacked_propagated_states[1]
  pred_recovered = unstacked_propagated_states[3]
  pred_hospitalized = unstacked_propagated_states[5]
  pred_hospitalized_cumulative = unstacked_propagated_states[6]
  pred_icu = unstacked_propagated_states[8]
  pred_ventilator = unstacked_propagated_states[9]
  pred_death = unstacked_propagated_states[10]
  pred_reinfected = unstacked_propagated_states[12]

  # Confirmed is the sum of all documented compartments (see
  # `extract_prediction`).
  pred_confirmed = (
      pred_infected + pred_recovered + pred_death + pred_hospitalized +
      pred_icu + pred_ventilator + pred_reinfected)

  train_start_index = tf.identity(train_start_index)
  train_end_index = tf.identity(train_end_index)
  valid_start_index = tf.identity(valid_start_index)
  valid_end_index = tf.identity(valid_end_index)

  if quantiles is not None:
    quantiles = tf.constant(quantiles, dtype=tf.float32)

  # Use quantile loss if the value of quantiles are given
  def loss(pred_states,
           gt_list,
           gt_indicator,
           train_start_index,
           train_end_index,
           valid_start_index,
           valid_end_index,
           time_scale_weight=0,
           is_training=True):
    # Returns (train_loss, valid_loss) for one compartment. In quantile
    # mode, `is_training` only selects which width coefficient is applied.
    if quantiles is not None:
      if is_training:
        train_loss = losses.weighted_interval_loss(
            quantile_pred_states=pred_states,
            tau_list=quantiles,
            gt_list=gt_list,
            gt_indicator=gt_indicator,
            begin_timestep=train_start_index,
            end_timestep=train_end_index,
            time_scale_weight=time_scale_weight,
            width_coef=width_coef_train)
        valid_loss = losses.weighted_interval_loss(
            quantile_pred_states=pred_states,
            tau_list=quantiles,
            gt_list=gt_list,
            gt_indicator=gt_indicator,
            begin_timestep=valid_start_index,
            end_timestep=valid_end_index,
            time_scale_weight=time_scale_weight,
            width_coef=width_coef_train)
      else:
        train_loss = losses.weighted_interval_loss(
            quantile_pred_states=pred_states,
            tau_list=quantiles,
            gt_list=gt_list,
            gt_indicator=gt_indicator,
            begin_timestep=train_start_index,
            end_timestep=train_end_index,
            time_scale_weight=time_scale_weight,
            width_coef=width_coef_valid)
        valid_loss = losses.weighted_interval_loss(
            quantile_pred_states=pred_states,
            tau_list=quantiles,
            gt_list=gt_list,
            gt_indicator=gt_indicator,
            begin_timestep=valid_start_index,
            end_timestep=valid_end_index,
            time_scale_weight=time_scale_weight,
            width_coef=width_coef_valid)
      # CRPS terms are added on top of the interval losses.
      train_loss += train_crps_weight * losses.crps_loss(
          quantile_pred_states=pred_states,
          tau_list=quantiles,
          gt_list=gt_list,
          gt_indicator=gt_indicator,
          begin_timestep=train_start_index,
          end_timestep=train_end_index,
          time_scale_weight=time_scale_weight)
      valid_loss += valid_crps_weight * losses.crps_loss(
          quantile_pred_states=pred_states,
          tau_list=quantiles,
          gt_list=gt_list,
          gt_indicator=gt_indicator,
          begin_timestep=valid_start_index,
          end_timestep=valid_end_index,
          time_scale_weight=time_scale_weight)
    else:
      train_loss = losses.state_estimation_loss(
          pred_states=pred_states,
          gt_list=gt_list,
          gt_indicator=gt_indicator,
          begin_timestep=train_start_index,
          end_timestep=train_end_index,
          time_scale_weight=time_scale_weight,
          increment_loss_weight=increment_loss_weight,
          num_forecast_steps=num_forecast_steps)
      valid_loss = losses.state_estimation_loss(
          pred_states=pred_states,
          gt_list=gt_list,
          gt_indicator=gt_indicator,
          begin_timestep=valid_start_index,
          end_timestep=valid_end_index,
          time_scale_weight=time_scale_weight,
          increment_loss_weight=increment_loss_weight,
          num_forecast_steps=num_forecast_steps)

    return train_loss, valid_loss

  infected_doc_train_loss, infected_doc_valid_loss = loss(
      pred_infected,
      gt_list["infected"],
      gt_indicator["infected"],
      train_start_index,
      train_end_index,
      valid_start_index,
      valid_end_index,
      time_scale_weight=time_scale_weight)

  # Documented recovered ground truth includes the reinfectable pool.
  recovered_doc_train_loss, recovered_doc_valid_loss = loss(
      pred_recovered + pred_reinfected,
      gt_list["recovered"],
      gt_indicator["recovered"],
      train_start_index,
      train_end_index,
      valid_start_index,
      valid_end_index,
      time_scale_weight=time_scale_weight)

  death_train_loss, death_valid_loss = loss(
      pred_death,
      gt_list["death"],
      gt_indicator["death"],
      train_start_index,
      train_end_index,
      valid_start_index,
      valid_end_index,
      time_scale_weight=time_scale_weight)

  # Hospitalized ground truth covers ICU and ventilator patients too.
  hospitalized_train_loss, hospitalized_valid_loss = loss(
      pred_hospitalized + pred_icu + pred_ventilator,
      gt_list["hospitalized"],
      gt_indicator["hospitalized"],
      train_start_index,
      train_end_index,
      valid_start_index,
      valid_end_index,
      time_scale_weight=time_scale_weight)

  hospitalized_cumulative_train_loss, hospitalized_cumulative_valid_loss = loss(
      pred_hospitalized_cumulative,
      gt_list["hospitalized_cumulative"],
      gt_indicator["hospitalized_cumulative"],
      train_start_index,
      train_end_index,
      valid_start_index,
      valid_end_index,
      time_scale_weight=time_scale_weight)

  # ICU ground truth includes ventilated patients.
  icu_train_loss, icu_valid_loss = loss(
      pred_icu + pred_ventilator,
      gt_list["icu"],
      gt_indicator["icu"],
      train_start_index,
      train_end_index,
      valid_start_index,
      valid_end_index,
      time_scale_weight=time_scale_weight)

  ventilator_train_loss, ventilator_valid_loss = loss(
      pred_ventilator,
      gt_list["ventilator"],
      gt_indicator["ventilator"],
      train_start_index,
      train_end_index,
      valid_start_index,
      valid_end_index,
      time_scale_weight=time_scale_weight)

  confirmed_train_loss, confirmed_valid_loss = loss(
      pred_confirmed,
      gt_list["confirmed"],
      gt_indicator["confirmed"],
      train_start_index,
      train_end_index,
      valid_start_index,
      valid_end_index,
      time_scale_weight=time_scale_weight)

  # Weighted combination; coefficient order matches `compute_coef`.
  train_loss_overall = (
      train_coefs[0] * train_loss_coefs[0] * infected_doc_train_loss +
      train_coefs[1] * train_loss_coefs[1] * recovered_doc_train_loss +
      train_coefs[2] * train_loss_coefs[2] * death_train_loss +
      train_coefs[3] * train_loss_coefs[3] * confirmed_train_loss +
      train_coefs[4] * train_loss_coefs[4] * hospitalized_train_loss +
      train_coefs[5] *
      (train_loss_coefs[5] * hospitalized_cumulative_train_loss) +
      train_coefs[6] * train_loss_coefs[6] * icu_train_loss +
      train_coefs[7] * train_loss_coefs[7] * ventilator_train_loss)
  valid_loss_overall = (
      valid_coefs[0] * valid_loss_coefs[0] * infected_doc_valid_loss +
      valid_coefs[1] * valid_loss_coefs[1] * recovered_doc_valid_loss +
      valid_coefs[2] * valid_loss_coefs[2] * death_valid_loss +
      valid_coefs[3] * valid_loss_coefs[3] * confirmed_valid_loss +
      valid_coefs[4] * valid_loss_coefs[4] * hospitalized_valid_loss +
      valid_coefs[5] *
      (valid_loss_coefs[5] * hospitalized_cumulative_valid_loss) +
      valid_coefs[6] * valid_loss_coefs[6] * icu_valid_loss +
      valid_coefs[7] * valid_loss_coefs[7] * ventilator_valid_loss)

  # Loss for r_eff. Penalize r_eff>5
  if quantiles is None:
    if r_eff is not None:
      train_loss_overall += (
          hparams["r_eff_penalty_coef"] * tf.math.reduce_mean(
              tf.math.softplus(r_eff - hparams["r_eff_penalty_cutoff"])))

    # Calculate accelration
    train_loss_overall += (
        hparams["acceleration_death_coef"] *
        self.acceleration_loss(pred_death, 3))
    train_loss_overall += (
        hparams["acceleration_confirm_coef"] *
        self.acceleration_loss(pred_confirmed, 3))
    train_loss_overall += (
        hparams["acceleration_hospital_coef"] *
        self.acceleration_loss(pred_hospitalized, 3))
  else:
    # Quantile cumulative violation penalty
    forecasting_horizon = valid_end_index - valid_start_index
    train_violation_confirmed = losses.quantile_viol_loss(
        forecasting_horizon, train_end_index, forecasting_horizon,
        gt_indicator["confirmed"], gt_list["confirmed"], pred_confirmed)
    train_violation_death = losses.quantile_viol_loss(
        forecasting_horizon, train_end_index, forecasting_horizon,
        gt_indicator["death"], gt_list["death"], pred_death)
    train_loss_overall += quantile_cum_viol_coef * tf.reduce_mean(
        train_violation_confirmed)
    train_loss_overall += quantile_cum_viol_coef * tf.reduce_mean(
        train_violation_death)

    valid_violation_confirmed = losses.quantile_viol_loss(
        valid_start_index, valid_end_index, forecasting_horizon,
        gt_indicator["confirmed"], gt_list["confirmed"], pred_confirmed)
    valid_violation_death = losses.quantile_viol_loss(
        valid_start_index, valid_end_index, forecasting_horizon,
        gt_indicator["death"], gt_list["death"], pred_death)
    valid_loss_overall += quantile_cum_viol_coef * tf.reduce_mean(
        valid_violation_confirmed)
    valid_loss_overall += quantile_cum_viol_coef * tf.reduce_mean(
        valid_violation_death)

  return train_loss_overall, valid_loss_overall
def unpack_states(self,
                  chosen_location_list,
                  ground_truth_timeseries,
                  propagated_states,
                  propagated_variables,
                  num_forecast_steps,
                  quantile_regression=False):
  """Unpacks the propagated state tensor into per-location numpy dicts.

  The state axis of `propagated_states` is read out with the following
  layout (as established by the indexing below): 0=exposed, 1=infected_d,
  2=infected_ud, 3=recovered_d, 4=recovered_ud, 5=hospitalized,
  6=hospitalized_cumulative, 7=hospitalized_increase, 8=icu, 9=ventilator,
  10=death_d, 11=population, 12=reinfectable_d, 13=reinfectable_ud,
  14=reinfectable_vaccine, 15=vaccine_immuned, 16=infected_ud_increase.

  Args:
    chosen_location_list: location keys, ordered like the location axis of
      `propagated_states`.
    ground_truth_timeseries: tuple whose second element is the ground-truth
      value dict keyed by compartment name (only read when
      `quantile_regression` is True).
    propagated_states: tensor indexed by (time, state, location).
    propagated_variables: variables tensor, forwarded to
      `self.extract_rates`.
    num_forecast_steps: forecasting horizon; also the window used for the
      "horizon ahead" incremental series.
    quantile_regression: if True, lower-bound the cumulative outputs with
      the last observed ground-truth values via
      `self.lowerbound_postprocessing`.

  Returns:
    Tuple of per-location dictionaries plus the extracted `rates` dict; see
    the return statement for the exact ordering.
  """
  # Assign in the desired dictionary form.
  susceptible_f_all_locations = {}
  exposed_f_all_locations = {}
  infected_d_f_all_locations = {}
  infected_ud_f_all_locations = {}
  recovered_d_f_all_locations = {}
  recovered_ud_f_all_locations = {}
  death_d_f_all_locations = {}
  death_horizon_ahead_d_f_all_locations = {}
  confirmed_f_all_locations = {}
  confirmed_horizon_ahead_d_f_all_locations = {}
  hospitalized_f_all_locations = {}
  hospitalized_increase_f_all_locations = {}
  hospitalized_cumulative_f_all_locations = {}
  icu_f_all_locations = {}
  ventilator_f_all_locations = {}
  reinfectable_d_f_all_locations = {}
  reinfectable_ud_f_all_locations = {}
  population_f_all_locations = {}
  reinfectable_vaccine_f_all_locations = {}
  vaccine_immuned_t_f_all_locations = {}
  infected_ud_increase_f_all_locations = {}

  for location_index, location in enumerate(chosen_location_list):
    exposed_f_all_locations[
        location] = propagated_states[:, 0, location_index].numpy()
    infected_d_f_all_locations[
        location] = propagated_states[:, 1, location_index].numpy()
    infected_ud_f_all_locations[
        location] = propagated_states[:, 2, location_index].numpy()
    recovered_d_f_all_locations[location] = (
        propagated_states[:, 3, location_index].numpy())
    recovered_ud_f_all_locations[location] = (
        propagated_states[:, 4, location_index].numpy())
    # Hospitalized total includes the ICU (8) and ventilator (9) states.
    hospitalized_f_all_locations[location] = (
        propagated_states[:, 5, location_index].numpy() +
        propagated_states[:, 8, location_index].numpy() +
        propagated_states[:, 9, location_index].numpy())
    hospitalized_increase_f_all_locations[
        location] = propagated_states[:, 7, location_index].numpy()
    hospitalized_cumulative_f_all_locations[
        location] = propagated_states[:, 6, location_index].numpy()
    # ICU total includes patients on ventilators.
    icu_f_all_locations[location] = (
        propagated_states[:, 8, location_index].numpy() +
        propagated_states[:, 9, location_index].numpy())
    ventilator_f_all_locations[
        location] = propagated_states[:, 9, location_index].numpy()
    death_d_f_all_locations[
        location] = propagated_states[:, 10, location_index].numpy()
    # Incremental deaths across a num_forecast_steps-wide window.
    death_horizon_ahead_d_f_all_locations[location] = (
        propagated_states[num_forecast_steps - 1:, 10,
                          location_index].numpy() -
        propagated_states[:-num_forecast_steps + 1, 10,
                          location_index].numpy())
    population_f_all_locations[
        location] = propagated_states[:, 11, location_index].numpy()
    reinfectable_d_f_all_locations[
        location] = propagated_states[:, 12, location_index].numpy()
    reinfectable_ud_f_all_locations[
        location] = propagated_states[:, 13, location_index].numpy()
    reinfectable_vaccine_f_all_locations[
        location] = propagated_states[:, 14, location_index].numpy()
    vaccine_immuned_t_f_all_locations[
        location] = propagated_states[:, 15, location_index].numpy()
    infected_ud_increase_f_all_locations[
        location] = propagated_states[:, 16, location_index].numpy()

    # Cumulative confirmed = every ever-documented compartment.
    confirmed_f_all_locations[location] = (
        infected_d_f_all_locations[location] +
        recovered_d_f_all_locations[location] +
        death_d_f_all_locations[location] +
        hospitalized_f_all_locations[location])
    confirmed_horizon_ahead_d_f_all_locations[location] = (
        confirmed_f_all_locations[location][num_forecast_steps - 1:, :] -
        confirmed_f_all_locations[location][:-num_forecast_steps + 1, :])
    # Susceptible is the population remainder, clipped at 0.
    susceptible_f_all_locations[location] = np.maximum(
        0, (population_f_all_locations[location] -
            confirmed_f_all_locations[location] -
            exposed_f_all_locations[location] -
            recovered_ud_f_all_locations[location] -
            infected_ud_f_all_locations[location] -
            vaccine_immuned_t_f_all_locations[location]))
    # Fold the reinfectable pools back into the reported recovered/confirmed
    # outputs.
    recovered_d_f_all_locations[location] = (
        recovered_d_f_all_locations[location] +
        reinfectable_d_f_all_locations[location])
    recovered_ud_f_all_locations[location] = (
        recovered_ud_f_all_locations[location] +
        reinfectable_ud_f_all_locations[location])
    confirmed_f_all_locations[location] = (
        confirmed_f_all_locations[location] +
        reinfectable_d_f_all_locations[location])

    # Lower bound of the cumulative quantiles are the last values.
    # for all constructors.
    if quantile_regression:
      (_, gt_list, _, _, _) = ground_truth_timeseries
      death_d_f_all_locations = self.lowerbound_postprocessing(
          death_d_f_all_locations, gt_list["death"][:, location_index],
          location, num_forecast_steps)
      confirmed_f_all_locations = self.lowerbound_postprocessing(
          confirmed_f_all_locations, gt_list["confirmed"][:, location_index],
          location, num_forecast_steps)
      recovered_d_f_all_locations = self.lowerbound_postprocessing(
          recovered_d_f_all_locations, gt_list["recovered"][:,
                                                            location_index],
          location, num_forecast_steps)
      recovered_ud_f_all_locations = self.lowerbound_postprocessing(
          recovered_ud_f_all_locations, None, location, num_forecast_steps)
      reinfectable_d_f_all_locations = self.lowerbound_postprocessing(
          reinfectable_d_f_all_locations, None, location, num_forecast_steps)
      reinfectable_ud_f_all_locations = self.lowerbound_postprocessing(
          reinfectable_ud_f_all_locations, None, location, num_forecast_steps)

  rates = self.extract_rates(propagated_variables, chosen_location_list)

  return (susceptible_f_all_locations, exposed_f_all_locations,
          infected_d_f_all_locations, infected_ud_f_all_locations,
          recovered_d_f_all_locations, recovered_ud_f_all_locations,
          death_d_f_all_locations, death_horizon_ahead_d_f_all_locations,
          confirmed_f_all_locations,
          confirmed_horizon_ahead_d_f_all_locations,
          hospitalized_f_all_locations, hospitalized_increase_f_all_locations,
          hospitalized_cumulative_f_all_locations, icu_f_all_locations,
          ventilator_f_all_locations, infected_ud_increase_f_all_locations,
          rates)
def pack_compartments(self, states, ground_truth_timeseries,
                      num_forecast_steps):
  """Packs predictions into compartments with associated ground truth."""
  (susceptible, exposed, infected_d, infected_ud, recovered_d, recovered_ud,
   death_d, death_horizon_ahead_d, confirmed, confirmed_horizon_ahead_d,
   hospitalized, hospitalized_increase, hospitalized_cumulative, icu,
   ventilator, infected_ud_increase, rates) = states
  (_, _, _, _, orig_gt) = ground_truth_timeseries

  def horizon_ahead_gt(gt):
    """Creates incremental (1-day) ground truth values."""
    return {
        location: gt[location][num_forecast_steps - 1:] -
                  gt[location][:-num_forecast_steps + 1] for location in gt
    }

  def compartment(name, predictions, ground_truth=None,
                  horizon=num_forecast_steps):
    """Builds one Compartment, attaching ground truth when available."""
    if ground_truth is None:
      return generic_seir_model_constructor.Compartment(
          name=name, predictions=predictions, num_forecast_steps=horizon)
    return generic_seir_model_constructor.Compartment(
        name=name,
        predictions=predictions,
        num_forecast_steps=horizon,
        ground_truth=ground_truth)

  # All results as a list of compartment dataclasses, in the canonical order.
  compartments = [
      compartment(constants.SUSCEPTIBLE, susceptible),
      compartment(constants.EXPOSED, exposed),
      compartment(constants.INFECTED_DOC, infected_d, orig_gt["infected"]),
      compartment(constants.INFECTED_UNDOC, infected_ud),
      compartment(constants.RECOVERED_DOC, recovered_d,
                  orig_gt["recovered"]),
      compartment(constants.RECOVERED_UNDOC, recovered_ud),
      compartment(constants.DEATH, death_d, orig_gt["death"]),
      compartment(
          constants.HORIZON_AHEAD_DEATH,
          death_horizon_ahead_d,
          horizon_ahead_gt(orig_gt["death"]),
          horizon=1),
      compartment(constants.CONFIRMED, confirmed, orig_gt["confirmed"]),
      compartment(
          constants.HORIZON_AHEAD_CONFIRMED,
          confirmed_horizon_ahead_d,
          horizon_ahead_gt(orig_gt["confirmed"]),
          horizon=1),
      compartment(constants.HOSPITALIZED, hospitalized,
                  orig_gt["hospitalized"]),
      compartment(constants.HOSPITALIZED_INCREASE, hospitalized_increase),
      compartment(constants.HOSPITALIZED_CUMULATIVE, hospitalized_cumulative),
      compartment(constants.ICU, icu, orig_gt["icu"]),
      compartment(constants.VENTILATOR, ventilator, orig_gt["ventilator"]),
      compartment(constants.INFECTED_UNDOC_INCREASE, infected_ud_increase),
  ]
  # One quantile-free compartment per extracted rate trace.
  compartments.extend(
      generic_seir_model_constructor.Compartment(
          name=rate_name,
          predictions=rate_predictions,
          num_forecast_steps=num_forecast_steps,
          use_quantiles=False)
      for rate_name, rate_predictions in rates.items())
  return compartments
def apply_quantile_transform(self,
                             hparams,
                             propagated_states,
                             quantile_kernel,
                             quantile_biases,
                             ground_truth_timeseries,
                             num_train_steps,
                             num_forecast_steps,
                             num_quantiles=23,
                             epsilon=1e-8,
                             is_training=True,
                             initial_quantile_step=0):
  """Transform predictions into vector representing different quantiles.

  Args:
    hparams: Hyperparameters.
    propagated_states: single value predictions, its dimensions represent
      timestep * states * location.
    quantile_kernel: Quantile mapping kernel.
    quantile_biases: Biases for quantiles.
    ground_truth_timeseries: Ground truth time series.
    num_train_steps: number of train steps.
    num_forecast_steps: number of forecasting steps.
    num_quantiles: Number of quantiles.
    epsilon: A small number to avoid 0 division issues.
    is_training: Whether the phase is training or inference.
    initial_quantile_step: start index for quantile training.

  Returns:
    Vector value predictions of size
    timestep * states * location * num_quantiles.
  """
  (_, gt_list, gt_indicator, _, _) = ground_truth_timeseries

  unstacked_propagated_states = tf.unstack(propagated_states, axis=1)
  # State indices follow the compartment layout used by unpack_states:
  # 1=infected_d, 3=recovered_d, 5=hospitalized, 8=icu, 9=ventilator,
  # 10=death_d, 12=reinfectable_d.
  pred_infected = unstacked_propagated_states[1]
  pred_recovered = unstacked_propagated_states[3]
  pred_hospitalized = unstacked_propagated_states[5]
  pred_icu = unstacked_propagated_states[8]
  pred_ventilator = unstacked_propagated_states[9]
  pred_death = unstacked_propagated_states[10]
  pred_reinfected = unstacked_propagated_states[12]
  # Cumulative confirmed = sum of every ever-documented compartment.
  pred_confirmed = (
      pred_infected + pred_recovered + pred_death + pred_hospitalized +
      pred_icu + pred_ventilator + pred_reinfected)

  quantile_encoding_window = hparams["quantile_encoding_window"]
  smooth_coef = hparams["quantile_smooth_coef"]
  partial_mean_interval = hparams["partial_mean_interval"]

  # Softplus keeps kernel/bias contributions positive so the cumulative sum
  # below yields monotonically non-decreasing quantile multipliers.
  quantile_mapping_kernel = tf.math.softplus(
      tf.expand_dims(quantile_kernel, 2))
  quantile_biases = tf.math.softplus(tf.expand_dims(quantile_biases, 1))

  propagated_states_quantiles = []
  state_quantiles_multiplier_prev = tf.ones_like(
      tf.expand_dims(propagated_states[0, :, :], 2))

  def gt_ratio_feature(gt_values,
                       predicted):
    """Creates the GT ratio feature."""
    # This uses the imputed values when the values are not valid.
    ratio_pred = (1 - (predicted[:num_train_steps, :] /
                       (epsilon + gt_values[:num_train_steps])))
    # Add 0 at the beginning
    ratio_pred = tf.concat([
        0 * ratio_pred[:(quantile_encoding_window + num_forecast_steps), :],
        ratio_pred
    ],
                           axis=0)
    ratio_pred = tf.expand_dims(ratio_pred, 1)
    ratio_pred = tf.tile(ratio_pred, [1, self.num_states, 1])
    return ratio_pred

  def indicator_feature(gt_indicator):
    """Creates the indicator feature."""
    indicator = 1. - gt_indicator
    # Add 0 at the beginning
    indicator = tf.concat([
        0 * indicator[:(quantile_encoding_window + num_forecast_steps), :],
        indicator
    ],
                          axis=0)
    indicator = tf.expand_dims(indicator, 1)
    indicator = tf.tile(indicator, [1, self.num_states, 1])
    return indicator

  # Propagated states features (zero-padded so a full encoding window is
  # always available at index ti).
  temp_propagated_states = tf.concat([
      0 * propagated_states[:quantile_encoding_window, :, :],
      propagated_states
  ],
                                     axis=0)

  # GT ratio features
  death_gt_ratio_feature = gt_ratio_feature(gt_list["death"], pred_death)
  confirmed_gt_ratio_feature = gt_ratio_feature(gt_list["confirmed"],
                                                pred_confirmed)
  hospitalized_gt_ratio_feature = gt_ratio_feature(gt_list["hospitalized"],
                                                   pred_hospitalized)
  # Indicator features
  death_indicator_feature = indicator_feature(gt_indicator["death"])
  confirmed_indicator_feature = indicator_feature(gt_indicator["confirmed"])
  hospitalized_indicator_feature = indicator_feature(
      gt_indicator["hospitalized"])

  for ti in range(initial_quantile_step,
                  num_train_steps + num_forecast_steps):
    if ti < num_train_steps:
      # During training steps the multipliers are identity, i.e. every
      # quantile equals the point forecast.
      state_quantiles_multiplier = tf.ones_like(
          tf.expand_dims(propagated_states[0, :, :], 2))
      state_quantiles_multiplier = tf.tile(state_quantiles_multiplier,
                                           [1, 1, num_quantiles])
    else:
      # Construct the input features to be used for quantile estimation.
      encoding_input = []
      # Features coming from the trend of the estimated.
      encoding_input.append(1 - (
          temp_propagated_states[ti:(ti + quantile_encoding_window), :, :] /
          (epsilon +
           temp_propagated_states[ti + quantile_encoding_window, :, :])))
      # Features coming from the ground truth ratio of death.
      encoding_input.append(
          death_gt_ratio_feature[ti:(ti + quantile_encoding_window), :, :])
      # Features coming from the ground truth ratio of confirmed.
      encoding_input.append(
          confirmed_gt_ratio_feature[ti:(ti +
                                         quantile_encoding_window), :, :])
      # Features coming from the ground truth ratio of hospitalized.
      encoding_input.append(
          hospitalized_gt_ratio_feature[ti:(ti +
                                            quantile_encoding_window), :, :])
      # Features coming from death indicator.
      encoding_input.append(
          death_indicator_feature[ti:(ti + quantile_encoding_window), :, :])
      # Features coming from confirmed indicator.
      encoding_input.append(
          confirmed_indicator_feature[ti:(ti +
                                          quantile_encoding_window), :, :])
      # Features coming from hospitalized indicator.
      encoding_input.append(
          hospitalized_indicator_feature[ti:(ti +
                                             quantile_encoding_window), :, :])

      encoding_input_t = tf.expand_dims(tf.concat(encoding_input, axis=0), 3)
      # Limit the range of features.
      encoding_input_t = model_utils.apply_relu_bounds(
          encoding_input_t,
          lower_bound=0.0,
          upper_bound=2.0,
          replace_nan=True)
      # Estimate the multipliers of quantiles
      state_quantiles_multiplier = quantile_biases + tf.math.reduce_mean(
          tf.multiply(encoding_input_t, quantile_mapping_kernel), 0)
      # Consider accumulation to guarantee monotonicity
      state_quantiles_multiplier = tf.math.cumsum(
          state_quantiles_multiplier, axis=-1)

      if partial_mean_interval == 0:
        # Normalize to match the median to point forecasts
        state_quantiles_multiplier /= (
            epsilon + tf.expand_dims(
                state_quantiles_multiplier[:, :,
                                           (num_quantiles - 1) // 2], -1))
      else:
        # Normalize with major densities to approximate point forecast (mean)
        median_idx = (num_quantiles - 1) // 2
        normalize_start = median_idx - partial_mean_interval
        normalize_end = median_idx + partial_mean_interval
        # Trapezoidal average of the multipliers around the median.
        normalizer = tf.reduce_mean(
            0.5 *
            (state_quantiles_multiplier[:, :, normalize_start:normalize_end] +
             state_quantiles_multiplier[:, :, normalize_start +
                                        1:normalize_end + 1]),
            axis=2,
            keepdims=True)
        state_quantiles_multiplier /= (epsilon + normalizer)

      # Exponential smoothing of the multipliers across time-steps.
      state_quantiles_multiplier = (
          smooth_coef * state_quantiles_multiplier_prev +
          (1 - smooth_coef) * state_quantiles_multiplier)
      state_quantiles_multiplier_prev = state_quantiles_multiplier

    # Return the estimated quantiles
    propagated_states_quantiles_timestep = tf.multiply(
        tf.expand_dims(propagated_states[ti, :, :], 2),
        state_quantiles_multiplier)
    propagated_states_quantiles.append(propagated_states_quantiles_timestep)

  return tf.stack(propagated_states_quantiles)
def extract_rate_list(self):
  """Return list of rates that correspond to 'propagated_variables' tensor.

  Returns:
    List of rate names.
  """
  return constants.ICU_AND_VENTILATOR_RATE_LIST
def calculate_r_eff(self,
                    rates=None,
                    propagated_variables=None,
                    epsilon=1e-8):
  """Calculate Basic Reproduction Number R_eff over time and locations.

  Exactly one of `rates` or `propagated_variables` must be given.

  Args:
    rates: rate name->tensor maps.
    propagated_variables: single tensor of variables indexed by
      (time)x(variables)x(locations) (used in the training).
    epsilon: epsilon for avoiding numerical error.

  Returns:
    R_eff tensor.

  Raises:
    ValueError: if both or neither of `rates` and `propagated_variables`
      are specified.
  """

  def r_eff_from_params(beta_d, beta_ud, gamma, rho_id, rho_iud, h, kappa_id):
    """R_eff computed with the Next Generation Matrix Method.

    Previously this formula was duplicated in both branches below; keeping a
    single copy avoids the two drifting apart. If you are changing any of
    the parameters, please make sure to update the Next Generation Matrix
    derivation and parameters too.
    """
    # LINT.IfChange
    return (beta_d * gamma + beta_ud *
            (rho_id + kappa_id + h)) / ((gamma + rho_iud) *
                                        (rho_id + kappa_id + h) + epsilon)

  if rates is not None and propagated_variables is not None:
    raise ValueError("Only rates or seir_variables can be used.")
  if rates is None and propagated_variables is None:
    raise ValueError("Have to specify one argument.")

  if rates is not None:
    return r_eff_from_params(
        beta_d=rates["average_contact_id_rate"],
        beta_ud=rates["average_contact_iud_rate"],
        gamma=rates["diagnosis_rate"],
        rho_id=rates["recovery_id_rate"],
        rho_iud=rates["recovery_iud_rate"],
        h=rates["hospitalization_rate"],
        kappa_id=rates["death_id_rate"])

  # Variable indices follow the layout of the propagated variables tensor
  # (2/3: average contact, 6: diagnosis, 7/8: recovery, 12: hospitalization,
  # 15: death).
  propagated_variables_list = tf.unstack(propagated_variables, axis=1)
  return r_eff_from_params(
      beta_d=propagated_variables_list[2],
      beta_ud=propagated_variables_list[3],
      gamma=propagated_variables_list[6],
      rho_id=propagated_variables_list[7],
      rho_iud=propagated_variables_list[8],
      h=propagated_variables_list[12],
      kappa_id=propagated_variables_list[15])
| {
"content_hash": "f0924f0e84de6261deee23ea70f8e325",
"timestamp": "",
"source": "github",
"line_count": 1031,
"max_line_length": 96,
"avg_line_length": 42.915615906886515,
"alnum_prop": 0.625841884012114,
"repo_name": "google-research/google-research",
"id": "697d726c1363ad055ff7a18bdfea3268a55c707e",
"size": "44854",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "covid_epidemiology/src/models/generic_seir_state_model_constructor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "9817"
},
{
"name": "C++",
"bytes": "4166670"
},
{
"name": "CMake",
"bytes": "6412"
},
{
"name": "CSS",
"bytes": "27092"
},
{
"name": "Cuda",
"bytes": "1431"
},
{
"name": "Dockerfile",
"bytes": "7145"
},
{
"name": "Gnuplot",
"bytes": "11125"
},
{
"name": "HTML",
"bytes": "77599"
},
{
"name": "ImageJ Macro",
"bytes": "50488"
},
{
"name": "Java",
"bytes": "487585"
},
{
"name": "JavaScript",
"bytes": "896512"
},
{
"name": "Julia",
"bytes": "67986"
},
{
"name": "Jupyter Notebook",
"bytes": "71290299"
},
{
"name": "Lua",
"bytes": "29905"
},
{
"name": "MATLAB",
"bytes": "103813"
},
{
"name": "Makefile",
"bytes": "5636"
},
{
"name": "NASL",
"bytes": "63883"
},
{
"name": "Perl",
"bytes": "8590"
},
{
"name": "Python",
"bytes": "53790200"
},
{
"name": "R",
"bytes": "101058"
},
{
"name": "Roff",
"bytes": "1208"
},
{
"name": "Rust",
"bytes": "2389"
},
{
"name": "Shell",
"bytes": "730444"
},
{
"name": "Smarty",
"bytes": "5966"
},
{
"name": "Starlark",
"bytes": "245038"
}
],
"symlink_target": ""
} |
"""A MCTS actor."""
from typing import Optional, Tuple
import acme
from acme import adders
from acme import specs
from acme.agents.tf.mcts import models
from acme.agents.tf.mcts import search
from acme.agents.tf.mcts import types
from acme.tf import variable_utils as tf2_variable_utils
import dm_env
import numpy as np
from scipy import special
import sonnet as snt
import tensorflow as tf
class MCTSActor(acme.Actor):
  """Executes a policy- and value-network guided MCTS search."""

  _prev_timestep: dm_env.TimeStep

  def __init__(
      self,
      environment_spec: specs.EnvironmentSpec,
      model: models.Model,
      network: snt.Module,
      discount: float,
      num_simulations: int,
      adder: Optional[adders.Adder] = None,
      variable_client: Optional[tf2_variable_utils.VariableClient] = None,
  ):
    # Components: environment model, evaluation network, replay sink and
    # variable source.
    self._model = model
    self._network = tf.function(network)
    self._variable_client = variable_client
    self._adder = adder

    # Search hyperparameters.
    self._num_actions = environment_spec.actions.num_values
    self._num_simulations = num_simulations
    self._actions = list(range(self._num_actions))
    self._discount = discount

    # Policy from the latest search, replayed alongside the transition in
    # `observe()`. Uniform until the first search has run.
    self._probs = np.ones(
        shape=(self._num_actions,), dtype=np.float32) / self._num_actions

  def _forward(
      self, observation: types.Observation) -> Tuple[types.Probs, types.Value]:
    """Performs a forward pass of the policy-value network."""
    batched_obs = tf.expand_dims(observation, axis=0)
    logits, value = self._network(batched_obs)

    # Drop the batch dimension and convert to numpy/python values; the
    # prior is the softmax over the policy logits.
    prior = special.softmax(logits.numpy().squeeze(axis=0))
    return prior, value.numpy().item()

  def select_action(self, observation: types.Observation) -> types.Action:
    """Computes the agent's policy via MCTS."""
    if self._model.needs_reset:
      self._model.reset(observation)

    # Run a fresh MCTS search from the current observation.
    root = search.mcts(
        observation,
        model=self._model,
        search_policy=search.puct,
        evaluation=self._forward,
        num_simulations=self._num_simulations,
        num_actions=self._num_actions,
        discount=self._discount,
    )

    # As in AlphaZero, act via softmax w.r.t. the root's *visit counts*.
    visit_probs = search.visit_count_policy(root)

    # Remember the search policy so `observe()` can write it to replay.
    self._probs = visit_probs.astype(np.float32)

    return np.int32(np.random.choice(self._actions, p=visit_probs))

  def update(self, wait: bool = False):
    """Fetches the latest variables from the variable source, if needed."""
    if self._variable_client:
      self._variable_client.update(wait)

  def observe_first(self, timestep: dm_env.TimeStep):
    """Records the first timestep of an episode."""
    self._prev_timestep = timestep
    if self._adder:
      self._adder.add_first(timestep)

  def observe(self, action: types.Action, next_timestep: dm_env.TimeStep):
    """Updates the agent's internal model and adds the transition to replay."""
    self._model.update(self._prev_timestep, action, next_timestep)
    self._prev_timestep = next_timestep
    if self._adder:
      self._adder.add(action, next_timestep, extras={'pi': self._probs})
| {
"content_hash": "371fc2af6edbc95d65a6c8aa8131811b",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 79,
"avg_line_length": 31.754716981132077,
"alnum_prop": 0.684194890077243,
"repo_name": "deepmind/acme",
"id": "887d7c25e535db703ba4cc83ac07cbb2caa423d7",
"size": "3982",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "acme/agents/tf/mcts/acting.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2182865"
},
{
"name": "Shell",
"bytes": "2668"
}
],
"symlink_target": ""
} |
'''
.. module:: skrf.io.csv
========================================
csv (:mod:`skrf.io.csv`)
========================================
Functions for reading and writing standard csv files
.. autosummary::
:toctree: generated/
read_pna_csv
pna_csv_2_ntwks
'''
import numpy as npy
import os
from ..network import Network
from .. import mathFunctions as mf
from ..frequency import Frequency
from .. import util
from warnings import warn
# delayed imports
# from pandas import Series, Index, DataFrame
def read_pna_csv(filename, *args, **kwargs):
    '''
    Reads data from a csv file written by an Agilent PNA.

    This function returns a triplet containing the header, comments,
    and data.

    Parameters
    -------------
    filename : str
        the file
    \*args, \*\*kwargs :
        passed to :func:`numpy.genfromtxt`

    Returns
    ---------
    header : str
        The header string, which is the line following the 'BEGIN'
    comments : str
        All lines that begin with a '!'
    data : :class:`numpy.ndarray`
        An array containing the data. The meaning of which depends on
        the header.

    See Also
    ----------
    pna_csv_2_ntwks : Reads a csv file which contains s-parameter data

    Examples
    -----------
    >>> header, comments, data = rf.read_pna_csv('myfile.csv')
    '''
    warn("deprecated", DeprecationWarning)
    with open(filename,'r') as fid:
        begin_line = -2
        end_line = -1
        n_END = 0
        comments = ''
        for k,line in enumerate(fid.readlines()):
            if line.startswith('!'):
                # comment lines are prefixed with '!'
                comments += line[1:]
            elif line.startswith('BEGIN') and n_END == 0:
                # only the first data block (before any END) is read
                begin_line = k
            elif line.startswith('END'):
                if n_END == 0:
                    # first END spotted -> set end_line to read first data
                    # block only
                    end_line = k
                # increment n_END to allow for CR correction in genfromtxt
                n_END += 1
            # the header is the line immediately after BEGIN.
            # NOTE(review): if the file has no BEGIN line, `header` is never
            # bound and the replace() below raises NameError — confirm files
            # are always PNA-exported.
            if k == begin_line+1:
                header = line

        # `k` is the last line index here, so `footer` counts the lines
        # trailing the first data block.
        footer = k - end_line

    try:
        data = npy.genfromtxt(
            filename,
            delimiter = ',',
            skip_header = begin_line + 2,
            skip_footer = footer - (n_END-1)*2,
            *args, **kwargs
            )
    except(ValueError):
        # carriage returns require a doubling of skiplines
        data = npy.genfromtxt(
            filename,
            delimiter = ',',
            skip_header = (begin_line + 2)*2,
            skip_footer = footer,
            *args, **kwargs
            )

    # pna uses unicode coding for degree symbol, but we dont need that
    header = header.replace('\xb0','deg').rstrip('\n').rstrip('\r')

    return header, comments, data
def pna_csv_2_df(filename, *args, **kwargs):
    '''
    Reads data from a csv file written by an Agilent PNA into a
    pandas DataFrame.

    The frequency column becomes the DataFrame index; the remaining
    columns are keyed by their header names.

    .. deprecated::
        kept for backwards compatibility.

    Parameters
    -------------
    filename : str
        the csv file to read
    \*args, \*\*kwargs :
        currently unused, accepted for backwards compatibility

    Returns
    ---------
    df : :class:`pandas.DataFrame`
        frequency-indexed DataFrame, one column per trace
    '''
    warn("deprecated", DeprecationWarning)
    from pandas import Index, DataFrame  # Series import was unused

    header, comments, d = read_pna_csv(filename)
    names = header.split(',')
    # NOTE: the original also parsed a frequency unit out of names[0] into a
    # local that was never used (and could raise IndexError on malformed
    # headers); that dead computation has been removed.
    index = Index(d[:,0], name = names[0])
    df = DataFrame(
        dict([(names[k], d[:,k]) for k in range(1,len(names))]), index=index)
    return df
def pna_csv_2_ntwks2(filename, *args, **kwargs):
    '''
    Reads a PNA csv file and converts it to Network objects.

    Tries to assemble a single 2-port Network from the S11/S12/S21/S22
    traces; if all four are not present, a dict mapping parameter name to
    its 1-port Network is returned instead (best-effort behavior).

    .. deprecated::
        use :func:`pna_csv_2_ntwks3` instead.
    '''
    warn("deprecated", DeprecationWarning)
    df = pna_csv_2_df(filename, *args, **kwargs)
    header, comments, d = read_pna_csv(filename)
    ntwk_dict  = {}
    param_set = set([k[:3] for k in df.columns])
    # scale the frequency axis (assumes the file stores Hz — TODO confirm
    # against pna_csv_2_ntwks3, which applies the same 1e-9 factor).
    f = df.index.values*1e-9

    for param in param_set:
        try:
            s = mf.dbdeg_2_reim(
                df['%s Log Mag(dB)'%param].values,
                df['%s Phase(deg)'%param].values,
                )
        except KeyError:
            # fall back to the REAL/IMAG column naming scheme
            s = mf.dbdeg_2_reim(
                df['%s (REAL)'%param].values,
                df['%s (IMAG)'%param].values,
                )

        ntwk_dict[param] = Network(f=f, s=s, name=param, comments=comments)

    try:
        s = npy.zeros((len(f),2,2), dtype=complex)
        s[:,0,0] = ntwk_dict['S11'].s.flatten()
        s[:,1,1] = ntwk_dict['S22'].s.flatten()
        s[:,1,0] = ntwk_dict['S21'].s.flatten()
        s[:,0,1] = ntwk_dict['S12'].s.flatten()
        name = os.path.splitext(os.path.basename(filename))[0]
        ntwk = Network(f=f, s=s, name=name, comments=comments)
        return ntwk
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit. The best-effort fallback to the
        # per-parameter dict is kept for ordinary exceptions only.
        return ntwk_dict
def pna_csv_2_ntwks3(filename):
    '''
    Read a CSV file exported from an Agilent PNA in dB/deg format

    Parameters
    --------------
    filename : str
        full path or filename

    Returns
    ---------
    out : :class:`~skrf.network.Network`
        2-Port Network built from the S11/S12/S21/S22 traces.
        NOTE(review): for files that are not in dB/deg format a warning is
        emitted and None is implicitly returned.
    '''
    header, comments, d = read_pna_csv(filename)
    col_headers = pna_csv_header_split(filename)

    # set impedance to 50 Ohm (doesn't matter for now)
    z0 = npy.ones((npy.shape(d)[0]))*50
    # read f values, convert to GHz
    f = d[:,0]/1e9

    name = os.path.splitext(os.path.basename(filename))[0]

    if 'db' in header.lower() and 'deg' in header.lower():
        # this is a csv in DB/DEG format
        # -> convert db/deg values to real/imag values
        s = npy.zeros((len(f),2,2), dtype=complex)
        for k, h in enumerate(col_headers[1:]):
            # each trace occupies two adjacent data columns (dB then deg);
            # k indexes col_headers[1:], so the data columns are k+1, k+2.
            if 's11' in h.lower() and 'db' in h.lower():
                s[:,0,0] = mf.dbdeg_2_reim(d[:,k+1], d[:,k+2])
            elif 's21' in h.lower() and 'db' in h.lower():
                s[:,1,0] = mf.dbdeg_2_reim(d[:,k+1], d[:,k+2])
            elif 's12' in h.lower() and 'db' in h.lower():
                s[:,0,1] = mf.dbdeg_2_reim(d[:,k+1], d[:,k+2])
            elif 's22' in h.lower() and 'db' in h.lower():
                s[:,1,1] = mf.dbdeg_2_reim(d[:,k+1], d[:,k+2])

        n = Network(f=f,s=s,z0=z0, name = name)
        return n
    else:
        warn("File does not seem to be formatted properly (only dB/deg supported for now)")
def read_all_csv(dir='.', contains = None):
    '''
    Read all CSV files in a directory.

    Each file is first tried as an Agilent-style csv
    (:func:`pna_csv_2_ntwks3`); failing that, as a
    :class:`~skrf.network.Network` file. Files readable by neither are
    silently skipped (deliberate best-effort scan).

    Parameters
    --------------
    dir : str, optional
        the directory to load from, default \'.\'
        (parameter name kept for backwards compatibility although it
        shadows the builtin)
    contains : str, optional
        if not None, only files containing this substring will be loaded

    Returns
    ---------
    out : dict
        dictionary containing all loaded CSV objects. keys are the
        filenames without extensions, and the values are the objects
    '''
    out={}
    for filename in os.listdir(dir):
        if contains is not None and contains not in filename:
            continue
        fullname = os.path.join(dir,filename)
        keyname = os.path.splitext(filename)[0]
        try:
            out[keyname] = pna_csv_2_ntwks3(fullname)
            continue
        except Exception:
            # BUG FIX: was bare `except:` (also caught KeyboardInterrupt/
            # SystemExit); keep the best-effort fall-through for ordinary
            # exceptions only.
            pass
        try:
            out[keyname] = Network(fullname)
            continue
        except Exception:
            # unreadable file: skip it.
            pass

    return out
class AgilentCSV(object):
'''
Agilent-style csv file representing either scalar traces vs frequency
or complex data vs. frequency
'''
def __init__(self, filename, *args, **kwargs):
    '''
    Load and parse an Agilent-style csv file.

    The file is read immediately; the parsed header, comments and data
    array are stored as attributes.

    Parameters
    ----------
    filename : str
        path of the csv file
    \*args ,\*\*kwargs :
        passed to Network.__init__ in :func:`networks` and
        :func:`scalar_networks`
    '''
    self.filename = filename
    self.args = args
    self.kwargs = kwargs
    # parse the file up front; read() uses self.filename.
    self.header, self.comments, self.data = self.read()
def read(self):
    '''
    Reads data from file

    This function returns a triplet containing the header, comments,
    and data.

    Returns
    ---------
    header : str
        The header string, which is the line following the 'BEGIN'
    comments : str
        All lines that begin with a '!'
    data : :class:`numpy.ndarray`
        An array containing the data. The meaning of which depends on
        the header.
    '''
    with open(self.filename, 'r') as fid:
        begin_line = -2
        end_line = -1
        comments = ''
        for k,line in enumerate(fid.readlines()):
            if line.startswith('!'):
                # comment lines are prefixed with '!'
                comments += line[1:]
            elif line.startswith('BEGIN'):
                begin_line = k
            elif line.startswith('END'):
                end_line = k
            # the header is the line immediately after BEGIN
            if k == begin_line+1:
                header = line

        # `k` is the last line index here, so `footer` counts the lines
        # trailing the data block.
        footer = k - end_line

    try:
        data = npy.genfromtxt(
            self.filename,
            delimiter = ',',
            skip_header = begin_line + 2,
            skip_footer = footer,
            )
    except(ValueError):
        # carriage returns require a doubling of skiplines
        data = npy.genfromtxt(
            self.filename,
            delimiter = ',',
            skip_header = (begin_line + 2)*2,
            skip_footer = footer,
            )

    # pna uses unicode coding for degree symbol, but we dont need that
    header = header.replace('\xb0','deg').rstrip('\n').rstrip('\r')

    return header, comments, data
@property
def frequency(self):
    '''
    Frequency axis of the data : :class:`~skrf.frequency.Frequency`

    The unit is parsed from the first column header (e.g. ``Freq(GHz)``);
    if it cannot be parsed, ``hz`` is assumed.
    '''
    first_col = self.columns[0]
    # try to pull the frequency unit out of e.g. 'Freq(GHz)'
    try:
        f_unit = first_col.split('(')[1].split(')')[0]
    except:
        f_unit = 'hz'
    return Frequency.from_f(self.data[:,0], unit = f_unit)
@property
def n_traces(self):
'''
number of data traces : int
'''
return self.data.shape[1] - 1
@property
def columns(self):
'''
List of column names : list of str
This function is needed because Agilent allows the delimiter
of a csv file (ie `'`) to be present in the header name. ridiculous.
If splitting the header fails, then a suitable list is returned of
the correct length, which looks like
* ['Freq(?)','filename-0','filename-1',..]
'''
header, d = self.header, self.data
n_traces = d.shape[1] - 1 # because theres is one frequency column
if header.count(',') == n_traces:
cols = header.split(',') # column names
else:
# the header contains too many delimiters. what loosers. maybe
# we can split it on `)'` instead
if header.count('),') == n_traces:
cols = header.split('),')
# we need to add back the paranthesis we split on to all but
# last columns
cols = [col + ')' for col in cols[:-1]] + [cols[-1]]
else:
# I dont know how to seperate column names
warn('Cant decipher header, so I\'m creating one. check output. ')
cols = ['Freq(?),']+['%s-%i'%(util.basename_noext(filename),k) \
for k in range(n_traces)]
return cols
@property
def scalar_networks(self):
'''
Returns list of Networks for each column : list
the data is stored in the Network's `.s` property, so its up
to you to interpret results. if 'db' is in the column name then
it is converted to linear before being store into `s`.
Returns
--------
out : list of :class:`~skrf.network.Network` objects
list of Networks representing the data contained in each column
'''
header, comments, d = self.header, self.comments, self.data
n_traces = d.shape[1] - 1 # because theres is one frequency column
cols = self.columns
freq = self.frequency
# loop through columns and create a single network for each column
ntwk_list = []
for k in range(1,n_traces+1):
s = d[:,k]
if 'db' in cols[k].lower():
s = mf.db_2_mag(s)
ntwk_list.append(
Network(
frequency = freq, s = s,comments = comments,
name = cols[k],*self.args, **self.kwargs)
)
return ntwk_list
@property
def networks(self):
'''
Reads a PNAX csv file, and returns a list of one-port Networks
Note this only works if csv is save in Real/Imaginary format for now
Parameters
-----------
filename : str
filename
Returns
--------
out : list of :class:`~skrf.network.Network` objects
list of Networks representing the data contained in column pairs
'''
names = self.columns
header, comments, d= self.header,self.comments, self.data
ntwk_list = []
if (self.n_traces)//2 == 0 : # / --> // for Python3 compatibility
# this isnt complex data
return self.scalar_networks
else:
for k in range((self.n_traces)//2):
name = names[k*2+1]
#print(names[k], names[k+1])
if 'db' in names[k].lower() and 'deg' in names[k+1].lower():
s = mf.dbdeg_2_reim(d[:,k*2+1], d[:,k*2+2])
elif 'real' in names[k].lower() and 'imag' in names[k+1].lower():
s = d[:,k*2+1]+1j*d[:,k*2+2]
else:
warn('CSV format unrecognized in "%s" or "%s". It\'s up to you to intrepret the resultant network correctly.' % (names[k], names[k+1]))
s = d[:,k*2+1]+1j*d[:,k*2+2]
ntwk_list.append(
Network(frequency = self.frequency, s=s, name=name,
comments=comments, *self.args, **self.kwargs)
)
return ntwk_list
@property
def dict(self):
'''
'''
return { self.columns[k]:self.data[:,k] \
for k in range(self.n_traces+1)}
@property
def dataframe(self):
'''
Pandas DataFrame representation of csv file
obviously this requires pandas
'''
from pandas import Index, DataFrame
index = Index(
self.frequency.f_scaled,
name = 'Frequency(%s)'%self.frequency.unit)
return DataFrame(
{ self.columns[k]:self.data[:,k] \
for k in range(1,self.n_traces+1)},
index=index,
)
def pna_csv_header_split(filename):
    '''
    Split a Agilent csv file's header into a list

    This function is needed because Agilent allows the delimiter
    of a csv file (ie `'`) to be present in the header name. ridiculous.

    If splitting the header fails, then a suitable list is returned of
    the correct length, which looks like
     * ['Freq(?)','filename-0','filename-1',..]

    Parameters
    ------------
    filename : str
        csv filename

    Returns
    --------
    cols : list of str's
        list of column names
    '''
    warn("deprecated", DeprecationWarning)
    header, comments, data = read_pna_csv(filename)
    # one column is frequency; every other column is a trace
    n_traces = data.shape[1] - 1

    if header.count(',') == n_traces:
        # simple case: exactly one delimiter per column boundary
        return header.split(',')

    if header.count('),') == n_traces:
        # column names themselves contain commas; split on '),' and
        # restore the parenthesis stripped from all but the last name
        pieces = header.split('),')
        return [piece + ')' for piece in pieces[:-1]] + [pieces[-1]]

    # give up and synthesize placeholder names from the filename
    warn('Cant decipher header, so im creating one. check output. ')
    return ['Freq(?),'] + ['%s-%i' % (util.basename_noext(filename), k)
                           for k in range(n_traces)]
def pna_csv_2_ntwks(filename):
    '''
    Reads a PNAX csv file, and returns a list of one-port Networks

    Note this only works if csv is save in Real/Imaginary format for now

    Parameters
    -----------
    filename : str
        filename

    Returns
    --------
    out : list of :class:`~skrf.network.Network` objects
        list of Networks representing the data contained in column pairs
    '''
    warn("deprecated", DeprecationWarning)
    # TODO: check the data's format (Real-imag or db/angle , ..)
    header, comments, d = read_pna_csv(filename)
    names = pna_csv_header_split(filename)

    ntwk_list = []
    # BUGFIX: use floor division -- under Python 3 `/` yields a float,
    # which breaks both this comparison and range() below
    if (d.shape[1] - 1) // 2 == 0:
        # this isnt complex data
        f = d[:, 0] * 1e-9
        if 'db' in header.lower():
            s = mf.db_2_mag(d[:, 1])
        else:
            raise (NotImplementedError)
        name = os.path.splitext(os.path.basename(filename))[0]
        return Network(f=f, s=s, name=name, comments=comments)
    else:
        for k in range((d.shape[1] - 1) // 2):
            f = d[:, 0] * 1e-9
            name = names[k]
            print((names[k], names[k+1]))
            # NOTE(review): inspecting names[k]/names[k+1] looks suspect --
            # the column pair for trace k lives at names[k*2+1]/names[k*2+2]
            # (compare AgilentCSV.networks). Left unchanged here since this
            # function is deprecated; verify before relying on the
            # format auto-detection.
            if 'db' in names[k].lower() and 'deg' in names[k+1].lower():
                s = mf.dbdeg_2_reim(d[:, k*2+1], d[:, k*2+2])
            elif 'real' in names[k].lower() and 'imag' in names[k+1].lower():
                s = d[:, k*2+1] + 1j*d[:, k*2+2]
            else:
                print('WARNING: csv format unrecognized. ts up to you to intrepret the resultant network correctly.')
                s = d[:, k*2+1] + 1j*d[:, k*2+2]

            ntwk_list.append(
                Network(f=f, s=s, name=name, comments=comments)
                )

        return ntwk_list
def pna_csv_2_freq(filename):
    '''
    Build a Frequency object from the frequency column of an Agilent csv.

    The frequency unit is parsed from the first column's header, e.g.
    'Freq(Hz)'; if that fails, 'hz' is assumed.
    '''
    warn("deprecated", DeprecationWarning)
    header, comments, data = read_pna_csv(filename)
    columns = pna_csv_header_split(filename)
    # the unit is the parenthesized part of the first column name
    try:
        unit = columns[0].split('(')[1].split(')')[0]
    except:
        unit = 'hz'
    return Frequency.from_f(data[:, 0], unit=unit)
def pna_csv_2_scalar_ntwks(filename, *args, **kwargs):
    '''
    Reads a PNAX csv file containing scalar traces, returning Networks

    Parameters
    -----------
    filename : str
        filename
    \*args, \*\*kwargs :
        passed to Network.__init__ for each created Network

    Returns
    --------
    out : list of :class:`~skrf.network.Network` objects
        list of Networks representing the data contained in column pairs
    '''
    warn("deprecated", DeprecationWarning)
    header, comments, d = read_pna_csv(filename)
    cols = pna_csv_header_split(filename)
    # one column is frequency, the rest are scalar traces
    n_traces = d.shape[1] - 1

    # the frequency unit is the parenthesized part of the first column name
    try:
        f_unit = cols[0].split('(')[1].split(')')[0]
    except:
        f_unit = 'hz'
    freq = Frequency.from_f(d[:, 0], unit=f_unit)

    # build one single-trace Network per column; dB traces are converted
    # to linear magnitude before being stored in `s`
    ntwk_list = []
    for k in range(1, n_traces + 1):
        trace = d[:, k]
        if 'db' in cols[k].lower():
            trace = mf.db_2_mag(trace)
        ntwk_list.append(
            Network(frequency=freq, s=trace, comments=comments,
                    name=cols[k], *args, **kwargs))
    return ntwk_list
def read_zva_dat(filename, *args, **kwargs):
    '''
    Reads data from a dat file written by a R&S ZVA in dB/deg or re/im format

    This function returns a triplet containing header, comments and data.

    Parameters
    -------------
    filename : str
        the file
    \*args, \*\*kwargs :
        passed through to :func:`numpy.genfromtxt`

    Returns
    ---------
    header : str
        the last '%' line (including the leading '%'), which holds the
        column names; '' if the file contains no '%' lines
    comments : str
        all '%' lines with the marker stripped
    data : :class:`numpy.ndarray`
        An array containing the data. The meaning of which depends on
        the header.
    '''
    with open(filename, 'r') as fid:
        # BUGFIX: default header/begin_line so a file without any '%'
        # lines no longer raises NameError (header was unbound) or passes
        # a negative skip_header to genfromtxt (begin_line was -2)
        header = ''
        begin_line = 0
        comments = ''
        for k, line in enumerate(fid.readlines()):
            if line.startswith('%'):
                comments += line[1:]
                # the last comment line doubles as the column header;
                # numeric data starts on the line after it
                header = line
                begin_line = k + 1

    data = npy.genfromtxt(
        filename,
        delimiter = ',',
        skip_header = begin_line,
        *args, **kwargs
        )

    return header, comments, data
def zva_dat_2_ntwks(filename):
    '''
    Read a dat file exported from a R&S ZVA in dB/deg or re/im format

    Parameters
    --------------
    filename : str
        full path or filename

    Returns
    ---------
    out : :class:`~skrf.network.Network`
        2-Port Network; None if the header format is not recognized
        (a warning is emitted in that case)
    '''
    header, comments, d = read_zva_dat(filename)
    col_headers = header.split(',')
    # set impedance to 50 Ohm (doesn't matter for now)
    z0 = npy.ones((npy.shape(d)[0])) * 50
    # read f values, convert to GHz
    f = d[:, 0] / 1e9
    name = os.path.splitext(os.path.basename(filename))[0]

    if 're' in header.lower() and 'im' in header.lower():
        # this is a file in re/im format -> no conversion required
        s = npy.zeros((len(f), 2, 2), dtype=complex)
        for k, h in enumerate(col_headers):
            # NOTE(review): the column offsets (k vs k+1) assume a specific
            # trace ordering in the export, and s12/s22 deliberately drop
            # their imaginary part -- verify against the instrument's
            # export settings before trusting these values.
            if 's11' in h.lower() and 're' in h.lower():
                s[:, 0, 0] = d[:, k] + 1j*d[:, k+1]
            elif 's21' in h.lower() and 're' in h.lower():
                s[:, 1, 0] = d[:, k] + 1j*d[:, k+1]
            elif 's12' in h.lower() and 're' in h.lower():
                s[:, 0, 1] = d[:, k+1]  # + 1j*d[:,k+2]
            elif 's22' in h.lower() and 're' in h.lower():
                s[:, 1, 1] = d[:, k+1]  # + 1j*d[:,k+2]
        # BUGFIX: this branch previously fell through without a return,
        # so re/im files silently yielded None despite s being computed
        return Network(f=f, s=s, z0=z0, name=name)
    elif 'db' in header.lower() and not 'deg' in header.lower():
        # this is a file in db format (no deg values) -> conversion required
        s = npy.zeros((len(f), 2, 2), dtype=complex)
        for k, h in enumerate(col_headers):
            # this doesn't always work! (depends on no. of channels,
            # sequence of adding traces etc.) -> Needs changing!
            if 's11' in h.lower() and 'db' in h.lower():
                s[:, 0, 0] = mf.dbdeg_2_reim(d[:, k], d[:, k+2])
            elif 's21' in h.lower() and 'db' in h.lower():
                s[:, 1, 0] = mf.dbdeg_2_reim(d[:, k], d[:, k+2])
        n = Network(f=f, s=s, z0=z0, name=name)
        return n
    else:
        warn("File does not seem to be formatted properly (dB/deg or re/im)")
def read_all_zva_dat(dir='.', contains=None):
    '''
    Read all DAT files in a directory (from R&S ZVA)

    Parameters
    --------------
    dir : str, optional
        the directory to load from, default \'.\'
    contains : str, optional
        if not None, only files containing this substring will be loaded

    Returns
    ---------
    out : dictionary
        dictionary containing all loaded DAT objects. keys are the
        filenames without extensions, and the values are the objects
    '''
    ntwk_dict = {}
    for fname in os.listdir(dir):
        if contains is not None and contains not in fname:
            continue
        full_path = os.path.join(dir, fname)
        key = os.path.splitext(fname)[0]

        # try the ZVA dat reader first, then fall back to a generic
        # Network load; files readable by neither are silently skipped
        # (deliberate best-effort directory scan)
        for loader in (zva_dat_2_ntwks, Network):
            try:
                ntwk_dict[key] = loader(full_path)
                break
            except:
                continue

    return ntwk_dict
def read_vectorstar_csv(filename, *args, **kwargs):
    '''
    Reads data from a csv file written by an Anritsu VectorStar

    Parameters
    -------------
    filename : str
        the file
    \*args, \*\*kwargs :
        currently unused

    Returns
    ---------
    header : list of str
        the line(s) starting with 'PNT' (the row just before the data)
    comments : str
        all lines that began with a '!', with the '!' markers and any
        carriage returns stripped
    data : :class:`numpy.ndarray`
        An array containing the data. The meaning of which depends on
        the header.
    '''
    with open(filename, 'r') as fid:
        # comment lines are flagged with '!'
        comments = ''.join(line for line in fid if line.startswith('!'))
        fid.seek(0)
        # note: this is a *list* of matching lines, not a single string
        header = [line for line in fid if line.startswith('PNT')]

    # reopen via genfromtxt; first raw line and the leading (non-numeric)
    # row are discarded
    data = npy.genfromtxt(
        filename,
        comments='!',
        delimiter=',',
        skip_header=1)[1:]

    comments = comments.replace('\r', '').replace('!', '')
    return header, comments, data
def vectorstar_csv_2_ntwks(filename):
    '''
    Reads a vectorstar csv file, and returns a list of one-port Networks

    Note this only works if csv is save in Real/Imaginary format for now

    Parameters
    -----------
    filename : str
        filename

    Returns
    --------
    out : list of :class:`~skrf.network.Network` objects
        list of Networks representing the data contained in column pairs
    '''
    # TODO: check the data's format (Real-imag or db/angle , ..)
    header, comments, d = read_vectorstar_csv(filename)
    # trace names live on the comment line that starts with 'PARAMETER'
    names = [line for line in comments.split('\n')
             if line.startswith('PARAMETER')][0].split(',')[1:]

    # each trace occupies three columns: frequency, real, imaginary.
    # BUGFIX: use floor division -- under Python 3 `/` returns a float,
    # which range() rejects with a TypeError
    return [Network(
        f = d[:, k*3+1],
        s = d[:, k*3+2] + 1j*d[:, k*3+3],
        z0 = 50,
        name = names[k].rstrip(),
        comments = comments,
        ) for k in range(d.shape[1] // 3)]
| {
"content_hash": "50b5f2da54d076cb54f6f0096471b2d1",
"timestamp": "",
"source": "github",
"line_count": 875,
"max_line_length": 155,
"avg_line_length": 28.95657142857143,
"alnum_prop": 0.5280419939219324,
"repo_name": "Ttl/scikit-rf",
"id": "fdaaccbc004c9f329897d47ac4556bd9d312c22c",
"size": "25338",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "skrf/io/csv.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "236"
},
{
"name": "C",
"bytes": "4015"
},
{
"name": "Jupyter Notebook",
"bytes": "7151"
},
{
"name": "Python",
"bytes": "1124439"
},
{
"name": "Scheme",
"bytes": "6630"
},
{
"name": "Shell",
"bytes": "219"
},
{
"name": "TypeScript",
"bytes": "1286336"
}
],
"symlink_target": ""
} |
import numpy as np
import matplotlib.pyplot as plt
from codim1.core import *
from codim1.assembly import *
from codim1.fast_lib import *
from codim1.post import *
import codim1.core.tools as tools
import matplotlib as mpl
mpl.rcParams['lines.linewidth'] = 2
def test_long_ray_fsf():
    """Thrust-fault free-surface test on a surface mesh extended by rays.

    Builds a surface mesh flanked by geometrically-growing ray elements,
    embeds a buried fault, solves the SGBEM system, and compares the
    computed surface displacements against Segall's analytical edge
    dislocation solution.
    """
    shear_modulus = 1.0
    poisson_ratio = 0.25
    n_elements_surface = 20
    # n_elements_surface = 25
    degree = 3
    quad_min = degree + 1
    quad_mult = 3
    quad_max = quad_mult * degree
    quad_logr = quad_mult * degree + (degree % 2)
    quad_oneoverr = quad_mult * degree + (degree % 2)
    interior_quad_pts = 13

    di = 1.0
    df = 1.0
    x_di = -0.5
    x_df = 0.5

    # Determine fault parameters
    # fault angle
    left_end = np.array((x_di, -di))
    right_end = np.array((x_df, -df))
    fault_vector = left_end - right_end
    # fault tangent and normal vectors
    fault_tangential = fault_vector / np.linalg.norm(fault_vector)
    fault_normal = np.array((fault_tangential[1], -fault_tangential[0]))

    # Mesh the surface
    main_surface_left = (-10.0, 0.0)
    main_surface_right = (10.0, 0.0)
    mesh1 = simple_line_mesh(n_elements_surface,
                             main_surface_left,
                             main_surface_right)
    # ray elements double in length every `per_step` elements so the
    # mesh extends far from the fault at modest element count
    per_step = 5
    steps = 10
    ray_lengths = [1.0] * per_step
    for i in range(1, steps):
        ray_lengths.extend([2.0 ** float(i)] * per_step)
    ray_left_dir = (-1.0, 0.0)
    mesh2 = ray_mesh(main_surface_left, ray_left_dir, ray_lengths, flip = True)
    ray_right_dir = (1.0, 0.0)
    mesh3 = ray_mesh(main_surface_right, ray_right_dir, ray_lengths)
    surface_mesh = combine_meshes(mesh2, combine_meshes(mesh1, mesh3),
                                  ensure_continuity = True)
    apply_to_elements(surface_mesh, "bc",
                      BC("traction", ZeroBasis()), non_gen = True)

    # Mesh the fault
    fault_elements = 50
    fault_mesh = simple_line_mesh(fault_elements, left_end, right_end)
    apply_to_elements(fault_mesh, "bc", BC("crack_displacement",
                                           ConstantBasis(-fault_tangential)),
                      non_gen = True)

    # Combine and apply pieces
    mesh = combine_meshes(surface_mesh, fault_mesh)
    bf = gll_basis(degree)
    qs = QuadStrategy(mesh, quad_min, quad_max, quad_logr, quad_oneoverr)
    apply_to_elements(mesh, "qs", qs, non_gen = True)
    apply_to_elements(mesh, "basis", bf, non_gen = True)
    apply_to_elements(mesh, "continuous", True, non_gen = True)
    init_dofs(mesh)

    ek = ElasticKernelSet(shear_modulus, poisson_ratio)
    matrix, rhs = sgbem_assemble(mesh, ek)
    apply_average_constraint(matrix, rhs, surface_mesh)

    # for e_k in surface_mesh:
    #     e_k.dofs_initialized = False
    # init_dofs(surface_mesh)
    # matrix2 = simple_matrix_assemble(surface_mesh, ek.k_rh)

    # The matrix produced by the hypersingular kernel is singular, so I need
    # to provide some further constraint in order to remove rigid body motions.
    # I impose a constraint that forces the average displacement to be zero.
    # apply_average_constraint(matrix, rhs, mesh)

    soln_coeffs = np.linalg.solve(matrix, rhs)
    apply_coeffs(mesh, soln_coeffs, "soln")
    x, u, t = evaluate_boundary_solution(surface_mesh, soln_coeffs, 8)

    def analytical_free_surface(x, x_d, d, delta, s):
        """
        Analytical solution for the surface displacements from an infinite
        buried edge dislocation. Add two of them with opposite slip to represent
        an infinitely long thrust/normal fault.
        Extracted from chapter 3 of Segall 2010.
        """
        xsi = (x - x_d) / d
        factor = s / np.pi
        term1 = np.cos(delta) * np.arctan(xsi)
        term2 = (np.sin(delta) - xsi * np.cos(delta)) / (1 + xsi ** 2)
        ux = factor * (term1 + term2)
        term1 = np.sin(delta) * np.arctan(xsi)
        term2 = (np.cos(delta) + xsi * np.sin(delta)) / (1 + xsi ** 2)
        uy = -factor * (term1 + term2)
        return ux, uy

    # Compute the exact solution: two opposite-slip edge dislocations
    x_e = x[0, :]
    delta = np.arctan((df - di) / (x_df - x_di))
    ux_exact1, uy_exact1 = analytical_free_surface(x_e, x_di, di, delta, -1.0)
    ux_exact2, uy_exact2 = analytical_free_surface(x_e, x_df, df, delta, 1.0)
    ux_exact = ux_exact1 + ux_exact2
    uy_exact = uy_exact1 + uy_exact2

    assert(np.sum(np.abs(ux_exact - u[0,:])) < 0.1)

    def comparison_plot():
        """Overlay exact and estimated surface displacements."""
        plt.plot(x_e, ux_exact, '*', label = 'Exact X Displacement')
        plt.plot(x_e, uy_exact, '*', label = 'Exact Y Displacement')
        plt.plot(x_e, u[0, :], '8',
                 linewidth = 2, label = 'Estimated X displacement')
        plt.plot(x_e, u[1, :], '8',
                 linewidth = 2, label = 'Estimated Y displacement')
        plt.axis([-5, 5, -0.2, 0.2])
        plt.xlabel(r'$x/d$', fontsize = 18)
        plt.ylabel(r'$u/s$', fontsize = 18)
        plt.legend()
        plt.show()

    def error_plot():
        """Plot relative displacement errors along the surface."""
        x_error = np.abs(ux_exact - u[0, :]) / np.abs(ux_exact)
        y_error = np.abs(uy_exact - u[1, :]) / np.abs(uy_exact)
        plt.figure(1)
        plt.xlim(-30, 30)
        plt.ylim(0, 0.0001)
        plt.plot(x_e, x_error, '*', label = '% X displacement Error')
        plt.plot(x_e, y_error, '*', label = '% Y displacement Error')
        plt.xlabel(r'$x/d$', fontsize = 18)
        plt.ylabel(r'$100\left(\frac{|u_{exact} - u_{est}|}{s}\right)$', fontsize = 18)
        plt.legend()
        plt.show()

    def interior_plot():
        """Contour the interior displacement field around the fault."""
        x_pts = 30
        y_pts = 30
        min_x = -3
        max_x = 3
        min_y = -3
        max_y = 0
        x = np.linspace(min_x, max_x, x_pts)
        y = np.linspace(min_y, max_y, y_pts)
        int_ux = np.zeros((y_pts, x_pts))
        int_uy = np.zeros((y_pts, x_pts))
        for i in range(x_pts):
            # BUGFIX: was the Python-2-only statement `print i`; the rest of
            # this file uses the function form, which works on both 2 and 3
            print(i)
            for j in range(y_pts):
                u = sgbem_interior(mesh, (x[i], y[j]), np.zeros(2),
                                   ek, "soln", "displacement")
                int_ux[j, i] = u[0]
                int_uy[j, i] = u[1]

        X, Y = np.meshgrid(x, y)

        def contf_plot(type, data):
            fig = plt.figure()
            ax = fig.add_subplot(1, 1, 1)
            levels = np.linspace(-0.5, 0.5, 21)
            tools.plot_mesh(fault_mesh, show = False, fig_ax = (fig, ax))
            im = ax.contourf(X, Y, data, levels)
            ax.contour(X, Y, data, levels,
                       colors = ('k',), linestyles=['solid'])
            ax.set_ylabel(r'$x/d$', fontsize = 18)
            ax.set_xlabel(r'$y/d$', fontsize = 18)
            ax.set_title(type + ' displacement contours.')
            ax.set_xlim(min_x, max_x)
            ax.set_ylim(min_y, max_y)
            fig.colorbar(im)
        contf_plot('Vertical', int_uy)
        contf_plot('Horizontal', int_ux)
        plt.show()

    comparison_plot()
    # error_plot()
    # Forming interior plot
    # interior_plot()
if __name__ == "__main__":
test_long_ray_fsf()
| {
"content_hash": "8178fdf90ca34fb904e761c9b15432ff",
"timestamp": "",
"source": "github",
"line_count": 197,
"max_line_length": 87,
"avg_line_length": 35.69543147208122,
"alnum_prop": 0.5578782707622298,
"repo_name": "tbenthompson/codim1",
"id": "16cbd3370f5005d0b73e8be78a516745b365bc55",
"size": "7032",
"binary": false,
"copies": "1",
"ref": "refs/heads/LMS_branch",
"path": "examples/ex_long_ray_fsf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "49317"
},
{
"name": "Python",
"bytes": "210105"
}
],
"symlink_target": ""
} |
"""
@name: Modules/House/Lighting/Lights/_test/test_lights.py
@author: D. Brian Kimmel
@contact: D.BrianKimmel@gmail.com
@copyright: (c) 2014-2020 by D. Brian Kimmel
@note: Created on May 23, 2014
@license: MIT License
@summary: This module is for testing lighting data.
Passed all 14 tests - DBK - 2020-02-09
"""
__updated__ = '2020-02-09'
# Import system type stuff
from twisted.trial import unittest
from ruamel.yaml import YAML
# Import PyMh files and modules.
from _test.testing_mixin import SetupPyHouseObj
from Modules.House.Lighting.Lights.lights import Api as lightsApi, LocalConfig as lightsConfig
from Modules.Core.Utilities.debug_tools import PrettyFormatAny
TEST_YAML = """\
Lights:
- Name: Front Door
Room: Outside
Family:
Name: Insteon
Address: 11.11.11
- Name: Garage
Room: Outside
Dimmable: true
Family:
Name: Insteon
Address: 22.22.22
- Name: Buffet
Comment: x
Room: Dining Room
Family:
Name: Insteon
Address: 33.33.33
- Name: Wet Bar
Comment: This is the Pink Poppy Wet bar light in the living room.
Family:
Name: Insteon
Address: 44.44.44
Dimmable: true # Optional
Room: Living Room
"""
class SetupMixin(object):
    """Shared fixture: builds the PyHouse object, parses the module's test
    YAML and constructs a lights LocalConfig for the test classes below.
    """

    def setUp(self):
        self.m_pyhouse_obj = SetupPyHouseObj().BuildPyHouseObj()
        self.m_test_config = YAML().load(TEST_YAML)
        self.m_config = lightsConfig(self.m_pyhouse_obj)
class A0(unittest.TestCase):
    """Identify this test module in the run output."""

    def test_00_Print(self):
        # reference PrettyFormatAny once so later cleanup of printing
        # does not leave it undefined/unused
        _x = PrettyFormatAny.form('_x', 'title')
        print('Id: test_lights')
class A1_Setup(SetupMixin, unittest.TestCase):
    """Verify the fixture objects that the later sections rely on."""

    def setUp(self):
        SetupMixin.setUp(self)
        self.m_api = lightsApi(self.m_pyhouse_obj)  # Must be done to setup module

    def test_01_Pyhouse(self):
        """The top-level PyHouse object was built."""
        self.assertIsNotNone(self.m_pyhouse_obj)

    def test_02_House(self):
        """The House division exists on the PyHouse object."""
        self.assertIsNotNone(self.m_pyhouse_obj.House)

    def test_03_Lighting(self):
        """The Lighting module object exists under House."""
        self.assertIsNotNone(self.m_pyhouse_obj.House.Lighting)

    def test_04_Lights(self):
        """The Lights collection exists under Lighting."""
        print(PrettyFormatAny.form(self.m_pyhouse_obj.House.Lighting.Lights, 'A1-04-A - Lights'))
        self.assertIsNotNone(self.m_pyhouse_obj.House.Lighting.Lights)
class A2_Repr(SetupMixin, unittest.TestCase):
    """Check that an extracted light object has a usable repr()."""

    def setUp(self):
        SetupMixin.setUp(self)
        # Must be done to setup module.
        # NOTE: this line was previously duplicated; the redundant second
        # lightsApi(...) construction has been removed.
        self.m_api = lightsApi(self.m_pyhouse_obj)
        self.m_local_config = lightsConfig(self.m_pyhouse_obj)

    def test_01_Pyhouse(self):
        """Extract all lights and exercise repr() on the first one."""
        l_yaml = self.m_test_config['Lights']
        l_lights = self.m_config._extract_all_lights(l_yaml)
        l_light = l_lights[0]
        print(PrettyFormatAny.form(l_light, 'A2-01-C - Lights'))
        l_repr = repr(l_light)
        print('A2-01-D {}'.format(l_repr))
        self.assertIsNotNone(self.m_pyhouse_obj)
class C1_Read(SetupMixin, unittest.TestCase):
    """Reading of the Yaml config used by lighting_lights."""

    def setUp(self):
        SetupMixin.setUp(self)
        self.m_api = lightsApi(self.m_pyhouse_obj)  # Must be done to setup module

    def test_01_Light0(self):
        """Extract the first light and check every decoded field."""
        l_node = self.m_test_config['Lights'][0]
        print('C1-01-A - Yaml: ', l_node)
        l_obj = self.m_config._extract_one_light(l_node)
        self.assertEqual(l_obj.Name, 'Front Door')
        self.assertEqual(l_obj.Comment, None)
        self.assertEqual(l_obj.DeviceType, 'Lighting')
        self.assertEqual(l_obj.DeviceSubType, 'Light')
        self.assertEqual(l_obj.Family.Name, 'Insteon')
        self.assertEqual(l_obj.Family.Address, '11.11.11')
        self.assertEqual(l_obj.Family.Type, 'Light')
        self.assertEqual(l_obj.Room.Name, 'Outside')

    def test_02_Light1(self):
        """Extract the second light (the dimmable garage light)."""
        l_node = self.m_test_config['Lights'][1]
        l_obj = self.m_config._extract_one_light(l_node)
        self.assertEqual(l_obj.Name, 'Garage')
        self.assertEqual(l_obj.Comment, None)
        self.assertEqual(l_obj.Family.Name, 'Insteon')
        self.assertEqual(l_obj.Family.Address, '22.22.22')

    def test_03_Light2(self):
        """Extract the third light (the one carrying a comment)."""
        l_node = self.m_test_config['Lights'][2]
        l_obj = self.m_config._extract_one_light(l_node)
        self.assertEqual(l_obj.Name, 'Buffet')
        self.assertEqual(l_obj.Comment, 'x')
        self.assertEqual(l_obj.Family.Name, 'Insteon')
        self.assertEqual(l_obj.Family.Address, '33.33.33')

    def test_09_Lights(self):
        """Extract all four lights and verify their names and order."""
        l_node = self.m_test_config['Lights']
        l_objs = self.m_config._extract_all_lights(l_node)
        self.assertEqual(l_objs[0].Name, 'Front Door')
        self.assertEqual(l_objs[1].Name, 'Garage')
        self.assertEqual(l_objs[2].Name, 'Buffet')
        self.assertEqual(l_objs[3].Name, 'Wet Bar')
class D1_Write(SetupMixin, unittest.TestCase):
    """Writing of the Yaml config used by lighting_lights.

    Most cases here are disabled (prefixed 'Xtest_') pending implementation.
    """

    def setUp(self):
        SetupMixin.setUp(self)
        self.m_yaml = self.m_test_config['Lights']
        self.m_lights = self.m_config._extract_all_lights(self.m_yaml)
        self.m_api = lightsApi(self.m_pyhouse_obj)  # Must be done to setup module
        self.m_pyhouse_obj.House.Lighting.Lights = self.m_lights

    def Xtest_01_base(self):
        """Disabled: the Yaml skeleton contains an empty Lights entry."""
        l_skeleton = self.m_config._build_yaml()
        print(l_skeleton, 'D1-01-A - base')
        self.assertEqual(l_skeleton['Lights'], None)

    def Xtest_02_Light0(self):
        """Disabled: round-trip the first light back to config form."""
        l_light = self.m_lights[0]
        print(PrettyFormatAny.form(l_light, 'D1-02-A - Light0'))
        l_config = self.m_config._save_one_light(l_light)
        print(PrettyFormatAny.form(l_config, 'D1-02-B - Light'))

    def Xtest_03_Light1(self):
        """Disabled: round-trip the second light back to config form."""
        l_light = self.m_lights[1]
        print(PrettyFormatAny.form(l_light, 'D1-03-A - Light'))
        l_config = self.m_config._save_one_light(l_light)
        print(PrettyFormatAny.form(l_config, 'D1-03-B - Light'))

    def Xtest_04_AddLight0(self):
        """Disabled: add-light round trip (not yet written)."""

    def Xtest_09_Lights(self):
        """Disabled: save the whole Lights collection."""
        l_skeleton = self.m_config._build_yaml()
        print(PrettyFormatAny.form(self.m_lights, 'D1-09-A - Lights'))
        l_config = self.m_config._save_all_lights(l_skeleton)
        print(PrettyFormatAny.form(l_config, 'D1-09-B - Node'))
        print(l_config, 'D1-09-C - Node')
class M1_Mqtt(SetupMixin, unittest.TestCase):
    """Publishing of MQTT messages."""

    def setUp(self):
        SetupMixin.setUp(self)
        self.m_api = lightsApi(self.m_pyhouse_obj)  # Must be done to setup module
        self.m_yaml = self.m_test_config['Lights']
        self.m_lights = self.m_config._extract_all_lights(self.m_yaml)
        self.m_pyhouse_obj.House.Lighting.Lights = self.m_lights

    def test_01_base(self):
        """The freshly built Yaml skeleton has an empty Lights entry."""
        l_skeleton = self.m_config._build_yaml()
        print(l_skeleton, 'D1-01-A - base')
        self.assertEqual(l_skeleton['Lights'], None)
class M2_Mqtt(SetupMixin, unittest.TestCase):
    """Dispatch of MQTT messages."""

    def setUp(self):
        SetupMixin.setUp(self)
        self.m_api = lightsApi(self.m_pyhouse_obj)  # Must be done to setup module
        self.m_yaml = self.m_test_config['Lights']
        self.m_lights = self.m_config._extract_all_lights(self.m_yaml)
        self.m_pyhouse_obj.House.Lighting.Lights = self.m_lights

    def test_01_base(self):
        """The freshly built Yaml skeleton has an empty Lights entry."""
        l_skeleton = self.m_config._build_yaml()
        print(l_skeleton, 'D1-01-A - base')
        self.assertEqual(l_skeleton['Lights'], None)
class Z9_YamlWrite(SetupMixin, unittest.TestCase):
    """Placeholder for Yaml write tests (nothing exercised yet)."""

    def setUp(self):
        SetupMixin.setUp(self)

    def test_01_(self):
        """Intentionally empty until write support is implemented."""
        pass
# ## END DBK
| {
"content_hash": "55a17436eb8b12805e4bd88332111496",
"timestamp": "",
"source": "github",
"line_count": 304,
"max_line_length": 97,
"avg_line_length": 34.161184210526315,
"alnum_prop": 0.6129032258064516,
"repo_name": "DBrianKimmel/PyHouse",
"id": "194fb8daf219a8d2de2582cfc33ebd9c42698cb2",
"size": "10385",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "Project/src/Modules/House/Lighting/Lights/_test/test_lights.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "114778"
},
{
"name": "HTML",
"bytes": "15398"
},
{
"name": "JavaScript",
"bytes": "220171"
},
{
"name": "Python",
"bytes": "1491784"
},
{
"name": "Shell",
"bytes": "2131"
}
],
"symlink_target": ""
} |
import logging
import os
from tests.beeswax.impala_beeswax import ImpalaBeeswaxClient, ImpalaBeeswaxResult
from tests.performance.query import Query, QueryResult
# Setup logging for this module.
logging.basicConfig(level=logging.INFO, format='[%(name)s] %(threadName)s: %(message)s')
LOG = logging.getLogger('query_executor')
LOG.setLevel(level=logging.INFO)
# globals.
hive_result_regex = 'Time taken: (\d*).(\d*) seconds'
## TODO: Split executors into their own modules.
class QueryExecConfig(object):
    """Root of the execution-config hierarchy.

    Attributes:
      plugin_runner: optional PluginRunner shared by all executor configs
    """

    def __init__(self, plugin_runner=None):
        # stored as-is; subclasses layer engine-specific settings on top
        self.plugin_runner = plugin_runner
class ImpalaQueryExecConfig(QueryExecConfig):
    """Base class for Impala query execution config

    Attributes:
      impalad (str): address of impalad <host>:<port>
    """

    def __init__(self, plugin_runner=None, impalad='localhost:21000'):
        super(ImpalaQueryExecConfig, self).__init__(plugin_runner=plugin_runner)
        self._impalad = impalad

    @property
    def impalad(self):
        """Address of the impalad this config targets."""
        return self._impalad

    @impalad.setter
    def impalad(self, value):
        # mutable so QueryExecutor.prepare() can retarget a query
        self._impalad = value
class JdbcQueryExecConfig(ImpalaQueryExecConfig):
    """Impala query execution config for jdbc

    Attributes:
      transport: transport flag handed to the jdbc client (-t)
    """

    # shell script wrapping the java jdbc client; requires IMPALA_HOME
    JDBC_CLIENT_PATH = os.path.join(os.environ['IMPALA_HOME'], 'bin/run-jdbc-client.sh')

    def __init__(self, plugin_runner=None, impalad='localhost:21050', transport=None):
        super(JdbcQueryExecConfig, self).__init__(plugin_runner=plugin_runner,
                                                  impalad=impalad)
        self.transport = transport

    @property
    def jdbc_client_cmd(self):
        """The args to run the jdbc client.

        Constructed on the fly, since the impalad it points to can change.
        """
        return JdbcQueryExecConfig.JDBC_CLIENT_PATH + ' -i "%s" -t %s' % (
            self._impalad, self.transport)
class BeeswaxQueryExecConfig(ImpalaQueryExecConfig):
    """Impala query execution config for beeswax

    Args:
      use_kerberos (boolean)
      exec_options (str): String formatted as "opt1:val1;opt2:val2"
      impalad (str): address of impalad <host>:<port>
      plugin_runner (?): ?

    Attributes:
      use_kerberos (boolean)
      exec_options (dict str -> str): execution options
    """

    def __init__(self, use_kerberos=False, exec_options=None, impalad='localhost:21000',
                 plugin_runner=None):
        super(BeeswaxQueryExecConfig, self).__init__(plugin_runner=plugin_runner,
                                                     impalad=impalad)
        self.use_kerberos = use_kerberos
        self.exec_options = dict()
        self._build_options(exec_options)

    def _build_options(self, exec_options):
        """Read the exec_options into self.exec_options

        Args:
          exec_options (str): String formatted as "opt1:val1;opt2:val2"
        """
        if exec_options:
            # exec_options are seperated by ; on the command line
            options = exec_options.split(';')
            for option in options:
                # BUGFIX: split on the first ':' only, so option values that
                # themselves contain a colon (e.g. host:port pairs) no longer
                # raise "too many values to unpack"
                key, value = option.split(':', 1)
                # The keys in ImpalaService QueryOptions are upper case.
                self.exec_options[key.upper()] = value
class QueryExecutor(object):
    """Runs a single query through a pluggable execution function.

    Args:
      name (str): executor name, eg. "hive"
      query (str): SQL text to execute
      func (function): takes a QueryExecOption-style config and returns a
        QueryResult, eg. execute_using_impala_beeswax
      config (QueryExecOption)
      exit_on_error (boolean): raise immediately when a run fails

    Attributes:
      exec_func, exec_config, query, exit_on_error, executor_name: as above
      result (QueryResult): populated after execute() is called
    """

    def __init__(self, name, query, func, config, exit_on_error):
        self.executor_name = name
        self.query = query
        self.exec_func = func
        self.exec_config = config
        self.exit_on_error = exit_on_error
        # placeholder result; replaced by execute()
        self._result = QueryResult(query, query_config=self.exec_config)

    def prepare(self, impalad):
        """Prepare the query to be run.

        For now, this points the config at the impalad to connect to;
        it is a no-op when the executor is hive.
        """
        if self.executor_name != 'hive':
            self.exec_config.impalad = impalad

    def execute(self):
        """Execute the query using the given execution function."""
        self._result = self.exec_func(self.query, self.exec_config)
        if not self._result.success:
            if self.exit_on_error:
                raise RuntimeError(self._result.query_error)
            LOG.info("Continuing execution")

    @property
    def result(self):
        """QueryResult describing the most recent run of the query."""
        return self._result
| {
"content_hash": "9263b0780ddab34c609e99a42abbaac7",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 89,
"avg_line_length": 30.993788819875775,
"alnum_prop": 0.680561122244489,
"repo_name": "XiaominZhang/Impala",
"id": "4a3503eff09c7a761f74ace191f71911f985acd3",
"size": "6066",
"binary": false,
"copies": "5",
"ref": "refs/heads/cdh5-trunk",
"path": "tests/performance/query_executor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "203216"
},
{
"name": "C++",
"bytes": "7612000"
},
{
"name": "CMake",
"bytes": "105273"
},
{
"name": "CSS",
"bytes": "89516"
},
{
"name": "Groff",
"bytes": "1633"
},
{
"name": "HTML",
"bytes": "56"
},
{
"name": "Java",
"bytes": "3629777"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Lex",
"bytes": "21812"
},
{
"name": "PLpgSQL",
"bytes": "393"
},
{
"name": "Protocol Buffer",
"bytes": "630"
},
{
"name": "Python",
"bytes": "1744065"
},
{
"name": "SQLPL",
"bytes": "3253"
},
{
"name": "Shell",
"bytes": "161556"
},
{
"name": "Thrift",
"bytes": "243825"
},
{
"name": "Yacc",
"bytes": "80218"
}
],
"symlink_target": ""
} |
"""
Custom router to allow bulk deletion
Modified from https://github.com/miki725/django-rest-framework-bulk
"""
from __future__ import print_function, unicode_literals
import copy
from rest_framework.routers import DefaultRouter, SimpleRouter
class BulkDeleteRouter(DefaultRouter):
    """
    Router whose list route also accepts DELETE, dispatched to the
    ``bulk_destroy`` action provided by the bulk mixins.
    """
    # Deep-copy SimpleRouter's routes so the shared class attribute is not
    # mutated, then extend the list route's HTTP method map.
    routes = copy.deepcopy(SimpleRouter.routes)
    routes[0].mapping['delete'] = 'bulk_destroy'
| {
"content_hash": "74b22866bb6c97110c149370beab0ec3",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 67,
"avg_line_length": 25.42105263157895,
"alnum_prop": 0.7204968944099379,
"repo_name": "jonboiser/kolibri",
"id": "b79e8959f766d02bb7e771216bf0ccc58ef636dc",
"size": "483",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "kolibri/core/routers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "864"
},
{
"name": "CSS",
"bytes": "29663"
},
{
"name": "HTML",
"bytes": "12733"
},
{
"name": "JavaScript",
"bytes": "786460"
},
{
"name": "Makefile",
"bytes": "7625"
},
{
"name": "Python",
"bytes": "1204842"
},
{
"name": "Shell",
"bytes": "10412"
},
{
"name": "Vue",
"bytes": "809549"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class BordercolorsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the scatterpolar.hoverlabel.bordercolorsrc property."""

    def __init__(
        self,
        plotly_name="bordercolorsrc",
        parent_name="scatterpolar.hoverlabel",
        **kwargs,
    ):
        # Pop edit_type first so it is not passed twice via **kwargs.
        edit_type = kwargs.pop("edit_type", "none")
        super(BordercolorsrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
| {
"content_hash": "6761bb68df1d18c108e5469701945031",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 73,
"avg_line_length": 29.375,
"alnum_prop": 0.5957446808510638,
"repo_name": "plotly/plotly.py",
"id": "6ea006daf35d08408155739db2827cbdd548e6ef",
"size": "470",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/scatterpolar/hoverlabel/_bordercolorsrc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
from django import forms
from django.contrib import admin
from django.core.exceptions import ObjectDoesNotExist
from stratum0.models import Stratum0, Stratum1, ReplicationSite
import cvmfs.repository
class Stratum0AdminForm(forms.ModelForm):
    class Meta:
        fields = '__all__'
        model = Stratum0

    def clean(self):
        """
        Checks that the provided URL points to a reachable CVMFS repository
        and that the repository is not already registered.
        """
        url = self.cleaned_data['url']
        fqrn = ""
        try:
            repo = cvmfs.open_repository(url)
            fqrn = repo.fqrn
        # 'except X, e' is Python-2-only syntax and the binding was unused;
        # this form is valid on both Python 2.6+ and Python 3.
        except cvmfs.repository.RepositoryNotFound:
            raise forms.ValidationError(
                    "The URL '%s' does not point to a CVMFS repository" % url)
        try:
            stratum0 = Stratum0.objects.get(fqrn=fqrn)
            # Only reject when creating a new record; editing the existing
            # registration of this repository is allowed.
            if not self.instance.pk and stratum0:
                raise forms.ValidationError(
                        "The URL '%s' points to the repository '%s' which is already used by '%s'." % (url, fqrn, stratum0.name))
        except ObjectDoesNotExist:
            pass
        return self.cleaned_data
class Stratum0Admin(admin.ModelAdmin):
    fields = ['name', 'url', 'project_url', 'project_description']
    form = Stratum0AdminForm
    list_display = ['name', 'fqrn', 'url']

    def save_model(self, request, obj, form, change):
        # Availability of obj.url was already verified in
        # Stratum0AdminForm.clean, so opening the repository again here is
        # expected to succeed; it supplies the fully qualified repo name.
        obj.fqrn = cvmfs.open_repository(obj.url).fqrn
        super(Stratum0Admin, self).save_model(request, obj, form, change)
class Stratum1AdminForm(forms.ModelForm):
    class Meta:
        fields = '__all__'
        model = Stratum1

    def clean(self):
        """
        Checks that the provided URL points to a reachable CVMFS replica and
        that its stratum 0 repository is registered locally.
        """
        url = self.cleaned_data['url']
        fqrn = ""
        try:
            repo = cvmfs.open_repository(url)
            fqrn = repo.fqrn
        # 'except X, e' is Python-2-only syntax and the binding was unused;
        # this form is valid on both Python 2.6+ and Python 3.
        except cvmfs.repository.RepositoryNotFound:
            raise forms.ValidationError("The URL '%s' does not point to a CVMFS replica" % url)
        # exists() avoids fetching rows just to test for presence.
        if not Stratum0.objects.filter(fqrn=fqrn).exists():
            raise forms.ValidationError("The Stratum 0 of '%s' is not registered." % fqrn)
        return self.cleaned_data
class Stratum1Admin(admin.ModelAdmin):
    fields = ['replication_site', 'url']
    form = Stratum1AdminForm
    list_display = ['replication_site', 'stratum0', 'url']
    list_filter = ['stratum0']

    def save_model(self, request, obj, form, change):
        # URL reachability and stratum 0 registration were already checked in
        # Stratum1AdminForm.clean; look the stratum 0 record up again here to
        # link it to the replica being saved.
        repo = cvmfs.open_repository(obj.url)
        obj.stratum0 = Stratum0.objects.get(fqrn=repo.fqrn)
        super(Stratum1Admin, self).save_model(request, obj, form, change)
# Expose the replication models in the Django admin, using the customized
# admin classes where they exist.
admin.site.register(Stratum0, Stratum0Admin)
admin.site.register(Stratum1, Stratum1Admin)
admin.site.register(ReplicationSite)
| {
"content_hash": "2319a76e9987bd0942ff97cf206b88bb",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 125,
"avg_line_length": 32.712765957446805,
"alnum_prop": 0.6214634146341463,
"repo_name": "cvmfs/monitor",
"id": "bb82c6ad3304754e25092205171669386de87114",
"size": "3075",
"binary": false,
"copies": "1",
"ref": "refs/heads/devel",
"path": "stratum0/admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "868"
},
{
"name": "HTML",
"bytes": "13606"
},
{
"name": "JavaScript",
"bytes": "5220"
},
{
"name": "Python",
"bytes": "29127"
},
{
"name": "Shell",
"bytes": "3068"
}
],
"symlink_target": ""
} |
"""
DB-API Shortcuts
``get_object_or_404()`` is a shortcut function to be used in view functions for
performing a ``get()`` lookup and raising a ``Http404`` exception if a
``DoesNotExist`` exception was raised during the ``get()`` call.
``get_list_or_404()`` is a shortcut function to be used in view functions for
performing a ``filter()`` lookup and raising a ``Http404`` exception if a
``DoesNotExist`` exception was raised during the ``filter()`` call.
"""
from django.db import models
class Author(models.Model):
    name = models.CharField(max_length=50)

    def __str__(self):
        # Used wherever the author is rendered as text.
        return self.name
class ArticleManager(models.Manager):
    """Manager yielding only articles with an author whose name contains 'sir'."""

    def get_queryset(self):
        base_qs = super(ArticleManager, self).get_queryset()
        return base_qs.filter(authors__name__icontains='sir')
class Article(models.Model):
    # Many-to-many: an article can have several authors and vice versa.
    authors = models.ManyToManyField(Author)
    title = models.CharField(max_length=50)

    # Plain default manager plus a filtered alternative (ArticleManager).
    objects = models.Manager()
    by_a_sir = ArticleManager()

    def __str__(self):
        return self.title
| {
"content_hash": "22445f4e022bc1eac21fcede63b18605",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 96,
"avg_line_length": 28.942857142857143,
"alnum_prop": 0.6910167818361304,
"repo_name": "twz915/django",
"id": "ce74bc04adb9ae3126b79a6a4f4f26b1397d4c8d",
"size": "1013",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/get_object_or_404/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "55929"
},
{
"name": "HTML",
"bytes": "182880"
},
{
"name": "JavaScript",
"bytes": "252645"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "11852079"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
} |
"""Data store proxy for a data server."""
import base64
import functools
import os
import threading
import time
import uuid
import logging
from grr.lib import access_control
from grr.lib import config_lib
from grr.lib import data_store
from grr.lib import rdfvalue
from grr.lib import utils
from grr.lib.data_stores import common
# Well-known subject name under which the data-server mapping is persisted;
# MAP_SUBJECT is the prefixed form used as the actual datastore subject.
BASE_MAP_SUBJECT = "servers_map"
MAP_SUBJECT = "aff4:/" + BASE_MAP_SUBJECT
# Predicate under which the serialized mapping value is stored.
MAP_VALUE_PREDICATE = "metadata:value"
def RPCWrapper(f):
  """A decorator for converting exceptions to rpc status messages.

  This decorator should be inserted below the rpcserver.Handler call to
  prevent normal exceptions from reaching the RPC layer. These expected
  exceptions are encoded into the status of the response instead.

  Args:
    f: The function to wrap.

  Returns:
    A decorator function.
  """
  @functools.wraps(f)
  def Wrapped(self, request):
    """Call f, converting expected exceptions into response status codes."""
    response = rdfvalue.DataStoreResponse()
    response.status = rdfvalue.DataStoreResponse.Status.OK
    succeeded = False
    try:
      f(self, request, response)
      succeeded = True
    except access_control.UnauthorizedAccess as e:
      # Attach a copy of the request to the response so the caller can tell
      # why we failed the request.
      response.Clear()
      response.request = request
      response.status = rdfvalue.DataStoreResponse.Status.AUTHORIZATION_DENIED
      if e.subject:
        response.failed_subject = utils.SmartUnicode(e.subject)
      response.status_desc = utils.SmartUnicode(e)
    except data_store.Error as e:
      response.Clear()
      response.request = request
      response.status = rdfvalue.DataStoreResponse.Status.DATA_STORE_ERROR
      response.status_desc = utils.SmartUnicode(e)
    except access_control.ExpiryError as e:
      response.Clear()
      response.request = request
      response.status = rdfvalue.DataStoreResponse.Status.TIMEOUT_ERROR
      response.status_desc = utils.SmartUnicode(e)
    if not succeeded:
      # Limit the size of the error report since it can be quite large.
      logging.info("Failed: %s", utils.SmartStr(response)[:1000])
    return response.SerializeToString()
  return Wrapped
class DataStoreService(object):
  """Class that responds to DataStore requests."""

  def __init__(self, db):
    """Wraps the data store implementation that actually stores values.

    Args:
      db: the underlying data store object.
    """
    self.db = db
    # Guards self.transactions below.
    self.transaction_lock = threading.Lock()
    # Maps subject -> (transaction id, lease expiry as seconds since epoch).
    self.transactions = {}
    old_pathing = config_lib.CONFIG.Get("Datastore.pathing")
    # Need to add a fixed rule for the file where the server mapping is stored.
    new_pathing = [r"(?P<path>" + BASE_MAP_SUBJECT + ")"] + old_pathing
    self.pathing = new_pathing
    self.db.RecreatePathing(self.pathing)

  # Every service method must write to the response argument.
  # The response will then be serialized to a string.
  @RPCWrapper
  def MultiSet(self, request, unused_response):
    """Set multiple attributes for a given subject at once."""
    values = {}
    to_delete = set()
    for value in request.values:
      if value.option == rdfvalue.DataStoreValue.Option.REPLACE:
        to_delete.add(value.predicate)
      # Request-level timestamp is the default; a per-value timestamp
      # overrides it below.
      timestamp = self.FromTimestampSpec(request.timestamp)
      if value.HasField("value"):
        if value.HasField("timestamp"):
          timestamp = self.FromTimestampSpec(value.timestamp)
        values.setdefault(value.predicate, []).append(
            (value.value.GetValue(), timestamp))
    self.db.MultiSet(request.subject[0], values, to_delete=to_delete,
                     sync=request.sync, replace=False,
                     token=request.token)

  @RPCWrapper
  def ResolveMulti(self, request, response):
    """Resolve multiple attributes for a given subject at once."""
    predicate_regex = []
    for v in request.values:
      predicate_regex.append(v.predicate)
    timestamp = self.FromTimestampSpec(request.timestamp)
    subject = request.subject[0]
    values = self.db.ResolveMulti(
        subject, predicate_regex, timestamp=timestamp,
        token=request.token)
    # Falsy values are skipped; byte strings get base64-wrapped by _Encode.
    response.results.Append(
        subject=subject,
        payload=[(predicate, self._Encode(value), int(ts))
                 for (predicate, value, ts) in values if value])

  @RPCWrapper
  def MultiResolveRegex(self, request, response):
    """Resolve multiple attributes for a given subject at once."""
    predicate_regex = [utils.SmartUnicode(v.predicate) for v in request.values]
    timestamp = self.FromTimestampSpec(request.timestamp)
    subjects = list(request.subject)
    for subject, values in self.db.MultiResolveRegex(
        subjects, predicate_regex, timestamp=timestamp,
        token=request.token,
        limit=request.limit):
      # One result entry per subject.
      response.results.Append(
          subject=subject,
          payload=[(utils.SmartStr(predicate), self._Encode(value), int(ts))
                   for (predicate, value, ts) in values])

  @RPCWrapper
  def DeleteAttributes(self, request, unused_response):
    """Delete attributes from a given subject."""
    timestamp = self.FromTimestampSpec(request.timestamp)
    subject = request.subject[0]
    sync = request.sync
    token = request.token
    attributes = [v.predicate for v in request.values]
    # A RANGED_TIME spec yields a (start, end) tuple here.
    start, end = timestamp # pylint: disable=unpacking-non-sequence
    self.db.DeleteAttributes(subject, attributes, start=start, end=end,
                             token=token, sync=sync)

  @RPCWrapper
  def DeleteAttributesRegex(self, request, unused_response):
    """Delete all attributes matching the given regexes from a subject."""
    subject = request.subject[0]
    token = request.token
    attr_regexes = [v.predicate for v in request.values]
    self.db.DeleteAttributesRegex(subject, attr_regexes, token=token)

  @RPCWrapper
  def DeleteSubject(self, request, unused_response):
    """Delete an entire subject from the underlying data store."""
    subject = request.subject[0]
    token = request.token
    self.db.DeleteSubject(subject, token=token)

  def _NewTransaction(self, subject, duration, response):
    """Create a new lease on subject and record its id in the response."""
    transid = utils.SmartStr(uuid.uuid4())
    now = time.time()
    self.transactions[subject] = (transid, now + duration)
    self._AddTransactionId(response, subject, transid)

  def _AddTransactionId(self, response, subject, transid):
    """Append the transaction id string to the response results."""
    blob = rdfvalue.DataBlob(string=transid)
    value = rdfvalue.DataStoreValue(value=blob)
    response.results.Append(subject=subject, values=[value])

  @RPCWrapper
  def LockSubject(self, request, response):
    """Try to lock a subject; on success the response carries the lock id.

    An empty response (no results appended) means the lock was not acquired.
    """
    duration = self.FromTimestampSpec(request.timestamp)
    if not request.subject:
      # No return value.
      return
    subject = request.subject[0]
    with self.transaction_lock:
      # Check if there is a transaction.
      try:
        _, lease = self.transactions[subject]
        if time.time() > lease:
          # The previous lease expired, so the lock can be taken over.
          self._NewTransaction(subject, duration, response)
        else:
          # Failed to get transaction.
          # Do not need to do anything
          pass
      except KeyError:
        return self._NewTransaction(subject, duration, response)

  def _GetTransactionId(self, request):
    # The transaction id travels as the string blob of the first value.
    return request.values[0].value.string

  @RPCWrapper
  def ExtendSubject(self, request, response):
    """Renew the lease of a held lock; echoes the id back on success."""
    duration = self.FromTimestampSpec(request.timestamp)
    if not request.subject or not request.values:
      # No return value.
      return
    subject = request.subject[0]
    transid = self._GetTransactionId(request)
    with self.transaction_lock:
      # Check if there is a transaction.
      try:
        current, _ = self.transactions[subject]
        if transid != current:
          # Invalid transaction ID.
          return
        self.transactions[subject] = (transid, time.time() + duration)
        # Add return value to response.
        self._AddTransactionId(response, subject, transid)
      except KeyError:
        # Invalid transaction ID.
        pass

  @RPCWrapper
  def UnlockSubject(self, request, response):
    """Release a held lock; echoes the id back on success."""
    if not request.subject or not request.values:
      return
    subject = request.subject[0]
    transid = self._GetTransactionId(request)
    with self.transaction_lock:
      # Check if there is a transaction.
      try:
        current, _ = self.transactions[subject]
        if transid != current:
          # Invalid transaction ID.
          return
        del self.transactions[subject]
        # Add return value to response.
        self._AddTransactionId(response, subject, transid)
      except KeyError:
        # Invalid transaction ID.
        pass

  def FromTimestampSpec(self, timestamp):
    """Converts constants from TimestampSpec() to the datastore ones."""
    if timestamp.type == timestamp.Type.NEWEST_TIMESTAMP:
      return self.db.NEWEST_TIMESTAMP
    if timestamp.type == timestamp.Type.ALL_TIMESTAMPS:
      return self.db.ALL_TIMESTAMPS
    if timestamp.type == timestamp.Type.RANGED_TIME:
      return (int(timestamp.start), int(timestamp.end))
    if timestamp.type == timestamp.Type.SPECIFIC_TIME:
      return int(timestamp.start)

  def _Encode(self, value):
    # Byte strings are base64-wrapped; the trailing 1 marks the value as
    # encoded (presumably so the receiving side knows to decode -- confirm
    # against the client implementation).
    if isinstance(value, str):
      return [base64.encodestring(value), 1]
    return value

  def Size(self):
    """Return the size reported by the underlying data store."""
    return self.db.Size()

  def LoadServerMapping(self):
    """Retrieve server mapping from database."""
    mapping_str, _ = self.db.Resolve(MAP_SUBJECT, MAP_VALUE_PREDICATE)
    if not mapping_str:
      return None
    mapping = rdfvalue.DataServerMapping(mapping_str)
    # Restore pathing information.
    if self._DifferentPathing(list(mapping.pathing)):
      self.pathing = list(mapping.pathing)
      self.db.RecreatePathing(self.pathing)
    return mapping

  def _DifferentPathing(self, new_pathing):
    """Check if we have a new pathing."""
    if len(new_pathing) != len(self.pathing):
      return True
    for i, path in enumerate(new_pathing):
      if path != self.pathing[i]:
        return True
    return False

  def SaveServerMapping(self, mapping, create_pathing=False):
    """Stores the server mapping in the data store."""
    if create_pathing:
      # We are going to use our own pathing.
      mapping.pathing = self.pathing
    else:
      # We are going to use the mapping pathing configuration.
      # Check if its different than the one we use now and then ask the
      # datastore to use it.
      new_pathing = list(mapping.pathing)
      if self._DifferentPathing(new_pathing):
        self.pathing = new_pathing
        self.db.RecreatePathing(new_pathing)
    self.db.MultiSet(MAP_SUBJECT, {MAP_VALUE_PREDICATE: mapping})

  def GetLocation(self):
    """Return the filesystem location of the underlying data store."""
    return self.db.Location()

  def GetComponentInformation(self):
    """Return number of components and average size per component."""
    loc = self.GetLocation()
    if not os.path.exists(loc):
      return 0, 0
    if not os.path.isdir(loc):
      return 0, 0
    size, files = common.DatabaseDirectorySize(loc, self.db.FileExtension())
    if files:
      return files, int(float(size)/float(files))
    else:
      return 0, 0
| {
"content_hash": "baf968c9a71314439f673dab5bbf363a",
"timestamp": "",
"source": "github",
"line_count": 339,
"max_line_length": 79,
"avg_line_length": 32.823008849557525,
"alnum_prop": 0.6804170036847308,
"repo_name": "defaultnamehere/grr",
"id": "346960a3db3817d39e2f5c3aefa643ab7dce453b",
"size": "11149",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "server/data_server/store.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "227"
},
{
"name": "C++",
"bytes": "55149"
},
{
"name": "CSS",
"bytes": "36345"
},
{
"name": "JavaScript",
"bytes": "831633"
},
{
"name": "Makefile",
"bytes": "5939"
},
{
"name": "Python",
"bytes": "4541648"
},
{
"name": "Shell",
"bytes": "31077"
}
],
"symlink_target": ""
} |
""" Include Bokeh plots in Sphinx HTML documentation.
For other output types, the placeholder text ``[graph]`` will
be generated.
The ``bokeh-plot`` directive can be used by either supplying:
**A path to a source file** as the argument to the directive::
.. bokeh-plot:: path/to/plot.py
.. note::
.py scripts are not scanned automatically! In order to include
certain directories into .py scanning process use following directive
in sphinx conf.py file: bokeh_plot_pyfile_include_dirs = ["dir1","dir2"]
**Inline code** as the content of the directive::
.. bokeh-plot::
from bokeh.plotting import figure, output_file, show
output_file("example.html")
x = [1, 2, 3, 4, 5]
y = [6, 7, 6, 4, 5]
p = figure(title="example", plot_width=300, plot_height=300)
p.line(x, y, line_width=2)
p.circle(x, y, size=10, fill_color="white")
show(p)
This directive also works in conjunction with Sphinx autodoc, when
used in docstrings.
The ``bokeh-plot`` directive accepts the following options:
source-position : enum('above', 'below', 'none')
    Where to locate the block of formatted source
code (if anywhere).
linenos : flag
Whether to display line numbers along with the source.
Examples
--------
The inline example code above produces the following output:
.. bokeh-plot::
from bokeh.plotting import figure, output_file, show
output_file("example.html")
x = [1, 2, 3, 4, 5]
y = [6, 7, 6, 4, 5]
p = figure(title="example", plot_width=300, plot_height=300)
p.line(x, y, line_width=2)
p.circle(x, y, size=10, fill_color="white")
show(p)
"""
from __future__ import absolute_import
import ast
from os import getenv
from os.path import basename, dirname, join
import re
from uuid import uuid4
from docutils import nodes
from docutils.parsers.rst import Directive, Parser
from docutils.parsers.rst.directives import choice, flag
from sphinx.errors import SphinxError
from sphinx.util import console, copyfile, ensuredir
from sphinx.util.nodes import set_source_info
from ..document import Document
from ..embed import autoload_static
from ..resources import Resources
from ..settings import settings
from ..util.string import decode_utf8
from .example_handler import ExampleHandler
from .templates import PLOT_PAGE
docs_cdn = settings.docs_cdn()

# if BOKEH_DOCS_CDN is unset just use default CDN resources
if docs_cdn is None:
    resources = Resources(mode="cdn")
else:
    # "BOKEH_DOCS_CDN=local" is used for building and displaying the docs locally
    if docs_cdn == "local":
        resources = Resources(mode="server", root_url="/en/latest/")

    # "BOKEH_DOCS_CDN=test:newthing" is used for building and deploying test docs to
    # a one-off location "en/newthing" on the docs site
    elif docs_cdn.startswith("test:"):
        resources = Resources(mode="server", root_url="/en/%s/" % docs_cdn.split(":")[1])

    # Otherwise assume it is a dev/rc/full release version and use CDN for it
    else:
        resources = Resources(mode="cdn", version=docs_cdn)

GOOGLE_API_KEY = getenv('GOOGLE_API_KEY')
if GOOGLE_API_KEY is None:
    if settings.docs_missing_api_key_ok():
        GOOGLE_API_KEY = "MISSING_API_KEY"
    else:
        raise SphinxError("The GOOGLE_API_KEY environment variable is not set. Set GOOGLE_API_KEY to a valid API key, "
                          "or set BOKEH_DOCS_MISSING_API_KEY_OK=yes to build anyway (with broken GMaps)")

# Matches "# -*- coding: ... -*-" marker lines so they can be stripped from
# example sources before processing.
CODING = re.compile(r"^# -\*- coding: (.*) -\*-$", re.M)
class PlotScriptError(SphinxError):
    """ Error during script parsing. """

    # Used by Sphinx when reporting the error.
    category = 'PlotScript error'
def _process_script(source, filename, auxdir, js_name):
    """Run an example script and write its autoload JS into auxdir.

    Returns a tuple (script_tag, js_text, js_path, source), where ``source``
    has had any coding-cookie line removed.
    """
    # This is lame, but seems to be required for python 2
    source = CODING.sub("", source)

    # quick and dirty way to inject Google API key
    if "GOOGLE_API_KEY" in source:
        run_source = source.replace("GOOGLE_API_KEY", GOOGLE_API_KEY)
    else:
        run_source = source

    handler = ExampleHandler(source=run_source, filename=filename)
    doc = Document()
    handler.modify_document(doc)
    if handler.error:
        raise PlotScriptError(handler.error_detail)

    script_path = join("/scripts", js_name)
    js_path = join(auxdir, js_name)
    js, script = autoload_static(doc.roots[0], resources, script_path)

    with open(js_path, "w") as file_handle:
        file_handle.write(js)

    return (script, js, js_path, source)
class PlotScriptParser(Parser):
    """ This Parser recognizes .py files in the Sphinx source tree,
    assuming that they contain bokeh examples

    Note: it is important that the .py files are parsed first. This is
    accomplished by reordering the doc names in the env_before_read_docs callback
    """

    supported = ('python',)

    def parse(self, source, document):
        """ Parse ``source``, write results to ``document``.
        """
        # This is lame, but seems to be required for python 2
        source = CODING.sub("", source)

        env = document.settings.env
        filename = env.doc2path(env.docname) # e.g. full path to docs/user_guide/examples/layout_vertical

        # This code splits the source into two parts: the docstring (or None if
        # there is not one), and the remaining source code after
        m = ast.parse(source)
        docstring = ast.get_docstring(m)
        if docstring is not None:
            lines = source.split("\n")
            lineno = m.body[0].lineno # assumes docstring is m.body[0]
            source = "\n".join(lines[lineno:])

        # Unique name so generated scripts from different docs never collide.
        js_name = "bokeh-plot-%s.js" % uuid4().hex

        (script, js, js_path, source) = _process_script(source, filename, env.bokeh_plot_auxdir, js_name)

        # Record the generated artifacts so build_finished can copy them out.
        env.bokeh_plot_files[env.docname] = (script, js, js_path, source)

        rst = PLOT_PAGE.render(source=source,
                               filename=basename(filename),
                               docstring=docstring,
                               script=script)

        # Flag this doc so html_page_context injects BokehJS css/js resources.
        document['bokeh_plot_include_bokehjs'] = True

        # can't use super, Sphinx Parser classes don't inherit object
        Parser.parse(self, rst, document)
class BokehPlotDirective(Directive):
    """Implements ``bokeh-plot`` for both inline code and example files.

    Produces a target node, the rendered plot (autoload script tag), and
    optionally the formatted source code above or below the plot.
    """

    has_content = True
    optional_arguments = 2

    option_spec = {
        'source-position': lambda x: choice(x, ('below', 'above', 'none')),
        'linenos': lambda x: True if flag(x) is None else False,
    }

    def run(self):
        env = self.state.document.settings.env
        app = env.app

        # filename *or* python code content, but not both
        if self.arguments and self.content:
            raise SphinxError("bokeh-plot:: directive can't have both args and content")

        # process inline examples here
        if self.content:
            app.debug("[bokeh-plot] handling inline example in %r", env.docname)
            source = '\n'.join(self.content)
            # need docname not to look like a path
            docname = env.docname.replace("/", "-")
            js_name = "bokeh-plot-%s-inline-%s.js" % (docname, uuid4().hex)
            # the code runner just needs a real path to cd to, this will do
            path = join(env.bokeh_plot_auxdir, js_name)
            (script, js, js_path, source) = _process_script(source, path, env.bokeh_plot_auxdir, js_name)
            env.bokeh_plot_files[js_name] = (script, js, js_path, source)

        # process example files here
        else:
            example_path = self.arguments[0][:-3] # remove the ".py"

            # if it's an "internal" example, the python parser has already handled it
            if example_path in env.bokeh_plot_files:
                app.debug("[bokeh-plot] handling internal example in %r: %s", env.docname, self.arguments[0])
                (script, js, js_path, source) = env.bokeh_plot_files[example_path]

            # handle examples external to the docs source, e.g. gallery examples
            else:
                app.debug("[bokeh-plot] handling external example in %r: %s", env.docname, self.arguments[0])
                # context manager closes the handle promptly (the original
                # leaked the file object returned by open())
                with open(self.arguments[0]) as f:
                    source = f.read()
                source = decode_utf8(source)
                docname = env.docname.replace("/", "-")
                js_name = "bokeh-plot-%s-external-%s.js" % (docname, uuid4().hex)
                (script, js, js_path, source) = _process_script(source, self.arguments[0], env.bokeh_plot_auxdir, js_name)
                env.bokeh_plot_files[js_name] = (script, js, js_path, source)

        # use the source file name to construct a friendly target_id
        target_id = "%s.%s" % (env.docname, basename(js_path))
        target = nodes.target('', '', ids=[target_id])
        result = [target]

        linenos = self.options.get('linenos', False)
        code = nodes.literal_block(source, source, language="python", linenos=linenos, classes=[])
        set_source_info(self, code)

        source_position = self.options.get('source-position', 'below')

        if source_position == "above": result += [code]

        result += [nodes.raw('', script, format="html")]

        if source_position == "below": result += [code]

        return result
def env_before_read_docs(app, env, docnames):
    """Order example docs first and drop .py docs outside the include dirs."""
    # Stable sort: docs with "examples" in the name come before the rest, so
    # the .py parser has filled env.bokeh_plot_files before other docs read it.
    docnames.sort(key=lambda name: 0 if "examples" in name else 1)

    include_dirs = tuple(env.app.config.bokeh_plot_pyfile_include_dirs)
    # Iterate over a snapshot, since docnames is mutated inside the loop.
    for name in [x for x in docnames if env.doc2path(x).endswith(".py")]:
        if not name.startswith(include_dirs):
            env.found_docs.remove(name)
            docnames.remove(name)
def builder_inited(app):
    """Create the bokeh-plot scratch directory and the per-doc file registry."""
    env = app.env
    env.bokeh_plot_auxdir = join(env.doctreedir, 'bokeh_plot')
    ensuredir(env.bokeh_plot_auxdir)  # sphinx/_build/doctrees/bokeh_plot
    if not hasattr(env, 'bokeh_plot_files'):
        env.bokeh_plot_files = {}
def html_page_context(app, pagename, templatename, context, doctree):
    """ Add BokehJS to pages that contain plots.
    """
    has_plots = bool(doctree and doctree.get('bokeh_plot_include_bokehjs'))
    if not has_plots:
        return
    context['bokeh_css_files'] = resources.css_files
    context['bokeh_js_files'] = resources.js_files
def build_finished(app, exception):
    """Copy every generated bokeh-plot JS file into <outdir>/scripts."""
    # Only the js_path component of each recorded tuple is needed here.
    files = {js_path for (_, _, js_path, _) in app.env.bokeh_plot_files.values()}

    files_iter = app.status_iterator(sorted(files),
                                     'copying bokeh-plot files... ',
                                     console.brown,
                                     len(files),
                                     lambda x: basename(x))

    for file in files_iter:
        target = join(app.builder.outdir, "scripts", basename(file))
        ensuredir(dirname(target))
        try:
            copyfile(file, target)
        except OSError as e:
            raise SphinxError('cannot copy local file %r, reason: %s' % (file, e))
def env_purge_doc(app, env, docname):
    """ Remove local files for a given document.
    """
    # pop() with a default makes this a no-op for docs without plot files.
    env.bokeh_plot_files.pop(docname, None)
def setup(app):
    """ sphinx config variable to scan .py files in provided directories only """
    app.add_config_value('bokeh_plot_pyfile_include_dirs', [], 'html')

    # Let Sphinx treat .py sources as documents containing bokeh examples.
    app.add_source_parser('.py', PlotScriptParser)

    app.add_directive('bokeh-plot', BokehPlotDirective)

    # Build lifecycle hooks; see the correspondingly named functions above.
    app.connect('env-before-read-docs', env_before_read_docs)
    app.connect('builder-inited', builder_inited)
    app.connect('html-page-context', html_page_context)
    app.connect('build-finished', build_finished)
    app.connect('env-purge-doc', env_purge_doc)
| {
"content_hash": "3fd11183eaa199c4262acc2965edcd39",
"timestamp": "",
"source": "github",
"line_count": 328,
"max_line_length": 122,
"avg_line_length": 35.082317073170735,
"alnum_prop": 0.6305726948813766,
"repo_name": "rs2/bokeh",
"id": "b297f7d9a889d0ad6a55f862e4bac0acd00aa1e2",
"size": "11507",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bokeh/sphinxext/bokeh_plot.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1442"
},
{
"name": "CSS",
"bytes": "101858"
},
{
"name": "CoffeeScript",
"bytes": "1220192"
},
{
"name": "HTML",
"bytes": "48230"
},
{
"name": "JavaScript",
"bytes": "57773"
},
{
"name": "Makefile",
"bytes": "1150"
},
{
"name": "Python",
"bytes": "2648330"
},
{
"name": "Shell",
"bytes": "8519"
},
{
"name": "TypeScript",
"bytes": "236495"
}
],
"symlink_target": ""
} |
from flask import Blueprint
utils = Blueprint(
'utils',
__name__,
url_prefix='/v1/utils'
)
@utils.route('/ping', methods=['GET'])
def ping():
return 'pong'
| {
"content_hash": "b9693abefcf2220adee7f05e8f233098",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 38,
"avg_line_length": 15.818181818181818,
"alnum_prop": 0.5977011494252874,
"repo_name": "Dinoshauer/img-resize",
"id": "6a000b5d300b208a542db5f9f90e0a342e751866",
"size": "174",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils/routes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15451"
}
],
"symlink_target": ""
} |
import setuptools
import unittest
def discover_tests():
    """Collect the project's unit tests (files matching ``*_test.py``).

    Referenced from ``setup()`` below via the dotted path
    ``setup.discover_tests``.
    """
    return unittest.TestLoader().discover('.', pattern='*_test.py')
if __name__ == '__main__':
    # Package metadata and install configuration; runs only when the file is
    # executed directly (e.g. ``python setup.py sdist``).
    setuptools.setup(
        name='mesonwrap',
        version='0.2.0',
        author='The Meson development team',
        license='Apache 2',
        url='https://github.com/mesonbuild/mesonwrap',
        packages=[
            'mesonwrap',
            'mesonwrap.tools',
            'wrapweb',
        ],
        # Non-Python assets shipped inside the wrapweb package.
        package_data={
            'wrapweb': [
                'static/ico/favicon.png',
                'static/css/*.css',
                'templates/*.html',
            ],
        },
        python_requires='>=3.7',
        install_requires=[
            'Flask',
            'GitPython',
            'PyGithub',
            'cachetools',
            'retrying',
            'requests',
            'requests-ftp',
        ],
        entry_points={
            'console_scripts': [
                'mesonwrap=mesonwrap.cli:Command',
            ],
        },
        # NOTE(review): ``test_suite`` is deprecated in modern setuptools; it
        # points at discover_tests() above via the dotted module path.
        test_suite='setup.discover_tests',
    )
| {
"content_hash": "1f8335d43a08383bb8a0027b7fb29459",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 57,
"avg_line_length": 24.622222222222224,
"alnum_prop": 0.4584837545126354,
"repo_name": "mesonbuild/wrapweb",
"id": "0b246840d91dbd31305c8c4937c80c560f26a625",
"size": "1132",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "298"
},
{
"name": "HTML",
"bytes": "3074"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Python",
"bytes": "99665"
}
],
"symlink_target": ""
} |
from sys import argv, exit
import tamil
import sys
def usage():
    """Return the one-line command usage string (trailing space preserved)."""
    return u"tscii2utf8.py <source-file> <destination-file> "
if __name__ == u"__main__":
    # Require BOTH the source and destination paths up front. Previously a
    # missing second argument fell through to the generic except-handler,
    # which itself crashed (it referenced source_file before assignment).
    if len(argv) < 3:
        print( usage() )
        exit(-1)
    source_file = argv[1]
    destination_file = argv[2]
    try:
        with open(source_file) as fileHandle:
            print("working on " + source_file + "\n")
            output = tamil.tscii.convert_to_unicode( fileHandle.read() )
            #print( output )
        # BUGFIX: the output is encoded to UTF-8 *bytes*, so the destination
        # must be opened in binary mode -- writing bytes to a text-mode handle
        # raises TypeError on Python 3. A with-block also guarantees close.
        with open(destination_file, "wb") as fi:
            fi.write(output.encode('utf-8'))
        print("TSCII to UTF8 conversion completed. Check the file " + destination_file)
    except Exception as fileOrConvException:
        print(u"tscii2utf8 error - file %s could not be processed due to - %s"%(source_file,str(fileOrConvException)))
| {
"content_hash": "fd825ce6eda6a345f1cbe73225b10f27",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 118,
"avg_line_length": 35.12,
"alnum_prop": 0.5831435079726651,
"repo_name": "atvKumar/open-tamil",
"id": "7623f7fc2d6f9ec5dc20f8c7be3e337d02691397",
"size": "950",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/tscii2utf8-example.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "14505"
},
{
"name": "HTML",
"bytes": "2558"
},
{
"name": "Java",
"bytes": "9842"
},
{
"name": "JavaScript",
"bytes": "9250"
},
{
"name": "Makefile",
"bytes": "146"
},
{
"name": "Python",
"bytes": "551116"
},
{
"name": "Ruby",
"bytes": "26442"
},
{
"name": "Shell",
"bytes": "3928"
}
],
"symlink_target": ""
} |
"""
ASGI config for restpython project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'restpython.settings')
application = get_asgi_application()
| {
"content_hash": "463a3c1e6139988116aa3ba6b2a9a90c",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 78,
"avg_line_length": 24.8125,
"alnum_prop": 0.7732997481108312,
"repo_name": "zerazobz/coding",
"id": "53dfaef4c49e539114c2e5d6fbd537a86549bd38",
"size": "397",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "restpython/asgi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "286"
},
{
"name": "Python",
"bytes": "7206"
}
],
"symlink_target": ""
} |
"""
Created on Wed Oct 4 10:22:49 2017
Converting excel files to csv
@author: Yuriy Sverchkov
"""
import logging
from sys import argv
from pandas import read_excel, concat
# Logging configuration: emit everything at DEBUG verbosity.
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
# Canonical column names, keyed by positional index in the source sheets.
standardized_columns = {
    0: 'Protein Name',
    1: 'Replicate Name',
    2: 'Peptide Sequence',
    3: 'Total Area Endogenous',
    4: 'Total Area Reference Standard',
    5: 'Endogenous to Reference Ratio',
    6: 'Corrected Ratio'
}


def standardize_names(df):
    """Rename the first seven columns of *df* to the standardized names.

    Six-column frames (old master mix) first get a 'Corrected Ratio' column
    copied from the raw ratio. NOTE: that copy mutates *df* in place; the
    rename itself returns a new frame.
    """
    if len(df.columns) == 6:
        # For the old master mix the corrected ratio is just the raw ratio.
        df['Corrected Ratio'] = df[df.columns[5]]
    mapping = {}
    for position in range(7):
        mapping[df.columns[position]] = standardized_columns[position]
    return df.rename(columns=mapping)
# Main run
if __name__ == '__main__':
    # Hand-rolled CLI parsing: every argument before "-o" is an input
    # workbook, the single argument after it is the output CSV path.
    workbook_files = []
    output_file = None
    reading_flag = True
    for arg in argv[1:]:
        if reading_flag:
            if arg != "-o":
                workbook_files.append( arg )
            else:
                reading_flag = False
        else:
            output_file = arg
            break

    # BUGFIX: previously a missing "-o <file>" left output_file unbound and
    # the log call below raised NameError. Fail with a clear message instead.
    if output_file is None:
        log.error( 'No output file given; usage: xlsx_to_single_csv.py <xlsx>... -o <csv>' )
        raise SystemExit(1)

    log.debug( 'Reading workbooks: {}; Writing csv: {}'.format( str( workbook_files ), output_file ) )

    dataframes = []
    for workbook_file in workbook_files:
        log.debug( 'Reading {}'.format( workbook_file ) )
        # sheet_name=None loads every sheet of the workbook as a dict of
        # DataFrames. NOTE(review): the original used the long-deprecated
        # spelling "sheetname", which modern pandas no longer accepts.
        dataframes += list( read_excel( workbook_file, sheet_name = None ).values() )
        log.debug( 'Read successfully' )

    log.debug( 'Composing one large dataframe' )
    big_df = concat( map( standardize_names, dataframes ) )

    log.debug( 'Writing to {}'.format( output_file ) )
    big_df.to_csv( path_or_buf = output_file, index = False )
| {
"content_hash": "4f6724d45f9aba230dfb859fa69555df",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 111,
"avg_line_length": 26.941176470588236,
"alnum_prop": 0.5982532751091703,
"repo_name": "sverchkov/ivancic-panel-selection",
"id": "dfd26fb7f21de651b564572abdb5b734e76efd58",
"size": "1856",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/xlsx_to_single_csv.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "7488"
},
{
"name": "Python",
"bytes": "38053"
},
{
"name": "R",
"bytes": "23977"
},
{
"name": "TeX",
"bytes": "1617"
}
],
"symlink_target": ""
} |
"""Distogram container used throughout ConKit"""
import numpy as np
from operator import attrgetter
from conkit.core.distance import Distance
from conkit.io._cache import PARSER_CACHE
from conkit.core.contactmap import ContactMap
class Distogram(ContactMap):
    """A distogram object to store all associated information. This class inherits methods and attributes
    from :obj:`~conkit.core.contactmap.ContactMap`

    Examples
    --------
    >>> import numpy as np
    >>> from conkit.core import Distance
    >>> from conkit.core import Distogram
    >>> distogram = Distogram("example")
    >>> distogram.add(Distance(1, 25, (0.25, 0.45, 0.25, 0.05), ((0, 4), (4, 6), (6, 8), (8, np.inf))))
    >>> distogram.add(Distance(7, 19, (0.15, 0.15, 0.60, 0.1), ((0, 4), (4, 6), (6, 8), (8, np.inf))))
    >>> print(distogram)
    Distogram(id="example" ndistances=2)

    Attributes
    ----------
    id : str
       A unique identifier
    original_file_format : str
       The original file format used to create the :obj:`~conkit.core.distogram.Distogram` instance
    ndistances : int
       The number of :obj:`~conkit.core.distance.Distance` instances in the :obj:`~conkit.core.distogram.Distogram`
    """

    __slots__ = ["_original_file_format", "_sequence"]

    def __init__(self, id):
        self._original_file_format = None
        super(Distogram, self).__init__(id)

    def __repr__(self):
        return '{}(id="{}", ndistances={})'.format(self.__class__.__name__, self.id, self.ndistances)

    @property
    def ndistances(self):
        """The number of :obj:`~conkit.core.distance.Distance` instances

        Returns
        -------
        int
            The number of distance pairs in the :obj:`~conkit.core.distogram.Distogram`
        """
        return len(self)

    @property
    def original_file_format(self):
        """The original file format used to create the :obj:`~conkit.core.distogram.Distogram` instance"""
        return self._original_file_format

    @original_file_format.setter
    def original_file_format(self, value):
        # Idiom fix: "value not in mapping" instead of "not value in mapping.keys()".
        if value not in PARSER_CACHE.distance_file_parsers:
            raise ValueError('Must provide valid distogram format: {}'.format(list(PARSER_CACHE.distance_file_parsers)))
        self._original_file_format = value

    def get_unique_distances(self, inplace=False):
        """Filter the :obj:`~conkit.core.distance.Distance` instances so that each residue pairs is present only once

        Parameters
        ----------
        inplace : bool, optional
           Replace stored distances with the unique distances in the current instance [default: False]

        Returns
        -------
        :obj:`~conkit.core.contactmap.ContactMap`
            :obj:`~conkit.core.contactmap.ContactMap` instance, regardless of inplace
        """
        distogram = self._inplace(inplace)
        # Sorting each pair id makes (a, b) and (b, a) collapse to one key;
        # later duplicates overwrite earlier ones.
        unique_pairs = {tuple(sorted(el.id)): el for el in self}
        distogram.child_list = list(unique_pairs.values())
        distogram.child_dict = unique_pairs
        return distogram

    def get_absent_residues(self, seq_len=None):
        """Get residues not represented by any :obj:`~conkit.core.distance.Distance` instance

        Parameters
        ----------
        seq_len : int, optional
           Sequence length. If not provided, it will be pulled from :attr:`~conkit.core.contactmap.ContactMap.sequence`
           [default: None]

        Returns
        -------
        list
            A list of absent residues

        Raises
        ------
        :exc:`ValueError`
           No seq_len was provided and :attr:`~conkit.core.contactmap.ContactMap.sequence` is not defined
        """
        if seq_len is None:
            if self.sequence is None:
                raise ValueError('Need to define a sequence or provide seq_len')
            seq_len = self.sequence.seq_len

        absent_residues = []
        for residue in range(1, seq_len + 1):
            # Idiom fix: generator with a direct membership test instead of
            # materialising a list of ids (the ids are always truthy tuples,
            # so the original reduced to the same existence check).
            if not any(residue in c.id for c in self):
                absent_residues.append(residue)
        return absent_residues

    def as_array(self, seq_len=None, get_weigths=False):
        """Transform the :obj:`~conkit.core.distogram.Distogram` instance into a :obj:numpy.array instance with shape
        (seq_len, seq_len) where each element represents the predicted distance between residues

        Parameters
        ----------
        seq_len : int, optional
           Sequence length. If not provided, it will be pulled from :attr:`~conkit.core.contactmap.ContactMap.sequence`
           [default: None]
        get_weigths : bool, optional
           If True the resulting array contains the confidence for each predicted distance rather than actual distances
           [default: False]

        Returns
        -------
        :obj:`numpy.array`
           :obj:`numpy.array` instance that represents the distogram. Note: change of residue indexing, now starts in 0

        Raises
        ------
        :exc:`ValueError`
           No seq_len was provided and :attr:`~conkit.core.contactmap.ContactMap.sequence` is not defined
        """
        if seq_len is None:
            if self.sequence is None:
                raise ValueError('Need to define a sequence or provide seq_len')
            seq_len = self.sequence.seq_len
        if seq_len < self.highest_residue_number:
            raise ValueError('Sequence length does not match contact map')

        if get_weigths:
            getter = attrgetter('max_score')
        else:
            getter = attrgetter('predicted_distance')

        # Build a (seq_len+1)**2 array so 1-based residue numbers can be used
        # directly, then drop row/column 0 to switch to 0-based indexing.
        array = np.full((seq_len + 1, seq_len + 1), np.nan)
        for distance in self:
            array[distance.res1_seq, distance.res2_seq] = getter(distance)
            array[distance.res2_seq, distance.res1_seq] = getter(distance)

        array = np.delete(array, 0, axis=0)
        array = np.delete(array, 0, axis=1)
        return array

    def reshape_bins(self, new_bins):
        """Reshape the predicted distance bins for all :obj:`~conkit.core.distance.Distance` instances. This will
        update :attr:`~conkit.core.distance.Distance.distance_scores` and
        :attr:`~conkit.core.distance.Distance.distance_bins` to fit the new bins.

        Parameters
        ----------
        new_bins : tuple
           A tuple of tuples, where each element corresponds with the upper and lower edges of the intervals for
           the new distance bins

        Raises
        ------
        :exc:`ValueError`
           The new distance bins are not valid
        """
        if self.original_file_format == 'pdb':
            raise ValueError('Cannot re-shape bins obtained from a PDB structure file')

        Distance._assert_valid_bins(new_bins)
        for distance in self:
            distance._reshape_bins(new_bins)

    def as_contactmap(self, distance_cutoff=8):
        """Create a :obj:`~conkit.core.contactmap.ContactMap` instance with the contacts present in this
        :obj:`~conkit.core.distogram.Distogram` instance.

        Parameters
        ----------
        distance_cutoff : int, float
           The distance cutoff used to consider a residue pair within contact of each other

        Returns
        -------
        :obj:`~conkit.core.contactmap.ContactMap`
            A contactmap with the contacts present in this distogram instance.
        """
        contactmap = ContactMap("map_1")
        for dist in self:
            if dist.predicted_distance <= distance_cutoff:
                contact = dist.as_contact(distance_cutoff)
                contactmap.add(contact)
        if self.sequence is not None:
            contactmap.sequence = self.sequence._inplace(False)
        return contactmap

    @staticmethod
    def calculate_rmsd(prediction, model, seq_len=None, calculate_wrmsd=False):
        """Calculate the RMSD between two :obj:`~conkit.core.distogram.Distogram` instances.

        Parameters
        ----------
        prediction: :obj:`~conkit.core.distogram.Distogram`
           A ConKit :obj:`~conkit.core.distogram.Distogram` used as the prediction for the RMSD
        model: :obj:`~conkit.core.distogram.Distogram`
           A ConKit :obj:`~conkit.core.distogram.Distogram` used as the model to calculate the RMSD
        seq_len: int, optional
           Sequence length. If not provided, it will be pulled from :attr:`~conkit.core.contactmap.ContactMap.sequence`
           [default: None]
        calculate_wrmsd: bool
           If set to True wRMSD is calculated using distance confidence scores [default: False]

        Returns
        -------
        list
            A list of floats with the RMSD values along the sequence

        Raises
        ------
        :exc:`ValueError`
           other is not a :obj:`~conkit.core.distogram.Distogram` instance.
        """
        if not isinstance(model, Distogram) or not isinstance(prediction, Distogram):
            raise ValueError('Need to provide a conkit.core.distogram.Distogram instance')

        # Clip both distograms at the lower edge of the prediction's last bin
        # so unbounded "far" distances do not dominate the RMSD.
        max_distance = prediction.top.distance_bins[-1][0]
        model_array = model.as_array(seq_len=seq_len)
        model_array[model_array > max_distance] = max_distance
        prediction_array = prediction.as_array(seq_len=seq_len)
        prediction_array[prediction_array > max_distance] = max_distance

        if prediction_array.shape != model_array.shape:
            raise ValueError('Distograms cannot be matched')

        difference = prediction_array - model_array
        squared_difference = difference ** 2
        if calculate_wrmsd:
            prediction_weights = prediction.as_array(seq_len=seq_len, get_weigths=True)
            squared_difference *= prediction_weights

        # Per-residue RMSD: average over the observed (non-NaN) pairs only.
        sum_squared_differences = np.nansum(squared_difference, axis=0)
        n_observations_array = np.sum(~np.isnan(squared_difference), axis=0)
        rmsd = np.sqrt(sum_squared_differences / n_observations_array)

        return rmsd

    def find_residues_within(self, resnum, distance_cutoff):
        """Find all residues within a given distance of a given residue

        Parameters
        ----------
        resnum: int
           The residue number of the residue of interest
        distance_cutoff: int, float
           The distance cutoff used to find residues

        Returns
        -------
        set
            A set with the residue numbers of residues within the given distance
            (note: includes *resnum* itself when any qualifying pair exists)
        """
        # Build the set directly instead of accumulating a list first.
        result = set()
        for distance in self:
            if distance.predicted_distance <= distance_cutoff and resnum in distance.id:
                result.update(distance.id)
        return result

    @staticmethod
    def merge_arrays(distogram_1, distogram_2):
        """Take two :obj:`~conkit.core.distogram.Distogram` instances and merge them together into the same
        :obj:`numpy.array` instance. Each half square in this array will correspond with the predicted distances
        at each hierarchy

        Parameters
        ----------
        distogram_1: :obj:`~conkit.core.distogram.Distogram`
           First :obj:`~conkit.core.distogram.Distogram` instance, used to populate top half square of the array
        distogram_2: :obj:`~conkit.core.distogram.Distogram`
           Second :obj:`~conkit.core.distogram.Distogram` instance, used to populate lower half square of the array

        Returns
        -------
        :obj:`numpy.array`
           :obj:`numpy.array` instance that represents the combined distograms.

        Raises
        ------
        :exc:`ValueError`
           No sequence has been registered for one of the :obj:`~conkit.core.distogram.Distogram` instances
        :exc:`ValueError`
           The sequence length associated to the :obj:`~conkit.core.distogram.Distogram` instances is incompatible
        """
        if distogram_1.sequence is None or distogram_2.sequence is None:
            raise ValueError("All hierarchies must have a sequence registered")
        if distogram_1.sequence.seq_len != distogram_2.sequence.seq_len:
            raise ValueError("Sequence lengths are incompatible")

        # 1-based fill, then drop row/column 0 (same trick as as_array).
        array = np.full((distogram_1.sequence.seq_len + 1, distogram_1.sequence.seq_len + 1), np.nan)
        for distance in distogram_1:
            array[distance.res1_seq, distance.res2_seq] = distance.predicted_distance
        for distance in distogram_2:
            array[distance.res2_seq, distance.res1_seq] = distance.predicted_distance

        array = np.delete(array, 0, axis=0)
        array = np.delete(array, 0, axis=1)
        return array
| {
"content_hash": "aabb2399e423adc0884afbe36bfdc304",
"timestamp": "",
"source": "github",
"line_count": 323,
"max_line_length": 120,
"avg_line_length": 38.916408668730654,
"alnum_prop": 0.6234685759745425,
"repo_name": "rigdenlab/conkit",
"id": "c0c0fbca53470d2632a3e0479d749b5a78980abf",
"size": "14165",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "conkit/core/distogram.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Cython",
"bytes": "4290"
},
{
"name": "Python",
"bytes": "900218"
}
],
"symlink_target": ""
} |
from sourcelyzer.cli import outnl
from sourcelyzer.properties import load_from_file
from sourcelyzer.app import App
import cherrypy
def init_server(properties):
    """Configure cherrypy from *properties* and mount the root application.

    Expects the keys 'sourcelyzer.server.listen_addr' and
    'sourcelyzer.server.listen_port' in *properties*.
    """
    cherrypy.config.update({
        'server.socket_host': properties['sourcelyzer.server.listen_addr'],
        'server.socket_port': int(properties['sourcelyzer.server.listen_port'])
    })

    # NOTE(review): ``Root`` is neither defined nor imported in this module,
    # so calling init_server() raises NameError -- confirm where Root lives.
    cherrypy.tree.mount(Root(), '/')
def start(arguments, properties):
    """Start the server (placeholder: only prints a status message)."""
    message = 'start server'
    outnl(message)
def stop(arguments, properties):
    """Stop the server (placeholder: only prints a status message)."""
    message = 'stop server'
    outnl(message)
def restart(arguments, properties):
    """Restart the server by stopping it and starting it again.

    BUGFIX: stop() and start() require (arguments, properties); calling
    them without arguments, as before, raised TypeError.
    """
    stop(arguments, properties)
    start(arguments, properties)
def start_console(arguments, properties):
    """Build the console App from *properties* and run it in the foreground."""
    console_app = App(properties)
    console_app.run()
def server(arguments):
    """Dispatch a docopt-style *arguments* mapping to the matching action.

    Loads server properties from --config (falling back to
    ``conf/server.properties``) and delegates to start/stop/restart/
    start_console based on which command flag is truthy.
    """
    # Idiom fix: identity comparison with None ("is not None"), not "!=".
    conffile = arguments['--config'] if arguments['--config'] is not None else 'conf/server.properties'
    props = load_from_file(conffile)

    if arguments['start']:
        start(arguments, props)
    elif arguments['stop']:
        stop(arguments, props)
    elif arguments['restart']:
        restart(arguments, props)
    elif arguments['start-console']:
        start_console(arguments, props)
    else:
        # Should be unreachable when the CLI parser validated the command.
        outnl('Wait, something is amiss. Some how there is an invalid command')
| {
"content_hash": "28293b9d52856a838b466665267b6577",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 99,
"avg_line_length": 26.022222222222222,
"alnum_prop": 0.6797608881298036,
"repo_name": "sourcelyzer/sourcelyzer",
"id": "bb8c7d57d5c42515e68c39055a6830615b3eb612",
"size": "1171",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sourcelyzer/cli/apps/server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "205"
},
{
"name": "JavaScript",
"bytes": "2108"
},
{
"name": "Python",
"bytes": "87235"
},
{
"name": "Vue",
"bytes": "3883"
}
],
"symlink_target": ""
} |
"""
\x1b[31mYmir Automation:\x1b[0m
This is the \x1b[35mDemo\x1b[0m Service
"""
import os
from fabric import api
from ymir import load_service_from_json

# Path to the service description JSON; overridable via the YMIR_SERVICE_JSON
# environment variable, defaulting to ./service.json next to this fabfile.
YMIR_SERVICE_JSON = os.path.abspath(
    os.environ.get(
        'YMIR_SERVICE_JSON',
        os.path.join(os.path.dirname(__file__),
                     'service.json')))

# Create the ymir service from the service description
_service = load_service_from_json(YMIR_SERVICE_JSON)
service_data = _service.template_data()

# Install the standard service operations
# (like create, terminate, provision, etc) as fabric commands
_service.fabric_install()
def deploy(branch='master'):
    """ example usage: "fab deploy:branch=master" """
    message = "deploy for branch {0} -> {1} is not defined yet".format(branch, _service)
    _service.report(message)
def tail():
    """ tail syslog on remote server """
    # Runs inside an SSH context so the sudo executes on the service host.
    with _service.ssh_ctx():
        api.sudo('tail /var/log/syslog')
| {
"content_hash": "5f4b6ffd3f534aa9c498a84d71d9ac92",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 77,
"avg_line_length": 28,
"alnum_prop": 0.6666666666666666,
"repo_name": "mattvonrocketstein/ymir",
"id": "6345f2168e42933d6920f1e3a046fc708d6c08b2",
"size": "970",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "demos/cabot/fabfile.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "330"
},
{
"name": "Puppet",
"bytes": "3363"
},
{
"name": "Python",
"bytes": "177403"
},
{
"name": "Ruby",
"bytes": "9827"
},
{
"name": "Shell",
"bytes": "31589"
}
],
"symlink_target": ""
} |
import boto.sqs
import boto.sqs.queue
from boto.sqs.message import Message
from boto.sqs.connection import SQSConnection
from boto.exception import SQSError
import sys
import urllib2
response = urllib2.urlopen("http://ec2-52-30-7-5.eu-west-1.compute.amazonaws.com:81/key")
key = response.read()
# Get the keys from a specific url and then use them to connect to AWS Service
access_key_id = key[0:20]
secret_access_key = key[21:]
# Set up a connection to the AWS service.
conn = boto.sqs.connect_to_region("eu-west-1", aws_access_key_id=access_key_id, aws_secret_access_key=secret_access_key)
# Get a list of the queues that exists and then print the list out
rs = conn.get_all_queues()
for qq in rs:
print qq.id
q = conn.create_queue('joonasqueue')
myqueue = conn.get_queue('joonasqueue')
print q
print myqueue
| {
"content_hash": "d7a5855df3a6925274093e257ba93a8a",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 120,
"avg_line_length": 30.333333333333332,
"alnum_prop": 0.7545787545787546,
"repo_name": "JoonasKansala/lab11",
"id": "c6a3519e64df49a189cbe6681cb8faa88d9cb4e5",
"size": "886",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lab11/create-aws-queue.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5709"
}
],
"symlink_target": ""
} |
import math
import os
import random
import re
import sys
'''
Arrays: Left Rotation
https://www.hackerrank.com/challenges/ctci-array-left-rotation/problem
'''
# Complete the rotLeft function below.
def rotLeft(a, d):
    """Return a new list: *a* rotated left by *d* positions (0 <= d <= len(a))."""
    rotated = list(a[d:])
    rotated.extend(a[:d])
    return rotated
if __name__ == '__main__':
    # Hard-coded sample from the problem statement; n is the list length
    # (unused here because the list literal is fixed).
    n = 5
    d = 4
    a = [1, 2, 3, 4, 5]

    result = rotLeft(a, d)

    # Expected output: "5 1 2 3 4"
    print(' '.join(map(str, result)))
| {
"content_hash": "e12ea7c32cc8ce221a1f25911166f3a3",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 70,
"avg_line_length": 14.37037037037037,
"alnum_prop": 0.6005154639175257,
"repo_name": "MarsBighead/mustang",
"id": "9891d6e74fcfc77f73996a4a4339f5612f9992e0",
"size": "404",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/hackerrank/arrays-left-rotation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "622"
},
{
"name": "C++",
"bytes": "15533"
},
{
"name": "CSS",
"bytes": "2525"
},
{
"name": "Dockerfile",
"bytes": "499"
},
{
"name": "Erlang",
"bytes": "5855"
},
{
"name": "Go",
"bytes": "3879"
},
{
"name": "HTML",
"bytes": "3879"
},
{
"name": "Java",
"bytes": "541"
},
{
"name": "JavaScript",
"bytes": "7858"
},
{
"name": "Julia",
"bytes": "2223"
},
{
"name": "Makefile",
"bytes": "650"
},
{
"name": "Modula-3",
"bytes": "43"
},
{
"name": "PHP",
"bytes": "771"
},
{
"name": "PLpgSQL",
"bytes": "4642"
},
{
"name": "Perl",
"bytes": "46253"
},
{
"name": "Python",
"bytes": "110755"
},
{
"name": "Raku",
"bytes": "378"
},
{
"name": "Shell",
"bytes": "22680"
}
],
"symlink_target": ""
} |
"""
Set of programs to read and interact with output from Multifluid/multispecies
"""
import numpy as np
import os
from .bifrost import BifrostData, Rhoeetab, read_idl_ascii, subs2grph
class EbysusData(BifrostData):
    """
    Class to hold data from Multifluid/multispecies simulations
    in native format.
    """

    def __init__(self, *args, **kwargs):
        super(EbysusData, self).__init__(*args, **kwargs)
        # Filename templates; the %02i_%02i slots are (species, level).
        self.mf_common_file = (self.file_root + '_mf_common')
        self.mf_file = (self.file_root + '_mf_%02i_%02i')
        self.mfe_file = (self.file_root + '_mfe_%02i_%02i')
        self.mfc_file = (self.file_root + '_mfc_%02i_%02i')
        self.mf_e_file = self.file_root + '_mf_e'

    def _set_snapvars(self):
        """Partition the variable names into snap / mhd / aux categories."""
        self.snapvars = ['r', 'px', 'py', 'pz']
        self.mhdvars = []
        if (self.do_mhd):
            self.mhdvars = ['bx', 'by', 'bz']
        self.auxvars = self.params['aux'].split()

        # BUGFIX: always initialise the per-category aux lists; previously
        # they were only assigned inside the mf_epf branch, so the
        # simple_vars concatenation below raised AttributeError otherwise.
        self.varsmfe = []
        self.varsmfc = []
        self.varsmf = []

        if (self.mf_epf):
            # add internal energy to basic snaps
            self.snapvars.append('e')
            # make distinction between different aux variables
            self.varsmfe = [v for v in self.auxvars if v.startswith('mfe_')]
            self.varsmfc = [v for v in self.auxvars if v.startswith('mfc_')]
            self.varsmf = [v for v in self.auxvars if v.startswith('mf_')]
            for var in (self.varsmfe + self.varsmfc + self.varsmf):
                self.auxvars.remove(var)
        else:  # one energy for all fluids
            # BUGFIX: was "'e' + self.mhdvars", which raises TypeError
            # (cannot concatenate str and list).
            self.mhdvars = ['e'] + self.mhdvars
            if self.with_electrons:
                # NOTE(review): self.snapevars is not initialised in this
                # class -- presumably set by the parent class; confirm.
                self.snapevars.remove('ee')

        if hasattr(self, 'with_electrons'):
            if self.with_electrons:
                # Redundant with __init__, kept for backward compatibility.
                self.mf_e_file = self.file_root + '_mf_e'

        self.auxxyvars = []
        # special case for the ixy1 variable, lives in a separate file
        if 'ixy1' in self.auxvars:
            self.auxvars.remove('ixy1')
            self.auxxyvars.append('ixy1')

        self.simple_vars = self.snapvars + self.mhdvars + \
            self.auxvars + self.varsmf + self.varsmfe + self.varsmfc

        self.compvars = ['ux', 'uy', 'uz', 's', 'rup', 'dxdbup',
                         'dxdbdn', 'dydbup', 'dydbdn', 'dzdbup', 'dzdbdn', 'modp']
        if (self.do_mhd):
            self.compvars = self.compvars + ['bxc', 'byc', 'bzc', 'modb']

    # def set_snap(self,snap):
    #     super(EbysusData, self).set_snap(snap)

    def _read_params(self):
        ''' Reads parameter file specific for Multi Fluid Bifrost '''
        super(EbysusData, self)._read_params()

        self.nspecies_max = 28
        self.nlevels_max = 28

        try:
            self.mf_epf = self.params['mf_epf']
        except KeyError:
            raise KeyError('read_params: could not find mf_epf in idl file!')
        try:
            self.with_electrons = self.params['mf_electrons']
        except KeyError:
            raise KeyError(
                'read_params: could not find with_electrons in idl file!')

    def _init_vars(self, *args, **kwargs):
        """
        Initialises variables (common for all fluids)
        """
        self.variables = {}
        for var in self.mhdvars:  # for multispecies these are common
            # BUGFIX: narrowed the bare "except:" (which also swallowed
            # KeyboardInterrupt/SystemExit) to "except Exception".
            try:
                self.variables[var] = self._get_simple_var(
                    var, *args, **kwargs)
                setattr(self, var, self.variables[var])
            except Exception:
                if self.verbose:
                    print(('(WWW) init_vars: could not read variable %s' % var))

    def get_var(self, var, mf_ispecies=1, mf_ilevel=1, snap=None, *args, **kwargs):
        """
        Reads a given variable from the relevant files.

        Parameters
        ----------
        var - string
            Name of the variable to read. Must be Bifrost internal names.
        mf_ispecies - integer [1, 28]
            Species ID
        mf_ilevel - integer
            Ionization level
        snap - integer, optional
            Snapshot number to read. By default reads the loaded snapshot;
            if a different number is requested, will load that snapshot
            by running self.set_snap(snap).
        """
        assert 0 < mf_ispecies <= 28

        if (snap is not None) and (snap != self.snap):
            self.set_snap(snap)

        # # check if already in memory
        # if var in self.variables:
        #     return self.variables[var]

        if var in self.simple_vars:  # is variable already loaded?
            return self._get_simple_var(var, mf_ispecies, mf_ilevel)
        elif var in self.compvars:
            return self._get_composite_var(var, mf_ispecies, mf_ilevel)
        elif var in self.auxxyvars:
            return super(EbysusData, self)._get_simple_var_xy(var)
        else:
            # BUGFIX: the % operator previously bound only to the second
            # string literal, producing "...variableNAME" with no space.
            raise ValueError("get_var: could not read variable %s. "
                             "Must be one of %s" %
                             (var, str(self.simple_vars + self.compvars + self.auxxyvars)))

    def _get_simple_var(self, var, mf_ispecies=0, mf_ilevel=0, order='F', mode='r', *args, **kwargs):
        """
        Gets "simple" variable (ie, only memmap, not load into memory).

        Overloads super class to make a distinction between different
        filenames for different variables

        Parameters:
        -----------
        var - string
            Name of the variable to read. Must be Bifrost internal names.
        order - string, optional
            Must be either 'C' (C order) or 'F' (Fortran order, default).
        mode - string, optional
            numpy.memmap read mode. By default is read only ('r'), but
            you can use 'r+' to read and write. DO NOT USE 'w+'.

        Returns
        -------
        result - numpy.memmap array
            Requested variable.
        """
        var_sufix = '_s%dl%d' % (mf_ispecies, mf_ilevel)

        # Scratch snapshots (<0) use a '.scr' suffix; snapshot 0 has no
        # numeric part in the filename.
        if self.snap < 0:
            snapstr = ''
            fsuffix_b = '.scr'
        elif self.snap == 0:
            snapstr = ''
            fsuffix_b = ''
        else:
            snapstr = self.snap_str
            fsuffix_b = ''

        if var in self.mhdvars:
            idx = self.mhdvars.index(var)
            fsuffix_a = '.snap'
            filename = self.mf_common_file
        elif var in self.snapvars:
            idx = self.snapvars.index(var)
            fsuffix_a = '.snap'
            filename = self.mf_file
        elif var in self.snapevars:
            idx = self.snapevars.index(var)
            # BUGFIX: fsuffix_a was never assigned in this branch, so the
            # filename concatenation below raised NameError. Electron data
            # is snapshot data, so '.snap' is assumed -- TODO confirm
            # against the writer code.
            fsuffix_a = '.snap'
            filename = self.mf_e_file
        elif var in self.auxvars:
            idx = self.auxvars.index(var)
            fsuffix_a = '.aux'
            filename = self.mf_file
        elif var in self.varsmf:
            idx = self.varsmf.index(var)
            fsuffix_a = '.aux'
            filename = self.mf_file
        elif var in self.varsmfe:
            idx = self.varsmfe.index(var)
            fsuffix_a = '.aux'
            filename = self.mfe_file
        elif var in self.varsmfc:
            idx = self.varsmfc.index(var)
            fsuffix_a = '.aux'
            filename = self.mfc_file

        filename = filename + snapstr + fsuffix_a + fsuffix_b

        if var not in self.mhdvars:
            # Per-fluid files carry the (species, level) pair in their name.
            filename = filename % (mf_ispecies, mf_ilevel)

        # Each variable is stored as one contiguous (nx, ny, nzb) slab; idx
        # selects the slab for this variable within the file.
        dsize = np.dtype(self.dtype).itemsize
        offset = self.nx * self.ny * self.nzb * idx * dsize
        mmap = np.memmap(filename, dtype=self.dtype, order=order, offset=offset,
                         mode=mode, shape=(self.nx, self.ny, self.nzb))
        setattr(self, var + var_sufix, mmap)
        self.variables[var + var_sufix] = mmap
        # Return the cached memmap instead of opening an identical,
        # redundant second mapping of the same file region (as before).
        return mmap

    def _get_composite_var(self, var, mf_ispecies=0, mf_ilevel=0, order='F', mode='r', *args, **kwargs):
        """
        Gets composite variables for multi species fluid.
        """
        from . import cstagger

        var_sufix = '_s%dl%d' % (mf_ispecies, mf_ilevel)

        if var in ['ux', 'uy', 'uz']:  # velocities
            p = self._get_simple_var(
                'p' + var[1], mf_ispecies, mf_ilevel, order, mode)
            r = self._get_simple_var('r', mf_ispecies, mf_ilevel, order, mode)
            rdt = r.dtype  # tricky
            cstagger.init_stagger(
                self.nzb, self.z.astype(rdt), self.zdn.astype(rdt))
            if getattr(self, 'n' + var[1]) < 5:
                return p / r
            else:  # will call xdn, ydn, or zdn to get r at cell faces
                return p / getattr(cstagger, var[1] + 'dn')(r)
        elif var == 'ee':  # internal energy
            # NOTE(review): if 'e' is absent this silently returns None --
            # confirm whether callers rely on that.
            if hasattr(self, 'e'):
                e = self._get_simple_var(
                    'e', mf_ispecies, mf_ilevel, order, mode)
                r = self._get_simple_var(
                    'r', mf_ispecies, mf_ilevel, order, mode)
                return e / r
        elif var == 's':  # entropy?
            p = self._get_simple_var('p', mf_ispecies, mf_ilevel, order, mode)
            r = self._get_simple_var('r', mf_ispecies, mf_ilevel, order, mode)
            return np.log(p) - self.params['gamma'] * np.log(r)
        elif var in ['modb', 'modp']:  # total magnetic field
            v = var[3]
            if v == 'b':
                if not self.do_mhd:
                    raise ValueError("No magnetic field available.")
            varr = self._get_simple_var(
                v+'x', mf_ispecies, mf_ilevel, order, mode)
            varrt = varr.dtype  # tricky
            cstagger.init_stagger(self.nzb, self.z.astype(
                varrt), self.zdn.astype(varrt))
            result = cstagger.xup(varr) ** 2  # varr == _get_simple_var(v+'x')
            result += cstagger.yup(self._get_simple_var(v +
                                                        'y', mf_ispecies, mf_ilevel, order, mode)) ** 2
            result += cstagger.zup(self._get_simple_var(v +
                                                        'z', mf_ispecies, mf_ilevel, order, mode)) ** 2
            return np.sqrt(result)
        else:
            raise ValueError(('_get_composite_var: do not know (yet) how to'
                              'get composite variable %s.' % var))
| {
"content_hash": "99f738a4e5fecb68ec7c749d91e71712",
"timestamp": "",
"source": "github",
"line_count": 257,
"max_line_length": 119,
"avg_line_length": 39.89494163424124,
"alnum_prop": 0.5306739490880718,
"repo_name": "M1kol4j/helita",
"id": "154dfd34d64dc108f00e3bf8bd7d534306944922",
"size": "10253",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "helita/sim/ebysus.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "61874"
},
{
"name": "Fortran",
"bytes": "5272"
},
{
"name": "Python",
"bytes": "434925"
}
],
"symlink_target": ""
} |
import argparse
# NOTE(review): prog/description look copied from single.py -- confirm they
# should describe a_align instead.
parser = argparse.ArgumentParser(prog="single.py", description="Run Starfish fitting model in parallel.")
# Even though these arguments aren't being used, we need to add them.
parser.add_argument("--generate", action="store_true", help="Write out the data, mean model, and residuals for current parameter settings.")
# parser.add_argument("--initPhi", action="store_true", help="Create *phi.json files for each order using values in config.yaml")
parser.add_argument("--optimize", action="store_true", help="Optimize the parameters.")
args = parser.parse_args()

# NOTE(review): import deferred until after argument parsing -- presumably
# because importing Starfish is heavy or has side effects; confirm.
from Starfish import astroseismic_align as AA

if args.optimize:
    AA.optimize()

if args.generate:
    # Save the residuals as a large JSON file, using the known parameters
    AA.generate()
| {
"content_hash": "4e22ecd5a2178f4b892addc21f8dd235",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 140,
"avg_line_length": 45.529411764705884,
"alnum_prop": 0.7493540051679587,
"repo_name": "BrownDwarf/Starfish",
"id": "9848fcc51846892dbce6b6119092540a3ac89261",
"size": "797",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scripts/a_align.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "3946242"
},
{
"name": "Makefile",
"bytes": "665"
},
{
"name": "Python",
"bytes": "469734"
},
{
"name": "Shell",
"bytes": "6095"
},
{
"name": "TeX",
"bytes": "282961"
}
],
"symlink_target": ""
} |
'''
CodeWars 2013
Distinct Letters
'''
import sys
def is_distinct(line):
    """Return True if no character occurs more than once in *line*.

    A set collapses duplicates, so its length equals ``len(line)`` only
    when every character is unique.  This is O(n), replacing the previous
    O(n^2) linear scan of an accumulator list.
    """
    return len(set(line)) == len(line)
def get_output(is_distinct):
    """Map the boolean verdict onto the judge's expected output string."""
    return ('USES DISTINCT LETTERS' if is_distinct
            else 'DOES NOT USE DISTINCT LETTERS')
# Read input lines until a lone '.' sentinel; print the distinctness
# verdict for each (comparison is case-insensitive via upper()).
for linen in sys.stdin:
    line = linen.rstrip().upper()
    if line == '.':
        break
    print (line, get_output(is_distinct(line)))
| {
"content_hash": "a64936b34aa3274f481c61e3420cb7b5",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 47,
"avg_line_length": 17.533333333333335,
"alnum_prop": 0.5855513307984791,
"repo_name": "martrik/HPCodeWarsSamples",
"id": "15eb2bdae01411614d0cefdfc9315892c71fe53a",
"size": "548",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Problems/2013/problems/2013SampleSolutions/prob08_distinct_letters.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "46295"
},
{
"name": "C#",
"bytes": "29603"
},
{
"name": "C++",
"bytes": "25390"
},
{
"name": "CSS",
"bytes": "390"
},
{
"name": "HTML",
"bytes": "1045375"
},
{
"name": "Java",
"bytes": "45251"
},
{
"name": "JavaScript",
"bytes": "48535"
},
{
"name": "Python",
"bytes": "18227"
}
],
"symlink_target": ""
} |
"""Tests for ceilometer/storage/
"""
import mock
from oslo_config import fixture as fixture_config
from oslotest import base
import retrying
from ceilometer.alarm.storage import impl_log as impl_log_alarm
from ceilometer.alarm.storage import impl_sqlalchemy as impl_sqlalchemy_alarm
from ceilometer.event.storage import impl_hbase as impl_hbase_event
from ceilometer import storage
from ceilometer.storage import impl_log
from ceilometer.storage import impl_sqlalchemy
import six
class EngineTest(base.BaseTestCase):
    """Tests for storage engine selection via ``storage.get_connection``."""

    def test_get_connection(self):
        engine = storage.get_connection('log://localhost',
                                        'ceilometer.metering.storage')
        self.assertIsInstance(engine, impl_log.Connection)

    def test_get_connection_no_such_engine(self):
        # The previous try/except form silently passed when no exception
        # was raised; assertRaises turns a missing exception into a
        # test failure and returns the raised exception for inspection.
        err = self.assertRaises(RuntimeError,
                                storage.get_connection,
                                'no-such-engine://localhost',
                                'ceilometer.metering.storage')
        self.assertIn('no-such-engine', six.text_type(err))
class ConnectionRetryTest(base.BaseTestCase):
    """Verify the connection retry loop and its back-off interval."""

    def setUp(self):
        super(ConnectionRetryTest, self).setUp()
        self.CONF = self.useFixture(fixture_config.Config()).conf

    def test_retries(self):
        self.CONF.set_override("connection", "no-such-engine://",
                               group="database")
        # Patch sleep so the 9 retries don't actually wait 10s each.
        with mock.patch.object(retrying.time, 'sleep') as retry_sleep:
            # The previous try/except form only ran the retry-count
            # assertions when the exception fired, and passed silently
            # otherwise; assertRaises makes the exception mandatory.
            err = self.assertRaises(RuntimeError,
                                    storage.get_connection_from_config,
                                    self.CONF)
            self.assertIn('no-such-engine', six.text_type(err))
            self.assertEqual(9, retry_sleep.call_count)
            retry_sleep.assert_called_with(10.0)
class ConnectionConfigTest(base.BaseTestCase):
    """Check that per-purpose database URLs select the right driver."""

    def setUp(self):
        super(ConnectionConfigTest, self).setUp()
        self.CONF = self.useFixture(fixture_config.Config()).conf

    def _assert_driver(self, expected_cls, purpose=None):
        """Open a connection (optionally for *purpose*) and check its type."""
        if purpose is None:
            conn = storage.get_connection_from_config(self.CONF)
        else:
            conn = storage.get_connection_from_config(self.CONF, purpose)
        self.assertIsInstance(conn, expected_cls)

    def test_only_default_url(self):
        self.CONF.set_override("connection", "log://", group="database")
        self._assert_driver(impl_log.Connection)
        self._assert_driver(impl_log.Connection, 'metering')
        self._assert_driver(impl_log_alarm.Connection, 'alarm')

    def test_two_urls(self):
        self.CONF.set_override("connection", "log://", group="database")
        self.CONF.set_override("alarm_connection", "sqlite://",
                               group="database")
        self._assert_driver(impl_log.Connection)
        self._assert_driver(impl_log.Connection, 'metering')
        self._assert_driver(impl_sqlalchemy_alarm.Connection, 'alarm')

    def test_three_urls(self):
        self.CONF.set_override("connection", "log://", group="database")
        self.CONF.set_override("alarm_connection", "sqlite://",
                               group="database")
        self.CONF.set_override("event_connection", "hbase://__test__",
                               group="database")
        self._assert_driver(impl_log.Connection)
        self._assert_driver(impl_log.Connection, 'metering')
        self._assert_driver(impl_sqlalchemy_alarm.Connection, 'alarm')
        self._assert_driver(impl_hbase_event.Connection, 'event')

    def test_three_urls_no_default(self):
        self.CONF.set_override("connection", None, group="database")
        self.CONF.set_override("metering_connection", "log://",
                               group="database")
        self.CONF.set_override("alarm_connection", "sqlite://",
                               group="database")
        self.CONF.set_override("event_connection", "hbase://__test__",
                               group="database")
        self._assert_driver(impl_log.Connection)
        self._assert_driver(impl_sqlalchemy_alarm.Connection, 'alarm')
        self._assert_driver(impl_hbase_event.Connection, 'event')

    def test_sqlalchemy_driver(self):
        self.CONF.set_override("connection", "sqlite+pysqlite://",
                               group="database")
        self._assert_driver(impl_sqlalchemy.Connection)
        self._assert_driver(impl_sqlalchemy.Connection, 'metering')
        self._assert_driver(impl_sqlalchemy_alarm.Connection, 'alarm')
| {
"content_hash": "908d1ebb6c6ccc916892fa26a6b53b2c",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 77,
"avg_line_length": 47.375,
"alnum_prop": 0.6468149264983039,
"repo_name": "yanheven/ceilometer",
"id": "55439ebb0f91bf235b084da9a2b434d72334f829",
"size": "5907",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ceilometer/tests/storage/test_get_connection.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2969045"
},
{
"name": "Shell",
"bytes": "4227"
}
],
"symlink_target": ""
} |
"""
Unit test for Date parameters.
"""
import unittest
import datetime as dt
import param
class TestDateParameters(unittest.TestCase):
    """Exercise bounds checking on ``param.Date``.

    The manual try/except/else pattern is replaced by the idiomatic
    ``assertRaises`` context manager, which fails the test automatically
    when the expected ValueError is not raised.
    """

    def test_initialization_out_of_bounds(self):
        # Declaring a default outside the bounds must fail at class
        # definition time.
        with self.assertRaises(ValueError):
            class Q(param.Parameterized):
                q = param.Date(dt.datetime(2017, 2, 27),
                               bounds=(dt.datetime(2017, 2, 1),
                                       dt.datetime(2017, 2, 26)))

    def test_set_out_of_bounds(self):
        class Q(param.Parameterized):
            q = param.Date(bounds=(dt.datetime(2017, 2, 1),
                                   dt.datetime(2017, 2, 26)))

        with self.assertRaises(ValueError):
            Q.q = dt.datetime(2017, 2, 27)

    def test_set_exclusive_out_of_bounds(self):
        # Upper bound is exclusive, so setting exactly the bound fails.
        class Q(param.Parameterized):
            q = param.Date(bounds=(dt.datetime(2017, 2, 1),
                                   dt.datetime(2017, 2, 26)),
                           inclusive_bounds=(True, False))

        with self.assertRaises(ValueError):
            Q.q = dt.datetime(2017, 2, 26)

    def test_get_soft_bounds(self):
        q = param.Date(dt.datetime(2017, 2, 25),
                       bounds=(dt.datetime(2017, 2, 1),
                               dt.datetime(2017, 2, 26)),
                       softbounds=(dt.datetime(2017, 2, 1),
                                   dt.datetime(2017, 2, 25)))
        self.assertEqual(q.get_soft_bounds(), (dt.datetime(2017, 2, 1),
                                               dt.datetime(2017, 2, 25)))
| {
"content_hash": "031ed5985c3f4c2b8371c0b6602e8447",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 77,
"avg_line_length": 34.333333333333336,
"alnum_prop": 0.5043149946062567,
"repo_name": "ioam/param",
"id": "860e8fe30898a1de8f6a9c17691b5bcac497bea5",
"size": "1854",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/API0/testdateparam.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "387425"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.contrib import admin
from django.db.models.signals import pre_delete
from django.dispatch import receiver
from django.http import HttpResponse, HttpResponseNotFound
from django.shortcuts import get_object_or_404, render_to_response
from django.template import RequestContext
from django.utils.html import format_html
from django.utils.translation import ugettext_lazy as _
from reviewboard.accounts.admin import fix_review_counts
from reviewboard.admin.server import get_server_url
from reviewboard.scmtools.forms import RepositoryForm
from reviewboard.scmtools.models import Repository, Tool
class RepositoryAdmin(admin.ModelAdmin):
    """Django admin definitions for the Repository model.

    Adds per-row inline action links (hosting-service hooks setup and
    RBTools setup pages) on top of the standard change form, and groups
    the form fields into hosting/bug-tracker/access-control fieldsets.
    """

    list_display = ('name', 'path', 'hosting', '_visible', 'inline_actions')
    list_select_related = ('hosting_account',)
    search_fields = ('name', 'path', 'mirror_path', 'tool__name')
    raw_id_fields = ('local_site',)
    ordering = ('name',)
    fieldsets = (
        (_('General Information'), {
            'fields': ('name', 'visible',),
            'classes': ('wide',),
        }),
        (_('Repository Hosting'), {
            'fields': (
                'hosting_type',
                'hosting_url',
                'hosting_account',
                'hosting_account_username',
                'hosting_account_password',
                'hosting_account_two_factor_auth_code',
            ),
            'classes': ('wide',),
        }),
        (RepositoryForm.REPOSITORY_INFO_FIELDSET, {
            'fields': (
                'tool',
                'repository_plan',
                'path',
                'mirror_path',
                'raw_file_url',
                'username',
                'password',
                'use_ticket_auth',
            ),
            'classes': ('wide',),
        }),
        (RepositoryForm.SSH_KEY_FIELDSET, {
            'fields': (
                'associate_ssh_key',
            ),
            'classes': ('wide',),
        }),
        (RepositoryForm.BUG_TRACKER_FIELDSET, {
            'fields': (
                'bug_tracker_use_hosting',
                'bug_tracker_type',
                'bug_tracker_hosting_url',
                'bug_tracker_plan',
                'bug_tracker_hosting_account_username',
                'bug_tracker',
            ),
            'classes': ('wide',),
        }),
        (_('Access Control'), {
            'fields': ('public', 'users', 'review_groups'),
            'classes': ('wide',),
        }),
        (_('Advanced Settings'), {
            'fields': ('encoding',),
            'classes': ('wide', 'collapse'),
        }),
        (_('Internal State'), {
            'description': _('<p>This is advanced state that should not be '
                             'modified unless something is wrong.</p>'),
            'fields': ('local_site', 'hooks_uuid', 'extra_data'),
            'classes': ['collapse'],
        }),
    )
    form = RepositoryForm

    def hosting(self, repository):
        """Return "user@service" for the list column, or '' if unhosted."""
        if repository.hosting_account_id:
            account = repository.hosting_account

            if account.service:
                return '%s@%s' % (account.username, account.service.name)

        return ''

    def inline_actions(self, repository):
        """Render the per-row action links shown in the change list.

        The "Hooks" link only appears when the hosting service documents
        repository hook setup instructions.
        """
        s = ['<div class="admin-inline-actions">']

        if repository.hosting_account:
            service = repository.hosting_account.service

            if service and service.has_repository_hook_instructions:
                s.append(format_html(
                    '<a class="action-hooks-setup"'
                    ' href="{0}/hooks-setup/">[{1}]</a>',
                    repository.pk, _('Hooks')))

        s.append(format_html(
            '<a class="action-rbtools-setup"'
            ' href="{0}/rbtools-setup/">[{1}]</a>',
            repository.pk, _('RBTools Setup')))

        s.append('</div>')

        return ''.join(s)
    inline_actions.allow_tags = True
    inline_actions.short_description = ''

    def _visible(self, repository):
        """Boolean "Show" column backed by the ``visible`` field."""
        return repository.visible
    _visible.boolean = True
    _visible.short_description = _('Show')

    def get_urls(self):
        """Add the hooks-setup and rbtools-setup admin sub-URLs."""
        from django.conf.urls import patterns

        return patterns(
            '',

            (r'^(?P<repository_id>[0-9]+)/hooks-setup/$',
             self.admin_site.admin_view(self.hooks_setup)),

            (r'^(?P<repository_id>[0-9]+)/rbtools-setup/$',
             self.admin_site.admin_view(self.rbtools_setup)),
        ) + super(RepositoryAdmin, self).get_urls()

    def hooks_setup(self, request, repository_id):
        """Serve the hosting service's hook setup instructions, or 404."""
        repository = get_object_or_404(Repository, pk=repository_id)

        if repository.hosting_account:
            service = repository.hosting_account.service

            if service and service.has_repository_hook_instructions:
                return HttpResponse(service.get_repository_hook_instructions(
                    request, repository))

        return HttpResponseNotFound()

    def rbtools_setup(self, request, repository_id):
        """Render the RBTools setup instructions page for a repository."""
        repository = get_object_or_404(Repository, pk=repository_id)

        return render_to_response(
            'admin/scmtools/repository/rbtools_setup.html',
            RequestContext(request, {
                'repository': repository,
                'reviewboard_url': get_server_url(
                    local_site=repository.local_site),
            }))
@receiver(pre_delete, sender=Repository,
          dispatch_uid='repository_delete_reset_review_counts')
def repository_delete_reset_review_counts(sender, instance, using, **kwargs):
    """Reset review counts in the dashboard when deleting repository objects.

    There doesn't seem to be a good way to get notified on cascaded delete
    operations, which means that when deleting a repository, there's no
    good way to update the review counts that are shown to users. This
    method clears them out entirely to be regenerated. Deleting
    repositories should be a very rare occurrence, so it's not too
    upsetting to do this.
    """
    fix_review_counts()
class ToolAdmin(admin.ModelAdmin):
    """Admin definitions for the Tool (SCM backend) model."""

    list_display = ('__str__', 'class_name')
# Register the ModelAdmin classes with the default admin site.
admin.site.register(Repository, RepositoryAdmin)
admin.site.register(Tool, ToolAdmin)
| {
"content_hash": "e628f7803d742bd22402c0cf14b6f845",
"timestamp": "",
"source": "github",
"line_count": 181,
"max_line_length": 77,
"avg_line_length": 34.50828729281768,
"alnum_prop": 0.5669228306115914,
"repo_name": "bkochendorfer/reviewboard",
"id": "5f38e109d11a511c78a7e53d80048f2801c484b0",
"size": "6246",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "reviewboard/scmtools/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "212721"
},
{
"name": "HTML",
"bytes": "179427"
},
{
"name": "JavaScript",
"bytes": "1463002"
},
{
"name": "Python",
"bytes": "3686542"
},
{
"name": "Shell",
"bytes": "20225"
}
],
"symlink_target": ""
} |
from django.apps import AppConfig
class FeedConfig(AppConfig):
    """Django application configuration for the ``feed`` app."""

    name = 'feed'
| {
"content_hash": "8fca4ccb5f5282cdfbff25a4ef8bcce5",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 33,
"avg_line_length": 16.6,
"alnum_prop": 0.7349397590361446,
"repo_name": "SJIT-Hackerspace/hackerspace",
"id": "e4e5c4116d2959954af7863f8c054b8668283b04",
"size": "83",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "hackerspace/feed/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "19187"
},
{
"name": "HTML",
"bytes": "4010"
},
{
"name": "JavaScript",
"bytes": "176"
},
{
"name": "Python",
"bytes": "7061"
}
],
"symlink_target": ""
} |
# Pull the test suite's names into this module so unittest.main() can
# discover them.  NOTE(review): the star import must stay first -- the
# subsequent ``import os`` re-binds ``os`` if the suite shadows it.
from test import *
import os
# Run from the directory containing this file so relative paths used by
# the tests resolve correctly regardless of the caller's cwd.
path=os.path.dirname(os.path.abspath(__file__))
os.chdir(path)
import unittest
unittest.main()
"content_hash": "3020a8bae81649dfe4378b72f26c3823",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 47,
"avg_line_length": 17.714285714285715,
"alnum_prop": 0.75,
"repo_name": "wowgeeker/fabscheduler",
"id": "c610a07c48eb3f931ef41f63ad5d7654d8b65be6",
"size": "147",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "455"
}
],
"symlink_target": ""
} |
from openstack.tests.unit import base
from openstack.key_manager.v1 import container
ID_VAL = "123"
# Barbican-style container hrefs embed the resource id at the URL's end.
IDENTIFIER = 'http://localhost/containers/%s' % ID_VAL
# Representative container REST payload consumed by the tests below.
EXAMPLE = {
    'container_ref': IDENTIFIER,
    'created': '2015-03-09T12:14:57.233772',
    'name': '3',
    'secret_refs': ['4'],
    'status': '5',
    'type': '6',
    'updated': '2015-03-09T12:15:57.233772',
    'consumers': ['7']
}
class TestContainer(base.TestCase):
    """Unit tests for the key_manager v1 Container resource."""

    def test_basic(self):
        sot = container.Container()
        self.assertIsNone(sot.resource_key)
        self.assertEqual('containers', sot.resources_key)
        self.assertEqual('/containers', sot.base_path)
        # Every CRUD capability is enabled on containers.
        for capability in ('allow_create', 'allow_fetch', 'allow_commit',
                           'allow_delete', 'allow_list'):
            self.assertTrue(getattr(sot, capability))

    def test_make_it(self):
        sot = container.Container(**EXAMPLE)
        # (payload key, resource attribute) pairs that must round-trip.
        field_to_attr = (
            ('created', 'created_at'),
            ('name', 'name'),
            ('secret_refs', 'secret_refs'),
            ('status', 'status'),
            ('type', 'type'),
            ('updated', 'updated_at'),
            ('container_ref', 'id'),
            ('container_ref', 'container_ref'),
            ('consumers', 'consumers'),
        )
        for field, attr in field_to_attr:
            self.assertEqual(EXAMPLE[field], getattr(sot, attr))
        # The numeric id is parsed out of the ref URL.
        self.assertEqual(ID_VAL, sot.container_id)
| {
"content_hash": "a745f376e5838701aa67e987fed91d8d",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 69,
"avg_line_length": 35.46511627906977,
"alnum_prop": 0.6504918032786885,
"repo_name": "openstack/python-openstacksdk",
"id": "2095e6c41cf50bf4d8a9b26221a93edaec66a16c",
"size": "2071",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "openstack/tests/unit/key_manager/v1/test_container.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3804005"
},
{
"name": "Shell",
"bytes": "9027"
}
],
"symlink_target": ""
} |
from decorator import decorator
from .chara import Spy
from . import storage
from .replayers import SEQUENCE, PATTERN_MATCH
def record(target):
    """Decorator factory: record *target*'s interactions during the call.

    The recorded interactions are persisted via ``storage.write`` keyed
    on the wrapped function (and its instance, when it is a method).
    """
    @decorator
    def wrapper(fn, *args, **kwargs):
        spy = Spy(target)
        with spy.record():
            outcome = fn(*args, **kwargs)
        # For bound methods the first positional arg is the instance.
        instance = args[0] if args else None
        storage.write(fn, spy, instance)
        return outcome
    return wrapper
def replay(target, sequence_mode=False, pattern_match_mode=False):
    """Decorator factory: replay previously recorded interactions.

    ``sequence_mode`` takes precedence over ``pattern_match_mode`` when
    both are requested; with neither, the spy keeps its default mode.
    """
    @decorator
    def wrapper(fn, *args, **kwargs):
        spy = Spy(target)
        # For bound methods the first positional arg is the instance.
        instance = args[0] if args else None
        storage.read(fn, spy, instance)
        if sequence_mode:
            spy.replay_mode = SEQUENCE
        elif pattern_match_mode:
            spy.replay_mode = PATTERN_MATCH
        with spy.replay():
            return fn(*args, **kwargs)
    return wrapper
| {
"content_hash": "ddb025f03e04557150327b2445465d16",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 66,
"avg_line_length": 20.390243902439025,
"alnum_prop": 0.6064593301435407,
"repo_name": "bywires/chara",
"id": "87fd848142522b1959fe1bc4754202a7a9735e5c",
"size": "836",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chara/integration.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22110"
},
{
"name": "Shell",
"bytes": "77"
}
],
"symlink_target": ""
} |
import argparse
import gzip
import json
def make_unique(infile, outfile, tweet_ids):
    """Copy tweets from *infile* to *outfile*, dropping duplicates.

    A tweet is a duplicate when its ``id`` -- or its ``retweet_id`` --
    has already been seen.  *tweet_ids* is the (mutable) set of seen
    ids and is updated in place, so several files can be chained
    through the same set.

    Both files are gzip-compressed with one JSON document per line;
    unparseable lines are skipped.
    """
    # 'rt'/'wt' open the gzip streams in text mode; the original 'r'/'w'
    # are binary in Python 3, so writing str raised TypeError.  The
    # ``with`` block also guarantees the output file is closed/flushed.
    with gzip.open(outfile, 'wt') as fout, gzip.open(infile, 'rt') as source:
        for line in source:
            try:
                tweet = json.loads(line)
            except ValueError:
                # Malformed JSON line (ValueError also covers
                # json.JSONDecodeError); previously a bare except.
                continue

            if tweet['id'] in tweet_ids:
                continue
            if 'retweet_id' in tweet and tweet['retweet_id'] in tweet_ids:
                continue

            tweet_ids.add(tweet['id'])
            if 'retweet_id' in tweet:
                tweet_ids.add(tweet['retweet_id'])

            fout.write(json.dumps(tweet) + '\n')
def main():
    """Parse the CLI arguments and deduplicate the input archive."""
    parser = argparse.ArgumentParser()
    parser.add_argument('infile')
    parser.add_argument('outfile')
    opts = parser.parse_args()
    # Start from an empty set of seen tweet ids.
    make_unique(opts.infile, opts.outfile, set())


if __name__ == '__main__':
    main()
| {
"content_hash": "630ec226569feb620abedede4982c646",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 74,
"avg_line_length": 23.11904761904762,
"alnum_prop": 0.5417095777548918,
"repo_name": "lrei/twitter_sentiment_gen",
"id": "133bcc03ea2e9a0369f6a01560fa216d758d8369",
"size": "971",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "filter_unique.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "62466"
}
],
"symlink_target": ""
} |
from MathFunc import *
from copy import copy, deepcopy
from model.Game import Game
from model.World import World
from model.Move import Move
from CommonFunctions import *
def get_puck_owner(world: "World"):
    """Return the hockeyist that currently owns the puck, or None.

    The original version relied on an implicit ``None`` fall-through and
    a dead ``h = None`` initialiser; ``next`` with a default makes the
    "no owner" case explicit.  The annotation is a string so the module
    stays importable even before ``World`` is resolved.
    """
    return next(
        (h for h in world.hockeyists
         if h.id == world.puck.owner_hockeyist_id),
        None)
class WorldConstants:
    """Per-tick snapshot of game state plus derived geometry values.

    Computed once per tick from the raw ``Game``/``World``/``Move``
    objects so strategy code can consume pre-digested constants.
    """

    def __init__(self, game: (None, Game), world: (None, World), move: (None, Move)):
        # Behaviour tunables: stamina threshold and regular period length.
        self.go_rest_pct = 0.7
        self.main_period_len = 6000
        self.game = game
        self.move = move
        self.world = world
        # NOTE(review): presumably per-tick velocity decay (friction)
        # factors for hockeyists and the puck -- confirm against the
        # game engine's documentation.
        self.alpha_player = 0.98
        self.alpha_puck = 0.999
        self.tick = world.tick
        # Rink centre point and overall dimensions.
        self.rink_mid_x = (game.rink_right + game.rink_left)/2
        self.rink_mid_y = (game.rink_bottom + game.rink_top)/2
        self.rink_len = game.rink_right - game.rink_left
        self.rink_width = game.rink_bottom - game.rink_top
        self.puck_radius = world.puck.radius
        self.opponent_player = world.get_opponent_player()
        self.player = world.get_my_player()
        # Geometry of our own net.
        self.net_width = self.player.net_bottom-self.player.net_top
        self.net_top = self.player.net_top
        self.net_bot = self.player.net_bottom
        self.goalie_radius = 30
        self.player_radius = 30
        self.turn_speed = game.hockeyist_turn_angle_factor
        # +1.0 or -1.0: the x direction in which we attack.
        self.sgn_x = copysign(1,self.opponent_player.net_front - self.player.net_front)
        # NOTE: duplicate of the assignment above; harmless.
        self.puck_radius = world.puck.radius
        self.puck_x = world.puck.x
        self.puck_y = world.puck.y
        # Horizontal distance from the puck to each net front.
        self.puck_x_range_me = abs(self.player.net_front - self.puck_x)
        self.puck_x_range_opp = abs(self.opponent_player.net_front - self.puck_x)
        # Filled in later by strategy code.
        self.puck_state = None
        self.puck_state_next = None
        # Default defensive stance positions in front of our net.
        self.def_x = self.player.net_front + copysign(self.player_radius,self.sgn_x)*2.5
        self.def_y = (self.player.net_top + self.player.net_bottom) / 2
        self.def_x_front = self.def_x + self.rink_len/4 * self.sgn_x
        self.def_y_top = (self.player.net_top + self.player.net_bottom) / 2 - self.rink_width/4
        self.def_y_bot = (self.player.net_top + self.player.net_bottom) / 2 + self.rink_width/4
        self.puck_owner = get_puck_owner(world)
| {
"content_hash": "c19cc987ac51e08b8fbf35968903314f",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 95,
"avg_line_length": 44.68,
"alnum_prop": 0.631154879140555,
"repo_name": "pkosukhin/codehockey",
"id": "1d6764262a6124e2b9c7f25918e0f646c5ccff56",
"size": "2234",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Constants.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "144532"
}
],
"symlink_target": ""
} |
import json
from os.path import join
from glob import glob
class VocabularyEncoder(json.JSONEncoder):
    """JSON encoder that knows how to serialise Vocabulary objects."""

    def default(self, obj):
        """Delegate Vocabulary/VocabularyManifest instances to to_json()."""
        if isinstance(obj, (Vocabulary, VocabularyManifest)):
            return obj.to_json()
        return super(VocabularyEncoder, self).default(obj)
class Vocabulary():
    """A vocabulary model definition loaded from a JSON file.

    Exposes ``vocabularyId``, ``vocabularyName`` and ``vocabularyValues``
    attributes and is serialisable with ``json.dumps`` via
    ``VocabularyEncoder``.
    """

    def __init__(self, filepath):
        """Load the model definition from *filepath* (a JSON file)."""
        model = self._load(filepath)
        self.vocabularyId = model['vocabularyId']
        self.vocabularyName = model['vocabularyName']
        self.vocabularyValues = model['vocabularyValues']

    def _load(self, filepath):
        """Read and parse the JSON document at *filepath*."""
        # Pin the encoding so parsing does not depend on the locale.
        with open(filepath, encoding='utf-8') as f:
            return json.load(f)

    def to_json(self):
        """Return a JSON-serialisable dict of the model's attributes."""
        return self.__dict__
class VocabularyManifest():
    """Collects every JSON schema in a directory as Vocabulary objects.

    The manifest is ordered by file path so output is deterministic.
    """

    def __init__(self, models_dir):
        pattern = join(models_dir, '*.json')
        paths = sorted(glob(pattern))
        self.manifest = [Vocabulary(path) for path in paths]

    def to_json(self):
        """Return the manifest as a JSON-serialisable list."""
        return self.manifest
| {
"content_hash": "8a4226fdfea49416b5049d8f64354b7c",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 78,
"avg_line_length": 24.571428571428573,
"alnum_prop": 0.6337209302325582,
"repo_name": "JiscRDSS/taxonomyschema",
"id": "a46fd2f16451dec630e56416587fc6f079cefb62",
"size": "1204",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "taxonomyschema/vocabulary.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "944"
},
{
"name": "Python",
"bytes": "10335"
},
{
"name": "Shell",
"bytes": "2428"
}
],
"symlink_target": ""
} |
import logging
from pkg_resources import iter_entry_points
log = logging.getLogger(__name__)
SCANNERS = {}
def scan_path(store, parent, path):
    """Pick the highest-bidding scanner for *path* and run it.

    Scanner classes are loaded lazily from the ``datasurvey.scanners``
    entry point group on first use.  Every scanner instance that is not
    ultimately run has ``cleanup()`` called so no resources leak.
    """
    if not SCANNERS:
        for ep in iter_entry_points('datasurvey.scanners'):
            SCANNERS[ep.name] = ep.load()

    best_scanner = None
    best_bid = 0
    for scanner_cls in SCANNERS.values():
        scanner = scanner_cls(store, parent, path)
        bid = scanner.bid()
        if bid is not None and bid > best_bid:
            # Release the scanner this one displaces; the original code
            # leaked it.
            if best_scanner is not None:
                best_scanner.cleanup()
            best_bid = bid
            best_scanner = scanner
        else:
            # Also covers bid is None, which previously skipped cleanup.
            scanner.cleanup()

    if best_scanner is None:
        log.warning("Cannot find a scanner for: %r", path)
    else:
        try:
            best_scanner.scan()
        except Exception as ex:
            log.exception(ex)
        finally:
            best_scanner.cleanup()
| {
"content_hash": "282b5e6764a6c31910b14f6caa5269d9",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 59,
"avg_line_length": 26.424242424242426,
"alnum_prop": 0.5642201834862385,
"repo_name": "occrp/datasurvey",
"id": "872249e82afff2f0a11bfea877d577af91e0f082",
"size": "872",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "datasurvey/auction.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10494"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, unicode_literals
import os
import platform
from unittest.case import TestCase
import placa_grafica
from templates import FRAMES
class TestesDoMotor(TestCase):
    """Tests for the text-mode rendering engine (placa_grafica)."""

    def teste_inverter_coordenadas(self):
        # Cartesian y (0 at the bottom) must map to screen row
        # (0 at the top): y_screen = ALTURA - 1 - y.
        self.assertTupleEqual((0, placa_grafica.ALTURA - 1), placa_grafica.normalizar_coordenadas(0, 0))
        self.assertTupleEqual((3, placa_grafica.ALTURA - 2), placa_grafica.normalizar_coordenadas(3, 1))
        self.assertTupleEqual((10, 0), placa_grafica.normalizar_coordenadas(10, placa_grafica.ALTURA - 1))

    def teste_desenhar_frame_vazio(self):
        self.maxDiff = None

        class PontoCartesiano():
            """Minimal drawable stub: a single character at (x, y)."""

            def __init__(self, x, y, caracter):
                self.caracter = caracter
                self(x, y)

            def __call__(self, x, y):
                self.y = y
                self.x = x

        frames = FRAMES
        if platform.system() == 'Windows':
            # Expected frames use '\n'; normalise to the platform EOL.
            frames = [f.replace('\n', os.linesep) for f in frames]
        self.assertEqual(frames[0], placa_grafica.desenhar())
        ponto_a = PontoCartesiano(1, 1, 'A')
        self.assertEqual(frames[1], placa_grafica.desenhar(ponto_a))
        ponto_a.x = 2
        self.assertEqual(frames[2], placa_grafica.desenhar(ponto_a))
        ponto_b = PontoCartesiano(1, 1, 'B')
        ponto_a.y = 2
        self.assertEqual(frames[3], placa_grafica.desenhar(ponto_a, ponto_b))
        ponto_b(2, 2)
        self.assertEqual(frames[4], placa_grafica.desenhar(ponto_a, ponto_b))
        ponto_b(placa_grafica.LARGURA - 1, placa_grafica.ALTURA - 1)
        # NOTE(review): this re-uses frames[4] even though ponto_b moved
        # to the far corner -- confirm whether the corner point is
        # expected to be clipped/ignored or a frames[5] fixture is missing.
        self.assertEqual(frames[4], placa_grafica.desenhar(ponto_a, ponto_b))
| {
"content_hash": "76dd81bce2b96046ac65bb5fcb8918c8",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 106,
"avg_line_length": 39.73809523809524,
"alnum_prop": 0.6285200718993409,
"repo_name": "Cleitoon1/pythonbirds",
"id": "025c040043e0a436ad174af64d4373457fdb4661",
"size": "1693",
"binary": false,
"copies": "11",
"ref": "refs/heads/simples",
"path": "testes/testes_placa_grafica.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "91477"
}
],
"symlink_target": ""
} |
import copy
import functools
import random
import netaddr
from neutron_lib import constants
from oslo_config import cfg
from oslo_log import log as logging
import testscenarios
from neutron.agent import firewall
from neutron.agent.linux import iptables_firewall
from neutron.agent.linux import openvswitch_firewall
from neutron.agent import securitygroups_rpc as sg_cfg
from neutron.cmd.sanity import checks
from neutron.tests.common import conn_testers
from neutron.tests.functional.agent.linux import base as linux_base
from neutron.tests.functional import base
LOG = logging.getLogger(__name__)

# Hook recognised by testscenarios: expands each entry in ``scenarios``
# into a separate test case.
load_tests = testscenarios.load_tests_apply_scenarios

# Maps each traffic direction to its opposite; used to check that a
# rule for one direction does not open the reverse one.
reverse_direction = {
    conn_testers.ConnectionTester.INGRESS:
        conn_testers.ConnectionTester.EGRESS,
    conn_testers.ConnectionTester.EGRESS:
        conn_testers.ConnectionTester.INGRESS}
# Maps each transport protocol to the other one; used to check that
# e.g. a TCP rule does not also allow UDP traffic.
reverse_transport_protocol = {
    conn_testers.ConnectionTester.TCP: conn_testers.ConnectionTester.UDP,
    conn_testers.ConnectionTester.UDP: conn_testers.ConnectionTester.TCP}

DEVICE_OWNER_COMPUTE = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'fake'
# Size of the 12-bit VLAN id space on a bridge.
VLAN_COUNT = 4096
def skip_if_firewall(firewall_name):
    """Decorator factory: skip a test when run under *firewall_name*.

    The test class exposes ``firewall_name`` (set per scenario) and the
    wrapped method is skipped for that driver, running normally for all
    others.
    """
    def decorate(test_func):
        @functools.wraps(test_func)
        def wrapper(self, *args, **kwargs):
            if self.firewall_name == firewall_name:
                self.skipTest("This test doesn't use %s firewall" %
                              firewall_name)
            return test_func(self, *args, **kwargs)
        return wrapper
    return decorate
def _add_rule(sg_rules, base, port_range_min=None, port_range_max=None):
rule = copy.copy(base)
if port_range_min:
rule['port_range_min'] = port_range_min
if port_range_max:
rule['port_range_max'] = port_range_max
sg_rules.append(rule)
class BaseFirewallTestCase(base.BaseSudoTestCase):
FAKE_SECURITY_GROUP_ID = 'fake_sg_id'
MAC_SPOOFED = "fa:16:3e:9a:2f:48"
scenarios_iptables = testscenarios.multiply_scenarios(
[('IptablesFirewallDriver', {'initialize': 'initialize_iptables',
'firewall_name': 'iptables'})],
[('with ipset', {'enable_ipset': True}),
('without ipset', {'enable_ipset': False})])
scenarios_ovs_fw_interfaces = testscenarios.multiply_scenarios(
[('OVS Firewall Driver', {'initialize': 'initialize_ovs',
'firewall_name': 'openvswitch'})],
linux_base.BaseOVSLinuxTestCase.scenarios)
scenarios = scenarios_iptables + scenarios_ovs_fw_interfaces
ip_cidr = None
vlan_range = set(range(VLAN_COUNT))
def setUp(self):
cfg.CONF.register_opts(sg_cfg.security_group_opts, 'SECURITYGROUP')
super(BaseFirewallTestCase, self).setUp()
self.tester, self.firewall = getattr(self, self.initialize)()
if self.firewall_name == "openvswitch":
self.assign_vlan_to_peers()
self.src_port_desc = self._create_port_description(
self.tester.vm_port_id,
[self.tester.vm_ip_address],
self.tester.vm_mac_address,
[self.FAKE_SECURITY_GROUP_ID])
# FIXME(jlibosva): We should consider to call prepare_port_filter with
# deferred bridge depending on its performance
self.firewall.prepare_port_filter(self.src_port_desc)
def initialize_iptables(self):
cfg.CONF.set_override('enable_ipset', self.enable_ipset,
'SECURITYGROUP')
tester = self.useFixture(
conn_testers.LinuxBridgeConnectionTester(self.ip_cidr))
firewall_drv = iptables_firewall.IptablesFirewallDriver(
namespace=tester.bridge_namespace)
return tester, firewall_drv
def initialize_ovs(self):
self.config(group='OVS', ovsdb_interface=self.ovsdb_interface)
# Tests for ovs requires kernel >= 4.3 and OVS >= 2.5
if not checks.ovs_conntrack_supported():
self.skipTest("Open vSwitch with conntrack is not installed "
"on this machine. To run tests for OVS/CT firewall,"
" please meet the requirements (kernel>=4.3, "
"OVS>=2.5. More info at"
"https://github.com/openvswitch/ovs/blob/master/"
"FAQ.md")
tester = self.useFixture(
conn_testers.OVSConnectionTester(self.ip_cidr))
firewall_drv = openvswitch_firewall.OVSFirewallDriver(tester.bridge)
return tester, firewall_drv
def assign_vlan_to_peers(self):
vlan = self.get_not_used_vlan()
LOG.debug("Using %d vlan tag for this test", vlan)
self.tester.set_vm_tag(vlan)
self.tester.set_peer_tag(vlan)
def get_not_used_vlan(self):
port_vlans = self.firewall.int_br.br.ovsdb.db_find(
'Port', ('tag', '!=', []), columns=['tag']).execute()
used_vlan_tags = {val['tag'] for val in port_vlans}
available_vlans = self.vlan_range - used_vlan_tags
return random.choice(list(available_vlans))
@staticmethod
def _create_port_description(port_id, ip_addresses, mac_address, sg_ids):
return {'admin_state_up': True,
'device': port_id,
'device_owner': DEVICE_OWNER_COMPUTE,
'fixed_ips': ip_addresses,
'mac_address': mac_address,
'port_security_enabled': True,
'security_groups': sg_ids,
'status': 'ACTIVE'}
def _apply_security_group_rules(self, sg_id, sg_rules):
with self.firewall.defer_apply():
self.firewall.update_security_group_rules(sg_id, sg_rules)
self.firewall.update_port_filter(self.src_port_desc)
    def _apply_security_group_members(self, sg_id, members):
        """Replace sg_id's member addresses and re-filter the source port.

        Both calls run inside one deferred-apply transaction so the update
        reaches the dataplane atomically.
        """
        with self.firewall.defer_apply():
            self.firewall.update_security_group_members(sg_id, members)
            self.firewall.update_port_filter(self.src_port_desc)
class FirewallTestCase(BaseFirewallTestCase):
    """Firewall driver scenarios exercised through an IPv4 port."""

    ip_cidr = '192.168.0.1/24'

    @skip_if_firewall('openvswitch')
    def test_rule_application_converges(self):
        """Re-applying an unchanged ruleset must be a no-op."""
        sg_rules = [{'ethertype': 'IPv4', 'direction': 'egress'},
                    {'ethertype': 'IPv6', 'direction': 'egress'},
                    {'ethertype': 'IPv4', 'direction': 'ingress',
                     'source_ip_prefix': '0.0.0.0/0', 'protocol': 'icmp'},
                    {'ethertype': 'IPv6', 'direction': 'ingress',
                     'source_ip_prefix': '0::0/0', 'protocol': 'ipv6-icmp'}]
        # make sure port ranges converge on all protocols with and without
        # port ranges (prevents regression of bug 1502924)
        for proto in ('tcp', 'udp', 'icmp'):
            for version in ('IPv4', 'IPv6'):
                if proto == 'icmp' and version == 'IPv6':
                    proto = 'ipv6-icmp'
                base = {'ethertype': version, 'direction': 'ingress',
                        'protocol': proto}
                sg_rules.append(copy.copy(base))
                _add_rule(sg_rules, base, port_range_min=50,
                          port_range_max=50)
                _add_rule(sg_rules, base, port_range_max=55)
                _add_rule(sg_rules, base, port_range_min=60,
                          port_range_max=60)
                _add_rule(sg_rules, base, port_range_max=65)
        # add some single-host rules to prevent regression of bug 1502917
        sg_rules.append({'ethertype': 'IPv4', 'direction': 'ingress',
                         'source_ip_prefix': '77.77.77.77/32'})
        sg_rules.append({'ethertype': 'IPv6', 'direction': 'ingress',
                         'source_ip_prefix': 'fe80::1/128'})
        self.firewall.update_security_group_rules(
            self.FAKE_SECURITY_GROUP_ID, sg_rules)
        self.firewall.prepare_port_filter(self.src_port_desc)
        # after one prepare call, another apply should be a NOOP
        self.assertEqual([], self.firewall.iptables._apply())
        orig_sg_rules = copy.copy(sg_rules)
        for proto in ('tcp', 'udp', 'icmp'):
            for version in ('IPv4', 'IPv6'):
                if proto == 'icmp' and version == 'IPv6':
                    proto = 'ipv6-icmp'
                # make sure firewall is in converged state
                self.firewall.update_security_group_rules(
                    self.FAKE_SECURITY_GROUP_ID, orig_sg_rules)
                self.firewall.update_port_filter(self.src_port_desc)
                sg_rules = copy.copy(orig_sg_rules)
                # remove one rule and add another to make sure it results in
                # exactly one delete and insert
                sg_rules.pop(0 if version == 'IPv4' else 1)
                sg_rules.append({'ethertype': version, 'direction': 'egress',
                                 'protocol': proto})
                self.firewall.update_security_group_rules(
                    self.FAKE_SECURITY_GROUP_ID, sg_rules)
                result = self.firewall.update_port_filter(self.src_port_desc)
                deletes = [r for r in result if r.startswith('-D ')]
                creates = [r for r in result if r.startswith('-I ')]
                self.assertEqual(1, len(deletes))
                self.assertEqual(1, len(creates))
                # quick sanity check to make sure the insert was for the
                # correct proto
                self.assertIn('-p %s' % proto, creates[0])
                # another apply should be a NOOP if the right rule was removed
                # and the new one was inserted in the correct position
                self.assertEqual([], self.firewall.iptables._apply())

    @skip_if_firewall('openvswitch')
    def test_rule_ordering_correct(self):
        """iptables rules must appear in the same order the SG rules do."""
        sg_rules = [
            {'ethertype': 'IPv4', 'direction': 'egress', 'protocol': 'tcp',
             'port_range_min': i, 'port_range_max': i}
            for i in range(50, 61)
        ]
        self.firewall.update_security_group_rules(
            self.FAKE_SECURITY_GROUP_ID, sg_rules)
        self.firewall.prepare_port_filter(self.src_port_desc)
        self._assert_sg_out_tcp_rules_appear_in_order(sg_rules)
        # remove a rule and add a new one
        sg_rules.pop(5)
        sg_rules.insert(8, {'ethertype': 'IPv4', 'direction': 'egress',
                            'protocol': 'tcp', 'port_range_min': 400,
                            'port_range_max': 400})
        self.firewall.update_security_group_rules(
            self.FAKE_SECURITY_GROUP_ID, sg_rules)
        self.firewall.prepare_port_filter(self.src_port_desc)
        self._assert_sg_out_tcp_rules_appear_in_order(sg_rules)
        # reverse all of the rules (requires lots of deletes and inserts)
        sg_rules = list(reversed(sg_rules))
        self.firewall.update_security_group_rules(
            self.FAKE_SECURITY_GROUP_ID, sg_rules)
        self.firewall.prepare_port_filter(self.src_port_desc)
        self._assert_sg_out_tcp_rules_appear_in_order(sg_rules)

    def _assert_sg_out_tcp_rules_appear_in_order(self, sg_rules):
        """Assert outbound TCP rules appear consecutively, in sg_rules order."""
        outgoing_rule_pref = '-A %s-o%s' % (self.firewall.iptables.wrap_name,
                                            self.src_port_desc['device'][3:13])
        rules = [
            r for r in self.firewall.iptables.get_rules_for_table('filter')
            if r.startswith(outgoing_rule_pref)
        ]
        # we want to ensure the rules went in in the same order we sent
        indexes = [rules.index('%s -p tcp -m tcp --dport %s -j RETURN' %
                               (outgoing_rule_pref, i['port_range_min']))
                   for i in sg_rules]
        # all indexes should be in order with no unexpected rules in between.
        # NOTE: wrap range() in list() -- on Python 3 a range object never
        # compares equal to a list, which made this assertion always fail.
        self.assertEqual(list(range(indexes[0], indexes[-1] + 1)), indexes)

    def test_ingress_icmp_secgroup(self):
        """Ingress ICMP is blocked by default and allowed by an SG rule."""
        # update the sg_group to make ping pass
        sg_rules = [{'ethertype': constants.IPv4,
                     'direction': firewall.INGRESS_DIRECTION,
                     'protocol': constants.PROTO_NAME_ICMP},
                    {'ethertype': constants.IPv4,
                     'direction': firewall.EGRESS_DIRECTION}]
        self.tester.assert_no_connection(protocol=self.tester.ICMP,
                                         direction=self.tester.INGRESS)
        self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, sg_rules)
        self.tester.assert_connection(protocol=self.tester.ICMP,
                                      direction=self.tester.INGRESS)

    def test_mac_spoofing(self):
        """Traffic from a spoofed MAC address must be dropped."""
        sg_rules = [{'ethertype': constants.IPv4,
                     'direction': firewall.INGRESS_DIRECTION,
                     'protocol': constants.PROTO_NAME_ICMP},
                    {'ethertype': constants.IPv4,
                     'direction': firewall.EGRESS_DIRECTION}]
        self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, sg_rules)
        self.tester.assert_connection(protocol=self.tester.ICMP,
                                      direction=self.tester.INGRESS)
        self.tester.vm_mac_address = self.MAC_SPOOFED
        self.tester.flush_arp_tables()
        self.tester.assert_no_connection(protocol=self.tester.ICMP,
                                         direction=self.tester.INGRESS)
        self.tester.assert_no_connection(protocol=self.tester.ICMP,
                                         direction=self.tester.EGRESS)

    @skip_if_firewall('openvswitch')
    def test_mac_spoofing_works_without_port_security_enabled(self):
        """MAC spoofing passes when port security is disabled."""
        self.src_port_desc['port_security_enabled'] = False
        self.firewall.update_port_filter(self.src_port_desc)
        self.tester.assert_connection(protocol=self.tester.ICMP,
                                      direction=self.tester.INGRESS)
        self.tester.vm_mac_address = self.MAC_SPOOFED
        self.tester.flush_arp_tables()
        self.tester.assert_connection(protocol=self.tester.ICMP,
                                      direction=self.tester.INGRESS)
        self.tester.assert_connection(protocol=self.tester.ICMP,
                                      direction=self.tester.EGRESS)

    def test_port_security_enabled_set_to_false(self):
        """Disabling port security opens the port completely."""
        self.tester.assert_no_connection(protocol=self.tester.ICMP,
                                         direction=self.tester.INGRESS)
        self.src_port_desc['port_security_enabled'] = False
        self.firewall.update_port_filter(self.src_port_desc)
        self.tester.assert_connection(protocol=self.tester.ICMP,
                                      direction=self.tester.INGRESS)

    def test_dhcp_requests_from_vm(self):
        """DHCP client traffic from the VM is always allowed."""
        # DHCPv4 client traffic goes from source port 68 to destination 67
        self.tester.assert_connection(direction=self.tester.EGRESS,
                                      protocol=self.tester.UDP,
                                      src_port=68, dst_port=67)

    def test_dhcp_server_forbidden_on_vm(self):
        """A VM must never be able to act as a DHCP server."""
        self.tester.assert_no_connection(direction=self.tester.EGRESS,
                                         protocol=self.tester.UDP,
                                         src_port=67, dst_port=68)
        self.tester.assert_no_connection(direction=self.tester.INGRESS,
                                         protocol=self.tester.UDP,
                                         src_port=68, dst_port=67)

    def test_ip_spoofing(self):
        """Traffic from a spoofed IPv4 address must be dropped."""
        sg_rules = [{'ethertype': constants.IPv4,
                     'direction': firewall.INGRESS_DIRECTION,
                     'protocol': constants.PROTO_NAME_ICMP}]
        self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, sg_rules)
        not_allowed_ip = "%s/24" % (
            netaddr.IPAddress(self.tester.vm_ip_address) + 1)
        self.tester.assert_connection(protocol=self.tester.ICMP,
                                      direction=self.tester.INGRESS)
        self.tester.vm_ip_cidr = not_allowed_ip
        self.tester.assert_no_connection(protocol=self.tester.ICMP,
                                         direction=self.tester.INGRESS)
        self.tester.assert_no_connection(protocol=self.tester.ICMP,
                                         direction=self.tester.EGRESS)
        self.tester.assert_no_connection(protocol=self.tester.UDP,
                                         src_port=68, dst_port=67,
                                         direction=self.tester.EGRESS)

    @skip_if_firewall('openvswitch')
    def test_ip_spoofing_works_without_port_security_enabled(self):
        """IP spoofing passes when port security is disabled."""
        self.src_port_desc['port_security_enabled'] = False
        self.firewall.update_port_filter(self.src_port_desc)
        sg_rules = [{'ethertype': constants.IPv4,
                     'direction': firewall.INGRESS_DIRECTION,
                     'protocol': constants.PROTO_NAME_ICMP}]
        self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, sg_rules)
        not_allowed_ip = "%s/24" % (
            netaddr.IPAddress(self.tester.vm_ip_address) + 1)
        self.tester.assert_connection(protocol=self.tester.ICMP,
                                      direction=self.tester.INGRESS)
        self.tester.vm_ip_cidr = not_allowed_ip
        self.tester.assert_connection(protocol=self.tester.ICMP,
                                      direction=self.tester.INGRESS)
        self.tester.assert_connection(protocol=self.tester.ICMP,
                                      direction=self.tester.EGRESS)

    def test_allowed_address_pairs(self):
        """Only IPs in the port's allowed-address-pairs may be spoofed."""
        sg_rules = [{'ethertype': constants.IPv4,
                     'direction': firewall.INGRESS_DIRECTION,
                     'protocol': constants.PROTO_NAME_ICMP},
                    {'ethertype': constants.IPv4,
                     'direction': firewall.EGRESS_DIRECTION}]
        self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, sg_rules)
        port_mac = self.tester.vm_mac_address
        allowed_ip = netaddr.IPAddress(self.tester.vm_ip_address) + 1
        not_allowed_ip = "%s/24" % (allowed_ip + 1)
        self.src_port_desc['allowed_address_pairs'] = [
            {'mac_address': port_mac,
             'ip_address': allowed_ip}]
        allowed_ip = "%s/24" % allowed_ip
        self.firewall.update_port_filter(self.src_port_desc)
        self.tester.assert_connection(protocol=self.tester.ICMP,
                                      direction=self.tester.INGRESS)
        self.tester.vm_ip_cidr = allowed_ip
        self.tester.assert_connection(protocol=self.tester.ICMP,
                                      direction=self.tester.INGRESS)
        self.tester.vm_ip_cidr = not_allowed_ip
        self.tester.assert_no_connection(protocol=self.tester.ICMP,
                                         direction=self.tester.INGRESS)

    def test_arp_is_allowed(self):
        """ARP always passes in both directions."""
        self.tester.assert_connection(protocol=self.tester.ARP,
                                      direction=self.tester.EGRESS)
        self.tester.assert_connection(protocol=self.tester.ARP,
                                      direction=self.tester.INGRESS)

    def _test_rule(self, direction, protocol):
        """Allow a single direction/protocol pair and verify only it passes."""
        sg_rules = [{'ethertype': constants.IPv4, 'direction': direction,
                     'protocol': protocol}]
        self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, sg_rules)
        not_allowed_direction = reverse_direction[direction]
        not_allowed_protocol = reverse_transport_protocol[protocol]
        self.tester.assert_connection(protocol=protocol,
                                      direction=direction)
        self.tester.assert_no_connection(protocol=not_allowed_protocol,
                                         direction=direction)
        self.tester.assert_no_connection(protocol=protocol,
                                         direction=not_allowed_direction)

    def test_ingress_tcp_rule(self):
        """Ingress TCP rule permits only ingress TCP."""
        self._test_rule(self.tester.INGRESS, self.tester.TCP)

    def test_ingress_udp_rule(self):
        """Ingress UDP rule permits only ingress UDP."""
        self._test_rule(self.tester.INGRESS, self.tester.UDP)

    def test_egress_tcp_rule(self):
        """Egress TCP rule permits only egress TCP."""
        self._test_rule(self.tester.EGRESS, self.tester.TCP)

    def test_egress_udp_rule(self):
        """Egress UDP rule permits only egress UDP."""
        self._test_rule(self.tester.EGRESS, self.tester.UDP)

    def test_connection_with_destination_port_range(self):
        """Destination port-range rule is honored at both boundaries."""
        port_min = 12345
        port_max = 12346
        sg_rules = [{'ethertype': constants.IPv4,
                     'direction': firewall.INGRESS_DIRECTION,
                     'protocol': constants.PROTO_NAME_TCP,
                     'port_range_min': port_min,
                     'port_range_max': port_max}]
        self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, sg_rules)
        self.tester.assert_connection(protocol=self.tester.TCP,
                                      direction=self.tester.INGRESS,
                                      dst_port=port_min)
        self.tester.assert_connection(protocol=self.tester.TCP,
                                      direction=self.tester.INGRESS,
                                      dst_port=port_max)
        self.tester.assert_no_connection(protocol=self.tester.TCP,
                                         direction=self.tester.INGRESS,
                                         dst_port=port_min - 1)
        self.tester.assert_no_connection(protocol=self.tester.TCP,
                                         direction=self.tester.INGRESS,
                                         dst_port=port_max + 1)

    def test_connection_with_source_port_range(self):
        """Source port-range rule is honored at both boundaries."""
        source_port_min = 12345
        source_port_max = 12346
        sg_rules = [{'ethertype': constants.IPv4,
                     'direction': firewall.EGRESS_DIRECTION,
                     'protocol': constants.PROTO_NAME_TCP,
                     'source_port_range_min': source_port_min,
                     'source_port_range_max': source_port_max}]
        self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, sg_rules)
        self.tester.assert_connection(protocol=self.tester.TCP,
                                      direction=self.tester.EGRESS,
                                      src_port=source_port_min)
        self.tester.assert_connection(protocol=self.tester.TCP,
                                      direction=self.tester.EGRESS,
                                      src_port=source_port_max)
        self.tester.assert_no_connection(protocol=self.tester.TCP,
                                         direction=self.tester.EGRESS,
                                         src_port=source_port_min - 1)
        self.tester.assert_no_connection(protocol=self.tester.TCP,
                                         direction=self.tester.EGRESS,
                                         src_port=source_port_max + 1)

    @skip_if_firewall('iptables')
    def test_established_connection_is_cut(self):
        """Removing the allowing rule also cuts established connections."""
        port = 12345
        sg_rules = [{'ethertype': constants.IPv4,
                     'direction': firewall.INGRESS_DIRECTION,
                     'protocol': constants.PROTO_NAME_TCP,
                     'port_range_min': port,
                     'port_range_max': port}]
        connection = {'protocol': self.tester.TCP,
                      'direction': self.tester.INGRESS,
                      'dst_port': port}
        self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, sg_rules)
        self.tester.establish_connection(**connection)
        self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, list())
        self.tester.assert_no_established_connection(**connection)

    @skip_if_firewall('openvswitch')
    def test_preventing_firewall_blink(self):
        """No packet may leak through while rules are being swapped."""
        direction = self.tester.INGRESS
        sg_rules = [{'ethertype': 'IPv4', 'direction': 'ingress',
                     'protocol': 'tcp'}]
        self.tester.start_sending_icmp(direction)
        self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, sg_rules)
        self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, {})
        self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, sg_rules)
        self.tester.stop_sending_icmp(direction)
        packets_sent = self.tester.get_sent_icmp_packets(direction)
        packets_received = self.tester.get_received_icmp_packets(direction)
        self.assertGreater(packets_sent, 0)
        # expected value first, matching the convention used elsewhere here
        self.assertEqual(0, packets_received)

    def test_remote_security_groups(self):
        """Rules with remote_group_id only match members of that group."""
        remote_sg_id = 'remote_sg_id'
        peer_port_desc = self._create_port_description(
            self.tester.peer_port_id,
            [self.tester.peer_ip_address],
            self.tester.peer_mac_address,
            [remote_sg_id])
        vm_sg_members = {'IPv4': [self.tester.peer_ip_address]}
        peer_sg_rules = [{'ethertype': 'IPv4', 'direction': 'egress',
                          'protocol': 'icmp'}]
        self.firewall.update_security_group_rules(remote_sg_id, peer_sg_rules)
        self.firewall.update_security_group_members(remote_sg_id,
                                                    vm_sg_members)
        self.firewall.prepare_port_filter(peer_port_desc)
        vm_sg_rules = [{'ethertype': 'IPv4', 'direction': 'ingress',
                        'protocol': 'icmp', 'remote_group_id': remote_sg_id}]
        self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID,
                                         vm_sg_rules)
        self.tester.assert_connection(protocol=self.tester.ICMP,
                                      direction=self.tester.INGRESS)
        self.tester.assert_no_connection(protocol=self.tester.TCP,
                                         direction=self.tester.INGRESS)
        self.tester.assert_no_connection(protocol=self.tester.ICMP,
                                         direction=self.tester.EGRESS)

    def test_related_connection(self):
        """Test ICMP net unreachable packets get back

        When destination address of ip traffic is not reachable, ICMP packets
        are returned. These packets are marked as RELATED traffic by conntrack
        and this test case validates such packets are not dropped by the
        firewall as ingress ICMP packets are not allowed in this test case. The
        used address below 1.2.3.4 is outside of subnet that is used in tester
        object.
        """
        # Enable ip forwarding on the interface in order to reply with
        # destination net unreachable
        self.tester._peer.execute([
            'sysctl', '-w', 'net.ipv4.conf.%s.forwarding=1' %
            self.tester._peer.port.name])
        self.tester.set_vm_default_gateway(self.tester.peer_ip_address)
        vm_sg_rules = [{'ethertype': 'IPv4', 'direction': 'egress',
                        'protocol': 'icmp'}]
        self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID,
                                         vm_sg_rules)
        self.tester.assert_net_unreachable(self.tester.EGRESS, '1.2.3.4')
class FirewallTestCaseIPv6(BaseFirewallTestCase):
    """Firewall driver scenarios exercised through an IPv6 port."""
    scenarios = BaseFirewallTestCase.scenarios_ovs_fw_interfaces
    ip_cidr = '2001:db8:aaaa::1/64'
    def test_icmp_from_specific_address(self):
        """Ingress ICMPv6 is allowed only once a source-scoped rule exists."""
        sg_rules = [{'ethertype': constants.IPv6,
                     'direction': firewall.INGRESS_DIRECTION,
                     'protocol': constants.PROTO_NAME_ICMP,
                     'source_ip_prefix': self.tester.peer_ip_address}]
        self.tester.assert_no_connection(protocol=self.tester.ICMP,
                                         direction=self.tester.INGRESS)
        self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, sg_rules)
        self.tester.assert_connection(protocol=self.tester.ICMP,
                                      direction=self.tester.INGRESS)
    def test_icmp_to_specific_address(self):
        """Egress ICMPv6 is allowed only once a destination-scoped rule exists."""
        sg_rules = [{'ethertype': constants.IPv6,
                     'direction': firewall.EGRESS_DIRECTION,
                     'protocol': constants.PROTO_NAME_ICMP,
                     'destination_ip_prefix': self.tester.peer_ip_address}]
        self.tester.assert_no_connection(protocol=self.tester.ICMP,
                                         direction=self.tester.EGRESS)
        self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, sg_rules)
        self.tester.assert_connection(protocol=self.tester.ICMP,
                                      direction=self.tester.EGRESS)
    def test_tcp_from_specific_address(self):
        """Source-scoped TCP rule allows only TCP from that source."""
        sg_rules = [{'ethertype': constants.IPv6,
                     'direction': firewall.INGRESS_DIRECTION,
                     'protocol': constants.PROTO_NAME_TCP,
                     'source_ip_prefix': self.tester.peer_ip_address}]
        self.tester.assert_no_connection(protocol=self.tester.TCP,
                                         direction=self.tester.INGRESS)
        self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, sg_rules)
        self.tester.assert_connection(protocol=self.tester.TCP,
                                      direction=self.tester.INGRESS)
        self.tester.assert_no_connection(protocol=self.tester.UDP,
                                         direction=self.tester.INGRESS)
        self.tester.assert_no_connection(protocol=self.tester.ICMP,
                                         direction=self.tester.INGRESS)
    def test_tcp_to_specific_address(self):
        """Destination-scoped TCP rule allows only TCP to that destination."""
        sg_rules = [{'ethertype': constants.IPv6,
                     'direction': firewall.EGRESS_DIRECTION,
                     'protocol': constants.PROTO_NAME_TCP,
                     'destination_ip_prefix': self.tester.peer_ip_address}]
        self.tester.assert_no_connection(protocol=self.tester.TCP,
                                         direction=self.tester.EGRESS)
        self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, sg_rules)
        self.tester.assert_connection(protocol=self.tester.TCP,
                                      direction=self.tester.EGRESS)
        self.tester.assert_no_connection(protocol=self.tester.UDP,
                                         direction=self.tester.EGRESS)
        self.tester.assert_no_connection(protocol=self.tester.ICMP,
                                         direction=self.tester.EGRESS)
    def test_udp_from_specific_address(self):
        """Source-scoped UDP rule allows only UDP from that source."""
        sg_rules = [{'ethertype': constants.IPv6,
                     'direction': firewall.INGRESS_DIRECTION,
                     'protocol': constants.PROTO_NAME_UDP,
                     'source_ip_prefix': self.tester.peer_ip_address}]
        self.tester.assert_no_connection(protocol=self.tester.UDP,
                                         direction=self.tester.INGRESS)
        self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, sg_rules)
        self.tester.assert_connection(protocol=self.tester.UDP,
                                      direction=self.tester.INGRESS)
        self.tester.assert_no_connection(protocol=self.tester.TCP,
                                         direction=self.tester.INGRESS)
        self.tester.assert_no_connection(protocol=self.tester.ICMP,
                                         direction=self.tester.INGRESS)
    def test_udp_to_specific_address(self):
        """Destination-scoped UDP rule allows only UDP to that destination."""
        sg_rules = [{'ethertype': constants.IPv6,
                     'direction': firewall.EGRESS_DIRECTION,
                     'protocol': constants.PROTO_NAME_UDP,
                     'destination_ip_prefix': self.tester.peer_ip_address}]
        self.tester.assert_no_connection(protocol=self.tester.UDP,
                                         direction=self.tester.EGRESS)
        self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, sg_rules)
        self.tester.assert_connection(protocol=self.tester.UDP,
                                      direction=self.tester.EGRESS)
        self.tester.assert_no_connection(protocol=self.tester.TCP,
                                         direction=self.tester.EGRESS)
        self.tester.assert_no_connection(protocol=self.tester.ICMP,
                                         direction=self.tester.EGRESS)
    @skip_if_firewall('openvswitch')
    def test_ip_spoofing(self):
        """Traffic from a spoofed IPv6 address must be dropped."""
        sg_rules = [{'ethertype': constants.IPv6,
                     'direction': firewall.INGRESS_DIRECTION,
                     'protocol': constants.PROTO_NAME_ICMP}]
        self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, sg_rules)
        not_allowed_ip = "%s/64" % (
            netaddr.IPAddress(self.tester.vm_ip_address) + 1)
        self.tester.assert_connection(protocol=self.tester.ICMP,
                                      direction=self.tester.INGRESS)
        self.tester.vm_ip_cidr = not_allowed_ip
        self.tester.assert_no_connection(protocol=self.tester.ICMP,
                                         direction=self.tester.INGRESS)
        self.tester.assert_no_connection(protocol=self.tester.ICMP,
                                         direction=self.tester.EGRESS)
        # ports 546/547 are DHCPv6 client/server
        self.tester.assert_no_connection(protocol=self.tester.UDP,
                                         src_port=546, dst_port=547,
                                         direction=self.tester.EGRESS)
| {
"content_hash": "99c767ce2edc415a876769d94fabdfb9",
"timestamp": "",
"source": "github",
"line_count": 666,
"max_line_length": 79,
"avg_line_length": 49.786786786786784,
"alnum_prop": 0.5755473792146691,
"repo_name": "bigswitch/neutron",
"id": "c9d57f1f2e179e0bd403aa03b71ea37cb7b2cf6c",
"size": "33949",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neutron/tests/functional/agent/test_firewall.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "8468247"
},
{
"name": "Shell",
"bytes": "14648"
}
],
"symlink_target": ""
} |
"""
Support for Honeywell Round Connected and Honeywell Evohome thermostats.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/thermostat.honeywell/
"""
import logging
import socket
from homeassistant.components.thermostat import ThermostatDevice
from homeassistant.const import (
CONF_PASSWORD, CONF_USERNAME, TEMP_CELSIUS, TEMP_FAHRENHEIT)
REQUIREMENTS = ['evohomeclient==0.2.5',
'somecomfort==0.2.1']
_LOGGER = logging.getLogger(__name__)
CONF_AWAY_TEMP = "away_temperature"
DEFAULT_AWAY_TEMP = 16
def _setup_round(username, password, config, add_devices):
    """Set up the EU-region (evohome) Round Connected thermostats."""
    from evohomeclient import EvohomeClient
    try:
        away_temp = float(config.get(CONF_AWAY_TEMP, DEFAULT_AWAY_TEMP))
    except ValueError:
        _LOGGER.error("value entered for item %s should convert to a number",
                      CONF_AWAY_TEMP)
        return False
    evo_api = EvohomeClient(username, password)
    try:
        for index, zone in enumerate(evo_api.temperatures(force_refresh=True)):
            # Only the first zone acts as "master" and refreshes the cache.
            is_master = (index == 0)
            add_devices([RoundThermostat(evo_api, zone['id'],
                                         is_master, away_temp)])
    except socket.error:
        _LOGGER.error(
            "Connection error logging into the honeywell evohome web service"
        )
        return False
    return True
# config will be used later
# pylint: disable=unused-argument
def _setup_us(username, password, config, add_devices):
    """Set up the US-region (somecomfort) thermostats."""
    import somecomfort
    try:
        client = somecomfort.SomeComfort(username, password)
    except somecomfort.AuthError:
        _LOGGER.error('Failed to login to honeywell account %s', username)
        return False
    except somecomfort.SomeComfortError as ex:
        _LOGGER.error('Failed to initialize honeywell client: %s', str(ex))
        return False
    dev_id = config.get('thermostat')
    loc_id = config.get('location')
    # Collect every device, optionally narrowed to one location/thermostat.
    thermostats = []
    for location in client.locations_by_id.values():
        if loc_id and location.locationid != loc_id:
            continue
        for device in location.devices_by_id.values():
            if dev_id and device.deviceid != dev_id:
                continue
            thermostats.append(HoneywellUSThermostat(client, device))
    add_devices(thermostats)
    return True
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the Honeywell thermostat platform for the configured region."""
    username = config.get(CONF_USERNAME)
    password = config.get(CONF_PASSWORD)
    region = config.get('region', 'eu').lower()
    if username is None or password is None:
        _LOGGER.error("Missing required configuration items %s or %s",
                      CONF_USERNAME, CONF_PASSWORD)
        return False
    if region not in ('us', 'eu'):
        _LOGGER.error('Region `%s` is invalid (use either us or eu)', region)
        return False
    # Dispatch to the regional setup helper.
    setup_func = _setup_us if region == 'us' else _setup_round
    return setup_func(username, password, config, add_devices)
class RoundThermostat(ThermostatDevice):
    """Representation of a Honeywell Round Connected thermostat."""

    # pylint: disable=too-many-instance-attributes
    def __init__(self, device, zone_id, master, away_temp):
        """Initialize the thermostat.

        device: shared evohomeclient API object.
        zone_id: id of the zone this entity represents.
        master: True for the first zone only; the master refreshes the
            API cache, the other zones read from it.
        away_temp: setpoint used when away mode is turned on.
        """
        self.device = device
        self._current_temperature = None
        self._target_temperature = None
        self._name = "round connected"
        self._id = zone_id
        self._master = master
        self._is_dhw = False
        self._away_temp = away_temp
        self._away = False
        self.update()

    @property
    def name(self):
        """Return the name of the honeywell, if any."""
        return self._name

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement."""
        return TEMP_CELSIUS

    @property
    def current_temperature(self):
        """Return the current temperature."""
        return self._current_temperature

    @property
    def target_temperature(self):
        """Return the temperature we try to reach."""
        if self._is_dhw:
            # Hot water zones have no user setpoint.
            return None
        return self._target_temperature

    def set_temperature(self, temperature):
        """Set new target temperature."""
        self.device.set_temperature(self._name, temperature)

    @property
    def is_away_mode_on(self):
        """Return true if away mode is on."""
        return self._away

    def turn_away_mode_on(self):
        """Turn away on.

        Evohome does have a proprietary away mode, but it doesn't really work
        the way it should. For example: If you set a temperature manually
        it doesn't get overwritten when away mode is switched on.
        """
        self._away = True
        self.device.set_temperature(self._name, self._away_temp)

    def turn_away_mode_off(self):
        """Turn away off."""
        self._away = False
        self.device.cancel_temp_override(self._name)

    def update(self):
        """Fetch the latest zone data from the evohome API."""
        data = None
        try:
            # Only refresh if this is the "master" device,
            # others will pick up the cache
            for val in self.device.temperatures(force_refresh=self._master):
                if val['id'] == self._id:
                    data = val
                    break
        except StopIteration:
            _LOGGER.error("Did not receive any temperature data from the "
                          "evohomeclient API.")
            return
        if data is None:
            # Bug fix: previously 'data' was left unbound when this zone's id
            # was absent from the response, raising NameError below.
            _LOGGER.error("Did not receive any temperature data for zone %s",
                          self._id)
            return
        self._current_temperature = data['temp']
        self._target_temperature = data['setpoint']
        if data['thermostat'] == "DOMESTIC_HOT_WATER":
            self._name = "Hot Water"
            self._is_dhw = True
        else:
            self._name = data['name']
            self._is_dhw = False
class HoneywellUSThermostat(ThermostatDevice):
    """Representation of a Honeywell US Thermostat."""

    def __init__(self, client, device):
        """Initialize the thermostat with its API client and device handle."""
        self._client = client
        self._device = device

    @property
    def is_fan_on(self):
        """Return true if fan is on."""
        return self._device.fan_running

    @property
    def name(self):
        """Return the name of the honeywell, if any."""
        return self._device.name

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement."""
        if self._device.temperature_unit == 'C':
            return TEMP_CELSIUS
        return TEMP_FAHRENHEIT

    @property
    def current_temperature(self):
        """Return the current temperature."""
        self._device.refresh()
        return self._device.current_temperature

    @property
    def target_temperature(self):
        """Return the temperature we try to reach."""
        cooling = self._device.system_mode == 'cool'
        return self._device.setpoint_cool if cooling \
            else self._device.setpoint_heat

    def set_temperature(self, temperature):
        """Set target temperature."""
        import somecomfort
        cooling = self._device.system_mode == 'cool'
        setpoint_attr = 'setpoint_cool' if cooling else 'setpoint_heat'
        try:
            setattr(self._device, setpoint_attr, temperature)
        except somecomfort.SomeComfortError:
            _LOGGER.error('Temperature %.1f out of range', temperature)

    @property
    def device_state_attributes(self):
        """Return the device specific state attributes."""
        fan_state = 'running' if self.is_fan_on else 'idle'
        return {'fan': fan_state,
                'fanmode': self._device.fan_mode,
                'system_mode': self._device.system_mode}

    def turn_away_mode_on(self):
        """Turn away on."""
        pass

    def turn_away_mode_off(self):
        """Turn away off."""
        pass
| {
"content_hash": "29062cf89bb49410b49b323fe71ef892",
"timestamp": "",
"source": "github",
"line_count": 247,
"max_line_length": 77,
"avg_line_length": 32.121457489878544,
"alnum_prop": 0.6022183009831107,
"repo_name": "Zyell/home-assistant",
"id": "633212e02b5b437761bac4d11ff297cb9f403c3b",
"size": "7934",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "homeassistant/components/thermostat/honeywell.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "798938"
},
{
"name": "Python",
"bytes": "771451"
},
{
"name": "Shell",
"bytes": "5097"
}
],
"symlink_target": ""
} |
import os
import string

import pygame
pygame.init()
class MyDisplay:
def surface_from_image_file(self, fullfilename):
"""
Takes the filename, loads it and finally returns the surface
"""
try:
# print "surface_from_image_file: loading file", fullfilename
return pygame.image.load(fullfilename)
except:
print 'Invalid filename or location', fullfilename
print 'Using default picture... No Picture.jpg'
file = os.path.split(fullfilename)
filename_components = [file[1], "No Picture.jpg"]
fullfilename = string.join(filename_components, "")
return pygame.image.load(fullfilename)
def surface_from_string(self,
list_strings,
alignment=0,
font_size=20,
back_colour=(0,0,0),
font_colour=(255,255,255)):
"""
Cycles through the list of the strings.
Converts each string into a surface containing that string.
In display terms, it places each string surface below the previous.
Returns a single surface of all the string surfaces.
The strings can be centre, left or right aligned depending upon the value assigned to
'alignment':
alignment = 0, Left
alignment = 1, Centre
alignment = 2, Right
"""
self.list_surface = []
self.list_rect = []
line_coords = [0,0]
if alignment != 0 and alignment != 1 and alignment != 2:
#Check whether valid alignment value was used
raise "Invalid aligment value!", alignment
for string in list_strings:
#Loops through the list of strings converting them into surfaces
#Gets a rect for each surface
#Adds each created surface to a list of surfaces
#Adds each created rect to a list of rects
# print "surface_from_string: creating surface and rect from string"
font = pygame.font.Font(None,font_size) #Font
self.string_surface = font.render(string, 1, font_colour) #Surface
self.string_rect = self.string_surface.get_rect() #Rect
# print "surface_from_string: appending surface and rect to thier respective lists"
self.list_surface.append(self.string_surface)
self.list_rect.append(self.string_rect)
# print "surface_from_string: getting the width and height dimensions for new surface"
max_width = self.get_max_width(self.list_surface) #find the longest width of all the string surfaces
max_height = self.get_total_height(self.list_surface) #get total height of all the surfaces
#Maximum dimensions the new surface will need to be to contain all the strings
self.dialoguesize = max_width, max_height
# print "surface_from_string: creating surface to contain the strings"
surface_dialogue = pygame.Surface(self.dialoguesize)
surface_dialogue.fill(back_colour)
# print "surface_from_string: arranging string surfaces"
for index in range(len(list_strings)):
#Cycles through the list rects using an index.
#Moves each surface below the previous
#Any horizontal movement is also done at this stage
#NOTE: should be the same number of items in list_strings, list_surfaces, list_rects.
line_coords[1] = (font_size * index) #calculate distance needed to move surface below the previous
surface_position = ( 0, line_coords[1] ) #coords of where the surface is to be
surface = self.list_surface[index] #get surface
self.list_rect[index] = self.align_surface(surface, surface_position, alignment, self.dialoguesize) #move surface
surface_dialogue.blit(surface, self.list_rect[index]) #copy string surface to dialogue surface
return surface_dialogue
def prepare_display(self,
list_surface_components,
back_colour=[0,0,0],
display_size=[1000, 800]):
"""
surface_components = [surface, position=[0,0], alignment_type=[0,0]]
Takes a list of surfaces and thier components.
Cycles through the list moving and blitting the provided surfaces to the screen surface.
"""
# print "prepare_display: Creating display"
screen = pygame.display.set_mode(display_size) #Surface
screen.fill(back_colour)
for surface_components in list_surface_components:
self.surface = surface_components[0]
self.position = surface_components[1]
self.alignment_type = surface_components[2]
#creates a rect and moves it.
rect = self.align_surface(self.surface, self.position, self.alignment_type, display_size)
# print "prepare_display: blitting"
screen.blit(self.surface, rect)
    def display(self):
        """
        Displays what has been blitted to the screen surface
        """
        # print "display: flipping"
        pygame.display.flip()
        # print "display: waiting"
        # NOTE(review): display_wait() is not defined in the visible part of
        # this class -- presumably it blocks until the user dismisses the
        # screen; confirm it exists before calling display().
        self.display_wait()
        # print "display: finished"
def align_surface(self, surface, position=[0,0], alignment_type=0, display_size=[1000, 800]):
    """Return a rect that places *surface* within *display_size*.

    alignment_type selects one of nine anchors, laid out row-major:
        0 top-left / custom (uses *position*)  1 top-middle     2 top-right
        3 middle-left                          4 centre         5 middle-right
        6 bottom-left                          7 bottom-middle  8 bottom-right
    For the left column / top row the matching coordinate of *position*
    is used unchanged, so alignment 0 is a plain move by *position*.

    Raises:
        ValueError: if alignment_type is not 0-8.  (Bug fix: the original
        raised a bare string, which is itself a TypeError on Python >= 2.6.)
    """
    self.surface = surface  # preserved side effect: last aligned surface
    rect = self.surface.get_rect()
    d_size = display_size
    s_size = self.surface.get_size()
    if alignment_type not in range(9):
        raise ValueError('Invalid alignment_type was used: %r' % (alignment_type,))
    # Decompose the anchor index: row 0=top, 1=middle, 2=bottom;
    # col 0=left, 1=middle, 2=right.
    row, col = divmod(alignment_type, 3)
    if col == 1:
        x_coord = self.mid_coord(s_size[0], d_size[0])
    elif col == 2:
        x_coord = self.rigORbot_coord(s_size[0], d_size[0])
    else:
        x_coord = position[0]
    if row == 1:
        y_coord = self.mid_coord(s_size[1], d_size[1])
    elif row == 2:
        y_coord = self.rigORbot_coord(s_size[1], d_size[1])
    else:
        y_coord = position[1]
    return rect.move([x_coord, y_coord])
def rigORbot_coord(self, surface_length, display_length):
    """
    Top-left coordinate that right- or bottom-aligns a surface of
    surface_length within a display of display_length.
    """
    return display_length - surface_length
def mid_coord(self, surface_length, display_length):
    """
    Top-left coordinate that centres a surface of surface_length
    within a display of display_length.
    """
    margin = display_length - surface_length
    return margin / 2
def get_max_width(self, list_of_surfaces):
    """
    Return the largest width among the surfaces; 0 for an empty list.

    Simplified from a manual comparison loop to the max() builtin
    (equivalent because surface widths are non-negative).
    """
    widths = [surface.get_width() for surface in list_of_surfaces]
    return max(widths) if widths else 0
def get_total_height(self, list_of_surfaces):
    """Return the sum of the heights of all surfaces in the list."""
    return sum(surface.get_height() for surface in list_of_surfaces)
def display_wait(self):
    """
    Poll the event queue until the window is closed, a key is pressed,
    or roughly `timeout` seconds elapse.

    Returns 0 on key press, None on window close or timeout.
    """
    import time
    timeout = 3  # seconds
    t = time.time()
    while 1:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                # Bug fix: stop polling once pygame has shut down; the
                # original fell through and called pygame.event.get()
                # again on the next loop iteration.
                return
            if event.type == pygame.KEYDOWN:
                return 0
        if timeout and time.time()-t>timeout:
            return
| {
"content_hash": "c5c8f8085e4c5a2cff684518da2c3db7",
"timestamp": "",
"source": "github",
"line_count": 246,
"max_line_length": 125,
"avg_line_length": 39.77235772357724,
"alnum_prop": 0.5729762878168438,
"repo_name": "sparkslabs/kamaelia_",
"id": "1ecbdb6574b7a47b8e30ea5e8a73cf902787ea5b",
"size": "10609",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Sketches/CE/pygame_display.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3814"
},
{
"name": "C",
"bytes": "212854"
},
{
"name": "C++",
"bytes": "327546"
},
{
"name": "CSS",
"bytes": "114434"
},
{
"name": "ChucK",
"bytes": "422"
},
{
"name": "HTML",
"bytes": "1288960"
},
{
"name": "Java",
"bytes": "31832"
},
{
"name": "JavaScript",
"bytes": "829491"
},
{
"name": "Makefile",
"bytes": "5768"
},
{
"name": "NSIS",
"bytes": "18867"
},
{
"name": "PHP",
"bytes": "49059"
},
{
"name": "Perl",
"bytes": "504"
},
{
"name": "Processing",
"bytes": "2885"
},
{
"name": "Pure Data",
"bytes": "7485482"
},
{
"name": "Python",
"bytes": "18896248"
},
{
"name": "Ruby",
"bytes": "4165"
},
{
"name": "Shell",
"bytes": "707430"
}
],
"symlink_target": ""
} |
from __future__ import print_function
"""Dump binary log generated by PX4's sdlog2 or APM as CSV
Usage: python sdlog2_dump.py <log.bin> [-v] [-e] [-d delimiter] [-n null] [-m MSG[.field1,field2,...]]
-v Use plain debug output instead of CSV.
-e Recover from errors.
-d Use "delimiter" in CSV. Default is ",".
-n Use "null" as placeholder for empty values in CSV. Default is empty.
-m MSG[.field1,field2,...]
Dump only messages of specified type, and only specified fields.
Multiple -m options allowed."""
__author__ = "Anton Babushkin"
__version__ = "1.2"
import struct, sys
if sys.hexversion >= 0x030000F0:
    runningPython3 = True

    def _parseCString(cstr):
        """Return the ASCII text preceding the first NUL in *cstr* (bytes)."""
        return cstr.decode('ascii').partition('\0')[0]
else:
    runningPython3 = False

    def _parseCString(cstr):
        """Return the text preceding the first NUL in *cstr*."""
        return str(cstr).partition('\0')[0]
class SDLog2Parser:
    """Streaming parser for sdlog2/APM binary logs.

    The log is a sequence of packets, each introduced by the sync bytes
    0xA3 0x95 followed by a one-byte message type.  Type 0x80 is a
    self-describing FORMAT packet that registers the struct layout, name
    and field labels of a data-message type; every other packet is a data
    message decoded with the layout learned from its FORMAT packet.
    Output is CSV by default, or a plain textual dump with setDebugOut(True).
    """
    BLOCK_SIZE = 8192            # bytes read from the log file per chunk
    MSG_HEADER_LEN = 3           # two sync bytes + one message-type byte
    MSG_HEAD1 = 0xA3             # first sync byte
    MSG_HEAD2 = 0x95             # second sync byte
    MSG_FORMAT_PACKET_LEN = 89   # total size of a FORMAT packet, header included
    MSG_FORMAT_STRUCT = "BB4s16s64s"  # type, length, name, format chars, labels
    MSG_TYPE_FORMAT = 0x80       # message type reserved for FORMAT packets
    # Maps one sdlog2 format character to (struct format char, multiplier).
    # A non-None multiplier rescales the decoded integer (e.g. "L" stores a
    # coordinate as degrees * 1e7).
    FORMAT_TO_STRUCT = {
        "b": ("b", None),
        "B": ("B", None),
        "h": ("h", None),
        "H": ("H", None),
        "i": ("i", None),
        "I": ("I", None),
        "f": ("f", None),
        "n": ("4s", None),
        "N": ("16s", None),
        "Z": ("64s", None),
        "c": ("h", 0.01),
        "C": ("H", 0.01),
        "e": ("i", 0.01),
        "E": ("I", 0.01),
        "L": ("i", 0.0000001),
        "M": ("b", None),
        "q": ("q", None),
        "Q": ("Q", None),
    }
    # Configuration defaults, normally overridden per instance via the
    # set* methods below.
    # NOTE(review): these are mutable class attributes; __initCSV() appends
    # to __msg_filter in place, so state could leak between instances unless
    # setMsgFilter() is always called first — confirm this is intended.
    __csv_delim = ","
    __csv_null = ""
    __msg_filter = []
    __time_msg = None
    __debug_out = False
    __correct_errors = False
    __file_name = None
    __file = None

    def __init__(self):
        # All per-run state is created in reset(), which process() calls.
        return

    def reset(self):
        """Clear all per-run parsing state; invoked at the start of process()."""
        self.__msg_descrs = {}  # message descriptions by message type map
        self.__msg_labels = {}  # message labels by message name map
        self.__msg_names = []  # message names in the same order as FORMAT messages
        self.__buffer = bytearray()  # buffer for input binary data
        self.__ptr = 0  # read pointer in buffer
        self.__csv_columns = []  # CSV file columns in correct order in format "MSG.label"
        self.__csv_data = {}  # current values for all columns
        self.__csv_updated = False
        self.__msg_filter_map = {}  # filter in form of map, with '*" expanded to full list of fields

    def setCSVDelimiter(self, csv_delim):
        # Column separator used for CSV output.
        self.__csv_delim = csv_delim

    def setCSVNull(self, csv_null):
        # Placeholder written for columns that have no value yet.
        self.__csv_null = csv_null

    def setMsgFilter(self, msg_filter):
        # msg_filter: list of (msg_name, "*" or [field, ...]) tuples.
        self.__msg_filter = msg_filter

    def setTimeMsg(self, time_msg):
        # Name of the message whose arrival flushes a grouped CSV row
        # (None disables time grouping).
        self.__time_msg = time_msg

    def setDebugOut(self, debug_out):
        # True: human-readable dump instead of CSV.
        self.__debug_out = debug_out

    def setCorrectErrors(self, correct_errors):
        # True: skip bad sync bytes one at a time instead of raising.
        self.__correct_errors = correct_errors

    def setFileName(self, file_name):
        # Output target for CSV; None means stdout.
        self.__file_name = file_name
        if file_name != None:
            self.__file = open(file_name, 'w+')
        else:
            self.__file = None

    def process(self, fn):
        """Parse log file *fn*, emitting CSV or debug text as configured."""
        self.reset()
        if self.__debug_out:
            # init __msg_filter_map
            for msg_name, show_fields in self.__msg_filter:
                self.__msg_filter_map[msg_name] = show_fields
        first_data_msg = True
        f = open(fn, "rb")
        bytes_read = 0
        while True:
            chunk = f.read(self.BLOCK_SIZE)
            if len(chunk) == 0:
                break
            # Keep unconsumed bytes from the previous chunk and append new data.
            self.__buffer = self.__buffer[self.__ptr:] + chunk
            self.__ptr = 0
            while self.__bytesLeft() >= self.MSG_HEADER_LEN:
                head1 = self.__buffer[self.__ptr]
                head2 = self.__buffer[self.__ptr + 1]
                if (head1 != self.MSG_HEAD1 or head2 != self.MSG_HEAD2):
                    if self.__correct_errors:
                        # Resynchronise by sliding forward one byte at a time.
                        self.__ptr += 1
                        continue
                    else:
                        raise Exception("Invalid header at %i (0x%X): %02X %02X, must be %02X %02X" % (
                            bytes_read + self.__ptr, bytes_read + self.__ptr, head1, head2, self.MSG_HEAD1, self.MSG_HEAD2))
                msg_type = self.__buffer[self.__ptr + 2]
                if msg_type == self.MSG_TYPE_FORMAT:
                    # parse FORMAT message
                    if self.__bytesLeft() < self.MSG_FORMAT_PACKET_LEN:
                        break  # incomplete packet: wait for the next chunk
                    self.__parseMsgDescr()
                else:
                    # parse data message
                    # NOTE(review): an unknown msg_type raises KeyError on the
                    # lookup below, making the explicit "Unknown msg type"
                    # branch unreachable — confirm whether .get() was intended.
                    msg_descr = self.__msg_descrs[msg_type]
                    if msg_descr == None:
                        raise Exception("Unknown msg type: %i" % msg_type)
                    msg_length = msg_descr[0]
                    if self.__bytesLeft() < msg_length:
                        break
                    if first_data_msg:
                        # build CSV columns and init data map
                        if not self.__debug_out:
                            self.__initCSV()
                        first_data_msg = False
                    self.__parseMsg(msg_descr)
            bytes_read += self.__ptr
        if not self.__debug_out and self.__time_msg != None and self.__csv_updated:
            # Flush the last pending time-grouped row.
            self.__printCSVRow()
        f.close()

    def __bytesLeft(self):
        # Number of not-yet-consumed bytes in the input buffer.
        return len(self.__buffer) - self.__ptr

    def __filterMsg(self, msg_name):
        # Fields to show for msg_name: "*", a list of labels, or None
        # (None means the message is filtered out entirely).
        show_fields = "*"
        if len(self.__msg_filter_map) > 0:
            show_fields = self.__msg_filter_map.get(msg_name)
        return show_fields

    def __initCSV(self):
        """Build the CSV column list from the filter and print the header row."""
        if len(self.__msg_filter) == 0:
            # No explicit filter: include every message type seen so far.
            for msg_name in self.__msg_names:
                self.__msg_filter.append((msg_name, "*"))
        for msg_name, show_fields in self.__msg_filter:
            if show_fields == "*":
                show_fields = self.__msg_labels.get(msg_name, [])
            self.__msg_filter_map[msg_name] = show_fields
            for field in show_fields:
                full_label = msg_name + "_" + field
                self.__csv_columns.append(full_label)
                self.__csv_data[full_label] = None
        if self.__file != None:
            print(self.__csv_delim.join(self.__csv_columns), file=self.__file)
        else:
            print(self.__csv_delim.join(self.__csv_columns))

    def __printCSVRow(self):
        """Emit the current values of all columns as one CSV row."""
        s = []
        for full_label in self.__csv_columns:
            v = self.__csv_data[full_label]
            if v == None:
                v = self.__csv_null  # column has no value yet
            else:
                v = str(v)
            s.append(v)
        if self.__file != None:
            print(self.__csv_delim.join(s), file=self.__file)
        else:
            print(self.__csv_delim.join(s))

    def __parseMsgDescr(self):
        """Decode one FORMAT packet and register the described message type."""
        if runningPython3:
            data = struct.unpack(self.MSG_FORMAT_STRUCT,
                                 self.__buffer[self.__ptr + 3: self.__ptr + self.MSG_FORMAT_PACKET_LEN])
        else:
            # Python 2 struct.unpack needs str, not bytearray.
            data = struct.unpack(self.MSG_FORMAT_STRUCT,
                                 str(self.__buffer[self.__ptr + 3: self.__ptr + self.MSG_FORMAT_PACKET_LEN]))
        msg_type = data[0]
        if msg_type != self.MSG_TYPE_FORMAT:
            msg_length = data[1]
            msg_name = _parseCString(data[2])
            msg_format = _parseCString(data[3])
            msg_labels = _parseCString(data[4]).split(",")
            # Convert msg_format to struct.unpack format string
            msg_struct = ""
            msg_mults = []
            for c in msg_format:
                try:
                    f = self.FORMAT_TO_STRUCT[c]
                    msg_struct += f[0]
                    msg_mults.append(f[1])
                except KeyError as e:
                    raise Exception("Unsupported format char: %s in message %s (%i)" % (c, msg_name, msg_type))
            msg_struct = "<" + msg_struct  # force little-endian
            self.__msg_descrs[msg_type] = (msg_length, msg_name, msg_format, msg_labels, msg_struct, msg_mults)
            self.__msg_labels[msg_name] = msg_labels
            self.__msg_names.append(msg_name)
            if self.__debug_out:
                if self.__filterMsg(msg_name) != None:
                    print(
                        "MSG FORMAT: type = %i, length = %i, name = %s, format = %s, labels = %s, struct = %s, mults = %s" % (
                            msg_type, msg_length, msg_name, msg_format, str(msg_labels), msg_struct, msg_mults))
        self.__ptr += self.MSG_FORMAT_PACKET_LEN

    def __parseMsg(self, msg_descr):
        """Decode one data message and update the CSV buffer or debug output."""
        msg_length, msg_name, msg_format, msg_labels, msg_struct, msg_mults = msg_descr
        if not self.__debug_out and self.__time_msg != None and msg_name == self.__time_msg and self.__csv_updated:
            # A new time marker arrived: flush the row accumulated so far.
            self.__printCSVRow()
            self.__csv_updated = False
        show_fields = self.__filterMsg(msg_name)
        if (show_fields != None):
            if runningPython3:
                data = list(
                    struct.unpack(msg_struct, self.__buffer[self.__ptr + self.MSG_HEADER_LEN:self.__ptr + msg_length]))
            else:
                data = list(struct.unpack(msg_struct,
                                          str(self.__buffer[self.__ptr + self.MSG_HEADER_LEN:self.__ptr + msg_length])))
            for i in range(len(data)):
                # NOTE(review): on Python 3 fixed-size string fields unpack
                # as bytes, so this `is str` test may never fire there —
                # confirm against runningPython3 handling upstream.
                if type(data[i]) is str:
                    data[i] = _parseCString(data[i])
                m = msg_mults[i]
                if m != None:
                    data[i] = data[i] * m  # rescale raw integer fields
            if self.__debug_out:
                s = []
                for i in range(len(data)):
                    label = msg_labels[i]
                    if show_fields == "*" or label in show_fields:
                        s.append(label + "=" + str(data[i]))
                print("MSG %s: %s" % (msg_name, ", ".join(s)))
            else:
                # update CSV data buffer
                for i in range(len(data)):
                    label = msg_labels[i]
                    if label in show_fields:
                        self.__csv_data[msg_name + "_" + label] = data[i]
                        if self.__time_msg != None and msg_name != self.__time_msg:
                            self.__csv_updated = True
                if self.__time_msg == None:
                    # No time grouping: one CSV row per data message.
                    self.__printCSVRow()
        self.__ptr += msg_length
def _main():
    """Command-line entry point: parse sys.argv and dump the given log.

    With fewer than two arguments, prints usage and returns None.
    """
    if len(sys.argv) < 2:
        print(
            "Usage: python sdlog2_dump.py <log.bin> [-v] [-e] [-d delimiter] [-n null] [-m MSG[.field1,field2,...]] [-t TIME_MSG_NAME]\n")
        print("\t-v\tUse plain debug output instead of CSV.\n")
        print("\t-e\tRecover from errors.\n")
        print("\t-d\tUse \"delimiter\" in CSV. Default is \",\".\n")
        print("\t-n\tUse \"null\" as placeholder for empty values in CSV. Default is empty.\n")
        print(
            "\t-m MSG[.field1,field2,...]\n\t\tDump only messages of specified type, and only specified fields.\n\t\tMultiple -m options allowed.")
        print(
            "\t-t\tSpecify TIME message name to group data messages by time and significantly reduce duplicate output.\n")
        # Bug fix: this help line was missing its tab separator and
        # trailing newline ("\t-fPrint to file...").
        print("\t-f\tPrint to file instead of stdout\n")
        return
    fn = sys.argv[1]
    # Option defaults.
    debug_out = False
    correct_errors = False
    msg_filter = []
    csv_null = ""
    csv_delim = ","
    time_msg = "TIME"
    file_name = None
    opt = None  # option letter currently awaiting its value argument
    for arg in sys.argv[2:]:
        if opt != None:
            if opt == "d":
                csv_delim = arg
            elif opt == "n":
                csv_null = arg
            elif opt == "t":
                time_msg = arg
            elif opt == "f":
                file_name = arg
            elif opt == "m":
                show_fields = "*"
                # NOTE(review): the usage text advertises MSG.field1,...
                # but this splits on "_" — confirm which separator users
                # of this copy actually rely on before changing it.
                a = arg.split("_")
                if len(a) > 1:
                    show_fields = a[1].split(",")
                msg_filter.append((a[0], show_fields))
            opt = None
        else:
            if arg == "-v":
                debug_out = True
            elif arg == "-e":
                correct_errors = True
            elif arg == "-d":
                opt = "d"
            elif arg == "-n":
                opt = "n"
            elif arg == "-m":
                opt = "m"
            elif arg == "-t":
                opt = "t"
            elif arg == "-f":
                opt = "f"
    if csv_delim == "\\t":
        csv_delim = "\t"  # allow passing a literal "\t" on the command line
    parser = SDLog2Parser()
    parser.setCSVDelimiter(csv_delim)
    parser.setCSVNull(csv_null)
    parser.setMsgFilter(msg_filter)
    parser.setTimeMsg(time_msg)
    parser.setFileName(file_name)
    parser.setDebugOut(debug_out)
    parser.setCorrectErrors(correct_errors)
    parser.process(fn)
# Run the CLI only when executed as a script, not on import.
if __name__ == "__main__":
    _main()
| {
"content_hash": "39cef6a229b85e193e0aede9148823f1",
"timestamp": "",
"source": "github",
"line_count": 348,
"max_line_length": 147,
"avg_line_length": 37.14655172413793,
"alnum_prop": 0.4847218998994353,
"repo_name": "Xyrotechnology/Project-Anthrax",
"id": "7da0e6e5e2f05bd24ae34fbc2746c6f0a2fcf4bd",
"size": "12950",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SD/libraries/Scripts/Pixhawk/Firmware-master/Tools/sdlog2/sdlog2_dump.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Arduino",
"bytes": "3488"
},
{
"name": "C",
"bytes": "2513070"
},
{
"name": "C++",
"bytes": "4421299"
},
{
"name": "CMake",
"bytes": "398432"
},
{
"name": "Io",
"bytes": "307"
},
{
"name": "Makefile",
"bytes": "111009"
},
{
"name": "Matlab",
"bytes": "44019"
},
{
"name": "Python",
"bytes": "526904"
},
{
"name": "Ruby",
"bytes": "5097"
},
{
"name": "Scilab",
"bytes": "1502"
},
{
"name": "Shell",
"bytes": "33891"
}
],
"symlink_target": ""
} |
import datetime
import time
import random
import csv as csv
from collections import defaultdict
import numpy as np
def create_data(file_name, number_of_days):
    """Simulate demand events at the stops in coordinates.csv and save them.

    Walks minute by minute from `number_of_days` before the most recent
    05:00 up to that 05:00, drawing a random event per stop per minute
    with a per-stop base probability scaled by an hour-of-day weight, and
    writes the resulting [time, lat, lon, id] rows to *file_name*.
    """
    lat, lon = read_coordinates('coordinates.csv')
    number_of_stops = len(lat)
    time_end = datetime.datetime.now().replace(hour=5, minute=0, second=0, microsecond=0)
    time_now = time_end - datetime.timedelta(days=number_of_days)
    time_change = 1  # simulation step, in minutes
    probability_init = 0.007  # upper bound of the per-stop base probability
    # Hour-of-day demand weights (hours outside 5-22 stay at 0).
    hour_correction = [0] * 24
    for hour, weight in [(5, 0.3), (6, 0.6), (7, 1.0), (8, 1.5), (9, 0.8),
                         (10, 0.5), (11, 0.8), (12, 1.2), (13, 1.0), (14, 0.7),
                         (15, 0.6), (16, 1.0), (17, 1.5), (18, 1.0), (19, 0.8),
                         (20, 0.5), (21, 0.3), (22, 0.2)]:
        hour_correction[hour] = weight
    # One random base probability per stop (same RNG call order as before).
    demand_probability = [random.random() * probability_init for sk in range(number_of_stops)]
    data = []
    while time_now <= time_end:
        for sk in range(number_of_stops):
            if random.random() < demand_probability[sk] * hour_correction[time_now.hour]:
                data.append([time_now, lat[sk], lon[sk], sk])
        time_now = time_now + datetime.timedelta(minutes=time_change)
    write_data(file_name, ['time', 'lat', 'lon', 'id'], data)
    return
def write_data(file_name, header, list_name):
    """Write *header* followed by the rows of *list_name* as CSV.

    Note: opens the file in binary mode, matching the Python 2 csv API
    this module targets.
    """
    with open(file_name, "wb") as out_file:
        csv_writer = csv.writer(out_file)
        csv_writer.writerow(header)
        csv_writer.writerows(list_name)
    return
def read_coordinates(file_name):
    """Read stop coordinates from a CSV file with 'lat' and 'lon' columns.

    Returns (lat, lon): two parallel lists of floats, one entry per row.
    """
    # Fixes: close the file (original leaked the handle), use the next()
    # builtin and plain 'r' mode so this works on both Python 2 and 3
    # ('rU' was removed in 3.11, reader.next() is Python-2-only), and
    # look up the column indices once instead of per row.
    with open(file_name, 'r') as csv_file:
        reader = csv.reader(csv_file, delimiter=',')
        header = next(reader)
        lat_col = header.index("lat")
        lon_col = header.index("lon")
        lat = []
        lon = []
        for row in reader:
            lat.append(float(row[lat_col]))
            lon.append(float(row[lon_col]))
    return lat, lon
def read_events(file_name):
    """Read demand events from a CSV file with time, lat, lon, id columns.

    Times are parsed with format "%Y-%m-%d %H:%M:%S" (e.g. 2014-06-28 06:57:00).
    Returns (times_start_trip, lat, lon, ids) as four parallel lists;
    ids are floats, matching the original behaviour.
    """
    # Fixes: close the file via `with`, use next()/'r' for Python 2+3
    # compatibility, hoist the header.index() lookups out of the loop, and
    # parse straight to datetime (datetime.strptime is equivalent to the
    # old time.strptime + datetime(*tm[:6]) two-step).
    with open(file_name, 'r') as csv_file:
        reader = csv.reader(csv_file, delimiter=',')
        header = next(reader)
        time_col = header.index("time")
        lat_col = header.index("lat")
        lon_col = header.index("lon")
        id_col = header.index("id")
        times_start_trip = []
        lat = []
        lon = []
        ids = []  # renamed locally to avoid shadowing the id() builtin
        for row in reader:
            times_start_trip.append(
                datetime.datetime.strptime(row[time_col], "%Y-%m-%d %H:%M:%S"))
            lat.append(float(row[lat_col]))
            lon.append(float(row[lon_col]))
            ids.append(float(row[id_col]))
    return times_start_trip, lat, lon, ids
def make_coordinate_dictionary(file_name):
    """Build a lookup of integer stop id -> [lat, lon] from an events file.

    Returns (coordinates_look_up, stop_sequence, times_start_trip), where
    the latter two are passed through unchanged from read_events().
    """
    times_start_trip, lat, lon, stop_sequence = read_events(file_name)
    coordinates_look_up = defaultdict(list)
    for position, stop in enumerate(stop_sequence):
        key = int(stop)
        # Only the first occurrence of a stop defines its coordinates.
        if key not in coordinates_look_up:
            coordinates_look_up[key] = [lat[position], lon[position]]
    return coordinates_look_up, stop_sequence, times_start_trip
def coordinates_to_grid(coordinates, grid_width):
    """Quantise stop coordinates onto a rectangular grid of grid_width km.

    *coordinates* maps stop id -> [lat, lon].  Returns
    (coordinates_grid, stops_grid): coordinates_grid maps a grid-cell id to
    [lat_lo, lon_lo, lat_hi, lon_hi] bounds, stops_grid maps each stop id
    to its grid-cell id (cell id = lat_index * 1000 + lon_index).
    """
    km_per_deg_lat = 111.0  # kilometres per degree of latitude
    km_per_deg_lon = 56.0   # kilometres per degree of longitude (local approx.)
    index_base = 1000       # lat index weight inside the combined cell id
    lat_values = [coords[0] for coords in coordinates.values()]
    lon_values = [coords[1] for coords in coordinates.values()]
    lat_max = max(lat_values)
    lon_max = max(lon_values)
    lat_min = min(lat_values)
    lon_min = min(lon_values)
    lat_step = grid_width / km_per_deg_lat
    lon_step = grid_width / km_per_deg_lon
    # Grid origin sits just below the minimum, with the slack the original
    # code applied (its comment called this "50m slack").
    lat_start = lat_min - 50 / km_per_deg_lat + lat_step
    lon_start = lon_min - 50 / km_per_deg_lon + lon_step
    coordinates_grid = defaultdict(list)
    stops_grid = {}
    for stop_id, coords in coordinates.items():
        lat_idx = 0
        lon_idx = 0
        lat_bound = lat_start
        lon_bound = lon_start
        # Walk the grid boundaries upward until the point falls below them.
        while coords[0] > lat_bound:
            lat_bound += lat_step
            lat_idx += 1
        while coords[1] > lon_bound:
            lon_bound += lon_step
            lon_idx += 1
        cell_id = lat_idx * index_base + lon_idx
        stops_grid[stop_id] = cell_id
        if cell_id not in coordinates_grid:
            coordinates_grid[cell_id] = [lat_bound - lat_step, lon_bound - lon_step,
                                         lat_bound, lon_bound]
    return coordinates_grid, stops_grid
def convert_stop_sequence_grid(stop_sequence, stops_grid):
    """Translate each stop id in the sequence into its grid-cell id."""
    return [stops_grid[stop] for stop in stop_sequence]
def discretize_observations(times_start_trip, stop_sequence, discretization_min, param_hour_range):
    """Aggregate per-stop event counts into fixed time slots.

    Walks from the first day's opening hour (param_hour_range[0]) to the
    last day's closing hour (param_hour_range[-1]) in steps of
    discretization_min minutes, counting how many events fall into each
    slot per unique stop.  Friday evenings skip ahead 48 hours (weekend),
    and each closing hour jumps to the next morning.

    Assumes times_start_trip is sorted ascending and parallel to
    stop_sequence.  Returns (times_demand, demand, stops_unique) where
    demand[i][j] is the count for slot times_demand[i] and stop
    stops_unique[j].

    Fix vs. the original: the caller's times_start_trip list is no longer
    mutated (it used to gain a sentinel entry that only the local name was
    stripped of afterwards).
    """
    demand = []
    times_demand = []
    stops_unique = list(set(stop_sequence))
    k = len(stops_unique)
    time_now = times_start_trip[0].replace(hour=param_hour_range[0], minute=0, second=0) + datetime.timedelta(minutes=discretization_min)
    time_end = times_start_trip[-1].replace(hour=param_hour_range[-1], minute=0, second=0)
    # Work on a copy with a sentinel appended as the stopping criterion,
    # so the caller's list stays untouched.
    times = times_start_trip + [time_end]
    sk = 0
    while time_now <= time_end:
        # Fast-forward past trips that fall before the current slot.
        while times[sk] < (time_now - datetime.timedelta(minutes=discretization_min)):
            sk += 1
        demand_vector_now = [0] * k  # counts for the current slot
        while times[sk] < time_now:
            idx = stops_unique.index(stop_sequence[sk])
            demand_vector_now[idx] += 1
            sk += 1
        demand.append(demand_vector_now)
        times_demand.append(time_now)
        if (time_now.weekday() == 4) and (time_now.hour == param_hour_range[-1]) and (time_now.minute == 0):
            # Friday closing hour: skip the weekend.
            time_now = time_now + datetime.timedelta(hours=48)
        if (time_now.hour == param_hour_range[-1]) and (time_now.minute == 0):
            # Closing hour: jump to the next morning's opening hour.
            plus_hours = 24 - param_hour_range[-1] + param_hour_range[0]
            time_now = time_now + datetime.timedelta(hours=plus_hours)
        time_now = time_now + datetime.timedelta(minutes=discretization_min)
    return times_demand, demand, stops_unique
def fade_for_video(demand, alpha):
    """Apply exponential decay across consecutive demand vectors.

    Each output value is value + alpha * previous_output; results below
    0.01 are clamped to 0 so faded cells eventually disappear.
    """
    faded = []
    previous = [0] * len(demand[0])
    for vector in demand:
        current = []
        for value, prior in zip(vector, previous):
            decayed = value + alpha * prior
            current.append(0 if decayed < 0.01 else decayed)
        faded.append(current)
        previous = current
    return faded
def sort_by_time(time_data, stop_sequence):
    """Sort both parallel lists by time, keeping them aligned.

    Returns (time_data, stop_sequence) as lists of numpy scalars, in the
    order given by numpy's argsort of the times.
    """
    order = np.argsort(np.array(time_data))
    sorted_times = list(np.array(time_data)[order])
    sorted_stops = list(np.array(stop_sequence)[order])
    return sorted_times, sorted_stops
| {
"content_hash": "f5b634a1dd8fac9935ac486535581665",
"timestamp": "",
"source": "github",
"line_count": 189,
"max_line_length": 134,
"avg_line_length": 35.12169312169312,
"alnum_prop": 0.7051822838204278,
"repo_name": "zliobaite/Heatmaps",
"id": "7c731b21d5d20255a7c909f746042ddb52b1f74d",
"size": "6638",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "data_processing.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12954"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, unicode_literals
from chatpro.profiles.models import Contact
from chatpro.test import ChatProTest
from dash.utils.sync import sync_pull_contacts
from django.utils import timezone
from mock import patch
from temba.types import Contact as TembaContact
class TembaTest(ChatProTest):
    """Tests for pulling contact changes from a mocked RapidPro (Temba) API."""

    @patch('dash.orgs.models.TembaClient.get_contacts')
    def test_sync_pull_contacts(self, mock_get_contacts):
        # RapidPro returning no changes
        # (these five contacts presumably mirror the ChatProTest fixtures —
        # see the base class setup.)
        mock_get_contacts.return_value = [
            TembaContact.create(uuid='C-001', name="Ann", urns=['tel:1234'], groups=['G-001'],
                                fields=dict(chat_name="ann"), language='eng', modified_on=timezone.now()),
            TembaContact.create(uuid='C-002', name="Bob", urns=['tel:2345'], groups=['G-001'],
                                fields=dict(chat_name="bob"), language='eng', modified_on=timezone.now()),
            TembaContact.create(uuid='C-003', name="Cat", urns=['tel:3456'], groups=['G-002'],
                                fields=dict(chat_name="cat"), language='eng', modified_on=timezone.now()),
            TembaContact.create(uuid='C-004', name="Dan", urns=['twitter:danny'], groups=['G-002'],
                                fields=dict(chat_name="dan"), language='eng', modified_on=timezone.now()),
            TembaContact.create(uuid='C-005', name="Eve", urns=['twitter:evee'], groups=['G-003'],
                                fields=dict(chat_name="eve"), language='eng', modified_on=timezone.now())
        ]
        created, updated, deleted, failed = sync_pull_contacts(self.unicef, Contact)
        self.assertFalse(created or updated or deleted or failed)

        # RapidPro returning 1 new, 1 modified, 1 deleted and 2 unsyncable contact
        # (C-002 is absent -> deleted; C-001 changed -> updated; C-007 new;
        # C-009/C-010 lack required data -> failed.)
        mock_get_contacts.return_value = [
            TembaContact.create(uuid='C-001', name="Annie", urns=['tel:5678'], groups=['G-002'],
                                fields=dict(chat_name="annie"), language='eng', modified_on=timezone.now()),
            TembaContact.create(uuid='C-003', name="Cat", urns=['tel:3456'], groups=['G-002'],
                                fields=dict(chat_name="cat"), language='eng', modified_on=timezone.now()),
            TembaContact.create(uuid='C-004', name="Dan", urns=['twitter:danny'], groups=['G-002'],
                                fields=dict(chat_name="dan"), language='eng', modified_on=timezone.now()),
            TembaContact.create(uuid='C-005', name="Eve", urns=['twitter:evee'], groups=['G-003'],
                                fields=dict(chat_name="eve"), language='eng', modified_on=timezone.now()),
            TembaContact.create(uuid='C-007', name="Jim", urns=['twitter:jimbo'], groups=['G-003'],
                                fields=dict(chat_name="jim"), language='eng', modified_on=timezone.now()),
            TembaContact.create(uuid='C-009', name="Xen", urns=['twitter:xen'], groups=[],  # no room group
                                fields=dict(chat_name="xen"), language='eng', modified_on=timezone.now()),
            TembaContact.create(uuid='C-010', name="Yan", urns=[], groups=['G-003'],  # no URN
                                fields=dict(chat_name="yan"), language='eng', modified_on=timezone.now())
        ]
        created, updated, deleted, failed = sync_pull_contacts(self.unicef, Contact)
        self.assertEqual(sorted(created), ['C-007'])
        self.assertEqual(sorted(updated), ['C-001'])
        self.assertEqual(sorted(deleted), ['C-002'])
        self.assertEqual(sorted(failed), ['C-009', 'C-010'])

        # check created contact
        jim = Contact.objects.get(uuid='C-007')
        self.assertEqual(jim.full_name, "Jim")
        self.assertEqual(jim.chat_name, "jim")
        self.assertEqual(jim.urn, 'twitter:jimbo')
        self.assertEqual(jim.room, self.room3)

        # check modified contact
        ann = Contact.objects.get(uuid='C-001')
        self.assertEqual(ann.full_name, "Annie")
        self.assertEqual(ann.chat_name, "annie")
        self.assertEqual(ann.urn, 'tel:5678')
        self.assertEqual(ann.room, self.room2)

        # check deleted contact (soft delete: row kept, flag cleared)
        bob = Contact.objects.get(uuid='C-002')
        self.assertFalse(bob.is_active)
| {
"content_hash": "42927c48890cd0e3a415b98589ad46f7",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 108,
"avg_line_length": 59.91549295774648,
"alnum_prop": 0.5916784203102962,
"repo_name": "rapidpro/chatpro",
"id": "75c851bc7fd0284a0c406fde41067776c18cc4df",
"size": "4254",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chatpro/utils/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2638"
},
{
"name": "CoffeeScript",
"bytes": "12697"
},
{
"name": "HTML",
"bytes": "22196"
},
{
"name": "Python",
"bytes": "142459"
}
],
"symlink_target": ""
} |
import os, pytest, rdflib, tempfile
from channelworm.ion_channel.exporter import Exporter
# Give every test in this module access to the Django test database.
pytestmark = [pytest.mark.django_db]

# Fixture paths for the RDF round-trip tests in this module.
SAMPLE_LOCATION = os.path.join('tests', 'test_data', 'rdf_data.n3')
EXPORT_LOCATION = os.path.join('tests', 'test_data', 'exported_data.n3')
def test_exporter_init_and_load(data_pool):
    """Constructing an Exporter from model instances should succeed.

    Uses IonChannel and Experiment sample data, which exercises the
    Exporter's load() method implicitly.
    """
    channel = data_pool.get_ion_channel()
    experiment = data_pool.get_experiment()
    exporter = Exporter(channel, experiment)
    assert type(exporter) == Exporter
def test_load_type_error():
    """Loading non-Model data into the Exporter must raise TypeError."""
    with pytest.raises(TypeError):
        Exporter(None)
def test_parse_without_source():
    """parse() without a usable 'source' argument must raise TypeError."""
    exporter = Exporter()
    with pytest.raises(TypeError):
        exporter.parse(source=None)
def test_parse():
    """parse() should produce a graph identical to rdflib's own parse.

    The graphs are compared with isomorphic(), so blank-node naming
    differences do not matter.
    """
    exporter = Exporter()
    exporter.parse(source=SAMPLE_LOCATION)
    expected = rdflib.Graph()
    expected.parse(source=SAMPLE_LOCATION, format='n3')
    assert expected.isomorphic(exporter.graph)
def test_load_parse_and_export(data_pool):
    """Round trip: parse + load data, export it, and re-parse the output.

    Passes when the exported file parses back into a graph isomorphic to
    the one held by the Exporter.
    """
    out_path = tempfile.mktemp()
    exporter = Exporter()
    reference = data_pool.get_reference()
    exporter.parse(SAMPLE_LOCATION)
    exporter.load(reference)
    exporter.export(filename=out_path)
    reloaded = rdflib.Graph().parse(out_path, format='n3')
    assert reloaded.isomorphic(exporter.graph)
| {
"content_hash": "6846c11022e3e91fc13c9849fa0e3362",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 72,
"avg_line_length": 27.855072463768117,
"alnum_prop": 0.6675338189386056,
"repo_name": "openworm/ChannelWorm",
"id": "b97af166ed1b6bb90a30193c00c2179b0e3b9c66",
"size": "1922",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tests/export_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11459"
},
{
"name": "HTML",
"bytes": "185500"
},
{
"name": "JavaScript",
"bytes": "598029"
},
{
"name": "Jupyter Notebook",
"bytes": "411940"
},
{
"name": "PLpgSQL",
"bytes": "2505"
},
{
"name": "Python",
"bytes": "252678"
},
{
"name": "Shell",
"bytes": "2962"
},
{
"name": "TeX",
"bytes": "13842"
}
],
"symlink_target": ""
} |
from pybuilder.core import use_plugin, init

# PyBuilder build script: register the plugins used by this project.
use_plugin('python.core')

# build
use_plugin('python.install_dependencies')
use_plugin('python.pydev')
use_plugin('python.distutils')

# quality
use_plugin('python.unittest')
use_plugin('python.coverage')
use_plugin('python.frosted')
use_plugin('python.flake8')
use_plugin('python.pylint')
use_plugin('python.pep8')

# IDE
use_plugin('python.pycharm')

# Tasks executed when `pyb` is run with no arguments.
default_task = ['install_dependencies', 'analyze', 'publish']
@init
def set_properties(project):
    """Configure build properties at PyBuilder initialization time."""
    # Fail the build on linter findings, and cap pylint's line length.
    for key, value in (('frosted_break_build', True),
                       ('flake8_break_build', True),
                       ('pylint_options', ["--max-line-length=79"])):
        project.set_property(key, value)
| {
"content_hash": "609beed7dc2ab1d44d97effbdb49f582",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 68,
"avg_line_length": 26.84,
"alnum_prop": 0.7347242921013413,
"repo_name": "careerfamily/simpleioc",
"id": "91465b0812044a5c4c0e297c685d68cc8814a858",
"size": "671",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "build.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "11606"
}
],
"symlink_target": ""
} |
from package_control import sys_path

# Register the bundled 'bz2' dependency with Package Control's loader so
# other Sublime Text packages can import it.
sys_path.add_dependency(u'bz2')
| {
"content_hash": "0d053fe89d17b05d1fc22f55f22fffdc",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 36,
"avg_line_length": 34.5,
"alnum_prop": 0.7971014492753623,
"repo_name": "herove/dotfiles",
"id": "4d4b42fadbd64b72bbe8ecdb82d83b07f8577659",
"size": "70",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "sublime/Packages/0_package_control_loader/02-bz2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "358958"
},
{
"name": "C++",
"bytes": "601356"
},
{
"name": "CMake",
"bytes": "17100"
},
{
"name": "Java",
"bytes": "77"
},
{
"name": "JavaScript",
"bytes": "1058301"
},
{
"name": "Python",
"bytes": "5847904"
},
{
"name": "Shell",
"bytes": "49159"
},
{
"name": "Vim script",
"bytes": "43682"
}
],
"symlink_target": ""
} |
from kotti import DBSession
from kotti.resources import Content, Document
from pyramid.view import view_config
@view_config(name='sitemap.xml',
             context=Document,
             permission='view',
             renderer='templates/sitemap.pt',)
def sitemap(request):
    """Render sitemap.xml: queries all Content nodes (polymorphically, so
    subclasses are included) and hands them to the sitemap template.
    """
    nodes = DBSession.query(Content).with_polymorphic(Content)
    # NOTE(review): setting request.response_content_type looks like it is
    # meant to force a text/xml response; confirm the Pyramid version in
    # use honours this attribute (modern Pyramid uses
    # request.response.content_type).
    request.response_content_type = "text/xml"
    return {'nodes': nodes}
| {
"content_hash": "388673b8b617c666d7c2189251c732d8",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 62,
"avg_line_length": 32,
"alnum_prop": 0.6899038461538461,
"repo_name": "koansys/kotti_sitemap",
"id": "781dfb33d4cac6d08a72b86aed9ddebb92e8fdc7",
"size": "416",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kotti_sitemap/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "2063"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated schema migration for the goals app.

    Adds an optional ``goal`` foreign key to CustomActionFeedback and
    relaxes its existing ``customgoal`` foreign key to allow blank/null,
    so feedback can reference either a Goal or a CustomGoal.
    """
    dependencies = [
        ('goals', '0166_auto_20160902_1934'),
    ]
    operations = [
        migrations.AddField(
            model_name='customactionfeedback',
            name='goal',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='goals.Goal'),
        ),
        migrations.AlterField(
            model_name='customactionfeedback',
            name='customgoal',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='goals.CustomGoal'),
        ),
    ]
| {
"content_hash": "0ee85eaed88c3ae08fbe7ed4f975a962",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 127,
"avg_line_length": 30.75,
"alnum_prop": 0.6355013550135501,
"repo_name": "tndatacommons/tndata_backend",
"id": "c5b463cee653fcee3b50cb8b39f0c4f1b27780b6",
"size": "810",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tndata_backend/goals/migrations/0167_auto_20160902_2013.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "29078"
},
{
"name": "HTML",
"bytes": "680433"
},
{
"name": "JavaScript",
"bytes": "186991"
},
{
"name": "Makefile",
"bytes": "393"
},
{
"name": "Python",
"bytes": "2023392"
},
{
"name": "Shell",
"bytes": "2282"
}
],
"symlink_target": ""
} |
import argparse
import errno
import itertools
import os
import sys
import time
from .confparser import ConfParser
from .confparser import ConfParserException
from .palette import Palette
from .transformer import Transformer
from .transformer import RegexStyle
from .txtsconf import *
from .version import VERSION
# Banner printed for the --version flag.
VERSION_INFO="""TxtStyle version %s.
Copyright (C) 2013 Arman Sharif.
Apache License v2.0 or later: <http://www.apache.org/licenses/LICENSE-2.0>
""" % VERSION
# Per-user configuration file; created on first run (see get_conf_path).
_USER_HOME_CONF_FILE = os.path.join(os.getenv('HOME', os.getenv("USERPROFILE")), '.txts.conf')
class Txts(object):
    """Applies styles to text read from a file or from stdin.

    Lines are styled only when stdout is a terminal (or color_always is
    set); otherwise input is passed through untouched so the output can
    be piped safely.
    """
    def __init__(self, styles, filepath=None, color_always=False):
        # styles: style objects handed to the Transformer.
        self.transformer = Transformer(styles)
        self.filepath = filepath
        self.color_always = color_always
    def transform(self):
        """Style the configured file, or stdin when used in a pipe."""
        if self.filepath:
            self._transform_file()
        elif not sys.stdin.isatty():
            self._transform_pipe()
    def _transform_file(self):
        """Stream self.filepath line by line through _style()."""
        try:
            with open(self.filepath, 'r', encoding='utf-8', errors='ignore') as infile:
                for line in infile:
                    self._style(line)
        except KeyboardInterrupt:
            pass
        except IOError as e:
            if e.errno == errno.ENOENT:
                sys.stderr.write("File not found: %s\n" % self.filepath)
            elif e.errno == errno.EPIPE:
                # broken pipe
                pass
            else:
                sys.stderr.write("%s\n" % e)
            # any IOError terminates the process with its errno
            sys.exit(e.errno)
    def _transform_pipe(self):
        """Read raw bytes from stdin and style each decoded line."""
        # detach() drops the text wrapper so we control decoding below
        sys.stdin = sys.stdin.detach()
        try:
            while True:
                line = sys.stdin.readline()
                if not line:
                    break
                self._style(line.decode('utf-8', errors='ignore'))
        except KeyboardInterrupt:
            pass
        finally:
            sys.stdin.close()
    def _style(self, line):
        """Write the (possibly styled) line to stdout."""
        if sys.stdout.isatty() or self.color_always:
            styled_line = self.transformer.style(line.strip('\n'))
            sys.stdout.write(styled_line + '\n')
        else:
            # not a terminal: pass through unmodified
            sys.stdout.write(line)
def parse_args():
    """Build and evaluate the TxtStyle command line.

    Returns:
        argparse.Namespace with attributes: filepath, palette, name,
        regex, conf, color_always and version.
    """
    parser = argparse.ArgumentParser(
        prog='TxtStyle',
        description='Prettifies output of console programs.')
    parser.add_argument('filepath', nargs='?', help='Path to a file.')
    # --palette / --name / --regex are mutually exclusive ways to pick styles.
    group = parser.add_mutually_exclusive_group()
    group.add_argument('-p', '--palette', help='Print a palette of available styles.', action='store_true')
    group.add_argument('-n', '--name', nargs=1, help='Name of the style to apply.')
    group.add_argument('-r', '--regex', nargs=1, action='append', help='Highlight text based on the given regular expression.')
    # Fixed: the actual default conf file is ~/.txts.conf (see
    # _USER_HOME_CONF_FILE), not ~/.txt.conf as the help text claimed.
    parser.add_argument('-c', '--conf', nargs=1, help='Path to a conf file. Default is: ~/.txts.conf')
    parser.add_argument('--color-always', help='Always use color. Similar to grep --color=always.', action='store_true')
    version_group = parser.add_mutually_exclusive_group()
    version_group.add_argument('--version', help='Print version information', action='store_true')
    return parser.parse_args()
def get_styles(conf_parser, style_def_name):
    """Look up the named style definition, exiting on a parse error."""
    try:
        styles = conf_parser.get_styles(style_def_name)
    except ConfParserException as err:
        sys.stderr.write("%s\n" % err)
        sys.exit(1)
    return styles
def get_conf_lines(args):
    """Return the raw lines of the resolved conf file (see get_conf_path)."""
    confpath = get_conf_path(args)
    with open(confpath, 'r') as f:
        return f.readlines()
def get_conf_path(args):
    """Resolve which conf file to use.

    A user-supplied --conf path wins (exiting with ENOENT if missing);
    otherwise fall back to the per-user file, creating it with the
    bundled default contents on first use.
    """
    if args.conf:
        # User-specified conf file
        filepath = args.conf[0]
        if not os.path.isfile(filepath):
            sys.stderr.write("File not found: %s\n" % filepath)
            sys.exit(errno.ENOENT)
        return filepath
    else:
        # User-home conf file (~/.txts.conf)
        if not os.path.isfile(_USER_HOME_CONF_FILE):
            # Seed it with the default configuration shipped in txtsconf.
            with open(_USER_HOME_CONF_FILE, 'w+') as f:
                f.write(DEFAULT_CONF)
        return _USER_HOME_CONF_FILE
def loop_default_colors():
    """Yield an endless sequence of (color, style) pairs.

    All six colors are produced in 'bold' first, then in 'underline',
    and the 12-pair pattern repeats forever.
    """
    styles = ('bold', 'underline')
    colors = ('red', 'green', 'blue', 'magenta', 'cyan', 'white')
    for style, color in itertools.cycle(itertools.product(styles, colors)):
        yield (color, style)
def main():
    """CLI entry point: dispatch on the parsed flags and run the transform."""
    args = parse_args()
    styles = []
    if args.version:
        sys.stdout.write(VERSION_INFO)
        sys.exit(0)
    elif args.palette:
        Palette().print_palette()
        sys.exit(0)
    elif args.name:
        # Named style definition loaded from the conf file.
        conf_lines = get_conf_lines(args)
        conf_parser = ConfParser(conf_lines)
        style_def_name = args.name[0]
        styles = get_styles(conf_parser, style_def_name)
    elif args.regex:
        # Ad-hoc regexes from the command line; args.regex is a list of
        # one-element lists (nargs=1 + action='append'), hence the flatten.
        # Each regex is paired with the next default (color, style).
        rexps = list(itertools.chain.from_iterable(args.regex))
        styles = [ RegexStyle(regex, style) for regex, style in zip(rexps, loop_default_colors()) ]
    txts = Txts(styles, args.filepath, args.color_always)
    txts.transform()
if __name__ == "__main__":
    main()
| {
"content_hash": "f0785bc880d4707d384192f887dbb9c5",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 127,
"avg_line_length": 32.48344370860927,
"alnum_prop": 0.6004077471967381,
"repo_name": "armandino/TxtStyle",
"id": "1712b728a2b5d1f294180f1e0355eafad73a4b06",
"size": "5509",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "txtstyle/txts.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "36914"
},
{
"name": "Shell",
"bytes": "1623"
}
],
"symlink_target": ""
} |
import copy
import fractions
import functools
import math
import numbers
import re
import sys
import weakref
class UnitArithmetic:
  """Dictionary algebra on unit-exponent maps.

  Units are represented as {unit_name: exponent} dictionaries; these
  helpers combine them under multiplication, division and
  exponentiation, normalizing the exponents through clean().
  """
  @classmethod
  def multiply(cls, first, second):
    """Add the exponents of two unit maps (product of quantities)."""
    keys = set(first) | set(second)
    merged = {key: first.get(key, 0) + second.get(key, 0) for key in keys}
    return cls.clean(merged)
  @classmethod
  def divide(cls, first, second):
    """Subtract the exponents of *second* from *first* (quotient)."""
    keys = set(first) | set(second)
    merged = {key: first.get(key, 0) - second.get(key, 0) for key in keys}
    return cls.clean(merged)
  @classmethod
  def power(cls, units, other):
    """Scale every exponent by *other*; anything to the 0th power is unitless."""
    if other == 0:
      return {}
    return cls.clean({unit: exp * other for unit, exp in units.items()})
  @staticmethod
  def clean(units):
    """Normalize exponents: drop zeros, demote whole-number Fractions and
    near-integral floats to plain ints."""
    result = {}
    for unit, exp in units.items():
      if isinstance(exp, fractions.Fraction):
        if exp.denominator != 1:
          result[unit] = exp
        elif exp.numerator != 0:
          result[unit] = exp.numerator
      else:
        nearest = round(exp)
        # each operation contributes at most one epsilon of relative error;
        # assuming limited number of operations and powers close to unity,
        # this is a reasonable upper bound for the absolute error
        if abs(exp - nearest) < 16 * sys.float_info.epsilon:
          if nearest != 0:
            result[unit] = nearest
        else:
          result[unit] = exp
    return result
class UnitSystem:
  """Registry of units and physical constants.

  Units map a name to a display symbol and an optional expansion (a
  definition in terms of other registered units); constants map a name
  to a symbol and a defining Quantity.  The system also expands
  quantities down to base units and formats them for display.
  """
  def __init__(self):
    self.units = {}
    self.constants = {}
  def add_unit(self, unit, symbol, expansion=None):
    """Register *unit* with display *symbol*.

    *expansion*, if given, defines the unit in terms of this system's
    existing units (a Quantity belonging to this system, or a number).
    """
    valid = (
      expansion is None or
      isinstance(expansion, numbers.Real) or
      (isinstance(expansion, Quantity) and expansion.system() is self))
    if not valid:
      raise TypeError('unit expansion is not in terms of system units')
    if isinstance(expansion, Quantity):
      # store the expansion fully reduced to base units
      expansion = expansion.expand()
    self.units[unit] = {'symbol': symbol, 'expansion': expansion}
  def add_constant(self, constant, symbol, definition):
    """Register a named constant whose value is *definition*."""
    valid = (
      isinstance(definition, numbers.Real) or
      (isinstance(definition, Quantity) and definition.system() is self))
    if not valid:
      raise TypeError('constant definition is not in terms of system units')
    if isinstance(definition, Quantity):
      definition = definition.expand()
    self.constants[constant] = {'symbol': symbol, 'definition': definition}
  def get_constant(self, arg):
    """Return the value of a constant, or of a product of constants."""
    if isinstance(arg, dict):
      # argument is a dictionary of constants and their powers
      return functools.reduce(Quantity.__mul__,
        (self.constants[constant]['definition'] ** power
          for constant, power in arg.items()))
    else:
      # argument is the name of a constant
      return self.constants[arg]['definition']
  def expand_quantity(self, quantity):
    """Return *quantity* rewritten purely in terms of base units."""
    return (Quantity(quantity.value, quantity.error, {}, self) *
      self.expand_units(quantity.units))
  def expand_units(self, units):
    """Expand a unit map into a Quantity over base units only.

    Works through a worklist: units without an expansion are kept as-is
    (base units); derived units are replaced by their expansion, whose
    own units are pushed back onto the worklist.
    """
    if not units:
      return Quantity(1, 0, {}, self)
    units = list(units.items())
    expansions = []
    while units:
      unit, power = units.pop()
      expansion = self.units.get(unit, {'expansion': None})['expansion']
      if expansion is None:
        expansions.append(Quantity(1, 0, {unit: power}, self))
      else:
        temp = expansion ** power
        units += list(temp.units.items())
        expansions.append(Quantity(temp.value, temp.error, {}, self))
    return functools.reduce(Quantity.__mul__, expansions)
  def format_quantity(self, quantity, format_spec):
    """Format a Quantity as text, including its error and units if any."""
    if quantity.units:
      units_string = self.format_units(quantity.units)
      if quantity.error == 0:
        return '{} {}'.format(quantity.value, units_string)
      else:
        value_error_string = self.format_value_error(
          quantity.value, quantity.error, format_spec, parens=True)
        return '{} {}'.format(value_error_string, units_string)
    else:
      if quantity.error == 0:
        return str(quantity.value)
      else:
        return self.format_value_error(
          quantity.value, quantity.error, format_spec)
  @staticmethod
  def format_value_error(value, error, format_spec, parens=False):
    """Render value and error per *format_spec* (e.g. '2s', '3p').

    The optional digit count sets the significant figures shown for the
    error; 's' separates value and error with a plus-minus sign, 'p'
    parenthesizes the error digits after the value.
    """
    # options: number of significant digits displayed for error
    # separate value and error ('s') or parenthesize error ('p')
    match = re.fullmatch(r'(\d+)?([sp])?', format_spec)
    if not match:
      raise ValueError('invalid format specifier')
    error_sigfig, mode = 2, 's'
    temp = match.group(1)
    if temp is not None:
      temp = int(temp)
      if temp > 0:
        error_sigfig = temp
    temp = match.group(2)
    if temp is not None:
      mode = temp
    if not math.isfinite(value) or not math.isfinite(error):
      if mode == 's':
        result = '{} \xb1 {}'.format(value, error)
      else:
        result = '{}({})'.format(value, error)
      return '({})'.format(result) if parens else result
    # 'head' and 'tail' refer to the places (in positional notation) of the
    # highest and lowest displayed digits of a number
    error_head = math.floor(math.log10(error))
    if value != 0:
      value_head = math.floor(math.log10(abs(value)))
    else:
      value_head = error_head
    error_tail = error_head - error_sigfig + 1
    # handle the case when rounding changes the number of significant digits
    if round(abs(value), -error_tail) >= 10**(value_head+1):
      value_head += 1
    if mode == 's':
      dp = max(value_head - error_tail, 0)
      if value_head == 0:
        result = '{:.{dp}f} \xb1 {:.{dp}f}'.format(value, error, dp=dp)
        return '({})'.format(result) if parens else result
      else:
        temp1 = value / 10**value_head
        temp2 = error / 10**value_head
        return '({:.{dp}f} \xb1 {:.{dp}f})e{:+03}'.format(temp1, temp2,
          value_head, dp=dp)
    else:
      if value_head >= error_head:
        formatted_head = value_head
        dp = value_head - error_tail
      else:
        formatted_head = error_head
        dp = error_sigfig - 1
      temp1 = value / 10**formatted_head
      temp2 = round(error / 10**error_tail)
      if formatted_head == 0:
        return '{:.{dp}f}({})'.format(temp1, temp2, dp=dp)
      else:
        return '{:.{dp}f}({})e{}'.format(temp1, temp2, formatted_head, dp=dp)
  def format_units(self, units):
    """Format a unit map; positive powers first, alphabetical within sign."""
    return ' '.join(self.format_unit(unit, power)
      for unit, power in sorted(units.items(),
        key=lambda arg: (math.copysign(1, -arg[1]), arg[0])))
  def format_unit(self, unit, power):
    """Format one unit with its power; unknown units fall back to their name."""
    symbol = self.units.get(unit, {'symbol': unit})['symbol']
    if power == 1:
      return symbol
    elif power < 0 or isinstance(power, fractions.Fraction):
      return '{}^({})'.format(symbol, power)
    else:
      return '{}^{}'.format(symbol, power)
  def copy(self):
    """Deep-copy the system, re-pointing its quantities' weak references."""
    result = copy.deepcopy(self)
    # copy.deepcopy updates the circular reference automatically, but not
    # circular weak references
    # NOTE(review): assumes every non-None expansion is a Quantity; a plain
    # numeric expansion (allowed by add_unit) has no .system — confirm.
    for data in result.units.values():
      if data['expansion'] is not None:
        data['expansion'].system = weakref.ref(result)
    for data in result.constants.values():
      data['definition'].system = weakref.ref(result)
    return result
class Quantity:
  """A value with absolute uncertainty and units, tied to a UnitSystem.

  Arithmetic propagates uncorrelated, first-order uncertainties and
  combines unit-exponent maps; sums and differences first expand both
  operands to base units so compatible quantities can be combined.
  Incompatible operands fall through to ``return NotImplemented`` (and
  hence a TypeError from Python).
  """
  def __init__(self, value, error, units, system):
    self.value = value
    self.error = error
    self.units = units
    # The system is held as a weak reference; derived quantities pass the
    # same weakref object along, so `self.system is other.system` detects
    # a shared system.
    if isinstance(system, weakref.ref):
      self.system = system
    else:
      self.system = weakref.ref(system)
  def __pos__(self):
    return Quantity(self.value, self.error, self.units, self.system)
  def __neg__(self):
    return Quantity(-self.value, self.error, self.units, self.system)
  def __abs__(self):
    return Quantity(abs(self.value), self.error, self.units, self.system)
  def __add__(self, other):
    if isinstance(other, Quantity) and self.system is other.system:
      first, second = self.expand(), other.expand()
      if first.units == second.units:
        value = first.value + second.value
        # uncorrelated errors add in quadrature
        error = math.hypot(first.error, second.error)
        return Quantity(value, error, first.units, first.system)
    elif isinstance(other, numbers.Real):
      if other == 0:
        return +self
      else:
        first = self.expand()
        # a bare number can only be added to a dimensionless quantity
        if not first.units:
          return Quantity(first.value + other, first.error, {}, first.system)
    return NotImplemented
  def __sub__(self, other):
    if isinstance(other, Quantity) and self.system is other.system:
      first, second = self.expand(), other.expand()
      if first.units == second.units:
        value = first.value - second.value
        error = math.hypot(first.error, second.error)
        return Quantity(value, error, first.units, first.system)
    elif isinstance(other, numbers.Real):
      if other == 0:
        return +self
      else:
        first = self.expand()
        if not first.units:
          return Quantity(first.value - other, first.error, {}, first.system)
    return NotImplemented
  def __mul__(self, other):
    if isinstance(other, Quantity) and self.system is other.system:
      value = self.value * other.value
      # d(xy) = hypot(y dx, x dy)
      error = math.hypot(self.error * other.value, other.error * self.value)
      units = UnitArithmetic.multiply(self.units, other.units)
      return Quantity(value, error, units, self.system)
    elif isinstance(other, numbers.Real):
      value = self.value * other
      error = abs(self.error * other)
      return Quantity(value, error, self.units, self.system)
    else:
      return NotImplemented
  def __truediv__(self, other):
    if isinstance(other, Quantity) and self.system is other.system:
      value = self.value / other.value
      # d(x/y) = hypot(dx/y, x dy/y^2)
      error = math.hypot(self.error / other.value,
        other.error * self.value / other.value**2)
      units = UnitArithmetic.divide(self.units, other.units)
      return Quantity(value, error, units, self.system)
    elif isinstance(other, numbers.Real):
      value = self.value / other
      error = abs(self.error / other)
      return Quantity(value, error, self.units, self.system)
    else:
      return NotImplemented
  def __pow__(self, other):
    if isinstance(other, Quantity) and self.system is other.system:
      first, second = self.expand(), other.expand()
      if not first.units and not second.units:
        value = first.value ** second.value
        if first.value != 0:
          # d(x^y) = x^y hypot(y dx/x, ln(x) dy)
          error = value * math.hypot(
            second.value / first.value * first.error,
            math.log(first.value) * second.error)
        else:
          error = first.error ** second.value
        return Quantity(value, error, {}, first.system)
      elif first.units and second.error == 0 and not second.units:
        # exact dimensionless exponent: delegate to the numeric branch
        return first ** second.value
    elif isinstance(other, numbers.Real):
      value = self.value ** other
      if self.value != 0:
        error = abs(other * value / self.value * self.error)
      else:
        error = self.error ** other
      units = UnitArithmetic.power(self.units, other)
      return Quantity(value, error, units, self.system)
    return NotImplemented
  def __radd__(self, other):
    if isinstance(other, numbers.Real):
      return self + other
    else:
      return NotImplemented
  def __rsub__(self, other):
    if isinstance(other, numbers.Real):
      return -self + other
    else:
      return NotImplemented
  def __rmul__(self, other):
    if isinstance(other, numbers.Real):
      return self * other
    else:
      return NotImplemented
  def __rtruediv__(self, other):
    if isinstance(other, numbers.Real):
      return self**-1 * other
    else:
      return NotImplemented
  def __rpow__(self, other):
    if isinstance(other, numbers.Real):
      second = self.expand()
      # numbers may only be raised to dimensionless exponents
      if not second.units:
        return Quantity(other, 0, {}, second.system) ** second
    return NotImplemented
  def __eq__(self, other):
    # strict structural equality (no unit expansion; see almost_equals)
    return (isinstance(other, Quantity) and
      self.value == other.value and self.error == other.error and
      self.units == other.units and self.system is other.system)
  def __ne__(self, other):
    return not self == other
  def __hash__(self):
    return hash((self.value, self.error,
      tuple(sorted(self.units.items())), self.system))
  def almost_equals(self, other):
    """Whether the two values agree to within their combined errors."""
    if isinstance(other, Quantity) and self.system is other.system:
      first, second = self.expand(), other.expand()
      if first.units == second.units:
        return abs(first.value - second.value) <= first.error + second.error
    elif isinstance(other, numbers.Real):
      first = self.expand()
      if not first.units:
        return abs(first.value - other) < first.error
    raise TypeError('quantities are incomparable: {} and {}'
      .format(self, other))
  def __float__(self):
    # only exact, dimensionless quantities convert cleanly to float
    first = self.expand()
    if first.error == 0 and not first.units:
      return float(first.value)
    else:
      raise TypeError('quantity has either error or units')
  def expand(self):
    """Return this quantity rewritten in base units of its system."""
    if self.system is not None:
      return self.system().expand_quantity(self)
    else:
      return self
  def copy(self):
    """Shallow copy with an independent units dictionary."""
    return Quantity(self.value, self.error, self.units.copy(), self.system)
  def __repr__(self):
    kwargs = ', '.join('{}={}'.format(key, repr(self.__dict__[key]))
      for key in ['value', 'error', 'units', 'system'])
    return '{}({})'.format(self.__class__.__name__, kwargs)
  def __str__(self):
    return format(self, '2s')
  def __format__(self, format_spec):
    return self.system().format_quantity(self, format_spec)
  @staticmethod
  def make_same_system(args):
    """Coerce a mix of numbers and quantities into one system.

    Raises TypeError unless all Quantity arguments share a system; plain
    numbers are wrapped as exact dimensionless quantities of that system.
    """
    if all(isinstance(arg, numbers.Real) for arg in args):
      return args
    if not all(isinstance(arg, (Quantity, numbers.Real)) for arg in args):
      raise TypeError('arguments are not all quantities or numbers')
    systems = [arg.system for arg in args if isinstance(arg, Quantity)]
    if not all(system is systems[0] for system in systems):
      raise TypeError('arguments do not have the same system')
    return [Quantity(arg, 0, {}, systems[0]) if isinstance(arg, numbers.Real)
      else arg for arg in args]
| {
"content_hash": "c0b21e1eb877ad834ebc845630cf8bae",
"timestamp": "",
"source": "github",
"line_count": 402,
"max_line_length": 78,
"avg_line_length": 34.90049751243781,
"alnum_prop": 0.6291518175338561,
"repo_name": "EdwinChan/python-physical",
"id": "bc7bac02b2e6bb78d2f1fce80bb2445a9e5deda8",
"size": "14030",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42111"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
import atexit
import errno
import os
import shutil
import sys
import tempfile
import time
import path_utils as path
import pywatchman
global_temp_dir = None
class TempDir(object):
    """
    This is a helper for locating a reasonable place for temporary files.
    When run in the watchman test suite, we compute this up-front and then
    store everything under that temporary directory.
    When run under the FB internal test runner, we infer a reasonable grouped
    location from the process group environmental variable exported by the
    test runner.
    """
    def __init__(self, keepAtShutdown=False):
        # We'll put all our temporary stuff under one dir so that we
        # can clean it all up at the end.
        parent_dir = tempfile.tempdir or os.environ.get("TMP", "/tmp")
        prefix = "watchmantest"
        self.temp_dir = path.get_canonical_filesystem_path(
            tempfile.mkdtemp(dir=parent_dir, prefix=prefix)
        )
        if os.name != "nt":
            # On some platforms, setting the setgid bit on a directory doesn't
            # work if the user isn't a member of the directory's group. Set the
            # group explicitly to avoid this.
            os.chown(self.temp_dir, -1, os.getegid())
        # Some environments have a weird umask that can leave state
        # directories too open and break tests.
        os.umask(0o022)
        # Redirect all temporary files to that location
        if pywatchman.compat.PYTHON3:
            tempfile.tempdir = os.fsdecode(self.temp_dir)
        else:
            tempfile.tempdir = self.temp_dir
        self.keep = keepAtShutdown

        # Remove (or, when keep is set, preserve) the tree at exit.
        def cleanup():
            if self.keep:
                sys.stdout.write("Preserving output in %s\n" % self.temp_dir)
                return
            self._retry_rmtree(self.temp_dir)

        atexit.register(cleanup)
    def get_dir(self):
        """Return the path of the managed temporary directory."""
        return self.temp_dir
    def set_keep(self, value):
        """Toggle whether the directory is preserved at shutdown."""
        self.keep = value
    def _retry_rmtree(self, top):
        """Remove *top*, retrying to ride out transient locks (Windows)."""
        # Keep trying to remove it; on Windows it may take a few moments
        # for any outstanding locks/handles to be released
        for _ in range(1, 10):
            shutil.rmtree(top, onerror=_remove_readonly)
            if not os.path.isdir(top):
                return
            sys.stdout.write("Waiting to remove temp data under %s\n" % top)
            time.sleep(0.2)
        sys.stdout.write("Failed to completely remove %s\n" % top)
def _remove_readonly(func, path, exc_info):
# If we encounter an EPERM or EACCESS error removing a file try making its parent
# directory writable and then retry the removal. This is necessary to clean up
# eden mount point directories after the checkout is unmounted, as these directories
# are made read-only by "eden clone"
_ex_type, ex, _traceback = exc_info
if not (
isinstance(ex, EnvironmentError) and ex.errno in (errno.EACCES, errno.EPERM)
):
# Just ignore other errors. This will be retried by _retry_rmtree()
return
try:
parent_dir = os.path.dirname(path)
os.chmod(parent_dir, 0o755)
# func() is the function that failed.
# This is usually os.unlink() or os.rmdir().
func(path)
except OSError as ex:
return
def get_temp_dir(keep=None):
    """Return the process-wide TempDir singleton, creating it on first call.

    On creation, *keep* (or, when None, the WATCHMAN_TEST_KEEP environment
    variable) decides whether the directory survives interpreter exit.
    Subsequent calls ignore *keep* and return the cached instance.
    """
    global global_temp_dir
    if not global_temp_dir:
        if keep is None:
            keep = os.environ.get("WATCHMAN_TEST_KEEP", "0") == "1"
        global_temp_dir = TempDir(keep)
    return global_temp_dir
| {
"content_hash": "8206eb0b22578a7bc796766eb50d879a",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 88,
"avg_line_length": 33.10909090909091,
"alnum_prop": 0.6328940142778693,
"repo_name": "wez/watchman",
"id": "cbbb5392663edfb887dcabd828e6b59a2ed3fd62",
"size": "3664",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/integration/TempDir.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "68354"
},
{
"name": "C++",
"bytes": "1017051"
},
{
"name": "CMake",
"bytes": "33772"
},
{
"name": "CSS",
"bytes": "42513"
},
{
"name": "HTML",
"bytes": "36593"
},
{
"name": "Java",
"bytes": "165025"
},
{
"name": "JavaScript",
"bytes": "35291"
},
{
"name": "Python",
"bytes": "677902"
},
{
"name": "Ruby",
"bytes": "21741"
},
{
"name": "Rust",
"bytes": "69015"
},
{
"name": "Shell",
"bytes": "13265"
},
{
"name": "Thrift",
"bytes": "32316"
}
],
"symlink_target": ""
} |
from sqlalchemy import Column, DateTime, String, text, create_engine
from sqlalchemy.dialects.mysql import INTEGER, TINYINT
from sqlalchemy.ext.declarative import declarative_base
# Declarative base class shared by all ORM models in this module.
Base = declarative_base()
metadata = Base.metadata
class PmUser(Base):
    """ORM model for the ``pm_user`` account table."""
    __tablename__ = 'pm_user'
    id = Column(INTEGER(11), primary_key=True)
    name = Column(String(40))
    email = Column(String(40), nullable=False, unique=True)  # login identity
    phone = Column(String(20))
    password = Column(String(255), nullable=False)
    # DB column comments are in Chinese: 是否活跃账号 = "is active account",
    # 是否管理员 = "is administrator".
    is_active = Column(TINYINT(1), nullable=False, comment='是否活跃账号')
    is_superuser = Column(TINYINT(1), nullable=False, comment='是否管理员')
    is_olduser = Column(TINYINT(1), server_default=text("'0'"))
    gmt_create = Column(DateTime)
    gmt_modified = Column(DateTime)
# Running this module directly creates the mapped tables in the local DB.
if __name__ == '__main__':
    engine = create_engine('mysql+pymysql://root:root@localhost/pricemonitor?charset=utf8', echo=True)
    Base.metadata.create_all(engine)
"content_hash": "694a86cd50980665980d4dca24b42ff7",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 102,
"avg_line_length": 37.8,
"alnum_prop": 0.707936507936508,
"repo_name": "qqxx6661/price-monitor",
"id": "2f99ad6a54ddcbb8d7347bcc27db7ebdf9a6a270",
"size": "983",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PriceMonitor/database/model/pm_user.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "304755"
}
],
"symlink_target": ""
} |
from enum import Enum
from six import with_metaclass
from azure.core import CaseInsensitiveEnumMeta
class AccessRights(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Access rights that can be granted: Manage, Send or Listen.
    """
    MANAGE = "Manage"
    SEND = "Send"
    LISTEN = "Listen"
class ClusterSkuName(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Name of this SKU.
    """
    # str-valued enum; CaseInsensitiveEnumMeta makes lookup case-insensitive.
    DEDICATED = "Dedicated"
class CreatedByType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """The type of identity that created the resource.
    """
    # str-valued enum; CaseInsensitiveEnumMeta makes lookup case-insensitive.
    USER = "User"
    APPLICATION = "Application"
    MANAGED_IDENTITY = "ManagedIdentity"
    KEY = "Key"
class DefaultAction(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Default Action for Network Rule Set
    """
    # str-valued enum; CaseInsensitiveEnumMeta makes lookup case-insensitive.
    ALLOW = "Allow"
    DENY = "Deny"
class EncodingCaptureDescription(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Enumerates the possible values for the encoding format of capture description. Note:
    'AvroDeflate' will be deprecated in New API Version
    """
    AVRO = "Avro"
    AVRO_DEFLATE = "AvroDeflate"  # deprecated in newer API versions (see above)
class EndPointProvisioningState(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Provisioning state of the Private Endpoint Connection.
    """
    # str-valued enum; CaseInsensitiveEnumMeta makes lookup case-insensitive.
    CREATING = "Creating"
    UPDATING = "Updating"
    DELETING = "Deleting"
    SUCCEEDED = "Succeeded"
    CANCELED = "Canceled"
    FAILED = "Failed"
class EntityStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Enumerates the possible values for the status of the Event Hub.
    """
    # str-valued enum; CaseInsensitiveEnumMeta makes lookup case-insensitive.
    ACTIVE = "Active"
    DISABLED = "Disabled"
    RESTORING = "Restoring"
    SEND_DISABLED = "SendDisabled"
    RECEIVE_DISABLED = "ReceiveDisabled"
    CREATING = "Creating"
    DELETING = "Deleting"
    RENAMING = "Renaming"
    UNKNOWN = "Unknown"
class KeyType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """The access key to regenerate.
    """
    # str-valued enum; CaseInsensitiveEnumMeta makes lookup case-insensitive.
    PRIMARY_KEY = "PrimaryKey"
    SECONDARY_KEY = "SecondaryKey"
class ManagedServiceIdentityType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Type of managed service identity.
    """
    # str-valued enum; CaseInsensitiveEnumMeta makes lookup case-insensitive.
    SYSTEM_ASSIGNED = "SystemAssigned"
    USER_ASSIGNED = "UserAssigned"
    SYSTEM_ASSIGNED_USER_ASSIGNED = "SystemAssigned, UserAssigned"
    NONE = "None"
class NetworkRuleIPAction(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """The IP Filter Action
    """
    # Only 'Allow' is defined for IP filter rules in this API version.
    ALLOW = "Allow"
class PrivateLinkConnectionStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Status of the connection.
    """
    # str-valued enum; CaseInsensitiveEnumMeta makes lookup case-insensitive.
    PENDING = "Pending"
    APPROVED = "Approved"
    REJECTED = "Rejected"
    DISCONNECTED = "Disconnected"
class ProvisioningStateDR(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Provisioning state of the Alias(Disaster Recovery configuration) - possible values 'Accepted'
    or 'Succeeded' or 'Failed'
    """
    ACCEPTED = "Accepted"
    SUCCEEDED = "Succeeded"
    FAILED = "Failed"
class PublicNetworkAccessFlag(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """This determines if traffic is allowed over public network. By default it is enabled.
    """
    # str-valued enum; CaseInsensitiveEnumMeta makes lookup case-insensitive.
    ENABLED = "Enabled"
    DISABLED = "Disabled"
class RoleDisasterRecovery(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """role of namespace in GEO DR - possible values 'Primary' or 'PrimaryNotReplicating' or
    'Secondary'
    """
    PRIMARY = "Primary"
    PRIMARY_NOT_REPLICATING = "PrimaryNotReplicating"
    SECONDARY = "Secondary"
class SkuName(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Name of this SKU.
    """
    # str-valued enum; CaseInsensitiveEnumMeta makes lookup case-insensitive.
    BASIC = "Basic"
    STANDARD = "Standard"
    PREMIUM = "Premium"
class SkuTier(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """The billing tier of this particular SKU.
    """
    # str-valued enum; CaseInsensitiveEnumMeta makes lookup case-insensitive.
    BASIC = "Basic"
    STANDARD = "Standard"
    PREMIUM = "Premium"
class UnavailableReason(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Specifies the reason for the unavailability of the service.
    """
    # str-valued enum; CaseInsensitiveEnumMeta makes lookup case-insensitive.
    NONE = "None"
    INVALID_NAME = "InvalidName"
    SUBSCRIPTION_IS_DISABLED = "SubscriptionIsDisabled"
    NAME_IN_USE = "NameInUse"
    NAME_IN_LOCKDOWN = "NameInLockdown"
    TOO_MANY_NAMESPACE_IN_CURRENT_SUBSCRIPTION = "TooManyNamespaceInCurrentSubscription"
| {
"content_hash": "bcecf8ced2c485aeb60cce9cc4336037",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 100,
"avg_line_length": 28.87162162162162,
"alnum_prop": 0.7002106248537328,
"repo_name": "Azure/azure-sdk-for-python",
"id": "d145b436a261162ad0978750d55e5562f6a82276",
"size": "4741",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/eventhub/azure-mgmt-eventhub/azure/mgmt/eventhub/v2021_06_01_preview/models/_event_hub_management_client_enums.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
default_app_config = 'daiquiri.stats.apps.StatsConfig' | {
"content_hash": "9bb62b6283b0b96f821255ebf8babe2f",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 54,
"avg_line_length": 54,
"alnum_prop": 0.8148148148148148,
"repo_name": "aipescience/django-daiquiri",
"id": "944830e7e434d1492142b9df89c18107c0a7b1bd",
"size": "54",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "daiquiri/stats/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "28598"
},
{
"name": "HTML",
"bytes": "236579"
},
{
"name": "JavaScript",
"bytes": "97087"
},
{
"name": "Python",
"bytes": "602159"
}
],
"symlink_target": ""
} |
from libcloud.storage.types import Provider
from libcloud.storage.types import OLD_CONSTANT_TO_NEW_MAPPING
from libcloud.common.providers import get_driver as _get_provider_driver
from libcloud.common.providers import set_driver as _set_provider_driver
# Maps each storage Provider constant to a (module path, driver class name)
# pair; the class is imported lazily by get_driver() via
# libcloud.common.providers.
DRIVERS = {
    Provider.DUMMY:
    ('libcloud.storage.drivers.dummy', 'DummyStorageDriver'),
    Provider.CLOUDFILES:
    ('libcloud.storage.drivers.cloudfiles', 'CloudFilesStorageDriver'),
    Provider.OPENSTACK_SWIFT:
    ('libcloud.storage.drivers.cloudfiles', 'OpenStackSwiftStorageDriver'),
    Provider.S3:
    ('libcloud.storage.drivers.s3', 'S3StorageDriver'),
    Provider.S3_US_EAST2:
    ('libcloud.storage.drivers.s3', 'S3USEast2StorageDriver'),
    Provider.S3_US_WEST:
    ('libcloud.storage.drivers.s3', 'S3USWestStorageDriver'),
    Provider.S3_US_WEST_OREGON:
    ('libcloud.storage.drivers.s3', 'S3USWestOregonStorageDriver'),
    Provider.S3_US_GOV_WEST:
    ('libcloud.storage.drivers.s3', 'S3USGovWestStorageDriver'),
    Provider.S3_CN_NORTH:
    ('libcloud.storage.drivers.s3', 'S3CNNorthStorageDriver'),
    Provider.S3_EU_WEST:
    ('libcloud.storage.drivers.s3', 'S3EUWestStorageDriver'),
    Provider.S3_EU_WEST2:
    ('libcloud.storage.drivers.s3', 'S3EUWest2StorageDriver'),
    Provider.S3_EU_CENTRAL:
    ('libcloud.storage.drivers.s3', 'S3EUCentralStorageDriver'),
    Provider.S3_AP_SOUTH:
    ('libcloud.storage.drivers.s3', 'S3APSouthStorageDriver'),
    Provider.S3_AP_SOUTHEAST:
    ('libcloud.storage.drivers.s3', 'S3APSEStorageDriver'),
    Provider.S3_AP_SOUTHEAST2:
    ('libcloud.storage.drivers.s3', 'S3APSE2StorageDriver'),
    Provider.S3_AP_NORTHEAST:
    ('libcloud.storage.drivers.s3', 'S3APNE1StorageDriver'),
    Provider.S3_AP_NORTHEAST1:
    ('libcloud.storage.drivers.s3', 'S3APNE1StorageDriver'),
    Provider.S3_AP_NORTHEAST2:
    ('libcloud.storage.drivers.s3', 'S3APNE2StorageDriver'),
    Provider.S3_SA_EAST:
    ('libcloud.storage.drivers.s3', 'S3SAEastStorageDriver'),
    Provider.S3_CA_CENTRAL:
    ('libcloud.storage.drivers.s3', 'S3CACentralStorageDriver'),
    Provider.S3_RGW:
    ('libcloud.storage.drivers.rgw', 'S3RGWStorageDriver'),
    Provider.S3_RGW_OUTSCALE:
    ('libcloud.storage.drivers.rgw', 'S3RGWOutscaleStorageDriver'),
    Provider.NINEFOLD:
    ('libcloud.storage.drivers.ninefold', 'NinefoldStorageDriver'),
    Provider.GOOGLE_STORAGE:
    ('libcloud.storage.drivers.google_storage', 'GoogleStorageDriver'),
    Provider.NIMBUS:
    ('libcloud.storage.drivers.nimbus', 'NimbusStorageDriver'),
    Provider.LOCAL:
    ('libcloud.storage.drivers.local', 'LocalStorageDriver'),
    Provider.AZURE_BLOBS:
    ('libcloud.storage.drivers.azure_blobs', 'AzureBlobsStorageDriver'),
    Provider.KTUCLOUD:
    ('libcloud.storage.drivers.ktucloud', 'KTUCloudStorageDriver'),
    Provider.AURORAOBJECTS:
    ('libcloud.storage.drivers.auroraobjects', 'AuroraObjectsStorageDriver'),
    Provider.BACKBLAZE_B2:
    ('libcloud.storage.drivers.backblaze_b2', 'BackblazeB2StorageDriver'),
    Provider.ALIYUN_OSS:
    ('libcloud.storage.drivers.oss', 'OSSStorageDriver'),
    Provider.DIGITALOCEAN_SPACES:
    ('libcloud.storage.drivers.digitalocean_spaces',
     'DigitalOceanSpacesStorageDriver'),
}
def get_driver(provider):
    """Return the storage driver class registered for ``provider``.

    Legacy provider constants are resolved transparently via
    ``OLD_CONSTANT_TO_NEW_MAPPING`` before the lookup in ``DRIVERS``.

    :param provider: Provider constant identifying the storage driver.
    :return: The driver class for the requested provider.
    """
    return _get_provider_driver(
        drivers=DRIVERS, provider=provider,
        deprecated_constants=OLD_CONSTANT_TO_NEW_MAPPING)
def set_driver(provider, module, klass):
    """Register ``klass`` (importable from ``module``) as the driver for
    ``provider`` in the module-level ``DRIVERS`` registry.

    :param provider: Provider constant to register the driver under.
    :param module: Dotted path of the module containing the driver class.
    :param klass: Name of the driver class.
    :return: Whatever ``_set_provider_driver`` returns for the registration.
    """
    return _set_provider_driver(provider=provider, drivers=DRIVERS,
                                module=module, klass=klass)
| {
"content_hash": "09ac4bd9a77a6dcbc4fed2f743c629ad",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 77,
"avg_line_length": 43.66265060240964,
"alnum_prop": 0.7224061810154525,
"repo_name": "vongazman/libcloud",
"id": "183189ebb88aa1fd4ab6c64b4a52c6633da64f1d",
"size": "4406",
"binary": false,
"copies": "4",
"ref": "refs/heads/trunk",
"path": "libcloud/storage/providers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "2545"
},
{
"name": "PowerShell",
"bytes": "410"
},
{
"name": "Python",
"bytes": "6421604"
},
{
"name": "Shell",
"bytes": "5936"
}
],
"symlink_target": ""
} |
from traitsui.null.rgb_color_trait import *
| {
"content_hash": "ff4106674377fadd9e0d3659dee1c589",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 43,
"avg_line_length": 44,
"alnum_prop": 0.7954545454545454,
"repo_name": "enthought/etsproxy",
"id": "f28225edf308d1801a3b67c3a3b9a6c623e9ac8b",
"size": "59",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "enthought/traits/ui/null/rgb_color_trait.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "363714"
}
],
"symlink_target": ""
} |
# Wildcard import supplies everything used below: login(), currentQ(),
# quarter1() and the ``browser`` object (presumably a browser-automation
# driver -- only its quit() method is used here; TODO confirm).
from framework import *

# Authenticate before scraping any grade data.
login()

# Ask the user which grading term to scrape; anything other than the two
# supported choices shuts the browser down and exits.
term = input("Grading Term ('current' or 'q1'): ")
if term == "current":
    currentQ()
elif term == "q1":
    quarter1()
else:
    print("Invalid Choice Please Restart")
    browser.quit()
    exit()
| {
"content_hash": "1f50ba8c8f4b7f4e74d4b9c846906d3e",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 50,
"avg_line_length": 17.857142857142858,
"alnum_prop": 0.576,
"repo_name": "sapblatt11/veraScrape",
"id": "b27d65d580de3167947b063052d80ca87dd651f3",
"size": "272",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3741"
}
],
"symlink_target": ""
} |
"""
NMA (Notify My Android) notification service.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/notify.nma/
"""
import logging
import xml.etree.ElementTree as ET
import requests
from blumate.components.notify import (
ATTR_TITLE, DOMAIN, BaseNotificationService)
from blumate.const import CONF_API_KEY
from blumate.helpers import validate_config
_LOGGER = logging.getLogger(__name__)

# Base URL of the public NMA HTTP API; endpoint names ('verify', 'notify')
# are appended by the functions below.
_RESOURCE = 'https://www.notifymyandroid.com/publicapi/'
def get_service(hass, config):
    """Get the NMA notification service.

    Validates that an API key is present in ``config`` and checks it
    against the NMA ``verify`` endpoint before constructing the service.

    :param hass: Home automation core object (unused here).
    :param config: Platform configuration; must contain CONF_API_KEY.
    :return: An NmaNotificationService, or None if the configuration is
        invalid or the API key is rejected.
    """
    if not validate_config({DOMAIN: config},
                           {DOMAIN: [CONF_API_KEY]},
                           _LOGGER):
        return None

    # A timeout keeps platform setup from hanging forever if the NMA
    # endpoint is unreachable (requests has no default timeout).
    response = requests.get(_RESOURCE + 'verify',
                            params={"apikey": config[CONF_API_KEY]},
                            timeout=10)
    tree = ET.fromstring(response.content)

    # The NMA API reports failures as an <error> element in the XML body.
    if tree[0].tag == 'error':
        _LOGGER.error("Wrong API key supplied. %s", tree[0].text)
        return None

    return NmaNotificationService(config[CONF_API_KEY])
# pylint: disable=too-few-public-methods
class NmaNotificationService(BaseNotificationService):
    """Implement the notification service for NMA."""

    def __init__(self, api_key):
        """Initialize the service with a (pre-verified) NMA API key."""
        self._api_key = api_key

    def send_message(self, message="", **kwargs):
        """Send a message to a user.

        The notification title is read from ``kwargs`` under ATTR_TITLE.
        API errors are logged rather than raised.
        """
        data = {
            "apikey": self._api_key,
            "application": 'home-assistant',
            "event": kwargs.get(ATTR_TITLE),
            "description": message,
            "priority": 0,
        }

        # Timeout prevents a hung HTTP request from blocking the notify
        # component indefinitely (requests has no default timeout).
        response = requests.get(_RESOURCE + 'notify', params=data,
                                timeout=10)
        tree = ET.fromstring(response.content)

        if tree[0].tag == 'error':
            # Not inside an except block, so logger.error is correct here;
            # logger.exception would log a spurious "NoneType: None"
            # traceback when no exception is active.
            _LOGGER.error(
                "Unable to perform request. Error: %s", tree[0].text)
| {
"content_hash": "13cc93f088b8d0fa81cb500245beabc1",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 74,
"avg_line_length": 30.661290322580644,
"alnum_prop": 0.6186217780115728,
"repo_name": "bdfoster/blumate",
"id": "1a170fbd376e709dc94f07979fa967b2e856157d",
"size": "1901",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blumate/components/notify/nma.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1309487"
},
{
"name": "JavaScript",
"bytes": "10846"
},
{
"name": "Python",
"bytes": "2460958"
},
{
"name": "Ruby",
"bytes": "379"
},
{
"name": "Shell",
"bytes": "6407"
}
],
"symlink_target": ""
} |
import os
import numpy as np
import tensorflow as tf
import dataplumbing as dp
##########################################################################################
# Settings
##########################################################################################

# Model settings
#
num_features = dp.train.num_features  # Width of each per-step input vector (from the dataset)
max_steps = dp.train.max_length  # Longest sequence in the dataset; loop unrolls this many steps
num_cells = 250  # Number of recurrent units in the RWA layer
num_classes = dp.train.num_classes  # Output width (ly is reshaped to a scalar per example below)
activation = tf.nn.tanh  # Nonlinearity applied to the recurrent state
initialization_factor = 1.0  # Scales the variance of the weight/state initializers

# Training parameters
#
num_iterations = 50000  # Total number of mini-batch updates
batch_size = 100  # Examples per mini-batch
learning_rate = 0.001  # Adam step size
##########################################################################################
# Model
##########################################################################################

# Inputs
#
# x holds a batch of (padded) sequences, l the true length of each sequence so
# that recurrent updates past the end of a sequence can be masked out, and y a
# single regression target per sequence.
x = tf.placeholder(tf.float32, [batch_size, max_steps, num_features])  # Features
l = tf.placeholder(tf.int32, [batch_size])  # Sequence length
y = tf.placeholder(tf.float32, [batch_size])  # Labels

# Trainable parameters
#
# The uniform ranges follow a Glorot-style sqrt(6/(fan_in+fan_out)) scheme,
# scaled by initialization_factor; biases start at zero.
s = tf.Variable(tf.random_normal([num_cells], stddev=np.sqrt(initialization_factor)))  # Determines initial state
# Gate weights: g is computed from the concatenated [input, hidden] vector and
# modulates the candidate u via tanh(g) below.
W_g = tf.Variable(
	tf.random_uniform(
		[num_features+num_cells, num_cells],
		minval=-np.sqrt(6.0*initialization_factor/(num_features+2.0*num_cells)),
		maxval=np.sqrt(6.0*initialization_factor/(num_features+2.0*num_cells))
	)
)
b_g = tf.Variable(tf.zeros([num_cells]))
# Candidate weights: u is computed from the raw input only.
W_u = tf.Variable(
	tf.random_uniform(
		[num_features, num_cells],
		minval=-np.sqrt(6.0*initialization_factor/(num_features+num_cells)),
		maxval=np.sqrt(6.0*initialization_factor/(num_features+num_cells))
	)
)
b_u = tf.Variable(tf.zeros([num_cells]))
# Attention weights: a produces the (unnormalized) log-weights of the running
# weighted average; see the bias-cancellation note in the loop below.
W_a = tf.Variable(
	tf.random_uniform(
		[num_features+num_cells, num_cells],
		minval=-np.sqrt(6.0*initialization_factor/(num_features+2.0*num_cells)),
		maxval=np.sqrt(6.0*initialization_factor/(num_features+2.0*num_cells))
	)
)
# Output projection from the final hidden state.
W_o = tf.Variable(
	tf.random_uniform(
		[num_cells, num_classes],
		minval=-np.sqrt(6.0*initialization_factor/(num_cells+num_classes)),
		maxval=np.sqrt(6.0*initialization_factor/(num_cells+num_classes))
	)
)
b_o = tf.Variable(tf.zeros([num_classes]))

# Internal states
#
# n/d are the numerator and denominator of the running weighted average; a_max
# tracks the running maximum of the attention logits for numerical stability.
n = tf.zeros([batch_size, num_cells])
d = tf.zeros([batch_size, num_cells])
h = tf.zeros([batch_size, num_cells])
a_max = tf.fill([batch_size, num_cells], -1E38)  # Start off with lowest number possible

# Define model
#
# Recurrent weighted average: at each step the candidate z = u * tanh(g) is
# folded into a running average weighted by exp(a), maintained in the
# numerically stable log-sum-exp style via a_max.
h += activation(tf.expand_dims(s, 0))  # Broadcast the learned initial state across the batch
for i in range(max_steps):
	x_step = x[:,i,:]
	xh_join = tf.concat(axis=1, values=[x_step, h])  # Combine the features and hidden state into one tensor
	u = tf.matmul(x_step, W_u)+b_u
	g = tf.matmul(xh_join, W_g)+b_g
	a = tf.matmul(xh_join, W_a)  # The bias term when factored out of the numerator and denominator cancels and is unnecessary
	z = tf.multiply(u, tf.nn.tanh(g))
	a_newmax = tf.maximum(a_max, a)
	exp_diff = tf.exp(a_max-a_newmax)  # Rescales the old accumulators to the new maximum
	exp_scaled = tf.exp(a-a_newmax)
	n = tf.multiply(n, exp_diff)+tf.multiply(z, exp_scaled)  # Numerically stable update of numerator
	d = tf.multiply(d, exp_diff)+exp_scaled  # Numerically stable update of denominator
	h_new = activation(tf.div(n, d))
	a_max = a_newmax
	h = tf.where(tf.greater(l, i), h_new, h)  # Use new hidden state only if the sequence length has not been exceeded
ly = tf.matmul(h, W_o)+b_o
# NOTE(review): reshaping [batch_size, num_classes] to [batch_size] assumes
# num_classes == 1 (scalar regression) -- confirm against dataplumbing.
ly_flat = tf.reshape(ly, [batch_size])
##########################################################################################
# Optimizer/Analyzer
##########################################################################################

# Cost function and optimizer
#
cost = tf.reduce_mean(tf.square(ly_flat-y))  # Mean squared error over the batch
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)

##########################################################################################
# Train
##########################################################################################

# Operation to initialize session
#
initializer = tf.global_variables_initializer()

# Open session
#
with tf.Session() as session:

	# Initialize variables
	#
	session.run(initializer)

	# Each training session represents one batch
	#
	for iteration in range(num_iterations):

		# Grab a batch of training data
		#
		xs, ls, ys = dp.train.batch(batch_size)
		feed = {x: xs, l: ls, y: ys}

		# Update parameters
		#
		# Fetching (cost, optimizer) runs one Adam step and returns the
		# pre-update cost in out[0].
		out = session.run((cost, optimizer), feed_dict=feed)
		print('Iteration:', iteration, 'Dataset:', 'train', 'Cost:', out[0])

		# Periodically run model on test data
		#
		if iteration%100 == 0:

			# Grab a batch of test data
			#
			xs, ls, ys = dp.test.batch(batch_size)
			feed = {x: xs, l: ls, y: ys}

			# Run model
			#
			# Only the cost op is fetched here, so no parameter update
			# happens on test data.
			out = session.run(cost, feed_dict=feed)
			print('Iteration:', iteration, 'Dataset:', 'test', 'Cost:', out)

	# Save the trained model
	#
	os.makedirs('bin', exist_ok=True)
	saver = tf.train.Saver()
	saver.save(session, 'bin/train.ckpt')
| {
"content_hash": "a584ec15e59c95e0ef79ae03cdf93193",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 126,
"avg_line_length": 28.790419161676645,
"alnum_prop": 0.586730449251248,
"repo_name": "jostmey/rwa",
"id": "93120002bc5a8da0dca0998fb3529568d7da2ec1",
"size": "5409",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "adding_problem_100/rwa_model/train.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "182460"
}
],
"symlink_target": ""
} |
import mock
import pytest
import unittest
from mercury.common.exceptions import MercuryClientException
from mercury.log_service import service as log_server
mock_make_agent_log_service = mock.MagicMock()
def mock_init(self, bind_address):
    """Stand-in for SimpleRouterReqService.__init__ used in setUp.

    Records the bind address and installs MagicMock placeholders for the
    context and socket attributes, so no real resources are created.
    """
    self.bind_address = bind_address
    self.bound = False
    self.context = mock.MagicMock()
    self.socket = mock.MagicMock()
class AgentLogServiceTestCase(unittest.TestCase):
    """Unit tests for log_server.AgentLogService.process."""

    def setUp(self):
        """Build an AgentLogService without running the real base __init__."""
        target = 'mercury.log_service.service.SimpleRouterReqService.__init__'
        with mock.patch(target, new=mock_init):
            self.agent_log_service = log_server.AgentLogService(
                'invalid_bind_url', mock.Mock())

    def test_process(self):
        """process() rejects malformed payloads and inserts valid records."""
        # An empty payload must be rejected with an error reply.
        self.assertDictEqual(
            self.agent_log_service.process({}),
            {"message": "Invalid message", "error": True})

        # A complete agent log record, shaped like a serialized LogRecord.
        record = {
            "name": "mercury.agent.agent",
            "msg": "Injection completed",
            "args": [],
            "levelname": "INFO",
            "levelno": 20,
            "pathname": "/src/mercury/agent/mercury/agent/agent.py",
            "filename": "agent.py",
            "module": "agent",
            "exc_info": None,
            "exc_text": None,
            "stack_info": None,
            "lineno": 89,
            "funcName": "run",
            "created": 1510076870.9318643,
            "msecs": 931.8642616271973,
            "relativeCreated": 405.81798553466797,
            "thread": 139861930071808,
            "threadName": "MainThread",
            "processName": "MainProcess",
            "process": 71,
            "message": "Injection completed",
            "asctime": "2017-11-07 17:47:50,931",
            "mercury_id": "0112426690c16b0fd4586e91731a0cc4f3f918c5af"
        }

        # A valid record is acknowledged and inserted exactly once.
        self.agent_log_service.log_collection.insert = mock.MagicMock()
        assert self.agent_log_service.process(record) == {'message': 'ok', 'error': False}
        self.agent_log_service.log_collection.insert.assert_called_once_with(record)
| {
"content_hash": "59b1e8423684976305386b6b289d1537",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 91,
"avg_line_length": 36.23728813559322,
"alnum_prop": 0.5701590271281571,
"repo_name": "jr0d/mercury",
"id": "59c2a3445fc4d4a9731235caa5ba631f5d30160a",
"size": "2138",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/tests/log_service/unit/test_agent_log_service.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "420"
},
{
"name": "Gherkin",
"bytes": "62330"
},
{
"name": "Makefile",
"bytes": "1627"
},
{
"name": "Python",
"bytes": "323696"
},
{
"name": "Shell",
"bytes": "1694"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.