| code (string, 3–1.05M chars) | repo_name (string, 5–104 chars) | path (string, 4–251 chars) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 3–1.05M) |
|---|---|---|---|---|---|
# -*- coding: utf-8 -*-
# Copyright(C) 2009-2012 Romain Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from decimal import Decimal
import datetime
from weboob.capabilities.bank import Transaction
from weboob.capabilities import NotAvailable
from weboob.tools.misc import to_unicode
__all__ = ['FrenchTransaction']
class FrenchTransaction(Transaction):
"""
Transaction with some helpers for French bank websites.
"""
PATTERNS = []
@classmethod
def clean_amount(klass, text):
"""
Clean a string containing an amount.
"""
return text.replace(' ', '').replace('.','').replace(u'\xa0', '') \
.replace(',','.').strip(u' \t\u20ac\xa0\x80€\n\rEUR')
def set_amount(self, credit='', debit=''):
"""
Set an amount value from a string.
Can take two strings if there are both credit and debit
columns.
"""
credit = self.clean_amount(credit)
debit = self.clean_amount(debit)
if len(debit) > 0:
self.amount = - abs(Decimal(debit))
else:
self.amount = Decimal(credit)
def parse(self, date, raw):
"""
Parse date and raw strings to create datetime.date objects,
determine the type of transaction, and create a simplified label.
When calling this method, you should have defined patterns (in the
PATTERNS class attribute) with a list containing tuples of regexp
and the associated type, for example::
PATTERNS = [(re.compile('^VIR(EMENT)? (?P<text>.*)'), FrenchTransaction.TYPE_TRANSFER),
(re.compile('^PRLV (?P<text>.*)'), FrenchTransaction.TYPE_ORDER),
(re.compile('^(?P<text>.*) CARTE \d+ PAIEMENT CB (?P<dd>\d{2})(?P<mm>\d{2}) ?(.*)$'),
FrenchTransaction.TYPE_CARD)
]
In the regexps, you can define these named groups:
* text: part of label to store in simplified label
* category: part of label representing the category
* yy, mm, dd, HH, MM: date and time parts
"""
if not isinstance(date, (datetime.date, datetime.datetime)):
if date.isdigit() and len(date) == 8:
date = datetime.date(int(date[4:8]), int(date[2:4]), int(date[0:2]))
elif '/' in date:
date = datetime.date(*reversed([int(x) for x in date.split('/')]))
if date.year < 100:
date = date.replace(year=2000 + date.year)
self.date = date
self.rdate = date
self.raw = to_unicode(raw.replace(u'\n', u' ').strip())
self.category = NotAvailable
if ' ' in self.raw:
self.category, useless, self.label = [part.strip() for part in self.raw.partition(' ')]
else:
self.label = self.raw
for pattern, _type in self.PATTERNS:
m = pattern.match(self.raw)
if m:
args = m.groupdict()
def inargs(key):
"""
inner function to check if a key is in args,
and is not None.
"""
return args.get(key, None) is not None
self.type = _type
if inargs('text'):
self.label = args['text'].strip()
if inargs('category'):
self.category = args['category'].strip()
# Set date from information in raw label.
if inargs('dd') and inargs('mm'):
dd = int(args['dd'])
mm = int(args['mm'])
if inargs('yy'):
yy = int(args['yy'])
else:
d = datetime.date.today()
try:
d = d.replace(month=mm, day=dd)
except ValueError:
d = d.replace(year=d.year-1, month=mm, day=dd)
yy = d.year
if d > datetime.date.today():
yy -= 1
if yy < 100:
yy += 2000
if inargs('HH') and inargs('MM'):
self.rdate = datetime.datetime(yy, mm, dd, int(args['HH']), int(args['MM']))
else:
self.rdate = datetime.date(yy, mm, dd)
return
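# A minimal usage sketch for the FrenchTransaction helper above, assuming
# weboob is importable; the regexps and the amount string are illustrative,
# not taken from any real backend module.
import re
from weboob.tools.capabilities.bank.transactions import FrenchTransaction

class MyBankTransaction(FrenchTransaction):
    PATTERNS = [(re.compile(r'^VIR(EMENT)? (?P<text>.*)'), FrenchTransaction.TYPE_TRANSFER),
                (re.compile(r'^CARTE (?P<dd>\d{2})/(?P<mm>\d{2}) (?P<text>.*)'),
                 FrenchTransaction.TYPE_CARD),
                ]

# clean_amount() is a classmethod, so it can be exercised without constructing
# a full Transaction object:
print(MyBankTransaction.clean_amount(u'1 234,56 €'))   # -> '1234.56'
# In a backend, each scraped row would then go through tr.set_amount(credit, debit)
# and tr.parse(date_str, raw_label) to fill amount, type, label, date and rdate.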
| eirmag/weboob | weboob/tools/capabilities/bank/transactions.py | Python | agpl-3.0 | 5,196 |
#!/usr/bin/env python
"""
Created on 1 Feb 2018
@author: loay
"""
import os
import sys
import time
from subprocess import *
import sigploit
import ss7main
simsi_path = os.path.join(os.getcwd(), 'ss7/attacks/fraud/simsi')
mtsms_path = os.path.join(os.getcwd(), 'ss7/attacks/fraud/mtsms')
cl_path = os.path.join(os.getcwd(), 'ss7/attacks/fraud/cl')
isd_path = os.path.join(os.getcwd(),'ss7/attacks/fraud/isd')
sai_path = os.path.join(os.getcwd(),'ss7/attacks/fraud/sai')
def simsi():
jar_file = 'SendIMSI.jar'
try:
sendIMSI = check_call(['java', '-jar', os.path.join(simsi_path, jar_file)])
if sendIMSI == 0:
fr = raw_input('\nWould you like to go back to Fraud Menu? (y/n): ')
if fr == 'y' or fr == 'yes':
ss7main.ss7fraud()
elif fr == 'n' or fr == 'no':
attack_menu = raw_input('Would you like to choose another attacks category? (y/n): ')
if attack_menu == 'y' or attack_menu == 'yes':
ss7main.attacksMenu()
elif attack_menu == 'n' or attack_menu == 'no':
main_menu = raw_input('Would you like to go back to the main menu? (y/exit): ')
if main_menu == 'y' or main_menu == 'yes':
sigploit.mainMenu()
elif main_menu == 'exit':
print 'TCAP End...'
sys.exit(0)
except CalledProcessError as e:
print "\033[31m[-]Error:\033[0m%s Failed to Launch, %s" %(jar_file, e.message)
time.sleep(2)
ss7main.ss7fraud()
def mtsms():
jar_file = 'MTForwardSMS.jar'
try:
mtForwardSMS = check_call(['java', '-jar', os.path.join(mtsms_path, jar_file)])
if mtForwardSMS == 0:
fr = raw_input('\nWould you like to go back to Fraud Menu? (y/n): ')
if fr == 'y' or fr == 'yes':
ss7main.ss7fraud()
elif fr == 'n' or fr == 'no':
attack_menu = raw_input('Would you like to choose another attacks category? (y/n): ')
if attack_menu == 'y' or attack_menu == 'yes':
ss7main.attacksMenu()
elif attack_menu == 'n' or attack_menu == 'no':
main_menu = raw_input('Would you like to go back to the main menu? (y/exit): ')
if main_menu == 'y' or main_menu == 'yes':
sigploit.mainMenu()
elif main_menu == 'exit':
print 'TCAP End...'
sys.exit(0)
except CalledProcessError as e:
print "\033[31m[-]Error:\033[0mMTForwardSMS Failed to Launch, " + str(e)
time.sleep(2)
ss7main.ss7fraud()
def cl():
jar_file = 'CancelLocation.jar'
try:
cancelLocation = check_call(['java', '-jar', os.path.join(cl_path, jar_file)])
if cancelLocation == 0:
fr = raw_input('\nWould you like to go back to Fraud Menu? (y/n): ')
if fr == 'y' or fr == 'yes':
ss7main.ss7fraud()
elif fr == 'n' or fr == 'no':
attack_menu = raw_input('Would you like to choose another attacks category? (y/n): ')
if attack_menu == 'y' or attack_menu == 'yes':
ss7main.attacksMenu()
elif attack_menu == 'n' or attack_menu == 'no':
main_menu = raw_input('Would you like to go back to the main menu? (y/exit): ')
if main_menu == 'y' or main_menu == 'yes':
sigploit.mainMenu()
elif main_menu == 'exit':
print 'TCAP End...'
sys.exit(0)
except CalledProcessError as e:
print "\033[31m[-]Error:\033[0mCancelLocation Failed to Launch, " + str(e)
time.sleep(2)
ss7main.ss7fraud()
def isd():
jar_file = 'InsertSubscriberData.jar'
try:
insertSD = check_call(['java','-jar', os.path.join(isd_path,jar_file)])
if insertSD == 0:
fr = raw_input('\nWould you like to go back to Fraud Menu? (y/n): ')
if fr == 'y' or fr == 'yes':
ss7main.ss7fraud()
elif fr == 'n' or fr == 'no':
attack_menu = raw_input('Would you like to choose another attacks category? (y/n): ')
if attack_menu == 'y'or attack_menu =='yes':
ss7main.attacksMenu()
elif attack_menu == 'n' or attack_menu =='no':
main_menu = raw_input('Would you like to go back to the main menu? (y/exit): ')
if main_menu == 'y' or main_menu =='yes':
sigploit.mainMenu()
elif main_menu =='exit':
print 'TCAP End...'
sys.exit(0)
except CalledProcessError as e:
print "\033[31m[-]Error:\033[0mInsertSubscriberData Failed to Launch, " + str(e)
time.sleep(2)
ss7main.ss7fraud()
def sai():
jar_file = 'SendAuthenticationInfo.jar'
try:
sendAuth = check_call(['java', '-jar', os.path.join(sai_path, jar_file)])
if sendAuth == 0:
fr = raw_input('\nWould you like to go back to Fraud Menu? (y/n): ')
if fr == 'y' or fr == 'yes':
ss7main.ss7fraud()
elif fr == 'n' or fr == 'no':
attack_menu = raw_input('Would you like to choose another attacks category? (y/n): ')
if attack_menu == 'y' or attack_menu == 'yes':
ss7main.attacksMenu()
elif attack_menu == 'n' or attack_menu == 'no':
main_menu = raw_input('Would you like to go back to the main menu? (y/exit): ')
if main_menu == 'y' or main_menu == 'yes':
sigploit.mainMenu()
elif main_menu == 'exit':
print 'TCAP End...'
sys.exit(0)
except CalledProcessError as e:
print "\033[31m[-]Error:\033[0m%s Failed to Launch, %s" %(jar_file, e.message)
time.sleep(2)
ss7main.ss7fraud()
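# A refactoring sketch (not part of SigPloit itself): the five attack functions
# above repeat the same post-attack menu flow, which could live in one helper
# that reuses the os/sys/raw_input/ss7main/sigploit names already imported in
# this module.
def _post_attack_menu():
    fr = raw_input('\nWould you like to go back to Fraud Menu? (y/n): ')
    if fr in ('y', 'yes'):
        ss7main.ss7fraud()
    elif fr in ('n', 'no'):
        attack_menu = raw_input('Would you like to choose another attacks category? (y/n): ')
        if attack_menu in ('y', 'yes'):
            ss7main.attacksMenu()
        elif attack_menu in ('n', 'no'):
            main_menu = raw_input('Would you like to go back to the main menu? (y/exit): ')
            if main_menu in ('y', 'yes'):
                sigploit.mainMenu()
            elif main_menu == 'exit':
                print 'TCAP End...'
                sys.exit(0)

# Each attack function could then reduce to roughly:
#     if check_call(['java', '-jar', os.path.join(jar_path, jar_file)]) == 0:
#         _post_attack_menu()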
| SigPloiter/SigPloit | ss7/fraud.py | Python | mit | 5,988 |
# this is a default profile
import profile
class d_profile(profile):
def __init__(self):
# super(profile.__init__(self))
self.name = "default"
def get(self):
# self.name = "default"
return self
| Ganben/parrot | pyrrot/chatserver/models/d_profile.py | Python | apache-2.0 | 236 |
from django import forms
from django.utils.translation import ugettext_lazy as _
class GroundtruthRecordFormMixin(forms.ModelForm):
def clean(self):
cleaned_data = super(GroundtruthRecordFormMixin, self).clean()
contact_email = cleaned_data.get('contact_email', None)
contact_phone = cleaned_data.get('contact_phone', None)
if not (contact_email or contact_phone):
raise forms.ValidationError(_('Please enter an email address or '
'phone number'))
return cleaned_data
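# A hypothetical concrete form built on the mixin above; the model name and its
# import path are placeholders, not taken from the package.
from livinglots_groundtruth.forms import GroundtruthRecordFormMixin
from myapp.models import GroundtruthRecord   # hypothetical model with both contact fields

class GroundtruthRecordForm(GroundtruthRecordFormMixin):
    class Meta:
        model = GroundtruthRecord
        fields = ('contact_email', 'contact_phone')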
| 596acres/django-livinglots-groundtruth | livinglots_groundtruth/forms.py | Python | bsd-3-clause | 569 |
#!/usr/bin/python
"""
this is the code to accompany the Lesson 1 (Naive Bayes) mini-project
use a Naive Bayes Classifier to identify emails by their authors
authors and labels:
Sara has label 0
Chris has label 1
"""
import sys
import time
sys.path.append("../tools/")
from email_preprocess import preprocess
### features_train and features_test are the features for the training
### and testing datasets, respectively
### labels_train and labels_test are the corresponding item labels
features_train, features_test, labels_train, labels_test = preprocess()
from sklearn.naive_bayes import GaussianNB
clf = GaussianNB()
t0 = time.time()
clf.fit(features_train, labels_train)
print "training time:", round(time.time()-t0, 3), "s"
#training time: 1.101 s
t0 = time.time()
pred = clf.predict(features_test)
print "predict time:", round(time.time()-t0, 3), "s"
#predict time: 0.203 s
from sklearn.metrics import accuracy_score
print(accuracy_score(labels_test, pred))
#0.973833902162
print(accuracy_score(labels_test, pred, normalize=False))
#1712
| filipenevola/learning-ml | naive_bayes/nb_author_id.py | Python | mit | 1,086 |
from datetime import datetime
from typing import Optional, Union
import orjson
from django.contrib.auth.models import UserManager
from django.utils.timezone import now as timezone_now
from zerver.lib.hotspots import copy_hotspots
from zerver.lib.timezone import canonicalize_timezone
from zerver.lib.upload import copy_avatar
from zerver.lib.utils import generate_api_key
from zerver.models import (
Realm,
RealmUserDefault,
Recipient,
Stream,
Subscription,
UserBaseSettings,
UserProfile,
get_fake_email_domain,
)
def copy_default_settings(
settings_source: Union[UserProfile, RealmUserDefault], target_profile: UserProfile
) -> None:
# Important note: Code run from here to configure the user's
# settings should not call send_event, as that would cause clients
# to throw an exception (we haven't sent the realm_user/add event
# yet, so that event will include the updated details of target_profile).
#
# Note that this function will do at least one save() on target_profile.
for settings_name in UserBaseSettings.property_types:
if settings_name in ["default_language", "enable_login_emails"] and isinstance(
settings_source, RealmUserDefault
):
continue
value = getattr(settings_source, settings_name)
setattr(target_profile, settings_name, value)
if isinstance(settings_source, RealmUserDefault):
target_profile.save()
return
setattr(target_profile, "full_name", settings_source.full_name)
setattr(target_profile, "timezone", canonicalize_timezone(settings_source.timezone))
target_profile.save()
if settings_source.avatar_source == UserProfile.AVATAR_FROM_USER:
from zerver.lib.actions import do_change_avatar_fields
do_change_avatar_fields(
target_profile,
UserProfile.AVATAR_FROM_USER,
skip_notify=True,
acting_user=target_profile,
)
copy_avatar(settings_source, target_profile)
copy_hotspots(settings_source, target_profile)
def get_display_email_address(user_profile: UserProfile) -> str:
if not user_profile.email_address_is_realm_public():
return f"user{user_profile.id}@{get_fake_email_domain(user_profile.realm)}"
return user_profile.delivery_email
# create_user_profile is based on Django's User.objects.create_user,
# except that we don't save to the database, so it can be used in
# bulk_creates
#
# Only use this for bulk_create -- for normal usage one should use
# create_user (below) which will also make the Subscription and
# Recipient objects
def create_user_profile(
realm: Realm,
email: str,
password: Optional[str],
active: bool,
bot_type: Optional[int],
full_name: str,
bot_owner: Optional[UserProfile],
is_mirror_dummy: bool,
tos_version: Optional[str],
timezone: Optional[str],
tutorial_status: str = UserProfile.TUTORIAL_WAITING,
enter_sends: bool = False,
force_id: Optional[int] = None,
force_date_joined: Optional[datetime] = None,
) -> UserProfile:
if force_date_joined is None:
date_joined = timezone_now()
else:
date_joined = force_date_joined
email = UserManager.normalize_email(email)
extra_kwargs = {}
if force_id is not None:
extra_kwargs["id"] = force_id
user_profile = UserProfile(
is_staff=False,
is_active=active,
full_name=full_name,
last_login=date_joined,
date_joined=date_joined,
realm=realm,
is_bot=bool(bot_type),
bot_type=bot_type,
bot_owner=bot_owner,
is_mirror_dummy=is_mirror_dummy,
tos_version=tos_version,
timezone=timezone,
tutorial_status=tutorial_status,
enter_sends=enter_sends,
onboarding_steps=orjson.dumps([]).decode(),
default_language=realm.default_language,
delivery_email=email,
**extra_kwargs,
)
if bot_type or not active:
password = None
if user_profile.email_address_is_realm_public():
# If emails are visible to everyone, we can set this here and save a DB query
user_profile.email = get_display_email_address(user_profile)
user_profile.set_password(password)
user_profile.api_key = generate_api_key()
return user_profile
def create_user(
email: str,
password: Optional[str],
realm: Realm,
full_name: str,
active: bool = True,
role: Optional[int] = None,
bot_type: Optional[int] = None,
bot_owner: Optional[UserProfile] = None,
tos_version: Optional[str] = None,
timezone: str = "",
avatar_source: str = UserProfile.AVATAR_FROM_GRAVATAR,
is_mirror_dummy: bool = False,
default_sending_stream: Optional[Stream] = None,
default_events_register_stream: Optional[Stream] = None,
default_all_public_streams: Optional[bool] = None,
source_profile: Optional[UserProfile] = None,
force_id: Optional[int] = None,
force_date_joined: Optional[datetime] = None,
enable_marketing_emails: Optional[bool] = None,
) -> UserProfile:
user_profile = create_user_profile(
realm,
email,
password,
active,
bot_type,
full_name,
bot_owner,
is_mirror_dummy,
tos_version,
timezone,
force_id=force_id,
force_date_joined=force_date_joined,
)
user_profile.avatar_source = avatar_source
user_profile.timezone = timezone
user_profile.default_sending_stream = default_sending_stream
user_profile.default_events_register_stream = default_events_register_stream
if role is not None:
user_profile.role = role
# Allow the ORM default to be used if not provided
if default_all_public_streams is not None:
user_profile.default_all_public_streams = default_all_public_streams
# If a source profile was specified, we copy settings from that
# user. Note that this is positioned in a way that overrides
# other arguments passed in, which is correct for most defaults
# like time zone where the source profile likely has a better value
# than the guess. As we decide on details like avatars and full
# names for this feature, we may want to move it.
if source_profile is not None:
# copy_default_settings saves the attribute values so a secondary
# save is not required.
copy_default_settings(source_profile, user_profile)
elif bot_type is None:
realm_user_default = RealmUserDefault.objects.get(realm=realm)
copy_default_settings(realm_user_default, user_profile)
else:
# This will be executed only for bots.
user_profile.save()
if bot_type is None and enable_marketing_emails is not None:
user_profile.enable_marketing_emails = enable_marketing_emails
user_profile.save(update_fields=["enable_marketing_emails"])
if not user_profile.email_address_is_realm_public():
# With restricted access to email addresses, we can't generate
# the fake email addresses we use for display purposes without
# a User ID, which isn't generated until the .save() above.
user_profile.email = get_display_email_address(user_profile)
user_profile.save(update_fields=["email"])
recipient = Recipient.objects.create(type_id=user_profile.id, type=Recipient.PERSONAL)
user_profile.recipient = recipient
user_profile.save(update_fields=["recipient"])
Subscription.objects.create(
user_profile=user_profile, recipient=recipient, is_user_active=user_profile.is_active
)
return user_profile
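# A hypothetical call sketch: `some_realm` is assumed to be an existing
# zerver.models.Realm. create_user() persists the UserProfile together with
# its personal Recipient and self-Subscription, as shown above.
from zerver.lib.create_user import create_user

user_profile = create_user(
    email="new-user@example.com",
    password=None,           # set_password(None) stores an unusable password
    realm=some_realm,
    full_name="New User",
)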
| zulip/zulip | zerver/lib/create_user.py | Python | apache-2.0 | 7,676 |
# Generated by Django 2.0.2 on 2018-09-06 09:43
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
('activity', '0011_add_new_event_type'),
]
operations = [
migrations.AddField(
model_name='activity',
name='content_type',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType'),
),
migrations.AddField(
model_name='activity',
name='object_id',
field=models.PositiveIntegerField(blank=True, null=True),
),
]
| OpenTechFund/WebApp | opentech/apply/activity/migrations/0012_add_generic_relation_to_activity.py | Python | gpl-2.0 | 745 |
from codecs import open
from os import path
import re
from setuptools import setup, find_packages
def read(*paths):
filename = path.join(path.abspath(path.dirname(__file__)), *paths)
with open(filename, encoding='utf-8') as f:
return f.read()
def find_version(*paths):
contents = read(*paths)
match = re.search(r'^__version__ = [\'"]([^\'"]+)[\'"]', contents, re.M)
if not match:
raise RuntimeError('Unable to find version string.')
return match.group(1)
setup(
name='mixpanel',
version=find_version('mixpanel', '__init__.py'),
description='Official Mixpanel library for Python',
long_description=read('README.rst'),
url='https://github.com/mixpanel/mixpanel-python',
author='Mixpanel, Inc.',
author_email='dev@mixpanel.com',
license='Apache',
install_requires=['six >= 1.9.0'],
classifiers=[
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
],
keywords='mixpanel analytics',
packages=find_packages(),
)
| blockstack/packaging | imported/mixpanel/setup.py | Python | gpl-3.0 | 1,154 |
import nengo
import numpy as np
import redis
import struct
r = redis.StrictRedis('127.0.0.1')
model = nengo.Network()
with model:
def receive_spikes(t):
msg = r.get('spikes')
v = np.zeros(10)
if len(msg) > 0:
ii = struct.unpack('%dI' % (len(msg)/4), msg)
v[[ii]] = 1000.0
return v
sink_node = nengo.Node(receive_spikes, size_in=0)
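# A sketch of the matching sender side (the actual publisher script is not part
# of this file): it packs the indices of the neurons that spiked on the current
# timestep as unsigned 32-bit ints, which is what receive_spikes() above
# unpacks with struct.unpack('%dI' % (len(msg)/4), msg).
import struct

import redis

r_send = redis.StrictRedis('127.0.0.1')

def send_spikes(spike_indices):
    msg = struct.pack('%dI' % len(spike_indices), *spike_indices)
    r_send.set('spikes', msg)

send_spikes([1, 4, 7])   # neurons 1, 4 and 7 fired on this timestep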
| tcstewar/testing_notebooks | show_remote_spikes/sink.py | Python | gpl-2.0 | 399 |
# -*- coding: utf-8 -*-
from . import load_table
from ddbmock.utils import push_write_throughput
@load_table
def delete_item(post, table):
item = table.delete_item(post[u'Key'], post[u'Expected'])
capacity = item.get_size().as_units()
push_write_throughput(table.name, capacity)
if post[u'ReturnValues'] == "ALL_OLD":
return {
"ConsumedCapacityUnits": capacity,
"Attributes": item,
}
else:
return {
"ConsumedCapacityUnits": capacity,
}
| dekked/dynamodb-mock | ddbmock/operations/delete_item.py | Python | lgpl-3.0 | 528 |
import numpy as np
__all__ = ['BoundingBox']
class BoundingBox(np.ndarray):
"""Bounding box assuming RAS coordinate system"""
def __new__(cls, input_array, info=None):
try:
if len(input_array) == 6 and np.isscalar(input_array[0]):
pass
elif len(input_array) >= 1 and len(input_array[0]) == 3:
pass
else:
raise ValueError(
"Bounding box must have 6 components or be a list of points")
except TypeError:
raise ValueError(
"Bounding box must have 6 components or be a list of points")
except ValueError:
raise ValueError(
"Bounding box must have 6 components or be a list of points")
input_array = np.asanyarray(input_array)
if len(input_array) != 6 or input_array.ndim > 1:
input_array = np.r_[
np.min(input_array, axis=0),
np.max(input_array, axis=0),
]
array = input_array.view(cls)
array.setflags(write=False)
return input_array.view(cls)
def __str__(self):
return ('%s:%s' % (
super(np.ndarray, self[:3]).__str__(),
super(np.ndarray, self[3:]).__str__()
)).replace('BoundingBox', '')
@property
def volume(self):
return np.prod(self.side_lengths)
@property
def left(self):
return self[0]
@property
def posterior(self):
return self[1]
@property
def inferior(self):
return self[2]
@property
def right(self):
return self[3]
@property
def anterior(self):
return self[4]
@property
def superior(self):
return self[5]
@property
def side_lengths(self):
return self[3:] - self[:3]
def union(self, bounding_box):
if isinstance(bounding_box, BoundingBox):
_bounding_box = bounding_box
else:
_bounding_box = BoundingBox(bounding_box)
return BoundingBox(np.r_[
np.minimum(self[:3], _bounding_box[:3]),
np.maximum(self[3:], _bounding_box[3:]),
])
def contains(self, points):
points = np.atleast_2d(points)
lower = np.asarray(self[:3])[None, :]
upper = np.asarray(self[3:])[None, :]
self_min_ge_point = (points <= upper).all(1)
self_max_le_point = (lower <= points).all(1)
return self_min_ge_point * self_max_le_point
def intersection(self, bounding_box):
bounding_boxes = np.atleast_2d(np.asarray(bounding_box))
lower = np.asarray(self[:3])[None, :]
upper = np.asarray(self[3:])[None, :]
self_min_ge_other_min = (bounding_boxes[:, :3] <= lower).any(1)
self_min_le_other_max = (lower <= bounding_boxes[:, 3:]).any(1)
self_max_ge_other_min = (bounding_boxes[:, :3] <= upper).any(1)
self_max_le_other_max = (upper <= bounding_boxes[:, 3:]).any(1)
other_min_ge_self_min = (lower <= bounding_boxes[:, :3]).any(1)
other_min_le_self_max = (bounding_boxes[:, :3] <= upper).any(1)
other_max_ge_self_min = (lower <= bounding_boxes[:, 3:]).any(1)
other_max_le_self_max = (bounding_boxes[:, 3:] <= upper).any(1)
one_point_self_inside_other = (
self_min_ge_other_min * self_min_le_other_max +
self_max_ge_other_min * self_max_le_other_max
)
one_point_other_inside_self = (
other_min_ge_self_min * other_min_le_self_max +
other_max_ge_self_min * other_max_le_self_max
)
intersection_exists = (
one_point_self_inside_other +
one_point_other_inside_self
)
intersection = np.c_[
np.maximum(bounding_boxes[:, :3], lower),
np.minimum(bounding_boxes[:, 3:], upper),
]
if bounding_box.ndim == 1:
if any(intersection_exists):
return BoundingBox(intersection[0])
else:
return None
else:
return intersection_exists, intersection
def old_intersection(self, bounding_box):
one_point_self_inside_bounding_box = np.all(
(self[:3] >= bounding_box[:, :3]) *
(self[3:] <= bounding_box[:, 3:]),
1
)
one_point_bounding_box_inside_self = np.all(
(self[:3] <= bounding_box[:, :3]) *
(self[3:] >= bounding_box[:, 3:]),
1
)
if one_point_self_inside_bounding_box or one_point_bounding_box_inside_self:
return BoundingBox(np.r_[
np.maximum(self[:3], bounding_box[:, :3]),
np.minimum(self[3:], bounding_box[:, 3:]),
])
else:
return None
def collides_width(self, bounding_box):
return self.intersection(bounding_box) is not None
def split(self, axis):
box_minor = self.copy()
box_major = self.copy()
half_length = (self[axis + 3] - self[axis]) / 2.
box_minor.setflags(write=True)
box_major.setflags(write=True)
box_minor[axis + 3] -= half_length
box_major[axis] += half_length
box_minor.setflags(write=False)
box_major.setflags(write=False)
return box_minor, box_major
def __contains__(self, point):
point = np.atleast_2d(point)
return (self[:3] <= point).prod(1) * (point <= self[3:]).prod(1)
class AABBTree:
tree = None
leaves = None
forwardMap = None
reverseMap = None
def __init__(self, allboxes, indices=None):
self.tree, self.leaves = self.buildTree(allboxes, indices=indices)
self.forwardMap = np.arange(len(allboxes))
self.reverseMap = np.arange(len(allboxes))
def intersect(self, box):
return self._intersect(self.tree, box)
def _intersect(self, tree, box):
treeBox = tree.box
if all(
(box[::2] <= treeBox[1::2]) *
(box[1::2] >= treeBox[::2])
):
if isinstance(tree, self.leaf):
return [tree]
else:
return self._intersect(tree.left, box) + self._intersect(tree.right, box)
else:
return []
def build_tree(self, allboxes, indices=None, leafPointers=None, parent=None):
if indices == None:
indices = np.arange(len(allboxes), dtype=np.int)
boxes = allboxes
else:
if len(indices) == 1:
return self.leaf(allboxes[indices[0], :], indices[0], parent=parent)
boxes = allboxes[indices, :]
def buildTree_old(self, allboxes, indices=None, leafPointers=None, parent=None, verbose=False):
"""
Each element of the box list is a 6-ple b where
b = [x-,x+,y-,y+,z-,z+]
"""
if indices == None:
indices = np.arange(len(allboxes), dtype=np.int)
boxes = allboxes
else:
if len(indices) == 1:
return self.leaf(allboxes[indices[0], :], indices[0], parent=parent)
boxes = allboxes[indices, :]
if verbose:
print '*******************************************'
dimensions = len(boxes[0]) / 2
box = np.empty(2 * dimensions)
np.min(boxes[:, ::2], axis=0, out=box[::2])
np.max(boxes[:, 1::2], axis=0, out=box[1::2])
boxes[:, ::2].min(0, out=box[::2])
boxes[:, 1::2].max(0, out=box[1::2])
lengths = box[1::2] - box[::2]
largestDimension = lengths.argmax()
largestDimensionLength = lengths[largestDimension]
cuttingPlaneAt = largestDimensionLength / 2.
halfLength = (
boxes[:, 2 * largestDimension] +
(
boxes[:, 2 * largestDimension + 1] - boxes[:, 2 * largestDimension]
) / 2.
)
halfLengthSortedIndices = halfLength.argsort()
halfLengthSorted = halfLength[halfLengthSortedIndices]
division = halfLengthSorted.searchsorted(cuttingPlaneAt)
leftIndices = indices[halfLengthSortedIndices[:division]]
rightIndices = indices[halfLengthSortedIndices[division:]]
if len(leftIndices) == 0 or len(rightIndices) == 0:
n = len(indices) / 2
leftIndices = indices[:n]
rightIndices = indices[n:]
if verbose:
print "Left: ", leftIndices
print "Right: ", rightIndices
n = self.node(box, indices.copy(), parent=parent)
n.left = self.buildTree(
allboxes, leftIndices, parent=n, leafPointers=leafPointers)
n.right = self.buildTree(
allboxes, rightIndices, parent=n, leafPointers=leafPointers)
if parent != None:
return n
else:
return n, leafPointers
class node:
box = None
indices = None
left = None
right = None
parent = None
def __init__(self, box, indices, left=None, right=None, parent=None):
self.box = box
self.indices = indices
self.left = left
self.right = right
self.parent = parent
def __str__(self):
return """
box = %s
indices = %s
""" % (
self.box,
self.indices,
)
def __repr__(self):
return self.__str__()
class leaf:
box = None
indices = None
parent = None
def __init__(self, box, indices, parent=None):
self.box = box
self.indices = indices
self.parent = parent
def __str__(self):
return """
box = %s
indices = %s
""" % (
self.box,
self.indices,
)
def __repr__(self):
return self.__str__()
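# A small usage sketch of the BoundingBox class above (assumed importable as
# tract_querier.aabb.BoundingBox); a box is stored as
# [left, posterior, inferior, right, anterior, superior] in RAS coordinates.
import numpy as np

from tract_querier.aabb import BoundingBox

a = BoundingBox([0, 0, 0, 10, 10, 10])
b = BoundingBox([[5, 5, 5], [15, 15, 15]])    # built from a list of points

print(a.volume)                               # 1000
print(np.asarray(a.union(b)))                 # [ 0  0  0 15 15 15]
print(np.asarray(a.intersection(b)))          # [ 5  5  5 10 10 10]
print(a.contains([2, 2, 2]))                  # [ True]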
| oesteban/tract_querier | tract_querier/aabb.py | Python | bsd-3-clause | 9,958 |
# Task 5. Variant 6.
# Write a program that, when run, randomly displays the name of one of the two moons of Mars.
# Velyan A. S.
# 27.05.2016
print("\nназвание одного из двух спутников Марса:")
import random
satellite=["Фобос", "Деймос"]
s=random.choice(satellite)
print(s)
input("Нажмите Enter для выхода")
| Mariaanisimova/pythonintask | PMIa/2015/Velyan_A_S/task_5_6.py | Python | apache-2.0 | 503 |
__author__ = "Laura Martinez Sanchez"
__license__ = "GPL"
__version__ = "1.0"
__email__ = "lmartisa@gmail.com"
from osgeo import gdal, gdalnumeric, ogr, osr
import numpy as np
from PIL import Image, ImageDraw
from collections import defaultdict
import pickle
import time
from texture_common import *
#Uses a gdal geomatrix (gdal.GetGeoTransform()) to calculate the pixel location of a geospatial coordinate
def world2Pixel(geoMatrix, x, y):
ulX = geoMatrix[0]
ulY = geoMatrix[3]
xDist = geoMatrix[1]
yDist = geoMatrix[5]
rtnX = geoMatrix[2]
rtnY = geoMatrix[4]
pixel = int((x - ulX) / xDist)
line = int((y - ulY) / yDist)
return (pixel, line)
#Converts a Python Imaging Library array to a gdalnumeric image.
def imageToArray(i):
'''
Converts a Python Imaging Library (PIL) array to a gdalnumeric image.
'''
a = gdalnumeric.fromstring(i.tobytes(), 'b')
a.shape = i.im.size[1], i.im.size[0]
return a
def ReadClipArray(lrY, ulY, lrX, ulX, img):
clip = np.empty((img.RasterCount, lrY - ulY, lrX - ulX))
#Read only the pixels needed to do the clip
for band in range(img.RasterCount):
band += 1
imgaux = img.GetRasterBand(band).ReadAsArray(ulX, ulY, lrX - ulX, lrY - ulY)
clip[band - 1] = imgaux
return clip
#Does the clip of the shape
def ObtainPixelsfromShape(field, rasterPath, shapePath, INX, *args):
# field='zona'
# open dataset, also load as a gdal image to get geotransform
# INX can be false. If True, uses additional layers.
print "Starting clip...."
start = time.time()
if args:
texture_train_Path = args[0]
print texture_train_Path
img, textArrayShp = createTextureArray(texture_train_Path, rasterPath)
else:
#print"Indexes = False"
img = gdal.Open(rasterPath)
geoTrans = img.GetGeoTransform()
geoTransaux = img.GetGeoTransform()
proj = img.GetProjection()
#open shapefile
driver = ogr.GetDriverByName("ESRI Shapefile")
dataSource = driver.Open(shapePath, 0)
layer = dataSource.GetLayer()
clipdic = defaultdict(list)
count = 0
#Convert the layer extent to image pixel coordinates; we read only the pixels needed
for feature in layer:
minX, maxX, minY, maxY = feature.GetGeometryRef().GetEnvelope()
geoTrans = img.GetGeoTransform()
ulX, ulY = world2Pixel(geoTrans, minX, maxY)
lrX, lrY = world2Pixel(geoTrans, maxX, minY)
#print ulX,lrX,ulY,lrY
# Calculate the pixel size of the new image
pxWidth = int(lrX - ulX)
pxHeight = int(lrY - ulY)
clip = ReadClipArray(lrY, ulY, lrX, ulX, img)
#EDIT: create pixel offset to pass to new image Projection info
xoffset = ulX
yoffset = ulY
#print "Xoffset, Yoffset = ( %d, %d )" % ( xoffset, yoffset )
# Create a new geomatrix for the image
geoTrans = list(geoTrans)
geoTrans[0] = minX
geoTrans[3] = maxY
# Map points to pixels for drawing the boundary on a blank 8-bit, black and white, mask image.
points = []
pixels = []
geom = feature.GetGeometryRef()
pts = geom.GetGeometryRef(0)
[points.append((pts.GetX(p), pts.GetY(p))) for p in range(pts.GetPointCount())]
[pixels.append(world2Pixel(geoTrans, p[0], p[1])) for p in points]
rasterPoly = Image.new("L", (pxWidth, pxHeight), 1)
rasterize = ImageDraw.Draw(rasterPoly)
rasterize.polygon(pixels, 0)
mask = imageToArray(rasterPoly)
#Show the clips of the features
# plt.imshow(mask)
# plt.show()
# Clip the image using the mask into a dict
temp = gdalnumeric.choose(mask, (clip, np.nan))
# #SHow the clips of the image
# plt.imshow(temp[4])
# plt.show()
temp = np.concatenate(temp.T)
temp = temp[~np.isnan(temp[:, 0])] #NaN
#print temp.shape
clipdic[str(feature.GetField(field))].append(temp)
count += temp.shape[0]
end = time.time()
print "Time clipshape:"
print (end - start)
print "count", count
return clipdic, count
##########################################################################
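# A hypothetical call sketch (the paths and the attribute field name are
# placeholders): ObtainPixelsfromShape returns a dict keyed by the values of
# `field` in the shapefile, each entry holding one pixel array per clipped
# feature, plus the total number of pixels collected.
clipdic, count = ObtainPixelsfromShape('zona', '/data/ortho.tif',
                                       '/data/training_areas.shp', False)
for label, arrays in clipdic.items():
    print label, sum(a.shape[0] for a in arrays)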
| madi/DeadTrees-BDEOSS | clipshape.py | Python | gpl-3.0 | 4,289 |
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'set_column08.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.image_dir = test_dir + 'images/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
bold = workbook.add_format({'bold': 1})
italic = workbook.add_format({'italic': 1})
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write('A1', 'Foo', bold)
worksheet.write('B1', 'Bar', italic)
worksheet.write_column('A2', data[0])
worksheet.write_column('B2', data[1])
worksheet.write_column('C2', data[2])
worksheet.set_row(12, None, None, {'hidden': True})
worksheet.set_column('F:F', None, None, {'hidden': True})
worksheet.insert_image('E12', self.image_dir + 'logo.png')
workbook.close()
self.assertExcelEqual()
| jvrsantacruz/XlsxWriter | xlsxwriter/test/comparison/test_set_column08.py | Python | bsd-2-clause | 1,662 |
# IronPython Pad. Write code snippets here and F5 to run.
import ctypes
#Load CoolProp shared library
CP = ctypes.cdll.LoadLibrary(r"E:\Cyril\Dropbox\CVC\BIM_Revit\ScriptsPython\dll\CoolProp.dll")
#making PropsSI function call shorter
PropsSI = CP.PropsSI
#Convert python data type entered in PropsSI function call to expected argtypes for PropsSI function in the .dll
PropsSI.argtypes = (ctypes.c_char_p, ctypes.c_char_p, ctypes.c_double, ctypes.c_char_p, ctypes.c_double, ctypes.c_char_p)
#Convert returned value from .dll to desired data type which is a float in python
PropsSI.restype = ctypes.c_double
#You can then call PropsSI function as in previous article
print PropsSI('C','T',275.15,'P',101325,'INCOMP::Water')
| Nahouhak/pythoncvc.net | RevitPyCVC/Fluides/coolprop_ironpython_example.py | Python | mit | 727 |
'''
Created on 2013-01-22
@author: levi
'''
from util.parser import load_parser
from util.test_script_base import Test
class MBEddr(Test):
def __init__(self):
Test.__init__(self)
# ============TRANSFORMATION=================
r0 = 'Hlayer0rule0'
r1 = 'Hlayer0rule1'
r2 = 'Hlayer0rule2'
r3 = 'Hlayer0rule3'
r4 = 'Hlayer0rule4'
r5 = 'Hlayer0rule6'
r6 = 'Hlayer0rule7'
r7 = 'Hlayer0rule8'
r8 = 'Hlayer0rule9'
r9 = 'Hlayer0rule10'
r10 = 'Hlayer0rule11'
r11 = 'Hlayer1rule0'
r12 = 'Hlayer1rule1'
r13 = 'Hlayer1rule2'
r14 = 'Hlayer1rule3'
r15 = 'Hlayer1rule4'
r16 = 'Hlayer1rule5'
r17 = 'Hlayer1rule6'
r18 = 'Hlayer1rule7'
r19 = 'Hlayer1rule8'
r20 = 'Hlayer1rule9'
r21 = 'Hlayer1rule10'
r22 = 'Hlayer1rule11'
r23 = 'Hlayer1rule12'
r24 = 'Hlayer1rule13'
r25 = 'Hlayer1rule14'
r26 = 'Hlayer1rule15'
r27 = 'Hlayer2rule1'
r28 = 'Hlayer2rule2'
r29 = 'Hlayer2rule3'
r30 = 'Hlayer3rule0'
r31 = 'Hlayer3rule1'
r32 = 'Hlayer3rule2'
r33 = 'Hlayer3rule3'
r34 = 'Hlayer3rule4'
r34copy = 'Hlayer3rule4copy'
r35 = 'Hlayer3rule5'
r36 = 'Hlayer4rule0'
r37 = 'Hlayer4rule1'
r38 = 'Hlayer4rule2'
r39 = 'Hlayer4rule3'
r40 = 'Hlayer5rule0'
r41 = 'Hlayer5rule1'
r42 = 'Hlayer5rule2'
r43 = 'Hlayer5rule3'
r44 = 'Hlayer5rule4'
r45 = 'Hlayer5rule5'
r46 = 'Hlayer6rule0'
self.full_transformation = [[r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, ],
[r11, r12, r13, r14, r15, r16, r17, r18, r19, r20, r21, r22, r23, r24, r25, r26, ],
[r27, r28, r29, ], [r30, r31, r32, r33, r34, r34copy, r35, ],
[r36, r37, r38, r39, ],
[r40, r41, r42, r43, r44, r45, ], [r46, ],
]
#from MPS
self.full_transformation = []
self.full_transformation.append(
['Hlayer0rule0', 'Hlayer0rule1', 'Hlayer0rule2', 'Hlayer0rule3', 'Hlayer0rule4', 'Hlayer0rule6',
'Hlayer0rule7', 'Hlayer0rule8', 'Hlayer0rule9', 'Hlayer0rule10', 'Hlayer0rule11', ]) # layer0
self.full_transformation.append(
['Hlayer1rule0', 'Hlayer1rule1', 'Hlayer1rule2', 'Hlayer1rule3', 'Hlayer1rule4', 'Hlayer1rule5',
'Hlayer1rule6', 'Hlayer1rule7', 'Hlayer1rule8', 'Hlayer1rule9', 'Hlayer1rule10', 'Hlayer1rule11',
'Hlayer1rule12', 'Hlayer1rule13', 'Hlayer1rule14', 'Hlayer1rule15', ]) # layer1
self.full_transformation.append(['Hlayer2rule1', 'Hlayer2rule2', 'Hlayer2rule3', ]) # layer2
self.full_transformation.append(
['Hlayer3rule0', 'Hlayer3rule1', 'Hlayer3rule2', 'Hlayer3rule3', 'Hlayer3rule4', 'Hlayer3rule5',
'Hlayer3rule4copy', ]) # layer3
self.full_transformation.append(['Hlayer4rule0', 'Hlayer4rule1', 'Hlayer4rule2', 'Hlayer4rule3', ]) # layer4
self.full_transformation.append(
['Hlayer5rule1', 'Hlayer5rule2', 'Hlayer5rule3', 'Hlayer5rule4', 'Hlayer5rule5', ]) # layer5
#self.transformation_directory = "mbeddr2C_MM/transformation_from_eclipse/"
self.artifact_directory = "~/Projects/SyVOLT/"
self.transformation_directory = "mbeddr2C_MM/transformation_from_mps/"
# =====METAMODELS===============
self.inputMM = "./mbeddr2C_MM/ecore_metamodels/Module.ecore"
self.outputMM = "./mbeddr2C_MM/ecore_metamodels/C.ecore"
# ====CONTRACTS==================
self.contract_directory = "mbeddr2C_MM/contracts_from_mps/"
self.atomic_contracts = [
"AssignmentInstance",
"GlobalVarGetsCorrectFunctionAddress",
"AssignmentInstance2",
]
self.if_then_contracts = [
]
# =========PC SAVE LOCATION
self.pc_save_filename = "pcs_mbeddr.txt"
if __name__ == "__main__":
parser = load_parser()
args = parser.parse_args()
mbeddr = MBEddr()
mbeddr.test_correct(args)
| levilucio/SyVOLT | test_mbeddr.py | Python | mit | 4,286 |
from requests import request, ConnectionError
from social.utils import module_member, parse_qs
from social.exceptions import AuthFailed
class BaseAuth(object):
"""A django.contrib.auth backend that authenticates the user based on
a authentication provider response"""
name = '' # provider name, it's stored in database
supports_inactive_user = False # Django auth
ID_KEY = None
EXTRA_DATA = None
REQUIRES_EMAIL_VALIDATION = False
def __init__(self, strategy=None, redirect_uri=None, *args, **kwargs):
self.strategy = strategy
self.redirect_uri = redirect_uri
self.data = {}
if strategy:
self.data = self.strategy.request_data()
self.redirect_uri = self.strategy.absolute_uri(
self.redirect_uri
)
def setting(self, name, default=None):
"""Return setting value from strategy"""
return self.strategy.setting(name, default=default, backend=self)
def auth_url(self):
"""Must return redirect URL to auth provider"""
raise NotImplementedError('Implement in subclass')
def auth_html(self):
"""Must return login HTML content returned by provider"""
raise NotImplementedError('Implement in subclass')
def auth_complete(self, *args, **kwargs):
"""Completes loging process, must return user instance"""
raise NotImplementedError('Implement in subclass')
def process_error(self, data):
"""Process data for errors, raise exception if needed.
Call this method on any override of auth_complete."""
pass
def authenticate(self, *args, **kwargs):
"""Authenticate user using social credentials
Authentication is made if this is the correct backend, backend
verification is made by kwargs inspection for current backend
name presence.
"""
# Validate backend and arguments. Require that the Social Auth
# response be passed in as a keyword argument, to make sure we
# don't match the username/password calling conventions of
# authenticate.
if 'backend' not in kwargs or kwargs['backend'].name != self.name or \
'strategy' not in kwargs or 'response' not in kwargs:
return None
self.strategy = self.strategy or kwargs.get('strategy')
self.redirect_uri = self.redirect_uri or kwargs.get('redirect_uri')
self.data = self.strategy.request_data()
pipeline = self.strategy.get_pipeline()
kwargs.setdefault('is_new', False)
if 'pipeline_index' in kwargs:
pipeline = pipeline[kwargs['pipeline_index']:]
return self.pipeline(pipeline, *args, **kwargs)
def pipeline(self, pipeline, pipeline_index=0, *args, **kwargs):
out = self.run_pipeline(pipeline, pipeline_index, *args, **kwargs)
if not isinstance(out, dict):
return out
user = out.get('user')
if user:
user.social_user = out.get('social')
user.is_new = out.get('is_new')
return user
def disconnect(self, *args, **kwargs):
pipeline = self.strategy.get_disconnect_pipeline()
if 'pipeline_index' in kwargs:
pipeline = pipeline[kwargs['pipeline_index']:]
kwargs['name'] = self.strategy.backend.name
kwargs['user_storage'] = self.strategy.storage.user
return self.run_pipeline(pipeline, *args, **kwargs)
def run_pipeline(self, pipeline, pipeline_index=0, *args, **kwargs):
out = kwargs.copy()
out.setdefault('strategy', self.strategy)
out.setdefault('backend', out.pop(self.name, None) or self)
out.setdefault('request', self.strategy.request)
for idx, name in enumerate(pipeline):
out['pipeline_index'] = pipeline_index + idx
func = module_member(name)
result = func(*args, **out) or {}
if not isinstance(result, dict):
return result
out.update(result)
self.strategy.clean_partial_pipeline()
return out
def extra_data(self, user, uid, response, details):
"""Return deafault extra data to store in extra_data field"""
data = {}
for entry in (self.EXTRA_DATA or []) + self.setting('EXTRA_DATA', []):
if not isinstance(entry, (list, tuple)):
entry = (entry,)
size = len(entry)
if size >= 1 and size <= 3:
if size == 3:
name, alias, discard = entry
elif size == 2:
(name, alias), discard = entry, False
elif size == 1:
name = alias = entry[0]
discard = False
value = response.get(name) or details.get(name)
if discard and not value:
continue
data[alias] = value
return data
def auth_allowed(self, response, details):
"""Return True if the user should be allowed to authenticate, by
default check if email is whitelisted (if there's a whitelist)"""
emails = self.setting('WHITELISTED_EMAILS', [])
domains = self.setting('WHITELISTED_DOMAINS', [])
email = details.get('email')
allowed = True
if email and (emails or domains):
domain = email.split('@', 1)[1]
allowed = email in emails or domain in domains
return allowed
def get_user_id(self, details, response):
"""Return a unique ID for the current user, by default from server
response."""
return response.get(self.ID_KEY)
def get_user_details(self, response):
"""Must return user details in a know internal struct:
{'username': <username if any>,
'email': <user email if any>,
'fullname': <user full name if any>,
'first_name': <user first name if any>,
'last_name': <user last name if any>}
"""
raise NotImplementedError('Implement in subclass')
def get_user(self, user_id):
"""
Return user with given ID from the User model used by this backend.
This is called by django.contrib.auth.middleware.
"""
from social.strategies.utils import get_current_strategy
strategy = self.strategy or get_current_strategy()
return strategy.get_user(user_id)
def continue_pipeline(self, *args, **kwargs):
"""Continue previous halted pipeline"""
kwargs.update({'backend': self})
return self.strategy.authenticate(*args, **kwargs)
def request_token_extra_arguments(self):
"""Return extra arguments needed on request-token process"""
return self.setting('REQUEST_TOKEN_EXTRA_ARGUMENTS', {})
def auth_extra_arguments(self):
"""Return extra arguments needed on auth process. The defaults can be
overriden by GET parameters."""
extra_arguments = self.setting('AUTH_EXTRA_ARGUMENTS', {})
extra_arguments.update((key, self.data[key]) for key in extra_arguments
if key in self.data)
return extra_arguments
def uses_redirect(self):
"""Return True if this provider uses redirect url method,
otherwise return false."""
return True
def request(self, url, method='GET', *args, **kwargs):
kwargs.setdefault('timeout', self.setting('REQUESTS_TIMEOUT') or
self.setting('URLOPEN_TIMEOUT'))
try:
response = request(method, url, *args, **kwargs)
except ConnectionError as err:
raise AuthFailed(self, str(err))
response.raise_for_status()
return response
def get_json(self, url, *args, **kwargs):
return self.request(url, *args, **kwargs).json()
def get_querystring(self, url, *args, **kwargs):
return parse_qs(self.request(url, *args, **kwargs).text)
def get_key_and_secret(self):
"""Return tuple with Consumer Key and Consumer Secret for current
service provider. Must return (key, secret), order *must* be respected.
"""
return self.setting('KEY'), self.setting('SECRET')
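# A minimal sketch of a concrete backend on top of BaseAuth; the provider name,
# response keys and EXTRA_DATA entries are illustrative. Real providers usually
# inherit from the OAuth1/OAuth2 subclasses instead, which supply auth_url()
# and auth_complete().
from social.backends.base import BaseAuth

class ExampleAuth(BaseAuth):
    name = 'example'
    ID_KEY = 'id'
    EXTRA_DATA = [('login', 'username')]   # store response['login'] as 'username'

    def get_user_details(self, response):
        return {'username': response.get('login', ''),
                'email': response.get('email', ''),
                'fullname': response.get('name', ''),
                'first_name': '',
                'last_name': ''}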
| HackerEcology/SuggestU | suggestu/social/backends/base.py | Python | gpl-3.0 | 8,286 |
#!/usr/bin/env python
__author__ = 'Ian Katz, Michael Meisinger'
import re
from ooi import logging
from ooi.logging import log
from pyon.util.containers import get_ion_ts, DotDict
from pyon.core.exception import BadRequest, Inconsistent, NotFound
from pyon.core.registry import getextends
from pyon.ion.resource import LCE, RT, PRED
from pyon.util.config import Config
# Common resource type and association definitions
errc_lookups = None
class EnhancedResourceRegistryClient(object):
"""
This class provides enhanced resource registry client functionality by wrapping the "real" client.
Specifically, this class adds more succinct interaction with the resource registry in assign and find operations.
This class analyzes the allowable resource/predicate relations to allow the following:
* assigning/unassigning one resource to another and letting this class figure out the allowed predicate
* assigning and validating that only one subject (or object) association exists
* finding objects or subjects between two resource types and letting the class figure out the allowed predicate
* finding a single object or subject and letting the class do the error checking for len(results) == 1
* all of the above find ops, but with resource_id instead of full resource
Examples:
# assigning
self.assign_instrument_model_to_instrument_agent(instrument_model_id, instrument_agent_id)
self.assign_one_instrument_model_to_instrument_device(instrument_model_id, instrument_device_id)
self.assign_instrument_device_to_one_platform_device(instrument_device_id, platform_device_id)
self.unassign_instrument_model_from_instrument_device(instrument_model_id, instrument_device_id)
# find objects
self.find_instrument_models_of_instrument_device(instrument_device_id) # returns list
self.find_instrument_model_of_instrument_device(instrument_device_id) # returns IonObject or raises NotFound
self.find_instrument_devices_by_instrument_model(instrument_model_id) # returns list
self.find_instrument_device_by_instrument_model(instrument_model_id) # returns IonObject or raises NotFound
# find subjects
self.find_instrument_model_ids_of_instrument_device(instrument_device_id) # returns list
self.find_instrument_model_id_of_instrument_device(instrument_device_id) # returns string or raises NotFound
self.find_instrument_device_ids_by_instrument_model(instrument_model_id) # returns list
self.find_instrument_device_id_by_instrument_model(instrument_model_id) # returns string or raises NotFound
Breaking Ambiguity:
assign/unassign method names can also include "_with_has_model" ("_with_", and the predicate type with underscores)
find method name can include "_using_has_model" ("_using_", and the predicate type with underscores)
"""
def __init__(self, rr_client):
self.id = id(self)
log.debug("EnhancedResourceRegistryClient init")
self.RR = rr_client
global errc_lookups
if not errc_lookups:
errc_lookups = self._build_lookups()
self.__dict__.update(errc_lookups)
self._cached_dynamics = {}
self._cached_predicates = {}
self._cached_resources = {}
self._all_cached_resources = {}
log.debug("done init")
@classmethod
def _build_lookups(cls):
lookup_dict = {}
log.debug("Generating lookup tables for %s resources and their labels", len(RT))
lookup_dict["resource_to_label"] = dict([(v, cls._uncamel(v)) for v in RT.values() if type("") == type(v)])
lookup_dict["label_to_resource"] = dict([(cls._uncamel(v), v) for v in RT.values() if type("") == type(v)])
log.debug("Generating lookup tables for %s predicates and their labels", len(PRED.values()))
lookup_dict["predicate_to_label"] = dict([(v, cls._uncamel(v)) for v in PRED.values() if type("") == type(v)])
lookup_dict["label_to_predicate"] = dict([(cls._uncamel(v), v) for v in PRED.values() if type("") == type(v)])
log.debug("Generating predicate lookup table")
lookup_dict["predicates_for_subj_obj"] = cls._build_predicate_list()
return lookup_dict
@classmethod
def _build_predicate_list(cls):
"""
Create a master dict of dicts of lists in self.predicates_for_subj_obj
self.predicates_for_subj_obj[RT.SubjectType][RT.ObjectType] = [PRED.typeOfPred1, PRED.typeOfPred2]
"""
pred_lookup = {}
# if no extends are found, just return the base type as a list
def my_getextends(iontype):
try:
return getextends(iontype)
except KeyError:
return [iontype]
# read associations yaml and expand all domain/range pairs
assoc_defs = Config(["res/config/associations.yml"]).data['AssociationDefinitions']
for ad in assoc_defs:
predicate = ad['predicate']
domain = ad['domain']
range = ad['range']
for d in domain:
for ad in my_getextends(d):
for r in range:
for ar in my_getextends(r):
pred_lookup.setdefault(ad, {}).setdefault(ar, set()).add(predicate)
return pred_lookup
def __getattr__(self, item):
"""
anything we can't puzzle out gets passed along to the real RR client
"""
# don't waste time looking up function names twice
if item in self._cached_dynamics:
return self._cached_dynamics[item]
dynamic_fns = [
self._make_dynamic_assign_function, # understand assign_x_x_to_y_y_with_some_predicate(o, s) functions
self._make_dynamic_assign_single_object_function, # understand assign_one_x_x_to_y_y_with_some_predicate(o, s) functions
self._make_dynamic_assign_single_subject_function, # understand assign_x_x_to_one_y_y_with_some_predicate(o, s) functions
self._make_dynamic_unassign_function, # understand unassign_x_x_to_y_y_with_some_predicate(o, s) functions
self._make_dynamic_find_objects_function, # understand find_x_xs_by_y_y_using_some_predicate(s) functions
self._make_dynamic_find_subjects_function, # understand find_x_xs_by_y_y_using_some_predicate(o) functions
self._make_dynamic_find_object_function, # understand find_x_x_by_y_y_using_some_predicate(s) functions
self._make_dynamic_find_subject_function, # understand find_x_x_by_y_y_using_some_predicate(o) functions
self._make_dynamic_find_object_ids_function, # understand find_x_x_ids_by_y_y_using_some_predicate(s) functions
self._make_dynamic_find_subject_ids_function, # understand find_x_x_ids_by_y_y_using_some_predicate(o) functions
self._make_dynamic_find_object_id_function, # understand find_x_x_id_by_y_y_using_some_predicate(s) functions
self._make_dynamic_find_subject_id_function, # understand find_x_x_id_by_y_y_using_some_predicate(o) functions
]
# try parsing against all the dynamic functions to see if one works
for gen_fn in dynamic_fns:
fn = gen_fn(item)
if fn is None:
log.trace("dynamic function match fail")
else:
log.trace("dynamic function match for %s", item)
self._cached_dynamics[item] = fn
return fn
log.trace("Getting %s attribute from self.RR", item)
if not hasattr(self.RR, item):
raise AttributeError(("The method '%s' could not be parsed as a dynamic function and does not exist " +
"in the Resource Registry Client (%s)") % (item, type(self.RR).__name__))
ret = getattr(self.RR, item)
log.trace("Got attribute from self.RR: %s", type(ret).__name__)
self._cached_dynamics[item] = ret
return ret
def create(self, resource_obj=None, specific_type=None):
"""
create a single object of the predefined type
@param resource_obj an IonObject resource of the proper type
@param specific_type the name of an Ion type (e.g. RT.Resource)
@retval the resource ID
"""
if resource_obj is None:
resource_obj = {}
# Validate the input
self._check_type(resource_obj, specific_type, "to be created")
self._check_name(resource_obj, "to be created")
#persist
#primary_object_obj = IonObject(self.iontype, primary_object)
resource_id, _ = self.RR.create(resource_obj)
return resource_id
def read(self, resource_id='', specific_type=None):
"""
read a single object of the predefined type
@param resource_id the id to be read
@param specific_type the name of an Ion type (e.g. RT.Resource)
"""
if resource_id in self._all_cached_resources:
resource_obj = self._all_cached_resources[resource_id]
self._check_type(resource_obj, specific_type, "to be read")
log.debug("Returning cached %s object", specific_type)
return resource_obj
resource_obj = self.RR.read(resource_id)
self._check_type(resource_obj, specific_type, "to be read")
if specific_type in self._cached_resources:
log.debug("Adding cached %s object", specific_type)
self._add_resource_to_cache(specific_type, resource_obj)
return resource_obj
def read_mult(self, resource_ids=None, specific_type=None):
if resource_ids is None:
resource_ids = []
found_resources = [self._all_cached_resources.get(rid, None) for rid in resource_ids]
missing_resources = [resource_ids[i] for i, robj in enumerate(found_resources) if robj is None]
if not missing_resources:
for robj in found_resources:
self._check_type(robj, specific_type, "to be read")
return found_resources
# normal case, check return types
if not specific_type in self._cached_resources:
ret = self.RR.read_mult(resource_ids)
if None is not specific_type:
if not all([r.type_ == specific_type for r in ret]):
raise BadRequest("Expected %s resources from read_mult, but received different type" %
specific_type)
return ret
log.debug("Returning cached %s resources", specific_type)
cache = self._cached_resources[specific_type]
# fill in any holes that we can
misses = [x for x in resource_ids if x not in cache.by_id]
if misses:
log.debug("Attempting to fill in %s cache misses", len(misses))
misses_objs = self.RR.read_mult(misses)
for mo in misses_objs:
if None is not mo:
self._add_resource_to_cache(specific_type, mo)
return [cache.by_id.get(r, None) for r in resource_ids]
def update(self, resource_obj=None, specific_type=None):
"""
update a single object of the predefined type
@param resource_obj the updated resource
@param specific_type the name of an Ion type (e.g. RT.Resource)
"""
if None == resource_obj: resource_obj = {}
self._check_type(resource_obj, specific_type, "to be updated")
if not hasattr(resource_obj, "_id") or "" == resource_obj._id:
raise BadRequest("The _id field was not set in the "
+ "%s resource to be updated" % type(resource_obj).__name__)
#if the name is being changed, make sure it's not
# being changed to a duplicate
self._check_name(resource_obj, "to be updated")
#persist
return self.RR.update(resource_obj)
def retire(self, resource_id='', specific_type=None):
return self.lcs_delete(resource_id, specific_type)
def lcs_delete(self, resource_id='', specific_type=None):
"""
alias for LCS delete -- the default "delete operation" in ION
@param resource_id the id to be deleted
@param specific_type the name of an Ion type (e.g. RT.Resource)
"""
if None is not specific_type:
resource_obj = self.RR.read(resource_id)
self._check_type(resource_obj, specific_type, "to be retired")
self.RR.lcs_delete(resource_id)
return
def delete(self, resource_id):
raise NotImplementedError("TODO: remove me")
def force_delete(self, resource_id='', specific_type=None):
"""
delete a single object of the predefined type
AND its history
AND any associations to/from it
(i.e., NOT retiring!)
@param resource_id the id to be deleted
@param specific_type the name of an Ion type (e.g. RT.Resource)
"""
#primary_object_obj = self.RR.read(primary_object_id)
if None is not specific_type:
resource_obj = self.RR.read(resource_id)
self._check_type(resource_obj, specific_type, "to be deleted")
# Note: delete automatically retires associations
self.RR.delete(resource_id)
def delete_association(self, subject_id='', association_type='', object_id=''):
"""
delete an association
@param subject_id the resource ID of the predefined type
@param association_type the predicate
@param object_id the resource ID of the type to be joined
@todo check for errors
"""
assert(type("") == type(subject_id) == type(object_id))
assoc = self.RR.get_association(subject=subject_id,
predicate=association_type,
object=object_id)
self.RR.delete_association(assoc)
def find_resource_by_name(self, resource_type, name, id_only=False):
rsrcs = self.find_resources_by_name(resource_type, name, id_only)
if 1 == len(rsrcs):
return rsrcs[0]
elif 1 < len(rsrcs):
raise Inconsistent("Expected 1 %s with name '%s', got %d" %
(resource_type, name, len(rsrcs)))
else:
raise NotFound("Expected 1 %s with name '%s', got %d" %
(resource_type, name, len(rsrcs)))
def find_resources_by_name(self, resource_type, name, id_only=False):
assert name
if resource_type not in self._cached_resources:
log.warn("Using find_resources_by_name on resource type %s, which was not cached", resource_type)
ret, _ = self.RR.find_resources(restype=resource_type, name=name, id_only=id_only)
return ret
if not name in self._cached_resources[resource_type].by_name:
log.debug("The %s resource with name '%s' was not in the cache", resource_type, name)
return []
log.debug("Returning object(s) from cache")
objs = self._cached_resources[resource_type].by_name[name]
if id_only:
return [obj._id for obj in objs]
else:
return objs
def find_subjects(self, subject_type='', predicate='', object='', id_only=False):
assert subject_type != ''
assert predicate != ''
object_id, object_type = self._extract_id_and_type(object)
if not self.has_cached_predicate(predicate):
ret, _ = self.RR.find_subjects(subject_type=subject_type,
predicate=predicate,
object=object_id,
id_only=id_only)
return ret
log.debug("Using %s cached results for 'find (%s) subjects'", len(self._cached_predicates[predicate]), predicate)
def filter_fn(assoc):
            if object_id != assoc.o:
return False
if "" != subject_type and subject_type != assoc.st:
return False
return True
log.debug("Checking object_id=%s, subject_type=%s", object_id, subject_type)
preds = self._cached_predicates[predicate]
time_search_start = get_ion_ts()
subject_ids = [a.s for a in self.filter_cached_associations(predicate, filter_fn)]
time_search_stop = get_ion_ts()
total_time = int(time_search_stop) - int(time_search_start)
log.debug("Processed %s %s predicates for %s subjects in %s seconds",
len(preds),
predicate,
len(subject_ids),
total_time / 1000.0)
if id_only:
return subject_ids
else:
log.debug("getting full subject IonObjects with read_mult")
return self.read_mult(subject_ids, subject_type)
def find_objects(self, subject, predicate, object_type='', id_only=False):
subject_id, subject_type = self._extract_id_and_type(subject)
if not self.has_cached_predicate(predicate):
ret, _ = self.RR.find_objects(subject=subject_id,
predicate=predicate,
object_type=object_type,
id_only=id_only)
return ret
log.debug("Using %s cached results for 'find (%s) objects'", len(self._cached_predicates[predicate]), predicate)
def filter_fn(assoc):
if subject_id != assoc.s:
return False
if "" != object_type and object_type != assoc.ot:
return False
return True
log.debug("Checking subject_id=%s, object_type=%s", subject_id, object_type)
preds = self._cached_predicates[predicate]
time_search_start = get_ion_ts()
object_ids = [a.o for a in self.filter_cached_associations(predicate, filter_fn)]
time_search_stop = get_ion_ts()
total_time = int(time_search_stop) - int(time_search_start)
log.debug("Processed %s %s predicates for %s objects in %s seconds",
len(preds),
predicate,
len(object_ids),
total_time / 1000.0)
if id_only:
return object_ids
else:
log.debug("getting full object IonObjects with read_mult")
return self.read_mult(object_ids)
def find_subject(self, subject_type='', predicate='', object='', id_only=False):
assert subject_type != ''
assert predicate != ''
object_id, object_type = self._extract_id_and_type(object)
idstring = ""
if id_only: idstring = " ID"
findop_name = "Find %s subject%s by %s object using predicate %s" % (subject_type,
idstring,
object_type,
predicate)
return self._find_subject_(findop_name, subject_type, predicate, object_id, object_type, id_only)
def _find_subject_(self, findop_name, subject_type, predicate, object_id, object_type, id_only):
objs = self.find_subjects(subject_type=subject_type,
predicate=predicate,
object=object_id,
id_only=id_only)
if 1 == len(objs):
return objs[0]
elif 1 < len(objs):
raise Inconsistent("Expected 1 %s as subject of %s '%s', got %d in '%s'" %
(subject_type, object_type, str(object_id), len(objs), findop_name))
else:
raise NotFound("Expected 1 %s as subject of %s '%s' in '%s'" %
(subject_type, object_type, str(object_id), findop_name))
def find_object(self, subject, predicate, object_type='', id_only=False):
subject_id, subject_type = self._extract_id_and_type(subject)
idstring = ""
if id_only: idstring = " ID"
findop_name = "Find %s subject%s by %s object using predicate %s" % (subject_type,
idstring,
object_type,
predicate)
return self._find_object_(findop_name, subject_id, subject_type, predicate, object_type, id_only)
def _find_object_(self, findop_name, subject_id, subject_type, predicate, object_type, id_only):
objs = self.find_objects(subject=subject_id,
predicate=predicate,
object_type=object_type,
id_only=id_only)
if 1 == len(objs):
return objs[0]
elif 1 < len(objs):
raise Inconsistent("Expected 1 %s as object of %s '%s', got %d in '%s'" %
(object_type, subject_type, str(subject_id), len(objs), findop_name))
else:
raise NotFound("Expected 1 %s as object of %s '%s' in '%s'" %
(object_type, subject_type, str(subject_id), findop_name))
def delete_object_associations(self, subject_id='', association_type=''):
"""
        delete all associations of a given type that are attached as objects to the given subject
"""
log.debug("Deleting all %s object associations from subject with id='%s'",
association_type,
subject_id)
associations = self.RR.find_associations(subject=subject_id, predicate=association_type)
for a in associations:
self.RR.delete_association(a)
def delete_subject_associations(self, association_type='', object_id=''):
"""
        delete all associations of a given type that are attached as subjects to the given object
"""
log.debug("Deleting all %s associations to object with id='%s'",
association_type,
object_id)
associations = self.RR.find_associations(object=object_id, predicate=association_type)
for a in associations:
self.RR.delete_association(a)
def advance_lcs(self, resource_id, transition_event):
"""
attempt to advance the lifecycle state of a resource
        @param resource_id the resource id
        @param transition_event the lifecycle transition event to execute
"""
assert type(resource_id) is str
assert type(transition_event) is str
log.debug("Moving resource life cycle with transition event=%s", transition_event)
ret = self.RR.execute_lifecycle_transition(resource_id=resource_id,
transition_event=transition_event)
log.info("lifecycle transition=%s resulted in lifecycle state=%s", transition_event, str(ret))
return ret
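    # Usage sketch (LCE.INTEGRATE is an assumed transition-event constant, shown only for
    # illustration and not verified against this codebase):
    #   new_state = self.advance_lcs(device_id, LCE.INTEGRATE)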
def cache_predicate(self, predicate):
"""
Save all associations of a given predicate type to memory, for in-memory find_subjects/objects ops
This is a PREFETCH operation, and EnhancedResourceRegistryClient objects that use the cache functionality
should NOT be persisted across service calls.
"""
#log.debug("Caching predicates: %s", predicate)
if self.has_cached_predicate(predicate):
#log.debug("Reusing prior cached predicate %s", predicate)
return
time_caching_start = get_ion_ts()
preds = self.RR.find_associations(predicate=predicate, id_only=False)
time_caching_stop = get_ion_ts()
total_time = int(time_caching_stop) - int(time_caching_start)
log.debug("Cached predicate %s with %s resources in %s seconds", predicate, len(preds), total_time / 1000.0)
self._cached_predicates[predicate] = preds
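    # Usage sketch (PRED.hasModel, the client handle, and the ids are assumed/illustrative;
    # per the docstring above, the cache must not be kept across service calls):
    #   rr2 = EnhancedResourceRegistryClient(self.clients.resource_registry)
    #   rr2.cache_predicate(PRED.hasModel)                                    # one bulk find_associations call
    #   model_ids = rr2.find_objects(device_id, PRED.hasModel, id_only=True)  # served from the cache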
def filter_cached_associations(self, predicate, is_match_fn):
if not self.has_cached_predicate(predicate):
raise BadRequest("Attempted to filter cached associations of uncached predicate '%s'" % predicate)
return [a for a in self._cached_predicates[predicate] if is_match_fn(a)]
def get_cached_associations(self, predicate):
return self.filter_cached_associations(predicate, lambda x: True)
def _add_resource_to_cache(self, resource_type, resource_obj):
self._cached_resources[resource_type].by_id[resource_obj._id] = resource_obj
self._cached_resources[resource_type].by_name.setdefault(resource_obj.name, []).append(resource_obj)
self._all_cached_resources[resource_obj._id] = resource_obj
def cache_resources(self, resource_type, specific_ids=None):
"""
Save all resources of a given type to memory, for in-memory lookup ops
This is a PREFETCH operation, and EnhancedResourceRegistryClient objects that use the cache functionality
should NOT be kept across service calls.
"""
#log.info("Caching resources: %s", resource_type)
#log.debug("This cache is %s", self)
time_caching_start = get_ion_ts()
resource_objs = []
if specific_ids is None:
resource_objs, _ = self.RR.find_resources(restype=resource_type, id_only=False)
else:
assert type(specific_ids) is list
if specific_ids:
resource_objs = self.RR.read_mult(specific_ids)
lookups = DotDict()
lookups.by_id = {}
lookups.by_name = {}
self._cached_resources[resource_type] = lookups
for r in resource_objs:
self._add_resource_to_cache(resource_type, r)
time_caching_stop = get_ion_ts()
total_time = int(time_caching_stop) - int(time_caching_start)
#log.info("Cached %s %s resources in %s seconds", len(resource_objs), resource_type, total_time / 1000.0)
def has_cached_predicate(self, predicate):
return predicate in self._cached_predicates
def has_cached_resource(self, resource_type):
return resource_type in self._cached_resources
def clear_cached_predicate(self, predicate=None):
if None is predicate:
self._cached_predicates = {}
elif predicate in self._cached_predicates:
del self._cached_predicates[predicate]
def clear_cached_resource(self, resource_type=None):
if None is resource_type:
self._cached_resources = {}
self._all_cached_resources = {}
elif resource_type in self._cached_resources:
del self._cached_resources[resource_type]
del_list = [i for i, o in self._all_cached_resources.iteritems() if o.type_ == resource_type]
for i in del_list:
del self._all_cached_resources[i]
@classmethod
def _uncamel(cls, name):
"""
convert CamelCase to camel_case, from http://stackoverflow.com/a/1176023/2063546
"""
log.trace("name is %s: '%s'" % (type(name).__name__, name))
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
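    # For example (illustrative CamelCase names), _uncamel("PlatformDevice") returns
    # "platform_device" and _uncamel("InstrumentSite") returns "instrument_site"; these
    # snake_case labels are the sort of names the dynamic method-name parsing below matches against.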
def _extract_id_and_type(self, id_or_obj):
"""
figure out whether a subject/object is an IonObject or just an ID
"""
if hasattr(id_or_obj, "_id"):
log.debug("find_object for IonObject")
the_id = id_or_obj._id
the_type = type(id_or_obj).__name__
else:
the_id = id_or_obj
the_type = "(Unspecified IonObject)"
if log.isEnabledFor(logging.DEBUG):
try:
the_obj = self.RR.read(the_id)
the_type = type(the_obj).__name__
except:
pass
return the_id, the_type
def _parse_function_name_for_subj_pred_obj(self, genre, fn_name, regexp, required_fields=None, group_names=None):
"""
        parse a function name into subject/predicate/object, as well as their CamelCase equivalents.
        The predicate part is optional; if it is missing, this function attempts to look it up in the
        list of predicates allowed between the given subject and object types. Errors are raised if the
        function name parses correctly but yields no matches in the RT and PRED lists.
@param genre string, an identifier for what kind of function we're parsing, used for debug messages
@param fn_name string, the function name coming from getattr
@param regexp string, the regexp (containing groups) to parse the fn_name
@param required_fields list, the list of what groups should be "not None" to accept the parse
@param group_names dict mapping of "subject", "object", and "predicate" to their group names
"""
if None is group_names: group_names = {}
if None is required_fields: required_fields = []
log.trace("Attempting parse %s as %s", fn_name, genre)
m = re.match(regexp, fn_name)
if None is m: return None
for r in required_fields:
if None is m.group(r): return None
log.debug("parsed '%s' as %s", fn_name, genre)
ret = {}
for name, idx in group_names.iteritems():
if None is idx:
ret[name] = None
else:
ret[name] = m.group(idx)
obj = ret["object"]
subj = ret["subject"]
pred = ret["predicate"]
if not subj in self.label_to_resource:
log.debug("Attempted to use dynamic %s with unknown subject '%s'", genre, subj)
return None
if not obj in self.label_to_resource:
log.debug("Attempted to use dynamic %s with unknown object '%s'", genre, obj)
return None
isubj = self.label_to_resource[subj]
iobj = self.label_to_resource[obj]
# code won't execute because getextends(Resource) puts ALL resources in a domain position
# if isubj not in self.predicates_for_subj_obj:
# log.debug("Dynamic %s wanted 1 predicate choice for associating %s to %s, no domain" %
# (genre, subj, obj))
# return None
if iobj not in self.predicates_for_subj_obj[isubj]:
log.debug("Dynamic %s wanted 1 predicate choice for associating %s to %s, no range" %
(genre, subj, obj))
return None
if pred is not None:
log.debug("supplied pred is %s", pred)
if not pred in self.label_to_predicate:
raise BadRequest("Attempted to use dynamic %s between %s and %s with unknown predicate '%s'" %
(genre, isubj, iobj, pred))
#return None
ipred = self.label_to_predicate[pred]
if not ipred in self.predicates_for_subj_obj[isubj][iobj]:
raise BadRequest("Attempted to use dynamic %s between %s and %s with disallowed predicate '%s'" %
                                 (genre, isubj, iobj, ipred))
else:
log.debug("no supplied predicate, picking from choices: %s" % self.predicates_for_subj_obj[isubj][iobj])
if len(self.predicates_for_subj_obj[isubj][iobj]) != 1:
raise BadRequest("Dynamic %s wanted 1 predicate choice for associating %s to %s, got %s" %
(genre, subj, obj, self.predicates_for_subj_obj[isubj][iobj]))
ipred = self.predicates_for_subj_obj[isubj][iobj][0]
ret["RT.object"] = iobj
ret["RT.subject"] = isubj
ret["PRED.predicate"] = ipred
return ret
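    # Illustrative parse (hypothetical labels, not taken from this module): with the assign
    # regexp used below, a name like "assign_instrument_device_to_platform_device_with_hasDevice"
    # yields object label "instrument_device", subject label "platform_device", and predicate
    # label "hasDevice"; those labels are then resolved through self.label_to_resource and
    # self.label_to_predicate before the generated function is built.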
def _make_dynamic_assign_function(self, item):
inputs = self._parse_function_name_for_subj_pred_obj("assign function w/pred",
item,
r"(assign_)(\w+)(_to_)(\w+)(_with_)(\w+)",
[2,3,4,5,6],
{"subject": 4, "predicate": 6, "object": 2})
if None is inputs:
return None
isubj = inputs["RT.subject"]
iobj = inputs["RT.object"]
ipred = inputs["PRED.predicate"]
log.debug("Making function to create associations %s -> %s -> %s", isubj, ipred, iobj)
def freeze():
def ret_fn(obj_id, subj_id):
log.debug("Dynamically creating association %s -> %s -> %s", isubj, ipred, iobj)
log.debug("%s -> %s -> %s", subj_id, ipred, obj_id)
self.RR.create_association(subj_id, ipred, obj_id)
return ret_fn
ret = freeze()
return ret
def _make_dynamic_assign_single_subject_function(self, item):
inputs = self._parse_function_name_for_subj_pred_obj("assign single subject function w/pred",
item,
r"(assign_)(\w+)(_to_one_)(\w+)(_with_)(\w+)",
[2,3,4,5,6],
{"subject": 4, "predicate": 6, "object": 2})
if None is inputs:
return None
isubj = inputs["RT.subject"]
iobj = inputs["RT.object"]
ipred = inputs["PRED.predicate"]
log.debug("Making function to create associations (1)%s -> %s -> %s", isubj, ipred, iobj)
def freeze():
def ret_fn(obj_id, subj_id):
log.debug("Dynamically creating association (1)%s -> %s -> %s", isubj, ipred, iobj)
log.debug("%s -> %s -> %s", subj_id, ipred, obj_id)
# see if there are any other objects of this type and pred on this subject
existing_subjs = self.find_subjects(isubj, ipred, obj_id, id_only=True)
if len(existing_subjs) > 1:
raise Inconsistent("Multiple %s-%s subjects found associated to the same %s object with id='%s'" %
(isubj, ipred, iobj, obj_id))
if len(existing_subjs) > 0:
try:
self.RR.get_association(subj_id, ipred, obj_id)
except NotFound:
raise BadRequest("Attempted to add a second %s-%s association to a %s with id='%s'" %
(isubj, ipred, iobj, obj_id))
else:
log.debug("Create %s Association (single subject): ALREADY EXISTS", ipred)
return
self.RR.create_association(subj_id, ipred, obj_id)
return ret_fn
ret = freeze()
return ret
def _make_dynamic_assign_single_object_function(self, item):
inputs = self._parse_function_name_for_subj_pred_obj("assign single object function w/pred",
item,
r"(assign_one_)(\w+)(_to_)(\w+)(_with_)(\w+)",
[2,3,4,5,6],
{"subject": 4, "predicate": 6, "object": 2})
if None is inputs:
return None
isubj = inputs["RT.subject"]
iobj = inputs["RT.object"]
ipred = inputs["PRED.predicate"]
log.debug("Making function to create associations %s -> %s -> (1)%s", isubj, ipred, iobj)
def freeze():
def ret_fn(obj_id, subj_id):
log.debug("Dynamically creating association %s -> %s -> (1)%s", isubj, ipred, iobj)
log.debug("%s -> %s -> %s", subj_id, ipred, obj_id)
# see if there are any other objects of this type and pred on this subject
existing_objs = self.find_objects(subj_id, ipred, iobj, id_only=True)
if len(existing_objs) > 1:
raise Inconsistent("Multiple %s-%s objects found with the same %s subject with id='%s'" %
(ipred, iobj, isubj, subj_id))
if len(existing_objs) > 0:
try:
log.debug("get_association gives")
log.debug(self.RR.get_association(subj_id, ipred, obj_id))
except NotFound:
raise BadRequest("Attempted to add a second %s-%s association to a %s with id='%s'" %
(ipred, iobj, isubj, subj_id))
else:
log.debug("Create %s Association (single object): ALREADY EXISTS", ipred)
return
self.RR.create_association(subj_id, ipred, obj_id)
return ret_fn
ret = freeze()
return ret
def _make_dynamic_unassign_function(self, item):
inputs = self._parse_function_name_for_subj_pred_obj("unassign function w/pred",
item,
r"(unassign_)(\w+)(_from_)(\w+)(_with_)(\w+)",
[2,3,4,5,6],
{"subject": 4, "predicate": 6, "object": 2})
if None is inputs:
return None
isubj = inputs["RT.subject"]
iobj = inputs["RT.object"]
ipred = inputs["PRED.predicate"]
log.debug("Making function to delete associations %s -> %s -> %s", isubj, ipred, iobj)
def freeze():
def ret_fn(obj_id, subj_id):
log.debug("Dynamically deleting association %s -> %s -> %s", isubj, ipred, iobj)
log.debug("%s -> %s -> %s", subj_id, ipred, obj_id)
self.delete_association(subj_id, ipred, obj_id)
return ret_fn
ret = freeze()
return ret
def _make_dynamic_find_objects_function(self, item):
inputs = self._parse_function_name_for_subj_pred_obj("find objects w/pred function",
item,
r"(find_)(\w+)(s_of_)(\w+)(_using_)(\w+)",
[2,3,4,5,6],
{"subject": 4, "predicate": 6, "object": 2})
if None is inputs:
return None
isubj = inputs["RT.subject"]
iobj = inputs["RT.object"]
ipred = inputs["PRED.predicate"]
log.debug("Making function to find objects %s -> %s -> %s", isubj, ipred, iobj)
def freeze():
def ret_fn(subj):
log.debug("Dynamically finding objects %s -> %s -> %s", isubj, ipred, iobj)
log.debug("%s -> %s -> %s", subj, ipred, iobj)
subj_id, _ = self._extract_id_and_type(subj)
ret = self.find_objects(subject=subj_id, predicate=ipred, object_type=iobj, id_only=False)
return ret
return ret_fn
ret = freeze()
return ret
def _make_dynamic_find_subjects_function(self, item):
inputs = self._parse_function_name_for_subj_pred_obj("find subjects w/pred function",
item,
r"(find_)(\w+)(s_by_)(\w+)(_using_)(\w+)",
[2,3,4,5,6],
{"subject": 2, "predicate": 6, "object": 4})
if None is inputs:
return None
isubj = inputs["RT.subject"]
iobj = inputs["RT.object"]
ipred = inputs["PRED.predicate"]
log.debug("Making function to find subjects %s <- %s <- %s", iobj, ipred, isubj)
def freeze():
def ret_fn(obj):
log.debug("Dynamically finding subjects %s <- %s <- %s", iobj, ipred, isubj)
log.debug("%s <- %s <- %s", obj, ipred, isubj)
obj_id, _ = self._extract_id_and_type(obj)
ret = self.find_subjects(subject_type=isubj, predicate=ipred, object=obj_id, id_only=False)
return ret
return ret_fn
ret = freeze()
return ret
def _make_dynamic_find_object_function(self, item):
inputs = self._parse_function_name_for_subj_pred_obj("find object w/pred function",
item,
r"(find_)(\w+)(_of_)(\w+)(_using_)(\w+)",
[2,3,4,5,6],
{"subject": 4, "predicate": 6, "object": 2})
if None is inputs:
return None
isubj = inputs["RT.subject"]
iobj = inputs["RT.object"]
ipred = inputs["PRED.predicate"]
log.debug("Making function to find object %s -> %s -> %s", isubj, ipred, iobj)
def freeze():
def ret_fn(subj_id):
log.debug("Dynamically finding object %s -> %s -> %s", isubj, ipred, iobj)
log.debug("%s -> %s -> %s", subj_id, ipred, iobj)
ret = self._find_object_(item, subj_id, isubj, ipred, iobj, False)
return ret
return ret_fn
ret = freeze()
return ret
def _make_dynamic_find_subject_function(self, item):
inputs = self._parse_function_name_for_subj_pred_obj("find subject w/pred function",
item,
r"(find_)(\w+)(_by_)(\w+)(_using_)(\w+)",
[2,3,4,5,6],
{"subject": 2, "predicate": 6, "object": 4})
if None is inputs:
return None
isubj = inputs["RT.subject"]
iobj = inputs["RT.object"]
ipred = inputs["PRED.predicate"]
log.debug("Making function to find subject %s <- %s <- %s", iobj, ipred, isubj)
def freeze():
def ret_fn(obj_id):
log.debug("Dynamically finding subject %s <- %s <- %s", iobj, ipred, isubj)
log.debug("%s <- %s <- %s", isubj, ipred, obj_id)
ret = self._find_subject_(item, isubj, ipred, obj_id, iobj, False)
return ret
return ret_fn
ret = freeze()
return ret
def _make_dynamic_find_object_ids_function(self, item):
inputs = self._parse_function_name_for_subj_pred_obj("find object_ids w/pred function",
item,
r"(find_)(\w+)(_ids_of_)(\w+)(_using_)(\w+)",
[2,3,4,5,6],
{"subject": 4, "predicate": 6, "object": 2})
if None is inputs:
return None
isubj = inputs["RT.subject"]
iobj = inputs["RT.object"]
ipred = inputs["PRED.predicate"]
log.debug("Making function to find object_ids %s -> %s -> %s", isubj, ipred, iobj)
def freeze():
def ret_fn(subj):
log.debug("Dynamically finding object_ids %s -> %s -> %s", isubj, ipred, iobj)
log.debug("%s -> %s -> %s", subj, ipred, iobj)
subj_id, _ = self._extract_id_and_type(subj)
ret = self.find_objects(subject=subj_id, predicate=ipred, object_type=iobj, id_only=True)
return ret
return ret_fn
ret = freeze()
return ret
def _make_dynamic_find_subject_ids_function(self, item):
inputs = self._parse_function_name_for_subj_pred_obj("find subject_ids w/pred function",
item,
r"(find_)(\w+)(_ids_by_)(\w+)(_using_)(\w+)",
[2,3,4,5,6],
{"subject": 2, "predicate": 6, "object": 4})
if None is inputs:
return None
isubj = inputs["RT.subject"]
iobj = inputs["RT.object"]
ipred = inputs["PRED.predicate"]
log.debug("Making function to find subject_ids %s <- %s <- %s", iobj, ipred, isubj)
def freeze():
def ret_fn(obj):
log.debug("Dynamically finding subject_ids %s <- %s <- %s", iobj, ipred, isubj)
log.debug("%s <- %s <- %s", isubj, ipred, obj)
obj_id, _ = self._extract_id_and_type(obj)
ret = self.find_subjects(subject_type=isubj, predicate=ipred, object=obj_id, id_only=True)
return ret
return ret_fn
ret = freeze()
return ret
def _make_dynamic_find_object_id_function(self, item):
inputs = self._parse_function_name_for_subj_pred_obj("find object_id w/pred function",
item,
r"(find_)(\w+)(_id_of_)(\w+)(_using_)(\w+)?",
[2,3,4,5,6],
{"subject": 4, "predicate": 6, "object": 2})
if None is inputs:
return None
isubj = inputs["RT.subject"]
iobj = inputs["RT.object"]
ipred = inputs["PRED.predicate"]
log.debug("Making function to find object_id %s -> %s -> %s", isubj, ipred, iobj)
def freeze():
def ret_fn(subj_id):
log.debug("Dynamically finding object_id %s -> %s -> %s", isubj, ipred, iobj)
log.debug("%s -> %s -> %s", subj_id, ipred, iobj)
ret = self._find_object_(item, subj_id, isubj, ipred, iobj, True)
return ret
return ret_fn
ret = freeze()
return ret
def _make_dynamic_find_subject_id_function(self, item):
inputs = self._parse_function_name_for_subj_pred_obj("find subject_id w/pred function",
item,
r"(find_)(\w+)(_id_by_)(\w+)(_using_)(\w+)?",
[2,3,4,5,6],
{"subject": 2, "predicate": 6, "object": 4})
if inputs is None:
return None
isubj = inputs["RT.subject"]
iobj = inputs["RT.object"]
ipred = inputs["PRED.predicate"]
log.debug("Making function to find subject_id %s <- %s <- %s", iobj, ipred, isubj)
def freeze():
def ret_fn(obj_id):
log.debug("Dynamically finding subject_id %s <- %s <- %s", iobj, ipred, isubj)
log.debug("%s <- %s <- %s", isubj, ipred, obj_id)
ret = self._find_subject_(item, isubj, ipred, obj_id, iobj, True)
return ret
return ret_fn
ret = freeze()
return ret
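    # Summary of the dynamic method-name patterns handled above, derived from the regexps
    # (X = object label, Y = subject label, P = predicate label, all in camel_case form):
    #   assign_X_to_Y_with_P        create an association Y -P-> X
    #   assign_X_to_one_Y_with_P    as above, but an X may be attached to at most one Y subject
    #   assign_one_X_to_Y_with_P    as above, but a Y may hold at most one X object
    #   unassign_X_from_Y_with_P    delete the association Y -P-> X
    #   find_Xs_of_Y_using_P        find_objects, full IonObjects
    #   find_Ys_by_X_using_P        find_subjects, full IonObjects
    #   find_X_of_Y_using_P         find exactly one object (NotFound/Inconsistent otherwise)
    #   find_Y_by_X_using_P         find exactly one subject
    #   find_X_ids_of_Y_using_P     find_objects, ids only
    #   find_Y_ids_by_X_using_P     find_subjects, ids only
    #   find_X_id_of_Y_using_P      find exactly one object id
    #   find_Y_id_by_X_using_P      find exactly one subject id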
def _check_type(self, resource_obj, specific_type, verb):
"""
determine whether the given resource matches the given type (if indeed given)
@param resource_obj the IonObject resource to be checked
@param specific_type a string type, or None
@param verb what will happen to this object (like "to be created")
        @raises BadRequest if the resource type does not match specific_type
"""
if specific_type is None:
return
resource_type = resource_obj.type_
if resource_type != specific_type:
raise BadRequest("Expected a %s for the resource %s, but received type %s" %
(specific_type, verb, resource_type))
def _check_name(self, resource_obj, verb):
"""
        check that the resource to be persisted has its name field set
@param resource_obj the IonObject resource to be checked
@param verb what will happen to this object (like "to be created")
        @raises BadRequest if the name wasn't set
"""
resource_type = resource_obj.type_
if not (hasattr(resource_obj, "name") and "" != resource_obj.name):
raise BadRequest("The name field was not set in the resource %s" % verb)
def pluck(self, resource_id=''):
"""
delete all associations to/from a resource
"""
# find all associations where this is the subject
_, obj_assns = self.RR.find_objects(subject=resource_id, id_only=True)
# find all associations where this is the object
_, sbj_assns = self.RR.find_subjects(object=resource_id, id_only=True)
log.debug("pluck will remove %s subject associations and %s object associations",
len(sbj_assns), len(obj_assns))
for assn in obj_assns:
log.debug("pluck deleting object association %s", assn)
self.RR.delete_association(assn)
for assn in sbj_assns:
log.debug("pluck deleting subject association %s", assn)
self.RR.delete_association(assn)
|
ooici/coi-services
|
ion/util/enhanced_resource_registry_client.py
|
Python
|
bsd-2-clause
| 50,232
|
# coding=utf-8
import re
import csv
# with open('index.html', 'r', encoding='utf-8') as f:
with open('index.html') as f:
str_html = f.read()
# 1. Grab the big pieces before the small ones: first get the full HTML block for each floor (post)
all_floors = \
re.findall('j_l_post clearfix "(.*?)p_props_tail', str_html, re.S)
# re.findall('l_post l_post_bright j_l_post clearfix "(.*?)p_props_tail props_appraise_wrap', str_html, re.S)
# print all_floors
# 2. Extract the poster's username, the content, and the post time from each floor
result_list = []
for each in all_floors:
result = {
'username': re.findall('username="(.*?)"', each, re.S)[0],
'content': re.findall('j_d_post_content ">(.*?)<', each, re.S)[0].replace(' ', ''),
'deploy_time': re.findall('tail-info">(2017-.*?)<', each)[0]
}
result_list.append(result)
# with open('result.csv', 'w') as f:
# writer = csv.DictWriter(f, fieldnames=['username', 'content', 'deploy_time'])
# writer.writeheader()
# writer.writerows(result_list)
for each in result_list:
print each['username']
print each['content']
print each['deploy_time']
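# Sketch for enabling the commented-out CSV export above under Python 2 (the interpreter
# implied by the print statements): encode any unicode fields to UTF-8 before writing.
# with open('result.csv', 'wb') as f:
#     writer = csv.DictWriter(f, fieldnames=['username', 'content', 'deploy_time'])
#     writer.writeheader()
#     writer.writerows([{k: (v.encode('utf-8') if isinstance(v, unicode) else v)
#                        for k, v in row.items()} for row in result_list])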
|
littlecarson/web-scraping
|
01-Regular-Expression/tieba_scrap.py
|
Python
|
mit
| 1,124
|
"""
Tests for Calendar Sync views.
"""
import ddt
from django.test import TestCase
from django.urls import reverse
from openedx.features.calendar_sync.api import SUBSCRIBE, UNSUBSCRIBE
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
TEST_PASSWORD = 'test'
@ddt.ddt
class TestCalendarSyncView(SharedModuleStoreTestCase, TestCase):
"""Tests for the calendar sync view."""
@classmethod
def setUpClass(cls):
""" Set up any course data """
super(TestCalendarSyncView, cls).setUpClass()
cls.course = CourseFactory.create()
def setUp(self):
super(TestCalendarSyncView, self).setUp()
self.user = self.create_user_for_course(self.course)
self.client.login(username=self.user.username, password=TEST_PASSWORD)
self.calendar_sync_url = reverse('openedx.calendar_sync', args=[self.course.id])
@ddt.data(
# Redirect on successful subscribe
[{'tool_data': "{{'toggle_data': '{}'}}".format(SUBSCRIBE)}, 302, ''],
# Redirect on successful unsubscribe
[{'tool_data': "{{'toggle_data': '{}'}}".format(UNSUBSCRIBE)}, 302, ''],
# 422 on unknown toggle_data
[{'tool_data': "{{'toggle_data': '{}'}}".format('gibberish')}, 422,
'Toggle data was not provided or had unknown value.'],
# 422 on no toggle_data
[{'tool_data': "{{'random_data': '{}'}}".format('gibberish')}, 422,
'Toggle data was not provided or had unknown value.'],
# 422 on no tool_data
[{'nonsense': "{{'random_data': '{}'}}".format('gibberish')}, 422, 'Tool data was not provided.'],
)
@ddt.unpack
def test_course_dates_fragment(self, data, expected_status_code, contained_text):
response = self.client.post(self.calendar_sync_url, data)
self.assertEqual(response.status_code, expected_status_code)
self.assertIn(contained_text, str(response.content))
|
msegado/edx-platform
|
openedx/features/calendar_sync/tests/test_views.py
|
Python
|
agpl-3.0
| 2,012
|
#!/usr/bin/env python
''' Unit test for the quantization module
File: test_quantmap.py
'''
__author__ = "Hauke Holtkamp"
__credits__ = "Hauke Holtkamp"
__license__ = "unknown"
__version__ = "unknown"
__maintainer__ = "Hauke Holtkamp"
__email__ = "h.holtkamp@gmail.com"
__status__ = "Development"
from quantmap import quantmap
import unittest
import numpy as np
from utils import utils
import scipy.linalg
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
pass
def test_quantmap(self):
"""Test quantmap. Since it's randomly permuted, we cannot check the exact outcome. Test that the outcome is proportional to the request."""
alloc = np.array([ 0.1147, 0.0381, 0.1080, 0.0721, 0.0640, 0.1477, 0.1048, 0.1607 , 0.0416, 0.1378, 0.0107 ])
K = alloc.size-1 # users
N = 50 # subcarriers on 10 MHz
T = 10 # timeslots per subframe
#outMap = quantmap(alloc, N, T)
# All resources must be used
#self.assertEqual(N*T, np.sum(np.sum(outMap)))
# Test that a user receives more than requested (conservative assignment)
alloc = np.array([ 0., 0., 0., 0., 0., 0., 0., 0. , 0., 0.5, 0.5 ])
K = alloc.size-1 # users
outMap = quantmap(alloc, N, T)
answer = np.nansum(outMap[:,-1]) # last user's resources
self.assertTrue(N*T*alloc[9] < answer)
if __name__ == '__main__':
unittest.main()
|
ryklith/pyltesim
|
quantmap/test_quantmap.py
|
Python
|
gpl-2.0
| 1,495
|
# coding=utf-8
# Copyright 2020 The PI-SAC Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Project inputs to a tanh-squashed MultivariateNormalDiag distribution."""
import gin
import tensorflow as tf
import tensorflow_probability as tfp
from tf_agents.distributions import utils as distribution_utils
from tf_agents.networks import network
from tf_agents.networks import utils as network_utils
from tf_agents.specs import distribution_spec
from tf_agents.specs import tensor_spec
@gin.configurable
class TanhNormalProjectionNetwork(network.DistributionNetwork):
"""Generates a tanh-squashed MultivariateNormalDiag distribution."""
def __init__(self,
sample_spec,
activation_fn=None,
kernel_initializer=None,
std_transform=tf.exp,
min_std=None,
max_std=None,
name='TanhNormalProjectionNetwork'):
"""Creates an instance of TanhNormalProjectionNetwork.
Args:
sample_spec: A `tensor_spec.BoundedTensorSpec` detailing the shape and
dtypes of samples pulled from the output distribution.
activation_fn: Activation function to use in dense layer.
      kernel_initializer: Initializer to use for the kernels of the conv and
        dense layers. If none is provided, a default glorot_uniform
        initializer is used.
std_transform: Transformation function to apply to the stddevs.
min_std: Minimum std.
max_std: Maximum std.
name: A string representing name of the network.
"""
if len(tf.nest.flatten(sample_spec)) != 1:
raise ValueError('Tanh Normal Projection network only supports single'
' spec samples.')
output_spec = self._output_distribution_spec(sample_spec, name)
super(TanhNormalProjectionNetwork, self).__init__(
# We don't need these, but base class requires them.
input_tensor_spec=None,
state_spec=(),
output_spec=output_spec,
name=name)
self._sample_spec = sample_spec
self._std_transform = std_transform
self._min_std = min_std
self._max_std = max_std
if kernel_initializer is None:
kernel_initializer = 'glorot_uniform'
self._projection_layer = tf.keras.layers.Dense(
sample_spec.shape.num_elements() * 2,
activation=activation_fn,
kernel_initializer=kernel_initializer,
name='projection_layer')
def _output_distribution_spec(self, sample_spec, network_name):
input_param_shapes = {
'loc': sample_spec.shape,
'scale_diag': sample_spec.shape
}
input_param_spec = { # pylint: disable=g-complex-comprehension
name: tensor_spec.TensorSpec(
shape=shape,
dtype=sample_spec.dtype,
name=network_name + '_' + name)
for name, shape in input_param_shapes.items()
}
def distribution_builder(*args, **kwargs):
distribution = tfp.distributions.MultivariateNormalDiag(*args, **kwargs)
return distribution_utils.scale_distribution_to_spec(
distribution, sample_spec)
return distribution_spec.DistributionSpec(
distribution_builder, input_param_spec, sample_spec=sample_spec)
def call(self, inputs, outer_rank, training=False, mask=None):
if inputs.dtype != self._sample_spec.dtype:
raise ValueError('Inputs to TanhNormalProjectionNetwork must match the '
'sample_spec.dtype.')
if mask is not None:
raise NotImplementedError(
'TanhNormalProjectionNetwork does not yet implement action masking; '
'got mask={}'.format(mask))
# outer_rank is needed because the projection is not done on the raw
# observations so getting the outer rank is hard as there is no spec to
# compare to.
batch_squash = network_utils.BatchSquash(outer_rank)
inputs = batch_squash.flatten(inputs)
means_and_stds = self._projection_layer(inputs, training=training)
means, stds = tf.split(means_and_stds, num_or_size_splits=2, axis=-1)
means = tf.reshape(means, [-1] + self._sample_spec.shape.as_list())
means = tf.cast(means, self._sample_spec.dtype)
if self._std_transform is not None:
stds = self._std_transform(stds)
if self._min_std is not None:
stds = tf.maximum(stds, self._min_std)
if self._max_std is not None:
stds = tf.minimum(stds, self._max_std)
stds = tf.cast(stds, self._sample_spec.dtype)
means = batch_squash.unflatten(means)
stds = batch_squash.unflatten(stds)
return self.output_spec.build_distribution(loc=means, scale_diag=stds), ()
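# Usage sketch (the spec, batch size, and feature width below are illustrative, not part of
# this module):
#   action_spec = tensor_spec.BoundedTensorSpec([3], tf.float32, minimum=-1.0, maximum=1.0)
#   proj_net = TanhNormalProjectionNetwork(action_spec)
#   dist, _ = proj_net(tf.zeros([2, 64]), outer_rank=1)
#   actions = dist.sample()   # samples squashed by tanh into the bounds of action_spec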
|
google-research/pisac
|
pisac/tanh_normal_projection_network.py
|
Python
|
apache-2.0
| 5,102
|
def multiples_of_three_or_five(num):
if ((num % 3) == 0 or (num % 5) == 0):
return True
else:
return False
def cumulative_sum(num):
cum_sum = 0
for i in range(1, num):
if multiples_of_three_or_five(i):
cum_sum += i
return cum_sum
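# Worked example: for num = 10, the qualifying values below 10 are 3, 5, 6 and 9,
# so cumulative_sum(10) returns 23.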
|
paulliwali/projecteuler-python
|
euler/problem_1.py
|
Python
|
mit
| 286
|
# -*- coding: utf-8 -*-
# Copyright 2015-TODAY LasLabs Inc.
# License MIT (https://opensource.org/licenses/MIT).
from mixer.backend.sqlalchemy import mixer
from datetime import datetime
dt_now = datetime.now()
__model__ = 'carepoint.models.cph.store.Store'
store_default = mixer.blend(
__model__,
store_id=1,
store_type_cn=1,
name='Name',
store_hours='StoreHours',
store_no='StoreNo',
fed_tax_id='FedTaxId',
url='Url',
email='Email',
mgr_cont_id=1,
cont_id=1,
carepoint_acct_no='CarepointAcctNo',
cmt='Cmt',
status_cn=1,
app_flags=1,
nabp='Nabp',
medicaid_no='Medicaid',
timestmp=dt_now,
region_id=1,
NPI='Npi',
pharmacy_service_type_cn=1,
web_refill_yn=False,
add_user_id=1,
add_date=dt_now,
chg_user_id=1,
chg_date=dt_now,
)
def store_rnd(cnt):
return mixer.cycle(cnt).blend(__model__)
|
laslabs/Python-Carepoint
|
carepoint/tests/models/cph/fixtures/store.py
|
Python
|
mit
| 903
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.words.protocols.jabber.component}
"""
from hashlib import sha1
from twisted.python import failure
from twisted.trial import unittest
from twisted.words.protocols.jabber import component, xmlstream
from twisted.words.protocols.jabber.jid import JID
from twisted.words.xish import domish
from twisted.words.xish.utility import XmlPipe
class DummyTransport:
def __init__(self, list):
self.list = list
def write(self, bytes):
self.list.append(bytes)
class ComponentInitiatingInitializerTests(unittest.TestCase):
def setUp(self):
self.output = []
self.authenticator = xmlstream.Authenticator()
self.authenticator.password = 'secret'
self.xmlstream = xmlstream.XmlStream(self.authenticator)
self.xmlstream.namespace = 'test:component'
self.xmlstream.send = self.output.append
self.xmlstream.connectionMade()
self.xmlstream.dataReceived(
"<stream:stream xmlns='test:component' "
"xmlns:stream='http://etherx.jabber.org/streams' "
"from='example.com' id='12345' version='1.0'>")
self.xmlstream.sid = u'12345'
self.init = component.ComponentInitiatingInitializer(self.xmlstream)
def testHandshake(self):
"""
Test basic operations of component handshake.
"""
d = self.init.initialize()
# the initializer should have sent the handshake request
handshake = self.output[-1]
self.assertEqual('handshake', handshake.name)
self.assertEqual('test:component', handshake.uri)
self.assertEqual(sha1("%s%s" % ('12345', 'secret')).hexdigest(),
unicode(handshake))
# successful authentication
handshake.children = []
self.xmlstream.dataReceived(handshake.toXml())
return d
class ComponentAuthTests(unittest.TestCase):
def authPassed(self, stream):
self.authComplete = True
def testAuth(self):
self.authComplete = False
outlist = []
ca = component.ConnectComponentAuthenticator("cjid", "secret")
xs = xmlstream.XmlStream(ca)
xs.transport = DummyTransport(outlist)
xs.addObserver(xmlstream.STREAM_AUTHD_EVENT,
self.authPassed)
# Go...
xs.connectionMade()
xs.dataReceived("<stream:stream xmlns='jabber:component:accept' xmlns:stream='http://etherx.jabber.org/streams' from='cjid' id='12345'>")
# Calculate what we expect the handshake value to be
hv = sha1("%s%s" % ("12345", "secret")).hexdigest()
self.assertEqual(outlist[1], "<handshake>%s</handshake>" % (hv))
xs.dataReceived("<handshake/>")
self.assertEqual(self.authComplete, True)
class JabberServiceHarness(component.Service):
def __init__(self):
self.componentConnectedFlag = False
self.componentDisconnectedFlag = False
self.transportConnectedFlag = False
def componentConnected(self, xmlstream):
self.componentConnectedFlag = True
def componentDisconnected(self):
self.componentDisconnectedFlag = True
def transportConnected(self, xmlstream):
self.transportConnectedFlag = True
class JabberServiceManagerTests(unittest.TestCase):
def testSM(self):
        # Set up the service manager and test harness
sm = component.ServiceManager("foo", "password")
svc = JabberServiceHarness()
svc.setServiceParent(sm)
# Create a write list
wlist = []
# Setup a XmlStream
xs = sm.getFactory().buildProtocol(None)
xs.transport = self
xs.transport.write = wlist.append
# Indicate that it's connected
xs.connectionMade()
# Ensure the test service harness got notified
self.assertEqual(True, svc.transportConnectedFlag)
# Jump ahead and pretend like the stream got auth'd
xs.dispatch(xs, xmlstream.STREAM_AUTHD_EVENT)
# Ensure the test service harness got notified
self.assertEqual(True, svc.componentConnectedFlag)
# Pretend to drop the connection
xs.connectionLost(None)
# Ensure the test service harness got notified
self.assertEqual(True, svc.componentDisconnectedFlag)
class RouterTests(unittest.TestCase):
"""
Tests for L{component.Router}.
"""
def test_addRoute(self):
"""
Test route registration and routing on incoming stanzas.
"""
router = component.Router()
routed = []
router.route = lambda element: routed.append(element)
pipe = XmlPipe()
router.addRoute('example.org', pipe.sink)
self.assertEqual(1, len(router.routes))
self.assertEqual(pipe.sink, router.routes['example.org'])
element = domish.Element(('testns', 'test'))
pipe.source.send(element)
self.assertEqual([element], routed)
def test_route(self):
"""
Test routing of a message.
"""
component1 = XmlPipe()
component2 = XmlPipe()
router = component.Router()
router.addRoute('component1.example.org', component1.sink)
router.addRoute('component2.example.org', component2.sink)
outgoing = []
component2.source.addObserver('/*',
lambda element: outgoing.append(element))
stanza = domish.Element((None, 'presence'))
stanza['from'] = 'component1.example.org'
stanza['to'] = 'component2.example.org'
component1.source.send(stanza)
self.assertEqual([stanza], outgoing)
def test_routeDefault(self):
"""
Test routing of a message using the default route.
The default route is the one with C{None} as its key in the
routing table. It is taken when there is no more specific route
in the routing table that matches the stanza's destination.
"""
component1 = XmlPipe()
s2s = XmlPipe()
router = component.Router()
router.addRoute('component1.example.org', component1.sink)
router.addRoute(None, s2s.sink)
outgoing = []
s2s.source.addObserver('/*', lambda element: outgoing.append(element))
stanza = domish.Element((None, 'presence'))
stanza['from'] = 'component1.example.org'
stanza['to'] = 'example.com'
component1.source.send(stanza)
self.assertEqual([stanza], outgoing)
class ListenComponentAuthenticatorTests(unittest.TestCase):
"""
Tests for L{component.ListenComponentAuthenticator}.
"""
def setUp(self):
self.output = []
authenticator = component.ListenComponentAuthenticator('secret')
self.xmlstream = xmlstream.XmlStream(authenticator)
self.xmlstream.send = self.output.append
def loseConnection(self):
"""
Stub loseConnection because we are a transport.
"""
self.xmlstream.connectionLost("no reason")
def test_streamStarted(self):
"""
The received stream header should set several attributes.
"""
observers = []
def addOnetimeObserver(event, observerfn):
observers.append((event, observerfn))
xs = self.xmlstream
xs.addOnetimeObserver = addOnetimeObserver
xs.makeConnection(self)
self.assertIdentical(None, xs.sid)
self.assertFalse(xs._headerSent)
xs.dataReceived("<stream:stream xmlns='jabber:component:accept' "
"xmlns:stream='http://etherx.jabber.org/streams' "
"to='component.example.org'>")
self.assertEqual((0, 0), xs.version)
self.assertNotIdentical(None, xs.sid)
self.assertTrue(xs._headerSent)
self.assertEqual(('/*', xs.authenticator.onElement), observers[-1])
def test_streamStartedWrongNamespace(self):
"""
The received stream header should have a correct namespace.
"""
streamErrors = []
xs = self.xmlstream
xs.sendStreamError = streamErrors.append
xs.makeConnection(self)
xs.dataReceived("<stream:stream xmlns='jabber:client' "
"xmlns:stream='http://etherx.jabber.org/streams' "
"to='component.example.org'>")
self.assertEqual(1, len(streamErrors))
self.assertEqual('invalid-namespace', streamErrors[-1].condition)
def test_streamStartedNoTo(self):
"""
The received stream header should have a 'to' attribute.
"""
streamErrors = []
xs = self.xmlstream
xs.sendStreamError = streamErrors.append
xs.makeConnection(self)
xs.dataReceived("<stream:stream xmlns='jabber:component:accept' "
"xmlns:stream='http://etherx.jabber.org/streams'>")
self.assertEqual(1, len(streamErrors))
self.assertEqual('improper-addressing', streamErrors[-1].condition)
def test_onElement(self):
"""
We expect a handshake element with a hash.
"""
handshakes = []
xs = self.xmlstream
xs.authenticator.onHandshake = handshakes.append
handshake = domish.Element(('jabber:component:accept', 'handshake'))
handshake.addContent('1234')
xs.authenticator.onElement(handshake)
self.assertEqual('1234', handshakes[-1])
def test_onElementNotHandshake(self):
"""
Reject elements that are not handshakes
"""
handshakes = []
streamErrors = []
xs = self.xmlstream
xs.authenticator.onHandshake = handshakes.append
xs.sendStreamError = streamErrors.append
element = domish.Element(('jabber:component:accept', 'message'))
xs.authenticator.onElement(element)
self.assertFalse(handshakes)
self.assertEqual('not-authorized', streamErrors[-1].condition)
def test_onHandshake(self):
"""
Receiving a handshake matching the secret authenticates the stream.
"""
authd = []
def authenticated(xs):
authd.append(xs)
xs = self.xmlstream
xs.addOnetimeObserver(xmlstream.STREAM_AUTHD_EVENT, authenticated)
xs.sid = u'1234'
theHash = '32532c0f7dbf1253c095b18b18e36d38d94c1256'
xs.authenticator.onHandshake(theHash)
self.assertEqual('<handshake/>', self.output[-1])
self.assertEqual(1, len(authd))
def test_onHandshakeWrongHash(self):
"""
Receiving a bad handshake should yield a stream error.
"""
streamErrors = []
authd = []
def authenticated(xs):
authd.append(xs)
xs = self.xmlstream
xs.addOnetimeObserver(xmlstream.STREAM_AUTHD_EVENT, authenticated)
xs.sendStreamError = streamErrors.append
xs.sid = u'1234'
theHash = '1234'
xs.authenticator.onHandshake(theHash)
self.assertEqual('not-authorized', streamErrors[-1].condition)
self.assertEqual(0, len(authd))
class XMPPComponentServerFactoryTests(unittest.TestCase):
"""
Tests for L{component.XMPPComponentServerFactory}.
"""
def setUp(self):
self.router = component.Router()
self.factory = component.XMPPComponentServerFactory(self.router,
'secret')
self.xmlstream = self.factory.buildProtocol(None)
self.xmlstream.thisEntity = JID('component.example.org')
def test_makeConnection(self):
"""
A new connection increases the stream serial count. No logs by default.
"""
self.xmlstream.dispatch(self.xmlstream,
xmlstream.STREAM_CONNECTED_EVENT)
self.assertEqual(0, self.xmlstream.serial)
self.assertEqual(1, self.factory.serial)
self.assertIdentical(None, self.xmlstream.rawDataInFn)
self.assertIdentical(None, self.xmlstream.rawDataOutFn)
def test_makeConnectionLogTraffic(self):
"""
Setting logTraffic should set up raw data loggers.
"""
self.factory.logTraffic = True
self.xmlstream.dispatch(self.xmlstream,
xmlstream.STREAM_CONNECTED_EVENT)
self.assertNotIdentical(None, self.xmlstream.rawDataInFn)
self.assertNotIdentical(None, self.xmlstream.rawDataOutFn)
def test_onError(self):
"""
An observer for stream errors should trigger onError to log it.
"""
self.xmlstream.dispatch(self.xmlstream,
xmlstream.STREAM_CONNECTED_EVENT)
class TestError(Exception):
pass
reason = failure.Failure(TestError())
self.xmlstream.dispatch(reason, xmlstream.STREAM_ERROR_EVENT)
self.assertEqual(1, len(self.flushLoggedErrors(TestError)))
def test_connectionInitialized(self):
"""
Make sure a new stream is added to the routing table.
"""
self.xmlstream.dispatch(self.xmlstream, xmlstream.STREAM_AUTHD_EVENT)
self.assertIn('component.example.org', self.router.routes)
self.assertIdentical(self.xmlstream,
self.router.routes['component.example.org'])
def test_connectionLost(self):
"""
Make sure a stream is removed from the routing table on disconnect.
"""
self.xmlstream.dispatch(self.xmlstream, xmlstream.STREAM_AUTHD_EVENT)
self.xmlstream.dispatch(None, xmlstream.STREAM_END_EVENT)
self.assertNotIn('component.example.org', self.router.routes)
|
Architektor/PySnip
|
venv/lib/python2.7/site-packages/twisted/words/test/test_jabbercomponent.py
|
Python
|
gpl-3.0
| 13,801
|
#!/usr/bin/env python
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""usage: makecab.py [options] source [destination]
Makes cab archives of single files, using zip compression.
Acts like Microsoft makecab.exe would act if passed `/D CompressionType=MSZIP`.
If [destination] is omitted, uses source with last character replaced with _.
options:
-h, --help: print this message
/D arg: silently ignored (for compat with makecab.exe)
/L outdir: put output file in outdir
/Vn: silently ignored (for compat with makecab.exe)
"""
# A cross-platform reimplementation of the bits of makecab.exe that we use.
# cab also supports LZX compression, which has a bitstream that allows for
# a higher compression rate than zip compression (aka deflate). But the cab
# shipped to users is built on the signing server using regular Microsoft
# makecab.exe, so having something in-tree that is good enough is good enough.
from __future__ import print_function
from collections import namedtuple
import datetime
import os
import struct
import sys
import zlib
class FlagParseError(Exception): pass
def ParseFlags(flags):
"""Parses |flags| and returns the parsed flags; returns None for --help."""
# Can't use optparse / argparse because of /-style flags :-/
input = None
output = None
output_dir = '.'
# Parse.
i = 0
while i < len(flags):
flag = flags[i]
if flag == '-h' or flag == '--help':
return None
if flag.startswith('/V'):
i += 1 # Ignore /V1 and friends.
elif flag in ['/D', '/L']:
if i == len(flags) - 1:
raise FlagParseError('argument needed after ' + flag)
if flag == '/L':
output_dir = flags[i + 1]
# Ignore all /D flags silently.
i += 2
elif (flag.startswith('-') or
(flag.startswith('/') and not os.path.exists(flag))):
raise FlagParseError('unknown flag ' + flag)
else:
if not input:
input = flag
elif not output:
output = flag
else:
raise FlagParseError('too many paths: %s %s %s' % (input, output, flag))
i += 1
# Validate and set default values.
if not input:
raise FlagParseError('no input file')
if not output:
output = os.path.basename(input)[:-1] + '_'
Flags = namedtuple('Flags', ['input', 'output', 'output_dir'])
return Flags(input=input, output=output, output_dir=output_dir)
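# Illustrative call (hypothetical file names): ParseFlags(['/L', 'out', 'setup.exe'])
# returns Flags(input='setup.exe', output='setup.ex_', output_dir='out'), i.e. the default
# output name replaces the last character of the input name with '_', matching the
# behaviour described in the module docstring.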
def WriteCab(output_file, input_file, cab_stored_filename, input_size,
input_mtimestamp):
"""Reads data from input_file and stores its MSZIP-compressed data
in output_file. cab_stored_filename is the filename stored in the
cab file, input_size is the size of the input file, and input_mtimestamp
the mtime timestamp of the input file (must be at least midnight 1980-1-1)."""
# Need to write (all in little-endian)::
# 36 bytes CFHEADER cab header
# 8 bytes CFFOLDER (a set of files compressed with the same parameters)
# 16 bytes + filename (+ 1 byte trailing \0 for filename) CFFFILE
# Many 8 bytes CFDATA blocks, representing 32kB chunks of uncompressed data,
# each followed by the compressed data.
cffile_offset = 36 + 8
cfdata_offset = cffile_offset + 16 + len(cab_stored_filename) + 1
chunk_size = 1 << 15
num_chunks = (input_size + chunk_size - 1) / chunk_size
# https://msdn.microsoft.com/en-us/library/bb417343.aspx#cabinet_format
# Write CFHEADER.
CFHEADER = ('<'
'4s' # signature, 'MSCF'
'I' # reserved1, set to 0
'I' # cbCabinet, size of file in bytes. Not yet known, filled in later.
'I' # reserved2, set to 0
'I' # coffFiles, offset of first (and here, only) CFFILE.
'I' # reserved3, set to 0
'B' # versionMinor, currently 3. Yes, minor version is first.
'B' # versionMajor, currently 1.
'H' # cFolders, number of CFFOLDER entries.
'H' # cFiles, number of CFFILE entries.
'H' # flags, for multi-file cabinets. 0 here.
'H' # setID, for multi-file cabinets. 0 here.
'H' # iCabinet, index in multi-file cabinets. 0 here.
)
output_file.write(struct.pack(CFHEADER,
'MSCF', 0, 0, 0,
cffile_offset, 0, 3, 1, 1, 1, 0,
0, 0))
# Write single CFFOLDER.
CFFOLDER = ('<'
'I' # coffCabStart, offset of first CFDATA block in this folder.
'H' # cCFData, number of CFDATA blocks in this folder.
'H' # typeCompress, compression type. 1 means MS-ZIP.
)
output_file.write(struct.pack(CFFOLDER, cfdata_offset, num_chunks, 1))
# Write single CFFILE.
CFFILE = ('<'
'I' # cbFile, uncompressed size of this file in bytes.
'I' # uoffFolderStart, uncompressed offset of this file in folder.
'H' # iFolder, index into CFFOLDER area.
'H' # date, in the format ((year-1980) << 9) + (month << 5) + (day),
# where month={1..12} and day={1..31}.
'H' # time, in the format (hour << 11)+(minute << 5)+(seconds/2),
# where hour={0..23}.
'H' # attribs, 1: read-only
# 2: hidden
# 4: system file
# 0x20: file modified since last backup
# 0x40: run after extraction
# 0x80: name contains UTF
) # Followed by szFile, the file's name.
assert output_file.tell() == cffile_offset
mtime = datetime.datetime.fromtimestamp(input_mtimestamp)
date = (mtime.year - 1980) << 9 | mtime.month << 5 | mtime.day
# TODO(thakis): hour seems to be off by 1 from makecab.exe (DST?)
time = mtime.hour << 11 | mtime.minute << 5 | mtime.second / 2
output_file.write(struct.pack(CFFILE, input_size, 0, 0, date, time, 0))
output_file.write(cab_stored_filename + '\0')
# Write num_chunks many CFDATA headers, followed by the compressed data.
assert output_file.tell() == cfdata_offset
CFDATA = ('<'
'I' # checksum. Optional and expensive to compute in Python, so write 0.
'H' # cbData, number of compressed bytes in this block.
'H' # cbUncomp, size after decompressing. 1 << 15 for all but the last.
)
# Read input data in chunks of 32kB, compress and write out compressed data.
for _ in xrange(num_chunks):
chunk = input_file.read(chunk_size)
# Have to use compressobj instead of compress() so we can pass a negative
# window size to remove header and trailing checksum.
# Compression level 6 runs about 8x as fast as makecab.exe's LZX compression
# while producing a 45% larger file. (Interestingly, it also runs
# about 5x as fast as makecab.exe's MSZIP compression while being about
# 4.8% larger -- so it might be possible to write an LZX compressor that's
# much faster without being so much larger.) Compression level 9 isn't
# very different. Level 1 is another ~30% faster and 10% larger.
# Since 6 is ok and the default, let's go with that.
# Remember: User-shipped bits get recompressed on the signing server.
zlib_obj = zlib.compressobj(
zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
compressed = zlib_obj.compress(chunk) + zlib_obj.flush()
compressed_size = 2 + len(compressed) # Also count 0x43 0x4b magic header.
# cab spec: "Each data block represents 32k uncompressed, except that the
# last block in a folder may be smaller. A two-byte MSZIP signature precedes
# the compressed encoding in each block, consisting of the bytes 0x43, 0x4B.
# The maximum compressed size of each MSZIP block is 32k + 12 bytes."
assert compressed_size <= chunk_size + 12
output_file.write(struct.pack(CFDATA, 0, compressed_size, len(chunk)))
output_file.write('\x43\x4b') # MSZIP magic block header.
output_file.write(compressed)
outfile_size = output_file.tell()
# Now go back and fill in missing size in CFHEADER.
output_file.seek(8) # cbCabinet, size of file in bytes.
output_file.write(struct.pack('<I', outfile_size))
def main():
try:
flags = ParseFlags(sys.argv[1:])
except FlagParseError as arg_error:
print('makecab.py: error:', arg_error.message, file=sys.stderr)
print('pass --help for usage', file=sys.stderr)
sys.exit(1)
if not flags: # --help got passed
print(__doc__)
sys.exit(0)
if not os.path.exists(flags.input):
print('makecab.py: error: input file %s does not exist' % flags.input,
file=sys.stderr)
sys.exit(1)
with open(os.path.join(flags.output_dir, flags.output), 'wb') as output_file:
cab_stored_filename = os.path.basename(flags.input)
input_mtimestamp = os.path.getmtime(flags.input)
input_size = os.path.getsize(flags.input)
with open(flags.input, 'rb') as input_file:
WriteCab(output_file, input_file, cab_stored_filename, input_size,
input_mtimestamp)
if __name__ == '__main__':
main()
|
endlessm/chromium-browser
|
chrome/tools/build/win/makecab.py
|
Python
|
bsd-3-clause
| 8,817
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):
dependencies = [
('autodeploy', '0006_auto_20150510_1427'),
]
operations = [
migrations.AlterField(
model_name='project',
name='lastUpdate',
field=models.DateTimeField(default=datetime.datetime(2015, 5, 10, 14, 27, 14, 363617), blank=True),
),
]
|
mkalioby/AutoDeploy
|
webapp/autoDeploy/autodeploy/migrations/0007_auto_20150510_1427.py
|
Python
|
gpl-2.0
| 487
|
import yaku.utils
def setup(ctx):
env = ctx.env
ctx.env["CC"] = ["clang"]
ctx.env["CC_TGT_F"] = ["-c", "-o"]
ctx.env["CC_SRC_F"] = []
ctx.env["CFLAGS"] = []
ctx.env["DEFINES"] = []
ctx.env["LINK"] = ["clang"]
ctx.env["LINKFLAGS"] = []
ctx.env["LINK_TGT_F"] = ["-o"]
ctx.env["LINK_SRC_F"] = []
ctx.env["SHAREDLIB_FMT"] = "lib%s.so"
ctx.env["SHLINK"] = ["clang"]
ctx.env["SHLINKFLAGS"] = []
ctx.env["SHLINK_TGT_F"] = ["-o"]
ctx.env["SHLINK_SRC_F"] = []
ctx.env["MODLINK"] = ["clang", "-bundle", "-undefined", "dynamic_lookup"]
ctx.env["MODLINKFLAGS"] = []
ctx.env["MODLINK_TGT_F"] = ["-o"]
ctx.env["MODLINK_SRC_F"] = []
ctx.env["CPPPATH"] = []
ctx.env["CPPPATH_FMT"] = "-I%s"
ctx.env["LIBDIR"] = []
ctx.env["LIBS"] = []
ctx.env["LIB_FMT"] = "-l%s"
ctx.env["LIBDIR_FMT"] = "-L%s"
ctx.env["CC_OBJECT_FMT"] = "%s.o"
ctx.env["PROGRAM_FMT"] = "%s"
ctx.env["CXX"] = ["clang"]
ctx.env["CXX_TGT_F"] = ["-c", "-o"]
ctx.env["CXX_SRC_F"] = []
ctx.env["CXXFLAGS"] = []
ctx.env["CXXLINK"] = ["clang"]
ctx.env["CXXLINKFLAGS"] = []
ctx.env["CXXLINK_TGT_F"] = ["-o"]
ctx.env["CXXLINK_SRC_F"] = []
ctx.env["CXXSHLINK"] = ["clang"]
ctx.env["CXXSHLINKFLAGS"] = []
ctx.env["CXXSHLINK_TGT_F"] = ["-o"]
ctx.env["CXXSHLINK_SRC_F"] = []
ctx.env["CPPPATH"] = []
ctx.env["CPPPATH_FMT"] = "-I%s"
ctx.env["LIBDIR"] = []
ctx.env["LIBS"] = []
ctx.env["FRAMEWORKS"] = []
ctx.env["LIB_FMT"] = "-l%s"
ctx.env["LIBDIR_FMT"] = "-L%s"
ctx.env["CXX_OBJECT_FMT"] = "%s.o"
ctx.env["PROGRAM_FMT"] = "%s"
def detect(ctx):
    return yaku.utils.find_program("clang") is not None
|
esc/Bento
|
bento/private/_yaku/yaku/tools/clang.py
|
Python
|
bsd-3-clause
| 1,777
|
# =============================================================================
# COPYRIGHT 2013 Brain Corporation.
# License under MIT license (see LICENSE file)
# =============================================================================
import os
from requirement import RequirementException
import logging
from utility import ln, write_file
def install(robustus, requirement_specifier, rob_file, ignore_index):
# Softlinking to existing PyGtk. TODO: install via configure
if os.path.isfile('/usr/lib/python2.7/dist-packages/pygtk.py'):
logging.info('Linking pygtk')
site_packages_dir = os.path.join(robustus.env, 'lib/python2.7/site-packages')
files = ['pygtk.py', 'pygtk.pyc', 'gtk-2.0', 'glib', 'gobject', 'cairo']
for f in files:
src = os.path.join('/usr/lib/python2.7/dist-packages', f)
if not os.path.exists(src):
raise RequirementException('Required packages for system-wide PyGtk missing, %s not found' % f)
ln(src, os.path.join(site_packages_dir, f), force=True)
pygtk_pth = os.path.join(site_packages_dir, 'pygtk.pth')
if not os.path.exists(pygtk_pth):
write_file(os.path.join(pygtk_pth),
'w',
os.path.join(site_packages_dir, 'gtk-2.0'))
else:
raise RequirementException('System-wide PyGtk is missing')
|
braincorp/robustus
|
robustus/detail/install_pygtk.py
|
Python
|
mit
| 1,403
|
from AppKit import *
import os
import vanilla.dialogs as dialogs
import vanilla
from mojo.events import addObserver
from mojo.UI import OpenGlyphWindow, OpenSpaceCenter, CurrentSpaceCenterWindow, OutputWindow
from lib.scripting.scriptTools import ScriptRunner
from lib.scripting.codeEditor import CodeEditor
from mojo.tools import CallbackWrapper
from plistlib import readPlist, writePlist
def OpenRoboFontProject(path):
root = os.path.dirname(path)
project = readPlist(path)
documentController = NSDocumentController.sharedDocumentController()
delegate = NSApp().delegate()
openFileNames = [window.representedFilename() for window in NSApp().windows()]
for fileName, data in project["documents"].items():
isUntitled = fileName == "untitled"
if not isUntitled:
if not os.path.exists(fileName):
fileName = os.path.abspath(os.path.join(root, fileName))
if not os.path.exists(fileName):
continue
if fileName in openFileNames:
continue
data.sort(key=lambda item: item.get("name") != "FontWindow")
for windowData in data:
name = windowData["windowName"]
x, y, w, h = windowData["frame"]
if isUntitled:
if name == "FontWindow":
RFont()
elif name == "ScriptingWindow":
delegate.scriptingWindow_(None)
elif name == "FeatureWindow":
delegate.newFeature_(None)
else:
url = NSURL.fileURLWithPath_(fileName)
doc, error = documentController.openDocumentWithContentsOfURL_display_error_(url, True, None)
if error:
delegate.application_openFile_(NSApp(), fileName)
window = NSApp().mainWindow()
vanillaWrapper = None
if hasattr(window.delegate(), "vanillaWrapper"):
vanillaWrapper = window.delegate().vanillaWrapper()
if vanillaWrapper:
font = CurrentFont()
if name == "GlyphWindow":
window = OpenGlyphWindow(font[windowData["glyphName"]], newWindow=True)
window.w.getNSWindow().setFrame_display_animate_(((x, y), (w, h)), True, False)
continue
elif name == "SpaceCenter":
spaceCenter = OpenSpaceCenter(font)
spaceCenter.setPointSize(windowData["pointSize"])
spaceCenter.setPre(windowData["pre"])
spaceCenter.setAfter(windowData["after"])
spaceCenter.set(windowData["input"])
window = CurrentSpaceCenterWindow()
window.w.getNSWindow().setFrame_display_animate_(((x, y), (w, h)), True, False)
continue
window.setFrame_display_animate_(((x, y), (w, h)), True, False)
for windowData in project["toolWindows"]:
name = windowData["windowName"]
x, y, w, h = windowData["frame"]
if name == "DebugWindow":
window = OutputWindow()
window.show()
window.w.getNSWindow().setFrame_display_animate_(((x, y), (w, h)), True, False)
elif name == "InspectorWindow":
try:
# a little bit hacky
# will move to mojo.UI in the upcoming releases
window = delegate._inspectorWindow.w.getNSWindow()
except:
window = None
if window is None:
delegate.openInspector_(None)
window = delegate._inspectorWindow.w.getNSWindow()
window.setFrame_display_animate_(((x, y), (w, h)), True, False)
if "execute" in project:
try:
ScriptRunner(text=project["execute"])
except:
import traceback
print(traceback.format_exc(5))
class SaveRoboFontProject(object):
def __init__(self):
w, h = 550, 250
self.view = vanilla.Group((0, 0, w, h))
self.view.relative = vanilla.CheckBox((0, 3, 300, 22), "Use Relative Paths")
self.view.info = vanilla.TextBox((0, 33, 300, 22), "Execute on load:")
self.view.editor = CodeEditor((0, 60, w, h-70))
view = self.view.getNSView()
view.setFrame_(((0, 0), (w, h)))
path = dialogs.putFile("Save RoboFont Project..", fileTypes=["roboFontProject"], accessoryView=view)
if path:
data = self.getData(path)
writePlist(data, path)
icon = NSImage.alloc().initByReferencingFile_(os.path.join(os.path.dirname(__file__), "roboFontProjectIcon.png"))
ws = NSWorkspace.sharedWorkspace()
ws.setIcon_forFile_options_(icon, path, 0)
def getData(self, path):
toolWindows = list()
documents = dict()
untitled = list()
relativePaths = self.view.relative.get()
for document in NSApp().orderedDocuments():
url = document.fileURL()
fileName = None
if url:
fileName = url.path()
if relativePaths and path:
fileName = os.path.relpath(fileName, os.path.dirname(path))
if fileName not in documents:
documents[fileName] = []
for windowController in document.windowControllers():
window = windowController.window()
(x, y), (w, h) = window.frame()
data = dict()
data["frame"] = x, y, w, h
data["windowName"] = window.windowName()
vanillaWrapper = None
if hasattr(window.delegate(), "vanillaWrapper"):
vanillaWrapper = window.delegate().vanillaWrapper()
if vanillaWrapper:
if data["windowName"] == "GlyphWindow":
data["glyphName"] = vanillaWrapper.getGlyph().name
elif data["windowName"] == "SpaceCenter":
spaceCenter = vanillaWrapper.getSpaceCenter()
data["input"] = spaceCenter.get()
data["pre"] = spaceCenter.getPre()
data["after"] = spaceCenter.getAfter()
data["pointSize"] = spaceCenter.getPointSize()
if fileName:
documents[fileName].append(data)
else:
untitled.append(data)
for window in NSApp().windows():
if hasattr(window, "windowName"):
if window.windowName() in ["DebugWindow", "InspectorWindow"]:
(x, y), (w, h) = window.frame()
data = dict()
data["frame"] = x, y, w, h
data["windowName"] = window.windowName()
toolWindows.append(data)
documents["untitled"] = untitled
info = dict(toolWindows=toolWindows, documents=documents)
code = self.view.editor.get()
if code:
info["execute"] = code
return info
# file handler
class ReadRoboFontProjectFile(object):
def __init__(self):
addObserver(self, "applicationOpenFile", "applicationOpenFile")
def applicationOpenFile(self, notification):
path = notification["path"]
ext = notification["ext"]
fileHandler = notification["fileHandler"]
if ext.lower() == ".robofontproject":
try:
OpenRoboFontProject(path)
except:
import traceback
print(traceback.format_exc(5))
fileHandler["opened"] = True
ReadRoboFontProjectFile()
# add to menu
class RoboFontProjectMenu(object):
def __init__(self):
title = "Save Project..."
mainMenu = NSApp().mainMenu()
fileMenu = mainMenu.itemWithTitle_("File")
if not fileMenu:
return
fileMenu = fileMenu.submenu()
if fileMenu.itemWithTitle_(title):
return
index = fileMenu.indexOfItemWithTitle_("Revert to Saved")
self.target = CallbackWrapper(self.callback)
newItem = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_(title, "action:", "")
newItem.setTarget_(self.target)
fileMenu.insertItem_atIndex_(newItem, index+1)
def callback(self, sender):
SaveRoboFontProject()
RoboFontProjectMenu()
|
typemytype/projectRoboFontExtension
|
RoboFontProject.roboFontExt/lib/main.py
|
Python
|
mit
| 8,553
|
#!/usr/bin/python
# file: flask_server.py
# Eilidh Southren - 1513195
#------------------------------------------
#
# This script creates a flask server
# that allows the user to view the pictures
# taken by the security system online.
#
from flask import Flask, render_template
app = Flask(__name__)
import os
import time
@app.route('/')
def index():
# return array of files in static folder
fileList = os.listdir("static")
todayDate = time.strftime("%d_%m_%Y")
newDate = '00_00_00'
myData = {
'list' : fileList,
'date' : todayDate,
'newDate' : newDate
}
return render_template('index.html', **myData)
if __name__ == "__main__":
try:
app.run(host='0.0.0.0', port=8080, debug=True)
except KeyboardInterrupt:
pass
print('Shutting down web app:', __name__)
|
esouthren/Rasprotect-System
|
flask_server.py
|
Python
|
gpl-3.0
| 900
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import os
import re
from twisted.internet import defer
from buildslave.commands.base import SourceBaseCommand
from buildslave import runprocess
from buildslave.commands.base import AbandonChain
class Repo(SourceBaseCommand):
"""Repo specific VC operation. In addition to the arguments
handled by SourceBaseCommand, this command reads the following keys:
['manifest_url'] (required): The URL of the manifest repository.
['manifest_branch'] (optional): Which manifest repo version (i.e. branch or tag)
to retrieve. Default: "master".
['manifest_file'] (optional): Which manifest file to use. Default: "default.xml".
['tarball'] (optional): The tarball base to accelerate the fetch.
['repo_downloads'] (optional): Repo downloads to do. Computed from GerritChangeSource
and forced build properties.
"""
header = "repo operation"
def setup(self, args):
SourceBaseCommand.setup(self, args)
self.manifest_url = args.get('manifest_url')
self.manifest_branch = args.get('manifest_branch')
self.manifest_file = args.get('manifest_file')
self.tarball = args.get('tarball')
self.repo_downloads = args.get('repo_downloads')
# we're using string instead of an array here, because it will be transferred back
# to the master as string anyway and using eval() could have security implications.
self.repo_downloaded = ""
self.sourcedata = "%s %s %s" % (self.manifest_url, self.manifest_branch, self.manifest_file)
self.re_change = re.compile(".* refs/changes/\d\d/(\d+)/(\d+) -> FETCH_HEAD$")
self.re_head = re.compile("^HEAD is now at ([0-9a-f]+)...")
def _fullSrcdir(self):
return os.path.join(self.builder.basedir, self.srcdir)
def sourcedirIsUpdateable(self):
print os.path.join(self._fullSrcdir(), ".repo")
print os.path.isdir(os.path.join(self._fullSrcdir(), ".repo"))
return os.path.isdir(os.path.join(self._fullSrcdir(), ".repo"))
def _repoCmd(self, command, cb=None, abandonOnFailure=True, **kwargs):
repo = self.getCommand("repo")
c = runprocess.RunProcess(self.builder, [repo] + command, self._fullSrcdir(),
sendRC=False, timeout=self.timeout,
maxTime=self.maxTime, usePTY=False, **kwargs)
self.command = c
d = c.start()
if cb:
if abandonOnFailure:
d.addCallback(self._abandonOnFailure)
d.addCallback(cb)
return d
def _tarCmd(self, cmds, callback):
cmd = ["tar"] + cmds
c = runprocess.RunProcess(self.builder, cmd, self._fullSrcdir(),
sendRC=False, timeout=self.timeout,
maxTime=self.maxTime, usePTY=False)
self.command = c
cmdexec = c.start()
cmdexec.addCallback(callback)
return cmdexec
def _gitCmd(self, subdir, cmds, callback):
cmd = ["git"] + cmds
c = runprocess.RunProcess(self.builder, cmd, os.path.join(self._fullSrcdir(), subdir),
sendRC=False, timeout=self.timeout,
maxTime=self.maxTime, usePTY=False)
self.command = c
cmdexec = c.start()
cmdexec.addCallback(callback)
return cmdexec
def sourcedataMatches(self):
try:
olddata = self.readSourcedata()
return olddata == self.sourcedata
except IOError:
return False
def doVCFull(self):
os.makedirs(self._fullSrcdir())
if self.tarball and os.path.exists(self.tarball):
return self._tarCmd(['-xvzf', self.tarball], self._doInit)
else:
return self._doInit(None)
def _doInit(self,res):
# on fresh init, this file may confuse repo.
if os.path.exists(os.path.join(self._fullSrcdir(), ".repo/project.list")):
os.unlink(os.path.join(self._fullSrcdir(), ".repo/project.list"))
return self._repoCmd(['init', '-u', self.manifest_url, '-b', self.manifest_branch, '-m', self.manifest_file], self._didInit)
def _didInit(self, res):
return self.doVCUpdate()
def doVCUpdate(self):
command = ['forall', '-c', 'git', 'clean', '-f', '-d', '-x']
return self._repoCmd(command, self._doClean2, abandonOnFailure=False)
def _doClean2(self,dummy):
command = ['clean', '-f', '-d', '-x']
return self._gitCmd(".repo/manifests",command, self._doSync)
def _doSync(self, dummy):
command = ['sync']
self.sendStatus({"header": "synching manifest %s from branch %s from %s\n"
% (self.manifest_file, self.manifest_branch, self.manifest_url)})
return self._repoCmd(command, self._didSync)
def _didSync(self, dummy):
if self.tarball and not os.path.exists(self.tarball):
return self._tarCmd(['-cvzf', self.tarball, ".repo"], self._doDownload)
else:
return self._doDownload(None)
def _doDownload(self, dummy):
if hasattr(self.command, 'stderr') and self.command.stderr:
lines = self.command.stderr.split('\n')
if len(lines) > 2:
match1 = self.re_change.match(lines[1])
match2 = self.re_head.match(lines[-2])
if match1 and match2:
self.repo_downloaded += "%s/%s %s " % (match1.group(1), match1.group(2), match2.group(1))
if self.repo_downloads:
# download each changeset while self.repo_downloads is not empty
download = self.repo_downloads.pop(0)
command = ['download'] + download.split(' ')
self.sendStatus({"header": "downloading changeset %s\n"
% (download)})
return self._repoCmd(command, self._doDownload, keepStderr=True) # call again
if self.repo_downloaded:
self.sendStatus({"repo_downloaded": self.repo_downloaded[:-1]})
return defer.succeed(0)
def maybeNotDoVCFallback(self, res):
# If we were unable to find the branch/SHA on the remote,
# clobbering the repo won't help any, so just abort the chain
if hasattr(self.command, 'stderr'):
if "Couldn't find remote ref" in self.command.stderr:
raise AbandonChain(-1)
|
eunchong/build
|
third_party/buildbot_slave_8_4/buildslave/commands/repo.py
|
Python
|
bsd-3-clause
| 7,218
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ElasticPoolPerDatabaseMinDtuCapability(Model):
"""The minimum per-database DTU capability.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar limit: The maximum DTUs per database.
:vartype limit: long
:ivar status: The status of the capability. Possible values include:
'Visible', 'Available', 'Default', 'Disabled'
:vartype status: str or :class:`CapabilityStatus
<azure.mgmt.sql.models.CapabilityStatus>`
"""
_validation = {
'limit': {'readonly': True},
'status': {'readonly': True},
}
_attribute_map = {
'limit': {'key': 'limit', 'type': 'long'},
'status': {'key': 'status', 'type': 'CapabilityStatus'},
}
def __init__(self):
self.limit = None
self.status = None
|
SUSE/azure-sdk-for-python
|
azure-mgmt-sql/azure/mgmt/sql/models/elastic_pool_per_database_min_dtu_capability.py
|
Python
|
mit
| 1,345
|
import random
import pygame
from pygame.locals import *
import viewport
import tiles
import characters
import group
import tree
import mapgen
MIN_CHARACTERS = 150
class WorldView(viewport.Viewport):
def __init__(self, parent, viewport_rect, canvas_w, canvas_h):
super(WorldView, self).__init__(parent, viewport_rect, canvas_w, canvas_h)
self.tile_w = 50
self.tile_h = 50
self.map = mapgen.Map.from_random(canvas_w//self.tile_w, canvas_h//self.tile_h)
self.alltiles = group.Group(self)
self.alltiles_coords = {}
self.alltrees = group.Group(self)
self.allfood = group.Group(self)
# Generate tiles and trees
for i, row in enumerate(self.map):
for j, tile in enumerate(row):
block = tiles.TileView(
self, i, j, self.tile_w, self.tile_h, tile)
self.alltiles.add(block)
self.alltiles_coords[i, j] = block
self.allcharacters = group.Group(self)
self.active_item = None
self.age = 0.0
def _create_character(self):
character = characters.Character.from_random(self)
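# Rejection-sample a spawn position: keep re-rolling random coordinates until
# the character has a valid midpoint and no longer overlaps another character
# or a tree.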
while character.midx == -1 \
or pygame.sprite.spritecollideany(character, self.allcharacters) \
or pygame.sprite.spritecollideany(character, self.alltrees):
x = random.randint(character.r, self.canvas_w - character.r)
y = random.randint(character.r, self.canvas_h - character.r)
character.set_midpoint_x(x)
character.set_midpoint_y(y)
self.allcharacters.add(character)
# debugging:
if self.active_item is None:
self.active_item = character
self.parent.brainview.brain = character.brain
def update(self):
self.age += 1
while len(self.allcharacters) < MIN_CHARACTERS:
self._create_character()
self.alltiles.update()
self.allcharacters.update()
self.allcharacters.collisions()
def jump_to(self, item):
x = item.rect.x - self.rect.w // 2
y = item.rect.y - self.rect.h // 2
if x < 0:
x = 0
if y < 0:
y = 0
if x > self.canvas_w - self.rect.w:
x = self.canvas_w - self.rect.w
if y > self.canvas_h - self.rect.h:
y = self.canvas_h - self.rect.h
self.drag_offset[0] = -x
self.drag_offset[1] = -y
def draw(self):
for group in (self.alltiles, self.allfood, self.allcharacters, self.alltrees):
group.draw(self.canvas)
if self.active_item:
self.parent.infopane.text = str(self.active_item)
else:
self.parent.infopane.text = ''
self.image.blit(self.canvas, self.drag_offset)
def onclick(self, relpos, button):
if button == 1:
canvas_pos = [relpos[i] - self.drag_offset[i] for i in (0, 1)]
clicked_sprites = []
if not clicked_sprites:
clicked_sprites = [s for s in self.allcharacters.spritedict
if s.rect.collidepoint(canvas_pos)]
if not clicked_sprites:
clicked_sprites = [s for s in self.alltiles.spritedict
if s.rect.collidepoint(canvas_pos)]
if clicked_sprites:
if self.active_item is not None \
and self.active_item is not clicked_sprites[0]:
self.active_item.redraw = True
self.active_item = clicked_sprites[0]
self.active_item.redraw = True
if hasattr(self.active_item, 'brain'):
self.parent.brainview.brain = self.active_item.brain
else:
self.parent.brainview.brain = None
return
self.parent.brainview.brain = None
if self.active_item:
self.active_item.redraw = True
self.active_item = None
|
flexo/evolutron
|
evolutron/worldview.py
|
Python
|
mit
| 4,012
|
#The recipe gives a simple implementation of a discrete Proportional-Integral-Derivative (PID) controller. The controller computes an output from the error between the desired reference input and the measured feedback, so as to minimize that error.
#More information: http://en.wikipedia.org/wiki/PID_controller
#
#cnr437@gmail.com
#
####### Example #########
#
#p=PID(3.0,0.4,1.2)
#p.setPoint(5.0)
#while True:
# pid = p.update(measurement_value)
#
#
class PID:
"""
Discrete PID control
"""
def __init__(self, P=2.0, I=0.0, D=1.0, Derivator=0, Integrator=0, Integrator_max=500, Integrator_min=-500):
self.Kp=P
self.Ki=I
self.Kd=D
self.Derivator=Derivator
self.Integrator=Integrator
self.Integrator_max=Integrator_max
self.Integrator_min=Integrator_min
self.set_point=0.0
self.fitness=-1
def update(self,current_value):
"""
Calculate PID output value for given reference input and feedback
"""
self.error = self.set_point - current_value
self.P_value = self.Kp * self.error
self.D_value = self.Kd * ( self.Derivator - current_value)
self.Derivator = current_value
self.Integrator = self.Integrator + self.error
if self.Integrator > self.Integrator_max:
self.Integrator = self.Integrator_max
elif self.Integrator < self.Integrator_min:
self.Integrator = self.Integrator_min
self.I_value = self.Integrator * self.Ki
PID = self.P_value + self.I_value + self.D_value
if PID > 1:
PID = 1
if PID < 0:
PID = 0
return PID
def setPoint(self,set_point):
"""
Initialize the setpoint of the PID
"""
self.set_point = set_point
self.Integrator=0
self.Derivator=0
def setIntegrator(self, Integrator):
self.Integrator = Integrator
def setDerivator(self, Derivator):
self.Derivator = Derivator
def setKp(self,P):
self.Kp=P
def setKi(self,I):
self.Ki=I
def setKd(self,D):
self.Kd=D
def getPoint(self):
return self.set_point
def getError(self):
return self.error
def getIntegrator(self):
return self.Integrator
def getDerivator(self):
return self.Derivator
def setFitness(self, f):
self.fitness = f
def getFitness(self):
return self.fitness
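# A minimal usage sketch of the class above, following the example in the
# header comment. The first-order "plant" below is an illustrative assumption
# and not part of the original recipe.
if __name__ == '__main__':
    p = PID(3.0, 0.4, 1.2)
    p.setPoint(0.5)
    y = 0.0  # measured value fed back to the controller
    for _ in range(50):
        u = p.update(y)     # controller output, clamped to [0, 1] by update()
        y += (u - y) * 0.2  # toy process: the measurement drifts toward the control signal
    print(y)  # should settle near the 0.5 setpoint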
|
Miceuz/pidgenetics
|
pid.py
|
Python
|
bsd-3-clause
| 2,139
|
# Imports from Django
from django.contrib.sitemaps import Sitemap
from django.contrib.sites.models import Site
# Imports from brubeck
from brubeck.blogs.models import Blog, Entry
site = Site.objects.get_current()
class BlogSitemap(Sitemap):
def items(self):
return Blog.objects.filter(section__publication__site=site)
def lastmod(self, obj):
try:
return obj.entry_set.latest().pub_date
except:
return None
changefreq = 'daily'
priority = 0.5
class EntrySitemap(Sitemap):
def items(self):
return Entry.get_published.filter(blog__section__publication__site=site)
def lastmod(self, obj):
return obj.pub_date
changefreq = 'daily'
priority = 0.75
|
albatrossandco/brubeck_cms
|
brubeck/blogs/sitemap.py
|
Python
|
bsd-3-clause
| 741
|
#!/usr/bin/python
'''
Basic example script talking to a VManager using the Python library.
Prints out network settings, changes and updates a config value, and
subscribes to data notifications and prints packets that are received.
'''
#============================ adjust path =====================================
import sys
import os
if __name__ == "__main__":
here = sys.path[0]
sys.path.insert(0, os.path.join(here, '..', 'libs'))
sys.path.insert(0, os.path.join(here, '..', 'external_libs'))
#============================ imports =========================================
import urllib3
import traceback
import base64
import certifi
# generic SmartMeshSDK imports
from SmartMeshSDK import sdk_version
# VManager-specific imports
from VManagerSDK.vmanager import Configuration
from VManagerSDK.vmgrapi import VManagerApi
from VManagerSDK.vmanager.rest import ApiException
from VManagerSDK.vmanager import SystemWriteConfig
#============================ defines =========================================
DFLT_VMGR_HOST = "127.0.0.1"
urllib3.disable_warnings() # disable warnings that show up about self-signed certificates
#============================ helpers =========================================
def process_data(mydata):
'''
Processes received data. Prints MAC address of source and packet content.
'''
macaddr = mydata.mac_address
datapayload = int((base64.b64decode(mydata.payload)).encode('hex'),16)
print ' Data from mote {0} --> {1}'.format(macaddr, datapayload)
#============================ main ============================================
try:
# print banner
print '\nVMgr_BasicExample (c) Dust Networks'
print 'SmartMesh SDK {0}\n'.format('.'.join([str(i) for i in sdk_version.VERSION]))
mgrhost = raw_input('Enter the IP address of the manager (e.g. {0} ): '.format(DFLT_VMGR_HOST))
if mgrhost == "":
mgrhost = DFLT_VMGR_HOST
# log in as user "dust"
config = Configuration()
config.username = 'dust'
config.password = 'dust'
config.verify_ssl = False
if os.path.isfile(certifi.where()):
config.ssl_ca_cert = certifi.where()
else:
config.ssl_ca_cert = os.path.join(os.path.dirname(sys.executable), "cacert.pem")
# initialize the VManager Python library
voyager = VManagerApi(host=mgrhost)
# read and display network configuration
netConfig = voyager.networkApi.get_network_config()
print '\n==== Display current network Configuration'
print netConfig
# update system configuration
sysConfig = SystemWriteConfig()
sysConfig.location = "California"
voyager.systemApi.update_system_config(sysConfig)
# read a configuration setting
mysetting = voyager.systemApi.get_system_config()
print '\n==== Set and display current location'
print mysetting.location
# start listening for data notifications
voyager.get_notifications('data', notif_callback=process_data)
print '\n==== Subscribe to data notifications and display'
reply = raw_input ('Waiting for notifications , Press any key to stop\n\n')
voyager.stop_notifications()
print 'Script ended normally'
except:
traceback.print_exc()
print ('Script ended with an error.')
voyager.stop_notifications()
sys.exit()
|
realms-team/basestation-fw
|
libs/smartmeshsdk-REL-1.3.0.1/vmanager_apps/VMgr_BasicExample.py
|
Python
|
bsd-3-clause
| 3,403
|
"""Extensions to the 'distutils' for large or complex distributions"""
import os
import functools
import distutils.core
import distutils.filelist
from distutils.core import Command as _Command
from distutils.util import convert_path
from fnmatch import fnmatchcase
from setuptools.extern.six.moves import filterfalse, map
import setuptools.version
from setuptools.extension import Extension
from setuptools.dist import Distribution, Feature, _get_unpatched
from setuptools.depends import Require
__all__ = [
'setup', 'Distribution', 'Feature', 'Command', 'Extension', 'Require',
'find_packages'
]
__version__ = setuptools.version.__version__
bootstrap_install_from = None
# If we run 2to3 on .py files, should we also convert docstrings?
# Default: yes; assume that we can detect doctests reliably
run_2to3_on_doctests = True
# Standard package names for fixer packages
lib2to3_fixer_packages = ['lib2to3.fixes']
class PackageFinder(object):
@classmethod
def find(cls, where='.', exclude=(), include=('*',)):
"""Return a list all Python packages found within directory 'where'
'where' should be supplied as a "cross-platform" (i.e. URL-style)
path; it will be converted to the appropriate local path syntax.
'exclude' is a sequence of package names to exclude; '*' can be used
as a wildcard in the names, such that 'foo.*' will exclude all
subpackages of 'foo' (but not 'foo' itself).
'include' is a sequence of package names to include. If it's
specified, only the named packages will be included. If it's not
specified, all found packages will be included. 'include' can contain
shell style wildcard patterns just like 'exclude'.
The list of included packages is built up first and then any
explicitly excluded packages are removed from it.
"""
out = cls._find_packages_iter(convert_path(where))
out = cls.require_parents(out)
includes = cls._build_filter(*include)
excludes = cls._build_filter('ez_setup', '*__pycache__', *exclude)
out = filter(includes, out)
out = filterfalse(excludes, out)
return list(out)
@staticmethod
def require_parents(packages):
"""
Exclude any apparent package that doesn't include its
parent.
For example, exclude 'foo.bar' if 'foo' is not present.
"""
found = []
for pkg in packages:
base, sep, child = pkg.rpartition('.')
if base and base not in found:
continue
found.append(pkg)
yield pkg
@staticmethod
def _candidate_dirs(base_path):
"""
Return all dirs in base_path that might be packages.
"""
has_dot = lambda name: '.' in name
for root, dirs, files in os.walk(base_path, followlinks=True):
# Exclude directories that contain a period, as they cannot be
# packages. Mutate the list to avoid traversal.
dirs[:] = filterfalse(has_dot, dirs)
for dir in dirs:
yield os.path.relpath(os.path.join(root, dir), base_path)
@classmethod
def _find_packages_iter(cls, base_path):
candidates = cls._candidate_dirs(base_path)
return (
path.replace(os.path.sep, '.')
for path in candidates
if cls._looks_like_package(os.path.join(base_path, path))
)
@staticmethod
def _looks_like_package(path):
return os.path.isfile(os.path.join(path, '__init__.py'))
@staticmethod
def _build_filter(*patterns):
"""
Given a list of patterns, return a callable that will be true only if
the input matches one of the patterns.
"""
return lambda name: any(fnmatchcase(name, pat=pat) for pat in patterns)
class PEP420PackageFinder(PackageFinder):
@staticmethod
def _looks_like_package(path):
return True
find_packages = PackageFinder.find
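# Illustrative use of the finder defined above (the 'src' and 'tests' names
# are assumptions, not taken from this file):
#   find_packages(where='src', exclude=('tests', 'tests.*'))
# walks 'src', keeps directories that contain an __init__.py, then filters the
# dotted names through the include/exclude patterns built by _build_filter().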
setup = distutils.core.setup
_Command = _get_unpatched(_Command)
class Command(_Command):
__doc__ = _Command.__doc__
command_consumes_arguments = False
def __init__(self, dist, **kw):
"""
Construct the command for dist, updating
vars(self) with any keyword parameters.
"""
_Command.__init__(self, dist)
vars(self).update(kw)
def reinitialize_command(self, command, reinit_subcommands=0, **kw):
cmd = _Command.reinitialize_command(self, command, reinit_subcommands)
vars(cmd).update(kw)
return cmd
# we can't patch distutils.cmd, alas
distutils.core.Command = Command
def _find_all_simple(path):
"""
Find all files under 'path'
"""
results = (
os.path.join(base, file)
for base, dirs, files in os.walk(path, followlinks=True)
for file in files
)
return filter(os.path.isfile, results)
def findall(dir=os.curdir):
"""
Find all files under 'dir' and return the list of full filenames.
Unless dir is '.', return full filenames with dir prepended.
"""
files = _find_all_simple(dir)
if dir == os.curdir:
make_rel = functools.partial(os.path.relpath, start=dir)
files = map(make_rel, files)
return list(files)
# fix findall bug in distutils (http://bugs.python.org/issue12885)
distutils.filelist.findall = findall
|
ohduran/CrowdFinanceInfographic
|
virtualenv/lib/python3.5/site-packages/setuptools/__init__.py
|
Python
|
mit
| 5,440
|
from flask import current_app, redirect, Response, request, session, url_for
from flask.views import MethodView
from requests_oauthlib import OAuth2Session
from sqlalchemy.exc import IntegrityError
from oauthlib.oauth2.rfc6749.errors import OAuth2Error
from zeus import auth
from zeus.config import celery, db
from zeus.constants import GITHUB_AUTH_URI, GITHUB_DEFAULT_SCOPES, GITHUB_TOKEN_URI
from zeus.models import Email, Identity, User
from zeus.tasks import sync_github_access
from zeus.utils.github import GitHubClient
def get_oauth_session(redirect_uri=None, state=None, scopes=None):
return OAuth2Session(
client_id=current_app.config["GITHUB_CLIENT_ID"],
redirect_uri=redirect_uri,
state=state,
scope=",".join(sorted(scopes)) if scopes else None,
)
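# The session above relies on two values that must be present in the Flask
# config; shown only to illustrate the expected shape:
#   GITHUB_CLIENT_ID = "<oauth app client id>"
#   GITHUB_CLIENT_SECRET = "<oauth app client secret>"  # used in GitHubCompleteView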
class GitHubAuthView(MethodView):
def __init__(self, authorized_url, scopes=GITHUB_DEFAULT_SCOPES):
self.authorized_url = authorized_url
self.scopes = scopes
super(GitHubAuthView, self).__init__()
def get(self):
auth.bind_redirect_target()
oauth = get_oauth_session(
redirect_uri=url_for(self.authorized_url, _external=True),
scopes=self.scopes,
)
authorization_url, state = oauth.authorization_url(GITHUB_AUTH_URI)
session["oauth_state"] = state
return redirect(authorization_url)
class GitHubCompleteView(MethodView):
# TODO(dcramer): we dont handle the case where the User row has been deleted,
# but the identity still exists. It shouldn't happen.
def get(self):
# TODO(dcramer): handle errors
oauth = get_oauth_session(state=session.pop("oauth_state", None))
try:
resp = oauth.fetch_token(
GITHUB_TOKEN_URI,
client_secret=current_app.config["GITHUB_CLIENT_SECRET"],
authorization_response=request.url,
)
except OAuth2Error:
current_app.logger.exception("oauth.error")
# redirect, as this is likely temporary based on server data
return redirect(auth.get_redirect_target(clear=True) or "/")
if resp is None or resp.get("access_token") is None:
return Response(
"Access denied: reason=%s error=%s resp=%s"
% (request.args["error"], request.args["error_description"], resp)
)
assert resp.get("token_type") == "bearer"
scopes = resp["scope"][0].split(",")
if "user:email" not in scopes:
raise NotImplementedError
# fetch user details
client = GitHubClient(token=resp["access_token"])
user_data = client.get("/user")
identity_config = {
"access_token": resp["access_token"],
"refresh_token": resp.get("refresh_token"),
"login": user_data["login"],
}
email_list = client.get("/user/emails")
email_list.append(
{
"email": "{}@users.noreply.github.com".format(user_data["login"]),
"verified": True,
}
)
primary_email = user_data.get("email")
# HACK(dcramer): capture github's anonymous email addresses when they're not listed
# (we haven't actually confirmed they're not listed)
if not primary_email:
primary_email = next(
(e["email"] for e in email_list if e["verified"] and e["primary"])
)
try:
# we first attempt to create a new user + identity
with db.session.begin_nested():
user = User(email=primary_email)
db.session.add(user)
identity = Identity(
user=user,
external_id=str(user_data["id"]),
provider="github",
scopes=scopes,
config=identity_config,
)
db.session.add(identity)
user_id = user.id
new_user = True
except IntegrityError:
# if that fails, assume the identity exists
identity = Identity.query.filter(
Identity.external_id == str(user_data["id"]),
Identity.provider == "github",
).first()
# and if it doesnt, attempt to find a matching user,
# as it means the failure above was due to that
if not identity:
user = User.query.filter(User.email == primary_email).first()
assert (
user
) # this should not be possible unless we've got a race condition
identity = Identity(
user=user,
external_id=str(user_data["id"]),
provider="github",
scopes=scopes,
config=identity_config,
)
db.session.add(identity)
user_id = user.id
else:
identity.config = identity_config
identity.scopes = scopes
db.session.add(identity)
user_id = identity.user_id
new_user = False
db.session.flush()
for email in email_list:
try:
with db.session.begin_nested():
db.session.add(
Email(
user_id=user_id,
email=email["email"],
verified=email["verified"],
)
)
except IntegrityError:
pass
db.session.commit()
# forcefully expire a session after permanent_session_lifetime
# Note: this is enforced in zeus.auth
auth.login_user(user_id)
user = auth.get_current_user()
if new_user:
# update synchronously so the new user has a better experience
sync_github_access(user_id=user.id)
else:
celery.delay("zeus.sync_github_access", user_id=user.id)
next_uri = auth.get_redirect_target(clear=True) or "/"
if "/login" in next_uri or "/auth/github" in next_uri:
next_uri = "/"
return redirect(next_uri)
|
getsentry/zeus
|
zeus/web/views/auth_github.py
|
Python
|
apache-2.0
| 6,319
|
"""Configuration file for the Sphinx documentation builder.
This file does only contain a selection of the most common options. For a
full list see the documentation:
http://www.sphinx-doc.org/en/master/config
-- Path setup ---------------------------------------------------------------
If extensions (or modules to document with autodoc) are in another directory,
add these directories to sys.path here. If the directory is relative to the
documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sys
sys.path.insert(0, os.path.abspath('.'))
"""
import sys
try:
from unittest.mock import MagicMock
except ImportError:
from mock import MagicMock
class Mock(MagicMock):
"""Mock object."""
@classmethod
def __getattr__(cls, name):
"""Return a MagicMock object."""
return MagicMock()
MOCK_MODULES = [
'rllab',
"rllab.core.serializable.Serializable",
'rllab.sampler.utils.rollout',
"AAPI",
'PyANGKernel',
'AAPI.GKGUISystem',
'thread'
]
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
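# With the mocks registered above, autodoc can import modules that depend on
# rllab, AAPI, etc. without those packages being installed on the docs builder.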
# -- Project information -----------------------------------------------------
project = 'Flow'
copyright = '2019, flow-project'
author = 'flow-project'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = '0.3.0'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'numpydoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'Flowdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Flow.tex', 'Flow Documentation',
'flow-project', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'flow', 'Flow Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Flow', 'Flow Documentation',
author, 'Flow', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
numpydoc_use_plots = True
numpydoc_show_class_members = False
numpydoc_show_inherited_class_members = True
numpydoc_class_members_toctree = True
numpydoc_use_blockquotes = True
autodoc_mock_imports = ["django"]
|
cathywu/flow
|
docs/source/conf.py
|
Python
|
mit
| 6,043
|
#!/usr/bin/env python
# _*_ coding: utf-8 _*_
'''
@Author : Soner
@version :
@Time : 2017/11/3/0003 16:32
@license : Copyright(C), Your Company
'''
from selenium import webdriver
from time import sleep
import os
driver = webdriver.Chrome()
file_path = "file://" + os.path.abspath("Checkbox.html")
driver.get(file_path)
'''
# Find checkboxes by iterating over elements and checking the type attribute
inputs = driver.find_elements_by_tag_name("input")
for i in inputs:
if i.get_attribute("type") == "checkbox":
i.click()
sleep(2)
'''
'''
# Find checkboxes via XPath
checks = driver.find_elements_by_xpath("//input[@type='checkbox']")
for check in checks:
check.click()
sleep(1.5)
'''
# Find checkboxes via a CSS selector
checkboxes = driver.find_elements_by_css_selector("input[type=checkbox]")
for checkbox in checkboxes:
checkbox.click()
sleep(1.5)
# Print the number of type=checkbox elements on the current page
print(len(checkboxes))
# Deselect the last type=checkbox checkbox
# pop() retrieves one element of the list, by default the last one (-1); pop(0) retrieves the first
driver.find_elements_by_css_selector("input[type=checkbox]").pop().click()
sleep(1)
driver.quit()
|
lsp84ch83/PyText
|
Selenium/Web元素定位及操作/Checkbox定位一组元素.py
|
Python
|
gpl-3.0
| 1,156
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_resourcemanager_project
description:
- Represents a GCP Project. A project is a container for ACLs, APIs, App Engine Apps,
VMs, and other Google Cloud Platform resources.
short_description: Creates a GCP Project
version_added: 2.8
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
state:
description:
- Whether the given object should exist in GCP
choices:
- present
- absent
default: present
name:
description:
- 'The user-assigned display name of the Project. It must be 4 to 30 characters.
Allowed characters are: lowercase and uppercase letters, numbers, hyphen, single-quote,
double-quote, space, and exclamation point.'
required: false
labels:
description:
- The labels associated with this Project.
- 'Label keys must be between 1 and 63 characters long and must conform to the
following regular expression: `[a-z]([-a-z0-9]*[a-z0-9])?`.'
- Label values must be between 0 and 63 characters long and must conform to the
regular expression `([a-z]([-a-z0-9]*[a-z0-9])?)?`.
- No more than 256 labels can be associated with a given resource.
- Clients should store labels in a representation such as JSON that does not depend
on specific characters being disallowed .
required: false
parent:
description:
- A parent organization.
required: false
suboptions:
type:
description:
- Must be organization.
required: false
id:
description:
- Id of the organization.
required: false
id:
description:
- The unique, user-assigned ID of the Project. It must be 6 to 30 lowercase letters,
digits, or hyphens. It must start with a letter.
- Trailing hyphens are prohibited.
required: true
extends_documentation_fragment: gcp
'''
EXAMPLES = '''
- name: create a project
gcp_resourcemanager_project:
name: My Sample Project
id: alextest-{{ 10000000000 | random }}
auth_kind: "serviceaccount"
service_account_file: "/tmp/auth.pem"
parent:
type: organization
id: 636173955921
state: present
'''
RETURN = '''
number:
description:
- Number uniquely identifying the project.
returned: success
type: int
lifecycleState:
description:
- The Project lifecycle state.
returned: success
type: str
name:
description:
- 'The user-assigned display name of the Project. It must be 4 to 30 characters.
Allowed characters are: lowercase and uppercase letters, numbers, hyphen, single-quote,
double-quote, space, and exclamation point.'
returned: success
type: str
createTime:
description:
- Time of creation.
returned: success
type: str
labels:
description:
- The labels associated with this Project.
- 'Label keys must be between 1 and 63 characters long and must conform to the following
regular expression: `[a-z]([-a-z0-9]*[a-z0-9])?`.'
- Label values must be between 0 and 63 characters long and must conform to the
regular expression `([a-z]([-a-z0-9]*[a-z0-9])?)?`.
- No more than 256 labels can be associated with a given resource.
- Clients should store labels in a representation such as JSON that does not depend
on specific characters being disallowed .
returned: success
type: dict
parent:
description:
- A parent organization.
returned: success
type: complex
contains:
type:
description:
- Must be organization.
returned: success
type: str
id:
description:
- Id of the organization.
returned: success
type: str
id:
description:
- The unique, user-assigned ID of the Project. It must be 6 to 30 lowercase letters,
digits, or hyphens. It must start with a letter.
- Trailing hyphens are prohibited.
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, remove_nones_from_dict, replace_resource_dict
import json
import time
################################################################################
# Main
################################################################################
def main():
"""Main function"""
module = GcpModule(
argument_spec=dict(
state=dict(default='present', choices=['present', 'absent'], type='str'),
name=dict(type='str'),
labels=dict(type='dict'),
parent=dict(type='dict', options=dict(type=dict(type='str'), id=dict(type='str'))),
id=dict(required=True, type='str'),
)
)
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/cloud-platform']
state = module.params['state']
fetch = fetch_resource(module, self_link(module))
changed = False
if fetch:
if state == 'present':
if is_different(module, fetch):
update(module, self_link(module))
fetch = fetch_resource(module, self_link(module))
changed = True
else:
delete(module, self_link(module))
fetch = {}
changed = True
else:
if state == 'present':
fetch = create(module, collection(module))
changed = True
else:
fetch = {}
fetch.update({'changed': changed})
module.exit_json(**fetch)
def create(module, link):
auth = GcpSession(module, 'resourcemanager')
return wait_for_operation(module, auth.post(link, resource_to_request(module)))
def update(module, link):
auth = GcpSession(module, 'resourcemanager')
return wait_for_operation(module, auth.put(link, resource_to_request(module)))
def delete(module, link):
auth = GcpSession(module, 'resourcemanager')
return wait_for_operation(module, auth.delete(link))
def resource_to_request(module):
request = {
u'projectId': module.params.get('id'),
u'name': module.params.get('name'),
u'labels': module.params.get('labels'),
u'parent': ProjectParent(module.params.get('parent', {}), module).to_request(),
}
return_vals = {}
for k, v in request.items():
if v or v is False:
return_vals[k] = v
return return_vals
def fetch_resource(module, link, allow_not_found=True):
auth = GcpSession(module, 'resourcemanager')
return return_if_object(module, auth.get(link), allow_not_found)
def self_link(module):
return "https://cloudresourcemanager.googleapis.com/v1/projects/{id}".format(**module.params)
def collection(module):
return "https://cloudresourcemanager.googleapis.com/v1/projects".format(**module.params)
def return_if_object(module, response, allow_not_found=False):
# If not found, return nothing.
if allow_not_found and response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
# SQL only: return on 403 if not exist
if allow_not_found and response.status_code == 403:
return None
try:
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
module.fail_json(msg="Invalid JSON response with error: %s" % inst)
if navigate_hash(result, ['error', 'message']):
module.fail_json(msg=navigate_hash(result, ['error', 'message']))
return result
def is_different(module, response):
request = resource_to_request(module)
response = response_to_hash(module, response)
# Remove all output-only from response.
response_vals = {}
for k, v in response.items():
if k in request:
response_vals[k] = v
request_vals = {}
for k, v in request.items():
if k in response:
request_vals[k] = v
return GcpRequest(request_vals) != GcpRequest(response_vals)
# Remove unnecessary properties from the response.
# This is for doing comparisons with Ansible's current parameters.
def response_to_hash(module, response):
return {
u'projectNumber': response.get(u'number'),
u'lifecycleState': response.get(u'lifecycleState'),
u'name': response.get(u'name'),
u'createTime': response.get(u'createTime'),
u'labels': response.get(u'labels'),
u'parent': ProjectParent(response.get(u'parent', {}), module).from_response(),
}
def async_op_url(module, extra_data=None):
if extra_data is None:
extra_data = {}
url = "https://cloudresourcemanager.googleapis.com/v1/{op_id}"
combined = extra_data.copy()
combined.update(module.params)
return url.format(**combined)
def wait_for_operation(module, response):
op_result = return_if_object(module, response)
if op_result is None:
return {}
status = navigate_hash(op_result, ['done'])
wait_done = wait_for_completion(status, op_result, module)
raise_if_errors(op_result, ['error'], module)
return navigate_hash(wait_done, ['response'])
def wait_for_completion(status, op_result, module):
op_id = navigate_hash(op_result, ['name'])
op_uri = async_op_url(module, {'op_id': op_id})
if not status:
raise_if_errors(op_result, ['error'], module)
time.sleep(1.0)
op_result = fetch_resource(module, op_uri)
status = navigate_hash(op_result, ['done'])
return op_result
def raise_if_errors(response, err_path, module):
errors = navigate_hash(response, err_path)
if errors is not None:
module.fail_json(msg=errors)
class ProjectParent(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict({u'type': self.request.get('type'), u'id': self.request.get('id')})
def from_response(self):
return remove_nones_from_dict({u'type': self.request.get(u'type'), u'id': self.request.get(u'id')})
if __name__ == '__main__':
main()
|
valentin-krasontovitsch/ansible
|
lib/ansible/modules/cloud/google/gcp_resourcemanager_project.py
|
Python
|
gpl-3.0
| 11,435
|
class Solution(object):
def uniquePathsWithObstacles(self, obstacleGrid):
"""
:type obstacleGrid: List[List[int]]
:rtype: int
"""
if len(obstacleGrid) == 0:
return 0
if len(obstacleGrid[0]) == 0:
return 0
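# d[x][y] memoizes the number of paths from cell (x, y) to the bottom-right
# corner; -1 marks an entry that has not been computed yet.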
def __uniq_paths(x, y, d, obsGrid):
if d[x][y] == -1:
if obsGrid[x][y] == 1:
d[x][y] = 0
elif x == len(obsGrid) - 1 and y == len(obsGrid[0]) - 1:
d[x][y] = 1
else:
c = 0
if x + 1 < len(obsGrid):
c = c + __uniq_paths(x+1, y, d,obsGrid)
if y + 1 < len(obsGrid[0]):
c = c + __uniq_paths(x, y+1, d, obsGrid)
d[x][y] = c
return d[x][y]
d = []
for i in range(len(obstacleGrid)):
d.append([])
for j in range(len(obstacleGrid[i])):
d[i].append(-1)
__uniq_paths(0, 0, d, obstacleGrid)
return d[0][0]
if __name__ == '__main__':
s = Solution()
print s.uniquePathsWithObstacles([[0,1], [0, 0]])
|
benquike/cheatsheets
|
algo/leetcode/dp/uniq_paths2.py
|
Python
|
cc0-1.0
| 1,199
|
"""Sensor for checking the battery level of Roomba."""
from homeassistant.components.sensor import SensorEntity
from homeassistant.components.vacuum import STATE_DOCKED
from homeassistant.const import DEVICE_CLASS_BATTERY, PERCENTAGE
from homeassistant.helpers.icon import icon_for_battery_level
from .const import BLID, DOMAIN, ROOMBA_SESSION
from .irobot_base import IRobotEntity
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the iRobot Roomba vacuum cleaner."""
domain_data = hass.data[DOMAIN][config_entry.entry_id]
roomba = domain_data[ROOMBA_SESSION]
blid = domain_data[BLID]
roomba_vac = RoombaBattery(roomba, blid)
async_add_entities([roomba_vac], True)
class RoombaBattery(IRobotEntity, SensorEntity):
"""Class to hold Roomba Sensor basic info."""
@property
def name(self):
"""Return the name of the sensor."""
return f"{self._name} Battery Level"
@property
def unique_id(self):
"""Return the ID of this sensor."""
return f"battery_{self._blid}"
@property
def device_class(self):
"""Return the device class of the sensor."""
return DEVICE_CLASS_BATTERY
@property
def unit_of_measurement(self):
"""Return the unit_of_measurement of the device."""
return PERCENTAGE
@property
def icon(self):
"""Return the icon for the battery."""
charging = bool(self._robot_state == STATE_DOCKED)
return icon_for_battery_level(
battery_level=self._battery_level, charging=charging
)
@property
def state(self):
"""Return the state of the sensor."""
return self._battery_level
|
w1ll1am23/home-assistant
|
homeassistant/components/roomba/sensor.py
|
Python
|
apache-2.0
| 1,712
|
from PIL import Image
import sys
def average_colour(image):
colour_tuple = [None, None, None]
for channel in range(3):
# Get data for one channel at a time
pixels = image.getdata(band=channel)
values = []
for pixel in pixels:
values.append(pixel)
colour_tuple[channel] = sum(values) / len(values)
return tuple([int(i) for i in colour_tuple])
def most_frequent_colour(image):
w, h = image.size
pixels = image.getcolors(w * h)
most_frequent_pixel = pixels[0]
for count, colour in pixels:
if count > most_frequent_pixel[0]:
most_frequent_pixel = (count, colour)
return most_frequent_pixel[1]
def average_colour_in_k_clusters(image, k):
pass
def compare(title, image, colour_tuple):
image.show(title=title)
image = Image.new("RGB", (200, 200,), colour_tuple)
return image
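# Note: kmeans() below is an unfinished sketch -- getRandomCentroids() and
# shouldStop() are not defined in this file, and the function never returns
# the centroids it computes.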
def kmeans(pixels, k):
numFeatures = len(pixels)
centroids = getRandomCentroids(numFeatures, k)
iterations = 0
oldCentroids = None
while not shouldStop(oldCentroids, centroids, iterations):
oldCentroids = centroids
iterations += 1
def save(name, result, image):
image.save("images/results/{}.jpg".format(name))
sample = Image.new("RGB", (200, 200,), result)
sample.save("images/results/{}-result.jpg".format(name))
def main():
image = Image.open("images/DSC_6883.jpg")
if "mode" in sys.argv:
result = most_frequent_colour(image)
if "ave" in sys.argv:
result = average_colour(image)
save("Wheatbelt", result, image)
if __name__ == "__main__":
main()
|
frodo4fingers/swico
|
dominant_colour.py
|
Python
|
mit
| 1,649
|
from django.contrib import admin
from django.contrib.sites.models import RequestSite
from django.contrib.sites.models import Site
from django.utils.translation import ugettext_lazy as _
from registration.models import RegistrationProfile
class RegistrationAdmin(admin.ModelAdmin):
actions = ['activate_users', 'resend_activation_email']
list_display = ('user', 'activation_key_expired')
raw_id_fields = ['user']
search_fields = ('user__username', 'user__first_name')
def activate_users(self, request, queryset):
"""
Activates the selected users, if they are not already
activated.
"""
for profile in queryset:
RegistrationProfile.objects.activate_user(profile.activation_key)
activate_users.short_description = _("Activate users")
def resend_activation_email(self, request, queryset):
"""
Re-sends activation emails for the selected users.
Note that this will *only* send activation emails for users
who are eligible to activate; emails will not be sent to users
whose activation keys have expired or who have already
activated.
"""
if Site._meta.installed:
site = Site.objects.get_current()
else:
site = RequestSite(request)
for profile in queryset:
if not profile.activation_key_expired():
profile.send_activation_email(site)
resend_activation_email.short_description = _("Re-send activation emails")
admin.site.register(RegistrationProfile, RegistrationAdmin)
|
Apreche/Turnflict
|
utils/registration/admin.py
|
Python
|
mit
| 1,605
|
import datetime as dt
import numpy as np
import math
def simulate(na_price_nodate, dt_start, dt_end, symbols, portf_allocation):
''' Simulate a Portfolio Allocation Function
Returns sharpe ratio and cumulative returns
na_price_nodate = np.array of data without column 0 of dates
symbols = if any of them is negative, it will flip the column'''
    # 1. Get closing prices for each day for the selected symbols
## STEP 1: CUMULATIVE RETURN
for i in range(len(symbols)):
if symbols[i][0] == '-':
na_price_nodate[:,i] = np.flipud(na_price_nodate[:,i])
# Normalizing the prices to start at 1 and see relative returns
na_normalized_price = na_price_nodate / na_price_nodate[0, :]
na_normalized_price = na_normalized_price * portf_allocation
cum_ret = np.nansum(na_normalized_price[-1]) / np.nansum(na_normalized_price[0])
#print 'Cumulative returns:',cum_ret
## STEP 2: AVERAGE DAILY RETURN
one_row = np.nansum(na_normalized_price, axis=1)
for i in range(len(one_row)-1, 0, -1):
one_row[i] = (one_row[i] / one_row[i-1]) - 1
one_row[0] = 0
av_day = np.mean(one_row)
#print 'Average:',av_day
## STEP 3: STANDARD DEVIATION
std_day = np.std(one_row)
#print 'STD:',std_day
## STEP 4: SHARPE
trading_days = ((dt_end-dt_start).days * 252) / 365
sharpe_rt = (av_day / std_day) * np.sqrt(trading_days)
if math.isnan(sharpe_rt):
sharpe_rt = 0
#print 'Sharpe:',sharpe_rt
    ## RETURNS SHARPE RATIO AND CUMULATIVE RETURNS (the sign of some price columns
    ## may have been flipped above if going short was allowed)
return sharpe_rt, cum_ret
#print 'Cumulative returns:',cum_ret
#print 'Average:',av_day
#print 'STD:',std_day
#print 'Sharpe:',sharpe_rt
# EXAMPLES
# _______________________________________
#symbols = ['EURCAD', 'EURGBP']
#portf_allocation = [0.5, 0.5]
#dt_start = dt.datetime(2011, 1, 1)
#dt_end = dt.datetime(2011, 12, 31)
#exchange = 'fx'
#import get_data
#na_price = get_data.get_data(symbols, exchange, dt_start, dt_end)
#dt_end = dt.datetime(2014, 10, 11)
#dt_start = dt_end-dt.timedelta(30)
#exchange = 'fx'
#portf_allocation = [0.2, 0.1, 0.7]
#symbols = ['EURGBP', 'EURUSD', 'EURCHF']
#import get_data
#na_price = get_data.get_data(symbols, exchange, dt_start, dt_end)
#symbols = ['-EURGBP', '-EURUSD', 'EURCHF']
#dt_start = dt.datetime(2000, 4, 14)
#dt_end = dt.datetime(2000, 5, 26)
#exchange = 'mc'
#portf_allocation = [0.20000000000000001, 0.59999999999999998, 0.10000000000000001, 0.0, 0.0, 0.10000000000000001]
#symbols = ['ABE', 'FCC', 'IBE', 'SAN', 'REP', 'TEF']
#import get_data
#na_price = get_data.get_data(symbols, exchange, dt_start, dt_end)
#symbols = ['-ABE', 'FCC', '-IBE', '-SAN', 'REP', '-TEF']
#dt_start = dt.datetime(2003, 3, 7)
#dt_end = dt.datetime(2003, 4, 18)
#exchange = 'mc'
#portf_allocation = [0.40000000000000002, 0.0, 0.29999999999999999, 0.20000000000000001, 0.0, 0.10000000000000001]
#symbols = ['ABE', 'FCC', 'IBE', 'SAN', 'REP', 'TEF']
#import get_data
#na_price = get_data.get_data(symbols, exchange, dt_start, dt_end)
#symbols = ['ABE', '-FCC', 'IBE', '-SAN', 'REP', '-TEF']
# Call function
#a, b = simulate(na_price[:,1:], dt_start, dt_end, symbols, portf_allocation)
#print 'Sharpe:', a
#print 'Returns:', b
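# Minimal self-contained sketch with synthetic prices (no get_data required; the
# numbers below are made up purely for illustration):
#   na_price_nodate = np.array([[1.00, 2.00], [1.10, 2.05], [1.20, 1.95]])
#   sharpe, cum = simulate(na_price_nodate, dt.datetime(2011, 1, 1),
#                          dt.datetime(2011, 1, 4), ['SYM1', 'SYM2'], [0.5, 0.5])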
|
melrcm/Optimizepy
|
simulate_portfolio_allocation.py
|
Python
|
lgpl-2.1
| 3,352
|
from __future__ import absolute_import
from celery import task
from streamer.models import Streamer
from twitchapp.settings import TWITCH_CLIENT_ID
import json
import urllib
import requests
@task(name='tasks.populate')
def populate():
Streamer.objects.all().delete()
games_req = requests.get('https://api.twitch.tv/kraken/games/top'
+
'?client_id=' + TWITCH_CLIENT_ID,
headers={'Accept':
'application/vnd.twitchtv.v2+json'})
for top_game in json.loads(games_req.text)['top']:
game_req = requests.get('https://api.twitch.tv/kraken/streams?game='
+ urllib.quote_plus(top_game['game']['name'])
+
'&limit=6&client_id=' + TWITCH_CLIENT_ID,
headers={'Accept':
'application/vnd.twitchtv.v3+json'})
game_streams = json.loads(game_req.text)['streams']
for stream in game_streams:
channel = stream['channel']
Streamer.objects.create(
streamer_name=channel['name'],
streamer_views=channel['views'],
streamer_follows=channel['followers'],
streamer_avatar=channel['logo'],
stream_game=stream['game'],
stream_title=channel['status'],
stream_language=channel['broadcaster_language'],
stream_viewers=stream['viewers'],
stream_thumbnail=stream['preview']['large'],
stream_link=channel['url'],
)
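# Illustrative only: as a registered Celery task this is normally triggered
# asynchronously from elsewhere in the project, e.g.
#   from twitchapp.tasks import populate
#   populate.delay()
# (delay() is the standard Celery shortcut for apply_async without extra options).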
|
froxio/who2host
|
twitchapp/tasks.py
|
Python
|
mpl-2.0
| 1,706
|
# -*- coding: utf-8 -*-
from nltk import word_tokenize
import general
import numpy as np
# Maps all current NW sections ("Ressorts") onto the 5 sections used in the experiment
def ressort_mapping(ressort):
result = ''
print(ressort)
if ressort in ['Gütersloh','Bünde','Warburg','Herford','Löhne','Lübbecke','Höxter','Paderborn',
'Enger-Spenge','Bad Oeynhausen','Bielefeld','Schloss Holte', 'Beilagen']:
result = 'Bielefeld'
elif ressort in ['Sport Herford','Sport Bielefeld','Sport Bad Oeynhausen','Sport Paderborn','Sport Bünde',
'Sport Lübbecke','Sport Schloß Holte','Sport Höxter','Sport Gütersloh']:
result = 'Sport Bielefeld'
elif ressort == 'Kultur':
result = 'Kultur'
elif ressort in ['Politik','Politik / Politik']:
result = 'Politik'
elif ressort == 'Sport_Bund':
result = 'Sport_Bund'
return result
# For comparing the user's interests with the title or the text
def compare_string_to_interests(string,interest_list, mode = 'prior_title'):
result = {}
for interest in interest_list:
for word in word_tokenize(string):
if word.lower() == interest.lower():
result[mode + '_interest'] = 1
return result
# For the prior feature based on the sections
def normalize_article_ressort_to_dict(article_ressort,ressort_list):
result = {}
for ressort in ressort_list:
if article_ressort.lower() == ressort.lower():
result[ressort] = 1
else:
result[ressort] = 0
return result
# User X likes section Y and article Z belongs to section Y
def user_specific_ressort_ratings(ressort_ratings_user, ressort_artikel, threshold = 3):
result = {}
for key in ressort_ratings_user:
dict_key = "ressort_specific_%s" % key
if key == ressort_artikel and ressort_ratings_user[key] >= threshold:
result[dict_key] = 1
else:
result[dict_key] = 0
return result
# User X rates section Y with score Z and the article belongs to section Y
def user_specific_ressort_explicit_ratings(ressort_ratings_user,ressort_artikel):
result = {}
for ressort in ressort_ratings_user.keys():
feature_name = 'user_specific_ressort_rating_' + ressort +'_'
for j in range(1,6):
feature_name += '%d' % j
if ressort_ratings_user[ressort] == j and ressort_artikel == ressort:
result[feature_name] = 1
else:
result[feature_name] = 0
return result
# Normalizes pages onto the page ranges researched by Philipp
def normalize_pages(page):
    result = {'1':0,'2':0,'3':0,'4-5':0,'6-7':0,'8':0,'9-16':0,'17-24':0,'25+':0}
    if page == 1:
        result['1'] = 1
    elif page == 2:
        result['2'] = 1
    elif page == 3:
        result['3'] = 1
    elif page < 6:
        result['4-5'] = 1
    elif page < 8:
        result['6-7'] = 1
    elif page == 8:
        result['8'] = 1
    elif page < 17:
        result['9-16'] = 1
    elif page < 25:
        result['17-24'] = 1
    else:
        result['25+'] = 1
    return result
# For the cross features
def compute_cross_features(user_age,user_sex,user_education,article_ressort,article_normalized_page,
ressort_list,pages_list,age_list,sexes_list,edu_list):
cf_age_ressort = {}
cf_sex_ressort = {}
cf_edu_ressort = {}
for ressort in ressort_list:
if article_ressort == ressort:
for age in age_list:
feature = '%s_%s' % (ressort,age)
if user_age == age:
cf_age_ressort[feature] = 1
else:
cf_age_ressort[feature] = 0
for sex in sexes_list:
feature = '%s_%s' % (ressort,sex)
if user_sex == sex:
cf_sex_ressort[feature] = 1
else:
cf_sex_ressort[feature] = 0
for edu in edu_list:
feature = '%s_%s' % (ressort,edu)
if user_education == edu:
cf_edu_ressort[feature] = 1
else:
cf_edu_ressort[feature] = 0
else:
for age in age_list:
feature = "%s_%s" % (ressort,age)
cf_age_ressort[feature] = 0
for sex in sexes_list:
feature = "%s_%s" % (ressort,sex)
cf_sex_ressort[feature] = 0
for edu in edu_list:
feature = "%s_%s" % (ressort,edu)
cf_edu_ressort[feature] = 0
cf_age_page = {}
cf_sex_page = {}
cf_edu_page = {}
for normalized_page in pages_list:
if article_normalized_page == normalized_page:
for age in age_list:
feature = '%s_%s' % (normalized_page,age)
if user_age == age:
cf_age_page[feature] = 1
else:
cf_age_page[feature] = 0
for sex in sexes_list:
feature = '%s_%s' % (normalized_page,sex)
if user_sex == sex:
cf_sex_page[feature] = 1
else:
cf_sex_page[feature] = 0
for edu in edu_list:
feature = '%s_%s' % (normalized_page,edu)
if user_education == edu:
cf_edu_page[feature] = 1
else:
cf_edu_page[feature] = 0
else:
for age in age_list:
feature = "%s_%s" % (normalized_page,age)
cf_age_page[feature] = 0
for sex in sexes_list:
feature = "%s_%s" % (normalized_page,sex)
cf_sex_page[feature] = 0
for edu in edu_list:
feature = "%s_%s" % (normalized_page,edu)
cf_edu_page[feature] = 0
return cf_age_ressort,cf_sex_ressort,cf_edu_ressort,cf_age_page,cf_sex_page,cf_edu_page
# def compute_annotation_esa_comparison(article, user_annotations, mode='pos'):
#
# cos_sims = []
#
# user_annotations = []
#
# for article,annotation in user_annotations:
# if mode == 'pos':
# if annotation == 4:
# #Todo: fetch the ESA vectors for the two articles from the DB
# esa_vec_current_article = []
# esa_vec_article = []
# cos_sim = general.calcualtecos(esa_vec_current_article,esa_vec_article)
# cos_sims.append(cos_sim)
#
# # Only use negatively rated articles
# elif mode == 'neg':
# if annotation == 1:
# #Todo: fetch the ESA vectors for the two articles from the DB
# esa_vec_current_article = []
# esa_vec_article = []
# cos_sim = general.calcualtecos(esa_vec_current_article,esa_vec_article)
# cos_sims.append(cos_sim)
#
#
# return np.min(cos_sims),np.max(cos_sims),np.mean(cos_sims)
def compute_general_feature_dict(user_list, ressort_list,pages_list,age_list,sexes_list,edu_list):
feature_vector = []
for user in user_list:
        #Todo: fetch from the database
user_age = False
user_sex = False
user_education = False
user_annotations = []
user_ressort_ratings = {}
for article, annotation in user_annotations:
            #Todo: fetch from the database
article_ressort = False
article_normalized_page = normalize_pages(False)
ressort_prior_dict = normalize_article_ressort_to_dict(article_ressort,ressort_list)
ressort_user_specific_dict = user_specific_ressort_ratings(user_ressort_ratings,article_ressort)
ressort_user_specific_explicit_rating_dict = user_specific_ressort_explicit_ratings(user_ressort_ratings,article_ressort)
cf_age_ressort, cf_sex_ressort, cf_edu_ressort, cf_age_page, cf_sex_page, cf_edu_page = compute_cross_features(
user_age,user_sex,user_education,article_ressort,article_normalized_page,ressort_list,pages_list,age_list,sexes_list,edu_list
)
            # Note: compute_annotation_esa_comparison is currently commented out above
            # and has to be re-enabled before these two calls can work.
            annotation_comparison_min_pos, annotation_comparison_max_pos, annotation_comparison_mean_pos = compute_annotation_esa_comparison(article, user_annotations, mode='pos')
            annotation_comparison_min_neg, annotation_comparison_max_neg, annotation_comparison_mean_neg = compute_annotation_esa_comparison(article, user_annotations, mode='neg')
annotation_comparison_dict = {'comparison_min_pos':annotation_comparison_min_pos, 'comparison_max_pos':annotation_comparison_max_pos,
'comparison_mean_pos':annotation_comparison_mean_pos,'comparison_min_neg':annotation_comparison_min_neg,
'comparison_max_neg':annotation_comparison_max_neg, 'comparison_mean_neg':annotation_comparison_mean_neg}
            # Insert additional feature extraction here
            # Collect the features in a single dict
feature_dict = {}
feature_dict.update(ressort_prior_dict)
feature_dict.update(ressort_user_specific_dict)
feature_dict.update(ressort_user_specific_explicit_rating_dict)
feature_dict.update(cf_age_ressort)
feature_dict.update(cf_sex_ressort)
feature_dict.update(cf_edu_ressort)
feature_dict.update(cf_age_page)
feature_dict.update(cf_sex_page)
feature_dict.update(cf_edu_page)
feature_dict.update(annotation_comparison_dict)
            # Append the dict to the feature vector
feature_vector.append(feature_dict)
return feature_vector
# #### ESA FEATURES FROM HERE ON; not relevant for now, since the service itself already implements them ########
#
# # Computes the cosine comparison between article and user. Additionally determines whether the score is better
# # than the average of the current issue
# def esa_comparison_interests_article(user, article):
# esa_score_dict, avg_result = compute_cosine_sim_current_issue_for_user(user)
#
# better_than_avg = False
# if esa_score_dict[article] > avg_result:
# better_than_avg = True
#
# return esa_score_dict[article],better_than_avg
#
#
# # Computes a dict with ESA scores for all articles of an issue with respect to one user,
# # normalizes these scores by the best score of the issue, and also
# # returns the average score of the normalized list
# def compute_cosine_sim_current_issue_for_user(user):
# result = {}
# #TODO: provide the ESA vectors for the article and the user
# esa_vec_user = False
#
# #TODO: provide the list of all articles in the current issue
# list_of_articles_in_current_issue = []
#
# for article in list_of_articles_in_current_issue:
# esa_vec_article = False
# result[article] = general.calcualtecos(esa_vec_user,esa_vec_article)
#
# max_score_current_issue_x_user = np.max(result.values())
#
# for article in result.keys():
# result[article] /= max_score_current_issue_x_user
#
# avg_score_current_issue_x_user = np.mean(result.values())
#
# return result, avg_score_current_issue_x_user
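# Illustrative example for compare_string_to_interests above (made-up inputs):
#   compare_string_to_interests("Local football results", ["Football"], mode="prior_title")
#   -> {'prior_title_interest': 1}
# An empty dict is returned when none of the tokens matches an interest.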
|
swalter2/PersonalizationService
|
Service/feature_extraction.py
|
Python
|
mit
| 11,275
|
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import collections
import os.path
import six
import eventlet
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import fileutils
from neutron.agent.common import config as agent_cfg
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.common import utils as common_utils
from neutron.i18n import _LE
LOG = logging.getLogger(__name__)
OPTS = [
cfg.StrOpt('external_pids',
default='$state_path/external/pids',
help=_('Location to store child pid files')),
]
cfg.CONF.register_opts(OPTS)
agent_cfg.register_process_monitor_opts(cfg.CONF)
@six.add_metaclass(abc.ABCMeta)
class MonitoredProcess(object):
@abc.abstractproperty
def active(self):
"""Boolean representing the running state of the process."""
@abc.abstractmethod
def enable(self):
"""Enable the service, or respawn the process."""
class ProcessManager(MonitoredProcess):
"""An external process manager for Neutron spawned processes.
Note: The manager expects uuid to be in cmdline.
"""
def __init__(self, conf, uuid, namespace=None, service=None,
pids_path=None, default_cmd_callback=None,
cmd_addl_env=None, pid_file=None, run_as_root=False):
self.conf = conf
self.uuid = uuid
self.namespace = namespace
self.default_cmd_callback = default_cmd_callback
self.cmd_addl_env = cmd_addl_env
self.pids_path = pids_path or self.conf.external_pids
self.pid_file = pid_file
self.run_as_root = run_as_root
if service:
self.service_pid_fname = 'pid.' + service
self.service = service
else:
self.service_pid_fname = 'pid'
self.service = 'default-service'
common_utils.ensure_dir(os.path.dirname(self.get_pid_file_name()))
def enable(self, cmd_callback=None, reload_cfg=False):
if not self.active:
if not cmd_callback:
cmd_callback = self.default_cmd_callback
cmd = cmd_callback(self.get_pid_file_name())
ip_wrapper = ip_lib.IPWrapper(namespace=self.namespace)
ip_wrapper.netns.execute(cmd, addl_env=self.cmd_addl_env,
run_as_root=self.run_as_root)
elif reload_cfg:
self.reload_cfg()
def reload_cfg(self):
self.disable('HUP')
def disable(self, sig='9'):
pid = self.pid
if self.active:
cmd = ['kill', '-%s' % (sig), pid]
utils.execute(cmd, run_as_root=True)
# In the case of shutting down, remove the pid file
if sig == '9':
fileutils.delete_if_exists(self.get_pid_file_name())
elif pid:
LOG.debug('Process for %(uuid)s pid %(pid)d is stale, ignoring '
'signal %(signal)s', {'uuid': self.uuid, 'pid': pid,
'signal': sig})
else:
LOG.debug('No process started for %s', self.uuid)
def get_pid_file_name(self):
"""Returns the file name for a given kind of config file."""
if self.pid_file:
return self.pid_file
else:
return utils.get_conf_file_name(self.pids_path,
self.uuid,
self.service_pid_fname)
@property
def pid(self):
"""Last known pid for this external process spawned for this uuid."""
return utils.get_value_from_file(self.get_pid_file_name(), int)
@property
def active(self):
pid = self.pid
if pid is None:
return False
cmdline = '/proc/%s/cmdline' % pid
try:
with open(cmdline, "r") as f:
return self.uuid in f.readline()
except IOError:
return False
ServiceId = collections.namedtuple('ServiceId', ['uuid', 'service'])
class ProcessMonitor(object):
def __init__(self, config, resource_type):
"""Handle multiple process managers and watch over all of them.
:param config: oslo config object with the agent configuration.
:type config: oslo_config.ConfigOpts
:param resource_type: can be dhcp, router, load_balancer, etc.
:type resource_type: str
"""
self._config = config
self._resource_type = resource_type
self._monitored_processes = {}
if self._config.AGENT.check_child_processes_interval:
self._spawn_checking_thread()
def register(self, uuid, service_name, monitored_process):
"""Start monitoring a process.
        The given monitored_process will be tied to its uuid+service_name
replacing the old one if it existed already.
The monitored_process should be enabled before registration,
        otherwise ProcessMonitor could try to enable the process itself,
        which could lead to a double enable and, if unlucky enough, two
        processes running, as well as errors in the logs.
:param uuid: An ID of the resource for which the process is running.
:param service_name: A logical service name for this process monitor,
so the same uuid provided via process manager
can reference several different services.
:param monitored_process: MonitoredProcess we want to monitor.
"""
service_id = ServiceId(uuid, service_name)
self._monitored_processes[service_id] = monitored_process
def unregister(self, uuid, service_name):
"""Stop monitoring a process.
The uuid+service_name will be removed from the monitored processes.
        The service must be disabled **after** unregistering, otherwise, if
        the process monitor checks after you disable the process and before
        you unregister it, the process will be respawned and left orphaned
        in the system.
:param uuid: An ID of the resource for which the process is running.
:param service_name: A logical service name for this process monitor,
so the same uuid provided via process manager
can reference several different services.
"""
service_id = ServiceId(uuid, service_name)
self._monitored_processes.pop(service_id, None)
def stop(self):
"""Stop the process monitoring.
This method will stop the monitoring thread, but no monitored
process will be stopped.
"""
self._monitor_processes = False
def _spawn_checking_thread(self):
self._monitor_processes = True
eventlet.spawn(self._periodic_checking_thread)
@lockutils.synchronized("_check_child_processes")
def _check_child_processes(self):
# we build the list of keys before iterating in the loop to cover
# the case where other threads add or remove items from the
# dictionary which otherwise will cause a RuntimeError
for service_id in list(self._monitored_processes):
pm = self._monitored_processes.get(service_id)
if pm and not pm.active:
LOG.error(_LE("%(service)s for %(resource_type)s "
"with uuid %(uuid)s not found. "
"The process should not have died"),
{'service': pm.service,
'resource_type': self._resource_type,
'uuid': service_id.uuid})
self._execute_action(service_id)
eventlet.sleep(0)
def _periodic_checking_thread(self):
while self._monitor_processes:
eventlet.sleep(self._config.AGENT.check_child_processes_interval)
eventlet.spawn(self._check_child_processes)
def _execute_action(self, service_id):
action = self._config.AGENT.check_child_processes_action
action_function = getattr(self, "_%s_action" % action)
action_function(service_id)
def _respawn_action(self, service_id):
LOG.error(_LE("respawning %(service)s for uuid %(uuid)s"),
{'service': service_id.service,
'uuid': service_id.uuid})
self._monitored_processes[service_id].enable()
def _exit_action(self, service_id):
LOG.error(_LE("Exiting agent as programmed in check_child_processes_"
"actions"))
self._exit_handler(service_id.uuid, service_id.service)
def _exit_handler(self, uuid, service):
"""This is an exit handler for the ProcessMonitor.
It will be called if the administrator configured the exit action in
check_child_processes_actions, and one of our external processes die
unexpectedly.
"""
LOG.error(_LE("Exiting agent because of a malfunction with the "
"%(service)s process identified by uuid %(uuid)s"),
{'service': service, 'uuid': uuid})
raise SystemExit(1)
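# Illustrative register/unregister flow (names such as `router_id` and `build_cmd`
# are hypothetical and not defined in this module):
#   pm = ProcessManager(cfg.CONF, router_id, default_cmd_callback=build_cmd)
#   pm.enable()                                    # start the process before registering it
#   monitor = ProcessMonitor(cfg.CONF, resource_type='router')
#   monitor.register(router_id, 'my-service', pm)
#   ...
#   monitor.unregister(router_id, 'my-service')    # unregister before disabling
#   pm.disable()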
|
suneeth51/neutron
|
neutron/agent/linux/external_process.py
|
Python
|
apache-2.0
| 9,806
|
from django.db import models
# Create your models here.
#MVC MODEL VIEW CONTROLLER
#
class Post(models.Model):
title=models.CharField(max_length=120)
content=models.TextField()
updated=models.DateTimeField(auto_now=True,auto_now_add=False)
timestamp=models.DateTimeField(auto_now=False,auto_now_add=True)
def __unicode__(self):
return self.title
def __str__(self):
return self.title
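# Illustrative only, e.g. from a Django shell (assumes migrations have been run):
#   Post.objects.create(title="Hello", content="First post")
#   Post.objects.all()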
|
onurstats/myBlog
|
posts/models.py
|
Python
|
gpl-3.0
| 396
|
import doto.model
import doto.model.task
CREATE_CMD = """
CREATE TABLE IF NOT EXISTS
timerecords (
id INTEGER NOT NULL,
task_id INTEGER,
start TIMESTAMP,
end TIMESTAMP,
PRIMARY KEY (id),
FOREIGN KEY(task_id) REFERENCES tasks (id)
)
"""
class Timerecord(object):
"""
    A timerecord is a time span for which one worked on a task.
    A timerecord is a time span that is associated with an event.
    The sum of all timerecords is the total amount of work that was put into the Task.
This can be used to track the amount of time one worked a specific task.
This should come in handy for freelancers (like me).
"""
def __init__(self, start, end=None, task_event=None):
"""
"""
self.id = None
self.span = doto.model.TimeSpan(start=start, end=end)
self.task = task_event
@staticmethod
def row_to_obj(row, store):
"""
Create Task from database row
"""
timerecord = doto.model.unwrap_row(store,
row,
Timerecord,
('start', 'end'),
('id',))
task_id = row['task_id']
if task_id is None:
timerecord.task = None
else:
timerecord.task = doto.model.task.get(store, task_id)
return timerecord
@staticmethod
def obj_to_row(obj):
row_dict = doto.model.unwrap_obj(obj, ignore_list=['span', 'task'])
row_dict['task_id'] = obj.task.id if obj.task is not None else None
row_dict['start'] = obj.span.start
row_dict['end'] = obj.span.end
return row_dict
def get_started_timerecords(store):
"""
    Get all timerecords that have not been finished yet (no end timestamp).
    @param store The store to query.
    @return A list of open timerecords
"""
return store.query(Timerecord.row_to_obj, 'SELECT * FROM timerecords WHERE end IS NULL;', ())
insert_query = """INSERT INTO timerecords ( task_id, start, end)
VALUES (:task_id, :start, :end)
;
"""
update_query = """UPDATE timerecords SET task_id = :task_id,
start = :start,
end = :end
WHERE id = :id;
"""
delete_query = 'DELETE FROM timerecords WHERE id = ?;'
update = doto.model.crud.update(update_query, Timerecord)
add_new = doto.model.crud.insert(insert_query, Timerecord)
delete = doto.model.crud.delete(delete_query)
doto.model.setup_module(CREATE_CMD, ())
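# Illustrative sketch; how a `store` object is created is not shown in this module,
# and the (store, obj) call pattern for the crud helpers is only assumed here:
#   record = Timerecord(start=some_start_datetime)   # open-ended time span
#   add_new(store, record)                           # assumed call pattern
#   open_records = get_started_timerecords(store)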
|
tantSinnister/doto
|
doto/model/timerecord.py
|
Python
|
bsd-3-clause
| 2,998
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Shampoo Optimizer.
Variant of Adagrad using one preconditioner matrix per variable dimension.
For details, see https://arxiv.org/abs/1802.09568
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.opt.python.training import matrix_functions
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.platform import tf_logging
from tensorflow.python.training import optimizer
def GetParam(var, timestep):
if callable(var):
return var(timestep)
else:
return var
class ShampooOptimizer(optimizer.Optimizer):
"""The Shampoo Optimizer
Variant of Adagrad using one preconditioner matrix per variable dimension.
For details, see https://arxiv.org/abs/1802.09568
gbar is time-weighted accumulated gradient:
gbar[t] = gbar_decay[t] * gbar[t-1] + gbar_weight[t] * g[t]
mat_gbar is time-weighted accumulated gradient square:
mat_gbar_j[t] = mat_gbar_decay[t] * mat_gbar_j[t-1]
+ mat_gbar_weight[t] * gg_j[t]
where if g[t] = g_abcd then gg_a[t] = g_abcd g_a'bcd (Einstein notation)
Update rule:
w[t+1] = w[t] - learning_rate[t] * Prod_j mat_gbar_j[t]^(-alpha/n) gbar[t]
Again, mat_gbar_j[t]^(-alpha) gbar[t] is a tensor contraction along the
j'th dimension of gbar[t] with the first dimension of
mat_gbar_j[t]^(-alpha/n), where alpha is a hyperparameter,
and n = rank of the variable.
Prod_j represents doing this contraction for all j in 0..n-1.
Typically learning_rate is constant, but could be time dependent by passing
a lambda function that depends on step.
"""
def __init__(self,
global_step=0,
max_matrix_size=768,
gbar_decay=0.0,
gbar_weight=1.0,
mat_gbar_decay=1.0,
mat_gbar_weight=1.0,
learning_rate=1.0,
svd_interval=1,
precond_update_interval=1,
epsilon=1e-4,
alpha=0.5,
use_iterative_root=False,
use_locking=False,
name="Shampoo"):
"""Default values of the various hyper-parameters.
gbar_decay, gbar_weight etc. can be a float or a time varying parameter.
For time-varying parameters use e.g. "lambda T: T / (T + 1.0)"
where the expression in the lambda is a tensorflow expression
Args:
global_step: tensorflow variable indicating the step.
max_matrix_size: We do not perform SVD for matrices larger than this.
gbar_decay:
gbar_weight: Used to update gbar:
gbar[t] = gbar_decay[t] * gbar[t-1] + gbar_weight[t] * g[t]
mat_gbar_decay:
mat_gbar_weight: Used to update mat_gbar:
mat_gbar_j[t] = mat_gbar_decay[t] * mat_gbar_j[t-1]
+ mat_gbar_weight[t] * gg_j[t]
learning_rate: Similar to SGD
svd_interval: We should do SVD after this many steps. Default = 1, i.e.
every step. Usually 20 leads to no loss of accuracy, and
50 or 100 is also OK. May also want more often early,
and less often later - set in caller as for example:
"svd_interval = lambda(T): tf.cond(
T < 2000, lambda: 20.0, lambda: 1000.0)"
precond_update_interval: We should update the preconditioners after
this many steps. Default = 1. Usually less than
svd_interval.
epsilon: epsilon * I_n is added to each mat_gbar_j for stability for
non-diagonal version of shampoo.
alpha: total power of the preconditioners.
use_iterative_root: should the optimizer use SVD (faster) or the
iterative root method (for TPU) for finding the
roots of PSD matrices.
use_locking:
name: name of optimizer.
"""
super(ShampooOptimizer, self).__init__(use_locking, name)
self._global_step = math_ops.cast(global_step, dtypes.float32)
self._max_matrix_size = max_matrix_size
self._gbar_decay = gbar_decay
self._gbar_weight = gbar_weight
self._mat_gbar_decay = mat_gbar_decay
self._mat_gbar_weight = mat_gbar_weight
self._learning_rate = learning_rate
self._svd_interval = svd_interval
self._precond_update_interval = precond_update_interval
self._epsilon = epsilon
self._alpha = alpha
self._use_iterative_root = use_iterative_root
self._name = name
def _create_slots(self, var_list):
for v in var_list:
with ops.colocate_with(v):
_ = self._zeros_slot(v, "gbar", self._name)
shape = np.array(v.get_shape())
for i, d in enumerate(shape):
d_tensor = ops.convert_to_tensor(d)
if d <= self._max_matrix_size:
mat_g_init = array_ops.zeros_like(linalg_ops.eye(d_tensor))
if self._svd_interval > 1:
_ = self._get_or_make_slot(v, linalg_ops.eye(d_tensor),
"H_" + str(i), self._name)
else:
mat_g_init = array_ops.zeros([d_tensor])
_ = self._get_or_make_slot(v, mat_g_init, "Gbar_" + str(i),
self._name)
def _resource_apply_dense(self, grad, var):
return self._apply_dense(grad, var)
def _apply_dense(self, grad, var):
return self._apply_gradient(grad, var)
def _resource_apply_sparse(self, grad_values, var, grad_indices):
return self._apply_sparse_shared(grad_values, grad_indices, var)
def _apply_sparse(self, grad, var):
return self._apply_sparse_shared(grad.values, grad.indices, var)
def _apply_sparse_shared(self, grad_values, grad_indices, var):
if var.get_shape()[0] <= self._max_matrix_size or self._gbar_decay != 0.0:
# The dimension is small enough, we can make the variable dense and
# do a dense update
dense_grad = array_ops.scatter_nd(
array_ops.expand_dims(grad_indices, axis=1), grad_values,
array_ops.shape(var, out_type=grad_indices.dtype))
return self._apply_gradient(dense_grad, var)
return self._apply_gradient(grad_values, var, grad_indices)
def _weighted_average(self, var, weight, weight_t, rest):
"""Computes exponential weighted average: var = weight_t * var + rest.
Important to ensure that var does not occur in rest, otherwise
we can get race conditions in a distributed setting.
Args:
var: variable to be updated
weight: parameter to be checked. If it is a constant, we can optimize.
weight_t: current value of parameter, used for weighting
rest: the remaining tensor to be added
Returns:
updated variable.
"""
if weight == 0.0:
return rest # no need to update var, we will never use it.
if weight == 1.0: # common case
return state_ops.assign_add(var, rest)
# The op below can cause race conditions in a distributed setting,
# since computing weight_t * var + rest can take some time, during
# which var may be set by another worker. To prevent this, it should
# be implemented as a C++ op.
return var.assign_add((weight_t - 1) * var + rest)
def _update_mat_g(self, mat_g, grad, axes, mat_gbar_decay,
mat_gbar_weight, i):
"""Updates the cumulative outer products of the gradients.
Args:
mat_g: the matrix to be updated
grad: the gradient of the variable
axes: a list of k-1 integers 0 to k-1, except i
mat_gbar_decay: constant for weighted average:
mat_g = mat_g * decay + grad * weight
mat_gbar_weight: constant for weighted average
i: index of dimension to be updated.
Returns:
updated mat_g = mat_g * mat_gbar_decay + grad_outer * mat_gbar_weight
In Einstein notation if i = 0: grad_outer_aa'= g_abcd g_a'bcd
thus grad_outer is a matrix d_i x d_i, where d_i is the size of the
i'th dimension of g.
Alternate view: If mat_i(grad) is the flattening of grad to a
d_i x (d_1d_2...d_{i-1}d_{i+1}...d_k) matrix, then
grad_outer = mat_i(grad) mat_i(grad).transpose
"""
grad_outer = math_ops.tensordot(grad, grad, axes=(axes, axes),
name="grad_outer_" + str(i))
return self._weighted_average(mat_g, self._mat_gbar_decay, mat_gbar_decay,
mat_gbar_weight * grad_outer)
def _compute_power_svd(self, var, mat_g, mat_g_size, alpha, mat_h_slot_name):
"""Computes mat_h = mat_g^alpha using svd. mat_g is a symmetric PSD matrix.
Args:
var: the variable we are updating.
mat_g: the symmetric PSD matrix whose power it to be computed
mat_g_size: size of mat_g
alpha: a real number
mat_h_slot_name: name of slot to store the power, if needed.
Returns:
mat_h = mat_g^alpha
Stores mat_h in the appropriate slot, if it exists.
Note that mat_g is PSD. So we could use linalg_ops.self_adjoint_eig.
"""
if mat_g_size == 1:
mat_h = math_ops.pow(mat_g + self._epsilon, alpha)
else:
damping = self._epsilon * linalg_ops.eye(
math_ops.cast(mat_g_size, dtypes.int32))
diag_d, mat_u, mat_v = linalg_ops.svd(mat_g + damping, full_matrices=True)
mat_h = math_ops.matmul(
mat_v * math_ops.pow(math_ops.maximum(diag_d, self._epsilon), alpha),
array_ops.transpose(mat_u))
if mat_h_slot_name is not None:
return state_ops.assign(self.get_slot(var, mat_h_slot_name), mat_h)
return mat_h
def _compute_power_iter(self, var, mat_g, mat_g_size, alpha, mat_h_slot_name,
iter_count=100, epsilon=1e-6):
"""Computes mat_g^alpha, where alpha = -1/p, p a positive integer."""
mat_g_sqrt = matrix_functions.matrix_square_root(mat_g, mat_g_size,
iter_count, self._epsilon)
mat_h = matrix_functions.matrix_inverse_pth_root(
mat_g_sqrt,
mat_g_size,
2 * alpha,
iter_count,
epsilon,
ridge_epsilon=0.0)
if mat_h_slot_name is not None:
return state_ops.assign(self.get_slot(var, mat_h_slot_name), mat_h)
return mat_h
def _compute_power(self, var, mat_g, mat_g_size, alpha, mat_h_slot_name=None):
"""Just a switch between the iterative power vs svd."""
with ops.name_scope("matrix_iterative_power"):
if self._use_iterative_root:
return self._compute_power_iter(var, mat_g, mat_g_size, alpha,
mat_h_slot_name)
else:
return self._compute_power_svd(var, mat_g, mat_g_size, alpha,
mat_h_slot_name)
def _apply_gradient(self, grad, var, indices=None):
"""The main function to update a variable.
Args:
grad: A Tensor containing gradient to apply.
var: A Tensor containing the variable to update.
indices: An array of integers, for sparse update.
Returns:
Updated variable var = var - learning_rate * preconditioner * grad
If the gradient is dense, var and grad have the same shape.
If the update is sparse, then the first dimension of the gradient and var
may differ, others are all the same. In this case the indices array
provides the set of indices of the variable which are to be updated with
each row of the gradient.
"""
global_step = self._global_step + 1
# Update accumulated weighted average of gradients
gbar = self.get_slot(var, "gbar")
gbar_decay_t = GetParam(self._gbar_decay, global_step)
gbar_weight_t = GetParam(self._gbar_weight, global_step)
if indices is not None:
# Note - the sparse update is not easily implemented, since the
# algorithm needs all indices of gbar to be updated
# if mat_gbar_decay != 1 or mat_gbar_decay != 0.
# One way to make mat_gbar_decay = 1 is by rescaling.
# If we want the update:
# G_{t+1} = a_{t+1} G_t + b_{t+1} w_t
# define:
# r_{t+1} = a_{t+1} * r_t
# h_t = G_t / r_t
# Then:
# h_{t+1} = h_t + (b_{t+1} / r_{t+1}) * w_t
# So we get the mat_gbar_decay = 1 as desired.
# We can implement this in a future version as needed.
# However we still need gbar_decay = 0, otherwise all indices
# of the variable will need to be updated.
if self._gbar_decay != 0.0:
tf_logging.warning("Not applying momentum for variable: %s" % var.name)
gbar_updated = grad
else:
gbar_updated = self._weighted_average(gbar, self._gbar_decay,
gbar_decay_t,
gbar_weight_t * grad)
# Update the preconditioners and compute the preconditioned gradient
shape = var.get_shape()
mat_g_list = []
for i in range(len(shape)):
mat_g_list.append(self.get_slot(var, "Gbar_" + str(i)))
mat_gbar_decay_t = GetParam(self._mat_gbar_decay, global_step)
mat_gbar_weight_t = GetParam(self._mat_gbar_weight, global_step)
preconditioned_grad = gbar_updated
v_rank = len(mat_g_list)
neg_alpha = - GetParam(self._alpha, global_step) / v_rank
svd_interval = GetParam(self._svd_interval, global_step)
precond_update_interval = GetParam(self._precond_update_interval,
global_step)
for i, mat_g in enumerate(mat_g_list):
# axes is the list of indices to reduce - everything but the current i.
axes = list(range(i)) + list(range(i+1, v_rank))
if shape[i] <= self._max_matrix_size:
# If the tensor size is sufficiently small perform full Shampoo update
# Note if precond_update_interval > 1 and mat_gbar_decay_t != 1, this
# is not strictly correct. However we will use it for now, and
# fix if needed. (G_1 = aG + bg ==> G_n = a^n G + (1+a+..+a^{n-1})bg)
# pylint: disable=g-long-lambda,cell-var-from-loop
mat_g_updated = control_flow_ops.cond(
math_ops.mod(global_step, precond_update_interval) < 1,
lambda: self._update_mat_g(
mat_g, grad, axes, mat_gbar_decay_t,
mat_gbar_weight_t * precond_update_interval, i),
lambda: mat_g)
mat_g_updated = mat_g_updated / float(shape[i].value)
if self._svd_interval == 1:
mat_h = self._compute_power(var, mat_g_updated, shape[i], neg_alpha)
else:
mat_h = control_flow_ops.cond(
math_ops.mod(global_step, svd_interval) < 1,
lambda: self._compute_power(var, mat_g_updated, shape[i],
neg_alpha, "H_" + str(i)),
lambda: self.get_slot(var, "H_" + str(i)))
# mat_h is a square matrix of size d_i x d_i
# preconditioned_grad is a d_i x ... x d_n x d_0 x ... d_{i-1} tensor
# After contraction with a d_i x d_i tensor
# it becomes a d_{i+1} x ... x d_n x d_0 x ... d_i tensor
# (the first dimension is contracted out, and the second dimension of
# mat_h is appended). After going through all the indices, it becomes
# a d_0 x ... x d_n tensor again.
preconditioned_grad = math_ops.tensordot(preconditioned_grad, mat_h,
axes=([0], [0]),
name="precond_" + str(i))
else:
# Tensor size is too large -- perform diagonal Shampoo update
# Only normalize non-vector cases.
if axes:
normalizer = 1.0 if indices is not None else float(shape[i].value)
grad_outer = math_ops.reduce_sum(grad * grad, axis=axes) / normalizer
else:
grad_outer = grad * grad
if i == 0 and indices is not None:
assert self._mat_gbar_decay == 1.0
mat_g_updated = state_ops.scatter_add(mat_g, indices,
mat_gbar_weight_t * grad_outer)
mat_g_updated_slice = array_ops.gather(mat_g_updated, indices)
mat_h = array_ops.where(
math_ops.greater(mat_g_updated_slice, 0),
math_ops.pow(mat_g_updated_slice, neg_alpha),
array_ops.zeros_like(mat_g_updated_slice))
else:
mat_g_updated = self._weighted_average(mat_g,
self._mat_gbar_decay,
mat_gbar_decay_t,
mat_gbar_weight_t * grad_outer)
mat_h = array_ops.where(
math_ops.greater(mat_g_updated, 0),
math_ops.pow(mat_g_updated, neg_alpha),
array_ops.zeros_like(mat_g_updated))
# Need to do the transpose to ensure that the tensor becomes
# a d_{i+1} x ... x d_n x d_0 x ... d_i tensor as described above.
preconditioned_grad = array_ops.transpose(
preconditioned_grad, perm=list(range(1, v_rank)) + [0]) * mat_h
# Update the variable based on the Shampoo update
learning_rate_t = GetParam(self._learning_rate, global_step)
if indices is not None:
var_updated = state_ops.scatter_add(
var, indices, -learning_rate_t * preconditioned_grad)
else:
var_updated = state_ops.assign_sub(var,
learning_rate_t * preconditioned_grad)
return var_updated
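# Minimal usage sketch (illustrative only; assumes a TF 1.x graph-mode setup with
# tf.contrib available and a `loss` tensor defined elsewhere):
#   global_step = tf.train.get_or_create_global_step()
#   opt = ShampooOptimizer(global_step=global_step, learning_rate=0.1)
#   train_op = opt.minimize(loss, global_step=global_step)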
|
jbedorf/tensorflow
|
tensorflow/contrib/opt/python/training/shampoo.py
|
Python
|
apache-2.0
| 18,532
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test cloud role maker."""
from __future__ import print_function
import os
import unittest
import paddle.fluid.generator as generator
import time # temp for debug
import paddle.fluid as fluid
import numpy as np
import paddle
import paddle.fluid.core as core
class TestGeneratorSeed(unittest.TestCase):
"""
Test cases for cpu generator seed.
"""
def test_gen_dropout_dygraph(self):
gen = paddle.seed(12343)
fluid.enable_dygraph()
gen.manual_seed(111111111)
st = paddle.get_cuda_rng_state()
x = fluid.layers.uniform_random(
[2, 10], dtype="float32", min=0.0, max=1.0)
x_again = fluid.layers.uniform_random(
[2, 10], dtype="float32", min=0.0, max=1.0)
x_third = fluid.layers.uniform_random(
[2, 10], dtype="float32", min=0.0, max=1.0)
print("x: {}".format(x.numpy()))
print("x_again: {}".format(x_again.numpy()))
x = x + x_again + x_third
y = fluid.layers.dropout(x, 0.5)
paddle.set_cuda_rng_state(st)
x1 = fluid.layers.uniform_random(
[2, 10], dtype="float32", min=0.0, max=1.0)
x1_again = fluid.layers.uniform_random(
[2, 10], dtype="float32", min=0.0, max=1.0)
x1_third = fluid.layers.uniform_random(
[2, 10], dtype="float32", min=0.0, max=1.0)
x1 = x1 + x1_again + x1_third
y1 = fluid.layers.dropout(x1, 0.5)
y_np = y.numpy()
y1_np = y1.numpy()
if core.is_compiled_with_cuda():
print(">>>>>>> dropout dygraph >>>>>>>")
self.assertTrue(np.allclose(y_np, y1_np))
def test_generator_gaussian_random_dygraph(self):
"""Test Generator seed."""
fluid.enable_dygraph()
paddle.seed(12312321111)
x = fluid.layers.gaussian_random([120], dtype="float32")
st1 = paddle.get_cuda_rng_state()
x1 = fluid.layers.gaussian_random([120], dtype="float32")
paddle.set_cuda_rng_state(st1)
x2 = fluid.layers.gaussian_random([120], dtype="float32")
paddle.seed(12312321111)
x3 = fluid.layers.gaussian_random([120], dtype="float32")
x_np = x.numpy()
x1_np = x1.numpy()
x2_np = x2.numpy()
x3_np = x3.numpy()
if core.is_compiled_with_cuda():
print(">>>>>>> gaussian random dygraph >>>>>>>")
self.assertTrue(np.allclose(x1_np, x2_np))
self.assertTrue(np.allclose(x_np, x3_np))
def test_generator_randint_dygraph(self):
"""Test Generator seed."""
fluid.enable_dygraph()
gen = paddle.seed(12312321111)
x = paddle.randint(low=10, shape=[10], dtype="int32")
st1 = gen.get_state()
x1 = paddle.randint(low=10, shape=[10], dtype="int32")
gen.set_state(st1)
x2 = paddle.randint(low=10, shape=[10], dtype="int32")
paddle.seed(12312321111)
x3 = paddle.randint(low=10, shape=[10], dtype="int32")
x_np = x.numpy()
x1_np = x1.numpy()
x2_np = x2.numpy()
x3_np = x3.numpy()
if core.is_compiled_with_cuda():
print(">>>>>>> randint dygraph >>>>>>>")
self.assertTrue(np.allclose(x1_np, x2_np))
self.assertTrue(np.allclose(x_np, x3_np))
def test_gen_TruncatedNormal_initializer(self):
fluid.disable_dygraph()
gen = paddle.seed(123123143)
cur_state = paddle.get_cuda_rng_state()
startup_program = fluid.Program()
train_program = fluid.Program()
with fluid.program_guard(train_program, startup_program):
# example 1:
# attr shape is a list which doesn't contain tensor Variable.
x = fluid.layers.uniform_random(shape=[2, 10])
result_1 = fluid.layers.fc(
input=x,
size=10,
param_attr=fluid.initializer.TruncatedNormal(
loc=0.0, scale=2.0))
result_2 = fluid.layers.fc(
input=x,
size=10,
param_attr=fluid.initializer.TruncatedNormal(
loc=0.0, scale=2.0))
exe = fluid.Executor(fluid.CPUPlace())
exe.run(startup_program)
out1 = exe.run(train_program,
feed={},
fetch_list=[result_1, result_2])
paddle.seed(123123143)
with fluid.program_guard(train_program, startup_program):
exe.run(startup_program)
out2 = exe.run(train_program,
feed={},
fetch_list=[result_1, result_2])
out1_res1 = np.array(out1[0])
out1_res2 = np.array(out1[1])
out2_res1 = np.array(out2[0])
out2_res2 = np.array(out2[1])
if core.is_compiled_with_cuda():
print(">>>>>>> truncated normal static >>>>>>>")
self.assertTrue(np.allclose(out1_res1, out2_res1))
self.assertTrue(np.allclose(out1_res2, out2_res2))
self.assertTrue(not np.allclose(out1_res2, out1_res1))
if __name__ == "__main__":
unittest.main()
|
luotao1/Paddle
|
python/paddle/fluid/tests/unittests/test_cuda_random_seed.py
|
Python
|
apache-2.0
| 5,777
|
"""
Tests for assetstore using any of the modulestores for metadata. May extend to testing the storage options
too.
"""
import unittest
from datetime import datetime, timedelta
import pytest
import ddt
import pytz
from django.test import TestCase
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locator import CourseLocator
from openedx.core.lib.tests import attr
from xmodule.assetstore import AssetMetadata
from xmodule.modulestore import IncorrectlySortedList, ModuleStoreEnum, SortedAssetList
from xmodule.modulestore.exceptions import ItemNotFoundError
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.tests.utils import (
MIXED_MODULESTORE_BOTH_SETUP,
MODULESTORE_SETUPS,
MixedModulestoreBuilder,
XmlModulestoreBuilder
)
class AssetStoreTestData:
"""
Shared data for constructing test assets.
"""
now = datetime.now(pytz.utc)
user_id = 144
user_id_long = int(user_id)
user_email = "me@example.com"
asset_fields = (
AssetMetadata.ASSET_BASENAME_ATTR, 'internal_name', 'pathname', 'locked',
'edited_by', 'edited_by_email', 'edited_on', 'created_by', 'created_by_email', 'created_on',
'curr_version', 'prev_version'
)
all_asset_data = (
('pic1.jpg', 'EKMND332DDBK', 'pix/archive', False,
user_id_long, user_email, now + timedelta(seconds=10 * 1), user_id_long, user_email, now, '14', '13'),
('shout.ogg', 'KFMDONSKF39K', 'sounds', True,
user_id, user_email, now + timedelta(seconds=10 * 2), user_id, user_email, now, '1', None),
('code.tgz', 'ZZB2333YBDMW', 'exercises/14', False,
user_id * 2, user_email, now + timedelta(seconds=10 * 3), user_id * 2, user_email, now, 'AB', 'AA'),
('dog.png', 'PUPY4242X', 'pictures/animals', True,
user_id_long * 3, user_email, now + timedelta(seconds=10 * 4), user_id_long * 3, user_email, now, '5', '4'),
('not_here.txt', 'JJJCCC747', '/dev/null', False,
user_id * 4, user_email, now + timedelta(seconds=10 * 5), user_id * 4, user_email, now, '50', '49'),
('asset.txt', 'JJJCCC747858', '/dev/null', False,
user_id * 4, user_email, now + timedelta(seconds=10 * 6), user_id * 4, user_email, now, '50', '49'),
('roman_history.pdf', 'JASDUNSADK', 'texts/italy', True,
user_id * 7, user_email, now + timedelta(seconds=10 * 7), user_id * 7, user_email, now, '1.1', '1.01'),
('weather_patterns.bmp', '928SJXX2EB', 'science', False,
user_id * 8, user_email, now + timedelta(seconds=10 * 8), user_id * 8, user_email, now, '52', '51'),
('demo.swf', 'DFDFGGGG14', 'demos/easy', False,
user_id * 9, user_email, now + timedelta(seconds=10 * 9), user_id * 9, user_email, now, '5', '4'),
)
class TestSortedAssetList(unittest.TestCase):
"""
Tests the SortedAssetList class.
"""
def setUp(self):
super().setUp()
asset_list = [dict(list(zip(AssetStoreTestData.asset_fields, asset))) for asset in AssetStoreTestData.all_asset_data] # lint-amnesty, pylint: disable=line-too-long
self.sorted_asset_list_by_filename = SortedAssetList(iterable=asset_list)
self.sorted_asset_list_by_last_edit = SortedAssetList(iterable=asset_list, key=lambda x: x['edited_on'])
self.course_key = CourseLocator('org', 'course', 'run')
def test_exception_on_bad_sort(self):
asset_key = self.course_key.make_asset_key('asset', 'pic1.jpg')
with pytest.raises(IncorrectlySortedList):
__ = self.sorted_asset_list_by_last_edit.find(asset_key)
def test_find(self):
asset_key = self.course_key.make_asset_key('asset', 'asset.txt')
assert self.sorted_asset_list_by_filename.find(asset_key) == 0
asset_key_last = self.course_key.make_asset_key('asset', 'weather_patterns.bmp')
assert self.sorted_asset_list_by_filename.find(asset_key_last) == (len(AssetStoreTestData.all_asset_data) - 1)
@attr('mongo')
@ddt.ddt
class TestMongoAssetMetadataStorage(TestCase):
"""
Tests for storing/querying course asset metadata.
"""
XML_MODULESTORE_MAP = {
'XML_MODULESTORE_BUILDER': XmlModulestoreBuilder(),
'MIXED_MODULESTORE_BUILDER': MixedModulestoreBuilder([('xml', XmlModulestoreBuilder())])
}
def setUp(self):
super().setUp()
self.differents = (('different', 'burn.jpg'),)
self.vrmls = (
('vrml', 'olympus_mons.vrml'),
('vrml', 'ponte_vecchio.vrml'),
)
self.regular_assets = (('asset', 'zippy.png'),)
self.alls = self.differents + self.vrmls + self.regular_assets
def _assert_metadata_equal(self, mdata1, mdata2):
"""
So we can use the below date comparison
"""
for attr in mdata1.ATTRS_ALLOWED_TO_UPDATE: # lint-amnesty, pylint: disable=redefined-outer-name
if isinstance(getattr(mdata1, attr), datetime):
self._assert_datetimes_equal(getattr(mdata1, attr), getattr(mdata2, attr))
else:
assert getattr(mdata1, attr) == getattr(mdata2, attr)
def _assert_datetimes_equal(self, datetime1, datetime2):
"""
Don't compare microseconds as mongo doesn't encode below milliseconds
"""
assert datetime1.replace(microsecond=0) == datetime2.replace(microsecond=0)
def _make_asset_metadata(self, asset_loc):
"""
Make a single test asset metadata.
"""
now = datetime.now(pytz.utc)
return AssetMetadata(
asset_loc, internal_name='EKMND332DDBK',
pathname='pictures/historical', contenttype='image/jpeg',
locked=False, fields={'md5': '77631ca4f0e08419b70726a447333ab6'},
edited_by=ModuleStoreEnum.UserID.test, edited_on=now,
created_by=ModuleStoreEnum.UserID.test, created_on=now,
curr_version='v1.0', prev_version='v0.95'
)
def _make_asset_thumbnail_metadata(self, asset_md):
"""
Add thumbnail to the asset_md
"""
asset_md.thumbnail = 'ABC39XJUDN2'
return asset_md
def setup_assets(self, course1_key, course2_key, store=None):
"""
Setup assets. Save in store if given
"""
for i, asset in enumerate(AssetStoreTestData.all_asset_data):
asset_dict = dict(list(zip(AssetStoreTestData.asset_fields[1:], asset[1:])))
if i in (0, 1) and course1_key:
asset_key = course1_key.make_asset_key('asset', asset[0])
asset_md = AssetMetadata(asset_key, **asset_dict)
if store is not None:
store.save_asset_metadata(asset_md, asset[4])
elif course2_key:
asset_key = course2_key.make_asset_key('asset', asset[0])
asset_md = AssetMetadata(asset_key, **asset_dict)
# Don't save assets 5 and 6.
if store is not None and i not in (4, 5):
store.save_asset_metadata(asset_md, asset[4])
@ddt.data(*MODULESTORE_SETUPS)
def test_save_one_and_confirm(self, storebuilder):
"""
Save the metadata in each store and retrieve it singularly, as all assets, and after deleting all.
"""
with storebuilder.build() as (__, store):
course = CourseFactory.create(modulestore=store)
asset_filename = 'burnside.jpg'
new_asset_loc = course.id.make_asset_key('asset', asset_filename)
# Save the asset's metadata.
new_asset_md = self._make_asset_metadata(new_asset_loc)
store.save_asset_metadata(new_asset_md, ModuleStoreEnum.UserID.test)
# Find the asset's metadata and confirm it's the same.
found_asset_md = store.find_asset_metadata(new_asset_loc)
assert found_asset_md is not None
self._assert_metadata_equal(new_asset_md, found_asset_md)
assert len(store.get_all_asset_metadata(course.id, 'asset')) == 1
@ddt.data(*MODULESTORE_SETUPS)
def test_delete(self, storebuilder):
"""
Delete non-existent and existent metadata
"""
with storebuilder.build() as (__, store):
course = CourseFactory.create(modulestore=store)
new_asset_loc = course.id.make_asset_key('asset', 'burnside.jpg')
# Attempt to delete an asset that doesn't exist.
assert store.delete_asset_metadata(new_asset_loc, ModuleStoreEnum.UserID.test) == 0
assert len(store.get_all_asset_metadata(course.id, 'asset')) == 0
new_asset_md = self._make_asset_metadata(new_asset_loc)
store.save_asset_metadata(new_asset_md, ModuleStoreEnum.UserID.test)
assert store.delete_asset_metadata(new_asset_loc, ModuleStoreEnum.UserID.test) == 1
assert len(store.get_all_asset_metadata(course.id, 'asset')) == 0
@ddt.data(*MODULESTORE_SETUPS)
def test_find_non_existing_assets(self, storebuilder):
"""
Find a non-existent asset in an existing course.
"""
with storebuilder.build() as (__, store):
course = CourseFactory.create(modulestore=store)
new_asset_loc = course.id.make_asset_key('asset', 'burnside.jpg')
# Find existing asset metadata.
asset_md = store.find_asset_metadata(new_asset_loc)
assert asset_md is None
@ddt.data(*MODULESTORE_SETUPS)
def test_get_all_non_existing_assets(self, storebuilder):
"""
Get all assets in an existing course when no assets exist.
"""
with storebuilder.build() as (__, store):
course = CourseFactory.create(modulestore=store)
# Find existing asset metadata.
asset_md = store.get_all_asset_metadata(course.id, 'asset')
assert asset_md == []
@ddt.data(*MODULESTORE_SETUPS)
def test_find_assets_in_non_existent_course(self, storebuilder):
"""
Find asset metadata from a non-existent course.
"""
with storebuilder.build() as (__, store):
course = CourseFactory.create(modulestore=store)
fake_course_id = CourseKey.from_string("{}nothere/{}nothere/{}nothere".format(
course.id.org, course.id.course, course.id.run
))
new_asset_loc = fake_course_id.make_asset_key('asset', 'burnside.jpg')
# Find asset metadata from non-existent course.
with pytest.raises(ItemNotFoundError):
store.find_asset_metadata(new_asset_loc)
with pytest.raises(ItemNotFoundError):
store.get_all_asset_metadata(fake_course_id, 'asset')
@ddt.data(*MODULESTORE_SETUPS)
def test_add_same_asset_twice(self, storebuilder):
"""
Add an asset's metadata, then add it again.
"""
with storebuilder.build() as (__, store):
course = CourseFactory.create(modulestore=store)
new_asset_loc = course.id.make_asset_key('asset', 'burnside.jpg')
new_asset_md = self._make_asset_metadata(new_asset_loc)
# Add asset metadata.
store.save_asset_metadata(new_asset_md, ModuleStoreEnum.UserID.test)
assert len(store.get_all_asset_metadata(course.id, 'asset')) == 1
# Add *the same* asset metadata.
store.save_asset_metadata(new_asset_md, ModuleStoreEnum.UserID.test)
# Still one here?
assert len(store.get_all_asset_metadata(course.id, 'asset')) == 1
@ddt.data(*MODULESTORE_SETUPS)
def test_different_asset_types(self, storebuilder):
"""
Test saving assets with other asset types.
"""
with storebuilder.build() as (__, store):
course = CourseFactory.create(modulestore=store)
new_asset_loc = course.id.make_asset_key('vrml', 'pyramid.vrml')
new_asset_md = self._make_asset_metadata(new_asset_loc)
# Add asset metadata.
store.save_asset_metadata(new_asset_md, ModuleStoreEnum.UserID.test)
assert len(store.get_all_asset_metadata(course.id, 'vrml')) == 1
assert len(store.get_all_asset_metadata(course.id, 'asset')) == 0
@ddt.data(*MODULESTORE_SETUPS)
def test_asset_types_with_other_field_names(self, storebuilder):
"""
Test saving assets using an asset type of 'course_id'.
"""
with storebuilder.build() as (__, store):
course = CourseFactory.create(modulestore=store)
new_asset_loc = course.id.make_asset_key('course_id', 'just_to_see_if_it_still_works.jpg')
new_asset_md = self._make_asset_metadata(new_asset_loc)
# Add asset metadata.
store.save_asset_metadata(new_asset_md, ModuleStoreEnum.UserID.test)
assert len(store.get_all_asset_metadata(course.id, 'course_id')) == 1
assert len(store.get_all_asset_metadata(course.id, 'asset')) == 0
all_assets = store.get_all_asset_metadata(course.id, 'course_id')
assert all_assets[0].asset_id.path == new_asset_loc.path
@ddt.data(*MODULESTORE_SETUPS)
def test_lock_unlock_assets(self, storebuilder):
"""
Save multiple metadata in each store and retrieve it singularly, as all assets, and after deleting all.
"""
with storebuilder.build() as (__, store):
course = CourseFactory.create(modulestore=store)
new_asset_loc = course.id.make_asset_key('asset', 'burnside.jpg')
new_asset_md = self._make_asset_metadata(new_asset_loc)
store.save_asset_metadata(new_asset_md, ModuleStoreEnum.UserID.test)
locked_state = new_asset_md.locked
# Flip the course asset's locked status.
store.set_asset_metadata_attr(new_asset_loc, "locked", not locked_state, ModuleStoreEnum.UserID.test)
# Find the same course and check its locked status.
updated_asset_md = store.find_asset_metadata(new_asset_loc)
assert updated_asset_md is not None
assert updated_asset_md.locked == (not locked_state)
# Now flip it back.
store.set_asset_metadata_attr(new_asset_loc, "locked", locked_state, ModuleStoreEnum.UserID.test)
reupdated_asset_md = store.find_asset_metadata(new_asset_loc)
assert reupdated_asset_md is not None
assert reupdated_asset_md.locked == locked_state
ALLOWED_ATTRS = (
('pathname', '/new/path'),
('internal_name', 'new_filename.txt'),
('locked', True),
('contenttype', 'image/png'),
('thumbnail', 'new_filename_thumb.jpg'),
('fields', {'md5': '5346682d948cc3f683635b6918f9b3d0'}),
('curr_version', 'v1.01'),
('prev_version', 'v1.0'),
('edited_by', 'Mork'),
('edited_on', datetime(1969, 1, 1, tzinfo=pytz.utc)),
)
DISALLOWED_ATTRS = (
('asset_id', 'IAmBogus'),
('created_by', 'Smith'),
('created_on', datetime.now(pytz.utc)),
)
UNKNOWN_ATTRS = (
('lunch_order', 'burger_and_fries'),
('villain', 'Khan')
)
@ddt.data(*MODULESTORE_SETUPS)
def test_set_all_attrs(self, storebuilder):
"""
Set each settable attribute one at a time and verify the change.
"""
with storebuilder.build() as (__, store):
course = CourseFactory.create(modulestore=store)
new_asset_loc = course.id.make_asset_key('asset', 'burnside.jpg')
new_asset_md = self._make_asset_metadata(new_asset_loc)
store.save_asset_metadata(new_asset_md, ModuleStoreEnum.UserID.test)
for attribute, value in self.ALLOWED_ATTRS:
# Set the course asset's attribute.
store.set_asset_metadata_attr(new_asset_loc, attribute, value, ModuleStoreEnum.UserID.test)
# Find the same course asset and check its changed attribute.
updated_asset_md = store.find_asset_metadata(new_asset_loc)
assert updated_asset_md is not None
assert getattr(updated_asset_md, attribute, None) is not None
assert getattr(updated_asset_md, attribute, None) == value
@ddt.data(*MODULESTORE_SETUPS)
def test_set_disallowed_attrs(self, storebuilder):
"""
setting disallowed attrs should fail
"""
with storebuilder.build() as (__, store):
course = CourseFactory.create(modulestore=store)
new_asset_loc = course.id.make_asset_key('asset', 'burnside.jpg')
new_asset_md = self._make_asset_metadata(new_asset_loc)
store.save_asset_metadata(new_asset_md, ModuleStoreEnum.UserID.test)
for attribute, value in self.DISALLOWED_ATTRS:
original_attr_val = getattr(new_asset_md, attribute)
# Set the course asset's attribute.
store.set_asset_metadata_attr(new_asset_loc, attribute, value, ModuleStoreEnum.UserID.test)
# Find the same course asset and check its changed attribute.
updated_asset_md = store.find_asset_metadata(new_asset_loc)
assert updated_asset_md is not None
updated_attr_val = getattr(updated_asset_md, attribute, None)
assert updated_attr_val is not None
# Make sure that the attribute is unchanged from its original value.
if isinstance(original_attr_val, datetime):
self._assert_datetimes_equal(updated_attr_val, original_attr_val)
else:
assert updated_attr_val == original_attr_val
@ddt.data(*MODULESTORE_SETUPS)
def test_set_unknown_attrs(self, storebuilder):
"""
setting unknown attrs should fail
"""
with storebuilder.build() as (__, store):
course = CourseFactory.create(modulestore=store)
new_asset_loc = course.id.make_asset_key('asset', 'burnside.jpg')
new_asset_md = self._make_asset_metadata(new_asset_loc)
store.save_asset_metadata(new_asset_md, ModuleStoreEnum.UserID.test)
for attribute, value in self.UNKNOWN_ATTRS:
# Set the course asset's attribute.
store.set_asset_metadata_attr(new_asset_loc, attribute, value, ModuleStoreEnum.UserID.test)
# Find the same course asset and check its changed attribute.
updated_asset_md = store.find_asset_metadata(new_asset_loc)
assert updated_asset_md is not None
# Make sure the unknown field was *not* added.
with pytest.raises(AttributeError):
assert getattr(updated_asset_md, attribute) == value
@ddt.data(*MODULESTORE_SETUPS)
def test_save_one_different_asset(self, storebuilder):
"""
saving and deleting things which are not 'asset'
"""
with storebuilder.build() as (__, store):
course = CourseFactory.create(modulestore=store)
asset_key = course.id.make_asset_key('different', 'burn.jpg')
new_asset_thumbnail = self._make_asset_thumbnail_metadata(
self._make_asset_metadata(asset_key)
)
store.save_asset_metadata(new_asset_thumbnail, ModuleStoreEnum.UserID.test)
assert len(store.get_all_asset_metadata(course.id, 'different')) == 1
assert store.delete_asset_metadata(asset_key, ModuleStoreEnum.UserID.test) == 1
assert len(store.get_all_asset_metadata(course.id, 'different')) == 0
@ddt.data(*MODULESTORE_SETUPS)
def test_find_different(self, storebuilder):
"""
finding things which are of type other than 'asset'
"""
with storebuilder.build() as (__, store):
course = CourseFactory.create(modulestore=store)
asset_key = course.id.make_asset_key('different', 'burn.jpg')
new_asset_thumbnail = self._make_asset_thumbnail_metadata(
self._make_asset_metadata(asset_key)
)
store.save_asset_metadata(new_asset_thumbnail, ModuleStoreEnum.UserID.test)
assert store.find_asset_metadata(asset_key) is not None
unknown_asset_key = course.id.make_asset_key('different', 'nosuchfile.jpg')
assert store.find_asset_metadata(unknown_asset_key) is None
def _check_asset_values(self, assets, orig):
"""
Check asset type/path values.
"""
for idx, asset in enumerate(orig):
assert assets[idx].asset_id.asset_type == asset[0]
assert assets[idx].asset_id.path == asset[1]
@ddt.data(*MODULESTORE_SETUPS)
def test_get_multiple_types(self, storebuilder):
"""
getting all things which are of type other than 'asset'
"""
with storebuilder.build() as (__, store):
course = CourseFactory.create(modulestore=store)
# Save 'em.
for asset_type, filename in self.alls:
asset_key = course.id.make_asset_key(asset_type, filename)
new_asset = self._make_asset_thumbnail_metadata(
self._make_asset_metadata(asset_key)
)
store.save_asset_metadata(new_asset, ModuleStoreEnum.UserID.test)
# Check 'em.
for asset_type, asset_list in (
('different', self.differents),
('vrml', self.vrmls),
('asset', self.regular_assets),
):
assets = store.get_all_asset_metadata(course.id, asset_type)
assert len(assets) == len(asset_list)
self._check_asset_values(assets, asset_list)
assert len(store.get_all_asset_metadata(course.id, 'not_here')) == 0
assert len(store.get_all_asset_metadata(course.id, None)) == 4
assets = store.get_all_asset_metadata(
course.id, None, start=0, maxresults=-1,
sort=('displayname', ModuleStoreEnum.SortOrder.ascending)
)
assert len(assets) == len(self.alls)
self._check_asset_values(assets, self.alls)
@ddt.data(*MODULESTORE_SETUPS)
def test_save_metadata_list(self, storebuilder):
"""
Save a list of asset metadata all at once.
"""
with storebuilder.build() as (__, store):
course = CourseFactory.create(modulestore=store)
# Make a list of AssetMetadata objects.
md_list = []
for asset_type, filename in self.alls:
asset_key = course.id.make_asset_key(asset_type, filename)
md_list.append(self._make_asset_thumbnail_metadata(
self._make_asset_metadata(asset_key)
))
# Save 'em.
store.save_asset_metadata_list(md_list, ModuleStoreEnum.UserID.test)
# Check 'em.
for asset_type, asset_list in (
('different', self.differents),
('vrml', self.vrmls),
('asset', self.regular_assets),
):
assets = store.get_all_asset_metadata(course.id, asset_type)
assert len(assets) == len(asset_list)
self._check_asset_values(assets, asset_list)
assert len(store.get_all_asset_metadata(course.id, 'not_here')) == 0
assert len(store.get_all_asset_metadata(course.id, None)) == 4
assets = store.get_all_asset_metadata(
course.id, None, start=0, maxresults=-1,
sort=('displayname', ModuleStoreEnum.SortOrder.ascending)
)
assert len(assets) == len(self.alls)
self._check_asset_values(assets, self.alls)
@ddt.data(*MODULESTORE_SETUPS)
def test_save_metadata_list_with_mismatched_asset(self, storebuilder):
"""
Save a list of asset metadata all at once - but with one asset's metadata from a different course.
"""
with storebuilder.build() as (__, store):
course1 = CourseFactory.create(modulestore=store)
course2 = CourseFactory.create(modulestore=store)
# Make a list of AssetMetadata objects.
md_list = []
for asset_type, filename in self.alls:
if asset_type == 'asset':
asset_key = course2.id.make_asset_key(asset_type, filename)
else:
asset_key = course1.id.make_asset_key(asset_type, filename)
md_list.append(self._make_asset_thumbnail_metadata(
self._make_asset_metadata(asset_key)
))
# Save 'em.
store.save_asset_metadata_list(md_list, ModuleStoreEnum.UserID.test)
# Check 'em.
for asset_type, asset_list in (
('different', self.differents),
('vrml', self.vrmls),
):
assets = store.get_all_asset_metadata(course1.id, asset_type)
assert len(assets) == len(asset_list)
self._check_asset_values(assets, asset_list)
assert len(store.get_all_asset_metadata(course1.id, 'asset')) == 0
assert len(store.get_all_asset_metadata(course1.id, None)) == 3
assets = store.get_all_asset_metadata(
course1.id, None, start=0, maxresults=-1,
sort=('displayname', ModuleStoreEnum.SortOrder.ascending)
)
assert len(assets) == len(self.differents + self.vrmls)
self._check_asset_values(assets, self.differents + self.vrmls)
@ddt.data(*MODULESTORE_SETUPS)
def test_delete_all_different_type(self, storebuilder):
"""
deleting all assets of a given type other than 'asset'
"""
with storebuilder.build() as (__, store):
course = CourseFactory.create(modulestore=store)
asset_key = course.id.make_asset_key('different', 'burn_thumb.jpg')
new_asset_thumbnail = self._make_asset_thumbnail_metadata(
self._make_asset_metadata(asset_key)
)
store.save_asset_metadata(new_asset_thumbnail, ModuleStoreEnum.UserID.test)
assert len(store.get_all_asset_metadata(course.id, 'different')) == 1
@ddt.data(*MODULESTORE_SETUPS)
def test_get_all_assets_with_paging(self, storebuilder):
"""
Save multiple asset metadata and retrieve them with paging and sorting.
"""
with storebuilder.build() as (__, store):
course1 = CourseFactory.create(modulestore=store)
course2 = CourseFactory.create(modulestore=store)
self.setup_assets(course1.id, course2.id, store)
expected_sorts_by_2 = (
(
('displayname', ModuleStoreEnum.SortOrder.ascending),
('code.tgz', 'demo.swf', 'dog.png', 'roman_history.pdf', 'weather_patterns.bmp'),
(2, 2, 1)
),
(
('displayname', ModuleStoreEnum.SortOrder.descending),
('weather_patterns.bmp', 'roman_history.pdf', 'dog.png', 'demo.swf', 'code.tgz'),
(2, 2, 1)
),
(
('uploadDate', ModuleStoreEnum.SortOrder.ascending),
('code.tgz', 'dog.png', 'roman_history.pdf', 'weather_patterns.bmp', 'demo.swf'),
(2, 2, 1)
),
(
('uploadDate', ModuleStoreEnum.SortOrder.descending),
('demo.swf', 'weather_patterns.bmp', 'roman_history.pdf', 'dog.png', 'code.tgz'),
(2, 2, 1)
),
)
# First, with paging across all sorts.
for sort_test in expected_sorts_by_2:
for i in range(3):
asset_page = store.get_all_asset_metadata(
course2.id, 'asset', start=2 * i, maxresults=2, sort=sort_test[0]
)
num_expected_results = sort_test[2][i]
expected_filename = sort_test[1][2 * i]
assert len(asset_page) == num_expected_results
assert asset_page[0].asset_id.path == expected_filename
if num_expected_results == 2:
expected_filename = sort_test[1][(2 * i) + 1]
assert asset_page[1].asset_id.path == expected_filename
# Now fetch everything.
asset_page = store.get_all_asset_metadata(
course2.id, 'asset', start=0, sort=('displayname', ModuleStoreEnum.SortOrder.ascending)
)
assert len(asset_page) == 5
assert asset_page[0].asset_id.path == 'code.tgz'
assert asset_page[1].asset_id.path == 'demo.swf'
assert asset_page[2].asset_id.path == 'dog.png'
assert asset_page[3].asset_id.path == 'roman_history.pdf'
assert asset_page[4].asset_id.path == 'weather_patterns.bmp'
# Some odd conditions.
asset_page = store.get_all_asset_metadata(
course2.id, 'asset', start=100, sort=('uploadDate', ModuleStoreEnum.SortOrder.ascending)
)
assert len(asset_page) == 0
asset_page = store.get_all_asset_metadata(
course2.id, 'asset', start=3, maxresults=0,
sort=('displayname', ModuleStoreEnum.SortOrder.ascending)
)
assert len(asset_page) == 0
asset_page = store.get_all_asset_metadata(
course2.id, 'asset', start=3, maxresults=-12345,
sort=('displayname', ModuleStoreEnum.SortOrder.descending)
)
assert len(asset_page) == 2
@ddt.data('XML_MODULESTORE_BUILDER', 'MIXED_MODULESTORE_BUILDER')
def test_xml_not_yet_implemented(self, storebuilderName):
"""
Show that, for now, XML modulestore asset read operations are not implemented.
"""
storebuilder = self.XML_MODULESTORE_MAP[storebuilderName]
with storebuilder.build(contentstore=None) as (__, store):
course_key = store.make_course_key("org", "course", "run")
asset_key = course_key.make_asset_key('asset', 'foo.jpg')
assert store.find_asset_metadata(asset_key) is None
assert store.get_all_asset_metadata(course_key, 'asset') == []
@ddt.data(*MODULESTORE_SETUPS)
def test_copy_all_assets_same_modulestore(self, storebuilder):
"""
Create a course with assets, copy them all to another course in the same modulestore, and check on it.
"""
with storebuilder.build() as (__, store):
course1 = CourseFactory.create(modulestore=store)
course2 = CourseFactory.create(modulestore=store)
self.setup_assets(course1.id, None, store)
assert len(store.get_all_asset_metadata(course1.id, 'asset')) == 2
assert len(store.get_all_asset_metadata(course2.id, 'asset')) == 0
store.copy_all_asset_metadata(course1.id, course2.id, ModuleStoreEnum.UserID.test * 101)
assert len(store.get_all_asset_metadata(course1.id, 'asset')) == 2
all_assets = store.get_all_asset_metadata(
course2.id, 'asset', sort=('displayname', ModuleStoreEnum.SortOrder.ascending)
)
assert len(all_assets) == 2
assert all_assets[0].asset_id.path == 'pic1.jpg'
assert all_assets[1].asset_id.path == 'shout.ogg'
@ddt.data(*MODULESTORE_SETUPS)
def test_copy_all_assets_from_course_with_no_assets(self, storebuilder):
"""
Create a course with *no* assets, and try to copy them all to another course in the same modulestore.
"""
with storebuilder.build() as (__, store):
course1 = CourseFactory.create(modulestore=store)
course2 = CourseFactory.create(modulestore=store)
store.copy_all_asset_metadata(course1.id, course2.id, ModuleStoreEnum.UserID.test * 101)
assert len(store.get_all_asset_metadata(course1.id, 'asset')) == 0
assert len(store.get_all_asset_metadata(course2.id, 'asset')) == 0
all_assets = store.get_all_asset_metadata(
course2.id, 'asset', sort=('displayname', ModuleStoreEnum.SortOrder.ascending)
)
assert len(all_assets) == 0
@ddt.data(
('mongo', 'split'),
('split', 'mongo'),
)
@ddt.unpack
def test_copy_all_assets_cross_modulestore(self, from_store, to_store):
"""
Create a course with assets, copy them all to another course in a different modulestore, and check on it.
"""
mixed_builder = MIXED_MODULESTORE_BOTH_SETUP
with mixed_builder.build() as (__, mixed_store):
with mixed_store.default_store(from_store):
course1 = CourseFactory.create(modulestore=mixed_store)
with mixed_store.default_store(to_store):
course2 = CourseFactory.create(modulestore=mixed_store)
self.setup_assets(course1.id, None, mixed_store)
assert len(mixed_store.get_all_asset_metadata(course1.id, 'asset')) == 2
assert len(mixed_store.get_all_asset_metadata(course2.id, 'asset')) == 0
mixed_store.copy_all_asset_metadata(course1.id, course2.id, ModuleStoreEnum.UserID.test * 102)
all_assets = mixed_store.get_all_asset_metadata(
course2.id, 'asset', sort=('displayname', ModuleStoreEnum.SortOrder.ascending)
)
assert len(all_assets) == 2
assert all_assets[0].asset_id.path == 'pic1.jpg'
assert all_assets[1].asset_id.path == 'shout.ogg'
|
eduNEXT/edunext-platform
|
common/lib/xmodule/xmodule/modulestore/tests/test_assetstore.py
|
Python
|
agpl-3.0
| 34,098
|
def create_iterator(func, n):
def f(x):
x = func(x)
for _ in range(n-1):
x = func(x)
return x
return f
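# A minimal usage sketch (not part of the kata solution; the helper names
# below are illustrative only):
#
#     add_three = create_iterator(lambda x: x + 1, 3)
#     assert add_three(5) == 8            # func applied 3 times
#     double_twice = create_iterator(lambda x: x * 2, 2)
#     assert double_twice(3) == 12        # func applied 2 times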
|
VladKha/CodeWars
|
6 kyu/Function iteration/solve.py
|
Python
|
gpl-3.0
| 147
|
# coding=utf-8
import random
from copy import deepcopy, copy
from itertools import product
from utils import argmin
MOST_CONSTRAINED_VARIABLE = 'mcv'
HIGHEST_DEGREE_VARIABLE = 'degree'
LEAST_CONSTRAINING_VALUE = 'lvc'
def backtrack(problem, variable_heuristic='', value_heuristic='', inference=True):
'''
Backtracking search.
variable_heuristic is the heuristic for variable choosing, can be
MOST_CONSTRAINED_VARIABLE, HIGHEST_DEGREE_VARIABLE, or blank for simple
ordered choosing.
value_heuristic is the heuristic for value choosing, can be
LEAST_CONSTRAINING_VALUE or blank for simple ordered choosing.
'''
assignment = {}
domains = deepcopy(problem.domains)
if variable_heuristic == MOST_CONSTRAINED_VARIABLE:
variable_chooser = _most_constrained_variable_chooser
elif variable_heuristic == HIGHEST_DEGREE_VARIABLE:
variable_chooser = _highest_degree_variable_chooser
else:
variable_chooser = _basic_variable_chooser
if value_heuristic == LEAST_CONSTRAINING_VALUE:
values_sorter = _least_constraining_values_sorter
else:
values_sorter = _basic_values_sorter
return _backtracking(problem,
assignment,
domains,
variable_chooser,
values_sorter,
inference=inference)
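# A minimal usage sketch (illustrative only): the CspProblem class is assumed
# to come from simpleai.search, and the variables/constraint below are made up.
#
#     from simpleai.search import CspProblem, backtrack
#
#     variables = ('A', 'B')
#     domains = {'A': [1, 2, 3], 'B': [1, 2, 3]}
#     constraints = [(('A', 'B'), lambda vars_, values: values[0] != values[1])]
#     problem = CspProblem(variables, domains, constraints)
#     result = backtrack(problem, variable_heuristic=MOST_CONSTRAINED_VARIABLE,
#                        value_heuristic=LEAST_CONSTRAINING_VALUE)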
def _basic_variable_chooser(problem, variables, domains):
'''
Choose the next variable in order.
'''
return variables[0]
def _most_constrained_variable_chooser(problem, variables, domains):
'''
Choose the variable that has the fewest available values.
'''
# the variable with the fewest values available
return sorted(variables, key=lambda v: len(domains[v]))[0]
def _highest_degree_variable_chooser(problem, variables, domains):
'''
Choose the variable that is involved in the most constraints.
'''
# the variable involved in the most constraints
return sorted(variables, key=lambda v: problem.var_degrees[v], reverse=True)[0]
def _count_conflicts(problem, assignment, variable=None, value=None):
'''
Count the number of violated constraints on a given assignment.
'''
return len(_find_conflicts(problem, assignment, variable, value))
def _call_constraint(assignment, neighbors, constraint):
variables, values = zip(*[(n, assignment[n])
for n in neighbors])
return constraint(variables, values)
def _find_conflicts(problem, assignment, variable=None, value=None):
'''
Find violated constraints on a given assignment, with the possibility
of specifying a new variable and value to add to the assignment before
checking.
'''
if variable is not None and value is not None:
assignment = deepcopy(assignment)
assignment[variable] = value
conflicts = []
for neighbors, constraint in problem.constraints:
# if all the neighbors on the constraint have values, check if conflict
if all(n in assignment for n in neighbors):
if not _call_constraint(assignment, neighbors, constraint):
conflicts.append((neighbors, constraint))
return conflicts
def _basic_values_sorter(problem, assignment, variable, domains):
'''
Sort values in their original order.
'''
return domains[variable][:]
def _least_constraining_values_sorter(problem, assignment, variable, domains):
'''
Sort values based on how many conflicts they generate if assigned.
'''
# the value that generates the fewest conflicts
def update_assignment(value):
new_assignment = deepcopy(assignment)
new_assignment[variable] = value
return new_assignment
values = sorted(domains[variable][:],
key=lambda v: _count_conflicts(problem, assignment,
variable, v))
return values
def _backtracking(problem, assignment, domains, variable_chooser, values_sorter, inference=True):
'''
Internal recursive backtracking algorithm.
'''
from arc import arc_consistency_3
if len(assignment) == len(problem.variables):
return assignment
pending = [v for v in problem.variables
if v not in assignment]
variable = variable_chooser(problem, pending, domains)
values = values_sorter(problem, assignment, variable, domains)
for value in values:
new_assignment = deepcopy(assignment)
new_assignment[variable] = value
if not _count_conflicts(problem, new_assignment): # TODO on aima also checks if using fc
new_domains = deepcopy(domains)
new_domains[variable] = [value]
if not inference or arc_consistency_3(new_domains, problem.constraints):
result = _backtracking(problem,
new_assignment,
new_domains,
variable_chooser,
values_sorter,
inference=inference)
if result:
return result
return None
def _min_conflicts_value(problem, assignment, variable):
'''
Return the value that generates the fewest conflicts.
In case of a tie, a random value is selected from that subset.
'''
return argmin(problem.domains[variable], lambda x: _count_conflicts(problem, assignment, variable, x))
def min_conflicts(problem, initial_assignment=None, iterations_limit=0):
"""
Min conflicts search.
initial_assignment is the initial assignment, or None to build one
greedily using min-conflicts values.
If iterations_limit is specified, the algorithm will end after that
number of iterations. Else, it will continue until it finds an assignment
that doesn't generate conflicts (a solution).
"""
assignment = {}
if initial_assignment:
assignment.update(initial_assignment)
else:
for variable in problem.variables:
value = _min_conflicts_value(problem, assignment, variable)
assignment[variable] = value
iteration = 0
run = True
while run:
conflicts = _find_conflicts(problem, assignment)
conflict_variables = [v for v in problem.variables
if any(v in conflict[0] for conflict in conflicts)]
if conflict_variables:
variable = random.choice(conflict_variables)
value = _min_conflicts_value(problem, assignment, variable)
assignment[variable] = value
iteration += 1
if iterations_limit and iteration >= iterations_limit:
run = False
elif not _count_conflicts(problem, assignment):
run = False
return assignment
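# A minimal usage sketch (illustrative only), reusing the hypothetical problem
# from the backtrack example above:
#
#     result = min_conflicts(problem, iterations_limit=100)
#     # the returned assignment may still contain conflicts if the iteration
#     # limit is reached before a solution is found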
def convert_to_binary(variables, domains, constraints):
"""
Return new variables, domains and constraints, with every constraint made
binary by introducing hidden variables.
You can use it as a preprocessing step when creating a problem.
"""
def wdiff(vars_):
def diff(variables, values):
hidden, other = variables
if hidden.startswith('hidden'):
idx = vars_.index(other)
return values[1] == values[0][idx]
else:
idx = vars_.index(hidden)
return values[0] == values[1][idx]
diff.no_wrap = True # so it's not wrapped to swap values
return diff
new_constraints = []
new_domains = copy(domains)
new_variables = list(variables)
last = 0
for vars_, const in constraints:
if len(vars_) == 2:
new_constraints.append((vars_, const))
continue
hidden = 'hidden%d' % last
new_variables.append(hidden)
last += 1
new_domains[hidden] = [t for t in product(*map(domains.get, vars_)) if const(vars_, t)]
for var in vars_:
new_constraints.append(((hidden, var), wdiff(vars_)))
return new_variables, new_domains, new_constraints
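# A minimal usage sketch (illustrative only): turning a single ternary
# "all different" constraint into binary constraints via hidden variables.
#
#     variables = ('X', 'Y', 'Z')
#     domains = {v: [1, 2, 3] for v in variables}
#     constraints = [(variables,
#                     lambda vars_, values: len(set(values)) == len(values))]
#     variables, domains, constraints = convert_to_binary(variables, domains,
#                                                         constraints)
#     # every constraint is now binary, relating each original variable to a
#     # generated hidden variable.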
|
emoron/simpleai
|
simpleai/search/csp.py
|
Python
|
mit
| 8,094
|
"""
This package contains some 3rd party modules:
- githubpy: https://github.com/michaelliao/githubpy
"""
|
ColinDuquesnoy/QCrash
|
qcrash/_extlibs/__init__.py
|
Python
|
mit
| 108
|
###############################################################################
##
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
""" This script stops VisTrails server. It can only stop with an XML Request """
import xmlrpclib
import sys
import socket
def usage():
return "%s server_url"%sys.argv[0]
if __name__ == '__main__':
try:
uri = sys.argv[1]
except Exception, e:
print usage()
sys.exit(1)
try:
proxy = xmlrpclib.ServerProxy(uri)
print proxy.quit()
except socket.error, e:
print "Vistrails was not running on ", uri
#print proxy.run_from_db('vistrails.sci.utah.edu', 3306,'vt_test',1,'/tmp/spreadsheet',598)
|
Nikea/VisTrails
|
vistrails/stop_vistrails_server.py
|
Python
|
bsd-3-clause
| 2,443
|
from preset_schema import PresetSchema
class TeletypePresetSchema(PresetSchema):
def firmware_name(self):
return 'teletype'
def check(self, nvram_data):
if nvram_data.fresh != 0x22:
print("this firmware image hasn't ever been run (or is corrupt)")
return False
return True
def root_type(self):
return 'nvram_data_t'
def address(self):
return 0x80040000
|
monome/ansible
|
tools/flash_tools/schemata/teletype/teletype_preset_schema.py
|
Python
|
gpl-2.0
| 442
|
"""This file sets up a command line manager.
Use "python manage.py" for a list of available commands.
Use "python manage.py runserver" to start the development web server on localhost:5000.
Use "python manage.py runserver --help" for a list of runserver options.
"""
from flask_migrate import MigrateCommand
from flask_script import Manager
from app import create_app
from app.commands import InitDbCommand
# Setup Flask-Script with command line commands
manager = Manager(create_app)
manager.add_command('db', MigrateCommand)
manager.add_command('init_db', InitDbCommand)
if __name__ == "__main__":
# python manage.py # shows available commands
# python manage.py runserver --help # shows available runserver options
manager.run()
|
lingthio/Flask-User-starter-app
|
manage.py
|
Python
|
bsd-2-clause
| 774
|
from django import forms
from .models import CustomEvent, Schedule
class ScheduleCreationForm(forms.ModelForm):
class Meta:
model = Schedule
fields = ['html']
class CustomEventForm(forms.ModelForm):
class Meta:
model = CustomEvent
fields = [
'conference', 'title', 'begin_time', 'end_time', 'location',
'break_event', 'description', 'link_path',
]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['title'].strip = False
|
pycontw/pycontw2016
|
src/events/forms.py
|
Python
|
mit
| 554
|
# -*- coding: utf-8 -*-
"""
Base module for loading and running the main SasView application.
"""
################################################################################
# This software was developed by the University of Tennessee as part of the
# Distributed Data Analysis of Neutron Scattering Experiments (DANSE)
# project funded by the US National Science Foundation.
#
# See the license text in license.txt
#
# copyright 2009, University of Tennessee
################################################################################
import os
import os.path
import sys
import traceback
from sas.sasview.logger_config import SetupLogger
logger = SetupLogger(__name__).config_production()
# Log the start of the session
logger.info(" --- SasView session started ---")
# Log the python version
logger.info("Python: %s" % sys.version)
# Allow the dynamic selection of wxPython via an environment variable, when devs
# who have multiple versions of the module installed want to pick between them.
# This variable does not have to be set of course, and through normal usage will
# probably not be, but this can make things a little easier when upgrading to a
# new version of wx.
WX_ENV_VAR = "SASVIEW_WX_VERSION"
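# A hypothetical example (the version string is illustrative only): setting
# SASVIEW_WX_VERSION=3.0 in the shell before launching SasView makes the block
# below call wxversion.select("3.0") if that wx build is installed.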
if WX_ENV_VAR in os.environ:
logger.info("You have set the %s environment variable to %s." % \
(WX_ENV_VAR, os.environ[WX_ENV_VAR]))
import wxversion
if wxversion.checkInstalled(os.environ[WX_ENV_VAR]):
logger.info("Version %s of wxPython is installed, so using that version." % os.environ[WX_ENV_VAR])
wxversion.select(os.environ[WX_ENV_VAR])
else:
logger.error("Version %s of wxPython is not installed, so using default version." % os.environ[WX_ENV_VAR])
else:
logger.info("You have not set the %s environment variable, so using default version of wxPython." % WX_ENV_VAR)
import wx
try:
logger.info("Wx version: %s" % wx.__version__)
except:
logger.error("Wx version: error reading version")
import wxcruft
wxcruft.call_later_fix()
# wxcruft.trace_new_id()
# Always use private .matplotlib setup to avoid conflicts with other
# uses of matplotlib
# Have to check if .sasview exists first
sasdir = os.path.join(os.path.expanduser("~"),'.sasview')
if not os.path.exists(sasdir):
os.mkdir(sasdir)
mplconfigdir = os.path.join(os.path.expanduser("~"),'.sasview','.matplotlib')
if not os.path.exists(mplconfigdir):
os.mkdir(mplconfigdir)
os.environ['MPLCONFIGDIR'] = mplconfigdir
reload(sys)
sys.setdefaultencoding("iso-8859-1")
from sas.sasgui.guiframe import gui_manager
from sas.sasgui.guiframe.gui_style import GUIFRAME
from welcome_panel import WelcomePanel
PLUGIN_MODEL_DIR = 'plugin_models'
APP_NAME = 'SasView'
# Set SAS_MODELPATH so sasmodels can find our custom models
os.environ['SAS_MODELPATH'] = os.path.join(sasdir, PLUGIN_MODEL_DIR)
from matplotlib import backend_bases
backend_bases.FigureCanvasBase.filetypes.pop('pgf', None)
class SasView():
"""
Main class for running the SasView application
"""
def __init__(self):
"""
"""
# from gui_manager import ViewApp
self.gui = gui_manager.SasViewApp(0)
# Set the application manager for the GUI
self.gui.set_manager(self)
# Add perspectives to the basic application
# Additional perspectives can still be loaded
# dynamically
# Note: py2exe can't find dynamically loaded
# modules. We load the fitting module here
# to ensure a complete Windows executable build.
# Fitting perspective
try:
import sas.sasgui.perspectives.fitting as module
fitting_plug = module.Plugin()
self.gui.add_perspective(fitting_plug)
except Exception:
logger.error("%s: could not find Fitting plug-in module"% APP_NAME)
logger.error(traceback.format_exc())
# P(r) perspective
try:
import sas.sasgui.perspectives.pr as module
pr_plug = module.Plugin()
self.gui.add_perspective(pr_plug)
except:
logger.error("%s: could not find P(r) plug-in module"% APP_NAME)
logger.error(traceback.format_exc())
# Invariant perspective
try:
import sas.sasgui.perspectives.invariant as module
invariant_plug = module.Plugin()
self.gui.add_perspective(invariant_plug)
except Exception as e :
logger.error("%s: could not find Invariant plug-in module"% \
APP_NAME)
logger.error(traceback.format_exc())
# Corfunc perspective
try:
import sas.sasgui.perspectives.corfunc as module
corfunc_plug = module.Plugin()
self.gui.add_perspective(corfunc_plug)
except:
logger.error("Unable to load corfunc module")
# Calculator perspective
try:
import sas.sasgui.perspectives.calculator as module
calculator_plug = module.Plugin()
self.gui.add_perspective(calculator_plug)
except:
logger.error("%s: could not find Calculator plug-in module"% \
APP_NAME)
logger.error(traceback.format_exc())
# File converter tool
try:
import sas.sasgui.perspectives.file_converter as module
converter_plug = module.Plugin()
self.gui.add_perspective(converter_plug)
except:
logger.error("%s: could not find File Converter plug-in module"% \
APP_NAME)
logger.error(traceback.format_exc())
# Add welcome page
self.gui.set_welcome_panel(WelcomePanel)
# Build the GUI
self.gui.build_gui()
# delete unused model folder
self.gui.clean_plugin_models(PLUGIN_MODEL_DIR)
# Start the main loop
self.gui.MainLoop()
def run():
"""
__main__ method for loading and running SasView
"""
from multiprocessing import freeze_support
freeze_support()
if len(sys.argv) > 1:
## Run sasview as an interactive python interpreter
# if sys.argv[1] == "-i":
# sys.argv = ["ipython", "--pylab"]
# from IPython import start_ipython
# sys.exit(start_ipython())
thing_to_run = sys.argv[1]
sys.argv = sys.argv[1:]
import runpy
if os.path.exists(thing_to_run):
runpy.run_path(thing_to_run, run_name="__main__")
else:
runpy.run_module(thing_to_run, run_name="__main__")
else:
SasView()
if __name__ == "__main__":
run()
|
lewisodriscoll/sasview
|
sasview/sasview.py
|
Python
|
bsd-3-clause
| 6,749
|
"""
OEML - REST API
This section will provide necessary information about the `CoinAPI OEML REST API` protocol. <br/> This API is also available in the Postman application: <a href=\"https://postman.coinapi.io/\" target=\"_blank\">https://postman.coinapi.io/</a> <br/><br/> Implemented Standards: * [HTTP1.0](https://datatracker.ietf.org/doc/html/rfc1945) * [HTTP1.1](https://datatracker.ietf.org/doc/html/rfc2616) * [HTTP2.0](https://datatracker.ietf.org/doc/html/rfc7540) # noqa: E501
The version of the OpenAPI document: v1
Contact: support@coinapi.io
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from openapi_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from openapi_client.exceptions import ApiAttributeError
class OrdSide(ModelSimple):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
('value',): {
'BUY': "BUY",
'SELL': "SELL",
},
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'value': (str,),
}
@cached_property
def discriminator():
return None
attribute_map = {}
read_only_vars = set()
_composed_schemas = None
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs):
"""OrdSide - a model defined in OpenAPI
Note that value can be passed either in args or in kwargs, but not in both.
Args:
args[0] (str): Side of order. ., must be one of ["BUY", "SELL", ] # noqa: E501
Keyword Args:
value (str): Side of order. ., must be one of ["BUY", "SELL", ] # noqa: E501
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
# required up here when default value is not given
_path_to_item = kwargs.pop('_path_to_item', ())
if 'value' in kwargs:
value = kwargs.pop('value')
elif args:
args = list(args)
value = args.pop(0)
else:
raise ApiTypeError(
"value is required, but not passed in args or kwargs and doesn't have default",
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.value = value
if kwargs:
raise ApiTypeError(
"Invalid named arguments=%s passed to %s. Remove those invalid named arguments." % (
kwargs,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs):
"""OrdSide - a model defined in OpenAPI
Note that value can be passed either in args or in kwargs, but not in both.
Args:
args[0] (str): Side of order. ., must be one of ["BUY", "SELL", ] # noqa: E501
Keyword Args:
value (str): Side of order. ., must be one of ["BUY", "SELL", ] # noqa: E501
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
# required up here when default value is not given
_path_to_item = kwargs.pop('_path_to_item', ())
self = super(OpenApiModel, cls).__new__(cls)
if 'value' in kwargs:
value = kwargs.pop('value')
elif args:
args = list(args)
value = args.pop(0)
else:
raise ApiTypeError(
"value is required, but not passed in args or kwargs and doesn't have default",
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.value = value
if kwargs:
raise ApiTypeError(
"Invalid named arguments=%s passed to %s. Remove those invalid named arguments." % (
kwargs,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
return self
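# A minimal usage sketch (illustrative only, not produced by the generator):
#
#     side = OrdSide("BUY")           # value passed positionally
#     side = OrdSide(value="SELL")    # or as a keyword argument
#     # only the values listed in allowed_values ("BUY", "SELL") are accepted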
|
coinapi/coinapi-sdk
|
oeml-sdk/python/openapi_client/model/ord_side.py
|
Python
|
mit
| 11,969
|
# $Id$
# (C) Timothy Corbett-Clark, 2004
"""Midge is a bug tracking system.
Midge is a system for tracking bugs found during the commercial
development of a software product. It is particularly suited to a
process for which the filing, managing, fixing, and testing are all
undertaken by different roles in a trusted environment.
Midge aims to be consistent, self-explanatory, powerful enough to
efficiently manage thousands of bugs, require no administration, and
be bug-free!
For more information, see http://midge.sourceforge.net.
"""
|
p/midge
|
midge/__init__.py
|
Python
|
gpl-2.0
| 546
|
"""
Objects for dealing with Hermite_e series.
This module provides a number of objects (mostly functions) useful for
dealing with Hermite_e series, including a `HermiteE` class that
encapsulates the usual arithmetic operations. (General information
on how this module represents and works with such polynomials is in the
docstring for its "parent" sub-package, `numpy.polynomial`).
Constants
---------
- `hermedomain` -- Hermite_e series default domain, [-1,1].
- `hermezero` -- Hermite_e series that evaluates identically to 0.
- `hermeone` -- Hermite_e series that evaluates identically to 1.
- `hermex` -- Hermite_e series for the identity map, ``f(x) = x``.
Arithmetic
----------
- `hermemulx` -- multiply a Hermite_e series in ``P_i(x)`` by ``x``.
- `hermeadd` -- add two Hermite_e series.
- `hermesub` -- subtract one Hermite_e series from another.
- `hermemul` -- multiply two Hermite_e series.
- `hermediv` -- divide one Hermite_e series by another.
- `hermeval` -- evaluate a Hermite_e series at given points.
- `hermeval2d` -- evaluate a 2D Hermite_e series at given points.
- `hermeval3d` -- evaluate a 3D Hermite_e series at given points.
- `hermegrid2d` -- evaluate a 2D Hermite_e series on a Cartesian product.
- `hermegrid3d` -- evaluate a 3D Hermite_e series on a Cartesian product.
Calculus
--------
- `hermeder` -- differentiate a Hermite_e series.
- `hermeint` -- integrate a Hermite_e series.
Misc Functions
--------------
- `hermefromroots` -- create a Hermite_e series with specified roots.
- `hermeroots` -- find the roots of a Hermite_e series.
- `hermevander` -- Vandermonde-like matrix for Hermite_e polynomials.
- `hermevander2d` -- Vandermonde-like matrix for 2D power series.
- `hermevander3d` -- Vandermonde-like matrix for 3D power series.
- `hermegauss` -- Gauss-Hermite_e quadrature, points and weights.
- `hermeweight` -- Hermite_e weight function.
- `hermecompanion` -- symmetrized companion matrix in Hermite_e form.
- `hermefit` -- least-squares fit returning a Hermite_e series.
- `hermetrim` -- trim leading coefficients from a Hermite_e series.
- `hermeline` -- Hermite_e series of given straight line.
- `herme2poly` -- convert a Hermite_e series to a polynomial.
- `poly2herme` -- convert a polynomial to a Hermite_e series.
Classes
-------
- `HermiteE` -- A Hermite_e series class.
See also
--------
`numpy.polynomial`
"""
from __future__ import division
import numpy as np
import numpy.linalg as la
import polyutils as pu
import warnings
from polytemplate import polytemplate
__all__ = ['hermezero', 'hermeone', 'hermex', 'hermedomain', 'hermeline',
'hermeadd', 'hermesub', 'hermemulx', 'hermemul', 'hermediv', 'hermepow',
'hermeval',
'hermeder', 'hermeint', 'herme2poly', 'poly2herme', 'hermefromroots',
'hermevander', 'hermefit', 'hermetrim', 'hermeroots', 'HermiteE',
'hermeval2d', 'hermeval3d', 'hermegrid2d', 'hermegrid3d', 'hermevander2d',
'hermevander3d', 'hermecompanion', 'hermegauss', 'hermeweight']
hermetrim = pu.trimcoef
def poly2herme(pol) :
"""
poly2herme(pol)
Convert a polynomial to a Hermite series.
Convert an array representing the coefficients of a polynomial (relative
to the "standard" basis) ordered from lowest degree to highest, to an
array of the coefficients of the equivalent Hermite series, ordered
from lowest to highest degree.
Parameters
----------
pol : array_like
1-D array containing the polynomial coefficients
Returns
-------
c : ndarray
1-D array containing the coefficients of the equivalent Hermite
series.
See Also
--------
herme2poly
Notes
-----
The easy way to do conversions between polynomial basis sets
is to use the convert method of a class instance.
Examples
--------
>>> from numpy.polynomial.hermite_e import poly2herme
>>> poly2herme(np.arange(4))
array([ 2., 10., 2., 3.])
"""
[pol] = pu.as_series([pol])
deg = len(pol) - 1
res = 0
for i in range(deg, -1, -1) :
res = hermeadd(hermemulx(res), pol[i])
return res
def herme2poly(c) :
"""
Convert a Hermite series to a polynomial.
Convert an array representing the coefficients of a Hermite series,
ordered from lowest degree to highest, to an array of the coefficients
of the equivalent polynomial (relative to the "standard" basis) ordered
from lowest to highest degree.
Parameters
----------
c : array_like
1-D array containing the Hermite series coefficients, ordered
from lowest order term to highest.
Returns
-------
pol : ndarray
1-D array containing the coefficients of the equivalent polynomial
(relative to the "standard" basis) ordered from lowest order term
to highest.
See Also
--------
poly2herme
Notes
-----
The easy way to do conversions between polynomial basis sets
is to use the convert method of a class instance.
Examples
--------
>>> from numpy.polynomial.hermite_e import herme2poly
>>> herme2poly([ 2., 10., 2., 3.])
array([ 0., 1., 2., 3.])
"""
from polynomial import polyadd, polysub, polymulx
[c] = pu.as_series([c])
n = len(c)
if n == 1:
return c
if n == 2:
return c
else:
c0 = c[-2]
c1 = c[-1]
# i is the current degree of c1
for i in range(n - 1, 1, -1) :
tmp = c0
c0 = polysub(c[i - 2], c1*(i - 1))
c1 = polyadd(tmp, polymulx(c1))
return polyadd(c0, polymulx(c1))
#
# These constant arrays are of integer type so as to be compatible
# with the widest range of other types, such as Decimal.
#
# Hermite
hermedomain = np.array([-1,1])
# Hermite coefficients representing zero.
hermezero = np.array([0])
# Hermite coefficients representing one.
hermeone = np.array([1])
# Hermite coefficients representing the identity x.
hermex = np.array([0, 1])
def hermeline(off, scl) :
"""
Hermite series whose graph is a straight line.
Parameters
----------
off, scl : scalars
The specified line is given by ``off + scl*x``.
Returns
-------
y : ndarray
This module's representation of the Hermite series for
``off + scl*x``.
See Also
--------
polyline, chebline
Examples
--------
>>> from numpy.polynomial.hermite_e import hermeline
>>> from numpy.polynomial.hermite_e import hermeline, hermeval
>>> hermeval(0,hermeline(3, 2))
3.0
>>> hermeval(1,hermeline(3, 2))
5.0
"""
if scl != 0 :
return np.array([off,scl])
else :
return np.array([off])
def hermefromroots(roots) :
"""
Generate a HermiteE series with given roots.
The function returns the coefficients of the polynomial
.. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n),
in HermiteE form, where the `r_n` are the roots specified in `roots`.
If a zero has multiplicity n, then it must appear in `roots` n times.
For instance, if 2 is a root of multiplicity three and 3 is a root of
multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The
roots can appear in any order.
If the returned coefficients are `c`, then
.. math:: p(x) = c_0 + c_1 * He_1(x) + ... + c_n * He_n(x)
The coefficient of the last term is not generally 1 for monic
polynomials in HermiteE form.
Parameters
----------
roots : array_like
Sequence containing the roots.
Returns
-------
out : ndarray
1-D array of coefficients. If all roots are real then `out` is a
real array, if some of the roots are complex, then `out` is complex
even if all the coefficients in the result are real (see Examples
below).
See Also
--------
polyfromroots, legfromroots, lagfromroots, hermfromroots,
chebfromroots.
Examples
--------
>>> from numpy.polynomial.hermite_e import hermefromroots, hermeval
>>> coef = hermefromroots((-1, 0, 1))
>>> hermeval((-1, 0, 1), coef)
array([ 0., 0., 0.])
>>> coef = hermefromroots((-1j, 1j))
>>> hermeval((-1j, 1j), coef)
array([ 0.+0.j, 0.+0.j])
"""
if len(roots) == 0 :
return np.ones(1)
else :
[roots] = pu.as_series([roots], trim=False)
roots.sort()
p = [hermeline(-r, 1) for r in roots]
n = len(p)
while n > 1:
m, r = divmod(n, 2)
tmp = [hermemul(p[i], p[i+m]) for i in range(m)]
if r:
tmp[0] = hermemul(tmp[0], p[-1])
p = tmp
n = m
return p[0]
def hermeadd(c1, c2):
"""
Add one Hermite series to another.
Returns the sum of two Hermite series `c1` + `c2`. The arguments
are sequences of coefficients ordered from lowest order term to
highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Hermite series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Array representing the Hermite series of their sum.
See Also
--------
hermesub, hermemul, hermediv, hermepow
Notes
-----
Unlike multiplication, division, etc., the sum of two Hermite series
is a Hermite series (without having to "reproject" the result onto
the basis set) so addition, just like that of "standard" polynomials,
is simply "component-wise."
Examples
--------
>>> from numpy.polynomial.hermite_e import hermeadd
>>> hermeadd([1, 2, 3], [1, 2, 3, 4])
array([ 2., 4., 6., 4.])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2) :
c1[:c2.size] += c2
ret = c1
else :
c2[:c1.size] += c1
ret = c2
return pu.trimseq(ret)
def hermesub(c1, c2):
"""
Subtract one Hermite series from another.
Returns the difference of two Hermite series `c1` - `c2`. The
sequences of coefficients are from lowest order term to highest, i.e.,
[1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Hermite series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Of Hermite series coefficients representing their difference.
See Also
--------
hermeadd, hermemul, hermediv, hermepow
Notes
-----
Unlike multiplication, division, etc., the difference of two Hermite
series is a Hermite series (without having to "reproject" the result
onto the basis set) so subtraction, just like that of "standard"
polynomials, is simply "component-wise."
Examples
--------
>>> from numpy.polynomial.hermite_e import hermesub
>>> hermesub([1, 2, 3, 4], [1, 2, 3])
array([ 0., 0., 0., 4.])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2) :
c1[:c2.size] -= c2
ret = c1
else :
c2 = -c2
c2[:c1.size] += c1
ret = c2
return pu.trimseq(ret)
def hermemulx(c):
"""Multiply a Hermite series by x.
Multiply the Hermite series `c` by x, where x is the independent
variable.
Parameters
----------
c : array_like
1-D array of Hermite series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Array representing the result of the multiplication.
Notes
-----
The multiplication uses the recursion relationship for Hermite
polynomials in the form
.. math::
xP_i(x) = (P_{i + 1}(x) + iP_{i - 1}(x))
Examples
--------
>>> from numpy.polynomial.hermite_e import hermemulx
>>> hermemulx([1, 2, 3])
array([ 2., 7., 2., 3.])
"""
# c is a trimmed copy
[c] = pu.as_series([c])
# The zero series needs special treatment
if len(c) == 1 and c[0] == 0:
return c
prd = np.empty(len(c) + 1, dtype=c.dtype)
prd[0] = c[0]*0
prd[1] = c[0]
for i in range(1, len(c)):
prd[i + 1] = c[i]
prd[i - 1] += c[i]*i
return prd
def hermemul(c1, c2):
"""
Multiply one Hermite series by another.
Returns the product of two Hermite series `c1` * `c2`. The arguments
are sequences of coefficients, from lowest order "term" to highest,
e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Hermite series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Of Hermite series coefficients representing their product.
See Also
--------
hermeadd, hermesub, hermediv, hermepow
Notes
-----
In general, the (polynomial) product of two C-series results in terms
that are not in the Hermite polynomial basis set. Thus, to express
the product as a Hermite series, it is necessary to "reproject" the
product onto said basis set, which may produce "unintuitive" (but
correct) results; see Examples section below.
Examples
--------
>>> from numpy.polynomial.hermite_e import hermemul
>>> hermemul([1, 2, 3], [0, 1, 2])
array([ 14., 15., 28., 7., 6.])
"""
# s1, s2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c = c2
xs = c1
else:
c = c1
xs = c2
if len(c) == 1:
c0 = c[0]*xs
c1 = 0
elif len(c) == 2:
c0 = c[0]*xs
c1 = c[1]*xs
else :
nd = len(c)
c0 = c[-2]*xs
c1 = c[-1]*xs
for i in range(3, len(c) + 1) :
tmp = c0
nd = nd - 1
c0 = hermesub(c[-i]*xs, c1*(nd - 1))
c1 = hermeadd(tmp, hermemulx(c1))
return hermeadd(c0, hermemulx(c1))
def hermediv(c1, c2):
"""
Divide one Hermite series by another.
Returns the quotient-with-remainder of two Hermite series
`c1` / `c2`. The arguments are sequences of coefficients from lowest
order "term" to highest, e.g., [1,2,3] represents the series
``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Hermite series coefficients ordered from low to
high.
Returns
-------
[quo, rem] : ndarrays
Of Hermite series coefficients representing the quotient and
remainder.
See Also
--------
hermeadd, hermesub, hermemul, hermepow
Notes
-----
In general, the (polynomial) division of one Hermite series by another
results in quotient and remainder terms that are not in the Hermite
polynomial basis set. Thus, to express these results as a Hermite
series, it is necessary to "reproject" the results onto the Hermite
basis set, which may produce "unintuitive" (but correct) results; see
Examples section below.
Examples
--------
>>> from numpy.polynomial.hermite_e import hermediv
>>> hermediv([ 14., 15., 28., 7., 6.], [0, 1, 2])
(array([ 1., 2., 3.]), array([ 0.]))
>>> hermediv([ 15., 17., 28., 7., 6.], [0, 1, 2])
(array([ 1., 2., 3.]), array([ 1., 2.]))
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if c2[-1] == 0 :
raise ZeroDivisionError()
lc1 = len(c1)
lc2 = len(c2)
if lc1 < lc2 :
return c1[:1]*0, c1
elif lc2 == 1 :
return c1/c2[-1], c1[:1]*0
else :
quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype)
rem = c1
for i in range(lc1 - lc2, - 1, -1):
p = hermemul([0]*i + [1], c2)
q = rem[-1]/p[-1]
rem = rem[:-1] - q*p[:-1]
quo[i] = q
return quo, pu.trimseq(rem)
def hermepow(c, pow, maxpower=16) :
"""Raise a Hermite series to a power.
Returns the Hermite series `c` raised to the power `pow`. The
argument `c` is a sequence of coefficients ordered from low to high.
i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2.``
Parameters
----------
c : array_like
1-D array of Hermite series coefficients ordered from low to
high.
pow : integer
Power to which the series will be raised
maxpower : integer, optional
Maximum power allowed. This is mainly to limit growth of the series
to unmanageable size. Default is 16
Returns
-------
coef : ndarray
Hermite series of power.
See Also
--------
hermeadd, hermesub, hermemul, hermediv
Examples
--------
>>> from numpy.polynomial.hermite_e import hermepow
>>> hermepow([1, 2, 3], 2)
array([ 23., 28., 46., 12., 9.])
"""
# c is a trimmed copy
[c] = pu.as_series([c])
power = int(pow)
if power != pow or power < 0 :
raise ValueError("Power must be a non-negative integer.")
elif maxpower is not None and power > maxpower :
raise ValueError("Power is too large")
elif power == 0 :
return np.array([1], dtype=c.dtype)
elif power == 1 :
return c
else :
# This can be made more efficient by using powers of two
# in the usual way.
prd = c
for i in range(2, power + 1) :
prd = hermemul(prd, c)
return prd
def hermeder(c, m=1, scl=1, axis=0) :
"""
Differentiate a Hermite_e series.
Returns the series coefficients `c` differentiated `m` times along
`axis`. At each iteration the result is multiplied by `scl` (the
scaling factor is for use in a linear change of variable). The argument
`c` is an array of coefficients from low to high degree along each
axis, e.g., [1,2,3] represents the series ``1*He_0 + 2*He_1 + 3*He_2``
while [[1,2],[1,2]] represents ``1*He_0(x)*He_0(y) + 1*He_1(x)*He_0(y)
+ 2*He_0(x)*He_1(y) + 2*He_1(x)*He_1(y)`` if axis=0 is ``x`` and axis=1
is ``y``.
Parameters
----------
c: array_like
Array of Hermite_e series coefficients. If `c` is multidimensional
the different axis correspond to different variables with the
degree in each axis given by the corresponding index.
m : int, optional
Number of derivatives taken, must be non-negative. (Default: 1)
scl : scalar, optional
Each differentiation is multiplied by `scl`. The end result is
multiplication by ``scl**m``. This is for use in a linear change of
variable. (Default: 1)
axis : int, optional
Axis over which the derivative is taken. (Default: 0).
.. versionadded:: 1.7.0
Returns
-------
der : ndarray
Hermite series of the derivative.
See Also
--------
hermeint
Notes
-----
In general, the result of differentiating a Hermite series does not
resemble the same operation on a power series. Thus the result of this
function may be "unintuitive," albeit correct; see Examples section
below.
Examples
--------
>>> from numpy.polynomial.hermite_e import hermeder
>>> hermeder([ 1., 1., 1., 1.])
array([ 1., 2., 3.])
>>> hermeder([-0.25, 1., 1./2., 1./3., 1./4 ], m=2)
array([ 1., 2., 3.])
"""
c = np.array(c, ndmin=1, copy=1)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
cnt, iaxis = [int(t) for t in [m, axis]]
if cnt != m:
raise ValueError("The order of derivation must be integer")
if cnt < 0:
raise ValueError("The order of derivation must be non-negative")
if iaxis != axis:
raise ValueError("The axis must be integer")
if not -c.ndim <= iaxis < c.ndim:
raise ValueError("The axis is out of range")
if iaxis < 0:
iaxis += c.ndim
if cnt == 0:
return c
c = np.rollaxis(c, iaxis)
n = len(c)
if cnt >= n:
return c[:1]*0
else :
for i in range(cnt):
n = n - 1
c *= scl
der = np.empty((n,) + c.shape[1:], dtype=c.dtype)
for j in range(n, 0, -1):
der[j - 1] = j*c[j]
c = der
c = np.rollaxis(c, 0, iaxis + 1)
return c
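# Quick illustrative check of the recurrence used above (der[j - 1] = j*c[j]),
# i.e. d/dx He_n(x) = n*He_(n-1)(x).  The helper name is made up for this sketch.
def _hermeder_basis_check():
    """The derivative of He_3 (coefficients [0, 0, 0, 1]) is 3*He_2."""
    return np.allclose(hermeder([0, 0, 0, 1]), [0, 0, 3])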
def hermeint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
"""
Integrate a Hermite_e series.
Returns the Hermite_e series coefficients `c` integrated `m` times from
`lbnd` along `axis`. At each iteration the resulting series is
**multiplied** by `scl` and an integration constant, `k`, is added.
The scaling factor is for use in a linear change of variable. ("Buyer
beware": note that, depending on what one is doing, one may want `scl`
to be the reciprocal of what one might expect; for more information,
see the Notes section below.) The argument `c` is an array of
coefficients from low to high degree along each axis, e.g., [1,2,3]
    represents the series ``He_0 + 2*He_1 + 3*He_2`` while [[1,2],[1,2]]
    represents ``1*He_0(x)*He_0(y) + 1*He_1(x)*He_0(y) + 2*He_0(x)*He_1(y) +
    2*He_1(x)*He_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``.
Parameters
----------
c : array_like
Array of Hermite_e series coefficients. If c is multidimensional
the different axis correspond to different variables with the
degree in each axis given by the corresponding index.
m : int, optional
Order of integration, must be positive. (Default: 1)
k : {[], list, scalar}, optional
Integration constant(s). The value of the first integral at
``lbnd`` is the first value in the list, the value of the second
integral at ``lbnd`` is the second value, etc. If ``k == []`` (the
default), all constants are set to zero. If ``m == 1``, a single
scalar can be given instead of a list.
lbnd : scalar, optional
The lower bound of the integral. (Default: 0)
scl : scalar, optional
Following each integration the result is *multiplied* by `scl`
before the integration constant is added. (Default: 1)
axis : int, optional
Axis over which the integral is taken. (Default: 0).
.. versionadded:: 1.7.0
Returns
-------
S : ndarray
Hermite_e series coefficients of the integral.
Raises
------
ValueError
If ``m < 0``, ``len(k) > m``, ``np.isscalar(lbnd) == False``, or
``np.isscalar(scl) == False``.
See Also
--------
hermeder
Notes
-----
Note that the result of each integration is *multiplied* by `scl`.
Why is this important to note? Say one is making a linear change of
variable :math:`u = ax + b` in an integral relative to `x`. Then
    :math:`dx = du/a`, so one will need to set `scl` equal to
:math:`1/a` - perhaps not what one would have first thought.
Also note that, in general, the result of integrating a C-series needs
to be "reprojected" onto the C-series basis set. Thus, typically,
the result of this function is "unintuitive," albeit correct; see
Examples section below.
Examples
--------
>>> from numpy.polynomial.hermite_e import hermeint
>>> hermeint([1, 2, 3]) # integrate once, value 0 at 0.
array([ 1., 1., 1., 1.])
>>> hermeint([1, 2, 3], m=2) # integrate twice, value & deriv 0 at 0
array([-0.25 , 1. , 0.5 , 0.33333333, 0.25 ])
>>> hermeint([1, 2, 3], k=1) # integrate once, value 1 at 0.
array([ 2., 1., 1., 1.])
>>> hermeint([1, 2, 3], lbnd=-1) # integrate once, value 0 at -1
array([-1., 1., 1., 1.])
>>> hermeint([1, 2, 3], m=2, k=[1, 2], lbnd=-1)
array([ 1.83333333, 0. , 0.5 , 0.33333333, 0.25 ])
"""
c = np.array(c, ndmin=1, copy=1)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if not np.iterable(k):
k = [k]
cnt, iaxis = [int(t) for t in [m, axis]]
if cnt != m:
raise ValueError("The order of integration must be integer")
if cnt < 0 :
raise ValueError("The order of integration must be non-negative")
if len(k) > cnt :
raise ValueError("Too many integration constants")
if iaxis != axis:
raise ValueError("The axis must be integer")
if not -c.ndim <= iaxis < c.ndim:
raise ValueError("The axis is out of range")
if iaxis < 0:
iaxis += c.ndim
if cnt == 0:
return c
c = np.rollaxis(c, iaxis)
k = list(k) + [0]*(cnt - len(k))
for i in range(cnt) :
n = len(c)
c *= scl
if n == 1 and np.all(c[0] == 0):
c[0] += k[i]
else:
tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype)
tmp[0] = c[0]*0
tmp[1] = c[0]
for j in range(1, n):
tmp[j + 1] = c[j]/(j + 1)
tmp[0] += k[i] - hermeval(lbnd, tmp)
c = tmp
c = np.rollaxis(c, 0, iaxis + 1)
return c
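# Illustrative round-trip check (helper name made up for this sketch):
# integrating with hermeint and then differentiating with hermeder recovers
# the original coefficients, since the two routines use inverse recurrences.
def _hermeint_roundtrip_check():
    c = np.array([1., 2., 3.])
    return np.allclose(hermeder(hermeint(c)), c)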
def hermeval(x, c, tensor=True):
"""
Evaluate an HermiteE series at points x.
If `c` is of length `n + 1`, this function returns the value:
.. math:: p(x) = c_0 * He_0(x) + c_1 * He_1(x) + ... + c_n * He_n(x)
The parameter `x` is converted to an array only if it is a tuple or a
list, otherwise it is treated as a scalar. In either case, either `x`
or its elements must support multiplication and addition both with
themselves and with the elements of `c`.
If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If
`c` is multidimensional, then the shape of the result depends on the
value of `tensor`. If `tensor` is true the shape will be c.shape[1:] +
x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that
    scalars have shape ().
Trailing zeros in the coefficients will be used in the evaluation, so
they should be avoided if efficiency is a concern.
Parameters
----------
x : array_like, compatible object
If `x` is a list or tuple, it is converted to an ndarray, otherwise
it is left unchanged and treated as a scalar. In either case, `x`
        or its elements must support addition and multiplication with
        themselves and with the elements of `c`.
c : array_like
Array of coefficients ordered so that the coefficients for terms of
degree n are contained in c[n]. If `c` is multidimensional the
remaining indices enumerate multiple polynomials. In the two
dimensional case the coefficients may be thought of as stored in
the columns of `c`.
tensor : boolean, optional
If True, the shape of the coefficient array is extended with ones
on the right, one for each dimension of `x`. Scalars have dimension 0
for this action. The result is that every column of coefficients in
`c` is evaluated for every element of `x`. If False, `x` is broadcast
over the columns of `c` for the evaluation. This keyword is useful
when `c` is multidimensional. The default value is True.
.. versionadded:: 1.7.0
Returns
-------
values : ndarray, algebra_like
The shape of the return value is described above.
See Also
--------
hermeval2d, hermegrid2d, hermeval3d, hermegrid3d
Notes
-----
The evaluation uses Clenshaw recursion, aka synthetic division.
Examples
--------
>>> from numpy.polynomial.hermite_e import hermeval
>>> coef = [1,2,3]
>>> hermeval(1, coef)
3.0
>>> hermeval([[1,2],[3,4]], coef)
array([[ 3., 14.],
[ 31., 54.]])
"""
c = np.array(c, ndmin=1, copy=0)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if isinstance(x, (tuple, list)):
x = np.asarray(x)
if isinstance(x, np.ndarray) and tensor:
c = c.reshape(c.shape + (1,)*x.ndim)
if len(c) == 1 :
c0 = c[0]
c1 = 0
elif len(c) == 2 :
c0 = c[0]
c1 = c[1]
else :
nd = len(c)
c0 = c[-2]
c1 = c[-1]
for i in range(3, len(c) + 1) :
tmp = c0
nd = nd - 1
c0 = c[-i] - c1*(nd - 1)
c1 = tmp + c1*x
return c0 + c1*x
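# Illustrative cross-check of the Clenshaw evaluation above against an explicit
# sum over the first few basis polynomials, He_0 = 1, He_1 = x, He_2 = x**2 - 1.
# The helper name is made up for this sketch and is not part of the module.
def _hermeval_naive_check():
    x = np.linspace(-2., 2., 5)
    c = [1., 2., 3.]
    naive = c[0] + c[1]*x + c[2]*(x**2 - 1)
    return np.allclose(hermeval(x, c), naive)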
def hermeval2d(x, y, c):
"""
Evaluate a 2-D HermiteE series at points (x, y).
This function returns the values:
.. math:: p(x,y) = \\sum_{i,j} c_{i,j} * He_i(x) * He_j(y)
The parameters `x` and `y` are converted to arrays only if they are
tuples or a lists, otherwise they are treated as a scalars and they
must have the same shape after conversion. In either case, either `x`
and `y` or their elements must support multiplication and addition both
with themselves and with the elements of `c`.
If `c` is a 1-D array a one is implicitly appended to its shape to make
it 2-D. The shape of the result will be c.shape[2:] + x.shape.
Parameters
----------
x, y : array_like, compatible objects
The two dimensional series is evaluated at the points `(x, y)`,
where `x` and `y` must have the same shape. If `x` or `y` is a list
or tuple, it is first converted to an ndarray, otherwise it is left
unchanged and if it isn't an ndarray it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term
of multi-degree i,j is contained in ``c[i,j]``. If `c` has
dimension greater than two the remaining indices enumerate multiple
sets of coefficients.
Returns
-------
values : ndarray, compatible object
The values of the two dimensional polynomial at points formed with
pairs of corresponding values from `x` and `y`.
See Also
--------
hermeval, hermegrid2d, hermeval3d, hermegrid3d
Notes
-----
.. versionadded::1.7.0
"""
try:
x, y = np.array((x, y), copy=0)
except:
raise ValueError('x, y are incompatible')
c = hermeval(x, c)
c = hermeval(y, c, tensor=False)
return c
def hermegrid2d(x, y, c):
"""
Evaluate a 2-D HermiteE series on the Cartesian product of x and y.
This function returns the values:
    .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * He_i(a) * He_j(b)
where the points `(a, b)` consist of all pairs formed by taking
`a` from `x` and `b` from `y`. The resulting points form a grid with
`x` in the first dimension and `y` in the second.
The parameters `x` and `y` are converted to arrays only if they are
tuples or a lists, otherwise they are treated as a scalars. In either
case, either `x` and `y` or their elements must support multiplication
and addition both with themselves and with the elements of `c`.
If `c` has fewer than two dimensions, ones are implicitly appended to
its shape to make it 2-D. The shape of the result will be c.shape[2:] +
x.shape.
Parameters
----------
x, y : array_like, compatible objects
The two dimensional series is evaluated at the points in the
Cartesian product of `x` and `y`. If `x` or `y` is a list or
tuple, it is first converted to an ndarray, otherwise it is left
unchanged and, if it isn't an ndarray, it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficients for terms of
degree i,j are contained in ``c[i,j]``. If `c` has dimension
greater than two the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
The values of the two dimensional polynomial at points in the Cartesian
product of `x` and `y`.
See Also
--------
hermeval, hermeval2d, hermeval3d, hermegrid3d
Notes
-----
.. versionadded::1.7.0
"""
c = hermeval(x, c)
c = hermeval(y, c)
return c
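# Illustrative contrast between the two 2-D evaluators above (helper name made
# up for this sketch): hermeval2d evaluates at paired points (x[i], y[i]),
# while hermegrid2d evaluates on the full Cartesian product of x and y, so the
# paired values sit on the diagonal of the grid result.
def _herme2d_pairs_vs_grid_check():
    x = np.array([0., 1.])
    y = np.array([0., 2.])
    c = np.ones((2, 2))            # 1 + He_1(x) + He_1(y) + He_1(x)*He_1(y)
    paired = hermeval2d(x, y, c)   # shape (2,)
    grid = hermegrid2d(x, y, c)    # shape (2, 2)
    return np.allclose(paired, np.diag(grid))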
def hermeval3d(x, y, z, c):
"""
Evaluate a 3-D Hermite_e series at points (x, y, z).
This function returns the values:
.. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * He_i(x) * He_j(y) * He_k(z)
The parameters `x`, `y`, and `z` are converted to arrays only if
they are tuples or a lists, otherwise they are treated as a scalars and
they must have the same shape after conversion. In either case, either
`x`, `y`, and `z` or their elements must support multiplication and
addition both with themselves and with the elements of `c`.
If `c` has fewer than 3 dimensions, ones are implicitly appended to its
shape to make it 3-D. The shape of the result will be c.shape[3:] +
x.shape.
Parameters
----------
x, y, z : array_like, compatible object
The three dimensional series is evaluated at the points
`(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If
any of `x`, `y`, or `z` is a list or tuple, it is first converted
to an ndarray, otherwise it is left unchanged and if it isn't an
ndarray it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term of
multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
greater than 3 the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
The values of the multidimensional polynomial on points formed with
triples of corresponding values from `x`, `y`, and `z`.
See Also
--------
hermeval, hermeval2d, hermegrid2d, hermegrid3d
Notes
-----
.. versionadded::1.7.0
"""
try:
x, y, z = np.array((x, y, z), copy=0)
except:
raise ValueError('x, y, z are incompatible')
c = hermeval(x, c)
c = hermeval(y, c, tensor=False)
c = hermeval(z, c, tensor=False)
return c
def hermegrid3d(x, y, z, c):
"""
Evaluate a 3-D HermiteE series on the Cartesian product of x, y, and z.
This function returns the values:
.. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * He_i(a) * He_j(b) * He_k(c)
where the points `(a, b, c)` consist of all triples formed by taking
`a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form
a grid with `x` in the first dimension, `y` in the second, and `z` in
the third.
The parameters `x`, `y`, and `z` are converted to arrays only if they
are tuples or a lists, otherwise they are treated as a scalars. In
either case, either `x`, `y`, and `z` or their elements must support
multiplication and addition both with themselves and with the elements
of `c`.
If `c` has fewer than three dimensions, ones are implicitly appended to
its shape to make it 3-D. The shape of the result will be c.shape[3:] +
x.shape + y.shape + z.shape.
Parameters
----------
x, y, z : array_like, compatible objects
The three dimensional series is evaluated at the points in the
Cartesian product of `x`, `y`, and `z`. If `x`,`y`, or `z` is a
list or tuple, it is first converted to an ndarray, otherwise it is
left unchanged and, if it isn't an ndarray, it is treated as a
scalar.
c : array_like
Array of coefficients ordered so that the coefficients for terms of
degree i,j are contained in ``c[i,j]``. If `c` has dimension
greater than two the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
The values of the two dimensional polynomial at points in the Cartesian
product of `x` and `y`.
See Also
--------
hermeval, hermeval2d, hermegrid2d, hermeval3d
Notes
-----
.. versionadded::1.7.0
"""
c = hermeval(x, c)
c = hermeval(y, c)
c = hermeval(z, c)
return c
def hermevander(x, deg) :
"""Pseudo-Vandermonde matrix of given degree.
Returns the pseudo-Vandermonde matrix of degree `deg` and sample points
`x`. The pseudo-Vandermonde matrix is defined by
.. math:: V[..., i] = He_i(x),
where `0 <= i <= deg`. The leading indices of `V` index the elements of
`x` and the last index is the degree of the HermiteE polynomial.
If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the
array ``V = hermevander(x, n)``, then ``np.dot(V, c)`` and
``hermeval(x, c)`` are the same up to roundoff. This equivalence is
useful both for least squares fitting and for the evaluation of a large
number of HermiteE series of the same degree and sample points.
Parameters
----------
x : array_like
Array of points. The dtype is converted to float64 or complex128
depending on whether any of the elements are complex. If `x` is
scalar it is converted to a 1-D array.
deg : int
Degree of the resulting matrix.
Returns
-------
vander: ndarray
The pseudo-Vandermonde matrix. The shape of the returned matrix is
        ``x.shape + (deg + 1,)``, where the last index is the degree of the
corresponding HermiteE polynomial. The dtype will be the same as
the converted `x`.
Examples
--------
>>> from numpy.polynomial.hermite_e import hermevander
>>> x = np.array([-1, 0, 1])
>>> hermevander(x, 3)
array([[ 1., -1., 0., 2.],
[ 1., 0., -1., -0.],
[ 1., 1., 0., -2.]])
"""
ideg = int(deg)
if ideg != deg:
raise ValueError("deg must be integer")
if ideg < 0:
raise ValueError("deg must be non-negative")
x = np.array(x, copy=0, ndmin=1) + 0.0
dims = (ideg + 1,) + x.shape
dtyp = x.dtype
v = np.empty(dims, dtype=dtyp)
v[0] = x*0 + 1
if ideg > 0 :
v[1] = x
for i in range(2, ideg + 1) :
v[i] = (v[i-1]*x - v[i-2]*(i - 1))
return np.rollaxis(v, 0, v.ndim)
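# Illustrative check of the equivalence stated in the docstring (helper name
# made up for this sketch): np.dot(hermevander(x, n), c) agrees with
# hermeval(x, c) up to roundoff.
def _hermevander_matvec_check():
    x = np.linspace(-1., 1., 5)
    c = np.array([1., 2., 3., 4.])
    V = hermevander(x, len(c) - 1)
    return np.allclose(np.dot(V, c), hermeval(x, c))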
def hermevander2d(x, y, deg) :
"""Pseudo-Vandermonde matrix of given degrees.
Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
points `(x, y)`. The pseudo-Vandermonde matrix is defined by
.. math:: V[..., deg[1]*i + j] = He_i(x) * He_j(y),
where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of
`V` index the points `(x, y)` and the last index encodes the degrees of
the HermiteE polynomials.
If ``V = hermevander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
correspond to the elements of a 2-D coefficient array `c` of shape
(xdeg + 1, ydeg + 1) in the order
.. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...
and ``np.dot(V, c.flat)`` and ``hermeval2d(x, y, c)`` will be the same
up to roundoff. This equivalence is useful both for least squares
fitting and for the evaluation of a large number of 2-D HermiteE
series of the same degrees and sample points.
Parameters
----------
x, y : array_like
Arrays of point coordinates, all of the same shape. The dtypes
will be converted to either float64 or complex128 depending on
whether any of the elements are complex. Scalars are converted to
1-D arrays.
deg : list of ints
List of maximum degrees of the form [x_deg, y_deg].
Returns
-------
vander2d : ndarray
The shape of the returned matrix is ``x.shape + (order,)``, where
        :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same
as the converted `x` and `y`.
See Also
--------
    hermevander, hermevander3d, hermeval2d, hermeval3d
Notes
-----
.. versionadded::1.7.0
"""
ideg = [int(d) for d in deg]
is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
if is_valid != [1, 1]:
raise ValueError("degrees must be non-negative integers")
degx, degy = ideg
x, y = np.array((x, y), copy=0) + 0.0
vx = hermevander(x, degx)
vy = hermevander(y, degy)
v = vx[..., None]*vy[..., None, :]
return v.reshape(v.shape[:-2] + (-1,))
def hermevander3d(x, y, z, deg) :
"""Pseudo-Vandermonde matrix of given degrees.
Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`,
    then the pseudo-Vandermonde matrix is defined by
    .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = He_i(x)*He_j(y)*He_k(z),
    where `0 <= i <= l`, `0 <= j <= m`, and `0 <= k <= n`. The leading
indices of `V` index the points `(x, y, z)` and the last index encodes
the degrees of the HermiteE polynomials.
If ``V = hermevander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
of `V` correspond to the elements of a 3-D coefficient array `c` of
shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order
.. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...
and ``np.dot(V, c.flat)`` and ``hermeval3d(x, y, z, c)`` will be the
same up to roundoff. This equivalence is useful both for least squares
fitting and for the evaluation of a large number of 3-D HermiteE
series of the same degrees and sample points.
Parameters
----------
x, y, z : array_like
Arrays of point coordinates, all of the same shape. The dtypes will
be converted to either float64 or complex128 depending on whether
any of the elements are complex. Scalars are converted to 1-D
arrays.
deg : list of ints
List of maximum degrees of the form [x_deg, y_deg, z_deg].
Returns
-------
vander3d : ndarray
The shape of the returned matrix is ``x.shape + (order,)``, where
        :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will
be the same as the converted `x`, `y`, and `z`.
See Also
--------
    hermevander, hermevander2d, hermeval2d, hermeval3d
Notes
-----
.. versionadded::1.7.0
"""
ideg = [int(d) for d in deg]
is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
if is_valid != [1, 1, 1]:
raise ValueError("degrees must be non-negative integers")
degx, degy, degz = ideg
x, y, z = np.array((x, y, z), copy=0) + 0.0
vx = hermevander(x, degx)
vy = hermevander(y, degy)
vz = hermevander(z, degz)
v = vx[..., None, None]*vy[..., None, :, None]*vz[..., None, None, :]
return v.reshape(v.shape[:-3] + (-1,))
def hermefit(x, y, deg, rcond=None, full=False, w=None):
"""
Least squares fit of Hermite series to data.
Return the coefficients of a HermiteE series of degree `deg` that is
the least squares fit to the data values `y` given at points `x`. If
`y` is 1-D the returned coefficients will also be 1-D. If `y` is 2-D
multiple fits are done, one for each column of `y`, and the resulting
coefficients are stored in the corresponding columns of a 2-D return.
The fitted polynomial(s) are in the form
.. math:: p(x) = c_0 + c_1 * He_1(x) + ... + c_n * He_n(x),
where `n` is `deg`.
Since numpy version 1.7.0, hermefit also supports NA. If any of the
elements of `x`, `y`, or `w` are NA, then the corresponding rows of the
linear least squares problem (see Notes) are set to 0. If `y` is 2-D,
then an NA in any row of `y` invalidates that whole row.
Parameters
----------
x : array_like, shape (M,)
x-coordinates of the M sample points ``(x[i], y[i])``.
y : array_like, shape (M,) or (M, K)
y-coordinates of the sample points. Several data sets of sample
points sharing the same x-coordinates can be fitted at once by
passing in a 2D-array that contains one dataset per column.
deg : int
Degree of the fitting polynomial
rcond : float, optional
Relative condition number of the fit. Singular values smaller than
this relative to the largest singular value will be ignored. The
default value is len(x)*eps, where eps is the relative precision of
the float type, about 2e-16 in most cases.
full : bool, optional
Switch determining nature of return value. When it is False (the
default) just the coefficients are returned, when True diagnostic
information from the singular value decomposition is also returned.
w : array_like, shape (`M`,), optional
Weights. If not None, the contribution of each point
``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the
weights are chosen so that the errors of the products ``w[i]*y[i]``
all have the same variance. The default value is None.
Returns
-------
    coef : ndarray, shape (deg + 1,) or (deg + 1, K)
Hermite coefficients ordered from low to high. If `y` was 2-D,
the coefficients for the data in column k of `y` are in column
`k`.
[residuals, rank, singular_values, rcond] : present when `full` = True
Residuals of the least-squares fit, the effective rank of the
scaled Vandermonde matrix and its singular values, and the
specified value of `rcond`. For more details, see `linalg.lstsq`.
Warns
-----
RankWarning
The rank of the coefficient matrix in the least-squares fit is
deficient. The warning is only raised if `full` = False. The
warnings can be turned off by
>>> import warnings
>>> warnings.simplefilter('ignore', RankWarning)
See Also
--------
    chebfit, legfit, polyfit, hermfit
hermeval : Evaluates a Hermite series.
hermevander : pseudo Vandermonde matrix of Hermite series.
hermeweight : HermiteE weight function.
linalg.lstsq : Computes a least-squares fit from the matrix.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
-----
The solution is the coefficients of the HermiteE series `p` that
minimizes the sum of the weighted squared errors
.. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2,
where the :math:`w_j` are the weights. This problem is solved by
setting up the (typically) overdetermined matrix equation
.. math:: V(x) * c = w * y,
where `V` is the pseudo Vandermonde matrix of `x`, the elements of `c`
are the coefficients to be solved for, and the elements of `y` are the
observed values. This equation is then solved using the singular value
decomposition of `V`.
If some of the singular values of `V` are so small that they are
neglected, then a `RankWarning` will be issued. This means that the
coefficient values may be poorly determined. Using a lower order fit
will usually get rid of the warning. The `rcond` parameter can also be
set to a value smaller than its default, but the resulting fit may be
spurious and have large contributions from roundoff error.
Fits using HermiteE series are probably most useful when the data can
be approximated by ``sqrt(w(x)) * p(x)``, where `w(x)` is the HermiteE
    weight. In that case the weight ``sqrt(w(x[i]))`` should be used
    together with data values ``y[i]/sqrt(w(x[i]))``. The weight function is
available as `hermeweight`.
References
----------
.. [1] Wikipedia, "Curve fitting",
http://en.wikipedia.org/wiki/Curve_fitting
Examples
--------
    >>> from numpy.polynomial.hermite_e import hermefit, hermeval
>>> x = np.linspace(-10, 10)
>>> err = np.random.randn(len(x))/10
>>> y = hermeval(x, [1, 2, 3]) + err
>>> hermefit(x, y, 2)
array([ 1.01690445, 1.99951418, 2.99948696])
"""
order = int(deg) + 1
x = np.asarray(x) + 0.0
y = np.asarray(y) + 0.0
# check arguments.
if deg < 0 :
raise ValueError("expected deg >= 0")
if x.ndim != 1:
raise TypeError("expected 1D vector for x")
if x.size == 0:
raise TypeError("expected non-empty vector for x")
if y.ndim < 1 or y.ndim > 2 :
raise TypeError("expected 1D or 2D array for y")
if len(x) != len(y):
raise TypeError("expected x and y to have same length")
# set up the least squares matrices in transposed form
lhs = hermevander(x, deg).T
rhs = y.T
if w is not None:
w = np.asarray(w) + 0.0
if w.ndim != 1:
raise TypeError("expected 1D vector for w")
if len(x) != len(w):
raise TypeError("expected x and w to have same length")
# apply weights. Don't use inplace operations as they
# can cause problems with NA.
lhs = lhs * w
rhs = rhs * w
# set rcond
if rcond is None :
rcond = len(x)*np.finfo(x.dtype).eps
# scale the design matrix and solve the least squares equation
scl = np.sqrt((lhs*lhs).sum(1))
c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond)
c = (c.T/scl).T
# warn on rank reduction
if rank != order and not full:
msg = "The fit may be poorly conditioned"
warnings.warn(msg, pu.RankWarning)
if full :
return c, [resids, rank, s, rcond]
else :
return c
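# Illustrative sketch of the weighting recipe from the Notes above (helper name
# made up for this sketch): when the data behave like sqrt(w(x))*p(x) with
# w = hermeweight, fit y/sqrt(w(x)) using weights sqrt(w(x)).
def _hermefit_weighted_sketch():
    x = np.linspace(-3., 3., 50)
    true_coef = [1., .5, .25]
    sw = np.sqrt(hermeweight(x))
    y = sw*hermeval(x, true_coef)
    coef = hermefit(x, y/sw, 2, w=sw)
    return np.allclose(coef, true_coef)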
def hermecompanion(c):
"""
Return the scaled companion matrix of c.
The basis polynomials are scaled so that the companion matrix is
symmetric when `c` is an HermiteE basis polynomial. This provides
better eigenvalue estimates than the unscaled case and for basis
polynomials the eigenvalues are guaranteed to be real if
`numpy.linalg.eigvalsh` is used to obtain them.
Parameters
----------
c : array_like
1-D array of HermiteE series coefficients ordered from low to high
degree.
Returns
-------
mat : ndarray
Scaled companion matrix of dimensions (deg, deg).
Notes
-----
.. versionadded::1.7.0
"""
accprod = np.multiply.accumulate
# c is a trimmed copy
[c] = pu.as_series([c])
if len(c) < 2:
raise ValueError('Series must have maximum degree of at least 1.')
if len(c) == 2:
        return np.array([[-c[0]/c[1]]])
n = len(c) - 1
mat = np.zeros((n, n), dtype=c.dtype)
scl = np.hstack((1., np.sqrt(np.arange(1,n))))
scl = np.multiply.accumulate(scl)
top = mat.reshape(-1)[1::n+1]
bot = mat.reshape(-1)[n::n+1]
top[...] = np.sqrt(np.arange(1,n))
bot[...] = top
mat[:,-1] -= (c[:-1]/c[-1])*(scl/scl[-1])
return mat
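# Illustrative use of the scaled companion matrix (helper name made up for this
# sketch): for a pure basis polynomial the matrix is symmetric, so eigvalsh
# returns real eigenvalues, which are the roots of that polynomial.
def _hermecompanion_symmetry_check():
    m = hermecompanion([0, 0, 0, 1])      # He_3, roots 0 and +/- sqrt(3)
    symmetric = np.allclose(m, m.T)
    roots = np.sort(la.eigvalsh(m))
    return symmetric and np.allclose(roots, [-np.sqrt(3), 0., np.sqrt(3)])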
def hermeroots(c):
"""
Compute the roots of a HermiteE series.
Return the roots (a.k.a. "zeros") of the polynomial
.. math:: p(x) = \\sum_i c[i] * He_i(x).
Parameters
----------
c : 1-D array_like
1-D array of coefficients.
Returns
-------
out : ndarray
Array of the roots of the series. If all the roots are real,
then `out` is also real, otherwise it is complex.
See Also
--------
polyroots, legroots, lagroots, hermroots, chebroots
Notes
-----
The root estimates are obtained as the eigenvalues of the companion
matrix, Roots far from the origin of the complex plane may have large
errors due to the numerical instability of the series for such
values. Roots with multiplicity greater than 1 will also show larger
errors as the value of the series near such points is relatively
insensitive to errors in the roots. Isolated roots near the origin can
be improved by a few iterations of Newton's method.
The HermiteE series basis polynomials aren't powers of `x` so the
results of this function may seem unintuitive.
Examples
--------
>>> from numpy.polynomial.hermite_e import hermeroots, hermefromroots
>>> coef = hermefromroots([-1, 0, 1])
>>> coef
array([ 0., 2., 0., 1.])
>>> hermeroots(coef)
array([-1., 0., 1.])
"""
# c is a trimmed copy
[c] = pu.as_series([c])
if len(c) <= 1 :
return np.array([], dtype=c.dtype)
if len(c) == 2 :
return np.array([-c[0]/c[1]])
m = hermecompanion(c)
r = la.eigvals(m)
r.sort()
return r
def hermegauss(deg):
"""
Gauss-HermiteE quadrature.
Computes the sample points and weights for Gauss-HermiteE quadrature.
These sample points and weights will correctly integrate polynomials of
    degree :math:`2*deg - 1` or less over the interval :math:`[-\infty, \infty]`
with the weight function :math:`f(x) = \exp(-x^2/2)`.
Parameters
----------
deg : int
Number of sample points and weights. It must be >= 1.
Returns
-------
x : ndarray
1-D ndarray containing the sample points.
y : ndarray
1-D ndarray containing the weights.
Notes
-----
.. versionadded::1.7.0
    The results have only been tested up to degree 100; higher degrees may
be problematic. The weights are determined by using the fact that
.. math:: w_k = c / (He'_n(x_k) * He_{n-1}(x_k))
where :math:`c` is a constant independent of :math:`k` and :math:`x_k`
is the k'th root of :math:`He_n`, and then scaling the results to get
the right value when integrating 1.
"""
ideg = int(deg)
if ideg != deg or ideg < 1:
raise ValueError("deg must be a non-negative integer")
# first approximation of roots. We use the fact that the companion
# matrix is symmetric in this case in order to obtain better zeros.
c = np.array([0]*deg + [1])
m = hermecompanion(c)
x = la.eigvals(m)
x.sort()
# improve roots by one application of Newton
dy = hermeval(x, c)
df = hermeval(x, hermeder(c))
x -= dy/df
# compute the weights. We scale the factor to avoid possible numerical
# overflow.
fm = hermeval(x, c[1:])
fm /= np.abs(fm).max()
df /= np.abs(df).max()
w = 1/(fm * df)
# for Hermite_e we can also symmetrize
w = (w + w[::-1])/2
x = (x - x[::-1])/2
# scale w to get the right value
w *= np.sqrt(2*np.pi) / w.sum()
return x, w
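# Illustrative quadrature check (helper name made up for this sketch): with the
# weight exp(-x**2/2), the integral of x**2 over the whole real line equals
# sqrt(2*pi), and a 3-point rule integrates this degree-2 integrand exactly.
def _hermegauss_moment_check():
    x, w = hermegauss(3)
    return np.allclose(np.sum(w*x**2), np.sqrt(2*np.pi))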
def hermeweight(x):
"""Weight function of the Hermite_e polynomials.
The weight function is :math:`\exp(-x^2/2)` and the interval of
    integration is :math:`[-\infty, \infty]`. The HermiteE polynomials are
orthogonal, but not normalized, with respect to this weight function.
Parameters
----------
x : array_like
Values at which the weight function will be computed.
Returns
-------
w : ndarray
The weight function at `x`.
Notes
-----
.. versionadded::1.7.0
"""
w = np.exp(-.5*x**2)
return w
#
# HermiteE series class
#
exec polytemplate.substitute(name='HermiteE', nick='herme', domain='[-1,1]')
|
lthurlow/Network-Grapher
|
proj/external/numpy-1.7.0/numpy/polynomial/hermite_e.py
|
Python
|
mit
| 54,747
|
from obj_model import (Model, BooleanAttribute, EnumAttribute, FloatAttribute, IntegerAttribute,
PositiveIntegerAttribute, RegexAttribute, SlugAttribute, StringAttribute, LongStringAttribute,
UrlAttribute, OneToOneAttribute, ManyToOneAttribute, ManyToManyAttribute, OneToManyAttribute,
TabularOrientation)
class Reference(Model):
""" Reference
Related attributes:
tests (:obj:`list` of `Test`): tests
subtests (:obj:`list` of `Subtest`): subtests
"""
id = SlugAttribute()
value = StringAttribute()
class Meta(Model.Meta):
attribute_order = ('id', 'value')
class Test(Model):
""" Test
Related attributes:
subtests (:obj:`list` of `Subtest`): subtests
"""
id = SlugAttribute()
name = StringAttribute(default='my name')
references = ManyToManyAttribute(Reference, related_name='tests')
class Meta(Model.Meta):
attribute_order = ('id', 'name', 'references')
table_format = TabularOrientation.column
class Reference(Model):
""" Another definition of Reference, which causes a _check_imported_models error
Related attributes:
subtests (:obj:`list` of `Subtest`): subtests
"""
id = SlugAttribute()
value = StringAttribute()
class Meta(Model.Meta):
attribute_order = ('id', 'value')
class Subtest(Model):
""" Subtest
"""
id = SlugAttribute()
test = ManyToOneAttribute(Test, related_name='subtests')
references = ManyToManyAttribute(Reference, related_name='subtests')
class Meta(Model.Meta):
attribute_order = ('id', 'test', 'references')
|
KarrLab/obj_model
|
tests/fixtures/migrate/small_bad_related.py
|
Python
|
mit
| 1,630
|
# -*- Mode: Python -*-
# vi:si:et:sw=4:sts=4:ts=4
#
# Flumotion - a streaming media server
# Copyright (C) 2004,2005,2006 Fluendo, S.L. (www.fluendo.com).
# All rights reserved.
# Licensees having purchased or holding a valid Flumotion Advanced
# Streaming Server license may use this file in accordance with the
# Flumotion Advanced Streaming Server Commercial License Agreement.
# See "LICENSE.Flumotion" in the source distribution for more information.
# Headers in this file shall remain intact.
from flumotion.common import log
from flumotion.component.common.wms import common
MAX_SIZE = 65535
MAX_LOCID = 4294967295
MAX_INCID = 254 # Not 255 ! ! !
class MMSProducer(object, log.Loggable):
logCategory = "wms-mmsprod"
def __init__(self):
self.reset()
def register(self, sink):
self._sink = sink
def reset(self):
self.wait_keyframe = True
self._header = None
self._h_locid = 0
self._d_locid = 0
self._d_incid = 0
def stop(self):
eos = self.mms_eos(0)
if self._sink:
self._sink.pushData(self, eos)
def pushHeaders(self, header_obj, data_obj):
data = header_obj.data + data_obj.data
if self._header == data:
return
self._header_obj = header_obj
header = self.mms_header(data)
if self._header is not None:
eos = self.mms_eos(1)
change = self.mms_change()
if self._sink:
self._sink.pushData(self, eos)
self._sink.pushData(self, change)
self.reset()
self._header = data
if self._sink:
self._sink.pushData(self, header)
self._h_locid = (self._h_locid + 1) % (MAX_LOCID + 1)
return header
def pushPacket(self, packet):
assert self._header is not None, "No header yet"
if self.wait_keyframe:
info = self._header_obj.stream_index[packet.stream_number]
if not (info.is_video and packet.is_keyframe):
return
self.wait_keyframe = False
packet = self.mms_data(packet.data)
self._d_incid = (self._d_incid + 1) % (MAX_INCID + 1)
self._d_locid = (self._d_locid + 1) % (MAX_LOCID + 1)
if self._sink:
self._sink.pushData(self, packet)
def mms_header(self, data):
size = len(data) + 8
assert size <= MAX_SIZE, "ASF header too big to fit in one MMS packet"
packet = ["$H",
common.encode_word(size),
common.encode_dword(self._h_locid),
common.encode_byte(0),
common.encode_byte(12),
common.encode_word(size),
data]
return "".join(packet)
def mms_data(self, data):
size = len(data) + 8
assert size <= MAX_SIZE, "ASF packet too big to fit in one MMS packet"
packet = ["$D",
common.encode_word(size),
common.encode_dword(self._d_locid),
common.encode_byte(0),
common.encode_byte(self._d_incid),
common.encode_word(size),
data]
return "".join(packet)
def mms_eos(self, hresult):
packet = ["$E",
common.encode_word(8),
common.encode_dword(hresult)]
return "".join(packet)
def mms_change(self):
packet = ["$C",
common.encode_word(4),
common.encode_dword(0)]
return "".join(packet)
|
flumotion-mirror/flumotion-windowsmedia
|
flumotion/component/common/wms/mmsproducer.py
|
Python
|
lgpl-2.1
| 3,590
|
import os
from mock import patch, MagicMock
from unittest2 import TestCase
from py_selenium_server.selenium_server import SeleniumServerHandler
from util import test_data_dir, mock_latest_version, mock_data
class TestSeleniumServer(TestCase):
@patch(
"py_selenium_server.selenium_server.SELENIUM_JAR_STORE_LOCATION",
test_data_dir)
@patch(
"py_selenium_server.selenium_server"
".get_latest_selenium_server_description_data",
MagicMock(return_value=mock_data))
def test_start_server(self):
os.system(
"touch {0}{1}".format(
test_data_dir, mock_latest_version))
server = SeleniumServerHandler()
response = server.start_server()
self.assertEqual(server.status, "started")
self.assertEqual(response, 256)
@patch(
"py_selenium_server.selenium_server.SELENIUM_JAR_STORE_LOCATION",
test_data_dir)
def test_stop_server(self):
os.system(
"touch {0}{1}".format(
test_data_dir, mock_latest_version))
server = SeleniumServerHandler()
self.assertRaises(KeyboardInterrupt,
server.stop_server)
try:
server.stop_server()
except KeyboardInterrupt:
self.assertEqual(server.status, "stopped")
|
MideO/py-selenium-server
|
tests/test_selenium_server_commands.py
|
Python
|
gpl-3.0
| 1,343
|
import opensfm.reconstruction as orec
from opensfm.dataset import DataSetBase
def run_dataset(dataset: DataSetBase, input, output):
"""Bundle a reconstructions.
Args:
input: input reconstruction JSON in the dataset
        output: output reconstruction JSON in the dataset
"""
reconstructions = dataset.load_reconstruction(input)
camera_priors = dataset.load_camera_models()
rig_cameras_priors = dataset.load_rig_cameras()
gcp = dataset.load_ground_control_points()
tracks_manager = dataset.load_tracks_manager()
# load the tracks manager and add its observations to the reconstruction
# go through all the points and add their shots
for reconstruction in reconstructions:
reconstruction.add_correspondences_from_tracks_manager(tracks_manager)
orec.bundle(
reconstruction, camera_priors, rig_cameras_priors, gcp, dataset.config
)
dataset.save_reconstruction(reconstructions, output)
|
oscarlorentzon/OpenSfM
|
opensfm/actions/bundle.py
|
Python
|
bsd-2-clause
| 980
|
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import os
import six
from rally.verification.verifiers.tempest import tempest
from tests import test
from tests.verification.verifiers import fakes
TEMPEST_PATH = 'rally.verification.verifiers.tempest.tempest'
class TempestTestCase(test.TestCase):
def setUp(self):
super(TempestTestCase, self).setUp()
self.verifier = tempest.Tempest('fake_deploy_id')
self.verifier.verification = mock.MagicMock()
self.verifier.lock_path = 'fake_lock_path'
self.conf_opts = (
('compute', [
('flavor_ref_alt', 'fake_flavor_ref_alt'),
('flavor_ref', 'fake_flavor_ref'),
('image_ref_alt', 'fake_image_ref_alt'),
('image_ref', 'fake_image_ref')]),
('compute-admin', [('password', 'fake_password')]),
('identity', [
('username', 'fake_username'),
('password', 'fake_password'),
('tenant_name', 'fake_tenant_name'),
('admin_username', 'fake_username'),
('admin_password', 'fake_password'),
('admin_tenant_name', 'fake_tenant_name'),
('uri', 'fake_uri'),
('uri_v3', 'fake_uri')]))
self.set_name = 'smoke'
self.regex = None
def test__generate_config(self):
test_config = self.verifier._generate_config(self.conf_opts)
self.assertEqual(len(fakes.FAKE_CONFIG) - 1,
len(test_config.sections()))
for section, values in six.iteritems(fakes.FAKE_CONFIG):
if section != 'DEFAULT':
                # NOTE(akurilin): Method `items` from ConfigParser returns
# a list of (name, value) pairs for each option in the given
# section with options from DEFAULT section, so we need to
# extend FAKE_CONFIG for correct comparison.
values.extend(fakes.FAKE_CONFIG['DEFAULT'])
self.assertEqual(set(values),
set(test_config.items(section)))
@mock.patch('six.moves.builtins.open')
def test__write_config(self, mock_open):
conf = mock.Mock()
mock_file = mock.MagicMock()
mock_open.return_value = mock_file
fake_conf_path = os.path.join(self.verifier.tempest_path,
'tempest.conf')
self.verifier._write_config(conf)
mock_open.assert_called_once_with(fake_conf_path, 'w+')
        mock_file.write.assert_called_once_with(conf, fake_conf_path)
mock_file.close.assert_called_once()
@mock.patch('os.path.exists')
def test_is_installed(self, mock_exists):
mock_exists.return_value = True
result = self.verifier.is_installed()
mock_exists.assert_called_once_with(
os.path.join(self.verifier.tempest_path, '.venv'))
self.assertTrue(result)
@mock.patch('rally.verification.verifiers.tempest.tempest.subprocess')
def test__clone(self, mock_sp):
self.verifier._clone()
mock_sp.check_call.assert_called_once_with(
['git', 'clone', 'git://github.com/openstack/tempest',
tempest.Tempest.tempest_base_path])
@mock.patch(TEMPEST_PATH + '.Tempest._install_venv')
@mock.patch(TEMPEST_PATH + '.subprocess')
@mock.patch('os.path.exists')
@mock.patch('shutil.copytree')
def test_install(self, mock_copytree, mock_exists, mock_sp, mock_venv):
mock_exists.side_effect = (False, True, False)
        # simulate tempest is cloned but not installed for the current deployment
self.verifier.install()
mock_copytree.assert_called_once_with(
tempest.Tempest.tempest_base_path,
self.verifier.tempest_path)
mock_sp.check_call.assert_called_once_with(
'git checkout master; git remote update; git pull',
cwd=os.path.join(self.verifier.tempest_path, 'tempest'),
shell=True)
@mock.patch('rally.verification.verifiers.tempest.tempest.shutil')
@mock.patch('os.path.exists')
def test_uninstall(self, mock_exists, mock_shutil):
mock_exists.return_value = True
self.verifier.uninstall()
mock_shutil.rmtree.assert_called_once_with(self.verifier.tempest_path)
@mock.patch(TEMPEST_PATH + '.Tempest._generate_env')
@mock.patch('shutil.rmtree')
@mock.patch(TEMPEST_PATH + '.subprocess')
def test__run(self, mock_sp, mock_rmtree, mock_env):
self.verifier._run('smoke')
fake_call = (
'%(venv)s testr run --parallel --subunit smoke '
'| %(venv)s subunit2junitxml --forward '
'--output-to=%(tempest_path)s/testr_log.xml '
'| %(venv)s subunit-2to1 '
'| %(venv)s %(tempest_path)s/tools/colorizer.py' % {
'venv': self.verifier._venv_wrapper,
'tempest_path': self.verifier.tempest_path})
mock_sp.check_call.assert_called_once_with(
fake_call, env=mock_env(), cwd=self.verifier.tempest_path,
shell=True)
@mock.patch(TEMPEST_PATH + '.Tempest._save_results')
@mock.patch(TEMPEST_PATH + '.Tempest._prepare_and_run')
def test_verify(self, mock_run, mock_save_results):
self.verifier.verify(set_name=self.set_name, regex=None,
options=self.conf_opts)
mock_run.assert_called_once_with('smoke', None, self.conf_opts)
@mock.patch(TEMPEST_PATH + '.Tempest._check_testr_initialization')
@mock.patch(TEMPEST_PATH + '.Tempest._run')
@mock.patch(TEMPEST_PATH + '.Tempest._write_config')
@mock.patch(TEMPEST_PATH + '.Tempest._generate_config')
def test__prepare_and_run(self, mock_gen, mock_write, mock_run,
mock_check_testr):
mock_gen.return_value = 'fake_conf'
self.verifier._prepare_and_run(set_name=self.set_name,
regex=None,
options=self.conf_opts)
mock_gen.assert_called_once_with(self.conf_opts)
mock_write.assert_called_once_with('fake_conf')
mock_run.assert_called_once_with('smoke')
@mock.patch('os.environ')
def test__generate_env(self, mock_env):
expected_env = {'PATH': '/some/path'}
mock_env.copy.return_value = expected_env.copy()
expected_env.update({
'TEMPEST_CONFIG': 'tempest.conf',
'TEMPEST_CONFIG_DIR': self.verifier.tempest_path,
'OS_TEST_PATH': os.path.join(self.verifier.tempest_path,
'tempest/test_discover')})
self.assertEqual(expected_env, self.verifier._generate_env())
@mock.patch('os.path.isdir')
@mock.patch(TEMPEST_PATH + '.subprocess')
def test__venv_install_when_venv_exists(self, mock_sp, mock_isdir):
mock_isdir.return_value = True
self.verifier._install_venv()
mock_isdir.assert_called_once_with(
os.path.join(self.verifier.tempest_path, '.venv'))
self.assertEqual(0, mock_sp.call_count)
@mock.patch('os.path.isdir')
@mock.patch(TEMPEST_PATH + '.subprocess')
def test__venv_install_when_venv_not_exist(self, mock_sp, mock_isdir):
mock_isdir.return_value = False
self.verifier._install_venv()
mock_isdir.assert_called_once_with(
os.path.join(self.verifier.tempest_path, '.venv'))
mock_sp.check_call.assert_has_calls([
mock.call('python ./tools/install_venv.py', shell=True,
cwd=self.verifier.tempest_path),
mock.call('%s pip install junitxml' % self.verifier._venv_wrapper,
shell=True, cwd=self.verifier.tempest_path)])
@mock.patch('os.path.isdir')
@mock.patch(TEMPEST_PATH + '.subprocess')
def test__check_testr_initialization_when_testr_already_initialized(
self, mock_sp, mock_isdir):
mock_isdir.return_value = True
self.verifier._check_testr_initialization()
mock_isdir.assert_called_once_with(
os.path.join(self.verifier.tempest_path, '.testrepository'))
self.assertEqual(0, mock_sp.call_count)
@mock.patch('os.path.isdir')
@mock.patch(TEMPEST_PATH + '.subprocess.call')
def test__check_testr_initialization_when_testr_not_initialized(
self, mock_sp, mock_isdir):
mock_isdir.return_value = False
self.verifier._check_testr_initialization()
mock_isdir.assert_called_once_with(
os.path.join(self.verifier.tempest_path, '.testrepository'))
mock_sp.assert_called_once_with(
'%s testr init' % self.verifier._venv_wrapper, shell=True,
cwd=self.verifier.tempest_path)
@mock.patch('xml.dom.minidom')
@mock.patch('os.path.isfile')
def test__save_results_without_log_file(self, mock_isfile, mock_minidom):
mock_isfile.return_value = False
self.verifier._save_results()
mock_isfile.assert_called_once_with(self.verifier.log_file)
self.assertEqual(0, mock_minidom.call_count)
@mock.patch('os.path.isfile')
def test__save_results_with_log_file(self, mock_isfile):
mock_isfile.return_value = True
self.verifier.log_file = os.path.join(os.path.dirname(__file__),
'fake_log.xml')
self.verifier._save_results()
mock_isfile.assert_called_once_with(self.verifier.log_file)
fake_test_case = fakes.get_fake_test_case()
self.verifier.verification.finish_verification.assert_called_once_with(
total=fake_test_case['total'],
test_cases=fake_test_case['test_cases'])
|
ytsarev/rally
|
tests/verification/verifiers/test_tempest.py
|
Python
|
apache-2.0
| 10,366
|
#Code by Jennifer Atkins
#Created Tuesday July 25, 2017 13:20:16
#Python file that graphs multiple air quality test result CSV files
import matplotlib.pyplot as plt
import csv
import dateutil
import numpy as np
import time
import datetime
def correlation_coefficient(xdata,ydata):
xmean = np.mean(xdata)
ymean = np.mean(ydata)
xsigma = np.sqrt(np.var(xdata))
ysigma = np.sqrt(np.var(ydata))
xysums = 0
for i in range(len(xdata)):
xdiff = xdata[i] - xmean
ydiff = ydata[i] - ymean
xysums = xdiff * ydiff +xysums
stnddevs = xsigma * ysigma
coeff = xysums/stnddevs/len(xdata)
return coeff
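#Quick sanity check (illustrative only, not used by the rest of the script):
#the hand-rolled coefficient above matches numpy's built-in Pearson correlation
#because both divide the population covariance by the product of the
#population standard deviations.
def check_correlation_coefficient():
    xs = np.array([1.0, 2.0, 3.0, 4.0])
    ys = np.array([2.0, 4.0, 5.0, 9.0])
    return np.isclose(correlation_coefficient(xs, ys), np.corrcoef(xs, ys)[0, 1])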
user_file1 = input("File Name 1: ")
resultsa = csv.reader(open(user_file1), delimiter=',')
user_file2 = input("File Name 2: ")
resultsb = csv.reader(open(user_file2), delimiter=',')
timesa = []
timesb = []
Val25a = []
Val25b = []
row_countera= 0
for r in resultsa:
row_countera += 1
if row_countera>1:
#Append each column in CSV to a separate list
#this_time = dateutil.parser.parse(r[0])
#this_time = this_time + datetime.timedelta(hours=30,minutes=43)
#timesa.append(this_time) #converts str date and time to datetime
timesa.append(dateutil.parser.parse(r[0]))
Val25a.append(int(r[8]))
row_counterb= 0
for r in resultsb:
row_counterb += 1
if row_counterb>1:
#Append each column in CSV to a separate list
timesb.append(dateutil.parser.parse(r[0]))
Val25b.append(int(r[8]))
n_merge = int(input("n data points to combine:"))
ndata_a = len(Val25a)
ndata_b = len(Val25b)
nsum_data_a= int(ndata_a/n_merge)
nsum_data_b= int(ndata_b/n_merge)
data_ave_a = []
data_ave_b = []
data_unc_a = []
data_unc_b = []
merge_times_a = []
merge_times_b = []
for i in range(nsum_data_a):
idata = Val25a[i*n_merge:(i+1)*n_merge]
idata_array = np.asarray(idata)
aqmean = np.mean(idata_array)
aqsigma = np.sqrt(np.var(idata_array))
data_ave_a.append(aqmean)
data_unc_a.append(aqsigma)
itimes = timesa[i*n_merge:(i+1)*n_merge]
itime = itimes[int(len(itimes)/2)]
merge_times_a.append(itime)
for i in range(nsum_data_b):
idata = Val25b[i*n_merge:(i+1)*n_merge]
idata_array = np.asarray(idata)
aqmean = np.mean(idata_array)
aqsigma = np.sqrt(np.var(idata_array))
data_ave_b.append(aqmean)
data_unc_b.append(aqsigma)
itimes = timesb[i*n_merge:(i+1)*n_merge]
itime = itimes[int(len(itimes)/2)]
merge_times_b.append(itime)
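#The two averaging loops above can also be written with a reshape; this
#illustrative helper (not called by the script) chunks the values into
#n_chunks groups of n_merge and returns their means and standard deviations,
#matching the data_ave_*/data_unc_* lists computed above.
def merge_average(values, n_merge, n_chunks):
    arr = np.asarray(values[:n_chunks * n_merge], dtype=float).reshape(n_chunks, n_merge)
    return arr.mean(axis=1), arr.std(axis=1)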
print("Touchpi Ave Data Length:", len(data_ave_a), "Openpi Ave Data Length:", len(data_ave_b))
fig = plt.figure()
plt.figure(1)
plt.plot(merge_times_a, data_ave_a, "b.", label='File 1')
plt.plot(merge_times_b, data_ave_b, "g.", label = 'File 2')
plt.legend(loc="best")
plt.xlabel("Time")
plt.ylabel("Particle Concentration 2.5")
file_title = "Air Quality Test Results"
plt.title(file_title)
fig.autofmt_xdate()
data_arrayA = np.asarray(data_ave_a)
data_arrayB = np.asarray(data_ave_b)
index = [0, len(data_arrayB) - 1, len(data_arrayB) - 2, len(data_arrayB) - 3, len(data_arrayB) - 4]
data_arrayB = np.delete(data_arrayB, index)
corr_coeff = correlation_coefficient(data_arrayA, data_arrayB)
corr_statement = ('Correlation coefficient = ', corr_coeff)
plt.figure(2)
plt.plot(data_arrayA, data_arrayB, "b.")
plt.xlabel("Touchpi")
plt.ylabel("Openpi")
file_title2 = "AQ Sensor Correlation"
plt.title(file_title2)
print(corr_statement)
plt.show()
|
bearing/dosenet-raspberrypi
|
aq_comparison_graph.py
|
Python
|
mit
| 3,364
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import flags
from . import setup
from . import mixup
|
google-research/understanding-transfer-learning
|
libutil/__init__.py
|
Python
|
apache-2.0
| 634
|
from django import template
import randomcolor
register = template.Library()
CATEGORY_NAMES = {
'cs.AI': 'Artificial Intelligence',
'cs.CL': 'Computation and Language',
'cs.CC': 'Computational Complexity',
'cs.CE': 'Computational Engineering',
'cs.CG': 'Computational Geometry',
'cs.GT': 'Game Theory',
'cs.CV': 'Computer Vision',
'cs.CY': 'Computers and Society',
'cs.CR': 'Cryptography and Security',
'cs.DS': 'Data Structures and Algorithms',
'cs.DB': 'Databases',
'cs.DL': 'Digital Libraries',
'cs.DM': 'Discrete Mathematics',
'cs.DC': 'Distributed Computing',
'cs.ET': 'Emerging Technologies',
'cs.FL': 'Formal Languages',
'cs.GL': 'General Literature',
'cs.GR': 'Graphics',
'cs.AR': 'Hardware Architecture',
'cs.HC': 'Human-Computer Interaction',
'cs.IR': 'Information Retrieval',
'cs.IT': 'Information Theory',
'cs.LG': 'Learning',
'cs.LO': 'Logic',
'cs.MS': 'Mathematical Software',
'cs.MA': 'Multiagent Systems',
'cs.MM': 'Multimedia',
'cs.NI': 'Networking and Internet',
'cs.NE': 'Neural and Evolutionary Computing',
'cs.NA': 'Numerical Analysis',
'cs.OS': 'Operating Systems',
'cs.PF': 'Performance',
'cs.PL': 'Programming Languages',
'cs.RO': 'Robotics',
'cs.SI': 'Social and Information Networks',
'cs.SE': 'Software Engineering',
'cs.SD': 'Sound',
'cs.SC': 'Symbolic Computation',
'cs.SY': 'Systems and Control',
'stat.ML': 'Machine Learning',
}
@register.inclusion_tag('papers/templatetags/category_badge.html')
def category_badge(category):
if category not in CATEGORY_NAMES:
return {}
return {
'category': category,
'name': CATEGORY_NAMES[category],
'color': randomcolor.RandomColor(category).generate(luminosity='dark')[0],
}
|
arxiv-vanity/arxiv-vanity
|
arxiv_vanity/papers/templatetags/papers.py
|
Python
|
apache-2.0
| 1,851
|
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import httpretty
import pytest
from selenium.common.exceptions import InvalidArgumentException
from appium.webdriver.webdriver import WebDriver
from test.unit.helper.test_helper import android_w3c_driver, appium_command, get_httpretty_request_body
class TestWebDriverRemoteFs(object):
@httpretty.activate
def test_push_file(self):
driver = android_w3c_driver()
httpretty.register_uri(
httpretty.POST,
appium_command('/session/1234567890/appium/device/push_file'),
)
dest_path = '/path/to/file.txt'
data = base64.b64encode(bytes('HelloWorld', 'utf-8')).decode('utf-8')
assert isinstance(driver.push_file(dest_path, data), WebDriver)
d = get_httpretty_request_body(httpretty.last_request())
assert d['path'] == dest_path
assert d['data'] == str(data)
@httpretty.activate
def test_push_file_invalid_arg_exception_without_src_path_and_base64data(self):
driver = android_w3c_driver()
httpretty.register_uri(
httpretty.POST,
appium_command('/session/1234567890/appium/device/push_file'),
)
dest_path = '/path/to/file.txt'
with pytest.raises(InvalidArgumentException):
driver.push_file(dest_path)
@httpretty.activate
def test_push_file_invalid_arg_exception_with_src_file_not_found(self):
driver = android_w3c_driver()
httpretty.register_uri(
httpretty.POST,
appium_command('/session/1234567890/appium/device/push_file'),
)
dest_path = '/dest_path/to/file.txt'
src_path = '/src_path/to/file.txt'
with pytest.raises(InvalidArgumentException):
driver.push_file(dest_path, source_path=src_path)
@httpretty.activate
def test_pull_file(self):
driver = android_w3c_driver()
httpretty.register_uri(
httpretty.POST,
appium_command('/session/1234567890/appium/device/pull_file'),
body='{"value": "SGVsbG9Xb3JsZA=="}',
)
dest_path = '/path/to/file.txt'
assert driver.pull_file(dest_path) == str(base64.b64encode(bytes('HelloWorld', 'utf-8')).decode('utf-8'))
d = get_httpretty_request_body(httpretty.last_request())
assert d['path'] == dest_path
@httpretty.activate
def test_pull_folder(self):
driver = android_w3c_driver()
httpretty.register_uri(
httpretty.POST,
appium_command('/session/1234567890/appium/device/pull_folder'),
body='{"value": "base64EncodedZippedFolderData"}',
)
dest_path = '/path/to/file.txt'
assert driver.pull_folder(dest_path) == 'base64EncodedZippedFolderData'
d = get_httpretty_request_body(httpretty.last_request())
assert d['path'] == dest_path
|
appium/python-client
|
test/unit/webdriver/device/remote_fs_test.py
|
Python
|
apache-2.0
| 3,434
|
"""
The :mod:`sklearn.model_selection._validation` module includes classes and
functions to validate the model.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>,
# Gael Varoquaux <gael.varoquaux@normalesup.org>,
# Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
import warnings
import numbers
import time
import numpy as np
import scipy.sparse as sp
from ..base import is_classifier, clone
from ..utils import indexable, check_random_state, safe_indexing
from ..utils.fixes import astype
from ..utils.validation import _is_arraylike, _num_samples
from ..utils.metaestimators import _safe_split
from ..externals.joblib import Parallel, delayed, logger
from ..metrics.scorer import check_scoring
from ..exceptions import FitFailedWarning
from ._split import check_cv
__all__ = ['cross_val_score', 'cross_val_predict', 'permutation_test_score',
'learning_curve', 'validation_curve']
def cross_val_score(estimator, X, y=None, groups=None, scoring=None, cv=None,
n_jobs=1, verbose=0, fit_params=None,
pre_dispatch='2*n_jobs'):
"""Evaluate a score by cross-validation
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
scores : array of float, shape=(len(list(cv)),)
Array of scores of the estimator for each run of the cross validation.
Examples
--------
>>> from sklearn import datasets, linear_model
>>> from sklearn.model_selection import cross_val_score
>>> diabetes = datasets.load_diabetes()
>>> X = diabetes.data[:150]
>>> y = diabetes.target[:150]
>>> lasso = linear_model.Lasso()
>>> print(cross_val_score(lasso, X, y)) # doctest: +ELLIPSIS
[ 0.33150734 0.08022311 0.03531764]
See Also
---------
:func:`sklearn.metrics.make_scorer`:
Make a scorer from a performance metric or loss function.
"""
X, y, groups = indexable(X, y, groups)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
cv_iter = list(cv.split(X, y, groups))
scorer = check_scoring(estimator, scoring=scoring)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
scores = parallel(delayed(_fit_and_score)(clone(estimator), X, y, scorer,
train, test, verbose, None,
fit_params)
for train, test in cv_iter)
return np.array(scores)[:, 0]
def _fit_and_score(estimator, X, y, scorer, train, test, verbose,
parameters, fit_params, return_train_score=False,
return_parameters=False, return_n_test_samples=False,
return_times=False, error_score='raise'):
"""Fit estimator and compute scores for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scorer : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
parameters : dict or None
Parameters to be set on the estimator.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
return_train_score : boolean, optional, default: False
Compute and return score on training set.
return_parameters : boolean, optional, default: False
        Return the parameters that have been used for the estimator.
Returns
-------
train_score : float, optional
Score on training set, returned only if `return_train_score` is `True`.
test_score : float
Score on test set.
n_test_samples : int
Number of test samples.
fit_time : float
Time spent for fitting in seconds.
score_time : float
Time spent for scoring in seconds.
parameters : dict or None, optional
The parameters that have been evaluated.
"""
if verbose > 1:
if parameters is None:
msg = ''
else:
msg = '%s' % (', '.join('%s=%s' % (k, v)
for k, v in parameters.items()))
print("[CV] %s %s" % (msg, (64 - len(msg)) * '.'))
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
if parameters is not None:
estimator.set_params(**parameters)
start_time = time.time()
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
try:
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
except Exception as e:
# Note fit time as time until error
fit_time = time.time() - start_time
score_time = 0.0
if error_score == 'raise':
raise
elif isinstance(error_score, numbers.Number):
test_score = error_score
if return_train_score:
train_score = error_score
warnings.warn("Classifier fit failed. The score on this train-test"
" partition for these parameters will be set to %f. "
"Details: \n%r" % (error_score, e), FitFailedWarning)
else:
raise ValueError("error_score must be the string 'raise' or a"
" numeric value. (Hint: if using 'raise', please"
" make sure that it has been spelled correctly.)")
else:
fit_time = time.time() - start_time
test_score = _score(estimator, X_test, y_test, scorer)
score_time = time.time() - start_time - fit_time
if return_train_score:
train_score = _score(estimator, X_train, y_train, scorer)
if verbose > 2:
msg += ", score=%f" % test_score
if verbose > 1:
total_time = score_time + fit_time
end_msg = "%s, total=%s" % (msg, logger.short_format_time(total_time))
print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg))
ret = [train_score, test_score] if return_train_score else [test_score]
if return_n_test_samples:
ret.append(_num_samples(X_test))
if return_times:
ret.extend([fit_time, score_time])
if return_parameters:
ret.append(parameters)
return ret
def _score(estimator, X_test, y_test, scorer):
"""Compute the score of an estimator on a given test set."""
if y_test is None:
score = scorer(estimator, X_test)
else:
score = scorer(estimator, X_test, y_test)
if hasattr(score, 'item'):
try:
# e.g. unwrap memmapped scalars
score = score.item()
except ValueError:
# non-scalar?
pass
if not isinstance(score, numbers.Number):
raise ValueError("scoring must return a number, got %s (%s) instead."
% (str(score), type(score)))
return score
def cross_val_predict(estimator, X, y=None, groups=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs',
method='predict'):
"""Generate cross-validated estimates for each input data point
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
method : string, optional, default: 'predict'
Invokes the passed method name of the passed estimator.
Returns
-------
predictions : ndarray
This is the result of calling ``method``
Examples
--------
>>> from sklearn import datasets, linear_model
>>> from sklearn.model_selection import cross_val_predict
>>> diabetes = datasets.load_diabetes()
>>> X = diabetes.data[:150]
>>> y = diabetes.target[:150]
>>> lasso = linear_model.Lasso()
>>> y_pred = cross_val_predict(lasso, X, y)
"""
X, y, groups = indexable(X, y, groups)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
cv_iter = list(cv.split(X, y, groups))
# Ensure the estimator has implemented the passed decision function
if not callable(getattr(estimator, method)):
raise AttributeError('{} not implemented in estimator'
.format(method))
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
prediction_blocks = parallel(delayed(_fit_and_predict)(
clone(estimator), X, y, train, test, verbose, fit_params, method)
for train, test in cv_iter)
# Concatenate the predictions
predictions = [pred_block_i for pred_block_i, _ in prediction_blocks]
test_indices = np.concatenate([indices_i
for _, indices_i in prediction_blocks])
if not _check_is_permutation(test_indices, _num_samples(X)):
raise ValueError('cross_val_predict only works for partitions')
inv_test_indices = np.empty(len(test_indices), dtype=int)
inv_test_indices[test_indices] = np.arange(len(test_indices))
# Check for sparse predictions
if sp.issparse(predictions[0]):
predictions = sp.vstack(predictions, format=predictions[0].format)
else:
predictions = np.concatenate(predictions)
return predictions[inv_test_indices]
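# Editor's illustrative sketch (not part of the original module): using the
# ``method`` argument above to collect out-of-fold class probabilities. The
# estimator and dataset are example choices; kept as comments so importing
# this module stays side-effect free.
#
#     from sklearn.datasets import load_iris
#     from sklearn.linear_model import LogisticRegression
#     iris = load_iris()
#     proba = cross_val_predict(LogisticRegression(), iris.data, iris.target,
#                               cv=5, method='predict_proba')
#     proba.shape  # (150, 3): one row of class probabilities per sample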
def _fit_and_predict(estimator, X, y, train, test, verbose, fit_params,
method):
"""Fit estimator and predict values for a given dataset split.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
method : string
Invokes the passed method name of the passed estimator.
Returns
-------
predictions : sequence
Result of calling 'estimator.method'
test : array-like
This is the value of the test parameter
"""
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, _ = _safe_split(estimator, X, y, test, train)
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
func = getattr(estimator, method)
predictions = func(X_test)
return predictions, test
def _check_is_permutation(indices, n_samples):
"""Check whether indices is a reordering of the array np.arange(n_samples)
Parameters
----------
indices : ndarray
integer array to test
n_samples : int
number of expected elements
Returns
-------
is_partition : bool
True iff sorted(indices) is np.arange(n)
"""
if len(indices) != n_samples:
return False
hit = np.zeros(n_samples, dtype=bool)
hit[indices] = True
if not np.all(hit):
return False
return True
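# Editor's note (illustrative, not in the original source): the helper above
# accepts any reordering of 0..n_samples-1 and rejects anything else, e.g.
#
#     _check_is_permutation(np.array([2, 0, 1]), 3)   # -> True
#     _check_is_permutation(np.array([0, 0, 1]), 3)   # -> False (index 2 never hit)
#     _check_is_permutation(np.array([0, 1]), 3)      # -> False (wrong length)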
def _index_param_value(X, v, indices):
"""Private helper function for parameter value indexing."""
if not _is_arraylike(v) or _num_samples(v) != _num_samples(X):
# pass through: skip indexing
return v
if sp.issparse(v):
v = v.tocsr()
return safe_indexing(v, indices)
def permutation_test_score(estimator, X, y, groups=None, cv=None,
n_permutations=100, n_jobs=1, random_state=0,
verbose=0, scoring=None):
"""Evaluate the significance of a cross-validated score with permutations
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like
The target variable to try to predict in the case of
supervised learning.
groups : array-like, with shape (n_samples,), optional
Labels to constrain permutation within groups, i.e. ``y`` values
are permuted among samples with the same group identifier.
When not specified, ``y`` values are permuted among all samples.
When a grouped cross-validator is used, the group labels are
also passed on to the ``split`` method of the cross-validator. The
cross-validator uses them for grouping the samples while splitting
the dataset into train/test set.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_permutations : integer, optional
Number of times to permute ``y``.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
random_state : RandomState or an int seed (0 by default)
A random number generator instance to define the state of the
random permutations generator.
verbose : integer, optional
The verbosity level.
Returns
-------
score : float
The true score without permuting targets.
permutation_scores : array, shape (n_permutations,)
        The scores obtained for each permutation.
pvalue : float
The returned value equals p-value if `scoring` returns bigger
numbers for better scores (e.g., accuracy_score). If `scoring` is
rather a loss function (i.e. when lower is better such as with
`mean_squared_error`) then this is actually the complement of the
p-value: 1 - p-value.
Notes
-----
This function implements Test 1 in:
Ojala and Garriga. Permutation Tests for Studying Classifier
Performance. The Journal of Machine Learning Research (2010)
vol. 11
"""
X, y, groups = indexable(X, y, groups)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
random_state = check_random_state(random_state)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
score = _permutation_test_score(clone(estimator), X, y, groups, cv, scorer)
permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_permutation_test_score)(
clone(estimator), X, _shuffle(y, groups, random_state),
groups, cv, scorer)
for _ in range(n_permutations))
permutation_scores = np.array(permutation_scores)
pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1)
return score, permutation_scores, pvalue
permutation_test_score.__test__ = False  # to avoid a problem with nosetests
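# Editor's illustrative sketch (not part of the original module): a typical call
# to permutation_test_score; the estimator and dataset below are example choices.
#
#     from sklearn.datasets import load_iris
#     from sklearn.svm import SVC
#     iris = load_iris()
#     score, perm_scores, pvalue = permutation_test_score(
#         SVC(kernel='linear'), iris.data, iris.target,
#         n_permutations=100, cv=5)
#     # A small pvalue suggests the score is unlikely to arise from permuted labels.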
def _permutation_test_score(estimator, X, y, groups, cv, scorer):
"""Auxiliary function for permutation_test_score"""
avg_score = []
for train, test in cv.split(X, y, groups):
estimator.fit(X[train], y[train])
avg_score.append(scorer(estimator, X[test], y[test]))
return np.mean(avg_score)
def _shuffle(y, groups, random_state):
"""Return a shuffled copy of y eventually shuffle among same groups."""
if groups is None:
indices = random_state.permutation(len(y))
else:
indices = np.arange(len(groups))
for group in np.unique(groups):
this_mask = (groups == group)
indices[this_mask] = random_state.permutation(indices[this_mask])
return y[indices]
def learning_curve(estimator, X, y, groups=None,
train_sizes=np.linspace(0.1, 1.0, 5), cv=None, scoring=None,
exploit_incremental_learning=False, n_jobs=1,
pre_dispatch="all", verbose=0):
"""Learning curve.
Determines cross-validated training and test scores for different training
set sizes.
A cross-validation generator splits the whole dataset k times in training
and test data. Subsets of the training set with varying sizes will be used
to train the estimator and a score for each training subset size and the
test set will be computed. Afterwards, the scores will be averaged over
all k runs for each training subset size.
Read more in the :ref:`User Guide <learning_curve>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
train_sizes : array-like, shape (n_ticks,), dtype float or int
Relative or absolute numbers of training examples that will be used to
generate the learning curve. If the dtype is float, it is regarded as a
fraction of the maximum size of the training set (that is determined
by the selected validation method), i.e. it has to be within (0, 1].
Otherwise it is interpreted as absolute sizes of the training sets.
        Note that for classification the number of samples usually has to
be big enough to contain at least one sample from each class.
(default: np.linspace(0.1, 1.0, 5))
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
exploit_incremental_learning : boolean, optional, default: False
If the estimator supports incremental learning, this will be
used to speed up fitting for different training set sizes.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
Returns
-------
train_sizes_abs : array, shape = (n_unique_ticks,), dtype int
        Numbers of training examples that have been used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See :ref:`examples/model_selection/plot_learning_curve.py
<sphx_glr_auto_examples_model_selection_plot_learning_curve.py>`
"""
if exploit_incremental_learning and not hasattr(estimator, "partial_fit"):
raise ValueError("An estimator must support the partial_fit interface "
"to exploit incremental learning")
X, y, groups = indexable(X, y, groups)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
# Make a list since we will be iterating multiple times over the folds
cv_iter = list(cv.split(X, y, groups))
scorer = check_scoring(estimator, scoring=scoring)
n_max_training_samples = len(cv_iter[0][0])
# Because the lengths of folds can be significantly different, it is
# not guaranteed that we use all of the available training data when we
# use the first 'n_max_training_samples' samples.
train_sizes_abs = _translate_train_sizes(train_sizes,
n_max_training_samples)
n_unique_ticks = train_sizes_abs.shape[0]
if verbose > 0:
print("[learning_curve] Training set sizes: " + str(train_sizes_abs))
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
if exploit_incremental_learning:
classes = np.unique(y) if is_classifier(estimator) else None
out = parallel(delayed(_incremental_fit_estimator)(
clone(estimator), X, y, classes, train, test, train_sizes_abs,
scorer, verbose) for train, test in cv_iter)
else:
out = parallel(delayed(_fit_and_score)(
clone(estimator), X, y, scorer, train[:n_train_samples], test,
verbose, parameters=None, fit_params=None, return_train_score=True)
for train, test in cv_iter
for n_train_samples in train_sizes_abs)
out = np.array(out)
n_cv_folds = out.shape[0] // n_unique_ticks
out = out.reshape(n_cv_folds, n_unique_ticks, 2)
out = np.asarray(out).transpose((2, 1, 0))
return train_sizes_abs, out[0], out[1]
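# Editor's illustrative sketch (not part of the original module): gathering
# learning-curve data ready for plotting; the estimator choice is an assumption.
#
#     from sklearn.datasets import load_digits
#     from sklearn.naive_bayes import GaussianNB
#     digits = load_digits()
#     sizes, train_scores, test_scores = learning_curve(
#         GaussianNB(), digits.data, digits.target,
#         train_sizes=np.linspace(0.1, 1.0, 5), cv=5)
#     # train_scores/test_scores have shape (n_ticks, n_cv_folds); average over
#     # axis=1 to get one curve value per training-set size.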
def _translate_train_sizes(train_sizes, n_max_training_samples):
"""Determine absolute sizes of training subsets and validate 'train_sizes'.
Examples:
_translate_train_sizes([0.5, 1.0], 10) -> [5, 10]
_translate_train_sizes([5, 10], 10) -> [5, 10]
Parameters
----------
train_sizes : array-like, shape (n_ticks,), dtype float or int
Numbers of training examples that will be used to generate the
learning curve. If the dtype is float, it is regarded as a
fraction of 'n_max_training_samples', i.e. it has to be within (0, 1].
n_max_training_samples : int
Maximum number of training samples (upper bound of 'train_sizes').
Returns
-------
train_sizes_abs : array, shape (n_unique_ticks,), dtype int
Numbers of training examples that will be used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
"""
train_sizes_abs = np.asarray(train_sizes)
n_ticks = train_sizes_abs.shape[0]
n_min_required_samples = np.min(train_sizes_abs)
n_max_required_samples = np.max(train_sizes_abs)
if np.issubdtype(train_sizes_abs.dtype, np.float):
if n_min_required_samples <= 0.0 or n_max_required_samples > 1.0:
raise ValueError("train_sizes has been interpreted as fractions "
"of the maximum number of training samples and "
"must be within (0, 1], but is within [%f, %f]."
% (n_min_required_samples,
n_max_required_samples))
train_sizes_abs = astype(train_sizes_abs * n_max_training_samples,
dtype=np.int, copy=False)
train_sizes_abs = np.clip(train_sizes_abs, 1,
n_max_training_samples)
else:
if (n_min_required_samples <= 0 or
n_max_required_samples > n_max_training_samples):
raise ValueError("train_sizes has been interpreted as absolute "
"numbers of training samples and must be within "
"(0, %d], but is within [%d, %d]."
% (n_max_training_samples,
n_min_required_samples,
n_max_required_samples))
train_sizes_abs = np.unique(train_sizes_abs)
if n_ticks > train_sizes_abs.shape[0]:
warnings.warn("Removed duplicate entries from 'train_sizes'. Number "
"of ticks will be less than the size of "
"'train_sizes' %d instead of %d)."
% (train_sizes_abs.shape[0], n_ticks), RuntimeWarning)
return train_sizes_abs
def _incremental_fit_estimator(estimator, X, y, classes, train, test,
train_sizes, scorer, verbose):
"""Train estimator on training subsets incrementally and compute scores."""
train_scores, test_scores = [], []
partitions = zip(train_sizes, np.split(train, train_sizes)[:-1])
for n_train_samples, partial_train in partitions:
train_subset = train[:n_train_samples]
X_train, y_train = _safe_split(estimator, X, y, train_subset)
X_partial_train, y_partial_train = _safe_split(estimator, X, y,
partial_train)
X_test, y_test = _safe_split(estimator, X, y, test, train_subset)
if y_partial_train is None:
estimator.partial_fit(X_partial_train, classes=classes)
else:
estimator.partial_fit(X_partial_train, y_partial_train,
classes=classes)
train_scores.append(_score(estimator, X_train, y_train, scorer))
test_scores.append(_score(estimator, X_test, y_test, scorer))
return np.array((train_scores, test_scores)).T
def validation_curve(estimator, X, y, param_name, param_range, groups=None,
cv=None, scoring=None, n_jobs=1, pre_dispatch="all",
verbose=0):
"""Validation curve.
Determine training and test scores for varying parameter values.
Compute scores for an estimator with different values of a specified
parameter. This is similar to grid search with one parameter. However, this
will also compute training scores and is merely a utility for plotting the
results.
Read more in the :ref:`User Guide <learning_curve>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
param_name : string
Name of the parameter that will be varied.
param_range : array-like, shape (n_values,)
The values of the parameter that will be evaluated.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
Returns
-------
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See :ref:`sphx_glr_auto_examples_model_selection_plot_validation_curve.py`
"""
X, y, groups = indexable(X, y, groups)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
cv_iter = list(cv.split(X, y, groups))
scorer = check_scoring(estimator, scoring=scoring)
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
out = parallel(delayed(_fit_and_score)(
estimator, X, y, scorer, train, test, verbose,
parameters={param_name: v}, fit_params=None, return_train_score=True)
for train, test in cv_iter for v in param_range)
out = np.asarray(out)
n_params = len(param_range)
n_cv_folds = out.shape[0] // n_params
out = out.reshape(n_cv_folds, n_params, 2).transpose((2, 1, 0))
return out[0], out[1]
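# Editor's illustrative sketch (not part of the original module): sweeping a
# single hyper-parameter; Ridge and the alpha grid below are example choices.
#
#     from sklearn.datasets import load_diabetes
#     from sklearn.linear_model import Ridge
#     diabetes = load_diabetes()
#     train_scores, test_scores = validation_curve(
#         Ridge(), diabetes.data, diabetes.target,
#         param_name='alpha', param_range=np.logspace(-3, 3, 7), cv=5)
#     # Both arrays have shape (7, 5): one row per alpha value, one column per fold.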
| asnorkin/sentiment_analysis | site/lib/python2.7/site-packages/sklearn/model_selection/_validation.py | Python | mit | 36,967 |
from __future__ import unicode_literals
from math import atan, exp, log, pi, sin
from django.contrib.gis.geos import GEOSGeometry, LinearRing, Point, Polygon
from django.contrib.gis.maps.google.gmap import GoogleMapException
from django.utils.six.moves import range
# Constants used for degree to radian conversion, and vice-versa.
DTOR = pi / 180.
RTOD = 180. / pi
class GoogleZoom(object):
"""
GoogleZoom is a utility for performing operations related to the zoom
levels on Google Maps.
This class is inspired by the OpenStreetMap Mapnik tile generation routine
`generate_tiles.py`, and the article "How Big Is the World" (Hack #16) in
"Google Maps Hacks" by Rich Gibson and Schuyler Erle.
`generate_tiles.py` may be found at:
http://trac.openstreetmap.org/browser/applications/rendering/mapnik/generate_tiles.py
"Google Maps Hacks" may be found at http://safari.oreilly.com/0596101619
"""
def __init__(self, num_zoom=19, tilesize=256):
"Initializes the Google Zoom object."
# Google's tilesize is 256x256, square tiles are assumed.
self._tilesize = tilesize
# The number of zoom levels
self._nzoom = num_zoom
# Initializing arrays to hold the parameters for each one of the
# zoom levels.
self._degpp = [] # Degrees per pixel
self._radpp = [] # Radians per pixel
self._npix = [] # 1/2 the number of pixels for a tile at the given zoom level
# Incrementing through the zoom levels and populating the parameter arrays.
z = tilesize # The number of pixels per zoom level.
for i in range(num_zoom):
            # Getting the degrees and radians per pixel, and half the number of
            # pixels for every zoom level.
self._degpp.append(z / 360.) # degrees per pixel
self._radpp.append(z / (2 * pi)) # radians per pixel
self._npix.append(z / 2) # number of pixels to center of tile
# Multiplying `z` by 2 for the next iteration.
z *= 2
def __len__(self):
"Returns the number of zoom levels."
return self._nzoom
def get_lon_lat(self, lonlat):
"Unpacks longitude, latitude from GEOS Points and 2-tuples."
if isinstance(lonlat, Point):
lon, lat = lonlat.coords
else:
lon, lat = lonlat
return lon, lat
def lonlat_to_pixel(self, lonlat, zoom):
"Converts a longitude, latitude coordinate pair for the given zoom level."
# Setting up, unpacking the longitude, latitude values and getting the
# number of pixels for the given zoom level.
lon, lat = self.get_lon_lat(lonlat)
npix = self._npix[zoom]
# Calculating the pixel x coordinate by multiplying the longitude value
# with the number of degrees/pixel at the given zoom level.
px_x = round(npix + (lon * self._degpp[zoom]))
        # Creating the factor, and ensuring that 1 or -1 is never fed into the
        # logarithm's argument.  Here's why:
        # if fac = -1, we'd take log(0), which is undefined;
        # if fac = 1, the argument's denominator (1 - fac) is 0, also undefined.
fac = min(max(sin(DTOR * lat), -0.9999), 0.9999)
# Calculating the pixel y coordinate.
px_y = round(npix + (0.5 * log((1 + fac) / (1 - fac)) * (-1.0 * self._radpp[zoom])))
# Returning the pixel x, y to the caller of the function.
return (px_x, px_y)
def pixel_to_lonlat(self, px, zoom):
"Converts a pixel to a longitude, latitude pair at the given zoom level."
if len(px) != 2:
raise TypeError('Pixel should be a sequence of two elements.')
# Getting the number of pixels for the given zoom level.
npix = self._npix[zoom]
# Calculating the longitude value, using the degrees per pixel.
lon = (px[0] - npix) / self._degpp[zoom]
# Calculating the latitude value.
lat = RTOD * (2 * atan(exp((px[1] - npix) / (-1.0 * self._radpp[zoom]))) - 0.5 * pi)
# Returning the longitude, latitude coordinate pair.
return (lon, lat)
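    # Editor's note (illustrative, not in the original source): lonlat_to_pixel and
    # pixel_to_lonlat are near-inverses of each other, up to the rounding above, e.g.
    #
    #     z = GoogleZoom()
    #     px = z.lonlat_to_pixel((2.35, 48.85), 10)   # (lon, lat) near Paris, zoom 10
    #     z.pixel_to_lonlat(px, 10)                   # approximately (2.35, 48.85)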
def tile(self, lonlat, zoom):
"""
Returns a Polygon corresponding to the region represented by a fictional
Google Tile for the given longitude/latitude pair and zoom level. This
tile is used to determine the size of a tile at the given point.
"""
# The given lonlat is the center of the tile.
delta = self._tilesize / 2
# Getting the pixel coordinates corresponding to the
# the longitude/latitude.
px = self.lonlat_to_pixel(lonlat, zoom)
# Getting the lower-left and upper-right lat/lon coordinates
# for the bounding box of the tile.
ll = self.pixel_to_lonlat((px[0] - delta, px[1] - delta), zoom)
ur = self.pixel_to_lonlat((px[0] + delta, px[1] + delta), zoom)
# Constructing the Polygon, representing the tile and returning.
return Polygon(LinearRing(ll, (ll[0], ur[1]), ur, (ur[0], ll[1]), ll), srid=4326)
def get_zoom(self, geom):
"Returns the optimal Zoom level for the given geometry."
# Checking the input type.
if not isinstance(geom, GEOSGeometry) or geom.srid != 4326:
raise TypeError('get_zoom() expects a GEOS Geometry with an SRID of 4326.')
# Getting the envelope for the geometry, and its associated width, height
# and centroid.
env = geom.envelope
env_w, env_h = self.get_width_height(env.extent)
center = env.centroid
for z in range(self._nzoom):
# Getting the tile at the zoom level.
tile_w, tile_h = self.get_width_height(self.tile(center, z).extent)
# When we span more than one tile, this is an approximately good
# zoom level.
if (env_w > tile_w) or (env_h > tile_h):
if z == 0:
raise GoogleMapException('Geometry width and height should not exceed that of the Earth.')
return z - 1
# Otherwise, we've zoomed in to the max.
return self._nzoom - 1
def get_width_height(self, extent):
"""
Returns the width and height for the given extent.
"""
# Getting the lower-left, upper-left, and upper-right
# coordinates from the extent.
ll = Point(extent[:2])
ul = Point(extent[0], extent[3])
ur = Point(extent[2:])
# Calculating the width and height.
height = ll.distance(ul)
width = ul.distance(ur)
return width, height
| BitWriters/Zenith_project | zango/lib/python3.5/site-packages/django/contrib/gis/maps/google/zoom.py | Python | mit | 6,676 |
from django import forms
from taggit.forms import *
class CommentForm(forms.Form):
content_type = forms.CharField(widget=forms.HiddenInput)
object_id = forms.IntegerField(widget=forms.HiddenInput)
#parent_id = forms.IntegerField(widget=forms.HiddenInput, required=False)
content = forms.CharField(label='', widget=forms.Textarea)
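# Editor's illustrative sketch (not part of the original module): binding and
# validating this form in a view; the content-type string and view wiring are
# assumptions.
#
#     form = CommentForm(request.POST or None,
#                        initial={'content_type': 'blog.post', 'object_id': 1})
#     if form.is_valid():
#         content = form.cleaned_data['content']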
| isabernardes/Heriga | comments/forms.py | Python | mit | 348 |
# -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2014 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <http://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
'''
File format specific behavior.
'''
from django.utils.translation import ugettext_lazy as _
from translate.storage.lisa import LISAfile
from translate.storage.properties import propunit, propfile
from translate.storage.xliff import xliffunit
from translate.storage.po import pounit, pofile
from translate.storage.php import phpunit
from translate.storage.ts2 import tsunit
from translate.storage import mo
from translate.storage import factory
from weblate.trans.util import get_string, join_plural
from translate.misc import quote
import weblate
import subprocess
import os.path
import re
import hashlib
import importlib
import __builtin__
FILE_FORMATS = {}
FLAGS_RE = re.compile(r'\b[-\w]+\b')
LOCATIONS_RE = re.compile(r'^([+-]|.*, [+-]|.*:[+-])')
def register_fileformat(fileformat):
'''
Registers fileformat in dictionary.
'''
FILE_FORMATS[fileformat.format_id] = fileformat
class FileUnit(object):
'''
Wrapper for translate-toolkit unit to cope with ID/template based
translations.
'''
def __init__(self, unit, template=None):
'''
Creates wrapper object.
'''
self.unit = unit
self.template = template
if template is not None:
self.mainunit = template
else:
self.mainunit = unit
self.checksum = None
self.contentsum = None
def get_locations(self):
'''
Returns comma separated list of locations.
'''
# XLIFF and PHP are special in ttkit - it uses locations for what
# is context in other formats
if (isinstance(self.mainunit, xliffunit)
or isinstance(self.mainunit, phpunit)):
return ''
result = ', '.join(self.mainunit.getlocations())
# Do not try to handle relative locations in Qt TS, see
# http://qt-project.org/doc/qt-4.8/linguist-ts-file-format.html
if LOCATIONS_RE.match(result):
return ''
return result
def reformat_flags(self, typecomments):
'''
Processes flags from PO file to nicer form.
'''
# Grab flags
flags = set(FLAGS_RE.findall('\n'.join(typecomments)))
# Discard fuzzy flag, we don't care about that one
flags.discard('fuzzy')
# Join into string
return ', '.join(flags)
def get_flags(self):
'''
Returns flags (typecomments) from units.
This is Gettext (po) specific feature.
'''
# Merge flags
if hasattr(self.unit, 'typecomments'):
return self.reformat_flags(self.unit.typecomments)
elif hasattr(self.template, 'typecomments'):
return self.reformat_flags(self.template.typecomments)
else:
return ''
def get_comments(self):
'''
Returns comments (notes) from units.
'''
comment = ''
if self.unit is not None:
comment = self.unit.getnotes()
if self.template is not None:
# Avoid duplication in case template has same comments
template_comment = self.template.getnotes()
if template_comment != comment:
comment = template_comment + ' ' + comment
return comment
def is_unit_key_value(self):
'''
Checks whether unit is key = value based rather than
translation.
These are some files like PHP or properties, which for some
reason do not correctly set source/target attributes.
'''
return (
hasattr(self.mainunit, 'name')
and hasattr(self.mainunit, 'value')
and hasattr(self.mainunit, 'translation')
)
def get_source(self):
'''
Returns source string from a ttkit unit.
'''
if (isinstance(self.mainunit, tsunit)
and self.template is None
and self.mainunit.hasplural()):
# Need to apply special magic for plurals here
            # as there is no singular/plural in the source string
return join_plural([
self.unit.source,
self.unit.source,
])
if self.is_unit_key_value():
# Need to decode property encoded string
if isinstance(self.mainunit, propunit):
if self.template is not None:
return quote.propertiesdecode(self.template.value)
else:
return quote.propertiesdecode(self.unit.name)
if self.template is not None:
return self.template.value
else:
return self.unit.name
else:
if self.template is not None:
return get_string(self.template.target)
else:
return get_string(self.unit.source)
def get_target(self):
'''
Returns target string from a ttkit unit.
'''
if self.unit is None:
return ''
if (isinstance(self.unit, tsunit)
and not self.unit.isreview()
and not self.unit.istranslated()):
# For Qt ts, empty translated string means source should be used
return self.get_source()
if self.is_unit_key_value():
# Need to decode property encoded string
if isinstance(self.unit, propunit):
# This is basically stolen from
# translate.storage.properties.propunit.gettarget
# which for some reason does not return translation
value = quote.propertiesdecode(self.unit.value)
value = re.sub(u"\\\\ ", u" ", value)
return value
return self.unit.value
else:
return get_string(self.unit.target)
def get_context(self):
'''
Returns context of message. In some cases we have to use
ID here to make all backends consistent.
'''
# XLIFF is special in ttkit - it uses locations for what
# is context in other formats
if isinstance(self.mainunit, xliffunit):
context = self.mainunit.getlocations()
if len(context) == 0:
return ''
else:
return context[0]
elif isinstance(self.mainunit, pounit) and self.template is not None:
# Monolingual PO files
return self.template.source
else:
context = self.mainunit.getcontext()
if self.is_unit_key_value() and context == '':
return self.mainunit.getid()
return context
def get_previous_source(self):
'''
Returns previous message source if there was any.
'''
if not self.is_fuzzy() or not hasattr(self.unit, 'prev_source'):
return ''
return get_string(self.unit.prev_source)
def get_checksum(self):
'''
Returns checksum of source string, used for quick lookup.
We use MD5 as it is faster than SHA1.
'''
if self.checksum is None:
md5 = hashlib.md5()
if self.template is None:
md5.update(self.get_source().encode('utf-8'))
md5.update(self.get_context().encode('utf-8'))
self.checksum = md5.hexdigest()
return self.checksum
def get_contentsum(self):
'''
        Returns checksum of source string and context, used for quick lookup.
We use MD5 as it is faster than SHA1.
'''
if self.template is None:
return self.get_checksum()
if self.contentsum is None:
md5 = hashlib.md5()
md5.update(self.get_source().encode('utf-8'))
md5.update(self.get_context().encode('utf-8'))
self.contentsum = md5.hexdigest()
return self.contentsum
def is_translated(self):
'''
Checks whether unit is translated.
'''
if self.unit is None:
return False
if self.is_unit_key_value():
return not self.unit.isfuzzy() and self.unit.value != ''
elif isinstance(self.mainunit, tsunit):
# For Qt ts, empty translated string means source should be used
return not self.unit.isreview() or self.unit.istranslated()
else:
return self.unit.istranslated()
def is_fuzzy(self):
'''
        Checks whether unit is marked as fuzzy.
'''
if self.unit is None:
return False
return self.unit.isfuzzy()
def is_obsolete(self):
'''
Checks whether unit is marked as obsolete in backend.
'''
return self.mainunit.isobsolete()
def is_translatable(self):
'''
Checks whether unit is translatable.
        For some reason, a blank string does not mean a non-translatable
        unit in some formats (XLIFF), so let's skip those as well.
'''
return self.mainunit.istranslatable() and not self.mainunit.isblank()
def set_target(self, target):
'''
Sets translation unit target.
'''
self.unit.settarget(target)
# Propagate to value so that is_translated works correctly
if self.is_unit_key_value():
self.unit.value = self.unit.translation
def mark_fuzzy(self, fuzzy):
'''
Sets fuzzy flag on translated unit.
'''
self.unit.markfuzzy(fuzzy)
class FileFormat(object):
'''
Generic object defining file format loader.
'''
name = ''
format_id = ''
loader = (None, None)
monolingual = None
check_flags = ()
@classmethod
def fixup(cls, store):
'''
Performs optional fixups on store.
'''
return store
@classmethod
def load(cls, storefile):
'''
Loads file using defined loader.
'''
# Workaround for _ created by interactive interpreter and
# later used instead of gettext by ttkit
if ('_' in __builtin__.__dict__
and not callable(__builtin__.__dict__['_'])):
del __builtin__.__dict__['_']
# Add missing mode attribute to Django file wrapper
if not isinstance(storefile, basestring):
storefile.mode = 'r'
return cls.parse_store(storefile)
@classmethod
def parse_store(cls, storefile):
# Tuple style loader, import from translate toolkit
module_name, class_name = cls.loader
module = importlib.import_module(
'translate.storage.%s' % module_name
)
# Get the class
storeclass = getattr(module, class_name)
# Parse file
store = storeclass.parsefile(storefile)
# Apply possible fixups and return
return cls.fixup(store)
def __init__(self, storefile, template_store=None):
'''
Creates file format object, wrapping up translate-toolkit's
store.
'''
self.storefile = storefile
# Load store
self.store = self.load(storefile)
# Check store validity
if not self.is_valid(self.store):
raise ValueError('Invalid file format')
# Remember template
self.template_store = template_store
@property
def has_template(self):
'''
Checks whether class is using template.
'''
return (
(self.monolingual or self.monolingual is None)
and self.template_store is not None
)
def find_unit(self, context, source):
'''
Finds unit by context and source.
Returns tuple (ttkit_unit, created) indicating whether returned
unit is new one.
'''
if self.has_template:
# Need to create new unit based on template
template_ttkit_unit = self.template_store.findid(context)
# We search by ID when using template
ttkit_unit = self.store.findid(context)
# We always need new unit to translate
if ttkit_unit is None:
ttkit_unit = template_ttkit_unit
if template_ttkit_unit is None:
raise Exception(
'Could not find template unit for new unit!'
)
add = True
else:
add = False
return (FileUnit(ttkit_unit, template_ttkit_unit), add)
else:
# Find all units with same source
found_units = self.store.findunits(source)
# Find is broken for propfile, ignore results
if len(found_units) > 0 and not isinstance(self.store, propfile):
for ttkit_unit in found_units:
# Does context match?
if ttkit_unit.getcontext() == context:
return (FileUnit(ttkit_unit), False)
else:
# Fallback to manual find for value based files
for ttkit_unit in self.store.units:
ttkit_unit = FileUnit(ttkit_unit)
if ttkit_unit.get_source() == source:
return (ttkit_unit, False)
return (None, False)
def add_unit(self, ttkit_unit):
'''
        Adds new unit to the underlying store.
'''
if isinstance(self.store, LISAfile):
# LISA based stores need to know this
self.store.addunit(ttkit_unit.unit, new=True)
else:
self.store.addunit(ttkit_unit.unit)
def update_header(self, **kwargs):
'''
Updates store header if available.
'''
if not hasattr(self.store, 'updateheader'):
return
kwargs['x_generator'] = 'Weblate %s' % weblate.VERSION
# Adjust Content-Type header if needed
header = self.store.parseheader()
if ('Content-Type' not in header
or 'charset=CHARSET' in header['Content-Type']
or 'charset=ASCII' in header['Content-Type']):
kwargs['Content_Type'] = 'text/plain; charset=UTF-8'
self.store.updateheader(**kwargs)
def save(self):
'''
        Saves the underlying store to disk.
'''
self.store.save()
def all_units(self):
'''
Generator of all units.
'''
if not self.has_template:
for tt_unit in self.store.units:
# Create wrapper object
yield FileUnit(tt_unit)
else:
for template_unit in self.template_store.units:
# Create wrapper object (not translated)
yield FileUnit(
self.store.findid(template_unit.getid()),
template_unit
)
def count_units(self):
'''
Returns count of units.
'''
if not self.has_template:
return len(self.store.units)
else:
return len(self.template_store.units)
@property
def mimetype(self):
'''
Returns most common mime type for format.
'''
if self.store.Mimetypes is None:
# Properties files do not expose mimetype
return 'text/plain'
else:
return self.store.Mimetypes[0]
@property
def extension(self):
'''
Returns most common file extension for format.
'''
if self.store.Extensions is None:
# Typo in translate-toolkit 1.9, see
# https://github.com/translate/translate/pull/10
if hasattr(self.store, 'Exensions'):
return self.store.Exensions[0]
else:
return 'txt'
else:
return self.store.Extensions[0]
@classmethod
def supports_language_pack(cls):
'''
Checks whether backend store supports generating language pack.
'''
return hasattr(cls, 'get_language_pack')
@classmethod
def is_valid(cls, store):
'''
Checks whether store seems to be valid.
In some cases ttkit happily "parses" the file, even though it
        really did not do so (e.g. the Gettext parser on a random text file).
'''
if store is None:
return False
if cls.monolingual is False and str(store) == '':
return False
return True
@classmethod
def supports_new_language(cls):
'''
Whether it supports creating new translation.
'''
return False
@staticmethod
def is_valid_base_for_new(base):
'''
Checks whether base is valid.
'''
return True
@staticmethod
def add_language(filename, code, base):
'''
Adds new language file.
'''
raise ValueError('Not supported')
class AutoFormat(FileFormat):
name = _('Automatic detection')
format_id = 'auto'
@classmethod
def parse_store(cls, storefile):
'''
Directly loads using translate-toolkit.
'''
return factory.getobject(storefile)
register_fileformat(AutoFormat)
class PoFormat(FileFormat):
name = _('Gettext PO file')
format_id = 'po'
loader = ('po', 'pofile')
monolingual = False
msginit_found = None
def get_language_pack(self):
'''
Generates compiled messages file.
'''
outputfile = mo.mofile()
for unit in self.store.units:
if not unit.istranslated() and not unit.isheader():
continue
mounit = mo.mounit()
if unit.isheader():
mounit.source = ""
else:
mounit.source = unit.source
mounit.msgctxt = [unit.getcontext()]
mounit.target = unit.target
outputfile.addunit(mounit)
return str(outputfile)
def get_language_pack_meta(self):
'''
Returns language pack filename and mime type.
'''
basefile = os.path.splitext(
os.path.basename(self.storefile)
)[0]
return (
'%s.mo' % basefile,
'application/x-gettext-catalog'
)
@classmethod
def supports_new_language(cls):
'''
Checks whether we can create new language file.
'''
if cls.msginit_found is None:
try:
ret = subprocess.check_call(
['msginit', '--help'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
cls.msginit_found = (ret == 0)
except:
cls.msginit_found = False
return cls.msginit_found
@staticmethod
def is_valid_base_for_new(base):
'''
Checks whether base is valid.
'''
try:
pofile.parsefile(base)
return True
except:
return False
@staticmethod
def add_language(filename, code, base):
'''
Adds new language file.
'''
subprocess.check_call(
[
'msginit',
'-i', base,
'-o', filename,
'--no-translator',
'-l', code
],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
register_fileformat(PoFormat)
class PoMonoFormat(PoFormat):
name = _('Gettext PO file (monolingual)')
format_id = 'po-mono'
loader = ('po', 'pofile')
monolingual = True
register_fileformat(PoMonoFormat)
class TSFormat(FileFormat):
name = _('Qt Linguist Translation File')
format_id = 'ts'
loader = ('ts2', 'tsfile')
register_fileformat(TSFormat)
class XliffFormat(FileFormat):
name = _('XLIFF Translation File')
format_id = 'xliff'
loader = ('xliff', 'xlifffile')
register_fileformat(XliffFormat)
class StringsFormat(FileFormat):
name = _('OS X Strings')
format_id = 'strings'
loader = ('properties', 'stringsfile')
register_fileformat(StringsFormat)
class PropertiesFormat(FileFormat):
name = _('Java Properties')
format_id = 'properties'
loader = ('properties', 'javafile')
monolingual = True
@classmethod
def fixup(cls, store):
'''
Java properties need to be iso-8859-1, but
ttkit converts them to utf-8.
'''
store.encoding = 'iso-8859-1'
return store
register_fileformat(PropertiesFormat)
class PropertiesUtf8Format(FileFormat):
name = _('Java Properties (UTF-8)')
format_id = 'properties-utf8'
loader = ('properties', 'javautf8file')
monolingual = True
register_fileformat(PropertiesUtf8Format)
class PhpFormat(FileFormat):
name = _('PHP strings')
format_id = 'php'
loader = ('php', 'phpfile')
register_fileformat(PhpFormat)
class AndroidFormat(FileFormat):
name = _('Android String Resource')
format_id = 'aresource'
loader = ('aresource', 'AndroidResourceFile')
monolingual = True
# Whitespace is ignored in this format
check_flags = ('ignore-begin-space', 'ignore-end-space')
@classmethod
def supports_new_language(cls):
'''
Checks whether we can create new language file.
'''
return True
@staticmethod
def add_language(filename, code, base):
'''
Adds new language file.
'''
with open(filename, 'w') as output:
output.write('''<?xml version="1.0" encoding="utf-8"?>
<resources></resources>''')
register_fileformat(AndroidFormat)
FILE_FORMAT_CHOICES = [(fmt, FILE_FORMATS[fmt].name) for fmt in FILE_FORMATS]
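# Editor's illustrative sketch (not part of the original module): how a caller
# might use the registry built above; 'messages.po' is a hypothetical file name.
# Kept as comments so importing this module stays side-effect free.
#
#     fmt_class = FILE_FORMATS['po']        # -> PoFormat
#     store = fmt_class('messages.po')      # wraps a translate-toolkit pofile
#     for unit in store.all_units():
#         print(unit.get_source(), unit.get_target(), unit.is_translated())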
| paour/weblate | weblate/trans/formats.py | Python | gpl-3.0 | 22,503 |
#!/usr/bin/python2.5
#encoding=utf-8
'''
DON'T TOUCH THIS FILE, UNLESS YOU KNOW WHAT YOU ARE DOING !!!
Modify the release.cfg under the project module in config folder instead.
'''
import os
release_file = None
for root, dirs, files in os.walk(u'.'):
if u'release.py' in files:
release_file = os.path.join(root, u'release.py')
execfile(release_file)
        break
if release_file is None:
raise IOError, 'Release File not found!'
from setuptools import setup, find_packages
from turbogears.finddata import find_package_data
packages=find_packages()
package_data = find_package_data(where=short_name, package=package_name)
for data_dir in data_dirs:
if os.path.isdir(data_dir):
packages.append(data_dir)
package_data.update(find_package_data(where=data_dir, only_in_packages=False))
setup(
name=name,
version=version,
description=description,
long_description=long_description,
author=author,
author_email=author_email,
maintainer=maintainer,
maintainer_email=maintainer_email,
url=url,
download_url=download_url,
license=license,
install_requires=install_requires,
zip_safe=zip_safe,
packages=packages,
package_data=package_data,
keywords=keywords,
classifiers=classifiers,
test_suite=test_suite,
entry_points=entry_points,
data_files=data_files,
)
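# Editor's note (illustrative, not part of the original file): once release.py has
# been generated from the project's release.cfg, this script is driven the usual
# setuptools way, e.g.
#
#     python setup.py sdist
#     python setup.py develop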
| rsgemignani/my-fortunes | setup.py | Python | gpl-2.0 | 1,387 |
ELECTRUM_VERSION = '2.9.0' # version of the client package
PROTOCOL_VERSION = '0.10' # protocol version requested
# The hash of the mnemonic seed must begin with this
SEED_PREFIX = '01' # Electrum standard wallet
SEED_PREFIX_SW = '02' # Electrum segwit wallet
SEED_PREFIX_2FA = '101' # extended seed for two-factor authentication
def seed_prefix(seed_type):
if seed_type == 'standard':
return SEED_PREFIX
elif seed_type == 'segwit':
return SEED_PREFIX_SW
elif seed_type == '2fa':
return SEED_PREFIX_2FA
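# Editor's note (illustrative, not part of the original file): the prefixes above
# are compared against the start of the seed's version hash elsewhere in Electrum;
# this helper simply maps a seed type name to its expected prefix, e.g.
#
#     seed_prefix('standard')   # -> '01'
#     seed_prefix('segwit')     # -> '02'
#     seed_prefix('2fa')        # -> '101'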
| dashpay/electrum-dash | lib/version.py | Python | mit | 567 |
'''
Odnoklassniki.ru OAuth2 and IFRAME application support
If you are using OAuth2 authentication,
* Take a look to:
http://dev.odnoklassniki.ru/wiki/display/ok/The+OAuth+2.0+Protocol
* You need to register OAuth application here:
http://dev.odnoklassniki.ru/wiki/pages/viewpage.action?pageId=13992188
If instead you're building an iframe application,
* Take a look to:
http://dev.odnoklassniki.ru/wiki/display/ok/Odnoklassniki.ru+Third+Party+Platform
* You need to register your iframe application here:
http://dev.odnoklassniki.ru/wiki/pages/viewpage.action?pageId=5668937
* You need to sign a public offer and do some bureaucracy if you want to be listed in application registry
Then setup your application according manual and use information from
registration mail to set settings values.
'''
from django import forms
from django.contrib.auth import authenticate
from django.utils import simplejson
from hashlib import md5
from social_auth.backends import OAuthBackend, BaseOAuth2, USERNAME, BaseAuth, \
SocialAuthBackend
from social_auth.backends.exceptions import AuthFailed
from social_auth.utils import setting, log, dsa_urlopen, backend_setting
from urllib import urlencode, unquote
from urllib2 import Request
ODNOKLASSNIKI_API_SERVER = 'http://api.odnoklassniki.ru/'
EXPIRES_NAME = setting('SOCIAL_AUTH_EXPIRATION', 'expires')
class OdnoklassnikiBackend(OAuthBackend):
'''Odnoklassniki authentication backend'''
name = 'odnoklassniki'
EXTRA_DATA = [('refresh_token', 'refresh_token'),
('expires_in', EXPIRES_NAME)]
def get_user_id(self, details, response):
'''Return user unique id provided by Odnoklassniki'''
return response['uid']
def get_user_details(self, response):
'''Return user details from Odnoklassniki request'''
return {
USERNAME: response['uid'],
'email': '',
'fullname': unquote(response['name']),
'first_name': unquote(response['first_name']),
'last_name': unquote(response['last_name'])
}
class OdnoklassnikiMixin(object):
def get_settings(self):
client_key = backend_setting(self, self.SETTINGS_KEY_NAME)
client_secret = backend_setting(self, self.SETTINGS_SECRET_NAME)
public_key = backend_setting(self, self.SETTINGS_PUBLIC_NAME)
return client_key, client_secret, public_key
class OdnoklassnikiOAuth2(BaseOAuth2, OdnoklassnikiMixin):
'''Odnoklassniki OAuth2 support'''
AUTH_BACKEND = OdnoklassnikiBackend
AUTHORIZATION_URL = 'http://www.odnoklassniki.ru/oauth/authorize'
ACCESS_TOKEN_URL = 'http://api.odnoklassniki.ru/oauth/token.do'
SETTINGS_KEY_NAME = 'ODNOKLASSNIKI_OAUTH2_CLIENT_KEY'
SETTINGS_SECRET_NAME = 'ODNOKLASSNIKI_OAUTH2_CLIENT_SECRET'
SETTINGS_PUBLIC_NAME = 'ODNOKLASSNIKI_OAUTH2_APP_KEY'
FORCE_STATE_CHECK = False
def get_scope(self):
return backend_setting(self, 'ODNOKLASSNIKI_OAUTH2_EXTRA_SCOPE', [])
def user_data(self, access_token, *args, **kwargs):
'''Return user data from Odnoklassniki REST API'''
data = {'access_token': access_token, 'method': 'users.getCurrentUser'}
client_key, client_secret, public_key = self.get_settings()
return odnoklassniki_api(data, ODNOKLASSNIKI_API_SERVER, public_key, client_secret, 'oauth')
def odnoklassniki_oauth_sig(data, client_secret):
    '''Calculates the signature of the request data; the access_token value must be included.
    The algorithm is described at http://dev.odnoklassniki.ru/wiki/pages/viewpage.action?pageId=12878032 , search for "little bit different way"
'''
suffix = md5('{0:s}{1:s}'.format(data['access_token'], client_secret)).hexdigest()
check_list = sorted(['{0:s}={1:s}'.format(key, value) for key, value in data.items() if key != 'access_token'])
return md5(''.join(check_list) + suffix).hexdigest()
def odnoklassniki_iframe_sig(data, client_secret_or_session_secret):
    '''Calculates the signature as described at http://dev.odnoklassniki.ru/wiki/display/ok/Authentication+and+Authorization
    If the API method requires a session context, the request is signed with the session secret key.
    Otherwise it is signed with the application secret key.'''
param_list = sorted(['{0:s}={1:s}'.format(key, value) for key, value in data.items()])
return md5(''.join(param_list) + client_secret_or_session_secret).hexdigest()
def odnoklassniki_api(data, api_url, public_key, client_secret, request_type = 'oauth'):
''' Calls Odnoklassniki REST API method
http://dev.odnoklassniki.ru/wiki/display/ok/Odnoklassniki+Rest+API
'''
data.update({
'application_key': public_key,
'format': 'JSON'
})
if request_type == 'oauth':
data['sig'] = odnoklassniki_oauth_sig(data, client_secret)
elif request_type == 'iframe_session':
data['sig'] = odnoklassniki_iframe_sig(data, data['session_secret_key'])
elif request_type == 'iframe_nosession':
data['sig'] = odnoklassniki_iframe_sig(data, client_secret)
else:
raise AuthFailed('Unknown request type {0}. How should it be signed?'.format(request_type))
params = urlencode(data)
request = Request('{0}fb.do?{1}'.format(api_url, params))
try:
return simplejson.loads(dsa_urlopen(request).read())
except (TypeError, KeyError, IOError, ValueError, IndexError):
log('error', 'Could not load data from Odnoklassniki.',
exc_info=True, extra=dict(data=params))
return None
#
class OdnoklassnikiIframeForm(forms.Form):
logged_user_id = forms.IntegerField()
api_server = forms.CharField()
application_key = forms.CharField()
session_key = forms.CharField()
session_secret_key = forms.CharField()
authorized = forms.IntegerField()
apiconnection = forms.CharField()
refplace = forms.CharField(required=False)
referer = forms.CharField(required=False)
auth_sig = forms.CharField()
sig = forms.CharField()
custom_args = forms.CharField(required=False)
def __init__(self, auth, *args, **kwargs):
self.auth = auth
super(OdnoklassnikiIframeForm, self).__init__(*args, **kwargs)
def get_auth_sig(self):
secret_key = backend_setting(self.auth, 'ODNOKLASSNIKI_APP_SECRET')
hash_source = '{0:d}{1:s}{2:s}'.format(self.cleaned_data['logged_user_id'],
self.cleaned_data['session_key'], secret_key)
return md5(hash_source).hexdigest()
def clean_auth_sig(self):
correct_key = self.get_auth_sig()
key = self.cleaned_data['auth_sig'].lower()
if correct_key != key:
raise forms.ValidationError('Wrong authorization key')
return self.cleaned_data['auth_sig']
def get_response(self):
fields = ('logged_user_id',
'api_server',
'application_key',
'session_key',
'session_secret_key',
'authorized',
'apiconnection',
)
response = {}
for fieldname in self.fields.keys():
if fieldname in fields:
response[fieldname] = self.cleaned_data[fieldname]
return response
class OdnoklassnikiAppBackend(SocialAuthBackend):
'''Odnoklassniki iframe app authentication backend'''
name = 'odnoklassnikiapp'
def get_user_id(self, details, response):
'''Return unique user id provided by Odnoklassniki'''
return response['uid']
def extra_data(self, user, uid, response, details):
return dict([(key, value) for key, value in response.items() if key in response['extra_data_list']])
def get_user_details(self, response):
return {USERNAME: response['uid'],
'email': '',
'fullname': unquote(response['name']),
'first_name': unquote(response['first_name']),
'last_name': unquote(response['last_name'])
}
class OdnoklassnikiApp(BaseAuth, OdnoklassnikiMixin):
'''Odnoklassniki iframe app authentication class'''
SETTINGS_KEY_NAME = 'ODNOKLASSNIKI_APP_KEY'
SETTINGS_SECRET_NAME = 'ODNOKLASSNIKI_APP_SECRET'
SETTINGS_PUBLIC_NAME = 'ODNOKLASSNIKI_APP_PUBLIC_KEY'
AUTH_BACKEND = OdnoklassnikiAppBackend
def auth_complete(self, request, user, *args, **kwargs):
form = OdnoklassnikiIframeForm(auth=self, data=request.GET)
if not form.is_valid():
raise AuthFailed('Cannot authorize: malformed parameters')
else:
response = form.get_response()
extra_user_data = backend_setting(self, 'ODNOKLASSNIKI_APP_EXTRA_USER_DATA_LIST', ())
base_fields = ('uid', 'first_name', 'last_name', 'name')
fields = base_fields + extra_user_data
data = {
'method': 'users.getInfo',
'uids': '{0}'.format(response['logged_user_id']),
'fields': ','.join(fields),
}
client_key, client_secret, public_key = self.get_settings()
details = odnoklassniki_api(data, response['api_server'], public_key, client_secret, 'iframe_nosession')
if len(details) == 1 and 'uid' in details[0]:
details = details[0]
auth_data_fields = backend_setting(self, 'ODNOKLASSNIKI_APP_EXTRA_AUTH_DATA_LIST',
('api_server', 'apiconnection', 'session_key', 'session_secret_key', 'authorized'))
for field in auth_data_fields:
details[field] = response[field]
details['extra_data_list'] = fields + auth_data_fields
kwargs.update({
'auth': self,
'response': details,
self.AUTH_BACKEND.name: True
})
else:
raise AuthFailed('Cannot get user details: API error')
return authenticate(*args, **kwargs)
@property
def uses_redirect(self):
'''Odnoklassniki API for iframe application does not require redirects'''
return False
# Backend definition
BACKENDS = {
'odnoklassniki': OdnoklassnikiOAuth2,
'odnoklassnikiapp': OdnoklassnikiApp
}
|
1st/django-social-auth
|
social_auth/backends/contrib/odnoklassniki.py
|
Python
|
bsd-3-clause
| 10,369
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import socket
import Axon
from Kamaelia.Chassis.ConnectedServer import SimpleServer
from Kamaelia.Protocol.HTTP.HTTPServer import HTTPServer
from Kamaelia.Protocol.HTTP.Handlers.Minimal import Minimal
import Kamaelia.Protocol.HTTP.ErrorPages as ErrorPages
from Kamaelia.Chassis.Pipeline import Pipeline
homedirectory = "/srv/www/htdocs/"
indexfilename = "index.html"
def requestHandlers(URLHandlers):
def createRequestHandler(request):
if request.get("bad"):
return ErrorPages.websiteErrorPage(400, request.get("errormsg",""))
else:
for (prefix, handler) in URLHandlers:
if request["raw-uri"][:len(prefix)] == prefix:
request["uri-prefix-trigger"] = prefix
request["uri-suffix"] = request["raw-uri"][len(prefix):]
return handler(request)
return ErrorPages.websiteErrorPage(404, "No resource handlers could be found for the requested URL.")
return createRequestHandler
class HelloHandler(Axon.Component.component):
def __init__(self, request):
super(HelloHandler, self).__init__()
self.request = request
def main(self):
resource = {
"type" : "text/html",
"statuscode" : "200",
"length": len("<html><body><h1>Hello World</h1><P> Game Over!! </body></html>"),
}
self.send(resource, "outbox"); yield 1
page = {
"data" : "<html><body><h1>Hello World</h1><P> Game Over!! </body></html>"
}
self.send(page, "outbox"); yield 1
self.send(Axon.Ipc.producerFinished(self), "signal")
yield 1
class Cat(Axon.Component.component):
def __init__(self, *args):
super(Cat, self).__init__()
self.args = args
def main(self):
self.send(self.args, "outbox")
self.send(Axon.Ipc.producerFinished(self), "signal")
yield 1
class ExampleWrapper(Axon.Component.component):
def main(self):
# Tell the browser the type of data we're sending!
resource = {
"type" : "text/html",
"statuscode" : "200",
}
self.send(resource, "outbox"); yield 1
# Send the header
header = {
"data" : "<html><body>"
}
self.send(header, "outbox"); yield 1
# Wait for it....
while not self.dataReady("inbox"):
self.pause()
yield 1
        # Send the data we receive as the page body
while self.dataReady("inbox"):
pageData = {
"data" : str(self.recv("inbox"))
}
self.send(pageData, "outbox"); yield 1
# send a footer
footer = {
"data" : "</body></html>"
}
self.send(footer, "outbox"); yield 1
# and shutdown nicely
self.send(Axon.Ipc.producerFinished(self), "signal")
yield 1
def EchoHandler(request):
return Pipeline ( Cat(request), ExampleWrapper() )
def servePage(request):
return Minimal(request=request,
homedirectory=homedirectory,
indexfilename=indexfilename)
def HTTPProtocol():
return HTTPServer(requestHandlers([
["/echo", EchoHandler ],
["/hello", HelloHandler ],
["/", servePage ],
]))
SimpleServer(protocol=HTTPProtocol,
port=8082,
socketOptions=(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) ).run()
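# Illustrative smoke test (added note, not part of the original sketch): with
# the server above running, the three handlers can be exercised from another
# Python 2 shell roughly like this.
#
#     import urllib2
#     print urllib2.urlopen('http://localhost:8082/hello').read()   # HelloHandler
#     print urllib2.urlopen('http://localhost:8082/echo').read()    # EchoHandler
#     print urllib2.urlopen('http://localhost:8082/').read()        # Minimal file serving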
|
sparkslabs/kamaelia
|
Sketches/MPS/HTTP/michael.py
|
Python
|
apache-2.0
| 4,406
|
# -*- coding: utf-8 -*-
from django import forms
from django.contrib.auth.models import User
class FormAcceso(forms.Form):
usuario = forms.CharField(label="Usuario",
widget=forms.TextInput(attrs={"size": "15", "class": "small",
"placeholder": "Nombre de usuario"}))
passwd = forms.CharField(label="Contraseña",
widget=forms.PasswordInput(attrs={"size": "15", "class":"small",
"placeholder": "contraseña de acceso"}))
def clean_usuario(self):
usuario = self.cleaned_data['usuario']
if not User.objects.filter(username=usuario):
raise forms.ValidationError("El usuario no existe")
else:
usr = User.objects.get(username=usuario)
if not usr.is_active:
raise forms.ValidationError("El usuario esta inactivo")
return self.cleaned_data['usuario']
def clean_passwd(self):
usuario = self.data['usuario']
passwd = self.cleaned_data['passwd']
if User.objects.filter(username=usuario):
usr = User.objects.get(username=usuario)
if not usr.check_password(passwd):
raise forms.ValidationError("La contraseña es incorrecta")
return self.cleaned_data['passwd']
|
roldandvg/incidencias
|
incidencias/apps/autenticar/forms.py
|
Python
|
gpl-2.0
| 1,408
|
"""A module which implements the time frequency estimation.
Morlet code inspired by Matlab code from Sheraz Khan & Brainstorm & SPM
"""
# Authors : Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Hari Bharadwaj <hari@nmr.mgh.harvard.edu>
#
# License : BSD (3-clause)
import warnings
from math import sqrt
from copy import deepcopy
import numpy as np
from scipy import linalg
from scipy.fftpack import fftn, ifftn
from ..fixes import partial
from ..baseline import rescale
from ..parallel import parallel_func
from ..utils import logger, verbose, _time_mask
from ..channels.channels import ContainsMixin, UpdateChannelsMixin
from ..io.pick import pick_info, pick_types
from ..io.meas_info import Info
from ..utils import check_fname
from .multitaper import dpss_windows
from ..viz.utils import figure_nobar, plt_show
from ..externals.h5io import write_hdf5, read_hdf5
from ..externals.six import string_types
def _get_data(inst, return_itc):
"""Get data from Epochs or Evoked instance as epochs x ch x time"""
from ..epochs import _BaseEpochs
from ..evoked import Evoked
if not isinstance(inst, (_BaseEpochs, Evoked)):
raise TypeError('inst must be Epochs or Evoked')
if isinstance(inst, _BaseEpochs):
data = inst.get_data()
else:
if return_itc:
raise ValueError('return_itc must be False for evoked data')
data = inst.data[np.newaxis, ...].copy()
return data
def morlet(sfreq, freqs, n_cycles=7, sigma=None, zero_mean=False):
"""Compute Wavelets for the given frequency range
Parameters
----------
sfreq : float
Sampling Frequency
freqs : array
frequency range of interest (1 x Frequencies)
n_cycles: float | array of float
Number of cycles. Fixed number or one per frequency.
sigma : float, (optional)
        It controls the width of the wavelet, i.e. its temporal
        resolution. If sigma is None the temporal resolution
        is adapted to the frequency, as for any wavelet transform:
        the higher the frequency, the shorter the wavelet.
        If sigma is fixed, the temporal resolution is fixed,
        as for the short-time Fourier transform, and the number
        of oscillations increases with the frequency.
zero_mean : bool
Make sure the wavelet is zero mean
Returns
-------
Ws : list of array
Wavelets time series
See Also
--------
mne.time_frequency.cwt_morlet : Compute time-frequency decomposition
with Morlet wavelets
"""
Ws = list()
n_cycles = np.atleast_1d(n_cycles)
if (n_cycles.size != 1) and (n_cycles.size != len(freqs)):
raise ValueError("n_cycles should be fixed or defined for "
"each frequency.")
for k, f in enumerate(freqs):
if len(n_cycles) != 1:
this_n_cycles = n_cycles[k]
else:
this_n_cycles = n_cycles[0]
# fixed or scale-dependent window
if sigma is None:
sigma_t = this_n_cycles / (2.0 * np.pi * f)
else:
sigma_t = this_n_cycles / (2.0 * np.pi * sigma)
# this scaling factor is proportional to (Tallon-Baudry 98):
# (sigma_t*sqrt(pi))^(-1/2);
t = np.arange(0., 5. * sigma_t, 1.0 / sfreq)
t = np.r_[-t[::-1], t[1:]]
oscillation = np.exp(2.0 * 1j * np.pi * f * t)
gaussian_enveloppe = np.exp(-t ** 2 / (2.0 * sigma_t ** 2))
if zero_mean: # to make it zero mean
real_offset = np.exp(- 2 * (np.pi * f * sigma_t) ** 2)
oscillation -= real_offset
W = oscillation * gaussian_enveloppe
W /= sqrt(0.5) * linalg.norm(W.ravel())
Ws.append(W)
return Ws
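# Illustrative example (added comment, not part of the original module); the
# numbers are placeholders, only the signature of morlet() above is assumed:
#
#     freqs = np.arange(8., 30., 2.)                   # 8-28 Hz in 2 Hz steps
#     Ws = morlet(sfreq=1000., freqs=freqs, n_cycles=7, zero_mean=True)
#     # len(Ws) == len(freqs); each element is a complex-valued 1D array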
def _dpss_wavelet(sfreq, freqs, n_cycles=7, time_bandwidth=4.0,
zero_mean=False):
"""Compute Wavelets for the given frequency range
Parameters
----------
sfreq : float
Sampling Frequency.
freqs : ndarray, shape (n_freqs,)
The frequencies in Hz.
n_cycles : float | ndarray, shape (n_freqs,)
The number of cycles globally or for each frequency.
Defaults to 7.
time_bandwidth : float, (optional)
Time x Bandwidth product.
The number of good tapers (low-bias) is chosen automatically based on
this to equal floor(time_bandwidth - 1).
Default is 4.0, giving 3 good tapers.
Returns
-------
Ws : list of array
Wavelets time series
"""
Ws = list()
if time_bandwidth < 2.0:
raise ValueError("time_bandwidth should be >= 2.0 for good tapers")
n_taps = int(np.floor(time_bandwidth - 1))
n_cycles = np.atleast_1d(n_cycles)
if n_cycles.size != 1 and n_cycles.size != len(freqs):
raise ValueError("n_cycles should be fixed or defined for "
"each frequency.")
for m in range(n_taps):
Wm = list()
for k, f in enumerate(freqs):
if len(n_cycles) != 1:
this_n_cycles = n_cycles[k]
else:
this_n_cycles = n_cycles[0]
t_win = this_n_cycles / float(f)
t = np.arange(0., t_win, 1.0 / sfreq)
# Making sure wavelets are centered before tapering
oscillation = np.exp(2.0 * 1j * np.pi * f * (t - t_win / 2.))
# Get dpss tapers
tapers, conc = dpss_windows(t.shape[0], time_bandwidth / 2.,
n_taps)
Wk = oscillation * tapers[m]
if zero_mean: # to make it zero mean
real_offset = Wk.mean()
Wk -= real_offset
Wk /= sqrt(0.5) * linalg.norm(Wk.ravel())
Wm.append(Wk)
Ws.append(Wm)
return Ws
def _centered(arr, newsize):
"""Aux Function to center data"""
# Return the center newsize portion of the array.
newsize = np.asarray(newsize)
currsize = np.array(arr.shape)
startind = (currsize - newsize) // 2
endind = startind + newsize
myslice = [slice(startind[k], endind[k]) for k in range(len(endind))]
return arr[tuple(myslice)]
def _cwt_fft(X, Ws, mode="same"):
"""Compute cwt with fft based convolutions
Return a generator over signals.
"""
X = np.asarray(X)
# Precompute wavelets for given frequency range to save time
n_signals, n_times = X.shape
n_freqs = len(Ws)
Ws_max_size = max(W.size for W in Ws)
size = n_times + Ws_max_size - 1
# Always use 2**n-sized FFT
fsize = 2 ** int(np.ceil(np.log2(size)))
# precompute FFTs of Ws
fft_Ws = np.empty((n_freqs, fsize), dtype=np.complex128)
for i, W in enumerate(Ws):
if len(W) > n_times:
raise ValueError('Wavelet is too long for such a short signal. '
'Reduce the number of cycles.')
fft_Ws[i] = fftn(W, [fsize])
for k, x in enumerate(X):
if mode == "full":
tfr = np.zeros((n_freqs, fsize), dtype=np.complex128)
elif mode == "same" or mode == "valid":
tfr = np.zeros((n_freqs, n_times), dtype=np.complex128)
fft_x = fftn(x, [fsize])
for i, W in enumerate(Ws):
ret = ifftn(fft_x * fft_Ws[i])[:n_times + W.size - 1]
if mode == "valid":
sz = abs(W.size - n_times) + 1
offset = (n_times - sz) / 2
tfr[i, offset:(offset + sz)] = _centered(ret, sz)
else:
tfr[i, :] = _centered(ret, n_times)
yield tfr
def _cwt_convolve(X, Ws, mode='same'):
"""Compute time freq decomposition with temporal convolutions
Return a generator over signals.
"""
X = np.asarray(X)
n_signals, n_times = X.shape
n_freqs = len(Ws)
# Compute convolutions
for x in X:
tfr = np.zeros((n_freqs, n_times), dtype=np.complex128)
for i, W in enumerate(Ws):
ret = np.convolve(x, W, mode=mode)
if len(W) > len(x):
raise ValueError('Wavelet is too long for such a short '
'signal. Reduce the number of cycles.')
if mode == "valid":
sz = abs(W.size - n_times) + 1
offset = (n_times - sz) / 2
tfr[i, offset:(offset + sz)] = ret
else:
tfr[i] = ret
yield tfr
def cwt_morlet(X, sfreq, freqs, use_fft=True, n_cycles=7.0, zero_mean=False):
"""Compute time freq decomposition with Morlet wavelets
This function operates directly on numpy arrays. Consider using
`tfr_morlet` to process `Epochs` or `Evoked` instances.
Parameters
----------
X : array of shape [n_signals, n_times]
signals (one per line)
sfreq : float
sampling Frequency
freqs : array
Array of frequencies of interest
use_fft : bool
        Compute convolution with FFT or temporal convolution.
n_cycles: float | array of float
Number of cycles. Fixed number or one per frequency.
zero_mean : bool
Make sure the wavelets are zero mean.
Returns
-------
tfr : 3D array
Time Frequency Decompositions (n_signals x n_frequencies x n_times)
See Also
--------
tfr.cwt : Compute time-frequency decomposition with user-provided wavelets
"""
mode = 'same'
# mode = "valid"
n_signals, n_times = X.shape
n_frequencies = len(freqs)
# Precompute wavelets for given frequency range to save time
Ws = morlet(sfreq, freqs, n_cycles=n_cycles, zero_mean=zero_mean)
if use_fft:
coefs = _cwt_fft(X, Ws, mode)
else:
coefs = _cwt_convolve(X, Ws, mode)
tfrs = np.empty((n_signals, n_frequencies, n_times), dtype=np.complex)
for k, tfr in enumerate(coefs):
tfrs[k] = tfr
return tfrs
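# Illustrative example (added comment, not part of the original module);
# `data` stands for a hypothetical (n_signals, n_times) float array:
#
#     tfr = cwt_morlet(data, sfreq=1000., freqs=np.array([10., 20., 40.]),
#                      use_fft=True, n_cycles=7.0)
#     power = np.abs(tfr) ** 2     # shape (n_signals, n_freqs, n_times)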
def cwt(X, Ws, use_fft=True, mode='same', decim=1):
"""Compute time freq decomposition with continuous wavelet transform
Parameters
----------
X : array of shape [n_signals, n_times]
signals (one per line)
Ws : list of array
Wavelets time series
use_fft : bool
Use FFT for convolutions
mode : 'same' | 'valid' | 'full'
Convention for convolution
decim : int
Temporal decimation factor
Returns
-------
tfr : 3D array
Time Frequency Decompositions (n_signals x n_frequencies x n_times)
See Also
--------
mne.time_frequency.cwt_morlet : Compute time-frequency decomposition
with Morlet wavelets
"""
n_signals, n_times = X[:, ::decim].shape
n_frequencies = len(Ws)
if use_fft:
coefs = _cwt_fft(X, Ws, mode)
else:
coefs = _cwt_convolve(X, Ws, mode)
tfrs = np.empty((n_signals, n_frequencies, n_times), dtype=np.complex)
for k, tfr in enumerate(coefs):
tfrs[k] = tfr[..., ::decim]
return tfrs
def _time_frequency(X, Ws, use_fft, decim):
"""Aux of time_frequency for parallel computing over channels
"""
n_epochs, n_times = X.shape
n_times = n_times // decim + bool(n_times % decim)
n_frequencies = len(Ws)
psd = np.zeros((n_frequencies, n_times)) # PSD
plf = np.zeros((n_frequencies, n_times), np.complex) # phase lock
mode = 'same'
if use_fft:
tfrs = _cwt_fft(X, Ws, mode)
else:
tfrs = _cwt_convolve(X, Ws, mode)
for tfr in tfrs:
tfr = tfr[:, ::decim]
tfr_abs = np.abs(tfr)
psd += tfr_abs ** 2
plf += tfr / tfr_abs
psd /= n_epochs
plf = np.abs(plf) / n_epochs
return psd, plf
@verbose
def single_trial_power(data, sfreq, frequencies, use_fft=True, n_cycles=7,
baseline=None, baseline_mode='ratio', times=None,
decim=1, n_jobs=1, zero_mean=False, verbose=None):
"""Compute time-frequency power on single epochs
Parameters
----------
data : array of shape [n_epochs, n_channels, n_times]
The epochs
sfreq : float
Sampling rate
frequencies : array-like
The frequencies
use_fft : bool
Use the FFT for convolutions or not.
n_cycles : float | array of float
Number of cycles in the Morlet wavelet. Fixed number
or one per frequency.
baseline : None (default) or tuple of length 2
The time interval to apply baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
        If baseline is equal to (None, None) all the time
interval is used.
baseline_mode : None | 'ratio' | 'zscore'
Do baseline correction with ratio (power is divided by mean
power during baseline) or zscore (power is divided by standard
deviation of power during baseline after subtracting the mean,
power = [power - mean(power_baseline)] / std(power_baseline))
times : array
Required to define baseline
decim : int
Temporal decimation factor
n_jobs : int
The number of epochs to process at the same time
zero_mean : bool
Make sure the wavelets are zero mean.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
power : 4D array
Power estimate (Epochs x Channels x Frequencies x Timepoints).
"""
mode = 'same'
n_frequencies = len(frequencies)
n_epochs, n_channels, n_times = data[:, :, ::decim].shape
# Precompute wavelets for given frequency range to save time
Ws = morlet(sfreq, frequencies, n_cycles=n_cycles, zero_mean=zero_mean)
parallel, my_cwt, _ = parallel_func(cwt, n_jobs)
logger.info("Computing time-frequency power on single epochs...")
power = np.empty((n_epochs, n_channels, n_frequencies, n_times),
dtype=np.float)
# Package arguments for `cwt` here to minimize omissions where only one of
# the two calls below is updated with new function arguments.
cwt_kw = dict(Ws=Ws, use_fft=use_fft, mode=mode, decim=decim)
if n_jobs == 1:
for k, e in enumerate(data):
x = cwt(e, **cwt_kw)
power[k] = (x * x.conj()).real
else:
# Precompute tf decompositions in parallel
tfrs = parallel(my_cwt(e, **cwt_kw) for e in data)
for k, tfr in enumerate(tfrs):
power[k] = (tfr * tfr.conj()).real
# Run baseline correction. Be sure to decimate the times array as well if
# needed.
if times is not None:
times = times[::decim]
power = rescale(power, times, baseline, baseline_mode, copy=False)
return power
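# Illustrative example (added comment, not part of the original module);
# `epochs` stands for a hypothetical mne.Epochs instance and `freqs` for a
# 1D array of frequencies:
#
#     power = single_trial_power(epochs.get_data(), sfreq=epochs.info['sfreq'],
#                                frequencies=freqs, n_cycles=freqs / 2.,
#                                times=epochs.times, baseline=(None, 0),
#                                baseline_mode='ratio', decim=3)
#     # power has shape (n_epochs, n_channels, n_freqs, n_decimated_times)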
def _induced_power_cwt(data, sfreq, frequencies, use_fft=True, n_cycles=7,
decim=1, n_jobs=1, zero_mean=False):
"""Compute time induced power and inter-trial phase-locking factor
The time frequency decomposition is done with Morlet wavelets
Parameters
----------
data : array
3D array of shape [n_epochs, n_channels, n_times]
sfreq : float
sampling Frequency
frequencies : array
Array of frequencies of interest
use_fft : bool
Compute transform with fft based convolutions or temporal
convolutions.
n_cycles : float | array of float
Number of cycles. Fixed number or one per frequency.
decim: int
Temporal decimation factor
n_jobs : int
The number of CPUs used in parallel. All CPUs are used in -1.
Requires joblib package.
zero_mean : bool
Make sure the wavelets are zero mean.
Returns
-------
    power : 3D array
        Induced power (Channels x Frequencies x Timepoints).
        Squared amplitude of time-frequency coefficients.
    phase_lock : 3D array
        Phase locking factor in [0, 1] (Channels x Frequencies x Timepoints)
"""
n_frequencies = len(frequencies)
n_epochs, n_channels, n_times = data[:, :, ::decim].shape
# Precompute wavelets for given frequency range to save time
Ws = morlet(sfreq, frequencies, n_cycles=n_cycles, zero_mean=zero_mean)
psd = np.empty((n_channels, n_frequencies, n_times))
plf = np.empty((n_channels, n_frequencies, n_times))
# Separate to save memory for n_jobs=1
parallel, my_time_frequency, _ = parallel_func(_time_frequency, n_jobs)
psd_plf = parallel(my_time_frequency(data[:, c, :], Ws, use_fft, decim)
for c in range(n_channels))
for c, (psd_c, plf_c) in enumerate(psd_plf):
psd[c, :, :], plf[c, :, :] = psd_c, plf_c
return psd, plf
def _preproc_tfr(data, times, freqs, tmin, tmax, fmin, fmax, mode,
baseline, vmin, vmax, dB):
"""Aux Function to prepare tfr computation"""
from ..viz.utils import _setup_vmin_vmax
if mode is not None and baseline is not None:
logger.info("Applying baseline correction '%s' during %s" %
(mode, baseline))
data = rescale(data.copy(), times, baseline, mode)
# crop time
itmin, itmax = None, None
idx = np.where(_time_mask(times, tmin, tmax))[0]
if tmin is not None:
itmin = idx[0]
if tmax is not None:
itmax = idx[-1] + 1
times = times[itmin:itmax]
# crop freqs
ifmin, ifmax = None, None
idx = np.where(_time_mask(freqs, fmin, fmax))[0]
if fmin is not None:
ifmin = idx[0]
if fmax is not None:
ifmax = idx[-1] + 1
freqs = freqs[ifmin:ifmax]
# crop data
data = data[:, ifmin:ifmax, itmin:itmax]
times *= 1e3
if dB:
data = 10 * np.log10((data * data.conj()).real)
vmin, vmax = _setup_vmin_vmax(data, vmin, vmax)
return data, times, freqs, vmin, vmax
class AverageTFR(ContainsMixin, UpdateChannelsMixin):
"""Container for Time-Frequency data
Can for example store induced power at sensor level or intertrial
coherence.
Parameters
----------
info : Info
The measurement info.
data : ndarray, shape (n_channels, n_freqs, n_times)
The data.
times : ndarray, shape (n_times,)
The time values in seconds.
freqs : ndarray, shape (n_freqs,)
The frequencies in Hz.
nave : int
The number of averaged TFRs.
comment : str | None
Comment on the data, e.g., the experimental condition.
Defaults to None.
method : str | None
Comment on the method used to compute the data, e.g., morlet wavelet.
Defaults to None.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Attributes
----------
ch_names : list
The names of the channels.
"""
@verbose
def __init__(self, info, data, times, freqs, nave, comment=None,
method=None, verbose=None):
self.info = info
if data.ndim != 3:
raise ValueError('data should be 3d. Got %d.' % data.ndim)
n_channels, n_freqs, n_times = data.shape
if n_channels != len(info['chs']):
raise ValueError("Number of channels and data size don't match"
" (%d != %d)." % (n_channels, len(info['chs'])))
if n_freqs != len(freqs):
raise ValueError("Number of frequencies and data size don't match"
" (%d != %d)." % (n_freqs, len(freqs)))
if n_times != len(times):
raise ValueError("Number of times and data size don't match"
" (%d != %d)." % (n_times, len(times)))
self.data = data
self.times = times
self.freqs = freqs
self.nave = nave
self.comment = comment
self.method = method
@property
def ch_names(self):
return self.info['ch_names']
def crop(self, tmin=None, tmax=None, copy=False):
"""Crop data to a given time interval
Parameters
----------
tmin : float | None
Start time of selection in seconds.
tmax : float | None
End time of selection in seconds.
copy : bool
            If False the TFR instance is cropped in place.
"""
inst = self if not copy else self.copy()
mask = _time_mask(inst.times, tmin, tmax)
inst.times = inst.times[mask]
inst.data = inst.data[..., mask]
return inst
@verbose
def plot(self, picks=None, baseline=None, mode='mean', tmin=None,
tmax=None, fmin=None, fmax=None, vmin=None, vmax=None,
cmap='RdBu_r', dB=False, colorbar=True, show=True,
title=None, axes=None, layout=None, verbose=None):
"""Plot TFRs in a topography with images
Parameters
----------
picks : array-like of int | None
The indices of the channels to plot.
baseline : None (default) or tuple of length 2
The time interval to apply baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
            If baseline is equal to (None, None) all the time
interval is used.
mode : None | 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
Do baseline correction with ratio (power is divided by mean
power during baseline) or zscore (power is divided by standard
deviation of power during baseline after subtracting the mean,
power = [power - mean(power_baseline)] / std(power_baseline)).
If None no baseline correction is applied.
tmin : None | float
The first time instant to display. If None the first time point
available is used.
tmax : None | float
The last time instant to display. If None the last time point
available is used.
fmin : None | float
The first frequency to display. If None the first frequency
available is used.
fmax : None | float
The last frequency to display. If None the last frequency
available is used.
vmin : float | None
            The minimum value of the color scale. If vmin is None, the data
            minimum value is used.
        vmax : float | None
            The maximum value of the color scale. If vmax is None, the data
            maximum value is used.
cmap : matplotlib colormap | str
The colormap to use. Defaults to 'RdBu_r'.
dB : bool
If True, 20*log10 is applied to the data to get dB.
colorbar : bool
If true, colorbar will be added to the plot. For user defined axes,
the colorbar cannot be drawn. Defaults to True.
show : bool
Call pyplot.show() at the end.
title : str | None
String for title. Defaults to None (blank/no title).
axes : instance of Axes | list | None
The axes to plot to. If list, the list must be a list of Axes of
the same length as the number of channels. If instance of Axes,
there must be only one channel plotted.
layout : Layout | None
Layout instance specifying sensor positions. Used for interactive
plotting of topographies on rectangle selection. If possible, the
correct layout is inferred from the data.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
fig : matplotlib.figure.Figure
The figure containing the topography.
"""
from ..viz.topo import _imshow_tfr
import matplotlib.pyplot as plt
times, freqs = self.times.copy(), self.freqs.copy()
info = self.info
data = self.data
n_picks = len(picks)
info, data, picks = _prepare_picks(info, data, picks)
data = data[picks]
data, times, freqs, vmin, vmax = \
_preproc_tfr(data, times, freqs, tmin, tmax, fmin, fmax, mode,
baseline, vmin, vmax, dB)
tmin, tmax = times[0], times[-1]
if isinstance(axes, plt.Axes):
axes = [axes]
if isinstance(axes, list) or isinstance(axes, np.ndarray):
if len(axes) != n_picks:
raise RuntimeError('There must be an axes for each picked '
'channel.')
for idx in range(len(data)):
if axes is None:
fig = plt.figure()
ax = fig.add_subplot(111)
else:
ax = axes[idx]
fig = ax.get_figure()
onselect_callback = partial(self._onselect, baseline=baseline,
mode=mode, layout=layout)
_imshow_tfr(ax, 0, tmin, tmax, vmin, vmax, onselect_callback,
ylim=None, tfr=data[idx: idx + 1], freq=freqs,
x_label='Time (ms)', y_label='Frequency (Hz)',
colorbar=colorbar, picker=False, cmap=cmap)
if title:
fig.suptitle(title)
colorbar = False # only one colorbar for multiple axes
plt_show(show)
return fig
def _onselect(self, eclick, erelease, baseline, mode, layout):
"""Callback function called by rubber band selector in channel tfr."""
import matplotlib.pyplot as plt
from ..viz import plot_tfr_topomap
if abs(eclick.x - erelease.x) < .1 or abs(eclick.y - erelease.y) < .1:
return
plt.ion() # turn interactive mode on
tmin = round(min(eclick.xdata, erelease.xdata) / 1000., 5) # ms to s
tmax = round(max(eclick.xdata, erelease.xdata) / 1000., 5)
fmin = round(min(eclick.ydata, erelease.ydata), 5) # Hz
fmax = round(max(eclick.ydata, erelease.ydata), 5)
tmin = min(self.times, key=lambda x: abs(x - tmin)) # find closest
tmax = min(self.times, key=lambda x: abs(x - tmax))
fmin = min(self.freqs, key=lambda x: abs(x - fmin))
fmax = min(self.freqs, key=lambda x: abs(x - fmax))
if tmin == tmax or fmin == fmax:
logger.info('The selected area is too small. '
'Select a larger time-frequency window.')
return
types = list()
if 'eeg' in self:
types.append('eeg')
if 'mag' in self:
types.append('mag')
if 'grad' in self:
types.append('grad')
fig = figure_nobar()
fig.suptitle('{:.2f} s - {:.2f} s, {:.2f} Hz - {:.2f} Hz'.format(tmin,
tmax,
fmin,
fmax),
y=0.04)
for idx, ch_type in enumerate(types):
ax = plt.subplot(1, len(types), idx + 1)
plot_tfr_topomap(self, ch_type=ch_type, tmin=tmin, tmax=tmax,
fmin=fmin, fmax=fmax, layout=layout,
baseline=baseline, mode=mode, cmap=None,
title=ch_type, vmin=None, vmax=None,
axes=ax)
def plot_topo(self, picks=None, baseline=None, mode='mean', tmin=None,
tmax=None, fmin=None, fmax=None, vmin=None, vmax=None,
layout=None, cmap='RdBu_r', title=None, dB=False,
colorbar=True, layout_scale=0.945, show=True,
border='none', fig_facecolor='k', font_color='w'):
"""Plot TFRs in a topography with images
Parameters
----------
picks : array-like of int | None
The indices of the channels to plot. If None all available
channels are displayed.
baseline : None (default) or tuple of length 2
The time interval to apply baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
            If baseline is equal to (None, None) all the time
interval is used.
mode : None | 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
Do baseline correction with ratio (power is divided by mean
power during baseline) or zscore (power is divided by standard
deviation of power during baseline after subtracting the mean,
power = [power - mean(power_baseline)] / std(power_baseline)).
If None no baseline correction is applied.
tmin : None | float
The first time instant to display. If None the first time point
available is used.
tmax : None | float
The last time instant to display. If None the last time point
available is used.
fmin : None | float
The first frequency to display. If None the first frequency
available is used.
fmax : None | float
The last frequency to display. If None the last frequency
available is used.
vmin : float | None
            The minimum value of the color scale. If vmin is None, the data
            minimum value is used.
        vmax : float | None
            The maximum value of the color scale. If vmax is None, the data
            maximum value is used.
layout : Layout | None
Layout instance specifying sensor positions. If possible, the
correct layout is inferred from the data.
cmap : matplotlib colormap | str
The colormap to use. Defaults to 'RdBu_r'.
title : str
Title of the figure.
dB : bool
If True, 20*log10 is applied to the data to get dB.
colorbar : bool
If true, colorbar will be added to the plot
layout_scale : float
Scaling factor for adjusting the relative size of the layout
on the canvas.
show : bool
Call pyplot.show() at the end.
border : str
matplotlib borders style to be used for each sensor plot.
fig_facecolor : str | obj
The figure face color. Defaults to black.
font_color: str | obj
The color of tick labels in the colorbar. Defaults to white.
Returns
-------
fig : matplotlib.figure.Figure
The figure containing the topography.
"""
from ..viz.topo import _imshow_tfr, _plot_topo
times = self.times.copy()
freqs = self.freqs
data = self.data
info = self.info
info, data, picks = _prepare_picks(info, data, picks)
data = data[picks]
data, times, freqs, vmin, vmax = \
_preproc_tfr(data, times, freqs, tmin, tmax, fmin, fmax,
mode, baseline, vmin, vmax, dB)
if layout is None:
from mne import find_layout
layout = find_layout(self.info)
onselect_callback = partial(self._onselect, baseline=baseline,
mode=mode, layout=layout)
imshow = partial(_imshow_tfr, tfr=data, freq=freqs, cmap=cmap,
onselect=onselect_callback)
fig = _plot_topo(info=info, times=times, show_func=imshow,
layout=layout, colorbar=colorbar, vmin=vmin,
vmax=vmax, cmap=cmap, layout_scale=layout_scale,
title=title, border=border, x_label='Time (ms)',
y_label='Frequency (Hz)', fig_facecolor=fig_facecolor,
font_color=font_color)
plt_show(show)
return fig
def _check_compat(self, tfr):
"""checks that self and tfr have the same time-frequency ranges"""
assert np.all(tfr.times == self.times)
assert np.all(tfr.freqs == self.freqs)
def __add__(self, tfr):
self._check_compat(tfr)
out = self.copy()
out.data += tfr.data
return out
def __iadd__(self, tfr):
self._check_compat(tfr)
self.data += tfr.data
return self
def __sub__(self, tfr):
self._check_compat(tfr)
out = self.copy()
out.data -= tfr.data
return out
def __isub__(self, tfr):
self._check_compat(tfr)
self.data -= tfr.data
return self
def copy(self):
"""Return a copy of the instance."""
return deepcopy(self)
def __repr__(self):
s = "time : [%f, %f]" % (self.times[0], self.times[-1])
s += ", freq : [%f, %f]" % (self.freqs[0], self.freqs[-1])
s += ", nave : %d" % self.nave
s += ', channels : %d' % self.data.shape[0]
return "<AverageTFR | %s>" % s
def apply_baseline(self, baseline, mode='mean'):
"""Baseline correct the data
Parameters
----------
baseline : tuple or list of length 2
The time interval to apply rescaling / baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
If baseline is equal to (None, None) all the time
interval is used.
mode : 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
Do baseline correction with ratio (power is divided by mean
power during baseline) or z-score (power is divided by standard
deviation of power during baseline after subtracting the mean,
power = [power - mean(power_baseline)] / std(power_baseline))
            If None, no baseline correction will be performed.
"""
self.data = rescale(self.data, self.times, baseline, mode, copy=False)
def plot_topomap(self, tmin=None, tmax=None, fmin=None, fmax=None,
ch_type=None, baseline=None, mode='mean',
layout=None, vmin=None, vmax=None, cmap=None,
sensors=True, colorbar=True, unit=None, res=64, size=2,
cbar_fmt='%1.1e', show_names=False, title=None,
axes=None, show=True, outlines='head', head_pos=None):
"""Plot topographic maps of time-frequency intervals of TFR data
Parameters
----------
tmin : None | float
The first time instant to display. If None the first time point
available is used.
tmax : None | float
The last time instant to display. If None the last time point
available is used.
fmin : None | float
The first frequency to display. If None the first frequency
available is used.
fmax : None | float
The last frequency to display. If None the last frequency
available is used.
ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
The channel type to plot. For 'grad', the gradiometers are
collected in pairs and the RMS for each pair is plotted.
If None, then channels are chosen in the order given above.
baseline : tuple or list of length 2
The time interval to apply rescaling / baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
If baseline is equal to (None, None) all the time
interval is used.
mode : 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
Do baseline correction with ratio (power is divided by mean
power during baseline) or z-score (power is divided by standard
deviation of power during baseline after subtracting the mean,
power = [power - mean(power_baseline)] / std(power_baseline))
            If None, no baseline correction will be performed.
layout : None | Layout
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If possible, the correct layout
file is inferred from the data; if no appropriate layout file was
found, the layout is automatically generated from the sensor
locations.
vmin : float | callable | None
            The value specifying the lower bound of the color range. If None
            and vmax is None, -vmax is used. Otherwise np.min(data) is used,
            or 0 if the data contains only positive values. If callable, the
            output equals vmin(data). Defaults to None.
vmax : float | callable | None
The value specifying the upper bound of the color range. If None,
the maximum value is used. If callable, the output equals
vmax(data). Defaults to None.
cmap : matplotlib colormap | None
Colormap. If None and the plotted data is all positive, defaults to
'Reds'. If None and data contains also negative values, defaults to
'RdBu_r'. Defaults to None.
sensors : bool | str
Add markers for sensor locations to the plot. Accepts matplotlib
plot format string (e.g., 'r+' for red plusses). If True, a circle
will be used (via .add_artist). Defaults to True.
colorbar : bool
Plot a colorbar.
unit : dict | str | None
The unit of the channel type used for colorbar label. If
scale is None the unit is automatically determined.
res : int
The resolution of the topomap image (n pixels along each side).
size : float
Side length per topomap in inches.
cbar_fmt : str
String format for colorbar values.
show_names : bool | callable
If True, show channel names on top of the map. If a callable is
passed, channel names will be formatted using the callable; e.g.,
to delete the prefix 'MEG ' from all channel names, pass the
function lambda x: x.replace('MEG ', ''). If `mask` is not None,
only significant sensors will be shown.
title : str | None
Title. If None (default), no title is displayed.
axes : instance of Axes | None
The axes to plot to. If None the axes is defined automatically.
show : bool
Call pyplot.show() at the end.
outlines : 'head' | 'skirt' | dict | None
The outlines to be drawn. If 'head', the default head scheme will
be drawn. If 'skirt' the head scheme will be drawn, but sensors are
allowed to be plotted outside of the head circle. If dict, each key
refers to a tuple of x and y positions, the values in 'mask_pos'
will serve as image mask, and the 'autoshrink' (bool) field will
trigger automated shrinking of the positions due to points outside
the outline. Alternatively, a matplotlib patch object can be passed
for advanced masking options, either directly or as a function that
returns patches (required for multi-axis plots). If None, nothing
will be drawn. Defaults to 'head'.
head_pos : dict | None
If None (default), the sensors are positioned such that they span
the head circle. If dict, can have entries 'center' (tuple) and
'scale' (tuple) for what the center and scale of the head should be
relative to the electrode locations.
Returns
-------
fig : matplotlib.figure.Figure
The figure containing the topography.
"""
from ..viz import plot_tfr_topomap
return plot_tfr_topomap(self, tmin=tmin, tmax=tmax, fmin=fmin,
fmax=fmax, ch_type=ch_type, baseline=baseline,
mode=mode, layout=layout, vmin=vmin, vmax=vmax,
cmap=cmap, sensors=sensors, colorbar=colorbar,
unit=unit, res=res, size=size,
cbar_fmt=cbar_fmt, show_names=show_names,
title=title, axes=axes, show=show,
outlines=outlines, head_pos=head_pos)
def save(self, fname, overwrite=False):
"""Save TFR object to hdf5 file
Parameters
----------
fname : str
The file name, which should end with -tfr.h5 .
overwrite : bool
If True, overwrite file (if it exists). Defaults to false
"""
write_tfrs(fname, self, overwrite=overwrite)
def _prepare_write_tfr(tfr, condition):
"""Aux function"""
return (condition, dict(times=tfr.times, freqs=tfr.freqs,
data=tfr.data, info=tfr.info, nave=tfr.nave,
comment=tfr.comment, method=tfr.method))
def write_tfrs(fname, tfr, overwrite=False):
"""Write a TFR dataset to hdf5.
Parameters
----------
fname : string
The file name, which should end with -tfr.h5
tfr : AverageTFR instance, or list of AverageTFR instances
The TFR dataset, or list of TFR datasets, to save in one file.
        Note. If .comment is None, a name will be generated on the fly,
        based on the order in which the TFR objects are passed.
overwrite : bool
If True, overwrite file (if it exists). Defaults to False.
See Also
--------
read_tfrs
Notes
-----
.. versionadded:: 0.9.0
"""
out = []
if not isinstance(tfr, (list, tuple)):
tfr = [tfr]
for ii, tfr_ in enumerate(tfr):
comment = ii if tfr_.comment is None else tfr_.comment
out.append(_prepare_write_tfr(tfr_, condition=comment))
write_hdf5(fname, out, overwrite=overwrite, title='mnepython')
def read_tfrs(fname, condition=None):
"""
Read TFR datasets from hdf5 file.
Parameters
----------
fname : string
The file name, which should end with -tfr.h5 .
condition : int or str | list of int or str | None
The condition to load. If None, all conditions will be returned.
Defaults to None.
See Also
--------
write_tfrs
Returns
-------
tfrs : list of instances of AverageTFR | instance of AverageTFR
Depending on `condition` either the TFR object or a list of multiple
TFR objects.
Notes
-----
.. versionadded:: 0.9.0
"""
check_fname(fname, 'tfr', ('-tfr.h5',))
logger.info('Reading %s ...' % fname)
tfr_data = read_hdf5(fname, title='mnepython')
for k, tfr in tfr_data:
tfr['info'] = Info(tfr['info'])
if condition is not None:
tfr_dict = dict(tfr_data)
if condition not in tfr_dict:
keys = ['%s' % k for k in tfr_dict]
            raise ValueError('Cannot find condition ("{0}") in this file. '
                             'I can give you "{1}"'
                             .format(condition, " or ".join(keys)))
out = AverageTFR(**tfr_dict[condition])
else:
out = [AverageTFR(**d) for d in list(zip(*tfr_data))[1]]
return out
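# Illustrative round trip (added comment, not part of the original module);
# `power` stands for a hypothetical AverageTFR instance:
#
#     write_tfrs('example-tfr.h5', power, overwrite=True)
#     power_read = read_tfrs('example-tfr.h5')[0]   # condition=None -> list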
@verbose
def tfr_morlet(inst, freqs, n_cycles, use_fft=False,
return_itc=True, decim=1, n_jobs=1, picks=None, verbose=None):
"""Compute Time-Frequency Representation (TFR) using Morlet wavelets
Parameters
----------
inst : Epochs | Evoked
The epochs or evoked object.
freqs : ndarray, shape (n_freqs,)
The frequencies in Hz.
n_cycles : float | ndarray, shape (n_freqs,)
The number of cycles globally or for each frequency.
use_fft : bool
        Use FFT-based convolution or not.
return_itc : bool
Return intertrial coherence (ITC) as well as averaged power.
Must be ``False`` for evoked data.
decim : int
The decimation factor on the time axis. To reduce memory usage.
n_jobs : int
The number of jobs to run in parallel.
picks : array-like of int | None
The indices of the channels to plot. If None all available
channels are displayed.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
power : instance of AverageTFR
The averaged power.
itc : instance of AverageTFR
The intertrial coherence (ITC). Only returned if return_itc
is True.
See Also
--------
tfr_multitaper, tfr_stockwell
"""
data = _get_data(inst, return_itc)
info = inst.info
info, data, picks = _prepare_picks(info, data, picks)
    data = data[:, picks, :]
power, itc = _induced_power_cwt(data, sfreq=info['sfreq'],
frequencies=freqs,
n_cycles=n_cycles, n_jobs=n_jobs,
use_fft=use_fft, decim=decim,
zero_mean=True)
times = inst.times[::decim].copy()
nave = len(data)
out = AverageTFR(info, power, times, freqs, nave, method='morlet-power')
if return_itc:
out = (out, AverageTFR(info, itc, times, freqs, nave,
method='morlet-itc'))
return out
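# Illustrative example (added comment, not part of the original module);
# `epochs` stands for a hypothetical mne.Epochs instance:
#
#     freqs = np.arange(6., 30., 3.)
#     power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=freqs / 2.,
#                             return_itc=True, decim=3, n_jobs=1)
#     power.plot(picks=[0], baseline=(None, 0), mode='logratio')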
def _prepare_picks(info, data, picks):
if picks is None:
picks = pick_types(info, meg=True, eeg=True, ref_meg=False,
exclude='bads')
if np.array_equal(picks, np.arange(len(data))):
picks = slice(None)
else:
info = pick_info(info, picks)
return info, data, picks
@verbose
def _induced_power_mtm(data, sfreq, frequencies, time_bandwidth=4.0,
use_fft=True, n_cycles=7, decim=1, n_jobs=1,
zero_mean=True, verbose=None):
"""Compute time induced power and inter-trial phase-locking factor
The time frequency decomposition is done with DPSS wavelets
Parameters
----------
data : np.ndarray, shape (n_epochs, n_channels, n_times)
The input data.
sfreq : float
sampling Frequency
frequencies : np.ndarray, shape (n_frequencies,)
Array of frequencies of interest
time_bandwidth : float
Time x (Full) Bandwidth product.
The number of good tapers (low-bias) is chosen automatically based on
this to equal floor(time_bandwidth - 1). Default is 4.0 (3 tapers).
use_fft : bool
Compute transform with fft based convolutions or temporal
convolutions. Defaults to True.
n_cycles : float | np.ndarray shape (n_frequencies,)
Number of cycles. Fixed number or one per frequency. Defaults to 7.
decim: int
Temporal decimation factor. Defaults to 1.
n_jobs : int
The number of CPUs used in parallel. All CPUs are used in -1.
Requires joblib package. Defaults to 1.
zero_mean : bool
Make sure the wavelets are zero mean. Defaults to True.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
power : np.ndarray, shape (n_channels, n_frequencies, n_times)
Induced power. Squared amplitude of time-frequency coefficients.
itc : np.ndarray, shape (n_channels, n_frequencies, n_times)
Phase locking value.
"""
n_epochs, n_channels, n_times = data[:, :, ::decim].shape
logger.info('Data is %d trials and %d channels', n_epochs, n_channels)
n_frequencies = len(frequencies)
logger.info('Multitaper time-frequency analysis for %d frequencies',
n_frequencies)
# Precompute wavelets for given frequency range to save time
Ws = _dpss_wavelet(sfreq, frequencies, n_cycles=n_cycles,
time_bandwidth=time_bandwidth, zero_mean=zero_mean)
n_taps = len(Ws)
logger.info('Using %d tapers', n_taps)
n_times_wavelets = Ws[0][0].shape[0]
if n_times <= n_times_wavelets:
warnings.warn("Time windows are as long or longer than the epoch. "
"Consider reducing n_cycles.")
psd = np.zeros((n_channels, n_frequencies, n_times))
itc = np.zeros((n_channels, n_frequencies, n_times))
parallel, my_time_frequency, _ = parallel_func(_time_frequency,
n_jobs)
for m in range(n_taps):
psd_itc = parallel(my_time_frequency(data[:, c, :],
Ws[m], use_fft, decim)
for c in range(n_channels))
for c, (psd_c, itc_c) in enumerate(psd_itc):
psd[c, :, :] += psd_c
itc[c, :, :] += itc_c
psd /= n_taps
itc /= n_taps
return psd, itc
@verbose
def tfr_multitaper(inst, freqs, n_cycles, time_bandwidth=4.0,
use_fft=True, return_itc=True, decim=1, n_jobs=1,
picks=None, verbose=None):
"""Compute Time-Frequency Representation (TFR) using DPSS wavelets
Parameters
----------
inst : Epochs | Evoked
The epochs or evoked object.
freqs : ndarray, shape (n_freqs,)
The frequencies in Hz.
n_cycles : float | ndarray, shape (n_freqs,)
The number of cycles globally or for each frequency.
The time-window length is thus T = n_cycles / freq.
time_bandwidth : float, (optional)
Time x (Full) Bandwidth product. Should be >= 2.0.
Choose this along with n_cycles to get desired frequency resolution.
The number of good tapers (least leakage from far away frequencies)
is chosen automatically based on this to floor(time_bandwidth - 1).
Default is 4.0 (3 good tapers).
E.g., With freq = 20 Hz and n_cycles = 10, we get time = 0.5 s.
If time_bandwidth = 4., then frequency smoothing is (4 / time) = 8 Hz.
use_fft : bool
        Use FFT-based convolution or not.
Defaults to True.
return_itc : bool
Return intertrial coherence (ITC) as well as averaged power.
Defaults to True.
decim : int
The decimation factor on the time axis. To reduce memory usage.
        Note that this is brute-force decimation; no anti-aliasing is done.
Defaults to 1.
n_jobs : int
The number of jobs to run in parallel. Defaults to 1.
picks : array-like of int | None
The indices of the channels to plot. If None all available
channels are displayed.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
power : AverageTFR
The averaged power.
itc : AverageTFR
The intertrial coherence (ITC). Only returned if return_itc
is True.
See Also
--------
    tfr_morlet, tfr_stockwell
Notes
-----
.. versionadded:: 0.9.0
"""
data = _get_data(inst, return_itc)
info = inst.info
info, data, picks = _prepare_picks(info, data, picks)
    data = data[:, picks, :]
power, itc = _induced_power_mtm(data, sfreq=info['sfreq'],
frequencies=freqs, n_cycles=n_cycles,
time_bandwidth=time_bandwidth,
use_fft=use_fft, decim=decim,
n_jobs=n_jobs, zero_mean=True,
verbose='INFO')
times = inst.times[::decim].copy()
nave = len(data)
    out = AverageTFR(info, power, times, freqs, nave,
                     method='multitaper-power')
if return_itc:
        out = (out, AverageTFR(info, itc, times, freqs, nave,
                               method='multitaper-itc'))
return out
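# Illustrative example (added comment, not part of the original module);
# `epochs` stands for a hypothetical mne.Epochs instance:
#
#     freqs = np.arange(5., 50., 2.)
#     power = tfr_multitaper(epochs, freqs=freqs, n_cycles=freqs / 2.,
#                            time_bandwidth=4.0, return_itc=False)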
def combine_tfr(all_tfr, weights='nave'):
"""Merge AverageTFR data by weighted addition
Create a new AverageTFR instance, using a combination of the supplied
instances as its data. By default, the mean (weighted by trials) is used.
Subtraction can be performed by passing negative weights (e.g., [1, -1]).
Data must have the same channels and the same time instants.
Parameters
----------
all_tfr : list of AverageTFR
The tfr datasets.
weights : list of float | str
The weights to apply to the data of each AverageTFR instance.
Can also be ``'nave'`` to weight according to tfr.nave,
or ``'equal'`` to use equal weighting (each weighted as ``1/N``).
Returns
-------
tfr : AverageTFR
The new TFR data.
Notes
-----
.. versionadded:: 0.11.0
"""
tfr = all_tfr[0].copy()
if isinstance(weights, string_types):
if weights not in ('nave', 'equal'):
raise ValueError('Weights must be a list of float, or "nave" or '
'"equal"')
if weights == 'nave':
weights = np.array([e.nave for e in all_tfr], float)
weights /= weights.sum()
else: # == 'equal'
weights = [1. / len(all_tfr)] * len(all_tfr)
weights = np.array(weights, float)
if weights.ndim != 1 or weights.size != len(all_tfr):
raise ValueError('Weights must be the same size as all_tfr')
ch_names = tfr.ch_names
for t_ in all_tfr[1:]:
assert t_.ch_names == ch_names, ValueError("%s and %s do not contain "
"the same channels"
% (tfr, t_))
assert np.max(np.abs(t_.times - tfr.times)) < 1e-7, \
ValueError("%s and %s do not contain the same time instants"
% (tfr, t_))
# use union of bad channels
bads = list(set(tfr.info['bads']).union(*(t_.info['bads']
for t_ in all_tfr[1:])))
tfr.info['bads'] = bads
tfr.data = sum(w * t_.data for w, t_ in zip(weights, all_tfr))
tfr.nave = max(int(1. / sum(w ** 2 / e.nave
for w, e in zip(weights, all_tfr))), 1)
return tfr
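# Illustrative example (added comment, not part of the original module);
# `tfr_a` and `tfr_b` stand for hypothetical AverageTFR instances with
# matching channels and time instants:
#
#     grand_average = combine_tfr([tfr_a, tfr_b], weights='nave')
#     difference = combine_tfr([tfr_a, tfr_b], weights=[1, -1])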
|
cmoutard/mne-python
|
mne/time_frequency/tfr.py
|
Python
|
bsd-3-clause
| 54,818
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.exceptions import DeserializationError
from msrestazure.azure_operation import AzureOperationPoller
from .. import models
class DeploymentsOperations(object):
"""DeploymentsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
:ivar api_version: The API version to use for this operation. Constant value: "2017-05-10".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2017-05-10"
self.config = config
def _delete_initial(
self, resource_group_name, deployment_name, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str', max_length=64, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [202, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def delete(
self, resource_group_name, deployment_name, custom_headers=None, raw=False, **operation_config):
"""Deletes a deployment from the deployment history.
A template deployment that is currently running cannot be deleted.
Deleting a template deployment removes the associated deployment
operations. Deleting a template deployment does not affect the state of
the resource group. This is an asynchronous operation that returns a
status of 202 until the template deployment is successfully deleted.
The Location response header contains the URI that is used to obtain
the status of the process. While the process is running, a call to the
URI in the Location header returns a status of 202. When the process
finishes, the URI in the Location header returns a status of 204 on
success. If the asynchronous request failed, the URI in the Location
header returns an error-level status code.
:param resource_group_name: The name of the resource group with the
deployment to delete. The name is case insensitive.
:type resource_group_name: str
:param deployment_name: The name of the deployment to delete.
:type deployment_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:return: An instance of AzureOperationPoller that returns None or
ClientRawResponse if raw=true
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
deployment_name=deployment_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
if raw:
return raw_result
# Construct and send request
def long_running_send():
return raw_result.response
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
header_parameters = {}
header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id']
return self._client.send(
request, header_parameters, stream=False, **operation_config)
def get_long_running_output(response):
if response.status_code not in [202, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
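    # A minimal caller-side sketch (not part of the generated client; it assumes
    # a hypothetical, already configured ResourceManagementClient named ``client``):
    #
    #     poller = client.deployments.delete('my_resource_group', 'my_deployment')
    #     poller.wait()  # block until the long-running delete finishes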
def check_existence(
self, resource_group_name, deployment_name, custom_headers=None, raw=False, **operation_config):
"""Checks whether the deployment exists.
:param resource_group_name: The name of the resource group with the
deployment to check. The name is case insensitive.
:type resource_group_name: str
:param deployment_name: The name of the deployment to check.
:type deployment_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: bool or ClientRawResponse if raw=true
:rtype: bool or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str', max_length=64, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.head(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [204, 404]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = (response.status_code == 204)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def _create_or_update_initial(
self, resource_group_name, deployment_name, properties, custom_headers=None, raw=False, **operation_config):
parameters = models.Deployment(properties=properties)
# Construct URL
url = '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str', max_length=64, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'Deployment')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DeploymentExtended', response)
if response.status_code == 201:
deserialized = self._deserialize('DeploymentExtended', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_or_update(
self, resource_group_name, deployment_name, properties, custom_headers=None, raw=False, **operation_config):
"""Deploys resources to a resource group.
You can provide the template and parameters directly in the request or
link to JSON files.
:param resource_group_name: The name of the resource group to deploy
the resources to. The name is case insensitive. The resource group
must already exist.
:type resource_group_name: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param properties: The deployment properties.
:type properties:
~azure.mgmt.resource.resources.v2017_05_10.models.DeploymentProperties
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:return: An instance of AzureOperationPoller that returns
DeploymentExtended or ClientRawResponse if raw=true
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.resource.resources.v2017_05_10.models.DeploymentExtended]
or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
deployment_name=deployment_name,
properties=properties,
custom_headers=custom_headers,
raw=True,
**operation_config
)
if raw:
return raw_result
# Construct and send request
def long_running_send():
return raw_result.response
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
header_parameters = {}
header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id']
return self._client.send(
request, header_parameters, stream=False, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = self._deserialize('DeploymentExtended', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
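    # A minimal caller-side sketch for create_or_update (illustration only; the
    # client, template dict and parameter values below are hypothetical):
    #
    #     props = models.DeploymentProperties(mode='Incremental',
    #                                         template=template_dict,
    #                                         parameters=parameters_dict)
    #     poller = client.deployments.create_or_update('my_rg', 'my_deployment', props)
    #     deployment = poller.result()  # DeploymentExtended once provisioning finishes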
def get(
self, resource_group_name, deployment_name, custom_headers=None, raw=False, **operation_config):
"""Gets a deployment.
:param resource_group_name: The name of the resource group. The name
is case insensitive.
:type resource_group_name: str
:param deployment_name: The name of the deployment to get.
:type deployment_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: DeploymentExtended or ClientRawResponse if raw=true
:rtype:
~azure.mgmt.resource.resources.v2017_05_10.models.DeploymentExtended
or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str', max_length=64, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DeploymentExtended', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def cancel(
self, resource_group_name, deployment_name, custom_headers=None, raw=False, **operation_config):
"""Cancels a currently running template deployment.
You can cancel a deployment only if the provisioningState is Accepted
or Running. After the deployment is canceled, the provisioningState is
set to Canceled. Canceling a template deployment stops the currently
running template deployment and leaves the resource group partially
deployed.
:param resource_group_name: The name of the resource group. The name
is case insensitive.
:type resource_group_name: str
:param deployment_name: The name of the deployment to cancel.
:type deployment_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}/cancel'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str', max_length=64, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def validate(
self, resource_group_name, deployment_name, properties, custom_headers=None, raw=False, **operation_config):
"""Validates whether the specified template is syntactically correct and
        will be accepted by Azure Resource Manager.
:param resource_group_name: The name of the resource group the
template will be deployed to. The name is case insensitive.
:type resource_group_name: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param properties: The deployment properties.
:type properties:
~azure.mgmt.resource.resources.v2017_05_10.models.DeploymentProperties
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: DeploymentValidateResult or ClientRawResponse if raw=true
:rtype:
~azure.mgmt.resource.resources.v2017_05_10.models.DeploymentValidateResult
or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
parameters = models.Deployment(properties=properties)
# Construct URL
url = '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}/validate'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str', max_length=64, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'Deployment')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200, 400]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DeploymentValidateResult', response)
if response.status_code == 400:
deserialized = self._deserialize('DeploymentValidateResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def export_template(
self, resource_group_name, deployment_name, custom_headers=None, raw=False, **operation_config):
"""Exports the template used for specified deployment.
:param resource_group_name: The name of the resource group. The name
is case insensitive.
:type resource_group_name: str
:param deployment_name: The name of the deployment from which to get
the template.
:type deployment_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: DeploymentExportResult or ClientRawResponse if raw=true
:rtype:
~azure.mgmt.resource.resources.v2017_05_10.models.DeploymentExportResult
or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}/exportTemplate'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str', max_length=64, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DeploymentExportResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def list_by_resource_group(
self, resource_group_name, filter=None, top=None, custom_headers=None, raw=False, **operation_config):
"""Get all the deployments for a resource group.
:param resource_group_name: The name of the resource group with the
deployments to get. The name is case insensitive.
:type resource_group_name: str
:param filter: The filter to apply on the operation. For example, you
can use $filter=provisioningState eq '{state}'.
:type filter: str
:param top: The number of results to get. If null is passed, returns
all deployments.
:type top: int
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of DeploymentExtended
:rtype:
~azure.mgmt.resource.resources.v2017_05_10.models.DeploymentExtendedPaged[~azure.mgmt.resource.resources.v2017_05_10.models.DeploymentExtended]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
if top is not None:
query_parameters['$top'] = self._serialize.query("top", top, 'int')
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.DeploymentExtendedPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.DeploymentExtendedPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
|
AutorestCI/azure-sdk-for-python
|
azure-mgmt-resource/azure/mgmt/resource/resources/v2017_05_10/operations/deployments_operations.py
|
Python
|
mit
| 32,532
|
""" License
MIT License
Copyright (c) 2017 OpenAdaptronik
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from django.urls import path
from .views import IndexView, register_success, register_activate
app_name = 'register'
urlpatterns = [
path('activate/<uidb64>/<slug:token>/', register_activate, name='activate'),
path('success/', register_success, name='success'),
path('', IndexView.as_view(), name='index'),
]
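# A minimal wiring sketch (illustration only; the project-level URL prefix and
# module path are assumptions, not taken from this repository):
#
#     # project urls.py
#     from django.urls import include, path
#     urlpatterns = [
#         path('register/', include('apps.register.urls')),
#     ]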
|
IT-PM-OpenAdaptronik/Webapp
|
apps/register/urls.py
|
Python
|
mit
| 1,410
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#This file is part of pypository.
#
#pypository is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#pypository is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with pypository. If not, see <http://www.gnu.org/licenses/>.
"""Test global configuration"""
__author__ = "Nestor Arocha"
__copyright__ = "Copyright 2008-2013, Nestor Arocha"
__email__ = "nesaro@gmail.com"
import unittest
class TestInmutableDict(unittest.TestCase):
def testEqual(self):
from pypository.utils import ImmutableDict
a = ImmutableDict({"a":1,"b":2})
b = ImmutableDict({"a":1,"b":2})
c = ImmutableDict({"a":1,"b":3})
self.assertEqual(a,b)
self.assertEqual(hash(a),hash(b))
self.assertNotEqual(a,c)
self.assertNotEqual(hash(a),hash(c))
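# Small, hedged addition so the module can also be run directly; the original
# project may rely on an external test runner instead.
if __name__ == '__main__':
    unittest.main()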
|
nesaro/pypository
|
tests/test_Immutable.py
|
Python
|
gpl-3.0
| 1,254
|
from django.shortcuts import render, HttpResponseRedirect
from django.db.models import Q
from contas.models import Caixa, Cheque
from empresas.models import Sistema
from datetime import datetime
#d = datetime.strptime('2007-07-18 10:03:19', '%Y-%m-%d %H:%M:%S')
#day_string = d.strftime('%Y-%m-%d')
# Working with dates
def caixa_formulario(request,caixaId):
try:
caixa = Caixa.objects.get(id=caixaId)
except:
caixa = Caixa()
grupo = Sistema.objects.filter(ativo='SIM', tipo='GRUPO', empresa_id=request.user.empresa.id).order_by('nome')
subgrupo = Sistema.objects.filter(ativo='SIM', tipo='SUB-GRUPO', empresa_id=request.user.empresa.id).order_by('nome')
categoria = Sistema.objects.filter(ativo='SIM', tipo='CATEGORIA', empresa_id=request.user.empresa.id).order_by('nome')
return render(request,'sistema/caixa_formulario.html',{'caixas':caixa,'grupos':grupo,'subgrupos':subgrupo,'categorias':categoria})
def caixa(request):
caixa = Caixa.objects.filter(empresa_id=int(request.user.empresa.id)).order_by('data_vencimento')
return render(request,'sistema/caixa.html',{'caixas':caixa})
def caixa_gravar(request):
try:
caixa = Caixa.objects.get(empresa_id=request.user.empresa.id,id=request.POST.get('caixaId'),usuario_id=request.user.id)
except:
caixa = Caixa()
data_pagamento = datetime.strptime(str(request.POST.get('data_pagamento')), '%d/%m/%Y').date()
data_vencimento = datetime.strptime(str(request.POST.get('data_vencimento')), '%d/%m/%Y').date()
data_boleto = datetime.strptime(str(request.POST.get('data_boleto')), '%d/%m/%Y').date()
caixa.pessoa_id = int(request.POST.get('pessoa_id'))
caixa.data_pagamento = data_pagamento.strftime('%Y-%m-%d')
caixa.data_vencimento = data_vencimento.strftime('%Y-%m-%d')
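    # The replace chains below convert Brazilian-formatted amounts such as
    # "1.234,56" (thousands dot, decimal comma) into "1234.56" before saving.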
caixa.valor_multa = (request.POST.get('valor_multa').replace('.','')).replace(',','.')
caixa.valor_juros = (request.POST.get('valor_juros').replace('.','')).replace(',','.')
caixa.valor_desconto = (request.POST.get('valor_desconto').replace('.','')).replace(',','.')
caixa.valor_bruto = (request.POST.get('valor_bruto').replace('.','')).replace(',','.')
caixa.descricao = request.POST.get('descricao')
caixa.usuario_id = request.user.id
caixa.empresa_id = request.user.empresa.id
caixa.observacao = request.POST.get('observacao')
caixa.data_boleto = data_boleto.strftime('%Y-%m-%d')
caixa.categoria_id = request.POST.get('categoria_id')
caixa.grupo_id = request.POST.get('grupo_id')
caixa.subgrupo_id = request.POST.get('subgrupo_id')
caixa.tipo = request.POST.get('tipo')
caixa.save()
return HttpResponseRedirect('/sistema/caixa/')
def caixa_excluir(request,caixaId):
caixa = Caixa.objects.get(empresa_id=request.user.empresa.id,id=caixaId,usuario_id=request.user.id).delete()
return HttpResponseRedirect('/sistema/caixa/')
def cheque(request):
if request.method == 'POST':
cheque = Cheque.objects.filter(valor=request.POST.get('parametro'),empresa_id=int(request.user.empresa.id)).order_by('data_compensar')
else:
cheque = Cheque.objects.filter(empresa_id=int(request.user.empresa.id)).order_by('data_compensar')
return render(request,'sistema/cheque.html',{'cheques':cheque})
def cheque_formulario(request,chequeId):
try:
cheque = Cheque.objects.get(id=chequeId)
except:
cheque = Cheque()
return render(request,'sistema/cheque_formulario.html',{'cheques':cheque})
def cheque_gravar(request):
try:
cheque = Cheque.objects.get(id=request.POST.get('chequeId'),empresa_id=request.user.empresa.id)
except:
cheque = Cheque()
data_compensar = datetime.strptime(str(request.POST.get('data_compensar')), '%d/%m/%Y').date()
data_compensado = datetime.strptime(str(request.POST.get('data_compensado')), '%d/%m/%Y').date()
data_recebido = datetime.strptime(str(request.POST.get('data_recebido')), '%d/%m/%Y').date()
cheque.numero_cheque = request.POST.get('numero_cheque')
cheque.valor = (request.POST.get('valor').replace('.','')).replace(',','.')
cheque.data_compensar = data_compensar.strftime('%Y-%m-%d')
cheque.data_recebido = data_recebido.strftime('%Y-%m-%d')
cheque.data_compensado = data_compensado.strftime('%Y-%m-%d')
cheque.banco = request.POST.get('banco')
cheque.agencia = request.POST.get('agencia')
cheque.nome = request.POST.get('nome')
cheque.empresa_id = request.user.empresa_id
cheque.save()
return HttpResponseRedirect('/sistema/cheque/')
def cheque_excluir(request,chequeId):
cheque = Cheque.objects.get(id=chequeId,empresa_id=request.user.empresa.id).delete()
return HttpResponseRedirect('/sistema/cheque/')
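# A minimal URL-wiring sketch (illustration only; the project urls.py is not part
# of this file, and the '/sistema/...' prefixes in the redirects above merely
# suggest routes along these lines):
#
#     from django.conf.urls import url
#     from contas import views
#     urlpatterns = [
#         url(r'^sistema/caixa/$', views.caixa, name='caixa'),
#         url(r'^sistema/cheque/$', views.cheque, name='cheque'),
#     ]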
|
mateusportal/portalconta
|
contas/views.py
|
Python
|
gpl-2.0
| 4,783
|
#!/usr/bin/env python
#coding: utf-8
#### FUNCTIONS ####
def header(string):
"""
Display header
"""
timeInfo = time.strftime("%Y-%m-%d %H:%M")
print '\n', timeInfo, "****", string, "****"
def subHeader(string):
"""
Display subheader
"""
timeInfo = time.strftime("%Y-%m-%d %H:%M")
print timeInfo, "**", string, "**"
def info(string):
"""
Display basic information
"""
timeInfo = time.strftime("%Y-%m-%d %H:%M")
print timeInfo, string
#### MAIN ####
## Import modules ##
import argparse
import sys
import os.path
import formats
import time
import scipy.stats as stats
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
## Get user's input ##
parser = argparse.ArgumentParser(description= """""")
parser.add_argument('alleleFreqs', help='')
parser.add_argument('alleleCounts', help='')
parser.add_argument('-o', '--outDir', default=os.getcwd(), dest='outDir', help='output directory. Default: current working directory.' )
args = parser.parse_args()
alleleFreqs = args.alleleFreqs
alleleCounts = args.alleleCounts
outDir = args.outDir
scriptName = os.path.basename(sys.argv[0])
## Display configuration to standard output ##
print
print "***** ", scriptName, " configuration *****"
print "alleleFreqs: ", alleleFreqs
print "alleleCounts: ", alleleCounts
print "outDir: ", outDir
print
print "***** Executing ", scriptName, ".... *****"
print
## Start ##
#### 1. Load input tables:
##########################
header("1. Load input tables")
frequenciesDf = pd.read_csv(alleleFreqs, header=0, index_col=0, sep='\t')
countsDf = pd.read_csv(alleleCounts, header=0, index_col=0, sep='\t')
# Convert allele frequencies from fractions to percentages
frequenciesDf = frequenciesDf * 100
#### 2. Select hot source L1:
#############################
#hotSourceList = ["22q12.1", "14q23.1", "6p22.1", "6p24.1", "Xp22.2-1", "9q32", "2q24.1", "3q21.1", "Xp22.2-2", "7p12.3", "3q26.1"]
hotSourceList = ["22q12.1", "14q23.1", "6p22.1", "6p24.1", "Xp22.2-1", "9q32", "2q24.1", "3q21.1", "Xp22.2-2", "7p12.3", "3q26.1", "1p12", "8q24.22", "1p31.1-2", "13q21.2-2", "1p22.3", "5q14.3", "7p14.3"]
frequenciesHotDf = frequenciesDf.loc[hotSourceList]
countsHotDf = countsDf.loc[hotSourceList]
print "frequenciesHotDf: ", frequenciesHotDf
print "countsHotDf: ", countsHotDf
#### 3. Make plot:
###################
header("3. Make plot")
## Set plotting style
sns.set_style("whitegrid")
# Initialize the matplotlib figure
fig = plt.figure(figsize=(6, 2))
# Plot source element allele frequency in the complete PCAWG cohort
ax = frequenciesHotDf["PCAWG"].plot(kind='bar')
ax.set(ylim=(0, 100), ylabel="VAF (%)")
# Add the source element allele count on top of each bar
rects = ax.patches
labels = countsHotDf["PCAWG"].values
for rect, label in zip(rects, labels):
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()/2, height, label, ha='center', va='bottom', size='8')
## Save figure
fileName = outDir + "/Pictures/topSourceElements_VAF_alleleCounts.pdf"
plt.savefig(fileName)
####
header("Finished")
|
brguez/TEIBA
|
src/python/germlineTopSrcElements_plotVaf.py
|
Python
|
gpl-3.0
| 3,152
|
#!/usr/bin/env python
from setuptools import setup, find_packages
requires = ["scrapy"]
setup(
name="dwarf-scraper",
version="0.1",
description="dwarf-scraper",
author="Jose Guilherme Vanz",
author_email="guilherme.sft@gmail.com",
url="https://github.com/jvanz/dwarf",
packages=find_packages(),
install_requires=requires
)
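# A minimal usage note (not part of the original file): from the scraper/
# directory the package can be installed in editable mode with
#
#     pip install -e .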
|
jvanz/dwarf
|
scraper/setup.py
|
Python
|
gpl-2.0
| 359
|
#!/usr/bin/python
import numpy as np
import pylab as plt
import seaborn as sns
sns.set_context('poster')
plt.subplot(1,1,1)
data = np.genfromtxt(fname='cor.dat')
#for x in range(1,data.shape[-1]):
plt.plot(data[:,0],data[:,1],lw=2,label='Re')
plt.plot(data[:,0],data[:,2],lw=2,label='Im')
#plt.plot(data[:,0],data[:,3],lw=2,label='$|C(t)|$')
#dat = np.genfromtxt(fname='../spo/1.0.2/corr')
#plt.plot(dat[:,0],dat[:,1],'--',label='Re, QM')
#plt.plot(dat[:,0],dat[:,2],'--',label='Im, QM')
#x = np.linspace(0,4,100)
#y = -np.sin(x)
#plt.plot(x,y,lw=2,label='sin(x)')
plt.xlabel('$Time$')
plt.ylabel('$C(t)$')
#plt.title('traj')
#plt.subplot(2,1,2)
dat = np.genfromtxt(fname='/home/bing/gwp/spo_2d/1.0.1/cor1')
#for x in range(1,3):
#plt.plot(dat[:,0]/2.0,dat[:,1],'--',label='$\Re(C(t)),~ QM$',lw=2)
#plt.plot(dat[:,0]/2.0,dat[:,2],'--',label='$\Im(C(t)),~ QM$',lw=2)
#z = np.sqrt(data[:,1]**2+data[:,2]**2)
#plt.plot(data[:,0],z,label='$|C(t)|$',lw=1)
#plt.ylim(-0.2,0.2)
plt.legend()
#plt.xlim(0,8)
plt.savefig('cor.pdf')
plt.show()
|
binghongcha08/pyQMD
|
GWP/2D/1.0.7/cor.py
|
Python
|
gpl-3.0
| 1,054
|
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sahara.service import api
from sahara.service.validations import node_group_template_schema as ngt_schema
from sahara.service.validations import node_group_templates as nt
from sahara.tests.unit.service.validation import utils as u
class TestNGTemplateCreateValidation(u.ValidationTestCase):
def setUp(self):
super(TestNGTemplateCreateValidation, self).setUp()
self._create_object_fun = nt.check_node_group_template_create
self.scheme = ngt_schema.NODE_GROUP_TEMPLATE_SCHEMA
api.plugin_base.setup_plugins()
def test_node_groups_create_required(self):
self._assert_create_object_validation(
data={
},
bad_req_i=(1, "VALIDATION_ERROR",
u"'name' is a required property")
)
self._assert_create_object_validation(
data={
'name': 'a'
},
bad_req_i=(1, "VALIDATION_ERROR",
u"'flavor_id' is a required property")
)
self._assert_create_object_validation(
data={
'name': 'a',
'flavor_id': '42',
'plugin_name': 'vanilla',
},
bad_req_i=(1, "VALIDATION_ERROR",
u"'hadoop_version' is a required property")
)
self._assert_create_object_validation(
data={
'name': 'a',
'flavor_id': '42',
'plugin_name': 'vanilla',
'hadoop_version': '1.2.1'
},
bad_req_i=(1, "VALIDATION_ERROR",
u"'node_processes' is a required property")
)
self._assert_create_object_validation(
data={
'name': "a",
'flavor_id': '42',
'plugin_name': 'vanilla',
'hadoop_version': '1.2.1',
'node_processes': []
},
bad_req_i=(1, 'VALIDATION_ERROR',
u'\[\] is too short')
)
def test_ng_template_create_v_names(self):
data = {
'name': 'a',
'flavor_id': '42',
'plugin_name': 'vanilla',
'hadoop_version': '1.2.1',
'node_processes': ['namenode']
}
self._assert_valid_name_hostname_validation(data)
def test_ng_template_create_v_node_processes(self):
self._assert_create_object_validation(
data={
'name': "a",
'flavor_id': '42',
'plugin_name': 'vanilla',
'hadoop_version': '1.2.1',
'node_processes': ["namenode", "namenode"]
},
bad_req_i=(1, 'INVALID_DATA',
'Duplicates in node processes have been detected')
)
self._assert_create_object_validation(
data={
'name': 'a',
'flavor_id': '42',
'plugin_name': 'vanilla',
'hadoop_version': '1.2.1',
'node_processes': ['wrong_process']
},
bad_req_i=(1, 'INVALID_REFERENCE',
"Plugin doesn't support the following node processes: "
"['wrong_process']")
)
def test_ng_template_create_v_right(self):
self._assert_create_object_validation(
data={
'name': 'a',
'flavor_id': '42',
'plugin_name': 'vanilla',
'hadoop_version': '1.2.1',
'node_processes': ['namenode',
'datanode',
'secondarynamenode',
'tasktracker',
'jobtracker'],
'node_configs': {
'HDFS': {
u'hadoop.tmp.dir': '/temp/'
}
},
'image_id': '550e8400-e29b-41d4-a716-446655440000',
'volumes_per_node': 2,
'volumes_size': 10,
'description': 'test node template',
'floating_ip_pool': 'd9a3bebc-f788-4b81-9a93-aa048022c1ca'
}
)
def test_ng_template_create_v_minimum_ints(self):
self._assert_create_object_validation(
data={
'name': 'a',
'flavor_id': '42',
'plugin_name': 'vanilla',
'hadoop_version': '1.2.1',
'node_processes': ['wrong_process'],
'volumes_per_node': -1
},
bad_req_i=(1, 'VALIDATION_ERROR',
u'-1(.0)? is less than the minimum of 0')
)
self._assert_create_object_validation(
data={
'name': 'a',
'flavor_id': '42',
'plugin_name': 'vanilla',
'hadoop_version': '1.2.1',
'node_processes': ['wrong_process'],
'volumes_size': 0
},
bad_req_i=(1, 'VALIDATION_ERROR',
u'0(.0)? is less than the minimum of 1')
)
def test_ng_template_create_v_types(self):
default_data = {
'name': 'a', 'flavor_id': '42',
'plugin_name': 'vanilla',
'hadoop_version': '1.2.1',
'node_processes': ['namenode']
}
self._assert_types(default_data)
def test_ng_template_create_v_unique_ng(self):
data = {
'name': 'test',
'flavor_id': '42',
'plugin_name': 'vanilla',
'hadoop_version': '1.2.1',
'node_processes': ['namenode']}
self._assert_create_object_validation(
data=data,
bad_req_i=(1, 'NAME_ALREADY_EXISTS',
"NodeGroup template with name 'test' already exists")
)
def test_ng_template_create_v_flavor_exists(self):
self._assert_create_object_validation(
data={
'name': 'test-ng',
'flavor_id': '1',
'plugin_name': 'vanilla',
'hadoop_version': '1.2.1',
'node_processes': ['namenode']
},
bad_req_i=(1, 'NOT_FOUND',
"Requested flavor '1' not found")
)
def test_ng_template_create_v_ng_configs(self):
self._assert_create_object_validation(
data={
'name': 'test-ng',
'flavor_id': '42',
'plugin_name': 'vanilla',
'hadoop_version': '1.2.1',
'node_processes': ['namenode'],
'node_configs': {
'wrong_target': {
u'hadoop.tmp.dir': '/temp/'
}
}},
bad_req_i=(1, 'INVALID_REFERENCE',
"Plugin doesn't contain applicable "
"target 'wrong_target'")
)
self._assert_create_object_validation(
data={
'name': 'test-ng',
'flavor_id': '42',
'plugin_name': 'vanilla',
'hadoop_version': '1.2.1',
'node_processes': ['namenode'],
'node_configs': {
'HDFS': {
's': 'a'
}
}
},
bad_req_i=(1, 'INVALID_REFERENCE',
"Plugin's applicable target 'HDFS' doesn't "
"contain config with name 's'")
)
def test_ng_template_cinder(self):
self._assert_create_object_validation(
data={
'name': 'a',
'flavor_id': '42',
'plugin_name': 'vanilla',
'hadoop_version': '1.2.1',
'node_processes': ['wrong_process'],
'volumes_per_node': -1
},
bad_req_i=(1, 'VALIDATION_ERROR',
u'-1(.0)? is less than the minimum of 0')
)
self._assert_create_object_validation(
data={
'name': 'a',
'flavor_id': '42',
'plugin_name': 'vanilla',
'hadoop_version': '1.2.1',
'node_processes': ['wrong_process'],
'volumes_size': 0
},
bad_req_i=(1, 'VALIDATION_ERROR',
u'0(.0)? is less than the minimum of 1')
)
self._assert_create_object_validation(
data={
'name': 'a',
'flavor_id': '42',
'plugin_name': 'vanilla',
'hadoop_version': '1.2.1',
'node_processes': ['datanode', 'tasktracker'],
'volumes_per_node': 1,
'volumes_size': 1,
'volume_mount_prefix': '/mnt/volume'
}
)
data = {
'name': 'a',
'flavor_id': '42',
'plugin_name': 'vanilla',
'hadoop_version': '1.2.1',
'node_processes': ['datanode', 'tasktracker'],
'volumes_per_node': 1,
'volumes_size': 1,
'volume_mount_prefix': 'qwerty'
}
self._assert_create_object_validation(
data=data,
bad_req_i=(1, 'VALIDATION_ERROR', "'qwerty' is not a 'posix_path'")
)
def test_wrong_floating_ip_pool(self):
self._assert_create_object_validation(
data={
'name': 'a',
'flavor_id': '42',
'plugin_name': 'vanilla',
'hadoop_version': '1.2.1',
'node_processes': ['datanode', 'tasktracker'],
'floating_ip_pool': 'network_bad'
},
bad_req_i=(1, 'NOT_FOUND', "Floating IP pool network_bad "
"not found")
)
|
henaras/sahara
|
sahara/tests/unit/service/validation/test_ng_template_validation_create.py
|
Python
|
apache-2.0
| 10,575
|
# -*- encoding: utf-8 -*-
import random
from pilasengine.escenas import normal
from pilasengine.fondos import fondo
class FondoEscenaJuego(fondo.Fondo):
def iniciar(self):
self.imagen = './data/fondo.png'
class EscenaJuego(normal.Normal):
def iniciar(self):
self.fondo = FondoEscenaJuego(self.pilas)
self.tablero = [[0, 0, 0],
[0, 0, 0],
[0, 0, 0]]
self.hay_ganador = False
self.turno = "ficha{}".format(str(random.randint(1, 2)))
self.crear_actor_turno_actual()
self.definir_imagen_turno_actual()
self.turnos = 9
self.ter_ficha1 = ["ficha1", "ficha1", "ficha1"]
self.ter_ficha2 = ["ficha2", "ficha2", "ficha2"]
self.crear_pizarra()
self.crear_casillas_vacias()
self.pulsa_tecla_escape.conectar(self.cuando_se_presione_escape)
def cuando_se_presione_escape(self, evento):
"Regresa al menu principal"
import escena_menu
self.pilas.escenas.definir_escena(escena_menu.EscenaMenu(self.pilas))
def crear_actor_turno_actual(self):
self.turno_actual = self.pilas.actores.Actor()
self.turno_actual.x = -160
self.turno_actual.y = 75
self.turno_actual.escala = .6
def definir_imagen_turno_actual(self):
self.turno_actual.imagen = './data/{}.png'.format(self.turno)
def crear_casillas_vacias(self):
self.casillas = self.pilas.actores.Grupo()
for fila, _ in enumerate(self.tablero):
for columna, _ in enumerate(self.tablero[fila]):
casilla = self.pilas.actores.Boton(80*columna-80, -80*fila+80,
'./data/ficha_vacia.png')
casilla.pos_en_tablero = (fila, columna)
casilla.conectar_presionado(self.cuando_presiona_casilla,
casilla)
self.casillas.agregar(casilla)
def cuando_presiona_casilla(self, casilla):
casilla.desactivar()
imagen = './data/{}.png'.format(self.turno)
casilla.pintar_presionado(imagen)
casilla.escala = .8
casilla.escala = [1]
self.quitar_un_turno()
self.poner_ficha_en_tablero(casilla.pos_en_tablero)
self.verificar_ganador()
self.verificar_empate()
self.cambiar_turno()
def verificar_ganador(self):
if (self.verificar_ganador_en_horizontal() or
self.verificar_ganador_en_vertical() or
self.verificar_ganador_en_diagonal()):
self.hay_ganador = True
self.casillas.desactivar()
self.mostrar_mensaje_fin_juego()
def mostrar_mensaje_fin_juego(self, empate=False):
if empate:
mensaje = u"¡Nadie Ganó, vuelve a intentarlo!"
else:
nombre_jugador = self.turno.replace("ficha", "jugador ")
mensaje = u"¡Ganó {}!".format(nombre_jugador)
texto = self.pilas.actores.Texto(cadena_de_texto=mensaje, y=-180)
texto.escala = .7
texto.escala = [1]
def verificar_empate(self):
if not self.hay_ganador and self.turnos < 1:
self.casillas.desactivar()
self.mostrar_mensaje_fin_juego(empate=True)
def verificar_ganador_en_horizontal(self):
if self.ter_ficha1 in self.tablero:
fila = self.tablero.index(self.ter_ficha1)
self.pintar_linea_horizontal(fila)
return True
elif self.ter_ficha2 in self.tablero:
fila = self.tablero.index(self.ter_ficha2)
self.pintar_linea_horizontal(fila)
return True
return False
def verificar_ganador_en_vertical(self):
cols = [list(col) for col in zip(*self.tablero)]
if self.ter_ficha1 in cols:
columna = cols.index(self.ter_ficha1)
self.pintar_linea_vertical(columna)
return True
elif self.ter_ficha2 in cols:
columna = cols.index(self.ter_ficha2)
self.pintar_linea_vertical(columna)
return True
return False
def verificar_ganador_en_diagonal(self):
diagonal1 = [self.tablero[0][0],
self.tablero[1][1],
self.tablero[2][2]]
diagonal2 = [self.tablero[0][2],
self.tablero[1][1],
self.tablero[2][0]]
if diagonal1 == self.ter_ficha1 or diagonal1 == self.ter_ficha2:
self.pintar_linea_diagonal_1()
return True
elif diagonal2 == self.ter_ficha1 or diagonal2 == self.ter_ficha2:
self.pintar_linea_diagonal_2()
return True
return False
def cambiar_turno(self):
if self.turno == "ficha1":
self.turno = "ficha2"
else:
self.turno = "ficha1"
self.definir_imagen_turno_actual()
def quitar_un_turno(self):
self.turnos -= 1
def poner_ficha_en_tablero(self, casilla_pos):
fila, columna = casilla_pos
self.tablero[fila][columna] = self.turno
def crear_pizarra(self):
self.pizarra = self.pilas.actores.Pizarra()
def pintar_linea_horizontal(self, h):
y = 80 - h * 80
self.pizarra.linea(-100, y, 100, y, self.pilas.colores.cyan, grosor=3)
def pintar_linea_diagonal_1(self):
self.pizarra.linea(-84, 84, 84, -84, self.pilas.colores.cyan, grosor=3)
def pintar_linea_diagonal_2(self):
self.pizarra.linea(84, 84, -84, -84, self.pilas.colores.cyan, grosor=3)
def pintar_linea_vertical(self, v):
x = -80 + v * 80
self.pizarra.linea(x, -100, x, 100, self.pilas.colores.cyan, grosor=3)
|
fsalamero/pilas
|
pilasengine/ejemplos/ejemplos_a_revisar/tres_en_raya/escena_juego.py
|
Python
|
lgpl-3.0
| 5,757
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# emailIdx - Synchronizes emails from IMAP to Elasticsearch
# Copyright (C) 2015 Paul Hofmann
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
#########################################################################################
# Imports #
#########################################################################################
from emailidx import EmailSerializer, CertificateVerifier, Settings
from imaplib2 import imaplib2
from M2Crypto import X509
import re, sys
#########################################################################################
# Global Variables #
#########################################################################################
TLS_METHOD = {
'PLAIN': { 'USE_SSL': False, 'USE_STARTTLS': False },
'SSL/TLS': { 'USE_SSL': True, 'USE_STARTTLS': False },
'STARTTLS': { 'USE_SSL': False, 'USE_STARTTLS': True },
}
IMAP_MAILBOX_DESC_REGEX = re.compile(r'\((?P<flags>.*?)\) "(?P<delimiter>.*)" (?P<name>.*)')
#########################################################################################
# Exceptions #
#########################################################################################
class NoCertificateFoundError(Exception):
pass
#########################################################################################
# Helper Functions #
#########################################################################################
def get_peer_certificate_of_connection(imap_connection):
"""
    Returns the certificate of the remote peer.
    If the connection is unencrypted or the certificate can't be found for any other reason, NoCertificateFoundError is raised.
    If the certificate can't be parsed properly, X509Error is raised.
"""
if hasattr(imap_connection, 'ssl'):
sock = imap_connection.ssl()
else:
sock = imap_connection.sock
if not hasattr(sock, 'getpeercert'):
raise NoCertificateFoundError('No certificate found. Is connection really encrypted?')
data = sock.getpeercert(binary_form=True)
cert = X509.load_cert_string(data, X509.FORMAT_DER)
return cert
def verify_connection_security(imap_connection):
"""
Checks the certificate of the remote peer using the method specified in IMAP_TLS_VERIFY_METHOD in Settings.
In case the certificate is invalid, emailidx.CertificateVerifier.InsecureCertificateError is raised.
If no encryption is used (IMAP_TLS_METHOD='PLAIN'), NO check will be performed.
"""
if Settings.settings['imap']['tls_method'] in ('SSL/TLS', 'STARTTLS'):
peer_cert = get_peer_certificate_of_connection(imap_connection)
CertificateVerifier.CERTIFICATE_VERIFY_METHOD[Settings.settings['imap']['tls_verify_method']](peer_cert, Settings.settings['imap']['credentials']['host'])
def open_connection():
"""
Opens IMAP connection and tries to log in as specified in the settings.
"""
tls_method = TLS_METHOD[Settings.settings['imap']['tls_method']]
if tls_method['USE_SSL']:
the_port = Settings.settings['imap']['port'] if Settings.settings['imap']['port'] is not None else imaplib2.IMAP4_SSL_PORT
imap_connection = imaplib2.IMAP4_SSL(Settings.settings['imap']['credentials']['host'], the_port)
else:
the_port = Settings.settings['imap']['port'] if Settings.settings['imap']['port'] is not None else imaplib2.IMAP4_PORT
imap_connection = imaplib2.IMAP4(Settings.settings['imap']['credentials']['host'], the_port)
if tls_method['USE_STARTTLS']:
imap_connection.starttls()
try:
verify_connection_security(imap_connection)
except CertificateVerifier.InsecureCertificateError, icerr:
print >>sys.stderr, "[IMAP] The certicate of the remote peer was not accepted for the following reason:"
print >>sys.stderr, "[IMAP] \"%s\"" % icerr.message
sys.exit(-1)
if Settings.settings['imap']['use_cram_transfer']:
imap_connection.login_cram_md5(Settings.settings['imap']['credentials']['user'], Settings.settings['imap']['credentials']['password'])
else:
imap_connection.login(Settings.settings['imap']['credentials']['user'], Settings.settings['imap']['credentials']['password'])
return imap_connection
def parse_mailbox_description(mbx_desc_line):
"""
Parses mailbox name, flags and hierarchy delimiter from the given IMAP-LIST line.
"""
flags, delimiter, mailbox_name = IMAP_MAILBOX_DESC_REGEX.match(mbx_desc_line).groups()
mailbox_name = mailbox_name.strip('"')
flags = [flag_with_slash[1:] for flag_with_slash in flags.split(' ')]
return { 'flags': flags, 'delimiter': delimiter, 'mailbox_name': mailbox_name }
def get_mailboxes(imap_connection):
"""
Returns information about all mailboxes.
"""
(rv, mbx_descs) = imap_connection.list()
if rv == 'OK':
mailboxes = []
for mbx_desc in mbx_descs:
mailboxes.append(parse_mailbox_description(mbx_desc))
return mailboxes
else:
return None
def get_message_in_current_mailbox(imap_connection, message_id):
"""
Returns the serialized message with the given message_id from the currently selected mailbox.
"""
rv, msg_data = imap_connection.fetch(message_id, '(RFC822)')
if rv != 'OK':
print >>sys.stderr, "[IMAP] Couldn't fetch message with id %s" % message_id
return None
for response_part in msg_data:
if isinstance(response_part, tuple):
msg_as_rfc822 = response_part[1]
the_email = { 'raw_message': msg_as_rfc822 }
the_email = EmailSerializer.serialize_email_with_context(the_email)
if not Settings.settings['sync_behavior']['keep_raw_messages']:
del the_email['raw_message']
return the_email
return None
def get_all_messages_in_mailbox(imap_connection, mailbox_name):
"""
Get all messages in serialized format from the given mailbox.
The messages are sorted by their SHA256 hash value.
"""
rv = imap_connection.select(mailbox_name, readonly=True)[0]
if rv == 'OK':
print "[IMAP] Processing mailbox: %s" % mailbox_name
messages = {}
rv, data = imap_connection.search(None, "ALL")
if rv != 'OK':
# No messages found
return messages
msg_ids = data[0].split()
for msg_id in msg_ids:
curr_msg = get_message_in_current_mailbox(imap_connection, msg_id)
if curr_msg:
curr_msg['mailbox'] = mailbox_name
curr_msg_hash = curr_msg['hash']
if curr_msg_hash not in messages:
messages[curr_msg_hash] = []
messages[curr_msg_hash].append(curr_msg)
imap_connection.close()
return messages
else:
print >>sys.stderr, "[IMAP] Couldn't select mailbox: %s" % mailbox_name
return None
def fetch_all_emails_from_connection(imap_connection):
"""
Fetches all emails from the given IMAP connection ordered by mailboxes.
"""
messages = {}
for mbx in get_mailboxes(imap_connection):
mbx_name = mbx['mailbox_name']
if mbx_name not in Settings.settings['sync_behavior']['excluded_mailboxes']:
if 'Noselect' not in mbx['flags']:
msgs_by_hash = get_all_messages_in_mailbox(imap_connection, mbx_name)
if msgs_by_hash:
messages[mbx_name] = msgs_by_hash
return messages
#########################################################################################
# Exposed Functions #
#########################################################################################
def fetch_all_emails_via_imap():
"""
    Fetches all emails from the IMAP server configured in settings, grouped by mailbox.
"""
imap_connection = open_connection()
msgs_by_mailbox = fetch_all_emails_from_connection(imap_connection)
imap_connection.logout()
print '[IMAP] logged out.'
return msgs_by_mailbox
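# Usage sketch (added for illustration, not part of the original module): the
# returned structure is a dict keyed by mailbox name, whose values are dicts
# keyed by message hash, each holding a list of serialized messages:
#
#     msgs = fetch_all_emails_via_imap()
#     # msgs == {'INBOX': {'<hash>': [{'mailbox': 'INBOX', 'hash': '<hash>', ...}, ...], ...}, ...}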
|
p-ho/emailIdx
|
emailidx/ImapAdapter.py
|
Python
|
gpl-3.0
| 9,186
|
from django.views.generic import ListView
from models import Media
class CatalogListView(ListView):
model = Media
template_name = 'media_list.html'
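# Hedged usage sketch (not part of the original file): this view would typically
# be wired up in a urls.py along these lines; the URL pattern and name below are
# assumptions, matching the Django 1.x era this project appears to target.
#
#     from django.conf.urls import url
#     from catalog.views import CatalogListView
#
#     urlpatterns = [
#         url(r'^catalog/$', CatalogListView.as_view(), name='media_list'),
#     ]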
|
zenweasel/pybuilder-djangoexample
|
pyb_django/catalog/views.py
|
Python
|
bsd-3-clause
| 158
|
# Copyright (c) 2013 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import importutils
from cinder import context
from cinder.db.sqlalchemy import api
from cinder import exception
from cinder.i18n import _, _LI
from cinder.image import image_utils
from cinder.volume import driver
from cinder.volume import utils as volutils
LOG = logging.getLogger(__name__)
volume_opts = [
cfg.ListOpt('available_devices',
default=[],
help='List of all available devices'),
]
CONF = cfg.CONF
CONF.register_opts(volume_opts)
class BlockDeviceDriver(driver.BaseVD, driver.LocalVD, driver.CloneableVD,
driver.CloneableImageVD, driver.TransferVD):
VERSION = '2.1.0'
def __init__(self, *args, **kwargs):
super(BlockDeviceDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(volume_opts)
self.backend_name = \
self.configuration.safe_get('volume_backend_name') or "BlockDev"
target_driver =\
self.target_mapping[self.configuration.safe_get('iscsi_helper')]
self.target_driver = importutils.import_object(
target_driver,
configuration=self.configuration,
db=self.db,
executor=self._execute)
def check_for_setup_error(self):
pass
def create_volume(self, volume):
device = self.find_appropriate_size_device(volume['size'])
LOG.info(_LI("Create %(volume)s on %(device)s"),
{"volume": volume['name'], "device": device})
return {
'provider_location': device,
}
def delete_volume(self, volume):
"""Deletes a logical volume."""
dev_path = self.local_path(volume)
if not dev_path or dev_path not in \
self.configuration.available_devices:
return
if os.path.exists(dev_path) and \
self.configuration.volume_clear != 'none':
volutils.clear_volume(
self._get_device_size(dev_path), dev_path,
volume_clear=self.configuration.volume_clear,
volume_clear_size=self.configuration.volume_clear_size)
def local_path(self, volume):
if volume['provider_location']:
path = volume['provider_location'].rsplit(" ", 1)
return path[-1]
else:
return None
def copy_image_to_volume(self, context, volume, image_service, image_id):
"""Fetch the image from image_service and write it to the volume."""
image_utils.fetch_to_raw(context,
image_service,
image_id,
self.local_path(volume),
self.configuration.volume_dd_blocksize,
size=volume['size'])
def copy_volume_to_image(self, context, volume, image_service, image_meta):
"""Copy the volume to the specified image."""
image_utils.upload_volume(context,
image_service,
image_meta,
self.local_path(volume))
def create_cloned_volume(self, volume, src_vref):
LOG.info(_LI('Creating clone of volume: %s'), src_vref['id'])
device = self.find_appropriate_size_device(src_vref['size'])
volutils.copy_volume(
self.local_path(src_vref), device,
self._get_device_size(device) * 2048,
self.configuration.volume_dd_blocksize,
execute=self._execute)
return {
'provider_location': device,
}
def get_volume_stats(self, refresh=False):
if refresh:
self._update_volume_stats()
return self._stats
def _update_volume_stats(self):
"""Retrieve stats info from volume group."""
dict_of_devices_sizes = self._devices_sizes()
used_devices = self._get_used_devices()
total_size = 0
free_size = 0
for device, size in dict_of_devices_sizes.items():
if device not in used_devices:
free_size += size
total_size += size
LOG.debug("Updating volume stats")
backend_name = self.configuration.safe_get('volume_backend_name')
data = {'total_capacity_gb': total_size / 1024,
'free_capacity_gb': free_size / 1024,
'reserved_percentage': self.configuration.reserved_percentage,
'QoS_support': False,
'volume_backend_name': backend_name or self.__class__.__name__,
'vendor_name': "Open Source",
'driver_version': self.VERSION,
'storage_protocol': 'unknown'}
self._stats = data
def _get_used_devices(self):
lst = api.volume_get_all_by_host(context.get_admin_context(),
self.host)
used_devices = set()
for volume in lst:
local_path = self.local_path(volume)
if local_path:
used_devices.add(local_path)
return used_devices
def _get_device_size(self, dev_path):
out, _err = self._execute('blockdev', '--getsz', dev_path,
run_as_root=True)
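        # 'blockdev --getsz' reports the device size in 512-byte sectors,
        # so dividing by 2048 below converts it to MiB.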
size_in_m = int(out)
return size_in_m / 2048
def _devices_sizes(self):
available_devices = self.configuration.available_devices
dict_of_devices_sizes = {}
for device in available_devices:
dict_of_devices_sizes[device] = self._get_device_size(device)
return dict_of_devices_sizes
def find_appropriate_size_device(self, size):
dict_of_devices_sizes = self._devices_sizes()
free_devices = (set(self.configuration.available_devices) -
self._get_used_devices())
if not free_devices:
raise exception.CinderException(_("No free disk"))
possible_device = None
possible_device_size = None
for device in free_devices:
dev_size = dict_of_devices_sizes[device]
if size * 1024 <= dev_size and (possible_device is None or
dev_size < possible_device_size):
possible_device = device
possible_device_size = dev_size
if possible_device:
return possible_device
else:
raise exception.CinderException(_("No big enough free disk"))
# ####### Interface methods for DataPath (Target Driver) ########
def ensure_export(self, context, volume):
volume_path = self.local_path(volume)
model_update = \
self.target_driver.ensure_export(
context,
volume,
volume_path)
return model_update
def create_export(self, context, volume, connector):
volume_path = self.local_path(volume)
export_info = self.target_driver.create_export(context,
volume,
volume_path)
return {'provider_location': export_info['location'],
'provider_auth': export_info['auth'], }
def remove_export(self, context, volume):
self.target_driver.remove_export(context, volume)
def initialize_connection(self, volume, connector):
return self.target_driver.initialize_connection(volume, connector)
def validate_connector(self, connector):
return self.target_driver.validate_connector(connector)
def terminate_connection(self, volume, connector, **kwargs):
pass
|
sasukeh/cinder
|
cinder/volume/drivers/block_device.py
|
Python
|
apache-2.0
| 8,384
|
import asyncio
import json
from discord import Member, Role
from discord.ext import commands as c
from accounts import level
from helpers import update_db
with open("plugins/settings/admin.json") as cfg:
config = json.load(cfg)
with open("db/admin.json") as admn:
admin = json.load(admn)
class Admin:
"""Administration plugin."""
def __init__(self, bot):
self.bot = bot
self.log = config["log"]
self.display_purges = config["display_purges"]
@c.command(no_pm=True, pass_context=True)
@level(2)
async def admin_set(self, ctx):
"""Set the logging channel for admin commands.
The channel this command is invoked in will become the channel that all
bot administration actions (kicks, bans, softbans, and unbans) are logged
to.
"""
admin["servers"][ctx.message.server.id] = ctx.message.channel.id
update_db(admin, "admin")
await self.bot.say("\U00002705")
# Helper function for logging
async def log_to_channel(self, server, author, target, log_type, info):
if self.log and (server.id in admin["servers"]):
channel = server.get_channel(admin["servers"][server.id])
target = f"{target.name}#{target.discriminator}"
header = f"**[{log_type}]** *by {author}*"
body = f"**Member:** {target}\n**Reason:** {info}"
await self.bot.send_message(channel, f"{header}\n{body}")
@c.command(no_pm=True, pass_context=True)
@level(2)
async def kick(self, ctx, member: Member, *, reason: str = ""):
"""Kick a user."""
await self.bot.kick(member)
await self.bot.say("\U00002705")
await self.log_to_channel(ctx.message.server, ctx.message.author,
member, "KICK", reason)
@c.command(no_pm=True, pass_context=True)
@level(2)
async def ban(self, ctx, member: Member,
purge: int = 7, *, reason: str = ""):
"""Ban a userr."""
await self.bot.ban(member, purge)
await self.bot.say("\U00002705")
await self.log_to_channel(ctx.message.server, ctx.message.author,
member, "\U0001F528BAN\U0001F528", reason)
@c.command(no_pm=True, pass_context=True)
@level(2)
async def unban(self, ctx, uid: str, *, reason: str = ""):
"""Unban a user by UID."""
        user = None
        for banned in await self.bot.get_bans(ctx.message.server):
            if banned.id == uid:
                user = banned
                break
        if user is None:
            await self.bot.say("\U0000274C No banned user with that UID was found.")
            return
        await self.bot.unban(ctx.message.server, user)
await self.bot.say("\U00002705")
await self.log_to_channel(ctx.message.server, ctx.message.author,
user, "UNBAN", reason)
@c.command(no_pm=True, pass_context=True)
@level(2)
async def softban(self, ctx, member: Member,
purge: int = 1, *, reason: str = ""):
"""Softban (ban then unban) a user."""
await self.bot.ban(member, purge)
await self.bot.unban(member.server, member)
await self.bot.say("\U00002705")
await self.log_to_channel(ctx.message.server, ctx.message.author, member,
"\U0001F528SOFTBAN\U0001F528", reason)
@c.command(no_pm=True)
@level(1)
async def mute(self, member: Member, switch: bool = True):
"""Mute or unmute a user."""
await self.bot.server_voice_state(member, mute=switch)
await self.bot.say("\U00002705")
@c.command(no_pm=True)
@level(1)
async def deafen(self, member: Member, switch: bool = True):
"""Deafen or undeafen a user."""
await self.bot.server_voice_state(member, deafen=switch)
await self.bot.say("\U00002705")
# Message purging helper function
async def purge_messages(self, location, message, limit, check):
removed = await self.bot.purge_from(message.channel, limit=limit,
before=message, check=check)
# Display information about the purge
if self.display_purges:
amount = len(removed)
await self.bot.say(f"\U00002705 {amount} message(s) purged from {location}.")
@c.group(pass_context=True)
@level(1)
async def purge(self, ctx):
"""Purge messages."""
if ctx.invoked_subcommand is None:
await self.bot.say("\U00002754 What should be purged?")
@purge.command(name="all", aliases=["everyone"], pass_context=True)
async def purge_all(self, ctx, amount: int):
"""Purge messages from everyone."""
await self.purge_messages("everyone", ctx.message, amount,
lambda m: m is not None)
@purge.command(name="member", aliases=["user"], pass_context=True)
async def purge_member(self, ctx, member: Member, amount: int):
"""Purge messages from a user."""
await self.purge_messages(f"{member.mention}", ctx.message, amount,
lambda m: m.author.id == member.id)
@purge.command(name="id", aliases=["uid"], pass_context=True)
async def purge_uid(self, ctx, uid: str, amount: int):
"""Purge messages by UID."""
await self.purge_messages(uid, ctx.message, amount,
lambda m: m.author.id == uid)
@purge.command(name="role", aliases=["group"], pass_context=True)
async def purge_role(self, ctx, role: Role, amount: int):
"""Purge messages from a role."""
await self.purge_messages(f"{role.name}", ctx.message, amount,
lambda m: role in m.author.roles)
@c.command(no_pm=True)
async def nick(self, member: Member, *, name: str):
"""Change someone's nickname.
If the nickname is set to '!none' it will be removed.
"""
if name.lower() == "!none":
name = None
try:
await self.bot.change_nickname(member, name)
await self.bot.say("\U00002705")
except Exception as error:
await self.bot.say(f"Unable to change nickname: {error}")
def setup(bot):
bot.add_cog(Admin(bot))
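# Loading sketch (added for illustration): with discord.py's extension system,
# this plugin would typically be loaded from the bot's entry point with something
# like `bot.load_extension("plugins.admin")`; the module path is an assumption
# based on the repository layout.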
|
Ispira/Ispyra
|
plugins/admin.py
|
Python
|
mit
| 6,041
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from flask_appbuilder.widgets import RenderTemplateWidget
from wtforms.widgets import HTMLString, html_params
class AirflowModelListWidget(RenderTemplateWidget):
template = 'airflow/model_list.html'
class AirflowDateTimePickerWidget:
data_template = (
'<div class="input-group datetime datetimepicker">'
'<span class="input-group-addon"><i class="fa fa-calendar cursor-hand"></i>'
"</span>"
'<input class="form-control" %(text)s />'
"</div>"
)
def __call__(self, field, **kwargs):
kwargs.setdefault("id", field.id)
kwargs.setdefault("name", field.name)
if not field.data:
field.data = ""
template = self.data_template
return HTMLString(
template % {"text": html_params(type="text", value=field.data, **kwargs)}
)
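# Rendering sketch (not from the original source): for a field whose id and name
# are "execution_date" (a hypothetical field) and whose value is empty, the widget
# above would emit markup roughly like
#     <div class="input-group datetime datetimepicker">
#       <span class="input-group-addon"><i class="fa fa-calendar cursor-hand"></i></span>
#       <input class="form-control" id="execution_date" name="execution_date" type="text" value="" />
#     </div>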
|
owlabs/incubator-airflow
|
airflow/www_rbac/widgets.py
|
Python
|
apache-2.0
| 1,660
|
"""
Functions for creating stretched coordinate PMLs.
"""
import copy
import itertools
from typing import Callable, List, Optional, Tuple, Union
import numpy as np
from spins import fdfd_tools
s_function_type = Callable[[float], float]
def prepare_s_function(ln_R: float = -16, m: float = 4) -> s_function_type:
"""Create an s_function to pass to the SCPML functions.
This is used when you would like to customize the PML parameters.
Args:
ln_R: Natural logarithm of the desired reflectance.
m: Polynomial order for the PML (imaginary part increases as
`distance**m`).
Returns:
An s_function, which takes an ndarray (distances) and returns an ndarray
(complex part of the cell width; needs to be divided by
        `sqrt(epsilon_effective) * real(omega)` before use).
"""
def s_factor(distance: np.ndarray) -> np.ndarray:
s_max = (m + 1) * ln_R / 2 # / 2 because we assume periodic boundaries
return s_max * (distance**m)
return s_factor
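# Usage sketch (illustrative only, not part of the original module): a custom
# s_function can be built once and handed to the SCPML helpers below; the
# parameter values here are arbitrary examples, and `omega` stands for whatever
# angular frequency the simulation uses.
#
#     s_fn = prepare_s_function(ln_R=-25, m=3)
#     dxes = uniform_grid_scpml([100, 100, 60], [10, 10, 10], omega=omega,
#                               s_function=s_fn)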
def uniform_grid_scpml(
shape: np.ndarray or List[int],
thicknesses: np.ndarray or List[int],
omega: float,
epsilon_effective: float = 1.0,
s_function: s_function_type = None,
) -> fdfd_tools.GridSpacing:
"""
Create dx arrays for a uniform grid with a cell width of 1 and a pml.
If you want something more fine-grained, check out stretch_with_scpml(...).
:param shape: Shape of the grid, including the PMLs (which are 2*thicknesses thick)
:param thicknesses: [th_x, th_y, th_z] Thickness of the PML in each direction.
Both polarities are added.
Each th_ of pml is applied twice, once on each edge of the grid along the given axis.
th_* may be zero, in which case no pml is added.
:param omega: Angular frequency for the simulation
:param epsilon_effective: Effective epsilon of the PML. Match this to the material
at the edge of your grid.
Default 1.
:param s_function: s_function created by prepare_s_function(...), allowing
customization of pml parameters.
Default uses prepare_s_function() with no parameters.
:return: Complex cell widths (dx_lists)
"""
if s_function is None:
s_function = prepare_s_function()
# Normalized distance to nearest boundary
def l(u, n, t):
return ((t - u).clip(0) + (u - (n - t)).clip(0)) / t
dx_a = [np.array(np.inf)] * 3
dx_b = [np.array(np.inf)] * 3
# divide by this to adjust for epsilon_effective and omega
s_correction = np.sqrt(epsilon_effective) * np.real(omega)
for k, th in enumerate(thicknesses):
s = shape[k]
if th > 0:
sr = np.arange(s)
dx_a[k] = 1 + 1j * s_function(l(sr, s, th)) / s_correction
dx_b[k] = 1 + 1j * s_function(l(sr + 0.5, s, th)) / s_correction
else:
dx_a[k] = np.ones((s,))
dx_b[k] = np.ones((s,))
return [dx_a, dx_b]
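# Note added for clarity (derived from the code above): the return value is
# [dx_a, dx_b], where dx_a[k] and dx_b[k] are length-shape[k] complex arrays of
# cell widths. Cells inside the PML regions pick up an imaginary part; all other
# cells have width exactly 1.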
def stretch_with_scpml(
dxes: fdfd_tools.GridSpacing,
axis: int,
polarity: int,
omega: float,
epsilon_effective: float = 1.0,
thickness: int = 10,
s_function: s_function_type = None,
) -> fdfd_tools.GridSpacing:
"""
Stretch dxes to contain a stretched-coordinate PML (SCPML) in one direction along one axis.
:param dxes: dx_tuple with coordinates to stretch
:param axis: axis to stretch (0=x, 1=y, 2=z)
:param polarity: direction to stretch (-1 for -ve, +1 for +ve)
:param omega: Angular frequency for the simulation
:param epsilon_effective: Effective epsilon of the PML. Match this to the material at the
edge of your grid. Default 1.
:param thickness: number of cells to use for pml (default 10)
:param s_function: s_function created by prepare_s_function(...), allowing customization
of pml parameters. Default uses prepare_s_function() with no parameters.
:return: Complex cell widths
"""
if s_function is None:
s_function = prepare_s_function()
dx_ai = dxes[0][axis].astype(complex)
dx_bi = dxes[1][axis].astype(complex)
pos = np.hstack((0, dx_ai.cumsum()))
pos_a = (pos[:-1] + pos[1:]) / 2
pos_b = pos[:-1]
# divide by this to adjust for epsilon_effective and omega
s_correction = np.sqrt(epsilon_effective) * np.real(omega)
if polarity > 0:
# front pml
bound = pos[thickness]
d = bound - pos[0]
def l_d(x):
return (bound - x) / (bound - pos[0])
slc = slice(thickness)
else:
# back pml
bound = pos[-thickness - 1]
d = pos[-1] - bound
def l_d(x):
return (x - bound) / (pos[-1] - bound)
if thickness == 0:
slc = slice(None)
else:
slc = slice(-thickness, None)
dx_ai[slc] *= 1 + 1j * s_function(l_d(pos_a[slc])) / d / s_correction
dx_bi[slc] *= 1 + 1j * s_function(l_d(pos_b[slc])) / d / s_correction
dxes[0][axis] = dx_ai
dxes[1][axis] = dx_bi
return dxes
def generate_periodic_dx(pos: List[np.ndarray]) -> fdfd_tools.GridSpacing:
"""
Given a list of 3 ndarrays cell centers, creates the cell width parameters for a periodic grid.
:param pos: List of 3 ndarrays of cell centers
:return: (dx_a, dx_b) cell widths (no pml)
"""
if len(pos) != 3:
raise Exception('Must have len(pos) == 3')
dx_a = [np.array(np.inf)] * 3
dx_b = [np.array(np.inf)] * 3
for i, p_orig in enumerate(pos):
p = np.array(p_orig, dtype=float)
if p.size != 1:
p_shifted = np.hstack((p[1:], p[-1] + (p[1] - p[0])))
dx_a[i] = np.diff(p)
dx_b[i] = np.diff((p + p_shifted) / 2)
return dx_a, dx_b
def make_nonuniform_grid(SimBorders: List[int],
dx_default: List[int],
Boxes: List[dict],
grad_Mesh=0.05,
                         step=1.0) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
'''
    make_nonuniform_grid makes x, y, z vectors for a non-uniform grid. In
    addition to the simulation boundaries and the default dx, you can add boxes
    where you want a finer mesh. The mesh will change gradually by grad_Mesh.
    (a grad_Mesh larger than 1 does not make any sense)
input:
- SimBorders: the boundaries of your simulation in the form
[xmin xmax ymin ymax zmin zmax]
- dx_default: the largest mesh allowed (a 3 element np.array)
- Boxes: List of dicts that define finer mesh boxes
These have 'pos' (a 3 element np.array), 'size' (a 3
element np.array) and 'meshsize' the meshsize
    - grad_Mesh: the mesh size gradient (0.05 by default)
- step: the minimum mesh size is first calculated on a fine grid in
the x,y and z direction. Step is the mesh size of this vector. It
should be significantly smaller than the mesh size of the boxes
output:
    - xs: mesh point coordinates along the x direction
    - ys: mesh point coordinates along the y direction
    - zs: mesh point coordinates along the z direction
(Dries Vercruysse)
'''
    # make x, y, z vectors with a pitch of `step`, and the dx, dy, dz vectors
    # specifying the default mesh size
NX = int((np.ceil(SimBorders[1]) - np.floor(SimBorders[0])) / step)
NY = int((np.ceil(SimBorders[3]) - np.floor(SimBorders[2])) / step)
NZ = int((np.ceil(SimBorders[5]) - np.floor(SimBorders[4])) / step)
x = np.linspace(np.floor(SimBorders[0]), np.ceil(SimBorders[1]), NX + 1)
y = np.linspace(np.floor(SimBorders[2]), np.ceil(SimBorders[3]), NY + 1)
z = np.linspace(np.floor(SimBorders[4]), np.ceil(SimBorders[5]), NZ + 1)
dx = dx_default[0] * np.ones((1, NX + 1))
    dy = dx_default[1] * np.ones((1, NY + 1))
    dz = dx_default[2] * np.ones((1, NZ + 1))
# define a function that makes a dx vector with DX in between x0 and xn and
# that increases outside of the [x0, xn] with grad_mesh
def MeshBox(x, x0, xn, DX, grad_Mesh):
dx = DX * np.ones_like(x)
dx[x < x0] += grad_Mesh * (x0 - x[x < x0]) + DX
dx[x > xn] += grad_Mesh * (x[x > xn] - xn) + DX
return np.expand_dims(dx, axis=0)
# for every box element make the dx, dy, dz vector with MeshBox and append
# it to the existing dx, dy and dz vector
for box in Boxes:
x0 = box['pos'][0] - box['size'][0] / 2
xn = box['pos'][0] + box['size'][0] / 2
y0 = box['pos'][1] - box['size'][1] / 2
yn = box['pos'][1] + box['size'][1] / 2
z0 = box['pos'][2] - box['size'][2] / 2
zn = box['pos'][2] + box['size'][2] / 2
dx = np.append(
dx, MeshBox(x, x0, xn, box['meshsize'][0], grad_Mesh), axis=0)
dy = np.append(
dy, MeshBox(y, y0, yn, box['meshsize'][1], grad_Mesh), axis=0)
dz = np.append(
dz, MeshBox(z, z0, zn, box['meshsize'][2], grad_Mesh), axis=0)
# take the minimum of all the dx vectors
dxv = np.amin(dx, axis=0)
dyv = np.amin(dy, axis=0)
dzv = np.amin(dz, axis=0)
    # make the mesh: start at the simulation border and take steps with the mesh
    # size given at that point
xs = [SimBorders[0]]
while xs[-1] < SimBorders[1]:
xs = xs + [xs[-1] + dxv[int((xs[-1] - x[0]) // step)]]
xs = (xs / np.max(xs) * SimBorders[1])
ys = [SimBorders[2]]
while ys[-1] < SimBorders[3]:
ys = ys + [ys[-1] + dyv[int((ys[-1] - y[0]) // step)]]
ys = (ys / np.max(ys) * SimBorders[3])
zs = [SimBorders[4]]
while zs[-1] < SimBorders[5]:
zs = zs + [zs[-1] + dzv[int((zs[-1] - z[0]) // step)]]
zs = (zs / np.max(zs) * SimBorders[5])
# return results
return xs, ys, zs
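# Usage sketch (illustrative only; all numbers are arbitrary): the Boxes argument
# is a list of dicts with 'pos', 'size' and 'meshsize' entries, e.g.
#
#     boxes = [{'pos': np.array([0.0, 0.0, 0.0]),
#               'size': np.array([2.0, 2.0, 1.0]),
#               'meshsize': np.array([0.02, 0.02, 0.02])}]
#     xs, ys, zs = make_nonuniform_grid([-5, 5, -5, 5, -2, 2],
#                                       [0.1, 0.1, 0.1], boxes, step=0.01)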
def apply_scpml(dxes: fdfd_tools.GridSpacing,
pml_layers: Optional[Union[int, fdfd_tools.PmlLayers]],
omega: float) -> fdfd_tools.GridSpacing:
"""Applies PMLs to the grid spacing.
This function implements SC-PMLs by modifying the grid spacing based on
the PML layers.
Args:
dxes: Grid spacing to modify.
pml_layers: Indicates number of PML layers to apply on each side. If
`None`, no PMLs are applied. If this is a scalar, the same number of
PML layers are applied to each side.
omega: Frequency of PML operation.
Returns:
A new grid spacing with SC-PML applied.
"""
# Make a copy of `dxes`. We write this out so that we have an array of
# array of numpy arrays.
dxes = [
[np.array(dxes[grid_num][i]) for i in range(3)] for grid_num in range(2)
]
if not pml_layers:
return dxes
if isinstance(pml_layers, int):
pml_layers = [pml_layers] * 6
for pml, (axis, polarity) in zip(pml_layers,
itertools.product(range(3), [1, -1])):
if pml > 0:
dxes = stretch_with_scpml(
dxes, omega=omega, axis=axis, polarity=polarity, thickness=pml)
return dxes
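# Usage sketch (not part of the original module): applying ten PML cells on every
# face of an existing grid spacing; `omega` is the simulation's angular frequency.
#
#     dxes_with_pml = apply_scpml(dxes, pml_layers=10, omega=omega)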
|
stanfordnqp/spins-b
|
spins/fdfd_tools/grid.py
|
Python
|
gpl-3.0
| 11,145
|