text stringlengths 4 1.02M | meta dict |
|---|---|
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from PIL import Image
from rabbitgw.nn import WGAN_TF,WGAN_GP_TF
from rabbitgw.util.net import mlp_tf
from rabbitgw.util.process import inverse_standardlize,standardlize
# Load MNIST and take 50k training images; scale pixel values into [0, 1].
Mnist = input_data.read_data_sets('MNIST_data/', one_hot=True)
X, _ = Mnist.train.next_batch(50000)
X = standardlize(X, 0, 1)
# Discriminator: 784 -> 256 -> 1 MLP; generator: 100 -> 256 -> 784 with a
# sigmoid output so samples land in [0, 1] like the scaled images.
mlp_d = mlp_tf(28*28, 256, 1)
mlp_g = mlp_tf(100, 256, 28*28, output_act_fun=tf.nn.sigmoid)
# WGAN with gradient penalty; latent vector has 100 dimensions.
wgan = WGAN_GP_TF(x_size=28*28, z_size=100, net_G=mlp_g, net_D=mlp_d)
def my_callback(context):
    """Per-epoch callback: generate one sample and save it as a JPEG."""
    epoch = context.get("ep")
    generate = context.get("predict")
    sample = generate(1)
    # Undo the [0, 1] scaling, reshape to a 28x28 image, convert to bytes.
    pixels = inverse_standardlize(sample, 0, 1).reshape([28, 28])
    pixels = (pixels * 255).astype("uint8")
    Image.fromarray(pixels).save("image/ep%d.jpg" % (epoch))
wgan.open_session()
# Train for 500 epochs, saving one generated sample after each epoch.
wgan.fit(X, epoch=500, callbacks=[my_callback])
wgan.save_model("model/gan.pkct")
wgan.close()
| {
"content_hash": "a910b6cfa3f4680a24d4652ce805a0b8",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 67,
"avg_line_length": 35.30769230769231,
"alnum_prop": 0.7189542483660131,
"repo_name": "MashiMaroLjc/rabbitgw",
"id": "f41c411545bc4a7f62ad82ccfbe5ed7fe3cc1292",
"size": "961",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19419"
}
],
"symlink_target": ""
} |
"""A collection of analytical results which return the exact solution to a
trial problem to benchmark particle movers
Includes:
* Motion in a constant electric field
* Motion in a constant magnetic field
* Crossed motion in perpendicular electric and magnetic fields
"""
__author__ = 'swebb'
import numpy as np
import scipy.linalg as linalg
import scipy.constants as consts
class PtclTests:
    """Analytic solutions for charged-particle motion in a constant magnetic
    field, used to benchmark numerical particle movers."""

    def __init__(self):
        """does nothing"""

    @staticmethod
    def _propagate(x0, v0, tau, t):
        """Advance (x0, v0) for time t under the rotation generator tau.

        tau is the (vector) cyclotron frequency q*B/(m*gamma); the velocity
        obeys dv/dt = bMatrix . v with bMatrix the antisymmetric matrix below.

        :return: (x, v) as length-3 numpy arrays after time t
        """
        x0 = np.asarray(x0, dtype=float).ravel()
        v0 = np.asarray(v0, dtype=float).ravel()
        # Antisymmetric generator of the rotation.
        bMatrix = np.array([[0., tau[2], -1. * tau[1]],
                            [-1. * tau[2], 0., tau[0]],
                            [tau[1], -1. * tau[0], 0.]])
        # Velocity via matrix exponentiation (exact rotation).
        greenFunc = linalg.expm(bMatrix * t)
        vfinal = greenFunc.dot(v0)
        # Position: x = x0 + (integral_0^t e^{B s} ds) . v0.
        # BUG FIX: the old code used bMatrix.I * (expm(B t) - I), but a 3x3
        # antisymmetric matrix always has determinant 0, so the inverse does
        # not exist.  Use Van Loan's augmented-matrix identity instead:
        # expm([[B, I], [0, 0]] * t) carries integral_0^t e^{B s} ds in its
        # upper-right 3x3 block.
        aug = np.zeros((6, 6))
        aug[:3, :3] = bMatrix
        aug[:3, 3:] = np.identity(3)
        integral = linalg.expm(aug * t)[:3, 3:]
        xfinal = x0 + integral.dot(v0)
        return xfinal, vfinal

    def constnonrelmagfield(self, x0, v0, q, m, B, t):
        """
        Compute the non-relativistic motion of a charged particle with
        initial x0 and v0 through a constant magnetic field.

        :param x0: initial position
        :param v0: initial velocity
        :param q: charge
        :param m: mass
        :param B: magnetic field
        :param t: time to propagate
        :return: x, v after propagating for time t
        """
        # Normalize the magnetic field to the cyclotron frequency vector.
        # (Debug prints of the matrices were removed; they used Python 2
        # print-statement syntax and spammed stdout.)
        tau = q * B / (m)
        return self._propagate(x0, v0, tau, t)

    def constrelmagfield(self, x0, u0, q, m, B, t):
        """
        Compute the relativistic motion of a charged particle with initial
        x0 and u0 through a constant magnetic field.

        :param x0: initial position
        :param u0: initial normalized momentum beta*gamma*c
        :param q: charge
        :param m: mass
        :param B: magnetic field
        :param t: time to propagate
        :return: x, v after propagating for time t
        """
        # Properly normalize the vectors: gamma from u0, then v0 = u0/gamma.
        gamma = np.sqrt(np.dot(u0, u0) / consts.c ** 2 + 1)
        v0 = u0 / gamma
        tau = q * B / (m * gamma)
        return self._propagate(x0, v0, tau, t)
"content_hash": "d808758a5622ffbd775cb28495790db7",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 77,
"avg_line_length": 29.19626168224299,
"alnum_prop": 0.545774647887324,
"repo_name": "radiasoft/radtrack",
"id": "d65019b6d41f05ffe61ed8122b9aa1056a0a167b",
"size": "3124",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "experimental/ode/PtclTests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "681"
},
{
"name": "Erlang",
"bytes": "2396"
},
{
"name": "GLSL",
"bytes": "794"
},
{
"name": "Jupyter Notebook",
"bytes": "39421"
},
{
"name": "Makefile",
"bytes": "1128"
},
{
"name": "Python",
"bytes": "1092344"
},
{
"name": "Shell",
"bytes": "35016"
},
{
"name": "Tcl",
"bytes": "55753"
}
],
"symlink_target": ""
} |
from collections import namedtuple
# Record types describing, respectively, a cacheable resource collection and
# an attribute mapping between model classes.
CacheEntry = namedtuple('CacheEntry', 'model_plural class_name cache_type')
MappingEntry = namedtuple('MappingEntry', 'class_name attr polymorph')


def resource(model_plural, class_name, cache_type='memcache'):
    """Shorthand factory for a CacheEntry record."""
    return CacheEntry(model_plural=model_plural,
                      class_name=class_name,
                      cache_type=cache_type)


def mapping(class_name, attr, polymorph=False):
    """Shorthand factory for a MappingEntry record."""
    return MappingEntry(class_name=class_name, attr=attr, polymorph=polymorph)
def all_cache_entries():
    """Return the CacheEntry records for every cacheable resource type."""
    ret = [
        resource('access_groups', 'AccessGroup'),
        resource('audits', 'Audit'),
        resource('custom_attribute_values', 'CustomAttributeValue'),
        resource('categorizations', 'Categorization'),
        resource('category_bases', 'CategoryBase'),
        resource('comments', 'Comment'),
        resource('control_categories', 'ControlCategory'),
        resource('control_assertions', 'ControlAssertion'),
        resource('contexts', 'Context'),
        resource('controls', 'Control'),
        # NOTE(review): 'Assessments' is plural unlike every other class name
        # in this list — possibly should be 'Assessment'; confirm against the
        # model registry.
        resource('assessments', 'Assessments'),
        resource('assessment_templates', 'AssessmentTemplate'),
        resource('data_assets', 'DataAsset'),
        resource('directives', 'Directive'),
        resource('contracts', 'Contract'),
        resource('policies', 'Policy'),
        resource('regulations', 'Regulation'),
        resource('standards', 'Standard'),
        resource('documents', 'Document'),
        resource('events', 'Event'),
        resource('facilities', 'Facility'),
        resource('helps', 'Help'),
        resource('markets', 'Market'),
        resource('meetings', 'Meeting'),
        resource('object_documents', 'ObjectDocument'),
        resource('object_owners', 'ObjectOwner'),
        resource('object_people', 'ObjectPerson'),
        resource('objectives', 'Objective'),
        resource('options', 'Option'),
        resource('org_groups', 'OrgGroup'),
        resource('vendors', 'Vendor'),
        resource('people', 'Person'),
        resource('products', 'Product'),
        resource('projects', 'Project'),
        resource('programs', 'Program'),
        resource('relationships', 'Relationship'),
        resource('requests', 'Request'),
        resource('revisions', 'Revision'),
        resource('sections', 'Section'),
        resource('clauses', 'Clause'),
        resource('systems_or_processes', 'SystemOrProcess'),
        resource('systems', 'System'),
        resource('processes', 'Process'),
        resource('issues', 'Issue'),
        # ggrc notification models
        resource('notification_configs', 'NotificationConfig'),
        resource('notifications', 'Notification'),
        resource('notification_type', 'NotificationType'),
        # ggrc custom attribuess
        resource('custom_attribute_definitions', 'CustomAttributeDefinition'),
        # NOTE(review): duplicate of the 'custom_attribute_values' entry near
        # the top of this list.
        resource('custom_attribute_values', 'CustomAttributeValue'),
        # FIXME: Extension-defined models should be registered
        # from the extensions.
        # ggrc_basic_permissions models
        resource('roles', 'Role'),
        resource('user_roles', 'UserRole'),
        # ggrc_gdrive_integration models
        resource('object_folders', 'ObjectFolder'),
        resource('object_files', 'ObjectFile'),
        resource('object_events', 'ObjectEvent'),
        # ggrc_risk_assessments models
        resource('templates', 'Template'),
        resource('risk_assessments', 'RiskAssessment'),
        resource('risk_assessment_mappings', 'RiskAssessmentMapping'),
        resource('risk_assessment_control_mappings',
                 'RiskAssessmentControlMapping'),
        resource('threats', 'Threat'),
        resource('vulnerabilities', 'Vulnerability'),
        # ggrc_workflows models
        resource('cycle_task_entries', 'CycleTaskEntry'),
        resource('cycle_task_group_object_tasks', 'CycleTaskGroupObjectTask'),
        resource('cycle_task_groups', 'CycleTaskGroup'),
        resource('cycles', 'Cycle'),
        resource('task_group_objects', 'TaskGroupObject'),
        resource('task_group_tasks', 'TaskGroupTask'),
        resource('task_groups', 'TaskGroup'),
        resource('workflow_people', 'WorkflowPerson'),
        resource('workflows', 'Workflow'),
    ]
    return ret
def all_mapping_entries():
    """Return the MappingEntry records describing inter-model attribute
    mappings (polymorph=True marks polymorphic relationships)."""
    ret = [
        mapping('Audit', 'requests'),
        mapping('Audit', 'program'),
        mapping('Request', 'audit'),
        mapping('CustomAttributeValue', 'attributable', True),
        mapping('Request', 'responses'),
        mapping('ObjectDocument', 'document'),
        mapping('ObjectDocument', 'documentable', True),
        mapping('ObjectOwner', 'person'),
        mapping('ObjectOwner', 'ownable', True),
        mapping('ObjectPerson', 'person'),
        mapping('ObjectPerson', 'personable', True),
        mapping('Section', 'directive'),  # this goes out?
        mapping('Relationship', 'source', True),
        mapping('Relationship', 'destination', True),
        mapping('UserRole', 'context'),
        mapping('UserRole', 'person'),
        mapping('UserRole', 'role'),
        mapping('ObjectEvent', 'eventable', True),
        mapping('ObjectFolder', 'folderable', True),
        mapping('ObjectFile', 'fileable', True),
        mapping('Notification', 'recipients'),
        mapping('Notification', 'notification_object'),
        # ggrc_workflows mappings:
        mapping('TaskGroupObject', 'object', True),
        mapping('TaskGroupObject', 'task_group'),
        mapping('TaskGroupTask', 'task_group'),
        mapping('TaskGroup', 'workflow'),
        mapping('WorkflowPerson', 'context'),
        mapping('WorkflowPerson', 'person'),
        mapping('WorkflowPerson', 'workflow'),
        mapping('Cycle', 'workflow'),
        mapping('Cycle', 'cycle_task_groups'),
        mapping('CycleTaskGroup', 'cycle'),
        mapping('CycleTaskGroup', 'task_group'),
        mapping('CycleTaskGroupObjectTask', 'cycle'),
        mapping('CycleTaskGroupObjectTask', 'cycle_task_entries'),
        mapping('CycleTaskGroupObjectTask', 'task_group_task'),
        mapping('CycleTaskGroupObjectTask', 'cycle_task_objects_for_cache'),
        mapping('CycleTaskEntry', 'cycle'),
        mapping('CycleTaskEntry', 'cycle_task_group_object_task'),
        # mapping('RiskAssessmentMapping'),
        # mapping('RiskAssessmentControlMapping'),
    ]
    return ret
class Cache:
    """Abstract no-op cache interface.

    Concrete backends override the accessor methods and populate
    supported_resources (resource name -> model info) with the resource
    types they can hold.
    """
    name = None
    supported_resources = {}

    def __init__(self):
        pass

    def get_name(self):
        return None

    def get(self, *_):
        return None

    def add(self, *_):
        return None

    def update(self, *_):
        return None

    def remove(self, *_):
        return None

    def get_multi(self, *_):
        return None

    def add_multi(self, *_):
        return None

    def update_multi(self, *_):
        return None

    def remove_multi(self, *_):
        return None

    def clean(self):
        return False

    def get_key(self, category, resource):
        """Build the cache key '<category>:<resource>'."""
        cache_key = category + ":" + resource
        return cache_key

    def parse_filter(self, filter):
        """Split a filter dict into (ids, attrs); missing keys yield None."""
        # NOTE: the parameter shadows the builtin `filter`; name kept for
        # backward compatibility with existing callers.
        return filter.get('ids'), filter.get('attrs')

    def is_caching_supported(self, category, resource):
        """Return True iff `resource` is cacheable in the given category.

        Only the 'collection' category is cacheable.
        """
        # BUG FIX: was `category is 'collection'` — identity comparison with
        # a string literal only works via CPython interning; use equality.
        if category == 'collection':
            return resource in self.supported_resources
        else:
            return False
| {
"content_hash": "5ecebe6a5716ffe36e6f0435ab6343d0",
"timestamp": "",
"source": "github",
"line_count": 204,
"max_line_length": 76,
"avg_line_length": 33.94607843137255,
"alnum_prop": 0.6453429602888087,
"repo_name": "kr41/ggrc-core",
"id": "494310d829f9a057e8e290c8e392ed7ddf37fab6",
"size": "7039",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "src/ggrc/cache/cache.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "191076"
},
{
"name": "Cucumber",
"bytes": "136322"
},
{
"name": "HTML",
"bytes": "1079513"
},
{
"name": "JavaScript",
"bytes": "1718280"
},
{
"name": "Makefile",
"bytes": "7103"
},
{
"name": "Mako",
"bytes": "4320"
},
{
"name": "Python",
"bytes": "2389878"
},
{
"name": "Shell",
"bytes": "30802"
}
],
"symlink_target": ""
} |
import sys
import os
import django
django.setup()
from django.core.files import File
from recipes.models import Lesson
# Import every .odt/.doc file from the directory given on the command line
# into the Lesson table, skipping files already present.
# FIX: renamed `dir` (shadowed the builtin) and the ambiguous local `l`.
doc_dir = sys.argv[1]
exts = set()  # extensions seen; collected for inspection, not otherwise used
for fn in os.listdir(doc_dir):
    # 'groenten' files are deliberately excluded
    if 'groenten' in fn:
        continue
    base, ext = os.path.splitext(fn)
    exts.add(ext)
    if ext not in ('.odt', '.doc'):
        continue
    base = base.strip()
    # skip documents that were already imported
    if Lesson.objects.filter(filename=base).exists():
        continue
    print(base, ext)
    with open(os.path.join(doc_dir, fn), 'rb') as doc_file:
        lesson = Lesson.objects.create(filename=base, docfile=File(doc_file))
| {
"content_hash": "3a2d9d93d84c3cea344415a4a89bbe14",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 72,
"avg_line_length": 25.727272727272727,
"alnum_prop": 0.6537102473498233,
"repo_name": "vanatteveldt/luctor",
"id": "505f0f4fe2d2bee7f1b36346beb91fb5e0b77fb6",
"size": "566",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "0_addfiles.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "39651"
},
{
"name": "Python",
"bytes": "89870"
}
],
"symlink_target": ""
} |
import sys

# Auto-generated validator exports for the heatmapgl trace type.
# Python < 3.7 imports every validator eagerly; newer interpreters use lazy
# per-attribute relative imports (PEP 562 __getattr__/__dir__).
if sys.version_info < (3, 7):
    from ._zsrc import ZsrcValidator
    from ._zmin import ZminValidator
    from ._zmid import ZmidValidator
    from ._zmax import ZmaxValidator
    from ._zauto import ZautoValidator
    from ._z import ZValidator
    from ._ytype import YtypeValidator
    from ._ysrc import YsrcValidator
    from ._yaxis import YaxisValidator
    from ._y0 import Y0Validator
    from ._y import YValidator
    from ._xtype import XtypeValidator
    from ._xsrc import XsrcValidator
    from ._xaxis import XaxisValidator
    from ._x0 import X0Validator
    from ._x import XValidator
    from ._visible import VisibleValidator
    from ._uirevision import UirevisionValidator
    from ._uid import UidValidator
    from ._transpose import TransposeValidator
    from ._textsrc import TextsrcValidator
    from ._text import TextValidator
    from ._stream import StreamValidator
    from ._showscale import ShowscaleValidator
    from ._reversescale import ReversescaleValidator
    from ._opacity import OpacityValidator
    from ._name import NameValidator
    from ._metasrc import MetasrcValidator
    from ._meta import MetaValidator
    from ._idssrc import IdssrcValidator
    from ._ids import IdsValidator
    from ._hoverlabel import HoverlabelValidator
    from ._hoverinfosrc import HoverinfosrcValidator
    from ._hoverinfo import HoverinfoValidator
    from ._dy import DyValidator
    from ._dx import DxValidator
    from ._customdatasrc import CustomdatasrcValidator
    from ._customdata import CustomdataValidator
    from ._colorscale import ColorscaleValidator
    from ._colorbar import ColorbarValidator
    from ._coloraxis import ColoraxisValidator
    from ._autocolorscale import AutocolorscaleValidator
else:
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        [
            "._zsrc.ZsrcValidator",
            "._zmin.ZminValidator",
            "._zmid.ZmidValidator",
            "._zmax.ZmaxValidator",
            "._zauto.ZautoValidator",
            "._z.ZValidator",
            "._ytype.YtypeValidator",
            "._ysrc.YsrcValidator",
            "._yaxis.YaxisValidator",
            "._y0.Y0Validator",
            "._y.YValidator",
            "._xtype.XtypeValidator",
            "._xsrc.XsrcValidator",
            "._xaxis.XaxisValidator",
            "._x0.X0Validator",
            "._x.XValidator",
            "._visible.VisibleValidator",
            "._uirevision.UirevisionValidator",
            "._uid.UidValidator",
            "._transpose.TransposeValidator",
            "._textsrc.TextsrcValidator",
            "._text.TextValidator",
            "._stream.StreamValidator",
            "._showscale.ShowscaleValidator",
            "._reversescale.ReversescaleValidator",
            "._opacity.OpacityValidator",
            "._name.NameValidator",
            "._metasrc.MetasrcValidator",
            "._meta.MetaValidator",
            "._idssrc.IdssrcValidator",
            "._ids.IdsValidator",
            "._hoverlabel.HoverlabelValidator",
            "._hoverinfosrc.HoverinfosrcValidator",
            "._hoverinfo.HoverinfoValidator",
            "._dy.DyValidator",
            "._dx.DxValidator",
            "._customdatasrc.CustomdatasrcValidator",
            "._customdata.CustomdataValidator",
            "._colorscale.ColorscaleValidator",
            "._colorbar.ColorbarValidator",
            "._coloraxis.ColoraxisValidator",
            "._autocolorscale.AutocolorscaleValidator",
        ],
    )
| {
"content_hash": "5cf9dbacf764ea4d3e7d756d74aa885d",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 56,
"avg_line_length": 37.71875,
"alnum_prop": 0.6335266500966584,
"repo_name": "plotly/python-api",
"id": "47c7037349a6601adef3873072fff6d2bbe2a505",
"size": "3621",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/heatmapgl/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
from oslo_config import cfg
from oslo_policy import policy
from monasca_api import policies
CONF = cfg.CONF

# Pre-rendered oslo.policy check strings built from the configured role lists.
DEFAULT_AUTHORIZED_ROLES = policies.roles_list_to_check_str(
    cfg.CONF.security.default_authorized_roles)
READ_ONLY_AUTHORIZED_ROLES = policies.roles_list_to_check_str(
    cfg.CONF.security.read_only_authorized_roles)
AGENT_AUTHORIZED_ROLES = policies.roles_list_to_check_str(cfg.CONF.security.agent_authorized_roles)
# Policy rules guarding the /v2.0/metrics endpoints; exposed via list_rules().
rules = [
    policy.DocumentedRuleDefault(
        name='api:metrics:get',
        check_str=DEFAULT_AUTHORIZED_ROLES + ' or ' + READ_ONLY_AUTHORIZED_ROLES,
        description='List metrics, measurements, metric statistics or metric names.',
        operations=[
            {'path': '/v2.0/metrics', 'method': 'GET'},
            {'path': '/v2.0/metrics/measurements', 'method': 'GET'},
            {'path': '/v2.0/metrics/statistics', 'method': 'GET'},
            {'path': '/v2.0/metrics/names', 'method': 'GET'}
        ]
    ),
    # Only default or agent roles may write metrics.
    policy.DocumentedRuleDefault(
        name='api:metrics:post',
        check_str=DEFAULT_AUTHORIZED_ROLES + ' or ' + AGENT_AUTHORIZED_ROLES,
        description='Create metrics.',
        operations=[
            {'path': '/v2.0/metrics', 'method': 'POST'}
        ]
    ),
    policy.DocumentedRuleDefault(
        name='api:metrics:dimension:values',
        check_str=DEFAULT_AUTHORIZED_ROLES + ' or ' + READ_ONLY_AUTHORIZED_ROLES,
        description='List dimension values.',
        operations=[
            {'path': '/v2.0/metrics/dimensions/names/values', 'method': 'GET'}
        ]
    ),
    policy.DocumentedRuleDefault(
        name='api:metrics:dimension:names',
        check_str=DEFAULT_AUTHORIZED_ROLES + ' or ' + READ_ONLY_AUTHORIZED_ROLES,
        description='List dimension names.',
        operations=[
            {'path': '/v2.0/metrics/dimensions/names', 'method': 'GET'}
        ]
    ),
]
def list_rules():
    """Return the metrics API policy rules for oslo.policy registration."""
    return rules
| {
"content_hash": "b06e6575b98b284cec2c6c6025911099",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 99,
"avg_line_length": 36.301886792452834,
"alnum_prop": 0.6237006237006237,
"repo_name": "openstack/monasca-api",
"id": "cf2614e230cefc46c483a5e46a2f362cf13edd4e",
"size": "2496",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "monasca_api/policies/metrics.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2638"
},
{
"name": "Java",
"bytes": "883947"
},
{
"name": "Jinja",
"bytes": "32747"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "936668"
},
{
"name": "Shell",
"bytes": "129514"
}
],
"symlink_target": ""
} |
from flask import Flask, jsonify
# import request
app = Flask(__name__)

# In-memory demo task list served by the optimizer endpoint below.
tasks = [
    {
        'id': 1,
        'title': u'Buy groceries',
        'description': u'Milk, Cheese, Pizza, Fruit, Tylenol',
        'done': False
    },
    {
        'id': 2,
        'title': u'Learn Python',
        'description': u'Need to find a good Python tutorial on the web',
        'done': False
    }
]
@app.route('/tasks/optimizer', methods=['POST'])
def get_tasks():
    """Return the demo task list as a JSON response."""
    # content = request.get_json(silent=True)
    payload = {'tasks': tasks}
    return jsonify(payload)
if __name__ == '__main__':
app.run(debug=True, port=23334) | {
"content_hash": "0e175dc6778029fa298e834973eb0387",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 74,
"avg_line_length": 22.59259259259259,
"alnum_prop": 0.5508196721311476,
"repo_name": "VisGroup/streaming-storyline",
"id": "e83c29c29b2767402e67ea82aaea4425fc105518",
"size": "629",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "optimizer/test_server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "41"
},
{
"name": "C++",
"bytes": "28730"
},
{
"name": "CSS",
"bytes": "2824"
},
{
"name": "HTML",
"bytes": "5342"
},
{
"name": "JavaScript",
"bytes": "43133"
},
{
"name": "Python",
"bytes": "8627"
},
{
"name": "Shell",
"bytes": "191"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from sentry.models import Integration
from sentry.testutils import AcceptanceTestCase
class OrganizationIntegrationSettingsTest(AcceptanceTestCase):
    """Acceptance test for the organization integrations settings page."""

    def setUp(self):
        # Fixture: an org/team/project, an owner member, and one installed
        # Slack integration; the test runs logged in as that owner.
        super(OrganizationIntegrationSettingsTest, self).setUp()
        self.user = self.create_user('foo@example.com')
        self.org = self.create_organization(
            name='Rowdy Tiger',
            owner=None,
        )
        self.team = self.create_team(organization=self.org, name='Mariachi Band')
        self.project = self.create_project(
            organization=self.org,
            teams=[self.team],
            name='Bengal',
        )
        self.create_member(
            user=self.user,
            organization=self.org,
            role='owner',
            teams=[self.team],
        )
        self.model = Integration.objects.create(
            provider='slack',
            external_id='some_slack',
            name='Test Slack',
            metadata={
                'domain_name': 'slack-test.slack.com',
            },
        )
        self.org_integration = self.model.add_organization(self.org.id)
        self.login_as(self.user)

    def test_all_integrations_list(self):
        # Load the integrations settings page and snapshot it once loaded.
        path = '/settings/{}/integrations/'.format(self.org.slug)
        self.browser.get(path)
        self.browser.wait_until_not('.loading-indicator')
        self.browser.snapshot('organization settings - all integrations')
| {
"content_hash": "1f256899d309957d296372dfeb929507",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 81,
"avg_line_length": 33.20454545454545,
"alnum_prop": 0.5975359342915811,
"repo_name": "ifduyue/sentry",
"id": "1f7bea09832a59780d41abbbb467afebed510f4f",
"size": "1461",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/acceptance/test_organization_integrations_settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "301292"
},
{
"name": "HTML",
"bytes": "241298"
},
{
"name": "JavaScript",
"bytes": "3295572"
},
{
"name": "Lua",
"bytes": "65795"
},
{
"name": "Makefile",
"bytes": "6892"
},
{
"name": "Python",
"bytes": "36910084"
},
{
"name": "Ruby",
"bytes": "217"
},
{
"name": "Shell",
"bytes": "5701"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, print_function, unicode_literals
from django.test import TestCase
from mock import patch
import ntpath
import os
import posixpath
import sys
from .dummydata import windows_data, osx_data, linux_data
from ..utils.filesystem import enumerate_mounted_disk_partitions, EXPORT_FOLDER_NAME
def _get_mocked_popen(cmd_resp):
class MockedPopen(object):
def __init__(self, cmd, *args, **kwargs):
if cmd not in cmd_resp:
raise Exception("subprocess.Popen called for an unmocked command '{}'!".format(cmd))
self.response = cmd_resp[cmd]
def communicate(self): # to handle subprocess.Popen().communicate()
return self.response.encode(), None
def read(self): # to handle os.popen().read()
return self.response
return MockedPopen
def _get_mocked_disk_usage(disk_sizes):
def mock_disk_usage(path):
if path not in disk_sizes:
raise Exception("Disk usage not mocked for path '{}'!".format(path))
sizes = disk_sizes[path]
class MockDiskSizes(object):
f_bsize = 2
f_blocks = sizes["total"] / 2
f_bavail = sizes["free"] / 2
total = sizes["total"]
free = sizes["free"]
used = sizes["used"]
return MockDiskSizes()
return mock_disk_usage
class patch_popen(object):
    """Decorator: patch both subprocess.Popen and os.popen with the canned
    per-command responses (see _get_mocked_popen)."""

    def __init__(self, cmd_resp):
        # cmd_resp maps command string -> canned stdout text
        self.mocked_popen = _get_mocked_popen(cmd_resp)

    def __call__(self, f):
        f = patch("subprocess.Popen", self.mocked_popen)(f)
        f = patch("os.popen", self.mocked_popen)(f)
        return f
class patch_disk_usage(object):
    """Decorator: patch the platform disk-usage API with canned sizes.

    Python >= 3.3 code paths use shutil.disk_usage; older interpreters
    use os.statvfs, so the matching target is patched.
    """

    def __init__(self, disk_sizes):
        self.mocked_disk_usage = _get_mocked_disk_usage(disk_sizes)

    def __call__(self, f):
        if sys.version_info >= (3, 3):
            return patch("shutil.disk_usage", self.mocked_disk_usage)(f)
        else:
            return patch("os.statvfs", self.mocked_disk_usage)(f)
def patch_os_access(readable, writable):
    """Decorator: patch os.access to answer from the given lookup tables.

    `readable` and `writable` each map path -> bool, answering os.R_OK and
    os.W_OK checks respectively.
    """
    def wrapper(f):
        def check_os_access(path, flag):
            if flag == os.R_OK:
                lookup = readable
            elif flag == os.W_OK:
                lookup = writable
            else:
                # BUG FIX: any other flag previously fell through to an
                # UnboundLocalError on `lookup`; fail with a clear message.
                raise Exception("os.access() called with an unmocked flag '{}'!".format(flag))
            if path not in lookup:
                raise Exception("os.access() called for an unmocked path '{}'!".format(path))
            return lookup[path]
        return patch("os.access", check_os_access)(f)
    return wrapper
def patch_os_path_exists_for_kolibri_folder(folder_lookup):
    """Decorator: patch os.path.exists, answering only for paths that end in
    the Kolibri export folder; folder_lookup maps parent path -> bool."""
    def wrapper(f):
        def check_os_path_exists(path):
            if not path.endswith(EXPORT_FOLDER_NAME):
                raise Exception("Checking os.path.exists only mocked for kolibri data folder paths.")
            # resolve the parent directory of the export folder for lookup
            base_path = os.path.realpath(os.path.join(path, ".."))
            if base_path not in folder_lookup:
                raise Exception("os.path.exists() called for an unmocked path '{}'!".format(path))
            return folder_lookup[base_path]
        return patch("os.path.exists", check_os_path_exists)(f)
    return wrapper
class WindowsFilesystemTestCase(TestCase):
    """
    Test retrieval and parsing of disk info for Windows, using mocked command output.
    """

    # All platform probes are mocked to replay the Windows fixture data.
    @patch_popen(windows_data.popen_responses)
    @patch_os_access(windows_data.os_access_read, windows_data.os_access_write)
    @patch_os_path_exists_for_kolibri_folder(windows_data.has_kolibri_data_folder)
    @patch("sys.platform", "win32")
    @patch("os.path", ntpath)
    def setUp(self):
        self.drives = enumerate_mounted_disk_partitions()

    def test_drive_list_members(self):
        self.assertSetEqual(set(drive.path for drive in self.drives.values()), set(["C:\\", "D:\\"]))

    def test_drive_writability(self):
        self.assertTrue(self.drives["C:\\"].writable)
        self.assertFalse(self.drives["D:\\"].writable)

    def test_drive_data_folders(self):
        # only D: carries an existing Kolibri export folder in the fixtures
        self.assertEqual(self.drives["C:\\"].datafolder, None)
        self.assertEqual(self.drives["D:\\"].datafolder, "D:\\" + EXPORT_FOLDER_NAME)

    def test_drive_space(self):
        self.assertEqual(self.drives["C:\\"].freespace, 132940218368)
        self.assertEqual(self.drives["C:\\"].totalspace, 136251727872)
        self.assertEqual(self.drives["D:\\"].freespace, 0)
        self.assertEqual(self.drives["D:\\"].totalspace, 58388480)

    def test_drive_names(self):
        self.assertEqual(self.drives["C:\\"].name, 'Local Fixed Disk')
        self.assertEqual(self.drives["D:\\"].name, 'VBOXADDITIONS_4.')
class LinuxFilesystemTestCase(TestCase):
    """
    Test retrieval and parsing of disk info for Linux, using mocked command output.
    """

    # All platform probes are mocked to replay the Linux fixture data.
    @patch_popen(linux_data.popen_responses)
    @patch_os_access(linux_data.os_access_read, linux_data.os_access_write)
    @patch_os_path_exists_for_kolibri_folder(linux_data.has_kolibri_data_folder)
    @patch_disk_usage(linux_data.disk_sizes)
    @patch("sys.platform", "linux2")
    @patch("os.path", posixpath)
    def setUp(self):
        self.drives = enumerate_mounted_disk_partitions()

    def test_drive_list_members(self):
        self.assertSetEqual(set(drive.path for drive in self.drives.values()), set(['/media/user/F571-7814', '/', '/media/user/disk']))

    def test_drive_writability(self):
        self.assertTrue(self.drives["/"].writable)
        self.assertTrue(self.drives["/media/user/F571-7814"].writable)
        self.assertFalse(self.drives["/media/user/disk"].writable)

    def test_drive_data_folders(self):
        # only the F571-7814 volume carries an existing Kolibri export folder
        self.assertEqual(self.drives["/"].datafolder, None)
        self.assertEqual(self.drives["/media/user/F571-7814"].datafolder, "/media/user/F571-7814/" + EXPORT_FOLDER_NAME)
        self.assertEqual(self.drives["/media/user/disk"].datafolder, None)

    def test_drive_space(self):
        self.assertEqual(self.drives["/media/user/F571-7814"].freespace, 772001792)
        self.assertEqual(self.drives["/media/user/F571-7814"].totalspace, 2142232576)
        self.assertEqual(self.drives["/"].freespace, 12704473088)
        self.assertEqual(self.drives["/"].totalspace, 117579513856)
        self.assertEqual(self.drives["/media/user/disk"].freespace, 11328000)
        self.assertEqual(self.drives["/media/user/disk"].totalspace, 31801344)
class OSXFilesystemTestCase(TestCase):
    """
    Test retrieval and parsing of disk info for OSX, using mocked command output.
    """

    # All platform probes are mocked to replay the OSX fixture data.
    @patch_popen(osx_data.popen_responses)
    @patch_os_access(osx_data.os_access_read, osx_data.os_access_write)
    @patch_os_path_exists_for_kolibri_folder(osx_data.has_kolibri_data_folder)
    @patch_disk_usage(osx_data.disk_sizes)
    @patch("sys.platform", "darwin")
    @patch("os.path", posixpath)
    def setUp(self):
        self.drives = enumerate_mounted_disk_partitions()

    def test_drive_list_members(self):
        self.assertSetEqual(set(drive.path for drive in self.drives.values()), set(['/Volumes/HP v125w', '/']))

    def test_drive_writability(self):
        self.assertFalse(self.drives["/"].writable)
        self.assertTrue(self.drives["/Volumes/HP v125w"].writable)

    def test_drive_data_folders(self):
        # only the USB volume carries an existing Kolibri export folder
        self.assertEqual(self.drives["/"].datafolder, None)
        self.assertEqual(self.drives["/Volumes/HP v125w"].datafolder, "/Volumes/HP v125w/" + EXPORT_FOLDER_NAME)

    def test_drive_space(self):
        self.assertEqual(self.drives["/Volumes/HP v125w"].freespace, 1234)
        self.assertEqual(self.drives["/Volumes/HP v125w"].totalspace, 45678)
        self.assertEqual(self.drives["/"].freespace, 0)
        self.assertEqual(self.drives["/"].totalspace, 1000)

    def test_drive_names(self):
        self.assertEqual(self.drives["/Volumes/HP v125w"].name, 'Untitled 1')
        self.assertEqual(self.drives["/"].name, 'Macintosh HD')
| {
"content_hash": "baaa24d56cdfa8602a257fd014e142d8",
"timestamp": "",
"source": "github",
"line_count": 218,
"max_line_length": 135,
"avg_line_length": 35.98623853211009,
"alnum_prop": 0.6455066921606119,
"repo_name": "66eli77/kolibri",
"id": "f47e4a0d70eab6085e28a828af70fd0ac8e65912",
"size": "7845",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kolibri/core/discovery/test/test_filesystem_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5193"
},
{
"name": "HTML",
"bytes": "2741"
},
{
"name": "JavaScript",
"bytes": "185967"
},
{
"name": "Makefile",
"bytes": "2607"
},
{
"name": "Python",
"bytes": "451615"
},
{
"name": "Shell",
"bytes": "6705"
},
{
"name": "Vue",
"bytes": "132704"
}
],
"symlink_target": ""
} |
"""server.py: Handle application's run command and its parameters."""
import app
import config
import session
def run(**kwargs):
    """Launch the WSGI application.

    Defaults come from the config module; any keyword argument overrides the
    corresponding default before being passed through to app.run.
    """
    info = {
        'server': config.SERVER_WSGI,
        'host': config.SERVER_HOST,
        'port': config.SERVER_PORT,
        'debug': config.SERVER_DEBUG,
        'reloader': config.SERVER_RELOADER,
        'interval': config.SERVER_INTERVAL,
        'quiet': config.SERVER_QUIET,
    }
    info.update(kwargs)
    app.run(app=session.session, **info)
| {
"content_hash": "aa01e1cb3eb33aa1435ce0d545adc009",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 69,
"avg_line_length": 24.842105263157894,
"alnum_prop": 0.6588983050847458,
"repo_name": "ejelome/archaic",
"id": "0e283df1681a09f3533cece96ef1d1d6d9abb363",
"size": "518",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py/wooji/wooji/server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "185688"
},
{
"name": "Clojure",
"bytes": "211200"
},
{
"name": "HTML",
"bytes": "1187920"
},
{
"name": "Java",
"bytes": "15437"
},
{
"name": "JavaScript",
"bytes": "162821"
},
{
"name": "Mako",
"bytes": "44985"
},
{
"name": "Objective-C",
"bytes": "48492"
},
{
"name": "Python",
"bytes": "165209"
},
{
"name": "Shell",
"bytes": "10132"
},
{
"name": "Smarty",
"bytes": "22996"
}
],
"symlink_target": ""
} |
from contextlib import contextmanager
from traits.has_traits import HasTraits
from traits.trait_types import Instance, Bool, Str, Int
import unicodedata
from traits.traits import Property
from traitsui.message import error
from traitsui.ui import UI
from pyanno.database import PyannoDatabase
from pyanno.modelBt import ModelBt
from pyanno.ui.database_view import DatabaseView
from pyanno.ui.model_data_view import ModelDataView
import os
import errno
import os.path
import pyanno
import logging
from pyanno.util import PyannoValueError
logger = logging.getLogger(__name__)
PYANNO_PATH_NAME = '.pyanno'
DATABASE_FILENAME = 'pyanno_results.db'
class PyannoApplication(HasTraits):
    """Top-level pyAnno application object.

    Owns the results database and the main/database windows.
    """

    # persistent store of annotation/model results
    database = Instance(PyannoDatabase)
    main_window = Instance(ModelDataView)
    database_window = Instance(DatabaseView)
    database_ui = Instance(UI)
    logging_level = Int(logging.INFO)
    # path of the per-user .pyanno directory holding the database file
    pyanno_pathname = Str
    # True while the database window is open on screen
    db_window_open = Property(Bool)

    def _get_db_window_open(self):
        # traits Property getter for db_window_open
        return (self.database_ui is not None
                and self.database_ui.control is not None)

    def open(self):
        """Start logging, open the results database and the main window."""
        self._start_logging()
        self._open_pyanno_database()
        self._open_main_window()

    def close(self):
        """Close the results database before shutting down."""
        self.database.close()
        logger.info('Closing pyAnno -- Goodbye!')

    def _start_logging(self):
        logging.basicConfig(level=self.logging_level)
        logger.info('Starting pyAnno')

    def _create_pyanno_directory(self):
        """Create a pyanno directory in the user's home if it is missing."""
        # HOME on Unix; HOMEPATH on Windows
        home_dir = os.getenv('HOME') or os.getenv('HOMEPATH')
        logger.debug('Found home directory at ' + str(home_dir))
        self.pyanno_pathname = os.path.join(home_dir, PYANNO_PATH_NAME)
        try:
            logger.debug('Creating pyAnno directory at ' + self.pyanno_pathname)
            os.makedirs(self.pyanno_pathname)
        except OSError as e:
            logger.debug('pyAnno directory already existing')
            # only "already exists" is benign; re-raise anything else
            if e.errno != errno.EEXIST:
                raise

    def _open_pyanno_database(self):
        """Open (creating if needed) the results database file."""
        # database filename
        self._create_pyanno_directory()
        db_filename = os.path.join(self.pyanno_pathname,
                                   DATABASE_FILENAME)
        self.database = PyannoDatabase(db_filename)

    def _open_main_window(self):
        """Open the main window seeded with a default ModelBt model."""
        self.main_window = ModelDataView(application=self)
        model = ModelBt.create_initial_state(5, 8)
        self.main_window.set_model(model=model)
        self.main_window.configure_traits()

    def open_database_window(self):
        """Open the database window, bringing it to front if already open."""
        if self.db_window_open:
            # window exists; bring it to the front (wx-specific Raise)
            self.database_ui.control.Raise()
        else:
            # window was closed or not existent
            logger.debug('Open database window')
            database_window = DatabaseView(database=self.database,
                                           application=self)
            database_ui = database_window.edit_traits(kind='live')
            self.database_window = database_window
            self.database_ui = database_ui

    def close_database_window(self):
        # wx specific
        self.database_ui.control.Close()

    def update_window_from_database_record(self, record):
        """Update main window from pyanno database record.
        """
        self.main_window.set_from_database_record(record)
def add_current_state_to_database(self):
mdv = self.main_window
# file name may contain unicode characters
data_id = mdv.annotations_view.annotations_container.name
if data_id is '':
data_id = 'anonymous_annotations'
elif type(data_id) is unicode:
u_data_id = unicodedata.normalize('NFKD', data_id)
data_id = u_data_id.encode('ascii','ignore')
try:
self.database.store_result(
data_id,
mdv.annotations_view.annotations_container,
mdv.model,
mdv.log_likelihood
)
except PyannoValueError as e:
logger.info(e)
errmsg = e.args[0]
error('Error: ' + errmsg)
if self.db_window_open:
self.database_window.db_updated = True
def _create_debug_database(self):
"""Create and populate a test database in a temporary file.
"""
from tempfile import mktemp
from pyanno.modelA import ModelA
from pyanno.modelB import ModelB
from pyanno.annotations import AnnotationsContainer
# database filename
tmp_filename = mktemp(prefix='tmp_pyanno_db_')
db = PyannoDatabase(tmp_filename)
def _create_new_entry(model, annotations, id):
value = model.log_likelihood(annotations)
ac = AnnotationsContainer.from_array(annotations, name=id)
db.store_result(id, ac, model, value)
# populate database
model = ModelA.create_initial_state(5)
annotations = model.generate_annotations(100)
_create_new_entry(model, annotations, 'test_id')
modelb = ModelB.create_initial_state(5, 8)
_create_new_entry(modelb, annotations, 'test_id')
annotations = model.generate_annotations(100)
_create_new_entry(modelb, annotations, 'test_id2')
self.database = db
@contextmanager
def pyanno_application(**traits):
    """Context manager yielding a PyannoApplication that is always closed.

    Keyword arguments are forwarded to the PyannoApplication constructor.
    The application's close() (which closes the results database) runs even
    if the managed block raises.
    """
    app = PyannoApplication(**traits)
    try:
        yield app
    finally:
        # BUGFIX: previously close() only ran on normal exit, leaking the
        # open database whenever the managed block raised an exception.
        app.close()
def main():
    """ Entry point for standalone testing/debugging. """
    app = PyannoApplication()
    # use a throwaway temp-file database instead of ~/.pyanno
    app._create_debug_database()
    # blocks until the UI is closed
    app._open_main_window()
    #app.open_database_window()
    app.close()
    return app
if __name__ == '__main__':
    # keep a reference to the app for interactive inspection
    app = main()
| {
"content_hash": "0841830eb30ad6a91133af1c655cd3fa",
"timestamp": "",
"source": "github",
"line_count": 202,
"max_line_length": 80,
"avg_line_length": 28.480198019801982,
"alnum_prop": 0.629236919867895,
"repo_name": "enthought/uchicago-pyanno",
"id": "7b0bca45264a1166b664863c9c9ad5788cb9ee35",
"size": "5883",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyanno/ui/pyanno_ui_application.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "401644"
}
],
"symlink_target": ""
} |
"""This module implements DataFutures.
We have two basic types of futures:
1. DataFutures which represent data objects
2. AppFutures which represent the futures on App/Leaf tasks.
"""
import os
import logging
from concurrent.futures import Future
from parsl.dataflow.futures import AppFuture
from parsl.app.errors import *
from parsl.data_provider.files import File
logger = logging.getLogger(__name__)
# Possible future states (for internal use by the futures package).
# NOTE: these mirror the private state constants of concurrent.futures._base;
# DataFuture.__repr__ below compares Future._state against them.
PENDING = 'PENDING'
RUNNING = 'RUNNING'
# The future was cancelled by the user...
CANCELLED = 'CANCELLED'
# ...and _Waiter.add_cancelled() was called by a worker.
CANCELLED_AND_NOTIFIED = 'CANCELLED_AND_NOTIFIED'
FINISHED = 'FINISHED'
# human-readable names used when formatting a future's repr
_STATE_TO_DESCRIPTION_MAP = {
    PENDING: "pending",
    RUNNING: "running",
    CANCELLED: "cancelled",
    CANCELLED_AND_NOTIFIED: "cancelled",
    FINISHED: "finished"
}
class DataFuture(Future):
    """A DataFuture points at an AppFuture.

    We are simply wrapping a AppFuture, and adding the specific case where, if
    the future is resolved i.e file exists, then the DataFuture is assumed to be
    resolved.
    """

    def parent_callback(self, parent_fu):
        """Callback from executor future to update the parent.

        Args:
            - parent_fu (Future): Future returned by the executor along with callback

        Returns:
            - None

        Updates the super() with the result() or exception()
        """
        if parent_fu.done() is True:
            e = parent_fu._exception
            if e:
                super().set_exception(e)
            else:
                super().set_result(parent_fu.result())
        return

    def __init__(self, fut, file_obj, parent=None, tid=None):
        """Construct the DataFuture object.

        If the file_obj is a string convert to a File.

        Args:
            - fut (AppFuture) : AppFuture that this DataFuture will track
            - file_obj (string/File obj) : Something representing file(s)

        Kwargs:
            - parent ()
            - tid (task_id) : Task id that this DataFuture tracks
        """
        super().__init__()
        self._tid = tid
        # Both checks are kept: File may be a str subclass, in which case the
        # object must not be re-wrapped.
        if isinstance(file_obj, str) and not isinstance(file_obj, File):
            self.file_obj = File(file_obj)
        else:
            self.file_obj = file_obj
        self.parent = parent
        self._exception = None

        if fut is None:
            logger.debug("Setting result to filepath since no future was passed")
            # BUGFIX: was ``self.set_result = self.file_obj``, which silently
            # replaced the inherited Future.set_result *method* with the File
            # object and left this future permanently pending. Calling the
            # method resolves the future immediately with the file object.
            self.set_result(self.file_obj)
        else:
            if isinstance(fut, Future):
                self.parent = fut
                self.parent.add_done_callback(self.parent_callback)
            else:
                raise NotFutureError("DataFuture can be created only with a FunctionFuture on None")

        logger.debug("Creating DataFuture with parent: %s", parent)
        logger.debug("Filepath: %s", self.filepath)

    @property
    def tid(self):
        """Returns the task_id of the task that will resolve this DataFuture."""
        return self._tid

    @property
    def filepath(self):
        """Filepath of the File object this datafuture represents."""
        return self.file_obj.filepath

    @property
    def filename(self):
        """Filepath of the File object this datafuture represents."""
        return self.filepath

    def result(self, timeout=None):
        """A blocking call that returns either the result or raises an exception.

        Assumptions : A DataFuture always has a parent AppFuture. The AppFuture does callbacks when
        setup.

        Kwargs:
            - timeout (int): Timeout in seconds

        Returns:
            - If App completed successfully returns the filepath.

        Raises:
            - Exception raised by app if failed.
        """
        if self.parent:
            if self.parent.done():
                # This explicit call to raise exceptions might be redundant.
                # the result() call *should* raise an exception if there's one
                e = self.parent._exception
                if e:
                    raise e
                else:
                    self.parent.result(timeout=timeout)
            else:
                self.parent.result(timeout=timeout)
        # The file object itself is the "result", whether or not a parent
        # future was involved.
        return self.file_obj

    def cancel(self):
        """Cancel the task that this DataFuture is tracking.

        Note: This may not work
        """
        if self.parent:
            # BUGFIX: was ``return self.parent.cancel`` — returning the bound
            # method object (always truthy) instead of invoking it.
            return self.parent.cancel()
        else:
            return False

    def cancelled(self):
        if self.parent:
            return self.parent.cancelled()
        else:
            return False

    def running(self):
        if self.parent:
            return self.parent.running()
        else:
            return False

    def done(self):
        # With no parent the file was provided up front, so the future is done.
        if self.parent:
            return self.parent.done()
        else:
            return True

    def exception(self, timeout=None):
        if self.parent:
            return self.parent.exception(timeout=timeout)
        else:
            # NOTE(review): returning True deviates from the Future.exception()
            # contract (exception instance or None); preserved for backward
            # compatibility with existing callers.
            return True

    def add_done_callback(self, fn):
        if self.parent:
            return self.parent.add_done_callback(fn)
        else:
            raise ValueError("Callback will be discarded because no parent future")

    def __repr__(self):
        # The DataFuture could be wrapping an AppFuture whose parent is a Future
        # check to find the top level parent
        if isinstance(self.parent, AppFuture):
            parent = self.parent.parent
        else:
            parent = self.parent

        if parent:
            with parent._condition:
                if parent._state == FINISHED:
                    if parent._exception:
                        return '<%s at %#x state=%s raised %s>' % (
                            self.__class__.__name__,
                            id(self),
                            _STATE_TO_DESCRIPTION_MAP[parent._state],
                            parent._exception.__class__.__name__)
                    else:
                        return '<%s at %#x state=%s returned %s>' % (
                            self.__class__.__name__,
                            id(self),
                            _STATE_TO_DESCRIPTION_MAP[parent._state],
                            self.filepath)
            return '<%s at %#x state=%s>' % (
                self.__class__.__name__,
                id(self),
                _STATE_TO_DESCRIPTION_MAP[parent._state])
        else:
            # No parent: report this future's own state.
            return '<%s at %#x state=%s>' % (
                self.__class__.__name__,
                id(self),
                _STATE_TO_DESCRIPTION_MAP[self._state])
def testing_nonfuture():
    """Smoke-test DataFuture construction without a backing Future."""
    path = '~/shuffled.txt'
    data_fut = DataFuture(None, path)
    print(data_fut)
    print("Result: ", data_fut.filepath)
    # the File wrapper is expected to expand and absolutize the user path
    assert data_fut.filepath == os.path.abspath(os.path.expanduser(path))
if __name__ == "__main__":
    # logging.basicConfig(filename='futures.testing.log',level=logging.DEBUG)
    import sys
    import random
    logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
    logger.debug("Begin Testing")
    # create a fixture file of 10000 shuffled integers, one per line
    with open('shuffled.txt', 'w') as testfile:
        nums = list(range(0, 10000))
        random.shuffle(nums)
        for item in nums:
            testfile.write("{0}\n".format(item))
    # two DataFutures tracking the same (never-completed) parent future
    foo = Future()
    df = DataFuture(foo, './shuffled.txt')
    dx = DataFuture(foo, '~/shuffled.txt')
    print(foo.done())
    print(df.done())
    testing_nonfuture()
| {
"content_hash": "2dcc8842970180763f0d638dec7ce371",
"timestamp": "",
"source": "github",
"line_count": 247,
"max_line_length": 100,
"avg_line_length": 30.469635627530366,
"alnum_prop": 0.5611214456550625,
"repo_name": "swift-lang/swift-e-lab",
"id": "375b297f47ea1a91491a5c06d04ef3756ae52734",
"size": "7526",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "parsl/app/futures.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "59197"
},
{
"name": "Python",
"bytes": "104539"
},
{
"name": "Shell",
"bytes": "1283"
}
],
"symlink_target": ""
} |
import io
import re
import torch
# Public API of this module. NOTE: custom_replace and simple_space_split are
# defined below but not listed here, so star-imports will not pick them up.
__all__ = [
    "generate_sp_model",
    "load_sp_model",
    "sentencepiece_numericalizer",
    "sentencepiece_tokenizer",
    "numericalize_tokens_from_iterator",
    "filter_wikipedia_xml",
    "to_map_style_dataset",
]
# This bare string is a no-op expression, not the module docstring (it does
# not appear first in the file); kept as an informational banner.
"""
This file contains experimental functionality.
All of these are experimental, unstable, and subject to change or deletion.
"""
def generate_sp_model(filename: str, vocab_size: int = 20000, model_type: str = "unigram", model_prefix: str = "m_user") -> None:
    r"""Train a SentencePiece tokenizer.
    Args:
        filename: the data file for training SentencePiece model.
        vocab_size: the size of vocabulary (Default: 20,000).
        model_type: the type of SentencePiece model, including unigram,
            bpe, char, word.
        model_prefix: the prefix of the files saving model and vocab.
    Outputs:
        The model and vocab are saved in two separate files with
        model_prefix.
    Examples:
        >>> from torchtext.data.functional import generate_sp_model
        >>> generate_sp_model('test.csv', vocab_size=23456, model_prefix='spm_user')
    """
    # Delegates to the torchtext native op registered under torch.ops.
    torch.ops.torchtext.generate_sp_model(filename, vocab_size, model_type, model_prefix)
def load_sp_model(spm):
    r"""Load a SentencePiece model from a path or an open binary file.
    Args:
        spm: the file path or a file object saving the sentencepiece model.
    Outputs:
        output: a SentencePiece model.
    Examples:
        >>> from torchtext.data.functional import load_sp_model
        >>> sp_model = load_sp_model("m_user.model")
        >>> sp_model = load_sp_model(open("m_user.model", 'rb'))
    """
    # Dispatch on the argument type: path string vs. open binary reader.
    if isinstance(spm, str):
        return torch.ops.torchtext.load_sp_model(spm)
    if isinstance(spm, io.BufferedReader):
        return torch.ops.torchtext.load_sp_model_string(spm.read())
    supported = ", ".join(["str", "io.BufferedReader"])
    raise TypeError(
        f"Unsupported type for spm argument: {type(spm).__name__}. "
        + "Supported types are: "
        + supported
    )
def sentencepiece_numericalizer(sp_model):
    r"""Build a callable that numericalizes sentences with a SentencePiece model.
    Args:
        sp_model: a SentencePiece model.
    Outputs:
        output: a generator function taking an iterable of text sentences and
            yielding, per sentence, the list of ids produced by the model.
    Examples:
        >>> from torchtext.data.functional import sentencepiece_numericalizer
        >>> sp_id_generator = sentencepiece_numericalizer(sp_model)
        >>> list_a = ["sentencepiece encode as pieces", "examples to try!"]
        >>> list(sp_id_generator(list_a))
        [[9858, 9249, 1629, 1305, 1809, 53, 842],
         [2347, 13, 9, 150, 37]]
    """
    def _numericalize(txt_iter):
        # lazily map each sentence through the model's id encoder
        yield from map(sp_model.EncodeAsIds, txt_iter)
    return _numericalize
def sentencepiece_tokenizer(sp_model):
    r"""Build a callable that tokenizes sentences with a SentencePiece model.
    Args:
        sp_model: a SentencePiece model.
    Outputs:
        output: a generator function taking an iterable of text sentences and
            yielding, per sentence, the list of subword pieces produced by
            the model.
    Examples:
        >>> from torchtext.data.functional import sentencepiece_tokenizer
        >>> sp_tokens_generator = sentencepiece_tokenizer(sp_model)
        >>> list_a = ["sentencepiece encode as pieces", "examples to try!"]
        >>> list(sp_tokens_generator(list_a))
        [['_sentence', 'piece', '_en', 'co', 'de', '_as', '_pieces'],
         ['_example', 's', '_to', '_try', '!']]
    """
    def _tokenize(txt_iter):
        # lazily map each sentence through the model's piece encoder
        yield from map(sp_model.EncodeAsPieces, txt_iter)
    return _tokenize
def custom_replace(replace_pattern):
    r"""Build a transform applying a sequence of regex substitutions to text.
    Examples:
        >>> from torchtext.data.functional import custom_replace
        >>> custom_replace_transform = custom_replace([(r'S', 's'), (r'\s+', ' ')])
        >>> list_a = ["Sentencepiece encode aS pieces", "exampleS to try!"]
        >>> list(custom_replace_transform(list_a))
        ['sentencepiece encode as pieces', 'examples to try!']
    """
    # compile once, up front; substitutions are applied in the given order
    compiled = [(re.compile(pat), repl) for pat, repl in replace_pattern]
    def _apply(txt_iter):
        for raw_line in txt_iter:
            out = raw_line
            for regex, repl in compiled:
                out = regex.sub(repl, out)
            yield out
    return _apply
def simple_space_split(iterator):
    r"""Yield each text string from *iterator* split on whitespace.
    Examples:
        >>> from torchtext.data.functional import simple_space_split
        >>> list_a = ["Sentencepiece encode as pieces", "example to try!"]
        >>> list(simple_space_split(list_a))
        [['Sentencepiece', 'encode', 'as', 'pieces'], ['example', 'to', 'try!']]
    """
    # str.split() with no argument splits on runs of any whitespace
    yield from (sentence.split() for sentence in iterator)
def numericalize_tokens_from_iterator(vocab, iterator, removed_tokens=None):
    r"""Yield an iterator of ids for each token list produced by *iterator*.
    Args:
        vocab: the vocabulary convert token into id.
        iterator: the iterator yield a list of tokens.
        removed_tokens: removed tokens from output dataset (Default: None)
    Examples:
        >>> from torchtext.data.functional import simple_space_split
        >>> from torchtext.data.functional import numericalize_tokens_from_iterator
        >>> vocab = {'Sentencepiece' : 0, 'encode' : 1, 'as' : 2, 'pieces' : 3}
        >>> ids_iter = numericalize_tokens_from_iterator(vocab,
        >>>                                              simple_space_split(["Sentencepiece as pieces",
        >>>                                                                  "as pieces"]))
        >>> for ids in ids_iter:
        >>>     print([num for num in ids])
        >>> [0, 2, 3]
        >>> [2, 3]
    """
    for tokens in iterator:
        if removed_tokens is None:
            # no filtering: map every token straight through the vocab
            yield iter(vocab[token] for token in tokens)
        else:
            # drop excluded tokens before looking up ids
            yield iter(vocab[token] for token in tokens
                       if token not in removed_tokens)
# Normalization patterns applied in order by filter_wikipedia_xml(), ported
# from fastText's wikifil.pl: decode entities, strip wiki/XML markup,
# lowercase, spell out digits, and squeeze whitespace.
_patterns = [
    (r"<.*>", ""),
    # BUGFIX: these three patterns were mangled to no-ops like ("&", "&");
    # the intent (per wikifil.pl) is to decode the HTML entities.
    (r"&amp;", "&"),
    (r"&lt;", "<"),
    (r"&gt;", ">"),
    (r"<ref[^<]*<\/ref>", ""),
    (r"<[^>]*>", ""),
    (r"\[http:[^] ]*", "["),
    (r"\|thumb", ""),
    (r"\|left", ""),
    (r"\|right", ""),
    (r"\|\d+px", ""),
    (r"\[\[image:[^\[\]]*\|", ""),
    # BUGFIX: the replacement used "$1" (Perl backreference syntax carried
    # over from wikifil.pl), which Python's re.sub treats as literal text,
    # discarding the captured category name. Python spells it \1.
    (r"\[\[category:([^|\]]*)[^]]*\]\]", r"[[\1]]"),
    (r"\[\[[a-z\-]*:[^\]]*\]\]", ""),
    (r"\[\[[^\|\]]*\|", "[["),
    (r"\{\{[^\}]*\}\}", ""),
    (r"\{[^\}]*\}", ""),
    (r"\[", ""),
    (r"\]", ""),
    (r"&[^;]*;", " "),
    # manual lowercasing, one letter at a time (as in the original script)
    (r"A", "a"),
    (r"B", "b"),
    (r"C", "c"),
    (r"D", "d"),
    (r"E", "e"),
    (r"F", "f"),
    (r"G", "g"),
    (r"H", "h"),
    (r"I", "i"),
    (r"J", "j"),
    (r"K", "k"),
    (r"L", "l"),
    (r"M", "m"),
    (r"N", "n"),
    (r"O", "o"),
    (r"P", "p"),
    (r"Q", "q"),
    (r"R", "r"),
    (r"S", "s"),
    (r"T", "t"),
    (r"U", "u"),
    (r"V", "v"),
    (r"W", "w"),
    (r"X", "x"),
    (r"Y", "y"),
    (r"Z", "z"),
    # spell out digits so only [a-z] survives the final sweep
    (r"0", " zero "),
    (r"1", " one "),
    (r"2", " two "),
    (r"3", " three "),
    (r"4", " four "),
    (r"5", " five "),
    (r"6", " six "),
    (r"7", " seven "),
    (r"8", " eight "),
    (r"9", " nine "),
    (r"[^a-z\n]+", " "),
    (r"\n ", ""),
    (r"\s+", " "),
    (r"\n\s*\n", r"\n"),
]
def filter_wikipedia_xml(text_iterator):
    r"""Filter wikipedia xml lines according to https://github.com/facebookresearch/fastText/blob/master/wikifil.pl

    args:
        text_iterator: An iterator type object that yields strings. Examples include string list, text io, generators etc.

    Yields:
        Normalized, non-empty text lines with redirect pages skipped.

    Raises:
        TypeError: if *text_iterator* does not support iteration.

    Examples:
        >>> from torchtext.data.functional import filter_wikipedia_xml
        >>> from torchtext.datasets import EnWik9
        >>> data_iter = EnWik9(split='train')
        >>> filter_data_iter = filter_wikipedia_xml(data_iter)
        >>> file_name = '.data/EnWik9/enwik9'
        >>> filter_data_iter = filter_wikipedia_xml(open(file_name,'r'))
    """
    try:
        iter(text_iterator)
    # BUGFIX: was a bare ``except:``, which also swallowed KeyboardInterrupt/
    # SystemExit; iter() raises TypeError for non-iterables.
    except TypeError:
        raise TypeError("Input {} must support iterator semantics".format(text_iterator))

    norm_transform = custom_replace(_patterns)
    for line in text_iterator:
        # drop redirect pages entirely
        if "#redirect" in line or "#REDIRECT" in line:
            continue
        # apply the full wikifil normalization pipeline to the single line
        line = list(norm_transform([line]))[0].strip()
        if line:
            yield line
def to_map_style_dataset(iter_data):
    r"""Convert iterable-style dataset to map-style dataset.
    args:
        iter_data: An iterator type object. Examples include Iterable datasets, string list, text io, generators etc.
    Examples:
        >>> from torchtext.datasets import IMDB
        >>> from torchtext.data import to_map_style_dataset
        >>> train_iter = IMDB(split='train')
        >>> train_dataset = to_map_style_dataset(train_iter)
        >>> file_name = '.data/EnWik9/enwik9'
        >>> data_iter = to_map_style_dataset(open(file_name,'r'))
    """

    # Materialize the iterable into a list-backed torch Dataset so that it
    # supports len() and random access by index.
    class _MapStyleDataset(torch.utils.data.Dataset):
        def __init__(self, data_source) -> None:
            # TODO Avoid list issue #1296
            self._data = list(data_source)

        def __getitem__(self, idx):
            return self._data[idx]

        def __len__(self):
            return len(self._data)

    return _MapStyleDataset(iter_data)
| {
"content_hash": "08156736a17d356f047e2f86f5a6970f",
"timestamp": "",
"source": "github",
"line_count": 308,
"max_line_length": 122,
"avg_line_length": 30.905844155844157,
"alnum_prop": 0.5548902195608783,
"repo_name": "pytorch/text",
"id": "7806595e679fff1d205b0050abb58c7333af2372",
"size": "9519",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "torchtext/data/functional.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5989"
},
{
"name": "C",
"bytes": "1165"
},
{
"name": "C++",
"bytes": "103773"
},
{
"name": "CMake",
"bytes": "6607"
},
{
"name": "Dockerfile",
"bytes": "1632"
},
{
"name": "Python",
"bytes": "761434"
},
{
"name": "Shell",
"bytes": "19559"
}
],
"symlink_target": ""
} |
import copy
from yahtr.core.hex_lib import index_of_direction
from yahtr.utils import attr, clamp
from yahtr.utils.event import Event
from yahtr.data.actions import ActionType
from yahtr.data.skill_template import Target
from yahtr.rank import Rank
from yahtr.weapon import RankedWeapon
class Unit:
    """ Unit in a player pool of available units.

    Wraps a unit template: stat attributes are copied from the template at
    construction, and per-battle state (position, orientation, equipment,
    shields) lives on the instance. UI/simulation coupling is done through
    the Event objects created in __init__.
    """
    # template attributes copied verbatim onto each instance
    __attributes = ['move', 'initiative', 'speed', 'shields', 'color', 'actions_tree', 'skills', 'shape', 'health']
    def __init__(self, template):
        self.template = template
        attr.copy_from_instance(template, self, *Unit.__attributes)
        self.hex_coords = None
        self.orientation = None
        self.equipped_weapons = []
        self.owner = None
        # hex coords currently occupied; recomputed by move_to()
        self.current_shape = []
        # default to six zeroed shield counters (one per hex edge)
        if not self.shields:
            self.shields = [0 for __ in range(6)]
        self.prev_hex_coords = None
        self.prev_orientation = None
        self.reachables = []
        # events
        self.on_health_change = Event('health', 'context')
        self.on_unit_targeted = Event()
        self.on_shield_change = Event()
        self.on_shield_targeted = Event('shield_index')
        self.on_targeted_end = Event()
        self.on_sim_move = Event('trajectory', 'orientation')
        self.on_skill_move = Event('context', 'unit')
    def __repr__(self):
        return f'U<{self.template.name!s}>'
    @property
    def ai_controlled(self):
        # delegated to the owning player
        return self.owner.ai_controlled
    def move_to(self, hex_coords=None, orientation=None):
        """Update position/orientation, remembering the previous values and
        recomputing the occupied shape when either actually changed."""
        calc_shape = False
        if hex_coords and hex_coords != self.hex_coords:
            self.prev_hex_coords = self.hex_coords
            self.hex_coords = hex_coords
            calc_shape = True
        if orientation and orientation != self.orientation:
            self.prev_orientation = self.orientation
            self.orientation = orientation
            calc_shape = True
        if calc_shape:
            self.current_shape = list(self.calc_shape_at(self.hex_coords, self.orientation))
    def sim_move(self, trajectory=None, orientation=None):
        """ Move is ordered from simulation (AI, events...) and UI need to be aware
        UI must call move_to after"""
        self.on_sim_move(trajectory, orientation)
    def skill_move(self, context, unit=None):
        """ Skill move is not directly managed by the unit because UI may want to do something
        UI must call move_to after"""
        # NOTE(review): `unit` is accepted but not forwarded, although the
        # event was declared as Event('context', 'unit') — possible oversight.
        self.on_skill_move(context)
    def equip(self, weapon):
        # only weapon types listed in the template may be equipped; the
        # template also fixes the proficiency rank
        if weapon.wp_type.name in self.template.weapons:
            rank = Rank[self.template.weapons[weapon.wp_type.name]]
            self.equipped_weapons.append(RankedWeapon(weapon, rank))
    def get_skills(self, action_type):
        """Return the skills available for the given action type: weapon
        skills aggregated over equipped weapons, or the unit's own skills."""
        skills = []
        if action_type == ActionType.weapon:
            for ranked_weapon in self.equipped_weapons:
                skills.extend(ranked_weapon.skills)
        elif action_type == ActionType.skill:
            skills = self.skills
        return skills
    def calc_shape_at(self, position, orientation):
        # rotate each template shape part to the orientation, then translate
        for shape_part in self.shape:
            new_pos = copy.copy(shape_part).rotate_to(orientation)
            new_pos += position
            yield new_pos
    def hex_test(self, hex_coords):
        """Return True if the unit currently occupies hex_coords."""
        if hex_coords in self.current_shape:
            return True
        return False
    def health_change(self, health, context):
        """Apply a (possibly negative) health delta, clamped to [0, max]."""
        self.health = clamp(0, self.health + health, self.template.health)
        if self.health <= 0:
            context.targets_killed.append((self, Target.unit))
        self.on_health_change(health, context)
    def unit_targeted(self, health, context):
        # NOTE(review): `health`/`context` are unused; the event is fired
        # without arguments.
        self.on_unit_targeted()
    def shield_change(self, shield_index, context):
        # consume one charge of the hit shield segment
        self.shields[shield_index] -= 1
        self.on_shield_change()
    def shield_targeted(self, shield_index, context):
        self.on_shield_targeted(shield_index)
    def end_targeting(self, shield_index=None):
        self.on_targeted_end(shield_index)
    def get_shield(self, origin, destination):
        """Return the index of an active shield segment protecting the edge
        hit by an attack from `origin` onto `destination`, or -1 if none.

        Shields are stored 6 per shape part, indexed by hex edge relative to
        the unit's current orientation.
        """
        for shape_part_index, shape_part in enumerate(self.current_shape):
            if shape_part == destination:
                dir_index = index_of_direction(self.orientation)
                hit_index = index_of_direction(origin - destination)
                # rotate the hit edge into the unit's local frame
                shield_index = shape_part_index * 6 + (6 - dir_index + hit_index) % 6
                if self.shields[shield_index] > 0:
                    return shield_index
        return -1
| {
"content_hash": "0dd3d4375dc1a6305982bcd12bebac10",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 115,
"avg_line_length": 36.41935483870968,
"alnum_prop": 0.6193534100974314,
"repo_name": "fp12/yahtr",
"id": "66bc2f9442c6c754220a50bc31152465eacc9406",
"size": "4516",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "yahtr/unit.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "794"
},
{
"name": "GLSL",
"bytes": "1120"
},
{
"name": "Python",
"bytes": "169305"
},
{
"name": "Shell",
"bytes": "91"
}
],
"symlink_target": ""
} |
"""helloworld.py
Illustrate how to load some data, and cache the results.
"""
from environment import Session
from model import Person
from caching_query import FromCache
# load Person objects. cache the result under the namespace "all_people".
print "loading people...."
people = Session.query(Person).options(FromCache("default")).all()
# remove the Session. next query starts from scratch.
Session.remove()
# load again, using the same FromCache option. now they're cached
# under "all_people", no SQL is emitted.
print "loading people....again!"
people = Session.query(Person).options(FromCache("default")).all()
# want to load on some different kind of query ? change the namespace
# you send to FromCache
print "loading people two through twelve"
people_two_through_twelve = Session.query(Person).\
options(FromCache("default")).\
filter(Person.name.between("person 02", "person 12")).\
all()
# the data is cached under the "namespace" you send to FromCache, *plus*
# the bind parameters of the query. So this query, having
# different literal parameters under "Person.name.between()" than the
# previous one, issues new SQL...
print "loading people five through fifteen"
people_five_through_fifteen = Session.query(Person).\
options(FromCache("default")).\
filter(Person.name.between("person 05", "person 15")).\
all()
# ... but using the same params as are already cached, no SQL
print "loading people two through twelve...again!"
people_two_through_twelve = Session.query(Person).\
options(FromCache("default")).\
filter(Person.name.between("person 02", "person 12")).\
all()
# invalidate the cache for the three queries we've done. Recreate
# each Query, which includes at the very least the same FromCache,
# same list of objects to be loaded, and the same parameters in the
# same order, then call invalidate().
print "invalidating everything"
Session.query(Person).options(FromCache("default")).invalidate()
Session.query(Person).\
options(FromCache("default")).\
filter(Person.name.between("person 02", "person 12")).invalidate()
Session.query(Person).\
options(FromCache("default", "people_on_range")).\
filter(Person.name.between("person 05", "person 15")).invalidate()
| {
"content_hash": "a411745d3e7bd1330ec8d4013dcaac20",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 83,
"avg_line_length": 40.41935483870968,
"alnum_prop": 0.6548284118116521,
"repo_name": "rclmenezes/sqlalchemy",
"id": "e2e4d4f7861c291e87d37c6cec350dfcefd878d4",
"size": "2506",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/dogpile_caching/helloworld.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "38103"
},
{
"name": "CSS",
"bytes": "7760"
},
{
"name": "JavaScript",
"bytes": "244"
},
{
"name": "Makefile",
"bytes": "7072"
},
{
"name": "Python",
"bytes": "7243712"
},
{
"name": "TeX",
"bytes": "13927"
}
],
"symlink_target": ""
} |
from unittest import TestCase
from tape import Tape
class TestTape(TestCase):
    """Unit tests for Tape: blank symbols, read/write, shifting, and the
    (contents, head-index) dump helper."""

    def test_initially_blank_head(self):
        tape = Tape()
        self.assertEqual(tape.read(), 0)

    def test_all_blanks(self):
        # shifting over unwritten cells must keep reading the blank symbol
        left_tape = Tape()
        for _ in range(4):
            left_tape.shift_left()
        self.assertEqual(left_tape.read(), 0)
        right_tape = Tape()
        for _ in range(4):
            right_tape.shift_right()
        self.assertEqual(right_tape.read(), 0)

    def test_different_blank_symbol(self):
        tape = Tape(blank_symbol=None)
        self.assertEqual(tape.read(), None)
        for _ in range(4):
            tape.shift_left()
        self.assertEqual(tape.read(), None)
        other = Tape(blank_symbol=None)
        for _ in range(4):
            other.shift_right()
        self.assertEqual(other.read(), None)

    def test_write(self):
        tape = Tape()
        self.assertEqual(tape.read(), 0)
        tape.write(1)
        self.assertEqual(tape.read(), 1)

    def test_write_shift(self):
        # interleave writes and shifts and verify each cell reads back
        tape = Tape()
        tape.write(1)
        tape.shift_left()
        self.assertEqual(tape.read(), 0)
        tape.shift_right()
        self.assertEqual(tape.read(), 1)
        tape.shift_right()
        self.assertEqual(tape.read(), 0)
        tape.write(2)
        tape.shift_right()
        tape.shift_left()
        self.assertEqual(tape.read(), 2)
        tape.shift_left()
        self.assertEqual(tape.read(), 1)

    def test_get_contents_and_index_blank(self):
        contents, head = Tape().get_contents_and_index()
        self.assertEqual(contents, [0])
        self.assertEqual(head, 0)

    def test_get_contents_and_index_write(self):
        tape = Tape()
        tape.write(1)
        contents, head = tape.get_contents_and_index()
        self.assertEqual(contents, [1])
        self.assertEqual(head, 0)

    # method name keeps the original (typo'd) spelling: it is the public
    # test identifier
    def test_get_contents_and_index_write_shfit(self):
        tape = Tape()
        tape.write(1)
        tape.shift_left()
        tape.write(2)
        tape.shift_left()
        for _ in range(4):
            tape.shift_right()
        tape.write(3)
        tape.shift_right()
        tape.shift_left()
        tape.shift_left()
        contents, head = tape.get_contents_and_index()
        self.assertEqual(contents, [2, 1, 0, 3])
        self.assertEqual(head, 2)

    def test_get_contents_and_index_shift(self):
        tape = Tape()
        tape.write(1)
        tape.shift_left()
        contents, head = tape.get_contents_and_index()
        self.assertEqual(contents, [0, 1])
        self.assertEqual(head, 0)
| {
"content_hash": "40c5913c6389fbd2becacd56de8c0021",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 57,
"avg_line_length": 26.613861386138613,
"alnum_prop": 0.5379464285714286,
"repo_name": "nahumj/turing-ga",
"id": "de887408f95e0cbb9263194062538240795a3edb",
"size": "2688",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "turing_machine/test_tape.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17918"
}
],
"symlink_target": ""
} |
from decimal import Decimal as D
from django.core.validators import MinValueValidator
from django.db import models
from django.utils.translation import gettext_lazy as _
from oscar.core import loading, prices
from oscar.models.fields import AutoSlugField
Scale = loading.get_class('shipping.scales', 'Scale')
class AbstractBase(models.Model):
    """
    Implements the interface declared by shipping.base.Base
    """
    # URL-safe identifier auto-derived from name
    code = AutoSlugField(_("Slug"), max_length=128, unique=True,
                         populate_from='name', db_index=True)
    name = models.CharField(_("Name"), max_length=128, unique=True, db_index=True)
    description = models.TextField(_("Description"), blank=True)
    # We allow shipping methods to be linked to a specific set of countries
    countries = models.ManyToManyField('address.Country',
                                       blank=True, verbose_name=_("Countries"))
    # We need this to mimic the interface of the Base shipping method
    is_discounted = False
    class Meta:
        abstract = True
        app_label = 'shipping'
        ordering = ['name']
        verbose_name = _("Shipping Method")
        verbose_name_plural = _("Shipping Methods")
    def __str__(self):
        return self.name
    def discount(self, basket):
        """
        Return the discount on the standard shipping charge
        """
        # This method is identical to the Base.discount().
        return D('0.00')
class AbstractOrderAndItemCharges(AbstractBase):
    """
    Standard shipping method
    This method has two components:
    * a charge per order
    * a charge per item
    Many sites use shipping logic which fits into this system. However, for
    more complex shipping logic, a custom shipping method object will need to
    be provided that subclasses ShippingMethod.
    """
    # flat fee applied once per order
    price_per_order = models.DecimalField(
        _("Price per order"), decimal_places=2, max_digits=12,
        default=D('0.00'))
    # fee applied per unit of each line that requires shipping
    price_per_item = models.DecimalField(
        _("Price per item"), decimal_places=2, max_digits=12,
        default=D('0.00'))
    # If basket value is above this threshold, then shipping is free
    free_shipping_threshold = models.DecimalField(
        _("Free Shipping"), decimal_places=2, max_digits=12, blank=True,
        null=True)
    class Meta(AbstractBase.Meta):
        abstract = True
        app_label = 'shipping'
        verbose_name = _("Order and Item Charge")
        verbose_name_plural = _("Order and Item Charges")
    def calculate(self, basket):
        """Return the shipping charge (as a Price) for the given basket."""
        # free shipping once the basket total (incl. tax) reaches the threshold
        if (self.free_shipping_threshold is not None
                and basket.total_incl_tax >= self.free_shipping_threshold):
            return prices.Price(
                currency=basket.currency, excl_tax=D('0.00'),
                incl_tax=D('0.00'))
        charge = self.price_per_order
        # only lines whose product needs physical shipping add per-item fees
        for line in basket.lines.all():
            if line.product.is_shipping_required:
                charge += line.quantity * self.price_per_item
        # Zero tax is assumed...
        return prices.Price(
            currency=basket.currency,
            excl_tax=charge,
            incl_tax=charge)
class AbstractWeightBased(AbstractBase):
    # The attribute code to use to look up the weight of a product
    weight_attribute = 'weight'
    # The default weight to use (in kg) when a product doesn't have a weight
    # attribute.
    default_weight = models.DecimalField(
        _("Default Weight"), decimal_places=3, max_digits=12,
        default=D('0.000'),
        validators=[MinValueValidator(D('0.00'))],
        help_text=_("Default product weight in kg when no weight attribute "
                    "is defined"))
    class Meta(AbstractBase.Meta):
        abstract = True
        app_label = 'shipping'
        verbose_name = _("Weight-based Shipping Method")
        verbose_name_plural = _("Weight-based Shipping Methods")
    def calculate(self, basket):
        """Return the shipping charge (as a Price) based on basket weight."""
        # Note, when weighing the basket, we don't check whether the item
        # requires shipping or not. It is assumed that if something has a
        # weight, then it requires shipping.
        scale = Scale(attribute_code=self.weight_attribute,
                      default_weight=self.default_weight)
        weight = scale.weigh_basket(basket)
        charge = self.get_charge(weight)
        # Zero tax is assumed...
        return prices.Price(
            currency=basket.currency,
            excl_tax=charge,
            incl_tax=charge)
    def get_charge(self, weight):
        """
        Calculates shipping charges for a given weight.
        If there is one or more matching weight band for a given weight, the
        charge of the closest matching weight band is returned.
        If the weight exceeds the top weight band, the top weight band charge
        is added until a matching weight band is found. This models the concept
        of "sending as many of the large boxes as needed".
        Please note that it is assumed that the closest matching weight band
        is the most cost-effective one, and that the top weight band is more
        cost effective than e.g. sending out two smaller parcels.
        Without that assumption, determining the cheapest shipping solution
        becomes an instance of the bin packing problem. The bin packing problem
        is NP-hard and solving it is left as an exercise to the reader.
        """
        weight = D(weight)  # weight really should be stored as a decimal
        if not self.bands.exists():
            return D('0.00')
        # NOTE(review): `top_band` is presumably a property defined further
        # down this class (outside this view) returning the heaviest band.
        top_band = self.top_band
        if weight <= top_band.upper_limit:
            band = self.get_band_for_weight(weight)
            return band.charge
        else:
            # fill as many "top band" parcels as possible, then price the
            # remainder with its closest band
            quotient, remaining_weight = divmod(weight, top_band.upper_limit)
            if remaining_weight:
                remainder_band = self.get_band_for_weight(remaining_weight)
                return quotient * top_band.charge + remainder_band.charge
            else:
                return quotient * top_band.charge
    def get_band_for_weight(self, weight):
        """
        Return the closest matching weight band for a given weight.
        """
        return self.bands.filter(upper_limit__gte=weight).order_by('upper_limit').first()
    @property
    def num_bands(self):
        return self.bands.count()
@property
def top_band(self):
return self.bands.order_by('-upper_limit').first()
class AbstractWeightBand(models.Model):
    """
    A single weight range and its charge for a WeightBased method.
    The band covers weights above the next-lower band's upper limit, up
    to and including its own ``upper_limit``.
    """
    method = models.ForeignKey(
        'shipping.WeightBased',
        on_delete=models.CASCADE,
        related_name='bands',
        verbose_name=_("Method"))
    upper_limit = models.DecimalField(
        _("Upper Limit"), decimal_places=3, max_digits=12, db_index=True,
        validators=[MinValueValidator(D('0.00'))],
        help_text=_("Enter upper limit of this weight band in kg. The lower "
                    "limit will be determined by the other weight bands."))
    charge = models.DecimalField(
        _("Charge"), decimal_places=2, max_digits=12,
        validators=[MinValueValidator(D('0.00'))])
    @property
    def weight_from(self):
        """Lower bound of this band: the next band down, or zero."""
        band_below = self.method.bands.filter(
            upper_limit__lt=self.upper_limit).order_by('-upper_limit').first()
        if band_below is None:
            return D('0.000')
        return band_below.upper_limit
    @property
    def weight_to(self):
        """Upper (inclusive) bound of this band."""
        return self.upper_limit
    class Meta:
        abstract = True
        app_label = 'shipping'
        ordering = ['method', 'upper_limit']
        verbose_name = _("Weight Band")
        verbose_name_plural = _("Weight Bands")
    def __str__(self):
        return _('Charge for weights up to %s kg') % (self.upper_limit,)
| {
"content_hash": "970b2fa6ec1b92af0c4042d33db922b9",
"timestamp": "",
"source": "github",
"line_count": 217,
"max_line_length": 89,
"avg_line_length": 36.1889400921659,
"alnum_prop": 0.6252387622564625,
"repo_name": "solarissmoke/django-oscar",
"id": "bc89d0c71025f2fa820c740cc3bd11f9e7ba9422",
"size": "7877",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/oscar/apps/shipping/abstract_models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "529"
},
{
"name": "HTML",
"bytes": "562906"
},
{
"name": "JavaScript",
"bytes": "40879"
},
{
"name": "Makefile",
"bytes": "4234"
},
{
"name": "Python",
"bytes": "2199293"
},
{
"name": "SCSS",
"bytes": "21362"
},
{
"name": "Shell",
"bytes": "308"
}
],
"symlink_target": ""
} |
import uuid
import glance_store
import mock
import six
import webob
import glance.api.policy
import glance.api.v2.image_data
from glance.common import exception
from glance.common import wsgi
from glance.tests.unit import base
import glance.tests.unit.utils as unit_test_utils
import glance.tests.utils as test_utils
class Raise(object):
    """A callable that raises its stored exception when invoked.
    Handy as a stand-in for methods that should fail with a given error.
    """
    def __init__(self, exc):
        self._exc = exc
    def __call__(self, *args, **kwargs):
        raise self._exc
class FakeImage(object):
    """Minimal in-memory stand-in for a glance image domain object."""
    def __init__(self, image_id=None, data=None, checksum=None, size=0,
                 virtual_size=0, locations=None, container_format='bear',
                 disk_format='rawr', status=None):
        self.image_id = image_id
        self.data = data
        self.checksum = checksum
        self.size = size
        self.virtual_size = virtual_size
        self.locations = locations
        self.container_format = container_format
        self.disk_format = disk_format
        self._status = status
    @property
    def status(self):
        return self._status
    @status.setter
    def status(self, value):
        # A status seeded with an exception simulates a failing state
        # transition: any attempt to change it re-raises that exception.
        current = self._status
        if isinstance(current, BaseException):
            raise current
        self._status = value
    def get_data(self, *args, **kwargs):
        return self.data
    def set_data(self, data, size=None):
        # Consume the (possibly iterable) data and mark the transition.
        self.data = ''.join(data)
        self.size = size
        self.status = 'modified-by-fake'
class FakeImageRepo(object):
    """Repo double: ``get`` returns (or raises) a canned result and
    ``save`` records its argument as ``saved_image`` for inspection.
    """
    def __init__(self, result=None):
        self.result = result
    def get(self, image_id):
        outcome = self.result
        if isinstance(outcome, BaseException):
            raise outcome
        return outcome
    def save(self, image, from_state=None):
        # Remember the last image saved so tests can assert on it.
        self.saved_image = image
class FakeGateway(object):
    """Gateway double that hands back a pre-built repo for any context."""
    def __init__(self, repo):
        self.repo = repo
    def get_repo(self, context):
        # The request context is irrelevant to the fake.
        return self.repo
class TestImagesController(base.StoreClearingUnitTest):
    """Tests for the v2 ImageDataController upload/download behaviour.
    The controller is wired to in-memory doubles (FakeImageRepo/FakeGateway)
    so no real store, database or policy engine is involved.
    """
    def setUp(self):
        super(TestImagesController, self).setUp()
        self.config(verbose=True, debug=True)
        self.image_repo = FakeImageRepo()
        self.gateway = FakeGateway(self.image_repo)
        self.controller = glance.api.v2.image_data.ImageDataController(
            gateway=self.gateway)
    def test_download(self):
        request = unit_test_utils.get_fake_request()
        image = FakeImage('abcd',
                          locations=[{'url': 'http://example.com/image',
                                      'metadata': {}, 'status': 'active'}])
        self.image_repo.result = image
        image = self.controller.download(request, unit_test_utils.UUID1)
        self.assertEqual('abcd', image.image_id)
    def test_download_deactivated(self):
        # Downloading a deactivated image must be refused outright.
        request = unit_test_utils.get_fake_request()
        image = FakeImage('abcd',
                          status='deactivated',
                          locations=[{'url': 'http://example.com/image',
                                      'metadata': {}, 'status': 'active'}])
        self.image_repo.result = image
        self.assertRaises(webob.exc.HTTPForbidden, self.controller.download,
                          request, str(uuid.uuid4()))
    def test_download_no_location(self):
        # NOTE(mclaren): NoContent will be raised by the ResponseSerializer
        # That's tested below.
        request = unit_test_utils.get_fake_request()
        self.image_repo.result = FakeImage('abcd')
        image = self.controller.download(request, unit_test_utils.UUID2)
        self.assertEqual('abcd', image.image_id)
    def test_download_non_existent_image(self):
        request = unit_test_utils.get_fake_request()
        self.image_repo.result = exception.NotFound()
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.download,
                          request, str(uuid.uuid4()))
    def test_download_forbidden(self):
        request = unit_test_utils.get_fake_request()
        self.image_repo.result = exception.Forbidden()
        self.assertRaises(webob.exc.HTTPForbidden, self.controller.download,
                          request, str(uuid.uuid4()))
    def test_download_ok_when_get_image_location_forbidden(self):
        # Even if inspecting the locations raises Forbidden, the download
        # itself must still succeed.
        class ImageLocations(object):
            def __len__(self):
                raise exception.Forbidden()
        request = unit_test_utils.get_fake_request()
        image = FakeImage('abcd')
        self.image_repo.result = image
        image.locations = ImageLocations()
        image = self.controller.download(request, unit_test_utils.UUID1)
        self.assertEqual('abcd', image.image_id)
    def test_upload(self):
        request = unit_test_utils.get_fake_request()
        image = FakeImage('abcd')
        self.image_repo.result = image
        self.controller.upload(request, unit_test_utils.UUID2, 'YYYY', 4)
        self.assertEqual('YYYY', image.data)
        self.assertEqual(4, image.size)
    def test_upload_status(self):
        request = unit_test_utils.get_fake_request()
        image = FakeImage('abcd')
        self.image_repo.result = image
        insurance = {'called': False}
        # The generator runs while the controller is consuming the data,
        # which is exactly when the image must be in 'saving' state.
        def read_data():
            insurance['called'] = True
            self.assertEqual('saving', self.image_repo.saved_image.status)
            yield 'YYYY'
        self.controller.upload(request, unit_test_utils.UUID2,
                               read_data(), None)
        self.assertTrue(insurance['called'])
        self.assertEqual('modified-by-fake',
                         self.image_repo.saved_image.status)
    def test_upload_no_size(self):
        request = unit_test_utils.get_fake_request()
        image = FakeImage('abcd')
        self.image_repo.result = image
        self.controller.upload(request, unit_test_utils.UUID2, 'YYYY', None)
        self.assertEqual('YYYY', image.data)
        self.assertIsNone(image.size)
    def test_upload_invalid(self):
        # A status seeded with ValueError makes the state transition fail,
        # which must surface as a 400.
        request = unit_test_utils.get_fake_request()
        image = FakeImage('abcd')
        image.status = ValueError()
        self.image_repo.result = image
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.upload,
                          request, unit_test_utils.UUID1, 'YYYY', 4)
    def test_upload_with_expired_token(self):
        # A token expiring mid-upload (save from 'saving' raises
        # NotAuthenticated) must yield a 401 and delete the image.
        def side_effect(image, from_state=None):
            if from_state == 'saving':
                raise exception.NotAuthenticated()
        mocked_save = mock.Mock(side_effect=side_effect)
        mocked_delete = mock.Mock()
        request = unit_test_utils.get_fake_request()
        image = FakeImage('abcd')
        image.delete = mocked_delete
        self.image_repo.result = image
        self.image_repo.save = mocked_save
        self.assertRaises(webob.exc.HTTPUnauthorized, self.controller.upload,
                          request, unit_test_utils.UUID1, 'YYYY', 4)
        self.assertEqual(3, mocked_save.call_count)
        mocked_delete.assert_called_once_with()
    def test_upload_non_existent_image_during_save_initiates_deletion(self):
        def fake_save_not_found(self):
            raise exception.NotFound()
        def fake_save_conflict(self):
            raise exception.Conflict()
        for fun in [fake_save_not_found, fake_save_conflict]:
            request = unit_test_utils.get_fake_request()
            image = FakeImage('abcd', locations=['http://example.com/image'])
            self.image_repo.result = image
            self.image_repo.save = fun
            image.delete = mock.Mock()
            self.assertRaises(webob.exc.HTTPGone, self.controller.upload,
                              request, str(uuid.uuid4()), 'ABC', 3)
            self.assertTrue(image.delete.called)
    def test_upload_non_existent_image_raises_not_found_exception(self):
        # Even when the cleanup delete also raises NotFound, the original
        # HTTPGone must still be raised.
        def fake_save(self):
            raise exception.NotFound()
        def fake_delete():
            raise exception.NotFound()
        request = unit_test_utils.get_fake_request()
        image = FakeImage('abcd', locations=['http://example.com/image'])
        self.image_repo.result = image
        self.image_repo.save = fake_save
        image.delete = fake_delete
        self.assertRaises(webob.exc.HTTPGone, self.controller.upload,
                          request, str(uuid.uuid4()), 'ABC', 3)
    def test_upload_non_existent_image_before_save(self):
        request = unit_test_utils.get_fake_request()
        self.image_repo.result = exception.NotFound()
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.upload,
                          request, str(uuid.uuid4()), 'ABC', 3)
    def test_upload_data_exists(self):
        request = unit_test_utils.get_fake_request()
        image = FakeImage()
        exc = exception.InvalidImageStatusTransition(cur_status='active',
                                                     new_status='queued')
        image.set_data = Raise(exc)
        self.image_repo.result = image
        self.assertRaises(webob.exc.HTTPConflict, self.controller.upload,
                          request, unit_test_utils.UUID1, 'YYYY', 4)
    def test_upload_storage_full(self):
        request = unit_test_utils.get_fake_request()
        image = FakeImage()
        image.set_data = Raise(glance_store.StorageFull)
        self.image_repo.result = image
        self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
                          self.controller.upload,
                          request, unit_test_utils.UUID2, 'YYYYYYY', 7)
    def test_image_size_limit_exceeded(self):
        request = unit_test_utils.get_fake_request()
        image = FakeImage()
        image.set_data = Raise(exception.ImageSizeLimitExceeded)
        self.image_repo.result = image
        self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
                          self.controller.upload,
                          request, unit_test_utils.UUID1, 'YYYYYYY', 7)
    def test_upload_storage_quota_full(self):
        request = unit_test_utils.get_fake_request()
        self.image_repo.result = exception.StorageQuotaFull("message")
        self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
                          self.controller.upload,
                          request, unit_test_utils.UUID1, 'YYYYYYY', 7)
    def test_upload_storage_forbidden(self):
        request = unit_test_utils.get_fake_request(user=unit_test_utils.USER2)
        image = FakeImage()
        image.set_data = Raise(exception.Forbidden)
        self.image_repo.result = image
        self.assertRaises(webob.exc.HTTPForbidden, self.controller.upload,
                          request, unit_test_utils.UUID2, 'YY', 2)
    def test_upload_storage_internal_error(self):
        # Unexpected server errors propagate unchanged.
        request = unit_test_utils.get_fake_request()
        self.image_repo.result = exception.ServerError()
        self.assertRaises(exception.ServerError,
                          self.controller.upload,
                          request, unit_test_utils.UUID1, 'ABC', 3)
    def test_upload_storage_write_denied(self):
        request = unit_test_utils.get_fake_request(user=unit_test_utils.USER3)
        image = FakeImage()
        image.set_data = Raise(glance_store.StorageWriteDenied)
        self.image_repo.result = image
        self.assertRaises(webob.exc.HTTPServiceUnavailable,
                          self.controller.upload,
                          request, unit_test_utils.UUID2, 'YY', 2)
    def test_upload_storage_store_disabled(self):
        """Test that uploading an image file raises StoreDisabled exception"""
        request = unit_test_utils.get_fake_request(user=unit_test_utils.USER3)
        image = FakeImage()
        image.set_data = Raise(glance_store.StoreAddDisabled)
        self.image_repo.result = image
        self.assertRaises(webob.exc.HTTPGone,
                          self.controller.upload,
                          request, unit_test_utils.UUID2, 'YY', 2)
    # NOTE(review): the three _test_upload_download_* methods below are
    # disabled (leading underscore) and reference self.notifier, which
    # setUp() never defines -- confirm that before re-enabling them.
    def _test_upload_download_prepare_notification(self):
        request = unit_test_utils.get_fake_request()
        self.controller.upload(request, unit_test_utils.UUID2, 'YYYY', 4)
        output = self.controller.download(request, unit_test_utils.UUID2)
        output_log = self.notifier.get_logs()
        prepare_payload = output['meta'].copy()
        prepare_payload['checksum'] = None
        prepare_payload['size'] = None
        prepare_payload['virtual_size'] = None
        prepare_payload['location'] = None
        prepare_payload['status'] = 'queued'
        del prepare_payload['updated_at']
        prepare_log = {
            'notification_type': "INFO",
            'event_type': "image.prepare",
            'payload': prepare_payload,
        }
        self.assertEqual(3, len(output_log))
        prepare_updated_at = output_log[0]['payload']['updated_at']
        del output_log[0]['payload']['updated_at']
        self.assertTrue(prepare_updated_at <= output['meta']['updated_at'])
        self.assertEqual(output_log[0], prepare_log)
    def _test_upload_download_upload_notification(self):
        request = unit_test_utils.get_fake_request()
        self.controller.upload(request, unit_test_utils.UUID2, 'YYYY', 4)
        output = self.controller.download(request, unit_test_utils.UUID2)
        output_log = self.notifier.get_logs()
        upload_payload = output['meta'].copy()
        upload_log = {
            'notification_type': "INFO",
            'event_type': "image.upload",
            'payload': upload_payload,
        }
        self.assertEqual(3, len(output_log))
        self.assertEqual(output_log[1], upload_log)
    def _test_upload_download_activate_notification(self):
        request = unit_test_utils.get_fake_request()
        self.controller.upload(request, unit_test_utils.UUID2, 'YYYY', 4)
        output = self.controller.download(request, unit_test_utils.UUID2)
        output_log = self.notifier.get_logs()
        activate_payload = output['meta'].copy()
        activate_log = {
            'notification_type': "INFO",
            'event_type': "image.activate",
            'payload': activate_payload,
        }
        self.assertEqual(3, len(output_log))
        self.assertEqual(output_log[2], activate_log)
    def test_restore_image_when_upload_failed(self):
        # A failed upload must roll the image status back to 'queued'.
        request = unit_test_utils.get_fake_request()
        image = FakeImage('fake')
        image.set_data = Raise(glance_store.StorageWriteDenied)
        self.image_repo.result = image
        self.assertRaises(webob.exc.HTTPServiceUnavailable,
                          self.controller.upload,
                          request, unit_test_utils.UUID2, 'ZZZ', 3)
        self.assertEqual('queued', self.image_repo.saved_image.status)
class TestImageDataDeserializer(test_utils.BaseTestCase):
    """Tests for RequestDeserializer.upload content-type/length handling."""
    def setUp(self):
        super(TestImageDataDeserializer, self).setUp()
        self.deserializer = glance.api.v2.image_data.RequestDeserializer()
    def test_upload(self):
        request = unit_test_utils.get_fake_request()
        request.headers['Content-Type'] = 'application/octet-stream'
        request.body = 'YYY'
        request.headers['Content-Length'] = 3
        output = self.deserializer.upload(request)
        data = output.pop('data')
        self.assertEqual('YYY', data.read())
        expected = {'size': 3}
        self.assertEqual(expected, output)
    def test_upload_chunked(self):
        request = unit_test_utils.get_fake_request()
        request.headers['Content-Type'] = 'application/octet-stream'
        # If we use body_file, webob assumes we want to do a chunked upload,
        # ignoring the Content-Length header
        request.body_file = six.StringIO('YYY')
        output = self.deserializer.upload(request)
        data = output.pop('data')
        self.assertEqual('YYY', data.read())
        expected = {'size': None}
        self.assertEqual(expected, output)
    def test_upload_chunked_with_content_length(self):
        request = unit_test_utils.get_fake_request()
        request.headers['Content-Type'] = 'application/octet-stream'
        request.body_file = six.StringIO('YYY')
        # The deserializer shouldn't care if the Content-Length is
        # set when the user is attempting to send chunked data.
        request.headers['Content-Length'] = 3
        output = self.deserializer.upload(request)
        data = output.pop('data')
        self.assertEqual('YYY', data.read())
        expected = {'size': 3}
        self.assertEqual(expected, output)
    def test_upload_with_incorrect_content_length(self):
        request = unit_test_utils.get_fake_request()
        request.headers['Content-Type'] = 'application/octet-stream'
        # The deserializer shouldn't care if the Content-Length and
        # actual request body length differ. That job is left up
        # to the controller
        request.body = 'YYY'
        request.headers['Content-Length'] = 4
        output = self.deserializer.upload(request)
        data = output.pop('data')
        self.assertEqual('YYY', data.read())
        expected = {'size': 4}
        self.assertEqual(expected, output)
    def test_upload_wrong_content_type(self):
        # Non-octet-stream content types must be rejected with a 415.
        request = unit_test_utils.get_fake_request()
        request.headers['Content-Type'] = 'application/json'
        request.body = 'YYYYY'
        self.assertRaises(webob.exc.HTTPUnsupportedMediaType,
                          self.deserializer.upload, request)
        # A truncated/unknown media type is rejected too.
        request = unit_test_utils.get_fake_request()
        request.headers['Content-Type'] = 'application/octet-st'
        request.body = 'YYYYY'
        self.assertRaises(webob.exc.HTTPUnsupportedMediaType,
                          self.deserializer.upload, request)
class TestImageDataSerializer(test_utils.BaseTestCase):
    """Tests for ResponseSerializer download/upload response shaping."""
    def setUp(self):
        super(TestImageDataSerializer, self).setUp()
        self.serializer = glance.api.v2.image_data.ResponseSerializer()
    def test_download(self):
        request = wsgi.Request.blank('/')
        request.environ = {}
        response = webob.Response()
        response.request = request
        image = FakeImage(size=3, data=iter('ZZZ'))
        self.serializer.download(response, image)
        self.assertEqual('ZZZ', response.body)
        self.assertEqual('3', response.headers['Content-Length'])
        # No checksum on the image means no Content-MD5 header.
        self.assertNotIn('Content-MD5', response.headers)
        self.assertEqual('application/octet-stream',
                         response.headers['Content-Type'])
    def test_download_with_checksum(self):
        request = wsgi.Request.blank('/')
        request.environ = {}
        response = webob.Response()
        response.request = request
        checksum = '0745064918b49693cca64d6b6a13d28a'
        image = FakeImage(size=3, checksum=checksum, data=iter('ZZZ'))
        self.serializer.download(response, image)
        self.assertEqual('ZZZ', response.body)
        self.assertEqual('3', response.headers['Content-Length'])
        # The image checksum is surfaced to the client as Content-MD5.
        self.assertEqual(checksum, response.headers['Content-MD5'])
        self.assertEqual('application/octet-stream',
                         response.headers['Content-Type'])
    def test_download_forbidden(self):
        """Make sure the serializer can return 403 forbidden error instead of
        500 internal server error.
        """
        def get_data(*args, **kwargs):
            raise exception.Forbidden()
        self.stubs.Set(glance.api.policy.ImageProxy,
                       'get_data',
                       get_data)
        request = wsgi.Request.blank('/')
        request.environ = {}
        response = webob.Response()
        response.request = request
        image = FakeImage(size=3, data=iter('ZZZ'))
        image.get_data = get_data
        self.assertRaises(webob.exc.HTTPForbidden,
                          self.serializer.download,
                          response, image)
    def test_download_no_content(self):
        """Test image download returns HTTPNoContent
        Make sure that serializer returns 204 no content error in case of
        image data is not available at specified location.
        """
        with mock.patch.object(glance.api.policy.ImageProxy,
                               'get_data') as mock_get_data:
            mock_get_data.side_effect = glance_store.NotFound(image="image")
            request = wsgi.Request.blank('/')
            response = webob.Response()
            response.request = request
            image = FakeImage(size=3, data=iter('ZZZ'))
            image.get_data = mock_get_data
            self.assertRaises(webob.exc.HTTPNoContent,
                              self.serializer.download,
                              response, image)
    def test_download_service_unavailable(self):
        """Test image download returns HTTPServiceUnavailable."""
        with mock.patch.object(glance.api.policy.ImageProxy,
                               'get_data') as mock_get_data:
            mock_get_data.side_effect = glance_store.RemoteServiceUnavailable()
            request = wsgi.Request.blank('/')
            response = webob.Response()
            response.request = request
            image = FakeImage(size=3, data=iter('ZZZ'))
            image.get_data = mock_get_data
            self.assertRaises(webob.exc.HTTPServiceUnavailable,
                              self.serializer.download,
                              response, image)
    def test_download_store_get_not_support(self):
        """Test image download returns HTTPBadRequest.
        Make sure that serializer returns 400 bad request error in case of
        getting images from this store is not supported at specified location.
        """
        with mock.patch.object(glance.api.policy.ImageProxy,
                               'get_data') as mock_get_data:
            mock_get_data.side_effect = glance_store.StoreGetNotSupported()
            request = wsgi.Request.blank('/')
            response = webob.Response()
            response.request = request
            image = FakeImage(size=3, data=iter('ZZZ'))
            image.get_data = mock_get_data
            self.assertRaises(webob.exc.HTTPBadRequest,
                              self.serializer.download,
                              response, image)
    def test_download_store_random_get_not_support(self):
        """Test image download returns HTTPBadRequest.
        Make sure that serializer returns 400 bad request error in case of
        getting randomly images from this store is not supported at
        specified location.
        """
        with mock.patch.object(glance.api.policy.ImageProxy,
                               'get_data') as m_get_data:
            err = glance_store.StoreRandomGetNotSupported(offset=0,
                                                          chunk_size=0)
            m_get_data.side_effect = err
            request = wsgi.Request.blank('/')
            response = webob.Response()
            response.request = request
            image = FakeImage(size=3, data=iter('ZZZ'))
            image.get_data = m_get_data
            self.assertRaises(webob.exc.HTTPBadRequest,
                              self.serializer.download,
                              response, image)
    def test_upload(self):
        # A successful upload serializes to an empty 204 response.
        request = webob.Request.blank('/')
        request.environ = {}
        response = webob.Response()
        response.request = request
        self.serializer.upload(response, {})
        self.assertEqual(204, response.status_int)
        self.assertEqual('0', response.headers['Content-Length'])
| {
"content_hash": "a8cf4dd3c886b1ae2a231f7024ccf085",
"timestamp": "",
"source": "github",
"line_count": 574,
"max_line_length": 79,
"avg_line_length": 41.01567944250871,
"alnum_prop": 0.6055303062481417,
"repo_name": "darren-wang/gl",
"id": "3bbb0c882f1815b0a81a0eb9228e881764040846",
"size": "24179",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "glance/tests/unit/v2/test_image_data_resource.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3866461"
},
{
"name": "Shell",
"bytes": "7860"
}
],
"symlink_target": ""
} |
"""The kodi component."""
import asyncio
import voluptuous as vol
from homeassistant.components.kodi.const import DOMAIN
from homeassistant.components.media_player.const import DOMAIN as MP_DOMAIN
from homeassistant.const import ATTR_ENTITY_ID, CONF_PLATFORM
from homeassistant.helpers import config_validation as cv
# Service names exposed by this integration.
SERVICE_ADD_MEDIA = "add_to_playlist"
SERVICE_CALL_METHOD = "call_method"
# Service-call attribute keys.
ATTR_MEDIA_TYPE = "media_type"
ATTR_MEDIA_NAME = "media_name"
ATTR_MEDIA_ARTIST_NAME = "artist_name"
ATTR_MEDIA_ID = "media_id"
ATTR_METHOD = "method"
# Base schema: every Kodi service targets one or more media_player entities.
MEDIA_PLAYER_SCHEMA = vol.Schema({ATTR_ENTITY_ID: cv.comp_entity_ids})
KODI_ADD_MEDIA_SCHEMA = MEDIA_PLAYER_SCHEMA.extend(
    {
        vol.Required(ATTR_MEDIA_TYPE): cv.string,
        vol.Optional(ATTR_MEDIA_ID): cv.string,
        vol.Optional(ATTR_MEDIA_NAME): cv.string,
        vol.Optional(ATTR_MEDIA_ARTIST_NAME): cv.string,
    }
)
# call_method allows arbitrary extra keys, passed through to the player.
KODI_CALL_METHOD_SCHEMA = MEDIA_PLAYER_SCHEMA.extend(
    {vol.Required(ATTR_METHOD): cv.string}, extra=vol.ALLOW_EXTRA
)
# Maps each service name to the entity method it invokes and its schema.
SERVICE_TO_METHOD = {
    SERVICE_ADD_MEDIA: {
        "method": "async_add_media_to_playlist",
        "schema": KODI_ADD_MEDIA_SCHEMA,
    },
    SERVICE_CALL_METHOD: {
        "method": "async_call_method",
        "schema": KODI_CALL_METHOD_SCHEMA,
    },
}
async def async_setup(hass, config):
    """Set up the Kodi integration."""
    # Only register the shared services when at least one media_player
    # platform entry in the configuration is for Kodi.
    if any(
        ((CONF_PLATFORM, DOMAIN) in cfg.items() for cfg in config.get(MP_DOMAIN, []))
    ):
        # Register the Kodi media_player services
        async def async_service_handler(service):
            """Map services to methods on MediaPlayerDevice."""
            method = SERVICE_TO_METHOD.get(service.service)
            if not method:
                return
            # Forward every field except the entity selector as a kwarg.
            params = {
                key: value for key, value in service.data.items() if key != "entity_id"
            }
            entity_ids = service.data.get("entity_id")
            if entity_ids:
                target_players = [
                    player
                    for player in hass.data[DOMAIN].values()
                    if player.entity_id in entity_ids
                ]
            else:
                # No entity filter given: apply to every known Kodi player.
                target_players = hass.data[DOMAIN].values()
            update_tasks = []
            for player in target_players:
                await getattr(player, method["method"])(**params)
            # After the calls, refresh state for players that poll.
            for player in target_players:
                if player.should_poll:
                    update_coro = player.async_update_ha_state(True)
                    update_tasks.append(update_coro)
            if update_tasks:
                await asyncio.wait(update_tasks)
        for service in SERVICE_TO_METHOD:
            schema = SERVICE_TO_METHOD[service]["schema"]
            hass.services.async_register(
                DOMAIN, service, async_service_handler, schema=schema
            )
    # Return boolean to indicate that initialization was successful.
    return True
| {
"content_hash": "1a61f1ce14cb1075cfa06550598dc666",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 87,
"avg_line_length": 32.40659340659341,
"alnum_prop": 0.6052899287894201,
"repo_name": "Teagan42/home-assistant",
"id": "1f2d3cb5cd09568a1860c90ae95a06b0f76f910c",
"size": "2949",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/kodi/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "19774313"
},
{
"name": "Shell",
"bytes": "6846"
}
],
"symlink_target": ""
} |
from concurrent.futures import ThreadPoolExecutor
from functools import partial, wraps
import time
import tornado.ioloop
import tornado.web
# Shared pool for pushing blocking work off the IOLoop thread.
EXECUTOR = ThreadPoolExecutor(max_workers=4)
def unblock(f):
    """Decorator: run a blocking handler method on EXECUTOR and finish
    the request from the IOLoop thread once its return value is ready.
    """
    @tornado.web.asynchronous
    @wraps(f)
    def wrapper(*args, **kwargs):
        handler = args[0]
        def deliver(future):
            # Back on the IOLoop thread: write the result and close out.
            handler.write(future.result())
            handler.finish()
        def on_done(future):
            # Called on the executor thread; hop back onto the IOLoop.
            tornado.ioloop.IOLoop.instance().add_callback(
                partial(deliver, future))
        EXECUTOR.submit(partial(f, *args, **kwargs)).add_done_callback(on_done)
    return wrapper
class MainHandler(tornado.web.RequestHandler):
    """Responds immediately with a greeting plus the current timestamp."""
    def get(self):
        now = time.time()
        self.write("Hello, world %s" % now)
class SleepHandler(tornado.web.RequestHandler):
    """Sleeps for *n* seconds off the IOLoop thread via @unblock."""
    @unblock
    def get(self, n):
        # Blocking sleep is safe here: @unblock runs it on the executor.
        time.sleep(float(n))
        return "Awake! %s" % time.time()
class SleepAsyncHandler(tornado.web.RequestHandler):
    """Hand-rolled equivalent of SleepHandler without the @unblock helper."""
    @tornado.web.asynchronous
    def get(self, n):
        def finish_with(future):
            # On the IOLoop thread again: emit the result and finish.
            self.write(future.result())
            self.finish()
        def schedule(future):
            tornado.ioloop.IOLoop.instance().add_callback(
                partial(finish_with, future))
        EXECUTOR.submit(partial(self.get_, n)).add_done_callback(schedule)
    def get_(self, n):
        # The blocking part, executed on the worker pool.
        time.sleep(float(n))
        return "Awake! %s" % time.time()
# Route table: a fast handler plus the blocking (decorator-offloaded) and
# hand-rolled async variants of the sleep endpoint for comparison.
application = tornado.web.Application([
    (r"/", MainHandler),
    (r"/sleep/(\d+)", SleepHandler),
    (r"/sleep_async/(\d+)", SleepAsyncHandler),
])
if __name__ == "__main__":
    application.listen(9999)
tornado.ioloop.IOLoop.instance().start() | {
"content_hash": "bf37aaca92c4fa3381a4144e492e1b32",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 100,
"avg_line_length": 23.11111111111111,
"alnum_prop": 0.6129807692307693,
"repo_name": "tgonzales/tornado-handson",
"id": "4919dae4738e8be4e6f1b7e8c4bde9d6190f70e5",
"size": "1664",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "no_blocking.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11621"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import re
try:
from urllib.parse import urljoin, urlencode
except ImportError:
from urllib import urlencode
from urlparse import urljoin
try:
from django.db.models import get_model
except ImportError:
from django.apps import apps
get_model = apps.get_model
from django.conf import settings
from django.contrib.sites.models import Site
from django.core.exceptions import ImproperlyConfigured
# Fallback variant map used when settings.PAYMENT_VARIANTS is absent.
PAYMENT_VARIANTS = {
    'default': ('payments.dummy.DummyProvider', {})}
PAYMENT_HOST = getattr(settings, 'PAYMENT_HOST', None)
PAYMENT_USES_SSL = getattr(settings, 'PAYMENT_USES_SSL', False)
# Without an explicit host we fall back to the Sites framework, so it
# must be installed in that case.
if not PAYMENT_HOST:
    if not 'django.contrib.sites' in settings.INSTALLED_APPS:
        raise ImproperlyConfigured('The PAYMENT_HOST setting without '
                                   'the sites app must not be empty.')
def get_base_url():
    '''
    Return the scheme and host prefix used to build absolute payment URLs.
    Uses PAYMENT_HOST when configured, otherwise the current Site domain.
    '''
    protocol = 'https' if PAYMENT_USES_SSL else 'http'
    host = PAYMENT_HOST
    if not host:
        host = Site.objects.get_current().domain
    return '%s://%s' % (protocol, host)
class BasicProvider(object):
    '''
    Base class describing the payment provider API.
    It should not be instantiated directly; obtain configured instances
    through the factory instead.  Provider subclasses implement the
    NotImplementedError stubs below.
    '''
    _method = 'post'
    def __init__(self, capture=True):
        self._capture = capture
    def get_action(self, payment):
        '''URL the payment form should be submitted to.'''
        return self.get_return_url(payment)
    def get_hidden_fields(self, payment):
        '''
        Convert *payment* into a dict of transaction data.
        Use get_form instead to obtain a form suitable for templates.
        Overload this in provider subclasses to supply provider-specific
        data.
        '''
        raise NotImplementedError()
    def get_form(self, payment, data=None):
        '''
        Convert *payment* into a form suitable for Django templates.
        '''
        from .forms import PaymentForm
        return PaymentForm(self.get_hidden_fields(payment),
                           self.get_action(payment), self._method)
    def process_data(self, payment, request):
        '''
        Process callback request from a payment provider.
        '''
        raise NotImplementedError()
    def get_token_from_request(self, payment, request):
        '''
        Return payment token from provider request.
        '''
        raise NotImplementedError()
    def get_return_url(self, payment, extra_data=None):
        '''Absolute URL the provider should redirect back to.'''
        url = urljoin(get_base_url(), payment.get_process_url())
        if not extra_data:
            return url
        return '%s?%s' % (url, urlencode(extra_data))
    def capture(self, payment, amount=None):
        raise NotImplementedError()
    def release(self, payment):
        raise NotImplementedError()
    def refund(self, payment, amount=None):
        raise NotImplementedError()
# Cache of instantiated providers, keyed by variant name.
PROVIDER_CACHE = {}
def provider_factory(variant):
    '''
    Return the provider instance configured for *variant*, creating and
    caching it on first use.
    '''
    variants = getattr(settings, 'PAYMENT_VARIANTS', PAYMENT_VARIANTS)
    handler, config = variants.get(variant, (None, None))
    if not handler:
        raise ValueError('Payment variant does not exist: %s' %
                         (variant,))
    if variant not in PROVIDER_CACHE:
        # Lazily import the provider class from its dotted path.
        module_path, class_name = handler.rsplit('.', 1)
        module = __import__(
            str(module_path), globals(), locals(), [str(class_name)])
        provider_class = getattr(module, class_name)
        PROVIDER_CACHE[variant] = provider_class(**config)
    return PROVIDER_CACHE[variant]
def get_payment_model():
    '''
    Return the Payment model class configured via settings.PAYMENT_MODEL.
    '''
    try:
        app_label, model_name = settings.PAYMENT_MODEL.split('.')
    except (ValueError, AttributeError):
        raise ImproperlyConfigured('PAYMENT_MODEL must be of the form '
                                   '"app_label.model_name"')
    model = get_model(app_label, model_name)
    if model is None:
        raise ImproperlyConfigured(
            'PAYMENT_MODEL refers to model "%s" that has not been installed' %
            settings.PAYMENT_MODEL)
    return model
# (pattern, type slug, display name) triples; first match wins.
CARD_TYPES = [
    (r'^4[0-9]{12}(?:[0-9]{3})?$', 'visa', 'VISA'),
    (r'^5[1-5][0-9]{14}$', 'mastercard', 'MasterCard'),
    (r'^6(?:011|5[0-9]{2})[0-9]{12}$', 'discover', 'Discover'),
    (r'^3[47][0-9]{13}$', 'amex', 'American Express'),
    (r'^(?:(?:2131|1800|35\d{3})\d{11})$', 'jcb', 'JCB'),
    (r'^(?:3(?:0[0-5]|[68][0-9])[0-9]{11})$', 'diners', 'Diners Club'),
    (r'^(?:5[0678]\d\d|6304|6390|67\d\d)\d{8,15}$', 'maestro', 'Maestro')]
def get_credit_card_issuer(number):
    '''
    Return a ``(type_slug, display_name)`` pair for a card *number*, or
    ``(None, None)`` when no known issuer pattern matches.
    '''
    for pattern, slug, display_name in CARD_TYPES:
        if re.match(pattern, number):
            return slug, display_name
    return None, None
| {
"content_hash": "308e9e94cb3c85a9eaf1e773be2b87b5",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 78,
"avg_line_length": 32.48026315789474,
"alnum_prop": 0.6169738707717237,
"repo_name": "artursmet/django-payments",
"id": "649fbae950737a13e5e6926948d977283df8a4eb",
"size": "4937",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "payments/core.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "899"
},
{
"name": "JavaScript",
"bytes": "2625"
},
{
"name": "Python",
"bytes": "160376"
}
],
"symlink_target": ""
} |
import urllib, urllib2, json, base64, datetime
from pprint import pprint
class FreedomPop:
    """Minimal Python 2 client for the FreedomPop REST API.

    Handles OAuth password-grant token acquisition/refresh and exposes
    thin wrappers around the read-only account endpoints.
    """
    # OAuth state; class-level defaults are overwritten per instance
    # once a token is fetched.
    refreshToken = None
    token = None  # NOTE(review): appears unused — accessToken is what is stored/sent.
    tokenExpireTimestamp = None
    accessToken = None
    # Client credentials sent via HTTP Basic auth when requesting tokens.
    _apiUsername = "3726328870"
    _apiPassword = "pNp6TIgVm4viVadoyoUdxbsrfmiBwudN"
    endPoint = "https://api.freedompop.com"
    def __init__(self, username, password):
        # Account credentials used for the password grant.
        self.username = username
        self.password = password
    def _updateToken(self, url):
        """POST to the token *url* and store the resulting tokens.

        Returns True on success, False on HTTP error.
        """
        # Empty data forces a POST request.
        req = urllib2.Request(url, data = "")
        # encodestring appends newlines, which must be stripped for the header.
        req.add_header("Authorization", "Basic %s" % base64.encodestring("%s:%s" % (self._apiUsername, self._apiPassword)).replace("\n", ""))
        try:
            resp = urllib2.urlopen(req)
            data = json.loads(resp.read())
            self.accessToken = data["access_token"]
            self.refreshToken = data["refresh_token"]
            # expires_in is in seconds, relative to now.
            self.tokenExpireTimestamp = datetime.datetime.now() + datetime.timedelta(seconds = data["expires_in"])
        except urllib2.HTTPError, e:
            print "HTTP Error:", e.code
            print e.read()
            return False
        return True
    def _getAccessToken(self):
        # Initial token via the OAuth "password" grant.
        params = urllib.urlencode(dict(username = self.username, password = self.password, grant_type = "password"))
        url = "%s/auth/token?%s" % (self.endPoint, params)
        return self._updateToken(url)
    def _refreshAccessToken(self):
        # Renew an expired token via the "refresh_token" grant.
        params = urllib.urlencode(dict(refresh_token = self.refreshToken, grant_type = "refresh_token"))
        url = "%s/auth/token?%s" % (self.endPoint, params)
        return self._updateToken(url)
    def initToken(self):
        """Ensure a usable access token exists; returns True when it does."""
        if self.refreshToken is None:
            return self._getAccessToken()
        elif self.tokenExpireTimestamp < datetime.datetime.now():
            return self._refreshAccessToken()
        return True
    def _getBasic(self, command):
        """GET /<command> with the access token and return the parsed JSON.

        NOTE(review): returns {} when token setup fails but False on an
        HTTP error — callers should handle both; confirm this is intended.
        """
        if not self.initToken():
            return {}
        params = urllib.urlencode(dict(accessToken = self.accessToken))
        url = "%s/%s?%s" % (self.endPoint, command, params)
        try:
            buffer = urllib2.urlopen(url).read()
            return json.loads(buffer)
        except urllib2.HTTPError, e:
            print "HTTP Error:", e.code
            print e.read()
            return False
    def getUsage(self):
        """Return the data-usage summary for the account."""
        return self._getBasic("user/usage")
    def getInfo(self):
        """Return account profile information."""
        return self._getBasic("user/info")
    def getPlan(self, planId = None):
        """Return the current plan, or a specific plan when *planId* is given."""
        if planId is None:
            return self._getBasic("plan")
        else:
            return self._getBasic("plan/%s" % planId)
    def getPlans(self):
        """Return the list of available plans."""
        return self._getBasic("plans")
    def getService(self, serviceId = None):
        """Return the current service, or a specific one when *serviceId* is given."""
        if serviceId is None:
            return self._getBasic("service")
        else:
            return self._getBasic("service/%s" % serviceId)
    def getServices(self):
        """Return the list of available services."""
        return self._getBasic("services")
    def getContacts(self):
        return self._getBasic("contacts")
    def getFriends(self):
        return self._getBasic("friends")
    def printMyInfo(self):
        """Print a one-line usage summary (percent used, MB, time to reset)."""
        usage = self.getUsage()
        inMB = 1024 * 1024
        # endTime is reported in milliseconds since the epoch.
        endTime = datetime.datetime.fromtimestamp(usage["endTime"] / 1000)
        delta = endTime - datetime.datetime.now()
        print "Data used: %0.2f%% (%0.2f MB of %0.2f MB) Time until quota reset: %d days %d hours (%s)" % (usage["percentUsed"] * 100, usage["planLimitUsed"] / inMB, usage["totalLimit"] / inMB, delta.days, delta.seconds / 3600, endTime )
def run(username, password):
    """Create a FreedomPop client for the given credentials and print its usage summary."""
    client = FreedomPop(username, password)
    client.printMyInfo()
"""
Full list of methods:
fp.getUsage() # get the data usage, begin/end quota period, quota MB bonuses.
fp.getPlan() # get current plan
fp.getPlans() # list of available plans
fp.getService() # get current subscribed service
fp.getServices() # list of available services
fp.getInfo() # get the account's first/last name, last login time, email address
fp.getFriends() # list of people friended this account
fp.Contacts() # I'm not sure what this for
# there are some other API that can update/write to your account that I reluctant to expose it here...
"""
if __name__ == "__main__":
import sys
if len(sys.argv) < 3:
print "Usage: python api.py <username> <password>"
sys.exit()
run(sys.argv[1], sys.argv[2])
| {
"content_hash": "05f7c153cfbc0135a3d2b6da386304ac",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 237,
"avg_line_length": 35.07874015748032,
"alnum_prop": 0.6074074074074074,
"repo_name": "dodysw/fpopclient",
"id": "802d20ab49f46c9ed134d7d7de6d4cd5eb7683af",
"size": "4455",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
from django.utils.encoding import force_str
from pyquery import PyQuery as pq
from olympia import amo
from olympia.addons.models import Addon, AddonReviewerFlags
from olympia.amo.tests import TestCase, addon_factory, file_factory, version_factory
from olympia.constants.reviewers import REVIEWER_DELAYED_REJECTION_PERIOD_DAYS_DEFAULT
from olympia.reviewers.forms import ReviewForm
from olympia.reviewers.models import AutoApprovalSummary, CannedResponse
from olympia.reviewers.utils import ReviewHelper
from olympia.users.models import UserProfile
from olympia.versions.models import Version
class TestReviewForm(TestCase):
    """Tests for reviewers.forms.ReviewForm.

    Covers which review actions are exposed per add-on/file status and
    reviewer permission, canned responses, required fields, the versions
    <select> widget markup, and the delayed-rejection widget attributes.
    """

    fixtures = ('base/users', 'base/addon_3615')

    def setUp(self):
        super(TestReviewForm, self).setUp()
        self.addon = Addon.objects.get(pk=3615)
        self.version = self.addon.versions.all()[0]

        class FakeRequest:
            user = UserProfile.objects.get(pk=10482)

        self.request = FakeRequest()
        self.file = self.version.files.all()[0]

    def get_form(self, data=None):
        return ReviewForm(
            data=data,
            helper=ReviewHelper(
                request=self.request, addon=self.addon, version=self.version
            ),
        )

    def set_statuses_and_get_actions(self, addon_status, file_status):
        self.file.update(status=file_status)
        self.addon.update(status=addon_status)
        # Need to clear self.version.all_files cache since we updated the file.
        del self.version.all_files
        form = self.get_form()
        return form.helper.get_actions(self.request)

    def test_actions_reject(self):
        self.grant_permission(self.request.user, 'Addons:Review')
        actions = self.set_statuses_and_get_actions(
            addon_status=amo.STATUS_NOMINATED, file_status=amo.STATUS_AWAITING_REVIEW
        )
        action = actions['reject']['details']
        assert force_str(action).startswith('This will reject this version')

    def test_actions_addon_status_null(self):
        # If the add-on is null we only show reply, comment and super review.
        self.grant_permission(self.request.user, 'Addons:Review')
        actions = self.set_statuses_and_get_actions(
            addon_status=amo.STATUS_NULL, file_status=amo.STATUS_NULL
        )
        assert list(actions.keys()) == ['reply', 'super', 'comment']

    def test_actions_addon_status_deleted(self):
        # If the add-on is deleted we only show reply, comment and
        # super review.
        self.grant_permission(self.request.user, 'Addons:Review')
        actions = self.set_statuses_and_get_actions(
            addon_status=amo.STATUS_DELETED, file_status=amo.STATUS_NULL
        )
        assert list(actions.keys()) == ['reply', 'super', 'comment']

    def test_actions_no_pending_files(self):
        # If the add-on has no pending files we only show
        # reject_multiple_versions, reply, comment and super review.
        self.grant_permission(self.request.user, 'Addons:Review')
        actions = self.set_statuses_and_get_actions(
            addon_status=amo.STATUS_APPROVED, file_status=amo.STATUS_APPROVED
        )
        assert list(actions.keys()) == [
            'reject_multiple_versions',
            'reply',
            'super',
            'comment',
        ]
        # The add-on is already disabled so we don't show
        # reject_multiple_versions, but reply/super/comment are still present.
        actions = self.set_statuses_and_get_actions(
            addon_status=amo.STATUS_DISABLED, file_status=amo.STATUS_DISABLED
        )
        assert list(actions.keys()) == ['reply', 'super', 'comment']

    def test_actions_admin_flagged_addon_actions(self):
        AddonReviewerFlags.objects.create(
            addon=self.addon, needs_admin_code_review=True
        )
        # Test with an admin reviewer.
        self.grant_permission(self.request.user, 'Reviews:Admin')
        actions = self.set_statuses_and_get_actions(
            addon_status=amo.STATUS_NOMINATED, file_status=amo.STATUS_AWAITING_REVIEW
        )
        assert 'public' in actions.keys()
        # Test with an non-admin reviewer.
        self.request.user.groupuser_set.all().delete()
        self.grant_permission(self.request.user, 'Addons:Review')
        actions = self.set_statuses_and_get_actions(
            addon_status=amo.STATUS_NOMINATED, file_status=amo.STATUS_AWAITING_REVIEW
        )
        assert 'public' not in actions.keys()

    def test_canned_responses(self):
        self.cr_addon = CannedResponse.objects.create(
            name='addon reason',
            response='addon reason body',
            sort_group='public',
            type=amo.CANNED_RESPONSE_TYPE_ADDON,
        )
        self.cr_theme = CannedResponse.objects.create(
            name='theme reason',
            response='theme reason body',
            sort_group='public',
            type=amo.CANNED_RESPONSE_TYPE_THEME,
        )
        self.grant_permission(self.request.user, 'Addons:Review')
        self.set_statuses_and_get_actions(
            addon_status=amo.STATUS_NOMINATED, file_status=amo.STATUS_AWAITING_REVIEW
        )
        form = self.get_form()
        choices = form.fields['canned_response'].choices[1][1]
        # choices is grouped by the sort_group, where choices[0] is the
        # default "Choose a response..." option.
        # Within that, it's paired by [group, [[response, name],...]].
        # So above, choices[1][1] gets the first real group's list of
        # responses.
        assert len(choices) == 1  # No theme response
        assert self.cr_addon.response in choices[0]
        # Check we get different canned responses for static themes.
        self.grant_permission(self.request.user, 'Addons:ThemeReview')
        self.addon.update(type=amo.ADDON_STATICTHEME)
        form = self.get_form()
        choices = form.fields['canned_response'].choices[1][1]
        assert self.cr_theme.response in choices[0]
        assert len(choices) == 1  # No addon response

    def test_comments_and_action_required_by_default(self):
        self.grant_permission(self.request.user, 'Addons:Review')
        form = self.get_form()
        assert not form.is_bound
        form = self.get_form(data={})
        assert form.is_bound
        assert not form.is_valid()
        assert form.errors == {
            'action': ['This field is required.'],
            'comments': ['This field is required.'],
        }
        # Alter the action to make it not require comments to be sent
        # regardless of what the action actually is, what we want to test is
        # the form behaviour.
        form = self.get_form(data={'action': 'reply'})
        form.helper.actions['reply']['comments'] = False
        assert form.is_bound
        assert form.is_valid()
        assert not form.errors

    def test_versions_queryset(self):
        self.grant_permission(self.request.user, 'Addons:Review')
        # Add a bunch of extra data that shouldn't be picked up.
        addon_factory()
        file_factory(version=self.addon.current_version)
        version_factory(addon=self.addon, channel=amo.RELEASE_CHANNEL_UNLISTED)
        # auto-approve everything (including self.addon.current_version)
        for version in Version.unfiltered.all():
            AutoApprovalSummary.objects.create(
                version=version, verdict=amo.AUTO_APPROVED
            )
        form = self.get_form()
        assert not form.is_bound
        assert form.fields['versions'].required is False
        assert list(form.fields['versions'].queryset) == [self.addon.current_version]

    def test_versions_queryset_contains_pending_files_for_listed(self):
        self.grant_permission(self.request.user, 'Addons:Review')
        addon_factory()  # Extra add-on, shouldn't be included.
        version_factory(
            addon=self.addon,
            channel=amo.RELEASE_CHANNEL_LISTED,
            file_kw={'status': amo.STATUS_AWAITING_REVIEW},
        )
        # auto-approve everything (including self.addon.current_version)
        for version in Version.unfiltered.all():
            AutoApprovalSummary.objects.create(
                version=version, verdict=amo.AUTO_APPROVED
            )
        form = self.get_form()
        assert not form.is_bound
        assert form.fields['versions'].required is False
        assert list(form.fields['versions'].queryset) == list(
            self.addon.versions.all().order_by('pk')
        )
        assert form.fields['versions'].queryset.count() == 2
        content = str(form['versions'])
        doc = pq(content)
        # <select> should have 'data-toggle' class and data-value attribute to
        # show/hide it depending on action in JavaScript.
        select = doc('select')[0]
        # This comparison was previously a bare statement whose result was
        # discarded; assert it as intended.
        assert select.attrib.get('class') == 'data-toggle'
        assert select.attrib.get('data-value') == 'reject_multiple_versions|'
        # <option>s shouldn't, because for listed review they will all be
        # shown. They should still have a value attribute however.
        options = doc('option')
        assert len(options) == 2
        for option in options:
            assert option.attrib.get('class') is None
            assert option.attrib.get('data-value') is None
            assert option.attrib.get('value')

    def test_versions_queryset_contains_pending_files_for_unlisted(self):
        # We also return pending versions for unlisted, but hide some of the
        # versions using JavaScript + some data attributes on each <option>.
        # The queryset should contain both pending and approved versions.
        addon_factory()  # Extra add-on, shouldn't be included.
        pending_version = version_factory(
            addon=self.addon,
            channel=amo.RELEASE_CHANNEL_UNLISTED,
            file_kw={'status': amo.STATUS_AWAITING_REVIEW},
        )
        self.version.update(channel=amo.RELEASE_CHANNEL_UNLISTED)
        # auto-approve everything
        for version in Version.unfiltered.all():
            AutoApprovalSummary.objects.create(
                version=version, verdict=amo.AUTO_APPROVED
            )
        form = self.get_form()
        assert not form.is_bound
        assert form.fields['versions'].required is False
        assert list(form.fields['versions'].queryset) == []
        # With Addons:ReviewUnlisted permission, the reject_multiple_versions
        # action will be available, resetting the queryset of allowed choices.
        self.grant_permission(self.request.user, 'Addons:ReviewUnlisted')
        form = self.get_form()
        assert not form.is_bound
        assert form.fields['versions'].required is False
        assert list(form.fields['versions'].queryset) == list(
            self.addon.versions.all().order_by('pk')
        )
        assert form.fields['versions'].queryset.count() == 2
        content = str(form['versions'])
        doc = pq(content)
        # <select> should have 'data-toggle' class and data-value attribute to
        # show/hide it depending on action in JavaScript.
        select = doc('select')[0]
        # This comparison was previously a bare statement whose result was
        # discarded; assert it as intended (matches the option checks below).
        assert select.attrib.get('class') == 'data-toggle'
        assert select.attrib.get('data-value') == (
            'reject_multiple_versions|block_multiple_versions|'
            'confirm_multiple_versions|'
        )
        # <option>s should as well, and the value depends on which version:
        # the approved one and the pending one should have different values.
        assert len(doc('option')) == 2
        option1 = doc('option[value="%s"]' % self.version.pk)[0]
        assert option1.attrib.get('class') == 'data-toggle'
        assert option1.attrib.get('data-value') == (
            # That version is approved.
            'confirm_multiple_versions|block_multiple_versions|'
        )
        assert option1.attrib.get('value') == str(self.version.pk)
        option2 = doc('option[value="%s"]' % pending_version.pk)[0]
        assert option2.attrib.get('class') == 'data-toggle'
        assert option2.attrib.get('data-value') == (
            # That version is pending.
            'reject_multiple_versions|'
        )
        assert option2.attrib.get('value') == str(pending_version.pk)

    def test_versions_required(self):
        # auto-approve everything (including self.addon.current_version)
        for version in Version.unfiltered.all():
            AutoApprovalSummary.objects.create(
                version=version, verdict=amo.AUTO_APPROVED
            )
        self.grant_permission(self.request.user, 'Addons:Review')
        form = self.get_form(
            data={'action': 'reject_multiple_versions', 'comments': 'lol'}
        )
        form.helper.actions['reject_multiple_versions']['versions'] = True
        assert form.is_bound
        assert not form.is_valid()
        assert form.errors == {'versions': ['This field is required.']}

    def test_delayed_rejection_days_widget_attributes(self):
        # Regular reviewers can't customize the delayed rejection period.
        form = self.get_form()
        widget = form.fields['delayed_rejection_days'].widget
        assert widget.attrs == {
            'min': REVIEWER_DELAYED_REJECTION_PERIOD_DAYS_DEFAULT,
            'max': REVIEWER_DELAYED_REJECTION_PERIOD_DAYS_DEFAULT,
            'readonly': 'readonly',
        }
        # Admin reviewers can customize the delayed rejection period.
        self.grant_permission(self.request.user, 'Reviews:Admin')
        form = self.get_form()
        widget = form.fields['delayed_rejection_days'].widget
        assert widget.attrs == {
            'min': 1,
            'max': 99,
        }
| {
"content_hash": "fb36609cc4bb867817bea19fffca658c",
"timestamp": "",
"source": "github",
"line_count": 314,
"max_line_length": 86,
"avg_line_length": 43.55095541401274,
"alnum_prop": 0.629908592321755,
"repo_name": "bqbn/addons-server",
"id": "1c0a4bfdd04cd7bcedde9684efc651e0d3b4117f",
"size": "13675",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/olympia/reviewers/tests/test_forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "810080"
},
{
"name": "Dockerfile",
"bytes": "2868"
},
{
"name": "HTML",
"bytes": "585550"
},
{
"name": "JavaScript",
"bytes": "1071952"
},
{
"name": "Makefile",
"bytes": "827"
},
{
"name": "PLSQL",
"bytes": "1074"
},
{
"name": "PLpgSQL",
"bytes": "2381"
},
{
"name": "Python",
"bytes": "5323934"
},
{
"name": "SQLPL",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "11171"
},
{
"name": "Smarty",
"bytes": "1503"
}
],
"symlink_target": ""
} |
from docutils import nodes, utils
import re
def tag_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
    """Render ``:tag:`class::text``` (or ``:tag:`text```) as an HTML span.

    A leading ``class::`` prefix selects the CSS modifier; it falls back
    to ``default`` when absent or empty.
    """
    parts = utils.unescape(text).split('::')
    css_class = 'default'
    if len(parts) == 2:
        if parts[0]:
            css_class = parts[0]
        body = parts[1]
    else:
        body = parts[0]
    markup = '<span class="tag tag-%s">%s</span>' % (css_class, body)
    node = nodes.raw('', markup, format='html')
    return [node], []
def awesome_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
    """Render ``:awesome:`icon``` as a Font Awesome ``<i>`` element."""
    markup = '<i class="fa fa-%s"></i>' % utils.unescape(text)
    return [nodes.raw('', markup, format='html')], []
def clear_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
    """Render ``:clear:`side``` as a ``<div class="clear-side">`` element."""
    markup = '<div class="clear-%s"></div>' % utils.unescape(text)
    return [nodes.raw('', markup, format='html')], []
def setup(app):
    # Sphinx extension entry point: register the custom inline roles and
    # the stylesheets they rely on.
    app.add_role('tag', tag_role)
    app.add_stylesheet('css/tag.css')
    app.add_role('awesome', awesome_role)
    app.add_stylesheet('http://maxcdn.bootstrapcdn.com/font-awesome/4.3.0/css/font-awesome.min.css')
    app.add_role('clear', clear_role)
app.add_stylesheet('css/clear.css') | {
"content_hash": "27066203ff0cdaae55ce372ed2b61e3e",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 100,
"avg_line_length": 32.926829268292686,
"alnum_prop": 0.6014814814814815,
"repo_name": "Open-Wide/sphinx_rtd_theme",
"id": "4d67ff21c63b92b550f7eff3873cad8caececfb3",
"size": "1375",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sphinx_rtd_theme/directives.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "236979"
},
{
"name": "HTML",
"bytes": "20137"
},
{
"name": "JavaScript",
"bytes": "15521"
},
{
"name": "Makefile",
"bytes": "5621"
},
{
"name": "Python",
"bytes": "29307"
},
{
"name": "Ruby",
"bytes": "429"
},
{
"name": "Shell",
"bytes": "121"
},
{
"name": "TeX",
"bytes": "31674"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages

# Trove classifiers describing the panlex_API distribution.
CLASSIFIERS = [
    "Development Status :: 5 - Production/Stable",
    "Programming Language :: Python",
    "Programming Language :: Python :: 3",
    "Operating System :: OS Independent",
    "License :: OSI Approved :: MIT License",
    "Topic :: Software Development :: Libraries :: Python Modules",
]

setup(
    name='panlex_API',
    version='1.2.0',
    author="Maxwell Joslyn; Caroline Glazer; Gary Krug; Alex DelPriore; Ben Yang",
    author_email="info@panlex.org",
    py_modules=["panlex"],
    url="https://github.com/longnow/panlex_python_API",
    description='Python wrapper for PanLex API',
    install_requires=['ratelimit', 'requests'],
    classifiers=CLASSIFIERS,
)
| {
"content_hash": "d2b20dec2c72c7c9013f8bedad6ee36d",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 122,
"avg_line_length": 47.4,
"alnum_prop": 0.6610407876230661,
"repo_name": "longnow/panlex_python_API",
"id": "0823ed02496ba20ca8671668a9a4422178536dd3",
"size": "711",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6513"
}
],
"symlink_target": ""
} |
import unittest
from streamlink.plugins.rtve import Rtve
class TestPluginRtve(unittest.TestCase):
    """URL-matching tests for the Rtve plugin."""

    def test_can_handle_url(self):
        positive_urls = [
            'http://www.rtve.es/directo/la-1',
            'http://www.rtve.es/directo/la-2/',
            'http://www.rtve.es/directo/teledeporte/',
            'http://www.rtve.es/directo/canal-24h/',
        ]
        for candidate in positive_urls:
            self.assertTrue(Rtve.can_handle_url(candidate))

    def test_can_handle_url_negative(self):
        negative_urls = [
            'https://www.rtve.es',
        ]
        for candidate in negative_urls:
            self.assertFalse(Rtve.can_handle_url(candidate))
| {
"content_hash": "a4d5be4e9bffc720e4c013065eeee512",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 54,
"avg_line_length": 30.545454545454547,
"alnum_prop": 0.5803571428571429,
"repo_name": "wlerin/streamlink",
"id": "42b1c5e683d32dc7b95cebfb13cb16f4df8c4995",
"size": "672",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/plugins/test_rtve.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "838"
},
{
"name": "Python",
"bytes": "1538552"
},
{
"name": "Shell",
"bytes": "18707"
}
],
"symlink_target": ""
} |
"""
Created on Sun Jun 28 16:53:49 2015
@author: warpinski.matthew
"""
import re
# id,"title",description,manufacturer,price — title is quoted, price is
# captured (group 5) but not used in the product string below.
DATAFILE_PATTERN = '^(.+),"(.+)",(.*),(.*),(.*)'


def removeQuotes(s):
    """ Remove quotation marks from an input string
    Args:
        s (str): input string that might have the quote "" characters
    Returns:
        str: a string without the quote characters
    """
    return ''.join(i for i in s if i != '"')


def parseDatafileLine(datafileLine):
    """ Parse a line of the data file using the specified regular expression pattern
    Args:
        datafileLine (str): input string that is a line from the data file
    Returns:
        A pair (value, code): ((id, "title description manufacturer"), 1)
        on success, (line, 0) for the header row, (line, -1) on failure.
    """
    match = re.search(DATAFILE_PATTERN, datafileLine)
    if match is None:
        # print() call form works under both Python 2 and Python 3
        # (the original bare print statement is Python-2-only syntax).
        print('Invalid datafile line: %s' % datafileLine)
        return (datafileLine, -1)
    elif match.group(1) == '"id"':
        print('Header datafile line: %s' % datafileLine)
        return (datafileLine, 0)
    else:
        product = '%s %s %s' % (match.group(2), match.group(3), match.group(4))
        return ((removeQuotes(match.group(1)), product), 1)
import sys
import os
from test_helper import Test
# File locations for the CS100 lab3 entity-resolution datasets.
baseDir = os.path.join('data')
inputPath = os.path.join('cs100', 'lab3')
GOOGLE_PATH = 'Google.csv'
GOOGLE_SMALL_PATH = 'Google_small.csv'
AMAZON_PATH = 'Amazon.csv'
AMAZON_SMALL_PATH = 'Amazon_small.csv'
GOLD_STANDARD_PATH = 'Amazon_Google_perfectMapping.csv'
STOPWORDS_PATH = 'stopwords.txt'
def parseData(filename):
    """Load *filename* into a cached RDD of parseDatafileLine results.

    Args:
        filename (str): input file name of the data file
    Returns:
        RDD: a RDD of parsed lines
    """
    raw_lines = sc.textFile(filename, 4, 0)
    parsed_lines = raw_lines.map(parseDatafileLine)
    return parsed_lines.cache()
def loadData(path):
    """ Load a data file
    Args:
        path (str): input file name of the data file
    Returns:
        RDD: a RDD of parsed valid lines
    Raises:
        AssertionError: if any line failed to parse or the only skipped
        line is not exactly the header.
    """
    filename = os.path.join(baseDir, inputPath, path)
    raw = parseData(filename).cache()
    failed = (raw
              .filter(lambda s: s[1] == -1)
              .map(lambda s: s[0]))
    for line in failed.take(10):
        # print() call form is valid under both Python 2 and Python 3
        # (the original bare print statement is Python-2-only syntax).
        print('%s - Invalid datafile line: %s' % (path, line))
    valid = (raw
             .filter(lambda s: s[1] == 1)
             .map(lambda s: s[0])
             .cache())
    print('%s - Read %d lines, successfully parsed %d lines, failed to parse %d lines' % (path,
                                                                                          raw.count(),
                                                                                          valid.count(),
                                                                                          failed.count()))
    assert failed.count() == 0
    assert raw.count() == (valid.count() + 1)
    return valid
# Load the four datasets and preview a few records from the small ones.
googleSmall = loadData(GOOGLE_SMALL_PATH)
google = loadData(GOOGLE_PATH)
amazonSmall = loadData(AMAZON_SMALL_PATH)
amazon = loadData(AMAZON_PATH)
for line in googleSmall.take(3):
    # print() call form works under both Python 2 and 3 (was a py2-only statement).
    print('google: %s: %s\n' % (line[0], line[1]))
for line in amazonSmall.take(3):
    print('amazon: %s: %s\n' % (line[0], line[1]))
quickbrownfox = 'A quick brown fox jumps over the lazy dog.'
# NOTE: defined by the lab skeleton but not used by simpleTokenize below,
# which keeps '_' as a token character (\W+ would too, but with different
# handling of leading/trailing separators).
split_regex = r'\W+'


def simpleTokenize(string):
    """ A simple implementation of input string tokenization
    Args:
        string (str): input string
    Returns:
        list: a list of lowercase tokens (runs of [a-zA-Z0-9_])
    """
    # Raw string avoids the invalid-escape warning for \s; same pattern.
    return re.sub(r'[^a-zA-Z0-9\s_]+', ' ', string).lower().split()


# print() call form works under both Python 2 and 3 (was a py2-only statement).
print(simpleTokenize(quickbrownfox))  # Should give ['a', 'quick', 'brown', ... ]
# TEST Tokenize a String (1a)
# Course autograder checks; `Test` comes from test_helper (lab environment).
Test.assertEquals(simpleTokenize(quickbrownfox),
                  ['a','quick','brown','fox','jumps','over','the','lazy','dog'],
                  'simpleTokenize should handle sample text')
Test.assertEquals(simpleTokenize(' '), [], 'simpleTokenize should handle empty string')
Test.assertEquals(simpleTokenize('!!!!123A/456_B/789C.123A'), ['123a','456_b','789c','123a'],
                  'simpleTokenize should handle puntuations and lowercase result')
Test.assertEquals(simpleTokenize('fox fox'), ['fox', 'fox'],
                  'simpleTokenize should not remove duplicates')
# TODO: Replace <FILL IN> with appropriate code
# Load the stop-word list used by tokenize() below.
stopfile = os.path.join(baseDir, inputPath, STOPWORDS_PATH)
stopwords = set(sc.textFile(stopfile).collect())
print 'These are the stopwords: %s' % stopwords
def tokenize(string):
    """ An implementation of input string tokenization that excludes stopwords
    Args:
        string (str): input string
    Returns:
        list: a list of tokens without stopwords
    """
    # Reuse simpleTokenize instead of duplicating its regex + lower + split
    # logic; relies on the module-level `stopwords` set loaded above.
    return [word for word in simpleTokenize(string) if word not in stopwords]


# print() call form works under both Python 2 and 3 (was a py2-only statement).
print(tokenize(quickbrownfox))  # Should give ['quick', 'brown', ... ]
# TEST Removing stopwords (1b)
# Course autograder checks; then tokenize each small dataset into
# (recordId, tokens) pair RDDs.
Test.assertEquals(tokenize("Why a the?"), [], 'tokenize should remove all stopwords')
Test.assertEquals(tokenize("Being at the_?"), ['the_'], 'tokenize should handle non-stopwords')
Test.assertEquals(tokenize(quickbrownfox), ['quick','brown','fox','jumps','lazy','dog'],
                  'tokenize should handle sample text')
# TODO: Replace <FILL IN> with appropriate code
amazonRecToToken = amazonSmall.map(lambda x: (x[0], tokenize(x[1])))
googleRecToToken = googleSmall.map(lambda x: (x[0], tokenize(x[1])))
def countTokens(vendorRDD):
    """ Count and return the number of tokens
    Args:
        vendorRDD (RDD of (recordId, tokenizedValue)): Pair tuple of record ID to tokenized output
    Returns:
        count: count of all tokens
    """
    # `lambda (x, y): ...` tuple parameters are Python-2-only syntax
    # (a SyntaxError on Python 3); index the pair instead.
    return (vendorRDD
            .map(lambda pair: len(pair[1]))
            .sum()
            )
# Sum token counts over both tokenized datasets (autograder checkpoint 1c).
totalTokens = countTokens(amazonRecToToken) + countTokens(googleRecToToken)
print 'There are %s tokens in the combined datasets' % totalTokens
# TEST Tokenizing the small datasets (1c)
Test.assertEquals(totalTokens, 22520, 'incorrect totalTokens')
# TODO: Replace <FILL IN> with appropriate code
def findBiggestRecord(vendorRDD):
    """ Find and return the record with the largest number of tokens
    Args:
        vendorRDD (RDD of (recordId, tokens)): input Pair Tuple of record ID and tokens
    Returns:
        list: a list of 1 Pair Tuple of record ID and tokens
    """
    # take(1) fetches only the top record; the original collect()ed the
    # whole sorted dataset to the driver even though the docstring (and the
    # callers) only need a one-element list.
    return (vendorRDD
            .sortBy(lambda record: len(record[1]), ascending=False)
            .take(1)
            )
# Report the Amazon record with the most tokens (autograder checkpoint 1d).
biggestRecordAmazon = findBiggestRecord(amazonRecToToken)
print 'The Amazon record with ID "%s" has the most tokens (%s)' % (biggestRecordAmazon[0][0],
                                                                   len(biggestRecordAmazon[0][1]))
# TEST Amazon record with the most tokens (1d)
Test.assertEquals(biggestRecordAmazon[0][0], 'b000o24l3q', 'incorrect biggestRecordAmazon')
Test.assertEquals(len(biggestRecordAmazon[0][1]), 1547, 'incorrect len for biggestRecordAmazon')
# TODO: Replace <FILL IN> with appropriate code
def tf(tokens):
    """ Compute TF
    Args:
        tokens (list of str): input list of tokens from tokenize
    Returns:
        dictionary: a dictionary of tokens to its TF values
    Raises:
        ZeroDivisionError: for an empty token list (as before).
    """
    # Single O(n) counting pass; the original called tokens.count() once
    # per unique token, which is O(n^2) overall.
    total = float(len(tokens))  # float() keeps true division under Python 2
    counts = {}
    for token in tokens:
        counts[token] = counts.get(token, 0) + 1
    return dict((token, count / total) for token, count in counts.items())
# Autograder checkpoints 2a/2b, then build the combined small corpus RDD.
print tf(tokenize(quickbrownfox)) # Should give { 'quick': 0.1666 ... }
# TEST Implement a TF function (2a)
tf_test = tf(tokenize(quickbrownfox))
Test.assertEquals(tf_test, {'brown': 0.16666666666666666, 'lazy': 0.16666666666666666,
                            'jumps': 0.16666666666666666, 'fox': 0.16666666666666666,
                            'dog': 0.16666666666666666, 'quick': 0.16666666666666666},
                  'incorrect result for tf on sample text')
tf_test2 = tf(tokenize('one_ one_ two!'))
Test.assertEquals(tf_test2, {'one_': 0.6666666666666666, 'two': 0.3333333333333333},
                  'incorrect result for tf test')
# TODO: Replace <FILL IN> with appropriate code
corpusRDD = amazonRecToToken.union(googleRecToToken)
print(corpusRDD.count())
# TEST Create a corpus (2b)
Test.assertEquals(corpusRDD.count(), 400, 'incorrect corpusRDD.count()')
def idfs(corpus):
    """ Compute IDF
    Args:
        corpus (RDD): input corpus of (recordId, tokens) pairs
    Returns:
        RDD: a RDD of (token, IDF value) where IDF = N / #docs containing the token
    """
    N = corpus.count()
    # `lambda (x, y): ...` tuple parameters are Python-2-only syntax;
    # index the pair instead. set() makes each document contribute each
    # of its tokens exactly once.
    uniqueTokens = corpus.flatMap(lambda record: set(record[1]))
    tokenCountPairTuple = uniqueTokens.map(lambda token: (token, 1.0))
    tokenSumPairTuple = tokenCountPairTuple.reduceByKey(lambda a, b: a + b)
    return tokenSumPairTuple.map(lambda pair: (pair[0], N / pair[1]))
# Compute IDFs on the combined small corpus; autograder checkpoint 2c,
# then a log-scale histogram of the IDF distribution.
idfsSmall = idfs(amazonRecToToken.union(googleRecToToken))
uniqueTokenCount = idfsSmall.count()
#print amazonRecToToken.union(googleRecToToken).take(1)
#print idfsSmall.sortBy(lambda x: x[1],ascending = False).collect()
print 'There are %s unique tokens in the small datasets.' % uniqueTokenCount
# TEST Implement an IDFs function (2c)
Test.assertEquals(uniqueTokenCount, 4772, 'incorrect uniqueTokenCount')
tokenSmallestIdf = idfsSmall.takeOrdered(1, lambda s: s[1])[0]
Test.assertEquals(tokenSmallestIdf[0], 'software', 'incorrect smallest IDF token')
Test.assertTrue(abs(tokenSmallestIdf[1] - 4.25531914894) < 0.0000000001,
                'incorrect smallest IDF value')
smallIDFTokens = idfsSmall.takeOrdered(11, lambda s: s[1])
print smallIDFTokens
import matplotlib.pyplot as plt
small_idf_values = idfsSmall.map(lambda s: s[1]).collect()
fig = plt.figure(figsize=(8,3))
plt.hist(small_idf_values, 50, log=True)
pass
# TODO: Replace <FILL IN> with appropriate code
def tfidf(tokens, idfs):
    """ Compute TF-IDF
    Args:
        tokens (list of str): input list of tokens from tokenize
        idfs (dictionary): record to IDF value
    Returns:
        dictionary: a dictionary of records to TF-IDF values
    """
    tfs = tf(tokens)
    # Iterate the distinct tokens (the keys of tfs) rather than the raw
    # token list, which recomputed the same entry once per duplicate token.
    # A token absent from idfs raises KeyError, as before.
    return dict((token, tfs[token] * idfs[token]) for token in tfs)
# Spot-check TF-IDF on one Amazon record (autograder checkpoint 2f).
recb000hkgj8k = amazonRecToToken.filter(lambda x: x[0] == 'b000hkgj8k').collect()[0][1]
idfsSmallWeights = idfsSmall.collectAsMap()
rec_b000hkgj8k_weights = tfidf(recb000hkgj8k, idfsSmallWeights)
print 'Amazon record "b000hkgj8k" has tokens and weights:\n%s' % rec_b000hkgj8k_weights
# TEST Implement a TF-IDF function (2f)
Test.assertEquals(rec_b000hkgj8k_weights,
                  {'autocad': 33.33333333333333, 'autodesk': 8.333333333333332,
                   'courseware': 66.66666666666666, 'psg': 33.33333333333333,
                   '2007': 3.5087719298245617, 'customizing': 16.666666666666664,
                   'interface': 3.0303030303030303}, 'incorrect rec_b000hkgj8k_weights')
# TODO: Replace <FILL IN> with appropriate code
import math
def dotprod(a, b):
    """ Compute dot product
    Args:
        a (dictionary): first dictionary of record to value
        b (dictionary): second dictionary of record to value
    Returns:
        dotProd: sum of products over the keys the two dictionaries share
    """
    return sum(a[key] * b[key] for key in set(a) & set(b))


def norm(a):
    """ Compute square root of the dot product of *a* with itself
    Args:
        a (dictionary): a dictionary of record to value
    Returns:
        norm: the Euclidean norm of the dictionary's values
    """
    return sum(value ** 2 for value in a.values()) ** 0.5


def cossim(a, b):
    """ Compute cosine similarity
    Args:
        a (dictionary): first dictionary of record to value
        b (dictionary): second dictionary of record to value
    Returns:
        cossim: dot product of the two dictionaries divided by the norm
        of the first and then by the norm of the second
    """
    return dotprod(a, b) / norm(a) / norm(b)
# Autograder checkpoint 3a for dotprod/norm.
testVec1 = {'foo': 2, 'bar': 3, 'baz': 5 }
testVec2 = {'foo': 1, 'bar': 0, 'baz': 20 }
dp = dotprod(testVec1, testVec2)
nm = norm(testVec1)
print dp, nm
# TEST Implement the components of a cosineSimilarity function (3a)
Test.assertEquals(dp, 102, 'incorrect dp')
Test.assertTrue(abs(nm - 6.16441400297) < 0.0000001, 'incorrrect nm')
# TODO: Replace <FILL IN> with appropriate code
def cosineSimilarity(string1, string2, idfsDictionary):
    """Compute the cosine similarity between two strings.

    Args:
        string1 (str): first string
        string2 (str): second string
        idfsDictionary (dict): mapping of token to IDF value

    Returns:
        cosine similarity of the TF-IDF weight vectors of the two strings
    """
    weights1 = tfidf(tokenize(string1), idfsDictionary)
    weights2 = tfidf(tokenize(string2), idfsDictionary)
    return cossim(weights1, weights2)
# Spot-check: similarity of two related product names on the small dataset.
cossimAdobe = cosineSimilarity('Adobe Photoshop',
                               'Adobe Illustrator',
                               idfsSmallWeights)
print cossimAdobe
# TEST Implement a cosineSimilarity function (3b)
Test.assertTrue(abs(cossimAdobe - 0.0577243382163) < 0.0000001, 'incorrect cossimAdobe')
# TODO: Replace <FILL IN> with appropriate code
# Every (Google record, Amazon record) pair from the small datasets; cached
# because it is scored repeatedly below.
crossSmall = (googleSmall
              .cartesian(amazonSmall)
              .cache())
def computeSimilarity(record):
    """Compute similarity on a combination record.

    Args:
        record: a pair, (google record, amazon record)

    Returns:
        pair: (google URL, amazon ID, cosine similarity value)
    """
    googleRec, amazonRec = record
    # Each record is (identifier, text value).
    cs = cosineSimilarity(googleRec[1], amazonRec[1], idfsSmallWeights)
    return (googleRec[0], amazonRec[0], cs)
# Score every cross-product pair; cached because similar() filters it
# once per lookup.
similarities = (crossSmall
                .map(lambda x: computeSimilarity(x))
                .cache())
def similar(amazonID, googleURL):
    """Return the similarity value for one (Amazon ID, Google URL) pair.

    Args:
        amazonID: amazon ID
        googleURL: google URL

    Returns:
        cosine similarity value for the requested pair
    """
    # similarities records are (google URL, amazon ID, cosine similarity).
    match = similarities.filter(
        lambda record: record[0] == googleURL and record[1] == amazonID)
    return match.collect()[0][2]
# Verify one known Amazon/Google pair against the expected similarity.
similarityAmazonGoogle = similar('b000o24l3q', 'http://www.google.com/base/feeds/snippets/17242822440574356561')
print 'Requested similarity is %s.' % similarityAmazonGoogle
# TEST Perform Entity Resolution (3c)
Test.assertTrue(abs(similarityAmazonGoogle - 0.000303171940451) < 0.0000001,
                'incorrect similarityAmazonGoogle')
def computeSimilarityBroadcast(record):
    """Compute similarity on a combination record, using a Broadcast variable.

    Args:
        record: a pair, (google record, amazon record)

    Returns:
        pair: (google URL, amazon ID, cosine similarity value)
    """
    googleRec, amazonRec = record
    # Read the IDF weights from the broadcast variable instead of closing
    # over the driver-side dictionary.
    cs = cosineSimilarity(googleRec[1], amazonRec[1], idfsSmallBroadcast.value)
    return (googleRec[0], amazonRec[0], cs)
# Ship the IDF dictionary to the workers once, then re-score all pairs.
idfsSmallBroadcast = sc.broadcast(idfsSmallWeights)
similaritiesBroadcast = (crossSmall
                         .map(lambda x: computeSimilarityBroadcast(x))
                         .cache())
def similarBroadcast(amazonID, googleURL):
    """Return similarity value, computed using a Broadcast variable.

    Args:
        amazonID: amazon ID
        googleURL: google URL

    Returns:
        cosine similarity value for the requested pair
    """
    match = similaritiesBroadcast.filter(
        lambda record: record[0] == googleURL and record[1] == amazonID)
    return match.collect()[0][2]
# Same lookup as before, now through the broadcast-based pipeline.
similarityAmazonGoogleBroadcast = similarBroadcast('b000o24l3q', 'http://www.google.com/base/feeds/snippets/17242822440574356561')
print 'Requested similarity is %s.' % similarityAmazonGoogleBroadcast
# TEST Perform Entity Resolution with Broadcast Variables (3d)
from pyspark import Broadcast
Test.assertTrue(isinstance(idfsSmallBroadcast, Broadcast), 'incorrect idfsSmallBroadcast')
Test.assertEquals(len(idfsSmallBroadcast.value), 4772, 'incorrect idfsSmallBroadcast value')
Test.assertTrue(abs(similarityAmazonGoogleBroadcast - 0.000303171940451) < 0.0000001,
                'incorrect similarityAmazonGoogle')
GOLDFILE_PATTERN = '^(.+),(.+)'
# Parse each line of a data file using the specified regular expression pattern
def parse_goldfile_line(goldfile_line):
    """ Parse a line from the 'golden standard' data file
    Args:
        goldfile_line: a line of data
    Returns:
        ((amazon ID + ' ' + google URL, 'gold'), 1) on success,
        (goldfile_line, 0) for the header line, or
        (goldfile_line, -1) if the line does not match GOLDFILE_PATTERN
    """
    match = re.search(GOLDFILE_PATTERN, goldfile_line)
    if match is None:
        print 'Invalid goldfile line: %s' % goldfile_line
        return (goldfile_line, -1)
    elif match.group(1) == '"idAmazon"':
        # The CSV header row is flagged with 0 so it can be filtered out.
        print 'Header datafile line: %s' % goldfile_line
        return (goldfile_line, 0)
    else:
        # Composite key "amazonID googleURL" matches the keying used by sims.
        key = '%s %s' % (removeQuotes(match.group(1)), removeQuotes(match.group(2)))
        return ((key, 'gold'), 1)
# Load and parse the gold-standard file; split out failures and the header.
goldfile = os.path.join(baseDir, inputPath, GOLD_STANDARD_PATH)
gsRaw = (sc
         .textFile(goldfile)
         .map(parse_goldfile_line)
         .cache())
gsFailed = (gsRaw
            .filter(lambda s: s[1] == -1)
            .map(lambda s: s[0]))
for line in gsFailed.take(10):
    print 'Invalid goldfile line: %s' % line
goldStandard = (gsRaw
                .filter(lambda s: s[1] == 1)
                .map(lambda s: s[0])
                .cache())
print 'Read %d lines, successfully parsed %d lines, failed to parse %d lines' % (gsRaw.count(),
                                                                                 goldStandard.count(),
                                                                                 gsFailed.count())
# Expect no failures; the one-line difference is the CSV header row.
assert (gsFailed.count() == 0)
assert (gsRaw.count() == (goldStandard.count() + 1))
# TODO: Replace <FILL IN> with appropriate code
# Re-key similarities as "amazonID googleURL" so they join with goldStandard.
sims = similaritiesBroadcast.map(lambda (x,y,z): (("%s %s" % (y,x)),z))
trueDupsRDD = (sims
               .join(goldStandard))
trueDupsCount = trueDupsRDD.count()
avgSimDups = trueDupsRDD.map(lambda (x,(y,z)): y).mean()
# Pairs with no gold-standard match (None after leftOuterJoin) are non-dups.
# NOTE(review): `x[1][1] == None` should idiomatically be `is None`.
nonDupsRDD = (sims
              .leftOuterJoin(goldStandard)
              .filter(lambda x: x[1][1] == None))
avgSimNon = nonDupsRDD.map(lambda (x,(y,z)): y).mean()
print 'There are %s true duplicates.' % trueDupsCount
print 'The average similarity of true duplicates is %s.' % avgSimDups
print 'And for non duplicates, it is %s.' % avgSimNon
# TEST Perform a Gold Standard evaluation (3e)
Test.assertEquals(trueDupsCount, 146, 'incorrect trueDupsCount')
Test.assertTrue(abs(avgSimDups - 0.264332573435) < 0.0000001, 'incorrect avgSimDups')
Test.assertTrue(abs(avgSimNon - 0.00123476304656) < 0.0000001, 'incorrect avgSimNon')
# TODO: Replace <FILL IN> with appropriate code
# Tokenize the FULL (not small) Amazon and Google datasets.
amazonFullRecToToken = amazon.map(lambda x: (x[0], tokenize(x[1])))
googleFullRecToToken = google.map(lambda x: (x[0], tokenize(x[1])))
print 'Amazon full dataset is %s products, Google full dataset is %s products' % (amazonFullRecToToken.count(),
                                                                                    googleFullRecToToken.count())
# TEST Tokenize the full dataset (4a)
Test.assertEquals(amazonFullRecToToken.count(), 1363, 'incorrect amazonFullRecToToken.count()')
Test.assertEquals(googleFullRecToToken.count(), 3226, 'incorrect googleFullRecToToken.count()')
# TODO: Replace <FILL IN> with appropriate code
# Combine both corpora and compute IDFs over the union.
fullCorpusRDD = amazonFullRecToToken.union(googleFullRecToToken)
#print fullCorpusRDD.count()
#print testRDD.count()
idfsFull = idfs(fullCorpusRDD)
idfsFullCount = idfsFull.count()
print 'There are %s unique tokens in the full datasets.' % idfsFullCount
# Recompute IDFs for full dataset
idfsFullWeights = idfs(fullCorpusRDD)
# NOTE(review): despite the name, idfsFullBroadcast is a plain driver-side
# dict (collectAsMap result), not an sc.broadcast() variable.
idfsFullBroadcast = idfsFullWeights.collectAsMap()
#print idfsFullWeights.first()
#print amazonFullRecToToken.first()
#print googleFullRecToToken.first()
# Pre-compute TF-IDF weights. Build mappings from record ID weight vector.
amazonWeightsRDD = amazonFullRecToToken.map(lambda (x,y): (x,tfidf(y,idfsFullBroadcast)))
googleWeightsRDD = googleFullRecToToken.map(lambda (x,y): (x,tfidf(y,idfsFullBroadcast)))
print 'There are %s Amazon weights and %s Google weights.' % (amazonWeightsRDD.count(),
                                                              googleWeightsRDD.count())
# TEST Compute IDFs and TF-IDFs for the full datasets (4b)
Test.assertEquals(idfsFullCount, 17078, 'incorrect idfsFullCount')
Test.assertEquals(amazonWeightsRDD.count(), 1363, 'incorrect amazonWeightsRDD.count()')
Test.assertEquals(googleWeightsRDD.count(), 3226, 'incorrect googleWeightsRDD.count()')
# delete below this line:
#amazonFullRecToToken.first()
# TODO: Replace <FILL IN> with appropriate code
# Precompute the L2 norm of every record's weight vector and broadcast the
# resulting ID-to-norm dicts to the workers.
amazonNorms = amazonWeightsRDD.map(lambda (x,y): (x,norm(y))).collectAsMap()
amazonNormsBroadcast = sc.broadcast(amazonNorms)
googleNorms = googleWeightsRDD.map(lambda (x,y): (x,norm(y))).collectAsMap()
googleNormsBroadcast = sc.broadcast(googleNorms)
# TEST Compute Norms for the weights from the full datasets (4c)
Test.assertTrue(isinstance(amazonNormsBroadcast, Broadcast), 'incorrect amazonNormsBroadcast')
Test.assertEquals(len(amazonNormsBroadcast.value), 1363, 'incorrect amazonNormsBroadcast.value')
Test.assertTrue(isinstance(googleNormsBroadcast, Broadcast), 'incorrect googleNormsBroadcast')
Test.assertEquals(len(googleNormsBroadcast.value), 3226, 'incorrect googleNormsBroadcast.value')
# TODO: Replace <FILL IN> with appropriate code
def invert(record):
    """ Invert (ID, tokens) to a list of (token, ID)
    Args:
        record: a pair, (ID, dict of token -> weight)
    Returns:
        pairs: a list of (token, ID) pairs, one per token in the record
    """
    # A comprehension replaces the original append loop; iterating the dict
    # directly yields its keys, so .keys() is unnecessary.
    rec_id, weights = record
    return [(token, rec_id) for token in weights]
# Build inverted indices: one (token, record ID) pair per token occurrence.
amazonInvPairsRDD = (amazonWeightsRDD
                     .flatMap(invert)
                     .cache())
googleInvPairsRDD = (googleWeightsRDD
                     .flatMap(invert)
                     .cache())
print 'There are %s Amazon inverted pairs and %s Google inverted pairs.' % (amazonInvPairsRDD.count(),
                                                                            googleInvPairsRDD.count())
# TEST Create inverted indicies from the full datasets (4d)
invertedPair = invert((1, {'foo': 2}))
#print invertedPair[0]
Test.assertEquals(invertedPair[0][1], 1, 'incorrect invert result')
Test.assertEquals(amazonInvPairsRDD.count(), 111387, 'incorrect amazonInvPairsRDD.count()')
Test.assertEquals(googleInvPairsRDD.count(), 77678, 'incorrect googleInvPairsRDD.count()')
# deletet below this line:
#print type(amazonInvPairsRDD.take(1))
def swap(record):
    """Swap (token, (ID, URL)) to ((ID, URL), token).

    Args:
        record: a pair, (token, (ID, URL))

    Returns:
        pair: ((ID, URL), token)
    """
    return (record[1], record[0])
# For each (Amazon ID, Google URL) pair, gather the list of tokens the two
# records share (join on token, then group by the record-ID pair).
commonTokens = (amazonInvPairsRDD
                .join(googleInvPairsRDD)
                .map(swap)
                .groupByKey()
                .map(lambda (x,y): (x,list(y)))
                .cache())
print 'Found %d common tokens' % commonTokens.count()
# TEST Identify common tokens from the full dataset (4e)
Test.assertEquals(commonTokens.count(), 2441100, 'incorrect commonTokens.count()')
# TODO: Replace <FILL IN> with appropriate code
# Broadcast the full ID-to-weight-vector dicts so workers can look them up.
amazonWeightsBroadcast = sc.broadcast(amazonWeightsRDD.collectAsMap())
googleWeightsBroadcast = sc.broadcast(googleWeightsRDD.collectAsMap())
def fastCosineSimilarity(record):
    """Compute cosine similarity using Broadcast variables.

    Args:
        record: ((Amazon ID, Google URL), list of shared tokens)

    Returns:
        pair: ((Amazon ID, Google URL), cosine similarity value)
    """
    amazonRec = record[0][0]
    googleRec = record[0][1]
    tokens = record[1]
    # Hoist the broadcast-dict lookups out of the summation.
    aWeights = amazonWeightsBroadcast.value[amazonRec]
    gWeights = googleWeightsBroadcast.value[googleRec]
    dot = sum(aWeights[token] * gWeights[token] for token in tokens)
    value = dot / (googleNormsBroadcast.value[googleRec] *
                   amazonNormsBroadcast.value[amazonRec])
    return ((amazonRec, googleRec), value)
#print amazonWeightsRDD.collectAsMap()
#fastCosineSimilarity(commonTokens.first())
#print amazonWeightsRDD.first()
#print commonTokens.first()[1].first()
# Score every candidate pair on the full datasets.
similaritiesFullRDD = (commonTokens
                       .map(fastCosineSimilarity)
                       .cache())
#type(similaritiesFullRDD)
#similaritiesFullRDD.first()
print similaritiesFullRDD.count()
# TEST Identify common tokens from the full dataset (4f)
similarityTest = similaritiesFullRDD.filter(lambda ((aID, gURL), cs): aID == 'b00005lzly' and gURL == 'http://www.google.com/base/feeds/snippets/13823221823254120257').collect()
Test.assertEquals(len(similarityTest), 1, 'incorrect len(similarityTest)')
Test.assertTrue(abs(similarityTest[0][1] - 4.286548414e-06) < 0.000000000001, 'incorrect similarityTest fastCosineSimilarity')
Test.assertEquals(similaritiesFullRDD.count(), 2441100, 'incorrect similaritiesFullRDD.count()')
#
#print abs(similarityTest[0][1] - 4.286548414e-06)
# Create an RDD of ((Amazon ID, Google URL), similarity score)
simsFullRDD = similaritiesFullRDD.map(lambda x: ("%s %s" % (x[0][0], x[0][1]), x[1]))
assert (simsFullRDD.count() == 2441100)
# Create an RDD of just the similarity scores
simsFullValuesRDD = (simsFullRDD
                     .map(lambda x: x[1])
                     .cache())
assert (simsFullValuesRDD.count() == 2441100)
# Look up all similarity scores for true duplicates
# This helper function will return the similarity score for records that are in the gold standard and the simsFullRDD (True positives), and will return 0 for records that are in the gold standard but not in simsFullRDD (False Negatives).
def gs_value(record):
    # record is (key, ('gold', similarity-or-None)) from leftOuterJoin;
    # a missing similarity (a false negative) counts as 0.
    sim = record[1][1]
    return 0 if sim is None else sim
# Join the gold standard and simsFullRDD, and then extract the similarities scores using the helper function
trueDupSimsRDD = (goldStandard
                  .leftOuterJoin(simsFullRDD)
                  .map(gs_value)
                  .cache())
print 'There are %s true duplicates.' % trueDupSimsRDD.count()
# One score per gold-standard pair (false negatives contribute 0).
assert(trueDupSimsRDD.count() == 1300)
from pyspark.accumulators import AccumulatorParam
class VectorAccumulatorParam(AccumulatorParam):
    """Accumulator parameter that sums fixed-length lists element-wise."""
    # Initialize the VectorAccumulator to 0
    def zero(self, value):
        # The zero vector has the same length as the prototype `value`.
        return [0] * len(value)
    # Add two VectorAccumulator variables
    def addInPlace(self, val1, val2):
        # Element-wise in-place addition; val1 is mutated and returned.
        for i in xrange(len(val1)):
            val1[i] += val2[i]
        return val1
# Return a list with entry x set to value and all other entries set to 0
def set_bit(x, value, length):
    """Build a one-hot list of the given length.

    Args:
        x: index to set (an out-of-range x yields an all-zero list)
        value: value placed at index x
        length: length of the returned list

    Returns:
        list with entry x set to value and all other entries set to 0
    """
    # Direct indexed assignment replaces the original O(length) comparison
    # loop; the bounds check preserves the all-zeros result for x outside
    # [0, length).
    bits = [0] * length
    if 0 <= x < length:
        bits[x] = value
    return bits
# Pre-bin counts of false positives for different threshold ranges
# BINS is nthresholds + 1 so that a similarity of exactly 1.0 maps to its
# own bin (index 100).
BINS = 101
nthresholds = 100
def bin(similarity):
    # Map a similarity in [0, 1] to an integer bin index in [0, nthresholds].
    # NOTE(review): shadows the builtin bin(); renaming would touch callers.
    return int(similarity * nthresholds)
# fpCounts[i] = number of entries (possible false positives) where bin(similarity) == i
zeros = [0] * BINS
# Spark accumulator using the element-wise list addition defined above.
fpCounts = sc.accumulator(zeros, VectorAccumulatorParam())
def add_element(score):
    """Accumulate one similarity score into the per-bin counts."""
    global fpCounts
    b = bin(score)
    # += goes through VectorAccumulatorParam.addInPlace on the accumulator.
    fpCounts += set_bit(b, 1, BINS)
# Tally every pairwise similarity into fpCounts (runs on the workers).
simsFullValuesRDD.foreach(add_element)
# Remove true positives from FP counts
def sub_element(score):
    """Subtract one gold-standard (true duplicate) score from the bin counts."""
    global fpCounts
    b = bin(score)
    fpCounts += set_bit(b, -1, BINS)
# After this pass fpCounts holds only potential false positives per bin.
trueDupSimsRDD.foreach(sub_element)
def falsepos(threshold):
    """Count non-duplicate pairs whose similarity falls at or above threshold."""
    fpList = fpCounts.value
    return sum([fpList[b] for b in range(0, BINS) if float(b) / nthresholds >= threshold])
def falseneg(threshold):
    """Count gold-standard duplicates whose similarity is below threshold."""
    return trueDupSimsRDD.filter(lambda x: x < threshold).count()
def truepos(threshold):
    """Count gold-standard duplicates at or above threshold.

    Relies on falsenegDict, which is built at module level below before
    this function is first called.
    """
    return trueDupSimsRDD.count() - falsenegDict[threshold]
# Precision = true-positives / (true-positives + false-positives)
# Recall = true-positives / (true-positives + false-negatives)
# F-measure = 2 x Recall x Precision / (Recall + Precision)
def precision(threshold):
    """Precision at the given threshold, using the precomputed count dicts."""
    tp = trueposDict[threshold]
    return float(tp) / (tp + falseposDict[threshold])
def recall(threshold):
    """Recall at the given threshold, using the precomputed count dicts."""
    tp = trueposDict[threshold]
    return float(tp) / (tp + falsenegDict[threshold])
def fmeasure(threshold):
    """Harmonic mean of precision and recall at the given threshold."""
    r = recall(threshold)
    p = precision(threshold)
    return 2 * r * p / (r + p)
# Evaluate the metrics over a sweep of thresholds and plot the curves.
# Build falsenegDict before trueposDict: truepos() reads falsenegDict.
thresholds = [float(n) / nthresholds for n in range(0, nthresholds)]
falseposDict = dict([(t, falsepos(t)) for t in thresholds])
falsenegDict = dict([(t, falseneg(t)) for t in thresholds])
trueposDict = dict([(t, truepos(t)) for t in thresholds])
precisions = [precision(t) for t in thresholds]
recalls = [recall(t) for t in thresholds]
fmeasures = [fmeasure(t) for t in thresholds]
print precisions[0], fmeasures[0]
assert (abs(precisions[0] - 0.000532546802671) < 0.0000001)
assert (abs(fmeasures[0] - 0.00106452669505) < 0.0000001)
fig = plt.figure()
plt.plot(thresholds, precisions)
plt.plot(thresholds, recalls)
plt.plot(thresholds, fmeasures)
plt.legend(['Precision', 'Recall', 'F-measure'])
pass
| {
"content_hash": "d2f40d1b3e049882edd7324d96119411",
"timestamp": "",
"source": "github",
"line_count": 798,
"max_line_length": 237,
"avg_line_length": 36.788220551378444,
"alnum_prop": 0.6596723098409238,
"repo_name": "aleph-w/ApacheSparkLearning",
"id": "25a040ae191b73940a06705b996a1c8ef2060e88",
"size": "29381",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "AmazonGoogle_WordAnalysis.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "220526"
}
],
"symlink_target": ""
} |
import itertools
from copy import deepcopy
import numpy as np
import pandas as pd
from scipy.spatial.distance import squareform
from skbio._base import SkbioObject
from skbio.stats._misc import _pprint_strs
from skbio.util import find_duplicates
from skbio.util._decorator import experimental, classonlymethod
from skbio.util._misc import resolve_key
from ._utils import is_symmetric_and_hollow
from ._utils import distmat_reorder, distmat_reorder_condensed
class DissimilarityMatrixError(Exception):
    """General error for dissimilarity matrix validation failures."""
    # The docstring is a sufficient class body; the redundant `pass` removed.
class DistanceMatrixError(DissimilarityMatrixError):
    """General error for distance matrix validation failures."""
    # The docstring is a sufficient class body; the redundant `pass` removed.
class MissingIDError(DissimilarityMatrixError):
    """Error for ID lookup that doesn't exist in the dissimilarity matrix."""

    def __init__(self, missing_id):
        # Pass the formatted message through the standard Exception
        # constructor instead of assigning self.args by hand; the resulting
        # args tuple is identical.
        super(MissingIDError, self).__init__(
            "The ID '%s' is not in the dissimilarity matrix." % missing_id)
class DissimilarityMatrix(SkbioObject):
    """Store dissimilarities between objects.
    A `DissimilarityMatrix` instance stores a square, hollow, two-dimensional
    matrix of dissimilarities between objects. Objects could be, for example,
    samples or DNA sequences. A sequence of IDs accompanies the
    dissimilarities.
    Methods are provided to load and save dissimilarity matrices from/to disk,
    as well as perform common operations such as extracting dissimilarities
    based on object ID.
    Parameters
    ----------
    data : array_like or DissimilarityMatrix
        Square, hollow, two-dimensional ``numpy.ndarray`` of dissimilarities
        (floats), or a structure that can be converted to a ``numpy.ndarray``
        using ``numpy.asarray`` or a one-dimensional vector of dissimilarities
        (floats), as defined by `scipy.spatial.distance.squareform`. Can
        instead be a `DissimilarityMatrix` (or subclass) instance,
        in which case the instance's data will be used.
        Data will be converted to a float ``dtype`` if necessary. A copy will
        *not* be made if already a ``numpy.ndarray`` with a float ``dtype``.
    ids : sequence of str, optional
        Sequence of strings to be used as object IDs. Must match the number of
        rows/cols in `data`. If ``None`` (the default), IDs will be
        monotonically-increasing integers cast as strings, with numbering
        starting from zero, e.g., ``('0', '1', '2', '3', ...)``.
    validate : bool, optional
        If `validate` is ``True`` (the default) and data is not a
        DissimilarityMatrix object, the input data will be validated.
    See Also
    --------
    DistanceMatrix
    scipy.spatial.distance.squareform
    Notes
    -----
    The dissimilarities are stored in redundant (square-form) format [1]_.
    The data are not checked for symmetry, nor guaranteed/assumed to be
    symmetric.
    References
    ----------
    .. [1] http://docs.scipy.org/doc/scipy/reference/spatial.distance.html
    """
    # Default serialization format name for this class.
    default_write_format = 'lsmat'
    # Used in __str__
    _matrix_element_name = 'dissimilarity'
    @experimental(as_of="0.4.0")
    def __init__(self, data, ids=None, validate=True):
        """Initialize the matrix; see the class docstring for parameters.

        Validation is deliberately tiered: full validation, shape-only
        validation, and ID-only validation are toggled independently below
        depending on how trustworthy the input `data` is.
        """
        validate_full = validate
        validate_shape = False
        validate_ids = False
        if isinstance(data, DissimilarityMatrix):
            if isinstance(data, self.__class__):
                # Never validate when copying from an object
                # of the same type
                # We should be able to assume it is already
                # in a good state.
                validate_full = False
                validate_shape = False
                # but do validate ids, if redefining them
                validate_ids = False if ids is None else True
            ids = data.ids if ids is None else ids
            data = data.data
        # It is necessary to standardize the representation of the .data
        # attribute of this object. The input types might be list, tuple,
        # np.array, or possibly some other object type. Generally, this
        # normalization of type will require a copy of data. For example,
        # moving from a Python type representation (e.g., [[0, 1], [1, 0]])
        # requires casting all of the values to numpy types, which is handled
        # as an implicit copy via np.asarray. However, these copies are
        # unnecessary if the data object is already a numpy array. np.asarray
        # is smart enough to not copy the data, however if a dtype change is
        # requested it will. The following block of code limits the use of
        # np.asarray to situations where the data are (a) not already a numpy
        # array or (b) the data are not a single or double precision numpy
        # data type.
        _issue_copy = True
        if isinstance(data, np.ndarray):
            if data.dtype in (np.float32, np.float64):
                _issue_copy = False
        if _issue_copy:
            data = np.asarray(data, dtype='float')
        if data.ndim == 1:
            # We can assume squareform will return a symmetric square matrix
            # so no need for full validation.
            # Still do basic checks (e.g. zero length)
            # and id validation
            data = squareform(data, force='tomatrix', checks=False)
            validate_full = False
            validate_shape = True
            validate_ids = True
        if ids is None:
            ids = (str(i) for i in range(data.shape[0]))
            # I just created the ids, so no need to re-validate them
            validate_ids = False
        ids = tuple(ids)
        if validate_full:
            self._validate(data, ids)
        else:
            if validate_shape:
                self._validate_shape(data)
            if validate_ids:
                self._validate_ids(data, ids)
        self._data = data
        self._ids = ids
        self._id_index = self._index_list(self._ids)
@classonlymethod
@experimental(as_of="0.5.1")
def from_iterable(cls, iterable, metric, key=None, keys=None):
"""Create DissimilarityMatrix from an iterable given a metric.
Parameters
----------
iterable : iterable
Iterable containing objects to compute pairwise dissimilarities on.
metric : callable
A function that takes two arguments and returns a float
representing the dissimilarity between the two arguments.
key : callable or metadata key, optional
A function that takes one argument and returns a string
representing the id of the element in the dissimilarity matrix.
Alternatively, a key to a `metadata` property if it exists for
each element in the `iterable`. If None, then default ids will be
used.
keys : iterable, optional
An iterable of the same length as `iterable`. Each element will be
used as the respective key.
Returns
-------
DissimilarityMatrix
The `metric` applied to all pairwise elements in the `iterable`.
Raises
------
ValueError
If `key` and `keys` are both provided.
"""
iterable = list(iterable)
if key is not None and keys is not None:
raise ValueError("Cannot use both `key` and `keys` at the same"
" time.")
keys_ = None
if key is not None:
keys_ = [resolve_key(e, key) for e in iterable]
elif keys is not None:
keys_ = keys
dm = np.empty((len(iterable),) * 2)
for i, a in enumerate(iterable):
for j, b in enumerate(iterable):
dm[i, j] = metric(a, b)
return cls(dm, keys_)
    @property
    @experimental(as_of="0.4.0")
    def data(self):
        """Array of dissimilarities.
        A square, hollow, two-dimensional ``numpy.ndarray`` of dissimilarities
        (floats). A copy is *not* returned.
        Notes
        -----
        This property is not writeable.
        """
        # Direct reference, not a copy: mutating it mutates the matrix.
        return self._data
    @property
    @experimental(as_of="0.4.0")
    def ids(self):
        """Tuple of object IDs.
        A tuple of strings, one for each object in the dissimilarity matrix.
        Notes
        -----
        This property is writeable, but the number of new IDs must match the
        number of objects in `data`.
        """
        # The tuple is immutable; reassign through the setter to change IDs.
        return self._ids
    @ids.setter
    def ids(self, ids_):
        ids_ = tuple(ids_)
        # Validate before mutating so a failed assignment leaves the object
        # unchanged.
        self._validate_ids(self.data, ids_)
        self._ids = ids_
        self._id_index = self._index_list(self._ids)
    @property
    @experimental(as_of="0.4.0")
    def dtype(self):
        """Data type of the dissimilarities."""
        # dtype of the backing numpy array (coerced to float in __init__).
        return self.data.dtype
    @property
    @experimental(as_of="0.4.0")
    def shape(self):
        """Two-element tuple containing the dissimilarity matrix dimensions.
        Notes
        -----
        As the dissimilarity matrix is guaranteed to be square, both tuple
        entries will always be equal.
        """
        # Delegates to the backing numpy array's shape.
        return self.data.shape
    @property
    @experimental(as_of="0.4.0")
    def size(self):
        """Total number of elements in the dissimilarity matrix.
        Notes
        -----
        Equivalent to ``self.shape[0] * self.shape[1]``.
        """
        # Delegates to the backing numpy array's size.
        return self.data.size
    @property
    @experimental(as_of="0.4.0")
    def T(self):
        """Transpose of the dissimilarity matrix.
        See Also
        --------
        transpose
        """
        # Convenience alias for transpose(); returns a new deep-copied object.
        return self.transpose()
@experimental(as_of="0.4.0")
def transpose(self):
"""Return the transpose of the dissimilarity matrix.
Notes
-----
A deep copy is returned.
Returns
-------
DissimilarityMatrix
Transpose of the dissimilarity matrix. Will be the same type as
`self`.
"""
# Note: Skip validation, since we assume self was already validated
return self.__class__(self.data.T.copy(),
deepcopy(self.ids),
validate=False)
@experimental(as_of="0.4.0")
def index(self, lookup_id):
"""Return the index of the specified ID.
Parameters
----------
lookup_id : str
ID whose index will be returned.
Returns
-------
int
Row/column index of `lookup_id`.
Raises
------
MissingIDError
If `lookup_id` is not in the dissimilarity matrix.
"""
if lookup_id in self:
return self._id_index[lookup_id]
else:
raise MissingIDError(lookup_id)
    @experimental(as_of="0.4.0")
    def redundant_form(self):
        """Return an array of dissimilarities in redundant format.
        As this is the native format that the dissimilarities are stored in,
        this is simply an alias for `data`.
        Returns
        -------
        ndarray
            Two-dimensional ``numpy.ndarray`` of dissimilarities in redundant
            format.
        Notes
        -----
        Redundant format is described in [1]_.
        Does *not* return a copy of the data.
        References
        ----------
        .. [1] http://docs.scipy.org/doc/scipy/reference/spatial.distance.html
        """
        # Same object as the `data` property; not a copy.
        return self.data
@experimental(as_of="0.4.0")
def copy(self):
"""Return a deep copy of the dissimilarity matrix.
Returns
-------
DissimilarityMatrix
Deep copy of the dissimilarity matrix. Will be the same type as
`self`.
"""
# We deepcopy IDs in case the tuple contains mutable objects at some
# point in the future.
# Note: Skip validation, since we assume self was already validated
return self.__class__(self.data.copy(),
deepcopy(self.ids),
validate=False)
    @experimental(as_of="0.4.0")
    def filter(self, ids, strict=True):
        """Filter the dissimilarity matrix by IDs.
        Parameters
        ----------
        ids : iterable of str
            IDs to retain. May not contain duplicates or be empty. Each ID must
            be present in the dissimilarity matrix.
        strict : bool, optional
            If `strict` is ``True`` and an ID that is not found in the distance
            matrix is found in `ids`, a ``MissingIDError`` exception will be
            raised, otherwise the ID will be ignored.
        Returns
        -------
        DissimilarityMatrix
            Filtered dissimilarity matrix containing only the IDs specified in
            `ids`. IDs will be in the same order as they appear in `ids`.
        Raises
        ------
        MissingIDError
            If an ID in `ids` is not in the object's list of IDs.
        """
        # Fast path: requested IDs match the current order exactly.
        # NOTE(review): this path re-validates (validate defaults to True)
        # and shares the underlying data array with `self` rather than
        # copying it — confirm both are intended.
        if tuple(self._ids) == tuple(ids):
            return self.__class__(self._data, self._ids)
        if strict:
            idxs = [self.index(id_) for id_ in ids]
        else:
            # get the indices to slice the inner numpy array
            idxs = []
            # save the IDs that were found in the distance matrix
            found_ids = []
            for id_ in ids:
                try:
                    idxs.append(self.index(id_))
                    found_ids.append(id_)
                except MissingIDError:
                    pass
            ids = found_ids
        # Note: Skip validation, since we assume self was already validated
        # But ids are new, so validate them explicitly
        filtered_data = distmat_reorder(self._data, idxs)
        self._validate_ids(filtered_data, ids)
        return self.__class__(filtered_data, ids, validate=False)
def _stable_order(self, ids):
"""Obtain a stable ID order with respect to self
Parameters
----------
ids : Iterable of ids
The IDs to establish a stable ordering for.
Returns
-------
np.array, dtype=int
The corresponding index values
"""
id_order = sorted(self._id_index[i] for i in ids)
return np.array(id_order, dtype=int)
    @experimental(as_of="0.5.5")
    def within(self, ids):
        """Obtain all the distances among the set of IDs
        Parameters
        ----------
        ids : Iterable of str
            The IDs to obtain distances for. All pairs of distances are
            returned such that, if provided ['a', 'b', 'c'], the distances
            for [('a', 'a'), ('a', 'b'), ('a', 'c'), ('b', 'a'), ('b', 'b'),
            ('b', 'c'), ('c', 'a'), ('c', 'b'), ('c', 'c')] are gathered.
        Returns
        -------
        pd.DataFrame
            (i, j, value) representing the source ID ("i"), the target ID ("j")
            and the distance ("value").
        Raises
        ------
        MissingIDError
            If an ID(s) specified is not in the dissimilarity matrix.
        Notes
        -----
        Order of the return items is stable, meaning that requesting IDs
        ['a', 'b'] is equivalent to ['b', 'a']. The order is with respect
        to the order of the .ids attribute of self.
        Example
        -------
        >>> from skbio.stats.distance import DissimilarityMatrix
        >>> dm = DissimilarityMatrix([[0, 1, 2, 3, 4], [1, 0, 1, 2, 3],
        ...                           [2, 1, 0, 1, 2], [3, 2, 1, 0, 1],
        ...                           [4, 3, 2, 1, 0]],
        ...                          ['A', 'B', 'C', 'D', 'E'])
        >>> dm.within(['A', 'B', 'C'])
           i  j  value
        0  A  A    0.0
        1  A  B    1.0
        2  A  C    2.0
        3  B  A    1.0
        4  B  B    0.0
        5  B  C    1.0
        6  C  A    2.0
        7  C  B    1.0
        8  C  C    0.0
        """
        # Deduplicate the requested IDs before membership checking.
        ids = set(ids)
        not_present = ids - set(self._id_index)
        if not_present:
            # NOTE(review): MissingIDError formats its argument into
            # "The ID '%s' is not in the dissimilarity matrix.", so passing a
            # full sentence here produces a nested message — confirm intended.
            raise MissingIDError("At least one ID (e.g., '%s') was not "
                                 "found." % not_present.pop())
        return self._subset_to_dataframe(ids, ids)
    @experimental(as_of="0.5.5")
    def between(self, from_, to_, allow_overlap=False):
        """Obtain the distances between the two groups of IDs
        Parameters
        ----------
        from_ : Iterable of str
            The IDs to obtain distances from. Distances from all pairs of IDs
            in from and to will be obtained.
        to_ : Iterable of str
            The IDs to obtain distances to. Distances from all pairs of IDs
            in to and from will be obtained.
        allow_overlap : bool, optional
            If True, allow overlap in the IDs of from and to (which would in
            effect be collecting the within distances). Default is False.
        Returns
        -------
        pd.DataFrame
            (i, j, value) representing the source ID ("i"), the target ID ("j")
            and the distance ("value").
        Raises
        ------
        MissingIDError
            If an ID(s) specified is not in the dissimilarity matrix.
        KeyError
            If `from_` and `to_` overlap while `allow_overlap` is False.
        Notes
        -----
        Order of the return items is stable, meaning that requesting IDs
        ['a', 'b'] is equivalent to ['b', 'a']. The order is with respect to
        the .ids attribute of self.
        Example
        -------
        >>> from skbio.stats.distance import DissimilarityMatrix
        >>> dm = DissimilarityMatrix([[0, 1, 2, 3, 4], [1, 0, 1, 2, 3],
        ...                           [2, 1, 0, 1, 2], [3, 2, 1, 0, 1],
        ...                           [4, 3, 2, 1, 0]],
        ...                          ['A', 'B', 'C', 'D', 'E'])
        >>> dm.between(['A', 'B'], ['C', 'D', 'E'])
           i  j  value
        0  A  C    2.0
        1  A  D    3.0
        2  A  E    4.0
        3  B  C    1.0
        4  B  D    2.0
        5  B  E    3.0
        """
        from_ = set(from_)
        to_ = set(to_)
        # Validate membership of both groups in a single pass.
        all_ids = from_ | to_
        not_present = all_ids - set(self._id_index)
        if not_present:
            raise MissingIDError("At least one ID (e.g., '%s') was not "
                                 "found." % not_present.pop())
        overlapping = from_ & to_
        if not allow_overlap and overlapping:
            raise KeyError("At least one ID overlaps in from_ and to_ "
                           "(e.g., '%s'). This constraint can removed with "
                           "allow_overlap=True." % overlapping.pop())
        return self._subset_to_dataframe(from_, to_)
    def _subset_to_dataframe(self, i_ids, j_ids):
        """Extract a subset of self and express as a DataFrame
        Parameters
        ----------
        i_ids : Iterable of str
            The "from" IDs.
        j_ids : Iterable of str
            The "to" IDs.
        Notes
        -----
        ID membership is not tested by this private method, and it is assumed
        the caller has asserted the IDs are present.
        Returns
        -------
        pd.DataFrame
            (i, j, value) representing the source ID ("i"), the target ID ("j")
            and the distance ("value").
        """
        i_indices = self._stable_order(i_ids)
        j_indices = self._stable_order(j_ids)
        j_length = len(j_indices)
        j_labels = tuple([self.ids[j] for j in j_indices])
        i = []
        j = []
        # np.hstack([]) throws a ValueError. However, np.hstack([np.array([])])
        # is valid and returns an empty array. Accordingly, an empty array is
        # included here so that np.hstack works in the event that either i_ids
        # or j_ids is empty.
        values = [np.array([])]
        for i_idx in i_indices:
            i.extend([self.ids[i_idx]] * j_length)
            j.extend(j_labels)
            # Fancy-index one source row against all target columns at once.
            subset = self._data[i_idx, j_indices]
            values.append(subset)
        i = pd.Series(i, name='i', dtype=str)
        j = pd.Series(j, name='j', dtype=str)
        values = pd.Series(np.hstack(values), name='value')
        return pd.concat([i, j, values], axis=1)
    @experimental(as_of="0.4.0")
    def plot(self, cmap=None, title=""):
        """Creates a heatmap of the dissimilarity matrix
        Parameters
        ----------
        cmap: str or matplotlib.colors.Colormap, optional
            Sets the color scheme of the heatmap
            If ``None``, defaults to the colormap specified in the matplotlib
            rc file.
        title: str, optional
            Sets the title label of the heatmap
            (Default is blank)
        Returns
        -------
        matplotlib.figure.Figure
            Figure containing the heatmap and colorbar of the plotted
            dissimilarity matrix.
        Examples
        --------
        .. plot::
           Define a dissimilarity matrix with five objects labeled A-E:
           >>> from skbio.stats.distance import DissimilarityMatrix
           >>> dm = DissimilarityMatrix([[0, 1, 2, 3, 4], [1, 0, 1, 2, 3],
           ...                           [2, 1, 0, 1, 2], [3, 2, 1, 0, 1],
           ...                           [4, 3, 2, 1, 0]],
           ...                          ['A', 'B', 'C', 'D', 'E'])
           Plot the dissimilarity matrix as a heatmap:
           >>> fig = dm.plot(cmap='Reds', title='Example heatmap')
        """
        # Imported lazily so matplotlib is only required when plotting.
        import matplotlib.pyplot as plt
        # based on http://stackoverflow.com/q/14391959/3776794
        fig, ax = plt.subplots()
        # use pcolormesh instead of pcolor for performance
        heatmap = ax.pcolormesh(self.data, cmap=cmap)
        fig.colorbar(heatmap)
        # center labels within each cell
        ticks = np.arange(0.5, self.shape[0])
        ax.set_xticks(ticks, minor=False)
        ax.set_yticks(ticks, minor=False)
        # Ensure there is no white border around the heatmap by manually
        # setting the limits
        ax.set_ylim(0, len(self.ids))
        ax.set_xlim(0, len(self.ids))
        # display data as it is stored in the dissimilarity matrix
        # (default is to have y-axis inverted)
        ax.invert_yaxis()
        ax.set_xticklabels(self.ids, rotation=90, minor=False)
        ax.set_yticklabels(self.ids, minor=False)
        ax.set_title(title)
        return fig
    def _repr_png_(self):
        """Hook used by IPython's rich-display machinery to render PNG."""
        return self._figure_data('png')
    def _repr_svg_(self):
        """Hook used by IPython's rich-display machinery to render SVG."""
        return self._figure_data('svg')
    @property
    @experimental(as_of="0.4.0")
    def png(self):
        """Display heatmap in IPython Notebook as PNG.
        """
        # Imported locally so IPython is only required for notebook display.
        from IPython.core.display import Image
        return Image(self._repr_png_(), embed=True)
    @property
    @experimental(as_of="0.4.0")
    def svg(self):
        """Display heatmap in IPython Notebook as SVG.
        """
        # Imported locally so IPython is only required for notebook display.
        from IPython.core.display import SVG
        return SVG(self._repr_svg_())
    def _figure_data(self, format):
        """Render the default heatmap via ``plot`` and serialize it.

        ``format`` is forwarded to IPython's ``print_figure`` (e.g. 'png'
        or 'svg'); the serialized figure data is returned.
        """
        import matplotlib.pyplot as plt
        from IPython.core.pylabtools import print_figure
        fig = self.plot()
        data = print_figure(fig, format)
        # We MUST close the figure, otherwise IPython's display machinery
        # will pick it up and send it as output, resulting in a double display
        plt.close(fig)
        return data
@experimental(as_of="0.4.1")
def to_data_frame(self):
"""Create a ``pandas.DataFrame`` from this ``DissimilarityMatrix``.
Returns
-------
pd.DataFrame
``pd.DataFrame`` with IDs on index and columns.
Examples
--------
>>> from skbio import DistanceMatrix
>>> dm = DistanceMatrix([[0, 1, 2],
... [1, 0, 3],
... [2, 3, 0]], ids=['a', 'b', 'c'])
>>> df = dm.to_data_frame()
>>> df
a b c
a 0.0 1.0 2.0
b 1.0 0.0 3.0
c 2.0 3.0 0.0
"""
return pd.DataFrame(data=self.data, index=self.ids, columns=self.ids)
@experimental(as_of="0.4.0")
def __str__(self):
"""Return a string representation of the dissimilarity matrix.
Summary includes matrix dimensions, a (truncated) list of IDs, and
(truncated) array of dissimilarities.
Returns
-------
str
String representation of the dissimilarity matrix.
"""
return '%dx%d %s matrix\nIDs:\n%s\nData:\n' % (
self.shape[0], self.shape[1], self._matrix_element_name,
_pprint_strs(self.ids)) + str(self.data)
@experimental(as_of="0.4.0")
def __eq__(self, other):
"""Compare this dissimilarity matrix to another for equality.
Two dissimilarity matrices are equal if they have the same shape, IDs
(in the same order!), and have data arrays that are equal.
Checks are *not* performed to ensure that `other` is a
`DissimilarityMatrix` instance.
Parameters
----------
other : DissimilarityMatrix
Dissimilarity matrix to compare to for equality.
Returns
-------
bool
``True`` if `self` is equal to `other`, ``False`` otherwise.
"""
equal = True
# The order these checks are performed in is important to be as
# efficient as possible. The check for shape equality is not strictly
# necessary as it should be taken care of in np.array_equal, but I'd
# rather explicitly bail before comparing IDs or data. Use array_equal
# instead of (a == b).all() because of this issue:
# http://stackoverflow.com/a/10582030
try:
if self.shape != other.shape:
equal = False
elif self.ids != other.ids:
equal = False
elif not np.array_equal(self.data, other.data):
equal = False
except AttributeError:
equal = False
return equal
@experimental(as_of="0.4.0")
def __ne__(self, other):
"""Determine whether two dissimilarity matrices are not equal.
Parameters
----------
other : DissimilarityMatrix
Dissimilarity matrix to compare to.
Returns
-------
bool
``True`` if `self` is not equal to `other`, ``False`` otherwise.
See Also
--------
__eq__
"""
return not self == other
@experimental(as_of="0.4.0")
def __contains__(self, lookup_id):
"""Check if the specified ID is in the dissimilarity matrix.
Parameters
----------
lookup_id : str
ID to search for.
Returns
-------
bool
``True`` if `lookup_id` is in the dissimilarity matrix, ``False``
otherwise.
See Also
--------
index
"""
return lookup_id in self._id_index
@experimental(as_of="0.4.0")
def __getitem__(self, index):
"""Slice into dissimilarity data by object ID or numpy indexing.
Extracts data from the dissimilarity matrix by object ID, a pair of
IDs, or numpy indexing/slicing.
Parameters
----------
index : str, two-tuple of str, or numpy index
`index` can be one of the following forms: an ID, a pair of IDs, or
a numpy index.
If `index` is a string, it is assumed to be an ID and a
``numpy.ndarray`` row vector is returned for the corresponding ID.
Note that the ID's row of dissimilarities is returned, *not* its
column. If the matrix is symmetric, the two will be identical, but
this makes a difference if the matrix is asymmetric.
If `index` is a two-tuple of strings, each string is assumed to be
an ID and the corresponding matrix element is returned that
represents the dissimilarity between the two IDs. Note that the
order of lookup by ID pair matters if the matrix is asymmetric: the
first ID will be used to look up the row, and the second ID will be
used to look up the column. Thus, ``dm['a', 'b']`` may not be the
same as ``dm['b', 'a']`` if the matrix is asymmetric.
Otherwise, `index` will be passed through to
``DissimilarityMatrix.data.__getitem__``, allowing for standard
indexing of a ``numpy.ndarray`` (e.g., slicing).
Returns
-------
ndarray or scalar
Indexed data, where return type depends on the form of `index` (see
description of `index` for more details).
Raises
------
MissingIDError
If the ID(s) specified in `index` are not in the dissimilarity
matrix.
Notes
-----
The lookup based on ID(s) is quick.
"""
if isinstance(index, str):
return self.data[self.index(index)]
elif self._is_id_pair(index):
return self.data[self.index(index[0]), self.index(index[1])]
else:
return self.data.__getitem__(index)
def _validate_ids(self, data, ids):
"""Validate the IDs.
Checks that IDs are unique and that the
number of IDs matches the number of rows/cols in the data array.
Subclasses can override this method to perform different/more specific
validation.
Notes
-----
Accepts arguments instead of inspecting instance attributes to avoid
creating an invalid dissimilarity matrix before raising an error.
Otherwise, the invalid dissimilarity matrix could be used after the
exception is caught and handled.
"""
duplicates = find_duplicates(ids)
if duplicates:
formatted_duplicates = ', '.join(repr(e) for e in duplicates)
raise DissimilarityMatrixError("IDs must be unique. Found the "
"following duplicate IDs: %s" %
formatted_duplicates)
if 0 == len(ids):
raise DissimilarityMatrixError("IDs must be at least 1 in "
"size.")
if len(ids) != data.shape[0]:
raise DissimilarityMatrixError("The number of IDs (%d) must match "
"the number of rows/columns in the "
"data (%d)." %
(len(ids), data.shape[0]))
def _validate_shape(self, data):
"""Validate the data array shape.
Checks that the data is at least 1x1 in size, 2D, square, and
contains only floats.
Notes
-----
Accepts arguments instead of inspecting instance attributes to avoid
creating an invalid dissimilarity matrix before raising an error.
Otherwise, the invalid dissimilarity matrix could be used after the
exception is caught and handled.
"""
if 0 in data.shape:
raise DissimilarityMatrixError("Data must be at least 1x1 in "
"size.")
if len(data.shape) != 2:
raise DissimilarityMatrixError("Data must have exactly two "
"dimensions.")
if data.shape[0] != data.shape[1]:
raise DissimilarityMatrixError("Data must be square (i.e., have "
"the same number of rows and "
"columns).")
if data.dtype not in (np.float32, np.float64):
raise DissimilarityMatrixError("Data must contain only floating "
"point values.")
    def _validate(self, data, ids):
        """Validate the data array and IDs.
        Checks that the data is at least 1x1 in size, 2D, square, and
        contains only floats. Also checks that IDs are unique and that the
        number of IDs matches the number of rows/cols in the data array.
        Subclasses can override this method to perform different/more specific
        validation (e.g., see `DistanceMatrix`).
        Notes
        -----
        Accepts arguments instead of inspecting instance attributes to avoid
        creating an invalid dissimilarity matrix before raising an error.
        Otherwise, the invalid dissimilarity matrix could be used after the
        exception is caught and handled.
        """
        # Shape must be validated first: the ID check reads data.shape[0].
        self._validate_shape(data)
        self._validate_ids(data, ids)
def _index_list(self, list_):
return {id_: idx for idx, id_ in enumerate(list_)}
def _is_id_pair(self, index):
return (isinstance(index, tuple) and
len(index) == 2 and
all(map(lambda e: isinstance(e, str), index)))
class DistanceMatrix(DissimilarityMatrix):
    """Store distances between objects.
    A `DistanceMatrix` is a `DissimilarityMatrix` with the additional
    requirement that the matrix data is symmetric. There are additional methods
    made available that take advantage of this symmetry.
    See Also
    --------
    DissimilarityMatrix
    Notes
    -----
    The distances are stored in redundant (square-form) format [1]_. To
    facilitate use with other scientific Python routines (e.g., scipy), the
    distances can be retrieved in condensed (vector-form) format using
    `condensed_form`.
    `DistanceMatrix` only requires that the distances it stores are symmetric.
    Checks are *not* performed to ensure the other three metric properties
    hold (non-negativity, identity of indiscernibles, and triangle inequality)
    [2]_. Thus, a `DistanceMatrix` instance can store distances that are not
    metric.
    References
    ----------
    .. [1] http://docs.scipy.org/doc/scipy/reference/spatial.distance.html
    .. [2] http://planetmath.org/metricspace
    """
    # Override here, used in superclass __str__
    _matrix_element_name = 'distance'
    @classonlymethod
    @experimental(as_of="0.4.1")
    def from_iterable(cls, iterable, metric, key=None, keys=None,
                      validate=True):
        """Create DistanceMatrix from all pairs in an iterable given a metric.
        Parameters
        ----------
        iterable : iterable
            Iterable containing objects to compute pairwise distances on.
        metric : callable
            A function that takes two arguments and returns a float
            representing the distance between the two arguments.
        key : callable or metadata key, optional
            A function that takes one argument and returns a string
            representing the id of the element in the distance matrix.
            Alternatively, a key to a `metadata` property if it exists for
            each element in the `iterable`. If None, then default ids will be
            used.
        keys : iterable, optional
            An iterable of the same length as `iterable`. Each element will be
            used as the respective key.
        validate : boolean, optional
            If ``True``, all pairwise distances are computed, including upper
            and lower triangles and the diagonal, and the resulting matrix is
            validated for symmetry and hollowness. If ``False``, `metric` is
            assumed to be hollow and symmetric and only the lower triangle
            (excluding the diagonal) is computed. Pass ``validate=False`` if
            you are sure `metric` is hollow and symmetric for improved
            performance.
        Returns
        -------
        DistanceMatrix
            The `metric` applied to pairwise elements in the `iterable`.
        Raises
        ------
        ValueError
            If `key` and `keys` are both provided.
        """
        if validate:
            # Superclass computes the full matrix; the DistanceMatrix
            # constructor then validates symmetry/hollowness.
            return super(DistanceMatrix, cls).from_iterable(iterable, metric,
                                                            key, keys)
        iterable = list(iterable)
        if key is not None and keys is not None:
            raise ValueError("Cannot use both `key` and `keys` at the same"
                             " time.")
        keys_ = None
        if key is not None:
            keys_ = [resolve_key(e, key) for e in iterable]
        elif keys is not None:
            keys_ = keys
        dm = np.zeros((len(iterable),) * 2)
        # Fast path: compute only the strict lower triangle and mirror it;
        # the diagonal stays at its initialized zero (hollowness assumed).
        for i, a in enumerate(iterable):
            for j, b in enumerate(iterable[:i]):
                dm[i, j] = dm[j, i] = metric(a, b)
        return cls(dm, keys_)
    @experimental(as_of="0.4.0")
    def condensed_form(self):
        """Return an array of distances in condensed format.
        Returns
        -------
        ndarray
            One-dimensional ``numpy.ndarray`` of distances in condensed format.
        Notes
        -----
        Condensed format is described in [1]_.
        The conversion is not a constant-time operation, though it should be
        relatively quick to perform.
        References
        ----------
        .. [1] http://docs.scipy.org/doc/scipy/reference/spatial.distance.html
        """
        # checks=False skips scipy's own symmetry test; symmetry is
        # normally enforced by this class's _validate at construction.
        return squareform(self._data, force='tovector', checks=False)
    @experimental(as_of="0.4.0")
    def permute(self, condensed=False):
        """Randomly permute both rows and columns in the matrix.
        Randomly permutes the ordering of rows and columns in the matrix. The
        same permutation is applied to both rows and columns in order to
        maintain symmetry and hollowness. Only the rows/columns in the distance
        matrix are permuted; the IDs are *not* permuted.
        Parameters
        ----------
        condensed : bool, optional
            If ``True``, return the permuted distance matrix in condensed
            format. Otherwise, return the permuted distance matrix as a new
            ``DistanceMatrix`` instance.
        Returns
        -------
        DistanceMatrix or ndarray
            Permuted distances as a new ``DistanceMatrix`` or as a ``ndarray``
            in condensed format.
        See Also
        --------
        condensed_form
        Notes
        -----
        This method does not modify the distance matrix that it is called on.
        It is more efficient to pass ``condensed=True`` than permuting the
        distance matrix and then converting to condensed format.
        """
        order = np.random.permutation(self.shape[0])
        if condensed:
            permuted_condensed = distmat_reorder_condensed(self._data, order)
            return permuted_condensed
        else:
            # Note: Skip validation, since we assume self was already validated
            permuted = distmat_reorder(self._data, order)
            return self.__class__(permuted, self.ids, validate=False)
    def _validate(self, data, ids):
        """Validate the data array and IDs.
        Overrides the superclass `_validate`. Performs a check for symmetry in
        addition to the checks performed in the superclass.
        """
        super(DistanceMatrix, self)._validate(data, ids)
        # Symmetry and hollowness are computed in a single pass; NaNs make
        # the symmetry check fail (NaN != NaN).
        data_sym, data_hol = is_symmetric_and_hollow(data)
        if not data_sym:
            raise DistanceMatrixError(
                "Data must be symmetric and cannot contain NaNs.")
        if not data_hol:
            raise DistanceMatrixError("Data must be hollow (i.e., the diagonal"
                                      " can only contain zeros).")
    @experimental(as_of="0.5.1")
    def to_series(self):
        """Create a ``pandas.Series`` from this ``DistanceMatrix``.
        The series will contain distances in condensed form: only distances
        from one matrix triangle are included, and the diagonal is excluded.
        The series' index will be a ``pd.MultiIndex`` relating pairs of IDs to
        distances. The pairs of IDs will be in row-major order with respect to
        the upper matrix triangle.
        To obtain all distances (i.e. both upper and lower matrix triangles and
        the diagonal), use ``DistanceMatrix.to_data_frame``. To obtain *only*
        the distances in condensed form (e.g. for use with SciPy), use
        ``DistanceMatrix.condensed_form``.
        Returns
        -------
        pd.Series
            ``pd.Series`` with pairs of IDs on the index.
        See Also
        --------
        to_data_frame
        condensed_form
        scipy.spatial.distance.squareform
        Examples
        --------
        >>> from skbio import DistanceMatrix
        >>> dm = DistanceMatrix([[0, 1, 2, 3],
        ...                      [1, 0, 4, 5],
        ...                      [2, 4, 0, 6],
        ...                      [3, 5, 6, 0]], ids=['a', 'b', 'c', 'd'])
        >>> dm.to_series()
        a  b    1.0
           c    2.0
           d    3.0
        b  c    4.0
           d    5.0
        c  d    6.0
        dtype: float64
        """
        distances = self.condensed_form()
        # `id_pairs` will not be interpreted as a `pd.MultiIndex` if it is an
        # iterable returned by `itertools.combinations`.
        # combinations() yields pairs in the same row-major upper-triangle
        # order that condensed_form uses, so pairs and values line up.
        id_pairs = list(itertools.combinations(self.ids, 2))
        index = pd.Index(id_pairs, tupleize_cols=True)
        return pd.Series(data=distances, index=index, dtype=float)
@experimental(as_of="0.4.0")
def randdm(num_objects, ids=None, constructor=None, random_fn=None):
    """Generate a distance matrix populated with random distances.

    Using the default `random_fn`, distances are randomly drawn from a
    uniform distribution over ``[0, 1)``. Regardless of `random_fn`, the
    resulting distance matrix is guaranteed to be symmetric and hollow.

    Parameters
    ----------
    num_objects : int
        The number of objects in the resulting distance matrix. For example,
        if `num_objects` is 3, a 3x3 distance matrix will be returned.
    ids : sequence of str or None, optional
        A sequence of strings to be used as IDs. ``len(ids)`` must be equal
        to `num_objects`. If not provided, IDs will be
        monotonically-increasing integers cast as strings (numbering starts
        at 1). For example, ``('1', '2', '3')``.
    constructor : type, optional
        `DissimilarityMatrix` or subclass constructor to use when creating
        the random distance matrix. The returned distance matrix will be of
        this type. If ``None`` (the default), a `DistanceMatrix` instance
        will be returned.
    random_fn : function, optional
        Function to generate random values. `random_fn` must accept two
        arguments (number of rows and number of columns) and return a 2D
        ``numpy.ndarray`` of floats (or something that can be cast to
        float). If ``None`` (the default), ``numpy.random.rand`` will be
        used.

    Returns
    -------
    DissimilarityMatrix
        `DissimilarityMatrix` (or subclass) instance of random distances.
        Type depends on `constructor`.

    See Also
    --------
    numpy.random.rand
    """
    # Fall back to the default matrix type and RNG when not supplied.
    matrix_type = DistanceMatrix if constructor is None else constructor
    rng = np.random.rand if random_fn is None else random_fn
    # Keep only the strict lower triangle of the random draw, then mirror
    # it so the result is symmetric with a zero diagonal (hollow).
    lower = np.tril(rng(num_objects, num_objects), -1)
    data = lower + lower.T
    if not ids:
        ids = map(str, range(1, num_objects + 1))
    return matrix_type(data, ids)
# Helper functions shared by the ANOSIM and PERMANOVA implementations.
def _preprocess_input_sng(ids, sample_size, grouping, column):
"""Compute intermediate results not affected by permutations.
These intermediate results can be computed a single time for efficiency,
regardless of grouping vector permutations (i.e., when calculating the
p-value). These intermediate results are used by both ANOSIM and PERMANOVA.
Also validates and normalizes input (e.g., converting ``DataFrame`` column
into grouping vector).
"""
if isinstance(grouping, pd.DataFrame):
if column is None:
raise ValueError(
"Must provide a column name if supplying a DataFrame.")
else:
grouping = _df_to_vector(ids, grouping, column)
elif column is not None:
raise ValueError(
"Must provide a DataFrame if supplying a column name.")
if len(grouping) != sample_size:
raise ValueError(
"Grouping vector size must match the number of IDs in the "
"distance matrix.")
# Find the group labels and convert grouping to an integer vector
# (factor).
groups, grouping = np.unique(grouping, return_inverse=True)
num_groups = len(groups)
if num_groups == len(grouping):
raise ValueError(
"All values in the grouping vector are unique. This method cannot "
"operate on a grouping vector with only unique values (e.g., "
"there are no 'within' distances because each group of objects "
"contains only a single object).")
if num_groups == 1:
raise ValueError(
"All values in the grouping vector are the same. This method "
"cannot operate on a grouping vector with only a single group of "
"objects (e.g., there are no 'between' distances because there is "
"only a single group).")
return num_groups, grouping
def _preprocess_input(distance_matrix, grouping, column):
    """Validate inputs and precompute permutation-invariant intermediates.

    These intermediate results can be computed a single time for efficiency,
    regardless of grouping vector permutations (i.e., when calculating the
    p-value). They are used by both ANOSIM and PERMANOVA. Also validates and
    normalizes input (e.g., converting a ``DataFrame`` column into a
    grouping vector).
    """
    if not isinstance(distance_matrix, DistanceMatrix):
        raise TypeError("Input must be a DistanceMatrix.")
    sample_size = distance_matrix.shape[0]
    num_groups, grouping = _preprocess_input_sng(
        distance_matrix.ids, sample_size, grouping, column)
    # Upper-triangle indices pair element-for-element with the condensed
    # distance vector below.
    tri_idxs = np.triu_indices(sample_size, k=1)
    distances = distance_matrix.condensed_form()
    return sample_size, num_groups, grouping, tri_idxs, distances
def _df_to_vector(ids, df, column):
"""Return a grouping vector from a ``DataFrame`` column.
Parameters
----------
ids : liat
IDs that will be mapped to group labels.
df : pandas.DataFrame
``DataFrame`` (indexed by distance matrix ID).
column : str
Column name in `df` containing group labels.
Returns
-------
list
Grouping vector (vector of labels) based on the IDs in
`ids`. Each ID's label is looked up in the ``DataFrame``
under the column specified by `column`.
Raises
------
ValueError
If `column` is not in the ``DataFrame``, or a distance matrix ID is
not in the ``DataFrame``.
"""
if column not in df:
raise ValueError("Column '%s' not in DataFrame." % column)
grouping = df.reindex(ids, axis=0).loc[:, column]
if grouping.isnull().any():
raise ValueError(
"One or more IDs in the distance matrix are not in the data "
"frame.")
return grouping.tolist()
def _run_monte_carlo_stats(test_stat_function, grouping, permutations):
"""Run stat test and compute significance with Monte Carlo permutations."""
if permutations < 0:
raise ValueError(
"Number of permutations must be greater than or equal to zero.")
stat = test_stat_function(grouping)
p_value = np.nan
if permutations > 0:
perm_stats = np.empty(permutations, dtype=np.float64)
for i in range(permutations):
perm_grouping = np.random.permutation(grouping)
perm_stats[i] = test_stat_function(perm_grouping)
p_value = ((perm_stats >= stat).sum() + 1) / (permutations + 1)
return stat, p_value
def _build_results(method_name, test_stat_name, sample_size, num_groups, stat,
p_value, permutations):
"""Return ``pandas.Series`` containing results of statistical test."""
return pd.Series(
data=[method_name, test_stat_name, sample_size, num_groups, stat,
p_value, permutations],
index=['method name', 'test statistic name', 'sample size',
'number of groups', 'test statistic', 'p-value',
'number of permutations'],
name='%s results' % method_name)
| {
"content_hash": "cf98dd78ed092de41effb25c5afdfd7c",
"timestamp": "",
"source": "github",
"line_count": 1416,
"max_line_length": 79,
"avg_line_length": 34.4795197740113,
"alnum_prop": 0.5707146222067468,
"repo_name": "gregcaporaso/scikit-bio",
"id": "f659397ef2346c732a9f910741d84a290ede134e",
"size": "49177",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "skbio/stats/distance/_base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "822164"
},
{
"name": "CSS",
"bytes": "4379"
},
{
"name": "Cython",
"bytes": "66355"
},
{
"name": "Dockerfile",
"bytes": "904"
},
{
"name": "Jupyter Notebook",
"bytes": "210926"
},
{
"name": "Makefile",
"bytes": "1075"
},
{
"name": "Python",
"bytes": "2960199"
},
{
"name": "Roff",
"bytes": "471"
}
],
"symlink_target": ""
} |
"""
base_solver.py
Copyright 2016 Brian Romanchuk
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class BaseSolver(object):
    """Base class for solvers.

    Holds the list of variable names managed by the solver. Time-series
    values for each variable are expected to live as same-named attributes
    on the instance (sequences indexed by time step).
    """
    def __init__(self, variable_list):
        # Names of the variables tracked by this solver.
        self.VariableList = variable_list

    def WriteCSV(self, f_name):  # pragma: no cover
        """Write the solver output to file *f_name* (tab-delimited text)."""
        out = self.CreateCsvString()
        with open(f_name, 'w') as f:
            f.write(out)

    def CreateCsvString(self):
        """Return the solver output as a tab-delimited string.

        The time axis 't' (if present) is moved to the first column. A
        header row of variable names is followed by one row per time step;
        the number of rows is taken from the length of the first column's
        data.
        """
        # Work on a copy: the previous implementation called remove() on
        # self.VariableList itself, permanently deleting 't' from the
        # instance's variable list as a side effect of generating output.
        varlist = list(self.VariableList)
        if 't' in varlist:
            varlist.remove('t')
        varlist = ['t', ] + varlist
        # Build rows in a list and join once instead of quadratic +=.
        lines = ['\t'.join(varlist)]
        for i in range(0, len(getattr(self, varlist[0]))):
            lines.append('\t'.join(str(getattr(self, v)[i]) for v in varlist))
        return '\n'.join(lines) + '\n'
| {
"content_hash": "4189d7db5bcdcdd446abf622f4ceb49d",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 72,
"avg_line_length": 31.775,
"alnum_prop": 0.6341463414634146,
"repo_name": "brianr747/SFC_models",
"id": "13c45494062e987c0e6559e18ef417c6e7283fbf",
"size": "1271",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sfc_models/base_solver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "137"
},
{
"name": "Python",
"bytes": "433125"
}
],
"symlink_target": ""
} |
import argparse
import sqlite3
import os
import csv
import statistics
# Processing-mode sentinels used as values in data_configuration below.
NORMALIZE = "normalize"
CATEGORIZE = "categorize"
NOTHING = "nothing"
ENUMERATE = "enumerate"
#######################################################
################ Configuration ########################
#######################################################
# Any meta-data we want to skip over;
# how many rows until we get the header
LINES_TO_SKIP = 1
# column_name => NORMALIZE|CATEGORIZE|ENUMERATE|NOTHING
# NOTE(review): "grade" appears twice in this literal; the duplicate is
# harmless (same value, later entry wins) but could be removed.
data_configuration = {
    "loan_amnt": NORMALIZE,
    "term": CATEGORIZE,
    "int_rate": NORMALIZE,
    "installment": NORMALIZE,
    "grade": CATEGORIZE,
    "sub_grade": CATEGORIZE,
    "emp_length": CATEGORIZE,
    "home_ownership": CATEGORIZE,
    "annual_inc": NORMALIZE,
    "verification_status": CATEGORIZE,
    "purpose": CATEGORIZE,
    "dti": NORMALIZE,
    "fico_range_low": NORMALIZE,
    "inq_last_6mths": NORMALIZE,
    "open_acc": NORMALIZE,
    "revol_bal": NORMALIZE,
    "revol_util": NORMALIZE,
    "total_acc": NORMALIZE,
    "grade": CATEGORIZE,
    "loan_status": ENUMERATE
}
# Used for SVM output, must be enumerated column
LABEL_COLUMN_NAME = "loan_status"
# What type of samples do we want from the label column
# hard code what number corresponds to each class in the
# resulting libsvm file
SET_LABELS = {"Fully Paid":0, "Charged Off":1}
# Percentage of rows routed to the train split by create_test_train_files.
TRAIN_PERC = 50
#######################################################
#######################################################
#######################################################
def main():
    """Entry point: preprocess a CSV file per data_configuration.

    Parses command-line arguments, loads the CSV, applies the configured
    per-column transform (normalize / categorize / enumerate / pass
    through), writes ``<csv>.processed.csv``, and optionally emits
    libsvm-formatted output and train/test splits.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("csv_file", type=str, help="Path to CSV file")
    parser.add_argument("--svm", action="store_true", help="Create file formatted for libsvm")
    parser.add_argument("--train", action="store_true", help="Create train/test files for libsvm")
    args = parser.parse_args()
    if(not os.path.exists(args.csv_file)):
        print("Could not find CSV file directory")
        exit(1)
    # Read column data from CSV file
    column_table, row_number = parse_csv_file(args.csv_file)
    # Process the columns we want
    output_columns = {}
    for column_name,method in data_configuration.items():
        if(column_table.get(column_name) is not None):
            if(method == NORMALIZE):
                try:
                    # standardize_column mutates the list in place, so the
                    # subsequent normalize_column call operates on (and
                    # overwrites the output with) the standardized values.
                    output_columns[column_name] = standardize_column(column_table[column_name])
                    output_columns[column_name] = normalize_column(column_table[column_name])
                except Exception as e:
                    print("Error on column", column_name, e)
            elif(method == CATEGORIZE):
                # One binary indicator column per distinct value.
                for new_name, new_column in categorize_column(column_table[column_name]).items():
                    output_columns[new_name] = new_column
            elif(method == NOTHING):
                output_columns[column_name] = column_table[column_name]
            elif(method == ENUMERATE):
                # Also writes a <csv>.<column>.legend.txt label-mapping file.
                output_columns[column_name] = enumerate_column(args.csv_file, column_table[column_name], column_name)
        else:
            print("Skipping column " + column_name)
    # Save processed data to a new file
    with open(args.csv_file + ".processed.csv", 'w', encoding="utf8") as f:
        column_idx = 0
        header_list = [] # Need to know order we are writing to file in, dicts are not ordered
        column_number = len(output_columns.keys())
        for column_name in output_columns.keys():
            header_list.append(column_name)
            f.write(column_name)
            column_idx += 1
            if(column_idx == column_number):
                f.write("\n")
            else:
                f.write(",")
        for row in range(row_number):
            column_idx = 0
            for header in header_list:
                f.write(str(output_columns[header][row]))
                column_idx += 1
                if(column_idx == column_number):
                    f.write("\n")
                else:
                    f.write(",")
    if(args.svm):
        format_for_libsvm(args.csv_file, row_number, output_columns)
    if(args.svm and args.train):
        create_test_train_files(args.csv_file + ".libsvm")
def create_test_train_files(svm_file_path):
    """Split a libsvm file into train and test files by TRAIN_PERC.

    The first TRAIN_PERC percent of lines are written to
    ``<path>.train.libsvm``; the remaining lines go to
    ``<path>.test.libsvm``.
    """
    # First pass: count the lines so the split point can be computed.
    with open(svm_file_path, 'r', encoding="utf8") as counter:
        total_rows = sum(1 for _ in counter)
    split_at = int((TRAIN_PERC/100) * total_rows)
    train_path = svm_file_path + ".train.libsvm"
    test_path = svm_file_path + ".test.libsvm"
    # Second pass: route each line to the train or test file.
    with open(svm_file_path, 'r', encoding="utf8") as source:
        with open(train_path, 'w', encoding="utf8") as train_out:
            with open(test_path, 'w', encoding="utf8") as test_out:
                for line_no, line in enumerate(source):
                    destination = train_out if line_no < split_at else test_out
                    destination.write(line)
def normalize_column(column):
    """Min-max normalize column data in place onto the range [0, 1].

    Values are parsed as floats (stripping '%' signs and spaces), then each
    is replaced with ``(value - min) / (max - min)`` rounded to 4 places.
    NOTE: the previous docstring claimed a [-1, 1] scale, but the active
    formula maps onto [0, 1]; the [-1, 1] variant remains commented out
    below. A constant column (max == min) now maps to all zeros instead of
    raising ZeroDivisionError.

    Parameters
    ----------
    column : list
        Column values as strings; the list is modified and returned.

    Returns
    -------
    list
        The same list, with every entry replaced by its normalized value.
    """
    # Convert to floats; stop at the first unparseable entry (preserves the
    # original bail-out behavior of the conversion loop).
    for idx, value in enumerate(column):
        try:
            column[idx] = float(value.strip("% "))
        except (ValueError, AttributeError):
            break
    max_value = max(column)
    min_value = min(column)
    value_range = max_value - min_value
    if value_range == 0:
        # Constant column: normalization is undefined; use zeros.
        for idx in range(len(column)):
            column[idx] = 0.0
        return column
    # Normalize each value
    # (value - min)/(max - min)
    for idx, value in enumerate(column):
        column[idx] = round((value - min_value) / value_range, 4)
        #column[idx] = round((2*value - max_value - min_value)/(max_value - min_value),4)
    return column
def standardize_column(column):
    """Standardize column data in place (z-scores, rounded to 4 places).

    Values are parsed as floats (stripping '%' signs and spaces), then each
    is replaced with ``(value - mean) / stddev`` using the sample standard
    deviation. Columns with fewer than two entries (statistics.stdev needs
    at least two data points) or with zero variance now map to all zeros
    instead of raising.

    Parameters
    ----------
    column : list
        Column values as strings; the list is modified and returned.

    Returns
    -------
    list
        The same list, with every entry replaced by its z-score.
    """
    # Convert to floats; stop at the first unparseable entry (preserves the
    # original bail-out behavior of the conversion loop).
    for idx, value in enumerate(column):
        try:
            column[idx] = float(value.strip("% "))
        except (ValueError, AttributeError):
            break
    if len(column) < 2:
        # statistics.stdev requires at least two data points.
        for idx in range(len(column)):
            column[idx] = 0.0
        return column
    mean = statistics.mean(column)
    stddev = statistics.stdev(column)
    if stddev == 0:
        # Constant column: the z-score is undefined; use zeros.
        for idx in range(len(column)):
            column[idx] = 0.0
        return column
    for idx, value in enumerate(column):
        column[idx] = round((value - mean) / stddev, 4)
    return column
def categorize_column(column):
    """Expand a discrete-valued column into binary indicator columns.

    Returns a dict mapping each distinct value in *column* to a 0/1 list
    ("one-hot" asymmetric binary encoding): entry ``i`` of the list for
    value ``v`` is 1 exactly where ``column[i] == v``.

    This is a single O(n) pass; the previous implementation rescanned the
    whole column once per distinct value (O(k*n)) with identical output.

    Parameters
    ----------
    column : list
        Column of discrete values.

    Returns
    -------
    dict
        value -> indicator list, one list per distinct value.
    """
    distinct_values = set(column)
    # Start every indicator column at all zeros ...
    indicator_columns = {value: [0] * len(column) for value in distinct_values}
    # ... then flip only the matching position in a single pass.
    for idx, value in enumerate(column):
        indicator_columns[value][idx] = 1
    return indicator_columns
def enumerate_column(csv_file, column, column_name):
    """Replace each distinct value in *column* with an integer label, in place.

    A legend file ``<csv_file>.<column_name>.legend.txt`` is written mapping
    each label to its original value, one ``label:value`` pair per line.
    Label assignment follows set iteration order, as before (so the mapping
    is arbitrary but consistent between the legend and the column).

    This builds a value->label dict and relabels in one O(n) pass; the
    previous implementation rescanned the whole column once per distinct
    value (O(k*n)) with identical output.

    Parameters
    ----------
    csv_file : str
        Path of the source CSV; used as the prefix for the legend file.
    column : list
        Column of discrete values; modified and returned.
    column_name : str
        Column name; used in the legend file name.

    Returns
    -------
    list
        The same list, with every value replaced by its integer label.
    """
    distinct_values = set(column)
    label_of = {}
    legend_path = csv_file + "." + column_name + ".legend.txt"
    with open(legend_path, "w") as legend_file:
        for label, original_value in enumerate(distinct_values):
            legend_file.write(str(label) + ":" + original_value + "\n")
            label_of[original_value] = label
    for idx, value in enumerate(column):
        column[idx] = label_of[value]
    return column
def format_for_libsvm(csv_file, row_number, output_columns):
    """lib svm format: <label> <feature_idx>:<feature_value> <feature_idx>:<feature_value> ...

    Writes ``<csv_file>.libsvm`` with one line per kept row, plus
    ``<csv_file>.libsvm.features.txt`` mapping feature numbers to column
    names. Rows whose label value is not a key of SET_LABELS are skipped
    entirely (see the ``break`` below). Requires the legend file previously
    written by enumerate_column for LABEL_COLUMN_NAME.
    """
    # Read the legend that was created from the "enumerate_column" function.
    label_legend = {}
    with open(csv_file + "." + LABEL_COLUMN_NAME + ".legend.txt", "r") as f:
        for line in f:
            label_key, label_value = line.split(":", 1)
            label_legend[int(label_key)] = label_value.strip()
    # Label column goes first; all other columns become features 1..N.
    header_list = [LABEL_COLUMN_NAME]
    column_number = len(output_columns.keys())
    for column_name in output_columns.keys():
        if(column_name != LABEL_COLUMN_NAME):
            header_list.append(column_name)
    with open(csv_file + ".libsvm", 'w', encoding="utf8") as f:
        for row in range(row_number):
            column_idx = 0
            for feature_num,header in enumerate(header_list):
                if(feature_num == 0):
                    # Skip the whole row if its class is not one we keep.
                    if(label_legend[output_columns[header][row]] not in SET_LABELS.keys()): break
                    #f.write(str(output_columns[header][row]))
                    f.write(str(SET_LABELS[label_legend[output_columns[header][row]]]))
                else:
                    f.write(str(feature_num) + ":" + str(output_columns[header][row]))
                column_idx += 1
                if(column_idx == column_number):
                    f.write("\n")
                else:
                    f.write(" ")
    with open(csv_file + ".libsvm.features.txt", 'w', encoding="utf8") as f:
        for feature_num,header in enumerate(header_list):
            f.write(str(feature_num) + ":" + str(header) + "\n")
def parse_csv_file(csv_file):
    """Load a CSV file into a column-oriented table.

    Skips LINES_TO_SKIP metadata lines, uses the next line as the header,
    and keeps only rows that pass check_line. Returns a tuple of
    (column_table, row_number) where column_table maps header name to a
    list of string values and row_number is the count of kept rows.
    """
    print("Loading " + csv_file)
    header_index_table = {}
    column_table = {}
    # Count the number of lines
    # (first pass; used only for the percent-complete progress display)
    line_number = 0
    with open(csv_file, 'r', encoding="utf8") as f:
        for line in f:
            line_number += 1
    # Read each line into memory
    line_count = 0
    row_number = 0
    with open(csv_file, 'r', encoding="utf8") as f:
        reader = csv.reader(f)
        for data_line in reader:
            if(line_count > LINES_TO_SKIP):
                # Data row: keep it only if every configured column is filled.
                if(not check_line(data_line, header_index_table)): continue
                row_number += 1
                for idx, data in enumerate(data_line):
                    data = data.strip("\"'")
                    column_table[header_index_table[idx]].append(data)
            elif(line_count == LINES_TO_SKIP):
                # Header row: record column positions and create empty columns.
                for idx, header in enumerate(data_line):
                    header = header.strip("\"'")
                    header_index_table[idx] = header
                    column_table[header] = []
            line_count += 1
            if(line_count % 10000 == 0):
                print(round(float(line_count/line_number)*100,2), "% complete")
    print("Done loading " + csv_file)
    return (column_table, row_number)
def check_line(data_line, header_index_table):
    """Validate that every required column in this sample is populated.

    A row is rejected when it has fewer than two fields, or when any field
    mapping to a column named in the global ``data_configuration`` is empty
    after stripping surrounding quotes.
    """
    if len(data_line) < 2:
        return False
    return not any(
        value.strip("\"'") == "" and header_index_table[col] in data_configuration.keys()
        for col, value in enumerate(data_line)
    )
if __name__ == "__main__": main() | {
"content_hash": "88264c55e38f8ca7a6a4202b2968e475",
"timestamp": "",
"source": "github",
"line_count": 309,
"max_line_length": 117,
"avg_line_length": 36.711974110032365,
"alnum_prop": 0.5298836389280677,
"repo_name": "lattrelr7/cse802",
"id": "d12bd911af528e214d5b9b97f2690a776590be1e",
"size": "11355",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "preprocess_lc_status.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "58842"
}
],
"symlink_target": ""
} |
import sys
from setuptools import setup, find_packages
from codecs import open
from os import path
# Resolve the directory containing this setup.py so the README can be read
# regardless of the current working directory.
current_path = path.abspath(path.dirname(__file__))
# `open` here is codecs.open (imported above), which accepts encoding= on
# Python 2 as well.
with open(path.join(current_path, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()
setup(
    name='SentimentClassifier',
    version='0.0.75',
    description='A library used in the Sentiment Analysis app',
    long_description=long_description,
    author='Michael',
    author_email='mihai@mandrescu.co',
    url='http://www.mandrescu.co',
    license='MIT',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Build Tools',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7'
    ],
    keywords='nltk',
    packages=find_packages(exclude=['contrib', 'docs', 'tests']),
    # Ship the package's shell scripts alongside the Python sources.
    package_data={
        'SentimentClassifier': ['*.sh'],
    },
    install_requires=['nltk']
)
| {
"content_hash": "274c308bcf4f4084b8be0a6ef95a21ad",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 71,
"avg_line_length": 24.88095238095238,
"alnum_prop": 0.6296650717703349,
"repo_name": "mayk93/SentimentAnalysis",
"id": "1080c05d732274277ff11ed96d73c7822899fd91",
"size": "1069",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Libraries/SentimentClassifier/setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1229"
},
{
"name": "HTML",
"bytes": "650"
},
{
"name": "JavaScript",
"bytes": "1281386"
},
{
"name": "Python",
"bytes": "18916"
},
{
"name": "Shell",
"bytes": "6915"
}
],
"symlink_target": ""
} |
import numpy, scipy
from pyamg import gallery, rootnode_solver
from pyamg.gallery import stencil_grid
from pyamg.gallery.diffusion import diffusion_stencil_2d
from cvoutput import *
from convergence_tools import print_cycle_history
# NOTE: this demo is Python 2 code (print statements, scipy.rand/scipy.pi).
##
# Run Rotated Anisotropic Diffusion
n = 10
nx = n
ny = n
# 2D finite-element diffusion stencil, strongly anisotropic and rotated.
stencil = diffusion_stencil_2d(type='FE',epsilon=0.001,theta=scipy.pi/3)
A = stencil_grid(stencil, (nx,ny), format='csr')
# Fixed seed so the demo output is reproducible.
numpy.random.seed(625)
x = scipy.rand(A.shape[0])
b = A*scipy.rand(A.shape[0])
# Root-node AMG solver with evolution strength-of-connection and
# energy-minimizing prolongation smoothing.
ml = rootnode_solver(A, strength=('evolution', {'epsilon':2.0}),
                     smooth=('energy', {'degree':2}), max_coarse=10)
resvec = []
x = ml.solve(b, x0=x, maxiter=20, tol=1e-14, residuals=resvec)
print_cycle_history(resvec, ml, verbose=True, plotting=False)
##
# Write ConnectionViewer files for multilevel hierarchy ml
xV,yV = numpy.meshgrid(numpy.arange(0,ny,dtype=float),numpy.arange(0,nx,dtype=float))
Verts = numpy.concatenate([[xV.ravel()],[yV.ravel()]],axis=0).T
outputML("test", Verts, ml)
print "\n\nOutput files for matrix stencil visualizations in ConnectionViewer are: \n \
test_A*.mat \n test_fine*.marks \n test_coarse*.marks \n \
test_R*.mat \n test_P*.mat \nwhere \'*\' is the level number"
##
print "\n\nYou can download ConnectionViewer from \nhttp://gcsc.uni-frankfurt.de/Members/mrupp/connectionviewer/ \n\nWhen you open test_A0.mat with ConnectionViewer, you'll get"
##
print "\nIn ConnectionViewer, you can zoom in with the mousewheel\n \
and drag the grid around. By clicking on a node, you can see its\n \
matrix connections."
| {
"content_hash": "4f0dbb06c3a0578b0ed470274960fd69",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 177,
"avg_line_length": 36.51162790697674,
"alnum_prop": 0.7203821656050955,
"repo_name": "pombreda/pyamg",
"id": "8dad3ca308945e808161a9e8821ce9e511ec331a",
"size": "1607",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Examples/Visualization_ConnectionViewer/demo.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "1112880"
},
{
"name": "CSS",
"bytes": "9832"
},
{
"name": "Makefile",
"bytes": "3249"
},
{
"name": "Matlab",
"bytes": "2742"
},
{
"name": "Python",
"bytes": "1215339"
},
{
"name": "Shell",
"bytes": "558"
},
{
"name": "TeX",
"bytes": "232"
}
],
"symlink_target": ""
} |
"""NoteSequence processing pipelines."""
import copy
from magenta.music import constants
from magenta.music import sequences_lib
from magenta.pipelines import pipeline
from magenta.pipelines import statistics
from magenta.protobuf import music_pb2
import tensorflow as tf
# Shortcut to chord symbol text annotation type.
CHORD_SYMBOL = music_pb2.NoteSequence.TextAnnotation.CHORD_SYMBOL
class NoteSequencePipeline(pipeline.Pipeline):
    """Base class for pipelines whose input and output are NoteSequences."""
    def __init__(self, name=None):
        """Initializes the pipeline with NoteSequence input and output types.

        Intended to be invoked from subclass constructors only.

        Args:
          name: Pipeline name.
        """
        super(NoteSequencePipeline, self).__init__(
            name=name,
            input_type=music_pb2.NoteSequence,
            output_type=music_pb2.NoteSequence)
class Splitter(NoteSequencePipeline):
    """Pipeline stage that chops each NoteSequence into fixed-length pieces."""
    def __init__(self, hop_size_seconds, name=None):
        """Creates a Splitter pipeline.

        Args:
          hop_size_seconds: Hop size in seconds used to split incoming
              NoteSequences at regular intervals.
          name: Pipeline name.
        """
        super(Splitter, self).__init__(name=name)
        self._hop_size_seconds = hop_size_seconds
    def transform(self, note_sequence):
        hop = self._hop_size_seconds
        return sequences_lib.split_note_sequence(note_sequence, hop)
class TimeChangeSplitter(NoteSequencePipeline):
    """Splits NoteSequences wherever the time signature or tempo changes."""
    def transform(self, note_sequence):
        pieces = sequences_lib.split_note_sequence_on_time_changes(note_sequence)
        return pieces
class Quantizer(NoteSequencePipeline):
    """A Pipeline that quantizes NoteSequence data."""
    def __init__(self, steps_per_quarter=None, steps_per_second=None, name=None):
        """Creates a Quantizer pipeline.
        Exactly one of `steps_per_quarter` and `steps_per_second` should be defined.
        Args:
          steps_per_quarter: Steps per quarter note to use for quantization.
          steps_per_second: Steps per second to use for quantization.
          name: Pipeline name.
        Raises:
          ValueError: If both or neither of `steps_per_quarter` and
              `steps_per_second` are set.
        """
        super(Quantizer, self).__init__(name=name)
        # XOR check: exactly one quantization resolution must be supplied.
        if (steps_per_quarter is not None) == (steps_per_second is not None):
            raise ValueError(
                'Exactly one of steps_per_quarter or steps_per_second must be set.')
        self._steps_per_quarter = steps_per_quarter
        self._steps_per_second = steps_per_second
    def transform(self, note_sequence):
        try:
            # Musical-time quantization when steps_per_quarter was given,
            # otherwise absolute (wall-clock) quantization.
            if self._steps_per_quarter is not None:
                quantized_sequence = sequences_lib.quantize_note_sequence(
                    note_sequence, self._steps_per_quarter)
            else:
                quantized_sequence = sequences_lib.quantize_note_sequence_absolute(
                    note_sequence, self._steps_per_second)
            return [quantized_sequence]
        # Malformed sequences are dropped (empty list returned) and recorded
        # in pipeline statistics instead of aborting the whole run.
        except sequences_lib.MultipleTimeSignatureError as e:
            tf.logging.warning('Multiple time signatures in NoteSequence %s: %s',
                               note_sequence.filename, e)
            self._set_stats([statistics.Counter(
                'sequences_discarded_because_multiple_time_signatures', 1)])
            return []
        except sequences_lib.MultipleTempoError as e:
            tf.logging.warning('Multiple tempos found in NoteSequence %s: %s',
                               note_sequence.filename, e)
            self._set_stats([statistics.Counter(
                'sequences_discarded_because_multiple_tempos', 1)])
            return []
        except sequences_lib.BadTimeSignatureError as e:
            tf.logging.warning('Bad time signature in NoteSequence %s: %s',
                               note_sequence.filename, e)
            self._set_stats([statistics.Counter(
                'sequences_discarded_because_bad_time_signature', 1)])
            return []
class SustainPipeline(NoteSequencePipeline):
    """Realizes sustain-pedal control changes on incoming NoteSequences."""
    def transform(self, note_sequence):
        sustained = sequences_lib.apply_sustain_control_changes(note_sequence)
        return [sustained]
class StretchPipeline(NoteSequencePipeline):
    """Emits one time-stretched copy of the input per configured factor."""
    def __init__(self, stretch_factors, name=None):
        """Creates a StretchPipeline.

        Args:
          stretch_factors: A Python list of uniform stretch factors to apply.
          name: Pipeline name.
        """
        super(StretchPipeline, self).__init__(name=name)
        self._stretch_factors = stretch_factors
    def transform(self, note_sequence):
        stretched = []
        for factor in self._stretch_factors:
            stretched.append(
                sequences_lib.stretch_note_sequence(note_sequence, factor))
        return stretched
class TranspositionPipeline(NoteSequencePipeline):
    """Emits transposed copies of the input NoteSequence."""
    def __init__(self, transposition_range, min_pitch=constants.MIN_MIDI_PITCH,
                 max_pitch=constants.MAX_MIDI_PITCH, name=None):
        """Creates a TranspositionPipeline.

        Args:
          transposition_range: Collection of integer pitch steps to transpose.
          min_pitch: Integer pitch value below which notes will be considered
              invalid.
          max_pitch: Integer pitch value above which notes will be considered
              invalid.
          name: Pipeline name.
        """
        super(TranspositionPipeline, self).__init__(name=name)
        self._transposition_range = transposition_range
        self._min_pitch = min_pitch
        self._max_pitch = max_pitch
    def transform(self, sequence):
        stats = {stat_name: statistics.Counter(stat_name)
                 for stat_name in ('skipped_due_to_range_exceeded',
                                   'transpositions_generated')}
        # These sequence features are not adjusted during transposition, so
        # warn when they are present.
        if sequence.key_signatures:
            tf.logging.warn('Key signatures ignored by TranspositionPipeline.')
        if any(note.pitch_name for note in sequence.notes):
            tf.logging.warn('Pitch names ignored by TranspositionPipeline.')
        if any(ta.annotation_type == CHORD_SYMBOL
               for ta in sequence.text_annotations):
            tf.logging.warn('Chord symbols ignored by TranspositionPipeline.')
        transposed = []
        for delta in self._transposition_range:
            # Transpose even for delta == 0 so out-of-range pitches are
            # handled identically for every requested amount.
            candidate = self._transpose(sequence, delta, stats)
            if candidate is not None:
                transposed.append(candidate)
        stats['transpositions_generated'].increment(len(transposed))
        self._set_stats(stats.values())
        return transposed
    def _transpose(self, ns, amount, stats):
        """Returns a transposed deep copy of `ns`, or None when any
        non-drum note would leave the [min_pitch, max_pitch] range."""
        shifted = copy.deepcopy(ns)
        for note in shifted.notes:
            if note.is_drum:
                continue
            note.pitch += amount
            if not self._min_pitch <= note.pitch <= self._max_pitch:
                stats['skipped_due_to_range_exceeded'].increment()
                return None
        return shifted
| {
"content_hash": "44bcff3c96373a48940557b672ab98e9",
"timestamp": "",
"source": "github",
"line_count": 188,
"max_line_length": 80,
"avg_line_length": 36.53191489361702,
"alnum_prop": 0.6895748398369249,
"repo_name": "adarob/magenta",
"id": "7d7baad5272306bc176aa0c29ed84bcc857727e3",
"size": "7453",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "magenta/pipelines/note_sequence_pipelines.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1933"
},
{
"name": "Python",
"bytes": "2941402"
},
{
"name": "Shell",
"bytes": "24986"
}
],
"symlink_target": ""
} |
import unittest
import sys
# Import the student's solution module; exit cleanly with a message
# (in Portuguese, for the course audience) when it is missing.
try:
    import aula1_resp as aula1
except ImportError:
    print('Erro: o arquivo aula1.py não foi encontrado')
    sys.exit(1)
# Upper bound used by the prime-checking test below.
MAX_PRIMES = 10000
def primes_sieve(limit):
    """Return all primes <= *limit* using the sieve of Eratosthenes."""
    upper = limit + 1
    composite = [False] * upper
    primes = []
    for candidate in range(2, upper):
        if composite[candidate]:
            continue
        primes.append(candidate)
        # Mark every multiple of the freshly found prime as composite.
        for multiple in range(candidate * 2, upper, candidate):
            composite[multiple] = True
    return primes
def fibonacci(n):
    """Return the n-th Fibonacci number, with fibonacci(0) == 0."""
    prev, curr = 0, 1
    for _ in range(n):
        prev, curr = curr, prev + curr
    return prev
def factorial(n):
    """Return n! for a non-negative integer n.

    BUG FIX: the original multiplied into *n* itself, which returned 0 for
    n == 0 (0! is 1) and returned n unchanged for negatives.  Existing
    callers in this file only use n in 1..69, which is unaffected.

    Raises:
        ValueError: if n is negative.
    """
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for i in range(2, n + 1):
        result *= i
    return result
class TesteAula1(unittest.TestCase):
    """Checks the student's aula1 module against the reference helpers above.

    Each skipIf decorator is evaluated at class-creation time: a test is
    skipped (with a Portuguese message) when the corresponding function is
    absent from the imported aula1 module.
    """
    @unittest.skipIf('is_prime' not in vars(aula1),
                     'Função "is_prime" não foi encontrada')
    def test_is_prime(self):
        # The reference sieve defines the ground truth for primality.
        primes = primes_sieve(MAX_PRIMES)
        for i in range(1, MAX_PRIMES):
            if aula1.is_prime(i):
                self.assertIn(i, primes)
            else:
                self.assertNotIn(i, primes)
    @unittest.skipIf('fibonacci' not in vars(aula1),
                     'Função "fibonacci" não foi encontrada')
    def test_fibonacci(self):
        for i in range(0, 30):
            self.assertEqual(fibonacci(i), aula1.fibonacci(i))
    @unittest.skipIf('factorial' not in vars(aula1),
                     'Função "factorial" não foi encontrada')
    def test_factorial(self):
        for i in range(1, 70):
            self.assertEqual(factorial(i), aula1.factorial(i))
if __name__ == '__main__':
unittest.main(verbosity=2) | {
"content_hash": "dfc36784c13dfb67569b96b47813dc4f",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 53,
"avg_line_length": 21.174603174603174,
"alnum_prop": 0.6671664167916042,
"repo_name": "CalicoUFSC/minicurso-python",
"id": "1a58bf14f278628307ba469c1664385de9dcf958",
"size": "1344",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Aula1/tests_aula1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8851"
}
],
"symlink_target": ""
} |
import glob
import io
import re
from os.path import basename
from os.path import dirname
from os.path import join
from os.path import splitext
from setuptools import find_packages
from setuptools import setup
def read(*names, **kwargs):
    """Read a text file located relative to this setup.py.

    Args:
        *names: path segments joined onto the directory containing setup.py.
        **kwargs: only ``encoding`` is honored (default ``"utf8"``).

    Returns:
        The decoded file contents.
    """
    # BUG FIX: the original returned io.open(...).read() without ever closing
    # the handle, leaking a file descriptor (and emitting ResourceWarning).
    with io.open(
        join(dirname(__file__), *names),
        encoding=kwargs.get("encoding", "utf8")
    ) as stream:
        return stream.read()
setup(
    name="nose-htmloutput",
    version="0.6.0",
    license="BSD",
    description="Nose plugin that generates a nice html test report.",
    # Long description: README followed by the changelog, with Sphinx-style
    # :obj:`...` roles rewritten to plain ``code`` markup for PyPI rendering.
    long_description="%s\n%s" % (read("README.rst"), re.sub(":obj:`~?(.*?)`", r"``\1``", read("CHANGELOG.rst"))),
    author="Ionel Cristian Mărieș",
    author_email="contact@ionelmc.ro",
    url="https://github.com/ionelmc/nose-htmloutput",
    # src/ layout: packages and top-level modules both live under src/.
    packages=find_packages("src"),
    package_dir={"": "src"},
    py_modules=[splitext(basename(i))[0] for i in glob.glob("src/*.py")],
    include_package_data=True,
    zip_safe=False,
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: BSD License",
        "Operating System :: Unix",
        "Operating System :: POSIX",
        "Operating System :: Microsoft :: Windows",
        "Programming Language :: Python",
        "Programming Language :: Python :: 2.6",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.3",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: Implementation :: CPython",
        "Programming Language :: Python :: Implementation :: PyPy",
        "Topic :: Utilities",
    ],
    keywords=[
    ],
    install_requires=[
        'Jinja2',
        'nose',
    ],
    extras_require={
    },
    # Register the plugin with nose's 0.10+ plugin entry-point group.
    entry_points = {
        'nose.plugins.0.10': [
            'html = nose_htmloutput:HtmlOutput'
        ]
    }
)
| {
"content_hash": "9fa3e5b39c22f98bb5fc0f1c423c8c28",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 113,
"avg_line_length": 30.09375,
"alnum_prop": 0.5950155763239875,
"repo_name": "ionelmc/nose-htmloutput",
"id": "ef20bc38a8a7598d577c54f687fde168755ba252",
"size": "1954",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "4330"
},
{
"name": "Python",
"bytes": "12909"
}
],
"symlink_target": ""
} |
import sys, types
class DbData(object):
    """Simple value holder bundling database-related objects.

    Presumably SQLAlchemy metadata, declarative base and session objects —
    TODO confirm against the callers.
    """
    def __init__(self, metadata, DeclarativeBase, session):
        self.session = session
        self.metadata = metadata
        self.DeclarativeBase = DeclarativeBase
class BaseProcessor(object):
    """Interface for session processors; every hook is a no-op by default."""
    def __init__(self, to_mud, to_console):
        """Called when a session is being opened.

        Both arguments are callables:

        ``to_mud(text, type='raw')``
            Sends a message to the mud. The type is 'raw' for normal
            text or 'gmcp' for GMCP messages.
        ``to_console(message)``
            Sends a message (a JSON-serializable object) to the console.
        """
    def shutdown(self):
        """Called when a session is being closed."""
    def reload(self):
        """Called when scripts reloading is requested."""
    def from_mud(self, message, type):
        """Called when a message has been received from the mud."""
    def from_console(self, line):
        """Called when a message has been received from the console."""
class ScriptError(Exception):
def __init__(self, message):
self.message = message
def _get_mod(modulePath, reload_=False):
try:
aMod = sys.modules[modulePath]
if not isinstance(aMod, types.ModuleType):
raise KeyError
if reload:
reload(aMod)
except KeyError:
# The last [''] is very important.
aMod = __import__(modulePath, globals(), locals(), [''])
sys.modules[modulePath] = aMod
return aMod
def _get_func(fullFuncName, reload=False):
    """Retrieve a callable from its full dotted-package name."""
    dot = fullFuncName.rfind(u".")
    attr_name = fullFuncName[dot + 1:]
    module_path = fullFuncName[:dot]
    module_obj = _get_mod(module_path, reload)
    func_obj = getattr(module_obj, attr_name)
    assert callable(func_obj), u"%s is not callable." % fullFuncName
    return func_obj
def _get_class(fullClassName, parentClass=None, reload=False):
    """Load a module and retrieve a class (not an instance).

    If parentClass is supplied, the loaded class must be parentClass or a
    subclass of it; otherwise TypeError is raised.  (The previous docstring
    claimed None was returned in that case, which did not match the code.)
    """
    aClass = _get_func(fullClassName, reload)
    if parentClass is not None:
        if not issubclass(aClass, parentClass):
            raise TypeError(u"%s is not a subclass of %s" %
                            (fullClassName, parentClass))
    return aClass
def load_processor(user_name, game_name, reload=False):
    # Resolve the per-user, per-game Processor class; it must be a subclass
    # of BaseProcessor or _get_class raises TypeError.
    return _get_class('mudwyrm_users.%s.%s.Processor' % (user_name, game_name),
                      BaseProcessor, reload)
| {
"content_hash": "e481edf1a0cc9647342b65989a706da7",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 79,
"avg_line_length": 32.77906976744186,
"alnum_prop": 0.5877970911670806,
"repo_name": "sh-ft/mudwyrm_engine",
"id": "58a9dbf15fa51c16b7d2567add012a1444fb33c1",
"size": "2819",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mudwyrm_engine/processor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "61742"
}
],
"symlink_target": ""
} |
import argparse
import os
import re
def parse_invocations(path):
    """Scan a C test source file and build tt_run_test invocation lines.

    Finds every function declared as ``static char *test_<name>(`` (with or
    without spaces around ``*``) and returns one formatted invocation line
    per match, in file order.
    """
    with open(path, 'r') as source:
        names = re.findall(r'static char ?\* ?test_([\w_]+)\(', source.read())
    line_template = " tt_run_test(test_{}, number_of_tests_run, number_of_tests_failed);"
    return [line_template.format(name) for name in names]
# Command-line interface for the test-file generator.
parser = argparse.ArgumentParser(
    description='Generate the test invocation files.')
parser.add_argument('-i', '--additional-include', dest='includes', action='append',
                    help='the path of the file to include at the top of the generated test file')
parser.add_argument('-t', '--test-dir', dest='test_dir', required=True,
                    help='the base path for the tests directory')
parser.add_argument('-o', dest='output_path', required=True,
                    help='the path to output the generated test source file')
parser.add_argument('-p', '--include-prefix', dest='include_prefix',
                    default='', help='the path to prefix before all include directives')
args = parser.parse_args()
# The generated C source is accumulated line by line and joined at the end.
test_file_lines = []
test_file_lines.append('// this is a generated file, DO NOT MODIFY!!')
test_file_lines.append('')
additional_includes = []
test_invocations = []
test_file_lines.append('// includes specified from tool invocation')
# NOTE(review): args.includes is None when no -i flag is passed, which would
# make this loop raise TypeError — confirm -i is always supplied.
for include in args.includes:
    test_file_lines.append('#include "{}{}"'.format(
        args.include_prefix, include))
# generate each of the test sections based on the presence of a .c file.
for root, _, files in os.walk(args.test_dir):
    for file in [f for f in files if os.path.splitext(f)[1] == '.c']:
        path = os.path.join(root, file)
        additional_includes.append(path)
        test_invocations += parse_invocations(path)
test_file_lines.append('')
test_file_lines.append('// test file includes')
for include in additional_includes:
    test_file_lines.append('#include "{}{}"'.format(
        args.include_prefix, include))
test_file_lines.append('')
# Emit the main() harness that runs every discovered test.
test_file_lines.append('int main(int argc, char **argv) {')
test_file_lines.append(' int number_of_tests_run = 0;')
test_file_lines.append(' int number_of_tests_failed = 0;')
test_file_lines.append('')
test_file_lines.append(' tt_test_header();')
test_file_lines.append('')
test_file_lines.append(' // test invocations')
for invocation in test_invocations:
    test_file_lines.append(invocation)
test_file_lines.append('')
test_file_lines.append(
    ' tt_test_footer(number_of_tests_run, number_of_tests_failed);')
test_file_lines.append('')
# Exit status reflects whether any test failed.
test_file_lines.append(' return number_of_tests_failed != 0;')
test_file_lines.append('}')
# ensure there is always a blank line
test_file_lines.append('')
test_file_path = os.path.realpath(args.output_path)
with open(test_file_path, 'w') as test_file:
    test_file.write('\n'.join(test_file_lines))
| {
"content_hash": "dcf0337990f9a6032467b513dbc77aa5",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 128,
"avg_line_length": 39.026315789473685,
"alnum_prop": 0.6608226567768037,
"repo_name": "owensd/proteus",
"id": "305dc0c7a4dbc00f916e8905071e5af78fc53faf",
"size": "3013",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/generate-tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "19580"
},
{
"name": "Makefile",
"bytes": "679"
},
{
"name": "Python",
"bytes": "3013"
},
{
"name": "Shell",
"bytes": "371"
},
{
"name": "Swift",
"bytes": "9591"
}
],
"symlink_target": ""
} |
import json
import requests
class NotAuthorizedException(BaseException):
    """Raised when an API call is attempted before authorization."""


class IncorrectAuthInfoException(Exception):
    """Raised when neither a token nor a (client_id, scope) pair is given."""


class RequestErroredException(Exception):
    """Raised when the VK API answers with an error payload."""
def make_url(url, params):
    """Append *params* as a query string to *url*.

    Values are interpolated with ``%s`` — no URL-encoding is performed, as
    in the original implementation.  An empty *params* dict returns *url*
    unchanged.
    """
    if not params:
        return url
    # FIX: dict.iteritems() is Python-2-only; dict.items() behaves the same
    # on Python 2 and also works on Python 3.
    query = '&'.join("%s=%s" % (key, value) for key, value in params.items())
    return url + '?' + query
def parse_url(url):
    """Extract the key/value pairs from a URL's fragment (text after '#')."""
    fragment = url.split('#')[1]
    parsed = {}
    for chunk in fragment.split('&'):
        pieces = chunk.split('=')
        parsed[pieces[0]] = pieces[1]
    return parsed
def list_to_str(lst):
    """Join the elements of *lst* into a single comma-separated string."""
    return ','.join(lst)
class VkApi:
    """Minimal VK API client using the OAuth implicit flow (Python 2 code)."""
    # Template of OAuth "implicit flow" parameters; client_id and scope are
    # filled in per-instance by __init__.  NOTE(review): this dict is a
    # class attribute, so it is shared between all instances.
    __auth_vars = {
        "client_id": int(),
        "scope": str(),
        "redirect_uri": "https://oauth.vk.com/blank.html",
        "display": "popup",
        "v": "5.5",
        "response_type": "token"
    }
    __auth_url = "https://oauth.vk.com/authorize"
    __api_endpoint = "https://api.vk.com/method/%s"
    # Class-level defaults; successful authorization sets instance
    # attributes that shadow them.
    __authorized = False
    __access_token = str()
    def __init__(self, client_id=0, scope=[], access_token='', console_auth=True, keep_token=False, try_loading_token=False):
        # NOTE(review): `scope=[]` is a mutable default argument — harmless
        # here since it is never mutated, but fragile.
        # Optionally restore a previously saved token from auth.json.
        if try_loading_token and self.__load_auth_info():
            self.__authorized = True
        if access_token:
            # An explicit token wins over everything else.
            self.__access_token = access_token
            self.__authorized = True
        elif client_id and scope:
            self.__auth_vars["client_id"] = client_id
            self.__auth_vars["scope"] = list_to_str(scope)
            if console_auth:
                self.__console_auth()
        else:
            # Neither a token nor (client_id, scope) was supplied.
            raise IncorrectAuthInfoException()
        if keep_token:
            self.__save_auth_info()
    def __console_auth(self):
        # Interactive flow: the user opens the URL, authorizes, and pastes
        # back the redirect URL whose fragment carries the access token.
        print "### %s ###" % self.get_auth_url()
        print "### Please, open this URL in your browser and authorize ###"
        response = parse_url(raw_input(">>> Resulting URL: "))
        self.__access_token = response["access_token"]
        self.__authorized = True
    def __save_auth_info(self):
        # Persist the token to auth.json in the current working directory.
        with open("auth.json", "w") as file:
            json.dump({"access_token": self.__access_token}, file)
    def __load_auth_info(self):
        # Best-effort restore; returns False on any failure.
        # NOTE(review): the bare `except:` hides unrelated errors too.
        try:
            with open("auth.json", "r") as file:
                info = json.load(file)
                self.__access_token = info["access_token"]
                return True
        except:
            return False
    def set_token(self, access_token):
        # Manually install a token and mark the client authorized.
        self.__access_token = access_token
        self.__authorized = True
    def get_token(self):
        return self.__access_token
    def is_authorized(self):
        return self.__authorized
    def get_auth_url(self):
        # Full authorization URL built from the class-level template.
        return make_url(self.__auth_url, self.__auth_vars)
    def request(self, api_method, data={}):
        # NOTE(review): `data={}` is a mutable default that this method
        # mutates below — the access token leaks into subsequent calls.
        if not self.__authorized:
            raise NotAuthorizedException()
        data["access_token"] = self.__access_token
        url = self.__api_endpoint % api_method
        r = requests.get(url, params=data)
        # VK wraps successful results in a "response" key; anything else is
        # surfaced as RequestErroredException below.
        try:
            return r.json()["response"]
        except KeyError:
raise RequestErroredException(r.json()['error']['error_msg']) | {
"content_hash": "091777025436b205aafb6391602a8ccb",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 125,
"avg_line_length": 29.803571428571427,
"alnum_prop": 0.5395446375074895,
"repo_name": "falceeffect/PyVk",
"id": "ce3bb6d318fdc811bdd537e950b76da24a7af5b7",
"size": "3338",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "VkApi.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3338"
}
],
"symlink_target": ""
} |
import contextlib
from unittest import mock
from heat.common import exception as exc
from heat.engine import stack
from heat.engine import template
from heat.tests import common
from heat.tests import utils
class SoftwareConfigTest(common.HeatTestCase):
    """Unit tests for the OS::Heat::SoftwareConfig resource handlers."""
    def setUp(self):
        super(SoftwareConfigTest, self).setUp()
        self.ctx = utils.dummy_context()
        self.properties = {
            'group': 'Heat::Shell',
            'inputs': [],
            'outputs': [],
            'options': {},
            'config': '#!/bin/bash'
        }
        # Build a one-resource stack containing the config under test.
        self.stack = stack.Stack(
            self.ctx, 'software_config_test_stack',
            template.Template({
                'HeatTemplateFormatVersion': '2012-12-12',
                'Resources': {
                    'config_mysql': {
                        'Type': 'OS::Heat::SoftwareConfig',
                        'Properties': self.properties
                    }}}))
        self.config = self.stack['config_mysql']
        # Replace the RPC client with a mock so no engine calls are made.
        self.rpc_client = mock.MagicMock()
        self.config._rpc_client = self.rpc_client
        # Mimic the real ignore_error_by_name context manager, which
        # swallows NotFound and propagates everything else.
        @contextlib.contextmanager
        def exc_filter(*args):
            try:
                yield
            except exc.NotFound:
                pass
        self.rpc_client.ignore_error_by_name.side_effect = exc_filter
    def test_handle_create(self):
        # The resource id must be taken from the RPC create response.
        config_id = 'c8a19429-7fde-47ea-a42f-40045488226c'
        value = {'id': config_id}
        self.rpc_client.create_software_config.return_value = value
        self.config.handle_create()
        self.assertEqual(config_id, self.config.resource_id)
    def test_handle_delete(self):
        # NOTE(review): this sets resource_id on the TestCase itself;
        # `self.config.resource_id = None` was probably intended.
        self.resource_id = None
        self.assertIsNone(self.config.handle_delete())
        config_id = 'c8a19429-7fde-47ea-a42f-40045488226c'
        self.config.resource_id = config_id
        self.rpc_client.delete_software_config.return_value = None
        self.assertIsNone(self.config.handle_delete())
        # A NotFound from the engine must be swallowed by the exc filter.
        self.rpc_client.delete_software_config.side_effect = exc.NotFound
        self.assertIsNone(self.config.handle_delete())
    def test_resolve_attribute(self):
        # Unknown attributes and missing resource ids resolve to None.
        self.assertIsNone(self.config._resolve_attribute('others'))
        self.config.resource_id = None
        self.assertIsNone(self.config._resolve_attribute('config'))
        self.config.resource_id = 'c8a19429-7fde-47ea-a42f-40045488226c'
        value = {'config': '#!/bin/bash'}
        self.rpc_client.show_software_config.return_value = value
        self.assertEqual(
            '#!/bin/bash', self.config._resolve_attribute('config'))
        # NotFound during show also resolves to None instead of raising.
        self.rpc_client.show_software_config.side_effect = exc.NotFound
        self.assertIsNone(self.config._resolve_attribute('config'))
| {
"content_hash": "1c9b71501e30c8c57203f758a9d959e1",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 73,
"avg_line_length": 37.72222222222222,
"alnum_prop": 0.6078792341678939,
"repo_name": "openstack/heat",
"id": "f194ec7355a4d9c706ec5d12aea9d6deb79f6dc6",
"size": "3291",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "heat/tests/openstack/heat/test_software_config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "9145593"
},
{
"name": "Shell",
"bytes": "65832"
}
],
"symlink_target": ""
} |
# Sphinx build configuration for the django-generic-confirmation docs
# (generated by sphinx-quickstart; most options are left at their defaults).
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.txt'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django-generic-confirmation'
copyright = u'2009, Arne Brodowski'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
unused_docs = ['README',]
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['.']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_use_modindex = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-generic-confirmationdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('index', 'django-generic-confirmation.tex', u'django-generic-confirmation Documentation',
   u'Arne Brodowski', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
| {
"content_hash": "565e2a719fd5f7b2a539414f4181440f",
"timestamp": "",
"source": "github",
"line_count": 181,
"max_line_length": 92,
"avg_line_length": 32.966850828729285,
"alnum_prop": 0.7125858890564772,
"repo_name": "m000/django_generic_confirmation",
"id": "b1177c00ea29a533277a6eaa01b4f352a20c2e16",
"size": "6405",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "56693"
}
],
"symlink_target": ""
} |
from datetime import time
from flask import request
from wtforms.fields import BooleanField, HiddenField, IntegerField, StringField, TextAreaField
from wtforms.validators import DataRequired, InputRequired, Optional, ValidationError
from indico.modules.events.papers.fields import PaperEmailSettingsField
from indico.modules.events.papers.models.reviews import PaperAction
from indico.util.i18n import _
from indico.web.flask.util import url_for
from indico.web.forms.base import IndicoForm
from indico.web.forms.fields import (EditableFileField, FileField, HiddenEnumField, HiddenFieldList,
IndicoDateTimeField, IndicoMarkdownField, IndicoTagListField)
from indico.web.forms.fields.principals import PrincipalListField
from indico.web.forms.util import inject_validators
from indico.web.forms.validators import HiddenUnless, LinkedDateTime, UsedIf
from indico.web.forms.widgets import SwitchWidget
def make_competences_form(event):
    """Dynamically build a form class with one competences field per assignee.

    The returned class subclasses ``IndicoForm`` and carries a
    ``competences_<assignee_id>`` tag-list field for every assignee of the
    call for papers of *event*.
    """
    field_attrs = {f'competences_{assignee.id}': IndicoTagListField('Competences')
                   for assignee in event.cfp.assignees}
    return type('PaperCompetencesForm', (IndicoForm,), field_attrs)
class PaperTeamsForm(IndicoForm):
    """Form to configure the teams (managers, judges, reviewers) of the CFP."""
    managers = PrincipalListField(_('Paper managers'), allow_groups=True, allow_emails=True,
                                  description=_('List of users allowed to manage the call for papers'))
    judges = PrincipalListField(_('Judges'),
                                description=_('List of users allowed to judge papers'))
    content_reviewers = PrincipalListField(_('Content reviewers'),
                                           description=_('List of users allowed to review the content of '
                                                         'the assigned papers'))
    layout_reviewers = PrincipalListField(_('Layout reviewers'),
                                          description=_('List of users allowed to review the layout of the '
                                                        'assigned papers'))

    def __init__(self, *args, **kwargs):
        self.event = kwargs.pop('event')
        super().__init__(*args, **kwargs)
        # Drop the reviewer fields for reviewing types that are disabled in
        # this event's call for papers.
        if not self.event.cfp.content_reviewing_enabled:
            del self.content_reviewers
        if not self.event.cfp.layout_reviewing_enabled:
            del self.layout_reviewers
class PapersScheduleForm(IndicoForm):
    """Form to set the start/end of the paper submission period."""
    start_dt = IndicoDateTimeField(_("Start"), [Optional()], default_time=time(0, 0),
                                   description=_("The moment users can start submitting papers"))
    end_dt = IndicoDateTimeField(_("End"), [Optional(), LinkedDateTime('start_dt')], default_time=time(23, 59),
                                 description=_("The moment the submission process ends"))

    def __init__(self, *args, **kwargs):
        # The event is stored on the form; datetime fields read it to resolve
        # the event's timezone.
        self.event = kwargs.pop('event')
        super().__init__(*args, **kwargs)
class BulkPaperJudgmentForm(IndicoForm):
    """Form to judge several papers (by contribution id) at once."""
    judgment = HiddenEnumField(enum=PaperAction)
    contribution_id = HiddenFieldList()
    submitted = HiddenField()
    judgment_comment = TextAreaField(_("Comment"), render_kw={'placeholder': _("Leave a comment for the submitter..."),
                                                              'class': 'grow'})

    def __init__(self, *args, **kwargs):
        self.event = kwargs.pop('event')
        super().__init__(*args, **kwargs)

    def is_submitted(self):
        # Only treat the form as submitted when the hidden ``submitted``
        # marker is present in the POST data, so other forms on the same
        # page do not trigger this one.
        return super().is_submitted() and 'submitted' in request.form
class PaperReviewingSettingsForm(IndicoForm):
    """Settings form for paper reviewing."""

    # Fields that become immutable once ratings exist (stripped in `data`).
    RATING_FIELDS = ('scale_lower', 'scale_upper')

    announcement = IndicoMarkdownField(_('Announcement'), editor=True)
    scale_lower = IntegerField(_("Scale (from)"), [UsedIf(lambda form, field: not form.has_ratings), InputRequired()])
    scale_upper = IntegerField(_("Scale (to)"), [UsedIf(lambda form, field: not form.has_ratings), InputRequired()])
    email_settings = PaperEmailSettingsField(_("Email notifications"))

    def __init__(self, *args, **kwargs):
        self.event = kwargs.pop('event')
        self.has_ratings = kwargs.pop('has_ratings', False)
        super().__init__(*args, **kwargs)
        if self.has_ratings:
            self.scale_upper.warning = _("Some reviewers have already submitted ratings so the scale cannot be changed "
                                         "anymore.")

    def validate_scale_upper(self, field):
        # Ensure the rating scale is a non-empty range spanning at most 20.
        lower = self.scale_lower.data
        upper = self.scale_upper.data
        if lower is None or upper is None:
            return
        if lower >= upper:
            raise ValidationError(_("The scale's 'to' value must be greater than the 'from' value."))
        if upper - lower > 20:
            raise ValidationError(_("The difference between 'to' and' from' may not be greater than 20."))

    @property
    def data(self):
        # Strip the scale fields from the submitted data once ratings exist,
        # since the scale may no longer be changed (see RATING_FIELDS).
        data = super().data
        if self.has_ratings:
            for key in self.RATING_FIELDS:
                del data[key]
        return data
class PaperSubmissionForm(IndicoForm):
    """Form to upload the files of a paper submission."""
    files = FileField(_("Files"), [DataRequired()], multiple_files=True)
def _get_template_data(tpl):
    """Serialize a paper template's file metadata (incl. download URL)."""
    return dict(filename=tpl.filename,
                size=tpl.size,
                content_type=tpl.content_type,
                url=url_for('.download_template', tpl))
class PaperTemplateForm(IndicoForm):
    """Form to create or edit a paper template."""
    name = StringField(_("Name"), [DataRequired()])
    description = TextAreaField(_("Description"))
    template = EditableFileField(_("Template"), add_remove_links=False, added_only=True,
                                 get_metadata=_get_template_data)

    def __init__(self, *args, **kwargs):
        template = kwargs.pop('template', None)
        # The file is only mandatory when creating a new template; when
        # editing an existing one the current file may be kept.
        if template is None:
            inject_validators(self, 'template', [DataRequired()])
        super().__init__(*args, **kwargs)
class DeadlineForm(IndicoForm):
    """Form to set a deadline and whether it is enforced."""
    deadline = IndicoDateTimeField(_("Deadline"), [Optional()], default_time=time(23, 59))
    enforce = BooleanField(_("Enforce deadline"), [HiddenUnless('deadline')], widget=SwitchWidget())

    def __init__(self, *args, **kwargs):
        self.event = kwargs.pop('event')
        super().__init__(*args, **kwargs)
| {
"content_hash": "904c16bb658dda51987d87441d820e79",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 120,
"avg_line_length": 43.37931034482759,
"alnum_prop": 0.6263910969793323,
"repo_name": "pferreir/indico",
"id": "e2bf40b50986cb414c79447590875f1f390c225c",
"size": "6504",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "indico/modules/events/papers/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "34704"
},
{
"name": "HTML",
"bytes": "1394116"
},
{
"name": "JavaScript",
"bytes": "2078347"
},
{
"name": "Mako",
"bytes": "1527"
},
{
"name": "Python",
"bytes": "4993798"
},
{
"name": "SCSS",
"bytes": "475126"
},
{
"name": "Shell",
"bytes": "3877"
},
{
"name": "TeX",
"bytes": "23327"
},
{
"name": "XSLT",
"bytes": "1504"
}
],
"symlink_target": ""
} |
"""
Run all the examples in the $CLAW/doc/sphinx subdirectories that are used in the
documentation webpages.
"""
import os,sys
example_dirs = ["example-acoustics-1d", \
"example-acoustics-2d/1drad", \
"example-acoustics-2d", \
"example-acoustics-2d-amr"]
rootdir = os.getcwd()
for dir in example_dirs:
os.chdir(dir)
os.system("rm -f .output .rst .htmls")
os.system("make .output")
os.system("make .rst")
os.system("make .htmls")
os.chdir(rootdir)
os.system("chmod -R og+rX *")
os.system("touch plotexamples*.rst")
| {
"content_hash": "0e4590d9687b3ef6075e88187e161a5a",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 80,
"avg_line_length": 24.625,
"alnum_prop": 0.6209813874788495,
"repo_name": "clawpack/clawpack-4.x",
"id": "25d1182821a7d3b1b06361af6b3a8c862034e849",
"size": "591",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "doc/sphinx/run_doc_examples.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Fortran",
"bytes": "1413468"
},
{
"name": "HTML",
"bytes": "1032"
},
{
"name": "Limbo",
"bytes": "135"
},
{
"name": "M",
"bytes": "123"
},
{
"name": "Makefile",
"bytes": "153571"
},
{
"name": "Matlab",
"bytes": "311883"
},
{
"name": "Objective-C",
"bytes": "36"
},
{
"name": "Python",
"bytes": "1242190"
},
{
"name": "Shell",
"bytes": "1579"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import print_function
import logging
from barf import BARF
from barf.core.reil import ReilMnemonic
logger = logging.getLogger(__name__)
def check_path_satisfiability(code_analyzer, path, start_address):
    """Check satisfiability of a basic block path.

    :param code_analyzer: BARF code analyzer used to translate REIL
        instructions to SMT expressions and query the solver.
    :param path: sequence of basic blocks forming the candidate path.
    :param start_address: address of the first instruction to include;
        instructions before it in the first block are skipped.
    :return: True if the whole path is satisfiable, False otherwise.
    """
    start_instr_found = False
    sat = False
    # Traverse basic blocks, translate its instructions to SMT
    # expressions and add them as assertions.
    for bb_curr, bb_next in zip(path[:-1], path[1:]):
        logger.info("BB @ {:#x}".format(bb_curr.address))
        # For each instruction...
        for instr in bb_curr:
            # If the start instruction have not been found, keep
            # looking...
            if not start_instr_found:
                if instr.address == start_address:
                    start_instr_found = True
                else:
                    continue
            logger.info("{:#x} {}".format(instr.address, instr))
            # For each REIL instruction...
            for reil_instr in instr.ir_instrs:
                logger.info("{:#x} {:02d} {}".format(reil_instr.address >> 0x8, reil_instr.address & 0xff,
                                                     reil_instr))
                if reil_instr.mnemonic == ReilMnemonic.JCC:
                    # Check that the JCC is the last instruction of
                    # the basic block (skip CALL instructions.)
                    if instr.address + instr.size - 1 != bb_curr.end_address:
                        logger.error("Unexpected JCC instruction: {:#x} {} ({})".format(instr.address,
                                                                                        instr,
                                                                                        reil_instr))
                        # raise Exception()
                        continue
                    # Make sure branch target address from current
                    # basic block is the start address of the next.
                    assert(bb_curr.taken_branch == bb_next.address or
                           bb_curr.not_taken_branch == bb_next.address or
                           bb_curr.direct_branch == bb_next.address)
                    # Set branch condition accordingly.
                    if bb_curr.taken_branch == bb_next.address:
                        branch_var_goal = 0x1
                    elif bb_curr.not_taken_branch == bb_next.address:
                        branch_var_goal = 0x0
                    else:
                        continue
                    # Add branch condition goal constraint.
                    code_analyzer.add_constraint(code_analyzer.get_operand_expr(reil_instr.operands[0]) == branch_var_goal)
                    # The JCC instruction was the last within the
                    # current basic block. End this iteration and
                    # start next one.
                    break
                # Translate and add SMT expressions to the solver.
                code_analyzer.add_instruction(reil_instr)
        # Query the solver after each basic block so unsatisfiable paths
        # can be abandoned early.
        sat = code_analyzer.check() == 'sat'
        logger.info("BB @ {:#x} sat? {}".format(bb_curr.address, sat))
        if not sat:
            break
    # Return satisfiability.
    return sat
if __name__ == "__main__":
#
# Open file
#
barf = BARF("./samples/bin/constraint3.x86")
#
# Check constraint
#
# 80483ed: 55 push ebp
# 80483ee: 89 e5 mov ebp,esp
# 80483f0: 83 ec 10 sub esp,0x10
# 80483f3: c7 45 f0 01 00 00 00 mov DWORD PTR [ebp-0x10],0x1
# 80483fa: 81 7d f4 44 43 42 41 cmp DWORD PTR [ebp-0xc],0x41424344
# 8048401: 75 19 jne 804841c <main+0x2f>
# 8048403: 81 7d f8 48 47 46 45 cmp DWORD PTR [ebp-0x8],0x45464748
# 804840a: 75 10 jne 804841c <main+0x2f>
# 804840c: 81 7d fc ef cd ab 00 cmp DWORD PTR [ebp-0x4],0xabcdef
# 8048413: 75 07 jne 804841c <main+0x2f>
# 8048415: c7 45 f0 00 00 00 00 mov DWORD PTR [ebp-0x10],0x0
# 804841c: 8b 45 f0 mov eax,DWORD PTR [ebp-0x10]
# 804841f: c9 leave
# 8048420: c3 ret
start_addr = 0x80483ed
end_addr = 0x8048420
print("[+] Recovering function CFG...")
cfg = barf.recover_cfg(start_addr, end_addr)
print("[+] Checking path satisfiability...")
# Preconditions: set stack
# Note: this isn't strictly necessary but it helps reduce the time it
# takes the solver find a solution.
esp = barf.code_analyzer.get_register_expr("esp", mode="pre")
barf.code_analyzer.add_constraint(esp == 0xffffceec)
# Traverse paths and check satisfiability
for bb_path in cfg.all_simple_bb_paths(start_addr, end_addr):
print("[+] Path: {0}".format(" -> ".join([hex(bb.address) for bb in bb_path])))
if check_path_satisfiability(barf.code_analyzer, list(bb_path), start_addr):
print("[+] Satisfiable! Possible assignments:")
ebp = barf.code_analyzer.get_register_expr("ebp", mode="post")
rv = barf.code_analyzer.get_memory_expr(ebp-0x10, 4, mode="post")
cookie1 = barf.code_analyzer.get_memory_expr(ebp-0xc, 4, mode="post")
cookie2 = barf.code_analyzer.get_memory_expr(ebp-0x8, 4, mode="post")
cookie3 = barf.code_analyzer.get_memory_expr(ebp-0x4, 4, mode="post")
rv_val = barf.code_analyzer.get_expr_value(rv)
cookie1_val = barf.code_analyzer.get_expr_value(cookie1)
cookie2_val = barf.code_analyzer.get_expr_value(cookie2)
cookie3_val = barf.code_analyzer.get_expr_value(cookie3)
print("- cookie1: 0x{0:08x} ({0})".format(cookie1_val))
print("- cookie2: 0x{0:08x} ({0})".format(cookie2_val))
print("- cookie3: 0x{0:08x} ({0})".format(cookie3_val))
print("- rv: 0x{0:08x} ({0})".format(rv_val))
else:
print("[-] Unsatisfiable!")
| {
"content_hash": "d7e13364fcbf492d671e6e6c1d51194d",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 127,
"avg_line_length": 42.20261437908497,
"alnum_prop": 0.5037943317330029,
"repo_name": "programa-stic/barf-project",
"id": "bd5428af3b39e646d955da6faa0736eb021e0910",
"size": "6481",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/misc/check_paths.x86.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "4766"
},
{
"name": "Dockerfile",
"bytes": "327"
},
{
"name": "Makefile",
"bytes": "1359"
},
{
"name": "Python",
"bytes": "1105738"
}
],
"symlink_target": ""
} |
import pytest
from pyscipopt import Model, Eventhdlr, SCIP_RESULT, SCIP_EVENTTYPE, SCIP_PARAMSETTING
class NodeEventHandler(Eventhdlr):
    """Records every NODEFOCUSED event and sanity-checks the focused node."""

    def __init__(self):
        self.calls = []

    def eventinit(self):
        # Subscribe to node-focus events when the handler is initialized.
        self.model.catchEvent(SCIP_EVENTTYPE.NODEFOCUSED, self)

    def eventexit(self):
        # Unsubscribe again when the handler is torn down.
        self.model.dropEvent(SCIP_EVENTTYPE.NODEFOCUSED, self)

    def eventexec(self, event):
        self.calls.append('eventexec')
        assert event.getType() == SCIP_EVENTTYPE.NODEFOCUSED
        focused = event.getNode()
        if focused.getDepth() == 0:
            # The root node has neither a parent nor parent branchings.
            assert focused.getParent() is None
            assert focused.getParentBranchings() is None
            return
        branch_vars, branch_bounds, bound_kinds = focused.getParentBranchings()
        assert len(branch_vars) == 1
        assert len(branch_bounds) == 1
        assert len(bound_kinds) == 1
        # Exactly one bound change is recorded on the node's domain change.
        assert len(focused.getDomchg().getBoundchgs()) == 1
def test_tree():
    """Solve a small maximization model and verify the node event handler fired."""
    model = Model()
    model.setMaximize()
    model.hideOutput()
    model.setPresolve(SCIP_PARAMSETTING.OFF)

    handler = NodeEventHandler()
    model.includeEventhdlr(handler, "NodeEventHandler", "python event handler to catch NODEFOCUSED")

    size = 121
    xs = [model.addVar("x{}".format(i), obj=1.0, vtype="INTEGER") for i in range(size)]

    # Forbid picking both members of any pair whose circular distance is 1, 3 or 4.
    for i in range(size):
        for j in range(i):
            if min(abs(i - j), abs(size - i - j)) in (1, 3, 4):
                model.addCons(xs[i] + xs[j] <= 1)

    model.optimize()
    assert round(model.getObjVal()) == 36.0
    del model
    assert len(handler.calls) > 3
if __name__ == "__main__":
test_tree()
| {
"content_hash": "d4b4d358b24d2dde911c9d244e28da7b",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 103,
"avg_line_length": 27.28787878787879,
"alnum_prop": 0.61188228761799,
"repo_name": "SCIP-Interfaces/PySCIPOpt",
"id": "74f0c7e691d89a85f031a51532e704dc9f760f2e",
"size": "1801",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_tree.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "500394"
},
{
"name": "Shell",
"bytes": "1779"
}
],
"symlink_target": ""
} |
import copy
import mock
from neutron.api.v2 import attributes as attr
from neutron.plugins.common import constants
from neutron.tests import base
from neutron.tests.unit.api.v2 import test_base as test_api_v2
from neutron.tests.unit.extensions import base as test_api_v2_extension
from oslo_utils import uuidutils
from webob import exc
import webtest
from neutron_fwaas.extensions import firewall
# Shorthand aliases used throughout these tests.
_uuid = uuidutils.generate_uuid
_get_path = test_api_v2._get_path
# Strings one character longer than the maximum allowed lengths, used to
# exercise validation failures for over-long names/descriptions.
_long_name = 'x' * (attr.NAME_MAX_LEN + 1)
_long_description = 'y' * (attr.DESCRIPTION_MAX_LEN + 1)
class FirewallExtensionTestCase(test_api_v2_extension.ExtensionTestCase):
    """API-level tests for the FWaaS extension (firewalls, rules, policies).

    Each test mocks the plugin, issues an HTTP request against the extension
    API and verifies both the plugin call and the HTTP response.
    """
    fmt = 'json'

    def setUp(self):
        super(FirewallExtensionTestCase, self).setUp()
        # 'firewall_policy' has an irregular plural, so map it explicitly.
        plural_mappings = {'firewall_policy': 'firewall_policies'}
        self._setUpExtension(
            'neutron_fwaas.extensions.firewall.FirewallPluginBase',
            constants.FIREWALL, firewall.RESOURCE_ATTRIBUTE_MAP,
            firewall.Firewall, 'fw', plural_mappings=plural_mappings)

    def test_create_firewall(self):
        fw_id = _uuid()
        data = {'firewall': {'description': 'descr_firewall1',
                             'name': 'firewall1',
                             'admin_state_up': True,
                             'firewall_policy_id': _uuid(),
                             'shared': False,
                             'tenant_id': _uuid()}}
        return_value = copy.copy(data['firewall'])
        return_value.update({'id': fw_id})
        # since 'shared' is hidden
        del return_value['shared']
        instance = self.plugin.return_value
        instance.create_firewall.return_value = return_value
        res = self.api.post(_get_path('fw/firewalls', fmt=self.fmt),
                            self.serialize(data),
                            content_type='application/%s' % self.fmt)
        instance.create_firewall.assert_called_with(mock.ANY,
                                                    firewall=data)
        self.assertEqual(res.status_int, exc.HTTPCreated.code)
        res = self.deserialize(res)
        self.assertIn('firewall', res)
        self.assertEqual(res['firewall'], return_value)

    def test_create_firewall_invalid_long_name(self):
        data = {'firewall': {'description': 'descr_firewall1',
                             'name': _long_name,
                             'admin_state_up': True,
                             'firewall_policy_id': _uuid(),
                             'shared': False,
                             'tenant_id': _uuid()}}
        res = self.api.post(_get_path('fw/firewalls', fmt=self.fmt),
                            self.serialize(data),
                            content_type='application/%s' % self.fmt,
                            status=exc.HTTPBadRequest.code)
        self.assertTrue('Invalid input for name' in res.body.decode('utf-8'))

    def test_create_firewall_invalid_long_description(self):
        data = {'firewall': {'description': _long_description,
                             'name': 'firewall1',
                             'admin_state_up': True,
                             'firewall_policy_id': _uuid(),
                             'shared': False,
                             'tenant_id': _uuid()}}
        res = self.api.post(_get_path('fw/firewalls', fmt=self.fmt),
                            self.serialize(data),
                            content_type='application/%s' % self.fmt,
                            status=exc.HTTPBadRequest.code)
        self.assertTrue('Invalid input '
                        'for description' in res.body.decode('utf-8'))

    def test_firewall_list(self):
        fw_id = _uuid()
        return_value = [{'tenant_id': _uuid(),
                         'id': fw_id}]
        instance = self.plugin.return_value
        instance.get_firewalls.return_value = return_value
        res = self.api.get(_get_path('fw/firewalls', fmt=self.fmt))
        instance.get_firewalls.assert_called_with(mock.ANY,
                                                  fields=mock.ANY,
                                                  filters=mock.ANY)
        self.assertEqual(res.status_int, exc.HTTPOk.code)

    def test_firewall_get(self):
        fw_id = _uuid()
        return_value = {'tenant_id': _uuid(),
                        'id': fw_id}
        instance = self.plugin.return_value
        instance.get_firewall.return_value = return_value
        res = self.api.get(_get_path('fw/firewalls',
                                     id=fw_id, fmt=self.fmt))
        instance.get_firewall.assert_called_with(mock.ANY,
                                                 fw_id,
                                                 fields=mock.ANY)
        self.assertEqual(res.status_int, exc.HTTPOk.code)
        res = self.deserialize(res)
        self.assertIn('firewall', res)
        self.assertEqual(res['firewall'], return_value)

    def test_firewall_update(self):
        fw_id = _uuid()
        update_data = {'firewall': {'name': 'new_name'}}
        return_value = {'tenant_id': _uuid(),
                        'id': fw_id}
        instance = self.plugin.return_value
        instance.update_firewall.return_value = return_value
        res = self.api.put(_get_path('fw/firewalls', id=fw_id,
                                     fmt=self.fmt),
                           self.serialize(update_data))
        instance.update_firewall.assert_called_with(mock.ANY, fw_id,
                                                    firewall=update_data)
        self.assertEqual(res.status_int, exc.HTTPOk.code)
        res = self.deserialize(res)
        self.assertIn('firewall', res)
        self.assertEqual(res['firewall'], return_value)

    def test_firewall_delete(self):
        self._test_entity_delete('firewall')

    def _test_create_firewall_rule(self, src_port, dst_port):
        # Helper: create a rule with the given source/destination ports and
        # verify the API normalizes ports to strings in the response.
        rule_id = _uuid()
        data = {'firewall_rule': {'description': 'descr_firewall_rule1',
                                  'name': 'rule1',
                                  'shared': False,
                                  'protocol': 'tcp',
                                  'ip_version': 4,
                                  'source_ip_address': '192.168.0.1',
                                  'destination_ip_address': '127.0.0.1',
                                  'source_port': src_port,
                                  'destination_port': dst_port,
                                  'action': 'allow',
                                  'enabled': True,
                                  'tenant_id': _uuid()}}
        expected_ret_val = copy.copy(data['firewall_rule'])
        expected_ret_val['source_port'] = str(src_port)
        expected_ret_val['destination_port'] = str(dst_port)
        expected_call_args = copy.copy(expected_ret_val)
        expected_ret_val['id'] = rule_id
        instance = self.plugin.return_value
        instance.create_firewall_rule.return_value = expected_ret_val
        res = self.api.post(_get_path('fw/firewall_rules', fmt=self.fmt),
                            self.serialize(data),
                            content_type='application/%s' % self.fmt)
        instance.create_firewall_rule.assert_called_with(
            mock.ANY,
            firewall_rule={'firewall_rule': expected_call_args})
        self.assertEqual(res.status_int, exc.HTTPCreated.code)
        res = self.deserialize(res)
        self.assertIn('firewall_rule', res)
        self.assertEqual(res['firewall_rule'], expected_ret_val)

    def test_create_firewall_rule_with_integer_ports(self):
        self._test_create_firewall_rule(1, 10)

    def test_create_firewall_rule_with_string_ports(self):
        self._test_create_firewall_rule('1', '10')

    def test_create_firewall_rule_with_port_range(self):
        self._test_create_firewall_rule('1:20', '30:40')

    def test_create_firewall_rule_invalid_long_name(self):
        data = {'firewall_rule': {'description': 'descr_firewall_rule1',
                                  'name': _long_name,
                                  'shared': False,
                                  'protocol': 'tcp',
                                  'ip_version': 4,
                                  'source_ip_address': '192.168.0.1',
                                  'destination_ip_address': '127.0.0.1',
                                  'source_port': 1,
                                  'destination_port': 1,
                                  'action': 'allow',
                                  'enabled': True,
                                  'tenant_id': _uuid()}}
        res = self.api.post(_get_path('fw/firewall_rules', fmt=self.fmt),
                            self.serialize(data),
                            content_type='application/%s' % self.fmt,
                            status=exc.HTTPBadRequest.code)
        self.assertTrue('Invalid input for name' in res.body.decode('utf-8'))

    def test_create_firewall_rule_invalid_long_description(self):
        data = {'firewall_rule': {'description': _long_description,
                                  'name': 'rule1',
                                  'shared': False,
                                  'protocol': 'tcp',
                                  'ip_version': 4,
                                  'source_ip_address': '192.168.0.1',
                                  'destination_ip_address': '127.0.0.1',
                                  'source_port': 1,
                                  'destination_port': 1,
                                  'action': 'allow',
                                  'enabled': True,
                                  'tenant_id': _uuid()}}
        res = self.api.post(_get_path('fw/firewall_rules', fmt=self.fmt),
                            self.serialize(data),
                            content_type='application/%s' % self.fmt,
                            status=exc.HTTPBadRequest.code)
        self.assertTrue('Invalid input '
                        'for description' in res.body.decode('utf-8'))

    def test_firewall_rule_list(self):
        rule_id = _uuid()
        return_value = [{'tenant_id': _uuid(),
                         'id': rule_id}]
        instance = self.plugin.return_value
        instance.get_firewall_rules.return_value = return_value
        res = self.api.get(_get_path('fw/firewall_rules', fmt=self.fmt))
        instance.get_firewall_rules.assert_called_with(mock.ANY,
                                                       fields=mock.ANY,
                                                       filters=mock.ANY)
        self.assertEqual(res.status_int, exc.HTTPOk.code)

    def test_firewall_rule_get(self):
        rule_id = _uuid()
        return_value = {'tenant_id': _uuid(),
                        'id': rule_id}
        instance = self.plugin.return_value
        instance.get_firewall_rule.return_value = return_value
        res = self.api.get(_get_path('fw/firewall_rules',
                                     id=rule_id, fmt=self.fmt))
        instance.get_firewall_rule.assert_called_with(mock.ANY,
                                                      rule_id,
                                                      fields=mock.ANY)
        self.assertEqual(res.status_int, exc.HTTPOk.code)
        res = self.deserialize(res)
        self.assertIn('firewall_rule', res)
        self.assertEqual(res['firewall_rule'], return_value)

    def test_firewall_rule_update(self):
        rule_id = _uuid()
        update_data = {'firewall_rule': {'action': 'deny'}}
        return_value = {'tenant_id': _uuid(),
                        'id': rule_id}
        instance = self.plugin.return_value
        instance.update_firewall_rule.return_value = return_value
        res = self.api.put(_get_path('fw/firewall_rules', id=rule_id,
                                     fmt=self.fmt),
                           self.serialize(update_data))
        instance.update_firewall_rule.assert_called_with(
            mock.ANY,
            rule_id,
            firewall_rule=update_data)
        self.assertEqual(res.status_int, exc.HTTPOk.code)
        res = self.deserialize(res)
        self.assertIn('firewall_rule', res)
        self.assertEqual(res['firewall_rule'], return_value)

    def test_firewall_rule_delete(self):
        self._test_entity_delete('firewall_rule')

    def test_create_firewall_policy(self):
        policy_id = _uuid()
        data = {'firewall_policy': {'description': 'descr_firewall_policy1',
                                    'name': 'new_fw_policy1',
                                    'shared': False,
                                    'firewall_rules': [_uuid(), _uuid()],
                                    'audited': False,
                                    'tenant_id': _uuid()}}
        return_value = copy.copy(data['firewall_policy'])
        return_value.update({'id': policy_id})
        instance = self.plugin.return_value
        instance.create_firewall_policy.return_value = return_value
        res = self.api.post(_get_path('fw/firewall_policies',
                                      fmt=self.fmt),
                            self.serialize(data),
                            content_type='application/%s' % self.fmt)
        instance.create_firewall_policy.assert_called_with(
            mock.ANY,
            firewall_policy=data)
        self.assertEqual(res.status_int, exc.HTTPCreated.code)
        res = self.deserialize(res)
        self.assertIn('firewall_policy', res)
        self.assertEqual(res['firewall_policy'], return_value)

    def test_create_firewall_policy_invalid_long_name(self):
        data = {'firewall_policy': {'description': 'descr_firewall_policy1',
                                    'name': _long_name,
                                    'shared': False,
                                    'firewall_rules': [_uuid(), _uuid()],
                                    'audited': False,
                                    'tenant_id': _uuid()}}
        res = self.api.post(_get_path('fw/firewall_policies',
                                      fmt=self.fmt),
                            self.serialize(data),
                            content_type='application/%s' % self.fmt,
                            status=exc.HTTPBadRequest.code)
        self.assertTrue('Invalid input for name' in res.body.decode('utf-8'))

    def test_create_firewall_policy_invalid_long_description(self):
        data = {'firewall_policy': {'description': _long_description,
                                    'name': 'new_fw_policy1',
                                    'shared': False,
                                    'firewall_rules': [_uuid(), _uuid()],
                                    'audited': False,
                                    'tenant_id': _uuid()}}
        res = self.api.post(_get_path('fw/firewall_policies',
                                      fmt=self.fmt),
                            self.serialize(data),
                            content_type='application/%s' % self.fmt,
                            status=exc.HTTPBadRequest.code)
        self.assertTrue('Invalid input '
                        'for description' in res.body.decode('utf-8'))

    def test_firewall_policy_list(self):
        policy_id = _uuid()
        return_value = [{'tenant_id': _uuid(),
                         'id': policy_id}]
        instance = self.plugin.return_value
        instance.get_firewall_policies.return_value = return_value
        res = self.api.get(_get_path('fw/firewall_policies',
                                     fmt=self.fmt))
        instance.get_firewall_policies.assert_called_with(mock.ANY,
                                                          fields=mock.ANY,
                                                          filters=mock.ANY)
        self.assertEqual(res.status_int, exc.HTTPOk.code)

    def test_firewall_policy_get(self):
        policy_id = _uuid()
        return_value = {'tenant_id': _uuid(),
                        'id': policy_id}
        instance = self.plugin.return_value
        instance.get_firewall_policy.return_value = return_value
        res = self.api.get(_get_path('fw/firewall_policies',
                                     id=policy_id, fmt=self.fmt))
        instance.get_firewall_policy.assert_called_with(mock.ANY,
                                                        policy_id,
                                                        fields=mock.ANY)
        self.assertEqual(res.status_int, exc.HTTPOk.code)
        res = self.deserialize(res)
        self.assertIn('firewall_policy', res)
        self.assertEqual(res['firewall_policy'], return_value)

    def test_firewall_policy_update(self):
        policy_id = _uuid()
        update_data = {'firewall_policy': {'audited': True}}
        return_value = {'tenant_id': _uuid(),
                        'id': policy_id}
        instance = self.plugin.return_value
        instance.update_firewall_policy.return_value = return_value
        res = self.api.put(_get_path('fw/firewall_policies',
                                     id=policy_id,
                                     fmt=self.fmt),
                           self.serialize(update_data))
        instance.update_firewall_policy.assert_called_with(
            mock.ANY,
            policy_id,
            firewall_policy=update_data)
        self.assertEqual(res.status_int, exc.HTTPOk.code)
        res = self.deserialize(res)
        self.assertIn('firewall_policy', res)
        self.assertEqual(res['firewall_policy'], return_value)

    def test_firewall_policy_update_malformed_rules(self):
        # emulating client request when no rule uuids are provided for
        # --firewall_rules parameter
        update_data = {'firewall_policy': {'firewall_rules': True}}
        # have to check for generic AppError
        self.assertRaises(
            webtest.AppError,
            self.api.put,
            _get_path('fw/firewall_policies', id=_uuid(), fmt=self.fmt),
            self.serialize(update_data))

    def test_firewall_policy_delete(self):
        self._test_entity_delete('firewall_policy')

    def test_firewall_policy_insert_rule(self):
        firewall_policy_id = _uuid()
        firewall_rule_id = _uuid()
        ref_firewall_rule_id = _uuid()
        insert_data = {'firewall_rule_id': firewall_rule_id,
                       'insert_before': ref_firewall_rule_id,
                       'insert_after': None}
        return_value = {'firewall_policy':
                        {'tenant_id': _uuid(),
                         'id': firewall_policy_id,
                         'firewall_rules': [ref_firewall_rule_id,
                                            firewall_rule_id]}}
        instance = self.plugin.return_value
        instance.insert_rule.return_value = return_value
        path = _get_path('fw/firewall_policies', id=firewall_policy_id,
                         action="insert_rule",
                         fmt=self.fmt)
        res = self.api.put(path, self.serialize(insert_data))
        instance.insert_rule.assert_called_with(mock.ANY, firewall_policy_id,
                                                insert_data)
        self.assertEqual(res.status_int, exc.HTTPOk.code)
        res = self.deserialize(res)
        self.assertEqual(res, return_value)

    def test_firewall_policy_remove_rule(self):
        firewall_policy_id = _uuid()
        firewall_rule_id = _uuid()
        remove_data = {'firewall_rule_id': firewall_rule_id}
        return_value = {'firewall_policy':
                        {'tenant_id': _uuid(),
                         'id': firewall_policy_id,
                         'firewall_rules': []}}
        instance = self.plugin.return_value
        instance.remove_rule.return_value = return_value
        path = _get_path('fw/firewall_policies', id=firewall_policy_id,
                         action="remove_rule",
                         fmt=self.fmt)
        res = self.api.put(path, self.serialize(remove_data))
        instance.remove_rule.assert_called_with(mock.ANY, firewall_policy_id,
                                                remove_data)
        self.assertEqual(res.status_int, exc.HTTPOk.code)
        res = self.deserialize(res)
        self.assertEqual(res, return_value)
class TestFirewallAttributeValidators(base.BaseTestCase):
    """Unit tests for the custom attribute validators of the FW extension."""

    def test_validate_port_range(self):
        # A validator returning None means the value was accepted.
        msg = firewall._validate_port_range(None)
        self.assertIsNone(msg)
        msg = firewall._validate_port_range('10')
        self.assertIsNone(msg)
        msg = firewall._validate_port_range(10)
        self.assertIsNone(msg)
        msg = firewall._validate_port_range(-1)
        self.assertEqual(msg, "Invalid port '-1'")
        msg = firewall._validate_port_range('66000')
        self.assertEqual(msg, "Invalid port '66000'")
        msg = firewall._validate_port_range('10:20')
        self.assertIsNone(msg)
        msg = firewall._validate_port_range('1:65535')
        self.assertIsNone(msg)
        msg = firewall._validate_port_range('0:65535')
        self.assertEqual(msg, "Invalid port '0'")
        msg = firewall._validate_port_range('1:65536')
        self.assertEqual(msg, "Invalid port '65536'")
        msg = firewall._validate_port_range('abc:efg')
        self.assertEqual(msg, "Port 'abc' is not a valid number")
        msg = firewall._validate_port_range('1:efg')
        self.assertEqual(msg, "Port 'efg' is not a valid number")
        msg = firewall._validate_port_range('-1:10')
        self.assertEqual(msg, "Invalid port '-1'")
        msg = firewall._validate_port_range('66000:10')
        self.assertEqual(msg, "Invalid port '66000'")
        msg = firewall._validate_port_range('10:66000')
        self.assertEqual(msg, "Invalid port '66000'")
        msg = firewall._validate_port_range('1:-10')
        self.assertEqual(msg, "Invalid port '-10'")

    def test_validate_ip_or_subnet_or_none(self):
        msg = firewall._validate_ip_or_subnet_or_none(None)
        self.assertIsNone(msg)
        msg = firewall._validate_ip_or_subnet_or_none('1.1.1.1')
        self.assertIsNone(msg)
        msg = firewall._validate_ip_or_subnet_or_none('1.1.1.0/24')
        self.assertIsNone(msg)
        ip_addr = '1111.1.1.1'
        msg = firewall._validate_ip_or_subnet_or_none(ip_addr)
        self.assertEqual(msg, ("'%s' is not a valid IP address and "
                               "'%s' is not a valid IP subnet") % (ip_addr,
                                                                   ip_addr))
        # Embedded whitespace of any kind must be rejected.
        ip_addr = '1.1.1.1 has whitespace'
        msg = firewall._validate_ip_or_subnet_or_none(ip_addr)
        self.assertEqual(msg, ("'%s' is not a valid IP address and "
                               "'%s' is not a valid IP subnet") % (ip_addr,
                                                                   ip_addr))
        ip_addr = '111.1.1.1\twhitespace'
        msg = firewall._validate_ip_or_subnet_or_none(ip_addr)
        self.assertEqual(msg, ("'%s' is not a valid IP address and "
                               "'%s' is not a valid IP subnet") % (ip_addr,
                                                                   ip_addr))
        ip_addr = '111.1.1.1\nwhitespace'
        msg = firewall._validate_ip_or_subnet_or_none(ip_addr)
        self.assertEqual(msg, ("'%s' is not a valid IP address and "
                               "'%s' is not a valid IP subnet") % (ip_addr,
                                                                   ip_addr))
        # Valid - IPv4
        cidr = "10.0.2.0/24"
        msg = firewall._validate_ip_or_subnet_or_none(cidr, None)
        self.assertIsNone(msg)
        # Valid - IPv6 without final octets
        cidr = "fe80::/24"
        msg = firewall._validate_ip_or_subnet_or_none(cidr, None)
        self.assertIsNone(msg)
        # Valid - IPv6 with final octets
        cidr = "fe80::0/24"
        msg = firewall._validate_ip_or_subnet_or_none(cidr, None)
        self.assertIsNone(msg)
        # Valid - plain IPv6 address (no mask) is accepted as an IP address
        cidr = "fe80::"
        msg = firewall._validate_ip_or_subnet_or_none(cidr, None)
        self.assertIsNone(msg)
        # Valid - plain IPv6 address with final octets and no mask is also
        # accepted as an IP address (the original comment claimed "Invalid",
        # but the assertion below shows it passes validation).
        cidr = "fe80::0"
        msg = firewall._validate_ip_or_subnet_or_none(cidr, None)
        self.assertIsNone(msg)
        # Invalid - Address format error
        cidr = 'invalid'
        msg = firewall._validate_ip_or_subnet_or_none(cidr, None)
        self.assertEqual(msg, ("'%s' is not a valid IP address and "
                               "'%s' is not a valid IP subnet") % (cidr,
                                                                   cidr))
| {
"content_hash": "5749feea17d5d4dcaa2f794da552dca2",
"timestamp": "",
"source": "github",
"line_count": 568,
"max_line_length": 77,
"avg_line_length": 43.730633802816904,
"alnum_prop": 0.5061395386287693,
"repo_name": "gaolichuang/neutron-fwaas",
"id": "65ce5f3679de78cf82b1e57069e1c611163de028",
"size": "25462",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neutron_fwaas/tests/unit/extensions/test_firewall.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1053"
},
{
"name": "Python",
"bytes": "527037"
},
{
"name": "Shell",
"bytes": "2822"
}
],
"symlink_target": ""
} |
import os
from oslo_log import log as logging
from oslo_utils import uuidutils
from ironic.conf import CONF
from ironic.dhcp import base
LOG = logging.getLogger(__name__)
class DnsmasqDHCPApi(base.BaseDHCP):
    """API for managing host specific Dnsmasq configuration."""

    def update_port_dhcp_opts(self, port_id, dhcp_options, token=None,
                              context=None):
        """No-op: this provider does not track per-port DHCP options."""
        pass

    def update_dhcp_opts(self, task, options, vifs=None):
        """Send or update the DHCP BOOT options for this node.

        :param task: A TaskManager instance.
        :param options: this will be a list of dicts, e.g.

                        ::

                         [{'opt_name': '67',
                           'opt_value': 'pxelinux.0',
                           'ip_version': 4},
                          {'opt_name': '66',
                           'opt_value': '123.123.123.456',
                           'ip_version': 4}]
        :param vifs: Ignored argument
        """
        node = task.node
        macs = set(self._pxe_enabled_macs(task.ports))
        opt_file = self._opt_file_path(node)

        # Reuse the node's existing tag so repeated updates stay consistent;
        # otherwise mint a new uuid tag unique to this node.
        tag = node.driver_internal_info.get('dnsmasq_tag')
        if not tag:
            tag = uuidutils.generate_uuid()
            node.set_driver_internal_info('dnsmasq_tag', tag)
            node.save()

        LOG.debug('Writing to %s:', opt_file)
        with open(opt_file, 'w') as f:
            # Apply each option by tag
            for option in options:
                entry = 'tag:{tag},{opt_name},{opt_value}\n'.format(
                    tag=tag,
                    opt_name=option.get('opt_name'),
                    opt_value=option.get('opt_value'),
                )
                LOG.debug(entry)
                f.write(entry)

        for mac in macs:
            host_file = self._host_file_path(mac)
            LOG.debug('Writing to %s:', host_file)
            with open(host_file, 'w') as f:
                # Tag each address with the unique uuid scoped to
                # this node and DHCP transaction
                entry = '{mac},set:{tag},set:ironic\n'.format(
                    mac=mac, tag=tag)
                LOG.debug(entry)
                f.write(entry)

    def _opt_file_path(self, node):
        """Return the dhcp-optsdir file path for this node."""
        return os.path.join(CONF.dnsmasq.dhcp_optsdir,
                            'ironic-{}.conf'.format(node.uuid))

    def _host_file_path(self, mac):
        """Return the dhcp-hostsdir file path for this MAC address."""
        return os.path.join(CONF.dnsmasq.dhcp_hostsdir,
                            'ironic-{}.conf'.format(mac))

    def _pxe_enabled_macs(self, ports):
        """Yield the MAC address of every PXE-enabled port."""
        for port in ports:
            if port.pxe_enabled:
                yield port.address

    def get_ip_addresses(self, task):
        """Get IP addresses for all ports/portgroups in `task`.

        :param task: a TaskManager instance.
        :returns: List of IP addresses associated with
                  task's ports/portgroups.
        """
        lease_path = CONF.dnsmasq.dhcp_leasefile
        macs = set(self._pxe_enabled_macs(task.ports))
        addresses = []
        with open(lease_path, 'r') as f:
            # Iterate the file lazily instead of materializing it with
            # readlines(); lease files can grow large.
            for line in f:
                lease = line.split()
                # Lease entries are whitespace-separated with the MAC in
                # field 1 and the IP in field 2. Skip blank or malformed
                # lines instead of raising IndexError (bug fix: the original
                # indexed lease[1] unconditionally).
                if len(lease) >= 3 and lease[1] in macs:
                    addresses.append(lease[2])
        LOG.debug('Found addresses for %s: %s',
                  task.node.uuid, ', '.join(addresses))
        return addresses

    def clean_dhcp_opts(self, task):
        """Clean up the DHCP BOOT options for the host in `task`.

        :param task: A TaskManager instance.
        :raises: FailedToCleanDHCPOpts
        """
        node = task.node
        # Discard this unique tag
        node.del_driver_internal_info('dnsmasq_tag')
        node.save()

        # Changing the host rule to ignore will be picked up by dnsmasq
        # without requiring a SIGHUP. When the mac address is active again
        # this file will be replaced with one that applies a new unique tag.
        macs = set(self._pxe_enabled_macs(task.ports))
        for mac in macs:
            host_file = self._host_file_path(mac)
            with open(host_file, 'w') as f:
                entry = '{mac},ignore\n'.format(mac=mac)
                f.write(entry)

        # Deleting the file containing dhcp-option won't remove the rules from
        # dnsmasq but no requests will be tagged with the dnsmasq_tag uuid so
        # these rules will not apply.
        opt_file = self._opt_file_path(node)
        if os.path.exists(opt_file):
            os.remove(opt_file)

    def supports_ipxe_tag(self):
        """Whether the provider will correctly apply the 'ipxe' tag.

        When iPXE makes a DHCP request, does this provider support adding
        the tag `ipxe` or `ipxe6` (for IPv6). When the provider returns True,
        options can be added which filter on these tags.

        The `dnsmasq` provider sets this to True on the assumption that the
        following is included in the dnsmasq.conf:

        dhcp-match=set:ipxe,175

        :returns: True
        """
        return True
| {
"content_hash": "ced18669b38a03f4de6e7e3eaf60c462",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 78,
"avg_line_length": 35.145833333333336,
"alnum_prop": 0.5455443588223671,
"repo_name": "openstack/ironic",
"id": "c6f27afe467df8ad38a097a8ab10ecea4ef3f862",
"size": "5669",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ironic/dhcp/dnsmasq.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "349"
},
{
"name": "PowerShell",
"bytes": "1676"
},
{
"name": "Python",
"bytes": "9506176"
},
{
"name": "Shell",
"bytes": "188127"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
    """Build and return the decorative-furniture tangible template object."""
    obj = Tangible()
    obj.template = "object/tangible/furniture/all/shared_frn_all_decorative_lg_s1.iff"
    obj.attribute_template_id = 6
    obj.stfName("frn_n","frn_art")
    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####
    return obj
"content_hash": "020be6274160c2a393f791f4f431ad1b",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 86,
"avg_line_length": 23.76923076923077,
"alnum_prop": 0.6925566343042071,
"repo_name": "obi-two/Rebelion",
"id": "fb76809b3702c82a09735a0d134a69bde1a6860e",
"size": "454",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/tangible/furniture/all/shared_frn_all_decorative_lg_s1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
} |
from webpay import WebPay
from httmock import HTTMock
import pytest
import tests.helper as helper
import webpay.errors as errors
class TestErrors:
    """Verify that each API failure mode maps to the right webpay error type.

    Every test drives `charges.all()` against a canned HTTP fixture and
    inspects the raised exception. The shared `_request_error` helper removes
    the boilerplate that was previously duplicated in every test.
    """

    def _request_error(self, fixture, error_cls, api_base=None):
        """Run charges.all() against `fixture` and return the raised error.

        :param fixture: fixture path handed to helper.mock_api.
        :param error_cls: exception class expected from the client.
        :param api_base: optional API base URL for the WebPay client.
        """
        client = (WebPay('test_key') if api_base is None
                  else WebPay('test_key', api_base))
        with pytest.raises(error_cls) as excinfo:
            with HTTMock(helper.mock_api('/charges', fixture)):
                client.charges.all()
        return excinfo.value

    def test_request_raises_api_exception(self):
        exc = self._request_error('errors/unknown_api_error.txt',
                                  errors.ApiError)
        assert exc.__str__() == 'Unknown error occurred'
        assert exc.type == 'api_error'
        assert exc.status == 500

    def test_request_raises_invalid_request(self):
        exc = self._request_error('errors/bad_request.txt',
                                  errors.InvalidRequestError)
        assert exc.__str__() == 'Missing required param: currency'
        assert exc.type == 'invalid_request_error'
        assert exc.param == 'currency'
        assert exc.status == 400

    def test_request_raises_not_found(self):
        exc = self._request_error('errors/not_found.txt',
                                  errors.InvalidRequestError)
        assert exc.__str__() == 'No such charge: foo'
        assert exc.type == 'invalid_request_error'
        assert exc.param == 'id'
        assert exc.status == 404

    def test_request_raises_not_found_without_params(self):
        exc = self._request_error('errors/not_found_url.txt',
                                  errors.InvalidRequestError)
        assert exc.__str__() == 'Unrecognized request URL.'
        assert exc.type == 'invalid_request_error'
        assert exc.status == 404

    def test_request_raises_unauthorized(self):
        exc = self._request_error('errors/unauthorized.txt',
                                  errors.AuthenticationError)
        assert exc.__str__() == \
            'Invalid API key provided. Check your API key is correct.'
        assert exc.status == 401

    def test_request_raises_card_error(self):
        exc = self._request_error('errors/card_error.txt', errors.CardError)
        assert exc.__str__() == 'Your card number is incorrect'
        assert exc.type == 'card_error'
        assert exc.code == 'incorrect_number'
        assert exc.param == 'number'
        assert exc.status == 402

    def test_request_raises_card_error_without_param(self):
        exc = self._request_error('errors/card_error_declined.txt',
                                  errors.CardError)
        assert exc.__str__() == 'This card cannot be used.'
        assert exc.type == 'card_error'
        assert exc.code == 'card_declined'
        assert exc.param is None
        assert exc.status == 402

    def test_server_not_found(self):
        # An unreachable endpoint surfaces as a connection error with no
        # parsed error payload.
        exc = self._request_error('errors/not_found.txt',
                                  errors.ApiConnectionError,
                                  api_base='http://localhost:123')
        assert 'Error while requesting API' in exc.__str__()
        assert exc.error_info is None
        assert exc.status is None

    def test_response_json_is_broken(self):
        exc = self._request_error('errors/broken_json.txt',
                                  errors.ApiConnectionError)
        assert 'Error while parsing response JSON' in exc.__str__()
        assert exc.error_info is None
        assert exc.status is None
| {
"content_hash": "9ca3c57dcc9a60957559bed8ca1fe240",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 76,
"avg_line_length": 42.57142857142857,
"alnum_prop": 0.565324384787472,
"repo_name": "yamaneko1212/webpay-python",
"id": "95b70a180ed15c32a9f31f87e2a6ae89aff9ff60",
"size": "4470",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_errors.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "253628"
}
],
"symlink_target": ""
} |
import vim
import sys
import os
from inspect import getsourcefile
# Locate the plugin root (one directory above this file) and its bundled
# gflags copy, resolving from this file's own location rather than the CWD.
closure_path = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(getsourcefile(lambda _:None))), '..'))
gflags_path = os.path.abspath(os.path.join(closure_path, 'gflags'))
# Make both importable without requiring a system-wide install.
if closure_path not in sys.path:
    sys.path += [closure_path]
if gflags_path not in sys.path:
    sys.path += [gflags_path]
from closure_linter import error_fixer
from closure_linter import runner
from closure_linter.common import simplefileflags as fileflags
import gflags as flags
class VimErrorFixer(error_fixer.ErrorFixer):
    """ErrorFixer that writes the fixed token stream back into the Vim range."""
    def FinishFile(self):
        """Rebuild the buffer lines from the token list after fixes ran."""
        if self._file_fix_count:
            token = self._file_token
            # Skip past any tokens the fixer deleted, then rewind to the
            # very first token of the file.
            while token.is_deleted:
                token = token.next
            while token.previous:
                token = token.previous
            # Walk the token stream, concatenating token text into lines.
            lines = []
            line = ''
            char_count = 0
            while token:
                line += token.string
                char_count += len(token.string)
                if token.IsLastInLine():
                    # NOTE(review): empty lines are dropped for HTML inputs
                    # with an original line number — presumably to preserve
                    # untouched HTML source; confirm against ErrorFixer.
                    if (line or not self._file_is_html or
                        token.orig_line_number is None):
                        lines += [line]
                    else:
                        # TODO: get original line corresponding to token.orig_line_number -
                        # 1 and append it to lines here.
                        pass
                    line = ''
                    # TODO: deal with lines that are longer than 80 characters here.
                    char_count = 0
                token = token.next
            # Replace the selected Vim range with the regenerated lines.
            vim.current.range[:] = lines
# Enable every lint check, then lint-and-fix the current buffer range in place.
flags.FLAGS([os.path.abspath(getsourcefile(lambda _:None)), '--jslint_error=all'])
runner.Run(vim.current.buffer.name, VimErrorFixer(), vim.current.range)
| {
"content_hash": "1164b326a7b167456892977b8dcbe7fc",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 114,
"avg_line_length": 28.727272727272727,
"alnum_prop": 0.6424050632911392,
"repo_name": "asankah/closure-linter",
"id": "8a59b662d73d6e4a2147a278ae90491e34d7965d",
"size": "1580",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "closure_linter/vim_style.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "116105"
},
{
"name": "Python",
"bytes": "437465"
}
],
"symlink_target": ""
} |
from om.base import *
from sqlalchemy.orm import relationship, backref, column_property
from sqlalchemy import Table, MetaData, create_engine, Column, Integer, \
String, Float, ForeignKey, select
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.schema import UniqueConstraint,PrimaryKeyConstraint
import simplejson as json
class Gene(GenomeRegion):
    """A named gene, stored as a joined-table subclass of GenomeRegion."""
    __tablename__ = 'gene'
    # Shares its primary key with the parent genome_region row.
    id = Column(Integer, ForeignKey('genome_region.id'), primary_key=True)
    locus_id = Column(String(20))
    info = Column(String(300))
    long_name = Column(String(100))
    __mapper_args__ = { 'polymorphic_identity': 'gene' }
    def __repr__(self):
        return "Gene: (%s, %s) %d-%d (%s)"% \
            (self.locus_id, self.name, self.leftpos, self.rightpos,\
             self.strand)
    def __init__(self, name, leftpos, rightpos, strand, genome_id, locus_id, info=None, long_name=None):
        super(Gene, self).__init__(leftpos, rightpos, strand, genome_id, name)
        self.locus_id = locus_id
        self.info = info
        self.long_name = long_name
class Motif(GenomeRegion):
    """A binding motif on the genome, linked to the component it binds."""
    __tablename__ = 'motif'
    id = Column(Integer, ForeignKey('genome_region.id'), primary_key=True)
    # p-value of the motif call.
    pval = Column(Float)
    bound_component_id = Column(Integer, ForeignKey('component.id'))
    bound_component = relationship("Component")
    def __repr__(self):
        return "Motif (%s) %d-%d %s %5.2f"% \
            (self.bound_component.name, self.leftpos, self.rightpos,\
             self.strand, self.pval)
    def __init__(self, leftpos, rightpos, strand, genome_id, pval, bound_component_id, info=None):
        super(Motif, self).__init__(leftpos, rightpos, strand, genome_id)
        self.pval = pval
        self.bound_component_id = bound_component_id
        # NOTE(review): `info` is assigned but no `info` column is declared
        # here — presumably inherited or unused; confirm.
        self.info = info
class ComplexComposition(Base):
    """Association row: one component's stoichiometry within one complex."""
    __tablename__ = 'complex_composition'
    complex_id = Column(Integer, ForeignKey('complex.id'), primary_key=True)
    component_id = Column(Integer, ForeignKey('component.id'), primary_key=True)
    stoichiometry = Column(Integer)
    __table_args__ = (UniqueConstraint('complex_id','component_id'),{})
    def __init__(self, complex_id, component_id, stoichiometry):
        self.complex_id = complex_id
        self.component_id = component_id
        self.stoichiometry = stoichiometry
class Complex(Component):
    """A macromolecular complex composed of other components (possibly nested)."""
    __tablename__ = 'complex'
    __mapper_args__ = {'polymorphic_identity': 'complex'}
    id = Column(Integer, ForeignKey('component.id'), primary_key=True)
    long_name = Column(String(200))
    # Direct (one-level) members of this complex.
    children = relationship("Component", secondary="complex_composition",\
                            primaryjoin = id == ComplexComposition.complex_id,\
                            backref="parent")
    @hybrid_property
    def all_children(self):
        """Return all members transitively, via a recursive CTE.

        Walks complex_composition from this complex downward so that members
        of nested sub-complexes are included too.
        NOTE(review): the Session opened here is never closed — the returned
        objects stay bound to it; confirm whether that is intentional.
        """
        session = Session()
        # Anchor: the direct members of this complex.
        included_components = session.query(
            ComplexComposition.complex_id,
            ComplexComposition.component_id).\
            filter(ComplexComposition.complex_id == self.id).\
            cte(name="included_components", recursive=True)
        incl_alias = aliased(included_components, name="incl_cplx")
        complex_alias = aliased(ComplexComposition, name="cplx")
        # Recursive step: members of any member that is itself a complex.
        included_components = included_components.union_all(
            session.query(
                complex_alias.complex_id,
                complex_alias.component_id).\
                filter(complex_alias.complex_id==incl_alias.c.component_id)
        )
        return session.query(Component).join(included_components, Component.id == included_components.c.component_id).all()
    def __repr__(self):
        return "Complex (#%d): %s" % \
            (self.id, self.long_name)
    def __init__(self, name, long_name=None):
        super(Complex, self).__init__(name)
        self.long_name = long_name
class DNA(Component):
    """A DNA component anchored to a GenomeRegion (created on demand)."""
    __tablename__ = 'dna'
    id = Column(Integer, ForeignKey('component.id'), primary_key=True)
    # Discriminator for DNA subtypes (e.g. binding sites).
    type = Column(String(20))
    genome_region_id = Column(Integer, ForeignKey('genome_region.id'))
    genome_region = relationship('GenomeRegion', backref=backref('dna', lazy='dynamic'))
    __mapper_args__ = { 'polymorphic_identity': 'dna',
                        'polymorphic_on': type
                      }
    def __init__(self, name=None, leftpos=None, rightpos=None, strand=None, genome_id=None):
        super(DNA, self).__init__(name)
        # Find or create the underlying genome region for these coordinates.
        session = Session()
        self.genome_region_id = session.get_or_create(GenomeRegion, leftpos=leftpos,\
                                                      rightpos=rightpos, genome_id=genome_id,
                                                      strand=strand).id
        session.close()
    def __repr__(self):
        return "DNA (#%d, %s) %d-%d %s"% \
            (self.id, self.name, self.genome_region.leftpos, self.genome_region.rightpos,\
             self.genome_region.strand)
class DnaBindingSite(DNA):
    """A DNA binding site with an explicit center position and width."""
    __tablename__ = 'dna_binding_site'
    __mapper_args__ = { 'polymorphic_identity': 'binding_site' }
    id = Column(Integer, ForeignKey('dna.id'), primary_key=True)
    centerpos = Column(Integer)
    width = Column(Integer)
    def __init__(self, name, leftpos, rightpos, strand, genome_id, centerpos, width):
        super(DnaBindingSite, self).__init__(name, leftpos, rightpos, strand, genome_id)
        self.centerpos = centerpos
        self.width = width
class RNA(Component):
    """An RNA component anchored to a GenomeRegion (created on demand)."""
    __tablename__ = 'rna'
    __mapper_args__ = { 'polymorphic_identity': 'rna' }
    id = Column(Integer, ForeignKey('component.id', ondelete='CASCADE'), primary_key=True)
    type = Column(String(20))
    genome_region_id = Column(Integer, ForeignKey('genome_region.id'))
    def __init__(self, name=None, leftpos=None, rightpos=None, strand=None, genome_id=None):
        super(RNA, self).__init__(name)
        # Find or create the underlying genome region for these coordinates.
        session = Session()
        self.genome_region_id = session.get_or_create(GenomeRegion, leftpos=leftpos,\
                                                      rightpos=rightpos, strand=strand,
                                                      genome_id=genome_id).id
        session.close()
    def __repr__(self):
        return "RNA (#%d, %s)" % \
            (self.id, self.name)
class TUGenes(Base):
    """Association row linking a transcription unit to one of its genes."""
    __tablename__ = 'tu_genes'
    tu_id = Column(Integer, ForeignKey('tu.id'), primary_key=True)
    gene_id = Column(Integer, ForeignKey('gene.id'), primary_key=True)
    __table_args__ = (UniqueConstraint('tu_id','gene_id'),{})
    def __init__(self, tu_id, gene_id):
        self.tu_id = tu_id
        self.gene_id = gene_id
class TU(RNA):
    """A transcription unit: an RNA spanning one or more genes."""
    __tablename__ = 'tu'
    __mapper_args__ = { 'polymorphic_identity': 'tu' }
    id = Column(Integer, ForeignKey('rna.id', ondelete='CASCADE'), primary_key=True)
    genome_region = relationship("GenomeRegion")
    genes = relationship("Gene", secondary="tu_genes",\
                         primaryjoin = id == TUGenes.tu_id,\
                         backref="tu")
    # Coordinates are read through to the underlying genome_region row.
    strand = column_property(select([GenomeRegion.strand]).\
                             where(GenomeRegion.id == id))
    leftpos = column_property(select([GenomeRegion.leftpos]).\
                              where(GenomeRegion.id == id))
    rightpos = column_property(select([GenomeRegion.rightpos]).\
                               where(GenomeRegion.id == id))
    long_name = Column(String(200))
    @hybrid_property
    def tss(self):
        """Transcription start site: the strand-dependent 5' end of the TU."""
        if self.strand == '+':
            return self.leftpos
        else:
            return self.rightpos
    def __init__(self, name, leftpos, rightpos, strand, genome_id, long_name=None):
        super(TU, self).__init__(name, leftpos, rightpos, strand, genome_id)
        self.long_name = long_name
    def __repr__(self):
        return "TU (#%d, %s)" % \
            (self.id, self.name)
class Protein(Component):
    """A protein component, optionally linked to the gene encoding it."""
    __tablename__ = 'protein'
    __mapper_args__ = { 'polymorphic_identity': 'protein' }
    id = Column(Integer, ForeignKey('component.id'), primary_key=True)
    long_name = Column(String(200))
    gene_id = Column(Integer, ForeignKey('gene.id'))
    gene = relationship('Gene', backref='protein')
    def __init__(self, name, gene_id=None, long_name=None):
        super(Protein, self).__init__(name)
        self.gene_id = gene_id
        self.long_name = long_name
    def __repr__(self):
        return "Protein (#%d, %s)" % \
            (self.id, self.long_name)
class Metabolite(Component):
    """A small-molecule component with optional formula and SMILES string."""
    __tablename__ = 'metabolite'
    __mapper_args__ = { 'polymorphic_identity': 'metabolite' }
    id = Column(Integer, ForeignKey('component.id'), primary_key=True)
    long_name = Column(String(200))
    formula = Column(String(200))
    smiles = Column(String(200))
    def __init__(self, name, long_name, formula="", smiles=""):
        super(Metabolite, self).__init__(name)
        self.long_name = long_name
        self.formula = formula
        self.smiles = smiles
    def __repr__(self):
        return "Small Molecule (#%d, %s)" % \
            (self.id, self.long_name)
class GeneGrouping(Base):
    """Association row assigning a gene to a named gene group."""
    __tablename__ = 'gene_grouping'
    group_id = Column(Integer, ForeignKey('gene_group.id', ondelete="CASCADE"), primary_key=True)
    gene_id = Column(Integer, ForeignKey('gene.id', ondelete="CASCADE"), primary_key=True)
    __table_args__ = (UniqueConstraint('group_id','gene_id'),{})
    def __init__(self, group_id, gene_id):
        self.group_id = group_id
        self.gene_id = gene_id
class GeneGroup(Base):
    """A uniquely-named collection of genes."""
    __tablename__ = 'gene_group'
    id = Column(Integer, primary_key=True, autoincrement=True)
    name = Column(String(100))
    genes = relationship("Gene", secondary="gene_grouping",\
                         primaryjoin = id == GeneGrouping.group_id,\
                         backref="groups")
    __table_args__ = (UniqueConstraint('name'),{})
    def __repr__(self):
        return "Gene Group (#%d, %s) %d genes" % \
            (self.id, self.name, len(self.genes))
    def __init__(self, name):
        self.name = name
| {
"content_hash": "2ec457599f75d7d9d575c6a82b047149",
"timestamp": "",
"source": "github",
"line_count": 316,
"max_line_length": 123,
"avg_line_length": 33.06012658227848,
"alnum_prop": 0.5728917392552886,
"repo_name": "steve-federowicz/om",
"id": "e82f4d38b48365cc5fc9207a39839b1874a09fee",
"size": "10447",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "om/components.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "42989"
},
{
"name": "Python",
"bytes": "197913"
},
{
"name": "Shell",
"bytes": "6704"
}
],
"symlink_target": ""
} |
from runtime import classonlymethod
import console
def sum(x, y):
    """Return the sum of x and y (note: shadows the builtin `sum`)."""
    total = x + y
    return total
def nop():
    """Do nothing and return None."""
    return None
class Foo(object):
    """Sample class exercising instance state, methods, and a class method."""
    def __init__(self, x):
        self.x = x
    def getX(self):
        """Return the instance's x value."""
        return self.x
    def addX(self, y):
        """Print Foo.bar(5) as a side effect, then return self.x + y."""
        print Foo.bar(5)
        return self.x + y
    def nop(self):
        """Do nothing."""
        pass
    @classonlymethod
    def bar(cls, multiplier):
        # Reads the class attribute Foo.z assigned at module level below.
        return Foo.z * multiplier
# Class attribute read by Foo.bar (assigned after the class definition).
Foo.z = 23
print "hello, world"
# Exercise the free function and Foo's methods.
print "5 + 10 = " + str(sum(5, 10))
foo = Foo(5)
print foo.getX()
print foo.addX(10)
print Foo.bar(2)
print "hello, world!"
| {
"content_hash": "3afd401c1e602707457e3f5630715689",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 35,
"avg_line_length": 16.142857142857142,
"alnum_prop": 0.5787610619469027,
"repo_name": "kevinb7/js2py",
"id": "a01344be6d99da0c9e7e8e1b6223c9e0e8bd9522",
"size": "565",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/current.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "185974"
},
{
"name": "Python",
"bytes": "2221"
}
],
"symlink_target": ""
} |
__author__ = 'fahadadeel'
import jpype
class AddSmartArt:
def __init__(self, dataDir):
print "init func"
self.dataDir = dataDir
self.Presentation=jpype.JClass("com.aspose.slides.Presentation")
self.SaveFormat=jpype.JClass("com.aspose.slides.SaveFormat")
self.SmartArtLayoutType=jpype.JClass("com.aspose.slides.SmartArtLayoutType")
def main(self):
self.create_smartart_shape()
def create_smartart_shape(self):
# Create an instance of Presentation class
pres = self.Presentation()
# Get the first slide
slide = pres.getSlides().get_Item(0)
# Add Smart Art Shape
smartArtLayoutType = self.SmartArtLayoutType
smart = slide.getShapes().addSmartArt(0, 0, 400, 400, smartArtLayoutType.BasicBlockList)
# Write the presentation as a PPTX file
saveFormat = self.SaveFormat
pres.save(self.dataDir + "SimpleSmartArt.pptx", saveFormat.Pptx)
print "Created smartart shape, please check the output file."
class FillFormat:
def __init__(self, dataDir):
print "init func"
self.dataDir = dataDir
self.Presentation=jpype.JClass("com.aspose.slides.Presentation")
self.SaveFormat=jpype.JClass("com.aspose.slides.SaveFormat")
self.SmartArtLayoutType=jpype.JClass("com.aspose.slides.SmartArtLayoutType")
self.FillType=jpype.JClass("com.aspose.slides.FillType")
def main(self):
self.create_smartart_shape()
def create_smartart_shape(self):
# Create an instance of Presentation class
pres = self.Presentation()
# Get the first slide
slide = pres.getSlides().get_Item(0)
# Adding SmartArt shape and nodes
smartArtLayoutType = self.SmartArtLayoutType
chevron = slide.getShapes().addSmartArt(10, 10, 800, 60, smartArtLayoutType.ClosedChevronProcess)
node = chevron.getAllNodes().addNode()
node.getTextFrame().setText("Some text")
# Setting node fill color
color = self.Color
fillType = self.FillType
item = node.getShapes().get_Item(0)
item.getFillFormat().setFillType(fillType.Solid)
item.getFillFormat().getSolidFillColor().setColor(color.RED)
# Write the presentation as a PPTX file
saveFormat = self.SaveFormat
pres.save(dataDir + "FillFormat.pptx", saveFormat.Pptx)
print "Set fill format for smartart node, please check the output file." | {
"content_hash": "50cb4d00ae85003a678bf67d90e74f77",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 105,
"avg_line_length": 31.962025316455698,
"alnum_prop": 0.6605940594059406,
"repo_name": "asposeslides/Aspose_Slides_Java",
"id": "8165f2ca989f279b18e21bb070d0c3766a438749",
"size": "2525",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Plugins/Aspose-Slides-Java-for-Python/WorkingWithSmartArt/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "17962"
},
{
"name": "Java",
"bytes": "344919"
},
{
"name": "PHP",
"bytes": "146397"
},
{
"name": "Python",
"bytes": "132116"
},
{
"name": "Ruby",
"bytes": "166824"
}
],
"symlink_target": ""
} |
"""Commands for reading and manipulating target HTTPS proxies."""
from googlecloudsdk.calliope import base
# Command group exposed only on the alpha and beta release tracks.
@base.ReleaseTracks(base.ReleaseTrack.ALPHA, base.ReleaseTrack.BETA)
class TargetHTTPSProxies(base.Group):
  """List, create, and delete target HTTPS proxies."""
# Help text surfaced for this command group (e.g. in `--help` output).
TargetHTTPSProxies.detailed_help = {
    'brief': 'List, create, and delete target HTTPS proxies',
}
| {
"content_hash": "8ff1c33d5ccf7f733bcc84ccb8e5a5e1",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 68,
"avg_line_length": 28.846153846153847,
"alnum_prop": 0.7626666666666667,
"repo_name": "wemanuel/smry",
"id": "5f5e540c04f75aaba68a51c96f97788e99c10b7a",
"size": "425",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "server-auth/ls/google-cloud-sdk/lib/googlecloudsdk/compute/subcommands/target_https_proxies/__init__.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3990"
},
{
"name": "Groff",
"bytes": "1221174"
},
{
"name": "HTML",
"bytes": "1873470"
},
{
"name": "JavaScript",
"bytes": "2192"
},
{
"name": "Makefile",
"bytes": "6032"
},
{
"name": "PHP",
"bytes": "16660"
},
{
"name": "Python",
"bytes": "47139164"
},
{
"name": "Shell",
"bytes": "37102"
},
{
"name": "SourcePawn",
"bytes": "1160"
}
],
"symlink_target": ""
} |
from __future__ import with_statement
import logging
# NOTE(review): `l` is an unconventional logger name (easily misread as `1`);
# kept as-is since other code in this module may reference it.
l = logging.getLogger(__name__)
from datetime import timedelta
from django.conf import settings
from django.core.management.base import CommandError
from django_lean.experiments.models import (Experiment, DailyEngagementReport,
DailyConversionReport)
from django_lean.experiments.management.commands import (
update_experiment_reports
)
from django_lean.experiments.tests.utils import patch, TestCase
class TestManagement(TestCase):
    """Tests for the update_experiment_reports management command."""
    def setUp(self):
        """Create an enabled experiment whose start date is 5 days in the past."""
        self.experiment = Experiment(name="test_experiment")
        self.experiment.save()
        self.experiment.state = Experiment.ENABLED_STATE
        self.experiment.save()
        # Backdate the start so the command has 5 days of reports to generate.
        self.experiment.start_date = (self.experiment.start_date -
                                      timedelta(days=5))
        self.experiment.save()
    def testManageCommand(self):
        """Command rejects extra args and generates one report per day."""
        with patch(settings, 'LEAN_ENGAGEMENT_CALCULATOR',
                   'django_lean.experiments.testsettings.SimpleEngagementCalculator'):
            #make sure the manage.py command that generates daily stats work
            #Running with arguments should raise Exception
            self.assertRaises(CommandError,
                              update_experiment_reports.Command().handle,
                              "some", "args")
            #This is what manage.py will call
            self.runner = update_experiment_reports.Command().run_from_argv
            #Run the reports
            self.runner(['manage.py', 'update_experiment_reports'])
            #Make sure they were generated
            self.assertEqual(5, DailyEngagementReport.objects.filter(
                    experiment=self.experiment).count())
            self.assertEqual(5, DailyConversionReport.objects.filter(
                    experiment=self.experiment).count())
| {
"content_hash": "26e1f1f4459e241642d09d0ee0350cc7",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 87,
"avg_line_length": 40.829787234042556,
"alnum_prop": 0.6347055758207399,
"repo_name": "MontmereLimited/django-lean",
"id": "86286591631db8bbab61a43f5110f07891414d8c",
"size": "1943",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "django_lean/experiments/tests/test_management.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "14189"
},
{
"name": "JavaScript",
"bytes": "2774"
},
{
"name": "Python",
"bytes": "301048"
}
],
"symlink_target": ""
} |
"""Config flow for elmax-cloud integration."""
from __future__ import annotations
import logging
from typing import Any
from elmax_api.exceptions import ElmaxBadLoginError, ElmaxBadPinError, ElmaxNetworkError
from elmax_api.http import Elmax
from elmax_api.model.panel import PanelEntry
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.data_entry_flow import FlowResult
from homeassistant.exceptions import HomeAssistantError
from .const import (
CONF_ELMAX_PANEL_ID,
CONF_ELMAX_PANEL_NAME,
CONF_ELMAX_PANEL_PIN,
CONF_ELMAX_PASSWORD,
CONF_ELMAX_USERNAME,
DOMAIN,
)
_LOGGER = logging.getLogger(__name__)
# Schema for the initial login step: Elmax cloud username and password.
LOGIN_FORM_SCHEMA = vol.Schema(
    {
        vol.Required(CONF_ELMAX_USERNAME): str,
        vol.Required(CONF_ELMAX_PASSWORD): str,
    }
)
def _store_panel_by_name(
panel: PanelEntry, username: str, panel_names: dict[str, str]
) -> None:
original_panel_name = panel.get_name_by_user(username=username)
panel_id = panel.hash
collisions_count = 0
panel_name = original_panel_name
while panel_name in panel_names:
# Handle same-name collision.
collisions_count += 1
panel_name = f"{original_panel_name} ({collisions_count})"
panel_names[panel_name] = panel_id
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
    """Handle a config flow for elmax-cloud."""
    VERSION = 1
    # State carried between flow steps (set during async_step_user).
    _client: Elmax
    _username: str
    _password: str
    _panels_schema: vol.Schema
    _panel_names: dict
    # Reauthentication context; None outside of a reauth flow.
    _reauth_username: str | None
    _reauth_panelid: str | None
async def async_step_user(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Handle a flow initialized by the user."""
# When invokes without parameters, show the login form.
if user_input is None:
return self.async_show_form(step_id="user", data_schema=LOGIN_FORM_SCHEMA)
username = user_input[CONF_ELMAX_USERNAME]
password = user_input[CONF_ELMAX_PASSWORD]
# Otherwise, it means we are handling now the "submission" of the user form.
# In this case, let's try to log in to the Elmax cloud and retrieve the available panels.
try:
client = await self._async_login(username=username, password=password)
except ElmaxBadLoginError:
return self.async_show_form(
step_id="user",
data_schema=LOGIN_FORM_SCHEMA,
errors={"base": "invalid_auth"},
)
except ElmaxNetworkError:
_LOGGER.exception("A network error occurred")
return self.async_show_form(
step_id="user",
data_schema=LOGIN_FORM_SCHEMA,
errors={"base": "network_error"},
)
# If the login succeeded, retrieve the list of available panels and filter the online ones
online_panels = [x for x in await client.list_control_panels() if x.online]
# If no online panel was found, we display an error in the next UI.
if not online_panels:
return self.async_show_form(
step_id="user",
data_schema=LOGIN_FORM_SCHEMA,
errors={"base": "no_panel_online"},
)
# Show the panel selection.
# We want the user to choose the panel using the associated name, we set up a mapping
# dictionary to handle that case.
panel_names: dict[str, str] = {}
username = client.get_authenticated_username()
for panel in online_panels:
_store_panel_by_name(
panel=panel, username=username, panel_names=panel_names
)
self._client = client
self._panel_names = panel_names
schema = vol.Schema(
{
vol.Required(CONF_ELMAX_PANEL_NAME): vol.In(self._panel_names.keys()),
vol.Required(CONF_ELMAX_PANEL_PIN, default="000000"): str,
}
)
self._panels_schema = schema
self._username = username
self._password = password
# If everything went OK, proceed to panel selection.
return await self.async_step_panels(user_input=None)
async def async_step_panels(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Handle Panel selection step."""
errors: dict[str, Any] = {}
if user_input is None:
return self.async_show_form(
step_id="panels", data_schema=self._panels_schema, errors=errors
)
panel_name = user_input[CONF_ELMAX_PANEL_NAME]
panel_pin = user_input[CONF_ELMAX_PANEL_PIN]
# Lookup the panel id from the panel name.
panel_id = self._panel_names[panel_name]
# Make sure this is the only elmax integration for this specific panel id.
await self.async_set_unique_id(panel_id)
self._abort_if_unique_id_configured()
# Try to list all the devices using the given PIN.
try:
await self._client.get_panel_status(
control_panel_id=panel_id, pin=panel_pin
)
return self.async_create_entry(
title=f"Elmax {panel_name}",
data={
CONF_ELMAX_PANEL_ID: panel_id,
CONF_ELMAX_PANEL_PIN: panel_pin,
CONF_ELMAX_USERNAME: self._username,
CONF_ELMAX_PASSWORD: self._password,
},
)
except ElmaxBadPinError:
errors["base"] = "invalid_pin"
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Error occurred")
errors["base"] = "unknown"
return self.async_show_form(
step_id="panels", data_schema=self._panels_schema, errors=errors
)
async def async_step_reauth(self, user_input=None):
"""Perform reauth upon an API authentication error."""
self._reauth_username = user_input.get(CONF_ELMAX_USERNAME)
self._reauth_panelid = user_input.get(CONF_ELMAX_PANEL_ID)
return await self.async_step_reauth_confirm()
async def async_step_reauth_confirm(self, user_input=None):
"""Handle reauthorization flow."""
errors = {}
if user_input is not None:
panel_pin = user_input.get(CONF_ELMAX_PANEL_PIN)
password = user_input.get(CONF_ELMAX_PASSWORD)
entry = await self.async_set_unique_id(self._reauth_panelid)
# Handle authentication, make sure the panel we are re-authenticating against is listed among results
# and verify its pin is correct.
try:
# Test login.
client = await self._async_login(
username=self._reauth_username, password=password
)
# Make sure the panel we are authenticating to is still available.
panels = [
p
for p in await client.list_control_panels()
if p.hash == self._reauth_panelid
]
if len(panels) < 1:
raise NoOnlinePanelsError()
# Verify the pin is still valid.from
await client.get_panel_status(
control_panel_id=self._reauth_panelid, pin=panel_pin
)
# If it is, proceed with configuration update.
self.hass.config_entries.async_update_entry(
entry,
data={
CONF_ELMAX_PANEL_ID: self._reauth_panelid,
CONF_ELMAX_PANEL_PIN: panel_pin,
CONF_ELMAX_USERNAME: self._reauth_username,
CONF_ELMAX_PASSWORD: password,
},
)
await self.hass.config_entries.async_reload(entry.entry_id)
self._reauth_username = None
self._reauth_panelid = None
return self.async_abort(reason="reauth_successful")
except ElmaxBadLoginError:
_LOGGER.error(
"Wrong credentials or failed login while re-authenticating"
)
errors["base"] = "invalid_auth"
except NoOnlinePanelsError:
_LOGGER.warning(
"Panel ID %s is no longer associated to this user",
self._reauth_panelid,
)
errors["base"] = "reauth_panel_disappeared"
except ElmaxBadPinError:
errors["base"] = "invalid_pin"
# We want the user to re-authenticate only for the given panel id using the same login.
# We pin them to the UI, so the user realizes she must log in with the appropriate credentials
# for the that specific panel.
schema = vol.Schema(
{
vol.Required(CONF_ELMAX_USERNAME): self._reauth_username,
vol.Required(CONF_ELMAX_PASSWORD): str,
vol.Required(CONF_ELMAX_PANEL_ID): self._reauth_panelid,
vol.Required(CONF_ELMAX_PANEL_PIN): str,
}
)
return self.async_show_form(
step_id="reauth_confirm", data_schema=schema, errors=errors
)
@staticmethod
async def _async_login(username: str, password: str) -> Elmax:
"""Log in to the Elmax cloud and return the http client."""
client = Elmax(username=username, password=password)
await client.login()
return client
class NoOnlinePanelsError(HomeAssistantError):
    """Error occurring when no online panel was found.

    Raised internally by the reauth flow when the panel being
    re-authenticated is no longer associated with the account.
    """
| {
"content_hash": "96be7b9a435e9bfdd075e5a1a73ce557",
"timestamp": "",
"source": "github",
"line_count": 259,
"max_line_length": 113,
"avg_line_length": 37.69111969111969,
"alnum_prop": 0.5820528580208973,
"repo_name": "toddeye/home-assistant",
"id": "6872a555b8a2a75a23809ba40460d8df1cb14082",
"size": "9762",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/elmax/config_flow.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3005"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "47414832"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
} |
import itertools
from werkzeug.datastructures import MultiDict
def to_multidict(value):
    """Return *value* as a MultiDict.

    Plain dicts are converted (list/tuple values become repeated keys);
    anything that is already a MultiDict — or not a dict at all — is
    passed through unchanged.
    """
    if isinstance(value, dict) and not isinstance(value, MultiDict):
        return _dict_to_multidict(value)
    return value
def flatten(to_flatten_list):
    """Recursively flatten nested lists/tuples into a single flat list.

    Non-sequence inputs (including strings) are returned unchanged, which
    lets callers apply this uniformly to single values and value lists.

    Bug fix: the old code did
    ``result.extend(flatten(item) for item in list_item)`` — each
    ``flatten(item)`` that was itself a list got appended as ONE element,
    so nesting deeper than two levels survived. Extending with the
    flattened sublist directly flattens all depths.
    """
    if not isinstance(to_flatten_list, (list, tuple)):
        return to_flatten_list
    result = []
    for list_item in to_flatten_list:
        if isinstance(list_item, (list, tuple)):
            # flatten() on a list/tuple always returns a flat list.
            result.extend(flatten(list_item))
        else:
            result.append(list_item)
    return result
def create_key_value_pairs(dictionary, key):
    """Return an iterator of ``(key, value)`` pairs, one per flattened value.

    Works with MultiDict-like mappings (via ``getall``) as well as plain
    dicts (via ``get``); nested value lists are flattened first.
    """
    if hasattr(dictionary, 'getall'):
        raw_values = dictionary.getall(key)
    else:
        raw_values = [dictionary.get(key)]
    values = flatten(raw_values)
    return zip([key] * len(values), values)
def _dict_to_multidict(value):
    """Build a MultiDict by expanding every key of *value* into (key, value) pairs."""
    pairs = itertools.chain.from_iterable(
        create_key_value_pairs(value, key) for key in value.keys()
    )
    return MultiDict(pairs)
| {
"content_hash": "b0d21c33429280452065ded3578fc634",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 68,
"avg_line_length": 24.568181818181817,
"alnum_prop": 0.6197964847363552,
"repo_name": "cbrand/python-filterparams",
"id": "4cae7e61bde0bd0bd5be5a747ddb535c6c59f050",
"size": "1108",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/filterparams/util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25230"
}
],
"symlink_target": ""
} |
"""
FILE: strategy.py
DESC: describes an arbitrage strategy.
"""
__author__ = 'Luis F. Serazo'
__email__ = 'lserazo.projects@gmail.com'
from collections import OrderedDict
class Arbitrage(object):
    """
    Only want differences that are within the (lower, upper) interval
    """
    def __init__(self, lower: float, upper: float, quantity: int):
        # Acceptable price-spread bounds and the fixed quantity to trade
        # when an opportunity is found.
        self.lower = lower
        self.upper = upper
        self.quantity = quantity
    """
    greedy_search:
       prices: ordered dictionary: {Exchange_Name -> Price, ... }.
    """
    def greedy_search(self, prices: OrderedDict):
        """Find a buy-low/sell-high exchange pair whose spread is in bounds.

        *prices* maps exchange name -> price. NOTE: the mapping is consumed
        destructively (two ``popitem`` calls), so callers must pass a copy
        they can spare. Raises ArbitrageException when fewer than two
        prices are supplied. Returns ``{'sell': ..., 'buy': ..., 'qnt': ...}``
        or ``None`` when no spread within (lower, upper) exists.
        """
        if len(prices) < 2:
            raise ArbitrageException()
        # Seed min/max with the two oldest entries; popitem(False) pops FIFO.
        p1, p2 = prices.popitem(False), prices.popitem(False)
        if p1[1] < p2[1]:
            p_min, p_max = p1[1], p2[1]
            p_min_key, p_max_key = p1[0], p2[0]
            delta = p_max - p_min
        else:
            p_min, p_max = p2[1], p1[1]
            p_min_key, p_max_key = p2[0], p1[0]
            delta = p_max - p_min
        ## Here we adjust the p_min/p_max in a greedy fashion
        ## that is, we want to find the p_maximum profit within
        ## reason
        for exchange, price in prices.items():
            if price > p_max and price - p_min < self.upper:
                p_max = price
                p_max_key = exchange
                delta = p_max - p_min
            # NOTE(review): asymmetric with the branch above — this only
            # requires the widened spread to exceed self.lower instead of
            # capping it at self.upper; confirm that is intentional
            # (out-of-range spreads are still rejected by the final guard).
            elif price < p_min and p_max - price > self.lower:
                p_min = price
                p_min_key = exchange
                delta = p_max - p_min
        if delta == 0 or delta < self.lower or delta > self.upper: return None
        return {'sell': p_max_key, 'buy': p_min_key, 'qnt': self.quantity}
class ArbitrageException(Exception):
pass | {
"content_hash": "6646dc2d199560dee06636a1263f441c",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 72,
"avg_line_length": 23.761904761904763,
"alnum_prop": 0.6285905143620575,
"repo_name": "luserazo/OracleReloaded",
"id": "855af9ca0180989764982ce15fd911a9a57f3d3f",
"size": "1497",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bot/strategy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "21113"
},
{
"name": "Shell",
"bytes": "1208"
}
],
"symlink_target": ""
} |
"""
SST-2 Binary text classification with XLM-RoBERTa model
=======================================================
**Author**: `Parmeet Bhatia <parmeetbhatia@fb.com>`__
"""
######################################################################
# Overview
# --------
#
# This tutorial demonstrates how to train a text classifier on SST-2 binary dataset using a pre-trained XLM-RoBERTa (XLM-R) model.
# We will show how to use torchtext library to:
#
# 1. build text pre-processing pipeline for XLM-R model
# 2. read SST-2 dataset and transform it using text and label transformation
# 3. instantiate classification model using pre-trained XLM-R encoder
#
#
######################################################################
# Common imports
# --------------
import torch
import torch.nn as nn
# Prefer GPU when available; the model and all batch tensors are moved here.
DEVICE = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
#######################################################################
# Data Transformation
# -------------------
#
# Models like XLM-R cannot work directly with raw text. The first step in training
# these models is to transform input text into tensor (numerical) form such that it
# can then be processed by models to make predictions. A standard way to process text is:
#
# 1. Tokenize text
# 2. Convert tokens into (integer) IDs
# 3. Add any special tokens IDs
#
# XLM-R uses sentencepiece model for text tokenization. Below, we use pre-trained sentencepiece
# model along with corresponding vocabulary to build text pre-processing pipeline using torchtext's transforms.
# The transforms are pipelined using :py:func:`torchtext.transforms.Sequential` which is similar to :py:func:`torch.nn.Sequential`
# but is torchscriptable. Note that the transforms support both batched and non-batched text inputs i.e, one
# can either pass a single sentence or list of sentences.
#
import torchtext.transforms as T
from torch.hub import load_state_dict_from_url
# Special-token ids of the XLM-R sentencepiece vocabulary and the maximum
# sequence length fed to the encoder.
padding_idx = 1
bos_idx = 0
eos_idx = 2
max_seq_len = 256
xlmr_vocab_path = r"https://download.pytorch.org/models/text/xlmr.vocab.pt"
xlmr_spm_model_path = r"https://download.pytorch.org/models/text/xlmr.sentencepiece.bpe.model"
# Text pipeline: tokenize -> map tokens to ids -> truncate (reserving two
# slots for the special tokens) -> wrap with BOS/EOS.
text_transform = T.Sequential(
    T.SentencePieceTokenizer(xlmr_spm_model_path),
    T.VocabTransform(load_state_dict_from_url(xlmr_vocab_path)),
    T.Truncate(max_seq_len - 2),
    T.AddToken(token=bos_idx, begin=True),
    T.AddToken(token=eos_idx, begin=False),
)
from torch.utils.data import DataLoader
#######################################################################
# Alternately we can also use transform shipped with pre-trained model that does all of the above out-of-the-box
#
# ::
#
# text_transform = XLMR_BASE_ENCODER.transform()
#
#######################################################################
# Dataset
# -------
# torchtext provides several standard NLP datasets. For complete list, refer to documentation
# at https://pytorch.org/text/stable/datasets.html. These datasets are build using composable torchdata
# datapipes and hence support standard flow-control and mapping/transformation using user defined functions
# and transforms. Below, we demonstrate how to use text and label processing transforms to pre-process the
# SST-2 dataset.
#
# .. note::
# Using datapipes is still currently subject to a few caveats. If you wish
# to extend this example to include shuffling, multi-processing, or
# distributed learning, please see :ref:`this note <datapipes_warnings>`
# for further instructions.
from torchtext.datasets import SST2
batch_size = 16
# Raw SST-2 datapipes; each yields (text, label) samples.
train_datapipe = SST2(split="train")
dev_datapipe = SST2(split="dev")
# Transform the raw dataset using non-batched API (i.e apply transformation line by line)
def apply_transform(x):
    """Numericalize the text of one (text, label) sample; keep the label as-is."""
    text, label = x[0], x[1]
    return text_transform(text), label
# Tokenize per sample, batch, then pivot the rows into columnar dicts of the
# form {"token_ids": [...], "target": [...]}. batch_size=None on the
# DataLoader because the datapipes already yield whole batches.
train_datapipe = train_datapipe.map(apply_transform)
train_datapipe = train_datapipe.batch(batch_size)
train_datapipe = train_datapipe.rows2columnar(["token_ids", "target"])
train_dataloader = DataLoader(train_datapipe, batch_size=None)
dev_datapipe = dev_datapipe.map(apply_transform)
dev_datapipe = dev_datapipe.batch(batch_size)
dev_datapipe = dev_datapipe.rows2columnar(["token_ids", "target"])
dev_dataloader = DataLoader(dev_datapipe, batch_size=None)
#######################################################################
# Alternately we can also use batched API (i.e apply transformation on the whole batch)
#
# ::
#
# def batch_transform(x):
# return {"token_ids": text_transform(x["text"]), "target": x["label"]}
#
#
# train_datapipe = train_datapipe.batch(batch_size).rows2columnar(["text", "label"])
# train_datapipe = train_datapipe.map(lambda x: batch_transform)
# dev_datapipe = dev_datapipe.batch(batch_size).rows2columnar(["text", "label"])
# dev_datapipe = dev_datapipe.map(lambda x: batch_transform)
#
######################################################################
# Model Preparation
# -----------------
#
# torchtext provides SOTA pre-trained models that can be used to fine-tune on downstream NLP tasks.
# Below we use pre-trained XLM-R encoder with standard base architecture and attach a classifier head to fine-tune it
# on SST-2 binary classification task. We shall use standard Classifier head from the library, but users can define
# their own appropriate task head and attach it to the pre-trained encoder. For additional details on available pre-trained models,
# please refer to documentation at https://pytorch.org/text/main/models.html
#
#
num_classes = 2  # SST-2 is a binary task
input_dim = 768  # hidden size of the XLM-R base encoder
from torchtext.models import RobertaClassificationHead, XLMR_BASE_ENCODER
# Pre-trained encoder with a freshly-initialized classification head.
classifier_head = RobertaClassificationHead(num_classes=num_classes, input_dim=input_dim)
model = XLMR_BASE_ENCODER.get_model(head=classifier_head)
model.to(DEVICE)
#######################################################################
# Training methods
# ----------------
#
# Let's now define the standard optimizer and training criteria as well as some helper functions
# for training and evaluation
#
import torchtext.functional as F
from torch.optim import AdamW
# Small learning rate, as is customary when fine-tuning a pre-trained encoder.
learning_rate = 1e-5
optim = AdamW(model.parameters(), lr=learning_rate)
criteria = nn.CrossEntropyLoss()
def train_step(input, target):
    """Run a single optimization step (forward, loss, backward, update) on one batch."""
    loss = criteria(model(input), target)
    optim.zero_grad()
    loss.backward()
    optim.step()
def eval_step(input, target):
    """Return (loss, number of correct predictions) for one batch; no weight update."""
    logits = model(input)
    batch_loss = criteria(logits, target).item()
    num_correct = (logits.argmax(1) == target).type(torch.float).sum().item()
    return float(batch_loss), num_correct
def evaluate():
    """Return (mean batch loss, accuracy) of the model over the dev set."""
    model.eval()
    loss_sum = 0
    num_correct = 0
    num_examples = 0
    num_batches = 0
    with torch.no_grad():
        for batch in dev_dataloader:
            # Pad the ragged token lists into a rectangular tensor.
            input = F.to_tensor(batch["token_ids"], padding_value=padding_idx).to(DEVICE)
            target = torch.tensor(batch["target"]).to(DEVICE)
            batch_loss, batch_correct = eval_step(input, target)
            loss_sum += batch_loss
            num_correct += batch_correct
            num_examples += len(target)
            num_batches += 1
    return loss_sum / num_batches, num_correct / num_examples
#######################################################################
# Train
# -----
#
# Now we have all the ingredients to train our classification model. Note that we are able to directly iterate
# on our dataset object without using DataLoader. Our pre-process dataset shall yield batches of data already,
# thanks to the batching datapipe we have applied. For distributed training, we would need to use DataLoader to
# take care of data-sharding.
#
num_epochs = 1
for e in range(num_epochs):
    for batch in train_dataloader:
        # Pad each batch to a rectangular tensor before moving to the device.
        input = F.to_tensor(batch["token_ids"], padding_value=padding_idx).to(DEVICE)
        target = torch.tensor(batch["target"]).to(DEVICE)
        train_step(input, target)
    # Evaluate on the dev set once per epoch.
    loss, accuracy = evaluate()
    print("Epoch = [{}], loss = [{}], accuracy = [{}]".format(e, loss, accuracy))
#######################################################################
# Output
# ------
#
# ::
#
# 100%|██████████|5.07M/5.07M [00:00<00:00, 40.8MB/s]
# Downloading: "https://download.pytorch.org/models/text/xlmr.vocab.pt" to /root/.cache/torch/hub/checkpoints/xlmr.vocab.pt
# 100%|██████████|4.85M/4.85M [00:00<00:00, 16.8MB/s]
# Downloading: "https://download.pytorch.org/models/text/xlmr.base.encoder.pt" to /root/.cache/torch/hub/checkpoints/xlmr.base.encoder.pt
# 100%|██████████|1.03G/1.03G [00:26<00:00, 47.1MB/s]
# Epoch = [0], loss = [0.2629831412637776], accuracy = [0.9105504587155964]
#
| {
"content_hash": "6e92106d2a59695712b563381317d246",
"timestamp": "",
"source": "github",
"line_count": 238,
"max_line_length": 139,
"avg_line_length": 36.205882352941174,
"alnum_prop": 0.6506904955320877,
"repo_name": "pytorch/text",
"id": "fbd602db9c19d5b5119c5edb99b4938c42a5d2be",
"size": "8677",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "examples/tutorials/sst2_classification_non_distributed.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5989"
},
{
"name": "C",
"bytes": "1165"
},
{
"name": "C++",
"bytes": "103773"
},
{
"name": "CMake",
"bytes": "6607"
},
{
"name": "Dockerfile",
"bytes": "1632"
},
{
"name": "Python",
"bytes": "761434"
},
{
"name": "Shell",
"bytes": "19559"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase
from django.test.utils import override_settings
from django.views.generic.base import View
from django.utils.encoding import force_str
from .models import Author, Artist
class ListViewTests(TestCase):
    """Tests for Django's generic ListView: context contents, pagination
    (query-string and URL-var page selection, orphans, custom paginator
    classes), template-name resolution and context_object_name handling."""
    fixtures = ['generic-views-test-data.json']
    urls = 'generic_views.urls'
    def test_items(self):
        res = self.client.get('/list/dict/')
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, 'generic_views/list.html')
        self.assertEqual(res.context['object_list'][0]['first'], 'John')
    def test_queryset(self):
        res = self.client.get('/list/authors/')
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, 'generic_views/author_list.html')
        self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
        self.assertTrue(isinstance(res.context['view'], View))
        # The model-named key aliases object_list.
        self.assertIs(res.context['author_list'], res.context['object_list'])
        self.assertIsNone(res.context['paginator'])
        self.assertIsNone(res.context['page_obj'])
        self.assertFalse(res.context['is_paginated'])
    def test_paginated_queryset(self):
        # The paginated view uses 30 items per page (see page-content asserts).
        self._make_authors(100)
        res = self.client.get('/list/authors/paginated/')
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, 'generic_views/author_list.html')
        self.assertEqual(len(res.context['object_list']), 30)
        self.assertIs(res.context['author_list'], res.context['object_list'])
        self.assertTrue(res.context['is_paginated'])
        self.assertEqual(res.context['page_obj'].number, 1)
        self.assertEqual(res.context['paginator'].num_pages, 4)
        self.assertEqual(res.context['author_list'][0].name, 'Author 00')
        self.assertEqual(list(res.context['author_list'])[-1].name, 'Author 29')
    def test_paginated_queryset_shortdata(self):
        # Test that short datasets ALSO result in a paginated view.
        res = self.client.get('/list/authors/paginated/')
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, 'generic_views/author_list.html')
        self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
        self.assertIs(res.context['author_list'], res.context['object_list'])
        self.assertEqual(res.context['page_obj'].number, 1)
        self.assertEqual(res.context['paginator'].num_pages, 1)
        self.assertFalse(res.context['is_paginated'])
    def test_paginated_get_page_by_query_string(self):
        self._make_authors(100)
        res = self.client.get('/list/authors/paginated/', {'page': '2'})
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, 'generic_views/author_list.html')
        self.assertEqual(len(res.context['object_list']), 30)
        self.assertIs(res.context['author_list'], res.context['object_list'])
        self.assertEqual(res.context['author_list'][0].name, 'Author 30')
        self.assertEqual(res.context['page_obj'].number, 2)
    def test_paginated_get_last_page_by_query_string(self):
        self._make_authors(100)
        # 'page=last' is a ListView special case selecting the final page.
        res = self.client.get('/list/authors/paginated/', {'page': 'last'})
        self.assertEqual(res.status_code, 200)
        self.assertEqual(len(res.context['object_list']), 10)
        self.assertIs(res.context['author_list'], res.context['object_list'])
        self.assertEqual(res.context['author_list'][0].name, 'Author 90')
        self.assertEqual(res.context['page_obj'].number, 4)
    def test_paginated_get_page_by_urlvar(self):
        self._make_authors(100)
        res = self.client.get('/list/authors/paginated/3/')
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, 'generic_views/author_list.html')
        self.assertEqual(len(res.context['object_list']), 30)
        self.assertIs(res.context['author_list'], res.context['object_list'])
        self.assertEqual(res.context['author_list'][0].name, 'Author 60')
        self.assertEqual(res.context['page_obj'].number, 3)
    def test_paginated_page_out_of_range(self):
        self._make_authors(100)
        res = self.client.get('/list/authors/paginated/42/')
        self.assertEqual(res.status_code, 404)
    def test_paginated_invalid_page(self):
        self._make_authors(100)
        res = self.client.get('/list/authors/paginated/?page=frog')
        self.assertEqual(res.status_code, 404)
    def test_paginated_custom_paginator_class(self):
        self._make_authors(7)
        res = self.client.get('/list/authors/paginated/custom_class/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.context['paginator'].num_pages, 1)
        # Custom pagination allows for 2 orphans on a page size of 5
        self.assertEqual(len(res.context['object_list']), 7)
    def test_paginated_custom_page_kwarg(self):
        self._make_authors(100)
        # The view is configured to read the page number from 'pagina'.
        res = self.client.get('/list/authors/paginated/custom_page_kwarg/', {'pagina': '2'})
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, 'generic_views/author_list.html')
        self.assertEqual(len(res.context['object_list']), 30)
        self.assertIs(res.context['author_list'], res.context['object_list'])
        self.assertEqual(res.context['author_list'][0].name, 'Author 30')
        self.assertEqual(res.context['page_obj'].number, 2)
    def test_paginated_custom_paginator_constructor(self):
        self._make_authors(7)
        res = self.client.get('/list/authors/paginated/custom_constructor/')
        self.assertEqual(res.status_code, 200)
        # Custom pagination allows for 2 orphans on a page size of 5
        self.assertEqual(len(res.context['object_list']), 7)
    def test_paginated_orphaned_queryset(self):
        # 92 items with orphans configured: last partial page is merged.
        self._make_authors(92)
        res = self.client.get('/list/authors/paginated-orphaned/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.context['page_obj'].number, 1)
        res = self.client.get(
            '/list/authors/paginated-orphaned/', {'page': 'last'})
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.context['page_obj'].number, 3)
        res = self.client.get(
            '/list/authors/paginated-orphaned/', {'page': '3'})
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.context['page_obj'].number, 3)
        res = self.client.get(
            '/list/authors/paginated-orphaned/', {'page': '4'})
        self.assertEqual(res.status_code, 404)
    def test_paginated_non_queryset(self):
        res = self.client.get('/list/dict/paginated/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(len(res.context['object_list']), 1)
    def test_verbose_name(self):
        res = self.client.get('/list/artists/')
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, 'generic_views/list.html')
        self.assertEqual(list(res.context['object_list']), list(Artist.objects.all()))
        self.assertIs(res.context['artist_list'], res.context['object_list'])
        self.assertIsNone(res.context['paginator'])
        self.assertIsNone(res.context['page_obj'])
        self.assertFalse(res.context['is_paginated'])
    def test_allow_empty_false(self):
        res = self.client.get('/list/authors/notempty/')
        self.assertEqual(res.status_code, 200)
        # With allow_empty=False an empty queryset must 404.
        Author.objects.all().delete()
        res = self.client.get('/list/authors/notempty/')
        self.assertEqual(res.status_code, 404)
    def test_template_name(self):
        res = self.client.get('/list/authors/template_name/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
        self.assertIs(res.context['author_list'], res.context['object_list'])
        self.assertTemplateUsed(res, 'generic_views/list.html')
    def test_template_name_suffix(self):
        res = self.client.get('/list/authors/template_name_suffix/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
        self.assertIs(res.context['author_list'], res.context['object_list'])
        self.assertTemplateUsed(res, 'generic_views/author_objects.html')
    def test_context_object_name(self):
        res = self.client.get('/list/authors/context_object_name/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
        self.assertNotIn('authors', res.context)
        self.assertIs(res.context['author_list'], res.context['object_list'])
        self.assertTemplateUsed(res, 'generic_views/author_list.html')
    def test_duplicate_context_object_name(self):
        res = self.client.get('/list/authors/dupe_context_object_name/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
        self.assertNotIn('authors', res.context)
        self.assertNotIn('author_list', res.context)
        self.assertTemplateUsed(res, 'generic_views/author_list.html')
    def test_missing_items(self):
        self.assertRaises(ImproperlyConfigured, self.client.get, '/list/authors/invalid/')
    def test_paginated_list_view_does_not_load_entire_table(self):
        # Regression test for #17535
        self._make_authors(3)
        # 1 query for authors
        with self.assertNumQueries(1):
            self.client.get('/list/authors/notempty/')
        # same as above + 1 query to test if authors exist + 1 query for pagination
        with self.assertNumQueries(3):
            self.client.get('/list/authors/notempty/paginated/')
    @override_settings(DEBUG=True)
    def test_paginated_list_view_returns_useful_message_on_invalid_page(self):
        # test for #19240
        # tests that source exception's message is included in page
        self._make_authors(1)
        res = self.client.get('/list/authors/paginated/2/')
        self.assertEqual(res.status_code, 404)
        self.assertEqual(force_str(res.context.get('reason')),
                         "Invalid page (2): That page contains no results")
    def _make_authors(self, n):
        # Helper: replace all authors with n predictably-named ones
        # ('Author 00' ... 'Author NN') so page contents can be asserted.
        Author.objects.all().delete()
        for i in range(n):
            Author.objects.create(name='Author %02i' % i, slug='a%s' % i)
| {
"content_hash": "fbd0888d76eae684587b83996954e1d7",
"timestamp": "",
"source": "github",
"line_count": 217,
"max_line_length": 92,
"avg_line_length": 48.58986175115207,
"alnum_prop": 0.65847875569044,
"repo_name": "jgeskens/django",
"id": "cc4d2f5966ae760c17d6a5133aca5815c1990796",
"size": "10544",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tests/generic_views/test_list.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "98169"
},
{
"name": "Python",
"bytes": "8298907"
},
{
"name": "Shell",
"bytes": "11941"
}
],
"symlink_target": ""
} |
import os
import yaml
from oslo_config import cfg
CONF = cfg.CONF
def update_fabscript_map(fabscript_map):
    """Merge per-cluster ``__fabscript.yml`` overrides into *fabscript_map*.

    Keys of *fabscript_map* look like ``"<cluster>/<script>"``. For each
    entry, the cluster's ``__fabscript.yml`` under
    ``CONF._fabscript_module_dir`` is read (when present) and the section
    named after the script is merged into the fabscript dict in place.
    """
    for fabscript_name, fabscript in fabscript_map.items():
        splited_name = fabscript_name.rsplit('/', 1)
        fabscript_cluster = splited_name[0]
        script = splited_name[1]
        fabscript_yaml = os.path.join(
            CONF._fabscript_module_dir, fabscript_cluster, '__fabscript.yml')
        # Guard clause instead of nesting: clusters without an override
        # file are simply skipped.
        if not os.path.exists(fabscript_yaml):
            continue
        with open(fabscript_yaml, 'r') as f:
            # Security/compat fix: yaml.load() without a Loader can
            # construct arbitrary Python objects from a config file and is
            # a TypeError on PyYAML >= 6; safe_load() parses plain data only.
            data = yaml.safe_load(f)
        if data is not None:
            fabscript.update(data.get(script, {}))
| {
"content_hash": "6a9385aa6f9ea1d6f07d44def00e044c",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 77,
"avg_line_length": 33.73684210526316,
"alnum_prop": 0.6037441497659907,
"repo_name": "fabrickit/fabkit",
"id": "12558a9b734a9305bcd9b79ca1fd602e270f9bca",
"size": "658",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/webapp/web_lib/util/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4979"
},
{
"name": "CoffeeScript",
"bytes": "65442"
},
{
"name": "HTML",
"bytes": "40630"
},
{
"name": "JavaScript",
"bytes": "2315"
},
{
"name": "Mako",
"bytes": "988"
},
{
"name": "Python",
"bytes": "256382"
},
{
"name": "Shell",
"bytes": "2697"
}
],
"symlink_target": ""
} |
"""The tests for WebOS TV device triggers."""
import pytest
from homeassistant.components import automation
from homeassistant.components.device_automation import DeviceAutomationType
from homeassistant.components.device_automation.exceptions import (
InvalidDeviceAutomationConfig,
)
from homeassistant.components.webostv import DOMAIN, device_trigger
from homeassistant.config_entries import ConfigEntryState
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers.device_registry import async_get as get_dev_reg
from homeassistant.setup import async_setup_component
from . import setup_webostv
from .const import ENTITY_ID, FAKE_UUID
from tests.common import MockConfigEntry, async_get_device_automations
async def test_get_triggers(hass, client):
    """Test we get the expected triggers."""
    await setup_webostv(hass)
    # The config entry set up above registers the TV device under FAKE_UUID.
    device_reg = get_dev_reg(hass)
    device = device_reg.async_get_device(identifiers={(DOMAIN, FAKE_UUID)})
    turn_on_trigger = {
        "platform": "device",
        "domain": DOMAIN,
        "type": "webostv.turn_on",
        "device_id": device.id,
        "metadata": {},
    }
    # The turn_on trigger must be among the automations advertised for the device.
    triggers = await async_get_device_automations(
        hass, DeviceAutomationType.TRIGGER, device.id
    )
    assert turn_on_trigger in triggers
async def test_if_fires_on_turn_on_request(hass, calls, client):
    """Test for turn_on and turn_off triggers firing."""
    await setup_webostv(hass)
    device_reg = get_dev_reg(hass)
    device = device_reg.async_get_device(identifiers={(DOMAIN, FAKE_UUID)})
    # Two automations listening for the same event: one via the device
    # trigger, one via the entity-based webostv.turn_on trigger platform.
    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: [
                {
                    "trigger": {
                        "platform": "device",
                        "domain": DOMAIN,
                        "device_id": device.id,
                        "type": "webostv.turn_on",
                    },
                    "action": {
                        "service": "test.automation",
                        "data_template": {
                            "some": "{{ trigger.device_id }}",
                            "id": "{{ trigger.id }}",
                        },
                    },
                },
                {
                    "trigger": {
                        "platform": "webostv.turn_on",
                        "entity_id": ENTITY_ID,
                    },
                    "action": {
                        "service": "test.automation",
                        "data_template": {
                            "some": ENTITY_ID,
                            "id": "{{ trigger.id }}",
                        },
                    },
                },
            ],
        },
    )
    # Calling media_player.turn_on must fire both triggers exactly once each.
    await hass.services.async_call(
        "media_player",
        "turn_on",
        {"entity_id": ENTITY_ID},
        blocking=True,
    )
    await hass.async_block_till_done()
    assert len(calls) == 2
    assert calls[0].data["some"] == device.id
    assert calls[0].data["id"] == 0
    assert calls[1].data["some"] == ENTITY_ID
    assert calls[1].data["id"] == 0
async def test_get_triggers_for_invalid_device_id(hass, caplog):
    """Test error raised for invalid webostv device_id."""
    await async_setup_component(hass, "persistent_notification", {})
    # An automation referencing a non-existent device id should fail
    # config validation and log an error instead of being registered.
    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: [
                {
                    "trigger": {
                        "platform": "device",
                        "domain": DOMAIN,
                        "device_id": "invalid_device_id",
                        "type": "webostv.turn_on",
                    },
                    "action": {
                        "service": "test.automation",
                        "data_template": {
                            "some": "{{ trigger.invalid_device }}",
                            "id": "{{ trigger.id }}",
                        },
                    },
                }
            ]
        },
    )
    await hass.async_block_till_done()
    assert (
        "Invalid config for [automation]: Device invalid_device_id is not a valid webostv device"
        in caplog.text
    )
async def test_failure_scenarios(hass, client):
    """Test failure scenarios."""
    await setup_webostv(hass)
    # Test wrong trigger platform type
    with pytest.raises(HomeAssistantError):
        await device_trigger.async_attach_trigger(
            hass, {"type": "wrong.type", "device_id": "invalid_device_id"}, None, {}
        )
    # Test invalid device id
    with pytest.raises(InvalidDeviceAutomationConfig):
        await device_trigger.async_validate_trigger_config(
            hass,
            {
                "platform": "device",
                "domain": DOMAIN,
                "type": "webostv.turn_on",
                "device_id": "invalid_device_id",
            },
        )
    # Register a device that belongs to a different (non-webostv) config
    # entry to exercise the cross-domain validation path below.
    entry = MockConfigEntry(domain="fake", state=ConfigEntryState.LOADED, data={})
    entry.add_to_hass(hass)
    device_reg = get_dev_reg(hass)
    device = device_reg.async_get_or_create(
        config_entry_id=entry.entry_id, identifiers={("fake", "fake")}
    )
    config = {
        "platform": "device",
        "domain": DOMAIN,
        "device_id": device.id,
        "type": "webostv.turn_on",
    }
    # Test that device id from non webostv domain raises exception
    with pytest.raises(InvalidDeviceAutomationConfig):
        await device_trigger.async_validate_trigger_config(hass, config)
    # Test no exception if device is not loaded
    await hass.config_entries.async_unload(entry.entry_id)
    assert await device_trigger.async_validate_trigger_config(hass, config) == config
| {
"content_hash": "39475177004aba35ec053ff4d9b31e34",
"timestamp": "",
"source": "github",
"line_count": 179,
"max_line_length": 97,
"avg_line_length": 32.33519553072626,
"alnum_prop": 0.531271596406358,
"repo_name": "nkgilley/home-assistant",
"id": "db15ce3a5920a617684024fff37da35d7166e996",
"size": "5788",
"binary": false,
"copies": "4",
"ref": "refs/heads/dev",
"path": "tests/components/webostv/test_device_trigger.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "51597279"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
} |
'''
This state downloads artifacts from artifactory.
'''
# Import python libs
from __future__ import absolute_import
import logging

# Module-level logger shared by the state functions below.
log = logging.getLogger(__name__)
def downloaded(name, artifact, target_dir='/tmp', target_file=None):
    '''
    Ensures that the artifact from artifactory exists at given location. If it doesn't exist, then
    it will be downloaded. It it already exists then the checksum of existing file is checked against checksum
    in artifactory. If it is different then the step will fail.
    artifact
        Details of the artifact to be downloaded from artifactory. Various options are:
        - artifactory_url: URL of the artifactory instance
        - repository: Repository in artifactory
        - artifact_id: Artifact ID
        - group_id: Group ID
        - packaging: Packaging
        - classifier: Classifier
          .. versionadded:: 2015.8.0
        - version: Version
        - username: Artifactory username
          .. versionadded:: 2015.8.0
        - password: Artifactory password
          .. versionadded:: 2015.8.0
    target_dir
        Directory where the artifact should be downloaded. By default it is downloaded to /tmp directory.
    target_file
        Target file to download artifact to. By default file name is resolved by artifactory.
    An example to download an artifact to a specific file:
    .. code-block:: yaml
        jboss_module_downloaded:
          artifactory.downloaded:
           - artifact:
               artifactory_url: http://artifactory.intranet.example.com/artifactory
               repository: 'libs-release-local'
               artifact_id: 'module'
               group_id: 'com.company.module'
               packaging: 'jar'
               classifier: 'sources'
               version: '1.0'
           - target_file: /opt/jboss7/modules/com/company/lib/module.jar
    Download artifact to the folder (automatically resolves file name):
    .. code-block:: yaml
        jboss_module_downloaded:
          artifactory.downloaded:
           - artifact:
               artifactory_url: http://artifactory.intranet.example.com/artifactory
               repository: 'libs-release-local'
               artifact_id: 'module'
               group_id: 'com.company.module'
               packaging: 'jar'
               classifier: 'sources'
               version: '1.0'
           - target_dir: /opt/jboss7/modules/com/company/lib
    '''
    log.debug(" ======================== STATE: artifactory.downloaded (name: %s) ", name)
    ret = {'name': name,
           'result': True,
           'changes': {},
           'comment': ''}
    try:
        fetch_result = __fetch_from_artifactory(artifact, target_dir, target_file)
        # Lazy %-formatting: no need to pre-stringify the argument.
        log.debug("fetch_result=%s", fetch_result)
        ret['result'] = fetch_result['status']
        ret['comment'] = fetch_result['comment']
        ret['changes'] = fetch_result['changes']
        log.debug("ret=%s", ret)
        return ret
    except Exception as exc:
        ret['result'] = False
        # Store the message, not the exception object, so the returned
        # state data stays a plain serializable string (highstate output).
        ret['comment'] = str(exc)
        return ret
def __fetch_from_artifactory(artifact, target_dir, target_file):
    '''
    Dispatch to the appropriate artifactory execution-module function:
    latest snapshot, explicit snapshot, or release.  All three calls share
    the same set of keyword arguments except for ``version``.
    '''
    # Keyword arguments common to all three artifactory module calls.
    common_kwargs = {
        'artifactory_url': artifact['artifactory_url'],
        'repository': artifact['repository'],
        'group_id': artifact['group_id'],
        'artifact_id': artifact['artifact_id'],
        'packaging': artifact['packaging'],
        'classifier': artifact.get('classifier'),
        'target_dir': target_dir,
        'target_file': target_file,
        'username': artifact.get('username'),
        'password': artifact.get('password'),
    }
    if artifact.get('latest_snapshot'):
        # Latest-snapshot lookup takes no explicit version.
        return __salt__['artifactory.get_latest_snapshot'](**common_kwargs)
    elif artifact['version'].endswith('SNAPSHOT'):
        return __salt__['artifactory.get_snapshot'](version=artifact['version'],
                                                    **common_kwargs)
    else:
        return __salt__['artifactory.get_release'](version=artifact['version'],
                                                   **common_kwargs)
| {
"content_hash": "a8943aa4379d79a5800869c6d1a34f3b",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 139,
"avg_line_length": 50.60606060606061,
"alnum_prop": 0.48008982035928144,
"repo_name": "smallyear/linuxLearn",
"id": "a535d17f1b167861f81e25787fce852ada6cabb2",
"size": "6704",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "salt/salt/states/artifactory.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "269"
},
{
"name": "CSS",
"bytes": "35"
},
{
"name": "HTML",
"bytes": "23373"
},
{
"name": "JavaScript",
"bytes": "510"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "12800734"
},
{
"name": "Shell",
"bytes": "240576"
}
],
"symlink_target": ""
} |
import os
import sys
import re
import logging
import fcntl
from optparse import OptionParser
from ksscommand import KssCommand, KssCommandException, KssCommandOptException
import __cmd__
try:
import karesansui
from karesansui import __version__
from karesansui.lib.utils import load_locale, execute_command, pipe_execute_command, generate_phrase, get_filesystem_info
from karesansui.lib.const import KARESANSUI_TMP_DIR, MOUNT_CMD, UMOUNT_CMD, FORMAT_CMD, YES_CMD
except ImportError, e:
print >>sys.stderr, "[Error] some packages not found. - %s" % e
sys.exit(1)
# Translation helper and CLI usage string used by the option parser below.
_ = load_locale()
usage = '%prog [options]'
def getopts():
    """Build the CLI option parser and return the parsed (options, args)."""
    parser = OptionParser(usage=usage, version=__version__)
    option_specs = (
        ('-d', '--dev', dict(dest='dev', help=_('Target device'), default=None)),
        ('-t', '--type', dict(dest='type', help=_('Format type'), default="ext3")),
        ('-f', '--format', dict(dest='format', action="store_true", help=_('Format on mount failed'), default=False)),
    )
    for short_flag, long_flag, kwargs in option_specs:
        parser.add_option(short_flag, long_flag, **kwargs)
    return parser.parse_args()
def chkopts(opts):
    """Validate parsed options; raise KssCommandOptException on bad input.

    - ``--dev`` is mandatory and may only contain ``[a-zA-Z0-9./_:-]``.
    - ``--type`` must be a filesystem known to the running system.
    """
    # Raw string for the regex; matches any character NOT allowed in a
    # device path.
    reg = re.compile(r"[^a-zA-Z0-9\./_:-]")
    if opts.dev:
        if reg.search(opts.dev):
            # Fixed typo in the error message ("Illigal" -> "Illegal").
            raise KssCommandOptException('ERROR: Illegal option value. option=%s value=%s' % ('-d or --dev', opts.dev))
    else:
        raise KssCommandOptException('ERROR: %s option is required.' % '-d or --dev')
    if opts.type not in get_filesystem_info():
        raise KssCommandOptException('ERROR: Unknown format type. type=%s' % (opts.type))
class ReadyMount(KssCommand):
    """Check whether a block device is mountable; when the first mount
    fails and --format was given, format the device and retry once.
    """

    def process(self):
        (opts, args) = getopts()
        chkopts(opts)
        self.up_progress(10)
        # Create a throw-away mount point under the karesansui tmp dir.
        try:
            tmp_dir_name = generate_phrase(12,'abcdefghijklmnopqrstuvwxyz')
            tmp_dir_path = "%s/%s" % (KARESANSUI_TMP_DIR, tmp_dir_name)
            os.mkdir(tmp_dir_path)
        except:
            raise KssCommandException('Failed to make tmpdir. path=%s' % (tmp_dir_path))
        try:
            self.up_progress(10)
            mount_command_args = (MOUNT_CMD,
                                  opts.dev,
                                  tmp_dir_path,
                                  )
            umount_command_args = (UMOUNT_CMD,
                                   tmp_dir_path,
                                   )
            is_mountable = False
            # First attempt: mount and (in finally) immediately unmount.
            try:
                (mount_cmd_rc, mount_cmd_res) = execute_command(mount_command_args)
                if mount_cmd_rc == 0:
                    is_mountable = True
                else:
                    self.logger.debug('Failed to mount. dev=%s' % (opts.dev))
            finally:
                (umount_cmd_rc, umount_cmd_res) = execute_command(umount_command_args)
            self.up_progress(30)
            # Mount failed and --format requested: format the device
            # (piping `yes` into mkfs via pipe_execute_command) and retry.
            if is_mountable is False and opts.format is True:
                first_command_args = YES_CMD
                second_command_args = (FORMAT_CMD,
                                       "-t",
                                       opts.type,
                                       opts.dev,
                                       )
                format_command_args = (first_command_args,
                                       second_command_args,
                                       )
                (format_cmd_rc, format_cmd_res) = pipe_execute_command(format_command_args)
                if format_cmd_rc != 0:
                    raise KssCommandException('Failed to format. dev=%s type=%s res=%s' % (opts.dev, opts.type, format_cmd_res))
                try:
                    (mount_cmd_rc, mount_cmd_res) = execute_command(mount_command_args)
                    if mount_cmd_rc == 0:
                        is_mountable = True
                    else:
                        self.logger.debug('Failed to mount. dev=%s' % (opts.dev))
                finally:
                    (umount_cmd_rc, umount_cmd_res) = execute_command(umount_command_args)
            self.up_progress(40)
        finally:
            # Always remove the temporary mount point.
            try:
                os.rmdir(tmp_dir_path)
            except:
                raise KssCommandException('Failed to delete tmpdir. path=%s' % (tmp_dir_path))
        if is_mountable is True:
            self.logger.info('Device "%s" is mountable.' % (opts.dev))
            print >>sys.stdout, _('Device "%s" is mountable.' % (opts.dev))
        else:
            self.logger.info('Device "%s" is not mountable.' % (opts.dev))
            print >>sys.stdout, _('Device "%s" is not mountable.' % (opts.dev))
        return is_mountable
# Script entry point: run the command and propagate its exit status.
if __name__ == "__main__":
    target = ReadyMount()
    sys.exit(target.run())
| {
"content_hash": "9a0e80d4ef7c92b58cad9019d7dd7444",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 128,
"avg_line_length": 37.86065573770492,
"alnum_prop": 0.5245724182723533,
"repo_name": "karesansui/karesansui",
"id": "538393cc29560b460720272e7c19dfd694c92a5f",
"size": "5795",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "bin/ready_mount.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "79865"
},
{
"name": "HTML",
"bytes": "32774"
},
{
"name": "JavaScript",
"bytes": "286445"
},
{
"name": "Makefile",
"bytes": "265"
},
{
"name": "Python",
"bytes": "2226164"
},
{
"name": "Shell",
"bytes": "18293"
}
],
"symlink_target": ""
} |
import numpy as np
import pytest
from inverse_covariance import (
QuicGraphicalLassoEBIC,
AdaptiveGraphicalLasso,
QuicGraphicalLassoCV,
)
from inverse_covariance.profiling import ClusterGraph
class TestAdaptiveGraphicalLasso(object):
    """Smoke tests for AdaptiveGraphicalLasso across base estimators
    (cross-validated and EBIC model selection) and adaptation methods."""

    @pytest.mark.parametrize(
        "params_in",
        [
            (
                {
                    "estimator": QuicGraphicalLassoCV(
                        cv=2,
                        n_refinements=6,
                        init_method="cov",
                        score_metric="log_likelihood",
                    ),
                    "method": "binary",
                }
            ),
            (
                {
                    "estimator": QuicGraphicalLassoCV(
                        cv=2,
                        n_refinements=6,
                        init_method="cov",
                        score_metric="log_likelihood",
                    ),
                    "method": "inverse",
                }
            ),
            (
                {
                    "estimator": QuicGraphicalLassoCV(
                        cv=2,
                        n_refinements=6,
                        init_method="cov",
                        score_metric="log_likelihood",
                    ),
                    "method": "inverse_squared",
                }
            ),
            ({"estimator": QuicGraphicalLassoEBIC(), "method": "binary"}),
            ({"estimator": QuicGraphicalLassoEBIC(), "method": "inverse"}),
            ({"estimator": QuicGraphicalLassoEBIC(), "method": "inverse_squared"}),
        ],
    )
    def test_integration_adaptive_graphical_lasso(self, params_in):
        """
        Just tests inputs/outputs (not validity of result).
        """
        n_features = 20
        n_samples = 25
        # Sample data from a single-cluster Gaussian graphical model.
        cov, prec, adj = ClusterGraph(n_blocks=1, chain_blocks=False, seed=1).create(
            n_features, 0.8
        )
        prng = np.random.RandomState(2)
        X = prng.multivariate_normal(np.zeros(n_features), cov, size=n_samples)
        model = AdaptiveGraphicalLasso(**params_in)
        model.fit(X)
        assert model.estimator_ is not None
        assert model.lam_ is not None
        # The adaptive penalty matrix never penalizes the diagonal.
        assert np.sum(model.lam_[np.diag_indices(n_features)]) == 0
        if params_in["method"] == "binary":
            # Binary weighting yields exactly the two values {0, 1}.
            uvals = set(model.lam_.flat)
            assert len(uvals) == 2
            assert 0 in uvals
            assert 1 in uvals
        elif (
            params_in["method"] == "inverse" or params_in["method"] == "inverse_squared"
        ):
            # Inverse weightings yield at least one non-zero penalty value.
            uvals = set(model.lam_.flat[model.lam_.flat != 0])
            assert len(uvals) > 0
| {
"content_hash": "febe0308f5e2faec057cb8bbacdaa6cc",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 88,
"avg_line_length": 32.792682926829265,
"alnum_prop": 0.46374116772034213,
"repo_name": "skggm/skggm",
"id": "ada96d601a49d1e85041c045e4a7fca6ac4db9a3",
"size": "2689",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "inverse_covariance/tests/adaptive_graph_lasso_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "15378"
},
{
"name": "Makefile",
"bytes": "130"
},
{
"name": "Python",
"bytes": "140851"
}
],
"symlink_target": ""
} |
import logging
import re
import unicodedata
from lxml import etree
from moxie.places.importers.helpers import prepare_document
logger = logging.getLogger(__name__)
# Collapses runs of spaces into a single space (used by string_cleanup).
rx = re.compile(' +')
# Normalization Form Compatibility Decomposition
UNICODE_NORMALIZE = "NFKD"
# Values for policies from librarydata
POLICIES = {
    '1': 'All',
    '2': 'Some',
    '3': 'None'
}
def text(l, n):
    """Return the cleaned text of the first child ``n`` of node ``l``.

    :param l: XMLNode
    :param n: occurrence to find
    :return: cleaned string, or None when the child or its text is missing
    """
    node = l.find(n)
    if node is None or node.text is None:
        return None
    return string_cleanup(node.text)
def subjects(l, n):
    """Return the deduplicated cleaned texts of all children ``n`` of ``l``.

    :param l: XMLNode
    :param n: occurrence to find
    :return: list of strings when nodes were found
    """
    nodes = l.findall(n)
    if nodes is None:
        return None
    return list({string_cleanup(node.text) for node in nodes if node is not None})
def policies(l, n):
    """Handles "policies" nodes
    Transform/format XML structure in flat strings per policy
    :param l: XMLNode
    :param n: occurrence to find
    :return: dict of policies keyed by the audience ("for" attribute)
    """
    e = l.findall(n)
    pol = {}
    if e is not None:
        for policy in e:
            # Map the numeric access/borrowing codes onto their labels
            # (see POLICIES) and flatten them into a single sentence.
            access = "Admissions: {access}. Borrowings: {borrowing}.".format(
                access=POLICIES[policy.find('access').text],
                borrowing=POLICIES[policy.find('borrowing').text])
            notes = string_cleanup(policy.find('notes').text)
            if notes:
                access = "{access} Notes: {notes}".format(access=access, notes=notes)
            # Key by the audience the policy applies to (the "for" attr).
            pol[policy.get('for')] = access
    return pol
def string_cleanup(s):
    """Collapse repeated spaces and strip the string.

    Unicode input is NFKD-normalized and reduced to ASCII first.

    :param s: string (or unicode)
    :return: cleaned string, or None for falsy input
    """
    if not s:
        return None
    if isinstance(s, unicode):
        s = unicodedata.normalize(UNICODE_NORMALIZE, s).encode('ascii', 'ignore')
    return rx.sub(' ', s).strip()
class OxLibraryDataImporter(object):
    """Merges Oxford library data (opening hours, subjects, policies) into
    place documents already present in the search index."""

    def __init__(self, indexer, precedence, file, identifier_key='identifiers',
                 lib_data_identifier='librarydata',
                 prefix_index_key='_library_'):
        self.indexer = indexer
        self.precedence = precedence
        self.file = file
        # Document field that holds the list of external identifiers.
        self.identifier_key = identifier_key
        # Identifier namespace used to match librarydata records to docs.
        self.lib_data_identifier = lib_data_identifier
        # Prefix for every field this importer writes into the index.
        self.prefix_index_key = prefix_index_key

    def run(self):
        # Parse the XML first; only index when an indexer was supplied
        # (the CLI in main() passes None to just inspect the parse result).
        self.parse_libs()
        if self.indexer:
            self.index_all()

    def parse_libs(self):
        # Flatten each <library> element of the XML file into a dict.
        xml = etree.parse(self.file)
        libraries = xml.xpath('.//library')
        self.libs = [{'id': text(l, 'id'),
                      'opening_hours_termtime': text(l, 'hours/termtime'),
                      'opening_hours_vacation': text(l, 'hours/vacation'),
                      'opening_hours_closed': text(l, 'hours/closed'),
                      'subjects': subjects(l, 'subjects/subject'),
                      'policies': policies(l, 'policies/policy')
                      } for l in libraries]

    def index_all(self):
        docs = []
        for lib in self.libs:
            try:
                doc = self.index_library(lib)
                if doc:
                    docs.append(doc)
            except Exception as e:
                # Best effort: one bad record must not abort the whole run.
                logger.warning('Could not index library {ident}'.format(ident=lib['id']),
                               exc_info=True)
        self.indexer.index(docs)
        self.indexer.commit()

    def index_library(self, lib):
        """Merge one parsed library dict into its existing index document.

        Returns the prepared document, or None when no matching document
        exists in the index.
        """
        ident = "{key}:{value}".format(key=self.lib_data_identifier,
                                       value=lib['id'])
        search_results = self.indexer.search_for_ids(self.identifier_key,
                                                     [ident])
        if search_results.results:
            doc = search_results.results[0]
            doc[self.prefix_index_key+'opening_hours_termtime'] = lib['opening_hours_termtime']
            doc[self.prefix_index_key+'opening_hours_vacation'] = lib['opening_hours_vacation']
            doc[self.prefix_index_key+'opening_hours_closed'] = lib['opening_hours_closed']
            doc[self.prefix_index_key+'subject'] = lib['subjects']
            # Policies are stored per audience, only when present.
            if 'academic' in lib['policies']:
                doc[self.prefix_index_key+'policy_academic'] = lib['policies']['academic']
            if 'other' in lib['policies']:
                doc[self.prefix_index_key+'policy_other'] = lib['policies']['other']
            if 'postgraduate' in lib['policies']:
                doc[self.prefix_index_key+'policy_postgraduate'] = lib['policies']['postgraduate']
            if 'undergraduate' in lib['policies']:
                doc[self.prefix_index_key+'policy_undergraduate'] = lib['policies']['undergraduate']
            return prepare_document(doc, search_results, self.precedence)
        else:
            logger.info('No results for {ident}'.format(ident=ident))
            return None
def main():
    """Ad-hoc CLI entry point: parse a librarydata XML file and dump it."""
    import argparse
    import pprint
    parser = argparse.ArgumentParser()
    parser.add_argument('file', type=argparse.FileType('r'))
    namespace = parser.parse_args()
    # No indexer: parse only, then pretty-print the parsed structure.
    importer = OxLibraryDataImporter(None, 10, namespace.file)
    importer.run()
    pprint.pprint(importer.libs)


if __name__ == '__main__':
    main()
| {
"content_hash": "136d287551486cb1ac21017d99c0ebf5",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 100,
"avg_line_length": 33.55345911949686,
"alnum_prop": 0.5746954076850984,
"repo_name": "ox-it/moxie",
"id": "dca489b71d05ea218d6e4931c423622eff86583a",
"size": "5335",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "moxie/places/importers/ox_library_data.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "796"
},
{
"name": "HTML",
"bytes": "103308"
},
{
"name": "Python",
"bytes": "260816"
}
],
"symlink_target": ""
} |
import logging
from openerp import http
from openerp.http import request
# Module-level logger for the barcode controller.
_logger = logging.getLogger(__name__)
class BarcodeController(http.Controller):
    # HTTP controller serving the stock barcode web client.
    @http.route(['/barcode/web/'], type='http', auth='user')
    def a(self, debug=False, **k):
        """Render the barcode UI; redirect to login when there is no session."""
        if not request.session.uid:
            return http.local_redirect('/web/login?redirect=/barcode/web')
        return request.render('stock.barcode_index')
| {
"content_hash": "e76d4d0f5f84d2e85fee3933a09feaca",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 74,
"avg_line_length": 27.866666666666667,
"alnum_prop": 0.6818181818181818,
"repo_name": "diogocs1/comps",
"id": "fa2744a4498cbe5d8335d6bb7def637fac76cfc0",
"size": "442",
"binary": false,
"copies": "296",
"ref": "refs/heads/master",
"path": "web/addons/stock/controllers/main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "701"
},
{
"name": "CSS",
"bytes": "856533"
},
{
"name": "HTML",
"bytes": "299671"
},
{
"name": "Java",
"bytes": "620166"
},
{
"name": "JavaScript",
"bytes": "5844302"
},
{
"name": "Makefile",
"bytes": "21002"
},
{
"name": "PHP",
"bytes": "14259"
},
{
"name": "Python",
"bytes": "10647376"
},
{
"name": "Ruby",
"bytes": "220"
},
{
"name": "Shell",
"bytes": "17746"
},
{
"name": "XSLT",
"bytes": "120278"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import os
import re
import subprocess
from setuptools import setup
from setuptools import find_packages
from io import open
def version():
    """Return the package version string.

    Resolution order:
    1. The ``Version:`` field of an adjacent PKG-INFO file (sdist/install).
    2. ``git describe`` output with a leading ``tags/`` ref prefix removed.
    3. The fallback ``0.0.0.dev0`` when neither source is available.
    """
    try:
        pkg_info_version_re = re.compile(r"\nVersion: ([^\s]*)\n")
        caller_path = os.path.dirname(__file__)
        # Close the file deterministically instead of leaking the handle.
        with open(os.path.join(caller_path, 'PKG-INFO')) as fd:
            match = pkg_info_version_re.search(fd.read())
        if match:
            return match.groups()[0]
        else:
            raise Exception("No version match.")
    except Exception:
        args = ["git", "describe", "--all", "--always", "--dirty"]
        try:
            described = subprocess.check_output(args).rstrip()
            # str.lstrip("tags/") strips a *character set*, not a prefix,
            # and would mangle refs such as "tags/stage-1" -> "e-1";
            # strip the literal prefix instead.
            prefix = "tags/"
            if described.startswith(prefix):
                described = described[len(prefix):]
            return described
        except Exception:
            return "0.0.0.dev0"
# Runtime dependencies of the analyzers/transcoders.
INSTALL_REQUIRES = ['Yapsy', 'Image', 'gitpython', 'filemagic',
                    'logilab-common', 'setuptools', 'thrift', 'argcomplete',
                    'pyacoustid']
# Additional packages needed only to run the test suite / lint.
TESTS_REQUIRE = ['pylint', 'nose', 'coverage', 'mock']
setup(
    name='damn_at',
    description='Digital Assets Managed Neatly: Analyzers and Transcoders',
    author='sueastside',
    author_email='No, thanks',
    version=version(),
    # Package sources live under src/ (src-layout).
    packages=find_packages('src'),
    package_dir={"": "src"},
    include_package_data = True,
    url='https://github.com/peragro/peragro-at',
    download_url='https://github.com/peragro/peragro-at',
    test_suite='nose.collector',
    install_requires=INSTALL_REQUIRES,
    tests_require=TESTS_REQUIRE,
    scripts=[],
    # Console entry points exposing the CLI tools.
    entry_points={
        'console_scripts': ['pt = damn_at.cli:main',
                            'damn_at-server = damn_at.thrift.server:main',
                            'damn_at-analyze = damn_at.analyzer:main',
                            'damn_at-transcode = damn_at.transcoder:main',
                            'damn_fs = damn_at.damnfs.damnfs:main']
    }
)
| {
"content_hash": "aa2368ec1e64676e8e763366f5a23e62",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 76,
"avg_line_length": 34.09090909090909,
"alnum_prop": 0.5808,
"repo_name": "peragro/peragro-at",
"id": "1bbdabf6023b0a18aa4978ecf2139f717502ea83",
"size": "1875",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "300042"
},
{
"name": "Shell",
"bytes": "3259"
},
{
"name": "Thrift",
"bytes": "1639"
}
],
"symlink_target": ""
} |
"""
Django settings for codebin project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = ')ev6ok1=i&+c6l$dgno^perm%v+r3ypc#n3lng4$p1)(2#n6c#'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'apps.editor',
'apps.home',
'apps.browse',
'apps.view',
'database.project',
'common',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'codebin.urls'
WSGI_APPLICATION = 'codebin.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
| {
"content_hash": "da0bb458b9cd52734d023f031482d633",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 71,
"avg_line_length": 24.213483146067414,
"alnum_prop": 0.7150812064965197,
"repo_name": "UALR-CodeTalks/codetalks",
"id": "f5d6c49f9450087ed7222d34938f46250884629f",
"size": "2155",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "codetalks/codebin/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "193398"
},
{
"name": "HTML",
"bytes": "54430"
},
{
"name": "JavaScript",
"bytes": "15569521"
},
{
"name": "Python",
"bytes": "8527"
}
],
"symlink_target": ""
} |
import json
import os
from pathlib import Path
import pytest
from anchore_engine.configuration.localconfig import (
DEFAULT_CONFIG,
get_config,
load_defaults,
load_filepath_to_config,
load_policy_bundle_paths,
load_policy_bundles,
validate_config,
)
# Filenames used when mocking configuration inputs.
DEFAULT_CONFIG_FN = "config.yaml"
DEFAULT_ANALYZER_CONFIG_FN = "analyzer_config.yaml"
DEFAULT_POLICY_BUNDLE_FN = "anchore_default_bundle.yaml"
CUSTOM_POLICY_BUNDLE_FN = "anchore_custom_bundle.yaml"
# Placeholder file contents.
ANALYZER_CONFIG_TEXT = "analyzer\n"
POLICY_BUNDLE_TEXT = "policy bundle\n"
# Directory names for test inputs and the expected output bundle dir.
INPUT_CONFIG_DIR = "input_config"
INPUT_BUNDLES_DIR_ROOT = "input_bundles"
OUTPUT_BUNDLES_DIR = "bundles"
@pytest.fixture
def mock_default_config(tmpdir):
    """Write a copy of the default config into tmpdir as config.yaml.

    NOTE(review): writes the Python dict repr, not YAML — appears
    sufficient for these tests; confirm if load paths ever parse it.
    """
    default_copy = DEFAULT_CONFIG.copy()
    target = tmpdir.join(DEFAULT_CONFIG_FN)
    with open(target.strpath, "w") as handle:
        handle.write(str(default_copy))
        handle.flush()
def mock_test_files(input_dir, config_filenames):
    """Create one empty placeholder file per requested name in input_dir."""
    for filename in config_filenames:
        mock_test_file(input_dir, filename)
def mock_test_file(input_dir, config_filename):
    """Create an empty placeholder file named *config_filename* in *input_dir*.

    *input_dir* is a py.path.local-style object exposing ``strpath``.
    """
    # Join with pathlib instead of string concatenation so the path is
    # built portably (no hard-coded "/" separator).
    (Path(input_dir.strpath) / config_filename).touch()
def get_mock_config_with_policy_bundles(dir, bundle_filenames, simulate_exception):
    """Write mock bundle files under *dir* and return a config dict
    referencing them.

    When *simulate_exception* is true the files contain invalid JSON so
    that downstream parsing fails.
    """
    bundles_dir = dir.mkdir(OUTPUT_BUNDLES_DIR)
    policy_bundles = []
    for index, bundle_filename in enumerate(bundle_filenames):
        bundle_path = os.path.join(bundles_dir, bundle_filename)
        if simulate_exception:
            body = "not json"
        else:
            body = json.dumps({"id": str(index), "name": bundle_filename})
        with open(bundle_path, "w") as fp:
            fp.write(body)
            fp.flush()
        policy_bundles.append(
            {
                # Only the first bundle is marked active; the choice is
                # arbitrary for these tests.
                "active": index == 0,
                "bundle_path": bundle_path,
            }
        )
    return {"policy_bundles": policy_bundles}
def test_empty_src_dirs(mock_default_config, tmpdir):
    """With no source dirs, no policy bundle paths end up in the config."""
    # setup the default config
    load_defaults(configdir=tmpdir)
    # function under test
    load_policy_bundle_paths(src_dirs=[])
    # get and validate the relevant config bits
    config = get_config()
    assert config["policy_bundles"] is None
@pytest.mark.parametrize(
    "config_filename_sets",
    [
        ([[]]),
        (["anchore_default_bundle.json"] and [["anchore_default_bundle.json"]])[0:] and [["anchore_default_bundle.json"]],
        ([["anchore_default_bundle.json", "second_bundle.json"]]),
        (
            [
                ["anchore_default_bundle.json", "second_bundle.json"],
                ["third_bundle.json", "fourth_bundle.json"],
                ["fifth_bundle.json"],
            ]
        ),
    ],
)
def test_load_policy_bundle_paths(mock_default_config, tmpdir, config_filename_sets):
    """Bundles from every source dir are copied into <configdir>/bundles and
    registered in the config; only anchore_default_bundle.json is active."""
    # setup files to read - one input dir per filename set.
    # NOTE: loop variable renamed from `set`, which shadowed the builtin.
    src_dirs = []
    for i, filename_set in enumerate(config_filename_sets):
        input_dir = tmpdir.mkdir(INPUT_BUNDLES_DIR_ROOT + "_" + str(i))
        mock_test_files(input_dir, filename_set)
        src_dirs.append(input_dir.strpath)
    # setup the expected output. We will expect to see output_dir_name contain the
    # files in config_filenames_flat
    output_dir_name = tmpdir.strpath + "/bundles"
    config_filenames_flat = [
        filename for filename_set in config_filename_sets for filename in filename_set
    ]
    # setup the default config
    load_defaults(configdir=tmpdir)
    # function under test
    load_policy_bundle_paths(src_dirs=src_dirs)
    # get and validate the relevant config bits
    config = get_config()
    assert config["policy_bundles"] is not None
    assert len(config["policy_bundles"]) == len(config_filenames_flat)
    for config_filename in config_filenames_flat:
        policy_bundle = next(
            policy_bundle
            for policy_bundle in config["policy_bundles"]
            if policy_bundle["bundle_path"] == output_dir_name + "/" + config_filename
        )
        assert policy_bundle is not None
        if config_filename == "anchore_default_bundle.json":
            assert policy_bundle["active"]
        else:
            assert not policy_bundle["active"]
        assert os.path.exists(policy_bundle["bundle_path"])
@pytest.mark.parametrize(
    "config_key, config_filename",
    [
        ("anchore_scanner_analyzer_config_file", "analyzer_config.yaml"),
        ("anchore_scanner_analyzer_config_file", "other_config.yaml"),
    ],
)
def test_load_filepath_to_config(
    mock_default_config, tmpdir, config_key, config_filename
):
    """The named file is copied into the config dir and its destination
    path is recorded in the config under *config_key*."""
    # setup files to read
    input_dir = tmpdir.mkdir(INPUT_CONFIG_DIR)
    mock_test_file(input_dir, config_filename)
    output_dir_name = tmpdir.strpath
    # setup the default config
    load_defaults(configdir=tmpdir)
    load_filepath_to_config(config_key, config_filename, src_dir=input_dir.strpath)
    config = get_config()
    assert config["anchore_scanner_analyzer_config_file"] is not None
    assert (
        config["anchore_scanner_analyzer_config_file"]
        == output_dir_name + "/" + config_filename
    )
    assert os.path.exists(config["anchore_scanner_analyzer_config_file"])
@pytest.mark.parametrize(
    "bundle_filenames, simulate_exception, expected_bundles, expected_exceptions",
    [
        (["first_bundle.json"], False, 1, 0),
        (["first_bundle.json", "first_bundle.json"], False, 2, 0),
        (["first_bundle.json"], True, 0, 1),
    ],
)
def test_load_policy_bundles(
    tmpdir, bundle_filenames, simulate_exception, expected_bundles, expected_exceptions
):
    """Parseable bundles go to the bundle callback; unparseable ones go to
    the exception callback."""
    config = get_mock_config_with_policy_bundles(
        tmpdir, bundle_filenames, simulate_exception
    )
    policy_bundles = []
    bundles = []
    exceptions = []
    # Collect everything load_policy_bundles hands back so the counts can
    # be asserted below.
    def process_bundle(policy_bundle, bundle):
        policy_bundles.append(policy_bundle)
        bundles.append(bundle)
    def process_exception(exception):
        exceptions.append(exception)
    load_policy_bundles(config, process_bundle, process_exception)
    assert len(policy_bundles) == expected_bundles
    assert len(bundles) == expected_bundles
    assert len(exceptions) == expected_exceptions
def test_validate_max_compressed_image_size_mb():
    """Integer values (including -1) validate; a string is rejected."""
    validate_config({"max_compressed_image_size_mb": 54}, {})
    validate_config({"max_compressed_image_size_mb": -1}, {})
    # A non-numeric value must fail validation.
    with pytest.raises(Exception):
        validate_config({"max_compressed_image_size_mb": "Test"}, {})
| {
"content_hash": "c34f9393ae8ad3d70b6c3a0e21fddc95",
"timestamp": "",
"source": "github",
"line_count": 210,
"max_line_length": 87,
"avg_line_length": 31.31904761904762,
"alnum_prop": 0.6515128478029497,
"repo_name": "anchore/anchore-engine",
"id": "845a3a57e04f203e4d1cfbd56f68515dfb278bd6",
"size": "6577",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/anchore_engine/configuration/test_localconfig.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "3889"
},
{
"name": "Dockerfile",
"bytes": "10954"
},
{
"name": "Makefile",
"bytes": "12274"
},
{
"name": "Python",
"bytes": "4529553"
},
{
"name": "Shell",
"bytes": "16598"
}
],
"symlink_target": ""
} |
from cinder import db
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder.objects import base
from cinder.objects import fields as c_fields
from oslo_versionedobjects import fields
@base.CinderObjectRegistry.register
class Group(base.CinderPersistentObject, base.CinderObject,
            base.CinderObjectDictCompat):
    # Version 1.0: Initial version
    # Version 1.1: Added group_snapshots, group_snapshot_id, and
    #              source_group_id
    VERSION = '1.1'
    # Joined relations that are only populated on demand (see
    # _from_db_object's expected_attrs handling).
    OPTIONAL_FIELDS = ['volumes', 'volume_types', 'group_snapshots']
    fields = {
        'id': fields.UUIDField(),
        'user_id': fields.StringField(),
        'project_id': fields.StringField(),
        'cluster_name': fields.StringField(nullable=True),
        'host': fields.StringField(nullable=True),
        'availability_zone': fields.StringField(nullable=True),
        'name': fields.StringField(nullable=True),
        'description': fields.StringField(nullable=True),
        'group_type_id': fields.StringField(),
        'volume_type_ids': fields.ListOfStringsField(nullable=True),
        'status': c_fields.GroupStatusField(nullable=True),
        # Set when the group was created from a snapshot / another group.
        'group_snapshot_id': fields.UUIDField(nullable=True),
        'source_group_id': fields.UUIDField(nullable=True),
        'volumes': fields.ObjectField('VolumeList', nullable=True),
        'volume_types': fields.ObjectField('VolumeTypeList',
                                           nullable=True),
        'group_snapshots': fields.ObjectField('GroupSnapshotList',
                                              nullable=True),
    }
@staticmethod
def _from_db_object(context, group, db_group,
expected_attrs=None):
if expected_attrs is None:
expected_attrs = []
for name, field in group.fields.items():
if name in Group.OPTIONAL_FIELDS:
continue
value = db_group.get(name)
setattr(group, name, value)
if 'volumes' in expected_attrs:
volumes = base.obj_make_list(
context, objects.VolumeList(context),
objects.Volume,
db_group['volumes'])
group.volumes = volumes
if 'volume_types' in expected_attrs:
volume_types = base.obj_make_list(
context, objects.VolumeTypeList(context),
objects.VolumeType,
db_group['volume_types'])
group.volume_types = volume_types
if 'group_snapshots' in expected_attrs:
group_snapshots = base.obj_make_list(
context, objects.GroupSnapshotList(context),
objects.GroupSnapshot,
db_group['group_snapshots'])
group.group_snapshots = group_snapshots
group._context = context
group.obj_reset_changes()
return group
def create(self, group_snapshot_id=None, source_group_id=None):
if self.obj_attr_is_set('id'):
raise exception.ObjectActionError(action='create',
reason=_('already_created'))
updates = self.cinder_obj_get_changes()
if 'volume_types' in updates:
raise exception.ObjectActionError(
action='create',
reason=_('volume_types assigned'))
if 'volumes' in updates:
raise exception.ObjectActionError(action='create',
reason=_('volumes assigned'))
if 'group_snapshots' in updates:
raise exception.ObjectActionError(
action='create',
reason=_('group_snapshots assigned'))
db_groups = db.group_create(self._context,
updates,
group_snapshot_id,
source_group_id)
self._from_db_object(self._context, self, db_groups)
def obj_load_attr(self, attrname):
if attrname not in Group.OPTIONAL_FIELDS:
raise exception.ObjectActionError(
action='obj_load_attr',
reason=_('attribute %s not lazy-loadable') % attrname)
if not self._context:
raise exception.OrphanedObjectError(method='obj_load_attr',
objtype=self.obj_name())
if attrname == 'volume_types':
self.volume_types = objects.VolumeTypeList.get_all_by_group(
self._context, self.id)
if attrname == 'volumes':
self.volumes = objects.VolumeList.get_all_by_generic_group(
self._context, self.id)
if attrname == 'group_snapshots':
self.group_snapshots = objects.GroupSnapshotList.get_all_by_group(
self._context, self.id)
self.obj_reset_changes(fields=[attrname])
def save(self):
updates = self.cinder_obj_get_changes()
if updates:
if 'volume_types' in updates:
msg = _('Cannot save volume_types changes in group object '
'update.')
raise exception.ObjectActionError(
action='save', reason=msg)
if 'volumes' in updates:
msg = _('Cannot save volumes changes in group object update.')
raise exception.ObjectActionError(
action='save', reason=msg)
if 'group_snapshots' in updates:
msg = _('Cannot save group_snapshots changes in group object '
'update.')
raise exception.ObjectActionError(
action='save', reason=msg)
db.group_update(self._context, self.id, updates)
self.obj_reset_changes()
def destroy(self):
with self.obj_as_admin():
db.group_destroy(self._context, self.id)
@base.CinderObjectRegistry.register
class GroupList(base.ObjectListBase, base.CinderObject):
    """Versioned-object list wrapper around Group query results."""
    # Version 1.0: Initial version
    VERSION = '1.0'
    fields = {
        'objects': fields.ListOfObjectsField('Group')
    }
    child_version = {
        '1.0': '1.0',
    }
    @classmethod
    def get_all(cls, context, filters=None, marker=None, limit=None,
                offset=None, sort_keys=None, sort_dirs=None):
        """Return every group visible to *context*, honoring paging/sorting."""
        db_groups = db.group_get_all(
            context, filters=filters, marker=marker, limit=limit,
            offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs)
        return base.obj_make_list(
            context, cls(context), objects.Group, db_groups)
    @classmethod
    def get_all_by_project(cls, context, project_id, filters=None, marker=None,
                           limit=None, offset=None, sort_keys=None,
                           sort_dirs=None):
        """Return the groups owned by *project_id*, honoring paging/sorting."""
        db_groups = db.group_get_all_by_project(
            context, project_id, filters=filters, marker=marker, limit=limit,
            offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs)
        return base.obj_make_list(
            context, cls(context), objects.Group, db_groups)
| {
"content_hash": "a86a3282c5cabed88e2462817c12d84d",
"timestamp": "",
"source": "github",
"line_count": 183,
"max_line_length": 79,
"avg_line_length": 39.56284153005465,
"alnum_prop": 0.562292817679558,
"repo_name": "NetApp/cinder",
"id": "a7d0e6c15d710df9e674b609ff9f7d0c3d8854eb",
"size": "7851",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "cinder/objects/group.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "17587090"
},
{
"name": "Shell",
"bytes": "8187"
}
],
"symlink_target": ""
} |
from __future__ import with_statement, print_function
from math import fabs
from typing import List, Tuple
Instruction = Tuple[str, int]
Dir = Tuple[int, int]

NORTH = (0, 1)
SOUTH = (0, -1)
EAST = (1, 0)
WEST = (-1, 0)
# Clockwise order: turning right advances through this list, turning left
# steps backwards (Python's negative indexing makes the wrap-around free).
DIRS = [NORTH, EAST, SOUTH, WEST]


def move_right(dir: Dir) -> Dir:
    """Return the direction after a 90-degree clockwise turn."""
    return DIRS[(DIRS.index(dir) + 1) % len(DIRS)]


def move_left(dir: Dir) -> Dir:
    """Return the direction after a 90-degree counter-clockwise turn."""
    return DIRS[(DIRS.index(dir) - 1) % len(DIRS)]
class Map(object):
    """Tracks heading/position on the street grid plus every visited cell."""

    def __init__(self):
        self.visited = set()  # all grid cells stepped on so far
        self.dir = NORTH
        self.pos = [0, 0]

    def move(self, i: Instruction) -> bool:
        """Turn per the instruction, then walk its distance one cell at a time.

        Returns False as soon as an already-visited cell is re-entered
        (the part-2 stop condition), True otherwise.
        """
        self.dir = move_right(self.dir) if i[0] == 'R' else move_left(self.dir)
        # Step cell by cell so every intermediate cell is recorded.
        # (Was `for i in range(i[1])`, which shadowed the parameter.)
        for _ in range(i[1]):
            if self.forward():
                return False
        return True

    def forward(self) -> bool:
        """Advance one cell; return True if that cell was already visited."""
        self.pos = [self.pos[0] + self.dir[0], self.pos[1] + self.dir[1]]
        exists = tuple(self.pos) in self.visited
        self.visited.add(tuple(self.pos))
        return exists

    def distance(self) -> float:
        """Manhattan distance from the origin.

        Annotated as float because math.fabs always returns a float
        (the previous `-> int` annotation did not match the behavior).
        """
        return fabs(self.pos[0]) + fabs(self.pos[1])
def to_instruction(input: str) -> Instruction:
    """Split a token like 'R2' into its turn letter and integer step count."""
    turn, steps = input[0], input[1:]
    return (turn, int(steps))
def get_instructions(file: str) -> List[Instruction]:
    """Read the puzzle input and parse its comma-separated instruction list."""
    with open(file, 'r') as handle:
        raw = handle.read()
    return [to_instruction(token) for token in raw.strip().split(', ')]
def main():
    """Walk the instructions, stopping at the first revisited cell, and
    print the Manhattan distance to that cell."""
    grid = Map()
    for instruction in get_instructions("data.txt"):
        if not grid.move(instruction):
            break
    print(grid.distance())


if __name__ == '__main__':
    main()
| {
"content_hash": "d4304ecf99ea1012323aa5565ccfe4fd",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 75,
"avg_line_length": 22.93846153846154,
"alnum_prop": 0.608316566063045,
"repo_name": "mvader/advent-of-code",
"id": "b07743ea70870f3be94d9885c249c3f4921e5d9a",
"size": "1491",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "2016/day1/taxicab2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Clojure",
"bytes": "7683"
},
{
"name": "Go",
"bytes": "7710"
},
{
"name": "Python",
"bytes": "4253"
},
{
"name": "Rust",
"bytes": "6406"
}
],
"symlink_target": ""
} |
from flask import Flask
import connexion
from connexion.resolver import RestyResolver
# The flask.ext.* namespace was removed in Flask 1.0; import the extension
# package directly instead (works on all Flask-CORS versions).
from flask_cors import CORS

app = connexion.FlaskApp(__name__)
app.add_api('swagger.yml', resolver=RestyResolver('api'))
# Allow cross-origin requests against the underlying Flask app.
CORS(app.app)


@app.route('/')
def default_landing():
    """Landing page pointing at the Swagger UI and a sample query."""
    return 'See <a href="/v1/ui">swagger api</a> and try it <a href="v1/repository?seed=low">here</a>'


if __name__ == '__main__':
    # Bind to all interfaces so the service is reachable from outside
    # the container.
    app.run(debug=True, host='0.0.0.0', port=5002)
| {
"content_hash": "341bd3230caccf0c74080ce2056cfceb",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 102,
"avg_line_length": 28,
"alnum_prop": 0.6919642857142857,
"repo_name": "s-rusev/github-matchmaker",
"id": "467ce61a4e0e457396c3f994b2ec4facd846d731",
"size": "448",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "query-services/repository/app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2364"
},
{
"name": "HCL",
"bytes": "1227"
},
{
"name": "HTML",
"bytes": "12432"
},
{
"name": "JavaScript",
"bytes": "2104"
},
{
"name": "Python",
"bytes": "5266"
},
{
"name": "Shell",
"bytes": "563"
},
{
"name": "TypeScript",
"bytes": "19085"
}
],
"symlink_target": ""
} |
import os
import sys

try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

# Shortcut: `python setup.py publish` builds an sdist and uploads it.
if sys.argv[-1] == 'publish':
    os.system('python setup.py sdist upload')
    sys.exit(0)

with open('README.rst', 'r') as readme:
    long_description = readme.read()

# Dynamically calculate the version based on fullcalendar.VERSION.
VERSION = __import__('fullcalendar').get_version()

setup(
    name='mezzanine-fullcalendar',
    version=VERSION,
    url='https://github.com/jonge-democraten/mezzanine-fullcalendar',
    author='David A Krauth, Jonge Democraten',
    author_email='ict-team@jd.nl',
    description='A Mezzanine calendaring application using the fullcalendar.io '
                'widget.',
    long_description=long_description,
    license='MIT License',
    platforms=['any'],
    classifiers=(
        'Environment :: Web Environment',
        'Framework :: Django',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.4',
    ),
    packages=[
        'fullcalendar',
        'fullcalendar.migrations',
        'fullcalendar.templatetags'
    ],
    install_requires=['python-dateutil', 'django>=1.6', 'mezzanine>=3.1']
)
| {
"content_hash": "5832be9261ea5e4f11cbe31885842a1d",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 80,
"avg_line_length": 29.34090909090909,
"alnum_prop": 0.6452362509682417,
"repo_name": "jonge-democraten/mezzanine-fullcalendar",
"id": "959063da43f75eee1546cf0acc5e6984b9b80831",
"size": "1313",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "37865"
}
],
"symlink_target": ""
} |
import datetime
import glob
import heapq
import logging
import os
import os.path
import random
import re
import shutil
import subprocess as subprocess
import sys
import tempfile
import time
from catapult_base import cloud_storage # pylint: disable=import-error
import dependency_manager # pylint: disable=import-error
from telemetry.internal.util import binary_manager
from telemetry.core import exceptions
from telemetry.core import util
from telemetry.internal.backends import browser_backend
from telemetry.internal.backends.chrome import chrome_browser_backend
from telemetry.internal.util import path
def ParseCrashpadDateTime(date_time_str):
  """Parse a crashpad timestamp string, dropping any trailing time zone.

  Python strptime does not support time zone parsing, so when a third
  whitespace-separated field is present (the zone) only the first two
  fields (date and time) are parsed.
  """
  parts = date_time_str.split()
  to_parse = ' '.join(parts[:2]) if len(parts) >= 3 else date_time_str
  return datetime.datetime.strptime(to_parse, '%Y-%m-%d %H:%M:%S')
def GetSymbolBinaries(minidump, arch_name, os_name):
  """Returns the non-system binary files referenced by the minidump."""
  minidump_dump = binary_manager.FetchPath('minidump_dump', arch_name, os_name)
  assert minidump_dump
  minidump_cmd = [minidump_dump, minidump]
  try:
    with open(os.devnull, 'wb') as DEVNULL:
      minidump_output = subprocess.check_output(minidump_cmd, stderr=DEVNULL)
  except subprocess.CalledProcessError as e:
    # For some reason minidump_dump always fails despite successful dumping.
    minidump_output = e.output
  code_file_re = re.compile(r'\W+\(code_file\)\W+=\W\"(.*)\"')
  symbol_binaries = []
  for minidump_line in minidump_output.splitlines():
    line_match = code_file_re.match(minidump_line)
    if not line_match:
      continue
    binary_path = line_match.group(1)
    if not os.path.isfile(binary_path):
      continue
    # Skip system libraries.
    if binary_path.startswith(('/usr/lib/', '/System/Library/', '/lib/')):
      continue
    # Skip resource/data files that carry no symbols.
    if binary_path.endswith(('.pak', '.bin', '.dat')):
      continue
    symbol_binaries.append(binary_path)
  return symbol_binaries
def GenerateBreakpadSymbols(minidump, arch, os_name, symbols_dir, browser_dir):
  """Dumps breakpad symbol files into |symbols_dir| for every binary that
  |minidump| references.

  Returns early (silently) when the generate_breakpad_symbols helper is not
  available, and aborts on the first binary whose symbol dump fails.
  """
  logging.info('Dumping breakpad symbols.')
  generate_breakpad_symbols_command = binary_manager.FetchPath(
      'generate_breakpad_symbols', arch, os_name)
  if generate_breakpad_symbols_command is None:
    return
  for binary_path in GetSymbolBinaries(minidump, arch, os_name):
    cmd = [
        sys.executable,
        generate_breakpad_symbols_command,
        '--binary=%s' % binary_path,
        '--symbols-dir=%s' % symbols_dir,
        '--build-dir=%s' % browser_dir,
        ]
    try:
      # Use a with-block so the devnull file descriptor is closed on every
      # iteration (the previous open(os.devnull, 'w') was never closed).
      with open(os.devnull, 'w') as devnull:
        subprocess.check_output(cmd, stderr=devnull)
    except subprocess.CalledProcessError:
      logging.warning('Failed to execute "%s"' % ' '.join(cmd))
      return
class DesktopBrowserBackend(chrome_browser_backend.ChromeBrowserBackend):
  """The backend for controlling a locally-executed browser instance, on Linux,
  Mac or Windows.
  """
  def __init__(self, desktop_platform_backend, browser_options, executable,
               flash_path, is_content_shell, browser_directory,
               output_profile_path, extensions_to_load):
    """Sets up process/profile state; the browser is launched later by Start().

    Raises:
      Exception: if |executable| is empty.
      browser_backend.ExtensionsNotSupportedException: if extensions are
          requested for content shell.
    """
    super(DesktopBrowserBackend, self).__init__(
        desktop_platform_backend,
        supports_tab_control=not is_content_shell,
        supports_extensions=not is_content_shell,
        browser_options=browser_options,
        output_profile_path=output_profile_path,
        extensions_to_load=extensions_to_load)
    # Initialize fields so that an explosion during init doesn't break in Close.
    self._proc = None
    self._tmp_profile_dir = None
    self._tmp_output_file = None
    self._executable = executable
    if not self._executable:
      raise Exception('Cannot create browser, no executable found!')
    assert not flash_path or os.path.exists(flash_path)
    self._flash_path = flash_path
    self._is_content_shell = is_content_shell
    if len(extensions_to_load) > 0 and is_content_shell:
      raise browser_backend.ExtensionsNotSupportedException(
          'Content shell does not support extensions.')
    self._browser_directory = browser_directory
    # DevTools port; an ephemeral port is requested at launch and the real
    # value is read back from the DevToolsActivePort file (see
    # HasBrowserFinishedLaunching).
    self._port = None
    self._tmp_minidump_dir = tempfile.mkdtemp()
    self._crash_service = None
    if self.browser_options.enable_logging:
      self._log_file_path = os.path.join(tempfile.mkdtemp(), 'chrome.log')
    else:
      self._log_file_path = None
    self._SetupProfile()
  @property
  def log_file_path(self):
    """Path of the Chrome log file, or None when logging is disabled."""
    return self._log_file_path
  @property
  def supports_uploading_logs(self):
    """True when a cloud bucket is configured and a log file exists on disk."""
    return (self.browser_options.logs_cloud_bucket and self.log_file_path and
            os.path.isfile(self.log_file_path))
  def _SetupProfile(self):
    """Materializes the profile directory the browser will launch with.

    Copies browser_options.profile_dir into a temp dir when given, and
    always deletes any stale DevToolsActivePort file so a port from a
    previous run cannot be read by mistake.
    """
    if not self.browser_options.dont_override_profile:
      if self._output_profile_path:
        self._tmp_profile_dir = self._output_profile_path
      else:
        self._tmp_profile_dir = tempfile.mkdtemp()
      profile_dir = self.browser_options.profile_dir
      if profile_dir:
        assert self._tmp_profile_dir != profile_dir
        if self._is_content_shell:
          logging.critical('Profiles cannot be used with content shell')
          sys.exit(1)
        logging.info("Using profile directory:'%s'." % profile_dir)
        shutil.rmtree(self._tmp_profile_dir)
        shutil.copytree(profile_dir, self._tmp_profile_dir)
    # No matter whether we're using an existing profile directory or
    # creating a new one, always delete the well-known file containing
    # the active DevTools port number.
    port_file = self._GetDevToolsActivePortPath()
    if os.path.isfile(port_file):
      try:
        os.remove(port_file)
      except Exception as e:
        logging.critical('Unable to remove DevToolsActivePort file: %s' % e)
        sys.exit(1)
  def _GetDevToolsActivePortPath(self):
    """Path of the file where Chrome writes its ephemeral DevTools port."""
    return os.path.join(self.profile_directory, 'DevToolsActivePort')
  def _GetCrashServicePipeName(self):
    """Name of the named pipe the Windows crash service listens on."""
    # Ensure a unique pipe name by using the name of the temp dir.
    pipe = r'\\.\pipe\%s_service' % os.path.basename(self._tmp_minidump_dir)
    return pipe
  def _StartCrashService(self):
    """Launches crash_service.exe (Windows only) to collect minidumps.

    Returns the subprocess.Popen handle, or None when not on Windows or
    when the crash_service binary cannot be found.
    """
    os_name = self.browser.platform.GetOSName()
    if os_name != 'win':
      return None
    arch_name = self.browser.platform.GetArchName()
    command = binary_manager.FetchPath('crash_service', arch_name, os_name)
    if not command:
      logging.warning('crash_service.exe not found for %s %s',
                      arch_name, os_name)
      return None
    if not os.path.exists(command):
      logging.warning('crash_service.exe not found for %s %s',
                      arch_name, os_name)
      return None
    try:
      crash_service = subprocess.Popen([
          command,
          '--no-window',
          '--dumps-dir=%s' % self._tmp_minidump_dir,
          '--pipe-name=%s' % self._GetCrashServicePipeName()])
    except Exception:
      # Dump process diagnostics before re-raising to aid debugging.
      logging.error(
          'Failed to run %s --no-window --dump-dir=%s --pip-name=%s' % (
              command, self._tmp_minidump_dir, self._GetCrashServicePipeName()))
      logging.error('Running on platform: %s and arch: %s.', os_name, arch_name)
      wmic_stdout, _ = subprocess.Popen(
          ['wmic', 'process', 'get', 'CommandLine,Name,ProcessId,ParentProcessId',
           '/format:csv'], stdout=subprocess.PIPE).communicate()
      logging.error('Current running processes:\n%s' % wmic_stdout)
      raise
    return crash_service
  def _GetCdbPath(self):
    """Locates cdb.exe in the usual Windows debugging-tools install paths."""
    possible_paths = (
        'Debugging Tools For Windows',
        'Debugging Tools For Windows (x86)',
        'Debugging Tools For Windows (x64)',
        os.path.join('Windows Kits', '8.0', 'Debuggers', 'x86'),
        os.path.join('Windows Kits', '8.0', 'Debuggers', 'x64'),
        os.path.join('win_toolchain', 'vs2013_files', 'win8sdk', 'Debuggers',
                     'x86'),
        os.path.join('win_toolchain', 'vs2013_files', 'win8sdk', 'Debuggers',
                     'x64'),
    )
    for possible_path in possible_paths:
      app_path = os.path.join(possible_path, 'cdb.exe')
      app_path = path.FindInstalledWindowsApplication(app_path)
      if app_path:
        return app_path
    return None
  def HasBrowserFinishedLaunching(self):
    """Returns True once DevTools has published its port and is reachable."""
    # In addition to the functional check performed by the base class, quickly
    # check if the browser process is still alive.
    if not self.IsBrowserRunning():
      raise exceptions.ProcessGoneException(
          "Return code: %d" % self._proc.returncode)
    # Start DevTools on an ephemeral port and wait for the well-known file
    # containing the port number to exist.
    port_file = self._GetDevToolsActivePortPath()
    if not os.path.isfile(port_file):
      # File isn't ready yet. Return false. Will retry.
      return False
    # Attempt to avoid reading the file until it's populated.
    got_port = False
    try:
      if os.stat(port_file).st_size > 0:
        with open(port_file) as f:
          port_string = f.read()
          self._port = int(port_string)
          logging.info('Discovered ephemeral port %s' % self._port)
          got_port = True
    except Exception:
      # Both stat and open can throw exceptions.
      pass
    if not got_port:
      # File isn't ready yet. Return false. Will retry.
      return False
    return super(DesktopBrowserBackend, self).HasBrowserFinishedLaunching()
  def GetBrowserStartupArgs(self):
    """Builds the browser command line (minus the executable itself)."""
    args = super(DesktopBrowserBackend, self).GetBrowserStartupArgs()
    # Request an ephemeral port (0); the real port is discovered later via
    # the DevToolsActivePort file.
    self._port = 0
    logging.info('Requested remote debugging port: %d' % self._port)
    args.append('--remote-debugging-port=%i' % self._port)
    args.append('--enable-crash-reporter-for-testing')
    if not self._is_content_shell:
      args.append('--window-size=1280,1024')
      if self._flash_path:
        args.append('--ppapi-flash-path=%s' % self._flash_path)
      if not self.browser_options.dont_override_profile:
        args.append('--user-data-dir=%s' % self._tmp_profile_dir)
    else:
      args.append('--data-path=%s' % self._tmp_profile_dir)
    trace_config_file = (self.platform_backend.tracing_controller_backend
                         .GetChromeTraceConfigFile())
    if trace_config_file:
      args.append('--trace-config-file=%s' % trace_config_file)
    return args
  def Start(self):
    """Launches the browser process and blocks until DevTools is reachable."""
    assert not self._proc, 'Must call Close() before Start()'
    args = [self._executable]
    args.extend(self.GetBrowserStartupArgs())
    if self.browser_options.startup_url:
      args.append(self.browser_options.startup_url)
    env = os.environ.copy()
    env['CHROME_HEADLESS'] = '1'  # Don't upload minidumps.
    env['BREAKPAD_DUMP_LOCATION'] = self._tmp_minidump_dir
    env['CHROME_BREAKPAD_PIPE_NAME'] = self._GetCrashServicePipeName()
    if self.browser_options.enable_logging:
      sys.stderr.write(
          'Chrome log file will be saved in %s\n' % self.log_file_path)
      env['CHROME_LOG_FILE'] = self.log_file_path
    self._crash_service = self._StartCrashService()
    logging.info('Starting Chrome %s', args)
    if not self.browser_options.show_stdout:
      self._tmp_output_file = tempfile.NamedTemporaryFile('w', 0)
      self._proc = subprocess.Popen(
          args, stdout=self._tmp_output_file, stderr=subprocess.STDOUT, env=env)
    else:
      self._proc = subprocess.Popen(args, env=env)
    try:
      self._WaitForBrowserToComeUp()
      # browser is foregrounded by default on Windows and Linux, but not Mac.
      if self.browser.platform.GetOSName() == 'mac':
        subprocess.Popen([
            'osascript', '-e', ('tell application "%s" to activate' %
                                self._executable)])
      self._InitDevtoolsClientBackend()
      if self._supports_extensions:
        self._WaitForExtensionsToLoad()
    except:
      # Clean up the half-started browser before propagating the failure.
      self.Close()
      raise
  @property
  def pid(self):
    """Process id of the launched browser, or None if not running."""
    if self._proc:
      return self._proc.pid
    return None
  @property
  def browser_directory(self):
    """Directory the browser executable lives in."""
    return self._browser_directory
  @property
  def profile_directory(self):
    """Temporary profile directory the browser was launched with."""
    return self._tmp_profile_dir
  def IsBrowserRunning(self):
    """True while the launched process exists and has not exited."""
    return self._proc and self._proc.poll() == None
  def GetStandardOutput(self):
    """Returns everything the browser wrote to stdout/stderr so far."""
    if not self._tmp_output_file:
      if self.browser_options.show_stdout:
        # This can happen in the case that loading the Chrome binary fails.
        # We print rather than using logging here, because that makes a
        # recursive call to this function.
        print >> sys.stderr, "Can't get standard output with --show-stdout"
      return ''
    self._tmp_output_file.flush()
    try:
      with open(self._tmp_output_file.name) as f:
        return f.read()
    except IOError:
      return ''
  def _GetMostRecentCrashpadMinidump(self):
    """Queries crashpad_database_util for the newest report path, or None."""
    os_name = self.browser.platform.GetOSName()
    arch_name = self.browser.platform.GetArchName()
    try:
      crashpad_database_util = binary_manager.FetchPath(
          'crashpad_database_util', arch_name, os_name)
      if not crashpad_database_util:
        return None
    except dependency_manager.NoPathFoundError:
      return None
    report_output = subprocess.check_output([
        crashpad_database_util, '--database=' + self._tmp_minidump_dir,
        '--show-pending-reports', '--show-completed-reports',
        '--show-all-report-info'])
    last_indentation = -1
    reports_list = []
    report_dict = {}
    for report_line in report_output.splitlines():
      # Report values are grouped together by the same indentation level.
      current_indentation = 0
      for report_char in report_line:
        if not report_char.isspace():
          break
        current_indentation += 1
      # Decrease in indentation level indicates a new report is being printed.
      if current_indentation >= last_indentation:
        report_key, report_value = report_line.split(':', 1)
        if report_value:
          report_dict[report_key.strip()] = report_value.strip()
      elif report_dict:
        try:
          report_time = ParseCrashpadDateTime(report_dict['Creation time'])
          report_path = report_dict['Path'].strip()
          reports_list.append((report_time, report_path))
        except (ValueError, KeyError) as e:
          logging.warning('Crashpad report expected valid keys'
                          ' "Path" and "Creation time": %s', e)
        finally:
          report_dict = {}
      last_indentation = current_indentation
    # Include the last report.
    if report_dict:
      try:
        report_time = ParseCrashpadDateTime(report_dict['Creation time'])
        report_path = report_dict['Path'].strip()
        reports_list.append((report_time, report_path))
      except (ValueError, KeyError) as e:
        logging.warning('Crashpad report expected valid keys'
                        ' "Path" and "Creation time": %s', e)
    if reports_list:
      # Newest report by creation time wins.
      _, most_recent_report_path = max(reports_list)
      return most_recent_report_path
    return None
  def _GetMostRecentMinidump(self):
    """Finds the newest minidump from either crashpad or breakpad layouts."""
    # Crashpad dump layout will be the standard eventually, check it first.
    most_recent_dump = self._GetMostRecentCrashpadMinidump()
    # Typical breakpad format is simply dump files in a folder.
    if not most_recent_dump:
      dumps = glob.glob(os.path.join(self._tmp_minidump_dir, '*.dmp'))
      if dumps:
        most_recent_dump = heapq.nlargest(1, dumps, os.path.getmtime)[0]
    # As a sanity check, make sure the crash dump is recent.
    if (most_recent_dump and
        os.path.getmtime(most_recent_dump) < (time.time() - (5 * 60))):
      logging.warning('Crash dump is older than 5 minutes. May not be correct.')
    return most_recent_dump
  def _IsExecutableStripped(self):
    """Heuristically checks (via nm, Mac only) whether the binary has symbols."""
    if self.browser.platform.GetOSName() == 'mac':
      try:
        symbols = subprocess.check_output(['/usr/bin/nm', self._executable])
      except subprocess.CalledProcessError as err:
        logging.warning('Error when checking whether executable is stripped: %s'
                        % err.output)
        # Just assume that binary is stripped to skip breakpad symbol generation
        # if this check failed.
        return True
      num_symbols = len(symbols.splitlines())
      # We assume that if there are more than 10 symbols the executable is not
      # stripped.
      return num_symbols < 10
    else:
      return False
  def _GetStackFromMinidump(self, minidump):
    """Symbolizes |minidump| and returns the stack text, or None on failure."""
    os_name = self.browser.platform.GetOSName()
    if os_name == 'win':
      cdb = self._GetCdbPath()
      if not cdb:
        logging.warning('cdb.exe not found.')
        return None
      output = subprocess.check_output([cdb, '-y', self._browser_directory,
                                        '-c', '.ecxr;k30;q', '-z', minidump])
      # cdb output can start the stack with "ChildEBP", "Child-SP", and possibly
      # other things we haven't seen yet. If we can't find the start of the
      # stack, include output from the beginning.
      stack_start = 0
      stack_start_match = re.search("^Child(?:EBP|-SP)", output, re.MULTILINE)
      if stack_start_match:
        stack_start = stack_start_match.start()
      stack_end = output.find('quit:')
      return output[stack_start:stack_end]
    arch_name = self.browser.platform.GetArchName()
    stackwalk = binary_manager.FetchPath(
        'minidump_stackwalk', arch_name, os_name)
    if not stackwalk:
      logging.warning('minidump_stackwalk binary not found.')
      return None
    # Write a copy of the dump that begins at the 'MDMP' magic, discarding any
    # leading bytes. NOTE(review): presumably minidump_stackwalk cannot cope
    # with a prefix before the header -- confirm.
    with open(minidump, 'rb') as infile:
      minidump += '.stripped'
      with open(minidump, 'wb') as outfile:
        outfile.write(''.join(infile.read().partition('MDMP')[1:]))
    symbols_path = os.path.join(self._tmp_minidump_dir, 'symbols')
    symbols = glob.glob(os.path.join(self._browser_directory, '*.breakpad*'))
    if symbols:
      # Lay pre-generated .breakpad files out as <symbols_path>/<binary>/<sha>/
      # <binary>.sym, the layout minidump_stackwalk expects.
      for symbol in sorted(symbols, key=os.path.getmtime, reverse=True):
        if not os.path.isfile(symbol):
          continue
        with open(symbol, 'r') as f:
          fields = f.readline().split()
          if not fields:
            continue
          sha = fields[3]
          binary = ' '.join(fields[4:])
          symbol_path = os.path.join(symbols_path, binary, sha)
          if os.path.exists(symbol_path):
            continue
          os.makedirs(symbol_path)
          shutil.copyfile(symbol, os.path.join(symbol_path, binary + '.sym'))
    else:
      # On some platforms generating the symbol table can be very time
      # consuming, skip it if there's nothing to dump.
      if self._IsExecutableStripped():
        logging.info('%s appears to be stripped, skipping symbol dump.' % (
            self._executable))
        return
      GenerateBreakpadSymbols(minidump, arch_name, os_name,
                              symbols_path, self._browser_directory)
    return subprocess.check_output([stackwalk, minidump, symbols_path],
                                   stderr=open(os.devnull, 'w'))
  def _UploadMinidumpToCloudStorage(self, minidump_path):
    """ Upload minidump_path to cloud storage and return the cloud storage url.
    """
    remote_path = ('minidump-%s-%i.dmp' %
                   (datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'),
                    random.randint(0, 1000000)))
    try:
      return cloud_storage.Insert(cloud_storage.TELEMETRY_OUTPUT, remote_path,
                                  minidump_path)
    except cloud_storage.CloudStorageError as err:
      logging.error('Cloud storage error while trying to upload dump: %s' %
                    repr(err))
      return '<Missing link>'
  def GetStackTrace(self):
    """Returns a symbolized stack for the most recent crash dump, if any."""
    most_recent_dump = self._GetMostRecentMinidump()
    if not most_recent_dump:
      return 'No crash dump found.'
    logging.info('Minidump found: %s' % most_recent_dump)
    stack = self._GetStackFromMinidump(most_recent_dump)
    if not stack:
      # Symbolization failed; upload the raw dump for offline analysis.
      cloud_storage_link = self._UploadMinidumpToCloudStorage(most_recent_dump)
      return ('Failed to symbolize minidump. Raw stack is uploaded to cloud '
              'storage: %s.' % cloud_storage_link)
    return stack
  def __del__(self):
    # Ensure processes and temp dirs are cleaned up even if the caller never
    # calls Close() explicitly.
    self.Close()
  def _TryCooperativeShutdown(self):
    """Asks the browser to shut itself down; falls through after 5 seconds."""
    if self.browser.platform.IsCooperativeShutdownSupported():
      # Ideally there would be a portable, cooperative shutdown
      # mechanism for the browser. This seems difficult to do
      # correctly for all embedders of the content API. The only known
      # problem with unclean shutdown of the browser process is on
      # Windows, where suspended child processes frequently leak. For
      # now, just solve this particular problem. See Issue 424024.
      if self.browser.platform.CooperativelyShutdown(self._proc, "chrome"):
        try:
          util.WaitFor(lambda: not self.IsBrowserRunning(), timeout=5)
          logging.info('Successfully shut down browser cooperatively')
        except exceptions.TimeoutException as e:
          logging.warning('Failed to cooperatively shutdown. ' +
                          'Proceeding to terminate: ' + str(e))
  def Close(self):
    """Shuts the browser down (cooperative, then SIGTERM, then kill) and
    releases temp files, the crash service and the minidump directory."""
    super(DesktopBrowserBackend, self).Close()
    # First, try to cooperatively shutdown.
    if self.IsBrowserRunning():
      self._TryCooperativeShutdown()
    # Second, try to politely shutdown with SIGTERM.
    if self.IsBrowserRunning():
      self._proc.terminate()
      try:
        util.WaitFor(lambda: not self.IsBrowserRunning(), timeout=5)
        self._proc = None
      except exceptions.TimeoutException:
        logging.warning('Failed to gracefully shutdown.')
    # Shutdown aggressively if all above failed.
    if self.IsBrowserRunning():
      logging.warning('Proceed to kill the browser.')
      self._proc.kill()
    self._proc = None
    if self._crash_service:
      self._crash_service.kill()
      self._crash_service = None
    if self._output_profile_path:
      # If we need the output then double check that it exists.
      if not (self._tmp_profile_dir and os.path.exists(self._tmp_profile_dir)):
        raise Exception("No profile directory generated by Chrome: '%s'." %
                        self._tmp_profile_dir)
    else:
      # If we don't need the profile after the run then cleanup.
      if self._tmp_profile_dir and os.path.exists(self._tmp_profile_dir):
        shutil.rmtree(self._tmp_profile_dir, ignore_errors=True)
        self._tmp_profile_dir = None
    if self._tmp_output_file:
      self._tmp_output_file.close()
      self._tmp_output_file = None
    if self._tmp_minidump_dir:
      shutil.rmtree(self._tmp_minidump_dir, ignore_errors=True)
      self._tmp_minidump_dir = None
| {
"content_hash": "6044983ca0f662a8d797b433c6ec1e35",
"timestamp": "",
"source": "github",
"line_count": 602,
"max_line_length": 80,
"avg_line_length": 37.785714285714285,
"alnum_prop": 0.6502395920341144,
"repo_name": "SummerLW/Perf-Insight-Report",
"id": "4044ad9914189c20c16aa82bb210c5980c79410f",
"size": "22910",
"binary": false,
"copies": "1",
"ref": "refs/heads/test",
"path": "telemetry/telemetry/internal/backends/chrome/desktop_browser_backend.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "3598"
},
{
"name": "C++",
"bytes": "6411"
},
{
"name": "CSS",
"bytes": "14952"
},
{
"name": "HTML",
"bytes": "27508823"
},
{
"name": "JavaScript",
"bytes": "75587"
},
{
"name": "Python",
"bytes": "4638631"
},
{
"name": "Shell",
"bytes": "2124"
}
],
"symlink_target": ""
} |
"""
Tests for Results.predict
"""
from statsmodels.compat.pandas import testing as pdt
import numpy as np
import pandas as pd
from numpy.testing import assert_allclose, assert_equal
from statsmodels.regression.linear_model import OLS
from statsmodels.genmod.generalized_linear_model import GLM
class CheckPredictReturns(object):
    """Shared checks that Results.predict returns pandas objects whose index
    matches the index of the input data.

    Subclasses provide ``cls.data`` (DataFrame with columns y, var1, var2,
    var3) and ``cls.res`` (a fitted formula-based results instance) via
    ``setup_class``.
    """
    def test_2d(self):
        # Predicting on a DataFrame slice must carry over that slice's index.
        res = self.res
        data = self.data
        fitted = res.fittedvalues.iloc[1:10:2]
        pred = res.predict(data.iloc[1:10:2])
        pdt.assert_index_equal(pred.index, fitted.index)
        assert_allclose(pred.values, fitted.values, rtol=1e-13)
        # plain dict
        xd = dict(zip(data.columns, data.iloc[1:10:2].values.T))
        pred = res.predict(xd)
        # A plain dict carries no index, so a default range index is expected.
        assert_equal(pred.index, np.arange(len(pred)))
        assert_allclose(pred.values, fitted.values, rtol=1e-13)
    def test_1d(self):
        # one observation
        res = self.res
        data = self.data
        pred = res.predict(data.iloc[:1])
        pdt.assert_index_equal(pred.index, data.iloc[:1].index)
        assert_allclose(pred.values, res.fittedvalues[0], rtol=1e-13)
        # Prediction at the column means equals the mean of the fitted values
        # for a linear-in-parameters model.
        fittedm = res.fittedvalues.mean()
        xmean = data.mean()
        pred = res.predict(xmean.to_frame().T)
        assert_equal(pred.index, np.arange(1))
        assert_allclose(pred, fittedm, rtol=1e-13)
        # Series
        pred = res.predict(data.mean())
        assert_equal(pred.index, np.arange(1))
        assert_allclose(pred.values, fittedm, rtol=1e-13)
        # dict with scalar value (is plain dict)
        # Note: this warns about dropped nan, even though there are None -FIXED
        pred = res.predict(data.mean().to_dict())
        assert_equal(pred.index, np.arange(1))
        assert_allclose(pred.values, fittedm, rtol=1e-13)
    def test_nopatsy(self):
        # With transform=False the design matrix is passed through unchanged.
        res = self.res
        data = self.data
        fitted = res.fittedvalues.iloc[1:10:2]
        # plain numpy array
        pred = res.predict(res.model.exog[1:10:2], transform=False)
        assert_allclose(pred, fitted.values, rtol=1e-13)
        # pandas DataFrame
        x = pd.DataFrame(res.model.exog[1:10:2],
                         index = data.index[1:10:2],
                         columns=res.model.exog_names)
        pred = res.predict(x)
        pdt.assert_index_equal(pred.index, fitted.index)
        assert_allclose(pred.values, fitted.values, rtol=1e-13)
        # one observation - 1-D
        pred = res.predict(res.model.exog[1], transform=False)
        assert_allclose(pred, fitted.values[0], rtol=1e-13)
        # one observation - pd.Series
        pred = res.predict(x.iloc[0])
        pdt.assert_index_equal(pred.index, fitted.index[:1])
        assert_allclose(pred.values[0], fitted.values[0], rtol=1e-13)
class TestPredictOLS(CheckPredictReturns):
    """Run the predict return-type checks against an OLS formula fit."""

    @classmethod
    def setup_class(cls):
        n_obs = 30
        np.random.seed(987128)
        exog = np.random.randn(n_obs, 3)
        endog = exog.sum(1) + np.random.randn(n_obs)
        labels = ['obs%02d' % i for i in range(n_obs)]

        # add one extra column to check that it does not matter
        values = np.round(np.column_stack((endog, exog)), 4)
        cls.data = pd.DataFrame(values,
                                columns=['y', 'var1', 'var2', 'var3'],
                                index=labels)

        cls.res = OLS.from_formula('y ~ var1 + var2', data=cls.data).fit()
class TestPredictGLM(CheckPredictReturns):
    """Run the predict return-type checks against a GLM formula fit.

    Adds GLM-specific coverage for the ``offset`` keyword of ``predict``.
    """

    @classmethod
    def setup_class(cls):
        nobs = 30
        np.random.seed(987128)
        x = np.random.randn(nobs, 3)
        y = x.sum(1) + np.random.randn(nobs)
        index = ['obs%02d' % i for i in range(nobs)]
        # add one extra column to check that it does not matter
        cls.data = pd.DataFrame(np.round(np.column_stack((y, x)), 4),
                                columns='y var1 var2 var3'.split(),
                                index=index)

        cls.res = GLM.from_formula('y ~ var1 + var2', data=cls.data).fit()

    def test_predict_offset(self):
        # ``offset`` enters the linear predictor; with the default family used
        # here it adds directly to the fitted values (see the expectation below).
        res = self.res
        data = self.data

        fitted = res.fittedvalues.iloc[1:10:2]
        offset = np.arange(len(fitted))
        fitted = fitted + offset

        pred = res.predict(data.iloc[1:10:2], offset=offset)
        pdt.assert_index_equal(pred.index, fitted.index)
        assert_allclose(pred.values, fitted.values, rtol=1e-13)

        # plain dict
        xd = dict(zip(data.columns, data.iloc[1:10:2].values.T))
        pred = res.predict(xd, offset=offset)
        assert_equal(pred.index, np.arange(len(pred)))
        assert_allclose(pred.values, fitted.values, rtol=1e-13)

        # offset as pandas.Series
        data2 = data.iloc[1:10:2].copy()
        data2['offset'] = offset
        pred = res.predict(data2, offset=data2['offset'])
        pdt.assert_index_equal(pred.index, fitted.index)
        assert_allclose(pred.values, fitted.values, rtol=1e-13)

        # check nan in exog is ok, preserves index matching offset length
        data2 = data.iloc[1:10:2].copy()
        data2['offset'] = offset
        data2.iloc[0, 1] = np.nan
        pred = res.predict(data2, offset=data2['offset'])
        pdt.assert_index_equal(pred.index, fitted.index)
        fitted_nan = fitted.copy()
        fitted_nan[0] = np.nan
        assert_allclose(pred.values, fitted_nan.values, rtol=1e-13)
| {
"content_hash": "f903c0f32dd487f7d3bfd62fd5ec4a64",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 79,
"avg_line_length": 35.05263157894737,
"alnum_prop": 0.6004129129129129,
"repo_name": "jseabold/statsmodels",
"id": "1acc717023a35840ab6450b909a95c91f42e0a66",
"size": "5352",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "statsmodels/base/tests/test_predict.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AGS Script",
"bytes": "457842"
},
{
"name": "Assembly",
"bytes": "10509"
},
{
"name": "Batchfile",
"bytes": "351"
},
{
"name": "C",
"bytes": "12088"
},
{
"name": "HTML",
"bytes": "148470"
},
{
"name": "Matlab",
"bytes": "1383"
},
{
"name": "Python",
"bytes": "8609450"
},
{
"name": "R",
"bytes": "34228"
},
{
"name": "Stata",
"bytes": "41179"
}
],
"symlink_target": ""
} |
"""
WSGI config for djangoblog project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "djangoblog.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "djangoblog.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| {
"content_hash": "4648d6f53c254ae51443b70f760d8069",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 79,
"avg_line_length": 44.71875,
"alnum_prop": 0.7945492662473794,
"repo_name": "NikhilKalige/DjangoBlog",
"id": "94e15f2161578f1d81e180cf65dffaac88967bf2",
"size": "1431",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "djangoblog/djangoblog/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10302"
}
],
"symlink_target": ""
} |
import frappe
from frappe import _
from frappe.model import no_value_fields
from frappe.model.document import Document
from frappe.translate import set_default_language
from frappe.twofactor import toggle_two_factor_auth
from frappe.utils import cint, today
from frappe.utils.momentjs import get_all_timezones
class SystemSettings(Document):
    """Singleton doctype holding site-wide configuration.

    ``validate`` normalizes the password policy, session-expiry format and
    two-factor-auth settings before save; ``on_update`` pushes changed values
    into the defaults table and invalidates the settings cache.
    """

    def validate(self):
        # bool() replaces the legacy ``cint(...) and True or False`` idiom
        # (identical truth table, standard Python).
        enable_password_policy = bool(cint(self.enable_password_policy))
        minimum_password_score = cint(getattr(self, "minimum_password_score", 0)) or 0
        if enable_password_policy and minimum_password_score <= 0:
            frappe.throw(_("Please select Minimum Password Score"))
        elif not enable_password_policy:
            self.minimum_password_score = ""

        # Session expiry values must look like "hh:mm" with at least one
        # non-zero component.
        for key in ("session_expiry", "session_expiry_mobile"):
            if self.get(key):
                parts = self.get(key).split(":")
                if len(parts) != 2 or not (cint(parts[0]) or cint(parts[1])):
                    frappe.throw(_("Session Expiry must be in format {0}").format("hh:mm"))

        if self.enable_two_factor_auth:
            if self.two_factor_method == "SMS":
                if not frappe.db.get_single_value("SMS Settings", "sms_gateway_url"):
                    frappe.throw(
                        _("Please setup SMS before setting it as an authentication method, via SMS Settings")
                    )
            toggle_two_factor_auth(True, roles=["All"])
        else:
            # NOTE: "retricted" is a typo in the stored fieldname; kept as-is
            # for schema compatibility.
            self.bypass_2fa_for_retricted_ip_users = 0
            self.bypass_restrict_ip_check_if_2fa_enabled = 0

        # Flag consumed by on_update(): stamp reset dates only when the
        # force-reset option transitions from off to on.
        frappe.flags.update_last_reset_password_date = False
        if self.force_user_to_reset_password and not cint(
            frappe.db.get_single_value("System Settings", "force_user_to_reset_password")
        ):
            frappe.flags.update_last_reset_password_date = True

        self.validate_user_pass_login()

    def validate_user_pass_login(self):
        """Refuse to disable username/password login unless an alternative
        login method (social login key or LDAP) is enabled."""
        if not self.disable_user_pass_login:
            return

        social_login_enabled = frappe.db.exists("Social Login Key", {"enable_social_login": 1})
        ldap_enabled = frappe.db.get_single_value("LDAP Settings", "enabled")
        if not (social_login_enabled or ldap_enabled):
            frappe.throw(
                _(
                    "Please enable atleast one Social Login Key or LDAP before disabling username/password based login."
                )
            )

    def on_update(self):
        self.set_defaults()

        # Invalidate cached settings so subsequent reads see the new values.
        frappe.cache().delete_value("system_settings")
        frappe.cache().delete_value("time_zone")

        if frappe.flags.update_last_reset_password_date:
            update_last_reset_password_date()

    def set_defaults(self):
        # Push every changed, value-bearing field into the defaults table.
        for df in self.meta.get("fields"):
            if df.fieldtype not in no_value_fields and self.has_value_changed(df.fieldname):
                frappe.db.set_default(df.fieldname, self.get(df.fieldname))

        if self.language:
            set_default_language(self.language)
def update_last_reset_password_date():
    """Stamp today's date on every user without a ``last_password_reset_date``.

    Called from ``SystemSettings.on_update`` when the force-password-reset
    option is first enabled, so the reset countdown starts from today for
    existing users.
    """
    frappe.db.sql(
        """ UPDATE `tabUser`
        SET
            last_password_reset_date = %s
        WHERE
            last_password_reset_date is null""",
        today(),
    )
@frappe.whitelist()
def load():
    """Return the timezone list and current defaults for the System Settings form.

    Returns:
        dict: ``{"timezones": [...], "defaults": {fieldname: default}}``.

    Raises:
        frappe.PermissionError: if the caller lacks the System Manager role.
    """
    # PEP 8: ``x not in y`` rather than ``not x in y``.
    if "System Manager" not in frappe.get_roles():
        frappe.throw(_("Not permitted"), frappe.PermissionError)

    all_defaults = frappe.db.get_defaults()
    # Only Select/Data fields carry defaults that the form needs.
    defaults = {
        df.fieldname: all_defaults.get(df.fieldname)
        for df in frappe.get_meta("System Settings").get("fields")
        if df.fieldtype in ("Select", "Data")
    }

    return {"timezones": get_all_timezones(), "defaults": defaults}
| {
"content_hash": "27982ac052bd0e3e3a0227d9d5c483ab",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 105,
"avg_line_length": 32.26,
"alnum_prop": 0.7120272783632982,
"repo_name": "frappe/frappe",
"id": "1fc27ca1147e72411ce01c5a69d0fc8142c37a7f",
"size": "3324",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "frappe/core/doctype/system_settings/system_settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "65093"
},
{
"name": "HTML",
"bytes": "250850"
},
{
"name": "JavaScript",
"bytes": "2523337"
},
{
"name": "Less",
"bytes": "10921"
},
{
"name": "Python",
"bytes": "3618097"
},
{
"name": "SCSS",
"bytes": "261690"
},
{
"name": "Vue",
"bytes": "98456"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from optparse import make_option
from django.core.management.base import NoArgsCommand
from docutil.commands_util import recocommand
from docutil.str_util import smart_decode
from codebase.actions import diff_codebases
class Command(NoArgsCommand):
    """Management command that diffs two releases of a project's code base."""

    # Build the four identically-shaped options from a small table instead of
    # repeating the make_option boilerplate.
    option_list = NoArgsCommand.option_list + tuple(
        make_option(flag, action='store', dest=dest, default='-1', help=text)
        for (flag, dest, text) in (
            ('--pname', 'pname', 'Project unix name'),
            ('--bname', 'bname', 'Code Base name'),
            ('--release1', 'release1', 'Project Release'),
            ('--release2', 'release2', 'Project Release'),
        ))

    help = "Diff 2 codebases"

    @recocommand
    def handle_noargs(self, **options):
        """Decode the CLI options and delegate to ``diff_codebases``."""
        pname, bname, release1, release2 = (
            smart_decode(options.get(key))
            for key in ('pname', 'bname', 'release1', 'release2'))
        diff_codebases(pname, bname, release1, release2)
| {
"content_hash": "7aea94a6ff8acce47da47da48d723eb3",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 66,
"avg_line_length": 40.275862068965516,
"alnum_prop": 0.6523972602739726,
"repo_name": "bartdag/recodoc2",
"id": "980667d901e48ad982d6002944a424c3f561ce2e",
"size": "1168",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "recodoc2/apps/codebase/management/commands/codediff.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "5581"
},
{
"name": "HTML",
"bytes": "32211467"
},
{
"name": "Java",
"bytes": "13646"
},
{
"name": "Perl",
"bytes": "503"
},
{
"name": "Python",
"bytes": "717834"
}
],
"symlink_target": ""
} |
import ustruct
from ubinascii import hexlify, unhexlify
from micropython import const
"""
Fixed channel ids for L2CAP packets
References can be found here:
* https://www.bluetooth.org/en-us/specification/adopted-specifications - Core specification 4.1
** [vol 3] Part A (Section 2.1) - Channel identifiers
"""
# Fixed L2CAP channel identifiers (see the spec reference in the docstring
# above: Core spec 4.1, [vol 3] Part A, Section 2.1).
L2CAP_CID_NUL = const(0x0000)
L2CAP_CID_SCH = const(0x0001)     # BR/EDR signaling channel
L2CAP_CID_ATT = const(0x0004)
L2CAP_CID_LE_SCH = const(0x0005)  # LE signaling channel
L2CAP_CID_SMP = const(0x0006)

# Short names for the fixed channels, used when formatting packets for display.
L2CAP_CHANNEL_IDS = {
    L2CAP_CID_NUL: "NUL",
    L2CAP_CID_SCH: "SCH",
    L2CAP_CID_ATT: "ATT",
    L2CAP_CID_LE_SCH: "LE_SCH",
    L2CAP_CID_SMP: "SMP"
}
class L2CAP(object):
    """One L2CAP packet: 16-bit payload length, 16-bit channel id, payload.

    ``length``, ``cid``, ``cid_name`` and ``data`` are exposed as read-only
    derived attributes via ``__getattr__``.
    """

    struct_format = "<HH"  # little-endian: length, channel id
    struct_size = ustruct.calcsize(struct_format)

    def __init__(self, cid, data=b''):
        # Raises KeyError for a cid not in L2CAP_CHANNEL_IDS (fixed channels only).
        self._cid = cid
        self._cid_name = L2CAP_CHANNEL_IDS[cid]
        self._data = data

    def __getattr__(self, name):
        if name == "cid":
            return self._cid
        elif name == "cid_name":
            return self._cid_name
        elif name == "length":
            return len(self._data)
        elif name == "data":
            return self._data
        # BUGFIX: previously fell through and implicitly returned None for any
        # other attribute, which masked typos and broke hasattr()/getattr()
        # error handling. __getattr__ must raise AttributeError for unknown
        # names.
        raise AttributeError(name)

    def __str__(self):
        desc_str = (
            "<{:s} "
            "cid={:s}(0x{:02x}) length={:d} data={:s}>"
        )
        return desc_str.format(
            self.__class__.__name__,
            self.cid_name,
            self.cid,
            self.length,
            hexlify(self.data)
        )

    @staticmethod
    def from_buffer(data):
        """
        Parse L2CAP packet

        0 1 2 3 4 5 6 7 0 1 2 3 4 5 6 7 0 1 2 3 4 5 6 7 0 1 2 3 4 5 6 7
        -----------------------------------------------------------------
        |            length             |          channel id           |
        -----------------------------------------------------------------

        L2CAP is packet-based but follows a communication model based on channels.
        A channel represents a data flow between L2CAP entities in remote devices.
        Channels may be connection-oriented or connectionless. Fixed channels
        other than the L2CAP connectionless channel (CID 0x0002) and the two L2CAP
        signaling channels (CIDs 0x0001 and 0x0005) are considered connection-oriented.

        All L2CAP layer packet fields shall use Little Endian byte order with the exception of the
        information payload field. The endian-ness of higher layer protocols encapsulated within
        L2CAP information payload is protocol-specific

        References can be found here:
        * https://www.bluetooth.org/en-us/specification/adopted-specifications
        ** Core specification 4.1
        ** [vol 3] Part A (Section 3) - Data Packet Format
        """
        # The length field is discarded: the payload length is recomputed from
        # the remaining bytes on access.
        _, cid = ustruct.unpack(
            L2CAP.struct_format,
            data[:L2CAP.struct_size]
        )
        data = data[L2CAP.struct_size:]
        return L2CAP(cid, data)

    def to_buffer(self):
        """Serialize header + payload back to bytes."""
        return ustruct.pack(
            self.struct_format,
            self.length,
            self.cid
        ) + self.data
"""
Codes and names for L2CAP Signaling Protocol
"""
SCH_COMMAND_REJECT = const(0x01)
SCH_CONNECTION_REQUEST = const(0x02)
SCH_CONNECTION_RESPONSE = const(0x03)
SCH_CONFIGURE_REQUEST = const(0x04)
SCH_CONFIGURE_RESPONSE = const(0x05)
SCH_DISCONNECTION_REQUEST = const(0x06)
SCH_DISCONNECTION_RESPONSE = const(0x07)
SCH_ECHO_REQUEST = const(0x08)
SCH_ECHO_RESPONSE = const(0x09)
SCH_INFORMATION_REQUEST = const(0x0a)
SCH_INFORMATION_RESPONSE = const(0x0b)
SCH_CREATE_CHANNEL_REQUEST = const(0x0c)
SCH_CREATE_CHANNEL_RESPONSE = const(0x0d)
SCH_MOVE_CHANNEL_REQUEST = const(0x0e)
SCH_MOVE_CHANNEL_RESPONSE = const(0x0f)
SCH_MOVE_CHANNEL_CONFIRMATION = const(0x10)
SCH_MOVE_CHANNEL_CONFIRMATION_RESPONSE = const(0x11)
LE_SCH_CONNECTION_PARAMETER_UPDATE_REQUEST = const(0x12)
LE_SCH_CONNECTION_PARAMETER_UPDATE_RESPONSE = const(0x13)
LE_SCH_LE_CREDIT_BASED_CONNECTION_REQUEST = const(0x14)
LE_SCH_LE_CREDIT_BASED_CONNECTION_RESPONSE = const(0x15)
LE_SCH_LE_FLOW_CONTROL_CREDIT = const(0x16)
L2CAP_SCH_PDUS = {
SCH_COMMAND_REJECT: "SCH Command reject",
SCH_CONNECTION_REQUEST: "SCH Connection request",
SCH_CONNECTION_RESPONSE: "SCH Connection response",
SCH_CONFIGURE_REQUEST: "SCH Configure request",
SCH_CONFIGURE_RESPONSE: "SCH Configure response",
SCH_DISCONNECTION_REQUEST: "SCH Disconnection request",
SCH_DISCONNECTION_RESPONSE: "SCH Disconnection response",
SCH_ECHO_REQUEST: "SCH Echo request",
SCH_ECHO_RESPONSE: "SCH Echo response",
SCH_INFORMATION_REQUEST: "SCH Information request",
SCH_INFORMATION_RESPONSE: "SCH Information response",
SCH_CREATE_CHANNEL_REQUEST: "SCH Create Channel request",
SCH_CREATE_CHANNEL_RESPONSE: "SCH Create Channel response",
SCH_MOVE_CHANNEL_REQUEST: "SCH Move Channel request",
SCH_MOVE_CHANNEL_RESPONSE: "SCH Move Channel response",
SCH_MOVE_CHANNEL_CONFIRMATION: "SCH Move Channel Confirmation",
SCH_MOVE_CHANNEL_CONFIRMATION_RESPONSE:
"SCH Move Channel Confirmation response",
LE_SCH_CONNECTION_PARAMETER_UPDATE_REQUEST:
"LE SCH Connection_Parameter_Update_Request",
LE_SCH_CONNECTION_PARAMETER_UPDATE_RESPONSE:
"LE SCH Connection_Parameter_Update_Response",
LE_SCH_LE_CREDIT_BASED_CONNECTION_REQUEST:
"LE SCH LE_Credit_Based_Connection Request",
LE_SCH_LE_CREDIT_BASED_CONNECTION_RESPONSE:
"LE SCH LE_Credit_Based_Connection Response",
LE_SCH_LE_FLOW_CONTROL_CREDIT:
"LE SCH LE_Flow_Control_Credit"
}
class L2CAP_SCH(object):
    """One L2CAP signaling-channel command: code, command identifier, length, data."""

    struct_format = "<BBH"  # little-endian: code, identifier, length
    struct_size = ustruct.calcsize(struct_format)

    def __init__(self, code, cid, data=b''):
        self._code = code
        self._cid = cid
        # BUGFIX: was ``L2CAP_SCH_PDUS[id]`` -- ``id`` is the Python builtin
        # function, so every construction raised KeyError. The PDU-name table
        # is keyed by the command *code*.
        self._cid_name = L2CAP_SCH_PDUS[code]
        self._data = data

    def __getattr__(self, name):
        # BUGFIX: the first branch tested ``name == "_code"``, which can never
        # be reached (``_code`` is a real instance attribute), so ``self.code``
        # fell through and silently returned None, breaking __str__ and
        # to_buffer.
        if name == "code":
            return self._code
        elif name == "cid":
            return self._cid
        elif name == "cid_name":
            return self._cid_name
        elif name == "length":
            return len(self._data)
        elif name == "data":
            return self._data
        # Unknown attributes must raise, not return None.
        raise AttributeError(name)

    def __str__(self):
        desc_str = (
            "<{:s} "
            "code={:02x} cid={:s}(0x{:02x}) length={:d} data={:s}>"
        )
        return desc_str.format(
            self.__class__.__name__,
            self.code,
            self.cid_name,
            self.cid,
            self.length,
            hexlify(self.data)
        )

    @staticmethod
    def from_buffer(data):
        """
        Parse the signaling channel data.

        The signaling channel is a L2CAP packet with channel id 0x0001 (L2CAP CID_SCH)
        or 0x0005 (L2CAP_CID_LE_SCH)

        0 1 2 3 4 5 6 7 0 1 2 3 4 5 6 7 0 1 2 3 4 5 6 7 0 1 2 3 4 5 6 7
        -----------------------------------------------------------------
        |     code      |      cid      |            length             |
        -----------------------------------------------------------------

        References can be found here:
        * https://www.bluetooth.org/en-us/specification/adopted-specifications
        ** Core specification 4.1
        ** [vol 3] Part A (Section 4) - Signaling Packet Formats
        """
        # The length field is discarded; payload length is recomputed on access.
        code, cid, _ = ustruct.unpack(
            L2CAP_SCH.struct_format,
            data[:L2CAP_SCH.struct_size]
        )
        data = data[L2CAP_SCH.struct_size:]
        return L2CAP_SCH(code, cid, data)

    def to_buffer(self):
        """Serialize header + payload back to bytes."""
        return ustruct.pack(
            self.struct_format,
            self.code,
            self.cid,
            self.length
        ) + self.data
| {
"content_hash": "4542919dcbcf1606c6a2b25c9cad94c3",
"timestamp": "",
"source": "github",
"line_count": 230,
"max_line_length": 99,
"avg_line_length": 33.61304347826087,
"alnum_prop": 0.5884102962100634,
"repo_name": "dmazzella/uble",
"id": "7192b498d45c04c2f10b1a2ba938423d1ffa0fd0",
"size": "7803",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bluetooth_low_energy/protocols/hci/l2cap.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "257521"
}
],
"symlink_target": ""
} |
from __future__ import with_statement
import re
from .basecase import BaseTestCase, cqlsh
from .cassconnect import testrun_cqlsh
import unittest
import sys
# Control characters sent to / received from the interactive cqlsh session.
BEL = '\x07'  # the terminal-bell character
CTRL_C = '\x03'
TAB = '\t'

# completions not printed out in this many seconds may not be acceptable.
# tune if needed for a slow system, etc, but be aware that the test will
# need to wait this long for each completion test, to make sure more info
# isn't coming
COMPLETION_RESPONSE_TIME = 0.5

# splits one line of completion output into its candidate tokens
completion_separation_re = re.compile(r'\s+')
@unittest.skipIf(sys.platform == "win32", 'Tab completion tests not supported on Windows')
class CqlshCompletionCase(BaseTestCase):
    """Base class driving an interactive cqlsh subprocess to exercise readline
    tab completion. Subclasses call :meth:`trycompletions` with an input
    prefix and either the expected immediate completion or the expected set
    of offered choices."""

    def setUp(self):
        # COLUMNS is set very large so completion output is never line-wrapped
        # by the terminal, which would confuse parsing below.
        self.cqlsh_runner = testrun_cqlsh(cqlver=cqlsh.DEFAULT_CQLVER, env={'COLUMNS': '100000'})
        self.cqlsh = self.cqlsh_runner.__enter__()

    def tearDown(self):
        self.cqlsh_runner.__exit__(None, None, None)

    def _get_completions(self, inputstring, split_completed_lines=True):
        """
        Get results of tab completion in cqlsh. Returns a bare string if a
        string completes immediately. Otherwise, returns a set of all
        whitespace-separated tokens in the offered completions by default, or a
        list of the lines in the offered completions if split_completed_lines is
        False.
        """
        self.cqlsh.send(inputstring)
        self.cqlsh.send(TAB)
        immediate = self.cqlsh.read_up_to_timeout(COMPLETION_RESPONSE_TIME)
        # strip readline's space-backspace echo artifacts
        immediate = immediate.replace(' \b', '')
        self.assertEqual(immediate[:len(inputstring)], inputstring)
        immediate = immediate[len(inputstring):]
        immediate = immediate.replace(BEL, '')

        if immediate:
            return immediate

        # No immediate completion: a second TAB makes readline print the
        # candidate choices.
        self.cqlsh.send(TAB)
        choice_output = self.cqlsh.read_up_to_timeout(COMPLETION_RESPONSE_TIME)
        if choice_output == BEL:
            choice_output = ''

        self.cqlsh.send(CTRL_C)  # cancel any current line
        self.cqlsh.read_to_next_prompt()

        choice_lines = choice_output.splitlines()
        if choice_lines:
            # ensure the last line of the completion is the prompt
            prompt_regex = self.cqlsh.prompt.lstrip() + re.escape(inputstring)
            msg = ('Double-tab completion '
                   'does not print prompt for input "{}"'.format(inputstring))
            self.assertRegexpMatches(choice_lines[-1], prompt_regex, msg=msg)

        choice_lines = [line.strip() for line in choice_lines[:-1]]
        choice_lines = [line for line in choice_lines if line]

        if split_completed_lines:
            completed_lines = map(set, (completion_separation_re.split(line.strip())
                                        for line in choice_lines))

            # NOTE(review): under Python 3 ``map()`` returns a lazy (always
            # truthy) iterator, so this emptiness check only behaves as
            # intended on Python 2 -- confirm target interpreter.
            if not completed_lines:
                return set()

            completed_tokens = set.union(*completed_lines)
            return completed_tokens - {''}
        else:
            return choice_lines

        assert False

    def _trycompletions_inner(self, inputstring, immediate='', choices=(),
                              other_choices_ok=False,
                              split_completed_lines=True):
        """
        Test tab completion in cqlsh. Enters in the text in inputstring, then
        simulates a tab keypress to see what is immediately completed (this
        should only happen when there is only one completion possible). If
        there is an immediate completion, the new text is expected to match
        'immediate'. If there is no immediate completion, another tab keypress
        is simulated in order to get a list of choices, which are expected to
        match the items in 'choices' (order is not important, but case is).
        """
        completed = self._get_completions(inputstring,
                                          split_completed_lines=split_completed_lines)

        if immediate:
            msg = 'cqlsh completed %r, but we expected %r' % (completed, immediate)
            self.assertEqual(completed, immediate, msg=msg)
            return

        if other_choices_ok:
            # only require that the expected choices are a subset of the output
            self.assertEqual(set(choices), completed.intersection(choices))
        else:
            self.assertEqual(set(choices), set(completed))

    def trycompletions(self, inputstring, immediate='', choices=(),
                       other_choices_ok=False, split_completed_lines=True):
        # Wrapper ensuring the shell is returned to a clean prompt even when
        # the inner assertions fail.
        try:
            self._trycompletions_inner(inputstring, immediate, choices,
                                       other_choices_ok=other_choices_ok,
                                       split_completed_lines=split_completed_lines)
        finally:
            self.cqlsh.send(CTRL_C)  # cancel any current line
            self.cqlsh.read_to_next_prompt()

    def strategies(self):
        # Replication strategies known to the CQL grammar module under test.
        return self.module.CqlRuleSet.replication_strategies
class TestCqlshCompletion(CqlshCompletionCase):
cqlver = '3.1.6'
module = cqlsh.cql3handling
def test_complete_on_empty_string(self):
self.trycompletions('', choices=('?', 'ALTER', 'BEGIN', 'CAPTURE', 'CONSISTENCY',
'COPY', 'CREATE', 'DEBUG', 'DELETE', 'DESC', 'DESCRIBE',
'DROP', 'GRANT', 'HELP', 'INSERT', 'LIST', 'LOGIN', 'PAGING', 'REVOKE',
'SELECT', 'SHOW', 'SOURCE', 'TRACING', 'EXPAND', 'SERIAL', 'TRUNCATE',
'UPDATE', 'USE', 'exit', 'quit', 'CLEAR', 'CLS'))
def test_complete_command_words(self):
self.trycompletions('alt', '\b\b\bALTER ')
self.trycompletions('I', 'NSERT INTO ')
self.trycompletions('exit', ' ')
def test_complete_in_uuid(self):
pass
def test_complete_in_select(self):
pass
def test_complete_in_insert(self):
self.trycompletions('INSERT INTO ',
choices=('twenty_rows_table',
'ascii_with_special_chars',
'users',
'has_all_types',
'system.',
'empty_composite_table',
'empty_table',
'undefined_values_table',
'dynamic_columns',
'twenty_rows_composite_table',
'utf8_with_special_chars',
'system_traces.',
'songs'),
other_choices_ok=True)
self.trycompletions('INSERT INTO twenty_rows_composite_table',
immediate=' ')
self.trycompletions('INSERT INTO twenty_rows_composite_table ',
choices=['(', 'JSON'])
self.trycompletions('INSERT INTO twenty_rows_composite_table (a, b ',
choices=(')', ','))
self.trycompletions('INSERT INTO twenty_rows_composite_table (a, b, ',
immediate='c ')
self.trycompletions('INSERT INTO twenty_rows_composite_table (a, b, c ',
choices=(',', ')'))
self.trycompletions('INSERT INTO twenty_rows_composite_table (a, b)',
immediate=' VALUES ( ')
self.trycompletions('INSERT INTO twenty_rows_composite_table (a, b, c) VAL',
immediate='UES ( ')
self.trycompletions(
'INSERT INTO twenty_rows_composite_table (a, b, c) VALUES (',
['<value for a (text)>'],
split_completed_lines=False)
self.trycompletions(
"INSERT INTO twenty_rows_composite_table (a, b, c) VALUES ('",
['<value for a (text)>'],
split_completed_lines=False)
self.trycompletions(
"INSERT INTO twenty_rows_composite_table (a, b, c) VALUES ( 'eggs",
['<value for a (text)>'],
split_completed_lines=False)
self.trycompletions(
"INSERT INTO twenty_rows_composite_table (a, b, c) VALUES ('eggs'",
immediate=', ')
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs',"),
['<value for b (text)>'],
split_completed_lines=False)
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam')"),
immediate=' ')
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') "),
choices=[';', 'USING', 'IF'])
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam');"),
choices=['?', 'ALTER', 'BEGIN', 'CAPTURE', 'CONSISTENCY', 'COPY',
'CREATE', 'DEBUG', 'DELETE', 'DESC', 'DESCRIBE', 'DROP',
'EXPAND', 'GRANT', 'HELP', 'INSERT', 'LIST', 'LOGIN', 'PAGING',
'REVOKE', 'SELECT', 'SHOW', 'SOURCE', 'SERIAL', 'TRACING',
'TRUNCATE', 'UPDATE', 'USE', 'exit', 'quit',
'CLEAR', 'CLS'])
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') US"),
immediate='ING T')
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') USING"),
immediate=' T')
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') USING T"),
choices=['TTL', 'TIMESTAMP'])
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') USING TT"),
immediate='L ')
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') USING TI"),
immediate='MESTAMP ')
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') USING TIMESTAMP "),
choices=['<wholenumber>'])
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') USING TTL "),
choices=['<wholenumber>'])
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') USING TIMESTAMP 0 "),
choices=['AND', ';'])
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') USING TTL 0 "),
choices=['AND', ';'])
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') USING TIMESTAMP 0 A"),
immediate='ND TTL ')
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') USING TTL 0 A"),
immediate='ND TIMESTAMP ')
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') USING TTL 0 AND TIMESTAMP "),
choices=['<wholenumber>'])
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') USING TTL 0 AND TIMESTAMP 0 "),
choices=['AND', ';'])
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') USING TTL 0 AND TIMESTAMP 0 AND "),
choices=[])
def test_complete_in_update(self):
self.trycompletions("UPD", immediate="ATE ")
self.trycompletions("UPDATE ",
choices=['twenty_rows_table',
'users', 'has_all_types', 'system.',
'ascii_with_special_chars',
'empty_composite_table', 'empty_table',
'undefined_values_table',
'dynamic_columns',
'twenty_rows_composite_table',
'utf8_with_special_chars',
'system_traces.', 'songs'],
other_choices_ok=True)
self.trycompletions("UPDATE empty_table ", choices=['USING', 'SET'])
self.trycompletions("UPDATE empty_table S",
immediate='ET lonelycol = ')
self.trycompletions("UPDATE empty_table SET lon",
immediate='elycol = ')
self.trycompletions("UPDATE empty_table SET lonelycol",
immediate=' = ')
self.trycompletions("UPDATE empty_table U", immediate='SING T')
self.trycompletions("UPDATE empty_table USING T",
choices=["TTL", "TIMESTAMP"])
self.trycompletions("UPDATE empty_table SET lonelycol = ",
choices=['<term (text)>'],
split_completed_lines=False)
self.trycompletions("UPDATE empty_table SET lonelycol = 'eg",
choices=['<term (text)>'],
split_completed_lines=False)
self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs'",
choices=[',', 'WHERE'])
self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs' WHERE ",
choices=['TOKEN(', 'lonelykey'])
self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs' WHERE ",
choices=['TOKEN(', 'lonelykey'])
self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs' WHERE lonel",
immediate='ykey ')
self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs' WHERE lonelykey ",
choices=['=', '<=', '>=', '>', '<', 'CONTAINS', 'IN', '['])
self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs' WHERE lonelykey = 0.0 ",
choices=['AND', 'IF', ';'])
self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs' WHERE lonelykey = 0.0 AND ",
choices=['TOKEN(', 'lonelykey'])
self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs' WHERE TOKEN(lonelykey ",
choices=[',', ')'])
self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs' WHERE TOKEN(lonelykey) ",
choices=['=', '<=', '>=', '<', '>'])
self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs' WHERE TOKEN(lonelykey) <= TOKEN(13) ",
choices=[';', 'AND', 'IF'])
self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs' WHERE TOKEN(lonelykey) <= TOKEN(13) IF ",
choices=['EXISTS', '<quotedName>', '<identifier>'])
self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs' WHERE TOKEN(lonelykey) <= TOKEN(13) IF EXISTS ",
choices=['>=', '!=', '<=', 'IN', '[', ';', '=', '<', '>', '.'])
def test_complete_in_delete(self):
self.trycompletions('DELETE F', choices=['FROM', '<identifier>', '<quotedName>'])
self.trycompletions('DELETE a ', choices=['FROM', '[', '.', ','])
self.trycompletions('DELETE a [',
choices=['<wholenumber>', 'false', '-', '<uuid>',
'<pgStringLiteral>', '<float>', 'TOKEN',
'<identifier>', '<quotedStringLiteral>',
'{', '[', 'NULL', 'true', '<blobLiteral>'])
self.trycompletions('DELETE a, ',
choices=['<identifier>', '<quotedName>'])
self.trycompletions('DELETE a FROM ',
choices=['twenty_rows_table',
'ascii_with_special_chars', 'users',
'has_all_types', 'system.',
'empty_composite_table', 'empty_table',
'system_auth.', 'undefined_values_table',
'dynamic_columns',
'twenty_rows_composite_table',
'utf8_with_special_chars',
'system_traces.', 'songs',
'"' + self.cqlsh.keyspace + '".'],
other_choices_ok=True)
self.trycompletions('DELETE FROM ',
choices=['twenty_rows_table',
'ascii_with_special_chars', 'users',
'has_all_types', 'system.',
'empty_composite_table', 'empty_table',
'system_auth.', 'undefined_values_table',
'dynamic_columns',
'twenty_rows_composite_table',
'utf8_with_special_chars',
'system_traces.', 'songs',
'system_auth.', 'system_distributed.',
'system_schema.', 'system_traces.',
'"' + self.cqlsh.keyspace + '".'],
other_choices_ok=True)
self.trycompletions('DELETE FROM twenty_rows_composite_table ',
choices=['USING', 'WHERE'])
self.trycompletions('DELETE FROM twenty_rows_composite_table U',
immediate='SING TIMESTAMP ')
self.trycompletions('DELETE FROM twenty_rows_composite_table USING TIMESTAMP ',
choices=['<wholenumber>'])
self.trycompletions('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0',
choices=['<wholenumber>'])
self.trycompletions('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 ',
immediate='WHERE ')
self.trycompletions('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE ',
choices=['a', 'b', 'TOKEN('])
self.trycompletions('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE a ',
choices=['<=', '>=', 'CONTAINS', 'IN', '[', '=', '<', '>'])
self.trycompletions('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE TOKEN(',
immediate='a ')
self.trycompletions('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE TOKEN(a',
immediate=' ')
self.trycompletions('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE TOKEN(a ',
choices=[')', ','])
self.trycompletions('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE TOKEN(a) ',
choices=['>=', '<=', '=', '<', '>'])
self.trycompletions('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE TOKEN(a) >= ',
choices=['false', 'true', '<pgStringLiteral>',
'token(', '-', '<float>', 'TOKEN',
'<identifier>', '<uuid>', '{', '[', 'NULL',
'<quotedStringLiteral>', '<blobLiteral>',
'<wholenumber>'])
self.trycompletions(('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE '
'TOKEN(a) >= TOKEN(0) '),
choices=['AND', 'IF', ';'])
self.trycompletions(('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE '
'TOKEN(a) >= TOKEN(0) IF '),
choices=['EXISTS', '<identifier>', '<quotedName>'])
self.trycompletions(('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE '
'TOKEN(a) >= TOKEN(0) IF b '),
choices=['>=', '!=', '<=', 'IN', '=', '<', '>'])
self.trycompletions(('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE '
'TOKEN(a) >= TOKEN(0) IF b < 0 '),
choices=['AND', ';'])
self.trycompletions(('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE '
'TOKEN(a) >= TOKEN(0) IF b < 0 AND '),
choices=['<identifier>', '<quotedName>'])
self.trycompletions(("DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE "
"b = 'eggs'"),
choices=['AND', 'IF', ';'])
    def test_complete_in_batch(self):
        # TODO: completion tests for BATCH statements are not implemented yet.
        pass
    def test_complete_in_create_keyspace(self):
        """Exercise tab-completion through a CREATE KEYSPACE statement,
        including the replication-options map for SimpleStrategy and
        NetworkTopologyStrategy."""
        self.trycompletions('create keyspace ', '', choices=('<identifier>', '<quotedName>', 'IF'))
        self.trycompletions('create keyspace moo ',
                            "WITH replication = {'class': '")
        self.trycompletions('create keyspace "12SomeName" with ',
                            "replication = {'class': '")
        self.trycompletions("create keyspace fjdkljf with foo=bar ", "",
                            choices=('AND', ';'))
        self.trycompletions("create keyspace fjdkljf with foo=bar AND ",
                            "replication = {'class': '")
        self.trycompletions("create keyspace moo with replication", " = {'class': '")
        self.trycompletions("create keyspace moo with replication=", " {'class': '")
        self.trycompletions("create keyspace moo with replication={", "'class':'")
        self.trycompletions("create keyspace moo with replication={'class'", ":'")
        self.trycompletions("create keyspace moo with replication={'class': ", "'")
        self.trycompletions("create keyspace moo with replication={'class': '", "",
                            choices=self.strategies())
        # ttl is an "unreserved keyword", so it should be usable as a keyspace name
        self.trycompletions("create keySPACE ttl with replication ="
                            "{ 'class' : 'SimpleStrategy'", ", 'replication_factor': ")
        self.trycompletions("create keyspace ttl with replication ="
                            "{'class':'SimpleStrategy',", " 'replication_factor': ")
        self.trycompletions("create keyspace \"ttl\" with replication ="
                            "{'class': 'SimpleStrategy', ", "'replication_factor': ")
        self.trycompletions("create keyspace \"ttl\" with replication ="
                            "{'class': 'SimpleStrategy', 'repl", "ication_factor'")
        self.trycompletions("create keyspace foo with replication ="
                            "{'class': 'SimpleStrategy', 'replication_factor': ", '',
                            choices=('<term>',))
        self.trycompletions("create keyspace foo with replication ="
                            "{'class': 'SimpleStrategy', 'replication_factor': 1", '',
                            choices=('<term>',))
        self.trycompletions("create keyspace foo with replication ="
                            "{'class': 'SimpleStrategy', 'replication_factor': 1 ", '}')
        self.trycompletions("create keyspace foo with replication ="
                            "{'class': 'SimpleStrategy', 'replication_factor': 1, ",
                            '', choices=())
        self.trycompletions("create keyspace foo with replication ="
                            "{'class': 'SimpleStrategy', 'replication_factor': 1} ",
                            '', choices=('AND', ';'))
        self.trycompletions("create keyspace foo with replication ="
                            "{'class': 'NetworkTopologyStrategy', ", '',
                            choices=('<dc_name>',))
        self.trycompletions("create keyspace \"PB and J\" with replication={"
                            "'class': 'NetworkTopologyStrategy'", ', ')
        self.trycompletions("create keyspace PBJ with replication={"
                            "'class': 'NetworkTopologyStrategy'} and ",
                            "durable_writes = '")
    def test_complete_in_string_literals(self):
        """Completion must also work inside a partially-typed string literal."""
        # would be great if we could get a space after this sort of completion,
        # but readline really wants to make things difficult for us
        self.trycompletions("create keyspace blah with replication = {'class': 'Sim",
                            "pleStrategy'")
    def test_complete_in_drop(self):
        """After DROP, every droppable schema-object keyword should be offered."""
        self.trycompletions('DR', immediate='OP ')
        self.trycompletions('DROP ',
                            choices=['AGGREGATE', 'COLUMNFAMILY', 'FUNCTION',
                                     'INDEX', 'KEYSPACE', 'ROLE', 'TABLE',
                                     'TRIGGER', 'TYPE', 'USER', 'MATERIALIZED'])
    def test_complete_in_drop_keyspace(self):
        """DROP KEYSPACE should offer the current (quoted) keyspace and IF EXISTS."""
        self.trycompletions('DROP K', immediate='EYSPACE ')
        quoted_keyspace = '"' + self.cqlsh.keyspace + '"'
        self.trycompletions('DROP KEYSPACE ',
                            choices=['IF', quoted_keyspace])
        self.trycompletions('DROP KEYSPACE ' + quoted_keyspace,
                            choices=[';'])
        self.trycompletions('DROP KEYSPACE I',
                            immediate='F EXISTS ' + quoted_keyspace + ';')
def create_columnfamily_table_template(self, name):
"""Parameterized test for CREATE COLUMNFAMILY and CREATE TABLE. Since
they're synonyms, they should have the same completion behavior, so this
test avoids duplication between tests for the two statements."""
prefix = 'CREATE ' + name + ' '
quoted_keyspace = '"' + self.cqlsh.keyspace + '"'
self.trycompletions(prefix + '',
choices=['IF', quoted_keyspace, '<new_table_name>'])
self.trycompletions(prefix + 'IF ',
immediate='NOT EXISTS ')
self.trycompletions(prefix + 'IF NOT EXISTS ',
choices=['<new_table_name>', quoted_keyspace])
self.trycompletions(prefix + 'IF NOT EXISTS new_table ',
immediate='( ')
self.trycompletions(prefix + quoted_keyspace, choices=['.', '('])
self.trycompletions(prefix + quoted_keyspace + '( ',
choices=['<new_column_name>', '<identifier>',
'<quotedName>'])
self.trycompletions(prefix + quoted_keyspace + '.',
choices=['<new_table_name>'])
self.trycompletions(prefix + quoted_keyspace + '.new_table ',
immediate='( ')
self.trycompletions(prefix + quoted_keyspace + '.new_table ( ',
choices=['<new_column_name>', '<identifier>',
'<quotedName>'])
self.trycompletions(prefix + ' new_table ( ',
choices=['<new_column_name>', '<identifier>',
'<quotedName>'])
self.trycompletions(prefix + ' new_table (col_a ine',
immediate='t ')
self.trycompletions(prefix + ' new_table (col_a int ',
choices=[',', 'PRIMARY'])
self.trycompletions(prefix + ' new_table (col_a int P',
immediate='RIMARY KEY ')
self.trycompletions(prefix + ' new_table (col_a int PRIMARY KEY ',
choices=[')', ','])
self.trycompletions(prefix + ' new_table (col_a int PRIMARY KEY,',
choices=['<identifier>', '<quotedName>'])
self.trycompletions(prefix + ' new_table (col_a int PRIMARY KEY)',
immediate=' ')
self.trycompletions(prefix + ' new_table (col_a int PRIMARY KEY) ',
choices=[';', 'WITH'])
self.trycompletions(prefix + ' new_table (col_a int PRIMARY KEY) W',
immediate='ITH ')
self.trycompletions(prefix + ' new_table (col_a int PRIMARY KEY) WITH ',
choices=['bloom_filter_fp_chance', 'compaction',
'compression',
'dclocal_read_repair_chance',
'default_time_to_live', 'gc_grace_seconds',
'max_index_interval',
'memtable_flush_period_in_ms',
'read_repair_chance', 'CLUSTERING',
'COMPACT', 'caching', 'comment',
'min_index_interval', 'speculative_retry'])
self.trycompletions(prefix + ' new_table (col_a int PRIMARY KEY) WITH ',
choices=['bloom_filter_fp_chance', 'compaction',
'compression',
'dclocal_read_repair_chance',
'default_time_to_live', 'gc_grace_seconds',
'max_index_interval',
'memtable_flush_period_in_ms',
'read_repair_chance', 'CLUSTERING',
'COMPACT', 'caching', 'comment',
'min_index_interval', 'speculative_retry'])
self.trycompletions(prefix + ' new_table (col_a int PRIMARY KEY) WITH bloom_filter_fp_chance ',
immediate='= ')
self.trycompletions(prefix + ' new_table (col_a int PRIMARY KEY) WITH bloom_filter_fp_chance = ',
choices=['<float_between_0_and_1>'])
self.trycompletions(prefix + ' new_table (col_a int PRIMARY KEY) WITH compaction ',
immediate="= {'class': '")
self.trycompletions(prefix + " new_table (col_a int PRIMARY KEY) WITH compaction = "
+ "{'class': '",
choices=['SizeTieredCompactionStrategy',
'LeveledCompactionStrategy',
'DateTieredCompactionStrategy',
'TimeWindowCompactionStrategy'])
self.trycompletions(prefix + " new_table (col_a int PRIMARY KEY) WITH compaction = "
+ "{'class': 'S",
immediate="izeTieredCompactionStrategy'")
self.trycompletions(prefix + " new_table (col_a int PRIMARY KEY) WITH compaction = "
+ "{'class': 'SizeTieredCompactionStrategy",
immediate="'")
self.trycompletions(prefix + " new_table (col_a int PRIMARY KEY) WITH compaction = "
+ "{'class': 'SizeTieredCompactionStrategy'",
choices=['}', ','])
self.trycompletions(prefix + " new_table (col_a int PRIMARY KEY) WITH compaction = "
+ "{'class': 'SizeTieredCompactionStrategy', ",
immediate="'")
self.trycompletions(prefix + " new_table (col_a int PRIMARY KEY) WITH compaction = "
+ "{'class': 'SizeTieredCompactionStrategy', '",
choices=['bucket_high', 'bucket_low', 'class',
'enabled', 'max_threshold',
'min_sstable_size', 'min_threshold',
'tombstone_compaction_interval',
'tombstone_threshold',
'unchecked_tombstone_compaction',
'only_purge_repaired_tombstones'])
self.trycompletions(prefix + " new_table (col_a int PRIMARY KEY) WITH compaction = "
+ "{'class': 'SizeTieredCompactionStrategy'}",
choices=[';', 'AND'])
self.trycompletions(prefix + " new_table (col_a int PRIMARY KEY) WITH compaction = "
+ "{'class': 'SizeTieredCompactionStrategy'} AND ",
choices=['bloom_filter_fp_chance', 'compaction',
'compression',
'dclocal_read_repair_chance',
'default_time_to_live', 'gc_grace_seconds',
'max_index_interval',
'memtable_flush_period_in_ms',
'read_repair_chance', 'CLUSTERING',
'COMPACT', 'caching', 'comment',
'min_index_interval', 'speculative_retry'])
self.trycompletions(prefix + " new_table (col_a int PRIMARY KEY) WITH compaction = "
+ "{'class': 'DateTieredCompactionStrategy', '",
choices=['base_time_seconds', 'max_sstable_age_days',
'timestamp_resolution', 'min_threshold', 'class', 'max_threshold',
'tombstone_compaction_interval', 'tombstone_threshold',
'enabled', 'unchecked_tombstone_compaction',
'max_window_size_seconds', 'only_purge_repaired_tombstones'])
self.trycompletions(prefix + " new_table (col_a int PRIMARY KEY) WITH compaction = "
+ "{'class': 'TimeWindowCompactionStrategy', '",
choices=['compaction_window_unit', 'compaction_window_size',
'timestamp_resolution', 'min_threshold', 'class', 'max_threshold',
'tombstone_compaction_interval', 'tombstone_threshold',
'enabled', 'unchecked_tombstone_compaction',
'only_purge_repaired_tombstones'])
    def test_complete_in_create_columnfamily(self):
        # Delegates the shared CREATE COLUMNFAMILY/TABLE checks to the template.
        self.trycompletions('CREATE C', choices=['COLUMNFAMILY', 'CUSTOM'])
        self.trycompletions('CREATE CO', immediate='LUMNFAMILY ')
        self.create_columnfamily_table_template('COLUMNFAMILY')
    def test_complete_in_create_table(self):
        # Delegates the shared CREATE COLUMNFAMILY/TABLE checks to the template.
        self.trycompletions('CREATE T', choices=['TRIGGER', 'TABLE', 'TYPE'])
        self.trycompletions('CREATE TA', immediate='BLE ')
        self.create_columnfamily_table_template('TABLE')
    def test_complete_in_describe(self):
        """
        Tests for Cassandra-10733: name completion after DESC
        TABLE/TYPE/FUNCTION/AGGREGATE, both unqualified and qualified
        with the (quoted) test keyspace.
        """
        self.trycompletions('DES', immediate='C')
        # quoted_keyspace = '"' + self.cqlsh.keyspace + '"'
        self.trycompletions('DESCR', immediate='IBE ')
        self.trycompletions('DESC TABLE ',
                            choices=['twenty_rows_table',
                                     'ascii_with_special_chars', 'users',
                                     'has_all_types', 'system.',
                                     'empty_composite_table', 'empty_table',
                                     'system_auth.', 'undefined_values_table',
                                     'dynamic_columns',
                                     'twenty_rows_composite_table',
                                     'utf8_with_special_chars',
                                     'system_traces.', 'songs',
                                     'system_distributed.',
                                     '"' + self.cqlsh.keyspace + '".'],
                            other_choices_ok=True)
        self.trycompletions('DESC TYPE ',
                            choices=['system.',
                                     'system_auth.',
                                     'system_traces.',
                                     'system_distributed.',
                                     'address',
                                     'phone_number',
                                     'band_info_type',
                                     'tags'],
                            other_choices_ok=True)
        self.trycompletions('DESC FUNCTION ',
                            choices=['system.',
                                     'system_auth.',
                                     'system_traces.',
                                     'system_distributed.',
                                     'fbestband',
                                     'fbestsong',
                                     'fmax',
                                     'fmin',
                                     '"' + self.cqlsh.keyspace + '".'],
                            other_choices_ok=True)
        self.trycompletions('DESC AGGREGATE ',
                            choices=['system.',
                                     'system_auth.',
                                     'system_traces.',
                                     'system_distributed.',
                                     'aggmin',
                                     'aggmax',
                                     '"' + self.cqlsh.keyspace + '".'],
                            other_choices_ok=True)

        # Unfortunately these commented tests will not work. This is due to the keyspace name containing quotes;
        # cqlsh auto-completes a DESC differently when the keyspace contains quotes. I'll leave the
        # test here though in case we ever change this script to test using keyspace names without
        # quotes
        # self.trycompletions('DESC TABLE ' + '"' + self.cqlsh.keyspace + '"', immediate='.')
        self.trycompletions('DESC TABLE ' + '"' + self.cqlsh.keyspace + '".',
                            choices=['twenty_rows_table',
                                     'ascii_with_special_chars',
                                     'users',
                                     'has_all_types',
                                     'empty_composite_table',
                                     'empty_table',
                                     'undefined_values_table',
                                     'dynamic_columns',
                                     'twenty_rows_composite_table',
                                     'utf8_with_special_chars',
                                     'songs'],
                            other_choices_ok=True)

        # See comment above for DESC TABLE
        # self.trycompletions('DESC TYPE ' + '"' + self.cqlsh.keyspace + '"', immediate='.')
        self.trycompletions('DESC TYPE ' + '"' + self.cqlsh.keyspace + '".',
                            choices=['address',
                                     'phone_number',
                                     'band_info_type',
                                     'tags'],
                            other_choices_ok=True)

        # See comment above for DESC TABLE
        # self.trycompletions('DESC FUNCTION ' + '"' + self.cqlsh.keyspace + '"', immediate='.f')
        self.trycompletions('DESC FUNCTION ' + '"' + self.cqlsh.keyspace + '".', immediate='f')
        self.trycompletions('DESC FUNCTION ' + '"' + self.cqlsh.keyspace + '".f',
                            choices=['fbestband',
                                     'fbestsong',
                                     'fmax',
                                     'fmin'],
                            other_choices_ok=True)

        # See comment above for DESC TABLE
        # self.trycompletions('DESC AGGREGATE ' + '"' + self.cqlsh.keyspace + '"', immediate='.aggm')
        self.trycompletions('DESC AGGREGATE ' + '"' + self.cqlsh.keyspace + '".', immediate='aggm')
        self.trycompletions('DESC AGGREGATE ' + '"' + self.cqlsh.keyspace + '".aggm',
                            choices=['aggmin',
                                     'aggmax'],
                            other_choices_ok=True)
    def test_complete_in_drop_columnfamily(self):
        # TODO: completion tests for DROP COLUMNFAMILY are not implemented yet.
        pass
    def test_complete_in_truncate(self):
        # TODO: completion tests for TRUNCATE are not implemented yet.
        pass
    def test_complete_in_alter_columnfamily(self):
        # TODO: completion tests for ALTER COLUMNFAMILY are not implemented yet.
        pass
    def test_complete_in_use(self):
        # TODO: completion tests for USE are not implemented yet.
        pass
    def test_complete_in_create_index(self):
        # TODO: completion tests for CREATE INDEX are not implemented yet.
        pass
    def test_complete_in_drop_index(self):
        # TODO: completion tests for DROP INDEX are not implemented yet.
        pass
| {
"content_hash": "e0c57aa318ee219b0bd42754dba236b4",
"timestamp": "",
"source": "github",
"line_count": 787,
"max_line_length": 119,
"avg_line_length": 52.38754764930114,
"alnum_prop": 0.48480438526280045,
"repo_name": "Imran-C/cassandra",
"id": "8485ff08a4a9e4d671df78695ff04917b8214a29",
"size": "42158",
"binary": false,
"copies": "2",
"ref": "refs/heads/trunk",
"path": "pylib/cqlshlib/test/test_cqlsh_completion.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "AMPL",
"bytes": "801"
},
{
"name": "Batchfile",
"bytes": "23782"
},
{
"name": "GAP",
"bytes": "75443"
},
{
"name": "Java",
"bytes": "15882093"
},
{
"name": "Lex",
"bytes": "10154"
},
{
"name": "PowerShell",
"bytes": "39870"
},
{
"name": "Python",
"bytes": "499741"
},
{
"name": "Shell",
"bytes": "50971"
},
{
"name": "Thrift",
"bytes": "40282"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Create the Category model and add a nullable Article.category FK."""

    dependencies = [
        ('article', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('title', models.CharField(max_length=1024)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # null=True so existing Article rows remain valid without a category.
        migrations.AddField(
            model_name='article',
            name='category',
            field=models.ForeignKey(related_name='articles', to='article.Category', null=True),
            preserve_default=True,
        ),
    ]
| {
"content_hash": "ecf205192851b44f3efe058dda9f70be",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 114,
"avg_line_length": 27.75862068965517,
"alnum_prop": 0.5416149068322982,
"repo_name": "F483/trainlessmagazine.com",
"id": "0fa3eedd8df4692e50b33ad097b2c22bd72c5ce3",
"size": "829",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "article/migrations/0002_auto_20141207_0840.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1927635"
},
{
"name": "HTML",
"bytes": "6030086"
},
{
"name": "JavaScript",
"bytes": "232810"
},
{
"name": "Makefile",
"bytes": "3507"
},
{
"name": "Python",
"bytes": "44196"
},
{
"name": "Ruby",
"bytes": "4418"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
import sys
from __main__ import cli
from ansible.compat.six import iteritems
from ansible.errors import AnsibleError
from ansible.parsing.dataloader import DataLoader
from ansible.parsing.yaml.objects import AnsibleMapping, AnsibleSequence, AnsibleUnicode
from ansible.plugins.callback import CallbackBase
from ansible.template import Templar
class CallbackModule(CallbackBase):
    ''' Creates and modifies play and host variables '''

    CALLBACK_VERSION = 2.0
    CALLBACK_NAME = 'vars'

    def __init__(self):
        self.loader = DataLoader()
        # `cli` is only populated when running under an Ansible CLI entry point.
        self._options = cli.options if cli else None

    def raw_triage(self, key_string, item, patterns):
        """Recursively wrap string values whose dotted path matches one of
        `patterns` in {% raw %} ... {% endraw %} so they escape templating.

        `key_string` is the dotted path accumulated so far for `item`.
        """
        # process dict values
        # BUGFIX: use the imported six `iteritems` helper instead of the
        # Python-2-only `dict.iteritems()` method, matching the file's import.
        if isinstance(item, AnsibleMapping):
            return AnsibleMapping(dict((key, self.raw_triage('.'.join([key_string, key]), value, patterns)) for key, value in iteritems(item)))
        # process list values
        elif isinstance(item, AnsibleSequence):
            return AnsibleSequence([self.raw_triage('.'.join([key_string, str(i)]), value, patterns) for i, value in enumerate(item)])
        # wrap values if they match raw_vars pattern
        elif isinstance(item, AnsibleUnicode):
            match = next((pattern for pattern in patterns if re.match(pattern, key_string)), None)
            return AnsibleUnicode(''.join(['{% raw %}', item, '{% endraw %}'])) if not item.startswith(('{% raw', '{%raw')) and match else item
        # BUGFIX: scalar values of other types (ints, bools, None) previously
        # fell through and were replaced by None; pass them through unchanged.
        return item

    def raw_vars(self, play, host, hostvars):
        """Apply raw_triage to every play/host variable named in `raw_vars`."""
        if 'raw_vars' not in hostvars:
            return

        raw_vars = Templar(variables=hostvars, loader=self.loader).template(hostvars['raw_vars'])

        if not isinstance(raw_vars, list):
            raise AnsibleError('The `raw_vars` variable must be defined as a list.')

        # Translate glob-style names ('foo.*') into anchored regex patterns.
        patterns = [re.sub(r'\*', '(.)*', re.sub(r'\.', '\.', var)) for var in raw_vars if var.split('.')[0] in hostvars]
        keys = set(pattern.split('\.')[0] for pattern in patterns)

        for key in keys:
            if key in play.vars:
                play.vars[key] = self.raw_triage(key, play.vars[key], patterns)
            elif key in hostvars:
                host.vars[key] = self.raw_triage(key, hostvars[key], patterns)

    def cli_options(self):
        """Reconstruct a CLI-option string from the parsed ansible options."""
        options = []

        strings = {
            '--connection': 'connection',
            '--inventory-file': 'inventory',
            '--private-key': 'private_key_file',
            '--ssh-common-args': 'ssh_common_args',
            '--ssh-extra-args': 'ssh_extra_args',
            '--timeout': 'timeout',
            '--vault-password-file': 'vault_password_file',
        }

        # BUGFIX: use the six `iteritems` helper here as well (py3 compat).
        for option, value in iteritems(strings):
            if getattr(self._options, value, False):
                options.append("{0}='{1}'".format(option, str(getattr(self._options, value))))

        if getattr(self._options, 'ask_vault_pass', False):
            options.append('--ask-vault-pass')

        return ' '.join(options)

    def darwin_without_passlib(self):
        """Return True on macOS hosts where the passlib library is missing."""
        if not sys.platform.startswith('darwin'):
            return False

        try:
            import passlib.hash  # noqa -- only importability matters here
            return False
        except ImportError:
            # Narrowed from a bare `except:` so unrelated errors are not hidden.
            return True

    def v2_playbook_on_play_start(self, play):
        # Inject helper variables for every host matched by the play.
        for host in play.get_variable_manager()._inventory.list_hosts(play.hosts[0]):
            hostvars = play.get_variable_manager().get_vars(loader=self.loader, play=play, host=host)
            self.raw_vars(play, host, hostvars)
            host.vars['cli_options'] = self.cli_options()
            host.vars['cli_ask_pass'] = getattr(self._options, 'ask_pass', False)
            host.vars['cli_ask_become_pass'] = getattr(self._options, 'become_ask_pass', False)
            host.vars['darwin_without_passlib'] = self.darwin_without_passlib()
| {
"content_hash": "9028f5a564f67f6b81149cb742ce686d",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 143,
"avg_line_length": 40.96842105263158,
"alnum_prop": 0.6138232271325796,
"repo_name": "alan-c/trellis",
"id": "c9dadfbd012979115885de3dcb4ec16e87389aab",
"size": "3892",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "lib/trellis/plugins/callback/vars.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21003"
},
{
"name": "Shell",
"bytes": "2188"
}
],
"symlink_target": ""
} |
import unittest
import subprocess
import os
import shutil
from rmgpy import getPath
from rmgpy.qm.main import QMSettings, QMCalculator
from rmgpy.molecule import Molecule
from rmgpy.qm.gaussian import Gaussian
from rmgpy.qm.mopac import Mopac
class TestQMSettings(unittest.TestCase):
    """
    Contains unit tests for the QMSettings class.
    """

    def setUp(self):
        """
        A function run before each unit test in this class.
        """
        RMGpy_path = os.path.normpath(os.path.join(getPath(),'..'))

        # settings1 is fully populated and should pass validation.
        self.settings1 = QMSettings(software = 'mopac',
                                    method = 'pm3',
                                    fileStore = os.path.join(RMGpy_path, 'testing', 'qm', 'QMfiles'),
                                    scratchDirectory = None,
                                    onlyCyclics = False,
                                    maxRadicalNumber = 0,
                                    )

        # settings2 is deliberately left unconfigured to exercise checkAllSet().
        self.settings2 = QMSettings()

    def testCheckAllSet(self):
        """
        Test that checkAllSet() works correctly.
        """
        # The fully-populated settings must validate cleanly...
        try:
            self.settings1.checkAllSet()
        except AssertionError:
            self.fail("checkAllSet() raised unexpected AssertionError.")

        # ...while the empty settings must fail validation.
        with self.assertRaises(AssertionError):
            self.settings2.checkAllSet()
class TestQMCalculator(unittest.TestCase):
    """
    Contains unit tests for the QMCalculator class.
    """
    # (BUGFIX: docstring previously said "QMSettings class".)

    # Probe for a working MOPAC install (and licence) once, at class-definition
    # time, so the @skipIf decorators below can use the results.
    mopExecutablePath = Mopac.executablePath
    if not os.path.exists(mopExecutablePath):
        NO_MOPAC = NO_LICENCE = True
    else:
        NO_MOPAC = False
        process = subprocess.Popen(mopExecutablePath,
                                   stdin=subprocess.PIPE,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
        stdout, stderr = process.communicate("\n")  # BUGFIX: was misspelled `stdut`
        NO_LICENCE = 'To install the MOPAC license' in stderr

    gaussExecutablePath = Gaussian.executablePath
    NO_GAUSSIAN = not os.path.exists(gaussExecutablePath)

    def setUp(self):
        """
        A function run before each unit test in this class.
        """
        RMGpy_path = os.path.normpath(os.path.join(getPath(),'..'))
        fileStore = os.path.join(RMGpy_path, 'testing', 'qm', 'QMfiles')

        self.mop1 = QMCalculator(software = 'mopac',
                                 method = 'pm3',
                                 fileStore = fileStore
                                 )

        self.mop2 = QMCalculator(software = 'mopac',
                                 method = 'pm6',
                                 )

        self.mop3 = QMCalculator(software = 'mopac',
                                 method = 'pm7',
                                 fileStore = fileStore
                                 )

        # pm8 is not a supported MOPAC method; used by testGetThermoData.
        self.mop4 = QMCalculator(software = 'mopac',
                                 method = 'pm8',
                                 fileStore = fileStore
                                 )

        self.gauss1 = QMCalculator(software = 'gaussian',
                                   method = 'pm3',
                                   )

        self.gauss2 = QMCalculator(software = 'gaussian',
                                   method = 'pm6',
                                   fileStore = fileStore
                                   )

        # pm7 is not a supported Gaussian method; used by testGetThermoData.
        self.gauss3 = QMCalculator(software = 'gaussian',
                                   method = 'pm7',
                                   fileStore = fileStore
                                   )

        # molpro with mp2 is unsupported by getThermoData as well.
        self.molpro1 = QMCalculator(software = 'molpro',
                                    method = 'mp2',
                                    fileStore = fileStore
                                    )

        self.qmmol1 = QMCalculator(fileStore=fileStore)

        self.qmmol2 = QMCalculator(fileStore=fileStore)

    def testSetDefaultOutputDirectory(self):
        """
        Test that setDefaultOutputDirectory() works correctly.
        """
        self.assertIsNotNone(self.mop1.settings.fileStore)
        self.assertIsNotNone(self.mop3.settings.fileStore)
        self.assertIsNotNone(self.gauss2.settings.fileStore)

        self.assertIsNone(self.mop2.settings.fileStore)
        self.assertIsNone(self.gauss1.settings.fileStore)

        self.assertIsNone(self.mop1.settings.scratchDirectory)
        self.assertIsNone(self.mop2.settings.scratchDirectory)
        self.assertIsNone(self.mop3.settings.scratchDirectory)
        self.assertIsNone(self.gauss1.settings.scratchDirectory)
        self.assertIsNone(self.gauss2.settings.scratchDirectory)

        # Now set the default directories for those not set
        outputDirectory = os.path.join(self.mop1.settings.fileStore, '..','..')
        self.mop1.setDefaultOutputDirectory(outputDirectory)
        self.mop2.setDefaultOutputDirectory(outputDirectory)
        self.mop3.setDefaultOutputDirectory(outputDirectory)
        self.gauss1.setDefaultOutputDirectory(outputDirectory)
        self.gauss2.setDefaultOutputDirectory(outputDirectory)

        self.assertIsNotNone(self.mop1.settings.fileStore)
        self.assertIsNotNone(self.mop2.settings.fileStore)
        self.assertIsNotNone(self.mop3.settings.fileStore)
        self.assertIsNotNone(self.gauss1.settings.fileStore)
        self.assertIsNotNone(self.gauss2.settings.fileStore)

        self.assertIsNotNone(self.mop1.settings.scratchDirectory)
        self.assertIsNotNone(self.mop2.settings.scratchDirectory)
        self.assertIsNotNone(self.mop3.settings.scratchDirectory)
        self.assertIsNotNone(self.gauss1.settings.scratchDirectory)
        self.assertIsNotNone(self.gauss2.settings.scratchDirectory)

    def testInitialize(self):
        """
        Test that initialize() works correctly.
        """
        # Now set the default directories for those not set
        outputDirectory = os.path.join(self.mop1.settings.fileStore, '..', '..')
        self.mop1.setDefaultOutputDirectory(outputDirectory)
        self.mop2.setDefaultOutputDirectory(outputDirectory)
        self.mop3.setDefaultOutputDirectory(outputDirectory)
        self.gauss1.setDefaultOutputDirectory(outputDirectory)
        self.gauss2.setDefaultOutputDirectory(outputDirectory)

        try:
            self.mop1.initialize()
            self.mop2.initialize()
            self.mop3.initialize()
            self.gauss1.initialize()
            self.gauss2.initialize()
        except AssertionError:
            self.fail("initialize() raised unexpected AssertionError.")
        except Exception:
            self.fail("initialize() raised Exception. Output file paths not correctly set.")

    def testGetThermoData(self):
        """
        Test that getThermoData() fails when expected.
        """
        outputDirectory = os.path.join(self.mop4.settings.fileStore, '..', '..')
        self.mop4.setDefaultOutputDirectory(outputDirectory)
        self.gauss3.setDefaultOutputDirectory(outputDirectory)
        self.molpro1.setDefaultOutputDirectory(outputDirectory)

        mol = Molecule().fromSMILES('C1=CC=C2C=CC=CC2=C1')

        # BUGFIX: all three calls previously shared a single assertRaises
        # block, so once the first raised the other two never executed.
        with self.assertRaises(Exception):
            self.mop4.getThermoData(mol)
        with self.assertRaises(Exception):
            self.gauss3.getThermoData(mol)
        with self.assertRaises(Exception):
            self.molpro1.getThermoData(mol)

    @unittest.skipIf(NO_MOPAC, "MOPAC not found. Try resetting your environment variables if you want to use it.")
    @unittest.skipIf(NO_LICENCE, "MOPAC license not installed. Run mopac for instructions")
    def testGetThermoDataMopac(self):
        """
        Test that Mopac getThermoData() works correctly.
        """
        outputDirectory = os.path.join(self.mop1.settings.fileStore, '..', '..')
        self.mop1.setDefaultOutputDirectory(outputDirectory)
        self.mop2.setDefaultOutputDirectory(outputDirectory)
        self.mop3.setDefaultOutputDirectory(outputDirectory)

        mol = Molecule().fromSMILES('C1=CC=C2C=CC=CC2=C1')

        # Remove any stale QM files so each calculation starts fresh.
        for directory in (self.mop1.settings.fileStore, self.mop1.settings.scratchDirectory):
            shutil.rmtree(directory, ignore_errors=True)

        for directory in (self.mop2.settings.fileStore, self.mop2.settings.scratchDirectory):
            shutil.rmtree(directory, ignore_errors=True)

        for directory in (self.mop3.settings.fileStore, self.mop3.settings.scratchDirectory):
            shutil.rmtree(directory, ignore_errors=True)

        thermo1 = self.mop1.getThermoData(mol)
        thermo2 = self.mop2.getThermoData(mol)
        thermo3 = self.mop3.getThermoData(mol)

        self.assertTrue(thermo1.comment.startswith('QM MopacMolPM3'))
        self.assertTrue(thermo2.comment.startswith('QM MopacMolPM6'))
        self.assertTrue(thermo3.comment.startswith('QM MopacMolPM7'))

        self.assertAlmostEqual(thermo1.H298.value_si, 169708.0608, 1)  # to 1 decimal place
        self.assertAlmostEqual(thermo1.S298.value_si, 334.5007584, 1)  # to 1 decimal place
        self.assertAlmostEqual(thermo2.H298.value_si, 167704.4270, 1)  # to 1 decimal place
        self.assertAlmostEqual(thermo2.S298.value_si, 338.0999241, 1)  # to 1 decimal place
        self.assertAlmostEqual(thermo3.H298.value_si, 166168.8571, 1)  # to 1 decimal place
        self.assertAlmostEqual(thermo3.S298.value_si, 336.3330406, 1)  # to 1 decimal place

    @unittest.skipIf(NO_GAUSSIAN, "Gaussian not found. Try resetting your environment variables if you want to use it.")
    def testGetThermoDataGaussian(self):
        """
        Test that Gaussian getThermoData() works correctly.
        """
        outputDirectory = os.path.join(self.mop1.settings.fileStore, '..', '..')
        self.gauss1.setDefaultOutputDirectory(outputDirectory)
        self.gauss2.setDefaultOutputDirectory(outputDirectory)

        mol = Molecule().fromSMILES('C1=CC=C2C=CC=CC2=C1')

        # Remove any stale QM files so each calculation starts fresh.
        for directory in (self.gauss1.settings.fileStore, self.gauss1.settings.scratchDirectory):
            shutil.rmtree(directory, ignore_errors=True)

        # BUGFIX: this loop previously cleaned gauss1's fileStore again
        # instead of gauss2's (copy-paste error).
        for directory in (self.gauss2.settings.fileStore, self.gauss2.settings.scratchDirectory):
            shutil.rmtree(directory, ignore_errors=True)

        thermo1 = self.gauss1.getThermoData(mol)
        thermo2 = self.gauss2.getThermoData(mol)

        self.assertTrue(thermo1.comment.startswith('QM GaussianMolPM3'))
        self.assertTrue(thermo2.comment.startswith('QM GaussianMolPM6'))

        self.assertAlmostEqual(thermo1.H298.value_si, 169908.3376, 0)  # to 0 decimal places
        self.assertAlmostEqual(thermo1.S298.value_si, 335.5438748, 0)  # to 0 decimal places
        self.assertAlmostEqual(thermo2.H298.value_si, 169326.2504, 0)  # to 0 decimal places
        self.assertAlmostEqual(thermo2.S298.value_si, 338.2696063, 0)  # to 0 decimal places
################################################################################

if __name__ == '__main__':
    # Run this module's tests with verbose output when executed directly.
    unittest.main( testRunner = unittest.TextTestRunner(verbosity=2) )
| {
"content_hash": "36f6e0257a816c7b3c92e74fa8088aef",
"timestamp": "",
"source": "github",
"line_count": 261,
"max_line_length": 117,
"avg_line_length": 36.90421455938697,
"alnum_prop": 0.7003737541528239,
"repo_name": "pierrelb/RMG-Py",
"id": "4c6ad6b66c753afbd4a4e0cf9be2ae03ca86236a",
"size": "9675",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "rmgpy/qm/mainTest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "461"
},
{
"name": "Jupyter Notebook",
"bytes": "32950"
},
{
"name": "Makefile",
"bytes": "5832"
},
{
"name": "Python",
"bytes": "3507378"
},
{
"name": "Shell",
"bytes": "2733"
}
],
"symlink_target": ""
} |
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'Doctrine DBAL'
copyright = u'2010, Roman Borschel, Guilherme Blanco, Benjamin Eberlei, Jonathan Wage'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.1'
# The full version, including alpha/beta/rc tags.
release = '2.1.0'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = 'en'

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of documents that shouldn't be included in the build.
#unused_docs = []

# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']

# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# Code blocks without an explicit language are highlighted as PHP,
# since this documents a PHP library.
highlight_language = 'php'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []


# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# 'doctrine' is a custom theme, loaded from html_theme_path below.
html_theme = 'doctrine'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_theme']

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_use_modindex = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''

# Output file base name for HTML help builder.
htmlhelp_basename = 'DoctrineDBALdoc'


# -- Options for LaTeX output --------------------------------------------------

# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'

# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('index', 'DoctrineDBAL.tex', u'Doctrine DBAL Documentation',
   u'Roman Borschel, Guilherme Blanco, Benjamin Eberlei, Jonathan Wage', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# Additional stuff for the LaTeX preamble.
#latex_preamble = ''

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_use_modindex = True
| {
"content_hash": "c2a3298a307a1b40c58f3a50632b4c9a",
"timestamp": "",
"source": "github",
"line_count": 184,
"max_line_length": 86,
"avg_line_length": 33.82608695652174,
"alnum_prop": 0.6908740359897172,
"repo_name": "mowema/verano",
"id": "bfd57654c6bf0167a9d0de5cd8f3db3cf0adaf6f",
"size": "6659",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "vendor/doctrine/dbal/docs/en/conf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "197567"
},
{
"name": "JavaScript",
"bytes": "401036"
},
{
"name": "PHP",
"bytes": "250839"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import logging
from collections import OrderedDict
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group
from django.contrib.auth.tokens import default_token_generator
from django.contrib.sites.shortcuts import get_current_site
from django.utils.encoding import force_bytes
from django.utils.http import urlsafe_base64_encode
from rest_framework import serializers
from stackdio.api.users import models, utils
from stackdio.core.fields import HyperlinkedField, PasswordField
from stackdio.core.notifications.serializers import NotificationChannelSerializer
from stackdio.core.serializers import (
StackdioHyperlinkedModelSerializer,
StackdioParentHyperlinkedModelSerializer,
)
logger = logging.getLogger(__name__)
# User-model fields that are owned by the LDAP directory when
# settings.LDAP_ENABLED is true.  UserSerializer.validate() rejects any
# attempt to change these through the API, since LDAP is the source of
# truth for them.
LDAP_MANAGED_FIELDS = (
    'username',
    'first_name',
    'last_name',
    'email',
)
class UserGroupSerializer(StackdioHyperlinkedModelSerializer):
    """Minimal group representation (url + name) used in a user's group list."""

    class Meta:
        model = Group
        # Groups are addressed by name, not pk, in URLs.
        lookup_field = 'name'
        fields = (
            'url',
            'name',
        )
class GroupUserSerializer(StackdioHyperlinkedModelSerializer):
    """Minimal user representation (url + username) used in a group's user list."""

    class Meta:
        model = get_user_model()
        # Users are addressed by username, not pk, in URLs.
        lookup_field = 'username'
        fields = (
            'url',
            'username',
        )
class GroupSerializer(StackdioHyperlinkedModelSerializer):
    """Full group representation with links to related sub-resources.

    All the identity fields below are link fields: they render URLs to the
    group's user list, action endpoint, notification channels and
    object-level permission lists, each keyed by the group's name.
    """
    users = serializers.HyperlinkedIdentityField(
        view_name='api:users:group-userlist',
        lookup_field='name', lookup_url_kwarg='parent_name')
    action = serializers.HyperlinkedIdentityField(
        view_name='api:users:group-action',
        lookup_field='name', lookup_url_kwarg='parent_name')
    channels = serializers.HyperlinkedIdentityField(
        view_name='api:users:group-channel-list',
        lookup_field='name', lookup_url_kwarg='parent_name')
    user_permissions = serializers.HyperlinkedIdentityField(
        view_name='api:users:group-object-user-permissions-list',
        lookup_field='name', lookup_url_kwarg='parent_name')
    group_permissions = serializers.HyperlinkedIdentityField(
        view_name='api:users:group-object-group-permissions-list',
        lookup_field='name', lookup_url_kwarg='parent_name')

    class Meta:
        model = Group
        lookup_field = 'name'
        fields = (
            'url',
            'name',
            'users',
            'action',
            'channels',
            'user_permissions',
            'group_permissions',
        )
class GroupActionReturnSerializer(GroupSerializer):
    """GroupSerializer variant that inlines the group's member list.

    Used as the response body after a group action so the caller can see
    the updated membership immediately.
    """
    users = GroupUserSerializer(source='user_set', many=True)
class GroupActionSerializer(serializers.Serializer):  # pylint: disable=abstract-method
    """Serializer for membership actions (add/remove a user) on a group."""

    available_actions = ('add-user', 'remove-user')

    action = serializers.ChoiceField(available_actions)
    user = serializers.SlugRelatedField(slug_field='username', queryset=models.get_user_queryset())

    def to_representation(self, instance):
        """Render the group (with its inlined member list) as the response.

        Returning the full group lets the caller immediately see the
        membership that resulted from the action.
        """
        group_serializer = GroupActionReturnSerializer(instance, context=self.context)
        return group_serializer.to_representation(instance)

    def save(self, **kwargs):
        """Apply the validated action to the group and return the group."""
        group = self.instance
        data = self.validated_data
        member = data['user']

        if data['action'] == 'add-user':
            group.user_set.add(member)
        elif data['action'] == 'remove-user':
            group.user_set.remove(member)

        return group
class UserSettingsSerializer(serializers.ModelSerializer):
    """Serializer for a user's per-account settings object."""

    class Meta:
        model = models.UserSettings
        fields = (
            'public_key',
            'advanced_view',
        )

    def update(self, instance, validated_data):
        """Persist the settings and flash a notice when advanced_view flips."""
        was_advanced = instance.advanced_view

        instance = super(UserSettingsSerializer, self).update(instance, validated_data)

        is_advanced = instance.advanced_view

        # The messages framework wants the underlying Django request, not
        # the DRF wrapper.
        request = self.context['request']._request

        if was_advanced and not is_advanced:
            messages.info(request, 'You have disabled the advanced view. It may take a minute '
                                   'for all of the advanced links to be disabled.')
        elif is_advanced and not was_advanced:
            messages.info(request, 'You have enabled the advanced view. It may take a minute '
                                   'for all of the advanced links to be enabled.')

        return instance
class UserSerializer(StackdioHyperlinkedModelSerializer):
    """Full serializer for a user, including the nested settings object.

    When LDAP authentication is enabled, the identity fields listed in
    ``LDAP_MANAGED_FIELDS`` are rejected on update since the directory is
    their source of truth (see ``validate``).
    """
    superuser = serializers.BooleanField(source='is_superuser', read_only=True)

    groups = serializers.HyperlinkedIdentityField(
        view_name='api:users:user-grouplist',
        lookup_field='username', lookup_url_kwarg='parent_username',
    )

    # Writable nested serializer - routed manually in update() below.
    settings = UserSettingsSerializer()

    channels = HyperlinkedField(view_name='api:users:currentuser-channel-list')
    token = HyperlinkedField(view_name='api:users:currentuser-token')
    reset_token = HyperlinkedField(view_name='api:users:currentuser-token-reset')
    change_password = HyperlinkedField(view_name='api:users:currentuser-password')

    class Meta:
        model = get_user_model()
        lookup_field = 'username'
        fields = (
            'username',
            'first_name',
            'last_name',
            'email',
            'superuser',
            'last_login',
            'groups',
            'channels',
            'token',
            'reset_token',
            'change_password',
            'settings',
        )
        extra_kwargs = {
            'email': {'required': True, 'allow_blank': False},
        }

    def validate(self, attrs):
        """Reject changes to LDAP-managed fields when LDAP auth is enabled."""
        if settings.LDAP_ENABLED and self.instance:
            # We only run into issues if using LDAP
            errors = OrderedDict()
            for attr, value in attrs.items():
                current_value = getattr(self.instance, attr)
                # Only deny the request if the field is LDAP managed AND is changed
                if attr in LDAP_MANAGED_FIELDS and value != current_value:
                    errors[attr] = ['This is an LDAP managed field.']

            if errors:
                raise serializers.ValidationError(errors)

        return attrs

    def create(self, validated_data):
        """
        We want to override this method so we can send an email to the new user with a link
        to reset their password
        """
        user = super(UserSerializer, self).create(validated_data)

        request = self.context['request']

        from_email = None
        subject_template_name = 'stackdio/auth/new_user_subject.txt'
        email_template_name = 'stackdio/auth/password_reset_email.html'

        current_site = get_current_site(request)
        site_name = current_site.name
        domain = current_site.domain

        # Template context for the password-reset email sent to the new user.
        context = {
            'email': user.email,
            'domain': domain,
            'site_name': site_name,
            'uid': urlsafe_base64_encode(force_bytes(user.pk)),
            'user': user,
            'token': default_token_generator.make_token(user),
            'protocol': 'https' if request.is_secure() else 'http',
            'intro_line': 'You\'re receiving this email because one of the administrators at {0} '
                          'has created an account for you.'.format(site_name)
        }

        utils.send_mail(subject_template_name, email_template_name, context, from_email, user.email)

        return user

    # We need a custom update since we have a nested field
    def update(self, instance, validated_data):
        """Update the user, routing the nested settings dict to its serializer."""
        # Use a default here: on partial (PATCH) updates 'settings' may not
        # be present in validated_data, and pop() without a default would
        # raise KeyError.
        user_settings = validated_data.pop('settings', None)

        if user_settings:
            settings_serializer = self.fields['settings']
            settings_serializer.update(instance.settings, user_settings)

        instance = super(UserSerializer, self).update(instance, validated_data)

        # Now we need to put it back, in case something else needs it later.
        if user_settings:
            validated_data['settings'] = user_settings

        return instance
class PublicUserSerializer(UserSerializer):
    """
    This is the serializer for the main user list view. It's the same as the main UserSerializer,
    it just has a few fields hidden.
    """
    class Meta(UserSerializer.Meta):
        # Note: the token / password / settings links are deliberately
        # omitted here - those are only exposed on the current-user view.
        fields = (
            'url',
            'username',
            'first_name',
            'last_name',
            'email',
            'groups',
        )
class ChangePasswordSerializer(serializers.Serializer):  # pylint: disable=abstract-method
    """Validate and apply a password change for the current user."""

    current_password = PasswordField(label='Current Password')
    new_password1 = PasswordField(label='New Password')
    new_password2 = PasswordField(label='New Password Again')

    def to_representation(self, instance):
        """Render the user object - plaintext passwords are never echoed back."""
        user_serializer = UserSerializer(instance, context=self.context)
        return user_serializer.to_representation(instance)

    def validate(self, attrs):
        """Check LDAP mode, the current password, and that the new ones match."""
        if settings.LDAP_ENABLED:
            # Passwords live in the directory server, not our database.
            raise serializers.ValidationError({
                'current_password': ['You cannot change your password when using LDAP '
                                     'authentication.']
            })

        # The serializer is always initialized with the current user as its
        # instance, so authenticate against that.
        if not self.instance.check_password(attrs['current_password']):
            raise serializers.ValidationError({
                'current_password': ['You entered an incorrect current password value.']
            })

        if attrs['new_password1'] != attrs['new_password2']:
            raise serializers.ValidationError({
                'new_password2': ['The 2 new passwords don\'t match.']
            })

        return attrs

    def save(self, **kwargs):
        """Set the new password directly; create()/update() don't apply here."""
        assert hasattr(self, '_errors'), (
            'You must call `.is_valid()` before calling `.save()`.'
        )

        assert not self.errors, (
            'You cannot call `.save()` on a serializer with invalid data.'
        )

        # validate() enforced new_password1 == new_password2, so either
        # field holds the password to set.
        self.instance.set_password(self.validated_data['new_password1'])
        self.instance.save()

        return self.instance
class UserNotificationChannelSerializer(NotificationChannelSerializer):
    """Notification channel serializer scoped to the current user's URLs."""

    class Meta(NotificationChannelSerializer.Meta):
        app_label = 'users'
        model_name = 'currentuser-channel'
class GroupNotificationChannelSerializer(StackdioParentHyperlinkedModelSerializer,
                                         NotificationChannelSerializer):
    """Notification channel serializer scoped under a parent group's URLs."""

    class Meta(NotificationChannelSerializer.Meta):
        app_label = 'users'
        model_name = 'group-channel'
        # The channel's parent is the group, addressed by name in the URL.
        parent_attr = 'auth_object'
        parent_lookup_field = 'name'
| {
"content_hash": "f76052107d7e2cd4f0927a94323d9ae0",
"timestamp": "",
"source": "github",
"line_count": 340,
"max_line_length": 100,
"avg_line_length": 33.54705882352941,
"alnum_prop": 0.6273014203051026,
"repo_name": "stackdio/stackdio",
"id": "36ce86f49f44ff0784c3e8e2c9cda55adf094886",
"size": "12017",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "stackdio/api/users/serializers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "6462"
},
{
"name": "HTML",
"bytes": "200474"
},
{
"name": "JavaScript",
"bytes": "365621"
},
{
"name": "Makefile",
"bytes": "567"
},
{
"name": "Python",
"bytes": "1034237"
},
{
"name": "SaltStack",
"bytes": "4594"
},
{
"name": "Scheme",
"bytes": "2371"
},
{
"name": "Shell",
"bytes": "6131"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import flask
from app import database_read
def main():
    """Render the payment-status page for the order given in the query string."""
    args = flask.request.args
    if 'order_nr' not in args:
        flask.abort(404, 'Unknown order_nr')

    status_line = "<p>Your payment status is '%s'" % database_read(args['order_nr'])

    return ''.join([
        status_line,
        '<p>',
        '<a href="/">Back to examples</a><br>',
        '</p>',
    ])
if __name__ == '__main__':
print(main())
| {
"content_hash": "243b07ea6c69288c08a1ecb0c15e6106",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 91,
"avg_line_length": 21.55,
"alnum_prop": 0.5893271461716937,
"repo_name": "Akoten/mollie-api-python",
"id": "a340a5131752c107f4d93235816248ea81e5c0fc",
"size": "613",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/3-return-page.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "11856"
},
{
"name": "Shell",
"bytes": "39"
}
],
"symlink_target": ""
} |
from sugerencias import *
from efectivo import *
from yes_no import *
from aceptar import *
| {
"content_hash": "82effba4de40cd42840f1ae52f68d37c",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 25,
"avg_line_length": 23,
"alnum_prop": 0.7717391304347826,
"repo_name": "vallemrv/tpvB3",
"id": "a878c0378cda3d3223340094b3616cfa006d9b51",
"size": "286",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tpv/modals/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "28419"
},
{
"name": "HTML",
"bytes": "76010"
},
{
"name": "JavaScript",
"bytes": "640565"
},
{
"name": "Python",
"bytes": "467690"
}
],
"symlink_target": ""
} |
"""Generates chains of network neighbour from the results of traceroute
From the current host, a set of hosts can be tried to be reached by using the
``traceroute`` facility. Then the chains of neighbour hosts are going to be
dumped into a pickle file to be further processed.
"""
import subprocess
import ipaddress
import socket
import sys
import pickle
from host import Host
#
# Querying hosts information
# --------------------------
#
def get_curr_host():
    """Return a Host describing the machine this code is running on.

    Both the FQDN and the IP address are obtained by shelling out to the
    ``hostname`` utility.
    """
    def query(flag):
        # hostname output carries a trailing newline; strip it.
        return subprocess.check_output(['hostname', flag]).decode('utf-8').strip()

    return Host(
        hostname=query('-f'),
        ip=ipaddress.ip_address(query('-i'))
    )
def get_dest_host(dest):
    """Return a Host for the destination, resolving its IP via DNS.

    ``dest`` is the destination's hostname as a string.
    """
    resolved = socket.gethostbyname(dest)
    return Host(hostname=dest, ip=ipaddress.ip_address(resolved))
def get_hosts_on_route(dest):
"""Returns a list of hosts by invoking traceroute on the destination
Note that the origin and destination themselves are also included.
"""
hosts = [get_curr_host(), ]
waiting_time = 15
max_hop = 30
raw_out = subprocess.check_output(
['traceroute', '-w', str(waiting_time), '-m', str(max_hop), dest]
).decode('utf-8')
for line in raw_out.split('\n')[1:-1]:
fields = line.split()
if fields[1] == '*':
continue
hostname = fields[1]
ip = fields[2].strip('()')
hosts.append(
Host(hostname=hostname, ip=ipaddress.ip_address(ip))
)
continue
hosts.append(get_dest_host(dest))
print(" Host %s successfully reached." % dest)
return hosts
#
# The main driver
# ---------------
#
def main():
    """The main driver function

    The list of destinations are going to be read from the file with name given
    as the first command line argument.
    """
    try:
        input_filename = sys.argv[1]
    except IndexError:
        print("Input file name not given!")
        raise

    with open(input_filename, 'r') as inp_f:
        dests = [i.strip() for i in inp_f]

    chains = [get_hosts_on_route(i) for i in dests]

    # NOTE(review): split('.')[0] truncates at the FIRST dot, so a path like
    # "dir.name/hosts.txt" yields "dir" - confirm inputs are simple names.
    base_name = input_filename.split('.')[0]
    with open(base_name + '.pickle', 'wb') as out_file:
        pickle.dump(chains, out_file)
    # Fixed message: the file actually written ends in ".pickle", but the
    # original message said ".pickel".
    print("Chains dumped into the output file %s.pickle" % base_name)
if __name__ == '__main__':
main()
| {
"content_hash": "6d4f4dd6bd0e389ff510a1d950ccddcc",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 79,
"avg_line_length": 22.612068965517242,
"alnum_prop": 0.6126572626763248,
"repo_name": "tschijnmo/TopologyFromTraceroute",
"id": "64865c0fefb5a99b95e2d341e0de1af5be074f2f",
"size": "2647",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chainsfromtranceroute.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8253"
}
],
"symlink_target": ""
} |
"""
Created on Feb 2, 2018
@author: nhan.nguyen
This module contains class "PerformanceTestRunner" that executes the test base
on the mode that user pass to system.
"""
import os
import argparse
import time
import threading
import asyncio
import sys
import perf_add_requests
import perf_get_requests
import perf_load
import perf_traffic
import requests_sender
import utils
class Options:
    """Parse command-line options for the performance test runner.

    Exactly one of the mode flags (-a add, -g get, -l load, -t traffic)
    is expected; PerformanceTestRunner.__init__ enforces that.  The parsed
    namespace is exposed as ``self.args``.
    """
    def __init__(self):
        parser = argparse.ArgumentParser(
            description='This script will execute the test base on the '
                        'mode that user passes to system.')

        parser.add_argument('-a',
                            help='Use this parameter to start adding '
                                 'request performance testing',
                            action='store_true',
                            default=False, required=False, dest='adding')

        parser.add_argument('-g',
                            help='Use this parameter to start getting '
                                 'request performance testing',
                            action='store_true',
                            default=False, required=False, dest='getting')

        parser.add_argument('-l',
                            help='Use this parameter to perform load test',
                            action='store_true',
                            default=False, required=False, dest='loading')

        parser.add_argument('-t',
                            help='Use this parameter to simulate traffic',
                            action='store_true',
                            default=False, required=False,
                            dest='simulate_traffic')

        parser.add_argument('-c',
                            help='Number of client you want to create. '
                                 'Default value will be 1',
                            default=1, type=int, required=False,
                            dest='clients')

        parser.add_argument('-d',
                            help='Directory you want to store requests '
                                 'info when sending adding request. '
                                 'If you start getting request testing, '
                                 'program will collect info from '
                                 'this dir instead.'
                                 'Default value will be {}'.
                            format(os.path.join(os.path.dirname(__file__),
                                                "request_info")),
                            default=os.path.join(os.path.dirname(__file__),
                                                 "request_info"),
                            required=False,
                            dest='info_dir')

        parser.add_argument('-n',
                            help='How many transactions you want to submit to '
                                 'ledger when starting adding requests.'
                                 'If you start getting request testing, '
                                 'this arg will be ignore.'
                                 'In case that you use flag "-t", this '
                                 'parameter will be the number of '
                                 'transactions of a set.'
                                 'Default value will be 100',
                            default=100, type=int, required=False, dest='txns')

        parser.add_argument('-s',
                            help='Number of thread will '
                                 'be created by each client.'
                                 'Default value is 1',
                            default=1, type=int, required=False,
                            dest='thread_num')

        parser.add_argument('-k',
                            help='Kind of request to be sent. '
                                 'The default value will be "nym"',
                            action='store',
                            choices=['nym', 'schema', 'attribute', 'claim'],
                            default='nym', dest='kind', required=False)

        parser.add_argument('--log',
                            help='To see all log. If this flag does not exist,'
                                 'program just only print fail message',
                            action='store_true', default=False, dest='log',
                            required=False)

        parser.add_argument('-to',
                            help='Timeout of testing. This flag '
                                 'just visible in two mode "-l" and "-t"'
                                 'Default value will be 100.',
                            action='store', type=int,
                            default=100, dest='time_out', required=False)

        parser.add_argument('--init',
                            help='To build "GET" request, we need to '
                                 'send "ADD" request first. This argument is '
                                 'the number of "ADD" request will be sent '
                                 'to ledger to make sample for "GET" requests.'
                                 ' Default value will be 100',
                            action='store', type=int, required=False,
                            default=100, dest='number_of_request_samples')

        # Parsed argparse.Namespace consumed by PerformanceTestRunner.
        self.args = parser.parse_args()
class PerformanceTestRunner:
    """Drive one performance-test run in the mode selected on the CLI.

    Exactly one mode flag must be present in sys.argv; the runner then
    creates one or more testers, runs them (in threads when multiple
    clients are requested), aggregates their timing counters, and writes
    a human-readable result file plus a log file.
    """

    # The mutually-exclusive mode flags: traffic, load, add, get.
    modes = ["-t", "-l", "-a", "-g"]

    def __init__(self):
        self.options = Options().args
        self.tester = None
        # Count how many mode flags were passed; exactly one is required.
        temp = 0
        for mode in PerformanceTestRunner.modes:
            if mode in sys.argv:
                temp += 1

        if temp == 0:
            utils.print_error(
                'Cannot determine any kind of request for testing')
            # Fixed message: the valid flags are -a/-g/-t/-l (the original
            # text mentioned a non-existent "-b" flag).
            utils.print_error(
                'May be you missing arguments "-a" or "-g" or "-t" or "-l"')
            sys.exit(1)

        if temp > 1:
            utils.force_print_error_to_console(
                '"-a" and "-g" and "-t" and "-l" '
                'cannot exist at the same time\n')
            sys.exit(1)

        self.list_tester = list()

        # Aggregated timing/counters, filled in by collect_result().
        self.start_time = self.finish_time = 0
        self.lowest = self.fastest = 0
        self.passed_req = self.failed_req = 0

        self.result_path = os.path.join(os.path.dirname(__file__), 'results')
        utils.create_folder(self.result_path)

        log_path = os.path.join(os.path.dirname(__file__), 'logs')
        utils.create_folder(log_path)

        now = time.strftime("%d-%m-%Y_%H-%M-%S")
        self.result_path = os.path.join(self.result_path,
                                        'result_{}.txt'.format(now))

        log_path = os.path.join(
            log_path, self.create_log_file_name())
        requests_sender.RequestsSender.init_log_file(log_path)

        utils.create_folder(self.options.info_dir)

    def run(self):
        """
        Run the test.
        """
        utils.print_header("Start {}...\n".format(self.get_kind_of_test()))
        if not self.options.log:
            utils.start_capture_console()

        self.start_time = time.time()
        # Fixed precedence bug: 'and' binds tighter than 'or', so the
        # original condition read as `adding or (getting and clients > 1)`
        # and sent adding mode down the threaded path even for one client.
        if (self.options.adding or self.options.getting) \
                and self.options.clients > 1:
            self.start_tester_in_thread()
        else:
            self.list_tester.append(self.create_tester())
            utils.run_async_method(None, self.list_tester[-1].test)
        self.finish_time = time.time()

        utils.stop_capture_console()

        self.collect_result()
        with open(self.result_path, 'w') as result:
            self.write_result(result)
        self.write_result(sys.stdout)
        requests_sender.RequestsSender.close_log_file()
        utils.print_header("\nFinish {}\n".format(self.get_kind_of_test()))

    def collect_result(self):
        """
        Collect all necessary information to make the result.
        """
        self.passed_req = self.failed_req = 0
        for tester in self.list_tester:
            self.failed_req += tester.failed_req
            self.passed_req += tester.passed_req
        self.find_lowest_and_fastest_transaction()
        self.find_start_and_finish_time()

    def write_result(self, result_file):
        """
        Compute and write result to file.

        :param result_file: the file that result will be written.
        """
        total_time = self.finish_time - self.start_time
        hours = total_time / 3600
        minutes = total_time / 60 % 60
        seconds = total_time % 60

        ttl_txns = int(self.passed_req + self.failed_req)
        ttl_seconds = total_time
        if ttl_seconds == 0:
            print('\nThere is no request sent.\n', file=result_file)
            return
        txns_per_second = int(ttl_txns / ttl_seconds)
        txns_per_client = ttl_txns / self.options.clients

        print("\n ----------- Total time to run the test: %dh:%dm:%ds" % (
            hours, minutes, seconds) + " -----------", file=result_file)
        print("\n Kind: " + self.get_kind_of_test(), file=result_file)
        print("\n Client(s): " + str(self.options.clients), file=result_file)
        print("\n Fastest transaction (individual thread): {} second(s)".
              format(str(self.fastest)),
              file=result_file)
        print("\n Lowest transaction (individual thread): {} second(s)".
              format(str(self.lowest)), file=result_file)
        print("\n Transaction per client: " + str(int(txns_per_client)),
              file=result_file)
        print("\n Total requested transactions: " + str(int(ttl_txns)),
              file=result_file)
        print("\n Total passed transactions: " + str(self.passed_req),
              file=result_file)
        print("\n Total failed transactions: " + str(self.failed_req),
              file=result_file)
        print("\n Average time of a transaction "
              "(multiple threads): {} second(s)".
              format(str((self.finish_time - self.start_time) / ttl_txns)),
              file=result_file)
        print("\n Estimated transactions per second: " + str(txns_per_second),
              file=result_file)

    def find_lowest_and_fastest_transaction(self):
        """
        Find lowest and fastest transactions.
        """
        self.lowest = self.list_tester[0].lowest_txn
        self.fastest = self.list_tester[0].fastest_txn
        for tester in self.list_tester:
            temp_lowest = tester.lowest_txn
            temp_fastest = tester.fastest_txn
            if self.lowest < temp_lowest:
                self.lowest = temp_lowest
            if self.fastest > temp_fastest:
                self.fastest = temp_fastest

    def find_start_and_finish_time(self):
        """
        Find the earliest time that a client is started and latest time that a
        client is finished.
        """
        self.start_time = self.list_tester[0].start_time
        self.finish_time = self.list_tester[0].finish_time
        for tester in self.list_tester:
            if self.start_time > tester.start_time:
                self.start_time = tester.start_time
            if self.finish_time < tester.finish_time:
                self.finish_time = tester.finish_time

    def start_tester_in_thread(self):
        """
        Create thread and start all the tester in list.
        """
        threads = list()
        for _ in range(self.options.clients):
            tester = self.create_tester()
            self.list_tester.append(tester)
            thread = threading.Thread(target=self.run_tester_in_thread,
                                      kwargs={'tester': tester})
            thread.daemon = True
            thread.start()
            threads.append(thread)

        for thread in threads:
            thread.join()

    @staticmethod
    def run_tester_in_thread(tester):
        """
        Execute testing function of tester.
        """
        # Each worker thread needs its own asyncio event loop.
        loop = asyncio.new_event_loop()
        utils.run_async_method(loop, tester.test)
        loop.close()

    def create_tester(self):
        """
        Create tester base mode "-a", "-t", "-g", "-l".

        :return: tester
        """
        if self.options.adding:
            return perf_add_requests.PerformanceTesterForAddingRequest(
                self.options.info_dir, self.options.txns, self.options.kind,
                thread_num=self.options.thread_num, log=self.options.log)
        elif self.options.getting:
            return perf_get_requests.PerformanceTesterGetSentRequestFromLedger(
                self.options.info_dir, self.options.kind,
                self.options.thread_num, log=self.options.log)
        elif self.options.loading:
            return perf_load.TesterSimulateLoad(
                self.options.clients, self.options.txns,
                self.options.time_out, self.options.log)
        elif self.options.simulate_traffic:
            return perf_traffic.TesterSimulateTraffic(
                self.options.clients, self.options.txns,
                self.options.time_out, self.options.log)

        return None

    def get_kind_of_test(self) -> str:
        """
        Return kind of testing.

        :return: kind of test.
        """
        if self.options.adding:
            return "sending 'ADD {}' requests".format(self.options.kind)
        elif self.options.getting:
            return "sending 'GET {}' requests".format(self.options.kind)
        elif self.options.simulate_traffic:
            return "simulating traffic"
        elif self.options.loading:
            return "performing load test"
        return ""

    def create_log_file_name(self):
        """
        Create and return log file name.

        :return: log file name.
        """
        temp = 'get' if self.options.getting else ""
        now = time.strftime("%d-%m-%Y_%H-%M-%S")
        if self.options.adding or self.options.getting:
            return '{}-perf-{}{}_{}.log'.format(self.options.clients, temp,
                                                self.options.kind, now)
        elif self.options.simulate_traffic:
            return '{}-{}_{}.log'.format(self.options.clients,
                                         'simulate_traffic', now)
        else:
            return '{}-{}_{}.log'.format(self.options.clients,
                                         'perform_load_test', now)
if __name__ == '__main__':
PerformanceTestRunner().run()
| {
"content_hash": "ebd198692376c320d4045da2f382b3ea",
"timestamp": "",
"source": "github",
"line_count": 374,
"max_line_length": 79,
"avg_line_length": 38.649732620320854,
"alnum_prop": 0.509789000345901,
"repo_name": "spivachuk/sovrin-node",
"id": "53e2a9d9b1f9069fd5c7165a228b7c675df46b06",
"size": "14455",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/performance/Perf_runner.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3329"
},
{
"name": "Dockerfile",
"bytes": "7269"
},
{
"name": "Groovy",
"bytes": "8984"
},
{
"name": "Makefile",
"bytes": "11151"
},
{
"name": "Python",
"bytes": "1681637"
},
{
"name": "Ruby",
"bytes": "65393"
},
{
"name": "Rust",
"bytes": "25532"
},
{
"name": "Shell",
"bytes": "132633"
}
],
"symlink_target": ""
} |
from django import template
register = template.Library()
@register.simple_tag(takes_context=True)
def is_active(context, url):
    """Return 'active' when the current request path starts with *url*, else ''."""
    path = context.request.get_full_path()
    return 'active' if path.startswith(url) else ''
| {
"content_hash": "44b97642a4ac55745c86a054e8acb62b",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 50,
"avg_line_length": 23.363636363636363,
"alnum_prop": 0.7042801556420234,
"repo_name": "allcaps/django-scaffold",
"id": "a0700f62c1fd936cc17e7037f2e1d1c3dac0c991",
"size": "257",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scaffold/templates/scaffold/templatetags/APP_NAME_tags.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "46011"
},
{
"name": "Python",
"bytes": "9716"
}
],
"symlink_target": ""
} |
"""
Copyright (c) 2006-2013 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
from plugins.generic.syntax import Syntax as GenericSyntax
class Syntax(GenericSyntax):
    def __init__(self):
        GenericSyntax.__init__(self)

    @staticmethod
    def escape(expression, quote=True):
        """
        >>> Syntax.escape("SELECT 'abcdefgh' FROM foobar")
        'SELECT CHR(97)||CHR(98)||CHR(99)||CHR(100)||CHR(101)||CHR(102)||CHR(103)||CHR(104) FROM foobar'
        """

        def escaper(value):
            # Oracle: CHR() handles single-byte code points, NCHR() the rest.
            return "||".join("%s(%d)" % ("CHR" if ord(ch) < 256 else "NCHR", ord(ch)) for ch in value)

        return Syntax._escape(expression, quote, escaper)
| {
"content_hash": "a28bd63f1cdd7c2a4f84a711bdaf04c3",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 126,
"avg_line_length": 33.36363636363637,
"alnum_prop": 0.6158038147138964,
"repo_name": "JeyZeta/Dangerous",
"id": "48c8781475b548dd45f55501139db07615b1f9ad",
"size": "757",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "Dangerous/Golismero/tools/sqlmap/plugins/dbms/oracle/syntax.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "13260"
},
{
"name": "C",
"bytes": "12851"
},
{
"name": "C++",
"bytes": "3174"
},
{
"name": "CSS",
"bytes": "267451"
},
{
"name": "HTML",
"bytes": "2686153"
},
{
"name": "JavaScript",
"bytes": "1356956"
},
{
"name": "Lua",
"bytes": "14436"
},
{
"name": "Makefile",
"bytes": "11190"
},
{
"name": "Objective-C",
"bytes": "998"
},
{
"name": "PHP",
"bytes": "619"
},
{
"name": "PLpgSQL",
"bytes": "536"
},
{
"name": "Perl",
"bytes": "263365"
},
{
"name": "Python",
"bytes": "16669102"
},
{
"name": "Roff",
"bytes": "9828"
},
{
"name": "Ruby",
"bytes": "503"
},
{
"name": "Shell",
"bytes": "6691"
}
],
"symlink_target": ""
} |
import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.internal_gains import ElectricEquipment
log = logging.getLogger(__name__)
class TestElectricEquipment(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_electricequipment(self):
pyidf.validation_level = ValidationLevel.error
obj = ElectricEquipment()
# alpha
var_name = "Name"
obj.name = var_name
# object-list
var_zone_or_zonelist_name = "object-list|Zone or ZoneList Name"
obj.zone_or_zonelist_name = var_zone_or_zonelist_name
# object-list
var_schedule_name = "object-list|Schedule Name"
obj.schedule_name = var_schedule_name
# alpha
var_design_level_calculation_method = "EquipmentLevel"
obj.design_level_calculation_method = var_design_level_calculation_method
# real
var_design_level = 0.0
obj.design_level = var_design_level
# real
var_watts_per_zone_floor_area = 0.0
obj.watts_per_zone_floor_area = var_watts_per_zone_floor_area
# real
var_watts_per_person = 0.0
obj.watts_per_person = var_watts_per_person
# real
var_fraction_latent = 0.5
obj.fraction_latent = var_fraction_latent
# real
var_fraction_radiant = 0.5
obj.fraction_radiant = var_fraction_radiant
# real
var_fraction_lost = 0.5
obj.fraction_lost = var_fraction_lost
# alpha
var_enduse_subcategory = "End-Use Subcategory"
obj.enduse_subcategory = var_enduse_subcategory
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.electricequipments[0].name, var_name)
self.assertEqual(idf2.electricequipments[0].zone_or_zonelist_name, var_zone_or_zonelist_name)
self.assertEqual(idf2.electricequipments[0].schedule_name, var_schedule_name)
self.assertEqual(idf2.electricequipments[0].design_level_calculation_method, var_design_level_calculation_method)
self.assertAlmostEqual(idf2.electricequipments[0].design_level, var_design_level)
self.assertAlmostEqual(idf2.electricequipments[0].watts_per_zone_floor_area, var_watts_per_zone_floor_area)
self.assertAlmostEqual(idf2.electricequipments[0].watts_per_person, var_watts_per_person)
self.assertAlmostEqual(idf2.electricequipments[0].fraction_latent, var_fraction_latent)
self.assertAlmostEqual(idf2.electricequipments[0].fraction_radiant, var_fraction_radiant)
self.assertAlmostEqual(idf2.electricequipments[0].fraction_lost, var_fraction_lost)
self.assertEqual(idf2.electricequipments[0].enduse_subcategory, var_enduse_subcategory) | {
"content_hash": "6c479293bff3cb45a19d43be52a89ef9",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 121,
"avg_line_length": 39.34615384615385,
"alnum_prop": 0.6728576083414793,
"repo_name": "rbuffat/pyidf",
"id": "606ad7c1e55e4398344081a3e8efea976b6d9a31",
"size": "3069",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_electricequipment.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "22271673"
}
],
"symlink_target": ""
} |
from .util import journalpump_initialized
from journalpump.journalpump import JournalPump
from subprocess import Popen
from time import sleep
import json
import logging
import logging.handlers
import os
import random
import re
import socket
import string
import threading
# Absolute path of the rsyslogd binary expected on the test host.
RSYSLOGD = "/usr/sbin/rsyslogd"
# rsyslogd configuration template (legacy directive syntax): accept TCP
# input on {port} and write every received message to {logfile}.
RSYSLOGD_TCP_CONF = """
$ModLoad imtcp
$InputTCPServerRun {port}
$template RemoteLogs,"{logfile}"
*.* ?RemoteLogs
& ~
"""
class _TestRsyslogd:
    """Manages a throw-away foreground rsyslogd instance for the test.

    The daemon listens for TCP syslog input on ``port`` and appends every
    message it receives to ``logfile`` (see RSYSLOGD_TCP_CONF).
    """

    def __init__(self, *, workdir, logfile, port):
        if not os.path.exists(RSYSLOGD):
            raise RuntimeError('"{}" not available'.format(RSYSLOGD))
        self.port = port
        self.conffile = "{}/rsyslogd.conf".format(workdir)
        self.process = None
        with open(self.conffile, "w") as fp:
            print(RSYSLOGD_TCP_CONF.format(logfile=logfile, port=port), file=fp)

    def _wait_until_running(self):
        # Wait until the rsyslogd port is available, but if it is not up in
        # five seconds assume that it has failed to start.
        attempt = 0
        while (self.process.poll() is None) and (attempt < 5):
            # Use a fresh socket for every probe: the previous code reused a
            # single socket object across connect attempts (unreliable after a
            # failed connect) and leaked it when the loop gave up.
            with socket.socket() as s:
                if s.connect_ex(("127.0.0.1", self.port)) == 0:
                    return
            sleep(1)
            attempt += 1
        raise RuntimeError("rsyslogd failed to start correctly")

    def start(self):
        """Start rsyslogd in the foreground and wait until it accepts TCP
        connections (raises RuntimeError if it never comes up)."""
        # pylint: disable=consider-using-with
        self.process = Popen([RSYSLOGD, "-f", self.conffile, "-i", "NONE", "-n", "-C"])
        self._wait_until_running()

    def stop(self):
        """Terminate the rsyslogd process started by start(), if any."""
        if self.process is not None:
            if self.process.poll() is not None:
                # The process already exited on its own: startup failure.
                raise RuntimeError("rsyslogd did not start properly")
            self.process.terminate()
            self.process.wait(timeout=5)
            self.process = None
def _run_pump_test(*, config_path, logfile):
    """Run a JournalPump from *config_path*, emit four tagged log messages,
    and assert that all four end up in *logfile* (written by rsyslogd).
    """
    journalpump = None
    threads = []
    try:
        # Run the pump in a background thread; senders are started by the
        # pump itself and collected into `threads` during shutdown below.
        journalpump = JournalPump(config_path)
        pump = threading.Thread(target=journalpump.run)
        pump.start()
        threads.append(pump)
        assert journalpump_initialized(journalpump), "Failed to initialize journalpump"
        # Random marker so this run's messages can be told apart from any
        # other traffic in the log file.
        identifier = "".join(random.sample(string.ascii_uppercase + string.digits, k=8))
        logger = logging.getLogger("rsyslog-tester")
        logger.info("Info message for %s", identifier)
        logger.warning("Warning message for %s", identifier)
        logger.error("Error message for %s", identifier)
        logger.critical("Critical message for %s", identifier)
        # Wait for everything to trickle thru
        sleep(5)
    finally:
        # Stop the journalpump and senders
        if journalpump is not None:
            journalpump.running = False
            for _, reader in journalpump.readers.items():
                for _, sender in reader.senders.items():
                    threads.append(sender)
                    sender.request_stop()
            # Wait a little while for threads to finish
            retry = 0
            while retry < 5:
                if not [thread for thread in threads if thread.is_alive()]:
                    break
                sleep(1)
                retry += 1
    # Check the results: one match expected per severity level.
    found = 0
    with open(logfile, "r") as fp:
        lines = fp.readlines()
    for txt in ["Info", "Warning", "Error", "Critical"]:
        m = re.compile(r".*{} message for {}.*".format(txt, identifier))
        for line in lines:
            if m.match(line):
                found += 1
                break
    assert found == 4, "Expected messages not found in syslog"
def test_rsyslogd_tcp_sender(tmpdir):
    """End-to-end check of the journalpump rsyslog sender against a real
    rsyslogd instance listening on a local TCP port."""
    workdir = tmpdir.dirname
    logfile = "{}/test.log".format(workdir)
    config_path = "{}/journalpump.json".format(workdir)

    # Pump configuration: a single syslog-tcp reader feeding one rsyslog
    # sender pointed at the local test daemon.
    pump_config = {
        "readers": {
            "syslog-tcp": {
                "initial_position": "tail",
                "senders": {
                    "rsyslog": {
                        "output_type": "rsyslog",
                        "rsyslog_server": "127.0.0.1",
                        "rsyslog_port": 5140,
                        "format": "custom",
                        "logline": "<%pri%>%timestamp% %HOSTNAME% %app-name%[%procid%]: %msg% {%%} %not-valid-tag%",
                    },
                },
            },
        },
    }
    with open(config_path, "w") as fp:
        json.dump(pump_config, fp)

    rsyslogd = _TestRsyslogd(workdir=workdir, logfile=logfile, port=5140)
    try:
        rsyslogd.start()
        _run_pump_test(config_path=config_path, logfile=logfile)
    finally:
        rsyslogd.stop()
| {
"content_hash": "ab33ca955658ef7ac559f7684ad9ec5f",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 120,
"avg_line_length": 33.1063829787234,
"alnum_prop": 0.5616966580976864,
"repo_name": "aiven/kafkajournalpump",
"id": "1bfa7a47ae3a0c1f1260553d65964e59b3836b7c",
"size": "4806",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "systest/test_rsyslog.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1435"
},
{
"name": "Python",
"bytes": "24986"
}
],
"symlink_target": ""
} |
"""An executor for running Hadoop/Hive jobs in Qubole cluster.
For more information about Qubole and its API, please check:
- https://api.qubole.com
"""
import getpass
import logging
import os
import re
import subprocess
import sys
import StringIO
import time
from qds_sdk.commands import Command
from qds_sdk.commands import HiveCommand
from qds_sdk.commands import ShellCommand
from qds_sdk.qubole import Qubole
from pinball_ext.common import output_filter
from pinball_ext.common import s3_utils
from pinball_ext.common import utils
from pinball_ext.common.decorators import retry
from pinball_ext.executor.cluster_executor import ClusterExecutor
from pinball_ext.executor.common import Platform
__author__ = 'Zach Drach, Changshu Liu, Jie Li'
__copyright__ = 'Copyright 2015, Pinterest, Inc.'
__credits__ = [__author__]
__license__ = 'Apache'
__version__ = '2.0'
class QuboleExecutor(ClusterExecutor):
    """Implements the executor on the Qubole platform.
    The implementation is based on the Qubole qds_sdk Python version. For
    detailed information, please check: https://github.com/qubole/qds-sdk-py.
    The following configuration paths are supposed to be absolute paths on s3:
    - self.config.USER_LIBJAR_DIRS
    - self.config.USER_APPJAR_PATH
    - self.config.USER_ARCHIVE_PATH
    This executor will generate a shell command line, which downloads all
    resource files to a node inside Qubole cluster and then submits the actual
    Hadoop job to the cluster from that node.
    TODO(csliu):
    - make executor work when jars are stored on HDFS.
    - make Hadoop streaming work for this executor.
    """
    class Config(ClusterExecutor.Config):
        # Qubole API token required to use Qubole's API, check
        # https://api.qubole.com/users/edit for detail.
        if getpass.getuser() == 'prod':
            # Should be configured in executor ctor later.
            API_TOKEN = ''
        else:
            # Make it flexible if developer runs it manually.
            API_TOKEN = os.environ.get('QUBOLE_API_TOKEN')
        API_URL = 'https://api.qubole.com/api/'
        API_VERSION = 'v1.2'
        POLL_INTERVAL_SEC = 20
        # JobConf key used to select the fair-scheduler pool for jobs.
        SCHEDULER_PARAM = 'mapred.fairscheduler.pool'
        PLATFORM = Platform.QUBOLE
    # These jars are already provided in Qubole's Hadoop distribution, so we need
    # to remove these jars from the Hadoop classpath, and also not ship these jars
    # over to the task nodes.
    QUBOLE_JARS_BLACKLIST = [
        'slf4j-api-1.7.2.jar',
        'slf4j-jdk14-1.6.4.jar',
        'slf4j-log4j12-1.7.2.jar',
        'log4j-1.2.15.jar',
        'log4j-over-slf4j-1.7.2.jar',
        'hadoop-core-1.0.3.jar',
        'hadoop-lzo-0.4.16.jar',
        'hadoop-test-1.0.3.jar',
        'hadoop-tools-1.0.3.jar',
        'jcl-over-slf4j-1.7.2.jar',
    ]
    # Number of retries for pulling status info from Quoble.
    NUM_RETRIES = 6
    # Initial delay for the retry of pulling status info from Quoble.
    INITIAL_DELAY = 60
    def __init__(self, executor_config=None):
        """Create the executor from a config dict.
        A 'QUBOLE_JARS_BLACKLIST' entry, if present, is converted from a
        comma-separated string into a list before being handed to the base
        class.
        NOTE(review): despite the None default, executor_config=None would
        raise AttributeError on .items() below -- callers apparently always
        pass a dict; confirm.
        """
        qubole_blacklist_jars = None
        for key, value in executor_config.items():
            if key == 'QUBOLE_JARS_BLACKLIST':
                qubole_blacklist_jars = value.split(',')
        if qubole_blacklist_jars:
            executor_config['QUBOLE_JARS_BLACKLIST'] = qubole_blacklist_jars
        super(QuboleExecutor, self).__init__(executor_config=executor_config)
    def run_hive_query(self, query_str, upload_archive=False):
        """Run a Hive query on Qubole and return (output, stderr, job_ids)."""
        # tmp is a keyword in Qubole: backtick-quote any bare `tmp` token in
        # the query so Qubole's Hive parser accepts it.
        regex = r'([\s\(\),;]|^)tmp([\s\(\),;]|$)'
        query_str = re.sub(regex, r'\g<1>`tmp`\g<2>', query_str)
        # _generate_hive_query_header/_get_scheduler_hive_setting are
        # presumably inherited from ClusterExecutor -- not defined here.
        full_query_string = self._generate_hive_query_header(
            upload_archive=upload_archive)
        full_query_string += self._get_scheduler_hive_setting()
        full_query_string += query_str
        self.log.info('Running query %s' % full_query_string)
        kwargs = dict(query=full_query_string)
        hc, output, stderr, job_ids = self._run_qubole_command_with_stderr(
            HiveCommand, self._hive_query_log_line_processor, kwargs)
        return output, stderr, job_ids
    def kill_command(self, qubole_jid):
        """Kills a qubole job with the given job_id."""
        self._configure_qubole()
        qubole_jid = int(qubole_jid)
        Command.cancel_id(qubole_jid)
    def get_job_result(self, qubole_jid):
        """Finds and retrieves results for existing Qubole job.
        Args:
            qubole_jid: qubole job id.
        Returns:
            Job stdout output.
        """
        self._configure_qubole()
        qubole_jid = str(qubole_jid)
        return self._get_qubole_command_output(HiveCommand.find(qubole_jid))
    def run_hadoop_job(self,
                       class_name,
                       jobconf_args=None,
                       extra_args=None,
                       extra_jars=None):
        """Run a Hadoop job in Qubole cluster.
        We assume extra_jars are stored on s3 and the path looks like:
        s3://pinball/%{USER}/some_jar_dir/
        We fail the entire command if pulling the JARs down from s3 fails,
        so we use "&&" to connect shell commands.
        """
        jobconf_args = jobconf_args if jobconf_args else {}
        extra_args = extra_args if extra_args else []
        extra_jars = extra_jars if extra_jars else []
        # The place where all jars are stored in s3.
        s3_jar_dirs = self.config.USER_LIBJAR_DIRS + extra_jars
        # The place where all jars will be copied to locally.
        local_jar_dir = '/tmp/hadoop_users/%s/%s' % \
            (self.config.USER, utils.get_random_string())
        download_jar_cmds = ['hadoop fs -get %s %s' % (s3_dir, local_jar_dir)
                             for s3_dir in s3_jar_dirs]
        download_jar_cmd = ' && '.join(download_jar_cmds)
        appjar_name = s3_utils.extract_file_name_from_s3_path(
            self.config.USER_APPJAR_PATH)
        download_jar_cmd += ' && hadoop fs -get %s %s/%s' % (
            self.config.USER_APPJAR_PATH,
            local_jar_dir,
            appjar_name
        )
        # Set default JobConf args.
        # NOTE(review): jobconf_args was already defaulted to {} above, so
        # this line only adds the .copy(); the None check is redundant.
        jobconf_args = {} if jobconf_args is None else jobconf_args.copy()
        if self.config.SCHEDULER_QUEUE:
            jobconf_args[self.config.SCHEDULER_PARAM] = \
                self.config.SCHEDULER_QUEUE
        jobconf_args['mapred.job.name'] = self.job_name
        # Create arguments.
        arguments = ' '.join('-D%s=%s' % (k, v) for k, v in jobconf_args.iteritems())
        arguments += ' '
        arguments += ' '.join(extra_args)
        libjars = self._get_libjars_local_paths(s3_jar_dirs, local_jar_dir)
        hadoop_classpath = '%s/*' % local_jar_dir
        cmd = 'mkdir -p %(local_jar_dir)s && %(download_jar_cmd)s'
        # Delete blacklisted jars that were pulled down with the lib dirs so
        # they do not shadow Qubole's own Hadoop distribution jars.
        files_to_be_deleted = []
        for qubole_jar in self.config.QUBOLE_JARS_BLACKLIST:
            files_to_be_deleted.append('%s/%s' % (local_jar_dir, qubole_jar))
        if files_to_be_deleted:
            cmd += ' && rm -f %s' % (' && rm -f '.join(files_to_be_deleted))
        # Generate command.
        var_dict = {
            'class_name': class_name,
            'arguments': arguments,
            'appjar_name': appjar_name,
            'download_jar_cmd': download_jar_cmd,
            'local_jar_dir': local_jar_dir,
            'hadoop_classpath': hadoop_classpath,
            'libjars': libjars,
        }
        cmd += (' && export HADOOP_CLASSPATH=%(hadoop_classpath)s'
                ' && hadoop jar %(local_jar_dir)s/%(appjar_name)s'
                ' %(class_name)s'
                ' -libjars %(libjars)s'
                ' %(arguments)s')
        # Always clean up the downloaded jars, preserving hadoop's exit code.
        cmd += ';\nEXIT_CODE=$?; \nrm -rf %(local_jar_dir)s; \nexit $EXIT_CODE;'
        cmd = cmd % var_dict
        # Log command messages.
        self.log.info('Full command: %s' % cmd)
        # Run command.
        hc, output, stderr, job_ids = self.run_shell_command(cmd)
        return output, stderr, job_ids
    def run_shell_command(self, cmd):
        """Run a shell command on the Qubole cluster.
        Returns the tuple (hc, output, stderr, job_ids) -- see
        _run_qubole_command_with_stderr.
        """
        kwargs = {'inline': cmd}
        hc, output, stderr, job_ids = self._run_qubole_command_with_stderr(
            ShellCommand,
            self._shell_command_log_line_processor,
            kwargs)
        return hc, output, stderr, job_ids
    def _retry_wrapper(self, fn, tries=4):
        """Call fn(), retrying on any Exception up to `tries` times."""
        return retry(Exception, tries=tries, logger=self.log)(fn)()
    def _get_libjars_local_paths(self, s3_jar_dirs, local_jar_dir):
        """Returns a list of local jar paths downloaded from s3.
        Args:
            s3_jar_dirs: S3 path from which we pull down JARs from.
            local_jar_dir: local path on every machine in the Qubole cluster
                to which jars are pulled down
        Returns:
            List of local file paths as a string, with each file name delimited
            comma (,), if the supplied s3_jar_dirs is valid. Otherwise, returns
            empty string.
        """
        file_paths = []
        for s3_jar_dir in s3_jar_dirs:
            file_paths += s3_utils.list_s3_directory(s3_jar_dir)
        jar_names = [
            s3_utils.extract_file_name_from_s3_path(file_path)
            for file_path in file_paths if str(file_path).endswith('jar')]
        # Drop jars that Qubole's own distribution already provides.
        filtered_jar_names = [
            jar_name
            for jar_name in jar_names if jar_name not in self.config.QUBOLE_JARS_BLACKLIST]
        # dedup jar lists.
        filtered_jar_names = list(set(filtered_jar_names))
        final_jar_paths = [
            '%s/%s' % (local_jar_dir, jar_name)
            for jar_name in filtered_jar_names]
        return ','.join(final_jar_paths)
    ############################################################################
    # Private helper functions for Qubole functions
    ############################################################################
    def _configure_qubole(self):
        """Configure the qds_sdk client from self.config and return the
        'qds' logger. Raises if no API token is configured."""
        logging.basicConfig(level=logging.INFO)
        logger = logging.getLogger('qds_connection')
        logger.propagate = False
        qdslog = logging.getLogger('qds')
        if not self.config.API_TOKEN:
            raise Exception("You didn't specify your QUBOLE_API_TOKEN in "
                            "your environment before running commands on "
                            "Qubole!\n. It can be found at http://api.qubole"
                            ".com/users/edit")
        Qubole.configure(api_token=self.config.API_TOKEN,
                         api_url=self.config.API_URL,
                         version=self.config.API_VERSION,
                         poll_interval=self.config.POLL_INTERVAL_SEC)
        return qdslog
    def _get_qubole_command_output(self, q_cmd):
        """Return the stdout output from a Qubole command object.
        Args:
            q_cmd: the qubole command object
        Returns:
            Query stdout output as a list of rows, each row a list of
            tab-separated field strings.
        """
        raw_result_str = ''
        file_handler = StringIO.StringIO()
        if q_cmd.status == 'done':
            self._retry_wrapper(lambda: q_cmd.get_results(file_handler), tries=10)
            raw_result_str = file_handler.getvalue()
        file_handler.close()
        rows = []
        # Qubole writes very large outputs to S3,
        # with each field in this file delimited by ^A
        raw_result_str = raw_result_str.replace('\x01', '\t')
        lines = raw_result_str.strip().split('\n')
        for line in lines:
            if line == '[Empty]':
                continue
            if line.strip():
                rows.append(line.strip().split('\t'))
        return rows
    def _run_qubole_command_with_stderr(self, cls, log_line_processor, kwargs):
        """Run the Qubole command and print the log to sys.stderr in real time.
        Args:
            cls: the qds_sdk.command.Command subclass
            log_line_processor: callback applied to every new stderr log line
            kwargs: for the constructor of the qubole command class
        Returns:
            The tuple (hc, output, stderr, job_ids)
            hc: the qubole command object pointer
            output: the command output, same as run_raw_hive_query
            stderr: the command stderr, same as run_raw_hive_query
            job_ids: Hadoop job ids extracted from the log by the processor
        """
        self._configure_qubole()
        # TODO(mao): set proper number for the tries param.
        hc = self._retry_wrapper(lambda: cls.create(**kwargs), tries=10)
        # Emit the kill id so the surrounding infrastructure can cancel us.
        sys.stderr.write("PINBALL:kill_id=%s/%s\n" % (self.config.PLATFORM,
                                                      hc.id))
        sys.stderr.flush()
        f = None
        if log_line_processor:
            f = output_filter.OutputFilter(log_line_processor,
                                           output=sys.stderr)
        stderr_list = []
        self._job_ids = []
        # Poll until the command finishes, tolerating up to NUM_RETRIES
        # consecutive polling failures with exponential backoff.
        retry_exception = self.config.NUM_RETRIES
        retry_delay = self.config.INITIAL_DELAY
        while retry_exception > 0:
            try:
                if cls.is_done(hc.status):
                    break
                self.log.info("Sleeping for %s seconds and polling."
                              % Qubole.poll_interval)
                time.sleep(Qubole.poll_interval)
                hc = cls.find(hc.id)
                # TODO(csliu): polling entire error log file is very inefficient
                stderr = self._retry_wrapper(lambda: hc.get_log())
                stderr = stderr.strip().split('\n')
                # Only process log lines we have not seen in prior polls.
                for i in range(len(stderr_list), len(stderr)):
                    line = stderr[i] + "\n"
                    stderr_list.append([line])
                    if f:
                        f.process_and_output([line])
                # Get a successful status pulling from quoble, reset the retry
                # exception number
                retry_exception = self.config.NUM_RETRIES
            except Exception as e:
                retry_exception -= 1
                self.log.error("Got error %s when checking Qubole status."
                               " Going to retry %d more times." %
                               (e.message, retry_exception))
                time.sleep(retry_delay)
                retry_delay *= 2
        query_id = str(hc.id)
        self.log.info('Completed Query, Id: %s, Status: %s' %
                      (query_id, hc.status))
        if hc.status == 'error' or hc.status == 'cancelled':
            error_message = "Failed on query: %s" % query_id
            raise subprocess.CalledProcessError(1, error_message)
        elif hc.status == "running":
            # We fell out of the loop via exhausted retries, not completion.
            error_message = "The job is still running, but got too many " \
                            "qubole exceptions: %s" % query_id
            raise subprocess.CalledProcessError(1, error_message)
        self.log.info("Now receiving the query output.")
        output = self._get_qubole_command_output(hc)
        self.log.info("Received the query output.")
        # NOTE(review): message says "First 10 rows" but [:9] slices 9 rows.
        self.log.info("Output has %d rows. First 10 rows:\n\t%s"
                      % (len(output),
                         '\n\t'.join([str(o) for o in output[:9]])))
        return hc, output, stderr_list, self._job_ids
    ############################################################################
    # Private log line processors.
    ############################################################################
    def _hive_query_log_line_processor(self, line):
        """ A callback function that gets executed for every line of
        stderr coming from the running job. Returns a dict of pinball
        metadata.
        """
        job_regex = r"Starting Job = (?P<job_id>.+?), " \
                    r"Tracking URL = <a href='(?P<job_url>.+?)'"
        m = re.search(job_regex, line)
        if m:
            job_id = m.group('job_id')
            job_url = m.group('job_url')
            if job_id and job_url:
                self._job_ids.append(job_id)
                return {'job_id': job_id, 'job_url': job_url}
        return {}
    def _shell_command_log_line_processor(self, line):
        """ A callback function that gets executed for every line of
        stderr coming from the running job. Returns a dict of pinball
        metadata.
        """
        job_regex = r"Submitted job: (?P<job_id>.+)"
        m = re.search(job_regex, line)
        if m:
            job_id = m.group('job_id')
            if job_id not in self._job_ids:
                self._job_ids.append(job_id)
            return {'job_id': job_id}
        # Checking "Running job" besides of "Submitted job" makes
        # job_id extraction more reliable even we miss some logs.
        job_regex = r"Running job: (?P<job_id>.+)"
        m = re.search(job_regex, line)
        if m:
            job_id = m.group('job_id')
            if job_id not in self._job_ids:
                self._job_ids.append(job_id)
            return {'job_id': job_id}
        url_regex = r"Tracking URL: <a href='(?P<job_url>.+?)'"
        m = re.search(url_regex, line)
        if m:
            job_url = m.group('job_url')
            return {'job_url': job_url}
        return {}
| {
"content_hash": "298582faedd595e28f3e0a21e2fd6e41",
"timestamp": "",
"source": "github",
"line_count": 444,
"max_line_length": 91,
"avg_line_length": 38.53603603603604,
"alnum_prop": 0.5548801870251315,
"repo_name": "jparise/pinball",
"id": "f353c91a3fe15eeeb0c111258add41eab659b77e",
"size": "17691",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "pinball_ext/executor/qubole_executor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "100206"
},
{
"name": "HTML",
"bytes": "63662"
},
{
"name": "Java",
"bytes": "3670"
},
{
"name": "JavaScript",
"bytes": "1083731"
},
{
"name": "Makefile",
"bytes": "1016"
},
{
"name": "Python",
"bytes": "880457"
},
{
"name": "Shell",
"bytes": "463"
},
{
"name": "Thrift",
"bytes": "8532"
}
],
"symlink_target": ""
} |
"""Data structures for programs executable on a quantum runtime."""
import abc
import dataclasses
from dataclasses import dataclass
from typing import Union, Tuple, Optional, Sequence, cast, Dict, Any, List, Iterator
import cirq
from cirq import _compat, study
class ExecutableSpec(metaclass=abc.ABCMeta):
    """Specification metadata about an executable.
    Subclasses should add problem-specific fields.
    """
    # NOTE(review): defaults to NotImplemented, presumably so concrete
    # subclasses are expected to override it -- confirm.
    executable_family: str = NotImplemented
    """A unique name to group executables."""
@dataclass(frozen=True)
class KeyValueExecutableSpec(ExecutableSpec):
    """A generic executable spec whose metadata is a list of key-value pairs.
    The key-value pairs define an implicit data schema. Consider defining a problem-specific
    subclass of `ExecutableSpec` instead of using this class to realize the benefits of having
    an explicit schema.
    See Also:
        `KeyValueExecutableSpec.from_dict` will use a dictionary to populate `key_value_pairs`.
    Args:
        executable_family: A unique name to group executables.
        key_value_pairs: A tuple of key-value pairs. The keys should be strings but the values
            can be any immutable object. Note that the order of the key-value pairs does NOT matter
            when comparing two objects.
    """
    executable_family: str
    key_value_pairs: Tuple[Tuple[str, Any], ...] = ()
    def to_dict(self) -> Dict[str, Any]:
        """Return the key-value pairs as a plain dict."""
        return dict(self.key_value_pairs)
    @classmethod
    def _json_namespace_(cls) -> str:
        # Namespace used by cirq's JSON serialization protocol.
        return 'cirq.google'
    def _json_dict_(self) -> Dict[str, Any]:
        return cirq.dataclass_json_dict(self)
    @classmethod
    def from_dict(cls, d: Dict[str, Any], *, executable_family: str) -> 'KeyValueExecutableSpec':
        """Build a spec from a dict, freezing its items into key_value_pairs."""
        return cls(
            executable_family=executable_family, key_value_pairs=tuple((k, v) for k, v in d.items())
        )
    @classmethod
    def _from_json_dict_(
        cls, executable_family: str, key_value_pairs: List[List[Union[str, Any]]], **kwargs
    ) -> 'KeyValueExecutableSpec':
        # **kwargs absorbs any extra keys present in the JSON record.
        return cls(
            executable_family=executable_family,
            key_value_pairs=tuple((k, v) for k, v in key_value_pairs),
        )
    def __repr__(self) -> str:
        return cirq._compat.dataclass_repr(self, namespace='cirq_google')
    # NOTE(review): __eq__ is order-insensitive, but the dataclass-provided
    # __hash__ (frozen=True) hashes the field tuple, which IS order-sensitive
    # -- equal specs with differently ordered pairs may hash differently;
    # confirm intended.
    def __eq__(self, other):
        # The conversion to a dict object is required so that the order of the keys doesn't matter.
        return (self.executable_family == other.executable_family) and (
            dict(self.key_value_pairs) == dict(other.key_value_pairs)
        )
@dataclass(frozen=True)
class BitstringsMeasurement:
    """Use in-circuit MeasurementGate to collect many repetitions of strings of bits.
    This is the lowest-level measurement type allowed in `QuantumExecutable` and behaves
    identically to the `cirq.Sampler.run` function. The executable's circuit must contain
    explicit measurement gates.
    Args:
        n_repetitions: The number of repetitions to execute the circuit.
    """
    n_repetitions: int
    @classmethod
    def _json_namespace_(cls) -> str:
        # Namespace used by cirq's JSON serialization protocol.
        return 'cirq.google'
    def _json_dict_(self):
        return cirq.dataclass_json_dict(self)
    def __repr__(self):
        return cirq._compat.dataclass_repr(self, namespace='cirq_google')
# A single (symbol, value) parameter assignment; QuantumExecutable.params is
# stored as a tuple of these pairs.
TParamPair = Tuple[cirq.TParamKey, cirq.TParamVal]
@dataclass(frozen=True)
class QuantumExecutable:
    """An executable quantum program.
    This serves a similar purpose to `cirq.Circuit` with some key differences. First, a quantum
    executable contains all the relevant context for execution including parameters as well as
    the desired number of repetitions. Second, this object is immutable. Finally, there are
    optional fields enabling a higher level of abstraction for certain aspects of the executable.
    Attributes:
        circuit: A `cirq.Circuit` describing the quantum operations to execute.
        measurement: A description of the measurement properties or process.
        params: An immutable `cirq.ParamResolver` (or similar type). Its representation is
            normalized to a tuple of key value pairs.
        spec: Optional `cg.ExecutableSpec` containing metadata about this executable that is not
            used by the quantum runtime, but will be forwarded to all downstream result objects.
        problem_topology: Optional `cirq.NamedTopology` instance specifying the topology of the
            circuit. This is useful when optimizing on-device layout. If none is provided we
            assume `circuit` already has a valid on-device layout.
        initial_state: A `cirq.ProductState` specifying the desired initial state before executing
            `circuit`. If not specified, default to the all-zeros state.
    """
    circuit: cirq.FrozenCircuit
    measurement: BitstringsMeasurement
    params: Optional[Tuple[TParamPair, ...]] = None
    spec: Optional[ExecutableSpec] = None
    problem_topology: Optional[cirq.NamedTopology] = None
    initial_state: Optional[cirq.ProductState] = None
    # pylint: disable=missing-raises-doc
    def __init__(
        self,
        circuit: cirq.AbstractCircuit,
        measurement: BitstringsMeasurement,
        params: Union[Sequence[TParamPair], cirq.ParamResolverOrSimilarType] = None,
        spec: Optional[ExecutableSpec] = None,
        problem_topology: Optional[cirq.NamedTopology] = None,
        initial_state: Optional[cirq.ProductState] = None,
    ):
        """Initialize the quantum executable.
        The actual fields in this class are immutable, but we allow more liberal input types
        which will be frozen in this __init__ method.
        Args:
            circuit: The circuit. This will be frozen before being set as an attribute.
            measurement: A description of the measurement properties or process.
            params: A cirq.ParamResolverOrSimilarType which will be frozen into a tuple of
                key value pairs.
            spec: Specification metadata about this executable that is not used by the quantum
                runtime, but is persisted in result objects to associate executables with results.
            problem_topology: Description of the multiqubit gate topology present in the circuit.
                If not specified, the circuit must be compatible with the device topology.
            initial_state: How to initialize the quantum system before running `circuit`. If not
                specified, the device will be initialized into the all-zeros state.
        Raises:
            ValueError: If `params` is not a sequence of key-value pairs or a
                `cirq.ParamResolverOrSimilarType`.
        """
        # We care a lot about mutability in this class. No object is truly immutable in Python,
        # but we can get pretty close by following the example of dataclass(frozen=True), which
        # deletes this class's __setattr__ magic method. To set values ever, we use
        # object.__setattr__ in this __init__ function.
        #
        # We write our own __init__ function to be able to accept a wider range of input formats
        # that can be easily converted to our native, immutable format.
        object.__setattr__(self, 'circuit', circuit.freeze())
        object.__setattr__(self, 'measurement', measurement)
        # Normalize `params` to a tuple of (key, value) pairs: accept an
        # already-normalized tuple, any sequence of 2-item pairs, or anything
        # cirq.ParamResolver accepts.
        if isinstance(params, tuple) and all(
            isinstance(param_kv, tuple) and len(param_kv) == 2 for param_kv in params
        ):
            frozen_params = params
        elif isinstance(params, Sequence) and all(
            isinstance(param_kv, Sequence) and len(param_kv) == 2 for param_kv in params
        ):
            frozen_params = tuple((k, v) for k, v in params)
        elif study.resolver._is_param_resolver_or_similar_type(params):
            param_resolver = cirq.ParamResolver(cast(cirq.ParamResolverOrSimilarType, params))
            frozen_params = tuple(param_resolver.param_dict.items())
        else:
            raise ValueError(f"`params` should be a ParamResolverOrSimilarType, not {params}.")
        object.__setattr__(self, 'params', frozen_params)
        object.__setattr__(self, 'spec', spec)
        object.__setattr__(self, 'problem_topology', problem_topology)
        object.__setattr__(self, 'initial_state', initial_state)
        # Hash may be expensive to compute, especially for large circuits.
        # This should be safe since this class should be immutable. This line will
        # also check for hashibility of members at construction time.
        object.__setattr__(self, '_hash', hash(dataclasses.astuple(self)))
    def __str__(self):
        return f'QuantumExecutable(spec={self.spec})'
    def __repr__(self):
        return _compat.dataclass_repr(self, namespace='cirq_google')
    @classmethod
    def _json_namespace_(cls) -> str:
        # Namespace used by cirq's JSON serialization protocol.
        return 'cirq.google'
    def _json_dict_(self):
        return cirq.dataclass_json_dict(self)
@dataclass(frozen=True)
class QuantumExecutableGroup:
    """A collection of `QuantumExecutable`s.
    Attributes:
        executables: A tuple of `cg.QuantumExecutable`.
    """
    executables: Tuple[QuantumExecutable, ...]
    def __init__(self, executables: Sequence[QuantumExecutable]):
        """Initialize and normalize the quantum executable group.
        Args:
            executables: A sequence of `cg.QuantumExecutable` which will be frozen into a
                tuple.
        """
        if not isinstance(executables, tuple):
            executables = tuple(executables)
        object.__setattr__(self, 'executables', executables)
        # Precompute the hash once; object.__setattr__ is used because the
        # dataclass is frozen.
        object.__setattr__(self, '_hash', hash(dataclasses.astuple(self)))
    def __len__(self) -> int:
        """Return the number of executables in the group."""
        return len(self.executables)
    def __iter__(self) -> Iterator[QuantumExecutable]:
        """Iterate over the contained executables in order."""
        yield from self.executables
    def __str__(self) -> str:
        # Show at most two executables, then elide the rest.
        exe_str = ', '.join(str(exe) for exe in self.executables[:2])
        if len(self.executables) > 2:
            exe_str += ', ...'
        return f'QuantumExecutableGroup(executables=[{exe_str}])'
    def __repr__(self) -> str:
        return _compat.dataclass_repr(self, namespace='cirq_google')
    def __hash__(self) -> int:
        # Return the hash precomputed in __init__.
        return self._hash  # type: ignore
    @classmethod
    def _json_namespace_(cls) -> str:
        # Namespace used by cirq's JSON serialization protocol.
        return 'cirq.google'
    def _json_dict_(self) -> Dict[str, Any]:
        return cirq.dataclass_json_dict(self)
| {
"content_hash": "bae4e6b324e884c3a9732920e1679c5a",
"timestamp": "",
"source": "github",
"line_count": 260,
"max_line_length": 100,
"avg_line_length": 39.65,
"alnum_prop": 0.665534969444175,
"repo_name": "quantumlib/Cirq",
"id": "3b256aac305aa86dbd5a89d0b97677fc1fb47bf3",
"size": "10894",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cirq-google/cirq_google/workflow/quantum_executable.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "4616"
},
{
"name": "HTML",
"bytes": "262"
},
{
"name": "JavaScript",
"bytes": "660"
},
{
"name": "Jupyter Notebook",
"bytes": "672675"
},
{
"name": "Makefile",
"bytes": "634"
},
{
"name": "Python",
"bytes": "8643017"
},
{
"name": "Scilab",
"bytes": "735"
},
{
"name": "Shell",
"bytes": "64230"
},
{
"name": "TypeScript",
"bytes": "91766"
}
],
"symlink_target": ""
} |
import unittest
import copy
import config
import thread_cert
from pktverify.packet_verifier import PacketVerifier
# Node identifiers used as keys into the TOPOLOGY dict below.
LEADER = 1
ROUTER = 2
MTD = 3
# ICMPv6 echo payload size (bytes) used for the "fragmented" pings in this test.
FRAGMENTED_DATA_LEN = 256
# Test Purpose and Description:
# -----------------------------
# The purpose of this test case is to validate the Realm-Local addresses
# that the DUT configures.
#
# Test Topology:
# -------------
# Leader
# |
# Router
# |
# DUT
#
# DUT Types:
# ----------
# ED
# SED
class Cert_6_4_2_RealmLocal_Base(thread_cert.TestCase):
    """Thread certification test 6.4.2: validate the realm-local addresses
    the DUT configures, by pinging its mesh-local EID and (for the ED
    variant) the realm-local multicast addresses, with both fragmented and
    unfragmented payloads.

    Subclasses choose the DUT variant by overriding TOPOLOGY[MTD]['mode']
    ('rn' for ED, '-' for SED); test() and verify() gate the multicast
    steps on that mode value.
    """
    TOPOLOGY = {
        LEADER: {
            'name': 'LEADER',
            'mode': 'rdn',
            'panid': 0xface,
            'allowlist': [ROUTER]
        },
        ROUTER: {
            'name': 'ROUTER',
            'mode': 'rdn',
            'panid': 0xface,
            'router_selection_jitter': 1,
            'allowlist': [LEADER, MTD]
        },
        MTD: {
            'name': 'DUT',
            'is_mtd': True,
            'panid': 0xface,
            'timeout': config.DEFAULT_CHILD_TIMEOUT,
            'allowlist': [ROUTER]
        },
    }
    def test(self):
        """Bring up the topology and issue the pings; packet-level
        assertions are made later in verify()."""
        # Form the network: leader first, then router, then the DUT child.
        self.nodes[LEADER].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[LEADER].get_state(), 'leader')
        self.nodes[ROUTER].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[ROUTER].get_state(), 'router')
        self.nodes[MTD].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[MTD].get_state(), 'child')
        self.collect_ipaddrs()
        self.collect_rloc16s()
        # Steps 2-3: unicast pings to the DUT's ML-EID, fragmented then not.
        dut_addr = self.nodes[MTD].get_ip6_address(config.ADDRESS_TYPE.ML_EID)
        self.assertTrue(self.nodes[LEADER].\
                        ping(dut_addr,
                             size=FRAGMENTED_DATA_LEN))
        self.simulator.go(1)
        self.assertTrue(self.nodes[LEADER].\
                        ping(dut_addr))
        self.simulator.go(1)
        # Steps 4-7: realm-local multicast pings, only for the ED variant.
        if self.TOPOLOGY[MTD]['mode'] == 'rn':
            self.assertTrue(self.nodes[LEADER].\
                            ping(config.REALM_LOCAL_ALL_NODES_ADDRESS,
                                 num_responses=2,
                                 size=FRAGMENTED_DATA_LEN))
            self.simulator.go(2)
            self.assertTrue(self.nodes[LEADER].\
                            ping(config.REALM_LOCAL_ALL_NODES_ADDRESS,
                                 num_responses=2))
            self.simulator.go(2)
            self.assertTrue(self.nodes[LEADER].\
                            ping(config.REALM_LOCAL_All_THREAD_NODES_MULTICAST_ADDRESS,
                                 num_responses=2,
                                 size=FRAGMENTED_DATA_LEN))
            self.simulator.go(2)
            self.assertTrue(self.nodes[LEADER].\
                            ping(config.REALM_LOCAL_All_THREAD_NODES_MULTICAST_ADDRESS,
                                 num_responses=2))
            self.simulator.go(2)
    def verify(self, pv):
        """Verify the packet trace captured by test() against the cert
        test plan steps listed in the header comment."""
        pkts = pv.pkts
        pv.summary.show()
        # NOTE: these locals shadow the module-level node-id constants;
        # MTD used below still refers to the module-level constant.
        LEADER = pv.vars['LEADER']
        LEADER_MLEID = pv.vars['LEADER_MLEID']
        ROUTER = pv.vars['ROUTER']
        ROUTER_MLEID = pv.vars['ROUTER_MLEID']
        ROUTER_RLOC16 = pv.vars['ROUTER_RLOC16']
        DUT = pv.vars['DUT']
        DUT_MLEID = pv.vars['DUT_MLEID']
        DUT_RLOC16 = pv.vars['DUT_RLOC16']
        # Step 1: Ensure topology is formed correctly
        pv.verify_attached('ROUTER', 'LEADER')
        pv.verify_attached('DUT', 'ROUTER', 'MTD')
        # Step 2: Leader sends a Fragmented ICMPv6 Echo Request to
        #         DUT's ML-EID
        #         The DUT MUST respond with an ICMPv6 Echo Reply
        _pkt = pkts.filter_ping_request().\
            filter_ipv6_src_dst(LEADER_MLEID, DUT_MLEID).\
            filter(lambda p: p.icmpv6.data.len == FRAGMENTED_DATA_LEN).\
            must_next()
        pkts.filter_ping_reply(identifier=_pkt.icmpv6.echo.identifier).\
            filter_ipv6_src_dst(DUT_MLEID, LEADER_MLEID).\
            filter(lambda p: p.icmpv6.data.len == FRAGMENTED_DATA_LEN).\
            must_next()
        # Step 3: Leader sends an Unfragmented ICMPv6 Echo Request to
        #         DUT's ML-EID
        #         The DUT MUST respond with an ICMPv6 Echo Reply
        _pkt = pkts.filter_ping_request().\
            filter_ipv6_src_dst(LEADER_MLEID, DUT_MLEID).\
            must_next()
        pkts.filter_ping_reply(identifier=_pkt.icmpv6.echo.identifier).\
            filter_ipv6_src_dst(DUT_MLEID, LEADER_MLEID).\
            must_next()
        if self.TOPOLOGY[MTD]['mode'] == 'rn':
            # Step 4: Leader sends a Fragmented ICMPv6 Echo Request to the
            #         Realm-Local All Nodes multicast address (FF03::1)
            #         The DUT MUST respond with an ICMPv6 Echo Reply
            _pkt1 = pkts.filter_ping_request().\
                filter_wpan_src64(LEADER).\
                filter_RLANMA().\
                filter(lambda p: p.icmpv6.data.len == FRAGMENTED_DATA_LEN).\
                must_next()
            with pkts.save_index():
                pkts.filter_ping_reply(identifier=_pkt1.icmpv6.echo.identifier).\
                    filter_ipv6_src_dst(ROUTER_MLEID, LEADER_MLEID).\
                    filter(lambda p: p.icmpv6.data.len == FRAGMENTED_DATA_LEN).\
                    must_next()
            # The request must NOT be forwarded on to the (MTD) DUT.
            pkts.filter_ping_request(identifier=_pkt1.icmpv6.echo.identifier).\
                filter_wpan_src16_dst16(ROUTER_RLOC16, DUT_RLOC16).\
                filter(lambda p: p.icmpv6.data.len == FRAGMENTED_DATA_LEN).\
                must_not_next()
            # Step 5: Leader sends an Unfragmented ICMPv6 Echo Request to the
            #         Realm-Local All Nodes multicast address (FF03::1)
            #         The DUT MUST respond with an ICMPv6 Echo Reply
            _pkt2 = pkts.filter_ping_request().\
                filter_wpan_src64(LEADER).\
                filter_RLANMA().\
                filter(lambda p: p.icmpv6.echo.sequence_number !=
                       _pkt1.icmpv6.echo.sequence_number
                       ).\
                must_next()
            with pkts.save_index():
                pkts.filter_ping_reply(identifier=_pkt2.icmpv6.echo.identifier).\
                    filter_ipv6_src_dst(ROUTER_MLEID, LEADER_MLEID).\
                    must_next()
            pkts.filter_ping_request(identifier = _pkt2.icmpv6.echo.identifier).\
                filter_wpan_src16_dst16(ROUTER_RLOC16, DUT_RLOC16).\
                must_not_next()
            # Step 6: Leader sends a Fragmented ICMPv6 Echo Request to the
            #         Realm-Local All Thread Nodes multicast address
            #         The DUT MUST respond with an ICMPv6 Echo Reply
            _pkt = pkts.filter_ping_request().\
                filter_wpan_src64(LEADER).\
                filter_RLATNMA().\
                filter(lambda p: p.icmpv6.data.len == FRAGMENTED_DATA_LEN).\
                must_next()
            pkts.filter_ping_reply(identifier=_pkt.icmpv6.echo.identifier).\
                filter_wpan_src64(DUT).\
                filter_ipv6_dst(LEADER_MLEID).\
                filter(lambda p: p.icmpv6.data.len == FRAGMENTED_DATA_LEN).\
                must_next()
            # Step 7: Leader sends an Unfragmented ICMPv6 Echo Request to the
            #         Realm-Local All Thread Nodes multicast address
            #         The DUT MUST respond with an ICMPv6 Echo Reply
            _pkt = pkts.filter_ping_request().\
                filter_wpan_src64(LEADER).\
                filter_RLATNMA().\
                filter(lambda p: p.icmpv6.data.len != FRAGMENTED_DATA_LEN).\
                must_next()
            pkts.filter_ping_reply(identifier=_pkt.icmpv6.echo.identifier).\
                filter_wpan_src64(DUT).\
                filter_ipv6_dst(LEADER_MLEID).\
                must_next()
class Cert_6_4_2_RealmLocal_ED(Cert_6_4_2_RealmLocal_Base):
    # End Device variant: mode 'rn' enables the realm-local multicast
    # steps that test()/verify() gate on TOPOLOGY[MTD]['mode'] == 'rn'.
    TOPOLOGY = copy.deepcopy(Cert_6_4_2_RealmLocal_Base.TOPOLOGY)
    TOPOLOGY[MTD]['mode'] = 'rn'
class Cert_6_4_2_RealmLocal_SED(Cert_6_4_2_RealmLocal_Base):
    # Sleepy End Device variant: mode '-' skips the multicast steps.
    TOPOLOGY = copy.deepcopy(Cert_6_4_2_RealmLocal_Base.TOPOLOGY)
    TOPOLOGY[MTD]['mode'] = '-'
# Delete the abstract base so unittest discovery doesn't run it directly.
del (Cert_6_4_2_RealmLocal_Base)
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "0705cdb3bf75ab7b7c36293791a3cf66",
"timestamp": "",
"source": "github",
"line_count": 225,
"max_line_length": 83,
"avg_line_length": 36.208888888888886,
"alnum_prop": 0.5428992267092181,
"repo_name": "bukepo/openthread",
"id": "41cfc7c970a4ac72b521849400e212ce25da1357",
"size": "9754",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/scripts/thread-cert/Cert_6_4_02_RealmLocal.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "50"
},
{
"name": "C",
"bytes": "1080565"
},
{
"name": "C++",
"bytes": "5839893"
},
{
"name": "CMake",
"bytes": "95509"
},
{
"name": "Dockerfile",
"bytes": "6286"
},
{
"name": "M4",
"bytes": "36443"
},
{
"name": "Makefile",
"bytes": "161153"
},
{
"name": "Python",
"bytes": "3379923"
},
{
"name": "Shell",
"bytes": "134708"
}
],
"symlink_target": ""
} |
import threading
import sys
import os
import json
import itertools
import time
from queue import Queue
import urllib.parse
import requests
from .common.utils import log
from .common import utils
# names to export
__all__ = ['stackhut_api_call', 'stackhut_api_user_call', 'keen_client', 'get_res_path', 'Spinner']
# Setup app paths - this is unique for each stackhut package
# Resources live next to the executable when frozen into a binary
# (sys.frozen set), otherwise next to this module.
sys_dir = os.path.dirname(sys.executable) if getattr(sys, 'frozen', False) else os.path.dirname(__file__)
res_dir = os.path.normpath(os.path.join(sys_dir, './res'))
def get_res_path(res_name):
    """Return the filesystem path of a bundled resource file."""
    full_path = os.path.join(res_dir, res_name)
    return full_path
###################################################################################################
# StackHut server comms
json_header = {'content-type': 'application/json'}
def stackhut_api_call(endpoint, msg, secure=True, return_json=True):
    """POST a JSON message to the StackHut server and return the response.

    Returns the parsed JSON body (or raw text when ``return_json`` is
    False) on HTTP 200; otherwise logs the failure and lets
    ``raise_for_status`` raise for error status codes.
    """
    url = urllib.parse.urljoin(utils.SERVER_URL, endpoint)
    log.debug("Calling Stackhut Server at {} with \n\t{}".format(url, json.dumps(msg)))
    resp = requests.post(url, data=json.dumps(msg), headers=json_header)
    if resp.status_code == requests.codes.ok:
        return resp.json() if return_json else resp.text
    log.error("Error {} talking to Stackhut Server".format(resp.status_code))
    log.error(resp.text)
    resp.raise_for_status()
def stackhut_api_user_call(endpoint, _msg, usercfg):
    """Call stackhut_api_call with the user's auth credentials attached."""
    msg = dict(_msg)
    msg['auth'] = {'username': usercfg.username, 'hash': usercfg['hash']}
    return stackhut_api_call(endpoint, msg)
###################################################################################################
# Keen analytlics
class KeenClient(threading.Thread):
    """Background worker that ships analytics events to keen.io.

    Events are queued via ``send`` and POSTed one at a time by the worker
    thread.  Nothing is queued, and the thread is never started, unless
    the user opted in to analytics via their user config (``start``).
    """
    project_id = '559f866f96773d25d47419f6'
    write_key = 'abd65ad8684753678eabab1f1c536b36a70704e6c4f10bcfe928c10ec859edb1d0366f3fad9b7794b0' \
                'eeab9825a27346e0186e2e062f76079708b66ddfca7ecc82b8db23062f8cd2e4f6a961d8d2ea23b22f' \
                'c9aae1387514da6d46cdbebec2d15c9167d401963ee8f96b00e06acf4e48'
    keen_url = "https://api.keen.io/3.0/projects/{project_id}/events/{{event_collection}}?" \
               "api_key={write_key}".format(project_id=project_id, write_key=write_key)
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.send_analytics = False  # opt-in flag, set from usercfg in start()
        self.analytics_ids = None    # per-user ids merged into every event
        self.queue = Queue()
    def start(self, usercfg):
        """Start the worker thread only if the user enabled analytics.

        :param usercfg: user config providing ``send_analytics`` and
                        ``analytics_ids``
        """
        self.send_analytics = usercfg.send_analytics
        if self.send_analytics:
            log.debug("User analytics enabled")
            self.analytics_ids = usercfg.analytics_ids
            super().start()
        else:
            log.debug("User analytics disabled")
    def run(self):
        """Worker loop: pop events off the queue and POST them to keen.io."""
        while True:
            (endpoint, msg) = self.queue.get()
            msg.update(self.analytics_ids)
            try:
                log.debug("Sending analytics msg to {}".format(endpoint))
                # log.debug("Analytics msg - {}".format(msg))
                url = self.keen_url.format(event_collection=endpoint)
                r = requests.post(url, data=json.dumps(msg), headers=json_header, timeout=2)
                if not (r.status_code == requests.codes.created and r.json().get('created')):
                    # BUG FIX: Response.text is a property, not a method --
                    # calling r.text() raised TypeError, which the old bare
                    # except masked, so this debug line never ran.
                    log.debug("{} - {}".format(r.status_code, r.text))
                    raise IOError()
            except Exception:
                # Analytics are best effort: log and keep the worker alive,
                # but don't swallow SystemExit/KeyboardInterrupt like the
                # previous bare ``except:`` did.
                log.debug("Failed sending analytics msg to '{}'".format(endpoint))
            finally:
                self.queue.task_done()
    def send(self, endpoint, msg):
        """Queue an event for delivery; no-op when analytics are disabled."""
        if self.send_analytics:
            self.queue.put((endpoint, msg))
    def shutdown(self):
        """Block until every queued event has been processed."""
        if self.send_analytics:
            self.queue.join()
keen_client = KeenClient(daemon=True)
class Spinner(threading.Thread):
    """Console spinner for long-running tasks.

    Use as a context manager: the spinner thread runs while the ``with``
    block is active, drawing a rotating character and leaving a progress
    dot roughly every ``dot_interval`` seconds.
    """
    spin_interval = 0.5
    dot_interval = 10
    dot_max = int(dot_interval / spin_interval)
    def __init__(self):
        super().__init__(daemon=True)
        self.spinning = threading.Event()
        self.spinner = itertools.cycle(['-', '\\', '|', '/'])
    def __enter__(self):
        self.spinning.set()
        self.start()
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        self.spinning.clear()
    def run(self):
        """Worker loop: animate until the spinning event is cleared."""
        ticks = 0
        while self.spinning.is_set():
            sys.stdout.write(next(self.spinner))  # draw the next frame
            sys.stdout.flush()                    # make it visible now
            sys.stdout.write('\b')                # rewind over the frame
            time.sleep(self.spin_interval)
            ticks += 1
            if ticks >= self.dot_max:
                sys.stdout.write('.')             # permanent progress dot
                ticks = 0
        sys.stdout.write('\n')
    def stop(self):
        self.spinning.clear()
| {
"content_hash": "aab94dd8da062e5fb7357aa76f0f3d84",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 105,
"avg_line_length": 35.62043795620438,
"alnum_prop": 0.5885245901639344,
"repo_name": "StackHut/stackhut",
"id": "626cd6ef5392d74964e9f7de43caa984f6a9e294",
"size": "5453",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "stackhut_toolkit/toolkit_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "6705"
},
{
"name": "JavaScript",
"bytes": "7363"
},
{
"name": "Makefile",
"bytes": "8553"
},
{
"name": "Python",
"bytes": "248858"
},
{
"name": "Shell",
"bytes": "4583"
}
],
"symlink_target": ""
} |
import requests
from send_mail import SendMail
import logging
from string import Template
import base64
class SendReminders:
    """Runs a reminder query, renders per-row template data and sends one
    reminder e-mail per matching record via Mandrill."""
    def __init__(self, db_cursor, mandrill_config):
        self.db_cursor = db_cursor
        self.mandrill_config = mandrill_config
        self.logger = logging.getLogger(__name__)
    def Send(self, reminder_config):
        """Execute the configured query and send a reminder per row.

        Returns a ``(total_sent, total_error, query)`` tuple.
        """
        query = reminder_config.get_query()
        self.logger.info('QUERY=%s' % query)
        total_records = self.db_cursor.execute(query)
        self.logger.info('Records in cursor set: %d' % total_records)
        mailer = SendMail(self.mandrill_config)
        sent_count = 0
        error_count = 0
        fields = self.db_cursor.description
        for record in self.db_cursor:
            # Per-row merge variables for the Mandrill template.
            template_content = [{"name": field[0], "content": record[field[0]]} for field in fields]
            email = record['email']
            name = record['name']
            record_content = {field[0]: record[field[0]] for field in fields}
            attachments = self.__build_attachments_parameter(reminder_config.attachment_url, record_content)
            was_sent, reject_reason = mailer.send_using_template(email, name, reminder_config.reminder_template_name, template_content, attachments)
            if was_sent == False:
                self.logger.error('Email {}. Reason: {}'.format(email, reject_reason))
                error_count += 1
                continue
            self.__update_record(reminder_config.update, record_content)
            sent_count += 1
            self.logger.info('OK: Email {}'.format(email))
        self.logger.info('Sent with success: {} Fail: {}'.format(sent_count, error_count))
        return sent_count, error_count, query
    def __build_attachments_parameter(self, attachment_url, row_content):
        """Download and base64-encode the row's attachment; ``None`` when no
        attachment URL is configured."""
        if attachment_url == '':
            return None
        resolved_url = Template(attachment_url).safe_substitute(row_content)
        self.logger.info('URL=%s' % resolved_url)
        response = requests.get(resolved_url)
        encoded_content = base64.b64encode(response.content)
        return [{'content': encoded_content, 'name': 'boleto_atar.pdf', 'type': 'application/pdf'}]
    def __update_record(self, update_clausule, row_content):
        """Run the post-send UPDATE statement, if one was configured."""
        if update_clausule == '':
            return
        update_sql = Template(update_clausule).safe_substitute(row_content)
        self.logger.info('UPDATE SQL=%s' % update_sql)
        self.db_cursor.execute(update_sql)
        return
| {
"content_hash": "9115139ba69702600e2abebe83495c29",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 138,
"avg_line_length": 29.428571428571427,
"alnum_prop": 0.6157580283793876,
"repo_name": "alexgarzao/email_reminders",
"id": "634ef8b0385bc4e8dd74e8d7e779332d1a8a170f",
"size": "2790",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "email_reminders/send_reminders.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "422"
},
{
"name": "Python",
"bytes": "15467"
}
],
"symlink_target": ""
} |
import os
import time
from subprocess import Popen as execute
from twisted.internet.defer import Deferred
#---------------------------------------------------------------------------#
# configure the client logging
#---------------------------------------------------------------------------#
import logging
log = logging.getLogger(__name__)
class ContextRunner(object):
    '''
    This is the base runner class for all the integration tests.

    Subclasses are expected to provide ``self.context`` (a modbus data
    context) before ``initialize`` is called, and to mix this class into
    a unittest ``TestCase`` so the ``assert*`` helpers exist.
    '''
    __bit_functions = [2,1] # coil/discrete function codes; redundant are removed for now
    __reg_functions = [4,3] # register function codes; redundant are removed for now
    def initialize(self, service=None):
        ''' Initializes the test environment, optionally launching a
        background service process with its output discarded.

        :param service: Optional command to spawn before testing
        '''
        if service:
            self.fnull = open(os.devnull, 'w')
            self.service = execute(service, stdout=self.fnull, stderr=self.fnull)
            log.debug("%s service started: %s", service, self.service.pid)
            time.sleep(0.2)  # give the service a moment to come up
        else: self.service = None
        log.debug("%s context started", self.context)
    def shutdown(self):
        ''' Cleans up the test environment (best effort) '''
        try:
            if self.service:
                self.service.kill()
                self.fnull.close()
            self.context.reset()
        except Exception:
            # Cleanup is best effort, but the previous bare ``except:``
            # also swallowed SystemExit/KeyboardInterrupt; only mask
            # ordinary errors here.
            pass
        log.debug("%s context stopped" % self.context)
    def testDataContextRegisters(self):
        ''' Test that the context gets and sets registers '''
        address = 10
        values = [0x1234] * 32
        for fx in self.__reg_functions:
            self.context.setValues(fx, address, values)
            result = self.context.getValues(fx, address, len(values))
            # assertEqual: assertEquals is a deprecated alias
            self.assertEqual(len(result), len(values))
            self.assertEqual(result, values)
    def testDataContextDiscretes(self):
        ''' Test that the context gets and sets discretes '''
        address = 10
        values = [True] * 32
        for fx in self.__bit_functions:
            self.context.setValues(fx, address, values)
            result = self.context.getValues(fx, address, len(values))
            self.assertEqual(len(result), len(values))
            self.assertEqual(result, values)
| {
"content_hash": "8ac67fd43d306df1b6b73584257a0e60",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 81,
"avg_line_length": 37,
"alnum_prop": 0.5643056849953402,
"repo_name": "uzumaxy/pymodbus3",
"id": "2782485587ab79f8290117dc747194f32c49e7e2",
"size": "2146",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "examples/functional/base_context.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "459633"
}
],
"symlink_target": ""
} |
"""
WSGI config for djangoSRV project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "djangoSRV.settings"
# setdefault: a DJANGO_SETTINGS_MODULE already present in the environment
# wins over this fallback value.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "djangoSRV.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| {
"content_hash": "1dd3d6f2e764bb6791c110dadcdf7c20",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 79,
"avg_line_length": 44.625,
"alnum_prop": 0.7941176470588235,
"repo_name": "varun-verma11/CodeDrill",
"id": "cfbc6160ee3aa768a8103698ff0a3b3f627a6550",
"size": "1428",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "djangoSRV/djangoSRV/wsgi.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "85283"
},
{
"name": "JavaScript",
"bytes": "608943"
},
{
"name": "PHP",
"bytes": "29"
},
{
"name": "Python",
"bytes": "76311"
},
{
"name": "Ruby",
"bytes": "1882"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.