from swgpy.object import *
def create(kernel):
result = Static()
result.template = "object/static/item/shared_item_storage_cabinet_01.iff"
result.attribute_template_id = -1
result.stfName("obj_n","unknown_object")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
{
"content_hash": "7e5f5bef11c95d31535fff7da9975bbf",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 74,
"avg_line_length": 23.307692307692307,
"alnum_prop": 0.6897689768976898,
"repo_name": "obi-two/Rebelion",
"id": "42234c05d61a2b5c3cae3278944b1bc1acd615fc",
"size": "448",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/static/item/shared_item_storage_cabinet_01.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
}
|
"""
Created on Mon May 2 21:12:16 2016
Jan Cimbalnik
"""
import os

import pyedflib
abspath = os.path.abspath(__file__)
dname = os.path.dirname(abspath)
os.chdir(dname+'/../test_data/')
file_name = './test.edf'
# The hard-coded absolute path below shadowed the relative path above; it is
# kept commented out so the test reads the repository's test_data copy.
# file_name = '/home/jan_cimbalnik/Dropbox/HFO_detectors_project/HFO-detect-python/test_data/test.edf'
f = pyedflib.EdfReader(file_name)
ch_idx = f.getSignalLabels().index(b'DC01')
sig = f.readSignal(ch_idx)  # read the DC01 channel found above, not channel 0
|
{
"content_hash": "b3ba560d2ef2da7a0040e09c5ef8575e",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 100,
"avg_line_length": 22.666666666666668,
"alnum_prop": 0.7107843137254902,
"repo_name": "HFO-detect/HFO-detect-python",
"id": "388179fd55f3f82af1871079f16a0169410092b9",
"size": "432",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/file_read_test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "81373"
}
],
"symlink_target": ""
}
|
import json
from django.conf import settings
from django.core.urlresolvers import reverse
from django.test.client import Client, MULTIPART_CONTENT
from django.utils.encoding import force_text
from rest_framework import status
class APIClient(Client):
def patch(self, path, data='', content_type=MULTIPART_CONTENT, follow=False, **extra):
return self.generic('PATCH', path, data, content_type, **extra)
def options(self, path, data='', content_type=MULTIPART_CONTENT, follow=False, **extra):
return self.generic('OPTIONS', path, data, content_type, **extra)
class BaseAPITestCase(object):
"""
    base for API tests:
    * easy request calls, e.g. self.post(url, data), self.get(url)
    * easy status check, e.g. self.post(url, data, status_code=200)
"""
def send_request(self, request_method, *args, **kwargs):
request_func = getattr(self.client, request_method)
status_code = None
if 'content_type' not in kwargs and request_method != 'get':
kwargs['content_type'] = 'application/json'
if 'data' in kwargs and request_method != 'get' and kwargs['content_type'] == 'application/json':
data = kwargs.get('data', '')
kwargs['data'] = json.dumps(data) # , cls=CustomJSONEncoder
if 'status_code' in kwargs:
status_code = kwargs.pop('status_code')
# check_headers = kwargs.pop('check_headers', True)
if hasattr(self, 'token'):
if getattr(settings, 'REST_USE_JWT', False):
kwargs['HTTP_AUTHORIZATION'] = 'JWT %s' % self.token
else:
kwargs['HTTP_AUTHORIZATION'] = 'Token %s' % self.token
self.response = request_func(*args, **kwargs)
is_json = bool(
[x for x in self.response._headers['content-type'] if 'json' in x])
self.response.json = {}
if is_json and self.response.content:
self.response.json = json.loads(force_text(self.response.content))
if status_code:
self.assertEqual(self.response.status_code, status_code)
return self.response
def post(self, *args, **kwargs):
return self.send_request('post', *args, **kwargs)
def get(self, *args, **kwargs):
return self.send_request('get', *args, **kwargs)
def patch(self, *args, **kwargs):
return self.send_request('patch', *args, **kwargs)
# def put(self, *args, **kwargs):
# return self.send_request('put', *args, **kwargs)
# def delete(self, *args, **kwargs):
# return self.send_request('delete', *args, **kwargs)
# def options(self, *args, **kwargs):
# return self.send_request('options', *args, **kwargs)
# def post_file(self, *args, **kwargs):
# kwargs['content_type'] = MULTIPART_CONTENT
# return self.send_request('post', *args, **kwargs)
# def get_file(self, *args, **kwargs):
# content_type = None
# if 'content_type' in kwargs:
# content_type = kwargs.pop('content_type')
# response = self.send_request('get', *args, **kwargs)
# if content_type:
# self.assertEqual(
# bool(filter(lambda x: content_type in x, response._headers['content-type'])), True)
# return response
def init(self):
settings.DEBUG = True
self.client = APIClient()
self.login_url = reverse('rest_login')
self.logout_url = reverse('rest_logout')
self.password_change_url = reverse('rest_password_change')
self.register_url = reverse('rest_register')
self.password_reset_url = reverse('rest_password_reset')
self.user_url = reverse('rest_user_details')
        self.verify_email_url = reverse('rest_verify_email')
self.fb_login_url = reverse('fb_login')
self.tw_login_url = reverse('tw_login')
self.tw_login_no_view_url = reverse('tw_login_no_view')
self.tw_login_no_adapter_url = reverse('tw_login_no_adapter')
def _login(self):
payload = {
"username": self.USERNAME,
"password": self.PASS
}
self.post(self.login_url, data=payload, status_code=status.HTTP_200_OK)
def _logout(self):
        self.post(self.logout_url, status_code=status.HTTP_200_OK)
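
# Illustrative sketch (not part of the original module): a minimal concrete
# test case built on the BaseAPITestCase mixin. Django's TestCase supplies the
# runner plumbing; the mixin supplies the HTTP helpers and URL attributes set
# up in init(). The USERNAME/PASS values are hypothetical.
from django.contrib.auth import get_user_model
from django.test import TestCase


class ExampleLoginLogoutTest(BaseAPITestCase, TestCase):
    USERNAME = 'person'
    PASS = 'secret'

    def setUp(self):
        self.init()

    def test_login_logout(self):
        get_user_model().objects.create_user(self.USERNAME, '', self.PASS)
        self._login()
        self._logout()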
|
{
"content_hash": "55cfd3772402404769aa337c5a8393fc",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 105,
"avg_line_length": 37.85964912280702,
"alnum_prop": 0.6065801668211307,
"repo_name": "maxim-kht/django-rest-auth",
"id": "faaf7bb4e191471465b300f9be0710f1d02569e7",
"size": "4316",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rest_auth/tests/test_base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "17039"
},
{
"name": "Python",
"bytes": "79094"
}
],
"symlink_target": ""
}
|
"""
file: splot.py
created: 20160806
author(s): mr-augustine
Statevars Plot (splot) produces plots for data in a given data file according to
the file's user-defined format specification.
"""
import getopt
import os.path
import sys
from statevars import Statevars
from data import Data
from plot import Plot
HELP_TEXT = 'usage: ' + sys.argv[0] + ' -i <input file> -s <statevars def> -o <output plot list>'
def main(argv):
input_filename = ''
statevars_spec_filename = ''
plot_types_list_filename = ''
plot_spec_filename = ''
try:
        options, args = getopt.getopt(argv, "hi:s:o:p:", ["input=", "statevars=", "plots=", "plot-defs="])
except getopt.GetoptError:
print HELP_TEXT
sys.exit(2)
# Reject any invocation that doesn't have the exact number of arguments
if len(argv) != 6:
print HELP_TEXT
sys.exit(2)
for (option, argument) in options:
if option in ('-i', '--input'):
print 'input file: ' + argument
input_filename = argument
elif option in ('-s', '--statevars'):
print 'statevars definition file: ' + argument
statevars_spec_filename = argument
elif option in ('-o', '--plots'):
print 'output spec: ' + argument
plot_types_list_filename = argument
elif option in ('-p', '--plot-defs'):
print 'plot definition file: ' + argument
plot_spec_filename = argument
else:
print HELP_TEXT
if not os.path.isfile(argument):
print 'File: ' + argument + ' does not exist. Exiting...'
sys.exit(2)
# Parse the statevars file
myStatevar = Statevars(statevars_spec_filename)
    # Use the parsed statevars to interpret the data in the data file
myData = Data(input_filename, myStatevar)
# Use the parsed data to create the plots specified by the user
plotter = Plot(plot_types_list_filename, myData)
if __name__ == '__main__':
main(sys.argv[1:])
|
{
"content_hash": "8696424ba1c8f3b3c8e4e131a2d7127b",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 97,
"avg_line_length": 28.728571428571428,
"alnum_prop": 0.6116360019890602,
"repo_name": "mr-augustine/splot",
"id": "5fbe21308fa6568b245b08d41b45ab2019e0f0cb",
"size": "2030",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "splot.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "71570"
}
],
"symlink_target": ""
}
|
import os
import subprocess
import sys

if "VIRTUAL_ENV" not in os.environ:
    sys.stderr.write("$VIRTUAL_ENV not found.\n\n")
    sys.exit(-1)
virtualenv = os.environ["VIRTUAL_ENV"]
file_path = os.path.dirname(__file__)
subprocess.call(["pip", "install", "-E", virtualenv, "--requirement",
os.path.join(file_path, "requirements.txt")])
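# Note (assumption, not in the original script): the "pip install -E" flag was
# removed from modern pip (around pip 1.1). A rough modern equivalent is to
# call the virtualenv's own pip directly, e.g.:
#
#   subprocess.call([os.path.join(virtualenv, "bin", "pip"), "install",
#                    "--requirement",
#                    os.path.join(file_path, "requirements.txt")])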
|
{
"content_hash": "b3a6513eabbfc17e9b118842c51581a0",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 69,
"avg_line_length": 33.54545454545455,
"alnum_prop": 0.6531165311653117,
"repo_name": "gcaprio/cfbreference_com",
"id": "97c833f98db0189b2fb8d309fa87642fc38f3594",
"size": "369",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bootstrap.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "210392"
}
],
"symlink_target": ""
}
|
import unittest
import mock
def _make_credentials():
import google.auth.credentials
return mock.Mock(spec=google.auth.credentials.Credentials)
def _make_entity_pb(project, kind, integer_id, name=None, str_val=None):
from google.cloud.datastore._generated import entity_pb2
from google.cloud.datastore.helpers import _new_value_pb
entity_pb = entity_pb2.Entity()
entity_pb.key.partition_id.project_id = project
path_element = entity_pb.key.path.add()
path_element.kind = kind
path_element.id = integer_id
if name is not None and str_val is not None:
value_pb = _new_value_pb(entity_pb, name)
value_pb.string_value = str_val
return entity_pb
class Test__get_gcd_project(unittest.TestCase):
def _call_fut(self):
from google.cloud.datastore.client import _get_gcd_project
return _get_gcd_project()
def test_no_value(self):
environ = {}
with mock.patch('os.getenv', new=environ.get):
project = self._call_fut()
self.assertIsNone(project)
def test_value_set(self):
from google.cloud.datastore.client import GCD_DATASET
MOCK_PROJECT = object()
environ = {GCD_DATASET: MOCK_PROJECT}
with mock.patch('os.getenv', new=environ.get):
project = self._call_fut()
self.assertEqual(project, MOCK_PROJECT)
class Test__determine_default_project(unittest.TestCase):
def _call_fut(self, project=None):
from google.cloud.datastore.client import (
_determine_default_project)
return _determine_default_project(project=project)
def _determine_default_helper(self, gcd=None, fallback=None,
project_called=None):
_callers = []
def gcd_mock():
_callers.append('gcd_mock')
return gcd
def fallback_mock(project=None):
_callers.append(('fallback_mock', project))
return fallback
patch = mock.patch.multiple(
'google.cloud.datastore.client',
_get_gcd_project=gcd_mock,
_base_default_project=fallback_mock)
with patch:
returned_project = self._call_fut(project_called)
return returned_project, _callers
def test_no_value(self):
project, callers = self._determine_default_helper()
self.assertIsNone(project)
self.assertEqual(callers, ['gcd_mock', ('fallback_mock', None)])
def test_explicit(self):
PROJECT = object()
project, callers = self._determine_default_helper(
project_called=PROJECT)
self.assertEqual(project, PROJECT)
self.assertEqual(callers, [])
def test_gcd(self):
PROJECT = object()
project, callers = self._determine_default_helper(gcd=PROJECT)
self.assertEqual(project, PROJECT)
self.assertEqual(callers, ['gcd_mock'])
def test_fallback(self):
PROJECT = object()
project, callers = self._determine_default_helper(fallback=PROJECT)
self.assertEqual(project, PROJECT)
self.assertEqual(callers, ['gcd_mock', ('fallback_mock', None)])
class TestClient(unittest.TestCase):
PROJECT = 'PROJECT'
def setUp(self):
KLASS = self._get_target_class()
self.original_cnxn_class = KLASS._connection_class
KLASS._connection_class = _MockConnection
def tearDown(self):
KLASS = self._get_target_class()
KLASS._connection_class = self.original_cnxn_class
@staticmethod
def _get_target_class():
from google.cloud.datastore.client import Client
return Client
def _make_one(self, project=PROJECT, namespace=None,
credentials=None, http=None):
return self._get_target_class()(project=project,
namespace=namespace,
credentials=credentials,
http=http)
def test_ctor_w_project_no_environ(self):
# Some environments (e.g. AppVeyor CI) run in GCE, so
# this test would fail artificially.
patch = mock.patch(
'google.cloud.datastore.client._base_default_project',
new=lambda project: None)
with patch:
self.assertRaises(EnvironmentError, self._make_one, None)
def test_ctor_w_implicit_inputs(self):
OTHER = 'other'
creds = _make_credentials()
default_called = []
def fallback_mock(project):
default_called.append(project)
return project or OTHER
klass = self._get_target_class()
patch1 = mock.patch(
'google.cloud.datastore.client._determine_default_project',
new=fallback_mock)
patch2 = mock.patch(
'google.cloud.client.get_credentials',
new=lambda: creds)
with patch1:
with patch2:
client = klass()
self.assertEqual(client.project, OTHER)
self.assertIsNone(client.namespace)
self.assertIsInstance(client._connection, _MockConnection)
self.assertIs(client._connection.credentials, creds)
self.assertIsNone(client._connection.http)
self.assertIsNone(client.current_batch)
self.assertIsNone(client.current_transaction)
self.assertEqual(default_called, [None])
def test_ctor_w_explicit_inputs(self):
OTHER = 'other'
NAMESPACE = 'namespace'
creds = _make_credentials()
http = object()
client = self._make_one(project=OTHER,
namespace=NAMESPACE,
credentials=creds,
http=http)
self.assertEqual(client.project, OTHER)
self.assertEqual(client.namespace, NAMESPACE)
self.assertIsInstance(client._connection, _MockConnection)
self.assertIs(client._connection.credentials, creds)
self.assertIs(client._connection.http, http)
self.assertIsNone(client.current_batch)
self.assertEqual(list(client._batch_stack), [])
def test__push_batch_and__pop_batch(self):
creds = _make_credentials()
client = self._make_one(credentials=creds)
batch = client.batch()
xact = client.transaction()
client._push_batch(batch)
self.assertEqual(list(client._batch_stack), [batch])
self.assertIs(client.current_batch, batch)
self.assertIsNone(client.current_transaction)
client._push_batch(xact)
self.assertIs(client.current_batch, xact)
self.assertIs(client.current_transaction, xact)
# list(_LocalStack) returns in reverse order.
self.assertEqual(list(client._batch_stack), [xact, batch])
self.assertIs(client._pop_batch(), xact)
self.assertEqual(list(client._batch_stack), [batch])
self.assertIs(client._pop_batch(), batch)
self.assertEqual(list(client._batch_stack), [])
def test_get_miss(self):
_called_with = []
def _get_multi(*args, **kw):
_called_with.append((args, kw))
return []
creds = _make_credentials()
client = self._make_one(credentials=creds)
client.get_multi = _get_multi
key = object()
self.assertIsNone(client.get(key))
self.assertEqual(_called_with[0][0], ())
self.assertEqual(_called_with[0][1]['keys'], [key])
self.assertIsNone(_called_with[0][1]['missing'])
self.assertIsNone(_called_with[0][1]['deferred'])
self.assertIsNone(_called_with[0][1]['transaction'])
def test_get_hit(self):
TXN_ID = '123'
_called_with = []
_entity = object()
def _get_multi(*args, **kw):
_called_with.append((args, kw))
return [_entity]
creds = _make_credentials()
client = self._make_one(credentials=creds)
client.get_multi = _get_multi
key, missing, deferred = object(), [], []
self.assertIs(client.get(key, missing, deferred, TXN_ID), _entity)
self.assertEqual(_called_with[0][0], ())
self.assertEqual(_called_with[0][1]['keys'], [key])
self.assertIs(_called_with[0][1]['missing'], missing)
self.assertIs(_called_with[0][1]['deferred'], deferred)
self.assertEqual(_called_with[0][1]['transaction'], TXN_ID)
def test_get_multi_no_keys(self):
creds = _make_credentials()
client = self._make_one(credentials=creds)
results = client.get_multi([])
self.assertEqual(results, [])
def test_get_multi_miss(self):
from google.cloud.datastore.key import Key
creds = _make_credentials()
client = self._make_one(credentials=creds)
client._connection._add_lookup_result()
key = Key('Kind', 1234, project=self.PROJECT)
results = client.get_multi([key])
self.assertEqual(results, [])
def test_get_multi_miss_w_missing(self):
from google.cloud.datastore._generated import entity_pb2
from google.cloud.datastore.key import Key
KIND = 'Kind'
ID = 1234
# Make a missing entity pb to be returned from mock backend.
missed = entity_pb2.Entity()
missed.key.partition_id.project_id = self.PROJECT
path_element = missed.key.path.add()
path_element.kind = KIND
path_element.id = ID
creds = _make_credentials()
client = self._make_one(credentials=creds)
# Set missing entity on mock connection.
client._connection._add_lookup_result(missing=[missed])
key = Key(KIND, ID, project=self.PROJECT)
missing = []
entities = client.get_multi([key], missing=missing)
self.assertEqual(entities, [])
self.assertEqual([missed.key.to_protobuf() for missed in missing],
[key.to_protobuf()])
def test_get_multi_w_missing_non_empty(self):
from google.cloud.datastore.key import Key
creds = _make_credentials()
client = self._make_one(credentials=creds)
key = Key('Kind', 1234, project=self.PROJECT)
missing = ['this', 'list', 'is', 'not', 'empty']
self.assertRaises(ValueError, client.get_multi,
[key], missing=missing)
def test_get_multi_w_deferred_non_empty(self):
from google.cloud.datastore.key import Key
creds = _make_credentials()
client = self._make_one(credentials=creds)
key = Key('Kind', 1234, project=self.PROJECT)
deferred = ['this', 'list', 'is', 'not', 'empty']
self.assertRaises(ValueError, client.get_multi,
[key], deferred=deferred)
def test_get_multi_miss_w_deferred(self):
from google.cloud.datastore.key import Key
key = Key('Kind', 1234, project=self.PROJECT)
# Set deferred entity on mock connection.
creds = _make_credentials()
client = self._make_one(credentials=creds)
client._connection._add_lookup_result(deferred=[key.to_protobuf()])
deferred = []
entities = client.get_multi([key], deferred=deferred)
self.assertEqual(entities, [])
self.assertEqual([def_key.to_protobuf() for def_key in deferred],
[key.to_protobuf()])
def test_get_multi_w_deferred_from_backend_but_not_passed(self):
from google.cloud.datastore._generated import entity_pb2
from google.cloud.datastore.entity import Entity
from google.cloud.datastore.key import Key
key1 = Key('Kind', project=self.PROJECT)
key1_pb = key1.to_protobuf()
key2 = Key('Kind', 2345, project=self.PROJECT)
key2_pb = key2.to_protobuf()
entity1_pb = entity_pb2.Entity()
entity1_pb.key.CopyFrom(key1_pb)
entity2_pb = entity_pb2.Entity()
entity2_pb.key.CopyFrom(key2_pb)
creds = _make_credentials()
client = self._make_one(credentials=creds)
# mock up two separate requests
client._connection._add_lookup_result([entity1_pb], deferred=[key2_pb])
client._connection._add_lookup_result([entity2_pb])
missing = []
found = client.get_multi([key1, key2], missing=missing)
self.assertEqual(len(found), 2)
self.assertEqual(len(missing), 0)
# Check the actual contents on the response.
self.assertIsInstance(found[0], Entity)
self.assertEqual(found[0].key.path, key1.path)
self.assertEqual(found[0].key.project, key1.project)
self.assertIsInstance(found[1], Entity)
self.assertEqual(found[1].key.path, key2.path)
self.assertEqual(found[1].key.project, key2.project)
cw = client._connection._lookup_cw
self.assertEqual(len(cw), 2)
ds_id, k_pbs, eventual, tid = cw[0]
self.assertEqual(ds_id, self.PROJECT)
self.assertEqual(len(k_pbs), 2)
self.assertEqual(key1_pb, k_pbs[0])
self.assertEqual(key2_pb, k_pbs[1])
self.assertFalse(eventual)
self.assertIsNone(tid)
ds_id, k_pbs, eventual, tid = cw[1]
self.assertEqual(ds_id, self.PROJECT)
self.assertEqual(len(k_pbs), 1)
self.assertEqual(key2_pb, k_pbs[0])
self.assertFalse(eventual)
self.assertIsNone(tid)
def test_get_multi_hit(self):
from google.cloud.datastore.key import Key
KIND = 'Kind'
ID = 1234
PATH = [{'kind': KIND, 'id': ID}]
# Make a found entity pb to be returned from mock backend.
entity_pb = _make_entity_pb(self.PROJECT, KIND, ID, 'foo', 'Foo')
# Make a connection to return the entity pb.
creds = _make_credentials()
client = self._make_one(credentials=creds)
client._connection._add_lookup_result([entity_pb])
key = Key(KIND, ID, project=self.PROJECT)
result, = client.get_multi([key])
new_key = result.key
# Check the returned value is as expected.
self.assertIsNot(new_key, key)
self.assertEqual(new_key.project, self.PROJECT)
self.assertEqual(new_key.path, PATH)
self.assertEqual(list(result), ['foo'])
self.assertEqual(result['foo'], 'Foo')
def test_get_multi_hit_w_transaction(self):
from google.cloud.datastore.key import Key
TXN_ID = '123'
KIND = 'Kind'
ID = 1234
PATH = [{'kind': KIND, 'id': ID}]
# Make a found entity pb to be returned from mock backend.
entity_pb = _make_entity_pb(self.PROJECT, KIND, ID, 'foo', 'Foo')
# Make a connection to return the entity pb.
creds = _make_credentials()
client = self._make_one(credentials=creds)
client._connection._add_lookup_result([entity_pb])
key = Key(KIND, ID, project=self.PROJECT)
txn = client.transaction()
txn._id = TXN_ID
result, = client.get_multi([key], transaction=txn)
new_key = result.key
# Check the returned value is as expected.
self.assertIsNot(new_key, key)
self.assertEqual(new_key.project, self.PROJECT)
self.assertEqual(new_key.path, PATH)
self.assertEqual(list(result), ['foo'])
self.assertEqual(result['foo'], 'Foo')
cw = client._connection._lookup_cw
self.assertEqual(len(cw), 1)
_, _, _, transaction_id = cw[0]
self.assertEqual(transaction_id, TXN_ID)
def test_get_multi_hit_multiple_keys_same_project(self):
from google.cloud.datastore.key import Key
KIND = 'Kind'
ID1 = 1234
ID2 = 2345
# Make a found entity pb to be returned from mock backend.
entity_pb1 = _make_entity_pb(self.PROJECT, KIND, ID1)
entity_pb2 = _make_entity_pb(self.PROJECT, KIND, ID2)
# Make a connection to return the entity pbs.
creds = _make_credentials()
client = self._make_one(credentials=creds)
client._connection._add_lookup_result([entity_pb1, entity_pb2])
key1 = Key(KIND, ID1, project=self.PROJECT)
key2 = Key(KIND, ID2, project=self.PROJECT)
retrieved1, retrieved2 = client.get_multi([key1, key2])
# Check values match.
self.assertEqual(retrieved1.key.path, key1.path)
self.assertEqual(dict(retrieved1), {})
self.assertEqual(retrieved2.key.path, key2.path)
self.assertEqual(dict(retrieved2), {})
def test_get_multi_hit_multiple_keys_different_project(self):
from google.cloud.datastore.key import Key
PROJECT1 = 'PROJECT'
PROJECT2 = 'PROJECT-ALT'
# Make sure our IDs are actually different.
self.assertNotEqual(PROJECT1, PROJECT2)
key1 = Key('KIND', 1234, project=PROJECT1)
key2 = Key('KIND', 1234, project=PROJECT2)
creds = _make_credentials()
client = self._make_one(credentials=creds)
with self.assertRaises(ValueError):
client.get_multi([key1, key2])
def test_get_multi_max_loops(self):
from google.cloud.datastore.key import Key
KIND = 'Kind'
ID = 1234
# Make a found entity pb to be returned from mock backend.
entity_pb = _make_entity_pb(self.PROJECT, KIND, ID, 'foo', 'Foo')
# Make a connection to return the entity pb.
creds = _make_credentials()
client = self._make_one(credentials=creds)
client._connection._add_lookup_result([entity_pb])
key = Key(KIND, ID, project=self.PROJECT)
deferred = []
missing = []
patch = mock.patch(
'google.cloud.datastore.client._MAX_LOOPS', new=-1)
with patch:
result = client.get_multi([key], missing=missing,
deferred=deferred)
# Make sure we have no results, even though the connection has been
# set up as in `test_hit` to return a single result.
self.assertEqual(result, [])
self.assertEqual(missing, [])
self.assertEqual(deferred, [])
def test_put(self):
_called_with = []
def _put_multi(*args, **kw):
_called_with.append((args, kw))
creds = _make_credentials()
client = self._make_one(credentials=creds)
client.put_multi = _put_multi
entity = object()
client.put(entity)
self.assertEqual(_called_with[0][0], ())
self.assertEqual(_called_with[0][1]['entities'], [entity])
def test_put_multi_no_entities(self):
creds = _make_credentials()
client = self._make_one(credentials=creds)
self.assertIsNone(client.put_multi([]))
def test_put_multi_w_single_empty_entity(self):
# https://github.com/GoogleCloudPlatform/google-cloud-python/issues/649
from google.cloud.datastore.entity import Entity
creds = _make_credentials()
client = self._make_one(credentials=creds)
self.assertRaises(ValueError, client.put_multi, Entity())
def test_put_multi_no_batch_w_partial_key(self):
from google.cloud.datastore.helpers import _property_tuples
entity = _Entity(foo=u'bar')
key = entity.key = _Key(self.PROJECT)
key._id = None
creds = _make_credentials()
client = self._make_one(credentials=creds)
client._connection._commit.append([_KeyPB(key)])
result = client.put_multi([entity])
self.assertIsNone(result)
self.assertEqual(len(client._connection._commit_cw), 1)
(project,
commit_req, transaction_id) = client._connection._commit_cw[0]
self.assertEqual(project, self.PROJECT)
mutated_entity = _mutated_pb(self, commit_req.mutations, 'insert')
self.assertEqual(mutated_entity.key, key.to_protobuf())
prop_list = list(_property_tuples(mutated_entity))
        self.assertEqual(len(prop_list), 1)
name, value_pb = prop_list[0]
self.assertEqual(name, 'foo')
self.assertEqual(value_pb.string_value, u'bar')
self.assertIsNone(transaction_id)
def test_put_multi_existing_batch_w_completed_key(self):
from google.cloud.datastore.helpers import _property_tuples
creds = _make_credentials()
client = self._make_one(credentials=creds)
entity = _Entity(foo=u'bar')
key = entity.key = _Key(self.PROJECT)
with _NoCommitBatch(client) as CURR_BATCH:
result = client.put_multi([entity])
self.assertIsNone(result)
mutated_entity = _mutated_pb(self, CURR_BATCH.mutations, 'upsert')
self.assertEqual(mutated_entity.key, key.to_protobuf())
prop_list = list(_property_tuples(mutated_entity))
        self.assertEqual(len(prop_list), 1)
name, value_pb = prop_list[0]
self.assertEqual(name, 'foo')
self.assertEqual(value_pb.string_value, u'bar')
def test_delete(self):
_called_with = []
def _delete_multi(*args, **kw):
_called_with.append((args, kw))
creds = _make_credentials()
client = self._make_one(credentials=creds)
client.delete_multi = _delete_multi
key = object()
client.delete(key)
self.assertEqual(_called_with[0][0], ())
self.assertEqual(_called_with[0][1]['keys'], [key])
def test_delete_multi_no_keys(self):
creds = _make_credentials()
client = self._make_one(credentials=creds)
result = client.delete_multi([])
self.assertIsNone(result)
self.assertEqual(len(client._connection._commit_cw), 0)
def test_delete_multi_no_batch(self):
key = _Key(self.PROJECT)
creds = _make_credentials()
client = self._make_one(credentials=creds)
client._connection._commit.append([])
result = client.delete_multi([key])
self.assertIsNone(result)
self.assertEqual(len(client._connection._commit_cw), 1)
(project,
commit_req, transaction_id) = client._connection._commit_cw[0]
self.assertEqual(project, self.PROJECT)
mutated_key = _mutated_pb(self, commit_req.mutations, 'delete')
self.assertEqual(mutated_key, key.to_protobuf())
self.assertIsNone(transaction_id)
def test_delete_multi_w_existing_batch(self):
creds = _make_credentials()
client = self._make_one(credentials=creds)
key = _Key(self.PROJECT)
with _NoCommitBatch(client) as CURR_BATCH:
result = client.delete_multi([key])
self.assertIsNone(result)
mutated_key = _mutated_pb(self, CURR_BATCH.mutations, 'delete')
self.assertEqual(mutated_key, key._key)
self.assertEqual(len(client._connection._commit_cw), 0)
def test_delete_multi_w_existing_transaction(self):
creds = _make_credentials()
client = self._make_one(credentials=creds)
key = _Key(self.PROJECT)
with _NoCommitTransaction(client) as CURR_XACT:
result = client.delete_multi([key])
self.assertIsNone(result)
mutated_key = _mutated_pb(self, CURR_XACT.mutations, 'delete')
self.assertEqual(mutated_key, key._key)
self.assertEqual(len(client._connection._commit_cw), 0)
def test_allocate_ids_w_partial_key(self):
NUM_IDS = 2
INCOMPLETE_KEY = _Key(self.PROJECT)
INCOMPLETE_KEY._id = None
creds = _make_credentials()
client = self._make_one(credentials=creds)
result = client.allocate_ids(INCOMPLETE_KEY, NUM_IDS)
# Check the IDs returned.
self.assertEqual([key._id for key in result], list(range(NUM_IDS)))
def test_allocate_ids_with_completed_key(self):
creds = _make_credentials()
client = self._make_one(credentials=creds)
COMPLETE_KEY = _Key(self.PROJECT)
self.assertRaises(ValueError, client.allocate_ids, COMPLETE_KEY, 2)
def test_key_w_project(self):
KIND = 'KIND'
ID = 1234
creds = _make_credentials()
client = self._make_one(credentials=creds)
self.assertRaises(TypeError,
client.key, KIND, ID, project=self.PROJECT)
def test_key_wo_project(self):
KIND = 'KIND'
ID = 1234
creds = _make_credentials()
client = self._make_one(credentials=creds)
patch = mock.patch(
'google.cloud.datastore.client.Key', new=_Dummy)
with patch:
key = client.key(KIND, ID)
self.assertIsInstance(key, _Dummy)
self.assertEqual(key.args, (KIND, ID))
expected_kwargs = {
'project': self.PROJECT,
'namespace': None,
}
self.assertEqual(key.kwargs, expected_kwargs)
def test_key_w_namespace(self):
KIND = 'KIND'
ID = 1234
NAMESPACE = object()
creds = _make_credentials()
client = self._make_one(namespace=NAMESPACE, credentials=creds)
patch = mock.patch(
'google.cloud.datastore.client.Key', new=_Dummy)
with patch:
key = client.key(KIND, ID)
self.assertIsInstance(key, _Dummy)
expected_kwargs = {
'project': self.PROJECT,
'namespace': NAMESPACE,
}
self.assertEqual(key.kwargs, expected_kwargs)
def test_key_w_namespace_collision(self):
KIND = 'KIND'
ID = 1234
NAMESPACE1 = object()
NAMESPACE2 = object()
creds = _make_credentials()
client = self._make_one(namespace=NAMESPACE1, credentials=creds)
patch = mock.patch(
'google.cloud.datastore.client.Key', new=_Dummy)
with patch:
key = client.key(KIND, ID, namespace=NAMESPACE2)
self.assertIsInstance(key, _Dummy)
expected_kwargs = {
'project': self.PROJECT,
'namespace': NAMESPACE2,
}
self.assertEqual(key.kwargs, expected_kwargs)
def test_batch(self):
creds = _make_credentials()
client = self._make_one(credentials=creds)
patch = mock.patch(
'google.cloud.datastore.client.Batch', new=_Dummy)
with patch:
batch = client.batch()
self.assertIsInstance(batch, _Dummy)
self.assertEqual(batch.args, (client,))
self.assertEqual(batch.kwargs, {})
def test_transaction_defaults(self):
creds = _make_credentials()
client = self._make_one(credentials=creds)
patch = mock.patch(
'google.cloud.datastore.client.Transaction', new=_Dummy)
with patch:
xact = client.transaction()
self.assertIsInstance(xact, _Dummy)
self.assertEqual(xact.args, (client,))
self.assertEqual(xact.kwargs, {})
def test_query_w_client(self):
KIND = 'KIND'
creds = _make_credentials()
client = self._make_one(credentials=creds)
other = self._make_one(credentials=_make_credentials())
self.assertRaises(TypeError, client.query, kind=KIND, client=other)
def test_query_w_project(self):
KIND = 'KIND'
creds = _make_credentials()
client = self._make_one(credentials=creds)
self.assertRaises(TypeError,
client.query, kind=KIND, project=self.PROJECT)
def test_query_w_defaults(self):
creds = _make_credentials()
client = self._make_one(credentials=creds)
patch = mock.patch(
'google.cloud.datastore.client.Query', new=_Dummy)
with patch:
query = client.query()
self.assertIsInstance(query, _Dummy)
self.assertEqual(query.args, (client,))
expected_kwargs = {
'project': self.PROJECT,
'namespace': None,
}
self.assertEqual(query.kwargs, expected_kwargs)
def test_query_explicit(self):
KIND = 'KIND'
NAMESPACE = 'NAMESPACE'
ANCESTOR = object()
FILTERS = [('PROPERTY', '==', 'VALUE')]
PROJECTION = ['__key__']
ORDER = ['PROPERTY']
DISTINCT_ON = ['DISTINCT_ON']
creds = _make_credentials()
client = self._make_one(credentials=creds)
patch = mock.patch(
'google.cloud.datastore.client.Query', new=_Dummy)
with patch:
query = client.query(
kind=KIND,
namespace=NAMESPACE,
ancestor=ANCESTOR,
filters=FILTERS,
projection=PROJECTION,
order=ORDER,
distinct_on=DISTINCT_ON,
)
self.assertIsInstance(query, _Dummy)
self.assertEqual(query.args, (client,))
kwargs = {
'project': self.PROJECT,
'kind': KIND,
'namespace': NAMESPACE,
'ancestor': ANCESTOR,
'filters': FILTERS,
'projection': PROJECTION,
'order': ORDER,
'distinct_on': DISTINCT_ON,
}
self.assertEqual(query.kwargs, kwargs)
def test_query_w_namespace(self):
KIND = 'KIND'
NAMESPACE = object()
creds = _make_credentials()
client = self._make_one(namespace=NAMESPACE, credentials=creds)
patch = mock.patch(
'google.cloud.datastore.client.Query', new=_Dummy)
with patch:
query = client.query(kind=KIND)
self.assertIsInstance(query, _Dummy)
self.assertEqual(query.args, (client,))
expected_kwargs = {
'project': self.PROJECT,
'namespace': NAMESPACE,
'kind': KIND,
}
self.assertEqual(query.kwargs, expected_kwargs)
def test_query_w_namespace_collision(self):
KIND = 'KIND'
NAMESPACE1 = object()
NAMESPACE2 = object()
creds = _make_credentials()
client = self._make_one(namespace=NAMESPACE1, credentials=creds)
patch = mock.patch(
'google.cloud.datastore.client.Query', new=_Dummy)
with patch:
query = client.query(kind=KIND, namespace=NAMESPACE2)
self.assertIsInstance(query, _Dummy)
self.assertEqual(query.args, (client,))
expected_kwargs = {
'project': self.PROJECT,
'namespace': NAMESPACE2,
'kind': KIND,
}
self.assertEqual(query.kwargs, expected_kwargs)
class _Dummy(object):
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
class _MockConnection(object):
def __init__(self, credentials=None, http=None):
self.credentials = credentials
self.http = http
self._lookup_cw = []
self._lookup = []
self._commit_cw = []
self._commit = []
self._alloc_cw = []
self._alloc = []
self._index_updates = 0
def _add_lookup_result(self, results=(), missing=(), deferred=()):
self._lookup.append((list(results), list(missing), list(deferred)))
def lookup(self, project, key_pbs, eventual=False, transaction_id=None):
self._lookup_cw.append((project, key_pbs, eventual, transaction_id))
triple, self._lookup = self._lookup[0], self._lookup[1:]
results, missing, deferred = triple
return results, missing, deferred
def commit(self, project, commit_request, transaction_id):
self._commit_cw.append((project, commit_request, transaction_id))
response, self._commit = self._commit[0], self._commit[1:]
return self._index_updates, response
def allocate_ids(self, project, key_pbs):
self._alloc_cw.append((project, key_pbs))
num_pbs = len(key_pbs)
return [_KeyPB(i) for i in list(range(num_pbs))]
class _NoCommitBatch(object):
def __init__(self, client):
from google.cloud.datastore.batch import Batch
self._client = client
self._batch = Batch(client)
self._batch.begin()
def __enter__(self):
self._client._push_batch(self._batch)
return self._batch
def __exit__(self, *args):
self._client._pop_batch()
class _NoCommitTransaction(object):
def __init__(self, client, transaction_id='TRANSACTION'):
from google.cloud.datastore.batch import Batch
from google.cloud.datastore.transaction import Transaction
self._client = client
xact = self._transaction = Transaction(client)
xact._id = transaction_id
Batch.begin(xact)
def __enter__(self):
self._client._push_batch(self._transaction)
return self._transaction
def __exit__(self, *args):
self._client._pop_batch()
class _Entity(dict):
key = None
exclude_from_indexes = ()
_meanings = {}
class _Key(object):
_MARKER = object()
_kind = 'KIND'
_key = 'KEY'
_path = None
_id = 1234
_stored = None
def __init__(self, project):
self.project = project
@property
def is_partial(self):
return self._id is None
def to_protobuf(self):
from google.cloud.datastore._generated import entity_pb2
key = self._key = entity_pb2.Key()
# Don't assign it, because it will just get ripped out
# key.partition_id.project_id = self.project
element = key.path.add()
element.kind = self._kind
if self._id is not None:
element.id = self._id
return key
def completed_key(self, new_id):
assert self.is_partial
new_key = self.__class__(self.project)
new_key._id = new_id
return new_key
class _PathElementPB(object):
def __init__(self, id_):
self.id = id_
class _KeyPB(object):
def __init__(self, id_):
self.path = [_PathElementPB(id_)]
def _assert_num_mutations(test_case, mutation_pb_list, num_mutations):
test_case.assertEqual(len(mutation_pb_list), num_mutations)
def _mutated_pb(test_case, mutation_pb_list, mutation_type):
# Make sure there is only one mutation.
_assert_num_mutations(test_case, mutation_pb_list, 1)
# We grab the only mutation.
mutated_pb = mutation_pb_list[0]
# Then check if it is the correct type.
test_case.assertEqual(mutated_pb.WhichOneof('operation'),
mutation_type)
return getattr(mutated_pb, mutation_type)
|
{
"content_hash": "6c9442da3e011a45780a22529ac112d9",
"timestamp": "",
"source": "github",
"line_count": 1036,
"max_line_length": 79,
"avg_line_length": 33.25965250965251,
"alnum_prop": 0.5996749571930232,
"repo_name": "Fkawala/gcloud-python",
"id": "f6e016d03712dc3147fbcb962b21958608b812c9",
"size": "35033",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "datastore/unit_tests/test_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Protocol Buffer",
"bytes": "89702"
},
{
"name": "Python",
"bytes": "3403274"
},
{
"name": "Shell",
"bytes": "7548"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: tower_organization
version_added: "2.3"
author: "Wayne Witzel III (@wwitzel3)"
short_description: create, update, or destroy Ansible Tower organizations
description:
- Create, update, or destroy Ansible Tower organizations. See
U(https://www.ansible.com/tower) for an overview.
options:
name:
description:
- Name to use for the organization.
required: True
description:
description:
- The description to use for the organization.
required: False
default: null
state:
description:
- Desired state of the resource.
required: False
default: "present"
choices: ["present", "absent"]
tower_host:
description:
- URL to your Tower instance.
required: False
default: null
tower_username:
description:
- Username for your Tower instance.
required: False
default: null
tower_password:
description:
- Password for your Tower instance.
required: False
default: null
tower_verify_ssl:
description:
        - Allow or disallow insecure connections to Tower. If C(no), SSL certificates will not be validated.
          This should only be used on personally controlled sites using self-signed certificates.
required: False
default: True
tower_config_file:
description:
- Path to the Tower config file. See notes.
required: False
default: null
requirements:
- "python >= 2.6"
- "ansible-tower-cli >= 3.0.3"
notes:
- If no I(tower_config_file) is provided we will attempt to use the tower-cli library
defaults to find your Tower host information.
- I(tower_config_file) should contain Tower configuration in the following format
host=hostname
username=username
password=password
'''
EXAMPLES = '''
- name: Create tower organization
tower_organization:
name: "Foo"
description: "Foo bar organization"
state: present
tower_config_file: "~/tower_cli.cfg"
'''
try:
import tower_cli
import tower_cli.utils.exceptions as exc
from tower_cli.conf import settings
from ansible.module_utils.ansible_tower import tower_auth_config, tower_check_mode
HAS_TOWER_CLI = True
except ImportError:
HAS_TOWER_CLI = False
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True),
description=dict(),
tower_host=dict(),
tower_username=dict(),
tower_password=dict(no_log=True),
tower_verify_ssl=dict(type='bool', default=True),
tower_config_file=dict(type='path'),
state=dict(choices=['present', 'absent'], default='present'),
),
supports_check_mode=True
)
if not HAS_TOWER_CLI:
module.fail_json(msg='ansible-tower-cli required for this module')
name = module.params.get('name')
description = module.params.get('description')
state = module.params.get('state')
json_output = {'organization': name, 'state': state}
tower_auth = tower_auth_config(module)
with settings.runtime_values(**tower_auth):
tower_check_mode(module)
organization = tower_cli.get_resource('organization')
try:
if state == 'present':
result = organization.modify(name=name, description=description, create_on_missing=True)
json_output['id'] = result['id']
elif state == 'absent':
result = organization.delete(name=name)
except (exc.ConnectionError, exc.BadRequest) as excinfo:
module.fail_json(msg='Failed to update the organization: {0}'.format(excinfo), changed=False)
json_output['changed'] = result['changed']
module.exit_json(**json_output)
from ansible.module_utils.basic import AnsibleModule
if __name__ == '__main__':
main()
|
{
"content_hash": "5da0ca70b204f50fc5a2ecb355e7d370",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 105,
"avg_line_length": 29.70921985815603,
"alnum_prop": 0.6306994509429458,
"repo_name": "e-gob/plataforma-kioscos-autoatencion",
"id": "249ac7280dcb18c0e984dc91c5692c3aaa5bc7e5",
"size": "4377",
"binary": false,
"copies": "15",
"ref": "refs/heads/master",
"path": "scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/modules/web_infrastructure/ansible_tower/tower_organization.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "41110"
},
{
"name": "C++",
"bytes": "3804"
},
{
"name": "CSS",
"bytes": "34823"
},
{
"name": "CoffeeScript",
"bytes": "8521"
},
{
"name": "HTML",
"bytes": "61168"
},
{
"name": "JavaScript",
"bytes": "7206"
},
{
"name": "Makefile",
"bytes": "1347"
},
{
"name": "PowerShell",
"bytes": "584344"
},
{
"name": "Python",
"bytes": "25506593"
},
{
"name": "Ruby",
"bytes": "245726"
},
{
"name": "Shell",
"bytes": "5075"
}
],
"symlink_target": ""
}
|
import os, os.path
import re
import argparse
import fileinput
includes = r"(.*\.html)|(.*\.js)|(.*\.css)"
parser = argparse.ArgumentParser(description="Add a vendor prefix to the @viewport rules.")
parser.add_argument("--vendor", required=True, help="""Specify the vendor name. For instance,
"--vendor moz" will change all @viewport rules into @-moz-viewport.""")
args = parser.parse_args()
for root, dirs, files in os.walk("."):
files = [os.path.join(root, f) for f in files]
files = [f for f in files if re.match(includes, f)]
for file in files:
for line in fileinput.FileInput(file, inplace=1):
print re.sub("@(viewport)", "@-"+args.vendor+"-\g<1>", line, flags=re.IGNORECASE),
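# Example invocation (hypothetical): run from the root of the tree to rewrite,
# e.g.
#   python prefix.py --vendor moz
# which turns every "@viewport" rule in .html/.js/.css files into
# "@-moz-viewport" (case-insensitively), editing the files in place.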
|
{
"content_hash": "2ec59ba4471506bad7ec4113452dccd0",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 94,
"avg_line_length": 36.85,
"alnum_prop": 0.6431478968792401,
"repo_name": "operasoftware/viewport-compliance-test",
"id": "b66fcc6bb859b3a13dd0d00494e5a020fe18f35a",
"size": "760",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "prefix.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "93075"
},
{
"name": "Python",
"bytes": "760"
}
],
"symlink_target": ""
}
|
'''Compute recombination probabilities
E.g.: for each sample repertoire, compute P(V), P(J), P(D, J),
P(delV|V), P(delJ|J), P(del5, del3|D), P(insVD), P(x_i|x_i-1),
P(insDJ), P(y_j|y_j+1)
Compute the median of a subset of samples etc...
'''
import os
import sys
import gzip
import cPickle as pickle
from optparse import OptionGroup
from jobTree.scriptTree.target import Target
from sonLib.bioio import system
import aimseqtk.lib.sample as libsample
#import aimseqtk.src.recomb.recomb_common as rcommon
from aimseqtk.src.recomb.recomb_common import RecombModel
####################### Functions ############################
def dict_convert_to_freq(mydict):
total = sum(mydict.values())
if total > 0:
for k, v in mydict.iteritems():
mydict[k] = float(v) / total
def dict_convert_to_freq_depth2(mydict):
for k, dict2 in mydict.iteritems():
dict_convert_to_freq(dict2)
def model_convert_to_freq(model):
for attr in ['v', 'j', 'd', 'dj', 'ins_vd', 'ins_dj']:
dict_convert_to_freq(model[attr])
for attr in ['v2del', 'j2del', 'd2del', 'vd2next', 'dj2prev']:
dict_convert_to_freq_depth2(model[attr])
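# Illustrative sketch (not part of the original module): dict_convert_to_freq
# normalizes raw counts in place, i.e. P(k) = count(k) / sum of all counts.
# The gene names and counts below are made up:
#
#   counts = {'TRBV5-1': 30, 'TRBV19': 10}
#   dict_convert_to_freq(counts)
#   # counts is now {'TRBV5-1': 0.75, 'TRBV19': 0.25}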
def update_dict(dict1, dict2):
# dictionary has 1 level: key 2 value
for k, v in dict2.iteritems():
if k not in dict1:
dict1[k] = v
else:
dict1[k] += v
def update_dict_depth2(dict1, dict2):
# dictionary has 2 depth levels: {k1: {k2: v}}
for k1, k2v in dict2.iteritems():
if k1 not in dict1:
dict1[k1] = k2v
else:
update_dict(dict1[k1], k2v)
def model_update(model1, model2):
for attr in ['v', 'j', 'd', 'dj', 'ins_vd', 'ins_dj']:
update_dict(model1[attr], model2[attr])
for attr in ['v2del', 'j2del', 'd2del', 'vd2next', 'dj2prev']:
update_dict_depth2(model1[attr], model2[attr])
def get_recomb_stats0(clones):
    # in case of ambiguous calls, just record the first call
model = RecombModel()
for clone in clones:
# V
model.update_attr('v', clone.vgenes[0], 1)
model.update_attr2('v2del', clone.vgenes[0], clone.vdel, 1)
# J
model.update_attr('j', clone.jgenes[0], 1)
model.update_attr2('j2del', clone.jgenes[0], clone.jdel, 1)
# D
model.update_attr('d', clone.dgenes[0], 1)
model.update_attr2('d2del', clone.dgenes[0],
(clone.d5del, clone.d3del), 1)
# DJ
model.update_attr('dj', (clone.dgenes[0], clone.jgenes[0]), 1)
# Insertion length
ins_vd = clone.firstdpos - clone.lastvpos - 1
model.update_attr('ins_vd', ins_vd, 1)
ins_dj = clone.firstjpos - clone.lastdpos - 1
model.update_attr('ins_dj', ins_dj, 1)
# inserted nt given 5' nt
vd_nts = clone.nuc[clone.lastvpos: clone.firstdpos] # include the lastV
model.update_vd2next(vd_nts, 1)
dj_nts = clone.nuc[clone.lastdpos + 1: clone.firstjpos + 1] # include lastJ
model.update_dj2prev(dj_nts, 1)
return model
def get_recomb_stats_splitweight0(clones):
    # in case of ambiguous calls, split the weight
model = RecombModel()
for clone in clones:
# V
vsize = 1.0 / len(clone.vgenes)
for v in clone.vgenes:
model.update_attr('v', v, vsize)
model.update_attr2('v2del', clone.vgenes[0], clone.vdel, vsize)
# J
jsize = 1.0 / len(clone.jgenes)
for j in clone.jgenes:
model.update_attr('j', j, jsize)
model.update_attr2('j2del', clone.jgenes[0], clone.jdel, jsize)
# D
dsize = 1.0 / len(clone.dgenes)
for d in clone.dgenes:
model.update_attr('d', d, dsize)
model.update_attr2('d2del', clone.dgenes[0], (clone.d5del, clone.d3del), dsize)
# DJ
numdj = len(clone.jgenes) * len(clone.dgenes)
djsize = 1.0 / numdj
for d in clone.dgenes:
for j in clone.jgenes:
model.update_attr('dj', (d, j), djsize)
# Insertion length
ins_vd = clone.firstdpos - clone.lastvpos - 1
model.update_attr('ins_vd', ins_vd, vsize)
ins_dj = clone.firstjpos - clone.lastdpos - 1
model.update_attr('ins_dj', ins_dj, jsize)
# inserted nt given 5' nt
vd_nts = clone.nuc[clone.lastvpos: clone.firstdpos] # include the lastV
model.update_vd2next(vd_nts, vsize)
dj_nts = clone.nuc[clone.lastdpos + 1: clone.firstjpos + 1] # include lastJ
model.update_dj2prev(dj_nts, jsize)
return model
def get_recomb_stats(clones):
    # in case of ambiguous calls, just record the first call
model = RecombModel()
for clone in clones:
# V
model.update_attr('v', clone.v, 1)
model.update_attr2('v2del', clone.v, clone.vdel, 1)
# J
model.update_attr('j', clone.j, 1)
model.update_attr2('j2del', clone.j, clone.jdel, 1)
# D
model.update_attr('d', clone.d, 1)
model.update_attr2('d2del', clone.d, (clone.d5del, clone.d3del), 1)
# DJ
model.update_attr('dj', (clone.d, clone.j), 1)
# Insertion length
ins_vd = len(clone.vdins) - 1
model.update_attr('ins_vd', ins_vd, 1)
ins_dj = len(clone.djins) - 1
model.update_attr('ins_dj', ins_dj, 1)
# inserted nt given 5' nt
model.update_vd2next(clone.vdins, 1) # include the lastV
        model.update_dj2prev(clone.djins, 1)  # include the firstJ
return model
def get_recomb_stats_splitweight(clones):
    # in case of ambiguous calls, split the weight
model = RecombModel()
for clone in clones:
# V
model.update_attr('v', clone.v, 1.0)
model.update_attr2('v2del', clone.v, clone.vdel, 1.0)
# J
model.update_attr('j', clone.j, 1.0)
model.update_attr2('j2del', clone.j, clone.jdel, 1.0)
# D
model.update_attr('d', clone.d, 1.0)
model.update_attr2('d2del', clone.d, (clone.d5del, clone.d3del), 1.0)
# DJ
model.update_attr('dj', (clone.d, clone.j), 1.0)
# Insertion length
ins_vd = len(clone.vdins) - 1
model.update_attr('ins_vd', ins_vd, 1.0)
ins_dj = len(clone.djins) - 1
model.update_attr('ins_dj', ins_dj, 1.0)
# inserted nt given 5' nt
model.update_vd2next(clone.vdins, 1.0)
model.update_dj2prev(clone.djins, 1.0)
return model
def write_attr(mydict, outfile):
f = open(outfile, 'w')
keys = sorted(mydict.keys())
f.write("#Name\tFreq\n")
for k in keys:
f.write("%s\t%f\n" % (k, mydict[k]))
f.close()
def write_attr2(mydict, outfile):
f = open(outfile, 'w')
cols = sorted(mydict.keys())
f.write("#\t%s\n" % "\t".join(cols))
rows = []
for col in cols:
for r in mydict[col].keys():
if r not in rows:
rows.append(r)
for row in sorted(rows):
f.write("%s" % str(row))
for col in cols:
if row in mydict[col]:
f.write("\t%f" % mydict[col][row])
else:
f.write("\t0.0")
f.write("\n")
f.close()
def model_write(model, outdir):
    # write model probabilities to text files
for attr in ['v', 'j', 'd', 'dj', 'ins_vd', 'ins_dj']:
outfile = os.path.join(outdir, "%s.txt" % attr)
write_attr(model[attr], outfile)
for attr in ['v2del', 'j2del', 'd2del', 'vd2next', 'dj2prev']:
outfile = os.path.join(outdir, "%s.txt" % attr)
write_attr2(model[attr], outfile)
class ClonesRecombModel(Target):
    '''Get the recomb_model related counts for a subset of clones
    Return a "RecombModel" obj pickled to outfile
    '''
def __init__(self, infile, outfile):
Target.__init__(self)
self.infile = infile
self.outfile = outfile
def run(self):
clones = pickle.load(gzip.open(self.infile, 'rb'))
recomb_model = get_recomb_stats(clones)
pickle.dump(recomb_model, gzip.open(self.outfile, 'wb'))
class SampleRecombModelAgg(Target):
    '''Combine stats of each batch computed independently into one model
Convert count to frequencies (probabilities)
'''
def __init__(self, indir, outdir):
Target.__init__(self)
self.indir = indir
self.outdir = outdir
def run(self):
model = RecombModel()
for file in os.listdir(self.indir):
filepath = os.path.join(self.indir, file)
submodel = pickle.load(gzip.open(filepath, 'rb'))
model_update(model, submodel)
        # convert to frequencies
model_convert_to_freq(model)
outfile = os.path.join(self.outdir, "model.pickle")
pickle.dump(model, gzip.open(outfile, "wb"))
model_write(model, self.outdir)
class SampleRecombModel(Target):
'''Get the recombination model for the input sample
'''
def __init__(self, indir, outdir):
Target.__init__(self)
self.indir = indir # indir contains pickle files (batches) of clones
self.outdir = outdir # directory to put outfile file there
def run(self):
name = os.path.basename(self.indir.rstrip("/"))
global_dir = self.getGlobalTempDir()
tempdir = os.path.join(global_dir, "recom_model", name)
system("mkdir -p %s" % tempdir)
for file in os.listdir(self.indir): # each batch
if file == os.path.basename(self.indir):
continue
infile = os.path.join(self.indir, file)
outfile = os.path.join(tempdir, file)
self.addChildTarget(ClonesRecombModel(infile, outfile))
self.setFollowOnTarget(SampleRecombModelAgg(tempdir, self.outdir))
|
{
"content_hash": "8f4adf845a4dad19c73b60d5b640c2c2",
"timestamp": "",
"source": "github",
"line_count": 275,
"max_line_length": 87,
"avg_line_length": 35.57090909090909,
"alnum_prop": 0.5850541811490493,
"repo_name": "ngannguyen/aimseqtk",
"id": "f39dcc040c9f126744994b6b5edb8a5559823788",
"size": "9869",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/recomb/recomb_model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Perl",
"bytes": "239"
},
{
"name": "Python",
"bytes": "473996"
}
],
"symlink_target": ""
}
|
import os
import sys
def verify(msys_path):
if not msys_path:
sys.exit(
"Please set environment variable MSYS_PATH "
"to directory with make.exe"
)
make_found = False
for path in msys_path.split(';'):
if not os.path.isdir(path):
sys.exit(
"One of the MSYS_PATH components is not a directory: {} ".
format(path)
)
msys_make = os.path.join(path, 'make.exe')
if os.path.isfile(msys_make):
make_found = True
if not make_found:
sys.exit(
"File make.exe not found in "
"directories `{}` (MSYS_PATH environment variable)".format(msys_path)
)
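
# Hypothetical standalone usage (not in the original module): check the
# MSYS_PATH environment variable directly when run as a script.
if __name__ == '__main__':
    verify(os.environ.get('MSYS_PATH'))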
|
{
"content_hash": "a7279e9cf47e17fc5cb6aea1b13496a4",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 77,
"avg_line_length": 25.88,
"alnum_prop": 0.5935085007727975,
"repo_name": "idscan/polly",
"id": "6e9562965c48d7e3ba0579fb8390f0b036317f08",
"size": "708",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "bin/detail/verify_msys_path.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "26"
},
{
"name": "CMake",
"bytes": "636016"
},
{
"name": "Python",
"bytes": "89186"
},
{
"name": "Shell",
"bytes": "2426"
}
],
"symlink_target": ""
}
|
class NoMixedCaseMeta(type):
def __new__(cls, clsname, bases, clsdict):
for name in clsdict:
if name.lower() != name:
raise TypeError('Bad attribute name: ' + name)
return super().__new__(cls, clsname, bases, clsdict)
class Root(metaclass=NoMixedCaseMeta):
pass
class A(Root):
def foo_bar(self): # Ok
pass
print('**** About to generate a TypeError')
class B(Root):
def fooBar(self): # TypeError
pass
|
{
"content_hash": "2b03e1eab90d7b01c284a07a85132100",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 62,
"avg_line_length": 27.22222222222222,
"alnum_prop": 0.5816326530612245,
"repo_name": "tuanavu/python-cookbook-3rd",
"id": "78d32aaf2153e646feffcb5e92d9a5314ef0a94f",
"size": "548",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/9/enforcing_coding_conventions_in_classes/example1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "20265"
},
{
"name": "CSS",
"bytes": "184"
},
{
"name": "Jupyter Notebook",
"bytes": "219413"
},
{
"name": "Makefile",
"bytes": "231"
},
{
"name": "Python",
"bytes": "250592"
},
{
"name": "Shell",
"bytes": "179"
}
],
"symlink_target": ""
}
|
"""Mappings for OpenSearch v2."""
|
{
"content_hash": "50435c598eea922d186b0d6eb36ea49f",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 33,
"avg_line_length": 34,
"alnum_prop": 0.6764705882352942,
"repo_name": "inveniosoftware/invenio-communities",
"id": "8ad3be2550179838b299cf7514a44827dcfee361",
"size": "263",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "invenio_communities/members/records/mappings/os-v2/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "21753"
},
{
"name": "JavaScript",
"bytes": "226672"
},
{
"name": "Python",
"bytes": "361750"
},
{
"name": "Shell",
"bytes": "1881"
}
],
"symlink_target": ""
}
|
"""
Contains a CLICommand that can issue DELETE requests.
Uses the following from :py:class:`swiftly.cli.context.CLIContext`:
============== =====================================================
cdn True if the CDN Management URL should be used instead
of the Storage URL.
client_manager For connecting to Swift.
concurrency The number of concurrent actions that can be
performed.
headers A dict of headers to send.
ignore_404 True if 404s should be silently ignored.
io_manager For directing output.
query A dict of query parameters to send.
============== =====================================================
"""
"""
Copyright 2011-2013 Gregory Holt
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from swiftly.concurrency import Concurrency
from swiftly.cli.command import CLICommand, ReturnCode
def cli_empty_account(context, yes_empty_account=False, until_empty=False):
"""
Deletes all objects and containers in the account.
You must set yes_empty_account to True to verify you really want to
do this.
By default, this will perform one pass at deleting all objects and
containers; so if objects revert to previous versions or if new
objects or containers otherwise arise during the process, the
account may not be empty once done.
Set `until_empty` to True if you want multiple passes to keep trying
to fully empty and delete the containers. Note until_empty=True
could run forever if something else is making new items faster than
they're being deleted.
See :py:mod:`swiftly.cli.delete` for context usage information.
See :py:class:`CLIDelete` for more information.
"""
if not yes_empty_account:
raise ReturnCode(
'called cli_empty_account without setting yes_empty_account=True')
marker = None
while True:
with context.client_manager.with_client() as client:
status, reason, headers, contents = client.get_account(
marker=marker, headers=context.headers, query=context.query,
cdn=context.cdn)
if status // 100 != 2:
if status == 404 and context.ignore_404:
return
raise ReturnCode('listing account: %s %s' % (status, reason))
if not contents:
if until_empty and marker:
marker = None
continue
break
for item in contents:
cli_delete(
context, item['name'], context.headers, recursive=True)
marker = item['name']
def cli_empty_container(context, path, until_empty=False):
"""
Deletes all objects in the container.
By default, this will perform one pass at deleting all objects in
the container; so if objects revert to previous versions or if new
objects otherwise arise during the process, the container may not be
empty once done.
Set `until_empty` to True if you want multiple passes to keep trying
to fully empty the container. Note until_empty=True could run
forever if something else is making new objects faster than they're
being deleted.
See :py:mod:`swiftly.cli.delete` for context usage information.
See :py:class:`CLIDelete` for more information.
"""
path = path.rstrip('/').decode('utf8')
conc = Concurrency(context.concurrency)
def check_conc():
for (exc_type, exc_value, exc_tb, result) in \
conc.get_results().itervalues():
if exc_value:
with context.io_manager.with_stderr() as fp:
fp.write(str(exc_value))
fp.write('\n')
fp.flush()
marker = None
while True:
with context.client_manager.with_client() as client:
status, reason, headers, contents = client.get_container(
path, marker=marker, headers=context.headers,
query=context.query, cdn=context.cdn)
if status // 100 != 2:
if status == 404 and context.ignore_404:
return
raise ReturnCode(
'listing container %r: %s %s' % (path, status, reason))
if not contents:
if until_empty and marker:
marker = None
continue
break
for item in contents:
newpath = '%s/%s' % (path, item['name'])
new_context = context.copy()
new_context.ignore_404 = True
check_conc()
conc.spawn(newpath, cli_delete, new_context, newpath)
marker = item['name']
conc.join()
check_conc()
def cli_delete(context, path, body=None, recursive=False,
yes_empty_account=False, yes_delete_account=False,
until_empty=False):
"""
Deletes the item (account, container, or object) at the path.
See :py:mod:`swiftly.cli.delete` for context usage information.
See :py:class:`CLIDelete` for more information.
:param context: The :py:class:`swiftly.cli.context.CLIContext` to
use.
    :param path: The path of the item (account, container, or object)
to delete.
:param body: The body to send with the DELETE request. Bodies are
not normally sent with DELETE requests, but this can be
useful with bulk deletes for instance.
:param recursive: If True and the item is an account or
container, deletes will be issued for any containing items as
well. This does one pass at the deletion; so if objects revert
to previous versions or if new objects otherwise arise during
the process, the container(s) may not be empty once done. Set
`until_empty` to True if you want multiple passes to keep trying
to fully empty the containers.
:param until_empty: If True and recursive is True, this will cause
Swiftly to keep looping through the deletes until the containers
are completely empty. Useful if you have object versioning
turned on or otherwise have objects that seemingly reappear
after being deleted. It could also run forever if you have
something that's uploading objects at a faster rate than they
are deleted.
:param yes_empty_account: This must be set to True for
verification when the item is an account and recursive is
True.
:param yes_delete_account: This must be set to True for
verification when the item is an account and you really wish
a delete to be issued for the account itself.
"""
path = path.lstrip('/') if path else ''
if not path:
if yes_empty_account:
cli_empty_account(
context, yes_empty_account=yes_empty_account,
until_empty=until_empty)
if yes_delete_account:
with context.client_manager.with_client() as client:
status, reason, headers, contents = client.delete_account(
headers=context.headers, query=context.query,
cdn=context.cdn, body=body,
yes_i_mean_delete_the_account=yes_delete_account)
if status // 100 != 2:
if status == 404 and context.ignore_404:
return
raise ReturnCode(
'deleting account: %s %s' % (status, reason))
elif '/' not in path.rstrip('/'):
path = path.rstrip('/')
if recursive:
cli_empty_container(context, path, until_empty=until_empty)
with context.client_manager.with_client() as client:
status, reason, headers, contents = client.delete_container(
path, headers=context.headers,
query=context.query, cdn=context.cdn, body=body)
if status // 100 != 2:
if status == 404 and context.ignore_404:
return
raise ReturnCode(
'deleting container %r: %s %s' % (path, status, reason))
else:
with context.client_manager.with_client() as client:
status, reason, headers, contents = client.delete_object(
*path.split('/', 1), headers=context.headers,
query=context.query, cdn=context.cdn, body=body)
if status // 100 != 2:
if status == 404 and context.ignore_404:
return
raise ReturnCode(
'deleting object %r: %s %s' % (path, status, reason))
class CLIDelete(CLICommand):
"""
A CLICommand that can issue DELETE requests.
See the output of ``swiftly help delete`` for more information.
"""
def __init__(self, cli):
super(CLIDelete, self).__init__(
cli, 'delete', max_args=1, usage="""
Usage: %prog [main_options] delete [options] [path]
For help on [main_options] run %prog with no args.
Issues a DELETE request of the [path] given.""".strip())
self.option_parser.add_option(
'-h', '-H', '--header', dest='header', action='append',
metavar='HEADER:VALUE',
help='Add a header to the request. This can be used multiple '
'times for multiple headers. Examples: '
'-hx-some-header:some-value -h "X-Some-Other-Header: Some '
'other value"')
self.option_parser.add_option(
'-q', '--query', dest='query', action='append',
metavar='NAME[=VALUE]',
help='Add a query parameter to the request. This can be used '
'multiple times for multiple query parameters. Example: '
'-qmultipart-manifest=get')
self.option_parser.add_option(
'-i', '--input', dest='input_', metavar='PATH',
help='Indicates where to read the DELETE request body from; '
'use a dash (as in "-i -") to specify standard input since '
'DELETEs do not normally take input.')
self.option_parser.add_option(
'--recursive', dest='recursive', action='store_true',
help='Normally a delete for a non-empty container will error with '
'a 409 Conflict; --recursive will first delete all objects '
'in a container and then delete the container itself. For an '
'account delete, all containers and objects will be deleted '
'(requires the --yes-i-mean-empty-the-account option). Note '
'that this will do just one pass at deletion, so if objects '
'revert to previous versions or somehow otherwise arise '
'after the deletion pass, the container or account may not '
                 'be fully empty once done. See --until-empty for a '
'multiple-pass option.')
self.option_parser.add_option(
'--until-empty', dest='until_empty', action='store_true',
help='If used with --recursive, multiple passes will be attempted '
'to empty all the containers of objects and the account of '
                 'all containers. Note that this could run forever if there is '
'something else creating items faster than they are deleted.')
self.option_parser.add_option(
'--yes-i-mean-empty-the-account', dest='yes_empty_account',
action='store_true',
help='Required when issuing a delete directly on an account with '
'the --recursive option. This will delete all containers and '
'objects in the account without deleting the account itself, '
'leaving an empty account. THERE IS NO GOING BACK!')
self.option_parser.add_option(
'--yes-i-mean-delete-the-account', dest='yes_delete_account',
action='store_true',
help='Required when issuing a delete directly on an account. Some '
'Swift clusters do not support this. Those that do will mark '
'the account as deleted and immediately begin removing the '
                 'objects from the cluster in the background. THERE IS NO '
'GOING BACK!')
self.option_parser.add_option(
'--ignore-404', dest='ignore_404', action='store_true',
help='Ignores 404 Not Found responses; the exit code will be 0 '
'instead of 1.')
def __call__(self, args):
options, args, context = self.parse_args_and_create_context(args)
context.headers = self.options_list_to_lowered_dict(options.header)
context.query = self.options_list_to_lowered_dict(options.query)
context.ignore_404 = options.ignore_404
path = args.pop(0).lstrip('/') if args else None
body = None
if options.input_:
if options.input_ == '-':
body = self.cli.context.io_manager.get_stdin()
else:
body = open(options.input_, 'rb')
recursive = options.recursive
until_empty = options.until_empty
yes_empty_account = options.yes_empty_account
yes_delete_account = options.yes_delete_account
if not path:
if not recursive:
if not yes_delete_account:
raise ReturnCode("""
A delete directly on an account requires the --yes-i-mean-delete-the-account
option as well.
Some Swift clusters do not support this.
Those that do will mark the account as deleted and immediately begin removing
the objects from the cluster in the background.
THERE IS NO GOING BACK!""".strip())
else:
if not yes_empty_account:
raise ReturnCode("""
A delete --recursive directly on an account requires the
--yes-i-mean-empty-the-account option as well.
All containers and objects in the account will be deleted, leaving an empty
account.
THERE IS NO GOING BACK!""".strip())
return cli_delete(
context, path, body=body, recursive=recursive,
yes_empty_account=yes_empty_account,
yes_delete_account=yes_delete_account, until_empty=until_empty)
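# Hedged command-line sketches: the option names come from the option_parser
# definitions above, while the container and object names are invented.
#
#     swiftly delete some-container/some-object
#     swiftly delete --recursive some-container
#     swiftly delete --recursive --until-empty --ignore-404 some-container
#     swiftly delete --recursive --yes-i-mean-empty-the-account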
|
{
"content_hash": "d335c86e259a813f6b251a722a959483",
"timestamp": "",
"source": "github",
"line_count": 332,
"max_line_length": 79,
"avg_line_length": 44.2289156626506,
"alnum_prop": 0.6082811223099973,
"repo_name": "rackerlabs/swiftly",
"id": "37e2ddf28d12a81462c37617ee22632761ee3f4b",
"size": "14684",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "swiftly/cli/delete.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "287648"
}
],
"symlink_target": ""
}
|
from django.core.management.base import BaseCommand
from django.shortcuts import get_object_or_404
from bulbs.content.models import FeatureType
class Command(BaseCommand):
help = "Set a provided FeatureType as available for Facebook Instant Articles."
def add_arguments(self, parser):
parser.add_argument(
"--slug",
dest="featuretype_slug",
help="The slug for the desired FeatureType.",
required=True,
type=str
)
def handle(self, *args, **kwargs):
featuretype_slug = kwargs.get("featuretype_slug")
featuretype = get_object_or_404(FeatureType, slug=featuretype_slug)
if not featuretype.instant_article:
featuretype.instant_article = True
featuretype.save()
        # Celery is not configured when running management commands, so
        # reindex the content synchronously here.
for content in featuretype.content_set.all():
content.index()
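# Hedged usage sketch; the slug value is invented for illustration:
#
#     python manage.py migrate_ia_featuretype --slug=some-feature-type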
|
{
"content_hash": "5c66968f5e31032b415a8df7e3f30cfe",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 83,
"avg_line_length": 33.714285714285715,
"alnum_prop": 0.638771186440678,
"repo_name": "theonion/django-bulbs",
"id": "4fe90273fc2920b0424ecfe27ee480366cd8d58a",
"size": "944",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bulbs/instant_articles/management/commands/migrate_ia_featuretype.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "36651"
},
{
"name": "HTML",
"bytes": "73968"
},
{
"name": "JavaScript",
"bytes": "57288"
},
{
"name": "Python",
"bytes": "1055540"
},
{
"name": "Ruby",
"bytes": "397"
},
{
"name": "Shell",
"bytes": "1629"
}
],
"symlink_target": ""
}
|
"""
Tests for IBM Model 5 training methods
"""
import unittest
from collections import defaultdict
from nltk.translate import AlignedSent
from nltk.translate import IBMModel
from nltk.translate import IBMModel4
from nltk.translate import IBMModel5
from nltk.translate.ibm_model import AlignmentInfo
class TestIBMModel5(unittest.TestCase):
def test_set_uniform_vacancy_probabilities_of_max_displacements(self):
# arrange
src_classes = {'schinken': 0, 'eier': 0, 'spam': 1}
trg_classes = {'ham': 0, 'eggs': 1, 'spam': 2}
corpus = [
AlignedSent(['ham', 'eggs'], ['schinken', 'schinken', 'eier']),
AlignedSent(['spam', 'spam', 'spam', 'spam'], ['spam', 'spam']),
]
model5 = IBMModel5(corpus, 0, src_classes, trg_classes)
# act
model5.set_uniform_probabilities(corpus)
# assert
# number of vacancy difference values =
# 2 * number of words in longest target sentence
expected_prob = 1.0 / (2 * 4)
# examine the boundary values for (dv, max_v, trg_class)
self.assertEqual(model5.head_vacancy_table[4][4][0], expected_prob)
self.assertEqual(model5.head_vacancy_table[-3][1][2], expected_prob)
self.assertEqual(model5.non_head_vacancy_table[4][4][0], expected_prob)
self.assertEqual(model5.non_head_vacancy_table[-3][1][2], expected_prob)
def test_set_uniform_vacancy_probabilities_of_non_domain_values(self):
# arrange
src_classes = {'schinken': 0, 'eier': 0, 'spam': 1}
trg_classes = {'ham': 0, 'eggs': 1, 'spam': 2}
corpus = [
AlignedSent(['ham', 'eggs'], ['schinken', 'schinken', 'eier']),
AlignedSent(['spam', 'spam', 'spam', 'spam'], ['spam', 'spam']),
]
model5 = IBMModel5(corpus, 0, src_classes, trg_classes)
# act
model5.set_uniform_probabilities(corpus)
# assert
# examine dv and max_v values that are not in the training data domain
self.assertEqual(model5.head_vacancy_table[5][4][0],
IBMModel.MIN_PROB)
self.assertEqual(model5.head_vacancy_table[-4][1][2],
IBMModel.MIN_PROB)
self.assertEqual(model5.head_vacancy_table[4][0][0],
IBMModel.MIN_PROB)
self.assertEqual(model5.non_head_vacancy_table[5][4][0],
IBMModel.MIN_PROB)
self.assertEqual(model5.non_head_vacancy_table[-4][1][2],
IBMModel.MIN_PROB)
def test_prob_t_a_given_s(self):
# arrange
src_sentence = ["ich", 'esse', 'ja', 'gern', 'räucherschinken']
trg_sentence = ['i', 'love', 'to', 'eat', 'smoked', 'ham']
src_classes = {'räucherschinken': 0, 'ja': 1, 'ich': 2, 'esse': 3,
'gern': 4}
trg_classes = {'ham': 0, 'smoked': 1, 'i': 3, 'love': 4, 'to': 2,
'eat': 4}
corpus = [AlignedSent(trg_sentence, src_sentence)]
alignment_info = AlignmentInfo((0, 1, 4, 0, 2, 5, 5),
[None] + src_sentence,
['UNUSED'] + trg_sentence,
[[3], [1], [4], [], [2], [5, 6]])
head_vacancy_table = defaultdict(
lambda: defaultdict(lambda: defaultdict(float)))
head_vacancy_table[1 - 0][6][3] = 0.97 # ich -> i
head_vacancy_table[3 - 0][5][4] = 0.97 # esse -> eat
head_vacancy_table[1 - 2][4][4] = 0.97 # gern -> love
head_vacancy_table[2 - 0][2][1] = 0.97 # räucherschinken -> smoked
non_head_vacancy_table = defaultdict(
lambda: defaultdict(lambda: defaultdict(float)))
non_head_vacancy_table[1 - 0][1][0] = 0.96 # räucherschinken -> ham
translation_table = defaultdict(lambda: defaultdict(float))
translation_table['i']['ich'] = 0.98
translation_table['love']['gern'] = 0.98
translation_table['to'][None] = 0.98
translation_table['eat']['esse'] = 0.98
translation_table['smoked']['räucherschinken'] = 0.98
translation_table['ham']['räucherschinken'] = 0.98
fertility_table = defaultdict(lambda: defaultdict(float))
fertility_table[1]['ich'] = 0.99
fertility_table[1]['esse'] = 0.99
fertility_table[0]['ja'] = 0.99
fertility_table[1]['gern'] = 0.99
fertility_table[2]['räucherschinken'] = 0.999
fertility_table[1][None] = 0.99
probabilities = {
'p1': 0.167,
'translation_table': translation_table,
'fertility_table': fertility_table,
'head_vacancy_table': head_vacancy_table,
'non_head_vacancy_table': non_head_vacancy_table,
'head_distortion_table': None,
'non_head_distortion_table': None,
'alignment_table': None
}
model5 = IBMModel5(corpus, 0, src_classes, trg_classes,
probabilities)
# act
probability = model5.prob_t_a_given_s(alignment_info)
# assert
null_generation = 5 * pow(0.167, 1) * pow(0.833, 4)
fertility = 1*0.99 * 1*0.99 * 1*0.99 * 1*0.99 * 2*0.999
lexical_translation = 0.98 * 0.98 * 0.98 * 0.98 * 0.98 * 0.98
vacancy = 0.97 * 0.97 * 1 * 0.97 * 0.97 * 0.96
expected_probability = (null_generation * fertility *
lexical_translation * vacancy)
self.assertEqual(round(probability, 4), round(expected_probability, 4))
def test_prune(self):
# arrange
alignment_infos = [
AlignmentInfo((1, 1), None, None, None),
AlignmentInfo((1, 2), None, None, None),
AlignmentInfo((2, 1), None, None, None),
AlignmentInfo((2, 2), None, None, None),
AlignmentInfo((0, 0), None, None, None)
]
min_factor = IBMModel5.MIN_SCORE_FACTOR
best_score = 0.9
scores = {
(1, 1): min(min_factor * 1.5, 1) * best_score, # above threshold
(1, 2): best_score,
(2, 1): min_factor * best_score, # at threshold
(2, 2): min_factor * best_score * 0.5, # low score
(0, 0): min(min_factor * 1.1, 1) * 1.2 # above threshold
}
corpus = [AlignedSent(['a'], ['b'])]
original_prob_function = IBMModel4.model4_prob_t_a_given_s
# mock static method
IBMModel4.model4_prob_t_a_given_s = staticmethod(
lambda a, model: scores[a.alignment])
model5 = IBMModel5(corpus, 0, None, None)
# act
pruned_alignments = model5.prune(alignment_infos)
# assert
self.assertEqual(len(pruned_alignments), 3)
# restore static method
IBMModel4.model4_prob_t_a_given_s = original_prob_function
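# Hedged note, not part of the original file: as standard unittest tests,
# these can be run with the stock runner, e.g.:
#
#     python -m unittest nltk.test.unit.translate.test_ibm5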
|
{
"content_hash": "b53c24b7bbace7ecab5b3a1eb94a0f6a",
"timestamp": "",
"source": "github",
"line_count": 165,
"max_line_length": 80,
"avg_line_length": 42.842424242424244,
"alnum_prop": 0.5378412788230301,
"repo_name": "MyRookie/SentimentAnalyse",
"id": "9e9ce219436e962ccf79c6c4a36adff1897fa0af",
"size": "7101",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "venv/lib/python2.7/site-packages/nltk/test/unit/translate/test_ibm5.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "316238"
},
{
"name": "C++",
"bytes": "5171"
},
{
"name": "CSS",
"bytes": "6267"
},
{
"name": "FORTRAN",
"bytes": "3200"
},
{
"name": "HTML",
"bytes": "449"
},
{
"name": "JavaScript",
"bytes": "6187"
},
{
"name": "Prolog",
"bytes": "60188"
},
{
"name": "Python",
"bytes": "13690978"
},
{
"name": "Shell",
"bytes": "8340"
},
{
"name": "TeX",
"bytes": "212"
}
],
"symlink_target": ""
}
|
""" DeiT model configuration """
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/deit-base-distilled-patch16-224": "https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json",
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class DeiTConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a :class:`~transformers.DeiTModel`. It is used to
    instantiate a DeiT model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the DeiT
`facebook/deit-base-distilled-patch16-224 <https://huggingface.co/facebook/deit-base-distilled-patch16-224>`__
architecture.
Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used to control the model
outputs. Read the documentation from :class:`~transformers.PretrainedConfig` for more information.
Args:
hidden_size (:obj:`int`, `optional`, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (:obj:`int`, `optional`, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (:obj:`int`, `optional`, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (:obj:`int`, `optional`, defaults to 3072):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
hidden_act (:obj:`str` or :obj:`function`, `optional`, defaults to :obj:`"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string,
:obj:`"gelu"`, :obj:`"relu"`, :obj:`"selu"` and :obj:`"gelu_new"` are supported.
        hidden_dropout_prob (:obj:`float`, `optional`, defaults to 0.0):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (:obj:`float`, `optional`, defaults to 0.0):
            The dropout ratio for the attention probabilities.
initializer_range (:obj:`float`, `optional`, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (:obj:`float`, `optional`, defaults to 1e-12):
The epsilon used by the layer normalization layers.
gradient_checkpointing (:obj:`bool`, `optional`, defaults to :obj:`False`):
If True, use gradient checkpointing to save memory at the expense of slower backward pass.
image_size (:obj:`int`, `optional`, defaults to :obj:`224`):
The size (resolution) of each image.
patch_size (:obj:`int`, `optional`, defaults to :obj:`16`):
The size (resolution) of each patch.
num_channels (:obj:`int`, `optional`, defaults to :obj:`3`):
The number of input channels.
Example::
>>> from transformers import DeiTModel, DeiTConfig
>>> # Initializing a DeiT deit-base-distilled-patch16-224 style configuration
>>> configuration = DeiTConfig()
>>> # Initializing a model from the deit-base-distilled-patch16-224 style configuration
>>> model = DeiTModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
"""
model_type = "deit"
def __init__(
self,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.0,
attention_probs_dropout_prob=0.0,
initializer_range=0.02,
layer_norm_eps=1e-12,
is_encoder_decoder=False,
image_size=224,
patch_size=16,
num_channels=3,
**kwargs
):
super().__init__(**kwargs)
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
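# Hedged sketch, illustrative only: overriding a couple of the defaults
# defined above while leaving the rest untouched.
#
#     config = DeiTConfig(image_size=384, patch_size=32)
#     assert config.num_channels == 3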
|
{
"content_hash": "cd6943fa5f3f441cd9d848614c8dc53c",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 129,
"avg_line_length": 45.83495145631068,
"alnum_prop": 0.6606651133234485,
"repo_name": "huggingface/pytorch-transformers",
"id": "0bbbff709b83f7ebb5dff7f79e99abadef43f59e",
"size": "5379",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/transformers/models/deit/configuration_deit.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "194"
},
{
"name": "Jupyter Notebook",
"bytes": "535623"
},
{
"name": "Python",
"bytes": "897445"
}
],
"symlink_target": ""
}
|
"""Removes the `users.username` column and adds the `users.facebook_id` column.
Revision ID: ddd00fbe2758
Revises: b5c16f116e54
Create Date: 2017-06-19 21:49:45.269126
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = 'ddd00fbe2758'
down_revision = 'b5c16f116e54'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('users', sa.Column('facebook_id', sa.String(length=80), nullable=True))
op.create_index(op.f('ix_users_facebook_id'), 'users', ['facebook_id'], unique=True)
op.drop_constraint('users_username_key', 'users', type_='unique')
op.drop_column('users', 'username')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('users', sa.Column('username', sa.VARCHAR(length=80), autoincrement=False, nullable=False))
op.create_unique_constraint('users_username_key', 'users', ['username'])
op.drop_index(op.f('ix_users_facebook_id'), table_name='users')
op.drop_column('users', 'facebook_id')
# ### end Alembic commands ###
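# Hedged usage notes (standard Alembic commands, not part of this revision):
#
#     alembic upgrade ddd00fbe2758    # apply this migration
#     alembic downgrade b5c16f116e54  # revert to the previous revision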
|
{
"content_hash": "9c7faeb4e7acc8597677da4a406122fa",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 109,
"avg_line_length": 35.878787878787875,
"alnum_prop": 0.6875,
"repo_name": "Rdbaker/Mealbound",
"id": "99f31ebcf46817d9c2859699a369b533e829de05",
"size": "1184",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "migrations/versions/ddd00fbe2758_.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "758133"
},
{
"name": "HTML",
"bytes": "21264"
},
{
"name": "JavaScript",
"bytes": "747474"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "200911"
},
{
"name": "Shell",
"bytes": "210"
},
{
"name": "TypeScript",
"bytes": "166005"
}
],
"symlink_target": ""
}
|
"""The tests for numeric state automation."""
import unittest
from homeassistant.bootstrap import setup_component
import homeassistant.components.automation as automation
from tests.common import get_test_home_assistant
class TestAutomationNumericState(unittest.TestCase):
"""Test the event automation."""
def setUp(self): # pylint: disable=invalid-name
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.hass.config.components.append('group')
self.calls = []
def record_call(service):
"""Helper to record calls."""
self.calls.append(service)
self.hass.services.register('test', 'automation', record_call)
def tearDown(self): # pylint: disable=invalid-name
"""Stop everything that was started."""
self.hass.stop()
def test_if_fires_on_entity_change_below(self):
""""Test the firing with changed entity."""
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'numeric_state',
'entity_id': 'test.entity',
'below': 10,
},
'action': {
'service': 'test.automation'
}
}
})
# 9 is below 10
self.hass.states.set('test.entity', 9)
self.hass.block_till_done()
self.assertEqual(1, len(self.calls))
        # Set the state to 12 (above the threshold), then turn the automation off
self.hass.states.set('test.entity', 12)
automation.turn_off(self.hass)
self.hass.block_till_done()
self.hass.states.set('test.entity', 9)
self.hass.block_till_done()
self.assertEqual(1, len(self.calls))
def test_if_fires_on_entity_change_over_to_below(self):
""""Test the firing with changed entity."""
self.hass.states.set('test.entity', 11)
self.hass.block_till_done()
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'numeric_state',
'entity_id': 'test.entity',
'below': 10,
},
'action': {
'service': 'test.automation'
}
}
})
# 9 is below 10
self.hass.states.set('test.entity', 9)
self.hass.block_till_done()
self.assertEqual(1, len(self.calls))
def test_if_not_fires_on_entity_change_below_to_below(self):
""""Test the firing with changed entity."""
self.hass.states.set('test.entity', 9)
self.hass.block_till_done()
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'numeric_state',
'entity_id': 'test.entity',
'below': 10,
},
'action': {
'service': 'test.automation'
}
}
})
        # 8 is below 10, but the state was already below 10, so this should not fire again
self.hass.states.set('test.entity', 8)
self.hass.block_till_done()
self.assertEqual(0, len(self.calls))
def test_if_fires_on_entity_change_above(self):
""""Test the firing with changed entity."""
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'numeric_state',
'entity_id': 'test.entity',
'above': 10,
},
'action': {
'service': 'test.automation'
}
}
})
# 11 is above 10
self.hass.states.set('test.entity', 11)
self.hass.block_till_done()
self.assertEqual(1, len(self.calls))
def test_if_fires_on_entity_change_below_to_above(self):
""""Test the firing with changed entity."""
# set initial state
self.hass.states.set('test.entity', 9)
self.hass.block_till_done()
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'numeric_state',
'entity_id': 'test.entity',
'above': 10,
},
'action': {
'service': 'test.automation'
}
}
})
# 11 is above 10 and 9 is below
self.hass.states.set('test.entity', 11)
self.hass.block_till_done()
self.assertEqual(1, len(self.calls))
def test_if_not_fires_on_entity_change_above_to_above(self):
""""Test the firing with changed entity."""
# set initial state
self.hass.states.set('test.entity', 11)
self.hass.block_till_done()
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'numeric_state',
'entity_id': 'test.entity',
'above': 10,
},
'action': {
'service': 'test.automation'
}
}
})
        # 12 is above 10, but the state was already above 10, so this should not fire
self.hass.states.set('test.entity', 12)
self.hass.block_till_done()
self.assertEqual(0, len(self.calls))
def test_if_fires_on_entity_change_below_range(self):
""""Test the firing with changed entity."""
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'numeric_state',
'entity_id': 'test.entity',
'below': 10,
'above': 5,
},
'action': {
'service': 'test.automation'
}
}
})
# 9 is below 10
self.hass.states.set('test.entity', 9)
self.hass.block_till_done()
self.assertEqual(1, len(self.calls))
def test_if_fires_on_entity_change_below_above_range(self):
""""Test the firing with changed entity."""
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'numeric_state',
'entity_id': 'test.entity',
'below': 10,
'above': 5,
},
'action': {
'service': 'test.automation'
}
}
})
        # 4 is below 5 so it should not fire
self.hass.states.set('test.entity', 4)
self.hass.block_till_done()
self.assertEqual(0, len(self.calls))
def test_if_fires_on_entity_change_over_to_below_range(self):
""""Test the firing with changed entity."""
self.hass.states.set('test.entity', 11)
self.hass.block_till_done()
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'numeric_state',
'entity_id': 'test.entity',
'below': 10,
'above': 5,
},
'action': {
'service': 'test.automation'
}
}
})
# 9 is below 10
self.hass.states.set('test.entity', 9)
self.hass.block_till_done()
self.assertEqual(1, len(self.calls))
def test_if_fires_on_entity_change_over_to_below_above_range(self):
""""Test the firing with changed entity."""
self.hass.states.set('test.entity', 11)
self.hass.block_till_done()
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'numeric_state',
'entity_id': 'test.entity',
'below': 10,
'above': 5,
},
'action': {
'service': 'test.automation'
}
}
})
# 4 is below 5 so it should not fire
self.hass.states.set('test.entity', 4)
self.hass.block_till_done()
self.assertEqual(0, len(self.calls))
def test_if_not_fires_if_entity_not_match(self):
""""Test if not fired with non matching entity."""
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'numeric_state',
'entity_id': 'test.another_entity',
'below': 100,
},
'action': {
'service': 'test.automation'
}
}
})
self.hass.states.set('test.entity', 11)
self.hass.block_till_done()
self.assertEqual(0, len(self.calls))
def test_if_fires_on_entity_change_below_with_attribute(self):
""""Test attributes change."""
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'numeric_state',
'entity_id': 'test.entity',
'below': 10,
},
'action': {
'service': 'test.automation'
}
}
})
# 9 is below 10
self.hass.states.set('test.entity', 9, {'test_attribute': 11})
self.hass.block_till_done()
self.assertEqual(1, len(self.calls))
def test_if_not_fires_on_entity_change_not_below_with_attribute(self):
""""Test attributes."""
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'numeric_state',
'entity_id': 'test.entity',
'below': 10,
},
'action': {
'service': 'test.automation'
}
}
})
# 11 is not below 10
self.hass.states.set('test.entity', 11, {'test_attribute': 9})
self.hass.block_till_done()
self.assertEqual(0, len(self.calls))
def test_if_fires_on_attribute_change_with_attribute_below(self):
""""Test attributes change."""
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'numeric_state',
'entity_id': 'test.entity',
'value_template': '{{ state.attributes.test_attribute }}',
'below': 10,
},
'action': {
'service': 'test.automation'
}
}
})
# 9 is below 10
self.hass.states.set('test.entity', 'entity', {'test_attribute': 9})
self.hass.block_till_done()
self.assertEqual(1, len(self.calls))
def test_if_not_fires_on_attribute_change_with_attribute_not_below(self):
""""Test attributes change."""
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'numeric_state',
'entity_id': 'test.entity',
'value_template': '{{ state.attributes.test_attribute }}',
'below': 10,
},
'action': {
'service': 'test.automation'
}
}
})
# 11 is not below 10
self.hass.states.set('test.entity', 'entity', {'test_attribute': 11})
self.hass.block_till_done()
self.assertEqual(0, len(self.calls))
def test_if_not_fires_on_entity_change_with_attribute_below(self):
""""Test attributes change."""
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'numeric_state',
'entity_id': 'test.entity',
'value_template': '{{ state.attributes.test_attribute }}',
'below': 10,
},
'action': {
'service': 'test.automation'
}
}
})
# 11 is not below 10, entity state value should not be tested
self.hass.states.set('test.entity', '9', {'test_attribute': 11})
self.hass.block_till_done()
self.assertEqual(0, len(self.calls))
def test_if_not_fires_on_entity_change_with_not_attribute_below(self):
""""Test attributes change."""
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'numeric_state',
'entity_id': 'test.entity',
'value_template': '{{ state.attributes.test_attribute }}',
'below': 10,
},
'action': {
'service': 'test.automation'
}
}
})
        # No test_attribute is set, so the trigger should not fire
self.hass.states.set('test.entity', 'entity')
self.hass.block_till_done()
self.assertEqual(0, len(self.calls))
def test_fires_on_attr_change_with_attribute_below_and_multiple_attr(self):
""""Test attributes change."""
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'numeric_state',
'entity_id': 'test.entity',
'value_template': '{{ state.attributes.test_attribute }}',
'below': 10,
},
'action': {
'service': 'test.automation'
}
}
})
        # test_attribute 9 is below 10, so this should fire
self.hass.states.set('test.entity', 'entity',
{'test_attribute': 9, 'not_test_attribute': 11})
self.hass.block_till_done()
self.assertEqual(1, len(self.calls))
def test_template_list(self):
""""Test template list."""
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'numeric_state',
'entity_id': 'test.entity',
'value_template':
'{{ state.attributes.test_attribute[2] }}',
'below': 10,
},
'action': {
'service': 'test.automation'
}
}
})
# 3 is below 10
self.hass.states.set('test.entity', 'entity',
{'test_attribute': [11, 15, 3]})
self.hass.block_till_done()
self.assertEqual(1, len(self.calls))
def test_template_string(self):
""""Test template string."""
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'numeric_state',
'entity_id': 'test.entity',
'value_template':
'{{ state.attributes.test_attribute | multiply(10) }}',
'below': 10,
},
'action': {
'service': 'test.automation',
'data_template': {
'some': '{{ trigger.%s }}' % '}} - {{ trigger.'.join((
'platform', 'entity_id', 'below', 'above',
'from_state.state', 'to_state.state'))
},
}
}
})
self.hass.states.set('test.entity', 'test state 1',
{'test_attribute': '1.2'})
self.hass.block_till_done()
self.hass.states.set('test.entity', 'test state 2',
{'test_attribute': '0.9'})
self.hass.block_till_done()
self.assertEqual(1, len(self.calls))
self.assertEqual(
'numeric_state - test.entity - 10.0 - None - test state 1 - '
'test state 2',
self.calls[0].data['some'])
def test_not_fires_on_attr_change_with_attr_not_below_multiple_attr(self):
""""Test if not fired changed attributes."""
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'numeric_state',
'entity_id': 'test.entity',
'value_template': '{{ state.attributes.test_attribute }}',
'below': 10,
},
'action': {
'service': 'test.automation'
}
}
})
# 11 is not below 10
self.hass.states.set('test.entity', 'entity',
{'test_attribute': 11, 'not_test_attribute': 9})
self.hass.block_till_done()
self.assertEqual(0, len(self.calls))
def test_if_action(self):
""""Test if action."""
entity_id = 'domain.test_entity'
test_state = 10
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'event',
'event_type': 'test_event',
},
'condition': {
'condition': 'numeric_state',
'entity_id': entity_id,
'above': test_state,
'below': test_state + 2
},
'action': {
'service': 'test.automation'
}
}
})
self.hass.states.set(entity_id, test_state)
self.hass.bus.fire('test_event')
self.hass.block_till_done()
self.assertEqual(1, len(self.calls))
self.hass.states.set(entity_id, test_state - 1)
self.hass.bus.fire('test_event')
self.hass.block_till_done()
self.assertEqual(1, len(self.calls))
self.hass.states.set(entity_id, test_state + 1)
self.hass.bus.fire('test_event')
self.hass.block_till_done()
self.assertEqual(2, len(self.calls))
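# Hedged note, not part of the original file: these are standard
# unittest.TestCase tests and can typically be run with, for example:
#
#     python -m pytest tests/components/automation/test_numeric_state.py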
|
{
"content_hash": "c47e3dda3866fa5dfb3cfa88346cd4c2",
"timestamp": "",
"source": "github",
"line_count": 529,
"max_line_length": 79,
"avg_line_length": 35.61058601134216,
"alnum_prop": 0.473564072619174,
"repo_name": "betrisey/home-assistant",
"id": "fa2d237ee00c97ed795f1bb3d9da4612d0b16e24",
"size": "18838",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "tests/components/automation/test_numeric_state.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1446205"
},
{
"name": "Python",
"bytes": "3827469"
},
{
"name": "Ruby",
"bytes": "379"
},
{
"name": "Shell",
"bytes": "7255"
}
],
"symlink_target": ""
}
|
from m5.SimObject import *
from m5.defines import buildEnv
from m5.params import *
from m5.proxy import *
from DVFSHandler import *
from SimpleMemory import *
class MemoryMode(Enum): vals = ['invalid', 'atomic', 'timing',
'atomic_noncaching']
class System(MemObject):
type = 'System'
cxx_header = "sim/system.hh"
system_port = MasterPort("System port")
cxx_exports = [
PyBindMethod("getMemoryMode"),
PyBindMethod("setMemoryMode"),
]
memories = VectorParam.AbstractMemory(Self.all,
"All memories in the system")
mem_mode = Param.MemoryMode('atomic', "The mode the memory system is in")
thermal_model = Param.ThermalModel(NULL, "Thermal model")
thermal_components = VectorParam.SimObject([],
"A collection of all thermal components in the system.")
# When reserving memory on the host, we have the option of
# reserving swap space or not (by passing MAP_NORESERVE to
# mmap). By enabling this flag, we accommodate cases where a large
# (but sparse) memory is simulated.
mmap_using_noreserve = Param.Bool(False, "mmap the backing store " \
"without reserving swap")
# The memory ranges are to be populated when creating the system
# such that these can be passed from the I/O subsystem through an
# I/O bridge or cache
mem_ranges = VectorParam.AddrRange([], "Ranges that constitute main memory")
cache_line_size = Param.Unsigned(64, "Cache line size in bytes")
exit_on_work_items = Param.Bool(False, "Exit from the simulation loop when "
"encountering work item annotations.")
work_item_id = Param.Int(-1, "specific work item id")
num_work_ids = Param.Int(16, "Number of distinct work item types")
work_begin_cpu_id_exit = Param.Int(-1,
"work started on specific id, now exit simulation")
work_begin_ckpt_count = Param.Counter(0,
"create checkpoint when work items begin count value is reached")
work_begin_exit_count = Param.Counter(0,
"exit simulation when work items begin count value is reached")
work_end_ckpt_count = Param.Counter(0,
"create checkpoint when work items end count value is reached")
work_end_exit_count = Param.Counter(0,
"exit simulation when work items end count value is reached")
work_cpus_ckpt_count = Param.Counter(0,
"create checkpoint when active cpu count value is reached")
init_param = Param.UInt64(0, "numerical value to pass into simulator")
boot_osflags = Param.String("a", "boot flags to pass to the kernel")
kernel = Param.String("", "file that contains the kernel code")
kernel_addr_check = Param.Bool(True,
"whether to address check on kernel (disable for baremetal)")
readfile = Param.String("", "file to read startup script from")
symbolfile = Param.String("", "file to get the symbols from")
load_addr_mask = Param.UInt64(0xffffffffff,
"Address to mask loading binaries with")
load_offset = Param.UInt64(0, "Address to offset loading binaries with")
multi_thread = Param.Bool(False,
"Supports multi-threaded CPUs? Impacts Thread/Context IDs")
# Dynamic voltage and frequency handler for the system, disabled by default
# Provide list of domains that need to be controlled by the handler
dvfs_handler = DVFSHandler()
if buildEnv['USE_KVM']:
kvm_vm = Param.KvmVM(NULL, 'KVM VM (i.e., shared memory domain)')
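# Hedged configuration sketch, illustrative only: typical assignments a gem5
# configuration script might make for the parameters declared above. The
# memory range value is invented.
#
#     system = System()
#     system.mem_mode = 'timing'
#     system.mem_ranges = [AddrRange('512MB')]
#     system.cache_line_size = 64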
|
{
"content_hash": "30a80bc7b03b2be0d9e9086e5edd1c75",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 80,
"avg_line_length": 44.8875,
"alnum_prop": 0.6636034530771373,
"repo_name": "gedare/gem5",
"id": "53377989d4b800cf79e519d603bf74d421132bc5",
"size": "5256",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "src/sim/System.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "346716"
},
{
"name": "Awk",
"bytes": "3386"
},
{
"name": "C",
"bytes": "1211355"
},
{
"name": "C++",
"bytes": "20825474"
},
{
"name": "CMake",
"bytes": "35626"
},
{
"name": "Emacs Lisp",
"bytes": "1969"
},
{
"name": "HTML",
"bytes": "136898"
},
{
"name": "Java",
"bytes": "3179"
},
{
"name": "M4",
"bytes": "49620"
},
{
"name": "Makefile",
"bytes": "38368"
},
{
"name": "Perl",
"bytes": "33602"
},
{
"name": "Protocol Buffer",
"bytes": "11148"
},
{
"name": "Python",
"bytes": "4745439"
},
{
"name": "Roff",
"bytes": "8783"
},
{
"name": "Shell",
"bytes": "74060"
},
{
"name": "Vim script",
"bytes": "4335"
},
{
"name": "Visual Basic",
"bytes": "2884"
}
],
"symlink_target": ""
}
|
import copy
from django.test import TestCase
from mock import patch, call, MagicMock
from datetime import datetime
from graphite.render.datalib import TimeSeries
from graphite.render import functions
from graphite.render.functions import NormalizeEmptyResultError
def return_greater(series, value):
return [i for i in series if i is not None and i > value]
def return_less(series, value):
return [i for i in series if i is not None and i < value]
class FunctionsTest(TestCase):
def test_highest_max(self):
config = [20, 50, 30, 40]
seriesList = [range(max_val) for max_val in config]
        # Expect the test results to be returned in descending order
expected = [
[seriesList[1]],
[seriesList[1], seriesList[3]],
[seriesList[1], seriesList[3], seriesList[2]],
# Test where num_return == len(seriesList)
[seriesList[1], seriesList[3], seriesList[2], seriesList[0]],
# Test where num_return > len(seriesList)
[seriesList[1], seriesList[3], seriesList[2], seriesList[0]],
]
for index, test in enumerate(expected):
results = functions.highestMax({}, seriesList, index + 1)
self.assertEqual(test, results)
def test_highest_max_empty_series_list(self):
# Test the function works properly with an empty seriesList provided.
self.assertEqual([], functions.highestMax({}, [], 1))
def testGetPercentile(self):
seriesList = [
([None, None, 15, 20, 35, 40, 50], 20),
(range(100), 30),
(range(200), 60),
(range(300), 90),
(range(1, 101), 31),
(range(1, 201), 61),
(range(1, 301), 91),
(range(0, 102), 30),
(range(1, 203), 61),
(range(1, 303), 91),
]
for index, conf in enumerate(seriesList):
series, expected = conf
result = functions._getPercentile(series, 30)
self.assertEqual(expected, result, 'For series index <%s> the 30th percentile ordinal is not %d, but %d ' % (index, expected, result))
def test_integral(self):
seriesList = [TimeSeries('test', 0, 600, 60, [None, 1, 2, 3, 4, 5, None, 6, 7, 8])]
expected = [TimeSeries('integral(test)', 0, 600, 60, [None, 1, 3, 6, 10, 15, None, 21, 28, 36])]
result = functions.integral({}, seriesList)
self.assertEqual(expected, result, 'integral result incorrect')
def test_integralByInterval(self):
seriesList = [TimeSeries('test', 0, 600, 60, [None, 1, 2, 3, 4, 5, None, 6, 7, 8])]
expected = [TimeSeries("integral(test,'2min')", 0, 600, 60, [0, 1, 2, 5, 4, 9, 0, 6, 7, 15])]
result = functions.integralByInterval({'startTime' : datetime(1970,1,1)}, seriesList, '2min')
self.assertEqual(expected, result, 'integralByInterval result incorrect %s %s' %(result, result[0]))
def test_n_percentile(self):
seriesList = []
config = [
[15, 35, 20, 40, 50],
range(1, 101),
range(1, 201),
range(1, 301),
range(0, 100),
range(0, 200),
range(0, 300),
# Ensure None values in list has no effect.
[None, None, None] + range(0, 300),
]
for i, c in enumerate(config):
seriesList.append(TimeSeries('Test(%d)' % i, 0, 1, 1, c))
def n_percentile(perc, expected):
result = functions.nPercentile({}, seriesList, perc)
self.assertEqual(expected, result)
n_percentile(30, [[20], [31], [61], [91], [30], [60], [90], [90]])
n_percentile(90, [[50], [91], [181], [271], [90], [180], [270], [270]])
n_percentile(95, [[50], [96], [191], [286], [95], [190], [285], [285]])
def test_sorting_by_total(self):
seriesList = []
config = [[1000, 100, 10, 0], [1000, 100, 10, 1]]
for i, c in enumerate(config):
seriesList.append(TimeSeries('Test(%d)' % i, 0, 0, 0, c))
self.assertEqual(1110, functions.safeSum(seriesList[0]))
result = functions.sortByTotal({}, seriesList)
self.assertEqual(1111, functions.safeSum(result[0]))
self.assertEqual(1110, functions.safeSum(result[1]))
def _generate_series_list(self):
seriesList = []
config = [range(101), range(101), [1, None, None, None, None]]
for i, c in enumerate(config):
name = "collectd.test-db{0}.load.value".format(i + 1)
seriesList.append(TimeSeries(name, 0, 1, 1, c))
return seriesList
def test_remove_above_percentile(self):
seriesList = self._generate_series_list()
percent = 50
results = functions.removeAbovePercentile({}, seriesList, percent)
for result in results:
self.assertListEqual(return_greater(result, percent), [])
def test_remove_below_percentile(self):
seriesList = self._generate_series_list()
percent = 50
results = functions.removeBelowPercentile({}, seriesList, percent)
expected = [[], [], [1]]
for i, result in enumerate(results):
self.assertListEqual(return_less(result, percent), expected[i])
def test_remove_above_value(self):
seriesList = self._generate_series_list()
value = 5
results = functions.removeAboveValue({}, seriesList, value)
for result in results:
self.assertListEqual(return_greater(result, value), [])
def test_remove_below_value(self):
seriesList = self._generate_series_list()
value = 5
results = functions.removeBelowValue({}, seriesList, value)
for result in results:
self.assertListEqual(return_less(result, value), [])
def test_limit(self):
seriesList = self._generate_series_list()
limit = len(seriesList) - 1
results = functions.limit({}, seriesList, limit)
self.assertEqual(len(results), limit,
"More than {0} results returned".format(limit),
)
def _verify_series_options(self, seriesList, name, value):
"""
Verify a given option is set and True for each series in a
series list
"""
for series in seriesList:
self.assertIn(name, series.options)
if value is True:
test_func = self.assertTrue
else:
test_func = self.assertEqual
test_func(series.options.get(name), value)
def test_second_y_axis(self):
seriesList = self._generate_series_list()
results = functions.secondYAxis({}, seriesList)
self._verify_series_options(results, "secondYAxis", True)
def test_draw_as_infinite(self):
seriesList = self._generate_series_list()
results = functions.drawAsInfinite({}, seriesList)
self._verify_series_options(results, "drawAsInfinite", True)
def test_line_width(self):
seriesList = self._generate_series_list()
width = 10
results = functions.lineWidth({}, seriesList, width)
self._verify_series_options(results, "lineWidth", width)
def test_transform_null(self):
seriesList = self._generate_series_list()
transform = -5
results = functions.transformNull({}, copy.deepcopy(seriesList), transform)
for counter, series in enumerate(seriesList):
            if None not in series:
continue
# If the None values weren't transformed, there is a problem
self.assertNotIn(None, results[counter],
"tranformNull should remove all None values",
)
# Anywhere a None was in the original series, verify it
# was transformed to the given value it should be.
for i, value in enumerate(series):
if value is None:
result_val = results[counter][i]
self.assertEqual(transform, result_val,
"Transformed value should be {0}, not {1}".format(transform, result_val),
)
def test_alias(self):
seriesList = self._generate_series_list()
substitution = "Ni!"
results = functions.alias({}, seriesList, substitution)
for series in results:
self.assertEqual(series.name, substitution)
def test_alias_sub(self):
seriesList = self._generate_series_list()
substitution = "Shrubbery"
results = functions.aliasSub({}, seriesList, "^\w+", substitution)
for series in results:
self.assertTrue(series.name.startswith(substitution),
"aliasSub should replace the name with {0}".format(substitution),
)
# TODO: Add tests for * globbing and {} matching to this
def test_alias_by_node(self):
seriesList = self._generate_series_list()
def verify_node_name(*nodes):
            # nodes is always collected as a tuple here because of *nodes
# Use deepcopy so the original seriesList is unmodified
results = functions.aliasByNode({}, copy.deepcopy(seriesList), *nodes)
for i, series in enumerate(results):
fragments = seriesList[i].name.split('.')
# Super simplistic. Doesn't match {thing1,thing2}
                # or glob with *, both of which graphite allows you to use
expected_name = '.'.join([fragments[i] for i in nodes])
self.assertEqual(series.name, expected_name)
verify_node_name(1)
verify_node_name(1, 0)
verify_node_name(-1, 0)
# Verify broken input causes broken output
with self.assertRaises(IndexError):
verify_node_name(10000)
def test_alpha(self):
seriesList = self._generate_series_list()
alpha = 0.5
results = functions.alpha({}, seriesList, alpha)
self._verify_series_options(results, "alpha", alpha)
def test_color(self):
seriesList = self._generate_series_list()
color = "red"
# Leave the original seriesList unmodified
results = functions.color({}, copy.deepcopy(seriesList), color)
for i, series in enumerate(results):
self.assertTrue(hasattr(series, "color"),
"The transformed seriesList is missing the 'color' attribute",
)
self.assertFalse(hasattr(seriesList[i], "color"),
"The original seriesList shouldn't have a 'color' attribute",
)
self.assertEqual(series.color, color)
def test_constantLine(self):
        requestContext = {'startTime': datetime(2014, 3, 12, 2, 0, 0), 'endTime': datetime(2014, 3, 12, 3, 0, 0)}
results = functions.constantLine(requestContext, [1])
def test_scale(self):
seriesList = self._generate_series_list()
multiplier = 2
# Leave the original seriesList undisturbed for verification
results = functions.scale({}, copy.deepcopy(seriesList), multiplier)
for i, series in enumerate(results):
for counter, value in enumerate(series):
if value is None:
continue
original_value = seriesList[i][counter]
expected_value = original_value * multiplier
self.assertEqual(value, expected_value)
def test_normalize_empty(self):
        with self.assertRaises(NormalizeEmptyResultError):
            functions.normalize([])
def _generate_mr_series(self):
seriesList = [
TimeSeries('group.server1.metric1',0,1,1,[None]),
TimeSeries('group.server1.metric2',0,1,1,[None]),
TimeSeries('group.server2.metric1',0,1,1,[None]),
TimeSeries('group.server2.metric2',0,1,1,[None]),
]
mappedResult = [
[seriesList[0],seriesList[1]],
[seriesList[2],seriesList[3]]
]
return (seriesList,mappedResult)
def test_mapSeries(self):
seriesList, expectedResult = self._generate_mr_series()
results = functions.mapSeries({}, copy.deepcopy(seriesList), 1)
self.assertEqual(results,expectedResult)
def test_reduceSeries(self):
sl, inputList = self._generate_mr_series()
expectedResult = [
TimeSeries('group.server1.reduce.mock',0,1,1,[None]),
TimeSeries('group.server2.reduce.mock',0,1,1,[None])
]
resultSeriesList = [TimeSeries('mock(series)',0,1,1,[None])]
mock = MagicMock(return_value = resultSeriesList)
with patch.dict(functions.SeriesFunctions,{ 'mock': mock }):
results = functions.reduceSeries({}, copy.deepcopy(inputList), "mock", 2, "metric1","metric2" )
self.assertEqual(results,expectedResult)
self.assertEqual(mock.mock_calls, [call({},inputList[0]), call({},inputList[1])])
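# Hedged note, not part of the original file: these are Django TestCase
# tests; within a graphite-web checkout they are typically run through
# Django's test runner (the exact invocation depends on the setup), e.g.:
#
#     python webapp/manage.py test tests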
|
{
"content_hash": "0f17c9f8e1c4a6015b2f2d0701227603",
"timestamp": "",
"source": "github",
"line_count": 321,
"max_line_length": 146,
"avg_line_length": 40.4392523364486,
"alnum_prop": 0.5895539634850936,
"repo_name": "g76r/graphite-web",
"id": "7a447465b463aa769419941dea8600cfe0cce872",
"size": "12981",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "webapp/tests/test_functions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "152063"
},
{
"name": "HTML",
"bytes": "37711"
},
{
"name": "JavaScript",
"bytes": "1919268"
},
{
"name": "Perl",
"bytes": "857"
},
{
"name": "Python",
"bytes": "426258"
},
{
"name": "Ruby",
"bytes": "1950"
},
{
"name": "Shell",
"bytes": "2197"
}
],
"symlink_target": ""
}
|
"""Tests for `lyricsmaster` package."""
import os
import sys
import codecs
import pytest
from click.testing import CliRunner
from bs4 import BeautifulSoup, Tag
from lyricsmaster import models
from lyricsmaster import cli
from lyricsmaster.providers import LyricWiki, AzLyrics, Genius, Lyrics007, \
MusixMatch
from lyricsmaster.utils import TorController, normalize
try:
basestring # Python 2.7 compatibility
except NameError:
basestring = str
import gevent.monkey
# Works for Python 2 and 3
try:
from importlib import reload
except ImportError:
try:
from imp import reload
except:
pass
def socket_is_patched():
return gevent.monkey.is_module_patched('socket')
# Boolean tests
python_is_outdated = '2.7' in sys.version or '3.3' in sys.version
is_appveyor = 'APPVEYOR' in os.environ
is_travis = 'TRAVIS' in os.environ
providers = [AzLyrics(), MusixMatch(), LyricWiki(), Genius(), Lyrics007()]
real_singer = {'name': 'The Notorious B.I.G.', 'album': 'Ready to Die (1994)',
'songs': [{'song': 'Things Done Changed',
'lyrics': 'Remember back in the days...'},
{'song': 'Things Done Changed',
'lyrics': 'Remember back in the days...'}]
}
fake_singer = {'name': 'Fake Rapper', 'album': "In my mom's basement",
'song': 'I fap',
'lyrics': 'Everyday I fap furiously...'}
provider_strings = {
'LyricWiki': {'artist_name': 'The_Notorious_B.I.G.',
'artist_url': 'http://lyrics.wikia.com/wiki/The_Notorious_B.I.G.',
'song_url': 'http://lyrics.wikia.com/wiki/The_Notorious_B.I.G.:Things_Done_Changed',
'fake_url': 'http://lyrics.wikia.com/wiki/Things_Done_Changed:Things_Done_Changed_fake_url'},
'AzLyrics': {'artist_name': 'The Notorious B.I.G.',
'artist_url': 'https://www.azlyrics.com/n/notorious.html',
'song_url': 'https://www.azlyrics.com/lyrics/notoriousbig/thingsdonechanged.html',
'fake_url': 'https://www.azlyrics.com/lyrics/notoriousbig/thingsdonechanged_fake.html'},
'Genius': {'artist_name': 'The-notorious-big',
'artist_url': 'https://genius.com/artists/The-notorious-big',
'song_url': 'https://genius.com/The-notorious-big-things-done-changed-lyrics',
'fake_url': 'https://genius.com/The-notorious-big-things-done-changed-lyrics_fake'},
'Lyrics007': {'artist_name': 'The Notorious B.I.G.',
'artist_url': 'https://www.lyrics007.com/artist/the-notorious-b-i-g/TVRJMk5EQT0=',
'song_url': 'https://www.lyrics007.com/Notorious%20B.i.g.%20Lyrics/Things%20Done%20Changed%20Lyrics.html',
'fake_url': 'https://www.lyrics007.com/Notorious%20B.i.g.%20Lyrics/Things%20Done%20Changed%20fake_Lyrics.html'},
'MusixMatch': {'artist_name': 'The-Notorious-B-I-G',
'artist_url': 'https://www.musixmatch.com/artist/The-Notorious-B-I-G',
'song_url': 'https://www.musixmatch.com/lyrics/The-Notorious-B-I-G/Things-Done-Changed',
'fake_url': 'https://www.musixmatch.com/lyrics/The-Notorious-B-I-G/Things-Done-Changed_fake'},
}
songs = [models.Song(real_singer['songs'][0]['song'], real_singer['album'],
real_singer['name'],real_singer['songs'][0]['lyrics']),
models.Song(real_singer['songs'][1]['song'], real_singer['album'],
real_singer['name'], real_singer['songs'][1]['lyrics'])]
class TestSongs:
"""Tests for Song Class."""
song = songs[0]
def test_song(self):
assert self.song.__repr__() == 'lyricsmaster.models.Song({0}, {1}, {2})'.format(
real_singer['songs'][0]['song'],
real_singer['album'],
real_singer['name'])
def test_song_save(self):
self.song.save()
path = os.path.join(os.path.expanduser("~"), 'Documents',
'LyricsMaster', normalize(real_singer['name']),
normalize(real_singer['album']),
'Things-Done-Changed.txt')
assert os.path.exists(path)
folder = os.path.join(os.path.expanduser("~"), 'Documents',
'test_lyricsmaster_save')
self.song.save(folder)
path = os.path.join(folder, 'LyricsMaster',
normalize(real_singer['name']),
normalize(real_singer['album']),
'Things-Done-Changed.txt')
assert os.path.exists(path)
with codecs.open(path, 'r', encoding='utf-8') as file:
assert self.song.lyrics == file.readlines()[0]
class TestAlbums:
"""Tests for Album Class."""
album = models.Album(real_singer['album'], real_singer['name'], songs, '2017')
def test_album(self):
assert self.album.__idx__ == 0
assert self.album.title == real_singer['album']
assert self.album.artist == real_singer['name']
assert self.album.__repr__() == 'lyricsmaster.models.Album({0}, {1})'.format(
real_singer['album'],
real_singer['name'])
def test_album_isiter(self):
assert len(self.album) == 2
assert [elmt for elmt in self.album] == songs
for x, y in zip(reversed(self.album), reversed(self.album.songs)):
assert x == y
def test_album_save(self):
self.album.save()
for song in self.album.songs:
artist = normalize(song.artist)
album = normalize(song.album)
title = normalize(song.title)
path = os.path.join(os.path.expanduser("~"), 'Documents',
'LyricsMaster', artist, album, title + '.txt')
assert os.path.exists(path)
with codecs.open(path, 'r', encoding='utf-8') as file:
assert song.lyrics == '\n'.join(file.readlines())
class TestDiscography:
"""Tests for Discography Class."""
albums = [models.Album(real_singer['album'], real_singer['name'], songs, '2017'),
models.Album(real_singer['album'], real_singer['name'], songs, '2017')]
discography = models.Discography(real_singer['name'], albums)
def test_discography(self):
assert self.discography.__repr__() == 'lyricsmaster.models.Discography({0})'.format(
real_singer['name'])
def test_discography_isiter(self):
assert self.discography.__idx__ == 0
assert len(self.discography) == 2
assert [elmt for elmt in self.discography] == self.albums
for x, y in zip(reversed(self.discography),
reversed(self.discography.albums)):
assert x == y
def test_discography_save(self):
self.discography.save()
for album in self.albums:
for song in album.songs:
artist = normalize(song.artist)
album = normalize(song.album)
title = normalize(song.title)
path = os.path.join(os.path.expanduser("~"), 'Documents',
'LyricsMaster', artist, album,
title + '.txt')
assert os.path.exists(path)
with codecs.open(path, 'r', encoding='utf-8') as file:
assert song.lyrics == '\n'.join(file.readlines())
class TestLyricsProviders:
"""Tests for LyricWiki Class."""
@pytest.mark.skipif(is_appveyor and '3.3' in sys.version,
reason="[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed (_ssl.c:548) on Appveyor 3.3.")
@pytest.mark.parametrize('provider', providers)
def test_get_page(self, provider):
# Generates unreproducible errors when the server does not exist
# url = 'http://non-existent-url.com'
# request = provider.get_page(url)
# assert request is None
request = provider.get_page('http://www.google.com')
assert request.status == 200
@pytest.mark.parametrize('provider', providers)
def test_clean_string(self, provider):
assert provider._clean_string(real_singer['name']) == \
provider_strings[provider.name]['artist_name']
@pytest.mark.parametrize('provider', providers)
def test_has_artist(self, provider):
clean = provider._clean_string
url = provider._make_artist_url(clean(real_singer['name']))
page = BeautifulSoup(provider.get_page(url).data, 'lxml')
assert provider._has_artist(page)
url = provider._make_artist_url(clean(fake_singer['name']))
if not url:
assert url is None
else:
page = BeautifulSoup(provider.get_page(url).data, 'lxml')
assert not provider._has_artist(page)
@pytest.mark.parametrize('provider', providers)
def test_make_artist_url(self, provider):
clean = provider._clean_string
assert provider._make_artist_url(clean(real_singer['name'])) == provider_strings[provider.name]['artist_url']
@pytest.mark.parametrize('provider', providers)
def test_get_artist_page(self, provider):
page = provider.get_artist_page(real_singer['name'])
assert '<!doctype html>' in str(page).lower()
page = provider.get_artist_page(fake_singer['name'])
assert page is None
@pytest.mark.parametrize('provider', providers)
def test_get_album_page(self, provider):
if provider.name in ('AzLyrics', 'Genius', 'Lyrics007', 'MusixMatch'):
return
else:
page = provider.get_album_page(real_singer['name'],
fake_singer['album'])
assert page is None
page = provider.get_album_page(fake_singer['name'],
fake_singer['album'])
assert page is None
page = provider.get_album_page(real_singer['name'],
real_singer['album'])
assert '<!doctype html>' in str(page).lower()
@pytest.mark.parametrize('provider', providers)
def test_has_lyrics(self, provider):
url = provider_strings[provider.name]['song_url']
page = BeautifulSoup(provider.get_page(url).data, 'lxml')
assert provider._has_lyrics(page)
url = provider_strings[provider.name]['fake_url']
page = BeautifulSoup(provider.get_page(url).data, 'lxml')
assert not provider._has_lyrics(page)
@pytest.mark.parametrize('provider', providers)
def test_get_lyrics_page(self, provider):
page = provider.get_lyrics_page(
provider_strings[provider.name]['song_url'])
assert '<!doctype html>' in str(page).lower()
page = provider.get_lyrics_page(
provider_strings[provider.name]['fake_url'])
assert page is None
@pytest.mark.parametrize('provider', providers)
def test_get_albums(self, provider):
url = provider_strings[provider.name]['artist_url']
page = provider.get_page(url).data
albums = provider.get_albums(page)
for album in albums:
assert isinstance(album, Tag)
@pytest.mark.parametrize('provider', providers)
def test_get_album_infos(self, provider):
url = provider_strings[provider.name]['artist_url']
page = provider.get_page(url).data
album = provider.get_albums(page)[0]
album_title, release_date = provider.get_album_infos(album)
assert isinstance(release_date, basestring)
# assert album_title.lower() in real_singer['album'].lower() or \
# album_title.lower() in 'Demo Tape'.lower() or 'notorious themes' in \
# album_title.lower() or 'greatest hits' in album_title.lower()
@pytest.mark.parametrize('provider', providers)
def test_extract_lyrics(self, provider):
page = provider.get_lyrics_page(provider_strings[provider.name]['song_url'])
lyrics_page = BeautifulSoup(page, 'lxml')
lyrics = provider.extract_lyrics(lyrics_page)
assert isinstance(lyrics, basestring)
assert 'Remember back in the days'.lower() in lyrics.lower()
assert "Don't ask me why I'm".lower() in lyrics.lower()
@pytest.mark.parametrize('provider', [prov for prov in providers if
not prov.name in ('Lyrics007',
'LyricWiki')])
def test_extract_writers(self, provider):
page = provider.get_lyrics_page(provider_strings[provider.name]['song_url'])
lyrics_page = BeautifulSoup(page, 'lxml')
writers = provider.extract_writers(lyrics_page)
assert isinstance(writers, basestring)
assert "c. wallace" in writers.lower() or "notorious" in writers.lower()\
or "christopher wallace" in writers.lower() or writers == ''
@pytest.mark.parametrize('provider', providers)
def test_get_songs(self, provider):
artist_page = provider.get_artist_page(real_singer['name'])
album = provider.get_albums(artist_page)[0]
song_links = provider.get_songs(album)
for link in song_links:
assert isinstance(link, Tag)
@pytest.mark.parametrize('provider', providers)
def test_create_song(self, provider):
artist_page = provider.get_artist_page(real_singer['name'])
album = provider.get_albums(artist_page)[0]
song_links = provider.get_songs(album)
song_links[-1].attrs['href'] = provider_strings[provider.name]['fake_url']
fail_song = provider.create_song(song_links[-1], real_singer['name'], real_singer['album'])
assert fail_song is None
good_song = provider.create_song(song_links[0], real_singer['name'], real_singer['album'])
assert isinstance(good_song, models.Song)
assert isinstance(good_song.title, basestring)
assert good_song.album == real_singer['album']
assert good_song.artist == real_singer['name']
assert isinstance(good_song.lyrics, basestring)
# Tests existing song with known empty lyrics
if provider.name == 'LyricWiki':
tag = '<a href="http://lyrics.wikia.com/wiki/Reggie_Watts:Feel_The_Same" class="new" title="Reggie Watts:Feel The Same (page does not exist)">Feel the Same</a>'
page = BeautifulSoup(tag, 'lxml')
page.attrs['title'] = "Reggie Watts:Feel The Same (page does not exist)"
page.attrs['href'] = "http://lyrics.wikia.com/wiki/Reggie_Watts:Feel_The_Same"
non_existent_song = provider.create_song(page, real_singer['name'],
real_singer['album'])
assert non_existent_song is None
@pytest.mark.parametrize('provider', providers)
def test_get_lyrics(self, provider):
discography = provider.get_lyrics(fake_singer['name'])
discography2 = provider.get_lyrics('Reggie Watts', 'Why $#!+ So Crazy?',
'Fuck Shit Stack')
assert discography is None
discography = provider.get_lyrics('Reggie Watts', 'Why $#!+ So Crazy?')
if provider.name == 'AzLyrics':
assert discography is None
assert discography2 is None
else:
assert isinstance(discography, models.Discography)
assert isinstance(discography2, models.Discography)
class TestCli:
"""Tests for Command Line Interface."""
def test_command_line_interface(self):
artist = 'Reggie Watts'
runner = CliRunner()
result = runner.invoke(cli.main, [artist, '-a', 'Why $#!+ So Crazy?', '-s', 'Fuck Shit Stack'])
assert result.exit_code == 0
help_result = runner.invoke(cli.main, ['--help'])
assert help_result.exit_code == 0
assert 'Show this message and exit.' in help_result.output
# If tests involving Tor are run first, the following tests fail with error: 'an integer is required (got type object)'
class TestTor:
"""Tests for Tor functionality."""
tor_basic = TorController()
if is_travis or (is_appveyor and python_is_outdated):
tor_advanced = TorController(controlport='/var/run/tor/control',
password='password')
else:
tor_advanced = TorController(controlport=9051, password='password')
non_anon_provider = LyricWiki()
provider = LyricWiki(tor_basic)
provider2 = LyricWiki(tor_advanced)
@pytest.mark.skipif(is_appveyor,
reason="Tor error on ApppVeyor.")
def test_anonymisation(self):
real_ip = self.non_anon_provider.get_page("http://httpbin.org/ip").data
anonymous_ip = self.provider.get_page("http://httpbin.org/ip").data
assert real_ip != anonymous_ip
# This function is tested on Travis CI using a unix socket path as the control port instead of port 9051.
# For now it gets 'permission denied' on '/var/run/tor/control' in Travis CI.
@pytest.mark.skipif(is_travis or is_appveyor,
reason="Tor error on CI.")
def test_renew_tor_session(self):
real_ip = self.non_anon_provider.get_page("http://httpbin.org/ip").data
anonymous_ip = self.provider2.get_page("http://httpbin.org/ip").data
assert real_ip != anonymous_ip
new_tor_circuit = self.provider2.tor_controller.renew_tor_circuit()
real_ip2 = self.non_anon_provider.get_page("http://httpbin.org/ip").data
anonymous_ip2 = self.provider2.get_page("http://httpbin.org/ip").data
assert real_ip2 != anonymous_ip2
assert new_tor_circuit is True
@pytest.mark.skipif(is_appveyor,
reason="Tor error on ApppVeyor.")
def test_get_lyrics_tor_basic(self):
discography = self.provider.get_lyrics(
'Reggie Watts', 'Why $#!+ So Crazy?',
'Fuck Shit Stack') # use another real singer with fewer songs to speed up testing.
assert isinstance(discography, models.Discography)
@pytest.mark.skipif(is_appveyor or is_travis,
reason="Tor error on CI.")
def test_get_lyrics_tor_advanced(self):
discography = self.provider2.get_lyrics(
'Reggie Watts', 'Why $#!+ So Crazy?', 'Fuck Shit Stack')
assert isinstance(discography, models.Discography)
@pytest.mark.skipif(is_appveyor,
reason="Tor error on ApppVeyor.")
def test_command_line_interface_tor(self):
artist = 'Reggie Watts'
runner = CliRunner()
result_tor1 = runner.invoke(cli.main,
[artist, '-a', 'Why $#!+ So Crazy?', '-s',
'Fuck Shit Stack', '--tor', '127.0.0.1',
'--controlport', '9051', '--password',
'password'])
assert result_tor1.exit_code == 0
@pytest.mark.skipif(is_appveyor,
reason="Tor error on ApppVeyor.")
def test_command_line_interface_tor_basic(self): # renamed: a second definition would shadow the test above
artist = 'Reggie Watts'
runner = CliRunner()
result_tor = runner.invoke(cli.main, [artist, '-a', 'Why $#!+ So Crazy?', '-s',
'Fuck Shit Stack', '--tor', '127.0.0.1'])
assert result_tor.exit_code == 0
|
{
"content_hash": "da330e248fa2dc794b187bad234b159a",
"timestamp": "",
"source": "github",
"line_count": 429,
"max_line_length": 172,
"avg_line_length": 45.56410256410256,
"alnum_prop": 0.5935437663068501,
"repo_name": "SekouD/lyricsmaster",
"id": "3697cee9bb39e36366b6787325017ff710165f16",
"size": "19594",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_lyricsmaster.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2307"
},
{
"name": "Python",
"bytes": "78963"
}
],
"symlink_target": ""
}
|
from __future__ import print_function, division
from sympy.core import C, sympify
from sympy.core.add import Add
from sympy.core.function import Lambda, Function, ArgumentIndexError
from sympy.core.cache import cacheit
from sympy.core.power import Pow
from sympy.core.singleton import S
from sympy.core.symbol import Wild, Dummy
from sympy.core.mul import Mul
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.ntheory import multiplicity, perfect_power
from sympy.core.compatibility import xrange
# NOTE IMPORTANT
# The series expansion code in this file is an important part of the gruntz
# algorithm for determining limits. _eval_nseries has to return a generalized
# power series with coefficients in C(log(x), log).
# In more detail, the result of _eval_nseries(self, x, n) must be
# c_0*x**e_0 + ... (finitely many terms)
# where e_i are numbers (not necessarily integers) and c_i involve only
# numbers, the function log, and log(x). [This also means it must not contain
# log(x(1+p)), this *has* to be expanded to log(x)+log(1+p) if x.is_positive and
# p.is_positive.]
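# As a sketch of this contract (illustrative, not additional behavior):
#     exp(x)._eval_nseries(x, 3, None) -> 1 + x + x**2/2 + O(x**3)
#     log(2*x)._eval_nseries(x, 3, None) -> log(2) + log(x)
# both are generalized power series with coefficients in C(log(x), log).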
class ExpBase(Function):
unbranched = True
def inverse(self, argindex=1):
"""
Returns the inverse function of ``exp(x)``.
"""
return log
def as_numer_denom(self):
"""
Returns this with a positive exponent as a 2-tuple (a fraction).
Examples
========
>>> from sympy.functions import exp
>>> from sympy.abc import x
>>> exp(-x).as_numer_denom()
(1, exp(x))
>>> exp(x).as_numer_denom()
(exp(x), 1)
"""
# this should be the same as Pow.as_numer_denom wrt
# exponent handling
exp = self.exp
neg_exp = exp.is_negative
if not neg_exp and not (-exp).is_negative:
neg_exp = _coeff_isneg(exp)
if neg_exp:
return S.One, self.func(-exp)
return self, S.One
@property
def exp(self):
"""
Returns the exponent of the function.
"""
return self.args[0]
def as_base_exp(self):
"""
Returns the 2-tuple (base, exponent).
"""
return self.func(1), Mul(*self.args)
def _eval_conjugate(self):
return self.func(self.args[0].conjugate())
def _eval_is_finite(self):
arg = self.args[0]
if arg.is_infinite:
if arg.is_negative:
return True
if arg.is_positive:
return False
if arg.is_finite:
return True
def _eval_is_rational(self):
s = self.func(*self.args)
if s.func == self.func:
if s.exp is S.Zero:
return True
elif s.exp.is_rational and s.exp.is_nonzero:
return False
else:
return s.is_rational
def _eval_is_zero(self):
return (self.args[0] is S.NegativeInfinity)
def _eval_power(self, other):
"""exp(arg)**e -> exp(arg*e) if assumptions allow it.
"""
b, e = self.as_base_exp()
return Pow._eval_power(Pow(b, e, evaluate=False), other)
def _eval_expand_power_exp(self, **hints):
arg = self.args[0]
if arg.is_Add and arg.is_commutative:
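# exp(a + b + ...) -> exp(a)*exp(b)*... for commutative Add arguments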
expr = 1
for x in arg.args:
expr *= self.func(x)
return expr
return self.func(arg)
class exp_polar(ExpBase):
r"""
Represent a 'polar number' (see g-function Sphinx documentation).
``exp_polar`` represents the function
`Exp: \mathbb{C} \rightarrow \mathcal{S}`, sending the complex number
`z = a + bi` to the polar number `r = exp(a), \theta = b`. It is one of
the main functions to construct polar numbers.
>>> from sympy import exp_polar, pi, I, exp
The main difference is that polar numbers don't "wrap around" at `2 \pi`:
>>> exp(2*pi*I)
1
>>> exp_polar(2*pi*I)
exp_polar(2*I*pi)
apart from that they behave mostly like classical complex numbers:
>>> exp_polar(2)*exp_polar(3)
exp_polar(5)
See also
========
sympy.simplify.simplify.powsimp
sympy.functions.elementary.complexes.polar_lift
sympy.functions.elementary.complexes.periodic_argument
sympy.functions.elementary.complexes.principal_branch
"""
is_polar = True
is_comparable = False # cannot be evalf'd
def _eval_Abs(self):
from sympy import expand_mul
return sqrt( expand_mul(self * self.conjugate()) )
def _eval_evalf(self, prec):
""" Careful! any evalf of polar numbers is flaky """
from sympy import im, pi, re
i = im(self.args[0])
if i <= -pi or i > pi:
return self # cannot evalf for this argument
res = exp(self.args[0])._eval_evalf(prec)
if i > 0 and im(res) < 0:
# i ~ pi, but exp(I*i) evaluated to argument slightly bigger than pi
return re(res)
return res
def _eval_power(self, other):
return self.func(self.args[0]*other)
def _eval_is_real(self):
if self.args[0].is_real:
return True
def as_base_exp(self):
# XXX exp_polar(0) is special!
if self.args[0] == 0:
return self, S(1)
return ExpBase.as_base_exp(self)
class exp(ExpBase):
"""
The exponential function, :math:`e^x`.
See Also
========
log
"""
def fdiff(self, argindex=1):
"""
Returns the first derivative of this function.
"""
if argindex == 1:
return self
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def eval(cls, arg):
if arg.is_Number:
if arg is S.NaN:
return S.NaN
elif arg is S.Zero:
return S.One
elif arg is S.One:
return S.Exp1
elif arg is S.Infinity:
return S.Infinity
elif arg is S.NegativeInfinity:
return S.Zero
elif arg.func is log:
return arg.args[0]
elif arg.is_Mul:
Ioo = S.ImaginaryUnit*S.Infinity
if arg in [Ioo, -Ioo]:
return S.NaN
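# exp(I*pi*coeff) has period 2 in coeff: exp(2*I*pi) = 1,
# exp(I*pi) = -1, exp(I*pi/2) = I, exp(3*I*pi/2) = -I, which is the
# even/odd/half-integer dispatch below.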
coeff = arg.coeff(S.Pi*S.ImaginaryUnit)
if coeff:
if (2*coeff).is_integer:
if coeff.is_even:
return S.One
elif coeff.is_odd:
return S.NegativeOne
elif (coeff + S.Half).is_even:
return -S.ImaginaryUnit
elif (coeff + S.Half).is_odd:
return S.ImaginaryUnit
# Warning: code in risch.py will be very sensitive to changes
# in this (see DifferentialExtension).
# look for a single log factor
coeff, terms = arg.as_coeff_Mul()
# but it can't be multiplied by oo
if coeff in [S.NegativeInfinity, S.Infinity]:
return None
coeffs, log_term = [coeff], None
for term in Mul.make_args(terms):
if term.func is log:
if log_term is None:
log_term = term.args[0]
else:
return None
elif term.is_comparable:
coeffs.append(term)
else:
return None
return log_term**Mul(*coeffs) if log_term else None
elif arg.is_Add:
out = []
add = []
for a in arg.args:
if a is S.One:
add.append(a)
continue
newa = cls(a)
if newa.func is cls:
add.append(a)
else:
out.append(newa)
if out:
return Mul(*out)*cls(Add(*add), evaluate=False)
elif arg.is_Matrix:
from sympy import Matrix
return arg.exp()
@property
def base(self):
"""
Returns the base of the exponential function.
"""
return S.Exp1
@staticmethod
@cacheit
def taylor_term(n, x, *previous_terms):
"""
Calculates the next term in the Taylor series expansion.
"""
if n < 0:
return S.Zero
if n == 0:
return S.One
x = sympify(x)
if previous_terms:
p = previous_terms[-1]
if p is not None:
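# successive terms satisfy x**n/n! = (x**(n-1)/(n-1)!) * x/n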
return p * x / n
return x**n/C.factorial(n)
def as_real_imag(self, deep=True, **hints):
"""
Returns this function as a 2-tuple representing a complex number.
Examples
========
>>> from sympy import I
>>> from sympy.abc import x
>>> from sympy.functions import exp
>>> exp(x).as_real_imag()
(exp(re(x))*cos(im(x)), exp(re(x))*sin(im(x)))
>>> exp(1).as_real_imag()
(E, 0)
>>> exp(I).as_real_imag()
(cos(1), sin(1))
>>> exp(1+I).as_real_imag()
(E*cos(1), E*sin(1))
See Also
========
sympy.functions.elementary.complexes.re
sympy.functions.elementary.complexes.im
"""
re, im = self.args[0].as_real_imag()
if deep:
re = re.expand(deep, **hints)
im = im.expand(deep, **hints)
cos, sin = C.cos(im), C.sin(im)
return (exp(re)*cos, exp(re)*sin)
def _eval_subs(self, old, new):
# keep processing of power-like args centralized in Pow
if old.is_Pow: # handle (exp(3*log(x))).subs(x**2, z) -> z**(3/2)
old = exp(old.exp*log(old.base))
elif old is S.Exp1 and new.is_Function:
old = exp
if old.func is exp or old is S.Exp1:
f = lambda a: Pow(*a.as_base_exp(), evaluate=False) if (
a.is_Pow or a.func is exp) else a
return Pow._eval_subs(f(self), f(old), new)
if old is exp and not new.is_Function:
return new**self.exp._subs(old, new)
return Function._eval_subs(self, old, new)
def _eval_is_real(self):
if self.args[0].is_real:
return True
elif self.args[0].is_imaginary:
arg2 = -S(2) * S.ImaginaryUnit * self.args[0] / S.Pi
return arg2.is_even
def _eval_is_algebraic(self):
s = self.func(*self.args)
if s.func == self.func:
if self.exp.is_nonzero:
if self.exp.is_algebraic:
return False
elif (self.exp/S.Pi).is_rational:
return False
else:
return s.is_algebraic
def _eval_is_positive(self):
if self.args[0].is_real:
return self.args[0] is not S.NegativeInfinity
elif self.args[0].is_imaginary:
arg2 = -S.ImaginaryUnit * self.args[0] / S.Pi
return arg2.is_even
def _eval_nseries(self, x, n, logx):
# NOTE Please see the comment at the beginning of this file, labelled
# IMPORTANT.
from sympy import limit, oo, powsimp
arg = self.args[0]
arg_series = arg._eval_nseries(x, n=n, logx=logx)
if arg_series.is_Order:
return 1 + arg_series
arg0 = limit(arg_series.removeO(), x, 0)
if arg0 in [-oo, oo]:
return self
t = Dummy("t")
exp_series = exp(t)._taylor(t, n)
o = exp_series.getO()
exp_series = exp_series.removeO()
r = exp(arg0)*exp_series.subs(t, arg_series - arg0)
r += C.Order(o.expr.subs(t, (arg_series - arg0)), x)
r = r.expand()
return powsimp(r, deep=True, combine='exp')
def _taylor(self, x, n):
l = []
g = None
for i in xrange(n):
g = self.taylor_term(i, self.args[0], g)
g = g.nseries(x, n=n)
l.append(g)
return Add(*l) + C.Order(x**n, x)
def _eval_as_leading_term(self, x):
arg = self.args[0]
if arg.is_Add:
return Mul(*[exp(f).as_leading_term(x) for f in arg.args])
arg = self.args[0].as_leading_term(x)
if C.Order(1, x).contains(arg):
return S.One
return exp(arg)
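# The rewrites below rely on sin(I*x + pi/2) = cosh(x) and
# -I*sin(I*x) = sinh(x), whose sum is cosh(x) + sinh(x) = exp(x);
# the cos variant works the same way.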
def _eval_rewrite_as_sin(self, arg):
I = S.ImaginaryUnit
return C.sin(I*arg + S.Pi/2) - I*C.sin(I*arg)
def _eval_rewrite_as_cos(self, arg):
I = S.ImaginaryUnit
return C.cos(I*arg) + I*C.cos(I*arg + S.Pi/2)
def _eval_rewrite_as_tanh(self, arg):
return (1 + C.tanh(arg/2))/(1 - C.tanh(arg/2))
def _sage_(self):
import sage.all as sage
return sage.exp(self.args[0]._sage_())
class log(Function):
"""
The natural logarithm function `\ln(x)` or `\log(x)`.
Logarithms are taken with the natural base, `e`. To get
a logarithm of a different base ``b``, use ``log(x, b)``,
which is essentially short-hand for ``log(x)/log(b)``.
See Also
========
exp
"""
def fdiff(self, argindex=1):
"""
Returns the first derivative of the function.
"""
if argindex == 1:
return 1/self.args[0]
else:
raise ArgumentIndexError(self, argindex)
def inverse(self, argindex=1):
"""
Returns `e^x`, the inverse function of `\log(x)`.
"""
return exp
@classmethod
def eval(cls, arg, base=None):
from sympy import unpolarify
arg = sympify(arg)
if base is not None:
base = sympify(base)
if base == 1:
if arg == 1:
return S.NaN
else:
return S.ComplexInfinity
try:
# handle extraction of powers of the base now
# or else expand_log in Mul would have to handle this
n = multiplicity(base, arg)
if n:
den = base**n
if den.is_Integer:
return n + log(arg // den) / log(base)
else:
return n + log(arg / den) / log(base)
else:
return log(arg)/log(base)
except ValueError:
pass
if base is not S.Exp1:
return cls(arg)/cls(base)
else:
return cls(arg)
if arg.is_Number:
if arg is S.Zero:
return S.ComplexInfinity
elif arg is S.One:
return S.Zero
elif arg is S.Infinity:
return S.Infinity
elif arg is S.NegativeInfinity:
return S.Infinity
elif arg is S.NaN:
return S.NaN
elif arg.is_Rational:
if arg.q != 1:
return cls(arg.p) - cls(arg.q)
if arg.func is exp and arg.args[0].is_real:
return arg.args[0]
elif arg.func is exp_polar:
return unpolarify(arg.exp)
if arg.is_number:
if arg.is_negative:
return S.Pi * S.ImaginaryUnit + cls(-arg)
elif arg is S.ComplexInfinity:
return S.ComplexInfinity
elif arg is S.Exp1:
return S.One
# don't autoexpand Pow or Mul (see the issue 3351):
if not arg.is_Add:
coeff = arg.as_coefficient(S.ImaginaryUnit)
if coeff is not None:
if coeff is S.Infinity:
return S.Infinity
elif coeff is S.NegativeInfinity:
return S.Infinity
elif coeff.is_Rational:
if coeff.is_nonnegative:
return S.Pi * S.ImaginaryUnit * S.Half + cls(coeff)
else:
return -S.Pi * S.ImaginaryUnit * S.Half + cls(-coeff)
def as_base_exp(self):
"""
Returns this function in the form (base, exponent).
"""
return self, S.One
@staticmethod
@cacheit
def taylor_term(n, x, *previous_terms): # of log(1+x)
"""
Returns the next term in the Taylor series expansion of `\log(1+x)`.
"""
from sympy import powsimp
if n < 0:
return S.Zero
x = sympify(x)
if n == 0:
return x
if previous_terms:
p = previous_terms[-1]
if p is not None:
return powsimp((-n) * p * x / (n + 1), deep=True, combine='exp')
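# generic term: (-1)**n * x**(n + 1)/(n + 1), i.e. the series
# log(1 + x) = x - x**2/2 + x**3/3 - ...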
return (1 - 2*(n % 2)) * x**(n + 1)/(n + 1)
def _eval_expand_log(self, deep=True, **hints):
from sympy import unpolarify
from sympy.concrete import Sum, Product
force = hints.get('force', False)
arg = self.args[0]
if arg.is_Integer:
# remove perfect powers
p = perfect_power(int(arg))
if p is not False:
return p[1]*self.func(p[0])
elif arg.is_Mul:
expr = []
nonpos = []
for x in arg.args:
if force or x.is_positive or x.is_polar:
a = self.func(x)
if isinstance(a, log):
expr.append(self.func(x)._eval_expand_log(**hints))
else:
expr.append(a)
elif x.is_negative:
a = self.func(-x)
expr.append(a)
nonpos.append(S.NegativeOne)
else:
nonpos.append(x)
return Add(*expr) + log(Mul(*nonpos))
elif arg.is_Pow or isinstance(arg, exp):
if force or (arg.exp.is_real and arg.base.is_positive) or \
arg.base.is_polar:
b = arg.base
e = arg.exp
a = self.func(b)
if isinstance(a, log):
return unpolarify(e) * a._eval_expand_log(**hints)
else:
return unpolarify(e) * a
elif isinstance(arg, Product):
if arg.function.is_positive:
return Sum(log(arg.function), *arg.limits)
return self.func(arg)
def _eval_simplify(self, ratio, measure):
from sympy.simplify.simplify import expand_log, logcombine, simplify
expr = self.func(simplify(self.args[0], ratio=ratio, measure=measure))
expr = expand_log(expr, deep=True)
return min([expr, self], key=measure)
def as_real_imag(self, deep=True, **hints):
"""
Returns this function as a complex coordinate.
Examples
========
>>> from sympy import I
>>> from sympy.abc import x
>>> from sympy.functions import log
>>> log(x).as_real_imag()
(log(Abs(x)), arg(x))
>>> log(I).as_real_imag()
(0, pi/2)
>>> log(1 + I).as_real_imag()
(log(sqrt(2)), pi/4)
>>> log(I*x).as_real_imag()
(log(Abs(x)), arg(I*x))
"""
if deep:
abs = C.Abs(self.args[0].expand(deep, **hints))
arg = C.arg(self.args[0].expand(deep, **hints))
else:
abs = C.Abs(self.args[0])
arg = C.arg(self.args[0])
if hints.get('log', False): # Expand the log
hints['complex'] = False
return (log(abs).expand(deep, **hints), arg)
else:
return (log(abs), arg)
def _eval_is_rational(self):
s = self.func(*self.args)
if s.func == self.func:
if (self.args[0] - 1).is_zero:
return True
if s.args[0].is_rational and (self.args[0] - 1).is_nonzero:
return False
else:
return s.is_rational
def _eval_is_algebraic(self):
s = self.func(*self.args)
if s.func == self.func:
if (self.args[0] - 1).is_zero:
return True
elif (self.args[0] - 1).is_nonzero:
if self.args[0].is_algebraic:
return False
else:
return s.is_algebraic
def _eval_is_real(self):
return self.args[0].is_positive
def _eval_is_finite(self):
arg = self.args[0]
if arg.is_zero:
return False
return arg.is_finite
def _eval_is_positive(self):
arg = self.args[0]
if arg.is_positive:
if arg.is_infinite:
return True
if arg.is_zero:
return False
return (arg - 1).is_positive
def _eval_is_zero(self):
return (self.args[0] - 1).is_zero
def _eval_nseries(self, x, n, logx):
# NOTE Please see the comment at the beginning of this file, labelled
# IMPORTANT.
from sympy import cancel
if not logx:
logx = log(x)
if self.args[0] == x:
return logx
arg = self.args[0]
k, l = Wild("k"), Wild("l")
r = arg.match(k*x**l)
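# e.g. log(3*x**2) matches with k = 3, l = 2 and expands to
# log(3) + 2*log(x)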
if r is not None:
#k = r.get(r, S.One)
#l = r.get(l, S.Zero)
k, l = r[k], r[l]
if l != 0 and not l.has(x) and not k.has(x):
r = log(k) + l*logx # XXX true regardless of assumptions?
return r
# TODO new and probably slow
s = self.args[0].nseries(x, n=n, logx=logx)
while s.is_Order:
n += 1
s = self.args[0].nseries(x, n=n, logx=logx)
a, b = s.leadterm(x)
p = cancel(s/(a*x**b) - 1)
g = None
l = []
for i in xrange(n + 2):
g = log.taylor_term(i, p, g)
g = g.nseries(x, n=n, logx=logx)
l.append(g)
return log(a) + b*logx + Add(*l) + C.Order(p**n, x)
def _eval_as_leading_term(self, x):
arg = self.args[0].as_leading_term(x)
if arg is S.One:
return (self.args[0] - 1).as_leading_term(x)
return self.func(arg)
def _sage_(self):
import sage.all as sage
return sage.log(self.args[0]._sage_())
class LambertW(Function):
"""
The Lambert W function `W(z)` is defined as the inverse
function of `w \exp(w)` [1]_.
In other words, the value of `W(z)` is such that `z = W(z) \exp(W(z))`
for any complex number `z`. The Lambert W function is a multivalued
function with infinitely many branches `W_k(z)`, indexed by
`k \in \mathbb{Z}`. Each branch gives a different solution `w`
of the equation `z = w \exp(w)`.
The Lambert W function has two partially real branches: the
principal branch (`k = 0`) is real for real `z > -1/e`, and the
`k = -1` branch is real for `-1/e < z < 0`. All branches except
`k = 0` have a logarithmic singularity at `z = 0`.
Examples
========
>>> from sympy import LambertW
>>> LambertW(1.2)
0.635564016364870
>>> LambertW(1.2, -1).n()
-1.34747534407696 - 4.41624341514535*I
>>> LambertW(-1).is_real
False
References
==========
.. [1] http://en.wikipedia.org/wiki/Lambert_W_function
"""
@classmethod
def eval(cls, x, k=None):
if k is S.Zero:
return cls(x)
elif k is None:
k = S.Zero
if k is S.Zero:
if x is S.Zero:
return S.Zero
if x is S.Exp1:
return S.One
if x == -1/S.Exp1:
return S.NegativeOne
if x == -log(2)/2:
return -log(2)
if x is S.Infinity:
return S.Infinity
if k.is_nonzero:
if x is S.Zero:
return S.NegativeInfinity
if k is S.NegativeOne:
if x == -S.Pi/2:
return -S.ImaginaryUnit*S.Pi/2
elif x == -1/S.Exp1:
return S.NegativeOne
def fdiff(self, argindex=1):
"""
Return the first derivative of this function.
"""
x = self.args[0]
if len(self.args) == 1:
if argindex == 1:
return LambertW(x)/(x*(1 + LambertW(x)))
else:
k = self.args[1]
if argindex == 1:
return LambertW(x, k)/(x*(1 + LambertW(x, k)))
raise ArgumentIndexError(self, argindex)
def _eval_is_real(self):
x = self.args[0]
if len(self.args) == 1:
k = S.Zero
else:
k = self.args[1]
if k.is_zero:
return (x + 1/S.Exp1).is_positive
elif (k + 1).is_zero:
from sympy.core.logic import fuzzy_and
return fuzzy_and([x.is_negative, (x + 1/S.Exp1).is_positive])
elif k.is_nonzero and (k + 1).is_nonzero:
return False
def _eval_is_algebraic(self):
s = self.func(*self.args)
if s.func == self.func:
if self.args[0].is_nonzero and self.args[0].is_algebraic:
return False
else:
return s.is_algebraic
from sympy.core.function import _coeff_isneg
|
{
"content_hash": "7dcb1f16915b9ed2ea36019dae587413",
"timestamp": "",
"source": "github",
"line_count": 824,
"max_line_length": 80,
"avg_line_length": 30.70266990291262,
"alnum_prop": 0.5048420886201036,
"repo_name": "jamesblunt/sympy",
"id": "1f9168bed9d09ed9d964434f96186d88f08ad6f3",
"size": "25299",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "sympy/functions/elementary/exponential.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "5094"
},
{
"name": "Python",
"bytes": "15416009"
},
{
"name": "Ruby",
"bytes": "304"
},
{
"name": "Scheme",
"bytes": "125"
},
{
"name": "Shell",
"bytes": "4008"
},
{
"name": "TeX",
"bytes": "32356"
},
{
"name": "XSLT",
"bytes": "366202"
}
],
"symlink_target": ""
}
|
import time
class Solution(object):
def fractionToDecimal(self, numerator, denominator):
"""
:type numerator: int
:type denominator: int
:rtype: str
"""
if numerator == 0: return '0'
if numerator < 0 and denominator < 0:
return self.fractionToDecimal(-numerator, -denominator)
elif numerator < 0 or denominator < 0:
return '-' + self.fractionToDecimal(abs(numerator), abs(denominator))
a, b = divmod(numerator, denominator)
if b == 0:
return str(a)
return str(a)+'.' + self.getDecimal(b, denominator)
def getDecimal(self, num, denominator):
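# Long division: record the output position of every remainder seen;
# when a remainder repeats, the digits produced since its first
# occurrence form the repeating block, e.g. 4/333 -> 0.(012).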
res = ''
dot = False
visitedNums = {}
while num != 0:
if num in visitedNums:
# print 'repeat!', num, res[visitedNums[num]:]
repeatStart = visitedNums[num]
return res[:repeatStart] + '(' + res[repeatStart:] + ')'
visitedNums[num] = len(res)
num *= 10
if num < denominator:
res += '0'
else:
# print res, num, denominator
a, b = divmod(num, denominator)
res += str(a)
num = b
return res
print(Solution().fractionToDecimal(2, 1))
print(Solution().fractionToDecimal(1, 2))
print(Solution().fractionToDecimal(2, 3))
print(Solution().fractionToDecimal(4, 9))
print(Solution().fractionToDecimal(4, 333))
|
{
"content_hash": "2047c952db1d87c106f6f8fa116b5309",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 81,
"avg_line_length": 27.563636363636363,
"alnum_prop": 0.5356200527704486,
"repo_name": "xiaonanln/myleetcode-python",
"id": "74c877722010d45093123c6bb2302c0e0c62cc73",
"size": "1516",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/166. Fraction to Recurring Decimal.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1956173"
}
],
"symlink_target": ""
}
|
"""
Kivy-iconfonts
==============
Simple helper functions to make easier to use icon fonts in Labels and derived
widgets.
"""
from .iconfonts import *
if __name__ == '__main__':
from kivy.lang import Builder
from kivy.base import runTouchApp
from kivy.animation import Animation
from os.path import join, dirname
kv = """
#: import icon iconfonts.icon
BoxLayout:
Button:
markup: True
text: "%s"%(icon('icon-comment', 32))
Button:
markup: True
text: "%s"%(icon('icon-emo-happy', 64))
Button:
markup: True
text: "%s Text"%(icon('icon-plus-circled', 24))
Button:
markup: True
text: "%s"%(icon('icon-doc-text-inv', 64, 'ff3333'))
Label:
id: _anim
markup: True
text: "%s"%(icon('icon-spin6', 32))
font_color: 1, 0, 0, 1
p: 0
canvas:
Clear
PushMatrix
Rotate:
angle: -self.p
origin: self.center_x , self.center_y
Rectangle:
size: (32, 32)
pos: self.center_x - 16, self.center_y - 16
texture: self.texture
PopMatrix
"""
register('default_font', 'iconfont_sample.ttf',
join(dirname(__file__), 'iconfont_sample.fontd'))
root = Builder.load_string(kv)
an = Animation(p=360, duration=2) + Animation(p=0, duration=0)
an.repeat = True
an.start(root.ids['_anim'])
runTouchApp(root)
|
{
"content_hash": "03b47b644235854045e3986d4fc03787",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 78,
"avg_line_length": 24.737704918032787,
"alnum_prop": 0.5347912524850894,
"repo_name": "bit0001/chumme",
"id": "23dac3eed20b51adbd13970fd3bdd8d864a0c2cb",
"size": "1509",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "iconfonts/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1966"
},
{
"name": "Python",
"bytes": "56165"
}
],
"symlink_target": ""
}
|
import webapp2
from jinja2 import Environment
class Hello(webapp2.RequestHandler):
def get(self):
self.response.headers['Content-Type'] = 'text/plain'
env = Environment()
template = env.from_string("Hello {{ greeting }}!")
self.response.out.write(template.render(greeting='world'))
app = webapp2.WSGIApplication([('/', Hello)], debug=True)
|
{
"content_hash": "f2f3f45f5b5f8e342467e99253bd0d15",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 66,
"avg_line_length": 31.5,
"alnum_prop": 0.671957671957672,
"repo_name": "Trii/NoseGAE",
"id": "ed9c06baae39b296c9bff563bbb56016a668b71d",
"size": "378",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "examples/issue-38-ndb-transactions/helloworld.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "12209"
},
{
"name": "Shell",
"bytes": "345"
}
],
"symlink_target": ""
}
|
from django.views.i18n import JavaScriptCatalog
from django.views.static import serve
from cms.utils.conf import get_cms_setting
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.i18n import i18n_patterns
from django.contrib import admin
admin.autodiscover()
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^media/(?P<path>.*)$', serve,
{'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),
url(r'^media/cms/(?P<path>.*)$', serve,
{'document_root': get_cms_setting('MEDIA_ROOT'), 'show_indexes': True}),
url(r'^jsi18n/(?P<packages>\S+?)/$', JavaScriptCatalog.as_view()),
]
urlpatterns += i18n_patterns(
url(r'^', include('cms.test_utils.project.second_cms_urls_for_apphook_tests')),
)
|
{
"content_hash": "e6ca8fd4abf077bca15c3a75a76cb4f3",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 83,
"avg_line_length": 32.916666666666664,
"alnum_prop": 0.6924050632911393,
"repo_name": "benzkji/django-cms",
"id": "37f218a55635b1f0df1bed55aab52d09f6dde865",
"size": "790",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "cms/test_utils/project/second_urls_for_apphook_tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "132972"
},
{
"name": "HTML",
"bytes": "201324"
},
{
"name": "JavaScript",
"bytes": "1238070"
},
{
"name": "Python",
"bytes": "2356866"
},
{
"name": "Shell",
"bytes": "447"
}
],
"symlink_target": ""
}
|
"""
Transformation utilities for STIX pattern comparison expressions.
"""
import functools
import itertools
from stix2.equivalence.pattern.compare import iter_in, iter_lex_cmp
from stix2.equivalence.pattern.compare.comparison import (
comparison_expression_cmp,
)
from stix2.equivalence.pattern.transform import Transformer
from stix2.equivalence.pattern.transform.specials import (
ipv4_addr, ipv6_addr, windows_reg_key,
)
from stix2.patterns import (
AndBooleanExpression, ObjectPath, OrBooleanExpression,
ParentheticalExpression, _BooleanExpression, _ComparisonExpression,
)
def _dupe_ast(ast):
"""
Create a duplicate of the given AST.
Args:
ast: The AST to duplicate
Returns:
The duplicate AST
"""
if isinstance(ast, AndBooleanExpression):
result = AndBooleanExpression([
_dupe_ast(operand) for operand in ast.operands
])
elif isinstance(ast, OrBooleanExpression):
result = OrBooleanExpression([
_dupe_ast(operand) for operand in ast.operands
])
elif isinstance(ast, _ComparisonExpression):
# Maybe we go as far as duping the ObjectPath object too?
new_object_path = ObjectPath(
ast.lhs.object_type_name, ast.lhs.property_path,
)
result = _ComparisonExpression(
ast.operator, new_object_path, ast.rhs, ast.negated,
)
else:
raise TypeError("Can't duplicate " + type(ast).__name__)
return result
class ComparisonExpressionTransformer(Transformer):
"""
Transformer base class with special support for transforming comparison
expressions. The transform method implemented here performs a bottom-up
in-place transformation, with support for some comparison
expression-specific callbacks.
Specifically, subclasses can implement methods:
"transform_or" for OR nodes
"transform_and" for AND nodes
"transform_comparison" for plain comparison nodes (<prop> <op> <value>)
"transform_default" for both types of nodes
"transform_default" is a fallback, if a type-specific callback is not
found. The default implementation does nothing to the AST. The
type-specific callbacks are preferred over the default, if both exist.
In all cases, the callbacks are called with an AST for a subtree rooted at
the appropriate node type, where the subtree's children have already been
transformed. They must return the same thing as the base transform()
method: a 2-tuple with the transformed AST and a boolean for change
detection. See doc for the superclass' method.
This process currently silently drops parenthetical nodes.
"""
def transform(self, ast):
if isinstance(ast, _BooleanExpression):
changed = False
for i, operand in enumerate(ast.operands):
operand_result, this_changed = self.transform(operand)
if this_changed:
changed = True
ast.operands[i] = operand_result
result, this_changed = self.__dispatch_transform(ast)
if this_changed:
changed = True
elif isinstance(ast, _ComparisonExpression):
result, changed = self.__dispatch_transform(ast)
elif isinstance(ast, ParentheticalExpression):
# Drop these
result, changed = self.transform(ast.expression)
else:
raise TypeError("Not a comparison expression: " + str(ast))
return result, changed
def __dispatch_transform(self, ast):
"""
Invoke a transformer callback method based on the given ast root node
type.
Args:
ast: The AST
Returns:
The callback's result
"""
if isinstance(ast, AndBooleanExpression):
meth = getattr(self, "transform_and", self.transform_default)
elif isinstance(ast, OrBooleanExpression):
meth = getattr(self, "transform_or", self.transform_default)
elif isinstance(ast, _ComparisonExpression):
meth = getattr(
self, "transform_comparison", self.transform_default,
)
else:
meth = self.transform_default
return meth(ast)
def transform_default(self, ast):
"""
Override to handle transforming AST nodes which don't have a more
specific method implemented.
"""
return ast, False
class OrderDedupeTransformer(
ComparisonExpressionTransformer,
):
"""
Order the children of all nodes in the AST. Because the deduping algorithm
is based on sorted data, this transformation also does deduping.
E.g.:
A and A => A
A or A => A
"""
def __transform(self, ast):
"""
Sort/dedupe children. AND and OR can be treated identically.
Args:
ast: The comparison expression AST
Returns:
The same AST node, but with sorted children
"""
sorted_children = sorted(
ast.operands, key=functools.cmp_to_key(comparison_expression_cmp),
)
deduped_children = [
# Apparently when using a key function, groupby()'s "keys" are the
# key wrappers, not actual sequence values. Obviously we don't
# need key wrappers in our ASTs!
k.obj for k, _ in itertools.groupby(
sorted_children, key=functools.cmp_to_key(
comparison_expression_cmp,
),
)
]
changed = iter_lex_cmp(
ast.operands, deduped_children, comparison_expression_cmp,
) != 0
ast.operands = deduped_children
return ast, changed
def transform_or(self, ast):
return self.__transform(ast)
def transform_and(self, ast):
return self.__transform(ast)
class FlattenTransformer(ComparisonExpressionTransformer):
"""
Flatten all nodes of the AST. E.g.:
A and (B and C) => A and B and C
A or (B or C) => A or B or C
(A) => A
"""
def __transform(self, ast):
"""
Flatten children. AND and OR can be treated mostly identically. The
little difference is that we can absorb AND children if we're an AND
ourselves; and OR for OR.
Args:
ast: The comparison expression AST
Returns:
The same AST node, but with flattened children
"""
changed = False
if len(ast.operands) == 1:
# Replace an AND/OR with one child, with the child itself.
ast = ast.operands[0]
changed = True
else:
flat_operands = []
for operand in ast.operands:
if isinstance(operand, _BooleanExpression) \
and ast.operator == operand.operator:
flat_operands.extend(operand.operands)
changed = True
else:
flat_operands.append(operand)
ast.operands = flat_operands
return ast, changed
def transform_or(self, ast):
return self.__transform(ast)
def transform_and(self, ast):
return self.__transform(ast)
class AbsorptionTransformer(
ComparisonExpressionTransformer,
):
"""
Applies boolean "absorption" rules for AST simplification. E.g.:
A and (A or B) = A
A or (A and B) = A
"""
def __transform(self, ast):
changed = False
secondary_op = "AND" if ast.operator == "OR" else "OR"
to_delete = set()
# Check i (child1) against j to see if we can delete j.
for i, child1 in enumerate(ast.operands):
if i in to_delete:
continue
for j, child2 in enumerate(ast.operands):
if i == j or j in to_delete:
continue
# We're checking if child1 is contained in child2, so
# child2 has to be a compound object, not just a simple
# comparison expression. We also require the right operator
# for child2: "AND" if ast is "OR" and vice versa.
if not isinstance(child2, _BooleanExpression) \
or child2.operator != secondary_op:
continue
# The simple check: is child1 contained in child2?
if iter_in(
child1, child2.operands, comparison_expression_cmp,
):
to_delete.add(j)
# A more complicated check: does child1 occur in child2
# in a "flattened" form?
elif child1.operator == child2.operator:
if all(
iter_in(
child1_operand, child2.operands,
comparison_expression_cmp,
)
for child1_operand in child1.operands
):
to_delete.add(j)
if to_delete:
changed = True
for i in reversed(sorted(to_delete)):
del ast.operands[i]
return ast, changed
def transform_or(self, ast):
return self.__transform(ast)
def transform_and(self, ast):
return self.__transform(ast)
class DNFTransformer(ComparisonExpressionTransformer):
"""
Convert a comparison expression AST to DNF. E.g.:
A and (B or C) => (A and B) or (A and C)
"""
def transform_and(self, ast):
or_children = []
other_children = []
changed = False
# Sort AND children into two piles: the ORs and everything else
for child in ast.operands:
if isinstance(child, _BooleanExpression) and child.operator == "OR":
# Need a list of operand lists, so we can compute the
# product below.
or_children.append(child.operands)
else:
other_children.append(child)
if or_children:
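# Distribute AND over OR: e.g. with other_children [A] and
# or_children [[B, C]], the product yields argument sets (A, B) and
# (A, C), i.e. A and (B or C) -> (A and B) or (A and C).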
distributed_and_arg_sets = (
itertools.chain(other_children, prod_seq)
for prod_seq in itertools.product(*or_children)
)
# The AST implementation will error if AND boolean comparison
# operands have no common SCO types. We need to handle that here.
# The following will drop AND's with no common SCO types, which is
# harmless (since they're impossible patterns and couldn't match
# anything anyway). It also acts as a nice simplification of the
# pattern. If the original AND node was legal (operands had at
# least one SCO type in common), it is guaranteed that there will
# be at least one legal distributed AND node (distributed_children
# below will not wind up empty).
distributed_children = []
for and_arg_set in distributed_and_arg_sets:
try:
and_node = AndBooleanExpression(
# Make dupes: distribution implies adding repetition,
# and we should ensure each repetition is independent
# of the others.
_dupe_ast(arg) for arg in and_arg_set
)
except ValueError:
pass
else:
distributed_children.append(and_node)
# Need to recursively continue to distribute AND over OR in
# any of our new sub-expressions which need it. This causes
# more downward recursion in the midst of this bottom-up transform.
# It's not good for performance. I wonder if a top-down
# transformation algorithm would make more sense in this phase?
# But then we'd be using two different algorithms for the same
# thing... Maybe this transform should be completely top-down
# (no bottom-up component at all)?
distributed_children = [
self.transform(child)[0] for child in distributed_children
]
result = OrBooleanExpression(distributed_children)
changed = True
else:
# No AND-over-OR; nothing to do
result = ast
return result, changed
class SpecialValueCanonicalization(ComparisonExpressionTransformer):
"""
Try to find particular leaf-node comparison expressions whose rhs (i.e. the
constant) can be canonicalized. This is an idiosyncratic transformation
based on some ideas people had for context-sensitive semantic equivalence
in constant values.
"""
def transform_comparison(self, ast):
if ast.lhs.object_type_name == "windows-registry-key":
windows_reg_key(ast)
elif ast.lhs.object_type_name == "ipv4-addr":
ipv4_addr(ast)
elif ast.lhs.object_type_name == "ipv6-addr":
ipv6_addr(ast)
# Hard-code False here since this particular canonicalization is never
# worth doing more than once. I think it's okay to pretend nothing has
# changed.
return ast, False
|
{
"content_hash": "f845480f925c0886bc4021e076ff1a67",
"timestamp": "",
"source": "github",
"line_count": 404,
"max_line_length": 80,
"avg_line_length": 33.12871287128713,
"alnum_prop": 0.5882396891811118,
"repo_name": "oasis-open/cti-python-stix2",
"id": "9da91eff1f96ac267b91bef84f29c143a471fc1f",
"size": "13384",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stix2/equivalence/pattern/transform/comparison.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1737742"
}
],
"symlink_target": ""
}
|
from setuptools import setup
from setuptools.command.install import install as _install
class install(_install):
def run(self):
_install.run(self)
setup(
cmdclass = { 'install' : install },
name = 'hydra',
version = '0.1',
author = 'tatsy',
author_email = 'tatsy.mail@gmail.com',
url = 'https://github.com/tatsy/hydra.git',
description = 'Python HDR image processing library.',
license = 'MIT',
classifiers = [
'Development Status :: 1 - Planning',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4'
],
packages = [
'hydra',
'hydra.core',
'hydra.eo',
'hydra.filters',
'hydra.gen',
'hydra.io',
'hydra.tonemap'
]
)
|
{
"content_hash": "53fab3095f62e015ac07ae3cafc79c69",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 58,
"avg_line_length": 26,
"alnum_prop": 0.5637019230769231,
"repo_name": "tatsy/hydra",
"id": "e7480e46acbddcd69f925328f54a3a8f35b1978c",
"size": "832",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "33466"
}
],
"symlink_target": ""
}
|
import os
import sys
import subprocess
import itertools
from os import path
f = open(sys.argv[1], 'wb')
components = sys.argv[2].split(' ')
components = [i for i in components if i] # ignore extra whitespace
enable_static = sys.argv[3]
f.write("""// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// WARNING: THIS IS A GENERATED FILE, DO NOT MODIFY
// take a look at src/etc/mklldeps.py if you're interested
""")
def run(args):
proc = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = proc.communicate()
if err:
print("failed to run llconfig: args = `{}`".format(args))
print(err)
sys.exit(1)
return out
for llconfig in sys.argv[4:]:
f.write("\n")
out = run([llconfig, '--host-target'])
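# note: the tuple assignment below shadows the `os` module for the
# rest of the script; only the target-OS string is needed from here on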
arch, os = out.split('-', 1)
arch = 'x86' if arch == 'i686' or arch == 'i386' else arch
if 'darwin' in os:
os = 'macos'
elif 'linux' in os:
os = 'linux'
elif 'freebsd' in os:
os = 'freebsd'
elif 'dragonfly' in os:
os = 'dragonfly'
elif 'android' in os:
os = 'android'
elif 'win' in os or 'mingw' in os:
os = 'windows'
cfg = [
"target_arch = \"" + arch + "\"",
"target_os = \"" + os + "\"",
]
f.write("#[cfg(" + ', '.join(cfg) + ")]\n")
version = run([llconfig, '--version']).strip()
# LLVM libs
if version < '3.5':
args = [llconfig, '--libs']
else:
args = [llconfig, '--libs', '--system-libs']
args.extend(components)
out = run(args)
for lib in out.strip().replace("\n", ' ').split(' '):
lib = lib.strip()[2:] # chop off the leading '-l'
f.write("#[link(name = \"" + lib + "\"")
# LLVM libraries are all static libraries
if 'LLVM' in lib:
f.write(", kind = \"static\"")
f.write(")]\n")
# llvm-config before 3.5 didn't have a system-libs flag
if version < '3.5':
if os == 'windows': # `os` was normalized above, so the original 'win32' check could never match
f.write("#[link(name = \"imagehlp\")]")
# LLVM ldflags
out = run([llconfig, '--ldflags'])
for lib in out.strip().split(' '):
if lib[:2] == "-l":
f.write("#[link(name = \"" + lib[2:] + "\")]\n")
# C++ runtime library
out = run([llconfig, '--cxxflags'])
if enable_static == '1':
assert 'stdlib=libc++' not in out
f.write("#[link(name = \"stdc++\", kind = \"static\")]\n")
else:
if 'stdlib=libc++' in out:
f.write("#[link(name = \"c++\")]\n")
else:
f.write("#[link(name = \"stdc++\")]\n")
# Attach everything to an extern block
f.write("extern {}\n")
|
{
"content_hash": "06480616b9a24ba559e1d3122c5b49e6",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 81,
"avg_line_length": 29.95098039215686,
"alnum_prop": 0.5577741407528641,
"repo_name": "erickt/rust",
"id": "f184e07891b9642cd2487cff40ef70eb5c04a744",
"size": "3518",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "src/etc/mklldeps.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "3206"
},
{
"name": "Assembly",
"bytes": "25901"
},
{
"name": "Awk",
"bytes": "159"
},
{
"name": "C",
"bytes": "685127"
},
{
"name": "C++",
"bytes": "54059"
},
{
"name": "CSS",
"bytes": "20180"
},
{
"name": "Emacs Lisp",
"bytes": "43154"
},
{
"name": "JavaScript",
"bytes": "32223"
},
{
"name": "Perl",
"bytes": "1076"
},
{
"name": "Puppet",
"bytes": "11385"
},
{
"name": "Python",
"bytes": "99103"
},
{
"name": "Rust",
"bytes": "16621557"
},
{
"name": "Shell",
"bytes": "281700"
},
{
"name": "Vim script",
"bytes": "34089"
}
],
"symlink_target": ""
}
|
from django.core.urlresolvers import reverse
from tastypie import authorization
from tastypie.authentication import MultiAuthentication
from crits.samples.sample import Sample
from crits.samples.handlers import handle_uploaded_file
from crits.core.api import CRITsApiKeyAuthentication, CRITsSessionAuthentication
from crits.core.api import CRITsSerializer, CRITsAPIResource
class SampleResource(CRITsAPIResource):
"""
Class to handle everything related to the Sample API.
Currently supports GET and POST.
"""
class Meta:
object_class = Sample
allowed_methods = ('get', 'post', 'patch')
resource_name = "samples"
authentication = MultiAuthentication(CRITsApiKeyAuthentication(),
CRITsSessionAuthentication())
authorization = authorization.Authorization()
serializer = CRITsSerializer()
def get_object_list(self, request):
"""
Use the CRITsAPIResource to get our objects but provide the class to get
the objects from.
:param request: The incoming request.
:type request: :class:`django.http.HttpRequest`
:returns: Resulting objects in the specified format (JSON by default).
"""
return super(SampleResource, self).get_object_list(request, Sample)
def obj_create(self, bundle, **kwargs):
"""
Handles creating Samples through the API.
:param bundle: Bundle containing the information to create the Sample.
:type bundle: Tastypie Bundle object.
:returns: HttpResponse.
"""
analyst = bundle.request.user.username
type_ = bundle.data.get('upload_type', None)
content = {'return_code': 1,
'type': 'Sample'}
if not type_:
content['message'] = 'Must provide an upload type.'
self.crits_response(content)
if type_ not in ('metadata', 'file'):
content['message'] = 'Not a valid upload type.'
self.crits_response(content)
if type_ == 'metadata':
filename = bundle.data.get('filename', None)
md5 = bundle.data.get('md5', None)
password = None
filedata = None
elif type_ == 'file':
md5 = None
password = bundle.data.get('password', None)
file_ = bundle.data.get('filedata', None)
if not file_:
content['message'] = "Upload type of 'file' but no file uploaded."
self.crits_response(content)
filedata = file_
filename = None
campaign = bundle.data.get('campaign', None)
confidence = bundle.data.get('confidence', None)
source = bundle.data.get('source', None)
method = bundle.data.get('method', "")
reference = bundle.data.get('reference', None)
file_format = bundle.data.get('file_format', None)
related_md5 = bundle.data.get('related_md5', None)
related_id = bundle.data.get('related_id', None)
related_type = bundle.data.get('related_type', None)
backdoor_name = bundle.data.get('backdoor_name', None)
backdoor_version = bundle.data.get('backdoor_version', None)
bucket_list = bundle.data.get('bucket_list', None)
ticket = bundle.data.get('ticket', None)
sha1 = bundle.data.get('sha1', None)
sha256 = bundle.data.get('sha256', None)
size = bundle.data.get('size', None)
mimetype = bundle.data.get('mimetype', None)
if ((related_id and not related_type) or
(related_type and not related_id)):
content['message'] = "Must specify related_type and related_id"
self.crits_response(content)
sample_md5 = handle_uploaded_file(filedata,
source,
method,
reference,
file_format,
password,
user=analyst,
campaign=campaign,
confidence=confidence,
related_md5=related_md5,
related_id=related_id,
related_type=related_type,
filename=filename,
md5=md5,
sha1=sha1,
sha256=sha256,
size=size,
mimetype=mimetype,
bucket_list=bucket_list,
ticket=ticket,
is_return_only_md5=False,
backdoor_name=backdoor_name,
backdoor_version=backdoor_version)
result = {'success': False}
if len(sample_md5) > 0:
result = sample_md5[0]
if result.get('message'):
content['message'] = result.get('message')
if result.get('object'):
content['id'] = str(result.get('object').id)
if content.get('id'):
url = reverse('api_dispatch_detail',
kwargs={'resource_name': 'samples',
'api_name': 'v1',
'pk': content.get('id')})
content['url'] = url
else:
content['message'] = "Could not create Sample for unknown reason."
if result['success']:
content['return_code'] = 0
self.crits_response(content)
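# --- Editor's note: hedged usage sketch, not part of the original file. ---
# A minimal client-side illustration of the 'metadata' upload branch handled
# by obj_create() above. The base URL, endpoint layout, and credential
# parameter names are assumptions about a typical CRITs deployment; only the
# payload keys come from the handler itself.
def _example_create_sample_metadata(base_url, username, api_key):
    import requests
    payload = {
        'upload_type': 'metadata',
        'filename': 'suspicious.bin',
        'md5': 'd41d8cd98f00b204e9800998ecf8427e',
        'source': 'example-source',
    }
    # username/api_key query parameters assumed from Tastypie-style key auth.
    return requests.post(base_url + '/api/v1/samples/',
                         params={'username': username, 'api_key': api_key},
                         json=payload)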
|
{
"content_hash": "79e25715a3c9bd043786ee143678f38c",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 82,
"avg_line_length": 42.198581560283685,
"alnum_prop": 0.5097478991596639,
"repo_name": "ckane/crits",
"id": "871cdaf3fdb48607d58a1c8e67ed3433f6e1cc56",
"size": "5950",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "crits/samples/api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "8694"
},
{
"name": "CSS",
"bytes": "390510"
},
{
"name": "HTML",
"bytes": "478069"
},
{
"name": "JavaScript",
"bytes": "3555668"
},
{
"name": "Python",
"bytes": "2002476"
},
{
"name": "Shell",
"bytes": "20173"
}
],
"symlink_target": ""
}
|
import ast
import dendropy
import unittest
import numpy as np
import taxonfixer
class TestTreeReWrite(unittest.TestCase):
def test_tree_rewrite1(self):
treelist = dendropy.TreeList.get_from_path('/Users/ruthdavidson/code/phylogenetics-tools/tests/feefifofum','newick')
taxondict = {'fa': 1, 'fee': 2, 'fi' : 3, 'fo': 4, 'foo': 5, 'fum': 6}
newlist = ['((2,3),(4,6));', '((2,6),(3,1));', '(((5,1),3),4);']
n,d = taxonfixer.listparse('/Users/ruthdavidson/code/phylogenetics-tools/tests/feefifofum')
self.assertEqual(n,newlist)
self.assertEqual(d,taxondict)
def test_tree_rewrite2(self):
treelist = dendropy.TreeList.get_from_path('/Users/ruthdavidson/code/phylogenetics-tools/tests/abcdtrees','newick')
taxondict = {'a':1, 'b':2, 'c':3, 'd':4}
newlist = ['(((1,2),3),4);','((1,3),(2,4));','((4,1),(2,3));']
n,d = taxonfixer.listparse('/Users/ruthdavidson/code/phylogenetics-tools/tests/abcdtrees')
self.assertEqual(n,newlist)
self.assertEqual(d,taxondict)
class TestFileSwap(unittest.TestCase):
def test_file_rewrite(self):
taxonfixer.fileswap('/Users/ruthdavidson/code/phylogenetics-tools/tests/feefifofum','/Users/ruthdavidson/code/phylogenetics-tools/tests/feefifofumswap','newick')
#d = {'fa': 1, 'fee': 2, 'fi' : 3, 'fo': 4, 'foo': 5, 'fum': 6}
f = open('/Users/ruthdavidson/code/phylogenetics-tools/tests/feefifofumswap','r')
s1 = f.read()
g = open('/Users/ruthdavidson/code/phylogenetics-tools/tests/testffffswap','r')
s2 = g.read()
self.assertEqual(s1,s2)
h = open('/Users/ruthdavidson/code/phylogenetics-tools/tests/dictffffswap','r')
Sh = h.read()
dh = ast.literal_eval(Sh)
k = open('/Users/ruthdavidson/code/phylogenetics-tools/tests/feefifofum.taxon_key','r')
Sk = k.read()
dk = ast.literal_eval(Sk)
self.assertEqual(dh, dk)
h.close()
k.close()
f.close()
g.close()
def test_file_rewrite2(self):
taxonfixer.fileswap('/Users/ruthdavidson/code/phylogenetics-tools/tests/abcdtrees','/Users/ruthdavidson/code/phylogenetics-tools/tests/abcdtreesswap','newick')
l =[]
f = open('/Users/ruthdavidson/code/phylogenetics-tools/tests/abcdtreesswap','r')
s1 = f.read()
g = open('/Users/ruthdavidson/code/phylogenetics-tools/tests/testabcdswap','r')
s2 = g.read()
self.assertEqual(s1,s2)
h = open('/Users/ruthdavidson/code/phylogenetics-tools/tests/dictabcdswap','r')
Sh = h.read()
dh = ast.literal_eval(Sh)
k = open('/Users/ruthdavidson/code/phylogenetics-tools/tests/abcdtrees.taxon_key','r')
Sk = k.read()
dk = ast.literal_eval(Sk)
self.assertEqual(dh, dk)
h.close()
k.close()
f.close()
g.close()
|
{
"content_hash": "441c4d6c4ef2a0ddec72b896c0d3a868",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 169,
"avg_line_length": 44.22727272727273,
"alnum_prop": 0.6173347036656389,
"repo_name": "redavids/phylogenetics-tools",
"id": "21bf8fd371ef12e91b523f61e1342a895a5695ea",
"size": "2919",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_taxonfixer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "374714"
},
{
"name": "Makefile",
"bytes": "3143"
},
{
"name": "Perl",
"bytes": "46877"
},
{
"name": "Python",
"bytes": "12248"
},
{
"name": "Shell",
"bytes": "3786"
}
],
"symlink_target": ""
}
|
import os
from setuptools import setup
f = open(os.path.join(os.path.dirname(__file__), 'README.txt'))
long_description = f.read().strip()
f.close()
setup(
name='nose-advancedlogging',
version='0.1',
author='Anand Palanisamy',
author_email='apalanisamy@paypal.com',
description='Advanced logging for nosetests.',
long_description=long_description,
license='Apache License 2.0',
py_modules=['advancedlogging'],
entry_points={
'nose.plugins.0.10': [
'advancedlogging = advancedlogging:AdvancedLogging',
]
},
install_requires=['beautifulsoup4>=4.2.1'],
platforms='any',
zip_safe=False
)
|
{
"content_hash": "be13eab761218e4f1381152358b39c92",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 64,
"avg_line_length": 26.64,
"alnum_prop": 0.6516516516516516,
"repo_name": "paypal/aurora",
"id": "fcf3de815c1ef99a895d4bbb75ab9180d0db8197",
"size": "666",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "qa/advancedloggingplugin/setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1208418"
},
{
"name": "Groovy",
"bytes": "524327"
},
{
"name": "Java",
"bytes": "1221"
},
{
"name": "JavaScript",
"bytes": "3179536"
},
{
"name": "Python",
"bytes": "356151"
},
{
"name": "Shell",
"bytes": "14218"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cog', '0004_auto_20160106_0812'),
]
operations = [
migrations.AddField(
model_name='project',
name='shared',
field=models.BooleanField(default=True),
),
]
|
{
"content_hash": "9ed9769fe3ece668c34a94d549a39c56",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 52,
"avg_line_length": 20.833333333333332,
"alnum_prop": 0.5893333333333334,
"repo_name": "EarthSystemCoG/COG",
"id": "0bd618ff497319a870e7cacd1c90a2f252eeb557",
"size": "447",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cog/migrations/0005_project_shared.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "270"
},
{
"name": "CSS",
"bytes": "893678"
},
{
"name": "Classic ASP",
"bytes": "48011"
},
{
"name": "HTML",
"bytes": "96546078"
},
{
"name": "Java",
"bytes": "483882"
},
{
"name": "JavaScript",
"bytes": "13287152"
},
{
"name": "MATLAB",
"bytes": "30087"
},
{
"name": "PHP",
"bytes": "80287"
},
{
"name": "Python",
"bytes": "852780"
},
{
"name": "Rich Text Format",
"bytes": "6112"
},
{
"name": "Shell",
"bytes": "10602"
}
],
"symlink_target": ""
}
|
from datadog.api.base import ActionAPIResource
class Host(ActionAPIResource):
"""
A wrapper around Host HTTP API.
"""
_class_url = '/host'
@classmethod
def mute(cls, host_name, **params):
"""
Mute a host.
:param host_name: hostname
:type host_name: string
:param end: timestamp to end muting
:type end: POSIX timestamp
:param override: if true and the host is already muted, will override\
existing end on the host
:type override: bool
:param message: message to associate with the muting of this host
:type message: string
:returns: JSON response from HTTP API request
"""
return super(Host, cls)._trigger_class_action('POST', 'mute', host_name, **params)
@classmethod
def unmute(cls, host_name):
"""
Unmute a host.
:param host_name: hostname
:type host_name: string
:returns: JSON response from HTTP API request
"""
return super(Host, cls)._trigger_class_action('POST', 'unmute', host_name)
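# --- Editor's note: hedged usage sketch, not part of the original file. ---
# How a caller might mute a host for an hour through the wrapper above. The
# initialize() step and the key values are assumptions about standard
# datadogpy setup; mute()'s keyword arguments come straight from its docstring.
def _example_mute_host():
    import time
    from datadog import initialize, api
    initialize(api_key='<api_key>', app_key='<app_key>')  # placeholder keys
    api.Host.mute('web-01.example.com',
                  end=int(time.time()) + 3600,
                  message='planned maintenance')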
|
{
"content_hash": "028ae9c98325f79f769b32f0642a351f",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 90,
"avg_line_length": 25.15909090909091,
"alnum_prop": 0.5998193315266486,
"repo_name": "jofusa/datadogpy",
"id": "f14432c1fcaccf0199fa21e0e2edcdbad37aa9b5",
"size": "1107",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "datadog/api/hosts.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "259510"
},
{
"name": "Ruby",
"bytes": "333"
}
],
"symlink_target": ""
}
|
from menpo.shape.exceptions import FieldError
class TriFieldError(FieldError):
pass
|
{
"content_hash": "cb004782e57f1147f6f90aadcb87aa4e",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 45,
"avg_line_length": 18,
"alnum_prop": 0.8,
"repo_name": "karla3jo/menpo-old",
"id": "2e73ff7f0f5eca54decd0c44a6d018299825f6a1",
"size": "90",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "menpo/shape/mesh/exceptions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from django import forms
from django.contrib import admin
from django.contrib.auth.models import Group
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.forms import ReadOnlyPasswordHashField
from sso.models import Member
class MemberCreationForm(forms.ModelForm):
"""A form for creating new users. Includes all the required
fields, plus a repeated password."""
password1 = forms.CharField(label='Password', widget=forms.PasswordInput)
password2 = forms.CharField(label='Password confirmation', widget=forms.PasswordInput)
class Meta:
model = Member
fields = ('email', 'full_name', 'short_name')
def clean_password2(self):
# Check that the two password entries match
password1 = self.cleaned_data.get("password1")
password2 = self.cleaned_data.get("password2")
if password1 and password2 and password1 != password2:
raise forms.ValidationError("Passwords don't match")
return password2
def save(self, commit=True):
# Save the provided password in hashed format
user = super(MemberCreationForm, self).save(commit=False)
user.set_password(self.cleaned_data["password1"])
if commit:
user.save()
return user
class MemberChangeForm(forms.ModelForm):
"""A form for updating users. Includes all the fields on
the user, but replaces the password field with admin's
password hash display field.
"""
password = ReadOnlyPasswordHashField()
class Meta:
model = Member
fields = ('email', 'password', 'full_name', 'short_name',
'is_active', 'is_admin')
def clean_password(self):
# Regardless of what the user provides, return the initial value.
# This is done here, rather than on the field, because the
# field does not have access to the initial value
return self.initial["password"]
class MemberAdmin(UserAdmin):
# The forms to add and change user instances
form = MemberChangeForm
add_form = MemberCreationForm
# The fields to be used in displaying the Member model.
# These override the definitions on the base UserAdmin
# that reference specific fields on auth.User.
list_display = ('email', 'full_name', 'short_name', 'is_admin')
list_filter = ('is_admin',)
fieldsets = (
(None, {'fields': ('email', 'password')}),
('Personal info', {'fields': ('full_name', 'short_name',)}),
('Permissions', {'fields': ('is_admin',)}),
)
# add_fieldsets is not a standard ModelAdmin attribute. MemberAdmin
# overrides get_fieldsets to use this attribute when creating a user.
add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('email', 'full_name', 'short_name', 'password1', 'password2')}
),
)
search_fields = ('email',)
ordering = ('email',)
filter_horizontal = ()
# Now register the new MemberAdmin...
admin.site.register(Member, MemberAdmin)
# ... and, since we're not using Django's built-in permissions,
# unregister the Group model from admin.
admin.site.unregister(Group)
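# --- Editor's note: hedged configuration sketch, not part of the original
# file. For this admin to manage Member as the auth user model, the project
# settings must also point Django at it; the 'sso' app label below is
# inferred from the import above and may differ in a real project.
#
#   AUTH_USER_MODEL = 'sso.Member'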
|
{
"content_hash": "4269779a343d8edd8b07643e93dd5ec8",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 90,
"avg_line_length": 36.40909090909091,
"alnum_prop": 0.6644818976279651,
"repo_name": "favoritemedium/sso-prototype",
"id": "e07b39da97b73714a08f85b096538eb0e0a5138c",
"size": "3204",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sso/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "3157"
},
{
"name": "Python",
"bytes": "31541"
}
],
"symlink_target": ""
}
|
"""This module defines tableau file reader base class"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from future.utils import raise_with_traceback
import lxml.etree as etree
from pathlib2 import Path
from tableaupy.contenthandlers import ContentHandlerException
from tableaupy.readers import exceptions
class Reader(object):
"""Base class for all readers"""
_parser = etree.XMLParser(remove_blank_text=True, remove_comments=True)
def __init__(self, extension, content_handler):
super(Reader, self).__init__()
self.__extension = extension
self._xml_content_handler = content_handler()
@property
def extension(self):
"""extension getter"""
return self.__extension
def read(self, file_path):
"""Reads and parses the content of the file
Parameters
----------
file_path : str
path to file to be read and parsed
Raises
------
FileNotFound
            when the file does not exist
NodeNotFile
when `file_path` is not a file
FileNotReadable
when file is not readable
FileExtensionMismatch
when extension in file name does not match with desired extension
ReaderException
when not able to parse file
"""
try:
file_path = Path(file_path)
if not file_path.exists():
raise exceptions.FileNotFound(filename=str(file_path))
absolute_path = str(file_path.resolve())
if not file_path.is_file():
raise exceptions.NodeNotFile(filename=str(file_path))
if not os.access(absolute_path, os.R_OK):
                raise exceptions.FileNotReadable(filename=str(file_path))
if file_path.suffix != self.__extension:
raise exceptions.FileExtensionMismatch(
filename=str(file_path),
extension=self.__extension
)
tree = etree.parse(absolute_path, parser=self._parser)
root = tree.getroot()
self._xml_content_handler.parse(root)
except (etree.XMLSchemaParseError, ContentHandlerException) as err:
raise_with_traceback(exceptions.ReaderException(err))
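# --- Editor's note: hedged usage sketch, not part of the original file. ---
# Reader is meant to be subclassed with a concrete extension and content
# handler; the names below are hypothetical stand-ins for the real pairs
# defined elsewhere in tableaupy.readers.
class _ExampleHandler(object):
    def parse(self, root):
        # `root` is the lxml root element of the parsed document.
        self.root_tag = root.tag
class _ExampleTwbReader(Reader):
    def __init__(self):
        # '.twb' is an assumed extension, for illustration only.
        super(_ExampleTwbReader, self).__init__('.twb', _ExampleHandler)
# _ExampleTwbReader().read('workbook.twb') parses the file or raises one of
# the exceptions documented in Reader.read() above.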
|
{
"content_hash": "46d9f9d2be8dd9c4eb4dae020f09f4e8",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 77,
"avg_line_length": 29.25925925925926,
"alnum_prop": 0.6118143459915611,
"repo_name": "practo/Tableau-Py",
"id": "c1a533ef9fdca6fcb1f9933abc7e4d134ae62a11",
"size": "2394",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tableaupy/readers/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "70817"
}
],
"symlink_target": ""
}
|
"""Package contenant la commande 'porter'."""
from primaires.interpreteur.commande.commande import Commande
class CmdPorter(Commande):
"""Commande 'porter'"""
def __init__(self):
"""Constructeur de la commande"""
Commande.__init__(self, "porter", "wear")
self.nom_categorie = "objets"
self.schema = "<nom_objet>"
self.aide_courte = "équipe un objet"
self.aide_longue = \
"Cette commande permet d'équiper des objets (vêtements, " \
"armures, armes...). Vous devez pour cela avoir au moins " \
"une main libre, ainsi qu'un emplacement corporel adéquat " \
"(un pied nu pour une chaussette)."
def ajouter(self):
"""Méthode appelée lors de l'ajout de la commande à l'interpréteur"""
nom_objet = self.noeud.get_masque("nom_objet")
nom_objet.proprietes["conteneurs"] = \
"(personnage.equipement.inventaire_simple, )"
def interpreter(self, personnage, dic_masques):
"""Méthode d'interprétation de commande"""
objets = list(dic_masques["nom_objet"].objets_conteneurs)[0]
objet, conteneur = objets
personnage.agir("porter")
        # If the object is already held in hand, do not require a free hand
est_tenu = False
for membre in personnage.equipement.membres:
if membre.peut_tenir() and membre.tenu is objet:
est_tenu = True
break
if not est_tenu and personnage.equipement.cb_peut_tenir() < 1:
personnage << "|err|Il vous faut au moins une main libre pour " \
"vous équiper.|ff|"
return
for membre in personnage.equipement.membres:
if membre.peut_equiper(objet):
objet.contenu.retirer(objet)
membre.equiper(objet)
personnage << "Vous équipez {}.".format(objet.nom_singulier)
personnage.salle.envoyer(
"{{}} équipe {}.".format(objet.nom_singulier), personnage)
objet.script["porte"].executer(objet=objet,
personnage=personnage)
return
personnage << "|err|Vous ne pouvez équiper {}.|ff|".format(
objet.nom_singulier)
|
{
"content_hash": "86c1f1f9c920c873f00857e85dbe8354",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 78,
"avg_line_length": 40.45614035087719,
"alnum_prop": 0.5754553339115351,
"repo_name": "stormi/tsunami",
"id": "44bcef171d3a7661f6e4f4db14a25b866a4b255d",
"size": "3882",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/primaires/objet/commandes/porter/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "7188300"
},
{
"name": "Ruby",
"bytes": "373"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_dressed_commoner_tatooine_bith_female_01.iff"
result.attribute_template_id = 9
result.stfName("npc_name","bith_base_female")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
{
"content_hash": "68ae345690e6794a23aa4113fd7ad25e",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 86,
"avg_line_length": 24.692307692307693,
"alnum_prop": 0.7040498442367601,
"repo_name": "obi-two/Rebelion",
"id": "671734b9c76e41588d21e90e4ca7bd22333d045e",
"size": "466",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/mobile/shared_dressed_commoner_tatooine_bith_female_01.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
}
|
"""
Common functionalities shared between different iLO modules.
"""
import tempfile
from oslo.utils import importutils
from oslo_config import cfg
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common.i18n import _LE
from ironic.common.i18n import _LI
from ironic.common import images
from ironic.common import swift
from ironic.common import utils
from ironic.drivers import utils as driver_utils
from ironic.openstack.common import log as logging
ilo_client = importutils.try_import('proliantutils.ilo.ribcl')
STANDARD_LICENSE = 1
ESSENTIALS_LICENSE = 2
ADVANCED_LICENSE = 3
opts = [
cfg.IntOpt('client_timeout',
default=60,
help='Timeout (in seconds) for iLO operations'),
cfg.IntOpt('client_port',
default=443,
help='Port to be used for iLO operations'),
cfg.StrOpt('swift_ilo_container',
default='ironic_ilo_container',
help='The Swift iLO container to store data.'),
cfg.IntOpt('swift_object_expiry_timeout',
default=900,
help='Amount of time in seconds for Swift objects to '
'auto-expire.'),
]
CONF = cfg.CONF
CONF.register_opts(opts, group='ilo')
LOG = logging.getLogger(__name__)
REQUIRED_PROPERTIES = {
'ilo_address': _("IP address or hostname of the iLO. Required."),
'ilo_username': _("username for the iLO with administrator privileges. "
"Required."),
'ilo_password': _("password for ilo_username. Required.")
}
OPTIONAL_PROPERTIES = {
'client_port': _("port to be used for iLO operations. Optional."),
'client_timeout': _("timeout (in seconds) for iLO operations. Optional.")
}
CONSOLE_PROPERTIES = {
'console_port': _("node's UDP port to connect to. Only required for "
"console access.")
}
COMMON_PROPERTIES = REQUIRED_PROPERTIES.copy()
COMMON_PROPERTIES.update(OPTIONAL_PROPERTIES)
DEFAULT_BOOT_MODE = 'LEGACY'
BOOT_MODE_GENERIC_TO_ILO = {'bios': 'legacy', 'uefi': 'uefi'}
BOOT_MODE_ILO_TO_GENERIC = dict((v, k)
for (k, v) in BOOT_MODE_GENERIC_TO_ILO.items())
def parse_driver_info(node):
"""Gets the driver specific Node info.
This method validates whether the 'driver_info' property of the
supplied node contains the required information for this driver.
:param node: an ironic Node object.
:returns: a dict containing information from driver_info (or where
applicable, config values).
:raises: InvalidParameterValue if any parameters are incorrect
:raises: MissingParameterValue if some mandatory information
is missing on the node
"""
info = node.driver_info
d_info = {}
missing_info = []
for param in REQUIRED_PROPERTIES:
try:
d_info[param] = info[param]
except KeyError:
missing_info.append(param)
if missing_info:
raise exception.MissingParameterValue(_(
"The following required iLO parameters are missing from the "
"node's driver_info: %s") % missing_info)
not_integers = []
for param in OPTIONAL_PROPERTIES:
value = info.get(param, CONF.ilo.get(param))
try:
d_info[param] = int(value)
except ValueError:
not_integers.append(param)
for param in CONSOLE_PROPERTIES:
value = info.get(param)
if value:
try:
d_info[param] = int(value)
except ValueError:
not_integers.append(param)
if not_integers:
raise exception.InvalidParameterValue(_(
"The following iLO parameters from the node's driver_info "
"should be integers: %s") % not_integers)
return d_info
def get_ilo_object(node):
"""Gets an IloClient object from proliantutils library.
    Given an ironic node object, this method gives back an IloClient object
to do operations on the iLO.
:param node: an ironic node object.
:returns: an IloClient object.
:raises: InvalidParameterValue on invalid inputs.
:raises: MissingParameterValue if some mandatory information
is missing on the node
"""
driver_info = parse_driver_info(node)
ilo_object = ilo_client.IloClient(driver_info['ilo_address'],
driver_info['ilo_username'],
driver_info['ilo_password'],
driver_info['client_timeout'],
driver_info['client_port'])
return ilo_object
def get_ilo_license(node):
"""Gives the current installed license on the node.
Given an ironic node object, this method queries the iLO
for currently installed license and returns it back.
:param node: an ironic node object.
:returns: a constant defined in this module which
refers to the current license installed on the node.
:raises: InvalidParameterValue on invalid inputs.
:raises: MissingParameterValue if some mandatory information
is missing on the node
:raises: IloOperationError if it failed to retrieve the
installed licenses from the iLO.
"""
# Get the ilo client object, and then the license from the iLO
ilo_object = get_ilo_object(node)
try:
license_info = ilo_object.get_all_licenses()
except ilo_client.IloError as ilo_exception:
raise exception.IloOperationError(operation=_('iLO license check'),
error=str(ilo_exception))
# Check the license to see if the given license exists
current_license_type = license_info['LICENSE_TYPE']
if current_license_type.endswith("Advanced"):
return ADVANCED_LICENSE
elif current_license_type.endswith("Essentials"):
return ESSENTIALS_LICENSE
else:
return STANDARD_LICENSE
def update_ipmi_properties(task):
"""Update ipmi properties to node driver_info
:param task: a task from TaskManager.
"""
node = task.node
info = node.driver_info
# updating ipmi credentials
info['ipmi_address'] = info.get('ilo_address')
info['ipmi_username'] = info.get('ilo_username')
info['ipmi_password'] = info.get('ilo_password')
if 'console_port' in info:
info['ipmi_terminal_port'] = info['console_port']
# saving ipmi credentials to task object
task.node.driver_info = info
def _get_floppy_image_name(node):
"""Returns the floppy image name for a given node.
:param node: the node for which image name is to be provided.
"""
return "image-%s" % node.uuid
def _prepare_floppy_image(task, params):
"""Prepares the floppy image for passing the parameters.
This method prepares a temporary vfat filesystem image. Then it adds
two files into the image - one containing the authentication token and
the other containing the parameters to be passed to the ramdisk. Then it
uploads the file to Swift in 'swift_ilo_container', setting it to
auto-expire after 'swift_object_expiry_timeout' seconds. Then it returns
the temp url for the Swift object.
:param task: a TaskManager instance containing the node to act on.
:param params: a dictionary containing 'parameter name'->'value' mapping
to be passed to the deploy ramdisk via the floppy image.
:raises: ImageCreationFailed, if it failed while creating the floppy image.
:raises: SwiftOperationError, if any operation with Swift fails.
:returns: the Swift temp url for the floppy image.
"""
with tempfile.NamedTemporaryFile() as vfat_image_tmpfile_obj:
files_info = {}
token_tmpfile_obj = None
vfat_image_tmpfile = vfat_image_tmpfile_obj.name
# If auth_strategy is noauth, then no need to write token into
# the image file.
if task.context.auth_token:
token_tmpfile_obj = tempfile.NamedTemporaryFile()
token_tmpfile = token_tmpfile_obj.name
utils.write_to_file(token_tmpfile, task.context.auth_token)
files_info[token_tmpfile] = 'token'
try:
images.create_vfat_image(vfat_image_tmpfile, files_info=files_info,
parameters=params)
finally:
if token_tmpfile_obj:
token_tmpfile_obj.close()
container = CONF.ilo.swift_ilo_container
object_name = _get_floppy_image_name(task.node)
timeout = CONF.ilo.swift_object_expiry_timeout
object_headers = {'X-Delete-After': timeout}
swift_api = swift.SwiftAPI()
swift_api.create_object(container, object_name,
vfat_image_tmpfile,
object_headers=object_headers)
temp_url = swift_api.get_temp_url(container, object_name, timeout)
LOG.debug("Uploaded floppy image %(object_name)s to %(container)s "
"for deployment.",
{'object_name': object_name, 'container': container})
return temp_url
def attach_vmedia(node, device, url):
"""Attaches the given url as virtual media on the node.
:param node: an ironic node object.
:param device: the virtual media device to attach
:param url: the http/https url to attach as the virtual media device
:raises: IloOperationError if insert virtual media failed.
"""
ilo_object = get_ilo_object(node)
try:
ilo_object.insert_virtual_media(url, device=device)
ilo_object.set_vm_status(device=device, boot_option='CONNECT',
write_protect='YES')
except ilo_client.IloError as ilo_exception:
operation = _("Inserting virtual media %s") % device
raise exception.IloOperationError(operation=operation,
error=ilo_exception)
LOG.info(_LI("Attached virtual media %s successfully."), device)
def set_boot_mode(node, boot_mode):
"""Sets the node to boot using boot_mode for the next boot.
:param node: an ironic node object.
:param boot_mode: Next boot mode.
:raises: IloOperationError if setting boot mode failed.
"""
ilo_object = get_ilo_object(node)
try:
p_boot_mode = ilo_object.get_pending_boot_mode()
except ilo_client.IloCommandNotSupportedError:
p_boot_mode = DEFAULT_BOOT_MODE
if BOOT_MODE_ILO_TO_GENERIC[p_boot_mode.lower()] == boot_mode:
LOG.info(_LI("Node %(uuid)s pending boot mode is %(boot_mode)s."),
{'uuid': node.uuid, 'boot_mode': boot_mode})
return
try:
ilo_object.set_pending_boot_mode(
BOOT_MODE_GENERIC_TO_ILO[boot_mode].upper())
except ilo_client.IloError as ilo_exception:
operation = _("Setting %s as boot mode") % boot_mode
raise exception.IloOperationError(operation=operation,
error=ilo_exception)
LOG.info(_LI("Node %(uuid)s boot mode is set to %(boot_mode)s."),
{'uuid': node.uuid, 'boot_mode': boot_mode})
def update_boot_mode_capability(task):
"""Update 'boot_mode' capability value of node's 'capabilities' property.
:param task: Task object.
"""
ilo_object = get_ilo_object(task.node)
try:
p_boot_mode = ilo_object.get_pending_boot_mode()
if p_boot_mode == 'UNKNOWN':
            # NOTE(faizan) iLO will return this in rare cases, mostly on
            # nodes which support UEFI. Such nodes usually come with UEFI
            # as the default boot mode, so we try setting the boot mode to
            # UEFI and fall back to BIOS boot mode if that fails.
ilo_object.set_pending_boot_mode('UEFI')
p_boot_mode = 'UEFI'
except ilo_client.IloCommandNotSupportedError:
p_boot_mode = DEFAULT_BOOT_MODE
driver_utils.rm_node_capability(task, 'boot_mode')
driver_utils.add_node_capability(task, 'boot_mode',
BOOT_MODE_ILO_TO_GENERIC[p_boot_mode.lower()])
def setup_vmedia_for_boot(task, boot_iso, parameters=None):
"""Sets up the node to boot from the given ISO image.
This method attaches the given boot_iso on the node and passes
the required parameters to it via virtual floppy image.
:param task: a TaskManager instance containing the node to act on.
:param boot_iso: a bootable ISO image to attach to. The boot iso
should be present in either Glance or in Swift. If present in
Glance, it should be of format 'glance:<glance-image-uuid>'.
If present in Swift, it should be of format 'swift:<object-name>'.
It is assumed that object is present in CONF.ilo.swift_ilo_container.
:param parameters: the parameters to pass in the virtual floppy image
in a dictionary. This is optional.
:raises: ImageCreationFailed, if it failed while creating the floppy image.
:raises: SwiftOperationError, if any operation with Swift fails.
:raises: IloOperationError, if attaching virtual media failed.
"""
LOG.info(_LI("Setting up node %s to boot from virtual media"),
task.node.uuid)
if parameters:
floppy_image_temp_url = _prepare_floppy_image(task, parameters)
attach_vmedia(task.node, 'FLOPPY', floppy_image_temp_url)
boot_iso_temp_url = None
scheme, boot_iso_ref = boot_iso.split(':')
if scheme == 'swift':
swift_api = swift.SwiftAPI()
container = CONF.ilo.swift_ilo_container
object_name = boot_iso_ref
timeout = CONF.ilo.swift_object_expiry_timeout
boot_iso_temp_url = swift_api.get_temp_url(container, object_name,
timeout)
elif scheme == 'glance':
glance_uuid = boot_iso_ref
boot_iso_temp_url = images.get_temp_url_for_glance_image(task.context,
glance_uuid)
attach_vmedia(task.node, 'CDROM', boot_iso_temp_url)
def cleanup_vmedia_boot(task):
"""Cleans a node after a virtual media boot.
This method cleans up a node after a virtual media boot. It deletes the
floppy image if it exists in CONF.ilo.swift_ilo_container. It also
ejects both virtual media cdrom and virtual media floppy.
:param task: a TaskManager instance containing the node to act on.
"""
LOG.debug("Cleaning up node %s after virtual media boot", task.node.uuid)
container = CONF.ilo.swift_ilo_container
object_name = _get_floppy_image_name(task.node)
try:
swift_api = swift.SwiftAPI()
swift_api.delete_object(container, object_name)
except exception.SwiftOperationError as e:
LOG.exception(_LE("Error while deleting %(object_name)s from "
"%(container)s. Error: %(error)s"),
{'object_name': object_name, 'container': container,
'error': e})
ilo_object = get_ilo_object(task.node)
for device in ('FLOPPY', 'CDROM'):
try:
ilo_object.eject_virtual_media(device)
except ilo_client.IloError as ilo_exception:
LOG.exception(_LE("Error while ejecting virtual media %(device)s "
"from node %(uuid)s. Error: %(error)s"),
{'device': device, 'uuid': task.node.uuid,
'error': ilo_exception})
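# --- Editor's note: hedged reference sketch, not part of the original file. ---
# Shape of the node driver_info that parse_driver_info() above validates.
# Values are placeholders; the three ilo_* keys are required, while the
# client_* keys are optional and default to the [ilo] config group.
_EXAMPLE_DRIVER_INFO = {
    'ilo_address': '10.0.0.5',
    'ilo_username': 'Administrator',
    'ilo_password': 'secret',
    'client_port': 443,       # must parse as an integer
    'client_timeout': 60,     # must parse as an integer
}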
|
{
"content_hash": "f976d038ae8b76c8c5918c69b8bcd240",
"timestamp": "",
"source": "github",
"line_count": 411,
"max_line_length": 79,
"avg_line_length": 37.34063260340633,
"alnum_prop": 0.6412979735453183,
"repo_name": "ramineni/myironic",
"id": "807e89b475eefc10404ba6c3c6b4e2675f89fa14",
"size": "15954",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ironic/drivers/modules/ilo/common.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "1667"
},
{
"name": "Python",
"bytes": "2501292"
},
{
"name": "XML",
"bytes": "804"
}
],
"symlink_target": ""
}
|
"""setup.py
Upload to PyPI, Thx to: http://peterdowns.com/posts/first-time-with-pypi.html
python3 setup.py sdist
twine upload --repository pypitest dist/pyleri-x.x.x.tar.gz
twine upload --repository pypi dist/pyleri-x.x.x.tar.gz
"""
from setuptools import setup
from pyleri import __version__ as version
try:
with open('README.md', 'r') as f:
long_description = f.read()
except IOError:
long_description = ''
setup(
name='pyleri',
packages=['pyleri'],
version=version,
description='Python Left-Right Parser',
long_description=long_description,
long_description_content_type='text/markdown',
author='Jeroen van der Heijden',
author_email='jeroen@cesbit.com',
url='https://github.com/cesbit/pyleri',
download_url=(
'https://github.com/cesbit/'
'pyleri/tarball/{}'.format(version)),
keywords=['parser', 'grammar', 'autocompletion'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.0',
'Programming Language :: Python :: 3.1',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3 :: Only',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Text Processing :: Linguistic'
],
)
|
{
"content_hash": "0913ccf39628dbfcbe537ed20b710034",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 77,
"avg_line_length": 35.46153846153846,
"alnum_prop": 0.6220173535791758,
"repo_name": "transceptor-technology/pyleri",
"id": "c1a005a10996a52cc5943000fedd8284b258fff8",
"size": "1844",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "83426"
}
],
"symlink_target": ""
}
|
"""
Diametrically point loaded 2-D disk, using commands for interactive use. See
:ref:`sec-primer`.
The script combines the functionality of all the ``its2D_?.py`` examples and
allows setting various simulation parameters, namely:
- material parameters
- displacement field approximation order
- uniform mesh refinement level
The example shows also how to probe the results as in
:ref:`linear_elasticity-its2D_4`, and how to display the results using Mayavi.
Using :mod:`sfepy.discrete.probes` allows correct probing of fields with the
approximation order greater than one.
In the SfePy top-level directory the following command can be used to get usage
information::
python examples/linear_elasticity/its2D_interactive.py -h
Notes
-----
The ``--probe`` and ``--show`` options work simultaneously only if Mayavi and
Matplotlib use the same backend type (for example wx).
"""
import sys
sys.path.append('.')
from optparse import OptionParser
import numpy as nm
import matplotlib.pyplot as plt
from sfepy.base.base import assert_, output, ordered_iteritems, IndexedStruct
from sfepy.discrete import (FieldVariable, Material, Integral, Integrals,
Equation, Equations, Problem)
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.terms import Term
from sfepy.discrete.conditions import Conditions, EssentialBC
from sfepy.mechanics.matcoefs import stiffness_from_youngpoisson
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.discrete.fem.geometry_element import geometry_data
from sfepy.discrete.probes import LineProbe
from sfepy.discrete.projections import project_by_component
from its2D_2 import stress_strain
from its2D_3 import nodal_stress
def gen_lines(problem):
"""
Define two line probes.
Additional probes can be added by appending to `ps0` (start points) and
`ps1` (end points) lists.
"""
ps0 = [[0.0, 0.0], [0.0, 0.0]]
ps1 = [[75.0, 0.0], [0.0, 75.0]]
# Use enough points for higher order approximations.
n_point = 1000
labels = ['%s -> %s' % (p0, p1) for p0, p1 in zip(ps0, ps1)]
probes = []
for ip in xrange(len(ps0)):
p0, p1 = ps0[ip], ps1[ip]
probes.append(LineProbe(p0, p1, n_point))
return probes, labels
def probe_results(u, strain, stress, probe, label):
"""
Probe the results using the given probe and plot the probed values.
"""
results = {}
pars, vals = probe(u)
results['u'] = (pars, vals)
pars, vals = probe(strain)
results['cauchy_strain'] = (pars, vals)
pars, vals = probe(stress)
results['cauchy_stress'] = (pars, vals)
fig = plt.figure()
plt.clf()
fig.subplots_adjust(hspace=0.4)
plt.subplot(311)
pars, vals = results['u']
for ic in range(vals.shape[1]):
plt.plot(pars, vals[:,ic], label=r'$u_{%d}$' % (ic + 1),
lw=1, ls='-', marker='+', ms=3)
plt.ylabel('displacements')
plt.xlabel('probe %s' % label, fontsize=8)
plt.legend(loc='best', fontsize=10)
sym_indices = ['11', '22', '12']
plt.subplot(312)
pars, vals = results['cauchy_strain']
for ic in range(vals.shape[1]):
plt.plot(pars, vals[:,ic], label=r'$e_{%s}$' % sym_indices[ic],
lw=1, ls='-', marker='+', ms=3)
plt.ylabel('Cauchy strain')
plt.xlabel('probe %s' % label, fontsize=8)
plt.legend(loc='best', fontsize=10)
plt.subplot(313)
pars, vals = results['cauchy_stress']
for ic in range(vals.shape[1]):
plt.plot(pars, vals[:,ic], label=r'$\sigma_{%s}$' % sym_indices[ic],
lw=1, ls='-', marker='+', ms=3)
plt.ylabel('Cauchy stress')
plt.xlabel('probe %s' % label, fontsize=8)
plt.legend(loc='best', fontsize=10)
return fig, results
usage = '%prog [options]\n' + __doc__.rstrip()
helps = {
'young' : "the Young's modulus [default: %default]",
'poisson' : "the Poisson's ratio [default: %default]",
'load' : "the vertical load value (negative means compression)"
" [default: %default]",
'order' : 'displacement field approximation order [default: %default]',
'refine' : 'uniform mesh refinement level [default: %default]',
'probe' : 'probe the results',
'show' : 'show the results figure',
}
def main():
from sfepy import data_dir
parser = OptionParser(usage=usage, version='%prog')
parser.add_option('--young', metavar='float', type=float,
action='store', dest='young',
default=2000.0, help=helps['young'])
parser.add_option('--poisson', metavar='float', type=float,
action='store', dest='poisson',
default=0.4, help=helps['poisson'])
parser.add_option('--load', metavar='float', type=float,
action='store', dest='load',
default=-1000.0, help=helps['load'])
parser.add_option('--order', metavar='int', type=int,
action='store', dest='order',
default=1, help=helps['order'])
parser.add_option('-r', '--refine', metavar='int', type=int,
action='store', dest='refine',
default=0, help=helps['refine'])
parser.add_option('-s', '--show',
action="store_true", dest='show',
default=False, help=helps['show'])
parser.add_option('-p', '--probe',
action="store_true", dest='probe',
default=False, help=helps['probe'])
options, args = parser.parse_args()
assert_((0.0 < options.poisson < 0.5),
"Poisson's ratio must be in ]0, 0.5[!")
assert_((0 < options.order),
'displacement approximation order must be at least 1!')
output('using values:')
output(" Young's modulus:", options.young)
output(" Poisson's ratio:", options.poisson)
output(' vertical load:', options.load)
output('uniform mesh refinement level:', options.refine)
# Build the problem definition.
mesh = Mesh.from_file(data_dir + '/meshes/2d/its2D.mesh')
domain = FEDomain('domain', mesh)
if options.refine > 0:
for ii in xrange(options.refine):
output('refine %d...' % ii)
domain = domain.refine()
output('... %d nodes %d elements'
% (domain.shape.n_nod, domain.shape.n_el))
omega = domain.create_region('Omega', 'all')
left = domain.create_region('Left',
'vertices in x < 0.001', 'facet')
bottom = domain.create_region('Bottom',
'vertices in y < 0.001', 'facet')
top = domain.create_region('Top', 'vertex 2', 'vertex')
field = Field.from_args('fu', nm.float64, 'vector', omega,
approx_order=options.order)
u = FieldVariable('u', 'unknown', field)
v = FieldVariable('v', 'test', field, primary_var_name='u')
D = stiffness_from_youngpoisson(2, options.young, options.poisson)
asphalt = Material('Asphalt', D=D)
load = Material('Load', values={'.val' : [0.0, options.load]})
integral = Integral('i', order=2*options.order)
integral0 = Integral('i', order=0)
t1 = Term.new('dw_lin_elastic(Asphalt.D, v, u)',
integral, omega, Asphalt=asphalt, v=v, u=u)
t2 = Term.new('dw_point_load(Load.val, v)',
integral0, top, Load=load, v=v)
eq = Equation('balance', t1 - t2)
eqs = Equations([eq])
xsym = EssentialBC('XSym', bottom, {'u.1' : 0.0})
ysym = EssentialBC('YSym', left, {'u.0' : 0.0})
ls = ScipyDirect({})
nls_status = IndexedStruct()
nls = Newton({}, lin_solver=ls, status=nls_status)
pb = Problem('elasticity', equations=eqs, nls=nls, ls=ls)
pb.time_update(ebcs=Conditions([xsym, ysym]))
# Solve the problem.
state = pb.solve()
output(nls_status)
# Postprocess the solution.
out = state.create_output_dict()
out = stress_strain(out, pb, state, extend=True)
pb.save_state('its2D_interactive.vtk', out=out)
gdata = geometry_data['2_3']
nc = len(gdata.coors)
integral_vn = Integral('ivn', coors=gdata.coors,
weights=[gdata.volume / nc] * nc)
nodal_stress(out, pb, state, integrals=Integrals([integral_vn]))
if options.probe:
# Probe the solution.
probes, labels = gen_lines(pb)
sfield = Field.from_args('sym_tensor', nm.float64, 3, omega,
approx_order=options.order - 1)
stress = FieldVariable('stress', 'parameter', sfield,
primary_var_name='(set-to-None)')
strain = FieldVariable('strain', 'parameter', sfield,
primary_var_name='(set-to-None)')
cfield = Field.from_args('component', nm.float64, 1, omega,
approx_order=options.order - 1)
component = FieldVariable('component', 'parameter', cfield,
primary_var_name='(set-to-None)')
ev = pb.evaluate
order = 2 * (options.order - 1)
strain_qp = ev('ev_cauchy_strain.%d.Omega(u)' % order, mode='qp')
stress_qp = ev('ev_cauchy_stress.%d.Omega(Asphalt.D, u)' % order,
mode='qp', copy_materials=False)
project_by_component(strain, strain_qp, component, order)
project_by_component(stress, stress_qp, component, order)
all_results = []
for ii, probe in enumerate(probes):
fig, results = probe_results(u, strain, stress, probe, labels[ii])
fig.savefig('its2D_interactive_probe_%d.png' % ii)
all_results.append(results)
for ii, results in enumerate(all_results):
output('probe %d:' % ii)
output.level += 2
for key, res in ordered_iteritems(results):
output(key + ':')
val = res[1]
output(' min: %+.2e, mean: %+.2e, max: %+.2e'
% (val.min(), val.mean(), val.max()))
output.level -= 2
if options.show:
# Show the solution. If the approximation order is greater than 1, the
# extra DOFs are simply thrown away.
from sfepy.postprocess.viewer import Viewer
view = Viewer('its2D_interactive.vtk')
view(vector_mode='warp_norm', rel_scaling=1,
is_scalar_bar=True, is_wireframe=True)
if __name__ == '__main__':
main()
|
{
"content_hash": "58ccfa18b6bffd67851af1f04cb20de3",
"timestamp": "",
"source": "github",
"line_count": 290,
"max_line_length": 79,
"avg_line_length": 36.324137931034485,
"alnum_prop": 0.5928422251756218,
"repo_name": "RexFuzzle/sfepy",
"id": "1d7509708e14bb41b58aa42dd2a775e649d28a93",
"size": "10556",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/linear_elasticity/its2D_interactive.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "443017"
},
{
"name": "C++",
"bytes": "2619"
},
{
"name": "GLSL",
"bytes": "6058"
},
{
"name": "Makefile",
"bytes": "184"
},
{
"name": "Python",
"bytes": "2420488"
},
{
"name": "Shell",
"bytes": "71"
}
],
"symlink_target": ""
}
|
"""This example creates a creative field associated with a given advertiser. To
get an advertiser ID, run get_advertisers.py.
Tags: creativefield.saveCreativeField
"""
__author__ = 'api.jdilallo@gmail.com (Joseph DiLallo)'
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfaClient
ADVERTISER_ID = 'INSERT_ADVERTISER_ID_HERE'
CREATIVE_FIELD_NAME = 'INSERT_CREATIVE_FIELD_NAME_HERE'
def main(client, advertiser_id, creative_field_name):
# Initialize appropriate service.
creative_field_service = client.GetCreativeFieldService(
'https://advertisersapitest.doubleclick.net', 'v1.19')
# Construct and save creative field.
creative_field = {
'name': creative_field_name,
'advertiserId': advertiser_id,
'id': '-1'
}
result = creative_field_service.SaveCreativeField(creative_field)[0]
# Display results.
print 'Creative field with ID \'%s\' was created.' % result['id']
if __name__ == '__main__':
# Initialize client object.
client = DfaClient(path=os.path.join('..', '..', '..', '..'))
main(client, ADVERTISER_ID, CREATIVE_FIELD_NAME)
|
{
"content_hash": "7cc941eebecd84a894afcf7e01dea154",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 79,
"avg_line_length": 29.170731707317074,
"alnum_prop": 0.6931438127090301,
"repo_name": "caioserra/apiAdwords",
"id": "f8886339db69dfaa58d582745d678a0026f6c9eb",
"size": "1814",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "examples/adspygoogle/dfa/v1_19/create_creative_field.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "47375"
},
{
"name": "Python",
"bytes": "3481410"
},
{
"name": "Shell",
"bytes": "14782"
}
],
"symlink_target": ""
}
|
from application import Application
import os
my_app = Application(environment=os.environ)
celery = my_app.celery()
import tasks
app = my_app.flask_app
if __name__ == '__main__':
app.logger.info("Running {}".format(app.flask_app.name))
app.run()
|
{
"content_hash": "452e77d6f53702a5ac3e2a312cbb103c",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 60,
"avg_line_length": 21.416666666666668,
"alnum_prop": 0.688715953307393,
"repo_name": "rocknsm/docket",
"id": "508dad018eebc766f9db0af7bfe91569f8f116d8",
"size": "948",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docket/docket.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "78716"
},
{
"name": "HTML",
"bytes": "11070"
},
{
"name": "JavaScript",
"bytes": "50475"
},
{
"name": "Python",
"bytes": "133436"
},
{
"name": "Ruby",
"bytes": "864"
},
{
"name": "Shell",
"bytes": "2355"
}
],
"symlink_target": ""
}
|
import unittest
import math
import random
import yodel.filter
import yodel.analysis
import yodel.conversion
import yodel.complex
class TestLowPassFilter(unittest.TestCase):
def setUp(self):
self.flt = yodel.filter.SinglePole()
def test_coefficients(self):
samplerate = 48000
x = 0.86
a0 = 1 - x
a1 = 0.0
b1 = x
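        # Recover the cutoff from the pole: b1 = x = exp(-2*pi*cutoff/fs),
        # so cutoff = -fs * ln(x) / (2*pi), as computed below.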
cutoff = (math.log(x) / (-2.0 * math.pi)) * samplerate
self.flt.low_pass(samplerate, cutoff)
self.assertAlmostEqual(a0, self.flt._a0)
self.assertAlmostEqual(a1, self.flt._a1)
self.assertAlmostEqual(b1, self.flt._b1)
class TestHighPassFilter(unittest.TestCase):
def setUp(self):
self.flt = yodel.filter.SinglePole()
def test_coefficients(self):
samplerate = 48000
x = 0.86
a0 = (1.0 + x) / 2.0
a1 = - (1.0 + x) / 2.0
b1 = x
cutoff = (math.log(x) / (-2.0 * math.pi)) * samplerate
self.flt.high_pass(samplerate, cutoff)
self.assertAlmostEqual(a0, self.flt._a0)
self.assertAlmostEqual(a1, self.flt._a1)
self.assertAlmostEqual(b1, self.flt._b1)
class TestFlatFilter(unittest.TestCase):
def setUp(self):
self.block_size = 512
self.flt = yodel.filter.SinglePole()
def common_check_flat_response(self):
self.flt.process(self.signal, self.output)
for i in range(0, self.block_size):
self.assertEqual(self.signal[i], self.output[i])
def test_zero_signal(self):
self.signal = [0] * self.block_size
self.output = [0] * self.block_size
self.common_check_flat_response()
def test_dirac_signal(self):
self.signal = [0] * self.block_size
self.signal[0] = 1
self.output = [0] * self.block_size
self.common_check_flat_response()
def test_sine_signal(self):
self.signal = [math.sin(2.0 * math.pi * 100.0 * i / 48000.0) for i in range(0, self.block_size)]
self.output = [0] * self.block_size
self.common_check_flat_response()
def test_random_signal(self):
self.signal = [random.random() for i in range(0, self.block_size)]
self.output = [0] * self.block_size
self.common_check_flat_response()
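# --- Editor's note: hedged usage sketch, not part of the original file. ---
# The minimal API exercised by these tests: configure a single-pole low-pass
# filter and process one block. Cutoff and block size are arbitrary.
def _lowpass_impulse_demo():
    flt = yodel.filter.SinglePole()
    flt.low_pass(48000, 1000.0)   # 1 kHz cutoff at a 48 kHz sample rate
    signal = [0.0] * 64
    signal[0] = 1.0               # unit impulse
    output = [0.0] * 64
    flt.process(signal, output)   # impulse response is written into `output`
    return output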
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "77bddda1b4bd7b081aca30641231d4cd",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 104,
"avg_line_length": 27.38372093023256,
"alnum_prop": 0.597452229299363,
"repo_name": "rclement/yodel",
"id": "cc1715e7340e2074aeb882816a843e994fb6a444",
"size": "2355",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_filter_single_pole.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "167289"
},
{
"name": "Shell",
"bytes": "6697"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0003_user_registration_method'),
]
operations = [
migrations.AddField(
model_name='user',
name='agreement_date',
field=models.DateTimeField(help_text='Indicates when the user has agreed with the policy.', null=True, verbose_name='agreement date', blank=True),
),
]
|
{
"content_hash": "c4eb4fbe8554bbb4223e906d010eab2e",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 158,
"avg_line_length": 27.38888888888889,
"alnum_prop": 0.6348884381338742,
"repo_name": "opennode/nodeconductor",
"id": "15416d5efc273c410dd0233c3a6e4934626750f8",
"size": "517",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "waldur_core/core/migrations/0004_user_agreement_date.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1877"
},
{
"name": "HTML",
"bytes": "17528"
},
{
"name": "JavaScript",
"bytes": "248900"
},
{
"name": "Python",
"bytes": "1254720"
}
],
"symlink_target": ""
}
|
from nba_py import player
pd = player.PlayerDashboard('203507')
print pd.starting_position()
ap = player.CommonAllPlayers()
print ap.info()
pc = player.PlayerInfoCommon('203507')
print pc.headline_stats()
p_cstats = player.PlayerCareerStats('201939')
print p_cstats.regular_season_career_totals()
|
{
"content_hash": "5f027f9b8494948d38323247bfa933d4",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 45,
"avg_line_length": 23.153846153846153,
"alnum_prop": 0.7674418604651163,
"repo_name": "jeremyjbowers/nba_py",
"id": "c0546d39732da3765f262dbd44688a8eda38c881",
"size": "301",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tests/test_nba_py_player.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "44408"
}
],
"symlink_target": ""
}
|
import unittest
class TestDlib(unittest.TestCase):
# From http://dlib.net/face_detector.py.html
def test_face_detector(self):
import dlib
import pkgutil
import tempfile
with tempfile.NamedTemporaryFile(buffering=0) as f:
f.write(pkgutil.get_data(__name__, "abba.jpg"))
img = dlib.load_rgb_image(f.name)
noses = [(95, 101), (202, 109), (306, 118), (393, 122)]
for face in dlib.get_frontal_face_detector()(img, 1):
self.assertLess(face.right() - face.left(), 100)
self.assertLess(face.bottom() - face.top(), 100)
for nose_x, nose_y in noses:
if (face.left() < nose_x < face.right()) and \
(face.top() < nose_y < face.bottom()):
# It's safe to modify a list while iterating as long as we break
# immediately after.
noses.remove((nose_x, nose_y))
break
else:
self.fail("Unexpected face: {}".format(face))
if noses:
self.fail("Failed to find expected faces at {}".format(noses))
|
{
"content_hash": "70bfbc05bfe20de1beec2c5d297927de",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 84,
"avg_line_length": 37.38709677419355,
"alnum_prop": 0.5332182916307161,
"repo_name": "chaquo/chaquopy",
"id": "62558b97fa6c03af7326e61df86b0b113316a181",
"size": "1159",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/pypi/packages/dlib/test/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "30"
},
{
"name": "C",
"bytes": "174108"
},
{
"name": "CMake",
"bytes": "1897"
},
{
"name": "CSS",
"bytes": "991"
},
{
"name": "Cython",
"bytes": "251545"
},
{
"name": "Dockerfile",
"bytes": "6938"
},
{
"name": "Groovy",
"bytes": "42472"
},
{
"name": "Java",
"bytes": "159387"
},
{
"name": "Kotlin",
"bytes": "697"
},
{
"name": "Python",
"bytes": "8043408"
},
{
"name": "Roff",
"bytes": "232"
},
{
"name": "Shell",
"bytes": "53150"
},
{
"name": "Starlark",
"bytes": "2018"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import os
import sys
import json
from copy import deepcopy
from itertools import count
from threading import RLock, Event
from datetime import datetime, timedelta
from collections import Mapping, MutableMapping
import six
from ws4py.client.threadedclient import WebSocketClient
import sideboard.lib
from sideboard.lib import log, config, stopped, on_startup, on_shutdown, DaemonTask, Caller
class _WebSocketClientDispatcher(WebSocketClient):
def __init__(self, dispatcher, url, ssl_opts=None):
self.connected = False
self.dispatcher = dispatcher
WebSocketClient.__init__(self, url, ssl_options=ssl_opts)
def pre_connect(self):
pass
def connect(self, *args, **kwargs):
self.pre_connect()
WebSocketClient.connect(self, *args, **kwargs)
self.connected = True
def close(self, code=1000, reason=''):
try:
WebSocketClient.close(self, code=code, reason=reason)
except:
pass
try:
WebSocketClient.close_connection(self)
except:
pass
self.connected = False
def send(self, data):
log.debug('sending {!r}', data)
assert self.connected, 'tried to send data on closed websocket {!r}'.format(self.url)
if isinstance(data, Mapping):
data = json.dumps(data)
return WebSocketClient.send(self, data)
def received_message(self, message):
message = message.data if isinstance(message.data, six.text_type) else message.data.decode('utf-8')
log.debug('received {!r}', message)
try:
message = json.loads(message)
except:
log.debug('failed to parse incoming message', exc_info=True)
finally:
self.dispatcher.defer(message)
class _Subscriber(object):
def __init__(self, method, src_client, dst_client, src_ws, dest_ws):
self.method, self.src_ws, self.dest_ws, self.src_client, self.dst_client = method, src_ws, dest_ws, src_client, dst_client
def unsubscribe(self):
self.dest_ws.unsubscribe(self.dst_client)
def callback(self, data):
self.src_ws.send(data=data, client=self.src_client)
def errback(self, error):
self.src_ws.send(error=error, client=self.src_client)
def __call__(self, *args, **kwargs):
self.dest_ws.subscribe({
'client': self.dst_client,
'callback': self.callback,
'errback': self.errback
}, self.method, *args, **kwargs)
return self.src_ws.NO_RESPONSE
def __del__(self):
self.unsubscribe()
class WebSocket(object):
"""
Utility class for making websocket connections. This improves on the ws4py
websocket client classes mainly by adding several features:
- automatically detecting dead connections and re-connecting
- utility methods for making synchronous rpc calls and for making
asynchronous subscription calls with callbacks
- adding locking to make sending messages thread-safe
"""
poll_method = 'sideboard.poll'
WebSocketDispatcher = _WebSocketClientDispatcher
def __init__(self, url=None, ssl_opts=None, connect_immediately=True, max_wait=2):
self.ws = None
self.url = url or 'ws://127.0.0.1:{}/wsrpc'.format(config['cherrypy']['server.socket_port'])
self._lock = RLock()
self._callbacks = {}
self._counter = count()
self.ssl_opts = ssl_opts
self._reconnect_attempts = 0
self._last_poll, self._last_reconnect_attempt = None, None
self._dispatcher = Caller(self._dispatch, threads=1)
self._checker = DaemonTask(self._check, interval=1)
if connect_immediately:
self.connect(max_wait=max_wait)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def preprocess(self, method, params):
"""
Each message we send has its parameters passed to this function and
the actual parameters sent are whatever this function returns. By
default this just returns the message unmodified, but plugins can
override this to add whatever logic is needed. We pass the method
name in its full "service.method" form in case the logic depends on
the service being invoked.
"""
return params
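    # A minimal sketch of a preprocess override (hypothetical subclass and
    # parameter names, not part of this module): a plugin might stamp every
    # outgoing request with extra context like this.
    #
    #   class TaggedWebSocket(WebSocket):
    #       def preprocess(self, method, params):
    #           if isinstance(params, dict):
    #               params = dict(params, caller='my-plugin')
    #           return params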
@property
def _should_reconnect(self):
interval = min(config['ws.reconnect_interval'], 2 ** self._reconnect_attempts)
cutoff = datetime.now() - timedelta(seconds=interval)
return not self.connected and (self._reconnect_attempts == 0 or self._last_reconnect_attempt < cutoff)
@property
def _should_poll(self):
cutoff = datetime.now() - timedelta(seconds=config['ws.poll_interval'])
return self.connected and (self._last_poll is None or self._last_poll < cutoff)
def _check(self):
if self._should_reconnect:
self._reconnect()
if self._should_poll:
self._poll()
def _poll(self):
assert self.ws and self.ws.connected, 'cannot poll while websocket is not connected'
try:
self.call(self.poll_method)
except:
log.warning('no poll response received from {!r}, closing connection, will attempt to reconnect', self.url, exc_info=True)
self.ws.close()
else:
self._last_poll = datetime.now()
def _refire_subscriptions(self):
try:
for cb in self._callbacks.values():
if 'client' in cb:
params = cb['paramback']() if 'paramback' in cb else cb['params']
self._send(method=cb['method'], params=params, client=cb['client'])
except:
pass # self._send() already closes and logs on error
def _reconnect(self):
with self._lock:
assert not self.connected, 'connection is still active'
try:
self.ws = self.WebSocketDispatcher(self._dispatcher, self.url, ssl_opts=self.ssl_opts)
self.ws.connect()
except Exception as e:
log.warn('failed to connect to {}: {}', self.url, str(e))
self._last_reconnect_attempt = datetime.now()
self._reconnect_attempts += 1
else:
self._reconnect_attempts = 0
self._refire_subscriptions()
def _next_id(self, prefix):
return '{}-{}'.format(prefix, next(self._counter))
def _send(self, **kwargs):
log.debug('sending {}', kwargs)
with self._lock:
assert self.connected, 'tried to send data on closed websocket {!r}'.format(self.url)
try:
return self.ws.send(kwargs)
except:
log.warn('failed to send {!r} on {!r}, closing websocket and will attempt to reconnect', kwargs, self.url)
self.ws.close()
raise
def _dispatch(self, message):
log.debug('dispatching {}', message)
try:
assert isinstance(message, Mapping), 'incoming message is not a dictionary'
assert 'client' in message or 'callback' in message, 'no callback or client in message {}'.format(message)
id = message.get('client') or message.get('callback')
assert id in self._callbacks, 'unknown dispatchee {}'.format(id)
except AssertionError:
self.fallback(message)
else:
if 'error' in message:
self._callbacks[id]['errback'](message['error'])
else:
self._callbacks[id]['callback'](message.get('data'))
def fallback(self, message):
"""
Handler method which is called for incoming websocket messages which
aren't valid responses to an outstanding call or subscription. By
default this just logs an error message. You can override this by
        subclassing this class, or just by assigning a handler method, e.g.
>>> ws = WebSocket()
>>> ws.fallback = some_handler_function
>>> ws.connect()
"""
_, exc, _ = sys.exc_info()
log.error('no callback registered for message {!r}, message ignored: {}', message, exc)
@property
def connected(self):
"""boolean indicating whether or not this connection is currently active"""
return bool(self.ws) and self.ws.connected
def connect(self, max_wait=0):
"""
Start the background threads which connect this websocket and handle RPC
dispatching. This method is safe to call even if the websocket is already
connected. You may optionally pass a max_wait parameter if you want to
wait for up to that amount of time for the connection to go through; if
that amount of time elapses without successfully connecting, a warning
message is logged.
"""
self._checker.start()
self._dispatcher.start()
        for i in range(int(10 * max_wait)):
if not self.connected:
stopped.wait(0.1)
else:
break
else:
if max_wait:
log.warn('websocket {!r} not connected after {} seconds', self.url, max_wait)
def close(self):
"""
Closes the underlying websocket connection and stops background tasks.
This method is always safe to call; exceptions will be swallowed and
logged, and calling close on an already-closed websocket is a no-op.
"""
self._checker.stop()
self._dispatcher.stop()
if self.ws:
self.ws.close()
def subscribe(self, callback, method, *args, **kwargs):
"""
Send a websocket request which you expect to subscribe you to a channel
with a callback which will be called every time there is new data, and
return the client id which uniquely identifies this subscription.
Callback may be either a function or a dictionary in the form
{
'callback': <function>,
'errback': <function>, # optional
            'paramback': <function>, # optional
'client': <string> # optional
}
Both callback and errback take a single argument; for callback, this is
        the return value of the method; for errback it is the error message
        returned. If no errback is specified, we will log errors at the ERROR
level and do nothing further.
The paramback function exists for subscriptions where we might want to
pass different parameters every time we reconnect. This might be used
for e.g. time-based parameters. This function takes no arguments and
returns the parameters which should be passed every time we connect
and fire (or re-fire) all of our subscriptions.
The client id is automatically generated if omitted, and you should not
set this yourself unless you really know what you're doing.
The positional and keyword arguments passed to this function will be
used as the arguments to the remote method, unless paramback is passed,
in which case that will be used to generate the params, and args/kwargs
will be ignored.
"""
client = self._next_id('client')
if isinstance(callback, Mapping):
assert 'callback' in callback, 'callback is required'
client = callback.setdefault('client', client)
self._callbacks[client] = callback
else:
self._callbacks[client] = {
'client': client,
'callback': callback
}
paramback = self._callbacks[client].get('paramback')
params = self.preprocess(method, paramback() if paramback else (args or kwargs))
self._callbacks[client].setdefault('errback', lambda result: log.error('{}(*{}, **{}) returned an error: {!r}', method, args, kwargs, result))
self._callbacks[client].update({
'method': method,
'params': params
})
try:
self._send(method=method, params=params, client=client)
except:
log.warn('initial subscription to {} at {!r} failed, will retry on reconnect', method, self.url)
return client
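    # Usage sketch (the service name 'foo.bar' and the callbacks are
    # hypothetical): subscribing with the dictionary form described in the
    # docstring above.
    #
    #   client = ws.subscribe({
    #       'callback': lambda data: log.info('got {!r}', data),
    #       'errback': lambda err: log.error('subscription error: {}', err),
    #   }, 'foo.bar')
    #   ...later...
    #   ws.unsubscribe(client)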
def unsubscribe(self, client):
"""
Cancel the websocket subscription identified by the specified client id.
This id is returned from the subscribe() method, e.g.
>>> client = ws.subscribe(some_callback, 'foo.some_function')
>>> ws.unsubscribe(client)
"""
self._callbacks.pop(client, None)
try:
self._send(action='unsubscribe', client=client)
except:
pass
def call(self, method, *args, **kwargs):
"""
Send a websocket rpc method call, then wait for and return the eventual
response, or raise an exception if we get back an error. This method
        will raise an AssertionError once the configured ws.call_timeout
        elapses (10 seconds by default) if no response of any kind was
        received. The positional and keyword arguments to this method
are used as the arguments to the rpc function call.
"""
finished = Event()
result, error = [], []
callback = self._next_id('callback')
self._callbacks[callback] = {
'callback': lambda response: (result.append(response), finished.set()),
'errback': lambda response: (error.append(response), finished.set())
}
params = self.preprocess(method, args or kwargs)
try:
self._send(method=method, params=params, callback=callback)
except:
self._callbacks.pop(callback, None)
raise
wait_until = datetime.now() + timedelta(seconds=config['ws.call_timeout'])
while datetime.now() < wait_until:
finished.wait(0.1)
if stopped.is_set() or result or error:
break
self._callbacks.pop(callback, None)
assert not stopped.is_set(), 'websocket closed before response was received'
assert result, error[0] if error else 'no response received for 10 seconds'
return result[0]
def make_caller(self, method):
"""
Returns a function which calls the specified method; useful for creating
callbacks, e.g.
>>> authenticate = ws.make_caller('auth.authenticate')
>>> authenticate('username', 'password')
True
Sideboard supports "passthrough subscriptions", e.g.
-> a browser makes a subscription for the "foo.bar" method
-> the server has "foo" registered as a remote service
-> the server creates its own subscription to "foo.bar" on the remote
service and passes all results back to the client as they arrive
This method implements that by checking whether it was called from a
thread with an active websocket as part of a subscription request. If
so then in addition to returning a callable, it also registers the
new subscription with the client websocket so it can be cleaned up when
the client websocket closes and/or when its subscription is canceled.
"""
client = sideboard.lib.threadlocal.get_client()
originating_ws = sideboard.lib.threadlocal.get('websocket')
if client and originating_ws:
sub = originating_ws.passthru_subscriptions.get(client)
if not sub:
sub = _Subscriber(method=method, src_client=client, dst_client=self._next_id('client'), src_ws=originating_ws, dest_ws=self)
originating_ws.passthru_subscriptions[client] = sub
return sub
else:
return lambda *args, **kwargs: self.call(method, *args, **kwargs)
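# Usage sketch for WebSocket (the url and method name are made up): the class
# supports the context-manager protocol, so a synchronous rpc call can be as
# simple as:
#
#   with WebSocket('ws://127.0.0.1:8282/wsrpc') as ws:
#       total = ws.call('foo.add', 2, 3)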
class Model(MutableMapping):
"""
Utility class for representing database objects found in the databases of
other Sideboard plugins. Instances of this class can have their values accessed
as either attributes or dictionary keys.
"""
_prefix = None
_unpromoted = ()
_defaults = None
def __init__(self, data, prefix=None, unpromoted=None, defaults=None):
assert prefix or self._prefix
object.__setattr__(self, '_data', deepcopy(data))
object.__setattr__(self, '_orig_data', deepcopy(data))
object.__setattr__(self, '_prefix', (prefix or self._prefix) + '_')
object.__setattr__(self, '_project_key', self._prefix + 'data')
object.__setattr__(self, '_unpromoted', self._unpromoted if unpromoted is None else unpromoted)
object.__setattr__(self, '_defaults', defaults or self._defaults or {})
@property
def query(self):
assert self.id, 'id was not set'
assert self._model, '_model was not set'
return {'_model': self._model, 'field': 'id', 'value': self.id}
@property
def dirty(self):
return {k: v for k, v in self._data.items() if v != self._orig_data.get(k)}
def to_dict(self):
data = deepcopy(self._data)
serialized = {k: v for k, v in data.pop(self._project_key, {}).items()}
for k in list(data.get('extra_data', {}).keys()):
if k.startswith(self._prefix):
serialized[k[len(self._prefix):]] = data['extra_data'].pop(k)
elif k in self._unpromoted:
serialized[k] = data['extra_data'].pop(k)
serialized.update(data)
return serialized
@property
def _extra_data(self):
return self._data.setdefault('extra_data', {})
def _extra_data_key(self, key):
return ('' if key in self._unpromoted else self._prefix) + key
def __len__(self):
return len(self._data) + len(self._extra_data) + len(self._data.get(self._project_key, {}))
def __setitem__(self, key, value):
assert key != 'id' or value == self.id, 'id is not settable'
if key in self._data:
self._data[key] = value
elif self._project_key in self._data:
self._extra_data.pop(self._prefix + key, None)
self._data[self._project_key][key] = value
else:
self._extra_data[self._extra_data_key(key)] = value
def __getitem__(self, key):
if key in self._data:
return self._data[key]
elif key in self._data.get(self._project_key, {}):
return self._data[self._project_key][key]
else:
return self._extra_data.get(self._extra_data_key(key), self._defaults.get(key))
def __delitem__(self, key):
if key in self._data:
del self._data[key]
elif key in self._data.get(self._project_key, {}):
del self._data[self._project_key][key]
else:
self._extra_data.pop(self._extra_data_key(key), None)
def __iter__(self):
return iter(k for k in self.to_dict() if k != 'extra_data')
def __repr__(self):
return repr(dict(self.items()))
def __getattr__(self, name):
return self.__getitem__(name)
def __setattr__(self, name, value):
return self.__setitem__(name, value)
def __delattr__(self, name):
self.__delitem__(name)
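# Usage sketch for Model (the prefix and field names are invented): values can
# be read and written either as attributes or as dictionary keys, and unknown
# fields are stored under extra_data with the prefix applied.
#
#   m = Model({'id': 'abc123'}, prefix='myplugin')
#   m.color = 'red'                # attribute access...
#   assert m['color'] == 'red'     # ...and item access are interchangeable
#   # m.dirty -> {'extra_data': {'myplugin_color': 'red'}}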
class Subscription(object):
"""
Utility class for opening a websocket to a given destination, subscribing to an rpc call,
and processing the response.
>>> logged_in_users = Subscription('admin.get_logged_in_users')
>>> logged_in_users.result # this will always be the latest return value of your rpc method
If you want to do postprocessing on the results, you can override the "callback" method:
>>> class UserList(Subscription):
... def __init__(self):
... self.usernames = []
... Subscription.__init__(self, 'admin.get_logged_in_users')
...
... def callback(self, users):
... self.usernames = [user['username'] for user in users]
...
>>> users = UserList()
The above code gives you a "users" object with a "usernames" attribute; when Sideboard
starts, it opens a websocket connection to whichever remote server defines the "admin"
service (as defined in the rpc_services config section), then subscribes to the
"admin.get_logged_in_users" method and calls the "callback" method on every response.
"""
def __init__(self, rpc_method, *args, **kwargs):
self.result = None
connect_immediately = kwargs.pop('connect_immediately', False)
self.method, self.args, self.kwargs = rpc_method, args, kwargs
self.ws = sideboard.lib.services.get_websocket(rpc_method.split('.')[0])
on_startup(self._subscribe)
on_shutdown(self._unsubscribe)
if connect_immediately:
self.ws.connect(max_wait=2)
self._subscribe()
def _subscribe(self):
self._client_id = self.ws.subscribe(self._callback, self.method, *self.args, **self.kwargs)
def _unsubscribe(self):
self.ws.unsubscribe(self._client_id)
def refresh(self):
"""
re-fire your subscription method and invoke the callback method with
the response; this will manually check for changes if you are
subscribed to a method which by design doesn't re-fire on every change
"""
assert self.ws.connected, 'cannot refresh {}: websocket not connected'.format(self.method)
self._callback(self.ws.call(self.method, *self.args, **self.kwargs))
def _callback(self, response_data):
self.result = response_data
self.callback(response_data)
def callback(self, response_data):
"""override this to define what to do with your rpc method return values"""
class MultiSubscription(object):
"""
A version of the Subscription utility class which subscribes to an arbitrary
number of remote servers and aggregates the results from each. You invoke
    this similarly to the Subscription class, with two main differences:
1) The first parameter is a list of hostnames to which we should connect.
Each hostname will have a websocket registered for it if one does not
already exist, using the standard config options under [rpc_services].
2) Unlike the Subscription class, we do not support the connect_immediately
parameter. Because this class looks in the [rpc_services] config section
of every plugin to find the client cert settings, we need to wait for all
plugins to be loaded before trying to connect.
Like the Subscription class, you can instantiate this class directly, e.g.
>>> logged_in_users = MultiSubscription(['host1', 'host2'], 'admin.get_logged_in_users')
>>> logged_in_users.results # this will always be the latest return values of your rpc method
The "results" attribute is a dictionary whose keys are the websocket objects
used to connect to each host, and whose values are the latest return values
from each of those websockets. Hosts for which we have not yet received a
response will have no key/value pair in the "results" dictionary.
If you want to do postprocessing on the results, you can subclass this and
override the "callback" method, e.g.
>>> class UserList(MultiSubscription):
... def __init__(self):
... self.usernames = set()
... MultiSubscription.__init__(self, ['host1', 'host2'], 'admin.get_logged_in_users')
...
... def callback(self, users, ws):
... self.usernames.update(user['username'] for user in users)
...
>>> users = UserList()
The above code gives you a "users" object with a "usernames" attribute; when Sideboard
starts, it opens websocket connections to 'host1' and 'host2', then subscribes to the
"admin.get_logged_in_users" method and calls the "callback" method on every response.
"""
def __init__(self, hostnames, rpc_method, *args, **kwargs):
from sideboard.lib import listify
self.hostnames, self.method, self.args, self.kwargs = listify(hostnames), rpc_method, args, kwargs
self.results, self.websockets, self._client_ids = {}, {}, {}
on_startup(self._subscribe)
on_shutdown(self._unsubscribe)
def _websocket(self, url, ssl_opts):
from sideboard.lib import services
return services._register_websocket(url, ssl_opts=ssl_opts)
def _subscribe(self):
from sideboard.lib._services import _ws_url, _rpc_opts, _ssl_opts
for hostname in self.hostnames:
rpc_opts = _rpc_opts(hostname)
self.websockets[hostname] = self._websocket(_ws_url(hostname, rpc_opts), _ssl_opts(rpc_opts))
for ws in self.websockets.values():
self._client_ids[ws] = ws.subscribe(self._make_callback(ws), self.method, *self.args, **self.kwargs)
def _unsubscribe(self):
for ws in self.websockets.values():
ws.unsubscribe(self._client_ids.get(ws))
def _make_callback(self, ws):
return lambda result_data: self._callback(result_data, ws)
def _callback(self, response_data, ws):
self.results[ws] = response_data
self.callback(response_data, ws)
def callback(self, result_data, ws):
"""override this to define what to do with your rpc method return values"""
def refresh(self):
"""
Sometimes we want to manually re-fire all of our subscription methods to
get the latest data. This is useful in cases where the remote server
isn't necessarily programmed to always push the latest data as soon as
it's available, usually for performance reasons. This method allows the
client to get the latest data more often than the server is programmed
to provide it.
"""
for ws in self.websockets.values():
try:
                self._callback(ws.call(self.method, *self.args, **self.kwargs), ws)
except:
log.warn('failed to fetch latest data from {} on {}', self.method, ws.url)
|
{
"content_hash": "0e54aae86bbb74d3b395f4ebb268b971",
"timestamp": "",
"source": "github",
"line_count": 636,
"max_line_length": 150,
"avg_line_length": 41.125786163522015,
"alnum_prop": 0.6231075087933935,
"repo_name": "RobRuana/sideboard",
"id": "23543451b99edcce8591bddd45dd1ca7408ef3e2",
"size": "26156",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sideboard/lib/_websockets.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "2297"
},
{
"name": "JavaScript",
"bytes": "845593"
},
{
"name": "Python",
"bytes": "355561"
},
{
"name": "Shell",
"bytes": "4377"
}
],
"symlink_target": ""
}
|
"""
__Delay.py_____________________________________________________
Automatically generated AToM3 syntactic object (DO NOT MODIFY DIRECTLY)
Author: gehan
Modified: Sat Aug 30 18:23:40 2014
_______________________________________________________________
"""
from ASGNode import *
from ATOM3Type import *
from ATOM3String import *
from graph_Delay import *
class Delay(ASGNode, ATOM3Type):
def __init__(self, parent = None):
ASGNode.__init__(self)
ATOM3Type.__init__(self)
self.superTypes = ['Proc', 'MetaModelElement_T']
self.graphClass_ = graph_Delay
self.isGraphObjectVisual = True
if(hasattr(self, '_setHierarchicalLink')):
self._setHierarchicalLink(False)
if(hasattr(self, '_setHierarchicalNode')):
self._setHierarchicalNode(False)
self.parent = parent
        self.cardinality=ATOM3String('1', 20)
        self.classtype=ATOM3String('t_', 20)
        self.name=ATOM3String('s_', 20)
        self.generatedAttributes = {'cardinality': ('ATOM3String', ),
                                    'classtype': ('ATOM3String', ),
                                    'name': ('ATOM3String', ) }
        self.realOrder = ['cardinality','classtype','name']
        self.directEditing = [1,1,1]
def clone(self):
cloneObject = Delay( self.parent )
for atr in self.realOrder:
cloneObject.setAttrValue(atr, self.getAttrValue(atr).clone() )
ASGNode.cloneActions(self, cloneObject)
return cloneObject
def copy(self, other):
ATOM3Type.copy(self, other)
for atr in self.realOrder:
self.setAttrValue(atr, other.getAttrValue(atr) )
ASGNode.copy(self, other)
def preCondition (self, actionID, * params):
if self.graphObject_:
return self.graphObject_.preCondition(actionID, params)
else: return None
def postCondition (self, actionID, * params):
if self.graphObject_:
return self.graphObject_.postCondition(actionID, params)
else: return None
def preAction (self, actionID, * params):
if self.graphObject_:
return self.graphObject_.preAction(actionID, params)
else: return None
def postAction (self, actionID, * params):
if self.graphObject_:
return self.graphObject_.postAction(actionID, params)
else: return None
def QOCA(self, params):
"""
QOCA Constraint Template
NOTE: DO NOT select a POST/PRE action trigger
Constraints will be added/removed in a logical manner by other mechanisms.
"""
return # <---- Remove this to use QOCA
""" Get the high level constraint helper and solver """
from Qoca.atom3constraints.OffsetConstraints import OffsetConstraints
oc = OffsetConstraints(self.parent.qocaSolver)
"""
Example constraint, see Kernel/QOCA/atom3constraints/OffsetConstraints.py
For more types of constraints
"""
oc.fixedWidth(self.graphObject_, self.graphObject_.sizeX)
oc.fixedHeight(self.graphObject_, self.graphObject_.sizeY)
|
{
"content_hash": "7530db6d9bd06ecb7c1335dee8b89416",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 90,
"avg_line_length": 36.81720430107527,
"alnum_prop": 0.602803738317757,
"repo_name": "levilucio/SyVOLT",
"id": "2e914aa72f0ee7739f32f325e3529f5bc959d268",
"size": "3424",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "UMLRT2Kiltera_MM/Delay.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "166159"
},
{
"name": "Python",
"bytes": "34207588"
},
{
"name": "Shell",
"bytes": "1118"
}
],
"symlink_target": ""
}
|
from django.conf import settings
from django.http import HttpResponse
from django.shortcuts import render
import opentracing
import urllib2
tracer = settings.OPENTRACING_TRACER
# Create your views here.
def client_index(request):
return HttpResponse("Client index page")
@tracer.trace()
def client_simple(request):
url = "http://localhost:8000/server/simple"
new_request = urllib2.Request(url)
current_span = tracer.get_span(request)
inject_as_headers(tracer, current_span, new_request)
try:
response = urllib2.urlopen(new_request)
return HttpResponse("Made a simple request")
except urllib2.URLError as e:
return HttpResponse("Error: " + str(e))
@tracer.trace()
def client_log(request):
url = "http://localhost:8000/server/log"
new_request = urllib2.Request(url)
current_span = tracer.get_span(request)
inject_as_headers(tracer, current_span, new_request)
try:
response = urllib2.urlopen(new_request)
return HttpResponse("Sent a request to log")
except urllib2.URLError as e:
return HttpResponse("Error: " + str(e))
@tracer.trace()
def client_child_span(request):
url = "http://localhost:8000/server/childspan"
new_request = urllib2.Request(url)
current_span = tracer.get_span(request)
inject_as_headers(tracer, current_span, new_request)
try:
response = urllib2.urlopen(new_request)
return HttpResponse("Sent a request that should produce an additional child span")
except urllib2.URLError as e:
return HttpResponse("Error: " + str(e))
def inject_as_headers(tracer, span, request):
text_carrier = {}
tracer._tracer.inject(span.context, opentracing.Format.TEXT_MAP, text_carrier)
for k, v in text_carrier.iteritems():
request.add_header(k,v)
|
{
"content_hash": "7b13390703ff2254e18e1141bf067dcd",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 90,
"avg_line_length": 32.607142857142854,
"alnum_prop": 0.696604600219058,
"repo_name": "kcamenzind/django_opentracing",
"id": "06112f52218150ad4263a4f03d17fed4b662955a",
"size": "1826",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example/client/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1791"
},
{
"name": "Python",
"bytes": "19194"
}
],
"symlink_target": ""
}
|
import supybot.conf as conf
import supybot.registry as registry
from supybot.i18n import PluginInternationalization, internationalizeDocstring
_ = PluginInternationalization('Anonymous')
def configure(advanced):
# This will be called by supybot to configure this module. advanced is
# a bool that specifies whether the user identified themself as an advanced
# user or not. You should effect your configuration by manipulating the
# registry as appropriate.
from supybot.questions import expect, anything, something, yn
conf.registerPlugin('Anonymous', True)
Anonymous = conf.registerPlugin('Anonymous')
# This is where your configuration variables (if any) should go. For example:
# conf.registerGlobalValue(Anonymous, 'someConfigVariableName',
# registry.Boolean(False, """Help for someConfigVariableName."""))
conf.registerChannelValue(conf.supybot.plugins.Anonymous,
'requirePresenceInChannel', registry.Boolean(True, _("""Determines whether
the bot should require people trying to use this plugin to be in the
channel they wish to anonymously send to.""")))
conf.registerChannelValue(conf.supybot.plugins.Anonymous, 'requireRegistration',
registry.Boolean(True, _("""Determines whether the bot should require
people trying to use this plugin to be registered.""")))
conf.registerChannelValue(conf.supybot.plugins.Anonymous, 'requireCapability',
registry.String('', _("""Determines what capability (if any) the bot should
require people trying to use this plugin to have.""")))
conf.registerGlobalValue(conf.supybot.plugins.Anonymous, 'allowPrivateTarget',
registry.Boolean(False, _("""Determines whether the bot will allow the
"tell" command to be used. If true, the bot will allow the "tell"
command to send private messages to other users.""")))
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
|
{
"content_hash": "5f335486aabcac6ebc1abe83be7aa979",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 80,
"avg_line_length": 54,
"alnum_prop": 0.7656084656084656,
"repo_name": "Ban3/Limnoria",
"id": "2737475619f605907a2ee16f8a5255dd49e303ae",
"size": "3472",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "plugins/Anonymous/config.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "864"
},
{
"name": "Python",
"bytes": "2513657"
},
{
"name": "Shell",
"bytes": "217"
}
],
"symlink_target": ""
}
|
import DistributedLawnDecor
from direct.interval.IntervalGlobal import *
from direct.directnotify import DirectNotifyGlobal
from direct.showbase.ShowBase import *
import GardenGlobals
from toontown.toonbase import TTLocalizer
class DistributedPlantBase(DistributedLawnDecor.DistributedLawnDecor):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedPlantBase')
def __init__(self, cr):
DistributedLawnDecor.DistributedLawnDecor.__init__(self, cr)
self.model = None
self.growthLevel = -1
self.waterTrackDict = {}
return
def delete(self):
self.notify.debug('delete')
for waterTrack in self.waterTrackDict.values():
if waterTrack:
waterTrack.finish()
self.waterTrackDict = None
DistributedLawnDecor.DistributedLawnDecor.delete(self)
return
def disable(self):
self.notify.debug('disable')
DistributedLawnDecor.DistributedLawnDecor.disable(self)
def loadModel(self):
if hasattr(self, 'rotateNode') and self.rotateNode:
self.rotateNode.removeNode()
self.rotateNode = self.plantPath.attachNewNode('rotate')
self.model = None
modelName = self.getModelName()
self.model = loader.loadModel(modelName)
self.model.reparentTo(self.rotateNode)
self.stick2Ground()
return
def setupShadow(self):
DistributedLawnDecor.DistributedLawnDecor.setupShadow(self)
self.adjustWaterIndicator()
def setTypeIndex(self, typeIndex):
self.typeIndex = typeIndex
self.attributes = GardenGlobals.PlantAttributes[typeIndex]
self.name = self.attributes['name']
self.plantType = self.attributes['plantType']
self.growthThresholds = self.attributes['growthThresholds']
self.maxWaterLevel = self.attributes['maxWaterLevel']
self.minWaterLevel = self.attributes['minWaterLevel']
self.seedlingModel = self.attributes['seedlingModel']
self.establishedModel = self.attributes['establishedModel']
self.fullGrownModel = self.attributes['fullGrownModel']
def getTypeIndex(self):
return self.typeIndex
def setWaterLevel(self, waterLevel):
self.waterLevel = waterLevel
def getWaterLevel(self):
return self.waterLevel
def setGrowthLevel(self, growthLevel):
self.growthLevel = growthLevel
if self.model:
self.model.setScale(growthLevel)
def getGrowthLevel(self):
return self.growthLevel
def getShovelAction(self):
if self.isFruiting() and not self.isWilted() and self.canBeHarvested():
return TTLocalizer.GardeningPick
else:
return TTLocalizer.GardeningRemove
def getShovelCommand(self):
return self.handlePicking
def canBeHarvested(self):
return True
def handleEnterPlot(self, colEntry = None):
dist = self.getDistance(localAvatar)
self.accept('water-plant', self.__handleWatering)
base.localAvatar.addShovelRelatedDoId(self.doId)
def handleExitPlot(self, entry = None):
DistributedLawnDecor.DistributedLawnDecor.handleExitPlot(self, entry)
base.localAvatar.removeShovelRelatedDoId(self.doId)
self.ignore('water-plant')
def handleWatering(self):
self.startInteraction()
self.sendUpdate('waterPlant')
def __handleWatering(self, plantToWaterId):
if plantToWaterId == self.doId:
self.sendUpdate('waterPlant')
else:
self.notify.debug('not sending water plant')
def isFruiting(self):
retval = self.growthLevel >= self.growthThresholds[2]
return retval
def isGTEFruiting(self):
retval = self.growthLevel >= self.growthThresholds[2]
return retval
def isFullGrown(self):
if self.growthLevel >= self.growthThresholds[2]:
return False
elif self.growthLevel >= self.growthThresholds[1]:
return True
return False
def isGTEFullGrown(self):
retval = self.growthLevel >= self.growthThresholds[1]
return retval
def isEstablished(self):
if self.growthLevel >= self.growthThresholds[2]:
return False
elif self.growthLevel >= self.growthThresholds[1]:
return False
elif self.growthLevel >= self.growthThresholds[0]:
return True
return False
def isGTEEstablished(self):
if self.growthLevel >= self.growthThresholds[0]:
return True
return False
def isSeedling(self):
if self.growthLevel >= self.growthThresholds[2]:
return False
elif self.growthLevel >= self.growthThresholds[1]:
return False
elif self.growthLevel >= self.growthThresholds[0]:
return False
elif self.growthLevel < self.growthThresholds[0]:
return True
return False
def isGTESeedling(self):
return True
def isWilted(self):
return self.waterLevel < 0
def getModelName(self):
        if self.growthLevel >= self.growthThresholds[1]:
            modelName = self.fullGrownModel
elif self.growthLevel >= self.growthThresholds[0]:
modelName = self.establishedModel
else:
modelName = self.seedlingModel
return modelName
def setMovie(self, mode, avId):
if mode == GardenGlobals.MOVIE_WATER:
self.doWaterTrack(avId)
elif mode == GardenGlobals.MOVIE_FINISHPLANTING:
self.doFinishPlantingTrack(avId)
elif mode == GardenGlobals.MOVIE_REMOVE:
self.doDigupTrack(avId)
def doWaterTrack(self, avId):
toon = base.cr.doId2do.get(avId)
if not toon:
return
can = toon.getWateringCanModel()
can.hide()
can.reparentTo(toon.rightHand)
track = Sequence()
track.append(self.startCamIval(avId))
track.append(self.generateToonMoveTrack(toon))
track.append(Func(can.show))
track.append(self.generateWaterTrack(toon))
track.append(Func(can.removeNode))
track.append(self.stopCamIval(avId))
if avId == localAvatar.doId:
track.append(Func(self.sendUpdate, 'waterPlantDone'))
track.append(Func(self.finishInteraction))
track.start()
self.waterTrackDict[avId] = track
def generateWaterTrack(self, toon):
sound = loader.loadSfx('phase_5/audio/sfx/firehose_spray.ogg')
sound.setPlayRate(0.75)
waterTrack = Parallel()
waterTrack.append(Sequence(Parallel(ActorInterval(toon, 'water'), SoundInterval(sound, node=toon, volume=0.5)), Func(toon.loop, 'neutral')))
if hasattr(self, 'dropShadow') and self.dropShadow:
newColor = self.dropShadow.getColor()
alpha = min(1.0, newColor.getW() + 1 / 5.0)
newColor.setW(alpha)
waterTrack.append(LerpColorInterval(self.dropShadow, 2.1, newColor))
return waterTrack
def adjustWaterIndicator(self):
if self.model:
color = self.waterLevel / 5.0 + 1 / 5.0
self.notify.debug('%s %s' % (self.waterLevel, color))
if color < 0.2:
color = 0.2
if hasattr(self, 'dropShadow') and self.dropShadow:
self.dropShadow.setColor(0.0, 0.0, 0.0, color)
def canBeWatered(self):
return 1
def finishInteraction(self):
DistributedLawnDecor.DistributedLawnDecor.finishInteraction(self)
base.localAvatar.handleEndPlantInteraction(self)
|
{
"content_hash": "6d4b7c64c22e6680d0b2bbd5023d9891",
"timestamp": "",
"source": "github",
"line_count": 223,
"max_line_length": 148,
"avg_line_length": 34.89686098654708,
"alnum_prop": 0.6495759444872783,
"repo_name": "linktlh/Toontown-journey",
"id": "4b1803c9834117bc51bcb94478d5ca1339ab118b",
"size": "7782",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "toontown/estate/DistributedPlantBase.py",
"mode": "33261",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
'Configuration directives for the backup and restore scripts'
__author__ = 'Pablo Santiago Blum de Aguiar <pablo.aguiar@gmail.com>'
__copyright__ = 'Copyright (c) 2008 Blum-Aguiar'
__license__ = '''Copyright (c) 2008 Blum-Aguiar
Copyright (c) 2008 Pablo Santiago Blum de Aguiar <pablo.aguiar@gmail.com>
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the Blum-Aguiar nor the names of its contributors may be
used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.'''
# Database connection settings
DBConfig = {
'host': 'localhost',
'user': 'root',
'pass': '',
'charset': 'utf8'
}
|
{
"content_hash": "03cede0a2e38ef4be4c35d2485d9fb57",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 82,
"avg_line_length": 51.10526315789474,
"alnum_prop": 0.7703398558187435,
"repo_name": "scorphus/dotfilesetal",
"id": "00f08cca50ac18ada1db6927ef6239356485a6c1",
"size": "1973",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/backup/config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "AppleScript",
"bytes": "1932"
},
{
"name": "CSS",
"bytes": "1203"
},
{
"name": "JavaScript",
"bytes": "1845"
},
{
"name": "PHP",
"bytes": "156"
},
{
"name": "Perl",
"bytes": "225491"
},
{
"name": "Python",
"bytes": "34519"
},
{
"name": "Ruby",
"bytes": "24726"
},
{
"name": "Shell",
"bytes": "140892"
},
{
"name": "Vim script",
"bytes": "616"
}
],
"symlink_target": ""
}
|
import pygame
from buffalo import utils
from menu import Menu
def main():
while not utils.end:
utils.scene.logic()
utils.scene.update()
utils.scene.render()
utils.delta = utils.clock.tick( utils.FRAMES_PER_SECOND )
if __name__ == "__main__":
if not utils.init(
caption='Adept',
):
print('buffalo.utils failed to initialize')
pygame.quit()
exit()
utils.set_scene( Menu() )
main()
pygame.quit()
|
{
"content_hash": "8e979e8abfe176c1e6318e5af3fe0e00",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 65,
"avg_line_length": 18.357142857142858,
"alnum_prop": 0.5486381322957199,
"repo_name": "gragas/adept",
"id": "f59785a10008f9ca2563ce465d6f4bd81c42e2b7",
"size": "514",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5032"
}
],
"symlink_target": ""
}
|
from docutils import nodes
from docutils.writers import html4css1
class HTMLTranslator(html4css1.HTMLTranslator):
def visit_request_block(self, node):
self.body.append(node.astext())
raise nodes.SkipNode
def visit_headers_block(self, node):
self.body.append(node.astext())
raise nodes.SkipNode
def visit_table(self, node):
self.context.append(self.compact_p)
self.compact_p = True
classes = ' '.join([
'table', 'table-striped', 'table-hover', self.settings.table_style
]).strip()
self.body.append(
self.starttag(node, 'table', CLASS=classes))
class HTMLWriter(html4css1.Writer):
def __init__(self):
html4css1.Writer.__init__(self)
self.translator_class = HTMLTranslator
|
{
"content_hash": "f55fdfe02712a82bc573d7a56f3c24cf",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 78,
"avg_line_length": 29.77777777777778,
"alnum_prop": 0.6393034825870647,
"repo_name": "Team-Zeus/okapi",
"id": "8114dfc1e2d989db8e866072fd7327eda081235a",
"size": "804",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "okapi/rst/writers/html.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2730"
},
{
"name": "HTML",
"bytes": "6585"
},
{
"name": "JavaScript",
"bytes": "1144"
},
{
"name": "Python",
"bytes": "47842"
}
],
"symlink_target": ""
}
|
"""Tests for mind controllers and other wizards."""
import axelrod
from .test_player import TestPlayer
C, D = axelrod.Actions.C, axelrod.Actions.D
class TestMindController(TestPlayer):
name = 'Mind Controller'
player = axelrod.MindController
expected_classifier = {
'memory_depth': -10,
'stochastic': False,
'makes_use_of': set(),
'inspects_source': False,
'manipulates_source': True, # Finds out what opponent will do
'manipulates_state': False
}
def test_strategy(self):
""" Will always make opponent cooperate """
P1 = axelrod.MindController()
P2 = axelrod.Cooperator()
self.assertEqual(P1.strategy(P2), D)
self.assertEqual(P2.strategy(P1), C)
def test_vs_defect(self):
""" Will force even defector to cooperate """
P1 = axelrod.MindController()
P2 = axelrod.Defector()
self.assertEqual(P1.strategy(P2), D)
self.assertEqual(P2.strategy(P1), C)
def test_vs_grudger(self):
""" Will force even Grudger to forget its grudges"""
P1 = axelrod.MindController()
P2 = axelrod.Grudger()
P1.history = [D, D, D, D]
self.assertEqual(P1.strategy(P2), D)
self.assertEqual(P2.strategy(P1), C)
def test_init(self):
"""Test to make sure parameters are initialised correctly """
P1 = axelrod.MindController()
self.assertEqual(P1.history, [])
def test_reset(self):
""" test for the reset method """
P1 = axelrod.MindController()
P1.history = [C, D, D, D]
P1.reset()
self.assertEqual(P1.history, [])
class TestMindWarper(TestMindController):
name = "Mind Warper"
player = axelrod.MindWarper
expected_classifier = {
'memory_depth': -10,
'stochastic': False,
'makes_use_of': set(),
'inspects_source': False,
'manipulates_source': True, # Finds out what opponent will do
'manipulates_state': False
}
def test_setattr(self):
player = self.player()
player.strategy = lambda opponent: 'C'
def test_strategy(self):
player = self.player()
opponent = axelrod.Defector()
play1 = player.strategy(opponent)
play2 = opponent.strategy(player)
self.assertEqual(play1, 'D')
self.assertEqual(play2, 'C')
class TestMindBender(TestMindController):
name = "Mind Bender"
player = axelrod.MindBender
expected_classifier = {
'memory_depth': -10,
'stochastic': False,
'makes_use_of': set(),
'inspects_source': False,
'manipulates_source': True, # Finds out what opponent will do
'manipulates_state': False
}
def test_strategy(self):
player = self.player()
opponent = axelrod.Defector()
play1 = player.strategy(opponent)
play2 = opponent.strategy(player)
self.assertEqual(play1, 'D')
self.assertEqual(play2, 'C')
|
{
"content_hash": "d1269fe0756691bc4c586352d3da5ffb",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 70,
"avg_line_length": 28.27102803738318,
"alnum_prop": 0.6023140495867768,
"repo_name": "ranjinidas/Axelrod",
"id": "e6ab40c96c552b5f4eaf6c03b108e199e5a83626",
"size": "3025",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "axelrod/tests/unit/test_mindcontrol.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "568469"
},
{
"name": "Shell",
"bytes": "82"
}
],
"symlink_target": ""
}
|
from setuptools import setup
from Cython.Build import cythonize
cobj = cythonize("poissondisk.pyx", language="c++")
setup(name="poissondisk",
version="1.0",
ext_modules=cobj)
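# Typical build invocation (standard setuptools/Cython usage, not specific to
# this repo): python setup.py build_ext --inplace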
|
{
"content_hash": "86e75990abec2dfa9d63cd8ef36c1478",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 51,
"avg_line_length": 21.11111111111111,
"alnum_prop": 0.7052631578947368,
"repo_name": "thisch/cypoissondisk",
"id": "cc3855236637b65ec42298226c4e9b1e4009e79b",
"size": "190",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "8037"
},
{
"name": "Python",
"bytes": "10170"
}
],
"symlink_target": ""
}
|
import time
from .exceptions import EOF, TIMEOUT
class Expecter(object):
def __init__(self, spawn, searcher, searchwindowsize=-1):
self.spawn = spawn
self.searcher = searcher
if searchwindowsize == -1:
searchwindowsize = spawn.searchwindowsize
self.searchwindowsize = searchwindowsize
def new_data(self, data):
spawn = self.spawn
searcher = self.searcher
incoming = spawn.buffer + data
freshlen = len(data)
index = searcher.search(incoming, freshlen, self.searchwindowsize)
if index >= 0:
spawn.buffer = incoming[searcher.end:]
spawn.before = incoming[: searcher.start]
spawn.after = incoming[searcher.start: searcher.end]
spawn.match = searcher.match
spawn.match_index = index
# Found a match
return index
spawn.buffer = incoming
def eof(self, err=None):
spawn = self.spawn
from . import EOF
spawn.before = spawn.buffer
spawn.buffer = spawn.string_type()
spawn.after = EOF
index = self.searcher.eof_index
if index >= 0:
spawn.match = EOF
spawn.match_index = index
return index
else:
spawn.match = None
spawn.match_index = None
msg = str(spawn)
msg += '\nsearcher: %s' % self.searcher
if err is not None:
msg = str(err) + '\n' + msg
raise EOF(msg)
def timeout(self, err=None):
spawn = self.spawn
from . import TIMEOUT
spawn.before = spawn.buffer
spawn.after = TIMEOUT
index = self.searcher.timeout_index
if index >= 0:
spawn.match = TIMEOUT
spawn.match_index = index
return index
else:
spawn.match = None
spawn.match_index = None
msg = str(spawn)
msg += '\nsearcher: %s' % self.searcher
if err is not None:
msg = str(err) + '\n' + msg
raise TIMEOUT(msg)
def errored(self):
spawn = self.spawn
spawn.before = spawn.buffer
spawn.after = None
spawn.match = None
spawn.match_index = None
def expect_loop(self, timeout=-1):
"""Blocking expect"""
spawn = self.spawn
if timeout is not None:
end_time = time.time() + timeout
try:
incoming = spawn.buffer
spawn.buffer = spawn.string_type() # Treat buffer as new data
while True:
idx = self.new_data(incoming)
# Keep reading until exception or return.
if idx is not None:
return idx
# No match at this point
if (timeout is not None) and (timeout < 0):
return self.timeout()
# Still have time left, so read more data
incoming = spawn.read_nonblocking(spawn.maxread, timeout)
if self.spawn.delayafterread is not None:
time.sleep(self.spawn.delayafterread)
if timeout is not None:
timeout = end_time - time.time()
except EOF as e:
return self.eof(e)
except TIMEOUT as e:
return self.timeout(e)
except:
self.errored()
raise
class searcher_string(object):
'''This is a plain string search helper for the spawn.expect_any() method.
This helper class is for speed. For more powerful regex patterns
see the helper class, searcher_re.
Attributes:
eof_index - index of EOF, or -1
timeout_index - index of TIMEOUT, or -1
After a successful match by the search() method the following attributes
are available:
start - index into the buffer, first byte of match
end - index into the buffer, first byte after match
match - the matching string itself
'''
def __init__(self, strings):
        '''This creates an instance of searcher_string. The argument 'strings'
        may be a list or other sequence of strings, or the EOF or TIMEOUT types. '''
self.eof_index = -1
self.timeout_index = -1
self._strings = []
for n, s in enumerate(strings):
if s is EOF:
self.eof_index = n
continue
if s is TIMEOUT:
self.timeout_index = n
continue
self._strings.append((n, s))
def __str__(self):
'''This returns a human-readable string that represents the state of
the object.'''
ss = [(ns[0], ' %d: "%s"' % ns) for ns in self._strings]
ss.append((-1, 'searcher_string:'))
if self.eof_index >= 0:
ss.append((self.eof_index, ' %d: EOF' % self.eof_index))
if self.timeout_index >= 0:
ss.append((self.timeout_index,
' %d: TIMEOUT' % self.timeout_index))
ss.sort()
ss = list(zip(*ss))[1]
return '\n'.join(ss)
def search(self, buffer, freshlen, searchwindowsize=None):
'''This searches 'buffer' for the first occurrence of one of the search
strings. 'freshlen' must indicate the number of bytes at the end of
'buffer' which have not been searched before. It helps to avoid
searching the same, possibly big, buffer over and over again.
See class spawn for the 'searchwindowsize' argument.
If there is a match this returns the index of that string, and sets
'start', 'end' and 'match'. Otherwise, this returns -1. '''
first_match = None
# 'freshlen' helps a lot here. Further optimizations could
# possibly include:
#
# using something like the Boyer-Moore Fast String Searching
# Algorithm; pre-compiling the search through a list of
# strings into something that can scan the input once to
# search for all N strings; realize that if we search for
# ['bar', 'baz'] and the input is '...foo' we need not bother
# rescanning until we've read three more bytes.
#
# Sadly, I don't know enough about this interesting topic. /grahn
for index, s in self._strings:
if searchwindowsize is None:
# the match, if any, can only be in the fresh data,
# or at the very end of the old data
offset = -(freshlen + len(s))
else:
# better obey searchwindowsize
offset = -searchwindowsize
n = buffer.find(s, offset)
if n >= 0 and (first_match is None or n < first_match):
first_match = n
best_index, best_match = index, s
if first_match is None:
return -1
self.match = best_match
self.start = first_match
self.end = self.start + len(self.match)
return best_index
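# A quick illustration of searcher_string (the prompt strings are made up):
# search() returns the index of the matched pattern within the original list
# and records where the match starts and ends.
#
#   s = searcher_string([b'login:', b'password:', EOF])
#   s.search(b'...login:', freshlen=9)   # -> 0
#   # s.start == 3, s.end == 9, s.match == b'login:'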
class searcher_re(object):
    '''This is a regular expression string search helper for the
spawn.expect_any() method. This helper class is for powerful
pattern matching. For speed, see the helper class, searcher_string.
Attributes:
eof_index - index of EOF, or -1
timeout_index - index of TIMEOUT, or -1
After a successful match by the search() method the following attributes
are available:
start - index into the buffer, first byte of match
end - index into the buffer, first byte after match
match - the re.match object returned by a successful re.search
'''
def __init__(self, patterns):
        '''This creates an instance that searches for 'patterns', where
'patterns' may be a list or other sequence of compiled regular
expressions, or the EOF or TIMEOUT types.'''
self.eof_index = -1
self.timeout_index = -1
self._searches = []
        for n, s in enumerate(patterns):
if s is EOF:
self.eof_index = n
continue
if s is TIMEOUT:
self.timeout_index = n
continue
self._searches.append((n, s))
def __str__(self):
'''This returns a human-readable string that represents the state of
the object.'''
#ss = [(n, ' %d: re.compile("%s")' %
# (n, repr(s.pattern))) for n, s in self._searches]
ss = list()
for n, s in self._searches:
try:
ss.append((n, ' %d: re.compile("%s")' % (n, s.pattern)))
except UnicodeEncodeError:
# for test cases that display __str__ of searches, dont throw
# another exception just because stdout is ascii-only, using
# repr()
ss.append((n, ' %d: re.compile(%r)' % (n, s.pattern)))
ss.append((-1, 'searcher_re:'))
if self.eof_index >= 0:
ss.append((self.eof_index, ' %d: EOF' % self.eof_index))
if self.timeout_index >= 0:
ss.append((self.timeout_index, ' %d: TIMEOUT' %
self.timeout_index))
ss.sort()
ss = list(zip(*ss))[1]
return '\n'.join(ss)
def search(self, buffer, freshlen, searchwindowsize=None):
'''This searches 'buffer' for the first occurrence of one of the regular
expressions. 'freshlen' must indicate the number of bytes at the end of
'buffer' which have not been searched before.
See class spawn for the 'searchwindowsize' argument.
If there is a match this returns the index of that string, and sets
'start', 'end' and 'match'. Otherwise, returns -1.'''
first_match = None
# 'freshlen' doesn't help here -- we cannot predict the
# length of a match, and the re module provides no help.
if searchwindowsize is None:
searchstart = 0
else:
searchstart = max(0, len(buffer) - searchwindowsize)
for index, s in self._searches:
match = s.search(buffer, searchstart)
if match is None:
continue
n = match.start()
if first_match is None or n < first_match:
first_match = n
the_match = match
best_index = index
if first_match is None:
return -1
self.start = first_match
self.match = the_match
self.end = self.match.end()
return best_index
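# A quick illustration of searcher_re (the pattern is made up): the patterns
# must be pre-compiled, and a successful search exposes the re match object.
#
#   import re
#   s = searcher_re([re.compile(b'ERROR: (\\w+)'), TIMEOUT])
#   s.search(b'... ERROR: disk', freshlen=15)  # -> 0
#   # s.match.group(1) -> b'disk'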
|
{
"content_hash": "2750f64d2b23d56d776f37a51582e50a",
"timestamp": "",
"source": "github",
"line_count": 299,
"max_line_length": 80,
"avg_line_length": 35.668896321070235,
"alnum_prop": 0.5537740271917487,
"repo_name": "nateprewitt/pipenv",
"id": "7e07cfa4c231bbccf3ea7857dcdf024985e93463",
"size": "10665",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pipenv/vendor/pexpect/expect.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "202"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Python",
"bytes": "2589069"
},
{
"name": "Roff",
"bytes": "40754"
}
],
"symlink_target": ""
}
|
import unittest
from bulbs.config import Config
from bulbs.property import String
from bulbs.element import VertexProxy, EdgeProxy
from bulbs.model import Node, NodeProxy, Relationship, RelationshipProxy
from bulbs.base.client import Client
from .testcase import BulbsTestCase
# Test Models
class User(Node):
element_type = "user"
name = String(nullable=False)
username = String(nullable=False)
class Group(Node):
element_type = "group"
name = String(nullable=False)
class Member(Relationship):
label = "member"
class GraphTestCase(BulbsTestCase):
def test_init(self):
assert isinstance(self.graph.config, Config)
assert isinstance(self.graph.client, Client)
assert isinstance(self.graph.vertices, VertexProxy)
assert isinstance(self.graph.edges, EdgeProxy)
def test_add_proxy(self):
self.graph.add_proxy("users", User)
self.graph.add_proxy("groups", Group)
self.graph.add_proxy("members", Member)
assert isinstance(self.graph.users, NodeProxy)
assert isinstance(self.graph.groups, NodeProxy)
assert isinstance(self.graph.members, RelationshipProxy)
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(GraphTestCase))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
{
"content_hash": "de8a04506567d56c49ba732b72b58be5",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 72,
"avg_line_length": 23.54237288135593,
"alnum_prop": 0.6954643628509719,
"repo_name": "mudbungie/NetExplorer",
"id": "a9b3a48de3f8704af26c2978517918d3ba519058",
"size": "1389",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "env/lib/python3.4/site-packages/bulbs/tests/graph_tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34672"
}
],
"symlink_target": ""
}
|
import psycopg2
import sys
import os
import os.path
import csv
import dateutil.parser
from datetime import datetime
import itertools
import re
import json
# Note: initially need to run `python scripts/create_new_detector.py --detector macrobase_ma`
# This script also requires additional logging statements in MacroBase - you need to use the nab branch.
# Then run this script with an argument of the path to the numenta anomaly benchmark directory.
# TODO we should probably tune window size
# TODO should add config parameter to disable scoring, change training percentage (0.15 for NAB)
config = '''
macrobase.query.name: numenta
macrobase.loader.attributes: [value]
macrobase.loader.targetLowMetrics: []
macrobase.loader.targetHighMetrics: [value]
macrobase.loader.timeColumn: timestamp
macrobase.loader.db.dbUrl: postgres
macrobase.loader.db.baseQuery: SELECT * FROM nab;
macrobase.analysis.detectorType: MOVING_AVERAGE
macrobase.analysis.minSupport: 0.001
macrobase.analysis.minOIRatio: 1
macrobase.analysis.useZScore: false
macrobase.analysis.usePercentile: true
macrobase.analysis.targetPercentile: 0.99
macrobase.analysis.timeseries.tupleWindow: 100
logging:
level: WARN
loggers:
macrobase.analysis.outlier: DEBUG
'''
conf_file = '/tmp/macrobase_nab.conf'
result_file = '/tmp/macrobase_results.out'
mb_detector_name = 'macrobase_ma'
def main():
if len(sys.argv) != 2:
print 'Usage: numenta_benchmark.py PATH_TO_NAB_ROOT'
sys.exit(1)
with open(conf_file, 'w') as f:
f.write(config)
bench_dir = sys.argv[1]
with open(os.path.join(bench_dir, 'labels', 'combined_windows.json')) as jf:
label_windows = json.load(jf)
for path, labels in label_windows.iteritems():
label_windows[path] = map(lambda pair: map(tounixdate, pair), labels)
data_dir = os.path.join(bench_dir, 'data')
for dirpath, dirnames, filenames in os.walk(data_dir):
for f in filenames:
if not f.endswith('.csv'):
continue
reldir = os.path.relpath(dirpath, data_dir)
print reldir
path = os.path.join(dirpath, f)
relpath = os.path.join(reldir, f)
outpath = os.path.join(bench_dir, 'results', mb_detector_name, reldir, '%s_%s' % (mb_detector_name, f))
run(path, outpath, label_windows[relpath])
print 'Done! Inside the NAB directory run `python run.py -d %s --optimize --score --normalize`' % mb_detector_name
def run(csvpath, outpath, labels):
conn = psycopg2.connect("dbname='postgres' host='localhost'")
cur = conn.cursor()
cur.execute("DROP TABLE IF EXISTS nab; CREATE TABLE nab ( timestamp bigint NOT NULL, value numeric NOT NULL );")
print 'Loading ' + csvpath
with open(csvpath, 'rb') as in_csv:
reader = csv.reader(in_csv)
for row in skip_first(reader): # Skip header
cur.execute("INSERT INTO nab VALUES ('%s', '%s');" % (tounixdate(row[0]), row[1]))
conn.commit()
with open(conf_file, 'w') as f:
f.write(config)
print 'Running macrobase...'
# Quick n dirty
cmd = '''java ${{JAVA_OPTS}} \\
-cp "src/main/resources/:target/classes:target/lib/*:target/dependency/*" \\
macrobase.MacroBase batch {conf_file} \\
> {result_file}'''.format(conf_file=conf_file, result_file=result_file)
os.system(cmd)
with open(result_file) as f:
# Remove all log superfluous text
it = skip_first(itertools.dropwhile(lambda l: 'Starting scoring...' not in l, iter(f)))
it = itertools.takewhile(lambda l: re.match(r'\d', l), it)
out_csv = open(outpath, 'w')
in_csv = open(csvpath, 'rb')
reader = skip_first(csv.reader(in_csv))
out_csv.write('timestamp,value,anomaly_score,label\n')
for scoreline, inrow in zip(it, reader):
[timestamp, per_diff] = scoreline.strip().split(',')
timestamp = int(timestamp)
label = 1 if any(map(lambda t: t[0] <= timestamp <= t[1], labels)) else 0
out_csv.write('%s,%s,%f,%d\n' % (inrow[0], inrow[1], toscore(float(per_diff)), label))
in_csv.close()
out_csv.close()
print 'Output to ' + outpath
def todate(sdt):
return dateutil.parser.parse(sdt)
def tounixdate(sdt):
return int((todate(sdt) - datetime(1970, 1, 1)).total_seconds())
def datetostr(dt):
    return datetime.strftime(dt, "%Y-%m-%d %H:%M:%S")
def skip_first(it):
return itertools.islice(it, 1, None)
def toscore(per_diff):
# Arbitrary function to map differences in [0, inf] to [0, 1].
return 1 / (-per_diff - 1) + 1
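# Worked examples of the mapping above: toscore(0.0) == 0.0,
# toscore(1.0) == 0.5, and toscore(x) approaches 1.0 as x grows.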
if __name__ == '__main__':
main()
|
{
"content_hash": "48d3eb9d4171d2221dee279b4a0f3340",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 118,
"avg_line_length": 34.588235294117645,
"alnum_prop": 0.6513605442176871,
"repo_name": "kexinrong/macrobase",
"id": "4dbb712521295fc4da70016e2a3be20170ed3710",
"size": "4704",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tools/numenta_benchmark.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "881"
},
{
"name": "C++",
"bytes": "94076"
},
{
"name": "CSS",
"bytes": "985"
},
{
"name": "HTML",
"bytes": "20671"
},
{
"name": "Java",
"bytes": "1044218"
},
{
"name": "JavaScript",
"bytes": "30435"
},
{
"name": "Lex",
"bytes": "4363"
},
{
"name": "Makefile",
"bytes": "5370"
},
{
"name": "Python",
"bytes": "1696"
},
{
"name": "Shell",
"bytes": "3665"
},
{
"name": "Yacc",
"bytes": "25274"
}
],
"symlink_target": ""
}
|
"""Dev CLI entry point for QtPy, a compat layer for the Python Qt bindings."""
import qtpy.cli
def main():
return qtpy.cli.main()
if __name__ == "__main__":
main()
|
{
"content_hash": "0004f1f5d1b341223f573e6922d2074f",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 78,
"avg_line_length": 16.09090909090909,
"alnum_prop": 0.6101694915254238,
"repo_name": "spyder-ide/qtpy",
"id": "a8f993c01acebb4f86bea6e65e2918df7e8b895c",
"size": "461",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qtpy/__main__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "171859"
}
],
"symlink_target": ""
}
|
"""
Copyright (c) 2013 Timon Wong
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import sublime
import sublime_plugin
import codecs
import os
import locale
import subprocess
import sys
import tempfile
import threading
import time
import types
from functools import partial
PY3K = sys.version_info >= (3, 0, 0)
if PY3K:
from imp import reload
# Reloading modules
for key in sys.modules.keys():
if key.find('OmniMarkupLib') >= 0:
try:
mod = sys.modules[key]
if isinstance(mod, types.ModuleType):
reload(mod)
except:
pass
if PY3K:
from .OmniMarkupLib import log, Server
from .OmniMarkupLib.Setting import Setting
from .OmniMarkupLib.RendererManager import RenderedMarkupCache, RendererManager
from .OmniMarkupLib.Common import Singleton
from .OmniMarkupLib import desktop
else:
exec('import OmniMarkupLib.LinuxModuleChecker')
from OmniMarkupLib import log, Server
from OmniMarkupLib.Setting import Setting
from OmniMarkupLib.RendererManager import RenderedMarkupCache, RendererManager
from OmniMarkupLib.Common import Singleton
from OmniMarkupLib import desktop
def launching_web_browser_for_url(url, success_msg_default=None, success_msg_user=None):
try:
setting = Setting.instance()
if setting.browser_command:
browser_command = [os.path.expandvars(arg).format(url=url)
for arg in setting.browser_command]
if os.name == 'nt':
# unicode arguments broken under windows
encoding = locale.getpreferredencoding()
browser_command = [arg.encode(encoding) for arg in browser_command]
subprocess.Popen(browser_command)
if success_msg_user:
sublime.status_message(success_msg_user)
else:
# Default web browser
desktop.open(url)
if success_msg_default:
sublime.status_message(success_msg_default)
except:
if setting.browser_command:
log.exception('Error while launching user defined web browser')
else:
log.exception('Error while launching default web browser')
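# Example of a user-defined "browser_command" setting (hypothetical value):
#   ["/usr/bin/chromium", "--new-window", "{url}"]
# Each argument is expanded with os.path.expandvars() and then formatted
# with the preview URL, as done in launching_web_browser_for_url() above.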
class OmniMarkupPreviewCommand(sublime_plugin.TextCommand):
def run(self, edit, immediate=True):
# Whether RendererManager is finished loading?
if not RendererManager.ensure_started():
            sublime.status_message('OmniMarkupPreviewer has not yet started')
return
buffer_id = self.view.buffer_id()
# Opened in a tab already?
opened = False
for view in self.view.window().views():
if view.buffer_id() == buffer_id:
opened = True
break
if not opened:
RendererManager.enqueue_view(self.view, immediate=True)
host = Setting.instance().server_host
port = Setting.instance().server_port
if host == '0.0.0.0':
host = '127.0.0.1'
url = 'http://%s:%d/view/%d' % (host, port, buffer_id)
# Open with the default browser
log.info('Launching web browser for %s', url)
launching_web_browser_for_url(
url,
success_msg_default='Preview launched in default web browser',
success_msg_user='Preview launched in user defined web browser')
def is_enabled(self):
return RendererManager.any_available_renderer_for_view(self.view)
class OmniMarkupCleanCacheCommand(sublime_plugin.ApplicationCommand):
def run(self):
storage = RenderedMarkupCache.instance()
storage.clean()
class OmniMarkupExportCommand(sublime_plugin.TextCommand):
def copy_to_clipboard(self, html_content):
sublime.set_clipboard(html_content)
sublime.status_message('Exported result copied to clipboard')
def write_to_file(self, html_content, setting):
target_folder = setting.export_options.get('target_folder', '.')
if target_folder is not None:
fullpath = self.view.file_name() or ''
timestamp_format = setting.export_options.get('timestamp_format', '_%y%m%d%H%M%S')
timestr = time.strftime(timestamp_format, time.localtime())
if (not os.path.exists(fullpath) and target_folder == '.') or \
not os.path.isdir(target_folder):
target_folder = None
elif target_folder == '.':
fn_base, _ = os.path.splitext(fullpath)
html_fn = '%s%s.html' % (fn_base, timestr)
elif not os.path.exists(fullpath):
html_fn = os.path.join(target_folder, 'Untitled%s.html' % timestr)
else:
fn_base = os.path.basename(fullpath)
html_fn = os.path.join(target_folder, '%s%s.html' % (fn_base, timestr))
# No target folder, create file in temporary directory
if target_folder is None:
with tempfile.NamedTemporaryFile(delete=False, suffix='.html') as f:
html_fn = f.name
with codecs.open(html_fn, 'w', encoding='utf-8') as html_file:
html_file.write(html_content)
log.info('Successfully exported to: %s', html_fn)
return html_fn
def run(self, edit, clipboard_only=False):
view = self.view
try:
html_content = RendererManager.render_view_as_html(view)
if clipboard_only:
self.copy_to_clipboard(html_content)
return
setting = Setting.instance()
html_fn = self.write_to_file(html_content, setting)
# Copy contents to clipboard
if setting.export_options.get('copy_to_clipboard', False):
self.copy_to_clipboard(html_content)
# Open output file if necessary
if setting.export_options.get('open_after_exporting', False):
log.info('Launching web browser for %s', html_fn)
launching_web_browser_for_url(html_fn)
except NotImplementedError:
pass
except:
sublime.error_message('Error while exporting, please check your console for more information.')
log.exception('Error while exporting')
def is_enabled(self):
return RendererManager.any_available_renderer_for_view(self.view)
class ThrottleQueue(threading.Thread):
WAIT_TIMEOUT = 0.02
class Entry(object):
def __init__(self, view, timeout):
self.view = view
self.filename = view.file_name()
self.timeout = timeout
        def __cmp__(self, other):
            # Compare entries by the id of the view they wrap; the original
            # code referenced a nonexistent self.id attribute.
            return self.view.id() == other.view.id()
        def __hash__(self):
            return hash(self.view.id())
def __init__(self):
threading.Thread.__init__(self)
self.mutex = threading.Lock()
self.cond = threading.Condition(self.mutex)
self.stopping = False
self.last_signaled = time.time()
self.view_entry_mapping = {}
def put(self, view, preemptive=True, timeout=0.5):
if not RendererManager.any_available_renderer_for_view(view):
return
view_id = view.id()
now = time.time()
with self.mutex:
if view_id in self.view_entry_mapping:
# Too fast, cancel this operation
if now - self.last_signaled <= 0.01:
return
if preemptive:
# Cancel pending actions
with self.cond:
if view_id in self.view_entry_mapping:
del self.view_entry_mapping[view_id]
self.cond.notify()
RendererManager.enqueue_view(view, only_exists=True)
self.last_signaled = now
else:
with self.cond:
filename = view.file_name()
if view_id not in self.view_entry_mapping:
self.view_entry_mapping[view_id] = self.Entry(view, timeout)
else:
entry = self.view_entry_mapping[view_id]
entry.view = view
entry.filename = filename
entry.timeout = timeout
self.cond.notify()
def enqueue_view_to_renderer_manager(self, view, filename):
if view.is_loading() or view.file_name() != filename:
return
if RendererManager.any_available_renderer_for_view(view):
RendererManager.enqueue_view(view, only_exists=True)
self.last_signaled = time.time()
def run(self):
prev_time = time.time()
while True:
with self.cond:
if self.stopping:
break
self.cond.wait(self.WAIT_TIMEOUT)
if self.stopping:
break
if len(self.view_entry_mapping) > 0:
now = time.time()
diff_time = now - prev_time
prev_time = time.time()
for view_id in list(self.view_entry_mapping.keys()):
o = self.view_entry_mapping[view_id]
o.timeout -= max(diff_time, self.WAIT_TIMEOUT)
if o.timeout <= 0:
del self.view_entry_mapping[view_id]
sublime.set_timeout(partial(self.enqueue_view_to_renderer_manager,
o.view, o.filename), 0)
else:
# No more items, sleep
self.cond.wait()
def stop(self):
with self.cond:
self.stopping = True
self.cond.notify()
self.join()
class PluginEventListener(sublime_plugin.EventListener):
def __init__(self):
self.throttle = ThrottleQueue()
self.throttle.start()
def __del__(self):
self.throttle.stop()
def on_query_context(self, view, key, operator, operand, match_all):
# `omp_is_enabled` for backwards compatibility
if key == 'omnimarkup_is_enabled' or key == 'omp_is_enabled':
return RendererManager.any_available_renderer_for_view(view)
return None
def _on_close(self, view):
storage = RenderedMarkupCache.instance()
entry = storage.get_entry(view.buffer_id())
if entry is not None:
entry.disconnected = True
def _on_modified(self, view):
        # Prevent rare complaints about slow callbacks
def callback():
setting = Setting.instance()
if not setting.refresh_on_modified:
return
timeout = setting.refresh_on_modified_delay / 1000.0
self.throttle.put(view, preemptive=False, timeout=timeout)
if PY3K:
callback()
else:
sublime.set_timeout(callback, 0)
def _on_post_save(self, view):
if not Setting.instance().refresh_on_saved:
return
self.throttle.put(view, preemptive=True)
if PY3K:
on_close_async = _on_close
on_modified_async = _on_modified
on_post_save_async = _on_post_save
else:
on_close = _on_close
on_modified = _on_modified
on_post_save = _on_post_save
g_server = None
@Singleton
class PluginManager(object):
def __init__(self):
setting = Setting.instance()
self.on_setting_changing(setting)
def on_setting_changing(self, setting):
self.old_server_host = setting.server_host
self.old_server_port = setting.server_port
self.old_ajax_polling_interval = setting.ajax_polling_interval
self.old_html_template_name = setting.html_template_name
def on_setting_changed(self, setting):
if (setting.ajax_polling_interval != self.old_ajax_polling_interval or
setting.html_template_name != self.old_html_template_name):
sublime.status_message('OmniMarkupPreviewer requires a browser reload to apply changes')
need_server_restart = (setting.server_host != self.old_server_host or
setting.server_port != self.old_server_port)
if need_server_restart:
self.restart_server()
def subscribe_setting_events(self):
Setting.instance().subscribe('changing', self.on_setting_changing)
Setting.instance().subscribe('changed', self.on_setting_changed)
def restart_server(self):
global g_server
if g_server is not None:
self.stop_server()
setting = Setting.instance()
g_server = Server.Server(host=setting.server_host, port=setting.server_port)
def stop_server(self):
global g_server
if g_server is not None:
g_server.stop()
g_server = None
def unload_handler():
log.info('Unloading plugin...')
# Cleaning up resources...
PluginManager.instance().stop_server()
# Stopping renderer worker
RendererManager.stop()
def plugin_loaded():
Server.init()
# Setting must be the first to initialize.
Setting.instance().init()
PluginManager.instance().subscribe_setting_events()
RendererManager.start()
PluginManager.instance().restart_server()
if not PY3K:
plugin_loaded()
|
{
"content_hash": "da9db1c9e39c2cfae002c52de6acd5b0",
"timestamp": "",
"source": "github",
"line_count": 402,
"max_line_length": 107,
"avg_line_length": 35.62686567164179,
"alnum_prop": 0.6055718475073314,
"repo_name": "timonwong/OmniMarkupPreviewer",
"id": "fd6fe178b558c61cee3250c5d9ebcbc32cb06082",
"size": "14322",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "OmniMarkupPreviewer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "135028"
},
{
"name": "HTML",
"bytes": "550"
},
{
"name": "JavaScript",
"bytes": "366577"
},
{
"name": "Python",
"bytes": "8024507"
},
{
"name": "Ruby",
"bytes": "1168"
},
{
"name": "Smarty",
"bytes": "31148"
},
{
"name": "TeX",
"bytes": "3054"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, print_function, division
import operator
from petl.compat import string_types, izip
from petl.errors import ArgumentError
from petl.util.base import Table, dicts
def fromtextindex(index_or_dirname, indexname=None, docnum_field=None):
"""
Extract all documents from a Whoosh index. E.g.::
>>> import petl as etl
>>> import os
>>> # set up an index and load some documents via the Whoosh API
... from whoosh.index import create_in
>>> from whoosh.fields import *
>>> schema = Schema(title=TEXT(stored=True), path=ID(stored=True),
... content=TEXT)
>>> dirname = 'example.whoosh'
>>> if not os.path.exists(dirname):
... os.mkdir(dirname)
...
>>> index = create_in(dirname, schema)
>>> writer = index.writer()
>>> writer.add_document(title=u"First document", path=u"/a",
... content=u"This is the first document we've added!")
>>> writer.add_document(title=u"Second document", path=u"/b",
... content=u"The second one is even more interesting!")
>>> writer.commit()
>>> # extract documents as a table
... table = etl.fromtextindex(dirname)
>>> table
+------+-------------------+
| path | title |
+======+===================+
| '/a' | 'First document' |
+------+-------------------+
| '/b' | 'Second document' |
+------+-------------------+
Keyword arguments:
index_or_dirname
Either an instance of `whoosh.index.Index` or a string containing the
directory path where the index is stored.
indexname
String containing the name of the index, if multiple indexes are stored
in the same directory.
docnum_field
If not None, an extra field will be added to the output table containing
the internal document number stored in the index. The name of the field
will be the value of this argument.
"""
return TextIndexView(index_or_dirname, indexname=indexname,
docnum_field=docnum_field)
class TextIndexView(Table):
def __init__(self, index_or_dirname, indexname=None, docnum_field=None):
self.index_or_dirname = index_or_dirname
self.indexname = indexname
self.docnum_field = docnum_field
def __iter__(self):
return itertextindex(self.index_or_dirname, self.indexname,
self.docnum_field)
def itertextindex(index_or_dirname, indexname, docnum_field):
import whoosh.index
if isinstance(index_or_dirname, string_types):
dirname = index_or_dirname
index = whoosh.index.open_dir(dirname, indexname=indexname,
readonly=True)
needs_closing = True
elif isinstance(index_or_dirname, whoosh.index.Index):
index = index_or_dirname
needs_closing = False
else:
raise ArgumentError('expected string or index, found %r'
% index_or_dirname)
try:
if docnum_field is None:
# figure out the field names
hdr = tuple(index.schema.stored_names())
yield hdr
# yield all documents
astuple = operator.itemgetter(*index.schema.stored_names())
for _, stored_fields_dict in index.reader().iter_docs():
yield astuple(stored_fields_dict)
else:
# figure out the field names
hdr = (docnum_field,) + tuple(index.schema.stored_names())
yield hdr
# yield all documents
astuple = operator.itemgetter(*index.schema.stored_names())
for docnum, stored_fields_dict in index.reader().iter_docs():
yield (docnum,) + astuple(stored_fields_dict)
except:
raise
finally:
if needs_closing:
# close the index if we're the ones who opened it
index.close()
def totextindex(table, index_or_dirname, schema=None, indexname=None,
merge=False, optimize=False):
"""
Load all rows from `table` into a Whoosh index. N.B., this will clear any
existing data in the index before loading. E.g.::
>>> import petl as etl
>>> import datetime
>>> import os
>>> # here is the table we want to load into an index
... table = (('f0', 'f1', 'f2', 'f3', 'f4'),
... ('AAA', 12, 4.3, True, datetime.datetime.now()),
... ('BBB', 6, 3.4, False, datetime.datetime(1900, 1, 31)),
... ('CCC', 42, 7.8, True, datetime.datetime(2100, 12, 25)))
>>> # define a schema for the index
... from whoosh.fields import *
>>> schema = Schema(f0=TEXT(stored=True),
... f1=NUMERIC(int, stored=True),
... f2=NUMERIC(float, stored=True),
... f3=BOOLEAN(stored=True),
... f4=DATETIME(stored=True))
>>> # load index
... dirname = 'example.whoosh'
>>> if not os.path.exists(dirname):
... os.mkdir(dirname)
...
>>> etl.totextindex(table, dirname, schema=schema)
Keyword arguments:
table
A table container with the data to be loaded.
index_or_dirname
Either an instance of `whoosh.index.Index` or a string containing the
directory path where the index is to be stored.
schema
Index schema to use if creating the index.
indexname
String containing the name of the index, if multiple indexes are stored
in the same directory.
merge
Merge small segments during commit?
optimize
Merge all segments together?
"""
import whoosh.index
import whoosh.writing
# deal with polymorphic argument
if isinstance(index_or_dirname, string_types):
dirname = index_or_dirname
index = whoosh.index.create_in(dirname, schema,
indexname=indexname)
needs_closing = True
elif isinstance(index_or_dirname, whoosh.index.Index):
index = index_or_dirname
needs_closing = False
else:
raise ArgumentError('expected string or index, found %r'
% index_or_dirname)
writer = index.writer()
try:
for d in dicts(table):
writer.add_document(**d)
writer.commit(merge=merge, optimize=optimize,
mergetype=whoosh.writing.CLEAR)
except:
writer.cancel()
raise
finally:
if needs_closing:
index.close()
def appendtextindex(table, index_or_dirname, indexname=None, merge=True,
optimize=False):
"""
Load all rows from `table` into a Whoosh index, adding them to any existing
data in the index.
Keyword arguments:
table
A table container with the data to be loaded.
index_or_dirname
Either an instance of `whoosh.index.Index` or a string containing the
directory path where the index is to be stored.
indexname
String containing the name of the index, if multiple indexes are stored
in the same directory.
merge
Merge small segments during commit?
optimize
Merge all segments together?
"""
import whoosh.index
# deal with polymorphic argument
if isinstance(index_or_dirname, string_types):
dirname = index_or_dirname
index = whoosh.index.open_dir(dirname, indexname=indexname,
readonly=False)
needs_closing = True
elif isinstance(index_or_dirname, whoosh.index.Index):
index = index_or_dirname
needs_closing = False
else:
raise ArgumentError('expected string or index, found %r'
% index_or_dirname)
writer = index.writer()
try:
for d in dicts(table):
writer.add_document(**d)
writer.commit(merge=merge, optimize=optimize)
except Exception:
writer.cancel()
raise
finally:
if needs_closing:
index.close()
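# Usage sketch (hypothetical table of extra rows; assumes an index already
# created, e.g. via the totextindex() example above):
#   appendtextindex(extra_rows, 'example.whoosh')
# adds the new documents without clearing the existing ones.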
def searchtextindex(index_or_dirname, query, limit=10, indexname=None,
docnum_field=None, score_field=None, fieldboosts=None,
search_kwargs=None):
"""
Search a Whoosh index using a query. E.g.::
>>> import petl as etl
>>> import os
>>> # set up an index and load some documents via the Whoosh API
... from whoosh.index import create_in
>>> from whoosh.fields import *
>>> schema = Schema(title=TEXT(stored=True), path=ID(stored=True),
... content=TEXT)
>>> dirname = 'example.whoosh'
>>> if not os.path.exists(dirname):
... os.mkdir(dirname)
...
>>> index = create_in('example.whoosh', schema)
>>> writer = index.writer()
>>> writer.add_document(title=u"Oranges", path=u"/a",
... content=u"This is the first document we've added!")
>>> writer.add_document(title=u"Apples", path=u"/b",
... content=u"The second document is even more "
... u"interesting!")
>>> writer.commit()
>>> # demonstrate the use of searchtextindex()
... table1 = etl.searchtextindex('example.whoosh', 'oranges')
>>> table1
+------+-----------+
| path | title |
+======+===========+
| '/a' | 'Oranges' |
+------+-----------+
>>> table2 = etl.searchtextindex('example.whoosh', 'doc*')
>>> table2
+------+-----------+
| path | title |
+======+===========+
| '/a' | 'Oranges' |
+------+-----------+
| '/b' | 'Apples' |
+------+-----------+
Keyword arguments:
index_or_dirname
Either an instance of `whoosh.index.Index` or a string containing the
directory path where the index is to be stored.
query
Either a string or an instance of `whoosh.query.Query`. If a string,
it will be parsed as a multi-field query, i.e., any terms not bound
to a specific field will match **any** field.
limit
Return at most `limit` results.
indexname
String containing the name of the index, if multiple indexes are stored
in the same directory.
docnum_field
If not None, an extra field will be added to the output table containing
the internal document number stored in the index. The name of the field
will be the value of this argument.
score_field
If not None, an extra field will be added to the output table containing
the score of the result. The name of the field will be the value of this
argument.
fieldboosts
An optional dictionary mapping field names to boosts.
search_kwargs
Any extra keyword arguments to be passed through to the Whoosh
`search()` method.
"""
return SearchTextIndexView(index_or_dirname, query, limit=limit,
indexname=indexname, docnum_field=docnum_field,
score_field=score_field, fieldboosts=fieldboosts,
search_kwargs=search_kwargs)
def searchtextindexpage(index_or_dirname, query, pagenum, pagelen=10,
indexname=None, docnum_field=None, score_field=None,
fieldboosts=None, search_kwargs=None):
"""
Search an index using a query, returning a result page.
Keyword arguments:
index_or_dirname
Either an instance of `whoosh.index.Index` or a string containing the
directory path where the index is to be stored.
query
Either a string or an instance of `whoosh.query.Query`. If a string,
it will be parsed as a multi-field query, i.e., any terms not bound
to a specific field will match **any** field.
pagenum
Number of the page to return (e.g., 1 = first page).
pagelen
Number of results per page.
indexname
String containing the name of the index, if multiple indexes are stored
in the same directory.
docnum_field
If not None, an extra field will be added to the output table containing
the internal document number stored in the index. The name of the field
will be the value of this argument.
score_field
If not None, an extra field will be added to the output table containing
the score of the result. The name of the field will be the value of this
argument.
fieldboosts
An optional dictionary mapping field names to boosts.
search_kwargs
Any extra keyword arguments to be passed through to the Whoosh
`search()` method.
"""
return SearchTextIndexView(index_or_dirname, query, pagenum=pagenum,
pagelen=pagelen, indexname=indexname,
docnum_field=docnum_field,
score_field=score_field, fieldboosts=fieldboosts,
search_kwargs=search_kwargs)
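# Usage sketch (assumes the index built in the searchtextindex() example):
#   searchtextindexpage('example.whoosh', 'doc*', pagenum=1, pagelen=5)
# returns at most 5 matches from the first page of results.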
class SearchTextIndexView(Table):
def __init__(self, index_or_dirname, query, limit=None, pagenum=None,
pagelen=None, indexname=None, docnum_field=None,
score_field=None, fieldboosts=None, search_kwargs=None):
self._index_or_dirname = index_or_dirname
self._query = query
self._limit = limit
self._pagenum = pagenum
self._pagelen = pagelen
self._indexname = indexname
self._docnum_field = docnum_field
self._score_field = score_field
self._fieldboosts = fieldboosts
self._search_kwargs = search_kwargs
def __iter__(self):
return itersearchindex(self._index_or_dirname, self._query,
self._limit, self._pagenum, self._pagelen,
self._indexname, self._docnum_field,
self._score_field, self._fieldboosts,
self._search_kwargs)
def itersearchindex(index_or_dirname, query, limit, pagenum, pagelen, indexname,
docnum_field, score_field, fieldboosts, search_kwargs):
import whoosh.index
import whoosh.query
import whoosh.qparser
if not search_kwargs:
search_kwargs = dict()
if isinstance(index_or_dirname, string_types):
dirname = index_or_dirname
index = whoosh.index.open_dir(dirname,
indexname=indexname,
readonly=True)
needs_closing = True
elif isinstance(index_or_dirname, whoosh.index.Index):
index = index_or_dirname
needs_closing = False
else:
raise ArgumentError('expected string or index, found %r'
% index_or_dirname)
try:
# figure out header
hdr = tuple()
if docnum_field is not None:
hdr += (docnum_field,)
if score_field is not None:
hdr += (score_field,)
stored_names = tuple(index.schema.stored_names())
hdr += stored_names
yield hdr
# parse the query
if isinstance(query, string_types):
# search all fields by default
parser = whoosh.qparser.MultifieldParser(
index.schema.names(),
index.schema,
fieldboosts=fieldboosts
)
query = parser.parse(query)
elif isinstance(query, whoosh.query.Query):
pass
else:
raise ArgumentError(
'expected string or whoosh.query.Query, found %r' % query
)
# make a function to turn docs into tuples
astuple = operator.itemgetter(*index.schema.stored_names())
with index.searcher() as searcher:
if limit is not None:
results = searcher.search(query, limit=limit,
**search_kwargs)
else:
results = searcher.search_page(query, pagenum,
pagelen=pagelen,
**search_kwargs)
if docnum_field is None and score_field is None:
for doc in results:
yield astuple(doc)
else:
for (docnum, score), doc in izip(results.items(), results):
row = tuple()
if docnum_field is not None:
row += (docnum,)
if score_field is not None:
row += (score,)
row += astuple(doc)
yield row
except:
raise
finally:
if needs_closing:
# close the index if we're the ones who opened it
index.close()
# TODO guess schema
|
{
"content_hash": "a1314781e2c394690ef961952d9b1fc1",
"timestamp": "",
"source": "github",
"line_count": 492,
"max_line_length": 84,
"avg_line_length": 35.209349593495936,
"alnum_prop": 0.5582751255556196,
"repo_name": "alimanfoo/petl",
"id": "e20b4f9646f974493bd1ba45d9a2cf8f3c19ce32",
"size": "17347",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "petl/io/whoosh.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "146738"
},
{
"name": "Python",
"bytes": "881057"
}
],
"symlink_target": ""
}
|
import argparse
import mir3.data.score as score
import mir3.lib.midi.MidiOutFile as MidiOutFile
import mir3.module
class Score2Midi(mir3.module.Module):
def get_help(self):
return """convert the internal score representation to midi"""
def build_arguments(self, parser):
parser.add_argument('infile', type=argparse.FileType('r'), help="""score
file""")
parser.add_argument('outfile', type=argparse.FileType('w'), help="""midi
file""")
def run(self, args):
s = score.Score().load(args.infile)
with MidiWriter(args.outfile) as mw:
for n in s.data:
mw.add_note(n)
class MidiWriter:
"""Helper to write MIDI files.
Uses the external midi library to create midi files from a score. The events
are added to a dictionary, because MIDI requires things to be written in
order, so we can't add them instantly. Not every feature available in the
MIDI format is available here yet.
The class provides safeguards to save the events to the MIDI file on object
destruction. It can also be used with the 'with' statement.
Any events are destructed when a new file is opened.
Attributes:
division: number of divisions.
events: dictionary of events, where the keys are timestamps and values
are list of events.
midi: midi file used to write. The value is None if no file is open.
"""
def __init__(self, file_handle, division=96, bmp=60):
"""Starts a new MIDI file.
        Creates the file and writes the header and the starting BMP.
Args:
file_handle: handle for the file to be written.
            division: number of divisions. Default: 96.
bmp: beats per minute at the start. Default: 60.
"""
self.midi = MidiOutFile.MidiOutFile(file_handle)
self.division = division
self.midi.header(division = division)
self.midi.start_of_track()
self.events = {}
self.set_BMP(bmp)
def add_event_at(self, time, event):
"""Adds an event to a certain time.
        If no event exists at that time, a new list is started. The event is
        stored at the end of the list.
Args:
time: timestamp for the event.
event: event description.
Returns:
self
"""
if time not in self.events:
self.events[time] = []
self.events[time].append(event)
return self
def add_note(self, note, channel=0):
"""Adds a Note object to a channel.
Uses the note onset and offset to compute the time.
Args:
note: Note object.
channel: channel to store the note. Default: 0.
Returns:
self
"""
if note is None:
raise ValueError, 'Invalid note.'
onset = int(note.data.onset * self.division)
onset_event = {'name': 'onset', 'pitch': note.data.pitch,
'channel': channel}
self.add_event_at(onset, onset_event)
offset = int(note.data.offset * self.division)
offset_event = {'name': 'offset', 'pitch': note.data.pitch,
'channel': channel}
self.add_event_at(offset, offset_event)
return self
def set_BMP(self, bmp, time=0):
"""Sets the BMP at a certain time.
Args:
bmp: beats per minute values.
time: timestamp.
Returns:
self
"""
time = int(time * self.division)
event = {'name': 'tempo', 'value': 60000000/int(bmp)}
self.add_event_at(time, event)
return self
def write_events(self):
"""Writes the events stored and close the file.
If there's no file, nothing is done. The events dictionary is cleaned
upon completion.
Returns:
self
"""
if self.midi is not None:
time_scale = 1
last_time = None
for time in sorted(self.events):
if last_time is None:
self.midi.update_time(int(time), relative = False)
else:
self.midi.update_time(int((time-last_time)*time_scale),
relative = True)
last_time = time
for event in self.events[time]:
if event['name'] == 'tempo':
self.midi.tempo(event['value'])
time_scale = 1/(event['value']*1e-6)
elif event['name'] == 'onset':
self.midi.note_on(channel = event['channel'],
note = event['pitch'])
elif event['name'] == 'offset':
self.midi.note_off(channel = event['channel'],
note = event['pitch'])
else:
raise ValueError, 'Unknown MIDI event.'
self.events = {}
        return self.close()
def close(self):
"""Closes the file and add tail data.
If the file is open, writes the things that the file format requires at
the end.
Returns:
self
"""
if self.midi is not None:
self.midi.update_time(0)
self.midi.end_of_track()
self.midi.eof()
self.midi = None
return self
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.write_events()
def __del__(self):
self.write_events()
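# Minimal usage sketch (hypothetical Note objects with the .data.onset,
# .data.offset and .data.pitch fields consumed by add_note above):
#
#   with open('out.mid', 'wb') as handle:
#       with MidiWriter(handle, division=96, bmp=120) as writer:
#           for note in notes:
#               writer.add_note(note)
#   # leaving the 'with' block calls write_events(), flushing to the file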
|
{
"content_hash": "803ec100689a2927612a857934a1d9fd",
"timestamp": "",
"source": "github",
"line_count": 182,
"max_line_length": 80,
"avg_line_length": 31.373626373626372,
"alnum_prop": 0.5381786339754816,
"repo_name": "pymir3/pymir3",
"id": "ef028146e8f3063cc82306c0f099c0abccebf0c9",
"size": "5710",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mir3/modules/tool/score2midi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "535278"
},
{
"name": "Shell",
"bytes": "70718"
}
],
"symlink_target": ""
}
|
from django.core.management.base import BaseCommand
from ... import tools
class Command(BaseCommand):
help = 'Grab data from VoxImplant'
def handle(self, *args, **options):
print('Load scenarios...')
tools.scenarios_download()
print('Load applications...')
tools.apps_download()
print('Load rules...')
tools.rules_download()
|
{
"content_hash": "7175f2d885d3860398dcb77171edb868",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 51,
"avg_line_length": 22.764705882352942,
"alnum_prop": 0.627906976744186,
"repo_name": "telminov/django-voximplant",
"id": "a202335c25ad2ecc6c1a412af058a45f6da0cc05",
"size": "403",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "voximplant/management/commands/vox_download.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42058"
}
],
"symlink_target": ""
}
|
import logging
from operator import attrgetter
from telemetry.page import page_measurement
from telemetry.web_perf.metrics import rendering_frame
# These are LatencyInfo component names indicating the various components
# that the input event has travelled through.
# This is when the input event first reaches chrome.
UI_COMP_NAME = 'INPUT_EVENT_LATENCY_UI_COMPONENT'
# This is when the input event was originally created by OS.
ORIGINAL_COMP_NAME = 'INPUT_EVENT_LATENCY_ORIGINAL_COMPONENT'
# This is when the input event was sent from browser to renderer.
BEGIN_COMP_NAME = 'INPUT_EVENT_LATENCY_BEGIN_RWH_COMPONENT'
# This is when the input event has reached swap buffer.
END_COMP_NAME = 'INPUT_EVENT_LATENCY_TERMINATED_FRAME_SWAP_COMPONENT'
class NotEnoughFramesError(page_measurement.MeasurementFailure):
def __init__(self, frame_count):
super(NotEnoughFramesError, self).__init__(
'Only %i frame timestamps were collected ' % frame_count +
'(at least two are required).\n'
'Issues that have caused this in the past:\n' +
'- Browser bugs that prevents the page from redrawing\n' +
'- Bugs in the synthetic gesture code\n' +
'- Page and benchmark out of sync (e.g. clicked element was renamed)\n' +
'- Pages that render extremely slow\n' +
'- Pages that can\'t be scrolled')
def GetInputLatencyEvents(process, timeline_range):
"""Get input events' LatencyInfo from the process's trace buffer that are
within the timeline_range.
Input events dump their LatencyInfo into trace buffer as async trace event
with name "InputLatency". The trace event has a memeber 'data' containing
its latency history.
"""
input_events = []
if not process:
return input_events
for event in process.IterAllAsyncSlicesOfName('InputLatency'):
if event.start >= timeline_range.min and event.end <= timeline_range.max:
for ss in event.sub_slices:
if 'data' in ss.args:
input_events.append(ss)
return input_events
def ComputeInputEventLatency(input_events):
""" Compute the input event latency.
Input event latency is the time from when the input event is created to
  when the resulting page is swap buffered.
  Input events on different platforms use different LatencyInfo components to
  record the creation timestamp. We go through the following component list
to find the creation timestamp:
1. INPUT_EVENT_LATENCY_ORIGINAL_COMPONENT -- when event is created in OS
2. INPUT_EVENT_LATENCY_UI_COMPONENT -- when event reaches Chrome
3. INPUT_EVENT_LATENCY_BEGIN_RWH_COMPONENT -- when event reaches RenderWidget
"""
input_event_latency = []
for event in input_events:
data = event.args['data']
if END_COMP_NAME in data:
end_time = data[END_COMP_NAME]['time']
if ORIGINAL_COMP_NAME in data:
latency = end_time - data[ORIGINAL_COMP_NAME]['time']
elif UI_COMP_NAME in data:
latency = end_time - data[UI_COMP_NAME]['time']
elif BEGIN_COMP_NAME in data:
latency = end_time - data[BEGIN_COMP_NAME]['time']
else:
raise ValueError, 'LatencyInfo has no begin component'
input_event_latency.append(latency / 1000.0)
return input_event_latency
def HasRenderingStats(process):
""" Returns True if the process contains at least one
BenchmarkInstrumentation::*RenderingStats event with a frame.
"""
if not process:
return False
for event in process.IterAllSlicesOfName(
'BenchmarkInstrumentation::MainThreadRenderingStats'):
if 'data' in event.args and event.args['data']['frame_count'] == 1:
return True
for event in process.IterAllSlicesOfName(
'BenchmarkInstrumentation::ImplThreadRenderingStats'):
if 'data' in event.args and event.args['data']['frame_count'] == 1:
return True
return False
class RenderingStats(object):
def __init__(self, renderer_process, browser_process, timeline_ranges):
"""
Utility class for extracting rendering statistics from the timeline (or
    other logging facilities), and providing them in a common format to classes
that compute benchmark metrics from this data.
Stats are lists of lists of numbers. The outer list stores one list per
timeline range.
All *_time values are measured in milliseconds.
"""
assert(len(timeline_ranges) > 0)
# Find the top level process with rendering stats (browser or renderer).
if HasRenderingStats(browser_process):
timestamp_process = browser_process
else:
timestamp_process = renderer_process
self.frame_timestamps = []
self.frame_times = []
self.paint_times = []
self.painted_pixel_counts = []
self.record_times = []
self.recorded_pixel_counts = []
self.rasterize_times = []
self.rasterized_pixel_counts = []
self.approximated_pixel_percentages = []
# End-to-end latency for input event - from when input event is
    # generated to when the resulting page is swap buffered.
self.input_event_latency = []
self.frame_queueing_durations = []
for timeline_range in timeline_ranges:
self.frame_timestamps.append([])
self.frame_times.append([])
self.paint_times.append([])
self.painted_pixel_counts.append([])
self.record_times.append([])
self.recorded_pixel_counts.append([])
self.rasterize_times.append([])
self.rasterized_pixel_counts.append([])
self.approximated_pixel_percentages.append([])
self.input_event_latency.append([])
if timeline_range.is_empty:
continue
self._InitFrameTimestampsFromTimeline(timestamp_process, timeline_range)
self._InitMainThreadRenderingStatsFromTimeline(
renderer_process, timeline_range)
self._InitImplThreadRenderingStatsFromTimeline(
renderer_process, timeline_range)
self._InitInputLatencyStatsFromTimeline(
browser_process, renderer_process, timeline_range)
self._InitFrameQueueingDurationsFromTimeline(
renderer_process, timeline_range)
# Check if we have collected at least 2 frames in every range. Otherwise we
# can't compute any meaningful metrics.
for segment in self.frame_timestamps:
if len(segment) < 2:
raise NotEnoughFramesError(len(segment))
def _InitInputLatencyStatsFromTimeline(
self, browser_process, renderer_process, timeline_range):
latency_events = GetInputLatencyEvents(browser_process, timeline_range)
# Plugin input event's latency slice is generated in renderer process.
latency_events.extend(GetInputLatencyEvents(renderer_process,
timeline_range))
self.input_event_latency[-1] = ComputeInputEventLatency(latency_events)
def _GatherEvents(self, event_name, process, timeline_range):
events = []
for event in process.IterAllSlicesOfName(event_name):
if event.start >= timeline_range.min and event.end <= timeline_range.max:
if 'data' not in event.args:
continue
events.append(event)
events.sort(key=attrgetter('start'))
return events
def _AddFrameTimestamp(self, event):
frame_count = event.args['data']['frame_count']
if frame_count > 1:
raise ValueError('trace contains multi-frame render stats')
if frame_count == 1:
self.frame_timestamps[-1].append(
event.start)
if len(self.frame_timestamps[-1]) >= 2:
self.frame_times[-1].append(round(self.frame_timestamps[-1][-1] -
self.frame_timestamps[-1][-2], 2))
def _InitFrameTimestampsFromTimeline(self, process, timeline_range):
event_name = 'BenchmarkInstrumentation::MainThreadRenderingStats'
for event in self._GatherEvents(event_name, process, timeline_range):
self._AddFrameTimestamp(event)
event_name = 'BenchmarkInstrumentation::ImplThreadRenderingStats'
for event in self._GatherEvents(event_name, process, timeline_range):
self._AddFrameTimestamp(event)
def _InitMainThreadRenderingStatsFromTimeline(self, process, timeline_range):
event_name = 'BenchmarkInstrumentation::MainThreadRenderingStats'
for event in self._GatherEvents(event_name, process, timeline_range):
data = event.args['data']
self.paint_times[-1].append(1000.0 * data['paint_time'])
self.painted_pixel_counts[-1].append(data['painted_pixel_count'])
self.record_times[-1].append(1000.0 * data['record_time'])
self.recorded_pixel_counts[-1].append(data['recorded_pixel_count'])
def _InitImplThreadRenderingStatsFromTimeline(self, process, timeline_range):
event_name = 'BenchmarkInstrumentation::ImplThreadRenderingStats'
for event in self._GatherEvents(event_name, process, timeline_range):
data = event.args['data']
self.rasterize_times[-1].append(1000.0 * data['rasterize_time'])
self.rasterized_pixel_counts[-1].append(data['rasterized_pixel_count'])
if data.get('visible_content_area', 0):
self.approximated_pixel_percentages[-1].append(
round(float(data['approximated_visible_content_area']) /
float(data['visible_content_area']) * 100.0, 3))
else:
self.approximated_pixel_percentages[-1].append(0.0)
def _InitFrameQueueingDurationsFromTimeline(self, process, timeline_range):
try:
events = rendering_frame.GetFrameEventsInsideRange(process,
timeline_range)
new_frame_queueing_durations = [e.queueing_duration for e in events]
self.frame_queueing_durations.append(new_frame_queueing_durations)
except rendering_frame.NoBeginFrameIdException:
logging.warning('Current chrome version does not support the queueing '
'delay metric.')
|
{
"content_hash": "a3dbe4ac8d392a68a6ec315939dc9b01",
"timestamp": "",
"source": "github",
"line_count": 230,
"max_line_length": 79,
"avg_line_length": 42.63478260869565,
"alnum_prop": 0.6984499286151336,
"repo_name": "chromium2014/src",
"id": "bd8c826d440c907e5b6e18975665143056d3d62f",
"size": "9968",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tools/telemetry/telemetry/web_perf/metrics/rendering_stats.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "853"
},
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "1889381"
},
{
"name": "Awk",
"bytes": "8660"
},
{
"name": "C",
"bytes": "39993418"
},
{
"name": "C#",
"bytes": "1132"
},
{
"name": "C++",
"bytes": "220757674"
},
{
"name": "CSS",
"bytes": "973910"
},
{
"name": "Java",
"bytes": "6583410"
},
{
"name": "JavaScript",
"bytes": "20967999"
},
{
"name": "Mercury",
"bytes": "9480"
},
{
"name": "Objective-C",
"bytes": "943237"
},
{
"name": "Objective-C++",
"bytes": "7190130"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "Perl",
"bytes": "674461"
},
{
"name": "Python",
"bytes": "10430892"
},
{
"name": "Rebol",
"bytes": "262"
},
{
"name": "Shell",
"bytes": "1337040"
},
{
"name": "Standard ML",
"bytes": "3705"
},
{
"name": "Tcl",
"bytes": "277091"
},
{
"name": "XSLT",
"bytes": "13493"
},
{
"name": "nesC",
"bytes": "15206"
}
],
"symlink_target": ""
}
|
"""
Requires simplejson; can be downloaded from
http://cheeseshop.python.org/pypi/simplejson
"""
import xmlrpclib
from datetime import datetime
try:
import json
except ImportError:
import simplejson as json
# From xmlrpclib.
SERVER_ERROR = xmlrpclib.SERVER_ERROR
NOT_WELLFORMED_ERROR = xmlrpclib.NOT_WELLFORMED_ERROR
UNSUPPORTED_ENCODING = xmlrpclib.UNSUPPORTED_ENCODING
INVALID_ENCODING_CHAR = xmlrpclib.INVALID_ENCODING_CHAR
INVALID_JSONRPC = xmlrpclib.INVALID_XMLRPC
METHOD_NOT_FOUND = xmlrpclib.METHOD_NOT_FOUND
INVALID_METHOD_PARAMS = xmlrpclib.INVALID_METHOD_PARAMS
INTERNAL_ERROR = xmlrpclib.INTERNAL_ERROR
# Custom errors.
METHOD_NOT_CALLABLE = -32604
# Version constants.
VERSION_PRE1 = 0
VERSION_1 = 1
VERSION_2 = 2
class Fault(xmlrpclib.Fault):
pass
class NoSuchFunction(Fault):
"""
There is no function by the given name.
"""
class JSONRPCEncoder(json.JSONEncoder):
"""
Provide custom serializers for JSON-RPC.
"""
def default(self, obj):
if isinstance(obj, datetime):
return obj.strftime("%Y%m%dT%H:%M:%S")
raise TypeError("%r is not JSON serializable" % (obj,))
def dumps(obj, **kwargs):
try:
version = kwargs.pop("version")
except KeyError:
version = VERSION_PRE1
try:
id = kwargs.pop("id")
except KeyError:
id = None
if isinstance(obj, Exception):
result = None
if version!=VERSION_2:
error = {'fault': obj.__class__.__name__,
'faultCode': obj.faultCode,
'faultString': obj.faultString}
else:
error = {'message': obj.faultString,
'code': obj.faultCode,
'data': ''}
else:
result = obj
error = None
if version == VERSION_PRE1:
if result:
obj = result
else:
obj = error
elif version == VERSION_1:
obj = {"result": result, "error": error, "id": id}
elif version == VERSION_2:
if error:
obj = {"jsonrpc": "2.0", "error": error, "id": id}
else:
obj = {"jsonrpc": "2.0", "result": result, "id": id}
else:
obj = {"result": result, "error": error, "id": id}
return json.dumps(obj, cls=JSONRPCEncoder, **kwargs)
def loads(string, **kws):
unmarshalled = json.loads(string, **kws)
# XXX there's going to need to be some version-conditional code here...
# for versions greater than VERSION_PRE1, we'll have to check for the
# "error" key, not the "fault" key... and then raise if "fault" is not
# None.
if (isinstance(unmarshalled, dict) and "fault" in unmarshalled):
raise Fault(unmarshalled['faultCode'], unmarshalled['faultString'])
if (isinstance(unmarshalled, dict) and "error" in unmarshalled):
if "jsonrpc" in unmarshalled and unmarshalled["jsonrpc"] == "2.0":
raise Fault(unmarshalled["error"]['code'], unmarshalled["error"]['data'])
if unmarshalled['error']:
raise Fault(unmarshalled["error"]['faultCode'], unmarshalled["error"]['faultString'])
return unmarshalled
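# Round-trip sketch: dumps({'answer': 42}, version=VERSION_2, id=1) yields a
# JSON object with "jsonrpc", "result" and "id" keys, and loads() of that
# string returns the whole envelope dict (no "error" member, so no Fault).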
class SimpleParser(object):
buffer = ''
def feed(self, data):
self.buffer += data
def close(self):
self.data = loads(self.buffer)
class SimpleUnmarshaller(object):
def getmethodname(self):
return self.parser.data.get("method")
def getid(self):
return self.parser.data.get("id")
def close(self):
if isinstance(self.parser.data, dict):
return self.parser.data.get("params")
return self.parser.data
def getparser():
parser = SimpleParser()
marshaller = SimpleUnmarshaller()
marshaller.parser = parser
return parser, marshaller
class Transport(xmlrpclib.Transport):
"""
Handles an HTTP transaction to an XML-RPC server.
"""
user_agent = "jsonrpclib.py (by txJSON-RPC)"
def getparser(self):
"""
Get Parser and unmarshaller.
"""
return getparser()
def _preV1Request(method="", params=[], *args):
return dumps({"method": method, "params": params})
def _v1Request(method="", params=[], id="", *args):
return dumps(
{"method": method, "params": params, "id": id})
def _v1Notification(method="", params=[], *args):
return _v1Request(method=method, params=params, id=None)
def _v2Request(method="", params=[], id="", *args):
return dumps({
"jsonrpc": "2.0", "method": method, "params": params, "id": id})
def _v2Notification(method="", params=[], *args):
return _v2Request(method=method, params=params, id=None)
class ServerProxy(xmlrpclib.ServerProxy):
"""
XXX add missing docstring
"""
def __init__(self, uri, transport=Transport(), version=VERSION_PRE1, *args,
**kwds):
xmlrpclib.ServerProxy.__init__(self, uri, transport, *args, **kwds)
self.version = version
def __request(self, *args):
"""
Call a method on the remote server.
XXX Is there any way to indicate that we'd want a notification request
instead of a regular request?
"""
request = self._getVersionedRequest(*args)
# XXX do a check here for id; if null, skip the response
# XXX in order to do this effectively, we might have to change the
# request functions to objects, so that we can get at an id attribute
response = self.__transport.request(
self.__host,
self.__handler,
request,
verbose=self.__verbose
)
if len(response) == 1:
response = response[0]
return response
def _getVersionedRequest(self, *args):
if self.version == VERSION_PRE1:
return _preV1Request(*args)
elif self.version == VERSION_1:
return _v1Request(*args)
elif self.version == VERSION_2:
return _v2Request(*args)
|
{
"content_hash": "9f9521ca4b3907c4902f15966b09a08e",
"timestamp": "",
"source": "github",
"line_count": 211,
"max_line_length": 97,
"avg_line_length": 28.42654028436019,
"alnum_prop": 0.6085361787262421,
"repo_name": "oubiwann/txjsonrpc",
"id": "bf97a991f093fa331333cff2f50844c8349093cb",
"size": "5998",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "txjsonrpc/jsonrpclib.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "5256"
},
{
"name": "Python",
"bytes": "71319"
},
{
"name": "Shell",
"bytes": "3434"
}
],
"symlink_target": ""
}
|
def bag_of_words(text):
"""Returns bag-of-words representation of the input text.
Args:
text: A string containing the text.
Returns:
A dictionary of strings to integers.
"""
bag = {}
for word in text.lower().split():
bag[word] = bag.get(word, 0) + 1
return bag
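# Example: bag_of_words("The cat saw the dog") returns
# {'the': 2, 'cat': 1, 'saw': 1, 'dog': 1}.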
|
{
"content_hash": "ebe9202ab17e3ece315217d15cd0691c",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 59,
"avg_line_length": 22.23076923076923,
"alnum_prop": 0.6401384083044983,
"repo_name": "DasAllFolks/pylang",
"id": "47d9a9bfa92048a45290bece85b68e05997e1a18",
"size": "289",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/core.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "289"
}
],
"symlink_target": ""
}
|
import unittest
import os
from test.aiml_tests.client import TestClient
from programy.config import BrainFileConfiguration
class BasicTestClient(TestClient):
def __init__(self):
TestClient.__init__(self)
def load_configuration(self, arguments):
super(BasicTestClient, self).load_configuration(arguments)
self.configuration.brain_configuration._aiml_files = BrainFileConfiguration(os.path.dirname(__file__), ".aiml", False)
self.configuration.brain_configuration._person2 = os.path.dirname(__file__)+ "/person2.txt"
class Person2AIMLTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
Person2AIMLTests.test_client = BasicTestClient()
def test_person2(self):
response = Person2AIMLTests.test_client.bot.ask_question("test", "TEST PERSON2")
self.assertIsNotNone(response)
self.assertEqual(response, "he or she was going")
|
{
"content_hash": "700891357ca1182f4a14a8cbb0a376a6",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 126,
"avg_line_length": 36.72,
"alnum_prop": 0.7200435729847494,
"repo_name": "Thielak/program-y",
"id": "071ade860fb9260c30f2cb789f22e86e051d0066",
"size": "918",
"binary": false,
"copies": "1",
"ref": "refs/heads/rc",
"path": "src/test/aiml_tests/person2_tests/test_person2_aiml.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "704509"
},
{
"name": "Shell",
"bytes": "1930"
}
],
"symlink_target": ""
}
|
import libopenid.conf
import logging
import django_openid_auth
import desktop.conf
from desktop.lib import security_util
__all__ = ['OPENID_CONFIG', 'OPENID_CREATE_USERS', 'OPENID_SSO_SERVER_URL', 'OPENID_USE_EMAIL_FOR_USERNAME', 'OPENID_IDENTITY_URL_PREFIX']
OPENID_CONFIG = {
# set to 1 to output debugging information
'debug': 1,
}
# openid sso endpoint url
OPENID_SSO_SERVER_URL = libopenid.conf.SERVER_ENDPOINT_URL.get()
OPENID_CREATE_USERS = libopenid.conf.CREATE_USERS_ON_LOGIN.get()
OPENID_USE_EMAIL_FOR_USERNAME = libopenid.conf.USE_EMAIL_FOR_USERNAME.get()
OPENID_IDENTITY_URL_PREFIX = libopenid.conf.IDENTITY_URL_PREFIX.get()
|
{
"content_hash": "97028544578d20174dbaf718871400e4",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 138,
"avg_line_length": 26.32,
"alnum_prop": 0.7462006079027356,
"repo_name": "xq262144/hue",
"id": "b6b7c338854173ec1ed7a435490c0a75db63c85e",
"size": "1429",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "desktop/libs/libopenid/src/libopenid/openid_settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3096"
},
{
"name": "Batchfile",
"bytes": "41710"
},
{
"name": "C",
"bytes": "2692409"
},
{
"name": "C++",
"bytes": "199897"
},
{
"name": "CSS",
"bytes": "521820"
},
{
"name": "Emacs Lisp",
"bytes": "11704"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Go",
"bytes": "6671"
},
{
"name": "Groff",
"bytes": "16669"
},
{
"name": "HTML",
"bytes": "24188238"
},
{
"name": "Java",
"bytes": "575404"
},
{
"name": "JavaScript",
"bytes": "4987047"
},
{
"name": "M4",
"bytes": "1377"
},
{
"name": "Makefile",
"bytes": "144341"
},
{
"name": "Mako",
"bytes": "3052598"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "PLSQL",
"bytes": "13774"
},
{
"name": "PLpgSQL",
"bytes": "3646"
},
{
"name": "Perl",
"bytes": "3499"
},
{
"name": "PigLatin",
"bytes": "328"
},
{
"name": "Python",
"bytes": "44291483"
},
{
"name": "Shell",
"bytes": "44147"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "Thrift",
"bytes": "278712"
},
{
"name": "Visual Basic",
"bytes": "2884"
},
{
"name": "XSLT",
"bytes": "518588"
}
],
"symlink_target": ""
}
|
from setuptools import setup, find_packages
from ratelimit import VERSION
setup(
name='django-ratelimit',
version='.'.join(map(str, VERSION)),
description='Cache-based rate-limiting for Django.',
long_description=open('README.rst').read(),
author='James Socol',
author_email='james@mozilla.com',
url='http://github.com/jsocol/django-ratelimit',
license='BSD',
packages=find_packages(),
include_package_data=True,
    package_data={'': ['README.rst']},
install_requires=['django'],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Environment :: Web Environment :: Mozilla',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
]
)
|
{
"content_hash": "37530862383886aa6c5b91af4834c529",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 71,
"avg_line_length": 33.724137931034484,
"alnum_prop": 0.6288343558282209,
"repo_name": "haoqili/MozSecWorld",
"id": "d07c6c1dc0f97fd26401da810203d9fb60053d40",
"size": "978",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vendor-local/packages/django-ratelimit/setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "35965"
},
{
"name": "Python",
"bytes": "978579"
},
{
"name": "Shell",
"bytes": "1848"
}
],
"symlink_target": ""
}
|
__all__ = [
'map_readers', 'buffered', 'compose', 'chain', 'shuffle',
'ComposeNotAligned', 'firstn', 'xmap_readers'
]
import itertools
import random
from Queue import Queue
from threading import Thread
def map_readers(func, *readers):
"""
    Creates a data reader that outputs the return value of a function, using
    the outputs of each data reader as arguments.
:param func: function to use. The type of func should be (Sample) => Sample
:type: callable
:param readers: readers whose outputs will be used as arguments of func.
:return: the created data reader.
:rtype: callable
"""
def reader():
rs = []
for r in readers:
rs.append(r())
for e in itertools.imap(func, *rs):
yield e
return reader
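# Example: map_readers(lambda a, b: a + b,
#                      lambda: iter([1, 2]), lambda: iter([10, 20]))()
# yields 11, then 22.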
def shuffle(reader, buf_size):
"""
Creates a data reader whose data output is shuffled.
Output from the iterator that created by original reader will be
buffered into shuffle buffer, and then shuffled. The size of shuffle buffer
is determined by argument buf_size.
:param reader: the original reader whose output will be shuffled.
:type reader: callable
:param buf_size: shuffle buffer size.
:type buf_size: int
:return: the new reader whose output is shuffled.
:rtype: callable
"""
def data_reader():
buf = []
for e in reader():
buf.append(e)
if len(buf) >= buf_size:
random.shuffle(buf)
for b in buf:
yield b
buf = []
if len(buf) > 0:
random.shuffle(buf)
for b in buf:
yield b
return data_reader
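# Example: shuffle(lambda: iter(range(10)), buf_size=4)() yields all ten
# numbers, with each successive buffer of up to 4 emitted in a random order.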
def chain(*readers):
"""
Creates a data reader whose output is the outputs of input data
readers chained together.
If input readers output following data entries:
[0, 0, 0]
[1, 1, 1]
[2, 2, 2]
The chained reader will output:
[0, 0, 0, 1, 1, 1, 2, 2, 2]
:param readers: input readers.
:return: the new data reader.
:rtype: callable
"""
def reader():
rs = []
for r in readers:
rs.append(r())
for e in itertools.chain(*rs):
yield e
return reader
class ComposeNotAligned(ValueError):
pass
def compose(*readers, **kwargs):
"""
Creates a data reader whose output is the combination of input readers.
If input readers output following data entries:
(1, 2) 3 (4, 5)
The composed reader will output:
(1, 2, 3, 4, 5)
:param readers: readers that will be composed together.
:param check_alignment: if True, will check if input readers are aligned
correctly. If False, will not check alignment and trailing outputs
will be discarded. Defaults to True.
:type check_alignment: bool
:return: the new data reader.
:raises ComposeNotAligned: outputs of readers are not aligned.
Will not raise when check_alignment is set to False.
"""
check_alignment = kwargs.pop('check_alignment', True)
def make_tuple(x):
if isinstance(x, tuple):
return x
else:
return (x, )
def reader():
rs = []
for r in readers:
rs.append(r())
if not check_alignment:
for outputs in itertools.izip(*rs):
yield sum(map(make_tuple, outputs), ())
else:
for outputs in itertools.izip_longest(*rs):
for o in outputs:
if o is None:
                        # None will not be present if compose is aligned
raise ComposeNotAligned(
"outputs of readers are not aligned.")
yield sum(map(make_tuple, outputs), ())
return reader
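# --- Illustrative sketch (added; not part of the original module) ---
# Scalar samples are promoted to 1-tuples before concatenation, so a tuple
# reader composed with a scalar reader yields one flat tuple per sample:
def _demo_compose():
    r = compose(lambda: iter([(1, 2)]), lambda: iter([3]))
    assert list(r()) == [(1, 2, 3)]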
def buffered(reader, size):
"""
Creates a buffered data reader.
The buffered data reader will read and save data entries into a
buffer. Reading from the buffered data reader will proceed as long
as the buffer is not empty.
:param reader: the data reader to read from.
:type reader: callable
:param size: max buffer size.
:type size: int
:returns: the buffered data reader.
"""
class EndSignal():
pass
end = EndSignal()
def read_worker(r, q):
for d in r:
q.put(d)
q.put(end)
def data_reader():
r = reader()
q = Queue(maxsize=size)
t = Thread(
target=read_worker, args=(
r,
q, ))
t.daemon = True
t.start()
e = q.get()
while e != end:
yield e
e = q.get()
return data_reader
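# --- Illustrative sketch (added; not part of the original module) ---
# Buffering is transparent to the consumer: the background thread changes
# when samples are produced, never what is produced:
def _demo_buffered():
    r = buffered(lambda: iter([1, 2, 3]), size=2)
    assert list(r()) == [1, 2, 3]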
def firstn(reader, n):
"""
Limit the max number of samples that reader could return.
:param reader: the data reader to read from.
:type reader: callable
    :param n: the max number of samples to return.
:type n: int
:return: the decorated reader.
:rtype: callable
"""
    # TODO(yuyang18): If we just drop the reader here, will the resources it
    # opened be cleaned up properly?
def firstn_reader():
for i, item in enumerate(reader()):
if i == n:
break
yield item
return firstn_reader
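# --- Illustrative sketch (added; not part of the original module) ---
# firstn truncates an unbounded stream after n samples:
def _demo_firstn():
    naturals = lambda: itertools.count(0)
    assert list(firstn(naturals, 3)()) == [0, 1, 2]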
class XmapEndSignal():
pass
def xmap_readers(mapper, reader, process_num, buffer_size, order=False):
"""
    Use multiple worker threads to map samples from reader with a mapper
    defined by the user. The mapped samples are buffered (see buffer_size).
:param mapper: a function to map sample.
:type mapper: callable
:param reader: the data reader to read from
:type reader: callable
    :param process_num: number of worker threads that run the mapper
    :type process_num: int
:param buffer_size: max buffer size
:type buffer_size: int
:param order: keep the order of reader
:type order: bool
    :return: the decorated reader
:rtype: callable
"""
end = XmapEndSignal()
# define a worker to read samples from reader to in_queue
def read_worker(reader, in_queue):
for i in reader():
in_queue.put(i)
in_queue.put(end)
# define a worker to read samples from reader to in_queue with order flag
def order_read_worker(reader, in_queue):
in_order = 0
for i in reader():
in_queue.put((in_order, i))
in_order += 1
in_queue.put(end)
# define a worker to handle samples from in_queue by mapper
# and put mapped samples into out_queue
def handle_worker(in_queue, out_queue, mapper):
sample = in_queue.get()
while not isinstance(sample, XmapEndSignal):
r = mapper(sample)
out_queue.put(r)
sample = in_queue.get()
in_queue.put(end)
out_queue.put(end)
# define a worker to handle samples from in_queue by mapper
# and put mapped samples into out_queue by order
def order_handle_worker(in_queue, out_queue, mapper, out_order):
ins = in_queue.get()
while not isinstance(ins, XmapEndSignal):
order, sample = ins
r = mapper(sample)
while order != out_order[0]:
pass
out_queue.put(r)
out_order[0] += 1
ins = in_queue.get()
in_queue.put(end)
out_queue.put(end)
def xreader():
in_queue = Queue(buffer_size)
out_queue = Queue(buffer_size)
out_order = [0]
# start a read worker in a thread
target = order_read_worker if order else read_worker
t = Thread(target=target, args=(reader, in_queue))
t.daemon = True
t.start()
# start several handle_workers
target = order_handle_worker if order else handle_worker
args = (in_queue, out_queue, mapper, out_order) if order else (
in_queue, out_queue, mapper)
workers = []
for i in xrange(process_num):
worker = Thread(target=target, args=args)
worker.daemon = True
workers.append(worker)
for w in workers:
w.start()
sample = out_queue.get()
while not isinstance(sample, XmapEndSignal):
yield sample
sample = out_queue.get()
finish = 1
while finish < process_num:
sample = out_queue.get()
if isinstance(sample, XmapEndSignal):
finish += 1
else:
yield sample
return xreader
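# --- Illustrative sketch (added; not part of the original module) ---
# With order=True the mapped samples come back in input order even though
# several worker threads map concurrently:
def _demo_xmap_readers():
    r = xmap_readers(lambda x: x * x, lambda: iter(range(5)),
                     process_num=2, buffer_size=4, order=True)
    assert list(r()) == [0, 1, 4, 9, 16]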
|
{
"content_hash": "dbe1d8fe8de22d817d36ec0f37913855",
"timestamp": "",
"source": "github",
"line_count": 311,
"max_line_length": 79,
"avg_line_length": 27.488745980707396,
"alnum_prop": 0.5784302257573986,
"repo_name": "yu239/Paddle",
"id": "45a4288751e37b99dd1005ec78f30a98044926ff",
"size": "9159",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "python/paddle/v2/reader/decorator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "260981"
},
{
"name": "C++",
"bytes": "4147051"
},
{
"name": "CMake",
"bytes": "187456"
},
{
"name": "CSS",
"bytes": "21730"
},
{
"name": "Cuda",
"bytes": "624060"
},
{
"name": "Go",
"bytes": "99765"
},
{
"name": "HTML",
"bytes": "8941"
},
{
"name": "JavaScript",
"bytes": "1025"
},
{
"name": "Perl",
"bytes": "11452"
},
{
"name": "Python",
"bytes": "1408875"
},
{
"name": "Shell",
"bytes": "132549"
}
],
"symlink_target": ""
}
|
""" Tests for yatsm.config_parser
"""
import os
import pytest
from yatsm import config_parser
from yatsm.regression.packaged import packaged_regressions
# YATSM: SECTION PARSING
@pytest.fixture(scope='function')
def YATSM_cfg(request):
cfg = {
'YATSM': {
'prediction': packaged_regressions[0],
'refit': {
'prefix': [reg for reg in packaged_regressions],
'prediction': [reg for reg in packaged_regressions]
}
}
}
return cfg
def test_parse_YATSM_config_1(YATSM_cfg):
""" Test retrieval of packaged estimators
"""
for estimator in packaged_regressions:
YATSM_cfg['YATSM']['prediction'] = estimator
config_parser._parse_YATSM_config(YATSM_cfg)
def test_parse_YATSM_config_2(YATSM_cfg):
""" Test retrieval of packaged estimators that don't exist
"""
with pytest.raises(KeyError):
YATSM_cfg['YATSM']['prediction'] = 'not_an_estimator'
config_parser._parse_YATSM_config(YATSM_cfg)
def test_parse_YATSM_config_3(YATSM_cfg):
""" Test parsing of config without "refit" section
"""
del YATSM_cfg['YATSM']['refit']
cfg = config_parser._parse_YATSM_config(YATSM_cfg)
assert 'refit' in cfg['YATSM']
assert cfg['YATSM']['refit']['prefix'] == []
assert cfg['YATSM']['refit']['prediction'] == []
def test_parse_YATSM_config_4(YATSM_cfg):
""" Test parsing of config with "refit" estimators that don't exist
"""
YATSM_cfg['YATSM']['refit']['prediction'] = 'not_an_estimator'
with pytest.raises(KeyError):
config_parser._parse_YATSM_config(YATSM_cfg)
def test_parse_YATSM_config_5(YATSM_cfg):
""" Test parsing of config with misspecified "refit" section
"""
YATSM_cfg['YATSM']['refit']['prefix'] = ['just_one_prefix']
with pytest.raises(KeyError):
config_parser._parse_YATSM_config(YATSM_cfg)
def test_parse_YATSM_config_6(YATSM_cfg):
""" Test parsing of config with "stay_regularized" section
"""
YATSM_cfg['YATSM']['refit']['stay_regularized'] = True
config_parser._parse_YATSM_config(YATSM_cfg)
def test_parse_YATSM_config_7(YATSM_cfg):
""" Test parsing of config with "stay_regularized" section
"""
n = len(YATSM_cfg['YATSM']['refit']['prediction'])
YATSM_cfg['YATSM']['refit']['stay_regularized'] = [True] * n
config_parser._parse_YATSM_config(YATSM_cfg)
# ENVIRONMENT VARIABLE PARSING
def test_get_envvars():
truth = {
'YATSM': {
'algo': 'CCDC',
'jobno': '1'
},
'dataset': {
'dataset': '/tmp/images.csv',
'cache': '/tmp/cache'
}
}
d = {
'YATSM': {
'algo': 'CCDC',
'jobno': '$JOBNO'
},
'dataset': {
'dataset': '$ROOTDIR/images.csv',
'cache': '$ROOTDIR/cache'
}
}
envvars = {
'JOBNO': '1',
'ROOTDIR': '/tmp'
}
# Backup and replace environment
backup = os.environ.copy()
for k in envvars:
os.environ[k] = envvars[k]
expanded = config_parser.expand_envvars(d)
os.environ.update(backup)
assert truth == expanded
|
{
"content_hash": "2c67f172721d0b488a81a2e56acd4a4f",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 71,
"avg_line_length": 27.220338983050848,
"alnum_prop": 0.5933997509339975,
"repo_name": "ceholden/yatsm",
"id": "d214ed751a1ef1b1f223ea2121a7948ec6b10bd4",
"size": "3212",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_config_parser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "72629"
},
{
"name": "Python",
"bytes": "311968"
},
{
"name": "Shell",
"bytes": "2391"
}
],
"symlink_target": ""
}
|
import sys
import latex2mathml.converter
latex_input = sys.argv[1]
mathml_output = latex2mathml.converter.convert(latex_input)
print mathml_output
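# Example shell invocation (illustrative):
#   python main.py '\frac{1}{2}'
# prints the MathML markup for the given LaTeX expression.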
|
{
"content_hash": "ef44b03cce0920c9b1eee05fbf980c6c",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 59,
"avg_line_length": 25,
"alnum_prop": 0.8066666666666666,
"repo_name": "TU-Berlin/mathosphere",
"id": "fece1ec8d70f3872fb8e1d48dabb6418c568e19a",
"size": "150",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pomlp/lib/latex2mathml/main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "392362"
},
{
"name": "Java",
"bytes": "804830"
},
{
"name": "Scala",
"bytes": "4939"
},
{
"name": "TeX",
"bytes": "89"
},
{
"name": "XSLT",
"bytes": "67046"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class TextfontValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="textfont", parent_name="scatter", **kwargs):
super(TextfontValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Textfont"),
data_docs=kwargs.pop(
"data_docs",
"""
color
colorsrc
Sets the source reference on Chart Studio Cloud
for color .
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on Chart Studio Cloud
for family .
size
sizesrc
Sets the source reference on Chart Studio Cloud
for size .
""",
),
**kwargs
)
|
{
"content_hash": "20528e26d20a0528f71127d6386447a6",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 80,
"avg_line_length": 40.52173913043478,
"alnum_prop": 0.5343347639484979,
"repo_name": "plotly/python-api",
"id": "1798fb8cf4a6af8d59ff17947ef2c3f1bd4d02b0",
"size": "1864",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/scatter/_textfont.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import frappe, copy, json
from frappe import _, msgprint
from frappe.utils import cint
import frappe.share
rights = ("read", "write", "create", "delete", "submit", "cancel", "amend",
"print", "email", "report", "import", "export", "set_user_permissions", "share")
def check_admin_or_system_manager(user=None):
if not user: user = frappe.session.user
if ("System Manager" not in frappe.get_roles(user)) and (user!="Administrator"):
frappe.throw(_("Not permitted"), frappe.PermissionError)
def has_permission(doctype, ptype="read", doc=None, verbose=False, user=None):
"""Returns True if user has permission `ptype` for given `doctype`.
If `doc` is passed, it also checks user, share and owner permissions.
Note: if Table DocType is passed, it always returns True.
"""
if not user: user = frappe.session.user
if frappe.is_table(doctype):
if verbose: print "Table type, always true"
return True
meta = frappe.get_meta(doctype)
if ptype=="submit" and not cint(meta.is_submittable):
if verbose: print "Not submittable"
return False
if ptype=="import" and not cint(meta.allow_import):
if verbose: print "Not importable"
return False
if user=="Administrator":
if verbose: print "Administrator"
return True
def false_if_not_shared():
if ptype in ("read", "write", "share", "email", "print"):
shared = frappe.share.get_shared(doctype, user,
["read" if ptype in ("email", "print") else ptype])
if doc:
doc_name = doc if isinstance(doc, basestring) else doc.name
if doc_name in shared:
if verbose: print "Shared"
if ptype in ("read", "write", "share") or meta.permissions[0].get(ptype):
if verbose: print "Is shared"
return True
elif shared:
				# if at least one shared doc of that type, then return True
# this is used in db_query to check if permission on DocType
if verbose: print "Has a shared document"
return True
if verbose: print "Not Shared"
return False
role_permissions = get_role_permissions(meta, user=user, verbose=verbose)
if not role_permissions.get(ptype):
return false_if_not_shared()
perm = True
if doc:
if isinstance(doc, basestring):
doc = frappe.get_doc(meta.name, doc)
owner_perm = user_perm = controller_perm = None
if role_permissions["if_owner"].get(ptype) and ptype!="create":
owner_perm = doc.owner == frappe.session.user
if verbose: print "Owner permission: {0}".format(owner_perm)
# check if user permission
if not owner_perm and role_permissions["apply_user_permissions"].get(ptype):
user_perm = user_has_permission(doc, verbose=verbose, user=user,
user_permission_doctypes=role_permissions.get("user_permission_doctypes", {}).get(ptype) or [])
if verbose: print "User permission: {0}".format(user_perm)
if not owner_perm and not user_perm:
controller_perm = has_controller_permissions(doc, ptype, user=user)
if verbose: print "Controller permission: {0}".format(controller_perm)
# permission true if any one condition is explicitly True or all permissions are undefined (None)
perm = any([owner_perm, user_perm, controller_perm]) or \
all([owner_perm==None, user_perm==None, controller_perm==None])
if not perm:
perm = false_if_not_shared()
if verbose: print "Final Permission: {0}".format(perm)
return perm
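# Illustrative call (requires an initialised frappe site; "ToDo" is a stock
# doctype, used here purely as an example):
#   has_permission("ToDo", ptype="write", user="test@example.com")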
def get_doc_permissions(doc, verbose=False, user=None):
"""Returns a dict of evaluated permissions for given `doc` like `{"read":1, "write":1}`"""
if not user: user = frappe.session.user
if frappe.is_table(doc.doctype):
return {"read":1, "write":1}
meta = frappe.get_meta(doc.doctype)
role_permissions = copy.deepcopy(get_role_permissions(meta, user=user, verbose=verbose))
if not cint(meta.is_submittable):
role_permissions["submit"] = 0
if not cint(meta.allow_import):
role_permissions["import"] = 0
if role_permissions.get("apply_user_permissions"):
		# if user permissions apply to a right and this doc fails them, switch that right off
for ptype in role_permissions:
if role_permissions["apply_user_permissions"].get(ptype) and not user_has_permission(doc, verbose=verbose, user=user,
user_permission_doctypes=role_permissions.get("user_permission_doctypes", {}).get(ptype) or []):
role_permissions[ptype] = 0
# apply owner permissions on top of existing permissions
if doc.owner == frappe.session.user:
role_permissions.update(role_permissions.if_owner)
update_share_permissions(role_permissions, doc, user)
return role_permissions
def update_share_permissions(role_permissions, doc, user):
"""Updates share permissions on `role_permissions` for given doc, if shared"""
share_ptypes = ("read", "write", "share")
permissions_by_share = frappe.db.get_value("DocShare",
{"share_doctype": doc.doctype, "share_name": doc.name, "user": user},
share_ptypes, as_dict=True)
if permissions_by_share:
for ptype in share_ptypes:
if permissions_by_share[ptype]:
role_permissions[ptype] = 1
def get_role_permissions(meta, user=None, verbose=False):
"""Returns dict of evaluated role permissions like `{"read": True, "write":False}`
If user permissions are applicable, it adds a dict of user permissions like
{
// user permissions will apply on these rights
"apply_user_permissions": {"read": 1, "write": 1},
// doctypes that will be applicable for each right
"user_permission_doctypes": {
"read": [
// AND between "DocType 1" and "DocType 2"
["DocType 1", "DocType 2"],
// OR
["DocType 3"]
]
}
"if_owner": {"read": 1, "write": 1}
}
"""
if not user: user = frappe.session.user
cache_key = (meta.name, user)
if not frappe.local.role_permissions.get(cache_key):
perms = frappe._dict({ "apply_user_permissions": {}, "user_permission_doctypes": {}, "if_owner": {} })
user_roles = frappe.get_roles(user)
dont_match = []
has_a_role_with_apply_user_permissions = False
for p in meta.permissions:
if cint(p.permlevel)==0 and (p.role in user_roles):
# apply only for level 0
for ptype in rights:
# build if_owner dict if applicable for this right
perms[ptype] = perms.get(ptype, 0) or cint(p.get(ptype))
if ptype != "set_user_permissions" and p.get(ptype):
perms["apply_user_permissions"][ptype] = (perms["apply_user_permissions"].get(ptype, 1)
and p.get("apply_user_permissions"))
if p.if_owner and p.get(ptype):
perms["if_owner"][ptype] = 1
if p.get(ptype) and not p.if_owner and not p.get("apply_user_permissions"):
dont_match.append(ptype)
if p.apply_user_permissions:
has_a_role_with_apply_user_permissions = True
if p.user_permission_doctypes:
# set user_permission_doctypes in perms
user_permission_doctypes = json.loads(p.user_permission_doctypes)
else:
user_permission_doctypes = get_linked_doctypes(meta.name)
if user_permission_doctypes:
# perms["user_permission_doctypes"][ptype] would be a list of list like [["User", "Blog Post"], ["User"]]
for ptype in rights:
if p.get(ptype):
perms["user_permission_doctypes"].setdefault(ptype, []).append(user_permission_doctypes)
		# if at least one record having both Apply User Permission and If Owner unchecked is found,
# don't match for those rights
for ptype in rights:
if ptype in dont_match:
if perms["apply_user_permissions"].get(ptype):
del perms["apply_user_permissions"][ptype]
if perms["if_owner"].get(ptype):
del perms["if_owner"][ptype]
# if one row has only "Apply User Permissions" checked and another has only "If Owner" checked,
# set Apply User Permissions as checked
# i.e. the case when there is a role with apply_user_permissions as 1, but resultant apply_user_permissions is 0
if has_a_role_with_apply_user_permissions:
for ptype in rights:
if perms["if_owner"].get(ptype) and perms["apply_user_permissions"].get(ptype)==0:
perms["apply_user_permissions"][ptype] = 1
# delete 0 values
for key, value in perms.get("apply_user_permissions").items():
if not value:
del perms["apply_user_permissions"][key]
frappe.local.role_permissions[cache_key] = perms
return frappe.local.role_permissions[cache_key]
def user_has_permission(doc, verbose=True, user=None, user_permission_doctypes=None):
from frappe.defaults import get_user_permissions
user_permissions = get_user_permissions(user)
user_permission_doctypes = get_user_permission_doctypes(user_permission_doctypes, user_permissions)
def check_user_permission(d):
meta = frappe.get_meta(d.get("doctype"))
end_result = False
messages = {}
# check multiple sets of user_permission_doctypes using OR condition
for doctypes in user_permission_doctypes:
result = True
for df in meta.get_fields_to_check_permissions(doctypes):
if (d.get(df.fieldname)
and d.get(df.fieldname) not in user_permissions.get(df.options, [])):
result = False
if verbose:
msg = _("Not allowed to access {0} with {1} = {2}").format(df.options, _(df.label), d.get(df.fieldname))
if d.parentfield:
msg = "{doctype}, {row} #{idx}, ".format(doctype=_(d.doctype),
row=_("Row"), idx=d.idx) + msg
messages[df.fieldname] = msg
end_result = end_result or result
if not end_result and messages:
for fieldname, msg in messages.items():
msgprint(msg)
return end_result
_user_has_permission = check_user_permission(doc)
for d in doc.get_all_children():
_user_has_permission = check_user_permission(d) and _user_has_permission
return _user_has_permission
def has_controller_permissions(doc, ptype, user=None):
"""Returns controller permissions if defined. None if not defined"""
if not user: user = frappe.session.user
methods = frappe.get_hooks("has_permission").get(doc.doctype, [])
if not methods:
return None
for method in methods:
controller_permission = frappe.call(frappe.get_attr(method), doc=doc, ptype=ptype, user=user)
if controller_permission is not None:
return controller_permission
# controller permissions could not decide on True or False
return None
def can_set_user_permissions(doctype, docname=None):
# System Manager can always set user permissions
if frappe.session.user == "Administrator" or "System Manager" in frappe.get_roles():
return True
meta = frappe.get_meta(doctype)
# check if current user has read permission for docname
if docname and not has_permission(doctype, "read", docname):
return False
# check if current user has a role that can set permission
if get_role_permissions(meta).set_user_permissions!=1:
return False
return True
def set_user_permission_if_allowed(doctype, name, user, with_message=False):
if get_role_permissions(frappe.get_meta(doctype), user).set_user_permissions!=1:
add_user_permission(doctype, name, user, with_message)
def add_user_permission(doctype, name, user, with_message=False):
if name not in frappe.defaults.get_user_permissions(user).get(doctype, []):
if not frappe.db.exists(doctype, name):
frappe.throw(_("{0} {1} not found").format(_(doctype), name), frappe.DoesNotExistError)
frappe.defaults.add_default(doctype, name, user, "User Permission")
elif with_message:
msgprint(_("Permission already set"))
def remove_user_permission(doctype, name, user, default_value_name=None):
frappe.defaults.clear_default(key=doctype, value=name, parent=user, parenttype="User Permission",
name=default_value_name)
def clear_user_permissions_for_doctype(doctype):
frappe.defaults.clear_default(parenttype="User Permission", key=doctype)
def can_import(doctype, raise_exception=False):
if not ("System Manager" in frappe.get_roles() or has_permission(doctype, "import")):
if raise_exception:
raise frappe.PermissionError("You are not allowed to import: {doctype}".format(doctype=doctype))
else:
return False
return True
def can_export(doctype, raise_exception=False):
if not ("System Manager" in frappe.get_roles() or has_permission(doctype, "export")):
if raise_exception:
raise frappe.PermissionError("You are not allowed to export: {doctype}".format(doctype=doctype))
else:
return False
return True
def apply_user_permissions(doctype, ptype, user=None):
"""Check if apply_user_permissions is checked for a doctype, perm type, user combination"""
role_permissions = get_role_permissions(frappe.get_meta(doctype), user=user)
return role_permissions.get("apply_user_permissions", {}).get(ptype)
def get_user_permission_doctypes(user_permission_doctypes, user_permissions):
"""returns a list of list like [["User", "Blog Post"], ["User"]]"""
if cint(frappe.db.get_single_value("System Settings", "ignore_user_permissions_if_missing")):
# select those user permission doctypes for which user permissions exist!
user_permission_doctypes = [list(set(doctypes).intersection(set(user_permissions.keys())))
for doctypes in user_permission_doctypes]
if len(user_permission_doctypes) > 1:
# OPTIMIZATION
# if intersection exists, use that to reduce the amount of querying
# for example, [["Blogger", "Blog Category"], ["Blogger"]], should only search in [["Blogger"]] as the first and condition becomes redundant
common = user_permission_doctypes[0]
for i in xrange(1, len(user_permission_doctypes), 1):
common = list(set(common).intersection(set(user_permission_doctypes[i])))
if not common:
break
if common:
# is common one of the user_permission_doctypes set?
for doctypes in user_permission_doctypes:
# are these lists equal?
if set(common) == set(doctypes):
user_permission_doctypes = [common]
break
return user_permission_doctypes
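# --- Illustrative sketch (added; hypothetical doctype names) ---
# Pure-python illustration of the intersection optimisation above:
def _demo_doctype_reduction():
	sets = [["Blogger", "Blog Category"], ["Blogger"]]
	common = set(sets[0]).intersection(set(sets[1]))
	# "Blogger" alone already equals one of the AND-sets, so the longer
	# set is redundant and the search collapses to [["Blogger"]]
	assert any(common == set(s) for s in sets)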
def reset_perms(doctype):
"""Reset permissions for given doctype."""
from frappe.desk.notifications import delete_notification_count_for
delete_notification_count_for(doctype)
frappe.db.sql("""delete from tabDocPerm where parent=%s""", doctype)
frappe.reload_doc(frappe.db.get_value("DocType", doctype, "module"),
"DocType", doctype, force=True)
def get_linked_doctypes(dt):
return list(set([dt] + [d.options for d in
frappe.get_meta(dt).get("fields", {
"fieldtype":"Link",
"ignore_user_permissions":("!=", 1),
"options": ("!=", "[Select]")
})
]))
|
{
"content_hash": "5cc5b9ddb50db8a6b9c3a28ecf3a197c",
"timestamp": "",
"source": "github",
"line_count": 403,
"max_line_length": 142,
"avg_line_length": 35.32258064516129,
"alnum_prop": 0.706989813839129,
"repo_name": "elba7r/frameworking",
"id": "e76603193b035bc34d7dfa8946bef41f3a3ced91",
"size": "14336",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "frappe/permissions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "336604"
},
{
"name": "HTML",
"bytes": "198854"
},
{
"name": "JavaScript",
"bytes": "1262443"
},
{
"name": "Python",
"bytes": "1614773"
},
{
"name": "Shell",
"bytes": "517"
}
],
"symlink_target": ""
}
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "MovingMedian", cycle_length = 5, transform = "Difference", sigma = 0.0, exog_count = 0, ar_order = 0);
|
{
"content_hash": "0a45ebe63ae9b38436988d102f732711",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 167,
"avg_line_length": 38.142857142857146,
"alnum_prop": 0.7078651685393258,
"repo_name": "antoinecarme/pyaf",
"id": "d49b7b4a37e199c34035aa67ff1d521468e3b2f3",
"size": "267",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/artificial/transf_Difference/trend_MovingMedian/cycle_5/ar_/test_artificial_128_Difference_MovingMedian_5__0.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
}
|
from flask.ext.mongoset import Model
from conftest import BaseModelTest, app, mongo
class NewModel(Model):
__collection__ = 'notdbrefstests'
inc_id = True
indexes = ['id', 'name']
class InsideModel(Model):
__collection__ = 'inside'
class TestModelDecorator(BaseModelTest):
def setUp(self):
app.config['MONGODB_HOST'] = "localhost"
app.config['MONGODB_PORT'] = 27017
app.config['MONGODB_DATABASE'] = "testdb"
app.config['MONGODB_AUTOREF'] = False
app.config['MONGODB_AUTOINCREMENT'] = True
app.config['TESTING'] = True
mongo.init_app(app)
self.app = app
self.mongo = mongo
self.model = NewModel
self.insideModel = InsideModel
self.mongo.register(self.model)
def test_autoincrement(self):
result = self.model.create(name='Hello')
assert result._int_id == 1
def test_handle_auto_object_inside_a_list(self):
parent = self.model.get_or_create({'test': 'hellotest'})
child = self.model.create(test="testing",
parents=[parent], parent=parent)
child = self.model.query.find_one({"test": "testing"})
assert child.parents[0].test == "hellotest"
assert child.parents[0].__class__.__name__ == self.model.__name__
assert isinstance(child, self.model)
assert isinstance(child.parents[0], self.model)
parent = self.model.create(test="test_two")
child = child.update_with_reload({
'parents': [parent]})
assert child.parents[0].test == "test_two"
def test_other_object_inside(self):
child = self.insideModel({'inside': True,
'_ns': self.insideModel.__collection__})
parent = self.model.create({'test': 'hellotest',
'children': [child], 'names': ['ddd']})
assert isinstance(parent.children[0], self.insideModel)
assert self.insideModel.query.find_one() is None
|
{
"content_hash": "8cae4a81ce2c739287f5dcba49867233",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 75,
"avg_line_length": 35,
"alnum_prop": 0.5916256157635468,
"repo_name": "Fibio/flask-mongoset",
"id": "6c7c7c5534a389df480563aa60b80d8bb17852cc",
"size": "2030",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_models_without_DBrefs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "60594"
}
],
"symlink_target": ""
}
|
from attest import Tests, assert_hook
samples = Tests()
@samples.test
def number_and_sequence():
number = 2 + 3
sequence = [1, 2, 3]
assert number in sequence and isinstance(number, float)
if __name__ == '__main__':
samples.main()
|
{
"content_hash": "b4aabda40c76711c0a078ee610e94d10",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 59,
"avg_line_length": 20.833333333333332,
"alnum_prop": 0.64,
"repo_name": "dag/attest",
"id": "e21702a4a98b498412707345b0ee895138790fe4",
"size": "250",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sampletests.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "131193"
}
],
"symlink_target": ""
}
|
from distutils.core import setup
import subprocess
import sys
import os
path = os.path.dirname(os.path.realpath(__file__))
submodule_path = os.path.join(path, "spacescout_admin")
subprocess.call(["git", "submodule", "init"], cwd=path)
subprocess.call(["git", "submodule", "update"], cwd=path)
subprocess.call(["git", "submodule", "foreach", "git", "pull", "origin", "master"], cwd=path)
setup(name='SpaceScout-Admin-Wrapper',
version='1.0',
)
subprocess.call(["pip", "install", "-r", "requirements.txt"], cwd=submodule_path)
|
{
"content_hash": "881e434743eaacae8b8668a1e92a593d",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 93,
"avg_line_length": 28.263157894736842,
"alnum_prop": 0.6871508379888268,
"repo_name": "vegitron/spacescout_admin_wrapper",
"id": "707975dfd85a21067655bc4e993a375d23536554",
"size": "560",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "560"
}
],
"symlink_target": ""
}
|
"""
Common code and definitions used by Link extractors (located in
scrapy.contrib.linkextractor).
"""
import re
from six.moves.urllib.parse import urlparse
from scrapy.selector.csstranslator import ScrapyHTMLTranslator
from scrapy.utils.url import canonicalize_url, url_is_from_any_domain, url_has_any_extension
from scrapy.utils.misc import arg_to_iter
# common file extensions that are not followed if they occur in links
IGNORED_EXTENSIONS = [
# images
'mng', 'pct', 'bmp', 'gif', 'jpg', 'jpeg', 'png', 'pst', 'psp', 'tif',
'tiff', 'ai', 'drw', 'dxf', 'eps', 'ps', 'svg',
# audio
'mp3', 'wma', 'ogg', 'wav', 'ra', 'aac', 'mid', 'au', 'aiff',
# video
'3gp', 'asf', 'asx', 'avi', 'mov', 'mp4', 'mpg', 'qt', 'rm', 'swf', 'wmv',
'm4a',
# office suites
'xls', 'xlsx', 'ppt', 'pptx', 'doc', 'docx', 'odt', 'ods', 'odg', 'odp',
# other
'css', 'pdf', 'exe', 'bin', 'rss', 'zip', 'rar',
]
_re_type = type(re.compile("", 0))
_matches = lambda url, regexs: any((r.search(url) for r in regexs))
_is_valid_url = lambda url: url.split('://', 1)[0] in set(['http', 'https', 'file'])
class FilteringLinkExtractor(object):
_csstranslator = ScrapyHTMLTranslator()
def __init__(self, link_extractor, allow, deny, allow_domains, deny_domains,
restrict_xpaths, canonicalize, deny_extensions, restrict_css):
self.link_extractor = link_extractor
self.allow_res = [x if isinstance(x, _re_type) else re.compile(x) for x in arg_to_iter(allow)]
self.deny_res = [x if isinstance(x, _re_type) else re.compile(x) for x in arg_to_iter(deny)]
self.allow_domains = set(arg_to_iter(allow_domains))
self.deny_domains = set(arg_to_iter(deny_domains))
self.restrict_xpaths = tuple(arg_to_iter(restrict_xpaths))
self.restrict_xpaths += tuple(map(self._csstranslator.css_to_xpath,
arg_to_iter(restrict_css)))
self.canonicalize = canonicalize
if deny_extensions is None:
deny_extensions = IGNORED_EXTENSIONS
self.deny_extensions = set(['.' + e for e in arg_to_iter(deny_extensions)])
def _link_allowed(self, link):
if not _is_valid_url(link.url):
return False
if self.allow_res and not _matches(link.url, self.allow_res):
return False
if self.deny_res and _matches(link.url, self.deny_res):
return False
parsed_url = urlparse(link.url)
if self.allow_domains and not url_is_from_any_domain(parsed_url, self.allow_domains):
return False
if self.deny_domains and url_is_from_any_domain(parsed_url, self.deny_domains):
return False
if self.deny_extensions and url_has_any_extension(parsed_url, self.deny_extensions):
return False
return True
def matches(self, url):
if self.allow_domains and not url_is_from_any_domain(url, self.allow_domains):
return False
if self.deny_domains and url_is_from_any_domain(url, self.deny_domains):
return False
allowed = [regex.search(url) for regex in self.allow_res] if self.allow_res else [True]
denied = [regex.search(url) for regex in self.deny_res] if self.deny_res else []
return any(allowed) and not any(denied)
def _process_links(self, links):
links = [x for x in links if self._link_allowed(x)]
if self.canonicalize:
for link in links:
link.url = canonicalize_url(urlparse(link.url))
links = self.link_extractor._process_links(links)
return links
def _extract_links(self, *args, **kwargs):
return self.link_extractor._extract_links(*args, **kwargs)
|
{
"content_hash": "2bb24660e815ac137adbfd63a7317bdb",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 102,
"avg_line_length": 38.19,
"alnum_prop": 0.6203194553548049,
"repo_name": "Partoo/scrapy",
"id": "227d79b46a6c53271ce1c0174fe52604769669f4",
"size": "3819",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "scrapy/linkextractor.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Groff",
"bytes": "2008"
},
{
"name": "HTML",
"bytes": "1809"
},
{
"name": "Python",
"bytes": "1244620"
},
{
"name": "Shell",
"bytes": "673"
}
],
"symlink_target": ""
}
|
from operator import itemgetter, attrgetter
from webapp.models.consts import ADMINS
from webapp.models.notify import Notify
from libs import doubandb, doubanfs, Employee, doubanmc, store, User
from datetime import datetime, timedelta
class Answer(object):
def __init__(self, id, blog_id, question_id, author_id, rtime):
self.id = str(id)
self.blog_id = str(blog_id)
self.question_id = str(question_id)
self.author_id = str(author_id)
self.rtime = rtime
@classmethod
def get(cls, id):
r = store.execute("select id, blog_id, question_id, author_id, rtime from me_answer where id=%s", id)
if r:
return cls(*r[0])
@classmethod
def get_by_question(cls, question_id):
r = store.execute("select id, blog_id, question_id, author_id, rtime from me_answer"
" where question_id=%s", question_id)
if r:
return cls(*r[0])
@classmethod
def num_by_card(cls, user_id):
r = store.execute("select count(id) from me_answer where author_id=%s", user_id)
return r[0][0]
@classmethod
def new(cls, question_id, author_id, content, filename='', ftype=''):
q = Question.get(question_id)
if q and q.user_id == author_id:
blog_id = '0'
store.execute("insert into me_answer(question_id, author_id, blog_id)"
" values(%s,%s,%s)", (question_id, author_id, blog_id))
id = store.get_cursor(table="me_answer").lastrowid
from webapp.models.blog import Blog
blog_id = Blog.new(author_id, Blog.TYPE_BLOG, '', content, filename, ftype, extra={'question_id':q.id})
store.execute("update me_answer set blog_id=%s, rtime=rtime where id=%s", (blog_id, id))
store.commit()
Notify.new(q.author_id, q.user_id, Notify.TYPE_ANSWER, extra={"question_id":q.id, "blog_id":blog_id})
return id
@property
def author(self):
return User(self.author_id)
@property
def author_card(self):
from webapp.models.card import Card
return Card.get(self.author_id)
@property
def blog(self):
from webapp.models.blog import Blog
return Blog.get(self.blog_id)
@property
    def card(self):
        from webapp.models.card import Card
        # Answer has no user_id attribute of its own, so the original
        # Card.get(self.user_id) raised AttributeError; resolving the owning
        # question's card here is an assumption about the intent.
        question = Question.get(self.question_id)
        return Card.get(question.user_id) if question else None
class Question(object):
FLAG_ANONYMOUS = 'A'
FLAG_NORMAL = 'N'
def __init__(self, id, content, user_id, author_id, flag, rtime):
self.id = str(id)
self.content = content
self.user_id = str(user_id)
self.author_id = str(author_id)
self.flag = str(flag)
self.rtime = rtime
@classmethod
def get(cls, id):
r = store.execute("select id, content, user_id, author_id, flag, rtime from me_question where id=%s", id)
if r:
return cls(*r[0])
@classmethod
def gets_by_card(cls, user_id):
rs = store.execute("select id, content, user_id, author_id, flag, rtime from me_question"
" where user_id=%s order by rtime desc", user_id)
if rs:
return [cls(*r) for r in rs]
return []
@property
def answer(self):
return Answer.get_by_question(self.id)
@property
def is_anonymous(self):
return self.flag == self.FLAG_ANONYMOUS
@property
def title(self):
return self.content.replace("?","").replace("?", "") + "?"
@classmethod
def new(cls, user_id, author_id, content, anonymous):
if user_id != author_id:
flag = anonymous and cls.FLAG_ANONYMOUS or cls.FLAG_NORMAL
store.execute("insert into me_question(user_id, author_id, content, flag)"
" values(%s,%s,%s,%s)", (user_id, author_id, content, flag))
id = store.get_cursor(table="me_question").lastrowid
store.commit()
Notify.new(user_id, author_id, Notify.TYPE_QUESTION, extra={"question_id":id})
return id
@property
def author(self):
return User(self.author_id)
@property
def card(self):
from webapp.models.card import Card
return Card.get(self.user_id)
@property
def author_card(self):
from webapp.models.card import Card
return Card.get(self.author_id)
|
{
"content_hash": "75f18944850ea9dccf42edeb410dfa64",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 115,
"avg_line_length": 33.84496124031008,
"alnum_prop": 0.5968850206138342,
"repo_name": "liangsun/me",
"id": "f05c105845b77f26e4424ddb87d70a745f5bb4da",
"size": "4413",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "webapp/models/question.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "120029"
},
{
"name": "JavaScript",
"bytes": "575915"
},
{
"name": "Python",
"bytes": "218405"
}
],
"symlink_target": ""
}
|
'''@author: Difan Zhang <tifan@ifanr.com>
@license: New BSD License
@see: README.rst
A simple Flask based Event API handler.
'''
from flask import Flask, request
from flask.ext.restful import Api, Resource
import pprint
app = Flask(__name__)
api = Api(app)
class EventAPIDemo(Resource):
def post(self):
return message_handler(request.json), 200
def message_handler(msg):
pprint.pprint(msg)
return {'message_type': 'text',
'payload': 'Yay, you said %s' % msg['message_content']}
api.add_resource(EventAPIDemo, '/demo')
if __name__ == "__main__":
app.run()
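# Example interaction (illustrative; only the fields consumed above are used):
#   curl -X POST http://localhost:5000/demo \
#        -H "Content-Type: application/json" \
#        -d '{"message_content": "hi"}'
#   -> {"message_type": "text", "payload": "Yay, you said hi"}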
|
{
"content_hash": "4498acd2bb2002374847a652c85ef79f",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 65,
"avg_line_length": 17.38235294117647,
"alnum_prop": 0.6700507614213198,
"repo_name": "ifanrx/socialbase-eventapi",
"id": "351ea0e2e3e556c65d46290bb213efd8ffe6c6da",
"size": "690",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "690"
}
],
"symlink_target": ""
}
|
import pygame
"""
Screens are statically rendered with no interactivity
"""
PAUSE_COLOR = (0,0,0)
PAUSE_ALPHA = 150
class PauseScreen(object):
def __init__(self, config, enabled=False):
self.enabled = enabled
def doFrame(self, screen, delta, events):
if self.enabled:
surface = pygame.Surface(screen.get_size())
surface.fill(PAUSE_COLOR)
surface.set_alpha(PAUSE_ALPHA)
screen.blit(surface, (0,0))
COMPLETE_COLOR = (0, 255, 0)
COMPLETE_ALPHA = 150
class CompleteScreen(object):
def __init__(self, config, enabled=False):
self.enabled = enabled
def doFrame(self, screen, delta, events):
if self.enabled:
surface = pygame.Surface(screen.get_size())
surface.fill(COMPLETE_COLOR)
surface.set_alpha(COMPLETE_ALPHA)
screen.blit(surface, (0,0))
GAME_OVER_COLOR = (0, 0, 0)
GAME_OVER_ALPHA = 150
class GameOverScreen(object):
def __init__(self, config, enabled=False):
self.enabled = enabled
def doFrame(self, screen, delta, events):
if self.enabled:
surface = pygame.Surface(screen.get_size())
surface.fill(GAME_OVER_COLOR)
surface.set_alpha(GAME_OVER_ALPHA)
screen.blit(surface, (0,0))
|
{
"content_hash": "7631f214ef68083bf58bc13fdc1f3a4c",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 55,
"avg_line_length": 27.76595744680851,
"alnum_prop": 0.61455938697318,
"repo_name": "Desolace/LudumDare32",
"id": "c2bfb48b8c317840f224202368c7836c4c8583d7",
"size": "1305",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "screens.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "76217"
}
],
"symlink_target": ""
}
|
from django.contrib.sitemaps import Sitemap
from django.conf import settings
from ietf.liaisons.models import LiaisonDetail
if settings.USE_DB_REDESIGN_PROXY_CLASSES:
from ietf.liaisons.proxy import LiaisonDetailProxy as LiaisonDetail
class LiaisonMap(Sitemap):
changefreq = "never"
def items(self):
return LiaisonDetail.objects.all()
def location(self, obj):
return "/liaison/%d/" % obj.detail_id
def lastmod(self, obj):
return obj.last_modified_date
|
{
"content_hash": "1ee5c32644a8994fe705821f5c808d6d",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 71,
"avg_line_length": 32.266666666666666,
"alnum_prop": 0.7541322314049587,
"repo_name": "mcr/ietfdb",
"id": "156ffed4c45c1d807cc4b31519a7c85cd0d913b7",
"size": "539",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ietf/liaisons/sitemaps.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "239198"
},
{
"name": "JavaScript",
"bytes": "450755"
},
{
"name": "Perl",
"bytes": "3223"
},
{
"name": "Python",
"bytes": "10286676"
},
{
"name": "Ruby",
"bytes": "3468"
},
{
"name": "Shell",
"bytes": "39950"
},
{
"name": "TeX",
"bytes": "23944"
}
],
"symlink_target": ""
}
|
import sys
import os
import glob
import yaml
from string import Template
import tempfile
import subprocess
template="""format_version: '1.0'
input_files: '$target'
properties:
- property_file: ../properties/termination.prp
expected_verdict: true
"""
tasks = sys.argv[1:]
for task in tasks:
with open(os.path.join("tasks",task)) as file:
task_info = yaml.load(file, Loader=yaml.FullLoader)
name = os.path.basename(task[:-4])
task_c = os.path.join("tasks",
os.path.dirname(task),
task_info['input_files'])
with tempfile.TemporaryDirectory() as tmp_dir:
goto_file = "%s/%s.goto" % (tmp_dir,name)
instr_file = "%s/%s.instr.c" % (tmp_dir,name)
target_file = "tasks/bitprecise/%s.c" % name
target_yml = "tasks/bitprecise/%s.yml" % name
subprocess.call(['goto-cc', task_c, '-c', '-o', goto_file])
subprocess.call(['goto-instrument',
'--dump-c',
'-signed-overflow-check',
goto_file,
instr_file])
subprocess.call(['gcc', '-E', '-P', "-I.", instr_file, '-o', target_file])
subst = dict(target = ("%s.c" % name))
out = open(target_yml, "w")
out.write(Template(template).substitute(subst))
out.close()
print(target_yml)
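# Example invocation (hypothetical task file name):
#   python overflow.py loops/count_up.yml
# compiles the task's input file to a goto-binary, instruments it with
# signed-overflow checks, and emits tasks/bitprecise/count_up.c plus the
# matching tasks/bitprecise/count_up.yml printed above.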
|
{
"content_hash": "9d4af928a2b2e6704fd67f94d1f91f06",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 86,
"avg_line_length": 33.54545454545455,
"alnum_prop": 0.5203252032520326,
"repo_name": "zkincaid/duet",
"id": "b686fead682920a62f15ba68fe84f92c71c06740",
"size": "1492",
"binary": false,
"copies": "1",
"ref": "refs/heads/modern",
"path": "bench/overflow.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "1212"
},
{
"name": "C",
"bytes": "4122992"
},
{
"name": "M4",
"bytes": "504"
},
{
"name": "Makefile",
"bytes": "1499"
},
{
"name": "OCaml",
"bytes": "1740191"
},
{
"name": "Python",
"bytes": "25649"
},
{
"name": "SWIG",
"bytes": "138985"
},
{
"name": "Shell",
"bytes": "7552"
}
],
"symlink_target": ""
}
|
import sys
from pymatic.scripting import Automation, Settings, NaiveInterpreter, ScriptFiles
def test1():
auto = Automation(Settings(300))
auto.startapp("gedit")
auto.alttab()
auto.say("Hello World!")
auto.close()
def test2():
sf = ScriptFiles("../scripts")
sf.runscriptbyfirstname("helloworlds", Settings())
def test3():
script = """
msgbox("hello world")
msgbox("hi there")
"""
interp = NaiveInterpreter(Settings())
interp.run_this(script)
interp.close()
def test4():
# TODO An environment variable for where to look for files
settings = Settings()
sf = ScriptFiles("../scripts")
sf.runscriptbyfirstname("startcgoban", settings)
def test5():
# nonfunctional test
# login
settings = Settings()
sf = ScriptFiles("../scripts")
print("starting in the background...")
sf.runscriptbyfirstname("startcgoban", settings)
print("started? alt tab now")
auto = Automation(settings)
auto.alttab()
print("sending a click")
emu = auto.getemu()
emu.move(100, 500)
emu.click()
auto.close()
def test6():
settings = Settings()
sf = ScriptFiles("../scripts")
print("Starting Cgoban...")
sf.runscriptbyfirstname("startcgoban_xvfb", settings)
sf.runscriptbyfirstname("login", settings)
sf.runscriptbyfirstname("killxvfb", settings)
def main():
if len(sys.argv) > 1:
scriptfirstname = sys.argv[1].strip()
settings = Settings()
sf = ScriptFiles("../scripts")
sf.runscriptbyfirstname(scriptfirstname, settings)
else:
test1()
#test2()
#test3()
#test4()
#test5()
#test6()
if __name__ == "__main__":
main()
|
{
"content_hash": "30efe23ed3510ce40e3c7ab222b055c0",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 81,
"avg_line_length": 25.115942028985508,
"alnum_prop": 0.621465666474322,
"repo_name": "xyproto/pymatic",
"id": "35aa2cd1b189b9cff8efce539825b1d227094646",
"size": "1796",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/main.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
from django import forms
from django.contrib.contenttypes.models import ContentType
from .models import Comment
class CommentForm(forms.ModelForm):
class Meta:
model = Comment
fields = [
"name", "email", "website", "comment"
]
def __init__(self, *args, **kwargs):
self.request = kwargs.pop("request", None)
self.obj = kwargs.pop("obj")
self.user = kwargs.pop("user")
super().__init__(*args, **kwargs)
if self.user is not None and not self.user.is_anonymous:
del self.fields["name"]
del self.fields["email"]
del self.fields["website"]
def save(self, commit=True):
comment = super().save(commit=False)
comment.ip_address = self.request.META.get("REMOTE_ADDR", None)
comment.content_type = ContentType.objects.get_for_model(self.obj)
comment.object_id = self.obj.pk
if self.user is not None and not self.user.is_anonymous:
comment.author = self.user
if commit:
comment.save()
return comment
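# Illustrative usage in a view (hypothetical names; request/obj/user are the
# extra kwargs popped in __init__ above):
#   form = CommentForm(request.POST, request=request, obj=article, user=request.user)
#   if form.is_valid():
#       comment = form.save()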
|
{
"content_hash": "fde0d1a68dd88d52fc685ca531d11290",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 74,
"avg_line_length": 32.294117647058826,
"alnum_prop": 0.5983606557377049,
"repo_name": "pinax/pinax-comments",
"id": "ef76f97e849927f390eab3d581bdafecbd5ac34f",
"size": "1098",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pinax/comments/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "284"
},
{
"name": "Makefile",
"bytes": "120"
},
{
"name": "Python",
"bytes": "31075"
}
],
"symlink_target": ""
}
|
"""minimail URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.views.generic import TemplateView
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^templates/', include('template_management.urls')),
url(r'^subscribers/', include('subscriber_management.urls')),
url(r'^campaigns/', include('campaign_management.urls')),
url(r'^user/', include('user_management.urls')),
url(r'^analytics/', include('analytics_management.urls')),
url(r'^', include('user_management.urls')),
]
|
{
"content_hash": "71f7a1c6cb7c291467de7745aa93f316",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 79,
"avg_line_length": 40.51724137931034,
"alnum_prop": 0.6961702127659575,
"repo_name": "minimail/minimail",
"id": "db639c1b8a6e0ed50eca42157b84ba070527eff9",
"size": "1175",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "minimail/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10251"
},
{
"name": "HTML",
"bytes": "129381"
},
{
"name": "Makefile",
"bytes": "697"
},
{
"name": "Python",
"bytes": "122173"
},
{
"name": "Shell",
"bytes": "378"
}
],
"symlink_target": ""
}
|
import os
import fnmatch
from multiprocessing import Process
import process_cup
# NOTE: out_dir and zshift were used in the body and documented below but
# were missing from the signature; the defaults here are assumptions.
def process_set(cups_dir, set_tag, idx_start, idx_stop,
                out_dir=".", zshift=0.0, sym_Y=False):
"""
Process all shots for both collimators for a given cup tag
Parameters
----------
cups_dir: string
location of the directories with shots for all cups for both collimators
set_tag: string
set tag (R8O3IL or similar)
idx_start: int
start cup index
idx_stop: int
stop cup index, inclusive! So cups would be processed in the range [start, stop+1)
out_dir: string
output directory
zshift: float
        cup Z shift relative to shot, mm
    sym_Y: bool
        Y symmetrization flag, passed through to process_cup.process_cup
    """
sy = sym_Y
pps = []
for k in range(idx_start, idx_stop + 1):
cup_tag = "{}{:02d}".format(set_tag, k)
p = Process(target=process_cup.process_cup, args=(cups_dir, cup_tag, out_dir, zshift, sy)) # calls process_cup.process_cup(cups_dir, cup_tag, out_dir, zshift, sy)
p.start()
pps.append(p)
for p in pps:
p.join()
if __name__ == "__main__":
process_set("/home/sphinx/gcloud", "R8O1IS", 1, 9)
process_set("/home/sphinx/gcloud", "R8O2IM", 1, 10)
process_set("/home/sphinx/gcloud", "R8O3IL", 1, 9)
|
{
"content_hash": "0a5c3d71e9e224fb7d0224cc5a37529e",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 170,
"avg_line_length": 24.94,
"alnum_prop": 0.6110665597433841,
"repo_name": "Iwan-Zotow/runEGS",
"id": "dc15aa5e731ccd14b8959ca6fddb9c8845e649b3",
"size": "1266",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "XcVV/dmax_set.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "12348489"
},
{
"name": "Python",
"bytes": "205708"
}
],
"symlink_target": ""
}
|
import sys
from Calculation_variable import Calculation_variable
#Check Python version and import appropriate tkinter module
if sys.version_info.major == 3:
from tkinter import *
else:
from Tkinter import *
class Std_button(Button):
"""Declare button with sandard hight and width"""
def __init__(self, master=None, cnf={}, **kw):
Button.__init__(self, master, kw)
self.config(height=5, width=12)
class Tall_button(Button):
"""Declare button with sandard hight and width"""
def __init__(self, master=None, cnf={}, **kw):
Button.__init__(self, master, kw)
self.config(height=11, width=12)
class Wide_button(Button):
"""Declare button with sandard hight and width"""
def __init__(self, master=None, cnf={}, **kw):
Button.__init__(self, master, kw)
self.config(height=5, width=26)
def is_integer_value(string_value):
"""
This will assume a value is a float only if its value after the decimal place
is above 0. Also assumes only the float, self.total, will ever get passed to it
"""
temp_index = string_value.index('.')
temp_value = string_value[temp_index+1:]
return int(temp_value) == 0
# from inspect import getsource
# print getsource(Button)
#CONSTS
DIVIDE = u"\u00F7"
MULTIPLY= u"\u00D7"
class App():
def __init__(self, master):
self.master = master
self.master.geometry("405x565")
self.master.resizable(width=False, height=False)
self.total = 0.0
self.next_input = ""
self.operator_value = None
self.is_new_calculation = True #Basically keeps track of whether equals has been pressed
self.display_variable = StringVar()
self.calculation_variable = Calculation_variable()
display_frame = Frame(master)
self.total_display = Entry(display_frame, textvariable=self.display_variable, width=19) #width is number of characters
self.total_display.config(font=('times', 29), justify=RIGHT, borderwidth=5, relief=FLAT)
self.total_display.pack()
self.display_variable.set("0")
self.total_calcuation_display = Entry(display_frame, textvariable=self.calculation_variable, width=24) #width is number of characters
self.total_calcuation_display.config(font=('times', 24), justify=RIGHT, borderwidth=5, relief=FLAT)
self.total_calcuation_display.pack()
self.calculation_variable.set("")
        #Basically, whatever gets typed in goes into the calculation display...
#This should only disappear if
# 1) equals button is pressed or
# 2) the clear button is pressed
display_frame.grid(row=0, column=0, columnspan=4, padx=5, pady=5)
#############
#Key Bindings
#############
#Keys 0-9
        for i in range(10):  # range, not xrange: the version check above allows Python 3
#Function creates new scope, so values are passed to lambda
# in key binding, rather than the variable itself.
def make_lambda(x):
return lambda val: self.get_input(str(x))
self.master.bind(i, make_lambda(i))
#Keys +, -, *, /
self.master.bind("-", lambda val: self.set_command('-'))
self.master.bind("+", lambda val: self.set_command('+'))
self.master.bind("*", lambda val: self.set_command(MULTIPLY))
self.master.bind("/", lambda val: self.set_command(DIVIDE))
#keys = and <return>
self.master.bind('=', lambda val: self.display_output())
self.master.bind('<Return>', lambda val: self.display_output())
#BackSpace and del keys
self.master.bind('<Delete>', lambda val: self.delete_value())
self.master.bind('<BackSpace>', lambda val: self.delete_value())
#Decimal Place key
self.master.bind('.', lambda val: self.add_decimal())
#Clear button mapped to 'c' key and Num_lock
self.master.bind('c', lambda val: self.clear_command())
##################
#Interface Buttons
##################
#Clear Button
clear_button = Std_button(master, text="C")
clear_button.config(command=self.clear_command)
clear_button.grid(row=1, column=0, pady=2)
#Divide Button
divide_button = Std_button(master, text=DIVIDE)
divide_button.config(command=lambda:self.set_command(DIVIDE))
divide_button.grid(row=1, column=1, pady=2)
#Multiple Button
multiply_button = Std_button(master, text=MULTIPLY)
multiply_button.config(command = lambda:self.set_command(MULTIPLY))
multiply_button.grid(row=1, column=2, pady=2)
#Delete Button
delete_button = Std_button(master, text="Del")
delete_button.config(command=self.delete_value)
delete_button.grid(row=1, column=3, pady=2)
#Set One
button_1 = Std_button(master, text="1", command = lambda:self.get_input("1"))
button_1.grid(row=4, column=0, pady=2)
#Set Two
button_2 = Std_button(master, text="2", command = lambda:self.get_input("2"))
button_2.grid(row=4, column=1, pady=2)
#Set Three
button_3 = Std_button(master, text="3", command = lambda:self.get_input("3"))
button_3.grid(row=4, column=2, pady=2)
#Set Four
button_4 = Std_button(master, text="4", command = lambda:self.get_input("4"))
button_4.grid(row=3, column=0, pady=2)
#Set Five
button_5 = Std_button(master, text="5", command = lambda:self.get_input("5"))
button_5.grid(row=3, column=1, pady=2)
#Set Six
button_6 = Std_button(master, text="6", command = lambda:self.get_input("6"))
button_6.grid(row=3, column=2, pady=2)
#Set Seven
button_7 = Std_button(master, text="7", command = lambda:self.get_input("7"))
button_7.grid(row=2, column=0, pady=2)
#Set Eight
button_8 = Std_button(master, text="8", command = lambda:self.get_input("8"))
button_8.grid(row=2, column=1, pady=2)
#Set Nine
button_9 = Std_button(master, text="9", command = lambda:self.get_input("9"))
button_9.grid(row=2, column=2, pady=2)
#set_zero
button_0 = Wide_button(master, text="0", command = lambda:self.get_input("0"))
button_0.grid(row=5, column=0, columnspan=2, pady=2)
#decimal button
dot_button = Std_button(master, text='.', command = self.add_decimal)
dot_button.grid(row=5, column=2, pady=2)
#Plus button
plus_button = Std_button(master, text='+', command = lambda: self.set_command('+'))
plus_button.grid(row=2, column=3, pady=2)
#Minus button
minus_button = Std_button(master, text='-', command = lambda:self.set_command('-'))
minus_button.grid(row=3, column=3, pady=2)
#Equals button
equals_button = Tall_button(master, text='=', command = self.display_output)
equals_button.grid(row=4, column=3, rowspan=2, pady=2)
def clear_command(self):
self.next_input = "0"
self.calculation_variable.clear()
self.total = 0.0
self.operator_value = None
self.display_output()
#Called when next_input needs to be set to zero
# def set_null(self):
# self.next_input = "0"
def delete_value(self):
if len(self.next_input) > 1:
self.next_input = self.next_input[:-1]
else:
self.next_input = "0"
self.calculation_variable.delete()
self.display_variable.set(self.next_input)
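    #Replaces the leading number of the calculation string with the
    # current total (stripping the trailing '.0' from whole-number
    # totals) so a chained calculation continues from the last result.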
def recalculate_total(self):
temp_value = self.calculation_variable.get()
i = 0
for i, char in enumerate(temp_value):
if char.isdigit() == False and char != '.':
break
temp_total = str(self.total)
if is_integer_value(temp_total):
temp_total = temp_total[:-2]
new_value = temp_total + temp_value[i:]
self.calculation_variable.set(new_value)
def calculate_total(self):
if self.is_new_calculation:
self.recalculate_total()
self.total = float(self.calculation_variable.calculate_total())
# input_float = float(self.next_input)
# if self.operator_value == None:
# self.total = input_float
# elif self.operator_value == '+':
# self.total += input_float
# elif self.operator_value == '-':
# self.total -= input_float
# elif self.operator_value == MULTIPLY:
# self.total *= input_float
# elif self.operator_value == DIVIDE:
# self.total /= input_float
def set_command(self, command_value):
        if self.is_new_calculation:
self.next_input = ""
temp_total = str(self.total)
if is_integer_value(temp_total):
temp_total = temp_total[:-2]
self.calculation_variable.set(temp_total)
self.is_new_calculation = False
else:
self.calculate_total()
self.operator_value = command_value
self.calculation_variable.add(command_value)
self.next_input = "0"
def get_input(self, input_value):
        if self.is_new_calculation:
self.is_new_calculation = False
self.operator_value = None
if self.next_input != "0":
self.next_input += input_value
else:
self.next_input = input_value
self.display_variable.set(self.next_input)
self.calculation_variable.add(input_value)
def add_decimal(self):
        if self.is_new_calculation:
self.is_new_calculation = False
self.operator_value = None
if '.' not in self.next_input:
self.next_input += '.'
self.calculation_variable.add('.')
self.display_variable.set(self.next_input)
def display_output(self):
try:
self.calculate_total()
"""
In future, this should be done based on the self.calculation_varible value
"""
temp_total = str(self.total)
if is_integer_value(temp_total):
temp_total = temp_total[:-2]
self.display_variable.set(temp_total)
except ZeroDivisionError:
self.clear_command()
self.display_variable.set("Zero division error")
self.is_new_calculation = True
if __name__ == "__main__":
root = Tk()
app = App(root)
root.mainloop()
|
{
"content_hash": "abf64e7d733095b26367ca31e6cad343",
"timestamp": "",
"source": "github",
"line_count": 341,
"max_line_length": 141,
"avg_line_length": 31.20234604105572,
"alnum_prop": 0.5912593984962407,
"repo_name": "rowan08/Python_test_code",
"id": "6d6cfe2099c5f81bd3329d645748451e46a8d29f",
"size": "10640",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "butttons_TK.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14287"
}
],
"symlink_target": ""
}
|
"""
Support for Insteon Hub.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/insteon_hub/
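Example configuration.yaml entry (the keys mirror those validated in
setup() below; the values are placeholders):
insteon_hub:
  username: YOUR_USERNAME
  password: YOUR_PASSWORD
  api_key: YOUR_API_KEY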
"""
import logging
from homeassistant.const import CONF_API_KEY, CONF_PASSWORD, CONF_USERNAME
from homeassistant.helpers import validate_config, discovery
DOMAIN = "insteon_hub"
REQUIREMENTS = ['insteon_hub==0.4.5']
INSTEON = None
_LOGGER = logging.getLogger(__name__)
def setup(hass, config):
"""Setup Insteon Hub component.
This will automatically import associated lights.
"""
if not validate_config(
config,
{DOMAIN: [CONF_USERNAME, CONF_PASSWORD, CONF_API_KEY]},
_LOGGER):
return False
import insteon
username = config[DOMAIN][CONF_USERNAME]
password = config[DOMAIN][CONF_PASSWORD]
api_key = config[DOMAIN][CONF_API_KEY]
global INSTEON
INSTEON = insteon.Insteon(username, password, api_key)
if INSTEON is None:
_LOGGER.error("Could not connect to Insteon service.")
        return False
    for component in ('light',):
discovery.load_platform(hass, component, DOMAIN, None, config)
return True
|
{
"content_hash": "a6fa7819cbc89c4329c5491cac792a48",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 75,
"avg_line_length": 25.933333333333334,
"alnum_prop": 0.6846615252784919,
"repo_name": "mikaelboman/home-assistant",
"id": "bd12f32b39d85d9e1410af497cd3dbb7cdf94879",
"size": "1167",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/insteon_hub.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1308067"
},
{
"name": "JavaScript",
"bytes": "10846"
},
{
"name": "Python",
"bytes": "2499063"
},
{
"name": "Ruby",
"bytes": "379"
},
{
"name": "Shell",
"bytes": "6430"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.core.exceptions import ImproperlyConfigured
from shop.models.order import OrderModel
class PaymentProvider(object):
"""
Base class for all Payment Service Providers.
"""
@property
def namespace(self):
"""
Use a unique namespace for this payment provider. It is used to build the communication URLs
exposed to an external payment service provider.
"""
msg = "The attribute `namespace` must be implemented by the class `{}`"
raise NotImplementedError(msg.format(self.__class__.__name__))
def get_urls(self):
"""
Return a list of URL patterns for external communication with the payment service provider.
"""
return []
def get_payment_request(self, cart, request):
"""
Build a JavaScript expression which is evaluated by the success handler on the page
submitting the purchase command. When redirecting to another page, use:
```
window.location.href="URL-of-other-page";
```
since this expression is evaluated inside an AngularJS directive.
"""
return 'alert("Please implement method `get_payment_request` in the Python class inheriting from `PaymentProvider`!");'
class ForwardFundPayment(PaymentProvider):
"""
Provides a simple prepayment payment provider.
"""
namespace = 'forward-fund-payment'
def __init__(self):
        if not (callable(getattr(OrderModel, 'no_payment_required', None)) and
                callable(getattr(OrderModel, 'awaiting_payment', None))):
msg = "Missing methods in Order model. Add 'shop.payment.workflows.ManualPaymentWorkflowMixin' to SHOP_ORDER_WORKFLOWS."
raise ImproperlyConfigured(msg)
super(ForwardFundPayment, self).__init__()
def get_payment_request(self, cart, request):
order = OrderModel.objects.create_from_cart(cart, request)
order.populate_from_cart(cart, request)
if order.total == 0:
order.no_payment_required()
else:
order.awaiting_payment()
order.save(with_notification=True)
return 'window.location.href="{}";'.format(order.get_absolute_url())
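# Illustrative sketch only (not part of django-SHOP): a trivial provider
# built on the PaymentProvider API above. The class name, namespace and
# redirect target are made-up values; a real provider would talk to its
# payment service here.
class ExamplePayment(PaymentProvider):
    namespace = 'example-payment'

    def get_payment_request(self, cart, request):
        # The returned string is evaluated as JavaScript by the checkout
        # page; here we just redirect to a hypothetical confirmation page.
        return 'window.location.href="/payment-done/";'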
|
{
"content_hash": "0142f8dfbba07a08872ef51476da9e21",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 132,
"avg_line_length": 38.389830508474574,
"alnum_prop": 0.6547461368653421,
"repo_name": "divio/django-shop",
"id": "e9fd543eb0e5f59f6860fd0a581d6b7216edd5a6",
"size": "2289",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "shop/payment/providers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "9363"
},
{
"name": "HTML",
"bytes": "73945"
},
{
"name": "JavaScript",
"bytes": "50702"
},
{
"name": "Python",
"bytes": "437028"
}
],
"symlink_target": ""
}
|
import os, sys
import string
import glob
debug=0
#debugsym='ignorableWhitespaceSAXFunc'
debugsym=None
#
# C parser analysis code
#
ignored_files = {
"trio": "too many non standard macros",
"trio.c": "too many non standard macros",
"trionan.c": "too many non standard macros",
"triostr.c": "too many non standard macros",
"acconfig.h": "generated portability layer",
"config.h": "generated portability layer",
"libxml.h": "internal only",
"testOOM.c": "out of memory tester",
"testOOMlib.h": "out of memory tester",
"testOOMlib.c": "out of memory tester",
"rngparser.c": "not yet integrated",
"rngparser.h": "not yet integrated",
"elfgcchack.h": "not a normal header",
"testHTML.c": "test tool",
"testReader.c": "test tool",
"testSchemas.c": "test tool",
"testXPath.c": "test tool",
"testAutomata.c": "test tool",
"testModule.c": "test tool",
"testRegexp.c": "test tool",
"testThreads.c": "test tool",
"testC14N.c": "test tool",
"testRelax.c": "test tool",
"testThreadsWin32.c": "test tool",
"testSAX.c": "test tool",
"testURI.c": "test tool",
"testapi.c": "generated regression tests",
"runtest.c": "regression tests program",
"runsuite.c": "regression tests program",
"tst.c": "not part of the library",
"test.c": "not part of the library",
"testdso.c": "test for dynamid shared libraries",
}
ignored_words = {
"WINAPI": (0, "Windows keyword"),
"LIBXML_DLL_IMPORT": (0, "Special macro to flag external keywords"),
"XMLPUBVAR": (0, "Special macro for extern vars for win32"),
"XSLTPUBVAR": (0, "Special macro for extern vars for win32"),
"EXSLTPUBVAR": (0, "Special macro for extern vars for win32"),
"XMLPUBFUN": (0, "Special macro for extern funcs for win32"),
"XSLTPUBFUN": (0, "Special macro for extern funcs for win32"),
"EXSLTPUBFUN": (0, "Special macro for extern funcs for win32"),
"XMLCALL": (0, "Special macro for win32 calls"),
"XSLTCALL": (0, "Special macro for win32 calls"),
"XMLCDECL": (0, "Special macro for win32 calls"),
"EXSLTCALL": (0, "Special macro for win32 calls"),
"__declspec": (3, "Windows keyword"),
"__stdcall": (0, "Windows keyword"),
"ATTRIBUTE_UNUSED": (0, "macro keyword"),
"LIBEXSLT_PUBLIC": (0, "macro keyword"),
"X_IN_Y": (5, "macro function builder"),
}
def escape(raw):
raw = string.replace(raw, '&', '&')
raw = string.replace(raw, '<', '<')
raw = string.replace(raw, '>', '>')
raw = string.replace(raw, "'", ''')
raw = string.replace(raw, '"', '"')
return raw
def uniq(items):
d = {}
for item in items:
d[item]=1
return d.keys()
class identifier:
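    """Record of a single C symbol (function, variable, typedef, macro,
    ...) together with the header and module it was found in and any
    extra information gathered while parsing."""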
def __init__(self, name, header=None, module=None, type=None, lineno = 0,
info=None, extra=None, conditionals = None):
self.name = name
self.header = header
self.module = module
self.type = type
self.info = info
self.extra = extra
self.lineno = lineno
self.static = 0
if conditionals == None or len(conditionals) == 0:
self.conditionals = None
else:
self.conditionals = conditionals[:]
if self.name == debugsym:
print "=> define %s : %s" % (debugsym, (module, type, info,
extra, conditionals))
def __repr__(self):
r = "%s %s:" % (self.type, self.name)
if self.static:
r = r + " static"
if self.module != None:
r = r + " from %s" % (self.module)
if self.info != None:
r = r + " " + `self.info`
if self.extra != None:
r = r + " " + `self.extra`
if self.conditionals != None:
r = r + " " + `self.conditionals`
return r
def set_header(self, header):
self.header = header
def set_module(self, module):
self.module = module
def set_type(self, type):
self.type = type
def set_info(self, info):
self.info = info
def set_extra(self, extra):
self.extra = extra
def set_lineno(self, lineno):
self.lineno = lineno
def set_static(self, static):
self.static = static
def set_conditionals(self, conditionals):
if conditionals == None or len(conditionals) == 0:
self.conditionals = None
else:
self.conditionals = conditionals[:]
def get_name(self):
return self.name
    def get_header(self):
        return self.header
def get_module(self):
return self.module
def get_type(self):
return self.type
def get_info(self):
return self.info
def get_lineno(self):
return self.lineno
def get_extra(self):
return self.extra
def get_static(self):
return self.static
def get_conditionals(self):
return self.conditionals
def update(self, header, module, type = None, info = None, extra=None,
conditionals=None):
if self.name == debugsym:
print "=> update %s : %s" % (debugsym, (module, type, info,
extra, conditionals))
if header != None and self.header == None:
            self.set_header(header)
if module != None and (self.module == None or self.header == self.module):
self.set_module(module)
if type != None and self.type == None:
self.set_type(type)
if info != None:
self.set_info(info)
if extra != None:
self.set_extra(extra)
if conditionals != None:
self.set_conditionals(conditionals)
class index:
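    """A set of identifier records for one parse, bucketed by kind
    (functions, variables, includes, structs, enums, typedefs, macros)."""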
def __init__(self, name = "noname"):
self.name = name
self.identifiers = {}
self.functions = {}
self.variables = {}
self.includes = {}
self.structs = {}
self.enums = {}
self.typedefs = {}
self.macros = {}
self.references = {}
self.info = {}
def add_ref(self, name, header, module, static, type, lineno, info=None, extra=None, conditionals = None):
if name[0:2] == '__':
return None
d = None
try:
d = self.identifiers[name]
            d.update(header, module, type, info, extra, conditionals)
except:
d = identifier(name, header, module, type, lineno, info, extra, conditionals)
self.identifiers[name] = d
if d != None and static == 1:
d.set_static(1)
if d != None and name != None and type != None:
self.references[name] = d
if name == debugsym:
print "New ref: %s" % (d)
return d
def add(self, name, header, module, static, type, lineno, info=None, extra=None, conditionals = None):
if name[0:2] == '__':
return None
d = None
try:
d = self.identifiers[name]
            d.update(header, module, type, info, extra, conditionals)
except:
d = identifier(name, header, module, type, lineno, info, extra, conditionals)
self.identifiers[name] = d
if d != None and static == 1:
d.set_static(1)
if d != None and name != None and type != None:
if type == "function":
self.functions[name] = d
elif type == "functype":
self.functions[name] = d
elif type == "variable":
self.variables[name] = d
elif type == "include":
self.includes[name] = d
elif type == "struct":
self.structs[name] = d
elif type == "enum":
self.enums[name] = d
elif type == "typedef":
self.typedefs[name] = d
elif type == "macro":
self.macros[name] = d
else:
print "Unable to register type ", type
if name == debugsym:
print "New symbol: %s" % (d)
return d
def merge(self, idx):
for id in idx.functions.keys():
            #
            # a macro might be used to override function or variable
            # definitions
            #
if self.macros.has_key(id):
del self.macros[id]
if self.functions.has_key(id):
print "function %s from %s redeclared in %s" % (
id, self.functions[id].header, idx.functions[id].header)
else:
self.functions[id] = idx.functions[id]
self.identifiers[id] = idx.functions[id]
for id in idx.variables.keys():
            #
            # a macro might be used to override function or variable
            # definitions
            #
if self.macros.has_key(id):
del self.macros[id]
if self.variables.has_key(id):
print "variable %s from %s redeclared in %s" % (
id, self.variables[id].header, idx.variables[id].header)
else:
self.variables[id] = idx.variables[id]
self.identifiers[id] = idx.variables[id]
for id in idx.structs.keys():
if self.structs.has_key(id):
print "struct %s from %s redeclared in %s" % (
id, self.structs[id].header, idx.structs[id].header)
else:
self.structs[id] = idx.structs[id]
self.identifiers[id] = idx.structs[id]
for id in idx.typedefs.keys():
if self.typedefs.has_key(id):
print "typedef %s from %s redeclared in %s" % (
id, self.typedefs[id].header, idx.typedefs[id].header)
else:
self.typedefs[id] = idx.typedefs[id]
self.identifiers[id] = idx.typedefs[id]
for id in idx.macros.keys():
            #
            # a macro might be used to override function or variable
            # definitions
            #
if self.variables.has_key(id):
continue
if self.functions.has_key(id):
continue
if self.enums.has_key(id):
continue
if self.macros.has_key(id):
print "macro %s from %s redeclared in %s" % (
id, self.macros[id].header, idx.macros[id].header)
else:
self.macros[id] = idx.macros[id]
self.identifiers[id] = idx.macros[id]
for id in idx.enums.keys():
if self.enums.has_key(id):
print "enum %s from %s redeclared in %s" % (
id, self.enums[id].header, idx.enums[id].header)
else:
self.enums[id] = idx.enums[id]
self.identifiers[id] = idx.enums[id]
def merge_public(self, idx):
for id in idx.functions.keys():
if self.functions.has_key(id):
# check that function condition agrees with header
if idx.functions[id].conditionals != \
self.functions[id].conditionals:
print "Header condition differs from Function for %s:" \
% id
print " H: %s" % self.functions[id].conditionals
print " C: %s" % idx.functions[id].conditionals
up = idx.functions[id]
self.functions[id].update(None, up.module, up.type, up.info, up.extra)
# else:
# print "Function %s from %s is not declared in headers" % (
# id, idx.functions[id].module)
# TODO: do the same for variables.
def analyze_dict(self, type, dict):
count = 0
public = 0
for name in dict.keys():
id = dict[name]
count = count + 1
if id.static == 0:
public = public + 1
if count != public:
print " %d %s , %d public" % (count, type, public)
elif count != 0:
print " %d public %s" % (count, type)
def analyze(self):
self.analyze_dict("functions", self.functions)
self.analyze_dict("variables", self.variables)
self.analyze_dict("structs", self.structs)
self.analyze_dict("typedefs", self.typedefs)
self.analyze_dict("macros", self.macros)
class CLexer:
"""A lexer for the C language, tokenize the input by reading and
analyzing it line by line"""
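    # For example, the line "int x = 1;" is tokenized as
    #   ('name', 'int') ('name', 'x') ('op', '=') ('name', '1') ('sep', ';')
    # (numeric literals come back as 'name' tokens, which is good enough
    # for this parser)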
def __init__(self, input):
self.input = input
self.tokens = []
self.line = ""
self.lineno = 0
def getline(self):
line = ''
while line == '':
line = self.input.readline()
if not line:
return None
self.lineno = self.lineno + 1
line = string.lstrip(line)
line = string.rstrip(line)
if line == '':
continue
while line[-1] == '\\':
line = line[:-1]
n = self.input.readline()
self.lineno = self.lineno + 1
n = string.lstrip(n)
n = string.rstrip(n)
if not n:
break
else:
line = line + n
return line
def getlineno(self):
return self.lineno
def push(self, token):
self.tokens.insert(0, token);
def debug(self):
print "Last token: ", self.last
print "Token queue: ", self.tokens
print "Line %d end: " % (self.lineno), self.line
def token(self):
while self.tokens == []:
if self.line == "":
line = self.getline()
else:
line = self.line
self.line = ""
if line == None:
return None
if line[0] == '#':
self.tokens = map((lambda x: ('preproc', x)),
string.split(line))
break;
l = len(line)
if line[0] == '"' or line[0] == "'":
end = line[0]
line = line[1:]
found = 0
tok = ""
while found == 0:
i = 0
l = len(line)
while i < l:
if line[i] == end:
self.line = line[i+1:]
line = line[:i]
l = i
found = 1
break
if line[i] == '\\':
i = i + 1
i = i + 1
tok = tok + line
if found == 0:
line = self.getline()
if line == None:
return None
self.last = ('string', tok)
return self.last
if l >= 2 and line[0] == '/' and line[1] == '*':
line = line[2:]
found = 0
tok = ""
while found == 0:
i = 0
l = len(line)
while i < l:
if line[i] == '*' and i+1 < l and line[i+1] == '/':
self.line = line[i+2:]
                            line = line[:i]
l = i
found = 1
break
i = i + 1
if tok != "":
tok = tok + "\n"
tok = tok + line
if found == 0:
line = self.getline()
if line == None:
return None
self.last = ('comment', tok)
return self.last
if l >= 2 and line[0] == '/' and line[1] == '/':
line = line[2:]
self.last = ('comment', line)
return self.last
i = 0
while i < l:
if line[i] == '/' and i+1 < l and line[i+1] == '/':
self.line = line[i:]
line = line[:i]
break
if line[i] == '/' and i+1 < l and line[i+1] == '*':
self.line = line[i:]
line = line[:i]
break
if line[i] == '"' or line[i] == "'":
self.line = line[i:]
line = line[:i]
break
i = i + 1
l = len(line)
i = 0
while i < l:
if line[i] == ' ' or line[i] == '\t':
i = i + 1
continue
o = ord(line[i])
if (o >= 97 and o <= 122) or (o >= 65 and o <= 90) or \
(o >= 48 and o <= 57):
s = i
while i < l:
o = ord(line[i])
if (o >= 97 and o <= 122) or (o >= 65 and o <= 90) or \
(o >= 48 and o <= 57) or string.find(
" \t(){}:;,+-*/%&!|[]=><", line[i]) == -1:
i = i + 1
else:
break
self.tokens.append(('name', line[s:i]))
continue
if string.find("(){}:;,[]", line[i]) != -1:
# if line[i] == '(' or line[i] == ')' or line[i] == '{' or \
# line[i] == '}' or line[i] == ':' or line[i] == ';' or \
# line[i] == ',' or line[i] == '[' or line[i] == ']':
self.tokens.append(('sep', line[i]))
i = i + 1
continue
if string.find("+-*><=/%&!|.", line[i]) != -1:
# if line[i] == '+' or line[i] == '-' or line[i] == '*' or \
# line[i] == '>' or line[i] == '<' or line[i] == '=' or \
# line[i] == '/' or line[i] == '%' or line[i] == '&' or \
# line[i] == '!' or line[i] == '|' or line[i] == '.':
if line[i] == '.' and i + 2 < l and \
line[i+1] == '.' and line[i+2] == '.':
self.tokens.append(('name', '...'))
i = i + 3
continue
j = i + 1
if j < l and (
string.find("+-*><=/%&!|", line[j]) != -1):
# line[j] == '+' or line[j] == '-' or line[j] == '*' or \
# line[j] == '>' or line[j] == '<' or line[j] == '=' or \
# line[j] == '/' or line[j] == '%' or line[j] == '&' or \
# line[j] == '!' or line[j] == '|'):
self.tokens.append(('op', line[i:j+1]))
i = j + 1
else:
self.tokens.append(('op', line[i]))
i = i + 1
continue
s = i
while i < l:
o = ord(line[i])
if (o >= 97 and o <= 122) or (o >= 65 and o <= 90) or \
(o >= 48 and o <= 57) or (
string.find(" \t(){}:;,+-*/%&!|[]=><", line[i]) == -1):
# line[i] != ' ' and line[i] != '\t' and
# line[i] != '(' and line[i] != ')' and
# line[i] != '{' and line[i] != '}' and
# line[i] != ':' and line[i] != ';' and
# line[i] != ',' and line[i] != '+' and
# line[i] != '-' and line[i] != '*' and
# line[i] != '/' and line[i] != '%' and
# line[i] != '&' and line[i] != '!' and
# line[i] != '|' and line[i] != '[' and
# line[i] != ']' and line[i] != '=' and
# line[i] != '*' and line[i] != '>' and
# line[i] != '<'):
i = i + 1
else:
break
self.tokens.append(('name', line[s:i]))
tok = self.tokens[0]
self.tokens = self.tokens[1:]
self.last = tok
return tok
class CParser:
"""The C module parser"""
def __init__(self, filename, idx = None):
self.filename = filename
if len(filename) > 2 and filename[-2:] == '.h':
self.is_header = 1
else:
self.is_header = 0
self.input = open(filename)
self.lexer = CLexer(self.input)
if idx == None:
self.index = index()
else:
self.index = idx
self.top_comment = ""
self.last_comment = ""
self.comment = None
self.collect_ref = 0
self.no_error = 0
self.conditionals = []
self.defines = []
def collect_references(self):
self.collect_ref = 1
def stop_error(self):
self.no_error = 1
def start_error(self):
self.no_error = 0
def lineno(self):
return self.lexer.getlineno()
def index_add(self, name, module, static, type, info=None, extra = None):
if self.is_header == 1:
self.index.add(name, module, module, static, type, self.lineno(),
info, extra, self.conditionals)
else:
self.index.add(name, None, module, static, type, self.lineno(),
info, extra, self.conditionals)
def index_add_ref(self, name, module, static, type, info=None,
extra = None):
if self.is_header == 1:
self.index.add_ref(name, module, module, static, type,
self.lineno(), info, extra, self.conditionals)
else:
self.index.add_ref(name, None, module, static, type, self.lineno(),
info, extra, self.conditionals)
def warning(self, msg):
if self.no_error:
return
print msg
def error(self, msg, token=-1):
if self.no_error:
return
print "Parse Error: " + msg
if token != -1:
print "Got token ", token
self.lexer.debug()
sys.exit(1)
def debug(self, msg, token=-1):
print "Debug: " + msg
if token != -1:
print "Got token ", token
self.lexer.debug()
def parseTopComment(self, comment):
res = {}
lines = string.split(comment, "\n")
item = None
for line in lines:
while line != "" and (line[0] == ' ' or line[0] == '\t'):
line = line[1:]
while line != "" and line[0] == '*':
line = line[1:]
while line != "" and (line[0] == ' ' or line[0] == '\t'):
line = line[1:]
try:
(it, line) = string.split(line, ":", 1)
item = it
while line != "" and (line[0] == ' ' or line[0] == '\t'):
line = line[1:]
if res.has_key(item):
res[item] = res[item] + " " + line
else:
res[item] = line
except:
if item != None:
if res.has_key(item):
res[item] = res[item] + " " + line
else:
res[item] = line
self.index.info = res
def parseComment(self, token):
if self.top_comment == "":
self.top_comment = token[1]
if self.comment == None or token[1][0] == '*':
self.comment = token[1];
else:
self.comment = self.comment + token[1]
token = self.lexer.token()
if string.find(self.comment, "DOC_DISABLE") != -1:
self.stop_error()
if string.find(self.comment, "DOC_ENABLE") != -1:
self.start_error()
return token
#
    # Parse a comment block associated with a typedef
#
def parseTypeComment(self, name, quiet = 0):
if name[0:2] == '__':
quiet = 1
        desc = ""
        if self.comment == None:
            if not quiet:
                self.warning("Missing comment for type %s" % (name))
            return(desc)
if self.comment[0] != '*':
if not quiet:
self.warning("Missing * in type comment for %s" % (name))
            return(desc)
lines = string.split(self.comment, '\n')
if lines[0] == '*':
del lines[0]
if lines[0] != "* %s:" % (name):
if not quiet:
self.warning("Misformatted type comment for %s" % (name))
self.warning(" Expecting '* %s:' got '%s'" % (name, lines[0]))
            return(desc)
del lines[0]
while len(lines) > 0 and lines[0] == '*':
del lines[0]
desc = ""
while len(lines) > 0:
l = lines[0]
while len(l) > 0 and l[0] == '*':
l = l[1:]
l = string.strip(l)
desc = desc + " " + l
del lines[0]
desc = string.strip(desc)
if quiet == 0:
if desc == "":
self.warning("Type comment for %s lack description of the macro" % (name))
return(desc)
#
    # Parse a comment block associated with a macro
#
def parseMacroComment(self, name, quiet = 0):
if name[0:2] == '__':
quiet = 1
args = []
desc = ""
if self.comment == None:
if not quiet:
self.warning("Missing comment for macro %s" % (name))
return((args, desc))
if self.comment[0] != '*':
if not quiet:
self.warning("Missing * in macro comment for %s" % (name))
return((args, desc))
lines = string.split(self.comment, '\n')
if lines[0] == '*':
del lines[0]
if lines[0] != "* %s:" % (name):
if not quiet:
self.warning("Misformatted macro comment for %s" % (name))
self.warning(" Expecting '* %s:' got '%s'" % (name, lines[0]))
return((args, desc))
del lines[0]
        while len(lines) > 0 and lines[0] == '*':
del lines[0]
while len(lines) > 0 and lines[0][0:3] == '* @':
l = lines[0][3:]
try:
(arg, desc) = string.split(l, ':', 1)
desc=string.strip(desc)
arg=string.strip(arg)
except:
if not quiet:
self.warning("Misformatted macro comment for %s" % (name))
self.warning(" problem with '%s'" % (lines[0]))
del lines[0]
continue
del lines[0]
l = string.strip(lines[0])
while len(l) > 2 and l[0:3] != '* @':
while l[0] == '*':
l = l[1:]
desc = desc + ' ' + string.strip(l)
del lines[0]
if len(lines) == 0:
break
l = lines[0]
args.append((arg, desc))
while len(lines) > 0 and lines[0] == '*':
del lines[0]
desc = ""
while len(lines) > 0:
l = lines[0]
while len(l) > 0 and l[0] == '*':
l = l[1:]
l = string.strip(l)
desc = desc + " " + l
del lines[0]
desc = string.strip(desc)
if quiet == 0:
if desc == "":
self.warning("Macro comment for %s lack description of the macro" % (name))
return((args, desc))
#
    # Parse a comment block and merge the information found in the
    # parameter descriptions, finally returning a block as complete
    # as possible
#
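    # The layout expected here is the libxml2 one, for example (the
    # function and argument names below are made up):
    #  /**
    #   * xmlFoo:
    #   * @arg:  description of the argument
    #   *
    #   * description of what the function does
    #   *
    #   * Returns a description of the return value
    #   */
    #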
def mergeFunctionComment(self, name, description, quiet = 0):
if name == 'main':
quiet = 1
if name[0:2] == '__':
quiet = 1
(ret, args) = description
desc = ""
retdesc = ""
if self.comment == None:
if not quiet:
self.warning("Missing comment for function %s" % (name))
return(((ret[0], retdesc), args, desc))
if self.comment[0] != '*':
if not quiet:
self.warning("Missing * in function comment for %s" % (name))
return(((ret[0], retdesc), args, desc))
lines = string.split(self.comment, '\n')
if lines[0] == '*':
del lines[0]
if lines[0] != "* %s:" % (name):
if not quiet:
self.warning("Misformatted function comment for %s" % (name))
self.warning(" Expecting '* %s:' got '%s'" % (name, lines[0]))
return(((ret[0], retdesc), args, desc))
del lines[0]
        while len(lines) > 0 and lines[0] == '*':
del lines[0]
nbargs = len(args)
while len(lines) > 0 and lines[0][0:3] == '* @':
l = lines[0][3:]
try:
(arg, desc) = string.split(l, ':', 1)
desc=string.strip(desc)
arg=string.strip(arg)
except:
if not quiet:
self.warning("Misformatted function comment for %s" % (name))
self.warning(" problem with '%s'" % (lines[0]))
del lines[0]
continue
del lines[0]
l = string.strip(lines[0])
while len(l) > 2 and l[0:3] != '* @':
while l[0] == '*':
l = l[1:]
desc = desc + ' ' + string.strip(l)
del lines[0]
if len(lines) == 0:
break
l = lines[0]
i = 0
while i < nbargs:
if args[i][1] == arg:
args[i] = (args[i][0], arg, desc)
break;
i = i + 1
if i >= nbargs:
if not quiet:
self.warning("Unable to find arg %s from function comment for %s" % (
arg, name))
while len(lines) > 0 and lines[0] == '*':
del lines[0]
desc = ""
while len(lines) > 0:
l = lines[0]
while len(l) > 0 and l[0] == '*':
l = l[1:]
l = string.strip(l)
            if len(l) >= 6 and (l[0:6] == "return" or l[0:6] == "Return"):
try:
l = string.split(l, ' ', 1)[1]
except:
l = ""
retdesc = string.strip(l)
del lines[0]
while len(lines) > 0:
l = lines[0]
while len(l) > 0 and l[0] == '*':
l = l[1:]
l = string.strip(l)
retdesc = retdesc + " " + l
del lines[0]
else:
desc = desc + " " + l
del lines[0]
retdesc = string.strip(retdesc)
desc = string.strip(desc)
if quiet == 0:
#
# report missing comments
#
i = 0
while i < nbargs:
if args[i][2] == None and args[i][0] != "void" and args[i][1] != None:
self.warning("Function comment for %s lacks description of arg %s" % (name, args[i][1]))
i = i + 1
if retdesc == "" and ret[0] != "void":
self.warning("Function comment for %s lacks description of return value" % (name))
if desc == "":
self.warning("Function comment for %s lacks description of the function" % (name))
return(((ret[0], retdesc), args, desc))
def parsePreproc(self, token):
if debug:
print "=> preproc ", token, self.lexer.tokens
name = token[1]
if name == "#include":
token = self.lexer.token()
if token == None:
return None
if token[0] == 'preproc':
self.index_add(token[1], self.filename, not self.is_header,
"include")
return self.lexer.token()
return token
if name == "#define":
token = self.lexer.token()
if token == None:
return None
if token[0] == 'preproc':
# TODO macros with arguments
name = token[1]
lst = []
token = self.lexer.token()
while token != None and token[0] == 'preproc' and \
token[1][0] != '#':
lst.append(token[1])
token = self.lexer.token()
try:
name = string.split(name, '(') [0]
except:
pass
info = self.parseMacroComment(name, not self.is_header)
self.index_add(name, self.filename, not self.is_header,
"macro", info)
return token
#
# Processing of conditionals modified by Bill 1/1/05
#
# We process conditionals (i.e. tokens from #ifdef, #ifndef,
# #if, #else and #endif) for headers and mainline code,
# store the ones from the header in libxml2-api.xml, and later
# (in the routine merge_public) verify that the two (header and
# mainline code) agree.
#
# There is a small problem with processing the headers. Some of
# the variables are not concerned with enabling / disabling of
# library functions (e.g. '__XML_PARSER_H__'), and we don't want
# them to be included in libxml2-api.xml, or involved in
# the check between the header and the mainline code. To
# accomplish this, we ignore any conditional which doesn't include
# the string 'ENABLED'
#
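        # For example, "#ifdef LIBXML_HTML_ENABLED" appends
        # "defined(LIBXML_HTML_ENABLED)" to self.conditionals, while
        # "#ifdef __XML_PARSER_H__" is only recorded in self.defines.
        #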
if name == "#ifdef":
apstr = self.lexer.tokens[0][1]
try:
self.defines.append(apstr)
if string.find(apstr, 'ENABLED') != -1:
self.conditionals.append("defined(%s)" % apstr)
except:
pass
elif name == "#ifndef":
apstr = self.lexer.tokens[0][1]
try:
self.defines.append(apstr)
if string.find(apstr, 'ENABLED') != -1:
self.conditionals.append("!defined(%s)" % apstr)
except:
pass
elif name == "#if":
apstr = ""
for tok in self.lexer.tokens:
if apstr != "":
apstr = apstr + " "
apstr = apstr + tok[1]
try:
self.defines.append(apstr)
if string.find(apstr, 'ENABLED') != -1:
self.conditionals.append(apstr)
except:
pass
elif name == "#else":
if self.conditionals != [] and \
string.find(self.defines[-1], 'ENABLED') != -1:
self.conditionals[-1] = "!(%s)" % self.conditionals[-1]
elif name == "#endif":
if self.conditionals != [] and \
string.find(self.defines[-1], 'ENABLED') != -1:
self.conditionals = self.conditionals[:-1]
self.defines = self.defines[:-1]
token = self.lexer.token()
while token != None and token[0] == 'preproc' and \
token[1][0] != '#':
token = self.lexer.token()
return token
#
    # Token acquisition on top of the lexer: it internally handles the
    # preprocessor directives and the comments, since they are logically
    # not part of the program structure.
#
def token(self):
global ignored_words
token = self.lexer.token()
while token != None:
if token[0] == 'comment':
token = self.parseComment(token)
continue
elif token[0] == 'preproc':
token = self.parsePreproc(token)
continue
elif token[0] == "name" and token[1] == "__const":
token = ("name", "const")
return token
elif token[0] == "name" and token[1] == "__attribute":
token = self.lexer.token()
while token != None and token[1] != ";":
token = self.lexer.token()
return token
elif token[0] == "name" and ignored_words.has_key(token[1]):
(n, info) = ignored_words[token[1]]
i = 0
while i < n:
token = self.lexer.token()
i = i + 1
token = self.lexer.token()
continue
else:
if debug:
print "=> ", token
return token
return None
#
# Parse a typedef, it records the type and its name.
#
def parseTypedef(self, token):
if token == None:
return None
token = self.parseType(token)
if token == None:
self.error("parsing typedef")
return None
base_type = self.type
type = base_type
#self.debug("end typedef type", token)
while token != None:
if token[0] == "name":
name = token[1]
signature = self.signature
if signature != None:
type = string.split(type, '(')[0]
d = self.mergeFunctionComment(name,
((type, None), signature), 1)
self.index_add(name, self.filename, not self.is_header,
"functype", d)
else:
if base_type == "struct":
self.index_add(name, self.filename, not self.is_header,
"struct", type)
base_type = "struct " + name
else:
# TODO report missing or misformatted comments
info = self.parseTypeComment(name, 1)
self.index_add(name, self.filename, not self.is_header,
"typedef", type, info)
token = self.token()
else:
self.error("parsing typedef: expecting a name")
return token
#self.debug("end typedef", token)
if token != None and token[0] == 'sep' and token[1] == ',':
type = base_type
token = self.token()
while token != None and token[0] == "op":
type = type + token[1]
token = self.token()
elif token != None and token[0] == 'sep' and token[1] == ';':
break;
elif token != None and token[0] == 'name':
type = base_type
continue;
else:
self.error("parsing typedef: expecting ';'", token)
return token
token = self.token()
return token
#
    # Parse a C code block; used for function bodies, it parses up to
    # and including the balancing }
#
def parseBlock(self, token):
while token != None:
if token[0] == "sep" and token[1] == "{":
token = self.token()
token = self.parseBlock(token)
elif token[0] == "sep" and token[1] == "}":
self.comment = None
token = self.token()
return token
else:
if self.collect_ref == 1:
oldtok = token
token = self.token()
if oldtok[0] == "name" and oldtok[1][0:3] == "xml":
if token[0] == "sep" and token[1] == "(":
self.index_add_ref(oldtok[1], self.filename,
0, "function")
token = self.token()
elif token[0] == "name":
token = self.token()
if token[0] == "sep" and (token[1] == ";" or
token[1] == "," or token[1] == "="):
self.index_add_ref(oldtok[1], self.filename,
0, "type")
elif oldtok[0] == "name" and oldtok[1][0:4] == "XML_":
self.index_add_ref(oldtok[1], self.filename,
0, "typedef")
elif oldtok[0] == "name" and oldtok[1][0:7] == "LIBXML_":
self.index_add_ref(oldtok[1], self.filename,
0, "typedef")
else:
token = self.token()
return token
#
# Parse a C struct definition till the balancing }
#
def parseStruct(self, token):
fields = []
#self.debug("start parseStruct", token)
while token != None:
if token[0] == "sep" and token[1] == "{":
token = self.token()
token = self.parseTypeBlock(token)
elif token[0] == "sep" and token[1] == "}":
self.struct_fields = fields
#self.debug("end parseStruct", token)
#print fields
token = self.token()
return token
else:
base_type = self.type
#self.debug("before parseType", token)
token = self.parseType(token)
#self.debug("after parseType", token)
if token != None and token[0] == "name":
fname = token[1]
token = self.token()
if token[0] == "sep" and token[1] == ";":
self.comment = None
token = self.token()
fields.append((self.type, fname, self.comment))
self.comment = None
else:
self.error("parseStruct: expecting ;", token)
elif token != None and token[0] == "sep" and token[1] == "{":
token = self.token()
token = self.parseTypeBlock(token)
if token != None and token[0] == "name":
token = self.token()
if token != None and token[0] == "sep" and token[1] == ";":
token = self.token()
else:
self.error("parseStruct: expecting ;", token)
else:
self.error("parseStruct: name", token)
token = self.token()
self.type = base_type;
self.struct_fields = fields
#self.debug("end parseStruct", token)
#print fields
return token
#
    # Parse a C enum block, parsing up to the balancing }
#
def parseEnumBlock(self, token):
self.enums = []
name = None
self.comment = None
comment = ""
value = "0"
while token != None:
if token[0] == "sep" and token[1] == "{":
token = self.token()
token = self.parseTypeBlock(token)
elif token[0] == "sep" and token[1] == "}":
if name != None:
if self.comment != None:
comment = self.comment
self.comment = None
self.enums.append((name, value, comment))
token = self.token()
return token
elif token[0] == "name":
if name != None:
if self.comment != None:
comment = string.strip(self.comment)
self.comment = None
self.enums.append((name, value, comment))
name = token[1]
comment = ""
token = self.token()
if token[0] == "op" and token[1][0] == "=":
value = ""
if len(token[1]) > 1:
value = token[1][1:]
token = self.token()
while token[0] != "sep" or (token[1] != ',' and
token[1] != '}'):
value = value + token[1]
token = self.token()
else:
try:
value = "%d" % (int(value) + 1)
except:
self.warning("Failed to compute value of enum %s" % (name))
value=""
if token[0] == "sep" and token[1] == ",":
token = self.token()
else:
token = self.token()
return token
#
    # Parse a C definition block; used for struct bodies, it parses up
    # to the balancing }
#
def parseTypeBlock(self, token):
while token != None:
if token[0] == "sep" and token[1] == "{":
token = self.token()
token = self.parseTypeBlock(token)
elif token[0] == "sep" and token[1] == "}":
token = self.token()
return token
else:
token = self.token()
return token
#
# Parse a type: the fact that the type name can either occur after
    # the definition or within the definition makes it a little harder;
    # if inside, the name token is pushed back before returning
#
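    # For example, parsing "const char *name;" leaves self.type set to
    # "const char *" and returns the ('name', 'name') token to the caller.
    #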
def parseType(self, token):
self.type = ""
self.struct_fields = []
self.signature = None
if token == None:
return token
while token[0] == "name" and (
token[1] == "const" or \
token[1] == "unsigned" or \
token[1] == "signed"):
if self.type == "":
self.type = token[1]
else:
self.type = self.type + " " + token[1]
token = self.token()
if token[0] == "name" and (token[1] == "long" or token[1] == "short"):
if self.type == "":
self.type = token[1]
else:
self.type = self.type + " " + token[1]
if token[0] == "name" and token[1] == "int":
if self.type == "":
self.type = tmp[1]
else:
self.type = self.type + " " + tmp[1]
elif token[0] == "name" and token[1] == "struct":
if self.type == "":
self.type = token[1]
else:
self.type = self.type + " " + token[1]
token = self.token()
nametok = None
if token[0] == "name":
nametok = token
token = self.token()
if token != None and token[0] == "sep" and token[1] == "{":
token = self.token()
token = self.parseStruct(token)
elif token != None and token[0] == "op" and token[1] == "*":
self.type = self.type + " " + nametok[1] + " *"
token = self.token()
while token != None and token[0] == "op" and token[1] == "*":
self.type = self.type + " *"
token = self.token()
if token[0] == "name":
nametok = token
token = self.token()
else:
self.error("struct : expecting name", token)
return token
elif token != None and token[0] == "name" and nametok != None:
self.type = self.type + " " + nametok[1]
return token
if nametok != None:
self.lexer.push(token)
token = nametok
return token
elif token[0] == "name" and token[1] == "enum":
if self.type == "":
self.type = token[1]
else:
self.type = self.type + " " + token[1]
self.enums = []
token = self.token()
if token != None and token[0] == "sep" and token[1] == "{":
token = self.token()
token = self.parseEnumBlock(token)
else:
self.error("parsing enum: expecting '{'", token)
enum_type = None
if token != None and token[0] != "name":
self.lexer.push(token)
token = ("name", "enum")
else:
enum_type = token[1]
for enum in self.enums:
self.index_add(enum[0], self.filename,
not self.is_header, "enum",
(enum[1], enum[2], enum_type))
return token
elif token[0] == "name":
if self.type == "":
self.type = token[1]
else:
self.type = self.type + " " + token[1]
else:
self.error("parsing type %s: expecting a name" % (self.type),
token)
return token
token = self.token()
while token != None and (token[0] == "op" or
token[0] == "name" and token[1] == "const"):
self.type = self.type + " " + token[1]
token = self.token()
#
# if there is a parenthesis here, this means a function type
#
if token != None and token[0] == "sep" and token[1] == '(':
self.type = self.type + token[1]
token = self.token()
while token != None and token[0] == "op" and token[1] == '*':
self.type = self.type + token[1]
token = self.token()
if token == None or token[0] != "name" :
self.error("parsing function type, name expected", token);
return token
self.type = self.type + token[1]
nametok = token
token = self.token()
if token != None and token[0] == "sep" and token[1] == ')':
self.type = self.type + token[1]
token = self.token()
if token != None and token[0] == "sep" and token[1] == '(':
token = self.token()
type = self.type;
token = self.parseSignature(token);
self.type = type;
else:
self.error("parsing function type, '(' expected", token);
return token
else:
self.error("parsing function type, ')' expected", token);
return token
self.lexer.push(token)
token = nametok
return token
#
# do some lookahead for arrays
#
if token != None and token[0] == "name":
nametok = token
token = self.token()
if token != None and token[0] == "sep" and token[1] == '[':
self.type = self.type + nametok[1]
while token != None and token[0] == "sep" and token[1] == '[':
self.type = self.type + token[1]
token = self.token()
while token != None and token[0] != 'sep' and \
token[1] != ']' and token[1] != ';':
self.type = self.type + token[1]
token = self.token()
if token != None and token[0] == 'sep' and token[1] == ']':
self.type = self.type + token[1]
token = self.token()
else:
self.error("parsing array type, ']' expected", token);
return token
elif token != None and token[0] == "sep" and token[1] == ':':
# remove :12 in case it's a limited int size
token = self.token()
token = self.token()
self.lexer.push(token)
token = nametok
return token
#
# Parse a signature: '(' has been parsed and we scan the type definition
    # up to and including the ')'
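    # For example, after the '(' of "(int a, const char *str)" has been
    # consumed, self.signature ends up as
    #   [('int', 'a', None), ('const char *', 'str', None)]
    #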
def parseSignature(self, token):
signature = []
if token != None and token[0] == "sep" and token[1] == ')':
self.signature = []
token = self.token()
return token
while token != None:
token = self.parseType(token)
if token != None and token[0] == "name":
signature.append((self.type, token[1], None))
token = self.token()
elif token != None and token[0] == "sep" and token[1] == ',':
token = self.token()
continue
elif token != None and token[0] == "sep" and token[1] == ')':
# only the type was provided
if self.type == "...":
signature.append((self.type, "...", None))
else:
signature.append((self.type, None, None))
if token != None and token[0] == "sep":
if token[1] == ',':
token = self.token()
continue
elif token[1] == ')':
token = self.token()
break
self.signature = signature
return token
#
    # Parse a global definition, be it a type, variable or function;
    # the extern "C" blocks are a bit nasty and require this to recurse.
#
def parseGlobal(self, token):
static = 0
if token[1] == 'extern':
token = self.token()
if token == None:
return token
if token[0] == 'string':
if token[1] == 'C':
token = self.token()
if token == None:
return token
if token[0] == 'sep' and token[1] == "{":
token = self.token()
# print 'Entering extern "C line ', self.lineno()
while token != None and (token[0] != 'sep' or
token[1] != "}"):
if token[0] == 'name':
token = self.parseGlobal(token)
else:
self.error(
"token %s %s unexpected at the top level" % (
token[0], token[1]))
token = self.parseGlobal(token)
# print 'Exiting extern "C" line', self.lineno()
token = self.token()
return token
else:
return token
elif token[1] == 'static':
static = 1
token = self.token()
if token == None or token[0] != 'name':
return token
if token[1] == 'typedef':
token = self.token()
return self.parseTypedef(token)
else:
token = self.parseType(token)
type_orig = self.type
if token == None or token[0] != "name":
return token
type = type_orig
self.name = token[1]
token = self.token()
while token != None and (token[0] == "sep" or token[0] == "op"):
if token[0] == "sep":
if token[1] == "[":
type = type + token[1]
token = self.token()
while token != None and (token[0] != "sep" or \
token[1] != ";"):
type = type + token[1]
token = self.token()
if token != None and token[0] == "op" and token[1] == "=":
#
# Skip the initialization of the variable
#
token = self.token()
if token[0] == 'sep' and token[1] == '{':
token = self.token()
token = self.parseBlock(token)
else:
self.comment = None
while token != None and (token[0] != "sep" or \
(token[1] != ';' and token[1] != ',')):
token = self.token()
self.comment = None
if token == None or token[0] != "sep" or (token[1] != ';' and
token[1] != ','):
self.error("missing ';' or ',' after value")
if token != None and token[0] == "sep":
if token[1] == ";":
self.comment = None
token = self.token()
if type == "struct":
self.index_add(self.name, self.filename,
not self.is_header, "struct", self.struct_fields)
else:
self.index_add(self.name, self.filename,
not self.is_header, "variable", type)
break
elif token[1] == "(":
token = self.token()
token = self.parseSignature(token)
if token == None:
return None
if token[0] == "sep" and token[1] == ";":
d = self.mergeFunctionComment(self.name,
((type, None), self.signature), 1)
self.index_add(self.name, self.filename, static,
"function", d)
token = self.token()
elif token[0] == "sep" and token[1] == "{":
d = self.mergeFunctionComment(self.name,
((type, None), self.signature), static)
self.index_add(self.name, self.filename, static,
"function", d)
token = self.token()
token = self.parseBlock(token);
elif token[1] == ',':
self.comment = None
self.index_add(self.name, self.filename, static,
"variable", type)
type = type_orig
token = self.token()
while token != None and token[0] == "sep":
type = type + token[1]
token = self.token()
if token != None and token[0] == "name":
self.name = token[1]
token = self.token()
else:
break
return token
def parse(self):
self.warning("Parsing %s" % (self.filename))
token = self.token()
while token != None:
if token[0] == 'name':
token = self.parseGlobal(token)
else:
self.error("token %s %s unexpected at the top level" % (
token[0], token[1]))
token = self.parseGlobal(token)
return
self.parseTopComment(self.top_comment)
return self.index
class docBuilder:
"""A documentation builder"""
def __init__(self, name, directories=['.'], excludes=[]):
self.name = name
self.directories = directories
self.excludes = excludes + ignored_files.keys()
self.modules = {}
self.headers = {}
self.idx = index()
self.xref = {}
self.index = {}
if name == 'libxml2':
self.basename = 'libxml'
else:
self.basename = name
def indexString(self, id, str):
if str == None:
return
str = string.replace(str, "'", ' ')
str = string.replace(str, '"', ' ')
str = string.replace(str, "/", ' ')
str = string.replace(str, '*', ' ')
str = string.replace(str, "[", ' ')
str = string.replace(str, "]", ' ')
str = string.replace(str, "(", ' ')
str = string.replace(str, ")", ' ')
str = string.replace(str, "<", ' ')
str = string.replace(str, '>', ' ')
str = string.replace(str, "&", ' ')
str = string.replace(str, '#', ' ')
str = string.replace(str, ",", ' ')
str = string.replace(str, '.', ' ')
str = string.replace(str, ';', ' ')
tokens = string.split(str)
for token in tokens:
try:
c = token[0]
if string.find(string.letters, c) < 0:
pass
elif len(token) < 3:
pass
else:
lower = string.lower(token)
# TODO: generalize this a bit
if lower == 'and' or lower == 'the':
pass
elif self.xref.has_key(token):
self.xref[token].append(id)
else:
self.xref[token] = [id]
except:
pass
def analyze(self):
print "Project %s : %d headers, %d modules" % (self.name, len(self.headers.keys()), len(self.modules.keys()))
self.idx.analyze()
def scanHeaders(self):
for header in self.headers.keys():
parser = CParser(header)
idx = parser.parse()
self.headers[header] = idx;
self.idx.merge(idx)
def scanModules(self):
for module in self.modules.keys():
parser = CParser(module)
idx = parser.parse()
# idx.analyze()
self.modules[module] = idx
self.idx.merge_public(idx)
def scan(self):
for directory in self.directories:
files = glob.glob(directory + "/*.c")
for file in files:
skip = 0
for excl in self.excludes:
if string.find(file, excl) != -1:
skip = 1;
break
if skip == 0:
self.modules[file] = None;
files = glob.glob(directory + "/*.h")
for file in files:
skip = 0
for excl in self.excludes:
if string.find(file, excl) != -1:
skip = 1;
break
if skip == 0:
self.headers[file] = None;
self.scanHeaders()
self.scanModules()
def modulename_file(self, file):
module = os.path.basename(file)
if module[-2:] == '.h':
module = module[:-2]
elif module[-2:] == '.c':
module = module[:-2]
return module
def serialize_enum(self, output, name):
id = self.idx.enums[name]
output.write(" <enum name='%s' file='%s'" % (name,
self.modulename_file(id.header)))
if id.info != None:
info = id.info
if info[0] != None and info[0] != '':
try:
val = eval(info[0])
except:
val = info[0]
output.write(" value='%s'" % (val));
if info[2] != None and info[2] != '':
output.write(" type='%s'" % info[2]);
if info[1] != None and info[1] != '':
output.write(" info='%s'" % escape(info[1]));
output.write("/>\n")
def serialize_macro(self, output, name):
id = self.idx.macros[name]
output.write(" <macro name='%s' file='%s'>\n" % (name,
self.modulename_file(id.header)))
if id.info != None:
try:
(args, desc) = id.info
if desc != None and desc != "":
output.write(" <info>%s</info>\n" % (escape(desc)))
self.indexString(name, desc)
for arg in args:
(name, desc) = arg
if desc != None and desc != "":
output.write(" <arg name='%s' info='%s'/>\n" % (
name, escape(desc)))
self.indexString(name, desc)
else:
output.write(" <arg name='%s'/>\n" % (name))
except:
pass
output.write(" </macro>\n")
def serialize_typedef(self, output, name):
id = self.idx.typedefs[name]
if id.info[0:7] == 'struct ':
output.write(" <struct name='%s' file='%s' type='%s'" % (
name, self.modulename_file(id.header), id.info))
name = id.info[7:]
if self.idx.structs.has_key(name) and ( \
type(self.idx.structs[name].info) == type(()) or
type(self.idx.structs[name].info) == type([])):
output.write(">\n");
try:
for field in self.idx.structs[name].info:
desc = field[2]
self.indexString(name, desc)
if desc == None:
desc = ''
else:
desc = escape(desc)
output.write(" <field name='%s' type='%s' info='%s'/>\n" % (field[1] , field[0], desc))
except:
print "Failed to serialize struct %s" % (name)
output.write(" </struct>\n")
else:
output.write("/>\n");
else :
output.write(" <typedef name='%s' file='%s' type='%s'" % (
name, self.modulename_file(id.header), id.info))
try:
desc = id.extra
if desc != None and desc != "":
output.write(">\n <info>%s</info>\n" % (escape(desc)))
output.write(" </typedef>\n")
else:
output.write("/>\n")
except:
output.write("/>\n")
def serialize_variable(self, output, name):
id = self.idx.variables[name]
if id.info != None:
output.write(" <variable name='%s' file='%s' type='%s'/>\n" % (
name, self.modulename_file(id.header), id.info))
else:
output.write(" <variable name='%s' file='%s'/>\n" % (
name, self.modulename_file(id.header)))
def serialize_function(self, output, name):
id = self.idx.functions[name]
if name == debugsym:
print "=>", id
output.write(" <%s name='%s' file='%s' module='%s'>\n" % (id.type,
name, self.modulename_file(id.header),
self.modulename_file(id.module)))
#
# Processing of conditionals modified by Bill 1/1/05
#
if id.conditionals != None:
apstr = ""
for cond in id.conditionals:
if apstr != "":
apstr = apstr + " && "
apstr = apstr + cond
output.write(" <cond>%s</cond>\n"% (apstr));
try:
(ret, params, desc) = id.info
output.write(" <info>%s</info>\n" % (escape(desc)))
self.indexString(name, desc)
if ret[0] != None:
if ret[0] == "void":
output.write(" <return type='void'/>\n")
else:
output.write(" <return type='%s' info='%s'/>\n" % (
ret[0], escape(ret[1])))
self.indexString(name, ret[1])
for param in params:
if param[0] == 'void':
continue
if param[2] == None:
output.write(" <arg name='%s' type='%s' info=''/>\n" % (param[1], param[0]))
else:
output.write(" <arg name='%s' type='%s' info='%s'/>\n" % (param[1], param[0], escape(param[2])))
self.indexString(name, param[2])
except:
print "Failed to save function %s info: " % name, `id.info`
output.write(" </%s>\n" % (id.type))
def serialize_exports(self, output, file):
module = self.modulename_file(file)
output.write(" <file name='%s'>\n" % (module))
dict = self.headers[file]
if dict.info != None:
for data in ('Summary', 'Description', 'Author'):
try:
output.write(" <%s>%s</%s>\n" % (
string.lower(data),
escape(dict.info[data]),
string.lower(data)))
except:
print "Header %s lacks a %s description" % (module, data)
if dict.info.has_key('Description'):
desc = dict.info['Description']
if string.find(desc, "DEPRECATED") != -1:
output.write(" <deprecated/>\n")
ids = dict.macros.keys()
ids.sort()
for id in uniq(ids):
            # Macros are sometimes used to masquerade as other types.
if dict.functions.has_key(id):
continue
if dict.variables.has_key(id):
continue
if dict.typedefs.has_key(id):
continue
if dict.structs.has_key(id):
continue
if dict.enums.has_key(id):
continue
output.write(" <exports symbol='%s' type='macro'/>\n" % (id))
ids = dict.enums.keys()
ids.sort()
for id in uniq(ids):
output.write(" <exports symbol='%s' type='enum'/>\n" % (id))
ids = dict.typedefs.keys()
ids.sort()
for id in uniq(ids):
output.write(" <exports symbol='%s' type='typedef'/>\n" % (id))
ids = dict.structs.keys()
ids.sort()
for id in uniq(ids):
output.write(" <exports symbol='%s' type='struct'/>\n" % (id))
ids = dict.variables.keys()
ids.sort()
for id in uniq(ids):
output.write(" <exports symbol='%s' type='variable'/>\n" % (id))
ids = dict.functions.keys()
ids.sort()
for id in uniq(ids):
output.write(" <exports symbol='%s' type='function'/>\n" % (id))
output.write(" </file>\n")
def serialize_xrefs_files(self, output):
headers = self.headers.keys()
headers.sort()
for file in headers:
module = self.modulename_file(file)
output.write(" <file name='%s'>\n" % (module))
dict = self.headers[file]
ids = uniq(dict.functions.keys() + dict.variables.keys() + \
dict.macros.keys() + dict.typedefs.keys() + \
dict.structs.keys() + dict.enums.keys())
ids.sort()
for id in ids:
output.write(" <ref name='%s'/>\n" % (id))
output.write(" </file>\n")
pass
def serialize_xrefs_functions(self, output):
funcs = {}
for name in self.idx.functions.keys():
id = self.idx.functions[name]
try:
(ret, params, desc) = id.info
for param in params:
if param[0] == 'void':
continue
if funcs.has_key(param[0]):
funcs[param[0]].append(name)
else:
funcs[param[0]] = [name]
except:
pass
typ = funcs.keys()
typ.sort()
for type in typ:
if type == '' or type == 'void' or type == "int" or \
type == "char *" or type == "const char *" :
continue
output.write(" <type name='%s'>\n" % (type))
ids = funcs[type]
ids.sort()
pid = '' # not sure why we have dups, but get rid of them!
for id in ids:
if id != pid:
output.write(" <ref name='%s'/>\n" % (id))
pid = id
output.write(" </type>\n")
def serialize_xrefs_constructors(self, output):
funcs = {}
for name in self.idx.functions.keys():
id = self.idx.functions[name]
try:
(ret, params, desc) = id.info
if ret[0] == "void":
continue
if funcs.has_key(ret[0]):
funcs[ret[0]].append(name)
else:
funcs[ret[0]] = [name]
except:
pass
typ = funcs.keys()
typ.sort()
for type in typ:
if type == '' or type == 'void' or type == "int" or \
type == "char *" or type == "const char *" :
continue
output.write(" <type name='%s'>\n" % (type))
ids = funcs[type]
ids.sort()
for id in ids:
output.write(" <ref name='%s'/>\n" % (id))
output.write(" </type>\n")
def serialize_xrefs_alpha(self, output):
letter = None
ids = self.idx.identifiers.keys()
ids.sort()
for id in ids:
if id[0] != letter:
if letter != None:
output.write(" </letter>\n")
letter = id[0]
output.write(" <letter name='%s'>\n" % (letter))
output.write(" <ref name='%s'/>\n" % (id))
if letter != None:
output.write(" </letter>\n")
def serialize_xrefs_references(self, output):
typ = self.idx.identifiers.keys()
typ.sort()
for id in typ:
idf = self.idx.identifiers[id]
module = idf.header
output.write(" <reference name='%s' href='%s'/>\n" % (id,
'html/' + self.basename + '-' +
self.modulename_file(module) + '.html#' +
id))
def serialize_xrefs_index(self, output):
index = self.xref
typ = index.keys()
typ.sort()
letter = None
count = 0
chunk = 0
chunks = []
for id in typ:
            if len(index[id]) > 30:  # skip words referenced too widely to index usefully
continue
if id[0] != letter:
if letter == None or count > 200:
if letter != None:
output.write(" </letter>\n")
output.write(" </chunk>\n")
count = 0
chunks.append(["chunk%s" % (chunk -1), first_letter, letter])
output.write(" <chunk name='chunk%s'>\n" % (chunk))
first_letter = id[0]
chunk = chunk + 1
elif letter != None:
output.write(" </letter>\n")
letter = id[0]
output.write(" <letter name='%s'>\n" % (letter))
output.write(" <word name='%s'>\n" % (id))
            tokens = index[id]
tokens.sort()
tok = None
for token in tokens:
if tok == token:
continue
tok = token
output.write(" <ref name='%s'/>\n" % (token))
count = count + 1
output.write(" </word>\n")
if letter != None:
output.write(" </letter>\n")
output.write(" </chunk>\n")
if count != 0:
chunks.append(["chunk%s" % (chunk -1), first_letter, letter])
output.write(" <chunks>\n")
for ch in chunks:
output.write(" <chunk name='%s' start='%s' end='%s'/>\n" % (
ch[0], ch[1], ch[2]))
output.write(" </chunks>\n")
def serialize_xrefs(self, output):
output.write(" <references>\n")
self.serialize_xrefs_references(output)
output.write(" </references>\n")
output.write(" <alpha>\n")
self.serialize_xrefs_alpha(output)
output.write(" </alpha>\n")
output.write(" <constructors>\n")
self.serialize_xrefs_constructors(output)
output.write(" </constructors>\n")
output.write(" <functions>\n")
self.serialize_xrefs_functions(output)
output.write(" </functions>\n")
output.write(" <files>\n")
self.serialize_xrefs_files(output)
output.write(" </files>\n")
output.write(" <index>\n")
self.serialize_xrefs_index(output)
output.write(" </index>\n")
def serialize(self):
filename = "%s-api.xml" % self.name
print "Saving XML description %s" % (filename)
output = open(filename, "w")
output.write('<?xml version="1.0" encoding="ISO-8859-1"?>\n')
output.write("<api name='%s'>\n" % self.name)
output.write(" <files>\n")
headers = self.headers.keys()
headers.sort()
for file in headers:
self.serialize_exports(output, file)
output.write(" </files>\n")
output.write(" <symbols>\n")
macros = self.idx.macros.keys()
macros.sort()
for macro in macros:
self.serialize_macro(output, macro)
enums = self.idx.enums.keys()
enums.sort()
for enum in enums:
self.serialize_enum(output, enum)
typedefs = self.idx.typedefs.keys()
typedefs.sort()
for typedef in typedefs:
self.serialize_typedef(output, typedef)
variables = self.idx.variables.keys()
variables.sort()
for variable in variables:
self.serialize_variable(output, variable)
functions = self.idx.functions.keys()
functions.sort()
for function in functions:
self.serialize_function(output, function)
output.write(" </symbols>\n")
output.write("</api>\n")
output.close()
filename = "%s-refs.xml" % self.name
print "Saving XML Cross References %s" % (filename)
output = open(filename, "w")
output.write('<?xml version="1.0" encoding="ISO-8859-1"?>\n')
output.write("<apirefs name='%s'>\n" % self.name)
self.serialize_xrefs(output)
output.write("</apirefs>\n")
output.close()
def rebuild():
builder = None
if glob.glob("parser.c") != [] :
print "Rebuilding API description for libxml2"
builder = docBuilder("libxml2", [".", "."],
["xmlwin32version.h", "tst.c"])
elif glob.glob("../parser.c") != [] :
print "Rebuilding API description for libxml2"
builder = docBuilder("libxml2", ["..", "../include/libxml"],
["xmlwin32version.h", "tst.c"])
elif glob.glob("../libxslt/transform.c") != [] :
print "Rebuilding API description for libxslt"
builder = docBuilder("libxslt", ["../libxslt"],
["win32config.h", "libxslt.h", "tst.c"])
else:
print "rebuild() failed, unable to guess the module"
return None
builder.scan()
builder.analyze()
builder.serialize()
if glob.glob("../libexslt/exslt.c") != [] :
extra = docBuilder("libexslt", ["../libexslt"], ["libexslt.h"])
extra.scan()
extra.analyze()
extra.serialize()
return builder
#
# for debugging the parser
#
def parse(filename):
parser = CParser(filename)
idx = parser.parse()
return idx
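# Quick interactive check of the debugging helper above (the header path
# is illustrative):
#   idx = parse("../include/libxml/parser.h")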
if __name__ == "__main__":
if len(sys.argv) > 1:
debug = 1
parse(sys.argv[1])
else:
rebuild()
|
{
"content_hash": "fe58791f0220df07c613efe89b75098b",
"timestamp": "",
"source": "github",
"line_count": 2125,
"max_line_length": 117,
"avg_line_length": 30.21976470588235,
"alnum_prop": 0.543532086519146,
"repo_name": "relokin/parsec",
"id": "1e73725064fce7e1c17e67b62980c4c1b670c46c",
"size": "64415",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "pkgs/libs/libxml2/src/doc/apibuild.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Ada",
"bytes": "89080"
},
{
"name": "Assembly",
"bytes": "2965111"
},
{
"name": "Awk",
"bytes": "142"
},
{
"name": "C",
"bytes": "78031615"
},
{
"name": "C#",
"bytes": "54113"
},
{
"name": "C++",
"bytes": "14801114"
},
{
"name": "CLIPS",
"bytes": "6933"
},
{
"name": "CSS",
"bytes": "20961"
},
{
"name": "Emacs Lisp",
"bytes": "9437"
},
{
"name": "FORTRAN",
"bytes": "6058"
},
{
"name": "Java",
"bytes": "291"
},
{
"name": "JavaScript",
"bytes": "37584"
},
{
"name": "Logos",
"bytes": "108920"
},
{
"name": "Lua",
"bytes": "9"
},
{
"name": "Objective-C",
"bytes": "362901"
},
{
"name": "PHP",
"bytes": "20640"
},
{
"name": "Pascal",
"bytes": "40318"
},
{
"name": "Perl",
"bytes": "2133525"
},
{
"name": "Pike",
"bytes": "1350"
},
{
"name": "Prolog",
"bytes": "3350"
},
{
"name": "Python",
"bytes": "871836"
},
{
"name": "Rebol",
"bytes": "106436"
},
{
"name": "Ruby",
"bytes": "1237"
},
{
"name": "Scheme",
"bytes": "4249"
},
{
"name": "Shell",
"bytes": "3229646"
},
{
"name": "Tcl",
"bytes": "2809"
},
{
"name": "VimL",
"bytes": "7550"
},
{
"name": "XSLT",
"bytes": "167372"
},
{
"name": "eC",
"bytes": "4568"
}
],
"symlink_target": ""
}
|
def write_file(filename, value):
with open(filename, "w") as f:
f.write(value)
def write_ignore_busy(filename, value):
try:
write_file(filename, value)
except IOError as e:
        if e.errno == 16:  # EBUSY: device or resource busy
pass
else:
raise e
def read_file(filename):
with open(filename) as f:
return f.read()
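# Usage sketch for the helpers above (the sysfs path is hypothetical):
#   write_ignore_busy("/sys/class/leds/led0/brightness", "1")
#   print(read_file("/sys/class/leds/led0/brightness"))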
|
{
"content_hash": "36e60a7e73562c041e79e4c1de8a4bca",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 39,
"avg_line_length": 20.055555555555557,
"alnum_prop": 0.556786703601108,
"repo_name": "K900/intel_iot",
"id": "6ed1e1ef01998bce13ded1418f032dea5eced589",
"size": "361",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "intel_iot/util/file.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22665"
}
],
"symlink_target": ""
}
|
"""
Support for Zyxel Keenetic NDMS2 based routers.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/device_tracker.keenetic_ndms2/
"""
import logging
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.device_tracker import (
DOMAIN, PLATFORM_SCHEMA, DeviceScanner)
from homeassistant.const import (
CONF_HOST, CONF_PORT, CONF_PASSWORD, CONF_USERNAME
)
REQUIREMENTS = ['ndms2_client==0.0.4']
_LOGGER = logging.getLogger(__name__)
# Interface name to track devices for. Most users will not need to change
# it from the default 'Home'; this avoids tracking guest Wi-Fi clients and
# the router itself.
CONF_INTERFACE = 'interface'
DEFAULT_INTERFACE = 'Home'
DEFAULT_PORT = 23
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_INTERFACE, default=DEFAULT_INTERFACE): cv.string,
})
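# A minimal configuration.yaml entry matching the schema above (host and
# credentials are illustrative):
#
# device_tracker:
#   - platform: keenetic_ndms2
#     host: 192.168.1.1
#     username: admin
#     password: YOUR_PASSWORD
#     interface: Home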
def get_scanner(_hass, config):
"""Validate the configuration and return a Nmap scanner."""
scanner = KeeneticNDMS2DeviceScanner(config[DOMAIN])
return scanner if scanner.success_init else None
class KeeneticNDMS2DeviceScanner(DeviceScanner):
"""This class scans for devices using keenetic NDMS2 web interface."""
def __init__(self, config):
"""Initialize the scanner."""
from ndms2_client import Client, TelnetConnection
self.last_results = []
self._interface = config[CONF_INTERFACE]
self._client = Client(TelnetConnection(
config.get(CONF_HOST),
config.get(CONF_PORT),
config.get(CONF_USERNAME),
config.get(CONF_PASSWORD),
))
self.success_init = self._update_info()
_LOGGER.info("Scanner initialized")
def scan_devices(self):
"""Scan for new devices and return a list with found device IDs."""
self._update_info()
return [device.mac for device in self.last_results]
def get_device_name(self, device):
"""Return the name of the given device or None if we don't know."""
name = next((
result.name for result in self.last_results
if result.mac == device), None)
return name
def get_extra_attributes(self, device):
"""Return the IP of the given device."""
attributes = next((
{'ip': result.ip} for result in self.last_results
if result.mac == device), {})
return attributes
def _update_info(self):
"""Get ARP from keenetic router."""
_LOGGER.debug("Fetching devices from router...")
from ndms2_client import ConnectionException
try:
self.last_results = [
dev
for dev in self._client.get_devices()
if dev.interface == self._interface
]
_LOGGER.debug("Successfully fetched data from router")
return True
except ConnectionException:
_LOGGER.error("Error fetching data from router")
return False
|
{
"content_hash": "dbd27c0108a46f2b7a16083bee6497fb",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 75,
"avg_line_length": 31.611650485436893,
"alnum_prop": 0.6514127764127764,
"repo_name": "persandstrom/home-assistant",
"id": "4be6d96eb5ac4c03bd172843b02a0b76725782cb",
"size": "3256",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "homeassistant/components/device_tracker/keenetic_ndms2.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1067"
},
{
"name": "Python",
"bytes": "11745210"
},
{
"name": "Ruby",
"bytes": "518"
},
{
"name": "Shell",
"bytes": "16652"
}
],
"symlink_target": ""
}
|
from pyinfra import host, local
if "misc_servers" in host.groups:
local.include("apt.py")
local.include("npm.py")
local.include("files.py")
local.include("server.py")
local.include("virtualbox/virtualbox.py")
if "docker_servers" in host.groups:
local.include("docker_ce.py")
|
{
"content_hash": "75c0537de7ea68ab878a7f22a76564b0",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 45,
"avg_line_length": 27.363636363636363,
"alnum_prop": 0.6877076411960132,
"repo_name": "Fizzadar/pyinfra",
"id": "a2aa48b259e58229ba0d1501442bb778e1b82b62",
"size": "301",
"binary": false,
"copies": "1",
"ref": "refs/heads/2.x",
"path": "examples/do_deploy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jinja",
"bytes": "57"
},
{
"name": "Python",
"bytes": "861601"
},
{
"name": "Shell",
"bytes": "3448"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
fwd_operations = [
'ALTER TABLE `auth_user` MODIFY `password` varchar(128) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `auth_user` MODIFY `username` varchar(30) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `auth_user` MODIFY `first_name` varchar(30) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `auth_user` MODIFY `last_name` varchar(30) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `auth_user` MODIFY `email` varchar(254) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `django_admin_log` MODIFY `object_id` longtext CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `django_admin_log` MODIFY `object_repr` varchar(200) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `django_admin_log` MODIFY `change_message` longtext CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `huntserver_hunt` MODIFY `hunt_name` varchar(200) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `huntserver_hunt` MODIFY `location` varchar(100) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `huntserver_hunt` MODIFY `template` longtext CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `huntserver_huntassetfile` MODIFY `file` varchar(100) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `huntserver_message` MODIFY `text` varchar(400) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `huntserver_person` MODIFY `phone` varchar(20) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `huntserver_person` MODIFY `comments` varchar(400) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `huntserver_person` MODIFY `allergies` varchar(400) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `huntserver_puzzle` MODIFY `puzzle_name` varchar(200) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `huntserver_puzzle` MODIFY `puzzle_id` varchar(8) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `huntserver_puzzle` MODIFY `answer` varchar(100) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `huntserver_puzzle` MODIFY `link` varchar(200) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `huntserver_response` MODIFY `regex` varchar(400) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `huntserver_response` MODIFY `text` varchar(400) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `huntserver_submission` MODIFY `submission_text` varchar(100) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `huntserver_submission` MODIFY `response_text` varchar(400) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `huntserver_team` MODIFY `team_name` varchar(200) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `huntserver_team` MODIFY `location` varchar(80) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `huntserver_team` MODIFY `join_code` varchar(5) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `huntserver_unlockable` MODIFY `content_type` varchar(3) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `huntserver_unlockable` MODIFY `content` varchar(500) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
]
reverse_operations = [
'ALTER TABLE `auth_user` MODIFY `password` varchar(128) ',
'ALTER TABLE `auth_user` MODIFY `username` varchar(30) ',
'ALTER TABLE `auth_user` MODIFY `first_name` varchar(30) ',
'ALTER TABLE `auth_user` MODIFY `last_name` varchar(30) ',
'ALTER TABLE `auth_user` MODIFY `email` varchar(254) ',
'ALTER TABLE `django_admin_log` MODIFY `object_id` longtext ',
'ALTER TABLE `django_admin_log` MODIFY `object_repr` varchar(200) ',
'ALTER TABLE `django_admin_log` MODIFY `change_message` longtext ',
'ALTER TABLE `huntserver_hunt` MODIFY `hunt_name` varchar(200) ',
'ALTER TABLE `huntserver_hunt` MODIFY `location` varchar(100) ',
'ALTER TABLE `huntserver_hunt` MODIFY `template` longtext ',
'ALTER TABLE `huntserver_huntassetfile` MODIFY `file` varchar(100) ',
'ALTER TABLE `huntserver_message` MODIFY `text` varchar(400) ',
'ALTER TABLE `huntserver_person` MODIFY `phone` varchar(20) ',
'ALTER TABLE `huntserver_person` MODIFY `comments` varchar(400) ',
'ALTER TABLE `huntserver_person` MODIFY `allergies` varchar(400) ',
'ALTER TABLE `huntserver_puzzle` MODIFY `puzzle_name` varchar(200) ',
'ALTER TABLE `huntserver_puzzle` MODIFY `puzzle_id` varchar(8) ',
'ALTER TABLE `huntserver_puzzle` MODIFY `answer` varchar(100) ',
'ALTER TABLE `huntserver_puzzle` MODIFY `link` varchar(200) ',
'ALTER TABLE `huntserver_response` MODIFY `regex` varchar(400) ',
'ALTER TABLE `huntserver_response` MODIFY `text` varchar(400) ',
'ALTER TABLE `huntserver_submission` MODIFY `submission_text` varchar(100) ',
'ALTER TABLE `huntserver_submission` MODIFY `response_text` varchar(400) ',
'ALTER TABLE `huntserver_team` MODIFY `team_name` varchar(200) ',
'ALTER TABLE `huntserver_team` MODIFY `location` varchar(80) ',
'ALTER TABLE `huntserver_team` MODIFY `join_code` varchar(5) ',
'ALTER TABLE `huntserver_unlockable` MODIFY `content_type` varchar(3) ',
'ALTER TABLE `huntserver_unlockable` MODIFY `content` varchar(500) ',
]
def forwards(apps, schema_editor):
if not schema_editor.connection.vendor.startswith('mysql'):
return
for command in fwd_operations:
schema_editor.execute(command)
def backwards(apps, schema_editor):
if not schema_editor.connection.vendor.startswith('mysql'):
return
    for command in reverse_operations:
schema_editor.execute(command)
class Migration(migrations.Migration):
dependencies = [
('huntserver', '0021_auto_20180402_2224'),
]
operations = [
migrations.RunPython(forwards, backwards, atomic=False)
]
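# Applying or rolling back this migration (illustrative manage.py invocations):
#   python manage.py migrate huntserver 0022_switch_to_utf8mb4_columns
#   python manage.py migrate huntserver 0021_auto_20180402_2224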
|
{
"content_hash": "3fe0a1a2b1786456dd6939548bce80e3",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 124,
"avg_line_length": 63.33684210526316,
"alnum_prop": 0.7266079441582184,
"repo_name": "dlareau/puzzlehunt_server",
"id": "aa1812f0fc8a5510d0598d437bea1ade6056f017",
"size": "6041",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "huntserver/migrations/0022_switch_to_utf8mb4_columns.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6291"
},
{
"name": "Dockerfile",
"bytes": "391"
},
{
"name": "HTML",
"bytes": "89184"
},
{
"name": "JavaScript",
"bytes": "33484"
},
{
"name": "Python",
"bytes": "292406"
},
{
"name": "Shell",
"bytes": "920"
}
],
"symlink_target": ""
}
|
"""
Django settings for reactlibapp project.
Generated by 'django-admin startproject' using Django 1.8.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import sys
import dotenv
from datetime import timedelta
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
dotenv_path = os.path.normpath(os.getcwd() + os.sep + os.pardir + '/.env')
dotenv.load(dotenv_path)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'crispy_forms',
'rest_framework.authtoken',
'rest_framework',
'rest_framework_swagger',
'libraryapp.apps.LibraryappConfig',
'libraryapi.apps.LibraryapiConfig',
'webpack_loader',
)
MIDDLEWARE = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
'libraryapi.jwt_authentication.JWTAuthenticationMiddleware',
]
ROOT_URLCONF = 'reactlibapp.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'libraryapp.context_processors.set_environment',
],
},
},
]
WSGI_APPLICATION = 'reactlibapp.wsgi.application'
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en'
TIME_ZONE = 'Africa/Lagos'
USE_I18N = True
USE_L10N = True
USE_TZ = True
REST_FRAMEWORK = {
'DEFAULT_PARSER_CLASSES': (
'rest_framework.parsers.JSONParser',
'rest_framework.parsers.FormParser',
'rest_framework.parsers.MultiPartParser',
),
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
'rest_framework.renderers.BrowsableAPIRenderer',
),
'DEFAULT_FILTER_BACKENDS': ('rest_framework.filters.DjangoFilterBackend',),
'PAGINATE_BY': 10, # Default to 10
'PAGINATE_BY_PARAM': 'page_size', # Allow client to override, using `?page_size=xxx`.
'MAX_PAGINATE_BY': 100 # Maximum limit allowed when using `?page_size=xxx`.
}
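# Example of the client-side pagination override configured above (the
# endpoint is illustrative): GET /api/books/?page_size=25 returns at most
# 25 results; page_size is capped at 100.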
JWT_AUTH = {
'JWT_RESPONSE_PAYLOAD_HANDLER':
'libraryapi.authentication.jwt_response_payload_handler',
'JWT_EXPIRATION_DELTA': timedelta(days=1),
'JWT_ALLOW_REFRESH': True,
'JWT_REFRESH_EXPIRATION_DELTA': timedelta(days=3),
}
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
WEBPACK_LOADER = {
'DEFAULT': {
'BUNDLE_DIR_NAME': '/',
'STATS_FILE': os.path.join(BASE_DIR, 'client/webpack-stats.json'),
# 'IGNORE': ['.+\.hot-update.js', '.+\.map']
}
}
|
{
"content_hash": "83ee08e18d271712dc3394fd3e83abb6",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 91,
"avg_line_length": 28.830065359477125,
"alnum_prop": 0.6871457719338019,
"repo_name": "andela-sjames/Django-ReactJS-Library-App",
"id": "0694b63e9212d27b2184a0536a3fde02394c82f5",
"size": "4411",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "reactlibapp/reactlibapp/settings/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5647"
},
{
"name": "HTML",
"bytes": "1815"
},
{
"name": "Python",
"bytes": "48881"
},
{
"name": "Shell",
"bytes": "11132"
},
{
"name": "TypeScript",
"bytes": "34383"
}
],
"symlink_target": ""
}
|