text stringlengths 4 1.02M | meta dict |
|---|---|
import os
import shutil
import unittest
from unittest import TestCase
from wopmars.tests.resource.wrapper.FooWrapper10 import FooWrapper10
from wopmars.tests.resource.wrapper.FooWrapper4 import FooWrapper4
from wopmars.tests.resource.wrapper.FooWrapper5 import FooWrapper5
from wopmars.tests.resource.wrapper.FooWrapper6 import FooWrapper6
from wopmars.tests.resource.wrapper.FooWrapper7 import FooWrapper7
from wopmars.tests.resource.wrapper.FooWrapper8 import FooWrapper8
from wopmars.tests.resource.wrapper.FooWrapper9 import FooWrapper9
from wopmars.SQLManager import SQLManager
from wopmars.models.TableInputOutputInformation import TableInputOutputInformation
from wopmars.models.FileInputOutputInformation import FileInputOutputInformation
from wopmars.models.TypeInputOrOutput import TypeInputOrOutput
from wopmars.DAG import DAG
from wopmars.Parser import Parser
from wopmars.utils.OptionManager import OptionManager
from wopmars.utils.PathManager import PathManager
from wopmars.utils.WopMarsException import WopMarsException
class TestParser(TestCase):
    """Integration tests for :class:`Parser`.

    Builds the expected workflow DAG by hand out of file/table IO objects
    and tool wrappers, then checks that parsing the example definition
    file produces an equal DAG, and that a cyclic definition is rejected.
    """

    def setUp(self):
        # Full test fixture: options, schema and the two IO "type" rows
        # (input / output) that every IO-information object references.
        OptionManager.initial_test_setup()  # Set tests arguments
        SQLManager.instance().create_all()  # Create database with tables
        session = SQLManager.instance().get_session()
        session.get_or_create(TypeInputOrOutput, defaults={"is_input": True}, is_input=True)
        session.get_or_create(TypeInputOrOutput, defaults={"is_input": False}, is_input=False)
        session.commit()
        self.__test_path = PathManager.get_test_path()
        # self.__test_path = PathManager.get_package_path()
        self.__parser = Parser()

    def tearDown(self):
        # Tear everything down so each test starts from a clean database,
        # option set and singleton state.
        SQLManager.instance().get_session().close()
        SQLManager.instance().drop_all()
        # PathManager.dir_content_remove("outdir")
        shutil.rmtree("outdir", ignore_errors=True)
        OptionManager._drop()
        SQLManager._drop()

    def test_parse(self):
        """Parse the example workflow and compare against a hand-built DAG;
        then check that a non-DAG definition raises WopMarsException."""
        OptionManager.initial_test_setup()
        # The good --------------------------:
        input_entry = TypeInputOrOutput(is_input=True)
        output_entry = TypeInputOrOutput(is_input=False)
        # File IO objects: one per input/output slot of every rule in
        # resource/wopfile/example_def_file1.yml.
        f1 = FileInputOutputInformation(file_key="input1", path="resource/input_files/input_file1.txt")
        f1.relation_file_or_tableioinfo_to_typeio = input_entry
        f2 = FileInputOutputInformation(file_key="output1", path="outdir/output_file1.txt")
        f2.relation_file_or_tableioinfo_to_typeio = output_entry
        f3 = FileInputOutputInformation(file_key="input1", path="outdir/output_file1.txt")
        f3.relation_file_or_tableioinfo_to_typeio = input_entry
        f3bis = FileInputOutputInformation(file_key="input1", path="outdir/output_file1.txt")
        f3bis.relation_file_or_tableioinfo_to_typeio = input_entry
        f4 = FileInputOutputInformation(file_key="output1", path="outdir/output_file2.txt")
        f4.relation_file_or_tableioinfo_to_typeio = output_entry
        f5 = FileInputOutputInformation(file_key="output1", path="outdir/output_file3.txt")
        f5.relation_file_or_tableioinfo_to_typeio = output_entry
        f6 = FileInputOutputInformation(file_key="output2", path="outdir/output_file4.txt")
        f6.relation_file_or_tableioinfo_to_typeio = output_entry
        f7 = FileInputOutputInformation(file_key="input1", path="outdir/output_file3.txt")
        f7.relation_file_or_tableioinfo_to_typeio = input_entry
        f8 = FileInputOutputInformation(file_key="input2", path="outdir/output_file2.txt")
        f8.relation_file_or_tableioinfo_to_typeio = input_entry
        f9 = FileInputOutputInformation(file_key="output1", path="outdir/output_file5.txt")
        f9.relation_file_or_tableioinfo_to_typeio = output_entry
        f10 = FileInputOutputInformation(file_key="input1", path="outdir/output_file4.txt")
        f10.relation_file_or_tableioinfo_to_typeio = input_entry
        f11 = FileInputOutputInformation(file_key="output1", path="outdir/output_file6.txt")
        f11.relation_file_or_tableioinfo_to_typeio = output_entry
        f12 = FileInputOutputInformation(file_key="input1", path="outdir/output_file1.txt")
        f12.relation_file_or_tableioinfo_to_typeio = input_entry
        f13 = FileInputOutputInformation(file_key="input2", path="outdir/output_file5.txt")
        f13.relation_file_or_tableioinfo_to_typeio = input_entry
        f14 = FileInputOutputInformation(file_key="input3", path="outdir/output_file6.txt")
        f14.relation_file_or_tableioinfo_to_typeio = input_entry
        f15 = FileInputOutputInformation(file_key="output1", path="outdir/output_file7.txt")
        f15.relation_file_or_tableioinfo_to_typeio = output_entry
        # Table IO objects: FooBase/FooBase2, each produced by one rule and
        # consumed ("bis") by another.
        t1 = TableInputOutputInformation(model_py_path="FooBase", table_key="FooBase", table_name="FooBase")
        t1.relation_file_or_tableioinfo_to_typeio = output_entry
        t1bis = TableInputOutputInformation(model_py_path="FooBase", table_key="FooBase", table_name="FooBase")
        t1bis.relation_file_or_tableioinfo_to_typeio = input_entry
        t2 = TableInputOutputInformation(model_py_path="FooBase2", table_key="FooBase2", table_name="FooBase2")
        t2.relation_file_or_tableioinfo_to_typeio = output_entry
        t2bis = TableInputOutputInformation(model_py_path="FooBase2", table_key="FooBase2", table_name="FooBase2")
        t2bis.relation_file_or_tableioinfo_to_typeio = input_entry
        # Tool wrappers (the DAG nodes), wired to their IO objects.
        tw1 = FooWrapper4(rule_name="rule1")
        tw1.relation_toolwrapper_to_fileioinfo.extend([f1, f2])
        tw2 = FooWrapper5(rule_name="rule2")
        tw2.relation_toolwrapper_to_fileioinfo.extend([f3, f4])
        tw2.relation_toolwrapper_to_tableioinfo.extend([t1])
        tw3 = FooWrapper6(rule_name="rule3")
        tw3.relation_toolwrapper_to_fileioinfo.extend([f3bis, f5, f6])
        tw4 = FooWrapper7(rule_name="rule4")
        tw4.relation_toolwrapper_to_tableioinfo.extend([t1bis, t2])
        tw5 = FooWrapper8(rule_name="rule5")
        tw5.relation_toolwrapper_to_fileioinfo.extend([f8, f7, f9])
        tw6 = FooWrapper9(rule_name="rule6")
        tw6.relation_toolwrapper_to_fileioinfo.extend([f10, f11])
        tw6.relation_toolwrapper_to_tableioinfo.extend([t2bis])
        tw7 = FooWrapper10(rule_name="rule7")
        tw7.relation_toolwrapper_to_fileioinfo.extend([f12, f13, f14, f15])
        set_toolwrappers = set([tw1, tw2, tw3, tw4, tw5, tw6, tw7])
        OptionManager.instance()["--dot"] = None
        dag_expected = DAG(set_toolwrappers)
        OptionManager.instance()["--wopfile"] = os.path.join(self.__test_path, "resource/wopfile/example_def_file1.yml")
        dag_obtained = self.__parser.parse()
        self.assertEqual(dag_expected, dag_obtained)
        # The bad: a workflow definition containing a cycle must be refused.
        OptionManager.instance()["--wopfile"] = os.path.join(self.__test_path, "resource/wopfile/example_def_file_not_a_dag.yml")
        with self.assertRaises(WopMarsException):
            self.__parser.parse()
        # Verify the dot file ----------------:
        OptionManager.instance()["--wopfile"] = os.path.join(self.__test_path, "resource/wopfile/example_def_file1.yml")
        #dot_path = os.path.join(self.__package_path, "test_bak.dot")
        #OptionManager.instance()["--dot"] = dot_path
        self.__parser.parse()
        #self.assertTrue(os.path.isfile(dot_path))
        #os.remove(dot_path)
        #os.remove(dot_path[:-4] + ".ps")
if __name__ == '__main__':
    # Allow running this test module directly with the unittest runner.
    unittest.main()
| {
"content_hash": "835d192303e6a465ccea1d24b4c9f521",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 129,
"avg_line_length": 48.82236842105263,
"alnum_prop": 0.7005794367335939,
"repo_name": "aitgon/wopmars",
"id": "3691e8781535f90240098eedbfd984e9893aa1af",
"size": "7421",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wopmars/tests/test_parser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "338509"
},
{
"name": "Shell",
"bytes": "1526"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class TokenValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for the ``token`` property of ``parcats.stream``."""

    def __init__(self, plotly_name="token", parent_name="parcats.stream", **kwargs):
        # Pop the defaults out of kwargs first so explicit caller-supplied
        # values win over the hard-coded ones, then forward everything.
        options = {
            "edit_type": kwargs.pop("edit_type", "calc"),
            "no_blank": kwargs.pop("no_blank", True),
            "role": kwargs.pop("role", "info"),
            "strict": kwargs.pop("strict", True),
        }
        options.update(kwargs)
        super(TokenValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **options
        )
| {
"content_hash": "eefbc20474b830141bc3d435fb137a0e",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 84,
"avg_line_length": 38.57142857142857,
"alnum_prop": 0.5925925925925926,
"repo_name": "plotly/python-api",
"id": "eb75236212300dfb222c756bec4edcf4480678db",
"size": "540",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/parcats/stream/_token.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
import pytest
from six import iteritems
from datadog_checks.mesos_master import MesosMaster
from .common import BASIC_METRICS, CHECK_NAME, INSTANCE, not_windows_ci

# Module-level mark: skips every test in this file on Windows CI.
pytestmark = not_windows_ci
@pytest.mark.integration
@pytest.mark.usefixtures("dd_environment")
def test_check_integration(instance, aggregator):
    """Run the check once against a live dd_environment and verify that
    the expected metrics and service check were submitted."""
    check = MesosMaster('mesos_master', {}, [instance])
    check.check(instance)
    assert_metric_coverage(aggregator)
@pytest.mark.e2e
def test_check_e2e(dd_agent_check):
    """Run the check through a real Agent (e2e) and verify metric coverage.

    rate=True makes the Agent run the check twice so rate metrics appear.
    """
    aggregator = dd_agent_check(INSTANCE, rate=True)
    assert_metric_coverage(aggregator)
def assert_metric_coverage(aggregator):
    """Assert that *aggregator* saw every metric the mesos_master check
    can emit, that no unexpected metric was emitted, and that the
    can_connect service check reported OK.

    :param aggregator: the test aggregator fixture holding submitted data
    """
    check = MesosMaster(CHECK_NAME, {}, {})
    # Copy the imported list: appending to BASIC_METRICS directly would
    # mutate the shared module-level constant, so the extra metric names
    # would leak into every later caller of this helper.
    metrics = list(BASIC_METRICS)
    for d in (
        check.ROLE_RESOURCES_METRICS,
        check.CLUSTER_TASKS_METRICS,
        check.CLUSTER_SLAVES_METRICS,
        check.CLUSTER_RESOURCES_METRICS,
        check.CLUSTER_FRAMEWORK_METRICS,
        check.STATS_METRICS,
    ):
        # Each mapping value is a (metric_name, ...) tuple; only the name
        # is needed, so iterate values directly.
        for m in d.values():
            metrics.append(m[0])
    for m in metrics:
        aggregator.assert_metric(m)
    aggregator.assert_all_metrics_covered()
    aggregator.assert_service_check('mesos_master.can_connect', status=check.OK)
| {
"content_hash": "513abf4f28db28d25fb5c84c4d297c54",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 80,
"avg_line_length": 26.6,
"alnum_prop": 0.6992481203007519,
"repo_name": "DataDog/integrations-core",
"id": "5c308680cc8d2d4365a6a63e424636cffab09f7b",
"size": "1312",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mesos_master/tests/test_integration_e2e.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "578"
},
{
"name": "COBOL",
"bytes": "12312"
},
{
"name": "Dockerfile",
"bytes": "22998"
},
{
"name": "Erlang",
"bytes": "15518"
},
{
"name": "Go",
"bytes": "6988"
},
{
"name": "HCL",
"bytes": "4080"
},
{
"name": "HTML",
"bytes": "1318"
},
{
"name": "JavaScript",
"bytes": "1817"
},
{
"name": "Kotlin",
"bytes": "430"
},
{
"name": "Lua",
"bytes": "3489"
},
{
"name": "PHP",
"bytes": "20"
},
{
"name": "PowerShell",
"bytes": "2398"
},
{
"name": "Python",
"bytes": "13020828"
},
{
"name": "Roff",
"bytes": "359"
},
{
"name": "Ruby",
"bytes": "241"
},
{
"name": "Scala",
"bytes": "7000"
},
{
"name": "Shell",
"bytes": "83227"
},
{
"name": "Swift",
"bytes": "203"
},
{
"name": "TSQL",
"bytes": "29972"
},
{
"name": "TypeScript",
"bytes": "1019"
}
],
"symlink_target": ""
} |
"""This module contains Dyn Message Management accounts features. It's important
to note that any/all timestamps are expected as `datetime.datetime` instances
and will be returned as such.
"""
from datetime import datetime
from ..core import cleared_class_dict
from .utils import str_to_date, date_to_str, APIDict
from .errors import NoSuchAccountError
from .session import MMSession
__author__ = 'jnappi'
def get_all_accounts():
    """Return a list of all :class:`~dyn.mm.accounts.Account`'s accessible
    to the currently authenticated user.
    """
    response = MMSession.get_session().execute('/accounts', 'GET')
    # The email cap comes once per response, not per account record.
    cap = response['emailcap']
    return [
        Account(record.pop('username'), api=False, emailcap=cap, **record)
        for record in response['accounts']
    ]
def get_all_senders(start_index=0):
    """Return a list of all :class:`~dyn.mm.accounts.ApprovedSender`'s
    accessible to the currently authenticated user.

    :param start_index: pagination offset into the sender list
    """
    query = {'start_index': start_index}
    response = MMSession.get_session().execute('/senders', 'GET', query)
    return [
        ApprovedSender(record.pop('emailaddress'), api=False, **record)
        for record in response['senders']
    ]
def get_all_suppressions(startdate=None, enddate=None, startindex=0):
    """Return a list of all :class:`~dyn.mm.accounts.Suppression`'s.

    :param startdate: optional `datetime.datetime` lower bound
    :param enddate: optional upper bound; defaults to now when startdate
        is given
    :param startindex: pagination offset
    """
    query = {'start_index': startindex}
    if startdate:
        # NOTE(review): enddate is only sent when startdate is also given;
        # an enddate passed on its own is silently ignored — confirm this
        # matches the API's requirements.
        query['startdate'] = date_to_str(startdate)
        query['enddate'] = date_to_str(enddate or datetime.now())
    response = MMSession.get_session().execute('/suppressions', 'GET', query)
    results = []
    for record in response['suppressions']:
        results.append(Suppression(record.pop('emailaddress'), api=False,
                                   reasontype=record.pop('reasontype'),
                                   suppresstime=record.pop('suppresstime')))
    return results
class Account(object):
    """A Message Management account instance. password, companyname, and phone
    are required for creating a new account. To access an existing Account,
    simply provide the username of the account you wish to access.
    """
    # REST endpoint used for both creation (POST) and listing (via helpers).
    uri = '/accounts'

    def __init__(self, username, *args, **kwargs):
        """Create a new :class:`~dyn.mm.accounts.Account` object

        :param username: The username for this
            :class:`~dyn.mm.accounts.Account` - must be a valid email address,
            and must be unique among all other sub-accounts.
        :param password: :class:`~dyn.mm.accounts.Account` password to be
            assigned. May be passed as clear text or MD5-encrypted with "md5-"
            as a prefix
        :param companyname: Name of the company assigned to this
            :class:`~dyn.mm.accounts.Account`
        :param phone: Contact Phone number for this
            :class:`~dyn.mm.accounts.Account`
        :param address: The primary address associated with this
            :class:`~dyn.mm.accounts.Account`
        :param city: The City associated with this
            :class:`~dyn.mm.accounts.Account`
        :param state: The State associated with this
            :class:`~dyn.mm.accounts.Account`
        :param zipcode: The Zipcode associated with this
            :class:`~dyn.mm.accounts.Account`
        :param country: Two-letter English ISO 3166 country code
        :param timezone: The timezone of the account, in [+/-]h.mm format
        :param bounceurl: Bounce postback URL
        :param spamurl: Spam postback URL
        :param unsubscribeurl: Unsubscribe postback URL
        :param trackopens: Toggle open tracking (1 or 0).
        :param tracklinks: Toggle click tracking (1 or 0).
        :param trackunsubscribes: Toggle automatic list-unsubscribe support
            (1 or 0).
        :param generatenewapikey: Used to create a new API key for an existing
            account (1 or 0).
        """
        super(Account, self).__init__()
        self._username = username
        # Pre-declare every attribute the API may return so instances have a
        # consistent shape before any response data is applied.
        self._accountname = self._address = self._apikey = self._city = None
        self._companyname = self._contactname = self._country = None
        self._created = self._emailsent = self._max_sample_count = None
        self._phone = self._state = self._timezone = self._tracklinks = None
        self._trackopens = self._trackunsubscribes = self._usertype = None
        self._zipcode = self._password = self._emailcap = None
        if 'api' in kwargs:
            # Internal flag: the kwargs came from a prior API response.
            del kwargs['api']
            self._update(kwargs)
        elif len(args) + len(kwargs) == 0:
            # Only a username was given: fetch the existing account.
            self._get()
        else:
            # Additional arguments were given: create a new account.
            self._post(*args, **kwargs)
        self._xheaders = None

    def _post(self, password, companyname, phone, address=None, city=None,
              state=None, zipcode=None, country=None, timezone=None,
              bounceurl=None, spamurl=None, unsubscribeurl=None, trackopens=0,
              tracklinks=0, trackunsubscribes=0, generatenewapikey=0):
        """Create a new :class:`~dyn.mm.accounts.Account` on the Dyn Email
        System
        """
        self._password = password
        self._companyname = companyname
        self._phone = phone
        self._address = address
        self._city = city
        self._state = state
        self._zipcode = zipcode
        self._country = country
        self._timezone = timezone
        self._bounceurl = bounceurl
        self._spamurl = spamurl
        self._unsubscribeurl = unsubscribeurl
        self._trackopens = trackopens
        self._tracklinks = tracklinks
        self._trackunsubscribes = trackunsubscribes
        self._generatenewapikey = generatenewapikey
        # Only these fields may be sent to the API; anything else on the
        # instance (None values included) is filtered out below.
        valid = ('username', 'password', 'companyname', 'phone', 'address',
                 'city', 'state', 'zipcode', 'country', 'timezone', 'bounceurl',
                 'spamurl', 'unsubscribeurl', 'trackopens', 'tracklinks',
                 'trackunsubscribes', 'generatenewapikey')
        d = cleared_class_dict(self.__dict__)
        # Strip the leading underscore from attribute names to build API args.
        api_args = {x[1:]: d[x] for x in d if d[x] is not None and
                    x[1:] in valid}
        response = MMSession.get_session().execute(self.uri, 'POST', api_args)
        for key, val in response.items():
            setattr(self, '_' + key, val)

    def _get(self):
        """Retrieve an existing :class:`~dyn.mm.accounts.Account` from the Dyn
        Email System

        :raises NoSuchAccountError: if no account matches this username
        """
        accounts = get_all_accounts()
        found = False
        for account in accounts:
            if account.username == self._username:
                self._update(cleared_class_dict(account.__dict__))
                found = True
        if not found:
            raise NoSuchAccountError('No such Account')

    def _update(self, data):
        """Update the fields in this object with the provided data dict"""
        # NOTE(review): this issues a POST with *data* rather than only
        # assigning attributes locally (contrast ApprovedSender.__init__,
        # which just setattrs when built from API data) — confirm the extra
        # API call is intended when instances come from get_all_accounts().
        resp = MMSession.get_session().execute(self.uri, 'POST', data)
        for key, val in resp.items():
            setattr(self, '_' + key, val)

    @property
    def xheaders(self):
        """A list of the configured custom x-header field names associated
        with this :class:`~dyn.mm.accounts.Account`.
        """
        # Lazily fetched on first access, then cached.
        if self._xheaders is None:
            self._get_xheaders()
        return self._xheaders

    @xheaders.setter
    def xheaders(self, value):
        # Accept a plain dict (wrapped into an API-backed APIDict) or an
        # existing APIDict; any other type is silently ignored.
        if isinstance(value, dict) and not isinstance(value, APIDict):
            new_xheaders = APIDict(MMSession.get_session)
            for key, val in value.items():
                new_xheaders[key] = val
            new_xheaders.uri = '/accounts/xheaders'
            self._xheaders = new_xheaders
        elif isinstance(value, APIDict):
            self._xheaders = value

    @property
    def username(self):
        """The username identifying this :class:`~dyn.mm.accounts.Account`
        (a valid, unique email address)."""
        return self._username

    @username.setter
    def username(self, value):
        # Read-only: assignment is silently ignored.
        pass

    @property
    def account_name(self):
        """The account name reported by the API for this account."""
        return self._accountname

    @account_name.setter
    def account_name(self, value):
        # Read-only: assignment is silently ignored.
        pass

    @property
    def address(self):
        """The primary address associated with this
        :class:`~dyn.mm.accounts.Account`
        """
        return self._address

    @address.setter
    def address(self, value):
        pass

    @property
    def apikey(self):
        """The apikey for this account"""
        return self._apikey

    @apikey.setter
    def apikey(self, value):
        pass

    @property
    def city(self):
        """The City associated with this :class:`~dyn.mm.accounts.Account`"""
        return self._city

    @city.setter
    def city(self, value):
        pass

    @property
    def company_name(self):
        """The name of the company this :class:`~dyn.mm.accounts.Account` is
        registered under
        """
        return self._companyname

    @company_name.setter
    def company_name(self, value):
        pass

    @property
    def contact_name(self):
        """The name of the contact associated with this
        :class:`~dyn.mm.accounts.Account`
        """
        return self._contactname

    @contact_name.setter
    def contact_name(self, value):
        pass

    @property
    def country(self):
        """The Two letter country code associated with this
        :class:`~dyn.mm.accounts.Account`
        """
        return self._country

    @country.setter
    def country(self, value):
        pass

    @property
    def created(self):
        """Creation timestamp of this account, as reported by the API."""
        return self._created

    @created.setter
    def created(self, value):
        pass

    @property
    def email_sent(self):
        """Count of emails sent by this account, as reported by the API."""
        return self._emailsent

    @email_sent.setter
    def email_sent(self, value):
        pass

    @property
    def max_sample_count(self):
        """The max sample count reported by the API for this account."""
        return self._max_sample_count

    @max_sample_count.setter
    def max_sample_count(self, value):
        pass

    @property
    def phone(self):
        """The primary telephone number of the contact associated with this
        :class:`~dyn.mm.accounts.Account`"""
        return self._phone

    @phone.setter
    def phone(self, value):
        pass

    @property
    def state(self):
        """The state associated with this :class:`~dyn.mm.accounts.Account`"""
        return self._state

    @state.setter
    def state(self, value):
        pass

    @property
    def timezone(self):
        """The current timezone of the primary user of this
        :class:`~dyn.mm.accounts.Account`
        """
        return self._timezone

    @timezone.setter
    def timezone(self, value):
        pass

    @property
    def track_links(self):
        """A settings flag determining whether or not emails sent from this
        :class:`~dyn.mm.accounts.Account` will be monitored for followed links
        """
        # API stores 1/0; expose as a boolean.
        return self._tracklinks == 1

    @track_links.setter
    def track_links(self, value):
        pass

    @property
    def track_opens(self):
        """A settings flag determining whether or not emails sent from this
        :class:`~dyn.mm.accounts.Account` will be monitored for opens
        """
        return self._trackopens == 1

    @track_opens.setter
    def track_opens(self, value):
        pass

    @property
    def track_unsubscribes(self):
        """A settings flag determining whether or not emails sent from this
        :class:`~dyn.mm.accounts.Account` will be monitored for unsubscribes
        """
        return self._trackunsubscribes == 1

    @track_unsubscribes.setter
    def track_unsubscribes(self, value):
        pass

    @property
    def user_type(self):
        """The user type reported by the API for this account."""
        return self._usertype

    @user_type.setter
    def user_type(self, value):
        pass

    @property
    def zipcode(self):
        """The zipcode of this :class:`~dyn.mm.accounts.Account`
        """
        return self._zipcode

    @zipcode.setter
    def zipcode(self, value):
        pass

    @property
    def password(self):
        """The password for this :class:`~dyn.mm.accounts.Account`. Note:
        Unless you've just created this :class:`~dyn.mm.accounts.Account`,
        this field will be *None*.
        """
        return self._password

    @password.setter
    def password(self, value):
        pass

    @property
    def emailcap(self):
        """The email cap reported by the API for this account."""
        return self._emailcap

    @emailcap.setter
    def emailcap(self, value):
        pass

    def _get_xheaders(self):
        """Build the list of the configured custom x-header field names
        associated with this :class:`~dyn.mm.accounts.Account`.
        """
        uri = '/accounts/xheaders'
        api_args = {}
        response = MMSession.get_session().execute(uri, 'GET', api_args)
        xheaders = {}
        for key, val in response.items():
            xheaders[key] = val
        # Wrap in an APIDict so future mutations are pushed back to the API.
        self._xheaders = APIDict(MMSession.get_session, '/accounts/xheaders',
                                 xheaders)

    def delete(self):
        """Delete this :class:`~dyn.mm.accounts.Account` from the Dyn Email
        System
        """
        uri = '/accounts/delete'
        api_args = {'username': self._username}
        MMSession.get_session().execute(uri, 'POST', api_args)

    def __str__(self):
        """str override"""
        return '<MM Account>: {}'.format(self._username)
    __repr__ = __unicode__ = __str__
class ApprovedSender(object):
    """An email address that is able to be used in the "from" field of messages
    """
    # REST endpoint for creating/updating approved senders.
    uri = '/senders'

    def __init__(self, emailaddress, *args, **kwargs):
        """Create an :class:`~dyn.mm.accounts.ApprovedSender` object

        :param emailaddress: The email address of this
            :class:`~dyn.mm.accounts.ApprovedSender`
        :param seeding: 1 to opt this approved sender in for seeding; 0
            (default)to opt them out. Seeding is used to provide insight into
            inbox placement. See the `Approved Senders
            <https://help.dynect.net/email/control-panel/senders/>`_. page for
            more information.
        """
        self._emailaddress = emailaddress
        self._seeding = self._status = self._dkim = self._spf = None
        self._dkimval = None
        if 'api' in kwargs:
            # Built from a prior API response: apply fields locally only.
            del kwargs['api']
            for key, val in kwargs.items():
                setattr(self, '_' + key, val)
        elif len(args) + len(kwargs) > 0:
            # Extra arguments given: create/update via the API.
            self._post(*args, **kwargs)
        else:
            # Only an address given: look up the existing sender.
            self._get()

    def _post(self, seeding=0):
        """Create or update a :class:`~dyn.mm.accounts.ApprovedSender` on the
        Dyn Message Management System.

        :param seeding: 1 to opt in to seeding, 0 to opt out
        """
        self._seeding = seeding
        api_args = {'emailaddress': self._emailaddress,
                    'seeding': self._seeding}
        response = MMSession.get_session().execute(self.uri, 'POST', api_args)
        for key, val in response.items():
            setattr(self, '_' + key, val)

    def _get(self):
        """Get an existing :class:`~dyn.mm.accounts.ApprovedSender` from the
        Dyn Message Management System.
        """
        uri = '/senders/details'
        api_args = {'emailaddress': self._emailaddress}
        response = MMSession.get_session().execute(uri, 'GET', api_args)
        for key, val in response.items():
            setattr(self, '_' + key, val)

    def _update(self, api_args):
        """Update this :class:`~dyn.mm.accounts.ApprovedSender` object."""
        if 'emailaddress' not in api_args:
            api_args['emailaddress'] = self._emailaddress
        response = MMSession.get_session().execute(self.uri, 'POST', api_args)
        for key, val in response.items():
            setattr(self, '_' + key, val)

    @property
    def seeding(self):
        """1 to opt this approved sender in for seeding; 0 to opt them out
        (default). Seeding is used to provide insight into inbox placement.
        See the `Approved Senders
        <https://help.dynect.net/email/control-panel/senders/>`_. page for more
        information.
        """
        # NOTE(review): falls back to the *status* value when seeding is
        # unset — confirm the API really reports seeding via the status
        # endpoint in that case.
        if self._seeding is None:
            self._seeding = self.status
        return self._seeding

    @seeding.setter
    def seeding(self, value):
        # Only 0 and 1 are valid; any other value is silently ignored.
        if value in range(0, 2):
            self._update({'seeding': value})

    @property
    def status(self):
        """Retrieves the status of an approved sender -- whether or not it is
        ready for use in sending. This is most useful when you create a new
        approved sender and need to know for sure whether it is ready for use.

        Note: each access performs a GET request against the API.
        """
        uri = '/senders/status'
        args = {'emailaddress': self._emailaddress}
        response = MMSession.get_session().execute(uri, 'GET', args)
        # The response holds a single entry; keep its value as the status.
        for key in response:
            self._status = response[key]
        return self._status

    @status.setter
    def status(self, value):
        # Read-only: assignment is silently ignored.
        pass

    @property
    def dkim(self):
        """DKIM identifier for this approved sender - identifier may contain
        only aplanumeric characters, dashes, or underscores.
        """
        return self._dkim

    @dkim.setter
    def dkim(self, value):
        # Setting DKIM immediately POSTs the new identifier to the API.
        uri = '/senders/dkim'
        api_args = {'emailaddress': self._emailaddress, 'dkim': value}
        response = MMSession.get_session().execute(uri, 'POST', api_args)
        for key, val in response.items():
            setattr(self, '_' + key, val)

    @property
    def spf(self):
        """SPF for this :class:`~dyn.mm.accounts.ApprovedSender`"""
        return self._spf

    @spf.setter
    def spf(self, value):
        pass

    @property
    def dkimval(self):
        """DKIM val for this :class:`~dyn.mm.accounts.ApprovedSender`"""
        return self._dkimval

    @dkimval.setter
    def dkimval(self, value):
        pass

    def delete(self):
        """Delete this :class:`~dyn.mm.accounts.ApprovedSender`"""
        uri = '/senders/delete'
        api_args = {'emailaddress': self._emailaddress}
        MMSession.get_session().execute(uri, 'POST', api_args)

    def __str__(self):
        """str override"""
        return '<MM ApprovedSender>: {}'.format(self._emailaddress)
    __repr__ = __unicode__ = __str__
class Recipient(object):
    """An email address known to Message Management that can receive mail.

    Both the lookup (GET) and activation (POST) API calls take the same
    arguments, so the intent must be stated explicitly via *method*.
    """

    def __init__(self, emailaddress, method='GET'):
        """Create a :class:`~dyn.mm.accounts.Recipient` object.

        :param emailaddress: this recipient's email address
        :param method: 'GET' to look up an existing recipient's status;
            anything else activates a new recipient
        """
        self.emailaddress = emailaddress
        # Status fields populated by the recipients/status response.
        self.status = None
        self.unsuppressed = None
        self.pending_addition = None
        self.suppressed = None
        self.pending_removal = None
        if method != 'GET':
            self._post()
        else:
            self._get()

    def _get(self):
        """Populate this object from the recipient-status endpoint."""
        payload = {'emailaddress': self.emailaddress}
        data = MMSession.get_session().execute('/recipients/status', 'GET',
                                               payload)
        for field, value in data.items():
            setattr(self, field, value)

    def _post(self):
        """Activate this recipient via the API (the call returns no data,
        so the response is not parsed)."""
        payload = {'emailaddress': self.emailaddress}
        MMSession.get_session().execute('/recipients/activate', 'POST',
                                        payload)

    def activate(self):
        """Updates the status of this recipient to active which allows them
        to receive email."""
        self._post()
class Suppression(object):
    """A :class:`~dyn.mm.accounts.Supression` representing a suppressed email
    """
    # REST endpoint; sub-paths (/count, /activate) are derived from it.
    uri = '/suppressions'

    def __init__(self, emailaddress, *args, **kwargs):
        """Create a :class:`~dyn.mm.accounts.Suppression` object.

        :param emailaddress: This email address of for the
            :class:`~dyn.mm.accounts.Suppression`'s to apply to.
        """
        self.emailaddress = emailaddress
        self._count = self._suppresstime = None
        if 'api' in kwargs:
            # Built from a prior API response: apply fields locally,
            # converting the suppression timestamp string to a datetime.
            del kwargs['api']
            for key, val in kwargs.items():
                if key == 'suppresstime':
                    self._suppresstime = str_to_date(val)
                else:
                    setattr(self, '_' + key, val)
        elif len(args) + len(kwargs) == 0:
            self._post()
        # NOTE(review): extra args/kwargs without the 'api' flag fall
        # through with no API call at all — confirm that is intended.

    def _post(self):
        """Activate a new recipient"""
        api_args = {'emailaddress': self.emailaddress}
        # Note: this api call returns nothing, so we won't parse it for data
        MMSession.get_session().execute(self.uri, 'POST', api_args)

    def get_count(self, startdate=None, enddate=None):
        """Get the count attribute of this suppression for the provided range

        :param startdate: optional `datetime.datetime` lower bound
        :param enddate: optional upper bound; defaults to now when
            startdate is given
        :return: the suppression count as an int (also cached on self)
        """
        if startdate:
            startdate = date_to_str(startdate)
            enddate = enddate or datetime.now()
            enddate = date_to_str(enddate)
            api_args = {'startdate': startdate, 'enddate': enddate}
        else:
            # NOTE(review): an enddate passed without startdate is
            # silently ignored here — confirm this matches the API.
            api_args = None
        uri = self.uri + '/count'
        response = MMSession.get_session().execute(uri, 'GET', api_args)
        self._count = int(response['count'])
        return self._count

    @property
    def count(self):
        """the total number of email addresses in the suppression list for the
        specified account, filtered by date range.

        Note: returns the cached value; call :meth:`get_count` to refresh.
        """
        return self._count

    @count.setter
    def count(self, value):
        # Read-only: assignment is silently ignored.
        pass

    def activate(self):
        """Removes a :class:`~dyn.mm.accounts.Recipient` from the user's
        suppression list. This will not unbounce/uncomplain the
        :class:`~dyn.mm.accounts.Recipient`, but you will be permitted to send
        to them again.
        """
        uri = self.uri + '/activate'
        api_args = {'emailaddress': self.emailaddress}
        MMSession.get_session().execute(uri, 'POST', api_args)
| {
"content_hash": "5ebec146357c57f5b77edf3d4cd106d6",
"timestamp": "",
"source": "github",
"line_count": 648,
"max_line_length": 80,
"avg_line_length": 35.050925925925924,
"alnum_prop": 0.5978074230616828,
"repo_name": "mjhennig/dyn-python",
"id": "508d62959be3e3b4a670e1ec1fc41ac8a95346df",
"size": "22737",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dyn/mm/accounts.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "548842"
}
],
"symlink_target": ""
} |
"""
solving mnist classification problem using tensorflow
one fully connected layer
"""
# Load MNIST dataset
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
# Import TensorFlow and start interactive Session
import tensorflow as tf
session = tf.InteractiveSession()
# Create tf placeholders for input data and predictions
# x will be a 2d tensor with all images of the current batch * flattened pixel
# of the input image.
# y_ will be the probabilities for every image in the batch and every digit
# class
x = tf.placeholder(tf.float32, shape=[None, 784])
y_ = tf.placeholder(tf.float32, shape=[None, 10])
# Define Model parameters as variables and initialize them with zeros
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
session.run(tf.global_variables_initializer())
# Regression model (without softmax)
y = tf.matmul(x,W) + b
# Loss function
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
# train the model
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
for _ in range(1000):
batch = mnist.train.next_batch(100)
train_step.run(feed_dict={x: batch[0], y_: batch[1]})
# evaluate model
# get vector of booleans if prediction was correct
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
# calculate accuracy by converting booleans to floats (0.0 || 1.0) and calculate the mean
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# print accuracy of test-data
print(accuracy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
| {
"content_hash": "9a02aad4845ded3abc8d3e71f6b7d2e5",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 92,
"avg_line_length": 35.234042553191486,
"alnum_prop": 0.7463768115942029,
"repo_name": "mnannt/mnist_experiments",
"id": "bd9a958000f74d902d9663c89facf5590300b74e",
"size": "1656",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mnist/1-fcl.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10827"
}
],
"symlink_target": ""
} |
__author__ = 'uolter'

import urllib
import sys

# Yahoo Finance historical-quotes CSV endpoint; the ticker symbol is
# appended directly after "?s=".
base_url = "http://ichart.finance.yahoo.com/table.csv?s="
def make_url(ticker_symbol):
    """Build the historical-data CSV URL for *ticker_symbol*."""
    return "%s%s" % (base_url, ticker_symbol)
# Root directory under which per-ticker CSV files are written.
output_path = "/Users/uolter/src/pycode/finance"
def make_filename(ticker_symbol, directory):
return output_path + "/" + directory + "/" + ticker_symbol + ".csv"
def pull_historical_data(ticker_symbol, directory="data"):
    """Download historical price data for *ticker_symbol* into *directory*.

    Uses the Python 2 ``urllib.urlretrieve`` API.  If the transfer is cut
    short, whatever partial content was received is still written to the
    target file instead of being discarded.
    """
    try:
        urllib.urlretrieve(make_url(ticker_symbol), make_filename(ticker_symbol, directory))
    except urllib.ContentTooShortError as e:
        # `with` guarantees the file is closed even if the write fails
        # (the original relied on an explicit close()).
        with open(make_filename(ticker_symbol, directory), "w") as outfile:
            outfile.write(e.content)
if __name__ == '__main__':
    # Usage: python company_hist_data.py <TICKER>
    ticker_symbol = sys.argv[1]
    pull_historical_data(ticker_symbol)
"content_hash": "4ae9a7614dc8971789417ec1c34e6d77",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 92,
"avg_line_length": 23.636363636363637,
"alnum_prop": 0.6717948717948717,
"repo_name": "uolter/BitCoin",
"id": "0091c437b8564b7b6369ca146117904525cfe918",
"size": "780",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "company_hist_data.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1237"
},
{
"name": "Shell",
"bytes": "122"
}
],
"symlink_target": ""
} |
""" Test of AQGD optimizer """
import unittest
from test.python.algorithms import QiskitAlgorithmsTestCase
from qiskit import Aer
from qiskit.circuit.library import RealAmplitudes
from qiskit.utils import QuantumInstance, algorithm_globals
from qiskit.opflow import PauliSumOp
from qiskit.algorithms.optimizers import AQGD
from qiskit.algorithms import VQE, AlgorithmError
from qiskit.opflow.gradients import Gradient
from qiskit.test import slow_test
@unittest.skipUnless(Aer, "Aer is required to run these tests")
class TestOptimizerAQGD(QiskitAlgorithmsTestCase):
    """Test AQGD optimizer using RY for analytic gradient with VQE"""

    def setUp(self):
        super().setUp()
        self.seed = 50
        algorithm_globals.random_seed = self.seed
        self.qubit_op = PauliSumOp.from_list(
            [
                ("II", -1.052373245772859),
                ("IZ", 0.39793742484318045),
                ("ZI", -0.39793742484318045),
                ("ZZ", -0.01128010425623538),
                ("XX", 0.18093119978423156),
            ]
        )

    def _statevector_instance(self):
        # Shared, deterministically seeded statevector backend for all tests.
        return QuantumInstance(
            Aer.get_backend("aer_simulator_statevector"),
            seed_simulator=algorithm_globals.random_seed,
            seed_transpiler=algorithm_globals.random_seed,
        )

    def _run_vqe(self, optimizer, gradient=None):
        # Minimize the test Hamiltonian with VQE and return the result.
        kwargs = {
            "ansatz": RealAmplitudes(),
            "optimizer": optimizer,
            "quantum_instance": self._statevector_instance(),
        }
        if gradient is not None:
            kwargs["gradient"] = gradient
        vqe = VQE(**kwargs)
        return vqe.compute_minimum_eigenvalue(operator=self.qubit_op)

    @slow_test
    def test_simple(self):
        """test AQGD optimizer with the parameters as single values."""
        result = self._run_vqe(AQGD(momentum=0.0), gradient=Gradient("fin_diff"))
        self.assertAlmostEqual(result.eigenvalue.real, -1.857, places=3)

    def test_list(self):
        """test AQGD optimizer with the parameters as lists."""
        optimizer = AQGD(maxiter=[1000, 1000, 1000], eta=[1.0, 0.5, 0.3], momentum=[0.0, 0.5, 0.75])
        result = self._run_vqe(optimizer)
        self.assertAlmostEqual(result.eigenvalue.real, -1.857, places=3)

    def test_raises_exception(self):
        """tests that AQGD raises an exception when incorrect values are passed."""
        self.assertRaises(AlgorithmError, AQGD, maxiter=[1000], eta=[1.0, 0.5], momentum=[0.0, 0.5])

    @slow_test
    def test_int_values(self):
        """test AQGD with int values passed as eta and momentum."""
        result = self._run_vqe(AQGD(maxiter=1000, eta=1, momentum=0), gradient=Gradient("lin_comb"))
        self.assertAlmostEqual(result.eigenvalue.real, -1.857, places=3)
if __name__ == "__main__":
    # Allow running this test module directly with the unittest runner.
    unittest.main()
| {
"content_hash": "e22ab6608d36cdb6c8f9b4bff635e4c2",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 100,
"avg_line_length": 37.72222222222222,
"alnum_prop": 0.6353460972017673,
"repo_name": "QISKit/qiskit-sdk-py",
"id": "37c12aa9550376ac27285bf6d733090605652103",
"size": "3878",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "test/python/algorithms/optimizers/test_optimizer_aqgd.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "2582"
},
{
"name": "C++",
"bytes": "327518"
},
{
"name": "CMake",
"bytes": "19294"
},
{
"name": "Makefile",
"bytes": "5608"
},
{
"name": "Pascal",
"bytes": "2444"
},
{
"name": "Python",
"bytes": "1312801"
},
{
"name": "Shell",
"bytes": "8385"
}
],
"symlink_target": ""
} |
from django.http import HttpResponse
import json
import requests
import urllib
import time
import math
import itertools
from models import CROP_NAME
from collections import defaultdict
from models import CropPrice
from django.utils.functional import lazy, memoize
import parse_nass
def fcRequest(req):
    """Submit a FetchClimate compute request and block until data is ready.

    Posts *req* (a JSON-serializable dict) to the FetchClimate dev service,
    polls the status endpoint until the computation completes, then fetches
    and returns the raw result text.  Python 2 only (print statement).
    """
    def waitForData(result):
        # A completed computation answers with a string starting
        # '"completed'; otherwise the response carries a hash= token used
        # for polling.
        if result.startswith('"completed'):
            return result
        else:
            try:
                hash = result[result.index("hash=") + 5:]
            except ValueError:
                print "Oops: result: \n", result
                # NOTE(review): returning None here makes getData() raise on
                # query.index("Blob=") -- confirm the service can actually
                # answer without a hash.
                return
            while True:
                result = requests.get("http://fetchclimate2-dev.cloudapp.net/api/status?hash=" + hash).text
                if result.startswith('"completed'):
                    return result
                time.sleep(4)
    def getData(query):
        # Extract the blob id from the completed-status string and fetch
        # the actual data (lat, lon, values) via the JS proxy endpoint.
        blob = query[query.index("Blob=") + 5: -1]
        result = requests.get("http://fetchclimate2-dev.cloudapp.net/jsproxy/data?uri=" + urllib.quote_plus(
            "msds:ab?AccountName=fc2cache&Container=requests&Blob=" + blob) + "&variables=lat,lon,values").text
        return result
    def compute():
        # Kick off the server-side computation; the response is either the
        # completed payload or a string containing the polling hash.
        r = requests.post("http://fetchclimate2-dev.cloudapp.net/api/compute", json=req,
                          headers={'content-type': 'application/json'})
        return r.text
    query = compute()
    query = waitForData(query)
    return getData(query)
def toRadians(degrees):
    """Convert an angle in degrees to radians (stdlib math.radians)."""
    return math.radians(float(degrees))
def toDegrees(radians):
    """Convert an angle in radians to degrees (stdlib math.degrees)."""
    return math.degrees(float(radians))
# Mean Earth radius in meters.
EARTH_RADIUS = 6371009.0
# Note, we don't handle discontinuities at the moment
def circle_lat_lon_radii(lat, radius):
    """Return (lat_delta, lon_delta) in degrees for a circle of *radius*
    meters centered at latitude *lat* (equirectangular approximation)."""
    lat_delta = toDegrees(float(radius) / EARTH_RADIUS)
    # A degree of longitude shrinks with cos(latitude).
    lon_delta = toDegrees(float(radius) / EARTH_RADIUS / math.cos(toRadians(lat)))
    return (lat_delta, lon_delta)
def distance(lat1, lon1, lat2, lon2):
    """Approximate ground distance in meters between two lat/lon points.

    Only accurate for short distances (less than a hundred miles or so)
    and not near the poles (equirectangular approximation).
    """
    average_phi = (toRadians(lat1) + toRadians(lat2)) / 2.0
    phi_delta = toRadians(lat2) - toRadians(lat1)
    lambda_delta = toRadians(lon2) - toRadians(lon1)
    return EARTH_RADIUS * math.sqrt(phi_delta ** 2 + (lambda_delta * math.cos(average_phi)) ** 2)
# Years of CropScape data to request and aggregate.
CROP_YEARS = range(2010, 2015)
def cropscape_json(lat, lon, radius):
    """Build the FetchClimate request payload for CropScape land-cover data
    over a 21x21 grid covering the circle at (lat, lon) of *radius* meters."""
    (lat_delta, lon_delta) = circle_lat_lon_radii(lat, radius)
    # 21 evenly spaced grid dividers spanning +/- delta around the center.
    lats = [lat - lat_delta + (lat_delta / 10) * step for step in xrange(21)]
    lons = [lon - lon_delta + (lon_delta / 10) * step for step in xrange(21)]
    payload = {
        "EnvironmentVariableName": "CropScape",
        "Domain": {
            "Mask": None,
            "SpatialRegionType": "CellGrid",
            "Lats": lats,
            "Lons": lons,
            "Lats2": None,
            "Lons2": None,
            "TimeRegion": {
                "Years": CROP_YEARS,
                "Days": [1, 366],
                "Hours": [0, 24],
                "IsIntervalsGridYears": False,
                "IsIntervalsGridDays": True,
                "IsIntervalsGridHours": True
            }
        },
        "ParticularDataSources": {},
        "ReproducibilityTimestamp": int(time.time() * 1000)
    }
    return payload
def divider_midpoints(dividers):
    """Return the midpoint of each adjacent pair of grid dividers."""
    midpoints = []
    for left, right in zip(dividers, dividers[1:]):
        midpoints.append((left + right) / 2)
    return midpoints
def crop_name(index):
    """Map a CropScape numeric crop code to its display name via CROP_NAME."""
    return CROP_NAME[index]
def commodity_name(crop_name):
    """Translate a CropScape crop display name into NASS commodity terms.

    Returns a list of (COMMODITY, VARIETY) pairs; VARIETY is None when NASS
    does not subdivide the commodity.  Double-crop names recurse into their
    component crops.
    """
    upper_name = crop_name.upper()
    if upper_name.startswith("DOUBLE CROP"):
        # e.g. "Double Crop - Winter Wheat/Soybeans" -> both components.
        result = []
        for part in upper_name.split(" - ")[1].split("/"):
            result.extend(commodity_name(part))
        return result
    if upper_name.endswith("WHEAT"):
        variety = upper_name.split()[0]
        if variety == "DURUM":
            variety = "SPRING, DURUM"
        return [("WHEAT", variety)]
    # Crop names whose NASS commodity/variety differ from the display name.
    exact = {
        "Dry Beans": ("BEANS", "DRY EDIBLE"),
        "Other Hay (non-alfalfa)": ("HAY", "(EXCL ALFALFA)"),
        "Honeydew Melons": ("MELONS", "HONEYDEW"),
        "Watermelons": ("MELONS", "WATERMELON"),
    }
    if crop_name in exact:
        return [exact[crop_name]]
    if upper_name == "ALFALFA":
        return [("HAY", "ALFALFA")]
    return [(upper_name, None)]
def make_top_crop_chart_data(crops):
    """Build the bar-chart payload for the top crops: one data point per
    crop showing its share of acreage."""
    points = [
        {
            "x": crop["shortName"],
            "y": [crop["pctAcres"]],
            "tooltip": "%0.1f%%" % (crop["pctAcres"] * 100.0)
        }
        for crop in crops
    ]
    return {"series": ["% Acres"], "data": points}
def make_acre_change_chart(crop_data):
    """Build a per-year acreage chart for one crop, titled with whether its
    acreage rose or fell over the CROP_YEARS window."""
    points = [
        {
            "x": int(year),
            "y": [share],
            "tooltip": "%s: %0.1f%%" % (year, share * 100.0)
        }
        for year, share in zip(CROP_YEARS, crop_data["history"])
    ]
    delta = crop_data["pctAcresDelta"]
    change = "rose" if delta > 0 else "fell"
    title = "%s: acreage %s %0.0f%%" % (crop_data["name"], change, abs(delta * 100.0))
    return {
        "title": title,
        "series": ["% Acres"],
        "data": points
    }
def make_price_change_chart(price_data, min_year):
    """Build a per-year price chart for one crop/variety.

    Charts every year from max(min_year, first data year) through the last
    data year; years with no data plot as 0 with a "No data" tooltip.
    """
    years = price_data["annualPrices"].keys()
    points = []
    for year in xrange(max(min_year, min(years)), max(years) + 1):
        try:
            price = price_data["annualPrices"][year]
        except KeyError:
            price = 0
            price_string = "No data"
        else:
            price_string = "$" + make_price_string(price)
        points.append({
            "x": int(year),
            "y": [float(price)],
            "tooltip": "%s: %s" % (year, price_string)
        })
    change = "rose" if price_data["priceDeltaPct"] > 0 else "fell"
    # strip leading sign from the pctAcresDelta value
    title = "%s: prices %s %0.0f%%" % (price_data["crop"], change, abs(price_data["priceDeltaPct"] * 100.0))
    return {
        "title": title,
        "series": ["$ / %s" % price_data["unit"]],
        "data": points
    }
def roundrobin(*iterables):
    "roundrobin('ABC', 'D', 'EF') --> A D E B F C"
    # Recipe credited to George Sakkis
    # Interleaves the iterables, dropping each one as it is exhausted.
    # NOTE: Python 2 only -- relies on the iterator `.next` attribute.
    pending = len(iterables)
    nexts = itertools.cycle(iter(it).next for it in iterables)
    while pending:
        try:
            for next in nexts:
                yield next()
        except StopIteration:
            # One iterable ran dry: rebuild the cycle without it.
            pending -= 1
            nexts = itertools.cycle(itertools.islice(nexts, pending))
def make_top_trends_chart_data(crops):
    """Interleave charts for the two biggest acreage changes and the two
    biggest price changes."""
    # Find the top two greatest acre changes and price changes.
    top_acres = sorted(crops, key=lambda c: -abs(c["pctAcresDelta"]))[:2]
    # assumes that the first price is the one to use, hopefully "all utilization practices". Might miss some double crop cases.
    first_prices = [c["priceHistory"][0] for c in crops if c["priceHistory"]]
    top_prices = sorted(first_prices, key=lambda p: -abs(p["priceDeltaPct"]))[:2]
    acre_charts = [make_acre_change_chart(c) for c in top_acres]
    price_charts = [make_price_change_chart(p, CROP_YEARS[0]) for p in top_prices]
    return list(roundrobin(acre_charts, price_charts))
def make_crop_details_chart_data(crops):
    """Build the per-crop detail payload, keyed by crop name: acreage chart,
    one price chart per price series, and the raw price table."""
    details = {}
    for crop in crops:
        details[crop["name"]] = {
            "acreageChartData": make_acre_change_chart(crop),
            "priceChartData": [make_price_change_chart(p, 0) for p in crop["priceHistory"]],
            "priceTable": crop["priceHistory"]
        }
    return details
def make_price_string(price):
    """Format a price: two decimals if it has a fractional part, else as-is."""
    text = str(price)
    if "." not in text:
        return text
    return "%0.2f" % float(price)
def make_price_delta_string(old_price, new_price):
    """Format the signed price change; use decimals only when the new price
    has a fractional part."""
    if "." not in str(new_price):
        return "%+d" % (int(new_price) - int(old_price))
    return "%+0.2f" % (float(new_price) - float(old_price))
def short_crop(crop_name):
    """Strip the 'Double Crop - ' prefix from a crop display name."""
    prefix = 'Double Crop - '
    return crop_name.replace(prefix, '')
def crops_in_circle(request):
    """Django view: JSON summary of crops grown within a circle.

    Query params: lat, lon, radius (meters), and an optional NASS price
    `location` (defaults to "US TOTAL").
    """
    lat = float(request.GET.get("lat"))
    lon = float(request.GET.get("lon"))
    radius = float(request.GET.get("radius"))
    location = request.GET.get("location", "US TOTAL")
    # memoize caches the helper on its 4-argument key for the process life.
    # NOTE(review): django.utils.functional.memoize was removed in newer
    # Django versions -- confirm the pinned Django release supports it.
    response_data = memoize(crops_in_circle_helper, {}, 4)(lat, lon, radius, location)
    response_json = json.dumps(response_data)
    return HttpResponse(response_json, content_type="application/json")
def crops_in_circle_helper(lat, lon, radius, location):
    """Aggregate CropScape land cover and NASS price data for a circle.

    Returns a dict with per-crop acreage share, five-year history and price
    history, plus pre-built chart payloads.  Python 2 only (iteritems,
    two-argument str.translate).
    """
    request_json = cropscape_json(lat, lon, radius)
    cropscape_data = json.loads(fcRequest(request_json))
    # example response:
    # {"lat":[29.1462,30.348708333333335,30.551116666666665,30.753525],"lon":[-88.4743,-88.325066666666672,-88.17583333333333,-88.0266],"values":[[0.0,190.0,142.0],[0.0,142.0,142.0],[0.0,0.0,121.0]]}
    crop_table = defaultdict(lambda: defaultdict(int))
    total = 0
    lats = divider_midpoints(cropscape_data["lat"])
    lons = divider_midpoints(cropscape_data["lon"])
    # Count, per year, how many grid cells inside the circle grew each crop.
    for (entry_lat, row) in zip(lats, cropscape_data["values"]):
        for entry_lon, entry_crop in zip(lons, row):
            if (distance(lat, lon, entry_lat, entry_lon) < radius):
                for year, crop in zip(CROP_YEARS, entry_crop):
                    crop_table[year][int(crop)] += 1
                total += 1
    crops = []
    # Walk crops from most to least common in the latest year.
    for (crop, count) in sorted(crop_table[CROP_YEARS[-1]].items(), key=lambda x: -x[1]):
        history = []
        for year in CROP_YEARS:
            history.append(float(crop_table[year][crop]) / total)
        name = crop_name(crop) # used in CropScape
        commodities = commodity_name(name) # used in NASS data
        pct_acres = float(count) / total
        name = short_crop(name)
        if name and pct_acres > 0.01: # only show crops that cover more than 1% of the area.
            pct_acres_delta = history[-1] - history[0] # change since five years ago
            # price_history maps (commodity, variety, unit, util_practice)
            # to a {year: price} dict.
            price_history = defaultdict(lambda: defaultdict(int))
            for commodity, variety in commodities:
                # only include dollar prices, not percentages of parity
                crop_prices = CropPrice.objects.filter(commodity=commodity, location=location, unit__contains="$").order_by("-year", "commodity", "variety", "util_practice")
                if variety:
                    crop_prices = crop_prices.filter(variety__startswith=variety)
                for crop_price in crop_prices:
                    price_history[commodity, crop_price.variety, crop_price.unit, crop_price.util_practice][crop_price.year] = crop_price.price
            price_history_list = []
            for ((commodity, variety, unit, util_practice), annual_prices) in sorted(price_history.iteritems()):
                full_crop = "%s, %s" % (commodity.capitalize(), variety.capitalize())
                if len(commodities) > 1:
                    full_variety = full_crop
                else:
                    full_variety = variety.capitalize()
                last_price = make_price_string(annual_prices[max(annual_prices.keys())])
                price_string = make_price_string(annual_prices[CROP_YEARS[-1]])
                old_price = annual_prices[CROP_YEARS[0]]
                price_delta = make_price_delta_string(old_price, annual_prices[CROP_YEARS[-1]])
                price_history_list.append({
                    "shortCrop": short_crop(name),
                    "crop": name,
                    "fullCrop": full_crop,
                    "variety": full_variety,
                    "unit": str(unit).translate(None, "$/").strip().lower(),
                    "utilPractice": util_practice.capitalize(),
                    "price": price_string,
                    "lastPrice": last_price,
                    "priceDelta": price_delta,
                    "priceDeltaPct": float(price_delta) / float(old_price) if float(old_price) > 0 else 0.0,
                    "annualPrices": {int(key): float(value) for (key, value) in annual_prices.iteritems() if float(value) > 0}
                })
            crops.append({
                "shortName": short_crop(name),
                "name": name,
                "pctAcres": pct_acres,
                "pctAcresDelta": pct_acres_delta,
                "history": history,
                "priceHistory": price_history_list
            })
    response_data = {
        "lat": lat,
        "lon": lon,
        "radius": radius,
        "crops": crops,
        "topCropChartData": make_top_crop_chart_data(crops),
        "topTrendsChartData": make_top_trends_chart_data(crops),
        "cropDetailsChartData": make_crop_details_chart_data(crops)
    }
    return response_data
def reload_data(request):
    """Django view: reload NASS crop price data into the database."""
    parsed_lines = parse_nass.load_crop_prices()
    return HttpResponse("Finished loading %s rows" % parsed_lines)
| {
"content_hash": "6f71ae92cf2421acaf20dac607b828a1",
"timestamp": "",
"source": "github",
"line_count": 338,
"max_line_length": 197,
"avg_line_length": 35.56508875739645,
"alnum_prop": 0.639547458614092,
"repo_name": "georgejlee/croptrends",
"id": "f7838dd16da06548223515cecc80c3fbb4c6281a",
"size": "12021",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "browser/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "10660"
},
{
"name": "JavaScript",
"bytes": "11741"
},
{
"name": "Python",
"bytes": "27439"
}
],
"symlink_target": ""
} |
import argparse
import sys
from kvirt.config import Kconfig
from kvirt.common import pprint, error, success
import pyghmi.ipmi.bmc as bmc
class KBmc(bmc.Bmc):
    """pyghmi BMC implementation that drives a kcli-managed VM."""

    def __init__(self, authdata, port, name, client):
        super(KBmc, self).__init__(authdata, port)
        self.bootdevice = 'default'
        self.k = Kconfig(client=client).k
        if not self.k.exists(name):
            error('%s not found.Leaving' % name)
            sys.exit(1)
        status = self.k.info(name)['status']
        # Anything other than up/poweredon is reported as powered off.
        if status.lower() in ['up', 'poweredon']:
            self.powerstate = 'on'
        else:
            self.powerstate = 'off'
        pprint('Handling vm %s on port %s' % (name, port))
        pprint('Initial state for vm %s: %s' % (name, self.powerstate))
        self.name = name

    def get_boot_device(self):
        """Return the currently selected boot device."""
        return self.bootdevice

    def set_boot_device(self, bootdevice):
        """Record the requested boot device."""
        self.bootdevice = bootdevice

    def cold_reset(self):
        """Terminate the BMC process on a cold-reset request."""
        pprint('shutting down in response to BMC cold reset request')
        sys.exit(0)

    def get_power_state(self):
        """Return the cached power state ('on' or 'off')."""
        return self.powerstate

    def power_off(self):
        """Stop the VM through kcli and update the cached power state."""
        outcome = self.k.stop(self.name)
        if outcome['result'] != 'success':
            error('%s not powered off because %s' % (self.name, outcome['reason']))
            return
        success('%s powered off!' % self.name)
        self.powerstate = 'off'

    def power_on(self):
        """Start the VM through kcli and update the cached power state."""
        outcome = self.k.start(self.name)
        if outcome['result'] != 'success':
            error('%s not powered on because %s' % (self.name, outcome['reason']))
            return
        self.powerstate = 'on'
        success('%s powered on!' % self.name)

    def power_reset(self):
        """Reset is not implemented for kcli VMs."""
        pass

    def power_shutdown(self):
        """Soft shutdown is mapped to a plain power off."""
        self.power_off()

    def is_active(self):
        """Return True while the VM is powered on."""
        return self.powerstate == 'on'

    def iohandler(self, data):
        """Echo console data and forward it when an SOL session is open."""
        print(data)
        # NOTE(review): self.sol is presumably set by the pyghmi base class
        # when serial-over-LAN is active -- confirm.
        if self.sol:
            self.sol.send_data(data)
def main():
    """Parse command-line arguments and run a BMC for the requested VM."""
    arg_parser = argparse.ArgumentParser(prog='kbmc', description='BMC using kcli')
    arg_parser.add_argument('-C', '--client', dest='client', type=str, help='Client to use')
    arg_parser.add_argument('--user', dest='user', type=str, default='admin', help='User to use. Defaults to admin')
    arg_parser.add_argument('--password', dest='password', type=str, default='password',
                            help='Password to use. Defaults to password')
    arg_parser.add_argument('--port', dest='port', type=int, default=6230, help='Port to listen on. Defaults to 6230')
    # parser.add_argument('-f', '--foreground', action='store_true', help='Stay in foreground')
    arg_parser.add_argument('name', type=str, help='Vm to handle')
    args = arg_parser.parse_args()
    credentials = {args.user: args.password}
    bmc_server = KBmc(credentials, port=args.port, name=args.name, client=args.client)
    bmc_server.listen()
if __name__ == '__main__':
    # main() returns None, so the process exits with status 0 once
    # kbmc.listen() returns.
    sys.exit(main())
| {
"content_hash": "78925831f34a7b30f06dec6ee65f1f83",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 114,
"avg_line_length": 34.566265060240966,
"alnum_prop": 0.5956779365632625,
"repo_name": "karmab/kcli",
"id": "93c64835678f495897267e623e7b5ca55e13ef54",
"size": "2869",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kvirt/kbmc.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "27073"
},
{
"name": "Dockerfile",
"bytes": "173"
},
{
"name": "HTML",
"bytes": "59660"
},
{
"name": "JavaScript",
"bytes": "688801"
},
{
"name": "Jinja",
"bytes": "25491"
},
{
"name": "Makefile",
"bytes": "871"
},
{
"name": "Python",
"bytes": "1995130"
},
{
"name": "Shell",
"bytes": "61221"
}
],
"symlink_target": ""
} |
import datetime
import logging
import re
import time as py_time
from django.conf import settings
from common import api
from common import clock
from common import exception
from common.protocol import sms
from common.protocol import xmpp
# Module-level indirection over common.clock.utcnow(); ClockOverride (below)
# swaps this attribute out to shift "now" in tests.
utcnow = lambda: clock.utcnow()
# Matches an absolute http URL: group 1 is the full URL, group 2 its path.
_re_match_url = re.compile(r'(http://[^/]+(/[^\s]+))', re.M)
def get_url(s):
  """Return the first http URL found in *s*, or None."""
  match = _re_match_url.search(s)
  return match.group(1) if match else None
def get_relative_url(s):
  """Return the path portion of the first http URL in *s*, or None."""
  match = _re_match_url.search(s)
  return match.group(2) if match else None
def exhaust_queue(nick):
  """Drain the task queue for one actor (bounded at 1000 iterations)."""
  for i in xrange(1000):
    try:
      api.task_process_actor(api.ROOT, nick)
    except exception.ApiNoTasks:
      break
def exhaust_queue_any():
  """Drain the global task queue (bounded at 1000 iterations)."""
  for i in xrange(1000):
    try:
      api.task_process_any(api.ROOT)
    except exception.ApiNoTasks:
      break
class TestXmppConnection(xmpp.XmppConnection):
  """XMPP connection stub: logs messages and records them in xmpp.outbox
  instead of sending them over the wire."""
  def send_message(self, to_jid_list, message, html_message=None,
                   atom_message=None):
    logging.debug('XMPP SEND -> %s: %s, html_message=%s, atom_message=%s',
                  to_jid_list,
                  message,
                  html_message,
                  atom_message)
    # One outbox entry per recipient so tests can assert on delivery.
    for jid in to_jid_list:
      xmpp.outbox.append((jid, message, html_message))
class TestSmsConnection(sms.SmsConnection):
  """SMS connection stub: logs messages and records them in sms.outbox."""
  def send_message(self, to_list, message):
    # Apply the normal target filtering, then capture instead of sending.
    to_list = self.filter_targets(to_list, message)
    logging.debug('SMS SEND -> %s: %s', to_list, message)
    for recp in to_list:
      sms.outbox.append((recp, message))
class FakeRequest(object):
  """Minimal stand-in for a Django request object in tests."""
  def __init__(self, **kw):
    self.user = kw.get('user', None)
    self.POST = kw.get('post', {})
    self.GET = kw.get('get', {})
  @property
  def REQUEST(self):
    """Combined POST and GET parameters; GET wins on key collisions."""
    combined = dict(self.POST)
    combined.update(self.GET)
    return combined
class FakeMemcache(object):
  """A disappointingly full-featured fake memcache :(

  In-memory stand-in for the memcache client used in tests.  Entries are
  stored as (value, expiry) pairs where expiry is an absolute unix
  timestamp, or 0 for "never expires".
  """
  def __init__(self, *args, **kw):
    self._data = {}
  def _get_valid(self, key):
    """Return the stored value, or None if the key is missing or expired."""
    if key not in self._data:
      return None
    value, expiry = self._data[key]
    if expiry:
      now = py_time.mktime(utcnow().timetuple())
      if now >= expiry:
        return None
    return value
  def set(self, key, value, time=0):
    """Store value; small `time` values are relative seconds, large ones
    (>= ~1 month) are treated as absolute timestamps, memcache-style."""
    if time:
      if time < 2592000: # approx 1 month
        time = py_time.mktime(utcnow().timetuple()) + time
    self._data[key] = (value, time)
    return True
  def set_multi(self, mapping, time=0, key_prefix=''):
    """Store every entry in mapping; returns [] (set never fails here)."""
    for k, v in mapping.items():
      self.set(key_prefix + k, v, time=time)
    return []
  def add(self, key, value, time=0):
    """Store only if the key is absent/expired; True on success."""
    if self._get_valid(key) is not None:
      return False
    self.set(key, value, time)
    return True
  def add_multi(self, mapping, time=0, key_prefix=''):
    """Add every entry; returns the list of keys that already existed."""
    failed = []
    for k, v in mapping.items():
      if not self.add(key_prefix + k, v, time=time):
        # BUG FIX: original did `o.append[k]` (TypeError) instead of a call.
        failed.append(k)
    return failed
  def incr(self, key, delta=1):
    """Increment an integer value; None if missing or not int-parseable."""
    data = self._get_valid(key)
    if data is None:
      return None
    data_tup = self._data[key]
    try:
      count = int(data)
    except ValueError:
      return None
    count += delta
    # Preserve the stored (absolute) expiry timestamp.
    self.set(key, count, time=data_tup[1])
    return count
  def decr(self, key, delta=1):
    """Decrement an integer value; None if missing or not int-parseable."""
    # BUG FIX: original called the unbound name `incr` -> NameError.
    return self.incr(key, delta=-delta)
  def delete(self, key, seconds=0):
    # NOTE: doesn't support seconds
    """Delete a key; returns 2 on success, 1 if the key was absent."""
    try:
      del self._data[key]
      return 2
    except KeyError:
      return 1
  def delete_multi(self, keys, key_prefix=''):
    """Delete every key; returns the list of keys that were absent."""
    missing = []
    for k in keys:
      if self.delete(key_prefix + k) != 2:
        # BUG FIX: original did `o.append[k]` (TypeError) instead of a call.
        missing.append(k)
    return missing
  def get(self, key):
    """Return the value for key, or None if missing/expired."""
    return self._get_valid(key)
  def get_multi(self, keys, key_prefix=''):
    """Return {key: value-or-None} for each requested key."""
    out = {}
    for k in keys:
      out[k] = self._get_valid(key_prefix + k)
    return out
class ClockOverride(object):
  """Temporarily replaces *module*.utcnow with a shifted clock.

  The replacement returns the real utcnow plus a datetime.timedelta built
  from the keyword arguments given at construction time.
  """
  old = None
  kw = None
  def __init__(self, module, **kw):
    self.kw = kw
    self.old = {}
    self.module = module
  def override(self):
    """Install the shifted clock, remembering the previous attribute."""
    self.old = getattr(self.module, 'utcnow')
    shifted = lambda: datetime.datetime.utcnow() + datetime.timedelta(**self.kw)
    setattr(self.module, 'utcnow', shifted)
  def reset(self):
    """Restore the original utcnow attribute."""
    setattr(self.module, 'utcnow', self.old)
def override_clock(module, **kw):
  """Create a ClockOverride for *module*, install it, and return it."""
  clock_override = ClockOverride(module, **kw)
  clock_override.override()
  return clock_override
class SettingsOverride(object):
  """Temporarily overrides attributes on django.conf.settings.

  Python 2 only (dict.iteritems).  Call override() to apply the values and
  reset() to restore the previous ones.
  """
  old = None
  kw = None
  def __init__(self, **kw):
    self.kw = kw
    self.old = {}
  def override(self):
    # Remember each previous value (None when the setting did not exist).
    for k, v in self.kw.iteritems():
      self.old[k] = getattr(settings, k, None)
      setattr(settings, k, v)
  def reset(self):
    # NOTE(review): settings that did not exist before are restored as None
    # rather than deleted -- confirm callers accept that.
    for k, v in self.old.iteritems():
      setattr(settings, k, v)
def override(**kw):
  """Create a SettingsOverride, install it, and return it."""
  settings_override = SettingsOverride(**kw)
  settings_override.override()
  return settings_override
| {
"content_hash": "bf6578c77b73fc517dd1292a5c67081b",
"timestamp": "",
"source": "github",
"line_count": 205,
"max_line_length": 74,
"avg_line_length": 24.24390243902439,
"alnum_prop": 0.5989939637826962,
"repo_name": "CollabQ/CollabQ",
"id": "ff9ba0783735e2c4612d79c9be66351dcce4041d",
"size": "5586",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "common/test/util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "400472"
},
{
"name": "C++",
"bytes": "20"
},
{
"name": "JavaScript",
"bytes": "327809"
},
{
"name": "Python",
"bytes": "6590397"
},
{
"name": "R",
"bytes": "1277"
},
{
"name": "Shell",
"bytes": "5624"
}
],
"symlink_target": ""
} |
import unittest
from progdb.ctags_parser import *
class TestCTagsParser(unittest.TestCase):
  """Smoke tests for the ctags output parser."""
  def test_parse_from_lines(self):
    """Parse several captured ctags outputs and resolve line numbers."""
    def doFile(filename):
      # `with` ensures the fixture file is closed; the original opened it
      # and never closed the handle.
      with open(filename, "r") as f:
        tags = parse_ctags_from_ctags_output(f.readlines())
      for t in tags:
        if t.needs_determining:
          t.determine_line_number(filename)
    doFile("tests/resources/ctags_output_1")
    doFile("tests/resources/ctags_output_2")
    doFile("tests/resources/ctags_output_3")
    doFile("tests/resources/ctags_output_4")
  def test_parse_from_source(self):
    """Run ctags over a real C++ source file and parse the result."""
    tags = parse_ctags_from_source("tests/resources/ctags_test1.cpp")
| {
"content_hash": "4648fb93dcf3bc3a6bfc6e6d51ea6698",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 69,
"avg_line_length": 33,
"alnum_prop": 0.6921850079744817,
"repo_name": "natduca/ndbg",
"id": "91ca7b5b4262482bac1ce9d03cd805ed3c6c14a0",
"size": "1203",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/progdb/test_ctags_parser.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "4728"
},
{
"name": "C++",
"bytes": "5787"
},
{
"name": "Emacs Lisp",
"bytes": "5014"
},
{
"name": "JavaScript",
"bytes": "237"
},
{
"name": "Python",
"bytes": "554374"
},
{
"name": "Shell",
"bytes": "781"
},
{
"name": "VimL",
"bytes": "1848"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
import sys, os
here = os.path.abspath(os.path.dirname(__file__))

def _read(filename):
    # Read a text file located next to setup.py, closing the handle; the
    # original left the README/NEWS file objects open.
    with open(os.path.join(here, filename)) as f:
        return f.read()

README = _read('README.rst')
NEWS = _read('NEWS.txt')
version = '0.1'
install_requires = [
    # List your project dependencies here.
    # For more details, see:
    # http://packages.python.org/distribute/setuptools.html#declaring-dependencies
]
setup(name='python-ga',
      version=version,
      description="Implementation of a generic server-side Google Analytics client in Python",
      long_description=README + '\n\n' + NEWS,
      classifiers=[
          # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
      ],
      keywords='google analytics',
      author='Travis Chase',
      author_email='travis@travischase.me',
      url='',
      license='MIT',
      packages=find_packages('src'),
      package_dir={'': 'src'},
      include_package_data=True,
      zip_safe=False,
      install_requires=install_requires,
      entry_points={
          'console_scripts':
              ['python-ga=pythonga:main']
      }
      )
| {
"content_hash": "c983572b7959aee056cb24e339ec84b4",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 92,
"avg_line_length": 28.44736842105263,
"alnum_prop": 0.6651248843663274,
"repo_name": "supercodepoet/python-ga",
"id": "2b1ffa81f51a51d51e5d1b5fcb8318edeaf06e5a",
"size": "1081",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4678"
}
],
"symlink_target": ""
} |
import os
import copy
import time
import urllib
import json
from xbmcswift2 import xbmc, xbmcplugin, xbmcvfs
from meta import plugin, import_tmdb, import_tvdb, LANG
from meta.gui import dialogs
from meta.info import get_tvshow_metadata_tvdb, get_tvshow_metadata_tmdb, get_season_metadata_tvdb, get_episode_metadata_tvdb, get_tvshow_metadata_trakt, get_season_metadata_trakt, get_episode_metadata_trakt
from meta.utils.text import page_redux, parse_year, is_ascii, to_utf8
from meta.utils.executor import execute
from meta.utils.properties import set_property
from meta.utils.rpc import RPC
from meta.library.tvshows import setup_library, add_tvshow_to_library, batch_add_tvshows_to_library, update_library
from meta.library.tools import scan_library
from meta.play.base import active_players, get_players, active_channelers
from meta.play.tvshows import play_episode, play_episode_from_guide, tmdb_play_episode, trakt_play_episode, tvmaze_play_episode
from meta.play.channelers import ADDON_STANDARD, ADDON_PICKER
from meta.play.players import ADDON_DEFAULT, ADDON_SELECTOR
from meta.navigation.base import get_icon_path, get_genre_icon, get_background_path, get_genres, get_tv_genres, caller_name, caller_args
from language import get_string as _
from settings import CACHE_TTL, SETTING_TV_LIBRARY_FOLDER, SETTING_TV_DEFAULT_AUTO_ADD, SETTING_TV_PLAYED_BY_ADD, SETTING_TV_DEFAULT_PLAYER_FROM_LIBRARY, SETTING_TV_BATCH_ADD_FILE_PATH, SETTING_MOVIES_BATCH_ADD_FILE_PATH, SETTING_FORCE_VIEW, SETTING_MAIN_VIEW, SETTING_TVSHOWS_VIEW, SETTING_SEASONS_VIEW, SETTING_EPISODES_VIEW, SETTING_AIRED_UNKNOWN, SETTING_INCLUDE_SPECIALS
# Kodi sort-method presets for directory listings.  When the user enabled
# Kodi's "ignore 'the' when sorting" file-list option, prefer the
# LABEL_IGNORE_THE sort method; otherwise use the plain LABEL sort.
if RPC.settings.get_setting_value(setting="filelists.ignorethewhensorting") == {u'value': True}:
    SORT = [xbmcplugin.SORT_METHOD_UNSORTED, xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE, xbmcplugin.SORT_METHOD_VIDEO_YEAR, xbmcplugin.SORT_METHOD_GENRE, xbmcplugin.SORT_METHOD_VIDEO_RATING, xbmcplugin.SORT_METHOD_PLAYCOUNT]
    SORTRAKT = [xbmcplugin.SORT_METHOD_VIDEO_YEAR, xbmcplugin.SORT_METHOD_GENRE, xbmcplugin.SORT_METHOD_VIDEO_RATING, xbmcplugin.SORT_METHOD_PLAYCOUNT, xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE, xbmcplugin.SORT_METHOD_DURATION, xbmcplugin.SORT_METHOD_MPAA_RATING, xbmcplugin.SORT_METHOD_UNSORTED]
else:
    SORT = [xbmcplugin.SORT_METHOD_UNSORTED, xbmcplugin.SORT_METHOD_VIDEO_YEAR, xbmcplugin.SORT_METHOD_GENRE, xbmcplugin.SORT_METHOD_VIDEO_RATING, xbmcplugin.SORT_METHOD_PLAYCOUNT]
    SORTRAKT = [xbmcplugin.SORT_METHOD_VIDEO_YEAR, xbmcplugin.SORT_METHOD_GENRE, xbmcplugin.SORT_METHOD_VIDEO_RATING, xbmcplugin.SORT_METHOD_PLAYCOUNT, xbmcplugin.SORT_METHOD_LABEL, xbmcplugin.SORT_METHOD_DURATION, xbmcplugin.SORT_METHOD_MPAA_RATING, xbmcplugin.SORT_METHOD_UNSORTED]
# Add-on view settings, read once at import time.
FORCE = plugin.get_setting(SETTING_FORCE_VIEW, bool)
VIEW_MAIN = plugin.get_setting(SETTING_MAIN_VIEW, int)
VIEW = plugin.get_setting(SETTING_TVSHOWS_VIEW, int)
VIEW_TVSHOWS = plugin.get_setting(SETTING_TVSHOWS_VIEW, int)
VIEW_SEASONS = plugin.get_setting(SETTING_SEASONS_VIEW, int)
VIEW_EPISODES = plugin.get_setting(SETTING_EPISODES_VIEW, int)
# 10751|10762|10763|
@plugin.route('/tv')
def tv():
    """Top-level TV menu: search, TMDb/Trakt discovery lists and the user's
    Trakt section.  Most entries also expose a context-menu action that
    scans the corresponding list into the local library."""
    items = [
        {
            'label': "{0}: {1}".format(_("Search"), _("TV show")),
            'path': plugin.url_for("tv_search"),
            'icon': get_icon_path("search"),
        },
        {
            'label': "{0} ({1})".format(_("Genres"), "TMDb"),
            'path': plugin.url_for("tmdb_tv_genres"),
            'icon': get_icon_path("genres"),
            'context_menu': [
                (
                    _("Scan item to library"),
                    "RunPlugin({0})".format(plugin.url_for("tmdb_tv_genre_to_library", id="10759|16|35|80|99|18|9648|10764|10765|10766|10767|10768|37", page='1', confirm="yes"))
                ),
            ],
        },
        {
            'label': "{0} ({1})".format("On the air", "TMDb"),
            'path': plugin.url_for("tmdb_tv_now_playing", page='1'),
            'icon': get_icon_path("ontheair"),
            'context_menu': [
                (
                    _("Scan item to library"),
                    "RunPlugin({0})".format(plugin.url_for("tmdb_tv_now_playing_to_library", page='1', confirm="yes"))
                ),
            ],
        },
        {
            'label': "{0} ({1})".format("Popular", "TMDb"),
            'path': plugin.url_for("tmdb_tv_most_popular", page='1'),
            'icon': get_icon_path("popular"),
            'context_menu': [
                (
                    _("Scan item to library"),
                    "RunPlugin({0})".format(plugin.url_for("tmdb_tv_most_popular_to_library", page='1', confirm="yes"))
                ),
            ],
        },
        {
            'label': "{0} ({1})".format("Top rated", "TMDb"),
            'path': plugin.url_for("tmdb_tv_top_rated", page='1'),
            'icon': get_icon_path("top_rated"),
            'context_menu': [
                (
                    _("Scan item to library"),
                    "RunPlugin({0})".format(plugin.url_for("tmdb_tv_top_rated_to_library", page='1', confirm="yes"))
                ),
            ],
        },
        {
            'label': "{0} ({1})".format("Most played", "Trakt"),
            'path': plugin.url_for("trakt_tv_played", page='1'),
            'icon': get_icon_path("player"),
            'context_menu': [
                (
                    _("Scan item to library"),
                    "RunPlugin({0})".format(plugin.url_for("trakt_tv_played_to_library", page='1', confirm="yes"))
                ),
            ],
        },
        {
            'label': "{0} ({1})".format("Most watched", "Trakt"),
            'path': plugin.url_for("trakt_tv_watched", page='1'),
            'icon': get_icon_path("traktwatchlist"),
            'context_menu': [
                (
                    _("Scan item to library"),
                    "RunPlugin({0})".format(plugin.url_for("trakt_tv_watched_to_library", page='1', confirm="yes"))
                ),
            ],
        },
        {
            'label': "{0} ({1})".format("Most collected", "Trakt"),
            'path': plugin.url_for("trakt_tv_collected", page='1'),
            'icon': get_icon_path("traktcollection"),
            'context_menu': [
                (
                    _("Scan item to library"),
                    "RunPlugin({0})".format(plugin.url_for("trakt_tv_collected_to_library", page='1', confirm="yes"))
                ),
            ],
        },
        {
            'label': "{0} ({1})".format("Popular", "Trakt"),
            'path': plugin.url_for("tv_trakt_popular", page='1'),
            'icon': get_icon_path("traktrecommendations"),
            'context_menu': [
                (
                    _("Scan item to library"),
                    "RunPlugin({0})".format(plugin.url_for("trakt_tv_popular_to_library", page='1', confirm="yes"))
                ),
            ],
        },
        {
            'label': "{0} ({1})".format("Trending", "Trakt"),
            'path': plugin.url_for("trakt_tv_trending", page='1'),
            'icon': get_icon_path("trending"),
            'context_menu': [
                (
                    _("Scan item to library"),
                    "RunPlugin({0})".format(plugin.url_for("trakt_tv_trending_to_library", page='1', confirm="yes"))
                ),
            ],
        },
        {
            'label': "{0} {1}".format(_("Use your"), "Trakt"),
            'path': plugin.url_for("trakt_my_tv"),
            'icon': get_icon_path("trakt"),
        }
    ]
    # Every entry shares the skin background fanart.
    for item in items: item['properties'] = {'fanart_image' : get_background_path()}
    if FORCE == True: plugin.finish(items=items, view_mode=VIEW)
    else: return plugin.finish(items=items)
@plugin.route('/tv/trakt/search')
def trakt_tv_search():
    """Prompt for a search string and run a Trakt TV-show search."""
    term = plugin.keyboard(heading=_("Enter search string"))
    # keyboard returns None when cancelled; treat empty input the same way
    if term:
        return trakt_tv_search_term(term, 1)
@plugin.route('/tv/trakt/search_term/<term>/<page>')
def trakt_tv_search_term(term, page):
    """Search Trakt for TV shows matching *term* and list the results."""
    from trakt import trakt
    found, page_count = trakt.search_for_tvshow_paginated(term, page)
    return list_trakt_search_items(found, page_count, page)
def list_trakt_search_items(results, pages, page):
    """Render Trakt search results, appending a next-page entry while more
    result pages remain.

    results: list of trakt search hits (each with a "show" dict).
    pages/page: total page count and the current page (strings accepted).
    """
    shows = [get_tvshow_metadata_trakt(item["show"], None) for item in results]
    items = [make_tvshow_item(show) for show in shows if show.get('tvdb_id')]
    page = int(page)
    pages = int(pages)
    nextpage = page + 1
    # bug fix: was `pages > 1`, which offered a next page even on the last page
    if pages > page:
        args = caller_args()
        args['page'] = nextpage
        items.append({
            'label': _("Next page").format() + " >> (%d/%d)" % (nextpage, pages),
            'path': plugin.url_for(caller_name(), **args),
            'icon': get_icon_path("item_next"),
            'properties' : {'fanart_image' : get_background_path()}})
    # bug fix: the FORCE branch previously dropped the finish() return value
    if FORCE == True: return plugin.finish(items=items, view_mode=VIEW)
    else: return plugin.finish(items=items)
@plugin.route('/tv/trakt/personal')
def trakt_my_tv():
    """Menu of the user's personal Trakt TV lists: collection, watchlist,
    next episodes, calendar and recommendations.

    Each entry is a static directory item; library-scan context menus point
    at the matching *_to_library routes.
    """
    items = [
        {
            'label': "{0} ({1})".format(_("Library"), "Trakt Collection"),
            'path': plugin.url_for("trakt_tv_collection"),
            'icon': get_icon_path("traktcollection"),
            'context_menu': [
                (
                    _("Scan item to library"),
                    "RunPlugin({0})".format(plugin.url_for("trakt_tv_collection_to_library"))
                )
            ],
        },
        {
            'label': "{0} {1} ({2})".format(_("Unwatched"), _("TV shows").lower(), "Trakt watchlist"),
            'path': plugin.url_for("trakt_tv_watchlist"),
            'icon': get_icon_path("traktwatchlist"),
            'context_menu': [
                (
                    _("Scan item to library"),
                    "RunPlugin({0})".format(plugin.url_for("trakt_tv_watchlist_to_library"))
                )
            ],
        },
        {
            'label': "{0}{1} ({2})".format(_("Next recording").replace(_("Recording").lower(), ""), _("episodes"), "Trakt Next Episodes"),
            'path': plugin.url_for("trakt_tv_next_episodes"),
            'icon': get_icon_path("traktnextepisodes"),
            'context_menu': [
                (
                    "{0} ({1})".format(_("Play"), _("Random").lower()),
                    "RunPlugin({0})".format(plugin.url_for("trakt_tv_play_random_next_episode"))
                )
            ]
        },
        {
            'label': "{0}{1} ({2})".format(_("Upcoming recordings").replace(_("Recordings").lower(), ""), _("episodes"), "Trakt Calendar"),
            'path': plugin.url_for("trakt_tv_calendar"),
            'icon': get_icon_path("traktcalendar"),
        },
        {
            'label':"{0} ({1})".format(_("Find similar"), "Trakt Recommendations"),
            'path': plugin.url_for("trakt_tv_recommendations"),
            'icon': get_icon_path("traktrecommendations"),
            'context_menu': [
                (
                    _("Scan item to library"),
                    "RunPlugin({0})".format(plugin.url_for("trakt_tv_recommendations_to_library"))
                )
            ],
        },
    ]
    # every entry shares the addon's background fanart
    for item in items: item['properties'] = {'fanart_image' : get_background_path()}
    if FORCE == True: plugin.finish(items=items, sort_methods=SORT, view_mode=VIEW)
    else: return plugin.finish(items=items, sort_methods=SORT)
@plugin.route('/tv/trakt/aired_yesterday/<page>')
def trakt_tv_aired_yesterday(page):
    """Episodes that aired yesterday, according to Trakt."""
    from trakt import trakt
    aired = trakt.trakt_get_aired_yesterday(page)
    return list_aired_episodes(aired)
@plugin.route('/tv/trakt/premiered_last_week/<page>')
def trakt_tv_premiered_last_week(page):
    """Episodes that premiered during the last week, according to Trakt."""
    from trakt import trakt
    premiered = trakt.trakt_get_premiered_last_week(page)
    return list_aired_episodes(premiered)
def list_aired_episodes(result):
    """Build playable episode directory items from a date-keyed mapping of
    aired episodes.

    result: dict mapping a date to a list of Trakt calendar entries (each
    with "show" and "episode" dicts). Returns the finished plugin listing,
    or None when there is nothing to show.
    """
    genres_dict = trakt_get_genres()
    items = []
    if not result:
        return None
    for day in result.iteritems():
        for episode in day[1]:
            if episode["episode"]["title"] != None:
                episode_title = (episode["episode"]["title"]).encode('utf-8')
            else:
                # title not announced yet
                episode_title = "TBA"
            # bug fix: the old try/except performed the same failing lookup in
            # both branches; a plain .get() is what was intended
            tvdb_id = episode["show"]["ids"].get("tvdb")
            if not tvdb_id:
                # without a tvdb id the play route cannot resolve the show
                continue
            season_num = episode["episode"]["season"]
            episode_num = episode["episode"]["number"]
            tvshow_title = (episode["show"]["title"]).encode('utf-8')
            info = get_tvshow_metadata_trakt(episode["show"], genres_dict)
            info['season'] = season_num
            info['episode'] = episode_num
            info['title'] = episode["episode"]["title"]
            info['aired'] = episode["episode"].get('first_aired','')
            info['premiered'] = episode["episode"].get('first_aired','')
            info['rating'] = episode["episode"].get('rating', '')
            info['plot'] = episode["episode"].get('overview','')
            info['tagline'] = episode["episode"].get('tagline')
            info['votes'] = episode["episode"].get('votes','')
            info['showtitle'] = episode["show"]["title"]
            label = "{0} - S{1:02d}E{2:02d} - {3}".format(tvshow_title, season_num, episode_num, episode_title)
            context_menu = [
                (
                    "{0} {1}...".format(_("Select"), _("Stream").lower()),
                    "PlayMedia({0})".format(plugin.url_for("tv_play", id=tvdb_id, season=season_num, episode=episode_num, mode='select'))
                ),
                (
                    _("TV show information"),
                    'Action(Info)'
                ),
                (
                    _("Add to playlist"),
                    "RunPlugin({0})".format(plugin.url_for("lists_add_episode_to_list", src='tvdb', id=tvdb_id,
                                                           season=season_num, episode=episode_num))
                ),
            ]
            items.append({'label': label,
                          'path': plugin.url_for("tv_play", id=tvdb_id, season=season_num, episode=episode_num, mode='default'),
                          'context_menu': context_menu,
                          'info': info,
                          'is_playable': True,
                          'info_type': 'video',
                          'stream_info': {'video': {}},
                          'thumbnail': info['poster'],
                          'poster': info['poster'],
                          'icon': "DefaultVideo.png",
                          'properties' : {'fanart_image' : info['fanart']},
                          })
    plugin.set_content('episodes')
    if FORCE == True: return plugin.finish(items=items, sort_methods=SORT, view_mode=VIEW)
    else: return plugin.finish(items=items, sort_methods=SORT)
@plugin.route('/tv/trakt/played/<page>')
def trakt_tv_played(page, raw=False):
    """Trakt's most-played shows; raw=True returns the API payload untouched."""
    from trakt import trakt
    played, page_count = trakt.trakt_get_played_shows_paginated(page)
    plugin.log.info(played)
    if raw:
        return played
    return list_trakt_tvshows_played_paginated(played, page_count, page)
@plugin.route('/tv/trakt/played_to_library/<page>/<confirm>')
def trakt_tv_played_to_library(page, confirm, uncached=False):
    """Add Trakt's 'Most played' shows from <page> to the library.

    page: a single page number or a multi-page spec parsed by page_redux.
    confirm: "no" skips the yes/no dialog. uncached forces a full refresh.
    """
    try:
        pages = [int(page)]
    except ValueError:
        # not a plain number: expand a multi-page spec via page_redux
        pages = page_redux(page)
    if confirm == "no" or dialogs.yesno(_("Scan item to library"), "{0}[CR]{1}[CR]{2}".format(_("Library"), _("Add %s") % ("'{0} ({1}) {2} {3}'".format("Most played", "Trakt", _("page"), ','.join([str(i) for i in pages]))),_("Are you sure?"))):
        tv = []
        # accumulate shows across pages, skipping duplicates
        for i in pages: tv = tv + [m for m in trakt_tv_played(i, True) if m not in tv]
        if uncached: tv_add_all_to_library(tv, True)
        else: tv_add_all_to_library(tv)
def list_trakt_tvshows_played_paginated(results, total_items, page):
    """Render most-played shows sorted by title, with a next-page entry.

    total_items is the Trakt total item count; Trakt serves 99 per page.
    """
    results = sorted(results,key=lambda item: item["show"]["title"].lower().replace("the ", ""))
    genres_dict = trakt_get_genres()
    shows = [get_tvshow_metadata_trakt(item["show"], genres_dict) for item in results]
    items = [make_tvshow_item(show) for show in shows if show.get('tvdb_id')]
    nextpage = int(page) + 1
    # ceil(total_items / 99) without floats
    pages = int(total_items) // 99 + (int(total_items) % 99 > 0)
    if int(pages) > int(page):
        items.append({
            'label': _("Next page").format() + " >> (%s/%s)" % (nextpage, pages),
            'path': plugin.url_for("trakt_tv_played", page=nextpage),
            'icon': get_icon_path("item_next"),
        })
    if FORCE == True: return plugin.finish(items=items, sort_methods=SORT, view_mode=VIEW)
    else: return plugin.finish(items=items, sort_methods=SORT)
@plugin.route('/tv/trakt/watched/<page>')
def trakt_tv_watched(page, raw=False):
    """Trakt's most-watched shows; raw=True returns the API payload untouched."""
    from trakt import trakt
    watched, item_total = trakt.trakt_get_watched_shows_paginated(page)
    if raw:
        return watched
    return list_trakt_tvshows_watched_paginated(watched, item_total, page)
@plugin.route('/tv/trakt/watched_to_library/<page>/<confirm>')
def trakt_tv_watched_to_library(page, confirm, uncached=False):
    """Add Trakt's 'Most watched' shows from <page> to the library.

    page: a single page number or a multi-page spec parsed by page_redux.
    confirm: "no" skips the yes/no dialog. uncached forces a full refresh.
    """
    try:
        pages = [int(page)]
    except ValueError:
        # not a plain number: expand a multi-page spec via page_redux
        pages = page_redux(page)
    if confirm == "no" or dialogs.yesno(_("Scan item to library"), "{0}[CR]{1}[CR]{2}".format(_("Library"), _("Add %s") % ("'{0} ({1}) {2} {3}'".format("Most watched", "Trakt", _("page"), ','.join([str(i) for i in pages]))),_("Are you sure?"))):
        tv = []
        # accumulate shows across pages, skipping duplicates
        for i in pages: tv = tv + [m for m in trakt_tv_watched(i, True) if m not in tv]
        if uncached: tv_add_all_to_library(tv, True)
        else: tv_add_all_to_library(tv)
def list_trakt_tvshows_watched_paginated(results, total_items, page):
    """Render most-watched shows sorted by title, with a next-page entry.

    total_items is the Trakt total item count; Trakt serves 99 per page.
    """
    results = sorted(results,key=lambda item: item["show"]["title"].lower().replace("the ", ""))
    genres_dict = trakt_get_genres()
    shows = [get_tvshow_metadata_trakt(item["show"], genres_dict) for item in results]
    items = [make_tvshow_item(show) for show in shows if show.get('tvdb_id')]
    nextpage = int(page) + 1
    # ceil(total_items / 99) without floats
    pages = int(total_items) // 99 + (int(total_items) % 99 > 0)
    if int(pages) > int(page):
        items.append({
            'label': _("Next page").format() + " >> (%s/%s)" % (nextpage, pages),
            'path': plugin.url_for("trakt_tv_watched", page=nextpage),
            'icon': get_icon_path("item_next"),
        })
    if FORCE == True: return plugin.finish(items=items, sort_methods=SORT, view_mode=VIEW)
    else: return plugin.finish(items=items, sort_methods=SORT)
@plugin.route('/tv/trakt/collected/<page>')
def trakt_tv_collected(page, raw=False):
    """Trakt's most-collected shows; raw=True returns the API payload untouched."""
    from trakt import trakt
    results, total_items = trakt.trakt_get_collected_shows_paginated(page)
    if raw: return results
    # bug fix: this was routed through the 'watched' lister, so the next-page
    # entry pointed at trakt_tv_watched; use the dedicated collected lister
    else: return list_trakt_tvshows_collected_paginated(results, total_items, page)
@plugin.route('/tv/trakt/collected_to_library/<page>/<confirm>')
def trakt_tv_collected_to_library(page, confirm, uncached=False):
    """Add Trakt's 'Most collected' shows from <page> to the library.

    page: a single page number or a multi-page spec parsed by page_redux.
    confirm: "no" skips the yes/no dialog. uncached forces a full refresh.
    """
    try:
        pages = [int(page)]
    except ValueError:
        # not a plain number: expand a multi-page spec via page_redux
        pages = page_redux(page)
    if confirm == "no" or dialogs.yesno(_("Scan item to library"), "{0}[CR]{1}[CR]{2}".format(_("Library"), _("Add %s") % ("'{0} ({1}) {2} {3}'".format("Most collected", "Trakt", _("page"), ','.join([str(i) for i in pages]))),_("Are you sure?"))):
        tv = []
        # accumulate shows across pages, skipping duplicates
        for i in pages: tv = tv + [m for m in trakt_tv_collected(i, True) if m not in tv]
        if uncached: tv_add_all_to_library(tv, True)
        else: tv_add_all_to_library(tv)
def list_trakt_tvshows_collected_paginated(results, total_items, page):
    """Render most-collected shows sorted by title, with a next-page entry.

    total_items is the Trakt total item count; Trakt serves 99 per page.
    """
    results = sorted(results,key=lambda item: item["show"]["title"].lower().replace("the ", ""))
    genres_dict = trakt_get_genres()
    shows = [get_tvshow_metadata_trakt(item["show"], genres_dict) for item in results]
    items = [make_tvshow_item(show) for show in shows if show.get('tvdb_id')]
    nextpage = int(page) + 1
    # ceil(total_items / 99) without floats
    pages = int(total_items) // 99 + (int(total_items) % 99 > 0)
    if int(pages) > int(page):
        items.append({
            'label': _("Next page").format() + " >> (%s/%s)" % (nextpage, pages),
            'path': plugin.url_for("trakt_tv_collected", page=nextpage),
            'icon': get_icon_path("item_next"),
        })
    if FORCE == True: return plugin.finish(items=items, sort_methods=SORT, view_mode=VIEW)
    else: return plugin.finish(items=items, sort_methods=SORT)
@plugin.route('/tv/trakt/popular/<page>')
def tv_trakt_popular(page, raw=False):
    """Popular shows on Trakt; raw=True returns the API payload untouched."""
    from trakt import trakt
    popular, page_count = trakt.trakt_get_popular_shows_paginated(page)
    if raw:
        return popular
    return list_trakt_tvshows_popular_paginated(popular, page_count, page)
@plugin.route('/tv/trakt/popular_to_library/<page>/<confirm>')
def trakt_tv_popular_to_library(page, confirm, uncached=False):
    """Add Trakt's 'Popular' shows from <page> to the library.

    page: a single page number or a multi-page spec parsed by page_redux.
    confirm: "no" skips the yes/no dialog. uncached forces a full refresh.
    """
    try:
        pages = [int(page)]
    except ValueError:
        # not a plain number: expand a multi-page spec via page_redux
        pages = page_redux(page)
    if confirm == "no" or dialogs.yesno(_("Scan item to library"), "{0}[CR]{1}[CR]{2}".format(_("Library"), _("Add %s") % ("'{0} ({1}) {2} {3}'".format("Popular", "Trakt", _("page"), ','.join([str(i) for i in pages]))),_("Are you sure?"))):
        tv = []
        # accumulate shows across pages, skipping duplicates
        for i in pages: tv = tv + [m for m in tv_trakt_popular(i, True) if m not in tv]
        # the popular endpoint returns bare show dicts; wrap them as the
        # library helper expects
        if uncached: tv_add_all_to_library([{u'show': m} for m in tv], True)
        else: tv_add_all_to_library([{u'show': m} for m in tv])
def list_trakt_tvshows_popular_paginated(results, pages, page):
    """Render popular shows sorted by title, with a next-page entry.

    The popular endpoint returns bare show dicts (no "show" wrapper).
    """
    results = sorted(results,key=lambda item: item["title"].lower().replace("the ", ""))
    genres_dict = trakt_get_genres()
    shows = [get_tvshow_metadata_trakt(item, genres_dict) for item in results]
    items = [make_tvshow_item(show) for show in shows if show.get('tvdb_id')]
    nextpage = int(page) + 1
    # bug fix: <page> arrives as a string from the route; in Python 2 an
    # int-vs-str comparison never matched, so the next-page entry never showed
    if int(pages) > int(page):
        items.append({
            'label': _("Next page").format() + " >> (%s/%s)" % (nextpage, pages),
            'path': plugin.url_for("tv_trakt_popular", page=nextpage),
            'icon': get_icon_path("item_next"),
        })
    if FORCE == True: return plugin.finish(items=items, sort_methods=SORT, view_mode=VIEW)
    else: return plugin.finish(items=items, sort_methods=SORT)
@plugin.route('/tv/trakt/trending/<page>')
def trakt_tv_trending(page, raw=False):
    """Trending shows on Trakt; raw=True returns the API payload untouched."""
    from trakt import trakt
    results, pages = trakt.trakt_get_trending_shows_paginated(page)
    if raw: return results
    # bug fix: the listing was built but never returned to the router
    else: return list_trakt_tvshows_trending_paginated(results, pages, page)
@plugin.route('/tv/trakt/trending_to_library/<page>/<confirm>')
def trakt_tv_trending_to_library(page, confirm, uncached=False):
    """Add Trakt's 'Trending' shows from <page> to the library.

    page: a single page number or a multi-page spec parsed by page_redux.
    confirm: "no" skips the yes/no dialog. uncached forces a full refresh.
    """
    try:
        pages = [int(page)]
    except ValueError:
        # not a plain number: expand a multi-page spec via page_redux
        pages = page_redux(page)
    if confirm == "no" or dialogs.yesno(_("Scan item to library"), "{0}[CR]{1}[CR]{2}".format(_("Library"), _("Add %s") % ("'{0} ({1}) {2} {3}'".format("Trending", "Trakt", _("page"), ','.join([str(i) for i in pages]))),_("Are you sure?"))):
        tv = []
        # accumulate shows across pages, skipping duplicates
        for i in pages: tv = tv + [m for m in trakt_tv_trending(i, True) if m not in tv]
        if uncached: tv_add_all_to_library(tv, True)
        else: tv_add_all_to_library(tv)
def list_trakt_tvshows_trending_paginated(results, pages, page):
    """Render trending shows sorted by title, with a next-page entry."""
    results = sorted(results,key=lambda item: item["show"]["title"].lower().replace("the ", ""))
    genres_dict = trakt_get_genres()
    shows = [get_tvshow_metadata_trakt(item["show"], genres_dict) for item in results]
    items = [make_tvshow_item(show) for show in shows if show.get('tvdb_id')]
    nextpage = int(page) + 1
    # bug fix: <page> arrives as a string from the route; in Python 2 an
    # int-vs-str comparison never matched, so the next-page entry never showed
    if int(pages) > int(page):
        items.append({
            'label': _("Next page").format() + " >> (%s/%s)" % (nextpage, pages),
            'path': plugin.url_for("trakt_tv_trending", page=nextpage),
            'icon': get_icon_path("item_next"),
        })
    if FORCE == True: return plugin.finish(items=items, sort_methods=SORT, view_mode=VIEW)
    else: return plugin.finish(items=items, sort_methods=SORT)
@plugin.route('/tv/search')
def tv_search():
    """Prompt for a search string and open the combined TV search menu."""
    term = plugin.keyboard(heading=_("Enter search string"))
    # keyboard returns None when cancelled; treat empty input the same way
    if term:
        return tv_search_term(term, 1)
@plugin.route('/tv/search/edit/<term>')
def tv_search_edit(term):
    """Re-open the keyboard pre-filled with <term> and search again."""
    term = plugin.keyboard(default=term, heading=_("Enter search string"))
    # keyboard returns None when cancelled; treat empty input the same way
    if term:
        return tv_search_term(term, 1)
@plugin.route('/tv/search_term/<term>/<page>')
def tv_search_term(term, page):
    """Combined search menu for <term>: one entry per search backend
    (TMDb/Trakt/TVDb shows, LastFM music, live channels, Trakt lists,
    TMDb/Trakt movies) plus an edit entry to change the search string.
    """
    items = [
        {
            'label': "{0}: '{1}' - {2} ({3})".format(_("Search"), term, _("TV shows"), "TMDb"),
            'path': plugin.url_for("tmdb_tv_search_term", term=term, page='1'),
            'icon': get_icon_path("tv"),
            'thumbnail': get_icon_path("tv"),
        },
        {
            'label': "{0}: '{1}' - {2} ({3})".format(_("Search"), term, _("TV shows"), "Trakt"),
            'path': plugin.url_for("trakt_tv_search_term", term=term, page='1'),
            'icon': get_icon_path("tv"),
            'thumbnail': get_icon_path("tv"),
        },
        {
            'label': "{0}: '{1}' - {2} ({3})".format(_("Search"), term, _("TV shows"), "TVDb"),
            'path': plugin.url_for("tvdb_tv_search_term", term=term, page='1'),
            'icon': get_icon_path("search"),
            'thumbnail': get_icon_path("search"),
        },
        {
            'label': "{0}: '{1}' - {2} ({3})".format(_("Search"), term, _("Albums"), "LastFM"),
            'path': plugin.url_for("music_search_album_term", term=term, page='1'),
            'icon': get_icon_path("music"),
            'thumbnail': get_icon_path("music"),
        },
        {
            'label': "{0}: '{1}' - {2} ({3})".format(_("Search"), term, _("Artists"), "LastFM"),
            'path': plugin.url_for("music_search_artist_term", term=term, page='1'),
            'icon': get_icon_path("music"),
            'thumbnail': get_icon_path("music"),
        },
        {
            'label': "{0}: '{1}' - {2} ({3})".format(_("Search"), term, _("Tracks"), "LastFM"),
            'path': plugin.url_for("music_search_track_term", term=term, page='1'),
            'icon': get_icon_path("music"),
            'thumbnail': get_icon_path("music"),
        },
        {
            'label': "{0}: '{1}' - {2} ({3})".format(_("Search"), term, _("Channels"), "Live addons"),
            'path': plugin.url_for("live_search_term", term=term),
            'icon': get_icon_path("live"),
            'thumbnail': get_icon_path("live"),
        },
        {
            'label': "{0}: '{1}' - {2} ({3})".format(_("Search"), term, _("Playlists"), "Trakt"),
            'path': plugin.url_for("lists_search_for_lists_term", term=term, page='1'),
            'icon': get_icon_path("lists"),
            'thumbnail': get_icon_path("lists"),
        },
        {
            'label': "{0}: '{1}' - {2} ({3})".format(_("Search"), term, _("Movies"), "TMDb"),
            'path': plugin.url_for("tmdb_movies_search_term", term=term, page='1'),
            'icon': get_icon_path("movies"),
            'thumbnail': get_icon_path("movies"),
        },
        {
            'label': "{0}: '{1}' - {2} ({3})".format(_("Search"), term, _("Movies"), "Trakt"),
            'path': plugin.url_for("trakt_movies_search_term", term=term, page='1'),
            'icon': get_icon_path("movies"),
            'thumbnail': get_icon_path("movies"),
        },
        {
            'label': "{0}: '{1}' - {2} ({3})".format(_("Search"), term, _("TV shows"), plugin.addon.getAddonInfo('name')),
            'path': plugin.url_for("tv_search_term", term=term, page='1'),
            'icon': get_icon_path("search"),
            'thumbnail': get_icon_path("search"),
        },
        {
            'label': "{0} {1}".format(_("Edit"), _("Search string").lower()),
            'path': plugin.url_for("tv_search_edit", term=term),
            'icon': get_icon_path("search"),
            'thumbnail': get_icon_path("search"),
        },
    ]
    # every entry shares the addon's background fanart
    for item in items:
        item['properties'] = {'fanart_image' : get_background_path()}
    return items
@plugin.route('/tv/tmdb/search')
def tmdb_tv_search():
    """Activate TMDb TV search."""
    term = plugin.keyboard(heading=_("Enter search string"))
    # keyboard returns None when cancelled; treat empty input the same way
    if term:
        return tmdb_tv_search_term(term, 1)
@plugin.route('/tv/tmdb/search_term/<term>/<page>')
def tmdb_tv_search_term(term, page):
    """List TMDb TV search results for <term>."""
    import_tmdb()
    found = tmdb.Search().tv(query=term, language = LANG, page = page)
    listing = list_tvshows(found)
    if FORCE == True:
        return plugin.finish(items=listing, sort_methods=SORT, view_mode=VIEW)
    return plugin.finish(items=listing, sort_methods=SORT)
@plugin.cached_route('/tv/tmdb/genres', TTL=CACHE_TTL)
def tmdb_tv_genres():
    """Directory of TMDb TV genres, sorted alphabetically by label."""
    entries = []
    for genre_id, genre_name in get_tv_genres().items():
        entries.append({
            'label': genre_name,
            'icon': get_genre_icon(genre_id),
            'path': plugin.url_for("tmdb_tv_genre", id=genre_id, page='1'),
            'context_menu': [(
                _("Scan item to library"),
                "RunPlugin({0})".format(plugin.url_for("tmdb_tv_genre_to_library", id=genre_id, page='1', confirm="yes"))
            )],
            'properties': {'fanart_image' : get_background_path()},
        })
    entries.sort(key=lambda entry: entry['label'])
    return entries
@plugin.cached_route('/tv/genre/<id>/<page>', TTL=CACHE_TTL)
def tmdb_tv_genre(id, page, raw=False):
    """Shows discovered by TMDb genre id; raw=True returns the API payload."""
    if FORCE == True:
        plugin.set_view_mode(VIEW)
    import_tmdb()
    discovered = tmdb.Discover().tv(with_genres=id, page=page, language=LANG)
    if raw:
        return discovered
    return list_tvshows(discovered)
@plugin.route('/tv/genre_to_library/<id>/<page>/<confirm>')
def tmdb_tv_genre_to_library(id, page, confirm):
    """Add all shows of TMDb genre(s) <id> on page(s) <page> to the library.

    id may be a single genre id or several joined with '|'.
    page: a single page number or a multi-page spec parsed by page_redux.
    """
    genre_names = get_tv_genres()
    if "|" in id:
        genres = id.split("|")
        name = ' + '.join([genre_names.get(int(i), None) for i in genres])
    else:
        genres = [id]
        name = genre_names.get(int(id), None)
    try:
        pages = [int(page)]
    except ValueError:
        # not a plain number: expand a multi-page spec via page_redux
        pages = page_redux(page)
    if confirm == "no" or dialogs.yesno(_("Scan item to library"), "{0}[CR]{1}[CR]{2}".format(_("Library"), _("Add %s") % ("'{0} ({1}) {2} {3}'".format(name, "TMDb", _("page"), ','.join([str(i) for i in pages]))),_("Are you sure?"))):
        items = {}
        tv = []
        # accumulate shows across genres and pages, skipping duplicates
        for g in genres:
            for i in pages: tv = tv + [m for m in tmdb_tv_genre(g, i, True)["results"] if m not in tv]
        items["results"] = tv
        tv_add_all_to_library(items)
@plugin.cached_route('/tv/tmdb/now_playing/<page>', TTL=CACHE_TTL)
def tmdb_tv_now_playing(page, raw=False):
    """Shows currently on the air (TMDb); raw=True returns the API payload."""
    if FORCE == True:
        plugin.set_view_mode(VIEW)
    import_tmdb()
    airing = tmdb.TV().on_the_air(page=page, language=LANG)
    if raw:
        return airing
    return list_tvshows(airing)
@plugin.route('/tv/tmdb/now_playing_to_library/<page>/<confirm>')
def tmdb_tv_now_playing_to_library(page, confirm):
    """Add TMDb's 'On the air' shows from <page> to the library.

    page: a single page number or a multi-page spec parsed by page_redux.
    """
    try:
        pages = [int(page)]
    except ValueError:
        # not a plain number: expand a multi-page spec via page_redux
        pages = page_redux(page)
    if confirm == "no" or dialogs.yesno(_("Scan item to library"), "{0}[CR]{1}[CR]{2}".format(_("Library"), _("Add %s") % ("'{0} ({1}) {2} {3}'".format("On the air", "TMDb", _("page"), ','.join([str(i) for i in pages]))),_("Are you sure?"))):
        items = {}
        tv = []
        # accumulate shows across pages, skipping duplicates
        for i in pages: tv = tv + [m for m in tmdb_tv_now_playing(i, True)["results"] if m not in tv]
        items["results"] = tv
        tv_add_all_to_library(items)
@plugin.cached_route('/tv/tmdb/most_popular/<page>', TTL=CACHE_TTL)
def tmdb_tv_most_popular(page, raw=False):
    """Most popular shows (TMDb); raw=True returns the API payload."""
    if FORCE == True:
        plugin.set_view_mode(VIEW)
    import_tmdb()
    popular = tmdb.TV().popular(page=page, language=LANG)
    if raw:
        return popular
    return list_tvshows(popular)
@plugin.route('/tv/tmdb/most_popular_to_library/<page>/<confirm>')
def tmdb_tv_most_popular_to_library(page, confirm):
    """Add TMDb's 'Popular' shows from <page> to the library.

    page: a single page number or a multi-page spec parsed by page_redux.
    """
    try:
        pages = [int(page)]
    except ValueError:
        # not a plain number: expand a multi-page spec via page_redux
        pages = page_redux(page)
    if confirm == "no" or dialogs.yesno(_("Scan item to library"), "{0}[CR]{1}[CR]{2}".format(_("Library"), _("Add %s") % ("'{0} ({1}) {2} {3}'".format("Popular", "TMDb", _("page"), ','.join([str(i) for i in pages]))),_("Are you sure?"))):
        items = {}
        tv = []
        # accumulate shows across pages, skipping duplicates
        for i in pages: tv = tv + [m for m in tmdb_tv_most_popular(i, True)["results"] if m not in tv]
        items["results"] = tv
        tv_add_all_to_library(items)
@plugin.cached_route('/tv/tmdb/top_rated/<page>', TTL=CACHE_TTL)
def tmdb_tv_top_rated(page, raw=False):
    """Top rated shows (TMDb); raw=True returns the API payload."""
    if FORCE == True:
        plugin.set_view_mode(VIEW)
    import_tmdb()
    rated = tmdb.TV().top_rated(page=page, language=LANG)
    if raw:
        return rated
    return list_tvshows(rated)
@plugin.route('/tv/tmdb/top_rated_to_library/<page>/<confirm>')
def tmdb_tv_top_rated_to_library(page, confirm):
    """Add TMDb's 'Top rated' shows from <page> to the library.

    page: a single page number or a multi-page spec parsed by page_redux.
    """
    try:
        pages = [int(page)]
    except ValueError:
        # not a plain number: expand a multi-page spec via page_redux
        pages = page_redux(page)
    if confirm == "no" or dialogs.yesno(_("Scan item to library"), "{0}[CR]{1}[CR]{2}".format(_("Library"), _("Add %s") % ("'{0} ({1}) {2} {3}'".format("Top rated", "TMDb", _("page"), ','.join([str(i) for i in pages]))),_("Are you sure?"))):
        items = {}
        tv = []
        # accumulate shows across pages, skipping duplicates
        for i in pages: tv = tv + [m for m in tmdb_tv_top_rated(i, True)["results"] if m not in tv]
        items["results"] = tv
        tv_add_all_to_library(items)
@plugin.route('/tv/tvdb/search')
def tvdb_tv_search():
    """Activate TVDb TV search."""
    term = plugin.keyboard(heading=_("Enter search string"))
    # keyboard returns None when cancelled; treat empty input the same way
    if term:
        return tvdb_tv_search_term(term, 1)
@plugin.route('/tv/tvdb/search_term/<term>/<page>')
def tvdb_tv_search_term(term, page):
    """List TVDb search results for <term> (full records fetched in parallel)."""
    if FORCE == True: plugin.set_view_mode(VIEW)
    import_tvdb()
    search_results = tvdb.search(term, language=LANG)

    # PEP 8 E731: a named def instead of a lambda assignment
    def load_full_tvshow(tvshow):
        # each search hit is only a stub; fetch the complete show record
        return tvdb.get_show(tvshow['id'], full=True)

    items = []
    for tvdb_show in execute(load_full_tvshow, search_results, workers=10):
        info = build_tvshow_info(tvdb_show)
        items.append(make_tvshow_item(info))
    return items
@plugin.route('/tv/trakt/personal/collection')
def trakt_tv_collection():
    """The user's Trakt show collection."""
    from trakt import trakt
    collection = trakt.trakt_get_collection("shows")
    listing = list_trakt_tvshows(collection)
    if FORCE == True:
        return plugin.finish(items=listing, sort_methods=SORT, view_mode=VIEW)
    return plugin.finish(items=listing, sort_methods=SORT)
@plugin.route('/tv/trakt/personal/watchlist')
def trakt_tv_watchlist():
    """The user's Trakt show watchlist."""
    from trakt import trakt
    watchlist = trakt.trakt_get_watchlist("shows")
    listing = list_trakt_tvshows(watchlist)
    if FORCE == True:
        return plugin.finish(items=listing, sort_methods=SORT, view_mode=VIEW)
    return plugin.finish(items=listing, sort_methods=SORT)
@plugin.route('/tv/trakt/personal/next_episodes')
def trakt_tv_next_episodes(raw=False):
    """The user's pending next episodes that have already aired.

    raw=True returns the filtered episode list instead of rendering it.
    """
    from trakt import trakt
    aired_episodes = []
    result = trakt.trakt_get_next_episodes()
    for episode in result:
        trakt_id = episode["show"]["ids"]["trakt"]
        episode_info = trakt.get_episode(trakt_id, episode["season"], episode["number"])
        first_aired_string = episode_info["first_aired"]
        episode["first_aired"] = first_aired_string
        # bug fix: the year was parsed before checking for a missing air date,
        # so a None first_aired crashed with TypeError; guard first
        if not first_aired_string:
            continue
        if int(first_aired_string[:4]) < 1970:
            # pre-epoch dates cannot go through time.mktime; treat as aired
            aired_episodes.append(episode)
        else:
            first_aired = time.mktime(time.strptime(first_aired_string[:19], "%Y-%m-%dT%H:%M:%S"))
            if first_aired < time.time():
                aired_episodes.append(episode)
    if raw: return aired_episodes
    else: items = list_trakt_episodes(aired_episodes, with_time=True)
@plugin.route('/tv/trakt/personal/random_next_episode')
def trakt_tv_play_random_next_episode():
    """Pick one of the user's pending next episodes at random and play it."""
    from meta.utils.playrandom import trakt_play_random
    pending = trakt_tv_next_episodes(raw=True)
    for entry in pending:
        entry["type"] = "episode"
    trakt_play_random(pending)
@plugin.route('/tv/trakt/personal/calendar')
def trakt_tv_calendar(raw=False):
    """Upcoming episodes from the user's Trakt calendar."""
    from trakt import trakt
    schedule = trakt.trakt_get_calendar()
    if raw:
        return schedule
    list_trakt_episodes(schedule, with_time=True)
@plugin.route('/tv/trakt/personal/recommendations')
def trakt_tv_recommendations():
    """Personalised show recommendations from Trakt."""
    from trakt import trakt
    genres_dict = trakt.trakt_get_genres("tv")
    listing = [make_tvshow_item(get_tvshow_metadata_trakt(show, genres_dict))
               for show in trakt.get_recommendations("shows")]
    if FORCE == True:
        return plugin.finish(items=listing, sort_methods=SORT, view_mode=VIEW)
    return plugin.finish(items=listing, sort_methods=SORT)
def get_tvdb_id_from_name(name, lang):
    """Resolve a show name to a TVDB id, asking the user when ambiguous.

    Returns None when nothing is found or the user cancels the picker.
    """
    import_tvdb()
    matches = tvdb.search(name, language=lang)
    if not matches:
        dialogs.ok(_("%s not found") % _("TV show"), "{0} {1} in tvdb".format(_("no show information found for"), to_utf8(name)))
        return
    candidates = []
    for show in matches:
        # derive a display year from the first-aired date when present
        if "firstaired" in show:
            show["year"] = int(show['firstaired'].split("-")[0].strip())
        else:
            show["year"] = 0
        candidates.append(show)
    if len(candidates) > 1:
        choice = dialogs.select(_("Choose Show"), ["{0} ({1})".format(
            to_utf8(s["seriesname"]), s["year"]) for s in candidates])
    else:
        choice = 0
    if choice != -1:
        return candidates[choice]["id"]
def get_tvdb_id_from_imdb_id(imdb_id):
    """Map an IMDB id to a TVDB id, alerting the user when there is no match."""
    import_tvdb()
    tvdb_id = tvdb.search_by_imdb(imdb_id)
    if tvdb_id:
        return tvdb_id
    dialogs.ok(_("%s not found") % _("TV show"), "{0} {1} in tvdb".format(_("no show information found for"), imdb_id))
@plugin.route('/tv/trakt/personal/collection_to_library')
def trakt_tv_collection_to_library(preaprove=False, uncached=False):
    """Add the user's whole Trakt show collection to the library."""
    from trakt import trakt
    prompt = "{0}[CR]{1}".format(_("Add %s") % ("'{0} {1} {2}'".format("Trakt", _("TV"), _("Collection").lower())), _("Are you sure?"))
    if preaprove or dialogs.yesno(_("Scan item to library"), prompt):
        if uncached:
            tv_add_all_to_library(trakt.trakt_get_collection_uncached("shows"), True)
        else:
            tv_add_all_to_library(trakt.trakt_get_collection("shows"))
@plugin.route('/tv/trakt/personal/watchlist_to_library')
def trakt_tv_watchlist_to_library(preaprove=False, uncached=False):
    """Add the user's whole Trakt show watchlist to the library."""
    from trakt import trakt
    prompt = "{0}[CR]{1}".format(_("Add %s") % ("'{0} {1} {2}'".format("Trakt", _("TV"), _("Watchlist").lower())), _("Are you sure?"))
    if preaprove or dialogs.yesno(_("Scan item to library"), prompt):
        if uncached:
            tv_add_all_to_library(trakt.trakt_get_watchlist_uncached("shows"), True)
        else:
            tv_add_all_to_library(trakt.trakt_get_watchlist("shows"))
@plugin.route('/tv/trakt/personal/recommendations_to_library')
def trakt_tv_recommendations_to_library(uncached=False):
    """Add the user's Trakt show recommendations to the library.

    Bug fix: `uncached` was referenced but never defined, raising NameError
    whenever the user confirmed; it is now a keyword parameter defaulting
    to the cached path.
    """
    from trakt import trakt
    if dialogs.yesno(_("Scan item to library"), "{0}[CR]{1}".format(_("Add %s") % ("'{0} {1} {2}'".format("Trakt", _("TV"), _("Recommendations").lower())),_("Are you sure?"))):
        if uncached: tv_add_all_to_library(trakt.get_recommendations("shows"), True)
        else: tv_add_all_to_library(trakt.get_recommendations("shows"))
@plugin.route('/tv/trakt/updated/<page>')
def tv_trakt_updated(page):
    """Shows recently updated on Trakt."""
    from trakt import trakt
    results, pages = trakt.trakt_updated_shows(page)
    # bug fix: this was routed through the 'trending' lister, so the
    # next-page entry pointed at trakt_tv_trending; the dedicated updated
    # lister (which links back to this route) was never called
    return list_trakt_tvshows_updated_paginated(results, pages, page)
def list_trakt_tvshows_updated_paginated(results, pages, page):
    """Render recently-updated shows sorted by title; returns the item list."""
    results = sorted(results,key=lambda item: item["show"]["title"].lower().replace("the ", ""))
    genres_dict = trakt_get_genres()
    shows = [get_tvshow_metadata_trakt(item["show"], genres_dict) for item in results]
    items = [make_tvshow_item(show) for show in shows if show.get('tvdb_id')]
    nextpage = int(page) + 1
    # bug fix: <page> arrives as a string from the route; in Python 2 an
    # int-vs-str comparison never matched, so the next-page entry never showed
    if int(pages) > int(page):
        items.append({
            'label': _("Next page").format() + " >> (%s/%s)" % (nextpage, pages),
            'path': plugin.url_for("tv_trakt_updated", page=nextpage),
            'icon': get_icon_path("item_next"),
        })
    if FORCE == True: plugin.set_view_mode(VIEW)
    return items
@plugin.route('/tv/play_by_name/<name>/<season>/<episode>/<lang>/<mode>', options = {"lang": "en", "mode": "context"})
def tv_play_by_name(name, season, episode, lang, mode):
    """Resolve a show by name, then play the requested episode."""
    tvdb_id = get_tvdb_id_from_name(name, lang)
    if not tvdb_id:
        return
    tv_play(tvdb_id, season, episode, mode)
    # optionally remember anything played via the by-name route
    if plugin.get_setting(SETTING_TV_PLAYED_BY_ADD, bool) == True:
        tv_add_to_library(tvdb_id)
@plugin.route('/tv/play_by_source/<source>/<id>/<season>/<episode>/<mode>')
def tv_play_by_source(source, id, season, episode, mode):
    """Route playback to the resolver matching the metadata source."""
    resolvers = {
        "tmdb": tmdb_play_episode,
        "trakt": trakt_play_episode,
        "tvmaze": tvmaze_play_episode,
    }
    resolvers.get(source, play_episode)(id, season, episode, mode)
@plugin.route('/tv/play_by_name_guide/<name>/<season>/<episode>/<lang>', options = {"lang": "en"})
def guide_tv_play_by_name(name, season, episode, lang):
    """Resolve a show by name from the guide, then play the episode."""
    tvdb_id = get_tvdb_id_from_name(name, lang)
    if not tvdb_id:
        return
    guide_tv_play(tvdb_id, season, episode, "default")
    # optionally remember anything played via the by-name route
    if plugin.get_setting(SETTING_TV_PLAYED_BY_ADD, bool) == True:
        tv_add_to_library(tvdb_id)
@plugin.route('/tv/play_by_name_only/<name>/<lang>', options = {"lang": "en"})
def tv_play_by_name_only(name, lang):
    """Resolve a show by name, then interactively pick season and episode
    via dialogs and play the selection."""
    tvdb_id = get_tvdb_id_from_name(name, lang)
    if tvdb_id:
        season = None
        episode = None
        show = tv_tvshow(tvdb_id)
        while season is None or episode is None: # don't exit completely if pressing back from episode selector
            selection = dialogs.select(_("Choose season"), [item["label"] for item in show])
            if selection != -1:
                season = show[selection]["info"]["season"]
                season = int(season)
            else:
                # user cancelled the season picker: abort entirely
                return
            items = []
            episodes = tv_season(tvdb_id, season)
            for item in episodes:
                label = "S{0}E{1} - {2}".format(item["info"]["season"], item["info"]["episode"],
                                                to_utf8(item["info"]["title"]))
                if item["info"]["plot"] is not None:
                    label += " - {0}".format(to_utf8(item["info"]["plot"]))
                items.append(label)
            selection = dialogs.select(_("Choose episode"), items)
            # cancelling here leaves episode None, so the loop re-asks for a season
            if selection != -1:
                episode = episodes[selection]["info"]["episode"]
                episode = int(episode)
        tv_play(tvdb_id, season, episode, "context")
        # optionally remember anything played via the by-name route
        if plugin.get_setting(SETTING_TV_PLAYED_BY_ADD, bool) == True:
            tv_add_to_library(tvdb_id)
@plugin.route('/tv/play_by_name_only_guide/<name>/<lang>', options = {"lang": "en"})
def guide_tv_play_by_name_only(name, lang):
    """Guide variant of tv_play_by_name_only: resolve a show by name, pick
    season and episode via dialogs, then play through guide_tv_play."""
    tvdb_id = get_tvdb_id_from_name(name, lang)
    if tvdb_id:
        season = None
        episode = None
        show = tv_tvshow(tvdb_id)
        # keep asking until both season and episode are chosen, so backing
        # out of the episode picker returns to the season picker
        while season is None or episode is None:
            selection = dialogs.select(_("Choose season"), [item["label"] for item in show])
            if selection != -1:
                season = show[selection]["info"]["season"]
                season = int(season)
            else:
                # user cancelled the season picker: abort entirely
                return
            items = []
            episodes = tv_season(tvdb_id, season)
            for item in episodes:
                label = "S{0}E{1} - {2}".format(item["info"]["season"], item["info"]["episode"],
                                                to_utf8(item["info"]["title"]))
                if item["info"]["plot"] is not None:
                    label += " - {0}".format(to_utf8(item["info"]["plot"]))
                items.append(label)
            selection = dialogs.select(_("Choose episode"), items)
            # cancelling here leaves episode None, so the loop re-asks for a season
            if selection != -1:
                episode = episodes[selection]["info"]["episode"]
                episode = int(episode)
        guide_tv_play(tvdb_id, season, episode, "default")
        # optionally remember anything played via the by-name route
        if plugin.get_setting(SETTING_TV_PLAYED_BY_ADD, bool) == True:
            tv_add_to_library(tvdb_id)
@plugin.route('/tv/play_latest/<id>/<mode>', options = {"mode": "default"})
def tv_play_latest_episode(id, mode):
    """Play the newest episode of show <id>, preferring tvdb, then tmdb,
    then trakt ids.

    Bug fixes: the tmdb/trakt branches passed episode['season'] twice
    instead of the episode number, and the trakt branch forwarded the tmdb
    id instead of the trakt id.
    """
    from trakt import trakt
    episode = trakt.get_latest_episode(id)
    show = trakt.get_show(id)
    ids = show['ids']
    if ids['tvdb']: tv_play(ids['tvdb'], episode['season'], episode['number'], mode)
    elif ids['tmdb']: tv_play_by_source("tmdb", ids['tmdb'], episode['season'], episode['number'], mode)
    elif ids['trakt']: tv_play_by_source("trakt", ids['trakt'], episode['season'], episode['number'], mode)
    else: dialogs.notify(msg='No tvdb/tmdb/trakt-id', title='Available', delay=3000, image=get_icon_path("tv"))
@plugin.route('/tv/guide_play_latest/<id>')
def guide_tv_play_latest_episode(id):
    """Notify about, then play, the latest episode of a show (guide entry point)."""
    from trakt import trakt
    episode = trakt.get_latest_episode(id)
    show = trakt.get_show(id)
    tvdb_id = show['ids']['tvdb']
    # Guard clause: without a tvdb id there is nothing we can play.
    if not tvdb_id:
        dialogs.notify(msg='No tvdb id', title='Available', delay=3000, image=get_icon_path("tv"))
        return
    dialogs.notify(msg='%s' % show['title'], title='S%sE%s - %s' % (episode['season'], episode['number'], episode['title']), delay=5000, image=get_icon_path("tv"))
    tv_play(tvdb_id, episode['season'], episode['number'], "default")
@plugin.route('/tv/tvdb/<id>')
def tv_tvshow(id):
    """List all seasons of a TV show identified by its TVDb id."""
    plugin.set_content('seasons')
    seasons = list_seasons_tvdb(id)
    if FORCE == True:
        return plugin.finish(items=seasons, sort_methods=SORT, view_mode=VIEW_SEASONS)
    return plugin.finish(items=seasons, sort_methods=SORT)
@plugin.route('/tv/tvdb/<id>/<season_num>')
def tv_season(id, season_num):
    """List all episodes of one season of a TVDb show."""
    plugin.set_content('episodes')
    episodes = list_episodes_tvdb(id, season_num)
    if FORCE == True:
        return plugin.finish(items=episodes, sort_methods=SORT, view_mode=VIEW_EPISODES)
    return plugin.finish(items=episodes, sort_methods=SORT)
def set_library_player(path, players):
    """Prompt for a default player and persist its id to <path>/player.info.

    NOTE(review): mutates *players* in place by prepending the two addon
    pseudo-players, matching the original behavior.
    """
    players.insert(0, ADDON_SELECTOR)
    players.insert(0, ADDON_DEFAULT)
    titles = [p.title for p in players]
    choice = dialogs.select(_("Select default player"), titles)
    if choice == -1:
        return
    chosen = players[choice]
    # Persist the chosen player id inside the library folder.
    info_path = os.path.join(path, 'player.info')
    info_file = xbmcvfs.File(info_path, 'w')
    info_file.write("{0}".format(chosen.id))
    info_file.close()
@plugin.route('/tv/set_library_player/<path>')
def set_tv_library_player(path):
    """Choose the default player used by TV-show library strm files."""
    set_library_player(path, active_players("tvshows"))
@plugin.route('/tv/set_live_library_player/<path>')
def set_live_tv_library_player(path):
    """Choose the default player used by live-TV library strm files."""
    set_library_player(path, active_players("live"))
def set_live_library_player(path):
    """Prompt for a default live-TV player and persist it to <path>/player.info.

    Deduplicated: the body was a line-for-line copy of set_library_player
    (prepend the default/selector pseudo-players, ask the user, write the
    chosen player id), so it now delegates to it.
    """
    set_library_player(path, active_players("live"))
def tv_add_all_to_library(items, noscan = False):
    """Queue every show in *items* into the batch-add file and start the batch run.

    items: either a TMDb-style response dict (with "results") or a
    trakt-style list of {"show": {"ids": ...}} dicts.
    noscan: NOTE(review): accepted but never used in this function -- confirm intent.
    """
    # NOTE(review): return value unused; presumably called for its
    # library-folder setup side effect -- confirm.
    library_folder = setup_library(plugin.get_setting(SETTING_TV_LIBRARY_FOLDER, unicode))
    ids = ""
    import_tvdb()
    # Build one id per line: tvdb ids where available, imdb ids otherwise.
    if "results" in items: ids = '\n'.join([str(show["id"]) for show, item in execute(tmdb_to_tvdb, items["results"], workers=10)])
    else: ids = '\n'.join([str(i["show"]["ids"]["tvdb"]) if i["show"]["ids"]["tvdb"] != None and i["show"]["ids"]["tvdb"] != "" else i["show"]["ids"]["imdb"] for i in items])
    shows_batch_add_file = plugin.get_setting(SETTING_TV_BATCH_ADD_FILE_PATH, unicode)
    # Merge ids already queued in an existing batch file, then rewrite it.
    if xbmcvfs.exists(shows_batch_add_file):
        batch_add_file = xbmcvfs.File(shows_batch_add_file)
        pre_ids = batch_add_file.read()
        xids = pre_ids.split("\n")
        for id in xids:
            if id != "" and id != None and id not in ids: ids = ids + str(id) + '\n'
        batch_add_file.close()
        xbmcvfs.delete(shows_batch_add_file)
    batch_add_file = xbmcvfs.File(shows_batch_add_file, 'w')
    batch_add_file.write(str(ids))
    batch_add_file.close()
    # Hand off to the batch_add_to_library route to do the actual adding.
    xbmc.executebuiltin("RunPlugin(plugin://plugin.video.metalliq/tv/batch_add_to_library)")
@plugin.route('/tv/add_to_library_parsed/<id>/<player>')
def tv_add_to_library_parsed(id, player):
    """Add a show to the library with an explicit player id.

    id: tvdb id, or an imdb "tt..." id which is resolved via TheTVDb first.
    player: player id written into the generated strm files.
    """
    import_tvdb()
    if id.startswith("tt"):
        try: id = tvdb.search_by_imdb(id)
        except: return dialogs.ok(_("%s not found") % _("TV show"), "{0} {1} in TheTVDb".format(_("no show information found for"), id))
    # setup_library was previously called twice with the same argument;
    # once is enough (assumed idempotent -- TODO confirm).
    library_folder = setup_library(plugin.get_setting(SETTING_TV_LIBRARY_FOLDER, unicode))
    show = tvdb[int(id)]
    imdb = show['imdb_id']  # NOTE(review): unused; kept for the lookup side effect, if any
    # add to library
    if add_tvshow_to_library(library_folder, show, player): set_property("clean_library", 1)
    scan_library(type="video", path=plugin.get_setting(SETTING_TV_LIBRARY_FOLDER, unicode))
@plugin.route('/tv/add_to_library/<id>')
def tv_add_to_library(id):
    """Add a TV show to the Kodi library, choosing a player first.

    With SETTING_TV_DEFAULT_AUTO_ADD enabled the configured default player
    id is used; otherwise the user picks one from the active players.
    """
    import_tvdb()
    # setup library folder (was previously done twice; once is enough)
    library_folder = setup_library(plugin.get_setting(SETTING_TV_LIBRARY_FOLDER, unicode))
    show = tvdb[int(id)]
    imdb = show['imdb_id']  # NOTE(review): unused; kept for the lookup side effect, if any
    # get active players, filtered by the show's network
    players = active_players("tvshows", filters = {'network': show.get('network')})
    # get selected player id
    if plugin.get_setting(SETTING_TV_DEFAULT_AUTO_ADD, bool) == True:
        # auto mode: the setting already stores the player id as a string
        player_id = plugin.get_setting(SETTING_TV_DEFAULT_PLAYER_FROM_LIBRARY, unicode)
    else:
        players.insert(0, ADDON_SELECTOR)
        players.insert(0, ADDON_DEFAULT)
        selection = dialogs.select(_("Play with..."), [p.title for p in players])
        if selection == -1:
            return
        player_id = players[selection].id
    # add to library (the two near-identical branches are merged via player_id)
    if add_tvshow_to_library(library_folder, show, player_id):
        set_property("clean_library", 1)
    # start scan
    scan_library(type="video", path=plugin.get_setting(SETTING_TV_LIBRARY_FOLDER, unicode))
@plugin.route('/tv/batch_add_to_library')
def tv_batch_add_to_library():
    """ Batch add tv shows to library """
    # Reads the batch-add file (one id per line; a line may also hold a
    # comma-separated list), resolves imdb "tt..." ids to tvdb ids, writes a
    # strm entry per show, deletes the batch file and triggers a library scan.
    tv_batch_file = plugin.get_setting(SETTING_TV_BATCH_ADD_FILE_PATH, unicode)
    if xbmcvfs.exists(tv_batch_file):
        try:
            f = open(xbmc.translatePath(tv_batch_file), 'r')
            r = f.read()
            f.close()
            ids = r.split('\n')
        except: return dialogs.notify(msg='TVShows Batch Add File', title=_("%s not found").replace("%s ",""), delay=3000, image=get_icon_path("tv"))
        library_folder = setup_library(plugin.get_setting(SETTING_TV_LIBRARY_FOLDER, unicode))
        import_tvdb()
        import xbmcgui
        ids_index = 0
        ids_size = len(ids)
        dialogs.notify(msg='Adding ' + str(ids_size) + " items", title="To Kodi library", delay=500, image=get_icon_path("tv"))
        for id in ids:
            items_left = ids_size - ids_index
            # Progress toast whenever the remaining count ends in 0.
            if str(items_left).endswith("0"): dialogs.notify(msg=str(items_left), title="Items left", delay=8000, image=get_icon_path("tv"))
            if id == None or id == "None": pass
            elif "," in id:
                csvs = id.split(',')
                for csv in csvs:
                    # NOTE(review): when csv is "None", tvdb_id from the
                    # previous iteration is reused below (or NameError on the
                    # first one) -- looks like a latent bug; confirm intent.
                    if csv == None or csv == "None": pass
                    elif str(csv).startswith("tt") and csv != "": tvdb_id = get_tvdb_id_from_imdb_id(csv)
                    else: tvdb_id = csv
                    show = tvdb[int(tvdb_id)]
                    batch_add_tvshows_to_library(library_folder, show)
            else:
                if id == None or id == "None" or id == "": pass
                elif str(id).startswith("tt"): tvdb_id = get_tvdb_id_from_imdb_id(id)
                else: tvdb_id = id
                try:
                    show = tvdb[int(tvdb_id)]
                    batch_add_tvshows_to_library(library_folder, show)
                except:
                    # Best-effort: report and keep going with the next id.
                    dialogs.notify(msg='failed to add', title='%s' % id, delay=3000, image=get_icon_path("tv"))
                    xbmc.log("MetalliQ failed to add: {0}".format(id),xbmc.LOGNOTICE)
            ids_index += 1
        if xbmcvfs.exists(tv_batch_file): os.remove(xbmc.translatePath(tv_batch_file))
        dialogs.notify(msg='Starting library scan afterwards', title='Adding tvshow strm-files', delay=5000, image=get_icon_path("tv"))
        update_library()
        return True
@plugin.route('/tv/play/<id>/<season>/<episode>/<mode>')
def tv_play(id, season, episode, mode):
    # Route entry point: delegate playback of a single episode to play_episode.
    play_episode(id, season, episode, mode)
@plugin.route('/tv/play_guide/<id>/<season>/<episode>/<mode>')
def guide_tv_play(id, season, episode, mode):
    # Route entry point: delegate to the guide variant of episode playback.
    play_episode_from_guide(id, season, episode, mode)
def list_tvshows(response):
    """TV shows listing: build directory items from a TMDb response dict.

    response: TMDb-style dict with 'results' and optional 'page'/'total_pages'.
    Returns a list of item dicts, or None when Kodi requests an abort.

    FIX: the docstring was previously placed after the first statement,
    making it a dead string expression rather than a docstring.
    """
    if FORCE == True: plugin.set_view_mode(VIEW)
    import_tvdb()
    # Attach TVDB data to TMDB results
    items = []
    results = response['results']
    for tvdb_show, tmdb_show in execute(tmdb_to_tvdb, results, workers=10):
        if tvdb_show is not None:
            info = build_tvshow_info(tvdb_show, tmdb_show)
            items.append(make_tvshow_item(info))
        if xbmc.abortRequested:  # bail out early when Kodi is shutting down
            return
    # Paging: append a "next page" pseudo-item pointing back at the caller route
    if 'page' in response:
        page = response['page']
        args = caller_args()
        if page < response['total_pages']:
            args['page'] = str(page + 1)
            items.append({
                'label': _("Next page").format() + " >> (%s/%s)" % (page + 1, response['total_pages']),
                'icon': get_icon_path("item_next"),
                'path': plugin.url_for(caller_name(), **args)
            })
    return items
def trakt_get_genres():
    """Map trakt genre slugs to display names, movies and shows combined."""
    from trakt import trakt
    mapping = {}
    # Later kinds override earlier ones on slug collisions (shows win).
    for kind in ("movies", "shows"):
        for genre in trakt.trakt_get_genres(kind):
            mapping[genre['slug']] = genre['name']
    return mapping
def list_trakt_tvshows(results):
    """Render a trakt result list as a finished tvshow directory."""
    from trakt import trakt
    def _sort_key(entry):
        # Sort case-insensitively, ignoring a leading "the ".
        return entry["show"]["title"].lower().replace("the ", "")
    ordered = sorted(results, key=_sort_key)
    genres_dict = trakt_get_genres()
    items = []
    for entry in ordered:
        meta = get_tvshow_metadata_trakt(entry["show"], genres_dict)
        # Only shows that resolved to a tvdb id can be listed.
        if meta.get('tvdb_id'):
            items.append(make_tvshow_item(meta))
    if FORCE == True:
        return plugin.finish(items=items, sort_methods=SORT, view_mode=VIEW)
    return plugin.finish(items=items, sort_methods=SORT)
def list_trakt_episodes(result, with_time=False):
    """Build and finish an 'episodes' directory from a trakt result list.

    result: iterable of trakt items; each is either a bare episode dict or a
    wrapper with 'episode' (and usually 'show') keys.
    with_time: parses the first_aired timestamp, but NOTE(review): the parsed
    airtime is never used -- the label is rebuilt identically below;
    presumably it was meant to include the air time. Confirm before changing.
    """
    genres_dict = trakt_get_genres()
    items = []
    for item in result:
        if 'episode' in item: episode = item['episode']
        else: episode = item
        # NOTE(review): 'show' is only assigned when present; if the first
        # item lacks it, the 'if show:' below raises NameError -- confirm.
        if "show" in item: show = item['show']
        try: id = episode["show"]["ids"]["tvdb"]
        except: id = episode["ids"].get("tvdb")
        if not id: continue
        try: season_num = episode["season"]
        except: season_num = episode.get("season")
        try: episode_num = episode["number"]
        except: episode_num = episode.get("number")
        if show: tvshow_title = (show.get("title")).encode('utf-8')
        else:
            try: tvshow_title = (episode["show"]["title"]).encode('utf-8')
            except: tvshow_title = str(episode.get("title")).encode('utf-8')
        if episode["title"] != None:
            try: episode_title = (episode["title"]).encode('utf-8')
            except: episode_title = (episode.get("title")).encode('utf-8')
        else: episode_title = "TBA"
        # Start from show-level metadata, then overlay episode specifics.
        info = get_tvshow_metadata_trakt(item["show"], genres_dict)
        info['season'] = episode["season"]
        info['episode'] = episode["number"]
        info['title'] = episode["title"]
        info['aired'] = episode.get('first_aired','')
        info['premiered'] = episode.get('first_aired','')
        info['rating'] = episode.get('rating', '')
        info['plot'] = episode.get('overview','')
        info['tagline'] = episode.get('tagline')
        info['votes'] = episode.get('votes','')
        #info['poster'] = episode['images']['poster']['thumb']
        label = "{0} - S{1:02d}E{2:02d} - {3}".format(tvshow_title, season_num, episode_num, episode_title)
        if with_time and info['premiered']:
            airtime = time.strptime(item["first_aired"], "%Y-%m-%dt%H:%M:%S.000Z")
            airtime = time.strftime("%Y-%m-%d %H:%M", airtime)
            label = "{0} - S{1:02d}E{2:02d} - {3}".format(tvshow_title, season_num, episode_num, episode_title)
        context_menu = [
            (
                "{0} {1}...".format(_("Select"), _("Stream").lower()),
                "PlayMedia({0})".format(plugin.url_for("tv_play", id=id, season=season_num, episode=episode_num, mode='select'))
            ),
            (
                "%s %s" % (_("Episode"), _("Information").lower()),
                'Action(Info)'
            ),
            (
                _("Add to playlist"),
                "RunPlugin({0})".format(plugin.url_for("lists_add_episode_to_list", src='tvdb', id=id,
                                                       season=season_num, episode=episode_num))
            ),
        ]
        items.append({'label': label,
                      'path': plugin.url_for("tv_play", id=id, season=season_num, episode=episode_num, mode='default'),
                      'context_menu': context_menu,
                      'info': info,
                      'is_playable': True,
                      'info_type': 'video',
                      'stream_info': {'video': {}},
                      'thumbnail': info['poster'],
                      'poster': info['poster'],
                      'icon': "DefaultVideo.png",
                      'properties' : {'fanart_image' : info['fanart']},
        })
    plugin.set_content('episodes')
    if FORCE == True: return plugin.finish(items=items, sort_methods=SORTRAKT, view_mode=VIEW, cache_to_disc=False, update_listing=True)
    else: return plugin.finish(items=items, sort_methods=SORTRAKT, cache_to_disc=False, update_listing=True)
def build_tvshow_info(tvdb_show, tmdb_show=None):
    """Merge TVDb and TMDb metadata into one dict (non-empty TMDb values win)."""
    tvdb_info = get_tvshow_metadata_tvdb(tvdb_show)
    tmdb_info = get_tvshow_metadata_tmdb(tmdb_show)
    merged = {}
    merged.update(tvdb_info)
    merged.update(dict((key, val) for key, val in tmdb_info.iteritems() if val))
    if LANG != "en":
        # Prefer translated info: fall back to the TVDb value when the merged
        # one is plain ASCII but TVDb carries a non-ASCII (translated) variant.
        for key in ('name', 'title', 'plot'):
            if is_ascii(merged.get(key, '')) and not is_ascii(tvdb_info.get(key, '')):
                merged[key] = tvdb_info[key]
    return merged
def make_tvshow_item(info):
    """Build one directory-item dict for a TV show.

    Resolves tvdb/tmdb/imdb ids from whichever keys *info* carries, backfills
    missing poster/fanart art (TMDb, then TVDb, then TMDb-via-imdb, then a
    placeholder) and attaches context-menu entries.
    """
    # Different metadata sources use different key names for the same id,
    # so probe each alias in turn.
    try: tvdb_id = info['tvdb']
    except: tvdb_id = ""
    if tvdb_id == "":
        try: tvdb_id = info['tvdb_id']
        except: tvdb_id = ""
    try: tmdb_id = info['tmdb']
    except: tmdb_id = ""
    if tmdb_id == "":
        try: tmdb_id = info['id']
        except: tmdb_id = ""
    try: imdb_id = info['imdb_id']
    except: imdb_id = ""
    if imdb_id == "":
        try: imdb_id = info['imdb']
        except: imdb_id = ""
    if not info['poster']: info['poster'] = None
    if not info['fanart']: info['fanart'] = None
    # Art fallback 1: TMDb by tmdb id.
    if info['poster'] == None or info['poster'] == "":
        if tmdb_id != None and tmdb_id != "":
            import_tmdb()
            show = tmdb.TV(tmdb_id).info()
            if show['poster_path'] != None and show['poster_path'] != "": info['poster'] = u'%s%s' % ("http://image.tmdb.org/t/p/w500", show['poster_path'])
            if info['fanart'] == None or info['fanart'] == "":
                if show['backdrop_path'] != None and show['backdrop_path'] != "": info['fanart'] = u'%s%s' % ("http://image.tmdb.org/t/p/original", show['backdrop_path'])
    # Art fallback 2: TVDb by tvdb id.
    if info['poster'] == None or info['poster'] == "":
        if tvdb_id != None and tvdb_id != "":
            import_tvdb()
            show = tvdb.get_show(int(tvdb_id), full=False)
            if show != None:
                if show['seriesname'] != None and show['seriesname'] != "":
                    if show.get('poster', '') != None and show.get('poster', '') != "": info['poster'] = show.get('poster', '')
                    if info['fanart'] == None or info['fanart'] == "":
                        if show.get('fanart', '') != None and show.get('fanart', '') != "": info['fanart'] = show.get('fanart', '')
    # Art fallback 3: TMDb lookup via the imdb id.
    if info['poster'] == None or info['poster'] == "":
        if imdb_id != None and imdb_id != "":
            import_tmdb()
            preshow = tmdb.Find(imdb_id).info(external_source="imdb_id")
            proshow = preshow['tv_results']
            if proshow != []: show = proshow[0]
            else: show = []
            if show != []:
                if show['poster_path'] != None and show['poster_path'] != "": info['poster'] = u'%s%s' % ("http://image.tmdb.org/t/p/w500", show['poster_path'])
                if info['fanart'] == None or info['fanart'] == "":
                    if show['backdrop_path'] != None and show['backdrop_path'] != "": info['fanart'] = u'%s%s' % ("http://image.tmdb.org/t/p/original", show['backdrop_path'])
    # Final fallback: static placeholder art.
    if info['poster'] == None or info['poster'] == "": info['poster'] = "https://raw.githubusercontent.com/OpenELEQ/Style/master/MetalliQ/default/unavailable.png"
    if info['fanart'] == None or info['fanart'] == "": info['fanart'] = get_background_path()
    # Context menu depends on which companion info addon is installed.
    if xbmc.getCondVisibility("system.hasaddon(script.qlickplay)"): context_menu = [(_("Scan item to library"),"RunPlugin({0})".format(plugin.url_for("tv_add_to_library", id=tvdb_id))), ("%s %s" % (_("TV"), _("Trailer").lower()),"RunScript(script.qlickplay,info=playtvtrailer,tvdb_id={0})".format(tvdb_id)), ("[COLOR ff0084ff]Q[/COLOR]lick[COLOR ff0084ff]P[/COLOR]lay", "RunScript(script.qlickplay,info=tvinfo,tvdb_id={0})".format(tvdb_id)), ("%s %s (%s)" % ("Recommended", _("TV shows"), "TMDb"),"ActivateWindow(10025,plugin://script.qlickplay/?info=similartvshows&tvdb_id={0})".format(tvdb_id))]
    elif xbmc.getCondVisibility("system.hasaddon(script.extendedinfo)"): context_menu = [(_("Scan item to library"),"RunPlugin({0})".format(plugin.url_for("tv_add_to_library", id=tvdb_id))), ("%s %s" % (_("TV"), _("Trailer").lower()),"RunScript(script.extendedinfo,info=playtvtrailer,tvdb_id={0})".format(tvdb_id)), (_("Extended TV show info"), "RunScript(script.extendedinfo,info=extendedtvinfo,tvdb_id={0})".format(tvdb_id)), ("%s %s (%s)" % ("Recommended", _("TV shows"), "TMDb"),"ActivateWindow(10025,plugin://script.extendedinfo/?info=similartvshows&tvdb_id={0})".format(tvdb_id))]
    else: context_menu = [(_("Scan item to library"),"RunPlugin({0})".format(plugin.url_for("tv_add_to_library", id=tvdb_id)))]
    context_menu.append((_("Add to playlist"), "RunPlugin({0})".format(plugin.url_for("lists_add_show_to_list", src='tvdb', id=tvdb_id))))
    context_menu.append((_("TV show information"),'Action(Info)'))
    return {'label': to_utf8(info['title']),
            'path': plugin.url_for("tv_tvshow", id=tvdb_id),
            'context_menu': context_menu,
            'thumbnail': info['poster'],
            'icon': "DefaultVideo.png",
            'poster': info['poster'],
            'properties' : {'fanart_image' : info['fanart']},
            'info_type': 'video',
            'stream_info': {'video': {}},
            'info': info}
@plugin.cached(TTL=CACHE_TTL)
def list_seasons_tvdb(id):
    """Build directory items for the aired seasons of TVDb show *id*."""
    import_tvdb()
    id = int(id)
    show = tvdb[id]
    show_info = get_tvshow_metadata_tvdb(show, banners=False)
    title = show_info['name']
    items = []
    for (season_num, season) in show.items():
        # Skip specials (season 0) unless enabled, and seasons not yet aired.
        if season_num == 0 and not plugin.get_setting(SETTING_INCLUDE_SPECIALS, bool): continue
        elif not season.has_aired(flexible=plugin.get_setting(SETTING_AIRED_UNKNOWN, bool) ): continue
        season_info = get_season_metadata_tvdb(show_info, season)
        # Context menu depends on which companion info addon is installed.
        if xbmc.getCondVisibility("system.hasaddon(script.qlickplay)"): context_menu = [("[COLOR ff0084ff]Q[/COLOR]lick[COLOR ff0084ff]P[/COLOR]lay", "RunScript(script.qlickplay,info=seasoninfo,tvshow={0},season={1})".format(title, season_num)), ("%s %s" % (_("TV"), _("Trailer").lower()),"RunScript(script.qlickplay,info=playtvtrailer,tvdb_id={0})".format(id)), (_("Recommended tv shows") + " (TMDb)","ActivateWindow(10025,plugin://script.qlickplay/?info=similartvshows&tvdb_id={0})".format(id))]
        elif xbmc.getCondVisibility("system.hasaddon(script.extendedinfo)"): context_menu = [(_("Extended season info"), "RunScript(script.extendedinfo,info=seasoninfo,tvshow={0},season={1})".format(title, season_num)), ("%s %s" % (_("TV"), _("Trailer").lower()),"RunScript(script.extendedinfo,info=playtvtrailer,tvdb_id={0})".format(id)), (_("Recommended tv shows") + " (TMDb)","ActivateWindow(10025,plugin://script.extendedinfo/?info=similartvshows&tvdb_id={0})".format(id))]
        else: context_menu = []
        items.append({'label': u"%s %d" % (_("Season"), season_num),
                      'path': plugin.url_for("tv_season", id=id, season_num=season_num),
                      'context_menu': context_menu,
                      'info': season_info,
                      'thumbnail': season_info['poster'],
                      'icon': "DefaultVideo.png",
                      'poster': season_info['poster'],
                      'properties' : {'fanart_image' : season_info['fanart']},
        })
    if FORCE == True: plugin.set_view_mode(VIEW); return items
    else: return items
@plugin.cached(TTL=CACHE_TTL)
def list_episodes_tvdb(id, season_num):
    """Build directory items for the aired episodes of one TVDb season."""
    import_tvdb()
    id = int(id)
    season_num = int(season_num)
    show = tvdb[id]
    show_info = get_tvshow_metadata_tvdb(show, banners=False)
    title = show_info['name']
    season = show[season_num]
    season_info = get_season_metadata_tvdb(show_info, season, banners=True)
    items = []
    for (episode_num, episode) in season.items():
        # NOTE(review): 'break' (not 'continue') -- assumes episodes iterate
        # in airing order, so everything after the first unaired one is dropped.
        if not season_num == 0 and not episode.has_aired(flexible=plugin.get_setting(SETTING_AIRED_UNKNOWN, bool)): break
        episode_info = get_episode_metadata_tvdb(season_info, episode)
        # Context menu depends on which companion info addon is installed.
        if xbmc.getCondVisibility("system.hasaddon(script.qlickplay)"): context_menu = [("[COLOR ff0084ff]Q[/COLOR]lick[COLOR ff0084ff]P[/COLOR]lay", "RunScript(script.qlickplay,info=episodeinfo,tvshow={0},season={1},episode={2})".format(title, season_num, episode_num)), ("%s %s" % (_("TV"), _("Trailer").lower()),"RunScript(script.qlickplay,info=playtvtrailer,tvdb_id={0})".format(id)), (_("Recommended tv shows") + " (TMDb)","ActivateWindow(10025,plugin://script.qlickplay/?info=similartvshows&tvdb_id={0})".format(id))]
        elif xbmc.getCondVisibility("system.hasaddon(script.extendedinfo)"): context_menu = [(_("Extended episode info"), "RunScript(script.extendedinfo,info=episodeinfo,tvshow={0},season={1},episode={2})".format(title, season_num, episode_num)), ("%s %s" % (_("TV"), _("Trailer").lower()),"RunScript(script.extendedinfo,info=playtvtrailer,tvdb_id={0})".format(id)), (_("Recommended tv shows") + " (TMDb)","ActivateWindow(10025,plugin://script.extendedinfo/?info=similartvshows&tvdb_id={0})".format(id))]
        else: context_menu = []
        context_menu.append(("{0} {1}...".format(_("Select"), _("Stream").lower()),"PlayMedia({0})".format(plugin.url_for("tv_play", id=id, season=season_num, episode=episode_num, mode='select'))))
        context_menu.append((_("Add to playlist"), "RunPlugin({0})".format(plugin.url_for("lists_add_episode_to_list", src='tvdb', id=id, season=season_num, episode = episode_num))))
        context_menu.append(("%s %s" % (_("Episode"), _("Information").lower()),'Action(Info)'))
        items.append({'label': episode_info.get('title'),
                      'path': plugin.url_for("tv_play", id=id, season=season_num, episode=episode_num, mode='default'),
                      'context_menu': context_menu,
                      'info': episode_info,
                      'is_playable': True,
                      'info_type': 'video',
                      'stream_info': {'video': {}},
                      'thumbnail': episode_info['poster'],
                      'poster': season_info['poster'],
                      'icon': "DefaultVideo.png",
                      'properties' : {'fanart_image' : episode_info['fanart']},
        })
    return items
def tmdb_to_tvdb(tmdb_show):
    """Resolve a TMDb show dict to its TVDb record.

    Tries a name+year search first, then falls back to TMDb's external
    tvdb_id when the search is ambiguous.
    Returns (tvdb_show_or_None, tmdb_show).
    """
    tvdb_show = None
    # Search by name and year
    name = tmdb_show['original_name']
    try:
        year = int(parse_year(tmdb_show['first_air_date']))
    except Exception:  # was a bare except; still best-effort on any parse failure
        year = ""
    results = [x['id'] for x in tvdb.search(name, year)]
    # Get by id if not a single result
    if len(results) != 1:
        id = tmdb.TV(tmdb_show['id']).external_ids().get('tvdb_id', None)
        if id:
            results = [id]
    # Use first result if still have many
    if results:
        tvdb_show = tvdb[results[0]]
    return tvdb_show, tmdb_show
| {
"content_hash": "7151a059456447e82d8a884a76209d5e",
"timestamp": "",
"source": "github",
"line_count": 1425,
"max_line_length": 597,
"avg_line_length": 49.719298245614034,
"alnum_prop": 0.5774453069865914,
"repo_name": "TheWardoctor/Wardoctors-repo",
"id": "d85626964c4991f63f841afe6a28564559f8c4e5",
"size": "70850",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugin.video.metalliq/resources/lib/meta/navigation/tvshows.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "3208"
},
{
"name": "JavaScript",
"bytes": "115722"
},
{
"name": "Python",
"bytes": "34405207"
},
{
"name": "Shell",
"bytes": "914"
}
],
"symlink_target": ""
} |
"""Common code for converting proto to other formats, such as JSON."""
import base64
import collections
import datetime
import json
import logging
from protorpc import message_types
from protorpc import messages
from protorpc import protojson
import six
from googlecloudapis.apitools.base.py import exceptions
__all__ = [
'CopyProtoMessage',
'JsonToMessage',
'MessageToJson',
'DictToMessage',
'MessageToDict',
'PyValueToMessage',
'MessageToPyValue',
'MessageToRepr',
'GetCustomJsonFieldMapping',
'AddCustomJsonFieldMapping',
'GetCustomJsonEnumMapping',
'AddCustomJsonEnumMapping',
]
# Internal (encoder, decoder) callable pair stored in the registries below.
_Codec = collections.namedtuple('_Codec', ['encoder', 'decoder'])
# Returned by codec callables: the (possibly transformed) value plus a flag
# saying whether encoding/decoding is complete or should continue.
CodecResult = collections.namedtuple('CodecResult', ['value', 'complete'])
# TODO(user): Make these non-global.
_UNRECOGNIZED_FIELD_MAPPINGS = {}  # message class -> name of the field holding unknown fields
_CUSTOM_MESSAGE_CODECS = {}  # message class -> _Codec
_CUSTOM_FIELD_CODECS = {}  # field instance -> _Codec
_FIELD_TYPE_CODECS = {}  # field type -> _Codec
def MapUnrecognizedFields(field_name):
  """Class decorator: route unrecognized fields in a message into field_name."""
  def _register(cls):
    _UNRECOGNIZED_FIELD_MAPPINGS[cls] = field_name
    return cls
  return _register
def RegisterCustomMessageCodec(encoder, decoder):
  """Class decorator: use encoder/decoder for this message class."""
  def _register(cls):
    _CUSTOM_MESSAGE_CODECS[cls] = _Codec(encoder=encoder, decoder=decoder)
    return cls
  return _register
def RegisterCustomFieldCodec(encoder, decoder):
  """Field decorator: use encoder/decoder for this specific field instance."""
  def _register(field):
    _CUSTOM_FIELD_CODECS[field] = _Codec(encoder=encoder, decoder=decoder)
    return field
  return _register
def RegisterFieldTypeCodec(encoder, decoder):
  """Type decorator: use encoder/decoder for every field of this type."""
  def _register(field_type):
    _FIELD_TYPE_CODECS[field_type] = _Codec(encoder=encoder, decoder=decoder)
    return field_type
  return _register
# TODO(user): Delete this function with the switch to proto2.
def CopyProtoMessage(message):
  """Return a deep copy of message via a protojson encode/decode round trip."""
  codec = protojson.ProtoJson()
  return codec.decode_message(type(message), codec.encode_message(message))
def MessageToJson(message, include_fields=None):
  """Convert the given message to JSON, optionally forcing include_fields."""
  encoded = _ProtoJsonApiTools.Get().encode_message(message)
  return _IncludeFields(encoded, message, include_fields)
def JsonToMessage(message_type, message):
  """Convert the given JSON to a message of type message_type.

  Args:
    message_type: messages.Message subclass to instantiate.
    message: str, JSON-encoded message body.
  Returns:
    A message_type instance.
  """
  return _ProtoJsonApiTools.Get().decode_message(message_type, message)
# TODO(user): Do this directly, instead of via JSON.
def DictToMessage(d, message_type):
  """Convert the given dictionary to a message of type message_type.

  Round-trips through json.dumps + JsonToMessage rather than building the
  message directly (see TODO above).
  """
  return JsonToMessage(message_type, json.dumps(d))
def MessageToDict(message):
  """Convert the given message to a dictionary.

  Returns:
    dict, the python equivalent of the message's JSON encoding.
  """
  return json.loads(MessageToJson(message))
def PyValueToMessage(message_type, value):
  """Convert the given python value to a message of type message_type.

  Args:
    message_type: messages.Message subclass to instantiate.
    value: JSON-compatible python value (dict, list or scalar).
  Returns:
    A message_type instance.
  """
  return JsonToMessage(message_type, json.dumps(value))
def MessageToPyValue(message):
  """Convert the given message to a JSON-compatible python value."""
  return json.loads(MessageToJson(message))
def MessageToRepr(msg, multiline=False, **kwargs):
  """Return a repr-style string for a protorpc message.
  protorpc.Message.__repr__ does not return anything that could be considered
  python code. Adding this function lets us print a protorpc message in such
  a way that it could be pasted into code later, and used to compare against
  other things.
  Args:
    msg: protorpc.Message, the message to be repr'd.
    multiline: bool, True if the returned string should have each field
        assignment on its own line.
    **kwargs: {str:str}, Additional flags for how to format the string.
  Known **kwargs:
    shortstrings: bool, True if all string values should be truncated at
        100 characters, since when mocking the contents typically don't matter
        except for IDs, and IDs are usually less than 100 characters.
    no_modules: bool, True if the long module name should not be printed with
        each type.
  Returns:
    str, A string of valid python (assuming the right imports have been made)
    that recreates the message passed into this function.
  """
  # TODO(user): craigcitro suggests a pretty-printer from apitools/gen.
  indent = kwargs.get('indent', 0)
  def IndentKwargs(kwargs):
    # Bump the indent by one level for nested values.
    kwargs = dict(kwargs)
    kwargs['indent'] = kwargs.get('indent', 0) + 4
    return kwargs
  # Lists: recursively repr each element.
  if isinstance(msg, list):
    s = '['
    for item in msg:
      if multiline:
        s += '\n' + ' '*(indent + 4)
      s += MessageToRepr(
          item, multiline=multiline, **IndentKwargs(kwargs)) + ','
    if multiline:
      s += '\n' + ' '*indent
    s += ']'
    return s
  # Messages: emit [module.]Type(field=..., ...) with sorted field names.
  if isinstance(msg, messages.Message):
    s = type(msg).__name__ + '('
    if not kwargs.get('no_modules'):
      s = msg.__module__ + '.' + s
    names = sorted([field.name for field in msg.all_fields()])
    for name in names:
      field = msg.field_by_name(name)
      if multiline:
        s += '\n' + ' '*(indent + 4)
      value = getattr(msg, field.name)
      s += field.name + '=' + MessageToRepr(
          value, multiline=multiline, **IndentKwargs(kwargs)) + ','
    if multiline:
      s += '\n'+' '*indent
    s += ')'
    return s
  # Strings: optionally truncate, then fall through to the final repr().
  if isinstance(msg, six.string_types):
    if kwargs.get('shortstrings') and len(msg) > 100:
      msg = msg[:100]
  # Datetimes: rebuild with a tzinfo whose repr is itself valid python.
  if isinstance(msg, datetime.datetime):
    class SpecialTZInfo(datetime.tzinfo):
      def __init__(self, offset):
        super(SpecialTZInfo, self).__init__()
        self.offset = offset
      def __repr__(self):
        s = 'TimeZoneOffset(' + repr(self.offset) + ')'
        if not kwargs.get('no_modules'):
          s = 'protorpc.util.' + s
        return s
    msg = datetime.datetime(
        msg.year, msg.month, msg.day, msg.hour, msg.minute, msg.second,
        msg.microsecond, SpecialTZInfo(msg.tzinfo.utcoffset(0)))
  return repr(msg)
def _GetField(message, field_path):
  """Follow field_path attribute-by-attribute, raising KeyError when absent."""
  target = message
  for name in field_path:
    if name not in dir(target):
      raise KeyError('no field "%s"' % name)
    target = getattr(target, name)
  return target
def _SetField(dictblob, field_path, value):
  """Store value at the nested field_path inside dictblob.

  NOTE(review): each intermediate level is overwritten with a fresh {}, so
  two calls sharing a path prefix clobber each other -- behavior preserved
  from the original implementation.
  """
  cursor = dictblob
  for part in field_path[:-1]:
    cursor[part] = {}
    cursor = cursor[part]
  cursor[field_path[-1]] = value
def _IncludeFields(encoded_message, message, include_fields):
  """Add the requested fields to the encoded message.

  Args:
    encoded_message: str, JSON encoding of message.
    message: the original message the fields are read from.
    include_fields: iterable of dotted field paths to force into the
        output, or None for a no-op.
  Returns:
    str, JSON with each requested field present as null ([] for lists).
  Raises:
    exceptions.InvalidDataError: a path does not exist on message.
  """
  if include_fields is None:
    return encoded_message
  result = json.loads(encoded_message)
  for field_name in include_fields:
    try:
      value = _GetField(message, field_name.split('.'))
      # Repeated fields are emitted as [], everything else as null.
      nullvalue = None
      if isinstance(value, list):
        nullvalue = []
    except KeyError:
      raise exceptions.InvalidDataError(
          'No field named %s in message of type %s' % (
              field_name, type(message)))
    _SetField(result, field_name.split('.'), nullvalue)
  return json.dumps(result)
def _GetFieldCodecs(field, attr):
  """Collect registered codec callables (attr is 'encoder' or 'decoder').

  Per-field registrations take precedence over per-type ones.
  """
  candidates = (
      getattr(_CUSTOM_FIELD_CODECS.get(field), attr, None),
      getattr(_FIELD_TYPE_CODECS.get(type(field)), attr, None),
  )
  return [codec for codec in candidates if codec is not None]
class _ProtoJsonApiTools(protojson.ProtoJson):
  """JSON encoder used by apitools clients.

  Extends protorpc's ProtoJson with custom message/field codecs, custom
  JSON<->python field-name and enum-name mappings, and round-tripping of
  fields the message type does not declare (unknown fields).
  """
  _INSTANCE = None  # lazily-created singleton, see Get()
  @classmethod
  def Get(cls):
    # Return the shared singleton instance, creating it on first use.
    if cls._INSTANCE is None:
      cls._INSTANCE = cls()
    return cls._INSTANCE
  def decode_message(self, message_type, encoded_message):
    """Decode encoded_message (JSON text) into a message_type instance."""
    # A registered custom codec fully replaces the default decoding.
    if message_type in _CUSTOM_MESSAGE_CODECS:
      return _CUSTOM_MESSAGE_CODECS[message_type].decoder(encoded_message)
    # We turn off the default logging in protorpc. We may want to
    # remove this later.
    old_level = logging.getLogger().level
    logging.getLogger().setLevel(logging.ERROR)
    # Translate custom JSON field names back to python names first.
    result = _DecodeCustomFieldNames(message_type, encoded_message)
    result = super(_ProtoJsonApiTools, self).decode_message(
        message_type, result)
    logging.getLogger().setLevel(old_level)
    # Preserve values the schema does not know about.
    result = _ProcessUnknownEnums(result, encoded_message)
    result = _ProcessUnknownMessages(result, encoded_message)
    return _DecodeUnknownFields(result, encoded_message)
  def decode_field(self, field, value):
    """Decode the given JSON value.
    Args:
      field: a messages.Field for the field we're decoding.
      value: a python value we'd like to decode.
    Returns:
      A value suitable for assignment to field.
    """
    # Custom codecs run first; a 'complete' result short-circuits decoding.
    for decoder in _GetFieldCodecs(field, 'decoder'):
      result = decoder(field, value)
      value = result.value
      if result.complete:
        return value
    if isinstance(field, messages.MessageField):
      field_value = self.decode_message(field.message_type, json.dumps(value))
    elif isinstance(field, messages.EnumField):
      # Map a custom JSON enum name back to the python enum name, if any.
      value = GetCustomJsonEnumMapping(field.type, json_name=value) or value
      try:
        field_value = super(_ProtoJsonApiTools, self).decode_field(field, value)
      except messages.DecodeError:
        if not isinstance(value, six.string_types):
          raise
        # Unknown string enum values are tolerated here (None placeholder).
        field_value = None
    else:
      field_value = super(_ProtoJsonApiTools, self).decode_field(field, value)
    return field_value
  def encode_message(self, message):
    """Encode message (or a FieldList of messages) as JSON text."""
    if isinstance(message, messages.FieldList):
      return '[%s]' % (', '.join(self.encode_message(x) for x in message))
    # A registered custom codec fully replaces the default encoding.
    if type(message) in _CUSTOM_MESSAGE_CODECS:
      return _CUSTOM_MESSAGE_CODECS[type(message)].encoder(message)
    # Move mapped unknown fields back into the wire representation.
    message = _EncodeUnknownFields(message)
    result = super(_ProtoJsonApiTools, self).encode_message(message)
    return _EncodeCustomFieldNames(message, result)
  def encode_field(self, field, value):
    """Encode the given value as JSON.
    Args:
      field: a messages.Field for the field we're encoding.
      value: a value for field.
    Returns:
      A python value suitable for json.dumps.
    """
    # Custom codecs run first; a 'complete' result short-circuits encoding.
    for encoder in _GetFieldCodecs(field, 'encoder'):
      result = encoder(field, value)
      value = result.value
      if result.complete:
        return value
    if isinstance(field, messages.EnumField):
      # Apply custom python->JSON enum name mappings (repeated or single).
      if field.repeated:
        remapped_value = [GetCustomJsonEnumMapping(
            field.type, python_name=e.name) or e.name for e in value]
      else:
        remapped_value = GetCustomJsonEnumMapping(
            field.type, python_name=value.name)
      if remapped_value:
        return remapped_value
    if (isinstance(field, messages.MessageField) and
        not isinstance(field, message_types.DateTimeField)):
      value = json.loads(self.encode_message(value))
    return super(_ProtoJsonApiTools, self).encode_field(field, value)
# TODO(user): Fold this and _IncludeFields in as codecs.
def _DecodeUnknownFields(message, encoded_message):
  """Rewrite unknown fields in message into message.destination."""
  target_field_name = _UNRECOGNIZED_FIELD_MAPPINGS.get(type(message))
  if target_field_name is None:
    # This message type has no registered destination for unknown fields.
    return message
  pair_field = message.field_by_name(target_field_name)
  if not isinstance(pair_field, messages.MessageField):
    raise exceptions.InvalidDataFromServerError(
        'Unrecognized fields must be mapped to a compound '
        'message type.')
  pair_type = pair_field.message_type
  # TODO(user): Add more error checking around the pair
  # type being exactly what we suspect (field names, etc).
  if not isinstance(pair_type.value, messages.MessageField):
    new_values = _DecodeUnrecognizedFields(message, pair_type)
  else:
    new_values = _DecodeUnknownMessages(
        message, json.loads(encoded_message), pair_type)
  setattr(message, target_field_name, new_values)
  # Clear the raw unrecognized-field store now that the values have a home.
  setattr(message, '_Message__unrecognized_fields', {})
  return message
def _DecodeUnknownMessages(message, encoded_message, pair_type):
  """Process unknown fields in encoded_message of a message type."""
  value_field_type = pair_type.value.type
  known_names = frozenset(f.name for f in message.all_fields())
  decoded_pairs = []
  # Anything in the wire payload that isn't a declared field becomes a
  # key/value pair decoded into the pair message type.
  for field_name, raw_value in six.iteritems(encoded_message):
    if field_name in known_names:
      continue
    decoded_pairs.append(pair_type(
        key=field_name,
        value=PyValueToMessage(value_field_type, raw_value)))
  return decoded_pairs
def _DecodeUnrecognizedFields(message, pair_type):
  """Process unrecognized fields in message."""
  # TODO(user): Consider validating the variant if
  # the assignment below doesn't take care of it. It may
  # also be necessary to check it in the case that the
  # type has multiple encodings.
  # The 'value' field shape is a property of pair_type, so resolve it once
  # outside the loop instead of per unknown field.
  value_field = pair_type.field_by_name('value')
  value_is_message = isinstance(value_field, messages.MessageField)
  decoded_pairs = []
  for unknown_name in message.all_unrecognized_fields():
    raw_value, _ = message.get_unrecognized_field_info(unknown_name)
    if value_is_message:
      converted = DictToMessage(raw_value, pair_type.value.message_type)
    else:
      converted = raw_value
    decoded_pairs.append(pair_type(key=str(unknown_name), value=converted))
  return decoded_pairs
def _EncodeUnknownFields(message):
  """Remap unknown fields in message out of message.source."""
  source_field_name = _UNRECOGNIZED_FIELD_MAPPINGS.get(type(message))
  if source_field_name is None:
    return message
  # Work on a copy so the caller's message is left untouched.
  encoded = CopyProtoMessage(message)
  pairs_field = message.field_by_name(source_field_name)
  if not isinstance(pairs_field, messages.MessageField):
    raise exceptions.InvalidUserInputError(
        'Invalid pairs field %s' % pairs_field)
  value_variant = pairs_field.message_type.field_by_name('value').variant
  for pair in getattr(message, source_field_name):
    if value_variant != messages.Variant.MESSAGE:
      pair_value = pair.value
    else:
      pair_value = MessageToDict(pair.value)
    encoded.set_unrecognized_field(pair.key, pair_value, value_variant)
  # The pairs now live in unrecognized-field storage; clear the source
  # field on the copy so they aren't serialized twice.
  setattr(encoded, source_field_name, [])
  return encoded
def _SafeEncodeBytes(field, value):
  """Encode the bytes in value as urlsafe base64."""
  try:
    if not field.repeated:
      encoded = base64.urlsafe_b64encode(value)
    else:
      encoded = [base64.urlsafe_b64encode(entry) for entry in value]
    return CodecResult(value=encoded, complete=True)
  except TypeError:
    # Not byte-like data; hand the value back untouched so other
    # codecs (or the default encoder) can take a shot at it.
    return CodecResult(value=value, complete=False)
def _SafeDecodeBytes(unused_field, value):
  """Decode the urlsafe base64 value into bytes."""
  try:
    decoded = base64.urlsafe_b64decode(str(value))
    return CodecResult(value=decoded, complete=True)
  except TypeError:
    # Malformed input; pass it through unmodified for later handling.
    return CodecResult(value=value, complete=False)
def _ProcessUnknownEnums(message, encoded_message):
  """Add unknown enum values from encoded_message as unknown fields.

  ProtoRPC diverges from the usual protocol buffer behavior here and
  doesn't allow unknown fields. Throwing on unknown fields makes it
  impossible to let servers add new enum values and stay compatible
  with older clients, which isn't reasonable for us. We simply store
  unrecognized enum values as unknown fields, and all is well.

  Args:
    message: Proto message we've decoded thus far.
    encoded_message: JSON string we're decoding.

  Returns:
    message, with any unknown enums stored as unrecognized fields.
  """
  if not encoded_message:
    return message
  decoded = json.loads(encoded_message)
  enum_fields = (f for f in message.all_fields()
                 if isinstance(f, messages.EnumField))
  for field in enum_fields:
    # Only stash the wire value when decoding left the field unassigned,
    # i.e. the enum value was not recognized.
    if (field.name in decoded and
        message.get_assigned_value(field.name) is None):
      message.set_unrecognized_field(
          field.name, decoded[field.name], messages.Variant.ENUM)
  return message
def _ProcessUnknownMessages(message, encoded_message):
  """Store any remaining unknown fields as strings.

  ProtoRPC currently ignores unknown values for which no type can be
  determined (and logs a "No variant found" message). For the purposes
  of reserializing, this is quite harmful (since it throws away
  information). Here we simply add those as unknown fields of type
  string (so that they can easily be reserialized).

  Args:
    message: Proto message we've decoded thus far.
    encoded_message: JSON string we're decoding.

  Returns:
    message, with any remaining unrecognized fields saved.
  """
  if not encoded_message:
    return message
  decoded = json.loads(encoded_message)
  # Snapshot every name the message already accounts for, declared or
  # previously stored as unrecognized, before we start adding more.
  known_names = set(x.name for x in message.all_fields())
  known_names.update(message.all_unrecognized_fields())
  for name in decoded:
    if name not in known_names:
      message.set_unrecognized_field(name, decoded[name],
                                     messages.Variant.STRING)
  return message
# Run every BytesField through urlsafe-base64 encode/decode by default.
RegisterFieldTypeCodec(_SafeEncodeBytes, _SafeDecodeBytes)(messages.BytesField)
# Note that these could share a dictionary, since they're keyed by
# distinct types, but it's not really worth it.
# Maps enum definition name -> {python_name: json_name} remappings.
_JSON_ENUM_MAPPINGS = {}
# Maps message definition name -> {python_field_name: json_field_name}.
_JSON_FIELD_MAPPINGS = {}
def AddCustomJsonEnumMapping(enum_type, python_name, json_name):
  """Add a custom wire encoding for a given enum value.

  This is primarily used in generated code, to handle enum values
  which happen to be Python keywords.

  Args:
    enum_type: (messages.Enum) An enum type
    python_name: (basestring) Python name for this value.
    json_name: (basestring) JSON name to be used on the wire.
  """
  if not issubclass(enum_type, messages.Enum):
    raise exceptions.TypecheckError(
        'Cannot set JSON enum mapping for non-enum "%s"' % enum_type)
  if python_name not in enum_type.names():
    raise exceptions.InvalidDataError(
        'Enum value %s not a value for type %s' % (python_name, enum_type))
  mapping_for_enum = _JSON_ENUM_MAPPINGS.setdefault(
      enum_type.definition_name(), {})
  # Refuse to silently overwrite a mapping in either direction.
  _CheckForExistingMappings('enum', enum_type, python_name, json_name)
  mapping_for_enum[python_name] = json_name
def AddCustomJsonFieldMapping(message_type, python_name, json_name):
  """Add a custom wire encoding for a given message field.

  This is primarily used in generated code, to handle enum values
  which happen to be Python keywords.

  Args:
    message_type: (messages.Message) A message type
    python_name: (basestring) Python name for this value.
    json_name: (basestring) JSON name to be used on the wire.
  """
  if not issubclass(message_type, messages.Message):
    raise exceptions.TypecheckError(
        'Cannot set JSON field mapping for non-message "%s"' % message_type)
  try:
    _ = message_type.field_by_name(python_name)
  except KeyError:
    raise exceptions.InvalidDataError(
        'Field %s not recognized for type %s' % (python_name, message_type))
  mapping_for_message = _JSON_FIELD_MAPPINGS.setdefault(
      message_type.definition_name(), {})
  # Refuse to silently overwrite a mapping in either direction.
  _CheckForExistingMappings('field', message_type, python_name, json_name)
  mapping_for_message[python_name] = json_name
def GetCustomJsonEnumMapping(enum_type, python_name=None, json_name=None):
  """Return the appropriate remapping for the given enum, or None."""
  # Thin wrapper selecting the enum-specific mapping table.
  return _FetchRemapping(enum_type.definition_name(), 'enum',
                         mappings=_JSON_ENUM_MAPPINGS,
                         python_name=python_name, json_name=json_name)
def GetCustomJsonFieldMapping(message_type, python_name=None, json_name=None):
  """Return the appropriate remapping for the given field, or None."""
  # Thin wrapper selecting the field-specific mapping table.
  return _FetchRemapping(message_type.definition_name(), 'field',
                         mappings=_JSON_FIELD_MAPPINGS,
                         python_name=python_name, json_name=json_name)
def _FetchRemapping(type_name, mapping_type, python_name=None, json_name=None,
mappings=None):
"""Common code for fetching a key or value from a remapping dict."""
if python_name and json_name:
raise exceptions.InvalidDataError(
'Cannot specify both python_name and json_name for %s remapping' % (
mapping_type,))
if not (python_name or json_name):
raise exceptions.InvalidDataError(
'Must specify either python_name or json_name for %s remapping' % (
mapping_type,))
field_remappings = mappings.get(type_name, {})
if field_remappings:
if python_name:
return field_remappings.get(python_name)
elif json_name:
if json_name in list(field_remappings.values()):
return [k for k in field_remappings
if field_remappings[k] == json_name][0]
return None
def _CheckForExistingMappings(mapping_type, message_type,
                              python_name, json_name):
  """Validate that no mappings exist for the given values."""
  if mapping_type == 'field':
    getter = GetCustomJsonFieldMapping
  elif mapping_type == 'enum':
    getter = GetCustomJsonEnumMapping
  # Check both directions so neither name can silently re-bind a mapping:
  # first the python name, then the json (wire) name.
  for name, lookup_kwargs in ((python_name, {'python_name': python_name}),
                              (json_name, {'json_name': json_name})):
    existing = getter(message_type, **lookup_kwargs)
    if existing is not None:
      raise exceptions.InvalidDataError(
          'Cannot add mapping for %s "%s", already mapped to "%s"' % (
              mapping_type, name, existing))
def _EncodeCustomFieldNames(message, encoded_value):
  """Rewrite python field names in encoded_value to their custom json names.

  Args:
    message: the message whose type determines the field-name remappings.
    encoded_value: (string) JSON encoding of message using python names.

  Returns:
    JSON string with remapped python field names replaced by their custom
    wire (json) names.
  """
  message_name = type(message).definition_name()
  field_remappings = list(_JSON_FIELD_MAPPINGS.get(message_name, {}).items())
  if field_remappings:
    decoded_value = json.loads(encoded_value)
    for python_name, json_name in field_remappings:
      # Bug fix: membership must be tested against the decoded dict, not
      # the raw JSON string. The old substring test could match a field
      # name appearing inside some value, after which pop() raised
      # KeyError because no such key existed.
      if python_name in decoded_value:
        decoded_value[json_name] = decoded_value.pop(python_name)
    encoded_value = json.dumps(decoded_value)
  return encoded_value
def _DecodeCustomFieldNames(message_type, encoded_message):
  """Rewrite custom json field names in encoded_message to python names."""
  field_remappings = _JSON_FIELD_MAPPINGS.get(
      message_type.definition_name(), {})
  if not field_remappings:
    return encoded_message
  decoded_message = json.loads(encoded_message)
  # Replace each wire (json) name with its python equivalent.
  for python_name, json_name in list(field_remappings.items()):
    if json_name in decoded_message:
      decoded_message[python_name] = decoded_message.pop(json_name)
  return json.dumps(decoded_message)
| {
"content_hash": "4aecc3c583cae7374adf592a426307ee",
"timestamp": "",
"source": "github",
"line_count": 635,
"max_line_length": 80,
"avg_line_length": 35.13385826771653,
"alnum_prop": 0.6946212460779919,
"repo_name": "ychen820/microblog",
"id": "02474aba8ef783e69dfe0f06707907bfad5e75b8",
"size": "22310",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "y/google-cloud-sdk/.install/.backup/lib/googlecloudapis/apitools/base/py/encoding.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "414229"
},
{
"name": "CSS",
"bytes": "257787"
},
{
"name": "Emacs Lisp",
"bytes": "4733"
},
{
"name": "Groff",
"bytes": "1236200"
},
{
"name": "HTML",
"bytes": "2617468"
},
{
"name": "JavaScript",
"bytes": "1106437"
},
{
"name": "Makefile",
"bytes": "15714"
},
{
"name": "Objective-C",
"bytes": "26302"
},
{
"name": "PHP",
"bytes": "2511443"
},
{
"name": "Perl",
"bytes": "1109010"
},
{
"name": "Python",
"bytes": "71588489"
},
{
"name": "R",
"bytes": "548"
},
{
"name": "Shell",
"bytes": "49796"
},
{
"name": "TeX",
"bytes": "3149"
},
{
"name": "VimL",
"bytes": "5645"
}
],
"symlink_target": ""
} |
''' handler module '''
__all__ = ['handlers']
from heron.ui.src.python.handlers import api
from heron.ui.src.python.handlers.base import BaseHandler
from heron.ui.src.python.handlers.mainhandler import MainHandler
from heron.ui.src.python.handlers.notfound import NotFoundHandler
################################################################################
# Handlers for topology related requests
################################################################################
from heron.ui.src.python.handlers.topology import ContainerFileDataHandler
from heron.ui.src.python.handlers.topology import ContainerFileDownloadHandler
from heron.ui.src.python.handlers.topology import ContainerFileHandler
from heron.ui.src.python.handlers.topology import ContainerFileStatsHandler
from heron.ui.src.python.handlers.topology import ListTopologiesHandler
from heron.ui.src.python.handlers.topology import TopologyPlanHandler
from heron.ui.src.python.handlers.topology import TopologyConfigHandler
from heron.ui.src.python.handlers.topology import TopologyExceptionsPageHandler
| {
"content_hash": "ef218e1cc297108712be1059a2f95438",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 80,
"avg_line_length": 54.05,
"alnum_prop": 0.7215541165587419,
"repo_name": "cliffyg/heron",
"id": "60da8dbaa199c424a307ce5160b01f0b2ed3e504",
"size": "1081",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "heron/ui/src/python/handlers/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "8537"
},
{
"name": "C++",
"bytes": "1059998"
},
{
"name": "CSS",
"bytes": "106404"
},
{
"name": "HTML",
"bytes": "153565"
},
{
"name": "Java",
"bytes": "2135618"
},
{
"name": "JavaScript",
"bytes": "165310"
},
{
"name": "M4",
"bytes": "17941"
},
{
"name": "Makefile",
"bytes": "498"
},
{
"name": "Objective-C",
"bytes": "1445"
},
{
"name": "Perl",
"bytes": "9085"
},
{
"name": "Protocol Buffer",
"bytes": "18193"
},
{
"name": "Python",
"bytes": "658792"
},
{
"name": "Ruby",
"bytes": "1930"
},
{
"name": "Scala",
"bytes": "4640"
},
{
"name": "Shell",
"bytes": "128229"
},
{
"name": "Thrift",
"bytes": "915"
}
],
"symlink_target": ""
} |
import itertools
from typing import List
from django.db import connection
from django.db.models import Manager, Model
from django.db.models.query import QuerySet
from django.dispatch import Signal
from querybuilder.query import Query
from . import upsert2
# A signal that is emitted when any bulk operation occurs.
# NOTE(review): the bare triple-quoted string below is a no-op statement
# kept as Sphinx-style documentation of the signal's keyword argument
# (receivers are sent ``model=<model class>``).
post_bulk_operation = Signal()
"""
providing_args=['model']
"""
def id_dict(queryset):
    """
    Return a dictionary of all the objects keyed on their primary key.

    :rtype: dict
    :returns: A dictionary mapping each object's ``pk`` to the object, for
        every object in the queryset or manager.

    Examples:

    .. code-block:: python

        TestModel.objects.create(int_field=1)
        TestModel.objects.create(int_field=2)
        print(id_dict(TestModel.objects.all()))
    """
    keyed_objs = {}
    for model_obj in queryset:
        keyed_objs[model_obj.pk] = model_obj
    return keyed_objs
def _get_model_objs_to_update_and_create(model_objs, unique_fields, update_fields, extant_model_objs):
"""
Used by bulk_upsert to gather lists of models that should be updated and created.
"""
# Find all of the objects to update and all of the objects to create
model_objs_to_update, model_objs_to_create = list(), list()
for model_obj in model_objs:
extant_model_obj = extant_model_objs.get(tuple(getattr(model_obj, field) for field in unique_fields), None)
if extant_model_obj is None:
# If the object needs to be created, make a new instance of it
model_objs_to_create.append(model_obj)
else:
# If the object needs to be updated, update its fields
for field in update_fields:
setattr(extant_model_obj, field, getattr(model_obj, field))
model_objs_to_update.append(extant_model_obj)
return model_objs_to_update, model_objs_to_create
def _get_prepped_model_field(model_obj, field):
    """
    Gets the value of a field of a model obj that is prepared for the db.
    """
    field_descriptor = model_obj._meta.get_field(field)
    raw_value = getattr(model_obj, field_descriptor.attname)
    # Let the field convert the python value into its database representation.
    return field_descriptor.get_db_prep_save(raw_value, connection)
def _fetch_models_by_pk(queryset: QuerySet, models: List[Model]) -> List[Model]:
    """
    If the given list of model objects is not empty, return a list of newly fetched models.
    This is important when dependent consumers need relationships hydrated after bulk creating models
    """
    if not models:
        # Nothing was created, so there is nothing to re-fetch.
        return models
    pks = [model.pk for model in models]
    return list(queryset.filter(pk__in=pks))
def bulk_upsert(
    queryset, model_objs, unique_fields, update_fields=None, return_upserts=False, return_upserts_distinct=False,
    sync=False, native=False
):
    """
    Performs a bulk update or insert on a list of model objects. Matches all objects in the queryset
    with the objs provided using the field values in unique_fields.
    If an existing object is matched, it is updated with the values from the provided objects. Objects
    that don't match anything are bulk created.
    A user can provide a list update_fields so that any changed values on those fields will be updated.
    However, if update_fields is not provided, this function reduces down to performing a bulk_create
    on any non extant objects.

    :type model_objs: list of :class:`Models<django:django.db.models.Model>`
    :param model_objs: A list of models to upsert.

    :type unique_fields: list of str
    :param unique_fields: A list of fields that are used to determine if an object in objs matches a model
        from the queryset.

    :type update_fields: list of str
    :param update_fields: A list of fields used from the objects in objs as fields when updating existing
        models. If None, this function will only perform a bulk create for model_objs that do not
        currently exist in the database.

    :type return_upserts_distinct: bool
    :param return_upserts_distinct: A flag specifying whether to return the upserted values as a list of distinct lists,
        one containing the updated models and the other containing the new models. If True, this performs an
        additional query to fetch any bulk created values.

    :type return_upserts: bool
    :param return_upserts: A flag specifying whether to return the upserted values. If True, this performs
        an additional query to fetch any bulk created values.

    :type sync: bool
    :param sync: A flag specifying whether a sync operation should be applied to the bulk_upsert. If this
        is True, all values in the queryset that were not updated will be deleted such that the
        entire list of model objects is synced to the queryset.

    :type native: bool
    :param native: A flag specifying whether to use postgres insert on conflict (upsert).

    :signals: Emits a post_bulk_operation when a bulk_update or a bulk_create occurs.

    Examples:

    .. code-block:: python

        # Start off with no objects in the database. Call a bulk_upsert on the TestModel, which includes
        # a char_field, int_field, and float_field
        bulk_upsert(TestModel.objects.all(), [
            TestModel(float_field=1.0, char_field='1', int_field=1),
            TestModel(float_field=2.0, char_field='2', int_field=2),
            TestModel(float_field=3.0, char_field='3', int_field=3),
        ], ['int_field'], ['char_field'])

        # All objects should have been created
        print(TestModel.objects.count())
        3

        # Now perform a bulk upsert on all the char_field values. Since the objects existed previously
        # (known by the int_field uniqueness constraint), the char fields should be updated
        bulk_upsert(TestModel.objects.all(), [
            TestModel(float_field=1.0, char_field='0', int_field=1),
            TestModel(float_field=2.0, char_field='0', int_field=2),
            TestModel(float_field=3.0, char_field='0', int_field=3),
        ], ['int_field'], ['char_field'])

        # No more new objects should have been created, and every char field should be 0
        print(TestModel.objects.count(), TestModel.objects.filter(char_field='-1').count())
        3, 3

        # Do the exact same operation, but this time add an additional object that is not already
        # stored. It will be created.
        bulk_upsert(TestModel.objects.all(), [
            TestModel(float_field=1.0, char_field='1', int_field=1),
            TestModel(float_field=2.0, char_field='2', int_field=2),
            TestModel(float_field=3.0, char_field='3', int_field=3),
            TestModel(float_field=4.0, char_field='4', int_field=4),
        ], ['int_field'], ['char_field'])

        # There should be one more object
        print(TestModel.objects.count())
        4

        # Note that one can also do the upsert on a queryset. Perform the same data upsert on a
        # filter for int_field=1. In this case, only one object has the ability to be updated.
        # All of the other objects will be created
        bulk_upsert(TestModel.objects.filter(int_field=1), [
            TestModel(float_field=1.0, char_field='1', int_field=1),
            TestModel(float_field=2.0, char_field='2', int_field=2),
            TestModel(float_field=3.0, char_field='3', int_field=3),
            TestModel(float_field=4.0, char_field='4', int_field=4),
        ], ['int_field'], ['char_field'])

        # There should be three more objects
        print(TestModel.objects.count())
        7
    """
    if not unique_fields:
        raise ValueError('Must provide unique_fields argument')
    update_fields = update_fields or []

    if native:
        # Native path: a single postgres INSERT ... ON CONFLICT statement.
        if return_upserts_distinct:
            raise NotImplementedError('return upserts distinct not supported with native postgres upsert')
        # return_models is forced on when sync=True because the returned pks
        # are needed below to work out which rows to delete.
        return_value = Query().from_table(table=queryset.model).upsert(
            model_objs, unique_fields, update_fields, return_models=return_upserts or sync
        ) or []
        if sync:
            orig_ids = frozenset(queryset.values_list('pk', flat=True))
            queryset.filter(pk__in=orig_ids - frozenset([m.pk for m in return_value])).delete()
        post_bulk_operation.send(sender=queryset.model, model=queryset.model)
        return return_value

    # Create a look up table for all of the objects in the queryset keyed on the unique_fields
    extant_model_objs = {
        tuple(getattr(extant_model_obj, field) for field in unique_fields): extant_model_obj
        for extant_model_obj in queryset
    }
    # Find all of the objects to update and all of the objects to create
    model_objs_to_update, model_objs_to_create = _get_model_objs_to_update_and_create(
        model_objs, unique_fields, update_fields, extant_model_objs)
    # Find all objects in the queryset that will not be updated. These will be deleted if the sync option is
    # True
    if sync:
        model_objs_to_update_set = frozenset(model_objs_to_update)
        model_objs_to_delete = [
            model_obj.pk for model_obj in extant_model_objs.values() if model_obj not in model_objs_to_update_set
        ]
        if model_objs_to_delete:
            queryset.filter(pk__in=model_objs_to_delete).delete()
    # Apply bulk updates and creates
    if update_fields:
        bulk_update(queryset, model_objs_to_update, update_fields)
    created_models = queryset.bulk_create(model_objs_to_create)
    # Optionally return the bulk upserted values.
    # NOTE(review): when neither return flag is set this path returns None.
    if return_upserts_distinct:
        # return a list of lists, the first being the updated models, the second being the newly created objects
        return model_objs_to_update, _fetch_models_by_pk(queryset, created_models)
    if return_upserts:
        # return a union list of created and updated models
        return model_objs_to_update + _fetch_models_by_pk(queryset, created_models)
def bulk_upsert2(
    queryset, model_objs, unique_fields, update_fields=None, returning=False,
    ignore_duplicate_updates=True, return_untouched=False
):
    """
    Performs a bulk update or insert on a list of model objects. Matches all objects in the queryset
    with the objs provided using the field values in unique_fields.
    If an existing object is matched, it is updated with the values from the provided objects. Objects
    that don't match anything are bulk created.
    A user can provide a list update_fields so that any changed values on those fields will be updated.
    However, if update_fields is not provided, this function reduces down to performing a bulk_create
    on any non extant objects.

    Args:
        queryset (Model|QuerySet): A model or a queryset that defines the collection to sync
        model_objs (List[Model]): A list of Django models to sync. All models in this list
            will be bulk upserted and any models not in the table (or queryset) will be deleted
            if sync=True.
        unique_fields (List[str]): A list of fields that define the uniqueness of the model. The
            model must have a unique constraint on these fields
        update_fields (List[str], default=None): A list of fields to update whenever objects
            already exist. If an empty list is provided, it is equivalent to doing a bulk
            insert on the objects that don't exist. If ``None``, all fields will be updated.
        returning (bool|List[str]): If ``True``, returns all fields. If a list, only returns
            fields in the list. Return values are split in a tuple of created and updated models
        ignore_duplicate_updates (bool, default=False): Ignore updating a row in the upsert if all of the update fields
            are duplicates
        return_untouched (bool, default=False): Return values that were not touched by the upsert operation

    Returns:
        UpsertResult: A list of results if ``returning`` is not ``False``. created, updated, and untouched,
        results can be obtained by accessing the ``created``, ``updated``, and ``untouched`` properties
        of the result.

    Examples:

    .. code-block:: python

        # Start off with no objects in the database. Call a bulk_upsert on the TestModel, which includes
        # a char_field, int_field, and float_field
        bulk_upsert2(TestModel.objects.all(), [
            TestModel(float_field=1.0, char_field='1', int_field=1),
            TestModel(float_field=2.0, char_field='2', int_field=2),
            TestModel(float_field=3.0, char_field='3', int_field=3),
        ], ['int_field'], ['char_field'])

        # All objects should have been created
        print(TestModel.objects.count())
        3

        # Now perform a bulk upsert on all the char_field values. Since the objects existed previously
        # (known by the int_field uniqueness constraint), the char fields should be updated
        bulk_upsert2(TestModel.objects.all(), [
            TestModel(float_field=1.0, char_field='0', int_field=1),
            TestModel(float_field=2.0, char_field='0', int_field=2),
            TestModel(float_field=3.0, char_field='0', int_field=3),
        ], ['int_field'], ['char_field'])

        # No more new objects should have been created, and every char field should be 0
        print(TestModel.objects.count(), TestModel.objects.filter(char_field='-1').count())
        3, 3

        # Do the exact same operation, but this time add an additional object that is not already
        # stored. It will be created.
        bulk_upsert2(TestModel.objects.all(), [
            TestModel(float_field=1.0, char_field='1', int_field=1),
            TestModel(float_field=2.0, char_field='2', int_field=2),
            TestModel(float_field=3.0, char_field='3', int_field=3),
            TestModel(float_field=4.0, char_field='4', int_field=4),
        ], ['int_field'], ['char_field'])

        # There should be one more object
        print(TestModel.objects.count())
        4

        # Note that one can also do the upsert on a queryset. Perform the same data upsert on a
        # filter for int_field=1. In this case, only one object has the ability to be updated.
        # All of the other objects will be created
        bulk_upsert2(TestModel.objects.filter(int_field=1), [
            TestModel(float_field=1.0, char_field='1', int_field=1),
            TestModel(float_field=2.0, char_field='2', int_field=2),
            TestModel(float_field=3.0, char_field='3', int_field=3),
            TestModel(float_field=4.0, char_field='4', int_field=4),
        ], ['int_field'], ['char_field'])

        # There should be three more objects
        print(TestModel.objects.count())
        7

        # Return creates and updates on the same set of models
        created, updated = bulk_upsert2(TestModel.objects.filter(int_field=1), [
            TestModel(float_field=1.0, char_field='1', int_field=1),
            TestModel(float_field=2.0, char_field='2', int_field=2),
            TestModel(float_field=3.0, char_field='3', int_field=3),
            TestModel(float_field=4.0, char_field='4', int_field=4),
        ], ['int_field'], ['char_field'])

        # All four objects should be updated
        print(len(updated))
        4
    """
    # Delegate to the native postgres ON CONFLICT implementation.
    results = upsert2.upsert(queryset, model_objs, unique_fields,
                             update_fields=update_fields, returning=returning,
                             ignore_duplicate_updates=ignore_duplicate_updates,
                             return_untouched=return_untouched)
    # Notify listeners (e.g. cache invalidation) that a bulk mutation ran.
    post_bulk_operation.send(sender=queryset.model, model=queryset.model)
    return results
def sync(queryset, model_objs, unique_fields, update_fields=None, **kwargs):
    """
    Performs a sync operation on a queryset, making the contents of the
    queryset match the contents of model_objs.

    This function calls bulk_upsert underneath the hood with sync=True.

    :type model_objs: list of :class:`Models<django:django.db.models.Model>`
    :param model_objs: The models to sync

    :type unique_fields: list of str
    :param unique_fields: A list of fields that are used to determine if an
        object in objs matches a model from the queryset.

    :type update_fields: list of str
    :param update_fields: A list of fields used from the objects in objs as fields when updating existing
        models. If None, this function will only perform a bulk create for model_objs that do not
        currently exist in the database.

    :type native: bool
    :param native: A flag specifying whether to use postgres insert on conflict (upsert) when performing
        bulk upsert.
    """
    # sync=True makes bulk_upsert delete queryset rows not present in model_objs.
    return bulk_upsert(queryset, model_objs, unique_fields, update_fields=update_fields, sync=True, **kwargs)
def sync2(queryset, model_objs, unique_fields, update_fields=None, returning=False, ignore_duplicate_updates=True):
    """
    Performs a sync operation on a queryset, making the contents of the
    queryset match the contents of model_objs.

    Note: The definition of a sync requires that we return untouched rows from the upsert operation. There is
    no way to turn off returning untouched rows in a sync.

    Args:
        queryset (Model|QuerySet): A model or a queryset that defines the collection to sync
        model_objs (List[Model]): A list of Django models to sync. All models in this list
            will be bulk upserted and any models not in the table (or queryset) will be deleted
            if sync=True.
        unique_fields (List[str]): A list of fields that define the uniqueness of the model. The
            model must have a unique constraint on these fields
        update_fields (List[str], default=None): A list of fields to update whenever objects
            already exist. If an empty list is provided, it is equivalent to doing a bulk
            insert on the objects that don't exist. If `None`, all fields will be updated.
        returning (bool|List[str]): If True, returns all fields. If a list, only returns
            fields in the list. Return values are split in a tuple of created, updated, and
            deleted models.
        ignore_duplicate_updates (bool, default=False): Ignore updating a row in the upsert if all
            of the update fields are duplicates

    Returns:
        UpsertResult: A list of results if ``returning`` is not ``False``. created, updated, untouched,
        and deleted results can be obtained by accessing the ``created``, ``updated``, ``untouched``,
        and ``deleted`` properties of the result.
    """
    # sync=True deletes queryset rows that are absent from model_objs.
    results = upsert2.upsert(queryset, model_objs, unique_fields,
                             update_fields=update_fields, returning=returning, sync=True,
                             ignore_duplicate_updates=ignore_duplicate_updates)
    # Notify listeners that a bulk mutation just occurred.
    post_bulk_operation.send(sender=queryset.model, model=queryset.model)
    return results
def get_or_none(queryset, **query_params):
    """
    Get an object or return None if it doesn't exist.

    :param query_params: The query parameters used in the lookup.

    :returns: A model object if one exists with the query params, None otherwise.

    Examples:

    .. code-block:: python

        model_obj = get_or_none(TestModel.objects, int_field=1)
        print(model_obj)
        None

        TestModel.objects.create(int_field=1)
        model_obj = get_or_none(TestModel.objects, int_field=1)
        print(model_obj.int_field)
        1
    """
    try:
        return queryset.get(**query_params)
    except queryset.model.DoesNotExist:
        # Absence is an expected outcome here, not an error condition.
        return None
def single(queryset):
    """
    Assumes that this model only has one element in the table and returns it.
    If the table has more than one or no value, an exception is raised.

    :returns: The only model object in the queryset.

    :raises: :class:`DoesNotExist <django:django.core.exceptions.ObjectDoesNotExist>`
        error when the object does not exist or a
        :class:`MultipleObjectsReturned <django:django.core.exceptions.MultipleObjectsReturned>`
        error when there is more than one object.

    Examples:

    .. code-block:: python

        TestModel.objects.create(int_field=1)
        model_obj = single(TestModel.objects)
        print(model_obj.int_field)
        1
    """
    # Delegate to get() with no filters; it enforces exactly-one semantics.
    return queryset.get()
def bulk_update(manager, model_objs, fields_to_update):
    """
    Bulk updates a list of model objects that are already saved.
    :param manager: The manager (or queryset) of the model being updated.
    :type model_objs: list of :class:`Models<django:django.db.models.Model>`
    :param model_objs: A list of model objects that have been updated.
    :param fields_to_update: A list of fields to be updated. Only these fields will be updated
    :signals: Emits a post_bulk_operation signal when completed.
    Examples:
        .. code-block:: python
        # Create a couple test models
        model_obj1 = TestModel.objects.create(int_field=1, float_field=2.0, char_field='Hi')
        model_obj2 = TestModel.objects.create(int_field=3, float_field=4.0, char_field='Hello')
        # Change their fields and do a bulk update
        model_obj1.int_field = 10
        model_obj1.float_field = 20.0
        model_obj2.int_field = 30
        model_obj2.float_field = 40.0
        bulk_update(TestModel.objects, [model_obj1, model_obj2], ['int_field', 'float_field'])
        # Reload the models and view their changes
        model_obj1 = TestModel.objects.get(id=model_obj1.id)
        print(model_obj1.int_field, model_obj1.float_field)
        10, 20.0
        model_obj2 = TestModel.objects.get(id=model_obj2.id)
        print(model_obj2.int_field, model_obj2.float_field)
        30, 40.0
    """
    # Add the pk to the value fields so we can join
    value_fields = [manager.model._meta.pk.attname] + fields_to_update
    # Build the row values
    row_values = [
        [_get_prepped_model_field(model_obj, field_name) for field_name in value_fields]
        for model_obj in model_objs
    ]
    # If we do not have any values or fields to update just return
    if len(row_values) == 0 or len(fields_to_update) == 0:
        return
    # Create a map of db types
    db_types = [
        manager.model._meta.get_field(field).db_type(connection)
        for field in value_fields
    ]
    # Build the value fields sql
    value_fields_sql = ', '.join(
        '"{field}"'.format(field=manager.model._meta.get_field(field).column)
        for field in value_fields
    )
    # Build the set sql
    update_fields_sql = ', '.join([
        '"{field}" = "new_values"."{field}"'.format(
            field=manager.model._meta.get_field(field).column
        )
        for field in fields_to_update
    ])
    # Build the values sql.
    # An explicit ``::type`` cast is emitted only for the first row's non-pk
    # columns (``not row_number and i``) — presumably the database infers the
    # types of the remaining rows from the first; NOTE(review): the pk column
    # (i == 0) never receives a cast — confirm this is intentional.
    values_sql = ', '.join([
        '({0})'.format(
            ', '.join([
                '%s::{0}'.format(
                    db_types[i]
                ) if not row_number and i else '%s'
                for i, _ in enumerate(row)
            ])
        )
        for row_number, row in enumerate(row_values)
    ])
    # Start building the query: join the model table against a VALUES list on
    # the primary key and copy the new column values across.
    update_sql = (
        'UPDATE {table} '
        'SET {update_fields_sql} '
        'FROM (VALUES {values_sql}) AS new_values ({value_fields_sql}) '
        'WHERE "{table}"."{pk_field}" = "new_values"."{pk_field}"'
    ).format(
        table=manager.model._meta.db_table,
        pk_field=manager.model._meta.pk.column,
        update_fields_sql=update_fields_sql,
        values_sql=values_sql,
        value_fields_sql=value_fields_sql
    )
    # Combine all the row values
    update_sql_params = list(itertools.chain(*row_values))
    # Run the update query
    with connection.cursor() as cursor:
        cursor.execute(update_sql, update_sql_params)
    # call the bulk operation signal
    post_bulk_operation.send(sender=manager.model, model=manager.model)
def upsert(manager, defaults=None, updates=None, **kwargs):
    """
    Performs an update on an object or an insert if the object does not exist.
    :type defaults: dict
    :param defaults: These values are set when the object is created, but are irrelevant
        when the object already exists. This field should only be used when values only need to
        be set during creation. The passed-in dict is never modified.
    :type updates: dict
    :param updates: These values are updated when the object is updated. They also override any
        values provided in the defaults when inserting the object.
    :param kwargs: These values provide the arguments used when checking for the existence of
        the object. They are used in a similar manner to Django's get_or_create function.
    :returns: A tuple of the upserted object and a Boolean that is True if it was created (False otherwise)
    Examples:
        .. code-block:: python
        # Upsert a test model with an int value of 1. Use default values that will be given to it when created
        model_obj, created = upsert(TestModel.objects, int_field=1, defaults={'float_field': 2.0})
        print(created)
        True
        print(model_obj.int_field, model_obj.float_field)
        1, 2.0
        # Do an upsert on that same model with different default fields. Since it already exists, the defaults
        # are not used
        model_obj, created = upsert(TestModel.objects, int_field=1, defaults={'float_field': 3.0})
        print(created)
        False
        print(model_obj.int_field, model_obj.float_field)
        1, 2.0
        # In order to update the float field in an existing object, use the updates dictionary
        model_obj, created = upsert(TestModel.objects, int_field=1, updates={'float_field': 3.0})
        print(created)
        False
        print(model_obj.int_field, model_obj.float_field)
        1, 3.0
        # You can use updates on a newly created object that will also be used as initial values.
        model_obj, created = upsert(TestModel.objects, int_field=2, updates={'float_field': 4.0})
        print(created)
        True
        print(model_obj.int_field, model_obj.float_field)
        2, 4.0
    """
    # BUG FIX: copy before merging. The previous code called
    # ``defaults.update(updates)`` on the caller's dict, silently mutating it.
    creation_values = dict(defaults or {})
    # Updates override any defaults when inserting the object
    creation_values.update(updates or {})
    # Do a get or create
    obj, created = manager.get_or_create(defaults=creation_values, **kwargs)
    # Apply updates to an existing object, but only if something actually changed
    if updates is not None and not created and any(getattr(obj, k) != updates[k] for k in updates):
        for k, v in updates.items():
            setattr(obj, k, v)
        # A dict iterates as its keys, which is what update_fields expects
        obj.save(update_fields=updates)
    return obj, created
class ManagerUtilsQuerySet(QuerySet):
    """
    Defines the methods in the manager utils that can also be applied to querysets.
    """
    def id_dict(self):
        """Delegate to the module-level ``id_dict`` with this queryset."""
        return id_dict(self)
    def bulk_upsert(self, model_objs, unique_fields, update_fields=None, return_upserts=False, native=False):
        """Delegate to the module-level ``bulk_upsert`` with this queryset."""
        return bulk_upsert(
            self, model_objs, unique_fields, update_fields=update_fields, return_upserts=return_upserts, native=native
        )
    def bulk_upsert2(self, model_objs, unique_fields, update_fields=None, returning=False,
                     ignore_duplicate_updates=True, return_untouched=False):
        """Delegate to the module-level ``bulk_upsert2`` with this queryset."""
        return bulk_upsert2(self, model_objs, unique_fields,
                            update_fields=update_fields, returning=returning,
                            ignore_duplicate_updates=ignore_duplicate_updates,
                            return_untouched=return_untouched)
    def bulk_create(self, *args, **kwargs):
        """
        Overrides Django's bulk_create function to emit a post_bulk_operation signal when bulk_create
        is finished.
        """
        ret_val = super(ManagerUtilsQuerySet, self).bulk_create(*args, **kwargs)
        post_bulk_operation.send(sender=self.model, model=self.model)
        return ret_val
    def sync(self, model_objs, unique_fields, update_fields=None, native=False):
        """Delegate to the module-level ``sync`` with this queryset."""
        return sync(self, model_objs, unique_fields, update_fields=update_fields, native=native)
    def sync2(self, model_objs, unique_fields, update_fields=None, returning=False, ignore_duplicate_updates=True):
        """Delegate to the module-level ``sync2`` with this queryset."""
        return sync2(self, model_objs, unique_fields, update_fields=update_fields, returning=returning,
                     ignore_duplicate_updates=ignore_duplicate_updates)
    def get_or_none(self, **query_params):
        """Delegate to the module-level ``get_or_none`` with this queryset."""
        return get_or_none(self, **query_params)
    def single(self):
        """Delegate to the module-level ``single`` with this queryset."""
        return single(self)
    def update(self, **kwargs):
        """
        Overrides Django's update method to emit a post_bulk_operation signal when it completes.
        """
        ret_val = super(ManagerUtilsQuerySet, self).update(**kwargs)
        post_bulk_operation.send(sender=self.model, model=self.model)
        return ret_val
class ManagerUtilsMixin(object):
    """
    A mixin that can be used by django model managers. It provides additional functionality on top
    of the regular Django Manager class.
    """
    def get_queryset(self):
        """Return a ``ManagerUtilsQuerySet`` for this manager's model.

        BUG FIX: passes the manager's database alias through
        (``using=self._db``) so database routing and ``db_manager(...)``
        work, matching Django's stock ``Manager.get_queryset``. When no
        alias is set, ``self._db`` is ``None`` and behavior is unchanged.
        """
        return ManagerUtilsQuerySet(self.model, using=self._db)
    def id_dict(self):
        """Delegate to the module-level ``id_dict`` over the full queryset."""
        return id_dict(self.get_queryset())
    def bulk_upsert(
            self, model_objs, unique_fields, update_fields=None, return_upserts=False, return_upserts_distinct=False,
            native=False):
        """Delegate to the module-level ``bulk_upsert`` over the full queryset."""
        return bulk_upsert(
            self.get_queryset(), model_objs, unique_fields, update_fields=update_fields, return_upserts=return_upserts,
            return_upserts_distinct=return_upserts_distinct, native=native)
    def bulk_upsert2(self, model_objs, unique_fields, update_fields=None, returning=False,
                     ignore_duplicate_updates=True, return_untouched=False):
        """Delegate to the module-level ``bulk_upsert2`` over the full queryset."""
        return bulk_upsert2(
            self.get_queryset(), model_objs, unique_fields,
            update_fields=update_fields, returning=returning,
            ignore_duplicate_updates=ignore_duplicate_updates,
            return_untouched=return_untouched)
    def sync(self, model_objs, unique_fields, update_fields=None, native=False):
        """Delegate to the module-level ``sync`` over the full queryset."""
        return sync(self.get_queryset(), model_objs, unique_fields, update_fields=update_fields, native=native)
    def sync2(self, model_objs, unique_fields, update_fields=None, returning=False, ignore_duplicate_updates=True):
        """Delegate to the module-level ``sync2`` over the full queryset."""
        return sync2(
            self.get_queryset(), model_objs, unique_fields, update_fields=update_fields, returning=returning,
            ignore_duplicate_updates=ignore_duplicate_updates)
    def bulk_update(self, model_objs, fields_to_update):
        """Delegate to the module-level ``bulk_update`` over the full queryset."""
        return bulk_update(self.get_queryset(), model_objs, fields_to_update)
    def upsert(self, defaults=None, updates=None, **kwargs):
        """Delegate to the module-level ``upsert`` over the full queryset."""
        return upsert(self.get_queryset(), defaults=defaults, updates=updates, **kwargs)
    def get_or_none(self, **query_params):
        """Delegate to the module-level ``get_or_none`` over the full queryset."""
        return get_or_none(self.get_queryset(), **query_params)
    def single(self):
        """Delegate to the module-level ``single`` over the full queryset."""
        return single(self.get_queryset())
class ManagerUtilsManager(ManagerUtilsMixin, Manager):
    """Drop-in Django model manager with the manager-utils helpers mixed in.

    Inherits Django's ``Manager`` and adds the :class:`ManagerUtilsMixin`
    methods (``bulk_upsert``, ``sync``, ``get_or_none``, ``single``, ...).
    """
| {
"content_hash": "35da734764433b2d24739550dac13af0",
"timestamp": "",
"source": "github",
"line_count": 744,
"max_line_length": 120,
"avg_line_length": 41.748655913978496,
"alnum_prop": 0.6563858214481182,
"repo_name": "ambitioninc/django-manager-utils",
"id": "e4cb65defd339cce176d575f64129e9b3a4920ac",
"size": "31061",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "manager_utils/manager_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "149279"
}
],
"symlink_target": ""
} |
"""Contains the volume perturb augmentation model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from data_utils.augmentor.base import AugmentorBase
class ShiftPerturbAugmentor(AugmentorBase):
    """Augmentation model that applies a random time shift to audio.

    :param rng: Random generator object.
    :type rng: random.Random
    :param min_shift_ms: Minimal shift in milliseconds.
    :type min_shift_ms: float
    :param max_shift_ms: Maximal shift in milliseconds.
    :type max_shift_ms: float
    """

    def __init__(self, rng, min_shift_ms, max_shift_ms):
        self._rng = rng
        self._min_shift_ms = min_shift_ms
        self._max_shift_ms = max_shift_ms

    def transform_audio(self, audio_segment):
        """Shift the audio by a randomly drawn offset.

        Note that this is an in-place transformation.
        :param audio_segment: Audio segment to add effects to.
        :type audio_segment: AudioSegment|SpeechSegment
        """
        offset_ms = self._rng.uniform(self._min_shift_ms, self._max_shift_ms)
        audio_segment.shift(offset_ms)
| {
"content_hash": "da86899d5f0943812e1d697ce1a231cb",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 76,
"avg_line_length": 32.970588235294116,
"alnum_prop": 0.6735057983942908,
"repo_name": "xinghai-sun/models",
"id": "c4cbe3e172f6b291f3b778b748affda0341a3181",
"size": "1121",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "deep_speech_2/data_utils/augmentor/shift_perturb.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "34663"
},
{
"name": "HTML",
"bytes": "174618"
},
{
"name": "Python",
"bytes": "509771"
},
{
"name": "Shell",
"bytes": "30390"
}
],
"symlink_target": ""
} |
from __future__ import division
import numpy as np
def ndargmax(arr):
    """Return the index tuple of the largest element of ``arr``."""
    # np.argmax with no axis already operates on the flattened array.
    flat_index = np.argmax(arr)
    return np.unravel_index(flat_index, arr.shape)
def match_by_overlap(a, b):
    """Greedily pair the labels of ``a`` with the labels of ``b``.

    Repeatedly picks the (label_a, label_b) pair with the largest overlap
    count, removing both labels from further consideration.
    Returns a list of (a_label, b_label) tuples.
    """
    assert a.ndim == b.ndim == 1 and a.shape[0] == b.shape[0]
    labels_a, labels_b = list(set(a)), list(set(b))
    # overlap[i, j] = number of positions where a == labels_a[i] and b == labels_b[j]
    overlap = np.zeros((len(labels_a), len(labels_b)))
    for i, la in enumerate(labels_a):
        for j, lb in enumerate(labels_b):
            overlap[i, j] = np.dot(a == la, b == lb)
    # Keep the larger label set on the rows so the greedy loop exhausts columns.
    swapped = len(labels_b) > len(labels_a)
    if swapped:
        labels_a, labels_b = labels_b, labels_a
        overlap = overlap.T
    pairs = []
    while overlap.size > 0:
        i, j = ndargmax(overlap)
        pairs.append((labels_a[i], labels_b[j]))
        overlap = np.delete(np.delete(overlap, i, 0), j, 1)
        labels_a = np.delete(labels_a, i)
        labels_b = np.delete(labels_b, j)
    if swapped:
        return [(x, y) for y, x in pairs]
    return pairs
def hamming_error(a, b):
    """Number of positions at which arrays ``a`` and ``b`` disagree."""
    mismatches = a != b
    return mismatches.sum()
def stateseq_hamming_error(sampledstates, truestates):
    """Hamming error of sampled state sequences after best-overlap relabeling.

    Accepts a single sequence or a stack of sequences; returns a scalar for
    one sequence, else an array of per-sequence errors.
    """
    samples = np.array(sampledstates, ndmin=2).copy()
    errors = np.zeros(samples.shape[0])
    for idx, seq in enumerate(samples):
        # match labels by maximum overlap, then rewrite seq in true labels
        relabeled = seq.copy()
        for sampled_label, true_label in match_by_overlap(seq, truestates):
            relabeled[seq == sampled_label] = true_label
        errors[idx] = hamming_error(relabeled, truestates)
    return errors if errors.shape[0] > 1 else errors[0]
def scoreatpercentile(data, per, axis):
    '''
    like the function in scipy.stats but with an axis argument, and works on
    arrays.

    BUG FIX: the previous implementation indexed with a *list* of slices and
    a *float* position, both of which are rejected by modern NumPy. Indices
    are now built as tuples with integer positions; values are unchanged.
    '''
    a = np.sort(data, axis=axis)
    # Fractional position of the requested percentile along `axis`.
    pos = per / 100. * (data.shape[axis] - 1)
    lower = int(np.floor(pos))
    frac = pos - lower

    def _take(i):
        # Select index i along `axis`, full slices everywhere else.
        index = tuple(slice(None) if ii != axis else i for ii in range(a.ndim))
        return a[index]

    if frac == 0:
        return _take(lower)
    # Linear interpolation between the two neighboring order statistics.
    return (1 - frac) * _take(lower) + frac * _take(lower + 1)
def get_autocorr(chains):
    '''
    Autocorrelation of each chain, computed component-by-component.
    '''
    chains = np.array(chains)
    out = np.zeros(chains.shape)
    for chain_index, chain in enumerate(chains):
        for component in range(chain.shape[1]):
            centered = chain[:, component] - chain[:, component].mean(0)
            full_corr = np.correlate(centered, centered, 'full')
            # keep only the non-negative lags (second half of 'full' output)
            out[chain_index, :, component] = full_corr[full_corr.shape[0] // 2:]
    return out
| {
"content_hash": "e266ef48de9bf4081c1c004d042036da",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 97,
"avg_line_length": 31.407894736842106,
"alnum_prop": 0.5890238793464599,
"repo_name": "mattjj/pyhsmm-collapsedinfinite",
"id": "672122ba3f663e925f8e0f27f37d357ea1c58293",
"size": "2387",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "38539"
}
],
"symlink_target": ""
} |
import abc
from abc import ABC, abstractmethod
class A(ABC):
    """Demonstration abstract base class; prints which class is speaking."""

    def print_function(self):
        print("Abstract Base Class")
class SubClassB(A):
    """Concrete subclass that chains to the parent implementation first."""

    def print_function(self):
        super(SubClassB, self).print_function()
        print("Subclass")
if __name__ == "__main__":
B = SubClassB()
B.print_function() | {
"content_hash": "51d7ceee17b1c64119609b7b14bad032",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 36,
"avg_line_length": 18.176470588235293,
"alnum_prop": 0.6148867313915858,
"repo_name": "AnhellO/DAS_Sistemas",
"id": "847e2c8bc1eb805c2ef177ef6d51e0916cf443e9",
"size": "309",
"binary": false,
"copies": "1",
"ref": "refs/heads/ene-jun-2022",
"path": "Ene-Jun-2022/juan-pablo-sarmiento-cervera/práctica-5/ejercicio-3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "8515"
},
{
"name": "Go",
"bytes": "25845"
},
{
"name": "HTML",
"bytes": "36671"
},
{
"name": "Python",
"bytes": "716604"
}
],
"symlink_target": ""
} |
"""
Defines a few (slightly silly) Django models that we can use in unit
tests.
"""
from __future__ import absolute_import, division, print_function, \
unicode_literals
from django.db import models
class Specie(models.Model):
    """A (slightly silly) species record used by the unit tests."""

    # Everyday name of the species; indexed and unique.
    common_name = models.CharField(db_index=True, max_length=40, unique=True)

    # Scientific (binomial) name; indexed and unique.
    binomial_name = models.CharField(db_index=True, max_length=40, unique=True)

    # Tri-state flag: True / False / unknown (NULL).
    is_cuddly = models.NullBooleanField()

    # Optional timestamp of the last sighting.
    last_sighted = models.DateTimeField(null=True)

    color = models.CharField(max_length=24, default='green')
| {
"content_hash": "f026275b921e79f4ab3919a7b88b3de3",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 68,
"avg_line_length": 18.76923076923077,
"alnum_prop": 0.5437158469945356,
"repo_name": "eflglobal/filters-django",
"id": "71d3886005f18848cef8f08503013fc177d0d4bf",
"size": "747",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13189"
}
],
"symlink_target": ""
} |
"""
Support for Qwikswitch Sensors.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.qwikswitch/
"""
import logging
from homeassistant.core import callback
from . import DOMAIN as QWIKSWITCH, QSEntity
DEPENDENCIES = [QWIKSWITCH]
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(hass, _, add_entities, discovery_info=None):
    """Add sensor from the main Qwikswitch component."""
    if discovery_info is None:
        return

    qsusb = hass.data[QWIKSWITCH]
    _LOGGER.debug("Setup qwikswitch.sensor %s, %s", qsusb, discovery_info)
    add_entities([QSSensor(sensor) for sensor in discovery_info[QWIKSWITCH]])
class QSSensor(QSEntity):
    """Sensor based on a Qwikswitch relay/dimmer module."""

    _val = None

    def __init__(self, sensor):
        """Initialize the sensor from its discovery dict."""
        from pyqwikswitch import SENSORS

        super().__init__(sensor['id'], sensor['name'])
        self.channel = sensor['channel']

        sensor_type = sensor['type']
        self._decode, self.unit = SENSORS[sensor_type]
        # A class (rather than a unit string) means no fixed unit was defined.
        if isinstance(self.unit, type):
            self.unit = "{}:{}".format(sensor_type, self.channel)

    @callback
    def update_packet(self, packet):
        """Receive update packet from QSUSB."""
        decoded = self._decode(packet, channel=self.channel)
        _LOGGER.debug("Update %s (%s:%s) decoded as %s: %s",
                      self.entity_id, self.qsid, self.channel, decoded, packet)
        if decoded is not None:
            self._val = decoded
            self.async_schedule_update_ha_state()

    @property
    def unit_of_measurement(self):
        """Return the unit the value is expressed in."""
        return self.unit

    @property
    def unique_id(self):
        """Return a unique identifier for this sensor."""
        return "qs{}:{}".format(self.qsid, self.channel)

    @property
    def state(self):
        """Return the value of the sensor."""
        return str(self._val)
| {
"content_hash": "c12ac759d3104b13a7e95409ff338fc0",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 75,
"avg_line_length": 29.536231884057973,
"alnum_prop": 0.6319921491658489,
"repo_name": "jamespcole/home-assistant",
"id": "07d0247e4f60b5322f8874da2dc1cfb8a3f8ad6a",
"size": "2038",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "homeassistant/components/qwikswitch/sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1081"
},
{
"name": "HCL",
"bytes": "826"
},
{
"name": "Python",
"bytes": "14822074"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17609"
}
],
"symlink_target": ""
} |
"""
Catmull-Rom interpolation.
http://www2.cs.uregina.ca/~anima/408/Notes/Interpolation/Parameterized-Curves-Summary.htm
"""
from .bspline import InterpolatorBSpline
from ..interpolate import Interpolator, Interpolate
from .. import algebra as alg
from ..types import Vector
from typing import Optional, Callable, Mapping, List, Union, Sequence, Dict, Any, Type, TYPE_CHECKING
if TYPE_CHECKING: # pragma: no cover
from ..color import Color
class InterpolatorCatmullRom(InterpolatorBSpline):
    """B-spline interpolator variant that evaluates a Catmull-Rom spline."""

    def setup(self) -> None:
        """Run the base setup, then swap in the Catmull-Rom spline function."""
        super().setup()
        self.spline = alg.catrom
class CatmullRom(Interpolate):
    """Catmull-Rom interpolation plugin."""

    NAME = "catrom"

    def interpolator(
        self,
        coordinates: List[Vector],
        channel_names: Sequence[str],
        create: Type['Color'],
        easings: List[Optional[Callable[..., float]]],
        stops: Dict[int, float],
        space: str,
        out_space: str,
        progress: Optional[Union[Mapping[str, Callable[..., float]], Callable[..., float]]],
        premultiplied: bool,
        extrapolate: bool = False,
        **kwargs: Any
    ) -> Interpolator:
        """Build a Catmull-Rom interpolator for the given coordinates and options."""
        return InterpolatorCatmullRom(
            coordinates, channel_names, create, easings, stops,
            space, out_space, progress, premultiplied, extrapolate
        )
| {
"content_hash": "82c0d6d8016c666fa3b7ccd7a03c4653",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 101,
"avg_line_length": 26.844827586206897,
"alnum_prop": 0.6062941554271034,
"repo_name": "facelessuser/ColorHelper",
"id": "fcacf2d39ac921e134a0106e93aafbef86a72084",
"size": "1557",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/coloraide/interpolate/catmull_rom.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jinja",
"bytes": "4989"
},
{
"name": "Python",
"bytes": "624749"
},
{
"name": "Shell",
"bytes": "19"
}
],
"symlink_target": ""
} |
import os
import subprocess
import shutil
import tempfile
from ..environment import detect_ninja, detect_scanbuild
def scanbuild(exelist, srcdir, blddir, privdir, logdir, args):
    """Configure and build the project under scan-build; return the exit code."""
    with tempfile.TemporaryDirectory(dir=privdir) as scandir:
        # Configure into a scratch directory so the analysis build is isolated.
        setup_rc = subprocess.call(exelist + args + [srcdir, scandir])
        if setup_rc != 0:
            return setup_rc
        # Build under scan-build, writing analyzer reports into logdir.
        return subprocess.call(exelist + ['-o', logdir, detect_ninja(), '-C', scandir])
def run(args):
    """Entry point: ``args`` is [srcdir, blddir, meson-args...]; returns an exit code."""
    srcdir = args[0]
    blddir = args[1]
    meson_cmd = args[2:]
    privdir = os.path.join(blddir, 'meson-private')
    logdir = os.path.join(blddir, 'meson-logs/scanbuild')
    # Clear the results of any previous scan-build run.
    shutil.rmtree(logdir, ignore_errors=True)
    exelist = detect_scanbuild()
    if not exelist:
        # BUG FIX: exelist is empty/falsy here, so the old message
        # interpolated an empty string. Report the real problem instead.
        print('Could not execute scan-build: no scan-build binary found.')
        return 1
    return scanbuild(exelist, srcdir, blddir, privdir, logdir, meson_cmd)
| {
"content_hash": "168908ddcd83532eb100f1cc526a2867",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 75,
"avg_line_length": 30.774193548387096,
"alnum_prop": 0.6530398322851153,
"repo_name": "becm/meson",
"id": "b9e96d20e355b44b513de4d30e237457fc4ed7f4",
"size": "1542",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mesonbuild/scripts/scanbuild.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "4190"
},
{
"name": "Batchfile",
"bytes": "131"
},
{
"name": "C",
"bytes": "167971"
},
{
"name": "C#",
"bytes": "1130"
},
{
"name": "C++",
"bytes": "51171"
},
{
"name": "CMake",
"bytes": "27103"
},
{
"name": "Cuda",
"bytes": "7454"
},
{
"name": "D",
"bytes": "5313"
},
{
"name": "Dockerfile",
"bytes": "1960"
},
{
"name": "Emacs Lisp",
"bytes": "919"
},
{
"name": "Fortran",
"bytes": "11539"
},
{
"name": "Genie",
"bytes": "341"
},
{
"name": "HTML",
"bytes": "117"
},
{
"name": "Inno Setup",
"bytes": "354"
},
{
"name": "Java",
"bytes": "2570"
},
{
"name": "JavaScript",
"bytes": "136"
},
{
"name": "LLVM",
"bytes": "75"
},
{
"name": "Lex",
"bytes": "139"
},
{
"name": "Meson",
"bytes": "454262"
},
{
"name": "Objective-C",
"bytes": "1235"
},
{
"name": "Objective-C++",
"bytes": "381"
},
{
"name": "PowerShell",
"bytes": "2242"
},
{
"name": "Python",
"bytes": "2912935"
},
{
"name": "Roff",
"bytes": "569"
},
{
"name": "Rust",
"bytes": "1079"
},
{
"name": "Shell",
"bytes": "6800"
},
{
"name": "Swift",
"bytes": "1152"
},
{
"name": "Vala",
"bytes": "10025"
},
{
"name": "Verilog",
"bytes": "709"
},
{
"name": "Vim script",
"bytes": "9919"
},
{
"name": "Yacc",
"bytes": "50"
}
],
"symlink_target": ""
} |
import numpy as np
from pgmpy.estimators import ParameterEstimator
from pgmpy.factors.discrete import TabularCPD
from pgmpy.models import BayesianModel
class BayesianEstimator(ParameterEstimator):
    def __init__(self, model, data, **kwargs):
        """
        Class used to compute parameters for a model using Bayesian Parameter Estimation.
        See `MaximumLikelihoodEstimator` for constructor parameters.
        Raises a NotImplementedError if `model` is not a BayesianModel.
        """
        if not isinstance(model, BayesianModel):
            raise NotImplementedError("Bayesian Parameter Estimation is only implemented for BayesianModel")
        super(BayesianEstimator, self).__init__(model, data, **kwargs)

    def get_parameters(self, prior_type='BDeu', equivalent_sample_size=5, pseudo_counts=None):
        """
        Method to estimate the model parameters (CPDs).
        Parameters
        ----------
        prior_type: 'dirichlet', 'BDeu', or 'K2'
            string indicting which type of prior to use for the model parameters.
            - If 'prior_type' is 'dirichlet', the following must be provided:
                'pseudo_counts' = dirichlet hyperparameters; a dict containing, for each variable, a list
                 with a "virtual" count for each variable state, that is added to the state counts.
                 (lexicographic ordering of states assumed)
            - If 'prior_type' is 'BDeu', then an 'equivalent_sample_size'
                 must be specified instead of 'pseudo_counts'. This is equivalent to
                 'prior_type=dirichlet' and using uniform 'pseudo_counts' of
                 `equivalent_sample_size/(node_cardinality*np.prod(parents_cardinalities))` for each node.
                 'equivalent_sample_size' can either be a numerical value or a dict that specifies
                 the size for each variable seperately.
            - A prior_type of 'K2' is a shorthand for 'dirichlet' + setting every pseudo_count to 1,
                 regardless of the cardinality of the variable.
        Returns
        -------
        parameters: list
            List of TabularCPDs, one for each variable of the model
        Examples
        --------
        >>> import numpy as np
        >>> import pandas as pd
        >>> from pgmpy.models import BayesianModel
        >>> from pgmpy.estimators import BayesianEstimator
        >>> values = pd.DataFrame(np.random.randint(low=0, high=2, size=(1000, 4)),
        ...                       columns=['A', 'B', 'C', 'D'])
        >>> model = BayesianModel([('A', 'B'), ('C', 'B'), ('C', 'D')])
        >>> estimator = BayesianEstimator(model, values)
        >>> estimator.get_parameters(prior_type='BDeu', equivalent_sample_size=5)
        [<TabularCPD representing P(C:2) at 0x7f7b534251d0>,
        <TabularCPD representing P(B:2 | C:2, A:2) at 0x7f7b4dfd4da0>,
        <TabularCPD representing P(A:2) at 0x7f7b4dfd4fd0>,
        <TabularCPD representing P(D:2 | C:2) at 0x7f7b4df822b0>]
        """
        parameters = []
        for node in self.model.nodes():
            # Both settings may be given globally or per-node via a dict.
            _equivalent_sample_size = equivalent_sample_size[node] if isinstance(equivalent_sample_size, dict) else \
                equivalent_sample_size
            _pseudo_counts = pseudo_counts[node] if isinstance(pseudo_counts, dict) else pseudo_counts
            cpd = self.estimate_cpd(node,
                                    prior_type=prior_type,
                                    equivalent_sample_size=_equivalent_sample_size,
                                    pseudo_counts=_pseudo_counts)
            parameters.append(cpd)
        return parameters

    def estimate_cpd(self, node, prior_type='BDeu', pseudo_counts=None, equivalent_sample_size=5):
        """
        Method to estimate the CPD for a given variable.
        Parameters
        ----------
        node: int, string (any hashable python object)
            The name of the variable for which the CPD is to be estimated.
        prior_type: 'dirichlet', 'BDeu', 'K2',
            string indicting which type of prior to use for the model parameters.
            - If 'prior_type' is 'dirichlet', the following must be provided:
                'pseudo_counts' = dirichlet hyperparameters; a list or dict
                 with a "virtual" count for each variable state.
                 The virtual counts are added to the actual state counts found in the data.
                 (if a list is provided, a lexicographic ordering of states is assumed)
            - If 'prior_type' is 'BDeu', then an 'equivalent_sample_size'
                 must be specified instead of 'pseudo_counts'. This is equivalent to
                 'prior_type=dirichlet' and using uniform 'pseudo_counts' of
                 `equivalent_sample_size/(node_cardinality*np.prod(parents_cardinalities))`.
            - A prior_type of 'K2' is a shorthand for 'dirichlet' + setting every pseudo_count to 1,
                 regardless of the cardinality of the variable.
        Returns
        -------
        CPD: TabularCPD
        Examples
        --------
        >>> import pandas as pd
        >>> from pgmpy.models import BayesianModel
        >>> from pgmpy.estimators import BayesianEstimator
        >>> data = pd.DataFrame(data={'A': [0, 0, 1], 'B': [0, 1, 0], 'C': [1, 1, 0]})
        >>> model = BayesianModel([('A', 'C'), ('B', 'C')])
        >>> estimator = BayesianEstimator(model, data)
        >>> cpd_C = estimator.estimate_cpd('C', prior_type="dirichlet", pseudo_counts=[1, 2])
        >>> print(cpd_C)
        ╒══════╤══════╤══════╤══════╤════════════════════╕
        │ A    │ A(0) │ A(0) │ A(1) │ A(1)               │
        ├──────┼──────┼──────┼──────┼────────────────────┤
        │ B    │ B(0) │ B(1) │ B(0) │ B(1)               │
        ├──────┼──────┼──────┼──────┼────────────────────┤
        │ C(0) │ 0.25 │ 0.25 │ 0.5  │ 0.3333333333333333 │
        ├──────┼──────┼──────┼──────┼────────────────────┤
        │ C(1) │ 0.75 │ 0.75 │ 0.5  │ 0.6666666666666666 │
        ╘══════╧══════╧══════╧══════╧════════════════════╛
        """
        # BUG FIX: the default used to be a mutable list (`pseudo_counts=[]`)
        # shared across all calls; `None` is the safe sentinel and behaves
        # identically (an empty list fails the length check below).
        if pseudo_counts is None:
            pseudo_counts = []
        node_cardinality = len(self.state_names[node])
        parents = sorted(self.model.get_parents(node))
        parents_cardinalities = [len(self.state_names[parent]) for parent in parents]

        if prior_type == 'K2':
            pseudo_counts = [1] * node_cardinality
        elif prior_type == 'BDeu':
            # Uniform prior whose virtual counts sum to equivalent_sample_size.
            alpha = float(equivalent_sample_size) / (node_cardinality * np.prod(parents_cardinalities))
            pseudo_counts = [alpha] * node_cardinality
        elif prior_type == 'dirichlet':
            if not len(pseudo_counts) == node_cardinality:
                raise ValueError("'pseudo_counts' should have length {0}".format(node_cardinality))
            if isinstance(pseudo_counts, dict):
                # NOTE(review): sorting by *values* looks suspicious — ordering
                # by state (key) seems intended; confirm before changing.
                pseudo_counts = sorted(pseudo_counts.values())
        else:
            raise ValueError("'prior_type' not specified")

        state_counts = self.state_counts(node)
        # Add the virtual (prior) counts to the observed state counts.
        bayesian_counts = (state_counts.T + pseudo_counts).T

        cpd = TabularCPD(node, node_cardinality, np.array(bayesian_counts),
                         evidence=parents,
                         evidence_card=parents_cardinalities,
                         state_names=self.state_names)
        cpd.normalize()
        return cpd
| {
"content_hash": "f525702dfb96af40b3ea4772e414f48e",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 117,
"avg_line_length": 49.10738255033557,
"alnum_prop": 0.5623889572229056,
"repo_name": "abinashpanda/pgmpy",
"id": "60931dd1b64dc597028dda34c7aff577e560b8ba",
"size": "7881",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "pgmpy/estimators/BayesianEstimator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1273659"
},
{
"name": "Shell",
"bytes": "1026"
}
],
"symlink_target": ""
} |
import six
import six.moves
from six import text_type
from mwclient.util import parse_timestamp
import mwclient.page
import mwclient.image
class List(object):
"""Base class for lazy iteration over api response content
This is a class providing lazy iteration. This means that the
content is loaded in chunks as long as the response hints at
continuing content.
"""
def __init__(self, site, list_name, prefix,
limit=None, return_values=None, max_items=None,
*args, **kwargs):
# NOTE: Fix limit
self.site = site
self.list_name = list_name
self.generator = 'list'
self.prefix = prefix
kwargs.update(args)
self.args = kwargs
if limit is None:
limit = site.api_limit
self.args[self.prefix + 'limit'] = text_type(limit)
self.count = 0
self.max_items = max_items
self._iter = iter(six.moves.range(0))
self.last = False
self.result_member = list_name
self.return_values = return_values
def __iter__(self):
return self
def __next__(self):
if self.max_items is not None:
if self.count >= self.max_items:
raise StopIteration
try:
item = six.next(self._iter)
except StopIteration:
if self.last:
raise
self.load_chunk()
item = six.next(self._iter)
self.count += 1
if 'timestamp' in item:
item['timestamp'] = parse_timestamp(item['timestamp'])
if isinstance(self, GeneratorList):
return item
if type(self.return_values) is tuple:
return tuple((item[i] for i in self.return_values))
if self.return_values is not None:
return item[self.return_values]
return item
def next(self, *args, **kwargs):
""" For Python 2.x support """
return self.__next__(*args, **kwargs)
def load_chunk(self):
"""Query a new chunk of data
If the query is empty, `raise StopIteration`.
Else, update the iterator accordingly.
If 'continue' is in the response, it is added to `self.args`
(new style continuation, added in MediaWiki 1.21).
If not, but 'query-continue' is in the response, query its
item called `self.list_name` and add this to `self.args` (old
style continuation).
Else, set `self.last` to True.
"""
data = self.site.get(
'query', (self.generator, self.list_name),
*[(text_type(k), v) for k, v in six.iteritems(self.args)]
)
if not data:
# Non existent page
raise StopIteration
self.set_iter(data)
if data.get('continue'):
# New style continuation, added in MediaWiki 1.21
self.args.update(data['continue'])
elif self.list_name in data.get('query-continue', ()):
# Old style continuation
self.args.update(data['query-continue'][self.list_name])
else:
self.last = True
def set_iter(self, data):
"""Set `self._iter` to the API response `data`."""
if self.result_member not in data['query']:
self._iter = iter(six.moves.range(0))
elif type(data['query'][self.result_member]) is list:
self._iter = iter(data['query'][self.result_member])
else:
self._iter = six.itervalues(data['query'][self.result_member])
def __repr__(self):
return "<List object '%s' for %s>" % (self.list_name, self.site)
@staticmethod
def generate_kwargs(_prefix, *args, **kwargs):
kwargs.update(args)
for key, value in six.iteritems(kwargs):
if value is not None and value is not False:
yield _prefix + key, value
@staticmethod
def get_prefix(prefix, generator=False):
return ('g' if generator else '') + prefix
@staticmethod
def get_list(generator=False):
    """Return the listing class to use: GeneratorList for generators."""
    if generator:
        return GeneratorList
    return List
class NestedList(List):
    """List whose response items live under an extra nested key."""

    def __init__(self, nested_param, *args, **kwargs):
        super(NestedList, self).__init__(*args, **kwargs)
        # Key inside the result member that holds the actual items.
        self.nested_param = nested_param

    def set_iter(self, data):
        """Iterate over the nested member of the API response."""
        member = data['query'][self.result_member]
        self._iter = iter(member[self.nested_param])
class GeneratorList(List):
    """Lazy-loaded list of Page, Image or Category objects.

    While the standard List class yields raw response data
    (optionally filtered based on the value of List.return_values),
    this subclass turns the data into Page, Image or Category objects.
    """

    def __init__(self, site, list_name, prefix, *args, **kwargs):
        super(GeneratorList, self).__init__(site, list_name, prefix,
                                            *args, **kwargs)

        # Rewrite the plain limit argument as a generator limit.
        limit_key = self.prefix + 'limit'
        self.args['g' + limit_key] = self.args[limit_key]
        del self.args[limit_key]

        self.generator = 'generator'
        self.args['prop'] = 'info|imageinfo'
        self.args['inprop'] = 'protection'
        self.result_member = 'pages'
        self.page_class = mwclient.page.Page

    def __next__(self):
        """Wrap the raw item in a Category, Image or Page object."""
        info = super(GeneratorList, self).__next__()
        namespace = info['ns']
        if namespace == 14:
            return Category(self.site, u'', info)
        elif namespace == 6:
            return mwclient.image.Image(self.site, u'', info)
        return mwclient.page.Page(self.site, u'', info)

    def load_chunk(self):
        # Put this here so that the constructor does not fail
        # on uninitialized sites.
        self.args['iiprop'] = 'timestamp|user|comment|url|size|sha1|metadata|archivename'
        return super(GeneratorList, self).load_chunk()
class Category(mwclient.page.Page, GeneratorList):
    """A category page that lazily iterates over its members."""

    def __init__(self, site, name, info=None, namespace=None):
        mwclient.page.Page.__init__(self, site, name, info)
        member_kwargs = {'gcmtitle': self.name}
        if namespace:
            member_kwargs['gcmnamespace'] = namespace
        GeneratorList.__init__(self, site, 'categorymembers', 'cm', **member_kwargs)

    def __repr__(self):
        return "<Category object '%s' for %s>" % (self.name.encode('utf-8'), self.site)

    def members(self, prop='ids|title', namespace=None, sort='sortkey',
                dir='asc', start=None, end=None, generator=True):
        """Return a fresh listing over this category's members."""
        prefix = self.get_prefix('cm', generator)
        member_args = dict(self.generate_kwargs(prefix, prop=prop,
                                                namespace=namespace, sort=sort,
                                                dir=dir, start=start, end=end,
                                                title=self.name))
        list_cls = self.get_list(generator)
        return list_cls(self.site, 'categorymembers', 'cm', **member_args)
class PageList(GeneratorList):
    """Lazy listing over 'allpages', yielding Page/Image/Category objects.

    Also supports mapping-style access (`site.pages['Title']`).
    """

    def __init__(self, site, prefix=None, start=None, namespace=0, redirects='all', end=None):
        self.namespace = namespace
        kwargs = {}
        if prefix:
            kwargs['gapprefix'] = prefix
        if start:
            kwargs['gapfrom'] = start
        if end:
            kwargs['gapto'] = end
        super(PageList, self).__init__(site, 'allpages', 'ap',
                                       gapnamespace=text_type(namespace),
                                       gapfilterredir=redirects,
                                       **kwargs)

    def __getitem__(self, name):
        # Mapping-style access; passes info=None (vs. get()'s default of ()).
        return self.get(name, None)

    def get(self, name, info=()):
        """Return the page of name `name` as an object.

        If self.namespace is not zero, use {namespace}:{name} as the
        page name, otherwise guess the namespace from the name using
        `self.guess_namespace`.

        Returns:
            One of Category, Image or Page (default), according to namespace.
        """
        if self.namespace != 0:
            full_page_name = u"{namespace}:{name}".format(
                namespace=self.site.namespaces[self.namespace],
                name=name,
            )
            namespace = self.namespace
        else:
            full_page_name = name
            try:
                namespace = self.guess_namespace(name)
            except AttributeError:
                # raised when `name` doesn't have a `startswith` attribute
                namespace = 0
        # Namespace 14 = categories, 6 = files/images.
        cls = {
            14: Category,
            6: mwclient.image.Image,
        }.get(namespace, mwclient.page.Page)
        return cls(self.site, full_page_name, info)

    def guess_namespace(self, name):
        """Guess the namespace from name

        If name starts with any of the site's namespaces' names or
        default_namespaces, use that. Else, return zero.

        Args:
            name (str): The pagename as a string (having `.startswith`)

        Returns:
            The id of the guessed namespace or zero.
        """
        for ns in self.site.namespaces:
            if ns == 0:
                continue
            # Page titles use underscores where namespace names use spaces.
            if name.startswith(u'%s:' % self.site.namespaces[ns].replace(' ', '_')):
                return ns
            elif ns in self.site.default_namespaces:
                if name.startswith(u'%s:' % self.site.default_namespaces[ns].replace(' ', '_')):
                    return ns
        return 0
class PageProperty(List):
    """List over a property ('prop' query) of a single page."""

    def __init__(self, page, prop, prefix, *args, **kwargs):
        super(PageProperty, self).__init__(page.site, prop, prefix,
                                           titles=page.name,
                                           *args, **kwargs)
        self.page = page
        self.generator = 'prop'

    def set_iter(self, data):
        """Iterate the requested property of the matching page only."""
        for page_data in six.itervalues(data['query']['pages']):
            if page_data['title'] != self.page.name:
                continue
            self._iter = iter(page_data.get(self.list_name, ()))
            return
        # The page was not present in the response at all.
        raise StopIteration
class PagePropertyGenerator(GeneratorList):
    """GeneratorList over a property of a single page (titles=page.name)."""

    def __init__(self, page, prop, prefix, *args, **kwargs):
        super(PagePropertyGenerator, self).__init__(page.site, prop, prefix,
                                                    titles=page.name,
                                                    *args, **kwargs)
        self.page = page
class RevisionsIterator(PageProperty):
    """PageProperty over revisions with rvstart/rvstartid continuation."""

    def load_chunk(self):
        # Once the server supplies 'rvstartid' as the continuation marker,
        # the original 'rvstart' argument must be dropped.
        args = self.args
        if 'rvstartid' in args and 'rvstart' in args:
            del args['rvstart']
        return super(RevisionsIterator, self).load_chunk()
| {
"content_hash": "bd2cffe1005e192bc83edcd68a97670a",
"timestamp": "",
"source": "github",
"line_count": 311,
"max_line_length": 102,
"avg_line_length": 33.549839228295816,
"alnum_prop": 0.5615296147211041,
"repo_name": "ubibene/mwclient",
"id": "34d4d72fd6d8a72fb9f05beb189360cea05edfd2",
"size": "10434",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mwclient/listing.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "120000"
}
],
"symlink_target": ""
} |
class Solution:
    """Largest rectangle in a histogram, via a monotonic index stack."""

    def largestRectangleArea(self, height):
        """Return the maximal rectangular area fitting under the bars."""
        if not height:
            return 0
        best = 0
        # Indices of bars with non-decreasing heights; a trailing sentinel
        # bar of height 0 flushes everything left on the stack at the end.
        pending = []
        bars = list(height) + [0]
        for idx, bar in enumerate(bars):
            while pending and bars[pending[-1]] > bar:
                top = pending.pop()
                start = pending[-1] + 1 if pending else 0
                best = max(best, bars[top] * (idx - start))
            pending.append(idx)
        return best
| {
"content_hash": "309d07f4b84cbfee64acfd6f37b831fc",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 52,
"avg_line_length": 28,
"alnum_prop": 0.4412442396313364,
"repo_name": "rahul-ramadas/leetcode",
"id": "377d13fce1e8b70fb0fc018093480ee4b0fb8a5a",
"size": "868",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "largest-rectangle-in-histogram/Solution.9029769.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "121"
},
{
"name": "C++",
"bytes": "107572"
},
{
"name": "Python",
"bytes": "167196"
}
],
"symlink_target": ""
} |
from django.conf.urls import url
from . import views
# Namespace used when reversing, e.g. 'voucher_table:index'.
app_name = 'voucher_table'
urlpatterns = [
    # Landing page with the query form.
    url(r'^$', views.index, name='index'),
    # Query results table.
    url(r'^results/$', views.results, name='results'),
]
| {
"content_hash": "07fa710c56f2aaa6ecfba868fa732e45",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 54,
"avg_line_length": 19.9,
"alnum_prop": 0.6482412060301508,
"repo_name": "carlosp420/VoSeq",
"id": "0c950a4f22c9a2c36084aec38878f50a255934ab",
"size": "199",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "voucher_table/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "19319"
},
{
"name": "HTML",
"bytes": "95764"
},
{
"name": "Makefile",
"bytes": "3295"
},
{
"name": "Python",
"bytes": "357630"
},
{
"name": "Shell",
"bytes": "11587"
}
],
"symlink_target": ""
} |
import unittest
import os
import comm
import commands
import urllib2
import json
class TestCrosswalkApptoolsFunctions(unittest.TestCase):
    """Python 2 integration test for the crosswalk-app packaging tool."""

    def test_target_version(self):
        """`crosswalk-app create` must fail and report a target-version
        error when the Android SDK `platforms/` directory is missing."""
        comm.setUp()
        comm.clear("org.xwalk.test")
        # Locate the Android SDK root from the `android` tool on PATH.
        cmd = "which android"
        androidpath = commands.getstatusoutput(cmd)
        targetversionpath = os.path.dirname(os.path.dirname(androidpath[1]))
        os.chdir(targetversionpath)
        # Temporarily move the SDK's platforms/ directory out of the way.
        movepath = os.path.dirname(os.path.dirname(targetversionpath)) + "/new-platforms/"
        commands.getstatusoutput("mv platforms/ " + movepath)
        os.chdir(comm.XwalkPath)
        createcmd = comm.PackTools + "crosswalk-app create org.xwalk.test"
        packstatus = commands.getstatusoutput(createcmd)
        # Restore the platforms/ directory before asserting.
        os.chdir(movepath + "../")
        commands.getstatusoutput("mv new-platforms/ " + targetversionpath + "/platforms/")
        comm.clear("org.xwalk.test")
        # Creation must fail (non-zero status) and mention the target version.
        self.assertNotEquals(packstatus[0], 0)
        self.assertIn("target version", packstatus[1])
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "e41a81a8d52a9acb31f1707012fd4c37",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 90,
"avg_line_length": 37.785714285714285,
"alnum_prop": 0.6729678638941399,
"repo_name": "pk-sam/crosswalk-test-suite",
"id": "2a97ec654a7bbdbf7adeb03d9a9bc941388d5030",
"size": "2605",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apptools/apptools-android-tests/apptools/check_target_version.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "28136"
},
{
"name": "CSS",
"bytes": "697706"
},
{
"name": "CoffeeScript",
"bytes": "18978"
},
{
"name": "Cucumber",
"bytes": "63597"
},
{
"name": "GLSL",
"bytes": "3495"
},
{
"name": "Groff",
"bytes": "12"
},
{
"name": "HTML",
"bytes": "39810614"
},
{
"name": "Java",
"bytes": "602994"
},
{
"name": "JavaScript",
"bytes": "17479410"
},
{
"name": "Makefile",
"bytes": "1044"
},
{
"name": "PHP",
"bytes": "44946"
},
{
"name": "Python",
"bytes": "4304927"
},
{
"name": "Shell",
"bytes": "1100341"
},
{
"name": "XSLT",
"bytes": "767778"
}
],
"symlink_target": ""
} |
import sys
import os
# Make the package under test importable when run from the test directory.
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#
# Script for the interactive test harness: step/screenshot timings and the
# transition class exercised by this test.
testinfo = "s, t 0.5, s, t 1, s, t 1.5, s, t 2.1, s, q"
tags = "FlipAngular3DTransition"
import cocos
from cocos.director import director
from cocos.actions import *
from cocos.layer import *
from cocos.scenes import *
from cocos.sprite import *
import pyglet
from pyglet.gl import *
class BackgroundLayer(cocos.layer.Layer):
    """Layer that blits a static background image every frame."""

    def __init__(self):
        super(BackgroundLayer, self).__init__()
        self.img = pyglet.resource.image('background_image.png')

    def draw(self):
        # Reset color, apply this layer's transform, blit, restore matrix.
        glColor4ub(255, 255, 255, 255)
        glPushMatrix()
        self.transform()
        self.img.blit(0, 0)
        glPopMatrix()
def main():
    """Run a FlipAngular3D transition between two demo scenes."""
    director.init(resizable=True)

    scene_from = cocos.scene.Scene()
    scene_to = cocos.scene.Scene()

    # Second scene: a solid color layer with a sprite in the middle.
    color_layer = ColorLayer(32, 32, 255, 255)
    color_layer.add(Sprite('grossini.png', (320, 240)))

    scene_from.add(BackgroundLayer(), z=0)
    scene_to.add(color_layer, z=0)

    director.run(FlipAngular3DTransition(scene_from, 2, scene_to))
# Run the demo when executed directly.
if __name__ == '__main__':
    main()
| {
"content_hash": "5640fd1c41ab419f9da7e6b8ea7f1774",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 65,
"avg_line_length": 25.88888888888889,
"alnum_prop": 0.6128755364806867,
"repo_name": "shadowmint/nwidget",
"id": "98f52c29332e7e1eee0448bd1fefb33f9ab2e3c1",
"size": "1239",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/cocos2d-0.5.5/test/test_transition_flip_angular.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "11298"
},
{
"name": "JavaScript",
"bytes": "17394"
},
{
"name": "PHP",
"bytes": "2190"
},
{
"name": "Python",
"bytes": "9815941"
},
{
"name": "Shell",
"bytes": "10521"
}
],
"symlink_target": ""
} |
import htmlmin # html minifier
import slimit # js minifier
import cssutils
import bs4
import urllib2
import urlparse
import wsgiref
def minify(html, base_url='', memcache=None):
    """Minify an HTML string.

    `base_url` resolves relative script/style URLs for inlining;
    `memcache` is an optional memcache-like client used for caching.
    """
    # BUG FIX: the original passed the undefined name `url` to Minifier,
    # raising NameError on every call; the parameter is `base_url`.
    m = Minifier(base_url)
    m.memcache = memcache
    return m.minify(html, 'text/html')
class WSGIMiddleware:
    """WSGI middleware that minifies responses (disabled in debug mode)."""

    def __init__(self, app, memcache=None):
        self.app = app
        self.memcache = memcache

    def __call__(self, environ, start_response):
        # BUG FIX: the module only does `import wsgiref`, which does not
        # bind the `util` submodule, so `wsgiref.util.request_uri` raised
        # AttributeError; import the helper explicitly here.
        from wsgiref.util import request_uri

        # Pass through untouched while the wrapped app is in debug mode.
        if self.app.debug:
            return self.app(environ, start_response)

        # Capture the Content-Type emitted by the wrapped application.
        content_type = ['application/octet-stream']

        def new_start_response(status, headers):
            for header, value in headers:
                if header == 'Content-Type':
                    content_type[0] = value
            return start_response(status, headers)

        results = self.app(environ, new_start_response)
        content = "".join(results).decode('utf-8')
        m = Minifier(request_uri(environ))
        m.memcache = self.memcache
        return [m.minify(content, content_type[0]).encode('utf-8')]
class Minifier(object):
    """Inlines and minifies HTML, JavaScript and CSS.

    External scripts and stylesheets referenced from HTML are fetched
    (resolved against `base_url`), inlined and minified.  Every expensive
    step is memoized in `memcache` when a client is provided.
    """

    # Optional memcache-like client (get/add); None disables caching.
    memcache = None
    # Cache lifetime for minified artifacts, in seconds.
    cache_time = 60 * 60

    def __init__(self, base_url=''):
        self.base_url = base_url

    def minify(self, _content, content_type):
        """Return `_content` minified according to its `content_type`."""
        def _minify():
            content = _content
            if content_type.startswith('text/html'):
                soup = bs4.BeautifulSoup(content)
                self.inline_and_minify_js(soup)
                self.inline_and_minify_css(soup)
                content = unicode(soup)
                content = htmlmin.minify(content, remove_comments=True, remove_empty_space=True, remove_all_empty_space=True, reduce_empty_attributes=True, reduce_boolean_attributes=True)
            elif content_type.startswith('text/javascript'):
                content = self.minified_js(content)
            elif content_type.startswith('text/css'):
                content = self.minified_css(content)
            return content
        # Key the cache on both the content and its declared type.
        return self.cached((_content + content_type).encode('utf-8'), _minify)

    def inline_and_minify_js(self, soup):
        """Inline external <script src=...> tags and minify bodies in place."""
        for script in soup.find_all('script'):
            minify_this = not script.has_attr('no-minify')
            if script.has_attr('src'):
                if script.has_attr('no-inline'):
                    continue
                url = urlparse.urljoin(self.base_url, script['src'])
                script_src = self.fetch_url(url).decode('utf-8')
                # Pre-minified scripts are inlined untouched.
                if url.endswith('.min.js'):
                    minify_this = False
                script.clear()
                script.append(soup.new_string(script_src))
                del script['src']
            if minify_this:
                script_contents = u"".join(script.strings)
                script.clear()
                script.append(soup.new_string(self.minified_js(script_contents)))

    def minified_js(self, js):
        """Return `js` minified (cached)."""
        def _minified():
            return slimit.minify(js, mangle=False)
        return self.cached(js.encode('utf-8'), _minified)

    def inline_and_minify_css(self, soup):
        """Minify <style> bodies and inline external stylesheets in place."""
        for style in soup.find_all('style'):
            if style.has_attr('no-minify'):
                continue
            content = u"".join(style.strings)
            content = self.minified_css(content)
            style.clear()
            style.append(soup.new_string(content))
        for link in soup.find_all('link', rel='stylesheet'):
            if link.has_attr('href') and not link.has_attr('no-inline'):
                css = self.fetch_url(urlparse.urljoin(self.base_url, link['href'])).decode('utf-8')
                # BUG FIX: was `link.has_key('no-minify')` -- `Tag.has_key`
                # was removed from bs4; every other attribute test in this
                # class uses `has_attr`.
                content = css if link.has_attr('no-minify') else self.minified_css(css)
                style_tag = soup.new_tag('style')
                style_tag.append(soup.new_string(content))
                link.replace_with(style_tag)

    def minified_css(self, css):
        """Return `css` minified with @imports resolved (cached)."""
        def _minified():
            href = self.base_url if self.base_url != '' else None
            sheet = cssutils.parseString(css, href=href)
            sheet = cssutils.resolveImports(sheet)
            cssutils.ser.prefs.useMinified()
            return sheet.cssText
        return self.cached(css.encode('utf-8'), _minified)

    def cached(self, key, func):
        """Return `func()` memoized under `key` in memcache (best effort)."""
        result = self.memcache.get(key) if self.memcache else None
        if not result:
            result = func()
            if self.memcache:
                try:
                    self.memcache.add(key, result, self.cache_time)
                except Exception:
                    # A cache failure must never break the response.
                    pass
        return result

    def fetch_url(self, url):
        """Return the body of `url` (cached)."""
        def _fetch():
            return urllib2.urlopen(url).read()
        return self.cached(url, _fetch)
| {
"content_hash": "cb51177e03cb59dc8ee001cbeada506d",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 175,
"avg_line_length": 33.02542372881356,
"alnum_prop": 0.6920708237105466,
"repo_name": "nate-parrott/m.py",
"id": "4ac67356af12f9f9c2dc1ba18800fe5725e3fadd",
"size": "3897",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "m.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3897"
}
],
"symlink_target": ""
} |
from msrest.serialization import Model
class VirtualMachineScaleSetStorageProfile(Model):
    """Describes a virtual machine scale set storage profile.

    :param image_reference: Specifies information about the image to use
     (platform image, marketplace image, or virtual machine image). Required
     when creating from an image; unused in other creation operations.
    :type image_reference:
     ~azure.mgmt.compute.v2017_03_30.models.ImageReference
    :param os_disk: Specifies information about the operating system disk
     used by the virtual machines in the scale set. See [About disks and
     VHDs for Azure virtual
     machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-about-disks-vhds?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json).
    :type os_disk:
     ~azure.mgmt.compute.v2017_03_30.models.VirtualMachineScaleSetOSDisk
    :param data_disks: Specifies the parameters that are used to add data
     disks to the virtual machines in the scale set (see link above).
    :type data_disks:
     list[~azure.mgmt.compute.v2017_03_30.models.VirtualMachineScaleSetDataDisk]
    """

    # Maps Python attribute names to wire-format keys and msrest types.
    _attribute_map = {
        'image_reference': {'key': 'imageReference', 'type': 'ImageReference'},
        'os_disk': {'key': 'osDisk', 'type': 'VirtualMachineScaleSetOSDisk'},
        'data_disks': {'key': 'dataDisks', 'type': '[VirtualMachineScaleSetDataDisk]'},
    }

    def __init__(self, **kwargs):
        super(VirtualMachineScaleSetStorageProfile, self).__init__(**kwargs)
        # All three fields are optional keyword arguments defaulting to None.
        self.image_reference = kwargs.get('image_reference')
        self.os_disk = kwargs.get('os_disk')
        self.data_disks = kwargs.get('data_disks')
| {
"content_hash": "7f8f5dc9845e3fa06e745d99eaa0cb9d",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 161,
"avg_line_length": 55.6578947368421,
"alnum_prop": 0.7219858156028369,
"repo_name": "lmazuel/azure-sdk-for-python",
"id": "eea90cc9e86ea5544b7993a38cb7f95f2536d5df",
"size": "2589",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-compute/azure/mgmt/compute/v2017_03_30/models/virtual_machine_scale_set_storage_profile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42572767"
}
],
"symlink_target": ""
} |
from django.conf.urls import url, include, patterns
from userflow import conf, views
# Sign in / sign up / sign out.
sign_urls = patterns('',
    url('^signin/', views.sign.signin.SigninView.as_view(), name='signin'),
    url('^signup/', views.sign.signup.SignupView.as_view(), name='signup'),
    url('^signout/$', views.sign.signout.SignoutView.as_view(), name='signout'),
)

# Password reset flow: request -> wait for mail -> set a new password.
reset_urls = patterns('',
    url('^request/$', views.reset.request.PasswordResetView.as_view(), name='request'),
    url('^wait/(?P<pk>\d+)/(?P<key>[^/]+)/$', views.reset.wait.ResetWaitView.as_view(), name='wait'),
    url('^change/(?P<pk>\d+)/(?P<key>[^/]+)/$', views.reset.change.SetPasswordView.as_view(), name='confirm'),
)

# Email verification flow.
verify_urls = patterns('',
    url('^confirm/(?P<pk>\d+)/(?P<key>[^/]+)/$', views.verify.confirm.ConfirmEmailView.as_view(), name='confirm'),
    url('^wait/(?P<pk>\d+)/(?P<key>[^/]+)/$', views.verify.wait.WaitConfirmEmailView.as_view(), name='wait'),
    url('^request/(?P<pk>\d+)/$', views.verify.request.RequestConfirmEmailView.as_view(), name='request'),
)

# Public profile pages (own profile and by primary key).
profile_view_urls = patterns('',
    url('^$', views.profile.user.UserProfileView.as_view(), name='view'),
    url('^(?P<pk>\d+)/$', views.profile.user.UserProfileView.as_view(), name='view'),
)

# Profile editing tabs; the `name` capture selects the form shown.
profile_edit_urls = patterns('',
    url(r'^$', views.profile.edit.PersonalEditView.as_view(), name='edit'),
    url(r'^(?P<name>personal)/$', views.profile.edit.PersonalEditView.as_view(), name='edit'),
    url(r'^(?P<name>about)/$', views.profile.edit.AboutEditView.as_view(), name='edit'),
    url(r'^(?P<name>password)/$', views.profile.edit.PasswordView.as_view(), name='edit'),
)

# Management of the user's email addresses.
emails_urls = patterns('',
    url(r'^(?P<pk>\d+)/(?P<action>public|private)/$', views.profile.emails.public.PublicUpdateView.as_view(), name='public'),
    url(r'^(?P<pk>\d+)/primary/$', views.profile.emails.primary.PrimaryUpdateView.as_view(), name='primary'),
    url(r'^(?P<pk>\d+)/remove/$', views.profile.emails.remove.RemoveView.as_view(), name='remove'),
    url(r'^(?P<pk>\d+)/verify/$', views.profile.emails.verify.VerifyView.as_view(), name='verify'),
    url(r'^add/$', views.profile.emails.create.AddEmailView.as_view(), name='add'),
)

# Management of the user's contacts.
contacts_urls = patterns('',
    url(r'^(?P<pk>\d+)/remove/$', views.profile.contacts.remove.RemoveView.as_view(), name='remove'),
    url(r'^add/$', views.profile.contacts.create.AddContactView.as_view(), name='add'),
)

# Aggregated profile URLs: view, edit, emails, contacts.
profile_urls = patterns('',
    url(r'^', include(profile_view_urls)),
    url(r'^edit/', include(profile_edit_urls)),
    url(r'^emails/', include(emails_urls, namespace='emails')),
    url(r'contacts/', include(contacts_urls, namespace='contacts')),
)

# Account self-deletion is exposed only when enabled in settings.
if conf.USERS_CAN_SUICIDE:
    profile_urls.append(url('^delete/', views.profile.suicide.SuicideView.as_view(), name='suicide'))

# Root URL configuration, namespaced under `users`.
urlpatterns = patterns('',
    url(r'^', include(patterns('',
        url(r'^', include(sign_urls)),
        url(r'^reset/', include(reset_urls, namespace='reset')),
        url(r'^verify/', include(verify_urls, namespace='verify')),
        url(r'^profile/', include(profile_urls, namespace='profile')),
    ), namespace='users')),
)
| {
"content_hash": "8a14bbb3bfaa0c158045288c3a4527bf",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 125,
"avg_line_length": 47.92307692307692,
"alnum_prop": 0.6414125200642055,
"repo_name": "alexey-grom/django-userflow",
"id": "627d970ebe78edb7f93b94de364270ef8f6395e4",
"size": "3134",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "userflow/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "28584"
},
{
"name": "Python",
"bytes": "63319"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import os
import nox
@nox.session
def default(session):
    """Run the unit tests on whichever interpreter nox was invoked with."""
    return unit(session, 'default')
@nox.session
@nox.parametrize('py', ['2.7', '3.5', '3.6', '3.7'])
def unit(session, py):
    """Run the unit test suite."""
    # 'default' means: keep whatever interpreter nox itself runs under.
    if py != 'default':
        session.interpreter = 'python{}'.format(py)

    # One virtualenv per interpreter so runs do not clobber each other.
    session.virtualenv_dirname = 'unit-' + py

    # Test dependencies first, then this package installed in-place.
    session.install('pytest', 'mock')
    session.install('-e', '.')

    unit_tests = os.path.join('tests', 'unit')
    session.run('py.test', '--quiet', unit_tests)
@nox.session
@nox.parametrize('py', ['2.7', '3.7'])
def system(session, py):
    """Run the system test suite (requires real credentials)."""
    # Sanity check: only run system tests if credentials are configured.
    if not os.environ.get('GOOGLE_APPLICATION_CREDENTIALS', ''):
        session.skip('Credentials must be set via environment variable.')

    session.interpreter = 'python{}'.format(py)
    session.virtualenv_dirname = 'sys-' + py

    # Test dependencies first, then this package installed in-place.
    session.install('pytest')
    session.install('-e', '.')

    system_tests = os.path.join('tests', 'system')
    session.run('py.test', '--quiet', system_tests, *session.posargs)
@nox.session
def lint_setup_py(session):
    """Verify that setup.py is valid (including its reStructuredText)."""
    session.interpreter = 'python3.6'
    session.install('docutils', 'pygments')
    session.run(
        'python', 'setup.py', 'check', '--restructuredtext', '--strict')
| {
"content_hash": "29ef54f92939da126ff22ca80c009f8d",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 77,
"avg_line_length": 29.629032258064516,
"alnum_prop": 0.6456178551986935,
"repo_name": "google/streetview-publish-client-libraries",
"id": "c3504011aaf4a77605a76a7a1cc8348990884329",
"size": "2439",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "client_libraries/python_library/nox.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C#",
"bytes": "131651"
},
{
"name": "Go",
"bytes": "42411"
},
{
"name": "Java",
"bytes": "248514"
},
{
"name": "JavaScript",
"bytes": "104298"
},
{
"name": "PHP",
"bytes": "57032"
},
{
"name": "Python",
"bytes": "113820"
},
{
"name": "Ruby",
"bytes": "65959"
}
],
"symlink_target": ""
} |
ANSIBLE_METADATA = {'status': ['stableinterface'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: oneview_logical_switch_group_facts
short_description: Retrieve facts about OneView Logical Switch Groups.
description:
- Retrieve facts about the Logical Switch Groups of the OneView.
version_added: "2.3"
requirements:
- "python >= 2.7.9"
- "hpeOneView >= 5.4.0"
author: "Gustavo Hennig (@GustavoHennig)"
options:
name:
description:
- Logical Switch Group name.
required: false
notes:
- This resource is only available on C7000 enclosures
extends_documentation_fragment:
- oneview
- oneview.factsparams
'''
EXAMPLES = '''
- name: Gather facts about all Logical Switch Groups
oneview_logical_switch_group_facts:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 800
delegate_to: localhost
- debug: var=logical_switch_groups
- name: Gather paginated, filtered and sorted facts about Logical Switch Groups
oneview_logical_switch_group_facts:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 800
params:
start: 0
count: 3
sort: 'name:descending'
filter: "name='Logical_Switch_Group+56'"
- debug: var=logical_switch_groups
- name: Gather facts about a Logical Switch Group by name
oneview_logical_switch_group_facts:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 800
name: "LogicalSwitchGroupDemo"
delegate_to: localhost
- debug: var=logical_switch_groups
'''
RETURN = '''
logical_switch_groups:
description: Has all the OneView facts about the Logical Switch Groups.
returned: Always, but can be null.
type: dict
'''
from ansible.module_utils.oneview import OneViewModule
class LogicalSwitchGroupFactsModule(OneViewModule):
    """Facts module gathering OneView Logical Switch Groups."""

    argument_spec = dict(
        name=dict(required=False, type='str'),
        params=dict(required=False, type='dict'),
    )

    def __init__(self):
        super(LogicalSwitchGroupFactsModule, self).__init__(additional_arg_spec=self.argument_spec)
        self.resource_client = self.oneview_client.logical_switch_groups

    def execute_module(self):
        """Fetch groups by name when given, otherwise all (honoring params)."""
        name = self.module.params.get('name')
        if name:
            logical_switch_groups = self.resource_client.get_by('name', name)
        else:
            logical_switch_groups = self.resource_client.get_all(**self.facts_params)
        return dict(changed=False,
                    ansible_facts=dict(logical_switch_groups=logical_switch_groups))
def main():
    """Module entry point."""
    LogicalSwitchGroupFactsModule().run()
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| {
"content_hash": "2050de9351340cf14327f104d0a17169",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 99,
"avg_line_length": 27.294117647058822,
"alnum_prop": 0.6770833333333334,
"repo_name": "HewlettPackard/oneview-ansible",
"id": "d9526978797a17e760e5425f67e537f1ae8e24cb",
"size": "3443",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "library/oneview_logical_switch_group_facts.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1716153"
},
{
"name": "Shell",
"bytes": "5675"
}
],
"symlink_target": ""
} |
import functools
import typing as t
import click
def user_credential_id_arg(
f: t.Optional[t.Callable] = None, *, metavar: str = "USER_CREDENTIAL_ID"
):
if f is None:
return functools.partial(user_credential_id_arg, metavar=metavar)
return click.argument("user_credential_id", metavar=metavar, type=click.UUID)(f)
def user_credential_create_and_update_params(
f: t.Optional[t.Callable] = None, *, create: bool = False
) -> t.Callable:
"""
Collection of options consumed by user credential create and update.
Passing create as True makes any values required for create
arguments instead of options.
"""
if f is None:
return functools.partial(
user_credential_create_and_update_params, create=create
)
# identity_id, username, and storage gateway are required for create
# and immutable on update
if create:
f = click.argument("local-username")(f)
f = click.argument("globus-identity")(f)
f = click.argument("storage-gateway", type=click.UUID)(f)
f = click.option("--display-name", help="Display name for the credential.")(f)
return f
| {
"content_hash": "393e66f2fb065d5986ec0d9bfee72a1e",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 84,
"avg_line_length": 31.37837837837838,
"alnum_prop": 0.6709732988802756,
"repo_name": "globus/globus-cli",
"id": "e7b79b2039de62316c52d88ee32148b5ec511c9e",
"size": "1161",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/globus_cli/commands/endpoint/user_credential/_common.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jinja",
"bytes": "425"
},
{
"name": "Makefile",
"bytes": "764"
},
{
"name": "Python",
"bytes": "746729"
},
{
"name": "Shell",
"bytes": "776"
}
],
"symlink_target": ""
} |
"""
File: otp.py
Author: Matthew Cole, Anh Quach, Dan Townley
Date: 11/20/16
"""
import math
import time
import hashlib
import hmac
import base64
"""
Constants specified by RFC 6238
"""
DIGESTS = {"sha1":hashlib.sha1, "sha256":hashlib.sha256, "sha512":hashlib.sha512}
"""
Notations specified by RFC 4226 and RFC 6238
C : the 8-byte counter value, the moving factor. This value must be synchronized
between client and server.
K : the shared secret between client and server; each HOTP generator has a
different and unique secret K.
X : the time step in seconds. Default: X = 30
TO: the unix time to start counting time steps (i.e. the Unix epoch)
T0 = Thu Jan 1 00:00:00 1970
T : number of time steps between the initial counter time T0 and the current
Unix time
digit : the number of digits in the output, left padded with '0' as required
"""
def provision(tup):
    """
    Convert a typed URI 7-tuple into a well-formed otpauth:// URI string.
    """
    # Partitioning
    otp_type, name, secret, issuer, algorithm, digits, period = tup
    # Assembling
    query = "&".join([
        "secret=" + secret,
        "issuer=" + issuer,
        "algorithm=" + algorithm.upper(),
        "digits=" + str(digits),
        "period=" + str(period),
    ])
    return "otpauth://" + otp_type + "/" + issuer + ":" + name + "?" + query
def deprovision(uri):
    """
    Convert an otpauth:// URI string into a typed URI 7-tuple.
    """
    # Peel off scheme, type, issuer label and name in turn.
    _, _, rest = uri.partition("://")
    otp_type, _, rest = rest.partition("/")
    _, _, rest = rest.partition(":")
    name, _, query = rest.partition("?")
    # Parse the five expected key=value query fields.
    # NOTE: split("=")[1] intentionally drops base32 '=' padding, matching
    # the original behavior.
    secret_f, issuer_f, algorithm_f, digits_f, period_f = query.split("&")
    secret = secret_f.split("=")[1]
    issuer = issuer_f.split("=")[1]
    algorithm = algorithm_f.split("=")[1].lower()
    digits = int(digits_f.split("=")[1])
    period = int(period_f.split("=")[1])
    return (otp_type, name, secret, issuer, algorithm, digits, period)
def T(t=None, X=30):
    """
    Return the number of time steps of width <X> seconds between
    T0 (the Unix epoch) and time <t> (default: now).

    <t> may be an int/float epoch timestamp or a time.struct_time.
    """
    # BUG FIX: the default was `t=time.time()`, which is evaluated ONCE at
    # import time, freezing the clock for every default call (so TOTP()
    # always returned the code for the moment the module was imported).
    if t is None:
        t = time.time()
    # Down-cast struct_time types to an integer
    # number of seconds, if needed
    if isinstance(t, time.struct_time):
        t = time.mktime(t)
    # time.time()/mktime return floats; floor to whole seconds.
    if isinstance(t, float):
        t = int(math.floor(t))
    # Calculate time steps using floor via integer division.
    return int(t // X)
def HOTP(K, C, digit=6, digest=hashlib.sha1):
    """
    Return the HOTP value (RFC 4226) for base-32 key <K> and counter <C>,
    as a string of <digit> decimal digits, left padded with '0'.

    <digest> is a hashlib constructor, or its name ("sha1", "sha256",
    "sha512").  Raises ValueError for an unknown digest name.
    """
    # Resolve a digest given by name.  BUG FIX: the original caught
    # ValueError, but a failed dict lookup raises KeyError, so the handler
    # was dead code and unknown names escaped as raw KeyErrors.
    if isinstance(digest, str):
        try:
            digest = DIGESTS[digest]
        except KeyError:
            raise ValueError(
                "Digest mode (%s) not one of: %s" % (digest, sorted(DIGESTS)))
    # Pad the secret to a multiple of the base32 block size (8),
    # then decode it to a bytes object.
    K = str(K)
    pad = len(K) % 8
    if pad != 0:
        K += '=' * (8 - pad)
    K = base64.b32decode(K, casefold=True)
    # Serialize the counter as an 8-byte big-endian value.
    ba = bytearray()
    while C != 0:
        ba.append(C & 0xFF)
        C >>= 8
    C = bytes(bytearray(reversed(ba)).rjust(8, b'\0'))
    # HMAC the counter with the key, then apply RFC 4226 dynamic truncation.
    hm = bytearray(hmac.new(K, C, digest).digest())
    offset = hm[-1] & 0xf
    code = ((hm[offset] & 0x7f) << 24 |
            (hm[offset + 1] & 0xff) << 16 |
            (hm[offset + 2] & 0xff) << 8 |
            (hm[offset + 3] & 0xff))
    # Keep the low <digit> decimal digits, left padded with zeros.
    return str(code % 10 ** digit).rjust(digit, '0')
def TOTP(K, C=None, digit=6, digest=hashlib.sha1):
    """
    Return the TOTP-value for Key <K>, Time Step as Counter <C>,
    with <digit> width, and hash digest <digest>. Return type is a string.
    """
    # BUG FIX: the original `if not C` also triggered for the
    # legitimate time step 0 (the first window after the epoch),
    # silently substituting the current time. Test for None instead.
    if C is None:
        C = T()
    return HOTP(K, C, digit=digit, digest=digest)
if __name__ == "__main__":
"""
This code is used for the test target in the Makefile.
It's a series of assertions and output messages that aren't particularly
useful to the end-user, but they are specified by the RFCs.
"""
| {
"content_hash": "51d07d7555033dbf78c753b362a3b73b",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 124,
"avg_line_length": 26.82312925170068,
"alnum_prop": 0.6601572406796855,
"repo_name": "colematt/pyOTP",
"id": "f88036ecc16be413ac016bfcf75459c117499c4d",
"size": "3967",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/otp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11622"
}
],
"symlink_target": ""
} |
from maya import cmds
def main():
    """Advance the Maya timeline by one frame, wrapping to the start.

    Undo recording is suspended around the frame change so repeated
    stepping does not pollute the undo queue.
    """
    current = cmds.currentTime(q=True)
    # Renamed from min/max: the originals shadowed the Python builtins.
    range_start = cmds.playbackOptions(q=True, min=True)
    range_end = cmds.playbackOptions(q=True, max=True)
    cmds.undoInfo(stateWithoutFlush=False)
    try:
        if current == range_end:
            cmds.currentTime(range_start)
        else:
            cmds.currentTime(current + 1)
    finally:
        # BUG FIX: always re-enable undo recording, even if the
        # currentTime call raises; otherwise undo stays disabled.
        cmds.undoInfo(stateWithoutFlush=True)
"content_hash": "a44ba10c9812230fc49a5a63af551f3b",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 88,
"avg_line_length": 29.09090909090909,
"alnum_prop": 0.7625,
"repo_name": "momotarou-zamurai/kibidango",
"id": "0a292eb32e0b7a21ae2d45528cacf575f0a15004",
"size": "703",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "maya/python/animation/timeslider/custom_next_frame.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13350"
}
],
"symlink_target": ""
} |
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# -*- coding: utf-8 -*-
"""CloudStack Python utility library"""
import sys, os, subprocess, errno, re, time, glob
import urllib2
import xml.dom.minidom
import logging
import socket
# exit() error constants
# Each constant names a failure reason encoded in the process exit
# status; callers pass one of these to the module-level exit() helper.
E_GENERIC= 1
E_NOKVM = 2
E_NODEFROUTE = 3
E_DHCP = 4
E_NOPERSISTENTNET = 5
E_NETRECONFIGFAILED = 6
E_VIRTRECONFIGFAILED = 7
E_FWRECONFIGFAILED = 8
E_AGENTRECONFIGFAILED = 9
E_AGENTFAILEDTOSTART = 10
E_NOFQDN = 11
E_SELINUXENABLED = 12
# os.EX_USAGE only exists on Unix; fall back to its conventional
# value (64, from BSD sysexits.h) on other platforms.
try: E_USAGE = os.EX_USAGE
except AttributeError: E_USAGE = 64
E_NEEDSMANUALINTERVENTION = 13
E_INTERRUPTED = 14
E_SETUPFAILED = 15
E_UNHANDLEDEXCEPTION = 16
E_MISSINGDEP = 17
# Distribution identifiers assigned to the module-level `distro` flag
# by the detection code below.
Unknown = 0
Fedora = 1
CentOS = 2
Ubuntu = 3
RHEL6 = 4
# Address-family markers used by resolve()/resolves_to_ipv6().
IPV4 = 4
IPV6 = 6
#=================== DISTRIBUTION DETECTION =================
# Pick the running distribution by probing well-known release files.
# NOTE: Python 2 code -- uses the file() builtin throughout.
if os.path.exists("/etc/fedora-release"): distro = Fedora
elif os.path.exists("/etc/centos-release"): distro = CentOS
elif os.path.exists("/etc/redhat-release"):
    version = file("/etc/redhat-release").readline()
    if version.find("Red Hat Enterprise Linux Server release 6") != -1:
        distro = RHEL6
    elif version.find("CentOS") != -1:
        distro = CentOS
    else:
        # Unrecognized Red Hat derivative: treat it like CentOS.
        distro = CentOS
elif os.path.exists("/etc/legal") and "Ubuntu" in file("/etc/legal").read(-1): distro = Ubuntu
else: distro = Unknown
# Optional log file used by stderr(); configured via setLogFile().
logFileName=None
# ================== LIBRARY UTILITY CODE=============
def setLogFile(logFile):
    """Route subsequent stderr() output to the file at path *logFile*."""
    global logFileName
    logFileName=logFile
def read_properties(propfile):
    """Parse a Java-style properties file into a dict.

    *propfile* may be a path or an open file-like object.  Blank lines
    and lines starting with '#' or ';' are skipped; the remainder are
    split on the first '=' into key/value pairs.
    """
    if not hasattr(propfile,"read"): propfile = file(propfile)
    properties = propfile.read().splitlines()
    properties = [ s.strip() for s in properties ]
    properties = [ s for s in properties if
                   s and
                   not s.startswith("#") and
                   not s.startswith(";") ]
    #[ logging.debug("Valid config file line: %s",s) for s in properties ]
    proppairs = [ s.split("=",1) for s in properties ]
    return dict(proppairs)
def stderr(msgfmt,*args):
    """Print a message to stderr, optionally interpolating the arguments into it"""
    msgfmt += "\n"
    # If a log file was configured via setLogFile(), permanently
    # redirect sys.stderr to it (append mode) before writing.
    if logFileName != None:
        sys.stderr = open(logFileName, 'a+')
    if args: sys.stderr.write(msgfmt%args)
    else: sys.stderr.write(msgfmt)
# NOTE: intentionally shadows the builtin exit() within this module.
def exit(errno=E_GENERIC,message=None,*args):
    """Exit with an error status code, printing a message to stderr if specified"""
    if message: stderr(message,*args)
    sys.exit(errno)
def resolve(host,port):
    """Resolve host/port to a list of (address, family) tuples.

    The family is derived from the sockaddr tuple length: IPv4
    sockaddrs have 2 fields (2+2 == IPV4), IPv6 have 4 (4+2 == IPV6).
    """
    return [ (x[4][0],len(x[4])+2) for x in socket.getaddrinfo(host,port,socket.AF_UNSPEC,socket.SOCK_STREAM, 0, socket.AI_PASSIVE) ]
def resolves_to_ipv6(host,port):
    """Return True if the first resolution result for *host* is IPv6."""
    return resolve(host,port)[0][1] == IPV6
###add this to Python 2.4, patching the subprocess module at runtime
# Python 2.4's subprocess module lacks check_call/CalledProcessError;
# provide compatible implementations and graft them onto the module so
# the rest of this file can use them unconditionally.
if hasattr(subprocess,"check_call"):
    from subprocess import CalledProcessError, check_call
else:
    class CalledProcessError(Exception):
        # Mirrors the 2.5+ exception: records returncode and command.
        def __init__(self, returncode, cmd):
            self.returncode = returncode ; self.cmd = cmd
        def __str__(self): return "Command '%s' returned non-zero exit status %d" % (self.cmd, self.returncode)
    subprocess.CalledProcessError = CalledProcessError
    def check_call(*popenargs, **kwargs):
        """Run a command, raising CalledProcessError on a nonzero exit."""
        retcode = subprocess.call(*popenargs, **kwargs)
        cmd = kwargs.get("args")
        if cmd is None: cmd = popenargs[0]
        if retcode: raise subprocess.CalledProcessError(retcode, cmd)
        return retcode
    subprocess.check_call = check_call
# python 2.4 does not have this
# The any()/all() builtins appeared in Python 2.5; fall back to
# pure-Python equivalents when running on 2.4.
try:
    any = any
    all = all
except NameError:
    def any(sequence):
        for i in sequence:
            if i: return True
        return False
    def all(sequence):
        for i in sequence:
            if not i: return False
        return True
class Command:
    """This class simulates a shell command.

    Attribute access accumulates subcommand words (``ip.route`` builds
    ``["ip", "route"]``); calling the object executes the command via
    subprocess, and ``cmd < "text"`` feeds text on stdin.  Successful
    runs return an object exposing .stdout/.stderr; nonzero exits raise
    CalledProcessError with .stdout/.stderr attached.
    """
    def __init__(self,name,parent=None):
        self.__name = name
        self.__parent = parent
    def __getattr__(self,name):
        # "_print" is an escape hatch for the Python 2 reserved word
        # "print" (used for augtool's print subcommand).
        if name == "_print": name = "print"
        return Command(name,self)
    def __call__(self,*args,**kwargs):
        # Run the accumulated command with extra positional arguments.
        cmd = self.__get_recursive_name() + list(args)
        #print " ",cmd
        kwargs = dict(kwargs)
        # Capture output by default; callers pass stdout=None/stderr=None
        # to let the child inherit the terminal instead.
        if "stdout" not in kwargs: kwargs["stdout"] = subprocess.PIPE
        if "stderr" not in kwargs: kwargs["stderr"] = subprocess.PIPE
        popen = subprocess.Popen(cmd,**kwargs)
        m = popen.communicate()
        ret = popen.wait()
        if ret:
            # Attach captured output to the exception for diagnostics.
            e = CalledProcessError(ret,cmd)
            e.stdout,e.stderr = m
            raise e
        class CommandOutput:
            def __init__(self,stdout,stderr):
                self.stdout = stdout
                self.stderr = stderr
        return CommandOutput(*m)
    def __lt__(self,other):
        # ``cmd < text`` runs the command with *text* written to stdin.
        cmd = self.__get_recursive_name()
        #print " ",cmd,"<",other
        popen = subprocess.Popen(cmd,stdin=subprocess.PIPE,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
        m = popen.communicate(other)
        ret = popen.wait()
        if ret:
            e = CalledProcessError(ret,cmd)
            e.stdout,e.stderr = m
            raise e
        class CommandOutput:
            def __init__(self,stdout,stderr):
                self.stdout = stdout
                self.stderr = stderr
        return CommandOutput(*m)
    def __get_recursive_name(self,sep=None):
        # Walk the parent chain to rebuild the full argv list, or a
        # sep-joined string when *sep* is given.
        m = self
        l = []
        while m is not None:
            l.append(m.__name)
            m = m.__parent
        l.reverse()
        if sep: return sep.join(l)
        else: return l
    def __str__(self):
        return '<Command %r>'%self.__get_recursive_name(sep=" ")
    def __repr__(self): return self.__str__()
# Pre-built Command wrappers for every external tool this library
# drives; invoked like Python callables throughout the setup code.
kvmok = Command("kvm-ok")
getenforce = Command("/usr/sbin/getenforce")
ip = Command("ip")
service = Command("service")
chkconfig = Command("chkconfig")
updatercd = Command("update-rc.d")
ufw = Command("ufw")
iptables = Command("iptables")
iptablessave = Command("iptables-save")
augtool = Command("augtool")
ifconfig = Command("ifconfig")
ifdown = Command("ifdown")
ifup = Command("ifup")
brctl = Command("brctl")
uuidgen = Command("uuidgen")
def is_service_running(servicename):
    """Return True if the init service reports itself as running."""
    try:
        o = service(servicename,"status")
        if distro is Ubuntu:
            # status in ubuntu does not signal service status via return code
            if "start/running" in o.stdout: return True
            return False
        else:
            # retcode 0, service running
            return True
    except CalledProcessError,e:
        # retcode nonzero, service not running
        return False
def stop_service(servicename,force=False):
    """Stop the service if running (or unconditionally with force=True)."""
    # This function is idempotent. N number of calls have the same result as N+1 number of calls.
    if is_service_running(servicename) or force: service(servicename,"stop",stdout=None,stderr=None)
def disable_service(servicename):
    """Stop the service AND remove it from the boot sequence."""
    # Stops AND disables the service
    stop_service(servicename)
    if distro is Ubuntu:
        updatercd("-f",servicename,"remove",stdout=None,stderr=None)
    else:
        chkconfig("--del",servicename,stdout=None,stderr=None)
def start_service(servicename,force=False):
    """Start the service if not running (or unconditionally with force=True)."""
    # This function is idempotent unless force is True. N number of calls have the same result as N+1 number of calls.
    if not is_service_running(servicename) or force: service(servicename,"start",stdout=None,stderr=None)
def enable_service(servicename,forcestart=False):
    """Add the service to the boot sequence, then start it.

    With forcestart=True the service is (re)started even if running.
    """
    # NOTE: the original comment here ("Stops AND disables") was a
    # copy-paste error from disable_service(); this function ENABLES.
    if distro is Ubuntu:
        updatercd("-f",servicename,"remove",stdout=None,stderr=None)
        updatercd("-f",servicename,"start","2","3","4","5",".",stdout=None,stderr=None)
    else:
        chkconfig("--add",servicename,stdout=None,stderr=None)
        chkconfig("--level","345",servicename,"on",stdout=None,stderr=None)
    start_service(servicename,force=forcestart)
def replace_line(f,startswith,stanza,always_add=False):
    """Replace every line of file *f* starting with *startswith* by *stanza*.

    With always_add=True the stanza is appended when nothing matched.
    The file is rewritten in place; all lines are stripped first.
    """
    lines = [ s.strip() for s in file(f).readlines() ]
    newlines = []
    replaced = False
    for line in lines:
        if line.startswith(startswith):
            newlines.append(stanza)
            replaced = True
        else: newlines.append(line)
    if not replaced and always_add: newlines.append(stanza)
    newlines = [ s + '\n' for s in newlines ]
    file(f,"w").writelines(newlines)
def replace_or_add_line(f,startswith,stanza):
    """replace_line() variant that appends the stanza when absent."""
    return replace_line(f,startswith,stanza,always_add=True)
# ==================================== CHECK FUNCTIONS ==========================
# If they return without exception, it's okay. If they raise a CheckFailed exception, that means a condition
# (generallly one that needs administrator intervention) was detected.
class CheckFailed(Exception):
    """Raised by check_* functions when a precondition needs admin attention."""
    pass
#check function
def check_hostname():
    """If the hostname is a non-fqdn, fail with CalledProcessError. Else return 0."""
    try: check_call(["hostname",'--fqdn'])
    except CalledProcessError:
        raise CheckFailed("This machine does not have an FQDN (fully-qualified domain name) for a hostname")
#check function
def check_kvm():
    """Verify KVM is installed and usable; raise CheckFailed otherwise."""
    if distro in (Fedora,CentOS,RHEL6):
        # On the Red Hat family, the /dev/kvm node existing suffices.
        if os.path.exists("/dev/kvm"): return True
        raise CheckFailed("KVM is not correctly installed on this system, or support for it is not enabled in the BIOS")
    else:
        try:
            # Ubuntu ships kvm-ok, which exits nonzero when KVM is unusable.
            kvmok()
            return True
        except CalledProcessError:
            raise CheckFailed("KVM is not correctly installed on this system, or support for it is not enabled in the BIOS")
        except OSError,e:
            # ENOENT means the kvm-ok binary itself is missing.
            if e.errno is errno.ENOENT: raise CheckFailed("KVM is not correctly installed on this system, or support for it is not enabled in the BIOS")
            raise
        return True
    raise AssertionError, "check_kvm() should have never reached this part"
def check_cgroups():
    """Return cgroup cpu.shares mount points found (truthy if any exist)."""
    return glob.glob("/*/cpu.shares")
#check function
def check_selinux():
    """Fail if SELinux is enforcing now; warn if configured enforcing on disk."""
    if distro not in [Fedora,CentOS,RHEL6]: return # no selinux outside of those
    enforcing = False
    config_enforcing = False
    try:
        output = getenforce().stdout.strip()
        # Matches both "Enforcing" and "enforcing".
        if "nforcing" in output:
            enforcing = True
        if any ( [ s.startswith("SELINUX=enforcing") for s in file("/etc/selinux/config").readlines() ] ):
            config_enforcing = True
        else:
            config_enforcing = False
    except (IOError,OSError),e:
        # errno 2 (ENOENT): no SELinux tooling/config present -- treat
        # as not enforcing.
        if e.errno == 2: pass
        else: raise CheckFailed("An unknown error (%s) took place while checking for SELinux"%str(e))
    if enforcing:
        raise CheckFailed('''SELinux is set to enforcing. There are two options:
1> Set it permissive in /etc/selinux/config, then reboot the machine.
2> Type 'setenforce Permissive' in commandline, after which you can run this program again.
We strongly suggest you doing the option 1 that makes sure SELinux goes into permissive after system reboot.\n''')
    if config_enforcing:
        print "WARNING: We detected that your SELinux is not configured in permissive. to make sure cloudstack won't block by \
SELinux after system reboot, we strongly suggest you setting it in permissive in /etc/selinux/config, then reboot the machine."
def preflight_checks(do_check_kvm=True):
    """Return the (check_function, description) pairs to run before setup."""
    if distro is Ubuntu:
        preflight_checks = [
            (check_hostname,"Checking hostname"),
        ]
    else:
        preflight_checks = [
            (check_hostname,"Checking hostname"),
            (check_selinux,"Checking if SELinux is disabled"),
        ]
    #preflight_checks.append( (check_cgroups,"Checking if the control groups /cgroup filesystem is mounted") )
    if do_check_kvm: preflight_checks.append( (check_kvm,"Checking for KVM") )
    return preflight_checks
# ========================== CONFIGURATION TASKS ================================
# A Task is a function that runs within the context of its run() function that runs the function execute(), which does several things, reporting back to the caller as it goes with the use of yield
# the done() method ought to return true if the task has run in the past
# the execute() method must implement the configuration act itself
# run() wraps the output of execute() within a Starting taskname and a Completed taskname message
# tasks have a name
class TaskFailed(Exception):
    """Raised by a ConfigTask step when the task cannot be completed."""
    pass
#def __init__(self,code,msg):
    #Exception.__init__(self,msg)
    #self.code = code
class ConfigTask:
    """Base class for configuration tasks (see section comments above)."""
    name = "generic config task"
    # autoMode: when True, tasks skip interactive prompts.
    autoMode=False
    def __init__(self): pass
    def done(self):
        """Returns true if the config task has already been done in the past, false if it hasn't"""
        return False
    def execute(self):
        """Executes the configuration task. Must not be run if test() returned true.
        Must yield strings that describe the steps in the task.
        Raises TaskFailed if the task failed at some step.
        """
    def run (self):
        """Wrap execute(), reporting start, each step, and completion via stderr()."""
        stderr("Starting %s"%self.name)
        it = self.execute()
        if not it:
            pass # not a yielding iterable
        else:
            for msg in it: stderr(msg)
        stderr("Completed %s"%self.name)
    def setAutoMode(self, autoMode):
        # Toggle unattended mode (suppresses interactive prompts).
        self.autoMode = autoMode
    def isAutoMode(self):
        return self.autoMode
# ============== these are some configuration tasks ==================
class SetupNetworking(ConfigTask):
    """Creates the cloud bridge device and enslaves the default-route NIC to it.

    Stops NetworkManager, rewrites the distro's interface configuration
    via augtool, then restarts classic networking; restore_state()
    attempts a best-effort rollback of the runtime changes.
    """
    name = "network setup"
    def __init__(self,brname, pubNic, prvNic):
        ConfigTask.__init__(self)
        self.brname = brname
        self.pubNic = pubNic
        self.prvNic = prvNic
        # Set once execute() passes the point of no return, so
        # restore_state() knows whether rollback is needed.
        self.runtime_state_changed = False
        self.was_nm_service_running = None
        self.was_net_service_running = None
        # Service names differ between the Red Hat and Debian families.
        if distro in (Fedora, CentOS, RHEL6):
            self.nmservice = 'NetworkManager'
            self.netservice = 'network'
        else:
            self.nmservice = 'network-manager'
            self.netservice = 'networking'
    def done(self):
        """Return truthy if the bridge (or the given NICs) is already configured on disk."""
        try:
            alreadysetup = False
            if distro in (Fedora,CentOS, RHEL6):
                if self.pubNic != None:
                    alreadysetup = alreadysetup or augtool._print("/files/etc/sysconfig/network-scripts/ifcfg-%s"%self.pubNic).stdout.strip()
                if self.prvNic != None:
                    alreadysetup = alreadysetup or augtool._print("/files/etc/sysconfig/network-scripts/ifcfg-%s"%self.prvNic).stdout.strip()
                if not alreadysetup:
                    alreadysetup = augtool._print("/files/etc/sysconfig/network-scripts/ifcfg-%s"%self.brname).stdout.strip()
            else:
                if self.pubNic != None:
                    alreadysetup = alreadysetup or augtool._print("/files/etc/network/interfaces/iface",self.pubNic).stdout.strip()
                if self.prvNic != None:
                    alreadysetup = alreadysetup or augtool._print("/files/etc/network/interfaces/iface",self.prvNic).stdout.strip()
                if not alreadysetup:
                    alreadysetup = augtool.match("/files/etc/network/interfaces/iface",self.brname).stdout.strip()
            return alreadysetup
        except OSError,e:
            if e.errno is 2: raise TaskFailed("augtool has not been properly installed on this system")
            raise
    def restore_state(self):
        """Best-effort rollback of the runtime changes made by execute()."""
        if not self.runtime_state_changed: return
        try:
            o = ifconfig(self.brname)
            bridge_exists = True
        except CalledProcessError,e:
            print e.stdout + e.stderr
            bridge_exists = False
        if bridge_exists:
            # Deconfigure and tear the bridge down, ignoring failures
            # at each step (any of these may legitimately fail).
            ifconfig(self.brname,"0.0.0.0")
            if hasattr(self,"old_net_device"):
                ifdown(self.old_net_device)
                ifup(self.old_net_device)
            try: ifdown(self.brname)
            except CalledProcessError: pass
            try: ifconfig(self.brname,"down")
            except CalledProcessError: pass
            try: brctl("delbr",self.brname)
            except CalledProcessError: pass
            try: ifdown("--force",self.brname)
            except CalledProcessError: pass
        if self.was_net_service_running is None:
            # we do nothing
            pass
        elif self.was_net_service_running == False:
            stop_service(self.netservice,force=True)
            time.sleep(1)
        else:
            # we altered service configuration
            stop_service(self.netservice,force=True)
            time.sleep(1)
            try: start_service(self.netservice,force=True)
            except CalledProcessError,e:
                if e.returncode == 1: pass
                else: raise
            time.sleep(1)
        if self.was_nm_service_running is None:
            # we do nothing
            pass
        elif self.was_nm_service_running == False:
            stop_service(self.nmservice,force=True)
            time.sleep(1)
        else:
            # we altered service configuration
            stop_service(self.nmservice,force=True)
            time.sleep(1)
            start_service(self.nmservice,force=True)
            time.sleep(1)
        self.runtime_state_changed = False
    def execute(self):
        """Generator: perform the reconfiguration, yielding progress messages."""
        yield "Determining default route"
        routes = ip.route().stdout.splitlines()
        defaultroute = [ x for x in routes if x.startswith("default") ]
        if not defaultroute: raise TaskFailed("Your network configuration does not have a default route")
        # Field 5 of "default via <gw> dev <dev> ..." is the device name.
        dev = defaultroute[0].split()[4]
        yield "Default route assigned to device %s"%dev
        self.old_net_device = dev
        if distro in (Fedora, CentOS, RHEL6):
            inconfigfile = "/".join(augtool.match("/files/etc/sysconfig/network-scripts/*/DEVICE",dev).stdout.strip().split("/")[:-1])
            if not inconfigfile: raise TaskFailed("Device %s has not been set up in /etc/sysconfig/network-scripts"%dev)
            # Strip the augeas "/files" prefix to get the real path.
            pathtoconfigfile = inconfigfile[6:]
        if distro in (Fedora, CentOS, RHEL6):
            automatic = augtool.match("%s/ONBOOT"%inconfigfile,"yes").stdout.strip()
        else:
            automatic = augtool.match("/files/etc/network/interfaces/auto/*/",dev).stdout.strip()
        if not automatic:
            if distro is Fedora: raise TaskFailed("Device %s has not been set up in %s as automatic on boot"%dev,pathtoconfigfile)
            else: raise TaskFailed("Device %s has not been set up in /etc/network/interfaces as automatic on boot"%dev)
        if distro not in (Fedora , CentOS, RHEL6):
            inconfigfile = augtool.match("/files/etc/network/interfaces/iface",dev).stdout.strip()
            if not inconfigfile: raise TaskFailed("Device %s has not been set up in /etc/network/interfaces"%dev)
        if distro in (Fedora, CentOS, RHEL6):
            isstatic = augtool.match(inconfigfile + "/BOOTPROTO","none").stdout.strip()
            if not isstatic: isstatic = augtool.match(inconfigfile + "/BOOTPROTO","static").stdout.strip()
        else:
            isstatic = augtool.match(inconfigfile + "/method","static").stdout.strip()
        if not isstatic:
            if distro in (Fedora, CentOS, RHEL6): raise TaskFailed("Device %s has not been set up as a static device in %s"%(dev,pathtoconfigfile))
            else: raise TaskFailed("Device %s has not been set up as a static device in /etc/network/interfaces"%dev)
        if is_service_running(self.nmservice):
            self.was_nm_service_running = True
            yield "Stopping NetworkManager to avoid automatic network reconfiguration"
            disable_service(self.nmservice)
        else:
            self.was_nm_service_running = False
        if is_service_running(self.netservice):
            self.was_net_service_running = True
        else:
            self.was_net_service_running = False
        yield "Creating Cloud bridging device and making device %s member of this bridge"%dev
        if distro in (Fedora, CentOS, RHEL6):
            # Clone the NIC's ifcfg file for the bridge, then edit both
            # files with one augtool script.
            ifcfgtext = file(pathtoconfigfile).read()
            newf = "/etc/sysconfig/network-scripts/ifcfg-%s"%self.brname
            #def restore():
                #try: os.unlink(newf)
                #except OSError,e:
                    #if errno == 2: pass
                #raise
                #try: file(pathtoconfigfile,"w").write(ifcfgtext)
                #except OSError,e: raise
            f = file(newf,"w") ; f.write(ifcfgtext) ; f.flush() ; f.close()
            innewconfigfile = "/files" + newf
            script = """set %s/DEVICE %s
set %s/NAME %s
set %s/BRIDGE_PORTS %s
set %s/TYPE Bridge
rm %s/HWADDR
rm %s/UUID
rm %s/HWADDR
rm %s/IPADDR
rm %s/DEFROUTE
rm %s/NETMASK
rm %s/GATEWAY
rm %s/BROADCAST
rm %s/NETWORK
set %s/BRIDGE %s
save"""%(innewconfigfile,self.brname,innewconfigfile,self.brname,innewconfigfile,dev,
                innewconfigfile,innewconfigfile,innewconfigfile,innewconfigfile,
                inconfigfile,inconfigfile,inconfigfile,inconfigfile,inconfigfile,inconfigfile,
                inconfigfile,self.brname)
            yield "Executing the following reconfiguration script:\n%s"%script
            try:
                returned = augtool < script
                if "Saved 2 file" not in returned.stdout:
                    print returned.stdout + returned.stderr
                    #restore()
                    raise TaskFailed("Network reconfiguration failed.")
                else:
                    yield "Network reconfiguration complete"
            except CalledProcessError,e:
                #restore()
                print e.stdout + e.stderr
                raise TaskFailed("Network reconfiguration failed")
        else: # Not fedora
            backup = file("/etc/network/interfaces").read(-1)
            #restore = lambda: file("/etc/network/interfaces","w").write(backup)
            script = """set %s %s
set %s %s
set %s/bridge_ports %s
save"""%(automatic,self.brname,inconfigfile,self.brname,inconfigfile,dev)
            yield "Executing the following reconfiguration script:\n%s"%script
            try:
                returned = augtool < script
                if "Saved 1 file" not in returned.stdout:
                    #restore()
                    raise TaskFailed("Network reconfiguration failed.")
                else:
                    yield "Network reconfiguration complete"
            except CalledProcessError,e:
                #restore()
                print e.stdout + e.stderr
                raise TaskFailed("Network reconfiguration failed")
        yield "We are going to restart network services now, to make the network changes take effect. Hit ENTER when you are ready."
        if self.isAutoMode(): pass
        else:
            raw_input()
        # if we reach here, then if something goes wrong we should attempt to revert the runinng state
        # if not, then no point
        self.runtime_state_changed = True
        yield "Enabling and restarting non-NetworkManager networking"
        if distro is Ubuntu: ifup(self.brname,stdout=None,stderr=None)
        stop_service(self.netservice)
        try: enable_service(self.netservice,forcestart=True)
        except CalledProcessError,e:
            if e.returncode == 1: pass
            else: raise
        yield "Verifying that the bridge is up"
        try:
            o = ifconfig(self.brname)
        except CalledProcessError,e:
            print e.stdout + e.stderr
            raise TaskFailed("The bridge could not be set up properly")
        yield "Networking restart done"
class SetupCgConfig(ConfigTask):
    """Adds a 'virt' cpu control group to /etc/cgconfig.conf and restarts cgconfig."""
    name = "control groups configuration"
    def done(self):
        try:
            return "group virt" in file("/etc/cgconfig.conf","r").read(-1)
        except IOError,e:
            if e.errno is 2: raise TaskFailed("cgconfig has not been properly installed on this system")
            raise
    def execute(self):
        cgconfig = file("/etc/cgconfig.conf","r").read(-1)
        cgconfig = cgconfig + """
group virt {
cpu {
cpu.shares = 9216;
}
}
"""
        file("/etc/cgconfig.conf","w").write(cgconfig)
        stop_service("cgconfig")
        enable_service("cgconfig",forcestart=True)
class SetupCgRules(ConfigTask):
    """Appends the libvirtd-to-virt-cgroup rule to /etc/cgrules.conf."""
    name = "control group rules setup"
    cfgline = "root:/usr/sbin/libvirtd cpu virt/"
    def done(self):
        try:
            return self.cfgline in file("/etc/cgrules.conf","r").read(-1)
        except IOError,e:
            if e.errno is 2: raise TaskFailed("cgrulesd has not been properly installed on this system")
            raise
    def execute(self):
        cgrules = file("/etc/cgrules.conf","r").read(-1)
        cgrules = cgrules + "\n" + self.cfgline + "\n"
        file("/etc/cgrules.conf","w").write(cgrules)
        stop_service("cgred")
        enable_service("cgred")
class SetupSecurityDriver(ConfigTask):
    """Disables the libvirt/qemu security driver by appending to qemu.conf."""
    name = "security driver setup"
    cfgline = "security_driver = \"none\""
    filename = "/etc/libvirt/qemu.conf"
    def done(self):
        try:
            return self.cfgline in file(self.filename,"r").read(-1)
        except IOError,e:
            if e.errno is 2: raise TaskFailed("qemu has not been properly installed on this system")
            raise
    def execute(self):
        libvirtqemu = file(self.filename,"r").read(-1)
        libvirtqemu = libvirtqemu + "\n" + self.cfgline + "\n"
        file("/etc/libvirt/qemu.conf","w").write(libvirtqemu)
class SetupLibvirt(ConfigTask):
    """Exports CGROUP_DAEMON in the libvirt daemon defaults file and restarts it."""
    name = "libvirt setup"
    cfgline = "export CGROUP_DAEMON='cpu:/virt'"
    def done(self):
        try:
            # The defaults file location differs per distro family.
            if distro in (Fedora,CentOS, RHEL6): libvirtfile = "/etc/sysconfig/libvirtd"
            elif distro is Ubuntu: libvirtfile = "/etc/default/libvirt-bin"
            else: raise AssertionError, "We should not reach this"
            return self.cfgline in file(libvirtfile,"r").read(-1)
        except IOError,e:
            if e.errno is 2: raise TaskFailed("libvirt has not been properly installed on this system")
            raise
    def execute(self):
        if distro in (Fedora,CentOS, RHEL6): libvirtfile = "/etc/sysconfig/libvirtd"
        elif distro is Ubuntu: libvirtfile = "/etc/default/libvirt-bin"
        else: raise AssertionError, "We should not reach this"
        libvirtbin = file(libvirtfile,"r").read(-1)
        libvirtbin = libvirtbin + "\n" + self.cfgline + "\n"
        file(libvirtfile,"w").write(libvirtbin)
        if distro in (CentOS, Fedora, RHEL6): svc = "libvirtd"
        else: svc = "libvirt-bin"
        stop_service(svc)
        enable_service(svc)
class SetupLiveMigration(ConfigTask):
    """Enables unauthenticated TCP listening in libvirtd for live migration."""
    name = "live migration setup"
    # Lines that must be present verbatim in libvirtd.conf.
    stanzas = (
        "listen_tcp=1",
        'tcp_port="16509"',
        'auth_tcp="none"',
        "listen_tls=0",
    )
    def done(self):
        try:
            lines = [ s.strip() for s in file("/etc/libvirt/libvirtd.conf").readlines() ]
            if all( [ stanza in lines for stanza in self.stanzas ] ): return True
        except IOError,e:
            if e.errno is 2: raise TaskFailed("libvirt has not been properly installed on this system")
            raise
    def execute(self):
        for stanza in self.stanzas:
            startswith = stanza.split("=")[0] + '='
            replace_or_add_line("/etc/libvirt/libvirtd.conf",startswith,stanza)
        # Also pass -l (listen) to the daemon at startup.
        if distro in (Fedora, RHEL6):
            replace_or_add_line("/etc/sysconfig/libvirtd","LIBVIRTD_ARGS=","LIBVIRTD_ARGS=-l")
        elif distro is Ubuntu:
            if os.path.exists("/etc/init/libvirt-bin.conf"):
                replace_line("/etc/init/libvirt-bin.conf", "exec /usr/sbin/libvirtd","exec /usr/sbin/libvirtd -d -l")
            else:
                replace_or_add_line("/etc/default/libvirt-bin","libvirtd_opts=","libvirtd_opts='-l -d'")
        else:
            raise AssertionError("Unsupported distribution")
        if distro in (CentOS, Fedora, RHEL6): svc = "libvirtd"
        else: svc = "libvirt-bin"
        stop_service(svc)
        enable_service(svc)
class SetupRequiredServices(ConfigTask):
    """Ensures the NFS helper services required by the agent are running."""
    name = "required services setup"
    def done(self):
        if distro in (Fedora, RHEL6): nfsrelated = "rpcbind nfslock"
        elif distro is CentOS: nfsrelated = "portmap nfslock"
        else: return True
        return all( [ is_service_running(svc) for svc in nfsrelated.split() ] )
    def execute(self):
        if distro in (Fedora, RHEL6): nfsrelated = "rpcbind nfslock"
        elif distro is CentOS: nfsrelated = "portmap nfslock"
        else: raise AssertionError("Unsupported distribution")
        for svc in nfsrelated.split(): enable_service(svc)
class SetupFirewall(ConfigTask):
    """Opens the agent/libvirt TCP ports (22, 1798, 16509) in the host firewall."""
    name = "firewall setup"
    def done(self):
        # A missing/disabled firewall counts as done: nothing to open.
        if distro in (Fedora, CentOS,RHEL6):
            if not os.path.exists("/etc/sysconfig/iptables"): return True
            if ":on" not in chkconfig("--list","iptables").stdout: return True
        else:
            if "Status: active" not in ufw.status().stdout: return True
            if not os.path.exists("/etc/ufw/before.rules"): return True
        rule = "-p tcp -m tcp --dport 16509 -j ACCEPT"
        if rule in iptablessave().stdout: return True
        return False
    def execute(self):
        ports = "22 1798 16509".split()
        if distro in (Fedora , CentOS, RHEL6):
            for p in ports: iptables("-I","INPUT","1","-p","tcp","--dport",p,'-j','ACCEPT')
            o = service.iptables.save() ; print o.stdout + o.stderr
        else:
            for p in ports: ufw.allow(p)
class SetupFirewall2(ConfigTask):
    """Opens the VNC (5900:6100) and live-migration (49152:49216) port ranges."""
    # this closes bug 4371
    name = "additional firewall setup"
    def __init__(self,brname):
        ConfigTask.__init__(self)
        self.brname = brname
    def done(self):
        # A missing/disabled firewall counts as done: nothing to open.
        if distro in (Fedora, CentOS, RHEL6):
            if not os.path.exists("/etc/sysconfig/iptables"): return True
            if ":on" not in chkconfig("--list","iptables").stdout: return True
            return False
        else:
            if "Status: active" not in ufw.status().stdout: return True
            if not os.path.exists("/etc/ufw/before.rules"): return True
            return False
    def execute(self):
        yield "Permitting traffic in the bridge interface, migration port and for VNC ports"
        if distro in (Fedora , CentOS, RHEL6):
            for rule in (
                "-I INPUT 1 -p tcp --dport 5900:6100 -j ACCEPT",
                "-I INPUT 1 -p tcp --dport 49152:49216 -j ACCEPT",
                ):
                args = rule.split()
                o = iptables(*args)
            service.iptables.save(stdout=None,stderr=None)
        else:
            ufw.allow.proto.tcp("from","any","to","any","port","5900:6100")
            ufw.allow.proto.tcp("from","any","to","any","port","49152:49216")
            stop_service("ufw")
            start_service("ufw")
# Tasks according to distribution -- at some point we will split them in separate modules
def config_tasks(brname, pubNic, prvNic):
    """Return the ordered tuple of ConfigTask instances for the detected distro."""
    if distro is CentOS:
        config_tasks = (
            SetupNetworking(brname, pubNic, prvNic),
            SetupLibvirt(),
            SetupRequiredServices(),
            SetupFirewall(),
            SetupFirewall2(brname),
        )
    elif distro in (Ubuntu,Fedora, RHEL6):
        config_tasks = (
            SetupNetworking(brname, pubNic, prvNic),
            SetupCgConfig(),
            SetupCgRules(),
            SetupSecurityDriver(),
            SetupLibvirt(),
            SetupLiveMigration(),
            SetupRequiredServices(),
            SetupFirewall(),
            SetupFirewall2(brname),
        )
    else:
        raise AssertionError("Unknown distribution")
    return config_tasks
def backup_etc(targetdir):
    """Mirror the current /etc tree into *targetdir* using rsync.

    The target directory is created first if missing; a trailing slash
    is enforced so rsync copies directory contents, not the directory.
    """
    if not targetdir.endswith("/"):
        targetdir = targetdir + "/"
    check_call(["mkdir", "-p", targetdir])
    check_call(["rsync", "-ax", "--delete", "/etc/", targetdir])
def restore_etc(targetdir):
    """Rsync a backup made by backup_etc() back onto /etc (deleting extras)."""
    if not targetdir.endswith("/"):
        targetdir = targetdir + "/"
    check_call(["rsync", "-ax", "--delete", targetdir, "/etc/"])
def remove_backup(targetdir):
    """Delete the backup directory tree created by backup_etc()."""
    check_call(["rm", "-rf", targetdir])
def list_zonespods(host):
    """Query the management server's unauthenticated API port for all pods.

    Returns a list of (zonename, podname) tuples parsed from the XML
    response of the listPods command on port 8096.
    """
    text = urllib2.urlopen('http://%s:8096/client/api?command=listPods'%host).read(-1)
    dom = xml.dom.minidom.parseString(text)
    x = [ (zonename,podname)
          for pod in dom.childNodes[0].childNodes
          for podname in [ x.childNodes[0].wholeText for x in pod.childNodes if x.tagName == "name" ]
          for zonename in [ x.childNodes[0].wholeText for x in pod.childNodes if x.tagName == "zonename" ]
        ]
    return x
def prompt_for_hostpods(zonespods):
    """Ask user to select one from those zonespods
    Returns (zone,pod) or None if the user made the default selection."""
    while True:
        stderr("Type the number of the zone and pod combination this host belongs to (hit ENTER to skip this step)")
        print " N) ZONE, POD"
        print "================"
        for n,(z,p) in enumerate(zonespods):
            print "%3d) %s, %s"%(n,z,p)
        print "================"
        print "> ",
        zoneandpod = raw_input().strip()
        if not zoneandpod:
            # we go with default, do not touch anything, just break
            return None
        try:
            # if parsing fails as an int, just vomit and retry
            zoneandpod = int(zoneandpod)
            if zoneandpod >= len(zonespods) or zoneandpod < 0: raise ValueError, "%s out of bounds"%zoneandpod
        except ValueError,e:
            stderr(str(e))
            continue # re-ask
        # oh yeah, the int represents an valid zone and pod index in the array
        return zonespods[zoneandpod]
# this configures the agent
def device_exist(devName):
    """Return truthy if *devName* has an interface definition on disk."""
    try:
        alreadysetup = False
        if distro in (Fedora,CentOS, RHEL6):
            alreadysetup = augtool._print("/files/etc/sysconfig/network-scripts/ifcfg-%s"%devName).stdout.strip()
        else:
            alreadysetup = augtool.match("/files/etc/network/interfaces/iface",devName).stdout.strip()
        return alreadysetup
    except OSError,e:
        # augtool missing or not runnable: assume the device is absent.
        return False
def setup_agent_config(configfile, host, zone, pod, cluster, guid, pubNic, prvNic):
    """Interactively update the agent properties file in place.

    Fills in guid/host/zone/pod/cluster and the public/private network
    devices, prompting for anything not supplied as an argument.
    """
    stderr("Examining Agent configuration")
    fn = configfile
    text = file(fn).read(-1)
    lines = [ s.strip() for s in text.splitlines() ]
    # confopts: option name -> value; confposes: option name -> line
    # index, so existing lines are rewritten in place and new options
    # are appended at the end.
    confopts = dict([ m.split("=",1) for m in lines if "=" in m and not m.startswith("#") ])
    confposes = dict([ (m.split("=",1)[0],n) for n,m in enumerate(lines) if "=" in m and not m.startswith("#") ])
    if guid != None:
        confopts['guid'] = guid
    else:
        if not "guid" in confopts:
            stderr("Generating GUID for this Agent")
            confopts['guid'] = uuidgen().stdout.strip()
    if host == None:
        try: host = confopts["host"]
        except KeyError: host = "localhost"
        stderr("Please enter the host name of the management server that this agent will connect to: (just hit ENTER to go with %s)",host)
        print "> ",
        newhost = raw_input().strip()
        if newhost: host = newhost
    confopts["host"] = host
    # When only one of the two NICs exists, use it for both roles.
    if pubNic != None and device_exist(pubNic):
        confopts["public.network.device"] = pubNic
        if prvNic == None or not device_exist(prvNic):
            confopts["private.network.device"] = pubNic
    if prvNic != None and device_exist(prvNic):
        confopts["private.network.device"] = prvNic
        if pubNic == None or not device_exist(pubNic):
            confopts["public.network.device"] = prvNic
    stderr("Querying %s for zones and pods",host)
    try:
        if zone == None or pod == None:
            x = list_zonespods(confopts['host'])
            zoneandpod = prompt_for_hostpods(x)
            if zoneandpod:
                confopts["zone"],confopts["pod"] = zoneandpod
                stderr("You selected zone %s pod %s",confopts["zone"],confopts["pod"])
            else:
                stderr("Skipped -- using the previous zone %s pod %s",confopts["zone"],confopts["pod"])
        else:
            confopts["zone"] = zone
            confopts["pod"] = pod
            confopts["cluster"] = cluster
    except (urllib2.URLError,urllib2.HTTPError),e:
        stderr("Query failed: %s. Defaulting to zone %s pod %s",str(e),confopts["zone"],confopts["pod"])
    for opt,val in confopts.items():
        line = "=".join([opt,val])
        if opt not in confposes: lines.append(line)
        else: lines[confposes[opt]] = line
    text = "\n".join(lines)
    file(fn,"w").write(text)
def setup_consoleproxy_config(configfile, host, zone, pod):
    """Interactively update the Console Proxy properties file in place.

    Same key=value rewrite scheme as setup_agent_config: read the file,
    fill in / prompt for GUID, management server host, zone and pod, then
    rewrite the file preserving line order.  Python 2 only.
    """
    stderr("Examining Console Proxy configuration")
    fn = configfile
    text = file(fn).read(-1)
    lines = [ s.strip() for s in text.splitlines() ]
    # confopts: key -> value; confposes: key -> line index for write-back.
    confopts = dict([ m.split("=",1) for m in lines if "=" in m and not m.startswith("#") ])
    confposes = dict([ (m.split("=",1)[0],n) for n,m in enumerate(lines) if "=" in m and not m.startswith("#") ])
    if not "guid" in confopts:
        # Generate a GUID on first setup via uuidgen(1).
        stderr("Generating GUID for this Console Proxy")
        confopts['guid'] = uuidgen().stdout.strip()
    if host == None:
        # Prompt, defaulting to the configured host or "localhost".
        try: host = confopts["host"]
        except KeyError: host = "localhost"
        stderr("Please enter the host name of the management server that this console-proxy will connect to: (just hit ENTER to go with %s)",host)
        print "> ",
        newhost = raw_input().strip()
        if newhost: host = newhost
    confopts["host"] = host
    stderr("Querying %s for zones and pods",host)
    try:
        if zone == None or pod == None:
            x = list_zonespods(confopts['host'])
            zoneandpod = prompt_for_hostpods(x)
            if zoneandpod:
                confopts["zone"],confopts["pod"] = zoneandpod
                stderr("You selected zone %s pod %s",confopts["zone"],confopts["pod"])
            else:
                stderr("Skipped -- using the previous zone %s pod %s",confopts["zone"],confopts["pod"])
        else:
            confopts["zone"] = zone
            confopts["pod"] = pod
    except (urllib2.URLError,urllib2.HTTPError),e:
        # NOTE(review): KeyError possible here if zone/pod were never
        # configured before the failed query -- confirm acceptable.
        stderr("Query failed: %s. Defaulting to zone %s pod %s",str(e),confopts["zone"],confopts["pod"])
    # Overwrite known option lines in place, append new ones at the end.
    for opt,val in confopts.items():
        line = "=".join([opt,val])
        if opt not in confposes: lines.append(line)
        else: lines[confposes[opt]] = line
    text = "\n".join(lines)
    file(fn,"w").write(text)
# =========================== DATABASE MIGRATION SUPPORT CODE ===================
# Migrator, Migratee and Evolvers -- this is the generic infrastructure.

# Exception hierarchy for the migration framework, rooted at
# MigratorException so callers can catch everything migration-related.
class MigratorException(Exception): pass
class NoMigrationPath(MigratorException): pass
class NoMigrator(MigratorException): pass

# Schema level of a database that has never been migrated; the first evolver
# in a chain declares this as its from_level.
INITIAL_LEVEL = '-'
class Migrator:
"""Migrator class.
The migrator gets a list of Python objects, and discovers MigrationSteps in it. It then sorts the steps into a chain, based on the attributes from_level and to_level in each one of the steps.
When the migrator's run(context) is called, the chain of steps is applied sequentially on the context supplied to run(), in the order of the chain of steps found at discovery time. See the documentation for the MigrationStep class for information on how that happens.
"""
def __init__(self,evolver_source):
self.discover_evolvers(evolver_source)
self.sort_evolvers()
def discover_evolvers(self,source):
self.evolvers = []
for val in source:
if hasattr(val,"from_level") and hasattr(val,"to_level") and val.to_level:
self.evolvers.append(val)
def sort_evolvers(self):
new = []
while self.evolvers:
if not new:
try: idx= [ i for i,s in enumerate(self.evolvers)
if s.from_level == INITIAL_LEVEL ][0] # initial evolver
except IndexError,e:
raise IndexError, "no initial evolver (from_level is None) could be found"
else:
try: idx= [ i for i,s in enumerate(self.evolvers)
if new[-1].to_level == s.from_level ][0]
except IndexError,e:
raise IndexError, "no evolver could be found to evolve from level %s"%new[-1].to_level
new.append(self.evolvers.pop(idx))
self.evolvers = new
def get_evolver_chain(self):
return [ (s.from_level, s.to_level, s) for s in self.evolvers ]
def get_evolver_by_starting_level(self,level):
try: return [ s for s in self.evolvers if s.from_level == level][0]
except IndexError: raise NoMigrator, "No evolver knows how to evolve the database from schema level %r"%level
def get_evolver_by_ending_level(self,level):
try: return [ s for s in self.evolvers if s.to_level == level][0]
except IndexError: raise NoMigrator, "No evolver knows how to evolve the database to schema level %r"%level
def run(self, context, dryrun = False, starting_level = None, ending_level = None):
"""Runs each one of the steps in sequence, passing the migration context to each. At the end of the process, context.commit() is called to save the changes, or context.rollback() is called if dryrun = True.
If starting_level is not specified, then the context.get_schema_level() is used to find out at what level the context is at. Then starting_level is set to that.
If ending_level is not specified, then the evolvers will run till the end of the chain."""
assert dryrun is False # NOT IMPLEMENTED, prolly gonna implement by asking the context itself to remember its state
starting_level = starting_level or context.get_schema_level() or self.evolvers[0].from_level
ending_level = ending_level or self.evolvers[-1].to_level
evolution_path = self.evolvers
idx = evolution_path.index(self.get_evolver_by_starting_level(starting_level))
evolution_path = evolution_path[idx:]
try: idx = evolution_path.index(self.get_evolver_by_ending_level(ending_level))
except ValueError:
raise NoEvolutionPath, "No evolution path from schema level %r to schema level %r" % \
(starting_level,ending_level)
evolution_path = evolution_path[:idx+1]
logging.info("Starting migration on %s"%context)
for ec in evolution_path:
assert ec.from_level == context.get_schema_level()
evolver = ec(context=context)
logging.info("%s (from level %s to level %s)",
evolver,
evolver.from_level,
evolver.to_level)
#try:
evolver.run()
#except:
#context.rollback()
#raise
context.set_schema_level(evolver.to_level)
#context.commit()
logging.info("%s is now at level %s",context,context.get_schema_level())
#if dryrun: # implement me with backup and restore
#logging.info("Rolling back changes on %s",context)
#context.rollback()
#else:
#logging.info("Committing changes on %s",context)
#context.commit()
logging.info("Migration finished")
class MigrationStep:
    """Base class for a single migration step (an "evolver").

    Write your own subclasses and hand a list of them to a Migrator, which
    runs them in chain order.  For each step the migrator:

      a) instantiates the step, passing the migration context into
         __init__(), which stores it as self.context;
      b) calls the step's run() method.

    Ordering and applicability are controlled by two class attributes that
    every subclass must define:

      from_level -- the schema level the database must be at before this
                    step runs.  The special INITIAL_LEVEL value marks the
                    first step of a chain, used when the database has no
                    schema level yet.
      to_level   -- the schema level the database will be at after this
                    step has run.
    """
    # Subclasses override both of these.
    from_level = None
    to_level = None

    def __init__(self, context):
        # Keep the migration context available to run().
        self.context = context

    def run(self):
        # Abstract: subclasses perform their schema change here.
        raise NotImplementedError
class MigrationContext:
    """Abstract interface a Migrator drives during a migration.

    Concrete contexts implement transactional control (commit/rollback)
    and schema-level bookkeeping; the base class implements none of them.
    """

    def __init__(self):
        pass

    def commit(self):
        raise NotImplementedError

    def rollback(self):
        raise NotImplementedError

    def get_schema_level(self):
        raise NotImplementedError

    def set_schema_level(self, l):
        raise NotImplementedError
| {
"content_hash": "b075b87df8fa4964a5351b66b1831775",
"timestamp": "",
"source": "github",
"line_count": 1205,
"max_line_length": 269,
"avg_line_length": 33.47385892116183,
"alnum_prop": 0.6962266957556525,
"repo_name": "resmo/cloudstack",
"id": "43c93c8763160a54a304d833b8b47f0c5cc38ffc",
"size": "40336",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/lib/cloud_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "1451"
},
{
"name": "Batchfile",
"bytes": "11926"
},
{
"name": "C#",
"bytes": "2356211"
},
{
"name": "CSS",
"bytes": "335738"
},
{
"name": "FreeMarker",
"bytes": "4917"
},
{
"name": "Groovy",
"bytes": "153137"
},
{
"name": "HTML",
"bytes": "151164"
},
{
"name": "Java",
"bytes": "33712712"
},
{
"name": "JavaScript",
"bytes": "7719277"
},
{
"name": "Python",
"bytes": "11019815"
},
{
"name": "Ruby",
"bytes": "896"
},
{
"name": "Shell",
"bytes": "770039"
}
],
"symlink_target": ""
} |
""" py.test dynamic configuration.
For details needed to understand these tests, refer to:
https://pytest.org/
http://pythontesting.net/start-here/
"""
# Copyright © {{ cookiecutter.year }} {{ cookiecutter.full_name }} <{{ cookiecutter.email }}>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import pytest
# Globally available fixtures
@pytest.fixture(scope='session')
def logger():
    """Test logger instance as a fixture.

    Configures root logging at DEBUG level once for the whole test session
    and returns the shared 'tests' logger.
    """
    logging.basicConfig(level=logging.DEBUG)
    return logging.getLogger('tests')
| {
"content_hash": "e23ac2b8ec5f19983b8d841809fbb33f",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 94,
"avg_line_length": 33.903225806451616,
"alnum_prop": 0.7288296860133207,
"repo_name": "1and1/py-generic-project",
"id": "691c4c3633f3f89e88e9d1c0e4a7632c107faaf4",
"size": "1095",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "{{cookiecutter.repo_name}}/src/tests/conftest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "10813"
}
],
"symlink_target": ""
} |
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from activity.models import Event
@login_required
def event_update(request, content_id, object_id, *args, **kwargs):
    """Delete the logged-in user's activity events for one object.

    Removes every Event owned by ``request.user`` that targets the object
    identified by (``content_id``, ``object_id``), then returns an empty
    HTTP 200 response.

    Note: removed the dead no-op self-assignments of ``content_id`` and
    ``object_id`` that previously preceded the query.
    """
    Event.objects.filter(user=request.user,
                         content_type_id=content_id,
                         object_id=object_id).delete()
    return HttpResponse(status=200)
| {
"content_hash": "6f80b9e89f2713635dc23a14c112cb4d",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 66,
"avg_line_length": 32.642857142857146,
"alnum_prop": 0.6783369803063457,
"repo_name": "owais/django-simple-activity",
"id": "186b274054af7716f74c3848d6e11476d3c5df27",
"size": "457",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "simple_activity/views.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "9574"
}
],
"symlink_target": ""
} |
import zeit.cms.testing
# Functional-test layer for this package: loads its ftesting.zcml with the
# standard zeit.cms product configuration.
ZCML_LAYER = zeit.cms.testing.ZCMLLayer(
    'ftesting.zcml', product_config=zeit.cms.testing.cms_product_config)
| {
"content_hash": "35a76497a97e8f6ab0652eb4ba2b02fd",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 72,
"avg_line_length": 28,
"alnum_prop": 0.7642857142857142,
"repo_name": "ZeitOnline/zeit.content.text",
"id": "ee36c10effd88117905dc7e0260c592dec7abf26",
"size": "140",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/zeit/content/text/testing.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "24215"
}
],
"symlink_target": ""
} |
"""
A drop-in replacement for the Mic class that allows for all I/O to occur
over the terminal. Useful for debugging. Unlike with the typical Mic
implementation, Jasper is always active listening with local_mic.
"""
class Mic:
prev = None
def __init__(self, speaker, passive_stt_engine, active_stt_engine):
return
def passiveListen(self, PERSONA):
return True, "JASPER"
def activeListenToAllOptions(self, THRESHOLD=None, LISTEN=True,
MUSIC=False):
return [self.activeListen(THRESHOLD=THRESHOLD, LISTEN=LISTEN,
MUSIC=MUSIC)]
def activeListen(self, THRESHOLD=None, LISTEN=True, MUSIC=False):
if not LISTEN:
return self.prev
input = raw_input("YOU: ")
self.prev = input
return input
def say(self, phrase, OPTIONS=None):
print "JASPER: " + phrase
| {
"content_hash": "8102f5e4381507578a3219e33a8dda4f",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 72,
"avg_line_length": 29.612903225806452,
"alnum_prop": 0.6198257080610022,
"repo_name": "tdmike/SASCHA",
"id": "a58370dad3936c3a3729f7741a5b9d49d2a6abb1",
"size": "941",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sascha/local_mic.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "7143"
},
{
"name": "Arduino",
"bytes": "28602"
},
{
"name": "C",
"bytes": "9133"
},
{
"name": "C++",
"bytes": "166918"
},
{
"name": "CSS",
"bytes": "56479"
},
{
"name": "Elixir",
"bytes": "391"
},
{
"name": "JavaScript",
"bytes": "5155"
},
{
"name": "PHP",
"bytes": "17610"
},
{
"name": "Processing",
"bytes": "106955"
},
{
"name": "Python",
"bytes": "109227"
},
{
"name": "Shell",
"bytes": "7183"
},
{
"name": "XSLT",
"bytes": "2042"
}
],
"symlink_target": ""
} |
"""
Import Redbridge
note: this script takes quite a long time to run
"""
from django.contrib.gis.geos import Point
from data_collection.management.commands import BaseCsvStationsCsvAddressesImporter
from data_finder.helpers import geocode_point_only, PostcodeError
class Command(BaseCsvStationsCsvAddressesImporter):
    """
    Imports the Polling Station data from Redbridge Council
    """
    council_id = 'E09000026'
    addresses_name = 'rev01-2016/LLPG Addresses - Polling Station Finder - EU referendum.csv'
    stations_name = 'rev01-2016/Polling Stations - EU referendum.csv'
    elections = [
        'ref.2016-06-23'
    ]

    def station_record_to_dict(self, record):
        # The source data carries no coordinates, so derive a point by
        # geocoding the postcode; leave it unset if geocoding fails.
        location = None
        if record.postcode:
            try:
                gridref = geocode_point_only(record.postcode)
                location = Point(gridref['wgs84_lon'], gridref['wgs84_lat'], srid=4326)
            except PostcodeError:
                location = None
        return {
            'internal_council_id': record.district_code.strip(),
            'postcode': record.postcode.strip(),
            'address': record.address.strip(),
            'location': location,
        }

    def address_record_to_dict(self, record):
        # The postal address always ends with the postcode; split it off.
        parts = record.postal_address.strip().split(", ")
        postcode = parts[-1]
        address = ", ".join(parts[:-1])
        # There are 6 addresses which don't map to any station - exclude them
        if not record.district_code:
            return None
        # 20 exact dupes will be discarded
        return {
            'address': address,
            'postcode': postcode,
            'polling_station_id': record.district_code.strip(),
        }
| {
"content_hash": "e9ecaaec764e138ad774bb560c9bfe28",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 94,
"avg_line_length": 34.535714285714285,
"alnum_prop": 0.5966907962771458,
"repo_name": "chris48s/UK-Polling-Stations",
"id": "edcd767e8da360927908b1405b846509990c5855",
"size": "1934",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "polling_stations/apps/data_collection/management/commands/import_redbridge.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "347"
},
{
"name": "Gherkin",
"bytes": "3720"
},
{
"name": "HTML",
"bytes": "30715"
},
{
"name": "JavaScript",
"bytes": "3226"
},
{
"name": "Python",
"bytes": "589520"
}
],
"symlink_target": ""
} |
import unittest
from mock import patch
from app.brain.admin.all_data import AllData
from app.constants import USERS_CONSTANTS, TAXONOMY_CONSTANTS, HISTORY_CONSTANTS
class AllDataTests(unittest.TestCase):
    # Patch decorators are applied bottom-up, so the mocks arrive in the
    # reverse of decorator order: users_mock, taxonomy_mock, history_mock.
    @patch('app.service.RepExercisesHistoryService.get_list_of_all_history')
    @patch('app.service.RepExercisesTaxonomyService.get_list_of_all_exercises')
    @patch('app.service.UsersService.get_list_of_all_users')
    def test_empty_return_values(self, users_mock, taxonomy_mock, history_mock):
        """get_all_data returns all three groups empty when every service is empty."""
        users_mock.return_value = []
        taxonomy_mock.return_value = []
        history_mock.return_value = []

        result = AllData.get_all_data()

        expected_result = {
            USERS_CONSTANTS.GROUP_NAME: [],
            TAXONOMY_CONSTANTS.GROUP_NAME: [],
            HISTORY_CONSTANTS.GROUP_NAME: []
        }
        self.assertEqual(result, expected_result)
| {
"content_hash": "857b94f5f87c8c37562627f49c0a8339",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 80,
"avg_line_length": 34.23076923076923,
"alnum_prop": 0.6853932584269663,
"repo_name": "pbraunstein/trackercise",
"id": "87fa4a5c6b6c654ab4021f5aa16a87ffb7fbfefc",
"size": "890",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/brain/admin/tests/all_data_tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "16360"
},
{
"name": "JavaScript",
"bytes": "2356"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "141017"
},
{
"name": "Shell",
"bytes": "5507"
},
{
"name": "TypeScript",
"bytes": "43836"
}
],
"symlink_target": ""
} |
from toontown.toonbase import ToontownGlobals

# `config` is presumably an engine-provided global (injected into builtins
# before this module loads) -- TODO confirm.
ALLOW_TEMP_MINIGAMES = config.GetBool('allow-temp-minigames', False)
# Temp minigame ids are allocated downward from just below TravelGameId.
TEMP_MG_ID_COUNTER = ToontownGlobals.TravelGameId - 1
# Maps temp minigame id -> constructor class.
TempMgCtors = {}

def _printMessage(message):
    # Loud console banner so temp-minigame registration is hard to miss.
    print '\n\n!!!', message, '\n\n'

def _registerTempMinigame(name, Class, id, minPlayers = 1, maxPlayers = 4):
    """Registers a temporary minigame in the global ToontownGlobals tables."""
    if not ALLOW_TEMP_MINIGAMES:
        # Refuse and show a stack trace identifying the offending caller.
        _printMessage('registerTempMinigame WARNING: allow-temp-minigames config is set to false, but we are trying to register temp minigame ' + name)
        import traceback
        traceback.print_stack()
        return
    ToontownGlobals.MinigameIDs += (id,)
    ToontownGlobals.MinigameNames[name] = id
    TempMgCtors[id] = Class
    # NOTE(review): range(minPlayers, maxPlayers) excludes maxPlayers itself;
    # confirm whether the id should also be registered for maxPlayers players.
    for i in range(minPlayers, maxPlayers):
        ToontownGlobals.MinigamePlayerMatrix[i] += (id,)
    _printMessage('registerTempMinigame: ' + name)

if ALLOW_TEMP_MINIGAMES:
    pass
| {
"content_hash": "eaf05922420213529945a1d33cfb4426",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 151,
"avg_line_length": 34.23076923076923,
"alnum_prop": 0.7078651685393258,
"repo_name": "silly-wacky-3-town-toon/SOURCE-COD",
"id": "3023993ab78a054cae2006332bd13441dc759ec6",
"size": "890",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "toontown/minigame/TempMinigameAI.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "10249"
},
{
"name": "C",
"bytes": "1752256"
},
{
"name": "C#",
"bytes": "8440"
},
{
"name": "C++",
"bytes": "5485400"
},
{
"name": "Emacs Lisp",
"bytes": "210083"
},
{
"name": "F#",
"bytes": "2310"
},
{
"name": "Forth",
"bytes": "506"
},
{
"name": "GLSL",
"bytes": "1040"
},
{
"name": "JavaScript",
"bytes": "7003"
},
{
"name": "Makefile",
"bytes": "895"
},
{
"name": "Mask",
"bytes": "969"
},
{
"name": "NSIS",
"bytes": "1009050"
},
{
"name": "Objective-C",
"bytes": "21821"
},
{
"name": "PLSQL",
"bytes": "10200"
},
{
"name": "Pascal",
"bytes": "4986"
},
{
"name": "Perl6",
"bytes": "30612"
},
{
"name": "Puppet",
"bytes": "259"
},
{
"name": "Python",
"bytes": "33566014"
},
{
"name": "Shell",
"bytes": "14642"
},
{
"name": "Tcl",
"bytes": "2084458"
}
],
"symlink_target": ""
} |
import itertools
import os
import struct
import json
import errno
from math import isnan
from os.path import isdir, exists, join, dirname, abspath, getsize, getmtime
from glob import glob
from bisect import bisect_left
# Python 2/3 compatibility: itertools.izip on py2, the builtin zip on py3.
izip = getattr(itertools, 'izip', zip)

try:
    import fcntl
    CAN_LOCK = True
except ImportError:
    # fcntl is POSIX-only; file locking is unavailable e.g. on Windows.
    CAN_LOCK = False

LOCK_WRITES = False
TIMESTAMP_FORMAT = "!L"  # network byte order, unsigned 32-bit
TIMESTAMP_SIZE = struct.calcsize(TIMESTAMP_FORMAT)
DATAPOINT_FORMAT = "!d"  # network byte order, 64-bit float
DATAPOINT_SIZE = struct.calcsize(DATAPOINT_FORMAT)
NAN = float('nan')
PACKED_NAN = struct.pack(DATAPOINT_FORMAT, NAN)
MAX_SLICE_GAP = 80
DEFAULT_TIMESTEP = 60
DEFAULT_NODE_CACHING_BEHAVIOR = 'all'
DEFAULT_SLICE_CACHING_BEHAVIOR = 'none'
SLICE_AGGREGATION_METHODS = ['average', 'sum', 'last', 'max', 'min']
SLICE_PERMS = 0o644
DIR_PERMS = 0o755
class CeresTree(object):
  """Represents a tree of Ceres metrics contained within a single path on disk
  This is the primary Ceres API.

  :param root: The directory root of the Ceres tree

  .. note:: Use :func:`createTree` to initialize and instantiate a new CeresTree

  .. seealso:: :func:`setDefaultNodeCachingBehavior` to adjust caching behavior
  """
  def __init__(self, root):
    if isdir(root):
      self.root = abspath(root)
    else:
      raise ValueError("Invalid root directory '%s'" % root)
    self.nodeCache = {}
    self.nodeCachingBehavior = DEFAULT_NODE_CACHING_BEHAVIOR

  def __repr__(self):
    return "<CeresTree[0x%x]: %s>" % (id(self), self.root)
  __str__ = __repr__

  @classmethod
  def createTree(cls, root, **props):
    """Create and returns a new Ceres tree with the given properties

    :param root: The root directory of the new Ceres tree
    :param \*\*props: Arbitrary key-value properties to store as tree metadata

    :returns: :class:`CeresTree`
    """
    ceresDir = join(root, '.ceres-tree')
    if not isdir(ceresDir):
      os.makedirs(ceresDir, DIR_PERMS)

    # Each property is stored as a small file named after the property.
    for prop, value in props.items():
      propFile = join(ceresDir, prop)
      with open(propFile, 'w') as fh:
        fh.write(str(value))

    return cls(root)

  def walk(self, **kwargs):
    """Iterate through the nodes contained in this :class:`CeresTree`

    :param \*\*kwargs: Options to pass to :func:`os.walk`

    :returns: An iterator yielding :class:`CeresNode` objects
    """
    for (fsPath, subdirs, filenames) in os.walk(self.root, **kwargs):
      if CeresNode.isNodeDir(fsPath):
        nodePath = self.getNodePath(fsPath)
        yield CeresNode(self, nodePath, fsPath)

  def getFilesystemPath(self, nodePath):
    """Get the on-disk path of a Ceres node given a metric name

    :param nodePath: A metric name e.g. ``carbon.agents.graphite-a.cpuUsage``

    :returns: The Ceres node path on disk"""
    return join(self.root, nodePath.replace('.', os.sep))

  def getNodePath(self, fsPath):
    """Get the metric name of a Ceres node given the on-disk path

    :param fsPath: The filesystem path of a Ceres node

    :returns: A metric name

    :raises ValueError: When `fsPath` is not a path within the :class:`CeresTree`
    """
    fsPath = abspath(fsPath)
    # BUGFIX: compare against root plus a path separator, so that sibling
    # directories sharing the root as a mere string prefix (e.g.
    # '/opt/ceres2/...' for root '/opt/ceres') are correctly rejected
    # instead of yielding a bogus node path.
    if fsPath != self.root and not fsPath.startswith(self.root + os.sep):
      raise ValueError("path '%s' not beneath tree root '%s'" % (fsPath, self.root))

    nodePath = fsPath[len(self.root):].strip(os.sep).replace(os.sep, '.')
    return nodePath

  def hasNode(self, nodePath):
    """Returns whether the Ceres tree contains the given metric

    :param nodePath: A metric name e.g. ``carbon.agents.graphite-a.cpuUsage``

    :returns: `True` or `False`"""
    return isdir(self.getFilesystemPath(nodePath))

  def setNodeCachingBehavior(self, behavior):
    """Set node caching behavior.

    :param behavior: See :func:`getNode` for valid behavior values
    """
    behavior = behavior.lower()
    if behavior not in ('none', 'all'):
      raise ValueError("invalid caching behavior '%s'" % behavior)

    self.nodeCachingBehavior = behavior
    self.nodeCache = {}

  def getNode(self, nodePath):
    """Returns a Ceres node given a metric name. Because nodes are looked up in
    every read and write, a caching mechanism is provided. Cache behavior is set
    using :func:`setNodeCachingBehavior` and defaults to the value set in
    ``DEFAULT_NODE_CACHING_BEHAVIOR``

    The following behaviors are available:

    * `none` - Node is read from the filesystem at every access.
    * `all` (default) - All nodes are cached.

    :param nodePath: A metric name

    :returns: :class:`CeresNode` or `None`
    """
    if self.nodeCachingBehavior == 'all':
      if nodePath not in self.nodeCache:
        fsPath = self.getFilesystemPath(nodePath)
        if CeresNode.isNodeDir(fsPath):
          self.nodeCache[nodePath] = CeresNode(self, nodePath, fsPath)
        else:
          return None

      return self.nodeCache[nodePath]

    elif self.nodeCachingBehavior == 'none':
      fsPath = self.getFilesystemPath(nodePath)
      if CeresNode.isNodeDir(fsPath):
        return CeresNode(self, nodePath, fsPath)
      else:
        return None

    else:
      raise ValueError("invalid caching behavior configured '%s'" % self.nodeCachingBehavior)

  def find(self, nodePattern, fromTime=None, untilTime=None):
    """Find nodes which match a wildcard pattern, optionally filtering on
    a time range

    :param nodePattern: A glob-style metric wildcard
    :param fromTime: Optional interval start time in unix-epoch.
    :param untilTime: Optional interval end time in unix-epoch.

    :returns: An iterator yielding :class:`CeresNode` objects
    """
    for fsPath in glob(self.getFilesystemPath(nodePattern)):
      if CeresNode.isNodeDir(fsPath):
        nodePath = self.getNodePath(fsPath)
        node = self.getNode(nodePath)

        if fromTime is None and untilTime is None:
          yield node
        elif node.hasDataForInterval(fromTime, untilTime):
          yield node

  def createNode(self, nodePath, **properties):
    """Creates a new metric given a new metric name and optional per-node metadata

    :param nodePath: The new metric name.
    :param \*\*properties: Arbitrary key-value properties to store as metric metadata.

    :returns: :class:`CeresNode`
    """
    return CeresNode.create(self, nodePath, **properties)

  def store(self, nodePath, datapoints):
    """Store a list of datapoints associated with a metric

    :param nodePath: The metric name to write to e.g. ``carbon.agents.graphite-a.cpuUsage``
    :param datapoints: A list of datapoint tuples: ``[(timestamp, value), ...]``

    :raises: :class:`NodeNotFound`
    """
    node = self.getNode(nodePath)

    if node is None:
      raise NodeNotFound("The node '%s' does not exist in this tree" % nodePath)

    node.write(datapoints)

  def fetch(self, nodePath, fromTime, untilTime):
    """Fetch data within a given interval from the given metric

    :param nodePath: The metric name to fetch from
    :param fromTime: Requested interval start time in unix-epoch.
    :param untilTime: Requested interval end time in unix-epoch.

    :returns: :class:`TimeSeriesData`

    :raises: :class:`NodeNotFound`, :class:`InvalidRequest`
    """
    node = self.getNode(nodePath)

    if not node:
      raise NodeNotFound("the node '%s' does not exist in this tree" % nodePath)

    return node.read(fromTime, untilTime)
class CeresNode(object):
"""A :class:`CeresNode` represents a single time-series metric of a given `timeStep`
(its seconds-per-point resolution) and containing arbitrary key-value metadata.
A :class:`CeresNode` is associated with its most precise `timeStep`. This `timeStep` is the finest
resolution that can be used for writing, though a :class:`CeresNode` can contain and read data with
other, less-precise `timeStep` values in its underlying :class:`CeresSlice` data.
:param tree: The :class:`CeresTree` this node is associated with
:param nodePath: The name of the metric this node represents
:param fsPath: The filesystem path of this metric
.. note:: This class generally should be instantiated through use of :class:`CeresTree`. See
:func:`CeresTree.createNode` and :func:`CeresTree.getNode`
.. seealso:: :func:`setDefaultSliceCachingBehavior` to adjust caching behavior
"""
__slots__ = ('tree', 'nodePath', 'fsPath',
'metadataFile', 'timeStep', 'aggregationMethod',
'sliceCache', 'sliceCachingBehavior')
  def __init__(self, tree, nodePath, fsPath):
    self.tree = tree
    self.nodePath = nodePath
    self.fsPath = fsPath
    # Per-node metadata lives in '.ceres-node' inside the node directory.
    self.metadataFile = join(fsPath, '.ceres-node')
    # timeStep stays None until readMetadata()/writeMetadata() sets it.
    self.timeStep = None
    self.aggregationMethod = 'average'
    self.sliceCache = None
    self.sliceCachingBehavior = DEFAULT_SLICE_CACHING_BEHAVIOR

  def __repr__(self):
    return "<CeresNode[0x%x]: %s>" % (id(self), self.nodePath)
  __str__ = __repr__
  @classmethod
  def create(cls, tree, nodePath, **properties):
    """Create a new :class:`CeresNode` on disk with the specified properties.

    :param tree: The :class:`CeresTree` this node is associated with
    :param nodePath: The name of the metric this node represents
    :param \*\*properties: A set of key-value properties to be associated with this node

    A :class:`CeresNode` always has the `timeStep` property which is an integer value representing
    the precision of the node in seconds-per-datapoint. E.g. a value of ``60`` represents one datapoint
    per minute. If no `timeStep` is specified at creation, the value of ``ceres.DEFAULT_TIMESTEP`` is
    used

    :returns: :class:`CeresNode`
    """
    # Create the node directory
    fsPath = tree.getFilesystemPath(nodePath)
    os.makedirs(fsPath, DIR_PERMS)

    properties['timeStep'] = properties.get('timeStep', DEFAULT_TIMESTEP)

    # Create the initial metadata; slices are created lazily on first write
    # (the commented-out code below used to create an initial slice here).
    node = cls(tree, nodePath, fsPath)
    node.writeMetadata(properties)

    # Create the initial data file
    # timeStep = properties['timeStep']
    # now = int( time.time() )
    # baseTime = now - (now % timeStep)
    # slice = CeresSlice.create(node, baseTime, timeStep)

    return node

  @staticmethod
  def isNodeDir(path):
    """Tests whether the given path is a :class:`CeresNode`

    :param path: Path to test

    :returns `True` or `False`
    """
    # A node directory is any directory containing a '.ceres-node' file.
    return isdir(path) and exists(join(path, '.ceres-node'))
  @classmethod
  def fromFilesystemPath(cls, fsPath):
    """Instantiate a :class:`CeresNode` from the on-disk path of an existing node

    :params fsPath: The filesystem path of an existing node

    :returns: :class:`CeresNode`
    """
    # Walk up the directory hierarchy looking for the '.ceres-tree' marker
    # directory that identifies the enclosing tree root.
    dirPath = dirname(fsPath)

    while True:
      ceresDir = join(dirPath, '.ceres-tree')
      if isdir(ceresDir):
        tree = CeresTree(dirPath)
        nodePath = tree.getNodePath(fsPath)
        return cls(tree, nodePath, fsPath)

      dirPath = dirname(dirPath)
      # NOTE(review): the stop condition assumes absolute POSIX paths; with a
      # relative or Windows path dirname() may never equal '/' -- confirm.
      if dirPath == '/':
        raise ValueError("the path '%s' is not in a ceres tree" % fsPath)

  @property
  def slice_info(self):
    """A property providing a list of current information about each slice

    :returns: ``[(startTime, endTime, timeStep), ...]``
    """
    return [(slice.startTime, slice.endTime, slice.timeStep) for slice in self.slices]
  def readMetadata(self):
    """Update node metadata from disk

    Refreshes ``self.timeStep`` and, when present, ``self.aggregationMethod``
    from the JSON '.ceres-node' file, and returns the full metadata dict.

    :raises: :class:`CorruptNode`
    """
    with open(self.metadataFile, 'r') as fh:
      try:
        metadata = json.load(fh)
        self.timeStep = int(metadata['timeStep'])
        if metadata.get('aggregationMethod'):
          self.aggregationMethod = metadata['aggregationMethod']
        return metadata
      except (KeyError, IOError, ValueError) as e:
        raise CorruptNode(self, "Unable to parse node metadata: %s" % e.args)

  def writeMetadata(self, metadata):
    """Writes new metadata to disk

    Also updates the in-memory ``self.timeStep`` to keep it consistent
    with what is written.

    :param metadata: a JSON-serializable dict of node metadata
    """
    self.timeStep = int(metadata['timeStep'])

    with open(self.metadataFile, 'w') as fh:
      json.dump(metadata, fh)
  @property
  def slices(self):
    """A property providing access to information about this node's underlying slices. Because this
    information is accessed in every read and write, a caching mechanism is provided. Cache behavior is
    set using :func:`setSliceCachingBehavior` and defaults to the value set in
    ``DEFAULT_SLICE_CACHING_BEHAVIOR``

    The following behaviors are available:

    * `none` (default) - Slice information is read from the filesystem at every access
    * `latest` - The latest slice is served from cache, all others from disk. Reads and writes of recent
      data are most likely to be in the latest slice
    * `all` - All slices are cached. The cache is only refreshed on new slice creation or deletion

    :returns: ``[(startTime, timeStep), ...]``
    """
    if self.sliceCache:
      if self.sliceCachingBehavior == 'all':
        # Cache holds every slice; serve them all from memory.
        for slice in self.sliceCache:
          yield slice

      elif self.sliceCachingBehavior == 'latest':
        # Cache holds only the newest slice; read the rest from disk.
        yield self.sliceCache
        infos = self.readSlices()
        for info in infos[1:]:
          yield CeresSlice(self, *info)

    else:
      if self.sliceCachingBehavior == 'all':
        self.sliceCache = [CeresSlice(self, *info) for info in self.readSlices()]
        for slice in self.sliceCache:
          yield slice

      elif self.sliceCachingBehavior == 'latest':
        infos = self.readSlices()
        if infos:
          # readSlices() returns newest-first, so infos[0] is the latest.
          self.sliceCache = CeresSlice(self, *infos[0])
          yield self.sliceCache

        for info in infos[1:]:
          yield CeresSlice(self, *info)

      elif self.sliceCachingBehavior == 'none':
        for info in self.readSlices():
          yield CeresSlice(self, *info)

      else:
        raise ValueError("invalid caching behavior configured '%s'" % self.sliceCachingBehavior)
  def readSlices(self):
    """Read slice information from disk

    Slice files are named ``<startTime>@<timeStep>.slice``; results are
    sorted newest-first.

    :returns: ``[(startTime, timeStep), ...]``

    :raises: :class:`NodeDeleted` if the node directory no longer exists
    """
    if not exists(self.fsPath):
      raise NodeDeleted()

    slice_info = []
    for filename in os.listdir(self.fsPath):
      if filename.endswith('.slice'):
        startTime, timeStep = filename[:-6].split('@')
        slice_info.append((int(startTime), int(timeStep)))

    slice_info.sort(reverse=True)
    return slice_info

  def setSliceCachingBehavior(self, behavior):
    """Set slice caching behavior.

    :param behavior: See :func:`slices` for valid behavior values

    :raises ValueError: on an unrecognized behavior
    """
    behavior = behavior.lower()
    if behavior not in ('none', 'all', 'latest'):
      raise ValueError("invalid caching behavior '%s'" % behavior)

    self.sliceCachingBehavior = behavior
    self.sliceCache = None

  def clearSliceCache(self):
    """Clear slice cache, forcing a refresh from disk at the next access"""
    self.sliceCache = None
def hasDataForInterval(self, fromTime, untilTime):
    """Test whether this node has any data in the given time interval.

    Forces a full iteration of :func:`slices`, which may read slice
    information from disk depending on the slice caching behavior.
    :param fromTime: Beginning of interval in unix epoch seconds (or None)
    :param untilTime: End of interval in unix epoch seconds (or None)
    :returns: `True` or `False`
    """
    allSlices = list(self.slices)
    if not allSlices:
        return False
    # Slices are ordered newest-first: the first entry bounds the latest
    # datapoint, the last entry bounds the earliest one.
    latestData = allSlices[0].endTime
    earliestData = allSlices[-1].startTime
    startsBeforeEnd = fromTime is None or fromTime < latestData
    endsAfterStart = untilTime is None or untilTime > earliestData
    return startsBeforeEnd and endsAfterStart
def read(self, fromTime, untilTime):
    """Read data from underlying slices and return as a single time-series
    :param fromTime: Beginning of interval in unix epoch seconds
    :param untilTime: End of interval in unix epoch seconds
    :returns: :class:`TimeSeriesData`
    """
    if self.timeStep is None:
        self.readMetadata()
    # Normalize the timestamps to fit proper intervals
    fromTime = int(fromTime - (fromTime % self.timeStep))
    untilTime = int(untilTime - (untilTime % self.timeStep))
    sliceBoundary = None  # to know when to split up queries across slices
    resultValues = []
    earliestData = None
    timeStep = self.timeStep
    method = self.aggregationMethod
    # Slices are iterated newest-first; each contribution is prepended so
    # resultValues ends up in chronological order.
    for slice in self.slices:
        # If there was a prior slice covering the requested interval, dont ask for that data again
        if (sliceBoundary is not None) and untilTime > sliceBoundary:
            requestUntilTime = sliceBoundary
        else:
            requestUntilTime = untilTime
        # if the requested interval starts after the start of this slice
        if fromTime >= slice.startTime:
            try:
                series = slice.read(fromTime, requestUntilTime)
            except NoData:
                break
            # Reconcile precision differences between slices by aggregating
            # the finer series down to the coarser timeStep.
            if series.timeStep != timeStep:
                if len(resultValues) == 0:
                    # First slice holding series data, this becomes the default timeStep.
                    timeStep = series.timeStep
                elif series.timeStep < timeStep:
                    # Series is at a different precision, aggregate to fit our current set.
                    series.values = aggregateSeries(method, series.timeStep, timeStep, series.values)
                else:
                    # Normalize current set to fit new series data.
                    resultValues = aggregateSeries(method, timeStep, series.timeStep, resultValues)
                    timeStep = series.timeStep
            earliestData = series.startTime
            # Null-pad from where the slice's data ends up to the request end.
            rightMissing = (requestUntilTime - series.endTime) // timeStep
            rightNulls = [None for i in range(rightMissing)]
            resultValues = series.values + rightNulls + resultValues
            break
        # or if slice contains data for part of the requested interval
        elif untilTime >= slice.startTime:
            try:
                series = slice.read(slice.startTime, requestUntilTime)
            except NoData:
                continue
            if series.timeStep != timeStep:
                if len(resultValues) == 0:
                    # First slice holding series data, this becomes the default timeStep.
                    timeStep = series.timeStep
                elif series.timeStep < timeStep:
                    # Series is at a different precision, aggregate to fit our current set.
                    series.values = aggregateSeries(method, series.timeStep, timeStep, series.values)
                else:
                    # Normalize current set to fit new series data.
                    resultValues = aggregateSeries(method, timeStep, series.timeStep, resultValues)
                    timeStep = series.timeStep
            earliestData = series.startTime
            rightMissing = (requestUntilTime - series.endTime) // timeStep
            rightNulls = [None for i in range(rightMissing)]
            resultValues = series.values + rightNulls + resultValues
            # this is the right-side boundary on the next iteration
            sliceBoundary = slice.startTime
    # The end of the requested interval predates all slices
    if earliestData is None:
        missing = int(untilTime - fromTime) // timeStep
        resultValues = [None for i in range(missing)]
    # Left pad nulls if the start of the requested interval predates all slices
    else:
        leftMissing = (earliestData - fromTime) // timeStep
        leftNulls = [None for i in range(leftMissing)]
        resultValues = leftNulls + resultValues
    return TimeSeriesData(fromTime, untilTime, timeStep, resultValues)
def write(self, datapoints):
    """Writes datapoints to underlying slices. Datapoints that round to the same timestamp for the
    node's `timeStep` will be treated as duplicates and dropped.
    :param datapoints: List of datapoint tuples ``[(timestamp, value), ...]``
    """
    if self.timeStep is None:
        self.readMetadata()
    if not datapoints:
        return
    # Group the raw points into contiguous, sorted, de-duplicated sequences.
    sequences = self.compact(datapoints)
    needsEarlierSlice = []  # keep track of sequences that precede all existing slices
    while sequences:
        sequence = sequences.pop()
        timestamps = [t for t, v in sequence]
        beginningTime = timestamps[0]
        endingTime = timestamps[-1]
        sliceBoundary = None  # used to prevent writing sequences across slice boundaries
        slicesExist = False
        # Slices iterate newest-first; walk down until one can take the data.
        for slice in self.slices:
            if slice.timeStep != self.timeStep:
                continue
            slicesExist = True
            # truncate sequence so it doesn't cross the slice boundaries
            if beginningTime >= slice.startTime:
                if sliceBoundary is None:
                    sequenceWithinSlice = sequence
                else:
                    # index of highest timestamp that doesn't exceed sliceBoundary
                    boundaryIndex = bisect_left(timestamps, sliceBoundary)
                    sequenceWithinSlice = sequence[:boundaryIndex]
                try:
                    slice.write(sequenceWithinSlice)
                except SliceGapTooLarge:
                    # Gap from the slice's end is too large to null-pad;
                    # start a fresh slice at this sequence's first timestamp.
                    newSlice = CeresSlice.create(self, beginningTime, slice.timeStep)
                    newSlice.write(sequenceWithinSlice)
                    self.sliceCache = None
                except SliceDeleted:
                    # Slice vanished underneath us (concurrent delete);
                    # drop the cache and retry the whole write once.
                    self.sliceCache = None
                    self.write(datapoints)  # recurse to retry
                    return
                sequence = []
                break
            # sequence straddles the current slice, write the right side
            # left side will be taken up in the next slice down
            elif endingTime >= slice.startTime:
                # index of lowest timestamp that doesn't precede slice.startTime
                boundaryIndex = bisect_left(timestamps, slice.startTime)
                sequenceWithinSlice = sequence[boundaryIndex:]
                # write the leftovers on the next earlier slice
                sequence = sequence[:boundaryIndex]
                slice.write(sequenceWithinSlice)
                if not sequence:
                    break
                sliceBoundary = slice.startTime
        else:  # slice list exhausted with stuff still to write
            needsEarlierSlice.append(sequence)
        if not slicesExist:
            sequences.append(sequence)
            needsEarlierSlice = sequences
            break
    for sequence in needsEarlierSlice:
        slice = CeresSlice.create(self, int(sequence[0][0]), self.timeStep)
        slice.write(sequence)
        self.clearSliceCache()
def compact(self, datapoints):
    """Compacts datapoints into a list of contiguous, sorted lists of points
    with duplicate timestamps and null values removed
    :param datapoints: List of datapoint tuples ``[(timestamp, value), ...]``
    :returns: A list of lists of contiguous sorted datapoint tuples
    ``[[(timestamp, value), ...], ...]``
    """
    # Drop null values, coerce types and order by timestamp before grouping.
    cleaned = sorted(
        ((int(ts), float(val)) for ts, val in datapoints if val is not None),
        key=lambda point: point[0])
    sequences = []
    current = []
    lastInterval = 0  # interval most recently placed; used to spot duplicates
    for ts, val in cleaned:
        ts -= ts % self.timeStep  # snap down to the node's interval grid
        if not current:
            current.append((ts, val))
        elif ts == lastInterval:
            # Duplicate interval: the latest value wins.
            current[-1] = (ts, val)
            continue
        elif ts == current[-1][0] + self.timeStep:
            # Contiguous with the running sequence.
            current.append((ts, val))
        else:
            # Gap: close out the running sequence and start a new one.
            sequences.append(current)
            current = [(ts, val)]
        lastInterval = ts
    if current:
        sequences.append(current)
    return sequences
class CeresSlice(object):
    """A single fixed-resolution run of datapoints backed by one
    ``<startTime>@<timeStep>.slice`` file inside a node directory.

    The file is a packed array of big-endian doubles; a datapoint's byte
    offset is determined solely by its timestamp, and gaps are stored as NaN.
    """
    __slots__ = ('node', 'startTime', 'timeStep', 'fsPath')

    def __init__(self, node, startTime, timeStep):
        self.node = node
        self.startTime = startTime
        self.timeStep = timeStep
        self.fsPath = join(node.fsPath, '%d@%d.slice' % (startTime, timeStep))

    def __repr__(self):
        return "<CeresSlice[0x%x]: %s>" % (id(self), self.fsPath)
    __str__ = __repr__

    @property
    def isEmpty(self):
        # True when the slice file holds no datapoints at all.
        return getsize(self.fsPath) == 0

    @property
    def endTime(self):
        # One interval past the last stored datapoint, derived from file size.
        return self.startTime + ((getsize(self.fsPath) // DATAPOINT_SIZE) * self.timeStep)

    @property
    def mtime(self):
        return getmtime(self.fsPath)

    @classmethod
    def create(cls, node, startTime, timeStep):
        """Create an empty slice file on disk and return the slice object."""
        slice = cls(node, startTime, timeStep)
        fileHandle = open(slice.fsPath, 'wb')
        fileHandle.close()
        os.chmod(slice.fsPath, SLICE_PERMS)
        return slice

    def read(self, fromTime, untilTime):
        """Read the requested window as a :class:`TimeSeriesData`.

        :raises InvalidRequest: if *fromTime* precedes this slice's start
        :raises NoData: if the window starts past the end of the file
        """
        timeOffset = int(fromTime) - self.startTime
        if timeOffset < 0:
            raise InvalidRequest("requested time range (%d, %d) precedes this slice: %d" % (
                fromTime, untilTime, self.startTime))
        pointOffset = timeOffset // self.timeStep
        byteOffset = pointOffset * DATAPOINT_SIZE
        if byteOffset >= getsize(self.fsPath):
            raise NoData()
        with open(self.fsPath, 'rb') as fileHandle:
            fileHandle.seek(byteOffset)
            timeRange = int(untilTime - fromTime)
            pointRange = timeRange // self.timeStep
            byteRange = pointRange * DATAPOINT_SIZE
            packedValues = fileHandle.read(byteRange)
            # The file may end before the requested window does; return only
            # as many points as were actually stored.
            pointsReturned = len(packedValues) // DATAPOINT_SIZE
            format = '!' + ('d' * pointsReturned)
            values = struct.unpack(format, packedValues)
            # NaN is the on-disk representation of a missing datapoint.
            values = [v if not isnan(v) else None for v in values]
            endTime = fromTime + (len(values) * self.timeStep)
        # print '[DEBUG slice.read] startTime=%s fromTime=%s untilTime=%s' % (
        #   self.startTime, fromTime, untilTime)
        # print '[DEBUG slice.read] timeInfo = (%s, %s, %s)' % (fromTime, endTime, self.timeStep)
        # print '[DEBUG slice.read] values = %s' % str(values)
        return TimeSeriesData(fromTime, endTime, self.timeStep, values)

    def write(self, sequence):
        """Write a contiguous, sorted datapoint sequence into the file.

        :raises SliceGapTooLarge: if NaN-padding the gap from the file's end
            would exceed MAX_SLICE_GAP points
        :raises SliceDeleted: if the backing file has disappeared
        """
        beginningTime = sequence[0][0]
        timeOffset = beginningTime - self.startTime
        pointOffset = timeOffset // self.timeStep
        byteOffset = pointOffset * DATAPOINT_SIZE
        values = [v for t, v in sequence]
        format = '!' + ('d' * len(values))
        packedValues = struct.pack(format, *values)
        try:
            filesize = getsize(self.fsPath)
        except OSError as e:
            if e.errno == errno.ENOENT:
                raise SliceDeleted()
            else:
                raise
        byteGap = byteOffset - filesize
        if byteGap > 0:  # pad the allowable gap with nan's
            pointGap = byteGap // DATAPOINT_SIZE
            if pointGap > MAX_SLICE_GAP:
                raise SliceGapTooLarge()
            else:
                packedGap = PACKED_NAN * pointGap
                packedValues = packedGap + packedValues
                byteOffset -= byteGap
        with open(self.fsPath, 'r+b') as fileHandle:
            if LOCK_WRITES:
                fcntl.flock(fileHandle.fileno(), fcntl.LOCK_EX)
            try:
                fileHandle.seek(byteOffset)
            except IOError:
                # print " IOError: fsPath=%s byteOffset=%d size=%d sequence=%s" % (
                #   self.fsPath, byteOffset, filesize, sequence)
                raise
            fileHandle.write(packedValues)

    def deleteBefore(self, t):
        """Drop all datapoints before time *t*, renaming the file to its new
        start time, or remove the slice entirely if nothing remains.

        :raises SliceDeleted: if the file is already gone, or once this call
            unlinks the now-empty slice
        """
        if not exists(self.fsPath):
            raise SliceDeleted()
        if t % self.timeStep != 0:
            # Round t up to the next interval boundary.
            t = t - (t % self.timeStep) + self.timeStep
        timeOffset = t - self.startTime
        if timeOffset < 0:
            return
        pointOffset = timeOffset // self.timeStep
        byteOffset = pointOffset * DATAPOINT_SIZE
        if not byteOffset:
            return
        self.node.clearSliceCache()
        with open(self.fsPath, 'r+b') as fileHandle:
            if LOCK_WRITES:
                fcntl.flock(fileHandle.fileno(), fcntl.LOCK_EX)
            fileHandle.seek(byteOffset)
            fileData = fileHandle.read()
            if fileData:
                # Shift the surviving tail to the front, truncate, then
                # rename the file to reflect its new start time.
                fileHandle.seek(0)
                fileHandle.write(fileData)
                fileHandle.truncate()
                fileHandle.close()
                newFsPath = join(dirname(self.fsPath), "%d@%d.slice" % (t, self.timeStep))
                os.rename(self.fsPath, newFsPath)
            else:
                os.unlink(self.fsPath)
                raise SliceDeleted()

    def __lt__(self, other):
        # Order slices by start time (supports sorting slice lists).
        return self.startTime < other.startTime
class TimeSeriesData(object):
    """A single contiguous time-series: ``values[i]`` holds the datapoint at
    ``startTime + i * timeStep``; ``None`` marks a missing point."""
    __slots__ = ('startTime', 'endTime', 'timeStep', 'values')

    def __init__(self, startTime, endTime, timeStep, values):
        self.startTime = startTime
        self.endTime = endTime
        self.timeStep = timeStep
        self.values = values

    @property
    def timestamps(self):
        # One timestamp per slot, endTime excluded.
        return range(self.startTime, self.endTime, self.timeStep)

    def __iter__(self):
        # Lazily pair each timestamp with its value.
        return izip(self.timestamps, self.values)

    def __len__(self):
        return len(self.values)

    def merge(self, other):
        """Fill gaps (None slots) in this series with values from *other*.
        Existing values are never overwritten; out-of-range points ignored."""
        for ts, val in other:
            if val is None:
                continue
            aligned = ts - (ts % self.timeStep)
            if aligned < self.startTime:
                continue
            slot = int((aligned - self.startTime) // self.timeStep)
            try:
                if self.values[slot] is None:
                    self.values[slot] = val
            except IndexError:
                continue
class CorruptNode(Exception):
    """Raised when a node's on-disk state is inconsistent.

    Carries the offending node and a human-readable problem description.
    """

    def __init__(self, node, problem):
        super(CorruptNode, self).__init__(problem)
        self.node = node
        self.problem = problem
class NoData(Exception):
    # Raised by slice reads when the requested window starts past the
    # end of the slice file.
    pass
class NodeNotFound(Exception):
    # Raised when a requested node path does not exist.
    pass
class NodeDeleted(Exception):
    # Raised when a node's directory disappears out from under a reader.
    pass
class InvalidRequest(Exception):
    # Raised for read requests that precede the slice's start time.
    pass
class InvalidAggregationMethod(Exception):
    # Raised by aggregate() for an unrecognized method name.
    pass
class SliceGapTooLarge(Exception):
    "For internal use only"
class SliceDeleted(Exception):
    # Raised when a slice's backing file vanishes (concurrent deletion).
    pass
def aggregate(aggregationMethod, values):
    """Aggregate *values* into a single value using *aggregationMethod*.

    :param aggregationMethod: One of 'average', 'sum', 'last', 'max', 'min'
    :param values: Iterable of numbers; ``None`` entries are ignored
    :returns: The aggregated value, or ``None`` when no non-None values exist
    :raises InvalidAggregationMethod: for an unrecognized method name
    """
    # Filter out None values
    knownValues = [v for v in values if v is not None]
    # BUG FIX: the original tested "len(knownValues) is 0" -- an identity
    # comparison against an int, which is implementation-defined behavior.
    if not knownValues:
        return None
    # Aggregate based on method
    if aggregationMethod == 'average':
        return float(sum(knownValues)) / float(len(knownValues))
    elif aggregationMethod == 'sum':
        return float(sum(knownValues))
    elif aggregationMethod == 'last':
        return knownValues[-1]
    elif aggregationMethod == 'max':
        return max(knownValues)
    elif aggregationMethod == 'min':
        return min(knownValues)
    else:
        raise InvalidAggregationMethod("Unrecognized aggregation method %s" %
                                       aggregationMethod)
def aggregateSeries(method, oldTimeStep, newTimeStep, values):
    """Downsample *values* from *oldTimeStep* to the coarser *newTimeStep*.

    Assumes the caller has already guaranteed ``newTimeStep`` is bigger than
    ``oldTimeStep``. Each output point aggregates ``newTimeStep // oldTimeStep``
    input points; a trailing partial bucket is aggregated as-is.
    """
    bucketSize = int(newTimeStep // oldTimeStep)
    aggregated = []
    bucket = []
    for value in values:
        bucket.append(value)
        if len(bucket) == bucketSize:
            aggregated.append(aggregate(method, bucket))
            bucket = []
    if bucket:
        aggregated.append(aggregate(method, bucket))
    return aggregated
def getTree(path):
    """Walk upward from *path* looking for a '.ceres-tree' marker directory.

    :returns: a :class:`CeresTree` rooted there, or ``None`` if none is found.
    """
    current = path
    while current not in (os.sep, ''):
        if isdir(join(current, '.ceres-tree')):
            return CeresTree(current)
        current = dirname(current)
def setDefaultNodeCachingBehavior(behavior):
    """Set the module-wide default node caching behavior ('none' or 'all')."""
    global DEFAULT_NODE_CACHING_BEHAVIOR
    normalized = behavior.lower()
    if normalized not in ('none', 'all'):
        raise ValueError("invalid caching behavior '%s'" % normalized)
    DEFAULT_NODE_CACHING_BEHAVIOR = normalized
def setDefaultSliceCachingBehavior(behavior):
    """Set the module-wide default slice caching behavior
    ('none', 'all' or 'latest')."""
    global DEFAULT_SLICE_CACHING_BEHAVIOR
    normalized = behavior.lower()
    if normalized not in ('none', 'all', 'latest'):
        raise ValueError("invalid caching behavior '%s'" % normalized)
    DEFAULT_SLICE_CACHING_BEHAVIOR = normalized
| {
"content_hash": "d57ed0788131e25f9370883c21a638b2",
"timestamp": "",
"source": "github",
"line_count": 952,
"max_line_length": 100,
"avg_line_length": 32.58298319327731,
"alnum_prop": 0.6658499629259486,
"repo_name": "graphite-project/ceres",
"id": "5a61134864abb17fb347b2dfeecc299c52ad81ed",
"size": "31634",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ceres.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "114920"
}
],
"symlink_target": ""
} |
import proto # type: ignore
from google.ads.googleads.v12.common.types import tag_snippet
from google.ads.googleads.v12.enums.types import (
attribution_model as gage_attribution_model,
)
from google.ads.googleads.v12.enums.types import conversion_action_category
from google.ads.googleads.v12.enums.types import conversion_action_counting_type
from google.ads.googleads.v12.enums.types import conversion_action_status
from google.ads.googleads.v12.enums.types import conversion_action_type
from google.ads.googleads.v12.enums.types import conversion_origin
from google.ads.googleads.v12.enums.types import (
data_driven_model_status as gage_data_driven_model_status,
)
from google.ads.googleads.v12.enums.types import (
mobile_app_vendor as gage_mobile_app_vendor,
)
# proto-plus module descriptor: registers the ConversionAction message under
# the v12 "resources" proto package for marshalling.
__protobuf__ = proto.module(
    package="google.ads.googleads.v12.resources",
    marshal="google.ads.googleads.v12",
    manifest={"ConversionAction",},
)
# NOTE(review): proto-plus GAPIC-style message definition -- this module
# appears machine-generated; prefer regenerating from the .proto source over
# hand-editing.
class ConversionAction(proto.Message):
    r"""A conversion action.

    Attributes:
        resource_name (str):
            Immutable. The resource name of the conversion action.
            Conversion action resource names have the form:
            ``customers/{customer_id}/conversionActions/{conversion_action_id}``
        id (int):
            Output only. The ID of the conversion action.

            This field is a member of `oneof`_ ``_id``.
        name (str):
            The name of the conversion action.
            This field is required and should not be empty
            when creating new conversion actions.

            This field is a member of `oneof`_ ``_name``.
        status (google.ads.googleads.v12.enums.types.ConversionActionStatusEnum.ConversionActionStatus):
            The status of this conversion action for
            conversion event accrual.
        type_ (google.ads.googleads.v12.enums.types.ConversionActionTypeEnum.ConversionActionType):
            Immutable. The type of this conversion
            action.
        origin (google.ads.googleads.v12.enums.types.ConversionOriginEnum.ConversionOrigin):
            Output only. The conversion origin of this
            conversion action.
        primary_for_goal (bool):
            If a conversion action's primary_for_goal bit is false, the
            conversion action is non-biddable for all campaigns
            regardless of their customer conversion goal or campaign
            conversion goal. However, custom conversion goals do not
            respect primary_for_goal, so if a campaign has a custom
            conversion goal configured with a primary_for_goal = false
            conversion action, that conversion action is still biddable.
            By default, primary_for_goal will be true if not set. In V9,
            primary_for_goal can only be set to false after creation
            through an 'update' operation because it's not declared as
            optional.

            This field is a member of `oneof`_ ``_primary_for_goal``.
        category (google.ads.googleads.v12.enums.types.ConversionActionCategoryEnum.ConversionActionCategory):
            The category of conversions reported for this
            conversion action.
        owner_customer (str):
            Output only. The resource name of the
            conversion action owner customer, or null if
            this is a system-defined conversion action.

            This field is a member of `oneof`_ ``_owner_customer``.
        include_in_conversions_metric (bool):
            Whether this conversion action should be
            included in the "conversions" metric.

            This field is a member of `oneof`_ ``_include_in_conversions_metric``.
        click_through_lookback_window_days (int):
            The maximum number of days that may elapse
            between an interaction (for example, a click)
            and a conversion event.

            This field is a member of `oneof`_ ``_click_through_lookback_window_days``.
        view_through_lookback_window_days (int):
            The maximum number of days which may elapse
            between an impression and a conversion without
            an interaction.

            This field is a member of `oneof`_ ``_view_through_lookback_window_days``.
        value_settings (google.ads.googleads.v12.resources.types.ConversionAction.ValueSettings):
            Settings related to the value for conversion
            events associated with this conversion action.
        counting_type (google.ads.googleads.v12.enums.types.ConversionActionCountingTypeEnum.ConversionActionCountingType):
            How to count conversion events for the
            conversion action.
        attribution_model_settings (google.ads.googleads.v12.resources.types.ConversionAction.AttributionModelSettings):
            Settings related to this conversion action's
            attribution model.
        tag_snippets (Sequence[google.ads.googleads.v12.common.types.TagSnippet]):
            Output only. The snippets used for tracking
            conversions.
        phone_call_duration_seconds (int):
            The phone call duration in seconds after
            which a conversion should be reported for this
            conversion action.
            The value must be between 0 and 10000,
            inclusive.

            This field is a member of `oneof`_ ``_phone_call_duration_seconds``.
        app_id (str):
            App ID for an app conversion action.

            This field is a member of `oneof`_ ``_app_id``.
        mobile_app_vendor (google.ads.googleads.v12.enums.types.MobileAppVendorEnum.MobileAppVendor):
            Output only. Mobile app vendor for an app
            conversion action.
        firebase_settings (google.ads.googleads.v12.resources.types.ConversionAction.FirebaseSettings):
            Output only. Firebase settings for Firebase
            conversion types.
        third_party_app_analytics_settings (google.ads.googleads.v12.resources.types.ConversionAction.ThirdPartyAppAnalyticsSettings):
            Output only. Third Party App Analytics
            settings for third party conversion types.
    """

    class AttributionModelSettings(proto.Message):
        r"""Settings related to this conversion action's attribution
        model.

        Attributes:
            attribution_model (google.ads.googleads.v12.enums.types.AttributionModelEnum.AttributionModel):
                The attribution model type of this conversion
                action.
            data_driven_model_status (google.ads.googleads.v12.enums.types.DataDrivenModelStatusEnum.DataDrivenModelStatus):
                Output only. The status of the data-driven
                attribution model for the conversion action.
        """

        attribution_model = proto.Field(
            proto.ENUM,
            number=1,
            enum=gage_attribution_model.AttributionModelEnum.AttributionModel,
        )
        data_driven_model_status = proto.Field(
            proto.ENUM,
            number=2,
            enum=gage_data_driven_model_status.DataDrivenModelStatusEnum.DataDrivenModelStatus,
        )

    class ValueSettings(proto.Message):
        r"""Settings related to the value for conversion events
        associated with this conversion action.

        Attributes:
            default_value (float):
                The value to use when conversion events for
                this conversion action are sent with an invalid,
                disallowed or missing value, or when this
                conversion action is configured to always use
                the default value.

                This field is a member of `oneof`_ ``_default_value``.
            default_currency_code (str):
                The currency code to use when conversion
                events for this conversion action are sent with
                an invalid or missing currency code, or when
                this conversion action is configured to always
                use the default value.

                This field is a member of `oneof`_ ``_default_currency_code``.
            always_use_default_value (bool):
                Controls whether the default value and
                default currency code are used in place of the
                value and currency code specified in conversion
                events for this conversion action.

                This field is a member of `oneof`_ ``_always_use_default_value``.
        """

        default_value = proto.Field(proto.DOUBLE, number=4, optional=True,)
        default_currency_code = proto.Field(
            proto.STRING, number=5, optional=True,
        )
        always_use_default_value = proto.Field(
            proto.BOOL, number=6, optional=True,
        )

    class ThirdPartyAppAnalyticsSettings(proto.Message):
        r"""Settings related to a third party app analytics conversion
        action.

        Attributes:
            event_name (str):
                Output only. The event name of a third-party
                app analytics conversion.

                This field is a member of `oneof`_ ``_event_name``.
            provider_name (str):
                Output only. Name of the third-party app
                analytics provider.
        """

        event_name = proto.Field(proto.STRING, number=2, optional=True,)
        provider_name = proto.Field(proto.STRING, number=3,)

    class FirebaseSettings(proto.Message):
        r"""Settings related to a Firebase conversion action.

        Attributes:
            event_name (str):
                Output only. The event name of a Firebase
                conversion.

                This field is a member of `oneof`_ ``_event_name``.
            project_id (str):
                Output only. The Firebase project ID of the
                conversion.

                This field is a member of `oneof`_ ``_project_id``.
            property_id (int):
                Output only. The GA property ID of the
                conversion.
            property_name (str):
                Output only. The GA property name of the
                conversion.
        """

        event_name = proto.Field(proto.STRING, number=3, optional=True,)
        project_id = proto.Field(proto.STRING, number=4, optional=True,)
        property_id = proto.Field(proto.INT64, number=5,)
        property_name = proto.Field(proto.STRING, number=6,)

    resource_name = proto.Field(proto.STRING, number=1,)
    id = proto.Field(proto.INT64, number=21, optional=True,)
    name = proto.Field(proto.STRING, number=22, optional=True,)
    status = proto.Field(
        proto.ENUM,
        number=4,
        enum=conversion_action_status.ConversionActionStatusEnum.ConversionActionStatus,
    )
    type_ = proto.Field(
        proto.ENUM,
        number=5,
        enum=conversion_action_type.ConversionActionTypeEnum.ConversionActionType,
    )
    origin = proto.Field(
        proto.ENUM,
        number=30,
        enum=conversion_origin.ConversionOriginEnum.ConversionOrigin,
    )
    primary_for_goal = proto.Field(proto.BOOL, number=31, optional=True,)
    category = proto.Field(
        proto.ENUM,
        number=6,
        enum=conversion_action_category.ConversionActionCategoryEnum.ConversionActionCategory,
    )
    owner_customer = proto.Field(proto.STRING, number=23, optional=True,)
    include_in_conversions_metric = proto.Field(
        proto.BOOL, number=24, optional=True,
    )
    click_through_lookback_window_days = proto.Field(
        proto.INT64, number=25, optional=True,
    )
    view_through_lookback_window_days = proto.Field(
        proto.INT64, number=26, optional=True,
    )
    value_settings = proto.Field(
        proto.MESSAGE, number=11, message=ValueSettings,
    )
    counting_type = proto.Field(
        proto.ENUM,
        number=12,
        enum=conversion_action_counting_type.ConversionActionCountingTypeEnum.ConversionActionCountingType,
    )
    attribution_model_settings = proto.Field(
        proto.MESSAGE, number=13, message=AttributionModelSettings,
    )
    tag_snippets = proto.RepeatedField(
        proto.MESSAGE, number=14, message=tag_snippet.TagSnippet,
    )
    phone_call_duration_seconds = proto.Field(
        proto.INT64, number=27, optional=True,
    )
    app_id = proto.Field(proto.STRING, number=28, optional=True,)
    mobile_app_vendor = proto.Field(
        proto.ENUM,
        number=17,
        enum=gage_mobile_app_vendor.MobileAppVendorEnum.MobileAppVendor,
    )
    firebase_settings = proto.Field(
        proto.MESSAGE, number=18, message=FirebaseSettings,
    )
    third_party_app_analytics_settings = proto.Field(
        proto.MESSAGE, number=19, message=ThirdPartyAppAnalyticsSettings,
    )
__all__ = tuple(sorted(__protobuf__.manifest))
| {
"content_hash": "0c437433c4fe75761c76f3c0f2ce0bcc",
"timestamp": "",
"source": "github",
"line_count": 302,
"max_line_length": 134,
"avg_line_length": 42.420529801324506,
"alnum_prop": 0.6449925844976973,
"repo_name": "googleads/google-ads-python",
"id": "f749a0eea329ece290906336abb8f8de7fe81003",
"size": "13411",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/ads/googleads/v12/resources/types/conversion_action.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "23399881"
}
],
"symlink_target": ""
} |
import os
import argparse
import datetime
import logging
import urlparse
import codecs
import gzip
import pymongo
from logger.ratchet import Local
from logger.accesschecker import AccessChecker
from logger import utils
logger = logging.getLogger(__name__)
# Non-zero enables the double-click filtering applied in Bulk.read_log
# (presumably per the COUNTER Code of Practice -- TODO confirm).
COUNTER_COMPLIANT = int(
    utils.settings.get('counter_compliant', 1))
# Directory where filtered ("skipped") accesses are logged, or None to
# disable that log entirely.
COUNTER_COMPLIANT_SKIPPED_LOG_DIR = utils.settings.get(
    'counter_compliant_skipped_log_dir', None)
# Base MongoDB URIs; Bulk appends "_<collection>" to MONGO_URI.
MONGO_URI = utils.settings.get(
    'mongo_uri', 'mongodb://127.0.0.1:27017/database_name')
MONGO_URI_COUNTER = utils.settings.get(
    'mongo_uri_counter', 'mongodb://127.0.0.1:27017/database_name')
# Directory containing the apache access log files to ingest.
LOGS_SOURCE = utils.settings.get(
    'logs_source', '.')
def _config_logging(logging_level='INFO', logging_file=None):
    """Attach a stream or file handler to the module logger.

    :param logging_level: one of DEBUG/INFO/WARNING/ERROR/CRITICAL
    :param logging_file: append to this file when given, else log to stderr
    """
    allowed_levels = {
        'DEBUG': logging.DEBUG,
        'INFO': logging.INFO,
        'WARNING': logging.WARNING,
        'ERROR': logging.ERROR,
        'CRITICAL': logging.CRITICAL,
    }
    # Unknown level names fall back to the string 'INFO', which the logging
    # module resolves itself.
    level = allowed_levels.get(logging_level, 'INFO')
    handler = (logging.FileHandler(logging_file, mode='a') if logging_file
               else logging.StreamHandler())
    handler.setFormatter(logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
    handler.setLevel(level)
    logger.setLevel(level)
    logger.addHandler(handler)
class Bulk(object):
    """Bulk-load apache access log files into Ratchet.

    Skips files already recorded as processed and optionally applies
    COUNTER-style double-click filtering. Use as a context manager so the
    skipped-accesses log file is opened and closed deterministically.
    """

    def __init__(self, collection, mongo_uri=MONGO_URI, logs_source=LOGS_SOURCE, counter_compliant=None, skipped_log_dir=None):
        # Each collection gets its own mongo database (URI suffix).
        self._mongo_uri = "%s_%s" % (mongo_uri, collection)
        self._proc_coll = self.get_proc_collection()
        self._collection = collection
        self._logs_source = logs_source
        self._counter_compliant = counter_compliant
        # Timed set used to detect "double clicks" per ip/code/script.
        self._ts = utils.TimedSet(expired=utils.checkdatelock)
        self._skipped_log_dir = skipped_log_dir
        self._skipped_log = None
        self._ac = AccessChecker(self._collection)

    def __enter__(self):
        if self._skipped_log_dir:
            now = datetime.datetime.now().isoformat()
            skipped_log = '/'.join([self._skipped_log_dir, now]).replace('//', '/')
            try:
                self._skipped_log = open(skipped_log, 'w')
            except (IOError, OSError):
                # BUG FIX: the original raised a bare string (a TypeError at
                # runtime, string exceptions are not allowed) and caught
                # ValueError, which open() never raises.
                raise ValueError("Invalid directory or file name: %s" % skipped_log)
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self._ts = None
        self._ac = None
        if self._skipped_log:
            self._skipped_log.close()

    def _mongodb_connect(self, mdb_database):
        """Return the *mdb_database* collection from this instance's URI."""
        db_url = urlparse.urlparse(self._mongo_uri)
        conn = pymongo.MongoClient(host=db_url.hostname, port=db_url.port)
        db = conn[db_url.path[1:]]
        if db_url.username and db_url.password:
            db.authenticate(db_url.username, db_url.password)
        return db[mdb_database]

    def get_proc_collection(self):
        """
        The proc collection is a mongodb database that keeps the name of each
        processed file, to avoid processing these files again.
        """
        coll = self._mongodb_connect('proc_files')
        coll.ensure_index('file_name')
        return coll

    def write_skipped_log(self, line):
        # No-op when no skipped-log directory was configured.
        if self._skipped_log:
            self._skipped_log.write("%s \r\n" % line)

    def read_log(self, logfile):
        """Parse one access log file and register its accesses in Ratchet.

        Files already present in the proc_files collection are skipped.
        :param logfile: path to a plain or gzipped apache access log
        :returns: None
        """
        logfile = logfile.strip()
        # Skip files that were already processed.
        if self._proc_coll.find({'file_name': logfile}).count() > 0:
            logger.info('File already processed %s' % logfile)
            return None
        reader = codecs
        if utils.check_file_format(logfile) == 'gzip':
            reader = gzip
        # Record the file as processed before reading it.
        logger.info("Processing: %s" % logfile)
        self._proc_coll.insert({'file_name': logfile})
        # BUG FIX: the constructor's counter_compliant parameter was stored
        # but never consulted; honor it, falling back to the module default.
        counter_mode = COUNTER_COMPLIANT if self._counter_compliant is None else self._counter_compliant
        with reader.open(logfile, 'rb') as f:
            with Local(self._mongo_uri, self._collection) as rq:
                log_file_line = 0
                for raw_line in f:
                    log_file_line += 1
                    logger.debug("Reading line {0} from file {1}".format(
                        str(log_file_line), logfile))
                    logger.debug(raw_line)
                    try:
                        parsed_line = self._ac.parsed_access(raw_line)
                    except ValueError as e:
                        logger.error("%s: %s" % (e.message, raw_line))
                        continue
                    if not parsed_line:
                        continue
                    if counter_mode:
                        # Counter Mode Accesses: ignore repeated hits from
                        # the same ip/code/script inside the lock window.
                        locktime = 10
                        if parsed_line['access_type'] == "PDF":
                            locktime = 30
                        try:
                            lockid = '_'.join([parsed_line['ip'],
                                               parsed_line['code'],
                                               parsed_line['script']])
                            self._ts.add(lockid, parsed_line['iso_datetime'], locktime)
                            rq.register_access(parsed_line)
                        except ValueError:
                            self.write_skipped_log('; '.join([lockid, parsed_line['original_date'], parsed_line['original_agent']]))
                            continue
                    else:
                        # SciELO Mode Accesses: register unconditionally.
                        rq.register_access(parsed_line)
                rq.send()

    def run(self):
        """Process every file in the configured logs directory."""
        # NOTE(review): shells out to `ls`; os.listdir/glob would avoid the
        # subprocess, but the output format (trailing newlines stripped by
        # read_log) is preserved as-is.
        for logfile in os.popen('ls %s/*' % self._logs_source):
            self.read_log(logfile)
def main():
    """Command-line entry point: parse arguments, configure logging and run
    the bulk loader over the given logs directory."""
    parser = argparse.ArgumentParser(
        description="Run the processing to read the access log files and register accesses into Ratchet"
    )
    parser.add_argument(
        '-c',
        '--collection',
        default=None,
        help='Three letters collection id'
    )
    parser.add_argument(
        '--logs_source',
        '-s',
        help='Full path to the directory with apache log files'
    )
    parser.add_argument(
        '--logging_file',
        '-o',
        help='Full path to the log file'
    )
    parser.add_argument(
        '--logging_level',
        '-l',
        default='DEBUG',
        choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
        help='Logging level'  # typo fix: was 'Logggin level'
    )
    args = parser.parse_args()
    _config_logging(args.logging_level, args.logging_file)
    bk = Bulk(
        collection=args.collection,
        mongo_uri=MONGO_URI,
        logs_source=args.logs_source,
        counter_compliant=COUNTER_COMPLIANT,
        skipped_log_dir=COUNTER_COMPLIANT_SKIPPED_LOG_DIR
    )
    bk.run()
| {
"content_hash": "a44c1ce5b34d71e6b0c9dd9374fb7e39",
"timestamp": "",
"source": "github",
"line_count": 212,
"max_line_length": 132,
"avg_line_length": 31.995283018867923,
"alnum_prop": 0.5532950022114109,
"repo_name": "fabiobatalha/Logger",
"id": "24a799a19644d6eea349a6c280d67a5f46ef1636",
"size": "6822",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "logger/scielo.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "137907"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function, unicode_literals
from guessit.test.guessittest import *
class TestMovie(TestGuessit):
    # Fixture-driven check: every entry in movies.yaml must be guessed with
    # at least the minimum expected fields for the 'movie' filetype.
    def testMovies(self):
        self.checkMinimumFieldsCorrect(filetype='movie',
                                       filename='movies.yaml')
| {
"content_hash": "5739ea8b6019078fc01897c3d4bca5d1",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 82,
"avg_line_length": 33.44444444444444,
"alnum_prop": 0.6644518272425249,
"repo_name": "Kallehz/Python",
"id": "c0e28de420298f1c3f13f9a7e3a6047ac215922f",
"size": "1114",
"binary": false,
"copies": "33",
"ref": "refs/heads/master",
"path": "Verkefni 4/guessit/test/test_movie.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "267463"
},
{
"name": "Python",
"bytes": "475606"
},
{
"name": "Tcl",
"bytes": "3559"
}
],
"symlink_target": ""
} |
from os.path import abspath, dirname, join
from tornado.ioloop import IOLoop
from tornado.web import StaticFileHandler
from tornado.websocket import WebSocketHandler, WebSocketClosedError
from .nt_serial import NTSerial
import logging
logger = logging.getLogger("net2js")
__all__ = ["get_handlers", "NetworkTablesWebSocket", "NonCachingStaticFileHandler"]
class NetworkTablesWebSocket(WebSocketHandler):
    """
    A tornado web handler that forwards values between NetworkTables
    and a webpage via a websocket.

    Lifecycle: open() creates an NTSerial bridge; incoming websocket
    messages are fed to it, and data it produces is written back to the
    socket via the IOLoop (so writes can be scheduled from other threads).
    """

    # Per-connection bridge to NetworkTables; created in open().
    ntserial = None

    def open(self):
        logger.info("NetworkTables websocket opened")
        # Capture the loop serving this connection so send_msg_threadsafe
        # can hop onto it from non-IOLoop threads.
        self.ioloop = IOLoop.current()
        self.ntserial = NTSerial(self.send_msg_threadsafe)

    def check_origin(self, origin):
        """
        Allow CORS requests (any origin may open the websocket).
        """
        return True

    def on_message(self, message):
        # Forward raw websocket payloads to the NetworkTables bridge.
        if self.ntserial is not None:
            self.ntserial.process_update(message)

    def send_msg(self, msg):
        try:
            self.write_message(msg, binary=True)
        except WebSocketClosedError:
            # Peer can disconnect at any moment; drop the message and log.
            logger.warning("websocket closed when sending message")

    def send_msg_threadsafe(self, data):
        # NOTE(review): presumably invoked from NTSerial's worker thread;
        # add_callback is tornado's thread-safe way to schedule the write.
        self.ioloop.add_callback(self.send_msg, data)

    def on_close(self):
        logger.info("NetworkTables websocket closed")
        # Tear down the bridge so it stops producing messages.
        if self.ntserial is not None:
            self.ntserial.close()
class NonCachingStaticFileHandler(StaticFileHandler):
    """Static file handler with HTTP caching disabled.

    Handy while developing a Dashboard: browsers always refetch assets
    instead of serving stale cached copies.
    """

    def set_extra_headers(self, path):
        # Instruct clients and proxies never to cache any served file.
        self.set_header(
            "Cache-Control", "no-store, no-cache, must-revalidate, max-age=0"
        )

    # Tornado's etag handling is broken for this use case, so never
    # report a matching etag.
    def check_etag_header(self):
        return False
def get_handlers():
    """Return tornado handler tuples for NetworkTables support.

    Concatenate the returned list with your own handlers when constructing
    the ``tornado.web.Application``; it wires up the NetworkTables
    websocket and serves the javascript needed to use it.

    Example usage::

        import pynetworktables2js
        import tornado.web
        ...
        app = tornado.web.Application(
            pynetworktables2js.get_handlers() + [
                # tornado handlers here
            ])
    """
    js_dir = abspath(join(dirname(__file__), "js"))
    return [
        ("/networktables/ws", NetworkTablesWebSocket),
        ("/networktables/(.*)", NonCachingStaticFileHandler, {"path": js_dir}),
    ]
| {
"content_hash": "c1134fc3720ce8d851b29eacdb943482",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 83,
"avg_line_length": 28.09375,
"alnum_prop": 0.6218020022246941,
"repo_name": "amorygalili/pynetworktables2js",
"id": "5f9bb3830c3747926c52cefee34e5b6b51d272c6",
"size": "2697",
"binary": false,
"copies": "1",
"ref": "refs/heads/add-connect-function",
"path": "pynetworktables2js/tornado_handlers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "6459"
},
{
"name": "JavaScript",
"bytes": "23241"
},
{
"name": "Python",
"bytes": "19599"
},
{
"name": "Shell",
"bytes": "411"
}
],
"symlink_target": ""
} |
"""
Created on 2017-9-25
@author: cheng.li
"""
import unittest
from collections import deque
import numpy as np
import pandas as pd
from alphamind.execution.pipeline import ExecutionPipeline
from alphamind.execution.targetvolexecutor import TargetVolExecutor
from alphamind.execution.thresholdexecutor import ThresholdExecutor
class TestExecutionPipeline(unittest.TestCase):
    """Smoke test: chain a target-vol executor with a threshold executor."""

    def test_execution_pipeline(self):
        # n assets, a vol-estimation window, a volatility target and a
        # turn-over threshold for the two chained executors.
        n = 100
        window = 60
        target_vol = 0.01
        turn_over_threshold = 0.5
        executor1 = TargetVolExecutor(window=window, target_vol=target_vol)
        executor2 = ThresholdExecutor(turn_over_threshold=turn_over_threshold)
        execution_pipeline = ExecutionPipeline(executors=[executor1, executor2])
        # Two return regimes: 2000 low-vol days then 2000 high-vol days.
        return_1 = np.random.randn(2000, n) * 0.05
        return_2 = np.random.randn(2000, n) * 0.2
        return_total = np.concatenate((return_1, return_2))
        codes = np.array(list(range(n)))
        ret_deq = deque(maxlen=window)  # NOTE(review): filled but never read back
        for i, row in enumerate(return_total):
            # Random target weights, normalized to sum to 1.
            weights = np.random.randint(0, 100, n)
            weights = weights / weights.sum()
            pos = pd.DataFrame({'code': codes, 'weight': weights})
            turn_over, executed_pos = execution_pipeline.execute(pos)
            daily_return = row @ executed_pos.weight.values.flatten()
            data_dict = {'return': daily_return}
            execution_pipeline.update(data_dict=data_dict)
            ret_deq.append(daily_return)
        # No assertions: this is a run-to-completion smoke test.
| {
"content_hash": "595f214fd85bc9156d1447b020678e0c",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 80,
"avg_line_length": 32.58695652173913,
"alnum_prop": 0.6657771847898599,
"repo_name": "wegamekinglc/alpha-mind",
"id": "032e71250da837b302ec44b28b732d0dab23ee54",
"size": "1523",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "alphamind/tests/execution/test_pipeline.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "300425"
},
{
"name": "C++",
"bytes": "8627875"
},
{
"name": "CMake",
"bytes": "102"
},
{
"name": "Jupyter Notebook",
"bytes": "2257816"
},
{
"name": "Objective-C",
"bytes": "85"
},
{
"name": "Python",
"bytes": "323052"
},
{
"name": "SourcePawn",
"bytes": "2021"
}
],
"symlink_target": ""
} |
"""Convolution methods on CPU."""
# These are extremely slow.
# Their main purpose is testing fast GPU implementations.
import numpy as np
def DivUp(a, b):
  """Integer ceiling division: smallest integer >= a / b (b > 0).

  Uses // so the result stays an int on Python 3 as well as Python 2
  (a plain / would yield a float on Python 3 and break index arithmetic).
  """
  return (a + b - 1) // b
def ConvUp(images, filters, image_shape, conv_spec):
  """Forward convolution on CPU (slow reference implementation).

  images: (num_images, image_size_x * image_size_y * num_input_channels);
    pixel (x, y) of channel c lives at column
    (c * image_size_y + y) * image_size_x + x.
  filters: (num_output_channels, kernel_size_x * kernel_size_y *
    num_input_channels) weight matrix.
  image_shape: (num_images, image_size_x, image_size_y, num_input_channels).
  conv_spec: (num_output_channels, kernel_size_y, kernel_size_x,
    stride_y, stride_x, padding_y, padding_x).
  Returns: (num_images, num_modules_x * num_modules_y * num_output_channels).
  """
  num_images, image_size_x, image_size_y, num_input_channels = image_shape
  num_output_channels, kernel_size_y, kernel_size_x, stride_y, stride_x, padding_y, padding_x = conv_spec
  # // keeps the module counts integral on Python 3 as well as Python 2.
  num_modules_y = (image_size_y + 2 * padding_y - kernel_size_y) // stride_y + 1
  num_modules_x = (image_size_x + 2 * padding_x - kernel_size_x) // stride_x + 1
  output = np.zeros((num_images, num_modules_x * num_modules_y * num_output_channels), dtype=np.float32)
  for y_pos in range(num_modules_y):
    for x_pos in range(num_modules_x):
      # Gather this module's receptive field into a dense patch
      # (zeros where the kernel hangs over the padded border).
      input_data = np.zeros((num_images, kernel_size_x * kernel_size_y * num_input_channels), dtype=np.float32)
      start_x = x_pos * stride_x - padding_x
      start_y = y_pos * stride_y - padding_y
      offset = y_pos * num_modules_x + x_pos
      for c in range(num_input_channels):
        for y in range(start_y, start_y + kernel_size_y):
          if y < 0 or y >= image_size_y:
            continue
          for x in range(start_x, start_x + kernel_size_x):
            if x < 0 or x >= image_size_x:
              continue
            input_data_x = x - start_x
            input_data_y = y - start_y
            input_data_index = (c * kernel_size_y + input_data_y) * kernel_size_x + input_data_x
            images_index = (c * image_size_y + y) * image_size_x + x
            input_data[:, input_data_index] = images[:, images_index]
      # One matrix multiply applies every filter to the gathered patch.
      output_data = np.dot(input_data, filters.T)
      for c in range(num_output_channels):
        output[:, offset + c * num_modules_x * num_modules_y] = output_data[:, c]
  return output
def ConvDown(derivs, filters, image_shape, conv_spec):
  """Backprop convolution output derivatives to the input image (reference).

  derivs: (num_images, num_modules_x * num_modules_y * num_output_channels)
    gradients w.r.t. ConvUp's output, same column layout as ConvUp produces.
  filters: (num_output_channels, kernel area * num_input_channels).
  image_shape / conv_spec: as in ConvUp.
  Returns gradients w.r.t. the input images, shaped like `images`.
  """
  num_images, image_size_x, image_size_y, num_input_channels = image_shape
  num_output_channels, kernel_size_y, kernel_size_x, stride_y, stride_x, padding_y, padding_x = conv_spec
  # // keeps the module counts integral on Python 3 as well as Python 2.
  num_modules_y = (image_size_y + 2 * padding_y - kernel_size_y) // stride_y + 1
  num_modules_x = (image_size_x + 2 * padding_x - kernel_size_x) // stride_x + 1
  output = np.zeros((num_images, image_size_x * image_size_y * num_input_channels), dtype=np.float32)
  for y_pos in range(num_modules_y):
    for x_pos in range(num_modules_x):
      # Collect this module's derivative across all output channels.
      deriv = np.zeros((num_images, num_output_channels), dtype=np.float32)
      start_x = x_pos * stride_x - padding_x
      start_y = y_pos * stride_y - padding_y
      offset = y_pos * num_modules_x + x_pos
      for c in range(num_output_channels):
        deriv[:, c] = derivs[:, offset + c * num_modules_x * num_modules_y]
      # Project back through the filters, then scatter-add into the image
      # (overlapping windows accumulate).
      d_input = np.dot(deriv, filters)
      for c in range(num_input_channels):
        for y in range(start_y, start_y + kernel_size_y):
          if y < 0 or y >= image_size_y:
            continue
          for x in range(start_x, start_x + kernel_size_x):
            if x < 0 or x >= image_size_x:
              continue
            input_data_x = x - start_x
            input_data_y = y - start_y
            input_data_index = (c * kernel_size_y + input_data_y) * kernel_size_x + input_data_x
            images_index = (c * image_size_y + y) * image_size_x + x
            output[:, images_index] += d_input[:, input_data_index]
  return output
def ConvOutp(images, derivs, image_shape, conv_spec, partial_sum_y=0, partial_sum_x=0):
  """Compute filter-weight gradients for ConvUp, plus per-region partial sums.

  Returns (output, output_psums): `output` is the full weight gradient of
  shape (num_output_channels, kernel area * num_input_channels);
  `output_psums` holds one such gradient per partial-sum region
  (partial_sum_x/y modules per region; 0 means one region covering
  everything).  Also self-checks that the partial sums add up to `output`.
  NOTE: Python 2 module (xrange, print statement, integer '/').
  """
  num_images, image_size_x, image_size_y, num_input_channels = image_shape
  num_output_channels, kernel_size_y, kernel_size_x, stride_y, stride_x, padding_y, padding_x = conv_spec
  num_modules_y = (image_size_y + 2 * padding_y - kernel_size_y) / stride_y + 1
  num_modules_x = (image_size_x + 2 * padding_x - kernel_size_x) / stride_x + 1
  # Default: a single partial-sum region spanning the whole module grid.
  if partial_sum_x == 0:
    partial_sum_x = num_modules_x
  if partial_sum_y == 0:
    partial_sum_y = num_modules_y
  partial_sum_locs_x = DivUp(num_modules_x, partial_sum_x)
  partial_sum_locs_y = DivUp(num_modules_y, partial_sum_y)
  input_size = kernel_size_y * kernel_size_x * num_input_channels
  output = np.zeros((num_output_channels, input_size), dtype=np.float32)
  output2 = np.zeros((num_output_channels, input_size), dtype=np.float32)
  output_psums = np.zeros((num_output_channels, input_size * partial_sum_locs_x * partial_sum_locs_y), dtype=np.float32)
  for y_pos in xrange(num_modules_y):
    for x_pos in xrange(num_modules_x):
      # Gather this module's input patch and its output derivatives.
      input_data = np.zeros((num_images, input_size), dtype=np.float32)
      deriv_data = np.zeros((num_images, num_output_channels), dtype=np.float32)
      start_x = x_pos * stride_x - padding_x
      start_y = y_pos * stride_y - padding_y
      offset = y_pos * num_modules_x + x_pos
      for c in xrange(num_input_channels):
        for y in xrange(start_y, start_y + kernel_size_y):
          if y < 0 or y >= image_size_y:
            continue
          for x in xrange(start_x, start_x + kernel_size_x):
            if x < 0 or x >= image_size_x:
              continue
            input_data_x = x - start_x
            input_data_y = y - start_y
            input_data_index = (c * kernel_size_y + input_data_y) * kernel_size_x + input_data_x
            images_index = (c * image_size_y + y) * image_size_x + x
            input_data[:, input_data_index] = images[:, images_index]
      for c in xrange(num_output_channels):
        deriv_data[:, c] = derivs[:, offset + c * num_modules_x * num_modules_y]
      # Which partial-sum region this module belongs to (py2 int division).
      partial_sum_id_y = y_pos / partial_sum_y
      partial_sum_id_x = x_pos / partial_sum_x
      partial_sum_id = partial_sum_id_y * partial_sum_locs_x + partial_sum_id_x
      # Gradient contribution of this module: derivs^T . patch.
      outp = np.dot(deriv_data.T, input_data)
      output_psums[:, partial_sum_id * input_size : (partial_sum_id + 1) * input_size] += outp
      output += outp
  # Sanity check: partial sums must reconstruct the full gradient.
  for partial_sum_id_y in xrange(partial_sum_locs_y):
    for partial_sum_id_x in xrange(partial_sum_locs_x):
      partial_sum_id = partial_sum_id_y * partial_sum_locs_x + partial_sum_id_x
      output2 += output_psums[:, partial_sum_id * input_size : (partial_sum_id + 1) * input_size]
  if not np.allclose(output2, output):
    print 'Error', np.abs(output - output2).max()
    print output
    print output2
  return output, output_psums
def MaxPool(images, image_shape, conv_spec):
  """Max pooling on CPU (slow reference implementation).

  images / image_shape / conv_spec: as in ConvUp; pooling is per-channel,
  so conv_spec's num_output_channels must equal num_input_channels.
  Returns (num_images, num_modules_x * num_modules_y * num_output_channels)
  with the max over each (possibly padded) pooling window.
  """
  num_images, image_size_x, image_size_y, num_input_channels = image_shape
  num_output_channels, kernel_size_y, kernel_size_x, stride_y, stride_x, padding_y, padding_x = conv_spec
  assert (num_output_channels == num_input_channels)
  # // keeps the module counts integral on Python 3 as well as Python 2.
  num_modules_y = (image_size_y + 2 * padding_y - kernel_size_y) // stride_y + 1
  num_modules_x = (image_size_x + 2 * padding_x - kernel_size_x) // stride_x + 1
  output = np.zeros((num_images, num_modules_x * num_modules_y * num_output_channels), dtype=np.float32)
  for y_pos in range(num_modules_y):
    for x_pos in range(num_modules_x):
      start_x = x_pos * stride_x - padding_x
      start_y = y_pos * stride_y - padding_y
      offset = y_pos * num_modules_x + x_pos
      for c in range(num_input_channels):
        # Start from -inf so any in-bounds pixel wins the running maximum.
        input_data = np.zeros(num_images, dtype=np.float32) - np.inf
        for y in range(start_y, start_y + kernel_size_y):
          if y < 0 or y >= image_size_y:
            continue
          for x in range(start_x, start_x + kernel_size_x):
            if x < 0 or x >= image_size_x:
              continue
            images_index = (c * image_size_y + y) * image_size_x + x
            input_data = np.maximum(input_data, images[:, images_index])
        output[:, offset + c * num_modules_x * num_modules_y] = input_data
  return output
def MaxPool3D(images, image_shape, conv_spec):
  """Spatio-temporal (x, y, t) max pooling, per channel (reference code).

  image_shape: (num_images, image_size_x, image_size_y, num_input_channels,
  image_size_t); conv_spec adds kernel/stride/padding along t.  A pixel is
  at column ((t * num_input_channels + c) * image_size_y + y) * image_size_x + x.
  NOTE: Python 2 module (xrange, integer '/').
  """
  num_images, image_size_x, image_size_y, num_input_channels, image_size_t = image_shape
  num_output_channels, kernel_size_y, kernel_size_x, kernel_size_t, stride_y, stride_x, stride_t, padding_y, padding_x, padding_t = conv_spec
  assert (num_output_channels == num_input_channels)
  num_modules_y = (image_size_y + 2 * padding_y - kernel_size_y) / stride_y + 1
  num_modules_x = (image_size_x + 2 * padding_x - kernel_size_x) / stride_x + 1
  num_modules_t = (image_size_t + 2 * padding_t - kernel_size_t) / stride_t + 1
  output = np.zeros((num_images, num_modules_x * num_modules_y * num_output_channels * num_modules_t), dtype=np.float32)
  for t_pos in xrange(num_modules_t):
    for y_pos in xrange(num_modules_y):
      for x_pos in xrange(num_modules_x):
        start_t = t_pos * stride_t - padding_t
        start_y = y_pos * stride_y - padding_y
        start_x = x_pos * stride_x - padding_x
        offset = (t_pos * num_output_channels * num_modules_y + y_pos) * num_modules_x + x_pos
        for c in xrange(num_input_channels):
          # -inf init so any in-bounds pixel wins the running maximum.
          input_data = np.zeros(num_images, dtype=np.float32) - np.inf
          for t in xrange(start_t, start_t + kernel_size_t):
            if t < 0 or t >= image_size_t:
              continue
            for y in xrange(start_y, start_y + kernel_size_y):
              if y < 0 or y >= image_size_y:
                continue
              for x in xrange(start_x, start_x + kernel_size_x):
                if x < 0 or x >= image_size_x:
                  continue
                images_index = ((t * num_input_channels + c) * image_size_y + y) * image_size_x + x
                input_data = np.maximum(input_data, images[:, images_index])
          output[:, offset + c * num_modules_x * num_modules_y] = input_data
  return output
def AvgPool3D(images, image_shape, conv_spec):
  """Spatio-temporal (x, y, t) average pooling, per channel (reference code).

  Same layout conventions as MaxPool3D.  The average divides by the number
  of in-bounds pixels (region_size), so padded border windows are averaged
  over the pixels they actually cover.
  NOTE: Python 2 module (xrange, integer '/').
  """
  num_images, image_size_x, image_size_y, num_input_channels, image_size_t = image_shape
  num_output_channels, kernel_size_y, kernel_size_x, kernel_size_t, stride_y, stride_x, stride_t, padding_y, padding_x, padding_t = conv_spec
  assert (num_output_channels == num_input_channels)
  num_modules_y = (image_size_y + 2 * padding_y - kernel_size_y) / stride_y + 1
  num_modules_x = (image_size_x + 2 * padding_x - kernel_size_x) / stride_x + 1
  num_modules_t = (image_size_t + 2 * padding_t - kernel_size_t) / stride_t + 1
  output = np.zeros((num_images, num_modules_x * num_modules_y * num_output_channels * num_modules_t), dtype=np.float32)
  for t_pos in xrange(num_modules_t):
    for y_pos in xrange(num_modules_y):
      for x_pos in xrange(num_modules_x):
        start_t = t_pos * stride_t - padding_t
        start_y = y_pos * stride_y - padding_y
        start_x = x_pos * stride_x - padding_x
        offset = (t_pos * num_output_channels * num_modules_y + y_pos) * num_modules_x + x_pos
        for c in xrange(num_input_channels):
          input_data = np.zeros(num_images, dtype=np.float32)
          # Count only in-bounds pixels so border windows average correctly.
          region_size = 0
          for t in xrange(start_t, start_t + kernel_size_t):
            if t < 0 or t >= image_size_t:
              continue
            for y in xrange(start_y, start_y + kernel_size_y):
              if y < 0 or y >= image_size_y:
                continue
              for x in xrange(start_x, start_x + kernel_size_x):
                if x < 0 or x >= image_size_x:
                  continue
                images_index = ((t * num_input_channels + c) * image_size_y + y) * image_size_x + x
                input_data += images[:, images_index]
                region_size += 1
          output[:, offset + c * num_modules_x * num_modules_y] = input_data / region_size
  return output
def AvgPool(images, image_shape, conv_spec):
  """Average pooling on CPU (slow reference implementation).

  images / image_shape / conv_spec: as in ConvUp; pooling is per-channel,
  so conv_spec's num_output_channels must equal num_input_channels.
  The average divides by the number of in-bounds pixels, so padded border
  windows are averaged over the pixels they actually cover.
  """
  num_images, image_size_x, image_size_y, num_input_channels = image_shape
  num_output_channels, kernel_size_y, kernel_size_x, stride_y, stride_x, padding_y, padding_x = conv_spec
  assert (num_output_channels == num_input_channels)
  # // keeps the module counts integral on Python 3 as well as Python 2.
  num_modules_y = (image_size_y + 2 * padding_y - kernel_size_y) // stride_y + 1
  num_modules_x = (image_size_x + 2 * padding_x - kernel_size_x) // stride_x + 1
  output = np.zeros((num_images, num_modules_x * num_modules_y * num_output_channels), dtype=np.float32)
  for y_pos in range(num_modules_y):
    for x_pos in range(num_modules_x):
      start_x = x_pos * stride_x - padding_x
      start_y = y_pos * stride_y - padding_y
      offset = y_pos * num_modules_x + x_pos
      for c in range(num_input_channels):
        input_data = np.zeros(num_images, dtype=np.float32)
        # Count only in-bounds pixels so border windows average correctly.
        region_size = 0
        for y in range(start_y, start_y + kernel_size_y):
          if y < 0 or y >= image_size_y:
            continue
          for x in range(start_x, start_x + kernel_size_x):
            if x < 0 or x >= image_size_x:
              continue
            images_index = (c * image_size_y + y) * image_size_x + x
            input_data += images[:, images_index]
            region_size += 1
        output[:, offset + c * num_modules_x * num_modules_y] = input_data / region_size
  return output
def MaxPoolUndo(images, maxes, derivs, image_shape, deriv_shape, conv_spec):
  """Backprop through MaxPool: route each window's derivative to the pixels
  that equal the recorded maximum.

  images: original pooling input; maxes: MaxPool(images, ...); derivs:
  gradients w.r.t. the pooled output.  A pixel matching the max in several
  overlapping windows accumulates each window's derivative (ties included).
  deriv_shape is unused here.  NOTE: Python 2 module (xrange, integer '/').
  """
  num_images, image_size_x, image_size_y, num_input_channels = image_shape
  num_output_channels, kernel_size_y, kernel_size_x, stride_y, stride_x, padding_y, padding_x = conv_spec
  assert (num_output_channels == num_input_channels)
  num_modules_y = (image_size_y + 2 * padding_y - kernel_size_y) / stride_y + 1
  num_modules_x = (image_size_x + 2 * padding_x - kernel_size_x) / stride_x + 1
  output = np.zeros(images.shape, dtype=np.float32)
  for y_pos in xrange(num_modules_y):
    for x_pos in xrange(num_modules_x):
      start_x = x_pos * stride_x - padding_x
      start_y = y_pos * stride_y - padding_y
      for c in xrange(num_input_channels):
        offset = x_pos + num_modules_x * (y_pos + num_modules_y * c)
        for y in xrange(start_y, start_y + kernel_size_y):
          if y < 0 or y >= image_size_y:
            continue
          for x in xrange(start_x, start_x + kernel_size_x):
            if x < 0 or x >= image_size_x:
              continue
            images_index = (c * image_size_y + y) * image_size_x + x
            # Per-image equality test against the stored maximum.
            for i in xrange(num_images):
              if images[i, images_index] == maxes[i, offset]:
                output[i, images_index] += derivs[i, offset]
  return output
def MaxPool3DUndo(images, maxes, derivs, image_shape, deriv_shape, conv_spec):
  """Backprop through MaxPool3D: route each (x, y, t) window's derivative to
  the pixels that equal the recorded maximum.

  Same conventions as MaxPoolUndo, extended with the temporal dimension.
  deriv_shape is unused here.  NOTE: Python 2 module (xrange, integer '/').
  """
  num_images, image_size_x, image_size_y, num_input_channels, image_size_t = image_shape
  num_output_channels, kernel_size_y, kernel_size_x, kernel_size_t, stride_y, stride_x, stride_t, padding_y, padding_x, padding_t = conv_spec
  assert (num_output_channels == num_input_channels)
  num_modules_y = (image_size_y + 2 * padding_y - kernel_size_y) / stride_y + 1
  num_modules_x = (image_size_x + 2 * padding_x - kernel_size_x) / stride_x + 1
  num_modules_t = (image_size_t + 2 * padding_t - kernel_size_t) / stride_t + 1
  output = np.zeros(images.shape, dtype=np.float32)
  for t_pos in xrange(num_modules_t):
    for y_pos in xrange(num_modules_y):
      for x_pos in xrange(num_modules_x):
        start_x = x_pos * stride_x - padding_x
        start_y = y_pos * stride_y - padding_y
        start_t = t_pos * stride_t - padding_t
        for c in xrange(num_input_channels):
          offset = ((t_pos * num_output_channels + c) * num_modules_y + y_pos) * num_modules_x + x_pos
          for t in xrange(start_t, start_t + kernel_size_t):
            if t < 0 or t >= image_size_t:
              continue
            for y in xrange(start_y, start_y + kernel_size_y):
              if y < 0 or y >= image_size_y:
                continue
              for x in xrange(start_x, start_x + kernel_size_x):
                if x < 0 or x >= image_size_x:
                  continue
                images_index = ((t * num_input_channels + c) * image_size_y + y) * image_size_x + x
                # Per-image equality test against the stored maximum.
                for i in xrange(num_images):
                  if images[i, images_index] == maxes[i, offset]:
                    output[i, images_index] += derivs[i, offset]
  return output
def AvgPool3DUndo(derivs, image_shape, conv_spec):
  """Backprop through AvgPool3D: spread each window's derivative uniformly
  over the in-bounds pixels it covered.

  region_size is recomputed from the window clamped to the image bounds,
  matching AvgPool3D's divide-by-covered-pixels behavior.
  NOTE: Python 2 module (xrange, integer '/').
  """
  num_images, image_size_x, image_size_y, num_input_channels, image_size_t = image_shape
  num_output_channels, kernel_size_y, kernel_size_x, kernel_size_t, stride_y, stride_x, stride_t, padding_y, padding_x, padding_t = conv_spec
  assert (num_output_channels == num_input_channels)
  num_modules_y = (image_size_y + 2 * padding_y - kernel_size_y) / stride_y + 1
  num_modules_x = (image_size_x + 2 * padding_x - kernel_size_x) / stride_x + 1
  num_modules_t = (image_size_t + 2 * padding_t - kernel_size_t) / stride_t + 1
  output = np.zeros((num_images, image_size_x * image_size_y * num_input_channels * image_size_t), dtype=np.float32)
  for t_pos in xrange(num_modules_t):
    for y_pos in xrange(num_modules_y):
      for x_pos in xrange(num_modules_x):
        start_x = x_pos * stride_x - padding_x
        start_y = y_pos * stride_y - padding_y
        start_t = t_pos * stride_t - padding_t
        end_y = start_y + kernel_size_y
        end_x = start_x + kernel_size_x
        end_t = start_t + kernel_size_t
        # Clamp the window to the image so region_size counts only the
        # pixels the forward pass actually averaged over.
        start2_y = min(max(start_y, 0), image_size_y)
        start2_x = min(max(start_x, 0), image_size_x)
        start2_t = min(max(start_t, 0), image_size_t)
        end_y = min(max(end_y, 0), image_size_y)
        end_x = min(max(end_x, 0), image_size_x)
        end_t = min(max(end_t, 0), image_size_t)
        region_size = (end_y - start2_y) * (end_x - start2_x) * (end_t - start2_t)
        for c in xrange(num_input_channels):
          offset = ((t_pos * num_output_channels + c) * num_modules_y + y_pos) * num_modules_x + x_pos
          for t in xrange(start2_t, end_t):
            for y in xrange(start2_y, end_y):
              for x in xrange(start2_x, end_x):
                images_index = ((t * num_input_channels + c) * image_size_y + y) * image_size_x + x
                output[:, images_index] += derivs[:, offset] / region_size
  return output
def AvgPoolUndo(derivs, image_shape, conv_spec):
  """Backprop through AvgPool: spread each window's derivative uniformly
  over the in-bounds pixels it covered.

  derivs: gradients w.r.t. AvgPool's output; image_shape / conv_spec as in
  AvgPool.  region_size is recomputed from the window clamped to the image
  bounds, matching AvgPool's divide-by-covered-pixels behavior.
  """
  num_images, image_size_x, image_size_y, num_input_channels = image_shape
  num_output_channels, kernel_size_y, kernel_size_x, stride_y, stride_x, padding_y, padding_x = conv_spec
  assert (num_input_channels == num_output_channels)
  # // keeps the module counts integral on Python 3 as well as Python 2.
  num_modules_y = (image_size_y + 2 * padding_y - kernel_size_y) // stride_y + 1
  num_modules_x = (image_size_x + 2 * padding_x - kernel_size_x) // stride_x + 1
  output = np.zeros((num_images, image_size_x * image_size_y * num_input_channels), dtype=np.float32)
  for y_pos in range(num_modules_y):
    for x_pos in range(num_modules_x):
      start_x = x_pos * stride_x - padding_x
      start_y = y_pos * stride_y - padding_y
      end_y = start_y + kernel_size_y
      end_x = start_x + kernel_size_x
      # Clamp the window to the image so region_size counts only the
      # pixels the forward pass actually averaged over.
      start2_y = min(max(start_y, 0), image_size_y)
      start2_x = min(max(start_x, 0), image_size_x)
      end_y = min(max(end_y, 0), image_size_y)
      end_x = min(max(end_x, 0), image_size_x)
      region_size = (end_y - start2_y) * (end_x - start2_x)
      for c in range(num_input_channels):
        offset = (c * num_modules_y + y_pos) * num_modules_x + x_pos
        for y in range(start2_y, end_y):
          for x in range(start2_x, end_x):
            images_index = (c * image_size_y + y) * image_size_x + x
            output[:, images_index] += derivs[:, offset] / region_size
  return output
def GetBounds(i, numF, num_channels, blocked):
  """Return the [startPos, endPos) channel window that normalizes channel i.

  blocked=True: channels are grouped into fixed blocks of numF.
  blocked=False: a sliding window of numF channels centered near i.
  Uses // so the indices stay ints on Python 3 (plain / would make them
  floats and break indexing).
  """
  if blocked:
    startPos = (i // numF) * numF
  else:
    startPos = i - numF // 2
  endPos = min(startPos + numF, num_channels)
  startPos = max(0, startPos)
  return startPos, endPos
def GetBoundsInv(i, numF, num_channels, blocked):
  """Return the set of filters such that i appears in their normalization group.

  Inverse of GetBounds: the [startPos, endPos) range of channels whose
  GetBounds window contains channel i.  Uses // so the indices stay ints
  on Python 3 (plain / would make them floats and break indexing).
  """
  if blocked:
    startPos = (i // numF) * numF
  else:
    startPos = i - numF + numF // 2 + 1
  endPos = min(startPos + numF, num_channels)
  startPos = max(0, startPos)
  return startPos, endPos
def ComputeDenoms(data, numF, blocked, addScale):
  """Compute response-normalization denominators across channels.

  data: (num_images, num_channels).  For each channel i, sums the squares
  of data over the channel window GetBounds(i, numF, num_channels, blocked)
  and returns 1 + addScale * sum_sq with the same shape/dtype as data.
  """
  denoms = np.zeros(data.shape, dtype=data.dtype)
  num_images, num_channels = data.shape
  # range (not Python-2-only xrange) keeps this runnable on Python 3 too.
  for i in range(num_channels):
    startPos, endPos = GetBounds(i, numF, num_channels, blocked)
    for j in range(startPos, endPos):
      denoms[:, i] += data[:, j]**2
  denoms = 1 + addScale * denoms
  return denoms
def ResponseNormCrossMap(images, image_shape, numF, add_scale, pow_scale, blocked):
  """Cross-channel response normalization at every spatial location.

  For each (x, y), gathers all channels, computes denominators with
  ComputeDenoms and scales each channel by denom**(-pow_scale).
  Column layout: loc_id = x + image_size_x * (y + image_size_y * c).
  NOTE: Python 2 module (xrange).
  """
  num_images, image_size_x, image_size_y, num_input_channels = image_shape
  output = np.zeros((num_images, image_size_x * image_size_y * num_input_channels), dtype=np.float32)
  for y_pos in xrange(image_size_y):
    for x_pos in xrange(image_size_x):
      # Gather every channel of this spatial location into one matrix.
      this_loc_all_channels = np.zeros((num_images, num_input_channels), dtype=np.float32)
      for c in xrange(num_input_channels):
        loc_id = x_pos + image_size_x * (y_pos + image_size_y * c)
        this_loc_all_channels[:, c] = images[:, loc_id]
      denoms = ComputeDenoms(this_loc_all_channels, numF, blocked, add_scale)
      this_loc_all_channels *= np.power(denoms, -pow_scale)
      # Scatter the normalized channels back to their columns.
      for c in xrange(num_input_channels):
        loc_id = x_pos + image_size_x * (y_pos + image_size_y * c)
        output[:, loc_id] = this_loc_all_channels[:, c]
  return output
def ResponseNormCrossMapUndo(derivs, images, image_shape, numF, add_scale, pow_scale, blocked):
  """Backprop through ResponseNormCrossMap.

  Combines the direct term deriv * denom**(-pow_scale) with the indirect
  term from channel c's contribution to the denominators of the channels
  returned by GetBoundsInv.  NOTE: Python 2 module (xrange).
  """
  num_images, image_size_x, image_size_y, num_input_channels = image_shape
  output = np.zeros((num_images, image_size_x * image_size_y * num_input_channels), dtype=np.float32)
  for y_pos in xrange(image_size_y):
    for x_pos in xrange(image_size_x):
      # Gather data and derivatives for all channels at this location.
      this_loc_all_channels_data = np.zeros((num_images, num_input_channels), dtype=np.float32)
      this_loc_all_channels_deriv = np.zeros((num_images, num_input_channels), dtype=np.float32)
      for c in xrange(num_input_channels):
        loc_id = x_pos + image_size_x * (y_pos + image_size_y * c)
        this_loc_all_channels_data[:, c] = images[:, loc_id]
        this_loc_all_channels_deriv[:, c] = derivs[:, loc_id]
      denoms = ComputeDenoms(this_loc_all_channels_data, numF, blocked, add_scale)
      for c in xrange(num_input_channels):
        loc_id = x_pos + image_size_x * (y_pos + image_size_y * c)
        # Channels whose normalization window includes channel c.
        startPos, endPos = GetBoundsInv(c, numF, num_input_channels, blocked)
        output[:, loc_id] = this_loc_all_channels_deriv[:, c] * np.power(denoms[:, c], -pow_scale) \
                            - 2 * add_scale * pow_scale * this_loc_all_channels_data[:, c] * \
                            (this_loc_all_channels_deriv[:, startPos:endPos] \
                             * this_loc_all_channels_data[:, startPos:endPos] \
                             * np.power(denoms[:, startPos:endPos], -pow_scale-1)).sum(axis=1)
  return output
def ConvUp3D(images, filters, image_shape, conv_spec):
  """Forward 3D (x, y, t) convolution on CPU (slow reference implementation).

  images: (num_images, x * y * channels * t) with a pixel at column
  ((t * num_input_channels + c) * image_size_y + y) * image_size_x + x.
  filters: (num_output_channels, kernel volume * num_input_channels).
  NOTE: Python 2 module (xrange, integer '/').
  """
  num_images, image_size_x, image_size_y, num_input_channels, image_size_t = image_shape
  num_output_channels, kernel_size_y, kernel_size_x, kernel_size_t, stride_y, stride_x, stride_t, padding_y, padding_x, padding_t = conv_spec
  num_modules_y = (image_size_y + 2 * padding_y - kernel_size_y) / stride_y + 1
  num_modules_x = (image_size_x + 2 * padding_x - kernel_size_x) / stride_x + 1
  num_modules_t = (image_size_t + 2 * padding_t - kernel_size_t) / stride_t + 1
  output = np.zeros((num_images, num_modules_x * num_modules_y * num_output_channels * num_modules_t), dtype=np.float32)
  for t_pos in xrange(num_modules_t):
    for y_pos in xrange(num_modules_y):
      for x_pos in xrange(num_modules_x):
        # Gather this module's 3D receptive field into a dense patch
        # (zeros where the kernel hangs over the padded border).
        input_data = np.zeros((num_images, kernel_size_x * kernel_size_y * num_input_channels * kernel_size_t), dtype=np.float32)
        start_x = x_pos * stride_x - padding_x
        start_y = y_pos * stride_y - padding_y
        start_t = t_pos * stride_t - padding_t
        for c in xrange(num_input_channels):
          for t in xrange(start_t, start_t + kernel_size_t):
            if t < 0 or t >= image_size_t:
              continue
            for y in xrange(start_y, start_y + kernel_size_y):
              if y < 0 or y >= image_size_y:
                continue
              for x in xrange(start_x, start_x + kernel_size_x):
                if x < 0 or x >= image_size_x:
                  continue
                input_data_x = x - start_x
                input_data_y = y - start_y
                input_data_t = t - start_t
                input_data_index = ((input_data_t * num_input_channels + c) * kernel_size_y + input_data_y) * kernel_size_x + input_data_x
                images_index = ((t * num_input_channels + c) * image_size_y + y) * image_size_x + x
                input_data[:, input_data_index] = images[:, images_index]
        # One matrix multiply applies every filter to the gathered patch.
        output_data = np.dot(input_data, filters.T)
        for c in xrange(num_output_channels):
          offset = ((t_pos * num_output_channels + c) * num_modules_y + y_pos) * num_modules_x + x_pos
          output[:, offset] = output_data[:, c]
  return output
def ConvDown3D(derivs, filters, image_shape, conv_spec):
  """Backprop 3D convolution output derivatives to the input (reference).

  derivs: gradients w.r.t. ConvUp3D's output, same layout as it produces.
  Returns gradients w.r.t. the input images; overlapping windows
  accumulate.  NOTE: Python 2 module (xrange, integer '/').
  """
  num_images, image_size_x, image_size_y, num_input_channels, image_size_t = image_shape
  num_output_channels, kernel_size_y, kernel_size_x, kernel_size_t, stride_y, stride_x, stride_t, padding_y, padding_x, padding_t = conv_spec
  num_modules_y = (image_size_y + 2 * padding_y - kernel_size_y) / stride_y + 1
  num_modules_x = (image_size_x + 2 * padding_x - kernel_size_x) / stride_x + 1
  num_modules_t = (image_size_t + 2 * padding_t - kernel_size_t) / stride_t + 1
  output = np.zeros((num_images, image_size_x * image_size_y * num_input_channels * image_size_t), dtype=np.float32)
  for t_pos in xrange(num_modules_t):
    for y_pos in xrange(num_modules_y):
      for x_pos in xrange(num_modules_x):
        # Collect this module's derivative across all output channels.
        deriv = np.zeros((num_images, num_output_channels), dtype=np.float32)
        start_x = x_pos * stride_x - padding_x
        start_y = y_pos * stride_y - padding_y
        start_t = t_pos * stride_t - padding_t
        for c in xrange(num_output_channels):
          offset = ((t_pos * num_output_channels + c) * num_modules_y + y_pos) * num_modules_x + x_pos
          deriv[:, c] = derivs[:, offset]
        # Project back through the filters, then scatter-add into the image.
        d_input = np.dot(deriv, filters)
        for c in xrange(num_input_channels):
          for t in xrange(start_t, start_t + kernel_size_t):
            if t < 0 or t >= image_size_t:
              continue
            for y in xrange(start_y, start_y + kernel_size_y):
              if y < 0 or y >= image_size_y:
                continue
              for x in xrange(start_x, start_x + kernel_size_x):
                if x < 0 or x >= image_size_x:
                  continue
                input_data_x = x - start_x
                input_data_y = y - start_y
                input_data_t = t - start_t
                input_data_index = ((input_data_t * num_input_channels + c) * kernel_size_y + input_data_y) * kernel_size_x + input_data_x
                images_index = ((t * num_input_channels + c) * image_size_y + y) * image_size_x + x
                output[:, images_index] += d_input[:, input_data_index]
  return output
def ConvOutp3D(images, derivs, image_shape, conv_spec):
  """Compute filter-weight gradients for ConvUp3D (reference implementation).

  Accumulates derivs^T . patch over every module; no partial sums,
  unlike the 2D ConvOutp.  Returns
  (num_output_channels, kernel volume * num_input_channels).
  NOTE: Python 2 module (xrange, integer '/').
  """
  num_images, image_size_x, image_size_y, num_input_channels, image_size_t = image_shape
  num_output_channels, kernel_size_y, kernel_size_x, kernel_size_t, stride_y, stride_x, stride_t, padding_y, padding_x, padding_t = conv_spec
  num_modules_y = (image_size_y + 2 * padding_y - kernel_size_y) / stride_y + 1
  num_modules_x = (image_size_x + 2 * padding_x - kernel_size_x) / stride_x + 1
  num_modules_t = (image_size_t + 2 * padding_t - kernel_size_t) / stride_t + 1
  input_size = kernel_size_y * kernel_size_x * num_input_channels * kernel_size_t
  output = np.zeros((num_output_channels, input_size), dtype=np.float32)
  for t_pos in xrange(num_modules_t):
    for y_pos in xrange(num_modules_y):
      for x_pos in xrange(num_modules_x):
        # Gather this module's input patch and its output derivatives.
        input_data = np.zeros((num_images, input_size), dtype=np.float32)
        deriv = np.zeros((num_images, num_output_channels), dtype=np.float32)
        start_x = x_pos * stride_x - padding_x
        start_y = y_pos * stride_y - padding_y
        start_t = t_pos * stride_t - padding_t
        for c in xrange(num_input_channels):
          for t in xrange(start_t, start_t + kernel_size_t):
            if t < 0 or t >= image_size_t:
              continue
            for y in xrange(start_y, start_y + kernel_size_y):
              if y < 0 or y >= image_size_y:
                continue
              for x in xrange(start_x, start_x + kernel_size_x):
                if x < 0 or x >= image_size_x:
                  continue
                input_data_x = x - start_x
                input_data_y = y - start_y
                input_data_t = t - start_t
                input_data_index = ((input_data_t * num_input_channels + c) * kernel_size_y + input_data_y) * kernel_size_x + input_data_x
                images_index = ((t * num_input_channels + c) * image_size_y + y) * image_size_x + x
                input_data[:, input_data_index] = images[:, images_index]
        for c in xrange(num_output_channels):
          offset = ((t_pos * num_output_channels + c) * num_modules_y + y_pos) * num_modules_x + x_pos
          deriv[:, c] = derivs[:, offset]
        output += np.dot(deriv.T, input_data)
  return output
| {
"content_hash": "d70505b5b1820f0562a37519e6a53e46",
"timestamp": "",
"source": "github",
"line_count": 593,
"max_line_length": 141,
"avg_line_length": 49.10623946037099,
"alnum_prop": 0.5995535714285715,
"repo_name": "DeercoderResearch/convnet",
"id": "6865600a9c9e6b4487cb5407439b5f66c45eae4c",
"size": "29120",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "py/conv_cpu.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "192369"
},
{
"name": "C++",
"bytes": "339300"
},
{
"name": "Cuda",
"bytes": "1047676"
},
{
"name": "Makefile",
"bytes": "6022"
},
{
"name": "Python",
"bytes": "170332"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Schema migration: give ``problem.Code.text`` an empty-string default."""

    # Must be applied after the app's initial schema migration.
    dependencies = [
        ('problem', '0001_initial'),
    ]

    operations = [
        # Keep the column but add default='' so new Code rows no longer
        # require an explicit text value.
        migrations.AlterField(
            model_name='code',
            name='text',
            field=models.TextField(default=''),
        ),
    ]
| {
"content_hash": "67d2a3ddf55badbae2ae39e4173b29d8",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 47,
"avg_line_length": 20,
"alnum_prop": 0.5722222222222222,
"repo_name": "afg984/happycoding",
"id": "3d01c0d1244f4aad091be1de29f6ee826c69b60a",
"size": "384",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "problem/migrations/0002_auto_20151128_0752.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2526"
},
{
"name": "HTML",
"bytes": "18448"
},
{
"name": "Python",
"bytes": "37989"
}
],
"symlink_target": ""
} |
import pandas as pd
import numpy as np
class ResponseSet:
    """A set of survey responses matched against a codebook of questions.

    NOTE: by default the second row of the csv (``skiprows=[1]``) is
    skipped, because that's what we want for Qualtrics csv exports (they
    carry a second header row), but this is a potentially big gotcha for
    other data sources.
    """

    def __init__(self, response_file, codebook,
                 skiprows=None,
                 encoding="utf8",
                 grouping_var=None,
                 group_dict=None):
        """Load responses and coerce codebook variables to numeric columns.

        response_file: path or file-like object readable by pd.read_csv.
        codebook: object exposing get_questions(); each question exposes
            get_variable_names().
        skiprows: rows to skip on read; defaults to [1] (Qualtrics layout).
        grouping_var: optional column used by get_data() to group rows.
        group_dict: optional {raw value: label} mapping; its key order
            also defines the output group ordering.
        """
        # None-sentinel instead of a mutable [1] default argument.
        if skiprows is None:
            skiprows = [1]
        df = pd.read_csv(response_file, skiprows=skiprows, encoding=encoding)
        # Go through each variable in the codebook and make sure the
        # corresponding column is numeric; remember fully-matched questions.
        matched_questions = []
        for q in codebook.get_questions():
            matched = True
            for v in q.get_variable_names():
                if v not in df:
                    print("Warning: Expected variable {} not found in data file {}".format(v, response_file))
                    matched = False
                elif df[v].dtype not in [np.int64, np.float64]:
                    print("Converting variable {} to integer from {}".format(v, df[v].dtype))
                    # convert_objects() was removed from pandas; to_numeric
                    # with errors="coerce" keeps the NaN-on-failure behavior.
                    df[v] = pd.to_numeric(df[v], errors="coerce")
            if matched:
                matched_questions.append(q)
        self.data = df
        self.matched_questions = matched_questions
        self.codebook = codebook
        self.grouping_var = grouping_var
        self.group_dict = group_dict

    def get_data(self):
        """Return a pandas GroupBy over the responses.

        With no grouping_var everything lands in one group; with
        grouping_var and group_dict, rows are ordered by group_dict's key
        order and relabeled with its values; with grouping_var alone, rows
        are grouped by that column as-is.
        """
        # Synthetic group column needed when there is no grouping variable,
        # or when group_dict remaps/reorders the grouping variable.
        if not self.grouping_var or self.group_dict:
            # Invent a column name that cannot collide with existing ones.
            group_var = 'z'
            while group_var in self.data.columns:
                group_var += 'z'
            if not self.grouping_var:
                self.data[group_var] = 0
            else:
                # Sort rows so groups come out in group_dict's key order.
                sort_var = group_var + "z"
                while sort_var in self.data.columns:
                    sort_var += 'z'
                value_order = list(self.group_dict.keys())
                asc = list(range(0, len(value_order)))
                self.data[sort_var] = self.data[self.grouping_var].replace(
                    value_order, asc)
                # DataFrame.sort() was removed from pandas; sort_values is
                # the drop-in replacement.
                self.data.sort_values(sort_var, inplace=True)
                # Then replace raw values with their labels.
                self.data[group_var] = self.data[self.grouping_var].replace(
                    self.group_dict)
        else:
            group_var = self.grouping_var
        groups = self.data.groupby(group_var, sort=False)
        return(groups)
| {
"content_hash": "bcfd35388c0e846b5f92ea1160f14ac0",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 109,
"avg_line_length": 43.916666666666664,
"alnum_prop": 0.5411764705882353,
"repo_name": "ctesta01/surveyhelper",
"id": "3aab6ca3e00b105dc70e2ebf7f0065d279440e7c",
"size": "2635",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "surveyhelper/response_set.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2494"
},
{
"name": "HTML",
"bytes": "11523"
},
{
"name": "Python",
"bytes": "49880"
}
],
"symlink_target": ""
} |
import math
from random import choice, randint
import numpy as np
from neupy.network.utils import shuffle
__all__ = ('make_reber', 'is_valid_by_reber', 'make_reber_classification')
avaliable_letters = 'TVPXS'
# Finite-state transition table for the Reber grammar:
# state -> list of (letter, next state); ``None`` marks the final state.
reber_rules = {
    0: [('T', 1), ('V', 2)],
    1: [('P', 1), ('T', 3)],
    2: [('X', 2), ('V', 4)],
    3: [('X', 2), ('S', None)],
    4: [('P', 3), ('S', None)],
}
def is_valid_by_reber(word):
    """ Checks whether a word belongs to grammar Reber.
    Parameters
    ----------
    word : str or list of letters
        The word that you want to test.
    Returns
    -------
    bool
        `True` if word valid by Reber grammar, `False` otherwise.
    Examples
    --------
    >>> from neupy.datasets import is_valid_by_reber
    >>>
    >>> is_valid_by_reber('TTS')
    True
    >>> is_valid_by_reber('STS')
    False
    >>>
    >>> is_valid_by_reber(['T', 'T', 'S'])
    True
    >>> is_valid_by_reber(['S', 'T', 'S'])
    False
    """
    # Index instead of str.endswith so that list input (promised by the
    # docstring) works too; an empty word is never valid.
    if not word or word[-1] != "S":
        return False
    position = 0
    for letter in word:
        if position is None:
            # Final state already reached but letters remain
            # (e.g. 'TTSS'): invalid word, not a KeyError.
            return False
        possible_letters = reber_rules[position]
        letters = [step[0] for step in possible_letters]
        if letter not in letters:
            return False
        _, position = possible_letters[letters.index(letter)]
    return True
def make_reber(n_words=100):
    """ Generate list of Reber grammar words.
    Parameters
    ----------
    n_words : int
        Number of reber words, defaults to `100`.
    Returns
    -------
    list
        List of Reber words.
    Examples
    --------
    >>> from neupy.datasets import make_reber
    >>>
    >>> make_reber(4)
    ['TPTXVS', 'VXXVS', 'TPPTS', 'TTXVPXXVS']
    """
    if n_words < 1:
        raise ValueError("Must be at least one word")
    words = []
    for _ in range(n_words):
        state = 0
        letters = []
        # Walk the grammar's state machine until the final state (None),
        # picking a random transition at every step.
        while state is not None:
            next_letter, state = choice(reber_rules[state])
            letters.append(next_letter)
        words.append(''.join(letters))
    return words
def make_reber_classification(n_samples, invalid_size=0.5):
    """ Generate random dataset for Reber grammar classification.
    Invalid words contain the same letters as the Reber grammar, but
    they are built without the grammar rules.
    Parameters
    ----------
    n_samples : int
        Number of samples in dataset.
    invalid_size : float
        Proportion of invalid words in dataset, defaults to `0.5`. Value
        must be between 0 and 1, but not equal to them.
    Returns
    -------
    tuple
        Return two arrays. First contains words and second - labels for
        them (``1`` for valid Reber words, ``0`` for invalid ones).
    Examples
    --------
    >>> from neupy.datasets import make_reber_classification
    >>>
    >>> data, labels = make_reber_classification(10, invalid_size=0.5)
    """
    if n_samples < 2:
        raise ValueError("There are must be at least 2 samples.")
    if invalid_size <= 0 or invalid_size >= 1:
        # Bug fix: the two message halves used to concatenate without a
        # space ("...zero andone...").
        raise ValueError("`invalid_size` property must be between zero "
                         "and one, but not equal.")
    # Bug fix: ``invalid_size`` previously sized the *valid* partition, and
    # ``invalid_labels`` was sized by the valid count, so the words and
    # labels arrays could disagree in length. Clamp so at least one valid
    # word always remains (make_reber requires n_words >= 1).
    n_invalid_words = min(int(math.ceil(n_samples * invalid_size)),
                          n_samples - 1)
    n_valid_words = n_samples - n_invalid_words
    valid_words = make_reber(n_valid_words)
    valid_labels = [1] * n_valid_words
    invalid_words = []
    invalid_labels = [0] * n_invalid_words
    for i in range(n_invalid_words):
        word_length = randint(3, 14)
        word = [choice(avaliable_letters) for _ in range(word_length)]
        invalid_words.append(''.join(word))
    return shuffle(
        np.array(valid_words + invalid_words),
        np.array(valid_labels + invalid_labels)
    )
| {
"content_hash": "21f7e24c3372c0041396b4f37c7d88a0",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 76,
"avg_line_length": 25.607843137254903,
"alnum_prop": 0.5684022460439,
"repo_name": "stczhc/neupy",
"id": "63dbfc09f43da4041140ee30a78e874e91598cd3",
"size": "3943",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neupy/datasets/reber.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Fortran",
"bytes": "163909"
},
{
"name": "HTML",
"bytes": "5245"
},
{
"name": "JavaScript",
"bytes": "3570"
},
{
"name": "Makefile",
"bytes": "485"
},
{
"name": "Python",
"bytes": "598531"
},
{
"name": "Shell",
"bytes": "372"
}
],
"symlink_target": ""
} |
from . import views
def register_in(router):
    """Attach every OpenStack-tenant viewset to *router*.

    Registration order matches the original explicit call sequence.
    """
    routes = (
        (r'openstacktenant', views.OpenStackServiceViewSet,
         'openstacktenant'),
        (r'openstacktenant-service-project-link',
         views.OpenStackServiceProjectLinkViewSet, 'openstacktenant-spl'),
        (r'openstacktenant-images', views.ImageViewSet,
         'openstacktenant-image'),
        (r'openstacktenant-flavors', views.FlavorViewSet,
         'openstacktenant-flavor'),
        (r'openstacktenant-floating-ips', views.FloatingIPViewSet,
         'openstacktenant-fip'),
        (r'openstacktenant-security-groups', views.SecurityGroupViewSet,
         'openstacktenant-sgp'),
        (r'openstacktenant-volumes', views.VolumeViewSet,
         'openstacktenant-volume'),
        (r'openstacktenant-snapshots', views.SnapshotViewSet,
         'openstacktenant-snapshot'),
        (r'openstacktenant-instances', views.InstanceViewSet,
         'openstacktenant-instance'),
        (r'openstacktenant-backups', views.BackupViewSet,
         'openstacktenant-backup'),
        (r'openstacktenant-backup-schedules', views.BackupScheduleViewSet,
         'openstacktenant-backup-schedule'),
        (r'openstacktenant-snapshot-schedules', views.SnapshotScheduleViewSet,
         'openstacktenant-snapshot-schedule'),
        (r'openstacktenant-subnets', views.SubNetViewSet,
         'openstacktenant-subnet'),
        (r'openstacktenant-networks', views.NetworkViewSet,
         'openstacktenant-network'),
    )
    for prefix, viewset, base_name in routes:
        router.register(prefix, viewset, base_name=base_name)
| {
"content_hash": "9e6632ae8bb273f3c5d5c43e01dfa8a4",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 116,
"avg_line_length": 80.52380952380952,
"alnum_prop": 0.7794204612655233,
"repo_name": "opennode/nodeconductor-openstack",
"id": "d87d266474b886407a347d03ba094bb0dfbb0689",
"size": "1691",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "src/waldur_openstack/openstack_tenant/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "792426"
}
],
"symlink_target": ""
} |
from pyscf.prop.nmr import rhf
from pyscf.prop.nmr import uhf
from pyscf.prop.nmr import dhf
# Aliases so callers can reach each SCF flavor's NMR class directly from
# this package (e.g. ``nmr.RHF``).
RHF = rhf.NMR
UHF = uhf.NMR
DHF = dhf.NMR
# DFT-based NMR is optional: if the rks/uks modules cannot be imported,
# silently skip them and keep the HF aliases usable.
try:
    from pyscf.prop.nmr import rks
    from pyscf.prop.nmr import uks
    RKS = rks.NMR
    UKS = uks.NMR
except ImportError:
    pass
| {
"content_hash": "1a1d995c507bd69670aba2ddb082fcbd",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 34,
"avg_line_length": 18.466666666666665,
"alnum_prop": 0.7003610108303249,
"repo_name": "gkc1000/pyscf",
"id": "a2d1cb997d49069557e1e88568cd4643e0869b5f",
"size": "959",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyscf/prop/nmr/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "2749942"
},
{
"name": "C++",
"bytes": "20522"
},
{
"name": "CMake",
"bytes": "29300"
},
{
"name": "Common Lisp",
"bytes": "40269"
},
{
"name": "Cuda",
"bytes": "12405"
},
{
"name": "Fortran",
"bytes": "1104054"
},
{
"name": "Jupyter Notebook",
"bytes": "42844"
},
{
"name": "Makefile",
"bytes": "6797"
},
{
"name": "Python",
"bytes": "10739278"
},
{
"name": "Shell",
"bytes": "5480"
},
{
"name": "VBA",
"bytes": "577"
}
],
"symlink_target": ""
} |
from ciscoconfparse.ciscoconfparse import CiscoConfParse as CCP
from pprint import pprint as pp
with open('cisco_config.txt') as file:
parse = CCP(file)
pp(parse.find_objects(r'crypto map CRYPTO'))
ff_parents = parse.find_objects(r'crypto map CRYPTO')
i = 0
while i < len(parents):
print i
print parents[i]
i += 1
| {
"content_hash": "f5c7e36ed18e80f45e7e195d60f211c3",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 63,
"avg_line_length": 22.2,
"alnum_prop": 0.7117117117117117,
"repo_name": "nlinkov/pynet_test",
"id": "6951f9ef3f00d3c8a02313ffc6bb181b3b3a6ef3",
"size": "356",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test8_1.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Emacs Lisp",
"bytes": "334"
},
{
"name": "HTML",
"bytes": "28262"
},
{
"name": "Python",
"bytes": "730134"
},
{
"name": "Shell",
"bytes": "14276"
},
{
"name": "Vim script",
"bytes": "118"
}
],
"symlink_target": ""
} |
from flask import Flask, request, render_template
from random import shuffle

app = Flask(__name__)
app.config.update(dict(
    DEBUG=True,
))


@app.route('/', methods=['GET', 'POST'])
def root():
    """GET: render the upload form. POST: return the uploaded file's words shuffled."""
    if request.method == 'POST':
        # Renamed from ``file`` (shadowed the builtin).
        upload = request.files['file']
        # Read at most 1 MiB; the stream yields bytes on Python 3, so decode
        # before applying str operations.
        content = upload.stream.read(1024 * 1024).decode('utf-8')
        words = content.strip().split(' ')
        shuffle(words)
        return ' '.join(words)
    else:
        return render_template('main.html')


if __name__ == "__main__":
    app.run()
| {
"content_hash": "abfa96e30afbca9143d441d5f2d91408",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 49,
"avg_line_length": 23.136363636363637,
"alnum_prop": 0.5854616895874263,
"repo_name": "EUDAT-DLC/Shuffler",
"id": "10273872438a8619dc4292a7e5db9813b6a2788d",
"size": "509",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "820"
},
{
"name": "Python",
"bytes": "509"
}
],
"symlink_target": ""
} |
from django.conf.urls import url
from . import views
# Routes for the appointment app; the ``name`` values are the handles used
# for reverse URL lookups.
urlpatterns = [
    url(r'^$', views.redirectAppointment),  # bare /appointment/ entry point
    url(r'^view/$', views.viewAppointment, name='view'),
    url(r'^schedule/$', views.scheduleAppointment, name='schedule'),
    # Doctor/nurse scheduling: the optional numeric <patient> capture is
    # forwarded to the view as a keyword argument.
    url(r'^doccreate/$', views.scheduleDoctor, name='scheduleDoctor'),
    url(r'^doccreate/(?P<patient>[0-9]+)/$', views.scheduleDoctor, name='scheduleDoctor'),  # Pass in a patient
    url(r'^nurcreate/$', views.scheduleNurse, name='scheduleNurse'),
    url(r'^nurcreate/(?P<patient>[0-9]+)/$', views.scheduleNurse, name='scheduleNurse'),  # Pass in a patient
    # Update/delete act on an existing appointment identified by <pk>.
    url(r'^update/(?P<pk>[0-9]+)/$', views.updateAppointment, name='update'),
    url(r'^delete/(?P<pk>[0-9]+)/$', views.deleteAppointment, name='delete'),
]
| {
"content_hash": "f0f9507fd921cbb98b07207100c849bb",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 110,
"avg_line_length": 38.3,
"alnum_prop": 0.6566579634464752,
"repo_name": "blackpan2/HealthNet",
"id": "ad486149f389557bb376ba203af7a5c335f606bc",
"size": "766",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/healthnet/appointment/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "36066"
},
{
"name": "HTML",
"bytes": "214171"
},
{
"name": "JavaScript",
"bytes": "319793"
},
{
"name": "Python",
"bytes": "279530"
}
],
"symlink_target": ""
} |
class AcquirerStatusResponse(object):
    """Data returned from an iDEAL acquirer status request.

    Every field defaults to the empty string until filled in through the
    corresponding setter.
    """

    def __init__(self):
        self.acquirerID = ''
        self.consumerName = ''
        self.consumerAccountNumber = ''
        self.consumerCity = ''
        self.transactionID = ''
        self.status = ''
        self.errorMessage = False

    def getAcquirerID(self):
        """Return the acquirerID."""
        return self.acquirerID

    def setAcquirerID(self, acquirerID):
        """Set the acquirerID (mandatory)."""
        self.acquirerID = acquirerID

    def getConsumerAccountNumber(self):
        """Return the consumerAccountNumber."""
        return self.consumerAccountNumber

    def setConsumerAccountNumber(self, consumerAccountNumber):
        """Set the consumerAccountNumber."""
        self.consumerAccountNumber = consumerAccountNumber

    def getConsumerCity(self):
        """Return the consumerCity."""
        return self.consumerCity

    def setConsumerCity(self, consumerCity):
        """Set the consumerCity."""
        self.consumerCity = consumerCity

    def getConsumerName(self):
        """Return the consumerName."""
        return self.consumerName

    def setConsumerName(self, consumerName):
        """Set the consumerName."""
        self.consumerName = consumerName

    def getTransactionID(self):
        """Return the transactionID."""
        return self.transactionID

    def setTransactionID(self, transactionID):
        """Set the transactionID."""
        self.transactionID = transactionID

    def getStatus(self):
        """Return the status code. See the definitions."""
        return self.status

    def setStatus(self, status):
        """Set the status code. See the definitions."""
        self.status = status

    def IsResponseError(self):
        """Always False: a status response is not an error response."""
        return False
| {
"content_hash": "36ad366cff2f556c056915e40a6c8f8d",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 94,
"avg_line_length": 22.402298850574713,
"alnum_prop": 0.698306824012314,
"repo_name": "wyleung/python-ideal",
"id": "6a3bc03a69a5c5374ffeacece2ac63c4a215acbb",
"size": "2236",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python-ideal/AcquirerStatusResponse.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "42864"
}
],
"symlink_target": ""
} |
"""
An http client that can make requests similar to how the google checkout
bot when it implements the notification api, version 2.5
"""
from django.test.client import Client
class GCClient(Client):
    """Test client that mimics the Google Checkout bot's notification
    POSTs (notification API, version 2.5)."""

    def post_notification(self, path, raw_post_data):
        """POST *raw_post_data* to *path* the way the Google Checkout bot
        would (XML body, UTF-8 charset)."""
        return self.post(
            path,
            data=raw_post_data,
            content_type="application/xml; charset=UTF-8",
        )
| {
"content_hash": "22f4aa19f5002e8094defeb76cc6098b",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 77,
"avg_line_length": 34.714285714285715,
"alnum_prop": 0.6954732510288066,
"repo_name": "mfogel/django-npo-google-checkout",
"id": "15585f29d9aa57dda056a0ef686442e90f272d17",
"size": "486",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "npo_google_checkout/tests/google_checkout_client.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "40292"
}
],
"symlink_target": ""
} |
import asyncio
import inspect
import json
import logging
import time
from collections import defaultdict
from aiohttp.web import Response
from sirbot.core import registry
from sirbot.utils import ensure_future
from .dispatcher import SlackDispatcher
from .. import database
logger = logging.getLogger(__name__)
# Message subtypes dropped outright in _incoming_message (handler noise).
IGNORING = ['channel_join', 'channel_leave', 'bot_message']
# Message subtypes promoted to first-class event types before dispatch.
SUBTYPE_TO_EVENT = ['message_changed', 'message_deleted']
class EventDispatcher(SlackDispatcher):
    """Routes incoming slack events (RTM websocket or Events API over HTTP)
    to the handler coroutines registered via :meth:`register`."""

    def __init__(self, http_client, users, channels, groups, plugins,
                 event_save, message_dispatcher, loop, token):
        super().__init__(
            http_client=http_client,
            users=users,
            channels=channels,
            groups=groups,
            plugins=plugins,
            save=event_save,
            loop=loop
        )
        # event type -> list of coroutine handlers (filled by ``register``)
        self._endpoints = defaultdict(list)
        self._message_dispatcher = message_dispatcher
        self._token = token
        self.bot = None

    async def incoming(self, item):
        # No-op: events enter through incoming_rtm / incoming_web instead.
        # NOTE(review): presumably kept to satisfy the SlackDispatcher
        # interface — confirm against the base class.
        pass

    async def incoming_rtm(self, event):
        """Handle one event received over the RTM websocket."""
        try:
            if event['type'] == 'message':
                await self._incoming_message(event)
            else:
                await self._incoming(event)
        except Exception as e:
            logger.exception(e)

    async def incoming_web(self, request):
        """Handle one Events-API HTTP delivery; returns an aiohttp Response."""
        payload = await request.json()
        if payload['token'] != self._token:
            # Verification token mismatch: reject the payload.
            return Response(text='Invalid')
        if payload['type'] == 'url_verification':
            # Slack's URL-verification handshake: echo the challenge back.
            body = json.dumps({'challenge': payload['challenge']})
            return Response(body=body, status=200)
        try:
            # Dispatch asynchronously; respond 200 immediately so slack
            # does not retry while handlers run.
            if payload['event']['type'] == 'message':
                ensure_future(
                    self._incoming_message(payload['event']),
                    loop=self._loop,
                    logger=logger
                )
            else:
                ensure_future(
                    self._incoming(payload['event']),
                    loop=self._loop,
                    logger=logger
                )
            return Response(status=200)
        except Exception as e:
            logger.exception(e)
            return Response(status=500)

    async def _incoming_message(self, event):
        """Filter a message event by subtype before dispatching it."""
        subtype = event.get('subtype') or event.get('message', {}).get(
            'subtype', 'message')
        if subtype in IGNORING:
            return
        elif subtype in SUBTYPE_TO_EVENT:
            # Promote the subtype to the event's type and treat it as a
            # plain event instead of a chat message.
            event['type'] = subtype
            await self._incoming(event)
        else:
            await self._message_dispatcher.incoming(event)

    async def _incoming(self, event):
        """Optionally persist the event, then fan it out to its handlers."""
        logger.debug('Event handler received: %s', event)
        slack = registry.get('slack')
        # ``self._save`` is either True (save everything) or a list of
        # event types to save.
        if isinstance(self._save, list) and event['type'] in self._save \
                or self._save is True:
            db = registry.get('database')
            await self._store_incoming(event, db)
        for func in self._endpoints.get(event['type'], list()):
            f = func(event, slack)
            ensure_future(coroutine=f, loop=self._loop, logger=logger)

    def register(self, event, func):
        """Register ``func`` as a handler for ``event`` type; plain
        functions are wrapped into coroutines so dispatch can await them."""
        logger.debug('Registering event: %s, %s from %s',
                     event,
                     func.__name__,
                     inspect.getabsfile(func))
        if not asyncio.iscoroutinefunction(func):
            func = asyncio.coroutine(func)
        self._endpoints[event].append(func)

    async def _store_incoming(self, event, db):
        """
        Store incoming event in db

        :param event: incoming slack event
        :param db: db plugin
        :return: None
        """
        ts = event.get('ts') or time.time()
        user = event.get('user')
        if isinstance(user, dict):
            user = user.get('id')
        logger.debug('Saving incoming event %s from %s', event['type'], user)
        await database.__dict__[db.type].dispatcher.save_incoming_event(
            ts=ts, user=user, event=event, db=db
        )
        await db.commit()
| {
"content_hash": "c4b39c8a4122f00c04944e62068bf24f",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 77,
"avg_line_length": 29.751824817518248,
"alnum_prop": 0.5525024533856723,
"repo_name": "pyslackers/sirbot-slack",
"id": "f2f71d97ca660c43461a36bf73a1b04e90fbe5eb",
"size": "4076",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sirbot/slack/dispatcher/event.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "96573"
}
],
"symlink_target": ""
} |
from socket import *
from time import sleep  # NOTE(review): imported but unused here
# Python 2 script: send the literal query 'plex' to the server below over
# UDP and print whatever single datagram comes back.
host = '10.0.0.41'
port = 12000
clientSocket = socket(AF_INET, SOCK_DGRAM)
clientSocket.sendto('plex', (host, port))
message, address = clientSocket.recvfrom(1024)
print message
clientSocket.close()
| {
"content_hash": "4bdcf661034603eb3df3fe37d4ba530f",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 46,
"avg_line_length": 19.071428571428573,
"alnum_prop": 0.6891385767790262,
"repo_name": "KevinMidboe/statusHandler",
"id": "7bfc2344f068b2bda671b4181ef6383f95eea5d0",
"size": "267",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "old_v0.1/plexLoop.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "1241"
},
{
"name": "Python",
"bytes": "1562145"
},
{
"name": "Shell",
"bytes": "3268"
}
],
"symlink_target": ""
} |
import pytest
from django.core.exceptions import ValidationError
from ...core.notification.validation import validate_and_get_channel
from ...graphql.notifications.error_codes import ExternalNotificationErrorCodes
def test_validate_and_get_channel_for_non_existing_slug():
    # A slug that matches no channel must be rejected.
    payload = {"channel": "test-slug"}
    with pytest.raises(ValidationError):
        validate_and_get_channel(payload, ExternalNotificationErrorCodes)
def test_validate_and_get_channel_for_inactive_channel(channel_PLN):
    # given: the channel exists but has been deactivated
    channel_PLN.is_active = False
    channel_PLN.save()
    assert not channel_PLN.is_active
    # then: validation refuses its slug
    payload = {"channel": channel_PLN.slug}
    with pytest.raises(ValidationError):
        validate_and_get_channel(payload, ExternalNotificationErrorCodes)
def test_validate_and_get_channel_for_lack_of_input():
    # A payload without a "channel" key at all must be rejected.
    with pytest.raises(ValidationError):
        validate_and_get_channel({}, ExternalNotificationErrorCodes)
def test_validate_and_get_channel(channel_PLN):
    # A valid, active channel slug is returned unchanged.
    slug = validate_and_get_channel(
        {"channel": channel_PLN.slug}, ExternalNotificationErrorCodes
    )
    assert slug == channel_PLN.slug
| {
"content_hash": "a4ce3980d314b4ffd37d3557eb8a0d84",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 79,
"avg_line_length": 33.05882352941177,
"alnum_prop": 0.7259786476868327,
"repo_name": "mociepka/saleor",
"id": "3718a146c07ca43f14abe41619db8e305811a566",
"size": "1124",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "saleor/core/tests/test_notification.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "2228"
},
{
"name": "HTML",
"bytes": "249248"
},
{
"name": "Procfile",
"bytes": "290"
},
{
"name": "Python",
"bytes": "12686831"
},
{
"name": "Shell",
"bytes": "439"
}
],
"symlink_target": ""
} |
"""Crypto-Balances
Usage:
run.py [(address [(--add|--remove) <address_type> <address>...])]
[(exclusion [(--add|--remove) <asset>...])]
[base <currency> [--precision <n>]]
[-m --minimum <balance>]
[-i --itemize]
Options:
-h --help Show this screen
-v --version Show version
-a --add
-r --remove
-i --itemize Show asset balance for individual addresses
-b --base <currency> Asset value base denomination
-p --precision <n> Base currency decimal places
-m --minimum <balance> Threshold asset balance for print
"""
from docopt import docopt
from src import config, util, portfolio
def main(argv):
    """Run the actions selected by the parsed docopt arguments."""
    # True when this invocation modifies config instead of displaying it.
    config_changed = argv['--add'] or argv['--remove']

    if argv['address']:
        if argv['--add']:
            config.add_address(argv['<address_type>'], argv['<address>'])
        if argv['--remove']:
            config.remove_address(argv['<address_type>'], argv['<address>'])
        if not config_changed:
            config.display_addresses()

    if argv['exclusion']:
        if argv['--add']:
            config.add_exclusion(argv['<asset>'])
        if argv['--remove']:
            config.remove_exclusion(argv['<asset>'])
        if not config_changed:
            config.display_exclusions()

    # Defaults: BTC base, 8 decimal places, show every non-zero balance.
    base_currency = argv['<currency>'] or 'BTC'
    base_precision = argv['--precision'] or 8
    min_balance = argv['--minimum'] or 0

    holdings = portfolio.Portfolio(
        util.json_from_file(config.addr_data_file),
        util.json_from_file(config.addr_config_file),
        util.list_from_file(config.excluded_assets_file),
    )
    holdings.filter_addr_assets(min_balance)
    holdings.retrieve_asset_prices(base_currency)

    if holdings.isempty():
        print('No addresses have been added')
    elif argv['--itemize']:
        holdings.print_address_balances(8, base_precision)
    else:
        holdings.print_total_balances(8, base_precision)


if __name__ == '__main__':
    args = docopt(__doc__, version='Crypto-Balances v1.0.0')
    main(args)
# """
#
# KNOWN BUGS
# --
#
# TO DO
# change config file so that absolute paths to json values can be used
#
# """
| {
"content_hash": "8ac90193661bb9460439a5dab34aed7c",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 76,
"avg_line_length": 31.154929577464788,
"alnum_prop": 0.5976491862567812,
"repo_name": "evanhenri/Crypto-Balances",
"id": "480a7a64e70a16f328e72d2e6c01eafa8d18fc88",
"size": "2212",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cryptobalances.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22672"
}
],
"symlink_target": ""
} |
"""
Parser for silme-compatible translation formats.
"""
import codecs
import silme
from collections import OrderedDict
from copy import copy
from silme.format.dtd import FormatParser as DTDParser
from silme.format.ini import FormatParser as IniParser
from silme.format.inc import FormatParser as IncParser
from silme.format.properties import FormatParser as PropertiesParser
from pontoon.sync.exceptions import SyncError
from pontoon.sync.utils import (
create_parent_directory,
escape_quotes,
unescape_quotes,
)
from pontoon.sync.formats.base import ParsedResource
from pontoon.sync.vcs.models import VCSTranslation
class SilmeEntity(VCSTranslation):
    def __init__(self, silme_object, comments=None, order=0, copy_string=True):
        """
        :param copy_string:
            If True, copy the string from the silme_object. Otherwise,
            self.strings will be an empty dict. Used for creating empty
            copies of translations from source resources.
        """
        self.silme_object = silme_object
        self.comments = comments or []
        self.order = order
        # An untranslated copy carries no strings at all.
        self.strings = {None: silme_object.value} if copy_string else {}

    @property
    def key(self):
        """Identifier of the underlying silme entity."""
        return self.silme_object.id

    @property
    def source_string(self):
        """Source text of the underlying silme entity."""
        return self.silme_object.value

    @property
    def source_string_plural(self):
        """Silme formats carry no plural source strings."""
        return ""

    @property
    def fuzzy(self):
        return False

    @fuzzy.setter
    def fuzzy(self, fuzzy):
        # We don't use fuzzy in silme
        pass

    @property
    def source(self):
        return []

    def __eq__(self, other):
        """Equal when both the key and the default translation match."""
        if self.key != other.key:
            return False
        return self.strings.get(None) == other.strings.get(None)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __bool__(self):
        # python 3
        return bool(self.strings)
class SilmeResource(ParsedResource):
    """A translation resource read and written via a silme format parser."""

    def __init__(self, parser, path, source_resource=None):
        """
        :param parser: silme FormatParser class used to read/write ``path``
        :param path: filesystem path of the resource
        :param source_resource: optional SilmeResource for the source
            locale; its entities are pre-copied in (with empty strings) so
            every source entity has a counterpart here.
        """
        self.parser = parser
        self.path = path
        self.source_resource = source_resource
        self.entities = OrderedDict()  # Preserve entity order.
        # Bug 1193860: unescape quotes in some files
        self.escape_quotes_on = "mobile/android/base" in path and parser is DTDParser
        # Copy entities from the source_resource if it's available.
        if source_resource:
            for key, entity in source_resource.entities.items():
                self.entities[key] = copy_source_entity(entity)
        try:
            # Only uncomment MOZ_LANGPACK_CONTRIBUTORS if this is a .inc
            # file and a source resource (i.e. it has no source resource
            # itself).
            self.structure = parser.get_structure(
                read_file(
                    path,
                    uncomment_moz_langpack=parser is IncParser and not source_resource,
                )
            )
        except IOError:
            # If the file doesn't exist, but we have a source resource,
            # we can keep going, we'll just not have any translations.
            if source_resource:
                return
            else:
                raise
        # Walk the parsed structure, attaching any comments seen since the
        # previous entity to the next entity encountered.
        comments = []
        current_order = 0
        for obj in self.structure:
            if isinstance(obj, silme.core.entity.Entity):
                if self.escape_quotes_on:
                    obj.value = unescape_quotes(obj.value)
                entity = SilmeEntity(obj, comments, current_order)
                self.entities[entity.key] = entity
                current_order += 1
                comments = []
            elif isinstance(obj, silme.core.structure.Comment):
                for comment in obj:
                    # Silme groups comments together, so we strip
                    # whitespace and split them up.
                    lines = str(comment).strip().split("\n")
                    comments += [line.strip() for line in lines]

    @property
    def translations(self):
        """All entities, in file order."""
        return list(self.entities.values())

    def save(self, locale):
        """
        Load the source resource, modify it with changes made to this
        Resource instance, and save it over the locale-specific
        resource.

        :raises SyncError: if this resource was created without a
            source resource to copy from.
        """
        if self.source_resource is None:
            raise SyncError(
                "Cannot save silme resource {0}: No source resource given.".format(
                    self.path
                )
            )
        # Only uncomment MOZ_LANGPACK_CONTRIBUTORS if we have a
        # translation for it
        new_structure = self.parser.get_structure(
            read_file(
                self.source_resource.path,
                uncomment_moz_langpack=self.entities.get(
                    "MOZ_LANGPACK_CONTRIBUTORS", False
                ),
            )
        )
        # Update translations in the copied resource.
        entities = [
            SilmeEntity(obj)
            for obj in new_structure
            if isinstance(obj, silme.core.entity.Entity)
        ]
        for silme_entity in entities:
            key = silme_entity.key
            translated_entity = self.entities.get(key)
            if translated_entity and None in translated_entity.strings:
                translation = translated_entity.strings[None]
                if self.escape_quotes_on:
                    translation = escape_quotes(translation)
                new_structure.modify_entity(key, translation)
            else:
                # Remove untranslated entity and following newline
                pos = new_structure.entity_pos(key)
                new_structure.remove_entity(key)
                try:
                    line = new_structure[pos]
                except IndexError:
                    # No newline at end of file
                    continue
                if isinstance(line, str) and line.startswith("\n"):
                    line = line[len("\n") :]
                    new_structure[pos] = line
                    if len(line) == 0:
                        new_structure.remove_element(pos)
        # Temporary fix for bug 1236281 until bug 721211 lands
        if (
            self.path.endswith("browser/chrome/browser/browser.properties")
            and locale.code == "zh-CN"
        ):
            new_entity = silme.core.entity.Entity(
                "browser.startup.homepage", "https://start.firefoxchina.cn"
            )
            new_structure.add_entity(new_entity)
            new_structure.add_string("\n")
        create_parent_directory(self.path)
        with codecs.open(self.path, "w", "utf-8") as f:
            f.write(self.parser.dump_structure(new_structure))
def read_file(path, uncomment_moz_langpack=False):
    """Return the contents of the UTF-8 resource at *path*.

    .inc files ship a special commented-out entity called
    MOZ_LANGPACK_CONTRIBUTORS; when *uncomment_moz_langpack* is true the
    leading "# " is stripped from that line so locales can translate it.
    """
    with codecs.open(path, "r", "utf-8") as resource:
        if not uncomment_moz_langpack:
            return resource.read()
        processed = []
        for raw_line in resource:
            if raw_line.startswith("# #define MOZ_LANGPACK_CONTRIBUTORS"):
                raw_line = raw_line[2:]
            processed.append(raw_line)
        return "".join(processed)
def copy_source_entity(entity):
    """Clone *entity* with an empty ``strings`` dict.

    Entities copied from a source resource but never modified during sync
    must not be saved into the translated resource; the empty ``strings``
    marks them as such.
    """
    cloned_comments = copy(entity.comments)  # members are strings, shallow is fine
    return SilmeEntity(
        entity.silme_object, cloned_comments, entity.order, copy_string=False
    )
def parse(parser, path, source_path=None, locale=None):
    """Parse *path* with *parser*, optionally against a source resource."""
    # TODO: Cache the source resource to avoid re-parsing it a bunch.
    source_resource = (
        SilmeResource(parser, source_path) if source_path is not None else None
    )
    return SilmeResource(parser, path, source_resource=source_resource)
def parse_properties(path, source_path=None, locale=None):
    # .properties files via silme's PropertiesParser; ``locale`` is accepted
    # for interface parity but unused.
    return parse(PropertiesParser, path, source_path)
def parse_ini(path, source_path=None, locale=None):
    # .ini files via silme's IniParser; ``locale`` unused.
    return parse(IniParser, path, source_path)
def parse_inc(path, source_path=None, locale=None):
    # .inc files via silme's IncParser; ``locale`` unused.
    return parse(IncParser, path, source_path)
def parse_dtd(path, source_path=None, locale=None):
    # .dtd files via silme's DTDParser; ``locale`` unused.
    return parse(DTDParser, path, source_path)
| {
"content_hash": "88cb3cae968384e15f44844812832acc",
"timestamp": "",
"source": "github",
"line_count": 268,
"max_line_length": 87,
"avg_line_length": 32.26119402985075,
"alnum_prop": 0.5874392782789729,
"repo_name": "jotes/pontoon",
"id": "ac54ecfeb07282e12c7639c69c3c3708c334722b",
"size": "8646",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pontoon/sync/formats/silme.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "226580"
},
{
"name": "Dockerfile",
"bytes": "2640"
},
{
"name": "FreeMarker",
"bytes": "35248"
},
{
"name": "HTML",
"bytes": "151639"
},
{
"name": "JavaScript",
"bytes": "1332848"
},
{
"name": "Makefile",
"bytes": "3551"
},
{
"name": "Python",
"bytes": "1391398"
},
{
"name": "Shell",
"bytes": "3676"
}
],
"symlink_target": ""
} |
class ReferService(object):
u"""マージリクエスト参照サービス"""
def __init__(self):
from git_lab.apis.mergerequest.repositories import MergeRequestRepository
self.repo = MergeRequestRepository()
def show_merge_request(self, req_id):
u"""指定IDのマージリクエストを表示します
@param req_id : マージリクエストのID
@type req_id : int
"""
assert isinstance(req_id, int)
mr = self.repo.get_request(req_id)
if mr is not None:
merge_request = u"""
[ {state} ] #{iid} {title}
--------------------------------------------------------------------------------
merge request id : {id}
author : {author}
assignee : {assignee}
opened at : {opened_at}
upvotes/downbotes : +{up} / -{down}
[ merge to ] : {target} <----- [ merge from ] {source}
--------------------------------------------------------------------------------""".format(
state=mr.get_state(),
iid=mr.get_iid(),
title=mr.get_title(),
id=mr.get_id(),
author=mr.get_author().get_name(),
assignee=mr.get_assignee().get_name(),
opened_at=mr.get_author().get_created_at(),
up=mr.get_upvotes(),
down=mr.get_downvotes(),
target=mr.get_target_branch(),
source=mr.get_source_branch(),
)
print merge_request
notes = self.repo.get_notes(req_id)
for note in notes:
n = u"""
{author} posted at {posted_at}
----------------------------------------------------------------------------
{body}""".format(
author=note.get_author().get_name(),
posted_at=note.get_created_at(),
body=note.get_body()
)
print n
def list_merge_request(self, page, count):
u"""指定条件でマージリクエストの一覧を取得して表示します
@param page : 表示するページ番号
@type page : int
@param page : ページあたりに表示する件数
@type page : int
"""
assert isinstance(page, int)
assert isinstance(count, int)
mrs = self.repo.get_requests(page, count)
if len(mrs) != 0:
header = u"""
id mid state author title
+--------+--------+--------+----------+----------------------------------------+"""
print header
for mr in mrs:
m = u" #{id: <7} #{mid: <7} {state: ^8} {author: ^10} {title: <40}".format(
id=mr.get_iid(),
mid=mr.get_id(),
state=mr.get_state(),
author=mr.get_author().get_name(),
title=mr.get_title(),
)
print m
class EditService(object):
u"""マージリクエスト編集サービス"""
def __init__(self):
from git_lab.apis.mergerequest.repositories import MergeRequestRepository
from git_lab.apis.projects.repositories import ProjectsRepository
self.request_repo = MergeRequestRepository()
self.project_repo = ProjectsRepository()
def create_request(self, destination, source, title):
u"""マージリクエストを作成します
@param destination : 宛先({namespace}/{project}:{branch})
@type destination : str
@param source : 作成元ブランチ名
@type source : str | None
@param title : タイトル
@type title : str | None
"""
from git_lab.utils import get_current_branch
dest_info = EditService.parse_dest_spec(destination.decode("utf-8") if destination is not None else None)
target_project = self.project_repo.get_project(dest_info["dest_project"])
target_project_id = target_project.get_id() if target_project is not None else None
target_branch = dest_info["dest_branch"]
source_branch = source.decode("utf-8") if source is not None else get_current_branch()
title = title.decode("utf-8") if title is not None else source_branch
print u"creating merge request [%s] %s ---> %s:%s" % (
title, source_branch, target_project.get_name_with_namespace(), target_branch
)
if self.request_repo.create_requests(source_branch, target_project_id, target_branch, title):
print "created"
else:
print "failed"
@staticmethod
def parse_dest_spec(dest):
u"""宛先記述をパースする
None -> このプロジェクトのmaster
":{branch}" -> このプロジェクトの{branch}
"{namespace/project}:" -> {namespace/project}のmaster
"{namespace/project}" -> {namespace/project}のmaster
"{namespace/project}:{branch}" -> {namespace/project}の{branch}
@type dest : str | None
@rtype : dict
"""
from git_lab.utils import get_project
from string import replace
if dest is None:
return {
"dest_project": get_project(),
"dest_branch": "master"
}
elif ":" in dest:
proj, br = dest.split(":", 2)
project = replace(proj, u"/", u"%2F") if proj != "" else get_project()
branch = br if br != "" else "master"
return {
"dest_project": project,
"dest_branch": branch
}
elif ":" not in dest:
return {
"dest_project": replace(dest, u"/", u"%2F"),
"dest_branch": "master"
}
else:
project, branch = dest.split(":", 2)
return {
"dest_project": replace(project, u"/", u"%2F"),
"dest_branch": branch
}
| {
"content_hash": "42e65fc500ea142ddfb7d831cd6c4783",
"timestamp": "",
"source": "github",
"line_count": 175,
"max_line_length": 113,
"avg_line_length": 32.331428571428575,
"alnum_prop": 0.4931071049840933,
"repo_name": "kamekoopa/git-lab",
"id": "9014171b530545bbd499c55f7fac1b44cba34f29",
"size": "6006",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "git_lab/apis/mergerequest/services.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "23845"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages, Command
class PyTest(Command):
    """setup.py 'test' command: delegate the test run to runtests.py."""

    user_options = []

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        import subprocess
        import sys
        # Run the project's test driver and propagate its exit status.
        status = subprocess.call([sys.executable, 'runtests.py', 'tests'])
        raise SystemExit(status)
class PyTestWithCov(PyTest):
    """setup.py 'cov' command: run the tests with HTML coverage and pdb-on-failure."""

    def run(self):
        import subprocess
        import sys
        status = subprocess.call(
            [sys.executable, 'runtests.py', 'tests', '--cov-report=html', '--cov=.', '--pdb'])
        raise SystemExit(status)
# Distribution metadata for the jinja2-precompiler package.
setup(name="jinja2-precompiler",
      version="0.2",
      description="Pre-compile Jinja2 templates to Python byte code",
      # Long description is taken verbatim from the README.
      long_description=open('README.rst').read(),
      author="ENDOH takanao",
      license="BSD",
      url="http://github.com/MiCHiLU/jinja2-precompiler",
      keywords=' '.join([
          'jinja2',
          'python',
          'template',
      ]
      ),
      classifiers=[
          'Environment :: Console',
          'Environment :: Web Environment',
          'Intended Audience :: Developers',
          'License :: OSI Approved :: BSD License',
          'Operating System :: OS Independent',
          'Programming Language :: Python',
          'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
          'Topic :: Software Development :: Libraries :: Python Modules',
          'Topic :: Text Processing :: Markup :: HTML',
          'Topic :: Utilities',
          'Programming Language :: Python :: 2.6',
          'Programming Language :: Python :: 2.7',
          'Programming Language :: Python :: 3',
          'Programming Language :: Python :: 3.2',
          'Programming Language :: Python :: 3.3',
          'Programming Language :: Python :: Implementation :: PyPy',
      ],
      py_modules=['jinja2precompiler'],
      install_requires=['jinja2'],
      # Expose the module's main() as the `jinja2precompiler` console command.
      entry_points={
          'console_scripts': [
              'jinja2precompiler = jinja2precompiler:main',
          ]
      },
      zip_safe=False,
      # Custom commands: `setup.py test` and `setup.py cov`.
      cmdclass = {
          'test': PyTest,
          'cov': PyTestWithCov,
      },
      )
| {
"content_hash": "a848163735f19a13d841815f92b9cbbb",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 114,
"avg_line_length": 33.01587301587302,
"alnum_prop": 0.5759615384615384,
"repo_name": "MiCHiLU/jinja2-precompiler",
"id": "00d0740f160274fe1d33d9a0a3cc3d4ec189d55c",
"size": "2127",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "202727"
}
],
"symlink_target": ""
} |
"""FMCS - Find Maximum Common Substructure
This software finds the maximum common substructure of a set of
structures and reports it as a SMARTS strings.
This implements what I think is a new algorithm for the MCS problem.
The core description is:
best_substructure = None
pick one structure as the query, and other as the targets
for each substructure in the query graph:
convert it to a SMARTS string based on the desired match properties
if the SMARTS pattern exists in all of the targets:
then this is a common substructure
keep track of the maximum such common structure,
The SMARTS string depends on the desired match properties. For
example, if ring atoms are only allowed to match ring atoms then an
aliphatic ring carbon in the query is converted to the SMARTS "[C;R]",
and the double-bond ring bond converted to "=;@" while the respective
chain-only versions are "[C;!R]" and "=;!@".
The algorithm I outlined earlier will usually take a long time. There
are several ways to speed it up.
== Bond elimination ==
As the first step, remove bonds which obviously cannot be part of the
MCS.
This requires atom and bond type information, which I store as SMARTS
patterns. A bond can only be in the MCS if its canonical bond type is
present in all of the structures. A bond type is string made of the
SMARTS for one atom, the SMARTS for the bond, and the SMARTS for the
other atom. The canonical bond type is the lexicographically smaller of
the two possible bond types for a bond.
The atom and bond SMARTS depend on the type comparison used.
The "ring-matches-ring-only" option adds an "@" or "!@" to the bond
SMARTS, so that the canonical bondtype for "C-C" becomes [#6]-@[#6] or
[#6]-!@[#6] if the bond is in a ring or not in a ring, and if atoms
are compared by element and bonds are compared by bondtype. (This
option does not add "R" or "!R" to the atom SMARTS because there
should be a single bond in the MCS of c1ccccc1O and CO.)
The result of all of this atom and bond typing is a "TypedMolecule"
for each input structure.
I then find which canonical bondtypes are present in all of the
structures. I convert each TypedMolecule into a
FragmentedTypedMolecule which has the same atom information but only
those bonds whose bondtypes are in all of the structures. This can
break a structure into multiple, disconnected fragments, hence the
name.
(BTW, I would like to use the fragmented molecules as the targets
because I think the SMARTS match would go faster, but the RDKit SMARTS
matcher doesn't like them. I think it's because the new molecule
hasn't been sanitized and the underlying data structure the ring
information doesn't exist. Instead, I use the input structures for the
SMARTS match.)
== Use the structure with the smallest largest fragment as the query ==
== and sort the targets by the smallest largest fragment ==
I pick one of the FragmentedTypedMolecule instances as the source of
substructure enumeration. Which one?
My heuristic is to use the one with the smallest largest fragment.
Hopefully it produces the least number of subgraphs, but that's also
related to the number of rings, so a large linear graph will produce
fewer subgraphs than a small fused ring system. I don't know how to
quantify that.
For each of the fragmented structures, I find the number of atoms in
the fragment with the most atoms, and I find the number of bonds in
the fragment with the most bonds. These might not be the same
fragment.
I sort the input structures by the number of bonds in the largest
fragment, with ties broken first on the number of atoms, and then on
the input order. The smallest such structure is the query structure,
and the remaining are the targets.
== Use a breadth-first search and a priority queue to ==
== enumerate the fragment subgraphs ==
I extract each of the fragments from the FragmentedTypedMolecule into
a TypedFragment, which I use to make an EnumerationMolecule. An
enumeration molecule contains a pair of directed edges for each atom,
which simplifies the enumeration algorithm.
The enumeration algorithm is based around growing a seed. A seed
contains the current subgraph atoms and bonds as well as an exclusion
set of bonds which cannot be used for future growth. The initial seed
is the first bond in the fragment, which may potentially grow to use
the entire fragment. The second seed is the second bond in the
fragment, which is excluded from using the first bond in future
growth. The third seed starts from the third bond, which may not use
the first or second bonds during growth, and so on.
A seed can grow along bonds connected to an atom in the seed but which
aren't already in the seed and aren't in the set of excluded bonds for
the seed. If there are no such bonds then subgraph enumeration ends
for this fragment. Given N bonds there are 2**N-1 possible ways to
grow, which is just the powerset of the available bonds, excluding the
no-growth case.
This breadth-first growth takes into account all possibilities of using
the available N bonds so all of those bonds are added to the exclusion
set of the newly expanded subgraphs.
For performance reasons, the bonds used for growth are separated into
'internal' bonds, which connect two atoms already in the subgraph, and
'external' bonds, which lead outwards to an atom not already in the
subgraph.
Each seed growth can add from 0 to N new atoms and bonds. The goal is
to maximize the subgraph size so the seeds are stored in a priority
queue, ranked so the seed with the most bonds is processed first. This
turns the enumeration into something more like a depth-first search.
== Prune seeds which aren't found in all of the structures ==
At each stage of seed growth I check that the new seed exists in all
of the original structures. (Well, all except the one which I
enumerate over in the first place; by definition that one will match.)
If it doesn't match then there's no reason to include this seed or any
larger seeds made from it.
The check is easy; I turn the subgraph into its corresponding SMARTS
string and use RDKit's normal SMARTS matcher to test for a match.
There are three ways to generate a SMARTS string: 1) arbitrary, 2)
canonical, 3) hybrid.
I have not tested #1. During most of the development I assumed that
SMARTS matches across a few hundred structures would be slow, so that
the best solution is to generate a *canonical* SMARTS and cache the
match information.
Well, it turns out that my canonical SMARTS match code takes up most
of the FMCS run-time. If I drop the canonicalization step then the
code averages about 5-10% faster. This isn't the same as #1 - I still
do the initial atom assignment based on its neighborhood, which is
like a circular fingerprint of size 2 and *usually* gives a consistent
SMARTS pattern, which I can then cache.
However, there are times when the non-canonical SMARTS code is slower.
Obviously one is if there are a lot of structures, and another is if
there is a lot of symmetry. I'm still working on characterizing this.
== Maximize atoms? or bonds? ==
The above algorithm enumerates all subgraphs of the query and
identifies those subgraphs which are common to all input structures.
It's trivial then to keep track of the current "best" subgraph, which
can defined as having the subgraph with the most atoms, or the most
bonds. Both of those options are implemented.
It would not be hard to keep track of all other subgraphs which are
the same size.
== --complete-ring-only implementation ==
The "complete ring only" option is implemented by first enabling the
"ring-matches-ring-only" option, as otherwise it doesn't make sense.
Second, in order to be a "best" subgraph, all bonds in the subgraph
which are ring bonds in the original molecule must also be in a ring
in the subgraph. This is handled as a post-processing step.
(Note: some possible optimizations, like removing ring bonds from
structure fragments which are not in a ring, are not yet implemented.)
== Prune seeds which have no potential for growing large enough ==
Given a seed, its set of edges available for growth, and the set of
excluded bonds, figure out the maximum possible growth for the seed.
If this maximum possible is less than the current best subgraph then
prune.
This requires a graph search, currently done in Python, which is a bit
expensive. To speed things up, I precompute some edge information.
That is, if I know that a given bond is a chain bond (not in a ring)
then I can calculate the maximum number of atoms and bonds for seed
growth along that bond, in either direction. However, precomputation
doesn't take into account the excluded bonds, so after a while the
predicted value is too high.
Again, I'm still working on characterizing this, and an implementation
in C++ would have different tradeoffs.
"""
__version__ = "1.1"
__version_info = (1, 1, 0)
import sys
try:
from rdkit import Chem
from rdkit.six import next
from rdkit.six.moves import range
except ImportError:
sys.stderr.write("Please install RDKit from http://www.rdkit.org/\n")
raise
import copy
import itertools
import re
import weakref
from heapq import heappush, heappop, heapify
from itertools import chain, combinations, product
import collections
from collections import defaultdict
import time
### A place to set global options
# (Is this really useful?)
class Default(object):
    # Global default option values for the MCS search.
    timeout = None                 # seconds before giving up; None means no limit
    timeoutString = "none"         # human-readable form of `timeout`
    maximize = "bonds"             # rank candidate MCSes by "atoms" or "bonds"
    atomCompare = "elements"       # atom match mode: "any", "elements", or "isotopes"
    bondCompare = "bondtypes"      # bond match mode: "any" or "bondtypes"
    matchValences = False          # if True, atoms must also agree on total valence
    ringMatchesRingOnly = False    # if True, ring bonds only match ring bonds
    completeRingsOnly = False      # if True, subgraph ring bonds must close rings
####### Atom type and bond type information #####
# Look up the atomic symbol given its atomic number
_get_symbol = Chem.GetPeriodicTable().GetElementSymbol
# Lookup table to get the SMARTS for an atom given its element.
# This uses the '#<n>' notation for atoms which may be aromatic.
# Eg, '#6' for carbon, instead of 'C,c'.
# Use the standard element symbol for atoms which can't be aromatic.
class AtomSmartsNoAromaticity(dict):
    """Lazy cache mapping an atomic number to its plain-symbol atom SMARTS."""

    def __missing__(self, eleno):
        # First access computes the element symbol and memoizes it.
        symbol = _get_symbol(eleno)
        self[eleno] = symbol
        return symbol
# Shared cache instance used by atom_typer_elements().
_atom_smarts_no_aromaticity = AtomSmartsNoAromaticity()
# Initialize to the ones which need special treatment:
# RDKit supports b, c, n, o, p, s, se, and te.
# Daylight and OpenSMILES don't support 'te' but do support 'as'.
# I don't want 'H'-is-hydrogen to get confused with 'H'-as-has-hydrogens.
# For better portability, I use the '#' notation for all of them.
for eleno in (1, 5, 6, 7, 8, 15, 16, 33, 34, 52):
    _atom_smarts_no_aromaticity[eleno] = "#" + str(eleno)
# Import-time sanity checks: carbon uses the '#' form, helium the plain symbol.
assert _atom_smarts_no_aromaticity[6] == "#6"
assert _atom_smarts_no_aromaticity[2] == "He"
def atom_typer_any(atoms):
    """Give every atom the wildcard SMARTS so any atom matches any other."""
    return ["*" for _ in atoms]
def atom_typer_elements(atoms):
    """Type each atom by element, using the aromaticity-safe SMARTS lookup table."""
    lookup = _atom_smarts_no_aromaticity
    return [lookup[atom.GetAtomicNum()] for atom in atoms]
# Match atom by isotope number. This depends on the RDKit version.
if hasattr(Chem.Atom, "GetIsotope"):

    def atom_typer_isotopes(atoms):
        """Type each atom by its isotope number, e.g. '12*'."""
        return ["%d*" % atom.GetIsotope() for atom in atoms]
else:
    # Before mid-2012, RDKit only supported atomic mass, not isotope.
    # [12*] matches atoms whose mass is 12.000 +/- 0.5/1000.
    # This generally works, excepting elements which have no natural
    # abundance: Tc, Pm, Po, At, Rn, Fr, Ra, Ac, Np, Pu, Am, Cm,
    # Bk, Cf, Es, Fm, Md, No, Lr ([98Tc] is the same as [Tc], etc.).
    # This leads to problems because I don't have a way to
    # define the SMARTS for "no defined isotope." In SMILES/SMARTS
    # that's supposed to be through isotope 0.
    # The best I can do is force the non-integer masses to 0 and
    # use isotope 0 to match them. That's clumsy, but it gives
    # the expected result.
    def atom_typer_isotopes(atoms):
        """Type each atom by its rounded mass; non-integral masses are forced to 0."""
        atom_smarts_types = []
        for atom in atoms:
            mass = atom.GetMass()
            int_mass = int(round(mass * 1000))
            if int_mass % 1000 == 0:
                # This is close enough that RDKit's match will work
                atom_smarts = "%d*" % (int_mass // 1000)
            else:
                # Probably in natural abundance. In any case,
                # there's no SMARTS for this pattern, so force
                # everything to 0.
                atom.SetMass(0.0)  # XX warning; in-place modification of the input!
                atom_smarts = "0*"
            atom_smarts_types.append(atom_smarts)
        return atom_smarts_types
def bond_typer_any(bonds):
    """Give every bond the wildcard SMARTS so any bond matches any other."""
    return ["~" for _ in bonds]
def bond_typer_bondtypes(bonds):
    """Type each bond by its SMARTS, resolving the ambiguous default bond.

    RDKit returns "" for the default SMILES bond, which as SMARTS would mean
    "single or aromatic"; disambiguate using the bond's aromaticity flag,
    since aromaticity matches are important.
    """
    bond_smarts_types = []
    for bond in bonds:
        term = bond.GetSmarts()
        if not term:
            term = ':' if bond.GetIsAromatic() else '-'
        bond_smarts_types.append(term)
    return bond_smarts_types
# Registry of atom typers, keyed by the value of an `atomCompare` option.
atom_typers = {
    "any": atom_typer_any,
    "elements": atom_typer_elements,
    "isotopes": atom_typer_isotopes,
}
# Registry of bond typers, keyed by the value of a `bondCompare` option.
bond_typers = {
    "any": bond_typer_any,
    "bondtypes": bond_typer_bondtypes,
}
# Typers corresponding to the Default option values.
default_atom_typer = atom_typers[Default.atomCompare]
default_bond_typer = bond_typers[Default.bondCompare]
####### Support code for handling user-defined atom classes
# User-defined atom classes are handled in a round-about fashion. The
# fmcs code doesn't know atom classes, but it can handle isotopes.
# It's easy to label the atom isotopes and do an "isotopes" atom
# comparison. The hard part is if you want to get the match
# information back using the original structure data, without the
# tweaked isotopes.
# My solution uses "save_isotopes" and "save_atom_classes" to store
# the old isotope information and the atom class assignments (both
# ordered by atom position), associated with the molecule.
# Use "restore_isotopes()" to restore the molecule's isotope values
# from the saved values. Use "get_selected_atom_classes" to get the
# atom classes used by specified atom indices.
# Use real isotope accessors when this RDKit build provides them.
if hasattr(Chem.Atom, "GetIsotope"):

    def get_isotopes(mol):
        """Return the isotope number of every atom, in atom order."""
        return [atom.GetIsotope() for atom in mol.GetAtoms()]

    def set_isotopes(mol, isotopes):
        """Assign one isotope value per atom; lengths must match or ValueError is raised."""
        if mol.GetNumAtoms() != len(isotopes):
            raise ValueError("Mismatch between the number of atoms and the number of isotopes")
        for atom, isotope in zip(mol.GetAtoms(), isotopes):
            atom.SetIsotope(isotope)
else:
    # Backwards compatibility. Before mid-2012, RDKit only supported atomic
    # mass, not isotope, so fall back to storing values in the mass field.

    def get_isotopes(mol):
        """Return the atomic mass of every atom (isotope stand-in), in atom order."""
        return [atom.GetMass() for atom in mol.GetAtoms()]

    def set_isotopes(mol, isotopes):
        """Assign one mass value per atom (isotope stand-in); lengths must match."""
        if mol.GetNumAtoms() != len(isotopes):
            raise ValueError("Mismatch between the number of atoms and the number of isotopes")
        for atom, isotope in zip(mol.GetAtoms(), isotopes):
            atom.SetMass(isotope)
# Per-molecule side tables (weak keys, so entries vanish with the molecule):
# saved original isotope values and user-assigned atom classes, in atom order.
_isotope_dict = weakref.WeakKeyDictionary()
_atom_class_dict = weakref.WeakKeyDictionary()
def save_isotopes(mol, isotopes):
    """Remember *mol*'s original isotope values so restore_isotopes() can undo edits."""
    _isotope_dict[mol] = isotopes
def save_atom_classes(mol, atom_classes):
    """Remember the user-assigned atom classes for *mol* (ordered by atom index)."""
    _atom_class_dict[mol] = atom_classes
def get_selected_atom_classes(mol, atom_indices):
    """Return the saved atom classes for the given atom indices, or None if unsaved."""
    saved_classes = _atom_class_dict.get(mol)
    if saved_classes is None:
        return None
    return [saved_classes[i] for i in atom_indices]
def restore_isotopes(mol):
    """Restore *mol*'s isotopes from the values stashed by save_isotopes().

    Raises ValueError if nothing was saved for this molecule.
    """
    try:
        isotopes = _isotope_dict[mol]
    except KeyError:
        raise ValueError("no isotopes to restore")
    set_isotopes(mol, isotopes)
def assign_isotopes_from_class_tag(mol, atom_class_tag):
    """Load atom classes from a molecule property and store them as isotopes.

    Reads whitespace-separated integer atom classes from the property named
    *atom_class_tag*, saves the current isotopes and the parsed classes, then
    writes the classes into the isotope fields so an "isotopes" atom
    comparison effectively compares atom classes.

    Raises ValueError if the tag is missing, the field count does not match
    the atom count, or any value is outside 1..10000.
    """
    try:
        # NOTE(review): assumes GetProp raises KeyError for a missing
        # property on this RDKit version — confirm.
        atom_classes = mol.GetProp(atom_class_tag)
    except KeyError:
        raise ValueError("Missing atom class tag %r" % (atom_class_tag, ))
    fields = atom_classes.split()
    if len(fields) != mol.GetNumAtoms():
        raise ValueError(
            "Mismatch between the number of atoms (#%d) and the number of atom classes (%d)" % (
                mol.GetNumAtoms(), len(fields)))
    new_isotopes = []
    for field in fields:
        if not field.isdigit():
            raise ValueError("Atom class %r from tag %r must be a number" % (field, atom_class_tag))
        isotope = int(field)
        if not (1 <= isotope <= 10000):
            raise ValueError("Atom class %r from tag %r must be in the range 1 to 10000" %
                             (field, atom_class_tag))
        new_isotopes.append(isotope)
    # Stash the originals first so restore_isotopes() can undo this relabeling.
    save_isotopes(mol, get_isotopes(mol))
    save_atom_classes(mol, new_isotopes)
    set_isotopes(mol, new_isotopes)
### Different ways of storing atom/bond information about the input structures ###
# A TypedMolecule contains the input molecule, unmodified, along with
# atom type, and bond type information; both as SMARTS fragments. The
# "canonical_bondtypes" uniquely characterizes a bond; two bonds will
# match if and only if their canonical bondtypes match. (Meaning:
# bonds must be of equivalent type, and must go between atoms of
# equivalent types.)
class TypedMolecule(object):
    """An input molecule plus its per-atom/per-bond SMARTS type information."""

    def __init__(self, rdmol, rdmol_atoms, rdmol_bonds, atom_smarts_types, bond_smarts_types,
                 canonical_bondtypes):
        # The unmodified RDKit molecule.
        self.rdmol = rdmol
        # These exist as a performance hack. It's faster to store the
        # atoms and bond as a Python list than to do GetAtoms() and
        # GetBonds() again. The stage 2 TypedMolecule does not use
        # these.
        self.rdmol_atoms = rdmol_atoms
        self.rdmol_bonds = rdmol_bonds
        # List of SMARTS to use for each atom and bond
        self.atom_smarts_types = atom_smarts_types
        self.bond_smarts_types = bond_smarts_types
        # List of canonical bondtype strings
        self.canonical_bondtypes = canonical_bondtypes
        # Question: Do I also want the original_rdmol_indices? With
        # the normal SMARTS I can always do the substructure match
        # again to find the indices, but perhaps this will be needed
        # when atom class patterns are fully implemented.
# Start with a set of TypedMolecules. Find the canonical_bondtypes
# which only exist in all them, then fragment each TypedMolecule to
# produce a FragmentedTypedMolecule containing the same atom
# information but containing only bonds with those
# canonical_bondtypes.
class FragmentedTypedMolecule(object):
    """A typed molecule rebuilt with only the bondtypes common to all inputs.

    Removing unsupported bonds can split the structure into several
    disconnected fragments, hence the name. orig_atoms/orig_bonds refer
    back to the atoms/bonds of the original (unfragmented) molecule.
    """

    def __init__(self, rdmol, rdmol_atoms, orig_atoms, orig_bonds, atom_smarts_types,
                 bond_smarts_types, canonical_bondtypes):
        self.rdmol = rdmol
        self.rdmol_atoms = rdmol_atoms
        self.orig_atoms = orig_atoms
        self.orig_bonds = orig_bonds
        # List of SMARTS to use for each atom and bond
        self.atom_smarts_types = atom_smarts_types
        self.bond_smarts_types = bond_smarts_types
        # List of canonical bondtype strings
        self.canonical_bondtypes = canonical_bondtypes
# A FragmentedTypedMolecule can contain multiple fragments. Once I've
# picked the FragmentedTypedMolecule to use for enumeration, I extract
# each of the fragments as the basis for an EnumerationMolecule.
class TypedFragment(object):
    """One connected fragment extracted from a FragmentedTypedMolecule.

    Used as the basis for building an EnumerationMolecule.
    """

    def __init__(self, rdmol, orig_atoms, orig_bonds, atom_smarts_types, bond_smarts_types,
                 canonical_bondtypes):
        self.rdmol = rdmol
        self.orig_atoms = orig_atoms
        self.orig_bonds = orig_bonds
        self.atom_smarts_types = atom_smarts_types
        self.bond_smarts_types = bond_smarts_types
        self.canonical_bondtypes = canonical_bondtypes
# The two possible bond types are
# atom1_smarts + bond smarts + atom2_smarts
# atom2_smarts + bond smarts + atom1_smarts
# The canonical bond type is the lexically smaller of these two.
def get_canonical_bondtypes(rdmol, bonds, atom_smarts_types, bond_smarts_types):
    """Return the canonical bondtype string for each bond.

    The two possible bondtypes for a bond are
    "[atom1][bond][atom2]" and "[atom2][bond][atom1]"; the canonical one
    is the lexically smaller, so two bonds match iff their canonical
    bondtypes are equal.
    """
    canonical_bondtypes = []
    for bond, bond_smarts in zip(bonds, bond_smarts_types):
        first = atom_smarts_types[bond.GetBeginAtomIdx()]
        second = atom_smarts_types[bond.GetEndAtomIdx()]
        if first > second:
            first, second = second, first
        canonical_bondtypes.append("[%s]%s[%s]" % (first, bond_smarts, second))
    return canonical_bondtypes
# Create a TypedMolecule using the element-based typing scheme
# TODO: refactor this. It doesn't seem right to pass boolean flags.
def get_typed_molecule(rdmol, atom_typer, bond_typer, matchValences=Default.matchValences,
                       ringMatchesRingOnly=Default.ringMatchesRingOnly):
    """Build a TypedMolecule for *rdmol* using the given atom/bond typers.

    matchValences appends a "v<n>" valence term to each atom SMARTS;
    ringMatchesRingOnly appends "@" / "!@" to each bond SMARTS so ring
    bonds only match ring bonds.
    """
    atoms = list(rdmol.GetAtoms())
    atom_smarts_types = atom_typer(atoms)
    if matchValences:
        decorated_atoms = []
        for atom, smarts in zip(atoms, atom_smarts_types):
            total_valence = atom.GetImplicitValence() + atom.GetExplicitValence()
            valence_term = "v%d" % total_valence
            # Comma means the SMARTS is an alternation; the valence must
            # then be AND-ed on with ";".
            separator = ";" if "," in smarts else ""
            decorated_atoms.append(smarts + separator + valence_term)
        atom_smarts_types = decorated_atoms
    # Store and reuse the bond information because it is used twice; in a
    # performance test this took the time from 2.0 to 1.4 seconds.
    bonds = list(rdmol.GetBonds())
    bond_smarts_types = bond_typer(bonds)
    if ringMatchesRingOnly:
        decorated_bonds = []
        for bond, smarts in zip(bonds, bond_smarts_types):
            if bond.IsInRing():
                # An aromatic bond (":") is necessarily in a ring already.
                ring_term = "" if smarts == ":" else "@"
            else:
                ring_term = "!@"
            if ring_term:
                separator = ";" if "," in smarts else ""
                smarts = smarts + separator + ring_term
            decorated_bonds.append(smarts)
        bond_smarts_types = decorated_bonds
    canonical_bondtypes = get_canonical_bondtypes(rdmol, bonds, atom_smarts_types,
                                                  bond_smarts_types)
    return TypedMolecule(rdmol, atoms, bonds, atom_smarts_types, bond_smarts_types,
                         canonical_bondtypes)
# Create a TypedMolecule using the user-defined atom classes (Not implemented!)
def get_specified_types(rdmol, atom_types, ringMatchesRingOnly):
    """Create a TypedMolecule using user-defined atom classes.

    NOT IMPLEMENTED: this path has never been tested, so it raises
    NotImplementedError up front. The draft body below fixes the
    undefined-name bugs of the original (it referred to `mol` instead of
    `rdmol`), but it still depends on a missing helper,
    get_bond_smarts_types(), and must not be enabled until that exists.
    """
    raise NotImplementedError("not tested!")
    # Make a copy because I will do some destructive edits
    rdmol = copy.copy(rdmol)
    atom_smarts_types = []
    atoms = list(rdmol.GetAtoms())
    for atom, atom_type in zip(atoms, atom_types):
        # Encode the atom class as an isotope on a wildcard atom.
        atom.SetAtomicNum(0)
        atom.SetMass(atom_type)
        atom_term = "%d*" % (atom_type, )
        if ringMatchesRingOnly:
            if atom.IsInRing():
                atom_term += "R"
            else:
                atom_term += "!R"
        atom_smarts_types.append('[' + atom_term + ']')
    bonds = list(rdmol.GetBonds())
    # TODO(review): get_bond_smarts_types() does not exist in this module;
    # implement it before removing the raise above.
    bond_smarts_types = get_bond_smarts_types(rdmol, bonds, ringMatchesRingOnly)
    canonical_bondtypes = get_canonical_bondtypes(rdmol, bonds, atom_smarts_types,
                                                  bond_smarts_types)
    return TypedMolecule(rdmol, atoms, bonds, atom_smarts_types, bond_smarts_types,
                         canonical_bondtypes)
def convert_input_to_typed_molecules(mols, atom_typer, bond_typer, matchValences,
                                     ringMatchesRingOnly):
    """Type every input molecule with the chosen atom/bond comparison scheme."""
    return [get_typed_molecule(rdmol, atom_typer, bond_typer, matchValences=matchValences,
                               ringMatchesRingOnly=ringMatchesRingOnly)
            for rdmol in mols]
def _check_atom_classes(molno, num_atoms, atom_classes):
if num_atoms != len(atom_classes):
raise ValueError("mols[%d]: len(atom_classes) must be the same as the number of atoms" %
(molno, ))
for atom_class in atom_classes:
if not isinstance(atom_class, int):
raise ValueError("mols[%d]: atom_class elements must be integers" % (molno, ))
if not (1 <= atom_class < 1000):
raise ValueError("mols[%d]: atom_class elements must be in the range 1 <= value < 1000" %
(molno, ))
#############################################
# This section deals with finding the canonical bondtype counts and
# making new TypedMolecule instances where the atoms contain only the
# bond types which are in all of the structures.
# In the future I would like to keep track of the bond types which are
# in the current subgraph. If any subgraph bond type count is ever
# larger than the maximum counts computed across the whole set, then
# prune. But so far I don't have a test set which drives the need for
# that.
# Return a dictionary mapping iterator item to occurrence count
def get_counts(it):
    """Return a plain dict mapping each item of *it* to how often it occurs."""
    counts = {}
    for item in it:
        counts[item] = counts.get(item, 0) + 1
    return counts
# Merge two count dictionaries, returning the smallest count for any
# entry which is in both.
def intersect_counts(counts1, counts2):
    """Intersect two count dicts: keep keys present in both, with the smaller count."""
    # Use .items() rather than the Python-2-only .iteritems(): this module
    # otherwise targets both Python 2 and 3 (see the rdkit.six imports).
    d = {}
    for k, v1 in counts1.items():
        if k in counts2:
            d[k] = min(v1, counts2[k])
    return d
# Figure out which canonical bond SMARTS occur in every molecule
def get_canonical_bondtype_counts(typed_mols):
    """Map each canonical bondtype to its per-molecule occurrence counts.

    A bondtype missing from some molecule simply has fewer entries in its
    list than there are molecules.
    """
    overall_counts = defaultdict(list)
    for typed_mol in typed_mols:
        per_mol_counts = get_counts(typed_mol.canonical_bondtypes)
        for bondtype, count in per_mol_counts.items():
            overall_counts[bondtype].append(count)
    return overall_counts
# If I know which bondtypes exist in all of the structures, I can
# remove all bonds which aren't in all structures. RDKit's Molecule
# class doesn't let me edit in-place, so I end up making a new one
# which doesn't have unsupported bond types.
def remove_unknown_bondtypes(typed_mol, supported_canonical_bondtypes):
    """Rebuild the molecule keeping only bonds with a supported canonical bondtype.

    Every atom is copied (even if it ends up with no bonds), so the result
    may be split into several disconnected fragments.
    """
    emol = Chem.EditableMol(Chem.Mol())
    for atom in typed_mol.rdmol_atoms:
        emol.AddAtom(atom)
    # Keep the bond SMARTS and canonical bondtype lists in step with the
    # bonds that survive the filter.
    kept_bonds = []
    kept_bond_smarts = []
    kept_bondtypes = []
    bond_info = zip(typed_mol.rdmol_bonds, typed_mol.bond_smarts_types,
                    typed_mol.canonical_bondtypes)
    for bond, bond_smarts, canonical_bondtype in bond_info:
        if canonical_bondtype not in supported_canonical_bondtypes:
            continue
        kept_bonds.append(bond)
        kept_bond_smarts.append(bond_smarts)
        kept_bondtypes.append(canonical_bondtype)
        emol.AddBond(bond.GetBeginAtomIdx(), bond.GetEndAtomIdx(), bond.GetBondType())
    new_mol = emol.GetMol()
    return FragmentedTypedMolecule(new_mol, list(new_mol.GetAtoms()), typed_mol.rdmol_atoms,
                                   kept_bonds, typed_mol.atom_smarts_types, kept_bond_smarts,
                                   kept_bondtypes)
# The molecule at this point has been (potentially) fragmented by
# removing bonds with unsupported bond types. The MCS cannot contain
# more atoms than the fragment of a given molecule with the most
# atoms, and the same for bonds. Find those upper limits. Note that
# the fragment with the most atoms is not necessarily the one with the
# most bonds.
def find_upper_fragment_size_limits(rdmol, atoms):
    """Return (max atoms in any fragment, max bonds in any fragment).

    The MCS can contain no more atoms than the largest fragment (by atoms)
    and no more bonds than the largest fragment (by bonds); note these may
    be two different fragments.
    """
    max_num_atoms = 0
    max_twice_num_bonds = 0
    for atom_indices in Chem.GetMolFrags(rdmol):
        max_num_atoms = max(max_num_atoms, len(atom_indices))
        # Each bond touches two atoms of the fragment, so summing per-atom
        # bond counts gives exactly twice the fragment's bond count.
        # XXX Why is there no 'atom.GetNumBonds()'?
        twice_num_bonds = sum(
            sum(1 for _bond in atoms[atom_index].GetBonds()) for atom_index in atom_indices)
        max_twice_num_bonds = max(max_twice_num_bonds, twice_num_bonds)
    return max_num_atoms, max_twice_num_bonds // 2
####### Convert the selected TypedMolecule into an EnumerationMolecule
# I convert one of the typed fragment molecules (specifically, the one
# with the smallest largest fragment score) into a list of
# EnumerationMolecule instances. Each fragment from the typed molecule
# gets turned into an EnumerationMolecule.
# An EnumerationMolecule contains the data I need to enumerate all of
# its subgraphs.
# An EnumerationMolecule contains a list of 'Atom's and list of 'Bond's.
# Atom and Bond indices are offsets into those respective lists.
# An Atom has a list of "bond_indices", which are offsets into the bonds.
# A Bond has a 2-element list of "atom_indices", which are offsets into the atoms.
# One fragment prepared for subgraph enumeration; directed_edges maps
# atom index -> list of DirectedEdge leaving that atom.
EnumerationMolecule = collections.namedtuple("Molecule", "rdmol atoms bonds directed_edges")
# An atom plus its SMARTS and the indices of the bonds it participates in.
Atom = collections.namedtuple("Atom", "real_atom atom_smarts bond_indices is_in_ring")
# A bond plus its SMARTS forms and the 2-element list of its atom indices.
Bond = collections.namedtuple("Bond",
                              "real_bond bond_smarts canonical_bondtype atom_indices is_in_ring")
# A Bond is linked to by two 'DirectedEdge's; one for each direction.
# The DirectedEdge.bond_index references the actual RDKit bond instance.
# 'end_atom_index' is the index of the destination atom of the directed edge.
# This is used in a 'directed_edges' dictionary so that
#   [edge.end_atom_index for edge in directed_edges[atom_index]]
# is the list of all atom indices connected to 'atom_index'
DirectedEdge = collections.namedtuple("DirectedEdge", "bond_index end_atom_index")
# A Subgraph is a list of atom and bond indices in an EnumerationMolecule
Subgraph = collections.namedtuple("Subgraph", "atom_indices bond_indices")
def get_typed_fragment(typed_mol, atom_indices):
    """Extract the fragment of typed_mol selected by atom_indices as a TypedFragment.

    atom_indices must cover complete connected components: a bond with exactly
    one endpoint inside the selection raises AssertionError. The atom/bond
    SMARTS and canonical bondtype lists are subsetted in step with the copy.
    """
    rdmol = typed_mol.rdmol
    rdmol_atoms = typed_mol.rdmol_atoms
    # I need to make a new RDKit Molecule containing only the fragment.
    # XXX Why is that? Do I use the molecule for more than the number of atoms and bonds?
    # Copy over the atoms
    emol = Chem.EditableMol(Chem.Mol())
    atom_smarts_types = []
    atom_map = {}  # original atom index -> index in the new fragment molecule
    for i, atom_index in enumerate(atom_indices):
        atom = rdmol_atoms[atom_index]
        emol.AddAtom(atom)
        atom_smarts_types.append(typed_mol.atom_smarts_types[atom_index])
        atom_map[atom_index] = i
    # Copy over the bonds.
    orig_bonds = []
    bond_smarts_types = []
    new_canonical_bondtypes = []
    for bond, orig_bond, bond_smarts, canonical_bondtype in zip(
            rdmol.GetBonds(), typed_mol.orig_bonds, typed_mol.bond_smarts_types,
            typed_mol.canonical_bondtypes):
        begin_atom_idx = bond.GetBeginAtomIdx()
        end_atom_idx = bond.GetEndAtomIdx()
        # Number of this bond's endpoints inside the selection (0, 1, or 2).
        count = (begin_atom_idx in atom_map) + (end_atom_idx in atom_map)
        # Double check that I have a proper fragment
        if count == 2:
            bond_smarts_types.append(bond_smarts)
            new_canonical_bondtypes.append(canonical_bondtype)
            emol.AddBond(atom_map[begin_atom_idx], atom_map[end_atom_idx], bond.GetBondType())
            orig_bonds.append(orig_bond)
        elif count == 1:
            # The selection split a bond, so it wasn't a whole fragment.
            raise AssertionError("connected/disconnected atoms?")
    return TypedFragment(emol.GetMol(),
                         [typed_mol.orig_atoms[atom_index] for atom_index in atom_indices],
                         orig_bonds, atom_smarts_types, bond_smarts_types, new_canonical_bondtypes)
def fragmented_mol_to_enumeration_mols(typed_mol, minNumAtoms=2):
    """Convert each fragment of typed_mol with >= minNumAtoms atoms into an
    EnumerationMolecule, sorted with the largest (by atom count) first.

    Raises ValueError if minNumAtoms < 2 (a meaningful MCS needs a bond).
    """
    if minNumAtoms < 2:
        raise ValueError("minNumAtoms must be at least 2")
    fragments = []
    for atom_indices in Chem.GetMolFrags(typed_mol.rdmol):
        # No need to even look at fragments which are too small.
        if len(atom_indices) < minNumAtoms:
            continue
        # Convert a fragment from the TypedMolecule into a new
        # TypedMolecule containing only that fragment.
        # You might think I could merge 'get_typed_fragment()' with
        # the code to generate the EnumerationMolecule. You're
        # probably right. This code reflects history. My original code
        # didn't break the typed molecule down to its fragments.
        typed_fragment = get_typed_fragment(typed_mol, atom_indices)
        rdmol = typed_fragment.rdmol
        atoms = []
        for atom, orig_atom, atom_smarts_type in zip(rdmol.GetAtoms(), typed_fragment.orig_atoms,
                                                     typed_fragment.atom_smarts_types):
            bond_indices = [bond.GetIdx() for bond in atom.GetBonds()]
            #assert atom.GetSymbol() == orig_atom.GetSymbol()
            atom_smarts = '[' + atom_smarts_type + ']'
            # Ring membership comes from the *original* molecule, since the
            # fragment may have lost ring bonds.
            atoms.append(Atom(atom, atom_smarts, bond_indices, orig_atom.IsInRing()))
        directed_edges = collections.defaultdict(list)
        bonds = []
        for bond_index, (bond, orig_bond, bond_smarts, canonical_bondtype) in enumerate(
                zip(rdmol.GetBonds(), typed_fragment.orig_bonds, typed_fragment.bond_smarts_types,
                    typed_fragment.canonical_bondtypes)):
            atom_indices = [bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()]
            bonds.append(Bond(bond, bond_smarts, canonical_bondtype, atom_indices, orig_bond.IsInRing()))
            # Each bond produces two directed edges, one per traversal direction.
            directed_edges[atom_indices[0]].append(DirectedEdge(bond_index, atom_indices[1]))
            directed_edges[atom_indices[1]].append(DirectedEdge(bond_index, atom_indices[0]))
        fragment = EnumerationMolecule(rdmol, atoms, bonds, dict(directed_edges))
        fragments.append(fragment)
    # Optimistically try the largest fragments first
    fragments.sort(key=lambda fragment: len(fragment.atoms), reverse=True)
    return fragments
####### Canonical SMARTS generation using Weininger, Weininger, and Weininger's CANGEN
# CANGEN "combines two separate algorithms, CANON and GENES. The
# first stage, CANON, labels a molecular structure with canonical
# labels. ... Each atom is given a numerical label on the basis of its
# topology. In the second stage, GENES generates the unique SMILES
# ... . [It] selects the starting atom and makes branching decisions
# by referring to the canonical labels as needed."
# CANON is based on the fundamental theorem of arithmetic, that is,
# the unique prime factorization theorem. Which means I need about as
# many primes as I have atoms.
# I could have a fixed list of a few thousand primes but I don't like
# having a fixed upper limit to my molecule size. I modified the code
# Georg Schoelly posted at http://stackoverflow.com/a/568618/64618 .
# This is one of many ways to generate an infinite sequence of primes.
def gen_primes():
    """Yield the primes 2, 3, 5, 7, ... indefinitely (incremental sieve).

    Adapted from Georg Schoelly's generator at
    http://stackoverflow.com/a/568618/64618 . The dict maps each upcoming
    composite to the primes that witness it.
    """
    composites = defaultdict(list)
    candidate = 2
    while True:
        witnesses = composites.pop(candidate, None)
        if witnesses is None:
            # Not a multiple of any smaller prime, so it's prime.
            yield candidate
            composites[candidate * candidate].append(candidate)
        else:
            # Advance each witnessing prime to its next multiple.
            for prime in witnesses:
                composites[candidate + prime].append(prime)
        candidate += 1
# Shared infinite stream of primes, consumed on demand to grow the cache below.
_prime_stream = gen_primes()
# Code later on uses _primes[n] and if that fails, calls _get_nth_prime(n)
_primes = []
def _get_nth_prime(n):
    """Return the n-th prime (0-based), growing the _primes cache as needed."""
    # Keep appending new primes from the stream until the cache reaches n.
    while len(_primes) <= n:
        _primes.append(next(_prime_stream))
    return _primes[n]
# Prime the cache with more values than are likely to occur
_get_nth_prime(1000)
###
# The CANON algorithm is documented as:
# (1) Set atomic vector to initial invariants. Go to step 3.
# (2) Set vector to product of primes corresponding to neighbors' ranks.
# (3) Sort vector, maintaining stability over previous ranks.
# (4) Rank atomic vector.
# (5) If not invariant partitioning, go to step 2.
# (6) On first pass, save partitioning as symmetry classes [fmcs doesn't need this]
# (7) If highest rank is smaller than number of nodes, break ties, go to step 2
# (8) ... else done.
# I track the atom information as a list of CangenNode instances.
class CangenNode(object):
    """Per-atom state for the CANON canonicalization pass.

    index is the atom's offset in the subgraph; atom_smarts is used when the
    SMARTS output is generated; value/rank are refined by canon(); neighbors
    and outgoing_edges describe subgraph connectivity.
    """
    # Using __slots__ improves get_initial_cangen_nodes performance by over 10%
    # and dropped my overall time (in one benchmark) from 0.75 to 0.73 seconds
    __slots__ = ["index", "atom_smarts", "value", "neighbors", "rank", "outgoing_edges"]

    def __init__(self, index, atom_smarts):
        self.index = index
        self.atom_smarts = atom_smarts  # Used to generate the SMARTS output
        self.rank = 0
        self.value = 0
        self.neighbors = []
        self.outgoing_edges = []
# The outgoing edge information is used to generate the SMARTS output.
# The index numbers are offsets in the subgraph, not in the original molecule;
# 'other_node' is the CangenNode instance at the far end of the edge.
OutgoingEdge = collections.namedtuple(
    "OutgoingEdge", "from_atom_index bond_index bond_smarts other_node_idx other_node")
# Convert a Subgraph of a given EnumerationMolecule into a list of
# CangenNodes. This contains the more specialized information I need
# for canonicalization and for SMARTS generation.
def get_initial_cangen_nodes(subgraph, enumeration_mol, atom_assignment,
                             do_initial_assignment=True):
    """Build the CangenNode list for a Subgraph of enumeration_mol.

    When do_initial_assignment is true each node.value gets its initial CANON
    invariant, taken from the atom_assignment label-to-integer lookup so the
    numbering is consistent across all SMARTS generated during one search.
    """
    # The subgraph contains a set of atom and bond indices in the enumeration_mol.
    # The CangenNode corresponds to an atom in the subgraph, plus relations
    # to other atoms in the subgraph.
    # I need to convert from offsets in molecule space to offset in subgraph space.
    # Map from enumeration mol atom indices to subgraph/CangenNode list indices
    atom_map = {}
    cangen_nodes = []
    atoms = enumeration_mol.atoms
    canonical_labels = []
    for i, atom_index in enumerate(subgraph.atom_indices):
        atom_map[atom_index] = i
        cangen_nodes.append(CangenNode(i, atoms[atom_index].atom_smarts))
        canonical_labels.append([])
    # Build the neighbor and directed edge lists
    for bond_index in subgraph.bond_indices:
        bond = enumeration_mol.bonds[bond_index]
        from_atom_index, to_atom_index = bond.atom_indices
        from_subgraph_atom_index = atom_map[from_atom_index]
        to_subgraph_atom_index = atom_map[to_atom_index]
        from_node = cangen_nodes[from_subgraph_atom_index]
        to_node = cangen_nodes[to_subgraph_atom_index]
        from_node.neighbors.append(to_node)
        to_node.neighbors.append(from_node)
        # Each endpoint collects the bond's canonical type for its label.
        canonical_bondtype = bond.canonical_bondtype
        canonical_labels[from_subgraph_atom_index].append(canonical_bondtype)
        canonical_labels[to_subgraph_atom_index].append(canonical_bondtype)
        # One OutgoingEdge per direction, used later for SMARTS generation.
        from_node.outgoing_edges.append(
            OutgoingEdge(from_subgraph_atom_index, bond_index, bond.bond_smarts, to_subgraph_atom_index,
                         to_node))
        to_node.outgoing_edges.append(
            OutgoingEdge(to_subgraph_atom_index, bond_index, bond.bond_smarts, from_subgraph_atom_index,
                         from_node))
    if do_initial_assignment:
        # Do the initial graph invariant assignment. (Step 1 of the CANON algorithm)
        # These are consistent only inside of the given 'atom_assignment' lookup.
        for atom_index, node, canonical_label in zip(subgraph.atom_indices, cangen_nodes,
                                                     canonical_labels):
            # The initial invariant is the sorted canonical bond labels
            # plus the atom smarts, separated by newline characters.
            #
            # This is equivalent to a circular fingerprint of width 2, and
            # gives more unique information than the Weininger method.
            canonical_label.sort()
            canonical_label.append(atoms[atom_index].atom_smarts)
            label = "\n".join(canonical_label)
            # The downside of using a string is that I need to turn it
            # into a number which is consistent across all of the SMARTS I
            # generate as part of the MCS search. Use a lookup table for
            # that which creates a new number if the label wasn't seen
            # before, or uses the old one if it was.
            node.value = atom_assignment[label]
    return cangen_nodes
# Rank a sorted list (by value) of CangenNodes
def rerank(cangen_nodes):
    """Assign 1-based ranks to nodes already sorted by value.

    Nodes with equal values share a rank; ranks start at 1 to stay in line
    with the Weininger paper (the 2*rank / 2*rank-1 tie-breaking needs that).
    """
    current_rank = 0
    previous_value = -1
    for node in cangen_nodes:
        if node.value != previous_value:
            current_rank += 1
            previous_value = node.value
        node.rank = current_rank
# Given a start/end range in the CangenNodes, sorted by value,
# find the start/end for subranges with identical values
def find_duplicates(cangen_nodes, start, end):
    """Return [(start, end), ...] subranges of cangen_nodes[start:end] whose
    nodes share the same value.

    The input range must already be sorted by value. Assumes node values are
    non-negative (the initial prev_value of -1 acts as a sentinel).
    """
    result = []
    prev_value = -1
    count = 0
    # 'range' instead of the Python-2-only 'xrange': identical iteration
    # behavior on both Python 2 and 3, and these ranges are small.
    for index in range(start, end):
        node = cangen_nodes[index]
        if node.value == prev_value:
            count += 1
        else:
            if count > 1:
                # Close off a subrange of duplicates.
                result.append((start, index))
            count = 1
            prev_value = node.value
            start = index
    if count > 1:
        # The final elements were duplicates.
        result.append((start, end))
    return result
#@profile
def canon(cangen_nodes):
    """CANON stage of CANGEN: refine node.value/node.rank until every node in
    the subgraph has a unique rank, then restore subgraph-index order.

    Precondition: each node.value holds its initial invariant (set by
    get_initial_cangen_nodes). Mutates the nodes in place; returns None.
    """
    # Precondition: node.value is set to the initial invariant
    # (1) Set atomic vector to initial invariants (assumed on input)
    # Do the initial ranking
    cangen_nodes.sort(key=lambda node: node.value)
    rerank(cangen_nodes)
    # Keep refining the sort order until it's unambiguous
    master_sort_order = cangen_nodes[:]
    # Find the start/end range for each stretch of duplicates
    duplicates = find_duplicates(cangen_nodes, 0, len(cangen_nodes))
    PRIMES = _primes  # micro-optimization; make this a local name lookup
    while duplicates:
        # (2) Set vector to product of primes corresponding to neighbor's ranks
        for node in cangen_nodes:
            try:
                node.value = PRIMES[node.rank]
            except IndexError:
                # Cache miss; extend the prime cache.
                node.value = _get_nth_prime(node.rank)
        for node in cangen_nodes:
            # Apply the fundamental theorem of arithmetic; compute the
            # product of the neighbors' primes
            p = 1
            for neighbor in node.neighbors:
                p *= neighbor.value
            node.value = p
        # (3) Sort vector, maintaining stability over previous ranks
        # (I maintain stability by refining ranges in the
        # master_sort_order based on the new ranking)
        cangen_nodes.sort(key=lambda node: node.value)
        # (4) rank atomic vector
        rerank(cangen_nodes)
        # See if any of the duplicates have been resolved.
        new_duplicates = []
        unchanged = True  # This is buggy? Need to check the entire state XXX
        for (start, end) in duplicates:
            # Special case when there's only two elements to store.
            # This optimization sped up cangen by about 8% because I
            # don't go through the sort machinery
            if start + 2 == end:
                node1, node2 = master_sort_order[start], master_sort_order[end - 1]
                if node1.value > node2.value:
                    master_sort_order[start] = node2
                    master_sort_order[end - 1] = node1
            else:
                subset = master_sort_order[start:end]
                subset.sort(key=lambda node: node.value)
                master_sort_order[start:end] = subset
            subset_duplicates = find_duplicates(master_sort_order, start, end)
            new_duplicates.extend(subset_duplicates)
            if unchanged:
                # Have we distinguished any of the duplicates?
                if not (len(subset_duplicates) == 1 and subset_duplicates[0] == (start, end)):
                    unchanged = False
        # (8) ... else done
        # Yippee! No duplicates left. Everything has a unique value.
        if not new_duplicates:
            break
        # (5) If not invariant partitioning, go to step 2
        if not unchanged:
            duplicates = new_duplicates
            continue
        duplicates = new_duplicates
        # (6) On first pass, save partitioning as symmetry classes
        pass  # I don't need this information
        # (7) If highest rank is smaller than number of nodes, break ties, go to step 2
        # I follow the Weininger algorithm and use 2*rank or 2*rank-1.
        # This requires that the first rank is 1, not 0.
        for node in cangen_nodes:
            node.value = node.rank * 2
        # The choice of tie is arbitrary. Weininger breaks the first tie.
        # I break the last tie because it's faster in Python to delete
        # from the end than the beginning.
        start, end = duplicates[-1]
        cangen_nodes[start].value -= 1
        if end == start + 2:
            # There were only two nodes with the same value. Now there
            # are none. Remove information about that duplicate.
            del duplicates[-1]
        else:
            # The first N-1 values are still duplicates.
            duplicates[-1] = (start + 1, end)
        rerank(cangen_nodes)
    # Restore to the original order (ordered by subgraph atom index)
    # because the bond information used during SMARTS generation
    # references atoms by that order.
    cangen_nodes.sort(key=lambda node: node.index)
def get_closure_label(bond_smarts, closure):
    """Return the bond SMARTS followed by its ring-closure digits.

    Single-digit closures use the bare digit; larger ones use the
    SMILES/SMARTS '%NN' two-digit form (e.g. closure 12 -> '%12').
    """
    if closure >= 10:
        return "%s%%%02d" % (bond_smarts, closure)
    return bond_smarts + str(closure)
# Precompute the initial closure heap. *Overall* performance went from 0.73 to 0.64 seconds!
# Closure numbers 1..100; the heap keeps the smallest available number on top
# so reused closures get the lowest free digits.
_available_closures = list(range(1, 101))
heapify(_available_closures)
# The Weininger paper calls this 'GENES'; I call it "generate_smiles."
# I use a different algorithm than GENES. It still uses two
# passes. The first pass identifies the closure bonds using a
# depth-first search. The second pass builds the SMILES string.
def generate_smarts(cangen_nodes):
    """Generate a SMARTS string for the subgraph described by cangen_nodes.

    Two passes: a DFS from the best-ranked atom identifies the ring-closure
    bonds, then a stack machine walks the graph emitting atom SMARTS, bond
    SMARTS, branch parentheses and closure digits. Mutates the nodes'
    outgoing_edges order (sorted by neighbor rank).
    """
    # Start from the atom with the smallest (best) rank.
    start_index = 0
    best_rank = cangen_nodes[0].rank
    for i, node in enumerate(cangen_nodes):
        if node.rank < best_rank:
            best_rank = node.rank
            start_index = i
        node.outgoing_edges.sort(key=lambda edge: edge.other_node.rank)
    visited_atoms = [0] * len(cangen_nodes)
    closure_bonds = set()
    ## First, find the closure bonds using a DFS
    stack = []
    atom_idx = start_index
    stack.extend(reversed(cangen_nodes[atom_idx].outgoing_edges))
    visited_atoms[atom_idx] = True
    while stack:
        edge = stack.pop()
        if visited_atoms[edge.other_node_idx]:
            # Reached an already-visited atom: this bond closes a ring.
            closure_bonds.add(edge.bond_index)
        else:
            visited_atoms[edge.other_node_idx] = 1
            for next_edge in reversed(cangen_nodes[edge.other_node_idx].outgoing_edges):
                if next_edge.other_node_idx == edge.from_atom_index:
                    # Don't worry about going back along the same route
                    continue
                stack.append(next_edge)
    available_closures = _available_closures[:]
    unclosed_closures = {}  # bond index -> closure number currently open
    # I've identified the closure bonds.
    # Use a stack machine to traverse the graph and build the SMARTS.
    # The instruction contains one of 4 instructions, with associated data
    #   0: add the atom's SMARTS and put its connections on the machine
    #   1: add the bond's SMARTS and put the other atom on the machine
    #   3: add a ')' to the SMARTS
    #   4: add a '(' and the bond SMARTS
    smiles_terms = []
    stack = [(0, (start_index, -1))]
    while stack:
        action, data = stack.pop()
        if action == 0:
            # Add an atom.
            # The 'while 1:' emulates a goto for the special case
            # where the atom is connected to only one other atom. I
            # don't need to use the stack machinery for that case, and
            # can speed up this function by about 10%.
            while 1:
                # Look at the bonds starting from this atom
                num_neighbors = 0
                atom_idx, prev_bond_idx = data
                smiles_terms.append(cangen_nodes[atom_idx].atom_smarts)
                outgoing_edges = cangen_nodes[atom_idx].outgoing_edges
                for outgoing_edge in outgoing_edges:
                    bond_idx = outgoing_edge.bond_index
                    # Is this a ring closure bond?
                    if bond_idx in closure_bonds:
                        # Have we already seen it before?
                        if bond_idx not in unclosed_closures:
                            # This is new. Add as a ring closure.
                            closure = heappop(available_closures)
                            smiles_terms.append(get_closure_label(outgoing_edge.bond_smarts, closure))
                            unclosed_closures[bond_idx] = closure
                        else:
                            # Second visit: close the ring and recycle the number.
                            closure = unclosed_closures[bond_idx]
                            smiles_terms.append(get_closure_label(outgoing_edge.bond_smarts, closure))
                            heappush(available_closures, closure)
                            del unclosed_closures[bond_idx]
                    else:
                        # This is a new outgoing bond.
                        if bond_idx == prev_bond_idx:
                            # Don't go backwards along the bond I just came in on
                            continue
                        if num_neighbors == 0:
                            # This is the first bond. There's a good chance that
                            # it's the only bond.
                            data = (outgoing_edge.other_node_idx, bond_idx)
                            bond_smarts = outgoing_edge.bond_smarts
                        else:
                            # There are multiple bonds. Can't shortcut.
                            if num_neighbors == 1:
                                # Capture the information for the first bond
                                # This direction doesn't need the (branch) characters.
                                stack.append((0, data))
                                stack.append((1, bond_smarts))
                            # Add information for this bond
                            stack.append((3, None))
                            stack.append((0, (outgoing_edge.other_node_idx, bond_idx)))
                            stack.append((4, outgoing_edge.bond_smarts))
                        num_neighbors += 1
                if num_neighbors != 1:
                    # If there's only one item then goto action==0 again.
                    break
                smiles_terms.append(bond_smarts)
        elif action == 1:
            # Process a bond which does not need '()'s
            smiles_terms.append(data)  # 'data' is bond_smarts
            continue
        elif action == 3:
            smiles_terms.append(')')
        elif action == 4:
            smiles_terms.append('(' + data)  # 'data' is bond_smarts
        else:
            raise AssertionError
    return "".join(smiles_terms)
# Full canonicalization is about 5% slower unless there are well over 100 structures
# in the data set, which is not expected to be common.
# Commented out the canon() step until there's a better solution (eg, adapt based
# in the input size.)
def make_canonical_smarts(subgraph, enumeration_mol, atom_assignment):
    """Generate the (semi-)canonical SMARTS for a subgraph of enumeration_mol.

    The full canon() pass is currently disabled (see the note above); the
    ordering from the initial invariant assignment is used as-is.
    """
    nodes = get_initial_cangen_nodes(subgraph, enumeration_mol, atom_assignment, True)
    #canon(nodes)
    return generate_smarts(nodes)
## def make_semicanonical_smarts(subgraph, enumeration_mol, atom_assignment):
## cangen_nodes = get_initial_cangen_nodes(subgraph, enumeration_mol, atom_assignment, True)
## # There's still some order because of the canonical bond typing, but it isn't perfect
## #canon(cangen_nodes)
## smarts = generate_smarts(cangen_nodes)
## return smarts
def make_arbitrary_smarts(subgraph, enumeration_mol, atom_assignment):
    """Generate a valid (but non-canonical) SMARTS for a subgraph.

    Skips the invariant assignment entirely and numbers the atoms in
    subgraph order, which is arbitrary but deterministic.
    """
    nodes = get_initial_cangen_nodes(subgraph, enumeration_mol, atom_assignment, False)
    for position, node in enumerate(nodes):
        node.value = position
    return generate_smarts(nodes)
############## Subgraph enumeration ##################
# A 'seed' is a subgraph containing a subset of the atoms and bonds in
# the graph. The idea is to try all of the ways in which to grow the
# seed to make a new seed which contains the original seed.
# There are two ways to grow a seed:
# - add a bond which is not in the seed but where both of its
# atoms are in the seed
# - add a bond which is not in the seed but where one of its
# atoms is in the seed (and the other is not)
# The algorithm takes the seed, and finds all of both categories of
# bonds. If there are N total such bonds then there are 2**N-1
# possible new seeds which contain the original seed. This is simply
# the powerset of the possible bonds, excepting the case with no
# bonds.
# Generate all 2**N-1 new seeds. Place the new seeds back in the
# priority queue to check for additional growth.
# I place the seeds in priority queue, sorted by score (typically the
# number of atoms) to preferentially search larger structures first. A
# simple stack or deque wouldn't work because the new seeds have
# between 1 to N-1 new atoms and bonds.
# Some useful preamble code
# Taken from the Python documentation
def powerset(iterable):
    "powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"
    # Taken from the itertools recipes in the Python documentation.
    pool = list(iterable)
    by_size = (combinations(pool, size) for size in range(len(pool) + 1))
    return chain.from_iterable(by_size)
# Same as the above except the empty term is not returned
def nonempty_powerset(iterable):
    "nonempty_powerset([1,2,3]) --> (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"
    pool = list(iterable)
    subsets = chain.from_iterable(
        combinations(pool, size) for size in range(len(pool) + 1))
    next(subsets)  # discard the leading empty tuple
    return subsets
# Call this to get a new unique function. Used to break ties in the
# priority queue.
#tiebreaker = itertools.count().next
def _Counter():
c = itertools.count()
return lambda: next(c)
tiebreaker = _Counter()
### The enumeration code
# Given a set of atoms, find all of the ways to leave those atoms.
# There are two possibilities:
# 1) bonds; which connect two atoms which are already in 'atom_indices'
# 2) directed edges; which go to atoms that aren't in 'atom_indices'
# and which aren't already in visited_bond_indices. These are external
# to the subgraph.
# The return is a 2-element tuple containing:
# (the list of bonds from (1), the list of directed edges from (2))
def find_extensions(atom_indices, visited_bond_indices, directed_edges):
    """Find all ways to grow a subgraph from the atoms in atom_indices.

    Returns (internal_bonds, external_edges):
      internal_bonds -- bond indices whose two atoms are both already in
                        atom_indices (case 1)
      external_edges -- DirectedEdges leading to atoms outside the subgraph
                        (case 2)
    Bonds already in visited_bond_indices are skipped in both cases.
    """
    internal_bonds = set()
    external_edges = []
    for atom_index in atom_indices:
        for edge in directed_edges[atom_index]:
            # Skip outgoing edges which have already been evaluated.
            if edge.bond_index in visited_bond_indices:
                continue
            if edge.end_atom_index in atom_indices:
                # Both endpoints inside the subgraph: an internal bond.
                internal_bonds.add(edge.bond_index)
            else:
                # Leads to a new (external) atom.
                external_edges.append(edge)
    # I don't think I need the list()
    return list(internal_bonds), external_edges
# Given the 2-element tuple (internal_bonds, external_edges),
# construct all of the ways to combine them to generate a new subgraph
# from the old one. This is done via a powerset.
# This generates a two-element tuple containing:
# - the set of newly added atom indices (or None)
# - the new subgraph
def all_subgraph_extensions(enumeration_mol, subgraph, visited_bond_indices, internal_bonds,
                            external_edges):
    """Generate every subgraph that extends 'subgraph' by a non-empty subset of
    the given internal bonds and external edges (the powerset construction).

    Yields 4-tuples: (newly added atom indices or None, new Subgraph,
    num_possible_atoms, num_possible_bonds) where the last two are upper
    bounds on further growth (0 when only internal bonds were added, since
    no new atoms become reachable).
    """
    #print "Subgraph", len(subgraph.atom_indices), len(subgraph.bond_indices), "X", enumeration_mol.rdmol.GetNumAtoms()
    #print "subgraph atoms", subgraph.atom_indices
    #print "subgraph bonds", subgraph.bond_indices
    #print "internal", internal_bonds, "external", external_edges
    # only internal bonds
    if not external_edges:
        #assert internal_bonds, "Must have at least one internal bond"
        it = nonempty_powerset(internal_bonds)
        for internal_bond in it:
            # Make the new subgraphs
            bond_indices = set(subgraph.bond_indices)
            bond_indices.update(internal_bond)
            yield None, Subgraph(subgraph.atom_indices, frozenset(bond_indices)), 0, 0
        return
    # only external edges
    if not internal_bonds:
        it = nonempty_powerset(external_edges)
        # Bonds that future growth may not reuse: everything already visited
        # plus every candidate external bond (chosen or not).
        exclude_bonds = set(chain(visited_bond_indices, (edge.bond_index for edge in external_edges)))
        for external_ext in it:
            new_atoms = frozenset(ext.end_atom_index for ext in external_ext)
            atom_indices = frozenset(chain(subgraph.atom_indices, new_atoms))
            bond_indices = frozenset(
                chain(subgraph.bond_indices, (ext.bond_index for ext in external_ext)))
            num_possible_atoms, num_possible_bonds = find_extension_size(enumeration_mol, new_atoms,
                                                                         exclude_bonds, external_ext)
            #num_possible_atoms = len(enumeration_mol.atoms) - len(atom_indices)
            #num_possible_bonds = len(enumeration_mol.bonds) - len(bond_indices)
            yield new_atoms, Subgraph(atom_indices, bond_indices), num_possible_atoms, num_possible_bonds
        return
    # Both internal bonds and external edges
    internal_powerset = list(powerset(internal_bonds))
    external_powerset = powerset(external_edges)
    exclude_bonds = set(chain(visited_bond_indices, (edge.bond_index for edge in external_edges)))
    for external_ext in external_powerset:
        if not external_ext:
            # No external extensions. Must have at least one internal bond.
            for internal_bond in internal_powerset[1:]:
                bond_indices = set(subgraph.bond_indices)
                bond_indices.update(internal_bond)
                yield None, Subgraph(subgraph.atom_indices, bond_indices), 0, 0
        else:
            new_atoms = frozenset(ext.end_atom_index for ext in external_ext)
            atom_indices = frozenset(chain(subgraph.atom_indices, new_atoms))
            # no_go_bond_indices = set(chain(visited_bond_indices, extern
            bond_indices = frozenset(
                chain(subgraph.bond_indices, (ext.bond_index for ext in external_ext)))
            num_possible_atoms, num_possible_bonds = find_extension_size(enumeration_mol, atom_indices,
                                                                         exclude_bonds, external_ext)
            #num_possible_atoms = len(enumeration_mol.atoms) - len(atom_indices)
            # Adding internal bonds doesn't change the growth bounds, so the
            # same num_possible_* apply to every internal subset.
            for internal_bond in internal_powerset:
                bond_indices2 = frozenset(chain(bond_indices, internal_bond))
                #num_possible_bonds = len(enumeration_mol.bonds) - len(bond_indices2)
                yield new_atoms, Subgraph(atom_indices,
                                          bond_indices2), num_possible_atoms, num_possible_bonds
def find_extension_size(enumeration_mol, known_atoms, exclude_bonds, directed_edges):
    """Count how many new atoms and bonds are reachable from the given edges.

    Runs a depth-first search starting at each directed edge's end atom,
    never crossing a bond in exclude_bonds and never re-counting an atom in
    known_atoms. Returns (num_remaining_atoms, num_remaining_bonds) -- the
    upper bounds for how much a subgraph could still grow in this direction.
    """
    num_new_atoms = 0
    num_new_bonds = 0
    seen_atoms = set(known_atoms)
    seen_bonds = set(exclude_bonds)
    mol_edges = enumeration_mol.directed_edges
    for start_edge in directed_edges:
        # Depth-first search from this edge's destination atom.
        stack = [start_edge.end_atom_index]
        while stack:
            atom_index = stack.pop()
            for edge in mol_edges[atom_index]:
                if edge.bond_index in seen_bonds:
                    continue
                seen_bonds.add(edge.bond_index)
                num_new_bonds += 1
                next_atom = edge.end_atom_index
                if next_atom in seen_atoms:
                    continue
                seen_atoms.add(next_atom)
                num_new_atoms += 1
                stack.append(next_atom)
    return num_new_atoms, num_new_bonds
# Check if a SMARTS is in all targets.
# Uses a dictionary-style API, but please only use matcher[smarts]
# Caches all previous results.
class CachingTargetsMatcher(dict):
    """Check whether a SMARTS matches at least required_match_count targets.

    Behaves like a dict mapping smarts -> bool; use matcher[smarts].
    Every result is cached, so each unique SMARTS is tested at most once.
    """

    def __init__(self, targets, required_match_count=None):
        self.targets = targets
        if required_match_count is None:
            # Default: the SMARTS must match every target.
            required_match_count = len(targets)
        self.required_match_count = required_match_count
        self._num_allowed_errors = len(targets) - required_match_count
        super(dict, self).__init__()

    def shift_targets(self):
        # Drop the leading target (known to match); the rest must still
        # supply required_match_count matches.
        assert self._num_allowed_errors >= 0, (self.required_match_count, self._num_allowed_errors)
        self.targets = self.targets[1:]
        self._num_allowed_errors = len(self.targets) - self.required_match_count

    def __missing__(self, smarts):
        """Cache miss: run the substructure tests and memoize the verdict."""
        num_allowed_errors = self._num_allowed_errors
        if num_allowed_errors < 0:
            # shift_targets() asserts this state is unreachable.
            raise AssertionError("I should never be called")
        # BUG FIX: the previous version unconditionally cached False and
        # returned here, leaving the matching code below unreachable so
        # every uncached SMARTS was reported as a non-match.
        pat = Chem.MolFromSmarts(smarts)
        if pat is None:
            raise AssertionError("Bad SMARTS: %r" % (smarts, ))
        for target in self.targets:
            if not MATCH(target, pat):
                if num_allowed_errors == 0:
                    # Too many misses; can never reach required_match_count.
                    self[smarts] = False
                    return False
                num_allowed_errors -= 1
        # Matches enough structures, which means it will always
        # match enough structures. (Even after shifting.)
        self[smarts] = True
        return True
class VerboseCachingTargetsMatcher(object):
    """Like CachingTargetsMatcher, but also gathers cache/search statistics."""

    def __init__(self, targets, required_match_count=None):
        self.targets = targets
        if required_match_count is None:
            required_match_count = len(targets)
        self.cache = {}
        self.required_match_count = required_match_count
        self._num_allowed_errors = len(targets) - required_match_count
        # Statistics counters, reported by report().
        self.num_lookups = self.num_cached_true = self.num_cached_false = 0
        self.num_search_true = self.num_search_false = self.num_matches = 0

    def shift_targets(self):
        assert self._num_allowed_errors >= 0, (self.required_match_count, self._num_allowed_errors)
        # NOTE(review): unlike CachingTargetsMatcher.shift_targets, this only
        # shifts when more than one error is still allowed -- confirm intended.
        if self._num_allowed_errors > 1:
            self.targets = self.targets[1:]
            self._num_allowed_errors = len(self.targets) - self.required_match_count

    def __getitem__(self, smarts, missing=object()):
        """Return cached verdict, or run the substructure tests and cache it."""
        self.num_lookups += 1
        x = self.cache.get(smarts, missing)
        if x is not missing:
            if x:
                self.num_cached_true += 1
            else:
                self.num_cached_false += 1
            return x
        pat = Chem.MolFromSmarts(smarts)
        if pat is None:
            raise AssertionError("Bad SMARTS: %r" % (smarts, ))
        # Initialize i so the post-loop bookkeeping works when targets is
        # empty (previously a NameError); an empty target list trivially
        # matches "everything".
        i = -1
        for i, target in enumerate(self.targets):
            if not MATCH(target, pat):
                # Does not match. No need to continue processing
                self.num_search_false += 1
                self.num_matches += i + 1
                self.cache[smarts] = False
                return False
            # TODO: should I move the mismatch structure forward
            # so that it's tested earlier next time?
        # Matches everything
        self.num_matches += i + 1
        self.num_search_true += 1
        self.cache[smarts] = True
        return True

    def report(self):
        # sys.stderr.write works under both Python 2 and 3; the original
        # 'print >> sys.stderr' statement is a syntax error under Python 3.
        sys.stderr.write(
            "%d tests of %d unique SMARTS, cache: %d True %d False, search: %d True %d False (%d substructure tests)\n"
            % (self.num_lookups, len(self.cache), self.num_cached_true, self.num_cached_false,
               self.num_search_true, self.num_search_false, self.num_matches))
##### Different maximization algorithms ######
def prune_maximize_bonds(subgraph, mol, num_remaining_atoms, num_remaining_bonds, best_sizes):
    """Return True if this subgraph can never beat best_sizes (bonds first).

    Primary criterion is the bond count; atoms are the tie-breaker. The
    num_remaining_* values are upper bounds on possible further growth.
    """
    best_num_atoms, best_num_bonds = best_sizes
    max_possible_bonds = len(subgraph.bond_indices) + num_remaining_bonds
    if max_possible_bonds < best_num_bonds:
        return True  # can never catch up on bonds
    if max_possible_bonds == best_num_bonds:
        # Bonds can only tie, so it must be able to win on atoms.
        max_possible_atoms = len(subgraph.atom_indices) + num_remaining_atoms
        return max_possible_atoms <= best_num_atoms
    return False
def prune_maximize_atoms(subgraph, mol, num_remaining_atoms, num_remaining_bonds, best_sizes):
    """Return True when this search branch can never beat the current best.

    Primary criterion is the atom count: even if every remaining atom were
    added, the subgraph must be able to exceed `best_sizes`.  Exact atom ties
    are broken by the achievable bond count.
    """
    best_atoms, best_bonds = best_sizes
    max_possible_atoms = len(subgraph.atom_indices) + num_remaining_atoms
    if max_possible_atoms < best_atoms:
        return True
    if max_possible_atoms == best_atoms:
        # Same atom count at best -- only worth keeping if it can add bonds.
        max_possible_bonds = len(subgraph.bond_indices) + num_remaining_bonds
        return max_possible_bonds <= best_bonds
    return False
##### Callback handlers for storing the "best" information #####x
class _SingleBest(object):
def __init__(self, timer, verbose):
self.best_num_atoms = self.best_num_bonds = -1
self.best_smarts = None
self.sizes = (-1, -1)
self.timer = timer
self.verbose = verbose
def _new_best(self, num_atoms, num_bonds, smarts):
self.best_num_atoms = num_atoms
self.best_num_bonds = num_bonds
self.best_smarts = smarts
self.sizes = sizes = (num_atoms, num_bonds)
self.timer.mark("new best")
if self.verbose:
dt = self.timer.mark_times["new best"] - self.timer.mark_times["start fmcs"]
sys.stderr.write("Best after %.1fs: %d atoms %d bonds %s\n" %
(dt, num_atoms, num_bonds, smarts))
return sizes
def get_result(self, completed):
return MCSResult(self.best_num_atoms, self.best_num_bonds, self.best_smarts, completed)
class MCSResult(object):
    """Outcome of an MCS search.

    Falsy when no MCS was found (smarts is None); num_atoms/num_bonds are -1
    in that case.  `completed` is true when the search finished without
    hitting the timeout.
    """

    def __init__(self, num_atoms, num_bonds, smarts, completed):
        self.num_atoms = num_atoms    # atom count of the MCS, or -1
        self.num_bonds = num_bonds    # bond count of the MCS, or -1
        self.smarts = smarts          # SMARTS of the MCS, or None
        self.completed = completed    # true if the search was not timed out

    def __nonzero__(self):
        # Python 2 truth protocol.
        return self.smarts is not None

    # Python 3 truth protocol; without this alias "if mcs:" would always be
    # True under Python 3 because __nonzero__ is ignored there.
    __bool__ = __nonzero__
class SingleBestAtoms(_SingleBest):
    """Keep the candidate with the most atoms; ties broken by bond count."""

    def add_new_match(self, subgraph, mol, smarts):
        best_atoms, best_bonds = self.sizes
        n_atoms = len(subgraph.atom_indices)
        # Reject anything strictly smaller than the best seen.
        if n_atoms < best_atoms:
            return self.sizes
        n_bonds = len(subgraph.bond_indices)
        # Equal atoms only wins if it brings strictly more bonds.
        if n_atoms == best_atoms and n_bonds <= best_bonds:
            return self.sizes
        return self._new_best(n_atoms, n_bonds, smarts)
class SingleBestBonds(_SingleBest):
    """Keep the candidate with the most bonds; ties broken by atom count."""

    def add_new_match(self, subgraph, mol, smarts):
        best_atoms, best_bonds = self.sizes
        n_bonds = len(subgraph.bond_indices)
        # Reject anything strictly smaller than the best seen.
        if n_bonds < best_bonds:
            return self.sizes
        n_atoms = len(subgraph.atom_indices)
        # Equal bonds only wins if it brings strictly more atoms.
        if n_bonds == best_bonds and n_atoms <= best_atoms:
            return self.sizes
        return self._new_best(n_atoms, n_bonds, smarts)
### Check if there are any ring atoms; used in --complete-rings-only
# This is (yet) another depth-first graph search algorithm
def check_completeRingsOnly(smarts, subgraph, enumeration_mol):
    """Return True if every subgraph bond that was a ring bond in the original
    structure still lies on a ring made of subgraph ring bonds.

    Used by --complete-rings-only.  A subgraph with no ring bonds is
    trivially acceptable; fewer than three ring bonds can never close a
    ring, so it is rejected outright.  Otherwise a depth-first search over
    the subgraph's ring bonds tries to confirm each one as part of a cycle.
    """
    #print "check", smarts, len(subgraph.atom_indices), len(subgraph.bond_indices)
    atoms = enumeration_mol.atoms
    bonds = enumeration_mol.bonds
    # First, are any of bonds in the subgraph ring bonds in the original structure?
    ring_bonds = []
    for bond_index in subgraph.bond_indices:
        bond = bonds[bond_index]
        if bond.is_in_ring:
            ring_bonds.append(bond_index)
    #print len(ring_bonds), "ring bonds"
    if not ring_bonds:
        # No need to check .. this is an acceptable structure
        return True
    if len(ring_bonds) <= 2:
        # No need to check .. there are no rings of size 2
        return False
    # Otherwise there's more work. Need to ensure that
    # all ring atoms are still in a ring in the subgraph.
    confirmed_ring_bonds = set()        # bonds proven to lie on some cycle
    subgraph_ring_bond_indices = set(ring_bonds)
    for bond_index in ring_bonds:
        #print "start with", bond_index, "in?", bond_index in confirmed_ring_bonds
        if bond_index in confirmed_ring_bonds:
            continue
        # Start a new search, starting from this bond
        from_atom_index, to_atom_index = bonds[bond_index].atom_indices
        # Map from atom index to depth in the bond stack
        atom_depth = {from_atom_index: 0, to_atom_index: 1}
        bond_stack = [bond_index]       # current DFS path, as bond indices
        backtrack_stack = []            # (path length, bond, atom) restore points
        prev_bond_index = bond_index
        current_atom_index = to_atom_index
        while 1:
            # Dive downwards, ever downwards
            next_bond_index = next_atom_index = None
            this_is_a_ring = False
            for outgoing_edge in enumeration_mol.directed_edges[current_atom_index]:
                if outgoing_edge.bond_index == prev_bond_index:
                    # Don't loop back
                    continue
                if outgoing_edge.bond_index not in subgraph_ring_bond_indices:
                    # Only advance along ring edges which are in the subgraph
                    continue
                if outgoing_edge.end_atom_index in atom_depth:
                    #print "We have a ring"
                    # It's a ring! Mark everything as being in a ring
                    # (every bond on the path from the revisited atom's depth
                    # down to the current position, plus the closing bond).
                    confirmed_ring_bonds.update(bond_stack[atom_depth[outgoing_edge.end_atom_index]:])
                    confirmed_ring_bonds.add(outgoing_edge.bond_index)
                    if len(confirmed_ring_bonds) == len(ring_bonds):
                        #print "Success!"
                        return True
                    this_is_a_ring = True
                    continue
                # New atom. Need to explore it.
                #print "we have a new bond", outgoing_edge.bond_index, "to atom", outgoing_edge.end_atom_index
                if next_bond_index is None:
                    # This will be the immediate next bond to search in the DFS
                    next_bond_index = outgoing_edge.bond_index
                    next_atom_index = outgoing_edge.end_atom_index
                else:
                    # Otherwise, backtrack and examine the other bonds
                    backtrack_stack.append(
                        (len(bond_stack), outgoing_edge.bond_index, outgoing_edge.end_atom_index))
            if next_bond_index is None:
                # Could not find a path to take. Might be because we looped back.
                if this_is_a_ring:
                    #assert prev_bond_index in confirmed_ring_bonds, (prev_bond_index, confirmed_ring_bonds)
                    # We did! That means we can backtrack
                    while backtrack_stack:
                        old_size, prev_bond_index, current_atom_index = backtrack_stack.pop()
                        # NOTE(review): this tests the outer loop's
                        # `bond_index`, not the just-popped `prev_bond_index`;
                        # confirm which bond was intended before changing.
                        if bond_index not in confirmed_ring_bonds:
                            # Need to explore this path.
                            # Back up and start the search from here
                            del bond_stack[old_size:]
                            break
                    else:
                        # No more backtracking. We fail. Try next bond?
                        # (If it had been sucessful then the
                        # len(confirmed_ring_bonds) == len(ring_bonds)
                        # would have return True)
                        break
                else:
                    # Didn't find a ring, nowhere to advance
                    return False
            else:
                # Continue deeper
                bond_stack.append(next_bond_index)
                atom_depth[next_atom_index] = len(bond_stack)
                prev_bond_index = next_bond_index
                current_atom_index = next_atom_index
        # If we reached here then try the next bond
        #print "Try again"
class SingleBestAtomsCompleteRingsOnly(_SingleBest):
    """Atom-maximizing tracker that also requires complete rings."""

    def add_new_match(self, subgraph, mol, smarts):
        best_atoms, best_bonds = self.sizes
        n_atoms = len(subgraph.atom_indices)
        if n_atoms < best_atoms:
            return self.sizes
        n_bonds = len(subgraph.bond_indices)
        if n_atoms == best_atoms and n_bonds <= best_bonds:
            return self.sizes
        # Only accept the larger candidate if its ring bonds close rings.
        if check_completeRingsOnly(smarts, subgraph, mol):
            return self._new_best(n_atoms, n_bonds, smarts)
        return self.sizes
class SingleBestBondsCompleteRingsOnly(_SingleBest):
    """Bond-maximizing tracker that also requires complete rings."""

    def add_new_match(self, subgraph, mol, smarts):
        best_atoms, best_bonds = self.sizes
        n_bonds = len(subgraph.bond_indices)
        if n_bonds < best_bonds:
            return self.sizes
        n_atoms = len(subgraph.atom_indices)
        if n_bonds == best_bonds and n_atoms <= best_atoms:
            return self.sizes
        # Only accept the larger candidate if its ring bonds close rings.
        if check_completeRingsOnly(smarts, subgraph, mol):
            return self._new_best(n_atoms, n_bonds, smarts)
        return self.sizes
# Map (maximize mode, completeRingsOnly?) -> (prune function, best-match tracker class).
_maximize_options = {
    ("atoms", False): (prune_maximize_atoms, SingleBestAtoms),
    ("atoms", True): (prune_maximize_atoms, SingleBestAtomsCompleteRingsOnly),
    ("bonds", False): (prune_maximize_bonds, SingleBestBonds),
    ("bonds", True): (prune_maximize_bonds, SingleBestBondsCompleteRingsOnly),
}
###### The engine of the entire system. Enumerate subgraphs and see if they match. #####
def enumerate_subgraphs(enumeration_mols, prune, atom_assignment, matches_all_targets, hits,
                        timeout, heappush, heappop):
    """Best-first enumeration of connected subgraphs of the query fragments.

    Every enumerated subgraph is converted to SMARTS, tested against the
    targets via `matches_all_targets`, and reported to `hits`.  `prune`
    cuts branches which can no longer beat the best size so far.  Returns
    True when the enumeration completed, False when `timeout` (seconds,
    or None for no limit) expired.
    """
    if timeout is None:
        end_time = None
    else:
        end_time = time.time() + timeout
    seeds = []
    best_sizes = (0, 0)
    # Do a quick check for the not uncommon case where one of the input fragments
    # is the largest substructure or one off from the largest.
    for mol in enumeration_mols:
        atom_range = range(len(mol.atoms))
        bond_set = set(range(len(mol.bonds)))
        subgraph = Subgraph(atom_range, bond_set)
        if not prune(subgraph, mol, 0, 0, best_sizes):
            # Micro-optimization: the largest fragment SMARTS doesn't
            # need to be canonicalized because there will only ever be
            # one match. It's also unlikely that the other largest
            # fragments need canonicalization.
            smarts = make_arbitrary_smarts(subgraph, mol, atom_assignment)
            if matches_all_targets[smarts]:
                best_sizes = hits.add_new_match(subgraph, mol, smarts)
    # Seed the search with every single-bond subgraph, ring bonds last.
    for mol in enumeration_mols:
        directed_edges = mol.directed_edges
        # Using 20001 random ChEMBL pairs, timeout=15.0 seconds
        #  1202.6s with original order
        #  1051.9s sorting by (bond.is_in_ring, bond_index)
        #  1009.7s sorting by (bond.is_in_ring + atom1.is_in_ring + atom2.is_in_ring)
        #  1055.2s sorting by (if bond.is_in_ring: 2; else: -(atom1.is_in_ring + atom2.is_in_ring))
        #  1037.4s sorting by (atom1.is_in_ring + atom2.is_in_ring)
        sorted_bonds = list(enumerate(mol.bonds))
        def get_bond_ring_score(bond_data, atoms=mol.atoms):
            bond_index, bond = bond_data
            a1, a2 = bond.atom_indices
            return bond.is_in_ring + atoms[a1].is_in_ring + atoms[a2].is_in_ring
        sorted_bonds.sort(key=get_bond_ring_score)
        visited_bond_indices = set()
        num_remaining_atoms = len(mol.atoms) - 2
        num_remaining_bonds = len(mol.bonds)
        for bond_index, bond in sorted_bonds:  #enumerate(mol.bonds): #
            #print "bond_index", bond_index, len(mol.bonds)
            visited_bond_indices.add(bond_index)
            num_remaining_bonds -= 1
            subgraph = Subgraph(bond.atom_indices, frozenset([bond_index]))
            # I lie about the remaining atom/bond sizes here.
            if prune(subgraph, mol, num_remaining_atoms, num_remaining_bonds, best_sizes):
                continue
            # bond.canonical_bondtype doesn't necessarily give the same
            # SMARTS as make_canonical_smarts, but that doesn't matter.
            # 1) I know it's canonical, 2) it's faster, and 3) there is
            # no place else which generates single-bond canonical SMARTS.
            #smarts = make_canonical_smarts(subgraph, mol, atom_assignment)
            smarts = bond.canonical_bondtype
            if matches_all_targets[smarts]:
                best_sizes = hits.add_new_match(subgraph, mol, smarts)
            else:
                # This can happen if there's a threshold
                #raise AssertionError("This should never happen: %r" % (smarts,))
                continue
            a1, a2 = bond.atom_indices
            # Edges which leave this bond and lead to unvisited bonds.
            outgoing_edges = [
                e for e in (directed_edges[a1] + directed_edges[a2])
                if e.end_atom_index not in bond.atom_indices and e.bond_index not in visited_bond_indices
            ]
            empty_internal = frozenset()
            if not outgoing_edges:
                pass
            else:
                # The priority is the number of bonds in the subgraph, ordered so
                # that the subgraph with the most bonds comes first. Since heapq
                # puts the smallest value first, I reverse the number. The initial
                # subgraphs have 1 bond, so the initial score is -1.
                heappush(seeds, (-1, tiebreaker(), subgraph, visited_bond_indices.copy(), empty_internal,
                                 outgoing_edges, mol, directed_edges))
    # I made so many subtle mistakes where I used 'subgraph' instead
    # of 'new_subgraph' in the following section that I finally
    # decided to get rid of 'subgraph' and use 'old_subgraph' instead.
    del subgraph
    # Grow seeds best-first until exhausted or timed out.
    while seeds:
        if end_time:
            if time.time() >= end_time:
                return False
        #print "There are", len(seeds), "seeds", seeds[0][:2]
        score, _, old_subgraph, visited_bond_indices, internal_bonds, external_edges, mol, directed_edges = heappop(
            seeds)
        new_visited_bond_indices = visited_bond_indices.copy()
        new_visited_bond_indices.update(internal_bonds)
        ##        for edge in external_edges:
        ##            assert edge.bond_index not in new_visited_bond_indices
        new_visited_bond_indices.update(edge.bond_index for edge in external_edges)
        for new_atoms, new_subgraph, num_remaining_atoms, num_remaining_bonds in \
                all_subgraph_extensions(mol, old_subgraph, visited_bond_indices, internal_bonds, external_edges):
            if prune(new_subgraph, mol, num_remaining_atoms, num_remaining_bonds, best_sizes):
                #print "PRUNE", make_canonical_smarts(new_subgraph, mol, atom_assignment)
                continue
            smarts = make_canonical_smarts(new_subgraph, mol, atom_assignment)
            if matches_all_targets[smarts]:
                #print "YES", smarts
                best_sizes = hits.add_new_match(new_subgraph, mol, smarts)
            else:
                #print "NO", smarts
                continue
            if not new_atoms:
                continue
            new_internal_bonds, new_external_edges = find_extensions(new_atoms, new_visited_bond_indices,
                                                                     directed_edges)
            if new_internal_bonds or new_external_edges:
                # Rank so the subgraph with the highest number of bonds comes first
                heappush(seeds, (-len(new_subgraph.bond_indices), tiebreaker(), new_subgraph,
                                 new_visited_bond_indices, new_internal_bonds, new_external_edges, mol,
                                 directed_edges))
    return True
# Assign a unique identifier to every unique key
class Uniquer(dict):
    """Map each distinct key to a small unique integer, assigned on first use."""

    def __init__(self):
        self.counter = _Counter()

    def __missing__(self, key):
        # First time this key is seen: take the next id and memoize it.
        new_id = self.counter()
        self[key] = new_id
        return new_id
# This is here only so I can see it in the profile statistics
# This is here only so I can see it in the profile statistics
def MATCH(mol, pat):
    """Return whether the query `pat` has a substructure match in `mol`."""
    result = mol.HasSubstructMatch(pat)
    return result
class VerboseHeapOps(object):
    """heappush/heappop wrappers which count operations and periodically
    report progress to stderr (at most once per `verboseDelay` seconds)."""

    def __init__(self, trigger, verboseDelay):
        self.num_seeds_added = 0       # total heappush calls
        self.num_seeds_processed = 0   # total heappop calls
        self.verboseDelay = verboseDelay
        self._time_for_next_report = time.time() + verboseDelay
        self.trigger = trigger         # extra callback invoked before each report

    def heappush(self, seeds, item):
        self.num_seeds_added += 1
        return heappush(seeds, item)

    def heappop(self, seeds):
        # Throttled progress report: at most one per verboseDelay seconds.
        if time.time() >= self._time_for_next_report:
            self.trigger()
            self.report()
            self._time_for_next_report = time.time() + self.verboseDelay
        self.num_seeds_processed += 1
        return heappop(seeds)

    def trigger_report(self):
        self.trigger()
        self.report()

    def report(self):
        # sys.stderr.write() instead of the Python 2 only ``print >>``
        # statement, which is a syntax error under Python 3.
        sys.stderr.write(" %d subgraphs enumerated, %d processed\n" % (self.num_seeds_added,
                                                                      self.num_seeds_processed))
def compute_mcs(fragmented_mols, typed_mols, minNumAtoms, threshold_count=None,
                maximize=Default.maximize, completeRingsOnly=Default.completeRingsOnly,
                timeout=Default.timeout, timer=None, verbose=False, verboseDelay=1.0):
    """Run the subgraph enumeration over every query and return the best MCSResult.

    `fragmented_mols` are the (sorted) query molecules; `typed_mols` the
    corresponding target RDKit molecules.  `threshold_count` is how many
    molecules must contain the substructure (None means all of them).
    Returns an MCSResult; num_atoms is -1 when nothing of at least
    `minNumAtoms` atoms was found.
    """
    assert timer is not None
    assert len(fragmented_mols) == len(typed_mols)
    assert len(fragmented_mols) >= 2
    # Default the threshold BEFORE validating it; the original code asserted
    # "0 < threshold_count <= ..." first, which failed for the documented
    # default of None.
    if threshold_count is None:
        threshold_count = len(fragmented_mols)
    else:
        assert threshold_count >= 2, threshold_count
    assert 0 < threshold_count <= len(fragmented_mols), threshold_count
    atom_assignment = Uniquer()
    if verbose:
        if verboseDelay < 0.0:
            raise ValueError("verboseDelay may not be negative")
        matches_all_targets = VerboseCachingTargetsMatcher(typed_mols[1:], threshold_count - 1)
        heapops = VerboseHeapOps(matches_all_targets.report, verboseDelay)
        push = heapops.heappush
        pop = heapops.heappop
        end_verbose = heapops.trigger_report
    else:
        matches_all_targets = CachingTargetsMatcher(typed_mols[1:], threshold_count - 1)
        push = heappush
        pop = heappop
        end_verbose = lambda: 1
    try:
        prune, hits_class = _maximize_options[(maximize, bool(completeRingsOnly))]
    except KeyError:
        raise ValueError("Unknown 'maximize' option %r" % (maximize, ))
    hits = hits_class(timer, verbose)
    remaining_time = None
    if timeout is not None:
        stop_time = time.time() + timeout
    # Use each molecule in turn as the enumeration query, shrinking the
    # target list as the threshold allows.
    for query_index, fragmented_query_mol in enumerate(fragmented_mols):
        enumerated_query_fragments = fragmented_mol_to_enumeration_mols(fragmented_query_mol,
                                                                        minNumAtoms)
        if timeout is not None:
            remaining_time = stop_time - time.time()
        success = enumerate_subgraphs(enumerated_query_fragments, prune, atom_assignment,
                                      matches_all_targets, hits, remaining_time, push, pop)
        # Stop when too few molecules remain for the threshold, or on timeout.
        if query_index + threshold_count >= len(fragmented_mols):
            break
        if not success:
            break
        matches_all_targets.shift_targets()
    end_verbose()
    result = hits.get_result(success)
    if result.num_atoms < minNumAtoms:
        return MCSResult(-1, -1, None, result.completed)
    return result
########## Main driver for the MCS code
class Timer(object):
    """Record named wall-clock timestamps for later elapsed-time reporting."""

    def __init__(self):
        self.mark_times = {}

    def mark(self, name):
        # Store the current time under `name`, overwriting any earlier mark.
        self.mark_times[name] = time.time()
def _update_times(timer, times):
if times is None:
return
for (dest, start, end) in (
("fragment", "start fmcs", "end fragment"), ("select", "end fragment", "end select"),
("enumerate", "end select", "end fmcs"), ("best_found", "start fmcs", "new best"),
("mcs", "start fmcs", "end fmcs")):
try:
diff = timer.mark_times[end] - timer.mark_times[start]
except KeyError:
diff = None
times[dest] = diff
def _get_threshold_count(num_mols, threshold):
if threshold is None:
return num_mols
x = num_mols * threshold
threshold_count = int(x)
if threshold_count < x:
threshold_count += 1
if threshold_count < 2:
# You can specify 0.00001 or -2.3 but you'll still get
# at least one *common* substructure.
threshold_count = 2
return threshold_count
def fmcs(mols,
         minNumAtoms=2,
         maximize=Default.maximize,
         atomCompare=Default.atomCompare,
         bondCompare=Default.bondCompare,
         threshold=1.0,
         matchValences=Default.matchValences,
         ringMatchesRingOnly=False,
         completeRingsOnly=False,
         timeout=Default.timeout,
         times=None,
         verbose=False,
         verboseDelay=1.0, ):
    """Find the maximum common substructure of `mols` and return an MCSResult.

    Pipeline: type the atoms/bonds, fragment away bond types which cannot be
    in the MCS, select and order the candidate queries, then run compute_mcs.
    If `times` is a dict it is filled with per-stage elapsed seconds.
    """
    timer = Timer()
    timer.mark("start fmcs")
    if minNumAtoms < 2:
        raise ValueError("minNumAtoms must be at least 2")
    if timeout is not None:
        if timeout <= 0.0:
            raise ValueError("timeout must be None or a positive value")
    threshold_count = _get_threshold_count(len(mols), threshold)
    if threshold_count > len(mols):
        # Threshold is too high. No possible matches.
        return MCSResult(-1, -1, None, 1)
    if completeRingsOnly:
        ringMatchesRingOnly = True
    try:
        atom_typer = atom_typers[atomCompare]
    except KeyError:
        raise ValueError("Unknown atomCompare option %r" % (atomCompare, ))
    try:
        bond_typer = bond_typers[bondCompare]
    except KeyError:
        raise ValueError("Unknown bondCompare option %r" % (bondCompare, ))
    # Make copies of all of the molecules so I can edit without worrying about the original
    typed_mols = convert_input_to_typed_molecules(mols, atom_typer, bond_typer,
                                                  matchValences=matchValences,
                                                  ringMatchesRingOnly=ringMatchesRingOnly)
    bondtype_counts = get_canonical_bondtype_counts(typed_mols)
    # NOTE(review): supported_bondtypes is built but never used below;
    # remove_unknown_bondtypes() only receives bondtype_counts.
    supported_bondtypes = set()
    for bondtype, count_list in bondtype_counts.items():
        if len(count_list) >= threshold_count:
            supported_bondtypes.add(bondtype)
    # For better filtering, find the largest count which is in threshold
    # Keep track of the counts while building the subgraph.
    # The subgraph can never have more types of a given count.
    fragmented_mols = [remove_unknown_bondtypes(typed_mol, bondtype_counts)
                      for typed_mol in typed_mols]
    timer.mark("end fragment")
    sizes = []
    max_num_atoms = fragmented_mols[0].rdmol.GetNumAtoms()
    max_num_bonds = fragmented_mols[0].rdmol.GetNumBonds()
    ignored_count = 0
    for tiebreaker, (typed_mol, fragmented_mol) in enumerate(zip(typed_mols, fragmented_mols)):
        num_atoms, num_bonds = find_upper_fragment_size_limits(fragmented_mol.rdmol,
                                                               fragmented_mol.rdmol_atoms)
        if num_atoms < minNumAtoms:
            # This isn't big enough to be in the MCS
            ignored_count += 1
            if ignored_count + threshold_count > len(mols):
                # I might be able to exit because enough of the molecules don't have
                # a large enough fragment to be part of the MCS
                timer.mark("end select")
                timer.mark("end fmcs")
                _update_times(timer, times)
                return MCSResult(-1, -1, None, True)
        else:
            if num_atoms < max_num_atoms:
                max_num_atoms = num_atoms
            if num_bonds < max_num_bonds:
                max_num_bonds = num_bonds
            sizes.append((num_bonds, num_atoms, tiebreaker, typed_mol, fragmented_mol))
    if len(sizes) < threshold_count:
        # Too few molecules have a usable fragment left.
        timer.mark("end select")
        timer.mark("end fmcs")
        _update_times(timer, times)
        return MCSResult(-1, -1, None, True)
    assert min(size[1] for size in sizes) >= minNumAtoms
    # Sort so the molecule with the smallest largest fragment (by bonds) comes first.
    # Break ties with the smallest number of atoms.
    # Break secondary ties by position.
    sizes.sort()
    #print "Using", Chem.MolToSmiles(sizes[0][4].rdmol)
    timer.mark("end select")
    # Extract the (typed mol, fragmented mol) pairs.
    fragmented_mols = [size_info[4] for size_info in sizes]  # used as queries
    typed_mols = [size_info[3].rdmol for size_info in sizes]  # used as targets
    # NOTE(review): the "start enumeration" mark is never read by
    # _update_times(); the enumerate span is measured from "end select".
    timer.mark("start enumeration")
    mcs_result = compute_mcs(fragmented_mols, typed_mols, minNumAtoms,
                             threshold_count=threshold_count, maximize=maximize,
                             completeRingsOnly=completeRingsOnly, timeout=timeout, timer=timer,
                             verbose=verbose, verboseDelay=verboseDelay)
    timer.mark("end fmcs")
    _update_times(timer, times)
    return mcs_result
######### Helper functions to generate structure/fragment output given an MCS match
# Given a Subgraph (with atom and bond indices) describing a
# fragment, make a new molecule object with only that fragment
def subgraph_to_fragment(mol, subgraph):
    """Build a new molecule containing only the subgraph's atoms and bonds."""
    editable = Chem.EditableMol(Chem.Mol())
    old_to_new = {}
    # Copy the selected atoms, remembering the index remapping.
    for atom_index in subgraph.atom_indices:
        editable.AddAtom(mol.GetAtomWithIdx(atom_index))
        old_to_new[atom_index] = len(old_to_new)
    # Re-create the selected bonds using the remapped atom indices.
    for bond_index in subgraph.bond_indices:
        old_bond = mol.GetBondWithIdx(bond_index)
        editable.AddBond(old_to_new[old_bond.GetBeginAtomIdx()], old_to_new[old_bond.GetEndAtomIdx()],
                         old_bond.GetBondType())
    return editable.GetMol()
# Convert a subgraph into a SMILES
def make_fragment_smiles(mcs, mol, subgraph, args=None):
fragment = subgraph_to_fragment(mol, subgraph)
new_smiles = Chem.MolToSmiles(fragment)
return "%s %s\n" % (new_smiles, mol.GetProp("_Name"))
def _copy_sd_tags(mol, fragment):
fragment.SetProp("_Name", mol.GetProp("_Name"))
# Copy the existing names over
for name in mol.GetPropNames():
if name.startswith("_"):
continue
fragment.SetProp(name, mol.GetProp(name))
def _MolToSDBlock(mol):
    """Render `mol` as a full SD record: molfile, public tag data, then "$$$$"."""
    # Huh?! There's no way to get the entire SD record?
    pieces = [Chem.MolToMolBlock(mol, kekulize=False)]
    for tag_name in mol.GetPropNames():
        # RDKit-internal properties stay out of the record.
        if tag_name.startswith("_"):
            continue
        pieces.append("> <" + tag_name + ">\n")
        pieces.append(mol.GetProp(tag_name) + "\n")
        pieces.append("\n")
    pieces.append("$$$$\n")
    return "".join(pieces)
def _save_other_tags(mol, fragment, mcs, orig_mol, subgraph, args):
    """Store the optional --save-counts/-smiles/-smarts SD tags on `mol`.

    `fragment` supplies the SMILES text; each tag is written only when the
    corresponding command-line option was given.
    """
    if args.save_counts_tag is not None:
        # "<num fragments> <num atoms> <num bonds>"; -1s mean "no MCS".
        if not mcs:
            counts_line = "-1 -1 -1"
        elif mcs.num_atoms == 0:
            counts_line = "0 0 0"
        else:
            counts_line = "1 %d %d" % (mcs.num_atoms, mcs.num_bonds)
        mol.SetProp(args.save_counts_tag, counts_line)
    have_match = bool(mcs and mcs.num_atoms > 0)
    if args.save_smiles_tag is not None:
        mol.SetProp(args.save_smiles_tag, Chem.MolToSmiles(fragment) if have_match else "-")
    if args.save_smarts_tag is not None:
        mol.SetProp(args.save_smarts_tag, mcs.smarts if have_match else "-")
# Convert a subgraph into an SD file
def make_fragment_sdf(mcs, mol, subgraph, args):
fragment = subgraph_to_fragment(mol, subgraph)
Chem.FastFindRings(fragment)
_copy_sd_tags(mol, fragment)
if args.save_atom_class_tag is not None:
output_tag = args.save_atom_class_tag
atom_classes = get_selected_atom_classes(mol, subgraph.atom_indices)
if atom_classes is not None:
fragment.SetProp(output_tag, " ".join(map(str, atom_classes)))
_save_other_tags(fragment, fragment, mcs, mol, subgraph, args)
return _MolToSDBlock(fragment)
#
def make_complete_sdf(mcs, mol, subgraph, args):
    """Return an SD record of the whole input structure annotated with MCS tags."""
    annotated = copy.copy(mol)
    _copy_sd_tags(mol, annotated)
    if args.save_atom_indices_tag is not None:
        indices_text = " ".join(str(index) for index in subgraph.atom_indices)
        annotated.SetProp(args.save_atom_indices_tag, indices_text)
    # Tags go on the full structure, but the SMILES comes from the fragment.
    _save_other_tags(annotated, subgraph_to_fragment(mol, subgraph), mcs, mol, subgraph, args)
    return _MolToSDBlock(annotated)
# Dispatch table for --output-format; each writer returns one output record.
structure_format_functions = {
    "fragment-smiles": make_fragment_smiles,
    "fragment-sdf": make_fragment_sdf,
    "complete-sdf": make_complete_sdf,
}
def make_structure_format(format_name, mcs, mol, subgraph, args):
    """Dispatch to the output writer for `format_name`; raise ValueError if unknown."""
    func = structure_format_functions.get(format_name)
    if func is None:
        raise ValueError("Unknown format %r" % (format_name, ))
    return func(mcs, mol, subgraph, args)
def parse_num_atoms(s):
    """argparse type for --min-num-atoms: an integer of at least 2."""
    value = int(s)
    if value >= 2:
        return value
    raise argparse.ArgumentTypeError("must be at least 2, not %s" % s)
def parse_threshold(s):
    """argparse type for --threshold: a value between 0.0 and 1.0 inclusive.

    Uses exact Fraction arithmetic when the fractions module is available,
    falling back to float otherwise.
    """
    try:
        import fractions
    except ImportError:
        value, upper = float(s), 1.0
    else:
        value, upper = fractions.Fraction(s), fractions.Fraction(1)
    if 0 <= value <= upper:
        return value
    raise argparse.ArgumentTypeError("must be a value between 0.0 and 1.0, not %s" % s)
def parse_timeout(s):
    """argparse type for --timeout: non-negative seconds, or "none" for no limit."""
    if s == "none":
        return None
    seconds = float(s)
    if seconds < 0.0:
        raise argparse.ArgumentTypeError("Must be a non-negative value, not %r" % (s, ))
    return seconds
class starting_from(object):
    """Open-ended selection range: contains every value >= `left`."""

    def __init__(self, left):
        self.left = left

    def __contains__(self, value):
        return value >= self.left
# "N-M" is an inclusive 1-based range; "N-" is open-ended; "N" alone is one record.
range_pat = re.compile(r"(\d+)-(\d*)")
value_pat = re.compile(r"(\d+)")
def parse_select(s):
    """argparse type for --select: parse "1-10,13,20,50-" into range objects.

    Returns a list of containers of 0-based record indices: range objects
    for bounded selections and starting_from instances for open-ended ones.
    Raises argparse.ArgumentTypeError on malformed input.
    """
    ranges = []
    start = 0
    while 1:
        m = range_pat.match(s, start)
        if m is not None:
            # Selected from 'left' to (and including) 'right',
            # converted into 0-based range fields.
            left = int(m.group(1))
            right = m.group(2)
            if not right:
                ranges.append(starting_from(left - 1))
            else:
                # range() instead of the Python 2 only xrange(); membership
                # tests behave identically and it also runs under Python 3.
                ranges.append(range(left - 1, int(right)))
        else:
            # Selected a single value
            m = value_pat.match(s, start)
            if m is not None:
                val = int(m.group(1))
                ranges.append(range(val - 1, val))
            else:
                raise argparse.ArgumentTypeError("Unknown character at position %d of %r" % (start + 1, s))
        start = m.end()
        # Each term must be followed by a ',' or the end of the string.
        t = s[start:start + 1]
        if not t:
            break
        if t == ",":
            start += 1
            continue
        raise argparse.ArgumentTypeError("Unknown character at position %d of %r" % (start + 1, s))
    return ranges
# --compare shorthand -> (atomCompare, bondCompare) option pair.
compare_shortcuts = {
    "topology": ("any", "any"),
    "elements": ("elements", "any"),
    "types": ("elements", "bondtypes"),
}
# RDKit's match function only returns the atom indices of the match.
# To get the bond indices, I need to go through the pattern molecule.
def _get_match_bond_indices(pat, mol, match_atom_indices):
bond_indices = []
for bond in pat.GetBonds():
mol_atom1 = match_atom_indices[bond.GetBeginAtomIdx()]
mol_atom2 = match_atom_indices[bond.GetEndAtomIdx()]
bond = mol.GetBondBetweenAtoms(mol_atom1, mol_atom2)
assert bond is not None
bond_indices.append(bond.GetIdx())
return bond_indices
def main(args=None):
    """Command-line driver: parse options, load structures, run fmcs, write output."""
    parser = argparse.ArgumentParser(
        description="Find the maximum common substructure of a set of structures",
        epilog="For more details on these options, see https://bitbucket.org/dalke/fmcs/")
    parser.add_argument("filename", nargs=1, help="SDF or SMILES file")
    # NOTE(review): several later lines read args.minNumAtoms, args.atomCompare,
    # args.bondCompare and args.ringMatchesRingOnly, but argparse derives the
    # dests "min_num_atoms", "atom_compare", "bond_compare" and
    # "ring_matches_ring_only" from these option strings. Unless dest=
    # parameters were lost from this copy, those reads raise AttributeError --
    # confirm against the upstream fmcs source.
    parser.add_argument("--maximize", choices=["atoms", "bonds"], default=Default.maximize,
                        help="Maximize the number of 'atoms' or 'bonds' in the MCS. (Default: %s)" %
                        (Default.maximize, ))
    parser.add_argument("--min-num-atoms", type=parse_num_atoms, default=2, metavar="INT",
                        help="Minimimum number of atoms in the MCS (Default: 2)")
    # --compare writes both atomCompare and bondCompare onto the namespace.
    class CompareAction(argparse.Action):
        def __call__(self, parser, namespace, value, option_string=None):
            atomCompare_name, bondCompare_name = compare_shortcuts[value]
            namespace.atomCompare = atomCompare_name
            namespace.bondCompare = bondCompare_name
    parser.add_argument(
        "--compare", choices=["topology", "elements", "types"], default=None, action=CompareAction,
        help="Use 'topology' as a shorthand for '--atom-compare any --bond-compare any', "
        "'elements' is '--atom-compare elements --bond-compare any', "
        "and 'types' is '--atom-compare elements --bond-compare bondtypes' "
        "(Default: types)")
    parser.add_argument(
        "--atom-compare", choices=["any", "elements", "isotopes"], default=None, help=(
            "Specify the atom comparison method. With 'any', every atom matches every "
            "other atom. With 'elements', atoms match only if they contain the same element. "
            "With 'isotopes', atoms match only if they have the same isotope number; element "
            "information is ignored so [5C] and [5P] are identical. This can be used to "
            "implement user-defined atom typing. "
            "(Default: elements)"))
    parser.add_argument("--bond-compare", choices=["any", "bondtypes"], default="bondtypes", help=(
        "Specify the bond comparison method. With 'any', every bond matches every "
        "other bond. With 'bondtypes', bonds are the same only if their bond types "
        "are the same. (Default: bondtypes)"))
    parser.add_argument(
        "--threshold", default="1.0", type=parse_threshold,
        help="Minimum structure match threshold. A value of 1.0 means that the common "
        "substructure must be in all of the input structures. A value of 0.8 finds "
        "the largest substructure which is common to at least 80%% of the input "
        "structures. (Default: 1.0)")
    parser.add_argument(
        "--atom-class-tag", metavar="TAG",
        help="Use atom class assignments from the field 'TAG'. The tag data must contain a space "
        "separated list of integers in the range 1-10000, one for each atom. Atoms are "
        "identical if and only if their corresponding atom classes are the same. Note "
        "that '003' and '3' are treated as identical values. (Not used by default)")
    ##    parser.add_argument("--match-valences", action="store_true",
    ##                        help=
    ##                        "Modify the atom comparison so that two atoms must also have the same total "
    ##                        "bond order in order to match.")
    parser.add_argument(
        "--ring-matches-ring-only", action="store_true",
        help="Modify the bond comparison so that ring bonds only match ring bonds and chain "
        "bonds only match chain bonds. (Ring atoms can still match non-ring atoms.) ")
    parser.add_argument(
        "--complete-rings-only", action="store_true",
        help="If a bond is a ring bond in the input structures and a bond is in the MCS "
        "then the bond must also be in a ring in the MCS. Selecting this option also "
        "enables --ring-matches-ring-only.")
    parser.add_argument(
        "--select", type=parse_select, action="store", default="1-",
        help="Select a subset of the input records to process. Example: 1-10,13,20,50- "
        "(Default: '1-', which selects all structures)")
    parser.add_argument("--timeout", type=parse_timeout, metavar="SECONDS", default=Default.timeout,
                        help="Report the best solution after running for at most 'timeout' seconds. "
                        "Use 'none' for no timeout. (Default: %s)" % (Default.timeoutString, ))
    parser.add_argument("--output", "-o", metavar="FILENAME",
                        help="Write the results to FILENAME (Default: use stdout)")
    parser.add_argument(
        "--output-format", choices=["smarts", "fragment-smiles", "fragment-sdf", "complete-sdf"],
        default="smarts",
        help="'smarts' writes the SMARTS pattern including the atom and bond criteria. "
        "'fragment-smiles' writes a matching fragment as a SMILES string. "
        "'fragment-sdf' writes a matching fragment as a SD file; see --save-atom-class for "
        "details on how atom class information is saved. "
        "'complete-sdf' writes the entire SD file with the fragment information stored in "
        "the tag specified by --save-fragment-indices-tag. (Default: smarts)")
    parser.add_argument(
        "--output-all", action="store_true",
        help="By default the structure output formats only show an MCS for the first input structure. "
        "If this option is enabled then an MCS for all of the structures are shown.")
    parser.add_argument(
        "--save-atom-class-tag", metavar="TAG",
        help="If atom classes are specified (via --class-tag) and the output format is 'fragment-sdf' "
        "then save the substructure atom classes to the tag TAG, in fragment atom order. By "
        "default this is the value of --atom-class-tag.")
    parser.add_argument(
        "--save-counts-tag", metavar="TAG",
        help="Save the fragment count, atom count, and bond count to the specified SD tag as "
        "space separated integers, like '1 9 8'. (The fragment count will not be larger than "
        "1 until fmcs supports disconnected MCSes.)")
    parser.add_argument("--save-atom-indices-tag", metavar="TAG",
                        help="If atom classes are specified and the output format is 'complete-sdf' "
                        "then save the MCS fragment atom indices to the tag TAG, in MCS order. "
                        "(Default: mcs-atom-indices)")
    parser.add_argument(
        "--save-smarts-tag", metavar="TAG",
        help="Save the MCS SMARTS to the specified SD tag. Uses '-' if there is no MCS")
    parser.add_argument(
        "--save-smiles-tag", metavar="TAG",
        help="Save the fragment SMILES to the specified SD tag. Uses '-' if there is no MCS")
    parser.add_argument("--times", action="store_true", help="Print timing information to stderr")
    # NOTE(review): action="count" defaults to None, so the later
    # "args.verbosity > 1" comparisons raise TypeError under Python 3 when -v
    # is not given; consider default=0.
    parser.add_argument("-v", "--verbose", action="count", dest="verbosity",
                        help="Print progress statistics to stderr. Use twice for higher verbosity.")
    parser.add_argument("--version", action="version", version="%(prog)s " + __version__)
    args = parser.parse_args(args)
    filename = args.filename[0]
    fname = filename.lower()
    # Choose the structure reader from the file extension.
    if fname.endswith(".smi"):
        try:
            reader = Chem.SmilesMolSupplier(filename, titleLine=False)
        except IOError:
            raise SystemExit("Unable to open SMILES file %r" % (filename, ))
    elif fname.endswith(".sdf"):
        try:
            reader = Chem.SDMolSupplier(filename)
        except IOError:
            raise SystemExit("Unable to open SD file %r" % (filename, ))
    elif fname.endswith(".gz"):
        raise SystemExit("gzip compressed files not yet supported")
    else:
        raise SystemExit("Only SMILES (.smi) and SDF (.sdf) files are supported")
    # NOTE(review): see the dest-mismatch note at the top of this function.
    if args.minNumAtoms < 2:
        parser.error("--min-num-atoms must be at least 2")
    if args.atomCompare is None:
        if args.atom_class_tag is None:
            args.atomCompare = "elements"  # Default atom comparison
        else:
            args.atomCompare = "isotopes"  # Assing the atom classes to the isotope fields
    else:
        if args.atom_class_tag is not None:
            parser.error("Cannot specify both --atom-compare and --atom-class-tag fields")
    # RDKit uses special property names starting with "_"
    # It's dangerous to use some of them directly
    for name in ("atom_class_tag", "save_atom_class_tag", "save_counts_tag", "save_atom_indices_tag",
                 "save_smarts_tag", "save_smiles_tag"):
        value = getattr(args, name)
        if value is not None:
            if value.startswith("_"):
                parser.error("--%s value may not start with a '_': %r" % (name.replace("_", "-"), value))
    # Set up some defaults depending on the output format
    atom_class_tag = args.atom_class_tag
    if args.output_format == "fragment-sdf":
        if atom_class_tag is not None:
            if args.save_atom_class_tag is None:
                args.save_atom_class_tag = atom_class_tag
    if args.output_format == "complete-sdf":
        if (args.save_atom_indices_tag is None and args.save_counts_tag is None and
                args.save_smiles_tag is None and args.save_smarts_tag is None):
            parser.error("Using --output-format complete-sdf is useless without at least one "
                         "of --save-atom-indices-tag, --save-smarts-tag, --save-smiles-tag, "
                         "or --save-counts-tag")
    t1 = time.time()
    structures = []
    if args.verbosity > 1:
        sys.stderr.write("Loading structures from %s ..." % (filename, ))
    # Load the selected, readable structures, assigning atom classes if asked.
    for molno, mol in enumerate(reader):
        if not any(molno in range_ for range_ in args.select):
            continue
        if mol is None:
            print >> sys.stderr, "Skipping unreadable structure #%d" % (molno + 1, )
            continue
        if atom_class_tag is not None:
            try:
                assign_isotopes_from_class_tag(mol, atom_class_tag)
            except ValueError as err:
                raise SystemExit("Structure #%d: %s" % (molno + 1, err))
        structures.append(mol)
        if args.verbosity > 1:
            if len(structures) % 100 == 0:
                sys.stderr.write("\rLoaded %d structures from %s ..." % (len(structures), filename))
                sys.stderr.flush()  # not needed; it's stderr. But I'm cautious.
    if args.verbosity > 1:
        sys.stderr.write("\r")
    times = {"load": time.time() - t1}
    if args.verbosity:
        print >> sys.stderr, "Loaded", len(structures), "structures from", filename, " "
    if len(structures) < 2:
        raise SystemExit("Input file %r must contain at least two structures" % (filename, ))
    mcs = fmcs(structures,
               minNumAtoms=args.minNumAtoms,
               maximize=args.maximize,
               atomCompare=args.atomCompare,
               bondCompare=args.bondCompare,
               threshold=args.threshold,
               #matchValences = args.matchValences,
               matchValences=False,  # Do I really want to support this?
               ringMatchesRingOnly=args.ringMatchesRingOnly,
               completeRingsOnly=args.completeRingsOnly,
               timeout=args.timeout,
               times=times,
               verbose=args.verbosity > 1,
               verboseDelay=1.0, )
    msg_format = "Total time %(total).2f seconds: load %(load).2f fragment %(fragment).2f select %(select).2f enumerate %(enumerate).2f"
    times["total"] = times["mcs"] + times["load"]
    if mcs and mcs.completed:
        msg_format += " (MCS found after %(best_found).2f)"
    # NOTE(review): `mol` is the reader loop variable; if no record was ever
    # read this del raises NameError before the length check above can fire.
    del mol
    if args.output:
        outfile = open(args.output, "w")
    else:
        outfile = sys.stdout
    if args.output_format == "smarts":
        if not mcs:
            outfile.write("No MCS found\n")
        else:
            if mcs.completed:
                status = "(complete search)"
            else:
                status = "(timed out)"
            outfile.write("%s %d atoms %d bonds %s\n" % (mcs.smarts, mcs.num_atoms, mcs.num_bonds,
                                                         status))
    else:
        if mcs.smarts is None:
            # There is no MCS. Use something which can't match.
            pat = Chem.MolFromSmarts("[CN]")
        else:
            # Need to make a structure output
            pat = Chem.MolFromSmarts(mcs.smarts)
        for structure in structures:
            atom_indices = structure.GetSubstructMatch(pat)
            if not atom_indices:
                # The only time that a SMARTS shouldn't match an input
                # structure is if there's a threshold cutoff and this
                # structure didn't make it.
                assert args.threshold < 1, "No indices but should have matched everything!"
                continue
            bond_indices = _get_match_bond_indices(pat, structure, atom_indices)
            subgraph = Subgraph(atom_indices, bond_indices)
            if atom_class_tag:
                restore_isotopes(structure)
            outfile.write(make_structure_format(args.output_format, mcs, structure, subgraph, args))
            if not args.output_all:
                break
    if args.output:
        outfile.close()
    if args.times or args.verbosity:
        print >> sys.stderr, msg_format % times
if __name__ == "__main__":
    # NOTE(review): argparse is imported here but not referenced in this
    # guard; presumably main() (defined above) relies on the module-global
    # "argparse" name this import binds -- confirm before removing.
    import argparse
    main(sys.argv[1:])
| {
"content_hash": "5787c4c86841106d868ef407a155465e",
"timestamp": "",
"source": "github",
"line_count": 2765,
"max_line_length": 134,
"avg_line_length": 37.65461121157324,
"alnum_prop": 0.6784613168131394,
"repo_name": "jandom/rdkit",
"id": "e1a7b15001761571cd77622b11c2aa0c63e81bdb",
"size": "105694",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rdkit/Chem/fmcs/fmcs.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "385"
},
{
"name": "C",
"bytes": "226290"
},
{
"name": "C#",
"bytes": "6745"
},
{
"name": "C++",
"bytes": "7893576"
},
{
"name": "CMake",
"bytes": "611439"
},
{
"name": "CSS",
"bytes": "3231"
},
{
"name": "FORTRAN",
"bytes": "7661"
},
{
"name": "HTML",
"bytes": "63047"
},
{
"name": "Java",
"bytes": "291815"
},
{
"name": "JavaScript",
"bytes": "11595"
},
{
"name": "LLVM",
"bytes": "29594"
},
{
"name": "Lex",
"bytes": "4508"
},
{
"name": "Makefile",
"bytes": "15435"
},
{
"name": "Objective-C",
"bytes": "298"
},
{
"name": "Python",
"bytes": "3119784"
},
{
"name": "QMake",
"bytes": "389"
},
{
"name": "SMT",
"bytes": "3010"
},
{
"name": "Shell",
"bytes": "12651"
},
{
"name": "Smarty",
"bytes": "5864"
},
{
"name": "Yacc",
"bytes": "49429"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class XValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for the ``x`` component of ``layout.scene.camera.up``."""

    def __init__(self, plotly_name="x", parent_name="layout.scene.camera.up", **kwargs):
        # Callers may override edit_type/role via kwargs; otherwise fall back
        # to the defaults for this property.
        edit_type = kwargs.pop("edit_type", "camera")
        role = kwargs.pop("role", "info")
        super(XValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
| {
"content_hash": "e379e4a7a8ed1f6513225c9023482f61",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 88,
"avg_line_length": 36.666666666666664,
"alnum_prop": 0.6,
"repo_name": "plotly/python-api",
"id": "5a5b79998ed7e7542d4a8d3ed7169d4088a11b64",
"size": "440",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/layout/scene/camera/up/_x.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
import os, sys, hashlib
# Reproduce this output with slashes consistent for Windows systems
#ba2812a436909554688154be461d976c A\SEC575-Clown-Chat\nvram
# Optimized for low-memory systems, read whole file with blocksize=0
def md5sum(filename, blocksize=65536):
    """Return the hex MD5 digest of *filename*.

    Reads the file in chunks of *blocksize* bytes to keep memory use low.
    A *blocksize* of 0 reads the whole file in a single call (faster but
    uses more memory) -- this is what the -f command line flag requests.
    """
    hash = hashlib.md5()
    with open(filename, "rb") as f:
        if blocksize:
            # b"" sentinel matches on both Python 2 (str) and Python 3
            # (bytes).  The original used "" -- which never matches bytes on
            # Python 3 -- and with blocksize=0, f.read(0) returned the
            # sentinel immediately, so the "fast" mode hashed nothing.
            for block in iter(lambda: f.read(blocksize), b""):
                hash.update(block)
        else:
            # blocksize == 0: slurp the entire file at once.
            hash.update(f.read())
    return hash.hexdigest()
def usage():
    """Print command-line help for md5deep.py to stdout."""
    # Parenthesized single-argument print behaves identically as a statement
    # on Python 2 and as a function call on Python 3; the original bare
    # print statements were a SyntaxError under Python 3.
    print("Usage: md5deep.py [OPTIONS] [FILES]")
    print("-r - recursive mode, all subdirectories are traversed.")
    print("-X <file> - enables negative matching mode.")
    print("-f - speed up hash calculations, using more memory.")
    print("-0 - Uses a NULL character (/0) to terminate each line instead of a newline. Useful for processing filenames with strange characters.")
def validate_hashes(hashfile, hashlist):
    """Compare *hashlist* against the negative-match file *hashfile*.

    *hashfile* lines are parsed as "<md5> <filename>"; *hashlist* is a list
    of (filename, hash) tuples built by the caller.  Mirrors md5deep's "-n"
    behavior: prints only the filename for entries whose file no longer
    exists, otherwise prints the stored filename and hash that differ.
    NOTE: uses Python 2 print statements.
    """
    # Open file and build a new hashlist
    hashlistrec = []
    with open(hashfile, "r") as f:
        for line in f:
            # assumes a single separating space and no spaces inside the
            # filename -- a filename containing spaces would make this
            # unpack raise ValueError; TODO confirm the on-disk format
            filehash,filename = line.rstrip().split(" ")
            # Convert to platform convention directory separators
            filename = normfname(filename)
            # Add entry to hashlistrec
            hashlistrec.append((filename, filehash))
    # Set difference keeps entries that are in the negative-match file but
    # absent (or different) in the freshly computed hashlist.
    for diff in list(set(hashlistrec) - set(hashlist)):
        # Replicate "-n" md5deep functionality; print only the filename
        # if the file is missing in the filename list; print the hash
        # of the current file if it is different from the negative match
        # file.
        if (not os.path.isfile(diff[0])):
            # File from negative match list is missing, just print filename
            print winfname(diff[0])
        else:
            # diff is (filename, hash); winfname on the hash is a no-op --
            # presumably intentional, verify against expected output format.
            print diff[0] + " " + winfname(diff[1])
def winfname(filename):
    """Return *filename* rewritten with Windows-style backslash separators."""
    return "\\".join(filename.split("/"))
def normfname(filename):
    """Normalize *filename* to the current platform's path separators."""
    # Windows ("nt") wants backslashes; every other platform wants slashes.
    wanted, unwanted = ("\\", "/") if os.name == 'nt' else ("/", "\\")
    return filename.replace(unwanted, wanted)
if __name__ == '__main__':
    # Command-line option state; None/empty until a flag is seen.
    opt_recursive = None
    opt_negmatch = None
    opt_fast = None
    opt_null = None
    opt_files = []
    # No arguments at all: show help and exit successfully.
    if len(sys.argv) == 1:
        usage()
        sys.exit(0)
    args = sys.argv[1:]
    # Manual iteration so the "-X" handler can consume the following
    # argument (the negative-match file name) with next(it).
    it = iter(args)
    for i in it:
        if i == '-r':
            opt_recursive = True
            continue
        elif i == '-0':
            opt_null = True
            continue
        elif i == '-f':
            # No continue needed: nothing follows the elif chain in the loop.
            opt_fast = True
        elif i == '-X':
            opt_negmatch = next(it)
            if not os.path.isfile(opt_negmatch):
                sys.stdout.write("Cannot open negative match file %s\n"%opt_negmatch)
                sys.exit(-1)
            continue
        else:
            opt_files.append(i)
    # -f trades memory for speed: blocksize 0 means "read whole file".
    if opt_fast:
        md5blocklen=0
    else:
        # Default to optimize for low-memory systems
        md5blocklen=65536
    # Build a list of (filename, hash) tuples for each file, regardless of
    # specified options (note: order is filename first, hash second).
    hashlist = []
    # Hash files in the current directory
    for f in opt_files:
        if os.path.isfile(f):
            hashlist.append((f, md5sum(f, md5blocklen)))
    # Walk all subdirectories
    if opt_recursive:
        # NOTE(review): this walks every raw argv entry, including option
        # strings like "-r" and the -X file argument; nonexistent paths make
        # os.walk yield nothing, but paths given both as files and as walk
        # roots are hashed twice -- confirm intended behavior.
        for start in sys.argv[1:]:
            for (directory, _, files) in os.walk(start):
                for f in files:
                    path = os.path.join(directory, f)
                    hashlist.append((path, md5sum(path, md5blocklen)))
    # With the hashlist built, compare to the negative match list, or print
    # the results.
    if opt_negmatch:
        validate_hashes(opt_negmatch, hashlist)
    else:
        # Just print out the list with Windows-style filenames
        # (Python 2 print statements).
        for hash in hashlist:
            if opt_null:
                print "%s %s\0"%(hash[1],winfname(hash[0]))
            else:
                print "%s %s"%(hash[1],winfname(hash[0]))
| {
"content_hash": "c27c026bfd97e6982b7db8e2d8daf03d",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 152,
"avg_line_length": 33.88135593220339,
"alnum_prop": 0.5795397698849425,
"repo_name": "joswr1ght/md5deep",
"id": "75051fb2331190beaf5c1cd8670161962153e9d7",
"size": "4121",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "md5deep.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4121"
}
],
"symlink_target": ""
} |
import re
from utils.cache import cache
@cache(use_mem=True,use_file=True)
def text_to_segment_list(text):
    """Tokenize *text*: lower-case it and split on runs of non a-z characters.

    :param text: input string (may be None or empty)
    :return: list of lowercase word fragments; empty list for falsy input
    """
    if not text:
        return []
    return re.split('[^a-z]+', text.lower())
| {
"content_hash": "a774e5300242f6f5b1d9b2ae7d57912b",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 60,
"avg_line_length": 21.363636363636363,
"alnum_prop": 0.6212765957446809,
"repo_name": "DannyLee1991/article_cosine_similarity",
"id": "e33712a831eaca91e802cb2dd4e46c6ff9a7c1d2",
"size": "255",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main/segment.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "12528"
}
],
"symlink_target": ""
} |
"""Moira lists app"""
from django.apps import AppConfig
class MoiraListsConfig(AppConfig):
    """Django AppConfig for the moira_lists application."""

    # Dotted module path Django uses to locate this application.
    name = "moira_lists"
| {
"content_hash": "fe9230f0493bbd2a9a792dadffdedc0b",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 34,
"avg_line_length": 18.75,
"alnum_prop": 0.7,
"repo_name": "mitodl/open-discussions",
"id": "f0bf80700877eda481c183d58c3d91fe114a1fdc",
"size": "150",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "moira_lists/apps.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "1040"
},
{
"name": "HTML",
"bytes": "78316"
},
{
"name": "JavaScript",
"bytes": "1704037"
},
{
"name": "Procfile",
"bytes": "675"
},
{
"name": "Python",
"bytes": "2264549"
},
{
"name": "SCSS",
"bytes": "133442"
},
{
"name": "Shell",
"bytes": "11787"
},
{
"name": "TypeScript",
"bytes": "307134"
}
],
"symlink_target": ""
} |
from awsutils.exceptions.aws import AWSException
class SDBException(AWSException):
    """Base class for all Amazon SimpleDB service errors in this module."""
# Each subclass below mirrors one Amazon SimpleDB API error code; the
# inline comment is the service's error message template and HTTP_STATUS is
# the HTTP response status returned alongside that error code.
class AccessFailure(SDBException):
    #Access to the resource is denied
    HTTP_STATUS = 403

class AttributeDoesNotExist(SDBException):
    #Attribute does not exist
    HTTP_STATUS = 404

class AuthFailure(SDBException):
    #AWS was not able to validate the provided access credentials.
    HTTP_STATUS = 403

class AuthMissingFailure(SDBException):
    #AWS was not able to authenticate the request: access credentials are missing.
    HTTP_STATUS = 403

class ConditionalCheckFailed(SDBException):
    #Conditional check failed.
    # Attribute (" + name + ") value exists. OR
    # Attribute ("+ name +") value is ("+ value +") but was expected ("+ expValue +")
    HTTP_STATUS = 409

class ExistsAndExpectedValue(SDBException):
    #Expected.Exists=false and Expected.Value cannot be specified together
    HTTP_STATUS = 400

class FeatureDeprecated(SDBException):
    #The replace flag must be specified per attribute, not per item.
    HTTP_STATUS = 400

class IncompleteExpectedExpression(SDBException):
    #If Expected.Exists=true or unspecified, then Expected.Value has to be specified
    HTTP_STATUS = 400

class InternalError(SDBException):
    #Request could not be executed due to an internal service error.
    HTTP_STATUS = 500

class InvalidAction(SDBException):
    #The action " + actionName + " is not valid for this web service.
    HTTP_STATUS = 400

class InvalidHTTPAuthHeader(SDBException):
    #The HTTP authorization header is bad, use " + correctFormat".
    HTTP_STATUS = 400

class InvalidHttpRequest(SDBException):
    #The HTTP request is invalid. Reason: " + reason".
    HTTP_STATUS = 400

class InvalidLiteral(SDBException):
    #Illegal literal in the filter expression.
    HTTP_STATUS = 400

class InvalidNextToken(SDBException):
    #The specified next token is not valid.
    HTTP_STATUS = 400

class InvalidNumberPredicates(SDBException):
    #Too many predicates in the query expression.
    HTTP_STATUS = 400

class InvalidNumberValueTests(SDBException):
    #Too many value tests per predicate in the query expression.
    HTTP_STATUS = 400

class InvalidParameterCombination(SDBException):
    #The parameter " + param1 + " cannot be used with the parameter " + param2".
    HTTP_STATUS = 400

class InvalidParameterValue(SDBException):
    #Value (" + value + ") for parameter MaxNumberOfDomains is invalid.
    # MaxNumberOfDomains must be between 1 and 100. See "Amazon SimpleDB Error Codes"
    HTTP_STATUS = 400

class InvalidQueryExpression(SDBException):
    #The specified query expression syntax is not valid.
    HTTP_STATUS = 400

class InvalidResponseGroups(SDBException):
    #The following response groups are invalid: " + invalidRGStr.
    HTTP_STATUS = 400

class InvalidService(SDBException):
    #The Web Service " + serviceName + " does not exist.
    HTTP_STATUS = 400

class InvalidSortExpression(SDBException):
    #The sort attribute must be present in at least one of the predicates,
    # and the predicate cannot contain the is null operator.
    HTTP_STATUS = 400

class InvalidURI(SDBException):
    #The URI " + requestURI + " is not valid.
    HTTP_STATUS = 400

class InvalidWSAddressingProperty(SDBException):
    #WS-Addressing parameter " + paramName + " has a wrong value: " + paramValue".
    HTTP_STATUS = 400

class InvalidWSDLVersion(SDBException):
    #Parameter (" + parameterName +") is only supported in WSDL version 2009-04-15 or beyond.
    # Please upgrade to new version
    HTTP_STATUS = 400

class MissingAction(SDBException):
    #No action was supplied with this request.
    HTTP_STATUS = 400

class MissingParameter(SDBException):
    #The request must contain the specified missing parameter.
    HTTP_STATUS = 400

class MissingWSAddressingProperty(SDBException):
    #WS-Addressing is missing a required parameter (" + paramName + ")".
    HTTP_STATUS = 400

class MultipleExistsConditions(SDBException):
    #Only one Exists condition can be specified
    HTTP_STATUS = 400

class MultipleExpectedNames(SDBException):
    #Only one Expected.Name can be specified
    HTTP_STATUS = 400

class MultipleExpectedValues(SDBException):
    #Only one Expected.Value can be specified
    HTTP_STATUS = 400

class MultiValuedAttribute(SDBException):
    #Attribute (" + name + ") is multi-valued.
    # Conditional check can only be performed on a single-valued attribute
    HTTP_STATUS = 409

class NoSuchDomain(SDBException):
    #The specified domain does not exist.
    HTTP_STATUS = 400

class NoSuchVersion(SDBException):
    #The requested version (" + version + ") of service " + service + " does not exist.
    HTTP_STATUS = 400

class NotYetImplemented(SDBException):
    #Feature " + feature + " is not yet available".
    HTTP_STATUS = 401

class NumberDomainsExceeded(SDBException):
    #The domain limit was exceeded.
    HTTP_STATUS = 409

class NumberDomainAttributesExceeded(SDBException):
    #Too many attributes in this domain.
    HTTP_STATUS = 409

class NumberDomainBytesExceeded(SDBException):
    #Too many bytes in this domain.
    HTTP_STATUS = 409

class NumberItemAttributesExceeded(SDBException):
    #Too many attributes in this item.
    HTTP_STATUS = 409
# NOTE(review): the original module defined NumberSubmittedAttributesExceeded
# twice with identical bodies; the second definition silently shadowed the
# first, so exactly one class existed at runtime.  The two are merged here
# (behavior-preserving) with both original error-message comments kept.
class NumberSubmittedAttributesExceeded(SDBException):
    #Too many attributes in a single call.
    #Too many attributes for item itemName in a single call.
    # Up to 256 attributes per call allowed.
    HTTP_STATUS = 409
# Remaining Amazon SimpleDB error-code classes; same convention as above
# (comment = service message template, HTTP_STATUS = HTTP response status).
class NumberSubmittedItemsExceeded(SDBException):
    #Too many items in a single call. Up to 25 items per call allowed.
    HTTP_STATUS = 409

class RequestExpired(SDBException):
    #Request has expired. " + paramType + " date is " + date".
    HTTP_STATUS = 400

class QueryTimeout(SDBException):
    #A timeout occurred when attempting to query domain <domain name>
    # with query expression <query expression>. BoxUsage [<box usage value>]".
    HTTP_STATUS = 408

class ServiceUnavailable(SDBException):
    #Service Amazon SimpleDB is busy handling other requests, likely due to too many simultaneous requests.
    # Consider reducing the frequency of your requests, and try again. See About Response Code 503.
    HTTP_STATUS = 503

class TooManyRequestedAttributes(SDBException):
    #Too many attributes requested.
    HTTP_STATUS = 400

class UnsupportedHttpVerb(SDBException):
    #The requested HTTP verb is not supported: " + verb".
    HTTP_STATUS = 400

class UnsupportedNextToken(SDBException):
    #The specified next token is no longer supported. Please resubmit your query.
    HTTP_STATUS = 400

class URITooLong(SDBException):
    #The URI exceeded the maximum limit of "+ maxLength".
    HTTP_STATUS = 400
| {
"content_hash": "c2933648a86d0a5b488d204d0ccea5be",
"timestamp": "",
"source": "github",
"line_count": 210,
"max_line_length": 107,
"avg_line_length": 33.371428571428574,
"alnum_prop": 0.716466894977169,
"repo_name": "sanyi/awsutils",
"id": "26f684dacda9648ccc7dc594f2b1cb908b0ea8b8",
"size": "7213",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "awsutils/exceptions/sdb.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "185753"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
# Third Party Stuff
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('proposals', '0008_auto_20150528_2243'),
]
operations = [
migrations.CreateModel(
name='ProposalSectionReviewerVoteValue',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Created At')),
('modified_at', models.DateTimeField(auto_now=True, verbose_name='Last Modified At')),
('vote_value', models.SmallIntegerField()),
('description', models.CharField(max_length=255)),
('created_by', models.ForeignKey(related_name='created_proposalsectionreviewervotevalue_set', verbose_name='Created By', blank=True, to=settings.AUTH_USER_MODEL, null=True)),
('modified_by', models.ForeignKey(related_name='updated_proposalsectionreviewervotevalue_set', verbose_name='Modified By', blank=True, to=settings.AUTH_USER_MODEL, null=True)),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
migrations.AlterField(
model_name='proposalsectionreviewervote',
name='vote_value',
field=models.ForeignKey(to='proposals.ProposalSectionReviewerVoteValue'),
preserve_default=True,
),
]
| {
"content_hash": "23627037688b0fc5dbaf5992151392a1",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 192,
"avg_line_length": 43.76315789473684,
"alnum_prop": 0.6259771497294047,
"repo_name": "ChillarAnand/junction",
"id": "192fbfc7fefa12d32e354684b63feb4faf489ed7",
"size": "1687",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "junction/proposals/migrations/0009_auto_20150529_2216.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "190835"
},
{
"name": "HTML",
"bytes": "159139"
},
{
"name": "JavaScript",
"bytes": "48999"
},
{
"name": "Python",
"bytes": "314195"
},
{
"name": "Shell",
"bytes": "599"
}
],
"symlink_target": ""
} |
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# Sphinx configuration for the "Personal Site" documentation build.
extensions = []

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'Personal Site'
copyright = u'2014, Brandon Waskiewicz'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'personal-sitedoc'

# -- Options for LaTeX output --------------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'personal-site.tex', u'Personal Site Documentation',
     u'Brandon Waskiewicz', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True

# -- Options for manual page output --------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'personal-site', u'Personal Site Documentation',
     [u'Brandon Waskiewicz'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False

# -- Options for Texinfo output ------------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'personal-site', u'Personal Site Documentation',
     u'Brandon Waskiewicz', 'Personal Site',
     'My personal website.','Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
"content_hash": "69bd85224fa1771fd1473cb6f80aa0de",
"timestamp": "",
"source": "github",
"line_count": 229,
"max_line_length": 80,
"avg_line_length": 32.17467248908297,
"alnum_prop": 0.7027687296416938,
"repo_name": "brandonw/personal-site",
"id": "24950d04ae6b1a7b637c1b2b75fcab882162e618",
"size": "7764",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "9936"
},
{
"name": "JavaScript",
"bytes": "2325"
},
{
"name": "Python",
"bytes": "54035"
},
{
"name": "Shell",
"bytes": "5109"
}
],
"symlink_target": ""
} |
"""
@authors: Sergei Garbuzov
@status: Development
@version: 1.1.0
"""
import time
import json
from pybvc.controller.controller import Controller
from pybvc.openflowdev.ofswitch import (OFSwitch,
FlowEntry,
Instruction,
OutputAction,
Match)
from pybvc.common.status import STATUS
from pybvc.common.utils import load_dict_from_file
from pybvc.common.constants import (ETH_TYPE_IPv4,
IP_PROTO_UDP,
IP_DSCP_CS1,
IP_ECN_CE)
def of_demo_10():
    """Demo 10: install, read back, and delete one OpenFlow flow entry.

    Loads controller/switch settings from cfg.yml, pushes a flow that
    matches a set of Ethernet/IPv4/UDP header fields plus input port with
    an Output:NORMAL action, fetches the configured flow back from the
    Controller, then deletes it.  Exits the process on any error.
    NOTE: Python 2 print statements are used in places.
    """
    f = "cfg.yml"
    d = {}
    # load_dict_from_file fills d in place and returns False on failure.
    if(load_dict_from_file(f, d) is False):
        print("Config file '%s' read error: " % f)
        exit()
    try:
        ctrlIpAddr = d['ctrlIpAddr']
        ctrlPortNum = d['ctrlPortNum']
        ctrlUname = d['ctrlUname']
        ctrlPswd = d['ctrlPswd']
        nodeName = d['nodeName']
        rundelay = d['rundelay']
    except:
        # Bare except kept as-is: any missing key aborts the demo.
        print ("Failed to get Controller device attributes")
        exit(0)
    print ("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<")
    print ("<<< Demo 10 Start")
    print ("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<")
    ctrl = Controller(ctrlIpAddr, ctrlPortNum, ctrlUname, ctrlPswd)
    ofswitch = OFSwitch(ctrl, nodeName)
    # --- Flow Match: Ethernet Source Address
    #                 Ethernet Destination Address
    #                 IPv4 Source Address
    #                 IPv4 Destination Address
    #                 IP DSCP
    #                 IP ECN
    #                 UDP Source Port Number
    #                 UDP Destination Port Number
    #                 Input Port
    # NOTE: Ethernet type MUST be 2048 (0x800) -> IPv4 protocol
    eth_type = ETH_TYPE_IPv4
    eth_src = "00:00:00:11:23:ae"
    eth_dst = "20:14:29:01:19:61"
    ipv4_src = "192.1.2.3/10"
    ipv4_dst = "172.168.5.6/18"
    ip_proto = IP_PROTO_UDP
    ip_dscp = IP_DSCP_CS1 # 'Class Selector' = 'Priority'
    ip_ecn = IP_ECN_CE # Congestion Encountered
    udp_src_port = 25364
    udp_dst_port = 8080
    input_port = 13
    print ("<<< 'Controller': %s, 'OpenFlow' switch: '%s'" %
           (ctrlIpAddr, nodeName))
    print "\n"
    print ("<<< Set OpenFlow flow on the Controller")
    print (" Match: Ethernet Type (%s)\n"
           " Ethernet Source Address (%s)\n"
           " Ethernet Destination Address (%s)\n"
           " IPv4 Source Address (%s)\n"
           " IPv4 Destination Address (%s)\n"
           " IP Protocol Number (%s)\n"
           " IP DSCP (%s)\n"
           " IP ECN (%s)\n"
           " UDP Source Port Number (%s)\n"
           " UDP Destination Port Number (%s)\n"
           " Input Port (%s)" %
           (hex(eth_type), eth_src,
            eth_dst, ipv4_src, ipv4_dst,
            ip_proto, ip_dscp, ip_ecn,
            udp_src_port, udp_dst_port,
            input_port))
    print (" Action: Output (NORMAL)")
    time.sleep(rundelay)
    # Build the flow entry: table 0, fixed flow id 17, no timeouts.
    flow_entry = FlowEntry()
    table_id = 0
    flow_entry.set_flow_table_id(table_id)
    flow_id = 17
    flow_entry.set_flow_id(flow_id)
    flow_entry.set_flow_hard_timeout(0)
    flow_entry.set_flow_idle_timeout(0)
    flow_entry.set_flow_priority(flow_priority=1008)
    # --- Instruction: 'Apply-actions'
    #     Action: 'Output' NORMAL
    instruction = Instruction(instruction_order=0)
    action = OutputAction(order=0, port="NORMAL")
    instruction.add_apply_action(action)
    flow_entry.add_instruction(instruction)
    # --- Match Fields: Ethernet Type
    #                   Ethernet Source Address
    #                   Ethernet Destination Address
    #                   IPv4 Source Address
    #                   IPv4 Destination Address
    #                   IP Protocol Number
    #                   IP DSCP
    #                   IP ECN
    #                   UDP Source Port Number
    #                   UDP Destination Port Number
    #                   Input Port
    match = Match()
    match.set_eth_type(eth_type)
    match.set_eth_src(eth_src)
    match.set_eth_dst(eth_dst)
    match.set_ipv4_src(ipv4_src)
    match.set_ipv4_dst(ipv4_dst)
    match.set_ip_proto(ip_proto)
    match.set_ip_dscp(ip_dscp)
    match.set_ip_ecn(ip_ecn)
    match.set_udp_src_port(udp_src_port)
    match.set_udp_dst_port(udp_dst_port)
    match.set_in_port(in_port=input_port)
    flow_entry.add_match(match)
    print ("\n")
    print ("<<< Flow to send:")
    print flow_entry.get_payload()
    time.sleep(rundelay)
    # Push the flow to the Controller.
    result = ofswitch.add_modify_flow(flow_entry)
    status = result.get_status()
    if(status.eq(STATUS.OK)):
        print ("<<< Flow successfully added to the Controller")
    else:
        print ("\n")
        print ("!!!Demo terminated, reason: %s" % status.brief().lower())
        exit(0)
    print ("\n")
    print ("<<< Get configured flow from the Controller")
    time.sleep(rundelay)
    # Read the flow back to verify it was stored.
    result = ofswitch.get_configured_flow(table_id, flow_id)
    status = result.get_status()
    if(status.eq(STATUS.OK)):
        print ("<<< Flow successfully read from the Controller")
        print ("Flow info:")
        flow = result.get_data()
        print json.dumps(flow, indent=4)
    else:
        print ("\n")
        print ("!!!Demo terminated, reason: %s" % status.brief().lower())
        exit(0)
    print ("\n")
    print ("<<< Delete flow with id of '%s' from the Controller's cache "
           "and from the table '%s' on the '%s' node" %
           (flow_id, table_id, nodeName))
    time.sleep(rundelay)
    # Clean up: remove the demo flow again.
    result = ofswitch.delete_flow(flow_entry.get_flow_table_id(),
                                  flow_entry.get_flow_id())
    status = result.get_status()
    if(status.eq(STATUS.OK)):
        print ("<<< Flow successfully removed from the Controller")
    else:
        print ("\n")
        print ("!!!Demo terminated, reason: %s" % status.brief().lower())
        exit(0)
    print ("\n")
    print (">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
    print (">>> Demo End")
    print (">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
if __name__ == "__main__":
    # Script entry point: run the demo when executed directly.
    of_demo_10()
| {
"content_hash": "ad07108936d3f734e32fabe1b0d87b3e",
"timestamp": "",
"source": "github",
"line_count": 190,
"max_line_length": 73,
"avg_line_length": 34.10526315789474,
"alnum_prop": 0.5026234567901234,
"repo_name": "s-garbuzov/pybvc",
"id": "d12d1c9083c27229eb57444b0483ecdb2f497439",
"size": "8054",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "samples/sampleopenflow/demos/demo10.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Perl",
"bytes": "2452"
},
{
"name": "Python",
"bytes": "415443"
}
],
"symlink_target": ""
} |
"""
Usage:
./rtail.py user@host:path/foo.log bar.log host2:/path/baz.log
"""
import optparse
import os
import re
import select
import subprocess
import sys
def spawn_stream(arg, identity=None):
    """Start a ``tail -f`` subprocess for *arg* and return the Popen object.

    Remote specs look like ``[user@]host:path`` and are tailed through ssh
    (optionally with an identity file); anything else is treated as a local
    file path.  The returned process gets a ``name`` attribute holding the
    original argument, used later to label output lines.
    """
    if re.match(r"^(.+@)?[a-zA-Z0-9.-]+:.+", arg):
        # Remote location: run tail on the far end through ssh.
        hostname, path = arg.split(":", 1)
        cmd = ["ssh"]
        if identity:
            cmd += ["-i", identity]
        cmd += [hostname, "tail -f " + path]
    else:
        cmd = ["tail", "-f", arg]
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    proc.name = arg
    return proc


def format_chunk(name, data):
    """Return *data* prefixed with a six-character label derived from *name*.

    The label is the text before the first dot in *name*; when *name*
    contains no dot, the whole name is used.  (The original code called
    ``.group(1)`` on the match unconditionally and crashed with
    AttributeError for dot-less names.)
    """
    match = re.match(r'^(.*?)\.', name)
    label = match.group(1) if match else name
    if isinstance(data, bytes):
        # os.read() returns bytes on Python 3; decode so "%s" does not
        # render the b'...' repr.
        data = data.decode("utf-8", errors="replace")
    return "[%-6s] %s" % (label[:6], data)


def main():
    """Multiplex ``tail -f`` output from several local/remote log files.

    Parses command-line arguments, spawns one tail subprocess per file,
    then forwards each chunk of output to stdout with a host label until
    interrupted with Ctrl-C.
    """
    op = optparse.OptionParser()
    op.add_option("-i", "--identity", dest="identity")
    options, args = op.parse_args()
    streams = [spawn_stream(arg, options.identity) for arg in args]
    try:
        # Stop once every stream has hit EOF: select() on an empty fd list
        # would block forever.
        while streams:
            r, _, _ = select.select(
                [stream.stdout.fileno() for stream in streams], [], [])
            for fileno in r:
                for stream in streams:
                    if stream.stdout.fileno() != fileno:
                        continue
                    data = os.read(fileno, 4096)
                    if not data:
                        # EOF: the tail process went away; stop watching it.
                        streams.remove(stream)
                        break
                    sys.stdout.write(format_chunk(stream.name, data))
                    break
    except KeyboardInterrupt:
        print(" --- End of Logging ---")
if __name__ == "__main__":
main()
| {
"content_hash": "24913374f7feac31e3dfcff1f8f0dc4a",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 122,
"avg_line_length": 31.836363636363636,
"alnum_prop": 0.48715019988577957,
"repo_name": "samuelclay/NewsBlur",
"id": "c2f5854b1aba0f3fb227a326fe4d2c43788cbc90",
"size": "1774",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils/rtail.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "454"
},
{
"name": "CSS",
"bytes": "776813"
},
{
"name": "CoffeeScript",
"bytes": "13093"
},
{
"name": "Dockerfile",
"bytes": "3704"
},
{
"name": "HCL",
"bytes": "29303"
},
{
"name": "HTML",
"bytes": "1921563"
},
{
"name": "Java",
"bytes": "853216"
},
{
"name": "JavaScript",
"bytes": "1803770"
},
{
"name": "Jinja",
"bytes": "89121"
},
{
"name": "Kotlin",
"bytes": "298281"
},
{
"name": "Makefile",
"bytes": "8909"
},
{
"name": "Objective-C",
"bytes": "2565934"
},
{
"name": "Perl",
"bytes": "55606"
},
{
"name": "Python",
"bytes": "2067295"
},
{
"name": "R",
"bytes": "527"
},
{
"name": "Ruby",
"bytes": "2094"
},
{
"name": "SCSS",
"bytes": "47069"
},
{
"name": "Shell",
"bytes": "51526"
},
{
"name": "Swift",
"bytes": "136021"
}
],
"symlink_target": ""
} |
"""
@name: PyHouse/src/Certificates/newcert.py
@author: D. Brian Kimmel
@contact: d.briankimmel@gmail.com
@copyright: (c) 2015-2019 by D. Brian Kimmel
@note: Created on Feb 24, 2015
@license: MIT License
@summary:
For posterity, you'll first need to generate a few client certificates all signed by the same CA.
You've probably already done this, but so others can understand the answer and try it out on their own
(and so I could test my answer myself ;-)), they'll need some code like this:
With this program, you can create a few certificates like so:
$ python newcert.py a
$ python newcert.py b
"""
from twisted.python.filepath import FilePath
from twisted.internet.ssl import PrivateCertificate, KeyPair, DN
def getCAPrivateCert():
    """Load the cached CA certificate, creating and caching it on first use.

    The CA private certificate is persisted in ``ca-private-cert.pem`` in
    the working directory so that repeated runs sign client certificates
    with the same authority.
    """
    ca_path = FilePath(b"ca-private-cert.pem")
    if ca_path.exists():
        # Reuse the previously generated authority.
        return PrivateCertificate.loadPEM(ca_path.getContent())
    # First run: create a fresh 4096-bit self-signed authority and cache it.
    ca_key = KeyPair.generate(size=4096)
    ca_cert = ca_key.selfSignedCert(1, CN="the-authority")
    ca_path.setContent(ca_cert.dumpPEM())
    return ca_cert
def clientCertFor(p_name):
    """Return a client PrivateCertificate for *p_name*, signed by our CA."""
    ca_cert = getCAPrivateCert()
    client_key = KeyPair.generate(size=4096)
    request = client_key.requestObject(DN(CN=p_name), "sha1")
    # NOTE(review): serialNumber is fixed at 1 for every client cert and
    # sha1 is a weak digest -- fine for this demo, not for production use.
    signed = ca_cert.signRequestObject(
        request, serialNumber=1, digestAlgorithm="sha1")
    return PrivateCertificate.fromCertificateAndKeyPair(signed, client_key)
# CLI entry point: `python newcert.py <name>` writes <name>.client.private.pem
# containing a client certificate (plus key) signed by the cached CA.
if __name__ == '__main__':
    import sys
    l_name = sys.argv[1]
    l_pem = clientCertFor(l_name.encode("utf-8")).dumpPEM()
    FilePath(l_name.encode("utf-8") + b".client.private.pem").setContent(l_pem)
# ## END DBK
| {
"content_hash": "fa7ca875a5f7e363945bfdc58ececcb9",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 102,
"avg_line_length": 34.3,
"alnum_prop": 0.6997084548104956,
"repo_name": "DBrianKimmel/PyHouse",
"id": "dc4bf6be7d06649fb817c41df1487ef05d6e8e48",
"size": "1715",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "Project/src/Certificates/newcert.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "114778"
},
{
"name": "HTML",
"bytes": "15398"
},
{
"name": "JavaScript",
"bytes": "220171"
},
{
"name": "Python",
"bytes": "1491784"
},
{
"name": "Shell",
"bytes": "2131"
}
],
"symlink_target": ""
} |
import unicodecsv
from kvtags.models import *
def import_tags_csv(csv_file):
    """Imports tags from a csv file to the database.

    A file instance must be provided as an argument.
    File must be opened beforehand.
    The first row is for tag key.
    The second row is for keys of key-value pairs
    Subsequent rows are values of key-value pairs, one row for each tag instance.

    Example:
        color
        h,s,v,hex
        0,100,50,#7F0000
        30,100,50,#7F3F00
        60,100,50,#7F7F00

    :param csv_file: opened csv file instance
    """
    reader = unicodecsv.reader(csv_file, encoding='utf-8')
    # `.next()` was Python 2 only; the builtin next() works on 2.6+ and 3.x.
    tag_key = next(reader)[0]
    keys = next(reader)
    for row in reader:
        # One Tag instance per data row, with one key-value pair per column.
        tag = Tag.objects.create(key=tag_key)
        for index, value in enumerate(row):
            tag.add_kv(key=keys[index], value=value)
| {
"content_hash": "4f9abd768a50ecf676a21ef179e5b348",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 81,
"avg_line_length": 25.9375,
"alnum_prop": 0.653012048192771,
"repo_name": "ozen/django-kvtags",
"id": "388d6186477c378448834fbafac13ba65c61d5ae",
"size": "830",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kvtags/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "860"
},
{
"name": "Python",
"bytes": "30283"
}
],
"symlink_target": ""
} |
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.25
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1PodAffinityTerm(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Python attribute name -> OpenAPI type, consumed generically by to_dict().
    openapi_types = {
        'label_selector': 'V1LabelSelector',
        'namespace_selector': 'V1LabelSelector',
        'namespaces': 'list[str]',
        'topology_key': 'str'
    }

    # Python attribute name -> JSON field name in the Kubernetes API.
    attribute_map = {
        'label_selector': 'labelSelector',
        'namespace_selector': 'namespaceSelector',
        'namespaces': 'namespaces',
        'topology_key': 'topologyKey'
    }

    def __init__(self, label_selector=None, namespace_selector=None, namespaces=None, topology_key=None, local_vars_configuration=None):  # noqa: E501
        """V1PodAffinityTerm - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._label_selector = None
        self._namespace_selector = None
        self._namespaces = None
        self._topology_key = None
        self.discriminator = None

        # Optional fields are only assigned (and thus validated) when given;
        # topology_key is required and always routed through its setter.
        if label_selector is not None:
            self.label_selector = label_selector
        if namespace_selector is not None:
            self.namespace_selector = namespace_selector
        if namespaces is not None:
            self.namespaces = namespaces
        self.topology_key = topology_key

    @property
    def label_selector(self):
        """Gets the label_selector of this V1PodAffinityTerm.  # noqa: E501


        :return: The label_selector of this V1PodAffinityTerm.  # noqa: E501
        :rtype: V1LabelSelector
        """
        return self._label_selector

    @label_selector.setter
    def label_selector(self, label_selector):
        """Sets the label_selector of this V1PodAffinityTerm.


        :param label_selector: The label_selector of this V1PodAffinityTerm.  # noqa: E501
        :type: V1LabelSelector
        """

        self._label_selector = label_selector

    @property
    def namespace_selector(self):
        """Gets the namespace_selector of this V1PodAffinityTerm.  # noqa: E501


        :return: The namespace_selector of this V1PodAffinityTerm.  # noqa: E501
        :rtype: V1LabelSelector
        """
        return self._namespace_selector

    @namespace_selector.setter
    def namespace_selector(self, namespace_selector):
        """Sets the namespace_selector of this V1PodAffinityTerm.


        :param namespace_selector: The namespace_selector of this V1PodAffinityTerm.  # noqa: E501
        :type: V1LabelSelector
        """

        self._namespace_selector = namespace_selector

    @property
    def namespaces(self):
        """Gets the namespaces of this V1PodAffinityTerm.  # noqa: E501

        namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means \"this pod's namespace\".  # noqa: E501

        :return: The namespaces of this V1PodAffinityTerm.  # noqa: E501
        :rtype: list[str]
        """
        return self._namespaces

    @namespaces.setter
    def namespaces(self, namespaces):
        """Sets the namespaces of this V1PodAffinityTerm.

        namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means \"this pod's namespace\".  # noqa: E501

        :param namespaces: The namespaces of this V1PodAffinityTerm.  # noqa: E501
        :type: list[str]
        """

        self._namespaces = namespaces

    @property
    def topology_key(self):
        """Gets the topology_key of this V1PodAffinityTerm.  # noqa: E501

        This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.  # noqa: E501

        :return: The topology_key of this V1PodAffinityTerm.  # noqa: E501
        :rtype: str
        """
        return self._topology_key

    @topology_key.setter
    def topology_key(self, topology_key):
        """Sets the topology_key of this V1PodAffinityTerm.

        This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.  # noqa: E501

        :param topology_key: The topology_key of this V1PodAffinityTerm.  # noqa: E501
        :type: str
        """
        # topology_key is the only required field of this model; reject None
        # unless client-side validation has been switched off.
        if self.local_vars_configuration.client_side_validation and topology_key is None:  # noqa: E501
            raise ValueError("Invalid value for `topology_key`, must not be `None`")  # noqa: E501

        self._topology_key = topology_key

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialize nested models (anything with a to_dict()).
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V1PodAffinityTerm):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1PodAffinityTerm):
            return True

        return self.to_dict() != other.to_dict()
| {
"content_hash": "105a603e1a947cf63707ee49c3701c20",
"timestamp": "",
"source": "github",
"line_count": 201,
"max_line_length": 366,
"avg_line_length": 37.00995024875622,
"alnum_prop": 0.6361070036295201,
"repo_name": "kubernetes-client/python",
"id": "0e141795f91a5e2f71e50ed4b551e7d8018667a4",
"size": "7456",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kubernetes/client/models/v1_pod_affinity_term.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "356"
},
{
"name": "Python",
"bytes": "11454299"
},
{
"name": "Shell",
"bytes": "43108"
}
],
"symlink_target": ""
} |
"""
Created on Wed Dec 9 21:31:53 2015
Create random synthetic velocity profile + linear first guesses
@author: alex
"""
import random
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def savitzky_golay(y, window_size, order, deriv=0, rate=1):
    r"""Smooth (and optionally differentiate) data with a Savitzky-Golay filter.

    The Savitzky-Golay filter removes high frequency noise from data.
    It has the advantage of preserving the original shape and
    features of the signal better than other types of filtering
    approaches, such as moving averages techniques.

    Parameters
    ----------
    y : array_like, shape (N,)
        the values of the time history of the signal.
    window_size : int
        the length of the window. Must be an odd integer number.
    order : int
        the order of the polynomial used in the filtering.
        Must be less then `window_size` - 1.
    deriv: int
        the order of the derivative to compute (default = 0 means only smoothing)
    rate : number
        sample spacing scale applied to derivatives (default 1).

    Returns
    -------
    ys : ndarray, shape (N)
        the smoothed signal (or it's n-th derivative).

    Raises
    ------
    ValueError
        if `window_size` or `order` cannot be converted to int.
    TypeError
        if `window_size` is even/non-positive or too small for `order`.

    Notes
    -----
    The main idea behind this approach is to make for each point a
    least-square fit with a polynomial of high order over an odd-sized
    window centered at the point.

    Examples
    --------
    t = np.linspace(-4, 4, 500)
    y = np.exp( -t**2 ) + np.random.normal(0, 0.05, t.shape)
    ysg = savitzky_golay(y, window_size=31, order=4)

    References
    ----------
    .. [1] A. Savitzky, M. J. E. Golay, Smoothing and Differentiation of
       Data by Simplified Least Squares Procedures. Analytical
       Chemistry, 1964, 36 (8), pp 1627-1639.
    .. [2] Numerical Recipes 3rd Edition: The Art of Scientific Computing
       W.H. Press, S.A. Teukolsky, W.T. Vetterling, B.P. Flannery
       Cambridge University Press ISBN-13: 9780521880688
    """
    from math import factorial

    try:
        # Py3 fix: `except ValueError, msg` was Python-2-only syntax, and
        # np.int was removed in NumPy 1.24 -- plain int()/abs() suffice.
        window_size = abs(int(window_size))
        order = abs(int(order))
    except ValueError:
        raise ValueError("window_size and order have to be of type int")
    if window_size % 2 != 1 or window_size < 1:
        raise TypeError("window_size size must be a positive odd number")
    if window_size < order + 2:
        raise TypeError("window_size is too small for the polynomials order")
    order_range = range(order + 1)
    half_window = (window_size - 1) // 2
    # precompute coefficients (Vandermonde matrix of window offsets).
    # np.mat/.A are deprecated (removed in NumPy 2.0); a plain ndarray and
    # row indexing of the pseudo-inverse are equivalent.
    b = np.array([[k**i for i in order_range]
                  for k in range(-half_window, half_window + 1)])
    m = np.linalg.pinv(b)[deriv] * rate**deriv * factorial(deriv)
    # pad the signal at the extremes with values taken from the signal
    # itself (mirrored about the end points) to avoid edge artefacts.
    firstvals = y[0] - np.abs(y[1:half_window + 1][::-1] - y[0])
    lastvals = y[-1] + np.abs(y[-half_window - 1:-1][::-1] - y[-1])
    y = np.concatenate((firstvals, y, lastvals))
    return np.convolve(m[::-1], y, mode='valid')
# --- Build a synthetic Vp profile from a smoothed random walk -------------
# NOTE(review): this script uses Python-2-era matplotlib calls (`plt.hold`,
# `fig.gca(projection='3d')`, `ax.hold`) that were removed in modern
# matplotlib -- confirm the target environment before running.
plt.close('all')
random.seed(2)  # fixed seed so the "random" profile is reproducible
X = range(256)
Z = np.linspace(0,1000,256)  # depth axis, 0-1000 m in 256 samples
pos = 50
posVpVs = 1.7  # typical crustal Vp/Vs starting value
L = np.array([pos])
vpvs = np.array([posVpVs])
# Biased random walk: +1/-0.9 steps give a slight upward drift with depth.
for x in X[1:]:
    pos += random.choice((-0.9,1)) #random.choice((-1,1))
    posVpVs += random.choice((-0.02,0.02))
    L=np.append(L,pos)
    vpvs = np.append(vpvs,posVpVs)
L=70*L  # scale the walk into a velocity range (m/s)
Vp = savitzky_golay(L, 51, 3) # window size 51, polynomial order 3
A = np.array([ Z, np.ones(256)])  # design matrix for linear least squares
Vs = Vp/savitzky_golay(vpvs, 51, 3) # window size 51, polynomial order 3
w = np.linalg.lstsq(A.T,Vp)[0] # obtaining the parameters
# plotting the line
lineP = w[0]*Z+w[1]+500 # regression line, shifted +500 as "wrong" first guess
w = np.linalg.lstsq(A.T,Vs)[0] # obtaining the parameters
# plotting the line
lineS = w[0]*Z+w[1]-250 # regression line, shifted -250 as "wrong" first guess
# --- Figure 1: Vp profile vs first guess ---------------------------------
plt.figure()
plt.hold(True)
plt.plot(L,Z,label="Random walk")
plt.plot(Vp,Z,linewidth=4,label="P wave velocity from this random walk")
plt.plot(lineP,Z,linewidth=4,label="First guess")
ax = plt.axes()
ax.set_ylim(Z[0],Z[-1])
ax.invert_yaxis()  # depth increases downward
plt.legend()
# --- Figure 2: smoothed Vp/Vs ratio --------------------------------------
plt.figure()
plt.hold(True)
plt.plot(vpvs,Z,linewidth=4,label="Random walk vp/vs")
plt.legend()
ax = plt.axes()
ax.set_ylim(Z[0],Z[-1])
ax.invert_yaxis()
# --- Figure 3: Vs profile vs first guess ---------------------------------
plt.figure()
plt.hold(True)
plt.plot(Vs,Z,linewidth=4,label="S wave velocity from random vp/vs")
plt.plot(lineS,Z,linewidth=4,label="First guess")
plt.legend()
ax = plt.axes()
ax.set_ylim(Z[0],Z[-1])
ax.invert_yaxis()
# Save profiles (depth, velocity) pairs for the inversion example.
np.savetxt("dataExample/realProfileP.txt",np.dstack((Z,Vp))[0])
np.savetxt("dataExample/realProfileS.txt",np.dstack((Z,Vs))[0])
np.savetxt("dataExample/firstGuessP.txt",np.dstack((Z,lineP))[0])
np.savetxt("dataExample/firstGuessS.txt",np.dstack((Z,lineS))[0])
#####################################################################
# Acquisition geometry: Cartesian product of the coordinate lists below.
coordShotsX=[300,500]
coordShotsY=[400]
coordShotsZ=[650]
coordStatsX=[200,300,400,500,600]
coordStatsY=[200,300,400,500,600]
coordStatsZ=[200,300,400,500,600]
Xshots=[]
Yshots=[]
Zshots=[]
Xstats=[]
Ystats=[]
Zstats=[]
#Open a file in write mode:
fo = open("dataExample/coordShots.txt", "w+")
# Write every shot position (one "x y z" line each).
for coordX in coordShotsX:
    for coordY in coordShotsY:
        for coordZ in coordShotsZ:
            Xshots.append(coordX)
            Yshots.append(coordY)
            Zshots.append(coordZ)
            fo.write(str(coordX)+" "+str(coordY)+" "+str(coordZ)+"\n")
# Close opened file
fo.close()
#Open a file in write mode:
fo = open("dataExample/coordStats.txt", "w+")
# Write every station position (one "x y z" line each).
for coordX in coordStatsX:
    for coordY in coordStatsY:
        for coordZ in coordStatsZ:
            Xstats.append(coordX)
            Ystats.append(coordY)
            Zstats.append(coordZ)
            fo.write(str(coordX)+" "+str(coordY)+" "+str(coordZ)+"\n")
# Close opened file
fo.close()
# --- Figure 4: 3D view of shots (red triangles) and stations (blue dots) --
fig = plt.figure()
ax = fig.gca(projection='3d') #Axes3D(fig)
ax.hold(True)
ax.scatter(Xstats,Ystats,Zstats,zdir='z',s=20,c='b')
# Few shots are drawn larger so they stand out among the stations.
if (len(coordShotsX) > 3):
    ax.scatter(Xshots,Yshots,Zshots,zdir='z',s=20,c='r',marker='^')
else:
    ax.scatter(Xshots,Yshots,Zshots,zdir='z',s=200,c='r',marker='^')
ax.set_xlim3d(min(min(Xshots),min(Xstats))-100,max(max(Xshots),max(Xstats))+100)
ax.set_ylim3d(min(min(Yshots),min(Ystats))-100,max(max(Yshots),max(Ystats))+100)
ax.set_zlim3d(min(min(Zshots),min(Zstats))-100,max(max(Zshots),max(Zstats))+100)
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
ax.set_xlabel('X (m)')
ax.set_ylabel('Y (m)')
ax.set_zlabel('Z (m)')
ax.set_title('Geometry')
ax.invert_zaxis()  # depth increases downward
| {
"content_hash": "cb2b0d90204d33be0c43ce618f160a37",
"timestamp": "",
"source": "github",
"line_count": 203,
"max_line_length": 89,
"avg_line_length": 33.12807881773399,
"alnum_prop": 0.6511524163568774,
"repo_name": "bottero/IMCMCrun",
"id": "038be167da31531251f298959cfc70941f40266f",
"size": "6749",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/Synthetic1/createSyntheticRandomProfile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "294"
},
{
"name": "C",
"bytes": "1277"
},
{
"name": "C++",
"bytes": "522432"
},
{
"name": "Fortran",
"bytes": "27087"
},
{
"name": "Makefile",
"bytes": "6908"
},
{
"name": "Python",
"bytes": "85900"
},
{
"name": "Shell",
"bytes": "1173"
}
],
"symlink_target": ""
} |
import io
import os
from absl.testing import absltest
from absl.testing import flagsaver
import requests
from grr_api_client import errors as api_errors
import grr_colab
from grr_colab import _api
from grr_colab import errors
from grr_colab import testing
from grr_colab import vfs
from grr_response_core.lib.util import temp
from grr_response_proto import jobs_pb2
from grr_response_server import data_store
class VfsTest(testing.ColabE2ETest):
FAKE_CLIENT_ID = 'C.0123456789abcdef'
  # ------------------------------------------------------------------
  # vfs.VFS.open() tests
  # ------------------------------------------------------------------

  # open() must raise when the caller has no approval for the client.
  @testing.with_approval_checks
  def testOpen_WithoutApproval(self):
    data_store.REL_DB.WriteClientMetadata(
        client_id=VfsTest.FAKE_CLIENT_ID, fleetspeak_enabled=False)
    vfs_obj = vfs.VFS(self._get_fake_api_client(), jobs_pb2.PathSpec.OS)
    with self.assertRaises(errors.ApprovalMissingError) as context:
      vfs_obj.open('/foo/bar')
    self.assertEqual(context.exception.client_id, VfsTest.FAKE_CLIENT_ID)

  # open() on a path that was never seen by GRR must fail.
  def testOpen_DoesNotExist(self):
    data_store.REL_DB.WriteClientMetadata(
        client_id=VfsTest.FAKE_CLIENT_ID, fleetspeak_enabled=False)
    vfs_obj = vfs.VFS(self._get_fake_api_client(), jobs_pb2.PathSpec.OS)
    with self.assertRaises(api_errors.ResourceNotFoundError):
      vfs_obj.open('/foo/bar')

  # ls() on the parent dir only records metadata; the file content itself
  # was never collected, so open() must still fail.
  def testOpen_NotCollected(self):
    data_store.REL_DB.WriteClientMetadata(
        client_id=VfsTest.FAKE_CLIENT_ID, fleetspeak_enabled=False)
    content = b'foo bar'
    api_client = self._get_fake_api_client()
    client = grr_colab.Client(api_client)
    vfs_obj = vfs.VFS(api_client, jobs_pb2.PathSpec.OS)
    with temp.AutoTempFilePath() as temp_filepath:
      with io.open(temp_filepath, 'wb') as filedesc:
        filedesc.write(content)
      client.ls(os.path.dirname(temp_filepath))
      with self.assertRaises(api_errors.ResourceNotFoundError):
        vfs_obj.open(temp_filepath)

  # After collection (client.open), the full content is readable.
  def testOpen_ReadAll(self):
    data_store.REL_DB.WriteClientMetadata(
        client_id=VfsTest.FAKE_CLIENT_ID, fleetspeak_enabled=False)
    content = b'foo bar'
    api_client = self._get_fake_api_client()
    client = grr_colab.Client(api_client)
    vfs_obj = vfs.VFS(api_client, jobs_pb2.PathSpec.OS)
    with temp.AutoTempFilePath() as temp_filepath:
      with io.open(temp_filepath, 'wb') as filedesc:
        filedesc.write(content)
      with client.open(temp_filepath):
        pass
      with vfs_obj.open(temp_filepath) as filedesc:
        self.assertEqual(filedesc.read(), content)

  # read(n) with n larger than the file returns the whole content.
  def testOpen_ReadMore(self):
    data_store.REL_DB.WriteClientMetadata(
        client_id=VfsTest.FAKE_CLIENT_ID, fleetspeak_enabled=False)
    content = b'foo bar'
    api_client = self._get_fake_api_client()
    client = grr_colab.Client(api_client)
    vfs_obj = vfs.VFS(api_client, jobs_pb2.PathSpec.OS)
    with temp.AutoTempFilePath() as temp_filepath:
      with io.open(temp_filepath, 'wb') as filedesc:
        filedesc.write(content)
      with client.open(temp_filepath):
        pass
      with vfs_obj.open(temp_filepath) as filedesc:
        self.assertEqual(filedesc.read(10), content)

  # read(n) with n smaller than the file returns exactly n bytes.
  def testOpen_ReadLess(self):
    data_store.REL_DB.WriteClientMetadata(
        client_id=VfsTest.FAKE_CLIENT_ID, fleetspeak_enabled=False)
    content = b'foo bar'
    api_client = self._get_fake_api_client()
    client = grr_colab.Client(api_client)
    vfs_obj = vfs.VFS(api_client, jobs_pb2.PathSpec.OS)
    with temp.AutoTempFilePath() as temp_filepath:
      with io.open(temp_filepath, 'wb') as filedesc:
        filedesc.write(content)
      with client.open(temp_filepath):
        pass
      with vfs_obj.open(temp_filepath) as filedesc:
        self.assertEqual(filedesc.read(3), b'foo')

  # read1() must return a single buffered chunk, not the whole 1 MiB file,
  # and must advance the file position.
  def testOpen_Buffering(self):
    data_store.REL_DB.WriteClientMetadata(
        client_id=VfsTest.FAKE_CLIENT_ID, fleetspeak_enabled=False)
    size = 1024 * 1024
    api_client = self._get_fake_api_client()
    client = grr_colab.Client(api_client)
    vfs_obj = vfs.VFS(api_client, jobs_pb2.PathSpec.OS)
    with temp.AutoTempFilePath() as temp_filepath:
      with io.open(temp_filepath, 'wb') as filedesc:
        filedesc.write(b'a' * size)
      with client.open(temp_filepath):
        pass
      with vfs_obj.open(temp_filepath) as filedesc:
        self.assertEqual(filedesc.tell(), 0)
        self.assertLess(len(filedesc.read1()), size)
        self.assertGreater(filedesc.tell(), 0)

  # Reading a file larger than the internal buffer returns all bytes.
  def testOpen_ReadLargeFile(self):
    data_store.REL_DB.WriteClientMetadata(
        client_id=VfsTest.FAKE_CLIENT_ID, fleetspeak_enabled=False)
    size = 1024 * 1024
    api_client = self._get_fake_api_client()
    client = grr_colab.Client(api_client)
    vfs_obj = vfs.VFS(api_client, jobs_pb2.PathSpec.OS)
    with temp.AutoTempFilePath() as temp_filepath:
      with io.open(temp_filepath, 'wb') as filedesc:
        filedesc.write(b'a' * size)
      with client.open(temp_filepath):
        pass
      with vfs_obj.open(temp_filepath) as filedesc:
        self.assertEqual(len(filedesc.read()), size)

  # seek() to an offset still inside the current buffer works.
  def testOpen_SeekWithinOneBuffer(self):
    data_store.REL_DB.WriteClientMetadata(
        client_id=VfsTest.FAKE_CLIENT_ID, fleetspeak_enabled=False)
    content = b'foo bar'
    api_client = self._get_fake_api_client()
    client = grr_colab.Client(api_client)
    vfs_obj = vfs.VFS(api_client, jobs_pb2.PathSpec.OS)
    with temp.AutoTempFilePath() as temp_filepath:
      with io.open(temp_filepath, 'wb') as filedesc:
        filedesc.write(content)
      with client.open(temp_filepath):
        pass
      with vfs_obj.open(temp_filepath) as filedesc:
        filedesc.read(1)
        self.assertEqual(filedesc.seek(4), 4)
        self.assertEqual(filedesc.read(), b'bar')

  # seek() across the buffer boundary (file is two 512 KiB halves) reads
  # the straddling bytes correctly.
  def testOpen_SeekOutOfBuffer(self):
    data_store.REL_DB.WriteClientMetadata(
        client_id=VfsTest.FAKE_CLIENT_ID, fleetspeak_enabled=False)
    size = 1024 * 512
    api_client = self._get_fake_api_client()
    client = grr_colab.Client(api_client)
    vfs_obj = vfs.VFS(api_client, jobs_pb2.PathSpec.OS)
    with temp.AutoTempFilePath() as temp_filepath:
      with io.open(temp_filepath, 'wb') as filedesc:
        filedesc.write(b'a' * size)
        filedesc.write(b'b' * size)
      with client.open(temp_filepath):
        pass
      with vfs_obj.open(temp_filepath) as filedesc:
        self.assertEqual(filedesc.seek(size - 1), size - 1)
        self.assertEqual(filedesc.read(2), b'ab')
  # ------------------------------------------------------------------
  # vfs.VFS.ls() tests
  # ------------------------------------------------------------------

  # ls() must raise when the caller has no approval for the client.
  @testing.with_approval_checks
  def testLs_WithoutApproval(self):
    data_store.REL_DB.WriteClientMetadata(
        client_id=VfsTest.FAKE_CLIENT_ID, fleetspeak_enabled=False)
    vfs_obj = vfs.VFS(self._get_fake_api_client(), jobs_pb2.PathSpec.OS)
    with self.assertRaises(errors.ApprovalMissingError) as context:
      vfs_obj.ls('/foo/bar')
    self.assertEqual(context.exception.client_id, VfsTest.FAKE_CLIENT_ID)

  # ls() on a never-collected path must fail.
  def testLs_DoesNotExist(self):
    data_store.REL_DB.WriteClientMetadata(
        client_id=VfsTest.FAKE_CLIENT_ID, fleetspeak_enabled=False)
    vfs_obj = vfs.VFS(self._get_fake_api_client(), jobs_pb2.PathSpec.OS)
    with self.assertRaises(api_errors.ResourceNotFoundError):
      vfs_obj.ls('/foo/bar')

  # ls() returns one stat entry per file, with correct paths and sizes.
  def testLs_ContainsFiles(self):
    data_store.REL_DB.WriteClientMetadata(
        client_id=VfsTest.FAKE_CLIENT_ID, fleetspeak_enabled=False)
    dir_nodes = [
        # name, content
        ('file1', b'foo'),
        ('file2', b'foo\nbar'),
    ]
    api_client = self._get_fake_api_client()
    client = grr_colab.Client(api_client)
    vfs_obj = vfs.VFS(api_client, jobs_pb2.PathSpec.OS)
    with temp.AutoTempDirPath(remove_non_empty=True) as temp_dirpath:
      for filename, file_content in dir_nodes:
        filepath = os.path.join(temp_dirpath, filename)
        with io.open(filepath, 'wb') as filedesc:
          filedesc.write(file_content)
      client.ls(temp_dirpath)
      stat_entries = vfs_obj.ls(temp_dirpath)
      # Sort by path: ls() gives no ordering guarantee.
      stat_entries = sorted(stat_entries, key=lambda _: _.pathspec.path)
      self.assertLen(stat_entries, 2)
      self.assertEqual(stat_entries[0].pathspec.path,
                       os.path.join(temp_dirpath, 'file1'))
      self.assertEqual(stat_entries[0].st_size, 3)
      self.assertEqual(stat_entries[1].pathspec.path,
                       os.path.join(temp_dirpath, 'file2'))
      self.assertEqual(stat_entries[1].st_size, 7)

  # ls() on an empty directory returns no entries (but does not raise).
  def testLs_EmptyDirectory(self):
    data_store.REL_DB.WriteClientMetadata(
        client_id=VfsTest.FAKE_CLIENT_ID, fleetspeak_enabled=False)
    api_client = self._get_fake_api_client()
    client = grr_colab.Client(api_client)
    vfs_obj = vfs.VFS(api_client, jobs_pb2.PathSpec.OS)
    with temp.AutoTempDirPath(remove_non_empty=True) as temp_dirpath:
      client.ls(temp_dirpath)
      stat_entries = vfs_obj.ls(temp_dirpath)
      self.assertEmpty(stat_entries)

  # ls() on a regular file (collected via glob) must raise.
  def testLs_NotDirectory(self):
    data_store.REL_DB.WriteClientMetadata(
        client_id=VfsTest.FAKE_CLIENT_ID, fleetspeak_enabled=False)
    api_client = self._get_fake_api_client()
    client = grr_colab.Client(api_client)
    vfs_obj = vfs.VFS(api_client, jobs_pb2.PathSpec.OS)
    with temp.AutoTempFilePath() as temp_file:
      client.glob(temp_file)
      with self.assertRaises(errors.NotDirectoryError):
        vfs_obj.ls(temp_file)

  # Recursive ls() lists both directories and the files inside them.
  def testLs_Recursive(self):
    data_store.REL_DB.WriteClientMetadata(
        client_id=VfsTest.FAKE_CLIENT_ID, fleetspeak_enabled=False)
    dir_nodes = [
        'file0',
        os.path.join('dir1', 'file1'),
        os.path.join('dir2', 'file2'),
        os.path.join('dir2', 'file3'),
    ]
    api_client = self._get_fake_api_client()
    client = grr_colab.Client(api_client)
    vfs_obj = vfs.VFS(api_client, jobs_pb2.PathSpec.OS)
    with temp.AutoTempDirPath(remove_non_empty=True) as temp_dirpath:
      os.mkdir(os.path.join(temp_dirpath, 'dir1'))
      os.mkdir(os.path.join(temp_dirpath, 'dir2'))
      for path in dir_nodes:
        with io.open(os.path.join(temp_dirpath, path), 'wb') as filedesc:
          filedesc.write(b'foo')
      client.ls(temp_dirpath, max_depth=10)
      stat_entries = vfs_obj.ls(temp_dirpath, max_depth=5)
      paths = sorted(_.pathspec.path for _ in stat_entries)
      self.assertLen(paths, 6)
      self.assertEqual(paths[0], os.path.join(temp_dirpath, 'dir1'))
      self.assertEqual(paths[1], os.path.join(temp_dirpath, 'dir1', 'file1'))
      self.assertEqual(paths[2], os.path.join(temp_dirpath, 'dir2'))
      self.assertEqual(paths[3], os.path.join(temp_dirpath, 'dir2', 'file2'))
      self.assertEqual(paths[4], os.path.join(temp_dirpath, 'dir2', 'file3'))
      self.assertEqual(paths[5], os.path.join(temp_dirpath, 'file0'))

  # max_depth truncates the listing even when deeper data was collected.
  def testLs_MaxDepth(self):
    data_store.REL_DB.WriteClientMetadata(
        client_id=VfsTest.FAKE_CLIENT_ID, fleetspeak_enabled=False)
    dir_components = ['dir1', 'dir2', 'dir3', 'dir4', 'dir5']
    api_client = self._get_fake_api_client()
    client = grr_colab.Client(api_client)
    vfs_obj = vfs.VFS(api_client, jobs_pb2.PathSpec.OS)
    with temp.AutoTempDirPath(remove_non_empty=True) as temp_dirpath:
      os.makedirs(os.path.join(temp_dirpath, *dir_components))
      client.ls(temp_dirpath, max_depth=10)
      stat_entries = vfs_obj.ls(temp_dirpath, max_depth=3)
      paths = sorted(_.pathspec.path for _ in stat_entries)
      self.assertLen(paths, 3)
      self.assertEqual(paths[0], os.path.join(temp_dirpath, 'dir1'))
      self.assertEqual(paths[1], os.path.join(temp_dirpath, 'dir1', 'dir2'))
      self.assertEqual(paths[2],
                       os.path.join(temp_dirpath, 'dir1', 'dir2', 'dir3'))
  # ------------------------------------------------------------------
  # vfs.VFS.refresh() tests
  # ------------------------------------------------------------------

  # refresh() must raise when the caller has no approval for the client.
  @testing.with_approval_checks
  def testRefresh_WithoutApproval(self):
    data_store.REL_DB.WriteClientMetadata(
        client_id=VfsTest.FAKE_CLIENT_ID, fleetspeak_enabled=False)
    vfs_obj = vfs.VFS(self._get_fake_api_client(), jobs_pb2.PathSpec.OS)
    with self.assertRaises(errors.ApprovalMissingError) as context:
      vfs_obj.refresh('/foo/bar')
    self.assertEqual(context.exception.client_id, VfsTest.FAKE_CLIENT_ID)

  # refresh() on a never-collected path must fail.
  def testRefresh_DoesNotExist(self):
    data_store.REL_DB.WriteClientMetadata(
        client_id=VfsTest.FAKE_CLIENT_ID, fleetspeak_enabled=False)
    vfs_obj = vfs.VFS(self._get_fake_api_client(), jobs_pb2.PathSpec.OS)
    with self.assertRaises(api_errors.ResourceNotFoundError):
      vfs_obj.refresh('/foo/bar')

  # refresh() picks up a directory created after the initial collection.
  def testRefresh_Plain(self):
    data_store.REL_DB.WriteClientMetadata(
        client_id=VfsTest.FAKE_CLIENT_ID, fleetspeak_enabled=False)
    api_client = self._get_fake_api_client()
    client = grr_colab.Client(api_client)
    vfs_obj = vfs.VFS(api_client, jobs_pb2.PathSpec.OS)
    with temp.AutoTempDirPath(remove_non_empty=True) as temp_dirpath:
      os.mkdir(os.path.join(temp_dirpath, 'dir1'))
      client.ls(temp_dirpath)
      stat_entries = vfs_obj.ls(temp_dirpath)
      self.assertLen(stat_entries, 1)
      self.assertEqual(stat_entries[0].pathspec.path,
                       os.path.join(temp_dirpath, 'dir1'))
      # dir2 appears on disk only after the first collection.
      os.mkdir(os.path.join(temp_dirpath, 'dir2'))
      vfs_obj.refresh(temp_dirpath)
      stat_entries = vfs_obj.ls(temp_dirpath)
      paths = sorted(_.pathspec.path for _ in stat_entries)
      self.assertLen(paths, 2)
      self.assertEqual(paths[0], os.path.join(temp_dirpath, 'dir1'))
      self.assertEqual(paths[1], os.path.join(temp_dirpath, 'dir2'))

  # Recursive refresh honours max_depth: only three levels are collected
  # even though five nested directories exist on disk.
  def testRefresh_Recursive(self):
    data_store.REL_DB.WriteClientMetadata(
        client_id=VfsTest.FAKE_CLIENT_ID, fleetspeak_enabled=False)
    dir_components = ['dir1', 'dir2', 'dir3', 'dir4', 'dir5']
    api_client = self._get_fake_api_client()
    client = grr_colab.Client(api_client)
    vfs_obj = vfs.VFS(api_client, jobs_pb2.PathSpec.OS)
    with temp.AutoTempDirPath(remove_non_empty=True) as temp_dirpath:
      os.makedirs(os.path.join(temp_dirpath, dir_components[0]))
      client.ls(temp_dirpath)
      os.makedirs(os.path.join(temp_dirpath, *dir_components))
      vfs_obj.refresh(temp_dirpath, max_depth=3)
      stat_entries = vfs_obj.ls(temp_dirpath, max_depth=10)
      paths = sorted(_.pathspec.path for _ in stat_entries)
      self.assertLen(paths, 3)
      self.assertEqual(paths[0], os.path.join(temp_dirpath, 'dir1'))
      self.assertEqual(paths[1], os.path.join(temp_dirpath, 'dir1', 'dir2'))
      self.assertEqual(paths[2],
                       os.path.join(temp_dirpath, 'dir1', 'dir2', 'dir3'))
@testing.with_approval_checks
def testWget_WithoutApproval(self):
    """`wget` must raise ApprovalMissingError when no approval is granted."""
    data_store.REL_DB.WriteClientMetadata(
        client_id=VfsTest.FAKE_CLIENT_ID, fleetspeak_enabled=False)
    vfs_obj = vfs.VFS(self._get_fake_api_client(), jobs_pb2.PathSpec.OS)
    with flagsaver.flagsaver(grr_admin_ui_url=self.endpoint):
        with self.assertRaises(errors.ApprovalMissingError) as context:
            vfs_obj.wget('/foo/bar')
        # The raised error should carry the id of the offending client.
        self.assertEqual(context.exception.client_id, VfsTest.FAKE_CLIENT_ID)
def testWget_NoAdminURLSpecified(self):
    """`wget` must raise ValueError when the admin UI URL flag is empty."""
    data_store.REL_DB.WriteClientMetadata(
        client_id=VfsTest.FAKE_CLIENT_ID, fleetspeak_enabled=False)
    api_client = self._get_fake_api_client()
    client = grr_colab.Client(api_client)
    vfs_obj = vfs.VFS(api_client, jobs_pb2.PathSpec.OS)
    with flagsaver.flagsaver(grr_admin_ui_url=''):
        with temp.AutoTempFilePath() as temp_file:
            with io.open(temp_file, 'wb') as filedesc:
                filedesc.write(b'foo bar')
            # Fetch the file into the VFS first, so the only possible failure
            # left for wget is the missing admin UI URL.
            with client.open(temp_file):
                pass
            with self.assertRaises(ValueError):
                vfs_obj.wget(temp_file)
def testWget_FileDoesNotExist(self):
    """`wget` on a path unknown to the VFS must raise."""
    data_store.REL_DB.WriteClientMetadata(
        client_id=VfsTest.FAKE_CLIENT_ID, fleetspeak_enabled=False)
    vfs_obj = vfs.VFS(self._get_fake_api_client(), jobs_pb2.PathSpec.OS)
    with flagsaver.flagsaver(grr_admin_ui_url=self.endpoint):
        with self.assertRaises(Exception):
            vfs_obj.wget('/non/existing/file')
def testWget_IsDirectory(self):
    """`wget` must reject directories with ValueError."""
    data_store.REL_DB.WriteClientMetadata(
        client_id=VfsTest.FAKE_CLIENT_ID, fleetspeak_enabled=False)
    api_client = self._get_fake_api_client()
    client = grr_colab.Client(api_client)
    vfs_obj = vfs.VFS(api_client, jobs_pb2.PathSpec.OS)
    with flagsaver.flagsaver(grr_admin_ui_url=self.endpoint):
        with temp.AutoTempDirPath() as temp_dir:
            # Make the directory known to the VFS before attempting wget.
            client.ls(temp_dir)
            with self.assertRaises(ValueError):
                vfs_obj.wget(temp_dir)
def testWget_LinkWorksWithOfflineClient(self):
    """A `wget` link must serve the previously fetched file content.

    NOTE(review): the name suggests the link is served from the server-side
    cache without contacting the client — confirm against the vfs module.
    """
    data_store.REL_DB.WriteClientMetadata(
        client_id=VfsTest.FAKE_CLIENT_ID, fleetspeak_enabled=False)
    api_client = self._get_fake_api_client()
    client = grr_colab.Client(api_client)
    vfs_obj = vfs.VFS(api_client, jobs_pb2.PathSpec.OS)
    content = b'foo bar'
    with flagsaver.flagsaver(grr_admin_ui_url=self.endpoint):
        with temp.AutoTempFilePath() as temp_file:
            with io.open(temp_file, 'wb') as filedesc:
                filedesc.write(content)
            # Opening the file fetches its contents into the VFS.
            with client.open(temp_file):
                pass
            link = vfs_obj.wget(temp_file)
            # Downloading through the link must yield the original bytes.
            self.assertEqual(requests.get(link).content, content)
@classmethod
def _get_fake_api_client(cls):
    """Return an API handle for the canned test client."""
    return _api.get().Client(cls.FAKE_CLIENT_ID).Get()
class VfsFileTest(absltest.TestCase):
    """Unit tests for the file-like interface exposed by vfs.VfsFile.

    Each VfsFile is constructed from a callable that, given a starting
    offset, returns an iterator over byte chunks.
    """

    def testClosed_AfterClose(self):
        file_obj = vfs.VfsFile(lambda _: iter([]))
        self.assertFalse(file_obj.closed)
        file_obj.close()
        self.assertTrue(file_obj.closed)

    def testClose_DoesNotFailOnDoubleClose(self):
        file_obj = vfs.VfsFile(lambda _: iter([]))
        file_obj.close()
        file_obj.close()

    def testFileno_UnsupportedOperation(self):
        file_obj = vfs.VfsFile(lambda _: iter([]))
        with self.assertRaises(io.UnsupportedOperation):
            file_obj.fileno()

    def testFlush_DoesNotFail(self):
        file_obj = vfs.VfsFile(lambda _: iter([]))
        file_obj.flush()

    def testIsatty(self):
        file_obj = vfs.VfsFile(lambda _: iter([]))
        self.assertFalse(file_obj.isatty())

    def testSeekable(self):
        file_obj = vfs.VfsFile(lambda _: iter([]))
        self.assertTrue(file_obj.seekable())

    def testSeek_SetWhence(self):
        file_obj = vfs.VfsFile(lambda _: iter([b'foobar']))
        file_obj.read(5)
        self.assertEqual(file_obj.seek(2), 2)
        self.assertEqual(file_obj.read(), b'obar')

    def testSeek_CurWhence(self):
        file_obj = vfs.VfsFile(lambda _: iter([b'foobar']))
        file_obj.read(5)
        self.assertEqual(file_obj.seek(-2, whence=io.SEEK_CUR), 3)
        self.assertEqual(file_obj.read(), b'bar')

    def testSeek_EndWhenceUnsupportedOperation(self):
        file_obj = vfs.VfsFile(lambda _: iter([]))
        with self.assertRaises(io.UnsupportedOperation):
            file_obj.seek(0, whence=io.SEEK_END)

    def testSeek_ValueErrorOnFileClosed(self):
        file_obj = vfs.VfsFile(lambda _: iter([]))
        file_obj.close()
        with self.assertRaises(ValueError):
            file_obj.seek(0)

    def testSeek_OutOfBuffer(self):
        # The fetch callable honours its offset argument here, so seeking
        # past the cached data restarts iteration from that offset.
        chunks = [b'a', b'b', b'c', b'd', b'e']
        file_obj = vfs.VfsFile(lambda start: iter(chunks[start:]))
        file_obj.read(1)
        self.assertEqual(file_obj.seek(2, whence=io.SEEK_CUR), 3)
        self.assertEqual(file_obj.read(), b'de')

    def testTell(self):
        file_obj = vfs.VfsFile(lambda _: iter([b'foo', b'bar']))
        self.assertEqual(file_obj.tell(), 0)
        file_obj.read(4)
        self.assertEqual(file_obj.tell(), 4)
        file_obj.read(1)
        self.assertEqual(file_obj.tell(), 5)

    def testTell_ValueErrorOnFileClosed(self):
        file_obj = vfs.VfsFile(lambda _: iter([]))
        file_obj.close()
        with self.assertRaises(ValueError):
            file_obj.tell()

    def testTruncate_UnsupportedOperation(self):
        file_obj = vfs.VfsFile(lambda _: iter([]))
        with self.assertRaises(io.UnsupportedOperation):
            file_obj.truncate()

    def testWritable(self):
        file_obj = vfs.VfsFile(lambda _: iter([]))
        self.assertFalse(file_obj.writable())

    def testWrite_UnsupportedOperation(self):
        file_obj = vfs.VfsFile(lambda _: iter([]))
        with self.assertRaises(io.UnsupportedOperation):
            file_obj.write(b'foo')

    def testWritelines_UnsupportedOperation(self):
        file_obj = vfs.VfsFile(lambda _: iter([]))
        with self.assertRaises(io.UnsupportedOperation):
            file_obj.writelines([b'foo'])

    def testDetach_UnsupportedOperation(self):
        file_obj = vfs.VfsFile(lambda _: iter([]))
        with self.assertRaises(io.UnsupportedOperation):
            file_obj.detach()

    def testReadable(self):
        file_obj = vfs.VfsFile(lambda _: iter([]))
        self.assertTrue(file_obj.readable())

    def testRead_LessThanBuffer(self):
        file_obj = vfs.VfsFile(lambda _: iter([b'foo']))
        self.assertEqual(file_obj.read(2), b'fo')

    def testRead_MoreThanBuffer(self):
        file_obj = vfs.VfsFile(lambda _: iter([b'foo', b'bar']))
        self.assertEqual(file_obj.read(4), b'foob')

    def testRead_AllWithSingleBuffer(self):
        file_obj = vfs.VfsFile(lambda _: iter([b'foo']))
        self.assertEqual(file_obj.read(), b'foo')

    def testRead_AllWithMultipleBuffers(self):
        file_obj = vfs.VfsFile(lambda _: iter([b'foo', b'bar']))
        self.assertEqual(file_obj.read(), b'foobar')

    def testRead_ValueErrorOnFileClosed(self):
        file_obj = vfs.VfsFile(lambda _: iter([]))
        file_obj.close()
        with self.assertRaises(ValueError):
            file_obj.read()

    def testRead_EmptyOnEof(self):
        file_obj = vfs.VfsFile(lambda _: iter([b'foo']))
        self.assertEqual(file_obj.read(), b'foo')
        self.assertEqual(file_obj.read(), b'')

    def testRead1_LessThanBuffer(self):
        file_obj = vfs.VfsFile(lambda _: iter([b'foo', b'bar', b'quux']))
        self.assertEqual(file_obj.read1(2), b'fo')

    def testRead1_MoreThanBuffer(self):
        # read1 performs at most one fetch, so it stops at the chunk boundary.
        file_obj = vfs.VfsFile(lambda _: iter([b'foo', b'bar', b'quux']))
        self.assertEqual(file_obj.read1(5), b'foo')

    def testRead1_ReadCachedAndPartlyNextBuffer(self):
        file_obj = vfs.VfsFile(lambda _: iter([b'foo', b'bar', b'quux']))
        file_obj.read(1)
        self.assertEqual(file_obj.read1(4), b'ooba')

    def testRead1_ReadCachedAndWholeNextBuffer(self):
        file_obj = vfs.VfsFile(lambda _: iter([b'foo', b'bar', b'quux']))
        file_obj.read(1)
        self.assertEqual(file_obj.read1(), b'oobar')

    def testRead1_ValueErrorOnFileClosed(self):
        file_obj = vfs.VfsFile(lambda _: iter([]))
        file_obj.close()
        with self.assertRaises(ValueError):
            file_obj.read1()

    def testRead1_EmptyOnEof(self):
        file_obj = vfs.VfsFile(lambda _: iter([b'foo']))
        self.assertEqual(file_obj.read1(), b'foo')
        self.assertEqual(file_obj.read1(), b'')
        self.assertEqual(file_obj.read1(), b'')

    def testReadinto1_LessThanBuffer(self):
        file_obj = vfs.VfsFile(lambda _: iter([b'foo', b'bar', b'quux']))
        buf = bytearray(2)
        self.assertEqual(file_obj.readinto1(buf), 2)
        self.assertEqual(buf, b'fo')

    def testReadinto1_MoreThanBuffer(self):
        file_obj = vfs.VfsFile(lambda _: iter([b'foo', b'bar', b'quux']))
        buf = bytearray(5)
        self.assertEqual(file_obj.readinto1(buf), 3)
        self.assertEqual(buf[:3], b'foo')

    def testReadinto1_ReadCachedAndPartlyNextBuffer(self):
        file_obj = vfs.VfsFile(lambda _: iter([b'foo', b'bar', b'quux']))
        file_obj.read(1)
        buf = bytearray(4)
        self.assertEqual(file_obj.readinto1(buf), 4)
        self.assertEqual(buf, b'ooba')

    def testReadinto1_ReadCachedAndWholeNextBuffer(self):
        file_obj = vfs.VfsFile(lambda _: iter([b'foo', b'bar', b'quux']))
        file_obj.read(1)
        buf = bytearray(10)
        self.assertEqual(file_obj.readinto1(buf), 5)
        self.assertEqual(buf[:5], b'oobar')

    def testReadinto1_EmptyOnEof(self):
        file_obj = vfs.VfsFile(lambda _: iter([b'foo']))
        buf = bytearray(3)
        self.assertEqual(file_obj.readinto1(buf), 3)
        self.assertEqual(buf, b'foo')
        self.assertEqual(file_obj.readinto1(buf), 0)
        self.assertEqual(file_obj.readinto1(buf), 0)

    def testReadinto1_ValueErrorOnFileClosed(self):
        file_obj = vfs.VfsFile(lambda _: iter([]))
        file_obj.close()
        with self.assertRaises(ValueError):
            file_obj.readinto1(bytearray())
# Run the absltest main loop when executed directly.
if __name__ == '__main__':
    absltest.main()
| {
"content_hash": "011273364f27c83c2742f51d577f5a51",
"timestamp": "",
"source": "github",
"line_count": 737,
"max_line_length": 77,
"avg_line_length": 31.51696065128901,
"alnum_prop": 0.6654468744618564,
"repo_name": "google/grr",
"id": "7332565e2d83501d4b889bca0712a729213f5645",
"size": "23250",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "colab/grr_colab/vfs_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "12697"
},
{
"name": "C++",
"bytes": "54814"
},
{
"name": "Dockerfile",
"bytes": "1822"
},
{
"name": "HCL",
"bytes": "8451"
},
{
"name": "HTML",
"bytes": "366783"
},
{
"name": "JavaScript",
"bytes": "13088"
},
{
"name": "Jupyter Notebook",
"bytes": "199216"
},
{
"name": "Makefile",
"bytes": "3244"
},
{
"name": "PowerShell",
"bytes": "531"
},
{
"name": "Python",
"bytes": "8844725"
},
{
"name": "Roff",
"bytes": "444"
},
{
"name": "SCSS",
"bytes": "105120"
},
{
"name": "Shell",
"bytes": "48663"
},
{
"name": "Standard ML",
"bytes": "8172"
},
{
"name": "TypeScript",
"bytes": "2139377"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import cv2
import numpy as np
import json
import copy
import pycocotools
from pycocotools.coco import COCO
from .dataset import DetDataset
from lib.utils.workspace import register, serializable
__all__ = ['KeypointTopDownBaseDataset', 'KeypointTopDownCocoDataset']
@serializable
class KeypointTopDownBaseDataset(DetDataset):
    """Base class for top_down datasets.

    All datasets should subclass it.
    All subclasses should overwrite:

        Methods:`_get_db`

    Args:
        dataset_dir (str): Root path to the dataset.
        image_dir (str): Path to a directory where images are held.
        anno_path (str): Relative path to the annotation file.
        num_joints (int): keypoint numbers
        transform (composed(operators)): A sequence of data transforms.
    """

    def __init__(self,
                 dataset_dir,
                 image_dir,
                 anno_path,
                 num_joints,
                 transform=[]):
        # NOTE(review): mutable default argument ([]) is shared between
        # instances; safe only as long as nothing mutates self.transform
        # in place — TODO confirm.
        super().__init__(dataset_dir, image_dir, anno_path)
        self.image_info = {}
        self.ann_info = {}

        self.img_prefix = os.path.join(dataset_dir, image_dir)
        self.transform = transform
        self.ann_info['num_joints'] = num_joints

        # Sample records; populated by subclasses (see _get_db).
        self.db = []

    def __len__(self):
        """Get dataset length."""
        return len(self.db)

    def _get_db(self):
        """Get a sample"""
        raise NotImplementedError

    def __getitem__(self, idx):
        """Prepare sample for training given the index."""
        records = copy.deepcopy(self.db[idx])
        # IMREAD_IGNORE_ORIENTATION keeps cv2 from rotating the image based
        # on EXIF metadata.
        records['image'] = cv2.imread(records['image_file'], cv2.IMREAD_COLOR |
                                      cv2.IMREAD_IGNORE_ORIENTATION)
        records['image'] = cv2.cvtColor(records['image'], cv2.COLOR_BGR2RGB)
        # Ground-truth samples carry no detection score; default to 1.
        records['score'] = records['score'] if 'score' in records else 1
        records = self.transform(records)
        return records
@register
@serializable
class KeypointTopDownCocoDataset(KeypointTopDownBaseDataset):
    """COCO dataset for top-down pose estimation. Adapted from
    https://github.com/leoxiaobin/deep-high-resolution-net.pytorch
    Copyright (c) Microsoft, under the MIT License.

    The dataset loads raw features and apply specified transforms
    to return a dict containing the image tensors and other information.

    COCO keypoint indexes:

        0: 'nose',
        1: 'left_eye',
        2: 'right_eye',
        3: 'left_ear',
        4: 'right_ear',
        5: 'left_shoulder',
        6: 'right_shoulder',
        7: 'left_elbow',
        8: 'right_elbow',
        9: 'left_wrist',
        10: 'right_wrist',
        11: 'left_hip',
        12: 'right_hip',
        13: 'left_knee',
        14: 'right_knee',
        15: 'left_ankle',
        16: 'right_ankle'

    Args:
        dataset_dir (str): Root path to the dataset.
        image_dir (str): Path to a directory where images are held.
        anno_path (str): Relative path to the annotation file.
        num_joints (int): Keypoint numbers
        trainsize (list):[w, h] Image target size
        transform (composed(operators)): A sequence of data transforms.
        bbox_file (str): Path to a detection bbox file
            Default: None.
        use_gt_bbox (bool): Whether to use ground truth bbox
            Default: True.
        pixel_std (int): The pixel std of the scale
            Default: 200.
        image_thre (float): The threshold to filter the detection box
            Default: 0.0.
    """

    def __init__(self,
                 dataset_dir,
                 image_dir,
                 anno_path,
                 num_joints,
                 trainsize,
                 transform=[],
                 bbox_file=None,
                 use_gt_bbox=True,
                 pixel_std=200,
                 image_thre=0.0):
        super().__init__(dataset_dir, image_dir, anno_path, num_joints,
                         transform)

        self.bbox_file = bbox_file
        self.use_gt_bbox = use_gt_bbox
        self.trainsize = trainsize
        self.pixel_std = pixel_std
        self.image_thre = image_thre
        self.dataset_name = 'coco'

    def parse_dataset(self):
        """Populate self.db from GT annotations or from a detection file."""
        if self.use_gt_bbox:
            self.db = self._load_coco_keypoint_annotations()
        else:
            self.db = self._load_coco_person_detection_results()

    def _load_coco_keypoint_annotations(self):
        """Build keypoint records from COCO ground-truth annotations."""
        coco = COCO(self.get_anno())
        img_ids = coco.getImgIds()
        gt_db = []
        for index in img_ids:
            im_ann = coco.loadImgs(index)[0]
            width = im_ann['width']
            height = im_ann['height']
            file_name = im_ann['file_name']
            im_id = int(im_ann["id"])

            annIds = coco.getAnnIds(imgIds=index, iscrowd=False)
            objs = coco.loadAnns(annIds)

            # Sanitize bboxes: clip to the image and drop degenerate boxes.
            valid_objs = []
            for obj in objs:
                x, y, w, h = obj['bbox']
                x1 = np.max((0, x))
                y1 = np.max((0, y))
                x2 = np.min((width - 1, x1 + np.max((0, w - 1))))
                y2 = np.min((height - 1, y1 + np.max((0, h - 1))))
                if obj['area'] > 0 and x2 >= x1 and y2 >= y1:
                    obj['clean_bbox'] = [x1, y1, x2 - x1, y2 - y1]
                    valid_objs.append(obj)
            objs = valid_objs

            rec = []
            for obj in objs:
                # Skip annotations without any labelled keypoint.
                if max(obj['keypoints']) == 0:
                    continue

                # BUG FIX: `np.float` was removed in NumPy 1.24; `np.float64`
                # is the dtype it aliased, so behaviour is unchanged.
                joints = np.zeros(
                    (self.ann_info['num_joints'], 3), dtype=np.float64)
                joints_vis = np.zeros(
                    (self.ann_info['num_joints'], 3), dtype=np.float64)
                for ipt in range(self.ann_info['num_joints']):
                    joints[ipt, 0] = obj['keypoints'][ipt * 3 + 0]
                    joints[ipt, 1] = obj['keypoints'][ipt * 3 + 1]
                    joints[ipt, 2] = 0
                    # COCO visibility flag is 0/1/2; collapse to 0/1.
                    t_vis = obj['keypoints'][ipt * 3 + 2]
                    if t_vis > 1:
                        t_vis = 1
                    joints_vis[ipt, 0] = t_vis
                    joints_vis[ipt, 1] = t_vis
                    joints_vis[ipt, 2] = 0

                center, scale = self._box2cs(obj['clean_bbox'][:4])
                rec.append({
                    'image_file': os.path.join(self.img_prefix, file_name),
                    'center': center,
                    'scale': scale,
                    'joints': joints,
                    'joints_vis': joints_vis,
                    'im_id': im_id,
                })
            gt_db.extend(rec)

        return gt_db

    def _box2cs(self, box):
        """Convert an (x, y, w, h) box into a (center, scale) pair."""
        x, y, w, h = box[:4]
        center = np.zeros((2), dtype=np.float32)
        center[0] = x + w * 0.5
        center[1] = y + h * 0.5

        # Pad the box so that it matches the training aspect ratio.
        aspect_ratio = self.trainsize[0] * 1.0 / self.trainsize[1]
        if w > aspect_ratio * h:
            h = w * 1.0 / aspect_ratio
        elif w < aspect_ratio * h:
            w = h * aspect_ratio
        scale = np.array(
            [w * 1.0 / self.pixel_std, h * 1.0 / self.pixel_std],
            dtype=np.float32)
        # Enlarge slightly to include context around the person.
        if center[0] != -1:
            scale = scale * 1.25

        return center, scale

    def _load_coco_person_detection_results(self):
        """Build keypoint records from an external person-detection file."""
        all_boxes = None
        bbox_file_path = os.path.join(self.dataset_dir, self.bbox_file)
        with open(bbox_file_path, 'r') as f:
            all_boxes = json.load(f)

        if not all_boxes:
            print('=> Load %s fail!' % bbox_file_path)
            return None

        kpt_db = []
        for n_img in range(0, len(all_boxes)):
            det_res = all_boxes[n_img]
            # Category id 1 is 'person' in COCO.
            if det_res['category_id'] != 1:
                continue
            file_name = det_res[
                'filename'] if 'filename' in det_res else '%012d.jpg' % det_res[
                    'image_id']
            img_name = os.path.join(self.img_prefix, file_name)
            box = det_res['bbox']
            score = det_res['score']
            im_id = int(det_res['image_id'])

            # Discard low-confidence detections.
            if score < self.image_thre:
                continue

            center, scale = self._box2cs(box)
            # BUG FIX: np.float -> np.float64 (removed in NumPy 1.24).
            joints = np.zeros(
                (self.ann_info['num_joints'], 3), dtype=np.float64)
            joints_vis = np.ones(
                (self.ann_info['num_joints'], 3), dtype=np.float64)
            kpt_db.append({
                'image_file': img_name,
                'im_id': im_id,
                'center': center,
                'scale': scale,
                'score': score,
                'joints': joints,
                'joints_vis': joints_vis,
            })

        return kpt_db
| {
"content_hash": "068d90318a6b90ddd08290420ecf09a7",
"timestamp": "",
"source": "github",
"line_count": 262,
"max_line_length": 80,
"avg_line_length": 33.63740458015267,
"alnum_prop": 0.505957108816521,
"repo_name": "PaddlePaddle/models",
"id": "42991a6f4ec67d6d3d9b38bc159ab379c02fa706",
"size": "9424",
"binary": false,
"copies": "1",
"ref": "refs/heads/release/2.3",
"path": "tutorials/pp-series/HRNet-Keypoint/lib/dataset/keypoint_coco.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "46835"
},
{
"name": "CMake",
"bytes": "8248"
},
{
"name": "Jupyter Notebook",
"bytes": "1720166"
},
{
"name": "Makefile",
"bytes": "2920"
},
{
"name": "Python",
"bytes": "3099697"
},
{
"name": "Shell",
"bytes": "70177"
}
],
"symlink_target": ""
} |
# -*- coding: utf-8 -*-
# ProjectEuler/src/python/problem035.py
#
# Circular primes
# ===============
# Published on Friday, 17th January 2003, 06:00 pm
#
# The number, 197, is called a circular prime because all rotations of the
# digits: 197, 971, and 719, are themselves prime. There are thirteen such
# primes below 100: 2, 3, 5, 7, 11, 13, 17, 31, 37, 71, 73, 79, and 97. How
# many circular primes are there below one million?
import itertools
import re
import util
# A multi-digit number containing an even digit cannot be a circular prime:
# some rotation of it ends in that digit and is therefore divisible by 2.
EVENS = re.compile(r"[02468]")
def circular_primes(max_n):
    """Return the circular primes not exceeding max_n, each listed once.

    A circular prime is a prime all of whose digit *rotations* are also
    prime (197 -> 971 -> 719).

    BUG FIX: the previous implementation tested all digit *permutations*
    instead of rotations, which wrongly rejected true circular primes such
    as 197 (its permutation 917 = 7 * 131 is composite) and also appended
    every permutation repeatedly, inflating the final count.
    """
    res = []
    # Even numbers other than 2 are never prime, so scan odd candidates only.
    for i in itertools.chain((2,), xrange(3, max_n + 1, 2)):
        s = str(i)
        l = len(s)
        # Multi-digit candidates containing an even digit or a 5 always have
        # a rotation divisible by 2 or 5, so they can be skipped early.
        if l > 1 and (EVENS.search(s) or "5" in s):
            continue
        rotations = [int(s[k:] + s[:k]) for k in range(l)]
        if all(util.is_prime(r) for r in rotations):
            res.append(i)
    return res
def main():
cp = circular_primes(1000000)
print "The number of circular primes < 1000000 is:", len(cp)
# Script entry point.
if __name__ == "__main__":
    main()
| {
"content_hash": "6956204197b0af55e73c352745489ef6",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 78,
"avg_line_length": 25.88372093023256,
"alnum_prop": 0.5911949685534591,
"repo_name": "olduvaihand/ProjectEuler",
"id": "3ce4a85172f116f3fed6175badc1f9d3100a7f30",
"size": "1115",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/problem035.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Go",
"bytes": "0"
},
{
"name": "Python",
"bytes": "422751"
}
],
"symlink_target": ""
} |
import mock
from networking_nec.tests.unit.nwa.nwalib import test_client
# Fixture constants shared by the tenant tests below.
TENANT_ID = 'OpenT9004'
# Resource-group paths (not referenced by the tests visible in this module).
DC_RESOURCE_GROUP_POD1 = 'OpenStack/DC1/Common/Pod1Grp/Pod1'
DC_RESOURCE_GROUP_APP1 = 'OpenStack/DC1/Common/App1Grp/App1'
class TestNwaClientTenant(test_client.TestNwaClientBase):
    """Tests for the NWA client tenant API (create/delete tenant)."""

    def test_create_tenant(self):
        """Creating a tenant POSTs the tenant name to the UMF endpoint."""
        rd, rj = self.nwa.tenant.create_tenant(TENANT_ID)
        self.post.assert_called_once_with(
            '/umf/tenant/%s' % TENANT_ID,
            {'TenantName': TENANT_ID})
        self.assertEqual(rd, 200)
        self.assertEqual(rj['status'], 'SUCCESS')
        self.assertEqual(self.post.call_count, 1)

    def _test_delete_tenant(self, status_code, sem_delete_tenant_called):
        """Shared driver for tenant deletion with a stubbed REST DELETE.

        Args:
            status_code: HTTP status the stubbed DELETE call returns.
            sem_delete_tenant_called: whether the tenant semaphore is
                expected to be cleaned up (only done on success).
        """
        with mock.patch('networking_nec.nwa.nwalib.restclient.'
                        'RestClient.delete') as mock_delete, \
                mock.patch('networking_nec.nwa.nwalib.semaphore.'
                           'Semaphore.delete_tenant_semaphore') as mock_sem_del:
            mock_delete.return_value = (status_code, mock.sentinel.data)
            rd, rj = self.nwa.tenant.delete_tenant(TENANT_ID)
            mock_delete.assert_called_once_with('/umf/tenant/%s' % TENANT_ID)
            if sem_delete_tenant_called:
                mock_sem_del.assert_called_once_with(TENANT_ID)
            else:
                self.assertEqual(0, mock_sem_del.call_count)
            # The raw (status, data) pair from the REST layer is passed through.
            self.assertEqual(rd, status_code)
            self.assertEqual(mock.sentinel.data, rj)
            # DELETE is used; no POST must have been issued.
            self.assertEqual(0, self.post.call_count)
            self.assertEqual(1, mock_delete.call_count)

    def test_delete_tenant(self):
        """On HTTP 200 the tenant semaphore must be deleted as well."""
        self._test_delete_tenant(200, sem_delete_tenant_called=True)

    def test_delete_tenant_non_200(self):
        """On HTTP 500 the tenant semaphore must be left untouched."""
        self._test_delete_tenant(500, sem_delete_tenant_called=False)
| {
"content_hash": "9704dbe99a9f93ba0ba855ca2d9521a4",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 76,
"avg_line_length": 40.95348837209303,
"alnum_prop": 0.653038046564452,
"repo_name": "openstack/networking-nec",
"id": "c2b3e285de744f2e83a6586ea27098a228d2a7da",
"size": "2392",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "networking_nec/tests/unit/nwa/nwalib/test_client_tenant.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "431"
},
{
"name": "Python",
"bytes": "387801"
},
{
"name": "Shell",
"bytes": "8526"
}
],
"symlink_target": ""
} |
# Django is an optional dependency: the test models below are only defined
# when it can be imported.
try:
    from django.db import models
    django_loaded = True
except ImportError:
    django_loaded = False

if django_loaded:
    from semantic_version import django_fields as semver_fields

    class VersionModel(models.Model):
        # Fully specified version and requirement-spec fields.
        version = semver_fields.VersionField(verbose_name='my version')
        spec = semver_fields.SpecField(verbose_name='my spec')
        npm_spec = semver_fields.SpecField(syntax='npm', blank=True, verbose_name='npm spec')

    class PartialVersionModel(models.Model):
        # Accepts partial versions (e.g. "1.2") plus optional/nullable variants.
        partial = semver_fields.VersionField(partial=True, verbose_name='partial version')
        optional = semver_fields.VersionField(verbose_name='optional version', blank=True, null=True)
        optional_spec = semver_fields.SpecField(verbose_name='optional spec', blank=True, null=True)

    class CoerceVersionModel(models.Model):
        # coerce=True lets near-miss version strings be coerced into valid ones.
        version = semver_fields.VersionField(verbose_name='my version', coerce=True)
        partial = semver_fields.VersionField(verbose_name='partial version', coerce=True, partial=True)
| {
"content_hash": "dd7947bdc810f112bd93593ec7ed66df",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 103,
"avg_line_length": 45,
"alnum_prop": 0.7236714975845411,
"repo_name": "rbarrois/python-semanticversion",
"id": "5f790b43c21ed606451578c906a0b86d4fef8111",
"size": "1111",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/django_test_app/models.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "2280"
},
{
"name": "Python",
"bytes": "111378"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import numpy as np
import cv2
import random
import copy
from . import data_augment
import threading
import itertools
def union(au, bu, area_intersection):
    """Return the union area of two (x1, y1, x2, y2) boxes.

    The intersection area is passed in to avoid recomputing it.
    """
    area_first = (au[2] - au[0]) * (au[3] - au[1])
    area_second = (bu[2] - bu[0]) * (bu[3] - bu[1])
    return area_first + area_second - area_intersection
def intersection(ai, bi):
    """Return the intersection area of two (x1, y1, x2, y2) boxes (0 if disjoint)."""
    left = max(ai[0], bi[0])
    top = max(ai[1], bi[1])
    overlap_w = min(ai[2], bi[2]) - left
    overlap_h = min(ai[3], bi[3]) - top
    # Non-overlapping boxes produce a negative extent on some axis.
    return 0 if overlap_w < 0 or overlap_h < 0 else overlap_w * overlap_h
def iou(a, b):
    """Return intersection-over-union of two boxes given as (x1, y1, x2, y2)."""
    # Boxes with non-positive width or height have IOU 0 by definition.
    degenerate = a[0] >= a[2] or a[1] >= a[3] or b[0] >= b[2] or b[1] >= b[3]
    if degenerate:
        return 0.0
    area_i = intersection(a, b)
    # The epsilon guards against division by zero for zero-area unions.
    return float(area_i) / float(union(a, b, area_i) + 1e-6)
def get_new_img_size(width, height, img_min_side=600):
    """Return (new_width, new_height) scaled so the shorter side is img_min_side."""
    if width <= height:
        ratio = float(img_min_side) / width
        return img_min_side, int(ratio * height)
    ratio = float(img_min_side) / height
    return int(ratio * width), img_min_side
class SampleSelector:
    """Round-robin class selector used to keep sampling balanced.

    Cycles through every class that has at least one sample; an image is
    kept only when it contains the class whose turn it currently is.
    """

    def __init__(self, class_count):
        # Classes with zero samples can never be matched, so they are
        # excluded from the rotation.
        self.classes = [name for name in class_count.keys() if class_count[name] > 0]
        self.class_cycle = itertools.cycle(self.classes)
        self.curr_class = next(self.class_cycle)

    def skip_sample_for_balanced_class(self, img_data):
        """Return True when img_data lacks the class whose turn it is."""
        for bbox in img_data['bboxes']:
            if bbox['class'] == self.curr_class:
                # Wanted class found: advance the rotation, keep the sample.
                self.curr_class = next(self.class_cycle)
                return False
        return True
def calc_rpn(C, img_data, width, height, resized_width, resized_height, img_length_calc_function):
    """Compute RPN classification and regression targets for a single image.

    Args:
        C: config object providing rpn_stride, anchor_box_scales,
            anchor_box_ratios, rpn_min_overlap and rpn_max_overlap.
        img_data: annotation dict whose 'bboxes' list holds ground-truth
            boxes ({'x1', 'x2', 'y1', 'y2', 'class'}) in original coordinates.
        width, height: original image size.
        resized_width, resized_height: image size after resizing.
        img_length_calc_function: maps the resized image size to the size of
            the network's output feature map.

    Returns:
        Tuple (y_rpn_cls, y_rpn_regr) with a leading batch dimension:
        anchor validity/objectness targets and box-regression targets.
    """
    downscale = float(C.rpn_stride)
    anchor_sizes = C.anchor_box_scales
    anchor_ratios = C.anchor_box_ratios
    num_anchors = len(anchor_sizes) * len(anchor_ratios)

    # calculate the output map size based on the network architecture
    (output_width, output_height) = img_length_calc_function(resized_width, resized_height)

    n_anchratios = len(anchor_ratios)

    # initialise empty output objectives
    y_rpn_overlap = np.zeros((output_height, output_width, num_anchors))
    y_is_box_valid = np.zeros((output_height, output_width, num_anchors))
    y_rpn_regr = np.zeros((output_height, output_width, num_anchors * 4))

    num_bboxes = len(img_data['bboxes'])

    # book-keeping so every GT box can be assigned at least one anchor later
    num_anchors_for_bbox = np.zeros(num_bboxes).astype(int)
    best_anchor_for_bbox = -1 * np.ones((num_bboxes, 4)).astype(int)
    best_iou_for_bbox = np.zeros(num_bboxes).astype(np.float32)
    best_x_for_bbox = np.zeros((num_bboxes, 4)).astype(int)
    best_dx_for_bbox = np.zeros((num_bboxes, 4)).astype(np.float32)

    # get the GT box coordinates, and resize to account for image resizing
    gta = np.zeros((num_bboxes, 4))
    for bbox_num, bbox in enumerate(img_data['bboxes']):
        gta[bbox_num, 0] = bbox['x1'] * (resized_width / float(width))
        gta[bbox_num, 1] = bbox['x2'] * (resized_width / float(width))
        gta[bbox_num, 2] = bbox['y1'] * (resized_height / float(height))
        gta[bbox_num, 3] = bbox['y2'] * (resized_height / float(height))

    # rpn ground truth: every anchor shape at every feature-map position is
    # compared against every GT box
    for anchor_size_idx in range(len(anchor_sizes)):
        for anchor_ratio_idx in range(n_anchratios):
            anchor_x = anchor_sizes[anchor_size_idx] * anchor_ratios[anchor_ratio_idx][0]
            anchor_y = anchor_sizes[anchor_size_idx] * anchor_ratios[anchor_ratio_idx][1]

            for ix in range(output_width):
                # x-coordinates of the current anchor box
                x1_anc = downscale * (ix + 0.5) - anchor_x / 2
                x2_anc = downscale * (ix + 0.5) + anchor_x / 2

                # ignore boxes that go across image boundaries
                if x1_anc < 0 or x2_anc > resized_width:
                    continue

                for jy in range(output_height):
                    # y-coordinates of the current anchor box
                    y1_anc = downscale * (jy + 0.5) - anchor_y / 2
                    y2_anc = downscale * (jy + 0.5) + anchor_y / 2

                    # ignore boxes that go across image boundaries
                    if y1_anc < 0 or y2_anc > resized_height:
                        continue

                    # bbox_type indicates whether an anchor should be a target
                    bbox_type = 'neg'

                    # this is the best IOU for the (x,y) coord and the current anchor
                    # note that this is different from the best IOU for a GT bbox
                    best_iou_for_loc = 0.0

                    for bbox_num in range(num_bboxes):
                        # get IOU of the current GT box and the current anchor box
                        curr_iou = iou([gta[bbox_num, 0], gta[bbox_num, 2], gta[bbox_num, 1], gta[bbox_num, 3]], [x1_anc, y1_anc, x2_anc, y2_anc])
                        # calculate the regression targets if they will be needed
                        if curr_iou > best_iou_for_bbox[bbox_num] or curr_iou > C.rpn_max_overlap:
                            cx = (gta[bbox_num, 0] + gta[bbox_num, 1]) / 2.0
                            cy = (gta[bbox_num, 2] + gta[bbox_num, 3]) / 2.0
                            cxa = (x1_anc + x2_anc) / 2.0
                            cya = (y1_anc + y2_anc) / 2.0

                            tx = (cx - cxa) / (x2_anc - x1_anc)
                            ty = (cy - cya) / (y2_anc - y1_anc)
                            tw = np.log((gta[bbox_num, 1] - gta[bbox_num, 0]) / (x2_anc - x1_anc))
                            th = np.log((gta[bbox_num, 3] - gta[bbox_num, 2]) / (y2_anc - y1_anc))

                        if img_data['bboxes'][bbox_num]['class'] != 'bg':
                            # all GT boxes should be mapped to an anchor box,
                            # so we keep track of which anchor box was best
                            if curr_iou > best_iou_for_bbox[bbox_num]:
                                best_anchor_for_bbox[bbox_num] = [jy, ix, anchor_ratio_idx, anchor_size_idx]
                                best_iou_for_bbox[bbox_num] = curr_iou
                                best_x_for_bbox[bbox_num, :] = [x1_anc, x2_anc, y1_anc, y2_anc]
                                best_dx_for_bbox[bbox_num, :] = [tx, ty, tw, th]

                            # we set the anchor to positive if the IOU is >0.7 (it does not
                            # matter if there was another better box, it just indicates overlap)
                            if curr_iou > C.rpn_max_overlap:
                                bbox_type = 'pos'
                                num_anchors_for_bbox[bbox_num] += 1
                                # we update the regression layer target if this IOU is the
                                # best for the current (x,y) and anchor position
                                if curr_iou > best_iou_for_loc:
                                    best_iou_for_loc = curr_iou
                                    best_regr = (tx, ty, tw, th)

                            # if the IOU is >0.3 and <0.7, it is ambiguous and not
                            # included in the objective
                            if C.rpn_min_overlap < curr_iou < C.rpn_max_overlap:
                                # gray zone between neg and pos
                                if bbox_type != 'pos':
                                    bbox_type = 'neutral'

                    # turn on or off outputs depending on IOUs
                    if bbox_type == 'neg':
                        y_is_box_valid[jy, ix, anchor_ratio_idx + n_anchratios * anchor_size_idx] = 1
                        y_rpn_overlap[jy, ix, anchor_ratio_idx + n_anchratios * anchor_size_idx] = 0
                    elif bbox_type == 'neutral':
                        y_is_box_valid[jy, ix, anchor_ratio_idx + n_anchratios * anchor_size_idx] = 0
                        y_rpn_overlap[jy, ix, anchor_ratio_idx + n_anchratios * anchor_size_idx] = 0
                    elif bbox_type == 'pos':
                        y_is_box_valid[jy, ix, anchor_ratio_idx + n_anchratios * anchor_size_idx] = 1
                        y_rpn_overlap[jy, ix, anchor_ratio_idx + n_anchratios * anchor_size_idx] = 1
                        start = 4 * (anchor_ratio_idx + n_anchratios * anchor_size_idx)
                        y_rpn_regr[jy, ix, start:start + 4] = best_regr

    # we ensure that every bbox has at least one positive RPN region
    for idx in range(num_anchors_for_bbox.shape[0]):
        if num_anchors_for_bbox[idx] == 0:
            # no box with an IOU greater than zero ...
            if best_anchor_for_bbox[idx, 0] == -1:
                continue
            y_is_box_valid[
                best_anchor_for_bbox[idx, 0], best_anchor_for_bbox[idx, 1], best_anchor_for_bbox[idx, 2] + n_anchratios *
                best_anchor_for_bbox[idx, 3]] = 1
            y_rpn_overlap[
                best_anchor_for_bbox[idx, 0], best_anchor_for_bbox[idx, 1], best_anchor_for_bbox[idx, 2] + n_anchratios *
                best_anchor_for_bbox[idx, 3]] = 1
            start = 4 * (best_anchor_for_bbox[idx, 2] + n_anchratios * best_anchor_for_bbox[idx, 3])
            y_rpn_regr[
                best_anchor_for_bbox[idx, 0], best_anchor_for_bbox[idx, 1], start:start + 4] = best_dx_for_bbox[idx, :]

    # move channels first and add a batch dimension
    y_rpn_overlap = np.transpose(y_rpn_overlap, (2, 0, 1))
    y_rpn_overlap = np.expand_dims(y_rpn_overlap, axis=0)

    y_is_box_valid = np.transpose(y_is_box_valid, (2, 0, 1))
    y_is_box_valid = np.expand_dims(y_is_box_valid, axis=0)

    y_rpn_regr = np.transpose(y_rpn_regr, (2, 0, 1))
    y_rpn_regr = np.expand_dims(y_rpn_regr, axis=0)

    pos_locs = np.where(np.logical_and(y_rpn_overlap[0, :, :, :] == 1, y_is_box_valid[0, :, :, :] == 1))
    neg_locs = np.where(np.logical_and(y_rpn_overlap[0, :, :, :] == 0, y_is_box_valid[0, :, :, :] == 1))

    num_pos = len(pos_locs[0])

    # one issue is that the RPN has many more negative than positive regions,
    # so we turn off some of the negative regions. We also limit it to 256 regions.
    num_regions = 256

    if len(pos_locs[0]) > num_regions // 2:
        # BUG FIX: was `num_regions / 2`, which is a float on Python 3 and
        # makes random.sample() raise TypeError (k must be an integer).
        val_locs = random.sample(range(len(pos_locs[0])), len(pos_locs[0]) - num_regions // 2)
        y_is_box_valid[0, pos_locs[0][val_locs], pos_locs[1][val_locs], pos_locs[2][val_locs]] = 0
        num_pos = num_regions // 2

    if len(neg_locs[0]) + num_pos > num_regions:
        # BUG FIX: same integer-division fix; num_pos is now always an int.
        val_locs = random.sample(range(len(neg_locs[0])), len(neg_locs[0]) - num_pos)
        y_is_box_valid[0, neg_locs[0][val_locs], neg_locs[1][val_locs], neg_locs[2][val_locs]] = 0

    y_rpn_cls = np.concatenate([y_is_box_valid, y_rpn_overlap], axis=1)
    y_rpn_regr = np.concatenate([np.repeat(y_rpn_overlap, 4, axis=1), y_rpn_regr], axis=1)

    return np.copy(y_rpn_cls), np.copy(y_rpn_regr)
class threadsafe_iter:
    """Takes an iterator/generator and makes it thread-safe by
    serializing call to the `next` method of given iterator/generator.
    """

    def __init__(self, it):
        self.it = it
        self.lock = threading.Lock()

    def __iter__(self):
        return self

    def __next__(self):
        # Only one thread at a time may advance the underlying iterator.
        with self.lock:
            return next(self.it)

    # BUG FIX: Python 3 looks up __next__, which was missing, so iterating
    # this wrapper raised TypeError. `next` is kept as a Python 2 alias.
    next = __next__
def threadsafe_generator(f):
    """A decorator that takes a generator function and makes it thread-safe.

    The wrapped function's metadata (__name__, __doc__, ...) is preserved
    via functools.wraps.
    """
    from functools import wraps

    @wraps(f)
    def g(*a, **kw):
        return threadsafe_iter(f(*a, **kw))
    return g
def get_anchor_gt(all_img_data, class_count, C, img_length_calc_function, backend, mode='train'):
    """Infinite generator of RPN ground-truth samples (batch size 1).

    Yields (x_img, [y_rpn_cls, y_rpn_regr], img_data_aug) tuples where
    x_img is the preprocessed image and the y_* arrays come from calc_rpn.

    Args:
        all_img_data: list of per-image annotation dicts; each must contain
            'width' and 'height' (remaining keys consumed by data_augment).
        class_count: per-class sample counts used by SampleSelector balancing.
        C: config object (im_size, img_channel_mean, img_scaling_factor,
            std_scaling, balanced_classes, ...).
        img_length_calc_function: maps an input side length to the backbone
            feature-map side length (forwarded to calc_rpn).
        backend: 'tf' transposes outputs from channels-first to channels-last.
        mode: 'train' shuffles the image list on each pass.
    """
    # The following line is not useful with Python 3.5, it is kept for the legacy
    # all_img_data = sorted(all_img_data)
    sample_selector = SampleSelector(class_count)
    while True:
        if mode == 'train':
            random.shuffle(all_img_data)
        for img_data in all_img_data:
            try:
                if C.balanced_classes and sample_selector.skip_sample_for_balanced_class(img_data):
                    continue
                # read in image, and optionally add augmentation
                # NOTE(review): both branches call augment with augment=False, so no
                # augmentation is ever applied even in 'train' mode — confirm intended.
                if mode == 'train':
                    img_data_aug, x_img = data_augment.augment(img_data, C, augment=False)
                else:
                    img_data_aug, x_img = data_augment.augment(img_data, C, augment=False)
                (width, height) = (img_data_aug['width'], img_data_aug['height'])
                (rows, cols, _) = x_img.shape
                assert cols == width
                assert rows == height
                # get image dimensions for resizing
                (resized_width, resized_height) = get_new_img_size(width, height, C.im_size)
                # resize the image so that smalles side is length = 600px
                x_img = cv2.resize(x_img, (resized_width, resized_height), interpolation=cv2.INTER_CUBIC)
                try:
                    y_rpn_cls, y_rpn_regr = calc_rpn(C, img_data_aug, width, height, resized_width, resized_height, img_length_calc_function)
                except:
                    # calc_rpn may fail for this image (e.g. no usable anchors): skip it.
                    continue
                # Zero-center by mean pixel, and preprocess image
                x_img = x_img[:,:, (2, 1, 0)]  # BGR -> RGB
                x_img = x_img.astype(np.float32)
                x_img[:, :, 0] -= C.img_channel_mean[0]
                x_img[:, :, 1] -= C.img_channel_mean[1]
                x_img[:, :, 2] -= C.img_channel_mean[2]
                x_img /= C.img_scaling_factor
                # HWC -> CHW, then add the batch axis.
                x_img = np.transpose(x_img, (2, 0, 1))
                x_img = np.expand_dims(x_img, axis=0)
                # Scale only the regression-target half of the channels.
                y_rpn_regr[:, y_rpn_regr.shape[1]//2:, :, :] *= C.std_scaling
                if backend == 'tf':
                    # channels-first -> channels-last for the TensorFlow backend.
                    x_img = np.transpose(x_img, (0, 2, 3, 1))
                    y_rpn_cls = np.transpose(y_rpn_cls, (0, 2, 3, 1))
                    y_rpn_regr = np.transpose(y_rpn_regr, (0, 2, 3, 1))
                yield np.copy(x_img), [np.copy(y_rpn_cls), np.copy(y_rpn_regr)], img_data_aug
            except Exception as e:
                # Best-effort: report the problem image and keep generating.
                print(e)
                continue
| {
"content_hash": "12bda3f1b2d5ef0d63bd38e0b3f0fbd9",
"timestamp": "",
"source": "github",
"line_count": 337,
"max_line_length": 137,
"avg_line_length": 35.486646884273,
"alnum_prop": 0.6355882598879505,
"repo_name": "alexcu/hermes-bib-detect",
"id": "3551b8c8157341947bff1072b1eed876716af7c0",
"size": "11959",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "keras_frcnn/data_generators.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "3218"
},
{
"name": "Python",
"bytes": "89979"
},
{
"name": "Ruby",
"bytes": "16034"
},
{
"name": "Shell",
"bytes": "7484"
}
],
"symlink_target": ""
} |
"""Sparse categorical cross-entropy losses."""
from __future__ import absolute_import
from __future__ import division
# from __future__ import google_type_annotations
from __future__ import print_function
import tensorflow as tf
def _adjust_labels(labels, predictions):
    """Drop the spurious trailing label dimension added by Keras.

    Labels are cast to int32; when they have the same rank as the
    predictions, the last (size-1) axis is squeezed away.
    """
    int_labels = tf.cast(labels, tf.int32)
    same_rank = len(predictions.shape) == len(int_labels.shape)
    if same_rank:
        int_labels = tf.squeeze(int_labels, [-1])
    return int_labels, predictions
def _validate_rank(labels, predictions, weights):
if weights is not None and len(weights.shape) != len(labels.shape):
raise RuntimeError(
("Weight and label tensors were not of the same rank. weights.shape "
"was %s, and labels.shape was %s.") %
(predictions.shape, labels.shape))
if (len(predictions.shape) - 1) != len(labels.shape):
raise RuntimeError(
("Weighted sparse categorical crossentropy expects `labels` to have a "
"rank of one less than `predictions`. labels.shape was %s, and "
"predictions.shape was %s.") % (labels.shape, predictions.shape))
def per_example_loss(labels, predictions, weights=None):
    """Calculate a per-example sparse categorical crossentropy loss.

    Assumes `predictions` are post-softmax probabilities.

    Args:
        labels: integer class indices in [0, vocab_size).
        predictions: network outputs with softmax already applied.
        weights: optional weight array of the same shape as `labels`;
            if None every example counts equally.

    Returns:
        A tensor of shape predictions.shape[:-1] with one loss value per
        example.
    """
    # Keras adds a spurious inner dimension to the labels; squeeze it off.
    labels, predictions = _adjust_labels(labels, predictions)
    _validate_rank(labels, predictions, weights)
    num_classes = predictions.shape[-1]
    one_hot = tf.cast(tf.one_hot(labels, num_classes), predictions.dtype)
    # Negated sum over the class axis selects the true-class prediction.
    losses = -tf.reduce_sum(predictions * one_hot, axis=[-1])
    if weights is None:
        return losses
    return tf.cast(weights, losses.dtype) * losses
def loss(labels, predictions, weights=None):
    """Calculate a per-batch sparse categorical crossentropy loss.

    Assumes `predictions` are post-softmax probabilities.

    Args:
        labels: integer class indices in [0, vocab_size).
        predictions: network outputs with softmax already applied.
        weights: optional weight array of the same shape as `labels`;
            if None every example counts equally.

    Returns:
        A scalar loss.

    Raises:
        RuntimeError: if the passed tensors do not have the expected ranks.
    """
    # Keras adds a spurious inner dimension to the labels; squeeze it off.
    labels, predictions = _adjust_labels(labels, predictions)
    _validate_rank(labels, predictions, weights)
    example_losses = per_example_loss(labels, predictions, weights)
    if weights is None:
        return tf.reduce_mean(example_losses)
    # Weighted mean; epsilon keeps the division safe for all-zero weights.
    numerator = tf.reduce_sum(example_losses)
    denominator = tf.reduce_sum(tf.cast(weights, predictions.dtype)) + 1e-5
    return numerator / denominator
| {
"content_hash": "78f85fb845ff11ddd4870d2413c44538",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 79,
"avg_line_length": 39.01086956521739,
"alnum_prop": 0.7152410142100863,
"repo_name": "alexgorban/models",
"id": "b88d8e3665b70be63aaa4aa2f90bb78e4bd9af3f",
"size": "4278",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "official/nlp/modeling/losses/weighted_sparse_categorical_crossentropy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "1619012"
},
{
"name": "Dockerfile",
"bytes": "9821"
},
{
"name": "GLSL",
"bytes": "976"
},
{
"name": "HTML",
"bytes": "147010"
},
{
"name": "JavaScript",
"bytes": "33316"
},
{
"name": "Jupyter Notebook",
"bytes": "454746"
},
{
"name": "Makefile",
"bytes": "4933"
},
{
"name": "Python",
"bytes": "16363107"
},
{
"name": "Shell",
"bytes": "144095"
},
{
"name": "Starlark",
"bytes": "148029"
}
],
"symlink_target": ""
} |
import json
import aiohttp
from asyncio import coroutine
from ...ut.async_request import request
from .base import Base
class Env(Base):
    """Lazily-fetched GitHub gist environment.

    File contents are read from the gist metadata returned by the GitHub
    API; the metadata is fetched once and cached on the instance.
    """

    def __init__(self, id):
        super(Env, self).__init__()
        # `uri` is the module-level helper defined below this class.
        self.uri = uri(id)

    @coroutine
    def read(self, name):
        """Return the content of gist file `name` as UTF-8 bytes."""
        # NOTE(review): asyncio.coroutine / `yield from` coroutines are
        # deprecated (removed in Python 3.11); consider async def / await.
        return (yield from self.meta)['files'][name]['content'].encode('utf-8')

    @property
    @coroutine
    def meta(self):
        """Gist metadata dict, fetched from the API on first access and cached."""
        value = getattr(self, '_meta', None)
        if value is None:
            resp = yield from request(method='GET', url=self.uri)
            # Cache the decoded JSON so subsequent reads skip the network.
            self._meta = value = json.loads(
                (yield from resp.read()).decode('utf-8')
            )
        return value
def uri(id):
    """Build the GitHub API URL for the gist with the given id."""
    template = 'https://api.github.com/gists/{}'
    return template.format(id)
| {
"content_hash": "846112a18abfbf851d2e70010716cfb7",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 79,
"avg_line_length": 24.129032258064516,
"alnum_prop": 0.5775401069518716,
"repo_name": "Answeror/torabot",
"id": "50c61e9a09609d2c7031de1dda38d0472e995b55",
"size": "748",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "torabot/lang/envs/gist.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "174712"
},
{
"name": "JavaScript",
"bytes": "2849805"
},
{
"name": "Python",
"bytes": "552234"
},
{
"name": "Shell",
"bytes": "822"
},
{
"name": "TeX",
"bytes": "3381"
},
{
"name": "XSLT",
"bytes": "5063"
}
],
"symlink_target": ""
} |
"""
rx320 tcp/ip server
:copyright: (c) 2013 by Tom van Dijk
:license: BSD, see LICENSE for more details.
"""
from rx320 import RX320
import socket
import threading
class RX320Connection(threading.Thread):
    """One client connection: reads newline-separated text commands from a
    TCP socket and forwards them to the shared RX320 radio controller.

    The thread starts itself on construction and runs as a daemon.
    NOTE: this file is Python 2 code (print statements and
    ``except socket.error, msg`` appear in the __main__ block).
    """

    def __init__(self, connection, controller, *args, **kwargs):
        super(RX320Connection,self).__init__(*args, **kwargs)
        # Accepted client socket.
        self.connection = connection
        # Shared RX320 controller; one instance serves all connections.
        self.controller = controller
        # Daemon thread: does not block interpreter shutdown.
        self.daemon = True
        self.start()

    def linesplit(self):
        """Generator yielding complete, stripped lines received on the socket."""
        # Local name shadows the `socket` module, but only inside this method.
        socket = self.connection
        buffer = socket.recv(4096)
        done = False
        while not done:
            if "\n" in buffer:
                (line, buffer) = buffer.split("\n", 1)
                yield line.strip()
            else:
                more = socket.recv(4096)
                if not more:
                    # Peer closed the connection.
                    done = True
                else:
                    buffer = buffer+more
        if buffer:
            # Flush a trailing partial line (no final newline).
            yield buffer.strip()

    def run(self):
        """Thread body: handle each command line; always close the socket."""
        try:
            for line in self.linesplit():
                result = self.handle(line.split())
                self.connection.sendall("%s\n" % result)
        finally:
            self.connection.close()

    def handle(self, command):
        """Dispatch one tokenized command and return the reply string.

        Setters reply "Done"; getters reply the value, or 'NA' when the
        controller has not reported that attribute yet; anything
        unrecognized replies "ERROR".
        """
        if len(command) == 0:
            return "ERROR"
        if command[0] == 'ALL' and len(command) == 4:
            # ALL <freq> <mode> <filter>
            self.controller.set_mode(int(command[2]))
            self.controller.set_filter(int(command[3]))
            self.controller.set_freq(int(command[1]))
            return "Done"
        elif command[0] == 'FREQ' and len(command) == 2:
            self.controller.set_freq(int(command[1]))
            return "Done"
        elif command[0] == 'VOL' and len(command) == 2:
            self.controller.set_speaker_volume(int(command[1]))
            return "Done"
        elif command[0] == 'LINEVOL' and len(command) == 2:
            self.controller.set_line_volume(int(command[1]))
            return "Done"
        elif command[0] == 'MODE' and len(command) == 2:
            self.controller.set_mode(int(command[1]))
            return "Done"
        elif command[0] == 'FILTER' and len(command) == 2:
            self.controller.set_filter(int(command[1]))
            return "Done"
        elif command[0] == 'AGC' and len(command) == 2:
            self.controller.set_agc(int(command[1]))
            return "Done"
        elif command[0] == 'GETMODE':
            # Getters use hasattr because the controller only grows these
            # attributes after the corresponding setter / first poll.
            if hasattr(self.controller, 'mode'):
                return str(self.controller.mode)
            else:
                return 'NA'
        elif command[0] == 'GETFILTER':
            if hasattr(self.controller, 'filter'):
                return str(self.controller.filter)
            else:
                return 'NA'
        elif command[0] == 'GETAGC':
            if hasattr(self.controller, 'agc'):
                return str(self.controller.agc)
            else:
                return 'NA'
        elif command[0] == 'GETSMETER':
            return str(self.controller.strength)
        elif command[0] == 'GETVOL':
            if hasattr(self.controller, 'speaker_volume'):
                return str(self.controller.speaker_volume)
            else:
                return 'NA'
        elif command[0] == 'GETLINEVOL':
            if hasattr(self.controller, 'line_volume'):
                return str(self.controller.line_volume)
            else:
                return 'NA'
        elif command[0] == 'GETFREQ':
            if hasattr(self.controller, 'freq'):
                return str(self.controller.freq)
            else:
                return 'NA'
        # Unknown command or wrong argument count.
        return "ERROR"
# Entry point: parse CLI options, initialize the radio, then accept clients.
# NOTE: Python 2 syntax below (print statements, `except socket.error, msg`).
if __name__ == '__main__':
    import optparse
    parser = optparse.OptionParser(
        usage = "%prog [options] device",
        description = "RX320 controller"
    )
    parser.add_option("-p", "--port",
        dest = "local_port",
        action = "store",
        type = "int",
        help = "TCP/IP port",
        default = 4665
    )
    parser.add_option("-s", "--sleep",
        dest = "sleep_time",
        action = "store",
        type = "float",
        help = "Seconds to wait between strength polls",
        default = 0.2
    )
    (options, args) = parser.parse_args()
    if len(args) != 1:
        parser.error('need device as argument, e.g. /dev/tty...')
    controller = RX320(args[0], options.sleep_time)
    # initialize controller
    controller.set_volume(99)
    controller.set_mode(RX320.MODE_LSB)
    controller.set_agc(RX320.AGC_MEDIUM)
    controller.set_filter(RX320.FILTERS.index(2100))
    controller.set_freq(3630000)
    controller.set_line_volume(16)
    controller.set_speaker_volume(96)
    print "Initialized RX320 '%s'" % args[0]
    srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Allow quick restarts on the same port.
    srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    srv.bind(('', options.local_port))
    srv.listen(1)
    print "Waiting for connections on port %d..." % (options.local_port)
    while True:
        try:
            connection, addr = srv.accept()
            # Disable Nagle: command/reply exchanges are tiny and latency-bound.
            connection.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
            # The connection thread starts itself in __init__.
            c = RX320Connection(connection, controller)
        except KeyboardInterrupt:
            break
        except socket.error, msg:
            # Best-effort: ignore transient accept() errors and keep serving.
            pass
| {
"content_hash": "ba5c18434312617675f29b0e11d215e9",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 76,
"avg_line_length": 34.13548387096774,
"alnum_prop": 0.5407295407295407,
"repo_name": "trolando/python-rx320",
"id": "e3057ae9d1f9a7112e6bfe913c06502ff05f27a0",
"size": "5291",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rxserver.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "9221"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from codefisher_apps.pastelsvg.models import Icon, PastelSVGDonation, ProtectedDownload, IconRequest, IconRequestComment, UseExample
from django.conf import settings
from upvotes.admin import RequestAdmin, RequestCommentAdmin
# Wire the icon-request models to the generic upvotes admin classes.
admin.site.register(IconRequest, RequestAdmin)
admin.site.register(IconRequestComment, RequestCommentAdmin)
class IconAdmin(admin.ModelAdmin):
    """Admin list for Pastel SVG icons with an inline preview thumbnail."""
    list_display = ('file_name', 'icon', 'title', 'description', 'date_modified')

    def icon(self, obj):
        # 16px preview image served from the PASTEL_SVG_WEB static root.
        # NOTE(review): file_name is interpolated into HTML unescaped, and
        # `allow_tags` was removed in Django 2.0 (use format_html instead) —
        # confirm the target Django version.
        return '<img alt="%s" src="%s16/%s.png">' % (obj.file_name, settings.PASTEL_SVG_WEB, obj.file_name)
    icon.allow_tags = True

admin.site.register(Icon, IconAdmin)
class PastelSVGDonationAdmin(admin.ModelAdmin):
    """Admin list for donations towards the Pastel SVG icon set."""
    list_display = ('user', 'date', 'amount', 'validated')

admin.site.register(PastelSVGDonation, PastelSVGDonationAdmin)
class UseExampleAdmin(admin.ModelAdmin):
    """Admin list for user-submitted examples of the icons in use."""
    list_display = ('title', 'url', 'author_name', 'posted', 'validated')

admin.site.register(UseExample, UseExampleAdmin)
class ProtectedDownloadAdmin(admin.ModelAdmin):
    """Admin for protected downloads; captures name/size from the upload."""

    list_display = ('title', 'version', 'file', 'release_date')
    fields = ('file', 'title', 'version', 'description', 'public')

    def save_model(self, request, obj, form, change):
        """Record the uploaded file's original name and size before saving."""
        uploaded = request.FILES.get("file") if "file" in request.FILES else None
        if uploaded is not None:
            obj.file_name = uploaded.name
            obj.file_size = uploaded.size
            obj.save()
        super(ProtectedDownloadAdmin, self).save_model(request, obj, form, change)

admin.site.register(ProtectedDownload, ProtectedDownloadAdmin)
"content_hash": "880fcac2f0648e3ec3261801e664a90e",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 132,
"avg_line_length": 40.02439024390244,
"alnum_prop": 0.7074954296160878,
"repo_name": "codefisher/codefisher_apps",
"id": "e4abc8906609290b2ebf4a321a0edfd5071ad893",
"size": "1641",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pastelsvg/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1920"
},
{
"name": "HTML",
"bytes": "32105"
},
{
"name": "JavaScript",
"bytes": "1146"
},
{
"name": "Python",
"bytes": "115129"
}
],
"symlink_target": ""
} |
"""Parsing module """
import logging
import os
import uuid
# noinspection PyPep8Naming
import xml.etree.ElementTree as ET
# noinspection PyPackageRequirements
from typing import Dict, List, Type
from airflow.utils.trigger_rule import TriggerRule
from o2a.converter.constants import HDFS_FOLDER
from o2a.converter.oozie_node import OozieActionNode, OozieControlNode
from o2a.converter.renderers import BaseRenderer
from o2a.converter.workflow import Workflow
from o2a.mappers.action_mapper import ActionMapper
from o2a.mappers.base_mapper import BaseMapper
from o2a.mappers.decision_mapper import DecisionMapper
from o2a.mappers.dummy_mapper import DummyMapper
from o2a.mappers.end_mapper import EndMapper
from o2a.mappers.fork_mapper import ForkMapper
from o2a.mappers.join_mapper import JoinMapper
from o2a.mappers.kill_mapper import KillMapper
from o2a.mappers.start_mapper import StartMapper
from o2a.o2a_libs.property_utils import PropertySet
from o2a.transformers.base_transformer import BaseWorkflowTransformer
from o2a.utils import xml_utils
# noinspection PyDefaultArgument
class WorkflowXmlParser:
    """Parses XML of an Oozie workflow.

    Reads <input_dir>/hdfs/workflow.xml and populates `workflow.nodes`
    with OozieActionNode / OozieControlNode objects, one per Oozie node.
    """

    def __init__(
        self,
        props: PropertySet,
        action_mapper: Dict[str, Type[ActionMapper]],
        renderer: BaseRenderer,
        workflow: Workflow,
        transformers: List[BaseWorkflowTransformer] = None,
    ):
        # props: resolved job/workflow properties handed to the mappers.
        # action_mapper: maps an Oozie action tag name to its mapper class.
        # renderer: output renderer forwarded to action mappers.
        # workflow: target Workflow whose `nodes` dict this parser fills.
        # transformers: optional transformers forwarded to action mappers.
        self.workflow = workflow
        self.workflow_file = os.path.join(workflow.input_directory_path, HDFS_FOLDER, "workflow.xml")
        self.props = props
        self.action_map = action_mapper
        self.renderer = renderer
        self.transformers = transformers

    def parse_kill_node(self, kill_node: ET.Element):
        """
        When a workflow node reaches the `kill` node, it finishes in an error.
        A workflow definition may have zero or more kill nodes.
        """
        mapper = KillMapper(
            oozie_node=kill_node,
            name=kill_node.attrib["name"],
            dag_name=self.workflow.dag_name,
            # Kill fires when any upstream task fails.
            trigger_rule=TriggerRule.ONE_FAILED,
            props=self.props,
        )
        oozie_control_node = OozieControlNode(mapper)
        mapper.on_parse_node()
        logging.info(f"Parsed {mapper.name} as Kill Node.")
        self.workflow.nodes[kill_node.attrib["name"]] = oozie_control_node

    def parse_end_node(self, end_node):
        """
        Upon reaching the end node, the workflow is considered completed successfully.
        Thus it gets mapped to a dummy node that always completes.
        """
        mapper = EndMapper(oozie_node=end_node, name=end_node.attrib["name"], dag_name=self.workflow.dag_name)
        oozie_control_node = OozieControlNode(mapper)
        mapper.on_parse_node()
        logging.info(f"Parsed {mapper.name} as End Node.")
        self.workflow.nodes[end_node.attrib["name"]] = oozie_control_node

    def parse_fork_node(self, root, fork_node):
        """
        Fork nodes need to be dummy operators with multiple parallel downstream
        tasks.
        This parses the fork node, the action nodes that it references and then
        the join node at the end.
        This will only parse well-formed xml-adhering workflows where all paths
        end at the join node.
        """
        fork_name = fork_node.attrib["name"]
        mapper = ForkMapper(oozie_node=fork_node, name=fork_name, dag_name=self.workflow.dag_name)
        oozie_control_node = OozieControlNode(mapper)
        mapper.on_parse_node()
        logging.info(f"Parsed {mapper.name} as Fork Node.")
        paths = []
        for node in fork_node:
            if "path" in node.tag:
                # Parse all the downstream tasks that can run in parallel.
                curr_name = node.attrib["start"]
                paths.append(xml_utils.find_node_by_name(root, curr_name))
        self.workflow.nodes[fork_name] = oozie_control_node
        for path in paths:
            oozie_control_node.downstream_names.append(path.attrib["name"])
            logging.info(f"Added {mapper.name}'s downstream: {path.attrib['name']}")
            # Theoretically these will all be action nodes, however I don't
            # think that is guaranteed.
            # The end of the execution path has not been reached
            self.parse_node(root, path)
            # NOTE(review): drops the element when parsing produced no node,
            # so the top-level loop in parse_workflow does not revisit it —
            # confirm this is the intended de-duplication mechanism.
            if path.attrib["name"] not in self.workflow.nodes:
                root.remove(path)

    def parse_join_node(self, join_node):
        """
        Join nodes wait for the corresponding beginning fork node paths to
        finish. As the parser we are assuming the Oozie workflow follows the
        schema perfectly.
        """
        mapper = JoinMapper(
            oozie_node=join_node, name=join_node.attrib["name"], dag_name=self.workflow.dag_name
        )
        oozie_control_node = OozieControlNode(mapper)
        oozie_control_node.downstream_names.append(join_node.attrib["to"])
        mapper.on_parse_node()
        logging.info(f"Parsed {mapper.name} as Join Node.")
        self.workflow.nodes[join_node.attrib["name"]] = oozie_control_node

    def parse_decision_node(self, decision_node):
        """
        A decision node enables a workflow to make a selection on the execution
        path to follow.
        The behavior of a decision node can be seen as a switch-case statement.
        A decision node consists of a list of predicates-transition pairs plus
        a default transition. Predicates are evaluated in order or appearance
        until one of them evaluates to true and the corresponding transition is
        taken. If none of the predicates evaluates to true the default
        transition is taken.
        example oozie wf decision node:
        <decision name="[NODE-NAME]">
            <switch>
                <case to="[NODE_NAME]">[PREDICATE]</case>
                ...
                <case to="[NODE_NAME]">[PREDICATE]</case>
                <default to="[NODE_NAME]"/>
            </switch>
        </decision>
        """
        mapper = DecisionMapper(
            oozie_node=decision_node,
            name=decision_node.attrib["name"],
            dag_name=self.workflow.dag_name,
            props=self.props,
        )
        oozie_control_node = OozieControlNode(mapper)
        # decision_node[0] is the <switch>; its children are <case>/<default>.
        for cases in decision_node[0]:
            oozie_control_node.downstream_names.append(cases.attrib["to"])
        mapper.on_parse_node()
        logging.info(f"Parsed {mapper.name} as Decision Node.")
        self.workflow.nodes[decision_node.attrib["name"]] = oozie_control_node

    def parse_action_node(self, action_node: ET.Element):
        """
        Action nodes are the mechanism by which a workflow triggers the
        execution of a computation/processing task.
        Action nodes are required to have an action-choice (map-reduce, etc.),
        ok, and error node in the xml.
        """
        # The 0th element of the node is the actual action tag.
        # In the form of 'action'
        action_operation_node = action_node[0]
        action_name = action_operation_node.tag
        mapper: BaseMapper
        if action_name not in self.action_map:
            # Unsupported action type: fall back to a no-op dummy mapper.
            action_name = "unknown"
            mapper = DummyMapper(
                oozie_node=action_operation_node,
                name=action_node.attrib["name"],
                dag_name=self.workflow.dag_name,
                props=self.props,
            )
        else:
            map_class = self.action_map[action_name]
            mapper = map_class(
                oozie_node=action_operation_node,
                name=action_node.attrib["name"],
                props=self.props,
                dag_name=self.workflow.dag_name,
                action_mapper=self.action_map,
                renderer=self.renderer,
                input_directory_path=self.workflow.input_directory_path,
                output_directory_path=self.workflow.output_directory_path,
                jar_files=self.workflow.jar_files,
                transformers=self.transformers,
            )
        oozie_action_node = OozieActionNode(mapper)
        # Both <ok> and <error> transitions are mandatory per the Oozie schema.
        ok_node = action_node.find("ok")
        if ok_node is None:
            raise Exception(f"Missing ok node in {action_node}")
        oozie_action_node.downstream_names.append(ok_node.attrib["to"])
        error_node = action_node.find("error")
        if error_node is None:
            raise Exception(f"Missing error node in {action_node}")
        oozie_action_node.error_downstream_name = error_node.attrib["to"]
        mapper.on_parse_node()
        logging.info(f"Parsed {mapper.name} as Action Node of type {action_name}.")
        self.workflow.nodes[mapper.name] = oozie_action_node

    def parse_start_node(self, start_node):
        """
        The start node is the entry point for a workflow job, it indicates the
        first workflow node the workflow job must transition to.
        When a workflow is started, it automatically transitions to the
        node specified in the start.
        A workflow definition must have one start node.
        """
        # Theoretically this could cause conflicts, but it is very unlikely
        start_name = "start_node_" + str(uuid.uuid4())[:4]
        mapper = StartMapper(
            oozie_node=start_node,
            name=start_name,
            dag_name=self.workflow.dag_name,
            props=self.props,
            trigger_rule=TriggerRule.DUMMY,
        )
        oozie_control_node = OozieControlNode(mapper)
        oozie_control_node.downstream_names.append(start_node.attrib["to"])
        mapper.on_parse_node()
        logging.info(f"Parsed {mapper.name} as Start Node.")
        self.workflow.nodes[start_name] = oozie_control_node

    def parse_node(self, root, node):
        """
        Given a node, determines its tag, and then passes it to the correct
        parser.
        :param root: The root node of the XML tree.
        :param node: The node to parse.
        """
        # Substring matching on the tag; unknown tags are silently ignored.
        if "action" in node.tag:
            self.parse_action_node(node)
        elif "start" in node.tag:
            self.parse_start_node(node)
        elif "kill" in node.tag:
            self.parse_kill_node(node)
        elif "end" in node.tag:
            self.parse_end_node(node)
        elif "fork" in node.tag:
            self.parse_fork_node(root, node)
        elif "join" in node.tag:
            self.parse_join_node(node)
        elif "decision" in node.tag:
            self.parse_decision_node(node)

    def parse_workflow(self):
        """Parses workflow replacing invalid characters in the names of the nodes"""
        tree = ET.parse(self.workflow_file)
        root = tree.getroot()
        for node in tree.iter():
            # Strip namespaces
            # NOTE(review): assumes every tag is namespaced ('{ns}tag');
            # split("}")[1] raises IndexError otherwise. The trailing [0:]
            # slice is a no-op.
            node.tag = node.tag.split("}")[1][0:]
        logging.info("Stripped namespaces, and replaced invalid characters.")
        for node in root:
            logging.debug(f"Parsing node: {node}")
            self.parse_node(root, node)
| {
"content_hash": "22ff3bc0312fe742ec412db28bd8d272",
"timestamp": "",
"source": "github",
"line_count": 295,
"max_line_length": 110,
"avg_line_length": 37.40338983050847,
"alnum_prop": 0.6262461482689867,
"repo_name": "GoogleCloudPlatform/oozie-to-airflow",
"id": "103c2f019edb9307fc82d6f317d04ecb6036882f",
"size": "11628",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "o2a/converter/workflow_xml_parser.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "528273"
},
{
"name": "Shell",
"bytes": "57460"
},
{
"name": "Smarty",
"bytes": "31948"
}
],
"symlink_target": ""
} |
import os
import sys
from base64 import b64encode
from functools import wraps
from getpass import getpass
from subprocess import Popen
import peewee
import argh
from argh.decorators import aliases, arg
from tmc import conf
from tmc import api
from tmc.errors import (APIError, NoCourseSelected, NoExerciseSelected,
TMCError, TMCExit)
from tmc.exercise_tests.basetest import run_test
from tmc.files import download_exercise, submit_exercise
from tmc.models import Config, Course, Exercise, reset_db
from tmc.coloring import infomsg
from tmc.ui.menu import Menu
from tmc.ui.prompt import custom_prompt, yn_prompt
from tmc.ui.spinner import Spinner
from tmc.version import __version__
try:
from subprocess import DEVNULL
except ImportError:
DEVNULL = open(os.devnull, 'wb')
from tmc.coloring import as_success, as_error
def selected_course(func):
    """Decorator: prepend the currently selected course to the call."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        # Course.get_selected() raises if no course has been selected yet.
        return func(Course.get_selected(), *args, **kwargs)
    return wrapper
def selected_exercise(func):
    """Decorator: prepend the currently selected exercise to the call."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        # Exercise.get_selected() raises if no exercise has been selected yet.
        return func(Exercise.get_selected(), *args, **kwargs)
    return wrapper
def false_exit(func):
    """Decorator: terminate the program when the wrapped call returns False.

    Under TMC_TESTING a TMCExit is raised instead of exiting so the test
    harness can intercept it.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        result = func(*args, **kwargs)
        if result is not False:
            return result
        if "TMC_TESTING" in os.environ:
            raise TMCExit()
        sys.exit(-1)
    return wrapper
def check_for_updates():
    """Compare the installed tmc.py version against the newest PyPI release.

    Makes a network call to the PyPI XML-RPC API and prints the result.
    """
    from xmlrpc.client import ServerProxy
    # NOTE(review): distutils is deprecated (removed in Python 3.12); the
    # third-party `packaging` module is the modern replacement for this.
    from distutils.version import StrictVersion
    pypi = ServerProxy("https://pypi.python.org/pypi")
    version = StrictVersion(__version__)
    pypiversion = StrictVersion(pypi.package_releases("tmc")[0])
    if pypiversion > version:
        infomsg("There is a new version available. ({})".format(pypiversion))
        print("You can upgrade tmc.py with either of these ways, depending",
              "on the way you installed tmc.py in the first place.",
              "\nIf you installed it with pip:",
              "\n sudo pip install --upgrade tmc",
              "\nIf you installed it with the installation script:",
              "\n Run the script again and select upgrade.")
    elif pypiversion < version:
        print("You are running a newer version than available.")
    else:
        print("You are running the most current version.")
@aliases("init", "conf")
@arg("-s", "--server", help="Server address to be used.")
@arg("-u", "--username", help="Username to be used.")
@arg("-p", "--password", help="Password to be used.")
@arg("-i", "--id", dest="tid", help="Course ID to be used.")
@arg("-a", "--auto", action="store_true",
     help="Don't prompt for download path, use default instead")
@false_exit
def configure(server=None, username=None, password=None, tid=None, auto=False):
    """
    Configure tmc.py to use your account.

    Prompts interactively for any of server/username/password not supplied
    as options, verifies the credentials against the server, then selects
    a course. Returns False (-> program exit via @false_exit) when the
    user declines to override an existing configuration or authentication
    ultimately fails.
    """
    # Fully interactive invocation: guard against clobbering existing config.
    if not server and not username and not password and not tid:
        if Config.has():
            if not yn_prompt("Override old configuration", False):
                return False
    reset_db()
    if not server:
        while True:
            server = input("Server url [https://tmc.mooc.fi/mooc/]: ").strip()
            if len(server) == 0:
                server = "https://tmc.mooc.fi/mooc/"
            if not server.endswith('/'):
                server += '/'
            if not (server.startswith("http://")
                    or server.startswith("https://")):
                ret = custom_prompt(
                    "Server should start with http:// or https://\n" +
                    "R: Retry, H: Assume http://, S: Assume https://",
                    ["r", "h", "s"], "r")
                if ret == "r":
                    continue
                # Strip previous schema
                if "://" in server:
                    server = server.split("://")[1]
                if ret == "h":
                    server = "http://" + server
                elif ret == "s":
                    server = "https://" + server
            break
        print("Using URL: '{0}'".format(server))
    while True:
        if not username:
            username = input("Username: ")
        if not password:
            password = getpass("Password: ")
        # wow, such security
        # HTTP basic-auth style token: base64("user:pass").
        token = b64encode(
            bytes("{0}:{1}".format(username, password), encoding='utf-8')
        ).decode("utf-8")
        try:
            api.configure(url=server, token=token, test=True)
        except APIError as e:
            print(e)
            if auto is False and yn_prompt("Retry authentication"):
                # Force re-prompting both credentials on the next iteration.
                username = password = None
                continue
            return False
        break
    if tid:
        select(course=True, tid=tid, auto=auto)
    else:
        select(course=True)
@aliases("cur")
@selected_exercise
def current(exercise):
    """
    Prints some small info about the current exercise.
    """
    # Reuse the list formatting, restricted to the selected exercise.
    list_all(single=exercise)
@aliases("dl")
@arg("-i", "--id", dest="tid", help="Download this ID.")
@arg("-a", "--all", default=False, action="store_true",
     dest="dl_all", help="Download all exercises.")
@arg("-f", "--force", default=False, action="store_true",
     help="Should the download be forced.")
@arg("-j", "--upgradejava", default=False, action="store_true",
     help="Should the Java target be upgraded from 1.6 to 1.7")
@arg("-u", "--update", default=False, action="store_true",
     help="Update the tests of the exercise.")
@selected_course
def download(course, tid=None, dl_all=False, force=False, upgradejava=False,
             update=False):
    """
    Download the exercises from the server.

    With --all every exercise is fetched; with --id only that exercise;
    otherwise all not-yet-completed exercises are fetched and completed
    ones just have their downloaded-state refreshed.
    """
    def fetch(exercise_id):
        # Resolve the TMC id to the local record and download it.
        download_exercise(Exercise.get(Exercise.tid == exercise_id),
                          force=force,
                          update_java=upgradejava,
                          update=update)

    if dl_all:
        for exercise in list(course.exercises):
            fetch(exercise.tid)
        return
    if tid is not None:
        fetch(int(tid))
        return
    for exercise in list(course.exercises):
        if exercise.is_completed:
            exercise.update_downloaded()
        else:
            fetch(exercise.tid)
@aliases("next")
@selected_course
@false_exit
def skip(course, num=1):
    """
    Go to the next exercise.

    Moves the selection `num` steps forward (negative steps backward).
    Returns False (-> exit via @false_exit) when there is no exercise
    at the target position.
    """
    sel = None
    try:
        sel = Exercise.get_selected()
        # A selection belonging to another course does not count.
        if sel.course.tid != course.tid:
            sel = None
    except NoExerciseSelected:
        pass
    if sel is None:
        sel = course.exercises.first()
    else:
        try:
            # NOTE(review): steps by database primary key, assuming exercise
            # rows have contiguous ids — confirm this holds after re-syncs.
            sel = Exercise.get(Exercise.id == sel.id + num)
        except peewee.DoesNotExist:
            print("There are no more exercises in this course.")
            return False
    sel.set_select()
    list_all(single=sel)
@aliases("prev")
def previous():
    """Go to the previous exercise (inverse of `skip`)."""
    skip(num=-1)
@aliases("resetdb")
def reset():
    """
    Resets the local database.

    Only the progress-tracking database is dropped; downloaded exercise
    files are left untouched. Asks for confirmation first.
    """
    print("This won't remove any of your files,",
          "but instead the local database that tracks your progress.")
    if yn_prompt("Reset database", False):
        reset_db()
        # Fix user-facing grammar: "resetted" -> "reset".
        print("Database reset. You will need to tmc configure again.")
@arg('command', help='The command')
@selected_exercise
def run(exercise, command):
    """
    Spawns a process with `command path-of-exercise`
    """
    # Detached via nohup with output discarded, so e.g. an editor keeps
    # running after tmc itself exits.
    Popen(['nohup', command, exercise.path()], stdout=DEVNULL, stderr=DEVNULL)
@aliases("sel")
@arg("-c", "--course", action="store_true", help="Select a course instead.")
@arg("-i", "--id", dest="tid",
     help="Select this ID without invoking the curses UI.")
def select(course=False, tid=None, auto=False):
    """
    Select a course or an exercise.

    Without --id a curses menu is shown; with --id the selection is made
    directly. Selecting a course also refreshes its exercises, prompts for
    a download path when none is set, and selects its first exercise.
    """
    if course:
        # Refresh the course list from the server before choosing.
        update(course=True)
        course = None
        try:
            course = Course.get_selected()
        except NoCourseSelected:
            pass
        ret = {}
        if not tid:
            ret = Menu.launch("Select a course",
                              Course.select().execute(),
                              course)
        else:
            ret["item"] = Course.get(Course.tid == tid)
        if "item" in ret:
            ret["item"].set_select()
            # Refresh the exercise list of the newly selected course.
            update()
            if ret["item"].path == "":
                select_a_path(auto=auto)
            # Selects the first exercise in this course
            skip()
            return
        else:
            print("You can select the course with `tmc select --course`")
            return
    else:
        selected = None
        try:
            selected = Exercise.get_selected()
        except NoExerciseSelected:
            pass
        ret = {}
        if not tid:
            ret = Menu.launch("Select an exercise",
                              Course.get_selected().exercises,
                              selected)
        else:
            ret["item"] = Exercise.byid(tid)
        if "item" in ret:
            ret["item"].set_select()
            print("Selected {}".format(ret["item"]))
@aliases("su")
@arg("-i", "--id", dest="tid", help="Submit this ID.")
@arg("-p", "--pastebin", default=False, action="store_true",
     help="Should the submission be sent to TMC pastebin.")
@arg("-r", "--review", default=False, action="store_true",
     help="Request a review for this submission.")
@selected_course
@false_exit
def submit(course, tid=None, pastebin=False, review=False):
    """
    Submit the selected exercise to the server.

    With --id that exercise is submitted; otherwise the currently
    selected one. Returns the submit_exercise result (False triggers
    exit via @false_exit).
    """
    if tid is not None:
        return submit_exercise(Exercise.byid(tid),
                               pastebin=pastebin,
                               request_review=review)
    else:
        sel = Exercise.get_selected()
        # NOTE(review): get_selected presumably raises NoExerciseSelected
        # itself, which would make this guard unreachable — confirm.
        if not sel:
            raise NoExerciseSelected()
        return submit_exercise(sel, pastebin=pastebin, request_review=review)
@aliases("pa")
@arg("-i", "--id", dest="tid", help="Submit this ID.")
@arg("-r", "--review", default=False, action="store_true",
     help="Request a review for this submission.")
def paste(tid=None, review=False):
    """
    Sends the selected exercise to the TMC pastebin.
    """
    # BUG FIX: `review` was previously hard-coded to False, so
    # `tmc paste --review` silently dropped the user's review request.
    submit(pastebin=True, tid=tid, review=review)
@aliases("te")
@arg("-i", "--id", dest="tid", help="Test this ID.")
@arg("-t", "--time", action="store_true",
     help="Output elapsed time at each test.")
@selected_course
@false_exit
def test(course, tid=None, time=None):
    """
    Run tests on the selected exercise.
    """
    if time is not None:
        conf.tests_show_time = time
    # An explicit ID takes precedence; otherwise fall back to the current
    # selection, which must exist.
    if tid is not None:
        target = Exercise.byid(tid)
    else:
        target = Exercise.get_selected()
        if not target:
            raise NoExerciseSelected()
    return run_test(target)
@aliases("ls", "listall")
@selected_course
def list_all(course, single=None):
    """
    Lists all of the exercises in the current course.

    Columns: ID, S(elected), D(ownloaded), C(ompleted), Name.
    If `single` is given, only that exercise's row is printed.
    """
    def bs(val):
        # Bullet for boolean state (selected).
        return "●" if val else " "

    def bc(val):
        # Colored check/cross for boolean state.
        return as_success("✔") if val else as_error("✘")

    def format_line(exercise):
        return "{0} │ {1} │ {2} │ {3} │ {4}".format(exercise.tid,
                                                    bs(exercise.is_selected),
                                                    bc(exercise.is_downloaded),
                                                    bc(exercise.is_completed),
                                                    exercise.menuname())

    # BUG FIX: guard against a course with no exercises — the header width
    # calculation below indexed course.exercises[0] and raised IndexError.
    if single is None and not course.exercises:
        print("No exercises in this course.")
        return
    # Pad the ID header to the width of the (first) exercise id.
    width_source = course.exercises[0] if course.exercises else single
    print("ID{0}│ S │ D │ C │ Name".format(
        (len(str(width_source.tid)) - 1) * " "
    ))
    if single:
        print(format_line(single))
        return
    for exercise in course.exercises:
        # ToDo: use a pager
        print(format_line(exercise))
@aliases("up")
@arg("-c", "--course", action="store_true", help="Update courses instead.")
def update(course=False):
    """
    Update the data of courses and or exercises from server.
    """
    if course:
        with Spinner.context(msg="Updated course metadata.",
                             waitmsg="Updating course metadata."):
            # NOTE(review): the loop variable shadows the boolean `course`
            # parameter above — harmless here, but confusing to readers.
            for course in api.get_courses():
                old = None
                try:
                    old = Course.get(Course.tid == course["id"])
                except peewee.DoesNotExist:
                    old = None
                if old:
                    # Known course: only refresh its details URL.
                    old.details_url = course["details_url"]
                    old.save()
                    continue
                Course.create(tid=course["id"], name=course["name"],
                              details_url=course["details_url"])
    else:
        selected = Course.get_selected()
        # with Spinner.context(msg="Updated exercise metadata.",
        #                      waitmsg="Updating exercise metadata."):
        print("Updating exercise data.")
        for exercise in api.get_exercises(selected):
            old = None
            try:
                old = Exercise.byid(exercise["id"])
            except peewee.DoesNotExist:
                old = None
            if old is not None:
                # Refresh every server-driven field on the known exercise.
                old.name = exercise["name"]
                old.course = selected.id
                old.is_attempted = exercise["attempted"]
                old.is_completed = exercise["completed"]
                old.deadline = exercise.get("deadline")
                # Downloaded state is derived from the directory existing.
                old.is_downloaded = os.path.isdir(old.path())
                old.return_url = exercise["return_url"]
                old.zip_url = exercise["zip_url"]
                old.submissions_url = exercise["exercise_submissions_url"]
                old.save()
                download_exercise(old, update=True)
            else:
                ex = Exercise.create(tid=exercise["id"],
                                     name=exercise["name"],
                                     course=selected.id,
                                     is_attempted=exercise["attempted"],
                                     is_completed=exercise["completed"],
                                     deadline=exercise.get("deadline"),
                                     return_url=exercise["return_url"],
                                     zip_url=exercise["zip_url"],
                                     submissions_url=exercise[("exercise_"
                                                               "submissions_"
                                                               "url")])
                ex.is_downloaded = os.path.isdir(ex.path())
                ex.save()
@selected_course
def select_a_path(course, auto=False):
    """
    Ask for (or auto-pick) the download directory for the course, persist
    it on the course, then offer to download the exercises.
    """
    # Default location: ~/tmc/<course-name>
    defpath = os.path.join(os.path.expanduser("~"),
                           "tmc",
                           course.name)
    if auto:
        path = defpath
    else:
        path = input("File download path [{0}]: ".format(defpath)).strip()
        # Empty answer means "accept the default".
        if len(path) == 0:
            path = defpath
    path = os.path.expanduser(path)
    # I guess we are looking at a relative path
    if not path.startswith("/"):
        path = os.path.join(os.getcwd(), path)
    print("Using path: '{}'".format(path))
    course.path = path
    course.save()
    if auto:
        # Non-interactive mode stops here; no download prompt.
        return
    ret = custom_prompt("Download exercises R: Remaining A: All N: None",
                        ["r", "a", "n"],
                        "r")
    if ret == "a":
        download(dl_all=True)
    elif ret == "r":
        download()
    else:
        print("You can download the exercises with `tmc download --all`")
def version():
    """
    Prints the version and exits.
    """
    banner = ("tmc.py version {0}".format(__version__),
              "Copyright 2014 tmc.py contributors")
    for line in banner:
        print(line)
def should_update():
    """
    Decide whether to query PyPI for a newer tmc.py release.

    Returns True if PyPI has a newer version than the one running.
    The check is skipped (False) right after an upgrade, and is throttled
    to at most once every 7 days via the `last_update_check` config key.
    """
    from xmlrpc.client import ServerProxy
    from distutils.version import StrictVersion
    from datetime import datetime
    import calendar

    current_version = StrictVersion(__version__)
    last_value = (Config.has_name("needs_update")
                  and Config.get_value("needs_update") == "1")
    last_version = (0, 0, 0)
    if Config.has_name("last_version"):
        last_version = StrictVersion(Config.get_value("last_version"))
    # Return False if an upgrade has happened since the last run.
    if last_value and (last_version < current_version):
        return False
    Config.set("last_version", __version__)
    # Throttle: skip if we checked within the last 7 days.
    if Config.has_name("last_update_check"):
        last_time = datetime.utcfromtimestamp(int(
            Config.get_value("last_update_check")))
        # BUG FIX: the original computed (last_time - now), which is always
        # a negative timedelta, so `.days < 7` was always true and the PyPI
        # check below could never run. Also use utcnow() consistently with
        # the UTC timestamp stored via calendar.timegm below.
        if (datetime.utcnow() - last_time).days < 7:
            return False
    Config.set("last_update_check",
               calendar.timegm(datetime.utcnow().timetuple()))
    # Lastly lets check pypi for versions
    pypi = ServerProxy("http://pypi.python.org/pypi")
    pypiversion = StrictVersion(pypi.package_releases("tmc")[0])
    return pypiversion > current_version
# Registry of all CLI subcommands exposed through argh; the order here is
# the order they appear in `tmc help`.
commands = [select, update, download, test, submit, skip, current, previous,
            reset, configure, version, list_all, run, check_for_updates,
            paste]
def main():
    """
    CLI entry point: build the argh parser, record/announce update
    availability, then dispatch the requested subcommand.
    """
    parser = argh.ArghParser()
    parser.add_commands(commands)
    needs_update = should_update()
    # Persist the flag so later invocations can surface the notice.
    Config.set("needs_update", "1" if needs_update else "0")
    if needs_update:
        infomsg("Update available to tmc.py. See tmc check-for-updates",
                "for more info.")
    # By default Argh only shows shortened help when no command is given.
    # This makes it print out the full help instead.
    if len(sys.argv) == 1:
        return parser.dispatch(argv=["help"])
    try:
        parser.dispatch()
    except TMCError as e:
        # Known application errors are printed plainly; exit non-zero.
        print(e)
        exit(-1)
def run_command(argv):
    """
    Run a tmc subcommand in-process, capturing its output.

    :param argv: a command string or a list of argv tokens.
    :return: (stdout_text, stderr_text, exception-or-None).
    """
    from io import StringIO
    # isinstance instead of type(...) == str: also accepts str subclasses.
    if isinstance(argv, str):
        argv = [argv]
    parser = argh.ArghParser()
    parser.add_commands(commands)
    # BUG FIX: the original replaced sys.stdout/sys.stderr and never put
    # them back, leaving the whole process writing into dead StringIOs.
    old_stdout, old_stderr = sys.stdout, sys.stderr
    sys.stdout = StringIO()
    sys.stderr = StringIO()
    exception = None
    try:
        parser.dispatch(argv=argv)
    except Exception as e:
        exception = e
    finally:
        out = sys.stdout.getvalue()
        err = sys.stderr.getvalue()
        sys.stdout, sys.stderr = old_stdout, old_stderr
    return out, err, exception
if __name__ == "__main__":
main()
| {
"content_hash": "3a4bb159677057b2f1d8dd7db8d66c66",
"timestamp": "",
"source": "github",
"line_count": 584,
"max_line_length": 79,
"avg_line_length": 31.46232876712329,
"alnum_prop": 0.5495809295743986,
"repo_name": "JuhaniImberg/tmc.py",
"id": "bf9ad5360038eb786c65a275b06ffb837ecc833d",
"size": "18436",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tmc/__main__.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "68310"
},
{
"name": "Shell",
"bytes": "3901"
}
],
"symlink_target": ""
} |
# Prefer the newer neutron client and fall back to the legacy quantum
# client; keystone v2 is needed for authentication. HAVE_DEPS records
# whether the imports succeeded so main() can fail with a readable message
# instead of an ImportError at module load.
try:
    try:
        from neutronclient.neutron import client
    except ImportError:
        from quantumclient.quantum import client
    from keystoneclient.v2_0 import client as ksclient
    HAVE_DEPS = True
except ImportError:
    HAVE_DEPS = False

# Standard Ansible module metadata block (module is deprecated).
ANSIBLE_METADATA = {'status': ['deprecated'],
                    'supported_by': 'community',
                    'version': '1.0'}
DOCUMENTATION = '''
---
module: quantum_router_interface
version_added: "1.2"
author: "Benno Joy (@bennojoy)"
deprecated: Deprecated in 2.0. Use os_router instead
short_description: Attach/Detach a subnet's interface to a router
description:
- Attach/Detach a subnet interface to a router, to provide a gateway for the subnet.
options:
login_username:
description:
- login username to authenticate to keystone
required: true
default: admin
login_password:
description:
- Password of login user
required: true
default: 'yes'
login_tenant_name:
description:
- The tenant name of the login user
required: true
default: 'yes'
auth_url:
description:
- The keystone URL for authentication
required: false
default: 'http://127.0.0.1:35357/v2.0/'
region_name:
description:
- Name of the region
required: false
default: None
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
router_name:
description:
- Name of the router to which the subnet's interface should be attached.
required: true
default: None
subnet_name:
description:
- Name of the subnet to whose interface should be attached to the router.
required: true
default: None
tenant_name:
description:
- Name of the tenant whose subnet has to be attached.
required: false
default: None
requirements:
- "python >= 2.6"
- "python-neutronclient or python-quantumclient"
- "python-keystoneclient"
'''
EXAMPLES = '''
- name: "Attach tenant1's subnet to the external router"
quantum_router_interface:
state: present
login_username: admin
login_password: admin
login_tenant_name: admin
tenant_name: tenant1
router_name: external_route
subnet_name: t1subnet
'''
# Module-level caches: the authenticated keystone client (set by
# _get_ksclient) and the resolved tenant id (set by _set_tenant_id).
_os_keystone = None
_os_tenant_id = None
def _get_ksclient(module, kwargs):
    """
    Authenticate against keystone and cache the client in _os_keystone.

    :param module: AnsibleModule, used to fail with a readable message.
    :param kwargs: module params carrying the login credentials.
    :return: an authenticated keystone client.
    """
    try:
        kclient = ksclient.Client(username=kwargs.get('login_username'),
                                  password=kwargs.get('login_password'),
                                  tenant_name=kwargs.get('login_tenant_name'),
                                  auth_url=kwargs.get('auth_url'))
    except Exception as e:
        # BUG FIX: e.message does not exist on Python 3 exceptions
        # (AttributeError in the error path); use str(e) instead.
        module.fail_json(msg="Error authenticating to the keystone: %s " % str(e))
    global _os_keystone
    _os_keystone = kclient
    return kclient
def _get_endpoint(module, ksclient):
try:
endpoint = ksclient.service_catalog.url_for(service_type='network', endpoint_type='publicURL')
except Exception as e:
module.fail_json(msg = "Error getting network endpoint: %s" % e.message)
return endpoint
def _get_neutron_client(module, kwargs):
    """
    Build a neutron (or legacy quantum) API client from the module params.
    """
    _ksclient = _get_ksclient(module, kwargs)
    token = _ksclient.auth_token
    endpoint = _get_endpoint(module, _ksclient)
    # Fresh dict instead of reassigning the `kwargs` parameter (the
    # original shadowed it), carrying only what the client needs.
    client_kwargs = {
        'token': token,
        'endpoint_url': endpoint
    }
    try:
        neutron = client.Client('2.0', **client_kwargs)
    except Exception as e:
        # BUG FIX: e.message does not exist on Python 3 exceptions;
        # use str(e) instead.
        module.fail_json(msg="Error in connecting to neutron: %s " % str(e))
    return neutron
def _set_tenant_id(module):
    """
    Resolve the target tenant id into the module-level _os_tenant_id cache.

    Uses the login tenant when no `tenant_name` param is given; fails the
    module if no matching tenant can be found.
    """
    global _os_tenant_id
    wanted = module.params['tenant_name']
    if not wanted:
        # No explicit tenant requested: use the tenant we logged in as.
        _os_tenant_id = _os_keystone.tenant_id
    else:
        for candidate in _os_keystone.tenants.list():
            if candidate.name == wanted:
                _os_tenant_id = candidate.id
                break
    if not _os_tenant_id:
        module.fail_json(msg="The tenant id cannot be found, please check the parameters")
def _get_router_id(module, neutron):
kwargs = {
'name': module.params['router_name'],
}
try:
routers = neutron.list_routers(**kwargs)
except Exception as e:
module.fail_json(msg = "Error in getting the router list: %s " % e.message)
if not routers['routers']:
return None
return routers['routers'][0]['id']
def _get_subnet_id(module, neutron):
    """
    Return the id of the named subnet within the resolved tenant, or None.
    """
    subnet_id = None
    kwargs = {
        'tenant_id': _os_tenant_id,
        'name': module.params['subnet_name'],
    }
    try:
        subnets = neutron.list_subnets(**kwargs)
    except Exception as e:
        # BUG FIX: e.message does not exist on Python 3 exceptions;
        # use str(e) instead.
        module.fail_json(msg=" Error in getting the subnet list:%s " % str(e))
    if not subnets['subnets']:
        return None
    return subnets['subnets'][0]['id']
def _get_port_id(neutron, module, router_id, subnet_id):
    """
    Return the id of the router port attached to `subnet_id`, or None.
    """
    kwargs = {
        'tenant_id': _os_tenant_id,
        'device_id': router_id,
    }
    try:
        ports = neutron.list_ports(**kwargs)
    except Exception as e:
        # BUG FIX: e.message does not exist on Python 3 exceptions;
        # use str(e) instead.
        module.fail_json(msg="Error in listing ports: %s" % str(e))
    if not ports['ports']:
        return None
    # A port may carry several fixed IPs; match any of them on the subnet.
    for port in ports['ports']:
        for subnet in port['fixed_ips']:
            if subnet['subnet_id'] == subnet_id:
                return port['id']
    return None
def _add_interface_router(neutron, module, router_id, subnet_id):
kwargs = {
'subnet_id': subnet_id
}
try:
neutron.add_interface_router(router_id, kwargs)
except Exception as e:
module.fail_json(msg = "Error in adding interface to router: %s" % e.message)
return True
def _remove_interface_router(neutron, module, router_id, subnet_id):
kwargs = {
'subnet_id': subnet_id
}
try:
neutron.remove_interface_router(router_id, kwargs)
except Exception as e:
module.fail_json(msg="Error in removing interface from router: %s" % e.message)
return True
def main():
    """
    Ansible entry point: attach (state=present) or detach (state=absent)
    a subnet's interface on a router.
    """
    argument_spec = openstack_argument_spec()
    argument_spec.update(dict(
        router_name=dict(required=True),
        subnet_name=dict(required=True),
        tenant_name=dict(default=None),
        state=dict(default='present', choices=['absent', 'present']),
    ))
    module = AnsibleModule(argument_spec=argument_spec)
    if not HAVE_DEPS:
        module.fail_json(msg='python-keystoneclient and either python-neutronclient or python-quantumclient are required')

    neutron = _get_neutron_client(module, module.params)
    _set_tenant_id(module)

    router_id = _get_router_id(module, neutron)
    if not router_id:
        module.fail_json(msg="failed to get the router id, please check the router name")
    subnet_id = _get_subnet_id(module, neutron)
    if not subnet_id:
        module.fail_json(msg="failed to get the subnet id, please check the subnet name")

    if module.params['state'] == 'present':
        port_id = _get_port_id(neutron, module, router_id, subnet_id)
        if not port_id:
            _add_interface_router(neutron, module, router_id, subnet_id)
            # BUG FIX: the original reported id=None for a freshly created
            # interface; look the new port up so callers get its real id.
            port_id = _get_port_id(neutron, module, router_id, subnet_id)
            module.exit_json(changed=True, result="created", id=port_id)
        # Interface already present: idempotent success.
        module.exit_json(changed=False, result="success", id=port_id)

    if module.params['state'] == 'absent':
        port_id = _get_port_id(neutron, module, router_id, subnet_id)
        if not port_id:
            # Nothing attached: idempotent success.
            module.exit_json(changed=False, result="Success")
        _remove_interface_router(neutron, module, router_id, subnet_id)
        module.exit_json(changed=True, result="Deleted")
# this is magic, see lib/ansible/module_utils (the wildcard imports below
# are expanded by Ansible's module packaging at runtime)
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
| {
"content_hash": "746ed2090138b11aaa81820a7cc36619",
"timestamp": "",
"source": "github",
"line_count": 246,
"max_line_length": 122,
"avg_line_length": 32.239837398373986,
"alnum_prop": 0.6174505106543942,
"repo_name": "nwiizo/workspace_2017",
"id": "b2a1784d99a1c7c29a5aefd747e1064d70eab375",
"size": "8649",
"binary": false,
"copies": "22",
"ref": "refs/heads/master",
"path": "ansible-modules-core/cloud/openstack/_quantum_router_interface.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "173"
},
{
"name": "C++",
"bytes": "7105"
},
{
"name": "CSS",
"bytes": "50021"
},
{
"name": "Go",
"bytes": "112005"
},
{
"name": "HTML",
"bytes": "66435"
},
{
"name": "JavaScript",
"bytes": "73266"
},
{
"name": "Makefile",
"bytes": "1227"
},
{
"name": "PHP",
"bytes": "3916"
},
{
"name": "PowerShell",
"bytes": "277598"
},
{
"name": "Python",
"bytes": "11925958"
},
{
"name": "Ruby",
"bytes": "3779"
},
{
"name": "Rust",
"bytes": "1484076"
},
{
"name": "Shell",
"bytes": "86558"
}
],
"symlink_target": ""
} |
import importlib
import pkgutil
from flask import Blueprint
def register_blueprints(app, package_name, package_path):
    """Register all Blueprint found in the modules for the specified package.

    :param app: the Flask application
    :param package_name: the package name
    :param package_path: the package path
    :return: the list of registered blueprints
    """
    registered = []
    for _, mod_name, _ in pkgutil.iter_modules(package_path):
        module = importlib.import_module('%s.%s' % (package_name, mod_name))
        # Scan the module's attributes for Blueprint instances.
        for attr_name in dir(module):
            attr = getattr(module, attr_name)
            if isinstance(attr, Blueprint):
                app.register_blueprint(attr)
                registered.append(attr)
    return registered
| {
"content_hash": "874f62cf16ebdb90a90b222cef4b0342",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 77,
"avg_line_length": 30.136363636363637,
"alnum_prop": 0.6349924585218703,
"repo_name": "kyouko-taiga/mushi",
"id": "6dfd3a99a5d1cca0571a060667f396fe8dd92552",
"size": "1243",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mushi/core/utils/app.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "99671"
},
{
"name": "HTML",
"bytes": "63585"
},
{
"name": "JavaScript",
"bytes": "105700"
},
{
"name": "Python",
"bytes": "66546"
}
],
"symlink_target": ""
} |
from custom_labels_csv_to_manifest import check_duplicates, create_manifest_file
from os.path import exists
from os import remove
from os import path
import pytest
"""
Unit tests for custom_labels_csv_to_manifest.py.
"""
def clean_up(*argv):
    """
    Deletes supplied files, if they exist.
    Removes any previous test run files.
    """
    for candidate in argv:
        if exists(candidate):
            remove(candidate)
@pytest.mark.parametrize("csv_file, result",
                         [
                             ('test/test_csvs/test_s3_supplied.csv', False)
                         ]
                         )
def test_check_no_errors(csv_file, result):
    """
    Checks that the CSV file is valid.
    """
    base = path.splitext(csv_file)[0]
    manifest_file = f'{base}.manifest'
    duplicates_file = f'{base}-duplicates.csv'
    deduplicated_file = f'{base}-deduplicated.csv'

    clean_up(deduplicated_file, duplicates_file, manifest_file)
    assert check_duplicates(csv_file, deduplicated_file, duplicates_file) == result
    # A clean CSV must not leave any side-effect files behind.
    assert not exists(deduplicated_file)
    assert not exists(duplicates_file)
    clean_up(deduplicated_file, duplicates_file, manifest_file)
@pytest.mark.parametrize("csv_file,result",
                         [
                             ('test/test_csvs/test_dups_errors.csv', True)
                         ]
                         )
def test_duplicates_errors(csv_file, result):
    """
    Checks that a CSV file with duplicate entries
    creates the deduplication and errors CSV file.
    """
    base = path.splitext(csv_file)[0]
    manifest_file = f'{base}.manifest'
    duplicates_file = f'{base}-duplicates.csv'
    deduplicated_file = f'{base}-deduplicated.csv'

    clean_up(deduplicated_file, duplicates_file, manifest_file)
    assert check_duplicates(csv_file, deduplicated_file, duplicates_file) == result
    # Duplicates must produce both report files.
    assert exists(deduplicated_file)
    assert exists(duplicates_file)
    clean_up(deduplicated_file, duplicates_file, manifest_file)
@pytest.mark.parametrize("csv_file,img_count,anom_count",
                         [
                             ("test/test_csvs/test_no_s3.csv", 15,33)
                         ]
                         )
def test_create_manifest_no_s3_supplied(csv_file, img_count, anom_count):
    """
    Checks that a CSV file with images and no Amazon S3 path creates
    a manifest file.
    """
    s3_path = "s3://docexamplebucket1/circuitboard/train/"
    base = path.splitext(csv_file)[0]
    errors_file = f"{base}_errors.csv"
    deduplicated_file = f"{base}_deduplicated.csv"
    manifest_file = f"{base}.manifest"

    clean_up(deduplicated_file, errors_file, manifest_file)
    image_count, anomalous_count = create_manifest_file(csv_file,
                                                        manifest_file,
                                                        s3_path)
    assert image_count == img_count
    assert anomalous_count == anom_count
    # Only the manifest should exist; no error/dedup side-effect files.
    assert exists(manifest_file)
    assert not exists(deduplicated_file)
    assert not exists(errors_file)
    clean_up(deduplicated_file, errors_file, manifest_file)
@pytest.mark.parametrize("csv_file,img_count,lbl_count",
                         [
                             ('test/test_csvs/test_s3_supplied.csv', 15,33)
                         ]
                         )
def test_create_manifest_s3_supplied(csv_file, img_count, lbl_count):
    """
    Checks that a CSV file with images and an Amazon S3 path creates
    a manifest file.
    """
    s3_path = ""
    base = path.splitext(csv_file)[0]
    errors_file = f"{base}_errors.csv"
    deduplicated_file = f"{base}_deduplicated.csv"
    manifest_file = f"{base}.manifest"

    clean_up(deduplicated_file, errors_file, manifest_file)
    image_count, label_count = create_manifest_file(csv_file,
                                                    manifest_file,
                                                    s3_path)
    assert image_count == img_count
    assert label_count == lbl_count
    # Only the manifest should exist; no error/dedup side-effect files.
    assert exists(manifest_file)
    assert not exists(deduplicated_file)
    assert not exists(errors_file)
    clean_up(deduplicated_file, errors_file, manifest_file)
| {
"content_hash": "0e5bc546858990e8391d0b2aaae804ee",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 81,
"avg_line_length": 29,
"alnum_prop": 0.6660617059891107,
"repo_name": "awsdocs/aws-doc-sdk-examples",
"id": "367ad1a1964baf1279c46978fada2d756673529a",
"size": "3857",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "python/example_code/rekognition/test/test_custom_labels_csv_to_manifest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ABAP",
"bytes": "476653"
},
{
"name": "Batchfile",
"bytes": "900"
},
{
"name": "C",
"bytes": "3852"
},
{
"name": "C#",
"bytes": "2051923"
},
{
"name": "C++",
"bytes": "943634"
},
{
"name": "CMake",
"bytes": "82068"
},
{
"name": "CSS",
"bytes": "33378"
},
{
"name": "Dockerfile",
"bytes": "2243"
},
{
"name": "Go",
"bytes": "1764292"
},
{
"name": "HTML",
"bytes": "319090"
},
{
"name": "Java",
"bytes": "4966853"
},
{
"name": "JavaScript",
"bytes": "1655476"
},
{
"name": "Jupyter Notebook",
"bytes": "9749"
},
{
"name": "Kotlin",
"bytes": "1099902"
},
{
"name": "Makefile",
"bytes": "4922"
},
{
"name": "PHP",
"bytes": "1220594"
},
{
"name": "Python",
"bytes": "2507509"
},
{
"name": "Ruby",
"bytes": "500331"
},
{
"name": "Rust",
"bytes": "558811"
},
{
"name": "Shell",
"bytes": "63776"
},
{
"name": "Swift",
"bytes": "267325"
},
{
"name": "TypeScript",
"bytes": "119632"
}
],
"symlink_target": ""
} |
from project import create_app, logger, db
from flask_script import Manager
import coverage
import unittest

logger.info('Server has started.')

# Coverage is started before create_app()/model imports below so that
# project code executed at import time is measured as well.
COV = coverage.coverage(
    branch=True,
    include='project/*',
    omit=[
        'tests/*',
        'project/website/*',
        'project/__init__.py'
    ]
)
COV.start()

app = create_app()

# Imported for its side effect: model classes must be registered on the
# SQLAlchemy metadata before create_all().
import project.models
db.create_all(app=app)

manager = Manager(app)
@manager.command
def cov():
    """Run the test suite under coverage; report and emit HTML on success.

    Returns 0 when all tests pass, 1 otherwise.
    """
    suite = unittest.TestLoader().discover('tests')
    outcome = unittest.TextTestRunner(verbosity=2).run(suite)
    if not outcome.wasSuccessful():
        return 1
    COV.stop()
    COV.save()
    print('Coverage Summary:')
    COV.report()
    COV.html_report()
    COV.erase()
    return 0
@manager.command
def test():
    """Discover and run every test module matching test*.py.

    Returns 0 when all tests pass, 1 otherwise.
    """
    suite = unittest.TestLoader().discover('tests', pattern='test*.py')
    outcome = unittest.TextTestRunner(verbosity=2).run(suite)
    return 0 if outcome.wasSuccessful() else 1
@manager.command
def test_one(test_file):
    """Run a single test module, given its file name without extension.

    Returns 0 when the module's tests pass, 1 otherwise.
    """
    suite = unittest.TestLoader().discover('tests', pattern=test_file + '.py')
    outcome = unittest.TextTestRunner(verbosity=2).run(suite)
    return 0 if outcome.wasSuccessful() else 1
@manager.command
def finance(market, ticker):
    """Print each row of the income statement for market/ticker."""
    # Imported lazily so the dependency is only needed for this command.
    from project.services.google_finance import GoogleFinance
    report = GoogleFinance(market, ticker).income_statement()
    for row in report:
        print(row, flush=True)
if __name__ == '__main__':
manager.run()
| {
"content_hash": "4b286485bb2ddf211cd54bc0d54d15bc",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 78,
"avg_line_length": 21.256756756756758,
"alnum_prop": 0.6382708200890019,
"repo_name": "Radu-Raicea/Stock-Analyzer",
"id": "d644da82799a25bf15daee675d98331e89c72332",
"size": "1574",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flask/manage.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "7"
},
{
"name": "HTML",
"bytes": "8273"
},
{
"name": "JavaScript",
"bytes": "787"
},
{
"name": "Nginx",
"bytes": "630"
},
{
"name": "Python",
"bytes": "23114"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Initial migration: creates the VirtualMachine table."""

    # First migration of the app: nothing to depend on.
    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='VirtualMachine',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                # IPv4 addresses only, per protocol='ipv4'.
                ('ip_address', models.GenericIPAddressField(protocol='ipv4')),
                ('creation_date', models.DateTimeField()),
                ('login', models.CharField(max_length=200)),
                # NOTE(review): stored as a plain CharField — no hashing
                # is visible at this layer; confirm handling upstream.
                ('password', models.CharField(max_length=200)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
| {
"content_hash": "c0cf86c963048c833730485375a6f2a5",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 114,
"avg_line_length": 29.64,
"alnum_prop": 0.5492577597840755,
"repo_name": "arnaudmorin/instantserver",
"id": "be36d0e1a6eb0057c029f662ebd00d182dd3774d",
"size": "765",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "migrations/0001_initial.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1302"
},
{
"name": "HTML",
"bytes": "4956"
},
{
"name": "JavaScript",
"bytes": "3121"
},
{
"name": "Python",
"bytes": "10439"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.