text stringlengths 4 1.02M | meta dict |
|---|---|
from setuptools import setup
import glob  # NOTE(review): appears unused here -- confirm before removing
import versioneer

# Short package description reused below.
d = 'Genome annotation data analysis and management implemented in pure Python'

setup(name='tag',
      version=versioneer.get_version(),
      cmdclass=versioneer.get_cmdclass(),
      description=d,
      url='http://tag.readthedocs.io',
      author='Daniel Standage',
      author_email='daniel.standage@gmail.com',
      license='BSD-3',
      packages=['tag', 'tag.cli', 'tag.tests'],
      # BUG FIX: package_data glob patterns are resolved relative to the
      # package directory.  The old pattern 'tag/tests/data/*' pointed at
      # tag/tag/tests/data/* (nonexistent); the fixtures live in
      # tag/tests/data/, i.e. 'tests/data/*' relative to package 'tag'.
      package_data={'tag': ['tests/data/*']},
      include_package_data=True,
      entry_points={'console_scripts': ['tag = tag.__main__:main']},
      install_requires=['intervaltree>=3.0', 'networkx>=2.0'],
      classifiers=[
          'Development Status :: 4 - Beta',
          'Environment :: Console',
          'License :: OSI Approved :: BSD License',
          'Programming Language :: Python :: 3.6',
          'Programming Language :: Python :: 3.7',
          'Programming Language :: Python :: 3.8',
          'Topic :: Scientific/Engineering :: Bio-Informatics'
      ],
      zip_safe=True)
| {
"content_hash": "52f86524e99cf801875a8af04a0329b4",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 79,
"avg_line_length": 36.724137931034484,
"alnum_prop": 0.6056338028169014,
"repo_name": "standage/tag",
"id": "a780b4178078c4eb86859a6557feac105d71f4ad",
"size": "1440",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "865"
},
{
"name": "Python",
"bytes": "250058"
}
],
"symlink_target": ""
} |
from django.http import Http404, HttpResponse
from django.conf import settings
from django.template.loader import render_to_string
from django.core.files.storage import default_storage
from django.core.files.base import ContentFile
from PIL import Image
import json
from settings import FILES_DIR, PROJECT_DIR, IMAGE_QUALITY
from controllers import ImagePath
def upload(request):
    """Handle an AJAX image upload (POST only).

    Saves the first uploaded file under FILES_DIR/<user pk>/, recompresses
    it best-effort with PIL, and returns a JSON payload with the stored
    path and a rendered thumbnail snippet.

    :param request: Django HttpRequest; must be an AJAX POST with files.
    :returns: HttpResponse with a JSON body.
    :raises Http404: for any non-POST request.
    """
    if not request.method == 'POST':
        raise Http404
    response_data = {}
    if request.is_ajax():
        if request.FILES:
            # BUG FIX: dict.values() is a non-subscriptable view on Python 3;
            # wrap in list() so taking the first file works on both 2 and 3.
            files = list(request.FILES.values())[0]
            path = default_storage.save('{}{}/{}'.format(FILES_DIR,
                                        request.user.pk,
                                        files.name), ContentFile(files.read()))
            try:
                full_path = PROJECT_DIR + '/' + path
                img = Image.open(full_path)
                img.save(full_path, quality=IMAGE_QUALITY)
            except Exception:
                # Best-effort recompression only: if PIL cannot read/write the
                # file, keep the original upload.  Narrowed from a bare except
                # so SystemExit/KeyboardInterrupt are no longer swallowed.
                pass
            # Idiomatic replacement for try/except KeyError around POST access.
            preview_size = request.POST.get('preview_size', '64')
            response_data['status'] = True
            response_data['imagePath'] = path
            response_data['thumbnail'] = render_to_string('files_widget/includes/thumbnail.html',
                                                          {'MEDIA_URL': settings.MEDIA_URL,
                                                           'STATIC_URL': settings.STATIC_URL,
                                                           'preview_size': preview_size})
        return HttpResponse(json.dumps(response_data), content_type="application/json")
    else:
        response_data['status'] = False
        response_data['message'] = "We're sorry, but something went wrong."
        return HttpResponse(json.dumps(response_data), content_type='application/json')
def thumbnail_url(request):
    """Return the thumbnail URL for the image named in the query string.

    Requires ``img`` and ``preview_size`` GET parameters; raises Http404
    when either is missing.
    """
    params = request.GET
    if 'img' not in params or 'preview_size' not in params:
        raise Http404
    # Avoid shadowing the view name with a local variable.
    url = ImagePath(params['img']).thumbnail(params['preview_size']).url
    return HttpResponse(url)
| {
"content_hash": "c8092bd25e01bc9cfda225498a81c5ce",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 97,
"avg_line_length": 40.36363636363637,
"alnum_prop": 0.5490990990990992,
"repo_name": "dellax/django-files-widget",
"id": "e818aa0804e1730cee423c1c21f47592d40abce4",
"size": "2220",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "topnotchdev/files_widget/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4299"
},
{
"name": "HTML",
"bytes": "4336"
},
{
"name": "JavaScript",
"bytes": "28138"
},
{
"name": "Python",
"bytes": "25427"
}
],
"symlink_target": ""
} |
from twilio.rest import Client

# Your Account Sid and Auth Token from twilio.com/user/account
account_sid = "ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
auth_token = "your_auth_token"
client = Client(account_sid, auth_token)

# Replacement payload for the Sync List item.
data = {'number': "001", 'name': "Bulbasaur", 'attack': 50}

# Walk the resource tree step by step instead of one long chained call.
service = client.sync.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
sync_list = service.sync_lists("MyCollection")
list_item = sync_list.sync_list_items(0).update(data=data)

print(list_item.data)
| {
"content_hash": "e9bf5abbfd36e5ec215b48b77979d96d",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 62,
"avg_line_length": 29.125,
"alnum_prop": 0.7145922746781116,
"repo_name": "teoreteetik/api-snippets",
"id": "42710f27de4a09af0ad39880e668410743ea2081",
"size": "539",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sync/rest/lists/update-list-item/update-list-item.6.x.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "643369"
},
{
"name": "HTML",
"bytes": "335"
},
{
"name": "Java",
"bytes": "943336"
},
{
"name": "JavaScript",
"bytes": "539577"
},
{
"name": "M",
"bytes": "117"
},
{
"name": "Mathematica",
"bytes": "93"
},
{
"name": "Objective-C",
"bytes": "46198"
},
{
"name": "PHP",
"bytes": "538312"
},
{
"name": "Python",
"bytes": "467248"
},
{
"name": "Ruby",
"bytes": "470316"
},
{
"name": "Shell",
"bytes": "1564"
},
{
"name": "Swift",
"bytes": "36563"
}
],
"symlink_target": ""
} |
"""This file contains integration tests for the semiparametric estimation routine."""
import numpy as np
import pandas as pd
import pickle
from grmpy.estimate.estimate import fit
from grmpy.grmpy_config import TEST_RESOURCES_DIR
from grmpy.read.read import read
from grmpy.simulate.simulate import simulate
from grmpy.test.random_init import print_dict
def test_replication_carneiro():
    """Check that grmpy's locpoly results match R's locpoly reference values.

    The mock data set from Carneiro et al (2011) is used and both the
    mte_u and the final mte are compared.
    """
    # Point the estimation at the mock data and write the adjusted
    # initialization file back to disk before fitting.
    init_dict = read(TEST_RESOURCES_DIR + "/replication_semipar.yml")
    init_dict["ESTIMATION"]["file"] = TEST_RESOURCES_DIR + "/aer-replication-mock.pkl"
    print_dict(init_dict, TEST_RESOURCES_DIR + "/replication_semipar")

    # Reference results previously produced with R's locpoly.
    expected_mte_u = pd.read_pickle(
        TEST_RESOURCES_DIR + "/replication-results-mte_u.pkl"
    )
    expected_mte = pd.read_pickle(TEST_RESOURCES_DIR + "/replication-results-mte.pkl")

    result = fit(TEST_RESOURCES_DIR + "/replication_semipar.grmpy.yml", semipar=True)

    np.testing.assert_array_almost_equal(result["mte_u"], expected_mte_u, decimal=6)
    np.testing.assert_array_almost_equal(result["mte"], expected_mte, decimal=6)
def test_rslt_dictionary():
    """
    This test checks if the elements of the estimation dictionary are equal
    to their expected values when the initialization file of the
    semipar tutorial is used.
    """
    fname = TEST_RESOURCES_DIR + "/tutorial-semipar.grmpy.yml"
    simulate(fname)
    rslt = fit(fname, semipar=True)

    # BUG FIX: use a context manager so the pickle file handle is closed
    # deterministically (the original left the open() file object dangling).
    with open(TEST_RESOURCES_DIR + "/tutorial-semipar-results.pkl", "rb") as handle:
        expected_rslt = pickle.load(handle)

    np.testing.assert_equal(rslt["quantiles"], expected_rslt["quantiles"])
    np.testing.assert_almost_equal(rslt["mte"], expected_rslt["mte"], 7)
    np.testing.assert_almost_equal(rslt["mte_u"], expected_rslt["mte_u"], 7)
    np.testing.assert_almost_equal(rslt["mte_min"], expected_rslt["mte_min"], 5)
    np.testing.assert_almost_equal(rslt["mte_max"], expected_rslt["mte_max"], 5)
    np.testing.assert_almost_equal(rslt["b0"], expected_rslt["b0"], 7)
    np.testing.assert_almost_equal(rslt["b1"], expected_rslt["b1"], 7)
| {
"content_hash": "5bb2e78dd67d9ea14b825da3c6501826",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 88,
"avg_line_length": 42.111111111111114,
"alnum_prop": 0.7044854881266491,
"repo_name": "grmToolbox/grmpy",
"id": "a957ba1ee0b40b5ba51ddc5db7b2abb008908b65",
"size": "2274",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "grmpy/test/test_integration_semipar.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1222844"
}
],
"symlink_target": ""
} |
from uuid import UUID
from graphscale.pent.pent import Pent
from graphscale.grapple.grapple_utils import req_data_elem_invalid, req_data_elem_valid
class TodoUserGenerated(Pent):
    @staticmethod
    def is_input_data_valid(data):
        """Check that a row loaded from the database is a valid TodoUser.

        Required fields: obj_id (id: ID!) and name (name: String!).
        """
        if not isinstance(data, dict):
            return False
        required = (('obj_id', UUID), ('name', str))
        return not any(req_data_elem_invalid(data, key, kind)
                       for key, kind in required)

    def name(self):
        """Return the user's name field."""
        return self._data['name']

    async def gen_todo_items(self, after=None, first=None):
        """Asynchronously fetch this user's todo items via the edge."""
        edge_name = 'user_to_todo_edge'
        target = self.config().get_edge_target_type_from_name(edge_name)
        return await self.gen_associated_pents(target, edge_name, after, first)
class TodoItemGenerated(Pent):
    @staticmethod
    def is_input_data_valid(data):
        """Check that a row loaded from the database is a valid TodoItem.

        Required fields: obj_id (id: ID!) and text (text: String!).
        """
        if not isinstance(data, dict):
            return False
        required = (('obj_id', UUID), ('text', str))
        return all(req_data_elem_valid(data, key, kind)
                   for key, kind in required)

    def text(self):
        """Return the todo item's text field."""
        return self._data['text']

    async def gen_user(self):
        """Asynchronously fetch the owning user (type id 1000)."""
        klass = self.config().get_type(1000)  # type_id of the user pent
        return await klass.gen(self._context, self._data['user_id'])
| {
"content_hash": "a87ed9ca8457eed6309856de212f5808",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 94,
"avg_line_length": 36.333333333333336,
"alnum_prop": 0.6369593709043251,
"repo_name": "schrockntemp/graphscaletemp",
"id": "01850d01a2315e04e6d82eaddc33e2816019deb8",
"size": "1526",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "graphscale/examples/todo/generated/todo_pents_generated.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "184175"
}
],
"symlink_target": ""
} |
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class SupportingDocumentTypeList(ListResource):
    """Read-only list resource for Supporting Document Types in the Twilio
    Numbers v2 Regulatory Compliance API.

    NOTE(review): this module appears to be auto-generated by the Twilio
    helper-library generator -- prefer regenerating over hand-editing.
    """

    def __init__(self, version):
        """
        Initialize the SupportingDocumentTypeList

        :param Version version: Version that contains the resource

        :returns: twilio.rest.numbers.v2.regulatory_compliance.supporting_document_type.SupportingDocumentTypeList
        :rtype: twilio.rest.numbers.v2.regulatory_compliance.supporting_document_type.SupportingDocumentTypeList
        """
        super(SupportingDocumentTypeList, self).__init__(version)

        # Path Solution (no parent identifiers -- the URI is fixed)
        self._solution = {}
        self._uri = '/RegulatoryCompliance/SupportingDocumentTypes'.format(**self._solution)

    def stream(self, limit=None, page_size=None):
        """
        Streams SupportingDocumentTypeInstance records from the API as a generator stream.
        This operation lazily loads records as efficiently as possible until the limit
        is reached.
        The results are returned as a generator, so this operation is memory efficient.

        :param int limit: Upper limit for the number of records to return. stream()
                          guarantees to never return more than limit. Default is no limit
        :param int page_size: Number of records to fetch per request, when not set will use
                              the default value of 50 records. If no page_size is defined
                              but a limit is defined, stream() will attempt to read the
                              limit with the most efficient page size, i.e. min(limit, 1000)

        :returns: Generator that will yield up to limit results
        :rtype: list[twilio.rest.numbers.v2.regulatory_compliance.supporting_document_type.SupportingDocumentTypeInstance]
        """
        limits = self._version.read_limits(limit, page_size)

        page = self.page(page_size=limits['page_size'], )

        return self._version.stream(page, limits['limit'])

    def list(self, limit=None, page_size=None):
        """
        Lists SupportingDocumentTypeInstance records from the API as a list.
        Unlike stream(), this operation is eager and will load `limit` records into
        memory before returning.

        :param int limit: Upper limit for the number of records to return. list() guarantees
                          never to return more than limit. Default is no limit
        :param int page_size: Number of records to fetch per request, when not set will use
                              the default value of 50 records. If no page_size is defined
                              but a limit is defined, list() will attempt to read the limit
                              with the most efficient page size, i.e. min(limit, 1000)

        :returns: Generator that will yield up to limit results
        :rtype: list[twilio.rest.numbers.v2.regulatory_compliance.supporting_document_type.SupportingDocumentTypeInstance]
        """
        return list(self.stream(limit=limit, page_size=page_size, ))

    def page(self, page_token=values.unset, page_number=values.unset,
             page_size=values.unset):
        """
        Retrieve a single page of SupportingDocumentTypeInstance records from the API.
        Request is executed immediately

        :param str page_token: PageToken provided by the API
        :param int page_number: Page Number, this value is simply for client state
        :param int page_size: Number of records to return, defaults to 50

        :returns: Page of SupportingDocumentTypeInstance
        :rtype: twilio.rest.numbers.v2.regulatory_compliance.supporting_document_type.SupportingDocumentTypePage
        """
        # values.of() drops unset parameters from the request payload.
        data = values.of({'PageToken': page_token, 'Page': page_number, 'PageSize': page_size, })

        response = self._version.page(method='GET', uri=self._uri, params=data, )

        return SupportingDocumentTypePage(self._version, response, self._solution)

    def get_page(self, target_url):
        """
        Retrieve a specific page of SupportingDocumentTypeInstance records from the API.
        Request is executed immediately

        :param str target_url: API-generated URL for the requested results page

        :returns: Page of SupportingDocumentTypeInstance
        :rtype: twilio.rest.numbers.v2.regulatory_compliance.supporting_document_type.SupportingDocumentTypePage
        """
        response = self._version.domain.twilio.request(
            'GET',
            target_url,
        )

        return SupportingDocumentTypePage(self._version, response, self._solution)

    def get(self, sid):
        """
        Constructs a SupportingDocumentTypeContext

        :param sid: The unique string that identifies the Supporting Document Type resource

        :returns: twilio.rest.numbers.v2.regulatory_compliance.supporting_document_type.SupportingDocumentTypeContext
        :rtype: twilio.rest.numbers.v2.regulatory_compliance.supporting_document_type.SupportingDocumentTypeContext
        """
        return SupportingDocumentTypeContext(self._version, sid=sid, )

    def __call__(self, sid):
        """
        Constructs a SupportingDocumentTypeContext

        :param sid: The unique string that identifies the Supporting Document Type resource

        :returns: twilio.rest.numbers.v2.regulatory_compliance.supporting_document_type.SupportingDocumentTypeContext
        :rtype: twilio.rest.numbers.v2.regulatory_compliance.supporting_document_type.SupportingDocumentTypeContext
        """
        return SupportingDocumentTypeContext(self._version, sid=sid, )

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Numbers.V2.SupportingDocumentTypeList>'
class SupportingDocumentTypePage(Page):

    def __init__(self, version, response, solution):
        """Build a SupportingDocumentTypePage from one API response.

        :param Version version: Version that contains the resource
        :param Response response: Response from the API
        :param solution: path solution carried through from the list resource
        """
        super(SupportingDocumentTypePage, self).__init__(version, response)

        # Carry the path solution through so instances can be constructed.
        self._solution = solution

    def get_instance(self, payload):
        """Turn one payload dict from the page into an instance object.

        :param dict payload: Payload response from the API
        :rtype: twilio.rest.numbers.v2.regulatory_compliance.supporting_document_type.SupportingDocumentTypeInstance
        """
        return SupportingDocumentTypeInstance(self._version, payload)

    def __repr__(self):
        """Machine-friendly representation.

        :rtype: str
        """
        return '<Twilio.Numbers.V2.SupportingDocumentTypePage>'
class SupportingDocumentTypeContext(InstanceContext):

    def __init__(self, version, sid):
        """Build a context addressing one Supporting Document Type.

        :param Version version: Version that contains the resource
        :param sid: The unique string that identifies the resource
        """
        super(SupportingDocumentTypeContext, self).__init__(version)

        # Path Solution: the sid selects the individual resource.
        self._solution = {'sid': sid}
        self._uri = '/RegulatoryCompliance/SupportingDocumentTypes/{sid}'.format(**self._solution)

    def fetch(self):
        """Fetch the SupportingDocumentTypeInstance from the API.

        :returns: The fetched SupportingDocumentTypeInstance
        :rtype: twilio.rest.numbers.v2.regulatory_compliance.supporting_document_type.SupportingDocumentTypeInstance
        """
        payload = self._version.fetch(method='GET', uri=self._uri)
        return SupportingDocumentTypeInstance(
            self._version, payload, sid=self._solution['sid']
        )

    def __repr__(self):
        """Machine-friendly representation.

        :rtype: str
        """
        details = ['{}={}'.format(k, v) for k, v in self._solution.items()]
        return '<Twilio.Numbers.V2.SupportingDocumentTypeContext {}>'.format(' '.join(details))
class SupportingDocumentTypeInstance(InstanceResource):
    """A single Supporting Document Type record returned by the API.

    NOTE(review): auto-generated Twilio helper-library code; regenerate
    rather than hand-editing.
    """

    def __init__(self, version, payload, sid=None):
        """
        Initialize the SupportingDocumentTypeInstance

        :returns: twilio.rest.numbers.v2.regulatory_compliance.supporting_document_type.SupportingDocumentTypeInstance
        :rtype: twilio.rest.numbers.v2.regulatory_compliance.supporting_document_type.SupportingDocumentTypeInstance
        """
        super(SupportingDocumentTypeInstance, self).__init__(version)

        # Marshaled Properties (raw values from the API payload)
        self._properties = {
            'sid': payload.get('sid'),
            'friendly_name': payload.get('friendly_name'),
            'machine_name': payload.get('machine_name'),
            'fields': payload.get('fields'),
            'url': payload.get('url'),
        }

        # Context: created lazily by _proxy; fall back to the payload sid
        # when no explicit sid was supplied by the caller.
        self._context = None
        self._solution = {'sid': sid or self._properties['sid'], }

    @property
    def _proxy(self):
        """
        Generate an instance context for the instance, the context is capable of
        performing various actions.  All instance actions are proxied to the context

        :returns: SupportingDocumentTypeContext for this SupportingDocumentTypeInstance
        :rtype: twilio.rest.numbers.v2.regulatory_compliance.supporting_document_type.SupportingDocumentTypeContext
        """
        if self._context is None:
            self._context = SupportingDocumentTypeContext(self._version, sid=self._solution['sid'], )
        return self._context

    @property
    def sid(self):
        """
        :returns: The unique string that identifies the Supporting Document Type resource
        :rtype: unicode
        """
        return self._properties['sid']

    @property
    def friendly_name(self):
        """
        :returns: A human-readable description of the Supporting Document Type resource
        :rtype: unicode
        """
        return self._properties['friendly_name']

    @property
    def machine_name(self):
        """
        :returns: The machine-readable description of the Supporting Document Type resource
        :rtype: unicode
        """
        return self._properties['machine_name']

    @property
    def fields(self):
        """
        :returns: The required information for creating a Supporting Document
        :rtype: list[dict]
        """
        return self._properties['fields']

    @property
    def url(self):
        """
        :returns: The absolute URL of the Supporting Document Type resource
        :rtype: unicode
        """
        return self._properties['url']

    def fetch(self):
        """
        Fetch the SupportingDocumentTypeInstance

        :returns: The fetched SupportingDocumentTypeInstance
        :rtype: twilio.rest.numbers.v2.regulatory_compliance.supporting_document_type.SupportingDocumentTypeInstance
        """
        return self._proxy.fetch()

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
        return '<Twilio.Numbers.V2.SupportingDocumentTypeInstance {}>'.format(context)
| {
"content_hash": "51d9a95121c9f5a5171542eb08e06cf1",
"timestamp": "",
"source": "github",
"line_count": 313,
"max_line_length": 122,
"avg_line_length": 39.29392971246006,
"alnum_prop": 0.6646881860313847,
"repo_name": "twilio/twilio-python",
"id": "fad7bfdf228255cfb76d25f0d20fc604df5ed0b3",
"size": "12314",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "twilio/rest/numbers/v2/regulatory_compliance/supporting_document_type.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "234"
},
{
"name": "Makefile",
"bytes": "2157"
},
{
"name": "Python",
"bytes": "11241545"
}
],
"symlink_target": ""
} |
import sys
from .client import MantridClient
class MantridCli(object):
    """Command line interface to Mantrid.

    Dispatches ``<prog> <action> [args...]`` to the matching
    ``action_<name>`` method on this class.
    """

    def __init__(self, base_url):
        self.client = MantridClient(base_url)

    @classmethod
    def main(cls):
        """Entry point: talk to the local Mantrid management API."""
        cli = cls("http://localhost:8042")
        cli.run(sys.argv)

    @property
    def action_names(self):
        """Yield the names of all available actions (``action_*`` methods)."""
        for method_name in dir(self):
            if method_name.startswith("action_") \
               and method_name != "action_names":
                yield method_name[7:]

    def run(self, argv):
        """Validate the requested action from argv and dispatch to it."""
        # Work out what action we're doing
        try:
            action = argv[1]
        except IndexError:
            sys.stderr.write(
                "Please provide an action (%s).\n" % (
                    ", ".join(self.action_names),
                )
            )
            sys.exit(1)
        if action not in list(self.action_names):
            sys.stderr.write(
                "Action %s does not exist.\n" % (
                    action,
                )
            )
            sys.exit(1)
        # Run it
        getattr(self, "action_%s" % action)(*argv[2:])

    def action_list(self):
        "Lists all hosts on the LB"
        # Renamed local from ``format`` to avoid shadowing the builtin.
        fmt = "%-35s %-25s %-8s"
        # print(...) with a single argument works identically on Py2 and Py3.
        print(fmt % ("HOST", "ACTION", "SUBDOMS"))
        for host, details in sorted(self.client.get_all().items()):
            if details[0] in ("proxy", "mirror"):
                action = "%s<%s>" % (
                    details[0],
                    ",".join(
                        "%s:%s" % (backend_host, backend_port)
                        for backend_host, backend_port in details[1]['backends']
                    )
                )
            elif details[0] == "static":
                action = "%s<%s>" % (
                    details[0],
                    details[1]['type'],
                )
            elif details[0] == "redirect":
                action = "%s<%s>" % (
                    details[0],
                    details[1]['redirect_to'],
                )
            elif details[0] == "empty":
                action = "%s<%s>" % (
                    details[0],
                    details[1]['code'],
                )
            else:
                action = details[0]
            print(fmt % (host, action, details[2]))

    def action_set(self, hostname=None, action=None, subdoms=None, *args):
        "Adds a hostname to the LB, or alters an existing one"
        usage = "set <hostname> <action> <subdoms> [option=value, ...]"
        if hostname is None:
            sys.stderr.write("You must supply a hostname.\n")
            sys.stderr.write("Usage: %s\n" % usage)
            sys.exit(1)
        if action is None:
            sys.stderr.write("You must supply an action.\n")
            sys.stderr.write("Usage: %s\n" % usage)
            sys.exit(1)
        if subdoms is None or subdoms.lower() not in ("true", "false"):
            sys.stderr.write("You must supply True or False for the subdomains flag.\n")
            sys.stderr.write("Usage: %s\n" % usage)
            sys.exit(1)
        # Grab options
        options = {}
        for arg in args:
            if "=" not in arg:
                sys.stderr.write("%s is not a valid option (no =)\n" % (
                    arg
                ))
                sys.exit(1)
            key, value = arg.split("=", 1)
            options[key] = value
        # Sanity-check options
        # BUG FIX: the original tested ``action in ("proxy, mirror")`` -- a
        # one-element parenthesised STRING, i.e. a substring check that e.g.
        # "mir" would also pass.  Use a real tuple of action names.
        if action in ("proxy", "mirror") and "backends" not in options:
            sys.stderr.write("The %s action requires a backends option.\n" % action)
            sys.exit(1)
        if action == "static" and "type" not in options:
            sys.stderr.write("The %s action requires a type option.\n" % action)
            sys.exit(1)
        if action == "redirect" and "redirect_to" not in options:
            sys.stderr.write("The %s action requires a redirect_to option.\n" % action)
            sys.exit(1)
        if action == "empty" and "code" not in options:
            sys.stderr.write("The %s action requires a code option.\n" % action)
            sys.exit(1)
        # Expand some options from text to datastructure
        # ("host:port,host:port" -> [(host, port), ...])
        if "backends" in options:
            options['backends'] = [
                (lambda x: (x[0], int(x[1])))(bit.split(":", 1))
                for bit in options['backends'].split(",")
            ]
        # Set!
        self.client.set(
            hostname,
            [action, options, subdoms.lower() == "true"]
        )

    def action_delete(self, hostname):
        "Deletes the hostname from the LB."
        self.client.delete(
            hostname,
        )

    def action_stats(self, hostname=None):
        "Shows stats (possibly limited by hostname)"
        fmt = "%-35s %-11s %-11s %-11s %-11s"
        print(fmt % ("HOST", "OPEN", "COMPLETED", "BYTES IN", "BYTES OUT"))
        for host, details in sorted(self.client.stats(hostname).items()):
            print(fmt % (
                host,
                details.get("open_requests", 0),
                details.get("completed_requests", 0),
                details.get("bytes_received", 0),
                details.get("bytes_sent", 0),
            ))
| {
"content_hash": "fc5be68c363f3ee4e3fb4388f3eac1ea",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 88,
"avg_line_length": 36.48951048951049,
"alnum_prop": 0.4727865082407052,
"repo_name": "epio/mantrid",
"id": "f7308663c6efc7fb20c889452d367dd74dc0ad2d",
"size": "5218",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mantrid/cli.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "56020"
}
],
"symlink_target": ""
} |
import sys
import numpy as np
# ====================================================================
# Non-standard packages
# ====================================================================
import esbltaylor.esbltaylor as esbltaylorbuilt
# ====================================================================
# Functions
# ====================================================================
def binrot(theta, x_old, y_old):
    """Rotate coordinates by an angle theta.

    :param theta: float, angle in radians.
    :param x_old: numpy array, x coordinate in the old frame.
    :param y_old: numpy array, y coordinate in the old frame.
    :return: tuple (x_new, y_new) of numpy arrays in the rotated frame.
    """
    c, s = np.cos(theta), np.sin(theta)
    return x_old * c - y_old * s, x_old * s + y_old * c
# --------------------------------------------------------------------
def lens_rotation(alpha0, s0, dalpha, ds, t, tb):
    """Compute angle alpha and projected separation s at each date,
    applying a linear (first-order) lens orbital-motion correction.

    :param alpha0: float, angle alpha at reference date tb.
    :param s0: float, projected separation at reference date tb.
    :param dalpha: float, angular velocity at tb (radians.year^-1).
    :param ds: float, change rate of separation (year^-1).
    :param t: numpy array, dates (days).
    :param tb: float, time reference for the linear development (days).
    :return: tuple (alpha, s) of numpy arrays at each date.
    """
    DAYS_PER_YEAR = 365.25  # Julian year in days
    alpha = alpha0 - (t - tb) * dalpha / DAYS_PER_YEAR
    s = s0 + (t - tb) * ds / DAYS_PER_YEAR
    return alpha, s
# --------------------------------------------------------------------
def magnifcalc(t, param, Ds=None, tb=None, **kwargs_method):
    """Compute the magnification for dates t (thin wrapper over
    magnifcalc_wrap; drops the returned flag array).
    """
    # Start from the method-specific options and overlay the fixed entries.
    kwargs = dict(kwargs_method)
    kwargs['params'] = param
    kwargs['dates'] = t
    kwargs['tb'] = tb
    kwargs['Ds'] = Ds
    kwargs['degree'] = 2
    amp, _flag = magnifcalc_wrap(**kwargs)
    return amp
# --------------------------------------------------------------------
def magnifcalc_wrap(**kwargs):
    """Compute the binary-lens magnification for a set of dates.

    Required keyword arguments: ``params`` (model parameter dict) and
    ``dates`` (numpy array of dates).  Optional: ``tb``, ``Ds``, ``degree``
    and the lowercased method-tuning keys below.

    :return: tuple of numpy arrays (magnification, flag).
    """
    try:
        params = kwargs['params']
    except KeyError:
        chat = "No parameters received in magnifcalc(...) from test_import."
        sys.exit(chat)
    try:
        t = kwargs['dates']
    except KeyError:
        chat = "No dates received in magnifcalc(...) from test_import."
        sys.exit(chat)
    # BUG FIX: magnifcalc() passes tb=None and Ds=None explicitly, so the
    # keys exist and the original KeyError fallbacks never fired, crashing
    # later on arithmetic with None.  Treat None the same as "absent".
    tb = kwargs.get('tb')
    if tb is None:
        tb = params['t0']
    Ds = kwargs.get('Ds')
    if Ds is None:
        Ds = dict({'N': np.zeros(len(t)), 'E': np.zeros(len(t))})
    degree = kwargs.get('degree', 0)
    # Method-tuning options (keys are stored lowercased by the caller).
    err = float(kwargs.get('TriggerNextMethod'.lower(), 1e-3))
    ray_sigma = float(kwargs.get('PrecisionGoalRayshooting'.lower(), 1e-2))
    ray_rect_pix = int(kwargs.get('LocalMarginRayshooting'.lower(), 1))
    # Unpack the microlensing model parameters.
    t0 = params['t0']
    u0 = params['u0']
    tE = params['tE']
    rho = params['rho']
    gamma = params['gamma']
    q = params['q']
    piEN = params['piEN']
    piEE = params['piEE']
    alpha0 = params['alpha']
    s0 = params['s']
    dalpha = params['dadt']
    ds = params['dsdt']
    # Correction of the separation/angle due to lens orbital motion
    alpha, s = lens_rotation(alpha0, s0, dalpha, ds, t, tb)
    # Correction of the trajectory due to parallax
    DsN = Ds['N']
    DsE = Ds['E']
    tau = (t - t0) / tE + piEN * DsN + piEE * DsE
    beta = u0 + piEN * DsE - piEE * DsN
    x, y = binrot(alpha, tau, beta)
    # Shift origin: center of mass to Cassan (2008) convention
    GL1 = s * q / (1 + q)
    x = x - GL1
    # Compute magnification using PSBL.  Renamed the argument container from
    # ``list`` (which shadowed the builtin) to ``args``.
    args = [[q, rho, gamma, degree, err, ray_sigma, ray_rect_pix],
            s.tolist(), x.tolist(), y.tolist()]
    magnif = esbltaylorbuilt.magnifcalc(args)
    return np.array(magnif[0]), np.array(magnif[1])
# ====================================================================
# Main function
# ====================================================================
if __name__ == "__main__":
    # BUG FIX for the py3 branch: the Python 2 print STATEMENT is a syntax
    # error on Python 3; single-argument print(...) works on both.
    print('Please use with muLAn.')
| {
"content_hash": "67b2fd5919d5b8d5b474ecadc1b54700",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 102,
"avg_line_length": 31.12925170068027,
"alnum_prop": 0.5168269230769231,
"repo_name": "muLAn-project/muLAn",
"id": "8695d0cf643c0e3c1de29a71b0a9f9c24e0e5c4f",
"size": "4831",
"binary": false,
"copies": "1",
"ref": "refs/heads/py3",
"path": "muLAn/models/ESBLq.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "136757"
},
{
"name": "Fortran",
"bytes": "119664"
},
{
"name": "OpenEdge ABL",
"bytes": "2796505"
},
{
"name": "Python",
"bytes": "579049"
},
{
"name": "SWIG",
"bytes": "936"
}
],
"symlink_target": ""
} |
"""Paralllel ``project'' from loooong chain/trees objects
"""
# =============================================================================
__version__ = "$Revision$"
__author__ = "Vanya BELYAEV Ivan.Belyaev@itep.ru"
__date__ = "2011-06-07"
__all__ = (
'cproject' , ## parallel project from looong TChain
'tproject' , ## parallel project from looong TTree
)
# =============================================================================
from ostap.parallel.parallel import Task, WorkManager
import ostap.core.pyrouts
import ostap.trees.trees
import ostap.trees.cuts
import ROOT
# =============================================================================
# logging
# =============================================================================
from ostap.logger.logger import getLogger
if '__main__' == __name__ : logger = getLogger ( 'ostap.parallel.project' )
else : logger = getLogger ( __name__ )
# =============================================================================
## The simple task object for more efficient projection of loooong chains/trees
# into histogarms
# @see GaudiMP.Parallel
# @see GaudiMP.Parallel.Task
# @author Vanya BELYAEV Ivan.Belyaev@itep.ru
# @date 2014-09-23
class ProjectTask(Task) :
    """The simple task object for the efficient parallel
    projection of looooooong TChains/TTrees into histograms
    """
    ## constructor: histogram
    def __init__ ( self , histo , what , cuts = '' ) :
        """Constructor: the histogram
        >>> histo = ...
        >>> task  = ProjectTask ( histo )
        """
        self.histo = histo          # histogram template (cloned per job)
        self.what  = what           # variable/expression(s) to project
        self.cuts  = str ( cuts )   # selection/weighting criteria
        self.histo.Reset()          # start from an empty template
    ## local initialization (executed once in parent process)
    def initialize_local ( self ) :
        """Local initialization (executed once in parent process)
        """
        import ROOT,ostap.core.pyrouts
        # NOTE(review): output starts as a (counter, histogram) pair, which is
        # what merge_results expects -- confirm against the Task base class.
        self.__output = 0, self.histo.clone()
    ## remote initialization (executed for each sub-processs)
    def initialize_remote ( self , jobid = -1 ) :
        """Remote initialization (executed for each sub-processs
        """
        import ROOT,ostap.core.pyrouts
        self.__output = 0, self.histo.clone()
    ## finalization (executed at the end at parent process)
    def finalize ( self ) : pass
    ## the actual processing
    #   ``params'' is assumed to be a tuple/list :
    #  - the file name
    #  - the tree name in the file
    #  - the variable/expression/expression list of quantities to project
    #  - the selection/weighting criteria
    #  - the first entry in tree to process
    #  - number of entries to process
    def process ( self , jobid , item ) :
        """The actual processing
        ``params'' is assumed to be a tuple-like entity:
        - the file name
        - the tree name in the file
        - the variable/expression/expression list of quantities to project
        - the selection/weighting criteria
        - the first entry in tree to process
        - number of entries to process
        """
        # Suppress ROOT chatter while the imports/projection run.
        from ostap.logger.utils import logWarning
        with logWarning() :
            import ROOT
            import ostap.core.pyrouts
            import ostap.trees.trees
            import ostap.histos.histos
            import ostap.frames.frames
        from ostap.trees.trees import Chain, Tree
        # NOTE(review): ``input`` shadows the builtin of the same name.
        input = Chain ( name    = item.name    ,
                        files   = item.files   ,
                        first   = item.first   ,
                        nevents = item.nevents )
        chain   = input.chain
        first   = input.first
        nevents = input.nevents
        ## Create the output histogram   NB! (why here???)
        from ostap.core.core import ROOTCWD
        with ROOTCWD() :
            # Clone in the top-level ROOT directory so the histogram is not
            # owned by (and deleted with) any currently open file.
            ROOT.gROOT.cd()
            histo = self.histo.Clone ()
            self.__output = histo
        ## from ostap.trees.trees import tree_project_old
        ## self.__output = tree_project_old (
        ##     tree       = chain     , histo = histo     ,
        ##     what       = self.what , cuts  = self.cuts ,
        ##     options    = ''        ,
        ##     nentries   = nevents   , firstentry = first )
        from ostap.trees.trees import tree_project
        # NOTE(review): tree_project's return value replaces the bare
        # histogram stored above; merge_results indexes it like a
        # (counter, histogram) pair -- confirm tree_project's contract.
        self.__output = tree_project (
            tree  = chain     ,
            histo = histo     ,
            what  = self.what ,
            cuts  = self.cuts ,
            first = first     ,
            last  = -1 if nevents < 0 else first + nevents )
        return self.__output
    ## merge results
    def merge_results ( self , result , jobid ) :
        import ostap.histos.histos
        # Fold one job's (counter, histogram) result into the accumulated
        # output: counters are summed, histograms are Add()-ed in place.
        if not self.__output : self.__output = result
        else :
            filtered = self.__output[0] + result[0]
            self.__output[1].Add ( result[1] )
            self.__output = filtered, self.__output[1]
    ## get the results
    def results ( self ) :
        return self.__output
# =============================================================================
## make a projection of the loooooooong chain into histogram using
# multiprocessing functionality for per-file parallelisation
# @code
# >>> chain = ... ## large chain
# >>> histo = ... ## histogram template
# >>> project ( chain , histo , 'mass' , 'pt>10' )
# >>> chain.pproject ( histo , 'mass' , 'pt>0' ) ## ditto
# >>> chain.cproject ( histo , 'mass' , 'pt>0' ) ## ditto
# @endcode
# For 12-core machine, clear speedup factor of about 8 is achieved
# @author Vanya BELYAEV Ivan.Belyaev@itep.ru
# @date 2014-09-23
def cproject ( chain ,
               histo ,
               what ,
               cuts = '' ,
               nentries = -1 ,
               first = 0 ,
               chunk_size = -1 ,
               max_files = 5 ,
               silent = False , **kwargs ) :
    """Make a projection of the loooong chain into histogram
    >>> chain = ... ## large chain
    >>> histo = ... ## histogram template
    >>> cproject ( chain , histo , 'mass' , 'pt>10' )
    >>> chain.pproject ( histo , 'mass' , 'pt>0' ) ## ditto
    >>> chain.cproject ( histo , 'mass' , 'pt>0' ) ## ditto
    For 12-core machine, clear speedup factor of about 8 is achieved

    - chain      : the input TChain
    - histo      : the histogram template (filled in place)
    - what       : variable/expression(s) to project
    - cuts       : selection/weighting criteria
    - nentries   : number of entries to process (<0 : all)
    - first      : the first entry to process
    - chunk_size : chunk size for the per-file splitting
    - max_files  : maximal number of files per chunk
    Returns the (filtered, histo) pair produced by ProjectTask.
    """
    #
    from ostap.trees.trees import Chain
    ch = Chain ( chain , first = first , nevents = nentries )
    task = ProjectTask ( histo , what , cuts )
    wmgr = WorkManager ( silent = silent , **kwargs )
    # one job per chunk of files; results are merged by ProjectTask
    wmgr.process ( task , ch.split ( chunk_size = chunk_size , max_files = max_files ) )
    ## unpack results
    _f , _h = task.results ()
    filtered = _f
    histo += _h
    del _h
    return filtered , histo
ROOT.TChain.cproject = cproject
ROOT.TChain.pproject = cproject
# =============================================================================
## make a projection of the loooooooong tree into histogram using
# multiprocessing functionality for per-file parallelisation
# @code
#
# >>> tree = ... ## large tree
# >>> histo = ... ## histogram template
# >>> tproject ( tree , histo , 'mass' , 'pt>10' , maxentries = 1000000 )
# >>> tree.pproject ( histo , 'mass' , 'pt>10' ) ## ditto
# @endcode
# - significant gain can be achieved for very large ttrees with complicated expressions and cuts
# - <code>maxentries</code> parameter should be rather large
# @param tree the tree
# @param histo the histogram
# @param what variable/expression/varlist to be projected
# @param cuts selection/weighting criteria
# @param nentries number of entries to process (>0: all entries in th tree)
# @param first the first entry to process
# @param maxentries chunk size for parallel processing
# @author Vanya BELYAEV Ivan.Belyaev@itep.ru
# @date 2014-09-23
def tproject ( tree , ## the tree
               histo , ## histogram
               what , ## variable/expression/list to be projected
               cuts = '' , ## selection/weighting criteria
               nentries = -1 , ## number of entries
               first = 0 , ## the first entry
               chunk_size = 1000000 , ## chunk size
               max_files = 50 , ## not-used ....
               silent = False , **kwargs ) : ## silent processing
    """Make a projection of the loooong tree into histogram
    >>> tree = ... ## large chain
    >>> histo = ... ## histogram template
    >>> tproject ( tree , histo , 'mass' , 'pt>10' )
    >>> tree.pproject ( histo , 'mass' , 'pt>10' ) ## ditto
    - significant gain can be achieved for very large TTrees with complicated expressions and cuts
    - maxentries parameter should be rather large
    Arguments:
    - tree the tree
    - histo the histogram
    - what variable/expression/varlist to be projected
    - cuts selection/weighting criteria
    - nentries number of entries to process (>0: all entries in th tree)
    - first the first entry to process
    - maxentries chunk size for parallel processing
    """
    from ostap.trees.trees import Tree
    ch = Tree ( tree , first = first , nevents = nentries )
    task = ProjectTask ( histo , what , cuts )
    wmgr = WorkManager ( silent = silent , **kwargs )
    # split by entry chunks only (single file); max_files is unused here
    wmgr.process ( task, ch.split ( chunk_size = chunk_size ) )
    ## unpack results
    # NOTE(review): cproject unpacks results() as a (filtered, histo) pair
    # built by ProjectTask.merge_results, while here the raw result is
    # added to histo directly -- confirm the per-job return shape.
    _h = task.results ()
    histo += _h
    del _h
    return histo
ROOT.TTree.tproject = tproject
ROOT.TTree.pproject = tproject
# =============================================================================
_decorated_classes_ = (
ROOT.TTree ,
ROOT.TChain ,
)
_new_methods_ = (
ROOT.TTree .tproject ,
ROOT.TTree .pproject ,
ROOT.TChain.cproject ,
ROOT.TChain.pproject ,
)
# =============================================================================
if '__main__' == __name__ :
from ostap.utils.docme import docme
docme ( __name__ , logger = logger )
# =============================================================================
# The END
# =============================================================================
| {
"content_hash": "5c3e4030d901055537e51eecea1e0439",
"timestamp": "",
"source": "github",
"line_count": 277,
"max_line_length": 98,
"avg_line_length": 39.02888086642599,
"alnum_prop": 0.4969938026084543,
"repo_name": "OstapHEP/ostap",
"id": "2dd71b2c17c9b438ccd00d664530de402fcb4576",
"size": "11188",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ostap/parallel/parallel_project.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "41595313"
},
{
"name": "C++",
"bytes": "7480608"
},
{
"name": "CMake",
"bytes": "43634"
},
{
"name": "Dockerfile",
"bytes": "1028"
},
{
"name": "Python",
"bytes": "6658186"
},
{
"name": "Shell",
"bytes": "10365"
}
],
"symlink_target": ""
} |
import tensorflow as tf
def read_image_op(filename_queue, reader, height, width):
    """Build the TF op that reads one JPEG from `filename_queue`, scales it
    to [0, 1] floats, resizes it to (height, width) and randomly flips it
    left/right (light data augmentation)."""
    # The image-loading op, as commonly seen in the TensorFlow tutorials.
    _, raw = reader.read(filename_queue)
    read_image = tf.image.decode_jpeg(raw, channels=3)
    # decode_jpeg yields uint8 in [0, 255]; normalize to float in [0, 1]
    read_image = tf.to_float(read_image) / 255.
    read_image = tf.image.resize_images(read_image, [height, width])
    read_image = tf.image.random_flip_left_right(read_image)
    return read_image
| {
"content_hash": "bacb2ab28db776ce14415a2b354529c1",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 68,
"avg_line_length": 33.92307692307692,
"alnum_prop": 0.7006802721088435,
"repo_name": "YusukeSuzuki/tensorflow_minimum_template",
"id": "b6b54d201e8ea938493752d323b9a52152a7f7ee",
"size": "513",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "simple_autoencoder/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19862"
},
{
"name": "Shell",
"bytes": "770"
}
],
"symlink_target": ""
} |
__all__ = ['labels_to_matrix']
import numpy as np
from ..core import ants_image as iio
def labels_to_matrix(image, mask, target_labels=None, missing_val=np.nan):
    """
    Convert a labeled image to an n x m binary matrix where n = number of voxels
    and m = number of labels. Only includes values inside the provided mask while
    including background ( image == 0 ) for consistency with timeseries2matrix and
    other image to matrix operations.

    ANTsR function: `labels2matrix`

    Arguments
    ---------
    image : ANTsImage
        input label image

    mask : ANTsImage
        defines domain of interest

    target_labels : list/tuple
        defines target regions to be returned. if the target label does not exist
        in the input label image, then the matrix will contain a constant value
        of missing_val (default numpy.nan) in that row.

    missing_val : scalar
        value to use for missing label values

    Returns
    -------
    ndarray

    Example
    -------
    >>> import ants
    >>> fi = ants.image_read(ants.get_ants_data('r16')).resample_image((60,60),1,0)
    >>> mask = ants.get_mask(fi)
    >>> labs = ants.kmeans_segmentation(fi,3)['segmentation']
    >>> labmat = ants.labels_to_matrix(labs, mask)
    """
    if (not isinstance(image, iio.ANTsImage)) or (not isinstance(mask, iio.ANTsImage)):
        raise ValueError('image and mask must be ANTsImage types')

    # flatten the label values inside the mask
    vec = image[mask > 0]

    if target_labels is not None:
        the_labels = target_labels
    else:
        the_labels = np.sort(np.unique(vec))
    n_labels = len(the_labels)

    labels = np.zeros((n_labels, len(vec)))
    for i in range(n_labels):
        lab = float(the_labels[i])
        filler = (vec == lab).astype('float')
        if np.sum(filler) == 0:
            # label absent from the image: fill the row with `missing_val`.
            # BUGFIX: previously np.nan was hard-coded here, silently
            # ignoring the `missing_val` parameter.
            filler = np.full(len(vec), missing_val, dtype='float')
        labels[i, :] = filler
    return labels
| {
"content_hash": "ffc7eacd135c9c00d302d3c630b4b089",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 87,
"avg_line_length": 29.71875,
"alnum_prop": 0.6235541535226078,
"repo_name": "ANTsX/ANTsPy",
"id": "4744a2cf4e94091c97e956b85ef29793a00e62c7",
"size": "1903",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ants/utils/labels_to_matrix.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7463"
},
{
"name": "C",
"bytes": "922"
},
{
"name": "C++",
"bytes": "346752"
},
{
"name": "CMake",
"bytes": "12019"
},
{
"name": "Dockerfile",
"bytes": "1013"
},
{
"name": "Jupyter Notebook",
"bytes": "689688"
},
{
"name": "PowerShell",
"bytes": "7197"
},
{
"name": "Python",
"bytes": "750781"
},
{
"name": "Shell",
"bytes": "16720"
}
],
"symlink_target": ""
} |
class MockedClass:
    # Fixture class for the pipspect test-suite: the signatures and
    # docstrings below are the data being introspected, so they must stay
    # exactly as written (no reformatting, no added docstrings).
    def __init__(self):
        pass

    @classmethod
    def mocked_class_method(a, b, c, d=10, *args, **kwargs):
        # NOTE(review): with @classmethod the implicit class object is bound
        # to `a` -- presumably intentional for signature inspection; confirm.
        """This is a mocked class method"""
        pass

    def mocked_instance_method(self, a, b, c, d=10, *args, **kwargs):
        """This is a mocked instance method"""
        pass
def mocked_builtin_function(a, b, c):
    # Fixture: plain module-level function whose signature and docstring
    # are introspected by the tests; keep both exactly as written.
    """This is a mocked builtin function"""
    pass
| {
"content_hash": "6969b27ae6256e40b6437db2294797cd",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 69,
"avg_line_length": 23.166666666666668,
"alnum_prop": 0.5731414868105515,
"repo_name": "tohyongcheng/pipspect",
"id": "32816af217922f7c5d0636eeef43008a80016474",
"size": "417",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/test/mock.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5019"
}
],
"symlink_target": ""
} |
"""ask URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
    url(r'^admin/', admin.site.urls),  # Django admin site
    url(r'^', include('qa.urls')),  # all remaining URLs are routed to the qa app
]
| {
"content_hash": "1fc4745d851a68fd72e378777f8fab98",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 79,
"avg_line_length": 36.54545454545455,
"alnum_prop": 0.6902985074626866,
"repo_name": "bugness/learning-django",
"id": "ce62c33e9f54c2fcb61103171750a5ad2fc5b6e0",
"size": "804",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ask/ask/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10297"
},
{
"name": "Shell",
"bytes": "410"
}
],
"symlink_target": ""
} |
import http.server
import http.client
import json
import socketserver
class testHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
    """Tiny openFDA client.

    Serves a landing page with query forms and a handful of GET endpoints
    that query the openFDA drug adverse-event API and render the results
    as HTML bullet lists.
    """

    OPENFDA_API_URL = "api.fda.gov"
    OPENFDA_API_EVENT = "/drug/event.json"

    def get_main_page(self):
        """Return the HTML of the landing page with all the query forms."""
        html = """
        <html>
            <head>
                <title>OpenFDA Cool App</title>
            </head>
            <body>
                <h1>OpenFDA Client</h1>
                <form method="get" action="listDrugs">
                    <input type="submit" value="Medicinal Product"></input>
                    limit:
                    <input type="text" name="limit">
                    </input>
                </form>
                <form method="get" action="searchDrug">
                    <input type="text" name="drug"></input>
                    <input type="submit" value="Company Search by Drug">
                    </input>
                </form>
                <form method="get" action="listCompanies">
                    <input type="submit" value="Companies"></input>
                    limit:
                    <input type="text" name="limit">
                    </input>
                </form>
                <form method="get" action="searchCompany">
                    <input type="text" name="company"></input>
                    <input type="submit" value="Drug Search by Company">
                    </input>
                </form>
                <form method="get" action="listGender">
                    <input type="submit" value="patiensex">
                    <input type="text" name="limit"></input>
                    </input>
                </form>
            </body>
        </html>
        """
        return html

    def limit(self):
        """Extract the value after '=' from the request path.

        Returns the raw string, or the int 10 when the field was left empty
        (callers always pass it through str(), so the mixed type is fine).
        """
        limit = self.path.split("=")[1]
        if limit == '':
            limit = 10
        return limit

    # ---- drug list -------------------------------------------------------
    def read_data(self):
        """GET the first `limit` adverse-event records; return the raw JSON
        body as a string."""
        limit = self.limit()
        conn = http.client.HTTPSConnection(self.OPENFDA_API_URL)
        conn.request("GET", self.OPENFDA_API_EVENT + "?limit=" + str(limit))
        r1 = conn.getresponse()
        print(r1.status, r1.reason)
        data = r1.read().decode("utf8")  # bytes -> str
        return data

    def get_event(self):
        """Return the 'results' list from the API response."""
        event = json.loads(self.read_data())
        return event["results"]

    def get_med_list(self, results):
        """Extract the first medicinal product of each event."""
        lista = []
        for event in results:
            drug = event["patient"]["drug"]
            lista.append(drug[0]["medicinalproduct"])
        return lista

    def get_medicinal_product(self, lista):
        """Render the drug names as an HTML bullet list."""
        s = ''
        for med in lista:
            s += "<li>" + med + "</li>"
        html2 = """
        <html>
            <head>
                <title>Medicinal Product</title>
            </head>
            <body>
                <h1>Medicinal Product</h1>
                <ul>
                    %s
                </ul>
            </body>
        </html>""" % (s)
        return html2

    # ---- company list ----------------------------------------------------
    def get_company_list(self, results):
        """Extract the companynumb field of each event."""
        listac = []
        for event in results:
            listac.append(event["companynumb"])
        return listac

    def get_companies(self, listac):
        """Render the company numbers as an HTML bullet list."""
        s = ''
        for med in listac:
            s += "<li>" + med + "</li>"
        html3 = """
        <html>
            <head>
                <title>Companies</title>
            </head>
            <body>
                <h1>Companies</h1>
                <ul>
                    %s
                </ul>
            </body>
        </html>""" % (s)
        return html3

    # ---- search companies by drug ---------------------------------------
    def search_drug(self):
        """Query events whose medicinal product matches the drug given in
        the query string; return the raw JSON body."""
        drug = self.path.split("=")[1]
        conn = http.client.HTTPSConnection(self.OPENFDA_API_URL)
        conn.request("GET", self.OPENFDA_API_EVENT + "?limit=10"
                     + '&search=patient.drug.medicinalproduct:' + drug)
        r1 = conn.getresponse()
        return r1.read().decode("utf8")

    def get_companies_from_events(self, data):
        """Extract company numbers from a raw JSON events payload.

        BUGFIX: the old code ignored `data` and re-issued the HTTP request,
        fetching everything twice.
        """
        results = json.loads(data)["results"]
        companies = []
        for event in results:
            companies.append(event["companynumb"])
        return companies

    def get_companies_names(self, companies):
        """Render the company numbers found for a drug as an HTML list."""
        s = ''
        for drug in companies:
            s += "<li>" + drug + "</li>"
        # BUGFIX: closing tag was the invalid '</html3>'
        html4 = """
        <html>
            <head>
                <title> companies </title>
            </head>
            <body>
                <h1>Companies names</h1>
                <ul>
                    %s
                </ul>
            </body>
        </html>""" % (s)
        return html4

    # ---- search drugs by company ----------------------------------------
    def search_company(self):
        """Query events reported by the company given in the query string;
        return the raw JSON body.

        BUGFIX: the search field was 'patient.drug.medicinalproduct',
        i.e. it searched by drug name instead of by company number.
        """
        company = self.path.split("=")[1]
        conn = http.client.HTTPSConnection(self.OPENFDA_API_URL)
        conn.request("GET", self.OPENFDA_API_EVENT + "?limit=10"
                     + '&search=companynumb:' + company)
        r1 = conn.getresponse()
        return r1.read().decode("utf8")

    def get_drug_from_events(self, datac):
        """Extract medicinal products from a raw JSON events payload.

        BUGFIX: the old code ignored `datac` and re-issued the HTTP request.
        """
        results = json.loads(datac)["results"]
        drugs = []
        for event in results:
            drug = event["patient"]["drug"]
            drugs.append(drug[0]["medicinalproduct"])
        return drugs

    def get_drug_names(self, drugs):
        """Render the drugs found for a company as an HTML list."""
        s = ''
        for drug in drugs:
            s += "<li>" + drug + "</li>"
        # BUGFIX: closing tag was the invalid '</html5>'
        html5 = """
        <html>
            <head>
                <title> drugs </title>
            </head>
            <body>
                <h1>Drugs names</h1>
                <ul>
                    %s
                </ul>
            </body>
        </html>""" % (s)
        return html5

    # ---- patient sex -----------------------------------------------------
    def patientsex(self, results):
        """Extract the patientsex code of each event.

        openFDA returns it as a one-character string ('1'/'2'); append keeps
        one entry per event (the old `+=` relied on the single-character
        shape, extending the list char by char).
        """
        patient_list = []
        for event in results:
            patient_list.append(event["patient"]["patientsex"])
        return patient_list

    def get_patient_sex(self, patient_list):
        """Render the patient-sex codes as an HTML bullet list."""
        s = ''
        for sex in patient_list:
            s += "<li>" + sex + "</li>"
        html6 = """
        <html>
            <head>
                <title>listGender</title>
            </head>
            <body>
                <h1>Patient Sex</h1>
                <ul>
                    %s
                </ul>
            </body>
        </html>""" % (s)
        return html6

    def do_GET(self):
        """Route a GET request to the endpoint matching its path."""
        # Status code and headers first (sent for every path, as before)
        self.send_response(200)
        self.send_header('Content-type', 'text/html')
        self.end_headers()

        if self.path == "/":
            page = self.get_main_page()
        elif "/listDrugs?" in self.path:
            page = self.get_medicinal_product(self.get_med_list(self.get_event()))
        elif "/listCompanies?" in self.path:
            page = self.get_companies(self.get_company_list(self.get_event()))
        elif "/searchDrug" in self.path:
            data = self.search_drug()
            page = self.get_companies_names(self.get_companies_from_events(data))
        elif "/searchCompany" in self.path:
            datac = self.search_company()
            page = self.get_drug_names(self.get_drug_from_events(datac))
        elif "/listGender?" in self.path:
            page = self.get_patient_sex(self.patientsex(self.get_event()))
        else:
            page = None  # unknown path: headers only, no body (as before)

        if page is not None:
            self.wfile.write(bytes(page, "utf8"))
| {
"content_hash": "99b6cee2e9ce97c56ddc5d04ffe16876",
"timestamp": "",
"source": "github",
"line_count": 299,
"max_line_length": 148,
"avg_line_length": 30.548494983277592,
"alnum_prop": 0.495511276549157,
"repo_name": "cgomezh/openfda",
"id": "12cb6d50b3ac7ed171d800fc8725f3700b609d36",
"size": "10220",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "11544"
}
],
"symlink_target": ""
} |
from typing import Any, List, TYPE_CHECKING
import importlib
import urllib.parse
from ._recovery_services_backup_client import RecoveryServicesBackupClient as RecoveryServicesBackupClientGenerated
from azure.core.pipeline.policies import SansIOHTTPPolicy
if TYPE_CHECKING:
from azure.core.credentials import TokenCredential
class RemoveDuplicateParamsPolicy(SansIOHTTPPolicy):
    """Pipeline policy that de-duplicates selected query parameters,
    keeping only the LAST occurrence of each listed name."""

    def __init__(self, duplicate_param_names):
        # type: (List[str]) -> None
        #: query-parameter names to collapse to a single (last) value
        self.duplicate_param_names = duplicate_param_names

    def on_request(self, request):
        # Parse the outgoing URL, keep only the last value (`v[-1:]`) for the
        # listed parameter names, all values for everything else.
        parsed_url = urllib.parse.urlparse(request.http_request.url)
        query_params = urllib.parse.parse_qs(parsed_url.query)
        filtered_query_params = {k: v[-1:] if k in self.duplicate_param_names else v for k, v in query_params.items()}
        # Rebuild by textually removing the query substring and appending the
        # re-encoded parameters (the original '?' separator is preserved).
        # NOTE(review): str.replace removes the FIRST matching substring --
        # assumes the query text does not also appear in the path; confirm.
        request.http_request.url = request.http_request.url.replace(parsed_url.query, "") + urllib.parse.urlencode(
            filtered_query_params, doseq=True
        )
        return super().on_request(request)
DUPLICATE_PARAMS_POLICY = RemoveDuplicateParamsPolicy(duplicate_param_names=["$filter", "$skiptoken", "api-version"])
class RecoveryServicesBackupClient(RecoveryServicesBackupClientGenerated):
    __doc__ = RecoveryServicesBackupClientGenerated.__doc__

    def __init__(
        self,
        credential: "TokenCredential",
        subscription_id: str,
        base_url: str = "https://management.azure.com",
        **kwargs: Any
    ) -> None:
        # Inject the duplicate-parameter-stripping policy into whatever the
        # caller passed as per_call_policies.
        per_call_policies = kwargs.pop("per_call_policies", [])
        try:
            # common case: a mutable list of policies
            per_call_policies.append(DUPLICATE_PARAMS_POLICY)
        except AttributeError:
            # caller passed a single policy object -- wrap both in a list
            per_call_policies = [per_call_policies, DUPLICATE_PARAMS_POLICY]
        super().__init__(
            credential=credential,
            subscription_id=subscription_id,
            base_url=base_url,
            per_call_policies=per_call_policies,
            **kwargs
        )
# This file is used for handwritten extensions to the generated code. Example:
# https://github.com/Azure/azure-sdk-for-python/blob/main/doc/dev/customize_code/how-to-patch-sdk-code.md
def patch_sdk():
    """Install the customized ``RecoveryServicesBackupClient`` onto the
    generated ``activestamp`` package so consumers import the patched class."""
    package = importlib.import_module("azure.mgmt.recoveryservicesbackup.activestamp")
    package.RecoveryServicesBackupClient = RecoveryServicesBackupClient
| {
"content_hash": "2c53fa71f3a89a9776a329a65b9677e7",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 118,
"avg_line_length": 40.64912280701754,
"alnum_prop": 0.6952956409149763,
"repo_name": "Azure/azure-sdk-for-python",
"id": "41fd81d5f41ec3da1005149950fe267d70cca666",
"size": "3635",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/recoveryservices/azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/activestamp/_patch.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
# Added Fortran compiler support to config. Currently useful only for
# try_compile call. try_run works but is untested for most of Fortran
# compilers (they must define linker_exe first).
# Pearu Peterson
from __future__ import division, absolute_import, print_function
import os, signal
import warnings
import sys
from distutils.command.config import config as old_config
from distutils.command.config import LANG_EXT
from distutils import log
from distutils.file_util import copy_file
from distutils.ccompiler import CompileError, LinkError
import distutils
from numpy.distutils.exec_command import exec_command
from numpy.distutils.mingw32ccompiler import generate_manifest
from numpy.distutils.command.autodist import check_inline, check_compiler_gcc4
from numpy.distutils.compat import get_exception
# Register Fortran source extensions in distutils' language map so that
# try_compile/try_link can dispatch .f/.f90 sources to the Fortran compiler.
LANG_EXT['f77'] = '.f'
LANG_EXT['f90'] = '.f90'
class config(old_config):
    # numpy.distutils extension of distutils' `config` command: adds Fortran
    # compiler handling plus autoconf-style feature checks (headers, decls,
    # macros, types, type sizes, functions, compiler features).
    old_config.user_options += [
        ('fcompiler=', None, "specify the Fortran compiler type"),
        ]
    def initialize_options(self):
        # No Fortran compiler selected by default; distutils sets up the rest.
        self.fcompiler = None
        old_config.initialize_options(self)
    def try_run(self, body, headers=None, include_dirs=None,
                libraries=None, library_dirs=None, lang="c"):
        # Deprecated pass-through to distutils' try_run: running a freshly
        # built executable breaks cross-compilation, hence the loud warning.
        warnings.warn("\n+++++++++++++++++++++++++++++++++++++++++++++++++\n" \
                      "Usage of try_run is deprecated: please do not \n" \
                      "use it anymore, and avoid configuration checks \n" \
                      "involving running executable on the target machine.\n" \
                      "+++++++++++++++++++++++++++++++++++++++++++++++++\n",
                      DeprecationWarning)
        return old_config.try_run(self, body, headers, include_dirs, libraries,
                                  library_dirs, lang)
    def _check_compiler (self):
        # Run distutils' own compiler setup first, then bolt on a Fortran
        # compiler instance configured from the distribution options.
        old_config._check_compiler(self)
        from numpy.distutils.fcompiler import FCompiler, new_fcompiler

        if sys.platform == 'win32' and self.compiler.compiler_type == 'msvc':
            # XXX: hack to circumvent a python 2.6 bug with msvc9compiler:
            # initialize call query_vcvarsall, which throws an IOError, and
            # causes an error along the way without much information. We try to
            # catch it here, hoping it is early enough, and print an helpful
            # message instead of Error: None.
            if not self.compiler.initialized:
                try:
                    self.compiler.initialize()
                except IOError:
                    e = get_exception()
                    msg = """\
Could not initialize compiler instance: do you have Visual Studio
installed ? If you are trying to build with mingw, please use python setup.py
build -c mingw32 instead ). If you have Visual Studio installed, check it is
correctly installed, and the right version (VS 2008 for python 2.6, VS 2003 for
2.5, etc...). Original exception was: %s, and the Compiler
class was %s
============================================================================""" \
                        % (e, self.compiler.__class__.__name__)
                    print ("""\
============================================================================""")
                    raise distutils.errors.DistutilsPlatformError(msg)

        if not isinstance(self.fcompiler, FCompiler):
            # Build the Fortran compiler lazily; it may end up None when no
            # Fortran compiler is available on this platform.
            self.fcompiler = new_fcompiler(compiler=self.fcompiler,
                                           dry_run=self.dry_run, force=1,
                                           c_compiler=self.compiler)
            if self.fcompiler is not None:
                self.fcompiler.customize(self.distribution)
                if self.fcompiler.get_version():
                    self.fcompiler.customize_cmd(self)
                    self.fcompiler.show_customization()
def _wrap_method(self, mth, lang, args):
from distutils.ccompiler import CompileError
from distutils.errors import DistutilsExecError
save_compiler = self.compiler
if lang in ['f77', 'f90']:
self.compiler = self.fcompiler
try:
ret = mth(*((self,)+args))
except (DistutilsExecError, CompileError):
msg = str(get_exception())
self.compiler = save_compiler
raise CompileError
self.compiler = save_compiler
return ret
    def _compile (self, body, headers, include_dirs, lang):
        # Dispatch through _wrap_method so Fortran sources use self.fcompiler.
        return self._wrap_method(old_config._compile, lang,
                                 (body, headers, include_dirs, lang))
    def _link (self, body,
               headers, include_dirs,
               libraries, library_dirs, lang):
        # MSVC needs special handling: the Fortran runtime libraries/paths
        # must be made visible to the Microsoft linker.
        if self.compiler.compiler_type=='msvc':
            libraries = (libraries or [])[:]
            library_dirs = (library_dirs or [])[:]
            if lang in ['f77', 'f90']:
                lang = 'c' # always use system linker when using MSVC compiler
                if self.fcompiler:
                    for d in self.fcompiler.library_dirs or []:
                        # correct path when compiling in Cygwin but with
                        # normal Win Python
                        if d.startswith('/usr/lib'):
                            s, o = exec_command(['cygpath', '-w', d],
                                               use_tee=False)
                            if not s: d = o
                        library_dirs.append(d)
                    for libname in self.fcompiler.libraries or []:
                        if libname not in libraries:
                            libraries.append(libname)
            for libname in libraries:
                if libname.startswith('msvc'): continue
                # look for an MSVC-style import library first ...
                fileexists = False
                for libdir in library_dirs or []:
                    libfile = os.path.join(libdir, '%s.lib' % (libname))
                    if os.path.isfile(libfile):
                        fileexists = True
                        break
                if fileexists: continue
                # make g77-compiled static libs available to MSVC
                fileexists = False
                for libdir in library_dirs:
                    libfile = os.path.join(libdir, 'lib%s.a' % (libname))
                    if os.path.isfile(libfile):
                        # copy libname.a file to name.lib so that MSVC linker
                        # can find it
                        libfile2 = os.path.join(libdir, '%s.lib' % (libname))
                        copy_file(libfile, libfile2)
                        self.temp_files.append(libfile2)
                        fileexists = True
                        break
                if fileexists: continue
                log.warn('could not find library %r in directories %s' \
                         % (libname, library_dirs))
        elif self.compiler.compiler_type == 'mingw32':
            # mingw needs an embedded manifest when linking against the MSVCRT
            generate_manifest(self)
        return self._wrap_method(old_config._link, lang,
                                 (body, headers, include_dirs,
                                  libraries, library_dirs, lang))
    def check_header(self, header, include_dirs=None, library_dirs=None, lang='c'):
        # Availability check: succeeds iff `header` can be #included.
        # (`library_dirs` and `lang` are accepted for API symmetry but unused.)
        self._check_compiler()
        return self.try_compile(
                "/* we need a dummy line to make distutils happy */",
                [header], include_dirs)
    def check_decl(self, symbol,
                   headers=None, include_dirs=None):
        # True iff `symbol` is usable: either defined as a macro, or declared
        # by the given headers (the #ifndef branch references it as a value).
        self._check_compiler()
        body = """
int main()
{
#ifndef %s
    (void) %s;
#endif
    ;
    return 0;
}""" % (symbol, symbol)

        return self.try_compile(body, headers, include_dirs)
    def check_macro_true(self, symbol,
                         headers=None, include_dirs=None):
        # True iff the preprocessor evaluates `#if symbol` as true
        # (i.e. the macro is defined and expands to a non-zero value).
        self._check_compiler()
        body = """
int main()
{
#if %s
#else
#error false or undefined macro
#endif
    ;
    return 0;
}""" % (symbol,)

        return self.try_compile(body, headers, include_dirs)
def check_type(self, type_name, headers=None, include_dirs=None,
library_dirs=None):
"""Check type availability. Return True if the type can be compiled,
False otherwise"""
self._check_compiler()
# First check the type can be compiled
body = r"""
int main() {
if ((%(name)s *) 0)
return 0;
if (sizeof (%(name)s))
return 0;
}
""" % {'name': type_name}
st = False
try:
try:
self._compile(body % {'type': type_name},
headers, include_dirs, 'c')
st = True
except distutils.errors.CompileError:
st = False
finally:
self._clean()
return st
    def check_type_size(self, type_name, headers=None, include_dirs=None, library_dirs=None, expected=None):
        """Check size of a given type.

        Returns sizeof(type_name) as determined by compile-time checks only
        (no test executable is run): a negative-size-array trick makes the
        snippet fail to compile whenever the asserted relation is false.
        """
        self._check_compiler()

        # First check the type can be compiled
        body = r"""
typedef %(type)s npy_check_sizeof_type;
int main ()
{
    static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) >= 0)];
    test_array [0] = 0

    ;
    return 0;
}
"""
        self._compile(body % {'type': type_name},
                      headers, include_dirs, 'c')
        self._clean()

        if expected:
            # Fast path: try the caller's candidate sizes first.
            body = r"""
typedef %(type)s npy_check_sizeof_type;
int main ()
{
    static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) == %(size)s)];
    test_array [0] = 0

    ;
    return 0;
}
"""
            for size in expected:
                try:
                    self._compile(body % {'type': type_name, 'size': size},
                                  headers, include_dirs, 'c')
                    self._clean()
                    return size
                except CompileError:
                    pass

        # this fails to *compile* if size > sizeof(type)
        body = r"""
typedef %(type)s npy_check_sizeof_type;
int main ()
{
    static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) <= %(size)s)];
    test_array [0] = 0

    ;
    return 0;
}
"""

        # The principle is simple: we first find low and high bounds of size
        # for the type, where low/high are looked up on a log scale. Then, we
        # do a binary search to find the exact size between low and high
        low = 0
        mid = 0
        while True:
            try:
                self._compile(body % {'type': type_name, 'size': mid},
                              headers, include_dirs, 'c')
                self._clean()
                break
            except CompileError:
                #log.info("failure to test for bound %d" % mid)
                low = mid + 1
                mid = 2 * mid + 1

        high = mid
        # Binary search:
        while low != high:
            mid = (high - low) // 2 + low
            try:
                self._compile(body % {'type': type_name, 'size': mid},
                              headers, include_dirs, 'c')
                self._clean()
                high = mid
            except CompileError:
                low = mid + 1
        return low
    def check_func(self, func,
                   headers=None, include_dirs=None,
                   libraries=None, library_dirs=None,
                   decl=False, call=False, call_args=None):
        """Return True if `func` can be linked against.

        decl : emit a declaration for the function -- either the given
               string, or a generic ``int func(void);`` when truthy.
        call : actually call the function (with `call_args`), which is what
               defeats MSVC intrinsics (see the pragma below).
        """
        # clean up distutils's config a bit: add void to main(), and
        # return a value.
        self._check_compiler()
        body = []
        if decl:
            if type(decl) == str:
                body.append(decl)
            else:
                body.append("int %s (void);" % func)
        # Handle MSVC intrinsics: force MS compiler to make a function call.
        # Useful to test for some functions when built with optimization on, to
        # avoid build error because the intrinsic and our 'fake' test
        # declaration do not match.
        body.append("#ifdef _MSC_VER")
        body.append("#pragma function(%s)" % func)
        body.append("#endif")
        body.append("int main (void) {")
        if call:
            if call_args is None:
                call_args = ''
            body.append("  %s(%s);" % (func, call_args))
        else:
            # referencing the symbol is enough to force the link
            body.append("  %s;" % func)
        body.append("  return 0;")
        body.append("}")
        body = '\n'.join(body) + "\n"

        return self.try_link(body, headers, include_dirs,
                             libraries, library_dirs)
    def check_funcs_once(self, funcs,
                         headers=None, include_dirs=None,
                         libraries=None, library_dirs=None,
                         decl=False, call=False, call_args=None):
        """Check a list of functions at once.

        This is useful to speed up things, since all the functions in the
        funcs list will be put in one compilation unit.

        Arguments
        ---------
        funcs : seq
            list of functions to test
        include_dirs : seq
            list of header paths
        libraries : seq
            list of libraries to link the code snippet to
        library_dirs : seq
            list of library paths
        decl : dict
            for every (key, value), the declaration in the value will be
            used for function in key. If a function is not in the
            dictionary, no declaration will be used.
        call : dict
            for every item (f, value), if the value is True, a call will be
            done to the function f.
        call_args : dict
            for every function f with a truthy ``call[f]``, ``call_args[f]``
            (when present and non-empty) is used as the argument string of
            the generated call.
        """
        self._check_compiler()
        body = []
        if decl:
            for f, v in decl.items():
                if v:
                    body.append("int %s (void);" % f)
        # Handle MS intrinsics. See check_func for more info.
        body.append("#ifdef _MSC_VER")
        for func in funcs:
            body.append("#pragma function(%s)" % func)
        body.append("#endif")
        body.append("int main (void) {")
        if call:
            for f in funcs:
                if f in call and call[f]:
                    if not (call_args and f in call_args and call_args[f]):
                        args = ''
                    else:
                        args = call_args[f]
                    body.append("  %s(%s);" % (f, args))
                else:
                    body.append("  %s;" % f)
        else:
            for f in funcs:
                body.append("  %s;" % f)
        body.append("  return 0;")
        body.append("}")
        body = '\n'.join(body) + "\n"
        return self.try_link(body, headers, include_dirs,
                             libraries, library_dirs)
    def check_inline(self):
        """Return the inline keyword recognized by the compiler, empty string
        otherwise."""
        # Thin wrapper around the module-level check_inline() helper.
        return check_inline(self)
    def check_compiler_gcc4(self):
        """Return True if the C compiler is gcc >= 4."""
        # Thin wrapper around the module-level check_compiler_gcc4() helper.
        return check_compiler_gcc4(self)
    def get_output(self, body, headers=None, include_dirs=None,
                   libraries=None, library_dirs=None,
                   lang="c", use_tee=None):
        """Try to compile, link to an executable, and run a program
        built from 'body' and 'headers'. Returns the exit status code
        of the program and its output.

        Deprecated (see the warning below): configuration checks that run
        an executable on the target machine break cross-compilation.
        """
        warnings.warn("\n+++++++++++++++++++++++++++++++++++++++++++++++++\n" \
                      "Usage of get_output is deprecated: please do not \n" \
                      "use it anymore, and avoid configuration checks \n" \
                      "involving running executable on the target machine.\n" \
                      "+++++++++++++++++++++++++++++++++++++++++++++++++\n",
                      DeprecationWarning)
        from distutils.ccompiler import CompileError, LinkError
        self._check_compiler()
        # Defaults reported when compiling/linking fails.
        exitcode, output = 255, ''
        try:
            # Capture everything written to stdout during the build, so the
            # build log can be surfaced if linking raises.
            grabber = GrabStdout()
            try:
                src, obj, exe = self._link(body, headers, include_dirs,
                                           libraries, library_dirs, lang)
                grabber.restore()
            except:
                output = grabber.data
                grabber.restore()
                raise
            exe = os.path.join('.', exe)
            exitstatus, output = exec_command(exe, execute_in='.',
                                              use_tee=use_tee)
            if hasattr(os, 'WEXITSTATUS'):
                # POSIX: decode the wait()-style status word into an exit
                # code, and surface death-by-signal explicitly.
                exitcode = os.WEXITSTATUS(exitstatus)
                if os.WIFSIGNALED(exitstatus):
                    sig = os.WTERMSIG(exitstatus)
                    log.error('subprocess exited with signal %d' % (sig,))
                    if sig == signal.SIGINT:
                        # control-C
                        raise KeyboardInterrupt
            else:
                # Non-POSIX platform: the status is already the exit code.
                exitcode = exitstatus
            log.info("success!")
        except (CompileError, LinkError):
            log.info("failure.")
        self._clean()
        return exitcode, output
class GrabStdout(object):
    """Temporarily replace ``sys.stdout``, recording everything written.

    Output is still forwarded ("teed") to the real stdout; the captured
    text accumulates in the ``data`` attribute until ``restore()`` puts
    the original stream back.
    """

    def __init__(self):
        self.sys_stdout = sys.stdout
        self.data = ''
        sys.stdout = self

    def write(self, data):
        # Forward to the real stream, then record a copy.
        self.sys_stdout.write(data)
        self.data = self.data + data

    def flush(self):
        self.sys_stdout.flush()

    def restore(self):
        # Reinstall the original stdout saved at construction time.
        sys.stdout = self.sys_stdout
| {
"content_hash": "247643c7832b682a07c3c97c4711c17d",
"timestamp": "",
"source": "github",
"line_count": 465,
"max_line_length": 108,
"avg_line_length": 37.20860215053764,
"alnum_prop": 0.5105768119292567,
"repo_name": "techtonik/numpy",
"id": "0086e36328ca6de6535957f617265cec6802f4b4",
"size": "17302",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "numpy/distutils/command/config.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import sys
import os
__author__ = 'João Trevizoli Esteves'
class ErrorInfo(object):
    """Snapshot of the exception currently being handled.

    Must be instantiated inside an ``except`` block; it captures the
    exception type/object/traceback plus the file name and line number
    where the exception surfaced.
    """

    def __init__(self):
        self.exc_type, self.exc_obj, self.exc_tb = sys.exc_info()
        self.file_name = self.get_file_name()
        self.line_number = self.get_line_number()

    def get_line_number(self):
        """Line number recorded on the traceback."""
        return self.exc_tb.tb_lineno

    def get_file_name(self):
        """Base name of the source file the traceback points at."""
        return os.path.basename(self.exc_tb.tb_frame.f_code.co_filename)
class StatusError(Exception):
    """Raised for status-related failures; see raising call sites for exact semantics."""
    pass
class BadXpath(Exception):
    """Raised for invalid/failed XPath expressions; see raising call sites for exact semantics."""
    pass
class BadUrl(Exception):
    """Raised for invalid/unreachable URLs; see raising call sites for exact semantics."""
    pass
| {
"content_hash": "eb8506cc64a8002f6dcf41f0210fdcac",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 73,
"avg_line_length": 19.4,
"alnum_prop": 0.6374570446735395,
"repo_name": "joaoTrevizoli/climatemps-data",
"id": "ce6304a50dbe1d507878624a8b19b94272501880",
"size": "583",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spider/botsExceptions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "19576"
}
],
"symlink_target": ""
} |
def test(verbose=False):
    """Run the package's nose test suite and return its exit code.

    NOTE(review): ``verbose`` is accepted but unused, mirroring the
    original signature.
    """
    import os
    import nose
    from . import tests

    suite_dir = os.path.dirname(tests.__file__)
    try:
        return nose.main(argv=['nosetests', '--exe', suite_dir])
    except SystemExit as exc:
        return exc.code
| {
"content_hash": "091d568bfc6289c62e0dbb658422c9e6",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 47,
"avg_line_length": 21.666666666666668,
"alnum_prop": 0.6038461538461538,
"repo_name": "tmerr/trevornet",
"id": "7734daec827d54497ba1445959dba1afa060fe53",
"size": "260",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "trevornet/trevornet/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "35961"
}
],
"symlink_target": ""
} |
from flask import Flask, g
from proxypool.storages.redis import RedisClient
from proxypool.setting import API_HOST, API_PORT, API_THREADED, IS_DEV
# Public API of this module: the flask application object.
__all__ = ['app']
app = Flask(__name__)
# Enable flask debug mode when running in a development environment.
if IS_DEV:
    app.debug = True
def get_conn():
    """
    Return the request-scoped redis client, creating it on first use.

    The client is cached on flask's ``g`` object so each request reuses
    a single RedisClient instance.
    """
    try:
        return g.redis
    except AttributeError:
        g.redis = RedisClient()
        return g.redis
@app.route('/')
def index():
    """
    Serve the home page banner (replace with your own template as needed).
    """
    welcome = '<h2>Welcome to Proxy Pool System</h2>'
    return welcome
@app.route('/random')
def get_proxy():
    """
    Return one randomly chosen proxy as plain text.
    """
    proxy = get_conn().random()
    return proxy.string()
@app.route('/all')
def get_proxy_all():
    """
    Return every proxy in the pool, one per line (each line newline-terminated).
    Returns an empty string when the pool is empty.
    """
    proxies = get_conn().all()
    if not proxies:
        return ''
    return ''.join(str(proxy) + '\n' for proxy in proxies)
@app.route('/count')
def get_count():
    """
    Return the number of proxies currently in the pool, as text.
    """
    return str(get_conn().count())
if __name__ == '__main__':
    # Run the flask development server with the configured host/port/threading.
    app.run(host=API_HOST, port=API_PORT, threaded=API_THREADED)
| {
"content_hash": "d36f79699e1b1b9b4c1c806c735d495f",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 70,
"avg_line_length": 18.420289855072465,
"alnum_prop": 0.5814319433516916,
"repo_name": "Python3WebSpider/ProxyPool",
"id": "f7138c64854f49ece2c496eff5f4166bc32c8367",
"size": "1271",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "proxypool/processors/server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "666"
},
{
"name": "Mustache",
"bytes": "1507"
},
{
"name": "Python",
"bytes": "41330"
},
{
"name": "Shell",
"bytes": "82"
}
],
"symlink_target": ""
} |
"""Provides basic modules of Rime.
Basic package contains implementations of interfaces provided in core package,
providing standard commands/targets like build/test of problem/solution etc.
"""
| {
"content_hash": "ade7ec1bfddb515f92cdacb798f32d8a",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 78,
"avg_line_length": 39.2,
"alnum_prop": 0.8112244897959183,
"repo_name": "AI-comp/Orientation2015Problems",
"id": "6299f76d55a49a062067628d314c358990cf77b6",
"size": "1311",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rime/basic/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "8322"
},
{
"name": "C++",
"bytes": "20831"
},
{
"name": "Java",
"bytes": "41886"
},
{
"name": "Python",
"bytes": "195151"
},
{
"name": "Ruby",
"bytes": "119"
}
],
"symlink_target": ""
} |
import subprocess as sp
# Motivation:
# It is often useful to have Python parse the output of a shell command.
# The subprocess.call() and subprocess.check_output() functions only
# return the status of the command that they execute.
#
# Consider the following command:
# $ grep -r foo *
#
# It could be useful to parse the output and perform some operation on
# the files that contain 'foo'. The call() and check_output() functions
# will not give the Python application access to those names. The
# subprocess.Popen() function can be used to get the names.
def call_sp():
    """
    Shell out to a canned grep command and print each matching line.

    Demonstrates how a Python application can parse the standard output
    of a child process started with subprocess.Popen().
    """
    # The command can be a regular string. It does not need to be
    # a list (like call() or check_output()) because shell=True is used.
    command = "grep __main__ *"
    # Popen arguments 'shell' and 'stdout' should be set otherwise
    # exceptions will be thrown.
    proc = sp.Popen(command, shell=True, stdout=sp.PIPE)
    # Note the difference between the read methods on stdout.
    # Ex. stream contents: "Hello World!\nBye!\n"
    #
    #   read()      - "Hello World!\nBye!\n"            (one string, everything)
    #   readline()  - "Hello World!\n"                  (one string, first line)
    #   readlines() - ["Hello World!\n", "Bye!\n"]      (list of lines)
    #
    # BUG FIX: the original code called readline(), which returns a single
    # string; iterating over it printed one *character* per line. Use
    # readlines() so the loop visits whole lines as intended.
    for line in proc.stdout.readlines():
        print(line.strip())
    # Reap the child so it does not linger as a zombie.
    proc.wait()


if __name__ == "__main__":
    call_sp()
if __name__ == "__main__":
    # Script entry point: run the subprocess demo.
    call_sp()
| {
"content_hash": "ce75d721212c27a64a85936907173777",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 72,
"avg_line_length": 32.59090909090909,
"alnum_prop": 0.6471408647140865,
"repo_name": "civissmith/python_examples",
"id": "1f517fb8c8e71e436d21f61feb5c9c4cb6e049bc",
"size": "1779",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "parse_sp.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "41039"
}
],
"symlink_target": ""
} |
"""Process flow
The core process flow for managing the syncing of torrents to remote location.
"""
import logging
from seedbox import db
from seedbox.process import manager
from seedbox.process import workflow
from seedbox.torrent import loader
LOG = logging.getLogger(__name__)
def _get_work(dbapi):
    """Load newly added torrents and build a Workflow per active torrent.

    Torrents with no parsed media files are skipped, since there is
    nothing to process for them.
    """
    # Scan the torrent directory and cache any new torrents first.
    loader.load_torrents(dbapi)

    workflows = []
    for torrent in dbapi.get_torrents_active():
        LOG.debug('torrent %s media files: %s',
                  torrent.torrent_id, torrent.media_files)
        # Only torrents that actually have media files get a workflow.
        if torrent.media_files:
            LOG.debug('creating workflow for torrent: %s', torrent)
            workflows.append(workflow.Workflow(dbapi, torrent))
    return workflows
def start():
    """The primary entry point for the process.

    Repeatedly loads eligible torrents, builds a workflow per torrent,
    and drives each workflow's tasks through the TaskManager until no
    active torrents remain. The manager and database are always cleaned
    up on exit.
    """
    dbapi = db.dbapi()
    mgr = manager.TaskManager()
    flows = []
    try:
        while True:
            # No flows happens on the initial pass and after all
            # previously found torrents finished: look for new work.
            if not flows:
                flows = _get_work(dbapi)
                # Still nothing to do: we are done.
                if not flows:
                    break

            # Gather the next batch of tasks from every workflow.
            for wf in flows:
                mgr.add_tasks(list(wf.next_tasks()))

            # Execute the batch via the TaskManager.
            results = mgr.run()
            for item in results:
                LOG.debug('saving media: %s', item)
                # The results are the updated media entities; persist them.
                dbapi.save_media(item)

            # Advance each workflow one step and drop the finished ones.
            # BUG FIX: the original removed items from ``flows`` while
            # iterating over it (``for wf in flows: ... flows.remove(wf)``),
            # which skips the element following each removal. Rebuilding
            # the list processes every workflow exactly once.
            flows = [wf for wf in flows if not wf.run()]
    finally:
        mgr.shutdown()
        dbapi.clean_up()
| {
"content_hash": "197afc7735ec7867cc662e398f0ef03f",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 78,
"avg_line_length": 30.094594594594593,
"alnum_prop": 0.5792546026044005,
"repo_name": "shad7/seedbox",
"id": "1039eb279df0939dc4d6de6a41a6466967270002",
"size": "2227",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "seedbox/process/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "210321"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import get_user_model
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.shortcuts import redirect, get_object_or_404
from django.urls import reverse
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext as _
from django.views.generic import TemplateView
from documents.models import Document
from projects.models import Project
from ...forms.invite_user import InviteUserForm, EmailForm
User = get_user_model()
class InviteUserView(TemplateView):
    """Invite a user (by email) to collaborate on a document.

    GET renders the invite form; POST looks the user up by email
    (creating one if unknown), grants the selected permission level and
    sends an invitation email.
    """
    template_name = 'documents/invite_user.html'

    def get_context_data(self, **kwargs):
        # Forms and objects are prepared in dispatch(), so they are
        # always available here for both GET and failed POST renders.
        return {
            'form': self.form,
            'email_form': self.email_form,
            'project': self.project,
            'organization': self.project.organization,
            'document': self.document
        }

    @method_decorator(login_required)
    def dispatch(self, request, project_pk, document_pk, *args, **kwargs):
        """Resolve project/document, enforce invite permission, build forms."""
        self.project = get_object_or_404(Project.objects.active(), pk=project_pk)
        self.document = get_object_or_404(
            Document.objects.active().prefetch_related(
                'documentversion_set',
                'documentversion_set__user'
            ),
            pk=document_pk
        )
        if not self.document.can_invite(self.request.user):
            raise PermissionDenied(_('You do not have permissions to invite users to this document.'))
        self.email_form = EmailForm(request.POST or None)
        # Users who can manage the document may grant more roles.
        self.form = InviteUserForm(
            request.POST or None,
            manager=self.document.can_manage(self.request.user)
        )
        return super(InviteUserView, self).dispatch(request, *args, **kwargs)

    def post(self, request, *args, **kwargs):
        """Create/find the user, grant access, and send the invitation."""
        if self.email_form.is_valid() and self.form.is_valid():
            try:
                user = User.objects.get(email=self.email_form.cleaned_data['email'])
            except User.DoesNotExist:
                # Unknown email: create a new account from the form data,
                # using the email address as the username.
                user = self.form.save(commit=False)
                user.email = self.email_form.cleaned_data['email']
                user.username = user.email
                user.save()
            # Grant the requested permission level.
            # NOTE(review): the 'collaborator' branch adds the invite to the
            # *project* while the other branches act on the document --
            # confirm this asymmetry is intentional.
            if self.form.cleaned_data['user_type'] == 'manager':
                self.document.add_manage(user)
            elif self.form.cleaned_data['user_type'] == 'collaborator':
                self.project.add_invite(user)
            else:
                self.document.add_create(user)
            user.send_invite(settings.EMAIL_APP, 'email/invite_document', _('Invitation to collaborate'), user)
            messages.success(request, _('The invitation has been sent.'))
            return redirect(
                reverse(
                    'projects:document_invite_user',
                    args=[self.project.pk, self.document.pk]
                )
            )
        # Invalid input: re-render the page with form errors.
        return self.render_to_response(self.get_context_data())
| {
"content_hash": "532630208e222bd606271ed7a1af0c22",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 111,
"avg_line_length": 39.58441558441559,
"alnum_prop": 0.6253280839895013,
"repo_name": "bgroff/kala-app",
"id": "75ab611f3ca6ce8563bbbea958132b82267460f0",
"size": "3048",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_kala/projects/views/documents/invite_user.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5056002"
},
{
"name": "HTML",
"bytes": "176522"
},
{
"name": "JavaScript",
"bytes": "5567753"
},
{
"name": "Python",
"bytes": "360082"
},
{
"name": "Ruby",
"bytes": "540"
},
{
"name": "Shell",
"bytes": "9605"
},
{
"name": "TypeScript",
"bytes": "29522"
}
],
"symlink_target": ""
} |
from .pqdinstrument import *
from .server import run_server | {
"content_hash": "45591638b829d043c04618a455b29975",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 30,
"avg_line_length": 29.5,
"alnum_prop": 0.8135593220338984,
"repo_name": "guenp/PyQDInstrument",
"id": "ac2fbcd52e53141ea3e02d453ab4ec725536f593",
"size": "59",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8966"
}
],
"symlink_target": ""
} |
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class TxnMallTest(BitcoinTestFramework):
    """Check wallet accounting when a mined double-spend conflicts with
    two previously-sent wallet transactions."""

    def add_options(self, parser):
        # Optionally confirm the victim transactions in a block before the
        # conflicting spend is mined.
        parser.add_option("--mineblock", dest="mine_block", default=False, action="store_true",
                          help="Test double-spend of 1-confirmed transaction")

    def setup_network(self):
        # Start with split network:
        return super(TxnMallTest, self).setup_network(True)

    def run_test(self):
        """Send two payments on one half of the split network, mine a
        conflicting double-spend on the other half, reconnect, and verify
        confirmations and balances."""
        # All nodes should start with 12,500 VIVO:
        starting_balance = 12500
        for i in range(4):
            assert_equal(self.nodes[i].getbalance(), starting_balance)
            self.nodes[i].getnewaddress("")  # bug workaround, coins generated assigned to first getnewaddress!

        # Assign coins to foo and bar accounts:
        node0_address_foo = self.nodes[0].getnewaddress("foo")
        fund_foo_txid = self.nodes[0].sendfrom("", node0_address_foo, 12190)
        fund_foo_tx = self.nodes[0].gettransaction(fund_foo_txid)
        node0_address_bar = self.nodes[0].getnewaddress("bar")
        fund_bar_txid = self.nodes[0].sendfrom("", node0_address_bar, 290)
        fund_bar_tx = self.nodes[0].gettransaction(fund_bar_txid)
        assert_equal(self.nodes[0].getbalance(""),
                     starting_balance - 12190 - 290 + fund_foo_tx["fee"] + fund_bar_tx["fee"])

        # Coins are sent to node1_address
        node1_address = self.nodes[1].getnewaddress("from0")

        # First: use raw transaction API to send 12400 VIVO to node1_address,
        # but don't broadcast:
        doublespend_fee = Decimal('-.02')
        rawtx_input_0 = {}
        rawtx_input_0["txid"] = fund_foo_txid
        rawtx_input_0["vout"] = find_output(self.nodes[0], fund_foo_txid, 12190)
        rawtx_input_1 = {}
        rawtx_input_1["txid"] = fund_bar_txid
        rawtx_input_1["vout"] = find_output(self.nodes[0], fund_bar_txid, 290)
        inputs = [rawtx_input_0, rawtx_input_1]
        change_address = self.nodes[0].getnewaddress()
        outputs = {}
        outputs[node1_address] = 12400
        outputs[change_address] = 12480 - 12400 + doublespend_fee
        rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
        doublespend = self.nodes[0].signrawtransaction(rawtx)
        assert_equal(doublespend["complete"], True)

        # Create two spends using 1 500 VIVO coin each
        txid1 = self.nodes[0].sendfrom("foo", node1_address, 400, 0)
        txid2 = self.nodes[0].sendfrom("bar", node1_address, 200, 0)

        # Have node0 mine a block:
        if (self.options.mine_block):
            self.nodes[0].generate(1)
            sync_blocks(self.nodes[0:2])

        tx1 = self.nodes[0].gettransaction(txid1)
        tx2 = self.nodes[0].gettransaction(txid2)

        # Node0's balance should be starting balance, plus 500VIVO for another
        # matured block, minus 400, minus 200, and minus transaction fees:
        expected = starting_balance + fund_foo_tx["fee"] + fund_bar_tx["fee"]
        if self.options.mine_block: expected += 500
        expected += tx1["amount"] + tx1["fee"]
        expected += tx2["amount"] + tx2["fee"]
        assert_equal(self.nodes[0].getbalance(), expected)

        # foo and bar accounts should be debited:
        assert_equal(self.nodes[0].getbalance("foo", 0), 12190+tx1["amount"]+tx1["fee"])
        assert_equal(self.nodes[0].getbalance("bar", 0), 290+tx2["amount"]+tx2["fee"])

        if self.options.mine_block:
            assert_equal(tx1["confirmations"], 1)
            assert_equal(tx2["confirmations"], 1)
            # Node1's "from0" balance should be both transaction amounts:
            assert_equal(self.nodes[1].getbalance("from0"), -(tx1["amount"]+tx2["amount"]))
        else:
            assert_equal(tx1["confirmations"], 0)
            assert_equal(tx2["confirmations"], 0)

        # Now give doublespend and its parents to miner:
        self.nodes[2].sendrawtransaction(fund_foo_tx["hex"])
        self.nodes[2].sendrawtransaction(fund_bar_tx["hex"])
        doublespend_txid = self.nodes[2].sendrawtransaction(doublespend["hex"])
        # ... mine a block...
        self.nodes[2].generate(1)

        # Reconnect the split network, and sync chain:
        connect_nodes(self.nodes[1], 2)
        self.nodes[2].generate(1)  # Mine another block to make sure we sync
        sync_blocks(self.nodes)
        assert_equal(self.nodes[0].gettransaction(doublespend_txid)["confirmations"], 2)

        # Re-fetch transaction info:
        tx1 = self.nodes[0].gettransaction(txid1)
        tx2 = self.nodes[0].gettransaction(txid2)

        # Both transactions should be conflicted
        assert_equal(tx1["confirmations"], -2)
        assert_equal(tx2["confirmations"], -2)

        # Node0's total balance should be starting balance, plus 1000VIVO for
        # two more matured blocks, minus 12400 for the double-spend, plus fees (which are
        # negative):
        expected = starting_balance + 1000 - 12400 + fund_foo_tx["fee"] + fund_bar_tx["fee"] + doublespend_fee
        assert_equal(self.nodes[0].getbalance(), expected)
        assert_equal(self.nodes[0].getbalance("*"), expected)

        # Final "" balance is starting_balance - amount moved to accounts - doublespend + subsidies +
        # fees (which are negative)
        assert_equal(self.nodes[0].getbalance("foo"), 12190)
        assert_equal(self.nodes[0].getbalance("bar"), 290)
        assert_equal(self.nodes[0].getbalance(""), starting_balance
                                                   -12190
                                                   - 290
                                                   -12400
                                                   + 1000
                                                   + fund_foo_tx["fee"]
                                                   + fund_bar_tx["fee"]
                                                   + doublespend_fee)
        # Node1's "from0" account balance should be just the doublespend:
        assert_equal(self.nodes[1].getbalance("from0"), 12400)
if __name__ == '__main__':
    # Run the regression test when executed directly.
    TxnMallTest().main()
| {
"content_hash": "1e835a74d53d0eabfeb6e10d2232d8ab",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 111,
"avg_line_length": 47.31578947368421,
"alnum_prop": 0.5806451612903226,
"repo_name": "vivocoin/vivo",
"id": "2d6d11c82d4ccd33e55d4eca31dbe0d8af03389f",
"size": "6567",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qa/rpc-tests/txn_doublespend.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "419693"
},
{
"name": "C",
"bytes": "1377184"
},
{
"name": "C++",
"bytes": "5321490"
},
{
"name": "CSS",
"bytes": "124311"
},
{
"name": "HTML",
"bytes": "50621"
},
{
"name": "Java",
"bytes": "2100"
},
{
"name": "M4",
"bytes": "158125"
},
{
"name": "Makefile",
"bytes": "105257"
},
{
"name": "Objective-C",
"bytes": "4133"
},
{
"name": "Objective-C++",
"bytes": "7224"
},
{
"name": "Python",
"bytes": "706233"
},
{
"name": "QMake",
"bytes": "2055"
},
{
"name": "Roff",
"bytes": "3688"
},
{
"name": "Shell",
"bytes": "35621"
}
],
"symlink_target": ""
} |
from instabot.api.request import Request
def is_id(smth):
    """Return True when *smth* (any object) stringifies to pure digits.

    Used to distinguish numeric instagram user ids from usernames.
    Note: ``str.isdigit`` is False for the empty string and for values
    containing a sign (e.g. ``-1``), matching the original behavior.
    """
    # Idiom fix: return the boolean directly instead of
    # ``if ...: return True`` / ``return False``.
    return str(smth).isdigit()
def get_user_info(user, user_id):
    """Fetch profile info; accepts a numeric id or a username."""
    if is_id(user_id):
        endpoint = 'users/%s/info/' % user_id
    else:  # a username was passed instead of a numeric id
        endpoint = 'users/%s/usernameinfo/' % user_id
    return Request.send(user.session, endpoint)
def get_user_feed(user, user_id, maxid='', minTimestamp=None):
    """Fetch a user's media feed page (paginate via ``maxid``)."""
    endpoint = ('feed/user/%s/?max_id=%s&min_timestamp=%s'
                '&rank_token=%s&ranked_content=true'
                % (user_id, maxid, minTimestamp, user.rank_token))
    return Request.send(user.session, endpoint)
def get_user_followers(user, user_id, maxid=''):
    """Fetch one page of the given user's followers."""
    endpoint = ('friendships/%s/followers/?max_id=%s&rank_token=%s'
                % (user_id, maxid, user.rank_token))
    return Request.send(user.session, endpoint)
def get_user_following(user, user_id, maxid=''):
    """Fetch one page of the accounts the given user follows."""
    endpoint = ('friendships/%s/following/?max_id=%s&rank_token=%s'
                % (user_id, maxid, user.rank_token))
    return Request.send(user.session, endpoint)
def get_liked_media(user, maxid=''):
    """Fetch one page of media the authenticated user has liked."""
    return Request.send(user.session, 'feed/liked/?max_id=%s' % maxid)
def search_location(user, query):
    """Search places by a free-text query."""
    endpoint = ('fbsearch/places/?rank_token=%s&query=%s'
                % (user.rank_token, query))
    return Request.send(user.session, endpoint)
def get_geo_feed(user, location_id, maxid=''):
    """Fetch one page of the feed for a given location id."""
    endpoint = ('feed/location/%s/?max_id=%s&rank_token=%s'
                '&ranked_content=true&'
                % (location_id, maxid, user.rank_token))
    return Request.send(user.session, endpoint)
def get_hashtag_feed(user, hashtag, maxid=''):
    """Fetch one page of the feed for a given hashtag."""
    endpoint = ('feed/tag/%s/?max_id=%s&rank_token=%s'
                '&ranked_content=true&'
                % (hashtag, maxid, user.rank_token))
    return Request.send(user.session, endpoint)
| {
"content_hash": "3e9edab06aa67433ecac740367a9e917",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 148,
"avg_line_length": 37.943396226415096,
"alnum_prop": 0.5683739433117851,
"repo_name": "instagrambot/instapro",
"id": "6363064d954241b2ef152ef3e65a9bcd0c9b7d97",
"size": "2011",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "instabot/api/api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "34613"
}
],
"symlink_target": ""
} |
from django.db import models
from django.conf import settings
from django.core.urlresolvers import reverse
from django.db.models.signals import pre_save
from django.utils.text import slugify
from django.contrib.contenttypes.models import ContentType
from comments.models import Comment
def upload_location(instance, filename):
    """Build an upload path of the form ``<next_id>/<filename>``.

    The id is predicted as (highest existing id) + 1, or 1 for an empty
    table -- the instance itself has no primary key yet at upload time.
    """
    model_cls = instance.__class__
    last_row = model_cls.objects.order_by('id').last()
    next_id = last_row.id + 1 if last_row else 1
    return "%s/%s" % (next_id, filename)
class Post(models.Model):
    """Blog post with slug-based URLs, an optional image and comments."""
    # Author of the post (defaults to user pk=1).
    user = models.ForeignKey(settings.AUTH_USER_MODEL, default=1)
    title = models.CharField(max_length=255)
    # Unique URL identifier, auto-generated from the title (see create_slug).
    slug = models.SlugField(unique=True)
    # Image dimensions are persisted in height_field/width_field below.
    image = models.ImageField(upload_to=upload_location,
                              null=True,
                              blank=True,
                              height_field='height_field',
                              width_field='width_field')
    height_field = models.IntegerField(default=0)
    width_field = models.IntegerField(default=0)
    content = models.TextField()
    updated = models.DateTimeField(auto_now=True, auto_now_add=False)
    timestamp = models.DateTimeField(auto_now=False, auto_now_add=True)

    def __unicode__(self):
        # Python 2 string representation.
        return self.title

    def __str__(self):
        return self.title

    def get_absolute_url(self):
        """Canonical HTML page URL for this post."""
        return reverse('posts:detail', kwargs={'slug': self.slug})

    def get_api_url(self):
        """REST API endpoint URL for this post."""
        return reverse('posts-api:detail', kwargs={'slug': self.slug})

    @property
    def comments(self):
        """Comments associated with this post (via Comment.objects.filter_by_instance)."""
        instance = self
        qs = Comment.objects.filter_by_instance(instance)
        return qs

    @property
    def get_content_type(self):
        """ContentType entry for the Post model."""
        instance = self
        content_type = ContentType.objects.get_for_model(instance.__class__)
        return content_type

    class Meta:
        # Newest posts first.
        ordering = ['-timestamp', '-updated']
def create_slug(instance, new_slug=None):
    """Return a slug unique among Post rows.

    The slug is derived from the first 40 characters of the title; on a
    collision, ``-<id>`` of the newest conflicting row is appended and
    the check is repeated recursively.
    """
    base = slugify(instance.title[:40])
    slug = base if new_slug is None else new_slug
    qs = Post.objects.filter(slug=slug).order_by('-id')
    if qs.exists():
        candidate = "%s-%s" % (slug, qs.first().id)
        return create_slug(instance, candidate)
    return slug
def pre_save_post_receiver(sender, instance, *args, **kwargs):
    """pre_save handler: ensure the post has a slug before it is saved."""
    if instance.slug:
        return
    instance.slug = create_slug(instance)
pre_save.connect(pre_save_post_receiver, sender=Post)
| {
"content_hash": "876142742a460d299b19e517b988144c",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 76,
"avg_line_length": 28.138297872340427,
"alnum_prop": 0.6294896030245747,
"repo_name": "shawon922/django-blog-api",
"id": "e6c6f10bcb5f0dcb0954dae1af242a6c433bdd42",
"size": "2645",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "posts/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "263"
},
{
"name": "HTML",
"bytes": "13139"
},
{
"name": "Python",
"bytes": "46165"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, include, url
from django.contrib import admin
# URL routing table (django 1.x ``patterns`` style).
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'django_es_model.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),
    # Django admin site.
    url(r'^admin/', include(admin.site.urls)),
    # Demo application routes.
    url(r'^demo/', include('demo.urls')),
)
| {
"content_hash": "b32144fc95b8b60b0c62c38f1ed9f7a4",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 60,
"avg_line_length": 29.636363636363637,
"alnum_prop": 0.6319018404907976,
"repo_name": "jxstanford/django_es_model",
"id": "59a797d79e91c88f4e4ae06727776fd8fd843e0b",
"size": "326",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_es_model/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "14193"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import mock
from sentry.tasks.fetch_source import expand_javascript_source
from sentry.testutils import TestCase
class ExpandJavascriptSourceTest(TestCase):
    """Tests for the expand_javascript_source task."""

    @mock.patch('sentry.models.Event.update')
    @mock.patch('sentry.tasks.fetch_source.fetch_url')
    @mock.patch('sentry.tasks.fetch_source.fetch_sourcemap')
    def test_calls_from_kwargs(self, fetch_sourcemap, fetch_url, update):
        """Source context around each frame is filled in from the fetched
        file body (here: one character per line)."""
        data = {
            'sentry.interfaces.Stacktrace': {
                'frames': [
                    {
                        'abs_path': 'http://example.com/foo.js',
                        'filename': 'foo.js',
                        'lineno': 4,
                        'colno': 0,
                    },
                    {
                        'abs_path': 'http://example.com/foo.js',
                        'filename': 'foo.js',
                        'lineno': 1,
                        'colno': 0,
                    },
                ],
            },
        }
        # No sourcemap: line numbers are used against the raw file.
        fetch_sourcemap.return_value = None
        # Fake file body: 11 lines, one character each ('h', 'e', 'l', ...).
        fetch_url.return_value.body = '\n'.join('hello world')

        expand_javascript_source(data)

        # The file should be fetched exactly once despite two frames.
        fetch_url.assert_called_once_with('http://example.com/foo.js')

        frame_list = data['sentry.interfaces.Stacktrace']['frames']
        # Frame at line 4: context line 'l', three lines before, five after.
        frame = frame_list[0]
        assert frame['pre_context'] == ['h', 'e', 'l']
        assert frame['context_line'] == 'l'
        assert frame['post_context'] == ['o', ' ', 'w', 'o', 'r']
        # Frame at line 1: nothing precedes the first line.
        frame = frame_list[1]
        assert frame['pre_context'] == []
        assert frame['context_line'] == 'h'
        assert frame['post_context'] == ['e', 'l', 'l', 'o', ' ']
| {
"content_hash": "2d636410f42ec8c7b50a841d3fd3e12d",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 73,
"avg_line_length": 35.3125,
"alnum_prop": 0.4920353982300885,
"repo_name": "NickPresta/sentry",
"id": "122d91cc2f10fde245e2fa5dee55f0684250d8f3",
"size": "1720",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/sentry/tasks/fetch_source/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "779841"
},
{
"name": "Perl",
"bytes": "618"
},
{
"name": "Python",
"bytes": "2527496"
},
{
"name": "Shell",
"bytes": "4106"
}
],
"symlink_target": ""
} |
"""Session Handling for SQLAlchemy backend."""
from nova.db.sqlalchemy import session as nova_session
from nova.openstack.common import cfg
# Bare-metal database connection options, registered under the
# [baremetal] group so they do not clash with nova's own sql options.
opts = [
    cfg.StrOpt('sql_connection',
               default='sqlite:///$state_path/baremetal_$sqlite_db',
               help='The SQLAlchemy connection string used to connect to the '
                    'bare-metal database'),
    ]
baremetal_group = cfg.OptGroup(name='baremetal',
                               title='Baremetal Options')
CONF = cfg.CONF
CONF.register_group(baremetal_group)
CONF.register_opts(opts, baremetal_group)
# Imported because the default sql_connection value above interpolates them.
CONF.import_opt('sqlite_db', 'nova.db.sqlalchemy.session')
CONF.import_opt('state_path', 'nova.config')
# Module-level singletons, created lazily by get_engine()/get_session().
_ENGINE = None
_MAKER = None
def get_session(autocommit=True, expire_on_commit=False):
    """Return a new SQLAlchemy session from the lazily-created maker.

    NOTE(review): ``autocommit``/``expire_on_commit`` only take effect on
    the first call, since the session maker is cached module-wide.
    """
    global _MAKER
    if _MAKER is None:
        _MAKER = nova_session.get_maker(get_engine(), autocommit,
                                        expire_on_commit)
    return _MAKER()
def get_engine():
    """Return the shared SQLAlchemy engine, creating it on first use."""
    global _ENGINE
    if _ENGINE is not None:
        return _ENGINE
    _ENGINE = nova_session.create_engine(CONF.baremetal.sql_connection)
    return _ENGINE
| {
"content_hash": "5dd54bfe0ee8cc4589b0f9b820a8be99",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 78,
"avg_line_length": 27.477272727272727,
"alnum_prop": 0.6550868486352357,
"repo_name": "houshengbo/nova_vmware_compute_driver",
"id": "d6e2a533d970b1b5b2fa6185cf163782d23ab4c0",
"size": "2024",
"binary": false,
"copies": "1",
"ref": "refs/heads/attach-detach-VMware-iSCSI-driver",
"path": "nova/virt/baremetal/db/sqlalchemy/session.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "7173520"
},
{
"name": "Shell",
"bytes": "15478"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from functools import wraps
from django.contrib.auth.decorators import user_passes_test
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.core.urlresolvers import reverse_lazy
from django.shortcuts import resolve_url
from django.utils.decorators import available_attrs
from django.utils.six.moves.urllib.parse import urlparse
def provider_exists(user):
    """Return True when the user has an associated ``provider`` attribute."""
    return hasattr(user, 'provider')
def clintype_set(session):
    """Return True when a clinical type pk is stored on the session."""
    return 'clintype_pk' in session
def provider_has_updated(user):
    """Return True when the user's provider profile needs no update."""
    provider = user.provider
    return not provider.needs_updating
def session_passes_test(test_func, fail_url,
                        redirect_field_name=REDIRECT_FIELD_NAME):
    """
    Decorator for views that checks that the session passes the given test,
    redirecting to ``fail_url`` when it does not. ``test_func`` takes the
    session object and returns True if the session passes. Modeled closely
    on django.contrib.auth.decorators.user_passes_test.
    """
    def decorator(view_func):
        @wraps(view_func, assigned=available_attrs(view_func))
        def _wrapped_view(request, *args, **kwargs):
            if test_func(request.session):
                return view_func(request, *args, **kwargs)
            next_url = request.build_absolute_uri()
            resolved_url = resolve_url(fail_url)
            # When the redirect target shares scheme and host with the
            # current request, a relative "next" path is enough.
            target_scheme, target_netloc = urlparse(resolved_url)[:2]
            current_scheme, current_netloc = urlparse(next_url)[:2]
            same_scheme = not target_scheme or target_scheme == current_scheme
            same_netloc = not target_netloc or target_netloc == current_netloc
            if same_scheme and same_netloc:
                next_url = request.get_full_path()
            from django.contrib.auth.views import redirect_to_login
            return redirect_to_login(
                next_url, resolved_url, redirect_field_name)
        return _wrapped_view
    return decorator
def clintype_required(func):
    """Redirect to the clinical-type chooser unless one is set in session."""
    decorator = session_passes_test(
        clintype_set, fail_url=reverse_lazy('choose-clintype'))
    return decorator(func)
def provider_update_required(func):
    """Redirect providers flagged as stale to the provider-update form."""
    decorator = user_passes_test(
        provider_has_updated, login_url=reverse_lazy('provider-update'))
    return decorator(func)
def provider_required(func):
    """Redirect users without a provider profile to the signup form."""
    decorator = user_passes_test(
        provider_exists, login_url=reverse_lazy('new-provider'))
    return decorator(func)
| {
"content_hash": "feb89ab0e5385e046907af52a37fee9f",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 98,
"avg_line_length": 36.53623188405797,
"alnum_prop": 0.6830622768742562,
"repo_name": "SaturdayNeighborhoodHealthClinic/clintools",
"id": "3c75e6d72267c616a5b0a610e26befc7861a0a87",
"size": "2521",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pttrack/decorators.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "39945"
},
{
"name": "Python",
"bytes": "212180"
},
{
"name": "Shell",
"bytes": "436"
}
],
"symlink_target": ""
} |
from setuptools import setup
# Packaging metadata for the PoliChart Flask application.
project = "polichart"
setup(
    name=project,
    version='0.1',
    url='https://github.com/cjmabry/PoliChart',
    description='Election projections, data visualization, and political rants.',
    author='Chris Mabry',
    author_email='cjmab28@gmail.com',
    packages=["polichart"],
    include_package_data=True,
    zip_safe=False,
    # Runtime dependencies; all Flask extensions plus MySQL driver and fabric.
    install_requires=[
        'Flask>=0.10.1',
        'Flask-SQLAlchemy',
        'Flask-WTF',
        'Flask-Script',
        'Flask-Babel',
        'Flask-Testing',
        'Flask-Mail',
        'Flask-Cache',
        'Flask-Login',
        'Flask-OpenID',
        'nose',
        'mysql-python',
        'fabric',
    ],
    test_suite='tests',
    classifiers=[
        'Environment :: Web Environment',
        'Operating System :: OS Independent',
        'Programming Language :: Python'
    ]
)
| {
"content_hash": "fec6d517561abc49b34adfda955a2a3c",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 81,
"avg_line_length": 24.305555555555557,
"alnum_prop": 0.5714285714285714,
"repo_name": "cjmabry/PoliChart",
"id": "09d530fcf18fb7fd44d798a96b55e0d139334cdf",
"size": "900",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "9548"
},
{
"name": "HTML",
"bytes": "20198"
},
{
"name": "JavaScript",
"bytes": "18610"
},
{
"name": "Python",
"bytes": "28726"
}
],
"symlink_target": ""
} |
from datetime import datetime
import pytest
from sandglass import time
from sandglass.time import utils
from sandglass.time import guess_version
def test_base(config):
    """Exercise version guessing and the available-languages list."""
    # Check app version generation
    version = guess_version('sandglass.time', time.__file__)
    assert version
    assert version == time.__version__
    # Check version for uninstalled apps
    assert guess_version('_foo_', time.__file__) == '(not installed)'
    # Check base languages
    languages = time.get_available_languages()
    assert 'en' in languages
    assert 'es' in languages
    assert 'de' in languages
def test_iso_date_format():
    """
    Test ISO date string to python date conversion.
    """
    sample = datetime(2015, 7, 16, 10, 34)
    parsed = datetime.strptime(sample.isoformat(), utils.ISO_DATE_FORMAT)
    assert parsed == sample
def test_is_valid_email():
    """
    Test email checking function `is_valid_email`.
    """
    # None of these should be accepted as an e-mail address
    for value in ('invalid @ email', 1, True, {}, None):
        assert utils.is_valid_email(value) is False
    # A well formed address is accepted
    assert utils.is_valid_email('valid@email.org')
def test_get_settings(config):
    """
    Test function `get_settings` to get application settings.
    """
    settings = utils.get_settings()
    # Settings should be a non empty dictionary.
    # Fix: the original asserted `settings > 0`, which compares a dict
    # with an int (always True on Python 2, TypeError on Python 3) and
    # never actually checked emptiness; check the length instead.
    assert isinstance(settings, dict)
    assert len(settings) > 0
def test_camelcase_to_underscore():
    """
    Test camel case to underscore convert function.
    """
    # Falsy values are returned untouched
    assert utils.camelcase_to_underscore('') == ''
    assert utils.camelcase_to_underscore(False) is False
    cases = (
        ('testCamelCaseName', 'test_camel_case_name'),
        ('_testCamelCaseName_end', '_test_camel_case_name_end'),
        ('test_camel_case_name', 'test_camel_case_name'),
    )
    for value, expected in cases:
        assert utils.camelcase_to_underscore(value) == expected
def test_underscore_to_camelcase():
    """
    Test underscore to camel case convert function.
    """
    # Falsy values are returned untouched
    assert utils.underscore_to_camelcase('') == ''
    assert utils.underscore_to_camelcase(False) is False
    cases = (
        ('test_camel_case_name', 'testCamelCaseName'),
        ('_test_camel_case_name_End', '_testCamelCaseNameEnd'),
        ('testCamelCaseName', 'testCamelCaseName'),
    )
    for value, expected in cases:
        assert utils.underscore_to_camelcase(value) == expected
def test_camelcase_dict():
    """
    Test convertion of dict keys to camel case.
    """
    converted = utils.camelcase_dict({
        'a_name': 0,
        'anotherName': 0,
        'Test_name': 0})
    for key in ('aName', 'anotherName', 'testName'):
        assert key in converted
def test_underscore_dict():
    """
    Test convertion of dict keys to underscore.
    """
    converted = utils.underscore_dict({
        'a_name': 0,
        'anotherName': 0,
        'Test_name': 0})
    for key in ('a_name', 'another_name', 'test_name'):
        assert key in converted
def test_mixedmethod():
    """
    Test `mixedmethod` decorator for class&instance methods.
    This decorator allows a class to implement a single method
    that can be called as instance or class method.
    """
    class TestClass(object):
        @utils.mixedmethod
        def what_am_i(obj):
            # `obj` is either an instance or the class itself.
            if isinstance(obj, TestClass):
                return 'instance'
            elif issubclass(obj, TestClass):
                return 'class'
    # Called on the class, the method receives the class object.
    assert TestClass.what_am_i() == 'class'
    test_obj = TestClass()
    # Called on an instance, it receives the instance.
    assert test_obj.what_am_i() == 'instance'
def test_route_path(config):
    """
    Test route URL path generation.
    """
    # TODO: This test should not depend on an API version
    prefix = '/time/api/v1'
    path = utils.route_path('api.rest.collection', member='users')
    assert path == prefix + '/users/'
    path = utils.route_path('api.rest.member', member='users', pk='1')
    assert path == prefix + '/users/1/'
    path = utils.route_path(
        'api.rest.related',
        member='users',
        pk='1',
        related_name='tags',
    )
    assert path == prefix + '/users/1/tags/'
def test_generate_random_hash():
    """
    Test random hash generation function `generate_random_hash`.
    """
    hash = utils.generate_random_hash(salt=u'fdskjgs', hash='sha1')
    # NOTE: `basestring` makes this test Python 2 only.
    assert isinstance(hash, basestring)
    # A SHA-1 hex digest is always 40 characters long.
    assert len(hash) == 40
    # Invalid hashing algorithms should raise an exception
    with pytest.raises(Exception):
        utils.generate_random_hash(hash='invalid_name')
def test_get_app_namespace():
    """
    Check that function `get_app_namespace` gets proper app namespaces.
    """
    # (An unused local `LocalClass` definition was removed here.)
    # Check a python module string
    namespace = utils.get_app_namespace('sandglass.app_namespace.module')
    assert namespace == 'app_namespace'
    # Check a sandglass.time module
    namespace = utils.get_app_namespace(utils)
    assert namespace == 'time'
    # Check a class defined in sandglass.time
    namespace = utils.get_app_namespace(utils.mixedmethod)
    assert namespace == 'time'
    # Check non sandglass python module string
    # Non sandglass prefixed modules should raise an exception
    with pytest.raises(Exception):
        utils.get_app_namespace('foreign.namespace.module')
    # Check a non sandglass module
    # Non sandglass prefixed modules should raise an exception
    import os
    with pytest.raises(Exception):
        utils.get_app_namespace(os.path)
| {
"content_hash": "e5b215cad93918506578366e417c1453",
"timestamp": "",
"source": "github",
"line_count": 213,
"max_line_length": 78,
"avg_line_length": 27.652582159624412,
"alnum_prop": 0.6505942275042444,
"repo_name": "sanglass/sandglass.time",
"id": "746ca7e20c4e7ce671adb853e181d6cf12766fe6",
"size": "5890",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Nginx",
"bytes": "1262"
},
{
"name": "Python",
"bytes": "221820"
}
],
"symlink_target": ""
} |
from django.db import models
from django.db.models import Q
from django.conf import settings
#Python Imports
import datetime
#Local Imports
import swapper
from utils.models import TimeStampedModel,ForUserQuerySet
import utils
class ScheduleQuerySet(ForUserQuerySet):
    """QuerySet helpers shared by scheduled events (visits, phone calls)."""

    def pending(self, **kwargs):
        """Return pending events, optionally narrowed by extra filters."""
        pending = self.filter(arrived__isnull=True, status='pending')
        if not kwargs:
            return pending
        pending_Q = Q(**kwargs)
        return pending.filter(pending_Q)

    def is_active(self):
        ''' exclude those participants who's visits we should ignore '''
        return self.exclude(participant__status__in=('completed', 'quit'))

    def visit_range(self, start=None, end=None,
                    notification_start=None, notification_end=None):
        """Filter events by scheduled date and notification recency.

        Each parameter is a ``datetime.timedelta`` keyword dict (e.g.
        ``{'days': 3}``) counted back from today. ``None`` for a *start*
        means today; ``None`` for an *end* leaves that side open.

        Fix: the previous signature used mutable dict defaults
        (``start={'days': 0}``); ``None`` defaults keep the exact same
        behaviour while avoiding the shared-mutable-default pitfall.
        """
        today = utils.today()
        start = today - datetime.timedelta(**(start or {'days': 0}))
        notification_start = today - datetime.timedelta(
            **(notification_start or {'days': 0}))
        if end is not None:
            end = today - datetime.timedelta(**end)
            scheduled_Q = Q(scheduled__range=(end, start))
        else:
            scheduled_Q = Q(scheduled__lte=start)
        if notification_end is not None:
            notification_end = today - datetime.timedelta(**notification_end)
            notification_Q = Q(notification_last_seen__range=(
                notification_end, notification_start))
        else:
            notification_Q = Q(notification_last_seen__lte=notification_start)
        # Events never notified always qualify.
        notification_Q |= Q(notification_last_seen__isnull=True)
        return self.filter(scheduled_Q & notification_Q)
class ScheduledEvent(TimeStampedModel):
    """
    Abstract base class for Visits and ScheduledPhoneCalls
    """
    STATUS_CHOICES = (
        ('pending','Pending'),
        ('missed','Missed'),
        ('deleted','Deleted'),
        ('attended','Attended'),
    )
    class Meta:
        abstract = True
        ordering = ('-scheduled',)
        app_label = 'contacts'
    # Date the event is scheduled for.
    scheduled = models.DateField()
    # Date the participant actually arrived/answered (None until then).
    arrived = models.DateField(blank=True,null=True,default=None)
    # Last date a notification for this event was seen by staff.
    notification_last_seen = models.DateField(null=True,blank=True,default=None)
    notify_count = models.IntegerField(default=0)
    # skipped = models.NullBooleanField(default=None)
    status = models.CharField(max_length=15,choices=STATUS_CHOICES,default='pending',help_text='current status of event')
    participant = models.ForeignKey(swapper.get_model_name('contacts', 'Contact'))
    def days_overdue(self):
        """Days past the scheduled date while still pending, else 0."""
        if self.status == 'pending':
            return (utils.today() - self.scheduled).days
        return 0
    def days_str(self):
        """Human readable day delta relative to scheduled (or arrived)."""
        delta_days = -1 * (utils.today() - self.scheduled).days
        if self.status == 'attended' and self.arrived is not None:
            delta_days = (utils.today() - self.arrived).days
        return utils.days_as_str(delta_days)
    def is_pregnant(self):
        # Pregnancy status evaluated at the scheduled date.
        return self.participant.was_pregnant(today=self.scheduled)
    def seen(self,seen=None):
        ''' Mark visit as seen on the given date (default today). '''
        if seen is None:
            seen = utils.today()
        else:
            # `seen` may arrive as an angular datepicker string.
            seen = utils.angular_datepicker(seen)
        self.notify_count += 1
        self.notification_last_seen = seen
        self.save()
    def attended(self,arrived=None):
        ''' Mark visted as attended on @arrived (default today) '''
        if arrived is None:
            arrived = utils.today()
        else:
            arrived = utils.angular_datepicker(arrived)
        self.set_status('attended',arrived)
    def set_status(self,status,arrived=None):
        ''' Mark scheduled event status '''
        if arrived is not None:
            self.arrived = arrived
        self.status = status
        self.save()
    def __str__(self):
        return str(self.scheduled)
    def __repr__(self):
        return "{} {} {}".format(self.participant,self.scheduled,self.status)
class VisitQuerySet(ScheduleQuerySet):
    """Visit-specific helpers on top of the shared schedule queryset."""

    def get_visit_checks(self):
        """Return pending visits for active participants scheduled between
        two weeks in the future and three weeks in the past, ordered by
        scheduled date.

        Fix: removed dead locals (`today`, `start`, `end`) that were
        computed but never used, and a docstring that described behaviour
        the code never implemented.
        """
        return self.pending().is_active().visit_range(
            start={'days': -14}, end={'days': 21}
        ).order_by('scheduled')

    def get_missed_visits(self, date=None, delta_days=3):
        """ Return pending visits that are 3 days late and have been seen or it has been 3 days
            since an SMS reminder was sent and has been seen more than three times"""
        today = utils.today(date)
        late = today - datetime.timedelta(days=delta_days)
        # First reminder: overdue, notified at least once, no SMS sent yet.
        first_reminder_Q = Q(scheduled__lte=late, notify_count__gt=0, missed_sms_count=0)
        # Second reminder: SMS already sent 3+ days ago and seen > 3 times.
        second_reminder_Q = Q(missed_sms_last_sent__lte=late, notify_count__gt=3, missed_sms_count__gt=0)
        return self.pending().is_active().filter(first_reminder_Q | second_reminder_Q)

    def to_send(self):
        # Study-only and delivery visits never trigger SMS messages.
        return self.exclude(visit_type__in=Visit.NO_SMS_TYPES)

    def top(self):
        """Return the first two visits of this queryset."""
        return self[:2]
class Visit(ScheduledEvent):
    """A scheduled clinic/study visit for a participant."""
    #Set Custom Manager
    objects = VisitQuerySet.as_manager()
    VISIT_TYPE_CHOICES = (
        ('clinic','Clinic Visit'),
        ('study','Study Visit'),
        ('both','Both'),
        ('delivery','Delivery'),
    )
    # Visit types that must never trigger SMS reminders.
    NO_SMS_TYPES = ('study','delivery')
    # Custom Visit Fields
    comment = models.TextField(blank=True,null=True)
    visit_type = models.CharField(max_length=25,choices=VISIT_TYPE_CHOICES,default='clinic')
    # Tracking for missed-visit SMS reminders.
    missed_sms_last_sent = models.DateField(null=True,blank=True,default=None)
    missed_sms_count = models.IntegerField(default=0)
    def send_visit_reminder(self,send=True,extra_kwargs=None):
        """Send the pre-visit reminder SMS (defaults to two days ahead)."""
        if self.no_sms:
            return
        if extra_kwargs is None:
            scheduled_date = datetime.date.today() + datetime.timedelta(days=2)
            extra_kwargs = {'days':2,'date':scheduled_date.strftime('%b %d')}
        condition = self.get_condition('pre')
        return self.participant.send_automated_message(send=send,send_base='visit',
            condition=condition,extra_kwargs=extra_kwargs)
    def send_visit_attended_message(self,send=True):
        """Send the post-attendance SMS for this visit."""
        if self.no_sms:
            return
        condition = self.get_condition('attend')
        message = self.participant.send_automated_message(send=send,send_base='visit',
            condition=condition,exact=True)
    def send_missed_visit_reminder(self,send=True):
        """Send a missed-visit SMS; the second reminder marks the visit missed."""
        if self.no_sms:
            return
        condition = self.get_condition('missed')
        if send is True:
            self.missed_sms_count += 1
            self.missed_sms_last_sent = datetime.date.today()
            if self.missed_sms_count >= 2:
                self.status = 'missed'
            self.save()
        return self.participant.send_automated_message(send=send,send_base='visit',condition=condition)
    def get_condition(self,postfix='pre'):
        """Build the message condition name: {anc|both|pnc}_{postfix}."""
        if self.is_pregnant():
            prefix = 'anc'
        elif self.visit_type == 'both':
            prefix = 'both'
        else:
            prefix = 'pnc'
        return '{}_{}'.format(prefix,postfix)
    def is_pregnant(self):
        # Pregnancy status at the scheduled date (positional override of
        # ScheduledEvent.is_pregnant).
        return self.participant.was_pregnant(self.scheduled)
    @property
    def no_sms(self):
        # True for visit types excluded from SMS messaging.
        return self.visit_type in Visit.NO_SMS_TYPES
    @property
    def edited(self):
        # Heuristic: record modified long (> 3 weeks) after creation.
        return (self.modified - self.created).days > 21
class ScheduledPhoneCallQuerySet(ScheduleQuerySet):
    """QuerySet helpers for scheduled phone calls."""
    def pending_calls(self):
        # Pending calls whose notification has not been seen in 2+ days.
        return self.pending().visit_range(notification_start={'days':2})
class ScheduledPhoneCall(ScheduledEvent):
    """A scheduled follow-up phone call to a participant."""
    objects = ScheduledPhoneCallQuerySet.as_manager()
    CALL_TYPE_OPTIONS = (
        ('m','One Month'),
        ('y','One Year'),
    )
    call_type = models.CharField(max_length=2,choices=CALL_TYPE_OPTIONS,default='m')
    def called(self,outcome,created=None,length=None,comment=None,admin_user=None):
        """Record the call outcome and create the related phone-call record.

        An answered call marks this event attended; any other outcome only
        bumps the notification state via `seen()`.
        """
        if outcome == 'answered':
            self.attended(created)
        else:
            self.seen(created)
        # Make a new phone call for participant
        return self.participant.add_call(created=created,outcome=outcome,length=length,comment=comment,
            scheduled=self,admin_user=admin_user)
| {
"content_hash": "3102ebb4fb9c7636515bd7f236701856",
"timestamp": "",
"source": "github",
"line_count": 258,
"max_line_length": 121,
"avg_line_length": 32.70542635658915,
"alnum_prop": 0.6271628347949751,
"repo_name": "I-TECH-UW/mwachx",
"id": "d47d539c086a33b095f5dc8febd2babca50eb216",
"size": "8472",
"binary": false,
"copies": "1",
"ref": "refs/heads/neo",
"path": "contacts/models/visit.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "14250"
},
{
"name": "HTML",
"bytes": "56837"
},
{
"name": "JavaScript",
"bytes": "43890"
},
{
"name": "Python",
"bytes": "358250"
},
{
"name": "Shell",
"bytes": "2976"
}
],
"symlink_target": ""
} |
from flask import Flask, request
import os
import smtplib
import json
import logging
import gmail
def send_email(user, pwd, recipient, subject, body):
    """Send an e-mail via the gmail helper module.

    ``user`` and ``pwd`` are kept for interface compatibility but are no
    longer used: delivery is delegated to ``gmail.send_gmail``, which
    manages its own credentials. ``recipient`` may be a single address or
    a list of addresses.

    Fix: removed the dead message-building code and the commented-out
    smtplib block that the gmail helper replaced.
    """
    gmail.send_gmail(recipient, subject, body)
app = Flask(__name__)
# Listen port; honours the PORT environment variable (default 9000).
port = int(os.getenv("PORT", 9000))
@app.route('/', methods=['POST'])
def root():
    """Handle a bike-sighting report and e-mail the bike's owner.

    Expects a JSON body with ``lon``, ``lat``, ``description`` and
    ``recipient``; returns a JSON success payload.
    """
    data = request.get_json()
    lon = data['lon']
    lat = data['lat']
    description = data['description']
    recipient = data['recipient']
    user = os.getenv("GMAIL_USER")
    pwd = os.getenv("GMAIL_PASSWORD")
    url = "http://www.google.com/maps/place/{},{}".format(lat, lon)
    body = "Your missing bike has been seen:\n\nDescription: {}\n\nLocation: {}".format(description, url)
    subject = "Your missing bike has been seen!"
    logging.info("Sending email to {}".format(recipient))
    send_email(user, pwd, recipient, subject, body)
    logging.info("Successfully sent email to {}".format(recipient))
    # Fix: the HTTP header key must be 'Content-Type' (was 'ContentType',
    # which browsers and clients ignore).
    return json.dumps({'success': True}), 200, {'Content-Type': 'application/json'}
@app.after_request
def after_request(response):
    """Attach permissive CORS headers to every outgoing response."""
    cors_headers = (
        ('Access-Control-Allow-Origin', '*'),
        ('Access-Control-Allow-Headers', 'Content-Type,Authorization'),
        ('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE'),
    )
    for name, value in cors_headers:
        response.headers.add(name, value)
    return response
if __name__ == '__main__':
    # Run the development server on all interfaces.
    logging.basicConfig(level=logging.INFO)
    app.run(host='0.0.0.0', port=port)
| {
"content_hash": "c83195cd16ddf9da4fa927fa1dc80373",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 105,
"avg_line_length": 28.72463768115942,
"alnum_prop": 0.6594349142280524,
"repo_name": "miguelcastilho/BikeApp",
"id": "1a84d1876d892e3ab6a42c50e62c8018410b2f1f",
"size": "1982",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "email_service/app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "658"
},
{
"name": "HTML",
"bytes": "13004"
},
{
"name": "JavaScript",
"bytes": "32761"
},
{
"name": "Python",
"bytes": "11591"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import pmisc
def timer(num_tries, fpointer):
    """Call *fpointer* ``num_tries`` times inside a pmisc.Timer and print
    the resulting time per call."""
    with pmisc.Timer() as tobj:
        for _ in range(num_tries):
            fpointer()
    per_call = tobj.elapsed_time / (2.0 * num_tries)
    print("Time per call: {0} seconds".format(per_call))
def sample_func():
    """Dummy workload for the timing example; the loop body never runs
    because the range is empty."""
    total = 0
    for value in range(total):
        total += value
    return None
| {
"content_hash": "f44e4e410bf3c4f6d28596061142b587",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 85,
"avg_line_length": 23.266666666666666,
"alnum_prop": 0.6045845272206304,
"repo_name": "pmacosta/pmisc",
"id": "2076b40039856c46d60fcd7964f7b454e020dd2e",
"size": "475",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/support/pmisc_example_2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11028"
},
{
"name": "Makefile",
"bytes": "3516"
},
{
"name": "PowerShell",
"bytes": "7209"
},
{
"name": "Python",
"bytes": "122899"
},
{
"name": "Shell",
"bytes": "15547"
}
],
"symlink_target": ""
} |
"""Compare Pulsar and HabCat coordinates"""
import csv
import astropy.units as u
from astropy.coordinates import SkyCoord, Angle
from astropy import coordinates as coord
def flipra(coordinate):
    """Flip a right-ascension coordinate by 180 degrees, wrapping values
    strictly above 360 back into range."""
    shifted = coordinate + 180
    return shifted - 360 if shifted > 360 else shifted
def flipde(coordinate):
    """Mirror a declination across the celestial equator (returns float)."""
    return float(-coordinate)
# NOTE: Python 2 script (print statements, csv files opened in 'rb').
# Load HabCat
habcat_id = []
habcat_ra = []
habcat_de = []
with open('habcat.csv', 'rb') as csvfile:
    dataset = csv.reader(csvfile, delimiter=';')
    for row in dataset:
        habcat_id.append(row[0])
        ra = coord.Angle(row[1], unit=u.hour)  # Define as hours
        habcat_ra.append(ra.degree)  # Convert to degree
        de = coord.Angle(row[2], unit=u.deg)
        habcat_de.append(de.degree)
print len(habcat_id), 'HabCat datalines loaded'
# Load Pulsar catalogue
pulsar_id = []
pulsar_ra = []
pulsar_de = []
pulsar_period = []
with open('pulsar_16msec.csv', 'rb') as csvfile:
    dataset = csv.reader(csvfile, delimiter=';')
    for row in dataset:
        pulsar_id.append(row[0])
        ra = coord.Angle(row[1], unit=u.hour)  # Define as hours
        pulsar_ra.append(ra.degree)  # Convert to degree
        de = coord.Angle(row[2], unit=u.deg)
        pulsar_de.append(de.degree)
        pulsar_period.append(row[3])
print len(pulsar_id), 'Pulsar datalines loaded'
# Nested loop through all Pulsars to find closest 180deg HabCat for each
# NOTE(review): O(pulsars x habcat) SkyCoord separations; simple but slow.
for currentpulsar in range(len(pulsar_id)):  # Pulsar loop
    shortest_distance = 180 * 60  # set to max, in arcminutes
    for currenthabcat in range(len(habcat_id)):  # HabCat loop
        habcat_coordinate = SkyCoord(
            habcat_ra[currenthabcat],
            habcat_de[currenthabcat],
            unit="deg")
        pulsar_coordinate_flipped = SkyCoord(  # flip pulsar coordinates
            flipra(pulsar_ra[currentpulsar]),
            flipde(pulsar_de[currentpulsar]),
            unit="deg")
        distance = pulsar_coordinate_flipped.separation(habcat_coordinate)
        if distance.arcminute < shortest_distance:
            shortest_distance = distance.arcminute  # New best found
            bestfit_pulsar_id = pulsar_id[currentpulsar]
            bestfit_habcat_id = habcat_id[currenthabcat]
    print bestfit_pulsar_id, bestfit_habcat_id, shortest_distance / 60.  # deg
    with open('result.csv', 'a') as fp:  # Append each result to CSV
        a = csv.writer(fp, delimiter=';')
        a.writerow([
            bestfit_pulsar_id,
            bestfit_habcat_id,
            shortest_distance / 60.])  # degrees
print 'Done.'
| {
"content_hash": "7057826badf8a819a620904e8c442bee",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 78,
"avg_line_length": 35.51315789473684,
"alnum_prop": 0.6391256020748425,
"repo_name": "hippke/Pulsar-HabCat",
"id": "8bd44a4693e67e14e58979499d1babf4d79e9cd5",
"size": "2699",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "batch-match.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9718"
}
],
"symlink_target": ""
} |
import posixpath
from fabric.api import settings, run, sudo, puts, warn, hide
from . import system
from .containers import conf
from .task import Task
# Public task instances exported by this module.
__all__ = [
    'install',
    'dump',
    'execute',
    'create_user',
    'drop_user',
    'create_db',
    'drop_db',
    'grant',
]
class Install(Task):
    """Installs MySQL."""
    # Default MySQL server version for each supported distro release.
    VERSIONS = {
        'lenny': '5.0',
        'squeeze': '5.1',
        'lucid': '5.1',
        'maverick': '5.1',
    }
    def is_installed(self):
        # `mysql --version` succeeds only when the client is on PATH.
        with settings(warn_only=True):
            output = run('mysql --version')
        return output.succeeded
    def do(self):
        """Install mysql-server non-interactively, preseeding the root
        password through debconf."""
        if self.is_installed():
            puts('Mysql is already installed.')
            return
        system.package_install.run(packages='debconf-utils')
        version = self.VERSIONS[self.conf.os]
        debconf_defaults = [
            'mysql-server-%s mysql-server/root_password_again password %s' %
            (version, self.conf.db_root_password),
            'mysql-server-%s mysql-server/root_password password %s' %
            (version, self.conf.db_root_password),
        ]
        sudo('echo "%s" | debconf-set-selections' %
             '\n'.join(debconf_defaults))
        # Warn loudly so the generated root password is not lost.
        message = ['\n', '=' * 10, '\n',
                   'MySQL root password is "%s"' % self.conf.db_root_password,
                   '\n', '=' * 10, '\n']
        warn(''.join(message))
        system.package_install.run(packages='mysql-server')
install = Install()
class Dump(Task):
    """Dump the project database to a timestamped SQL file."""
    @conf
    def filename(self):
        # e.g. "mydb20120101.sql"
        return '%(db_name)s%(current_time)s.sql' % self.conf
    @conf
    def filepath(self):
        return posixpath.join(self.conf.backup_path, self.conf.filename)
    @conf
    def command(self):
        return 'mysqldump --user="%(db_user)s" --password="%(db_password)s" ' \
               '%(db_name)s > %(filepath)s' % self.conf
    def do(self):
        return run(self.command)
dump = Dump()
class Execute(Task):
    """Pipe a SQL string into the mysql client as the database root user."""
    @conf
    def escaped_sql(self):
        # Escape double quotes so the SQL survives the shell `echo "..."`.
        return self.conf.sql.replace('"', r'\"')
    def do(self):
        return run('echo "%(escaped_sql)s" | mysql --user="%(db_root_user)s" '
                   '--password="%(db_root_password)s"' % self.conf)
execute = Execute()
class CreateUser(Execute):
    """Create the MySQL user unless it already exists."""
    SQL_USER_EXISTS = "SHOW GRANTS FOR '%(db_user)s'@localhost;"
    SQL_CREATE_USER = """
    CREATE USER '%(db_user)s'@localhost
    IDENTIFIED BY '%(db_password)s';
    """.strip()
    @conf
    def sql_user_exists(self):
        return self.SQL_USER_EXISTS % self.conf
    @conf
    def sql(self):
        return self.SQL_CREATE_USER % self.conf
    def user_exists(self):
        # SHOW GRANTS fails for a missing user, so success means "exists".
        with settings(hide('warnings', 'running', 'stdout', 'stderr'),
                      warn_only=True):
            with execute.tmp_conf(self.conf):
                result = execute.run(sql=self.conf.sql_user_exists)
        return result.succeeded
    def do(self):
        if self.user_exists():
            puts('MySQL user "%(db_user)s" already exists' % self.conf)
            return
        super(CreateUser, self).do()
create_user = CreateUser()
class DropUser(Execute):
    """Drop the configured MySQL user."""
    SQL_DROP_USER = "DROP USER '%(db_user)s'@localhost;"
    @conf
    def sql(self):
        return self.SQL_DROP_USER % self.conf
drop_user = DropUser()
class CreateDb(Execute):
    """Create the project database with UTF-8 defaults."""
    SQL_CREATE_DB = """
    CREATE DATABASE %(db_name)s
    DEFAULT CHARACTER SET utf8 DEFAULT COLLATE utf8_general_ci;
    """.strip()
    @conf
    def sql(self):
        return self.SQL_CREATE_DB % self.conf
create_db = CreateDb()
class DropDb(Execute):
    """Drop the project database."""
    SQL_DROP_DB = "DROP DATABASE %(db_name)s;"
    @conf
    def sql(self):
        return self.SQL_DROP_DB % self.conf
drop_db = DropDb()
class Grant(Execute):
    """Grant the project user full access to the project database."""
    SQL_GRANT = """
    GRANT ALL ON %(db_name)s.* TO '%(db_user)s'@'localhost';
    FLUSH PRIVILEGES;
    """.strip()
    @conf
    def sql(self):
        return self.SQL_GRANT % self.conf
grant = Grant()
| {
"content_hash": "087c2bccd99dd37ff4c9dc489350e40e",
"timestamp": "",
"source": "github",
"line_count": 169,
"max_line_length": 79,
"avg_line_length": 23.514792899408285,
"alnum_prop": 0.5639154504277806,
"repo_name": "vmihailenco/fabdeploy",
"id": "3dd0db582f9a949ec472cbf30f3a830a163c0407",
"size": "3974",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fabdeploy/mysql.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "81345"
}
],
"symlink_target": ""
} |
from setuptools import setup
setup(
    # GETTING-STARTED: set your app name:
    name='lister',
    # GETTING-STARTED: set your app version:
    version='0.1',
    # GETTING-STARTED: set your app description:
    description='OpenShift Lister App',
    # GETTING-STARTED: set author name (your name):
    author='Brian Olecki',
    # GETTING-STARTED: set author email (your email):
    author_email='bolecki019@gmail.com',
    # GETTING-STARTED: set author url (your url):
    url='https://github.com/bolecki',
    # GETTING-STARTED: define required django version:
    install_requires=[
        'Django==1.9.5',
        'djangorestframework',
        'django-sslserver',
        'django-ssl-redirect'
    ],
    # Extra index used to resolve the pinned Django version.
    dependency_links=[
        'https://pypi.python.org/simple/django/'
    ],
)
| {
"content_hash": "c6e3f536f17d803a8a4781cf628658d6",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 54,
"avg_line_length": 30.5,
"alnum_prop": 0.6406052963430012,
"repo_name": "bolecki/lister",
"id": "0d3d3ad44e0952b31bb0ce61c287de6ea28dc785",
"size": "816",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "114102"
},
{
"name": "HTML",
"bytes": "16836"
},
{
"name": "Python",
"bytes": "40465"
},
{
"name": "Shell",
"bytes": "419"
}
],
"symlink_target": ""
} |
"""
Abstract pipeline interface definition.
"""
from . import StopPipeline
class Message:
    """
    Base class which represents information passed within the Pipeline.

    Wraps a plain ``data`` dict and exposes a simple dict-like interface
    so stages can read and mutate message fields directly. Subclasses may
    extend it with schema support (e.g. an elasticsearch schema).
    """
    def __init__(self, data):
        # `data` is expected to be a dict-like payload.
        self.data = data
    # Dict-like interface
    def __getitem__(self, key):
        return self.data[key]
    def __setitem__(self, key, value):
        self.data[key] = value
    def __delitem__(self, key):
        del self.data[key]
    def __contains__(self, key):
        return key in self.data
    def keys(self):
        return self.data.keys()
    # Debug interface
    def __str__(self):
        "Used for printing to stdout"
        # Fix: removed a dead, broken inner `format_dict` helper that was
        # never called (it unpacked (key, value) pairs while iterating a
        # dict, which yields keys only).
        return repr(self.data)
    def __repr__(self):
        return "<Message %r>" % self.data
class Stage:
    """
    Stage which reads data from previous stage and yields new data
    """
    # Created by registering decorator
    log = None
    def handle(self, message):
        """
        Handle incoming data and return result
        Returns:
            None: to drop message from the pipeline.
            message: converted/augmented message.
            message list: multiple messages created from single one
        """
        raise NotImplementedError
    def handle_bulk(self, messages):
        "Naive handler for bulk of messages"
        output = []
        for message in messages:
            # Handling can return single message or multiple.
            message = self.handle(message)
            if isinstance(message, list):
                output += message
            elif message is not None:
                # A stage returning a non-Message object aborts the pipeline.
                if not isinstance(message, Message):
                    self.log.error("Stage %s returned invalid message type: %s",
                                   self, type(message))
                    raise StopPipeline("Pipeline Error")
                output.append(message)
        return output
    @classmethod
    def from_config(cls, config):
        "Creates a stage object from part of YAML configuration file"
        raise NotImplementedError
class SourceStage:
    """
    Stage which starts the pipeline
    """
    # Created by registering decorator
    log = None
    def run(self):
        "Yield incoming data"
        # Subclasses implement this as a generator producing Messages.
        raise NotImplementedError
    @classmethod
    def from_config(cls, config):
        "Creates a sourcestage object from part of YAML configuration file"
        raise NotImplementedError
| {
"content_hash": "e1f111b69ca815f33c39469a0d99fdc8",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 80,
"avg_line_length": 26.865384615384617,
"alnum_prop": 0.5869720830350752,
"repo_name": "blaa/DataStalker",
"id": "89909d2b1c91cf9c01379be92e45d6c38e541d6c",
"size": "2850",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "datastalker/pipeline/stage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "112769"
},
{
"name": "Shell",
"bytes": "381"
}
],
"symlink_target": ""
} |
"""
Scraper for Indiana Supreme Court
CourtID: ind
Court Short Name: Ind.
Auth: Jon Andersen <janderse@gmail.com>
Reviewer: mlr
History:
2014-09-03: Created by Jon Andersen
"""
from juriscraper.OpinionSite import OpinionSite
import time
from datetime import date
class Site(OpinionSite):
    """Scraper for the Indiana Supreme Court opinions page."""
    def __init__(self, *args, **kwargs):
        super(Site, self).__init__(*args, **kwargs)
        self.url = 'http://www.in.gov/judiciary/opinions/supreme.html'
        self.court_id = self.__module__
        # Filled by _get_case_names() and later read by
        # _get_precedential_statuses(); names are extracted first.
        self.my_precedential_statuses = []
    def _get_case_names(self):
        """Extract case names; an "(NFP)" marker means Not For Publication
        and is stripped, recording the status as Unpublished."""
        raw_case_names = [s for s in self.html.xpath('//dl/dt/a/text()')]
        case_names = []
        self.my_precedential_statuses = []
        for case_name in raw_case_names:
            if case_name.find("(NFP)") >= 0:
                case_names.append(case_name.replace("(NFP)", "").strip())
                self.my_precedential_statuses.append("Unpublished")
            else:
                case_names.append(case_name)
                self.my_precedential_statuses.append("Published")
        return case_names
    def _get_download_urls(self):
        return [s for s in self.html.xpath('//dl/dt/a/@href')]
    def _get_case_dates(self):
        """Parse MM/DD/YY dates; empty cells yield empty strings."""
        dates = []
        for date_string in self.html.xpath('//dl/dd/dd/dd/text()'):
            date_string = date_string.strip()
            if date_string == '':
                dates.append('')
            else:
                dates.append(date.fromtimestamp(
                    time.mktime(time.strptime(date_string, '%m/%d/%y'))))
        return dates
    def _get_docket_numbers(self):
        return [s for s in self.html.xpath('//dl/dd/text()')]
    def _get_lower_court_numbers(self):
        # "N/A" placeholders are normalised to empty strings.
        return [e if e.strip() != "N/A" else "" for e in self.html.xpath('//dl/dd/dd/text()')]
    def _get_precedential_statuses(self):
        return self.my_precedential_statuses
| {
"content_hash": "7e22ca0088f0f5075d65c386b320c70c",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 94,
"avg_line_length": 34.125,
"alnum_prop": 0.5834641548927263,
"repo_name": "m4h7/juriscraper",
"id": "5e5db1adecf876698765ac288e5babae5745ff88",
"size": "1911",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "juriscraper/opinions/united_states/state/ind.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "27160373"
},
{
"name": "Makefile",
"bytes": "88"
},
{
"name": "Python",
"bytes": "623951"
}
],
"symlink_target": ""
} |
import os
import glob
from lino.textprinter.plain import PlainTextPrinter
from lino.textprinter.pdfprn import PdfTextPrinter
from lino.textprinter.htmlprn import HtmlTextPrinter
from lino.textprinter.winprn import Win32TextPrinter
from lino import config
# Name of the Windows PostScript printer, taken from the [win32] config section.
PSPRINTER=config.win32.get('postscript_printer')
# Output directory for the generated example files, under the configured web home.
OUTDIR=os.path.join(config.paths.get('webhome'),
                    "examples","textprinter")
def doit(tp):
    """Print a character table for the cp850 and cp437 DOS codepages on *tp*.

    *tp* is a textprinter-style object providing write(), writeln() and
    close(); the printer is closed once both tables have been emitted.
    """
    for codepage in ("cp850", "cp437"):
        tp.writeln("Codepage " + codepage)
        tp.writeln()
        for c in range(15, 255):
            # bytearray([c]).decode(...) behaves identically to the former
            # Python-2-only chr(c).decode(...) and also works on Python 3.
            tp.write(' ' + bytearray([c]).decode(codepage) + ' ')
            if c % 16 == 0:
                # Start a new row after every 16th code point.
                tp.writeln()
        tp.writeln()
    tp.close()
if __name__ == "__main__":
    # Each output backend below is toggled by hand; only the PDF example
    # is currently enabled.
    if False:
        doit(PlainTextPrinter())

    # do it in a PDF document:
    doit(PdfTextPrinter(os.path.join(OUTDIR,"testpage.pdf"),
                        fontName="Liberation"))

    # do it in a HTML file:
    if False:
        doit(HtmlTextPrinter(os.path.join(OUTDIR,"testpage.html")))

    # do it on a Windows printer (spooling PostScript to PSPRINTER):
    if False:
        doit( Win32TextPrinter(
            printerName=PSPRINTER,
            spoolFile=os.path.join(OUTDIR,"testpage.ps")))
| {
"content_hash": "3e525b1a0c5259a716a83c00abf0fcbf",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 67,
"avg_line_length": 25.79591836734694,
"alnum_prop": 0.5909810126582279,
"repo_name": "lsaffre/timtools",
"id": "5949f0a150cc331df27dc68d171cb791ed5c609c",
"size": "1264",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/textprinter/data/testpage.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "674"
},
{
"name": "Python",
"bytes": "735679"
}
],
"symlink_target": ""
} |
import wx, subprocess, dialog, strings, vars
class Flasher:
    """Thin wrapper around the ems-flasher command line tool.

    Builds the argument lists for every supported cartridge operation and
    shells out to the flasher binary configured in ``vars.emsPath``.
    """

    def __init__(self):
        # Canned argument lists for each supported ems-flasher operation.
        self.version = ['--version']
        self.title = ['--title']
        self.format = ['--format']
        self.delete01 = ['--delete','BANK','1']
        self.delete02 = ['--delete','BANK','2']
        self.read01 = ['--read','--rom','--bank','1']
        self.read02 = ['--read','--rom','--bank','2']
        self.readSR = ['--read','--save']
        self.write01 = ['--write','--rom','--bank','1']
        self.write02 = ['--write','--rom','--bank','2']
        self.writeSR = ['--write','--save']

    def callBash(self, command):
        """Run the flasher with *command* appended and return its output.

        Returns ``strings.notFound`` when no binary is configured; on a
        non-zero exit status the tool's captured output is returned instead.
        """
        if self.checkEms():
            instruction = [vars.emsPath, '--verbose'] + command
            try:
                output = subprocess.check_output(instruction)
                return output
            except subprocess.CalledProcessError as e:
                # The captured output doubles as the error report.
                return e.output
        else:
            return strings.notFound

    def checkEms(self):
        "Return True when the path to the ems-flasher binary is configured."
        return vars.emsPath is not None

    def loadEms(self):
        "Ask the user to locate the ems-flasher binary and remember its path."
        def ok_handler(dlg, path):
            vars.emsPath = path
            return strings.emsSet + vars.emsPath + '\n'
        return dialog.file_dialog(strings.dlgLoadEms, '', wx.OPEN, ok_handler)

    def _transfer(self, command, presetPath, dlgTitle, wildcard, dlgStyle,
                  defaultName=None):
        """Run *command* against a file path.

        Uses *presetPath* when one is configured; otherwise shows a file
        dialog (preselecting *defaultName* when given) and runs the command
        on the path the user chooses.
        """
        def ok_handler(dlg, path):
            return self.callBash(command + [str(path)])
        if presetPath is not None:
            return self.callBash(command + [str(presetPath)])
        if defaultName is None:
            return dialog.file_dialog(dlgTitle, wildcard, dlgStyle, ok_handler)
        return dialog.file_dialog(dlgTitle, wildcard, dlgStyle, ok_handler,
                                  defaultName)

    def checkVersion(self):
        "Report the flasher tool's version."
        return self.callBash(self.version)

    def checkCart(self):
        "Report the title of the inserted cartridge."
        return self.callBash(self.title)

    def deleteBank01(self):
        "Erase ROM bank 1."
        return self.callBash(self.delete01)

    def deleteBank02(self):
        "Erase ROM bank 2."
        return self.callBash(self.delete02)

    def formatCart(self):
        "Erase the entire cartridge."
        return self.callBash(self.format)

    def readBank01(self):
        "Dump ROM bank 1 to a .gb file."
        return self._transfer(self.read01, vars.GBSavePath,
                              strings.dlgSaveROM, '*.gb', wx.SAVE,
                              'backup_bank-1.gb')

    def readBank02(self):
        "Dump ROM bank 2 to a .gb file."
        return self._transfer(self.read02, vars.GBSavePath,
                              strings.dlgSaveROM, '*.gb', wx.SAVE,
                              'backup_bank-2.gb')

    def readSRam(self):
        "Dump the cartridge's save RAM to a .sav file."
        return self._transfer(self.readSR, vars.SRAMSavePath,
                              strings.dlgSaveSav, '*.sav', wx.SAVE,
                              'backup.sav')

    def writeBank01(self):
        "Flash a .gb ROM image into bank 1."
        return self._transfer(self.write01, vars.GBWritePath,
                              strings.dlgLoadROM, '*.gb', wx.OPEN)

    def writeBank02(self):
        "Flash a .gb ROM image into bank 2."
        return self._transfer(self.write02, vars.GBWritePath,
                              strings.dlgLoadROM, '*.gb', wx.OPEN)

    def writeSRam(self):
        "Write a .sav file into the cartridge's save RAM."
        return self._transfer(self.writeSR, vars.SRAMWritePath,
                              strings.dlgLoadSav, '*.sav', wx.OPEN)
| {
"content_hash": "55768c88cce87438ada0d4b57fde15bd",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 102,
"avg_line_length": 34.125,
"alnum_prop": 0.5631257631257631,
"repo_name": "zeroerrequattro/ems-flasher-gui",
"id": "781e25c40d690dadd2ed5b174e48c7d4a9fedf5c",
"size": "4114",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ems.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17977"
}
],
"symlink_target": ""
} |
"""
mbed CMSIS-DAP debugger
Copyright (c) 2016 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from .provider import (TargetThread, ThreadProvider)
from .common import (read_c_string, HandlerModeThread)
from ..debug.context import DebugContext
from ..coresight.cortex_target import CORE_REGISTER
from pyOCD.pyDAPAccess import DAPAccess
import logging
# Byte offsets into the Argon kernel's data structures. These must match the
# ar_kernel/ar_thread/list-node layout compiled into the target firmware —
# verify against the Argon RTOS sources when updating.
IS_RUNNING_OFFSET = 0x54

ALL_OBJECTS_THREADS_OFFSET = 0

# Offsets into a thread struct.
THREAD_STACK_POINTER_OFFSET = 0
THREAD_EXTENDED_FRAME_OFFSET = 4
THREAD_NAME_OFFSET = 8
THREAD_STACK_BOTTOM_OFFSET = 12
THREAD_PRIORITY_OFFSET = 16
THREAD_STATE_OFFSET = 17
THREAD_CREATED_NODE_OFFSET = 36

# Offsets into a linked-list node.
LIST_NODE_NEXT_OFFSET = 0
LIST_NODE_OBJ_OFFSET= 8

# Create a logger for this module.
log = logging.getLogger("argon")
class TargetList(object):
    """Iterates an Argon circular linked list of kernel objects that lives
    in target memory, yielding the object pointer held by each node."""

    def __init__(self, context, ptr):
        self._context = context
        self._list = ptr

    def __iter__(self):
        head = self._context.read32(self._list)
        # A null head means the list is empty.
        if head == 0:
            return
        node = head
        while True:
            try:
                # Yield the object this node links, then advance.
                yield self._context.read32(node + LIST_NODE_OBJ_OFFSET)
                node = self._context.read32(node + LIST_NODE_NEXT_OFFSET)
            except DAPAccess.TransferError:
                log.warning("TransferError while reading list elements (list=0x%08x, node=0x%08x), terminating list", self._list, node)
                return
            # The list is circular; stop when we are back at the head.
            if node == head:
                return
## @brief Debug context presenting an Argon thread's saved register state.
class ArgonThreadContext(DebugContext):
    """Redirects register reads for a non-running thread to the values saved
    in the frame at the thread's stack pointer; reads for the running thread
    (outside handler mode) and all writes go to the live core."""

    # SP is handled specially, so it is not in these dicts.
    # Byte offsets, from the thread's saved SP, of each register in a basic
    # frame: 8 software-stacked registers followed by the hardware-stacked ones.
    CORE_REGISTER_OFFSETS = {
        # Software stacked
        4: 0, # r4
        5: 4, # r5
        6: 8, # r6
        7: 12, # r7
        8: 16, # r8
        9: 20, # r9
        10: 24, # r10
        11: 28, # r11
        # Hardware stacked
        0: 32, # r0
        1: 36, # r1
        2: 40, # r2
        3: 44, # r3
        12: 48, # r12
        14: 52, # lr
        15: 56, # pc
        16: 60, # xpsr
    }

    # Same layout for a frame that also contains FPU state (s0-s31, fpscr).
    FPU_EXTENDED_REGISTER_OFFSETS = {
        # Software stacked
        4: 0, # r4
        5: 4, # r5
        6: 8, # r6
        7: 12, # r7
        8: 16, # r8
        9: 20, # r9
        10: 24, # r10
        11: 28, # r11
        0x50: 32, # s16
        0x51: 36, # s17
        0x52: 40, # s18
        0x53: 44, # s19
        0x54: 48, # s20
        0x55: 52, # s21
        0x56: 56, # s22
        0x57: 60, # s23
        0x58: 64, # s24
        0x59: 68, # s25
        0x5a: 72, # s26
        0x5b: 76, # s27
        0x5c: 80, # s28
        0x5d: 84, # s29
        0x5e: 88, # s30
        0x5f: 92, # s31
        # Hardware stacked
        0: 96, # r0
        1: 100, # r1
        2: 104, # r2
        3: 108, # r3
        12: 112, # r12
        14: 116, # lr
        15: 120, # pc
        16: 124, # xpsr
        0x40: 128, # s0
        0x41: 132, # s1
        0x42: 136, # s2
        0x43: 140, # s3
        0x44: 144, # s4
        0x45: 148, # s5
        0x46: 152, # s6
        0x47: 156, # s7
        0x48: 160, # s8
        0x49: 164, # s9
        0x4a: 168, # s10
        0x4b: 172, # s11
        0x4c: 176, # s12
        0x4d: 180, # s13
        0x4e: 184, # s14
        0x4f: 188, # s15
        33: 192, # fpscr
        # (reserved word: 196)
    }

    # Registers that are not available on the stack for exceptions.
    EXCEPTION_UNAVAILABLE_REGS = (4, 5, 6, 7, 8, 9, 10, 11)

    def __init__(self, parentContext, thread):
        super(ArgonThreadContext, self).__init__(parentContext.core)
        self._parent = parentContext  # context used for actual target accesses
        self._thread = thread         # the thread this context represents

    def readCoreRegistersRaw(self, reg_list):
        """Read registers, reconstructing saved values from the thread's
        stack frame when the thread is not the live running one."""
        reg_list = [self.registerNameToIndex(reg) for reg in reg_list]
        reg_vals = []

        inException = self._get_ipsr() > 0
        isCurrent = self._thread.is_current

        # If this is the current thread and we're not in an exception, just read the live registers.
        if isCurrent and not inException:
            return self._parent.readCoreRegistersRaw(reg_list)

        sp = self._thread.get_stack_pointer()

        # Determine which register offset table to use and the offsets past the saved state.
        # 0x40/0xc8 are the total sizes of a basic/extended frame (see the
        # offset tables above).
        realSpOffset = 0x40
        realSpExceptionOffset = 0x20
        table = self.CORE_REGISTER_OFFSETS
        if self._thread.has_extended_frame:
            table = self.FPU_EXTENDED_REGISTER_OFFSETS
            realSpOffset = 0xc8
            realSpExceptionOffset = 0x68

        for reg in reg_list:
            # Check for regs we can't access.
            if isCurrent and inException:
                if reg in self.EXCEPTION_UNAVAILABLE_REGS:
                    reg_vals.append(0)
                    continue
                if reg == 18 or reg == 13: # PSP
                    log.debug("psp = 0x%08x", sp + realSpExceptionOffset)
                    reg_vals.append(sp + realSpExceptionOffset)
                    continue

            # Must handle stack pointer specially.
            # (Only reached for non-current threads; the current-thread-in-
            # exception case handled reg 13 in the PSP branch above.)
            if reg == 13:
                reg_vals.append(sp + realSpOffset)
                continue

            # Look up offset for this register on the stack.
            spOffset = table.get(reg, None)
            if spOffset is None:
                # Not part of the saved frame; read the live register.
                reg_vals.append(self._parent.readCoreRegisterRaw(reg))
                continue
            if isCurrent and inException:
                spOffset -= realSpExceptionOffset #0x20

            try:
                reg_vals.append(self._parent.read32(sp + spOffset))
            except DAPAccess.TransferError:
                # Unreadable stack memory; report zero rather than failing.
                reg_vals.append(0)

        return reg_vals

    def _get_ipsr(self):
        # Low byte of xPSR is the IPSR exception number (0 == thread mode).
        return self._parent.readCoreRegister('xpsr') & 0xff

    def writeCoreRegistersRaw(self, reg_list, data_list):
        # Writes always go to the live core registers.
        self._parent.writeCoreRegistersRaw(reg_list, data_list)
## @brief Base class representing a thread on the target.
class ArgonThread(TargetThread):
    """Mirrors one Argon thread structure read from target memory."""

    # Thread state values as stored in the thread struct's state byte.
    UNKNOWN = 0
    SUSPENDED = 1
    READY = 2
    RUNNING = 3
    BLOCKED = 4
    SLEEPING = 5
    DONE = 6

    # Human-readable names for the state values above.
    STATE_NAMES = {
        UNKNOWN : "Unknown",
        SUSPENDED : "Suspended",
        READY : "Ready",
        RUNNING : "Running",
        BLOCKED : "Blocked",
        SLEEPING : "Sleeping",
        DONE : "Done",
    }

    def __init__(self, targetContext, provider, base):
        """Read the thread's name, state and priority from the struct at
        address *base* in target memory."""
        super(ArgonThread, self).__init__()
        self._target_context = targetContext
        self._provider = provider
        self._base = base  # target address of the thread struct
        self._thread_context = ArgonThreadContext(self._target_context, self)
        self._has_fpu = self._thread_context.core.has_fpu
        self._priority = 0
        self._state = self.UNKNOWN
        self._name = "?"

        try:
            self.update_info()

            ptr = self._target_context.read32(self._base + THREAD_NAME_OFFSET)
            self._name = read_c_string(self._target_context, ptr)
        except DAPAccess.TransferError:
            # Keep the defaults if the target reads fail.
            log.debug("Transfer error while reading thread info")

    def get_stack_pointer(self):
        """Return the thread's SP: the live PSP when the thread is current,
        otherwise the value saved in its thread struct."""
        sp = 0
        if self.is_current:
            # Read live process stack.
            sp = self._target_context.readCoreRegister('psp')
        else:
            # Get stack pointer saved in thread struct.
            try:
                sp = self._target_context.read32(self._base + THREAD_STACK_POINTER_OFFSET)
            except DAPAccess.TransferError:
                log.debug("Transfer error while reading thread's stack pointer @ 0x%08x", self._base + THREAD_STACK_POINTER_OFFSET)
        return sp

    def update_info(self):
        """Re-read the thread's priority and state from target memory."""
        try:
            self._priority = self._target_context.read8(self._base + THREAD_PRIORITY_OFFSET)

            self._state = self._target_context.read8(self._base + THREAD_STATE_OFFSET)
            if self._state > self.DONE:
                # Clamp out-of-range state bytes to UNKNOWN.
                self._state = self.UNKNOWN
        except DAPAccess.TransferError:
            log.debug("Transfer error while reading thread info")

    @property
    def state(self):
        # Last state read by update_info().
        return self._state

    @property
    def priority(self):
        # Last priority read by update_info().
        return self._priority

    @property
    def unique_id(self):
        # The thread struct's address doubles as a stable unique ID.
        return self._base

    @property
    def name(self):
        return self._name

    @property
    def description(self):
        # E.g. "Ready; Priority 3".
        return "%s; Priority %d" % (self.STATE_NAMES[self.state], self.priority)

    @property
    def is_current(self):
        # True when the scheduler's current-thread pointer names this thread.
        return self._provider.get_actual_current_thread_id() == self.unique_id

    @property
    def context(self):
        # Debug context that reads registers from this thread's stack frame.
        return self._thread_context

    @property
    def has_extended_frame(self):
        """Whether the thread's saved stack frame includes FPU state."""
        if not self._has_fpu:
            return False
        try:
            flag = self._target_context.read8(self._base + THREAD_EXTENDED_FRAME_OFFSET)
            return flag != 0
        except DAPAccess.TransferError:
            log.debug("Transfer error while reading thread's extended frame flag @ 0x%08x", self._base + THREAD_EXTENDED_FRAME_OFFSET)
            return False

    def __str__(self):
        return "<ArgonThread@0x%08x id=%x name=%s>" % (id(self), self.unique_id, self.name)

    def __repr__(self):
        return str(self)
## @brief Thread provider plugin for the Argon RTOS.
class ArgonThreadProvider(ThreadProvider):
    """Discovers and tracks Argon threads by reading the kernel's globals."""

    def __init__(self, target):
        super(ArgonThreadProvider, self).__init__(target)
        self.g_ar = None          # address of the g_ar kernel global, set by init()
        self.g_ar_objects = None  # address of the g_ar_objects global, set by init()
        self._all_threads = None  # address of the list of all thread objects
        self._threads = {}        # maps thread unique_id -> ArgonThread

    def init(self, symbolProvider):
        """Resolve the Argon kernel symbols; returns False when they are
        absent (i.e. the firmware is not built with Argon)."""
        self.g_ar = symbolProvider.get_symbol_value("g_ar")
        if self.g_ar is None:
            return False
        log.debug("Argon: g_ar = 0x%08x", self.g_ar)

        self.g_ar_objects = symbolProvider.get_symbol_value("g_ar_objects")
        if self.g_ar_objects is None:
            return False
        log.debug("Argon: g_ar_objects = 0x%08x", self.g_ar_objects)

        self._all_threads = self.g_ar_objects + ALL_OBJECTS_THREADS_OFFSET

        return True

    def _build_thread_list(self):
        """Rebuild self._threads from the target's all-threads list."""
        allThreads = TargetList(self._target_context, self._all_threads)
        newThreads = {}
        for threadBase in allThreads:
            try:
                # Reuse existing thread objects if possible.
                if threadBase in self._threads:
                    t = self._threads[threadBase]

                    # Ask the thread object to update its state and priority.
                    t.update_info()
                else:
                    t = ArgonThread(self._target_context, self, threadBase)
                log.debug("Thread 0x%08x (%s)", threadBase, t.name)
                newThreads[t.unique_id] = t
            except DAPAccess.TransferError:
                log.debug("TransferError while examining thread 0x%08x", threadBase)

        # Create fake handler mode thread.
        if self.get_ipsr() > 0:
            log.debug("creating handler mode thread")
            t = HandlerModeThread(self._target_context, self)
            newThreads[t.unique_id] = t

        self._threads = newThreads

    def get_threads(self):
        """Return all known threads; empty when Argon is not running."""
        if not self.is_enabled:
            return []
        self.update_threads()
        return self._threads.values()

    def get_thread(self, threadId):
        """Return the thread with *threadId*, or None if unknown/disabled."""
        if not self.is_enabled:
            return None
        self.update_threads()
        return self._threads.get(threadId, None)

    @property
    def is_enabled(self):
        # Threads are only reported once the Argon scheduler has started.
        return self.g_ar is not None and self.get_is_running()

    @property
    def current_thread(self):
        """The currently scheduled thread object, or None."""
        if not self.is_enabled:
            return None
        self.update_threads()
        id = self.get_current_thread_id()
        try:
            return self._threads[id]
        except KeyError:
            log.debug("key error getting current thread id=%x", id)
            log.debug("self._threads = %s", repr(self._threads))
            return None

    def is_valid_thread_id(self, threadId):
        if not self.is_enabled:
            return False
        self.update_threads()
        return threadId in self._threads

    def get_current_thread_id(self):
        """Current thread ID as seen by the debugger."""
        if not self.is_enabled:
            return None
        if self.get_ipsr() > 0:
            # NOTE(review): 2 appears to be HandlerModeThread's fixed unique
            # ID — confirm against its definition in .common.
            return 2
        return self.get_actual_current_thread_id()

    def get_actual_current_thread_id(self):
        """Address of the running thread's struct, read from g_ar."""
        if not self.is_enabled:
            return None
        return self._target_context.read32(self.g_ar)

    def get_is_running(self):
        """Whether the Argon scheduler has started (g_ar's is-running flag)."""
        if self.g_ar is None:
            return False
        flag = self._target_context.read8(self.g_ar + IS_RUNNING_OFFSET)
        return flag != 0
| {
"content_hash": "9973bede41708bc25c972b082e172925",
"timestamp": "",
"source": "github",
"line_count": 423,
"max_line_length": 135,
"avg_line_length": 32.43262411347518,
"alnum_prop": 0.5415846636052191,
"repo_name": "matthewelse/pyOCD",
"id": "af176af5b2972389998915571d149e6c4d1babe7",
"size": "13719",
"binary": false,
"copies": "1",
"ref": "refs/heads/cortex-a",
"path": "pyOCD/rtos/argon.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "801"
},
{
"name": "C",
"bytes": "3924"
},
{
"name": "Python",
"bytes": "871728"
},
{
"name": "Shell",
"bytes": "479"
}
],
"symlink_target": ""
} |
from common import *
# Local development settings; extends the shared settings from common.py.
# NOTE(review): DEBUG must never be enabled in a production deployment.
DEBUG = True
TEMPLATE_DEBUG = DEBUG

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': 'djangoblog',                      # Or path to database file if using sqlite3.
        # The following settings are not used with sqlite3:
        # Credentials come from the environment (see get_env_setting in common.py).
        'USER': get_env_setting('DB_USER'),
        'PASSWORD': get_env_setting('DB_PASS'),
        'HOST': '',                      # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
        'PORT': '',                      # Set to empty string for default.
    }
}

SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True

# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True

# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''

# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''

# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''

# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'

# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)

# List of finder classes that know how to find static files in
# various locations. (Extends the base list from common.py.)
STATICFILES_FINDERS += (
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)

# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS += (
    # 'django.template.loaders.eggs.Loader',
)

MIDDLEWARE_CLASSES += (
    # Uncomment the next line for simple clickjacking protection:
    # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)

ROOT_URLCONF = 'djangoblog.urls'

# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'djangoblog.wsgi.application'

TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)

INSTALLED_APPS += (
    # Uncomment the next line to enable the admin:
    'django.contrib.admin',
    # Uncomment the next line to enable admin documentation:
    # 'django.contrib.admindocs',
    'blogengine',
)

SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'

# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}

# NOTE(review): hardcoded secret key committed to source control — rotate it
# and load it from the environment (e.g. get_env_setting('SECRET_KEY')).
SECRET_KEY = 'addddddqwer3242342342sdfscvvbfgr'
| {
"content_hash": "08cb66535c4367a88fa0ee20d24981a0",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 127,
"avg_line_length": 32.58064516129032,
"alnum_prop": 0.6688118811881189,
"repo_name": "NikhilKalige/DjangoBlog",
"id": "af2e585490ca3a4ef8d20d0a924fcbd7a7240adf",
"size": "4082",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "djangoblog/djangoblog/settings/local.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10302"
}
],
"symlink_target": ""
} |
import requests
from math import sqrt
# from .yelp import find_yelp_posts
from .sendsms import SMS
# from .models import Route, Trip
# from users.model import User
def distance(point_m, point_n):
    """Return the Euclidean distance between 2-D points *point_m* and *point_n*.

    Each point is a two-item sequence of coordinates.
    """
    d0 = point_n[0] - point_m[0]
    # Bug fix: the second delta must subtract point_m[1]; it previously
    # subtracted point_m[0], skewing every distance along one axis.
    d1 = point_n[1] - point_m[1]
    return sqrt(d0**2 + d1**2)
def scenic_trip_builder(point_a, point_b):
    '''Builds a scenic route to point_b by sampling spots along the way.
    Limit of 5 candidate locations will be attached to a trip.

    Params:
        point_a: Location data of the starting point
            Type: List
            Format: [latitude, longitude]
        point_b: Location data of the destination
            Type: List
            Format: [latitude, longitude]

    Returns the sampled waypoints arranged by order_points(). Until the
    Yelp lookup below is re-enabled, the raw interpolated coordinates are
    used as the candidate spots.
    '''
    NUM_SPOTS = 5
    scenic_routes = []
    # Sample evenly spaced points on the straight line from point_a to
    # point_b (endpoints excluded). The previous implementation crashed on
    # undefined names and divided endpoint sums by the segment length.
    for i in range(1, NUM_SPOTS + 1):
        fraction = float(i) / (NUM_SPOTS + 1)
        y = point_a[0] + (point_b[0] - point_a[0]) * fraction
        x = point_a[1] + (point_b[1] - point_a[1]) * fraction
        # scenic_routes.append(find_yelp_spot(x, y))
        scenic_routes.append([y, x])
    return order_points(scenic_routes)
def order_points(points):
    """Arrange coordinate pairs into a route using nearest-neighbour hops.

    Given an array of coordinate pairs, i.e. [[x1, y1], [x2, y2], ...],
    where the FIRST and LAST pairs are assumed to be the trip's endpoints,
    returns the same pairs in visiting order. Greedy heuristic: starting
    from one endpoint, always hop to the closest remaining point.

    (Debug prints and the dead trace-string block were removed; the
    ordering logic is unchanged.)
    """
    points = points[::-1]
    route = []
    # Peel off one endpoint first; it anchors the greedy walk.
    route.append(points.pop(len(points) - 1))
    while len(points) > 1:
        # Distance from the current head to every other remaining point.
        gaps = []
        for i in range(len(points) - 1):
            gaps.append(distance(points[0], points[i + 1]))
        route.append(points.pop(0))
        nearest = gaps.index(min(gaps))
        # Move the nearest point to the front so it is visited next; the
        # displaced front point goes to the back of the pool.
        if nearest != 0:
            points.append(points[0])
            points[0] = points[nearest]
            points.pop(nearest)
    route.append(points.pop(0))
    # Rotate and reverse so the route runs endpoint-to-endpoint.
    route.append(route.pop(0))
    return route[::-1]
def send_unconfirmed(user_number):
    '''Text the rider(s) when a trip is created.

    Returns the SMS send status, or None when no number was supplied.
    '''
    if not user_number:
        return None
    return SMS(user_number).send("A rider has requested a ride.")
def send_confirmed(user_number):
    '''Text the customer when a rider confirms their trip request.

    Returns the SMS send status, or None when no number was supplied.
    '''
    if not user_number:
        return None
    return SMS(user_number).send("A rider has confirmed your trip request.")
def send_arrived(user_number):
    '''Text the customer when their rider reaches the pickup location.

    Returns the SMS send status, or None when no number was supplied.
    '''
    if not user_number:
        return None
    return SMS(user_number).send("Your rider has arrived at your pick-up location.")
| {
"content_hash": "a127d3b020b27861ecc651337e68b073",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 102,
"avg_line_length": 26.87074829931973,
"alnum_prop": 0.5939240506329114,
"repo_name": "newtonjain/hacktheplanet",
"id": "c0cda2fbf47320f0075bf21c605262af3cf675fd",
"size": "3950",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/utils/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "540270"
},
{
"name": "HTML",
"bytes": "738103"
},
{
"name": "JavaScript",
"bytes": "3486218"
},
{
"name": "Makefile",
"bytes": "35"
},
{
"name": "Python",
"bytes": "73531"
},
{
"name": "Shell",
"bytes": "3751"
}
],
"symlink_target": ""
} |
from distutils import core
from distutils import errors
import logging
import os
import sys
import warnings
from pbr import util
# Python 2/3 compatibility aliases used by the isinstance() checks in pbr().
if sys.version_info[0] == 3:
    string_type = str
    integer_types = (int,)
else:
    string_type = basestring  # flake8: noqa
    integer_types = (int, long)  # flake8: noqa
def pbr(dist, attr, value):
    """Implements the actual pbr setup() keyword.

    When used, this should be the only keyword in your setup() aside from
    `setup_requires`.

    If given as a string, the value of pbr is assumed to be the relative path
    to the setup.cfg file to use.  Otherwise, if it evaluates to true, it
    simply assumes that pbr should be used, and the default 'setup.cfg' is
    used.

    This works by reading the setup.cfg file, parsing out the supported
    metadata and command options, and using them to rebuild the
    `DistributionMetadata` object and set the newly added command options.

    The reason for doing things this way is that a custom `Distribution` class
    will not play nicely with setup_requires; however, this implementation may
    not work well with distributions that do use a `Distribution` subclass.
    """
    # A falsy value (pbr=False or empty) disables pbr processing entirely.
    if not value:
        return
    if isinstance(value, string_type):
        path = os.path.abspath(value)
    else:
        path = os.path.abspath('setup.cfg')
    if not os.path.exists(path):
        raise errors.DistutilsFileError(
            'The setup.cfg file %s does not exist.' % path)

    # Converts the setup.cfg file to setup() arguments
    try:
        attrs = util.cfg_to_args(path, dist.script_args)
    except Exception:
        e = sys.exc_info()[1]
        # NB: This will output to the console if no explicit logging has
        # been setup - but thats fine, this is a fatal distutils error, so
        # being pretty isn't the #1 goal.. being diagnosable is.
        logging.exception('Error parsing')
        raise errors.DistutilsSetupError(
            'Error parsing %s: %s: %s' % (path, e.__class__.__name__, e))

    # Repeat some of the Distribution initialization code with the newly
    # provided attrs
    if attrs:
        # Skips 'options' and 'licence' support which are rarely used; may
        # add back in later if demanded
        for key, val in attrs.items():
            # Prefer metadata setters, then metadata attributes, then plain
            # Distribution attributes — mirroring Distribution.__init__.
            if hasattr(dist.metadata, 'set_' + key):
                getattr(dist.metadata, 'set_' + key)(val)
            elif hasattr(dist.metadata, key):
                setattr(dist.metadata, key, val)
            elif hasattr(dist, key):
                setattr(dist, key, val)
            else:
                msg = 'Unknown distribution option: %s' % repr(key)
                warnings.warn(msg)

    # Re-finalize the underlying Distribution
    try:
        super(dist.__class__, dist).finalize_options()
    except TypeError:
        # If dist is not declared as a new-style class (with object as
        # a subclass) then super() will not work on it. This is the case
        # for Python 2. In that case, fall back to doing this the ugly way
        dist.__class__.__bases__[-1].finalize_options(dist)

    # This bit comes out of distribute/setuptools
    if isinstance(dist.metadata.version, integer_types + (float,)):
        # Some people apparently take "version number" too literally :)
        dist.metadata.version = str(dist.metadata.version)
| {
"content_hash": "574a44c2a353d198d6fe3236d9acdcb9",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 78,
"avg_line_length": 37.674157303370784,
"alnum_prop": 0.6450939457202505,
"repo_name": "ryfeus/lambda-packs",
"id": "a93253baddf4796a1d3be3c80ef0f0979cf95d54",
"size": "5507",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "Keras_tensorflow_nightly/source2.7/pbr/core.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9768343"
},
{
"name": "C++",
"bytes": "76566960"
},
{
"name": "CMake",
"bytes": "191097"
},
{
"name": "CSS",
"bytes": "153538"
},
{
"name": "Cuda",
"bytes": "61768"
},
{
"name": "Cython",
"bytes": "3110222"
},
{
"name": "Fortran",
"bytes": "110284"
},
{
"name": "HTML",
"bytes": "248658"
},
{
"name": "JavaScript",
"bytes": "62920"
},
{
"name": "MATLAB",
"bytes": "17384"
},
{
"name": "Makefile",
"bytes": "152150"
},
{
"name": "Python",
"bytes": "549307737"
},
{
"name": "Roff",
"bytes": "26398"
},
{
"name": "SWIG",
"bytes": "142"
},
{
"name": "Shell",
"bytes": "7790"
},
{
"name": "Smarty",
"bytes": "4090"
},
{
"name": "TeX",
"bytes": "152062"
},
{
"name": "XSLT",
"bytes": "305540"
}
],
"symlink_target": ""
} |
from twython import Twython
import pandas as pd
from loader import load_paths
import time
from shutil import rmtree, move
from urllib.request import urlopen
from zipfile import ZipFile
import os
import pickle
def access_twitter_api():
    """Read the Twitter API credentials from disk and return a Twython client.

    The key file is expected to hold the consumer key, consumer secret and
    access token on three separate lines.
    """
    paths = load_paths()
    with open(paths["PATH_TO_TWITTER_API_KEYS"], 'r') as f:
        # strip() drops the trailing newlines readlines() keeps, which would
        # otherwise corrupt the credentials passed to Twython. [:3] tolerates
        # a blank trailing line in the key file.
        CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN = [line.strip() for line in f.readlines()[:3]]
    twitter_api = Twython(CONSUMER_KEY, access_token = ACCESS_TOKEN)
    return twitter_api
def load_tweet_ids():
    """Return the Michigan tweet IDs from disk, whitespace stripped."""
    paths = load_paths()
    with open(paths["PATH_TO_MICHIGAN_TWEET_IDS"], 'r') as f:
        return [line.strip() for line in f]
def get_tweets_from_tweet_ids(twitter_api, tweet_ids):
    """Ask Twitter to return all tweets in our list of tweet_ids.

    Twitter's rate limits allow 100 IDs per lookup call and 300 calls per
    15-minute window (https://developer.twitter.com/en/docs/basics/rate-limits),
    so the IDs are chunked accordingly and the function sleeps between
    windows. Returns the tweets as a list of dictionaries; tweets that have
    been deleted are silently omitted by the API.
    """
    TWEET_IDS_PER_QUERY = 100
    QUERIES_PER_WINDOW = 300
    LENGTH_OF_WINDOW = 15 # in minutes

    # Chunk tweet IDs into groups of at most TWEET_IDS_PER_QUERY. The slice
    # handles the (possibly shorter) final chunk gracefully.
    tweet_id_chunks = [tweet_ids[i:i + TWEET_IDS_PER_QUERY]
                       for i in range(0, len(tweet_ids), TWEET_IDS_PER_QUERY)]

    # Group those chunks by how many queries fit in one rate-limit window.
    chunks_of_tweet_id_chunks = [tweet_id_chunks[i:i + QUERIES_PER_WINDOW]
                                 for i in range(0, len(tweet_id_chunks), QUERIES_PER_WINDOW)]

    all_tweets = []
    # Retrieve the max number of tweets you can, wait for the next time
    # window, and repeat until done.
    for i, chunk_of_tweet_id_chunks in enumerate(chunks_of_tweet_id_chunks):
        # Wait out the previous rate-limit window plus a 2-minute buffer.
        if i != 0:
            time.sleep((LENGTH_OF_WINDOW + 2) * 60)
        for tweet_id_chunk in chunk_of_tweet_id_chunks:
            # Each tweet is a dictionary; lookup_status returns a list of them.
            tweets = twitter_api.lookup_status(id = tweet_id_chunk)
            all_tweets += tweets
            print("Downloaded {} tweets.".format(len(all_tweets)))
        print("Finished downloading chunk {} of {} (chunk id {}). {} tweets downloaded so far.".format(i + 1, len(chunks_of_tweet_id_chunks), i, len(all_tweets)))
    print("Finished downloading {} tweets!".format(len(all_tweets)))

    return all_tweets
def download_michigan_data():
    """Download the Michigan dataset and pickle it to disk.

    Returns nothing.  Some of the original tweets are no longer available;
    the IDs of the tweets that could not be retrieved are written to a text
    file, one ID per line.
    """
    paths = load_paths()
    twitter_api = access_twitter_api()
    tweet_ids = load_tweet_ids()
    tweets = get_tweets_from_tweet_ids(twitter_api, tweet_ids)
    # Automatically converting to a dataframe leaves columns of unspecified
    # type and makes loading it later buggy, so for now just pickle the raw
    # list of tweet dictionaries.  Eventually this should be manually
    # converted into a pandas DataFrame with explicit column types.
    with open(paths["PATH_TO_MICHIGAN_TWEETS_PICKLED"], 'wb+') as f:
        pickle.dump(tweets, f)
    # BUG FIX: `tweets` is a list of dictionaries (the DataFrame conversion
    # above is intentionally disabled), so the original DataFrame-style
    # accesses `tweets['id'].values` and `tweets.index` raised at runtime.
    # Index into each tweet dict instead.
    # Assumes each tweet dict carries an 'id' field -- TODO confirm against
    # get_tweets_from_tweet_ids's actual return shape.
    retrieved_ids = [str(tweet['id']) for tweet in tweets]
    retrieved_id_set = set(retrieved_ids)  # O(1) membership tests instead of O(n) list scans
    unretrieved_ids = [tweet_id for tweet_id in tweet_ids
                       if tweet_id not in retrieved_id_set]
    with open(paths["PATH_TO_MICHIGAN_UNRETRIEVED_TWEET_IDS"], 'w') as f:
        for unretrieved_id in unretrieved_ids:
            f.write("{}\n".format(unretrieved_id))
    # NOTE(review): an earlier comment in this file says 63,245 tweets were
    # still available, but this check expects 63,277 -- confirm the figure.
    assert len(tweets) == 63277  # make sure you've downloaded the same tweets as us
def download_honeypot_data():
    """Download and unzip the Honeypot dataset. Returns nothing."""
    paths = load_paths()
    # Create a temp directory and download the zip file into it.
    path_to_temp_directory = os.path.join(paths["PATH_TO_DATA_DIRECTORY"], "temp")
    if not os.path.isdir(path_to_temp_directory):
        os.mkdir(path_to_temp_directory)
    path_to_zip_file = os.path.join(path_to_temp_directory, "data.zip")
    # FIX: close the HTTP response when the download finishes (the original
    # leaked the connection object returned by urlopen).
    response = urlopen(paths["HONEYPOT_DATA_URL"])
    try:
        with open(path_to_zip_file, "wb") as f:
            f.write(response.read())
    finally:
        response.close()
    # FIX: open the zip file as a context manager so its handle is closed.
    with ZipFile(path_to_zip_file) as zip_file:
        zip_file.extractall(path=path_to_temp_directory)
        # Grab the archive's single top-level directory name while it is open.
        name = zip_file.namelist()[0]
    # Move the extracted directory to the honeypot data directory, replacing
    # any previous copy, then clean up the temp directory.
    extracted_directory = os.path.join(path_to_temp_directory, name)
    if os.path.isdir(paths["PATH_TO_HONEYPOT_DATA_DIRECTORY"]):
        rmtree(paths["PATH_TO_HONEYPOT_DATA_DIRECTORY"])
    move(extracted_directory, paths["PATH_TO_HONEYPOT_DATA_DIRECTORY"])
    rmtree(path_to_temp_directory)
| {
"content_hash": "5a0aa6d38358a9bf5a55776633eb0dd7",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 166,
"avg_line_length": 42.80152671755725,
"alnum_prop": 0.7335473515248796,
"repo_name": "pawarren/Fake-News-Echo-Chambers",
"id": "d38a0b0a384455196f0688660e4133d538743421",
"size": "5607",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "downloader.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "11680807"
},
{
"name": "Python",
"bytes": "43239"
},
{
"name": "Shell",
"bytes": "340"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
# This file is only used if you use `make publish` or
# explicitly specify it as your config file.
import os
import sys

sys.path.append(os.curdir)
# Re-export every setting from the base configuration, then override the
# publish-specific values below.
from pelicanconf import *

# Site is served from the web root when published.
SITEURL = '/'
RELATIVE_URLS = False

# Generate Atom feeds only in the published output.
FEED_ALL_ATOM = 'feeds/all.atom.xml'
CATEGORY_FEED_ATOM = 'feeds/%s.atom.xml'

# Start from a clean output directory on every publish.
DELETE_OUTPUT_DIRECTORY = True

# Following items are often useful when publishing
# Social presence
TWITTER_USERNAME = 'voidspacexyz'
GITHUB_NAME = "ramaseshan"
GITLAB_NAME = "voidspacexyz"
DISQUS_SITENAME = "ramaseshan"

# FIX: the original used the Python 2 print *statement* (`print "{slug}"`),
# which is a SyntaxError under Python 3.  The call form below prints the
# same literal text on both Python 2 and 3.
# NOTE(review): this looks like leftover debug output -- consider removing.
print("{slug}")
| {
"content_hash": "262f6dd282191ab7607b2e6171be625c",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 53,
"avg_line_length": 21.923076923076923,
"alnum_prop": 0.7473684210526316,
"repo_name": "ramaseshan/ramaseshan.github.io",
"id": "e55ea526964d07b24a877485a5560e7c5dd14464",
"size": "618",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/publishconf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10980"
},
{
"name": "HTML",
"bytes": "458350"
},
{
"name": "Makefile",
"bytes": "4327"
},
{
"name": "Python",
"bytes": "3770"
},
{
"name": "Shell",
"bytes": "2201"
}
],
"symlink_target": ""
} |
import datetime
import hashlib
import json
import falcon
USE_CACHE_CONTROL = 'USE_CACHE_CONTROL'
USE_ETAG = 'USE_ETAG'
def is_error(status):
    """Report whether an HTTP status line denotes an error.

    :param status: HTTP Status string to inspect
    :return: True if the status code is 400 or greater, otherwise False
    """
    code, _, _reason = status.partition(' ')
    return int(code) >= 400
def is_cacheable(status):
    """Determine if the response has a cacheable status

    FIX: the previous docstring claimed "302 or greater and not 410", which
    contradicted the implementation; the code has always tested membership
    in the list below (410 included).

    :param status: HTTP Status string to inspect
    :return: True if the status code is one of 200, 203, 206, 300, 301 or
        410 (statuses a cache may store by default), otherwise False
    """
    code = int(status.split(' ', 1)[0])
    return code in [200, 203, 206, 300, 301, 410]
def respond(resp, method='GET', status=falcon.HTTP_200, headers=None,
            body=None):
    """Populate a falcon response object in one call.

    :param resp: falcon response to fill in
    :param method: HTTP method of the request; HEAD replies carry no body
    :param status: HTTP status line to set on the response
    :param headers: optional mapping of header names to values
    :param body: optional body text, skipped for 204/304 and HEAD
    """
    resp.status = status
    for key, value in (headers or {}).items():
        resp.set_header(key, value)
    # 204 and 304 responses, and any reply to HEAD, MUST NOT include a body.
    if status in [falcon.HTTP_204, falcon.HTTP_304] or method == 'HEAD':
        return
    resp.body = body
def prod_handler(ex, req, resp, params):
    """Handle exceptions thrown during request processing

    Production handler: hides internal details behind a generic 500.

    :param ex: exception to optionally handle
    :param req: request being processed
    :param resp: response being formulated
    :param params: parameters to the request
    :return: None
    """
    # Pass on any HTTP Error that has already been determined
    if isinstance(ex, falcon.HTTPError):
        # Bare `raise` re-raises the exception currently being handled;
        # this relies on the handler being invoked from an `except` block.
        raise
    # Anything else becomes a generic 500 with no internals leaked.
    raise falcon.HTTPInternalServerError("500 Internal Server Error",
                                         "Sorry. My bad.")
def dev_handler(ex, req, resp, params):
    """Handle exceptions thrown during request processing

    Development handler: reports the traceback back to the client, as HTML
    or JSON depending on what the client accepts.  Not for production use.

    :param ex: exception to optionally handle
    :param req: request being processed
    :param resp: response being formulated
    :param params: parameters to the request
    :return: None
    """
    # Pass on any HTTP Error that has already been determined
    if isinstance(ex, falcon.HTTPError):
        # Bare `raise` re-raises the in-flight exception; assumes we are
        # called from an `except` context.
        raise
    resp.status = falcon.HTTP_INTERNAL_SERVER_ERROR
    # Imported lazily so the traceback module only loads on failure.
    import traceback
    trace = traceback.format_exc()
    # NOTE(review): `ex.message` is Python 2 only -- verify before porting.
    if req.client_accepts('text/html'):
        resp.content_type = 'text/html'
        content = ('<!DOCTYPE html><h2>%s</h2>%s<hr><pre>%s</pre>'
                   % (resp.status, ex.message, trace))
    else:
        # Fall back to a JSON error document.
        error = {'status': resp.status, 'message': ex.message,
                 'description': trace.split('\n')}
        content = json.dumps(error)
    resp.body = content
class InjectorMiddleware(object):
    """Falcon middleware that merges a fixed mapping of values into every
    request's context dictionary.
    """

    def __init__(self, args):
        # Hold a reference (not a copy): later mutations by the caller are
        # visible to subsequent requests, matching the original behavior.
        self._values = args

    def process_request(self, req, resp):
        req.context.update(self._values)
class CacheControlMiddleware(object):
    """Falcon middleware that stamps Cache-Control, Expires and Vary
    headers onto cacheable responses.
    """

    def __init__(self, duration_seconds=3600):
        self.duration_seconds = duration_seconds

    def process_response(self, req, resp, resource):
        if not self._include_cache_control_headers(req, resp):
            return
        resp.cache_control = ['public', 'max-age=' + self.cache_seconds]
        resp.set_header('Expires', self.expires)
        resp.vary = 'Accept-Encoding'

    @property
    def cache_seconds(self):
        """Configured lifetime as a string, for the max-age directive."""
        return str(self.duration_seconds)

    @property
    def expires(self):
        """HTTP-date string for now (UTC) plus the configured lifetime."""
        expiry = (datetime.datetime.utcnow()
                  + datetime.timedelta(seconds=self.duration_seconds))
        return falcon.dt_to_http(expiry)

    @staticmethod
    def _include_cache_control_headers(req, resp):
        # Only decorate non-empty bodies, and only when either the request
        # context opted in or the status is cacheable by default.
        return resp.body and (req.context.get(USE_CACHE_CONTROL, None) or
                              is_cacheable(resp.status))
class EtagResponseMiddleware(object):
    """Falcon middleware that attaches an ETag header to cacheable
    responses and tracks the latest ETag per request URI in a cache.
    """

    def __init__(self, cache):
        self._cache = cache

    def process_response(self, req, resp, resource):
        if not self._include_etag_header(req, resp):
            return
        tag = self.etag(resp.body)
        resp.etag = tag
        # Refresh the cached ETag for this URI when it has changed.
        uri = req.relative_uri
        if self._cache[uri] != tag:
            self._cache[uri] = tag

    @staticmethod
    def etag(body):
        """MD5 hex digest of the response body, used as the ETag value."""
        return hashlib.md5(body).hexdigest()

    @staticmethod
    def _include_etag_header(req, resp):
        # Only tag non-empty bodies that are opted in or cacheable.
        return resp.body and (req.context.get(USE_ETAG, None) or
                              is_cacheable(resp.status))
| {
"content_hash": "b5a907f2337013f43174f24e1ff823c8",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 77,
"avg_line_length": 30.047619047619047,
"alnum_prop": 0.628254471360652,
"repo_name": "eve-basil/common",
"id": "c356401c5a64d66c69278d0f30bbaa1203538117",
"size": "4417",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "basil_common/falcon_support.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "24827"
}
],
"symlink_target": ""
} |
"""
import custom loss here
"""
| {
"content_hash": "d83bdd49f8db301fc2871cb63c1adeb1",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 23,
"avg_line_length": 10.666666666666666,
"alnum_prop": 0.625,
"repo_name": "FederatedAI/FATE",
"id": "6e65894e2c5b9c175719865cffeb5db73645c7d7",
"size": "647",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/federatedml/nn/backend/pytorch/custom/loss.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Lua",
"bytes": "19716"
},
{
"name": "Python",
"bytes": "5121767"
},
{
"name": "Rust",
"bytes": "3971"
},
{
"name": "Shell",
"bytes": "19676"
}
],
"symlink_target": ""
} |
import setuptools

# Package metadata collected in one place, then handed to setuptools.
PACKAGE_METADATA = dict(
    name='hetio',
    version='0.1.0',
    author='Daniel Himmelstein',
    author_email='daniel.himmelstein@gmail.com',
    url='https://github.com/dhimmel/hetio',
    description='Heterogeneous networks',
    license='CC0',
    packages=['hetio'],
)

setuptools.setup(**PACKAGE_METADATA)
| {
"content_hash": "2b695a36d3f2a50fb6464aec1dd3c27f",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 50,
"avg_line_length": 25.75,
"alnum_prop": 0.6245954692556634,
"repo_name": "xypan1232/hetio",
"id": "7d8645bbf933ed8f6fdd047ef65a6e2de577b417",
"size": "309",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34426"
}
],
"symlink_target": ""
} |
import networkx as nx
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_0
from ryu.lib.mac import haddr_to_bin
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
from ryu.lib.packet import ether_types
from ryu.topology import api as ryu_api
class RouteApp(app_manager.RyuApp):
    """Ryu OpenFlow 1.0 controller app that installs shortest-path flows.

    On each PacketIn it looks up source/destination hosts via the Ryu
    topology API, computes a shortest path with networkx over a graph of
    (dpid.port) nodes, installs matching flows along that path in both
    directions, and forwards the triggering packet.
    """
    OFP_VERSIONS = [ofproto_v1_0.OFP_VERSION]

    def __init__(self, *args, **kwargs):
        super(RouteApp, self).__init__(*args, **kwargs)

    def add_flow(self, datapath, match, actions):
        # Install a permanent (no timeout) flow entry on `datapath`.
        ofproto = datapath.ofproto
        mod = datapath.ofproto_parser.OFPFlowMod(
            datapath=datapath, match=match, cookie=0,
            command=ofproto.OFPFC_ADD, idle_timeout=0, hard_timeout=0,
            priority=ofproto.OFP_DEFAULT_PRIORITY,
            flags=ofproto.OFPFF_SEND_FLOW_REM, actions=actions)
        datapath.send_msg(mod)

    def find_host(self, mac_addr):
        # Linear scan of all hosts known to the topology API; returns the
        # host whose MAC matches, or None.
        hosts = ryu_api.get_all_host(self)
        for host in hosts:
            if host.mac == mac_addr:
                return host
        return None

    def flood_packet(self, dp, msg):
        # Re-emit the packet from `msg` on all ports (OFPP_FLOOD).
        ofproto = dp.ofproto
        out_port = ofproto.OFPP_FLOOD
        actions = [dp.ofproto_parser.OFPActionOutput(out_port)]
        data = None
        if msg.buffer_id == ofproto.OFP_NO_BUFFER:
            data = msg.data
        out = dp.ofproto_parser.OFPPacketOut(
            datapath=dp, buffer_id=msg.buffer_id, in_port=msg.in_port,
            actions=actions, data=data)
        dp.send_msg(out)

    def get_all_links(self):
        '''
        Return all links as edges between "dpid.port" node labels.

        link form format:
        [(a.1, b.1), (b.2, c.1), (b.3, d.1)]
        x.k means dpid x and port no k
        a.1 means first port in switch "a"
        this topology should looks like:
        a.1 - 1.b.2 - 1.c
        3
        |
        1
        d
        '''
        all_links = ryu_api.get_all_link(self)
        result = []
        for link in all_links:
            src = '{}.{}'.format(link.src.dpid, link.src.port_no)
            dst = '{}.{}'.format(link.dst.dpid, link.dst.port_no)
            result.append((src, dst))
        # internal switch links: connect every pair of ports on the same
        # switch so paths can traverse a switch from port to port.
        all_switches = ryu_api.get_all_switch(self)
        link_to_add = []
        # O(n^3), such dirty!!
        for switch in all_switches:
            ports = switch.ports
            for port in ports:
                for _port in ports:
                    if port != _port:
                        src = '{}.{}'.format(port.dpid, port.port_no)
                        dst = '{}.{}'.format(_port.dpid, _port.port_no)
                        link_to_add.append((src, dst))
        result.extend(link_to_add)
        return result

    def cal_shortest_path(self, src_host, dst_host):
        # Build the port-level graph and return the shortest path between
        # the two hosts' attachment ports, or None when no path exists.
        src_port = src_host.port
        dst_port = dst_host.port
        all_links = self.get_all_links()
        graph = nx.Graph()
        graph.add_edges_from(all_links)
        src = '{}.{}'.format(src_port.dpid, src_port.port_no)
        dst = '{}.{}'.format(dst_port.dpid, dst_port.port_no)
        if nx.has_path(graph, src, dst):
            return nx.shortest_path(graph, src, dst)
        return None

    def get_dp(self, dpid):
        # Resolve a dpid to its datapath object via the topology API.
        switch = ryu_api.get_switch(self, dpid)[0]
        return switch.dp

    def packet_out(self, dp, msg, out_port):
        # Emit the packet from `msg` on one specific port.
        ofproto = dp.ofproto
        actions = [dp.ofproto_parser.OFPActionOutput(out_port)]
        data = None
        if msg.buffer_id == ofproto.OFP_NO_BUFFER:
            data = msg.data
        out = dp.ofproto_parser.OFPPacketOut(
            datapath=dp, buffer_id=msg.buffer_id, in_port=msg.in_port,
            actions=actions, data=data)
        dp.send_msg(out)

    def install_path(self, match, path):
        # `path` contains only the egress "dpid.port" nodes (the caller
        # slices the full path with [1::2] / [2::2]); install one flow per
        # node sending matching traffic out of that port.
        for node in path:
            dpid = int(node.split('.')[0])
            port_no = int(node.split('.')[1])
            dp = self.get_dp(dpid)
            actions = [dp.ofproto_parser.OFPActionOutput(port_no)]
            self.add_flow(dp, match, actions)

    @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
    def packet_in_handler(self, ev):
        """Handle PacketIn: flood broadcast/ARP/unknown traffic, otherwise
        install a bidirectional shortest path and forward the packet."""
        msg = ev.msg
        dp = msg.datapath
        dpid = dp.id
        ofproto = dp.ofproto
        pkt = packet.Packet(msg.data)
        eth = pkt.get_protocol(ethernet.ethernet)
        if eth.ethertype == ether_types.ETH_TYPE_LLDP:
            # ignore lldp packet
            return
        src = eth.src
        dst = eth.dst
        if dst.startswith('33:33'):
            # IP multicast, flood it....
            self.flood_packet(dp, msg)
            return
        if eth.ethertype == ether_types.ETH_TYPE_ARP:
            # arp, flood it
            self.flood_packet(dp, msg)
            return
        if dst == 'ff:ff:ff:ff:ff:ff':
            # Ethernet broadcast, flood it.
            self.flood_packet(dp, msg)
            return
        self.logger.info('From {} to {}'.format(src, dst))
        # find dst host location(dpid, port)
        dst_host = self.find_host(dst)
        # can't find dst, flood it.
        if not dst_host:
            self.logger.info('Can\'t find host {}'.format(dst))
            self.flood_packet(dp, msg)
            return
        src_host = self.find_host(src)
        # calculate shortest path
        shortest_path = self.cal_shortest_path(src_host, dst_host)
        # can't find path, flood it!
        if not shortest_path:
            self.logger.info('Can\'t find path')
            self.flood_packet(dp, msg)
            return
        self.logger.info('Shortest path : ')
        self.logger.info(shortest_path)
        # Now, insert flows to switches!
        # shortest_path example:
        # from dpid 7, port 2 to dpid 3 port 1
        # ['7.2', '7.3', '5.2', '5.3', '1.2', '1.1', '2.3', '2.1', '3.3', '3.1']
        # create match
        match = dp.ofproto_parser.OFPMatch(
            dl_dst=haddr_to_bin(dst))
        # [1::2] selects each hop's egress port along the forward direction.
        self.install_path(match, shortest_path[1::2])
        # create reverse path
        match = dp.ofproto_parser.OFPMatch(
            dl_dst=haddr_to_bin(src))
        # [2::2] selects the egress ports for the reverse direction.
        self.install_path(match, shortest_path[2::2])
        # packet out this packet!
        node = shortest_path[1]
        dpid = int(node.split('.')[0])
        port_no = int(node.split('.')[1])
        self.packet_out(dp, msg, port_no)
| {
"content_hash": "e5e08fceefb9890f8d6d74addeecd1a3",
"timestamp": "",
"source": "github",
"line_count": 214,
"max_line_length": 80,
"avg_line_length": 30.03271028037383,
"alnum_prop": 0.5467558736580053,
"repo_name": "TakeshiTseng/SDN-Work",
"id": "da53e9c8f9419998867dda6c62a49aecc1211e0a",
"size": "6427",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "COSCUP2015Workshop/route_app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1219"
},
{
"name": "HTML",
"bytes": "469"
},
{
"name": "JavaScript",
"bytes": "7591"
},
{
"name": "Python",
"bytes": "131107"
},
{
"name": "Shell",
"bytes": "1790"
}
],
"symlink_target": ""
} |
from nose.tools import assert_greater
from .... import make
from ....core.mod import mod
from ...ut import need_scrapyd
from .. import name
@need_scrapyd
def test_spy_query():
    """Both a keyword query and a full URL query should return posts."""
    app = make()
    with app.app_context():
        for query in ('language:chinese',
                      'http://g.e-hentai.org/?f_search=language%3Achinese'):
            outcome = mod(name).spy(query, 60)
            assert_greater(len(outcome.posts), 0)
| {
"content_hash": "f6e67e6f69f304e79b4810c596ef7076",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 83,
"avg_line_length": 29.133333333333333,
"alnum_prop": 0.6384439359267735,
"repo_name": "Answeror/torabot",
"id": "f4389d21db9fed61e89a552e31171f3ff558ab70",
"size": "437",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "torabot/mods/ehentai/test/test_spy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "174712"
},
{
"name": "JavaScript",
"bytes": "2849805"
},
{
"name": "Python",
"bytes": "552234"
},
{
"name": "Shell",
"bytes": "822"
},
{
"name": "TeX",
"bytes": "3381"
},
{
"name": "XSLT",
"bytes": "5063"
}
],
"symlink_target": ""
} |
from collections import namedtuple
import struct, math
from panda3d.core import Geom, GeomVertexData, GeomVertexFormat, GeomVertexReader, GeomVertexRewriter
from panda3d.core import Thread
from panda3d.core import Point3
from direct.directnotify.DirectNotify import DirectNotify
from simpleCircle import SimpleCircle # for the circumcircle
from utils import getIntersectionBetweenPoints, EPSILON
notify = DirectNotify().newCategory("Trangle")
class PrimitiveInterface(object):
    """Handles interfacing with GeomVertexData objects as well as GeomPrimitives"""

    @classmethod
    def readData3f(cls, ind, vreader):
        # Position the reader on vertex row `ind` and return its 3 floats.
        vreader.setRow(ind)
        return vreader.getData3f()

    def __init__(self, vdata, primitives):
        # TODO find a way to make only one per vdata
        self.vdata = vdata            # GeomVertexData holding the vertices
        self.primitives = primitives  # GeomPrimitives holding the triangles

    def getTriangleAsPoints(self, ind, vreader=None):
        """Return triangle `ind` as a Triangle.TriangleTuple of Point3s."""
        if vreader is None:
            vreader = GeomVertexReader(self.vdata, 'vertex')
        pts = []
        for vi in self.getTriangleVertexIndices(ind):
            vreader.setRow(vi)
            pt = vreader.getData3f()
            pts.append(Point3(*pt))
        return Triangle.TriangleTuple(pts[0], pts[1], pts[2])

    def getTriangleVertexIndices(self, index):
        """Return the three vertex indices of primitive number `index`."""
        st = self.primitives.getPrimitiveStart(index)
        end = self.primitives.getPrimitiveEnd(index)
        vertexIndices = []
        for i in range(st, end):
            vertexIndices.append(self.primitives.getVertex(i))
        return vertexIndices

    def setTrianglePointIndex(self, triangleIndex, pointIndex, newVertexIndex):
        """Overwrite one vertex index of a triangle by packing the new
        value directly into the primitive's backing byte array."""
        triangleArry = self.primitives.modifyVertices()
        triangleArry = triangleArry.modifyHandle(Thread.getCurrentThread())  # releases the array when deleted
        bytesPerVert = triangleArry.getArrayFormat().getTotalBytes()
        # BLOG C string to Python struct conversion https://docs.python.org/2/library/struct.html#format-characters
        fmtStr = triangleArry.getArrayFormat().getFormatString(False)  # True pads the bytes
        if fmtStr[0] != '=':
            fmtStr = '=' + fmtStr  # use standard sizing w/ = or native w/ @
        readerWriter = struct.Struct(fmtStr)  # creating the class instance saves on compiling the format string
        packed = readerWriter.pack(newVertexIndex)
        # Byte offset: 3 vertices per triangle, `pointIndex` selects which one.
        triangleArry.setSubdata(triangleIndex * bytesPerVert * 3 + pointIndex * bytesPerVert, bytesPerVert, packed)
class Triangle(object):
    """A triangle object to help with triangle related calculations.

    Wraps one primitive (by index) of a shared GeomTriangles/GeomVertexData
    pair; point accessors read through the vertex data on each access.
    """
    TriangleTuple = namedtuple('TriangleTuple', 'point0 point1 point2')
    # keep Triangle ignorant of other triangles as much as possible
    __slots__ = ('_selfIndex', '_primitiveInterface', '_rewriter')
    # TODO may implement descriptor for attribute access:
    # https://docs.python.org/2/reference/datamodel.html#implementing-descriptors

    @classmethod
    def getCcwOrder(cls, ind0, ind1, ind2, vreader):
        # Reorder the vertex indices so the triangle winds counter-clockwise
        # (the cross product of the two edges from pt0 has positive z).
        pt0, pt1, pt2 = cls.makeDummy(ind0, ind1, ind2, vreader)
        rightVec = pt1 - pt0
        leftVec = pt2 - pt0
        if rightVec.cross(leftVec).z <= 0:
            tmp = ind1
            ind1 = ind2
            ind2 = tmp
        return ind0, ind1, ind2

    @classmethod
    def getDummyMinAngleDeg(cls, ind0, ind1, ind2, vreader):
        # Smallest interior angle (degrees) of the would-be triangle;
        # returns 180.0 as a sentinel for (near-)collinear points.
        ind0, ind1, ind2 = cls.getCcwOrder(ind0, ind1, ind2, vreader)  # ??? needs to be ccw
        pt0, pt1, pt2 = cls.makeDummy(ind0, ind1, ind2, vreader)
        v0 = pt1 - pt0
        v1 = pt1 - pt2  # reverse of triangle eg cw winding
        v2 = pt2 - pt0  # reverse of triangle
        v0.normalize()
        v1.normalize()
        v2.normalize()
        deg0 = v0.angleDeg(v2)
        deg1 = (-v2).angleDeg(v1)
        deg2 = (-v1).angleDeg(-v0)
        if min(deg0, deg1, deg2) < EPSILON:
            return 180.0  # virtually collinear (likely is collinear but is hidden by floating point error)
        assert abs(deg0 + deg1 + deg2 - 180) < EPSILON
        return min(deg0, deg1, deg2)

    @classmethod
    def makeDummy(cls, ind0, ind1, ind2, vreader):
        # Fetch the three vertex rows as Point3s without creating a primitive.
        pt0 = Point3(*PrimitiveInterface.readData3f(ind0, vreader))
        pt1 = Point3(*PrimitiveInterface.readData3f(ind1, vreader))
        pt2 = Point3(*PrimitiveInterface.readData3f(ind2, vreader))
        return pt0, pt1, pt2

    def __init__(self, vindex0, vindex1, vindex2, vertexData, geomTriangles, rewriter):
        """Append a new CCW triangle over the given vertex indices.

        Raises ValueError when the three points are (nearly) collinear.
        """
        assert vindex0 not in (vindex1, vindex2) and vindex1 not in (vindex0, vindex2)  # prevent duplicate indices
        super(Triangle, self).__init__()
        if Triangle.getDummyMinAngleDeg(vindex0, vindex1, vindex2, rewriter) <= 0:
            rewriter.setRow(vindex0)
            pt0 = rewriter.getData3f()
            rewriter.setRow(vindex1)
            pt1 = rewriter.getData3f()
            rewriter.setRow(vindex2)
            pt2 = rewriter.getData3f()
            raise ValueError("Collinear degenerate triangle points: {0} {1} {2}".format(pt0, pt1, pt2))
        inds = Triangle.getCcwOrder(vindex0, vindex1, vindex2, rewriter)
        geomTriangles.addVertices(*inds)
        # This triangle is the last primitive just appended.
        self._selfIndex = geomTriangles.getNumPrimitives() - 1
        self._primitiveInterface = PrimitiveInterface(vertexData, geomTriangles)
        self._rewriter = rewriter

    def asPointsEnum(self):
        # Current points as a TriangleTuple (re-read from the vertex data).
        return self._primitiveInterface.getTriangleAsPoints(self._selfIndex, vreader=self._rewriter)

    def asIndexList(self):
        # The three vertex indices of this triangle.
        return self._primitiveInterface.getTriangleVertexIndices(self._selfIndex)

    def containsPoint(self, point, includeEdges=True):
        # Half-plane test against all three edges, flipped for CW winding.
        slf = self.asPointsEnum()
        v1 = slf.point2 - slf.point0
        pv1 = point - slf.point0
        v2 = slf.point1 - slf.point2
        pv2 = point - slf.point2
        v3 = slf.point0 - slf.point1
        pv3 = point - slf.point1
        if not self.isLeftWinding():
            v1 = -v1
            v2 = -v2
            v3 = -v3
        if includeEdges:
            return v1.cross(pv1).z <= 0 and v2.cross(pv2).z <= 0 and v3.cross(pv3).z <= 0
        else:
            return v1.cross(pv1).z < 0 and v2.cross(pv2).z < 0 and v3.cross(pv3).z < 0

    @property
    def edge0(self):
        # Edge as a (point0, point1) pair.
        slf = self.asPointsEnum()
        return slf.point0, slf.point1

    @property
    def edge1(self):
        # Edge as a (point1, point2) pair.
        slf = self.asPointsEnum()
        return slf.point1, slf.point2

    @property
    def edge2(self):
        # Edge as a (point2, point0) pair.
        slf = self.asPointsEnum()
        return slf.point2, slf.point0

    @property
    def edgeIndices0(self):
        return self.pointIndex0, self.pointIndex1

    @property
    def edgeIndices1(self):
        return self.pointIndex1, self.pointIndex2

    @property
    def edgeIndices2(self):
        return self.pointIndex2, self.pointIndex0

    def getAngleDeg0(self):
        # Interior angle at point0, in degrees.
        slf = self.asPointsEnum()
        edge1 = slf.point1 - slf.point0
        edge2 = slf.point2 - slf.point0
        edge1.normalize()
        edge2.normalize()
        return edge1.angleDeg(edge2)

    def getAngleDeg1(self):
        # Interior angle at point1, in degrees.
        slf = self.asPointsEnum()
        edge1 = slf.point0 - slf.point1
        edge2 = slf.point2 - slf.point1
        edge1.normalize()
        edge2.normalize()
        return edge1.angleDeg(edge2)

    def getAngleDeg2(self):
        # Interior angle at point2, in degrees.
        slf = self.asPointsEnum()
        edge1 = slf.point0 - slf.point2
        edge2 = slf.point1 - slf.point2
        edge1.normalize()
        edge2.normalize()
        return edge1.angleDeg(edge2)

    def getCircumcircle(self):
        # True-radius circumcircle, derived from the squared variant's center.
        cirSquared = self.getCircumcircleSquared()
        return SimpleCircle(cirSquared.center, (cirSquared.center - self.point0).length())

    def getCircumcircleSquared(self):
        # NOTE: the returned SimpleCircle's radius slot holds the *squared*
        # distance from the center to point0 (avoids a sqrt).
        slf = self.asPointsEnum()
        edge1 = slf.point1 - slf.point0
        edge2 = slf.point2 - slf.point0
        norm = edge1.cross(edge2)
        norm.normalize()
        tanToVec1 = norm.cross(edge1)
        tanToVec2 = norm.cross(edge2)
        # the circumcircle is centered at the intersecting tangents at the midpoints of each edge (we need only 2)
        midPt1 = (slf.point0 + slf.point1) / 2
        midPt2 = (slf.point0 + slf.point2) / 2
        pt1 = Point3(tanToVec1 + midPt1)
        pt2 = Point3(tanToVec2 + midPt2)
        center = getIntersectionBetweenPoints(midPt1, pt1, pt2, midPt2)
        return SimpleCircle(center, (center - slf.point0).lengthSquared())

    # NOTE(review): the getEdgeIndices1/2 *methods* below return their pairs
    # in a different order than the edgeIndices1/2 *properties* above
    # ((2,1)/(0,2) vs (1,2)/(2,0)) -- confirm which ordering callers expect.
    def getEdgeIndices0(self):
        return self.pointIndex0, self.pointIndex1

    def getEdgeIndices1(self):
        return self.pointIndex2, self.pointIndex1

    def getEdgeIndices2(self):
        return self.pointIndex0, self.pointIndex2

    def getGeomVertex(self, i):
        # Raw 3-float vertex value at row `i` of the vertex data.
        self._rewriter.setRow(i)
        return self._rewriter.getData3f()

    def getPointIndices(self):
        return self.pointIndex0, self.pointIndex1, self.pointIndex2

    def getPoints(self):
        slf = self.asPointsEnum()
        return slf.point0, slf.point1, slf.point2

    def getIntersectionsWithCircumcircle(self, point1, point2, tolerance=EPSILON):
        # Intersections of the (true-radius) circumcircle with a line.
        circle = self.getCircumcircle()
        return circle.getIntersectionsWithLine(point1, point2, tolerance=tolerance)

    def getMidPoint0(self):
        # Midpoint of edge0 (point0-point1).
        slf = self.asPointsEnum()
        return (slf.point0 + slf.point1) / 2.0

    def getMidPoint1(self):
        # Midpoint of edge1 (point1-point2).
        slf = self.asPointsEnum()
        return (slf.point1 + slf.point2) / 2.0

    def getMidPoint2(self):
        # Midpoint of edge2 (point2-point0).
        slf = self.asPointsEnum()
        return (slf.point2 + slf.point0) / 2.0

    def getMinAngleDeg(self):
        # Smallest of the three interior angles, in degrees.
        minAng = self.getAngleDeg0()
        ang1 = self.getAngleDeg1()
        if ang1 < minAng:
            minAng = ang1
        return min(self.getAngleDeg2(), minAng)

    def getNumGeomVertices(self):
        # Total vertex rows in the shared vertex data (not just this triangle).
        return self._primitiveInterface.vdata.getNumRows()

    def getOccupiedEdge(self, point, slf=None):
        """Return a string of edge numbers ('0'/'1'/'2') that `point` lies on."""
        if slf is None:
            slf = self.asPointsEnum()
        edge0 = slf.point1 - slf.point0
        edge1 = slf.point2 - slf.point1
        edge2 = slf.point0 - slf.point2
        testVec0 = point - slf.point0
        testVec1 = point - slf.point1
        testVec2 = point - slf.point2
        # BLOG coding defensively. (This would actually be optimal if it shortcuts, but it's best to test assumptions.)
        onEdge = ''
        # z ~= zero only occurs if the point is close this edge and the triangle edge must be longer
        if abs(edge0.cross(point - slf.point0).z) < EPSILON and edge0.lengthSquared() > testVec0.lengthSquared():
            onEdge += '0'
        if abs(edge1.cross(point - slf.point1).z) < EPSILON and edge1.lengthSquared() > testVec1.lengthSquared():
            onEdge += '1'
        if abs(edge2.cross(point - slf.point2).z) < EPSILON and edge2.lengthSquared() > testVec2.lengthSquared():
            onEdge += '2'
        return onEdge

    def getSharedFeatures(self, other):
        """
        returns namedtuple version of {
            'numSharedPoints': int,'point(N)': T/F, 'edge(N)': T/F,
            'indicesNotShared': (...), 'otherIndicesNotShared': (...)
        }
        or {}
        """
        # NOTE(review): Python 2 filter() returns a list here; under Python 3
        # these fields would become iterators -- confirm before porting.
        inds = other.getPointIndices()
        selfInds = self.getPointIndices()
        shared = ''
        d = {
            'numSharedPoints': 0,
            'point0': False, 'point1': False, 'point2': False,
            'edge0': False, 'edge1': False, 'edge2': False,
            'indicesNotShared': self.getPointIndices(),
            'otherIndicesNotShared': other.getPointIndices(),
            'other': other,
        }
        if selfInds[0] in inds:
            d['point0'] = True
            shared += '0'
            d['numSharedPoints'] += 1
            d['indicesNotShared'] = filter(lambda i: i != selfInds[0], d['indicesNotShared'])
            d['otherIndicesNotShared'] = filter(lambda i: i != selfInds[0], d['otherIndicesNotShared'])
        if selfInds[1] in inds:
            if shared:
                d['edge0'] = True
            d['point1'] = True
            shared += '1'
            d['numSharedPoints'] += 1
            d['indicesNotShared'] = filter(lambda i: i != selfInds[1], d['indicesNotShared'])
            d['otherIndicesNotShared'] = filter(lambda i: i != selfInds[1], d['otherIndicesNotShared'])
        if selfInds[2] in inds:
            if shared == '0':
                d['edge2'] = True
            elif shared == '1':
                d['edge1'] = True
            elif shared == '01':
                d['edge0'] = True
                d['edge1'] = True
                d['edge2'] = True
            d['point2'] = True
            d['numSharedPoints'] += 1
            d['indicesNotShared'] = filter(lambda i: i != selfInds[2], d['indicesNotShared'])
            d['otherIndicesNotShared'] = filter(lambda i: i != selfInds[2], d['otherIndicesNotShared'])
        SharedNamedTuple = namedtuple('SharedNamedTuple', [
            'numSharedPoints',
            'point0', 'point1', 'point2',
            'edge0', 'edge1', 'edge2',
            'indicesNotShared',
            'otherIndicesNotShared',
            'other'
        ])
        nt = SharedNamedTuple(**d)
        return nt

    def getVec0(self):
        # Vector along edge0: point1 - point0.
        slf = self.asPointsEnum()
        return slf.point1 - slf.point0

    def getVec1(self):
        # Vector along edge1: point2 - point1.
        slf = self.asPointsEnum()
        return slf.point2 - slf.point1

    def getVec2(self):
        # Vector along edge2: point0 - point2.
        slf = self.asPointsEnum()
        return slf.point0 - slf.point2

    @property
    def index(self):
        # This triangle's primitive index in the GeomTriangles.
        return self._selfIndex

    def isLeftWinding(self):
        # True when the stored vertex order winds counter-clockwise.
        slf = self.asPointsEnum()
        v1 = slf.point1 - slf.point0
        v2 = (slf.point2 + slf.point1) / 2 - slf.point0
        return v1.cross(v2).z > 0

    @property
    def point0(self):
        return self.asPointsEnum().point0

    @property
    def point1(self):
        return self.asPointsEnum().point1

    @property
    def point2(self):
        return self.asPointsEnum().point2

    @property
    def pointIndex0(self):
        return self._primitiveInterface.getTriangleVertexIndices(self._selfIndex)[0]

    @property
    def pointIndex1(self):
        return self._primitiveInterface.getTriangleVertexIndices(self._selfIndex)[1]

    @property
    def pointIndex2(self):
        return self._primitiveInterface.getTriangleVertexIndices(self._selfIndex)[2]

    @pointIndex0.setter
    def pointIndex0(self, value):
        self._primitiveInterface.setTrianglePointIndex(self._selfIndex, 0, value)

    @pointIndex1.setter
    def pointIndex1(self, value):
        self._primitiveInterface.setTrianglePointIndex(self._selfIndex, 1, value)

    @pointIndex2.setter
    def pointIndex2(self, value):
        self._primitiveInterface.setTrianglePointIndex(self._selfIndex, 2, value)

    def reverse(self):
        # Swap two vertex indices, flipping the winding direction.
        tmp = self.pointIndex1
        self.pointIndex1 = self.pointIndex2
        self.pointIndex2 = tmp

    def setIndex(self, value):
        self._selfIndex = value

    def setPointIndices(self, *args):
        self.pointIndex0 = args[0]
        self.pointIndex1 = args[1]
        self.pointIndex2 = args[2]

    # Comparisons order triangles by their primitive index; the right-hand
    # side may be another Triangle or a plain number.
    def __gt__(self, other):
        if isinstance(other, Triangle):
            return self._selfIndex > other.index
        else:
            return self._selfIndex > other

    def __ge__(self, other):
        if isinstance(other, Triangle):
            return self._selfIndex >= other.index
        else:
            return self._selfIndex >= other

    def __eq__(self, other):
        if isinstance(other, Triangle):
            return self._selfIndex == other.index
        else:
            return self._selfIndex == other

    def __ne__(self, other):
        if isinstance(other, Triangle):
            return self._selfIndex != other.index
        else:
            return self._selfIndex != other

    def __le__(self, other):
        if isinstance(other, Triangle):
            return self._selfIndex <= other.index
        else:
            return self._selfIndex <= other

    def __lt__(self, other):
        if isinstance(other, Triangle):
            return self._selfIndex < other.index
        else:
            return self._selfIndex < other

    def __str__(self):
        return str(self.__class__).split('.')[-1][:-2] + " {0}:\n\t{1}, {2}, {3}\n\tind: {4} {5} {6}".format(
            self._selfIndex,
            self.point0, self.point1, self.point2,
            self.pointIndex0, self.pointIndex1, self.pointIndex2
        )
| {
"content_hash": "75a157a386b6683894e478b7639e0994",
"timestamp": "",
"source": "github",
"line_count": 470,
"max_line_length": 119,
"avg_line_length": 34.748936170212765,
"alnum_prop": 0.6136419299534656,
"repo_name": "jkcavin1/creepy-duck",
"id": "581b7c01ae7593e48a50597d25a7c82cc6593b80",
"size": "16350",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "computationalgeom/triangle.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "181969"
}
],
"symlink_target": ""
} |
"""
Emphatic Temporal Difference Learning Algorithm (ETD), implemented in Python 3.
"""
import numpy as np
class ETD:
    """Emphatic Temporal Difference learning, ETD(λ), with linear function
    approximation.

    Attributes
    ----------
    n : int
        Number of features (length of the weight and trace vectors).
    w : Vector[float]
        Weight vector.
    z : Vector[float]
        Eligibility trace vector.
    F : float
        Followon trace scalar.
    M : float
        Emphasis scalar.
    """

    def __init__(self, n):
        """Set up an ETD learner for feature vectors of length `n`.

        Parameters
        -----------
        n : int
            The number of features
        """
        self.n = n
        self.reset()

    def get_value(self, x):
        """Approximate value of the state whose feature vector is `x`."""
        return np.dot(self.w, x)

    def update(self, x, r, xp, alpha, gm, gm_p, lm, rho, interest):
        """Learn from one transition `(x, r, xp)`.

        Parameters
        ----------
        x : array_like
            Features of the current state (1D, length `self.n`).
        r : float
            Reward observed on the transition.
        xp : array_like
            Features of the next state (1D, length `self.n`).
        alpha : float
            Step size.
        gm : float
            Discount factor for the current state.
        gm_p : float
            Discount factor for the next state.
        lm : float
            Bootstrapping (lambda) parameter for the current timestep.
        rho : float
            Importance sampling ratio (target policy over behavior policy).
        interest : float
            Interest assigned to the current timestep.

        Returns
        -------
        delta : float
            The temporal-difference error for this update.
        """
        # TD error for the transition.
        delta = r + gm_p * self.get_value(xp) - self.get_value(x)
        # Followon and emphasis scalars.
        self.F = gm * self.F + interest
        self.M = lm * interest + (1 - lm) * self.F
        # Emphasis-weighted, importance-corrected eligibility trace.
        self.z = rho * (x * self.M + gm * lm * self.z)
        # Semi-gradient weight update.
        self.w += alpha * delta * self.z
        # Carry the importance correction into the next step's followon.
        self.F *= rho
        return delta

    def reset(self):
        """Zero the weights, traces, followon, and emphasis."""
        self.F = 0
        self.M = 0
        self.w = np.zeros(self.n)
        self.z = np.zeros(self.n)
"content_hash": "48d4c87cf87aad00363a1ca15757b5a0",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 79,
"avg_line_length": 29.73404255319149,
"alnum_prop": 0.5488372093023256,
"repo_name": "rldotai/rl-algorithms",
"id": "10fb9a41e05107b58bad08e8a114ccc5cd9ad556",
"size": "2796",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py3/etd.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "3799"
},
{
"name": "Python",
"bytes": "31824"
}
],
"symlink_target": ""
} |
# Smoke test: build one ozone model combining the Quantization transform,
# MovingAverage trend, hourly seasonality, and AR cycle components.
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Quantization'] , ['MovingAverage'] , ['Seasonal_Hour'] , ['AR'] ); | {
"content_hash": "4547b1d5d674678f03e5ba592398a7b5",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 89,
"avg_line_length": 40.5,
"alnum_prop": 0.7160493827160493,
"repo_name": "antoinecarme/pyaf",
"id": "3e09db9347f501f0e2b43feecab2ed69e6423ddc",
"size": "162",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/model_control/detailed/transf_Quantization/model_control_one_enabled_Quantization_MovingAverage_Seasonal_Hour_AR.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
} |
import os
import sys

import design

try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

version = design.__version__

# Release shortcut: `python setup.py publish` uploads an sdist and reminds
# the maintainer to tag the release.
if sys.argv[-1] == 'publish':
    os.system('python setup.py sdist upload')
    print("You probably want to also tag the version now:")
    print("  git tag -a %s -m 'version %s'" % (version, version))
    print("  git push --tags")
    sys.exit()

# Read the long description from README/HISTORY.  Context managers close the
# handles promptly (the previous bare `open(...).read()` calls leaked them).
with open('README.rst', 'rt') as f:
    readme = f.read()
with open('HISTORY.rst', 'rt') as f:
    history = f.read()

setup(
    name='design',
    version=version,
    description='Generates various common web design elements. Borders, \
patterns, textures, gradients, etc.',
    long_description=readme + '\n\n' + history,
    author='Audrey Roy',
    author_email='audreyr@gmail.com',
    url='https://github.com/audreyr/design',
    packages=[
        'design',
    ],
    package_dir={'design': 'design'},
    include_package_data=True,
    install_requires=[
        'Pillow',
        'colors.py',
        'cairocffi',
    ],
    license='BSD',
    zip_safe=False,
    classifiers=[
        'Development Status :: 1 - Planning',
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python',
        "Programming Language :: Python :: 2",
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
    ],
    keywords='design graphics generator border pattern texture gradient \
PIL Pillow PyCairo png webdesign',
    test_suite='tests',
)
| {
"content_hash": "084eebe9192f9e8be685aa287dddb691",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 73,
"avg_line_length": 28.24137931034483,
"alnum_prop": 0.6117216117216118,
"repo_name": "audreyr/design",
"id": "75f3b67c28e1c038d068e49b1e51d98e43085cc6",
"size": "1685",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "20016"
},
{
"name": "Shell",
"bytes": "6701"
}
],
"symlink_target": ""
} |
import tempfile
from pathlib import Path
import bpy
from bpy_extras.image_utils import load_image
import gpu
from gpu_extras.batch import batch_for_shader
from mathutils import Matrix, Color
from ..lib import asset
from . import onscreen_text
from .offscreen import draw_gems
def srgb_to_linear(color) -> Color:
    # Gamma-adjust each channel (x ** (1/2.2)); applied to theme/background
    # colors — see Blender issue T74139.
    exponent = 1.0 / 2.2
    return Color(channel ** exponent for channel in color)
def _text_color(use_background: bool) -> tuple[float, float, float]:
    """Pick a text color contrasting with the viewport background.

    Without a background the render is transparent, so black text is used;
    otherwise white text is chosen for dark backgrounds, black for light.
    """
    if not use_background:
        return (0.0, 0.0, 0.0)
    shading = bpy.context.space_data.shading
    if shading.background_type == "THEME":
        gradients = bpy.context.preferences.themes[0].view_3d.space.gradients
        if gradients.background_type == "RADIAL":
            bgc = gradients.gradient
        else:
            bgc = gradients.high_gradient
    elif shading.background_type == "WORLD":
        bgc = srgb_to_linear(bpy.context.scene.world.color)
    elif shading.background_type == "VIEWPORT":
        bgc = srgb_to_linear(shading.background_color)
    # Contrast against the background's brightness (HSV value).
    return (1.0, 1.0, 1.0) if bgc.v < 0.5 else (0.0, 0.0, 0.0)
def render_map(self):
    """Render the scene plus the gem overlay into a "Gem Map" image.

    Renders a preview to a temporary PNG, composites it with the gem
    annotations and table in an offscreen GPU buffer, copies the pixels into
    a Blender image datablock, optionally saves it next to the .blend file,
    and shows it in a new window.
    """
    image_name = "Gem Map"
    temp_filepath = Path(tempfile.gettempdir()) / "gem_map_temp.png"
    width, height = self.get_resolution()
    # Top-left anchor for the on-screen gem table, inset from the edges.
    padding = 30
    x = padding
    y = height - padding
    # Render the scene to a temporary PNG; transparent unless a background
    # is requested.
    asset.render_preview(width, height, temp_filepath, compression=15, gamma=2.2, use_transparent=not self.use_background)
    render_image = load_image(str(temp_filepath))
    # Matrix mapping pixel coordinates (0..width/height) to clip space [-1, 1].
    mat_offscreen = Matrix()
    mat_offscreen[0][0] = 2 / width
    mat_offscreen[0][3] = -1
    mat_offscreen[1][1] = 2 / height
    mat_offscreen[1][3] = -1
    gpu.state.blend_set("ALPHA")
    shader = gpu.shader.from_builtin("2D_IMAGE")
    offscreen = gpu.types.GPUOffScreen(width, height)
    with offscreen.bind():
        fb = gpu.state.active_framebuffer_get()
        fb.clear(color=(1.0, 1.0, 1.0, 1.0))
        with gpu.matrix.push_pop():
            gpu.matrix.load_matrix(mat_offscreen)
            gpu.matrix.load_projection_matrix(Matrix())
            # Render result
            # --------------------------------
            # Draw the rendered PNG as a full-size textured quad (two tris).
            tex = gpu.texture.from_image(render_image)
            shader.bind()
            shader.uniform_sampler("image", tex)
            args = {
                "pos": ((0, 0), (width, 0), (width, height), (0, height)),
                "texCoord": ((0, 0), (1, 0), (1, 1), (0, 1)),
            }
            indices = ((0, 1, 2), (0, 2, 3))
            batch = batch_for_shader(shader, "TRIS", args, indices=indices)
            batch.draw(shader)
            # Gem map
            # --------------------------------
            draw_gems(self)
            onscreen_text.onscreen_gem_table(self, x, y, color=_text_color(self.use_background))
        # Read the composited RGBA pixels back (one unsigned byte/channel).
        buffer = fb.read_color(0, 0, width, height, 4, 0, "UBYTE")
        buffer.dimensions = width * height * 4
    offscreen.free()
    if image_name not in bpy.data.images:
        bpy.data.images.new(image_name, width, height)
    image = bpy.data.images[image_name]
    image.scale(width, height)
    # Image.pixels expects floats in [0, 1]; the buffer holds bytes.
    image.pixels = [v / 255 for v in buffer]
    if self.use_save and bpy.data.is_saved:
        image.filepath_raw = str(Path(bpy.data.filepath).with_suffix("")) + " Gem Map.png"
        image.file_format = "PNG"
        image.save()
    # Cleanup
    # ----------------------------
    bpy.data.images.remove(render_image)
    temp_filepath.unlink(missing_ok=True)
    gpu.state.blend_set("NONE")
    # Show in a new window
    # ----------------------------
    asset.show_window(width, height, space_data={"image": image})
| {
"content_hash": "fc5536cc1dce3bff3ed5a9c4f9b38953",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 122,
"avg_line_length": 29.475806451612904,
"alnum_prop": 0.573187414500684,
"repo_name": "mrachinskiy/blender-addon-jewelcraft",
"id": "ed1a8530f28b5f583351245a811e66e1ee439f04",
"size": "3741",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "op_gem_map/onrender.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "46466"
}
],
"symlink_target": ""
} |
from direct.showbase.PythonUtil import randFloat, normalDistrib, Enum
from toontown.toonbase import TTLocalizer, ToontownGlobals
import random, copy
# Trait values are quantized to 1/TraitDivisor (four decimal places).
TraitDivisor = 10000
def getTraitNames():
    """Return the list of trait names, caching it on PetTraits on first use."""
    if not hasattr(PetTraits, 'TraitNames'):
        PetTraits.TraitNames = [desc[0] for desc in PetTraits.TraitDescs]
    return PetTraits.TraitNames
def uniform(min, max, rng):
    # Draw from [min, max] using rng's uniform generator; the parameter names
    # shadow builtins but are part of the existing signature.
    return randFloat(min, max, rng.random)
def gaussian(min, max, rng):
    # Draw from a normal distribution restricted to [min, max], using
    # rng's gauss generator (see normalDistrib).
    return normalDistrib(min, max, rng.gauss)
class TraitDistribution:
    """Describes how a single pet trait's values are distributed.

    Subclasses supply TraitType (INCREASING or DECREASING) and Sz2MinMax,
    a safezone-id -> (min, max) table of value bounds.
    """
    TraitQuality = Enum('VERY_BAD, BAD, AVERAGE, GOOD, VERY_GOOD')
    TraitTypes = Enum('INCREASING, DECREASING')
    Sz2MinMax = None
    TraitType = None
    # Percentile cutoffs delimiting the quality buckets for each trait type.
    TraitCutoffs = {TraitTypes.INCREASING: {TraitQuality.VERY_BAD: 0.1,
                                            TraitQuality.BAD: 0.25,
                                            TraitQuality.GOOD: 0.75,
                                            TraitQuality.VERY_GOOD: 0.9},
                    TraitTypes.DECREASING: {TraitQuality.VERY_BAD: 0.9,
                                            TraitQuality.BAD: 0.75,
                                            TraitQuality.GOOD: 0.25,
                                            TraitQuality.VERY_GOOD: 0.1}}

    def __init__(self, rndFunc = gaussian):
        self.rndFunc = rndFunc
        cls = self.__class__
        if not hasattr(cls, 'GlobalMinMax'):
            # Cache, per subclass, the extreme bounds across all safezones.
            lowest = 1.0
            highest = 0.0
            for bounds in self.Sz2MinMax.values():
                lowest = min(lowest, bounds[0])
                highest = max(highest, bounds[1])
            cls.GlobalMinMax = [lowest, highest]

    def getRandValue(self, szId, rng = random):
        """Draw a random trait value within the safezone's bounds."""
        lo, hi = self.getMinMax(szId)
        return self.rndFunc(lo, hi, rng)

    def getHigherIsBetter(self):
        return self.TraitType == TraitDistribution.TraitTypes.INCREASING

    def getMinMax(self, szId):
        bounds = self.Sz2MinMax[szId]
        return (bounds[0], bounds[1])

    def getGlobalMinMax(self):
        return (self.GlobalMinMax[0], self.GlobalMinMax[1])

    def _getTraitPercent(self, traitValue):
        # Widen the global bounds if needed so out-of-range values still map
        # into [0, 1].
        gMin, gMax = self.getGlobalMinMax()
        gMin = min(gMin, traitValue)
        gMax = max(gMax, traitValue)
        return (traitValue - gMin) / (gMax - gMin)

    def getPercentile(self, traitValue):
        """Percentile where higher always means 'more of the trait axis'."""
        percent = self._getTraitPercent(traitValue)
        if self.TraitType is TraitDistribution.TraitTypes.INCREASING:
            return percent
        return 1.0 - percent

    def getQuality(self, traitValue):
        """Bucket the value into VERY_BAD..VERY_GOOD via TraitCutoffs."""
        TraitQuality = TraitDistribution.TraitQuality
        cutoffs = self.TraitCutoffs[self.TraitType]
        percent = self._getTraitPercent(traitValue)
        if self.TraitType is TraitDistribution.TraitTypes.INCREASING:
            if percent <= cutoffs[TraitQuality.VERY_BAD]:
                return TraitQuality.VERY_BAD
            if percent <= cutoffs[TraitQuality.BAD]:
                return TraitQuality.BAD
            if percent >= cutoffs[TraitQuality.VERY_GOOD]:
                return TraitQuality.VERY_GOOD
            if percent >= cutoffs[TraitQuality.GOOD]:
                return TraitQuality.GOOD
            return TraitQuality.AVERAGE
        if percent <= cutoffs[TraitQuality.VERY_GOOD]:
            return TraitQuality.VERY_GOOD
        if percent <= cutoffs[TraitQuality.GOOD]:
            return TraitQuality.GOOD
        if percent >= cutoffs[TraitQuality.VERY_BAD]:
            return TraitQuality.VERY_BAD
        if percent >= cutoffs[TraitQuality.BAD]:
            return TraitQuality.BAD
        return TraitQuality.AVERAGE

    def getExtremeness(self, traitValue):
        """Distance of the value from the midpoint, rescaled to [0, 1]."""
        percent = self._getTraitPercent(traitValue)
        return min(max(abs(percent - 0.5) * 2.0, 0.0), 1.0)
class PetTraits:
    """All personality traits for a single pet.

    Trait values come from an RNG seeded with traitSeed, so the same
    (traitSeed, safeZoneId) pair always reproduces the same personality.
    """

    class StdIncDistrib(TraitDistribution):
        # "Increasing" trait: value ranges rise in later safezones.
        TraitType = TraitDistribution.TraitTypes.INCREASING
        Sz2MinMax = {ToontownGlobals.ToontownCentral: (0.2, 0.65),
         ToontownGlobals.DonaldsDock: (0.3, 0.7),
         ToontownGlobals.DaisyGardens: (0.4, 0.75),
         ToontownGlobals.MinniesMelodyland: (0.5, 0.8),
         ToontownGlobals.TheBrrrgh: (0.6, 0.85),
         ToontownGlobals.DonaldsDreamland: (0.7, 0.9),
         ToontownGlobals.ForestsEnd: (0.8, 0.95)}

    class StdDecDistrib(TraitDistribution):
        # "Decreasing" trait: value ranges fall in later safezones.
        TraitType = TraitDistribution.TraitTypes.DECREASING
        Sz2MinMax = {ToontownGlobals.ToontownCentral: (0.35, 0.8),
         ToontownGlobals.DonaldsDock: (0.3, 0.7),
         ToontownGlobals.DaisyGardens: (0.25, 0.6),
         ToontownGlobals.MinniesMelodyland: (0.2, 0.5),
         ToontownGlobals.TheBrrrgh: (0.15, 0.4),
         ToontownGlobals.DonaldsDreamland: (0.1, 0.3),
         ToontownGlobals.ForestsEnd: (0.05, 0.2)}

    class ForgetfulnessDistrib(TraitDistribution):
        # Decreasing trait with wider ranges (minimum always 0).
        TraitType = TraitDistribution.TraitTypes.DECREASING
        Sz2MinMax = {ToontownGlobals.ToontownCentral: (0.0, 1.0),
         ToontownGlobals.DonaldsDock: (0.0, 0.9),
         ToontownGlobals.DaisyGardens: (0.0, 0.8),
         ToontownGlobals.MinniesMelodyland: (0.0, 0.7),
         ToontownGlobals.TheBrrrgh: (0.0, 0.6),
         ToontownGlobals.DonaldsDreamland: (0.0, 0.5),
         ToontownGlobals.ForestsEnd: (0.0, 0.3)}

    # (name, distribution, hasWorth) for every trait; 'surpriseThreshold'
    # is the only trait with hasWorth=False.
    TraitDescs = (('forgetfulness', ForgetfulnessDistrib(), True),
     ('boredomThreshold', StdIncDistrib(), True),
     ('restlessnessThreshold', StdIncDistrib(), True),
     ('playfulnessThreshold', StdDecDistrib(), True),
     ('lonelinessThreshold', StdIncDistrib(), True),
     ('sadnessThreshold', StdIncDistrib(), True),
     ('fatigueThreshold', StdIncDistrib(), True),
     ('hungerThreshold', StdIncDistrib(), True),
     ('confusionThreshold', StdIncDistrib(), True),
     ('excitementThreshold', StdDecDistrib(), True),
     ('angerThreshold', StdIncDistrib(), True),
     ('surpriseThreshold', StdIncDistrib(), False),
     ('affectionThreshold', StdDecDistrib(), True))
    NumTraits = len(TraitDescs)

    class Trait:
        # One generated trait value plus derived statistics
        # (percentile, quality bucket, extremeness).

        def __init__(self, index, traitsObj, value = None):
            self.name, distrib, self.hasWorth = PetTraits.TraitDescs[index]
            if value is not None:
                # A stored (e.g. deserialized) value wins over a fresh roll.
                self.value = value
            else:
                szId = traitsObj.safeZoneId
                self.value = distrib.getRandValue(szId, traitsObj.rng)
            # Quantize to 1/TraitDivisor so values round-trip consistently.
            self.value = int(self.value * TraitDivisor) / float(TraitDivisor)
            self.higherIsBetter = distrib.getHigherIsBetter()
            self.percentile = distrib.getPercentile(self.value)
            self.quality = distrib.getQuality(self.value)
            self.howExtreme = distrib.getExtremeness(self.value)
            return

        def __repr__(self):
            return 'Trait: %s, %s, %s, %s' % (self.name,
             self.value,
             TraitDistribution.TraitQuality.getString(self.quality),
             self.howExtreme)

    def __init__(self, traitSeed, safeZoneId, traitValueList = []):
        # NOTE: the mutable default for traitValueList is tolerable only
        # because it is never mutated here.
        self.traitSeed = traitSeed
        self.safeZoneId = safeZoneId
        self.rng = random.Random(self.traitSeed)
        self.traits = {}
        for i in xrange(len(PetTraits.TraitDescs)):
            if i < len(traitValueList) and traitValueList[i] > 0.0:
                # Reuse the supplied (persisted) value for this trait.
                trait = PetTraits.Trait(i, self, traitValueList[i])
            else:
                trait = PetTraits.Trait(i, self)
            self.traits[trait.name] = trait
            # Also expose each trait value as a plain attribute.
            self.__dict__[trait.name] = trait.value
        # Collect non-AVERAGE traits with worth, most extreme first
        # (insertion sort on howExtreme, descending).
        extremeTraits = []
        for trait in self.traits.values():
            if not trait.hasWorth:
                continue
            if trait.quality == TraitDistribution.TraitQuality.AVERAGE:
                continue
            i = 0
            while i < len(extremeTraits) and extremeTraits[i].howExtreme > trait.howExtreme:
                i += 1
            extremeTraits.insert(i, trait)
        self.extremeTraits = []
        for trait in extremeTraits:
            self.extremeTraits.append((trait.name, trait.quality))

    def getValueList(self):
        # Trait values in TraitDescs order, suitable for persistence.
        traitValues = []
        for desc in PetTraits.TraitDescs:
            traitName = desc[0]
            traitValues.append(self.traits[traitName].value)
        return traitValues

    def getTraitValue(self, traitName):
        return self.traits[traitName].value

    def getExtremeTraits(self):
        # Shallow copy so callers cannot mutate the cached list.
        return copy.copy(self.extremeTraits)

    def getOverallValue(self):
        # Average "goodness" over traits that have worth, inverting
        # lower-is-better traits.
        total = 0
        numUsed = 0
        for trait in self.traits.values():
            if trait.hasWorth:
                if trait.higherIsBetter:
                    value = trait.value
                else:
                    value = 1.0 - trait.value
                total += value
                numUsed += 1
        # NOTE(review): divides by the total trait count, not numUsed, so
        # no-worth traits drag the average down -- confirm this is intended.
        value = total / len(self.traits.values())
        return value

    def getExtremeTraitDescriptions(self):
        # Map each extreme trait to its localized description string.
        descs = []
        TraitQuality = TraitDistribution.TraitQuality
        Quality2index = {TraitQuality.VERY_BAD: 0,
         TraitQuality.BAD: 1,
         TraitQuality.GOOD: 2,
         TraitQuality.VERY_GOOD: 3}
        for name, quality in self.extremeTraits:
            descs.append(TTLocalizer.PetTrait2descriptions[name][Quality2index[quality]])
return descs | {
"content_hash": "7fb17667e4d056d99e2fbbe862ae1c5f",
"timestamp": "",
"source": "github",
"line_count": 250,
"max_line_length": 92,
"avg_line_length": 39.088,
"alnum_prop": 0.589541547277937,
"repo_name": "DedMemez/ODS-August-2017",
"id": "770e9ef4ca3abe5ab5397f4f287ae86bf9883b0f",
"size": "9856",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pets/PetTraits.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "10152014"
},
{
"name": "Shell",
"bytes": "707"
}
],
"symlink_target": ""
} |
""" This script converts a set of SWC files into an HDF5 file """
import h5py
import numpy as np
from glob import glob
# assumption
# the ids are ordered starting from 1, root node has parent -1
# the swc files are from neuromorpho.org
# http://neuromorpho.org/neuroMorpho/dableFiles/turner/Source-Version/l71.swc
# http://neuromorpho.org/neuroMorpho/dableFiles/turner/Source-Version/l55.swc
fi=glob('*.swc')
pos=None
offset=[0]
parents=None
for file in fi:
print "Working on ", file
a=np.loadtxt(file)
if pos == None:
pos = a[:,2:5]
parents = a[:,6] - 1
parents = parents.astype(np.int32)
col = np.random.random_integers(50, 255, (1,4)).astype(np.ubyte)
col[0,3] = 255
col = np.repeat(col, len(a), axis = 0)
colors = col
else:
pos = np.vstack( (pos, a[:,2:5]) )
parents = np.hstack( (parents, a[:,6] - 1) ).astype(np.int32)
# another random color without transparency
col = np.random.random_integers(50, 255, (1,4)).astype(np.ubyte)
col[0,3] = 255
col = np.repeat(col, len(a), axis = 0)
colors = np.vstack( (colors, col)).astype(np.ubyte)
size = len(a) + offset[-1]
offset.append(size)
offset = np.array(offset, dtype = np.int32)
pos = pos.astype(np.float32)
# in case, add a scaling factor
#pos = pos / 1000.
def create_hdf(pos, offset, parents, colors):
    """Write the neuron arrays into 'neurons.hdf5' under a 'neurons' group."""
    hdf = h5py.File('neurons.hdf5', 'w')
    group = hdf.create_group('neurons')
    for name, data in (('positions', pos),
                       ('offset', offset),
                       ('parents', parents),
                       ('colors', colors)):
        group.create_dataset(name, data=data)
    hdf.close()
# Write the concatenated neuron data out as HDF5.
create_hdf(pos, offset, parents, colors)
| {
"content_hash": "934487acc63df567f39c42a6eefd151e",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 77,
"avg_line_length": 30.79310344827586,
"alnum_prop": 0.6338185890257558,
"repo_name": "fos/fos-legacy",
"id": "97ef1e737b2c8d2bf26584e20cb3045a7c59ae40",
"size": "1786",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/neurons/swc2hdf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "7294"
},
{
"name": "Erlang",
"bytes": "2662"
},
{
"name": "Haskell",
"bytes": "1973"
},
{
"name": "JavaScript",
"bytes": "432354"
},
{
"name": "Python",
"bytes": "1231025"
}
],
"symlink_target": ""
} |
class Meta(dict):
    """Mapping for metadata.

    Behaves exactly like a plain ``dict``; the subclass only provides a
    distinct type (presumably for isinstance checks or future extension —
    confirm with callers).
    """
| {
"content_hash": "36c45a13e27c72e1042e73cb1b745986",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 17,
"avg_line_length": 13.5,
"alnum_prop": 0.6296296296296297,
"repo_name": "mjdominus/suxsom",
"id": "24f6def960e0790310cd2589a7033ab88afad07f",
"size": "28",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "suxsom/meta.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Perl",
"bytes": "7346"
}
],
"symlink_target": ""
} |
import StringIO
from telemetry.internal.backends.chrome_inspector import websocket
class InspectorConsole(object):
    """Captures console messages from a DevTools inspector websocket.

    Registers for the 'Console' domain at construction time; messages are
    only written to a buffer while GetCurrentConsoleOutputBuffer is active.
    """

    def __init__(self, inspector_websocket):
        self._inspector_websocket = inspector_websocket
        self._inspector_websocket.RegisterDomain('Console', self._OnNotification)
        self._message_output_stream = None
        self._last_message = None
        self._console_enabled = False

    def _OnNotification(self, msg):
        # Only 'Console.messageAdded' events carry console output.
        if msg['method'] != 'Console.messageAdded':
            return
        assert self._message_output_stream
        message = msg['params']['message']
        # Messages originating from the new-tab page are noise; skip them.
        if message['url'] == 'chrome://newtab/':
            return
        self._last_message = '(%s) %s:%i: %s' % (
            message['level'],
            message['url'],
            message['line'],
            message['text'])
        self._message_output_stream.write(
            '%s\n' % self._last_message)

    def GetCurrentConsoleOutputBuffer(self, timeout=10):
        """Return console output accumulated over up to `timeout` seconds."""
        self._message_output_stream = StringIO.StringIO()
        self._EnableConsoleOutputStream(timeout)
        try:
            self._inspector_websocket.DispatchNotifications(timeout)
        except websocket.WebSocketTimeoutException:
            # Timing out just means no more messages arrived in time;
            # whatever was captured is still returned.
            pass
        finally:
            # Snapshot the buffer before disabling/closing, matching the
            # original evaluation order of `return ...getvalue()`.
            output = self._message_output_stream.getvalue()
            self._DisableConsoleOutputStream(timeout)
            self._message_output_stream.close()
            self._message_output_stream = None
        return output

    def _EnableConsoleOutputStream(self, timeout):
        self._inspector_websocket.SyncRequest({'method': 'Console.enable'}, timeout)

    def _DisableConsoleOutputStream(self, timeout):
        self._inspector_websocket.SyncRequest(
            {'method': 'Console.disable'}, timeout)
| {
"content_hash": "7264a96e1b28ea3b64f992da8c323677",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 80,
"avg_line_length": 36.91304347826087,
"alnum_prop": 0.676678445229682,
"repo_name": "endlessm/chromium-browser",
"id": "445222ab9a403d37ae24b787079409d89ea4ad39",
"size": "1860",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "third_party/catapult/telemetry/telemetry/internal/backends/chrome_inspector/inspector_console.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import requests
import json
import constants
import sys
import os
from cloudify.exceptions import RecoverableError
from cloudify import ctx
from cloudify.decorators import operation
import utils
import auth
import azurerequests
@operation
def creation_validation(**_):
    # Fail fast if any required storage-account property is missing from the
    # node definition.
    # NOTE(review): validates key-by-key here, while get_provisioning_state
    # passes the whole list in one call -- confirm which form
    # utils.validate_node_properties expects.
    for property_key in constants.STORAGE_ACCOUNT_REQUIRED_PROPERTIES:
        utils.validate_node_properties(property_key, ctx.node.properties)
@operation
def create_storage_account(**_):
    """Create the storage account via the ARM REST API (or reuse an existing one)."""
    # Load runtime properties persisted by related nodes before resolving names.
    utils.set_runtime_properties_from_file()
    # Resolve the account name; returns None when an existing account is reused.
    storage_account_name = utils.set_resource_name(_get_storage_account_name, 'Storage account',
                                                   constants.STORAGE_ACCOUNT_KEY, constants.EXISTING_STORAGE_ACCOUNT_KEY,
                                                   constants.STORAGE_ACCOUNT_PREFIX)
    if storage_account_name is None:
        # Using an existing storage account, so don't create anything
        return constants.ACCEPTED_STATUS_CODE
    headers, location, subscription_id = auth.get_credentials()
    resource_group_name = ctx.instance.runtime_properties[constants.RESOURCE_GROUP_KEY]
    if constants.STORAGE_ACCOUNT_KEY not in ctx.instance.runtime_properties:
        ctx.instance.runtime_properties[constants.STORAGE_ACCOUNT_KEY] = storage_account_name
    ctx.logger.info("Creating a new storage account: {0}".format(storage_account_name))
    # PUT to the ARM endpoint with only accountType and location set.
    storage_account_url = constants.azure_url+'/subscriptions/'+subscription_id+'/resourceGroups/'+resource_group_name+'/providers/Microsoft.Storage/storageAccounts/'+storage_account_name+'?api-version='+constants.api_version
    storage_account_params = json.dumps({"properties": {"accountType": constants.storage_account_type, }, "location":location})
    status_code = utils.create_resource(headers, storage_account_name, storage_account_params, storage_account_url, constants.STORAGE_ACCOUNT)
    ctx.logger.info("{0} is {1}".format(constants.STORAGE_ACCOUNT_KEY, storage_account_name))
    return status_code
@operation
def verify_provision(start_retry_interval, **kwargs):
    """Retry until the storage account reaches the SUCCEEDED provisioning state."""
    account_name = ctx.instance.runtime_properties[constants.STORAGE_ACCOUNT_KEY]
    if get_provisioning_state() == constants.SUCCEEDED:
        return None
    return ctx.operation.retry(
        message='Waiting for the storage_account ({0}) to be provisioned'.format(account_name),
        retry_after=start_retry_interval)
@operation
def delete_storage_account(start_retry_interval=30, **kwargs):
    """Delete the storage account (unless external) and clear runtime state."""
    status_code = delete_current_storage_account(start_retry_interval, **kwargs)
    # Runtime properties are cleared regardless of the delete outcome.
    utils.clear_runtime_properties()
    return status_code
def delete_current_storage_account(start_retry_interval=30, **kwargs):
    """Issue the ARM DELETE for the storage account in runtime properties.

    Returns constants.ACCEPTED_STATUS_CODE when an external (pre-existing)
    account is in use (nothing to delete), the result of
    azurerequests.check_delete_response for a dispatched request, or
    constants.FAILURE_CODE when the request raises.
    """
    if constants.USE_EXTERNAL_RESOURCE in ctx.node.properties and ctx.node.properties[constants.USE_EXTERNAL_RESOURCE]:
        ctx.logger.info("An existing storage_account was used, so there's no need to delete")
        return constants.ACCEPTED_STATUS_CODE
    # Flag the resource as not-yet-deleted until the delete is confirmed.
    ctx.instance.runtime_properties[constants.RESOURCE_NOT_DELETED] = True
    resource_group_name = ctx.instance.runtime_properties[constants.RESOURCE_GROUP_KEY]
    headers, location, subscription_id = auth.get_credentials()
    storage_account_name = ctx.instance.runtime_properties[constants.STORAGE_ACCOUNT_KEY]
    ctx.logger.info("Deleting Storage Account {0}".format(storage_account_name))
    try:
        storage_account_url = constants.azure_url+'/subscriptions/'+subscription_id+'/resourceGroups/'+resource_group_name+'/providers/Microsoft.Storage/storageAccounts/'+storage_account_name+'?api-version='+constants.api_version
        response_sa = requests.delete(url=storage_account_url, headers=headers)
        return azurerequests.check_delete_response(response_sa, start_retry_interval,
                                                   'delete_current_storage_account', storage_account_name,
                                                   'storage_account')
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; any API/network failure is reported as a failure.
        ctx.logger.info("Storage Account {0} could not be deleted.".format(storage_account_name))
        return constants.FAILURE_CODE
@operation
def set_dependent_resources_names(azure_config, **kwargs):
    # Persist the resource-group key plus required config keys to a file —
    # presumably reloaded via utils.set_runtime_properties_from_file() in
    # create_storage_account; confirm in utils.
    utils.write_target_runtime_properties_to_file([constants.RESOURCE_GROUP_KEY]+constants.REQUIRED_CONFIG_DATA)
def _get_storage_account_name(storage_account_name):
    """Return True if the account name already exists in the resource group."""
    ctx.logger.info("In _get_storage_account_name looking for {0} ".format(storage_account_name))
    headers, location, subscription_id = auth.get_credentials()
    if constants.RESOURCE_GROUP_KEY not in ctx.instance.runtime_properties:
        # The resource group node has not run yet; ask Cloudify to retry.
        raise RecoverableError("{} is not in storage account runtime_properties yet".format(constants.RESOURCE_GROUP_KEY))
    resource_group_name = ctx.instance.runtime_properties[constants.RESOURCE_GROUP_KEY]
    url = constants.azure_url+'/subscriptions/'+subscription_id+'/resourceGroups/'+resource_group_name+'/providers/Microsoft.Storage/storageAccounts?api-version='+constants.api_version
    response_list = requests.get(url, headers=headers)
    ctx.logger.info("storage account response_list.text {0} ".format(response_list.text))
    # Substring match against the raw listing response.
    exists = storage_account_name in response_list.text
    if not exists:
        ctx.logger.info("Storage account {0} does not exist".format(storage_account_name))
    return exists
def get_provisioning_state(**_):
    """Return the ARM provisioning state of the current storage account."""
    # NOTE(review): passes the whole required-properties list in one call,
    # while creation_validation validates key-by-key -- confirm which form
    # utils.validate_node_properties expects.
    utils.validate_node_properties(constants.STORAGE_ACCOUNT_REQUIRED_PROPERTIES, ctx.node.properties)
    resource_group_name = ctx.instance.runtime_properties[constants.RESOURCE_GROUP_KEY]
    storage_account_name = ctx.instance.runtime_properties[constants.STORAGE_ACCOUNT_KEY]
    ctx.logger.info("Searching for storage account {0} in resource group {1}".format(storage_account_name, resource_group_name))
    headers, location, subscription_id = auth.get_credentials()
    storage_account_url = "{0}/subscriptions/{1}/resourceGroups/{2}/providers/" \
                          "Microsoft.Storage/storageAccounts/{3}?api-version={4}".\
        format(constants.azure_url, subscription_id, resource_group_name, storage_account_name, constants.api_version)
    return azurerequests.get_provisioning_state(headers, storage_account_name, storage_account_url)
def get_storageaccount_access_keys(**_):
    """Fetch the storage account's two access keys via the ARM listKeys action."""
    ctx.logger.info("In get_storageaccount_access_keys")
    headers, location, subscription_id = auth.get_credentials()
    resource_group_name = ctx.instance.runtime_properties[constants.RESOURCE_GROUP_KEY]
    ctx.logger.info("In get_storageaccount_access_keys resource group is {0}".format(resource_group_name))
    storage_account_name = ctx.instance.runtime_properties[constants.STORAGE_ACCOUNT_KEY]
    api_version = constants.api_version
    # listKeys is invoked as a POST action with an empty JSON body.
    keys_url = "{0}/subscriptions/{1}/resourceGroups/{2}/providers/Microsoft.Storage/" \
               "storageAccounts/{3}/listKeys?api-version={4}"\
        .format(constants.azure_url, subscription_id, resource_group_name,
                storage_account_name, api_version)
    response = requests.post(url=keys_url, data="{}", headers=headers)
    result = response.json()
return [result['key1'], result['key2']] | {
"content_hash": "e44eae56a52ffcc5ff73b1b60dd634b5",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 229,
"avg_line_length": 52.161764705882355,
"alnum_prop": 0.7289258528333803,
"repo_name": "anilveeramalli/cloudify-azure-plugin",
"id": "3c10639fd74390b2790c48e15c0826f5a4e22d44",
"size": "7757",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azurecloudify/storageaccount.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "241014"
},
{
"name": "Shell",
"bytes": "15254"
}
],
"symlink_target": ""
} |
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import config
from . import state
class table(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/tables/table. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: A network instance manages one or more forwarding or
routing tables. These may reflect a Layer 2 forwarding
information base, a Layer 3 routing table, or an MPLS
LFIB.
The table populated by a protocol within an instance is
identified by the protocol identifier (e.g., BGP, IS-IS)
and the address family (e.g., IPv4, IPv6) supported by
that protocol. Multiple instances of the same protocol
populate a single table -- such that
a single IS-IS or OSPF IPv4 table exists per network
instance.
An implementation is expected to create entries within
this list when the relevant protocol context is enabled.
i.e., when a BGP instance is created with IPv4 and IPv6
address families enabled, the protocol=BGP,
address-family=IPv4 table is created by the system.
"""
__slots__ = (
"_path_helper",
"_extmethods",
"__protocol",
"__address_family",
"__config",
"__state",
)
_yang_name = "table"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__protocol = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="protocol",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
is_keyval=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=True,
)
self.__address_family = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="address-family",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
is_keyval=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=True,
)
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
    def _path(self):
        # Path of this node in the YANG data tree: delegate to the parent
        # when one is attached, otherwise fall back to the static schema path.
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return ["network-instances", "network-instance", "tables", "table"]
def _get_protocol(self):
"""
Getter method for protocol, mapped from YANG variable /network_instances/network_instance/tables/table/protocol (leafref)
YANG Description: A reference to the protocol that populates
the table
"""
return self.__protocol
def _set_protocol(self, v, load=False):
"""
Setter method for protocol, mapped from YANG variable /network_instances/network_instance/tables/table/protocol (leafref)
If this variable is read-only (config: false) in the
source YANG file, then _set_protocol is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_protocol() directly.
YANG Description: A reference to the protocol that populates
the table
"""
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError(
"Cannot set keys directly when" + " within an instantiated list"
)
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=six.text_type,
is_leaf=True,
yang_name="protocol",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
is_keyval=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """protocol must be of a type compatible with leafref""",
"defined-type": "leafref",
"generated-type": """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="protocol", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='leafref', is_config=True)""",
}
)
self.__protocol = t
if hasattr(self, "_set"):
self._set()
def _unset_protocol(self):
self.__protocol = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="protocol",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
is_keyval=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=True,
)
def _get_address_family(self):
"""
Getter method for address_family, mapped from YANG variable /network_instances/network_instance/tables/table/address_family (leafref)
YANG Description: A reference to the address-family that the
table represents
"""
return self.__address_family
def _set_address_family(self, v, load=False):
"""
Setter method for address_family, mapped from YANG variable /network_instances/network_instance/tables/table/address_family (leafref)
If this variable is read-only (config: false) in the
source YANG file, then _set_address_family is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_address_family() directly.
YANG Description: A reference to the address-family that the
table represents
"""
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError(
"Cannot set keys directly when" + " within an instantiated list"
)
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=six.text_type,
is_leaf=True,
yang_name="address-family",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
is_keyval=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """address_family must be of a type compatible with leafref""",
"defined-type": "leafref",
"generated-type": """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="address-family", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='leafref', is_config=True)""",
}
)
self.__address_family = t
if hasattr(self, "_set"):
self._set()
def _unset_address_family(self):
self.__address_family = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="address-family",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
is_keyval=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=True,
)
def _get_config(self):
"""
Getter method for config, mapped from YANG variable /network_instances/network_instance/tables/table/config (container)
YANG Description: Configuration parameters relating to the
table
"""
return self.__config
def _set_config(self, v, load=False):
"""
Setter method for config, mapped from YANG variable /network_instances/network_instance/tables/table/config (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_config is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_config() directly.
YANG Description: Configuration parameters relating to the
table
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """config must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__config = t
if hasattr(self, "_set"):
self._set()
def _unset_config(self):
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/tables/table/state (container)
YANG Description: State parameters related to the table
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/tables/table/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: State parameters related to the table
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
protocol = __builtin__.property(_get_protocol, _set_protocol)
address_family = __builtin__.property(_get_address_family, _set_address_family)
config = __builtin__.property(_get_config, _set_config)
state = __builtin__.property(_get_state, _set_state)
_pyangbind_elements = OrderedDict(
[
("protocol", protocol),
("address_family", address_family),
("config", config),
("state", state),
]
)
from . import config
from . import state
class table(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/tables/table. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: A network instance manages one or more forwarding or
routing tables. These may reflect a Layer 2 forwarding
information base, a Layer 3 routing table, or an MPLS
LFIB.
The table populated by a protocol within an instance is
identified by the protocol identifier (e.g., BGP, IS-IS)
and the address family (e.g., IPv4, IPv6) supported by
that protocol. Multiple instances of the same protocol
populate a single table -- such that
a single IS-IS or OSPF IPv4 table exists per network
instance.
An implementation is expected to create entries within
this list when the relevant protocol context is enabled.
i.e., when a BGP instance is created with IPv4 and IPv6
address families enabled, the protocol=BGP,
address-family=IPv4 table is created by the system.
"""
__slots__ = (
"_path_helper",
"_extmethods",
"__protocol",
"__address_family",
"__config",
"__state",
)
_yang_name = "table"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__protocol = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="protocol",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
is_keyval=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=True,
)
self.__address_family = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="address-family",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
is_keyval=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=True,
)
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return ["network-instances", "network-instance", "tables", "table"]
def _get_protocol(self):
"""
Getter method for protocol, mapped from YANG variable /network_instances/network_instance/tables/table/protocol (leafref)
YANG Description: A reference to the protocol that populates
the table
"""
return self.__protocol
def _set_protocol(self, v, load=False):
"""
Setter method for protocol, mapped from YANG variable /network_instances/network_instance/tables/table/protocol (leafref)
If this variable is read-only (config: false) in the
source YANG file, then _set_protocol is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_protocol() directly.
YANG Description: A reference to the protocol that populates
the table
"""
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError(
"Cannot set keys directly when" + " within an instantiated list"
)
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=six.text_type,
is_leaf=True,
yang_name="protocol",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
is_keyval=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """protocol must be of a type compatible with leafref""",
"defined-type": "leafref",
"generated-type": """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="protocol", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='leafref', is_config=True)""",
}
)
self.__protocol = t
if hasattr(self, "_set"):
self._set()
def _unset_protocol(self):
self.__protocol = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="protocol",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
is_keyval=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=True,
)
def _get_address_family(self):
"""
Getter method for address_family, mapped from YANG variable /network_instances/network_instance/tables/table/address_family (leafref)
YANG Description: A reference to the address-family that the
table represents
"""
return self.__address_family
def _set_address_family(self, v, load=False):
"""
Setter method for address_family, mapped from YANG variable /network_instances/network_instance/tables/table/address_family (leafref)
If this variable is read-only (config: false) in the
source YANG file, then _set_address_family is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_address_family() directly.
YANG Description: A reference to the address-family that the
table represents
"""
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError(
"Cannot set keys directly when" + " within an instantiated list"
)
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=six.text_type,
is_leaf=True,
yang_name="address-family",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
is_keyval=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """address_family must be of a type compatible with leafref""",
"defined-type": "leafref",
"generated-type": """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="address-family", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='leafref', is_config=True)""",
}
)
self.__address_family = t
if hasattr(self, "_set"):
self._set()
def _unset_address_family(self):
self.__address_family = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="address-family",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
is_keyval=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=True,
)
def _get_config(self):
"""
Getter method for config, mapped from YANG variable /network_instances/network_instance/tables/table/config (container)
YANG Description: Configuration parameters relating to the
table
"""
return self.__config
def _set_config(self, v, load=False):
"""
Setter method for config, mapped from YANG variable /network_instances/network_instance/tables/table/config (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_config is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_config() directly.
YANG Description: Configuration parameters relating to the
table
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """config must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__config = t
if hasattr(self, "_set"):
self._set()
def _unset_config(self):
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/tables/table/state (container)
YANG Description: State parameters related to the table
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/tables/table/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: State parameters related to the table
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
protocol = __builtin__.property(_get_protocol, _set_protocol)
address_family = __builtin__.property(_get_address_family, _set_address_family)
config = __builtin__.property(_get_config, _set_config)
state = __builtin__.property(_get_state, _set_state)
_pyangbind_elements = OrderedDict(
[
("protocol", protocol),
("address_family", address_family),
("config", config),
("state", state),
]
)
| {
"content_hash": "d5372d405dc8113c25cb62971cdf3b37",
"timestamp": "",
"source": "github",
"line_count": 870,
"max_line_length": 377,
"avg_line_length": 38.314942528735635,
"alnum_prop": 0.5819283614327714,
"repo_name": "napalm-automation/napalm-yang",
"id": "c35c7e9ded992dd656dbe75dd604e492649cb50e",
"size": "33358",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "napalm_yang/models/openconfig/network_instances/network_instance/tables/table/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "370237"
},
{
"name": "Jupyter Notebook",
"bytes": "152135"
},
{
"name": "Makefile",
"bytes": "1965"
},
{
"name": "Python",
"bytes": "105688785"
},
{
"name": "Roff",
"bytes": "1632"
}
],
"symlink_target": ""
} |
import numpy as np
import tensorflow as tf
import h5py
from sklearn.preprocessing import OneHotEncoder
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import time
import scipy.io
# Functions for initializing neural nets parameters
def weight_variable(shape, var_name):
initial = tf.truncated_normal(shape, stddev=0.1, dtype=tf.float64)
return tf.Variable(initial, name=var_name)
def bias_variable(shape, var_name):
initial = tf.constant(0.1, shape=shape, dtype=tf.float64)
return tf.Variable(initial, name=var_name)
def conv2d(x, W):
return tf.nn.conv2d(x, W, [1, 1, 1, 1], 'VALID')
def batch_nm(x, eps=1e-5):
# batch normalization to have zero mean and unit variance
mu, var = tf.nn.moments(x, [0])
return tf.nn.batch_normalization(x, mu, var, None, None, eps)
# Download data from .mat file into numpy array
print('==> Experiment 8g')
filepath = '/scratch/ttanpras/exp8a_d7_1s.mat'
print('==> Loading data from {}'.format(filepath))
f = h5py.File(filepath)
data_train = np.array(f.get('trainingFeatures'))
data_val = np.array(f.get('validationFeatures'))
del f
print('==> Data sizes:',data_train.shape, data_val.shape)
# Transform labels into on-hot encoding form
enc = OneHotEncoder(n_values = 71)
'''
NN config parameters
'''
sub_window_size = 32
num_features = 169*sub_window_size
num_frames = 32
hidden_layer_size = 2000
num_bits = 2000
num_classes = 71
print("Number of features:", num_features)
print("Number of songs:",num_classes)
# Reshape input features
X_train = np.reshape(data_train,(-1, num_features))
X_val = np.reshape(data_val,(-1, num_features))
print("Input sizes:", X_train.shape, X_val.shape)
y_train = []
y_val = []
# Add Labels
for label in range(num_classes):
for sampleCount in range(X_train.shape[0]//num_classes):
y_train.append([label])
for sampleCount in range(X_val.shape[0]//num_classes):
y_val.append([label])
X_train = np.concatenate((X_train, y_train), axis=1)
X_val = np.concatenate((X_val, y_val), axis=1)
# Shuffle
np.random.shuffle(X_train)
np.random.shuffle(X_val)
# Separate coefficients and labels
y_train = X_train[:, -1].reshape(-1, 1)
X_train = X_train[:, :-1]
y_val = X_val[:, -1].reshape(-1, 1)
X_val = X_val[:, :-1]
print('==> Data sizes:',X_train.shape, y_train.shape,X_val.shape, y_val.shape)
y_train = enc.fit_transform(y_train.copy()).astype(int).toarray()
y_val = enc.fit_transform(y_val.copy()).astype(int).toarray()
plotx = []
ploty_train = []
ploty_val = []
# Set-up NN layers
x = tf.placeholder(tf.float64, [None, num_features])
W1 = weight_variable([num_features, hidden_layer_size], "W1")
b1 = bias_variable([hidden_layer_size], "b1")
OpW1 = tf.placeholder(tf.float64, [num_features, hidden_layer_size])
Opb1 = tf.placeholder(tf.float64, [hidden_layer_size])
# Hidden layer activation function: ReLU
h1 = tf.nn.relu(tf.matmul(x, W1) + b1)
W2 = weight_variable([hidden_layer_size, num_bits], "W2")
b2 = bias_variable([num_bits], "b2")
OpW2 = tf.placeholder(tf.float64, [hidden_layer_size, num_bits])
Opb2 = tf.placeholder(tf.float64, [num_bits])
# Pre-activation value for bit representation
h = tf.matmul(h1, W2) + b2
h2 = tf.nn.relu(tf.matmul(h1, W2) + b2)
W3 = weight_variable([num_bits, num_classes], "W3")
b3 = bias_variable([num_classes], "b3")
OpW3 = tf.placeholder(tf.float64, [num_bits, num_classes])
Opb3 = tf.placeholder(tf.float64, [num_classes])
# Softmax layer (Output), dtype = float64
y = tf.matmul(h2, W3) + b3
# NN desired value (labels)
y_ = tf.placeholder(tf.float64, [None, num_classes])
# Loss function
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
sess = tf.InteractiveSession()
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float64))
sess.run(tf.initialize_all_variables())
# Training
numTrainingVec = len(X_train)
batchSize = 500
numEpochs = 1000
bestValErr = 10000
bestValEpoch = 0
startTime = time.time()
for epoch in range(numEpochs):
for i in range(0,numTrainingVec,batchSize):
# Batch Data
batchEndPoint = min(i+batchSize, numTrainingVec)
trainBatchData = X_train[i:batchEndPoint]
trainBatchLabel = y_train[i:batchEndPoint]
train_step.run(feed_dict={x: trainBatchData, y_: trainBatchLabel})
# Print accuracy
if epoch % 5 == 0 or epoch == numEpochs-1:
plotx.append(epoch)
train_error = cross_entropy.eval(feed_dict={x:trainBatchData, y_: trainBatchLabel})
train_acc = accuracy.eval(feed_dict={x:trainBatchData, y_: trainBatchLabel})
val_error = cross_entropy.eval(feed_dict={x:X_val, y_: y_val})
val_acc = accuracy.eval(feed_dict={x:X_val, y_: y_val})
ploty_train.append(train_error)
ploty_val.append(val_error)
print("epoch: %d, val error %g, train error %g"%(epoch, val_error, train_error))
if val_error < bestValErr:
bestValErr = val_error
bestValEpoch = epoch
OpW1 = W1
Opb1 = b1
OpW2 = W2
Opb2 = b2
OpW3 = W3
Opb3 = b3
endTime = time.time()
print("Elapse Time:", endTime - startTime)
print("Best validation error: %g at epoch %d"%(bestValErr, bestValEpoch))
# Restore best model for early stopping
W1 = OpW1
b1 = Opb1
W2 = OpW2
b2 = Opb2
W3 = OpW3
b3 = Opb3
saveweight = {}
saveweight['W1'] = np.array(W1.eval())
saveweight['b1'] = np.array(b1.eval())
saveweight['W2'] = np.array(W2.eval())
saveweight['b2'] = np.array(b2.eval())
scipy.io.savemat('exp8g_none_weight.mat',saveweight)
print('==> Generating error plot...')
errfig = plt.figure()
trainErrPlot = errfig.add_subplot(111)
trainErrPlot.set_xlabel('Number of Epochs')
trainErrPlot.set_ylabel('Cross-Entropy Error')
trainErrPlot.set_title('Error vs Number of Epochs')
trainErrPlot.scatter(plotx, ploty_train)
valErrPlot = errfig.add_subplot(111)
valErrPlot.scatter(plotx, ploty_val)
errfig.savefig('exp8g_none.png')
'''
GENERATING REPRESENTATION OF NOISY FILES
'''
namelist = ['orig','comp5','comp10','str5','str10','ampSat_(-15)','ampSat_(-10)','ampSat_(-5)', \
'ampSat_(5)','ampSat_(10)','ampSat_(15)','pitchShift_(-1)','pitchShift_(-0.5)', \
'pitchShift_(0.5)','pitchShift_(1)','rev_dkw','rev_gal','rev_shan0','rev_shan1', \
'rev_gen','crowd-15','crowd-10','crowd-5','crowd0','crowd5','crowd10','crowd15', \
'crowd100','rest-15','rest-10','rest-5','rest0','rest5','rest10','rest15', \
'rest100','AWGN-15','AWGN-10','AWGN-5','AWGN0','AWGN5','AWGN10','AWGN15', 'AWGN100']
outdir = '/scratch/ttanpras/taylorswift_noisy_processed/'
repDict = {}
# Loop over each CQT files, not shuffled
for count in range(len(namelist)):
name = namelist[count]
filename = outdir + name + '.mat'
cqt = scipy.io.loadmat(filename)['Q']
cqt = np.transpose(np.array(cqt))
# Group into windows of 32 without overlapping
# Discard any leftover frames
num_windows = cqt.shape[0] // 32
cqt = cqt[:32*num_windows]
X = np.reshape(cqt,(num_windows, num_features))
# Feed window through model (Only 1 layer of weight w/o non-linearity)
rep = h.eval(feed_dict={x:X})
# Put the output representation into a dictionary
repDict['n'+str(count)] = rep
scipy.io.savemat('exp8g_none_repNon.mat',repDict) | {
"content_hash": "47f508623f7fe859809386f62b93739c",
"timestamp": "",
"source": "github",
"line_count": 235,
"max_line_length": 97,
"avg_line_length": 32.761702127659575,
"alnum_prop": 0.6508637485387713,
"repo_name": "Haunter17/MIR_SU17",
"id": "79ccefe2a03ed9bdf694898fb051f52df263366a",
"size": "7699",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "exp8/exp8g_none.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "7578"
},
{
"name": "M",
"bytes": "867"
},
{
"name": "Matlab",
"bytes": "222383"
},
{
"name": "Python",
"bytes": "1887228"
},
{
"name": "Shell",
"bytes": "18293"
}
],
"symlink_target": ""
} |
"""
# lixo unionmangas
find . -type f -name 'bannernovo_1.png' -delete
find . -type f -name 'bannernovo_2.png' -delete
find . -type f -name 'bannernovo_3.png' -delete
find . -type f -name 'banner_scan.png' -delete
find . -type f -name 'promo-anime.png' -delete
find . -type f -name 'Créditos.jpg' -delete
# lixo hqultimate
find . -type f -name 'banner_digital.png' -delete
find . -type f -name 'banner-hq.png' -delete
""" | {
"content_hash": "03aa45fa0385be3c2d85a23798b8a453",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 49,
"avg_line_length": 30.214285714285715,
"alnum_prop": 0.6784869976359338,
"repo_name": "marcelomaia/manga_downloader",
"id": "b5363d6d528e60fdaef5ac0129e541ae61eb4768",
"size": "424",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "downloader/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21952"
}
],
"symlink_target": ""
} |
from django.conf.urls import include, url
from mothra.settings import LOGIN_URL
from mothra.settings import DEBUG, STATIC_DOC_ROOT
import workflows.views
import django.contrib.auth.views as auth_views
from django.views.static import serve
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
# Base URL configuration: streams, admin, REST API and API docs.
urlpatterns = [
    # Examples:
    # url(r'^$', 'mothra.views.home', name='home'),
    # url(r'^mothra/', include('mothra.foo.urls')),
    # Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    #url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
    url(r'^streams/', include('streams.urls')),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^api/', include('workflows.api.urls')),
    url(r'^docs/', include('rest_framework_docs.urls')),
    # url('^' + LOGIN_URL[1:] + '$', auth_views.login, name='login'),
    # url(r'^logout/$', auth_views.logout, name='logout'),
    # url(r'^change-password/$', auth_views.password_change, name='password change'),
    # url(r'^password-changed/$', auth_views.password_change_done, name='password change done'),
    #
    # url(r'^password_reset/$', auth_views.password_reset, name='password_reset'),
    # url(r'^password_reset/done/$', auth_views.password_reset_done, name='password_reset_done'),
    # url(r'^reset/(?P<uidb36>[0-9A-Za-z]{1,13})-(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$', auth_views.password_reset_confirm),
    # url(r'^reset/done/$', auth_views.password_reset_complete),
]
#load urls.py from imported packages
# Maps package name -> that package's urlpatterns, filled in by the
# set_package_url callback during module_importer discovery below.
packageUrls = {}
from workflows import module_importer
def set_package_url(name, value, package):
    # Callback invoked for every module-level attribute of each package's
    # urls.py; we only collect the conventional ``urlpatterns`` list.
    if name == 'urlpatterns':
        packageUrls[package] = value
module_importer.import_all_packages_libs("urls",set_package_url)
# Mount each discovered package's URLs under /workflows/<package-with-dashes>/.
urlpatterns += [url(r'^workflows/' + pck.replace('.','-') + '/', include(packageUrls[pck])) for pck in packageUrls ]
urlpatterns += [url(r'^workflows/widget-iframe/(?P<widget_id>[0-9]+)/$', workflows.views.widget_iframe, name='widget iframe'),]
## debug stuff to serve static media
if DEBUG:
    urlpatterns += [
        url(r'^media/(?P<path>.*)$', serve,
            {'document_root': STATIC_DOC_ROOT}),
    ]
if DEBUG:
    import debug_toolbar
    # Toolbar URLs are prepended so they take precedence over catch-alls.
    urlpatterns = [
        url('^__debug__/', include(debug_toolbar.urls)),
    ] + urlpatterns
"content_hash": "90859ae9aeee3dab3a5d5f7f1db7ec60",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 135,
"avg_line_length": 38.03125,
"alnum_prop": 0.6622843056696796,
"repo_name": "xflows/clowdflows-backend",
"id": "d344940613dcc34f8b5923febc5237527c139af8",
"size": "2434",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mothra/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "484"
},
{
"name": "HTML",
"bytes": "74413"
},
{
"name": "JavaScript",
"bytes": "10945"
},
{
"name": "Python",
"bytes": "372594"
},
{
"name": "Shell",
"bytes": "453"
}
],
"symlink_target": ""
} |
import os
import json
import logging
import warnings
from os.path import join, exists
from queuelib import PriorityQueue
from scrapy.utils.misc import load_object, create_instance
from scrapy.utils.job import job_dir
from scrapy.utils.deprecate import ScrapyDeprecationWarning
logger = logging.getLogger(__name__)
class Scheduler:
    """
    Scrapy Scheduler. It allows to enqueue requests and then get
    a next request to download. Scheduler is also handling duplication
    filtering, via dupefilter.
    Prioritization and queueing is not performed by the Scheduler.
    User sets ``priority`` field for each Request, and a PriorityQueue
    (defined by :setting:`SCHEDULER_PRIORITY_QUEUE`) uses these priorities
    to dequeue requests in a desired order.
    Scheduler uses two PriorityQueue instances, configured to work in-memory
    and on-disk (optional). When on-disk queue is present, it is used by
    default, and an in-memory queue is used as a fallback for cases where
    a disk queue can't handle a request (can't serialize it).
    :setting:`SCHEDULER_MEMORY_QUEUE` and
    :setting:`SCHEDULER_DISK_QUEUE` allow to specify lower-level queue classes
    which PriorityQueue instances would be instantiated with, to keep requests
    on disk and in memory respectively.
    Overall, Scheduler is an object which holds several PriorityQueue instances
    (in-memory and on-disk) and implements fallback logic for them.
    Also, it handles dupefilters.
    """
    def __init__(self, dupefilter, jobdir=None, dqclass=None, mqclass=None,
                 logunser=False, stats=None, pqclass=None, crawler=None):
        # Request de-duplication object (open/close/request_seen/log).
        self.df = dupefilter
        # Directory for the persistent (disk) queue; None disables persistence.
        self.dqdir = self._dqdir(jobdir)
        # Priority-queue wrapper class; instantiated over mq/dq classes below.
        self.pqclass = pqclass
        # Lower-level disk and memory queue classes.
        self.dqclass = dqclass
        self.mqclass = mqclass
        # When True, log the first request that fails to serialize to disk.
        self.logunser = logunser
        self.stats = stats
        self.crawler = crawler
    @classmethod
    def from_crawler(cls, crawler):
        """Build a Scheduler from the crawler's settings (standard Scrapy hook)."""
        settings = crawler.settings
        dupefilter_cls = load_object(settings['DUPEFILTER_CLASS'])
        dupefilter = create_instance(dupefilter_cls, settings, crawler)
        pqclass = load_object(settings['SCHEDULER_PRIORITY_QUEUE'])
        # Backwards compatibility: the raw queuelib class no longer matches
        # the expected API, so silently substitute the supported wrapper.
        if pqclass is PriorityQueue:
            warnings.warn("SCHEDULER_PRIORITY_QUEUE='queuelib.PriorityQueue'"
                          " is no longer supported because of API changes; "
                          "please use 'scrapy.pqueues.ScrapyPriorityQueue'",
                          ScrapyDeprecationWarning)
            from scrapy.pqueues import ScrapyPriorityQueue
            pqclass = ScrapyPriorityQueue
        dqclass = load_object(settings['SCHEDULER_DISK_QUEUE'])
        mqclass = load_object(settings['SCHEDULER_MEMORY_QUEUE'])
        logunser = settings.getbool('SCHEDULER_DEBUG')
        return cls(dupefilter, jobdir=job_dir(settings), logunser=logunser,
                   stats=crawler.stats, pqclass=pqclass, dqclass=dqclass,
                   mqclass=mqclass, crawler=crawler)
    def has_pending_requests(self):
        return len(self) > 0
    def open(self, spider):
        """Create the queues for *spider* and open the dupefilter."""
        self.spider = spider
        self.mqs = self._mq()
        # Disk queue only when a job directory was configured.
        self.dqs = self._dq() if self.dqdir else None
        return self.df.open()
    def close(self, reason):
        """Persist disk-queue state (if any) and close the dupefilter."""
        if self.dqs:
            state = self.dqs.close()
            self._write_dqs_state(self.dqdir, state)
        return self.df.close(reason)
    def enqueue_request(self, request):
        """Schedule *request*; return False if it was filtered as a duplicate.

        Prefers the disk queue and falls back to memory when the request
        cannot be pushed to disk (e.g. not serializable).
        """
        if not request.dont_filter and self.df.request_seen(request):
            self.df.log(request, self.spider)
            return False
        dqok = self._dqpush(request)
        if dqok:
            self.stats.inc_value('scheduler/enqueued/disk', spider=self.spider)
        else:
            self._mqpush(request)
            self.stats.inc_value('scheduler/enqueued/memory', spider=self.spider)
        self.stats.inc_value('scheduler/enqueued', spider=self.spider)
        return True
    def next_request(self):
        """Pop the next request, preferring memory, then disk; None if empty."""
        request = self.mqs.pop()
        if request:
            self.stats.inc_value('scheduler/dequeued/memory', spider=self.spider)
        else:
            request = self._dqpop()
            if request:
                self.stats.inc_value('scheduler/dequeued/disk', spider=self.spider)
        if request:
            self.stats.inc_value('scheduler/dequeued', spider=self.spider)
        return request
    def __len__(self):
        # Total pending requests across both queues (disk queue is optional).
        return len(self.dqs) + len(self.mqs) if self.dqs else len(self.mqs)
    def _dqpush(self, request):
        """Try to push to the disk queue; return True on success, None otherwise."""
        if self.dqs is None:
            return
        try:
            self.dqs.push(request)
        except ValueError as e:  # non serializable request
            if self.logunser:
                msg = ("Unable to serialize request: %(request)s - reason:"
                       " %(reason)s - no more unserializable requests will be"
                       " logged (stats being collected)")
                logger.warning(msg, {'request': request, 'reason': e},
                               exc_info=True, extra={'spider': self.spider})
                # Only the first failure is logged in full; later ones are
                # counted in stats only.
                self.logunser = False
            self.stats.inc_value('scheduler/unserializable',
                                 spider=self.spider)
            return
        else:
            return True
    def _mqpush(self, request):
        self.mqs.push(request)
    def _dqpop(self):
        if self.dqs:
            return self.dqs.pop()
    def _mq(self):
        """ Create a new priority queue instance, with in-memory storage """
        return create_instance(self.pqclass,
                               settings=None,
                               crawler=self.crawler,
                               downstream_queue_cls=self.mqclass,
                               key='')
    def _dq(self):
        """ Create a new priority queue instance, with disk storage """
        # Restore the priorities that were active when the previous run closed.
        state = self._read_dqs_state(self.dqdir)
        q = create_instance(self.pqclass,
                            settings=None,
                            crawler=self.crawler,
                            downstream_queue_cls=self.dqclass,
                            key=self.dqdir,
                            startprios=state)
        if q:
            logger.info("Resuming crawl (%(queuesize)d requests scheduled)",
                        {'queuesize': len(q)}, extra={'spider': self.spider})
        return q
    def _dqdir(self, jobdir):
        """ Return a folder name to keep disk queue state at """
        if jobdir:
            dqdir = join(jobdir, 'requests.queue')
            if not exists(dqdir):
                os.makedirs(dqdir)
            return dqdir
    def _read_dqs_state(self, dqdir):
        # Saved queue priorities from a previous run; empty tuple if fresh.
        path = join(dqdir, 'active.json')
        if not exists(path):
            return ()
        with open(path) as f:
            return json.load(f)
    def _write_dqs_state(self, dqdir, state):
        with open(join(dqdir, 'active.json'), 'w') as f:
            json.dump(state, f)
| {
"content_hash": "19707fa3eee9d5f52783c1383a45a336",
"timestamp": "",
"source": "github",
"line_count": 182,
"max_line_length": 83,
"avg_line_length": 38.32967032967033,
"alnum_prop": 0.5976204128440367,
"repo_name": "starrify/scrapy",
"id": "a18c26b1747eb6f2a705f260f4599db1ab64cdb2",
"size": "6976",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scrapy/core/scheduler.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "2076"
},
{
"name": "Python",
"bytes": "1466538"
},
{
"name": "Roff",
"bytes": "2010"
},
{
"name": "Shell",
"bytes": "259"
}
],
"symlink_target": ""
} |
import hashlib
import urllib
# Adapted from https://github.com/zzzsochi/Flask-Gravatar/blob/master/flaskext/gravatar.py
def gravatar(user, use_ssl=False, d=None, r=None, size=None):
    """Build a Gravatar avatar URL for *user*.

    :param user: a User instance (anything with a ``username`` attribute)
        or a plain username string.
    :param use_ssl: serve the avatar over HTTPS when True.
    :param d: Gravatar default-image style (``d`` query parameter);
        falls back to ``'identicon'`` when not given.  Previously this
        parameter was accepted but silently ignored.
    :param r: optional content rating (``g``, ``pg``, ``r`` or ``x``).
    :param size: optional pixel size of the (square) avatar; omitted from
        the URL when not given (the old code emitted a bogus ``s=None``).
    :return: the complete Gravatar URL as a string.
    """
    if use_ssl:
        base_url = 'https://secure.gravatar.com/avatar/'
    else:
        base_url = 'http://www.gravatar.com/avatar/'
    # user can be a User instance or a username string
    username = user.username if hasattr(user, 'username') else user
    try:
        text = unicode(username)  # noqa: F821 -- Python 2
    except NameError:
        text = str(username)  # Python 3: str is already unicode
    hash_code = hashlib.md5(text.encode('utf-8')).hexdigest()
    # Order of query params matters, due to a quirk with gravatar
    params = [
        ('d', d or 'identicon'),
    ]
    if size is not None:
        params.append(('s', size))
    if r:
        params.append(('r', r))
    try:
        query = urllib.urlencode(params)  # Python 2
    except AttributeError:
        from urllib.parse import urlencode  # Python 3
        query = urlencode(params)
    return base_url + hash_code + '?' + query
| {
"content_hash": "e32f713896bc9a507a2b2b59b39149d7",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 90,
"avg_line_length": 29.77777777777778,
"alnum_prop": 0.6281094527363185,
"repo_name": "petermalcolm/osf.io",
"id": "9c75b840145d78bcc250955450e06de47ea74c7d",
"size": "804",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "website/filters/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "117267"
},
{
"name": "HTML",
"bytes": "33688"
},
{
"name": "JavaScript",
"bytes": "1185942"
},
{
"name": "Mako",
"bytes": "525617"
},
{
"name": "Python",
"bytes": "3558146"
},
{
"name": "Shell",
"bytes": "1679"
}
],
"symlink_target": ""
} |
'''
Miscellaneous helper functions and constants for the VamPy project
'''
from os.path import dirname
from sys import argv
import math
# Directory the application was launched from (location of the main script).
OWNPATH = dirname(argv[0])
# The four sides of a rectangular region.
SIDES = ['left','right','top','bottom']
# File-dialog wildcard filter (pipe-separated description|pattern pairs).
DATWILDCARD = "Data files (TXT, CSV, DAT)|*.txt;*.TXT;*.csv;*.CSV;*.dat;*.DAT | All files (*.*)|*.*"
# Name of the configuration file used by the application.
CFG_FILENAME = 'vampy.cfg'
DEFAULT_SCALE = 0.31746 # micrometer/pixel, Teli CS3960DCL, 20x overall magnification, from the ruler
DEFAULT_PRESSACC = 0.00981 # 1 micrometer of water stack in Pascals
PIX_ERR = 0.5 # error for pixel resolution
def split_to_int(line, dflt=None):
    """Parse a whitespace-separated line of integers.

    :param line: string to parse; an empty string yields the defaults.
    :param dflt: optional sequence of default values; when given, exactly
        ``len(dflt)`` integers are expected on the line.
    :return: tuple ``(values, mesg)`` where ``values`` is a list of ints
        (or the defaults) and ``mesg`` is ``None`` on success, or a short
        warning string explaining why the defaults were used instead.
    """
    mesg = None
    if line == '':
        return dflt, mesg
    if dflt is None:
        strlst = line.split()
    else:
        Nval = len(dflt)
        strlst = line.split()[0:Nval]
        if len(strlst) != Nval:
            mesg = 'Wrong format, using defaults...'
            return list(dflt), mesg
    try:
        # List comprehension instead of map(): on Python 3 map() is lazy,
        # so a ValueError would escape the except clause below.
        value = [int(s) for s in strlst]
    except ValueError:
        # Guard against dflt=None, which the old code would crash on
        # (TypeError from list(None)).
        value = list(dflt) if dflt is not None else dflt
        mesg = 'Values could not be converted, using defaults...'
    return value, mesg
def grid_size(N):
    """Pick grid dimensions for N elements, keeping the grid near-square.

    @param N: number of elements to put on a grid
    @return: tuple ``(cols, rows)`` with ``cols * rows >= N``
    """
    # Columns: smallest integer whose square covers N.
    cols = int(math.ceil(math.sqrt(N)))
    # Rows: just enough to hold the remaining elements.
    rows = int(math.ceil(N / float(cols)))
    return cols, rows
| {
"content_hash": "87352d9bbf3bb8b8416e335e804e3a4b",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 102,
"avg_line_length": 30.58695652173913,
"alnum_prop": 0.6048329779673063,
"repo_name": "pshchelo/vampy",
"id": "81fd709ab3c35551b29671ca4c0123ce58556e86",
"size": "1407",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "calc/common.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "135036"
},
{
"name": "TeX",
"bytes": "38845"
}
],
"symlink_target": ""
} |
from keystoneauth1.exceptions import base
class AuthorizationFailure(base.ClientException):
    """Raised when the API client cannot be authorized."""
| {
"content_hash": "bf34c3451beaba2901cef0a802f460aa",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 49,
"avg_line_length": 23.666666666666668,
"alnum_prop": 0.7676056338028169,
"repo_name": "citrix-openstack-build/keystoneauth",
"id": "3550d64ab78c2b619e6b9c362267783ab87dfbbd",
"size": "688",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "keystoneauth1/exceptions/auth.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "393336"
}
],
"symlink_target": ""
} |
"""
Copyright (c) 2017 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import unicode_literals
from atomic_reactor.core import DockerTasker
from atomic_reactor.inner import DockerBuildWorkflow
from atomic_reactor.plugin import BuildCanceledException, PluginFailedException
from atomic_reactor.plugin import BuildStepPluginsRunner
from atomic_reactor.plugins import pre_reactor_config
from atomic_reactor.plugins.build_orchestrate_build import (OrchestrateBuildPlugin,
get_worker_build_info,
get_koji_upload_dir,
override_build_kwarg)
from atomic_reactor.plugins.pre_reactor_config import ReactorConfig
from atomic_reactor.plugins.pre_check_and_set_rebuild import CheckAndSetRebuildPlugin
from atomic_reactor.util import ImageName, df_parser
from atomic_reactor.constants import PLUGIN_ADD_FILESYSTEM_KEY
from flexmock import flexmock
from multiprocessing.pool import AsyncResult
from osbs.api import OSBS
from osbs.conf import Configuration
from osbs.build.build_response import BuildResponse
from osbs.exceptions import OsbsException
from tests.constants import MOCK_SOURCE, TEST_IMAGE, INPUT_IMAGE, SOURCE
from tests.docker_mock import mock_docker
from textwrap import dedent
from copy import deepcopy
import json
import os
import pytest
import time
import yaml
class MockSource(object):
    """Minimal stand-in for a build source rooted at *tmpdir*."""

    def __init__(self, tmpdir):
        root = str(tmpdir)
        self.path = root
        self.dockerfile_path = os.path.join(root, 'Dockerfile')

    def get_build_file_path(self):
        """Return (dockerfile path, source root), mirroring the real API."""
        return self.dockerfile_path, self.path
class MockInsideBuilder(object):
    """Stub builder wired to a docker tasker whose build streams one log line."""

    def __init__(self):
        mock_docker()
        self.tasker = DockerTasker()
        self.base_image = ImageName(repo='fedora', tag='25')
        self.image_id = 'image_id'
        self.image = INPUT_IMAGE
        self.df_path = 'df_path'
        self.df_dir = 'df_dir'

        def fake_build_stream(x, y):
            # Single UTF-8 encoded log chunk containing a non-ASCII char.
            yield "some\u2018".encode('utf-8')

        flexmock(self.tasker, build_image_from_path=fake_build_stream)

    def get_built_image_info(self):
        """Pretend the built image inspects to a fixed id."""
        return {'Id': 'some'}

    def inspect_built_image(self):
        """The mock provides no inspection data."""
        return None

    def ensure_not_built(self):
        """No-op: the mock never refuses to build."""
        pass
def mock_workflow(tmpdir):
    """Assemble a DockerBuildWorkflow with mocked builder/source in *tmpdir*.

    Writes a small Dockerfile, wires the mock source and builder into the
    workflow, and patches os.environ['BUILD'] with a custom-strategy build
    spec so the plugin can discover its own build image.
    """
    workflow = DockerBuildWorkflow(MOCK_SOURCE, TEST_IMAGE)
    builder = MockInsideBuilder()
    source = MockSource(tmpdir)
    setattr(builder, 'source', source)
    setattr(workflow, 'source', source)
    setattr(workflow, 'builder', builder)
    df_path = os.path.join(str(tmpdir), 'Dockerfile')
    with open(df_path, 'w') as f:
        f.write(dedent("""\
            FROM fedora:25
            LABEL com.redhat.component=python \
                  version=2.7 \
                  release=10
            """))
    df = df_parser(df_path)
    setattr(workflow.builder, 'df_path', df.dockerfile_path)
    # JSON whitespace is insignificant, so the continuation indent is harmless.
    build = '{"spec": {"strategy": {"customStrategy": {"from": \
             {"name": "some_image:latest", "kind": "DockerImage"}}}}}'
    flexmock(os, environ={'BUILD': build})
    return workflow
def mock_reactor_config(tmpdir, clusters=None):
    """Patch the reactor config and write a matching osbs.conf into *tmpdir*.

    :param clusters: mapping platform -> list of cluster dicts
        (``name``/``max_concurrent_builds``); a two-platform default is used
        when not given.  One INI section per cluster is written so the
        orchestrator can build per-cluster osbs configurations.
    """
    if not clusters:
        clusters = {
            'x86_64': [
                {
                    'name': 'worker_x86_64',
                    'max_concurrent_builds': 3
                }
            ],
            'ppc64le': [
                {
                    'name': 'worker_ppc64le',
                    'max_concurrent_builds': 3
                }
            ]
        }
    conf = ReactorConfig({'version': 1, 'clusters': clusters})
    (flexmock(pre_reactor_config)
     .should_receive('get_config')
     .and_return(conf))
    with open(os.path.join(str(tmpdir), 'osbs.conf'), 'w') as f:
        for platform, plat_clusters in clusters.items():
            for cluster in plat_clusters:
                f.write(dedent("""\
                    [{name}]
                    openshift_url = https://{name}.com/
                    namespace = {name}_namespace
                    """.format(name=cluster['name'])))
def mock_osbs(current_builds=2, worker_builds=1, logs_return_bytes=False, worker_expect=None):
    """Mock the OSBS client API used by the orchestrator plugin.

    :param current_builds: number of builds list_builds pretends are running.
    :param worker_builds: unused here; kept for call-site compatibility.
    :param logs_return_bytes: when True, get_build_logs yields bytes instead
        of text (both contain a non-ASCII quote character).
    :param worker_expect: when given, every create_worker_build call's kwargs
        (minus koji_upload_dir) are asserted to equal this dict.
    """
    (flexmock(OSBS)
     .should_receive('list_builds')
     .and_return(range(current_builds)))
    koji_upload_dirs = set()
    def mock_create_worker_build(**kwargs):
        # koji_upload_dir parameter must be identical for all workers
        koji_upload_dirs.add(kwargs.get('koji_upload_dir'))
        assert len(koji_upload_dirs) == 1
        if worker_expect:
            testkwargs = deepcopy(kwargs)
            testkwargs.pop('koji_upload_dir')
            assert testkwargs == worker_expect
        return make_build_response('worker-build-{}'.format(kwargs['platform']),
                                   'Running')
    (flexmock(OSBS)
     .should_receive('create_worker_build')
     .replace_with(mock_create_worker_build))
    if logs_return_bytes:
        log_format_string = b'line \xe2\x80\x98 - %d'
    else:
        log_format_string = 'line \u2018 - %d'
    (flexmock(OSBS)
     .should_receive('get_build_logs')
     .and_yield(log_format_string % line for line in range(10)))
    def mock_wait_for_build_to_finish(build_name):
        # Default outcome: every worker build completes successfully.
        return make_build_response(build_name, 'Complete')
    (flexmock(OSBS)
     .should_receive('wait_for_build_to_finish')
     .replace_with(mock_wait_for_build_to_finish))
def make_build_response(name, status, annotations=None, labels=None):
    """Construct an osbs BuildResponse named *name* in phase *status*."""
    payload = {
        'metadata': {
            'name': name,
            'annotations': annotations or {},
            'labels': labels or {},
        },
        'status': {'phase': status},
    }
    return BuildResponse(payload)
def make_worker_build_kwargs(**overrides):
    """Baseline worker-build kwargs; keyword arguments override the defaults."""
    kwargs = dict(
        git_uri=SOURCE['uri'],
        git_ref='master',
        git_branch='master',
        user='bacon',
        arrangement_version=1,
    )
    kwargs.update(overrides)
    return kwargs
@pytest.mark.parametrize('config_kwargs', [
    None,
    {},
    {'build_image': 'osbs-buildroot:latest'},
    {'build_image': 'osbs-buildroot:latest', 'sources_command': 'fedpkg source'},
])
@pytest.mark.parametrize('worker_build_image', [
    'fedora:latest',
    None
])
@pytest.mark.parametrize('logs_return_bytes', [
    True,
    False
])
def test_orchestrate_build(tmpdir, caplog, config_kwargs, worker_build_image, logs_return_bytes):
    """Single-platform happy path: run the plugin and check the osbs
    Configuration it builds, the resulting annotations, and that worker log
    records are tagged with the worker's architecture."""
    workflow = mock_workflow(tmpdir)
    mock_osbs(logs_return_bytes=logs_return_bytes)
    mock_reactor_config(tmpdir)
    plugin_args = {
        'platforms': ['x86_64'],
        'build_kwargs': make_worker_build_kwargs(),
        'osbs_client_config': str(tmpdir),
    }
    if worker_build_image:
        plugin_args['worker_build_image'] = worker_build_image
    if config_kwargs is not None:
        plugin_args['config_kwargs'] = config_kwargs
    runner = BuildStepPluginsRunner(
        workflow.builder.tasker,
        workflow,
        [{
            'name': OrchestrateBuildPlugin.key,
            'args': plugin_args
        }]
    )
    expected_kwargs = {
        'conf_section': 'worker_x86_64',
        'conf_file': tmpdir + '/osbs.conf',
    }
    # Update with config_kwargs last to ensure that, when set
    # always has precedence over worker_build_image param.
    if config_kwargs is not None:
        expected_kwargs.update(config_kwargs)
    # 'some_image:latest' comes from the BUILD env patched in mock_workflow.
    expected_kwargs['build_image'] = 'some_image:latest'
    (flexmock(Configuration).should_call('__init__').with_args(**expected_kwargs).once())
    build_result = runner.run()
    assert not build_result.is_failed()
    assert (build_result.annotations == {
        'worker-builds': {
            'x86_64': {
                'build': {
                    'build-name': 'worker-build-x86_64',
                    'cluster-url': 'https://worker_x86_64.com/',
                    'namespace': 'worker_x86_64_namespace'
                },
                'digests': [],
                'plugins-metadata': {}
            }
        }
    })
    assert (build_result.labels == {})
    build_info = get_worker_build_info(workflow, 'x86_64')
    assert build_info.osbs
    # Log records emitted while watching worker logs carry that worker's
    # arch; all other atomic_reactor records carry the placeholder '-'.
    for record in caplog.records():
        if not record.name.startswith("atomic_reactor"):
            continue
        assert hasattr(record, 'arch')
        if record.funcName == 'watch_logs':
            assert record.arch == 'x86_64'
        else:
            assert record.arch == '-'
@pytest.mark.parametrize('metadata_fragment', [
    True,
    False
])
def test_orchestrate_build_annotations_and_labels(tmpdir, metadata_fragment):
    """Worker annotations (repositories, digests, optional metadata fragment)
    and labels must be merged into the orchestrator's build result."""
    workflow = mock_workflow(tmpdir)
    mock_osbs()
    md = {
        'metadata_fragment': 'configmap/spam-md',
        'metadata_fragment_key': 'metadata.json'
    }
    def mock_wait_for_build_to_finish(build_name):
        # Each worker reports per-build repositories/digests; optionally a
        # metadata fragment reference as well.
        annotations = {
            'repositories': json.dumps({
                'unique': ['{}-unique'.format(build_name)],
                'primary': ['{}-primary'.format(build_name)],
            }),
            'digests': json.dumps([
                {
                    'digest': 'sha256:{}-digest'.format(build_name),
                    'tag': '{}-latest'.format(build_name),
                    'registry': '{}-registry'.format(build_name),
                    'repository': '{}-repository'.format(build_name),
                },
            ]),
        }
        if metadata_fragment:
            annotations.update(md)
        labels = {'koji-build-id': 'koji-build-id'}
        return make_build_response(build_name, 'Complete', annotations, labels)
    (flexmock(OSBS)
     .should_receive('wait_for_build_to_finish')
     .replace_with(mock_wait_for_build_to_finish))
    mock_reactor_config(tmpdir)
    runner = BuildStepPluginsRunner(
        workflow.builder.tasker,
        workflow,
        [{
            'name': OrchestrateBuildPlugin.key,
            'args': {
                'platforms': ['x86_64', 'ppc64le'],
                'build_kwargs': make_worker_build_kwargs(),
                'osbs_client_config': str(tmpdir),
                'max_cluster_fails': 2,
                'unreachable_cluster_retry_delay': .1
            }
        }]
    )
    build_result = runner.run()
    assert not build_result.is_failed()
    expected = {
        'worker-builds': {
            'x86_64': {
                'build': {
                    'build-name': 'worker-build-x86_64',
                    'cluster-url': 'https://worker_x86_64.com/',
                    'namespace': 'worker_x86_64_namespace'
                },
                'digests': [
                    {
                        'digest': 'sha256:worker-build-x86_64-digest',
                        'tag': 'worker-build-x86_64-latest',
                        'registry': 'worker-build-x86_64-registry',
                        'repository': 'worker-build-x86_64-repository',
                    },
                ],
                'plugins-metadata': {}
            },
            'ppc64le': {
                'build': {
                    'build-name': 'worker-build-ppc64le',
                    'cluster-url': 'https://worker_ppc64le.com/',
                    'namespace': 'worker_ppc64le_namespace'
                },
                'digests': [
                    {
                        'digest': 'sha256:worker-build-ppc64le-digest',
                        'tag': 'worker-build-ppc64le-latest',
                        'registry': 'worker-build-ppc64le-registry',
                        'repository': 'worker-build-ppc64le-repository',
                    },
                ],
                'plugins-metadata': {}
            },
        },
        'repositories': {
            'unique': [
                'worker-build-ppc64le-unique',
                'worker-build-x86_64-unique',
            ],
            'primary': [
                'worker-build-ppc64le-primary',
                'worker-build-x86_64-primary',
            ],
        },
    }
    if metadata_fragment:
        expected['worker-builds']['x86_64'].update(md)
        expected['worker-builds']['ppc64le'].update(md)
    assert (build_result.annotations == expected)
    assert (build_result.labels == {'koji-build-id': 'koji-build-id'})
    build_info = get_worker_build_info(workflow, 'x86_64')
    assert build_info.osbs
    # All workers must have agreed on a single koji upload directory.
    koji_upload_dir = get_koji_upload_dir(workflow)
    assert koji_upload_dir
def test_orchestrate_choose_cluster_retry(tmpdir):
    """Cluster selection retries after transient list_builds failures:
    two OsbsExceptions followed by a success must not fail the run."""
    mock_osbs()
    (flexmock(OSBS).should_receive('list_builds')
     .and_raise(OsbsException)
     .and_raise(OsbsException)
     .and_return([1, 2, 3]))
    workflow = mock_workflow(tmpdir)
    mock_reactor_config(tmpdir, {
        'x86_64': [
            {'name': cluster[0], 'max_concurrent_builds': cluster[1]}
            for cluster in [('chosen_x86_64', 5), ('spam', 4)]
        ],
        'ppc64le': [
            {'name': cluster[0], 'max_concurrent_builds': cluster[1]}
            for cluster in [('chosen_ppc64le', 5), ('ham', 5)]
        ]
    })
    runner = BuildStepPluginsRunner(
        workflow.builder.tasker,
        workflow,
        [{
            'name': OrchestrateBuildPlugin.key,
            'args': {
                'platforms': ['x86_64', 'ppc64le'],
                'build_kwargs': make_worker_build_kwargs(),
                'osbs_client_config': str(tmpdir),
                'find_cluster_retry_delay': .1,
                'max_cluster_fails': 2
            }
        }]
    )
    runner.run()
def test_orchestrate_choose_cluster_retry_timeout(tmpdir):
    """When every cluster keeps failing list_builds, the plugin exhausts its
    retries and fails with a 'no appropriate cluster' reason."""
    (flexmock(OSBS).should_receive('list_builds')
     .and_raise(OsbsException)
     .and_raise(OsbsException)
     .and_raise(OsbsException))
    workflow = mock_workflow(tmpdir)
    mock_reactor_config(tmpdir, {
        'x86_64': [
            {'name': cluster[0], 'max_concurrent_builds': cluster[1]}
            for cluster in [('chosen_x86_64', 5), ('spam', 4)]
        ],
        'ppc64le': [
            {'name': cluster[0], 'max_concurrent_builds': cluster[1]}
            for cluster in [('chosen_ppc64le', 5), ('ham', 5)]
        ]
    })
    runner = BuildStepPluginsRunner(
        workflow.builder.tasker,
        workflow,
        [{
            'name': OrchestrateBuildPlugin.key,
            'args': {
                'platforms': ['x86_64', 'ppc64le'],
                'build_kwargs': make_worker_build_kwargs(),
                'osbs_client_config': str(tmpdir),
                'find_cluster_retry_delay': .1,
                'max_cluster_fails': 2
            }
        }]
    )
    build_result = runner.run()
    assert build_result.is_failed()
    fail_reason = json.loads(build_result.fail_reason)['ppc64le']['general']
    assert 'Could not find appropriate cluster for worker build.' in fail_reason
def test_orchestrate_build_cancelation(tmpdir):
    """A BuildCanceledException raised while waiting for worker results must
    make the plugin cancel the in-flight worker build exactly once."""
    workflow = mock_workflow(tmpdir)
    mock_osbs()
    mock_reactor_config(tmpdir)
    runner = BuildStepPluginsRunner(
        workflow.builder.tasker,
        workflow,
        [{
            'name': OrchestrateBuildPlugin.key,
            'args': {
                'platforms': ['x86_64'],
                'build_kwargs': make_worker_build_kwargs(),
                'osbs_client_config': str(tmpdir),
            }
        }]
    )
    def mock_wait_for_build_to_finish(build_name):
        # Keep the worker build perpetually 'Running' so cancelation applies.
        return make_build_response(build_name, 'Running')
    (flexmock(OSBS)
     .should_receive('wait_for_build_to_finish')
     .replace_with(mock_wait_for_build_to_finish))
    flexmock(OSBS).should_receive('cancel_build').once()
    (flexmock(AsyncResult).should_receive('ready')
     .and_return(False)  # normal execution
     .and_return(False)  # after cancel_build
     .and_return(True))  # finally succeed
    class RaiseOnce(object):
        """
        Only raise an exception the first time this mocked wait() method
        is called.
        """
        def __init__(self):
            self.exception_raised = False
        def get(self, timeout=None):
            time.sleep(0.1)
            if not self.exception_raised:
                self.exception_raised = True
                raise BuildCanceledException()
    raise_once = RaiseOnce()
    (flexmock(AsyncResult).should_receive('get')
     .replace_with(raise_once.get))
    with pytest.raises(PluginFailedException) as exc:
        runner.run()
    assert 'BuildCanceledException' in str(exc)
@pytest.mark.parametrize(('clusters_x86_64'), (
    ([('chosen_x86_64', 5), ('spam', 4)]),
    ([('chosen_x86_64', 5000), ('spam', 4)]),
    ([('spam', 4), ('chosen_x86_64', 5)]),
    ([('chosen_x86_64', 5), ('spam', 4), ('bacon', 4)]),
    ([('chosen_x86_64', 5), ('spam', 5)]),
    ([('chosen_x86_64', 1), ('spam', 1)]),
    ([('chosen_x86_64', 2), ('spam', 2)]),
))
@pytest.mark.parametrize(('clusters_ppc64le'), (
    ([('chosen_ppc64le', 7), ('eggs', 6)]),
))
def test_orchestrate_build_choose_clusters(tmpdir, clusters_x86_64,
                                           clusters_ppc64le):
    """With list_builds reporting a constant load, the plugin must always
    pick the 'chosen_*' cluster for each platform regardless of ordering,
    ties, or capacity extremes."""
    workflow = mock_workflow(tmpdir)
    mock_osbs()  # Current builds is a constant 2
    mock_reactor_config(tmpdir, {
        'x86_64': [
            {'name': cluster[0], 'max_concurrent_builds': cluster[1]}
            for cluster in clusters_x86_64
        ],
        'ppc64le': [
            {'name': cluster[0], 'max_concurrent_builds': cluster[1]}
            for cluster in clusters_ppc64le
        ]
    })
    runner = BuildStepPluginsRunner(
        workflow.builder.tasker,
        workflow,
        [{
            'name': OrchestrateBuildPlugin.key,
            'args': {
                'platforms': ['x86_64', 'ppc64le'],
                'build_kwargs': make_worker_build_kwargs(),
                'osbs_client_config': str(tmpdir),
            }
        }]
    )
    build_result = runner.run()
    assert not build_result.is_failed()
    annotations = build_result.annotations
    assert set(annotations['worker-builds'].keys()) == set(['x86_64', 'ppc64le'])
    for platform, plat_annotations in annotations['worker-builds'].items():
        assert plat_annotations['build']['cluster-url'] == 'https://chosen_{}.com/'.format(platform)
@pytest.mark.parametrize(('platforms', 'platform_exclude', 'platform_only', 'result'), [
    (['x86_64', 'powerpc64le'], '', 'powerpc64le', ['powerpc64le']),
    (['x86_64', 'spam', 'bacon', 'toast', 'powerpc64le'], ['spam', 'bacon', 'eggs', 'toast'], '',
     ['x86_64', 'powerpc64le']),
    (['powerpc64le', 'spam', 'bacon', 'toast'], ['spam', 'bacon', 'eggs', 'toast'], 'powerpc64le',
     ['powerpc64le']),
    (['x86_64', 'bacon', 'toast'], 'toast', ['x86_64', 'powerpc64le'], ['x86_64']),
    (['x86_64', 'toast'], 'toast', 'x86_64', ['x86_64']),
    (['x86_64', 'spam', 'bacon', 'toast'], ['spam', 'bacon', 'eggs', 'toast'], ['x86_64',
                                                                                'powerpc64le'],
     ['x86_64']),
    (['x86_64', 'powerpc64le'], '', '', ['x86_64', 'powerpc64le'])
])
def test_orchestrate_build_exclude_platforms(tmpdir, platforms, platform_exclude, platform_only,
                                             result):
    """The 'platforms: not/only' filters from container.yaml must reduce the
    requested platform list to exactly *result*."""
    workflow = mock_workflow(tmpdir)
    mock_osbs()
    reactor_config = {
        'x86_64': [
            {
                'name': 'worker01',
                'max_concurrent_builds': 3
            }
        ],
        'powerpc64le': [
            {
                'name': 'worker02',
                'max_concurrent_builds': 3
            }
        ]
    }
    # Register clusters for the throwaway platforms too, so a filtering bug
    # would surface as an unexpected worker build rather than a lookup error.
    for exclude in ('spam', 'bacon', 'eggs'):
        reactor_config[exclude] = [
            {'name': 'worker-{}'.format(exclude), 'max_concurrent_builds': 3}
        ]
    mock_reactor_config(tmpdir, reactor_config)
    platforms_dict = {}
    if platform_exclude != '':
        platforms_dict['platforms'] = {}
        platforms_dict['platforms']['not'] = platform_exclude
    if platform_only != '':
        if 'platforms' not in platforms_dict:
            platforms_dict['platforms'] = {}
        platforms_dict['platforms']['only'] = platform_only
    with open(os.path.join(str(tmpdir), 'container.yaml'), 'w') as f:
        f.write(yaml.safe_dump(platforms_dict))
        f.flush()
    runner = BuildStepPluginsRunner(
        workflow.builder.tasker,
        workflow,
        [{
            'name': OrchestrateBuildPlugin.key,
            'args': {
                # Explicitly leaving off 'eggs' platform to
                # ensure no errors occur when unknown platform
                # is provided in container.yaml file.
                'platforms': platforms,
                'build_kwargs': make_worker_build_kwargs(),
                'osbs_client_config': str(tmpdir),
            }
        }]
    )
    build_result = runner.run()
    assert not build_result.is_failed()
    annotations = build_result.annotations
    assert set(annotations['worker-builds'].keys()) == set(result)
def test_orchestrate_build_unknown_platform(tmpdir):
    """Requesting a platform with no configured clusters must fail the
    plugin run with a clear error message."""
    workflow = mock_workflow(tmpdir)
    mock_osbs()
    mock_reactor_config(tmpdir)
    runner = BuildStepPluginsRunner(
        workflow.builder.tasker,
        workflow,
        [{
            'name': OrchestrateBuildPlugin.key,
            'args': {
                # Explicitly leaving off 'eggs' platform to
                # ensure no errors occur when unknow platform
                # is provided in exclude-platform file.
                'platforms': ['x86_64', 'spam'],
                'build_kwargs': make_worker_build_kwargs(),
                'osbs_client_config': str(tmpdir),
            }
        }]
    )
    with pytest.raises(PluginFailedException) as exc:
        runner.run()
    assert "'No clusters found for platform spam!'" in str(exc)
def test_orchestrate_build_failed_create(tmpdir):
    """If create_worker_build fails for one platform, the run fails overall
    but the successful platform's annotations are still recorded."""
    workflow = mock_workflow(tmpdir)
    mock_osbs()
    def mock_create_worker_build(**kwargs):
        # Fail only the ppc64le worker; x86_64 starts normally.
        if kwargs['platform'] == 'ppc64le':
            raise OsbsException('it happens')
        return make_build_response('worker-build-1', 'Running')
    (flexmock(OSBS)
     .should_receive('create_worker_build')
     .replace_with(mock_create_worker_build))
    fail_reason = 'build not started'
    annotation_keys = set(['x86_64'])
    mock_reactor_config(tmpdir)
    runner = BuildStepPluginsRunner(
        workflow.builder.tasker,
        workflow,
        [{
            'name': OrchestrateBuildPlugin.key,
            'args': {
                'platforms': ['x86_64', 'ppc64le'],
                'build_kwargs': make_worker_build_kwargs(),
                'osbs_client_config': str(tmpdir),
                'find_cluster_retry_delay': .1,
                'failure_retry_delay': .1
            }
        }]
    )
    build_result = runner.run()
    assert build_result.is_failed()
    annotations = build_result.annotations
    assert set(annotations['worker-builds'].keys()) == annotation_keys
    fail_reason = json.loads(build_result.fail_reason)['ppc64le']['general']
    assert "Could not find appropriate cluster for worker build." in fail_reason
@pytest.mark.parametrize('pod_available,pod_failure_reason,expected,cancel_fails', [
    # get_pod_for_build() returns error
    (False,
     None,
     KeyError,
     False),
    # get_failure_reason() not available in PodResponse
    (True,
     AttributeError("'module' object has no attribute 'get_failure_reason'"),
     KeyError,
     False),
    # get_failure_reason() result used
    (True,
     {
         'reason': 'reason message',
         'exitCode': 23,
         'containerID': 'abc123',
     },
     {
         'reason': 'reason message',
         'exitCode': 23,
         'containerID': 'abc123',
     },
     False),
    # cancel_build() fails (and failure is ignored)
    (True,
     {
         'reason': 'reason message',
         'exitCode': 23,
         'containerID': 'abc123',
     },
     {
         'reason': 'reason message',
         'exitCode': 23,
         'containerID': 'abc123',
     },
     True)
])
def test_orchestrate_build_failed_waiting(tmpdir,
                                          pod_available,
                                          pod_failure_reason,
                                          cancel_fails,
                                          expected):
    """A worker failing while awaited should surface the pod failure details
    in the fail reason when available, and tolerate cancel_build errors."""
    workflow = mock_workflow(tmpdir)
    mock_osbs()
    class MockPodResponse(object):
        # Stand-in whose get_failure_reason() either raises or returns the
        # parametrized payload.
        def __init__(self, pod_failure_reason):
            self.pod_failure_reason = pod_failure_reason
        def get_failure_reason(self):
            if isinstance(self.pod_failure_reason, Exception):
                raise self.pod_failure_reason
            return self.pod_failure_reason
    def mock_wait_for_build_to_finish(build_name):
        if build_name == 'worker-build-ppc64le':
            raise OsbsException('it happens')
        return make_build_response(build_name, 'Failed')
    (flexmock(OSBS)
     .should_receive('wait_for_build_to_finish')
     .replace_with(mock_wait_for_build_to_finish))
    cancel_build_expectation = flexmock(OSBS).should_receive('cancel_build')
    if cancel_fails:
        cancel_build_expectation.and_raise(OsbsException)
    cancel_build_expectation.once()
    expectation = flexmock(OSBS).should_receive('get_pod_for_build')
    if pod_available:
        expectation.and_return(MockPodResponse(pod_failure_reason))
    else:
        expectation.and_raise(OsbsException())
    mock_reactor_config(tmpdir)
    runner = BuildStepPluginsRunner(
        workflow.builder.tasker,
        workflow,
        [{
            'name': OrchestrateBuildPlugin.key,
            'args': {
                'platforms': ['x86_64', 'ppc64le'],
                'build_kwargs': make_worker_build_kwargs(),
                'osbs_client_config': str(tmpdir),
            }
        }]
    )
    build_result = runner.run()
    assert build_result.is_failed()
    annotations = build_result.annotations
    assert set(annotations['worker-builds'].keys()) == {'x86_64', 'ppc64le'}
    fail_reason = json.loads(build_result.fail_reason)['ppc64le']
    # 'pod' details appear only when the pod and its failure reason were
    # retrievable; otherwise the key is absent entirely.
    if expected is KeyError:
        assert 'pod' not in fail_reason
    else:
        assert fail_reason['pod'] == expected
@pytest.mark.parametrize(('task_id', 'error'), [
    ('1234567', None),
    ('bacon', 'ValueError'),
    (None, 'TypeError'),
])
def test_orchestrate_build_get_fs_task_id(tmpdir, task_id, error):
    """The filesystem koji task id from the add_filesystem plugin must parse
    as an integer; bad values fail the run with the matching exception."""
    workflow = mock_workflow(tmpdir)
    mock_osbs()
    mock_reactor_config(tmpdir)
    workflow.prebuild_results[PLUGIN_ADD_FILESYSTEM_KEY] = {
        'filesystem-koji-task-id': task_id,
    }
    runner = BuildStepPluginsRunner(
        workflow.builder.tasker,
        workflow,
        [{
            'name': OrchestrateBuildPlugin.key,
            'args': {
                'platforms': ['x86_64'],
                'build_kwargs': make_worker_build_kwargs(),
                'osbs_client_config': str(tmpdir),
            }
        }]
    )
    if error is not None:
        with pytest.raises(PluginFailedException) as exc:
            runner.run()
        workflow.build_result.is_failed()
        assert error in str(exc)
    else:
        build_result = runner.run()
        assert not build_result.is_failed()
@pytest.mark.parametrize('fail_at', ('all', 'first'))
def test_orchestrate_build_failed_to_list_builds(tmpdir, fail_at):
    """Failing cluster load queries either skip the cluster or fail the build.

    ``fail_at='first'``: only the first ``list_builds`` call raises, so the
    first cluster ('spam') is dropped and the build lands on 'eggs'.
    ``fail_at='all'``: every call raises and no cluster can be selected.

    BUGFIX: the previous version had ``fail_at == 'build_canceled'`` branches
    that were unreachable (the parametrize list only supplies 'all'/'first')
    and whose assertion referenced an undefined name ``exc`` — it would have
    raised NameError had it ever run. The dead branches are removed.
    """
    workflow = mock_workflow(tmpdir)
    mock_osbs()  # Current builds is a constant 2
    mock_reactor_config(tmpdir, {
        'x86_64': [
            {'name': 'spam', 'max_concurrent_builds': 5},
            {'name': 'eggs', 'max_concurrent_builds': 5}
        ],
    })

    # The first list_builds call always fails...
    flexmock_chain = flexmock(OSBS).should_receive('list_builds').and_raise(OsbsException("foo"))
    if fail_at == 'all':
        # ...and so does every subsequent one.
        flexmock_chain.and_raise(OsbsException("foo"))
    else:
        # fail_at == 'first': subsequent calls succeed.
        flexmock_chain.and_return(['a', 'b'])

    runner = BuildStepPluginsRunner(
        workflow.builder.tasker,
        workflow,
        [{
            'name': OrchestrateBuildPlugin.key,
            'args': {
                'platforms': ['x86_64'],
                'build_kwargs': make_worker_build_kwargs(),
                'osbs_client_config': str(tmpdir),
                'find_cluster_retry_delay': .1,
                'max_cluster_fails': 2
            }
        }]
    )
    build_result = runner.run()

    if fail_at == 'first':
        assert not build_result.is_failed()
        annotations = build_result.annotations
        assert annotations['worker-builds']['x86_64']['build']['cluster-url'] == 'https://eggs.com/'
    else:
        assert build_result.is_failed()
        assert 'Could not find appropriate cluster for worker build.' \
            in build_result.fail_reason
@pytest.mark.parametrize('is_auto', [
    True,
    False
])
def test_orchestrate_build_worker_build_kwargs(tmpdir, caplog, is_auto):
    """The orchestrator forwards the expected kwargs to the worker build.

    ``is_auto`` comes from the check_and_set_rebuild pre-build plugin result
    and must be passed through to the worker unchanged; mock_osbs() asserts
    the worker receives exactly ``expected_kwargs``.
    """
    workflow = mock_workflow(tmpdir)

    # Kwargs the mocked OSBS client will require the worker build to get.
    expected_kwargs = {
        'git_uri': SOURCE['uri'],
        'git_ref': 'master',
        'git_branch': 'master',
        'user': 'bacon',
        'is_auto': is_auto,
        'platform': 'x86_64',
        'release': '10',
        'arrangement_version': 1
    }
    mock_osbs(worker_expect=expected_kwargs)
    mock_reactor_config(tmpdir)

    plugin_args = {
        'platforms': ['x86_64'],
        'build_kwargs': make_worker_build_kwargs(),
        'worker_build_image': 'fedora:latest',
        'osbs_client_config': str(tmpdir),
    }

    runner = BuildStepPluginsRunner(
        workflow.builder.tasker,
        workflow,
        [{
            'name': OrchestrateBuildPlugin.key,
            'args': plugin_args,
        }]
    )

    # Simulate the check_and_set_rebuild plugin having run.
    workflow.prebuild_results[CheckAndSetRebuildPlugin.key] = is_auto

    build_result = runner.run()
    assert not build_result.is_failed()
def test_orchestrate_override_build_kwarg(tmpdir):
    """override_build_kwarg() must win over the value from build_kwargs."""
    workflow = mock_workflow(tmpdir)

    # The worker must receive the overridden release value '4242' rather
    # than the one supplied via make_worker_build_kwargs().
    worker_kwargs = {
        'git_uri': SOURCE['uri'],
        'git_ref': 'master',
        'git_branch': 'master',
        'user': 'bacon',
        'is_auto': False,
        'platform': 'x86_64',
        'release': '4242',
        'arrangement_version': 1
    }
    mock_osbs(worker_expect=worker_kwargs)
    mock_reactor_config(tmpdir)

    override_build_kwarg(workflow, 'release', '4242')

    plugins_runner = BuildStepPluginsRunner(
        workflow.builder.tasker,
        workflow,
        [{
            'name': OrchestrateBuildPlugin.key,
            'args': {
                'platforms': ['x86_64'],
                'build_kwargs': make_worker_build_kwargs(),
                'worker_build_image': 'fedora:latest',
                'osbs_client_config': str(tmpdir),
            },
        }]
    )

    result = plugins_runner.run()
    assert not result.is_failed()
@pytest.mark.parametrize(('build', 'exc'), [
    ('{"spec": {"strategy": {"customStrategy": {"from": \
        {"name": "osbs-buildroot:latest", "kind": "DockerImage"}}}}}',
     None),
    ('{"spec": {"strategy": {"customStrategy": {"from": {"name": "osbs-buildroot:latest"}}}}}',
     'RuntimeError'),
    ('{"spec": {"strategy": {"customStrategy": {"from": \
        {"name": "osbs-buildroot:latest", "kind": "ImageStreamTag"}}}}}',
     'RuntimeError'),
    ('{"spec": {"strategy": {"customStrategy": {"from": \
        {"name": "osbs-buildroot:latest", "kind": "wrong_kind"}}}}}',
     'RuntimeError'),
    ('{"spec": {}}',
     'RuntimeError'),
    ('{"spec": {"strategy": {}}}',
     'RuntimeError'),
    ('{"spec": {"strategy": {"customStrategy": {}}}}',
     'RuntimeError'),
    ('{"spec": {"strategy": {"customStrategy": {"from": {}}}}}',
     'RuntimeError'),
])
def test_set_build_image(tmpdir, build, exc):
    """The orchestrator derives its build image from the BUILD env variable.

    Parametrized over BUILD payloads: only a customStrategy 'from' of kind
    'DockerImage' is accepted; anything missing or of a different kind must
    fail the plugin with a RuntimeError.
    """
    workflow = mock_workflow(tmpdir)
    # Inject the BUILD spec the plugin will inspect.
    flexmock(os, environ={'BUILD': build})

    expected_kwargs = {
        'git_uri': SOURCE['uri'],
        'git_ref': 'master',
        'git_branch': 'master',
        'user': 'bacon',
        'is_auto': False,
        'platform': 'x86_64',
        'release': '4242',
        'arrangement_version': 1
    }
    mock_osbs(worker_expect=expected_kwargs)
    mock_reactor_config(tmpdir)

    plugin_args = {
        'platforms': ['x86_64'],
        'build_kwargs': make_worker_build_kwargs(),
        'worker_build_image': 'osbs-buildroot:latest',
        'osbs_client_config': str(tmpdir),
    }

    runner = BuildStepPluginsRunner(
        workflow.builder.tasker,
        workflow,
        [{
            'name': OrchestrateBuildPlugin.key,
            'args': plugin_args,
        }]
    )

    if not exc:
        runner.run()
    else:
        # The exception type name must show up in the plugin failure message.
        exc_str = "raised an exception: %s" % exc
        with pytest.raises(PluginFailedException) as ex:
            runner.run()
        assert exc_str in str(ex)
| {
"content_hash": "52412f1826e914f8f8aaf94bea5ae325",
"timestamp": "",
"source": "github",
"line_count": 1062,
"max_line_length": 100,
"avg_line_length": 31.34463276836158,
"alnum_prop": 0.5527817832251862,
"repo_name": "vrutkovs/atomic-reactor",
"id": "fc1fb57c6715d3bd6fa7d88e02958e2dd0e33e36",
"size": "33288",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/plugins/test_orchestrate_build.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1413753"
},
{
"name": "Shell",
"bytes": "6571"
}
],
"symlink_target": ""
} |
import csb.test as test
from csb.bio.io import ClansParser
from csb.bio.io.clans import Clans, ClansEntry, ClansParams, ClansSeqgroup,\
Color, ClansEntryCollection, ClansSeqgroupCollection, DuplicateEntryError,\
DuplicateEntryNameError
@test.unit
class TestClansColor(test.Case):
    """Unit tests for csb.bio.io.clans.Color."""

    def setUp(self):
        super(TestClansColor, self).setUp()

    def testColorInit(self):
        """A default Color is black (all channels zero)."""
        black = Color()
        for channel_value in (black.r, black.g, black.b):
            self.assertEqual(channel_value, 0)

    def testColorSetter(self):
        """Channel values outside 0..255 are rejected with ValueError."""
        color = Color()
        for channel_name in ('r', 'g', 'b'):
            for out_of_range in (-1, 256):
                self.assertRaises(
                    ValueError, color.__setattr__, channel_name, out_of_range)

    def testParseClansColorWithCorrectInput(self):
        """from_string() parses a well-formed 'r;g;b' triple."""
        parsed = Color.from_string('83;92;3')
        self.assertEqual(parsed.r, 83)
        self.assertEqual(parsed.g, 92)
        self.assertEqual(parsed.b, 3)

    def testParseClansColorWithWrongInput(self):
        """from_string() rejects non-string and malformed input."""
        color = Color()
        # not a string at all
        self.assertRaises(TypeError,
                          color.from_string, (83, 92, 3))
        # trailing separator
        self.assertRaises(ValueError, color.from_string, '83;92;3;')
        # too few fields
        self.assertRaises(ValueError, color.from_string, '83;92')

    def testToClansColor(self):
        """to_clans_color() renders the channels as 'r;g;b;a'."""
        color = Color()
        self.assertEqual(color.to_clans_color(), '0;0;0;255')

        expected = (83, 92, 3, 87)
        color.r, color.g, color.b, color.a = expected
        self.assertEqual(color.to_clans_color(),
                         ';'.join(map(str, expected)))
@test.functional
class TestClansParams(test.Case):
    """Functional tests for csb.bio.io.clans.ClansParams."""

    def setUp(self):
        super(TestClansParams, self).setUp()

    def testInstatiation(self):
        """A fresh ClansParams carries the documented default values."""
        params = ClansParams()
        for name, default in params._DEFAULTS.items():
            # 'colors' is excluded from the plain default comparison;
            # presumably it is post-processed on assignment — TODO confirm.
            if name == 'colors':
                continue
            self.assertEqual(getattr(params, name), default)

    def testUnknownParamFail(self):
        """Unknown keyword parameters are rejected with KeyError."""
        self.assertRaises(KeyError, ClansParams, **{'unknownParam': True})

    def testForbiddenAssignments(self):
        """Every parameter rejects a value of the wrong type."""
        bad_values = [
            ('attfactor', 'a'), ('attvalpow', 'a'), ('avgfoldchange', 'a'),
            ('blastpath', 3), ('cluster2d', 'a'), ('colors', 'a'),
            ('complexatt', 'a'), ('cooling', 'a'), ('currcool', 'a'),
            ('dampening', 'a'), ('dotsize', 'a'), ('formatdbpath', 3),
            ('groupsize', 'a'), ('maxmove', 'a'), ('minattract', 'a'),
            ('ovalsize', 'a'), ('pval', 'a'), ('repfactor', 'a'),
            ('repvalpow', 'a'), ('showinfo', 'a'), ('usefoldchange', 'a'),
            ('usescval', 'a'), ('zoom', 'a'),
        ]
        for name, value in bad_values:
            self.assertRaises(ValueError, ClansParams, **{name: value})
@test.functional
class TestClans(test.Case):
    """Functional tests for the top-level L{Clans} container."""

    def setUp(self):
        super(TestClans, self).setUp()

    def testClansInit(self):
        '''
        Test creating an empty L{Clans} instance.
        '''
        c = Clans()
        param_names = ['attfactor', 'attvalpow', 'avgfoldchange', 'blastpath',
                       'cluster2d', 'colors', 'complexatt', 'cooling',
                       'currcool', 'dampening', 'dotsize', 'formatdbpath',
                       'groupsize', 'maxmove', 'minattract', 'ovalsize',
                       'pval', 'repfactor', 'repvalpow', 'showinfo',
                       'usefoldchange', 'usescval', 'zoom']
        for param_name in param_names:
            self.assertTrue(hasattr(c.params, param_name))
        self.assertEqual(c.filename, None)
        self.assertEqual(c.rotmtx.shape, (3, 3))
        self.assertEqual(len(c.entries), 0)
        self.assertEqual(len(c.seqgroups), 0)
        self.assertTrue(isinstance(c.entries, ClansEntryCollection))
        self.assertTrue(isinstance(c.seqgroups, ClansSeqgroupCollection))

    def testClansEntryAddingAndSorting(self):
        """sort() orders entries by name."""
        c = Clans()
        names = ['a', 'b', 'c', 'd', 'e', 'f', 'g']
        shuffled_names = ['g', 'f', 'b', 'd', 'e', 'c', 'a']
        for name in shuffled_names:
            c.add_entry(ClansEntry(name=name))
        c.sort()
        for i, e in enumerate(c):
            self.assertEqual(e.name, names[i])

    def testClansEntrySortingWithCustomKeyFunction(self):
        """sort(key=...) orders entries by the supplied key function."""
        c = Clans()
        sequences = ['a', 'b', 'c', 'd', 'e', 'f', 'g']
        shuffled_sequences = ['g', 'f', 'b', 'd', 'e', 'c', 'a']
        for i, sequence in enumerate(sequences):
            c.add_entry(ClansEntry(name=str(i), seq=shuffled_sequences[i]))

        # PEP 8 (E731): a def instead of a name-bound lambda.
        def custom_key_function(e):
            return e.seq  # sort by sequence instead of name

        c.sort(key=custom_key_function)
        for i, e in enumerate(c):
            self.assertEqual(e.seq, sequences[i])

    def testGetEntry(self):
        """get_entry() finds entries by name and flags duplicate names."""
        c = Clans()

        ## get non-existant entry from empty clans instance
        self.assertRaises(ValueError, c.get_entry, 'a')

        names = ['a', 'b', 'c', 'd', 'e', 'f', 'g']
        entries = [ClansEntry(name=name) for name in names]
        # Plain loop instead of a list comprehension used only for its
        # side effects.
        for e in entries:
            c.add_entry(e)

        ## check whether entries fetched by name match those created
        for i, name in enumerate(names):
            self.assertEqual(c.get_entry(name), entries[i])

        ## check pedantic flag for duplicate name='a' entries
        c.add_entry(ClansEntry(name='a'))
        self.assertTrue(c.get_entry('a', False).name == 'a')
        self.assertRaises(DuplicateEntryNameError, c.get_entry, 'a', True)

    def testDuplicateEntryError(self):
        """Adding the identical entry twice makes _update_index() raise."""
        c = Clans()
        e = ClansEntry(name='a', seq='A', coords=(1., 1., 1.))
        c.add_entry(e)
        c.add_entry(e)
        original_length = len(c)
        self.assertRaises(DuplicateEntryError, c._update_index)
        # The failed re-index must not change the entry count.
        self.assertEqual(original_length, len(c))
@test.functional
class TestClansSeqgroup(test.Case):
    """Functional tests for ClansSeqgroup membership management."""

    def setUp(self):
        super(TestClansSeqgroup, self).setUp()

    def testInit(self):
        # A freshly created seqgroup has no members.
        sg = ClansSeqgroup()
        self.assertTrue(sg.is_empty())

    def testAddingAndRemovingSeqgroups(self):
        # Groups can be added to and removed from a Clans instance one by
        # one; add()/remove() reject arguments that are not ClansEntry.
        c = Clans()
        names = ['a', 'b', 'c', 'd', 'e', 'f', 'g']
        for i, name in enumerate(names):
            c.add_group(ClansSeqgroup(name=name))
            self.assertEqual(len(c.seqgroups), i + 1)
        removed = 0
        while len(c.seqgroups) != 0:
            c.remove_group(c.seqgroups[-1])
            removed += 1
        self.assertEqual(removed, len(names))
        self.assertEqual(len(c.seqgroups), 0)
        testGroup = ClansSeqgroup()
        self.assertRaises(TypeError, testGroup.add, 23)
        self.assertRaises(TypeError, testGroup.remove, 23)

    def testAppendingSeqgroupsFromOtherInstance(self):
        # append_groups_from() copies only those groups whose members exist
        # (matched by entry name) in the target instance.
        source = Clans()
        source_entry1 = ClansEntry(name='X', seq='S')
        source_entry2 = ClansEntry(name='A', seq='S')
        source.add_entry(source_entry1)
        source.add_entry(source_entry2)
        # Groups holding an entry named 'X' (present in the target) ...
        seqgroup_names_to_transfer = ['a', 'b', 'c', 'd', 'e', 'f', 'g']
        for i, name in enumerate(seqgroup_names_to_transfer):
            sg = ClansSeqgroup(name=name)
            sg.add(source_entry1)
            source.add_group(sg)
        # ... and groups holding 'A' (absent from the target).
        seqgroup_names_to_omit = ['x', 'y', 'z']
        for i, name in enumerate(seqgroup_names_to_omit):
            sg = ClansSeqgroup(name=name)
            sg.add(source_entry2)
            source.add_group(sg)
        target = Clans()
        # different seq is tolerated, only name identity is checked
        target_entry = ClansEntry(name='X', seq='Q')
        target.add_entry(target_entry)
        self.assertEqual(source[0].name, target[0].name)
        target.append_groups_from(source)
        ## each group should have exactly one member
        self.assertEqual(len(set([len(group.members) for group in target.seqgroups])), 1)
        ## all groups of seqgroup_names should have been transferred
        self.assertEqual(len(target.seqgroups), len(seqgroup_names_to_transfer))
        self.assertEqual([group.name for group in target.seqgroups], seqgroup_names_to_transfer)
        ## the ones from seqgroup_names_to_omit should not be there
        self.assertEqual(len([group.name for group in target.seqgroups
                              if group.name in seqgroup_names_to_omit]), 0)

    def testAddingClansEntries(self):
        # Adding an entry registers it on both sides (group.members and
        # entry.groups); duplicates and non-entries are rejected.
        c = Clans()
        sg = ClansSeqgroup()
        c.add_group(sg)
        e = ClansEntry()
        c.add_entry(e)
        ## add entry to seqgroup
        sg.add(e)
        self.assertEqual(len(sg), 1)
        self.assertEqual(len(e.groups), 1)
        ## adding the same entry is forbidden
        self.assertRaises(ValueError, sg.add, e)
        ## adding s.th. else than a ClansEntry
        self.assertRaises(TypeError, sg.add, 23)

    def testRemovingClansEntries(self):
        # remove() unregisters the entry on both sides; removing a
        # non-entry or a non-member raises.
        c = Clans()
        sg = ClansSeqgroup()
        c.add_group(sg)
        e = ClansEntry()
        c.add_entry(e)
        sg.add(e)
        sg.remove(e)
        self.assertEqual(len(sg), 0)
        self.assertEqual(len(e.groups), 0)
        self.assertRaises(TypeError, sg.remove, 23)
        self.assertRaises(ValueError, sg.remove, e)
@test.functional
class TestClansParser(test.Case):
    """Functional tests for ClansParser on a small reference file."""

    def setUp(self):
        super(TestClansParser, self).setUp()
        self.filename = self.config.getTestFile('out.clans')

    def testPrematureGetter(self):
        '''
        Test whether the premature (before parsing) access to clans_instance is
        properly handled.
        '''
        cp = ClansParser()
        self.assertRaises(ValueError, cp.__getattribute__, 'clans_instance')

    def testParseFile(self):
        '''
        Test parsing of a small dummy file with known values
        '''
        from numpy import array, fabs

        cp = ClansParser()
        self.clans_instance = cp.parse_file(self.filename)

        self.assertEqual(len(self.clans_instance), 41)
        self.assertRaises(IndexError, self.clans_instance.__getitem__, 41)

        correct_rotmtx = array([[0.75614862, 0.65439992, 0.],
                                [-0.65439992, 0.75614862, 0.],
                                [0., 0., 1.]])
        self.assertEqual(self.clans_instance.rotmtx.shape, (3, 3))
        # BUGFIX: compare absolute deviations. The previous check
        # '(parsed - correct < 1e-6).all()' also passed when parsed values
        # were arbitrarily *smaller* than the expected ones.
        self.assertTrue(
            (fabs(self.clans_instance.rotmtx - correct_rotmtx) < 1e-6).all())

        self.assertEqual(len(self.clans_instance.seqgroups), 4)
        seqgroup_names = ('insect hypoth. protein (2 copies, C term)',
                          'allergens >= xyz',
                          'empty group WITH terminal semicolon in numbers line',
                          'empty group WITHOUT terminal semicolon in numbers line')
        seqgroup_sizes = (20, 17, 0, 0)
        for i, seqgroup in enumerate(self.clans_instance.seqgroups):
            self.assertEqual(len(seqgroup), seqgroup_sizes[i])
            self.assertEqual(seqgroup.name, seqgroup_names[i])
@test.functional
class TestClansFileWriter(test.Case):
    """Round-trip test: parse a CLANS file, write it back, compare."""

    def setUp(self):
        super(TestClansFileWriter, self).setUp()
        self.filename = self.config.getTestFile('out.clans')
        self.temp = self.config.getTempStream()

    def _assertSameColor(self, original_string, written_string):
        # Compare two serialized colors channel by channel (extracted
        # helper; previously duplicated for 'color=' and 'colorarr=').
        original_color = Color.from_string(original_string)
        written_color = Color.from_string(written_string)
        self.assertEqual(original_color.r, written_color.r)
        self.assertEqual(original_color.g, written_color.g)
        self.assertEqual(original_color.b, written_color.b)
        self.assertEqual(original_color.a, written_color.a)

    def testWrittenIsIdenticalToOriginal(self):
        """Written output must match the original, line by line.

        Colors and HSP values are compared semantically (parsed channels,
        numeric tolerance) since the writer may normalize their formatting.
        """
        cp = ClansParser()
        clans_instance = cp.parse_file(self.filename)
        clans_instance.write(self.temp.name)
        self.temp.flush()

        with open(self.filename) as original_file:
            original_lines = original_file.readlines()
        with open(self.temp.name) as written_file:
            written_lines = written_file.readlines()

        self.assertEqual(len(original_lines), len(written_lines))

        in_hsps = False
        start_tag_hsp = '<hsp>\n'
        end_tag_hsp = '</hsp>\n'
        in_seqgroups = False
        start_tag_seqgroups = '<seqgroups>\n'
        end_tag_seqgroups = '</seqgroups>\n'
        colorarr_tag = 'colorarr='
        color_tag = 'color='

        for i, original_line in enumerate(original_lines):
            # Track which section of the file we are in.
            if original_line == start_tag_hsp:
                in_hsps = True
                continue
            if original_line == end_tag_hsp:
                in_hsps = False
                continue
            if original_line == start_tag_seqgroups:
                in_seqgroups = True
                continue
            if original_line == end_tag_seqgroups:
                in_seqgroups = False
                continue

            if original_line.startswith(colorarr_tag):
                ## remove colorarr_tag from beginning of line
                original_line = original_line[len(colorarr_tag):].strip().strip(':')
                self.assertTrue(written_lines[i].startswith(colorarr_tag))
                written_line = written_lines[i][len(colorarr_tag):].strip().strip(':')
                original_colors = original_line.replace('(', '').replace(')', '').split(':')
                written_colors = written_line.replace('(', '').replace(')', '').split(':')
                self.assertEqual(len(original_colors), len(written_colors))
                for j, original_color_string in enumerate(original_colors):
                    self._assertSameColor(original_color_string,
                                          written_colors[j])
                continue

            if original_line.startswith(color_tag):
                self.assertTrue(written_lines[i].startswith(color_tag))
                self._assertSameColor(original_line[len(color_tag):].strip(),
                                      written_lines[i][len(color_tag):].strip())
                continue

            if in_hsps:
                original_start_end, original_value \
                    = original_line.strip().split(':')
                written_start_end, written_value \
                    = written_lines[i].strip().split(':')
                self.assertEqual(original_start_end, written_start_end)
                # BUGFIX: compare the absolute difference; the previous
                # '(a - b) < 1e-6' passed whenever a < b, no matter how far
                # apart the values were.
                self.assertTrue(abs(float(original_value) - float(written_value)) < 1e-6)
            elif in_seqgroups and (original_line == 'numbers=\n'):
                ## a terminal semicolon is added by the ClansWriter
                self.assertEqual(original_line.strip() + ';', written_lines[i].strip())
            else:
                self.assertEqual(original_line, written_lines[i])

    def tearDown(self):
        self.temp.close()
if __name__ == '__main__':
    # Run this module's test cases through the CSB test console.
    test.Console()
| {
"content_hash": "81fa5c6ba7c0c0d65f7d48e2428bfeb1",
"timestamp": "",
"source": "github",
"line_count": 470,
"max_line_length": 96,
"avg_line_length": 35.46170212765958,
"alnum_prop": 0.5795284094318114,
"repo_name": "csb-toolbox/CSB",
"id": "57b0f8693bf127053a18ad1ef6b41350da065f51",
"size": "16667",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "csb/test/cases/bio/io/clans/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "14987"
},
{
"name": "Python",
"bytes": "1475360"
}
],
"symlink_target": ""
} |
from glob import glob
import numpy as np
from .extern.six import string_types
from . import _librootnumpy
__all__ = [
'root2array',
'root2rec',
'list_trees',
'list_branches',
'list_structures',
'list_directories',
'tree2array',
'tree2rec',
'array2tree',
'array2root',
]
def _glob(filenames):
    """Expand a filename pattern (or list of patterns) with glob.

    A pattern that matches nothing is passed through unchanged so that
    URLs for remote file access are not clobbered.
    """
    if isinstance(filenames, string_types):
        filenames = [filenames]
    matches = []
    for pattern in filenames:
        # glob() yields [] for a non-matching pattern; fall back to the
        # original string in that case.
        matches.extend(glob(pattern) or [pattern])
    return matches
def list_trees(filename):
    """Get list of the tree names in a ROOT file.

    Parameters
    ----------
    filename : str
        Path to ROOT file.

    Returns
    -------
    trees : list
        List of tree names

    See Also
    --------
    list_branches, list_directories, list_structures
    """
    # Delegates to the C extension, which opens the file and scans its keys.
    return _librootnumpy.list_trees(filename)
def list_branches(filename, treename=None):
    """Get a list of the branch names of a tree in a ROOT file.

    Parameters
    ----------
    filename : str
        Path to ROOT file.
    treename : str, optional (default=None)
        Name of tree in the ROOT file.
        (optional if the ROOT file has only one tree).

    Returns
    -------
    branches : list
        List of branch names

    See Also
    --------
    list_trees, list_structures
    """
    # Delegates to the C extension; tree lookup happens there.
    return _librootnumpy.list_branches(filename, treename)
def list_directories(filename):
    """Get a list of the directories in a ROOT file.

    Parameters
    ----------
    filename : str
        Path to ROOT file.

    Returns
    -------
    directories : list
        List of directory names.

    See Also
    --------
    list_trees
    """
    # Delegates to the C extension.
    return _librootnumpy.list_directories(filename)
def list_structures(filename, treename=None):
    """Get a dictionary mapping branch names to leaf structures.

    Parameters
    ----------
    filename : str
        Path to ROOT file.
    treename : str, optional (default=None)
        Name of tree in the ROOT file
        (optional if the ROOT file has only one tree).

    Returns
    -------
    structures : OrderedDict
        An ordered dictionary mapping branch names to leaf structures.

    See Also
    --------
    list_branches
    """
    # Delegates to the C extension.
    return _librootnumpy.list_structures(filename, treename)
def root2array(filenames,
               treename=None,
               branches=None,
               selection=None,
               start=None,
               stop=None,
               step=None,
               include_weight=False,
               weight_name='weight',
               cache_size=-1,
               warn_missing_tree=False):
    """Convert trees in ROOT files into a numpy structured array.

    Parameters
    ----------
    filenames : str or list
        ROOT file name pattern or list of patterns. Wildcarding is supported by
        Python globbing.
    treename : str, optional (default=None)
        Name of the tree to convert (optional if each file contains exactly one
        tree).
    branches : list of strings or single string, optional (default=None)
        List of branch names to include as columns of the array or a single
        branch name to convert into a one-dimensional array. If None then
        include all branches that can be converted.
    selection : str, optional (default=None)
        Only include entries fulfilling this condition.
    start, stop, step: int, optional (default=None)
        The meaning of the ``start``, ``stop`` and ``step`` parameters is the
        same as for Python slices. If a range is supplied (by setting some of
        the ``start``, ``stop`` or ``step`` parameters), only the entries in
        that range and fulfilling the ``selection`` condition (if defined) are
        used.
    include_weight : bool, optional (default=False)
        Include a column containing the tree weight.
    weight_name : str, optional (default='weight')
        The field name for the weight column if ``include_weight=True``.
    cache_size : int, optional (default=-1)
        Set the size (in bytes) of the TTreeCache used while reading a TTree. A
        value of -1 uses ROOT's default cache size. A value of 0 disables the
        cache.
    warn_missing_tree : bool, optional (default=False)
        If True, then warn when a tree is missing from an input file instead of
        raising an IOError.

    Returns
    -------
    arr : numpy structured array
        The converted tree contents; a plain one-dimensional array of the
        single column when ``branches`` is given as a single string.

    Notes
    -----
    * Refer to the :ref:`type conversion table <conversion_table>`.

    See Also
    --------
    tree2array
    """
    filenames = _glob(filenames)

    if not filenames:
        raise ValueError("specify at least one filename")

    # Infer the tree name from the first file when it holds exactly one tree.
    if treename is None:
        trees = list_trees(filenames[0])
        if len(trees) > 1:
            raise ValueError(
                "treename must be specified if the file "
                "contains more than one tree")
        elif not trees:
            raise IOError(
                "no trees present in {0}".format(filenames[0]))
        treename = trees[0]

    if isinstance(branches, string_types):
        # single branch selected
        branches = [branches]
        flatten = True
    else:
        flatten = False

    arr = _librootnumpy.root2array_fromfile(
        filenames, treename, branches,
        selection,
        start, stop, step,
        include_weight,
        weight_name,
        cache_size,
        warn_missing_tree)

    if flatten:
        # select single column
        return arr[branches[0]]
    return arr
def root2rec(filenames,
             treename=None,
             branches=None,
             selection=None,
             start=None,
             stop=None,
             step=None,
             include_weight=False,
             weight_name='weight',
             cache_size=-1,
             warn_missing_tree=False):
    """View the result of :func:`root2array` as a record array.

    Notes
    -----
    * This is equivalent to::

        root2array(filenames, treename, branches).view(np.recarray)

    * Refer to the :ref:`type conversion table <conversion_table>`.

    See Also
    --------
    root2array
    """
    # Forward everything to root2array and reinterpret the result in place
    # (a view, not a copy).
    structured = root2array(filenames,
                            treename=treename,
                            branches=branches,
                            selection=selection,
                            start=start,
                            stop=stop,
                            step=step,
                            include_weight=include_weight,
                            weight_name=weight_name,
                            cache_size=cache_size,
                            warn_missing_tree=warn_missing_tree)
    return structured.view(np.recarray)
def tree2array(tree,
               branches=None,
               selection=None,
               start=None,
               stop=None,
               step=None,
               include_weight=False,
               weight_name='weight',
               cache_size=-1):
    """Convert a tree into a numpy structured array.

    Parameters
    ----------
    tree : ROOT TTree instance
        The ROOT TTree to convert into an array.
    branches : list of strings or single string, optional (default=None)
        List of branch names to include as columns of the array or a single
        branch name to convert into a one-dimensional array. If None then
        include all branches that can be converted.
    selection : str, optional (default=None)
        Only include entries fulfilling this condition.
    start, stop, step: int, optional (default=None)
        The meaning of the ``start``, ``stop`` and ``step`` parameters is the
        same as for Python slices. If a range is supplied (by setting some of
        the ``start``, ``stop`` or ``step`` parameters), only the entries in
        that range and fulfilling the ``selection`` condition (if defined) are
        used.
    include_weight : bool, optional (default=False)
        Include a column containing the tree weight.
    weight_name : str, optional (default='weight')
        The field name for the weight column if ``include_weight=True``.
    cache_size : int, optional (default=-1)
        Set the size (in bytes) of the TTreeCache used while reading a TTree. A
        value of -1 uses ROOT's default cache size. A value of 0 disables the
        cache.

    Notes
    -----
    Types are converted according to:

    .. _conversion_table:

    ======================== ===============================
    ROOT                     NumPy
    ======================== ===============================
    ``Bool_t``               ``np.bool``
    ``Char_t``               ``np.int8``
    ``UChar_t``              ``np.uint8``
    ``Short_t``              ``np.int16``
    ``UShort_t``             ``np.uint16``
    ``Int_t``                ``np.int32``
    ``UInt_t``               ``np.uint32``
    ``Float_t``              ``np.float32``
    ``Double_t``             ``np.float64``
    ``Long64_t``             ``np.int64``
    ``ULong64_t``            ``np.uint64``
    ``<type>[2][3]...``      ``(<nptype>, (2, 3, ...))``
    ``<type>[nx][2]...``     ``np.object``
    ``string``               ``np.object``
    ``vector<t>``            ``np.object``
    ``vector<vector<t> >``   ``np.object``
    ======================== ===============================

    * Variable-length arrays (such as ``x[nx][2]``) and vectors (such as
      ``vector<int>``) are converted to NumPy arrays of the corresponding
      types.
    * Fixed-length arrays are converted to fixed-length NumPy array fields.

    See Also
    --------
    root2array
    """
    # ROOT is imported lazily so that merely importing this module does not
    # require a ROOT installation.
    import ROOT
    if not isinstance(tree, ROOT.TTree):
        raise TypeError("tree must be a ROOT.TTree")
    # Hand the TTree to the C extension as an opaque C object pointer.
    cobj = ROOT.AsCObject(tree)

    if isinstance(branches, string_types):
        # single branch selected
        branches = [branches]
        flatten = True
    else:
        flatten = False

    arr = _librootnumpy.root2array_fromtree(
        cobj, branches, selection,
        start, stop, step,
        include_weight,
        weight_name,
        cache_size)

    if flatten:
        # select single column
        return arr[branches[0]]
    return arr
def tree2rec(tree,
             branches=None,
             selection=None,
             start=None,
             stop=None,
             step=None,
             include_weight=False,
             weight_name='weight',
             cache_size=-1):
    """View the result of :func:`tree2array` as a record array.

    Notes
    -----
    * This is equivalent to::

        tree2array(treename, branches).view(np.recarray)

    * Refer to the :ref:`type conversion table <conversion_table>`.

    See Also
    --------
    tree2array
    """
    # Forward positionally (same parameter order as tree2array) and
    # reinterpret the structured result as a record array view.
    structured = tree2array(tree, branches, selection,
                            start, stop, step,
                            include_weight, weight_name, cache_size)
    return structured.view(np.recarray)
def array2tree(arr, name='tree', tree=None):
    """Convert a numpy structured array into a ROOT TTree.

    Fields of basic types, strings, and fixed-size subarrays of basic types are
    supported. ``np.object`` and ``np.float16`` are currently not supported.

    Parameters
    ----------
    arr : array
        A numpy structured array
    name : str (optional, default='tree')
        Name of the created ROOT TTree if ``tree`` is None.
    tree : ROOT TTree (optional, default=None)
        An existing ROOT TTree to be extended by the numpy array. Any branch
        with the same name as a field in the numpy array will be extended as
        long as the types are compatible, otherwise a TypeError is raised. New
        branches will be created and filled for all new fields.

    Returns
    -------
    root_tree : a ROOT TTree

    See Also
    --------
    array2root

    Examples
    --------

    >>> from root_numpy import array2tree
    >>> import numpy as np
    >>>
    >>> a = np.array([(1, 2.5, 3.4),
    ...               (4, 5, 6.8)],
    ...              dtype=[('a', np.int32),
    ...                     ('b', np.float32),
    ...                     ('c', np.float64)])
    >>> tree = array2tree(a)
    >>> tree.Scan()
    ************************************************
    *    Row   *         a *         b *         c *
    ************************************************
    *        0 *         1 *       2.5 *       3.4 *
    *        1 *         4 *         5 *       6.8 *
    ************************************************
    """
    # ROOT is imported lazily so that importing this module does not require
    # a ROOT installation.
    import ROOT
    if tree is not None:
        if not isinstance(tree, ROOT.TTree):
            raise TypeError("tree must be a ROOT.TTree")
        # Pass the existing tree into the C extension as an opaque pointer.
        incobj = ROOT.AsCObject(tree)
    else:
        incobj = None
    # The extension returns an opaque pointer; rebind it as a PyROOT TTree.
    cobj = _librootnumpy.array2tree_toCObj(arr, name=name, tree=incobj)
    return ROOT.BindObject(cobj, 'TTree')
def array2root(arr, filename, treename='tree', mode='update'):
    """Convert a numpy array into a ROOT TTree and save it in a ROOT TFile.

    Fields of basic types, strings, and fixed-size subarrays of basic types are
    supported. ``np.object`` and ``np.float16`` are currently not supported.

    Parameters
    ----------
    arr : array
        A numpy structured array
    filename : str
        Name of the output ROOT TFile. A new file will be created if it doesn't
        already exist.
    treename : str (optional, default='tree')
        Name of the ROOT TTree that will be created. If a TTree with the same
        name already exists in the TFile, it will be extended as documented in
        :func:`array2tree`.
    mode : str (optional, default='update')
        Mode used to open the ROOT TFile ('update' or 'recreate').

    See Also
    --------
    array2tree

    Examples
    --------

    >>> from root_numpy import array2root, root2array
    >>> import numpy as np
    >>>
    >>> a = np.array([(1, 2.5, 3.4),
    ...               (4, 5, 6.8)],
    ...              dtype=[('a', np.int32),
    ...                     ('b', np.float32),
    ...                     ('c', np.float64)])
    >>> array2root(a, 'test.root', mode='recreate')
    >>> root2array('test.root')
    array([(1, 2.5, 3.4), (4, 5.0, 6.8)],
          dtype=[('a', '<i4'), ('b', '<f4'), ('c', '<f8')])
    >>>
    >>> a = np.array(['', 'a', 'ab', 'abc', 'xyz', ''],
    ...              dtype=[('string', 'S3')])
    >>> array2root(a, 'test.root', mode='recreate')
    >>> root2array('test.root')
    array([('',), ('a',), ('ab',), ('abc',), ('xyz',), ('',)],
          dtype=[('string', 'S3')])
    >>>
    >>> a = np.array([([1, 2, 3],),
    ...               ([4, 5, 6],)],
    ...              dtype=[('array', np.int32, (3,))])
    >>> array2root(a, 'test.root', mode='recreate')
    >>> root2array('test.root')
    array([([1, 2, 3],), ([4, 5, 6],)],
          dtype=[('array', '<i4', (3,))])
    """
    # File creation/opening and tree writing all happen in the C extension.
    _librootnumpy.array2root(arr, filename, treename, mode)
| {
"content_hash": "f0f1db8e7e089018a3f61e5a8087cd44",
"timestamp": "",
"source": "github",
"line_count": 488,
"max_line_length": 79,
"avg_line_length": 30.62704918032787,
"alnum_prop": 0.5381372942593337,
"repo_name": "ibab/root_numpy",
"id": "af12da7450cd4f90055aa27c1fb77c7741b35951",
"size": "14946",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "root_numpy/_tree.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "412"
},
{
"name": "C++",
"bytes": "24544"
},
{
"name": "Makefile",
"bytes": "4411"
},
{
"name": "Python",
"bytes": "208450"
},
{
"name": "Shell",
"bytes": "2051"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class WidthValidator(_plotly_utils.basevalidators.NumberValidator):
    # Auto-generated validator for the `pie.marker.line.width` property:
    # a number >= 0 (min=0), arrays accepted (array_ok), edit type 'style'.
    def __init__(self, plotly_name="width", parent_name="pie.marker.line", **kwargs):
        super(WidthValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # kwargs.pop lets a caller override each generated default.
            array_ok=kwargs.pop("array_ok", True),
            edit_type=kwargs.pop("edit_type", "style"),
            min=kwargs.pop("min", 0),
            **kwargs,
        )
| {
"content_hash": "adb3b2daba239a4d67087dec68cd4a62",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 85,
"avg_line_length": 37.61538461538461,
"alnum_prop": 0.5950920245398773,
"repo_name": "plotly/plotly.py",
"id": "a754a75ab103195ee22c7db6725638a6a9fb1a35",
"size": "489",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/pie/marker/line/_width.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
import subprocess
import sys
import setup_util
import os
def start(args, logfile, errfile):
    """Build the ninja war, deploy it to Resin and start the server.

    Returns 0 on success, 1 if any build/deploy command fails.
    """
    # Point the datasource at the benchmark database host.
    setup_util.replace_text("ninja/src/main/webapp/WEB-INF/resin-web.xml", "mysql:\/\/.*:3306", "mysql://" + args.database_host + ":3306")
    try:
        subprocess.check_call("mvn clean compile war:war", shell=True, cwd="ninja", stderr=errfile, stdout=logfile)
        # Same deploy sequence on both platforms: clean webapps, copy the
        # war, start Resin in production mode.
        if os.name == 'nt':
            deploy_commands = [
                'rmdir /S /Q "%RESIN_HOME%\\webapps\\"',
                'mkdir "%RESIN_HOME%\\webapps\\"',
                'copy ninja\\target\\ninja.war "%RESIN_HOME%\\webapps\\ninja.war"',
                '"%RESIN_HOME%\\bin\\start.bat -Dninja.mode=prod"',
            ]
        else:
            deploy_commands = [
                "rm -rf $RESIN_HOME/webapps/*",
                "cp ninja/target/ninja.war $RESIN_HOME/webapps/ninja.war",
                "$RESIN_HOME/bin/resinctl start -Dninja.mode=prod",
            ]
        for command in deploy_commands:
            subprocess.check_call(command, shell=True, stderr=errfile, stdout=logfile)
        return 0
    except subprocess.CalledProcessError:
        return 1
def stop(logfile, errfile):
    """Shut down the Resin server; return 0 on success, 1 on failure."""
    shutdown_command = ('"%RESIN_HOME%\\bin\\stop.bat"'
                        if os.name == 'nt'
                        else "$RESIN_HOME/bin/resinctl shutdown")
    try:
        subprocess.check_call(shutdown_command, shell=True, stderr=errfile, stdout=logfile)
        return 0
    except subprocess.CalledProcessError:
        return 1
| {
"content_hash": "14d5c471bc85932c5fd13c7494ceb8fa",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 139,
"avg_line_length": 51.40625,
"alnum_prop": 0.7045592705167173,
"repo_name": "morrisonlevi/FrameworkBenchmarks",
"id": "102b6dad7db1d3910f0f5da74eb797db4fca174d",
"size": "1645",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ninja/setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from diskcache import Cache
class GraphCache:
    """Memoizing wrapper around a neighborhood-graph reader.

    Results of ``get_neighborhood_graph`` are cached either in a plain
    in-memory dict (when ``disk_cache`` is None) or in a persistent
    ``diskcache.Cache`` directory.
    """

    # Class-level defaults; both are overwritten per instance in __init__.
    inner = None
    disk_cache_file = None

    def __init__(self, inner, disk_cache):
        """
        :param inner: object providing ``get_neighborhood_graph(mention_entities)``
        :param disk_cache: directory path for a persistent cache, or None
            for an in-memory dict cache
        """
        # NOTE(review): these two look unused in this class — retained in
        # case external code pokes at them; confirm before removing.
        self.keys = {}
        self.row_pointer = -1
        self.inner = inner
        if disk_cache is None:
            self.cache = {}
        else:
            # Persistent on-disk cache, size-limited to 2**42 bytes (~4 TiB).
            self.disk_cache_file = disk_cache
            self.cache = Cache(self.disk_cache_file, size_limit=2 ** 42)

    def get_neighborhood_graph(self, mention_entities):
        """Return a copy of the (possibly cached) graph for the given entities.

        :param mention_entities: iterable of entity strings; joined with ':'
            to form the cache key
        """
        key = "cachekey_" + ":".join(mention_entities)
        try:
            # EAFP single lookup instead of `in` + `[]` — with a diskcache
            # backend the old form hit the disk twice per cached read.
            graph = self.cache[key]
        except KeyError:
            print("retrieve")
            graph = self.inner.get_neighborhood_graph(mention_entities)
            self.cache[key] = graph
        # Hand out a copy so callers cannot mutate the cached graph.
        return graph.copy()
"content_hash": "79d9d5edf04a256f0aef746ba6910d0b",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 84,
"avg_line_length": 26.21875,
"alnum_prop": 0.5697258641239571,
"repo_name": "MichSchli/QuestionAnsweringGCN",
"id": "6781e921ed3be58bdd154b0e113378dc91ee0b1a",
"size": "839",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example_reader/graph_reader/graph_cache.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "730851"
},
{
"name": "Shell",
"bytes": "1446"
}
],
"symlink_target": ""
} |
"""
Created on 2013-1-19
@author: Administrator
"""
from string import Template
t = Template('${village}folk send $$10 to $cause.')
s = t.substitute(village='Nottingham', cause='the ditch fund')
print(s)
d = dict(village='Jinan')
s = t.safe_substitute(d)
print(s)
basket = ['apple', 'orange', 'apple', 'pear', 'orange', 'banana']
for f in sorted(set(basket)):
print(f)
| {
"content_hash": "8a7488b46ab09dcefe27daa9d16d5575",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 65,
"avg_line_length": 22.11764705882353,
"alnum_prop": 0.6622340425531915,
"repo_name": "quchunguang/test",
"id": "7b3f870dfde3e3c281568477111aa2b018989ebd",
"size": "376",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "testpy3/testtemplate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ActionScript",
"bytes": "1086"
},
{
"name": "Assembly",
"bytes": "71339"
},
{
"name": "Awk",
"bytes": "1033"
},
{
"name": "Batchfile",
"bytes": "571"
},
{
"name": "C",
"bytes": "1063602"
},
{
"name": "C++",
"bytes": "309142"
},
{
"name": "CSS",
"bytes": "22567"
},
{
"name": "CoffeeScript",
"bytes": "5429"
},
{
"name": "Common Lisp",
"bytes": "941"
},
{
"name": "Fortran",
"bytes": "21095"
},
{
"name": "Gnuplot",
"bytes": "11868"
},
{
"name": "Go",
"bytes": "14507"
},
{
"name": "HCL",
"bytes": "21381"
},
{
"name": "HTML",
"bytes": "788820"
},
{
"name": "Java",
"bytes": "947462"
},
{
"name": "JavaScript",
"bytes": "11208"
},
{
"name": "Lex",
"bytes": "8920"
},
{
"name": "M",
"bytes": "14447"
},
{
"name": "M4",
"bytes": "550"
},
{
"name": "Makefile",
"bytes": "123588"
},
{
"name": "Mathematica",
"bytes": "3808649"
},
{
"name": "Matlab",
"bytes": "99775"
},
{
"name": "Objective-C",
"bytes": "18954"
},
{
"name": "OpenEdge ABL",
"bytes": "5002"
},
{
"name": "PHP",
"bytes": "80666"
},
{
"name": "PLpgSQL",
"bytes": "399"
},
{
"name": "Perl",
"bytes": "350"
},
{
"name": "PostScript",
"bytes": "9049"
},
{
"name": "Python",
"bytes": "521668"
},
{
"name": "QMake",
"bytes": "258"
},
{
"name": "R",
"bytes": "67"
},
{
"name": "Roff",
"bytes": "1331"
},
{
"name": "Scala",
"bytes": "1467"
},
{
"name": "Scheme",
"bytes": "68"
},
{
"name": "Shell",
"bytes": "551111"
},
{
"name": "SuperCollider",
"bytes": "26339"
},
{
"name": "TeX",
"bytes": "6604"
},
{
"name": "Yacc",
"bytes": "23335"
}
],
"symlink_target": ""
} |
from django.db import models
from django.contrib.auth.models import User
class AbstractEntry(models.Model):
    """Common fields shared by the entry-like models below (owner, title,
    unique slug, body text)."""
    user = models.ForeignKey(User)
    title = models.CharField(max_length=255,blank=True,)
    slug = models.SlugField(unique=True)
    text = models.TextField(blank=True)
    class Meta:
        # Abstract base: no table of its own, subclasses inherit the fields.
        abstract=True
class Page(AbstractEntry):
    """Plain page with no media attachments of its own."""
    pass
# When a Page is deleted, mediafiles' content_type and object_id are still there.
#####
from mediafiles.models import MediaFile
class Entry(AbstractEntry):
    """Entry whose media files are attached through the explicit EntryMedia
    join model."""
    # FIX: dropped null=True and default=None — Django documents both as
    # having no effect on ManyToManyField (there is no column to be NULL),
    # so only blank=True (form-level "may be empty") is meaningful.
    medias = models.ManyToManyField(MediaFile,
                through='EntryMedia',
                blank=True)
class EntryMedia(models.Model):
    """Explicit join table linking an Entry to one of its MediaFiles."""
    entry = models.ForeignKey(Entry)
    media = models.ForeignKey(MediaFile)
#####
class Blog(AbstractEntry):
    """Entry variant using a plain ManyToManyField (implicit join table)."""
    # FIX: dropped null=True and default=None — Django documents both as
    # no-ops on ManyToManyField; blank=True is the only effective option.
    medias = models.ManyToManyField(MediaFile,
                blank=True)
| {
"content_hash": "28ebc3ae9d4665b7de5d2e69296f238a",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 81,
"avg_line_length": 27.78787878787879,
"alnum_prop": 0.6859323882224646,
"repo_name": "hdknr/django-mediafiles",
"id": "3d2619b4879a723e3441cd6410d7acd8a223689b",
"size": "941",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sample/web/app/blogs/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1611"
},
{
"name": "JavaScript",
"bytes": "99832"
},
{
"name": "Python",
"bytes": "46310"
}
],
"symlink_target": ""
} |
from Base import Base
class Callback(Base):
    """Callback nodes, used for signals."""

    def parse_node(self):
        """Parse the node's attributes, parameters and return value, then
        register the resulting type with the namespace."""
        attribute_map = {
            'name': 'name',
            'ctype': (self.NS_C, 'type'),
        }
        self.parse_attributes_from_map(attribute_map)
        self.parse_parameters()
        self.parse_returnvalue()
        # propagate the Callback definition to the namespace
        self._namespace.registerType(self, self.getName(), self.getCType())
| {
"content_hash": "5ccdd8d590b58f48e2127e482d212af8",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 75,
"avg_line_length": 28.333333333333332,
"alnum_prop": 0.5411764705882353,
"repo_name": "derAndreas/pyGtk3Docs",
"id": "47182daea13e33be58b59d2af0d04890ffc9953c",
"size": "511",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "pyGtk3Docs/ast/Callback.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "960563"
},
{
"name": "Python",
"bytes": "55655"
}
],
"symlink_target": ""
} |
import unittest
from StringIO import StringIO
import sdv
import sdv.errors as errors
from sdv.validators.stix.profile import InstanceMapping
STIX_NO_VERSION_XML = \
"""
<stix:STIX_Package
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:stix="http://stix.mitre.org/stix-1"
>
<stix:STIX_Header>
<stix:Title>Unknown version of STIX</stix:Title>
</stix:STIX_Header>
</stix:STIX_Package>
"""
class STIXProfileTests(unittest.TestCase):
    """Profile validation must reject documents that are not profiles."""

    def test_invalid_profile(self):
        xml = StringIO(STIX_NO_VERSION_XML)
        # The second argument is not a profile workbook, so parsing must fail.
        self.assertRaises(
            errors.ProfileParseError,
            sdv.validate_profile, xml, "INVALID Profile DOC")
class InstanceMappingTests(unittest.TestCase):
    """Failure modes of profile InstanceMapping validation."""

    _NSMAP = {
        'http://stix.mitre.org/stix-1': 'stix'
    }
    _NAMESPACE = "http://stix.mitre.org/stix-1"
    _SELECTORS = "stix:STIX_Package, //stix:Package"
    _LABEL = "STIXType"

    def test_missing_label(self):
        """A mapping with no label must fail validate()."""
        mapping = InstanceMapping(self._NSMAP)
        # FIX: use the class constants instead of re-duplicating the literals
        # (they were declared above but never referenced).
        mapping.selectors = self._SELECTORS
        mapping.namespace = self._NAMESPACE
        self.assertRaises(errors.ProfileParseError, mapping.validate)

    def test_missing_namespace(self):
        """A mapping with no namespace must fail validate()."""
        mapping = InstanceMapping(self._NSMAP)
        mapping.selectors = self._SELECTORS
        mapping.label = self._LABEL
        self.assertRaises(errors.ProfileParseError, mapping.validate)

    def test_invalid_namespace(self):
        """Assigning a namespace that is not in the nsmap raises immediately."""
        mapping = InstanceMapping(self._NSMAP)
        mapping.selectors = self._SELECTORS
        mapping.label = self._LABEL
        def set_namespace():
            mapping.namespace = "this will fail"
        self.assertRaises(errors.ProfileParseError, set_namespace)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "f2393763808418139900adde8c3db6b9",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 85,
"avg_line_length": 31.24137931034483,
"alnum_prop": 0.6710816777041942,
"repo_name": "pombredanne/stix-validator",
"id": "9500d1aa1820032f52e855f94997f862530cfa91",
"size": "1917",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sdv/test/stix_profile_tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "227005"
}
],
"symlink_target": ""
} |
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
    """Fail fast if the json library in use loses precision on BTC values."""
    # Round-trip a value that needs the full double precision; converting to
    # satoshis must reproduce it exactly.
    value = float(Decimal("20000000.00000003"))
    round_tripped = json.loads(json.dumps(value))
    if int(round_tripped * 1.0e8) != 2000000000000003:
        raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
    """Return the default location of the bitcoin data directory."""
    system_name = platform.system()
    if system_name == "Darwin":
        return os.path.expanduser("~/Library/Application Support/Bitcoin/")
    if system_name == "Windows":
        return os.path.join(os.environ['APPDATA'], "Bitcoin")
    # Everything else is assumed to be unix-like.
    return os.path.expanduser("~/.bitcoin")
def read_bitcoin_config(dbdir):
    """Read the bitcoin.conf file from dbdir, returns dictionary of settings"""
    from ConfigParser import SafeConfigParser
    # bitcoin.conf has no section headers, but ConfigParser requires one:
    # FakeSecHead injects a synthetic '[all]' section before the real content.
    class FakeSecHead(object):
        def __init__(self, fp):
            self.fp = fp
            self.sechead = '[all]\n'
        def readline(self):
            if self.sechead:
                # Serve the synthetic header exactly once (try/finally
                # clears it after the return value is computed).
                try: return self.sechead
                finally: self.sechead = None
            else:
                s = self.fp.readline()
                # Strip inline '#' comments; keep the trailing newline.
                if s.find('#') != -1:
                    s = s[0:s.find('#')].strip() +"\n"
                return s
    config_parser = SafeConfigParser()
    config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "bitcoin.conf"))))
    # Flatten the single synthetic section into a plain dict.
    return dict(config_parser.items("all"))
def connect_JSON(config):
    """Connect to a bitcoin JSON-RPC server.

    Exits the process (status 1) if the connection fails or if the server's
    testnet setting does not match the one requested in `config`.
    """
    testnet = config.get('testnet', '0')
    testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
    if not 'rpcport' in config:
        # Default RPC ports: 30838 on testnet, 20838 otherwise.
        config['rpcport'] = 30838 if testnet else 20838
    connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
    try:
        result = ServiceProxy(connect)
        # ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
        # but also make sure the bitcoind we're talking to is/isn't testnet:
        if result.getmininginfo()['testnet'] != testnet:
            sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
            sys.exit(1)
        return result
    except Exception:
        # BUG FIX: this was a bare 'except:', which also caught the SystemExit
        # raised by sys.exit(1) just above and printed a second, misleading
        # "Error connecting" message before exiting.
        sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
        sys.exit(1)
def unlock_wallet(bitcoind):
    """Make sure the wallet is unlocked, prompting for a passphrase if needed.

    Returns True if the wallet is unencrypted or currently unlocked,
    False otherwise (callers retry until it succeeds).
    """
    info = bitcoind.getinfo()
    if 'unlocked_until' not in info:
        return True # wallet is not encrypted
    t = int(info['unlocked_until'])
    if t <= time.time():
        try:
            passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
            bitcoind.walletpassphrase(passphrase, 5)
        except Exception:
            # BUG FIX: was a bare 'except:', which also swallowed
            # KeyboardInterrupt — Ctrl-C could not break out of the
            # caller's retry loop. Genuine RPC failures (wrong passphrase)
            # are still reported and re-checked below.
            sys.stderr.write("Wrong passphrase\n")
    info = bitcoind.getinfo()
    return int(info['unlocked_until']) > time.time()
def list_available(bitcoind):
    """Summarize spendable outputs per address.

    Returns {address: {'total': Decimal, 'outputs': [listunspent entries],
    'account': str}} for every standard unspent output known to bitcoind.
    """
    address_summary = dict()
    address_to_account = dict()
    # Map each receiving address to its account label (minconf=0).
    for info in bitcoind.listreceivedbyaddress(0):
        address_to_account[info["address"]] = info["account"]
    unspent = bitcoind.listunspent(0)
    for output in unspent:
        # listunspent doesn't give addresses, so:
        rawtx = bitcoind.getrawtransaction(output['txid'], 1)
        vout = rawtx["vout"][output['vout']]
        pk = vout["scriptPubKey"]
        # This code only deals with ordinary pay-to-bitcoin-address
        # or pay-to-script-hash outputs right now; anything exotic is ignored.
        if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
            continue
        address = pk["addresses"][0]
        # Accumulate totals and keep every contributing output per address.
        if address in address_summary:
            address_summary[address]["total"] += vout["value"]
            address_summary[address]["outputs"].append(output)
        else:
            address_summary[address] = {
                "total" : vout["value"],
                "outputs" : [output],
                "account" : address_to_account.get(address, "")
                }
    return address_summary
def select_coins(needed, inputs):
    """Greedily pick inputs (in order) until `needed` is covered.

    Returns (outputs, change) where outputs is a list of
    {'txid', 'vout'} dicts and change = gathered - needed (negative
    when the inputs were insufficient).
    """
    # Feel free to improve this, this is good enough for my simple needs:
    outputs = []
    have = Decimal("0.0")
    for tx_input in inputs:
        if have >= needed:
            break
        outputs.append({"txid": tx_input["txid"], "vout": tx_input["vout"]})
        have += tx_input["amount"]
    return (outputs, have - needed)
def create_tx(bitcoind, fromaddresses, toaddress, amount, fee):
    """Build and sign a raw transaction sending `amount` (+`fee`) from
    `fromaddresses` to `toaddress`; returns the signed tx as a hex string.
    Exits the process if funds are insufficient or signing fails."""
    all_coins = list_available(bitcoind)
    total_available = Decimal("0.0")
    needed = amount+fee
    potential_inputs = []
    # Gather every spendable output belonging to the source addresses.
    for addr in fromaddresses:
        if addr not in all_coins:
            continue
        potential_inputs.extend(all_coins[addr]["outputs"])
        total_available += all_coins[addr]["total"]
    if total_available < needed:
        sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
        sys.exit(1)
    #
    # Note:
    # Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
    # Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
    # Decimals, I'm casting amounts to float before sending them to bitcoind.
    #
    outputs = { toaddress : float(amount) }
    (inputs, change_amount) = select_coins(needed, potential_inputs)
    if change_amount > BASE_FEE: # don't bother with zero or tiny change
        # Send change back to the last source address.
        change_address = fromaddresses[-1]
        if change_address in outputs:
            outputs[change_address] += float(change_amount)
        else:
            outputs[change_address] = float(change_amount)
    rawtx = bitcoind.createrawtransaction(inputs, outputs)
    signed_rawtx = bitcoind.signrawtransaction(rawtx)
    if not signed_rawtx["complete"]:
        sys.stderr.write("signrawtransaction failed\n")
        sys.exit(1)
    txdata = signed_rawtx["hex"]
    return txdata
def compute_amount_in(bitcoind, txinfo):
    """Sum the values of all inputs of a decoded transaction.

    Looks up each input's source transaction via RPC to find its value.
    """
    result = Decimal("0.0")
    for vin in txinfo['vin']:
        in_info = bitcoind.getrawtransaction(vin['txid'], 1)
        vout = in_info['vout'][vin['vout']]
        result = result + vout['value']
    return result

def compute_amount_out(txinfo):
    """Sum the values of all outputs of a decoded transaction."""
    result = Decimal("0.0")
    for vout in txinfo['vout']:
        result = result + vout['value']
    return result

def sanity_test_fee(bitcoind, txdata_hex, max_fee):
    """Exit the process if the transaction's implied fee looks unreasonable.

    Checks: fee must not exceed `max_fee`; transactions larger than 1000
    bytes and tiny-amount transactions must pay at least BASE_FEE.
    """
    class FeeError(RuntimeError):
        pass
    try:
        txinfo = bitcoind.decoderawtransaction(txdata_hex)
        total_in = compute_amount_in(bitcoind, txinfo)
        total_out = compute_amount_out(txinfo)
        # BUG FIX: the checks below referenced an undefined local 'fee'
        # (NameError whenever they were reached). The fee is, by
        # definition, inputs minus outputs.
        fee = total_in - total_out
        if fee > max_fee:
            raise FeeError("Rejecting transaction, unreasonable fee of "+str(fee))
        tx_size = len(txdata_hex)/2
        # BUG FIX: use floor division so this stays an integer-kilobyte
        # count under Python 3 as well (matches the original py2 behavior).
        kb = tx_size//1000
        if kb > 1 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < 0.01 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee, tiny-amount transaction")
        # Exercise for the reader: compute transaction priority, and
        # warn if this is a very-low-priority transaction
    except FeeError as err:
        sys.stderr.write((str(err)+"\n"))
        sys.exit(1)
def main():
    """Command-line driver: with no --amount, list spendable addresses;
    otherwise build, sanity-check and (unless --dry_run) broadcast a
    transaction."""
    import optparse
    parser = optparse.OptionParser(usage="%prog [options]")
    parser.add_option("--from", dest="fromaddresses", default=None,
                      help="addresses to get bitcoins from")
    parser.add_option("--to", dest="to", default=None,
                      help="address to get send bitcoins to")
    parser.add_option("--amount", dest="amount", default=None,
                      help="amount to send")
    parser.add_option("--fee", dest="fee", default="0.0",
                      help="fee to include")
    parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
                      help="location of bitcoin.conf file with RPC username/password (default: %default)")
    parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
                      help="Use the test network")
    parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
                      help="Don't broadcast the transaction, just create and print the transaction data")
    (options, args) = parser.parse_args()
    check_json_precision()
    config = read_bitcoin_config(options.datadir)
    if options.testnet: config['testnet'] = True
    bitcoind = connect_JSON(config)
    if options.amount is None:
        # No amount requested: just report what each address could spend.
        address_summary = list_available(bitcoind)
        for address,info in address_summary.iteritems():
            n_transactions = len(info['outputs'])
            if n_transactions > 1:
                print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
            else:
                print("%s %.8f %s"%(address, info['total'], info['account']))
    else:
        fee = Decimal(options.fee)
        amount = Decimal(options.amount)
        while unlock_wallet(bitcoind) == False:
            pass # Keep asking for passphrase until they get it right
        txdata = create_tx(bitcoind, options.fromaddresses.split(","), options.to, amount, fee)
        # Refuse to pay more than 1% of the sent amount in fees.
        sanity_test_fee(bitcoind, txdata, amount*Decimal("0.01"))
        if options.dry_run:
            print(txdata)
        else:
            txid = bitcoind.sendrawtransaction(txdata)
            print(txid)
if __name__ == '__main__':
    main()
| {
"content_hash": "a0bcd9ecbddf6956af80afb0d1662467",
"timestamp": "",
"source": "github",
"line_count": 252,
"max_line_length": 111,
"avg_line_length": 38.3968253968254,
"alnum_prop": 0.6155436130632492,
"repo_name": "emarketcoin/EMARKET",
"id": "6198c4e62cfd576b9b242acea3161bb87c990a44",
"size": "10054",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "contrib/spendfrom/spendfrom.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "32394"
},
{
"name": "C++",
"bytes": "2605900"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "HTML",
"bytes": "50615"
},
{
"name": "Makefile",
"bytes": "13377"
},
{
"name": "NSIS",
"bytes": "5918"
},
{
"name": "Objective-C",
"bytes": "1052"
},
{
"name": "Objective-C++",
"bytes": "5864"
},
{
"name": "Python",
"bytes": "69716"
},
{
"name": "QMake",
"bytes": "14749"
},
{
"name": "Roff",
"bytes": "18289"
},
{
"name": "Shell",
"bytes": "16339"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
try_get,
urljoin,
)
class PhilharmonieDeParisIE(InfoExtractor):
    IE_DESC = 'Philharmonie de Paris'
    _VALID_URL = r'''(?x)
                    https?://
                        (?:
                            live\.philharmoniedeparis\.fr/(?:[Cc]oncert/|embed(?:app)?/|misc/Playlist\.ashx\?id=)|
                            pad\.philharmoniedeparis\.fr/doc/CIMU/
                        )
                        (?P<id>\d+)
                    '''
    _TESTS = [{
        'url': 'http://pad.philharmoniedeparis.fr/doc/CIMU/1086697/jazz-a-la-villette-knower',
        'md5': 'a0a4b195f544645073631cbec166a2c2',
        'info_dict': {
            'id': '1086697',
            'ext': 'mp4',
            'title': 'Jazz à la Villette : Knower',
        },
    }, {
        'url': 'http://live.philharmoniedeparis.fr/concert/1032066.html',
        'info_dict': {
            'id': '1032066',
            'title': 'md5:0a031b81807b3593cffa3c9a87a167a0',
        },
        'playlist_mincount': 2,
    }, {
        'url': 'http://live.philharmoniedeparis.fr/Concert/1030324.html',
        'only_matching': True,
    }, {
        'url': 'http://live.philharmoniedeparis.fr/misc/Playlist.ashx?id=1030324&track=&lang=fr',
        'only_matching': True,
    }, {
        'url': 'https://live.philharmoniedeparis.fr/embedapp/1098406/berlioz-fantastique-lelio-les-siecles-national-youth-choir-of.html?lang=fr-FR',
        'only_matching': True,
    }, {
        'url': 'https://live.philharmoniedeparis.fr/embed/1098406/berlioz-fantastique-lelio-les-siecles-national-youth-choir-of.html?lang=fr-FR',
        'only_matching': True,
    }]
    _LIVE_URL = 'https://live.philharmoniedeparis.fr'
    def _real_extract(self, url):
        video_id = self._match_id(url)
        # The player config JSON describes either a single video or a list
        # of chapters, each with per-device file lists.
        config = self._download_json(
            '%s/otoPlayer/config.ashx' % self._LIVE_URL, video_id, query={
                'id': video_id,
                'lang': 'fr-FR',
            })
        def extract_entry(source):
            # Turn one config entry (a dict with 'title' and 'files') into
            # a partial info dict with HLS formats; returns None (implicit)
            # when any required piece is missing.
            if not isinstance(source, dict):
                return
            title = source.get('title')
            if not title:
                return
            files = source.get('files')
            if not isinstance(files, dict):
                return
            format_urls = set()
            formats = []
            for format_id in ('mobile', 'desktop'):
                format_url = try_get(
                    files, lambda x: x[format_id]['file'], compat_str)
                # Skip missing entries and de-duplicate identical playlists.
                if not format_url or format_url in format_urls:
                    continue
                format_urls.add(format_url)
                m3u8_url = urljoin(self._LIVE_URL, format_url)
                formats.extend(self._extract_m3u8_formats(
                    m3u8_url, video_id, 'mp4', entry_protocol='m3u8_native',
                    m3u8_id='hls', fatal=False))
            if not formats:
                return
            self._sort_formats(formats)
            return {
                'title': title,
                'formats': formats,
            }
        thumbnail = urljoin(self._LIVE_URL, config.get('image'))
        # Single-video case: the top-level config itself is an entry.
        info = extract_entry(config)
        if info:
            info.update({
                'id': video_id,
                'thumbnail': thumbnail,
            })
            return info
        # Otherwise treat it as a playlist of chapters.
        entries = []
        for num, chapter in enumerate(config['chapters'], start=1):
            entry = extract_entry(chapter)
            entry['id'] = '%s-%d' % (video_id, num)
            entries.append(entry)
        return self.playlist_result(entries, video_id, config.get('title'))
| {
"content_hash": "e292b90024d90dea5e2390eb33e82854",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 148,
"avg_line_length": 35.63809523809524,
"alnum_prop": 0.5096205237840727,
"repo_name": "yasoob/youtube-dl-GUI",
"id": "03da64b116128f01893f047e747a1fb40af1e2df",
"size": "3759",
"binary": false,
"copies": "15",
"ref": "refs/heads/master",
"path": "youtube_dl/extractor/philharmoniedeparis.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Inno Setup",
"bytes": "7102"
},
{
"name": "Python",
"bytes": "1335226"
}
],
"symlink_target": ""
} |
"""Package with general repository related functions"""
import os
from string import digits
from git.compat import xrange
from git.exc import WorkTreeRepositoryUnsupported
from git.objects import Object
from git.refs import SymbolicReference
from git.util import hex_to_bin, bin_to_hex, decygpath
from gitdb.exc import (
BadObject,
BadName,
)
import os.path as osp
from git.cmd import Git
__all__ = ('rev_parse', 'is_git_dir', 'touch', 'find_submodule_git_dir', 'name_to_object', 'short_to_long', 'deref_tag',
'to_commit')
def touch(filename):
    """Make sure `filename` exists (like POSIX touch) and return its path.

    Opening in binary-append mode creates a missing file without ever
    truncating an existing one."""
    with open(filename, "ab") as _fp:
        pass
    return filename
def is_git_dir(d):
    """ This is taken from the git setup.c:is_git_directory
    function.

    @throws WorkTreeRepositoryUnsupported if it sees a worktree directory. It's quite hacky to do that here,
    but at least clearly indicates that we don't support it.
    There is the unlikely danger to throw if we see directories which just look like a worktree dir,
    but are none."""
    if not osp.isdir(d):
        return False
    # A repository directory carries both 'objects' and 'refs' plus a
    # HEAD that is either a regular file or a symlink into refs/.
    if osp.isdir(osp.join(d, 'objects')) and osp.isdir(osp.join(d, 'refs')):
        headref = osp.join(d, 'HEAD')
        if osp.isfile(headref):
            return True
        return osp.islink(headref) and os.readlink(headref).startswith('refs')
    # The gitdir/commondir/gitfile trio identifies a worktree, which we
    # explicitly refuse to handle.
    if (osp.isfile(osp.join(d, 'gitdir')) and
            osp.isfile(osp.join(d, 'commondir')) and
            osp.isfile(osp.join(d, 'gitfile'))):
        raise WorkTreeRepositoryUnsupported(d)
    return False
def find_submodule_git_dir(d):
    """Search for a submodule repo.

    `d` may be the git directory itself or a 'gitdir: <path>' pointer file;
    follows the pointer (recursively) and returns the real git dir, or
    None if nothing repository-like is found."""
    if is_git_dir(d):
        return d
    try:
        with open(d) as fp:
            content = fp.read().rstrip()
    except (IOError, OSError):
        # it's probably not a file
        return None
    if not content.startswith('gitdir: '):
        return None
    path = content[8:]
    if Git.is_cygwin():
        ## Cygwin creates submodules prefixed with `/cygdrive/...` suffixes.
        path = decygpath(path)
    if not osp.isabs(path):
        # Pointer paths are relative to the pointer file's directory.
        path = osp.join(osp.dirname(d), path)
    return find_submodule_git_dir(path)
def short_to_long(odb, hexsha):
    """:return: long hexadecimal sha1 from the given less-than-40 byte hexsha
        or None if no candidate could be found.
    :param odb: object database to query for prefix completion
    :param hexsha: hexsha with less than 40 byte"""
    try:
        return bin_to_hex(odb.partial_to_complete_sha_hex(hexsha))
    except BadObject:
        # The odb could not complete the prefix; report "not found".
        return None
    # END exception handling
def name_to_object(repo, name, return_ref=False):
    """
    :return: object specified by the given name, hexshas ( short and long )
        as well as references are supported
    :param repo: repository to look the name up in
    :param name: short/long hexsha or reference name
    :param return_ref: if name specifies a reference, we will return the reference
        instead of the object. Otherwise it will raise BadObject or BadName
    """
    hexsha = None
    # is it a hexsha ? Try the most common ones, which is 7 to 40
    if repo.re_hexsha_shortened.match(name):
        if len(name) != 40:
            # find long sha for short sha
            hexsha = short_to_long(repo.odb, name)
        else:
            hexsha = name
        # END handle short shas
    # END find sha if it matches
    # if we couldn't find an object for what seemed to be a short hexsha
    # try to find it as reference anyway, it could be named 'aaa' for instance
    if hexsha is None:
        # Same search order git uses for ref disambiguation.
        for base in ('%s', 'refs/%s', 'refs/tags/%s', 'refs/heads/%s', 'refs/remotes/%s', 'refs/remotes/%s/HEAD'):
            try:
                hexsha = SymbolicReference.dereference_recursive(repo, base % name)
                if return_ref:
                    return SymbolicReference(repo, base % name)
                # END handle symbolic ref
                break
            except ValueError:
                pass
        # END for each base
    # END handle hexsha
    # didn't find any ref, this is an error
    if return_ref:
        raise BadObject("Couldn't find reference named %r" % name)
    # END handle return ref
    # tried everything ? fail
    if hexsha is None:
        raise BadName(name)
    # END assert hexsha was found
    return Object.new_from_sha(repo, hex_to_bin(hexsha))
def deref_tag(tag):
    """Recursively dereference a tag and return the resulting object.

    Follows `.object` links until reaching something without one
    (i.e. a non-tag)."""
    while hasattr(tag, 'object'):
        tag = tag.object
    return tag
def to_commit(obj):
    """Convert the given object to a commit if possible and return it.

    Tags are dereferenced first; anything that does not end up as a
    commit raises ValueError."""
    candidate = deref_tag(obj) if obj.type == 'tag' else obj
    if candidate.type != "commit":
        raise ValueError("Cannot convert object %r to type commit" % candidate)
    # END verify type
    return candidate
def rev_parse(repo, rev):
    """
    :return: Object at the given revision, either Commit, Tag, Tree or Blob
    :param rev: git-rev-parse compatible revision specification as string, please see
        http://www.kernel.org/pub/software/scm/git/docs/git-rev-parse.html
        for details
    :raise BadObject: if the given revision could not be found
    :raise ValueError: If rev couldn't be parsed
    :raise IndexError: If invalid reflog index is specified"""
    # colon search mode ?
    if rev.startswith(':/'):
        # colon search mode
        raise NotImplementedError("commit by message search ( regex )")
    # END handle search
    obj = None
    ref = None
    output_type = "commit"
    start = 0
    parsed_to = 0
    lr = len(rev)
    # Scan for navigation tokens; everything before the first one names the
    # starting object (or the reflog reference, for '@').
    while start < lr:
        if rev[start] not in "^~:@":
            start += 1
            continue
        # END handle start
        token = rev[start]
        if obj is None:
            # token is a rev name
            if start == 0:
                ref = repo.head.ref
            else:
                if token == '@':
                    ref = name_to_object(repo, rev[:start], return_ref=True)
                else:
                    obj = name_to_object(repo, rev[:start])
                # END handle token
            # END handle refname
            if ref is not None:
                obj = ref.commit
            # END handle ref
        # END initialize obj on first token
        start += 1
        # try to parse {type}
        if start < lr and rev[start] == '{':
            end = rev.find('}', start)
            if end == -1:
                raise ValueError("Missing closing brace to define type in %s" % rev)
            output_type = rev[start + 1:end] # exclude brace
            # handle type
            if output_type == 'commit':
                pass # default
            elif output_type == 'tree':
                try:
                    obj = to_commit(obj).tree
                except (AttributeError, ValueError):
                    pass    # error raised later
                # END exception handling
            elif output_type in ('', 'blob'):
                if obj.type == 'tag':
                    obj = deref_tag(obj)
                else:
                    # cannot do anything for non-tags
                    pass
                # END handle tag
            elif token == '@':
                # try single int
                # BUG FIX: typo in assert message ("Requre" -> "Require")
                assert ref is not None, "Require Reference to access reflog"
                revlog_index = None
                try:
                    # transform reversed index into the format of our revlog
                    revlog_index = -(int(output_type) + 1)
                except ValueError:
                    # TODO: Try to parse the other date options, using parse_date
                    # maybe
                    raise NotImplementedError("Support for additional @{...} modes not implemented")
                # END handle revlog index
                try:
                    entry = ref.log_entry(revlog_index)
                except IndexError:
                    raise IndexError("Invalid revlog index: %i" % revlog_index)
                # END handle index out of bound
                obj = Object.new_from_sha(repo, hex_to_bin(entry.newhexsha))
                # make it pass the following checks
                output_type = None
            else:
                raise ValueError("Invalid output type: %s ( in %s )" % (output_type, rev))
            # END handle output type
            # empty output types don't require any specific type, its just about dereferencing tags
            if output_type and obj.type != output_type:
                raise ValueError("Could not accommodate requested object type %r, got %s" % (output_type, obj.type))
            # END verify output type
            start = end + 1 # skip brace
            parsed_to = start
            continue
        # END parse type
        # try to parse a number (e.g. the 3 in HEAD~3); ':' takes a path, not a number
        num = 0
        if token != ":":
            found_digit = False
            while start < lr:
                if rev[start] in digits:
                    num = num * 10 + int(rev[start])
                    start += 1
                    found_digit = True
                else:
                    break
                # END handle number
            # END number parse loop
            # no explicit number given, 1 is the default
            # It could be 0 though
            if not found_digit:
                num = 1
            # END set default num
        # END number parsing only if non-blob mode
        parsed_to = start
        # handle hierarchy walk
        try:
            if token == "~":
                # ~N: walk N first-parent links back
                obj = to_commit(obj)
                for _ in xrange(num):
                    obj = obj.parents[0]
                # END for each history item to walk
            elif token == "^":
                obj = to_commit(obj)
                # must be n'th parent
                if num:
                    obj = obj.parents[num - 1]
            elif token == ":":
                # :path — look the remainder of the rev up inside the tree
                if obj.type != "tree":
                    obj = obj.tree
                # END get tree type
                obj = obj[rev[start:]]
                parsed_to = lr
            else:
                raise ValueError("Invalid token: %r" % token)
            # END end handle tag
        except (IndexError, AttributeError):
            raise BadName("Invalid revision spec '%s' - not enough parent commits to reach '%s%i'" % (rev, token, num))
        # END exception handling
    # END parse loop
    # still no obj ? Its probably a simple name
    if obj is None:
        obj = name_to_object(repo, rev)
        parsed_to = lr
    # END handle simple name
    if obj is None:
        raise ValueError("Revision specifier could not be parsed: %s" % rev)
    if parsed_to != lr:
        raise ValueError("Didn't consume complete rev spec %s, consumed part: %s" % (rev, rev[:parsed_to]))
    return obj
| {
"content_hash": "58c81ed82fd32e5ef5edcf5e0fa02c29",
"timestamp": "",
"source": "github",
"line_count": 324,
"max_line_length": 120,
"avg_line_length": 33.55864197530864,
"alnum_prop": 0.5428124712590822,
"repo_name": "nvie/GitPython",
"id": "39e55880f14400d16efc6a09761e11441b47c348",
"size": "10873",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "git/repo/fun.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "561"
},
{
"name": "Python",
"bytes": "784076"
},
{
"name": "Shell",
"bytes": "367"
}
],
"symlink_target": ""
} |
"""Testcase for cssutils imports"""
before = len(locals()) # number of module names before the star import
from cssutils import *
after = len(locals()) # names afterwards; the difference is what '*' pulled in
import unittest
class CSSutilsImportTestCase(unittest.TestCase):
    """Verify that ``from cssutils import *`` exposes exactly the expected names."""
    def test_import_all(self):
        "from cssutils import *"
        import cssutils
        # Names visible at module level after the star import (see module top).
        act = globals()
        # The complete set of names the star import is expected to provide.
        exp = {'CSSParser': CSSParser,
               'CSSSerializer': CSSSerializer,
               'css': cssutils.css,
               'stylesheets': cssutils.stylesheets,
               }
        # Expected total: names present before the import, plus the starred
        # names, plus the `after` binding itself.
        exptotal = before + len(exp) + 1
        # imports before + * + "after"
        self.assertTrue(after == exptotal, 'too many imported')
        found = 0
        for e in exp:
            self.assertTrue(e in act, '%s not found' %e)
            self.assertTrue(act[e] == exp[e], '%s not the same' %e)
            found += 1
        self.assertTrue(found == len(exp))
if __name__ == '__main__':
    # unittest is already imported at module level; the duplicate in-guard
    # import was redundant and has been removed.
    unittest.main()
| {
"content_hash": "452f044de31dd373b84722d203573cef",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 67,
"avg_line_length": 29.352941176470587,
"alnum_prop": 0.5751503006012024,
"repo_name": "hackatbrown/2015.hackatbrown.org",
"id": "8674fe7c9168836dfe3c08a8b18b12323440278c",
"size": "998",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "hack-at-brown-2015/cssutils/tests/test_cssutilsimport.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2826195"
},
{
"name": "HTML",
"bytes": "853190"
},
{
"name": "JavaScript",
"bytes": "3333401"
},
{
"name": "Python",
"bytes": "3830632"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Reshapes the ability relations: drops Grade.subjects, gives Teacher a
    # many-to-many to Ability, constrains Ability to unique (grade, subject)
    # pairs, and finally removes the direct Ability.teacher field.
    dependencies = [
        ('app', '0037_auto_20160121_1227'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='grade',
            name='subjects',
        ),
        migrations.AddField(
            model_name='teacher',
            name='abilities',
            field=models.ManyToManyField(to='app.Ability'),
        ),
        migrations.AlterUniqueTogether(
            name='ability',
            unique_together=set([('grade', 'subject')]),
        ),
        migrations.RemoveField(
            model_name='ability',
            name='teacher',
        ),
    ]
| {
"content_hash": "737516ef3b1386082af7e41e8a974f65",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 59,
"avg_line_length": 24.333333333333332,
"alnum_prop": 0.536986301369863,
"repo_name": "malaonline/Server",
"id": "18ef8e9936796ded2b7b527d2726b38c6e07e18e",
"size": "754",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/app/migrations/0038_auto_20160121_1709.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "236251"
},
{
"name": "HTML",
"bytes": "532032"
},
{
"name": "JavaScript",
"bytes": "580515"
},
{
"name": "Python",
"bytes": "987542"
},
{
"name": "Shell",
"bytes": "1881"
}
],
"symlink_target": ""
} |
from pybloom_live import BloomFilter
from scrapy.utils.job import job_dir
from scrapy.dupefilter import BaseDupeFilter
class BLOOMDupeFilter(BaseDupeFilter):
    """Request Fingerprint duplicates filter backed by an in-memory Bloom filter.

    The request URL is used as the fingerprint, so duplicate checks are
    probabilistic: false positives are possible within the configured error
    rate, false negatives are not.
    """

    def __init__(self, path=None):
        # `path` is accepted for interface compatibility but state is kept
        # purely in memory.
        self.file = None
        # Room for 2M fingerprints at a 0.001% false-positive rate.
        self.fingerprints = BloomFilter(2000000, 0.00001)

    @classmethod
    def from_settings(cls, settings):
        """Build the filter from crawler settings (job directory as path)."""
        return cls(job_dir(settings))

    def request_seen(self, request):
        """Return True when the request URL was seen before; record it otherwise."""
        url = request.url
        if url in self.fingerprints:
            return True
        self.fingerprints.add(url)

    def close(self, reason):
        """Release the in-memory filter when the spider closes."""
        self.fingerprints = None
| {
"content_hash": "9f41cfdffcc8fa9998ad265450857a9e",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 57,
"avg_line_length": 26.708333333333332,
"alnum_prop": 0.6692667706708268,
"repo_name": "nolram/news_crawler",
"id": "426cc7ef14d20a11d026c208d2dbecd2558d0908",
"size": "641",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "news_crawler/duplicates_filter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16111"
},
{
"name": "Shell",
"bytes": "136"
}
],
"symlink_target": ""
} |
import os
from solnlib.modular_input.checkpointer import FileCheckpointer
import sys
reload(sys)
sys.setdefaultencoding('utf8')
def get_file_checkpointer(dir):
    """Return a FileCheckpointer rooted at *dir*, creating the directory if needed.

    Uses try/except instead of an exists-then-create check so that two
    concurrent callers cannot race between the check and ``os.makedirs``.
    """
    try:
        os.makedirs(dir)
    except OSError:
        # Directory already exists (possibly created by a concurrent caller);
        # re-raise anything else (permissions, bad path, ...).
        if not os.path.isdir(dir):
            raise
    return FileCheckpointer(dir)
# initial kv helper
# File-backed checkpoint directories located under <package root>/kv_store/.
user_path = os.path.join(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'kv_store'), 'user')
topic_path = os.path.join(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'kv_store'), 'topic')
# Module-level checkpointer singletons used by the helper functions below.
user_helper = get_file_checkpointer(user_path)
topic_helper = get_file_checkpointer(topic_path)
def initiate_kv_helper():
    """Re-create the module-level checkpointer helpers.

    Bug fix: the original assigned to function-local names, so the freshly
    created checkpointers were silently discarded.  The ``global`` statement
    makes the reset actually rebind the module-level singletons.
    """
    global user_helper, topic_helper
    user_helper = get_file_checkpointer(user_path)
    topic_helper = get_file_checkpointer(topic_path)
def get_Context(name):
    """Return the stored context for *name*, or a default dict when absent.

    Falls back to re-initializing the module-level helpers if they have been
    cleared.  Uses ``is None`` (identity) instead of ``== None``.
    """
    if user_helper is None:
        initiate_kv_helper()
    context = user_helper.get(name)
    if context is None:
        # No stored context yet: hand back a placeholder default.
        context = {'test_key': 'test_value'}
    return context
def set_Context(name, context):
    """Persist *context* under *name* via the user checkpointer.

    Uses ``is None`` (identity) instead of ``== None``.
    """
    if user_helper is None:
        initiate_kv_helper()
    user_helper.update(name, context)
def _add_topic(topic, id, content):
    """Append ``{'id': id, 'content': content}`` to the *topic* list.

    Shared implementation for the three ``add_*`` helpers below, which were
    previously three identical copies.  Entries are de-duplicated by ``id``:
    if an entry with the same id already exists, nothing is written.
    """
    items = topic_helper.get(topic)
    if not items:
        items = []
    for item in items:
        if item['id'] == id:
            # Already stored; skip the write entirely.
            return
    items.append({'id': id, 'content': content})
    topic_helper.update(topic, items)
def add_xiaohua(id, content):
    """Store a 'xiaohua' (joke) entry, skipping duplicates by id."""
    _add_topic('xiaohua', id, content)
def add_xingzuo(id, content):
    """Store a 'xingzuo' (horoscope) entry, skipping duplicates by id."""
    _add_topic('xingzuo', id, content)
def add_youxi(id, content):
    """Store a 'youxi' (game) entry, skipping duplicates by id."""
    _add_topic('youxi', id, content)
def get_xiaohua():
    """Return the first stored 'xiaohua' entry."""
    return topic_helper.get('xiaohua')[0]
def get_xingzuo():
    """Return the first stored 'xingzuo' entry."""
    return topic_helper.get('xingzuo')[0]
def get_youxi():
    """Return the first stored 'youxi' entry."""
    return topic_helper.get('youxi')[0]
| {
"content_hash": "5f3f3fb4ebe043315db7a25ed2b33641",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 121,
"avg_line_length": 26.473118279569892,
"alnum_prop": 0.644191714053615,
"repo_name": "Teisei/TaxiRobot",
"id": "60d4a537ef415fcd31d2047d14446d55b8d55f4a",
"size": "2500",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/kvstore_module.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "730774"
},
{
"name": "Shell",
"bytes": "86"
}
],
"symlink_target": ""
} |
"""
Copyright (c) 2010 The Echo Nest. All rights reserved.
Created by Tyler Williams on 2010-04-25.
"""
from . import util
class ResultList(list):
    """A list of results carrying paging metadata.

    ``start`` is the caller-supplied offset of this slice in the full result
    set; ``total`` is the size of the full set and falls back to the length
    of the slice when passed as 0.
    """

    def __init__(self, li, start=0, total=0):
        self.extend(li)
        self.start = start
        self.total = len(li) if total == 0 else total
class GenericProxy(object):
    """Base proxy that forwards attribute calls to the remote API via ``util.callm``."""

    def __init__(self):
        # Per-instance cache of non-core attributes returned by the API.
        self.cache = {}

    def get_attribute(self, method_name, **kwargs):
        """Issue a GET-style call against ``<object_type>/<method_name>``."""
        response = util.callm("%s/%s" % (self._object_type, method_name), kwargs)
        return response['response']

    def post_attribute(self, method_name, **kwargs):
        """Issue a POST call; an optional ``data`` kwarg becomes the POST body."""
        payload = kwargs.pop('data', {})
        response = util.callm("%s/%s" % (self._object_type, method_name), kwargs,
                              POST=True, data=payload)
        return response['response']
class ArtistProxy(GenericProxy):
    """Proxy for artist API methods.

    ``identifier`` may be an Echo Nest ID (short/long/foreign form) or a
    plain artist name; :meth:`get_attribute` picks the matching query key.
    """

    def __init__(self, identifier, buckets=None, **kwargs):
        super(ArtistProxy, self).__init__()
        buckets = buckets or []
        self.id = identifier
        self._object_type = 'artist'
        kwargs = dict((str(k), v) for (k, v) in kwargs.items())
        # the following are integral to all artist objects... the rest is up to you!
        core_attrs = ['name']
        if not all(ca in kwargs for ca in core_attrs):
            # A core attribute is missing: fetch the artist profile to fill it.
            profile = self.get_attribute('profile', **{'bucket': buckets})
            kwargs.update(profile.get('artist'))
        # Promote core attributes (and any server-supplied id) onto the
        # instance; all remaining kwargs land in the cache.  A plain loop
        # replaces the previous side-effecting list comprehension.
        for ca in core_attrs + ['id']:
            if ca in kwargs:
                setattr(self, ca, kwargs.pop(ca))
        self.cache.update(kwargs)

    def get_attribute(self, *args, **kwargs):
        """Call the artist API, keying by id when *identifier* looks like an
        Echo Nest ID and by name otherwise."""
        if util.short_regex.match(self.id) or util.long_regex.match(self.id) or util.foreign_regex.match(self.id):
            kwargs['id'] = self.id
        else:
            kwargs['name'] = self.id
        return super(ArtistProxy, self).get_attribute(*args, **kwargs)
class CatalogProxy(GenericProxy):
    """Proxy for catalog API methods.

    ``identifier`` may be an Echo Nest catalog ID or a catalog name.  For a
    name, the catalog's profile is looked up; if it does not exist, a new
    catalog of the given ``type`` is created.
    """

    def __init__(self, identifier, type, buckets=None, **kwargs):
        super(CatalogProxy, self).__init__()
        buckets = buckets or []
        self.id = identifier
        self._object_type = 'catalog'
        kwargs = dict((str(k), v) for (k, v) in kwargs.items())
        # the following are integral to all catalog objects... the rest is up to you!
        core_attrs = ['name']
        if not all(ca in kwargs for ca in core_attrs):
            if util.short_regex.match(self.id) or util.long_regex.match(self.id) or util.foreign_regex.match(self.id):
                # Looks like a real catalog ID: it must already exist.
                profile = self.get_attribute('profile')
                kwargs.update(profile['catalog'])
            else:
                if not type:
                    raise Exception('You must specify a "type"!')
                try:
                    profile = self.get_attribute('profile')
                    existing_type = profile['catalog'].get('type', 'Unknown')
                    if type != existing_type:
                        raise Exception("Catalog type requested (%s) does not match existing catalog type (%s)" % (type, existing_type))
                    kwargs.update(profile['catalog'])
                except util.EchoNestAPIError:
                    # No catalog by that name yet: create one of the given type.
                    profile = self.post_attribute('create', type=type, **kwargs)
                    kwargs.update(profile)
        # Promote core attributes (and any server-supplied id) onto the
        # instance; remaining kwargs go to the cache.  Plain loop replaces the
        # previous side-effecting list comprehension.
        for ca in core_attrs + ['id']:
            if ca in kwargs:
                setattr(self, ca, kwargs.pop(ca))
        self.cache.update(kwargs)

    def get_attribute_simple(self, *args, **kwargs):
        # omit name/id kwargs for this call
        return super(CatalogProxy, self).get_attribute(*args, **kwargs)

    def get_attribute(self, *args, **kwargs):
        """Call the catalog API, keying by id or by name (see class docs)."""
        if util.short_regex.match(self.id) or util.long_regex.match(self.id) or util.foreign_regex.match(self.id):
            kwargs['id'] = self.id
        else:
            kwargs['name'] = self.id
        return super(CatalogProxy, self).get_attribute(*args, **kwargs)

    def post_attribute(self, *args, **kwargs):
        """POST to the catalog API, keying by id or by name (see class docs)."""
        if util.short_regex.match(self.id) or util.long_regex.match(self.id) or util.foreign_regex.match(self.id):
            kwargs['id'] = self.id
        else:
            kwargs['name'] = self.id
        return super(CatalogProxy, self).post_attribute(*args, **kwargs)
class PlaylistProxy(GenericProxy):
    """Proxy for dynamic playlist API methods.

    When ``session_id`` is given, the proxy attaches to an existing dynamic
    playlist session; otherwise a new session is created from the remaining
    keyword arguments (the public ``genres`` kwarg is translated to the API's
    ``genre`` key).
    """

    def __init__(self, session_id=None, buckets=None, **kwargs):
        super(PlaylistProxy, self).__init__()
        core_attrs = ['session_id']
        self._object_type = 'playlist'
        if session_id:
            self.session_id = session_id
        else:
            buckets = buckets or []
            kwargs['bucket'] = buckets
            # Rename the public 'genres' kwarg to the API's 'genre' key
            # (pop keeps the original KeyError behavior when it is missing).
            kwargs['genre'] = kwargs.pop('genres')
            kwargs = dict((str(k), v) for (k, v) in kwargs.items())
            if not all(ca in kwargs for ca in core_attrs):
                # No session yet: ask the API to create one.  (A redundant
                # second str-key conversion was removed here.)
                profile = self.get_attribute('create', **kwargs)
                kwargs.update(profile)
        # Promote core attributes onto the instance; the rest goes to the
        # cache.  Plain loop replaces the side-effecting list comprehension.
        for ca in core_attrs:
            if ca in kwargs:
                setattr(self, ca, kwargs.pop(ca))
        self.cache.update(kwargs)

    def get_attribute(self, method, **kwargs):
        """Route attribute calls through the 'dynamic' playlist namespace."""
        return super(PlaylistProxy, self).get_attribute('dynamic/' + method, **kwargs)
class SongProxy(GenericProxy):
    """Proxy for song API methods, keyed by song id."""

    def __init__(self, identifier, buckets=None, **kwargs):
        super(SongProxy, self).__init__()
        buckets = buckets or []
        self.id = identifier
        self._object_type = 'song'
        kwargs = dict((str(k), v) for (k, v) in kwargs.items())
        # Optional attributes returned by some API calls: copied onto the
        # instance when present but intentionally left in kwargs so they also
        # land in the cache below.
        # BAW -- this is debug output from identify that returns a track_id. i am not sure where else to access this..
        for optional in ('track_id', 'tag', 'score', 'audio', 'release_image'):
            if optional in kwargs:
                setattr(self, optional, kwargs[optional])
        # the following are integral to all song objects... the rest is up to you!
        core_attrs = ['title', 'artist_name', 'artist_id']
        if not all(ca in kwargs for ca in core_attrs):
            profile = self.get_attribute('profile', **{'id': self.id, 'bucket': buckets})
            kwargs.update(profile.get('songs')[0])
        # Core attributes are mandatory at this point; they are removed from
        # kwargs so they do not duplicate into the cache (unconditional pop,
        # matching the original behavior).
        for ca in core_attrs:
            setattr(self, ca, kwargs.pop(ca))
        self.cache.update(kwargs)

    def get_attribute(self, *args, **kwargs):
        """Call the song API, always passing this song's id."""
        kwargs['id'] = self.id
        return super(SongProxy, self).get_attribute(*args, **kwargs)
class TrackProxy(GenericProxy):
    """Proxy for track API methods.

    Do not instantiate this class directly; use the convenience functions in
    track.py (for example ``track.track_from_filename``), which always fetch
    the ``audio_summary`` bucket.
    """

    def __init__(self, identifier, md5, properties):
        super(TrackProxy, self).__init__()
        self._object_type = 'track'
        self.id = identifier
        self.md5 = md5
        self.analysis_url = None
        # Server-provided properties may overwrite the defaults set above.
        self.__dict__.update(properties)
| {
"content_hash": "d18dbcb3f656aa0183fdf27755fe62b8",
"timestamp": "",
"source": "github",
"line_count": 172,
"max_line_length": 136,
"avg_line_length": 42.27325581395349,
"alnum_prop": 0.570072892311924,
"repo_name": "ruohoruotsi/pyechonest",
"id": "ba5d26db8092156d02a5a50c22f90bd6adb48c4b",
"size": "7312",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyechonest/proxies.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "133257"
},
{
"name": "Shell",
"bytes": "1752"
}
],
"symlink_target": ""
} |
from celery.task import task
@task
def dummy(x, y):
    """Trivial Celery task that returns ``x + y`` (e.g. for smoke-testing workers)."""
    return x + y
| {
"content_hash": "7a3f0ad1797986a0aae25da61d296c09",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 28,
"avg_line_length": 14,
"alnum_prop": 0.6571428571428571,
"repo_name": "toopy/django-toopy-deployer",
"id": "9ce1880c643b0bb0be513eb038a974b67d2760ba",
"size": "86",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/django_toopy_deployer/apps/server/tasks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "37015"
},
{
"name": "Shell",
"bytes": "208"
}
],
"symlink_target": ""
} |
def search(self, key):
    """Search the skip list for *key* and return its data, or None.

    Walks right along each level while the next node's key is smaller than
    *key*; drops down a level when the level is exhausted or the next key
    overshoots.  Rewritten with early returns instead of the original
    ``found``/``stop`` flag variables, and ``is None`` instead of ``== None``.
    """
    current = self.head
    while current is not None:
        nxt = current.getNext()
        if nxt is None:
            # End of this level: drop down.
            current = current.getDown()
        elif nxt.getKey() == key:
            return nxt.getData()
        elif key < nxt.getKey():
            # Overshot on this level: drop down.
            current = current.getDown()
        else:
            # Keep moving right on this level.
            current = nxt
    # Ran off the bottom of the structure: key not present.
    return None
| {
"content_hash": "7e6327ddd3b0ccfb081b962aa2e57a7c",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 56,
"avg_line_length": 30.363636363636363,
"alnum_prop": 0.4491017964071856,
"repo_name": "robin1885/algorithms-exercises-using-python",
"id": "6b3c88fe1e004fe17ed4617a3e68ac638c1436d7",
"size": "668",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "source-code-from-author-book/Listings-for-Second-Edition/listing_8_15.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "182896"
}
],
"symlink_target": ""
} |
from plex.objects.core.base import Property
from plex.objects.directory import Directory
from plex.objects.library.metadata.album import Album
from plex.objects.library.metadata.artist import Artist
from plex.objects.library.metadata.base import Metadata
from plex.objects.mixins.playlist_item import PlaylistItemMixin
from plex.objects.mixins.rate import RateMixin
from plex.objects.mixins.scrobble import ScrobbleMixin
from plex.objects.mixins.session import SessionMixin
class Track(Directory, Metadata, PlaylistItemMixin, RateMixin, SessionMixin, ScrobbleMixin):
    """A single audio track; parent artist/album are resolved lazily."""

    artist = Property(resolver=lambda: Track.construct_artist)
    album = Property(resolver=lambda: Track.construct_album)

    index = Property(type=int)

    view_count = Property('viewCount', int)
    view_offset = Property('viewOffset', int)

    duration = Property(type=int)

    chapter_source = Property('chapterSource')

    def __repr__(self):
        # Prefer the artist label, then the album (with year), then bare title.
        if self.artist:
            return '<Track %r - %r>' % (self.artist.title, self.title)
        if self.album:
            return '<Track %r (%s) - %r>' % (self.album.title, self.album.year, self.title)
        return '<Track %r>' % self.title

    @staticmethod
    def construct_artist(client, node):
        """Build the parent Artist from this node's grandparent attributes."""
        return Artist.construct(client, node, {
            'key': 'grandparentKey',
            'ratingKey': 'grandparentRatingKey',
            'title': 'grandparentTitle',
            'art': 'grandparentArt',
            'thumb': 'grandparentThumb'
        }, child=True)

    @staticmethod
    def construct_album(client, node):
        """Build the parent Album from this node's parent attributes."""
        return Album.construct(client, node, {
            'index': 'parentIndex',
            'key': 'parentKey',
            'ratingKey': 'parentRatingKey',
            'title': 'parentTitle',
            'year': 'parentYear',
            'thumb': 'parentThumb'
        }, child=True)
| {
"content_hash": "5b81d177fd71fe35649f434679bf5595",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 92,
"avg_line_length": 31.235294117647058,
"alnum_prop": 0.5979284369114878,
"repo_name": "fuzeman/plex.py",
"id": "6f3cb3efc8e5aafdeb8a25cbcdf5e03ad4fbf73b",
"size": "2124",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plex/objects/library/metadata/track.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "135510"
}
],
"symlink_target": ""
} |
from django.urls import path
from .views import ContactView, ContactSuccessView
urlpatterns = [
    # Post-submission confirmation page, then the contact form itself.
    path('success/', ContactSuccessView.as_view(), name='contact_success'),
    path('', ContactView.as_view(), name='contact'),
]
| {
"content_hash": "07b0ac5053e69df9828b4a2336e70681",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 75,
"avg_line_length": 25.555555555555557,
"alnum_prop": 0.7130434782608696,
"repo_name": "richardcornish/richardcornish",
"id": "6ebb828644a395caaa92b534d1c724c0ce670e93",
"size": "230",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "richardcornish/contact/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "194488"
},
{
"name": "HTML",
"bytes": "71437"
},
{
"name": "JavaScript",
"bytes": "495426"
},
{
"name": "Python",
"bytes": "46473"
}
],
"symlink_target": ""
} |
"""Setup file for easy installation"""
import os
from setuptools import setup, find_packages
from tests import test_cmd
# Directory containing this setup script.
ROOT = os.path.dirname(__file__)
# Package metadata and build configuration for the django-payzen distribution.
setup(
    name="django-payzen",
    version="1.0.6",
    description="Django app to manage payments with Payzen ETP",
    license='MIT',
    author="Bertrand Svetchine",
    author_email="bertrand.svetchine@gmail.com",
    url="https://github.com/bsvetchine/django-payzen",
    packages=find_packages(),
    include_package_data=True,
    install_requires=["Django"],
    classifiers=[
        "Development Status :: 4 - Beta",
        "Environment :: Web Environment",
        "Intended Audience :: Developers",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.6",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.2",
        "Programming Language :: Python :: 3.3",
        "Programming Language :: Python :: 3.4",
        "Framework :: Django",
        "Topic :: Software Development"],
    # `python setup.py test` dispatches to the project's custom test command.
    cmdclass={'test': test_cmd.TestCommand}
)
| {
"content_hash": "038be46251618c36891f7f4c99b004f1",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 64,
"avg_line_length": 30.973684210526315,
"alnum_prop": 0.6244689889549703,
"repo_name": "zehome/django-payzen",
"id": "4af5ce78619f451bcabe7cc4fa00b8672aaacb0b",
"size": "1177",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "351"
},
{
"name": "Python",
"bytes": "74499"
}
],
"symlink_target": ""
} |
""" Test Quantum Gradient Framework """
import unittest
from test import combine
import numpy as np
from ddt import ddt
from qiskit import QuantumCircuit
from qiskit.algorithms.gradients import (
FiniteDiffEstimatorGradient,
LinCombEstimatorGradient,
ParamShiftEstimatorGradient,
SPSAEstimatorGradient,
)
from qiskit.circuit import Parameter
from qiskit.circuit.library import EfficientSU2, RealAmplitudes
from qiskit.circuit.library.standard_gates import RXXGate, RYYGate, RZXGate, RZZGate
from qiskit.primitives import Estimator
from qiskit.quantum_info import Operator, SparsePauliOp
from qiskit.quantum_info.random import random_pauli_list
from qiskit.test import QiskitTestCase
@ddt
class TestEstimatorGradient(QiskitTestCase):
    """Test Estimator Gradient"""
    # Each @combine-decorated test runs once per gradient class listed in
    # `grad`; FiniteDiffEstimatorGradient additionally needs an explicit
    # `epsilon` step size at construction.
    @combine(
        grad=[FiniteDiffEstimatorGradient, ParamShiftEstimatorGradient, LinCombEstimatorGradient]
    )
    def test_gradient_operators(self, grad):
        """Test the estimator gradient for different operators"""
        estimator = Estimator()
        a = Parameter("a")
        qc = QuantumCircuit(1)
        qc.h(0)
        qc.p(a, 0)
        qc.h(0)
        if grad is FiniteDiffEstimatorGradient:
            gradient = grad(estimator, epsilon=1e-6)
        else:
            gradient = grad(estimator)
        op = SparsePauliOp.from_list([("Z", 1)])
        correct_result = -1 / np.sqrt(2)
        param = [np.pi / 4]
        value = gradient.run([qc], [op], [param]).result().gradients[0]
        self.assertAlmostEqual(value[0], correct_result, 3)
        # Same expectation for a SparsePauliOp and for a dense Operator.
        op = SparsePauliOp.from_list([("Z", 1)])
        value = gradient.run([qc], [op], [param]).result().gradients[0]
        self.assertAlmostEqual(value[0], correct_result, 3)
        op = Operator.from_label("Z")
        value = gradient.run([qc], [op], [param]).result().gradients[0]
        self.assertAlmostEqual(value[0], correct_result, 3)
    @combine(
        grad=[FiniteDiffEstimatorGradient, ParamShiftEstimatorGradient, LinCombEstimatorGradient]
    )
    def test_gradient_p(self, grad):
        """Test the estimator gradient for p"""
        estimator = Estimator()
        a = Parameter("a")
        qc = QuantumCircuit(1)
        qc.h(0)
        qc.p(a, 0)
        qc.h(0)
        if grad is FiniteDiffEstimatorGradient:
            gradient = grad(estimator, epsilon=1e-6)
        else:
            gradient = grad(estimator)
        op = SparsePauliOp.from_list([("Z", 1)])
        param_list = [[np.pi / 4], [0], [np.pi / 2]]
        correct_results = [[-1 / np.sqrt(2)], [0], [-1]]
        for i, param in enumerate(param_list):
            gradients = gradient.run([qc], [op], [param]).result().gradients[0]
            for j, value in enumerate(gradients):
                self.assertAlmostEqual(value, correct_results[i][j], 3)
    @combine(
        grad=[FiniteDiffEstimatorGradient, ParamShiftEstimatorGradient, LinCombEstimatorGradient]
    )
    def test_gradient_u(self, grad):
        """Test the estimator gradient for u"""
        estimator = Estimator()
        a = Parameter("a")
        b = Parameter("b")
        c = Parameter("c")
        qc = QuantumCircuit(1)
        qc.h(0)
        qc.u(a, b, c, 0)
        qc.h(0)
        if grad is FiniteDiffEstimatorGradient:
            gradient = grad(estimator, epsilon=1e-6)
        else:
            gradient = grad(estimator)
        op = SparsePauliOp.from_list([("Z", 1)])
        param_list = [[np.pi / 4, 0, 0], [np.pi / 4, np.pi / 4, np.pi / 4]]
        correct_results = [[-0.70710678, 0.0, 0.0], [-0.35355339, -0.85355339, -0.85355339]]
        for i, param in enumerate(param_list):
            gradients = gradient.run([qc], [op], [param]).result().gradients[0]
            for j, value in enumerate(gradients):
                self.assertAlmostEqual(value, correct_results[i][j], 3)
    @combine(
        grad=[FiniteDiffEstimatorGradient, ParamShiftEstimatorGradient, LinCombEstimatorGradient]
    )
    def test_gradient_efficient_su2(self, grad):
        """Test the estimator gradient for EfficientSU2"""
        estimator = Estimator()
        qc = EfficientSU2(2, reps=1)
        op = SparsePauliOp.from_list([("ZI", 1)])
        if grad is FiniteDiffEstimatorGradient:
            gradient = grad(estimator, epsilon=1e-6)
        else:
            gradient = grad(estimator)
        param_list = [
            [np.pi / 4 for param in qc.parameters],
            [np.pi / 2 for param in qc.parameters],
        ]
        correct_results = [
            [
                -0.35355339,
                -0.70710678,
                0,
                0.35355339,
                0,
                -0.70710678,
                0,
                0,
            ],
            [0, 0, 0, 1, 0, 0, 0, 0],
        ]
        for i, param in enumerate(param_list):
            gradients = gradient.run([qc], [op], [param]).result().gradients[0]
            np.testing.assert_allclose(gradients, correct_results[i], atol=1e-3)
    @combine(
        grad=[FiniteDiffEstimatorGradient, ParamShiftEstimatorGradient, LinCombEstimatorGradient],
    )
    def test_gradient_2qubit_gate(self, grad):
        """Test the estimator gradient for 2 qubit gates"""
        estimator = Estimator()
        for gate in [RXXGate, RYYGate, RZZGate, RZXGate]:
            param_list = [[np.pi / 4], [np.pi / 2]]
            correct_results = [
                [-0.70710678],
                [-1],
            ]
            op = SparsePauliOp.from_list([("ZI", 1)])
            for i, param in enumerate(param_list):
                a = Parameter("a")
                qc = QuantumCircuit(2)
                if grad is FiniteDiffEstimatorGradient:
                    gradient = grad(estimator, epsilon=1e-6)
                else:
                    gradient = grad(estimator)
                # RZZ is diagonal; sandwich it with Hadamards so the ZI
                # expectation is sensitive to the parameter.
                if gate is RZZGate:
                    qc.h([0, 1])
                    qc.append(gate(a), [qc.qubits[0], qc.qubits[1]], [])
                    qc.h([0, 1])
                else:
                    qc.append(gate(a), [qc.qubits[0], qc.qubits[1]], [])
                gradients = gradient.run([qc], [op], [param]).result().gradients[0]
                np.testing.assert_allclose(gradients, correct_results[i], atol=1e-3)
    @combine(
        grad=[FiniteDiffEstimatorGradient, ParamShiftEstimatorGradient, LinCombEstimatorGradient]
    )
    def test_gradient_parameter_coefficient(self, grad):
        """Test the estimator gradient for parameter variables with coefficients"""
        estimator = Estimator()
        qc = RealAmplitudes(num_qubits=2, reps=1)
        qc.rz(qc.parameters[0].exp() + 2 * qc.parameters[1], 0)
        qc.rx(3.0 * qc.parameters[0] + qc.parameters[1].sin(), 1)
        qc.u(qc.parameters[0], qc.parameters[1], qc.parameters[3], 1)
        qc.p(2 * qc.parameters[0] + 1, 0)
        qc.rxx(qc.parameters[0] + 2, 0, 1)
        if grad is FiniteDiffEstimatorGradient:
            gradient = grad(estimator, epsilon=1e-6)
        else:
            gradient = grad(estimator)
        param_list = [[np.pi / 4 for _ in qc.parameters], [np.pi / 2 for _ in qc.parameters]]
        correct_results = [
            [-0.7266653, -0.4905135, -0.0068606, -0.9228880],
            [-3.5972095, 0.10237173, -0.3117748, 0],
        ]
        op = SparsePauliOp.from_list([("ZI", 1)])
        for i, param in enumerate(param_list):
            gradients = gradient.run([qc], [op], [param]).result().gradients[0]
            np.testing.assert_allclose(gradients, correct_results[i], atol=1e-3)
    @combine(
        grad=[FiniteDiffEstimatorGradient, ParamShiftEstimatorGradient, LinCombEstimatorGradient]
    )
    def test_gradient_parameters(self, grad):
        """Test the estimator gradient for parameters"""
        estimator = Estimator()
        a = Parameter("a")
        b = Parameter("b")
        qc = QuantumCircuit(1)
        qc.rx(a, 0)
        qc.rx(b, 0)
        if grad is FiniteDiffEstimatorGradient:
            gradient = grad(estimator, epsilon=1e-6)
        else:
            gradient = grad(estimator)
        param_list = [[np.pi / 4, np.pi / 2]]
        correct_results = [
            [-0.70710678],
        ]
        op = SparsePauliOp.from_list([("Z", 1)])
        # `parameters=[[a]]` restricts differentiation to parameter `a` only.
        for i, param in enumerate(param_list):
            gradients = gradient.run([qc], [op], [param], parameters=[[a]]).result().gradients[0]
            np.testing.assert_allclose(gradients, correct_results[i], atol=1e-3)
    @combine(
        grad=[FiniteDiffEstimatorGradient, ParamShiftEstimatorGradient, LinCombEstimatorGradient]
    )
    def test_gradient_multi_arguments(self, grad):
        """Test the estimator gradient for multiple arguments"""
        estimator = Estimator()
        a = Parameter("a")
        b = Parameter("b")
        qc = QuantumCircuit(1)
        qc.rx(a, 0)
        qc2 = QuantumCircuit(1)
        qc2.rx(b, 0)
        if grad is FiniteDiffEstimatorGradient:
            gradient = grad(estimator, epsilon=1e-6)
        else:
            gradient = grad(estimator)
        param_list = [[np.pi / 4], [np.pi / 2]]
        correct_results = [
            [-0.70710678],
            [-1],
        ]
        op = SparsePauliOp.from_list([("Z", 1)])
        gradients = gradient.run([qc, qc2], [op] * 2, param_list).result().gradients
        np.testing.assert_allclose(gradients, correct_results, atol=1e-3)
        # Per-circuit `parameters` entries: a subset, a subset, and None
        # (None means differentiate with respect to all parameters).
        c = Parameter("c")
        qc3 = QuantumCircuit(1)
        qc3.rx(c, 0)
        qc3.ry(a, 0)
        param_list2 = [[np.pi / 4], [np.pi / 4, np.pi / 4], [np.pi / 4, np.pi / 4]]
        correct_results2 = [
            [-0.70710678],
            [-0.5],
            [-0.5, -0.5],
        ]
        gradients2 = (
            gradient.run([qc, qc3, qc3], [op] * 3, param_list2, parameters=[[a], [c], None])
            .result()
            .gradients
        )
        np.testing.assert_allclose(gradients2[0], correct_results2[0], atol=1e-3)
        np.testing.assert_allclose(gradients2[1], correct_results2[1], atol=1e-3)
        np.testing.assert_allclose(gradients2[2], correct_results2[2], atol=1e-3)
    @combine(
        grad=[FiniteDiffEstimatorGradient, ParamShiftEstimatorGradient, LinCombEstimatorGradient]
    )
    def test_gradient_validation(self, grad):
        """Test estimator gradient's validation"""
        estimator = Estimator()
        a = Parameter("a")
        qc = QuantumCircuit(1)
        qc.rx(a, 0)
        if grad is FiniteDiffEstimatorGradient:
            gradient = grad(estimator, epsilon=1e-6)
            # Negative finite-difference steps must be rejected.
            with self.assertRaises(ValueError):
                _ = grad(estimator, epsilon=-0.1)
        else:
            gradient = grad(estimator)
        param_list = [[np.pi / 4], [np.pi / 2]]
        op = SparsePauliOp.from_list([("Z", 1)])
        # Mismatched lengths of circuits/observables/values/parameters.
        with self.assertRaises(ValueError):
            gradient.run([qc], [op], param_list)
        with self.assertRaises(ValueError):
            gradient.run([qc, qc], [op, op], param_list, parameters=[[a]])
        with self.assertRaises(ValueError):
            gradient.run([qc, qc], [op], param_list, parameters=[[a]])
        with self.assertRaises(ValueError):
            gradient.run([qc], [op], [[np.pi / 4, np.pi / 4]])
    def test_spsa_gradient(self):
        """Test the SPSA estimator gradient"""
        estimator = Estimator()
        with self.assertRaises(ValueError):
            _ = SPSAEstimatorGradient(estimator, epsilon=-0.1)
        a = Parameter("a")
        b = Parameter("b")
        qc = QuantumCircuit(2)
        qc.rx(b, 0)
        qc.rx(a, 1)
        param_list = [[1, 1]]
        correct_results = [[-0.84147098, 0.84147098]]
        op = SparsePauliOp.from_list([("ZI", 1)])
        # Fixed seed makes the stochastic perturbation directions reproducible.
        gradient = SPSAEstimatorGradient(estimator, epsilon=1e-6, seed=123)
        gradients = gradient.run([qc], [op], param_list).result().gradients
        np.testing.assert_allclose(gradients, correct_results, atol=1e-3)
        # multi parameters
        gradient = SPSAEstimatorGradient(estimator, epsilon=1e-6, seed=123)
        param_list2 = [[1, 1], [1, 1], [3, 3]]
        gradients2 = (
            gradient.run([qc] * 3, [op] * 3, param_list2, parameters=[None, [b], None])
            .result()
            .gradients
        )
        correct_results2 = [[-0.84147098, 0.84147098], [0.84147098], [-0.14112001, 0.14112001]]
        for grad, correct in zip(gradients2, correct_results2):
            np.testing.assert_allclose(grad, correct, atol=1e-3)
        # batch size
        correct_results = [[-0.84147098, 0.1682942]]
        gradient = SPSAEstimatorGradient(estimator, epsilon=1e-6, batch_size=5, seed=123)
        gradients = gradient.run([qc], [op], param_list).result().gradients
        np.testing.assert_allclose(gradients, correct_results, atol=1e-3)
    @combine(grad=[ParamShiftEstimatorGradient, LinCombEstimatorGradient])
    def test_gradient_random_parameters(self, grad):
        """Test param shift and lin comb w/ random parameters"""
        rng = np.random.default_rng(123)
        qc = RealAmplitudes(num_qubits=3, reps=1)
        params = qc.parameters
        qc.rx(3.0 * params[0] + params[1].sin(), 0)
        qc.ry(params[0].exp() + 2 * params[1], 1)
        qc.rz(params[0] * params[1] - params[2], 2)
        qc.p(2 * params[0] + 1, 0)
        qc.u(params[0].sin(), params[1] - 2, params[2] * params[3], 1)
        qc.sx(2)
        qc.rxx(params[0].sin(), 1, 2)
        qc.ryy(params[1].cos(), 2, 0)
        qc.rzz(params[2] * 2, 0, 1)
        qc.crx(params[0].exp(), 1, 2)
        qc.cry(params[1].arctan(), 2, 0)
        qc.crz(params[2] * -2, 0, 1)
        qc.dcx(0, 1)
        qc.csdg(0, 1)
        qc.toffoli(0, 1, 2)
        qc.iswap(0, 2)
        qc.swap(1, 2)
        qc.global_phase = params[0] * params[1] + params[2].cos().exp()
        size = 10
        op = SparsePauliOp(random_pauli_list(num_qubits=qc.num_qubits, size=size, seed=rng))
        op.coeffs = rng.normal(0, 10, size)
        estimator = Estimator()
        # Cross-check analytic gradients against finite differences.
        findiff = FiniteDiffEstimatorGradient(estimator, 1e-6)
        gradient = grad(estimator)
        num_tries = 10
        param_values = rng.normal(0, 2, (num_tries, qc.num_parameters)).tolist()
        np.testing.assert_allclose(
            findiff.run([qc] * num_tries, [op] * num_tries, param_values).result().gradients,
            gradient.run([qc] * num_tries, [op] * num_tries, param_values).result().gradients,
            rtol=1e-4,
        )
    @combine(
        grad=[
            FiniteDiffEstimatorGradient,
            ParamShiftEstimatorGradient,
            LinCombEstimatorGradient,
            SPSAEstimatorGradient,
        ],
    )
    def test_options(self, grad):
        """Test estimator gradient's run options"""
        a = Parameter("a")
        qc = QuantumCircuit(1)
        qc.rx(a, 0)
        op = SparsePauliOp.from_list([("Z", 1)])
        estimator = Estimator(options={"shots": 100})
        # Precedence checked below: run-time kwargs > gradient options >
        # estimator options.
        with self.subTest("estimator"):
            if grad is FiniteDiffEstimatorGradient or grad is SPSAEstimatorGradient:
                gradient = grad(estimator, epsilon=1e-6)
            else:
                gradient = grad(estimator)
            options = gradient.options
            result = gradient.run([qc], [op], [[1]]).result()
            self.assertEqual(result.options.get("shots"), 100)
            self.assertEqual(options.get("shots"), 100)
        with self.subTest("gradient init"):
            if grad is FiniteDiffEstimatorGradient or grad is SPSAEstimatorGradient:
                gradient = grad(estimator, epsilon=1e-6, options={"shots": 200})
            else:
                gradient = grad(estimator, options={"shots": 200})
            options = gradient.options
            result = gradient.run([qc], [op], [[1]]).result()
            self.assertEqual(result.options.get("shots"), 200)
            self.assertEqual(options.get("shots"), 200)
        with self.subTest("gradient update"):
            if grad is FiniteDiffEstimatorGradient or grad is SPSAEstimatorGradient:
                gradient = grad(estimator, epsilon=1e-6, options={"shots": 200})
            else:
                gradient = grad(estimator, options={"shots": 200})
            gradient.update_default_options(shots=100)
            options = gradient.options
            result = gradient.run([qc], [op], [[1]]).result()
            self.assertEqual(result.options.get("shots"), 100)
            self.assertEqual(options.get("shots"), 100)
        with self.subTest("gradient run"):
            if grad is FiniteDiffEstimatorGradient or grad is SPSAEstimatorGradient:
                gradient = grad(estimator, epsilon=1e-6, options={"shots": 200})
            else:
                gradient = grad(estimator, options={"shots": 200})
            options = gradient.options
            result = gradient.run([qc], [op], [[1]], shots=300).result()
            self.assertEqual(result.options.get("shots"), 300)
            # Only default + estimator options. Not run.
            self.assertEqual(options.get("shots"), 200)
# Run this test module's tests when executed as a script.
if __name__ == "__main__":
    unittest.main()
| {
"content_hash": "7c611e7678539a2ea7a3bb3e38e2179d",
"timestamp": "",
"source": "github",
"line_count": 423,
"max_line_length": 98,
"avg_line_length": 40.38770685579196,
"alnum_prop": 0.5697728869117302,
"repo_name": "QISKit/qiskit-sdk-py",
"id": "e82383a56c47e53546cd3222c03694ea879303cb",
"size": "17648",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "test/python/algorithms/test_estimator_gradient.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "2582"
},
{
"name": "C++",
"bytes": "327518"
},
{
"name": "CMake",
"bytes": "19294"
},
{
"name": "Makefile",
"bytes": "5608"
},
{
"name": "Pascal",
"bytes": "2444"
},
{
"name": "Python",
"bytes": "1312801"
},
{
"name": "Shell",
"bytes": "8385"
}
],
"symlink_target": ""
} |
import sys
sys.path.append("..") # need to add the parent test path... yuck!
import mrgeotest
class MrGeoMetadataTests(mrgeotest.MrGeoTests):
    """Integration tests for raster and vector metadata loading in MrGeo."""

    # Lazily-loaded fixtures; populated per-instance in setUp().
    toblertiny = None
    roads = None

    @classmethod
    def setUpClass(cls):
        """Copy the raster image and shapefile test data into HDFS once per class."""
        # cls.GENERATE_BASELINE_DATA = True
        super(MrGeoMetadataTests, cls).setUpClass()

        # copy data to HDFS
        cls.copy("tobler-raw-tiny")
        # A shapefile is several companion files; copy each part explicitly.
        cls.copy("AmbulatoryPt.shp")
        cls.copy("AmbulatoryPt.prj")
        cls.copy("AmbulatoryPt.shx")
        cls.copy("AmbulatoryPt.dbf")

    def setUp(self):
        """Load the test image and vector on first use.

        NOTE(review): assigning to self.* shadows the class attributes, so
        each test instance ends up loading its own copies.
        """
        super(MrGeoMetadataTests, self).setUp()
        if self.toblertiny is None:
            self.toblertiny = self.mrgeo.load_image("tobler-raw-tiny")
        if self.roads is None:
            self.roads = self.mrgeo.load_vector("AmbulatoryPt.shp")

    def test_load_raster_metadata(self):
        """Verify every field of the raster metadata for the tiny Tobler image."""
        meta = self.toblertiny.metadata()
        # print(meta)

        # Top-level metadata fields.
        self.assertIsNotNone(meta, "Missing metadata")
        self.assertEqual(len(meta), 14, "number of metadata elements is different")
        self.assertEqual(meta['tilesize'], 512, "Tilesize is different")
        self.assertEqual(meta['maxZoomLevel'], 10, "maxZoomLevel is different")
        self.assertEqual(meta['bands'], 1, "bands is different")
        self.assertEqual(meta['protectionLevel'], "", "protectionLevel is different")
        self.assertIsNone(meta['quantiles'], "quantiles is not None")
        # NOTE(review): failure message says "Tilesize" but this checks tileType.
        self.assertEqual(meta['tileType'], 4, "Tilesize is different")
        self.assertIsNone(meta['categories'], "categories is not None")
        self.assertEqual(meta['classification'], 'Continuous', "classification is different")
        # NOTE(review): message typo "is no None" (presumably "is not None").
        self.assertIsNone(meta['resamplingMethod'], "resamplingMethod is no None")

        # Geographic bounds of the image (compared to 5 decimal places).
        bounds = meta['bounds']
        self.assertIsNotNone(bounds, "Missing bounds")
        self.assertEqual(len(bounds), 4, "number of bounds elements is different")
        self.assertAlmostEqual(bounds['w'], 64.93599700927734, 5, "bounds w is different")
        self.assertAlmostEqual(bounds['s'], 29.98699951171875, 5, "bounds s is different")
        self.assertAlmostEqual(bounds['e'], 65.16200256347656, 5, "bounds e is different")
        self.assertAlmostEqual(bounds['n'], 30.117000579833984, 5, "bounds n is different")

        # Image-wide statistics: a one-element array (single band).
        stats = meta['stats']
        self.assertIsNotNone(stats, "Missing stats")
        self.assertEqual(len(stats), 1, "number of stats (array) elements is different")
        stats = stats[0]  # get the single element
        self.assertIsNotNone(stats, "Missing stats")
        self.assertEqual(len(stats), 5, "number of stats elements is different")
        self.assertAlmostEqual(stats['min'], 0.7147477269172668, 5, "stats min is different")
        self.assertAlmostEqual(stats['max'], 4.190144062042236, 5, "stats max is different")
        self.assertAlmostEqual(stats['mean'], 0.8273760278752889, 5, "stats mean is different")
        self.assertAlmostEqual(stats['sum'], 433783.32290267944, 5, "stats sum is different")
        self.assertEqual(stats['count'], 524288, "stats count is different")

        # Per-zoom-level metadata: entries 0-9 are empty placeholders, entry
        # 10 (the max zoom level) is fully populated.
        imeta = meta['imageMetadata']
        self.assertIsNotNone(imeta, "Missing imageMetadata")
        self.assertEqual(len(imeta), 11, "number of imageMetadata elements is different")
        for i in range(0, 10):
            m = imeta[i]
            self.assertIsNotNone(m, "Missing imageMetadata entry")
            self.assertEqual(len(m), 4, "number of imageMetadata entry elements is different")
            self.assertIsNone(m['tileBounds'], "tileBounds is not None")
            self.assertIsNone(m['name'], " name is not None")
            self.assertIsNone(m['pixelBounds'], " pixelBounds is not None")
            self.assertIsNone(m['stats'], " stats is not None")

        m = imeta[10]
        self.assertIsNotNone(m, "Missing imageMetadata entry")
        self.assertEqual(len(m), 4, "number of imageMetadata entry elements is different")
        self.assertIsNotNone(m['name'], " name is None")
        self.assertEqual(m['name'], '10', "name is different")

        # Tile bounds at zoom level 10.
        self.assertIsNotNone(m['tileBounds'], "tileBounds is None")
        tb = m['tileBounds']
        self.assertEqual(len(tb), 4, "number of tileBounds elements is different")
        self.assertEqual(tb['minX'], 696, "minX is different")
        self.assertEqual(tb['minY'], 341, "minY is different")
        self.assertEqual(tb['maxX'], 697, "maxX is different")
        self.assertEqual(tb['maxY'], 341, "maxY is different")

        # Pixel bounds at zoom level 10.
        self.assertIsNotNone(m['pixelBounds'], "pixelBounds is None")
        pb = m['pixelBounds']
        self.assertEqual(len(pb), 4, "number of pixelBounds elements is different")
        self.assertEqual(pb['minX'], 0, "minX is different")
        self.assertEqual(pb['minY'], 0, "minY is different")
        self.assertEqual(pb['maxX'], 330, "maxX is different")
        self.assertEqual(pb['maxY'], 190, "maxY is different")

        # Max-zoom stats match the image-wide stats above.
        self.assertIsNotNone(m['stats'], " stats is None")
        stats = m['stats']
        self.assertEqual(len(stats), 1, "number of stats (array) elements is different")
        stats = stats[0]  # get the single element
        self.assertIsNotNone(stats, "Missing stats")
        self.assertEqual(len(stats), 5, "number of stats elements is different")
        self.assertAlmostEqual(stats['min'], 0.7147477269172668, 5, "stats min is different")
        self.assertAlmostEqual(stats['max'], 4.190144062042236, 5, "stats max is different")
        self.assertAlmostEqual(stats['mean'], 0.8273760278752889, 5, "stats mean is different")
        self.assertAlmostEqual(stats['sum'], 433783.32290267944, 5, "stats sum is different")
        self.assertEqual(stats['count'], 524288, "stats count is different")

    def test_load_vector_metadata(self):
        """Vector metadata is not implemented yet and should come back as None."""
        meta = self.roads.metadata()
        self.assertIsNone(meta, "Vector metadata should be None until it is completely implemented")
| {
"content_hash": "14d6d415b9012dd7bb43480cb33b1912",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 100,
"avg_line_length": 43.231884057971016,
"alnum_prop": 0.6540395574924572,
"repo_name": "ngageoint/mrgeo",
"id": "999312cec9753c440ddbac685fe595bba94a12b0",
"size": "5966",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mrgeo-python/src/test/python/integration/metadata_tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "FreeMarker",
"bytes": "2033"
},
{
"name": "Java",
"bytes": "3075234"
},
{
"name": "Jupyter Notebook",
"bytes": "21757"
},
{
"name": "Python",
"bytes": "162426"
},
{
"name": "Scala",
"bytes": "775082"
},
{
"name": "Scheme",
"bytes": "17974"
},
{
"name": "Shell",
"bytes": "75079"
}
],
"symlink_target": ""
} |
import vtk
def main():
    """Display a single flat plane in an interactive render window."""
    colors = vtk.vtkNamedColors()
    # Register a custom background color under the name "BkgColor".
    colors.SetColor("BkgColor", [26, 51, 77, 255])

    # Source: a plane centered at (1, 0, 0) with normal (1, 0, 1).
    source = vtk.vtkPlaneSource()
    source.SetCenter(1.0, 0.0, 0.0)
    source.SetNormal(1.0, 0.0, 1.0)
    source.Update()

    # Map the plane's polydata into renderable geometry and color it cyan.
    mapper = vtk.vtkPolyDataMapper()
    mapper.SetInputData(source.GetOutput())
    plane_actor = vtk.vtkActor()
    plane_actor.SetMapper(mapper)
    plane_actor.GetProperty().SetColor(colors.GetColor3d("Cyan"))

    # Rendering pipeline: renderer -> window -> interactor.
    renderer = vtk.vtkRenderer()
    renderer.AddActor(plane_actor)
    renderer.SetBackground(colors.GetColor3d("BkgColor"))

    window = vtk.vtkRenderWindow()
    window.SetWindowName("Plane")
    window.AddRenderer(renderer)

    interactor = vtk.vtkRenderWindowInteractor()
    interactor.SetRenderWindow(window)

    # Render the scene and hand control to the interactor's event loop.
    window.Render()
    interactor.Start()
# Run the example when executed as a script.
if __name__ == '__main__':
    main()
# import vtk
#
# # create a rendering window and renderer
# ren = vtk.vtkRenderer()
# renWin = vtk.vtkRenderWindow()
# renWin.AddRenderer(ren)
#
# # create a renderwindowinteractor
# iren = vtk.vtkRenderWindowInteractor()
# iren.SetRenderWindow(renWin)
#
# # create source
# source = vtk.vtkPlaneSource()
# source.SetCenter(1, 0, 0)
# source.SetNormal(1, 0, 1)
#
# # mapper
# mapper = vtk.vtkPolyDataMapper()
# mapper.SetInputConnection(source.GetOutputPort())
#
# # actor
# actor = vtk.vtkActor()
# actor.SetMapper(mapper)
#
# # assign actor to the renderer
# ren.AddActor(actor)
#
# # enable user interface interactor
# iren.Initialize()
# renWin.Render()
# iren.Start()
| {
"content_hash": "60f2126e8b5b75864d5445c594261b56",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 60,
"avg_line_length": 23.88157894736842,
"alnum_prop": 0.7002754820936639,
"repo_name": "lorensen/VTKExamples",
"id": "c5b9fa688f2479aa7f2eb41071f2774887260483",
"size": "1862",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/Python/GeometricObjects/Plane.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C#",
"bytes": "322226"
},
{
"name": "C++",
"bytes": "4187688"
},
{
"name": "CMake",
"bytes": "155244"
},
{
"name": "CSS",
"bytes": "556"
},
{
"name": "G-code",
"bytes": "377583"
},
{
"name": "GLSL",
"bytes": "5375"
},
{
"name": "HTML",
"bytes": "635483160"
},
{
"name": "Java",
"bytes": "629442"
},
{
"name": "JavaScript",
"bytes": "18199"
},
{
"name": "Python",
"bytes": "1376010"
},
{
"name": "Shell",
"bytes": "3481"
}
],
"symlink_target": ""
} |
"""autogenerated by genpy from zuros_environment_sensors/MSG_ZWAVE_SENSORS.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class MSG_ZWAVE_SENSORS(genpy.Message):
    """ROS message carrying a single Z-Wave sensor reading: the sensor's
    name, its communication id, and the reported value (all strings).

    Generated by genpy from MSG_ZWAVE_SENSORS.msg — do not hand-edit the
    (de)serialization code.
    """
    _md5sum = "795e66ef3e1f1ec8b6e126177b5aefce"
    _type = "zuros_environment_sensors/MSG_ZWAVE_SENSORS"
    _has_header = False  # flag to mark the presence of a Header object
    _full_text = """string name
string communication_id
string value
"""
    __slots__ = ['name','communication_id','value']
    _slot_types = ['string','string','string']

    def __init__(self, *args, **kwds):
        """
        Constructor. Any message fields that are implicitly/explicitly
        set to None will be assigned a default value. The recommend
        use is keyword arguments as this is more robust to future message
        changes. You cannot mix in-order arguments and keyword arguments.

        The available fields are:
            name,communication_id,value

        :param args: complete set of field values, in .msg order
        :param kwds: use keyword arguments corresponding to message field names
            to set specific fields.
        """
        if args or kwds:
            super(MSG_ZWAVE_SENSORS, self).__init__(*args, **kwds)
            # message fields cannot be None, assign default values for those that are
            if self.name is None:
                self.name = ''
            if self.communication_id is None:
                self.communication_id = ''
            if self.value is None:
                self.value = ''
        else:
            # No arguments given: initialize every field to its empty default.
            self.name = ''
            self.communication_id = ''
            self.value = ''

    def _get_types(self):
        """
        internal API method
        """
        return self._slot_types

    def serialize(self, buff):
        """
        serialize message into buffer

        Each string field is written as a little-endian uint32 length prefix
        followed by its UTF-8 bytes.

        :param buff: buffer, ``StringIO``
        """
        try:
            _x = self.name
            length = len(_x)
            # On Python 3 the `python3` flag short-circuits this test, so the
            # Python-2-only builtin `unicode` is never evaluated there.
            if python3 or type(_x) == unicode:
                _x = _x.encode('utf-8')
                length = len(_x)
            buff.write(struct.pack('<I%ss'%length, length, _x))
            _x = self.communication_id
            length = len(_x)
            if python3 or type(_x) == unicode:
                _x = _x.encode('utf-8')
                length = len(_x)
            buff.write(struct.pack('<I%ss'%length, length, _x))
            _x = self.value
            length = len(_x)
            if python3 or type(_x) == unicode:
                _x = _x.encode('utf-8')
                length = len(_x)
            buff.write(struct.pack('<I%ss'%length, length, _x))
        except struct.error as se: self._check_types(se)
        except TypeError as te: self._check_types(te)

    def deserialize(self, str):
        """
        unpack serialized message in str into this message instance

        Reads each field as a uint32 length prefix followed by that many
        bytes (decoded as UTF-8 on Python 3). Note the parameter named
        ``str`` shadows the builtin — generated code.

        :param str: byte array of serialized message, ``str``
        """
        try:
            end = 0
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            start = end
            end += length
            if python3:
                self.name = str[start:end].decode('utf-8')
            else:
                self.name = str[start:end]
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            start = end
            end += length
            if python3:
                self.communication_id = str[start:end].decode('utf-8')
            else:
                self.communication_id = str[start:end]
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            start = end
            end += length
            if python3:
                self.value = str[start:end].decode('utf-8')
            else:
                self.value = str[start:end]
            return self
        except struct.error as e:
            raise genpy.DeserializationError(e) #most likely buffer underfill

    def serialize_numpy(self, buff, numpy):
        """
        serialize message with numpy array types into buffer

        This message has no array fields, so the body is identical to
        :meth:`serialize`; the signature exists for the genpy API.

        :param buff: buffer, ``StringIO``
        :param numpy: numpy python module
        """
        try:
            _x = self.name
            length = len(_x)
            if python3 or type(_x) == unicode:
                _x = _x.encode('utf-8')
                length = len(_x)
            buff.write(struct.pack('<I%ss'%length, length, _x))
            _x = self.communication_id
            length = len(_x)
            if python3 or type(_x) == unicode:
                _x = _x.encode('utf-8')
                length = len(_x)
            buff.write(struct.pack('<I%ss'%length, length, _x))
            _x = self.value
            length = len(_x)
            if python3 or type(_x) == unicode:
                _x = _x.encode('utf-8')
                length = len(_x)
            buff.write(struct.pack('<I%ss'%length, length, _x))
        except struct.error as se: self._check_types(se)
        except TypeError as te: self._check_types(te)

    def deserialize_numpy(self, str, numpy):
        """
        unpack serialized message in str into this message instance using numpy for array types

        This message has no array fields, so the body is identical to
        :meth:`deserialize`; the signature exists for the genpy API.

        :param str: byte array of serialized message, ``str``
        :param numpy: numpy python module
        """
        try:
            end = 0
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            start = end
            end += length
            if python3:
                self.name = str[start:end].decode('utf-8')
            else:
                self.name = str[start:end]
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            start = end
            end += length
            if python3:
                self.communication_id = str[start:end].decode('utf-8')
            else:
                self.communication_id = str[start:end]
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            start = end
            end += length
            if python3:
                self.value = str[start:end].decode('utf-8')
            else:
                self.value = str[start:end]
            return self
        except struct.error as e:
            raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
| {
"content_hash": "0b20d34e01cbb63e5f217cbce285b0c6",
"timestamp": "",
"source": "github",
"line_count": 187,
"max_line_length": 95,
"avg_line_length": 30.053475935828878,
"alnum_prop": 0.5918149466192171,
"repo_name": "robertjacobs/zuros",
"id": "aa9f64aedd2c582dcb33b292a1e1387ae0c033a8",
"size": "5620",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zuros_sensors/zuros_environment_sensors/src/zuros_environment_sensors/msg/_MSG_ZWAVE_SENSORS.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "57753"
},
{
"name": "Common Lisp",
"bytes": "15044"
},
{
"name": "Python",
"bytes": "86972"
},
{
"name": "Shell",
"bytes": "4531"
}
],
"symlink_target": ""
} |
from django.db import migrations, models


class Migration(migrations.Migration):
    """Add the ``dependencies_processed`` flag to ``UnifiedJob``.

    Per the field's help_text, the task manager sets this to True once it
    has processed potential dependencies for a job.
    """

    dependencies = [
        ('main', '0107_v370_workflow_convergence_api_toggle'),
    ]

    operations = [
        migrations.AddField(
            model_name='unifiedjob',
            name='dependencies_processed',
            field=models.BooleanField(default=False, editable=False, help_text='If True, the task manager has already processed potential dependencies for this job.'),
        ),
    ]
| {
"content_hash": "f384715d5efba36041673fed6b9a0a26",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 167,
"avg_line_length": 30.4375,
"alnum_prop": 0.6509240246406571,
"repo_name": "GoogleCloudPlatform/sap-deployment-automation",
"id": "6c10b11083a86bf056263af5fe44497f2c4298dd",
"size": "536",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "third_party/github.com/ansible/awx/awx/main/migrations/0108_v370_unifiedjob_dependencies_processed.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.