text stringlengths 4 1.02M | meta dict |
|---|---|
import _setup
import inspect
import pprint
from httpmessage._multidict import MultiDict
def header_case(header_key):
    """Normalize an HTTP header name to Canonical-Dash-Case.

    e.g. 'content-lenGth' -> 'Content-Length'.
    """
    parts = header_key.split("-")
    return "-".join(part.capitalize() for part in parts)
def _key_wrap(func):
"""creates a function where the value of the 'key' argument, if there
is one, has the function 'header_case' run on it.
"""
varnames = func.func_code.co_varnames
def key_filter(kv):
name, value = kv
if name == 'key':
return header_case(value)
else:
return value
def wrapped(*args):
if len(args) == len(varnames):
args = [key_filter(kv) for kv in zip(varnames, args)]
return func(*args)
wrapped.func_name = func.func_name
wrapped.func_doc = func.func_doc
return wrapped
class Headers(MultiDict):
    # Wrap every inherited MultiDict method that declares a 'key' argument so
    # header names are canonicalized (via header_case) on the way in.
    # NOTE(review): im_func and func_code are CPython 2-only attribute
    # aliases, so this class body only executes under Python 2 — confirm
    # before porting.
    for attrname in dir(MultiDict):
        attrvalue = getattr(MultiDict, attrname)
        if inspect.ismethod(attrvalue):
            attrvalue = attrvalue.im_func
        if inspect.isfunction(attrvalue) and \
            'key' in attrvalue.func_code.co_varnames:
            locals()[attrname] = _key_wrap(attrvalue)
    #---------------------------------------------------------------
    def iteritems(self):
        """Iterate over (header, value) pairs, sorted by header name."""
        return iter(sorted(super(Headers,self).iteritems()))
    #---------------------------------------------------------------
    def __repr__(self):
        """Return '<Headers([...])>'; multi-line pprint output is re-indented
        inside the surrounding brackets for readability."""
        data = pprint.pformat(list(self.iteritems()))
        if '\n' in data:
            data = ''.join([data[0], '\n ', data[1:-1], '\n', data[-1]])
        return '<%s(%s)>' % (
            type(self).__name__, data
        )
    #---------------------------------------------------------------
    def __copy__(self):
        """Shallow-copy by replaying every (key, value) pair into a new Headers."""
        dup = Headers()
        for k,v in self.iteritems():
            dup.append_at(k,v)
        return dup
if __name__ == "__main__":
    # Ad-hoc smoke test: mixed-case header keys should be canonicalized so
    # the later assignments/deletes hit the same entries. (Python 2 prints.)
    h = Headers()
    h['foo'] = 'bar'
    h['content-lenGth'] = 5
    print h
    # Same header, different casing — should overwrite, not add.
    h['CONTENT-length'] = 10
    print h
    del h['foO']
    print h
    h['content-type'] = 'wack wack wackiness'
    h['rover-dookie'] = 'oh yah, lots'
    print h
print h | {
"content_hash": "b868728726c03050a5cbd3365817c7af",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 74,
"avg_line_length": 27.753246753246753,
"alnum_prop": 0.5049134300421151,
"repo_name": "mangpo/cacheall-proxy-server",
"id": "ffb5d35619ad0031ade1661b4550b2fdabff6d47",
"size": "2137",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "httpmessage/_headers.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "JavaScript",
"bytes": "6392"
},
{
"name": "Python",
"bytes": "202488"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, url
from .views import EvolucionesListView, EvolucionesCreateView, EvolucionesUpdateView, EvolucionesDeleteView
# CRUD routes for "evoluciones"; every route carries the parent historia id.
# NOTE(review): each pattern starts with '^/' — this module is presumably
# included without a trailing slash on the prefix; verify against the root
# URLconf.
urlpatterns = patterns('',
    url(r'^/(?P<historia>\d+)$', EvolucionesListView.as_view(), name='list'),
    url(r'^/create/(?P<historia>\d+)$', EvolucionesCreateView.as_view(), name='create'),
    url(r'^/update/(?P<pk>\d+)/(?P<historia>\d+)$', EvolucionesUpdateView.as_view(), name='update'),
    url(r'^/delete/(?P<pk>\d+)/(?P<historia>\d+)$', EvolucionesDeleteView.as_view(), name='delete'),
)
| {
"content_hash": "136776454eb14579df7191ab06f1dd6a",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 107,
"avg_line_length": 39.642857142857146,
"alnum_prop": 0.6792792792792792,
"repo_name": "btenaglia/hpc-historias-clinicas",
"id": "b7c02cd77a230dce6e950245c219d5cd0220d74b",
"size": "579",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hpc-historias-clinicas/evoluciones/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "231102"
},
{
"name": "HTML",
"bytes": "148185"
},
{
"name": "JavaScript",
"bytes": "570412"
},
{
"name": "Python",
"bytes": "243694"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns # noqa
from django.conf.urls import url # noqa
from contrail_openstack_dashboard.openstack_dashboard.dashboards.admin.l3routers import views
# Template for per-router URLs; '%s' is filled with the action suffix below.
ROUTER_URL = r'^(?P<router_id>[^/]+)/%s'
urlpatterns = patterns('horizon.dashboards.admin.l3routers.views',
    url(r'^$', views.IndexView.as_view(), name='index'),
    # ROUTER_URL % '$' anchors the pattern right after the router id.
    url(ROUTER_URL % '$',
        views.DetailView.as_view(),
        name='detail'),
    url(ROUTER_URL % 'update',
        views.UpdateView.as_view(),
        name='update'),
)
| {
"content_hash": "cf4771335603ef134f245aab8850d956",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 93,
"avg_line_length": 29.444444444444443,
"alnum_prop": 0.660377358490566,
"repo_name": "Juniper/contrail-horizon",
"id": "5ab3e3830d3fd7ec529f8b309781e2452b9c2fdb",
"size": "1152",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openstack_dashboard/dashboards/admin/l3routers/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2191"
},
{
"name": "HTML",
"bytes": "135295"
},
{
"name": "JavaScript",
"bytes": "445916"
},
{
"name": "Python",
"bytes": "664980"
}
],
"symlink_target": ""
} |
import os
import re
import time
import unittest
from requests.exceptions import Timeout
import tests.file_utils as file_utils
from ingenico.connect.sdk.communication_exception import CommunicationException
from ingenico.connect.sdk.declined_payment_exception import DeclinedPaymentException
from ingenico.connect.sdk.domain.definitions.address import Address
from ingenico.connect.sdk.domain.definitions.amount_of_money import AmountOfMoney
from ingenico.connect.sdk.domain.definitions.card import Card
from ingenico.connect.sdk.domain.payment.create_payment_request import CreatePaymentRequest
from ingenico.connect.sdk.domain.payment.definitions.card_payment_method_specific_input import \
CardPaymentMethodSpecificInput
from ingenico.connect.sdk.domain.payment.definitions.customer import Customer
from ingenico.connect.sdk.domain.payment.definitions.order import Order
from ingenico.connect.sdk.global_collect_exception import GlobalCollectException
from ingenico.connect.sdk.log.communicator_logger import CommunicatorLogger
from ingenico.connect.sdk.merchant.services.convert_amount_params import ConvertAmountParams
from ingenico.connect.sdk.not_found_exception import NotFoundException
from ingenico.connect.sdk.validation_exception import ValidationException
from tests.unit.server_mock_utils import create_server_listening, create_client
class DefaultConnectionLoggerTest(unittest.TestCase):
    """Tests that services can operate through DefaultConnection and that their network traffic is appropriately logged
    """
    def setUp(self):
        """Reset the per-test shared state used by server-side callbacks."""
        self.request_path = None # Indicating whether or not a request has arrived at the correct server path
        self.client = None # Stores the client used in testing so callbacks can reach it
    def test_logging_test_connection(self):
        """Test that a GET service without parameters can connect to a server and is logged appropriately"""
        test_path = "/v1/1234/services/testconnection" # relative url through which the request should be sent
        logger = TestLogger()
        response_body = read_resource("testConnection.json")
        handler = self.create_handler(body=response_body,
                                      additional_headers=(('Content-type', 'application/json'),))
        with create_server_listening(handler) as address: # start server to listen to request
            with create_client(address) as client: # create client under test
                client.enable_logging(logger)
                response = client.merchant("1234").services().testconnection()
        self.assertIsNotNone(response)
        self.assertEqual(test_path, self.request_path, 'Request has arrived at the wrong path')
        self.assertEqual('OK', response.result)
        self.assertEqual(2, len(logger.entries))
        # for request and response, check that the message exists in the logs and there are no errors
        request_entry = logger.entries[0]
        self.assertIsNotNone(request_entry[0])
        self.assertIsNone(request_entry[1])
        response_entry = logger.entries[1]
        self.assertIsNotNone(response_entry[0])
        self.assertIsNone(response_entry[1])
        # for request and response, check that their output is as predicted and that they match each other
        self.assertRequestAndResponse(request_entry[0], response_entry[0], "testConnection")
    def test_logging_convert_amount(self):
        """Test that a GET service with parameters can connect to a server and is logged appropriately"""
        test_path = "/v1/1234/services/convert/amount" # relative url through which the request should be sent
        logger = TestLogger()
        query_params = ConvertAmountParams()
        query_params.amount = 1000
        query_params.source = "EUR"
        query_params.target = "USD"
        response_body = read_resource("convertAmount.json")
        handler = self.create_handler(body=response_body,
                                      additional_headers=(('Content-type', 'application/json'),))
        with create_server_listening(handler) as address: # start server to listen to request
            with create_client(address) as client: # create client under test
                client.enable_logging(logger)
                response = client.merchant("1234").services().convert_amount(query_params)
        self.assertIsNotNone(response)
        self.assertIsNotNone(response.converted_amount)
        # the query string is stripped before comparing, since the request path
        # includes the serialized query parameters
        self.assertEqual(test_path, self.request_path.split("?")[0],
                         'Request has arrived at {} instead of {}'.format(self.request_path.split("?")[0], test_path))
        self.assertLogsRequestAndResponse(logger, "convertAmount")
    def test_delete_token(self):
        """Test that a POST service without body and a void response can connect to a server and is logged appropriately
        """
        test_path = "/v1/1234/tokens/5678" # relative url through which the request should be sent
        logger = TestLogger()
        handler = self.create_handler(response_code=204)
        with create_server_listening(handler) as address: # start server to listen to request
            with create_client(address) as client: # create client under test
                client.enable_logging(logger)
                client.merchant("1234").tokens().delete("5678", None)
        self.assertEqual(test_path, self.request_path, 'Request has arrived at the wrong path')
        self.assertLogsRequestAndResponse(logger, "deleteToken")
    def test_create_payment_unicode(self):
        """Tests if the body is encoded correctly"""
        test_path = "/v1/1234/payments" # relative url through which the request should be sent
        logger = TestLogger()
        request = create_payment_request()
        response_body = read_resource("createPayment.unicode.json")
        additional_headers = (("Content-Type", "application/json"),
                              ("Location", "http://localhost/v1/1234/payments/000000123410000595980000100001"))
        handler = self.create_handler(response_code=201, body=response_body,
                                      additional_headers=additional_headers)
        with create_server_listening(handler) as address: # start server to listen to request
            with create_client(address) as client: # create client under test
                client.enable_logging(logger)
                response = client.merchant("1234").payments().create(request)
        self.assertIsNotNone(response)
        self.assertIsNotNone(response.payment)
        self.assertIsNotNone(response.payment.id)
        # the non-ASCII surname proves the body survived a UTF-8 round trip
        surname = response.payment.payment_output.redirect_payment_method_specific_output.\
            payment_product840_specific_output.customer_account.surname
        self.assertEqual(surname, u"Schr\xf6der")
        self.assertEqual(test_path, self.request_path,
                         'Request has arrived at "{1}" while it should have been delivered to "{0}"'.format(
                             test_path, self.request_path))
        self.assertLogsRequestAndResponse(logger, "createPayment_unicode")
    def test_create_payment(self):
        """Test that a POST service with 201 response can connect to a server and is logged appropriately"""
        test_path = "/v1/1234/payments" # relative url through which the request should be sent
        logger = TestLogger()
        request = create_payment_request()
        response_body = read_resource("createPayment.json")
        additional_headers = (("content-Type", "application/json"),
                              ("Location", "http://localhost/v1/1234/payments/000000123410000595980000100001"))
        handler = self.create_handler(response_code=201, body=response_body,
                                      additional_headers=additional_headers)
        with create_server_listening(handler) as address: # start server to listen to request
            with create_client(address) as client: # create client under test
                client.enable_logging(logger)
                response = client.merchant("1234").payments().create(request)
        self.assertIsNotNone(response)
        self.assertIsNotNone(response.payment)
        self.assertIsNotNone(response.payment.id)
        self.assertEqual(test_path, self.request_path,
                         'Request has arrived at "{1}" while it should have been delivered to "{0}"'.format(
                             test_path, self.request_path))
        self.assertLogsRequestAndResponse(logger, "createPayment")
    def test_create_payment_invalid_card_number(self):
        """Test that a POST service that is invalid results in an error, which is logged appropriately"""
        test_path = "/v1/1234/payments" # relative url through which the request should be sent
        logger = TestLogger()
        request = create_payment_request()
        response_body = read_resource("createPayment.failure.invalidCardNumber.json")
        handler = self.create_handler(response_code=400, body=response_body,
                                      additional_headers=(('Content-type', 'application/json'),))
        with create_server_listening(handler) as address: # start server to listen to request
            with create_client(address) as client: # create client under test
                client.enable_logging(logger)
                self.assertRaises(ValidationException, client.merchant("1234").payments().create, request)
        self.assertEqual(test_path, self.request_path, 'Request has arrived at the wrong path')
        self.assertLogsRequestAndResponse(logger, "createPayment_failure_invalidCardNumber")
    def test_create_payment_rejected(self):
        """Test that a POST service that is rejected results in an error, which is logged appropriately"""
        test_path = "/v1/1234/payments" # relative url through which the request should be sent
        logger = TestLogger()
        request = create_payment_request()
        response_body = read_resource("createPayment.failure.rejected.json")
        handler = self.create_handler(response_code=402, body=response_body,
                                      additional_headers=(('Content-type', 'application/json'),))
        with create_server_listening(handler) as address: # start server to listen to request
            with create_client(address) as client: # create client under test
                client.enable_logging(logger)
                with self.assertRaises(DeclinedPaymentException) as exc:
                    client.merchant("1234").payments().create(request)
                # the declined-payment exception still carries the payment result
                self.assertIsNotNone(exc.exception.create_payment_result)
                self.assertIsNotNone(exc.exception.create_payment_result.payment)
                self.assertIsNotNone(exc.exception.create_payment_result.payment.id)
        self.assertEqual(test_path, self.request_path, 'Request has arrived at the wrong path')
        self.assertLogsRequestAndResponse(logger, "createPayment_failure_rejected")
    def test_logging_unknown_server_error(self):
        """Test that a GET service that results in an error is logged appropriately"""
        test_path = "/v1/1234/services/testconnection" # relative url through which the request should be sent
        logger = TestLogger()
        response_body = read_resource("unknownServerError.json")
        handler = self.create_handler(response_code=500, body=response_body,
                                      additional_headers=(('Content-type', 'application/json'),))
        with create_server_listening(handler) as address: # start server to listen to request
            with create_client(address) as client: # create client under test
                client.enable_logging(logger)
                with self.assertRaises(GlobalCollectException):
                    client.merchant("1234").services().testconnection()
        self.assertEqual(test_path, self.request_path, 'Request has arrived at the wrong path')
        self.assertLogsRequestAndResponse(logger, "testConnection", "unknownServerError")
    def test_non_json(self):
        """Test that a GET service that results in a not found error is logged appropriately"""
        test_path = "/v1/1234/services/testconnection" # relative url through which the request should be sent
        logger = TestLogger()
        response_body = read_resource("notFound.html")
        handler = self.create_handler(response_code=404, body=response_body,
                                      additional_headers=(("Content-Type", "text/html"),))
        with create_server_listening(handler) as address: # start server to listen to request
            with create_client(address, connect_timeout=0.500, socket_timeout=0.050) as client:
                client.enable_logging(logger)
                with self.assertRaises(NotFoundException):
                    client.merchant("1234").services().testconnection()
        self.assertEqual(test_path, self.request_path, 'Request has arrived at the wrong path')
        self.assertLogsRequestAndResponse(logger, "testConnection", "notFound")
    def test_read_timeout(self):
        """Test that if an exception is thrown before log due to a timeout, it is logged"""
        test_path = "/v1/1234/services/testconnection" # relative url through which the request should be sent
        logger = TestLogger()
        response_body = read_resource("notFound.html")
        handler = self.create_handler(response_code=404, body=response_body,
                                      additional_headers=(("Content-Type", "text/html"),))
        # respond slower than the client's socket timeout to force a Timeout
        def delayed_response(*args, **kwargs):
            time.sleep(0.100)
            handler(*args, **kwargs)
        with create_server_listening(delayed_response) as address: # start server to listen to request
            with create_client(address, socket_timeout=0.05) as client: # create client under test
                client.enable_logging(logger)
                with self.assertRaises(CommunicationException):
                    client.merchant("1234").services().testconnection()
        self.assertEqual(test_path, self.request_path, 'Request has arrived at the wrong path')
        self.assertEqual(2, len(logger.entries))
        # for request and response, check that the message exists in the logs and there is an error in the response
        request_entry = logger.entries[0]
        self.assertIsNotNone(request_entry[0])
        self.assertIsNone(request_entry[1])
        response_entry = logger.entries[1]
        self.assertIsNotNone(response_entry[0])
        self.assertIsNotNone(response_entry[1])
        # for request and error, check that their output is as predicted and that they match each other
        self.assertRequestAndError(request_entry[0], response_entry[0], "testConnection")
        self.assertIsInstance(response_entry[1], Timeout, "logger should have logged a timeout error")
    def test_log_request_only(self):
        """Test that a request can be logged separately by disabling log between request and response"""
        test_path = "/v1/1234/services/testconnection" # relative url through which the request should be sent
        logger = TestLogger()
        response_body = read_resource("testConnection.json")
        handler = self.create_handler(response_code=200, body=response_body,
                                      additional_headers=(('Content-type', 'application/json'),))
        def disable_logging_response(*args, **kwargs): # handler that disables the log of the client
            self.client.disable_logging() # before responding
            handler(*args, **kwargs)
        with create_server_listening(disable_logging_response) as address: # start server to listen to request
            with create_client(address) as client: # create client under test
                self.client = client
                client.enable_logging(logger)
                response = client.merchant("1234").services().testconnection()
        self.assertEqual("OK", response.result)
        self.assertEqual(test_path, self.request_path, 'Request has arrived at the wrong path')
        self.assertEqual(1, len(logger.entries))
        # check that the request message exists in the logs and there are no errors
        request_entry = logger.entries[0]
        self.assertIsNotNone(request_entry[0])
        self.assertIsNone(request_entry[1],
                          "Error '{}' logged that should not have been thrown".format(request_entry[1]))
        # check that the request is formatted correctly
        self.assertRequest(request_entry[0], "testConnection")
    def test_log_response_only(self):
        """Test that a response can be logged separately by enabling log between request and response"""
        test_path = "/v1/1234/services/testconnection" # relative url through which the request should be sent
        logger = TestLogger()
        response_body = read_resource("testConnection.json")
        handler = self.create_handler(response_code=200, body=response_body,
                                      additional_headers=(("Content-Type", "application/json"),))
        def enable_logging_response(*args, **kwargs): # handler that enables the log of the client
            self.client.enable_logging(logger) # before responding
            handler(*args, **kwargs)
        with create_server_listening(enable_logging_response) as address: # start server to listen to request
            with create_client(address) as client: # create client under test
                self.client = client
                response = client.merchant("1234").services().testconnection()
        self.assertEqual("OK", response.result)
        self.assertEqual(test_path, self.request_path, 'Request has arrived at the wrong path')
        self.assertEqual(1, len(logger.entries))
        # check that the response message exists in the logs and there are no errors
        response_entry = logger.entries[0]
        self.assertIsNotNone(response_entry[0])
        self.assertIsNone(response_entry[1],
                          "Error '{}' logged that should not have been thrown".format(response_entry[1]))
        # check that the response is formatted correctly
        self.assertResponse(response_entry[0], "testConnection")
    def test_log_error_only(self):
        """Test that an error can be logged separately by enabling log between request and response"""
        test_path = "/v1/1234/services/testconnection" # relative url through which the request should be sent
        logger = TestLogger()
        response_body = read_resource("notFound.html")
        handler = self.create_handler(response_code=404, body=response_body,
                                      additional_headers=(("Content-Type", "text/html"),))
        def enable_logging_late_response(*args, **kwargs): # handler that enables the log of the client
            self.client.enable_logging(logger) # and waits for a timeout before responding
            time.sleep(0.1)
            handler(*args, **kwargs)
        with create_server_listening(enable_logging_late_response) as address: # start server to listen to request
            with create_client(address, connect_timeout=0.500, socket_timeout=0.050) as client:
                self.client = client
                with self.assertRaises(CommunicationException):
                    client.merchant("1234").services().testconnection()
        self.assertEqual(test_path, self.request_path, 'Request has arrived at the wrong path')
        self.assertEqual(1, len(logger.entries))
        # check that the response message exists in the logs and there are no errors
        error_entry = logger.entries[0]
        self.assertIsNotNone(error_entry[0])
        self.assertIsNotNone(error_entry[1])
        # check that the error is formatted correctly
        self.assertError(error_entry[0])
        self.assertIsInstance(error_entry[1], Timeout,
                              "logger should have logged a timeout error, logged {} instead".format(error_entry[1]))
    def assertLogsRequestAndResponse(self, logger, request_resource_prefix, response_resource_prefix=None):
        """Assert that the logs of the logger contain both request and response and no errors,
        then check that the request and response match using "assertRequestAndResponse"
        """
        if response_resource_prefix is None:
            response_resource_prefix = request_resource_prefix
        self.assertEqual(2, len(logger.entries))
        # for request and response, check that the message exists in the logs and there are no errors
        request_entry = logger.entries[0]
        self.assertIsNotNone(request_entry[0])
        self.assertIsNone(request_entry[1],
                          "Error '{}' logged that should not have been thrown".format(request_entry[1]))
        response_entry = logger.entries[1]
        self.assertIsNotNone(response_entry[0])
        self.assertIsNone(response_entry[1],
                          "Error '{}' logged that should not have been thrown".format(response_entry[1]))
        # for request and response, check that their output is as predicted and that they match each other
        self.assertRequestAndResponse(request_entry[0], response_entry[0],
                                      request_resource_prefix, response_resource_prefix)
    def assertRequestAndResponse(self, request_message, response_message,
                                 request_resource_prefix, response_resource_prefix=None):
        """Assert that the request and response messages match the request and response regular expressions stored in
        'request_resource_prefix'.request and 'response_resource_prefix'.response respectively.
        If response_resource_prefix is not given it is assumed to be equal to request_resource_prefix"""
        if response_resource_prefix is None:
            response_resource_prefix = request_resource_prefix
        request_id = self.assertRequest(request_message, request_resource_prefix)
        self.assertResponse(response_message, response_resource_prefix, request_id)
    def assertRequestAndError(self, request_message, error_message, resource_prefix):
        """Assert that the request message matches the request regular expression stored in 'resource_prefix.request'
        and the error is a valid error message and refers to the request"""
        request_id = self.assertRequest(request_message, resource_prefix)
        self.assertError(error_message, request_id)
    def assertRequest(self, request_message, request_resource_prefix):
        """Assert that the request message matches the regex stored in 'request_resource_prefix'.request
        :param request_message: the request message to match
        :param request_resource_prefix: prefix of the regex file location,
        it will be appended with '.request' to obtain the file location
        :return: the request_id for use in matching the corresponding response or error
        """
        request_resource = request_resource_prefix + "_request"
        # resource checkers are module-level functions looked up by name; they
        # either return a regex string or a (request_id, ...) tuple
        regex = globals()[request_resource](request_message, self)
        if type(regex) == type(""):
            request_pattern = re.compile(regex, re.DOTALL)
            match = request_pattern.match(request_message.get_message())
            # NOTE(review): leftover debug output — consider removing.
            print(globals()[request_resource])
            if match is None:
                raise AssertionError("request message '" + request_message.get_message() +
                                     "' does not match pattern " + str(request_pattern))
            # NOTE(review): assertRegex is given the message object rather than
            # its text; this would raise TypeError if this string-pattern path
            # were taken — currently all checkers in this file return tuples.
            self.assertRegex(request_message, request_pattern)
            return match.group(1)
        return regex[0]
    def assertResponse(self, response_message, response_resource_prefix, request_id=None):
        """Assert that the response message matches the regex stored in 'response_resource_prefix'.response
        :param response_message: the response message to match
        :param response_resource_prefix: prefix of the regex file location,
        it will be appended with '.response' to obtain the file location
        :param request_id: if a request_id is provided, it is matched against the response_id found in the response,
        failing the assert if not equal
        """
        response_resource = response_resource_prefix + "_response"
        # for each response call the corresponding asserting function
        regex = globals()[response_resource](response_message, self)
        if type(regex) == type(""):
            response_pattern = re.compile(regex, re.DOTALL)
            match = response_pattern.match(response_message.get_message())
            if match is None:
                raise AssertionError("response message '" + response_message.get_message() +
                                     "' does not match pattern " + str(response_pattern))
            if request_id is not None:
                self.assertEqual(request_id, match.group(1),
                                 "request_id '{0}' does not match response_id '{1}'".format(request_id, match.group(1)))
    def assertError(self, error_message, request_id=None):
        """Assert that the error message matches the regex stored in 'generic.error'
        :param error_message: the error message to match
        :param request_id: if a request_id is provided, it is matched against the error_id found in the error,
        failing the assert if not equal
        """
        error_resource = "generic_error"
        error_pattern_string = globals()[error_resource]()
        error_pattern = re.compile(error_pattern_string, re.DOTALL)
        match = error_pattern.match(error_message)
        if match is None:
            raise AssertionError("response message '" + error_message +
                                 "' does not match pattern " + str(error_pattern_string))
        if request_id is not None:
            self.assertEqual(request_id, match.group(1),
                             "request_id '{0}' does not match error_id '{1}'".format(request_id, match.group(1)))
    def assertHeaderIn(self, _tuple, _list):
        """Assert that header tuple ``_tuple`` occurs in ``_list``, comparing
        header names case-insensitively."""
        # If tuple has incorrect number of elements, assume it is not in the list
        self.assertIn((_tuple[0].lower(), _tuple[1]),
                      list(map((lambda el: (el[0].lower(), el[1])), _list)))
    def create_handler(self, response_code=200, body='', # path='',
                       additional_headers=()):
        """Creates a request handler that receives the request on the server side
        :param response_code: status code of the desired response
        :param body: the body of the response message to return, it should be in json format
        :param additional_headers: additional headers that are added to the handler's response
        If the request is sent through the proper path, self.request_successful will be set to true, false otherwise
        """
        # Returns a callable taking the BaseHTTPRequestHandler-style handler
        # object; it records the request path and writes the canned response.
        def handler_func(handler):
            self.request_path = handler.path # record if the request was sent through the expected path
            handler.protocol_version = 'HTTP/1.1'
            try:
                handler.send_response(response_code)
                for header in additional_headers:
                    handler.send_header(*header)
                handler.send_header('Dummy', None)
                handler.send_header('Content-Length', len(bytes(body, "utf-8")))
                handler.end_headers()
                handler.wfile.write(bytes(body, "utf-8"))
            except ConnectionAbortedError:
                # client may have hung up (e.g. timeout tests); that is fine
                pass
        return handler_func
def create_payment_request():
    """Build the canonical CreatePaymentRequest used by these tests:
    a 2345 CAD card payment for a Canadian customer."""
    money = AmountOfMoney()
    money.currency_code = "CAD"
    money.amount = 2345
    address = Address()
    address.country_code = "CA"
    buyer = Customer()
    buyer.billing_address = address
    order = Order()
    order.amount_of_money = money
    order.customer = buyer
    card = Card()
    card.cvv = "123"
    card.card_number = "1234567890123456"
    card.expiry_date = "1220"
    card_input = CardPaymentMethodSpecificInput()
    card_input.payment_product_id = 1
    card_input.card = card
    request = CreatePaymentRequest()
    request.order = order
    request.card_payment_method_specific_input = card_input
    return request
class TestLogger(CommunicatorLogger):
    """Logger double that records every logged message, in order, together
    with the exception that accompanied it (None when there was none)."""
    def __init__(self):
        super().__init__()
        self.entries = []
    def log_request(self, request_log_message):
        self._record(request_log_message)
    def log_response(self, response_log_message):
        self._record(response_log_message)
    def log(self, message, thrown=None):
        self._record(message, thrown)
    def _record(self, message, thrown=None):
        # every public hook funnels through here so ordering is preserved
        self.entries.append((message, thrown))
def replace_all_java_regex():
    r"""Script to convert all \Q \E java regex pattern files used by this test to python equivalent"""
    resources = (
        "convertAmount.request",
        "convertAmount.response",
        "createPayment.failure.invalidCardNumber.request",
        "createPayment.failure.invalidCardNumber.response",
        "createPayment.failure.rejected.request",
        "createPayment.failure.rejected.response",
        "createPayment.request",
        "createPayment.response",
        "deleteToken.request",
        "deleteToken.response",
        "generic.error",
        "notFound.response",
        "testConnection.request",
        "testConnection.response",
        "unknownServerError.response",
    )
    for relative_path in resources:
        replace_java_regex(relative_path)
def replace_java_regex(relative_path):
    r"""Reads a source file containing java regex patterns and writes it back in python regex.
    Recognises: \Qxxx\E as literal content xxx
    """
    text = read_resource(relative_path)
    pieces = []
    # splitting on \Q and \E alternates between raw regex (even indices)
    # and quoted literal text (odd indices), which must be escaped
    for index, chunk in enumerate(re.split(r"\\Q|\\E", text)):
        pieces.append(chunk if index % 2 == 0 else re.escape(chunk))
    file_utils.write_file(os.path.join("default_implementation", relative_path), "".join(pieces))
# Fixture loader shared by the tests and the regex-conversion script.
def read_resource(file_name):
    """Return the contents of *file_name* stored under resources/default_implementation."""
    return file_utils.read_file(os.path.join("default_implementation", file_name))
# ------------------------ REGEX SOURCES ------------------
def convertAmount_request(request, test):
    """Validate the logged convertAmount GET request; returns (request_id, False)
    so the caller can match the corresponding response."""
    test.assertEqual(request.method, "GET")
    test.assertEqual(request.uri, '/v1/1234/services/convert/amount?source=EUR&target=USD&amount=1000')
    header_list = request.get_header_list()
    test.assertHeaderIn(('Authorization', '"********"'), header_list)
    for required_name in ('Date', 'X-GCS-ServerMetaInfo'):
        test.assertTrue(any(h[0] == required_name for h in header_list))
    test.assertIsNone(request.body)
    return request.request_id, False
def convertAmount_response(response, test):
    """Validate the logged convertAmount 200 response; returns (request_id, False)."""
    test.assertIsNotNone(response.get_duration())
    test.assertEqual(response.get_status_code(), 200)
    header_list = response.get_header_list()
    test.assertTrue(any(h[0] == 'Date' for h in header_list))
    test.assertHeaderIn(('Content-Type', '"application/json"'), header_list)
    test.assertHeaderIn(('Dummy', '""'), header_list)
    test.assertEqual(response.content_type, 'application/json')
    body = response.body
    test.assertIsNotNone(body)
    test.assertTrue(len(body))
    return response.request_id, False
def createPayment_failure_invalidCardNumber_request(request, test):
    """Check the logged invalid-card-number POST request: URI, headers, JSON body."""
    header_list = request.get_header_list()
    test.assertEqual(request.method, "POST")
    test.assertEqual(request.uri, '/v1/1234/payments')
    test.assertHeaderIn(('Authorization', '"********"'), header_list)
    for name in ('Date', 'X-GCS-ServerMetaInfo'):
        test.assertTrue(len([h for h in header_list if h[0] == name]))
    test.assertHeaderIn(('Content-Type', '"application/json"'), header_list)
    # The Java SDK logged 'application/json; charset=UTF-8'; this port logs it bare.
    test.assertEqual(request.content_type, 'application/json')
    test.assertIsNotNone(request.body)
    test.assertTrue(len(request.body))
    return request.request_id, False
def createPayment_failure_invalidCardNumber_response(response, test):
    """Check the logged invalid-card-number response: 400 with a JSON body."""
    test.assertIsNotNone(response.get_duration())
    test.assertEqual(response.get_status_code(), 400)
    header_list = response.get_header_list()
    test.assertTrue(len([h for h in header_list if h[0] == 'Date']))
    test.assertHeaderIn(('Content-Type', '"application/json"'), header_list)
    test.assertHeaderIn(('Dummy', '""'), header_list)
    test.assertEqual(response.content_type, 'application/json')
    test.assertIsNotNone(response.body)
    test.assertTrue(len(response.body))
    return response.request_id, False
def createPayment_failure_rejected_request(request, test):
    """Check the logged rejected-payment POST request: URI, headers, JSON body."""
    header_list = request.get_header_list()
    test.assertEqual(request.method, "POST")
    test.assertEqual(request.uri, '/v1/1234/payments')
    test.assertHeaderIn(('Authorization', '"********"'), header_list)
    for name in ('Date', 'X-GCS-ServerMetaInfo'):
        test.assertTrue(len([h for h in header_list if h[0] == name]))
    test.assertHeaderIn(('Content-Type', '"application/json"'), header_list)
    # The Java SDK logged 'application/json; charset=UTF-8'; this port logs it bare.
    test.assertEqual(request.content_type, 'application/json')
    test.assertIsNotNone(request.body)
    test.assertTrue(len(request.body))
    return request.request_id, False
def createPayment_failure_rejected_response(response, test):
    """Check the logged rejected-payment response: 402 with a JSON body."""
    test.assertIsNotNone(response.get_duration())
    test.assertEqual(response.get_status_code(), 402)
    header_list = response.get_header_list()
    test.assertTrue(len([h for h in header_list if h[0] == 'Date']))
    test.assertHeaderIn(('Content-Type', '"application/json"'), header_list)
    test.assertHeaderIn(('Dummy', '""'), header_list)
    test.assertEqual(response.content_type, 'application/json')
    test.assertIsNotNone(response.body)
    test.assertTrue(len(response.body))
    return response.request_id, False
def createPayment_request_test():
    """Compiled multiline pattern matching a ' headers: ...Connection' log line."""
    pattern = r"""^ headers:[ ]+Connection.*$"""
    return re.compile(pattern, re.M)
def createPayment_request(request, test):
    """Check the logged createPayment POST request: URI, headers, JSON body."""
    header_list = request.get_header_list()
    test.assertEqual(request.method, "POST")
    test.assertEqual(request.uri, '/v1/1234/payments')
    test.assertHeaderIn(('Authorization', '"********"'), header_list)
    for name in ('Date', 'X-GCS-ServerMetaInfo'):
        test.assertTrue(len([h for h in header_list if h[0] == name]))
    test.assertHeaderIn(('Content-Type', '"application/json"'), header_list)
    # The Java SDK logged 'application/json; charset=UTF-8'; this port logs it bare.
    test.assertEqual(request.content_type, 'application/json')
    test.assertIsNotNone(request.body)
    test.assertTrue(len(request.body))
    return request.request_id, False
def createPayment_unicode_request(request, test):
    """Check the logged unicode createPayment POST request: URI, headers, JSON body."""
    header_list = request.get_header_list()
    test.assertEqual(request.method, "POST")
    test.assertEqual(request.uri, '/v1/1234/payments')
    test.assertHeaderIn(('Authorization', '"********"'), header_list)
    for name in ('Date', 'X-GCS-ServerMetaInfo'):
        test.assertTrue(len([h for h in header_list if h[0] == name]))
    test.assertHeaderIn(('Content-Type', '"application/json"'), header_list)
    # The Java SDK logged 'application/json; charset=UTF-8'; this port logs it bare.
    test.assertEqual(request.content_type, 'application/json')
    test.assertIsNotNone(request.body)
    test.assertTrue(len(request.body))
    return request.request_id, False
def createPayment_unicode_response(response, test):
    """Check the logged unicode createPayment response: 201 with Location header."""
    test.assertIsNotNone(response.get_duration())
    test.assertEqual(response.get_status_code(), 201)
    header_list = response.get_header_list()
    test.assertTrue(len([h for h in header_list if h[0] == 'Date']))
    test.assertHeaderIn(('Content-Type', '"application/json"'), header_list)
    test.assertHeaderIn(('Dummy', '""'), header_list)
    test.assertHeaderIn(('Location', '"http://localhost/v1/1234/payments/000000123410000595980000100001"'), header_list)
    test.assertIsNotNone(response.body)
    test.assertTrue(len(response.body))
    return response.request_id, False
def createPayment_response(response, test):
    """Check the logged createPayment response: 201, JSON body, Location header."""
    test.assertIsNotNone(response.get_duration())
    test.assertEqual(response.get_status_code(), 201)
    test.assertEqual(response.content_type, 'application/json')
    header_list = response.get_header_list()
    test.assertTrue(len([h for h in header_list if h[0] == 'Date']))
    test.assertHeaderIn(('Content-Type', '"application/json"'), header_list)
    test.assertHeaderIn(('Dummy', '""'), header_list)
    test.assertHeaderIn(('Location', '"http://localhost/v1/1234/payments/000000123410000595980000100001"'), header_list)
    test.assertIsNotNone(response.body)
    test.assertTrue(len(response.body))
    return response.request_id, False
def deleteToken_request(request, test):
    """Check the logged deleteToken DELETE request: URI and standard headers."""
    header_list = request.get_header_list()
    test.assertEqual(request.method, "DELETE")
    test.assertEqual(request.uri, '/v1/1234/tokens/5678')
    test.assertHeaderIn(('Authorization', '"********"'), header_list)
    for name in ('Date', 'X-GCS-ServerMetaInfo'):
        test.assertTrue(len([h for h in header_list if h[0] == name]))
    return request.request_id, False
def deleteToken_response(response, test):
    """Check the logged deleteToken response: 204 with no content type or body."""
    test.assertIsNotNone(response.get_duration())
    test.assertEqual(response.get_status_code(), 204)
    test.assertIsNone(response.content_type)
    header_list = response.get_header_list()
    test.assertTrue(len([h for h in header_list if h[0] == 'Date']))
    test.assertHeaderIn(('Dummy', '""'), header_list)
    test.assertIsNone(response.body)
    return response.request_id, False
def generic_error():
    """Pattern for "Error occurred for outgoing request (requestId='<id>', <n> s)".

    Group 1 captures the request id.
    """
    pattern = (r"""Error\ occurred\ for\ outgoing\ request\ """
               r"""\(requestId\=\'([-a-zA-Z0-9]+)\'\,\ \d+\ s\)""")
    return pattern
def notFound_response(response, test):
    """Check the logged 404 response: text/html content and a 'Not Found' body."""
    test.assertIsNotNone(response.get_duration())
    test.assertEqual(response.get_status_code(), 404)
    header_list = response.get_header_list()
    test.assertTrue(len([h for h in header_list if h[0] == 'Date']))
    test.assertHeaderIn(('Content-Type', '"text/html"'), header_list)
    test.assertHeaderIn(('Dummy', '""'), header_list)
    test.assertEqual(response.content_type, 'text/html')
    test.assertIsNotNone(response.body)
    test.assertEqual(response.body, "Not Found")
    return response.request_id, False
def testConnection_request(request, test):
test.assertEqual(request.method, "GET")
test.assertEqual(request.uri, '/v1/1234/services/testconnection')
headers = request.get_header_list()
test.assertHeaderIn(('Authorization', '"********"'), headers)
test.assertTrue(len(list(filter((lambda header: header[0] == 'Date'), headers))))
test.assertTrue(len(list(filter((lambda header: header[0] == 'X-GCS-ServerMetaInfo'), headers))))
return request.request_id, False
def testConnection_response(response, test):
test.assertIsNotNone(response.get_duration())
test.assertEqual(response.get_status_code(), 200)
test.assertEqual(response.content_type, 'application/json')
headers = response.get_header_list()
test.assertTrue(len(list(filter((lambda header: header[0] == 'Date'), headers))))
test.assertHeaderIn(('Content-Type', '"application/json"'), headers)
test.assertHeaderIn(('Dummy', '""'), headers)
test.assertIsNotNone(response.body)
test.assertTrue(len(response.body))
return response.request_id, False
def unknownServerError_response(response, test):
    """Check the logged unknown-server-error response: 500 with a JSON body."""
    test.assertIsNotNone(response.get_duration())
    test.assertEqual(response.get_status_code(), 500)
    header_list = response.get_header_list()
    test.assertTrue(len([h for h in header_list if h[0] == 'Date']))
    test.assertHeaderIn(('Content-Type', '"application/json"'), header_list)
    test.assertHeaderIn(('Dummy', '""'), header_list)
    test.assertEqual(response.content_type, 'application/json')
    test.assertIsNotNone(response.body)
    test.assertTrue(len(response.body))
    return response.request_id, False
if __name__ == '__main__':
    # Discover and run the test cases in this module.
    unittest.main()
| {
"content_hash": "a8ca81fffc7d6bed85c4c87cb878c046",
"timestamp": "",
"source": "github",
"line_count": 804,
"max_line_length": 120,
"avg_line_length": 51.190298507462686,
"alnum_prop": 0.6763855480234225,
"repo_name": "Ingenico-ePayments/connect-sdk-python3",
"id": "4d2638f258f4c1577639326ab6d6694ffb627ff8",
"size": "41157",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/test_default_connection_logger.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "36"
},
{
"name": "Python",
"bytes": "1735057"
}
],
"symlink_target": ""
} |
import os
import time
import urlparse
import uuid
from flask import Flask, abort, request, jsonify, json
from doppler.queue import callback, rpqueue
# Sentry error reporting is opt-in via the SENTRY_DSN environment variable.
SENTRY_DSN = os.environ.get('SENTRY_DSN')
if SENTRY_DSN:
    # Import raven lazily so the dependency is only needed when configured.
    from raven.contrib.flask import Sentry
    sentry = Sentry(dsn=SENTRY_DSN)
def _get_job(request_id):
    """Return a handle to the enqueued callback task identified by *request_id*."""
    queue_name = 'default'
    return rpqueue._EnqueuedTask(callback.name, request_id, queue_name)
# Expected JSON schema for job submissions: field name -> required Python type.
# (`basestring` indicates this module targets Python 2.)
FORM = {
    'callback_url': basestring,   # URL POSTed to when the job fires
    'run_at': int,                # optional unix timestamp to run at
    'max_retries': int,           # optional retry budget
    'retry_delay': int,           # optional seconds between retries
    'message': basestring,        # opaque payload delivered to the callback
}
# Fields that must be present in every submission.
REQUIRED_FIELDS = ('callback_url', 'message')
def validate():
    """Validate the current request's JSON body against FORM.

    Aborts with 422 on a type mismatch and 400 on a missing required field;
    keys outside FORM are ignored.
    """
    payload = request.json
    for key, item in payload.iteritems():
        if key not in FORM:
            continue
        if not isinstance(item, FORM[key]):
            abort(422, u'{} not of type {}'.format(key, FORM[key]))
    for key in FORM:
        if key in REQUIRED_FIELDS and key not in payload:
            abort(400, u'{} not found'.format(key))
def _get_delay():
    """Convert the optional `run_at` unix timestamp into a delay in seconds.

    Returns None when `run_at` is absent or null (run immediately); past
    timestamps are clamped to a zero delay.
    """
    # .get: `run_at` is not in REQUIRED_FIELDS, so a missing key must not
    # raise KeyError (which surfaced as a 500 in the original).
    run_at = request.json.get('run_at')
    if run_at is None:
        return None
    seconds = run_at - time.time()
    return max(0, int(seconds))
def post_job():
    """Create a callback job from the JSON request body and enqueue it.

    Returns a JSON summary of the enqueued job. Aborts with 415 when the
    body is not JSON and 422 when the callback URL is malformed (validate()
    handles field-level errors).
    """
    if request.json is None:
        abort(415)
    validate()
    request_id = str(uuid.uuid4())
    callback_url = request.json['callback_url']
    message = request.json['message']
    max_retries = request.json.get('max_retries', 0)
    retry_delay = request.json.get('retry_delay')
    scheduled_at = int(time.time())
    url_parts = urlparse.urlparse(callback_url)
    if not (url_parts.scheme and url_parts.netloc):
        abort(422, u'{} is not a valid callback url'.format(callback_url))
    job = callback.execute(
        request_id=request_id,
        message=message,
        callback_url=callback_url,
        scheduled_at=scheduled_at,
        retry_delay=retry_delay,
        last_retry=None,
        taskid=request_id,
        delay=_get_delay(),
        _attempts=max_retries,
    )
    return jsonify({
        'request_id': job.taskid,
        'status': job.status,
        # `run_at` is optional; .get avoids a KeyError (HTTP 500) when absent.
        'run_at': request.json.get('run_at'),
        'scheduled_at': scheduled_at,
        'last_retry': None,
        'retries_left': max_retries,
    })
def get_job(request_id):
    """Report the current status of the job *request_id* as JSON.

    For jobs that are neither done nor started, the queued task payload is
    decoded to recover the scheduling details.
    """
    job = _get_job(request_id)
    status = job.status
    run_at = scheduled_at = last_retry = None
    retries_left = 0
    if status not in ("done", "started"):
        payload = json.loads(job.args)
        taskid, fname, call_args, kwargs, run_at = payload
        scheduled_at = kwargs['scheduled_at']
        last_retry = kwargs.get('last_retry')
        retries_left = kwargs['_attempts']
    return jsonify({
        'request_id': request_id,
        'status': status,
        'run_at': run_at,
        'scheduled_at': scheduled_at,
        'last_retry': last_retry,
        'retries_left': retries_left,
    })
def delete_job(request_id):
    """Cancel job *request_id*; report whether anything was actually cancelled."""
    cancelled = _get_job(request_id).cancel()
    return jsonify({'was_cancelled': bool(cancelled)})
def get_service_app():
    """Construct the Flask application and register the job endpoints."""
    app = Flask(__name__)
    if SENTRY_DSN:
        sentry.init_app(app)
    routes = (
        ('/', 'post_job', post_job, ('POST',)),
        ('/<request_id>', 'get_job', get_job, None),          # default: GET
        ('/<request_id>', 'delete_job', delete_job, ('DELETE',)),
    )
    for rule, endpoint, view_func, methods in routes:
        if methods is None:
            app.add_url_rule(rule, endpoint, view_func)
        else:
            app.add_url_rule(rule, endpoint, view_func, methods=list(methods))
    return app
if __name__ == "__main__":
get_service_app().run()
| {
"content_hash": "bdbbf4e28ec002b9b76d7420acf9fe16",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 83,
"avg_line_length": 25.518796992481203,
"alnum_prop": 0.5981143193871538,
"repo_name": "TakumiHQ/doppler",
"id": "2ed96dac662715e0d5eea9423a2f04103bf21f4b",
"size": "3412",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doppler/service.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14705"
}
],
"symlink_target": ""
} |
import os
import shutil
import stat
import sys
import tempfile
import time
import unittest
sys.path.append(os.path.join(os.path.dirname(__file__), '../scoot'))
import client_lib as proto
tmpdir = None
def setUp():
    """Start the daemon and verify a client connection within ~1 second.

    Creates the shared scratch directory and raises when the daemon cannot
    be reached in time.
    """
    global tmpdir
    tmpdir = tempfile.mkdtemp()
    proto.start()
    # Poll the daemon with echo until it answers, for up to 1 second.
    start = time.time()
    elapsedTime = 0
    started = False
    while not started and elapsedTime < 1.0:
        try:
            if proto.echo("ping") == "ping":
                started = True
        except proto.ScootException:
            pass
        # Update elapsed time on every iteration. The original only updated
        # it inside the except branch, so a non-"ping" reply that did not
        # raise would loop forever.
        elapsedTime = time.time() - start
    if not started:
        raise Exception("Connection to daemon couldn't be established in {0} seconds".format(elapsedTime))
def tearDown():
    """Delete the scratch workspace, then shut the scoot daemon down."""
    workspace = tmpdir
    shutil.rmtree(workspace)
    proto.stop_daemon()
sleep_s_key = "sleep_s_id"
ls_s_key = "ls_s_id"
ls_fail_s_key = "ls_fail_s_id"
# test scenario
class CmdDef(object):
    """Reusable command definition for the tests.

    Attributes:
        cmd: argv of the command to run.
        snapshot_key: snapshot key to use when requesting the run.
        interm_states: states allowed while not all runs have completed.
        final_state: state expected once every run has completed.
    """
    def __init__(self, cmd, snapshot_key, interm_states, final_state):
        self.final_state = final_state
        self.interm_states = interm_states
        self.snapshot_key = snapshot_key
        self.cmd = cmd
# Run the sleep script and expect final status:complete.
# NOTE(review): the snapshot_key literals below duplicate the sleep_s_key /
# ls_s_key / ls_fail_s_key constants defined above — consider referencing them.
sleepDef = CmdDef(cmd=["./sleep_script.sh"],
                  snapshot_key="sleep_s_id",
                  interm_states=[proto.ScootStatus.State.PENDING,
                                 proto.ScootStatus.State.PREPARING,
                                 proto.ScootStatus.State.RUNNING,
                                 proto.ScootStatus.State.COMPLETED],
                  final_state=proto.ScootStatus.State.COMPLETED)
# Run the ls script against a snapshot that has the target file and expect
# final status:complete.
lsDef = CmdDef(cmd=["./ls_script.sh"],
               snapshot_key="ls_s_id",
               interm_states=[proto.ScootStatus.State.PREPARING,
                              proto.ScootStatus.State.PENDING,
                              proto.ScootStatus.State.RUNNING,
                              proto.ScootStatus.State.COMPLETED],
               final_state=proto.ScootStatus.State.COMPLETED)
# Run the ls script against a snapshot that does not have the target file.
# NOTE(review): the original comment said to expect final status "failed",
# but final_state is COMPLETED — presumably a failing exit code still
# reaches the COMPLETED run state; confirm against the scoot protocol.
failDef = CmdDef(cmd=["./ls_script.sh"],
                 snapshot_key="ls_fail_s_id",
                 interm_states=[proto.ScootStatus.State.PENDING,
                                proto.ScootStatus.State.PREPARING,
                                proto.ScootStatus.State.RUNNING,
                                proto.ScootStatus.State.COMPLETED],
                 final_state=proto.ScootStatus.State.COMPLETED)
def test_many():
    """ Submit ~50 run requests (5 sets of requests where each set of requests contain 10 requests that should complete and 1 request
    that should fail).
    Wait for all the requests to complete and verify their final states.
    """
    global sleepDef, lsDef, failDef
    rpc_timeout_ns = int(1000 * 1e6)
    runs = {}
    # Make the snapshots.
    s_ids = make_snapshots()
    # Run the sleep script
    id = proto.run(argv=sleepDef.cmd, timeout_ns=rpc_timeout_ns, snapshot_id=s_ids[sleepDef.snapshot_key])
    runs[id] = "sleep"
    # 5 batches of 10 runs that should succeed plus 1 that should fail;
    # "Runner is busy" rejections are tolerated as resource limits.
    for i in range(5):
        for j in range(10):
            try:
                id = proto.run(argv=lsDef.cmd, timeout_ns=rpc_timeout_ns, snapshot_id=s_ids[lsDef.snapshot_key])
                runs[id] = "ls"
            except proto.ScootException as e:
                if "Runner is busy" not in str(e):
                    raise Exception("Run error, not resource limitation.{}".format(str(e)))
        try:
            id = proto.run(argv=failDef.cmd, timeout_ns=rpc_timeout_ns, snapshot_id=s_ids[failDef.snapshot_key])
        except proto.ScootException as e:
            if "Runner is busy" not in str(e):
                raise Exception("Run error, not resource limitation.{}".format(str(e)))
        # NOTE(review): if the fail-run above was rejected as busy, `id` still
        # holds the previous run's id and the line below relabels that run as
        # "fail" — confirm whether that is intended.
        runs[id] = "fail"
    # get all statuses
    statuses = proto.poll(run_ids=runs.keys(), timeout_ns=0, return_all=True)
    assertIntermediateStatuses(statuses, runs)
    # wait for all runs to finish and validate their status
    start = time.time()
    allDone = False
    elapsedTime = 0
    while not allDone and elapsedTime < 10.0: #FIXME: seeing spurious failures in travis-ci, making this extra long for now.
        ids = runs.keys()
        statuses = proto.poll(run_ids=ids, timeout_ns=int(3000 * 1e6), return_all=False)
        if len(statuses) == len(ids):
            allDone = True
        elapsedTime = time.time() - start
    if len(ids) != len(statuses):
        raise Exception("runs did not finish: %i/%i" % (len(statuses), len(ids)))
    assertCompleteStatuses(statuses, runs)
def make_snapshots():
    """ make the following snapshots
    sleep snapshot: has a "sleep_script.sh" that sleeps for 0.5 seconds
    ls snapshot: has an "ls_script.sh" that issues a successful ls command
    ls fail snapshot: has an "ls_script.sh" that issues a failing ls command
    Returns a dict of the snapshot ids
    """
    executable = stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR

    sleep_script = os.path.join(tmpdir, "sleep_script.sh")
    # `with` closes the handles promptly; the original leaked open files.
    with open(sleep_script, 'w') as script:
        script.write("#!/bin/sh\nsleep 0.5")
    os.chmod(sleep_script, executable)

    ls_script = os.path.join(tmpdir, "ls_script.sh")
    with open(ls_script, 'w') as script:
        script.write("#!/bin/sh\nls resource.txt")
    os.chmod(ls_script, executable)

    resource = os.path.join(tmpdir, "resource.txt")
    with open(resource, 'w') as res:
        res.write("content")

    # make the snapshots
    sleep_s_id = proto.create_snapshot(sleep_script)
    ls_s_id = proto.create_snapshot(os.path.join(tmpdir, "*"))
    fail_ls_s_id = proto.create_snapshot(ls_script)
    # return a dict of the snapshot ids
    return {sleep_s_key: sleep_s_id, ls_s_key: ls_s_id, ls_fail_s_key: fail_ls_s_id}
def assertIntermediateStatuses(statuses, runs):
    """Verify each run's state is one of its command's allowed interim states.

    Raises on the first violation; returns True when all states are allowed.
    """
    expected = {"sleep": sleepDef, "ls": lsDef, "fail": failDef}
    for status in statuses:
        kind = runs[status.run_id]
        cmd_def = expected.get(kind)
        if cmd_def is None:
            continue
        if status.state not in cmd_def.interm_states:
            raise Exception("run type:{0}, expected on of {1}, got {2}".format(kind, cmd_def.interm_states, status.state))
    return True
def assertCompleteStatuses(statuses, runs):
    """Verify that each run's final state matches its command's final_state.

    Raises on the first mismatch; returns True when every run matches.
    """
    expected = {"sleep": sleepDef, "ls": lsDef, "fail": failDef}
    for status in statuses:
        kind = runs[status.run_id]
        cmd_def = expected.get(kind)
        if cmd_def is None:
            continue
        # BUG FIX: the original's "fail" branch tested `runs[...] != "fail"`,
        # so fail-runs were never checked and the fail expectation was applied
        # to every other run type instead.
        if status.state != cmd_def.final_state:
            raise Exception("run type:{0}, expected {1}, got {2}".format(kind, cmd_def.final_state, status.state))
    return True
if __name__ == '__main__':
    # Manual runner: bring the daemon up, run the scenario, always tear down.
    try:
        setUp()
        test_many()
    finally:
        tearDown()
| {
"content_hash": "ee2525009d959ef5f8cc95de4f49c3da",
"timestamp": "",
"source": "github",
"line_count": 211,
"max_line_length": 132,
"avg_line_length": 36.91943127962085,
"alnum_prop": 0.6342747111681644,
"repo_name": "dbentley/scoot",
"id": "66b2ea5aa73116d74f9e2c23acca21e5e625f0bf",
"size": "7813",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "daemon/protocol/python/tests/run_many.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Go",
"bytes": "685382"
},
{
"name": "Makefile",
"bytes": "2887"
},
{
"name": "Protocol Buffer",
"bytes": "3584"
},
{
"name": "Python",
"bytes": "36170"
},
{
"name": "Shell",
"bytes": "3425"
},
{
"name": "Thrift",
"bytes": "5323"
}
],
"symlink_target": ""
} |
import subprocess

# Launch the Django development server.
# Use an argv list: a bare command string without shell=True is only honoured
# on Windows; on POSIX it is treated as a single executable name and fails.
subprocess.call(['python', 'manage.py', 'runserver'])
| {
"content_hash": "6c58e69c5ca42178f0b966ad6f1b10ac",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 45,
"avg_line_length": 33,
"alnum_prop": 0.803030303030303,
"repo_name": "nishowsan/pyblog",
"id": "74774e36d17ad70a694532dce540186c849b82e2",
"size": "66",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "runner.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "162950"
},
{
"name": "HTML",
"bytes": "7480"
},
{
"name": "JavaScript",
"bytes": "326279"
},
{
"name": "Python",
"bytes": "7513"
}
],
"symlink_target": ""
} |
"""ProjektSuper URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from django.conf.urls import include, url
from django.contrib import admin
from MySuperApp import views
urlpatterns = [
    # Empty pattern: delegate every URL to MySuperApp's URLconf first.
    url(r'', include('MySuperApp.urls')),
    # Coordination views live under /koordynacje/.
    url(r'^koordynacje/', include('koordynacje.urls'))
]
| {
"content_hash": "03a7431457ba201f2f85b0c21982cbc5",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 79,
"avg_line_length": 35.84615384615385,
"alnum_prop": 0.7145922746781116,
"repo_name": "ztmtoosm/ztmtoosm-django",
"id": "bcabb182f50378c38b62b19d8588b6e9a3bc7ca0",
"size": "932",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ProjektSuper/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "255"
},
{
"name": "HTML",
"bytes": "10160"
},
{
"name": "JavaScript",
"bytes": "5217"
},
{
"name": "Python",
"bytes": "40661"
}
],
"symlink_target": ""
} |
import time
from django.conf import settings
from django.template import Context
from sekizai.context import SekizaiContext
from cms.api import add_plugin, create_page, create_title
from cms.cache import _get_cache_version, invalidate_cms_page_cache
from cms.cache.placeholder import (
_get_placeholder_cache_key, _get_placeholder_cache_version, _get_placeholder_cache_version_key,
_set_placeholder_cache_version, clear_placeholder_cache, get_placeholder_cache, set_placeholder_cache,
)
from cms.exceptions import PluginAlreadyRegistered
from cms.models import Page
from cms.plugin_pool import plugin_pool
from cms.test_utils.project.placeholderapp.models import Example1
from cms.test_utils.project.pluginapp.plugins.caching.cms_plugins import (
DateTimeCacheExpirationPlugin, LegacyCachePlugin, NoCachePlugin, SekizaiPlugin, TimeDeltaCacheExpirationPlugin,
TTLCacheExpirationPlugin, VaryCacheOnPlugin,
)
from cms.test_utils.testcases import CMSTestCase
from cms.test_utils.util.fuzzy_int import FuzzyInt
from cms.toolbar.toolbar import CMSToolbar
from cms.utils.conf import get_cms_setting
from cms.utils.helpers import get_timezone_name
class CacheTestCase(CMSTestCase):
def tearDown(self):
from django.core.cache import cache
super().tearDown()
cache.clear()
def setUp(self):
from django.core.cache import cache
super().setUp()
cache.clear()
    def test_cache_placeholder(self):
        """Placeholder rendering should be served from the placeholder cache
        on repeat renders (lower query counts), and bypass it when disabled."""
        template = "{% load cms_tags %}{% placeholder 'body' %}{% placeholder 'right-column' %}"
        page1 = create_page('test page 1', 'nav_playground.html', 'en', published=True)
        page1_url = page1.get_absolute_url()
        placeholder = page1.placeholders.filter(slot="body")[0]
        add_plugin(placeholder, "TextPlugin", 'en', body="English")
        add_plugin(placeholder, "TextPlugin", 'de', body="Deutsch")
        request = self.get_request(page1_url)
        request.current_page = Page.objects.get(pk=page1.pk)
        request.toolbar = CMSToolbar(request)
        # First render: cache is cold, so the full query set runs.
        with self.assertNumQueries(FuzzyInt(5, 9)):
            self.render_template_obj(template, {}, request)
        request = self.get_request(page1_url)
        request.session['cms_edit'] = True
        request.current_page = Page.objects.get(pk=page1.pk)
        request.toolbar = CMSToolbar(request)
        template = "{% load cms_tags %}{% placeholder 'body' %}{% placeholder 'right-column' %}"
        # Second render: fewer queries are expected now that the cache is warm.
        with self.assertNumQueries(2):
            self.render_template_obj(template, {}, request)
        # toolbar: rendering as a superuser with the toolbar shown.
        with self.login_user_context(self.get_superuser()):
            request = self.get_request(page1_url)
            request.session['cms_edit'] = True
            request.current_page = Page.objects.get(pk=page1.pk)
            request.toolbar = CMSToolbar(request)
            request.toolbar.show_toolbar = True
            template = "{% load cms_tags %}{% placeholder 'body' %}{% placeholder 'right-column' %}"
            with self.assertNumQueries(4):
                self.render_template_obj(template, {}, request)
        page1.publish('en')
        # Strip the Django middleware cache so only CMS-internal caching is measured.
        exclude = [
            'django.middleware.cache.UpdateCacheMiddleware',
            'django.middleware.cache.FetchFromCacheMiddleware'
        ]
        overrides = dict(
            CMS_PAGE_CACHE=False,
            MIDDLEWARE=[mw for mw in settings.MIDDLEWARE if mw not in exclude],
        )
        with self.settings(**overrides):
            with self.assertNumQueries(FuzzyInt(13, 25)):
                self.client.get(page1_url)
            with self.assertNumQueries(FuzzyInt(5, 14)):
                self.client.get(page1_url)
        # With the placeholder cache also disabled, repeat requests stay expensive.
        overrides['CMS_PLACEHOLDER_CACHE'] = False
        with self.settings(**overrides):
            with self.assertNumQueries(FuzzyInt(7, 18)):
                self.client.get(page1_url)
    def test_no_cache_plugin(self):
        """A plugin marked non-cacheable must disable page caching: responses
        carry no-cache and repeat requests re-render instead of hitting the cache."""
        page1 = create_page('test page 1', 'nav_playground.html', 'en',
                            published=True)
        page1_url = page1.get_absolute_url()
        placeholder1 = page1.placeholders.filter(slot='body')[0]
        placeholder2 = page1.placeholders.filter(slot='right-column')[0]
        try:
            plugin_pool.register_plugin(NoCachePlugin)
        except PluginAlreadyRegistered:
            pass
        add_plugin(placeholder1, 'TextPlugin', 'en', body="English")
        add_plugin(placeholder2, 'TextPlugin', 'en', body="Deutsch")
        template = "{% load cms_tags %}{% placeholder 'body' %}{% placeholder 'right-column' %}"
        # Ensure that we're testing in an environment WITHOUT the MW cache, as
        # we are testing the internal page cache, not the MW cache.
        exclude = [
            'django.middleware.cache.UpdateCacheMiddleware',
            'django.middleware.cache.CacheMiddleware',
            'django.middleware.cache.FetchFromCacheMiddleware'
        ]
        overrides = {
            'MIDDLEWARE': [mw for mw in settings.MIDDLEWARE if mw not in exclude]
        }
        with self.settings(**overrides):
            # Request the page without the 'no-cache' plugin
            request = self.get_request(page1_url)
            request.current_page = Page.objects.get(pk=page1.pk)
            request.toolbar = CMSToolbar(request)
            with self.assertNumQueries(FuzzyInt(18, 25)):
                response1 = self.client.get(page1_url)
                content1 = response1.content
            # Fetch it again, it is cached.
            request = self.get_request(page1_url)
            request.current_page = Page.objects.get(pk=page1.pk)
            request.toolbar = CMSToolbar(request)
            with self.assertNumQueries(0):
                response2 = self.client.get(page1_url)
                content2 = response2.content
            self.assertEqual(content1, content2)
            # Once again with PAGE_CACHE=False, to prove the cache can
            # be disabled
            request = self.get_request(page1_url)
            request.current_page = Page.objects.get(pk=page1.pk)
            request.toolbar = CMSToolbar(request)
            with self.settings(CMS_PAGE_CACHE=False):
                with self.assertNumQueries(FuzzyInt(5, 24)):
                    response3 = self.client.get(page1_url)
                    content3 = response3.content
            self.assertEqual(content1, content3)
            # Add the 'no-cache' plugin
            add_plugin(placeholder1, "NoCachePlugin", 'en')
            page1.publish('en')
            request = self.get_request(page1_url)
            request.current_page = Page.objects.get(pk=page1.pk)
            request.toolbar = CMSToolbar(request)
            with self.assertNumQueries(FuzzyInt(4, 6)):
                output = self.render_template_obj(template, {}, request)
            with self.assertNumQueries(FuzzyInt(14, 24)):
                response = self.client.get(page1_url)
                # The no-cache plugin must force a no-cache response header.
                self.assertTrue("no-cache" in response['Cache-Control'])
                resp1 = response.content.decode('utf8').split("$$$")[1]
            request = self.get_request(page1_url)
            request.current_page = Page.objects.get(pk=page1.pk)
            request.toolbar = CMSToolbar(request)
            with self.assertNumQueries(5):
                output2 = self.render_template_obj(template, {}, request)
            with self.settings(CMS_PAGE_CACHE=False):
                with self.assertNumQueries(FuzzyInt(8, 17)):
                    response = self.client.get(page1_url)
                    resp2 = response.content.decode('utf8').split("$$$")[1]
            # Renders must differ between requests, proving nothing was cached.
            self.assertNotEqual(output, output2)
            self.assertNotEqual(resp1, resp2)
        plugin_pool.unregister_plugin(NoCachePlugin)
    def test_timedelta_cache_plugin(self):
        """A plugin with a timedelta cache expiration should cap the response's
        Cache-Control max-age (45s here)."""
        page1 = create_page('test page 1', 'nav_playground.html', 'en',
                            published=True)
        placeholder1 = page1.placeholders.filter(slot="body")[0]
        placeholder2 = page1.placeholders.filter(slot="right-column")[0]
        # NOTE(review): unlike the sibling tests, this registration is not
        # wrapped in try/except PluginAlreadyRegistered — confirm intended.
        plugin_pool.register_plugin(TimeDeltaCacheExpirationPlugin)
        add_plugin(placeholder1, "TextPlugin", 'en', body="English")
        add_plugin(placeholder2, "TextPlugin", 'en', body="Deutsch")
        # Add *TimeDeltaCacheExpirationPlugin, expires in 45s.
        add_plugin(placeholder1, "TimeDeltaCacheExpirationPlugin", 'en')
        # Ensure that we're testing in an environment WITHOUT the MW cache, as
        # we are testing the internal page cache, not the MW cache.
        exclude = [
            'django.middleware.cache.UpdateCacheMiddleware',
            'django.middleware.cache.CacheMiddleware',
            'django.middleware.cache.FetchFromCacheMiddleware',
        ]
        overrides = {
            'MIDDLEWARE': [mw for mw in settings.MIDDLEWARE if mw not in exclude]
        }
        with self.settings(**overrides):
            page1.publish('en')
            request = self.get_request(page1.get_absolute_url())
            request.current_page = Page.objects.get(pk=page1.pk)
            request.toolbar = CMSToolbar(request)
            with self.assertNumQueries(FuzzyInt(14, 25)):  # was 14, 24
                response = self.client.get(page1.get_absolute_url())
            self.assertTrue('max-age=45' in response['Cache-Control'], response['Cache-Control'])
        plugin_pool.unregister_plugin(TimeDeltaCacheExpirationPlugin)
    def test_datetime_cache_plugin(self):
        """A plugin with a datetime cache expiration should cap the response's
        Cache-Control max-age (40s here)."""
        page1 = create_page('test page 1', 'nav_playground.html', 'en',
                            published=True)
        page1_url = page1.get_absolute_url()
        placeholder1 = page1.placeholders.filter(slot="body")[0]
        placeholder2 = page1.placeholders.filter(slot="right-column")[0]
        try:
            plugin_pool.register_plugin(DateTimeCacheExpirationPlugin)
        except PluginAlreadyRegistered:
            pass
        add_plugin(placeholder1, "TextPlugin", 'en', body="English")
        add_plugin(placeholder2, "TextPlugin", 'en', body="Deutsch")
        # Add *CacheExpirationPlugins, one expires in 50s, the other in 40s.
        # The page should expire in the least of these, or 40s.
        add_plugin(placeholder1, "DateTimeCacheExpirationPlugin", 'en')
        # Ensure that we're testing in an environment WITHOUT the MW cache, as
        # we are testing the internal page cache, not the MW cache.
        exclude = [
            'django.middleware.cache.UpdateCacheMiddleware',
            'django.middleware.cache.CacheMiddleware',
            'django.middleware.cache.FetchFromCacheMiddleware',
        ]
        overrides = {
            'MIDDLEWARE': [mw for mw in settings.MIDDLEWARE if mw not in exclude]
        }
        with self.settings(**overrides):
            page1.publish('en')
            request = self.get_request(page1_url)
            request.current_page = Page.objects.get(pk=page1.pk)
            request.toolbar = CMSToolbar(request)
            with self.assertNumQueries(FuzzyInt(14, 25)):  # was 14, 24
                response = self.client.get(page1_url)
            self.assertTrue('max-age=40' in response['Cache-Control'], response['Cache-Control'])
        plugin_pool.unregister_plugin(DateTimeCacheExpirationPlugin)
def TTLCacheExpirationPlugin(self):
    # NOTE(review): this method name lacks the ``test_`` prefix, so the test
    # runner never collects it and it currently does not run — presumably it
    # should be renamed (e.g. ``test_ttl_cache_expiration_plugin``); confirm
    # before renaming.  As written the name also shadows, as a class
    # attribute, the TTLCacheExpirationPlugin plugin class used below
    # (name lookups inside the body still resolve to the module-level
    # plugin class, so the body itself is unaffected).
    #
    # Intended behavior: a TTLCacheExpirationPlugin on the page should cap
    # the page cache's Cache-Control max-age at 50 seconds.
    page1 = create_page('test page 1', 'nav_playground.html', 'en',
                        published=True)
    placeholder1 = page1.placeholders.filter(slot="body")[0]
    placeholder2 = page1.placeholders.filter(slot="right-column")[0]
    # NOTE(review): unlike the sibling tests, this registration is not
    # guarded by ``except PluginAlreadyRegistered`` and would raise if the
    # plugin were already registered.
    plugin_pool.register_plugin(TTLCacheExpirationPlugin)
    add_plugin(placeholder1, "TextPlugin", 'en', body="English")
    add_plugin(placeholder2, "TextPlugin", 'en', body="Deutsch")
    # Add a TTLCacheExpirationPlugin; the assertion below expects its TTL
    # (50s) to become the page's max-age.
    add_plugin(placeholder1, "TTLCacheExpirationPlugin", 'en')
    # Ensure that we're testing in an environment WITHOUT the MW cache, as
    # we are testing the internal page cache, not the MW cache.
    exclude = [
        'django.middleware.cache.UpdateCacheMiddleware',
        'django.middleware.cache.CacheMiddleware',
        'django.middleware.cache.FetchFromCacheMiddleware',
    ]
    overrides = {
        'MIDDLEWARE': [mw for mw in settings.MIDDLEWARE if mw not in exclude]
    }
    with self.settings(**overrides):
        page1.publish('en')
        request = self.get_request('/en/')
        request.current_page = Page.objects.get(pk=page1.pk)
        request.toolbar = CMSToolbar(request)
        with self.assertNumQueries(FuzzyInt(14, 25)):  # was 14, 24
            response = self.client.get('/en/')
        self.assertTrue('max-age=50' in response['Cache-Control'], response['Cache-Control'])
    plugin_pool.unregister_plugin(TTLCacheExpirationPlugin)
def test_expiration_cache_plugins(self):
    """
    Tests that when used in combination, the page is cached to the
    shortest TTL.
    """
    page1 = create_page('test page 1', 'nav_playground.html', 'en',
                        published=True)
    page1_url = page1.get_absolute_url()
    placeholder1 = page1.placeholders.filter(slot="body")[0]
    placeholder2 = page1.placeholders.filter(slot="right-column")[0]
    plugin_pool.register_plugin(TTLCacheExpirationPlugin)
    try:
        plugin_pool.register_plugin(DateTimeCacheExpirationPlugin)
    except PluginAlreadyRegistered:
        pass
    try:
        plugin_pool.register_plugin(NoCachePlugin)
    except PluginAlreadyRegistered:
        pass
    add_plugin(placeholder1, "TextPlugin", 'en', body="English")
    add_plugin(placeholder2, "TextPlugin", 'en', body="Deutsch")
    # Add *CacheExpirationPlugins, one expires in 50s, the other in 40s.
    # The page should expire in the least of these, or 40s.
    add_plugin(placeholder1, "TTLCacheExpirationPlugin", 'en')
    add_plugin(placeholder2, "DateTimeCacheExpirationPlugin", 'en')
    # Ensure that we're testing in an environment WITHOUT the MW cache, as
    # we are testing the internal page cache, not the MW cache.
    exclude = [
        'django.middleware.cache.UpdateCacheMiddleware',
        'django.middleware.cache.CacheMiddleware',
        'django.middleware.cache.FetchFromCacheMiddleware',
    ]
    overrides = {
        'MIDDLEWARE': [mw for mw in settings.MIDDLEWARE if mw not in exclude]
    }
    with self.settings(**overrides):
        page1.publish('en')
        request = self.get_request(page1_url)
        request.current_page = Page.objects.get(pk=page1.pk)
        request.toolbar = CMSToolbar(request)
        # First request: cache miss — the page is rendered and cached.
        with self.assertNumQueries(FuzzyInt(14, 26)):
            response = self.client.get(page1_url)
            resp1 = response.content.decode('utf8').split("$$$")[1]
        self.assertTrue('max-age=40' in response['Cache-Control'], response['Cache-Control'])
        cache_control1 = response['Cache-Control']
        expires1 = response['Expires']
        time.sleep(1)  # This ensures that the cache has aged measurably
        # Request it again, this time, it comes from the cache
        request = self.get_request(page1_url)
        request.current_page = Page.objects.get(pk=page1.pk)
        request.toolbar = CMSToolbar(request)
        with self.assertNumQueries(0):
            response = self.client.get(page1_url)
            resp2 = response.content.decode('utf8').split("$$$")[1]
        # Content will be the same
        self.assertEqual(resp2, resp1)
        # Cache-Control will be different because the cache has aged
        self.assertNotEqual(response['Cache-Control'], cache_control1)
        # However, the Expires timestamp will be the same
        self.assertEqual(response['Expires'], expires1)
    plugin_pool.unregister_plugin(TTLCacheExpirationPlugin)
    plugin_pool.unregister_plugin(DateTimeCacheExpirationPlugin)
    plugin_pool.unregister_plugin(NoCachePlugin)
def test_dual_legacy_cache_plugins(self):
    """LegacyCachePlugin's old-style no-cache marker is ignored because the
    plugin also defines get_cache_expiration(), so the page stays
    cacheable (no ``no-cache`` in Cache-Control)."""
    page = create_page('test page 1', 'nav_playground.html', 'en',
                       published=True)
    page_url = page.get_absolute_url()
    body_ph = page.placeholders.filter(slot="body")[0]
    column_ph = page.placeholders.filter(slot="right-column")[0]
    plugin_pool.register_plugin(LegacyCachePlugin)
    add_plugin(body_ph, "TextPlugin", 'en', body="English")
    add_plugin(column_ph, "TextPlugin", 'en', body="Deutsch")
    # In older versions of the CMS this plugin's no-cache flag would have
    # prevented page caching; with get_cache_expiration() defined it is
    # ignored.
    add_plugin(body_ph, "LegacyCachePlugin", 'en')
    # Run WITHOUT the middleware cache: only the internal page cache is
    # under test here.
    cache_middleware = (
        'django.middleware.cache.UpdateCacheMiddleware',
        'django.middleware.cache.CacheMiddleware',
        'django.middleware.cache.FetchFromCacheMiddleware',
    )
    middleware = [mw for mw in settings.MIDDLEWARE if mw not in cache_middleware]
    with self.settings(MIDDLEWARE=middleware):
        page.publish('en')
        request = self.get_request(page_url)
        request.current_page = Page.objects.get(pk=page.pk)
        request.toolbar = CMSToolbar(request)
        with self.assertNumQueries(FuzzyInt(14, 25)):
            response = self.client.get(page_url)
        self.assertTrue('no-cache' not in response['Cache-Control'])
    plugin_pool.unregister_plugin(LegacyCachePlugin)
def test_cache_page(self):
    """End-to-end check of the internal page cache: a page is cached after
    the first anonymous request, invalidated on unpublish, and bypassed
    entirely when CMS_PAGE_CACHE is False."""
    # Ensure that we're testing in an environment WITHOUT the MW cache...
    exclude = [
        'django.middleware.cache.UpdateCacheMiddleware',
        'django.middleware.cache.FetchFromCacheMiddleware'
    ]
    overrides = {
        'MIDDLEWARE': [mw for mw in settings.MIDDLEWARE if mw not in exclude]
    }
    with self.settings(**overrides):
        # Silly to do these tests if this setting isn't True
        page_cache_setting = get_cms_setting('PAGE_CACHE')
        self.assertTrue(page_cache_setting)
        # Create a test page
        page1 = create_page('test page 1', 'nav_playground.html', 'en', published=True)
        page1_url = page1.get_absolute_url()
        # Add some content
        placeholder = page1.placeholders.filter(slot="body")[0]
        add_plugin(placeholder, "TextPlugin", 'en', body="English")
        add_plugin(placeholder, "TextPlugin", 'de', body="Deutsch")
        # Create a request object
        request = self.get_request(page1_url, 'en')
        # Ensure that user is NOT authenticated
        self.assertFalse(request.user.is_authenticated)
        # Test that the page is initially uncached
        with self.assertNumQueries(FuzzyInt(1, 24)):
            response = self.client.get(page1_url)
            self.assertEqual(response.status_code, 200)
        #
        # Test that subsequent requests of the same page are cached by
        # asserting that they require fewer queries.
        #
        with self.assertNumQueries(0):
            response = self.client.get(page1_url)
            self.assertEqual(response.status_code, 200)
        #
        # Test that the cache is invalidated on unpublishing the page
        # (unpublish bumps the internal cache version counter).
        #
        old_version = _get_cache_version()
        page1.unpublish('en')
        self.assertGreater(_get_cache_version(), old_version)
        #
        # Test that this means the page is actually not cached.
        #
        page1.publish('en')
        with self.assertNumQueries(FuzzyInt(1, 24)):
            response = self.client.get(page1_url)
            self.assertEqual(response.status_code, 200)
        #
        # Test that the above behavior is different when CMS_PAGE_CACHE is
        # set to False (disabled)
        #
        with self.settings(CMS_PAGE_CACHE=False):
            # Test that the page is initially un-cached
            with self.assertNumQueries(FuzzyInt(1, 20)):
                response = self.client.get(page1_url)
                self.assertEqual(response.status_code, 200)
            #
            # Test that subsequent requests of the same page are still requires DB
            # access.
            #
            with self.assertNumQueries(FuzzyInt(1, 20)):
                response = self.client.get(page1_url)
                self.assertEqual(response.status_code, 200)
def test_no_page_cache_on_toolbar_edit(self):
    """Edit-mode requests bypass the page cache: they neither populate it
    nor are served from it, while normal requests still use it."""
    with self.settings(CMS_PAGE_CACHE=True):
        # Create a test page
        page1 = create_page('test page 1', 'nav_playground.html', 'en')
        page1_url = page1.get_absolute_url()
        # Add some content
        placeholder = page1.placeholders.filter(slot="body")[0]
        add_plugin(placeholder, "TextPlugin", 'en', body="English")
        add_plugin(placeholder, "TextPlugin", 'de', body="Deutsch")
        # Publish
        page1.publish('en')
        # Set edit mode (toolbar edit state lives in the session)
        session = self.client.session
        session['cms_edit'] = True
        session.save()
        # Make an initial ?edit request — must not populate the cache.
        with self.assertNumQueries(FuzzyInt(1, 24)):
            response = self.client.get(page1_url)
            self.assertEqual(response.status_code, 200)
        # Disable edit mode
        session = self.client.session
        session['cms_edit'] = False
        session.save()
        # Set the cache (queries happen: the edit request did not cache)
        with self.assertNumQueries(FuzzyInt(1, 24)):
            response = self.client.get(page1_url)
            self.assertEqual(response.status_code, 200)
        # Assert cached content was used
        with self.assertNumQueries(0):
            response = self.client.get(page1_url)
            self.assertEqual(response.status_code, 200)
        # Set edit mode once more
        session = self.client.session
        session['cms_edit'] = True
        session.save()
        # Assert no cached content was used for the ?edit request
        with self.assertNumQueries(FuzzyInt(1, 24)):
            response = self.client.get(f'{page1_url}?edit')
            self.assertEqual(response.status_code, 200)
def test_invalidate_restart(self):
    """After the plugin pool is rebuilt (as happens on a server restart),
    a previously cached page must be re-rendered (queries occur again)."""
    # Ensure that we're testing in an environment WITHOUT the MW cache...
    exclude = [
        'django.middleware.cache.UpdateCacheMiddleware',
        'django.middleware.cache.FetchFromCacheMiddleware'
    ]
    overrides = {
        'MIDDLEWARE': [mw for mw in settings.MIDDLEWARE if mw not in exclude]
    }
    with self.settings(**overrides):
        # Silly to do these tests if this setting isn't True
        page_cache_setting = get_cms_setting('PAGE_CACHE')
        self.assertTrue(page_cache_setting)
        # Create a test page
        page1 = create_page('test page 1', 'nav_playground.html', 'en', published=True)
        page1_url = page1.get_absolute_url()
        # Add some content
        placeholder = page1.placeholders.filter(slot="body")[0]
        add_plugin(placeholder, "TextPlugin", 'en', body="English")
        add_plugin(placeholder, "TextPlugin", 'de', body="Deutsch")
        # Create a request object
        request = self.get_request(page1.get_path(), 'en')
        # Ensure that user is NOT authenticated
        self.assertFalse(request.user.is_authenticated)
        # Test that the page is initially uncached
        with self.assertNumQueries(FuzzyInt(1, 24)):
            response = self.client.get(page1_url)
            self.assertEqual(response.status_code, 200)
        #
        # Test that subsequent requests of the same page are cached by
        # asserting that they require fewer queries.
        #
        with self.assertNumQueries(0):
            response = self.client.get(page1_url)
            self.assertEqual(response.status_code, 200)
        # Simulate a restart: clear and re-discover the plugin pool, then
        # restore the original plugins list.
        old_plugins = plugin_pool.plugins
        plugin_pool.clear()
        plugin_pool.discover_plugins()
        plugin_pool.plugins = old_plugins
        # The cached entry must no longer be served — queries happen again.
        with self.assertNumQueries(FuzzyInt(1, 20)):
            response = self.client.get(page1_url)
            self.assertEqual(response.status_code, 200)
def test_sekizai_plugin(self):
    """The sekizai script emitted by SekizaiPlugin is present on repeated
    requests of the page (first and subsequent hits)."""
    page = create_page('test page 1', 'nav_playground.html', 'en',
                       published=True)
    body_ph = page.placeholders.filter(slot="body")[0]
    column_ph = page.placeholders.filter(slot="right-column")[0]
    plugin_pool.register_plugin(SekizaiPlugin)
    add_plugin(body_ph, "SekizaiPlugin", 'en')
    add_plugin(column_ph, "TextPlugin", 'en', body="Deutsch")
    page.publish('en')
    # Request the page twice; both responses must contain the script.
    for _ in range(2):
        response = self.client.get(page.get_absolute_url())
        self.assertContains(response, 'alert(')
def test_cache_invalidation(self):
    """Publishing new content replaces the previously served page output."""
    # Keep the middleware cache out of the picture; only the internal
    # page cache is under test here.
    dropped = (
        'django.middleware.cache.UpdateCacheMiddleware',
        'django.middleware.cache.FetchFromCacheMiddleware'
    )
    middleware = [mw for mw in settings.MIDDLEWARE if mw not in dropped]
    with self.settings(MIDDLEWARE=middleware):
        # These checks are meaningless unless the page cache is enabled.
        page_cache_setting = get_cms_setting('PAGE_CACHE')
        self.assertTrue(page_cache_setting)
        page = create_page('test page 1', 'nav_playground.html', 'en',
                           published=True)
        page_url = page.get_absolute_url()
        body_ph = page.placeholders.get(slot="body")
        add_plugin(body_ph, "TextPlugin", 'en', body="First content")
        page.publish('en')
        # Request twice; both responses carry the original content.
        for _ in range(2):
            response = self.client.get(page_url)
            self.assertContains(response, 'First content')
        # Publishing an additional plugin must invalidate the cached page.
        add_plugin(body_ph, "TextPlugin", 'en', body="Second content")
        page.publish('en')
        response = self.client.get(page_url)
        self.assertContains(response, 'Second content')
def test_render_placeholder_cache(self):
    """
    Regression test for #4223
    Assert that placeholder cache is cleared correctly when a plugin is saved
    """
    invalidate_cms_page_cache()
    ex = Example1(
        char_1='one',
        char_2='two',
        char_3='tree',
        char_4='four'
    )
    ex.save()
    ph1 = ex.placeholder
    ###
    # add the test plugin
    ##
    test_plugin = add_plugin(ph1, "TextPlugin", "en", body="Some text")
    test_plugin.save()
    request = self.get_request()
    content_renderer = self.get_content_renderer(request)
    # asserting initial text
    context = SekizaiContext()
    context['request'] = self.get_request()
    text = content_renderer.render_placeholder(ph1, context)
    self.assertEqual(text, "Some text")
    # deleting local plugin cache
    del ph1._plugins_cache
    test_plugin.body = 'Other text'
    test_plugin.save()
    # plugin text has changed, so the placeholder rendering must reflect
    # the new content immediately — i.e. the placeholder cache is expected
    # to have been cleared by saving the plugin.
    text = content_renderer.render_placeholder(ph1, context)
    self.assertEqual(text, "Other text")
def test_render_placeholderfield_cache_in_custom_model(self):
    """
    Regression test for #6912
    Assert that placeholder of a placeholderfield in custom model has its cache cleared correctly when mark_as_dirty is called in the admin
    """
    invalidate_cms_page_cache()
    # Create an instance of a custom model containing a placeholderfield
    ex = Example1(char_1="one", char_2="two", char_3="tree", char_4="four")
    ex.save()
    ph1 = ex.placeholder
    # Add a first plugin
    test_plugin = add_plugin(ph1, "TextPlugin", "en", body="Some text")
    test_plugin.save()
    # Create a first request using render_placeholder to ensure that the content is equal to the first plugin content
    request = self.get_request()
    content_renderer = self.get_content_renderer(request)
    context = SekizaiContext()
    context["request"] = self.get_request()
    text = content_renderer.render_placeholder(ph1, context, use_cache=True)
    self.assertEqual(text, "Some text")
    # Add a second plugin in the placeholder
    test_plugin = add_plugin(ph1, "TextPlugin", "en", body="Some other text")
    test_plugin.save()
    # Clear plugins cache to ensure that cms.utils.plugins.get_plugins() will refetch the plugins
    del ph1._plugins_cache
    # Create a second request using render_placeholder to ensure that the content is still equal to the first plugin content as cache was not cleared yet
    request = self.get_request()
    content_renderer = self.get_content_renderer(request)
    context = SekizaiContext()
    context["request"] = self.get_request()
    text = content_renderer.render_placeholder(ph1, context, use_cache=True)
    self.assertEqual(text, "Some text")
    # Mark placeholder as dirty as it is done in cms.admin.placeholderadmin file
    # (clear_cache=False: the dirty flag alone must be enough for the next
    # render to pick up the new content, as asserted below).
    ph1.mark_as_dirty("en", clear_cache=False)
    # Create a last request to ensure that rendered content contains the two plugins content
    request = self.get_request()
    content_renderer = self.get_content_renderer(request)
    context = SekizaiContext()
    context["request"] = self.get_request()
    text = content_renderer.render_placeholder(ph1, context, use_cache=True)
    self.assertEqual(text, "Some textSome other text")
class PlaceholderCacheTestCase(CMSTestCase):
    """
    Tests for the low-level placeholder-cache helpers: version keys,
    cache keys (including vary-on request headers and hashing of
    over-long keys) and the CMS_LIMIT_TTL_CACHE_FUNCTION setting.
    """
    def setUp(self):
        from django.core.cache import cache
        super().setUp()
        cache.clear()
        self.page = create_page(
            'en test page', 'nav_playground.html', 'en', published=True)
        # Now create and publish as 'de' title
        create_title('de', "de test page", self.page)
        self.page.publish('de')
        self.placeholder = self.page.placeholders.filter(slot="body")[0]
        plugin_pool.register_plugin(VaryCacheOnPlugin)
        add_plugin(self.placeholder, 'TextPlugin', 'en', body='English')
        add_plugin(self.placeholder, 'TextPlugin', 'de', body='Deutsch')
        add_plugin(self.placeholder, 'VaryCacheOnPlugin', 'en')
        add_plugin(self.placeholder, 'VaryCacheOnPlugin', 'de')
        # Requests differing only in language and in the HTTP_COUNTRY_CODE
        # header (presumably the header VaryCacheOnPlugin varies on — see
        # its definition).
        self.en_request = self.get_request('/en/')
        self.en_request.current_page = Page.objects.get(pk=self.page.pk)
        self.en_us_request = self.get_request('/en/')
        self.en_us_request.META['HTTP_COUNTRY_CODE'] = 'US'
        self.en_uk_request = self.get_request('/en/')
        self.en_uk_request.META['HTTP_COUNTRY_CODE'] = 'UK'
        self.de_request = self.get_request('/de/')
        self.de_request.current_page = Page.objects.get(pk=self.page.pk)
    def tearDown(self):
        from django.core.cache import cache
        super().tearDown()
        plugin_pool.unregister_plugin(VaryCacheOnPlugin)
        cache.clear()
    def test_get_placeholder_cache_version_key(self):
        # The version-key layout must stay stable so entries never collide.
        cache_version_key = '{prefix}|placeholder_cache_version|id:{id}|lang:{lang}|site:{site}'.format(
            prefix=get_cms_setting('CACHE_PREFIX'),
            id=self.placeholder.pk,
            lang='en',
            site=1,
        )
        self.assertEqual(
            _get_placeholder_cache_version_key(self.placeholder, 'en', 1),
            cache_version_key
        )
    def test_set_clear_get_placeholder_cache_version(self):
        # Clearing a placeholder's cache must bump its version number.
        initial, _ = _get_placeholder_cache_version(self.placeholder, 'en', 1)
        clear_placeholder_cache(self.placeholder, 'en', 1)
        version, _ = _get_placeholder_cache_version(self.placeholder, 'en', 1)
        self.assertGreater(version, initial)
    def test_get_placeholder_cache_key(self):
        # The cache key embeds the version and the vary-on values, so it must
        # differ per language and per country-code header.
        version, vary_on_list = _get_placeholder_cache_version(self.placeholder, 'en', 1)
        # No country code on the request -> '_' placeholder in the key.
        desired_key = '{prefix}|render_placeholder|id:{id}|lang:{lang}|site:{site}|tz:{tz}|v:{version}|country-code:{cc}'.format(
            prefix=get_cms_setting('CACHE_PREFIX'),
            id=self.placeholder.pk,
            lang='en',
            site=1,
            tz=get_timezone_name(),
            version=version,
            cc='_',
        )
        _set_placeholder_cache_version(self.placeholder, 'en', 1, version, vary_on_list=vary_on_list, duration=1)
        actual_key = _get_placeholder_cache_key(self.placeholder, 'en', 1, self.en_request)
        self.assertEqual(actual_key, desired_key)
        en_key = _get_placeholder_cache_key(self.placeholder, 'en', 1, self.en_request)
        de_key = _get_placeholder_cache_key(self.placeholder, 'de', 1, self.de_request)
        self.assertNotEqual(en_key, de_key)
        en_us_key = _get_placeholder_cache_key(self.placeholder, 'en', 1, self.en_us_request)
        self.assertNotEqual(en_key, en_us_key)
        # With HTTP_COUNTRY_CODE=US the country code appears in the key.
        desired_key = '{prefix}|render_placeholder|id:{id}|lang:{lang}|site:{site}|tz:{tz}|v:{version}|country-code:{cc}'.format(
            prefix=get_cms_setting('CACHE_PREFIX'),
            id=self.placeholder.pk,
            lang='en',
            site=1,
            tz=get_timezone_name(),
            version=version,
            cc='US',
        )
        self.assertEqual(en_us_key, desired_key)
    def test_set_get_placeholder_cache(self):
        # Render the placeholder under each request variant and check the
        # cache stores/returns them independently.
        en_renderer = self.get_content_renderer(self.en_request)
        en_context = Context({
            'request': self.en_request,
        })
        en_us_renderer = self.get_content_renderer(self.en_us_request)
        en_us_context = Context({
            'request': self.en_us_request,
        })
        en_uk_renderer = self.get_content_renderer(self.en_uk_request)
        en_uk_context = Context({
            'request': self.en_uk_request,
        })
        en_content = en_renderer.render_placeholder(self.placeholder, en_context, 'en', width=350)
        en_us_content = en_us_renderer.render_placeholder(self.placeholder, en_us_context, 'en', width=350)
        en_uk_content = en_uk_renderer.render_placeholder(self.placeholder, en_uk_context, 'en', width=350)
        # Drop the local plugins cache so the 'de' render refetches plugins.
        del self.placeholder._plugins_cache
        de_renderer = self.get_content_renderer(self.de_request)
        de_context = Context({
            'request': self.de_request,
        })
        de_content = de_renderer.render_placeholder(self.placeholder, de_context, 'de', width=350)
        self.assertNotEqual(en_content, de_content)
        set_placeholder_cache(self.placeholder, 'en', 1, en_content, self.en_request)
        cached_en_content = get_placeholder_cache(self.placeholder, 'en', 1, self.en_request)
        self.assertEqual(cached_en_content, en_content)
        set_placeholder_cache(self.placeholder, 'de', 1, de_content, self.de_request)
        cached_de_content = get_placeholder_cache(self.placeholder, 'de', 1, self.de_request)
        self.assertNotEqual(cached_en_content, cached_de_content)
        set_placeholder_cache(self.placeholder, 'en', 1, en_us_content, self.en_us_request)
        cached_en_us_content = get_placeholder_cache(self.placeholder, 'en', 1, self.en_us_request)
        self.assertNotEqual(cached_en_content, cached_en_us_content)
        set_placeholder_cache(self.placeholder, 'en', 1, en_uk_content, self.en_uk_request)
        cached_en_uk_content = get_placeholder_cache(self.placeholder, 'en', 1, self.en_uk_request)
        self.assertNotEqual(cached_en_us_content, cached_en_uk_content)
    def test_set_get_placeholder_cache_with_long_prefix(self):
        """
        This is for testing that everything continues to work even when the
        cache-keys are hashed.
        """
        # Use an absurdly long cache prefix to get us in the right neighborhood...
        with self.settings(CMS_CACHE_PREFIX="super_lengthy_prefix" * 9):  # 180 chars
            en_crazy_request = self.get_request('/en/')
            # NOTE(review): this passes ``self.de_request`` — presumably it
            # should be ``en_crazy_request``; confirm against the intent of
            # this test before changing.
            en_crazy_renderer = self.get_content_renderer(self.de_request)
            # Use a ridiculously long "country code" (80 chars), already we're at 260 chars.
            en_crazy_request.META['HTTP_COUNTRY_CODE'] = 'US' * 40  # 80 chars
            en_crazy_context = Context({'request': en_crazy_request})
            en_crazy_content = en_crazy_renderer.render_placeholder(
                self.placeholder,
                en_crazy_context,
                language='en',
                width=350,
            )
            set_placeholder_cache(self.placeholder, 'en', 1, en_crazy_content, en_crazy_request)
            # Prove that it is hashed...
            crazy_cache_key = _get_placeholder_cache_key(self.placeholder, 'en', 1, en_crazy_request)
            key_length = len(crazy_cache_key)
            # 221 = 180 (prefix length) + 1 (separator) + 40 (sha1 hash)
            self.assertTrue('render_placeholder' not in crazy_cache_key and key_length == 221)
            # Prove it still works as expected
            cached_en_crazy_content = get_placeholder_cache(self.placeholder, 'en', 1, en_crazy_request)
            self.assertEqual(en_crazy_content, cached_en_crazy_content)
    def test_cache_limit_ttl(self):
        """
        Test the `CMS_LIMIT_TTL_CACHE_FUNCTION` setting that allows replacing
        the default 40-second page cache TTL with a business-logic function.
        """
        page1 = create_page('test page 1', 'nav_playground.html', 'en',
                            published=True)
        page1_url = page1.get_absolute_url()
        # Dotted path to the module-level ``limit_page_cache_ttl_test_5`` hook.
        limit_page_cache_ttl_function = ".".join([PlaceholderCacheTestCase.__module__, limit_page_cache_ttl_test_5.__name__])
        with self.settings(CMS_LIMIT_TTL_CACHE_FUNCTION=limit_page_cache_ttl_function):
            page1.publish('en')
            request = self.get_request(page1_url)
            request.current_page = Page.objects.get(pk=page1.pk)
            response = self.client.get(page1_url)
            self.assertTrue('max-age=5' in response['Cache-Control'], response['Cache-Control'])
    def test_cache_limit_ttl_greater_than_default_cache_ttl(self):
        """
        Test the `CMS_LIMIT_TTL_CACHE_FUNCTION` setting with a function that
        returns a value much greater than the default value of 40 seconds.
        """
        page1 = create_page('test page 1', 'nav_playground.html', 'en',
                            published=True)
        page1_url = page1.get_absolute_url()
        limit_page_cache_ttl_function = ".".join([PlaceholderCacheTestCase.__module__, limit_page_cache_ttl_test_500.__name__])
        with self.settings(CMS_LIMIT_TTL_CACHE_FUNCTION=limit_page_cache_ttl_function):
            page1.publish('en')
            request = self.get_request(page1_url)
            request.current_page = Page.objects.get(pk=page1.pk)
            response = self.client.get(page1_url)
            # A limit above the default must not raise max-age beyond 40s.
            self.assertTrue('max-age=40' in response['Cache-Control'], response['Cache-Control'])
def limit_page_cache_ttl_test_5(response):
    """CMS_LIMIT_TTL_CACHE_FUNCTION test hook: cap the page cache TTL at 5s."""
    return 5
def limit_page_cache_ttl_test_500(response):
    """CMS_LIMIT_TTL_CACHE_FUNCTION test hook returning 500 seconds.

    Used by ``test_cache_limit_ttl_greater_than_default_cache_ttl`` to prove
    that a limit *larger* than the default 40s TTL does not raise the page's
    max-age above the default. The previous body returned 40, which made that
    test pass vacuously without exercising the capping logic.
    """
    return 500
| {
"content_hash": "c0368b4337815e12b4efceccb79e82b0",
"timestamp": "",
"source": "github",
"line_count": 901,
"max_line_length": 157,
"avg_line_length": 44.68257491675916,
"alnum_prop": 0.6150177600039742,
"repo_name": "rsalmaso/django-cms",
"id": "76a7f0222019a84123890d560a97489baaa1fa1f",
"size": "40259",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "cms/tests/test_cache.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "204223"
},
{
"name": "JavaScript",
"bytes": "1250281"
},
{
"name": "Python",
"bytes": "2386268"
},
{
"name": "SCSS",
"bytes": "137693"
},
{
"name": "Shell",
"bytes": "22511"
}
],
"symlink_target": ""
} |
"""
@author: efourrier
Purpose : File with all custom exceptions
"""
class NotNumericColumn(Exception):
    """Raised when a column that was expected to be numeric is not numeric."""
    pass
class NumericError(Exception):
    """Raised when a column that should not be numeric is numeric."""
    pass
# class NotFactor
| {
"content_hash": "31e0a56be9fe153e5b6b1cf2c1244719",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 45,
"avg_line_length": 17.533333333333335,
"alnum_prop": 0.6768060836501901,
"repo_name": "ericfourrier/auto-clean",
"id": "48970a5d692ba1a8480c1c9898ceeb571c6e2001",
"size": "309",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "autoc/exceptions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "78253"
}
],
"symlink_target": ""
} |
"""
Plugin for generation of Sphinx-suitable JSON from Protobuf definitions
It's a plugin for protoc as per https://developers.google.com/protocol-buffers/docs/reference/other
Usage:
protoc --plugin=protoc-gen-custom=<script path>/protobuf-json-docs.py <proto file>
The JSON output can then be interpreted by protobufdomain.py to make RST files for Sphinx
"""
import sys
import collections
from google.protobuf.compiler import plugin_pb2 as plugin
import itertools
import json
from google.protobuf.descriptor_pb2 import DescriptorProto, EnumDescriptorProto, EnumValueDescriptorProto, FieldDescriptorProto, ServiceDescriptorProto, MethodDescriptorProto
def simplify_name(name):
    """Return only the part after the final '.', dropping any namespace
    qualifiers, to produce short names for Sphinx."""
    return name.rpartition(".")[2]
def convert_protodef_to_editable(proto):
    """
    Wrap a protobuf descriptor in a mutable "Editable" object.

    Protobuf descriptor objects can't have arbitrary fields added, and we
    later need to attach comments to them, so the fields we care about are
    copied onto a plain object (recursively for nested descriptors).
    Raises Exception for descriptor kinds this plugin does not handle.
    """
    class Editable(object):
        def __init__(self, prot):
            self.kind = type(prot)
            self.name = prot.name
            self.comment = ""
            self.options = dict([(key.name, value) for (key, value) in prot.options.ListFields()])
            if isinstance(prot, EnumDescriptorProto):
                self.value = [convert_protodef_to_editable(x) for x in prot.value]
            elif isinstance(prot, DescriptorProto):
                self.field = [convert_protodef_to_editable(x) for x in prot.field]
                self.enum_type = [convert_protodef_to_editable(x) for x in prot.enum_type]
                self.nested_type = prot.nested_type
                self.oneof_decl = prot.oneof_decl
            elif isinstance(prot, EnumValueDescriptorProto):
                self.number = prot.number
            elif isinstance(prot, FieldDescriptorProto):
                # 11 = TYPE_MESSAGE, 14 = TYPE_ENUM: these carry a type_name
                # reference; strip the leading "." to get the full name.
                if prot.type in [11, 14]:
                    self.ref_type = prot.type_name[1:]
                self.type = prot.type
                self.label = prot.label
            elif isinstance(prot, ServiceDescriptorProto):
                self.method = [convert_protodef_to_editable(x) for x in prot.method]
            elif isinstance(prot, MethodDescriptorProto):
                self.input_type = prot.input_type
                self.output_type = prot.output_type
            else:
                # Call-form raise works on both Python 2 and 3
                # (was the Python-2-only ``raise Exception, type(prot)``).
                raise Exception(type(prot))
    return Editable(proto)
def traverse(proto_file):
    """
    Walk a FileDescriptorProto produced by protoc.

    The file's SourceCodeInfo locations are first arranged into a nested
    ``tree`` keyed by descriptor path components; the comments found there
    are then attached to every service, enum and message (recursing into
    nested types). Returns a dict with:

    * "types": flattened list of (Editable item, package name) pairs
    * "file": the file-level detached leading comments
    """
    def _collapse_comments(comments):
        # Merge leading + trailing comments, one stripped line per original.
        return '\n'.join(
            [c.strip() for c in (comments["leading_comments"] + comments["trailing_comments"]).split('\n')])
    def _traverse(package, items, tree):
        for item_index, item in enumerate(items):
            item = convert_protodef_to_editable(item)
            if item_index in tree:
                comments = tree[item_index]
                if "leading_comments" in comments or "trailing_comments" in comments:
                    item.comment = _collapse_comments(comments)
                    del comments["leading_comments"]
                    del comments["trailing_comments"]
                if item.kind is EnumDescriptorProto:
                    if 2 in comments:  # value in EnumDescriptorProto
                        for k in comments[2]:
                            value_comment = comments[2][k]
                            if value_comment != {}:
                                item.value[k].comment = _collapse_comments(value_comment)
                elif item.kind is DescriptorProto:
                    if 2 in comments:  # field in DescriptorProto
                        for k in comments[2]:
                            field_comment = comments[2][k]
                            if field_comment != {}:
                                item.field[k].comment = _collapse_comments(field_comment)
                elif item.kind is ServiceDescriptorProto:
                    if 2 in comments:  # method in ServiceDescriptorProto
                        for k in comments[2]:
                            method_comment = comments[2][k]
                            if method_comment != {}:
                                item.method[k].comment = _collapse_comments(method_comment)
                else:
                    # Call-form raise: valid on both Python 2 and 3
                    # (was ``raise Exception, item.kind``).
                    raise Exception(item.kind)
            yield item, package
            if item.kind is DescriptorProto:
                for enum in item.enum_type:
                    yield enum, package
                for nested in item.nested_type:
                    nested_package = package + "." + item.name
                    for nested_item, np in _traverse(nested_package, [nested], tree[item_index]):
                        yield nested_item, np
    tree = collections.defaultdict(collections.defaultdict)
    for loc in proto_file.source_code_info.location:
        if loc.leading_comments or loc.trailing_comments:
            place = tree
            for p in loc.path:
                # ``dict.has_key`` was removed in Python 3; ``in`` works in
                # both. The explicit check matters: the defaultdict factory
                # would otherwise create plain defaultdicts with no factory.
                if p not in place:
                    place[p] = collections.defaultdict(collections.defaultdict)
                place = place[p]
            place["leading_comments"] = loc.leading_comments
            place["trailing_comments"] = loc.trailing_comments
    # Only message, services, enums, extensions, options
    if set(tree.keys()).difference(set([4, 5, 6, 7, 8])) != set():
        raise Exception(tree)
    return {"types":
        list(itertools.chain(
            _traverse(proto_file.package, proto_file.service, tree[6]),  # 6 is service_type in FileDescriptorProto
            _traverse(proto_file.package, proto_file.enum_type, tree[5]),  # 5 is enum_type in FileDescriptorProto
            _traverse(proto_file.package, proto_file.message_type, tree[4]),  # 4 is message_type in FileDescriptorProto
        )),
        "file": ["".join(x.leading_detached_comments) for x in proto_file.source_code_info.location if len(x.leading_detached_comments) > 0]
    }
def type_to_string(f, map_types):
    """
    Convert a field's type information into a pretty name.

    Type numbers come from FieldDescriptorProto:
    https://developers.google.com/protocol-buffers/docs/reference/cpp/google.protobuf.descriptor.pb

    ``f`` is an Editable field; ``map_types`` maps fully-qualified map-entry
    message names to their {"key": field, "value": field} descriptors.
    Returns a string for scalar/message/enum fields, or a dict describing a
    protobuf map field. Raises Exception for unsupported type numbers.
    """
    scalar_names = {
        1: "double",
        2: "float",
        3: "long",
        4: "uint64",
        5: "integer",
        8: "boolean",
        9: "string",
        12: "bytes",
    }
    if f.type in scalar_names:
        return scalar_names[f.type]
    if f.type in [11, 14]:  # TYPE_MESSAGE / TYPE_ENUM carry a reference
        ref_name = f.ref_type
        if ref_name in map_types:
            # Synthetic map-entry message: render as a map of key/value types.
            ref_fields = map_types[ref_name]
            return {
                "type": "map",
                "key": " %s " % type_to_string(ref_fields["key"], map_types),
                "value": " %s " % type_to_string(ref_fields["value"], map_types)
            }
        kind = ":protobuf:message:`%s`" % simplify_name(f.ref_type)
        if f.label == 3:  # LABEL_REPEATED
            return "list of " + kind
        return kind
    # Call-form raise: valid on both Python 2 and 3
    # (was ``raise Exception, f.type``).
    raise Exception(f.type)
def generate_code(request, response):
    """
    Core function. Starts from a CodeGeneratorRequest and adds one JSON
    documentation file per input .proto file to the CodeGeneratorResponse.
    """
    for proto_file in request.proto_file:
        types = []
        messages = {}
        results = traverse(proto_file)
        # Map-entry messages (synthesized by protoc for ``map<k, v>`` fields)
        # are collected first so map fields can be rendered specially and
        # the synthetic messages themselves skipped below.
        map_types = {}
        def full_name(package, item):
            return "%s.%s" % (package, item.name)
        for item, package in results["types"]:
            # ``dict.has_key`` was removed in Python 3; ``in`` works in both.
            if "map_entry" in item.options:
                map_types[full_name(package, item)] = dict([(x.name, x) for x in item.field])
        for item, package in results["types"]:
            name = full_name(package, item)
            if name in map_types:
                continue
            data = {
                'name': simplify_name(name),
                'doc': item.comment
            }
            if item.kind == DescriptorProto:
                data.update({
                    'type': 'message',
                    'fields': []
                })
                for f in item.field:
                    kind = type_to_string(f, map_types)
                    data["fields"].append({
                        'name': f.name,
                        'type': kind,
                        'doc': f.comment
                    })
                # Collapse a oneof into a single pseudo-field listing the
                # alternative types and their docs.
                if len(item.oneof_decl) > 0:
                    data["fields"] = [
                        {
                            "name": item.oneof_decl[0].name,
                            "type": [" %s " % x["type"] for x in data["fields"]],
                            "doc": "\n".join(["%s: %s" % (x["type"], x["doc"]) for x in data["fields"] if x["doc"] != ""])
                        }]
                types.append(data)
            elif item.kind == EnumDescriptorProto:
                comments = ["\n* `%s`: %s" % (v.name, v.comment) for v in item.value]
                data.update({
                    'type': 'enum',
                    'symbols': [v.name for v in item.value]
                })
                data["doc"] += "\n" + " ".join(comments)
                types.append(data)
            elif item.kind == ServiceDescriptorProto:
                for m in item.method:
                    messages[m.name] = {
                        "doc": m.comment,
                        "request": [{
                            "name": "request",
                            "type": ":protobuf:message:`%s`" % simplify_name(m.input_type),
                        }],
                        "response": ":protobuf:message:`%s`" % simplify_name(m.output_type),
                        "errors": [":protobuf:message:`GAException`"]
                    }
            else:
                # Call-form raise: valid on both Python 2 and 3
                # (was ``raise Exception, item.kind``).
                raise Exception(item.kind)
        comments = "\n".join(results["file"])
        output = {
            "types": types,
            "messages": messages,
            "protocol": proto_file.name.split("/")[-1].split(".")[0],
            'doc': comments,
            "namespace": proto_file.package,
        }
        # Fill response
        f = response.file.add()
        f.name = proto_file.name + '.json'
        f.content = json.dumps(output, indent=2)
if __name__ == '__main__':
    # protoc plugin protocol: a serialized CodeGeneratorRequest arrives on
    # stdin and a serialized CodeGeneratorResponse is written to stdout.

    # Read request message from stdin
    data = sys.stdin.read()

    # Parse request
    request = plugin.CodeGeneratorRequest()
    request.ParseFromString(data)

    # Create response
    response = plugin.CodeGeneratorResponse()

    # Generate code
    generate_code(request, response)

    # Serialise response message
    output = response.SerializeToString()

    # Write to stdout
    sys.stdout.write(output)
| {
"content_hash": "780cd0b2e175f02afd1e4895298a1508",
"timestamp": "",
"source": "github",
"line_count": 274,
"max_line_length": 174,
"avg_line_length": 40.262773722627735,
"alnum_prop": 0.5360768672951414,
"repo_name": "heuermh/ga4gh-schemas",
"id": "1c93af0cd027cb97f886005bef8beffe03167a0e",
"size": "11054",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/sphinx/protobuf-json-docs.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1095"
},
{
"name": "Protocol Buffer",
"bytes": "122105"
},
{
"name": "Python",
"bytes": "39732"
}
],
"symlink_target": ""
} |
from smartform import descriptor
# Re-export the smartform descriptor primitives under this module's
# namespace so callers only need to import this module.
errors = descriptor.errors
ConfigurationDescriptor = descriptor.ConfigurationDescriptor
DescriptorData = descriptor.DescriptorData
ProtectedUnicode = descriptor.ProtectedUnicode
class CredentialsDescriptor(descriptor.BaseDescriptor):
    """Class for representing the credentials descriptor definition."""
class LaunchDescriptor(descriptor.BaseDescriptor):
    """Class for representing the launch descriptor definition."""
class DeployDescriptor(descriptor.BaseDescriptor):
    """Class for representing the deploy descriptor definition."""
| {
"content_hash": "84b43f5681e1bc81416c256e316d9502",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 66,
"avg_line_length": 37.6,
"alnum_prop": 0.849290780141844,
"repo_name": "sassoftware/catalog-service",
"id": "92f7996469ad981d6f3b5ea1f1f508dcbf5a70f8",
"size": "1169",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "catalogService/rest/models/descriptor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "9398"
},
{
"name": "Makefile",
"bytes": "26985"
},
{
"name": "Python",
"bytes": "8543839"
},
{
"name": "Shell",
"bytes": "329"
}
],
"symlink_target": ""
} |
'''
Copyright @ USTC SSE
Authored by ch yy in Suzhou, 28/11/2015
Find out whether or not nevents, ndays_act, nplay_video, nchapters and nforum_posts
are determined by viewed, explored, certified and grade
'''
from numpy import *
import load_csv as lc
import operator
def knn(base, dataSet, labels, k):
    """Classify ``base`` against ``dataSet`` with a k-nearest-neighbour vote.

    :param base: 1-D feature vector for the query point.
    :param dataSet: 2-D array of reference samples, one row per record.
    :param labels: true class label of each row of ``dataSet``.
    :param k: how many nearest neighbours take part in the vote.
    :return: the label occurring most often among the k nearest samples
        (the original docstring wrongly claimed a sorted list was returned).
    """
    dataSetSize = dataSet.shape[0]
    # Euclidean distance from base to every row of dataSet.
    diffMat = tile(base, (dataSetSize, 1)) - dataSet
    sqDistances = (diffMat ** 2).sum(axis=1)
    distances = sqDistances ** 0.5
    sortedDistIndicies = distances.argsort()
    # Tally the labels of the k closest samples.
    classCount = {}
    for i in range(k):
        voteIlabel = labels[sortedDistIndicies[i]]
        classCount[voteIlabel] = classCount.get(voteIlabel, 0) + 1
    # Pick the most frequent label; .items() replaces the Python-2-only
    # .iteritems() (identical behaviour, portable).
    sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1), reverse=True)
    return sortedClassCount[0][0]
def createMatrix(filename):
    """Load a tab-separated data file into a feature matrix and label list.

    Each line must contain five numeric feature columns followed by one
    numeric class label.

    :param filename: path of the data file to parse.
    :return: (returnMat, classLabelVector) -- an (n, 5) feature matrix and
        the integer class label of every record.
    """
    # Read the file once and close it deterministically.  The original
    # opened the file twice (once only to count lines) and never closed
    # either handle.
    with open(filename) as fr:
        lines = fr.readlines()
    returnMat = zeros((len(lines), 5))  # feature matrix
    classLabelVector = []  # labels
    index = 0
    for line in lines:
        listFromLine = line.strip().split('\t')
        returnMat[index, :] = listFromLine[0:5]
        # int(round(float(...))) tolerates labels written as e.g. '1.0',
        # which a plain int() would reject.
        classLabelVector.append(int(round(float(listFromLine[-1]))))
        index += 1
    print("record count = %d \n" % index)
    return returnMat, classLabelVector
def Normalized(dataSet):
    """Min-max normalise every column of ``dataSet`` into [0, 1].

    Puts features with different numeric scales on a comparable footing:
    normalised = (value - min) / (max - min).

    :param dataSet: 2-D numeric array, one sample per row.
    :return: (normDataSet, ranges, minVals) -- the normalised matrix, the
        per-column value range (max - min) and the per-column minimum.
    """
    minVals = dataSet.min(0)
    maxVals = dataSet.max(0)
    # NOTE(review): a constant column makes its range 0 and the division
    # below produces inf/nan -- callers must avoid that case.
    ranges = maxVals - minVals
    m = dataSet.shape[0]
    # (The original preallocated normDataSet with zeros() and immediately
    # overwrote it; that dead assignment is removed.)
    normDataSet = dataSet - tile(minVals, (m, 1))
    normDataSet = normDataSet / tile(ranges, (m, 1))
    return normDataSet, ranges, minVals
def data_test(filename):
    """Run the kNN classifier on a hold-out slice of ``filename``.

    The first 10% of records are classified against the remaining 90%,
    and per-record results plus the overall error rate are printed.

    :param filename: data file to classify (tab-separated; see createMatrix).
    """
    how = 0.10  # fraction of the data held out for testing
    dataMat, dataLabels = createMatrix(filename)
    normMat, ranges, minData = Normalized(dataMat)
    m = normMat.shape[0]
    testNum = int(m * how)
    errorCount = 0.0
    for i in range(testNum):
        # Classify test record i against the remaining (training) records.
        classifierResult = knn(normMat[i, :], normMat[testNum:m, :], dataLabels[testNum:m], 3)
        print "classifier into : %d, real answer is: %d" % (classifierResult, dataLabels[i])
        if (classifierResult != dataLabels[i]): errorCount += 1.0
    print "error rate : %f \n" % (errorCount / float(testNum))
    print "error count:%d \n" % errorCount
def start_test():
    """Entry point: run the kNN hold-out test on the edX data file."""
    # lc.load_csv_data()  # one-off conversion of the raw CSV, normally disabled
    data_test('edx_knn.csv')
def displayData(filename):
    """Like data_test, but return the results instead of printing them.

    :param filename: data file to classify (tab-separated; see createMatrix).
    :return: (testNum, error_rate, errorCount, classifierData, realData) --
        hold-out size, fraction misclassified, miss count, predicted labels
        and true labels.
    """
    how = 0.10  # fraction of the data held out for testing
    dataMat, dataLabels = createMatrix(filename)
    normMat, ranges, minData = Normalized(dataMat)
    m = normMat.shape[0]
    testNum = int(m * how)
    errorCount = 0.0
    classifierData = []
    realData = []
    for i in range(testNum):
        classifierResult = knn(normMat[i, :], normMat[testNum:m, :], dataLabels[testNum:m], 3)
        classifierData.append(classifierResult)
        realData.append(dataLabels[i])
        if (classifierResult != dataLabels[i]): errorCount += 1.0
    return testNum, (errorCount / float(testNum)), errorCount, classifierData, realData
"content_hash": "9f357b87d8aa0aed9301f34fb863437e",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 98,
"avg_line_length": 35.114285714285714,
"alnum_prop": 0.6493083807973963,
"repo_name": "ch710798472/GithubRecommended",
"id": "8d32f3bd5491fc2d6315b14326551ca55f00790e",
"size": "4221",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "RecGithub/function/knn.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "18"
},
{
"name": "HTML",
"bytes": "54544"
},
{
"name": "Jupyter Notebook",
"bytes": "210681"
},
{
"name": "Python",
"bytes": "48032"
}
],
"symlink_target": ""
} |
import distro
import subprocess
import pkg_resources
from manager_rest import premium_enabled
def get_version() -> str:
    """Return the installed cloudify-rest-service package version string."""
    dist = pkg_resources.get_distribution('cloudify-rest-service')
    return dist.version
def get_edition():
    """Return the product edition based on the premium feature flag."""
    if premium_enabled:
        return 'premium'
    return 'community'
def get_distribution():
    """Return ``(distro_id, codename)`` for the host OS.

    'rhel' is mapped to 'redhat' for consistency with the output of the
    old platform.linux_distribution helper.
    """
    normalized_id = distro.id().replace('rhel', 'redhat')
    codename = distro.codename()
    return normalized_id, codename
def get_version_data():
    """Collect version metadata for reporting.

    For non-premium (community) installs, the pkg_resources version is
    replaced with the version of the installed RPM when `rpm -q` succeeds.

    :return: dict with version, edition, distribution and distro_release.
    """
    version = get_version()
    # Renamed locals: the original bound `distro`, shadowing the imported
    # `distro` module within this function.
    distribution, distro_release = get_distribution()
    if not premium_enabled:
        package_name = 'cloudify-rest-service'
        try:
            rpm_info = \
                subprocess.check_output(['rpm', '-q', package_name]).decode()
        except (subprocess.CalledProcessError, OSError):
            # rpm unavailable or package not installed: keep the
            # pkg_resources version.
            pass
        else:
            version = rpm_info.replace(package_name, '').split('-')[1]
    return {
        'version': version,
        'edition': get_edition(),
        'distribution': distribution,
        'distro_release': distro_release,
    }
| {
"content_hash": "b7de26327dfb9e270c99c8e0bab661b8",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 77,
"avg_line_length": 27.487179487179485,
"alnum_prop": 0.6352611940298507,
"repo_name": "cloudify-cosmo/cloudify-manager",
"id": "72c54505667c89343be4d9757176d69c77e5ab59",
"size": "1706",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rest-service/manager_rest/version.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Clojure",
"bytes": "4067"
},
{
"name": "Dockerfile",
"bytes": "3843"
},
{
"name": "HTML",
"bytes": "320"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "PLpgSQL",
"bytes": "119062"
},
{
"name": "Python",
"bytes": "3825971"
},
{
"name": "Shell",
"bytes": "49121"
}
],
"symlink_target": ""
} |
import fnmatch
import os
import sh
from molecule import ansible_playbook
from molecule import config
from molecule import util
from molecule.verifier import base
class Testinfra(base.Base):
    """Verifier that lints test files with flake8 and runs them with testinfra."""

    def __init__(self, molecule):
        super(Testinfra, self).__init__(molecule)
        self._testinfra_dir = molecule.config.config['molecule'][
            'testinfra_dir']
        self._debug = molecule.args.get('debug')

    def execute(self):
        """
        Executes linting/integration tests and returns None.

        Flake8 performs the code linting.
        Testinfra executes integration tests.

        :return: None
        """
        ansible = ansible_playbook.AnsiblePlaybook(
            self._molecule.config.config['ansible'], {},
            _env=self._molecule.env)

        # Driver-provided options are merged with (and overridden by) the
        # verifier options from the molecule config.
        testinfra_options = config.merge_dicts(
            self._molecule.driver.testinfra_args,
            self._molecule.config.config['verifier']['options'])
        testinfra_options['ansible_env'] = ansible.env
        if self._molecule.args.get('debug'):
            testinfra_options['debug'] = True
        if self._molecule.args.get('sudo'):
            testinfra_options['sudo'] = True

        tests = self._get_tests()
        if len(tests) > 0:
            if 'flake8' not in self._molecule.disabled:
                self._flake8(tests)
            self._testinfra(tests, **testinfra_options)

    def _testinfra(self,
                   tests,
                   debug=False,
                   ansible_env=None,
                   out=util.callback_info,
                   err=util.callback_error,
                   **kwargs):
        """
        Executes testinfra against specified tests and returns a :func:`sh`
        response object.

        :param tests: A list of testinfra tests.
        :param debug: An optional bool to toggle debug output.
        :param ansible_env: An optional environment to pass to underlying
         :func:`sh` call.
        :param out: An optional function to process STDOUT for underlying
         :func:`sh` call.
        :param err: An optional function to process STDERR for underlying
         :func:`sh` call.
        :return: :func:`sh` response object.
        """
        # `ansible_env` previously defaulted to a shared mutable {} --
        # replaced with the None sentinel idiom (same behaviour).
        if ansible_env is None:
            ansible_env = {}
        kwargs['debug'] = debug
        kwargs['_env'] = ansible_env
        kwargs['_out'] = out
        kwargs['_err'] = err

        msg = 'Executing testinfra tests found in {}/...'.format(
            self._testinfra_dir)
        util.print_info(msg)

        cmd = sh.testinfra.bake(tests, **kwargs)
        return util.run_command(cmd, debug=self._debug)

    def _flake8(self, tests, out=util.callback_info, err=util.callback_error):
        """
        Executes flake8 against specified tests and returns a :func:`sh`
        response object.

        :param tests: A list of testinfra tests.
        :param out: An optional function to process STDOUT for underlying
         :func:`sh` call.
        :param err: An optional function to process STDERR for underlying
         :func:`sh` call.
        :return: :func:`sh` response object.
        """
        msg = 'Executing flake8 on *.py files found in {}/...'.format(
            self._testinfra_dir)
        util.print_info(msg)

        cmd = sh.flake8.bake(tests)
        return util.run_command(cmd, debug=self._debug)

    def _get_tests(self):
        """Return the list of test_*.py files under the testinfra directory."""
        # list(...) replaces the original redundant identity comprehension.
        return list(self._walk(self._testinfra_dir, 'test_*.py'))

    def _walk(self, directory, pattern):
        """Yield paths under ``directory`` whose basename matches ``pattern``."""
        # Python 3.5 supports a recursive glob without needing os.walk.
        for root, _dirs, files in os.walk(directory):
            for basename in files:
                if fnmatch.fnmatch(basename, pattern):
                    yield os.path.join(root, basename)
| {
"content_hash": "acb09269a70a366eff17ae76f89e16b6",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 78,
"avg_line_length": 34.01754385964912,
"alnum_prop": 0.5838060856111398,
"repo_name": "rgreinho/molecule",
"id": "c4eff28adb81f3c27c3a0741f9a9a72723616622",
"size": "4998",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "molecule/verifier/testinfra.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "315730"
},
{
"name": "Ruby",
"bytes": "1110"
},
{
"name": "Shell",
"bytes": "4029"
}
],
"symlink_target": ""
} |
from django.db import models
__all__ = ['VisibilityManager', 'VisibilityManagerMixin']
class VisibilityManagerMixin(object):
    """
    This manager should be used with a model that implements the Hideable
    mixin.

    Filters the queryset on the ``hidden`` field's nullness:
    ``visible=True`` keeps rows whose ``hidden`` is null, ``visible=False``
    keeps the rest.
    """
    def __init__(self, *args, **kwargs):
        # `visible` is required; it is popped so **kwargs stays clean for
        # the cooperating superclass __init__.
        self.visible = kwargs.pop('visible')
        # Zero-argument super() used consistently (the original mixed the
        # old two-argument form here with super() in get_queryset).
        super().__init__(*args, **kwargs)

    def get_queryset(self):
        return super().get_queryset().filter(hidden__isnull=self.visible)
class VisibilityManager(VisibilityManagerMixin, models.Manager):
    # Concrete Django manager: VisibilityManagerMixin layers the
    # hidden/visible filtering on top of the stock Manager behaviour.
    pass
| {
"content_hash": "0372f22765a8d4c4343e45dc1caa3924",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 73,
"avg_line_length": 25.727272727272727,
"alnum_prop": 0.6802120141342756,
"repo_name": "jleeothon/trufflehog",
"id": "4ac6f9df34de4d8e50a9725548fde740879a425f",
"size": "566",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "trufflehog/managers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4670"
}
],
"symlink_target": ""
} |
from xbee import XBee, ZigBee
import serial
import time
import sys, getopt
# Open the serial link to the locally attached XBee radio at 9600 baud.
ser = serial.Serial('/dev/ttyUSB1', 9600)
# ZigBee-series firmware; the commented line is the series-1 alternative.
xbee = ZigBee(ser)
#xbee = XBee(ser)

# Default position command sent to the remote node; overridden by -p.
position = 'n'
timeout = 1
def doCommand():
    """Transmit the current module-level ``position`` to the remote XBee."""
    # 64-bit destination address; the commented line is an alternate module
    # kept for reference.
    #destinationAddrLong = b'\x00\x13\xA2\x00\x40\xBA\xF5\xE8'
    destinationAddrLong = b'\x00\x13\xA2\x00\x40\xC5\x5A\x84'
    # NOTE(review): this local shadows the module-level ``timeout``.
    timeout = 0.05
    print 'position is %c' % position
    xbee.tx_long_addr(dest_addr=destinationAddrLong, data=position)
    # Block until the radio returns a response frame for the transmit.
    print xbee.wait_read_frame()
    time.sleep(timeout)
    return
def main(argv):
    """Parse -p/--position from ``argv``, transmit it, then close the port.

    :param argv: command-line arguments (typically sys.argv[1:]).
    """
    global position
    try:
        opts, args = getopt.getopt(argv, "hp:", ["position="])
    except getopt.GetoptError:
        print 'sendData.py -p <position - one of n, w, s>'
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print 'sendData.py -p <position - one of n, w, s>'
            sys.exit()
        elif opt in ("-p", "--position"):
            position = arg
            print "argument is ", arg
    print "POS is ", position
    doCommand()
    ser.close()
# Script entry point: parse CLI arguments and transmit the position command.
if __name__ == "__main__":
    main(sys.argv[1:])
| {
"content_hash": "ef4cb3c7ee7c09ea30d85817f40ecdca",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 64,
"avg_line_length": 24.113636363636363,
"alnum_prop": 0.6135721017907634,
"repo_name": "louietsai/python-xbee",
"id": "70acd88d15a707770bdcb1204051de19747b4a21",
"size": "1081",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/sendData.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "121176"
}
],
"symlink_target": ""
} |
import sys,os
import re,glob
import numpy as np
import scipy.sparse
from sklearn.naive_bayes import MultinomialNB
import cPickle as pickle
import pkg_resources
from . import content_sources
# remove punctuation and prepositions from a string
def find_keywords(text):
    """Split ``text`` into a list of upper-cased, ASCII-only keywords.

    Punctuation is stripped, words from the packaged preposition list are
    blanked out, and the remaining tokens are returned.
    """
    keywords = re.sub('[{}:?!@#$%^&*\(\)_.\\/,\'\"]','',text).upper()
    # NOTE(review): the preposition list is re-read from package data on
    # every call -- hoisting it would avoid repeated I/O.
    prepositions = pkg_resources.resource_stream('shakespeare','data/prepositions.dat').read().upper().split()
    for p in prepositions:
        keywords = re.sub(r"\b{!s}\b".format(p),' ',keywords)
    return keywords.encode('ascii','ignore').split()
# Identify good entries using the trained naive_bayes object.
def filter_content(content,
                   method,
                   naive_bayes,
                   keywords):
    """Return only the entries that ``naive_bayes`` classifies as 'good'.

    Each entry's text field (``entry[method]``) is turned into a keyword
    count vector over ``keywords`` before prediction.
    """
    samples = [find_keywords(entry[method]) for entry in content]
    # Build one sparse row of keyword counts per entry.
    X = scipy.sparse.lil_matrix((len(samples), len(keywords)))
    for row, sample in enumerate(samples):
        for col, keyword in enumerate(keywords):
            X[row, col] = sample.count(keyword)
    predictions = naive_bayes.predict(X)
    return [entry for label, entry in zip(predictions, content) if label == 'good']
# Gather content from all sources (BibTex files, arXiv, journal RSS feeds, etc)
def get_content(sources):
    """Fetch and parse every source, skipping the ones that fail.

    :param sources: iterable of objects providing fetch() and parse().
    :return: flat list of all entries parsed from every source.
    """
    all_content = list()
    for src in sources:
        try:
            src.fetch()
        # `except Exception` replaces the original bare `except:` so that
        # KeyboardInterrupt / SystemExit still propagate.
        except Exception:
            print("Fetch of content from {!r} has failed".format(src))
        content = None
        try:
            print('parsing {!r}'.format(src))
            content = src.parse()
        except Exception:
            print("parsing of content from {!r} has failed".format(src))
        if content:
            all_content += content
    return all_content
# Human review of content classification.
# You can review all the content, or just the entries the NB classifier
# thought were good.  Human input is used to train the NB classifier.
def review_content(good_content,content,method,review_all=False):
    """Interactively ask a human to classify entries as good or bad.

    :param good_content: entries the classifier judged good.
    :param content: all entries.
    :param method: key of the text field shown to the reviewer.
    :param review_all: review every entry when True, otherwise only the
        classifier-approved ones.
    :return: (human_class, to_review) -- 'good'/'bad' verdicts and the
        entries they correspond to, in the same order.
    """
    to_review=[]
    if review_all:
        to_review = content
    else:
        to_review = good_content
    human_class=[]
    for entry in to_review:
        print("Is \"{}\" a good entry?".format(entry[method].encode('ascii','ignore')))
        # raw_input is Python-2 only; anything other than 'y' counts as bad.
        decision = raw_input('Y/n?').lower()
        human_class.append('good' if decision=='y' else 'bad')
    return human_class, to_review
# Load in a trained naive_bayes object and keyword list
def load_knowledge(knowledge):
    """Load the pickled classifier state from a knowledge directory.

    :param knowledge: directory holding nb.p / kw.p pickles, or None to use
        (and create if missing) ~/.shakespeare.
    :return: (nb, kw, knowledge) -- the unpickled naive-Bayes object (or
        None), the keyword list (empty when missing) and the resolved
        directory path.
    """
    # existing naive_bayes object and keyword list
    nb = None
    kw = list()
    if knowledge is not None:
        if not os.path.isdir(knowledge):
            # fixed typo in the user-facing message: "bust" -> "must"
            print("Knowledge must be a directory")
            exit()
    else:
        knowledge = os.path.expanduser('~/.shakespeare')
    # make this directory if it doesn't already exist
    if not (os.path.exists(knowledge)):
        print('Creating directory: {}'.format(knowledge))
        os.mkdir(knowledge)
    kfiles = glob.glob(knowledge + '/*')
    # NOTE(review): pickle.load executes arbitrary code from these files --
    # the knowledge directory must be trusted.
    if os.path.join(knowledge, 'nb.p') in kfiles:
        nb = pickle.load(open(os.path.join(knowledge, 'nb.p')))
    else:
        print("Warning: knowledge dir {} does not contain nb.p (pickled naive bayes object)".format(knowledge))
    if os.path.join(knowledge, 'kw.p') in kfiles:
        kw = pickle.load(open(os.path.join(knowledge, 'kw.p')))
    else:
        print("Warning: knowledge dir {} does not contain kw.p (pickled keyword list)".format(knowledge))
    return (nb, kw, knowledge)
# Train naive_bayes object on a data set
def train(good_sources, bad_sources, method, naive_bayes=None, keywords=None):
    """
    Train a MultinomialNB classifier with a bag of good words and a
    bag of bad words.

    This requires a kinda goofy work around to use sklearn's
    MultinomialNB class: when updating the classifier with new content
    that contains new keywords, sklearn's partial_fit is not used --
    instead the stored feature-count matrix is widened by hand.

    :param good_sources: entries judged good; entry[method] supplies text.
    :param bad_sources: entries judged bad.
    :param method: key of the text field to learn from.
    :param naive_bayes: previously trained MultinomialNB to extend, or None.
    :param keywords: keyword list matching naive_bayes' feature columns
        (defaults to empty; was a mutable default `list()` before).
    :return: (naive_bayes, all_kw) -- the refitted classifier and the full
        keyword list defining its feature column order.
    """
    if keywords is None:
        keywords = list()
    # train the algorithm
    good_samples = find_keywords(' '.join([entry[method] for entry in good_sources]))
    bad_samples = find_keywords(' '.join([entry[method] for entry in bad_sources]))
    # if we have an existing knowledge base to append this new information to, do so
    if naive_bayes:
        new_kws = set(good_samples + bad_samples)
        print('Using old keywords as well')
        print("# old keywords = {}\n # new keywords = {}".format(len(keywords), len(new_kws)))
        new_kws = set(good_samples + bad_samples).difference(keywords)
        print("# fresh keywords = {}\n".format(len(new_kws)))
        # Instead of doing a partial fit, expand the stored count matrix:
        # naive_bayes.feature_count_[0] holds per-word counts for 'bad'
        # entries, [1] for 'good'.  Old counts are kept and zero columns
        # are appended for the never-before-seen keywords.
        X = np.concatenate((naive_bayes.feature_count_,
                            np.zeros((naive_bayes.feature_count_.shape[0], len(new_kws)))), 1)
        all_kw = keywords + list(new_kws)
    else:
        # fixed typo in log message: "keyownrds" -> "keywords"
        print('Only using keywords from this content set')
        all_kw = list(set(good_samples + bad_samples))
        X = np.zeros((2, len(all_kw)))
    for j, kw in enumerate(all_kw):
        X[0, j] += bad_samples.count(kw)
        X[1, j] += good_samples.count(kw)
    y = ['bad', 'good']
    naive_bayes = MultinomialNB()
    naive_bayes.fit(X, y)
    return naive_bayes, all_kw
# export content to simple markdown format
def to_markdown(content, output_file):
    """Write articles to ``output_file`` as a simple markdown listing.

    :param content: list of dicts with 'title', 'author', 'abstract' and
        'url' keys.
    :param output_file: path of the markdown file to (over)write.
    """
    try:
        with open(output_file, 'w') as outf:
            outf.write('# Relevant articles\n')
            for article in content:
                outf.write("## {}\n".format(re.sub(r'\n', ' ', article['title']).encode('ascii', 'ignore')))
                outf.write("* authors: {}\n".format(re.sub(r'\n', ' ', article['author']).encode('ascii', 'ignore')))
                outf.write("* abstract: {}\n".format(re.sub(r'\n', ' ', article['abstract']).encode('ascii', 'ignore')))
                outf.write("* [link]({})\n\n".format(re.sub(r'\n', ' ', article['url']).encode('ascii', 'ignore')))
    # `except Exception` replaces the original bare `except:` so that
    # KeyboardInterrupt / SystemExit still propagate.
    except Exception:
        print("Failed to write markdown file")
| {
"content_hash": "cddd4161f6538cdf4ff242f2350bfb5a",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 120,
"avg_line_length": 38.23456790123457,
"alnum_prop": 0.6336777526638683,
"repo_name": "benjaminaschultz/shakespeare",
"id": "937cfd49c404c8f23878ab82510d1f1b488f85b3",
"size": "6194",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "shakespeare/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "OpenEdge ABL",
"bytes": "1006984"
},
{
"name": "Python",
"bytes": "23739"
}
],
"symlink_target": ""
} |
from typing import TYPE_CHECKING, Optional
from twisted.web.server import Request
from sydent.http.auth import authV2
from sydent.http.servlets import SydentResource, get_args, jsonwrap, send_cors
from sydent.types import JsonDict
from sydent.util.emailutils import EmailAddressException, EmailSendException
from sydent.util.stringutils import MAX_EMAIL_ADDRESS_LENGTH, is_valid_client_secret
from sydent.validators import (
IncorrectClientSecretException,
IncorrectSessionTokenException,
InvalidSessionIdException,
SessionExpiredException,
)
if TYPE_CHECKING:
from sydent.sydent import Sydent
class EmailRequestCodeServlet(SydentResource):
    """Servlet that starts an email validation session (requestToken)."""

    isLeaf = True

    def __init__(self, syd: "Sydent", require_auth: bool = False) -> None:
        super().__init__()
        self.sydent = syd
        # v2 endpoints authenticate the caller; v1 does not.
        self.require_auth = require_auth

    @jsonwrap
    def render_POST(self, request: Request) -> JsonDict:
        """Create (or re-use) a validation session and email its token.

        Expects ``email``, ``client_secret`` and ``send_attempt`` args;
        rate-limits per account (v2) or per requesting IP (v1).
        """
        send_cors(request)

        ipaddress = self.sydent.ip_from_request(request)

        if self.require_auth:
            account = authV2(self.sydent, request)
            self.sydent.email_sender_ratelimiter.ratelimit(account.userId)
        elif ipaddress:
            # For `/v1/` requests the ip address is the best we can do for rate
            # limiting.
            self.sydent.email_sender_ratelimiter.ratelimit(ipaddress)

        args = get_args(request, ("email", "client_secret", "send_attempt"))

        email = args["email"]
        clientSecret = args["client_secret"]

        try:
            # if we got this via the v1 API in a querystring or urlencoded body,
            # then the values in args will be a string. So check that
            # send_attempt is an int.
            #
            # NB: We don't check if we're processing a url-encoded v1 request.
            # This means we accept string representations of integers for
            # `send_attempt` in v2 requests, and in v1 requests that supply a
            # JSON body. This is contrary to the spec and leaves me with a dirty
            # feeling I can't quite shake off.
            #
            # Where's Raymond Hettinger when you need him? (THUMP) There must be
            # a better way!
            sendAttempt = int(args["send_attempt"])
        except (TypeError, ValueError):
            request.setResponseCode(400)
            return {
                "errcode": "M_INVALID_PARAM",
                # fixed: the message was missing its closing parenthesis
                "error": f"send_attempt should be an integer (got {args['send_attempt']})",
            }

        if not is_valid_client_secret(clientSecret):
            request.setResponseCode(400)
            return {
                "errcode": "M_INVALID_PARAM",
                "error": "Invalid client_secret provided",
            }

        if not (0 < len(email) <= MAX_EMAIL_ADDRESS_LENGTH):
            request.setResponseCode(400)
            return {"errcode": "M_INVALID_PARAM", "error": "Invalid email provided"}

        brand = self.sydent.brand_from_request(request)

        # file:// links are rejected to avoid redirecting to local resources.
        nextLink: Optional[str] = None
        if "next_link" in args and not args["next_link"].startswith("file:///"):
            nextLink = args["next_link"]

        try:
            sid = self.sydent.validators.email.requestToken(
                email,
                clientSecret,
                sendAttempt,
                nextLink,
                ipaddress=ipaddress,
                brand=brand,
            )
            resp = {"sid": str(sid)}
        except EmailAddressException:
            request.setResponseCode(400)
            resp = {"errcode": "M_INVALID_EMAIL", "error": "Invalid email address"}
        except EmailSendException:
            request.setResponseCode(500)
            resp = {"errcode": "M_EMAIL_SEND_ERROR", "error": "Failed to send email"}

        return resp

    def render_OPTIONS(self, request: Request) -> bytes:
        """Answer CORS preflight requests."""
        send_cors(request)
        return b""
class EmailValidateCodeServlet(SydentResource):
    """Servlet that validates an email token, via browser GET or API POST."""

    isLeaf = True

    def __init__(self, syd: "Sydent", require_auth: bool = False) -> None:
        super().__init__()
        self.sydent = syd
        self.require_auth = require_auth

    def render_GET(self, request: Request) -> bytes:
        """Handle a validation link clicked from an email.

        Renders a branded HTML page reporting success or failure and, on
        success with a non-file ``nextLink``, issues a 302 redirect.
        """
        args = get_args(request, ("nextLink",), required=False)

        resp = None
        try:
            resp = self.do_validate_request(request)
        except Exception:
            # Deliberate best effort: any failure falls through to the
            # generic failure message below.
            pass
        if resp and "success" in resp and resp["success"]:
            msg = "Verification successful! Please return to your Matrix client to continue."
            if "nextLink" in args:
                next_link = args["nextLink"]
                if not next_link.startswith("file:///"):
                    request.setResponseCode(302)
                    request.setHeader("Location", next_link)
        else:
            msg = "Verification failed: you may need to request another verification email"

        brand = self.sydent.brand_from_request(request)

        # self.sydent.config.http.verify_response_template is deprecated
        if self.sydent.config.http.verify_response_template is None:
            templateFile = self.sydent.get_branded_template(
                brand,
                "verify_response_template.html",
            )
        else:
            templateFile = self.sydent.config.http.verify_response_template

        request.setHeader("Content-Type", "text/html")
        # Close the template deterministically (the original leaked the
        # open file handle).
        with open(templateFile) as template_fp:
            res = template_fp.read() % {"message": msg}

        return res.encode("UTF-8")

    @jsonwrap
    def render_POST(self, request: Request) -> JsonDict:
        """Validate a session token submitted through the API."""
        send_cors(request)

        if self.require_auth:
            authV2(self.sydent, request)

        return self.do_validate_request(request)

    def do_validate_request(self, request: Request) -> JsonDict:
        """
        Extracts information about a validation session from the request and
        attempts to validate that session.

        :param request: The request to extract information about the session from.

        :return: A dict with a "success" key which value indicates whether the
            validation succeeded. If the validation failed, this dict also includes
            a "errcode" and a "error" keys which include information about the failure.
        """
        args = get_args(request, ("token", "sid", "client_secret"))

        sid = args["sid"]
        tokenString = args["token"]
        clientSecret = args["client_secret"]

        if not is_valid_client_secret(clientSecret):
            request.setResponseCode(400)
            return {
                "errcode": "M_INVALID_PARAM",
                "error": "Invalid client_secret provided",
            }

        try:
            return self.sydent.validators.email.validateSessionWithToken(
                sid, clientSecret, tokenString
            )
        except IncorrectClientSecretException:
            return {
                "success": False,
                "errcode": "M_INVALID_PARAM",
                "error": "Client secret does not match the one given when requesting the token",
            }
        except SessionExpiredException:
            return {
                "success": False,
                "errcode": "M_SESSION_EXPIRED",
                "error": "This validation session has expired: call requestToken again",
            }
        except InvalidSessionIdException:
            return {
                "success": False,
                "errcode": "M_INVALID_PARAM",
                "error": "The token doesn't match",
            }
        except IncorrectSessionTokenException:
            return {
                "success": False,
                "errcode": "M_NO_VALID_SESSION",
                "error": "No session could be found with this sid",
            }

    def render_OPTIONS(self, request: Request) -> bytes:
        """Answer CORS preflight requests."""
        send_cors(request)
        return b""
| {
"content_hash": "1ad988b6e197add8ad92d715e6c05eb2",
"timestamp": "",
"source": "github",
"line_count": 217,
"max_line_length": 96,
"avg_line_length": 36.29032258064516,
"alnum_prop": 0.5880634920634921,
"repo_name": "matrix-org/sydent",
"id": "6e60f17c92302484ce2b68c1158c18b303dadc00",
"size": "8454",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sydent/http/servlets/emailservlet.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2161"
},
{
"name": "Gherkin",
"bytes": "283"
},
{
"name": "HTML",
"bytes": "1143"
},
{
"name": "Jinja",
"bytes": "31155"
},
{
"name": "Python",
"bytes": "463511"
},
{
"name": "Shell",
"bytes": "3218"
}
],
"symlink_target": ""
} |
"""
Copyright (c) 2019 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from textwrap import dedent
import sys
from flexmock import flexmock
import pytest
import atomic_reactor.utils.koji as koji_util
from atomic_reactor import util
from atomic_reactor.utils.cachito import CachitoAPI
from atomic_reactor.constants import PLUGIN_BUILD_ORCHESTRATE_KEY
from atomic_reactor.inner import DockerBuildWorkflow
from atomic_reactor.plugin import PreBuildPluginsRunner, PluginFailedException
from atomic_reactor.plugins import pre_reactor_config
from atomic_reactor.plugins.build_orchestrate_build import (
WORKSPACE_KEY_OVERRIDE_KWARGS, OrchestrateBuildPlugin)
from atomic_reactor.plugins.pre_reactor_config import (
ReactorConfigPlugin, WORKSPACE_CONF_KEY, ReactorConfig)
from atomic_reactor.plugins.pre_resolve_remote_source import ResolveRemoteSourcePlugin
from atomic_reactor.source import SourceConfig
from tests.constants import MOCK_SOURCE
from tests.stubs import StubInsideBuilder, StubSource
# Canned Koji configuration used throughout these tests.
KOJI_HUB = 'http://koji.com/hub'
KOJI_TASK_ID = 123
KOJI_TASK_OWNER = 'spam'

# Canned Cachito service endpoints, all derived from one request id.
CACHITO_URL = 'https://cachito.example.com'
CACHITO_REQUEST_ID = 98765
CACHITO_REQUEST_DOWNLOAD_URL = '{}/api/v1/{}/download'.format(CACHITO_URL, CACHITO_REQUEST_ID)
CACHITO_REQUEST_CONFIG_URL = '{}/api/v1/requests/{}/configuration-files'.format(
    CACHITO_URL,
    CACHITO_REQUEST_ID
)
CACHITO_ICM_URL = '{}/api/v1/requests/{}/content-manifest'.format(
    CACHITO_URL,
    CACHITO_REQUEST_ID
)

# Remote source repository the fake Cachito request resolves.
REMOTE_SOURCE_REPO = 'https://git.example.com/team/repo.git'
REMOTE_SOURCE_REF = 'b55c00f45ec3dfee0c766cea3d395d6e21cc2e5a'
REMOTE_SOURCE_PACKAGES = [
    {
        'name': 'test-package',
        'type': 'npm',
        'version': '0.0.1'
    }
]

# Full response body the mocked Cachito API returns for the request.
CACHITO_SOURCE_REQUEST = {
    'id': CACHITO_REQUEST_ID,
    'repo': REMOTE_SOURCE_REPO,
    'ref': REMOTE_SOURCE_REF,
    'environment_variables': {
        'GO111MODULE': 'on',
        'GOPATH': 'deps/gomod',
        'GOCACHE': 'deps/gomod',
    },
    'flags': ['enable-confeti', 'enable-party-popper'],
    'pkg_managers': ['gomod'],
    'dependencies': [
        {
            'name': 'github.com/op/go-logging',
            'type': 'gomod',
            'version': 'v0.1.1',
        }
    ],
    'packages': [
        {
            'name': 'github.com/spam/bacon/v2',
            'type': 'gomod',
            'version': 'v2.0.3'
        }
    ],
    'configuration_files': CACHITO_REQUEST_CONFIG_URL,
    'content_manifest': CACHITO_ICM_URL,
    # NOTE(review): presumably present to check unknown response keys are
    # tolerated -- confirm.
    'extra_cruft': 'ignored',
}

# Same as CACHITO_SOURCE_REQUEST minus the server-side-only keys
# ('id', 'extra_cruft').
REMOTE_SOURCE_JSON = {
    'repo': REMOTE_SOURCE_REPO,
    'ref': REMOTE_SOURCE_REF,
    'environment_variables': {
        'GO111MODULE': 'on',
        'GOPATH': 'deps/gomod',
        'GOCACHE': 'deps/gomod',
    },
    'flags': ['enable-confeti', 'enable-party-popper'],
    'pkg_managers': ['gomod'],
    'dependencies': [
        {
            'name': 'github.com/op/go-logging',
            'type': 'gomod',
            'version': 'v0.1.1',
        }
    ],
    'packages': [
        {
            'name': 'github.com/spam/bacon/v2',
            'type': 'gomod',
            'version': 'v2.0.3'
        }
    ],
    'configuration_files': CACHITO_REQUEST_CONFIG_URL,
    'content_manifest': CACHITO_ICM_URL,
}
@pytest.fixture
def workflow(tmpdir, user_params):
    """Build a DockerBuildWorkflow with the default mocks installed."""
    workflow = DockerBuildWorkflow(source=MOCK_SOURCE)
    # Stash the tmpdir in workflow so it can be used later
    workflow._tmpdir = tmpdir

    class MockSource(StubSource):
        # StubSource plus the workdir attribute read by consumers.
        def __init__(self, workdir):
            super(MockSource, self).__init__()
            self.workdir = workdir

    workflow.source = MockSource(str(tmpdir))
    builder = StubInsideBuilder().for_workflow(workflow)
    builder.set_df_path(str(tmpdir))
    builder.tasker = flexmock()
    workflow.builder = flexmock(builder)
    workflow.buildstep_plugins_conf = [{'name': PLUGIN_BUILD_ORCHESTRATE_KEY}]

    # Install the default mocks for repo config, reactor config, the build
    # JSON, the Cachito API and koji.
    mock_repo_config(workflow)
    mock_reactor_config(workflow)
    mock_build_json()
    mock_cachito_api(workflow)
    mock_koji()

    return workflow
def mock_reactor_config(workflow, data=None):
    """Install a ReactorConfig into the workflow's plugin workspace.

    The default config points Cachito at CACHITO_URL with certs in the
    test tmpdir; pass *data* (a YAML string) to override.
    """
    if data is None:
        data = dedent("""\
            version: 1
            cachito:
              api_url: {}
              auth:
                ssl_certs_dir: {}
            koji:
              hub_url: /
              root_url: ''
              auth: {{}}
            """.format(CACHITO_URL, workflow._tmpdir))
    workflow.plugin_workspace[ReactorConfigPlugin.key] = {}
    # The Cachito auth config expects a cert file inside ssl_certs_dir.
    workflow._tmpdir.join('cert').write('')
    config = util.read_yaml(data, 'schemas/config.json')
    workflow.plugin_workspace[ReactorConfigPlugin.key][WORKSPACE_CONF_KEY] = ReactorConfig(config)
def mock_build_json(build_json=None):
    """Stub util.get_build_json() to return *build_json*.

    The default payload carries a valid koji-task-id label.  Note that an
    explicit empty dict is honored as-is (only None selects the default).
    """
    default_payload = {'metadata': {'labels': {'koji-task-id': str(KOJI_TASK_ID)}}}
    payload = default_payload if build_json is None else build_json
    flexmock(util).should_receive('get_build_json').and_return(payload)
def mock_repo_config(workflow, data=None):
    """Write container.yaml into the source workdir and reload SourceConfig.

    The default config declares a remote_source with the canned repo/ref.
    """
    if data is None:
        data = dedent("""\
            remote_source:
              repo: {}
              ref: {}
            """.format(REMOTE_SOURCE_REPO, REMOTE_SOURCE_REF))
    workflow._tmpdir.join('container.yaml').write(data)
    # The repo config is read when SourceConfig is initialized. Force
    # reloading here to make usage easier.
    workflow.source.config = SourceConfig(str(workflow._tmpdir))
def mock_cachito_api(workflow, user=KOJI_TASK_OWNER, source_request=None,
                     dependency_replacements=None):
    """Mock the four CachitoAPI calls made by the plugin.

    request_sources -> {'id': ...}; wait_for_request -> *source_request*
    (default: CACHITO_SOURCE_REQUEST); download_sources -> the expected
    archive path; assemble_download_url -> CACHITO_REQUEST_DOWNLOAD_URL.
    The with_args() constraints double as assertions on how the plugin
    calls the API.
    """
    if source_request is None:
        source_request = CACHITO_SOURCE_REQUEST
    (flexmock(CachitoAPI)
        .should_receive('request_sources')
        .with_args(
            repo=REMOTE_SOURCE_REPO,
            ref=REMOTE_SOURCE_REF,
            user=user,
            dependency_replacements=dependency_replacements,
        )
        .and_return({'id': CACHITO_REQUEST_ID}))
    (flexmock(CachitoAPI)
        .should_receive('wait_for_request')
        .with_args({'id': CACHITO_REQUEST_ID})
        .and_return(source_request))
    (flexmock(CachitoAPI)
        .should_receive('download_sources')
        .with_args(source_request, dest_dir=str(workflow._tmpdir))
        .and_return(expected_dowload_path(workflow)))
    (flexmock(CachitoAPI)
        .should_receive('assemble_download_url')
        .with_args(source_request)
        .and_return(CACHITO_REQUEST_DOWNLOAD_URL))
def mock_koji(user=KOJI_TASK_OWNER):
    """Stub Koji session creation and task-owner lookup."""
    fake_session = flexmock()
    (flexmock(pre_reactor_config)
        .should_receive('get_koji_session')
        .and_return(fake_session))
    (flexmock(koji_util)
        .should_receive('get_koji_task_owner')
        .and_return({'name': user}))
def expected_dowload_path(workflow):
    """Path the mocked CachitoAPI reports the sources were downloaded to.

    (The misspelling of "download" in the name is preserved for callers.)
    """
    archive_name = 'source.tar.gz'
    return workflow._tmpdir.join(archive_name)
def setup_function(*args):
    """Drop the plugin module before each test case.

    IMPORTANT: ensures module-level mocks are reset between test cases.
    """
    if 'pre_resolve_remote_source' in sys.modules:
        del sys.modules['pre_resolve_remote_source']
def teardown_function(*args):
    """Drop the plugin module after each test case.

    IMPORTANT: ensures module-level mocks are reset between test cases.
    """
    if 'pre_resolve_remote_source' in sys.modules:
        del sys.modules['pre_resolve_remote_source']
@pytest.mark.parametrize('scratch', (True, False))
@pytest.mark.parametrize('dr_strs, dependency_replacements',
                         ((None, None),
                          (['gomod:foo.bar/project:2'],
                           [{
                               'name': 'foo.bar/project',
                               'type': 'gomod',
                               'version': '2'}]),
                          (['gomod:foo.bar/project:2:newproject'],
                           [{
                               'name': 'foo.bar/project',
                               'type': 'gomod',
                               'new_name': 'newproject',
                               'version': '2'}]),
                          # Malformed: fewer than 3 colon-separated parts.
                          (['gomod:foo.bar/project'], None)))
def test_resolve_remote_source(workflow, scratch, dr_strs, dependency_replacements):
    """Happy path plus dependency-replacement validation errors."""
    build_json = {'metadata': {'labels': {'koji-task-id': str(KOJI_TASK_ID)}}}
    mock_build_json(build_json=build_json)
    mock_cachito_api(workflow, dependency_replacements=dependency_replacements)
    workflow.user_params['scratch'] = scratch
    err = None
    if dr_strs and not scratch:
        err = 'Cachito dependency replacements are only allowed for scratch builds'
    # Malformed replacement strings take precedence over the scratch check.
    if dr_strs and any(len(dr.split(':')) < 3 for dr in dr_strs):
        err = 'Cachito dependency replacements must be'
    run_plugin_with_args(
        workflow,
        dependency_replacements=dr_strs,
        expect_error=err
    )
@pytest.mark.parametrize('build_json', ({}, {'metadata': {}}))
def test_no_koji_user(workflow, build_json, caplog):
    """The plugin still runs (and logs) when no Koji task owner is found."""
    # Same content as the mock_reactor_config() default.
    reactor_config = dedent("""\
        version: 1
        cachito:
          api_url: {}
          auth:
            ssl_certs_dir: {}
        koji:
          hub_url: /
          root_url: ''
          auth: {{}}
        """.format(CACHITO_URL, workflow._tmpdir))
    mock_reactor_config(workflow, reactor_config)
    mock_build_json(build_json=build_json)
    mock_cachito_api(workflow, user='unknown_user')
    # Empty build json -> missing metadata; non-empty -> missing/invalid
    # task id label.
    log_msg = 'No build metadata'
    if build_json:
        log_msg = 'Invalid Koji task ID'
    run_plugin_with_args(workflow)
    assert log_msg in caplog.text
@pytest.mark.parametrize('pop_key', ('repo', 'ref', 'packages'))
def test_invalid_remote_source_structure(workflow, pop_key):
    """A source request missing any required key is rejected."""
    source_request = {
        'id': CACHITO_REQUEST_ID,
        'repo': REMOTE_SOURCE_REPO,
        'ref': REMOTE_SOURCE_REF,
        'packages': REMOTE_SOURCE_PACKAGES,
    }
    source_request.pop(pop_key)
    mock_cachito_api(workflow, source_request=source_request)
    run_plugin_with_args(workflow, expect_error='Received invalid source request')
def test_ignore_when_missing_cachito_config(workflow):
    """Plugin is a no-op (returns None) when reactor config lacks cachito."""
    reactor_config = dedent("""\
        version: 1
        koji:
          hub_url: /
          root_url: ''
          auth: {}
        """)
    mock_reactor_config(workflow, reactor_config)
    result = run_plugin_with_args(workflow, expect_result=False)
    assert result is None
def test_invalid_cert_reference(workflow):
    """A nonexistent ssl_certs_dir in the cachito config is an error."""
    bad_certs_dir = str(workflow._tmpdir.join('invalid-dir'))
    reactor_config = dedent("""\
        version: 1
        cachito:
          api_url: {}
          auth:
            ssl_certs_dir: {}
        koji:
          hub_url: /
          root_url: ''
          auth: {{}}
        """.format(CACHITO_URL, bad_certs_dir))
    mock_reactor_config(workflow, reactor_config)
    run_plugin_with_args(workflow, expect_error="Cachito ssl_certs_dir doesn't exist")
def test_ignore_when_missing_remote_source_config(workflow):
    """Plugin is a no-op when container.yaml has no remote_source section."""
    remote_source_config = dedent("""---""")
    mock_repo_config(workflow, remote_source_config)
    result = run_plugin_with_args(workflow, expect_result=False)
    assert result is None
@pytest.mark.parametrize(('build_json', 'log_entry'), (
    ({}, 'No build metadata'),
    ({'metadata': None}, 'Invalid Koji task ID'),
    ({'metadata': {}}, 'Invalid Koji task ID'),
    ({'metadata': {'labels': {}}}, 'Invalid Koji task ID'),
    ({'metadata': {'labels': {'koji-task-id': None}}}, 'Invalid Koji task ID'),
    ({'metadata': {'labels': {'koji-task-id': 'not-an-int'}}}, 'Invalid Koji task ID'),
))
def test_bad_build_metadata(workflow, build_json, log_entry, caplog):
    """Broken build metadata logs a warning and falls back to unknown_user."""
    mock_build_json(build_json=build_json)
    mock_cachito_api(workflow, user='unknown_user')
    run_plugin_with_args(workflow)
    assert log_entry in caplog.text
    assert 'unknown_user' in caplog.text
def run_plugin_with_args(workflow, dependency_replacements=None, expect_error=None,
                         expect_result=True):
    """Run the resolve_remote_source plugin and verify its results.

    When *expect_error* is set, asserts the run raises
    PluginFailedException matching it and returns None.  When
    *expect_result* is true, also checks the plugin's worker-build side
    effects in the orchestrate_build workspace.
    """
    runner = PreBuildPluginsRunner(
        workflow.builder.tasker,
        workflow,
        [
            {'name': ResolveRemoteSourcePlugin.key,
             'args': {'dependency_replacements': dependency_replacements}},
        ]
    )
    if expect_error:
        with pytest.raises(PluginFailedException, match=expect_error):
            runner.run()
        return
    results = runner.run()[ResolveRemoteSourcePlugin.key]
    if expect_result:
        assert results['annotations']['remote_source_url']
        assert results['remote_source_json'] == REMOTE_SOURCE_JSON
        assert results['remote_source_path'] == expected_dowload_path(workflow)
        # A result means the plugin was enabled and executed successfully.
        # Let's verify the expected side effects.
        orchestrator_build_workspace = workflow.plugin_workspace[OrchestrateBuildPlugin.key]
        worker_params = orchestrator_build_workspace[WORKSPACE_KEY_OVERRIDE_KWARGS][None]
        assert worker_params['remote_source_url'] == CACHITO_REQUEST_DOWNLOAD_URL
        assert worker_params['remote_source_configs'] == CACHITO_REQUEST_CONFIG_URL
        # Env vars are rewritten relative to the in-container /remote-source
        # mount, plus the CACHITO_ENV_FILE pointer.
        assert worker_params['remote_source_build_args'] == {
            'GO111MODULE': 'on',
            'GOPATH': '/remote-source/deps/gomod',
            'GOCACHE': '/remote-source/deps/gomod',
            'CACHITO_ENV_FILE': '/remote-source/cachito.env',
        }
        assert worker_params['remote_source_icm_url'] == CACHITO_ICM_URL
    return results
| {
"content_hash": "0fb97655960f2592e739ff89f80b29d1",
"timestamp": "",
"source": "github",
"line_count": 403,
"max_line_length": 98,
"avg_line_length": 33.08684863523573,
"alnum_prop": 0.6147442627868607,
"repo_name": "DBuildService/atomic-reactor",
"id": "fdf8afa131cd0bf1086a3781c786078134767198",
"size": "13334",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/plugins/test_resolve_remote_source.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "506236"
},
{
"name": "Shell",
"bytes": "3589"
}
],
"symlink_target": ""
} |
"""Dashboard models."""
import datetime as dt
from enum import Enum
from league.database import (Column, Model, SurrogatePK, association_proxy, db,
func, reference_col, relationship, session)
Color = Enum('Color', 'white black')
Color.white.abbr = 'w'
Color.black.abbr = 'b'
class Player(SurrogatePK, Model):
    """A league player, linked to the games played as white or black."""

    __tablename__ = 'players'
    first_name = Column(db.String(30))
    last_name = Column(db.String(30))
    # AGA membership number; unique per player.
    aga_id = Column(db.Integer, index=True, unique=True)
    # Numeric rank; Game.episode_stats/season_stats compare it against 0
    # (positive presumably dan, negative kyu -- TODO confirm).
    aga_rank = Column(db.Integer)
    white_player_games = relationship('WhitePlayerGame', backref='player')
    white_games = association_proxy('white_player_games', 'game')
    black_player_games = relationship('BlackPlayerGame', backref='player')
    black_games = association_proxy('black_player_games', 'game')

    def __init__(self, first_name, last_name, aga_id, aga_rank):
        """Initialize player."""
        self.first_name = first_name
        self.last_name = last_name
        self.aga_id = aga_id
        self.aga_rank = aga_rank

    def __repr__(self):
        """Represent instance as a unique string."""
        return ('<Player({first_name}, {last_name}, {aga_id})>'.
                format(first_name=self.first_name, last_name=self.last_name,
                       aga_id=self.aga_id))

    @property
    def games(self):
        """All games that player has played (black games first)."""
        return self.black_games + self.white_games

    @property
    def full_name(self):
        """Full player name."""
        return '{0} {1}'.format(self.first_name, self.last_name)

    @classmethod
    def get_by_aga_id(cls, aga_id):
        """Get player by AGA ID.

        Raises IndexError when no such player exists (kept for backward
        compatibility with existing callers).
        """
        return cls.query.filter_by(aga_id=aga_id)[0]

    @classmethod
    def get_players(cls):
        """Get all players."""
        return cls.query.all()

    def latest_season(self):
        """Get latest season player has played in.

        Raises IndexError when the player has no games.
        """
        return sorted([game.season for game in self.games])[-1]

    def _win_loss_record(self, games):
        """Tally {'wins': ..., 'losses': ...} for this player over *games*.

        A game counts as a win when this player held the winning color;
        every other game in the list counts as a loss.
        """
        wins, losses = 0, 0
        for game in games:
            if ((game.winner == Color.white and game.white == self) or
                    (game.winner == Color.black and game.black == self)):
                wins += 1
            else:
                losses += 1
        return {'wins': wins, 'losses': losses}

    def season_stats(self, season=None):
        """Get player statistics for a season (default: latest season)."""
        if season is None:
            season = Game.latest_season_episode()[0]
        return self._win_loss_record(
            [game for game in self.games if game.season == season])

    def latest_season_episode(self):
        """Get latest (season, episode) player has played in, or (0, 0)."""
        games = self.games
        if len(games) > 0:
            return sorted([(game.season, game.episode)
                           for game in games])[-1]
        else:
            return (0, 0)

    def episode_stats(self, season_episode=None):
        """Get player statistics for a (season, episode) pair.

        Defaults to the league-wide latest episode, not this player's.
        """
        if season_episode is None:
            season_episode = Game.latest_season_episode()
        return self._win_loss_record(
            [game for game in self.games
             if game.season == season_episode[0] and
             game.episode == season_episode[1]])

    def league_stats(self):
        """Get player statistics for the whole league."""
        return self._win_loss_record(self.games)
class Game(SurrogatePK, Model):
    """A game record."""

    __tablename__ = 'games'
    # One-row association objects linking the game to its white/black
    # players; the `white`/`black` proxies expose the Player directly.
    white_player_game = relationship('WhitePlayerGame', backref='game',
                                     cascade='all, delete-orphan',
                                     uselist=False)
    white = association_proxy('white_player_game', 'player',
                              creator=lambda pl: WhitePlayerGame(player=pl))
    black_player_game = relationship('BlackPlayerGame', backref='game',
                                     cascade='all, delete-orphan',
                                     uselist=False)
    black = association_proxy('black_player_game', 'player',
                              creator=lambda pl: BlackPlayerGame(player=pl))
    # Winning color (Color.white / Color.black).
    winner = Column(db.Enum(Color))
    handicap = Column(db.SmallInteger)
    komi = Column(db.SmallInteger)
    season = Column(db.Integer)
    episode = Column(db.Integer)
    created_at = Column(db.DateTime, nullable=False, default=dt.datetime.utcnow)
    played_at = Column(db.DateTime, nullable=False, default=dt.datetime.utcnow)
    # Only refreshed explicitly in update() below, not by the database.
    last_modified_at = Column(db.DateTime, nullable=False,
                              default=dt.datetime.utcnow)
    # NOTE(review): an Index constructed inside the class body with bare
    # column-name strings is not attached via __table_args__ -- verify the
    # index is actually created on the table.
    db.Index('ix_games_season_episode', 'season', 'episode')

    def __init__(self, white, black, winner, handicap, komi, season, episode,
                 created_at=None, played_at=None, last_modified_at=None):
        """Initialize game."""
        self.white = white
        self.black = black
        self.winner = winner
        self.handicap = handicap
        self.komi = komi
        self.season = season
        self.episode = episode
        # None timestamps fall back to the column defaults on flush.
        self.created_at = created_at
        self.played_at = played_at
        self.last_modified_at = last_modified_at

    def __repr__(self):
        """Represent instance as a unique string."""
        return ('<Game({white!r}, {black!r}, {winner}, {handicap}, {komi})>'.
                format(white=self.white, black=self.black, winner=self.winner,
                       handicap=self.handicap, komi=self.komi))

    def to_dict(self):
        """Return game as dictionary (timestamps stringified)."""
        return {
            'game_id': self.id,
            'white_id': self.white.id,
            'black_id': self.black.id,
            'winner': self.winner.name,
            'handicap': self.handicap,
            'komi': self.komi,
            'season': self.season,
            'episode': self.episode,
            'created_at': str(self.created_at),
            'played_at': str(self.played_at),
            'last_modified_at': str(self.last_modified_at)
        }

    def update(self, **kwargs):
        """Override update method to reset last_modified_at."""
        self.last_modified_at = dt.datetime.utcnow()
        super().update(**kwargs)

    @classmethod
    def get_by_season_ep(cls, season, episode):
        """Get games by season and episode (returns a query object)."""
        return cls.query.filter_by(season=season, episode=episode)

    @classmethod
    def get_max_season_ep(cls):
        """Get maximum season and episode, each defaulting to 0.

        NOTE(review): the maxima are computed independently, so the pair
        may not correspond to any actual game (cf. latest_season_episode).
        """
        max_season, max_episode = session.query(func.max(cls.season),
                                                func.max(cls.episode)).one()
        max_season = 0 if max_season is None else max_season
        max_episode = 0 if max_episode is None else max_episode
        return (max_season, max_episode)

    @property
    def players(self):
        """Get players in game as set."""
        return frozenset((self.white, self.black))

    @classmethod
    def latest_season_episode(cls):
        """Get latest episode and season, or (0, 0) when no games exist.

        Loads every game; fine for small tables.
        """
        games = cls.query.all()
        if len(games) > 0:
            return sorted([(game.season, game.episode) for game in games])[-1]
        else:
            return (0, 0)

    @classmethod
    def episode_stats(cls, episode=None, season=None, num_players=5):
        """Get statistics for an episode.

        Returns per-category enumerated top-*num_players* lists.
        NOTE(review): the tallying loop here largely duplicates
        season_stats() below -- candidate for a shared helper.
        """
        latest_season_episode = cls.latest_season_episode()
        if episode is None:
            episode = latest_season_episode[1]
        if season is None:
            season = latest_season_episode[0]
        players = Player.query.all()
        # Per-player counters, keyed by player id.
        wins = {p.id: 0 for p in players}
        games_played = {p.id: 0 for p in players}
        stones_given = {p.id: 0 for p in players}
        dans_slain = {p.id: 0 for p in players}
        kyus_killed = {p.id: 0 for p in players}
        games = [game for game in cls.query.all()
                 if game.season == season and
                 game.episode == episode]
        for game in games:
            if game.winner is Color.white:
                wins[game.white.id] = wins.get(game.white.id, 0) + 1
            else:
                wins[game.black.id] = wins.get(game.black.id, 0) + 1
            games_played[game.white.id] = games_played.get(game.white.id, 0) + 1
            games_played[game.black.id] = games_played.get(game.black.id, 0) + 1
            # Only white gives handicap stones.
            stones_given[game.white.id] = \
                stones_given[game.white.id] + (game.handicap)
            black_player = Player.get_by_id(game.black.id)
            white_player = Player.get_by_id(game.white.id)
            # Rank sign convention: > 0 vs < 0 (presumably dan vs kyu --
            # TODO confirm against Player.aga_rank semantics).
            if (white_player.aga_rank > 0 and black_player.aga_rank < 0 and
                    game.winner is Color.black):
                dans_slain[game.black.id] = \
                    dans_slain.get(game.black.id, 0) + 1
            elif (black_player.aga_rank > 0 and white_player.aga_rank < 0 and
                    game.winner is Color.white):
                dans_slain[game.white.id] = \
                    dans_slain.get(game.white.id, 0) + 1
            if (white_player.aga_rank > 0 and black_player.aga_rank < 0 and
                    game.winner is Color.white):
                kyus_killed[game.white.id] = \
                    kyus_killed.get(game.white.id, 0) + 1
            elif (black_player.aga_rank > 0 and white_player.aga_rank < 0 and
                    game.winner is Color.black):
                kyus_killed[game.black.id] = \
                    kyus_killed.get(game.black.id, 0) + 1
        # Ratio only defined for players with at least one game.
        win_ratios = {p.id: wins[p.id] / games_played[p.id]
                      for p in players if games_played[p.id] > 0}
        wins_list = enumerate(sorted(
            [(Player.get_by_id(player_id), player_wins)
             for player_id, player_wins in wins.items()],
            key=lambda stat: stat[1],
            reverse=True
        )[0:num_players])
        games_played_list = enumerate(sorted(
            [(Player.get_by_id(player_id), player_games_played)
             for player_id, player_games_played in games_played.items()],
            key=lambda stat: stat[1],
            reverse=True
        )[0:num_players])
        win_ratios_list = enumerate(sorted(
            [(Player.get_by_id(player_id), player_win_ratio)
             for player_id, player_win_ratio in win_ratios.items()],
            key=lambda stat: stat[1],
            reverse=True
        )[0:num_players])
        stones_given_list = enumerate(sorted(
            [(Player.get_by_id(player_id), player_stones_given)
             for player_id, player_stones_given in stones_given.items()],
            key=lambda stat: stat[1],
            reverse=True
        )[0:num_players])
        dans_slain_list = enumerate(sorted(
            [(Player.get_by_id(player_id), player_dans_slain)
             for player_id, player_dans_slain in dans_slain.items()],
            key=lambda stat: stat[1],
            reverse=True
        )[0:num_players])
        kyus_killed_list = enumerate(sorted(
            [(Player.get_by_id(player_id), player_kyus_killed)
             for player_id, player_kyus_killed in kyus_killed.items()],
            key=lambda stat: stat[1],
            reverse=True
        )[0:num_players])
        return {'wins': wins_list,
                'games_played': games_played_list,
                'win_ratios': win_ratios_list,
                'stones_given': stones_given_list,
                'dans_slain': dans_slain_list,
                'kyus_killed': kyus_killed_list}

    @classmethod
    def season_stats(cls, season=None, num_players=5):
        """Get statistics for a season.

        Returns top-*num_players* leaderboards plus three "award" player
        lists (steady_freddy, fifteen_min_fame, rock_bottom) defined below.
        """
        latest_season_episode = cls.latest_season_episode()
        if season is None:
            season = latest_season_episode[0]
        players = Player.query.all()
        # Per-player counters, keyed by player id; *_per_ep are nested
        # {player_id: {episode: count}} over episodes 1..latest.
        wins_minus_losses = {p.id: 0 for p in players}
        wins = {p.id: 0 for p in players}
        games_played = {p.id: 0 for p in players}
        games_played_one_ep = {p.id: 0 for p in players}
        dans_slain = {p.id: 0 for p in players}
        kyus_killed = {p.id: 0 for p in players}
        games_against_weaker = {p.id: 0 for p in players}
        losses = {p.id: 0 for p in players}
        games_per_ep = {p.id: {ep: 0 for ep
                               in range(1, latest_season_episode[1] + 1)}
                        for p in players}
        wins_per_ep = {p.id: {ep: 0 for ep
                              in range(1, latest_season_episode[1] + 1)}
                       for p in players}
        losses_per_ep = {p.id: {ep: 0 for ep
                                in range(1, latest_season_episode[1] + 1)}
                         for p in players}
        games = [game for game in cls.query.all()
                 if game.season == season]
        for game in games:
            if game.winner is Color.white:
                wins[game.white.id] = wins.get(game.white.id, 0) + 1
                wins_per_ep[game.white.id][game.episode] = \
                    wins_per_ep[game.white.id][game.episode] + 1
                losses[game.black.id] = losses.get(game.black.id, 0) + 1
                losses_per_ep[game.black.id][game.episode] = \
                    losses_per_ep[game.black.id][game.episode] + 1
            else:
                wins[game.black.id] = wins.get(game.black.id, 0) + 1
                wins_per_ep[game.black.id][game.episode] = \
                    wins_per_ep[game.black.id][game.episode] + 1
                losses[game.white.id] = losses.get(game.white.id, 0) + 1
                losses_per_ep[game.white.id][game.episode] = \
                    losses_per_ep[game.white.id][game.episode] + 1
            games_played[game.white.id] = games_played.get(game.white.id, 0) + 1
            games_played[game.black.id] = games_played.get(game.black.id, 0) + 1
            games_per_ep[game.white.id][game.episode] = \
                games_per_ep[game.white.id][game.episode] + 1
            games_per_ep[game.black.id][game.episode] = \
                games_per_ep[game.black.id][game.episode] + 1
            black_player = Player.get_by_id(game.black.id)
            white_player = Player.get_by_id(game.white.id)
            # Same rank-sign convention as episode_stats() above.
            if (white_player.aga_rank > 0 and black_player.aga_rank < 0 and
                    game.winner is Color.black):
                dans_slain[game.black.id] = \
                    dans_slain.get(game.black.id, 0) + 1
            elif (black_player.aga_rank > 0 and white_player.aga_rank < 0 and
                    game.winner is Color.white):
                dans_slain[game.white.id] = \
                    dans_slain.get(game.white.id, 0) + 1
            if (white_player.aga_rank > 0 and black_player.aga_rank < 0 and
                    game.winner is Color.white):
                kyus_killed[game.white.id] = \
                    kyus_killed.get(game.white.id, 0) + 1
            elif (black_player.aga_rank > 0 and white_player.aga_rank < 0 and
                    game.winner is Color.black):
                kyus_killed[game.black.id] = \
                    kyus_killed.get(game.black.id, 0) + 1
            if (white_player.aga_rank > black_player.aga_rank):
                games_against_weaker[game.white.id] = \
                    games_against_weaker[game.white.id] + 1
            if (black_player.aga_rank > white_player.aga_rank):
                games_against_weaker[game.black.id] = \
                    games_against_weaker[game.black.id] + 1
        # Most games any single episode; 2*wins - games == wins - losses.
        games_played_one_ep = {p.id: max([g for (d, g)
                                          in games_per_ep[p.id].items()])
                               for p in players}
        wins_minus_losses = {p.id: 2 * wins[p.id] - games_played[p.id]
                             for p in players if games_played[p.id] > 0}
        wins_list = enumerate(sorted(
            [(Player.get_by_id(player_id), player_wins)
             for player_id, player_wins in wins.items()],
            key=lambda stat: stat[1],
            reverse=True
        )[0:num_players])
        games_played_list = enumerate(sorted(
            [(Player.get_by_id(player_id), player_games_played)
             for player_id, player_games_played in games_played.items()],
            key=lambda stat: stat[1],
            reverse=True
        )[0:num_players])
        games_played_one_ep_list = enumerate(sorted(
            [(Player.get_by_id(player_id), player_games_played_one_ep)
             for player_id, player_games_played_one_ep
             in games_played_one_ep.items()],
            key=lambda stat: stat[1],
            reverse=True
        )[0:num_players])
        wins_minus_losses_list = enumerate(sorted(
            [(Player.get_by_id(player_id), player_wins_minus_losses)
             for player_id, player_wins_minus_losses
             in wins_minus_losses.items()],
            key=lambda stat: stat[1],
            reverse=True
        )[0:num_players])
        games_against_weaker_list = enumerate(sorted(
            [(Player.get_by_id(player_id), player_games_against_weaker)
             for player_id, player_games_against_weaker
             in games_against_weaker.items()],
            key=lambda stat: stat[1],
            reverse=True
        )[0:num_players])
        dans_slain_list = enumerate(sorted(
            [(Player.get_by_id(player_id), player_dans_slain)
             for player_id, player_dans_slain in dans_slain.items()],
            key=lambda stat: stat[1],
            reverse=True
        )[0:num_players])
        kyus_killed_list = enumerate(sorted(
            [(Player.get_by_id(player_id), player_kyus_killed)
             for player_id, player_kyus_killed in kyus_killed.items()],
            key=lambda stat: stat[1],
            reverse=True
        )[0:num_players])
        losses_list = enumerate(sorted(
            [(Player.get_by_id(player_id), player_losses)
             for player_id, player_losses in losses.items()],
            key=lambda stat: stat[1],
            reverse=True
        )[0:num_players])
        # Played at least one game in every episode of the season.
        steady_freddy = [
            p for p in players
            if min([g for (d, g) in games_per_ep[p.id].items()]) > 0
        ]
        # Some episode with >= 3 wins and no losses.
        fifteen_min_fame = [
            p for p in players
            if len([g for (d, g)
                    in games_per_ep[p.id].items()
                    if (wins_per_ep[p.id][d] >= 3 and
                        losses_per_ep[p.id][d] == 0)]) > 0
        ]
        # Some episode with >= 3 losses and no wins.
        rock_bottom = [
            p for p in players
            if len([g for (d, g)
                    in games_per_ep[p.id].items()
                    if (losses_per_ep[p.id][d] >= 3 and
                        wins_per_ep[p.id][d] == 0)]) > 0
        ]
        return {'wins': wins_list,
                'games_played': games_played_list,
                'games_played_one_ep': games_played_one_ep_list,
                'wins_minus_losses': wins_minus_losses_list,
                'games_against_weaker': games_against_weaker_list,
                'dans_slain': dans_slain_list,
                'kyus_killed': kyus_killed_list,
                'losses': losses_list,
                'steady_freddy': steady_freddy,
                'fifteen_min_fame': fifteen_min_fame,
                'rock_bottom': rock_bottom}
class WhitePlayerGame(Model):
    """A map between players and the games they've played as white."""

    __tablename__ = 'white_player_games'
    # Composite primary key: (player, game).
    player_id = reference_col('players', primary_key=True)
    game_id = reference_col('games', primary_key=True)
class BlackPlayerGame(Model):
    """A map between players and the games they've played as black."""

    __tablename__ = 'black_player_games'
    # Composite primary key: (player, game).
    player_id = reference_col('players', primary_key=True)
    game_id = reference_col('games', primary_key=True)
| {
"content_hash": "e97fcd9475f358935413aee3a1724756",
"timestamp": "",
"source": "github",
"line_count": 524,
"max_line_length": 80,
"avg_line_length": 38.80534351145038,
"alnum_prop": 0.5386052916297827,
"repo_name": "MattClarke131/league",
"id": "2a4f42a0529667d45cb5507f3e96e0de730141ff",
"size": "20358",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/league/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1240"
},
{
"name": "HTML",
"bytes": "49706"
},
{
"name": "JavaScript",
"bytes": "143"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "106446"
},
{
"name": "Shell",
"bytes": "1400"
}
],
"symlink_target": ""
} |
"""Configure test fixtures"""
# std imports
import os
import platform
import subprocess
# 3rd party
import pytest
IS_WINDOWS = platform.system() == 'Windows'
all_terms_params = 'xterm screen ansi vt220 rxvt cons25 linux'.split()
many_lines_params = [40, 80]
# we must test a '1' column for conditional in _handle_long_word
many_columns_params = [1, 10]
def envvar_enabled(envvar):
    """Return True if environment variable *envvar* is set and enabled.

    Unset values, 'no', '0', and 'false' are treated as False regardless
    of case.  All other values are considered True.
    """
    raw = os.environ.get(envvar)
    if raw is None:
        return False
    if raw.lower() in ('no', 'false'):
        return False
    try:
        return int(raw) != 0
    except ValueError:
        return True
# Environment toggles controlling how exhaustive the test matrix is.
TEST_FULL = envvar_enabled('TEST_FULL')
TEST_KEYBOARD = envvar_enabled('TEST_KEYBOARD')
TEST_QUICK = envvar_enabled('TEST_QUICK')
TEST_RAW = envvar_enabled('TEST_RAW')

if TEST_FULL:
    try:
        all_terms_params = [
            # use all values of the first column of data in output of 'toe -a'
            _term.split(None, 1)[0] for _term in
            subprocess.Popen(('toe', '-a'),  # pylint: disable=consider-using-with
                             stdout=subprocess.PIPE,
                             close_fds=True)
            .communicate()[0].splitlines()]
    except OSError:
        # 'toe' not installed; keep the default list.
        pass
elif IS_WINDOWS:
    all_terms_params = ['vtwin10', ]
elif TEST_QUICK:
    all_terms_params = 'xterm screen ansi linux'.split()

if TEST_QUICK:
    many_lines_params = [80, ]
    many_columns_params = [25, ]
@pytest.fixture(params=all_terms_params)
def all_terms(request):
    """Common kind values for all kinds of terminals (one per param)."""
    return request.param
@pytest.fixture(params=many_lines_params)
def many_lines(request):
    """Various number of lines for screen height (one per param)."""
    return request.param
@pytest.fixture(params=many_columns_params)
def many_columns(request):
    """Various number of columns for screen width (one per param)."""
    return request.param
| {
"content_hash": "680d3109a5ddfa1263290b0755c8f32f",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 82,
"avg_line_length": 24.833333333333332,
"alnum_prop": 0.6452540747842761,
"repo_name": "jquast/blessed",
"id": "8ef49e96c93c7db89a133d64f1c587c18a5bea4f",
"size": "2086",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/conftest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "387567"
}
],
"symlink_target": ""
} |
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.shortcuts import redirect, render
from django.utils.translation import ugettext as _
from django.views.decorators.debug import sensitive_post_parameters
from user.forms import UserForm, UserInformationForm, UserEditForm, PrivacyAgreementForm
from util.error.reporting import db_error
@sensitive_post_parameters('password1', 'password2')
@login_required()
def modify(request):
    """Edit the logged-in user's account and profile information.

    GET renders unbound forms; a valid POST saves and redirects to the
    profile page; an invalid POST re-renders with the bound forms.
    """
    user = request.user
    student = user.userinformation  # NOTE(review): unused local
    if request.method == "POST":
        user_form = UserEditForm(request.POST, instance=request.user)
        userinformation_form = UserInformationForm(
            request.POST, instance=request.user.userinformation)
        if user_form.is_valid() and userinformation_form.is_valid():
            # NOTE(review): relies on ModelForm validation having copied
            # cleaned data onto `user`; user_form.save() would be clearer.
            user.save()
            userinformation_form.save()
            return redirect('user-profile')
    else:
        user_form = UserEditForm(instance=request.user)
        userinformation_form = UserInformationForm(
            instance=request.user.userinformation)
    return render(
        request,
        'user/edit.html',
        {
            'title': '{} {}'.format(user.first_name, user.last_name),
            'user_form': user_form,
            'userinformation_form': userinformation_form
        }
    )
def profile(request, user_id=None):
    """Show a user profile.

    Without *user_id*: the logged-in user's own profile (redirecting to
    login, or to the privacy consent page when the policy has not been
    accepted).  With *user_id*: that user's public profile page.
    """
    attend = []
    if user_id is None:
        if request.user.is_authenticated:
            user = request.user
            # require consenting to privacy policy
            if not user.userinformation.accepted_privacy_policy:
                return redirect('privacy-policy-updated')
            template = 'user/profile.html'
            is_own = True
            # construct a list of attended courses that are not yet archived
            # NOTE(review): `archiving` is compared to the string 't' --
            # presumably a char flag; confirm against the Course model.
            attend = [(p.course, p.course.position_in_queue(user.userinformation)) for p in user.userinformation.participation_set.all() if p.course.archiving == 't']
        else:
            return redirect('login')
    else:
        try:
            user = User.objects.get(id=user_id)
            is_own = request.user.id == user.id
        except User.DoesNotExist:
            return db_error(request, _('This user does not exist'))
        template = 'user/public_profile.html'
    return render(
        request,
        template,
        {
            'course_list_show_subject': True,
            'profiled_user': user,
            'is_own': is_own,
            'attend': attend,
            'title': '{} {}'.format(user.first_name, user.last_name)
        }
    )
@login_required
def privacy_consent(request):
    """Ask the logged-in user to accept the privacy policy.

    Users who already consented are redirected to their profile.
    """
    user = request.user
    user_info = user.userinformation
    # users who have consented already are not in scope for this
    if user_info.accepted_privacy_policy:
        return redirect('user-profile')
    if request.method == "POST":
        # validate the form
        agreement_form = PrivacyAgreementForm(
            request.POST, instance=request.user.userinformation)
        if agreement_form.is_valid():
            # NOTE(review): relies on ModelForm validation having copied
            # cleaned data onto `user_info`; agreement_form.save() would
            # be clearer.
            user_info.save()
            return redirect('user-profile')
    else:
        agreement_form = PrivacyAgreementForm()
    return render(
        request,
        "user/privacy-agreement.html",
        {
            'title': "Privacy Policy",
            'agreement_form': agreement_form,
        }
    )
@login_required
def delete_account(request):
    """Delete the logged-in user's account after explicit confirmation.

    A POST carrying a 'delete-confirm' field deletes the account and
    renders a success page; anything else redirects back to the edit form.
    """
    if request.method == "POST" and "delete-confirm" in request.POST:
        user = request.user
        user.delete()
        # Pass the real request to render(); the original passed None,
        # which bypasses RequestContext/context processors.
        return render(
            request,
            "user/deletion-success.html",
            {
                'title': "Account Deletion Successful"
            }
        )
    else:
        return redirect('modify-user')
| {
"content_hash": "adfdb8c30be714aad4953f3339bf0ea6",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 166,
"avg_line_length": 30.206349206349206,
"alnum_prop": 0.6106148187073043,
"repo_name": "fsr/course-management",
"id": "552bf6eeaca133bb7377529fb76e125289c10648",
"size": "3806",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "user/views/user.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "45"
},
{
"name": "HTML",
"bytes": "103817"
},
{
"name": "Python",
"bytes": "77635"
}
],
"symlink_target": ""
} |
import sys
import time

# Flags recording whether the optional native acquisition backends loaded.
imported_neural_signal = False
no_bci = False
try:
    #from unlock.bci.acquire.neuralsignal import create_timer
    from unlock.bci.acquire.random_signal import create_timer, create_random_signal
    imported_neural_signal = True
except:  # noqa: E722  NOTE(review): bare except also hides non-import errors
    assert sys.platform == 'darwin' or sys.platform == 'linux'
    no_bci = True

# Each hardware backend is optional; a missing one is reported and skipped.
try:
    from unlock.bci.acquire.mobilab_signal import create_nonblocking_mobilab_signal
except Exception as e:
    print("unlock/acquire.__init__.py: mobilab not present", e)
try:
    from unlock.bci.acquire.enobio_signal import create_nonblocking_enobio_signal
except:  # noqa: E722  NOTE(review): bare except
    print("unlock/acquire.__init__.py: enobio not present")
try:
    from unlock.bci.acquire.nidaq_signal import create_nidaq_signal
except:  # noqa: E722  NOTE(review): bare except
    print("unlock/acquire.__init__.py: nidaq not present")
from unlock.bci.acquire.audio_signal import *
from unlock.bci.acquire.file_signal import *
class NoBciRandomSignal(object):
    """Seeded random-signal stand-in used when native BCI bindings are absent.

    Mimics the open/init/start/acquire/getdata/stop/close interface of the
    native signal objects.
    """

    def __init__(self, channels=8, seed=42, lower_bound=1, upper_bound=65536):
        super(NoBciRandomSignal, self).__init__()
        import random
        self.chans = channels
        self.rand = random.Random()
        self.rand.seed(seed)
        self.lower_bound = lower_bound
        self.upper_bound = upper_bound

    def open(self, macaddr):
        """Record the device address; always succeeds."""
        self.mac = macaddr
        return True

    def init(self, channels):
        """Set the channel count; always succeeds."""
        self.chans = channels
        return True

    def channels(self):
        return self.chans

    def start(self):
        return True

    def acquire(self):
        # Reports one sample per channel as available.
        return 1 * self.chans

    def getdata(self, samples):
        """Return *samples* random float values; the final slot is zeroed."""
        import numpy as np
        values = [float(self.rand.randint(self.lower_bound, self.upper_bound))
                  for _ in range(samples)]
        data = np.array(values)
        data[-1] = 0
        return data

    def getEaplsedMicros(self):
        # (misspelling of "Elapsed" kept for interface compatibility)
        pass

    def timestamp(self):
        pass

    def stop(self):
        pass

    def close(self):
        pass
class BasicTimer(object):
    """Pure-Python fallback timer used when the native timer is unavailable."""

    def __init__(self):
        # Epoch reference, in seconds.
        self.start = time.time()

    def elapsedMicroSecs(self):
        """Microseconds elapsed since construction.

        Fix: the previous implementation returned *seconds* despite the
        method name, disagreeing with the native timer it substitutes for.
        """
        return (time.time() - self.start) * 1e6
class UnlockAcquisitionFactory:
    def __init__(self):
        # Use the native high-resolution timer when the bindings loaded;
        # otherwise fall back to the pure-Python BasicTimer.
        if imported_neural_signal:
            self.timer = create_timer()
        else:
            self.timer = BasicTimer()
def create_nidaq_signal(self):
signal = create_nidaq_signal(self.timer)
if not signal.start():
raise RuntimeError('Failed to start National Instruments DAQ')
return signal
#for j in range(50):
# ret = daq.acquire()
# ret = daq.getdata(ret)
# f = open('test.data', 'wb')
# import numpy as np
# a = np.array(ret, dtype='float64')
# a = a.reshape((500, 4))
# #np.savetxt(f, a, fmt='%d', delimiter='\t')
# for i in range(20):
# print(a[i])
#
def create_audio_signal(self):
signal = AudioSignal()
if not signal.start():
raise RuntimeError('failed to start audio signal')
return signal
def create_enobio_signal(self, mac_addr):
assert 'mac_addr' in self.config['signal']
mac_addr = [int(value,0) for value in [x.strip() for x in self.config['signal']['mac_addr'].split(',')]]
signal = create_nonblocking_enobio_signal(self.timer)
if not signal.open(mac_addr):
print('enobio did not open')
raise RuntimeError('enobio did not open')
if not signal.start():
print('enobio device did not start streaming')
raise RuntimeError('enobio device did not start streaming')
return signal
def create_mobilab_signal(self, com_port, analog_channels_bitmask):
from unlock.bci import acquire
signal = create_nonblocking_mobilab_signal(
self.timer, analog_channels_bitmask, 0, com_port)
if not signal.start():
print('mobilab device did not start streaming')
raise RuntimeError('mobilab device did not start streaming')
return signal
def create_file_signal(self, timer):
from unlock.bci import acquire
timer = acquire.create_timer()
raise Exception("FIX ME")
signal = acquire.MemoryResidentFileSignal(self.config['bci']['signal']['file'], timer, channels=17) #analysis/data/valid/emg_signal_1380649383_tongue_c.5_r.5_i1.txt',
if not signal.start():
print('file signal failed to start; filename = ', self.config['filename'])
raise RuntimeError('file signal failed to start')
return signal
def create_random_signal(self):
if no_bci:
signal = NoBciRandomSignal()
else:
from unlock.bci import acquire
signal = create_random_signal(self.timer)
signal.open([])
signal.start()
return signal
| {
"content_hash": "c91c1f13b5e16af280277dddcb61e082",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 175,
"avg_line_length": 31.89873417721519,
"alnum_prop": 0.5954365079365079,
"repo_name": "NeuralProsthesisLab/unlock",
"id": "9a553808a850ce9be1f825a05e4ec741c2b55fde",
"size": "6619",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "unlock/bci/acquire/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1386"
},
{
"name": "C++",
"bytes": "994297"
},
{
"name": "CSS",
"bytes": "8977"
},
{
"name": "Go",
"bytes": "62639"
},
{
"name": "HTML",
"bytes": "33643"
},
{
"name": "JavaScript",
"bytes": "711666"
},
{
"name": "Makefile",
"bytes": "402"
},
{
"name": "Matlab",
"bytes": "81353"
},
{
"name": "Python",
"bytes": "493447"
},
{
"name": "Shell",
"bytes": "3842"
},
{
"name": "TeX",
"bytes": "29718"
}
],
"symlink_target": ""
} |
"""Call HLA alleles with assembly methods implemented in bwakit.
https://github.com/lh3/bwa/blob/master/README-alt.md#hla-typing
https://github.com/lh3/bwa/tree/master/bwakit
"""
import csv
import glob
import os
import toolz as tz
from bcbio import utils
from bcbio.distributed.transaction import file_transaction
from bcbio.hla import groups as hla_groups
from bcbio.pipeline import datadict as dd
from bcbio.provenance import do
def run(data):
    """Type HLA alleles with bwakit's run-HLA helper script.

    Only runs when the aligner produced HLA outputs (hla_base.* files
    exist); writes a ".top" summary call file and records it under
    data["hla"].
    """
    bwakit_dir = os.path.dirname(os.path.realpath(utils.which("run-bwamem")))
    align_file = dd.get_align_bam(data)
    hla_dir = utils.safe_makedir(os.path.join(os.path.dirname(align_file), "hla"))
    hla_base = os.path.join(hla_dir, os.path.basename(align_file) + ".hla")
    # Skip samples for which bwa emitted no HLA-specific files.
    if glob.glob(hla_base + ".*"):
        out_file = hla_base + ".top"
        if not utils.file_exists(out_file):
            do.run("%s/run-HLA %s" % (bwakit_dir, hla_base),
                   "HLA typing with bwakit")
            out_file = _organize_calls(out_file, hla_base, data)
        data["hla"] = {"call_file": out_file,
                       "hlacaller": "bwakit"}
    return data
def _organize_calls(out_file, hla_base, data):
    """Prepare genotype calls, reporting best call along with quality metrics.

    Reads each per-locus bwakit genotype (.gt) file, takes the first line as
    the best call, and writes one summary CSV row per locus including the
    number of alternative p-group options and validation against any
    configured truth set.
    """
    hla_truth = get_hla_truthset(data)
    align_file = dd.get_align_bam(data)
    sample = dd.get_sample_name(data)
    with file_transaction(data, out_file) as tx_out_file:
        with open(tx_out_file, "w") as out_handle:
            writer = csv.writer(out_handle)
            writer.writerow(["sample", "locus", "mismatches", "options", "alleles", "p-groups", "expected",
                             "validates"])
            for genotype_file in glob.glob("%s.HLA-*.gt" % (hla_base)):
                # Locus name is embedded in the file name, e.g. ...hla.HLA-A.gt -> A
                hla_locus = os.path.basename(genotype_file).replace(
                    "%s.hla.HLA-" % os.path.basename(align_file), "").replace(".gt", "")
                with open(genotype_file) as in_handle:
                    total_options = set([])
                    for i, line in enumerate(in_handle):
                        # Assumes bwakit .gt columns: _, allele1, allele2,
                        # mismatch count -- TODO confirm against run-HLA output.
                        _, aone, atwo, m = line.split("\t")[:4]
                        pgroups = (hla_groups.hla_protein(aone, data), hla_groups.hla_protein(atwo, data))
                        if i == 0:
                            # First line is the best-ranked call.
                            call_alleles = [aone, atwo]
                            call_pgroups = pgroups
                            mismatches = m
                        total_options.add(pgroups)
                    # Only write a row when the file had at least one call.
                    if len(total_options) > 0:
                        truth_alleles = tz.get_in([sample, hla_locus], hla_truth, [])
                        writer.writerow([sample, hla_locus, mismatches, len(total_options),
                                         ";".join(call_alleles), ";".join(call_pgroups),
                                         ";".join(truth_alleles), matches_truth(call_alleles, truth_alleles, data)])
    return out_file
def matches_truth(call_alleles, truth_alleles, data):
    """Flexibly check if truth and call alleles match, using p-groups.

    Returns "" when no truth is available, otherwise "yes"/"no" depending
    on whether every expected p-group was called.
    """
    if not truth_alleles:
        return ""
    def _strip_p(allele):
        group = hla_groups.hla_protein(allele, data)
        return group[:-1] if group.endswith("P") else group
    expected = set(_strip_p(a) for a in truth_alleles)
    called = set(_strip_p(a) for a in call_alleles)
    return "yes" if expected.issubset(called) else "no"
def get_hla_truthset(data):
    """Retrieve expected truth calls for annotating HLA called output.

    Reads the CSV configured at config/algorithm/hlavalidate (columns:
    sample, locus, semicolon-separated alleles) and returns a nested dict
    {sample: {locus: [allele, ...]}}. Returns {} when no file is configured.
    """
    val_csv = tz.get_in(["config", "algorithm", "hlavalidate"], data)
    out = {}
    if val_csv and utils.file_exists(val_csv):
        with open(val_csv) as in_handle:
            reader = csv.reader(in_handle)
            # next() builtin instead of reader.next(): works on both
            # Python 2.6+ and Python 3, where .next() was removed.
            next(reader)  # header
            for sample, locus, alleles in (l for l in reader if l):
                out = tz.update_in(out, [sample, locus],
                                   lambda _: [a.strip() for a in alleles.split(";")])
    return out
| {
"content_hash": "3f33447b6dff1eb8bf6f796e6c9ada4e",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 116,
"avg_line_length": 46.157303370786515,
"alnum_prop": 0.5676728334956183,
"repo_name": "lpantano/bcbio-nextgen",
"id": "99d8bcc6d98b6d53e5b200b3377d1f62fe76692c",
"size": "4108",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bcbio/hla/bwakit.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1553199"
},
{
"name": "Ruby",
"bytes": "624"
},
{
"name": "Shell",
"bytes": "14377"
}
],
"symlink_target": ""
} |
"""
Rotations, VTK Textbook figure 3-31a.
Note: Make sure Rotations.py is in the same directory as this program.
"""
import Rotations
def main():
    """Render the rotations figure for VTK Textbook figure 3-31a."""
    file_name, figure, book_color = Rotations.get_program_parameters()
    # Set up for six rotations about the x-axis.
    # Deliberately override whatever was parsed: this script always renders
    # figure 1 with the book's colors (see module docstring).
    figure = 1
    book_color = True
    Rotations.rotate(file_name, figure, book_color)
if __name__ == '__main__':
    main()
| {
"content_hash": "61e605ef71e441152d9e4ec08ed17a7b",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 70,
"avg_line_length": 21,
"alnum_prop": 0.6641604010025063,
"repo_name": "lorensen/VTKExamples",
"id": "db2e2329ad06efe6846146d451226b5cd08ccb18",
"size": "422",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/Python/Rendering/RotationsA.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C#",
"bytes": "322226"
},
{
"name": "C++",
"bytes": "4187688"
},
{
"name": "CMake",
"bytes": "155244"
},
{
"name": "CSS",
"bytes": "556"
},
{
"name": "G-code",
"bytes": "377583"
},
{
"name": "GLSL",
"bytes": "5375"
},
{
"name": "HTML",
"bytes": "635483160"
},
{
"name": "Java",
"bytes": "629442"
},
{
"name": "JavaScript",
"bytes": "18199"
},
{
"name": "Python",
"bytes": "1376010"
},
{
"name": "Shell",
"bytes": "3481"
}
],
"symlink_target": ""
} |
""" Export a Guidebook schedule CSV file with the currently accepted
talks.
Usage: manage.py guidebook_csv ep2015 gb.csv
Guidebook CSV format (UTF-8 encoded):
-------------------------------------
Session Title,Date,Time Start,Time End,Room/Location,Schedule Track (Optional),Description (Optional)
Sample Session: Opening Remarks,4/21/11,10:00 AM,11:00 AM,Main Events,Key Event,The conference chairman will be kicking off the event with opening remarks.
Sample Session: Presentation XYZ,4/21/11,4:00 PM,6:00 PM,Room 101,Key Event; Track 1,John Doe will be presenting on XYZ.
"""
from django.core.management.base import BaseCommand, CommandError
from django.utils.html import strip_tags
from conference import models
import datetime
### Globals
# Talk .type values, name, description
TYPE_NAMES = (
    ('keynote', 'Keynote', ''),
    ('s', 'Talk', ''),
    ('t', 'Training', ''),
    ('p', 'Poster session', ''),
    ('h', 'Help desk', 'Help desks provide slots for attendee to discuss their problems one-on-one with experts from the projects.'),
    ('europython', 'EuroPython session', 'The EuroPython sessions are intended for anyone interested in helping with the EuroPython organization in the coming years.'),
    ('i', 'Other session', ''),
)
def _check_talk_types(type_names):
    """Assert that every talk type code in the DB model is covered above."""
    d = set(x[0] for x in type_names)
    for code, entry in models.TALK_TYPE:
        assert code in d, 'Talk type code %r is missing' % code
# Validate TYPE_NAMES at import time so missing codes fail fast.
_check_talk_types(TYPE_NAMES)
# Headers to use for the GB CSV file
GB_HEADERS = (
    'Session Title',
    'Date', # in format MM/DD/YYYY
    'Time Start', # in format HH:MM AM/PM
    'Time End', # in format HH:MM AM/PM
    'Room/Location', # String
    'Schedule Track (Optional)', # String
    'Description (Optional)', # String
)
# Poster sessions don't have events associated with them, so use these
# defaults
POSTER_START = datetime.datetime(2015,7,21,17,30)
POSTER_DURATION = datetime.timedelta(minutes=90)
POSTER_ROOM = 'Exhibition Hall'
### Helpers
def speaker_listing(talk):
    """Return the talk's speakers as italicized 'First Last' names,
    comma separated."""
    names = ['<i>%s %s</i>' % (s.user.first_name, s.user.last_name)
             for s in talk.get_all_speakers()]
    return ', '.join(names)
def format_text(text, remove_tags=False):
    """Normalize free text: strip whitespace, optionally drop HTML tags,
    and remove one pair of surrounding double quotes."""
    cleaned = text.strip()
    if not cleaned:
        return cleaned
    if remove_tags:
        # Remove links, tags, etc.
        cleaned = strip_tags(cleaned)
    if cleaned.startswith('"') and cleaned.endswith('"'):
        cleaned = cleaned[1:-1]
    return cleaned
def talk_title(talk):
    """Return the cleaned talk title suffixed with its primary key,
    or '' when the title normalizes to nothing."""
    cleaned = format_text(talk.title, remove_tags=True)
    if cleaned:
        return '%s [%s]' % (cleaned, talk.pk)
    return cleaned
def talk_abstract(talk):
    """Return the talk description: 'By <speakers>' plus the cleaned abstract."""
    speakers = speaker_listing(talk)
    body = format_text(talk.getAbstract().body)
    return 'By {0}\n\n{1}'.format(speakers, body)
def event_title(event):
    """Return the cleaned custom event title suffixed with its primary key,
    or '' when it normalizes to nothing."""
    cleaned = format_text(event.custom, remove_tags=True)
    if cleaned:
        return '%s [%s]' % (cleaned, event.pk)
    return cleaned
def event_abstract(event):
    """Return the event's abstract with whitespace/quoting normalized."""
    return format_text(event.abstract)
def add_event(data, talk=None, event=None, session_type='', talk_events=None):
    """Append one Guidebook CSV row for a talk or a bare schedule event.

    :param data: list collecting CSV row tuples (mutated in place).
    :param talk: accepted talk; title/abstract derive from it when given.
    :param event: schedule event; looked up from the talk when omitted.
    :param session_type: track name for the "Schedule Track" column.
    :param talk_events: optional dict mapping event pk -> event, used by the
        caller to know which events were already emitted via talks.
    :raises TypeError: when neither talk nor event is given.
    """
    # Determine title and abstract from the talk if given, else the event.
    title = ''
    abstract = ''
    if talk is None:
        if event is None:
            raise TypeError('need either talk or event given')
        title = event_title(event)
        abstract = event_abstract(event)
    else:
        title = talk_title(talk)
        abstract = talk_abstract(talk)
        if event is None:
            event = talk.get_event()
    # Determine time_range and room
    if event is None:
        if talk.type and talk.type == 'p':
            # Poster sessions have no scheduled event; use the fixed defaults.
            time_range = (POSTER_START,
                          POSTER_START + POSTER_DURATION)
            room = POSTER_ROOM
        else:
            # Fix: report the talk's type, not the `type` builtin the
            # original referenced here.
            print('Talk %r (type %r) does not have an event '
                  'associated with it; skipping' %
                  (title, talk.type))
            return
    else:
        time_range = event.get_time_range()
        tracks = event.tracks.all()
        if tracks:
            room = tracks[0].title
        else:
            room = ''
    # Fix: poster talks reach this point with event still None; guard so we
    # don't crash on event.pk.
    if talk_events is not None and event is not None:
        talk_events[event.pk] = event
    # Don't add entries for events without title
    if not title:
        return
    # Guidebook expects MM/DD/YYYY dates and 12-hour AM/PM times.
    date = time_range[0].strftime('%m/%d/%Y')
    start_time = time_range[0].strftime('%I:%M %p')
    stop_time = time_range[1].strftime('%I:%M %p')
    data.append((
        title,
        date,
        start_time,
        stop_time,
        room,
        session_type,
        abstract,
    ))
###
class Command(BaseCommand):
    """Django management command: export accepted talks as a Guidebook CSV."""
    option_list = BaseCommand.option_list + (
        # make_option('--option',
        #     action='store',
        #     dest='option_attr',
        #     default=0,
        #     type='int',
        #     help='Help text',
        # ),
    )
    def handle(self, *args, **options):
        """Build the CSV: args are (conference, csv_file)."""
        try:
            conference = args[0]
        except IndexError:
            raise CommandError('conference not specified')
        try:
            csv_file = args[1]
        except IndexError:
            raise CommandError('CSV file not specified')
        talks = (models.Talk.objects
                 .filter(conference=conference,
                         status='accepted'))
        # Group by types.  NOTE(review): the loop variable `type` shadows the
        # builtin of the same name.
        talk_types = {}
        for talk in talks:
            # EPS / "EuroPython 20xx" talks and keynotes get synthetic types.
            if 'EPS' in talk.title or 'EuroPython 20' in talk.title:
                type = 'europython'
            elif talk.title.lower().startswith('keynote'):
                type = 'keynote'
            else:
                type = talk.type
            if type in talk_types:
                talk_types[type].append(talk)
            else:
                talk_types[type] = [talk]
        # Create CSV rows, grouped in TYPE_NAMES order.
        data = []
        talk_events = {}
        for type, type_name, description in TYPE_NAMES:
            # Get bag with talks
            bag = talk_types.get(type, [])
            if not bag:
                continue
            # Sort by talk title using title case
            bag.sort(key=lambda talk: talk_title(talk).title())
            # Add talks from bag to csv
            for talk in bag:
                add_event(data, talk=talk, talk_events=talk_events, session_type=type_name)
        # Add events which are not talks (skip events already emitted above).
        for schedule in models.Schedule.objects.filter(conference=conference):
            for event in models.Event.objects.filter(schedule=schedule):
                if event.pk in talk_events:
                    continue
                add_event(data, event=event)
        # Output CSV data, UTF-8 encoded.  Fields are quoted by hand with
        # CSV-style doubled quotes; file opened in binary mode (Python 2).
        data.insert(0, GB_HEADERS)
        with open(csv_file, 'wb') as f:
            for row in data:
                csv_data = ('"%s"' % (str(x).replace('"', '""'))
                            for x in row)
                f.write(','.join(csv_data).encode('utf-8'))
                f.write('\n')
| {
"content_hash": "560036aa83b0510c555d74d0cf50ee1f",
"timestamp": "",
"source": "github",
"line_count": 237,
"max_line_length": 168,
"avg_line_length": 29.80168776371308,
"alnum_prop": 0.5603851054792581,
"repo_name": "EuroPython/epcon",
"id": "c983141ac3895ad088ae7799d68b354b1267014b",
"size": "7064",
"binary": false,
"copies": "1",
"ref": "refs/heads/ep2021",
"path": "p3/management/commands/guidebook_csv.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "6475"
},
{
"name": "Dockerfile",
"bytes": "609"
},
{
"name": "HTML",
"bytes": "412025"
},
{
"name": "JavaScript",
"bytes": "421281"
},
{
"name": "Makefile",
"bytes": "4679"
},
{
"name": "Python",
"bytes": "991334"
},
{
"name": "Shell",
"bytes": "1182"
}
],
"symlink_target": ""
} |
from pycerberus.validators.basic_numbers import *
from pycerberus.validators.checkbox import *
from pycerberus.validators.domain import *
from pycerberus.validators.foreach import *
from pycerberus.validators.email import *
from pycerberus.validators.matching_fields import *
from pycerberus.validators.oneof import *
from pycerberus.validators.string import *
| {
"content_hash": "222e26b84d8524d50a0e90f1c4371067",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 51,
"avg_line_length": 40.22222222222222,
"alnum_prop": 0.8370165745856354,
"repo_name": "gpatonay/popy",
"id": "be899e584a2082e15d0adbc7177a1f4a4de2e3a3",
"size": "363",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pycerberus/validators/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "198330"
}
],
"symlink_target": ""
} |
# Package metadata (consumed by packaging/docs tooling).
__author__ = """Nicholas A. Del Grosso"""
__email__ = 'delgrosso@bio.lmu.de'
__version__ = '0.1.0'
# Re-export the public reading/writing API at package level.
from .reading import read_objfile, parse_mixed_delim_str, read_mtlfile, read_wavefront
from .writing import WavefrontWriter
| {
"content_hash": "b05514e0c9d0bf2e2261f3259f6680db",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 86,
"avg_line_length": 37.333333333333336,
"alnum_prop": 0.7053571428571429,
"repo_name": "neuroneuro15/wavefront_reader",
"id": "c92d326d0c2e5e10f2e35598de421aff7eb07ec4",
"size": "249",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wavefront_reader/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2310"
},
{
"name": "Python",
"bytes": "21266"
}
],
"symlink_target": ""
} |
from parser_3x3_abilities import *
class StrategicLobe(Parser):
def take_the_center(self, intel):
if 4 in intel['options']:
return 4
return False
def take_catty_corner(self, intel):
center = intel['board'][4]
NWSE_diag = intel['analysis'][6]
NESW_diag = intel['analysis'][7]
if center == intel['marker_code']:
if NWSE_diag == 11:
return self.get_empty_square(intel['options'], 6)
elif NESW_diag == 11:
return self.get_empty_square(intel['options'], 7)
return False
def make_default_choice(self, intel):
priorities = [0,2,6,8]
for priority in priorities:
if priority in intel['options']:
return priority
return intel['options'][0]
| {
"content_hash": "33eae2399dbff7466227f14e92887746",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 65,
"avg_line_length": 31.576923076923077,
"alnum_prop": 0.5566382460414129,
"repo_name": "IanDCarroll/xox",
"id": "3f996de7f18a2bc1badab5eb6ebd508aeca84c34",
"size": "821",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Training/strategic_3x3_lobe_slot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "58931"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, print_function
try:
from llvm.core import Builder, Constant, Module, Type
from llvm.core import IPRED_EQ, IPRED_NE, IPRED_SGE, IPRED_SGT, IPRED_SLE, IPRED_SLT
except:
pass
from tinyc import token
from tinyc.analyzer import Analyzer
from tinyc.common import Kinds
class LLVMGenerator(Analyzer):
    """AST visitor that emits LLVM IR for the tinyc language via llvm-py.

    Visitor methods (a_*) set node.ir to the generated llvm value; all
    tinyc values are lowered as 32-bit ints.
    """
    def __init__(self):
        # Counters for detecting returns inside branches and for naming labels.
        self.nbranch = 0
        self.nlabel = 0
        self.op_assign = {
            'ASSIGN': '',
            'ASSIGN_PLUS': '',
            'ASSIGN_MINUS': ''
        }
        self.op_arithmetic = {
            'PLUS': 'add',
            'MINUS': 'sub',
            'MULT': 'imul',
            'DIV': 'idiv'
        }
        self.op_compare = {
            'EQ': IPRED_EQ,
            'NEQ': IPRED_NE,
            'LT': IPRED_SLT,
            'LTE': IPRED_SLE,
            'GT': IPRED_SGT,
            'GTE': IPRED_SGE
        }
        self.op_logical = {
            'LAND': '',
            'LOR': ''
        }
        self.types = {
            'bool': Type.int(1),
            'int': Type.int(32)
        }
        # Forward-declared functions seen in calls before their definition.
        self.undefined_functions = {}
        # function -> (return-value slot, [blocks needing a branch to return])
        self.returns = {}
    def _new_label(self, prefix='label'):
        # NOTE(review): the prefix argument is never used; labels are always
        # "L_<n>".
        self.nlabel += 1
        return("L_{0}".format(self.nlabel))
    def analyze(self, ast, optimize=True):
        """Generate and return an llvm Module for the given AST."""
        self.optimize = optimize
        self.module = Module.new('module')
        ast.accept(self)
        return self.module
    def a_ExternalDeclarationList(self, node):
        u"""Process all global variables first, then the function definitions."""
        # Global variables
        for external_declaration in node.nodes:
            if isinstance(external_declaration, token.Declaration):
                for declarator in external_declaration.declarators.nodes:
                    declarator.identifier.gv = self.module.add_global_variable(
                        self.types['int'], declarator.identifier.name, 0)
        # Function definitions (code)
        for external_declaration in node.nodes:
            if isinstance(external_declaration, token.FunctionDefinition):
                external_declaration.accept(self)
    def a_FunctionDefinition(self, node):
        # Add the function (int(int, ...)) to the module.
        function = self.module.add_function(
            Type.function(
                self.types['int'],
                (self.types['int'],) * len(node.parameter_type_list.nodes)),
            node.declarator.identifier.name)
        node.declarator.identifier.ir = function
        entry = function.append_basic_block(
            'entry_' + node.declarator.identifier.name)
        self.builder = Builder.new(entry)
        # Allocate a stack slot to hold the return value.
        self.returns[function] = (self.builder.alloca(self.types['int']), [],)
        # Spill each parameter to memory so it can be addressed uniformly.
        for i, arg in enumerate(node.parameter_type_list):
            function.args[i].name = arg.declarator.identifier.name
            arg.declarator.identifier.memory = self.builder.alloca(self.types['int'])
            self.builder.store(function.args[i], arg.declarator.identifier.memory)
        # Generate code for the function body.
        node.compound_statement.accept(self)
        return_block = function.append_basic_block(
            'return_' + node.declarator.identifier.name)
        self.builder.branch(return_block)
        self.builder.position_at_end(return_block)
        return_value = self.returns[function][0]
        ir = self.builder.load(return_value, 'return')
        self.builder.ret(ir)
        # Wire every block that executed a `return` statement to the shared
        # return block (unless it already ends in a terminator).
        for block in self.returns[function][1]:
            if not block.instructions[-1].is_terminator:
                self.builder.position_at_end(block)
                self.builder.branch(return_block)
    def a_IfStatement(self, node):
        then_returned = else_returned = False
        # Code for the condition test (nonzero means true).
        node.expr.accept(self)
        ir = self.builder.icmp(
            IPRED_NE, node.expr.ir, Constant.int(self.types['int'], 0))
        function = self.builder.basic_block.function
        then_block = function.append_basic_block(self._new_label())
        else_block = function.append_basic_block(self._new_label())
        # Conditional branch point.
        self.builder.cbranch(ir, then_block, else_block)
        # Generate the then-branch; detect whether it executed a return
        # (a_ReturnStatement bumps self.nbranch).
        self.builder.position_at_end(then_block)
        nbranch = self.nbranch
        node.then_statement.accept(self)
        if nbranch != self.nbranch:
            then_returned = True
            self.nbranch = nbranch
        then_block = self.builder.basic_block
        # Generate the else-branch; same return detection.
        self.builder.position_at_end(else_block)
        if not node.else_statement.is_null():
            node.else_statement.accept(self)
            if nbranch != self.nbranch:
                else_returned = True
                self.nbranch = nbranch
        else_block = self.builder.basic_block
        done_block = function.append_basic_block(self._new_label())
        # For each of then/else: if it did not return internally, fall
        # through to the done block.
        if not then_returned:
            self.builder.position_at_end(then_block)
            self.builder.branch(done_block)
        if not else_returned:
            self.builder.position_at_end(else_block)
            self.builder.branch(done_block)
        self.builder.position_at_end(done_block)
    def a_ReturnStatement(self, node):
        self.nbranch += 1 # signal a return inside a then/else branch
        function = self.builder.basic_block.function
        node.expr.accept(self)
        # Remember this block so the function epilogue can branch it to the
        # shared return block.
        return_value = self.returns[function][0]
        self.returns[function][1].append(self.builder.basic_block)
        self.builder.store(node.expr.ir, return_value)
    def a_WhileLoop(self, node):
        function = self.builder.basic_block.function
        test_block = function.append_basic_block(self._new_label())
        loop_block = function.append_basic_block(self._new_label())
        self.builder.branch(test_block)
        self.builder.position_at_end(test_block)
        node.expr.accept(self)
        # Condition == 0 means exit the loop.
        ir = self.builder.icmp(
            IPRED_EQ, node.expr.ir, Constant.int(self.types['int'], 0))
        self.builder.position_at_end(loop_block)
        node.statement.accept(self)
        self.builder.branch(test_block)
        done_block = function.append_basic_block(self._new_label())
        self.builder.position_at_end(test_block)
        self.builder.cbranch(ir, done_block, loop_block)
        self.builder.position_at_end(done_block)
    def a_FunctionExpression(self, node):
        # Forward-declare functions called before their definition, reusing
        # the declaration for repeated calls.
        if node.function.kind == Kinds.undefined_function:
            if node.function.name in self.undefined_functions:
                node.function.ir = self.undefined_functions[node.function.name]
            else:
                node.function.ir = self.module.add_function(
                    Type.function(
                        self.types['int'],
                        (self.types['int'],) * len(node.argument_list.nodes)),
                    node.function.name)
                self.undefined_functions[node.function.name] = node.function.ir
        node.argument_list.accept(self)
        node.ir = self.builder.call(
            node.function.ir, map(lambda a: a.ir, node.argument_list.nodes))
    def a_Negative(self, node):
        node.expr.accept(self)
        node.ir = self.builder.neg(node.expr.ir)
    def a_Increment(self, node):
        # ++x: add 1 and store back to the variable's memory slot.
        node.expr.accept(self)
        node.ir = self.builder.add(
            node.expr.ir, Constant.int(self.types['int'], 1))
        self.builder.store(node.ir, node.expr.memory)
    def a_Decrement(self, node):
        # --x: subtract 1 and store back to the variable's memory slot.
        node.expr.accept(self)
        node.ir = self.builder.sub(
            node.expr.ir, Constant.int(self.types['int'], 1))
        self.builder.store(node.ir, node.expr.memory)
    def a_BinaryOperator(self, node):
        # Dispatch on operator category (see the op_* tables in __init__).
        if node.op in self.op_assign:
            self._a_BinaryOperator_assign(node)
        elif node.op in self.op_arithmetic:
            self._a_BinaryOperator_arithmetic(node)
        elif node.op in self.op_compare:
            self._a_BinaryOperator_compare(node)
        elif node.op in self.op_logical:
            self._a_BinaryOperator_logical(node)
    def _a_BinaryOperator_assign(self, node):
        node.right.accept(self)
        node.left.accept(self)
        if node.op == 'ASSIGN':
            node.ir = node.right.ir
            ir = node.right.ir
        elif node.op == 'ASSIGN_PLUS':
            node.ir = self.builder.add(node.left.ir, node.right.ir)
            ir = node.ir
        elif node.op == 'ASSIGN_MINUS':
            node.ir = self.builder.sub(node.left.ir, node.right.ir)
            ir = node.ir
        # Store to the local slot when present, otherwise the global.
        if getattr(node.left, 'memory', None) is not None:
            self.builder.store(ir, node.left.memory)
        else:
            self.builder.store(ir, node.left.gv)
    def _a_BinaryOperator_arithmetic(self, node):
        node.right.accept(self)
        node.left.accept(self)
        if node.op == 'PLUS':
            node.ir = self.builder.add(node.left.ir, node.right.ir)
        elif node.op == 'MINUS':
            node.ir = self.builder.sub(node.left.ir, node.right.ir)
        elif node.op == 'MULT':
            node.ir = self.builder.mul(node.left.ir, node.right.ir)
        elif node.op == 'DIV':
            node.ir = self.builder.sdiv(node.left.ir, node.right.ir)
    def _a_BinaryOperator_compare(self, node):
        node.right.accept(self)
        node.left.accept(self)
        comparator = self.op_compare[node.op]
        # icmp yields i1; widen back to the language's int type.
        result = self.builder.icmp(comparator, node.left.ir, node.right.ir)
        node.ir = self.builder.zext(result, self.types['int'])
    def _a_BinaryOperator_logical(self, node):
        # Short-circuit && and || via explicit control flow; the boolean
        # result is materialized in a stack slot.
        function = self.builder.basic_block.function
        result = self.builder.alloca(self.types['int'])
        if node.op == 'LAND':
            # Default 0; only the all-true path stores 1.
            self.builder.store(Constant.int(self.types['int'], 0), result)
            node.left.accept(self)
            left_ir = self.builder.icmp(
                IPRED_EQ, node.left.ir, Constant.int(self.types['int'], 0))
            left_block = self.builder.basic_block
            right_block = function.append_basic_block(self._new_label())
            self.builder.position_at_end(right_block)
            node.right.accept(self)
            right_ir = self.builder.icmp(
                IPRED_EQ, node.right.ir, Constant.int(self.types['int'], 0))
            true_block = function.append_basic_block(self._new_label(''))
            self.builder.position_at_end(true_block)
            self.builder.store(Constant.int(self.types['int'], 1), result)
            done_block = function.append_basic_block(self._new_label())
            self.builder.position_at_end(left_block)
            self.builder.cbranch(left_ir, done_block, right_block)
            self.builder.position_at_end(right_block)
            self.builder.cbranch(right_ir, done_block, true_block)
            self.builder.position_at_end(true_block)
            self.builder.branch(done_block)
            self.builder.position_at_end(done_block)
        elif node.op == 'LOR':
            # Default 1; only the all-false path stores 0.
            self.builder.store(Constant.int(self.types['int'], 1), result)
            node.left.accept(self)
            left_ir = self.builder.icmp(IPRED_EQ, node.left.ir, Constant.int(self.types['int'], 0))
            left_block = self.builder.basic_block
            right_block = function.append_basic_block(self._new_label())
            self.builder.position_at_end(right_block)
            node.right.accept(self)
            right_ir = self.builder.icmp(IPRED_EQ, node.right.ir, Constant.int(self.types['int'], 0))
            false_block = function.append_basic_block(self._new_label())
            self.builder.position_at_end(false_block)
            self.builder.store(Constant.int(self.types['int'], 0), result)
            done_block = function.append_basic_block(self._new_label())
            self.builder.position_at_end(left_block)
            self.builder.cbranch(left_ir, right_block, done_block)
            self.builder.position_at_end(right_block)
            self.builder.cbranch(right_ir, false_block, done_block)
            self.builder.position_at_end(false_block)
            self.builder.branch(done_block)
            self.builder.position_at_end(done_block)
        node.ir = self.builder.load(result)
    def a_Identifier(self, node):
        # Load from the local slot when present, otherwise the global.
        if getattr(node, 'memory', None) is not None:
            node.ir = self.builder.load(node.memory)
        else:
            node.ir = self.builder.load(node.gv)
    def a_Constant(self, node):
        node.ir = Constant.int(self.types['int'], node.value)
| {
"content_hash": "6a1c2fdee8fe66fdfce7686bbab247e1",
"timestamp": "",
"source": "github",
"line_count": 333,
"max_line_length": 101,
"avg_line_length": 38.210210210210214,
"alnum_prop": 0.5922665828355863,
"repo_name": "ymyzk/tinyc",
"id": "f56e78d367b2fe60bea606147701c9b99443f2a9",
"size": "13187",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tinyc/generator/llvm.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "741"
},
{
"name": "Python",
"bytes": "99108"
},
{
"name": "Shell",
"bytes": "1306"
}
],
"symlink_target": ""
} |
import time
import sqlite3
import datetime
"""DB interface for comunicating with sqlite3"""
class DBError(Exception):
pass
class DB(object):
    """SQLite-backed storage for request counters and per-service users.

    Public methods:
        add_request(): increment the global request counter (requests table).
        get_user(): fetch a user record (users table).
        add_user(): insert a new user record (users table).
        update_user(): update an existing user record (users table).

    Exceptions:
        DBError: Something went wrong when trying to connect/interact
                 with the database.
    """
    def __init__(self, dbname):
        """Create a new db object.

        :param: dbname (string) the path of the database.
        """
        self.dbname = dbname
    def connect(self):
        """Open the database and enable name-based access to row columns."""
        try:
            self.con = sqlite3.connect(self.dbname)
            self.con.row_factory = sqlite3.Row
        except sqlite3.Error as e:
            raise DBError("%s" % str(e))
    def add_request(self):
        """Record one more request by bumping the single global counter."""
        try:
            with self.con:
                cursor = self.con.cursor()
                cursor.execute("SELECT counter FROM requests WHERE id = 1")
                row = cursor.fetchone()
                if row is None:
                    # First request ever: create the counter row.
                    cursor.execute("INSERT INTO requests VALUES(?, ?)", (1, 1))
                else:
                    cursor.execute("UPDATE requests SET counter=? WHERE id=?",
                                   (row['counter'] + 1, 1))
        except sqlite3.Error as e:
            raise DBError("%s" % str(e))
    def get_user(self, user, service):
        """Return the row for (user, service), or None when absent.

        :param: user (string) unique (hashed) string that represents the user.
        :param: service (string) the service related to the user (e.g. SMTP).
        :return: (sqlite3.Row) user information, addressable by column name.
        """
        try:
            with self.con:
                cursor = self.con.cursor()
                cursor.execute("SELECT * FROM users WHERE id =? AND service =?",
                               (user, service))
                return cursor.fetchone()
        except sqlite3.Error as e:
            raise DBError("%s" % str(e))
    def add_user(self, user, service, blocked):
        """Insert a user with times=1 and last_request set to now.

        :param: user (string) unique (hashed) string that represents the user.
        :param: service (string) the service related to the user (e.g. SMTP).
        :param: blocked (int) one if user is blocked, zero otherwise.
        """
        try:
            with self.con:
                cursor = self.con.cursor()
                cursor.execute("INSERT INTO users VALUES(?,?,?,?,?)",
                               (user, service, 1, blocked, str(time.time())))
        except sqlite3.Error as e:
            raise DBError("%s" % str(e))
    def update_user(self, user, service, times, blocked):
        """Update times/blocked for (user, service), stamping last_request.

        :param: user (string) unique (hashed) string that represents the user.
        :param: service (string) the service related to the user (e.g. SMTP).
        :param: times (int) the number of requests the user has made.
        :param: blocked (int) one if user is blocked, zero otherwise.
        """
        try:
            with self.con:
                cursor = self.con.cursor()
                cursor.execute("UPDATE users SET times =?, blocked =?,"
                               " last_request =? WHERE id =? AND service =?",
                               (times, blocked, str(time.time()), user, service))
        except sqlite3.Error as e:
            raise DBError("%s" % str(e))
| {
"content_hash": "45a96d12cdf970c87911e61c994f14e4",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 78,
"avg_line_length": 32.09756097560975,
"alnum_prop": 0.5364741641337386,
"repo_name": "ilv/gettor",
"id": "7a595a308374715bdd67275edaee4fb69e69baf7",
"size": "4287",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "gettor/db.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "151291"
},
{
"name": "Smarty",
"bytes": "5150"
}
],
"symlink_target": ""
} |
import libsvm
import argparse
from cPickle import load
from learn import extractSift, computeHistograms, writeHistogramsToFile
import os
#default file paths
HISTOGRAMS_FILE = 'testdata.svm'  # scratch file holding histograms passed to libsvm
CODEBOOK_FILE = 'codebook.file'  # default pickled visual-word codebook
MODEL_FILE = 'trainingdata.svm.model'  # default trained SVM model
#parsing for classification purpose
#python classify.py -c path_to_folders_with_images/codebook.file -m path_to_folders_with_images/trainingdata.svm.model images_you_want_to_classify
def parse_arguments():
parser = argparse.ArgumentParser(description='classify images with a visual bag of words model')
parser.add_argument('-c', help='path to the codebook file', required=False, default=CODEBOOK_FILE)
parser.add_argument('-m', help='path to the model file', required=False, default=MODEL_FILE)
parser.add_argument('input_images', help='images to classify', nargs='+')
args = parser.parse_args()
#print('check for arguments passed :')
#print('path to the model file',args.m)
#print('path to the codebook file',args.c)
#print(args.input_images)
return args
#arguments are passed for predicting image
def predict_image(model_file,codebook_file,fnames):
print str(fnames[0])+".sift"
try:
os.remove(str(fnames[0])+".sift")
except OSError:
pass
# extract Sift features for each individual image"
all_files = []
all_files_labels = {}
all_features = {}
print fnames
try:
all_features = extractSift(fnames)
for i in fnames:
all_files_labels[i] = 0 # label is unknown
# loading codebook from codebook_file
# default codebookfile: codebook.file
with open(codebook_file, 'rb') as f:
codebook = load(f)
# computing visual word histograms"
all_word_histgrams = {}
for imagefname in all_features:
word_histgram = computeHistograms(codebook, all_features[imagefname])
all_word_histgrams[imagefname] = word_histgram
# write the histograms to file to pass it to the svm
nclusters = codebook.shape[0]
writeHistogramsToFile(nclusters, all_files_labels,
fnames,
all_word_histgrams,
HISTOGRAMS_FILE)
# test data with svm"
print libsvm.test(HISTOGRAMS_FILE, model_file)
except Exception as e:
pass #Incase of error with sift extraction, just try again on next stroke
if __name__=="__main__":
args = parse_arguments()
model_file = args.m
codebook_file = args.c
fnames = args.input_images
predict_image(model_file,codebook_file,fnames)
| {
"content_hash": "ebec1dad66975e1ed335d66658db7d22",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 146,
"avg_line_length": 32.12,
"alnum_prop": 0.7256122872561229,
"repo_name": "navinpai/CS706",
"id": "7560cbf172d6863efefa853536155f301538d220",
"size": "2409",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "How_Humans_Sketch_CS706/classify.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "85811"
},
{
"name": "C++",
"bytes": "96257"
},
{
"name": "Java",
"bytes": "100815"
},
{
"name": "M",
"bytes": "3271"
},
{
"name": "Makefile",
"bytes": "4224"
},
{
"name": "Matlab",
"bytes": "4100"
},
{
"name": "Python",
"bytes": "93240"
}
],
"symlink_target": ""
} |
from django.template import Library
from django.template.base import Node
from django.test import TestCase
class FilterRegistrationTests(TestCase):
    """Every supported call form of ``Library.filter`` registers the filter."""

    def setUp(self):
        self.library = Library()

    def test_filter(self):
        # Bare decorator: registered under the function's own name.
        lib = self.library

        @lib.filter
        def func():
            return ''
        self.assertEqual(lib.filters['func'], func)

    def test_filter_parens(self):
        # Decorator factory called with no arguments.
        lib = self.library

        @lib.filter()
        def func():
            return ''
        self.assertEqual(lib.filters['func'], func)

    def test_filter_name_arg(self):
        # A positional name overrides the function name.
        lib = self.library

        @lib.filter('name')
        def func():
            return ''
        self.assertEqual(lib.filters['name'], func)

    def test_filter_name_kwarg(self):
        lib = self.library

        @lib.filter(name='name')
        def func():
            return ''
        self.assertEqual(lib.filters['name'], func)

    def test_filter_call(self):
        # Direct (non-decorator) registration.
        def func():
            return ''
        self.library.filter('name', func)
        self.assertEqual(self.library.filters['name'], func)

    def test_filter_invalid(self):
        expected = "Unsupported arguments to Library.filter: (None, '')"
        with self.assertRaisesMessage(ValueError, expected):
            self.library.filter(None, '')
class InclusionTagRegistrationTests(TestCase):
    """Registration of inclusion tags under default and explicit names."""

    def setUp(self):
        self.library = Library()

    def test_inclusion_tag(self):
        lib = self.library

        @lib.inclusion_tag('template.html')
        def func():
            return ''
        self.assertIn('func', lib.tags)

    def test_inclusion_tag_name(self):
        lib = self.library

        @lib.inclusion_tag('template.html', name='name')
        def func():
            return ''
        self.assertIn('name', lib.tags)
class SimpleTagRegistrationTests(TestCase):
    """Every supported call form of ``Library.simple_tag``."""

    def setUp(self):
        self.library = Library()

    def test_simple_tag(self):
        lib = self.library

        @lib.simple_tag
        def func():
            return ''
        self.assertIn('func', lib.tags)

    def test_simple_tag_parens(self):
        lib = self.library

        @lib.simple_tag()
        def func():
            return ''
        self.assertIn('func', lib.tags)

    def test_simple_tag_name_kwarg(self):
        lib = self.library

        @lib.simple_tag(name='name')
        def func():
            return ''
        self.assertIn('name', lib.tags)

    def test_simple_tag_invalid(self):
        expected = "Invalid arguments provided to simple_tag"
        with self.assertRaisesMessage(ValueError, expected):
            self.library.simple_tag('invalid')
class TagRegistrationTests(TestCase):
    """Every supported call form of ``Library.tag``."""

    def setUp(self):
        self.library = Library()

    def test_tag(self):
        lib = self.library

        @lib.tag
        def func(parser, token):
            return Node()
        self.assertEqual(lib.tags['func'], func)

    def test_tag_parens(self):
        lib = self.library

        @lib.tag()
        def func(parser, token):
            return Node()
        self.assertEqual(lib.tags['func'], func)

    def test_tag_name_arg(self):
        lib = self.library

        @lib.tag('name')
        def func(parser, token):
            return Node()
        self.assertEqual(lib.tags['name'], func)

    def test_tag_name_kwarg(self):
        lib = self.library

        @lib.tag(name='name')
        def func(parser, token):
            return Node()
        self.assertEqual(lib.tags['name'], func)

    def test_tag_call(self):
        # Direct (non-decorator) registration.
        def func(parser, token):
            return Node()
        self.library.tag('name', func)
        self.assertEqual(self.library.tags['name'], func)

    def test_tag_invalid(self):
        expected = "Unsupported arguments to Library.tag: (None, '')"
        with self.assertRaisesMessage(ValueError, expected):
            self.library.tag(None, '')
| {
"content_hash": "a15ce8c0397de18901afab78e676642c",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 67,
"avg_line_length": 29.431818181818183,
"alnum_prop": 0.5742599742599742,
"repo_name": "yephper/django",
"id": "2e58102bfddf9a5abe710c85116a7d78202186c8",
"size": "3885",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/template_tests/test_library.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "1538"
},
{
"name": "CSS",
"bytes": "1697381"
},
{
"name": "HTML",
"bytes": "390772"
},
{
"name": "Java",
"bytes": "588"
},
{
"name": "JavaScript",
"bytes": "3172126"
},
{
"name": "Makefile",
"bytes": "134"
},
{
"name": "PHP",
"bytes": "19336"
},
{
"name": "Python",
"bytes": "13365273"
},
{
"name": "Shell",
"bytes": "837"
},
{
"name": "Smarty",
"bytes": "133"
}
],
"symlink_target": ""
} |
# Package metadata for unihan-etl, consumed by packaging and documentation.
__title__ = 'unihan-etl'
__package_name__ = 'unihan_etl'
__description__ = 'Export UNIHAN to Python, Data Package, CSV, JSON and YAML'
__version__ = '0.10.3'
# Authorship and contact.
__author__ = 'Tony Narlock'
__email__ = 'cihai@git-pull.com'
# Project URLs.
__github__ = 'https://github.com/cihai/unihan-etl'
__docs__ = 'https://unihan-etl.git-pull.com'
__tracker__ = 'https://github.com/cihai/unihan-etl/issues'
__pypi__ = 'https://pypi.python.org/pypi/unihan-etl'
# Licensing.
__license__ = 'MIT'
__copyright__ = 'Copyright 2013-present cihai software foundation (tony narlock)'
| {
"content_hash": "74d4ec27e7a015ebc59f1656d7204f01",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 81,
"avg_line_length": 44.083333333333336,
"alnum_prop": 0.6597353497164461,
"repo_name": "cihai/cihaidata-unihan",
"id": "97b9414077a550dcd9f2d8027e7a03a7578225dc",
"size": "529",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "unihan_etl/__about__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "807"
},
{
"name": "Python",
"bytes": "43894"
}
],
"symlink_target": ""
} |
import tempfile
import subprocess
from typing import List
from typeguard import check_argument_types
from neuralmonkey.logging import warn
from neuralmonkey.evaluators.evaluator import Evaluator
# pylint: disable=too-few-public-methods
class MultEvalWrapper(Evaluator[List[str]]):
    """Wrapper for mult-eval's reference BLEU and METEOR scorer."""

    def __init__(self,
                 wrapper: str,
                 name: str = "MultEval",
                 encoding: str = "utf-8",
                 metric: str = "bleu",
                 language: str = "en") -> None:
        """Initialize the wrapper.

        Arguments:
            wrapper: Path to multeval.sh script
            name: Name of the evaluator
            encoding: Encoding of input files
            language: Language of hypotheses and references
            metric: Evaluation metric "bleu", "ter", "meteor"
        """
        check_argument_types()
        super().__init__("{}_{}_{}".format(name, metric, language))
        self.wrapper = wrapper
        self.encoding = encoding
        self.language = language
        self.metric = metric
        # Unknown metric names fall back to BLEU instead of failing later.
        if self.metric not in ["bleu", "ter", "meteor"]:
            warn("{} metric is not valid. Using bleu instead.".
                 format(self.metric))
            self.metric = "bleu"

    def score_batch(self,
                    hypotheses: List[List[str]],
                    references: List[List[str]]) -> float:
        """Score hypotheses against references via the external multeval tool.

        Returns the metric value scaled to [0, 1], or 0.0 when the tool's
        output is empty or cannot be parsed.
        """
        ref_bytes = self.serialize_to_bytes(references)
        hyp_bytes = self.serialize_to_bytes(hypotheses)
        with tempfile.NamedTemporaryFile() as reffile, \
                tempfile.NamedTemporaryFile() as hypfile:
            reffile.write(ref_bytes)
            reffile.flush()
            hypfile.write(hyp_bytes)
            hypfile.flush()
            args = [self.wrapper, "eval", "--refs", reffile.name,
                    "--hyps-baseline", hypfile.name, "--metrics", self.metric]
            if self.metric == "meteor":
                args.extend(["--meteor.language", self.language])
                # problem: if meteor run for the first time,
                # paraphrase tables are downloaded
            output_proc = subprocess.run(
                args, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
            proc_stdout = output_proc.stdout.decode("utf-8")  # type: ignore
            lines = proc_stdout.splitlines()
            if not lines:
                return 0.0
            try:
                # Second output line, second column holds the score in percent.
                filtered = float(lines[1].split()[1])
                eval_score = filtered / 100.
                return eval_score
            except IndexError:
                warn("Error: Malformed output from MultEval wrapper:")
                warn(proc_stdout)
                warn("=======")
                return 0.0
            except ValueError:
                # Bug fix: the score is parsed from lines[1], so report that
                # line (previously the unrelated lines[0] was shown).
                warn("Value error - '{}' is not a number.".format(lines[1]))
                return 0.0

    def serialize_to_bytes(self, sentences: List[List[str]]) -> bytes:
        """Join token lists into newline-terminated text and encode it."""
        joined = [" ".join(r) for r in sentences]
        string = "\n".join(joined) + "\n"
        return string.encode(self.encoding)
| {
"content_hash": "486aef01ac89504f9c121d27b72dcfda",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 78,
"avg_line_length": 35.47191011235955,
"alnum_prop": 0.5470383275261324,
"repo_name": "ufal/neuralmonkey",
"id": "bb0ae7bd0fe9c2d0d84f6d440e603d216b4a6421",
"size": "3157",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neuralmonkey/evaluators/multeval.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "13780"
},
{
"name": "HTML",
"bytes": "3116"
},
{
"name": "JavaScript",
"bytes": "2070"
},
{
"name": "Makefile",
"bytes": "2564"
},
{
"name": "Mask",
"bytes": "69384"
},
{
"name": "Mathematica",
"bytes": "1874"
},
{
"name": "Perl",
"bytes": "45129"
},
{
"name": "Python",
"bytes": "823152"
},
{
"name": "Shell",
"bytes": "4671"
}
],
"symlink_target": ""
} |
import collections as coll
import numpy as np
from scipy import ndimage
import warnings
from skimage.util import img_as_float, regular_grid
from skimage.segmentation._slic import _slic_cython, _enforce_label_connectivity_cython
from skimage.color import rgb2lab
def slic(image, n_segments=100, compactness=10., max_iter=10, sigma=None,
         spacing=None, multichannel=True, convert2lab=True, ratio=None,
         enforce_connectivity=False, min_size_factor=0.5, max_size_factor=3,
         slic_zero=False):
    """Segments image using k-means clustering in Color-(x,y,z) space.
    Parameters
    ----------
    image : 2D, 3D or 4D ndarray
        Input image, which can be 2D or 3D, and grayscale or multichannel
        (see `multichannel` parameter).
    n_segments : int, optional
        The (approximate) number of labels in the segmented output image.
    compactness : float, optional
        Balances color-space proximity and image-space proximity. Higher
        values give more weight to image-space. As `compactness` tends to
        infinity, superpixel shapes become square/cubic. In SLICO mode, this
        is the initial compactness.
    max_iter : int, optional
        Maximum number of iterations of k-means.
    sigma : float or (3,) array-like of floats, optional
        Width of Gaussian smoothing kernel for pre-processing for each
        dimension of the image. The same sigma is applied to each dimension in
        case of a scalar value. Zero means no smoothing.
        Note, that `sigma` is automatically scaled if it is scalar and a
        manual voxel spacing is provided (see Notes section).
    spacing : (3,) array-like of floats, optional
        The voxel spacing along each image dimension. By default, `slic`
        assumes uniform spacing (same voxel resolution along z, y and x).
        This parameter controls the weights of the distances along z, y,
        and x during k-means clustering.
    multichannel : bool, optional
        Whether the last axis of the image is to be interpreted as multiple
        channels or another spatial dimension.
    convert2lab : bool, optional
        Whether the input should be converted to Lab colorspace prior to
        segmentation. For this purpose, the input is assumed to be RGB. Highly
        recommended.
    ratio : float, optional
        Synonym for `compactness`. This keyword is deprecated.
    enforce_connectivity: bool, optional (default False)
        Whether the generated segments are connected or not
    min_size_factor: float, optional
        Proportion of the minimum segment size to be removed with respect
        to the supposed segment size ```depth*width*height/n_segments```
    max_size_factor: float, optional
        Proportion of the maximum connected segment size. A value of 3 works
        in most of the cases.
    slic_zero: bool, optional
        Run SLIC-zero, the zero-parameter mode of SLIC
    Returns
    -------
    labels : 2D or 3D array
        Integer mask indicating segment labels.
    Raises
    ------
    ValueError
        If:
        - the image dimension is not 2 or 3 and `multichannel == False`, OR
        - the image dimension is not 3 or 4 and `multichannel == True`
    Notes
    -----
    * If `sigma > 0`, the image is smoothed using a Gaussian kernel prior to
      segmentation.
    * If `sigma` is scalar and `spacing` is provided, the kernel width is
      divided along each dimension by the spacing. For example, if ``sigma=1``
      and ``spacing=[5, 1, 1]``, the effective `sigma` is ``[0.2, 1, 1]``. This
      ensures sensible smoothing for anisotropic images.
    * The image is rescaled to be in [0, 1] prior to processing.
    * Images of shape (M, N, 3) are interpreted as 2D RGB images by default. To
      interpret them as 3D with the last dimension having length 3, use
      `multichannel=False`.
    References
    ----------
    .. [1] Radhakrishna Achanta, Appu Shaji, Kevin Smith, Aurelien Lucchi,
        Pascal Fua, and Sabine Süsstrunk, SLIC Superpixels Compared to
        State-of-the-art Superpixel Methods, TPAMI, May 2012.
    Examples
    --------
    >>> from skimage.segmentation import slic
    >>> from skimage.data import lena
    >>> img = lena()
    >>> segments = slic(img, n_segments=100, compactness=10, sigma=0)
    Increasing the compactness parameter yields more square regions:
    >>> segments = slic(img, n_segments=100, compactness=20, sigma=0)
    """
    # Transitional handling of deprecated defaults / renamed keywords.
    if sigma is None:
        warnings.warn('Default value of keyword `sigma` changed from ``1`` '
                      'to ``0``.')
        sigma = 0
    if ratio is not None:
        warnings.warn('Keyword `ratio` is deprecated. Use `compactness` '
                      'instead.')
        compactness = ratio
    if enforce_connectivity is None:
        warnings.warn('Deprecation: enforce_connectivity will default to'
                      ' True in future versions.')
        enforce_connectivity = False
    image = img_as_float(image)
    is_2d = False
    # Normalize every input to a 4D (depth, height, width, channels) array.
    if image.ndim == 2:
        # 2D grayscale image
        image = image[np.newaxis, ..., np.newaxis]
        is_2d = True
    elif image.ndim == 3 and multichannel:
        # Make 2D multichannel image 3D with depth = 1
        image = image[np.newaxis, ...]
        is_2d = True
    elif image.ndim == 3 and not multichannel:
        # Add channel as single last dimension
        image = image[..., np.newaxis]
    if spacing is None:
        spacing = np.ones(3)
    elif isinstance(spacing, (list, tuple)):
        spacing = np.array(spacing, dtype=np.double)
    if not isinstance(sigma, coll.Iterable):
        # Scalar sigma: broadcast per axis and rescale by the voxel
        # spacing (see Notes in the docstring).
        sigma = np.array([sigma, sigma, sigma], dtype=np.double)
        sigma /= spacing.astype(np.double)
    elif isinstance(sigma, (list, tuple)):
        sigma = np.array(sigma, dtype=np.double)
    if (sigma > 0).any():
        # add zero smoothing for multichannel dimension
        sigma = list(sigma) + [0]
        image = ndimage.gaussian_filter(image, sigma)
    if convert2lab and multichannel:
        if image.shape[3] != 3:
            raise ValueError("Lab colorspace conversion requires a RGB image.")
        image = rgb2lab(image)
    depth, height, width = image.shape[:3]
    # initialize cluster centroids for desired number of segments
    grid_z, grid_y, grid_x = np.mgrid[:depth, :height, :width]
    slices = regular_grid(image.shape[:3], n_segments)
    step_z, step_y, step_x = [int(s.step) for s in slices]
    segments_z = grid_z[slices]
    segments_y = grid_y[slices]
    segments_x = grid_x[slices]
    # Each initial cluster row is [z, y, x, color...]; colors start at zero.
    segments_color = np.zeros(segments_z.shape + (image.shape[3],))
    segments = np.concatenate([segments_z[..., np.newaxis],
                               segments_y[..., np.newaxis],
                               segments_x[..., np.newaxis],
                               segments_color],
                              axis=-1).reshape(-1, 3 + image.shape[3])
    segments = np.ascontiguousarray(segments)
    # we do the scaling of ratio in the same way as in the SLIC paper
    # so the values have the same meaning
    step = float(max((step_z, step_y, step_x)))
    ratio = 1.0 / compactness
    image = np.ascontiguousarray(image * ratio)
    labels = _slic_cython(image, segments, step, max_iter, spacing, slic_zero)
    if enforce_connectivity:
        # Relabel so each output segment is one connected component whose
        # voxel count lies between min_size and max_size.
        segment_size = depth * height * width / n_segments
        min_size = int(min_size_factor * segment_size)
        max_size = int(max_size_factor * segment_size)
        labels = _enforce_label_connectivity_cython(labels,
                                                    n_segments,
                                                    min_size,
                                                    max_size)
    if is_2d:
        # Drop the singleton depth axis added above.
        labels = labels[0]
    return labels
| {
"content_hash": "50cc6555f2c18b597735580b3d347e44",
"timestamp": "",
"source": "github",
"line_count": 196,
"max_line_length": 87,
"avg_line_length": 40.025510204081634,
"alnum_prop": 0.6281708094327597,
"repo_name": "chintak/scikit-image",
"id": "11373381255d99bbbc8e84631db71b0c4d8fa24f",
"size": "7862",
"binary": false,
"copies": "1",
"ref": "refs/heads/placeholder",
"path": "skimage/segmentation/slic_superpixels.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "70225"
},
{
"name": "CSS",
"bytes": "3629"
},
{
"name": "JavaScript",
"bytes": "777"
},
{
"name": "Python",
"bytes": "2115723"
},
{
"name": "Shell",
"bytes": "3346"
}
],
"symlink_target": ""
} |
import unittest
import datetime
import numpy as np
import desisurvey.tiles
import desisurvey.config
from desisurvey.test.base import Tester
from desisurvey.plan import Planner
class TestPlan(Tester):
    def test_plan(self):
        """Run afternoon planning night-by-night and verify snapshots restore."""
        tiles = desisurvey.tiles.get_tiles()
        donefrac = np.zeros(tiles.ntiles, 'f4')
        num_nights = (self.stop - self.start).days
        rng = np.random.RandomState(123)
        config = desisurvey.config.Configuration()
        for cadence in 'daily', 'monthly':
            config.fiber_assignment_cadence.set_value(cadence)
            plan = Planner(simulate=True)
            restored = None
            for day in range(num_nights):
                night = self.start + datetime.timedelta(day)
                # Run afternoon plan using the live and the restored objects.
                avail, planned = plan.afternoon_plan(night)
                if restored is not None:
                    # The restored planner must reproduce the live one.
                    avail2, planned2 = restored.afternoon_plan(night)
                    self.assertTrue(np.array_equal(avail, avail2))
                    self.assertTrue(np.array_equal(planned, planned2))
                    self.assertTrue(np.array_equal(plan.tile_countdown, restored.tile_countdown))
                    self.assertTrue(np.array_equal(plan.donefrac, restored.donefrac))
                    # Round off in the ecsv file is okay.
                    self.assertTrue(
                        np.max(np.abs(plan.designha - restored.designha)) < 0.01)
                # Mark a random set of tiles completed after this night.
                already_done = donefrac == 1
                donefrac[rng.choice(tiles.ntiles, tiles.ntiles // num_nights)] = 1.
                plan.set_donefrac(tiles.tileID[~already_done], donefrac[~already_done])
                # Save and restore our state.
                plan.save('snapshot.ecsv')
                restored = Planner(restore='snapshot.ecsv', simulate=True)
def test_suite():
    """Allows testing of only this module with the command::

        python setup.py test -m <modulename>
    """
    loader = unittest.defaultTestLoader
    return loader.loadTestsFromName(__name__)
| {
"content_hash": "e964cfda9d17260a10b6c9d7261c5a31",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 94,
"avg_line_length": 42.15384615384615,
"alnum_prop": 0.6031021897810219,
"repo_name": "desihub/desisurvey",
"id": "35dfb205fda592993b2c0dea8685b63c8d8bc927",
"size": "2192",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "py/desisurvey/test/test_plan.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "533431"
},
{
"name": "Shell",
"bytes": "3254"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from __future__ import print_function
import os
import sys
import sh
from sh import uwsgi, ErrorReturnCode
from termcolor import colored
from studio.frame import config
from studio.launch.base import manager
from .contrib import build_structure
uwsgi_manager = manager.subcommand('uwsgi')
def _uwsgi_common(*args, **kwargs):
    """Invoke ``uwsgi`` with options collected from the common config.

    Every ``UWSGI_*`` config entry is forwarded as a lower-cased keyword
    argument, except ``UWSGI_LOGFILE`` (handled specially by callers) and
    the vassal-scoped ``UWSGI_VASSAL_*`` keys.
    """
    for key in config.common:
        # LOGFILE is a special-cased setting; skip it here.
        if not key.startswith('UWSGI_') or key == 'UWSGI_LOGFILE':
            continue
        val = config.common[key]
        # Vassal-level settings are not passed on the command line.
        if key.startswith('UWSGI_VASSAL_'):
            continue
        kwargs[key[6:].lower()] = val
    env = os.environ.copy()
    print(kwargs)
    return uwsgi(_out=sys.stdout, _err=sys.stderr,
                 _env=env,
                 *args, **kwargs)
def is_alive(pidfile):
    """Return True when the process recorded in ``pidfile`` is running.

    Sends signal 0 via ``kill``, which probes for existence without
    affecting the target process.
    """
    if not os.path.exists(pidfile):
        return False
    with open(pidfile, 'rb') as fp:
        pid = fp.read()
    try:
        sh.kill('-0', pid.strip())
    except ErrorReturnCode:
        return False
    return True
@uwsgi_manager.command
def start():
    """Launch uWSGI as a daemon unless it is already running."""
    pidfile = config.common['UWSGI_PIDFILE']
    print('Starting uWSGI:', end=' ')
    if not is_alive(pidfile):
        _uwsgi_common(daemonize=config.common['UWSGI_LOGFILE'])
        print(colored('uWSGI', 'green', attrs=['bold']) + '.')
    else:
        print(colored('failed', 'red', attrs=['bold']) +
              ', uWSGI is already running.')
@uwsgi_manager.command
def debug():
    """Run uWSGI in the foreground until interrupted."""
    pidfile = config.common['UWSGI_PIDFILE']
    print('Debugging uWSGI:', end=' ')
    if is_alive(pidfile):
        print(colored('failed', 'red', attrs=['bold']) +
              ', uWSGI is already running.')
        return
    proc = _uwsgi_common(catch_exceptions=True,
                         die_on_term=True, _bg=True)
    print(colored('uWSGI', 'green', attrs=['bold']) + '.')
    try:
        proc.wait()
    except KeyboardInterrupt:
        # Ctrl-C: shut the foreground server down cleanly.
        proc.terminate()
        proc.wait()
@uwsgi_manager.command
def reload():
    """Ask the running uWSGI master to reload via its pidfile."""
    pidfile = config.common['UWSGI_PIDFILE']
    print('Reloading uWSGI:', end=' ')
    try:
        uwsgi(reload=pidfile)
    except ErrorReturnCode:
        outcome = colored('failed', 'red', attrs=['bold']) + '.'
    else:
        outcome = colored('uWSGI', 'green', attrs=['bold']) + '.'
    print(outcome)
@uwsgi_manager.command
def stop():
    """Stop the running uWSGI instance via its pidfile."""
    pidfile = config.common['UWSGI_PIDFILE']
    print('Stopping uWSGI:', end=' ')
    try:
        uwsgi(stop=pidfile)
    except ErrorReturnCode:
        outcome = colored('failed', 'red', attrs=['bold']) + '.'
    else:
        outcome = colored('uWSGI', 'green', attrs=['bold']) + '.'
    print(outcome)
@uwsgi_manager.command
def restart():
    """Stop uWSGI, wait for the old process to exit, then start it again."""
    import time  # local import: only needed for the shutdown poll below
    pidfile = config.common['UWSGI_PIDFILE']
    stop()
    count = 0
    while is_alive(pidfile):
        # Progress indicator while waiting for the old master to exit.
        print('.', end='')
        count += 1
        print('\b' * count)
        # Poll instead of busy-spinning; the bare loop pinned a CPU core.
        time.sleep(0.1)
    start()
@uwsgi_manager.command
def log():
    """Tail the uWSGI logfile until interrupted."""
    logfile = config.common['UWSGI_LOGFILE']

    def echo_line(line):
        print(line.strip(), file=sys.stdout)

    tail_proc = sh.tail(logfile, follow=True, _out=echo_line)
    try:
        tail_proc.wait()
    except KeyboardInterrupt:
        print(colored('\nleave log', 'red'))
        tail_proc.terminate()
| {
"content_hash": "c51e6eaa75df5cb7e5b4808a0ae6ca93",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 64,
"avg_line_length": 27.252032520325205,
"alnum_prop": 0.5802505966587113,
"repo_name": "qisanstudio/qstudio-launch",
"id": "3489645188a4316924bf0196e22dbf29f563b33b",
"size": "3386",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/studio/launch/commands/uwsgi_commands.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "9630"
},
{
"name": "HTML",
"bytes": "43390"
},
{
"name": "Mako",
"bytes": "1236"
},
{
"name": "Python",
"bytes": "34152"
}
],
"symlink_target": ""
} |
"""
NAME
huji_magic.py
DESCRIPTION
converts HUJI format files to measurements format files
SYNTAX
huji_magic.py [command line options]
OPTIONS
-h: prints the help message and quits.
-usr USER: Colon delimited list of analysts, default is ""
-ID: directory for input file if not included in -f flag
-f FILE: specify infile file, required
-fd FILE: specify HUJI datafile with sample orientations
-WD: directory to output files to (default : current directory)
-F FILE: specify output measurements file, default is measurements.txt
-Fsp FILE: specify output specimens.txt file, default is specimens.txt
-Fsa FILE: specify output samples.txt file, default is samples.txt
-Fsi FILE: specify output sites.txt file, default is sites.txt
-Flo FILE: specify output locations.txt file, default is locations.txt
-A: don't average replicate measurements
-LP [colon delimited list of protocols, include all that apply]
AF: af demag
T: thermal including thellier but not trm acquisition
N: NRM only
TRM: trm acquisition
ANI: anisotropy experiment
CR: cooling rate experiment.
The treatment coding of the measurement file should be: XXX.00,XXX.10, XXX.20 ...XX.70 etc. (XXX.00 is optional)
where XXX in the temperature and .10,.20... are running numbers of the cooling rates steps.
XXX.00 is optional zerofield baseline. XXX.70 is alteration check.
syntax in sio_magic is: -LP CR xxx,yyy,zzz,.....xx
where xx, yyy,zzz...xxx are cooling time in [K/minutes], seperated by comma, ordered at the same order as XXX.10,XXX.20 ...XX.70
if you use a zerofield step then no need to specify the cooling rate for the zerofield
-spc NUM : specify number of characters to designate a specimen, default = 0
-loc LOCNAME : specify location/study name, must have either LOCNAME or SAMPFILE or be a synthetic
-dc B PHI THETA: dc lab field (in micro tesla) and phi,theta, default is none
NB: use PHI, THETA = -1 -1 to signal that it changes, i.e. in anisotropy experiment
# to do! -ac B : peak AF field (in mT) for ARM acquisition, default is none
-ncn NCON: specify naming convention: default is #1 below
Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
[2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitary length)
[3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitary length)
[4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX
[5] site name same as sample
[6] site is entered under a separate column -- NOT CURRENTLY SUPPORTED
[7-Z] [XXXX]YYY: XXXX is site designation with Z characters with sample name XXXXYYYY
NB: all others you will have to customize your self
or e-mail ltauxe@ucsd.edu for help.
INPUT
    separate experiments (AF, thermal, thellier, trm acquisition) should be separate files
    (e.g. af.txt, thermal.txt, etc.)
    HUJI measurement file format (space delimited text):
Spec lab-running-numbe-code Date Hour Treatment-type(T/N/A) Treatment(XXX.XX) dec(geo) inc(geo) dec(tilt) inc(tilt)
---------
conventions:
Spec: specimen name
Treat: treatment step
XXX T in Centigrade
XXX AF in mT
for special experiments:
Thellier:
XXX.0 first zero field step
XXX.1 first in field step [XXX.0 and XXX.1 can be done in any order]
XXX.2 second in-field step at lower temperature (pTRM check)
ATRM:
X.00 optional baseline
X.1 ATRM step (+X)
X.2 ATRM step (+Y)
X.3 ATRM step (+Z)
X.4 ATRM step (-X)
X.5 ATRM step (-Y)
X.6 ATRM step (-Z)
X.7 optional alteration check (+X)
TRM:
XXX.YYY XXX is temperature step of total TRM
YYY is dc field in microtesla
Intensity assumed to be total moment in 10^3 Am^2 (emu)
Declination: Declination in specimen coordinate system
Inclination: Inclination in specimen coordinate system
    Optional metadata string: mm/dd/yy;hh:mm;[dC,mT];xx.xx;UNITS;USER;INST;NMEAS
hh in 24 hours.
dC or mT units of treatment XXX (see Treat above) for thermal or AF respectively
xx.xxx DC field
UNITS of DC field (microT, mT)
INST: instrument code, number of axes, number of positions (e.g., G34 is 2G, three axes, measured in four positions)
NMEAS: number of measurements in a single position (1,3,200...)
"""
import sys
from pmagpy import convert_2_magic as convert
def do_help():
    """Return the module docstring, used as this program's help text."""
    return __doc__
def main():
    """Command-line entry point: collect options and run ``convert.huji``."""
    kwargs = {}
    if "-h" in sys.argv:
        help(__name__)
        sys.exit()

    def _value(flag, offset=1):
        # Return the argument `offset` places after `flag` on the command line.
        return sys.argv[sys.argv.index(flag) + offset]

    # Single-value string options mapped to their kwargs keys.
    str_options = [("-usr", 'user'), ("-WD", 'dir_path'),
                   ("-ID", 'input_dir_path'), ("-F", 'meas_file'),
                   ("-Fsp", 'spec_file'), ("-Fsa", 'samp_file'),
                   ("-Fsi", 'site_file'), ("-Flo", 'loc_file'),
                   ("-f", 'magfile'), ("-fd", 'datafile'),
                   ("-loc", 'location'), ("-ncn", 'samp_con'),
                   ("-LP", 'codelist')]
    for flag, key in str_options:
        if flag in sys.argv:
            kwargs[key] = _value(flag)
    if "-dc" in sys.argv:
        kwargs['labfield'] = float(_value("-dc", 1))
        kwargs['phi'] = float(_value("-dc", 2))
        kwargs['theta'] = float(_value("-dc", 3))
    # TODO: a -ac option (peak AF field for ARM acquisition, see module
    # docstring) is documented but not implemented.
    if "-spc" in sys.argv:
        kwargs['specnum'] = int(_value("-spc"))
    if '-A' in sys.argv:
        kwargs['noave'] = True
    res, error_message = convert.huji(**kwargs)
    if not res:
        print(__doc__)
if __name__ == "__main__":
main()
| {
"content_hash": "b4d7ee1760808edc055e4ef06e571413",
"timestamp": "",
"source": "github",
"line_count": 175,
"max_line_length": 141,
"avg_line_length": 40.78857142857143,
"alnum_prop": 0.605771924908938,
"repo_name": "lfairchild/PmagPy",
"id": "0aef2f3f04b2c94a9508ce6bfd3612cbc27b3ada",
"size": "7160",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "programs/conversion_scripts/huji_magic.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "33903"
},
{
"name": "Inno Setup",
"bytes": "3675"
},
{
"name": "Jupyter Notebook",
"bytes": "29090864"
},
{
"name": "Python",
"bytes": "15912726"
},
{
"name": "Rich Text Format",
"bytes": "1104"
},
{
"name": "Shell",
"bytes": "9167"
},
{
"name": "TeX",
"bytes": "3146"
}
],
"symlink_target": ""
} |
from mytools.utilities import not_str
from . import exceptions as exc, reg
from .cid import controllers as ctl
from .cid.enum import CidEnum
class Builder():
    """Context manager that wires an object (e.g. a CandeObj) to a CID
    parsing "listener" coroutine.

    On entry it creates the listener (``build``) and the logic generator
    driving it; on exit both are torn down.  Instances are callable so the
    starting CID member tag can be set before use:
    ``with Builder(obj)('A1') as listener: ...``
    """
    def __init__(self, obj):
        # obj: the object being built; it must provide get_handler() and
        # receives a `listener` attribute while the context is active.
        self.obj = obj
    def __enter__(self):
        self.obj.listener = self.build(self.obj)
        exec_logic = self.exec_logic
        try:
            # self.start exists only if the instance was called with a tag;
            # otherwise fall back to the default starting member.
            self.logic = exec_logic(self.start)
        except AttributeError:
            self.logic = exec_logic()
        return self.obj.listener
    def __exit__(self, owner, value, tb):
        # Tear down the listener coroutine and drop per-run state.
        self.obj.listener.close()
        del self.obj.listener
        del self.logic
    def exec_logic(self, cidmember='A1'):
        '''Get a logic generator object corresponding to the member'''
        # NOTE(review): cidgen_reg is defined elsewhere in the package;
        # presumably a registry mapping member tags to logic generators.
        return cidgen_reg[cidmember]
    def build(self, obj):
        """Listener coroutine: pair signals from the logic generator with
        objects sent in by the ``from_cid`` loop and feed them to handlers.

        Per item: receive a signal, yield it back so the caller can build an
        object from the current line, then receive that object and forward it
        to the member's handler until the handler signals ``Complete``.
        """
        for _ in self.logic:
            try:
                # signal sent by self.logic generator
                signal = yield
                # lookup and init. a handler generator
                obj_handler = obj.get_handler(signal)
                # transmit signal for making an obj from line
                yield signal
                # new_obj sent by main from_cid loop
                new_obj = yield
                while True:
                    try:
                        obj_handler.send(new_obj)
                        next(self.logic)
                        signal = yield
                        yield signal
                        new_obj = yield
                    except exc.Complete as e:
                        # Member finished: let the handler clean up, move on.
                        obj_handler.throw(e)
                        break
            except exc.Complete:
                # Logic generator signaled completion for this member.
                continue
            finally:
                obj_handler.close()
    @staticmethod
    def valid_tag(tag):
        """Return the canonical member name for ``tag``.

        Raises:
            ValueError: if ``tag`` is not a CidEnum member name.
        """
        try:
            return CidEnum[tag].name
        except KeyError:
            raise ValueError('Invalid tag: '
                             '{!r}'.format(tag)) from None
    def __call__(self, start):
        # Validate and remember the starting member tag, then return self so
        # the instance can be used directly as a context manager.
        self.start = self.valid_tag(start)
        return self
class CandeObj():
    '''For working with Cande Level 3 problems

    Builds itself from CID-format lines (see ``from_cid``), dispatching
    each parsed section either to a scalar field or to one of the member
    collections below.
    '''
    # catalog of members that are atomic
    _fields = tuple(f.lower() for f in Master._fields + Info._fields + Control._fields)
    # catalog of members that are collections
    _lists = 'groups nodes elements boundaries materials factors'.split()
    # for initializing empty collection members; each CID section tag
    # (A2, C3, ...) is an alias for the matching collection descriptor
    groups = ObjListDesc('_groups')
    A2 = groups
    nodes = ObjListDesc('_nodes')
    C3 = nodes
    elements = ObjListDesc('_elements')
    C4 = elements
    boundaries = ObjListDesc('_boundaries')
    C5 = boundaries
    materials = ObjListDesc('_materials')
    D1Soil = materials
    D1Interf = materials
    factors = ObjListDesc('_factors')
    E1 = factors
    def __init__(self, cande_obj=None, **kwargs):
        '''Copy fields from `cande_obj` (default: one built from a fresh
        CandeNT) and then override with any valid CandeNT keyword args.

        Raises TypeError when kwargs do not match CandeNT's fields.
        '''
        if cande_obj is None:
            cande_obj = CandeObj(CandeNT())
        try:
            from_kwargs = CandeNT(**kwargs)
        except TypeError as e:
            raise TypeError('Invalid kwargs to create CandeObj') from e
        copymembers(cande_obj, self, CandeNT._fields, suppress_err=False)
        copymembers(from_kwargs, self, kwargs, suppress_err=False)
        self._init_reg()
    def _init_reg(self):
        '''Build the per-instance registries mapping CID tags to targets
        and to (target, handler-factory) argument tuples.'''
        self.candeattr_reg = reg.CidRegistry(A1=self, A2=self.groups, C1=self, C2=self,
                                             C3=self.nodes, C4=self.elements,
                                             C5=self.boundaries, D1Soil=self.materials,
                                             D1Interf=self.materials, E1=self.factors)
        self.handlerargs_reg = reg.CidRegistry(
            A1 = (self, ctl.merge_namedtuple_lower),
            C1 = (self, ctl.merge_namedtuple_lower),
            C2 = (self, ctl.merge_namedtuple_lower),
            A2 = (self.groups, ctl.merge_namedtuple_lower),
            D1Soil = (self.materials, ctl.merge_namedtuple_lower),
            D1Interf = (self.materials, ctl.merge_namedtuple_lower),
            C3 = (self.nodes,),
            C4 = (self.elements,),
            C5 = (self.boundaries,),
            E1 = (self.factors,),
            )
    @classmethod
    def empty(cls):
        '''Create an instance without running __init__ (no field copying),
        with only the registries initialized.'''
        obj = cls.__new__(cls)
        obj._init_reg()
        return obj
    @classmethod
    def from_cid(cls, lines, start='A1'):
        '''Construct an instance using a file-like sequence'''
        not_str(lines)
        obj = cls.empty()
        with obj.builder(start) as build_process:
            logging.debug('***CANDE_OBJ BUILD BEGUN***')
            for line, tag in zip(lines, build_process):
                logging.debug('***BEGINNING OF SECTION {} HANDLING***'
                              ''.format(tag))
                cid_obj = obj.unformat(line, tag)
                build_process.send(cid_obj)
                # BUG FIX: previously formatted with undefined name `label`,
                # which raised NameError on the first iteration
                logging.debug('***ENDING OF SECTION {} HANDLING***'
                              ''.format(tag))
            logging.debug('***CANDE_OBJ BUILD COMPLETE***')
        return obj
    def builder(self, cidmember='A1'):
        '''Return (and cache) a Builder starting at `cidmember`; a cached
        builder with a different start tag is replaced.'''
        try:
            if self._builder.start != cidmember:
                raise AttributeError()
        except AttributeError:
            self._builder = Builder(self)(cidmember)
        return self._builder
    @staticmethod
    def unformatter(cidmember):
        '''The corresponding unformatter for the member'''
        return unformatter_reg[cidmember]
    @classmethod
    def unformat(cls, cidline, cidmember):
        '''Parse a CID file line into a cid object'''
        unformatter = cls.unformatter(cidmember)
        ObjType = obj_reg[cidmember]
        logging.debug('Unformatting {} to a {}'
                      ''.format(cidmember, ObjType.__name__))
        return ObjType(*unformatter.unformat(cidline))
    def __repr__(self):
        # show every atomic field; missing fields render as None
        repgen = ('{}={!r}'.format(f, getattr(self, f, None))
                  for f in self._fields)
        return 'CandeObj({})'.format(', '.join(repgen))
| {
"content_hash": "6232e09986e476a084b1504bc3cf26f1",
"timestamp": "",
"source": "github",
"line_count": 150,
"max_line_length": 100,
"avg_line_length": 41.86,
"alnum_prop": 0.512979773849339,
"repo_name": "Ricyteach/candemaker",
"id": "015305073e8fc5c752d960c20db94f8c9bcb95b4",
"size": "6279",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/candemaker/cande replacement.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "21"
},
{
"name": "Python",
"bytes": "142106"
}
],
"symlink_target": ""
} |
from plugit.utils import action, only_orga_member_user, json_only, cache, PlugItRedirect
from models import Ecc, CountryCode
# Include homepage
@action(route="/", template="main/home.html")
@only_orga_member_user()
def main_home(request):
    """Entry point for '/': redirect the user to the stations page."""
    return PlugItRedirect('stations')
# Include terms
@action(route="/terms/", template="terms.html")
@only_orga_member_user()
def terms(request):
    """Render the terms page (template only; no context data needed)."""
    return {}
# Load the list of countries. Cached
@action(route="/ecc_list")
@json_only()
@cache(time=3600, byUser=False)
def main_ecc_list(request):
    """Return the list of ECCs as JSON.

    Results are cached for one hour and shared across all users.
    """
    # Build with a comprehension instead of shadowing the builtin `list`.
    return {'list': [elem.json for elem in Ecc.query.order_by(Ecc.name).all()]}
# Load the list of country codes (cc). Cached
@action(route="/cc_list")
@json_only()
@cache(time=3600, byUser=False)
def main_cc_list(request):
    """Return the list of CCs (country codes) as JSON.

    Results are cached for one hour and shared across all users.
    """
    # Build with a comprehension instead of shadowing the builtin `list`.
    return {'list': [cc.json for cc in CountryCode.query.order_by(CountryCode.name).all()]}
| {
"content_hash": "c91b0d76cb91db090fa72c3e52910205",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 88,
"avg_line_length": 21.313725490196077,
"alnum_prop": 0.6623735050597976,
"repo_name": "ebu/radiodns-plugit",
"id": "82de981d866fdc5be8304170f5b6ee5a01384397",
"size": "1120",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "RadioDns-PlugIt/main/actions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "11815"
},
{
"name": "Python",
"bytes": "190516"
}
],
"symlink_target": ""
} |
''' FSL IO '''
from __future__ import with_statement
import os
from os.path import join as pjoin
from subprocess import Popen, PIPE
import numpy as np
import numpy.linalg as npl
from numpy import newaxis
from scipy.ndimage import map_coordinates as mc
from scipy.ndimage import affine_transform
from dipy.io.dpy import Dpy
import nibabel as nib
from nibabel.tmpdirs import InTemporaryDirectory
_VAL_FMT = ' %e'
class FSLError(Exception):
    """ Class signals error in FSL processing (e.g. a failed flirt run) """
def have_flirt():
    """ Check whether the ``flirt`` executable can be invoked

    ``flirt`` called with no arguments prints its usage text on stdout, so
    any non-empty stdout is taken as evidence the tool is callable.
    """
    proc = Popen('flirt', shell=True, stdout=PIPE, stderr=PIPE)
    captured_out, _ = proc.communicate()
    return captured_out != ''
def write_bvals_bvecs(bvals, bvecs, outpath=None, prefix=''):
    ''' Write FSL FDT bvals and bvecs files

    Parameters
    -------------
    bvals : (N,) sequence
       Vector with diffusion gradient strength (one per diffusion
       acquisition, N=no of acquisitions)
    bvecs : (N, 3) array-like
       diffusion gradient directions
    outpath : None or str
       path to write FDT bvals, bvecs text files
       None results in current working directory.
    prefix : str
       prefix for bvals, bvecs files in directory. Defaults to ''
    '''
    if outpath is None:
        outpath = os.getcwd()
    bvals = tuple(bvals)
    bvecs = np.asarray(bvecs)
    # NaN directions (e.g. from b=0 volumes) are written as zero vectors
    bvecs[np.isnan(bvecs)] = 0
    N = len(bvals)
    fmt = _VAL_FMT * N + '\n'
    # Use context managers so the file handles are closed deterministically
    # (previously both files were left for garbage collection to close).
    fname = pjoin(outpath, prefix + 'bvals')
    with open(fname, 'wt') as bvalf:
        bvalf.write(fmt % bvals)
    fname = pjoin(outpath, prefix + 'bvecs')
    with open(fname, 'wt') as bvf:
        # FDT layout: one row per coordinate axis, N values per row
        for dim_vals in bvecs.T:
            bvf.write(fmt % tuple(dim_vals))
def flirt2aff(mat, in_img, ref_img):
    """ Map voxel coordinates of `in_img` to voxel coordinates of `ref_img`

    Parameters
    ----------
    mat : (4,4) array
        contents (as array) of the ``-omat`` transformation file written
        by flirt
    in_img : img
        image that was passed (as filename) to flirt as the ``-in`` image
    ref_img : img
        image that was passed (as filename) to flirt as the ``-ref`` image

    Returns
    -------
    aff : (4,4) array
        affine taking voxel coordinates in ``in_img`` to voxel
        coordinates in ``ref_img``

    Notes
    -----
    flirt's output matrix maps between its internal *inspace* and
    *refspace* coordinate systems.  Each is the corresponding image's
    voxel grid scaled by the (always positive) voxel sizes; when an
    image's affine has a *positive* determinant, flirt additionally flips
    the first (x) voxel axis first, i.e. ``i -> N_i - 1 - i``.  Analyze
    images store no affine and flirt assumes a negative determinant for
    them.  This function composes the inspace mapping, `mat`, and the
    inverse refspace mapping to produce a pure voxel-to-voxel affine.
    Thanks to Mark Jenkinson and Jesper Andersson for the correct
    statements here; apologies for any errors we've added.
    """
    input_hdr = in_img.get_header()
    reference_hdr = ref_img.get_header()
    # get_zooms returns the positive voxel sizes recorded in the header
    input_space = np.diag(input_hdr.get_zooms() + (1,))
    reference_space = np.diag(reference_hdr.get_zooms() + (1,))
    # positive-determinant affines get flirt's implicit x-axis flip
    if npl.det(in_img.get_affine()) >= 0:
        input_space = np.dot(input_space,
                             _x_flipper(input_hdr.get_data_shape()[0]))
    if npl.det(ref_img.get_affine()) >= 0:
        reference_space = np.dot(reference_space,
                                 _x_flipper(reference_hdr.get_data_shape()[0]))
    # in voxels -> inspace -> (mat) -> refspace -> ref voxels
    return np.dot(npl.inv(reference_space), np.dot(mat, input_space))
def _x_flipper(N_i):
flipr = np.diag([-1, 1, 1, 1])
flipr[0, 3] = N_i - 1
return flipr
def flirt2aff_files(matfile, in_fname, ref_fname):
    """ Map from `in_fname` image voxels to `ref_fname` voxels given `matfile`

    Convenience wrapper around :func:`flirt2aff` that first loads the
    matrix and both images from disk; see that function for details.

    Parameters
    ------------
    matfile : str
        filename of output ``-omat`` transformation file from flirt
    in_fname : str
        filename for image passed to flirt as ``-in`` image
    ref_fname : str
        filename for image passed to flirt as ``-ref`` image

    Returns
    -------
    aff : (4,4) array
        Transform from voxel coordinates in image for ``in_fname`` to voxel
        coordinates in image for ``ref_fname``
    """
    flirt_mat = np.loadtxt(matfile)
    return flirt2aff(flirt_mat, nib.load(in_fname), nib.load(ref_fname))
def warp_displacements(ffa, flaff, fdis, fref, ffaw, order=1):
    ''' Warp an image using fsl displacements

    Resamples the image in `ffa` onto the grid of `fref`, combining the
    flirt affine in `flaff` with the fnirt displacement field in `fdis`,
    and saves the warped result to `ffaw`.

    Parameters
    ------------
    ffa : filename of nifti to be warped
    flaff : filename of .mat (flirt)
    fdis : filename of displacements (fnirtfileutils)
    fref : filename of reference volume e.g. (FMRIB58_FA_1mm.nii.gz)
    ffaw : filename for the output warped image
    order : int, optional
        spline interpolation order for ``scipy.ndimage.map_coordinates``
        (default 1, i.e. trilinear)
    '''
    refaff = nib.load(fref).get_affine()
    disdata = nib.load(fdis).get_data()
    imgfa = nib.load(ffa)
    fadata = imgfa.get_data()
    fazooms = imgfa.get_header().get_zooms()
    # from fa index to ref index
    res = flirt2aff_files(flaff, ffa, fref)
    # from ref index to fa index
    ires = np.linalg.inv(res)
    # create the 4d volume which has the indices for the reference image
    reftmp = np.zeros(disdata.shape)
    '''
    #create the grid indices for the reference
    #refinds = np.ndindex(disdata.shape[:3])
    for ijk_t in refinds:
        i,j,k = ijk_t
        reftmp[i,j,k,0]=i
        reftmp[i,j,k,1]=j
        reftmp[i,j,k,2]=k
    '''
    # same as commented above but much faster (broadcasting fills the grid)
    reftmp[..., 0] = np.arange(disdata.shape[0])[:, newaxis, newaxis]
    reftmp[..., 1] = np.arange(disdata.shape[1])[newaxis, :, newaxis]
    reftmp[..., 2] = np.arange(disdata.shape[2])[newaxis, newaxis, :]
    # affine transform from reference index to the fa index
    A = np.dot(reftmp, ires[:3, :3].T) + ires[:3, 3]
    # add the displacements but first divide them by the voxel sizes
    # (displacements are in mm; A is in fa voxel units)
    A2 = A + disdata / fazooms
    # hold the displacements' shape reshaping
    di, dj, dk, dl = disdata.shape
    # do the interpolation using map coordinates
    # the list of points where the interpolation is done given by the reshaped in 2D A2 (list of 3d points in fa index)
    W = mc(fadata, A2.reshape(di * dj * dk, dl).T, order=order).reshape(di, dj, dk)
    # save the warped image
    Wimg = nib.Nifti1Image(W, refaff)
    nib.save(Wimg, ffaw)
def warp_displacements_tracks(fdpy, ffa, fmat, finv, fdis, fdisa, fref, fdpyw):
    """ Warp tracks from native space to the FMRIB58/MNI space

    We use here the fsl displacements. Have a look at create_displacements to
    see an example of how to use these displacements.

    Parameters
    ------------
    fdpy : filename of the .dpy file with the tractography
    ffa : filename of nifti to be warped
    fmat : filename of .mat (flirt)
    fdis : filename of displacements (fnirtfileutils)
    fdisa : filename of displacements (fnirtfileutils + affine)
    finv : filename of invwarp displacements (invwarp)
    fref : filename of reference volume e.g. (FMRIB58_FA_1mm.nii.gz)
    fdpyw : filename of the warped tractography

    See also
    -----------
    dipy.external.fsl.create_displacements
    """
    # read the tracks from the image space
    dpr = Dpy(fdpy, 'r')
    T = dpr.read_tracks()
    dpr.close()
    # copy them in a new file (warped coordinates are written back in place)
    dpw = Dpy(fdpyw, 'w', compression=1)
    dpw.write_tracks(T)
    dpw.close()
    # from fa index to ref index
    res = flirt2aff_files(fmat, ffa, fref)
    # load the reference img
    imgref = nib.load(fref)
    refaff = imgref.get_affine()
    # load the invwarp displacements
    imginvw = nib.load(finv)
    invwdata = imginvw.get_data()
    invwaff = imginvw.get_affine()
    # load the forward displacements
    imgdis = nib.load(fdis)
    disdata = imgdis.get_data()
    # load the forward displacements + affine
    imgdis2 = nib.load(fdisa)
    disdata2 = imgdis2.get_data()
    # from their difference create the affine
    # (with-affine minus without-affine isolates the affine contribution)
    disaff = disdata2 - disdata
    del disdata
    del disdata2
    shape = nib.load(ffa).get_data().shape
    # transform the displacements affine back to image space,
    # one component volume at a time
    disaff0 = affine_transform(disaff[..., 0], res[:3, :3], res[:3, 3], shape, order=1)
    disaff1 = affine_transform(disaff[..., 1], res[:3, :3], res[:3, 3], shape, order=1)
    disaff2 = affine_transform(disaff[..., 2], res[:3, :3], res[:3, 3], shape, order=1)
    # combine the invwarp displacements with the transformed affine part
    di = invwdata[:, :, :, 0] + disaff0
    dj = invwdata[:, :, :, 1] + disaff1
    dk = invwdata[:, :, :, 2] + disaff2
    dprw = Dpy(fdpyw, 'r+')
    rows = len(dprw.f.root.streamlines.tracks)
    blocks = np.round(np.linspace(0, rows, 10)).astype(int)  # lets work in blocks
    # print rows
    for i in range(len(blocks) - 1):
        # print blocks[i],blocks[i+1]
        # copy a lot of tracks together
        caboodle = dprw.f.root.streamlines.tracks[blocks[i]:blocks[i + 1]]
        mci = mc(di, caboodle.T, order=1)  # interpolations for i displacement
        mcj = mc(dj, caboodle.T, order=1)  # interpolations for j displacement
        mck = mc(dk, caboodle.T, order=1)  # interpolations for k displacement
        D = np.vstack((mci, mcj, mck)).T
        # go back to mni image space
        WI2 = np.dot(caboodle, res[:3, :3].T) + res[:3, 3] + D
        # and then to mni world space
        caboodlew = np.dot(WI2, refaff[:3, :3].T) + refaff[:3, 3]
        # write back
        dprw.f.root.streamlines.tracks[blocks[i]:blocks[i + 1]] = caboodlew.astype('f4')
    dprw.close()
def pipe(cmd):
    """ A tiny pipeline system to run external tools.

    Runs `cmd` through the shell and prints captured stdout and stderr as
    lists of lines (mirroring the historical ``readlines`` output).

    For more advanced pipelining use nipype http://www.nipy.org/nipype

    Parameters
    ----------
    cmd : str
        shell command line to execute
    """
    p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
    # communicate() drains both pipes concurrently; reading stdout to EOF
    # before touching stderr (as the old readlines() pair did) can deadlock
    # when the child fills the stderr pipe buffer.
    out, err = p.communicate()
    sto = out.splitlines(True)
    ste = err.splitlines(True)
    print(sto)
    print(ste)
def dcm2nii(dname, outdir, filt='*.dcm', options='-d n -g n -i n -o'):
    """Convert DICOM files matching `filt` under `dname` to NIfTI in `outdir`
    by shelling out to ``dcm2nii`` with the given option string."""
    cmd = ' '.join(['dcm2nii', options, outdir, dname + '/' + filt])
    print(cmd)
    pipe(cmd)
def eddy_correct(in_nii, out_nii, ref=0):
    """Run FSL ``eddy_correct`` on `in_nii`, writing `out_nii`; `ref` is the
    index of the reference volume."""
    cmd = 'eddy_correct %s %s %s' % (in_nii, out_nii, str(ref))
    print(cmd)
    pipe(cmd)
def bet(in_nii, out_nii, options=' -F -f .2 -g 0'):
    """Run FSL ``bet`` brain extraction on `in_nii`, writing `out_nii`,
    appending the given option string to the command line."""
    cmd = 'bet %s %s%s' % (in_nii, out_nii, options)
    print(cmd)
    pipe(cmd)
def run_flirt_imgs(in_img, ref_img, dof=6, flags=''):
    """ Run flirt on nibabel images, returning affine

    Parameters
    ----------
    in_img : `SpatialImage`
       image to register
    ref_img : `SpatialImage`
       image to register to
    dof : int, optional
       degrees of freedom for registration (default 6)
    flags : str, optional
       other flags to pass to flirt command string

    Returns
    -------
    in_vox2out_vox : (4,4) ndarray
       affine such that, if ``[i, j, k]`` is a coordinate in voxels in the
       `in_img`, and ``[p, q, r]`` are the equivalent voxel coordinates in the
       reference image, then
       ``[p, q, r] = np.dot(in_vox2out_vox[:3,:3]), [i, j, k] + in_vox2out_vox[:3,3])``

    Raises
    ------
    FSLError
       if flirt did not produce the expected ``-omat`` output file
    """
    omat = 'reg.mat'
    # work in a throwaway directory so the temp nii / mat files disappear
    with InTemporaryDirectory():
        nib.save(in_img, 'in.nii')
        nib.save(ref_img, 'ref.nii')
        cmd = 'flirt %s -dof %d -in in.nii -ref ref.nii -omat %s' % (
            flags, dof, omat)
        proc = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
        stdout, stderr = proc.communicate()
        # absence of the omat file is the only failure signal checked;
        # flirt's exit code is not inspected
        if not os.path.isfile(omat):
            raise FSLError('Command "%s" failed somehow - stdout: %s\n'
                           'and stderr: %s\n' % (cmd, stdout, stderr))
        res = np.loadtxt(omat)
    return flirt2aff(res, in_img, ref_img)
def apply_warp(in_nii, affine_mat, nonlin_nii, out_nii):
    """Warp `in_nii` into FMRIB58 standard space with FSL ``applywarp``.

    NOTE(review): `affine_mat` is accepted but never used in the command
    line below -- possibly a missing ``--premat=`` option; confirm intent
    before relying on this function.
    """
    cmd = 'applywarp --ref=${FSLDIR}/data/standard/FMRIB58_FA_1mm --in=' + in_nii + ' --warp=' + nonlin_nii + \
        ' --out=' + out_nii
    print(cmd)
    pipe(cmd)
def create_displacements(fin, fmat, fnonlin, finvw, fdisp, fdispa, fref):
    """ Create displacements using FSL's FLIRT and FNIRT tools

    Runs, in order: affine registration (flirt), nonlinear registration
    (fnirt), the inverse warp (invwarp), and two displacement-field
    extractions (fnirtfileutils, without and with the affine part).

    Parameters
    ----------
    fin : filename of initial source image
    fmat : filename of .mat (flirt)
    fnonlin : filename of fnirt output
    finvw : filename of invwarp displacements (invwarp)
    fdisp : filename of fnirtfileutils
    fdispa : filename of fnirtfileutils (with other parameters)
    fref : filename of reference image e.g. (FMRIB58_FA_1mm.nii.gz)
    """
    std_ref = '${FSLDIR}/data/standard/FMRIB58_FA_1mm'
    pipeline = [
        'flirt -ref ' + fref + ' -in ' + fin + ' -omat ' + fmat,
        'fnirt --in=' + fin + ' --aff=' + fmat + ' --cout=' + fnonlin + ' --config=FA_2_FMRIB58_1mm',
        'invwarp --ref=' + fin + ' --warp=' + fnonlin + ' --out=' + finvw,
        'fnirtfileutils --in=' + fnonlin + ' --ref=' + std_ref + ' --out=' + fdisp,
        'fnirtfileutils --in=' + fnonlin + ' --ref=' + std_ref + ' --out=' + fdispa + ' --withaff',
    ]
    for command in pipeline:
        print(command)
        pipe(command)
| {
"content_hash": "f1720f26cc8d34e748fbea2bfda4ebeb",
"timestamp": "",
"source": "github",
"line_count": 418,
"max_line_length": 119,
"avg_line_length": 34.73444976076555,
"alnum_prop": 0.6227012879674909,
"repo_name": "maurozucchelli/dipy",
"id": "197d4c5a23c4ac05523f6a88c8690791ff0ef1c2",
"size": "14519",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dipy/external/fsl.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "335"
},
{
"name": "CSS",
"bytes": "641"
},
{
"name": "Python",
"bytes": "1177807"
},
{
"name": "Shell",
"bytes": "2955"
},
{
"name": "TeX",
"bytes": "537291"
}
],
"symlink_target": ""
} |
from time import sleep
from django.core.management.base import BaseCommand
import celery
from applications.cart.models import Order
from applications.cart.tasks import recompile_order
__author__ = 'AlexStarov'
""" Парсим заказы - переводим Email-ы в базу Email-ов а телефоны в базу телефонов. """
class Command(BaseCommand, ):
    """Walk all orders in pk order; queue a Celery recompile task for each
    order that has no associated user, skipping the rest."""
    def handle(self, *args, **options):
        all_orders = Order.objects.all().order_by('pk')
        for number, current_order in enumerate(all_orders, start=1):
            print('i:', number, 'pk:', current_order.pk, 'order.user:', current_order.user, 'order:', current_order, )
            if current_order.user:
                # already linked to a user -- nothing to recompile
                sleep(0.1, )
            else:
                recompile_order.apply_async(
                    queue='celery',
                    kwargs={'order_pk': current_order.pk, },
                    task_id='celery-task-id-recompile_order-{0}'.format(celery.utils.uuid(), ),
                )
                sleep(1, )
| {
"content_hash": "71621296a90eabac5d94ad9a2caf92a1",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 95,
"avg_line_length": 29.75,
"alnum_prop": 0.569327731092437,
"repo_name": "AlexStarov/Shop",
"id": "f4e170f29d0f3aa65259d5225ba5c27e1ce7eb16",
"size": "1028",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "applications/cart/management/commands/recompyle_orders.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "268281"
},
{
"name": "HTML",
"bytes": "138853"
},
{
"name": "JavaScript",
"bytes": "10629133"
},
{
"name": "PHP",
"bytes": "14"
},
{
"name": "Python",
"bytes": "1532862"
},
{
"name": "Shell",
"bytes": "2089"
}
],
"symlink_target": ""
} |
import os
from subprocess import PIPE
from ovm.exceptions import OVMError
from ovm.utils.compat23 import Popen
from ovm.utils.copyprogress import CopyProgress
class ImageTemplate:
    """Disk image template described by a config mapping.

    `config` must provide ``get`` access to ``path`` (template image file),
    ``format`` (qemu image format, e.g. ``qcow2``/``raw``) and ``size``
    (integer-convertible).
    """
    def __init__(self, config):
        self.path = config.get('path')
        self.format = config.get('format')
        self.size = int(config.get('size'))

    def copy_on_device(self, dest, dest_format):
        """Copy this template onto the existing device/file `dest`,
        converting to `dest_format` with ``qemu-img convert``.

        Raises OVMError if `dest` does not exist or qemu-img fails.
        """
        if not os.path.exists(dest):
            # fixed grammar of the error message ("must exists" -> "must exist")
            raise OVMError('copy_on_device: destination must exist.')
        cp = CopyProgress(self.path, dest, 'Copy image file')
        cp.start()
        args = [
            'qemu-img', 'convert',
            '-f', str(self.format),
            '-O', str(dest_format),
            self.path,
            dest
        ]
        with Popen(args, stderr=PIPE) as process:
            process.wait()
            if process.returncode != 0:
                # surface qemu-img's own diagnostics to the caller
                raise OVMError(process.stderr.read().decode('utf-8'))
        cp.finish()
| {
"content_hash": "712ca22f5d9f5fa3fdd7b00db81df8c0",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 70,
"avg_line_length": 26.833333333333332,
"alnum_prop": 0.5755693581780539,
"repo_name": "lightcode/OVM",
"id": "54580eeec6b6ff5daaffc81097ffd6817a9a1ab7",
"size": "1014",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ovm/templates/image_template.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "93740"
},
{
"name": "Shell",
"bytes": "15995"
}
],
"symlink_target": ""
} |
''' Interface and other definitions needed to create game modes for the drafting engine. '''
import os
from abc import ABCMeta as abstractclass, abstractmethod
# Definition of how many cards a set draft should have.
HS_NUM_CARDS_IN_SET = 3
# Private definitions; boilerplate for module definition.
thisdir = os.path.split(os.path.realpath(__file__))[0]
itlist = os.listdir(thisdir)
__all__ = [os.path.split(x)[-1].strip('\.py') for x in itlist if x.endswith('py') and not x.endswith('__init__.py')]
# Class definition for a game mode; interface.
class gameMode(object):
    """Abstract base class (interface) for a drafting game mode.

    Subclasses implement getDraft to produce the cards offered in a draft.
    """
    # NOTE(review): Python 2 style metaclass declaration; under Python 3 this
    # attribute is ignored, so @abstractmethod would not be enforced there.
    __metaclass__ = abstractclass
    def __init__(self,coll,hero,info):
        # display name of the mode; subclasses are expected to set a real value
        self.name = ''
        self.collection = coll
        self.hero = hero
        self.info = info
    # Return a list of numCards drafted cards; stub returns an empty list.
    @abstractmethod
    def getDraft(self,numCards): return []
| {
"content_hash": "f5dcf76810e836a9d8f9c4be53c5bf6c",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 116,
"avg_line_length": 35.73913043478261,
"alnum_prop": 0.6763990267639902,
"repo_name": "AlexSafatli/HearthstoneDrafter",
"id": "8345ea36360eea474f43fc00a4aaa0828d64b88f",
"size": "822",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modes/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4727"
},
{
"name": "HTML",
"bytes": "4680"
},
{
"name": "JavaScript",
"bytes": "4031"
},
{
"name": "Python",
"bytes": "19824"
}
],
"symlink_target": ""
} |
import re
from hacking import core
RE_RELATIVE_IMPORT = re.compile(r'^from\s*[.]')
@core.flake8ext
def hacking_import_rules(logical_line, filename, noqa):
    r"""Check for imports.
    OpenStack HACKING guide recommends one import per line:
    Do not import more than one module per line
    Examples:
    Okay: from nova.compute import api
    H301: from nova.compute import api, utils
    Do not use wildcard import
    Do not make relative imports
    Examples:
    Okay: from os import path
    Okay: from os import path as p
    Okay: from os import (path as p)
    Okay: import os.path
    Okay: from nova.compute import rpcapi
    Okay: from six.moves.urllib import parse
    H303: from os.path import *
    H304: from .compute import rpcapi
    """
    # NOTE: the Okay:/H3xx lines in the docstring above are executed by
    # hacking's self-test machinery -- do not edit them casually.
    # TODO(jogo): make the following doctests pass:
    #            H301: import os, sys
    # TODO(mordred: We need to split this into different checks so that they
    # can be disabled by command line switches properly
    if noqa:
        return

    split_line = logical_line.split()
    split_line_len = len(split_line)
    # only inspect import/from lines whose first module is not exempted
    if (split_line_len > 1 and split_line[0] in ('import', 'from') and
            not core.is_import_exception(split_line[1])):
        # H301: a comma on a 'from' import means multiple names per line
        pos = logical_line.find(',')
        if pos != -1:
            if split_line[0] == 'from':
                yield pos, "H301: one import per line"
        # H303: wildcard import
        pos = logical_line.find('*')
        if pos != -1:
            yield pos, "H303: No wildcard (*) import."
            return
        # token counts 2/4/6 cover 'import x', 'from x import y',
        # 'from x import y as z'; __future__ imports are always allowed
        if split_line_len in (2, 4, 6) and split_line[1] != "__future__":
            if 'from' == split_line[0] and split_line_len > 3:
                # check the dotted 'package.name' form against exceptions
                mod = '.'.join((split_line[1], split_line[3]))
                if core.is_import_exception(mod):
                    return
            # H304: leading-dot (relative) import
            if RE_RELATIVE_IMPORT.search(logical_line):
                yield logical_line.find('.'), (
                    "H304: No relative imports. '%s' is a relative import"
                    % logical_line)
                return
@core.flake8ext
def hacking_import_alphabetical(logical_line, blank_before, previous_logical,
                                indent_level, previous_indent_level):
    r"""Check for imports in alphabetical order.
    OpenStack HACKING guide recommendation for imports:
    imports in human alphabetical order
    Okay: import os\nimport sys\n\nimport nova\nfrom nova import test
    Okay: import os\nimport sys
    H306: import sys\nimport os
    Okay: import sys\n\n# foo\nimport six
    """
    # NOTE: the Okay:/H306 lines in the docstring above are executed by
    # hacking's self-test machinery -- do not edit them casually.
    # handle import x
    # use .lower since capitalization shouldn't dictate order
    # only compare adjacent imports in the same group (no blank line
    # between them) at the same indentation level
    if blank_before < 1 and indent_level == previous_indent_level:
        split_line = core.import_normalize(logical_line.
                                           strip()).lower().split()
        split_previous = core.import_normalize(previous_logical.
                                               strip()).lower().split()
        # 2 tokens = 'import x'; 4 = 'import x as y' (after normalization)
        length = [2, 4]
        if (len(split_line) in length and len(split_previous) in length and
                split_line[0] == "import" and split_previous[0] == "import"):
            if split_line[1] < split_previous[1]:
                yield (0, "H306: imports not in alphabetical order (%s, %s)"
                       % (split_previous[1], split_line[1]))
| {
"content_hash": "c0bb06d19a1bc9f7ad92461878d99ced",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 78,
"avg_line_length": 36.141304347826086,
"alnum_prop": 0.5840601503759398,
"repo_name": "openstack-dev/hacking",
"id": "627b7aa8cba11955a6ec0af9c8ece2f5d9223464",
"size": "3880",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hacking/checks/imports.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "79058"
},
{
"name": "Shell",
"bytes": "1135"
}
],
"symlink_target": ""
} |
__author__ = 'Allison MacLeay'
from sklearn.tree import DecisionTreeRegressor
import CS6140_A_MacLeay.Homeworks.HW4 as hw4
import numpy as np
class GradientBoostRegressor(object):
    """Simple gradient boosting for regression.

    Fits `n_estimators` base learners sequentially, each trained on the
    residuals of the current ensemble's predictions; the ensemble
    prediction is the sum of all learners' outputs.
    """
    def __init__(self, n_estimators=10, learning_rate=0.1, max_depth=1, learner=DecisionTreeRegressor):
        self.train_score = 0
        self.max_rounds = n_estimators
        self.learner = learner
        # NOTE(review): learning_rate and max_depth are stored but never
        # applied during fitting -- confirm whether that is intentional.
        self.learning_rate = learning_rate
        self.max_depth = max_depth
        self.hypotheses = []      # fitted base learners, in boosting order
        self.mean = None          # mean of the training targets
        self.training_error = []  # ensemble MSE on the training set per round
        self.local_error = []     # per-round MSE of the residual fit

    def fit(self, X, y):
        """Fit `max_rounds` learners on successive residuals of (X, y)."""
        X = np.asarray(X)
        y = np.asarray(y)
        self.mean = np.mean(y)
        for round in xrange(self.max_rounds):
            # residuals of the current ensemble (empty ensemble predicts 0)
            residual = [(yn - yl) for yn, yl in zip(y, self.predict(X))]
            hypothesis = self.learner().fit(X, residual)
            self.hypotheses.append(hypothesis)
            self.local_error.append(hw4.compute_mse(residual, hypothesis.predict(X)))
            pred_round = self.predict(X)
            self.train_score = hw4.compute_mse(pred_round, y)
            self.training_error.append(self.train_score)

    def predict(self, X):
        """Return the summed predictions of all fitted hypotheses for X."""
        X = np.asarray(X)
        predictions = np.zeros(len(X))
        for h in self.hypotheses:
            predictions += h.predict(X)
        return predictions

    def print_stats(self):
        """Print per-round residual-fit error and ensemble training error."""
        for r in range(len(self.training_error)):
            # BUG FIX: the original format string had only two placeholders
            # for three arguments, silently dropping the training error
            print('Round {}: local error: {} training error: {}'.format(
                r, self.local_error[r], self.training_error[r]))

    def decision_function(self):
        """Placeholder for sklearn-style API compatibility (no-op)."""
        pass

    def loss(self, y, yhat, weights):
        """Half squared-error loss.

        NOTE(review): `weights` only truncates the sum via zip; the weight
        values themselves are unused -- confirm intended behavior.
        """
        return sum([(yh - yt)**2 for yh, yt, w in zip(yhat, y, weights)]) * .5
| {
"content_hash": "90b637021e3dcf0d5426626995c02b50",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 103,
"avg_line_length": 33.70175438596491,
"alnum_prop": 0.599167100468506,
"repo_name": "alliemacleay/MachineLearning_CS6140",
"id": "b42876d1d46f187abb6308fea92690e77792b86c",
"size": "1921",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils/GradientBoost.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "289042"
}
],
"symlink_target": ""
} |
import RPi.GPIO as GPIO
import time
# GPIO Mode (BOARD / BCM): use Broadcom (BCM) pin numbering
GPIO.setmode(GPIO.BCM)
# set GPIO pins -- BCM numbers for the ultrasonic trigger/echo pair and
# the IR motion sensor input (sensor models assumed from wiring; confirm)
GPIO_TRIGGER = 18
GPIO_ECHO = 24
GPIO_IR = 6
# set GPIO direction (IN / OUT); the IR input is pulled down so it idles LOW
GPIO.setup(GPIO_TRIGGER, GPIO.OUT)
GPIO.setup(GPIO_ECHO, GPIO.IN)
GPIO.setup(GPIO_IR, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
def main():
    """Poll the IR sensor at 10 Hz; on each LOW->HIGH edge measure and
    report the distance, until interrupted with Ctrl+C."""
    last_reading = False
    reading = False
    try:
        while True:
            time.sleep(0.1)
            last_reading, reading = reading, GPIO.input(GPIO_IR)
            # act only when the sensor state just changed
            if reading != last_reading:
                label = "HIGH" if reading else "LOW"
                if label == "HIGH":
                    measured = distance()
                    if measured > 2000:
                        print("Somethings moving around me, but I can't see it")
                    else:
                        print("I caught you moving and you are standing %.1f cm in front of me" % measured)
    # Reset by pressing CTRL + C
    except KeyboardInterrupt:
        print("Measurement stopped by User")
        GPIO.cleanup()
def distance():
    """Measure distance in cm with the ultrasonic sensor (trigger/echo timing).

    NOTE(review): the two busy-wait loops below have no timeout; if the
    echo line never changes (sensor disconnected/faulty) this blocks forever.
    """
    # set Trigger to HIGH
    GPIO.output(GPIO_TRIGGER, True)
    # set Trigger after 0.01ms to LOW
    time.sleep(0.00001)
    GPIO.output(GPIO_TRIGGER, False)
    StartTime = time.time()
    StopTime = time.time()
    # save StartTime (time the echo pulse starts)
    while GPIO.input(GPIO_ECHO) == 0:
        StartTime = time.time()
    # save time of arrival (time the echo pulse ends)
    while GPIO.input(GPIO_ECHO) == 1:
        StopTime = time.time()
    # time difference between start and arrival
    TimeElapsed = StopTime - StartTime
    # multiply with the sonic speed (34300 cm/s)
    # and divide by 2, because there and back
    distance = (TimeElapsed * 34300) / 2
    return distance
# run the sensor loop only when executed as a script
if __name__ == '__main__':
    main()
| {
"content_hash": "015b3502d0ab0fcc2ac7c40cca9bfb2e",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 103,
"avg_line_length": 27.104477611940297,
"alnum_prop": 0.5759911894273128,
"repo_name": "JohnOmernik/pimeup",
"id": "efd35b8881584fd353af658e680ebf4299679c4f",
"size": "1846",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "animatronics/dist_ir.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "125"
},
{
"name": "HTML",
"bytes": "1766"
},
{
"name": "JavaScript",
"bytes": "1148"
},
{
"name": "Python",
"bytes": "263679"
},
{
"name": "Shell",
"bytes": "22782"
}
],
"symlink_target": ""
} |
"""Task driver for completing node provisioning with Canonical MaaS 2.2+."""
import logging
import uuid
import concurrent.futures
from oslo_config import cfg
import drydock_provisioner.error as errors
import drydock_provisioner.objects.fields as hd_fields
import drydock_provisioner.config as config
from drydock_provisioner.drivers.node.driver import NodeDriver
from drydock_provisioner.drivers.node.maasdriver.api_client import MaasRequestFactory
from drydock_provisioner.drivers.node.maasdriver.models.boot_resource import BootResources
from .actions.node import ValidateNodeServices
from .actions.node import CreateStorageTemplate
from .actions.node import CreateBootMedia
from .actions.node import PrepareHardwareConfig
from .actions.node import InterrogateNode
from .actions.node import DestroyNode
from .actions.node import CreateNetworkTemplate
from .actions.node import ConfigureUserCredentials
from .actions.node import IdentifyNode
from .actions.node import ConfigureHardware
from .actions.node import ApplyNodeNetworking
from .actions.node import ApplyNodePlatform
from .actions.node import ApplyNodeStorage
from .actions.node import DeployNode
class MaasNodeDriver(NodeDriver):
    """Node driver for provisioning nodes via Canonical MaaS 2.2+.

    Maps orchestrator actions onto per-action implementation classes.
    Node-scoped actions are fanned out across a thread pool, one subtask
    per target node; all other actions run inline on the calling thread.
    """

    maasdriver_options = [
        cfg.StrOpt(
            'maas_api_key', help='The API key for accessing MaaS',
            secret=True),
        cfg.StrOpt('maas_api_url', help='The URL for accessing MaaS API'),
        cfg.IntOpt(
            'poll_interval',
            default=10,
            help='Polling interval for querying MaaS status in seconds'),
    ]

    driver_name = 'maasdriver'
    driver_key = 'maasdriver'
    driver_desc = 'MaaS Node Provisioning Driver'

    # Dispatch table: orchestrator action -> class implementing that action.
    action_class_map = {
        hd_fields.OrchestratorAction.ValidateNodeServices:
        ValidateNodeServices,
        hd_fields.OrchestratorAction.CreateStorageTemplate:
        CreateStorageTemplate,
        hd_fields.OrchestratorAction.CreateBootMedia:
        CreateBootMedia,
        hd_fields.OrchestratorAction.PrepareHardwareConfig:
        PrepareHardwareConfig,
        hd_fields.OrchestratorAction.InterrogateNode:
        InterrogateNode,
        hd_fields.OrchestratorAction.DestroyNode:
        DestroyNode,
        hd_fields.OrchestratorAction.CreateNetworkTemplate:
        CreateNetworkTemplate,
        hd_fields.OrchestratorAction.ConfigureUserCredentials:
        ConfigureUserCredentials,
        hd_fields.OrchestratorAction.IdentifyNode:
        IdentifyNode,
        hd_fields.OrchestratorAction.ConfigureHardware:
        ConfigureHardware,
        hd_fields.OrchestratorAction.ApplyNodeNetworking:
        ApplyNodeNetworking,
        hd_fields.OrchestratorAction.ApplyNodePlatform:
        ApplyNodePlatform,
        hd_fields.OrchestratorAction.ApplyNodeStorage:
        ApplyNodeStorage,
        hd_fields.OrchestratorAction.DeployNode:
        DeployNode,
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        cfg.CONF.register_opts(
            MaasNodeDriver.maasdriver_options, group=MaasNodeDriver.driver_key)

        self.logger = logging.getLogger(
            cfg.CONF.logging.nodedriver_logger_name)

    def execute_task(self, task_id):
        """Execute the task identified by ``task_id``.

        Node-scoped actions are split into one subtask per target node and
        executed concurrently in a thread pool, bounded by a per-action
        timeout; other actions run inline on this thread.

        :param task_id: ID of a task persisted in the state manager
        :raises errors.DriverError: if the task is unknown or its action
            is not supported by this driver
        """
        # actions that should be threaded for execution
        threaded_actions = [
            hd_fields.OrchestratorAction.InterrogateNode,
            hd_fields.OrchestratorAction.DestroyNode,
            hd_fields.OrchestratorAction.IdentifyNode,
            hd_fields.OrchestratorAction.ConfigureHardware,
            hd_fields.OrchestratorAction.ApplyNodeNetworking,
            hd_fields.OrchestratorAction.ApplyNodeStorage,
            hd_fields.OrchestratorAction.ApplyNodePlatform,
            hd_fields.OrchestratorAction.DeployNode
        ]

        # Per-action timeout (minutes) with a driver-wide default below.
        action_timeouts = {
            hd_fields.OrchestratorAction.IdentifyNode:
            config.config_mgr.conf.timeouts.identify_node,
            hd_fields.OrchestratorAction.ConfigureHardware:
            config.config_mgr.conf.timeouts.configure_hardware,
            hd_fields.OrchestratorAction.DeployNode:
            config.config_mgr.conf.timeouts.deploy_node,
        }

        task = self.state_manager.get_task(task_id)

        if task is None:
            raise errors.DriverError("Invalid task %s" % (task_id))

        if task.action not in self.supported_actions:
            raise errors.DriverError(
                "Driver %s doesn't support task action %s" % (self.driver_desc,
                                                              task.action))

        task.set_status(hd_fields.TaskStatus.Running)
        task.save()

        if task.action in threaded_actions:
            if task.retry > 0:
                msg = "Retrying task %s on previous failed entities." % str(
                    task.get_id())
                task.add_status_msg(
                    msg=msg,
                    error=False,
                    ctx=str(task.get_id()),
                    ctx_type='task')
                target_nodes = self.orchestrator.get_target_nodes(
                    task, failures=True)
            else:
                target_nodes = self.orchestrator.get_target_nodes(task)

            with concurrent.futures.ThreadPoolExecutor(max_workers=16) as e:
                subtask_futures = dict()
                for n in target_nodes:
                    maas_client = MaasRequestFactory(
                        config.config_mgr.conf.maasdriver.maas_api_url,
                        config.config_mgr.conf.maasdriver.maas_api_key)
                    nf = self.orchestrator.create_nodefilter_from_nodelist([n])
                    subtask = self.orchestrator.create_task(
                        design_ref=task.design_ref,
                        action=task.action,
                        node_filter=nf,
                        retry=task.retry)
                    task.register_subtask(subtask)

                    action = self.action_class_map.get(task.action, None)(
                        subtask,
                        self.orchestrator,
                        self.state_manager,
                        maas_client=maas_client)
                    # Keyed by subtask UUID bytes so failures below can be
                    # attributed to the right subtask.
                    subtask_futures[subtask.get_id().bytes] = e.submit(
                        action.start)

                timeout = action_timeouts.get(
                    task.action,
                    config.config_mgr.conf.timeouts.drydock_timeout)
                finished, running = concurrent.futures.wait(
                    subtask_futures.values(), timeout=(timeout * 60))

                for t, f in subtask_futures.items():
                    if not f.done():
                        # Fix: the original passed the message positionally
                        # with an unfilled '%s' placeholder; format in the
                        # subtask id and use the msg= keyword like the other
                        # call sites.
                        task.add_status_msg(
                            msg="Subtask %s timed out before completing." %
                            str(uuid.UUID(bytes=t)),
                            error=True,
                            ctx=str(uuid.UUID(bytes=t)),
                            ctx_type='task')
                        task.failure()
                    else:
                        if f.exception():
                            self.logger.error(
                                "Uncaught exception in subtask %s." % str(
                                    uuid.UUID(bytes=t)),
                                exc_info=f.exception())
                            task.failure()
            task.bubble_results()
            task.align_result()
        else:
            try:
                maas_client = MaasRequestFactory(
                    config.config_mgr.conf.maasdriver.maas_api_url,
                    config.config_mgr.conf.maasdriver.maas_api_key)
                action = self.action_class_map.get(task.action, None)(
                    task,
                    self.orchestrator,
                    self.state_manager,
                    maas_client=maas_client)
                action.start()
            except Exception as e:
                msg = (
                    "Subtask for action %s raised unexpected exception: %s" %
                    (task.action, str(e)))
                self.logger.error(msg, exc_info=e)
                task.add_status_msg(
                    msg=msg,
                    error=True,
                    ctx=str(task.get_id()),
                    ctx_type='task')
                task.failure()

        task.set_status(hd_fields.TaskStatus.Complete)
        task.save()
        return

    def get_available_images(self):
        """Return images available in MAAS."""
        maas_client = MaasRequestFactory(
            config.config_mgr.conf.maasdriver.maas_api_url,
            config.config_mgr.conf.maasdriver.maas_api_key)
        br = BootResources(maas_client)
        br.refresh()
        return br.get_available_images()

    def get_available_kernels(self, image_name):
        """Return kernels available for ``image_name``.

        :param image_name: str image name (e.g. 'xenial')
        """
        maas_client = MaasRequestFactory(
            config.config_mgr.conf.maasdriver.maas_api_url,
            config.config_mgr.conf.maasdriver.maas_api_key)
        br = BootResources(maas_client)
        br.refresh()
        return br.get_available_kernels(image_name)
def list_opts():
    """Expose this driver's config options, keyed by option group name."""
    opts = {MaasNodeDriver.driver_key: MaasNodeDriver.maasdriver_options}
    return opts
| {
"content_hash": "d2316c38c102be380513a38891b53d15",
"timestamp": "",
"source": "github",
"line_count": 238,
"max_line_length": 90,
"avg_line_length": 38.529411764705884,
"alnum_prop": 0.5917121046892039,
"repo_name": "att-comdev/drydock",
"id": "25973e519943ff01159c87aa99408695fa5370d7",
"size": "9787",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "drydock_provisioner/drivers/node/maasdriver/driver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "2663"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "902228"
},
{
"name": "Shell",
"bytes": "15107"
},
{
"name": "Smarty",
"bytes": "2147"
}
],
"symlink_target": ""
} |
from . import base
from .. import utils
from grow.pods import env
from protorpc import messages
import os
import webreview
class Config(messages.Message):
    """Configuration for a WebReview deployment destination."""
    # Optional environment overrides for the deployed pod.
    env = messages.MessageField(env.EnvConfig, 1)
    project = messages.StringField(2, required=True)
    name = messages.StringField(3, required=True)
    server = messages.StringField(4, required=True)
    # Use HTTPS when talking to the server (disabled for localhost in
    # WebReviewDestination.__init__).
    secure = messages.BooleanField(5, default=True)
    keep_control_dir = messages.BooleanField(6, default=False)
    # NOTE(review): field number 7 is skipped — presumably a retired field;
    # do not reuse it.
    # "server/project" shorthand; when set it overrides server and project.
    remote = messages.StringField(8)
class WebReviewDestination(base.BaseDestination):
    """Deployment destination that ships builds to a WebReview server."""

    KIND = 'webreview'
    Config = Config
    threaded = True
    batch_writes = True

    def __init__(self, *args, **kwargs):
        super(WebReviewDestination, self).__init__(*args, **kwargs)
        api_key = os.getenv('WEBREVIEW_API_KEY')
        config = self.config
        if config.remote:
            # The "server/project" shorthand overrides the separate fields.
            config.server, config.project = config.remote.split('/', 1)
        if config.server.startswith('localhost:'):
            config.secure = False
        self.webreview = webreview.WebReview(
            host=config.server,
            project=config.project,
            name=config.name,
            secure=config.secure,
            api_key=api_key)

    def __str__(self):
        return self.config.server

    def deploy(self, *args, **kwargs):
        """Deploy the build, tagging it with the repo's HEAD commit message."""
        repo = kwargs.get('repo')
        if repo:
            try:
                self.webreview.commit = utils.create_commit_message(repo)
            except ValueError:
                raise ValueError(
                    'Cannot deploy to WebReview from a Git repository without a HEAD.'
                    ' Commit first then deploy to WebReview.')
        deploy_result = super(WebReviewDestination, self).deploy(*args, **kwargs)
        self.webreview.finalize()
        return deploy_result

    def login(self, account='default', reauth=False):
        """Authenticate against the WebReview server."""
        self.webreview.login(account, reauth=reauth)

    def prelaunch(self, dry_run=False):
        super(WebReviewDestination, self).prelaunch(dry_run=dry_run)

    def test(self):
        """Skip the default "can write files at destination" test."""
        pass

    def read_file(self, path):
        """Read one file from the server; raises IOError if it is missing."""
        try:
            contents_by_path, errors = self.webreview.read([path])
            if path not in contents_by_path:
                raise IOError('{} not found.'.format(path))
            if errors:
                raise base.Error(errors)
            return contents_by_path[path]
        except webreview.RpcError as e:
            raise base.Error(e.message)

    def write_file(self, paths_to_contents):
        """Write files to the server, UTF-8-encoding any unicode content."""
        try:
            for file_path, content in paths_to_contents.iteritems():
                if isinstance(content, unicode):
                    paths_to_contents[file_path] = content.encode('utf-8')
            written, errors = self.webreview.write(paths_to_contents)
            if errors:
                raise base.Error(errors)
            return written
        except webreview.RpcError as e:
            raise base.Error(e.message)

    def delete_file(self, paths):
        """Delete the given paths from the server."""
        try:
            deleted, errors = self.webreview.delete(paths)
            if errors:
                raise base.Error(errors)
            return deleted
        except webreview.RpcError as e:
            raise base.Error(e.message)
# Support legacy "jetway" destination. Remove this in a future release.
class LegacyJetwayDestination(WebReviewDestination):
    """Deprecated alias so existing "jetway" deployment configs keep working."""
    KIND = 'jetway'
| {
"content_hash": "f29b8990153459b10c1e76de0b2b509b",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 80,
"avg_line_length": 31.74,
"alnum_prop": 0.6802142407057341,
"repo_name": "codedcolors/pygrow",
"id": "f93f18ccfb3112626054fbbbbb7d47ccf92dd636",
"size": "3174",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "grow/deployments/destinations/webreview_destination.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "177"
},
{
"name": "HTML",
"bytes": "6179"
},
{
"name": "Python",
"bytes": "300256"
},
{
"name": "Shell",
"bytes": "4219"
}
],
"symlink_target": ""
} |
"""
Copyright 2017-present Airbnb, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from publishers.community.generic import StringifyArrays
from streamalert.shared.publisher import AlertPublisher, Register
@Register
class ShortenTitle(AlertPublisher):
    """A publisher that shortens the title of PagerDuty incidents.

    By default, PagerDuty incidents get a title like
    'StreamAlert Rule Triggered - blah_blah_blah'. When StreamAlert is the
    only system producing alerts into PagerDuty, that prefix is noise, so
    this publisher writes the bare rule name into every title-bearing
    PagerDuty output field instead.
    """

    # Every PagerDuty output field that carries an incident title/summary.
    _TITLE_FIELDS = (
        '@pagerduty-v2.summary',
        '@pagerduty-incident.incident_title',
        '@pagerduty.description',
    )

    def publish(self, alert, publication):
        for field in self._TITLE_FIELDS:
            publication[field] = alert.rule_name
        return publication
@Register
def as_custom_details(_, publication):
    """Copy the publication into PagerDuty custom details.

    Every field EXCEPT the '@pagerduty'-prefixed special fields is gathered
    into a dict that is attached under both the v1 and v2 details keys.
    """
    details = {}
    for field, value in publication.items():
        if field.startswith('@pagerduty'):
            continue
        details[field] = value

    publication['@pagerduty.details'] = details
    publication['@pagerduty-v2.custom_details'] = details
    return publication
@Register
def v2_high_urgency(_, publication):
    """Designate this alert as critical / high urgency.

    Only the pagerduty-v2 and pagerduty-incident Outputs honor these fields;
    the original pagerduty integration uses the Events v1 API, which does
    not support urgency.
    """
    publication.update({
        '@pagerduty-v2.severity': 'critical',
        '@pagerduty-incident.urgency': 'high',
    })
    return publication
@Register
def v2_low_urgency(_, publication):
    """Designate this alert as a warning / low urgency.

    Only the pagerduty-v2 and pagerduty-incident Outputs honor these fields;
    the original pagerduty integration uses the Events v1 API, which does
    not support urgency.
    """
    publication.update({
        '@pagerduty-v2.severity': 'warning',
        '@pagerduty-incident.urgency': 'low',
    })
    return publication
@Register
class PrettyPrintArrays(StringifyArrays):
    """Deeply navigates a dict publication and converts all scalar arrays to strings

    Scalar arrays render poorly on PagerDuty's default UI. Newlines are ignored,
    and the scalar values are wrapped with quotations:

    [
      "element_here\n with newlines\noh no",
      "hello world\nhello world"
    ]

    This publisher searches the publication dict for scalar arrays and
    transforms them into strings by joining their values with the delimiter
    below. This converts the above array into:

        element here
         with newlines
        oh no

        ----------

        hello world
        hello world
    """
    # Separator inserted between former array elements; renders as a
    # horizontal rule in PagerDuty's details view.
    DELIMITER = '\n\n----------\n\n'
@Register
class AttachImage(StringifyArrays):
    """Attach a StreamAlert banner image to the PagerDuty request.

    Works for both the v1 and v2 event API integrations. Subclass and
    override _image_url(), _click_url() and _alt_text() to attach a custom
    image of your own.

    NOTE(review): inherits StringifyArrays rather than AlertPublisher —
    confirm that inheriting the array-stringifying behavior is intended.
    """

    IMAGE_URL = 'https://streamalert.io/en/stable/_images/sa-banner.png'
    IMAGE_CLICK_URL = 'https://streamalert.io/en/stable/'
    IMAGE_ALT_TEXT = 'StreamAlert Docs'

    def publish(self, alert, publication):
        """Append image descriptors to the v2 and v1 image fields."""
        v2_images = publication.setdefault('@pagerduty-v2.images', [])
        v2_images.append({
            'src': self._image_url(),
            'href': self._click_url(),
            'alt': self._alt_text(),
        })

        v1_contexts = publication.setdefault('@pagerduty.contexts', [])
        v1_contexts.append({
            'type': 'image',
            'src': self._image_url(),
        })

        return publication

    @classmethod
    def _image_url(cls):
        return cls.IMAGE_URL

    @classmethod
    def _click_url(cls):
        return cls.IMAGE_CLICK_URL

    @classmethod
    def _alt_text(cls):
        return cls.IMAGE_ALT_TEXT
| {
"content_hash": "37ee81e4144d8ec184e94ca8d3b8e1d7",
"timestamp": "",
"source": "github",
"line_count": 150,
"max_line_length": 96,
"avg_line_length": 32.54,
"alnum_prop": 0.690842040565458,
"repo_name": "airbnb/streamalert",
"id": "9dd37f3d0ad5c30e1e32ccb497d1d5509062520b",
"size": "4881",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "publishers/community/pagerduty/pagerduty_layout.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HCL",
"bytes": "142275"
},
{
"name": "Python",
"bytes": "2209853"
},
{
"name": "Shell",
"bytes": "2975"
}
],
"symlink_target": ""
} |
import argparse
def main():
    """Parse command-line arguments and dispatch to the chosen subcommand."""
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers(dest="subparser_name")

    server_parser = subparsers.add_parser(
        "server", description="Capdash server")
    server_parser.add_argument(
        "-l", "--listen-address",
        dest="address", metavar="ADDRESS", default="0.0.0.0",
        help="Listen on this address [default=%(default)s]")
    server_parser.add_argument(
        "-p", "--listen-port",
        dest="port", metavar="PORT", type=int, default=8000,
        help="Listen on this port [default=%(default)s]")

    args = parser.parse_args()
    if args.subparser_name == "server":
        server(args.address, args.port)
def server(address, port):
    """Run the messier web application (presumably Flask — verify).

    :param address: interface to bind, e.g. "0.0.0.0".
    :param port: TCP port to listen on.
    """
    # Imported lazily so the CLI can load without the web app's dependencies.
    import messier
    messier.app.run(host=address, port=port)
if __name__ == "__main__":
main()
| {
"content_hash": "3b92e77040435b8b2d2f8150374ae747",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 73,
"avg_line_length": 28.263157894736842,
"alnum_prop": 0.4851024208566108,
"repo_name": "mwhooker/messier",
"id": "9dc637f4299970c99567b72c677103454c75ab46",
"size": "1074",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cli.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "669"
},
{
"name": "JavaScript",
"bytes": "6668"
},
{
"name": "Python",
"bytes": "7963"
}
],
"symlink_target": ""
} |
#!/usr/bin/env python2
'''
You should normally never use this! Use emcc instead.
This is a small wrapper script around the core JS compiler. This calls that
compiler with the settings given to it. It can also read data from C/C++
header files (so that the JS compiler can see the constants in those
headers, for the libc implementation in JS).
'''
import os, sys, json, optparse, subprocess, re, time, multiprocessing, string, logging
from tools import shared
from tools import jsrun, cache as cache_module, tempfiles
from tools.response_file import read_response_file
from tools.shared import WINDOWS
__rootpath__ = os.path.abspath(os.path.dirname(__file__))

def path_from_root(*pathelems):
  """Return the absolute path of the given path elements, taken relative
  to the emscripten root directory.
  """
  parts = (__rootpath__,) + pathelems
  return os.path.join(*parts)
def get_configuration():
  """Return the process-wide shared.Configuration, creating it on first use.

  The instance is memoized on the function object itself so every caller
  shares a single configuration.
  """
  if not hasattr(get_configuration, 'configuration'):
    get_configuration.configuration = shared.Configuration(environ=os.environ)
  return get_configuration.configuration
def scan(ll, settings):
  """Pre-scan LLVM IR text and record blockaddress() uses in *settings*.

  Each occurrence like ``blockaddress(@main, %23)`` is recorded as a
  ``[function, label]`` pair under settings['NECESSARY_BLOCKADDRS'].
  *settings* is left untouched when no blockaddress appears.
  """
  found = [
    match.split('(')[1][:-1].split(', ')
    for match in re.findall('blockaddress\([^)]*\)', ll)
  ]
  if found:
    settings['NECESSARY_BLOCKADDRS'] = found
# Parallel-compilation chunking parameters: work is split into roughly
# NUM_CHUNKS_PER_CORE chunks per core, each between MIN_CHUNK_SIZE and
# MAX_CHUNK_SIZE bytes of input.
NUM_CHUNKS_PER_CORE = 1.0
MIN_CHUNK_SIZE = 1024*1024
MAX_CHUNK_SIZE = float(os.environ.get('EMSCRIPT_MAX_CHUNK_SIZE') or 'inf') # configuring this is just for debugging purposes

# Optional capture file for the JS compiler's stderr. After this block,
# STDERR_FILE is either None or an open writable file handle.
STDERR_FILE = os.environ.get('EMCC_STDERR_FILE')
if STDERR_FILE:
  STDERR_FILE = os.path.abspath(STDERR_FILE)
  logging.info('logging stderr in js compiler phase into %s' % STDERR_FILE)
  STDERR_FILE = open(STDERR_FILE, 'w')
def process_funcs((i, funcs_file, meta, settings_file, compiler, forwarded_file, libraries, compiler_engine, DEBUG)):
  """Worker for the parallel 'funcs' phase.

  Note the Python 2 tuple-parameter syntax: the arguments arrive packed
  into a single tuple, as submitted to Pool.map by the caller.

  Runs the JS compiler on one chunk of functions and returns its stdout
  (compiled JS, followed by a //FORWARDED_DATA: section parsed by the
  caller).
  """
  try:
    #print >> sys.stderr, 'running', str([settings_file, funcs_file, 'funcs', forwarded_file] + libraries).replace("'/", "'") # can use this in src/compiler_funcs.html arguments,
    #                                                                 # just copy temp dir to under this one
    out = jsrun.run_js(
      compiler,
      engine=compiler_engine,
      args=[settings_file, funcs_file, 'funcs', forwarded_file] + libraries,
      stdout=subprocess.PIPE,
      stderr=STDERR_FILE,
      cwd=path_from_root('src'))
  except KeyboardInterrupt:
    # Python 2.7 seems to lock up when a child process throws KeyboardInterrupt
    raise Exception()
  if DEBUG: logging.debug('.')
  return out
def emscript(infile, settings, outfile, libraries=[], compiler_engine=None,
jcache=None, temp_files=None, DEBUG=None, DEBUG_CACHE=None):
"""Runs the emscripten LLVM-to-JS compiler. We parallelize as much as possible
Args:
infile: The path to the input LLVM assembly file.
settings: JSON-formatted settings that override the values
defined in src/settings.js.
outfile: The file where the output is written.
"""
compiler = path_from_root('src', 'compiler.js')
# Parallelization: We run 3 phases:
# 1 aka 'pre' : Process types and metadata and so forth, and generate the preamble.
# 2 aka 'funcs': Process functions. We can parallelize this, working on each function independently.
# 3 aka 'post' : Process globals, generate postamble and finishing touches.
if DEBUG: logging.debug('emscript: ll=>js')
if jcache: jcache.ensure()
# Pre-scan ll and alter settings as necessary
if DEBUG: t = time.time()
ll = open(infile).read()
scan(ll, settings)
total_ll_size = len(ll)
if DEBUG: logging.debug(' emscript: scan took %s seconds' % (time.time() - t))
# Split input into the relevant parts for each phase
if DEBUG: t = time.time()
pre = []
funcs = [] # split up functions here, for parallelism later
meta_start = ll.find('\n!')
if meta_start > 0:
meta = ll[meta_start:]
else:
meta = ''
meta_start = -1
start = ll.find('\n') if ll[0] == ';' else 0 # ignore first line, which contains ; ModuleID = '/dir name'
func_start = start
last = func_start
while 1:
last = func_start
func_start = ll.find('\ndefine ', func_start)
if func_start > last:
pre.append(ll[last:min(func_start+1, meta_start) if meta_start > 0 else func_start+1] + '\n')
if func_start < 0:
pre.append(ll[last:meta_start] + '\n')
break
header = ll[func_start+1:ll.find('\n', func_start+1)+1]
end = ll.find('\n}', func_start)
last = end+3
funcs.append((header, ll[func_start+1:last]))
pre.append(header + '}\n')
func_start = last
ll = None
if DEBUG and len(meta) > 1024*1024: logging.debug('emscript warning: large amounts of metadata, will slow things down')
if DEBUG: logging.debug(' emscript: split took %s seconds' % (time.time() - t))
if len(funcs) == 0:
logging.error('No functions to process. Make sure you prevented LLVM from eliminating them as dead (use EXPORTED_FUNCTIONS if necessary, see the FAQ)')
#if DEBUG:
# logging.debug('========= pre ================\n')
# logging.debug(''.join(pre))
# logging.debug('========== funcs ===============\n')
# for func in funcs:
# logging.debug('\n// ===\n\n', ''.join(func))
# logging.debug('=========================\n')
# Save settings to a file to work around v8 issue 1579
settings_file = temp_files.get('.txt').name
def save_settings():
global settings_text
settings_text = json.dumps(settings, sort_keys=True)
s = open(settings_file, 'w')
s.write(settings_text)
s.close()
save_settings()
# Phase 1 - pre
if DEBUG: t = time.time()
pre_file = temp_files.get('.pre.ll').name
pre_input = ''.join(pre) + '\n' + meta
out = None
if jcache:
keys = [pre_input, settings_text, ','.join(libraries)]
shortkey = jcache.get_shortkey(keys)
if DEBUG_CACHE: logging.debug('shortkey', shortkey)
out = jcache.get(shortkey, keys)
if DEBUG_CACHE and not out:
dfpath = os.path.join(get_configuration().TEMP_DIR, "ems_" + shortkey)
dfp = open(dfpath, 'w')
dfp.write(pre_input)
dfp.write("\n\n========================== settings_text\n\n")
dfp.write(settings_text)
dfp.write("\n\n========================== libraries\n\n")
dfp.write("\n".join(libraries))
dfp.close()
logging.debug(' cache miss, key data dumped to %s' % dfpath)
if out and DEBUG: logging.debug(' loading pre from jcache')
if not out:
open(pre_file, 'w').write(pre_input)
#print >> sys.stderr, 'running', str([settings_file, pre_file, 'pre'] + libraries).replace("'/", "'") # see funcs
out = jsrun.run_js(compiler, compiler_engine, [settings_file, pre_file, 'pre'] + libraries, stdout=subprocess.PIPE, stderr=STDERR_FILE,
cwd=path_from_root('src'))
assert '//FORWARDED_DATA:' in out, 'Did not receive forwarded data in pre output - process failed?'
if jcache:
if DEBUG: logging.debug(' saving pre to jcache')
jcache.set(shortkey, keys, out)
pre, forwarded_data = out.split('//FORWARDED_DATA:')
forwarded_file = temp_files.get('.json').name
pre_input = None
open(forwarded_file, 'w').write(forwarded_data)
if DEBUG: logging.debug(' emscript: phase 1 took %s seconds' % (time.time() - t))
indexed_functions = set()
forwarded_json = json.loads(forwarded_data)
for key in forwarded_json['Functions']['indexedFunctions'].iterkeys():
indexed_functions.add(key)
# Phase 2 - func
cores = int(os.environ.get('EMCC_CORES') or multiprocessing.cpu_count())
assert cores >= 1
if cores > 1:
intended_num_chunks = int(round(cores * NUM_CHUNKS_PER_CORE))
chunk_size = max(MIN_CHUNK_SIZE, total_ll_size / intended_num_chunks)
chunk_size += 3*len(meta) # keep ratio of lots of function code to meta (expensive to process, and done in each parallel task)
chunk_size = min(MAX_CHUNK_SIZE, chunk_size)
else:
chunk_size = MAX_CHUNK_SIZE # if 1 core, just use the max chunk size
if DEBUG: t = time.time()
if settings.get('ASM_JS'):
settings['EXPORTED_FUNCTIONS'] = forwarded_json['EXPORTED_FUNCTIONS']
save_settings()
chunks = cache_module.chunkify(
funcs, chunk_size,
jcache.get_cachename('emscript_files') if jcache else None)
#sys.exit(1)
#chunks = [chunks[0]] # pick specific chunks for debugging/profiling
funcs = None
if jcache:
# load chunks from cache where we can # TODO: ignore small chunks
cached_outputs = []
def load_from_cache(chunk):
keys = [settings_text, forwarded_data, chunk]
shortkey = jcache.get_shortkey(keys) # TODO: share shortkeys with later code
out = jcache.get(shortkey, keys) # this is relatively expensive (pickling?)
if out:
cached_outputs.append(out)
return False
return True
chunks = filter(load_from_cache, chunks)
if len(cached_outputs) > 0:
if out and DEBUG: logging.debug(' loading %d funcchunks from jcache' % len(cached_outputs))
else:
cached_outputs = []
# TODO: minimize size of forwarded data from funcs to what we actually need
if len(chunks) > 0:
if cores == 1 and total_ll_size < MAX_CHUNK_SIZE:
assert len(chunks) == 1, 'no point in splitting up without multiple cores'
if DEBUG: logging.debug(' emscript: phase 2 working on %d chunks %s (intended chunk size: %.2f MB, meta: %.2f MB, forwarded: %.2f MB, total: %.2f MB)' % (len(chunks), ('using %d cores' % cores) if len(chunks) > 1 else '', chunk_size/(1024*1024.), len(meta)/(1024*1024.), len(forwarded_data)/(1024*1024.), total_ll_size/(1024*1024.)))
commands = []
for i in range(len(chunks)):
funcs_file = temp_files.get('.func_%d.ll' % i).name
f = open(funcs_file, 'w')
f.write(chunks[i])
if not jcache:
chunks[i] = None # leave chunks array alive (need its length later)
f.write('\n')
f.write(meta)
f.close()
commands.append(
(i, funcs_file, meta, settings_file, compiler, forwarded_file, libraries, compiler_engine,# + ['--prof'],
DEBUG)
)
if len(chunks) > 1:
pool = multiprocessing.Pool(processes=cores)
outputs = pool.map(process_funcs, commands, chunksize=1)
elif len(chunks) == 1:
outputs = [process_funcs(commands[0])]
commands = None
else:
outputs = []
if jcache:
# save chunks to cache
for i in range(len(chunks)):
chunk = chunks[i]
keys = [settings_text, forwarded_data, chunk]
shortkey = jcache.get_shortkey(keys)
jcache.set(shortkey, keys, outputs[i])
if out and DEBUG and len(chunks) > 0: logging.debug(' saving %d funcchunks to jcache' % len(chunks))
chunks = None
if jcache: outputs += cached_outputs # TODO: preserve order
outputs = [output.split('//FORWARDED_DATA:') for output in outputs]
for output in outputs:
assert len(output) == 2, 'Did not receive forwarded data in an output - process failed? We only got: ' + output[0][-3000:]
if DEBUG: logging.debug(' emscript: phase 2 took %s seconds' % (time.time() - t))
if DEBUG: t = time.time()
# merge forwarded data
if settings.get('ASM_JS'):
all_exported_functions = set(settings['EXPORTED_FUNCTIONS']) # both asm.js and otherwise
for additional_export in settings['DEFAULT_LIBRARY_FUNCS_TO_INCLUDE']: # additional functions to export from asm, if they are implemented
all_exported_functions.add('_' + additional_export)
exported_implemented_functions = set()
for func_js, curr_forwarded_data in outputs:
curr_forwarded_json = json.loads(curr_forwarded_data)
forwarded_json['Types']['hasInlineJS'] = forwarded_json['Types']['hasInlineJS'] or curr_forwarded_json['Types']['hasInlineJS']
forwarded_json['Types']['usesSIMD'] = forwarded_json['Types']['usesSIMD'] or curr_forwarded_json['Types']['usesSIMD']
forwarded_json['Types']['preciseI64MathUsed'] = forwarded_json['Types']['preciseI64MathUsed'] or curr_forwarded_json['Types']['preciseI64MathUsed']
for key, value in curr_forwarded_json['Functions']['blockAddresses'].iteritems():
forwarded_json['Functions']['blockAddresses'][key] = value
for key in curr_forwarded_json['Functions']['indexedFunctions'].iterkeys():
indexed_functions.add(key)
if settings.get('ASM_JS'):
export_bindings = settings['EXPORT_BINDINGS']
export_all = settings['EXPORT_ALL']
for key in curr_forwarded_json['Functions']['implementedFunctions'].iterkeys():
if key in all_exported_functions or export_all or (export_bindings and key.startswith('_emscripten_bind')):
exported_implemented_functions.add(key)
for key, value in curr_forwarded_json['Functions']['unimplementedFunctions'].iteritems():
forwarded_json['Functions']['unimplementedFunctions'][key] = value
for key, value in curr_forwarded_json['Functions']['neededTables'].iteritems():
forwarded_json['Functions']['neededTables'][key] = value
if settings.get('ASM_JS'):
parts = pre.split('// ASM_LIBRARY FUNCTIONS\n')
if len(parts) > 1:
pre = parts[0]
outputs.append([parts[1]])
funcs_js = [output[0] for output in outputs]
outputs = None
if DEBUG: logging.debug(' emscript: phase 2b took %s seconds' % (time.time() - t))
if DEBUG: t = time.time()
# calculations on merged forwarded data
forwarded_json['Functions']['indexedFunctions'] = {}
i = settings['FUNCTION_POINTER_ALIGNMENT'] # universal counter
if settings['ASM_JS']: i += settings['RESERVED_FUNCTION_POINTERS']*settings['FUNCTION_POINTER_ALIGNMENT']
base_fp = i
table_counters = {} # table-specific counters
alias = settings['ASM_JS'] and settings['ALIASING_FUNCTION_POINTERS']
sig = None
for indexed in indexed_functions:
if alias:
sig = forwarded_json['Functions']['implementedFunctions'].get(indexed) or forwarded_json['Functions']['unimplementedFunctions'].get(indexed)
assert sig, indexed
if sig not in table_counters:
table_counters[sig] = base_fp
curr = table_counters[sig]
table_counters[sig] += settings['FUNCTION_POINTER_ALIGNMENT']
else:
curr = i
i += settings['FUNCTION_POINTER_ALIGNMENT']
#logging.debug('function indexing ' + str([indexed, curr, sig]))
forwarded_json['Functions']['indexedFunctions'][indexed] = curr # make sure not to modify this python object later - we use it in indexize
def split_32(x):
x = int(x)
return '%d,%d,%d,%d' % (x&255, (x >> 8)&255, (x >> 16)&255, (x >> 24)&255)
indexing = forwarded_json['Functions']['indexedFunctions']
def indexize_mem(js):
return re.sub(r"\"?'?{{ FI_([\w\d_$]+) }}'?\"?,0,0,0", lambda m: split_32(indexing.get(m.groups(0)[0]) or 0), js)
def indexize(js):
return re.sub(r"'{{ FI_([\w\d_$]+) }}'", lambda m: str(indexing.get(m.groups(0)[0]) or 0), js)
blockaddrs = forwarded_json['Functions']['blockAddresses']
def blockaddrsize_mem(js):
return re.sub(r'"?{{{ BA_([\w\d_$]+)\|([\w\d_$]+) }}}"?,0,0,0', lambda m: split_32(blockaddrs[m.groups(0)[0]][m.groups(0)[1]]), js)
def blockaddrsize(js):
return re.sub(r'"?{{{ BA_([\w\d_$]+)\|([\w\d_$]+) }}}"?', lambda m: str(blockaddrs[m.groups(0)[0]][m.groups(0)[1]]), js)
pre = blockaddrsize(blockaddrsize_mem(indexize(indexize_mem(pre))))
if settings.get('ASM_JS'):
# move postsets into the asm module
class PostSets: js = ''
def handle_post_sets(m):
PostSets.js = m.group(0)
return '\n'
pre = re.sub(r'function runPostSets[^}]+}', handle_post_sets, pre)
#if DEBUG: outfile.write('// pre\n')
outfile.write(pre)
pre = None
#if DEBUG: outfile.write('// funcs\n')
# forward
forwarded_data = json.dumps(forwarded_json)
forwarded_file = temp_files.get('.2.json').name
open(forwarded_file, 'w').write(indexize(forwarded_data))
if DEBUG: logging.debug(' emscript: phase 2c took %s seconds' % (time.time() - t))
# Phase 3 - post
if DEBUG: t = time.time()
post_file = temp_files.get('.post.ll').name
open(post_file, 'w').write('\n') # no input, just processing of forwarded data
out = jsrun.run_js(compiler, compiler_engine, [settings_file, post_file, 'post', forwarded_file] + libraries, stdout=subprocess.PIPE, stderr=STDERR_FILE,
cwd=path_from_root('src'))
post, last_forwarded_data = out.split('//FORWARDED_DATA:') # if this fails, perhaps the process failed prior to printing forwarded data?
last_forwarded_json = json.loads(last_forwarded_data)
if settings.get('ASM_JS'):
post_funcs, post_rest = post.split('// EMSCRIPTEN_END_FUNCS\n')
post = post_rest
# Move preAsms to their right place
def move_preasm(m):
contents = m.groups(0)[0]
outfile.write(contents + '\n')
return ''
post_funcs = re.sub(r'/\* PRE_ASM \*/(.*)\n', lambda m: move_preasm(m), post_funcs)
funcs_js += ['\n' + post_funcs + '// EMSCRIPTEN_END_FUNCS\n']
simple = os.environ.get('EMCC_SIMPLE_ASM')
class Counter:
i = 0
j = 0
pre_tables = last_forwarded_json['Functions']['tables']['pre']
del last_forwarded_json['Functions']['tables']['pre']
def make_table(sig, raw):
    # Build the asm.js function table for one signature |sig| from the raw JS
    # array literal |raw|. Returns a pair:
    #   (bad-function definition, patched table literal)
    # where the bad function fills null ('0') slots so that calling through
    # them aborts (or reports via nullFunc when ASSERTIONS is on).
    i = Counter.i
    Counter.i += 1
    bad = 'b' + str(i)  # name of this table's bad-call handler
    params = ','.join(['p%d' % p for p in range(len(sig)-1)])
    coercions = ';'.join(['p%d = %s' % (p, shared.JS.make_coercion('p%d' % p, sig[p+1], settings)) for p in range(len(sig)-1)]) + ';'
    ret = '' if sig[0] == 'v' else ('return %s' % shared.JS.make_initializer(sig[0], settings))
    start = raw.index('[')
    end = raw.rindex(']')
    body = raw[start+1:end].split(',')
    # Reserve aligned slots for the jsCall_* trampolines.
    for j in range(settings['RESERVED_FUNCTION_POINTERS']):
        body[settings['FUNCTION_POINTER_ALIGNMENT'] * (1 + j)] = 'jsCall_%s_%s' % (sig, j)
    Counter.j = 0
    def fix_item(item):
        # Replace null entries with the bad handler; insert a newline every
        # 30 items to keep the emitted table lines short.
        Counter.j += 1
        newline = Counter.j % 30 == 29
        if item == '0': return bad if not newline else (bad + '\n')
        return item if not newline else (item + '\n')
    body = ','.join(map(fix_item, body))
    return ('function %s(%s) { %s %s(%d); %s }' % (bad, params, coercions, 'abort' if not settings['ASSERTIONS'] else 'nullFunc', i, ret), ''.join([raw[:start+1], body, raw[end:]]))
infos = [make_table(sig, raw) for sig, raw in last_forwarded_json['Functions']['tables'].iteritems()]
function_tables_defs = '\n'.join([info[0] for info in infos]) + '\n// EMSCRIPTEN_END_FUNCS\n' + '\n'.join([info[1] for info in infos])
asm_setup = ''
maths = ['Math.' + func for func in ['floor', 'abs', 'sqrt', 'pow', 'cos', 'sin', 'tan', 'acos', 'asin', 'atan', 'atan2', 'exp', 'log', 'ceil', 'imul']]
fundamentals = ['Math', 'Int8Array', 'Int16Array', 'Int32Array', 'Uint8Array', 'Uint16Array', 'Uint32Array', 'Float32Array', 'Float64Array']
if settings['ALLOW_MEMORY_GROWTH']: fundamentals.append('byteLength')
math_envs = ['Math.min'] # TODO: move min to maths
asm_setup += '\n'.join(['var %s = %s;' % (f.replace('.', '_'), f) for f in math_envs])
if settings['PRECISE_F32']: maths += ['Math.fround']
basic_funcs = ['abort', 'assert', 'asmPrintInt', 'asmPrintFloat'] + [m.replace('.', '_') for m in math_envs]
if settings['RESERVED_FUNCTION_POINTERS'] > 0: basic_funcs.append('jsCall')
if settings['SAFE_HEAP']: basic_funcs += ['SAFE_HEAP_LOAD', 'SAFE_HEAP_STORE', 'SAFE_FT_MASK']
if settings['CHECK_HEAP_ALIGN']: basic_funcs += ['CHECK_ALIGN_2', 'CHECK_ALIGN_4', 'CHECK_ALIGN_8']
if settings['ASSERTIONS']:
basic_funcs += ['nullFunc']
asm_setup += 'function nullFunc(x) { Module["printErr"]("Invalid function pointer called. Perhaps a miscast function pointer (check compilation warnings) or bad vtable lookup (maybe due to derefing a bad pointer, like NULL)?"); abort(x) }\n'
basic_vars = ['STACKTOP', 'STACK_MAX', 'tempDoublePtr', 'ABORT']
basic_float_vars = ['NaN', 'Infinity']
if forwarded_json['Types']['preciseI64MathUsed'] or \
forwarded_json['Functions']['libraryFunctions'].get('_llvm_cttz_i32') or \
forwarded_json['Functions']['libraryFunctions'].get('_llvm_ctlz_i32'):
basic_vars += ['cttz_i8', 'ctlz_i8']
if settings.get('DLOPEN_SUPPORT'):
for sig in last_forwarded_json['Functions']['tables'].iterkeys():
basic_vars.append('F_BASE_%s' % sig)
asm_setup += ' var F_BASE_%s = %s;\n' % (sig, 'FUNCTION_TABLE_OFFSET' if settings.get('SIDE_MODULE') else '0') + '\n'
asm_runtime_funcs = ['stackAlloc', 'stackSave', 'stackRestore', 'setThrew'] + ['setTempRet%d' % i for i in range(10)] + ['getTempRet%d' % i for i in range(10)]
# function tables
function_tables = ['dynCall_' + table for table in last_forwarded_json['Functions']['tables']]
function_tables_impls = []
for sig in last_forwarded_json['Functions']['tables'].iterkeys():
args = ','.join(['a' + str(i) for i in range(1, len(sig))])
arg_coercions = ' '.join(['a' + str(i) + '=' + shared.JS.make_coercion('a' + str(i), sig[i], settings) + ';' for i in range(1, len(sig))])
coerced_args = ','.join([shared.JS.make_coercion('a' + str(i), sig[i], settings) for i in range(1, len(sig))])
ret = ('return ' if sig[0] != 'v' else '') + shared.JS.make_coercion('FUNCTION_TABLE_%s[index&{{{ FTM_%s }}}](%s)' % (sig, sig, coerced_args), sig[0], settings)
function_tables_impls.append('''
function dynCall_%s(index%s%s) {
index = index|0;
%s
%s;
}
''' % (sig, ',' if len(sig) > 1 else '', args, arg_coercions, ret))
for i in range(settings['RESERVED_FUNCTION_POINTERS']):
jsret = ('return ' if sig[0] != 'v' else '') + shared.JS.make_coercion('jsCall(%d%s%s)' % (i, ',' if coerced_args else '', coerced_args), sig[0], settings)
function_tables_impls.append('''
function jsCall_%s_%s(%s) {
%s
%s;
}
''' % (sig, i, args, arg_coercions, jsret))
shared.Settings.copy(settings)
asm_setup += '\n' + shared.JS.make_invoke(sig) + '\n'
basic_funcs.append('invoke_%s' % sig)
if settings.get('DLOPEN_SUPPORT'):
asm_setup += '\n' + shared.JS.make_extcall(sig) + '\n'
basic_funcs.append('extCall_%s' % sig)
# calculate exports
exported_implemented_functions = list(exported_implemented_functions)
exported_implemented_functions.append('runPostSets')
exports = []
if not simple:
for export in exported_implemented_functions + asm_runtime_funcs + function_tables:
exports.append("%s: %s" % (export, export))
exports = '{ ' + ', '.join(exports) + ' }'
else:
exports = '_main'
# calculate globals
try:
del forwarded_json['Variables']['globals']['_llvm_global_ctors'] # not a true variable
except:
pass
# If no named globals, only need externals
global_vars = map(lambda g: g['name'], filter(lambda g: settings['NAMED_GLOBALS'] or g.get('external') or g.get('unIndexable'), forwarded_json['Variables']['globals'].values()))
global_funcs = [key for key, value in forwarded_json['Functions']['libraryFunctions'].iteritems() if value != 2]
def math_fix(g):
    # 'Math_floor' -> 'floor'; names without the 'Math_' prefix pass through.
    if g.startswith('Math_'):
        return g.split('_')[1]
    return g
asm_global_funcs = ''.join([' var ' + g.replace('.', '_') + '=global.' + g + ';\n' for g in maths]) + \
''.join([' var ' + g + '=env.' + math_fix(g) + ';\n' for g in basic_funcs + global_funcs])
asm_global_vars = ''.join([' var ' + g + '=env.' + g + '|0;\n' for g in basic_vars + global_vars]) + \
''.join([' var ' + g + '=+env.' + g + ';\n' for g in basic_float_vars])
# In linkable modules, we need to add some explicit globals for global variables that can be linked and used across modules
if settings.get('MAIN_MODULE') or settings.get('SIDE_MODULE'):
assert settings.get('TARGET_ASMJS_UNKNOWN_EMSCRIPTEN'), 'TODO: support x86 target when linking modules (needs offset of 4 and not 8 here)'
for key, value in forwarded_json['Variables']['globals'].iteritems():
if value.get('linkable'):
init = forwarded_json['Variables']['indexedGlobals'][key] + 8 # 8 is Runtime.GLOBAL_BASE / STATIC_BASE
if settings.get('SIDE_MODULE'): init = '(H_BASE+' + str(init) + ')|0'
asm_global_vars += ' var %s=%s;\n' % (key, str(init))
# sent data
the_global = '{ ' + ', '.join(['"' + math_fix(s) + '": ' + s for s in fundamentals]) + ' }'
sending = '{ ' + ', '.join(['"' + math_fix(s) + '": ' + s for s in basic_funcs + global_funcs + basic_vars + basic_float_vars + global_vars]) + ' }'
# received
if not simple:
receiving = ';\n'.join(['var ' + s + ' = Module["' + s + '"] = asm["' + s + '"]' for s in exported_implemented_functions + function_tables])
else:
receiving = 'var _main = Module["_main"] = asm;'
# finalize
if DEBUG: logging.debug('asm text sizes' + str([map(len, funcs_js), len(asm_setup), len(asm_global_vars), len(asm_global_funcs), len(pre_tables), len('\n'.join(function_tables_impls)), len(function_tables_defs.replace('\n', '\n ')), len(exports), len(the_global), len(sending), len(receiving)]))
funcs_js = ['''
%s
function asmPrintInt(x, y) {
Module.print('int ' + x + ',' + y);// + ' ' + new Error().stack);
}
function asmPrintFloat(x, y) {
Module.print('float ' + x + ',' + y);// + ' ' + new Error().stack);
}
// EMSCRIPTEN_START_ASM
var asm = (function(global, env, buffer) {
%s
%s
''' % (asm_setup, "'use asm';" if not forwarded_json['Types']['hasInlineJS'] and not settings['SIDE_MODULE'] and settings['ASM_JS'] == 1 else "'almost asm';",
'''
var HEAP8 = new global.Int8Array(buffer);
var HEAP16 = new global.Int16Array(buffer);
var HEAP32 = new global.Int32Array(buffer);
var HEAPU8 = new global.Uint8Array(buffer);
var HEAPU16 = new global.Uint16Array(buffer);
var HEAPU32 = new global.Uint32Array(buffer);
var HEAPF32 = new global.Float32Array(buffer);
var HEAPF64 = new global.Float64Array(buffer);
''' if not settings['ALLOW_MEMORY_GROWTH'] else '''
var Int8View = global.Int8Array;
var Int16View = global.Int16Array;
var Int32View = global.Int32Array;
var Uint8View = global.Uint8Array;
var Uint16View = global.Uint16Array;
var Uint32View = global.Uint32Array;
var Float32View = global.Float32Array;
var Float64View = global.Float64Array;
var HEAP8 = new Int8View(buffer);
var HEAP16 = new Int16View(buffer);
var HEAP32 = new Int32View(buffer);
var HEAPU8 = new Uint8View(buffer);
var HEAPU16 = new Uint16View(buffer);
var HEAPU32 = new Uint32View(buffer);
var HEAPF32 = new Float32View(buffer);
var HEAPF64 = new Float64View(buffer);
var byteLength = global.byteLength;
''') + '\n' + asm_global_vars + '''
var __THREW__ = 0;
var threwValue = 0;
var setjmpId = 0;
var undef = 0;
var tempInt = 0, tempBigInt = 0, tempBigIntP = 0, tempBigIntS = 0, tempBigIntR = 0.0, tempBigIntI = 0, tempBigIntD = 0, tempValue = 0, tempDouble = 0.0;
''' + ''.join(['''
var tempRet%d = 0;''' % i for i in range(10)]) + '\n' + asm_global_funcs] + [' var tempFloat = %s;\n' % ('Math_fround(0)' if settings.get('PRECISE_F32') else '0.0')] + ([' const f0 = Math_fround(0);\n'] if settings.get('PRECISE_F32') else []) + ['''
// EMSCRIPTEN_START_FUNCS
function stackAlloc(size) {
size = size|0;
var ret = 0;
ret = STACKTOP;
STACKTOP = (STACKTOP + size)|0;
''' + ('STACKTOP = (STACKTOP + 3)&-4;' if settings['TARGET_X86'] else 'STACKTOP = (STACKTOP + 7)&-8;') + '''
return ret|0;
}
function stackSave() {
return STACKTOP|0;
}
function stackRestore(top) {
top = top|0;
STACKTOP = top;
}
function setThrew(threw, value) {
threw = threw|0;
value = value|0;
if ((__THREW__|0) == 0) {
__THREW__ = threw;
threwValue = value;
}
}
function copyTempFloat(ptr) {
ptr = ptr|0;
HEAP8[tempDoublePtr>>0] = HEAP8[ptr>>0];
HEAP8[tempDoublePtr+1>>0] = HEAP8[ptr+1>>0];
HEAP8[tempDoublePtr+2>>0] = HEAP8[ptr+2>>0];
HEAP8[tempDoublePtr+3>>0] = HEAP8[ptr+3>>0];
}
function copyTempDouble(ptr) {
ptr = ptr|0;
HEAP8[tempDoublePtr>>0] = HEAP8[ptr>>0];
HEAP8[tempDoublePtr+1>>0] = HEAP8[ptr+1>>0];
HEAP8[tempDoublePtr+2>>0] = HEAP8[ptr+2>>0];
HEAP8[tempDoublePtr+3>>0] = HEAP8[ptr+3>>0];
HEAP8[tempDoublePtr+4>>0] = HEAP8[ptr+4>>0];
HEAP8[tempDoublePtr+5>>0] = HEAP8[ptr+5>>0];
HEAP8[tempDoublePtr+6>>0] = HEAP8[ptr+6>>0];
HEAP8[tempDoublePtr+7>>0] = HEAP8[ptr+7>>0];
}
''' + ''.join(['''
function setTempRet%d(value) {
value = value|0;
tempRet%d = value;
}
''' % (i, i) for i in range(10)]) + ''.join(['''
function getTempRet%d() {
return tempRet%d|0;
}
''' % (i, i) for i in range(10)])] + [PostSets.js + '\n'] + funcs_js + ['''
%s
return %s;
})
// EMSCRIPTEN_END_ASM
(%s, %s, buffer);
%s;
''' % (pre_tables + '\n'.join(function_tables_impls) + '\n' + function_tables_defs.replace('\n', '\n '), exports, the_global, sending, receiving)]
if not settings.get('SIDE_MODULE'):
funcs_js.append('''
Runtime.stackAlloc = asm['stackAlloc'];
Runtime.stackSave = asm['stackSave'];
Runtime.stackRestore = asm['stackRestore'];
Runtime.setTempRet0 = asm['setTempRet0'];
Runtime.getTempRet0 = asm['getTempRet0'];
''')
# Set function table masks
masks = {}
max_mask = 0
for sig, table in last_forwarded_json['Functions']['tables'].iteritems():
mask = table.count(',')
masks[sig] = str(mask)
max_mask = max(mask, max_mask)
def function_table_maskize(js, masks):
    # Substitute each {{{ FTM_sig }}} placeholder with the mask value
    # recorded in |masks| for that signature.
    def fill(m):
        return masks[m.group(1)]
    return re.sub(r'{{{ FTM_([\w\d_$]+) }}}', fill, js)
funcs_js = map(lambda js: function_table_maskize(js, masks), funcs_js)
if settings.get('DLOPEN_SUPPORT'):
funcs_js.append('''
asm.maxFunctionIndex = %(max_mask)d;
DLFCN.registerFunctions(asm, %(max_mask)d+1, %(sigs)s, Module);
Module.SYMBOL_TABLE = SYMBOL_TABLE;
''' % { 'max_mask': max_mask, 'sigs': str(map(str, last_forwarded_json['Functions']['tables'].keys())) })
else:
function_tables_defs = '\n'.join([table for table in last_forwarded_json['Functions']['tables'].itervalues()])
outfile.write(function_tables_defs)
funcs_js = ['''
// EMSCRIPTEN_START_FUNCS
'''] + funcs_js + ['''
// EMSCRIPTEN_END_FUNCS
''']
# Create symbol table for self-dlopen
if settings.get('DLOPEN_SUPPORT'):
symbol_table = {}
for k, v in forwarded_json['Variables']['indexedGlobals'].iteritems():
if forwarded_json['Variables']['globals'][k]['named']:
symbol_table[k] = str(v + forwarded_json['Runtime']['GLOBAL_BASE'])
for raw in last_forwarded_json['Functions']['tables'].itervalues():
if raw == '': continue
table = map(string.strip, raw[raw.find('[')+1:raw.find(']')].split(","))
for i in range(len(table)):
value = table[i]
if value != '0':
if settings.get('SIDE_MODULE'):
symbol_table[value] = 'FUNCTION_TABLE_OFFSET+' + str(i)
else:
symbol_table[value] = str(i)
outfile.write("var SYMBOL_TABLE = %s;" % json.dumps(symbol_table).replace('"', ''))
for i in range(len(funcs_js)): # do this loop carefully to save memory
funcs_js_item = funcs_js[i]
funcs_js[i] = None
funcs_js_item = indexize(funcs_js_item)
funcs_js_item = blockaddrsize(funcs_js_item)
if WINDOWS: funcs_js_item = funcs_js_item.replace('\r\n', '\n') # Normalize to UNIX line endings, otherwise writing to text file will duplicate \r\n to \r\r\n!
outfile.write(funcs_js_item)
funcs_js = None
indexized = indexize(post)
if WINDOWS: indexized = indexized.replace('\r\n', '\n') # Normalize to UNIX line endings, otherwise writing to text file will duplicate \r\n to \r\r\n!
outfile.write(indexized)
outfile.close()
if DEBUG: logging.debug(' emscript: phase 3 took %s seconds' % (time.time() - t))
# emscript_fast: emscript'en code using the 'fast' compilation path, using
# an LLVM backend
# FIXME: this is just a copy-paste of normal emscript(), and we trample it
# if the proper env var is set (see below). we should refactor to
# share code between the two, once emscript_fast stabilizes (or,
# leaving it separate like it is will make it trivial to rip out
# if the experiment fails)
def emscript_fast(infile, settings, outfile, libraries=[], compiler_engine=None,
jcache=None, temp_files=None, DEBUG=None, DEBUG_CACHE=None):
"""Runs the emscripten LLVM-to-JS compiler.
Args:
infile: The path to the input LLVM assembly file.
settings: JSON-formatted settings that override the values
defined in src/settings.js.
outfile: The file where the output is written.
"""
assert settings['ASM_JS'], 'fastcomp is asm.js-only (mode 1 or 2)'
success = False
try:
# Overview:
# * Run LLVM backend to emit JS. JS includes function bodies, memory initializer,
# and various metadata
# * Run compiler.js on the metadata to emit the shell js code, pre/post-ambles,
# JS library dependencies, etc.
temp_js = temp_files.get('.4.js').name
backend_compiler = os.path.join(shared.LLVM_ROOT, 'llc')
backend_args = [backend_compiler, infile, '-march=js', '-filetype=asm', '-o', temp_js]
if settings['PRECISE_F32']:
backend_args += ['-emscripten-precise-f32']
if settings['WARN_UNALIGNED']:
backend_args += ['-emscripten-warn-unaligned']
if settings['RESERVED_FUNCTION_POINTERS'] > 0:
backend_args += ['-emscripten-reserved-function-pointers=%d' % settings['RESERVED_FUNCTION_POINTERS']]
if settings['ASSERTIONS'] > 0:
backend_args += ['-emscripten-assertions=%d' % settings['ASSERTIONS']]
if settings['ALIASING_FUNCTION_POINTERS'] == 0:
backend_args += ['-emscripten-no-aliasing-function-pointers']
if settings['GLOBAL_BASE'] >= 0:
backend_args += ['-emscripten-global-base=%d' % settings['GLOBAL_BASE']]
backend_args += ['-O' + str(settings['OPT_LEVEL'])]
if DEBUG:
logging.debug('emscript: llvm backend: ' + ' '.join(backend_args))
t = time.time()
shared.jsrun.timeout_run(subprocess.Popen(backend_args, stdout=subprocess.PIPE))
if DEBUG:
logging.debug(' emscript: llvm backend took %s seconds' % (time.time() - t))
t = time.time()
# Split up output
backend_output = open(temp_js).read()
#if DEBUG: print >> sys.stderr, backend_output
start_funcs_marker = '// EMSCRIPTEN_START_FUNCTIONS'
end_funcs_marker = '// EMSCRIPTEN_END_FUNCTIONS'
metadata_split_marker = '// EMSCRIPTEN_METADATA'
start_funcs = backend_output.index(start_funcs_marker)
end_funcs = backend_output.rindex(end_funcs_marker)
metadata_split = backend_output.rindex(metadata_split_marker)
funcs = backend_output[start_funcs+len(start_funcs_marker):end_funcs]
metadata_raw = backend_output[metadata_split+len(metadata_split_marker):]
#if DEBUG: print >> sys.stderr, "METAraw", metadata_raw
metadata = json.loads(metadata_raw)
mem_init = backend_output[end_funcs+len(end_funcs_marker):metadata_split]
#if DEBUG: print >> sys.stderr, "FUNCS", funcs
#if DEBUG: print >> sys.stderr, "META", metadata
#if DEBUG: print >> sys.stderr, "meminit", mem_init
# if emulating pointer casts, force all tables to the size of the largest
if settings['EMULATE_FUNCTION_POINTER_CASTS']:
max_size = 0
for k, v in metadata['tables'].iteritems():
max_size = max(max_size, v.count(',')+1)
for k, v in metadata['tables'].iteritems():
curr = v.count(',')+1
if curr < max_size:
metadata['tables'][k] = v.replace(']', (',0'*(max_size - curr)) + ']')
# function table masks
table_sizes = {}
for k, v in metadata['tables'].iteritems():
table_sizes[k] = str(v.count(',')) # undercounts by one, but that is what we want
#if settings['ASSERTIONS'] >= 2 and table_sizes[k] == 0:
# print >> sys.stderr, 'warning: no function pointers with signature ' + k + ', but there is a call, which will abort if it occurs (this can result from undefined behavior, check for compiler warnings on your source files and consider -Werror)'
funcs = re.sub(r"#FM_(\w+)#", lambda m: table_sizes[m.groups(0)[0]], funcs)
# fix +float into float.0, if not running js opts
if not settings['RUNNING_JS_OPTS']:
def fix_dot_zero(m):
    # re.sub callback: make sure the numeric literal carries a decimal point
    # so emitted JS keeps double semantics, e.g. +5 -> 5.0, +1e3 -> 1.0e3.
    # TODO: handle 0x floats?
    num = m.group(3)
    if '.' not in num:
        e = num.find('e')
        if e < 0:
            num = num + '.0'
        else:
            # insert the '.0' before the exponent
            num = num[:e] + '.0' + num[e:]
    return m.group(1) + m.group(2) + num
funcs = re.sub(r'([(=,+\-*/%<>:?] *)\+(-?)((0x)?[0-9a-f]*\.?[0-9]+([eE][-+]?[0-9]+)?)', lambda m: fix_dot_zero(m), funcs)
# js compiler
if DEBUG: logging.debug('emscript: js compiler glue')
# Settings changes
assert settings['TARGET_ASMJS_UNKNOWN_EMSCRIPTEN'] == 1
settings['TARGET_ASMJS_UNKNOWN_EMSCRIPTEN'] = 2
i64_funcs = ['i64Add', 'i64Subtract', '__muldi3', '__divdi3', '__udivdi3', '__remdi3', '__uremdi3']
for i64_func in i64_funcs:
if i64_func in metadata['declares']:
settings['PRECISE_I64_MATH'] = 2
break
metadata['declares'] = filter(lambda i64_func: i64_func not in ['getHigh32', 'setHigh32', '__muldi3', '__divdi3', '__remdi3', '__udivdi3', '__uremdi3'], metadata['declares']) # FIXME: do these one by one as normal js lib funcs
# Integrate info from backend
settings['DEFAULT_LIBRARY_FUNCS_TO_INCLUDE'] = list(
set(settings['DEFAULT_LIBRARY_FUNCS_TO_INCLUDE'] + map(shared.JS.to_nice_ident, metadata['declares'])).difference(
map(lambda x: x[1:], metadata['implementedFunctions'])
)
) + map(lambda x: x[1:], metadata['externs'])
if metadata['simd']:
settings['SIMD'] = 1
if metadata['cantValidate'] and settings['ASM_JS'] != 2:
logging.warning('disabling asm.js validation due to use of non-supported features: ' + metadata['cantValidate'])
settings['ASM_JS'] = 2
# Save settings to a file to work around v8 issue 1579
settings_file = temp_files.get('.txt').name
def save_settings():
    # Serialize |settings| to |settings_file| so compiler.js can read it
    # (works around v8 issue 1579, per the comment above the call site).
    global settings_text
    settings_text = json.dumps(settings, sort_keys=True)
    s = open(settings_file, 'w')
    s.write(settings_text)
    s.close()
save_settings()
# Call js compiler
if DEBUG: t = time.time()
out = jsrun.run_js(path_from_root('src', 'compiler.js'), compiler_engine,
[settings_file, ';', 'glue'] + libraries, stdout=subprocess.PIPE, stderr=STDERR_FILE,
cwd=path_from_root('src'), error_limit=300)
assert '//FORWARDED_DATA:' in out, 'Did not receive forwarded data in pre output - process failed?'
glue, forwarded_data = out.split('//FORWARDED_DATA:')
if DEBUG:
logging.debug(' emscript: glue took %s seconds' % (time.time() - t))
t = time.time()
last_forwarded_json = forwarded_json = json.loads(forwarded_data)
# merge in information from llvm backend
last_forwarded_json['Functions']['tables'] = metadata['tables']
'''indexed_functions = set()
for key in forwarded_json['Functions']['indexedFunctions'].iterkeys():
indexed_functions.add(key)'''
pre, post = glue.split('// EMSCRIPTEN_END_FUNCS')
#print >> sys.stderr, 'glue:', pre, '\n\n||||||||||||||||\n\n', post, '...............'
# memory and global initializers
global_initializers = ', '.join(map(lambda i: '{ func: function() { %s() } }' % i, metadata['initializers']))
if settings['SIMD'] == 1:
pre = open(path_from_root(os.path.join('src', 'ecmascript_simd.js'))).read() + '\n\n' + pre
staticbump = mem_init.count(',')+1
while staticbump % 16 != 0: staticbump += 1
pre = pre.replace('STATICTOP = STATIC_BASE + 0;', '''STATICTOP = STATIC_BASE + %d;
/* global initializers */ __ATINIT__.push(%s);
%s''' % (staticbump, global_initializers, mem_init)) # XXX wrong size calculation!
funcs_js = [funcs]
parts = pre.split('// ASM_LIBRARY FUNCTIONS\n')
if len(parts) > 1:
pre = parts[0]
funcs_js.append(parts[1])
# merge forwarded data
settings['EXPORTED_FUNCTIONS'] = forwarded_json['EXPORTED_FUNCTIONS']
all_exported_functions = set(settings['EXPORTED_FUNCTIONS']) # both asm.js and otherwise
for additional_export in settings['DEFAULT_LIBRARY_FUNCS_TO_INCLUDE']: # additional functions to export from asm, if they are implemented
all_exported_functions.add('_' + additional_export)
if settings['EXPORT_FUNCTION_TABLES']:
for table in last_forwarded_json['Functions']['tables'].values():
for func in table.split('[')[1].split(']')[0].split(','):
if func[0] == '_':
all_exported_functions.add(func)
exported_implemented_functions = set(metadata['exports'])
export_bindings = settings['EXPORT_BINDINGS']
export_all = settings['EXPORT_ALL']
all_implemented = metadata['implementedFunctions'] + forwarded_json['Functions']['implementedFunctions'].keys() # XXX perf?
for key in all_implemented:
if key in all_exported_functions or export_all or (export_bindings and key.startswith('_emscripten_bind')):
exported_implemented_functions.add(key)
implemented_functions = set(metadata['implementedFunctions'])
if settings['ASSERTIONS'] and settings.get('ORIGINAL_EXPORTED_FUNCTIONS'):
original_exports = settings['ORIGINAL_EXPORTED_FUNCTIONS']
if original_exports[0] == '@': original_exports = json.loads(open(original_exports[1:]).read())
for requested in original_exports:
if requested not in all_implemented and \
requested != '_malloc': # special-case malloc, EXPORTED by default for internal use, but we bake in a trivial allocator and warn at runtime if used in ASSERTIONS
logging.warning('function requested to be exported, but not implemented: "%s"', requested)
# Add named globals
named_globals = '\n'.join(['var %s = %s;' % (k, v) for k, v in metadata['namedGlobals'].iteritems()])
pre = pre.replace('// === Body ===', '// === Body ===\n' + named_globals + '\n')
#if DEBUG: outfile.write('// pre\n')
outfile.write(pre)
pre = None
#if DEBUG: outfile.write('// funcs\n')
# when emulating function pointer casts, we need to know what is the target of each pointer
if settings['EMULATE_FUNCTION_POINTER_CASTS']:
function_pointer_targets = {}
for sig, table in last_forwarded_json['Functions']['tables'].iteritems():
start = table.index('[')
end = table.rindex(']')
body = table[start+1:end].split(',')
parsed = map(lambda x: x.strip(), body)
for i in range(len(parsed)):
if parsed[i] != '0':
assert i not in function_pointer_targets
function_pointer_targets[i] = [sig, str(parsed[i])]
# Move preAsms to their right place
def move_preasm(m):
    # re.sub callback: emit /* PRE_ASM */ content directly to the output file
    # (ahead of the asm module) and remove it from the funcs text.
    contents = m.groups(0)[0]
    outfile.write(contents + '\n')
    return ''
if not settings['BOOTSTRAPPING_STRUCT_INFO']:
funcs_js[1] = re.sub(r'/\* PRE_ASM \*/(.*)\n', lambda m: move_preasm(m), funcs_js[1])
class Counter:
    # Shared mutable counters for the table-building helpers below; also
    # gains a `pre` attribute that collects generated support functions
    # (class attributes stand in for Python 2's missing `nonlocal`).
    i = 0
    j = 0
if 'pre' in last_forwarded_json['Functions']['tables']:
pre_tables = last_forwarded_json['Functions']['tables']['pre']
del last_forwarded_json['Functions']['tables']['pre']
else:
pre_tables = ''
def unfloat(s):
    # Lower float to double for ffis; all other type letters pass through.
    if s == 'f':
        return 'd'
    return s
if settings['ASSERTIONS'] >= 2:
debug_tables = {}
def make_params(sig):
    # 'p0,p1,...' — one parameter name per argument (sig[0] is the return type).
    return ','.join('p' + str(p) for p in range(len(sig) - 1))
# Comma-joined, ffi-coerced parameter list; float params are lowered to double via unfloat().
def make_coerced_params(sig): return ','.join([shared.JS.make_coercion('p%d', unfloat(sig[p+1]), settings) % p for p in range(len(sig)-1)])
# One coercion statement per parameter, joined with ';' and ';'-terminated.
def make_coercions(sig): return ';'.join(['p%d = %s' % (p, shared.JS.make_coercion('p%d' % p, sig[p+1], settings)) for p in range(len(sig)-1)]) + ';'
def make_func(name, code, params, coercions):
    # Assemble a complete JS function definition from its pieces.
    return 'function {}({}) {{ {} {} }}'.format(name, params, coercions, code)
def make_table(sig, raw):
    # Build the asm.js function table for signature |sig| from the raw JS
    # array literal |raw|. Returns (support functions text, patched table
    # literal). Null slots are pointed at "bad" abort stubs (or, when
    # emulating function pointer casts, at argument-adapting wrappers), and
    # imported (non-implemented) entries get ffi coercion wrappers.
    params = make_params(sig)
    coerced_params = make_coerced_params(sig)
    coercions = make_coercions(sig)
    def make_bad(target=None):
        # Create an abort stub for a null table slot. |target| is the index
        # reported in the runtime diagnostic (defaults to the stub's own id).
        i = Counter.i
        Counter.i += 1
        if target is None: target = i
        name = 'b' + str(i)
        if not settings['ASSERTIONS']:
            code = 'abort(%s);' % target
        else:
            code = 'nullFunc_' + sig + '(%d);' % target
        if sig[0] != 'v':
            code += 'return %s' % shared.JS.make_initializer(sig[0], settings) + ';'
        return name, make_func(name, code, params, coercions)
    bad, bad_func = make_bad() # the default bad func
    if settings['ASSERTIONS'] <= 1:
        Counter.pre = [bad_func]
    else:
        # with ASSERTIONS >= 2 each null slot gets its own stub (see below)
        Counter.pre = []
    start = raw.index('[')
    end = raw.rindex(']')
    body = raw[start+1:end].split(',')
    # Reserve aligned slots for the jsCall_* trampolines.
    for j in range(settings['RESERVED_FUNCTION_POINTERS']):
        curr = 'jsCall_%s_%s' % (sig, j)
        body[settings['FUNCTION_POINTER_ALIGNMENT'] * (1 + j)] = curr
        implemented_functions.add(curr)
    Counter.j = 0
    def fix_item(item):
        # Rewrite a single table entry; newline every 30 items keeps the
        # emitted table lines short.
        j = Counter.j
        Counter.j += 1
        newline = Counter.j % 30 == 29
        if item == '0':
            if j > 0 and settings['EMULATE_FUNCTION_POINTER_CASTS'] and j in function_pointer_targets: # emulate all non-null pointer calls, if asked to
                proper_sig, proper_target = function_pointer_targets[j]
                def make_emulated_param(i):
                    # Coerce the wrapper's param to the target's type; params
                    # the caller did not pass get a synthesized zero.
                    if i >= len(sig): return shared.JS.make_initializer(proper_sig[i], settings) # extra param, just send a zero
                    return shared.JS.make_coercion('p%d' % (i-1), proper_sig[i], settings, convert_from=sig[i])
                proper_code = proper_target + '(' + ','.join(map(lambda i: make_emulated_param(i+1), range(len(proper_sig)-1))) + ')'
                if proper_sig[0] != 'v':
                    # proper sig has a return, which the wrapper may or may not use
                    proper_code = shared.JS.make_coercion(proper_code, proper_sig[0], settings)
                    if sig[0] != 'v':
                        proper_code = 'return ' + proper_code
                else:
                    # proper sig has no return, we may need a fake return
                    if sig[0] != 'v':
                        proper_code = 'return ' + shared.JS.make_initializer(sig[0], settings)
                name = 'fpemu_%s_%d' % (sig, j)
                wrapper = make_func(name, proper_code, params, coercions)
                Counter.pre.append(wrapper)
                return name if not newline else (name + '\n')
            if settings['ASSERTIONS'] <= 1:
                return bad if not newline else (bad + '\n')
            else:
                specific_bad, specific_bad_func = make_bad(j)
                Counter.pre.append(specific_bad_func)
                return specific_bad if not newline else (specific_bad + '\n')
        if item not in implemented_functions:
            # this is imported into asm, we must wrap it
            call_ident = item
            if call_ident in metadata['redirects']: call_ident = metadata['redirects'][call_ident]
            if not call_ident.startswith('_') and not call_ident.startswith('Math_'): call_ident = '_' + call_ident
            code = call_ident + '(' + coerced_params + ')'
            if sig[0] != 'v':
                # ffis cannot return float
                if sig[0] == 'f': code = '+' + code
                code = 'return ' + shared.JS.make_coercion(code, sig[0], settings)
            code += ';'
            Counter.pre.append(make_func(item + '__wrapper', code, params, coercions))
            return item + '__wrapper'
        return item if not newline else (item + '\n')
    if settings['ASSERTIONS'] >= 2:
        # keep the raw table contents around for the nullFunc_* diagnostics
        debug_tables[sig] = body
    body = ','.join(map(fix_item, body))
    return ('\n'.join(Counter.pre), ''.join([raw[:start+1], body, raw[end:]]))
infos = [make_table(sig, raw) for sig, raw in last_forwarded_json['Functions']['tables'].iteritems()]
Counter.pre = []
function_tables_defs = '\n'.join([info[0] for info in infos]) + '\n\n// EMSCRIPTEN_END_FUNCS\n' + '\n'.join([info[1] for info in infos])
asm_setup = ''
maths = ['Math.' + func for func in ['floor', 'abs', 'sqrt', 'pow', 'cos', 'sin', 'tan', 'acos', 'asin', 'atan', 'atan2', 'exp', 'log', 'ceil', 'imul', 'min', 'clz32']]
simdfloattypes = ['float32x4']
simdinttypes = ['int32x4']
simdtypes = simdfloattypes + simdinttypes
simdfuncs = ['check', 'add', 'sub', 'neg', 'mul',
'equal', 'lessThan', 'greaterThan',
'notEqual', 'lessThanOrEqual', 'greaterThanOrEqual',
'select', 'and', 'or', 'xor', 'not',
'splat', 'swizzle', 'shuffle',
'withX', 'withY', 'withZ', 'withW',
'load', 'store', 'loadX', 'storeX', 'loadXY', 'storeXY', 'loadXYZ', 'storeXYZ']
simdfloatfuncs = simdfuncs + ['div', 'min', 'max', 'minNum', 'maxNum', 'sqrt',
'abs', 'fromInt32x4', 'fromInt32x4Bits',
'reciprocalApproximation', 'reciprocalSqrtApproximation'];
simdintfuncs = simdfuncs + ['fromFloat32x4', 'fromFloat32x4Bits',
'shiftRightArithmeticByScalar',
'shiftRightLogicalByScalar',
'shiftLeftByScalar'];
fundamentals = ['Math', 'Int8Array', 'Int16Array', 'Int32Array', 'Uint8Array', 'Uint16Array', 'Uint32Array', 'Float32Array', 'Float64Array', 'NaN', 'Infinity']
if metadata['simd']:
fundamentals += ['SIMD']
if settings['ALLOW_MEMORY_GROWTH']: fundamentals.append('byteLength')
math_envs = []
provide_fround = settings['PRECISE_F32'] or settings['SIMD']
if provide_fround: maths += ['Math.fround']
basic_funcs = ['abort', 'assert'] + [m.replace('.', '_') for m in math_envs]
if settings['RESERVED_FUNCTION_POINTERS'] > 0: basic_funcs.append('jsCall')
if settings['SAFE_HEAP']: basic_funcs += ['SAFE_HEAP_LOAD', 'SAFE_HEAP_STORE', 'SAFE_FT_MASK']
if settings['CHECK_HEAP_ALIGN']: basic_funcs += ['CHECK_ALIGN_2', 'CHECK_ALIGN_4', 'CHECK_ALIGN_8']
if settings['ASSERTIONS']:
if settings['ASSERTIONS'] >= 2: import difflib
for sig in last_forwarded_json['Functions']['tables'].iterkeys():
basic_funcs += ['nullFunc_' + sig]
if settings['ASSERTIONS'] <= 1:
extra = ' Module["printErr"]("Build with ASSERTIONS=2 for more info.");'
pointer = ' '
else:
pointer = ' \'" + x + "\' '
asm_setup += '\nvar debug_table_' + sig + ' = ' + json.dumps(debug_tables[sig]) + ';'
extra = ' Module["printErr"]("This pointer might make sense in another type signature: '
# sort signatures, attempting to show most likely related ones first
sigs = last_forwarded_json['Functions']['tables'].keys()
def keyfunc(other):
    # Heuristic sort key for diagnostic output: lower means |other| is more
    # likely a related signature to |sig| (prefix relationship, textual
    # similarity, similar length, matching per-position type letters).
    ret = 0
    minlen = min(len(other), len(sig))
    # was min(...): the variable is used to normalize by length, and the
    # comments below show the intent is to scale by the longer signature
    maxlen = max(len(other), len(sig))
    if other.startswith(sig) or sig.startswith(other): ret -= 1000 # prioritize prefixes, could be dropped params
    ret -= 133*difflib.SequenceMatcher(a=other, b=sig).ratio() # prioritize on diff similarity
    ret += 15*abs(len(other) - len(sig))/float(maxlen) # deprioritize the bigger the length difference is
    for i in range(minlen):
        if other[i] == sig[i]: ret -= 5/float(maxlen) # prioritize on identically-placed params
    ret += 20*len(other) # deprioritize on length
    return ret
sigs.sort(key=keyfunc)
for other in sigs:
if other != sig:
extra += other + ': " + debug_table_' + other + '[x] + " '
extra += '"); '
asm_setup += '\nfunction nullFunc_' + sig + '(x) { Module["printErr"]("Invalid function pointer' + pointer + 'called with signature \'' + sig + '\'. ' + \
'Perhaps this is an invalid value (e.g. caused by calling a virtual method on a NULL pointer)? ' + \
'Or calling a function with an incorrect type, which will fail? ' + \
'(it is worth building your source files with -Werror (warnings are errors), as warnings can indicate undefined behavior which can cause this)' + \
'"); ' + extra + ' abort(x) }\n'
basic_vars = ['STACKTOP', 'STACK_MAX', 'tempDoublePtr', 'ABORT']
basic_float_vars = []
if metadata.get('preciseI64MathUsed'):
basic_vars += ['cttz_i8']
else:
if forwarded_json['Functions']['libraryFunctions'].get('_llvm_cttz_i32'):
basic_vars += ['cttz_i8']
if settings.get('DLOPEN_SUPPORT'):
for sig in last_forwarded_json['Functions']['tables'].iterkeys():
basic_vars.append('F_BASE_%s' % sig)
asm_setup += ' var F_BASE_%s = %s;\n' % (sig, 'FUNCTION_TABLE_OFFSET' if settings.get('SIDE_MODULE') else '0') + '\n'
asm_runtime_funcs = ['stackAlloc', 'stackSave', 'stackRestore', 'setThrew', 'setTempRet0', 'getTempRet0']
# See if we need ASYNCIFY functions
# We might not need them even if ASYNCIFY is enabled
need_asyncify = '_emscripten_alloc_async_context' in exported_implemented_functions
if need_asyncify:
basic_vars += ['___async', '___async_unwind', '___async_retval', '___async_cur_frame']
asm_runtime_funcs += ['setAsync']
if settings.get('EMTERPRETIFY'):
asm_runtime_funcs += ['emterpret']
if settings.get('EMTERPRETIFY_ASYNC'):
asm_runtime_funcs += ['setAsyncState', 'emtStackSave']
# function tables
function_tables = ['dynCall_' + table for table in last_forwarded_json['Functions']['tables']]
function_tables_impls = []
for sig in last_forwarded_json['Functions']['tables'].iterkeys():
args = ','.join(['a' + str(i) for i in range(1, len(sig))])
arg_coercions = ' '.join(['a' + str(i) + '=' + shared.JS.make_coercion('a' + str(i), sig[i], settings) + ';' for i in range(1, len(sig))])
coerced_args = ','.join([shared.JS.make_coercion('a' + str(i), sig[i], settings) for i in range(1, len(sig))])
ret = ('return ' if sig[0] != 'v' else '') + shared.JS.make_coercion('FUNCTION_TABLE_%s[index&{{{ FTM_%s }}}](%s)' % (sig, sig, coerced_args), sig[0], settings)
function_tables_impls.append('''
function dynCall_%s(index%s%s) {
index = index|0;
%s
%s;
}
''' % (sig, ',' if len(sig) > 1 else '', args, arg_coercions, ret))
ffi_args = ','.join([shared.JS.make_coercion('a' + str(i), sig[i], settings, ffi_arg=True) for i in range(1, len(sig))])
for i in range(settings['RESERVED_FUNCTION_POINTERS']):
jsret = ('return ' if sig[0] != 'v' else '') + shared.JS.make_coercion('jsCall(%d%s%s)' % (i, ',' if ffi_args else '', ffi_args), sig[0], settings, ffi_result=True)
function_tables_impls.append('''
function jsCall_%s_%s(%s) {
%s
%s;
}
''' % (sig, i, args, arg_coercions, jsret))
shared.Settings.copy(settings)
asm_setup += '\n' + shared.JS.make_invoke(sig) + '\n'
basic_funcs.append('invoke_%s' % sig)
if settings.get('DLOPEN_SUPPORT'):
asm_setup += '\n' + shared.JS.make_extcall(sig) + '\n'
basic_funcs.append('extCall_%s' % sig)
def quote(prop):
if settings['CLOSURE_COMPILER'] == 2:
return "'" + prop + "'"
else:
return prop
def access_quote(prop):
if settings['CLOSURE_COMPILER'] == 2:
return "['" + prop + "']"
else:
return '.' + prop
# calculate exports
exported_implemented_functions = list(exported_implemented_functions) + metadata['initializers']
exported_implemented_functions.append('runPostSets')
if settings['ALLOW_MEMORY_GROWTH']:
exported_implemented_functions.append('_emscripten_replace_memory')
exports = []
for export in exported_implemented_functions + asm_runtime_funcs + function_tables:
exports.append(quote(export) + ": " + export)
exports = '{ ' + ', '.join(exports) + ' }'
# calculate globals
try:
del forwarded_json['Variables']['globals']['_llvm_global_ctors'] # not a true variable
except:
pass
# If no named globals, only need externals
global_vars = metadata['externs'] #+ forwarded_json['Variables']['globals']
global_funcs = list(set([key for key, value in forwarded_json['Functions']['libraryFunctions'].iteritems() if value != 2]).difference(set(global_vars)).difference(implemented_functions))
def math_fix(g):
return g if not g.startswith('Math_') else g.split('_')[1]
asm_global_funcs = ''.join([' var ' + g.replace('.', '_') + '=global' + access_quote(g) + ';\n' for g in maths]);
asm_global_funcs += ''.join([' var ' + g + '=env' + access_quote(math_fix(g)) + ';\n' for g in basic_funcs + global_funcs])
if metadata['simd']:
asm_global_funcs += ''.join([' var SIMD_' + ty + '=global' + access_quote('SIMD') + access_quote(ty) + ';\n' for ty in simdtypes])
asm_global_funcs += ''.join([' var SIMD_' + ty + '_' + g + '=SIMD_' + ty + access_quote(g) + ';\n' for ty in simdinttypes for g in simdintfuncs])
asm_global_funcs += ''.join([' var SIMD_' + ty + '_' + g + '=SIMD_' + ty + access_quote(g) + ';\n' for ty in simdfloattypes for g in simdfloatfuncs])
asm_global_vars = ''.join([' var ' + g + '=env' + access_quote(g) + '|0;\n' for g in basic_vars + global_vars])
# In linkable modules, we need to add some explicit globals for global variables that can be linked and used across modules
if settings.get('MAIN_MODULE') or settings.get('SIDE_MODULE'):
assert settings.get('TARGET_ASMJS_UNKNOWN_EMSCRIPTEN'), 'TODO: support x86 target when linking modules (needs offset of 4 and not 8 here)'
for key, value in forwarded_json['Variables']['globals'].iteritems():
if value.get('linkable'):
init = forwarded_json['Variables']['indexedGlobals'][key] + 8 # 8 is Runtime.GLOBAL_BASE / STATIC_BASE
if settings.get('SIDE_MODULE'): init = '(H_BASE+' + str(init) + ')|0'
asm_global_vars += ' var %s=%s;\n' % (key, str(init))
if settings['POINTER_MASKING']:
for i in [0, 1, 2, 3]:
if settings['POINTER_MASKING_DYNAMIC']:
asm_global_vars += ' const MASK%d=env' % i + access_quote('MASK%d' % i) + '|0;\n';
basic_vars += ['MASK%d' %i]
else:
asm_global_vars += ' const MASK%d=%d;\n' % (i, (settings['TOTAL_MEMORY']-1) & (~((2**i)-1)));
# sent data
the_global = '{ ' + ', '.join(['"' + math_fix(s) + '": ' + s for s in fundamentals]) + ' }'
sending = '{ ' + ', '.join(['"' + math_fix(s) + '": ' + s for s in basic_funcs + global_funcs + basic_vars + basic_float_vars + global_vars]) + ' }'
# received
receiving = ''
if settings['ASSERTIONS']:
# assert on the runtime being in a valid state when calling into compiled code. The only exceptions are
# some support code like malloc TODO: verify that malloc is actually safe to use that way
receiving = '\n'.join(['var real_' + s + ' = asm["' + s + '"]; asm["' + s + '''"] = function() {
assert(runtimeInitialized, 'you need to wait for the runtime to be ready (e.g. wait for main() to be called)');
assert(!runtimeExited, 'the runtime was exited (use NO_EXIT_RUNTIME to keep it alive after main() exits)');
return real_''' + s + '''.apply(null, arguments);
};
''' for s in exported_implemented_functions if s not in ['_malloc', '_free', '_memcpy', '_memset']])
if not settings['SWAPPABLE_ASM_MODULE']:
receiving += ';\n'.join(['var ' + s + ' = Module["' + s + '"] = asm["' + s + '"]' for s in exported_implemented_functions + function_tables])
else:
receiving += 'Module["asm"] = asm;\n' + ';\n'.join(['var ' + s + ' = Module["' + s + '"] = function() { return Module["asm"]["' + s + '"].apply(null, arguments) }' for s in exported_implemented_functions + function_tables])
if settings['EXPORT_FUNCTION_TABLES']:
receiving += '\n'
for table in last_forwarded_json['Functions']['tables'].values():
tableName = table.split()[1]
table = table.replace('var ' + tableName, 'var ' + tableName + ' = Module["' + tableName + '"]')
receiving += table + '\n'
# finalize
if DEBUG: logging.debug('asm text sizes' + str([map(len, funcs_js), len(asm_setup), len(asm_global_vars), len(asm_global_funcs), len(pre_tables), len('\n'.join(function_tables_impls)), len(function_tables_defs.replace('\n', '\n ')), len(exports), len(the_global), len(sending), len(receiving)]))
funcs_js = ['''
%s
Module%s = %s;
Module%s = %s;
// EMSCRIPTEN_START_ASM
var asm = (function(global, env, buffer) {
%s
%s
''' % (asm_setup,
access_quote('asmGlobalArg'), the_global,
access_quote('asmLibraryArg'), sending,
"'use asm';" if not metadata.get('hasInlineJS') and not settings['SIDE_MODULE'] and settings['ASM_JS'] == 1 else "'almost asm';", '''
var HEAP8 = new global%s(buffer);
var HEAP16 = new global%s(buffer);
var HEAP32 = new global%s(buffer);
var HEAPU8 = new global%s(buffer);
var HEAPU16 = new global%s(buffer);
var HEAPU32 = new global%s(buffer);
var HEAPF32 = new global%s(buffer);
var HEAPF64 = new global%s(buffer);
''' % (access_quote('Int8Array'),
access_quote('Int16Array'),
access_quote('Int32Array'),
access_quote('Uint8Array'),
access_quote('Uint16Array'),
access_quote('Uint32Array'),
access_quote('Float32Array'),
access_quote('Float64Array')) if not settings['ALLOW_MEMORY_GROWTH'] else '''
var Int8View = global%s;
var Int16View = global%s;
var Int32View = global%s;
var Uint8View = global%s;
var Uint16View = global%s;
var Uint32View = global%s;
var Float32View = global%s;
var Float64View = global%s;
var HEAP8 = new Int8View(buffer);
var HEAP16 = new Int16View(buffer);
var HEAP32 = new Int32View(buffer);
var HEAPU8 = new Uint8View(buffer);
var HEAPU16 = new Uint16View(buffer);
var HEAPU32 = new Uint32View(buffer);
var HEAPF32 = new Float32View(buffer);
var HEAPF64 = new Float64View(buffer);
var byteLength = global.byteLength;
''' % (access_quote('Int8Array'),
access_quote('Int16Array'),
access_quote('Int32Array'),
access_quote('Uint8Array'),
access_quote('Uint16Array'),
access_quote('Uint32Array'),
access_quote('Float32Array'),
access_quote('Float64Array'))) + '\n' + asm_global_vars + ('''
var __THREW__ = 0;
var threwValue = 0;
var setjmpId = 0;
var undef = 0;
var nan = global%s, inf = global%s;
var tempInt = 0, tempBigInt = 0, tempBigIntP = 0, tempBigIntS = 0, tempBigIntR = 0.0, tempBigIntI = 0, tempBigIntD = 0, tempValue = 0, tempDouble = 0.0;
''' % (access_quote('NaN'), access_quote('Infinity'))) + ''.join(['''
var tempRet%d = 0;''' % i for i in range(10)]) + '\n' + asm_global_funcs] + \
[' var tempFloat = %s;\n' % ('Math_fround(0)' if provide_fround else '0.0')] + \
[' var asyncState = 0;\n' if settings.get('EMTERPRETIFY_ASYNC') else ''] + \
([' const f0 = Math_fround(0);\n'] if provide_fround else []) + \
['' if not settings['ALLOW_MEMORY_GROWTH'] else '''
function _emscripten_replace_memory(newBuffer) {
if ((byteLength(newBuffer) & 0xffffff || byteLength(newBuffer) <= 0xffffff) || byteLength(newBuffer) > 0x80000000) return false;
HEAP8 = new Int8View(newBuffer);
HEAP16 = new Int16View(newBuffer);
HEAP32 = new Int32View(newBuffer);
HEAPU8 = new Uint8View(newBuffer);
HEAPU16 = new Uint16View(newBuffer);
HEAPU32 = new Uint32View(newBuffer);
HEAPF32 = new Float32View(newBuffer);
HEAPF64 = new Float64View(newBuffer);
buffer = newBuffer;
return true;
}
'''] + \
['' if not settings['POINTER_MASKING'] or settings['POINTER_MASKING_DYNAMIC'] else '''
function _declare_heap_length() {
return HEAP8[%s] | 0;
}
''' % (settings['TOTAL_MEMORY'] + settings['POINTER_MASKING_OVERFLOW'] - 1)] + ['''
// EMSCRIPTEN_START_FUNCS
function stackAlloc(size) {
size = size|0;
var ret = 0;
ret = STACKTOP;
STACKTOP = (STACKTOP + size)|0;
''' + ('STACKTOP = (STACKTOP + 3)&-4;' if settings['TARGET_X86'] else 'STACKTOP = (STACKTOP + 15)&-16;\n') +
('if ((STACKTOP|0) >= (STACK_MAX|0)) abort();\n' if settings['ASSERTIONS'] else '') + '''
return ret|0;
}
function stackSave() {
return STACKTOP|0;
}
function stackRestore(top) {
top = top|0;
STACKTOP = top;
}
''' + ('''
function setAsync() {
___async = 1;
}''' if need_asyncify else '') + ('''
function emterpret(pc) { // this will be replaced when the emterpreter code is generated; adding it here allows validation until then
pc = pc | 0;
assert(0);
}
''' if settings['EMTERPRETIFY'] else '') + ('''
function setAsyncState(x) {
x = x | 0;
asyncState = x;
}
function emtStackSave() {
return EMTSTACKTOP|0;
}
''' if settings['EMTERPRETIFY_ASYNC'] else '') + '''
function setThrew(threw, value) {
threw = threw|0;
value = value|0;
if ((__THREW__|0) == 0) {
__THREW__ = threw;
threwValue = value;
}
}
function copyTempFloat(ptr) {
ptr = ptr|0;
HEAP8[tempDoublePtr>>0] = HEAP8[ptr>>0];
HEAP8[tempDoublePtr+1>>0] = HEAP8[ptr+1>>0];
HEAP8[tempDoublePtr+2>>0] = HEAP8[ptr+2>>0];
HEAP8[tempDoublePtr+3>>0] = HEAP8[ptr+3>>0];
}
function copyTempDouble(ptr) {
ptr = ptr|0;
HEAP8[tempDoublePtr>>0] = HEAP8[ptr>>0];
HEAP8[tempDoublePtr+1>>0] = HEAP8[ptr+1>>0];
HEAP8[tempDoublePtr+2>>0] = HEAP8[ptr+2>>0];
HEAP8[tempDoublePtr+3>>0] = HEAP8[ptr+3>>0];
HEAP8[tempDoublePtr+4>>0] = HEAP8[ptr+4>>0];
HEAP8[tempDoublePtr+5>>0] = HEAP8[ptr+5>>0];
HEAP8[tempDoublePtr+6>>0] = HEAP8[ptr+6>>0];
HEAP8[tempDoublePtr+7>>0] = HEAP8[ptr+7>>0];
}
function setTempRet0(value) {
value = value|0;
tempRet0 = value;
}
function getTempRet0() {
return tempRet0|0;
}
'''] + funcs_js + ['''
%s
return %s;
})
// EMSCRIPTEN_END_ASM
(%s, %s, buffer);
%s;
''' % (pre_tables + '\n'.join(function_tables_impls) + '\n' + function_tables_defs, exports,
'Module' + access_quote('asmGlobalArg'),
'Module' + access_quote('asmLibraryArg'),
receiving)]
if not settings.get('SIDE_MODULE'):
funcs_js.append('''
Runtime.stackAlloc = asm['stackAlloc'];
Runtime.stackSave = asm['stackSave'];
Runtime.stackRestore = asm['stackRestore'];
Runtime.setTempRet0 = asm['setTempRet0'];
Runtime.getTempRet0 = asm['getTempRet0'];
''')
# Set function table masks
masks = {}
max_mask = 0
for sig, table in last_forwarded_json['Functions']['tables'].iteritems():
mask = table.count(',')
masks[sig] = str(mask)
max_mask = max(mask, max_mask)
def function_table_maskize(js, masks):
def fix(m):
sig = m.groups(0)[0]
return masks[sig]
return re.sub(r'{{{ FTM_([\w\d_$]+) }}}', lambda m: fix(m), js) # masks[m.groups(0)[0]]
funcs_js = map(lambda js: function_table_maskize(js, masks), funcs_js)
if settings.get('DLOPEN_SUPPORT'):
funcs_js.append('''
asm.maxFunctionIndex = %(max_mask)d;
DLFCN.registerFunctions(asm, %(max_mask)d+1, %(sigs)s, Module);
Module.SYMBOL_TABLE = SYMBOL_TABLE;
''' % { 'max_mask': max_mask, 'sigs': str(map(str, last_forwarded_json['Functions']['tables'].keys())) })
# Create symbol table for self-dlopen
if settings.get('DLOPEN_SUPPORT'):
symbol_table = {}
for k, v in forwarded_json['Variables']['indexedGlobals'].iteritems():
if forwarded_json['Variables']['globals'][k]['named']:
symbol_table[k] = str(v + forwarded_json['Runtime']['GLOBAL_BASE'])
for raw in last_forwarded_json['Functions']['tables'].itervalues():
if raw == '': continue
table = map(string.strip, raw[raw.find('[')+1:raw.find(']')].split(","))
for i in range(len(table)):
value = table[i]
if value != '0':
if settings.get('SIDE_MODULE'):
symbol_table[value] = 'FUNCTION_TABLE_OFFSET+' + str(i)
else:
symbol_table[value] = str(i)
outfile.write("var SYMBOL_TABLE = %s;" % json.dumps(symbol_table).replace('"', ''))
for i in range(len(funcs_js)): # do this loop carefully to save memory
if WINDOWS: funcs_js[i] = funcs_js[i].replace('\r\n', '\n') # Normalize to UNIX line endings, otherwise writing to text file will duplicate \r\n to \r\r\n!
outfile.write(funcs_js[i])
funcs_js = None
if WINDOWS: post = post.replace('\r\n', '\n') # Normalize to UNIX line endings, otherwise writing to text file will duplicate \r\n to \r\r\n!
outfile.write(post)
outfile.close()
if DEBUG: logging.debug(' emscript: final python processing took %s seconds' % (time.time() - t))
success = True
finally:
if not success:
outfile.close()
shared.try_delete(outfile.name) # remove partial output
# Select the compiler entry point. Fastcomp is the only supported path;
# setting EMCC_FAST_COMPILER=0 (the old non-fastcomp compiler) is fatal.
if os.environ.get('EMCC_FAST_COMPILER') != '0':
  emscript = emscript_fast
else:
  logging.critical('Non-fastcomp compiler is no longer available, please use fastcomp or an older version of emscripten')
  sys.exit(1)
def main(args, compiler_engine, cache, jcache, relooper, temp_files, DEBUG, DEBUG_CACHE):
  """Decode -s settings, resolve the relooper and struct-info paths,
  then hand everything off to emscript().

  args carries .settings (list of 'NAME=json' strings), .libraries,
  .infile and .outfile as produced by the option parser in _main().
  """
  # Each -s entry is NAME=<json>; split once and JSON-decode the value.
  settings = {}
  for entry in args.settings:
    key, raw = entry.strip().split('=', 1)
    settings[key] = json.loads(raw)

  # Extra JS library files, comma separated in the first -L argument.
  libraries = args.libraries[0].split(',') if args.libraries else []

  # Make sure a relooper binary exists when relooping is requested.
  if settings.get('RELOOP'):
    if not relooper:
      relooper = settings.get('RELOOPER') or cache.get_path('relooper.js')
      settings.setdefault('RELOOPER', relooper)
    if not os.path.exists(relooper):
      shared.Building.ensure_relooper(relooper)

  # Struct-info JSON is generated on demand, unless we are currently
  # bootstrapping it (which would recurse).
  settings.setdefault('STRUCT_INFO', cache.get_path('struct_info.compiled.json'))
  struct_info = settings.get('STRUCT_INFO')
  if not os.path.exists(struct_info) and not settings.get('BOOTSTRAPPING_STRUCT_INFO'):
    if DEBUG:
      logging.debug(' emscript: bootstrapping struct info...')
    shared.Building.ensure_struct_info(struct_info)
    if DEBUG:
      logging.debug(' emscript: bootstrapping struct info complete')

  # Compile the assembly to JavaScript.
  emscript(args.infile, settings, args.outfile, libraries,
           compiler_engine=compiler_engine, jcache=jcache,
           temp_files=temp_files, DEBUG=DEBUG, DEBUG_CACHE=DEBUG_CACHE)
def _main(environ):
  """Command-line entry point: expand @response-files in sys.argv, parse
  options, resolve the temp directory / compiler engine / relooper, then
  run main() under temp-file cleanup."""
  # Repeatedly scan argv for '@file' arguments and splice the file's
  # contents in their place; loop again after each splice because a
  # response file may itself contain another '@file'.
  response_file = True
  while response_file:
    response_file = None
    for index in range(1, len(sys.argv)):
      if sys.argv[index][0] == '@':
        # found one, loop again next time
        response_file = True
        response_file_args = read_response_file(sys.argv[index])
        # slice in extra_args in place of the response file arg
        sys.argv[index:index+1] = response_file_args
        break
  parser = optparse.OptionParser(
    usage='usage: %prog [-h] [-H HEADERS] [-o OUTFILE] [-c COMPILER_ENGINE] [-s FOO=BAR]* infile',
    description=('You should normally never use this! Use emcc instead. '
                 'This is a wrapper around the JS compiler, converting .ll to .js.'),
    epilog='')
  parser.add_option('-H', '--headers',
                    default=[],
                    action='append',
                    help='System headers (comma separated) whose #defines should be exposed to the compiled code.')
  parser.add_option('-L', '--libraries',
                    default=[],
                    action='append',
                    help='Library files (comma separated) to use in addition to those in emscripten src/library_*.')
  parser.add_option('-o', '--outfile',
                    default=sys.stdout,
                    help='Where to write the output; defaults to stdout.')
  parser.add_option('-c', '--compiler',
                    default=None,
                    help='Which JS engine to use to run the compiler; defaults to the one in ~/.emscripten.')
  parser.add_option('--relooper',
                    default=None,
                    help='Which relooper file to use if RELOOP is enabled.')
  parser.add_option('-s', '--setting',
                    dest='settings',
                    default=[],
                    action='append',
                    metavar='FOO=BAR',
                    help=('Overrides for settings defined in settings.js. '
                          'May occur multiple times.'))
  parser.add_option('-j', '--jcache',
                    action='store_true',
                    default=False,
                    help=('Enable jcache (ccache-like caching of compilation results, for faster incremental builds).'))
  parser.add_option('-T', '--temp-dir',
                    default=None,
                    help=('Where to create temporary files.'))
  parser.add_option('-v', '--verbose',
                    action='store_true',
                    dest='verbose',
                    help='Displays debug output')
  parser.add_option('-q', '--quiet',
                    action='store_false',
                    dest='verbose',
                    help='Hides debug output')
  parser.add_option('--suppressUsageWarning',
                    action='store_true',
                    default=environ.get('EMSCRIPTEN_SUPPRESS_USAGE_WARNING'),
                    help=('Suppress usage warning'))

  # Convert to the same format that argparse would have produced.
  keywords, positional = parser.parse_args()

  if not keywords.suppressUsageWarning:
    logging.warning('''
==============================================================
WARNING: You should normally never use this! Use emcc instead.
==============================================================
  ''')

  if len(positional) != 1:
    raise RuntimeError('Must provide exactly one positional argument. Got ' + str(len(positional)) + ': "' + '", "'.join(positional) + '"')
  keywords.infile = os.path.abspath(positional[0])
  # -o gives a path (string); open it for writing. Otherwise outfile is
  # already the sys.stdout file object from the option default.
  if isinstance(keywords.outfile, basestring):
    keywords.outfile = open(keywords.outfile, 'w')

  if keywords.relooper:
    relooper = os.path.abspath(keywords.relooper)
  else:
    relooper = None # use the cache

  # Temp files: either the configured default location or an explicit
  # -T directory (created on demand).
  if keywords.temp_dir is None:
    temp_files = get_configuration().get_temp_files()
    temp_dir = get_configuration().TEMP_DIR
  else:
    temp_dir = os.path.abspath(keywords.temp_dir)
    if not os.path.exists(temp_dir):
      os.makedirs(temp_dir)
    temp_files = tempfiles.TempFiles(temp_dir)

  if keywords.compiler is None:
    keywords.compiler = shared.COMPILER_ENGINE

  # Verbosity is tri-state: None means inherit from the ~/.emscripten
  # configuration; -v / -q force it on / off.
  if keywords.verbose is None:
    DEBUG = get_configuration().DEBUG
    DEBUG_CACHE = get_configuration().DEBUG_CACHE
  else:
    DEBUG = keywords.verbose
    DEBUG_CACHE = keywords.verbose

  cache = cache_module.Cache()
  # run_and_clean removes temp files even if main() raises.
  temp_files.run_and_clean(lambda: main(
    keywords,
    compiler_engine=keywords.compiler,
    cache=cache,
    jcache=cache_module.JCache(cache) if keywords.jcache else None,
    relooper=relooper,
    temp_files=temp_files,
    DEBUG=DEBUG,
    DEBUG_CACHE=DEBUG_CACHE,
  ))
# Script entry point: parse argv and run the compiler wrapper.
if __name__ == '__main__':
  _main(environ=os.environ)
| {
"content_hash": "203f2b339292da6cc3d194dc2158b54b",
"timestamp": "",
"source": "github",
"line_count": 1691,
"max_line_length": 338,
"avg_line_length": 44.51271437019515,
"alnum_prop": 0.6162798421702913,
"repo_name": "PopCap/GameIdea",
"id": "39f301b0acfb1015281aa0288d09cd964d941193",
"size": "75271",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Engine/Source/ThirdParty/HTML5/emsdk/emscripten/1.30.0/emscripten.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "ASP",
"bytes": "238055"
},
{
"name": "Assembly",
"bytes": "184134"
},
{
"name": "Batchfile",
"bytes": "116983"
},
{
"name": "C",
"bytes": "84264210"
},
{
"name": "C#",
"bytes": "9612596"
},
{
"name": "C++",
"bytes": "242290999"
},
{
"name": "CMake",
"bytes": "548754"
},
{
"name": "CSS",
"bytes": "134910"
},
{
"name": "GLSL",
"bytes": "96780"
},
{
"name": "HLSL",
"bytes": "124014"
},
{
"name": "HTML",
"bytes": "4097051"
},
{
"name": "Java",
"bytes": "757767"
},
{
"name": "JavaScript",
"bytes": "2742822"
},
{
"name": "Makefile",
"bytes": "1976144"
},
{
"name": "Objective-C",
"bytes": "75778979"
},
{
"name": "Objective-C++",
"bytes": "312592"
},
{
"name": "PAWN",
"bytes": "2029"
},
{
"name": "PHP",
"bytes": "10309"
},
{
"name": "PLSQL",
"bytes": "130426"
},
{
"name": "Pascal",
"bytes": "23662"
},
{
"name": "Perl",
"bytes": "218656"
},
{
"name": "Python",
"bytes": "21593012"
},
{
"name": "SAS",
"bytes": "1847"
},
{
"name": "Shell",
"bytes": "2889614"
},
{
"name": "Tcl",
"bytes": "1452"
}
],
"symlink_target": ""
} |
from api.models import * # noqa
from rest_framework import viewsets
from api.serializers import * # noqa
from rest_framework import filters
from rest_pandas import PandasViewSet
import django_filters
class ProgramViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows Programs to be viewed or edited.
    """
    queryset = Program.objects.all()
    serializer_class = ProgramSerializer
class CampaignFilter(django_filters.FilterSet):
    """
    Filters Campaigns by parent program id (?program=<program_id>).
    """
    # Traverses the FK to match on the related Program's program_id.
    program = django_filters.CharFilter(name="program__program_id")

    class Meta:
        model = Campaign
        fields = ['program']
class CampaignViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows Campaigns to be viewed or edited.
    Supports filtering by program id via CampaignFilter.
    """
    queryset = Campaign.objects.all()
    serializer_class = CampaignSerializer
    filter_backends = (filters.DjangoFilterBackend,)
    filter_class = CampaignFilter
class TacticFilter(django_filters.FilterSet):
    """
    Filters Tactics by parent campaign id (?campaign=<campaign_id>).
    """
    # Traverses the FK to match on the related Campaign's campaign_id.
    campaign = django_filters.CharFilter(name="campaign__campaign_id")

    class Meta:
        model = Tactic
        fields = ['campaign']
class TacticViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows Tactics to be viewed or edited.
    Supports filtering by campaign id via TacticFilter.
    """
    queryset = Tactic.objects.all()
    serializer_class = TacticSerializer
    filter_backends = (filters.DjangoFilterBackend,)
    filter_class = TacticFilter
class MediumViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows Mediums to be viewed or edited.
    """
    queryset = Medium.objects.all()
    serializer_class = MediumSerializer
class SourceViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows Sources to be viewed or edited.
    """
    queryset = Source.objects.all()
    serializer_class = SourceSerializer
class CreativeViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows Creatives to be viewed or edited.
    """
    queryset = Creative.objects.all()
    serializer_class = CreativeSerializer
class LOBViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows Lines of Business to be viewed or edited.
    """
    queryset = LOB.objects.all()
    serializer_class = LOBSerializer
class IntentViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows Intents to be viewed or edited.
    """
    queryset = Intent.objects.all()
    serializer_class = IntentSerializer
class LifeCycleViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows LifeCycles to be viewed or edited.
    """
    queryset = LifeCycle.objects.all()
    serializer_class = LifeCycleSerializer
class AudienceViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows Audiences to be viewed or edited.
    """
    queryset = Audience.objects.all()
    serializer_class = AudienceSerializer
class LOB_xref_ViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows Line of Business cross-references
    to be viewed or edited.
    """
    queryset = LOB_xref.objects.all()
    serializer_class = LOB_xref_Serializer
class Intent_xref_ViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows Intent cross-references to be viewed
    or edited.
    """
    queryset = Intent_xref.objects.all()
    serializer_class = Intent_xref_Serializer
class LifeCycle_xref_ViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows LifeCycle cross-references to be viewed
    or edited.
    """
    # NOTE(review): the model is spelled Lifecycle_xref (lowercase 'c')
    # while this class uses LifeCycle_xref -- confirm intended.
    queryset = Lifecycle_xref.objects.all()
    serializer_class = LifeCycle_xref_Serializer
class Audience_xref_ViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows Audience cross-references to be viewed
    or edited.
    """
    queryset = Audience_xref.objects.all()
    serializer_class = Audience_xref_Serializer
class Ad_Network_ViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows Ad Networks to be viewed or edited.
    """
    queryset = Ad_Network.objects.all()
    serializer_class = Ad_Network_Serializer
class PlacementViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows Placements (with Foreign Keys)
    to be viewed or edited.
    """
    queryset = Placement.objects.all()
    serializer_class = PlacementSerializer
class ExportCSVPlacementViewSet(PandasViewSet):
    """
    Read-only export of Placements as CSV via rest_pandas.
    """
    model = Placement
    queryset = Placement.objects.all()
    serializer_class = PlacementCSVExportSerializer
class ExportJSONPlacementViewSet(viewsets.ModelViewSet):
    """
    Export of Placements using the JSON export serializer.
    """
    queryset = Placement.objects.all()
    serializer_class = PlacementJSONExportSerializer
| {
"content_hash": "40bd3d9ffa1d3626ab38b4fa0e8fe80b",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 70,
"avg_line_length": 26.61875,
"alnum_prop": 0.7161305470767786,
"repo_name": "Seshra/holocron-api",
"id": "9c588a11359f50482b8c5a47b37c5169c201baf8",
"size": "4259",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "holocron_api/api/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "102751"
},
{
"name": "Shell",
"bytes": "429"
}
],
"symlink_target": ""
} |
import line_patterns, composite_patterns, diamond_patterns, grid_patterns
import copy
def create(d):
    """Build a traffic-pattern object from the configuration dict *d*.

    Expected keys:
        topology    -- 'line', 'diamond' or 'grid' (always required)
        composite   -- optional; wraps the remaining config in a
                       PatternComposite that recurses into create()
        type, N, ... -- per-topology parameters, validated below

    Raises:
        Exception -- when a required key is absent or the configuration
                     matches no known pattern.
    """
    _require(d, 'topology')
    topology = d['topology']

    # A composite pattern wraps another pattern: strip the 'composite'
    # key and let PatternComposite call back into create() recursively.
    if 'composite' in d:
        c = d['composite']
        dcopy = copy.copy(d)
        del dcopy['composite']
        return composite_patterns.PatternComposite(c, create, dcopy)

    if topology == 'line':
        _require(d, 'type')
        _require(d, 'N')
        if d['type'] == 'uniform':
            return line_patterns.Uniform(d['N'])
        if d['type'] == 'uniform_init':
            return line_patterns.UniformWithInitializations(d['N'])
        if d['type'] == 'uniform_rate':
            _require(d, 'rate')
            return line_patterns.UniformRate(d['N'], d['rate'])
        if d['type'] == 'burst':
            if 'p_n2b' not in d or 'p_b2n' not in d:
                raise Exception('p_n2b or p_b2n are missing in pattern.create')
            return line_patterns.BurstyRate(d['N'], d.get('rate', 1), d['p_n2b'], d['p_b2n'])
        if d['type'] == 'split':
            _require(d, 'num_splits')
            return line_patterns.MultiSplitPathRate(d['N'], d['num_splits'], d.get('rate', 1))
    elif topology == 'diamond':
        _require(d, 'type')
        _require(d, 'N')
        _require(d, 'k')
        dashed = 'dashed' in d and d['dashed']
        rate = d.get('rate', 1)
        if d['type'] == 'uniform_src_poisson_rate':
            return diamond_patterns.UniformPoissonRate(d['N'], d['k'], dashed, rate)
    elif topology == 'grid':
        _require(d, 'type')
        if 'N' not in d or 'rate' not in d:
            raise Exception('N or rate are missing in pattern.create')
        if d['type'] == 'random_one_bent':
            return grid_patterns.RandomNodesOneBentRoute(d['N'], d['rate'])
    # Nothing matched; typo fixed (was 'Unknwon').
    raise Exception('Unknown pattern for {}'.format(d))


def _require(d, key):
    # Same message format as the original inline checks.
    if key not in d:
        raise Exception('{} is missing in pattern.create'.format(key))
"content_hash": "0cc35d6c769ecdaf7ff8a2601cda94fa",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 84,
"avg_line_length": 35.04054054054054,
"alnum_prop": 0.5599691477053605,
"repo_name": "efectivo/network_sim",
"id": "1207d0f156615ccd8e42b719cc7b1a262a0d65e4",
"size": "2593",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "patterns/pattern_factory.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "7993"
},
{
"name": "Python",
"bytes": "61634"
}
],
"symlink_target": ""
} |
import os
import sys
# All output paths derive from the test name passed as the first
# command-line argument (sys.argv[1]).
DBG_OUTPUT_FILE="Output/" + sys.argv[1] + ".dbg.out"            # unoptimized run
OPT_DBG_OUTPUT_FILE="Output/" + sys.argv[1] + ".dbg.opt.out"    # optimized run
LOG_FILE="Output/" + sys.argv[1] + ".log"
NATIVE_DBG_OUTPUT_FILE="Output/" + sys.argv[1] + ".native.dbg.out"          # native, unoptimized
NATIVE_OPT_DBG_OUTPUT_FILE="Output/" + sys.argv[1] + ".native.dbg.opt.out"  # native, optimized
NATIVE_LOG_FILE="Output/" + sys.argv[1] + ".native.log"
REPORT_FILE="Output/" + sys.argv[1] + ".dbg.report.txt"
class BreakPoint:
    """One breakpoint hit: its name plus the argument values captured there.

    After compare_args() runs, matching_args / notmatching_args /
    missing_args hold the argument names in each category relative to the
    same breakpoint from a second debugger run.
    """
    def __init__(self, bp_name):
        self.name = bp_name
        # Argument name -> captured value string.
        # NOTE(review): compare_args pairs values() items positionally;
        # on this Python version dict ordering is not guaranteed --
        # confirm both runs record arguments in the same order.
        self.values = {}
        self.missing_args = []
        self.matching_args = []
        self.notmatching_args = []
        self.missing_bp = False

    def setMissing(self):
        # Mark that the other run never reached this breakpoint at all.
        self.missing_bp = True

    def getArgCount(self):
        return len(self.values)

    def getMissingArgCount(self):
        # A wholly missing breakpoint counts all of its arguments as missing.
        if self.missing_bp == True:
            return len(self.values)
        return len(self.missing_args)

    def getMatchingArgCount(self):
        if self.missing_bp == True:
            return 0
        return len(self.matching_args)

    def getNotMatchingArgCount(self):
        if self.missing_bp == True:
            return 0
        return len(self.notmatching_args)

    def recordArgument(self, arg_name, value):
        self.values[arg_name] = value

    def __repr__(self):
        # NOTE(review): prints directly and returns '' instead of
        # returning the description -- unconventional for __repr__.
        print self.name
        items = self.values.items()
        for i in range(len(items)):
            print items[i][0]," = ",items[i][1]
        return ''

    def compare_args(self, other, file):
        """Compare this breakpoint's argument values with *other*'s,
        positionally, writing each category's names to *file*.

        Returns True when any argument is missing or differs, i.e. the
        breakpoints do NOT fully match.
        """
        myitems = self.values.items()
        otheritems = other.values.items()
        match = False
        for i in range(len(myitems)):
            if i >= len(otheritems):
                # Other run captured fewer arguments than we did.
                match = True
                self.missing_args.append(myitems[i][0])
            elif cmp(myitems[i][1], otheritems[i][1]):
                # cmp() is nonzero when the two value strings differ.
                match = True
                self.notmatching_args.append(myitems[i][0])
            else:
                self.matching_args.append(myitems[i][0])
        self.print_list(self.matching_args, " Matching arguments ", file)
        self.print_list(self.notmatching_args, " Not Matching arguments ", file)
        self.print_list(self.missing_args, " Missing arguments ", file)
        return match

    def print_list(self, items, txt, pfile):
        # Writes "<breakpoint name><txt><arg> <arg> ...\n"; empty
        # categories produce no output at all.
        if len(items) == 0:
            return
        pfile.write(self.name)
        pfile.write(txt)
        for e in items:
            pfile.write(e)
            pfile.write(' ')
        pfile.write('\n')
def read_input(filename, dict):
    """Parse a debugger transcript into BreakPoint objects.

    Recognized line formats (whitespace separated):
        #Breakpoint <name> <id>
        #Argument   <name> <id> <arg_name> <value>
    Unrecognized lines are ignored.

    :param filename: path of the transcript to parse
    :param dict: mapping of breakpoint id -> BreakPoint, filled in place
    """
    # 'with' guarantees the handle is closed; the original opened the file
    # and never closed it.  Iterating the handle also avoids slurping the
    # whole file with readlines().
    with open(filename, "r") as f:
        for line in f:
            c = line.split()
            # Skip blank lines; the original raised IndexError on them.
            if not c:
                continue
            if c[0] == "#Breakpoint":
                bp = dict.get(c[2])
                if bp is None:
                    bp = BreakPoint(c[1])
                    dict[c[2]] = bp
            if c[0] == "#Argument":
                bp = dict.get(c[2])
                if bp is None:
                    bp = BreakPoint(c[1])
                    dict[c[2]] = bp
                bp.recordArgument(c[3], c[4])
# --- LLVM comparison: unoptimized (-g) run vs optimized (-g -O) run ---
f1_breakpoints = {}
read_input(DBG_OUTPUT_FILE, f1_breakpoints)
f1_items = f1_breakpoints.items()
f2_breakpoints = {}
read_input(OPT_DBG_OUTPUT_FILE, f2_breakpoints)
f2_items = f2_breakpoints.items()
f = open(LOG_FILE, "w")
f.write("Log output\n")
# Walk the optimized run; anything absent from the unoptimized run is
# flagged missing, otherwise arguments are compared pairwise.
for f2bp in range(len(f2_items)):
    id = f2_items[f2bp][0]
    bp = f2_items[f2bp][1]
    bp1 = f1_breakpoints.get(id)
    if bp1 is None:
        bp.setMissing()
    else:
        bp1.compare_args(bp,f)
f.close()
# --- Same comparison for the GCC ("native") baseline runs ---
nf1_breakpoints = {}
read_input(NATIVE_DBG_OUTPUT_FILE, nf1_breakpoints)
nf1_items = nf1_breakpoints.items()
nf2_breakpoints = {}
read_input(NATIVE_OPT_DBG_OUTPUT_FILE, nf2_breakpoints)
nf2_items = nf2_breakpoints.items()
nfl = open(NATIVE_LOG_FILE, "w")
for nf2bp in range(len(nf2_items)):
    id = nf2_items[nf2bp][0]
    bp = nf2_items[nf2bp][1]
    bp1 = nf1_breakpoints.get(id)
    if bp1 is None:
        bp.setMissing()
    else:
        bp1.compare_args(bp,nfl)
nfl.close()
# --- Aggregate per-breakpoint counters for the LLVM comparison ---
f1_arg_count = 0
f1_matching_arg_count = 0
f1_notmatching_arg_count = 0
f1_missing_arg_count = 0
for idx in range(len(f1_items)):
    bp = f1_items[idx][1]
    f1_arg_count = f1_arg_count + bp.getArgCount()
    f1_matching_arg_count = f1_matching_arg_count + bp.getMatchingArgCount()
    f1_notmatching_arg_count = f1_notmatching_arg_count + bp.getNotMatchingArgCount()
    f1_missing_arg_count = f1_missing_arg_count + bp.getMissingArgCount()
# --- Aggregate counters for the GCC comparison ---
nf1_arg_count = 0
nf1_matching_arg_count = 0
nf1_notmatching_arg_count = 0
nf1_missing_arg_count = 0
for idx in range(len(nf1_items)):
    bp = nf1_items[idx][1]
    nf1_arg_count = nf1_arg_count + bp.getArgCount()
    nf1_matching_arg_count = nf1_matching_arg_count + bp.getMatchingArgCount()
    nf1_notmatching_arg_count = nf1_notmatching_arg_count + bp.getNotMatchingArgCount()
    nf1_missing_arg_count = nf1_missing_arg_count + bp.getMissingArgCount()
# --- Emit the human-readable side-by-side report ---
rf = open(REPORT_FILE, "w")
rf.write("---------------------------------------------------------------\n");
rf.write(">>> ========= '")
rf.write(sys.argv[1])
rf.write("'")
rf.write(" Program\n")
rf.write("---------------------------------------------------------------\n\n");
rf.write("GCC Total Arguments: ")
rf.write(str(nf1_arg_count))
rf.write("\n")
rf.write("GCC Matching Arguments: ")
rf.write(str(nf1_matching_arg_count))
rf.write("\n")
rf.write("GCC Not Matching Arguments: ")
rf.write(str(nf1_notmatching_arg_count))
rf.write("\n")
rf.write("GCC Missing Arguments: ")
rf.write(str(nf1_missing_arg_count))
rf.write("\n")
rf.write("LLVM Total Arguments: ")
rf.write(str(f1_arg_count))
rf.write("\n")
rf.write("LLVM Matching Arguments: ")
rf.write(str(f1_matching_arg_count))
rf.write("\n")
rf.write("LLVM Not Matching Arguments: ")
rf.write(str(f1_notmatching_arg_count))
rf.write("\n")
rf.write("LLVM Missing Arguments: ")
rf.write(str(f1_missing_arg_count))
rf.write("\n")
rf.close()
| {
"content_hash": "d7202a0f67fdbac0f913c5099ba4e2a0",
"timestamp": "",
"source": "github",
"line_count": 191,
"max_line_length": 87,
"avg_line_length": 30.837696335078533,
"alnum_prop": 0.5932088285229202,
"repo_name": "ensemblr/llvm-project-boilerplate",
"id": "be6fa2368d8b89b3d76c8a505f82f680af74e026",
"size": "5909",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "include/llvm/projects/test-suite/CompareDebugInfo.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "32"
},
{
"name": "AppleScript",
"bytes": "1429"
},
{
"name": "Assembly",
"bytes": "15649629"
},
{
"name": "Awk",
"bytes": "1747037"
},
{
"name": "Batchfile",
"bytes": "34481"
},
{
"name": "Brainfuck",
"bytes": "284"
},
{
"name": "C",
"bytes": "85584624"
},
{
"name": "C#",
"bytes": "20737"
},
{
"name": "C++",
"bytes": "168418524"
},
{
"name": "CMake",
"bytes": "1174816"
},
{
"name": "CSS",
"bytes": "49900"
},
{
"name": "Cuda",
"bytes": "414703"
},
{
"name": "Emacs Lisp",
"bytes": "110018"
},
{
"name": "Forth",
"bytes": "1490"
},
{
"name": "Fortran",
"bytes": "356707"
},
{
"name": "GAP",
"bytes": "6167"
},
{
"name": "Go",
"bytes": "132137"
},
{
"name": "HTML",
"bytes": "1751124"
},
{
"name": "JavaScript",
"bytes": "141512"
},
{
"name": "LLVM",
"bytes": "62219250"
},
{
"name": "Limbo",
"bytes": "7437"
},
{
"name": "Logos",
"bytes": "1572537943"
},
{
"name": "Lua",
"bytes": "86606"
},
{
"name": "M",
"bytes": "2008"
},
{
"name": "M4",
"bytes": "109560"
},
{
"name": "Makefile",
"bytes": "616437"
},
{
"name": "Mathematica",
"bytes": "7845"
},
{
"name": "Matlab",
"bytes": "53817"
},
{
"name": "Mercury",
"bytes": "1194"
},
{
"name": "Mirah",
"bytes": "1079943"
},
{
"name": "OCaml",
"bytes": "407143"
},
{
"name": "Objective-C",
"bytes": "5910944"
},
{
"name": "Objective-C++",
"bytes": "1720450"
},
{
"name": "OpenEdge ABL",
"bytes": "690534"
},
{
"name": "PHP",
"bytes": "15986"
},
{
"name": "POV-Ray SDL",
"bytes": "19471"
},
{
"name": "Perl",
"bytes": "591927"
},
{
"name": "PostScript",
"bytes": "845774"
},
{
"name": "Protocol Buffer",
"bytes": "20013"
},
{
"name": "Python",
"bytes": "1895427"
},
{
"name": "QMake",
"bytes": "15580"
},
{
"name": "RenderScript",
"bytes": "741"
},
{
"name": "Roff",
"bytes": "94555"
},
{
"name": "Rust",
"bytes": "200"
},
{
"name": "Scheme",
"bytes": "2654"
},
{
"name": "Shell",
"bytes": "1144090"
},
{
"name": "Smalltalk",
"bytes": "144607"
},
{
"name": "SourcePawn",
"bytes": "1544"
},
{
"name": "Standard ML",
"bytes": "2841"
},
{
"name": "Tcl",
"bytes": "8285"
},
{
"name": "TeX",
"bytes": "320484"
},
{
"name": "Vim script",
"bytes": "17239"
},
{
"name": "Yacc",
"bytes": "163484"
}
],
"symlink_target": ""
} |
from __future__ import division
"""Defines a Client class that can be used to communicate with the
Server class defined in server.py.
The client will connect to the port specified in the environment
variable PYSYN_PORT. If this variable is not set, it will use
the default_port imported from server.py.
"""
import socket
import threading
import SocketServer
import os
from server import default_port
class Client(threading.Thread):
    """A thread that sends one line to the pysynphot server and records
    the reply in self._response.

    Python 2 code (print statements).  The port comes from the PYSYN_PORT
    environment variable, falling back to server.default_port.
    """
    def __init__(self, url, line):
        # :param url: host to connect to
        # :param line: payload sent verbatim over the socket
        self._url = url
        self._line = line
        self._port = int(os.environ.get('PYSYN_PORT',default_port))
        print "Client will use url, port (%s,%s)"%(self._url,self._port)
        threading.Thread.__init__(self)
    def run(self):
        # Connect, send the line, read a single reply (up to 8 KiB), close.
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.connect((self._url,self._port))
        hr= "\n==============================================\n"
        print hr,"Client connected to server. "
        sock.sendall(self._line)
        print "Client sent: ", self._line
        # NOTE(review): a single recv() assumes the whole reply fits in one
        # read -- confirm the server's responses are short.
        self._response = sock.recv(8192)
        print "%s Client sent: %s \n Client received: %s"%(hr,self._line,
            self._response)
        sock.close()
| {
"content_hash": "7bafa90253327e427a962c12655591d0",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 74,
"avg_line_length": 32.717948717948715,
"alnum_prop": 0.5783699059561128,
"repo_name": "martindurant/starclassifier",
"id": "5fc9783ef065492f1b023d8573ddf4c9ba5480b7",
"size": "1276",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ui/pysynphot/client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "IDL",
"bytes": "2688"
},
{
"name": "Python",
"bytes": "236927"
}
],
"symlink_target": ""
} |
from abc import ABCMeta
from numbers import Integral
import numpy as np
import sqlite3
from sqlite3 import Row
import warnings
from logbook import Logger
import pandas as pd
from pandas.tseries.tools import normalize_date
from six import with_metaclass, string_types
from zipline.errors import (
ConsumeAssetMetaDataError,
InvalidAssetType,
MultipleSymbolsFound,
RootSymbolNotFound,
SidAssignmentError,
SidNotFound,
SymbolNotFound,
MapAssetIdentifierIndexError,
)
from zipline.assets._assets import (
Asset, Equity, Future
)
log = Logger('assets.py')
# Expected fields for an Asset's metadata
ASSET_FIELDS = [
    'sid',
    'asset_type',
    'symbol',
    'root_symbol',
    'asset_name',
    'start_date',
    'end_date',
    'first_traded',
    'exchange',
    'notice_date',
    'expiration_date',
    'contract_multiplier',
    # The following fields are for compatibility with other systems
    'file_name',  # Used as symbol
    'company_name',  # Used as asset_name
    'start_date_nano',  # Used as start_date
    'end_date_nano',  # Used as end_date
]
# Columns shared by the equities and futures tables.
ASSET_TABLE_FIELDS = [
    'sid',
    'symbol',
    'asset_name',
    'start_date',
    'end_date',
    'first_traded',
    'exchange',
]
# Futures carry the shared columns plus contract-specific ones.
FUTURE_TABLE_FIELDS = ASSET_TABLE_FIELDS + [
    'root_symbol',
    'notice_date',
    'expiration_date',
    'contract_multiplier',
]
EQUITY_TABLE_FIELDS = ASSET_TABLE_FIELDS
# Create the query once from the fields, so that the join is not done
# repeatedly.
FUTURE_BY_SID_QUERY = 'select {0} from futures where sid=?'.format(
    ", ".join(FUTURE_TABLE_FIELDS))
EQUITY_BY_SID_QUERY = 'select {0} from equities where sid=?'.format(
    ", ".join(EQUITY_TABLE_FIELDS))
class AssetFinder(object):
    def __init__(self,
                 metadata=None,
                 allow_sid_assignment=True,
                 fuzzy_char=None,
                 db_path=':memory:',
                 create_table=True):
        """Build an AssetFinder backed by a SQLite database.

        :param metadata: optional initial metadata (dict, DataFrame, or
            readable) consumed via consume_metadata
        :param allow_sid_assignment: when True, entries without a sid are
            assigned one automatically
        :param fuzzy_char: character stripped from symbols for fuzzy lookup
        :param db_path: SQLite database path (':memory:' by default)
        :param create_table: when False, assume db_path already holds the
            required tables
        """
        self.fuzzy_char = fuzzy_char

        # This flag controls if the AssetFinder is allowed to generate its own
        # sids. If False, metadata that does not contain a sid will raise an
        # exception when building assets.
        self.allow_sid_assignment = allow_sid_assignment

        # NOTE(review): end_date_to_assign only exists when
        # allow_sid_assignment is True, but _insert_metadata may read it for
        # entries lacking an end_date -- confirm that combination is invalid.
        if allow_sid_assignment:
            self.end_date_to_assign = normalize_date(
                pd.Timestamp('now', tz='UTC'))

        self.conn = sqlite3.connect(db_path)
        self.conn.text_factory = str
        self.cursor = self.conn.cursor()

        # The AssetFinder also holds a nested-dict of all metadata for
        # reference when building Assets
        self.metadata_cache = {}

        # Create table and read in metadata.
        # Should we use flags like 'r', 'w', instead?
        # What we need to support is:
        # - A 'throwaway' mode where the metadata is read each run.
        # - A 'write' mode where the data is written to the provided db_path
        # - A 'read' mode where the asset finder uses a prexisting db.
        if create_table:
            self.create_db_tables()

        if metadata is not None:
            self.consume_metadata(metadata)

        # Cache for lookup of assets by sid, the objects in the asset lookp may
        # be shared with the results from equity and future lookup caches.
        #
        # The top level cache exists to minimize lookups on the asset type
        # routing.
        #
        # The caches are read through, i.e. accessing an asset through
        # retrieve_asset, _retrieve_equity etc. will populate the cache on
        # first retrieval.
        self._asset_cache = {}
        self._equity_cache = {}
        self._future_cache = {}

        self._asset_type_cache = {}
def create_db_tables(self):
c = self.conn.cursor()
c.execute("""
CREATE TABLE equities(
sid integer,
symbol text,
asset_name text,
start_date integer,
end_date integer,
first_traded integer,
exchange text,
fuzzy text
)""")
c.execute('CREATE INDEX equities_sid on equities(sid)')
c.execute('CREATE INDEX equities_symbol on equities(symbol)')
c.execute('CREATE INDEX equities_fuzzy on equities(fuzzy)')
c.execute("""
CREATE TABLE futures(
sid integer,
symbol text,
asset_name text,
start_date integer,
end_date integer,
first_traded integer,
exchange text,
root_symbol text,
notice_date integer,
expiration_date integer,
contract_multiplier real
)""")
c.execute('CREATE INDEX futures_sid on futures(sid)')
c.execute('CREATE INDEX futures_root_symbol on equities(symbol)')
c.execute("""
CREATE TABLE asset_router
(sid integer,
asset_type text)
""")
c.execute('CREATE INDEX asset_router_sid on asset_router(sid)')
self.conn.commit()
def asset_type_by_sid(self, sid):
try:
return self._asset_type_cache[sid]
except KeyError:
pass
c = self.conn.cursor()
# Python 3 compatibility required forcing to int for sid = 0.
t = (int(sid),)
query = 'select asset_type from asset_router where sid=:sid'
c.execute(query, t)
data = c.fetchone()
if data is None:
return
asset_type = data[0]
self._asset_type_cache[sid] = asset_type
return asset_type
def retrieve_asset(self, sid, default_none=False):
if isinstance(sid, Asset):
return sid
try:
asset = self._asset_cache[sid]
except KeyError:
asset_type = self.asset_type_by_sid(sid)
if asset_type == 'equity':
asset = self._retrieve_equity(sid)
elif asset_type == 'future':
asset = self._retrieve_futures_contract(sid)
else:
asset = None
self._asset_cache[sid] = asset
if asset is not None:
return asset
elif default_none:
return None
else:
raise SidNotFound(sid=sid)
    def _retrieve_equity(self, sid):
        """Load the Equity for ``sid`` from the equities table, converting
        stored nanosecond integers to UTC Timestamps.  Memoized in
        self._equity_cache.
        """
        try:
            return self._equity_cache[sid]
        except KeyError:
            pass

        c = self.conn.cursor()
        c.row_factory = Row
        t = (int(sid),)
        c.execute(EQUITY_BY_SID_QUERY, t)
        # NOTE(review): fetchone() returns None for an unknown sid, making
        # dict() raise TypeError -- the 'equity = None' branch below looks
        # unreachable for missing rows; confirm callers pre-validate sids.
        data = dict(c.fetchone())
        if data:
            if data['start_date']:
                data['start_date'] = pd.Timestamp(data['start_date'], tz='UTC')

            if data['end_date']:
                data['end_date'] = pd.Timestamp(data['end_date'], tz='UTC')

            if data['first_traded']:
                data['first_traded'] = pd.Timestamp(
                    data['first_traded'], tz='UTC')

            equity = Equity(**data)
        else:
            equity = None

        self._equity_cache[sid] = equity
        return equity
    def _retrieve_futures_contract(self, sid):
        """Load the Future for ``sid`` from the futures table, converting
        stored nanosecond integers to UTC Timestamps.  Memoized in
        self._future_cache.  Mirrors _retrieve_equity, adding notice and
        expiration dates.
        """
        try:
            return self._future_cache[sid]
        except KeyError:
            pass

        c = self.conn.cursor()
        t = (int(sid),)
        c.row_factory = Row
        c.execute(FUTURE_BY_SID_QUERY, t)
        # NOTE(review): as in _retrieve_equity, an unknown sid makes
        # dict(None) raise TypeError rather than reaching the None branch.
        data = dict(c.fetchone())
        if data:
            if data['start_date']:
                data['start_date'] = pd.Timestamp(data['start_date'], tz='UTC')

            if data['end_date']:
                data['end_date'] = pd.Timestamp(data['end_date'], tz='UTC')

            if data['first_traded']:
                data['first_traded'] = pd.Timestamp(
                    data['first_traded'], tz='UTC')

            if data['notice_date']:
                data['notice_date'] = pd.Timestamp(
                    data['notice_date'], tz='UTC')

            if data['expiration_date']:
                data['expiration_date'] = pd.Timestamp(
                    data['expiration_date'], tz='UTC')

            future = Future(**data)
        else:
            future = None

        self._future_cache[sid] = future
        return future
    def lookup_symbol_resolve_multiple(self, symbol, as_of_date=None):
        """
        Return matching Asset of name symbol in database.

        If multiple Assets are found and as_of_date is not set,
        raises MultipleSymbolsFound.

        If no Asset was active at as_of_date, and allow_expired is False
        raises SymbolNotFound.
        """
        if as_of_date is not None:
            as_of_date = pd.Timestamp(normalize_date(as_of_date))

        c = self.conn.cursor()

        if as_of_date:
            # Resolution order when a date is given:
            # 1) exactly one sid active on as_of_date -> return it;
            # 2) none active -> most recently ended sid that started on or
            #    before as_of_date;
            # 3) several active -> latest start_date, end_date tie-break.

            # If one SID exists for symbol, return that symbol
            t = (symbol, as_of_date.value, as_of_date.value)
            query = ("select sid from equities "
                     "where symbol=? "
                     "and start_date<=? "
                     "and end_date>=?")
            c.execute(query, t)
            candidates = c.fetchall()

            if len(candidates) == 1:
                return self._retrieve_equity(candidates[0][0])

            # If no SID exists for symbol, return SID with the
            # highest-but-not-over end_date
            if len(candidates) == 0:
                t = (symbol, as_of_date.value)
                query = ("select sid from equities "
                         "where symbol=? "
                         "and start_date<=? "
                         "order by end_date desc "
                         "limit 1")
                c.execute(query, t)
                data = c.fetchone()

                if data:
                    return self._retrieve_equity(data[0])

            # If multiple SIDs exist for symbol, return latest start_date with
            # end_date as a tie-breaker
            if len(candidates) > 1:
                t = (symbol, as_of_date.value)
                query = ("select sid from equities "
                         "where symbol=? " +
                         "and start_date<=? " +
                         "order by start_date desc, end_date desc " +
                         "limit 1")
                c.execute(query, t)
                data = c.fetchone()

                if data:
                    return self._retrieve_equity(data[0])

            raise SymbolNotFound(symbol=symbol)

        else:
            # No date given: the symbol must map to exactly one sid.
            t = (symbol,)
            query = ("select sid from equities where symbol=?")
            c.execute(query, t)
            data = c.fetchall()

            if len(data) == 1:
                return self._retrieve_equity(data[0][0])
            elif not data:
                raise SymbolNotFound(symbol=symbol)
            else:
                options = []
                for row in data:
                    sid = row[0]
                    asset = self._retrieve_equity(sid)
                    options.append(asset)
                raise MultipleSymbolsFound(symbol=symbol,
                                           options=options)
def lookup_symbol(self, symbol, as_of_date, fuzzy=False):
"""
If a fuzzy string is provided, then we try various symbols based on
the provided symbol. This is to facilitate mapping from a broker's
symbol to ours in cases where mapping to the broker's symbol loses
information. For example, if we have CMCS_A, but a broker has CMCSA,
when the broker provides CMCSA, it can also provide fuzzy='_',
so we can find a match by inserting an underscore.
"""
symbol = symbol.upper()
as_of_date = normalize_date(as_of_date)
if not fuzzy:
try:
return self.lookup_symbol_resolve_multiple(symbol, as_of_date)
except SymbolNotFound:
return None
else:
c = self.conn.cursor()
fuzzy = symbol.replace(self.fuzzy_char, '')
t = (fuzzy, as_of_date.value, as_of_date.value)
query = ("select sid from equities "
"where fuzzy=? " +
"and start_date<=? " +
"and end_date>=?")
c.execute(query, t)
candidates = c.fetchall()
# If one SID exists for symbol, return that symbol
if len(candidates) == 1:
return self._retrieve_equity(candidates[0][0])
# If multiple SIDs exist for symbol, return latest start_date with
# end_date as a tie-breaker
if len(candidates) > 1:
t = (symbol, as_of_date.value)
query = ("select sid from equities "
"where symbol=? " +
"and start_date<=? " +
"order by start_date desc, end_date desc" +
"limit 1")
c.execute(query, t)
data = c.fetchone()
if data:
return self._retrieve_equity(data[0])
def lookup_future_chain(self, root_symbol, as_of_date, knowledge_date):
""" Return the futures chain for a given root symbol.
Parameters
----------
root_symbol : str
Root symbol of the desired future.
as_of_date : pd.Timestamp
Date at which the chain determination is rooted. I.e. the
existing contract whose notice date is first after this
date is the primary contract, etc.
knowledge_date : pd.Timestamp
Date for determining which contracts exist for inclusion in
this chain. Contracts exist only if they have a start_date
on or before this date.
Returns
-------
list
A list of Future objects, the chain for the given
parameters.
Raises
------
RootSymbolNotFound
Raised when a future chain could not be found for the given
root symbol.
"""
c = self.conn.cursor()
t = {'root_symbol': root_symbol,
'as_of_date': as_of_date.value,
'knowledge_date': knowledge_date.value}
c.execute("""
select sid from futures
where root_symbol=:root_symbol
and :as_of_date < notice_date
and start_date <= :knowledge_date
order by notice_date asc
""", t)
sids = [r[0] for r in c.fetchall()]
if not sids:
# Check if root symbol exists.
c.execute("""
select count(sid) from futures where root_symbol=:root_symbol
""", t)
count = c.fetchone()[0]
if count == 0:
raise RootSymbolNotFound(root_symbol=root_symbol)
else:
# If symbol exists, return empty future chain.
return []
return [self._retrieve_futures_contract(sid) for sid in sids]
@property
def sids(self):
c = self.conn.cursor()
query = 'select sid from asset_router'
c.execute(query)
return [r[0] for r in c.fetchall()]
@property
def assets(self):
return self.cache.values()
    def _lookup_generic_scalar(self,
                               asset_convertible,
                               as_of_date,
                               matches,
                               missing):
        """
        Convert asset_convertible to an asset.

        On success, append to matches.
        On failure, append to missing.

        Accepted inputs: Asset (passed through), integral sid, or symbol
        string.  Anything else raises NotAssetConvertible; unresolvable
        sids/symbols land in ``missing`` instead of raising.
        """
        try:
            if isinstance(asset_convertible, Asset):
                matches.append(asset_convertible)

            elif isinstance(asset_convertible, Integral):
                result = self.retrieve_asset(int(asset_convertible))
                if result is None:
                    raise SymbolNotFound(symbol=asset_convertible)
                matches.append(result)

            elif isinstance(asset_convertible, string_types):
                # Throws SymbolNotFound on failure to match.
                matches.append(
                    self.lookup_symbol_resolve_multiple(
                        asset_convertible,
                        as_of_date,
                    )
                )
            else:
                raise NotAssetConvertible(
                    "Input was %s, not AssetConvertible."
                    % asset_convertible
                )

        except SymbolNotFound:
            missing.append(asset_convertible)
            return None
    def lookup_generic(self,
                       asset_convertible_or_iterable,
                       as_of_date):
        """
        Convert a AssetConvertible or iterable of AssetConvertibles into
        a list of Asset objects.

        This method exists primarily as a convenience for implementing
        user-facing APIs that can handle multiple kinds of input.  It should
        not be used for internal code where we already know the expected types
        of our inputs.

        Returns a pair of objects, the first of which is the result of the
        conversion, and the second of which is a list containing any values
        that couldn't be resolved.  For a scalar input the first element is
        a single Asset (or SidNotFound/SymbolNotFound is raised); for an
        iterable it is a list.
        """
        matches = []
        missing = []

        # Interpret input as scalar.
        if isinstance(asset_convertible_or_iterable, AssetConvertible):
            self._lookup_generic_scalar(
                asset_convertible=asset_convertible_or_iterable,
                as_of_date=as_of_date,
                matches=matches,
                missing=missing,
            )
            try:
                return matches[0], missing
            except IndexError:
                # Scalar failed to resolve: raise the error matching the
                # input's flavor (sid-like vs symbol-like).
                if hasattr(asset_convertible_or_iterable, '__int__'):
                    raise SidNotFound(sid=asset_convertible_or_iterable)
                else:
                    raise SymbolNotFound(symbol=asset_convertible_or_iterable)

        # Interpret input as iterable.
        try:
            iterator = iter(asset_convertible_or_iterable)
        except TypeError:
            raise NotAssetConvertible(
                "Input was not a AssetConvertible "
                "or iterable of AssetConvertible."
            )

        for obj in iterator:
            self._lookup_generic_scalar(obj, as_of_date, matches, missing)
        return matches, missing
def map_identifier_index_to_sids(self, index, as_of_date):
"""
This method is for use in sanitizing a user's DataFrame or Panel
inputs.
Takes the given index of identifiers, checks their types, builds assets
if necessary, and returns a list of the sids that correspond to the
input index.
Parameters
__________
index : Iterable
An iterable containing ints, strings, or Assets
as_of_date : pandas.Timestamp
A date to be used to resolve any dual-mapped symbols
Returns
_______
List
A list of integer sids corresponding to the input index
"""
# This method assumes that the type of the objects in the index is
# consistent and can, therefore, be taken from the first identifier
first_identifier = index[0]
# Ensure that input is AssetConvertible (integer, string, or Asset)
if not isinstance(first_identifier, AssetConvertible):
raise MapAssetIdentifierIndexError(obj=first_identifier)
# If sids are provided, no mapping is necessary
if isinstance(first_identifier, Integral):
return index
# If symbols or Assets are provided, construction and mapping is
# necessary
self.consume_identifiers(index)
# Look up all Assets for mapping
matches = []
missing = []
for identifier in index:
self._lookup_generic_scalar(identifier, as_of_date,
matches, missing)
# Handle missing assets
if len(missing) > 0:
warnings.warn("Missing assets for identifiers: " + missing)
# Return a list of the sids of the found assets
return [asset.sid for asset in matches]
    def _insert_metadata(self, identifier, **kwargs):
        """
        Inserts the given metadata kwargs to the entry for the given
        identifier. Matching fields in the existing entry will be overwritten.

        Normalizes the kwargs (field filtering, sid/symbol/name inference,
        date coercion), builds an Equity or Future, writes it to the
        equities/futures and asset_router tables (without committing; the
        public wrappers commit), and records the entry in metadata_cache.

        :param identifier: The identifier for which to insert metadata
        :param kwargs: The keyed metadata to insert
        """
        if identifier in self.metadata_cache:
            # Multiple pass insertion no longer supported.
            # This could and probably should raise an Exception, but is
            # currently just a short-circuit for compatibility with existing
            # testing structure in the test_algorithm module which creates
            # multiple sources which all insert redundant metadata.
            return

        entry = {}

        for key, value in kwargs.items():
            # Do not accept invalid fields
            if key not in ASSET_FIELDS:
                continue
            # Do not accept Nones
            if value is None:
                continue
            # Do not accept empty strings
            if value == '':
                continue
            # Do not accept nans from dataframes
            if isinstance(value, float) and np.isnan(value):
                continue
            entry[key] = value

        # Check if the sid is declared
        try:
            entry['sid']
        except KeyError:
            # If the identifier is not a sid, assign one
            if hasattr(identifier, '__int__'):
                entry['sid'] = identifier.__int__()
            else:
                if self.allow_sid_assignment:
                    # Assign the sid the value of its insertion order.
                    # This assumes that we are assigning values to all assets.
                    entry['sid'] = len(self.metadata_cache)
                else:
                    raise SidAssignmentError(identifier=identifier)

        # If the file_name is in the kwargs, it will be used as the symbol
        try:
            entry['symbol'] = entry.pop('file_name')
        except KeyError:
            pass

        # If the identifier coming in was a string and there is no defined
        # symbol yet, set the symbol to the incoming identifier
        try:
            entry['symbol']
            pass
        except KeyError:
            if isinstance(identifier, string_types):
                entry['symbol'] = identifier

        # If the company_name is in the kwargs, it may be the asset_name
        try:
            company_name = entry.pop('company_name')
            try:
                entry['asset_name']
            except KeyError:
                entry['asset_name'] = company_name
        except KeyError:
            pass

        # If dates are given as nanos, pop them
        try:
            entry['start_date'] = entry.pop('start_date_nano')
        except KeyError:
            pass
        try:
            entry['end_date'] = entry.pop('end_date_nano')
        except KeyError:
            pass
        try:
            entry['notice_date'] = entry.pop('notice_date_nano')
        except KeyError:
            pass
        try:
            entry['expiration_date'] = entry.pop('expiration_date_nano')
        except KeyError:
            pass

        # Process dates to Timestamps
        try:
            entry['start_date'] = pd.Timestamp(entry['start_date'], tz='UTC')
        except KeyError:
            # Set a default start_date of the EPOCH, so that all date queries
            # work when a start date is not provided.
            entry['start_date'] = pd.Timestamp(0, tz='UTC')
        try:
            # Set a default end_date of 'now', so that all date queries
            # work when a end date is not provided.
            # NOTE(review): end_date_to_assign only exists when the finder
            # was built with allow_sid_assignment=True -- confirm entries
            # always carry end_date otherwise.
            entry['end_date'] = pd.Timestamp(entry['end_date'], tz='UTC')
        except KeyError:
            entry['end_date'] = self.end_date_to_assign
        try:
            entry['notice_date'] = pd.Timestamp(entry['notice_date'],
                                                tz='UTC')
        except KeyError:
            pass
        try:
            entry['expiration_date'] = pd.Timestamp(entry['expiration_date'],
                                                    tz='UTC')
        except KeyError:
            pass

        # Build an Asset of the appropriate type, default to Equity
        asset_type = entry.pop('asset_type', 'equity')
        if asset_type.lower() == 'equity':
            # Precompute the fuzzy-matchable form of the symbol (symbol with
            # fuzzy_char removed) for the equities.fuzzy column.
            try:
                fuzzy = entry['symbol'].replace(self.fuzzy_char, '') \
                    if self.fuzzy_char else None
            except KeyError:
                fuzzy = None
            asset = Equity(**entry)
            c = self.conn.cursor()
            t = (asset.sid,
                 asset.symbol,
                 asset.asset_name,
                 asset.start_date.value if asset.start_date else None,
                 asset.end_date.value if asset.end_date else None,
                 asset.first_traded.value if asset.first_traded else None,
                 asset.exchange,
                 fuzzy)
            c.execute("""INSERT INTO equities(
            sid,
            symbol,
            asset_name,
            start_date,
            end_date,
            first_traded,
            exchange,
            fuzzy)
            VALUES(?, ?, ?, ?, ?, ?, ?, ?)""", t)

            t = (asset.sid,
                 'equity')
            c.execute("""INSERT INTO asset_router(sid, asset_type)
            VALUES(?, ?)""", t)

        elif asset_type.lower() == 'future':
            asset = Future(**entry)
            c = self.conn.cursor()
            t = (asset.sid,
                 asset.symbol,
                 asset.asset_name,
                 asset.start_date.value if asset.start_date else None,
                 asset.end_date.value if asset.end_date else None,
                 asset.first_traded.value if asset.first_traded else None,
                 asset.exchange,
                 asset.root_symbol,
                 asset.notice_date.value if asset.notice_date else None,
                 asset.expiration_date.value
                 if asset.expiration_date else None,
                 asset.contract_multiplier)
            c.execute("""INSERT INTO futures(
            sid,
            symbol,
            asset_name,
            start_date,
            end_date,
            first_traded,
            exchange,
            root_symbol,
            notice_date,
            expiration_date,
            contract_multiplier)
            VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)""", t)

            t = (asset.sid,
                 'future')
            c.execute("""INSERT INTO asset_router(sid, asset_type)
            VALUES(?, ?)""", t)
        else:
            raise InvalidAssetType(asset_type=asset_type)

        self.metadata_cache[identifier] = entry
    def consume_identifiers(self, identifiers):
        """
        Consumes the given identifiers in to the metadata cache of this
        AssetFinder.

        Each identifier may be an int, a string, or a full Asset; Assets
        are re-registered under their sid with their own metadata.
        """
        for identifier in identifiers:
            # Handle case where full Assets are passed in
            # For example, in the creation of a DataFrameSource, the source's
            # 'sid' args may be full Assets
            if isinstance(identifier, Asset):
                sid = identifier.sid
                metadata = identifier.to_dict()
                metadata['asset_type'] = identifier.__class__.__name__
                self.insert_metadata(identifier=sid, **metadata)
            else:
                self.insert_metadata(identifier)
    def consume_metadata(self, metadata):
        """
        Consumes the provided metadata in to the metadata cache. The
        existing values in the cache will be overwritten when there
        is a conflict.

        Accepts a dict, a pandas DataFrame, or any object with a ``read``
        method; anything else raises ConsumeAssetMetaDataError.

        :param metadata: The metadata to be consumed
        """
        # Handle dicts
        if isinstance(metadata, dict):
            self._insert_metadata_dict(metadata)
        # Handle DataFrames
        elif isinstance(metadata, pd.DataFrame):
            self._insert_metadata_dataframe(metadata)
        # Handle readables
        elif hasattr(metadata, 'read'):
            self._insert_metadata_readable(metadata)
        else:
            raise ConsumeAssetMetaDataError(obj=metadata)
def clear_metadata(self):
"""
Used for testing.
"""
self.metadata_cache = {}
self.conn = sqlite3.connect(':memory:')
self.create_db_tables()
    def insert_metadata(self, identifier, **kwargs):
        """Insert a single metadata entry and commit immediately.

        Public wrapper around _insert_metadata; the bulk helpers instead
        commit once after many inserts.
        """
        self._insert_metadata(identifier, **kwargs)
        self.conn.commit()
    def _insert_metadata_dataframe(self, dataframe):
        # Each row becomes one entry; the index value is the identifier and
        # the columns are the metadata kwargs.  Single commit at the end.
        for identifier, row in dataframe.iterrows():
            self._insert_metadata(identifier, **row)
        self.conn.commit()
    def _insert_metadata_dict(self, dict):
        # Bulk insert from {identifier: metadata-dict}; single commit at the
        # end.  (The parameter shadows the builtin 'dict'; kept for
        # interface compatibility.)
        for identifier, entry in dict.items():
            self._insert_metadata(identifier, **entry)
        self.conn.commit()
    def _insert_metadata_readable(self, readable):
        # Bulk insert from an object whose read() yields row mappings.
        # Single commit at the end.
        for row in readable.read():
            # Parse out the row of the readable object
            metadata_dict = {}
            for field in ASSET_FIELDS:
                try:
                    row_value = row[field]
                    # Avoid passing placeholders
                    if row_value and (row_value != 'None'):
                        metadata_dict[field] = row[field]
                except KeyError:
                    continue
                except IndexError:
                    continue
            # Locate the identifier, fail if not found
            if 'sid' in metadata_dict:
                identifier = metadata_dict['sid']
            elif 'symbol' in metadata_dict:
                identifier = metadata_dict['symbol']
            else:
                raise ConsumeAssetMetaDataError(obj=row)
            self._insert_metadata(identifier, **metadata_dict)
        self.conn.commit()
class AssetConvertible(with_metaclass(ABCMeta)):
    """
    ABC for types that are convertible to integer-representations of
    Assets.

    Includes Asset, six.string_types, and Integral (registered as virtual
    subclasses below); used by AssetFinder.lookup_generic and friends for
    isinstance checks on user-supplied identifiers.
    """
    pass
# Register virtual subclasses so isinstance(x, AssetConvertible) accepts
# sids (Integral), Assets, and symbol strings without real inheritance.
AssetConvertible.register(Integral)
AssetConvertible.register(Asset)
# Use six.string_types for Python2/3 compatibility
for _type in string_types:
    AssetConvertible.register(_type)
class NotAssetConvertible(ValueError):
    """Raised when an input is not an AssetConvertible (Asset, integral
    sid, or symbol string) where one is required."""
    pass
| {
"content_hash": "68508c7386cca5e30c625ee043010920",
"timestamp": "",
"source": "github",
"line_count": 908,
"max_line_length": 79,
"avg_line_length": 33.78193832599119,
"alnum_prop": 0.5390558779422312,
"repo_name": "ronalcc/zipline",
"id": "199853193886ff19f0e92b8b116de2833e352213",
"size": "31257",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zipline/assets/assets.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "564"
},
{
"name": "Emacs Lisp",
"bytes": "138"
},
{
"name": "Python",
"bytes": "1051737"
},
{
"name": "Shell",
"bytes": "4065"
}
],
"symlink_target": ""
} |
import sys
from mixbox.binding_utils import *
from . import cybox_common
from . import address_object
from . import network_route_entry_object
class RoutesType(GeneratedsSuper):
    """The RoutesType is intended to characterize a set network routes.

    generateDS-style XML binding: holds a list of Route children and
    provides export (serialize to XML via the lwrite callable) and build
    (populate from an etree node) methods.
    """
    subclass = None
    superclass = None
    def __init__(self, Route=None):
        if Route is None:
            self.Route = []
        else:
            self.Route = Route
    def factory(*args_, **kwargs_):
        # Allows a registered subclass to take over construction.
        if RoutesType.subclass:
            return RoutesType.subclass(*args_, **kwargs_)
        else:
            return RoutesType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_Route(self): return self.Route
    def set_Route(self, Route): self.Route = Route
    def add_Route(self, value): self.Route.append(value)
    def insert_Route(self, index, value): self.Route[index] = value
    def hasContent_(self):
        # True when there is at least one child worth serializing.
        if (
            self.Route
        ):
            return True
        else:
            return False
    def export(self, lwrite, level, namespace_='NetworkSubnetObj:', name_='RoutesType', namespacedef_='', pretty_print=True):
        # Serialize this element (and children) by calling lwrite with XML
        # fragments; 'level' controls indentation when pretty_print is set.
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        showIndent(lwrite, level, pretty_print)
        lwrite('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(lwrite, level, already_processed, namespace_, name_='RoutesType')
        if self.hasContent_():
            lwrite('>%s' % (eol_, ))
            self.exportChildren(lwrite, level + 1, namespace_, name_, pretty_print=pretty_print)
            showIndent(lwrite, level, pretty_print)
            lwrite('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            # Self-closing tag when there are no children.
            lwrite('/>%s' % (eol_, ))
    def exportAttributes(self, lwrite, level, already_processed, namespace_='NetworkSubnetObj:', name_='RoutesType'):
        # No XML attributes on this type.
        pass
    def exportChildren(self, lwrite, level, namespace_='NetworkSubnetObj:', name_='RoutesType', fromsubclass_=False, pretty_print=True):
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        for Route_ in self.Route:
            Route_.export(lwrite, level, 'NetworkSubnetObj:', name_='Route', pretty_print=pretty_print)
    def build(self, node):
        # Populate this instance from an etree element.
        self.__sourcenode__ = node
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        # No XML attributes on this type.
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'Route':
            obj_ = network_route_entry_object.NetworkRouteEntryObjectType.factory()
            obj_.build(child_)
            self.Route.append(obj_)
# end class RoutesType
class NetworkSubnetObjectType(cybox_common.ObjectPropertiesType):
    """The NetworkSubnetObjectType type is intended to characterize a
    generic system network subnet.

    Generated binding class: child elements are Name, Description,
    Number_Of_IP_Addresses, and Routes; attributes are inherited from
    cybox_common.ObjectPropertiesType.
    """
    # Hooks for subclass substitution used by factory().
    subclass = None
    superclass = cybox_common.ObjectPropertiesType

    def __init__(self, object_reference=None, Custom_Properties=None, xsi_type=None, Name=None, Description=None, Number_Of_IP_Addresses=None, Routes=None):
        super(NetworkSubnetObjectType, self).__init__(object_reference, Custom_Properties, xsi_type )
        self.Name = Name
        self.Description = Description
        self.Number_Of_IP_Addresses = Number_Of_IP_Addresses
        self.Routes = Routes

    def factory(*args_, **kwargs_):
        # Instantiate the registered subclass when present, else this class.
        if NetworkSubnetObjectType.subclass:
            return NetworkSubnetObjectType.subclass(*args_, **kwargs_)
        else:
            return NetworkSubnetObjectType(*args_, **kwargs_)
    factory = staticmethod(factory)

    # Accessors for the child elements.
    def get_Name(self): return self.Name
    def set_Name(self, Name): self.Name = Name
    def validate_StringObjectPropertyType(self, value):
        # Validate type cybox_common.StringObjectPropertyType, a restriction on None.
        pass
    def get_Description(self): return self.Description
    def set_Description(self, Description): self.Description = Description
    def get_Number_Of_IP_Addresses(self): return self.Number_Of_IP_Addresses
    def set_Number_Of_IP_Addresses(self, Number_Of_IP_Addresses): self.Number_Of_IP_Addresses = Number_Of_IP_Addresses
    def validate_IntegerObjectPropertyType(self, value):
        # Validate type cybox_common.IntegerObjectPropertyType, a restriction on None.
        pass
    def get_Routes(self): return self.Routes
    def set_Routes(self, Routes): self.Routes = Routes

    def hasContent_(self):
        # True when any child element (or inherited content) is present.
        if (
            self.Name is not None or
            self.Description is not None or
            self.Number_Of_IP_Addresses is not None or
            self.Routes is not None or
            super(NetworkSubnetObjectType, self).hasContent_()
            ):
            return True
        else:
            return False

    def export(self, lwrite, level, namespace_='NetworkSubnetObj:', name_='NetworkSubnetObjectType', namespacedef_='', pretty_print=True):
        """Write this element (attributes and children) as XML via lwrite."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        showIndent(lwrite, level, pretty_print)
        lwrite('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(lwrite, level, already_processed, namespace_, name_='NetworkSubnetObjectType')
        if self.hasContent_():
            lwrite('>%s' % (eol_, ))
            self.exportChildren(lwrite, level + 1, namespace_, name_, pretty_print=pretty_print)
            showIndent(lwrite, level, pretty_print)
            lwrite('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            # No content: emit a self-closing tag.
            lwrite('/>%s' % (eol_, ))

    def exportAttributes(self, lwrite, level, already_processed, namespace_='NetworkSubnetObj:', name_='NetworkSubnetObjectType'):
        # All attributes come from the ObjectPropertiesType base.
        super(NetworkSubnetObjectType, self).exportAttributes(lwrite, level, already_processed, namespace_, name_='NetworkSubnetObjectType')

    def exportChildren(self, lwrite, level, namespace_='NetworkSubnetObj:', name_='NetworkSubnetObjectType', fromsubclass_=False, pretty_print=True):
        # Base-class children first, then this type's children in schema order.
        super(NetworkSubnetObjectType, self).exportChildren(lwrite, level, 'NetworkSubnetObj:', name_, True, pretty_print=pretty_print)
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.Name is not None:
            self.Name.export(lwrite, level, 'NetworkSubnetObj:', name_='Name', pretty_print=pretty_print)
        if self.Description is not None:
            self.Description.export(lwrite, level, 'NetworkSubnetObj:', name_='Description', pretty_print=pretty_print)
        if self.Number_Of_IP_Addresses is not None:
            self.Number_Of_IP_Addresses.export(lwrite, level, 'NetworkSubnetObj:', name_='Number_Of_IP_Addresses', pretty_print=pretty_print)
        if self.Routes is not None:
            self.Routes.export(lwrite, level, 'NetworkSubnetObj:', name_='Routes', pretty_print=pretty_print)

    def build(self, node):
        """Populate this object from an etree node."""
        self.__sourcenode__ = node
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            # Local element name without the XML namespace prefix.
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)

    def buildAttributes(self, node, attrs, already_processed):
        super(NetworkSubnetObjectType, self).buildAttributes(node, attrs, already_processed)

    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'Name':
            obj_ = cybox_common.StringObjectPropertyType.factory()
            obj_.build(child_)
            self.set_Name(obj_)
        elif nodeName_ == 'Description':
            obj_ = cybox_common.StructuredTextType.factory()
            obj_.build(child_)
            self.set_Description(obj_)
        elif nodeName_ == 'Number_Of_IP_Addresses':
            obj_ = cybox_common.IntegerObjectPropertyType.factory()
            obj_.build(child_)
            self.set_Number_Of_IP_Addresses(obj_)
        elif nodeName_ == 'Routes':
            obj_ = RoutesType.factory()
            obj_.build(child_)
            self.set_Routes(obj_)
        # Give the base class a chance to handle its own children.
        super(NetworkSubnetObjectType, self).buildChildren(child_, node, nodeName_, True)
# end class NetworkSubnetObjectType
# Maps XML element tag names to the binding classes used to parse them.
# get_root_tag() consults this table to choose a root class when parsing
# an arbitrary document with parse()/parseEtree()/parseString().
GDSClassesMapping = {
    'Build_Utility': cybox_common.BuildUtilityType,
    'Errors': cybox_common.ErrorsType,
    'Time': cybox_common.TimeType,
    'Certificate_Issuer': cybox_common.StringObjectPropertyType,
    'Metadata': cybox_common.MetadataType,
    'Hash': cybox_common.HashType,
    'Netmask': address_object.AddressObjectType,
    'Information_Source_Type': cybox_common.ControlledVocabularyStringType,
    'Block_Hash_Value': cybox_common.HashValueType,
    'Fuzzy_Hash_Structure': cybox_common.FuzzyHashStructureType,
    'SubDatum': cybox_common.MetadataType,
    'Segment_Hash': cybox_common.HashValueType,
    'Digital_Signature': cybox_common.DigitalSignatureInfoType,
    'Code_Snippets': cybox_common.CodeSnippetsType,
    'Route_Age': cybox_common.DurationObjectPropertyType,
    'Value': cybox_common.StringObjectPropertyType,
    'Length': cybox_common.IntegerObjectPropertyType,
    'Origin': address_object.AddressObjectType,
    'Protocol': cybox_common.StringObjectPropertyType,
    'Encoding': cybox_common.ControlledVocabularyStringType,
    'Internationalization_Settings': cybox_common.InternationalizationSettingsType,
    'Tool_Configuration': cybox_common.ToolConfigurationType,
    'English_Translation': cybox_common.StringObjectPropertyType,
    'Functions': cybox_common.FunctionsType,
    'String_Value': cybox_common.StringObjectPropertyType,
    'Build_Utility_Platform_Specification': cybox_common.PlatformSpecificationType,
    'Compiler_Informal_Description': cybox_common.CompilerInformalDescriptionType,
    'System': cybox_common.ObjectPropertiesType,
    'Platform': cybox_common.PlatformSpecificationType,
    'Usage_Context_Assumptions': cybox_common.UsageContextAssumptionsType,
    'Type': network_route_entry_object.RouteType,
    'Compilers': cybox_common.CompilersType,
    'Tool_Type': cybox_common.ControlledVocabularyStringType,
    'String': cybox_common.ExtractedStringType,
    'Tool': cybox_common.ToolInformationType,
    'Preferred_Lifetime': cybox_common.DurationObjectPropertyType,
    'Build_Information': cybox_common.BuildInformationType,
    'Tool_Hashes': cybox_common.HashListType,
    'Interface': cybox_common.StringObjectPropertyType,
    'Compiler_Platform_Specification': cybox_common.PlatformSpecificationType,
    'Valid_Lifetime': cybox_common.DurationObjectPropertyType,
    'Error_Instances': cybox_common.ErrorInstancesType,
    'Data_Segment': cybox_common.StringObjectPropertyType,
    'Certificate_Subject': cybox_common.StringObjectPropertyType,
    'Language': cybox_common.StringObjectPropertyType,
    'Gateway_Address': address_object.AddressObjectType,
    'Property': cybox_common.PropertyType,
    'Strings': cybox_common.ExtractedStringsType,
    'File_System_Offset': cybox_common.IntegerObjectPropertyType,
    'Metric': cybox_common.UnsignedLongObjectPropertyType,
    'Reference_Description': cybox_common.StructuredTextType,
    'User_Account_Info': cybox_common.ObjectPropertiesType,
    'Network_Route_Entry': network_route_entry_object.NetworkRouteEntryObjectType,
    'Configuration_Settings': cybox_common.ConfigurationSettingsType,
    'Simple_Hash_Value': cybox_common.SimpleHashValueType,
    'Byte_String_Value': cybox_common.HexBinaryObjectPropertyType,
    'Instance': cybox_common.ObjectPropertiesType,
    'Import': cybox_common.StringObjectPropertyType,
    'Identifier': cybox_common.PlatformIdentifierType,
    'Tool_Specific_Data': cybox_common.ToolSpecificDataType,
    'Execution_Environment': cybox_common.ExecutionEnvironmentType,
    'Dependencies': cybox_common.DependenciesType,
    'Offset': cybox_common.IntegerObjectPropertyType,
    'Date': cybox_common.DateRangeType,
    'Hashes': cybox_common.HashListType,
    'Segments': cybox_common.HashSegmentsType,
    'Number_Of_IP_Addresses': cybox_common.IntegerObjectPropertyType,
    'Segment_Count': cybox_common.IntegerObjectPropertyType,
    'Usage_Context_Assumption': cybox_common.StructuredTextType,
    'Block_Hash': cybox_common.FuzzyHashBlockType,
    'Dependency': cybox_common.DependencyType,
    'Error': cybox_common.ErrorType,
    'Trigger_Point': cybox_common.HexBinaryObjectPropertyType,
    'Environment_Variable': cybox_common.EnvironmentVariableType,
    'Byte_Run': cybox_common.ByteRunType,
    'Contributors': cybox_common.PersonnelType,
    'Image_Offset': cybox_common.IntegerObjectPropertyType,
    'Imports': cybox_common.ImportsType,
    'Library': cybox_common.LibraryType,
    'References': cybox_common.ToolReferencesType,
    'Internal_Strings': cybox_common.InternalStringsType,
    'Custom_Properties': cybox_common.CustomPropertiesType,
    'Configuration_Setting': cybox_common.ConfigurationSettingType,
    'Libraries': cybox_common.LibrariesType,
    'Destination_Address': address_object.AddressObjectType,
    'Function': cybox_common.StringObjectPropertyType,
    'Description': cybox_common.StructuredTextType,
    'Code_Snippet': cybox_common.ObjectPropertiesType,
    'Build_Configuration': cybox_common.BuildConfigurationType,
    'VLAN_Name': cybox_common.StringObjectPropertyType,
    'Address': address_object.AddressObjectType,
    'Search_Within': cybox_common.IntegerObjectPropertyType,
    'Segment': cybox_common.HashSegmentType,
    'Compiler': cybox_common.CompilerType,
    'Name': cybox_common.StringObjectPropertyType,
    'Route': network_route_entry_object.NetworkRouteEntryObjectType,
    'Address_Value': cybox_common.StringObjectPropertyType,
    'VLAN_Num': cybox_common.IntegerObjectPropertyType,
    'Signature_Description': cybox_common.StringObjectPropertyType,
    'Block_Size': cybox_common.IntegerObjectPropertyType,
    'Search_Distance': cybox_common.IntegerObjectPropertyType,
    'Fuzzy_Hash_Value': cybox_common.FuzzyHashValueType,
    'Dependency_Description': cybox_common.StructuredTextType,
    'Contributor': cybox_common.ContributorType,
    'Tools': cybox_common.ToolsInformationType,
    'Data_Size': cybox_common.DataSizeType,
}
USAGE_TEXT = """
Usage: python <Parser>.py [ -s ] <in_xml_file>
"""


def usage():
    """Print the usage banner and terminate with exit status 1."""
    print(USAGE_TEXT)
    sys.exit(1)
def get_root_tag(node):
    """Return ``(local_tag, binding_class_or_None)`` for an etree root node."""
    local_name = Tag_pattern_.match(node.tag).groups()[-1]
    binding = GDSClassesMapping.get(local_name)
    if binding is None:
        # Fall back to a class defined in this module with the same name.
        binding = globals().get(local_name)
    return local_name, binding
def parse(inFileName):
    """Parse the XML file *inFileName* and return the populated root object.

    Falls back to NetworkSubnetObjectType when the root element is not
    recognized.
    """
    tree = parsexml_(inFileName)
    root_node = tree.getroot()
    _tag, root_cls = get_root_tag(root_node)
    if root_cls is None:
        # Unrecognized root element -- assume a Network_Subnet document.
        root_cls = NetworkSubnetObjectType
    root_obj = root_cls.factory()
    root_obj.build(root_node)
    # Drop the reference so Python can reclaim the parsed DOM.
    tree = None
    return root_obj
def parseEtree(inFileName):
    """Parse *inFileName*, echo it to stdout as XML, and return the pair
    ``(root_object, root_element)``."""
    tree = parsexml_(inFileName)
    root_node = tree.getroot()
    root_tag, root_cls = get_root_tag(root_node)
    if root_cls is None:
        # Unrecognized root element -- assume a Network_Subnet document.
        root_tag = 'Network_Subnet'
        root_cls = NetworkSubnetObjectType
    root_obj = root_cls.factory()
    root_obj.build(root_node)
    # Drop the reference so Python can reclaim the parsed DOM.
    tree = None
    root_element = root_obj.to_etree(None, name_=root_tag)
    content = etree_.tostring(
        root_element, pretty_print=True,
        xml_declaration=True, encoding="utf-8")
    sys.stdout.write(content)
    sys.stdout.write('\n')
    return root_obj, root_element
def parseString(inString):
    """Parse XML from the string *inString* and return the populated root
    object, defaulting to NetworkSubnetObjectType for unknown roots."""
    from mixbox.vendor.six import StringIO
    tree = parsexml_(StringIO(inString))
    root_node = tree.getroot()
    _tag, root_cls = get_root_tag(root_node)
    if root_cls is None:
        root_cls = NetworkSubnetObjectType
    root_obj = root_cls.factory()
    root_obj.build(root_node)
    # Drop the reference so Python can reclaim the parsed DOM.
    tree = None
    return root_obj
def main():
    """Command-line entry point: parse the single XML file argument."""
    args = sys.argv[1:]
    if len(args) != 1:
        usage()  # does not return: exits with status 1
    parse(args[0])


if __name__ == '__main__':
    main()

__all__ = [
    "NetworkSubnetObjectType",
    "RoutesType"
]
| {
"content_hash": "87e6aa460ec2e8901fda705d5ef04eda",
"timestamp": "",
"source": "github",
"line_count": 371,
"max_line_length": 156,
"avg_line_length": 45.58490566037736,
"alnum_prop": 0.6828878902554399,
"repo_name": "CybOXProject/python-cybox",
"id": "3a7d07ac585bfd463a2a93a0ab334d34e76262aa",
"size": "17017",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cybox/bindings/network_subnet_object.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "4610747"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import typing # NOQA: F401
from nixnet import _frames
from nixnet import _funcs
from nixnet import _props
from nixnet._session import collection
from nixnet import constants
from nixnet import types
class Frames(collection.Collection):
    """Operate on the collection of frames contained in a session."""

    def _create_item(self, handle, index, name):
        # Factory hook used by the base collection to wrap each entry.
        return Frame(handle, index, name)

    @property
    def payld_len_max(self):
        # type: () -> int
        """int: Maximum payload length over all frames in this session (bytes, 0-254).

        For CAN Stream (Input and Output) sessions this follows the XNET
        Cluster CAN I/O Mode property: 8 bytes for
        ``constants.CanIoMode.CAN``, 64 bytes for
        ``constants.CanIoMode.CAN_FD`` / ``constants.CanIoMode.CAN_FD_BRS``.

        For LIN Stream (Input and Output) sessions this is always 8 bytes.

        For FlexRay Stream (Input and Output) sessions this mirrors the XNET
        Cluster FlexRay Payload Length Maximum property value.

        For Queued and Single-Point (Input and Output) sessions this is the
        maximum payload of all frames specified in the List property.
        """
        return _props.get_session_payld_len_max(self._handle)
class InFrames(Frames):
    """Frames of an input session."""

    def read_bytes(self, num_bytes, timeout=constants.TIMEOUT_NONE):
        # type: (int, float) -> bytes
        """Read raw frame data (Raw Frame Format) from the session.

        Only whole frames are returned, so fewer than *num_bytes* bytes may
        come back even when more data is available from the hardware: a
        partial frame is never copied into the result.

        Args:
            num_bytes(int): Upper limit on the number of bytes to read.
            timeout(float): Seconds to wait for *num_bytes* of frame data.
                A positive value waits for that many frame bytes and raises
                an error if they do not arrive in time;
                ``constants.TIMEOUT_INFINITE`` waits indefinitely;
                ``constants.TIMEOUT_NONE`` returns immediately with whatever
                complete frames are available (up to the limit).

        Returns:
            bytes: The raw frame data.
        """
        data, returned = _funcs.nx_read_frame(self._handle, num_bytes, timeout)
        return data[:returned]

    def read(self, num_frames, timeout=constants.TIMEOUT_NONE, frame_type=types.XnetFrame):
        # type: (int, float, typing.Type[types.FrameFactory]) -> typing.Iterable[types.Frame]
        """Read frames from the session.

        Args:
            num_frames(int): Number of frames to read.
            timeout(float): Seconds to wait for the frames; same semantics as
                :meth:`read_bytes`.
            frame_type(:any:`nixnet.types.FrameFactory`): Factory for the
                desired frame representation.

        Yields:
            :any:`nixnet.types.Frame`
        """
        build = typing.cast(typing.Callable[[types.RawFrame], types.Frame], frame_type.from_raw)
        # A payload larger than the fixed base unit occupies several units,
        # so this may yield fewer than num_frames frames.
        raw = self.read_bytes(num_frames * _frames.nxFrameFixed_t.size, timeout)
        for parsed in _frames.iterate_frames(raw):
            yield build(parsed)
class SinglePointInFrames(Frames):
    """Frames of a single-point input session."""

    def read_bytes(self, num_bytes):
        # type: (int) -> bytes
        """Read raw frame data (Raw Frame Format) without waiting.

        Args:
            num_bytes(int): Upper limit on the number of bytes to read.

        Returns:
            bytes: The raw frame data.
        """
        data, returned = _funcs.nx_read_frame(
            self._handle,
            num_bytes,
            constants.TIMEOUT_NONE)
        return data[:returned]

    def read(self, frame_type=types.XnetFrame):
        # type: (typing.Type[types.FrameFactory]) -> typing.Iterable[types.Frame]
        """Read the current value of every frame in the session.

        Args:
            frame_type(:any:`nixnet.types.FrameFactory`): Factory for the
                desired frame representation.

        Yields:
            :any:`nixnet.types.Frame`
        """
        build = typing.cast(typing.Callable[[types.RawFrame], types.Frame], frame_type.from_raw)
        # Oversized payloads span multiple fixed-size units, so fewer than
        # len(self) frames may be produced.
        raw = self.read_bytes(len(self) * _frames.nxFrameFixed_t.size)
        for parsed in _frames.iterate_frames(raw):
            yield build(parsed)
class OutFrames(Frames):
    """Frames of an output session."""

    def write_bytes(
            self,
            frame_bytes,
            timeout=10):
        # type: (bytes, float) -> None
        """Write raw frame data (Raw Frame Format) to the session.

        The raw bytes encode one or more frames using the Raw Frame Format.

        Args:
            frame_bytes(bytes): Frames to transmit.
            timeout(float): Seconds to wait for queue space.  A positive
                value waits up to that long and raises a timeout error if
                space never becomes available;
                ``constants.TIMEOUT_INFINITE`` waits indefinitely;
                ``constants.TIMEOUT_NONE`` does not wait and raises a
                timeout error immediately if all the data cannot be queued.
                On a timeout error none of the data is queued, so the call
                can simply be retried later with the same data.
        """
        _funcs.nx_write_frame(self._handle, bytes(frame_bytes), timeout)

    def write(
            self,
            frames,
            timeout=10):
        # type: (typing.Iterable[types.Frame], float) -> None
        """Serialize and write frame objects to the session.

        Args:
            frames: One or more :any:`nixnet.types.Frame` objects to be
                written to the session.
            timeout(float): Seconds to wait for queue space; same semantics
                as :meth:`write_bytes`.
        """
        units = itertools.chain.from_iterable(
            _frames.serialize_frame(frame.to_raw())
            for frame in frames)
        # Named 'payload' rather than 'bytes' -- the old local shadowed the
        # builtin that write_bytes() relies on.
        payload = b"".join(units)
        self.write_bytes(payload, timeout)
class SinglePointOutFrames(Frames):
    """Frames of a single-point output session."""

    def write_bytes(
            self,
            frame_bytes):
        # type: (bytes) -> None
        """Write raw frame data (Raw Frame Format) without waiting.

        The raw bytes encode one or more frames using the Raw Frame Format.

        Args:
            frame_bytes(bytes): Frames to transmit.
        """
        _funcs.nx_write_frame(self._handle, bytes(frame_bytes), constants.TIMEOUT_NONE)

    def write(
            self,
            frames):
        # type: (typing.Iterable[types.Frame]) -> None
        """Serialize and write frame objects to the session.

        Args:
            frames: One or more :any:`nixnet.types.Frame` objects to be
                written to the session.
        """
        units = itertools.chain.from_iterable(
            _frames.serialize_frame(frame.to_raw())
            for frame in frames)
        # Named 'payload' rather than 'bytes' -- the old local shadowed the
        # builtin that write_bytes() relies on.
        payload = b"".join(units)
        self.write_bytes(payload)
class Frame(collection.Item):
    """Frame configuration for a session.

    Each setter delegates to an NI-XNET session property, scoped to this
    frame's index within the session.
    """

    def set_can_start_time_off(self, offset):
        # type: (float) -> None
        """Set CAN Start Time Offset.

        Use this function to have more control over the schedule of frames on
        the bus, to offer more determinism by configuring cyclic frames to be
        spaced evenly.

        If you do not call this function or you set it to a negative number,
        NI-XNET chooses this start time offset based on the arbitration
        identifier and periodic transmit time.

        ``offset`` takes effect whenever a session is started. If you stop a
        session and restart it, the start time offset is re-evaluated.

        Args:
            offset(float): The amount of time that must elapse between the
                session being started and the time that the first frame is
                transmitted across the bus. This is different than the cyclic
                rate, which determines the time between subsequent frame
                transmissions.
        """
        _props.set_session_can_start_time_off(self._handle, self._index, offset)

    def set_can_tx_time(self, time):
        # type: (float) -> None
        """Set CAN Transmit Time.

        If you call this function while a frame object is currently started,
        the frame object is stopped, the cyclic rate updated, and then the
        frame object is restarted. Because of the stopping and starting, the
        frame's start time offset is re-evaluated.

        The first time a queued frame object is started, the XNET frame's
        transmit time determines the object's default queue size. Changing
        this rate has no impact on the queue size. Depending on how you change
        the rate, the queue may not be sufficient to store data for an
        extended period of time. You can mitigate this by setting the session
        Queue Size property to provide sufficient storage for all rates you
        use. If you are using a single-point session, this is not relevant.

        Args:
            time(float): Frame's transmit time while the session is running.
                The transmit time is the amount of time that must elapse
                between subsequent transmissions of a cyclic frame. The
                default value of this property comes from the database (the
                XNET Frame CAN Transmit Time property).
        """
        _props.set_session_can_tx_time(self._handle, self._index, time)

    def set_skip_n_cyclic_frames(self, n):
        # type: (int) -> None
        """Set Skip N Cyclic Frames

        When the frame's transmission time arrives and the skip count is
        nonzero, a frame value is dequeued (if this is not a single-point
        session), and the skip count is decremented, but the frame actually is
        not transmitted across the bus. When the skip count decrements to
        zero, subsequent cyclic transmissions resume.

        This function is useful for testing of ECU behavior when a cyclic
        frame is expected, but is missing for N cycles.

        .. note:: Only CAN interfaces currently support this function.

        .. note:: This property is valid only for output sessions and frames
            with cyclic timing (that is, not event-based frames).

        Args:
            n(int): Skip the next N cyclic frames when nonzero.
        """
        _props.set_session_skip_n_cyclic_frames(self._handle, self._index, n)

    def set_lin_tx_n_corrupted_chksums(self, n):
        # type: (int) -> None
        """Set LIN Transmit N Corrupted Checksums.

        When set to a nonzero value, this function causes the next N number of
        checksums to be corrupted. The checksum is corrupted by negating the
        value calculated per the database; (EnhancedValue * -1) or
        (ClassicValue * -1).

        If the frame is transmitted in an unconditional or sporadic schedule
        slot, N is always decremented for each frame transmission. If the
        frame is transmitted in an event-triggered slot and a collision
        occurs, N is not decremented. In that case, N is decremented only when
        the collision resolving schedule is executed and the frame is
        successfully transmitted. If the frame is the only one to transmit in
        the event-triggered slot (no collision), N is decremented at
        event-triggered slot time.

        This function is useful for testing ECU behavior when a corrupted
        checksum is transmitted.

        .. note:: This function is valid only for output sessions.

        Args:
            n(int): Number of checksums to be corrupted.
        """
        _props.set_session_lin_tx_n_corrupted_chksums(self._handle, self._index, n)

    def set_j1939_addr_filter(self, address=""):
        # type: (typing.Union[typing.Text, int]) -> None
        """Set J1939 Address Filter.

        Define a filter for the source address of the PGN transmitting node.
        You can use it when multiple nodes with different addresses are
        transmitting the same PGN.

        If the filter is active, the session accepts only frames transmitted
        by a node with the defined address. All other frames with the same PGN
        but transmitted by other nodes are ignored.

        .. note:: You can use this function in input sessions only.

        Args:
            address(str or int): Decimal value of the address. Leave blank to
                reset the filter.
        """
        # Value is always sent to the driver as a string.
        _props.set_session_j1939_addr_filter(self._handle, self._index, str(address))
| {
"content_hash": "064bf9a5dcab4e2ea79f5a5eb877eeae",
"timestamp": "",
"source": "github",
"line_count": 395,
"max_line_length": 109,
"avg_line_length": 40.8253164556962,
"alnum_prop": 0.6183802554880318,
"repo_name": "ni/nixnet-python",
"id": "64cc3a2ea63ac5c91957f86bd47b88283173fab2",
"size": "16128",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "nixnet/_session/frames.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "873641"
}
],
"symlink_target": ""
} |
from django.conf.urls import url
from users import views
# URL patterns for the users app; no routes are registered yet.
urlpatterns = [
    # url(r'^$', views.home, name='home'),
]
| {
"content_hash": "3fe6652ca41352c5aeef4ca8516eb187",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 42,
"avg_line_length": 15.125,
"alnum_prop": 0.6528925619834711,
"repo_name": "asyncee/django-project-template",
"id": "7c73c7a33deec99bd2b99ec6f23c33c18dd6a83f",
"size": "138",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/users/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1055"
},
{
"name": "HTML",
"bytes": "24376"
},
{
"name": "JavaScript",
"bytes": "6045"
},
{
"name": "PureBasic",
"bytes": "2954"
},
{
"name": "Python",
"bytes": "36388"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class TickformatstopdefaultsValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Compound validator for ``layout.xaxis.tickformatstopdefaults``."""

    def __init__(
        self, plotly_name="tickformatstopdefaults", parent_name="layout.xaxis", **kwargs
    ):
        # Pull the overridable defaults out of kwargs before delegating.
        data_class_str = kwargs.pop("data_class_str", "Tickformatstop")
        data_docs = kwargs.pop(
            "data_docs",
            """
""",
        )
        super(TickformatstopdefaultsValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=data_class_str,
            data_docs=data_docs,
            **kwargs
        )
| {
"content_hash": "6a3961e24478759d427400ae8a2a9818",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 88,
"avg_line_length": 32.5,
"alnum_prop": 0.582905982905983,
"repo_name": "plotly/python-api",
"id": "a9189ae00c9a59065e953d51b20ff370d663923f",
"size": "585",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/layout/xaxis/_tickformatstopdefaults.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
from data_collection.management.commands import BaseHalaroseCsvImporter
class Command(BaseHalaroseCsvImporter):
    """Import polling station data for Sedgemoor (GSS code E07000188)."""

    council_id = 'E07000188'
    # Halarose exports addresses and stations in a single CSV file.
    _export_file = 'parl.2017-06-08/Version 1/polling_station_export-2017-05-25.csv'
    addresses_name = _export_file
    stations_name = _export_file
    elections = ['parl.2017-06-08']
| {
"content_hash": "917abfc9e6d83ff9816e9141d8b6f8f6",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 87,
"avg_line_length": 52.142857142857146,
"alnum_prop": 0.7287671232876712,
"repo_name": "chris48s/UK-Polling-Stations",
"id": "7ac902803e892f4e86c164ce41b5fe91d3f0f9f2",
"size": "365",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "polling_stations/apps/data_collection/management/commands/import_sedgemoor.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "347"
},
{
"name": "Gherkin",
"bytes": "3720"
},
{
"name": "HTML",
"bytes": "30715"
},
{
"name": "JavaScript",
"bytes": "3226"
},
{
"name": "Python",
"bytes": "589520"
}
],
"symlink_target": ""
} |
from south.db import db
from south.v2 import SchemaMigration
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: add ``TestInstance.string_value`` and drop
        ``TestInstanceStatus.requires_comment``."""
        # Adding field 'TestInstance.string_value'
        db.add_column('qa_testinstance', 'string_value',
                      self.gf('django.db.models.fields.CharField')(max_length=1024, null=True, blank=True),
                      keep_default=False)
        # Deleting field 'TestInstanceStatus.requires_comment'
        db.delete_column('qa_testinstancestatus', 'requires_comment')
    def backwards(self, orm):
        """Revert the schema change: drop ``TestInstance.string_value`` and
        restore ``TestInstanceStatus.requires_comment`` (BooleanField,
        default False)."""
        # Deleting field 'TestInstance.string_value'
        db.delete_column('qa_testinstance', 'string_value')

        # Adding field 'TestInstanceStatus.requires_comment'
        db.add_column('qa_testinstancestatus', 'requires_comment',
                      self.gf('django.db.models.fields.BooleanField')(default=False),
                      keep_default=False)
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'qa.category': {
'Meta': {'object_name': 'Category'},
'description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255'})
},
'qa.frequency': {
'Meta': {'ordering': "('nominal_interval',)", 'object_name': 'Frequency'},
'due_interval': ('django.db.models.fields.PositiveIntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'nominal_interval': ('django.db.models.fields.PositiveIntegerField', [], {}),
'overdue_interval': ('django.db.models.fields.PositiveIntegerField', [], {}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
},
'qa.reference': {
'Meta': {'object_name': 'Reference'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reference_creators'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reference_modifiers'", 'to': "orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'numerical'", 'max_length': '15'}),
'value': ('django.db.models.fields.FloatField', [], {})
},
'qa.test': {
'Meta': {'object_name': 'Test'},
'calculation_procedure': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['qa.Category']"}),
'choices': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'constant_value': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'test_creator'", 'to': "orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'test_modifier'", 'to': "orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'procedure': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '128'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'simple'", 'max_length': '10'})
},
'qa.testinstance': {
'Meta': {'ordering': "('work_completed',)", 'object_name': 'TestInstance'},
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'test_instance_creator'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_progress': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'test_instance_modifier'", 'to': "orm['auth.User']"}),
'pass_fail': ('django.db.models.fields.CharField', [], {'max_length': '20', 'db_index': 'True'}),
'reference': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['qa.Reference']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'review_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'reviewed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'skipped': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['qa.TestInstanceStatus']"}),
'string_value': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'test_list_instance': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['qa.TestListInstance']", 'null': 'True', 'blank': 'True'}),
'tolerance': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['qa.Tolerance']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'unit_test_info': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['qa.UnitTestInfo']"}),
'value': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'work_completed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'work_started': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'})
},
'qa.testinstancestatus': {
'Meta': {'object_name': 'TestInstanceStatus'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'export_by_default': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'requires_review': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'valid': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'qa.testlist': {
'Meta': {'object_name': 'TestList'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'qa_testlist_created'", 'to': "orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'qa_testlist_modified'", 'to': "orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'sublists': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['qa.TestList']", 'null': 'True', 'blank': 'True'}),
'tests': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['qa.Test']", 'through': "orm['qa.TestListMembership']", 'symmetrical': 'False'})
},
'qa.testlistcycle': {
'Meta': {'object_name': 'TestListCycle'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'qa_testlistcycle_created'", 'to': "orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'qa_testlistcycle_modified'", 'to': "orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'test_lists': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['qa.TestList']", 'through': "orm['qa.TestListCycleMembership']", 'symmetrical': 'False'})
},
'qa.testlistcyclemembership': {
'Meta': {'ordering': "('order',)", 'object_name': 'TestListCycleMembership'},
'cycle': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['qa.TestListCycle']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {}),
'test_list': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['qa.TestList']"})
},
'qa.testlistinstance': {
'Meta': {'ordering': "('work_completed',)", 'object_name': 'TestListInstance'},
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'test_list_instance_creator'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_progress': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'test_list_instance_modifier'", 'to': "orm['auth.User']"}),
'test_list': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['qa.TestList']"}),
'unit_test_collection': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['qa.UnitTestCollection']"}),
'work_completed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'work_started': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'})
},
'qa.testlistmembership': {
'Meta': {'ordering': "('order',)", 'unique_together': "(('test_list', 'test'),)", 'object_name': 'TestListMembership'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'test': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['qa.Test']"}),
'test_list': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['qa.TestList']"})
},
'qa.tolerance': {
'Meta': {'object_name': 'Tolerance'},
'act_high': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'act_low': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tolerance_creators'", 'to': "orm['auth.User']"}),
'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mc_pass_choices': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'mc_tol_choices': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tolerance_modifiers'", 'to': "orm['auth.User']"}),
'modified_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'tol_high': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'tol_low': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
'qa.unittestcollection': {
'Meta': {'unique_together': "(('unit', 'frequency', 'content_type', 'object_id'),)", 'object_name': 'UnitTestCollection'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'assigned_to': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.Group']", 'null': 'True'}),
'auto_schedule': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'due_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'frequency': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['qa.Frequency']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_instance': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['qa.TestListInstance']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['units.Unit']"}),
'visible_to': ('django.db.models.fields.related.ManyToManyField', [], {'default': '[]', 'related_name': "'test_collection_visibility'", 'symmetrical': 'False', 'to': "orm['auth.Group']"})
},
'qa.unittestinfo': {
'Meta': {'unique_together': "(['test', 'unit'],)", 'object_name': 'UnitTestInfo'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'assigned_to': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.Group']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reference': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['qa.Reference']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'test': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['qa.Test']"}),
'tolerance': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['qa.Tolerance']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['units.Unit']"})
},
'units.modality': {
'Meta': {'unique_together': "[('type', 'energy')]", 'object_name': 'Modality'},
'energy': ('django.db.models.fields.FloatField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
'units.unit': {
'Meta': {'ordering': "['number']", 'object_name': 'Unit'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'install_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'modalities': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['units.Modality']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'number': ('django.db.models.fields.PositiveIntegerField', [], {'unique': 'True'}),
'serial_number': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['units.UnitType']"})
},
'units.unittype': {
'Meta': {'unique_together': "[('name', 'model')]", 'object_name': 'UnitType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'vendor': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['qa']
| {
"content_hash": "5061eec60a174911ab9155666e6c6eaa",
"timestamp": "",
"source": "github",
"line_count": 258,
"max_line_length": 199,
"avg_line_length": 86.50387596899225,
"alnum_prop": 0.5523792454521015,
"repo_name": "sharifelguindi/qatrackplus",
"id": "e64f74bcec2e9e6be8f7a9a1b172ec6a2d72df56",
"size": "22342",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "qatrack/qa/migrations/0008_auto__0_2_6_to_0_2_7.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "52183"
},
{
"name": "HTML",
"bytes": "154711"
},
{
"name": "JavaScript",
"bytes": "1706051"
},
{
"name": "Python",
"bytes": "910613"
}
],
"symlink_target": ""
} |
import factory
import django.contrib.auth.models
from . import models
class User(factory.DjangoModelFactory):
    """Factory building Django auth ``User`` rows for tests."""

    FACTORY_FOR = django.contrib.auth.models.User

    # Guarantees a unique username per instance: user0, user1, ...
    username = factory.Sequence(lambda n: 'user%d' % n)
    first_name = 'John'
    last_name = 'Doe'
    email = 'john@doe.org'
    # admin = False
class Client(factory.DjangoModelFactory):
    """Factory building ``Client`` rows with unique name/address pairs."""

    FACTORY_FOR = models.Client

    name = factory.Sequence(lambda n: 'client%d' % n)
    address = factory.Sequence(lambda n: '%d rue de la paix' % n)
class InvoiceItem(factory.DjangoModelFactory):
    """Factory building ``InvoiceItem`` rows with fixed quantity/VAT/amount."""

    FACTORY_FOR = models.InvoiceItem

    description = factory.Sequence(lambda n: 'Item #%d' % n)
    quantity = 2
    # VAT expressed as a percentage, amount as a plain float.
    vat = 20.0
    amount = 100.0
class Invoice(factory.DjangoModelFactory):
    """Factory building ``Invoice`` rows, creating a client and owner on demand."""

    FACTORY_FOR = models.Invoice

    name = factory.Sequence(lambda n: 'Invoice #%d' % n)
    # SubFactories create a fresh related Client/User per invoice.
    client = factory.SubFactory(Client)
    owner = factory.SubFactory(User)
class EstimateItem(factory.DjangoModelFactory):
    """Factory building ``EstimateItem`` rows (mirrors InvoiceItem's defaults)."""

    FACTORY_FOR = models.EstimateItem

    description = factory.Sequence(lambda n: 'Item #%d' % n)
    quantity = 2
    vat = 20.0
    amount = 100.0
class Estimate(factory.DjangoModelFactory):
    """Factory building ``Estimate`` rows (mirrors Invoice's structure)."""

    FACTORY_FOR = models.Estimate

    name = factory.Sequence(lambda n: 'Estimate #%d' % n)
    client = factory.SubFactory(Client)
    owner = factory.SubFactory(User)
| {
"content_hash": "c7b9b92fd45ade8aa4db87a930e3989f",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 65,
"avg_line_length": 24.77777777777778,
"alnum_prop": 0.6853512705530643,
"repo_name": "linovia/wight-invoices",
"id": "9a17890b9599c4cec03c51ab6a5924e02f5b92ce",
"size": "1338",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wightinvoices/invoice/factories.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "138"
},
{
"name": "Python",
"bytes": "78075"
}
],
"symlink_target": ""
} |
from flask_wtf import FlaskForm
from wtforms import StringField, TextAreaField, BooleanField, SelectField,\
SubmitField, FileField
from wtforms.validators import Required, Length, Email, Regexp
from wtforms import ValidationError
from flask_pagedown.fields import PageDownField
from ..models import Role, User
class NameForm(FlaskForm):
    """Minimal demo form that asks for the visitor's name."""

    name = StringField('What is your name?', validators = [Required()])
    submit = SubmitField('Submit')
class EditProfileForm(FlaskForm):
    """Profile fields a user may edit about themselves (all optional)."""

    name = StringField('Real name', validators=[Length(0, 64)])
    location = StringField('Location', validators=[Length(0, 64)])
    about_me = TextAreaField('About me')
    submit = SubmitField('Submit')
class UploadAvatarForm(FlaskForm):
    """Single-file upload form for a user's avatar image."""

    avatar = FileField('Avatar')
    submit = SubmitField('Submit')
class EditProfileAdminForm(FlaskForm):
    """Administrator's profile editor: may also change email, username, role.

    The form is bound to the user being edited so the uniqueness validators
    can distinguish "value unchanged" from "value taken by another account".
    """

    email = StringField('Email', validators=[Required(), Length(1, 64),
                                             Email()])
    username = StringField('Username', validators=[
        Required(), Length(1, 64), Regexp('^[A-Za-z][A-Za-z0-9_.]*$', 0,
                                          'Usernames must have only letters, '
                                          'numbers, dots or underscores')])
    role = SelectField('Role', coerce=int)
    name = StringField('Real name', validators=[Length(0, 64)])
    location = StringField('Location', validators=[Length(0, 64)])
    about_me = TextAreaField('About me')
    submit = SubmitField('Submit')

    def __init__(self, user, *args, **kwargs):
        """Populate the role dropdown from the DB and remember the target user."""
        super(EditProfileAdminForm, self).__init__(*args, **kwargs)
        self.role.choices = [(role.id, role.name)
                             for role in Role.query.order_by(Role.name).all()]
        self.user = user

    def validate_email(self, field):
        """Reject an email address already registered to a different account."""
        if field.data != self.user.email and \
                User.query.filter_by(email=field.data).first():
            raise ValidationError('Email already registered.')

    def validate_username(self, field):
        """Reject a username already taken by a different account.

        Bug fix: the lookup previously filtered on ``name`` (the real-name
        column) instead of ``username``, so duplicate usernames were not
        detected.
        """
        if field.data != self.user.username and \
                User.query.filter_by(username=field.data).first():
            raise ValidationError('Username already in use.')
class PostForm(FlaskForm):
    """Compose a new blog post; the body is required."""

    body = TextAreaField("What's on your mind?", validators=[Required()])
    submit = SubmitField('Submit')
class CommentForm(FlaskForm):
    """Single-line comment entry; the body is required."""

    body = StringField('Enter your comment', validators=[Required()])
    submit = SubmitField('Submit')
class DeleteForm(FlaskForm):
    """Confirmation form offering Delete and Back actions (no input fields)."""

    delete = SubmitField('Delete')
    back = SubmitField('Back')
"content_hash": "5194f82cadec441e97550209b4cdc4dd",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 78,
"avg_line_length": 38.19402985074627,
"alnum_prop": 0.6393122313403673,
"repo_name": "superchilli/webapp",
"id": "f34211bbc5f9b6991a7c30c878003e9a52c2938e",
"size": "2559",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/main/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5939"
},
{
"name": "CSS",
"bytes": "13325"
},
{
"name": "HTML",
"bytes": "31292"
},
{
"name": "JavaScript",
"bytes": "21983"
},
{
"name": "Mako",
"bytes": "9463"
},
{
"name": "Python",
"bytes": "12957225"
},
{
"name": "Shell",
"bytes": "3202"
}
],
"symlink_target": ""
} |
import unittest
__author__ = 'Noel'
class GrammarTestCase(unittest.TestCase):
    """Shared base class for grammar tests."""

    @staticmethod
    def parse_function(parser):
        """Build a callable that runs *parser* over its input (consuming the
        whole string) and returns the parse results as a list.
        """
        def run_parser(text):
            parse_results = parser.parseString(text, parseAll=True)
            return parse_results.asList()

        return run_parser
"content_hash": "4c2d0244f8968e3685b33161adcfea97",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 75,
"avg_line_length": 23.11764705882353,
"alnum_prop": 0.6361323155216285,
"repo_name": "pyrapt/rapt",
"id": "18b31d595757aa7641e521540f318a58d7d15320",
"size": "393",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/treebrd/grammars/grammar_test_case.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "203615"
},
{
"name": "TeX",
"bytes": "959"
}
],
"symlink_target": ""
} |
import PIL
from PIL import ImageFont
from PIL import Image
from PIL import ImageDraw

# Render a short text sample to a PNG so font/glyph output can be inspected.
# Alternative face: ImageFont.truetype("Arial-Bold.ttf", 14)
font = ImageFont.truetype("Arial.ttf", 14)

# White RGBA canvas, sized generously for one line of 14pt text.
img = Image.new("RGBA", (500, 250), (255, 255, 255))

# Fix: the original created a second, redundant ImageDraw.Draw(img) after
# drawing; one drawing context is sufficient.
draw = ImageDraw.Draw(img)
draw.text((0, 0), "This is a test", (0, 0, 0), font=font)
img.save("a_test.png")
| {
"content_hash": "e71e34f349df9666b0b20ec2c12f75ab",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 52,
"avg_line_length": 29.5,
"alnum_prop": 0.7175141242937854,
"repo_name": "poseidn/KungFoo-legacy",
"id": "de1157c4f0ca0c35eddc2e7acf98eea6876b9149",
"size": "354",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/Tools/drawGlyphs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1805688"
},
{
"name": "C++",
"bytes": "7183965"
},
{
"name": "Lua",
"bytes": "2178"
},
{
"name": "Objective-C",
"bytes": "68941"
},
{
"name": "Python",
"bytes": "187327"
},
{
"name": "Shell",
"bytes": "292236"
}
],
"symlink_target": ""
} |
import os
import brica1.gym
import numpy as np
import six.moves.cPickle as pickle
from ml.cnn_feature_extractor import CnnFeatureExtractor
from ml.agent import Agent
from config.model import CNN_FEATURE_EXTRACTOR, CAFFE_MODEL, MODEL_TYPE
from config.log import APP_KEY
import logging
app_logger = logging.getLogger(APP_KEY)
use_gpu = int(os.getenv('GPU', '-1'))
class VVCComponent(brica1.Component):
    """Visual ventral cortex component: turns raw observations into CNN
    feature vectors and fans them out to the BG and UB ports."""

    # Number of observations folded into one feature extraction call.
    image_feature_count = 1
    # Paths/identifiers for the cached extractor and the Caffe model.
    cnn_feature_extractor = CNN_FEATURE_EXTRACTOR
    model = CAFFE_MODEL
    model_type = MODEL_TYPE
    # AlexNet conv5 feature dimensionality (256 channels x 6 x 6).
    image_feature_dim = 256 * 6 * 6

    def __init__(self, n_output=10240, n_input=1):
        super(VVCComponent, self).__init__()
        self.use_gpu = use_gpu
        self.n_output = n_output
        self.n_input = n_input

    def set_model(self, feature_extractor):
        """Inject an already-constructed feature extractor."""
        self.feature_extractor = feature_extractor

    def load_model(self, cnn_feature_extractor):
        """Load a pickled CnnFeatureExtractor from disk, or build and cache one.

        Fix: the original leaked file handles (bare ``open`` without close)
        and used text-mode files for pickle data; use ``with`` and binary
        modes ('rb'/'wb').
        """
        if os.path.exists(cnn_feature_extractor):
            app_logger.info("loading... {}".format(cnn_feature_extractor))
            with open(cnn_feature_extractor, 'rb') as f:
                self.feature_extractor = pickle.load(f)
            app_logger.info("done")
        else:
            self.feature_extractor = CnnFeatureExtractor(self.use_gpu, self.model, self.model_type,
                                                         self.image_feature_dim)
            with open(cnn_feature_extractor, 'wb') as f:
                pickle.dump(self.feature_extractor, f)
            app_logger.info("pickle.dump finished")

    def fire(self):
        """Read the V1 observation, extract features, publish to BG and UB."""
        observation = self.get_in_port('Isocortex#V1-Isocortex#VVC-Input').buffer
        obs_array = self.feature_extractor.feature(observation, self.image_feature_count)

        # The same feature vector feeds both downstream components.
        self.results['Isocortex#VVC-BG-Output'] = obs_array
        self.results['Isocortex#VVC-UB-Output'] = obs_array
class BGComponent(brica1.Component):
    """Basal ganglia component: wraps the RL Agent that picks actions from
    VVC feature vectors and learns from rewards."""

    def __init__(self, n_input=10240, n_output=1):
        super(BGComponent, self).__init__()
        self.use_gpu = use_gpu
        # Epsilon-greedy exploration: start fully random, decay towards min_eps.
        self.epsilon = 1.0
        epsilon_decay_steps = 10 ** 4
        min_eps = 0.1
        # Discrete action ids understood by the environment.
        actions = [0, 1, 2]
        self.input_dim = n_input
        self.embedding_dim = 128
        self.replay_size = 32
        self.agent = Agent(self.use_gpu, self.epsilon, min_eps, epsilon_decay_steps,
                           self.input_dim, self.embedding_dim, len(actions), self.replay_size)
        self.episode = 0

    def start(self):
        """Return an initial random action before any features are processed."""
        self.get_in_port('Isocortex#VVC-BG-Input').buffer
        action = self.agent.sample_action_space()
        return action

    def end(self, reward):  # Episode Terminated
        """Run one final agent step with the terminal flag set.

        NOTE(review): `self.episode = 1` looks like it may be intended as
        `self.episode += 1` (episode counter) -- confirm against Agent.step's
        expectations.
        """
        app_logger.info('episode finished. Reward:{:.1f} / Epsilon:{:.6f}'.format(reward, self.epsilon))
        reward = self.get_in_port('RB-BG-Input').buffer
        features = self.get_in_port('Isocortex#VVC-BG-Input').buffer
        self.get_in_port('UB-BG-Input').buffer
        action, ind, dist, key, eps = self.agent.step(features, self.episode)
        # Terminal transition: episode_done flag is True here.
        self.agent.update_memory_and_train(features, action, ind, dist, reward, key, True, self.episode)
        self.episode = 1

    def fire(self):
        """One control step: read reward + features, pick an action, train,
        and publish the chosen action to the FL component."""
        reward = self.get_in_port('RB-BG-Input').buffer
        features = self.get_in_port('Isocortex#VVC-BG-Input').buffer
        self.get_in_port('UB-BG-Input').buffer
        action, ind, dist, key, eps = self.agent.step(features, self.episode)
        self.agent.update_memory_and_train(features, action, ind, dist, reward, key, False, self.episode)
        # assumes reward is a 1-element sequence (reward[0]) -- TODO confirm
        app_logger.info('Step:{} Action:{} Reward:{:.1f} Epsilon:{:.6f} Q_max:{:3f}'.format(
            self.agent.t, action, reward[0], eps, 1.0  # q_max # TODO
        ))
        self.epsilon = eps
        self.results['BG-Isocortex#FL-Output'] = np.array([action])
class UBComponent(brica1.Component):
    """Buffer component holding the latest and previous VVC feature states."""

    def __init__(self):
        super(UBComponent, self).__init__()
        # One history slot of a 10240-dim uint8 feature vector; both state
        # references intentionally start out aliased to the same array.
        initial_state = np.zeros((1, 10240), dtype=np.uint8)
        self.last_state = initial_state
        self.state = initial_state
        self.time = 0

    def end(self, action, reward):
        """Episode finished: emit an empty placeholder payload."""
        self.time += 1
        self.results['UB-BG-Output'] = [None] * 6

    def fire(self):
        """Capture the current VVC features and advance the time counter."""
        self.state = self.get_in_port('Isocortex#VVC-UB-Input').buffer
        action, reward = self.get_in_port('Isocortex#FL-UB-Input').buffer
        self.results['UB-BG-Output'] = [None] * 6
        self.last_state = self.state.copy()
        self.time += 1
class FLComponent(brica1.Component):
    """Frontal lobe component: forwards the BG's action to the motor output
    and pairs the previous action with the current reward for the UB."""

    def __init__(self):
        super(FLComponent, self).__init__()
        self.last_action = np.array([0])

    def fire(self):
        chosen_action = self.get_in_port('BG-Isocortex#FL-Input').buffer
        reward = self.get_in_port('RB-Isocortex#FL-Input').buffer

        # Motor output gets the fresh action; UB gets the one-step-delayed
        # action alongside the reward it produced.
        self.results['Isocortex#FL-MO-Output'] = chosen_action
        self.results['Isocortex#FL-UB-Output'] = [self.last_action, reward]
        self.last_action = chosen_action
| {
"content_hash": "b43cdedc6328cbeffbbc13dc16ea71aa",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 105,
"avg_line_length": 35.992753623188406,
"alnum_prop": 0.6188846386148581,
"repo_name": "pekin0609/-",
"id": "2ef4d8ffa1fb3b8b6d3e5d5b63e8bfb1356dc344",
"size": "4984",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "agent/cognitive/module.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C#",
"bytes": "329603"
},
{
"name": "JavaScript",
"bytes": "629"
},
{
"name": "Python",
"bytes": "77407"
},
{
"name": "ShaderLab",
"bytes": "1090"
},
{
"name": "Shell",
"bytes": "323"
}
],
"symlink_target": ""
} |
from pyxb_114.bundles.opengis.raw._ogc import *
| {
"content_hash": "7574018c231a7dab30b5263642e88a01",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 47,
"avg_line_length": 48,
"alnum_prop": 0.7708333333333334,
"repo_name": "msherry/PyXB-1.1.4",
"id": "f0aaa6f56aeb3b7e750686273c7ea5fca3bcb098",
"size": "48",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyxb_114/bundles/opengis/_ogc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "6307"
},
{
"name": "Python",
"bytes": "1521054"
},
{
"name": "Shell",
"bytes": "23730"
}
],
"symlink_target": ""
} |
from google.cloud import network_security_v1
def sample_get_client_tls_policy():
    """Fetch a single ClientTlsPolicy by resource name and print it."""
    # Instantiate the API client.
    client = network_security_v1.NetworkSecurityClient()
    # Build the request; "name_value" is a placeholder resource name.
    request = network_security_v1.GetClientTlsPolicyRequest(name="name_value")
    # Issue the RPC and show the returned policy.
    print(client.get_client_tls_policy(request=request))
# [END networksecurity_v1_generated_NetworkSecurity_GetClientTlsPolicy_sync_e21f794a]
| {
"content_hash": "ee787b0c1700fd5365252fc5d3a8eacc",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 85,
"avg_line_length": 27,
"alnum_prop": 0.7290448343079922,
"repo_name": "googleapis/python-network-security",
"id": "f7b567d9477546e09724d6a831e6064b7fbc50f0",
"size": "1936",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/networksecurity_v1_generated_network_security_get_client_tls_policy_sync_e21f794a.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "1853605"
},
{
"name": "Shell",
"bytes": "30690"
}
],
"symlink_target": ""
} |
import os, time, getpass, locale
# Third party modules #
import matplotlib
################################################################################
class Graph(object):
    """Base class for a plot saved under a parent object's graphs directory.

    Subclasses are expected to provide ``short_name`` (via the constructor
    or as a class attribute) -- it is the stem of the .pdf/.csv/.json
    output paths.
    """

    def __init__(self, parent, base_dir=None, short_name=None):
        self.parent = parent
        # Base dir: default to the parent's configured graphs directory.
        if not base_dir: self.base_dir = self.parent.p.graphs_dir
        else: self.base_dir = base_dir
        # Short name: keep a class-level value unless one is given.
        if short_name: self.short_name = short_name
        # Output paths share the same stem, only the extension differs.
        self.path = self.base_dir + self.short_name + '.pdf'
        self.csv_path = self.base_dir + self.short_name + '.csv'
        self.json_path = self.base_dir + self.short_name + '.json'

    def save_plot(self, fig, axes, width=18.0, height=10.0, bottom=0.1, top=0.93, left=0.06, right=0.98, sep=()):
        """Resize *fig*, stamp it with time/user/job info, optionally apply
        thousands-separator tick formatting ('x' and/or 'y' in *sep*), and
        save it to ``self.path``.
        """
        # Adjust figure geometry.
        fig.set_figwidth(width)
        fig.set_figheight(height)
        fig.subplots_adjust(hspace=0.0, bottom=bottom, top=top, left=left, right=right)
        # Data and source stamps in the top corners.
        fig.text(0.99, 0.98, time.asctime(), horizontalalignment='right')
        job_name = os.environ.get('SLURM_JOB_NAME', 'Unnamed')
        user_msg = 'user: %s, job: %s' % (getpass.getuser(), job_name)
        fig.text(0.01, 0.98, user_msg, horizontalalignment='left')
        # Nice digit grouping: the setlocale call and the formatter lambda
        # were previously duplicated per axis -- build them once.
        if 'x' in sep or 'y' in sep:
            locale.setlocale(locale.LC_ALL, '')
            # locale.format() is deprecated (removed in Python 3.12);
            # locale.format_string() is the long-standing equivalent.
            separate = lambda x, pos: locale.format_string("%d", x, grouping=True)
            # One FuncFormatter instance per axis: matplotlib formatters
            # keep a reference to the axis they are attached to.
            if 'x' in sep:
                axes.xaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(separate))
            if 'y' in sep:
                axes.yaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(separate))
        # Save it.
        fig.savefig(self.path)
"content_hash": "7f052802432073a8c8f94c4bb8fa1f41",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 113,
"avg_line_length": 44.4390243902439,
"alnum_prop": 0.5724478594950604,
"repo_name": "inodb/gefes",
"id": "d545e68c226c91ba94cc3b5ba30d3a7b284c1fb4",
"size": "1843",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gefes/graphs/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "66829"
}
],
"symlink_target": ""
} |
import pygame
import sys
class GUI:
    """Pygame front-end for the Pipi controlling interface.

    Owns the window, fonts, colours and per-screen widgets, and renders one
    of the screens ("welcome", "help", "author", "new season", "setting",
    "error") each time draw() is called.
    """
    def __init__(self, _game_state):
        pygame.init()
        # Game-state object that decides which screen to show.
        self.state = _game_state
        self.font_size = 30
        self.window_height = 430
        self.window_width = 570
        # Named RGB colours used throughout the UI.
        self.colors = {"white": (255, 255, 255),
                       "black": (41, 36, 33),
                       "navy": (0, 0, 128),
                       "red": (139, 0, 0),
                       "blue": (0, 0, 255),
                       "dark": (3, 54, 73),
                       "yellow": (255, 255, 0),
                       "turquoise blue": (0, 199, 140),
                       "green": (0, 128, 0),
                       "light green": (118, 238, 0),
                       "turquoise": (0, 229, 238),
                       "gray": (152, 152, 152)}
        self.text_color = self.colors["red"]
        self.bg_color = self.colors["turquoise blue"]
        self.tile_color = self.bg_color
        self.display_surface = pygame.display.set_mode((self.window_width, self.window_height))
        pygame.display.set_caption("Pipi Controlling Interface")
        # NOTE(review): these paths mix "\\" and "\" separators -- they work
        # because \C is not a recognised escape, but should be normalised.
        self.font = pygame.font.Font("assets\\fonts\Cutie Patootie Skinny.ttf", self.font_size)
        self.font_bold = pygame.font.Font("assets\\fonts\Cutie Patootie.ttf", self.font_size)
        # True while the settings text box is accepting keystrokes.
        self.typing_tag = False
        self.prompt = Prompt((self.window_width/2-27, self.window_height/2-57), self)
        # Maps a numeric movement command to the pad-circle position drawn
        # on the "new season" screen.
        self.pos_pad_modify_command = {
            0: (90, 340),
            2: (90, 390),
            8: (90, 290),
            4: (40, 340),
            6: (140, 340)
        }
        # Maps the same commands to the on-screen status text.
        self.pos_pad_modify_indication = {
            0: "",
            2: "moving backward",
            8: "moving forward",
            4: "moving to the left",
            6: "moving to the right"
        }
        self.pos_pad_indication = "" # Default indication for the position of game pad
        self.pos_pad = (90, 340) # Default position for the game pad
    def make_text(self, text, color, bg_color, center):
        """
        Make a text object for drawing
        """
        text_surf = self.font.render(text, True, color, bg_color)
        text_rect = text_surf.get_rect()
        text_rect.center = center
        return text_surf, text_rect
    def set_typing_tag(self, val):
        """
        Decide whether you want to type or not.
        """
        self.typing_tag = val
    def modify_pos_pad(self, command):
        """
        Modify the position of the pad according to movement.
        :return:
        """
        self.pos_pad_indication = self.pos_pad_modify_indication[command]
        self.pos_pad = self.pos_pad_modify_command[command]
    def draw(self, state):
        """
        Draw the scene.

        NOTE(review): buttons and text surfaces are recreated on every call;
        consider caching them per screen if frame rate ever matters.
        """
        self.display_surface.fill(self.bg_color)
        if state == "welcome":
            # Main menu: a vertical stack of buttons starting at y=80.
            start_point = 80
            self.setting = Button('Settings', self.text_color, self.tile_color,
                                  (self.window_width/2, start_point+60), self)
            self.new = Button('New Season', self.text_color, self.tile_color,
                              (self.window_width/2, start_point), self)
            self.quit = Button('Quit', self.text_color, self.tile_color,
                               (self.window_width/2, start_point+60*4), self)
            self.help = Button('How to use this app', self.text_color, self.tile_color,
                               (self.window_width/2, start_point+60*2), self)
            self.author = Button('About the author', self.text_color, self.tile_color,
                                 (self.window_width/2, start_point+60*3), self)
            self.buttons = [self.new, self.setting, self.quit, self.help, self.author]
            self.display_surface.blit(self.setting.get_sr()[0], self.setting.get_sr()[1])
            self.display_surface.blit(self.new.get_sr()[0], self.new.get_sr()[1])
            self.display_surface.blit(self.quit.get_sr()[0], self.quit.get_sr()[1])
            self.display_surface.blit(self.help.get_sr()[0], self.help.get_sr()[1])
            self.display_surface.blit(self.author.get_sr()[0], self.author.get_sr()[1])
        elif state == "help":
            # Render 9 lines of instructions read from a text file.
            # NOTE(review): rebinding sys.stdin to read a file is unusual --
            # plain open()/readlines() would be clearer.
            sys.stdin = open("assets/texts/instruction.txt")
            for i in range(9):
                instructions = sys.stdin.readline().strip()
                self.instructions_sur, self.instructions_rect = self.make_text(instructions, self.colors["black"],
                                                                               self.tile_color,
                                                                               (self.window_width/2,
                                                                                self.window_height/2-120+i*35))
                self.display_surface.blit(self.instructions_sur, self.instructions_rect)
            self.back = Button("Back", self.text_color, self.tile_color,
                               (self.window_width-60, self.window_height/8), self)
            self.buttons = [self.back]
            self.display_surface.blit(self.back.get_sr()[0], self.back.get_sr()[1])
        elif state == "author":
            # About page: first line highlighted in green, rest in black.
            sys.stdin = open("assets/texts/author.txt")
            for i in range(8):
                if i == 0:
                    instructions = sys.stdin.readline().strip()
                    self.instructions_sur, self.instructions_rect = self.make_text(instructions, self.colors["green"],
                                                                                   self.tile_color,
                                                                                   (self.window_width/2,
                                                                                    self.window_height/2-180+i*35))
                    self.display_surface.blit(self.instructions_sur, self.instructions_rect)
                else:
                    instructions = sys.stdin.readline().strip()
                    self.instructions_sur, self.instructions_rect = self.make_text(instructions, self.colors["black"],
                                                                                   self.tile_color,
                                                                                   (self.window_width/2,
                                                                                    self.window_height/2-120+i*35))
                    self.display_surface.blit(self.instructions_sur, self.instructions_rect)
            self.back = Button("Back", self.text_color, self.tile_color, (self.window_width-60, self.window_height/8), self)
            self.buttons = [self.back]
            self.display_surface.blit(self.back.get_sr()[0], self.back.get_sr()[1])
        elif state == "new season":
            # Control screen: directional pad circle plus movement caption.
            self.back = Button("Back", self.text_color, self.tile_color, (self.window_width-60, self.window_height/8), self)
            indi_sur, indi_rect = self.make_text(self.pos_pad_indication, self.text_color, self.tile_color,
                                                 (self.window_width/2, self.window_height/2))
            self.buttons = [self.back]
            self.display_surface.blit(self.back.get_sr()[0], self.back.get_sr()[1])
            self.display_surface.blit(indi_sur, indi_rect)
            # Outer ring is fixed; the filled gray dot tracks self.pos_pad.
            pygame.draw.circle(self.display_surface, self.colors["white"], (90, 340), 50, 6)
            pygame.draw.circle(self.display_surface, self.colors["gray"], self.pos_pad, 30, 30)
        elif state == "setting":
            # Settings screen: COM-port entry box with Save/Back buttons.
            self.prompt_rect = pygame.Rect(self.window_width/2-30, self.window_height/2-60, 60, 50)
            pygame.draw.rect(self.display_surface, self.colors["white"], self.prompt_rect)
            self.guide_sur, self.guide_rect = self.make_text("Specify your Bluetooth COM port below:",
                                                             self.colors["green"], self.tile_color,
                                                             (self.window_width/2, self.window_height/4))
            self.save = Button("Save", self.text_color, self.tile_color, (self.window_width/2+90, self.window_height//2-45), self)
            self.back = Button("Back", self.text_color, self.tile_color, (self.window_width-60, self.window_height/8), self)
            self.buttons = [self.back, self.save]
            self.display_surface.blit(self.back.get_sr()[0], self.back.get_sr()[1])
            self.display_surface.blit(self.save.get_sr()[0], self.save.get_sr()[1])
            self.display_surface.blit(self.guide_sur, self.guide_rect)
            # Draw a caret while the text box has focus.
            if self.typing_tag:
                pygame.draw.line(self.display_surface, self.colors["black"],
                                 (self.window_width/2-27, self.window_height/2-57),
                                 (self.window_width/2-27, self.window_height/2-33), 2)
            self.display_surface.blit(self.prompt.output()[1], self.prompt.output()[2])
        elif state == "error":
            # Error-help screen; same layout as "help" with different text.
            sys.stdin = open("assets/texts/error_help.txt")
            for i in range(9):
                instructions = sys.stdin.readline().strip()
                self.instructions_sur, self.instructions_rect = self.make_text(instructions, self.colors["black"],
                                                                               self.tile_color,
                                                                               (self.window_width/2,
                                                                                self.window_height/2-120+i*35))
                self.display_surface.blit(self.instructions_sur, self.instructions_rect)
            self.back = Button("Back", self.text_color, self.tile_color,
                               (self.window_width-60, self.window_height/8), self)
            self.buttons = [self.back]
            self.display_surface.blit(self.back.get_sr()[0], self.back.get_sr()[1])
class Button:
    """A clickable text label that renders bold while hovered."""

    def __init__(self, text, color, bg_color, center, _game_gui):
        self.gui = _game_gui
        self.font = self.gui.font
        self.font_bold = self.gui.font_bold
        self.text = text
        self.color = color
        self.bg_color = bg_color
        self.center = center
        self.bold = False
        # Pre-render the normal (non-bold) label.
        self.surf = self.font.render(text, True, color, bg_color)
        self.rect = self.surf.get_rect()
        self.rect.center = self.center

    def make_text(self):
        """Render the label with the bold or regular typeface."""
        typeface = self.font_bold if self.bold else self.font
        surface = typeface.render(self.text, True, self.color, self.bg_color)
        rect = surface.get_rect()
        rect.center = self.center
        return surface, rect

    def get_rect(self):
        """Return the button's current bounding rect."""
        return self.rect

    def get_sr(self):
        """Return the (surface, rect) pair for blitting."""
        return self.surf, self.rect

    def update_sr(self):
        """Re-render the label after a state change."""
        self.surf, self.rect = self.make_text()

    def set_bold(self, pos):
        """Switch to the bold face when *pos* is over the button, then blit."""
        if self.rect.collidepoint(pos):
            self.bold = True
            self.update_sr()
        self.gui.display_surface.blit(self.surf, self.rect)
class Prompt:
    """A single-line text-entry box holding at most four characters."""

    def __init__(self, topleft, _gui):
        self.string = ""
        self.color = _gui.text_color
        self.bg_color = _gui.colors["white"]
        self.topleft = topleft
        self.font = _gui.font

    def make_text(self):
        """Render the current contents anchored at self.topleft."""
        surface = self.font.render(self.string, True, self.color, self.bg_color)
        rect = surface.get_rect()
        rect.topleft = self.topleft
        return surface, rect

    def take_char(self, char):
        """Append *char*; the sentinel "del" removes the last character.

        Input beyond four characters is silently ignored.
        """
        if char == "del":
            self.string = self.string[:-1]
        elif len(self.string) <= 3:
            self.string += char

    def output(self):
        """Return (string, surface, rect) for the current contents."""
        surface, rect = self.make_text()
        return self.string, surface, rect

    def reset(self):
        """Clear the entered text."""
        self.string = ""
"content_hash": "77535d1e7a267b923d49d64aa0e8cc0c",
"timestamp": "",
"source": "github",
"line_count": 261,
"max_line_length": 130,
"avg_line_length": 47.22605363984675,
"alnum_prop": 0.5057601817296771,
"repo_name": "sontung/pathfinder",
"id": "e692d96cb153a3087409379df896639c2570663b",
"size": "12326",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "interface/gui.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "2286"
},
{
"name": "Python",
"bytes": "23233"
}
],
"symlink_target": ""
} |
"""
Cleans up Units of measurement for graphing
"""
from settings import Settings
def fix_units(list):
    """Convert glucose values from mg/dL to mmol/L when configured.

    Mutates the entry dicts in place and returns the same list. A no-op
    unless Settings.DISPLAY_UNIT is 'mmol/L'.

    NOTE: the parameter shadows the builtin ``list``; the name is kept for
    backward compatibility with existing keyword callers.
    """
    MMOLL_CONVERT_FACTOR = 18.0
    if Settings.DISPLAY_UNIT == 'mmol/L':
        # Iterate entries directly -- each entry is a dict, so mutating it
        # updates the list in place exactly as the old index loop did.
        for entry in list:
            # Recent glucose readings: {"sgv": <mg/dL value>}.
            if 'sgv' in entry:
                entry['sgv'] = entry['sgv'] / MMOLL_CONVERT_FACTOR
            # Predicted glucose:
            # - "amount": 134.7, "unit": "mg/dL"
            # or
            # - "amount": 134.7 (and no unit supplied, assumed mg/dL)
            if 'amount' in entry:
                if ('unit' not in entry) or (entry['unit'] == 'mg/dL'):
                    entry['amount'] = entry['amount'] / MMOLL_CONVERT_FACTOR
    return list
| {
"content_hash": "664afe56fe18b2229fe88e45609495c5",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 84,
"avg_line_length": 30.76,
"alnum_prop": 0.5279583875162549,
"repo_name": "channemann/openaps-monitor",
"id": "120aefb72989cb5555b50ad14b1b3a820a1dd01c",
"size": "784",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "units.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "504"
},
{
"name": "HTML",
"bytes": "3809"
},
{
"name": "JavaScript",
"bytes": "9915"
},
{
"name": "Python",
"bytes": "13936"
}
],
"symlink_target": ""
} |
import coherence.extern.louie as louie
from coherence.upnp.core.utils import generalise_boolean
from coherence.backend import Backend
DEFAULT_NAME_SIMPLE = 'SimpleLight'
DEFAULT_NAME_BETTER = 'BetterLight'
class SimpleLight(Backend):
""" this is a backend for a simple light
that only can be switched on or off
therefore we need to inform Coherence
about the state, and a method to change it
everything else is done by Coherence
"""
implements = ['BinaryLight']
logCategory = 'simple_light'
def __init__(self, server, **kwargs):
Backend.__init__(self, server)
self.name = kwargs.get('name', DEFAULT_NAME_SIMPLE)
self.server = server
self.state = 0 # we start switched off
louie.send('Coherence.UPnP.Backend.init_completed', None, backend=self)
def upnp_init(self):
if self.server:
self.server.switch_power_server.set_variable(0, 'Target', self.state)
self.server.switch_power_server.set_variable(0, 'Status', self.state)
def upnp_SetTarget(self, **kwargs):
self.info('upnp_SetTarget %r', kwargs)
self.state = int(generalise_boolean(kwargs['NewTargetValue']))
if self.server:
self.server.switch_power_server.set_variable(0, 'Target', self.state)
self.server.switch_power_server.set_variable(0, 'Status', self.state)
print "we have been switched to state", self.state
return {}
class BetterLight(Backend):
implements = ['DimmableLight']
logCategory = 'better_light'
def __init__(self, server, **kwargs):
Backend.__init__(self, server)
self.name = kwargs.get('name', DEFAULT_NAME_BETTER)
self.server = server
self.state = 0 # we start switched off
self.loadlevel = 50 # we start with 50% brightness
louie.send('Coherence.UPnP.Backend.init_completed', None, backend=self)
def upnp_init(self):
if self.server:
self.server.switch_power_server.set_variable(0, 'Target', self.state)
self.server.switch_power_server.set_variable(0, 'Status', self.state)
self.server.dimming_server.set_variable(0, 'LoadLevelTarget', self.loadlevel)
self.server.dimming_server.set_variable(0, 'LoadLevelStatus', self.loadlevel)
def upnp_SetTarget(self, **kwargs):
self.info('upnp_SetTarget %r', kwargs)
self.state = int(generalise_boolean(kwargs['NewTargetValue']))
if self.server:
self.server.switch_power_server.set_variable(0, 'Target', self.state)
self.server.switch_power_server.set_variable(0, 'Status', self.state)
print "we have been switched to state", self.state
return {}
def upnp_SetLoadLevelTarget(self, **kwargs):
self.info('SetLoadLevelTarget %r', kwargs)
self.loadlevel = int(kwargs['NewLoadlevelTarget'])
self.loadlevel = min(max(0, self.loadlevel), 100)
if self.server:
self.server.dimming_server.set_variable(0, 'LoadLevelTarget', self.loadlevel)
self.server.dimming_server.set_variable(0, 'LoadLevelStatus', self.loadlevel)
print "we have been dimmed to level", self.loadlevel
return {}
if __name__ == '__main__':
    from coherence.base import Coherence

    def main():
        # Warning-level logging keeps the demo output quiet.
        coherence = Coherence({'logmode': 'warning'})
        # Register both demo lights with the running Coherence instance.
        coherence.add_plugin('SimpleLight')
        coherence.add_plugin('BetterLight')

    from twisted.internet import reactor
    reactor.callWhenRunning(main)
    reactor.run()
| {
"content_hash": "54b8ef03c5a5611943bd46eee88b06ca",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 89,
"avg_line_length": 35.772277227722775,
"alnum_prop": 0.6437863271519513,
"repo_name": "coherence-project/Coherence",
"id": "b2b344ecfb9d29c5f8eb034bbd1a9a10f9f5a546",
"size": "3752",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "coherence/backends/light.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1305048"
},
{
"name": "Roff",
"bytes": "712"
},
{
"name": "Shell",
"bytes": "1569"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, url
from django.contrib.admin.views.decorators import staff_member_required
from ckeditor import views
# Admin-only CKEditor endpoints: image/file upload and the server-side
# file browser. Both are wrapped in staff_member_required.
urlpatterns = patterns(
    '',
    url(r'^upload/', staff_member_required(views.upload), name='ckeditor_upload'),
    url(r'^browse/', staff_member_required(views.browse), name='ckeditor_browse'),
)
| {
"content_hash": "b17c446af83eebb0ce96b0b7f82f7ee6",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 82,
"avg_line_length": 34.4,
"alnum_prop": 0.7412790697674418,
"repo_name": "sergey-romanov/django-ckeditor",
"id": "a88bb0d8f1c1e9907277da6e92d829c38bd2b1da",
"size": "344",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "ckeditor/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "111361"
},
{
"name": "HTML",
"bytes": "197012"
},
{
"name": "JavaScript",
"bytes": "68846"
},
{
"name": "PHP",
"bytes": "2274"
},
{
"name": "Python",
"bytes": "18760"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.core.urlresolvers import reverse_lazy
from django.views import generic
from django.contrib.auth import get_user_model
# from django.contrib import auth
from django.contrib import messages
from authtools import views as authviews
from braces import views as bracesviews
from django.conf import settings
from TeaRoom import settings as TSettings
from . import forms
User = get_user_model()
class LoginView(bracesviews.AnonymousRequiredMixin,
                authviews.LoginView):
    """Login form; extends the session lifetime when "remember me" is ticked."""
    template_name = "accounts/login.html"
    form_class = forms.LoginForm

    def form_valid(self, form):
        response = super(LoginView, self).form_valid(form)
        if form.cleaned_data.get('remember_me') is True:
            ONE_MONTH = 30 * 24 * 60 * 60
            # Keep the session alive for KEEP_LOGGED_DURATION (default 30d).
            self.request.session.set_expiry(
                getattr(settings, "KEEP_LOGGED_DURATION", ONE_MONTH))
        return response
class LogoutView(authviews.LogoutView):
    # After logout, send the user back to the landing page.
    url = reverse_lazy('home')
# Original template view.
# class SignUpView(bracesviews.AnonymousRequiredMixin,
# bracesviews.FormValidMessageMixin,
# generic.CreateView):
# form_class = forms.SignupForm
# model = User
# template_name = 'accounts/signup.html'
# success_url = reverse_lazy('home')
# form_valid_message = "You're signed up!"
# def form_valid(self, form):
# r = super(SignUpView, self).form_valid(form)
# username = form.cleaned_data["email"]
# password = form.cleaned_data["password1"]
# user = auth.authenticate(email=username, password=password)
# auth.login(self.request, user)
# return r
class SignUpCompleteView(bracesviews.AnonymousRequiredMixin,
                         bracesviews.FormValidMessageMixin,
                         generic.TemplateView,):
    # Static confirmation page shown right after a successful signup.
    template_name = 'accounts/signup-complete.html'
class SignUpView(bracesviews.AnonymousRequiredMixin,
                 bracesviews.FormValidMessageMixin,
                 generic.CreateView):
    # Creates the account and redirects to the signup-complete page
    # (unlike the commented-out variant above, it does not auto-login).
    form_class = forms.SignupForm
    model = User
    template_name = 'accounts/signup.html'
    success_url = reverse_lazy('accounts:signup-complete')
    form_valid_message = "You're signed up!"
    def get_context_data(self, **kwargs):
        # Call the base implementation first to get a context
        context = super(SignUpView, self).get_context_data(**kwargs)
        # Expose the site-wide settings object to the template. (The old
        # comment about "all the books" was copied from the Django docs.)
        context['site'] = TSettings.Site
        return context
class FirstPasswordSetConfirmView(bracesviews.AnonymousRequiredMixin, authviews.PasswordResetConfirmView):
    # First-time password set via an emailed token link: once the password
    # is saved, the account is activated and the e-mail marked verified.
    template_name = 'accounts/first-password-set-confirm.html'
    form_class = forms.SetPasswordForm
    success_url = reverse_lazy('accounts:first-password-set-done')
    def save_form(self, form):
        # Persist the password first, then flip the activation flags.
        user = super(FirstPasswordSetConfirmView, self).save_form(form)
        user.is_active = True
        user.status = 'registered'
        user.profile.email_verified = True
        user.save()
        user.profile.save()
        # NOTE(review): auto-login after confirmation was deliberately
        # left disabled -- see the commented-out authenticate/login calls.
        # import pdb; pdb.set_trace()
        # user = auth.authenticate(username=self.user.get_username(),
        #                          password=form.cleaned_data['new_password1'])
        # auth.login(self.request, user)
        return user
class FirstPasswordSetDoneView(authviews.PasswordResetDoneView):
    # Confirmation page after the first password has been set.
    template_name = 'accounts/first-password-set-done.html'
class PasswordChangeView(authviews.PasswordChangeView):
    """Password-change form; flashes a notice that the user must re-login."""
    form_class = forms.PasswordChangeForm
    template_name = 'accounts/password-change.html'
    success_url = reverse_lazy('home')

    def form_valid(self, form):
        form.save()
        notice = ("Your password was changed, "
                  "hence you have been logged out. Please relogin")
        messages.success(self.request, notice)
        return super(PasswordChangeView, self).form_valid(form)
class PasswordResetView(authviews.PasswordResetView):
    # Sends the password-reset e-mail using the templates below.
    form_class = forms.PasswordResetForm
    template_name = 'accounts/password-reset.html'
    success_url = reverse_lazy('accounts:password-reset-done')
    subject_template_name = 'accounts/emails/password-reset-subject.txt'
    email_template_name = 'accounts/emails/password-reset-email.html'
class PasswordResetDoneView(authviews.PasswordResetDoneView):
    # "Check your e-mail" page shown after requesting a reset.
    template_name = 'accounts/password-reset-done.html'
class PasswordResetConfirmView(authviews.PasswordResetConfirmAndLoginView):
    # Sets the new password and logs the user straight in.
    template_name = 'accounts/password-reset-confirm.html'
    form_class = forms.SetPasswordForm
| {
"content_hash": "0eafdae7948d11439fcb2caed9330c44",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 106,
"avg_line_length": 35.65384615384615,
"alnum_prop": 0.6854368932038835,
"repo_name": "zazasa/TeaRoom",
"id": "eb7d2d28117b52a1dd9edb837c647d329ae626d0",
"size": "4635",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/accounts/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "347763"
},
{
"name": "HTML",
"bytes": "977430"
},
{
"name": "JavaScript",
"bytes": "450093"
},
{
"name": "PowerShell",
"bytes": "298"
},
{
"name": "Python",
"bytes": "125611"
},
{
"name": "Ruby",
"bytes": "10748"
},
{
"name": "Shell",
"bytes": "380"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class MACContext(object):
    """Abstract interface for incremental message-authentication-code
    (MAC) computation: feed data with update(), then finalize() or
    verify(); copy() forks the running state.
    """
    @abc.abstractmethod
    def update(self, data):
        """
        Processes the provided bytes.
        """
    @abc.abstractmethod
    def finalize(self):
        """
        Returns the message authentication code as bytes.
        """
    @abc.abstractmethod
    def copy(self):
        """
        Return a MACContext that is a copy of the current context.
        """
    @abc.abstractmethod
    def verify(self, signature):
        """
        Checks if the generated message authentication code matches the
        signature.
        """
| {
"content_hash": "a9465e9def6d7a8baf32afe9e5b69524",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 71,
"avg_line_length": 21.705882352941178,
"alnum_prop": 0.5772357723577236,
"repo_name": "hipnusleo/laserjet",
"id": "40adb4ddb568b8f51db734f6fe0011192f6f00f5",
"size": "921",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "resource/pypi/cryptography-1.7.1/src/cryptography/hazmat/primitives/interfaces/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3096"
},
{
"name": "Batchfile",
"bytes": "13184"
},
{
"name": "C",
"bytes": "672858"
},
{
"name": "C++",
"bytes": "9678"
},
{
"name": "Go",
"bytes": "6671"
},
{
"name": "HTML",
"bytes": "850945"
},
{
"name": "Java",
"bytes": "14456"
},
{
"name": "Makefile",
"bytes": "14373"
},
{
"name": "Python",
"bytes": "5156663"
}
],
"symlink_target": ""
} |
import os
from tornado import web
from tornado.web import URLSpec as url
from tnb.contrib.urls import include
from tnb.settings import settings
from tnb.apps.core.views import (DocsHandler, CachingFrontendHandler, HomeHandler)
from tnb.config import PYTHON_PROJECT_DIR, ASSETS_FOLDER
# Route table. Order matters: the catch-all frontend route must stay last
# so the more specific routes win.
urls = [
    # NEED to capture "nothing" cuz tornado is weird
    url(r"^/?([^.])$", HomeHandler, {'path': ASSETS_FOLDER}),
    url(r"^/docs$", DocsHandler),
    url(r"^/docs/version/(.*)$", web.StaticFileHandler, {"path": settings.DOCS_ROOT}),
    url(r"^/static/(.*)$", web.StaticFileHandler, {"path": settings.STATIC_ROOT}),
    # Catch-all: serve the cached frontend assets.
    url(r"^/(.*)$", CachingFrontendHandler, {'path': ASSETS_FOLDER}),
]
# Sub-application routes mounted under their own URL prefixes.
urls += include(r"/healthcheck", "tnb.apps.core.urls")
urls += include(r"/customers", "tnb.apps.customers.urls")
| {
"content_hash": "e3b9c9c9e62dbd2f838919940c66b872",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 86,
"avg_line_length": 36.45454545454545,
"alnum_prop": 0.6932668329177057,
"repo_name": "vladiibine/trust-network",
"id": "82e6c820f9faf9dc8bc08870331a28867cc3e1ae",
"size": "802",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/trust_network_backend/tnb/apps/urls.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "93916"
},
{
"name": "HTML",
"bytes": "5678"
},
{
"name": "JavaScript",
"bytes": "2308306"
},
{
"name": "Makefile",
"bytes": "1632"
},
{
"name": "Python",
"bytes": "22252"
}
],
"symlink_target": ""
} |
"""This is the setup.py file for the GRR client.
This is just a meta-package which pulls in the minimal requirements to create a
client.
This package needs to stay simple so that it can be installed on windows and
ancient versions of linux to build clients.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import os
import platform
import shutil
import sys
from setuptools import find_packages
from setuptools import setup
from setuptools.command.sdist import sdist
# TODO: Fix this import once support for Python 2 is dropped.
# pylint: disable=g-import-not-at-top
if sys.version_info.major == 2:
import ConfigParser as configparser
else:
import configparser
# pylint: enable=g-import-not-at-top
THIS_DIRECTORY = os.path.dirname(os.path.realpath(__file__))
# If you run setup.py from the root GRR dir you get very different results since
# setuptools uses the MANIFEST.in from the root dir. Make sure we are in the
# package dir.
os.chdir(THIS_DIRECTORY)
def get_config():
  """Return an INI parser loaded with version.ini data.

  Looks next to this file first, then two directories up (the repo root).
  Raises RuntimeError when neither location has the file.
  """
  candidates = [
      os.path.join(THIS_DIRECTORY, "version.ini"),
      os.path.join(THIS_DIRECTORY, "../../version.ini"),
  ]
  for ini_path in candidates:
    if os.path.exists(ini_path):
      break
  else:
    raise RuntimeError("Couldn't find version.ini")
  config = configparser.SafeConfigParser()
  config.read(ini_path)
  return config
VERSION = get_config()
class Sdist(sdist):
  """Build sdist.

  Same as the stock sdist, except the release tree always ships a fresh
  copy of version.ini taken from the repository root.
  """
  def make_release_tree(self, base_dir, files):
    sdist.make_release_tree(self, base_dir, files)
    sdist_version_ini = os.path.join(base_dir, "version.ini")
    # Replace any stale version.ini with the canonical repo-root copy.
    if os.path.exists(sdist_version_ini):
      os.unlink(sdist_version_ini)
    shutil.copy(
        os.path.join(THIS_DIRECTORY, "../../version.ini"), sdist_version_ini)
# Keyword arguments for setuptools.setup(); built as a dict first so the
# platform-specific branches below can extend install_requires.
setup_args = dict(
    name="grr-response-client",
    # NOTE: VERSION is a ConfigParser, so .get(section, option) -- this
    # reads option "packageversion" from the [Version] section.
    version=VERSION.get("Version", "packageversion"),
    description="The GRR Rapid Response client.",
    license="Apache License, Version 2.0",
    maintainer="GRR Development Team",
    maintainer_email="grr-dev@googlegroups.com",
    url="https://github.com/google/grr",
    # Console entry points for the different client run modes.
    entry_points={
        "console_scripts": [
            "grr_client = grr_response_client.distro_entry:Client",
            ("grr_fleetspeak_client = "
             "grr_response_client.distro_entry:FleetspeakClient"),
            "grr_pool_client = grr_response_client.distro_entry:PoolClient"
        ]
    },
    cmdclass={"sdist": Sdist},
    packages=find_packages(),
    include_package_data=True,
    install_requires=[
        "absl-py==0.6.1",
        "grr-response-core==%s" % VERSION.get("Version", "packagedepends"),
        # TODO: This is a backport of Python 3.2+ API, should be
        # removed once support for Python 2 is dropped.
        "subprocess32==3.5.3",
        "pyinstaller==3.5",
    ],
    extras_require={
        # The following requirements are needed in Windows.
        ':sys_platform=="win32"': [
            "WMI==1.4.9",
            "pywin32==224",
        ],
    },
)
if platform.system() == "Linux":
  # TODO: 1.3.6 is a beta branch that has to be installed from
  # source. For now we only care about it for Python 3 compatibility, so it is
  # fine to use older one in normal circumstances.
  chipsec_version = "1.2.4" if sys.version_info < (3, 0) else "1.3.6"
  setup_args["install_requires"].append("chipsec=={}".format(chipsec_version))
if platform.system() != "Windows":
  # xattr has no Windows build; extended-attribute support is POSIX-only.
  setup_args["install_requires"].append("xattr==0.9.2")
setup(**setup_args)
| {
"content_hash": "69e956ec647f874d213e848ae9518890",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 80,
"avg_line_length": 31.821428571428573,
"alnum_prop": 0.6736812570145904,
"repo_name": "demonchild2112/travis-test",
"id": "47d6482299cb9b099cd32290f25989815e5cc84a",
"size": "3586",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "grr/client/setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "227"
},
{
"name": "Batchfile",
"bytes": "3446"
},
{
"name": "C",
"bytes": "11321"
},
{
"name": "C++",
"bytes": "54535"
},
{
"name": "CSS",
"bytes": "35549"
},
{
"name": "Dockerfile",
"bytes": "1819"
},
{
"name": "HCL",
"bytes": "7208"
},
{
"name": "HTML",
"bytes": "190212"
},
{
"name": "JavaScript",
"bytes": "11691"
},
{
"name": "Jupyter Notebook",
"bytes": "199190"
},
{
"name": "Makefile",
"bytes": "3139"
},
{
"name": "PowerShell",
"bytes": "1984"
},
{
"name": "Python",
"bytes": "7213255"
},
{
"name": "Roff",
"bytes": "444"
},
{
"name": "Shell",
"bytes": "48882"
},
{
"name": "Standard ML",
"bytes": "8172"
},
{
"name": "TSQL",
"bytes": "51"
}
],
"symlink_target": ""
} |
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: nxos_mtu
version_added: "2.2"
short_description: Manages MTU settings on Nexus switch.
description:
- Manages MTU settings on Nexus switch.
extends_documentation_fragment: nxos
author:
- Jason Edelman (@jedelman8)
notes:
- Either C(sysmtu) param is required or C(interface) AND C(mtu) params are req'd.
- C(state=absent) unconfigures a given MTU if that value is currently present.
options:
interface:
description:
- Full name of interface, i.e. Ethernet1/1.
required: false
default: null
mtu:
description:
- MTU for a specific interface.
required: false
default: null
sysmtu:
description:
- System jumbo MTU.
required: false
default: null
state:
description:
- Specify desired state of the resource.
required: false
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
# Ensure system mtu is 9126
- nxos_mtu:
sysmtu: 9216
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
# Config mtu on Eth1/1 (routed interface)
- nxos_mtu:
interface: Ethernet1/1
mtu: 1600
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
# Config mtu on Eth1/3 (switched interface)
- nxos_mtu:
interface: Ethernet1/3
mtu: 9216
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
# Unconfigure mtu on a given interface
- nxos_mtu:
interface: Ethernet1/3
mtu: 9216
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
state: absent
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"mtu": "1700"}
existing:
description:
- k/v pairs of existing mtu/sysmtu on the interface/system
type: dict
sample: {"mtu": "1600", "sysmtu": "9216"}
end_state:
description: k/v pairs of mtu/sysmtu values after module execution
returned: always
type: dict
sample: {"mtu": "1700", sysmtu": "9216"}
updates:
description: command sent to the device
returned: always
type: list
sample: ["interface vlan10", "mtu 1700"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
import json
# COMMON CODE FOR MIGRATION
import re
from ansible.module_utils.basic import get_exception
from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
from ansible.module_utils.shell import ShellError
try:
from ansible.module_utils.nxos import get_module
except ImportError:
from ansible.module_utils.nxos import NetworkModule
def to_list(val):
    """Coerce *val* into a list.

    Lists and tuples are copied into a fresh list, ``None`` becomes an
    empty list, and any other single value is wrapped in a one-element
    list.
    """
    if val is None:
        return []
    if isinstance(val, (list, tuple)):
        return list(val)
    return [val]
class CustomNetworkConfig(NetworkConfig):
    """NetworkConfig extended with hierarchical section helpers.

    Adds lookup/expansion of indented (parent/child) configuration
    sections and an ``add`` method that inserts lines under an optional
    chain of parent commands, creating missing parents on the way.
    """
    def expand_section(self, configobj, S=None):
        """Return *configobj* plus all of its descendants, depth-first.

        ``S`` is the accumulator list; callers normally omit it.
        """
        if S is None:
            S = list()
        S.append(configobj)
        for child in configobj.children:
            # Skip nodes already collected to avoid re-walking shared children.
            if child in S:
                continue
            self.expand_section(child, S)
        return S
    def get_object(self, path):
        """Find the config line whose text and full ancestry match *path*.

        Returns ``None`` implicitly when no line matches.
        """
        for item in self.items:
            if item.text == path[-1]:
                parents = [p.text for p in item.parents]
                if parents == path[:-1]:
                    return item
    def to_block(self, section):
        # Render a list of config-line objects back into raw text.
        return '\n'.join([item.raw for item in section])
    def get_section(self, path):
        """Return the raw text of the section at *path*.

        NOTE(review): returns a string on success but an empty *list*
        when the path does not exist — callers appear to rely on the
        falsiness of both; confirm before changing.
        """
        try:
            section = self.get_section_objects(path)
            return self.to_block(section)
        except ValueError:
            return list()
    def get_section_objects(self, path):
        """Return the config objects of the section at *path*.

        Raises ValueError when the path does not exist in the config.
        """
        if not isinstance(path, list):
            path = [path]
        obj = self.get_object(path)
        if not obj:
            raise ValueError('path does not exist in config')
        return self.expand_section(obj)
    def add(self, lines, parents=None):
        """Add one or more lines of configuration, optionally nested
        under the chain of *parents* commands (created if missing).
        """
        ancestors = list()
        offset = 0
        obj = None
        ## global config command
        if not parents:
            for line in to_list(lines):
                item = ConfigLine(line)
                item.raw = line
                # De-duplicate: only append lines not already present.
                if item not in self.items:
                    self.items.append(item)
        else:
            # Walk/create each parent level, remembering the ancestry.
            for index, p in enumerate(parents):
                try:
                    i = index + 1
                    obj = self.get_section_objects(parents[:i])[0]
                    ancestors.append(obj)
                except ValueError:
                    # add parent to config
                    offset = index * self.indent
                    obj = ConfigLine(p)
                    obj.raw = p.rjust(len(p) + offset)
                    if ancestors:
                        obj.parents = list(ancestors)
                        ancestors[-1].children.append(obj)
                    self.items.append(obj)
                    ancestors.append(obj)
            # add child objects
            for line in to_list(lines):
                # check if child already exists
                for child in ancestors[-1].children:
                    if child.text == line:
                        break
                else:
                    # for/else: only runs when no existing child matched.
                    offset = len(parents) * self.indent
                    item = ConfigLine(line)
                    item.raw = line.rjust(len(line) + offset)
                    item.parents = ancestors
                    ancestors[-1].children.append(item)
                    self.items.append(item)
def get_network_module(**kwargs):
    """Instantiate a network module across Ansible versions.

    Older releases provide ``get_module``; when its import failed the
    name is unbound and referencing it raises NameError, in which case
    the newer ``NetworkModule`` class is used instead.
    """
    try:
        factory = get_module
    except NameError:
        factory = NetworkModule
    return factory(**kwargs)
def get_config(module, include_defaults=False):
    """Return the device configuration as a CustomNetworkConfig.

    An explicit ``config`` module parameter wins; otherwise the device
    is queried via the legacy ``module.get_config()`` API or, failing
    that, the newer ``module.config`` object.  NOTE(review): the
    ``include_defaults`` *argument* is unused — the module parameter of
    the same name is consulted instead; confirm that is intended.
    """
    contents = module.params['config']
    if not contents:
        try:
            contents = module.get_config()
        except AttributeError:
            contents = module.config.get_config(
                include_defaults=module.params['include_defaults'])
    return CustomNetworkConfig(indent=2, contents=contents)
def load_config(module, candidate):
    """Push the *candidate* configuration to the device.

    Diffs *candidate* against the running config, sends the resulting
    commands (skipped in check mode), optionally saves the running
    config, and returns ``{'changed': bool}`` plus ``'updates'`` when
    commands were generated.
    """
    running = get_config(module)
    commands = [str(cmd).strip() for cmd in candidate.difference(running)]
    save_requested = module.params['save']

    result = dict(changed=False)
    if not commands:
        return result

    if not module.check_mode:
        try:
            module.configure(commands)
        except AttributeError:
            # Newer module API exposes config() instead of configure().
            module.config(commands)
        if save_requested:
            try:
                module.config.save_config()
            except AttributeError:
                module.execute(['copy running-config startup-config'])
    result['changed'] = True
    result['updates'] = commands
    return result
# END OF COMMON CODE
def execute_config_command(commands, module):
    """Send configuration commands, failing the module on shell errors.

    Tries the legacy ``module.configure`` API first; when that method is
    missing, falls back to the ``module.cli`` interface with a leading
    ``configure`` command.
    """
    def _fail(exc):
        module.fail_json(msg='Error sending CLI commands',
                         error=str(exc), commands=commands)

    try:
        module.configure(commands)
    except ShellError:
        _fail(get_exception())
    except AttributeError:
        try:
            commands.insert(0, 'configure')
            module.cli.add_commands(commands, output='config')
            module.cli.run_commands()
        except ShellError:
            _fail(get_exception())
def get_cli_body_ssh(command, response, module):
    """Normalize cli/ssh output to match what the NX-API transport returns.

    These modules were written for NX-API and not every command supports
    ``| json`` over ssh.  An XML-looking (or empty) response means the
    command was valid but the resource does not exist yet, so an empty
    body is returned; ``show run`` output is always raw text; anything
    else is expected to be a JSON document.
    """
    first = response[0]
    if 'xml' in first or first == '\n':
        return []
    if 'show run' in command:
        return response
    try:
        return [json.loads(first)]
    except ValueError:
        # fail_json() terminates the module; nothing is returned here.
        module.fail_json(msg='Command does not support JSON output',
                         command=command)
def execute_show(cmds, module, command_type=None):
    """Run show commands via whichever module API is available.

    Uses ``module.execute`` when present; otherwise falls back to the
    ``module.cli`` interface, translating NX-API command types to their
    cli output formats.  Fails the module on any shell error.
    """
    command_type_map = {
        'cli_show': 'json',
        'cli_show_ascii': 'text',
    }

    def _fail(exc):
        module.fail_json(msg='Error sending {0}'.format(cmds),
                         error=str(exc))

    try:
        if command_type:
            response = module.execute(cmds, command_type=command_type)
        else:
            response = module.execute(cmds)
    except ShellError:
        _fail(get_exception())
    except AttributeError:
        try:
            if command_type:
                module.cli.add_commands(
                    cmds, output=command_type_map.get(command_type))
            else:
                module.cli.add_commands(cmds, raw=True)
            response = module.cli.run_commands()
        except ShellError:
            _fail(get_exception())
    return response
def execute_show_command(command, module, command_type='cli_show'):
    """Dispatch a show command over the active transport and return its body.

    For ``cli`` transport the command gets a ``| json`` suffix (except
    ``show run`` output, which stays raw) and the response is normalized
    via get_cli_body_ssh(); for ``nxapi`` the command type is forwarded
    unchanged.
    """
    transport = module.params['transport']
    if transport == 'cli':
        if 'show run' not in command:
            command += ' | json'
        response = execute_show([command], module)
        body = get_cli_body_ssh(command, response, module)
    elif transport == 'nxapi':
        body = execute_show([command], module, command_type=command_type)
    return body
def flatten_list(command_lists):
    """Flatten one level of nesting: list items are spliced in, scalars kept."""
    flattened = []
    for entry in command_lists:
        flattened.extend(entry if isinstance(entry, list) else [entry])
    return flattened
def get_mtu(interface, module):
    """Return ``{'mtu': ..., 'sysmtu': ...}`` for *interface*, or ``{}``.

    Reads the interface MTU from ``show interface`` (ethernet field,
    falling back to the SVI field) and merges in the system jumbo MTU.
    Missing table keys yield an empty dict.
    """
    body = execute_show_command(
        'show interface {0}'.format(interface), module)
    try:
        row = body[0]['TABLE_interface']['ROW_interface']
        return {
            'mtu': str(row.get('eth_mtu',
                               row.get('svi_mtu', 'unreadable_via_api'))),
            'sysmtu': get_system_mtu(module)['sysmtu'],
        }
    except KeyError:
        return {}
def get_system_mtu(module):
    """Return ``{'sysmtu': <value>}`` read from the running configuration.

    Parses the trailing token of the ``system jumbomtu`` line; the value
    is returned as a string, or an empty string when no integer value is
    found.
    """
    command = 'show run all | inc jumbomtu'
    sysmtu = ''
    body = execute_show_command(command, module, command_type='cli_show_ascii')
    if body:
        sysmtu = str(body[0].split(' ')[-1])
        try:
            sysmtu = int(sysmtu)
        except ValueError:
            # A bare ``except:`` previously swallowed *all* exceptions
            # (including KeyboardInterrupt); only a non-numeric token
            # should reset the value.
            sysmtu = ""
    return dict(sysmtu=str(sysmtu))
def get_commands_config_mtu(delta, interface):
    """Build the CLI commands that apply the MTU values in *delta*.

    *delta* may contain ``mtu`` and/or ``sysmtu`` keys; when an
    interface MTU is being set, the ``interface`` command is prepended
    so the MTU command runs in interface configuration mode.
    """
    CONFIG_ARGS = {
        'mtu': 'mtu {mtu}',
        'sysmtu': 'system jumbomtu {sysmtu}',
    }
    commands = []
    # Iterate keys directly (the Python-2-only .iteritems() broke py3;
    # the values were never used anyway).
    for param in delta:
        command = CONFIG_ARGS.get(param, 'DNE').format(**delta)
        if command and command != 'DNE':
            commands.append(command)
    if delta.get('mtu'):
        commands.insert(0, 'interface {0}'.format(interface))
    return commands
def get_commands_remove_mtu(delta, interface):
    """Build the CLI commands that remove the MTU values in *delta*.

    Mirror of get_commands_config_mtu(): emits ``no ...`` forms and
    prepends the ``interface`` command when an interface MTU is being
    unconfigured.
    """
    CONFIG_ARGS = {
        'mtu': 'no mtu {mtu}',
        'sysmtu': 'no system jumbomtu {sysmtu}',
    }
    commands = []
    # Iterate keys directly (the Python-2-only .iteritems() broke py3;
    # the values were never used anyway).
    for param in delta:
        command = CONFIG_ARGS.get(param, 'DNE').format(**delta)
        if command and command != 'DNE':
            commands.append(command)
    if delta.get('mtu'):
        commands.insert(0, 'interface {0}'.format(interface))
    return commands
def get_interface_type(interface):
    """Classify an interface name by its (case-insensitive) leading letters."""
    prefix_map = (
        ('ET', 'ethernet'),
        ('VL', 'svi'),
        ('LO', 'loopback'),
        ('MG', 'management'),
        ('MA', 'management'),
        ('PO', 'portchannel'),
    )
    upper_name = interface.upper()
    for prefix, intf_type in prefix_map:
        if upper_name.startswith(prefix):
            return intf_type
    return 'unknown'
def is_default(interface, module):
    """Report whether *interface* carries no configuration.

    Returns True/False based on the interface's running config, or the
    string 'DNE' when the interface does not exist.
    """
    command = 'show run interface {0}'.format(interface)
    try:
        body = execute_show_command(
            command, module, command_type='cli_show_ascii')[0]
    except KeyError:
        return 'DNE'
    if body == 'DNE':
        return 'DNE'
    # A default interface's config ends with the bare ``interface`` line.
    return body.split('\n')[-1].startswith('interface')
def get_interface_mode(interface, intf_type, module):
    """Return 'layer2', 'layer3' or 'unknown' for the given interface.

    Loopbacks and SVIs are always layer 3; ethernet/port-channel mode is
    derived from the ``eth_mode`` field of ``show interface``.
    """
    body = execute_show_command(
        'show interface {0}'.format(interface), module)
    try:
        interface_table = body[0]['TABLE_interface']['ROW_interface']
    except (KeyError, AttributeError, IndexError):
        return 'unknown'

    if intf_type in ('loopback', 'svi'):
        return 'layer3'
    if intf_type in ('ethernet', 'portchannel'):
        eth_mode = str(interface_table.get('eth_mode', 'layer3'))
        if eth_mode in ('access', 'trunk'):
            return 'layer2'
        if eth_mode == 'routed':
            return 'layer3'
        # Any other reported mode is passed through verbatim.
        return eth_mode
    return 'unknown'
def main():
    """Module entry point.

    Validates the mtu/sysmtu parameters, diffs them against the current
    device state and applies (state=present) or removes (state=absent)
    the required configuration, honoring check mode.
    """
    argument_spec = dict(
        mtu=dict(type='str'),
        interface=dict(type='str'),
        sysmtu=dict(type='str'),
        state=dict(choices=['absent', 'present'], default='present'),
    )
    module = get_network_module(argument_spec=argument_spec,
                                required_together=[['mtu', 'interface']],
                                supports_check_mode=True)
    interface = module.params['interface']
    mtu = module.params['mtu']
    sysmtu = module.params['sysmtu']
    state = module.params['state']
    # sysmtu is mutually exclusive with the per-interface mtu settings.
    if sysmtu and (interface or mtu):
        module.fail_json(msg='Proper usage-- either just use the sysmtu param '
                             'or use interface AND mtu params')
    if interface:
        intf_type = get_interface_type(interface)
        if intf_type != 'ethernet':
            if is_default(interface, module) == 'DNE':
                module.fail_json(msg='Invalid interface. It does not exist '
                                     'on the switch.')
        existing = get_mtu(interface, module)
    else:
        existing = get_system_mtu(module)
    if interface and mtu:
        if intf_type == 'loopback':
            module.fail_json(msg='Cannot set MTU for loopback interface.')
        mode = get_interface_mode(interface, intf_type, module)
        if mode == 'layer2':
            # L2 ports may only use the default or the system jumbo MTU.
            if intf_type in ['ethernet', 'portchannel']:
                if mtu not in [existing['sysmtu'], '1500']:
                    module.fail_json(msg='MTU on L2 interfaces can only be set'
                                         ' to the system default (1500) or '
                                         'existing sysmtu value which is '
                                         ' {0}'.format(existing['sysmtu']))
        elif mode == 'layer3':
            if intf_type in ['ethernet', 'portchannel', 'svi']:
                if ((int(mtu) < 576 or int(mtu) > 9216) or
                        ((int(mtu) % 2) != 0)):
                    module.fail_json(msg='Invalid MTU for Layer 3 interface'
                                         'needs to be an even number between'
                                         '576 and 9216')
    if sysmtu:
        if ((int(sysmtu) < 576 or int(sysmtu) > 9216 or
                ((int(sysmtu) % 2) != 0))):
            module.fail_json(msg='Invalid MTU- needs to be an even '
                                 'number between 576 and 9216')
    args = dict(mtu=mtu, sysmtu=sysmtu)
    # .items() replaces the Python-2-only .iteritems() (same behavior on
    # py2, portable to py3).
    proposed = dict((k, v) for k, v in args.items() if v is not None)
    # Only the keys whose proposed value differs from the device need work.
    delta = dict(set(proposed.items()).difference(existing.items()))
    changed = False
    end_state = existing
    commands = []
    if state == 'present':
        if delta:
            command = get_commands_config_mtu(delta, interface)
            commands.append(command)
    elif state == 'absent':
        common = set(proposed.items()).intersection(existing.items())
        if common:
            command = get_commands_remove_mtu(dict(common), interface)
            commands.append(command)
    cmds = flatten_list(commands)
    if cmds:
        if module.check_mode:
            # Report what would change without touching the device.
            module.exit_json(changed=True, commands=cmds)
        else:
            changed = True
            execute_config_command(cmds, module)
            if interface:
                end_state = get_mtu(interface, module)
            else:
                end_state = get_system_mtu(module)
            # Drop the 'configure' prefix (added by the cli fallback)
            # from the reported updates.
            if 'configure' in cmds:
                cmds.pop(0)
    results = {}
    results['proposed'] = proposed
    results['existing'] = existing
    results['end_state'] = end_state
    results['updates'] = cmds
    results['changed'] = changed
    module.exit_json(**results)
if __name__ == '__main__':
    main()
| {
"content_hash": "1ccede8384fc8c016c5013b0d12d7921",
"timestamp": "",
"source": "github",
"line_count": 580,
"max_line_length": 85,
"avg_line_length": 30.643103448275863,
"alnum_prop": 0.5586563889045181,
"repo_name": "nwiizo/workspace_2017",
"id": "48a92c2f9413112659c31c2ac792d9df663adfc9",
"size": "18448",
"binary": false,
"copies": "13",
"ref": "refs/heads/master",
"path": "ansible-modules-core/network/nxos/nxos_mtu.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "173"
},
{
"name": "C++",
"bytes": "7105"
},
{
"name": "CSS",
"bytes": "50021"
},
{
"name": "Go",
"bytes": "112005"
},
{
"name": "HTML",
"bytes": "66435"
},
{
"name": "JavaScript",
"bytes": "73266"
},
{
"name": "Makefile",
"bytes": "1227"
},
{
"name": "PHP",
"bytes": "3916"
},
{
"name": "PowerShell",
"bytes": "277598"
},
{
"name": "Python",
"bytes": "11925958"
},
{
"name": "Ruby",
"bytes": "3779"
},
{
"name": "Rust",
"bytes": "1484076"
},
{
"name": "Shell",
"bytes": "86558"
}
],
"symlink_target": ""
} |
from subconscious.model import RedisModel, Column, InvalidQuery
from uuid import uuid1
from datetime import datetime
from .base import BaseTestCase
import enum
class StatusEnum(enum.Enum):
    # Allowed values for TestUser.status; only a single state is
    # exercised by these tests.
    ACTIVE = 'active'
class TestUser(RedisModel):
    """Fixture model for the query tests below.

    ``name`` is both indexed and sortable (used by ``order_by``);
    ``status`` is deliberately *not* indexed so that ``filter_by`` on it
    must raise InvalidQuery.
    """
    id = Column(primary_key=True)
    name = Column(index=True, sort=True)
    age = Column(index=True, type=int,)
    locale = Column(index=True, type=int, required=False)
    status = Column(type=str, enum=StatusEnum)
    birth_date = Column(type=datetime, required=False)
class TestAll(BaseTestCase):
    """Tests for RedisModel.all() and filter_by() query behavior.

    setUp stores three TestUser rows ('Test name', 'ZTest name',
    'Test name2'); each test drives the async iterators through
    ``self.loop.run_until_complete``.
    """
    def setUp(self):
        super(TestAll, self).setUp()
        user_id = str(uuid1())
        user = TestUser(id=user_id, name='Test name', age=100, status='active')
        ret = self.loop.run_until_complete(user.save(self.db))
        self.assertTrue(ret)
        user_id = str(uuid1())
        bdate = datetime(1854, 1, 6, 14, 35, 19)
        user1 = TestUser(id=user_id, name='ZTest name', age=53, birth_date=bdate)
        ret = self.loop.run_until_complete(user1.save(self.db))
        self.assertTrue(ret)
        user_id = str(uuid1())
        user1 = TestUser(id=user_id, name='Test name2', age=53)
        ret = self.loop.run_until_complete(user1.save(self.db))
        self.assertTrue(ret)
    def test_all(self):
        # all() without order_by: every stored row comes back, with the
        # datetime column round-tripped intact.
        async def _test_all():
            async for x in TestUser.all(db=self.db):
                self.assertEqual(type(x), TestUser)
                self.assertTrue(x.name in ('Test name', 'ZTest name', 'Test name2'))
                self.assertTrue(x.age in (100, 53))
                if x.name == 'ZTest name':
                    bdate = datetime(1854, 1, 6, 14, 35, 19)
                    self.assertEqual(x.birth_date, bdate)
        self.loop.run_until_complete(_test_all())
    def test_all_with_order(self):
        # order_by='name' ascending, '-name' descending, and re-sorting
        # after a record rename.
        async def _test():
            expected_in_order = ['Test name', 'Test name2', 'ZTest name']
            result_list = []
            async for x in TestUser.all(db=self.db, order_by='name'):
                result_list.append(x.name)
            self.assertEqual(result_list, expected_in_order)
            expected_in_order.sort(reverse=True)
            result_list = []
            async for x in TestUser.all(db=self.db, order_by='-name'):
                result_list.append(x)
            self.assertEqual([x.name for x in result_list], expected_in_order)
            # update a record to force sort order change
            result_list[0].name = 'AATest name'
            await result_list[0].save(self.db)
            result_list = []
            expected_in_order = ['AATest name', 'Test name', 'Test name2']
            async for x in TestUser.all(db=self.db, order_by='name'):
                result_list.append(x)
            self.assertEqual([x.name for x in result_list], expected_in_order)
        self.loop.run_until_complete(_test())
    def test_filter_by_non_existing_fields_should_fail(self):
        # Unknown column names must raise InvalidQuery.
        async def _test():
            async for x in TestUser.filter_by(db=self.db, non_existing1='dummy', non_existing2=1):
                assert x  # Just to satisfy flake8
        with self.assertRaises(InvalidQuery):
            self.loop.run_until_complete(_test())
    def test_filter_by_non_indexed_field_should_fail(self):
        # 'status' exists but is not indexed, so filtering on it must fail.
        async def _test():
            async for x in TestUser.filter_by(db=self.db, status='active',):
                assert x  # Just to satisfy flake8
        with self.assertRaises(InvalidQuery):
            self.loop.run_until_complete(_test())
    def test_all_iter(self):
        # Iteration order matches the sorted expectation element by element.
        names_in_expected_order = ['Test name', 'Test name2', 'ZTest name']
        result_array = []
        async def _test_loop():
            count = 0
            async for x in TestUser.all(db=self.db, order_by='name'):
                self.assertEqual(x.name, names_in_expected_order[count])
                count += 1
                result_array.append(x.name)
            self.assertEqual(names_in_expected_order, result_array)
        self.loop.run_until_complete(_test_loop())
class TestAllLimitOffset(TestAll):
    """limit/offset pagination tests on top of TestAll's three fixtures.

    Inherits TestAll's setUp (and re-runs its tests), then slices the
    name-ordered result set in various ways.
    """
    def test_limit_only(self):
        async def _test():
            result_array = []
            async for x in TestUser.all(db=self.db, order_by='name', limit=1):
                result_array.append(x.name)
            self.assertEqual(result_array, ['Test name'])
        self.loop.run_until_complete(_test())
    def test_limit_and_offset(self):
        async def _test():
            result_array = []
            async for x in TestUser.all(db=self.db, order_by='name', limit=1, offset=1):
                result_array.append(x.name)
            self.assertEqual(result_array, ['Test name2'])
        self.loop.run_until_complete(_test())
    def test_offset_only(self):
        async def _test():
            result_array = []
            async for x in TestUser.all(db=self.db, order_by='name', offset=1):
                result_array.append(x.name)
            self.assertEqual(result_array, ['Test name2', 'ZTest name'])
        self.loop.run_until_complete(_test())
    def test_over_offset(self):
        # An offset past the end of the result set yields nothing.
        async def _test():
            result_array = []
            async for x in TestUser.all(db=self.db, order_by='name', offset=999):
                result_array.append(x.name)
            self.assertEqual(result_array, [])
        self.loop.run_until_complete(_test())
    def test_nonbinding_limit(self):
        # A limit larger than the result set returns everything.
        async def _test():
            result_array = []
            async for x in TestUser.all(db=self.db, order_by='name', limit=999):
                result_array.append(x.name)
            self.assertEqual(result_array, ['Test name', 'Test name2', 'ZTest name'])
        self.loop.run_until_complete(_test())
| {
"content_hash": "7608d3df1b0a9f5e4f9b6260890cdac8",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 98,
"avg_line_length": 38.87074829931973,
"alnum_prop": 0.5829541477073854,
"repo_name": "paxos-bankchain/subconscious",
"id": "4bf04872d8c0b4d426654bc2e9fe4c550bd5b9d3",
"size": "5714",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/test_all.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34889"
}
],
"symlink_target": ""
} |
"""Cary."""
# --- import --------------------------------------------------------------------------------------
import fnmatch
import queue
import pathlib
import os
from ._collection import Collection
# --- define --------------------------------------------------------------------------------------
__all__ = ["from_directory"]
# --- from function -------------------------------------------------------------------------------
def from_directory(filepath, from_methods, *, name=None, parent=None, verbose=True):
    """Create a WrightTools Collection from a directory of source files.

    Walks the directory breadth-first.  Each file name is matched
    against the glob patterns in *from_methods* (first match wins, so
    pass an OrderedDict when patterns overlap); the matched constructor
    is called with the file path plus ``name``/``parent``/``verbose``.
    A pattern mapped to ``None`` causes the file to be ignored.
    Unmatched subdirectories become nested sub-collections.

    Parameters
    ----------
    filepath : path-like
        Path to the directory on the file system.
    from_methods : dict<str, callable or None>
        Maps glob patterns (matched against bare file names) to
        constructor functions (e.g. most ``from_<kind>`` methods within
        WrightTools).

    Keyword Arguments
    -----------------
    name : str
        Name of the root collection.  Defaults to the directory name.
    parent : Collection
        Parent collection to insert the directory structure into.
        Default is a new collection in a temp file.
    verbose : bool
        Print information as objects are created.

    Examples
    --------
    >>> from_dict = {'*.data': wt.data.from_PyCMDS,
    ...              '*.csv': wt.collections.from_Cary,
    ...              'unused': None,
    ...              }
    >>> col = wt.collection.from_directory('path/to/folder', from_dict)
    """
    root_path = pathlib.Path(filepath).resolve()
    if name is None:
        name = root_path.name
    if verbose:
        print("Creating Collection:", name)
    root = Collection(name=name, parent=parent)

    # Breadth-first traversal: each entry is (containing dir, file name,
    # collection node the item belongs under).
    pending = queue.Queue()
    for child in root_path.iterdir():
        pending.put((root_path, child.name, root))

    while not pending.empty():
        dirpath, fname, node = pending.get()
        for pattern, constructor in from_methods.items():
            if fnmatch.fnmatch(fname, pattern):
                if constructor is not None:
                    constructor(
                        dirpath / fname,
                        name=os.path.splitext(fname)[0],
                        parent=node,
                        verbose=verbose,
                    )
                break
        else:
            # No pattern matched: recurse into subdirectories, ignore files.
            if (dirpath / fname).is_dir():
                if verbose:
                    print("Creating Collection at",
                          pathlib.PurePosixPath(node.name) / fname)
                subcollection = node.create_collection(name=fname)
                for child in (dirpath / fname).iterdir():
                    pending.put((dirpath / fname, child.name, subcollection))
    return root
| {
"content_hash": "30d080298784b9b87b5be6097acbfdea",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 99,
"avg_line_length": 33.130434782608695,
"alnum_prop": 0.5259186351706037,
"repo_name": "wright-group/WrightTools",
"id": "9f269b6d73c7d0d3d15a1e3548b587aafe00f049",
"size": "3048",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WrightTools/collection/_directory.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AGS Script",
"bytes": "88851"
},
{
"name": "Python",
"bytes": "604837"
},
{
"name": "Shell",
"bytes": "54"
},
{
"name": "TeX",
"bytes": "11769"
}
],
"symlink_target": ""
} |
import datetime
import ftplib
import logging
import os.path
from airflow.hooks.base_hook import BaseHook
from past.builtins import basestring
def mlsd(conn, path="", facts=None):
    """Yield ``(name, facts_dict)`` for each entry listed via MLSD.

    Backport of the Python 3 ``ftplib`` helper for the RFC 3659 MLSD
    command.  *path* defaults to the current directory; *facts* is an
    optional list of fact names (e.g. ``["type", "size", "perm"]``)
    requested from the server via ``OPTS MLST``.
    """
    if facts:
        conn.sendcmd("OPTS MLST " + ";".join(facts) + ";")
    cmd = "MLSD %s" % path if path else "MLSD"
    lines = []
    conn.retrlines(cmd, lines.append)
    for line in lines:
        # Each line is '<fact>=<val>;...; <name>' — split facts from name.
        facts_found, _, name = line.rstrip(ftplib.CRLF).partition(' ')
        pairs = (fact.partition("=") for fact in facts_found[:-1].split(";"))
        entry = {key.lower(): value for key, _, value in pairs}
        yield name, entry
class FTPHook(BaseHook):
    """
    Interact with FTP.

    Wraps a lazily-created ``ftplib.FTP`` connection (looked up via the
    Airflow connection ``ftp_conn_id``).  Errors that may occur
    throughout are not caught here and should be handled downstream.
    Usable as a context manager, which closes the connection on exit.
    """
    def __init__(self, ftp_conn_id='ftp_default'):
        # Connection is created lazily on first get_conn() call.
        self.ftp_conn_id = ftp_conn_id
        self.conn = None
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Close the connection (if one was opened) when leaving a
        # ``with`` block; exceptions are not suppressed.
        if self.conn is not None:
            self.close_conn()
    def get_conn(self):
        """
        Returns a FTP connection object, creating (and logging in)
        one on first use.
        """
        if self.conn is None:
            params = self.get_connection(self.ftp_conn_id)
            self.conn = ftplib.FTP(params.host, params.login, params.password)
        return self.conn
    def close_conn(self):
        """
        Closes the connection. An error will occur if the
        connection wasn't ever opened.
        """
        conn = self.conn
        conn.quit()
    def describe_directory(self, path):
        """
        Returns a dictionary of {filename: {attributes}} for all files
        on the remote system (where the MLSD command is supported).

        :param path: full path to the remote directory
        :type path: str
        """
        conn = self.get_conn()
        conn.cwd(path)
        try:
            # only works in Python 3
            files = dict(conn.mlsd())
        except AttributeError:
            # Python 2: fall back to the module-level mlsd() backport.
            files = dict(mlsd(conn))
        return files
    def list_directory(self, path, nlst=False):
        """
        Returns a list of files on the remote system.

        :param path: full path to the remote directory to list
        :type path: str

        NOTE(review): the ``nlst`` parameter is accepted but unused —
        NLST is always issued; confirm whether it can be removed.
        """
        conn = self.get_conn()
        conn.cwd(path)
        files = conn.nlst()
        return files
    def create_directory(self, path):
        """
        Creates a directory on the remote system.

        :param path: full path to the remote directory to create
        :type path: str
        """
        conn = self.get_conn()
        conn.mkd(path)
    def delete_directory(self, path):
        """
        Deletes a directory on the remote system.

        :param path: full path to the remote directory to delete
        :type path: str
        """
        conn = self.get_conn()
        conn.rmd(path)
    def retrieve_file(self, remote_full_path, local_full_path_or_buffer):
        """
        Transfers the remote file to a local location.

        If local_full_path_or_buffer is a string path, the file will be put
        at that location; if it is a file-like buffer, the file will
        be written to the buffer but not closed.

        :param remote_full_path: full path to the remote file
        :type remote_full_path: str
        :param local_full_path_or_buffer: full path to the local file or a
            file-like buffer
        :type local_full_path: str or file-like buffer

        NOTE(review): when a path is given, the output handle is not
        closed if the transfer raises — consider a try/finally.
        """
        conn = self.get_conn()
        is_path = isinstance(local_full_path_or_buffer, basestring)
        if is_path:
            output_handle = open(local_full_path_or_buffer, 'wb')
        else:
            output_handle = local_full_path_or_buffer
        remote_path, remote_file_name = os.path.split(remote_full_path)
        conn.cwd(remote_path)
        logging.info('Retrieving file from FTP: {}'.format(remote_full_path))
        conn.retrbinary('RETR %s' % remote_file_name, output_handle.write)
        logging.info('Finished retrieving file from FTP: {}'.format(
            remote_full_path))
        if is_path:
            output_handle.close()
    def store_file(self, remote_full_path, local_full_path_or_buffer):
        """
        Transfers a local file to the remote location.

        If local_full_path_or_buffer is a string path, the file will be read
        from that location; if it is a file-like buffer, the file will
        be read from the buffer but not closed.

        :param remote_full_path: full path to the remote file
        :type remote_full_path: str
        :param local_full_path_or_buffer: full path to the local file or a
            file-like buffer
        :type local_full_path_or_buffer: str or file-like buffer

        NOTE(review): as with retrieve_file, the input handle is not
        closed if the transfer raises.
        """
        conn = self.get_conn()
        is_path = isinstance(local_full_path_or_buffer, basestring)
        if is_path:
            input_handle = open(local_full_path_or_buffer, 'rb')
        else:
            input_handle = local_full_path_or_buffer
        remote_path, remote_file_name = os.path.split(remote_full_path)
        conn.cwd(remote_path)
        conn.storbinary('STOR %s' % remote_file_name, input_handle)
        if is_path:
            input_handle.close()
    def delete_file(self, path):
        """
        Removes a file on the FTP Server.

        :param path: full path to the remote file
        :type path: str
        """
        conn = self.get_conn()
        conn.delete(path)
    def get_mod_time(self, path):
        """
        Returns the last-modified time of the remote file as a naive
        datetime, parsed from the server's MDTM reply
        (``213 YYYYMMDDHHMMSS``).

        :param path: full path to the remote file
        :type path: str
        """
        conn = self.get_conn()
        ftp_mdtm = conn.sendcmd('MDTM ' + path)
        # Skip the 4-character '213 ' status prefix before parsing.
        return datetime.datetime.strptime(ftp_mdtm[4:], '%Y%m%d%H%M%S')
class FTPSHook(FTPHook):
    """FTPHook variant that connects over FTPS (FTP with TLS)."""
    def get_conn(self):
        """
        Returns a FTPS connection object, creating one on first use.

        NOTE(review): ``prot_p()`` is never called, so the data channel
        is not encrypted — confirm whether that is intended.
        """
        if self.conn is None:
            params = self.get_connection(self.ftp_conn_id)
            self.conn = ftplib.FTP_TLS(
                params.host, params.login, params.password
            )
        return self.conn
| {
"content_hash": "a301e632bb7df6a7e3bd8edd55df0156",
"timestamp": "",
"source": "github",
"line_count": 218,
"max_line_length": 78,
"avg_line_length": 30.472477064220183,
"alnum_prop": 0.588438958301972,
"repo_name": "yiqingj/airflow",
"id": "e14bfe214a3daff45cb81195ec56b9acad6facdd",
"size": "7212",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "airflow/contrib/hooks/ftp_hook.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "56952"
},
{
"name": "HTML",
"bytes": "129811"
},
{
"name": "JavaScript",
"bytes": "1370838"
},
{
"name": "Mako",
"bytes": "1037"
},
{
"name": "Python",
"bytes": "1219864"
},
{
"name": "Shell",
"bytes": "17782"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import pytest
from django.conf import settings
from django.core.files.uploadedfile import SimpleUploadedFile
from django.urls import reverse
from machina.core.db.models import get_model
from machina.core.loading import get_class
from machina.test.factories import GroupFactory
from machina.test.factories import PostFactory
from machina.test.factories import UserFactory
from machina.test.factories import create_category_forum
from machina.test.factories import create_forum
from machina.test.factories import create_topic
from machina.test.testcases import BaseClientTestCase
ForumProfile = get_model('forum_member', 'ForumProfile')
PermissionHandler = get_class('forum_permission.handler', 'PermissionHandler')
assign_perm = get_class('forum_permission.shortcuts', 'assign_perm')
remove_perm = get_class('forum_permission.shortcuts', 'remove_perm')
class TestUserPostsView(BaseClientTestCase):
    """Tests for the member "user posts" view.

    The fixture builds one category with three forums, four topics and
    six posts split between ``self.user`` (the logged-in client) and a
    second user ``self.u1``; both share group ``g1``, which is granted
    read access to the category and the first two forums only.
    """
    @pytest.fixture(autouse=True)
    def setup(self):
        # Add some users
        self.u1 = UserFactory.create()
        self.g1 = GroupFactory.create()
        self.u1.groups.add(self.g1)
        self.user.groups.add(self.g1)
        # Permission handler
        self.perm_handler = PermissionHandler()
        self.top_level_cat_1 = create_category_forum()
        self.forum_1 = create_forum(parent=self.top_level_cat_1)
        self.forum_2 = create_forum(parent=self.top_level_cat_1)
        self.forum_3 = create_forum(parent=self.top_level_cat_1)
        self.topic_1 = create_topic(forum=self.forum_2, poster=self.u1)
        self.topic_1_post_1 = PostFactory.create(topic=self.topic_1, poster=self.u1)
        self.topic_1_post_2 = PostFactory.create(topic=self.topic_1, poster=self.user)
        self.topic_2 = create_topic(forum=self.forum_1, poster=self.user)
        self.topic_2_post_1 = PostFactory.create(topic=self.topic_2, poster=self.user)
        self.topic_2_post_2 = PostFactory.create(topic=self.topic_2, poster=self.u1)
        self.topic_3 = create_topic(forum=self.forum_2, poster=self.u1)
        self.topic_3_post_1 = PostFactory.create(topic=self.topic_3, poster=self.u1)
        self.topic_4 = create_topic(forum=self.forum_2, poster=self.user)
        self.topic_4_post_1 = PostFactory.create(topic=self.topic_4, poster=self.user)
        # Assign some permissions
        assign_perm('can_read_forum', self.g1, self.top_level_cat_1)
        assign_perm('can_read_forum', self.g1, self.forum_1)
        assign_perm('can_read_forum', self.g1, self.forum_2)
    def test_browsing_works(self):
        # Setup
        correct_url = reverse('forum_member:user_posts', args=(self.user.pk, ))
        # Run
        response = self.client.get(correct_url, follow=True)
        # Check
        assert response.status_code == 200
    def test_displays_only_posts_that_can_be_read_by_the_current_user(self):
        # Revoking read access to forum_1 must hide u1's post there.
        # Setup
        correct_url = reverse('forum_member:user_posts', args=(self.u1.pk, ))
        remove_perm('can_read_forum', self.g1, self.forum_1)
        # Run
        response = self.client.get(correct_url, follow=True)
        # Check
        assert response.status_code == 200
        assert list(response.context['posts']) == [self.topic_3_post_1, self.topic_1_post_1, ]
class TestForumProfileDetailView(BaseClientTestCase):
    """Tests for the member profile detail view.

    Same fixture layout as TestUserPostsView: two users sharing group
    ``g1``, three forums under one category, with read access granted to
    the category and the first two forums.
    """
    @pytest.fixture(autouse=True)
    def setup(self):
        # Add some users
        self.u1 = UserFactory.create()
        self.g1 = GroupFactory.create()
        self.u1.groups.add(self.g1)
        self.user.groups.add(self.g1)
        # Permission handler
        self.perm_handler = PermissionHandler()
        self.top_level_cat_1 = create_category_forum()
        self.forum_1 = create_forum(parent=self.top_level_cat_1)
        self.forum_2 = create_forum(parent=self.top_level_cat_1)
        self.forum_3 = create_forum(parent=self.top_level_cat_1)
        self.topic_1 = create_topic(forum=self.forum_2, poster=self.u1)
        self.topic_1_post_1 = PostFactory.create(topic=self.topic_1, poster=self.u1)
        self.topic_1_post_2 = PostFactory.create(topic=self.topic_1, poster=self.user)
        self.topic_2 = create_topic(forum=self.forum_1, poster=self.user)
        self.topic_2_post_1 = PostFactory.create(topic=self.topic_2, poster=self.user)
        self.topic_2_post_2 = PostFactory.create(topic=self.topic_2, poster=self.u1)
        self.topic_3 = create_topic(forum=self.forum_2, poster=self.u1)
        self.topic_3_post_1 = PostFactory.create(topic=self.topic_3, poster=self.u1)
        self.topic_4 = create_topic(forum=self.forum_2, poster=self.user)
        self.topic_4_post_1 = PostFactory.create(topic=self.topic_4, poster=self.user)
        # Assign some permissions
        assign_perm('can_read_forum', self.g1, self.top_level_cat_1)
        assign_perm('can_read_forum', self.g1, self.forum_1)
        assign_perm('can_read_forum', self.g1, self.forum_2)
    def test_browsing_works(self):
        # Setup
        correct_url = reverse('forum_member:profile', kwargs={'pk': self.user.pk})
        # Run
        response = self.client.get(correct_url, follow=True)
        # Check
        assert response.status_code == 200
    def test_includes_the_topics_count_in_the_context(self):
        # self.user started topic_2 and topic_4.
        # Setup
        correct_url = reverse('forum_member:profile', kwargs={'pk': self.user.pk})
        # Run
        response = self.client.get(correct_url, follow=True)
        # Check
        assert response.status_code == 200
        assert response.context['topics_count'] == 2
    def test_includes_the_recent_posts_of_the_user_in_the_context(self):
        # Recent posts appear newest-first.
        # Setup
        correct_url = reverse('forum_member:profile', kwargs={'pk': self.user.pk})
        # Run
        response = self.client.get(correct_url, follow=True)
        # Check
        assert response.status_code == 200
        assert list(response.context['recent_posts']) == [
            self.topic_4_post_1,
            self.topic_2_post_1,
            self.topic_1_post_2,
        ]
    def test_recent_posts_are_determined_using_current_user_permissions(self):
        # With forum_1 unreadable for the viewer, u1's post there must
        # be excluded from u1's visible recent posts.
        # Setup
        self.user.groups.clear()
        assign_perm('can_read_forum', self.user, self.top_level_cat_1)
        assign_perm('can_read_forum', self.user, self.forum_2)
        correct_url = reverse('forum_member:profile', kwargs={'pk': self.u1.pk})
        # Run
        response = self.client.get(correct_url, follow=True)
        # Check
        assert response.status_code == 200
        assert list(response.context['recent_posts']) == [
            self.topic_3_post_1,
            self.topic_1_post_1,
        ]
class TestForumProfileUpdateView(BaseClientTestCase):
    """Tests for the view that lets a member edit their forum profile."""

    def test_browsing_works(self):
        """The profile update form should be reachable for a logged-in user."""
        correct_url = reverse('forum_member:profile_update')
        response = self.client.get(correct_url, follow=True)
        assert response.status_code == 200

    def test_cannot_be_accessed_by_unauthenticated_users(self):
        """Anonymous visitors are redirected (login required)."""
        self.client.logout()
        correct_url = reverse('forum_member:profile_update')
        response = self.client.get(correct_url, follow=False)
        assert response.status_code == 302

    def test_can_update_forum_profile(self):
        """Posting a signature and avatar updates the member's ForumProfile."""
        import os
        correct_url = reverse('forum_member:profile_update')
        # os.path.join works whether or not MEDIA_ROOT carries a trailing
        # separator; the previous plain concatenation silently produced a
        # wrong path (e.g. '/mediaattachment.jpg') when it did not.
        avatar_path = os.path.join(settings.MEDIA_ROOT, 'attachment.jpg')
        with open(avatar_path, 'rb') as upload_file:
            post_data = {
                'signature': '**Test**',
                'avatar': SimpleUploadedFile(upload_file.name, upload_file.read()),
            }
            response = self.client.post(correct_url, post_data, follow=False)
        # A successful update redirects and persists both fields.
        assert response.status_code == 302
        profile = ForumProfile.objects.get(user=self.user)
        assert profile.signature.raw == '**Test**'
        assert profile.avatar.file is not None
class TestTopicSubscribeView(BaseClientTestCase):
    """Behavioural tests for the topic subscription view."""

    @pytest.fixture(autouse=True)
    def setup(self):
        # Two users sharing one group so read permission is granted once.
        self.u1 = UserFactory.create()
        self.g1 = GroupFactory.create()
        self.u1.groups.add(self.g1)
        self.user.groups.add(self.g1)
        self.perm_handler = PermissionHandler()
        # One category / forum / topic with a post from each user.
        self.top_level_cat_1 = create_category_forum()
        self.forum_1 = create_forum(parent=self.top_level_cat_1)
        self.topic_1 = create_topic(forum=self.forum_1, poster=self.u1)
        PostFactory.create(topic=self.topic_1, poster=self.u1)
        PostFactory.create(topic=self.topic_1, poster=self.user)
        # Grant read access through the shared group.
        assign_perm('can_read_forum', self.g1, self.top_level_cat_1)
        assign_perm('can_read_forum', self.g1, self.forum_1)

    def _subscribe_url(self):
        # URL of the subscribe view for the fixture topic.
        return reverse('forum_member:topic_subscribe', args=(self.topic_1.pk, ))

    def test_browsing_works(self):
        """The confirmation page is reachable for an authorized member."""
        response = self.client.get(self._subscribe_url(), follow=True)
        assert response.status_code == 200

    def test_can_add_a_topic_to_the_user_subscription_list(self):
        """Posting subscribes the current user to the topic."""
        response = self.client.post(self._subscribe_url(), follow=False)
        assert response.status_code == 302
        assert self.topic_1 in self.user.topic_subscriptions.all()

    def test_cannot_be_browsed_by_anonymous_users(self):
        """Anonymous visitors are redirected away."""
        self.client.logout()
        response = self.client.get(self._subscribe_url(), follow=False)
        assert response.status_code == 302

    def test_cannot_be_browsed_by_users_that_do_not_have_the_appropriate_permission(self):
        """Without read permission on the forum the view returns 403."""
        remove_perm('can_read_forum', self.g1, self.forum_1)
        response = self.client.get(self._subscribe_url(), follow=True)
        assert response.status_code == 403

    def test_cannot_be_browsed_if_the_user_has_already_subscribed_to_the_topic(self):
        """Subscribing twice to the same topic is forbidden."""
        self.topic_1.subscribers.add(self.user)
        response = self.client.get(self._subscribe_url(), follow=True)
        assert response.status_code == 403
class TestTopicUnsubscribeView(BaseClientTestCase):
    """Behavioural tests for the topic unsubscription view."""

    @pytest.fixture(autouse=True)
    def setup(self):
        # Two users sharing one group so read permission is granted once.
        self.u1 = UserFactory.create()
        self.g1 = GroupFactory.create()
        self.u1.groups.add(self.g1)
        self.user.groups.add(self.g1)
        self.perm_handler = PermissionHandler()
        # One category / forum / topic with a post from each user.
        self.top_level_cat_1 = create_category_forum()
        self.forum_1 = create_forum(parent=self.top_level_cat_1)
        self.topic_1 = create_topic(forum=self.forum_1, poster=self.u1)
        PostFactory.create(topic=self.topic_1, poster=self.u1)
        PostFactory.create(topic=self.topic_1, poster=self.user)
        # Grant read access through the shared group.
        assign_perm('can_read_forum', self.g1, self.top_level_cat_1)
        assign_perm('can_read_forum', self.g1, self.forum_1)

    def _unsubscribe_url(self):
        # URL of the unsubscribe view for the fixture topic.
        return reverse('forum_member:topic_unsubscribe', args=(self.topic_1.pk, ))

    def test_browsing_works(self):
        """The confirmation page is reachable when the user is subscribed."""
        self.topic_1.subscribers.add(self.user)
        response = self.client.get(self._unsubscribe_url(), follow=True)
        assert response.status_code == 200

    def test_can_remove_a_topic_from_the_user_subscription_list(self):
        """Posting removes the topic from the user's subscriptions."""
        self.topic_1.subscribers.add(self.user)
        response = self.client.post(self._unsubscribe_url(), follow=False)
        assert response.status_code == 302
        assert not self.user.topic_subscriptions.all()

    def test_cannot_be_browsed_by_anonymous_users(self):
        """Anonymous visitors are redirected away."""
        self.client.logout()
        response = self.client.get(self._unsubscribe_url(), follow=False)
        assert response.status_code == 302

    def test_cannot_be_browsed_by_users_that_do_not_have_the_appropriate_permission(self):
        """Without read permission on the forum the view returns 403."""
        remove_perm('can_read_forum', self.g1, self.forum_1)
        response = self.client.get(self._unsubscribe_url(), follow=True)
        assert response.status_code == 403

    def test_cannot_be_browsed_if_the_user_has_not_subscribed_to_the_topic(self):
        """Unsubscribing from a topic never subscribed to is forbidden."""
        response = self.client.get(self._unsubscribe_url(), follow=True)
        assert response.status_code == 403
class TestTopicSubscribtionListView(BaseClientTestCase):
    """Tests for the view listing the topics a member subscribed to."""

    @pytest.fixture(autouse=True)
    def setup(self):
        # Two users sharing one group so read permissions are granted once.
        self.u1 = UserFactory.create()
        self.g1 = GroupFactory.create()
        self.u1.groups.add(self.g1)
        self.user.groups.add(self.g1)
        self.perm_handler = PermissionHandler()
        # One category holding three forums; forum_3 gets no read permission.
        self.top_level_cat_1 = create_category_forum()
        self.forum_1 = create_forum(parent=self.top_level_cat_1)
        self.forum_2 = create_forum(parent=self.top_level_cat_1)
        self.forum_3 = create_forum(parent=self.top_level_cat_1)
        # Four topics with posts from both users.
        self.topic_1 = create_topic(forum=self.forum_2, poster=self.u1)
        PostFactory.create(topic=self.topic_1, poster=self.u1)
        PostFactory.create(topic=self.topic_1, poster=self.user)
        self.topic_2 = create_topic(forum=self.forum_1, poster=self.user)
        PostFactory.create(topic=self.topic_2, poster=self.user)
        PostFactory.create(topic=self.topic_2, poster=self.u1)
        self.topic_3 = create_topic(forum=self.forum_2, poster=self.u1)
        PostFactory.create(topic=self.topic_3, poster=self.u1)
        self.topic_4 = create_topic(forum=self.forum_2, poster=self.user)
        PostFactory.create(topic=self.topic_4, poster=self.user)
        # Grant read access on everything except forum_3.
        assign_perm('can_read_forum', self.g1, self.top_level_cat_1)
        assign_perm('can_read_forum', self.g1, self.forum_1)
        assign_perm('can_read_forum', self.g1, self.forum_2)

    def test_browsing_works(self):
        """The subscriptions page is reachable for a logged-in member."""
        url = reverse('forum_member:user_subscriptions')
        response = self.client.get(url, follow=True)
        assert response.status_code == 200

    def test_cannot_be_browsed_by_anonymous_users(self):
        """Anonymous visitors are redirected away."""
        url = reverse('forum_member:user_subscriptions')
        self.client.logout()
        response = self.client.get(url, follow=False)
        assert response.status_code == 302

    def test_displays_only_topics_the_user_is_subscribed_to(self):
        """Only subscribed topics show up in the listing context."""
        self.user.topic_subscriptions.add(self.topic_2)
        url = reverse('forum_member:user_subscriptions')
        response = self.client.get(url, follow=True)
        assert response.status_code == 200
        assert list(response.context_data['topics']) == [self.topic_2, ]
| {
"content_hash": "8c2108c9bedcca5fd85c59c0a49f762d",
"timestamp": "",
"source": "github",
"line_count": 399,
"max_line_length": 94,
"avg_line_length": 39.19298245614035,
"alnum_prop": 0.6427931960608774,
"repo_name": "franga2000/django-machina",
"id": "7db220c3b55d6e3be024cef4f210d57a8f71f509",
"size": "15663",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/functional/member/test_views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "13665"
},
{
"name": "HTML",
"bytes": "138474"
},
{
"name": "JavaScript",
"bytes": "5866"
},
{
"name": "Makefile",
"bytes": "1599"
},
{
"name": "Python",
"bytes": "696565"
}
],
"symlink_target": ""
} |
import logging
from .utils.indexes import check_index_names
log = logging.getLogger(__name__)
class DataProcessingCollection:
    """Mongo-backed store for data processing rules, keyed by their 'id'."""

    def __init__(self, db, collection_name):
        # `db` is expected to be an async (Motor-style) database object;
        # rules live in the named collection.
        self._rules = db[collection_name]

    async def index(self):
        """Ensure the unique index on 'id' exists (recreating if renamed)."""
        await check_index_names(self._rules, ['unique_id'])
        await self._rules.create_index('id', unique=True, name='unique_id')

    async def get(self):
        """
        Return a list of all rules
        """
        # Project out Mongo's internal _id so callers get plain rule dicts.
        cursor = self._rules.find(None, {'_id': 0})
        return await cursor.to_list(None)

    async def get_one(self, rule_id):
        """
        Return the rule for given id or None
        """
        return await self._rules.find_one({'id': rule_id}, {'_id': 0})

    async def insert(self, data):
        """
        Insert a new data processing rule:
        {
            "id": "rule_id",
            "name": "rule_name",
            "config": {
                "some": "configuration"
            }
        }
        """
        query = {'id': data['id']}
        log.info(
            "Inserting data processing rule in collection '%s'",
            self._rules.name
        )
        log.debug('upserting data: %s', data)
        # Upsert so inserting an existing id replaces the rule in place.
        await self._rules.replace_one(query, data, upsert=True)

    async def delete(self, rule_id=None):
        """
        Delete a rule from its id or all rules
        """
        log.info("Removing rule(s) from collection '%s'", self._rules.name)
        if rule_id is not None:
            query = {'id': rule_id}
            log.debug('delete query: %s', query)
            await self._rules.delete_one(query)
        else:
            # Previous code called delete_one(None): PyMongo rejects a None
            # filter, and delete_one would remove at most one document
            # anyway. delete_many({}) actually clears the collection.
            log.debug('delete query: {} (all rules)')
            await self._rules.delete_many({})
| {
"content_hash": "6285db26376c9baec9fc945c1c1e20db",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 75,
"avg_line_length": 28.31578947368421,
"alnum_prop": 0.5384138785625775,
"repo_name": "optiflows/nyuki",
"id": "e810d23dd9ca46ce12becd42cf91203ea27215b7",
"size": "1614",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nyuki/workflow/db/data_processing.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "106"
},
{
"name": "Python",
"bytes": "217168"
},
{
"name": "Shell",
"bytes": "301"
}
],
"symlink_target": ""
} |
from ._models_py3 import CloudErrorBody
from ._models_py3 import DisableSerialConsoleResult
from ._models_py3 import EnableSerialConsoleResult
from ._models_py3 import GetSerialConsoleSubscriptionNotFound
from ._models_py3 import ProxyResource
from ._models_py3 import Resource
from ._models_py3 import SerialConsoleOperations
from ._models_py3 import SerialConsoleOperationsValueItem
from ._models_py3 import SerialConsoleOperationsValueItemDisplay
from ._models_py3 import SerialConsoleStatus
from ._models_py3 import SerialPort
from ._models_py3 import SerialPortConnectResult
from ._models_py3 import SerialPortListResult
from ._microsoft_serial_console_client_enums import SerialPortState
from ._patch import __all__ as _patch_all
from ._patch import * # type: ignore # pylint: disable=unused-wildcard-import
from ._patch import patch_sdk as _patch_sdk
# Public API of this models package; the static list below mirrors the
# names imported from ``_models_py3`` / ``_microsoft_serial_console_client_enums``.
__all__ = [
    "CloudErrorBody",
    "DisableSerialConsoleResult",
    "EnableSerialConsoleResult",
    "GetSerialConsoleSubscriptionNotFound",
    "ProxyResource",
    "Resource",
    "SerialConsoleOperations",
    "SerialConsoleOperationsValueItem",
    "SerialConsoleOperationsValueItemDisplay",
    "SerialConsoleStatus",
    "SerialPort",
    "SerialPortConnectResult",
    "SerialPortListResult",
    "SerialPortState",
]
# Re-export any names the local ``_patch`` module adds, skipping duplicates.
__all__.extend([p for p in _patch_all if p not in __all__])
# Let the hand-written patch module customize the generated SDK at import time.
_patch_sdk()
| {
"content_hash": "c83eda3033c0c8eff1484f0ffc4c5ae6",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 78,
"avg_line_length": 36.83783783783784,
"alnum_prop": 0.7784299339691856,
"repo_name": "Azure/azure-sdk-for-python",
"id": "d0dff097b6e08719b87def692188bb4b6c2d65bf",
"size": "1831",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/serialconsole/azure-mgmt-serialconsole/azure/mgmt/serialconsole/models/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
import unittest
from sensorfusion.height_provider import HeightProvider, correct_ultrasonic_angle
import time
import numpy as np
from sensorfusion.fusion_master import SensorFusionMaster
import logging
class TestFusionMaster(unittest.TestCase):
    """
    Test to manually check the values that the sensor fusion produces.
    """
    def test_zero(self):
        # NOTE(review): hardware-in-the-loop test -- SensorFusionMaster
        # presumably reads live sensors, so this only works on the device.
        master = SensorFusionMaster()
        master.update()
        # Some warmups
        for x in range(100):
            master.update()
            time.sleep(0.01)
        print("Warmup is done. Now the pressure sensor should be ready...")
        # give the sensor time to collect values
        time.sleep(0.05)
        # Poll the fused state repeatedly and sanity-check every field.
        for x in range(10000):
            state = master.update()
            self.assertIsNotNone(state.temperature)
            self.assertIsNotNone(state.attitude)
            self.assertIsNotNone(state.height)
            self.assertIsNotNone(state.gps)
            # Plausibility bounds only; assumes pressure is reported in a
            # unit where ambient values exceed 100 -- TODO confirm unit.
            self.assertGreater(state.air_pressure, 100)
            self.assertGreater(state.height.height_above_ground, 0)
            self.assertGreater(state.height.ground_height_barometer, 0)
            logging.info("height above ground: {}".format(state.height.height_above_ground))
            logging.info("vertical speed: {}".format(state.height.vertical_speed))
            #logging.info("barometer ground height: {}".format(state.height.ground_height_barometer))
            # TODO: correct axis
            #logging.info("yaw, pitch, roll: {}, {}, {}".format(state.attitude.rotation.x, state.attitude.rotation.y, state.attitude.rotation.z))
            time.sleep(0.001)
if __name__ == '__main__':
    # Log at INFO level so the height/speed readings above are visible.
    logging.basicConfig(level=logging.INFO)
    # Avoid scientific notation when numpy values are printed.
    np.set_printoptions(suppress=True)
    unittest.main()
| {
"content_hash": "413f80aab43d1409da47c133e569a6a5",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 145,
"avg_line_length": 33.10909090909091,
"alnum_prop": 0.628775398132894,
"repo_name": "timdelbruegger/freecopter",
"id": "793252cc4ef8763a129fa693918c326ffa9f33a5",
"size": "1821",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python3/sensorfusion/fusion_master_test.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Mathematica",
"bytes": "60278"
},
{
"name": "Python",
"bytes": "108849"
},
{
"name": "Shell",
"bytes": "43"
}
],
"symlink_target": ""
} |
"""The Tautulli integration."""
from __future__ import annotations
from pytautulli import PyTautulli, PyTautulliHostConfiguration
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_API_KEY, CONF_URL, CONF_VERIFY_SSL, Platform
from homeassistant.core import HomeAssistant
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.device_registry import DeviceEntryType
from homeassistant.helpers.entity import DeviceInfo, EntityDescription
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import DEFAULT_NAME, DOMAIN
from .coordinator import TautulliDataUpdateCoordinator
PLATFORMS = [Platform.SENSOR]
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Set up Tautulli from a config entry."""
    verify_ssl = entry.data[CONF_VERIFY_SSL]
    # Build the connection settings once and share them with the client
    # and the coordinator.
    host_config = PyTautulliHostConfiguration(
        api_token=entry.data[CONF_API_KEY],
        url=entry.data[CONF_URL],
        verify_ssl=verify_ssl,
    )
    client = PyTautulli(
        host_configuration=host_config,
        session=async_get_clientsession(hass, verify_ssl),
    )
    coordinator = TautulliDataUpdateCoordinator(hass, host_config, client)
    # Fail setup early if the first data fetch does not succeed.
    await coordinator.async_config_entry_first_refresh()
    hass.data[DOMAIN] = coordinator
    hass.config_entries.async_setup_platforms(entry, PLATFORMS)
    return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Unload a config entry."""
    unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
    if unload_ok:
        # Drop the coordinator only once all platforms unloaded cleanly.
        hass.data.pop(DOMAIN)
    return unload_ok
class TautulliEntity(CoordinatorEntity[TautulliDataUpdateCoordinator]):
    """Defines a base Tautulli entity."""

    def __init__(
        self,
        coordinator: TautulliDataUpdateCoordinator,
        description: EntityDescription,
    ) -> None:
        """Initialize the Tautulli entity."""
        super().__init__(coordinator)
        entry_id = coordinator.config_entry.entry_id
        self.entity_description = description
        # Unique per config entry and entity description.
        self._attr_unique_id = f"{entry_id}_{description.key}"
        # All entities of one entry share a single "service" device.
        self._attr_device_info = DeviceInfo(
            entry_type=DeviceEntryType.SERVICE,
            identifiers={(DOMAIN, entry_id)},
            manufacturer=DEFAULT_NAME,
            configuration_url=coordinator.host_configuration.base_url,
        )
| {
"content_hash": "586d634ee05eebae11460f44358ad983",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 87,
"avg_line_length": 39.07936507936508,
"alnum_prop": 0.7303005686433793,
"repo_name": "toddeye/home-assistant",
"id": "fe6eeb9e3030a19b9cc3ca7b6b8113f83c6ca25c",
"size": "2462",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/tautulli/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3005"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "47414832"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
} |
from .delete_security_groups import DeleteSecurityGroupsAction
from .add_security_group_rules import AddSecurityGroupRulesAction
from .describe_security_group_rules import DescribeSecurityGroupRulesAction
from .apply_security_group import ApplySecurityGroupAction
from .describe_security_groups import DescribeSecurityGroupsAction
from .create_security_group import CreateSecurityGroupAction
from .modify_security_group_attributes import ModifySecurityGroupAttributesAction
from .delete_security_group_rules import DeleteSecurityGroupRulesAction
from .modify_security_group_rule_attributes import ModifySecurityGroupRuleAttributesAction
from .describe_security_group_ipsets import DescribeSecurityGroupIPSetsAction
from .create_security_group_ipset import CreateSecurityGroupIPSetAction
from .modify_security_group_ipset_attributes import ModifySecurityGroupIPSetAttributesAction
from .delete_security_group_ipsets import DeleteSecurityGroupIPSetsAction
# ``__all__`` must list the *names* of the public objects as strings; the
# previous version stored the class objects themselves, which breaks
# ``from ... import *`` (Python requires string entries) and confuses tooling.
__all__ = [
    'DeleteSecurityGroupsAction',
    'AddSecurityGroupRulesAction',
    'DescribeSecurityGroupRulesAction',
    'ApplySecurityGroupAction',
    'DescribeSecurityGroupsAction',
    'CreateSecurityGroupAction',
    'ModifySecurityGroupAttributesAction',
    'DeleteSecurityGroupRulesAction',
    'ModifySecurityGroupRuleAttributesAction',
    'DescribeSecurityGroupIPSetsAction',
    'CreateSecurityGroupIPSetAction',
    'ModifySecurityGroupIPSetAttributesAction',
    'DeleteSecurityGroupIPSetsAction',
]
| {
"content_hash": "7986b667d9d923069e25e48b420222b2",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 92,
"avg_line_length": 63.47826086956522,
"alnum_prop": 0.8595890410958904,
"repo_name": "yunify/qingcloud-cli",
"id": "6c5337999184814d739f172d4343064c0ec96715",
"size": "2293",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qingcloud/cli/iaas_client/actions/sg/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "852"
},
{
"name": "Python",
"bytes": "607642"
}
],
"symlink_target": ""
} |
import unittest
from yama import database
class TestFilterString(unittest.TestCase):
    """filter_string must always yield a plain str."""

    def test_return_type(self):
        # A string input stays a string.
        self.assertEqual(type(database.filter_string('123')), str)

    def test_check_casting(self):
        # Non-string input is cast to a string.
        self.assertEqual(type(database.filter_string(123)), str)
class TestConnectDb(unittest.TestCase):
    """connect_db input validation."""

    def test_empty_hostname(self):
        # An empty hostname yields no connection.
        self.assertIsNone(database.connect_db(''))
# The original guard used ``is``, which compares object identity; with
# string literals that depends on interning and is not guaranteed, so the
# tests could silently never run (and it is a SyntaxWarning on Python 3.8+).
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "772a8b93ffb629f9b4c7aff4085818fb",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 70,
"avg_line_length": 26.11111111111111,
"alnum_prop": 0.6893617021276596,
"repo_name": "vitovitolo/yama",
"id": "61b167b58efff9c491ef4d7e6f6a8384c09ba3ec",
"size": "470",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_database.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Pascal",
"bytes": "21"
},
{
"name": "Puppet",
"bytes": "21621"
},
{
"name": "Python",
"bytes": "14147"
},
{
"name": "Ruby",
"bytes": "956"
},
{
"name": "Shell",
"bytes": "1882"
}
],
"symlink_target": ""
} |
from google.transit import gtfs_realtime_pb2
import urllib
from pprint import pprint
#sudo -H pip install protobuf_to_dict
from protobuf_to_dict import protobuf_to_dict
from itertools import chain
#for deciding if the arduino should light up
import datetime
#for comparing the arrival times
import time
#for grabber
import math
import os
import traceback
#imports the url variables from config.py in the same folder
from config import *
from neopixel import *
#for the pause button
import RPi.GPIO as GPIO
# Address pins by Broadcom (BCM) numbering rather than board position.
GPIO.setmode(GPIO.BCM)
# Per-station / per-direction lists of upcoming arrival times (minutes).
arrival_times_york_south = []
arrival_times_york_north = []
arrival_times_high_south_a = []
arrival_times_high_north_a = []
arrival_times_high_south_c = []
arrival_times_high_north_c = []
# NOTE(review): light_list appears unused in the visible code -- confirm
# before removing (comment says it was meant for an Arduino handoff).
light_list = []
# LED strip configuration:
LED_COUNT = 30 # Number of LED pixels.
LED_PIN = 18 # GPIO pin connected to the pixels (18 uses PWM!).
#LED_PIN = 10 # GPIO pin connected to the pixels (10 uses SPI /dev/spidev0.0).
LED_FREQ_HZ = 800000 # LED signal frequency in hertz (usually 800khz)
LED_DMA = 5 # DMA channel to use for generating signal (try 5)
LED_BRIGHTNESS = 255 # Set to 0 for darkest and 255 for brightest
LED_INVERT = False # True to invert the signal (when using NPN transistor level shift)
LED_CHANNEL = 0 # set to '1' for GPIOs 13, 19, 41, 45 or 53
LED_STRIP = ws.WS2811_STRIP_RGB # Strip type and colour ordering
# Create NeoPixel object with appropriate configuration.
strip = Adafruit_NeoPixel(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA, LED_INVERT, LED_BRIGHTNESS, LED_CHANNEL, LED_STRIP)
# Initialize the library (must be called once before other functions).
strip.begin()
# Pause button input on BCM pin 23, pulled up (pressed pulls it low).
GPIO.setup(23, GPIO.IN, pull_up_down=GPIO.PUD_UP)
# GTFS stop IDs: High St (A/C) and York St (F), south/north platforms.
HighS = ['A40S']
HighN = ['A40N']
YorkS = ['F18S']
YorkN = ['F18N']
# Walk times to each station -- presumably minutes; verify against usage
# in lighter(), where they offset the arrival-time windows.
HighWalk = 13
YorkWalk = 7
#function to scrape the MTA site and add the arrival times to arrival_times lists
def grabber(station_ID, station_URL, station_line):
times = []
out = ''
try:
mtafeed = gtfs_realtime_pb2.FeedMessage()
#response = urllib.urlopen('http://datamine.mta.info/mta_esi.php?key=' + MTA_KEY + '&feed_id=26')
response = urllib.urlopen(station_URL)
mtafeed.ParseFromString(response.read())
current_time = datetime.datetime.now()
for stop in station_ID:
for entity in mtafeed.entity:
if entity.trip_update.trip.route_id == station_line:
if entity.trip_update:
for update in entity.trip_update.stop_time_update:
if update.stop_id == stop:
time = update.arrival.time
if time <= 0:
time = update.departure.time
time = datetime.datetime.fromtimestamp(time)
time = math.trunc(((time - current_time).total_seconds()) / 60)
times.append(time)
times.sort()
for time in times:
if time < 0:
times.remove(time)
for time in times[:NUM_TRAINS]:
out+=str(time)
out+=str(',')
out = out[:-1]
print times
return times
times = []
except Exception:
print traceback.format_exc()
print "Some sort of error getting the %s data" % station_ID
times = []
return times
#functionto convert arrival_times lists to lit LEDs
def lighter(arrival_list, time_start, light_one, light_two, light_three, light_four, light_five, line_R, line_G, line_B):
#resets lighs to off
#this setting is replaced in the if statements below
#if the light should be on
#however, this is necessary so lights don't linger on
strip.setPixelColorRGB(light_one, 0, 0, 0)
strip.setPixelColorRGB(light_two, 0, 0, 0)
strip.setPixelColorRGB(light_three, 0, 0, 0)
strip.setPixelColorRGB(light_four, 0, 0, 0)
strip.setPixelColorRGB(light_five, 0, 0, 0)
#walk through the numbers in the list
for item in arrival_list:
#convert the number to a number and see if it is in this
#all of the prints are for troubleshooting
if int(item) == time_start:
#if it is, turn on the corresponding LED in the correct color
strip.setPixelColorRGB(light_one, line_R, line_G, line_B)
print "Light %s activated because the time is %s" % (light_one, int(item))
elif time_start < int(item) <= (time_start + 2):
strip.setPixelColorRGB(light_two, line_R, line_G, line_B)
print "Light %s activated because the time is %s" % (light_two, int(item))
elif (time_start + 2) < int(item) <= (time_start + 4):
strip.setPixelColorRGB(light_three, line_R, line_G, line_B)
print "Light %s activated because the time is %s" % (light_three, int(item))
elif (time_start + 4) < int(item) <= (time_start + 7):
strip.setPixelColorRGB(light_four, line_R, line_G, line_B)
print "Light %s activated because the time is %s" % (light_four, int(item))
elif (time_start + 7) < int(item) <= (time_start + 12):
strip.setPixelColorRGB(light_five, line_R, line_G, line_B)
print "Light %s activated because the time is %s" % (light_five, int(item))
else:
pass
#clearn out arrival_times list for the next time around
#only relevant when this becomes a loop
arrival_list = []
#to turn off the lights when it is off time
def blackout():
    """Turn every LED off outside display hours.

    Weekdays the display is lit 07:00-19:59 and weekends 10:00-19:59 (the
    old comments claimed 9pm/10pm, but range(7, 20) / range(10, 20) stop at
    hour 19). The caller must still invoke strip.show().
    """
    # figure out the current date and time
    d = datetime.datetime.now()
    # Monday-Friday (weekday() returns 0-4)
    if d.weekday() in range(0, 5):
        # lit between 07:00 and 19:59 -- range(7, 20) covers hours 7..19
        #DONT USE () FOR HOURS
        if d.hour in range(7, 20):
            print "lights would be on weekday"
        # otherwise switch every pixel off
        else:
            for i in range(LED_COUNT):
                strip.setPixelColorRGB(i, 0, 0, 0)
            print "lights would be off"
    # Saturday/Sunday (weekday() returns 5-6)
    elif d.weekday() in range(5, 7):
        # lit between 10:00 and 19:59 -- range(10, 20) covers hours 10..19
        if d.hour in range (10, 20):
            print "lights would be on weekend"
        else:
            for i in range(LED_COUNT):
                strip.setPixelColorRGB(i, 0, 0, 0)
            print "lights would be off weekend"
    else:
        print "date error"
#for the pause button
def pause_button(channel):
print "pausing"
for i in range(LED_COUNT):
strip.setPixelColorRGB(i, 0, 0, 0)
strip.show()
#time is in seconds
time.sleep(30)
#checking for the pause button
# Fire pause_button on a falling edge of pin 23 (button press), debounced.
GPIO.add_event_detect(23, GPIO.FALLING, callback=pause_button, bouncetime=300)
# Main loop: poll each feed, map times to LEDs, honour quiet hours, display.
while True:
    arrival_times_york_south = grabber(YorkS, URL_F,'F')
    arrival_times_york_north = grabber(YorkN, URL_F,'F')
    arrival_times_high_south_a = grabber(HighS, URL_AC, 'A')
    arrival_times_high_north_a = grabber(HighN, URL_AC, 'A')
    arrival_times_high_south_c = grabber(HighS, URL_AC, 'C')
    arrival_times_high_north_c = grabber(HighN, URL_AC, 'C')
    # LEDs 0-9: F line (York St), 10-19: A line, 20-29: C line (High St);
    # the RGB triples are the lines' colours.
    lighter(arrival_times_york_south, YorkWalk, 0, 1, 2, 3, 4, 208, 9, 107)
    lighter(arrival_times_york_north, YorkWalk, 5, 6, 7, 8, 9, 208, 9,107)
    lighter(arrival_times_high_south_a, HighWalk, 10, 11, 12, 13, 14, 7, 213, 244)
    lighter(arrival_times_high_north_a, HighWalk, 15, 16, 17, 18, 19, 7, 213, 244)
    lighter(arrival_times_high_south_c, HighWalk, 20, 21, 22, 23, 24, 10, 0, 255)
    lighter(arrival_times_high_north_c, HighWalk, 25, 26, 27, 28, 29, 10, 0, 255)
    # Clear everything again if we are outside display hours.
    blackout()
    strip.show()
    print "sleeping for 5 seconds"
    time.sleep(5)
| {
"content_hash": "5d95a71fa54f8c8d96467189c2fa461d",
"timestamp": "",
"source": "github",
"line_count": 223,
"max_line_length": 121,
"avg_line_length": 31.847533632286996,
"alnum_prop": 0.6899464939453676,
"repo_name": "mwweinberg/NYC-MTA-Next-Train",
"id": "d21eaaf1b469eaf050403ff35785d7870b580be0",
"size": "7179",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nycmtapi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "2949"
},
{
"name": "Python",
"bytes": "14728"
}
],
"symlink_target": ""
} |
"""Builds the CIFAR-10 network.
Summary of available functions:
# Compute input images and labels for training. If you would like to run
# evaluations, use inputs() instead.
inputs, labels = distorted_inputs()
# Compute inference on the model inputs to make a prediction.
predictions = inference(inputs)
# Compute the total loss of the prediction with respect to the labels.
loss = loss(predictions, labels)
# Create a graph to run one step of training with respect to the loss.
train_op = train(loss, global_step)
"""
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
import sys
import tarfile
from six.moves import urllib
import tensorflow as tf
import cifar10_input
# NOTE(review): tf.app.flags is the TF1-era flag API used by the original
# CIFAR-10 tutorial this file follows.
FLAGS = tf.app.flags.FLAGS
# Basic model parameters.
tf.app.flags.DEFINE_integer('batch_size', 128,
                            """Number of images to process in a batch.""")
tf.app.flags.DEFINE_string('data_dir', './tmp/cifar10_data',
                           """Path to the CIFAR-10 data directory.""")
tf.app.flags.DEFINE_boolean('use_fp16', False,
                            """Train the model using fp16.""")
# Global constants describing the CIFAR-10 data set.
IMAGE_SIZE = cifar10_input.IMAGE_SIZE
NUM_CLASSES = cifar10_input.NUM_CLASSES
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL
# Constants describing the training process.
MOVING_AVERAGE_DECAY = 0.9999     # The decay to use for the moving average.
NUM_EPOCHS_PER_DECAY = 350.0      # Epochs after which learning rate decays.
LEARNING_RATE_DECAY_FACTOR = 0.1  # Learning rate decay factor.
INITIAL_LEARNING_RATE = 0.1       # Initial learning rate.
# If a model is trained with multiple GPUs, prefix all Op names with tower_name
# to differentiate the operations. Note that this prefix is removed from the
# names of the summaries when visualizing a model.
TOWER_NAME = 'tower'
DATA_URL = 'http://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz'
def _activation_summary(x):
  """Register histogram and sparsity summaries for an activation tensor.

  Args:
    x: Tensor

  Returns:
    nothing
  """
  # Drop any multi-GPU tower prefix so summaries from all towers share
  # the same name on tensorboard.
  summary_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
  tf.summary.histogram(summary_name + '/activations', x)
  tf.summary.scalar(summary_name + '/sparsity', tf.nn.zero_fraction(x))
def _variable_on_cpu(name, shape, initializer):
  """Create (or fetch) a variable pinned to host memory.

  Args:
    name: name of the variable
    shape: list of ints
    initializer: initializer for Variable

  Returns:
    Variable Tensor
  """
  # Half precision only when explicitly requested via the flag.
  dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
  with tf.device('/cpu:0'):
    cpu_var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)
  return cpu_var
def _variable_with_weight_decay(name, shape, stddev, wd):
  """Create an initialized variable, optionally adding an L2 penalty.

  The variable is initialized from a truncated normal distribution. When
  `wd` is not None, an L2 weight-decay term scaled by `wd` is added to the
  'losses' collection.

  Args:
    name: name of the variable
    shape: list of ints
    stddev: standard deviation of a truncated Gaussian
    wd: L2 loss multiplier, or None for no weight decay

  Returns:
    Variable Tensor
  """
  dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
  initializer = tf.truncated_normal_initializer(stddev=stddev, dtype=dtype)
  var = _variable_on_cpu(name, shape, initializer)
  if wd is not None:
    penalty = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', penalty)
  return var
def distorted_inputs():
  """Construct distorted (augmented) input batches for CIFAR training.

  Returns:
    images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.
  Raises:
    ValueError: If FLAGS.data_dir is unset.
  """
  if not FLAGS.data_dir:
    raise ValueError('Please supply a data_dir')
  # NOTE(review): the reader was repointed from the stock
  # 'cifar-10-batches-bin' directory to 'tfrecords_inception' — confirm
  # cifar10_input.distorted_inputs expects that layout.
  data_dir = os.path.join(FLAGS.data_dir, 'tfrecords_inception')
  images, labels = cifar10_input.distorted_inputs(
      data_dir=data_dir, batch_size=FLAGS.batch_size)
  if FLAGS.use_fp16:
    images, labels = tf.cast(images, tf.float16), tf.cast(labels, tf.float16)
  return images, labels
#
def inputs(eval_data):
  """Construct undistorted input batches for CIFAR evaluation.

  Args:
    eval_data: bool, indicating if one should use the train or eval data set.
  Returns:
    images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.
  Raises:
    ValueError: If FLAGS.data_dir is unset.
  """
  if not FLAGS.data_dir:
    raise ValueError('Please supply a data_dir')
  # NOTE(review): the reader was repointed from the stock
  # 'cifar-10-batches-bin' directory to 'tfrecords_inception' — confirm
  # cifar10_input.inputs expects that layout.
  data_dir = os.path.join(FLAGS.data_dir, 'tfrecords_inception')
  images, labels = cifar10_input.inputs(
      eval_data=eval_data, data_dir=data_dir, batch_size=FLAGS.batch_size)
  if FLAGS.use_fp16:
    images, labels = tf.cast(images, tf.float16), tf.cast(labels, tf.float16)
  return images, labels
def inference(images, eval=False):
  """Build the CIFAR-10 model graph.

  Architecture: conv1 -> pool1 -> norm1 -> conv2 -> norm2 -> pool2 ->
  local3 (FC 384) -> local4 (FC 192) -> softmax_linear (NUM_CLASSES).

  Args:
    images: Images returned from distorted_inputs() or inputs().
    eval: When True, a tf.nn.softmax is appended so the returned tensor
      holds normalized probabilities instead of raw logits.
      NOTE(review): this parameter shadows the builtin ``eval``; renaming
      it would break keyword callers, so it is documented rather than fixed.
  Returns:
    Logits (or softmax probabilities when eval=True).
  """
  # We instantiate all variables using tf.get_variable() instead of
  # tf.Variable() in order to share variables across multiple GPU training runs.
  # If we only ran this model on a single GPU, we could simplify this function
  # by replacing all instances of tf.get_variable() with tf.Variable().
  #
  # conv1: 5x5 convolution, 3 input channels -> 64 feature maps, stride 1.
  with tf.variable_scope('conv1') as scope:
    kernel = _variable_with_weight_decay('weights',
                                         shape=[5, 5, 3, 64],
                                         stddev=5e-2,
                                         wd=0.0)
    conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')
    # Debug shape traces left by the author; they run once at graph-build time.
    print("*********Conv1 Shape Post Conv***********: \n", conv.get_shape())
    biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.0))
    pre_activation = tf.nn.bias_add(conv, biases)
    print("*********Conv1 Shape pre_activation***********: \n", pre_activation.get_shape())
    conv1 = tf.nn.relu(pre_activation, name=scope.name)
    _activation_summary(conv1)
    print("*********Conv1 Shape Post Activation***********: \n", conv1.get_shape())
  # pool1: 3x3 max pool, stride 2 (halves spatial resolution).
  pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                         padding='SAME', name='pool1')
  print("*********Pool1 Shape***********: \n", pool1.get_shape())
  # norm1: local response normalization.
  norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
                    name='norm1')
  # conv2: 5x5 convolution, 64 -> 64 feature maps.
  with tf.variable_scope('conv2') as scope:
    kernel = _variable_with_weight_decay('weights',
                                         shape=[5, 5, 64, 64],
                                         stddev=5e-2,
                                         wd=0.0)
    conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME')
    biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.1))
    pre_activation = tf.nn.bias_add(conv, biases)
    conv2 = tf.nn.relu(pre_activation, name=scope.name)
    _activation_summary(conv2)
    print("*********Conv2 Shape***********: \n", conv2.get_shape())
  # norm2 (note: normalization before pooling here, the reverse of block 1).
  norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
                    name='norm2')
  # pool2: 3x3 max pool, stride 2.
  pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1],
                         strides=[1, 2, 2, 1], padding='SAME', name='pool2')
  print("*********Pool2 Shape***********: \n", pool2.get_shape())
  # local3: first fully-connected layer (flattened conv features -> 384).
  with tf.variable_scope('local3') as scope:
    # Move everything into depth so we can perform a single matrix multiply.
    reshape = tf.reshape(pool2, [FLAGS.batch_size, -1])
    dim = reshape.get_shape()[1].value
    print("*********Local3 Re-Shape***********: \n", dim)
    weights = _variable_with_weight_decay('weights', shape=[dim, 384],
                                          stddev=0.04, wd=0.004)
    biases = _variable_on_cpu('biases', [384], tf.constant_initializer(0.1))
    local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)
    _activation_summary(local3)
    print("*********Local3 Out Shape***********: \n", local3.get_shape())
  # local4: second fully-connected layer (384 -> 192).
  with tf.variable_scope('local4') as scope:
    weights = _variable_with_weight_decay('weights', shape=[384, 192],
                                          stddev=0.04, wd=0.004)
    biases = _variable_on_cpu('biases', [192], tf.constant_initializer(0.1))
    local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name=scope.name)
    _activation_summary(local4)
    print("*********Local4 Shape***********: \n", local4.get_shape())
  # linear layer(WX + b),
  # We don't apply softmax here because
  # tf.nn.sparse_softmax_cross_entropy_with_logits accepts the unscaled logits
  # and performs the softmax internally for efficiency.
  with tf.variable_scope('softmax_linear') as scope:
    weights = _variable_with_weight_decay('weights', [192, NUM_CLASSES],
                                          stddev=1/192.0, wd=0.0)
    biases = _variable_on_cpu('biases', [NUM_CLASSES],
                              tf.constant_initializer(0.0))
    softmax_linear = tf.add(tf.matmul(local4, weights), biases, name=scope.name)
    _activation_summary(softmax_linear)
  #Check if inference is for eval(), if during an eval step normalize the logits!
  if eval:
    softmax_linear = tf.nn.softmax(softmax_linear)
  return softmax_linear
def loss(logits, labels):
  """Total training loss: mean cross entropy plus all L2 weight-decay terms.

  Also registers the cross-entropy mean in the 'losses' collection so that
  _add_loss_summaries() can track it.

  Args:
    logits: Logits from inference().
    labels: Labels from distorted_inputs or inputs(). 1-D tensor
      of shape [batch_size].
  Returns:
    Loss tensor of type float.
  """
  # sparse_softmax_cross_entropy_with_logits requires integer class labels
  # and raw (unscaled) logits.
  per_example = tf.nn.sparse_softmax_cross_entropy_with_logits(
      labels=tf.cast(labels, tf.int64), logits=logits,
      name='cross_entropy_per_example')
  mean_xent = tf.reduce_mean(per_example, name='cross_entropy')
  tf.add_to_collection('losses', mean_xent)
  # 'losses' already holds the weight-decay terms registered by
  # _variable_with_weight_decay, so the sum is the total loss.
  return tf.add_n(tf.get_collection('losses'), name='total_loss')
def _add_loss_summaries(total_loss):
  """Create moving averages and TensorBoard scalars for every loss term.

  Args:
    total_loss: Total loss from loss().
  Returns:
    loss_averages_op: op for generating moving averages of losses.
  """
  # Exponential moving average (decay 0.9) over every individual loss in
  # the 'losses' collection plus the total.
  ema = tf.train.ExponentialMovingAverage(0.9, name='avg')
  all_losses = tf.get_collection('losses') + [total_loss]
  loss_averages_op = ema.apply(all_losses)
  # Two scalars per loss: the raw value (suffixed ' (raw)') and its
  # moving average under the original op name.
  for loss_term in all_losses:
    tf.summary.scalar(loss_term.op.name + ' (raw)', loss_term)
    tf.summary.scalar(loss_term.op.name, ema.average(loss_term))
  return loss_averages_op
def train(total_loss, global_step):
  """Build the CIFAR-10 training op.

  Creates a gradient-descent optimizer with an exponentially decayed
  learning rate and adds moving averages for all trainable variables.

  Args:
    total_loss: Total loss from loss().
    global_step: Integer Variable counting the number of training steps
      processed.
  Returns:
    train_op: op for training.
  """
  # Variables that affect learning rate.
  num_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / FLAGS.batch_size
  decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)
  # Decay the learning rate exponentially based on the number of steps.
  lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE,
                                  global_step,
                                  decay_steps,
                                  LEARNING_RATE_DECAY_FACTOR,
                                  staircase=True)
  tf.summary.scalar('learning_rate', lr)
  # Generate moving averages of all losses and associated summaries.
  loss_averages_op = _add_loss_summaries(total_loss)
  # Compute gradients. The control dependency forces the loss-average
  # update to run before each gradient computation.
  with tf.control_dependencies([loss_averages_op]):
    opt = tf.train.GradientDescentOptimizer(lr)
    grads = opt.compute_gradients(total_loss)
  # Apply gradients (this also increments global_step).
  apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
  # Add histograms for trainable variables.
  for var in tf.trainable_variables():
    tf.summary.histogram(var.op.name, var)
  # Add histograms for gradients (grad is None for vars not in the loss).
  for grad, var in grads:
    if grad is not None:
      tf.summary.histogram(var.op.name + '/gradients', grad)
  # Track the moving averages of all trainable variables.
  variable_averages = tf.train.ExponentialMovingAverage(
      MOVING_AVERAGE_DECAY, global_step)
  variables_averages_op = variable_averages.apply(tf.trainable_variables())
  # train_op is a no-op that merely depends on the gradient application and
  # the moving-average update, so running it performs one full step.
  with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
    train_op = tf.no_op(name='train')
  return train_op
def maybe_download_and_extract():
  """Download and extract the CIFAR-10 tarball into FLAGS.data_dir.

  Skips the download when the archive already exists and skips extraction
  when the 'cifar-10-batches-bin' directory is already present.
  """
  dest_directory = FLAGS.data_dir
  if not os.path.exists(dest_directory):
    os.makedirs(dest_directory)
  filename = DATA_URL.split('/')[-1]
  filepath = os.path.join(dest_directory, filename)
  if not os.path.exists(filepath):
    def _progress(count, block_size, total_size):
      # urlretrieve reporthook: prints an in-place percentage progress line.
      sys.stdout.write('\r>> Downloading %s %.1f%%' % (filename,
          float(count * block_size) / float(total_size) * 100.0))
      sys.stdout.flush()
    filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
    print()
    statinfo = os.stat(filepath)
    print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
  extracted_dir_path = os.path.join(dest_directory, 'cifar-10-batches-bin')
  if not os.path.exists(extracted_dir_path):
    # Fix: the original leaked the open TarFile handle; a context manager
    # guarantees it is closed even if extraction raises.
    with tarfile.open(filepath, 'r:gz') as tar:
      tar.extractall(dest_directory)
| {
"content_hash": "c2f1fe605d3be827b10bb5888404713d",
"timestamp": "",
"source": "github",
"line_count": 405,
"max_line_length": 91,
"avg_line_length": 36.94320987654321,
"alnum_prop": 0.6513835048790269,
"repo_name": "taylorpaul/cifar10_tf",
"id": "492c54f1038a5475be3aad3269f3c6c0fd03365d",
"size": "15652",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cifar10.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "78847"
}
],
"symlink_target": ""
} |
from app.models import Slot
def create_slot_from_json(slot_json, match):
    """Create, save, and return a Slot from one player entry of a match JSON.

    Args:
        slot_json: dict for a single player from the Dota match-details
            payload (keys: player_slot, account_id, item_0..item_5, etc.).
        match: the Match model instance this slot belongs to.
    Returns:
        The persisted Slot instance.
    """
    slot = Slot(
        match=match,
        # player_slot below 5 means the player sat on the radiant side.
        team='radiant' if slot_json['player_slot'] < 5 else 'dire',
        account_id=slot_json['account_id'],
        hero_id=slot_json['hero_id'],
        # Idiom fix: the six inventory slots item_0..item_5 were enumerated
        # by hand; a comprehension builds the same list.
        items=[slot_json['item_%d' % i] for i in range(6)],
        kills=slot_json['kills'],
        deaths=slot_json['deaths'],
        assists=slot_json['assists'],
        leaver_status=slot_json['leaver_status'],
        last_hits=slot_json['last_hits'],
        denies=slot_json['denies'],
        gpm=slot_json['gold_per_min'],
        xpm=slot_json['xp_per_min'],
        level=slot_json['level'],
        gold=slot_json['gold'],
        gold_spent=slot_json['gold_spent'],
        hero_damage=slot_json['hero_damage'],
        tower_damage=slot_json['tower_damage'],
        hero_healing=slot_json['hero_healing']
    )
    slot.save()
    return slot
| {
"content_hash": "69814d74bc090fed8bd23e3cdbf550f4",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 78,
"avg_line_length": 36.464285714285715,
"alnum_prop": 0.5739471106758081,
"repo_name": "lucashanke/houseofdota",
"id": "5bd2e646110c22fed52c31ec93ebf528edaa3600",
"size": "1021",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/business/slot_business.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6664"
},
{
"name": "Clojure",
"bytes": "4254"
},
{
"name": "HTML",
"bytes": "1914"
},
{
"name": "JavaScript",
"bytes": "69894"
},
{
"name": "Python",
"bytes": "120362"
}
],
"symlink_target": ""
} |
from google.cloud import aiplatform_v1beta1
def sample_create_tensorboard():
    """Create a Vertex AI Tensorboard and wait for the operation to finish."""
    # Instantiate the service client.
    tb_client = aiplatform_v1beta1.TensorboardServiceClient()

    # Build the Tensorboard resource to create.
    tb = aiplatform_v1beta1.Tensorboard()
    tb.display_name = "display_name_value"

    create_request = aiplatform_v1beta1.CreateTensorboardRequest(
        parent="parent_value",
        tensorboard=tb,
    )

    # create_tensorboard returns a long-running operation; block on it.
    lro = tb_client.create_tensorboard(request=create_request)
    print("Waiting for operation to complete...")
    result = lro.result()

    # Handle the response
    print(result)
# [END aiplatform_v1beta1_generated_TensorboardService_CreateTensorboard_sync]
| {
"content_hash": "e4ea70daf53c9da568ac29a1f6f3ae93",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 78,
"avg_line_length": 26.925925925925927,
"alnum_prop": 0.7276478679504814,
"repo_name": "googleapis/python-aiplatform",
"id": "a747831c9d38aa4c19f6325f8dac54eaba78a389",
"size": "2136",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_sync.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "23977004"
},
{
"name": "Shell",
"bytes": "30668"
}
],
"symlink_target": ""
} |
import warnings
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.db.models.fields.related import OneToOneField
from django.db.models.manager import Manager
from django.db.models.query import QuerySet
import six
class InheritanceQuerySet(QuerySet):
    """QuerySet that can downcast rows to their concrete subclass instances.

    After select_subclasses() is called, iteration yields each row as the
    most specific subclass reachable via a one-to-one parent link, falling
    back to the base model instance when no child row exists.
    """

    def select_subclasses(self, *subclasses):
        """select_related() the given child relations (default: all of them).

        With no arguments, every reverse one-to-one relation whose model is
        a subclass of this queryset's model is included.
        """
        if not subclasses:
            subclasses = [rel.var_name
                          for rel in self.model._meta.get_all_related_objects()
                          if isinstance(rel.field, OneToOneField)
                          and issubclass(rel.field.model, self.model)]
        new_qs = self.select_related(*subclasses)
        # Remember which relations to probe in iterator().
        new_qs.subclasses = subclasses
        return new_qs

    def _clone(self, klass=None, setup=False, **kwargs):
        # Propagate our bookkeeping attributes through QuerySet cloning.
        for name in ['subclasses', '_annotated']:
            if hasattr(self, name):
                kwargs[name] = getattr(self, name)
        return super(InheritanceQuerySet, self)._clone(klass, setup, **kwargs)

    def annotate(self, *args, **kwargs):
        qset = super(InheritanceQuerySet, self).annotate(*args, **kwargs)
        # Record annotation names so iterator() can copy them onto the
        # downcast child instances (annotations live on the parent row).
        qset._annotated = [a.default_alias for a in args] + list(six.iterkeys(kwargs))
        return qset

    def iterator(self):
        # Fix: the loop source was named 'iter', shadowing the builtin.
        base_iter = super(InheritanceQuerySet, self).iterator()
        if getattr(self, 'subclasses', False):
            for obj in base_iter:
                def get_attr(obj, s):
                    # Reverse O2O access raises when no child row exists.
                    try:
                        return getattr(obj, s)
                    except ObjectDoesNotExist:
                        return None
                # First existing child relation wins; else the base instance.
                sub_obj = [getattr(obj, s) for s in self.subclasses
                           if get_attr(obj, s)] or [obj]
                sub_obj = sub_obj[0]
                if getattr(self, '_annotated', False):
                    for k in self._annotated:
                        setattr(sub_obj, k, getattr(obj, k))
                yield sub_obj
        else:
            for obj in base_iter:
                yield obj
class InheritanceManager(models.Manager):
    """Manager whose querysets can downcast rows via select_subclasses()."""
    # Also use this manager when traversing related-object descriptors.
    use_for_related_fields = True
    def get_query_set(self):
        # Base queryset is an InheritanceQuerySet over this manager's model.
        return InheritanceQuerySet(self.model)
    def select_subclasses(self, *subclasses):
        # Convenience pass-through to InheritanceQuerySet.select_subclasses.
        return self.get_query_set().select_subclasses(*subclasses)
    def get_subclass(self, *args, **kwargs):
        # get() a single object, downcast to its concrete subclass.
        return self.get_query_set().select_subclasses().get(*args, **kwargs)
class InheritanceCastMixin(object):
    """Queryset mixin that downcasts rows using a stored 'real_type' FK.

    Assumes the model carries a 'real_type' field referencing the
    ContentType of the concrete subclass each row belongs to.
    """
    def cast(self):
        """Return the rows as concrete-subclass instances, original order."""
        rows = tuple(self.values_list('pk', 'real_type'))
        # Bucket primary keys by their concrete ContentType id.
        pks_by_type = {}
        for pk, type_id in rows:
            pks_by_type.setdefault(type_id, []).append(pk)
        ctypes = ContentType.objects.in_bulk(list(six.iterkeys(pks_by_type)))
        # Fetch each bucket in one in_bulk() query on the child model.
        child_by_pk = {}
        for type_id, pks in six.iteritems(pks_by_type):
            child_model = ctypes[type_id].model_class()
            fetched = child_model._default_manager.in_bulk(pks)
            for pk, child in six.iteritems(fetched):
                child_by_pk[pk] = child
        # Re-emit the children in the order the parent queryset returned.
        return [child_by_pk[pk] for pk, _ in rows]
class QueryManager(models.Manager):
    """Manager whose querysets are pre-filtered by a fixed Q expression.

    Construct with either a single positional Q object or keyword lookups
    that are wrapped into one.
    """
    def __init__(self, *args, **kwargs):
        self._q = args[0] if args else models.Q(**kwargs)
        super(QueryManager, self).__init__()

    def order_by(self, *args):
        # Stored and applied lazily in get_query_set(); returns self so the
        # manager definition can be chained.
        self._order_by = args
        return self

    def get_query_set(self):
        qs = super(QueryManager, self).get_query_set().filter(self._q)
        ordering = getattr(self, '_order_by', None)
        if ordering is not None:
            qs = qs.order_by(*ordering)
        return qs
class PassThroughManager(models.Manager):
    """
    Inherit from this Manager to enable you to call any methods from your
    custom QuerySet class from your manager. Simply define your QuerySet
    class, and return an instance of it from your manager's `get_query_set`
    method.
    Alternately, if you don't need any extra methods on your manager that
    aren't on your QuerySet, then just pass your QuerySet class to the
    ``for_queryset_class`` class method::

        class PostQuerySet(QuerySet):
            def enabled(self):
                return self.filter(disabled=False)

        class Post(models.Model):
            objects = PassThroughManager.for_queryset_class(PostQuerySet)()
    """
    # pickling causes recursion errors
    _deny_methods = ['__getstate__', '__setstate__', '_db']
    def __init__(self, queryset_cls=None):
        self._queryset_cls = queryset_cls
        super(PassThroughManager, self).__init__()
    def __getattr__(self, name):
        # Forward unknown attribute access to a fresh queryset, so queryset
        # methods are callable directly on the manager. Names in
        # _deny_methods must raise instead of recursing (see note above).
        if name in self._deny_methods:
            raise AttributeError(name)
        return getattr(self.get_query_set(), name)
    def get_query_set(self):
        # Instantiate the configured queryset class (honouring the manager's
        # database alias when routed), or fall back to the default queryset.
        if self._queryset_cls is not None:
            kargs = {'model': self.model}
            if hasattr(self, '_db'):
                kargs['using'] = self._db
            return self._queryset_cls(**kargs)
        return super(PassThroughManager, self).get_query_set()
    @classmethod
    def for_queryset_class(cls, queryset_cls):
        # Build and return a manager subclass permanently bound (via
        # closure) to queryset_cls; callers instantiate the returned class.
        class _PassThroughManager(cls):
            def __init__(self):
                return super(_PassThroughManager, self).__init__()
            def get_query_set(self):
                kwargs = {}
                if hasattr(self, "_db"):
                    kwargs["using"] = self._db
                return queryset_cls(self.model, **kwargs)
        return _PassThroughManager
def manager_from(*mixins, **kwds):
    """
    Returns a Manager instance with extra methods, also available and
    chainable on generated querysets.
    (By George Sakkis, originally posted at
    http://djangosnippets.org/snippets/2117/)
    :param mixins: Each ``mixin`` can be either a class or a function. The
        generated manager and associated queryset subclasses extend the mixin
        classes and include the mixin functions (as methods).
    :keyword queryset_cls: The base queryset class to extend from
        (``django.db.models.query.QuerySet`` by default).
    :keyword manager_cls: The base manager class to extend from
        (``django.db.models.manager.Manager`` by default).

    .. deprecated:: use :class:`PassThroughManager` instead.
    """
    warnings.warn(
        "manager_from is pending deprecation; use PassThroughManager instead.",
        PendingDeprecationWarning,
        stacklevel=2)
    # collect separately the mixin classes and methods
    bases = [kwds.get('queryset_cls', QuerySet)]
    methods = {}
    for mixin in mixins:
        if isinstance(mixin, type):
            bases.append(mixin)
        else:
            # Functions become methods on both generated classes.
            try: methods[mixin.__name__] = mixin
            except AttributeError:
                raise TypeError('Mixin must be class or function, not %s' %
                                mixin.__class__)
    # create the QuerySet subclass; the hash makes the generated class
    # name unique per (mixins, kwds) combination
    id = hash(mixins + tuple(six.iteritems(kwds)))
    new_queryset_cls = type('Queryset_%d' % id, tuple(bases), methods)
    # create the Manager subclass (reusing the same mixin bases/methods)
    bases[0] = manager_cls = kwds.get('manager_cls', Manager)
    new_manager_cls = type('Manager_%d' % id, tuple(bases), methods)
    # and finally override new manager's get_query_set
    super_get_query_set = manager_cls.get_query_set
    def get_query_set(self):
        # first honor the super manager's get_query_set
        qs = super_get_query_set(self)
        # and then try to bless the returned queryset by reassigning it to the
        # newly created Queryset class, though this may not be feasible
        if not issubclass(new_queryset_cls, qs.__class__):
            raise TypeError('QuerySet subclass conflict: cannot determine a '
                            'unique class for queryset instance')
        qs.__class__ = new_queryset_cls
        return qs
    new_manager_cls.get_query_set = get_query_set
    return new_manager_cls()
| {
"content_hash": "71cab43360610b1e076f317d79c0d09d",
"timestamp": "",
"source": "github",
"line_count": 214,
"max_line_length": 97,
"avg_line_length": 37.429906542056074,
"alnum_prop": 0.6111111111111112,
"repo_name": "coagulant/django-model-utils",
"id": "c10660393fbee314ac877556baa03375f185432f",
"size": "8010",
"binary": false,
"copies": "1",
"ref": "refs/heads/py3",
"path": "model_utils/managers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "53972"
}
],
"symlink_target": ""
} |
from slackstocks.components.slackstocks import SlackStocks
| {
"content_hash": "7abbd026cfa5d8b83c758e783b6c1401",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 58,
"avg_line_length": 59,
"alnum_prop": 0.8983050847457628,
"repo_name": "mraypold/slack-stockbot",
"id": "087f3456d61e18a6b378e25a3a5ed051277b3459",
"size": "59",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "slackstocks/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6058"
}
],
"symlink_target": ""
} |
import socketio
import eventlet
import eventlet.wsgi
from flask import Flask, render_template
# Your IP local address — the interface the WSGI server binds to below.
SERVER = '192.168.1.110'
#################
## Documentation
#################
## https://github.com/miguelgrinberg/python-socketio
#################
# Socket.IO server; the @sio.on handlers below are registered on it.
sio = socketio.Server()
# Flask app serving the plain HTTP routes alongside the Socket.IO endpoint.
app = Flask(__name__)
@app.route('/')
def index():
    """Placeholder HTTP root; returns a plain string, no template."""
    return "Serve the client-side application."
@sio.on('connect', namespace='/detection')
def connect(sid, environ):
    """Log the session id of each client joining the /detection namespace."""
    print("connect ", sid)
@sio.on('gesture', namespace='/detection')
def message(sid, data):
    """Relay an incoming 'gesture' event to connected clients.

    NOTE(review): the re-emit passes no namespace=, so it broadcasts on the
    default namespace rather than '/detection' — confirm this is intended.
    """
    print("message ", data)
    sio.emit('gesture', data)
@sio.on('disconnect', namespace='/detection')
def disconnect(sid):
    """Log the session id of each client leaving the /detection namespace."""
    print('disconnect ', sid)
if __name__ == '__main__':
    # wrap Flask application with engineio's middleware so one server
    # handles both HTTP routes and Socket.IO traffic
    app = socketio.Middleware(sio, app)
    # deploy as an eventlet WSGI server on SERVER:3000 (blocks forever)
    eventlet.wsgi.server(eventlet.listen((SERVER, 3000)), app) | {
"content_hash": "7d2bc5ba6a0dbb39476d1842e9f11690",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 62,
"avg_line_length": 24.153846153846153,
"alnum_prop": 0.6528662420382165,
"repo_name": "thiagomarques2015/Kaio-machine-learning-human-face-detection",
"id": "566ae839765d5c1421e98c610eb6a8e88a799207",
"size": "1124",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "489"
},
{
"name": "Java",
"bytes": "11235"
},
{
"name": "JavaScript",
"bytes": "21460"
},
{
"name": "Jupyter Notebook",
"bytes": "926163"
},
{
"name": "Python",
"bytes": "18181"
}
],
"symlink_target": ""
} |
"""
Test client for the <TestEnv> simulation environment.
This simple program shows how to control a robot from Python.
For real applications, you may want to rely on a full middleware,
like ROS (www.ros.org).
"""
import sys
try:
    from pymorse import Morse
except ImportError:
    # pymorse is the Python binding shipped alongside MORSE; without it the
    # client cannot talk to the simulator, so bail out with a hint.
    print("you need first to install pymorse, the Python bindings for MORSE!")
    sys.exit(1)
print("Use WASD to control the robot")
with Morse() as simu:
    # Proxies for the simulated robot's motion actuator and pose sensor.
    motion = simu.robot.motion
    pose = simu.robot.pose
    v = 0.0  # commanded linear velocity, stepped by 0.1 per keypress
    w = 0.0  # commanded angular velocity, stepped by 0.1 per keypress
    while True:
        # Blocking console prompt; one WASD letter (any case) per step.
        key = input("WASD?")
        if key.lower() == "w":
            v += 0.1
        elif key.lower() == "s":
            v -= 0.1
        elif key.lower() == "a":
            w += 0.1
        elif key.lower() == "d":
            w -= 0.1
        else:
            # Unrecognized key: re-prompt without sending a new command.
            continue
        # here, we call 'get' on the pose sensor: this is a blocking
        # call. Check pymorse documentation for alternatives, including
        # asynchronous stream subscription.
        print("The robot is currently at: %s" % pose.get())
        motion.publish({"v": v, "w": w})
| {
"content_hash": "a0a17e6cb9c52b7074f5d772d31eadd2",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 78,
"avg_line_length": 22.72340425531915,
"alnum_prop": 0.598314606741573,
"repo_name": "zwarren/morse-car-controller",
"id": "014f39170fb33d54770b679404cc6555267d253b",
"size": "1092",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "simulation/TestEnv/scripts/TestEnv_client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "87350"
},
{
"name": "Shell",
"bytes": "630"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.