text stringlengths 4 1.02M | meta dict |
|---|---|
from django.contrib import admin
from .models import FAQ, GenericCopy, Menu, MenuLink
@admin.register(GenericCopy)
class GenericCopyAdmin(admin.ModelAdmin):
    """Admin list view for generic copy snippets, keyed per conference."""
    list_display = ("key", "content", "conference")
@admin.register(FAQ)
class FAQAdmin(admin.ModelAdmin):
    """Admin list view for FAQs; only the owning conference is shown."""
    list_display = ("conference",)
@admin.register(Menu)
class MenuAdmin(admin.ModelAdmin):
    """Admin list view for menus, identified per conference."""
    list_display = ("identifier", "title", "conference")
@admin.register(MenuLink)
class MenuLinkAdmin(admin.ModelAdmin):
    """Admin list view for menu links, filterable by menu and primary flag."""
    list_display = ("title", "menu")
    list_filter = ("menu", "is_primary")
| {
"content_hash": "d518f79241bba4b82eeda8b7ca609ccf",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 56,
"avg_line_length": 23.541666666666668,
"alnum_prop": 0.7168141592920354,
"repo_name": "patrick91/pycon",
"id": "7cb32ee2cd72ed67e07f05e4bdc3de92ba93f08c",
"size": "565",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/cms/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1456"
},
{
"name": "Python",
"bytes": "13911"
}
],
"symlink_target": ""
} |
from distutils.core import setup
setup(
    # Package identity.
    name="transientSources",
    version="0.0.1dev",
    description='',
    long_description=''' ''',
    # What code to include as packages.
    packages=['transientSources'],
    # FIX: the original passed ``packagedir`` (no underscore); distutils only
    # warns about unknown options and silently ignores them, so the package
    # directory mapping never took effect. The correct keyword is
    # ``package_dir``.
    package_dir={'transientSources': 'transientSources'},
    # What data to include as packages.
    # NOTE(review): include_package_data is a setuptools option — distutils
    # ignores it; confirm whether this project should import from setuptools.
    include_package_data=True,
    package_data={'transientSources': ['example_data/2007uy/cfa_2007uy/*',
                                       'example_data/2007uy/B/*',
                                       'example_data/2007uy/V/*',
                                       'example_data/2007uy/i/*',
                                       'example_data/2007uy/r/*',
                                       'example_data/filters/*']
                  }
)
| {
"content_hash": "a734cc3e346486800a3de8953e83b046",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 76,
"avg_line_length": 41.15,
"alnum_prop": 0.479951397326853,
"repo_name": "rbiswas4/calibrate_spectra",
"id": "ced519b869232eeb9a7f2a171aefc654787d60ce",
"size": "878",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7740"
}
],
"symlink_target": ""
} |
import abc
import six
from neutron.api import extensions
from neutron.api.v2 import base
from neutron import manager
# Resource name constants for this extension.
BRIDGE = 'bridge'
BRIDGES = BRIDGE + 's'

# Attribute map for the ``bridges`` collection, in the standard Neutron
# extension format (allow_post/allow_put/validate/is_visible/default).
RESOURCE_ATTRIBUTE_MAP = {
    BRIDGES: {
        'id': {
            'allow_post': False,
            'allow_put': False,
            'validate': {'type:uuid': None},
            'is_visible': True,
        },
        'inbound_filter_id': {
            'allow_post': True,
            'allow_put': True,
            'validate': {'type:uuid_or_none': None},
            'is_visible': True,
            'default': None,
        },
        'name': {
            'allow_post': True,
            'allow_put': True,
            'validate': {'type:string': None},
            'is_visible': True,
            'default': '',
        },
        'outbound_filter_id': {
            'allow_post': True,
            'allow_put': True,
            'validate': {'type:uuid_or_none': None},
            'is_visible': True,
            'default': None,
        },
        'tenant_id': {
            'allow_post': True,
            'allow_put': True,
            'validate': {'type:string': None},
            'is_visible': True,
            'default': None,
        },
        'vxlan_port_id': {
            'allow_post': False,
            'allow_put': False,
            'is_visible': True,
        },
    },
}
class Bridge(extensions.ExtensionDescriptor):
    """Midonet bridge API extension descriptor.

    Exposes the ``bridges`` collection as a Neutron REST resource backed
    by the plugin returned from the NeutronManager.
    """

    @classmethod
    def get_name(cls):
        return "Midonet Bridge"

    @classmethod
    def get_alias(cls):
        return "bridge"

    @classmethod
    def get_description(cls):
        return "midonet bridge extension"

    @classmethod
    def get_namespace(cls):
        return "http://docs.openstack.org/ext/midonet-bridge/api/v1.0"

    @classmethod
    def get_updated(cls):
        return "2014-07-20T10:00:00-00:00"

    @classmethod
    def get_resources(cls):
        """Build the REST resource extension for the bridges collection."""
        plugin = manager.NeutronManager.get_plugin()
        collection_name = BRIDGES
        params = RESOURCE_ATTRIBUTE_MAP.get(collection_name, dict())
        controller = base.create_resource(
            collection_name, BRIDGE, plugin, params)
        ex = extensions.ResourceExtension(collection_name, controller)
        return [ex]

    def update_attributes_map(self, attributes):
        """Merge extended attributes into this extension's attribute map."""
        # FIX: dict.iteritems() only exists on Python 2; the file already
        # imports six, so use six.iteritems for 2/3 compatibility.
        for resource_map, attrs in six.iteritems(RESOURCE_ATTRIBUTE_MAP):
            extended_attrs = attributes.get(resource_map)
            if extended_attrs:
                attrs.update(extended_attrs)

    @classmethod
    def get_extended_resources(cls, version):
        """Return the attribute map (copied) for API v2.0, else nothing."""
        if version == "2.0":
            # dict(d) copies directly; dict(d.items()) was redundant.
            return dict(RESOURCE_ATTRIBUTE_MAP)
        else:
            return {}
@six.add_metaclass(abc.ABCMeta)
class BridgePluginBase(object):
    """Abstract interface a plugin must implement to back the extension."""

    @abc.abstractmethod
    def create_bridge(self, context, bridge):
        """Create a bridge from the request body."""

    @abc.abstractmethod
    def update_bridge(self, context, id, bridge):
        """Update the bridge identified by ``id``."""

    @abc.abstractmethod
    def get_bridge(self, context, bridge, fields=None):
        """Return a single bridge, optionally restricted to ``fields``."""

    @abc.abstractmethod
    def delete_bridge(self, context, id):
        """Delete the bridge identified by ``id``."""

    @abc.abstractmethod
    def get_bridges(self, context, filters=None, fields=None):
        """Return all bridges matching ``filters``."""
| {
"content_hash": "2f62e0255b12cabce331d0ecadddc036",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 71,
"avg_line_length": 29.42452830188679,
"alnum_prop": 0.5732606604680988,
"repo_name": "JoeMido/networking-midonet",
"id": "978d27022705e79cc873e783a77a86eb6dc8096e",
"size": "3697",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "midonet/neutron/extensions/bridge.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "252"
},
{
"name": "Python",
"bytes": "283127"
},
{
"name": "Shell",
"bytes": "16033"
}
],
"symlink_target": ""
} |
from rest_framework import serializers
from .models import Meeting, Topic, Presentor
class PresentorSerializer(serializers.ModelSerializer):
    """Serializes presentors; the email field is hidden from non-staff."""

    email = serializers.SerializerMethodField()

    def get_email(self, obj):
        # Only reveal email addresses to authenticated staff users.
        request = self.context.get('request')
        is_staff = bool(request and request.user.is_staff)
        return obj.email if is_staff else ''

    class Meta:
        model = Presentor
        fields = ('id', 'name', 'release', 'email')
class TopicSerializer(serializers.ModelSerializer):
    """Serializes a talk topic with its nested presentors."""
    # Nested presentor details, serialized via PresentorSerializer.
    presentors = PresentorSerializer(many=True)
    class Meta:
        model = Topic
        fields = (
            'id',
            'title',
            'presentors',
            'length',
            'description',
            'embed_video',
            'slides_link',
            'start_time',
            'approved',
            'license'
        )
        # Expand one level of related objects in the output.
        depth = 1
class MeetingSerializer(serializers.ModelSerializer):
    """Serializes a meeting with its nested topics."""
    topics = TopicSerializer(many=True)
    class Meta:
        model = Meeting
        fields = ('id', 'when', 'where', 'live_stream', 'topics')
        # Expand two levels so topics include their presentors.
        depth = 2
| {
"content_hash": "863045afc74d2bf2bca1432b4ab02159",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 65,
"avg_line_length": 23.4375,
"alnum_prop": 0.5635555555555556,
"repo_name": "tanyaschlusser/chipy.org",
"id": "d011833c14eb9886e0eda7894db7dc59940814e8",
"size": "1125",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "chipy_org/apps/meetings/serializers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "199261"
},
{
"name": "HTML",
"bytes": "77240"
},
{
"name": "JavaScript",
"bytes": "36277"
},
{
"name": "Python",
"bytes": "158016"
}
],
"symlink_target": ""
} |
import asposecellscloud
from asposecellscloud.CellsApi import CellsApi
from asposecellscloud.CellsApi import ApiException
import asposestoragecloud
from asposestoragecloud.StorageApi import StorageApi
# Aspose Cloud credentials (replace with values from the Aspose dashboard).
apiKey = "XXXXX" #specify App Key
appSid = "XXXXX" #specify App SID
apiServer = "http://api.aspose.com/v1.1"  # base API endpoint (not referenced below)
data_folder = "../../data/"
#Instantiate Aspose Storage API SDK client
storage_apiClient = asposestoragecloud.ApiClient.ApiClient(apiKey, appSid, True)
storageApi = StorageApi(storage_apiClient)
#Instantiate Aspose Cells API SDK client
api_client = asposecellscloud.ApiClient.ApiClient(apiKey, appSid, True)
cellsApi = CellsApi(api_client);
#Input workbook, worksheet and target image format.
#NOTE: ``format`` shadows the builtin of the same name.
name = "Sample_Test_Book"
filename = name + ".xls"
sheetName = "Sheet1"
format = "png"
#upload the workbook to Aspose cloud storage
storageApi.PutCreate(Path=filename, file=data_folder + filename)
try:
    #invoke the Aspose.Cells Cloud API to render the worksheet as an image
    #(the original comment incorrectly said "add a new worksheet")
    response = cellsApi.GetWorkSheetWithFormat(name=filename, sheetName=sheetName, format=format)
    if response.Status == "OK":
        #download converted document from response stream
        outfilename = "c:/temp/" + name + "." + format
        with open(outfilename, 'wb') as f:
            for chunk in response.InputStream:
                f.write(chunk)
except ApiException as ex:
    #Python 2 print statements: this example targets Python 2.
    print "ApiException:"
    print "Code:" + str(ex.code)
    print "Message:" + ex.message
| {
"content_hash": "ced912554db7f23f5828078899a903b4",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 97,
"avg_line_length": 33.95348837209303,
"alnum_prop": 0.7143835616438357,
"repo_name": "asposecells/Aspose_Cells_Cloud",
"id": "90826792c7486bedfa757943754925bda4bc3d69",
"size": "1460",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Examples/Python/Examples/ConvertWorkSheetToImage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "203"
},
{
"name": "C#",
"bytes": "897367"
},
{
"name": "HTML",
"bytes": "110"
},
{
"name": "Java",
"bytes": "900042"
},
{
"name": "JavaScript",
"bytes": "664643"
},
{
"name": "Objective-C",
"bytes": "1142444"
},
{
"name": "PHP",
"bytes": "626745"
},
{
"name": "Python",
"bytes": "833397"
},
{
"name": "Ruby",
"bytes": "799033"
}
],
"symlink_target": ""
} |
from collections import OrderedDict
import pytest
import pandasdmx
from pandasdmx import Request
from .data import BASE_PATH as test_data_path
# All INSEE fixture files live under <test data root>/INSEE.
test_data_path = test_data_path / "INSEE"
DATAFLOW_FP = test_data_path / "dataflow.xml"
# Per-dataset fixtures: the data XML, its matching datastructure XML, and
# the number of series expected when the data file is parsed.
DATASETS = {
    "IPI-2010-A21": {
        "data-fp": test_data_path / "IPI-2010-A21.xml",
        "datastructure-fp": test_data_path / "IPI-2010-A21-structure.xml",
        "series_count": 20,
    },
    "CNA-2010-CONSO-SI-A17": {
        "data-fp": test_data_path / "CNA-2010-CONSO-SI-A17.xml",
        "datastructure-fp": (test_data_path / "CNA-2010-CONSO-SI-A17-structure.xml"),
        "series_count": 1,
    },
}
# Regression fixture where FREQ arrives as a series attribute (see the
# test_freq_in_series_attribute test below).
SERIES = {
    "UNEMPLOYMENT_CAT_A_B_C": {"data-fp": test_data_path / "bug-series-freq-data.xml"}
}
class TestINSEE:
    """File-based tests for parsing INSEE SDMX fixtures."""
    @pytest.fixture(scope="class")
    def req(self):
        # Shared Request instance for the INSEE data source.
        return Request("INSEE")
    def test_load_dataset(self, req):
        """Parse dataflows, datastructure and data for IPI-2010-A21."""
        dataset_code = "IPI-2010-A21"
        # Load all dataflows
        dataflows_response = pandasdmx.read_sdmx(DATAFLOW_FP)
        dataflows = dataflows_response.dataflow
        # The fixture file lists 663 dataflows, including the one under test.
        assert len(dataflows) == 663
        assert dataset_code in dataflows
        # Load datastructure for current dataset_code
        fp_datastructure = DATASETS[dataset_code]["datastructure-fp"]
        datastructure_response = pandasdmx.read_sdmx(fp_datastructure)
        assert dataset_code in datastructure_response.dataflow
        dsd = datastructure_response.dataflow[dataset_code].structure
        # Verify dimensions list, excluding the time dimensions.
        dimensions = OrderedDict(
            [dim.id, dim]
            for dim in dsd.dimensions
            if dim.id not in ["TIME", "TIME_PERIOD"]
        )
        dim_keys = list(dimensions.keys())
        assert dim_keys == ["FREQ", "PRODUIT", "NATURE"]
        # Load datas for the current dataset
        fp_data = DATASETS[dataset_code]["data-fp"]
        data = pandasdmx.read_sdmx(fp_data)
        # Verify series count and values
        series = data.data[0].series
        series_count = len(series)
        assert series_count == DATASETS[dataset_code]["series_count"]
        first_series = series[0]
        observations = first_series
        # Observations in this fixture run from 2015-10 back to 1990-01.
        first_obs = observations[0]
        last_obs = observations[-1]
        assert first_obs.dim == "2015-10"
        assert first_obs.value == "105.61"
        assert last_obs.dim == "1990-01"
        assert last_obs.value == "139.22"
    def test_fixe_key_names(self, req):
        """Verify key or attribute contains '-' in name."""
        dataset_code = "CNA-2010-CONSO-SI-A17"
        fp_datastructure = DATASETS[dataset_code]["datastructure-fp"]
        datastructure_response = pandasdmx.read_sdmx(fp_datastructure)
        assert dataset_code in datastructure_response.dataflow
        dsd = datastructure_response.dataflow[dataset_code].structure
        # Non-time dimensions, in declaration order.
        dimensions = OrderedDict(
            [dim.id, dim]
            for dim in dsd.dimensions
            if dim.id not in ["TIME", "TIME_PERIOD"]
        )
        dim_keys = list(dimensions.keys())
        assert dim_keys == ["SECT-INST", "OPERATION", "PRODUIT", "PRIX"]
        fp_data = DATASETS[dataset_code]["data-fp"]
        data = pandasdmx.read_sdmx(fp_data)
        series = data.data[0].series
        series_key = list(series.keys())[0]
        # Hyphenated identifiers must survive parsing in the series key...
        assert list(series_key.values.keys()) == [
            "SECT-INST",
            "OPERATION",
            "PRODUIT",
            "PRIX",
        ]
        # ...and in the series attributes.
        assert list(series_key.attrib.keys()) == [
            "FREQ",
            "IDBANK",
            "TITLE",
            "LAST_UPDATE",
            "UNIT_MEASURE",
            "UNIT_MULT",
            "REF_AREA",
            "DECIMALS",
            "BASE_PER",
            "TIME_PER_COLLECT",
        ]
    def test_freq_in_series_attribute(self, req):
        # Test that we don't have regression on Issues #39 and #41
        # INSEE time series provide the FREQ value as attribute on the series
        # instead of a dimension. This caused a runtime error when writing as
        # pandas dataframe.
        data_response = pandasdmx.read_sdmx(SERIES["UNEMPLOYMENT_CAT_A_B_C"]["data-fp"])
        pandasdmx.to_pandas(data_response)
| {
"content_hash": "9406353712d9d3451f3444f36a6fe591",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 88,
"avg_line_length": 32.045801526717554,
"alnum_prop": 0.5936160076226774,
"repo_name": "dr-leo/pandaSDMX",
"id": "e5e7611e98ab2929c6e3afb653d85ecb72983e86",
"size": "4267",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pandasdmx/tests/test_insee.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "346038"
}
],
"symlink_target": ""
} |
# Demo-harness metadata strings (presumably consumed by the example runner
# that renders these cx_Oracle demos — TODO confirm against the harness).
desc="""calling a function"""
setup="""
"""
cleanup="""
"""
notes="""
To call a function which is part of a package, or a function which
is part of another schema, use dotted notation like this:
<ul>
<li>package_name.function_name
<li>schema_name.function_name
<li>schema_name.package_name.function_name
</ul>
<p>
Parameters are passed as a list. callproc() returns a list
of the parameters passed in. If any parameters are OUT or
IN OUT, the returned list will have the modified values.
<p>
There's nothing special about calling a procedure during a
transaction. If the procedure modifies a table, you will
need to do a commit. It's possible that the procedure may
also do a commit (but that is generally a bad practice).
"""
output="""
"""
import sys
import cx_Oracle
def demo(conn,curs):
    """Call cxdemo functions three ways: callfunc with no arguments,
    callfunc with positional arguments, and an anonymous PL/SQL block
    with bind variables (Python 2 demo; uses print statements)."""
    x = curs.callfunc('cxdemo.f0', cx_Oracle.NUMBER)
    print x
    x = curs.callfunc('cxdemo.f2', cx_Oracle.NUMBER, [1,2])
    print x
    # Same call as above, but binding the return value explicitly.
    r = curs.var(cx_Oracle.NUMBER)
    curs.execute('begin :rv := cxdemo.f2(:x,:y); end;', rv=r, x=3, y=4)
    print r
if __name__ == '__main__':
    # Usage: function.py <connect-string>
    connstr = sys.argv[1]
    conn = cx_Oracle.connect(connstr)
    curs = conn.cursor()
    demo(conn,curs)
    conn.close()
| {
"content_hash": "8d6bb276ecaededc53c454df6a58acbf",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 71,
"avg_line_length": 24.73469387755102,
"alnum_prop": 0.6806930693069307,
"repo_name": "marhar/sqlminus",
"id": "4eed58158eb0a3115d1d4c428fed271d29dd0a3d",
"size": "1453",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cx-oracle-demos/function.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "415"
},
{
"name": "HTML",
"bytes": "23121"
},
{
"name": "Makefile",
"bytes": "1569"
},
{
"name": "PLSQL",
"bytes": "899"
},
{
"name": "PLpgSQL",
"bytes": "43"
},
{
"name": "Python",
"bytes": "107916"
},
{
"name": "Shell",
"bytes": "673"
}
],
"symlink_target": ""
} |
from msrest.serialization import Model
class JobAction(Model):
    """JobAction.
    :param type: Gets or sets the job action type. Possible values include:
     'Http', 'Https', 'StorageQueue', 'ServiceBusQueue', 'ServiceBusTopic'
    :type type: str or :class:`JobActionType
     <azure.mgmt.scheduler.models.JobActionType>`
    :param request: Gets or sets the http requests.
    :type request: :class:`HttpRequest
     <azure.mgmt.scheduler.models.HttpRequest>`
    :param queue_message: Gets or sets the storage queue message.
    :type queue_message: :class:`StorageQueueMessage
     <azure.mgmt.scheduler.models.StorageQueueMessage>`
    :param service_bus_queue_message: Gets or sets the service bus queue
     message.
    :type service_bus_queue_message: :class:`ServiceBusQueueMessage
     <azure.mgmt.scheduler.models.ServiceBusQueueMessage>`
    :param service_bus_topic_message: Gets or sets the service bus topic
     message.
    :type service_bus_topic_message: :class:`ServiceBusTopicMessage
     <azure.mgmt.scheduler.models.ServiceBusTopicMessage>`
    :param retry_policy: Gets or sets the retry policy.
    :type retry_policy: :class:`RetryPolicy
     <azure.mgmt.scheduler.models.RetryPolicy>`
    :param error_action: Gets or sets the error action.
    :type error_action: :class:`JobErrorAction
     <azure.mgmt.scheduler.models.JobErrorAction>`
    """
    # msrest (de)serialization map: attribute name -> wire key + msrest type.
    # NOTE: this is generated-SDK-style code; edits here may be overwritten
    # by the code generator.
    _attribute_map = {
        'type': {'key': 'type', 'type': 'JobActionType'},
        'request': {'key': 'request', 'type': 'HttpRequest'},
        'queue_message': {'key': 'queueMessage', 'type': 'StorageQueueMessage'},
        'service_bus_queue_message': {'key': 'serviceBusQueueMessage', 'type': 'ServiceBusQueueMessage'},
        'service_bus_topic_message': {'key': 'serviceBusTopicMessage', 'type': 'ServiceBusTopicMessage'},
        'retry_policy': {'key': 'retryPolicy', 'type': 'RetryPolicy'},
        'error_action': {'key': 'errorAction', 'type': 'JobErrorAction'},
    }
    def __init__(self, type=None, request=None, queue_message=None, service_bus_queue_message=None, service_bus_topic_message=None, retry_policy=None, error_action=None):
        # All fields optional; only the one matching ``type`` is expected to
        # be populated (presumably enforced server-side — not checked here).
        self.type = type
        self.request = request
        self.queue_message = queue_message
        self.service_bus_queue_message = service_bus_queue_message
        self.service_bus_topic_message = service_bus_topic_message
        self.retry_policy = retry_policy
        self.error_action = error_action
| {
"content_hash": "8c415ea5727d3f8eb66f8a599a16f0a4",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 170,
"avg_line_length": 49.14,
"alnum_prop": 0.689051689051689,
"repo_name": "lmazuel/azure-sdk-for-python",
"id": "6dcf9f3aaef2026aabb1aab7ee148a85447177ed",
"size": "2931",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "azure-mgmt-scheduler/azure/mgmt/scheduler/models/job_action.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42572767"
}
],
"symlink_target": ""
} |
"""Test the Foscam config flow."""
from unittest.mock import patch
from libpyfoscam.foscam import (
ERROR_FOSCAM_AUTH,
ERROR_FOSCAM_CMD,
ERROR_FOSCAM_UNAVAILABLE,
ERROR_FOSCAM_UNKNOWN,
)
from homeassistant import config_entries, data_entry_flow
from homeassistant.components.foscam import config_flow
from tests.common import MockConfigEntry
# Config the mocked camera accepts (admin user with full rights).
VALID_CONFIG = {
    config_flow.CONF_HOST: "10.0.0.2",
    config_flow.CONF_PORT: 88,
    config_flow.CONF_USERNAME: "admin",
    config_flow.CONF_PASSWORD: "1234",
    config_flow.CONF_STREAM: "Main",
    config_flow.CONF_RTSP_PORT: 554,
}
# Username that authenticates but lacks rights: get_dev_info fails with
# ERROR_FOSCAM_CMD (see setup_mock_foscam_camera).
OPERATOR_CONFIG = {
    config_flow.CONF_USERNAME: "operator",
}
# Username that makes the mocked camera return ERROR_FOSCAM_UNKNOWN.
INVALID_RESPONSE_CONFIG = {
    config_flow.CONF_USERNAME: "interr",
}
# Identity reported by the mocked camera on success.
CAMERA_NAME = "Mocked Foscam Camera"
CAMERA_MAC = "C0:C1:D0:F4:B4:D4"
def setup_mock_foscam_camera(mock_foscam_camera):
    """Mock FoscamCamera simulating behaviour using a base valid config."""
    def configure_mock_on_init(host, port, user, passwd, verbose=False):
        # Return codes for get_product_all_info / get_dev_info (0 == success).
        product_all_info_rc = 0
        dev_info_rc = 0
        dev_info_data = {}
        # Branch order defines precedence: unreachable host/port first,
        # then bad credentials, then per-user simulated failure modes.
        if (
            host != VALID_CONFIG[config_flow.CONF_HOST]
            or port != VALID_CONFIG[config_flow.CONF_PORT]
        ):
            product_all_info_rc = dev_info_rc = ERROR_FOSCAM_UNAVAILABLE
        elif (
            user
            not in [
                VALID_CONFIG[config_flow.CONF_USERNAME],
                OPERATOR_CONFIG[config_flow.CONF_USERNAME],
                INVALID_RESPONSE_CONFIG[config_flow.CONF_USERNAME],
            ]
            or passwd != VALID_CONFIG[config_flow.CONF_PASSWORD]
        ):
            product_all_info_rc = dev_info_rc = ERROR_FOSCAM_AUTH
        elif user == INVALID_RESPONSE_CONFIG[config_flow.CONF_USERNAME]:
            product_all_info_rc = dev_info_rc = ERROR_FOSCAM_UNKNOWN
        elif user == OPERATOR_CONFIG[config_flow.CONF_USERNAME]:
            # Operator can authenticate but lacks rights for get_dev_info.
            dev_info_rc = ERROR_FOSCAM_CMD
        else:
            # Full success: report the mocked camera's identity.
            dev_info_data["devName"] = CAMERA_NAME
            dev_info_data["mac"] = CAMERA_MAC
        mock_foscam_camera.get_product_all_info.return_value = (product_all_info_rc, {})
        mock_foscam_camera.get_dev_info.return_value = (dev_info_rc, dev_info_data)
        return mock_foscam_camera
    # Constructing FoscamCamera(...) in the code under test triggers this.
    mock_foscam_camera.side_effect = configure_mock_on_init
async def test_user_valid(hass):
    """Test valid config from user input."""
    result = await hass.config_entries.flow.async_init(
        config_flow.DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    # The user step first presents an empty form.
    assert result["type"] == data_entry_flow.FlowResultType.FORM
    assert result["errors"] == {}
    with patch(
        "homeassistant.components.foscam.config_flow.FoscamCamera",
    ) as mock_foscam_camera, patch(
        "homeassistant.components.foscam.async_setup_entry",
        return_value=True,
    ) as mock_setup_entry:
        setup_mock_foscam_camera(mock_foscam_camera)
        result = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            VALID_CONFIG,
        )
        await hass.async_block_till_done()
    # Success: an entry titled with the camera name is created and the
    # integration is set up exactly once.
    assert result["type"] == data_entry_flow.FlowResultType.CREATE_ENTRY
    assert result["title"] == CAMERA_NAME
    assert result["data"] == VALID_CONFIG
    assert len(mock_setup_entry.mock_calls) == 1
async def test_user_invalid_auth(hass):
    """Test we handle invalid auth from user input."""
    flow = await hass.config_entries.flow.async_init(
        config_flow.DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert flow["type"] == data_entry_flow.FlowResultType.FORM
    assert flow["errors"] == {}
    # Same camera config, but with a username the mocked camera rejects.
    bad_credentials = {**VALID_CONFIG, config_flow.CONF_USERNAME: "invalid"}
    with patch(
        "homeassistant.components.foscam.config_flow.FoscamCamera",
    ) as mock_foscam_camera:
        setup_mock_foscam_camera(mock_foscam_camera)
        flow = await hass.config_entries.flow.async_configure(
            flow["flow_id"], bad_credentials
        )
        await hass.async_block_till_done()
    assert flow["type"] == data_entry_flow.FlowResultType.FORM
    assert flow["errors"] == {"base": "invalid_auth"}
async def test_user_cannot_connect(hass):
    """Test we handle cannot connect error from user input."""
    flow = await hass.config_entries.flow.async_init(
        config_flow.DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert flow["type"] == data_entry_flow.FlowResultType.FORM
    assert flow["errors"] == {}
    # Point the config at a host the mocked camera treats as unreachable.
    unreachable = {**VALID_CONFIG, config_flow.CONF_HOST: "127.0.0.1"}
    with patch(
        "homeassistant.components.foscam.config_flow.FoscamCamera",
    ) as mock_foscam_camera:
        setup_mock_foscam_camera(mock_foscam_camera)
        flow = await hass.config_entries.flow.async_configure(
            flow["flow_id"], unreachable
        )
        await hass.async_block_till_done()
    assert flow["type"] == data_entry_flow.FlowResultType.FORM
    assert flow["errors"] == {"base": "cannot_connect"}
async def test_user_invalid_response(hass):
    """Test we handle invalid response error from user input."""
    result = await hass.config_entries.flow.async_init(
        config_flow.DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == data_entry_flow.FlowResultType.FORM
    assert result["errors"] == {}
    with patch(
        "homeassistant.components.foscam.config_flow.FoscamCamera",
    ) as mock_foscam_camera:
        setup_mock_foscam_camera(mock_foscam_camera)
        # The INVALID_RESPONSE_CONFIG user makes the mocked camera return
        # ERROR_FOSCAM_UNKNOWN.
        invalid_response = VALID_CONFIG.copy()
        invalid_response[config_flow.CONF_USERNAME] = INVALID_RESPONSE_CONFIG[
            config_flow.CONF_USERNAME
        ]
        result = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            invalid_response,
        )
        await hass.async_block_till_done()
    assert result["type"] == data_entry_flow.FlowResultType.FORM
    assert result["errors"] == {"base": "invalid_response"}
async def test_user_already_configured(hass):
    """Test we handle already configured from user input."""
    # Pre-register an entry with the same config so the flow must abort.
    entry = MockConfigEntry(
        domain=config_flow.DOMAIN,
        data=VALID_CONFIG,
    )
    entry.add_to_hass(hass)
    result = await hass.config_entries.flow.async_init(
        config_flow.DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == data_entry_flow.FlowResultType.FORM
    assert result["errors"] == {}
    with patch(
        "homeassistant.components.foscam.config_flow.FoscamCamera",
    ) as mock_foscam_camera:
        setup_mock_foscam_camera(mock_foscam_camera)
        result = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            VALID_CONFIG,
        )
        await hass.async_block_till_done()
    # Duplicate configs abort instead of creating a second entry.
    assert result["type"] == data_entry_flow.FlowResultType.ABORT
    assert result["reason"] == "already_configured"
async def test_user_unknown_exception(hass):
    """Test we handle unknown exceptions from user input."""
    flow = await hass.config_entries.flow.async_init(
        config_flow.DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert flow["type"] == data_entry_flow.FlowResultType.FORM
    assert flow["errors"] == {}
    with patch(
        "homeassistant.components.foscam.config_flow.FoscamCamera",
    ) as mock_foscam_camera:
        # Any unexpected error while talking to the camera maps to "unknown".
        mock_foscam_camera.side_effect = Exception("test")
        flow = await hass.config_entries.flow.async_configure(
            flow["flow_id"], VALID_CONFIG
        )
        await hass.async_block_till_done()
    assert flow["type"] == data_entry_flow.FlowResultType.FORM
    assert flow["errors"] == {"base": "unknown"}
| {
"content_hash": "7451e272644a3e5c99d098f95ded5ff3",
"timestamp": "",
"source": "github",
"line_count": 247,
"max_line_length": 88,
"avg_line_length": 32.17004048582996,
"alnum_prop": 0.6390636798389127,
"repo_name": "w1ll1am23/home-assistant",
"id": "e8bdb6900f220772b38b5100a7282fab5a333b77",
"size": "7946",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "tests/components/foscam/test_config_flow.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "52277012"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
} |
# Python Challenge #0: the landing page hints at 2**38.
base_url = 'http://www.pythonchallenge.com/pc/def/{}.html'
# print() as a function with a single argument works on both Python 2 and 3;
# str.format accepts the int directly, so the str() wrapper was redundant.
print(base_url.format(2 ** 38))
"content_hash": "893be11676e7cb82f7dc1e5060c40bce",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 58,
"avg_line_length": 31,
"alnum_prop": 0.6989247311827957,
"repo_name": "joeeoj/python-challenge-practice",
"id": "def88c86c98a706f2a4e9a311949fd1abd05e3c5",
"size": "195",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "challenge_00.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2493"
}
],
"symlink_target": ""
} |
"""Provides device automations for ZHA devices that emit events."""
import voluptuous as vol
import homeassistant.components.automation.event as event
from homeassistant.components.device_automation import TRIGGER_BASE_SCHEMA
from homeassistant.components.device_automation.exceptions import (
InvalidDeviceAutomationConfig,
)
from homeassistant.const import CONF_DEVICE_ID, CONF_DOMAIN, CONF_PLATFORM, CONF_TYPE
from . import DOMAIN
from .core.helpers import async_get_zha_device
CONF_SUBTYPE = "subtype"
DEVICE = "device"
DEVICE_IEEE = "device_ieee"
ZHA_EVENT = "zha_event"
# Device triggers require a type and subtype on top of the base
# device-automation schema.
TRIGGER_SCHEMA = TRIGGER_BASE_SCHEMA.extend(
    {vol.Required(CONF_TYPE): str, vol.Required(CONF_SUBTYPE): str}
)
async def async_validate_trigger_config(hass, config):
    """Validate a ZHA device trigger config.

    Raises InvalidDeviceAutomationConfig when the referenced ZHA device
    cannot be resolved or does not declare the requested trigger. The
    device check only runs when the zha integration is loaded.
    """
    config = TRIGGER_SCHEMA(config)
    if "zha" in hass.config.components:
        trigger = (config[CONF_TYPE], config[CONF_SUBTYPE])
        try:
            zha_device = await async_get_zha_device(hass, config[CONF_DEVICE_ID])
        except (KeyError, AttributeError) as err:
            # Chain the original lookup failure for easier debugging
            # (same exception type as before, so callers are unaffected).
            raise InvalidDeviceAutomationConfig from err
        if (
            zha_device.device_automation_triggers is None
            or trigger not in zha_device.device_automation_triggers
        ):
            raise InvalidDeviceAutomationConfig
    return config
async def async_attach_trigger(hass, config, action, automation_info):
    """Listen for state changes based on configuration."""
    trigger_key = (config[CONF_TYPE], config[CONF_SUBTYPE])
    try:
        zha_device = await async_get_zha_device(hass, config[CONF_DEVICE_ID])
    except (KeyError, AttributeError):
        return None
    if trigger_key not in zha_device.device_automation_triggers:
        return None
    # Translate the device trigger into a plain event trigger that fires on
    # zha_event payloads matching this device and trigger data.
    event_data = zha_device.device_automation_triggers[trigger_key]
    event_config = event.TRIGGER_SCHEMA(
        {
            event.CONF_PLATFORM: "event",
            event.CONF_EVENT_TYPE: ZHA_EVENT,
            event.CONF_EVENT_DATA: {DEVICE_IEEE: str(zha_device.ieee), **event_data},
        }
    )
    return await event.async_attach_trigger(
        hass, event_config, action, automation_info, platform_type="device"
    )
async def async_get_triggers(hass, device_id):
    """List device triggers.
    Make sure the device supports device automations and
    if it does return the trigger list.
    """
    zha_device = await async_get_zha_device(hass, device_id)
    if not zha_device.device_automation_triggers:
        return
    # One trigger dict per (type, subtype) pair declared by the device.
    return [
        {
            CONF_DEVICE_ID: device_id,
            CONF_DOMAIN: DOMAIN,
            CONF_PLATFORM: DEVICE,
            CONF_TYPE: trigger,
            CONF_SUBTYPE: subtype,
        }
        for trigger, subtype in zha_device.device_automation_triggers
    ]
| {
"content_hash": "b2da484a2be8fb4c65ea201216b3a1b2",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 85,
"avg_line_length": 31.34065934065934,
"alnum_prop": 0.6711079943899019,
"repo_name": "pschmitt/home-assistant",
"id": "5f842d7f3804af3ad310e13ce593932333c4807c",
"size": "2852",
"binary": false,
"copies": "4",
"ref": "refs/heads/dev",
"path": "homeassistant/components/zha/device_trigger.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1522"
},
{
"name": "Python",
"bytes": "24807200"
},
{
"name": "Shell",
"bytes": "4342"
}
],
"symlink_target": ""
} |
from contextlib import contextmanager
import os
@contextmanager
def test_file(path):
try:
open_file = open(path, 'w')
yield open_file
finally:
open_file.close()
if os.path.exists(path):
os.unlink(path)
| {
"content_hash": "4c7aae86ea026fe0e9fd4f818ce12e16",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 37,
"avg_line_length": 21.25,
"alnum_prop": 0.6039215686274509,
"repo_name": "ActiveState/code",
"id": "2a3384de931a4a8655013657313a52f9c61c5faf",
"size": "255",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recipes/Python/502278_Context_manager_managing_test/recipe-502278.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "35894"
},
{
"name": "C",
"bytes": "56048"
},
{
"name": "C++",
"bytes": "90880"
},
{
"name": "HTML",
"bytes": "11656"
},
{
"name": "Java",
"bytes": "57468"
},
{
"name": "JavaScript",
"bytes": "181218"
},
{
"name": "PHP",
"bytes": "250144"
},
{
"name": "Perl",
"bytes": "37296"
},
{
"name": "Perl 6",
"bytes": "9914"
},
{
"name": "Python",
"bytes": "17387779"
},
{
"name": "Ruby",
"bytes": "40233"
},
{
"name": "Shell",
"bytes": "190732"
},
{
"name": "Tcl",
"bytes": "674650"
}
],
"symlink_target": ""
} |
from testtools import TestCase
from twisted.internet.defer import succeed
from fusion_util import tap
class TapTests(TestCase):
    """
    Tests for `fusion_util.tap.tap`.
    """
    def test_tap(self):
        """
        L{fusion_util.tap.tap} calls a function with a result and optional
        positional and keyword arguments and discards its return value, instead
        returning the original result.
        """
        def func(result, a, b):
            # Record what tap forwarded; the 5144 return value must be
            # discarded by tap, not propagated down the callback chain.
            self.calledWith = result, a, b
            return 5144
        def _checkResult(result):
            # The return value of func is discarded.
            self.assertEquals(result, 42)
            self.assertEquals(self.calledWith, (42, 1, 2))
        d = succeed(42)
        # Extra positional/keyword args are forwarded through tap to func.
        d.addCallback(tap(func), 1, b=2)
        d.addCallback(_checkResult)
        return d
| {
"content_hash": "b9c9333dd8fa44bb323235802f778db2",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 79,
"avg_line_length": 26.93548387096774,
"alnum_prop": 0.6035928143712574,
"repo_name": "fusionapp/fusion-util",
"id": "198daf788dd9016ef6f6351277f1111d49f00632",
"size": "835",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fusion_util/test/test_tap.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "126605"
}
],
"symlink_target": ""
} |
# FIX: the file previously began with the shell command "pip install
# wikipedia" pasted verbatim, which is a SyntaxError in Python. Install the
# dependency from a shell first:
#   pip install wikipedia
import wikipedia

# REPL-transcript exploration of the wikipedia package. Bare expressions
# below were interactive echoes; they have no effect when run as a script.
wikipedia.search('Dynamic Connectivity')
results = wikipedia.search('Dynamic Connectivity')
results
wikipedia.search('Leo Editor')
leo_results = wikipedia.search('Leo Editor')
leo_results
# Fetch the full page for the first search hit.
page_name = results[0]
page = wikipedia.page(page_name)
page
page.title
page.html()
page.links
# print() with one argument works on both Python 2 and 3.
print(page.content)
| {
"content_hash": "c16e46b837a6b3fc748b1b397e0ad0f3",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 50,
"avg_line_length": 13.538461538461538,
"alnum_prop": 0.7698863636363636,
"repo_name": "satishgoda/learning",
"id": "9e3b5f796ffe810444822cea2468c820513dfc99",
"size": "401",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "meta/wikipedia/sandbox/pypi_wikipedia.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CoffeeScript",
"bytes": "635"
},
{
"name": "HTML",
"bytes": "1975916"
},
{
"name": "Java",
"bytes": "420"
},
{
"name": "JavaScript",
"bytes": "313"
},
{
"name": "Jupyter Notebook",
"bytes": "1279896"
},
{
"name": "Python",
"bytes": "15661"
}
],
"symlink_target": ""
} |
import json
import datetime
from twisted.python import log
from twisted.internet import defer
from autobahn import util
from autobahn.wamp import auth
from autobahn.wamp import types
from autobahn.twisted.wamp import ApplicationSession, RouterSession
from autobahn.twisted.websocket import WampWebSocketServerProtocol, WampWebSocketServerFactory
class UserDb:
    """
    An in-memory stand-in for a real user database.
    """

    def __init__(self):
        # Maps authid -> (salt, key, authrole).
        self._creds = {}

    def add(self, authid, authrole, secret, salt = None):
        """
        Register credentials for a user and return the stored record.

        When a salt is given, the stored key is derived from the secret
        via auth.derive_key; otherwise the plain secret is stored.
        """
        key = auth.derive_key(secret, salt) if salt else secret
        record = (salt, key, authrole)
        self._creds[authid] = record
        return record

    def get(self, authid):
        """
        Look up a user's credentials.

        Returns a Deferred (to simulate an asynchronous backend) firing
        with (salt, key, authrole), or (None, None, None) for unknown users.
        """
        return defer.succeed(self._creds.get(authid, (None, None, None)))
class PendingAuth:
    """
    Holds the state of one in-flight WAMP-CRA authentication: the
    serialized challenge sent to the client and the signature we expect
    back from it.
    """

    def __init__(self, key, session, authid, authrole, authmethod, authprovider):
        self.authid = authid
        self.authrole = authrole
        self.authmethod = authmethod
        self.authprovider = authprovider
        self.session = session
        self.timestamp = util.utcnow()
        self.nonce = util.newid()

        # The challenge is the JSON-serialized authentication context.
        self.challenge = json.dumps({
            'authid': self.authid,
            'authrole': self.authrole,
            'authmethod': self.authmethod,
            'authprovider': self.authprovider,
            'session': self.session,
            'nonce': self.nonce,
            'timestamp': self.timestamp
        })

        # Signature the client must reproduce over the challenge, keyed
        # by the user's (possibly salt-derived) secret.
        self.signature = auth.compute_wcs(key, self.challenge)
class MyRouterSession(RouterSession):
    """
    Custom router session that authenticates attaching clients via
    WAMP-CRA against the user database on the session factory.
    """

    @defer.inlineCallbacks
    def onHello(self, realm, details):
        """
        Fired when a client wants to attach a session. Issues a WAMP-CRA
        challenge when the client offers "wampcra" and the authid is
        known to the user database; otherwise denies the client.
        """
        print("onHello: {} {}".format(realm, details))

        self._pending_auth = None

        for authmethod in (details.authmethods or []):
            if authmethod != u"wampcra":
                continue

            # Asynchronous credential lookup in the user DB.
            salt, key, role = yield self.factory.userdb.get(details.authid)

            if not key:
                continue  # unknown user

            # Record the pending authentication and challenge the client.
            self._pending_auth = PendingAuth(key, details.pending_session,
                details.authid, role, authmethod, "userdb")

            extra = {
                u'challenge': self._pending_auth.challenge
            }

            # For salted passwords, the client also needs the salt and
            # key-derivation parameters to recompute the key.
            if salt:
                extra[u'salt'] = salt
                extra[u'iterations'] = 1000
                extra[u'keylen'] = 32

            defer.returnValue(types.Challenge(u'wampcra', extra))

        defer.returnValue(types.Deny())

    def onAuthenticate(self, signature, extra):
        """
        Fired when a client responds to the authentication challenge;
        accept iff the signature matches the pending one.
        """
        print("onAuthenticate: {} {}".format(signature, extra))

        pending = self._pending_auth
        if pending is None:
            return types.Deny(message = u"no pending authentication")
        if signature != pending.signature:
            return types.Deny(message = u"signature is invalid")
        return types.Accept(authid = pending.authid,
                            authrole = pending.authrole,
                            authmethod = pending.authmethod,
                            authprovider = pending.authprovider)
class TimeService(ApplicationSession):
    """
    A simple time service application component.
    """

    def onJoin(self, details):
        print("session attached")

        def _utcnow():
            # Current UTC time, ISO-8601 formatted with a trailing 'Z'.
            return datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")

        # Expose the procedure under its WAMP URI.
        self.register(_utcnow, 'com.timeservice.now')
if __name__ == '__main__':

    import sys, argparse

    from twisted.python import log
    from twisted.internet.endpoints import serverFromString

    # Command line handling.
    parser = argparse.ArgumentParser()
    parser.add_argument("-d", "--debug", action = "store_true",
                        help = "Enable debug output.")
    parser.add_argument("-c", "--component", type = str, default = None,
                        help = "Start WAMP-WebSocket server with this application component, e.g. 'timeservice.TimeServiceBackend', or None.")
    parser.add_argument("--websocket", type = str, default = "tcp:8080",
                        help = 'WebSocket server Twisted endpoint descriptor, e.g. "tcp:9000" or "unix:/tmp/mywebsocket".')
    parser.add_argument("--wsurl", type = str, default = "ws://localhost:8080",
                        help = 'WebSocket URL (must suit the endpoint), e.g. "ws://localhost:9000".')
    options = parser.parse_args()

    log.startLogging(sys.stdout)

    # Install the "best" Twisted reactor available on this platform.
    from autobahn.twisted.choosereactor import install_reactor
    reactor = install_reactor()
    if options.debug:
        print("Running on reactor {}".format(reactor))

    # The WAMP router.
    from autobahn.twisted.wamp import RouterFactory
    router_factory = RouterFactory()

    # Fake user DB with one salted and one unsalted credential.
    userdb = UserDb()
    userdb.add(authid = "peter", authrole = "user", secret = "secret1", salt = "salt123")
    userdb.add(authid = "joe", authrole = "user", secret = "secret2")

    # Router session factory, using our authenticating session class and
    # giving it access to the user DB.
    from autobahn.twisted.wamp import RouterSessionFactory
    session_factory = RouterSessionFactory(router_factory)
    session_factory.session = MyRouterSession
    session_factory.userdb = userdb

    # Start an embedded application component.
    component_config = types.ComponentConfig(realm = "realm1")
    component_session = TimeService(component_config)
    session_factory.add(component_session)

    # WAMP-over-WebSocket transport server factory.
    from autobahn.twisted.websocket import WampWebSocketServerFactory
    transport_factory = WampWebSocketServerFactory(session_factory, options.wsurl, debug = False, debug_wamp = options.debug)
    transport_factory.setProtocolOptions(failByDrop = False)

    from twisted.web.server import Site
    from twisted.web.static import File
    from autobahn.twisted.resource import WebSocketResource

    # Serve static files under "/" and the WebSocket endpoint under "/ws",
    # both from one Twisted Web site.
    root = File(".")
    root.putChild("ws", WebSocketResource(transport_factory))

    site = Site(root)
    site.noisy = False
    site.log = lambda _: None

    # Listen on the configured endpoint and enter the reactor loop.
    server = serverFromString(reactor, options.websocket)
    server.listen(site)

    reactor.run()
| {
"content_hash": "c83d6d6375f8c8c38377b79799fea8a0",
"timestamp": "",
"source": "github",
"line_count": 254,
"max_line_length": 141,
"avg_line_length": 28.70472440944882,
"alnum_prop": 0.6311891372925524,
"repo_name": "luhn/AutobahnPython",
"id": "4a4d5a5bff5a0da809f47d437e80d11bd5b85fde",
"size": "8066",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/twisted/wamp/authentication/wampcra/server.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1342"
},
{
"name": "Python",
"bytes": "869364"
}
],
"symlink_target": ""
} |
import sqlite3
import collections
from itertools import tee, ifilterfalse
from gemini_subjects import Subject
def map_samples_to_indices(c):
    """Return a dict mapping sample names (key) to sample
    indices in the numpy genotype arrays (value).
    """
    c.execute("select sample_id, name from samples")
    # sample_id is 1-based in the DB; array indices are 0-based.
    return dict((str(row['name']), row['sample_id'] - 1) for row in c)
def map_indices_to_samples(c):
    """Return a dict mapping sample indices in the numpy
    genotype arrays (key) to sample names (value).
    """
    # Delegate the DB query to map_indices_to_sample_objects and keep
    # only each Subject's name.  (Removed the superseded, commented-out
    # direct-query implementation that this comprehension replaced.)
    return {k: v.name for (k, v) in map_indices_to_sample_objects(c).items()}
def map_indices_to_sample_objects(c):
    """Return a dict mapping sample indices in the numpy genotype
    arrays (key) to Subject objects built from the samples table (value).
    """
    c.execute("select * from samples")
    # sample_id is 1-based in the DB; array indices are 0-based.
    return {row['sample_id'] - 1: Subject(row) for row in c}
def get_col_names_and_indices(sqlite_description, ignore_gt_cols=False):
    """Return ([column names], [column indices]) for a sqlite cursor
    description, optionally excluding gt* columns.
    """
    names = []
    positions = []
    # Each entry in a sqlite description is a 7-tuple whose first slot
    # is the column name, e.g.:
    #   ('variant_id', None, None, None, None, None, None)
    for position, col_tup in enumerate(sqlite_description):
        name = col_tup[0]
        # Skip genotype columns only when the caller asked us to.
        if ignore_gt_cols and name.startswith('gt'):
            continue
        positions.append(position)
        names.append(name)
    return names, positions
# http://code.activestate.com/recipes/576694/
class OrderedSet(collections.MutableSet):
    """A set that remembers insertion order.

    Backed by a dict (self.map) plus a circular doubly linked list of
    [key, prev, next] links, with self.end as the sentinel node.
    """

    def __init__(self, iterable=None):
        self.end = end = []
        end += [None, end, end]         # sentinel node for doubly linked list
        self.map = {}                   # key --> [key, prev, next]
        if iterable is not None:
            # |= goes through MutableSet and calls self.add per element.
            self |= iterable

    def __len__(self):
        return len(self.map)

    def __contains__(self, key):
        return key in self.map

    def add(self, key):
        # Append at the tail (just before the sentinel); no-op if present.
        if key not in self.map:
            end = self.end
            curr = end[1]
            curr[2] = end[1] = self.map[key] = [key, curr, end]

    def discard(self, key):
        # Unlink the node; silently ignores missing keys.
        # NOTE: `next` shadows the builtin within this method.
        if key in self.map:
            key, prev, next = self.map.pop(key)
            prev[2] = next
            next[1] = prev

    def __iter__(self):
        # Walk the linked list forward from the sentinel.
        end = self.end
        curr = end[2]
        while curr is not end:
            yield curr[0]
            curr = curr[2]

    def __reversed__(self):
        # Walk the linked list backward from the sentinel.
        end = self.end
        curr = end[1]
        while curr is not end:
            yield curr[0]
            curr = curr[1]

    def pop(self, last=True):
        """Remove and return the last element (or the first if last=False)."""
        if not self:
            raise KeyError('set is empty')
        key = self.end[1][0] if last else self.end[2][0]
        self.discard(key)
        return key

    def __repr__(self):
        if not self:
            return '%s()' % (self.__class__.__name__,)
        return '%s(%r)' % (self.__class__.__name__, list(self))

    def __eq__(self, other):
        # Order-sensitive vs another OrderedSet; order-insensitive otherwise.
        if isinstance(other, OrderedSet):
            return len(self) == len(other) and list(self) == list(other)
        return set(self) == set(other)
# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
# from: http://code.activestate.com/recipes/576693/
# NOTE(review): this is the well-known OrderedDict backport for older
# Python 2 interpreters; kept verbatim, comments only added.
class OrderedDict(dict):
    'Dictionary that remembers insertion order'
    # An inherited dict maps keys to values.
    # The inherited dict provides __getitem__, __len__, __contains__, and get.
    # The remaining methods are order-aware.
    # Big-O running times for all methods are the same as for regular dictionaries.
    # The internal self.__map dictionary maps keys to links in a doubly linked list.
    # The circular doubly linked list starts and ends with a sentinel element.
    # The sentinel element never gets deleted (this simplifies the algorithm).
    # Each link is stored as a list of length three:  [PREV, NEXT, KEY].

    def __init__(self, *args, **kwds):
        '''Initialize an ordered dictionary. Signature is the same as for
        regular dictionaries, but keyword arguments are not recommended
        because their insertion order is arbitrary.
        '''
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        try:
            # Guard against re-initialization clobbering the linked list.
            self.__root
        except AttributeError:
            self.__root = root = []  # sentinel node
            root[:] = [root, root, None]
            self.__map = {}
        self.__update(*args, **kwds)

    def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
        'od.__setitem__(i, y) <==> od[i]=y'
        # Setting a new item creates a new link which goes at the end of the linked
        # list, and the inherited dictionary is updated with the new key/value pair.
        if key not in self:
            root = self.__root
            last = root[0]
            last[1] = root[0] = self.__map[key] = [last, root, key]
        dict_setitem(self, key, value)

    def __delitem__(self, key, dict_delitem=dict.__delitem__):
        'od.__delitem__(y) <==> del od[y]'
        # Deleting an existing item uses self.__map to find the link which is
        # then removed by updating the links in the predecessor and successor nodes.
        dict_delitem(self, key)
        link_prev, link_next, key = self.__map.pop(key)
        link_prev[1] = link_next
        link_next[0] = link_prev

    def __iter__(self):
        'od.__iter__() <==> iter(od)'
        # Walk the linked list in insertion order.
        root = self.__root
        curr = root[1]
        while curr is not root:
            yield curr[2]
            curr = curr[1]

    def __reversed__(self):
        'od.__reversed__() <==> reversed(od)'
        # Walk the linked list in reverse insertion order.
        root = self.__root
        curr = root[0]
        while curr is not root:
            yield curr[2]
            curr = curr[0]

    def clear(self):
        'od.clear() -> None. Remove all items from od.'
        try:
            for node in self.__map.itervalues():
                del node[:]
            root = self.__root
            root[:] = [root, root, None]
            self.__map.clear()
        except AttributeError:
            # Linked-list attributes may not exist yet (e.g. before __init__).
            pass
        dict.clear(self)

    def popitem(self, last=True):
        '''od.popitem() -> (k, v), return and remove a (key, value) pair.
        Pairs are returned in LIFO order if last is true or FIFO order if false.
        '''
        if not self:
            raise KeyError('dictionary is empty')
        root = self.__root
        if last:
            link = root[0]
            link_prev = link[0]
            link_prev[1] = root
            root[0] = link_prev
        else:
            link = root[1]
            link_next = link[1]
            root[1] = link_next
            link_next[0] = root
        key = link[2]
        del self.__map[key]
        value = dict.pop(self, key)
        return key, value

    # -- the following methods do not depend on the internal structure --

    def keys(self):
        'od.keys() -> list of keys in od'
        return list(self)

    def values(self):
        'od.values() -> list of values in od'
        return [self[key] for key in self]

    def items(self):
        'od.items() -> list of (key, value) pairs in od'
        return [(key, self[key]) for key in self]

    def iterkeys(self):
        'od.iterkeys() -> an iterator over the keys in od'
        return iter(self)

    def itervalues(self):
        'od.itervalues -> an iterator over the values in od'
        for k in self:
            yield self[k]

    def iteritems(self):
        'od.iteritems -> an iterator over the (key, value) items in od'
        for k in self:
            yield (k, self[k])

    # NOTE: deliberately no `self` parameter -- `self` is pulled out of
    # *args so that update() can also be called as an unbound method.
    def update(*args, **kwds):
        '''od.update(E, **F) -> None. Update od from dict/iterable E and F.
        If E is a dict instance, does: for k in E: od[k] = E[k]
        If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
        Or if E is an iterable of items, does: for k, v in E: od[k] = v
        In either case, this is followed by: for k, v in F.items(): od[k] = v
        '''
        if len(args) > 2:
            raise TypeError('update() takes at most 2 positional '
                            'arguments (%d given)' % (len(args),))
        elif not args:
            raise TypeError('update() takes at least 1 argument (0 given)')
        self = args[0]
        # Make progressively weaker assumptions about "other"
        other = ()
        if len(args) == 2:
            other = args[1]
        if isinstance(other, dict):
            for key in other:
                self[key] = other[key]
        elif hasattr(other, 'keys'):
            for key in other.keys():
                self[key] = other[key]
        else:
            for key, value in other:
                self[key] = value
        for key, value in kwds.items():
            self[key] = value

    __update = update  # let subclasses override update without breaking __init__

    __marker = object()

    def pop(self, key, default=__marker):
        '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
        If key is not found, d is returned if given, otherwise KeyError is raised.
        '''
        if key in self:
            result = self[key]
            del self[key]
            return result
        if default is self.__marker:
            raise KeyError(key)
        return default

    def setdefault(self, key, default=None):
        'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
        if key in self:
            return self[key]
        self[key] = default
        return default

    def __repr__(self, _repr_running={}):
        'od.__repr__() <==> repr(od)'
        # _repr_running guards against infinite recursion for
        # self-referential dicts (shared across a thread via _get_ident).
        call_key = id(self), _get_ident()
        if call_key in _repr_running:
            return '...'
        _repr_running[call_key] = 1
        try:
            if not self:
                return '%s()' % (self.__class__.__name__,)
            return '%s(%r)' % (self.__class__.__name__, self.items())
        finally:
            del _repr_running[call_key]

    def __reduce__(self):
        'Return state information for pickling'
        items = [[k, self[k]] for k in self]
        inst_dict = vars(self).copy()
        for k in vars(OrderedDict()):
            inst_dict.pop(k, None)
        if inst_dict:
            return (self.__class__, (items,), inst_dict)
        return self.__class__, (items,)

    def copy(self):
        'od.copy() -> a shallow copy of od'
        return self.__class__(self)

    @classmethod
    def fromkeys(cls, iterable, value=None):
        '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
        and values equal to v (which defaults to None).
        '''
        d = cls()
        for key in iterable:
            d[key] = value
        return d

    def __eq__(self, other):
        '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
        while comparison to a regular mapping is order-insensitive.
        '''
        if isinstance(other, OrderedDict):
            return len(self)==len(other) and self.items() == other.items()
        return dict.__eq__(self, other)

    def __ne__(self, other):
        return not self == other

    # -- the following methods are only used in Python 2.7 --

    def viewkeys(self):
        "od.viewkeys() -> a set-like object providing a view on od's keys"
        return KeysView(self)

    def viewvalues(self):
        "od.viewvalues() -> an object providing a view on od's values"
        return ValuesView(self)

    def viewitems(self):
        "od.viewitems() -> a set-like object providing a view on od's items"
        return ItemsView(self)
def itersubclasses(cls, _seen=None):
    """Yield every (new-style) subclass of *cls*, depth first, each at
    most once, even with diamond inheritance.

    Adapted from http://code.activestate.com/recipes/576949/.
    """
    if not isinstance(cls, type):
        raise TypeError('itersubclasses must be called with '
                        'new-style classes, not %.100r' % cls)
    seen = set() if _seen is None else _seen
    try:
        direct = cls.__subclasses__()
    except TypeError:
        # Only `type` itself requires the explicit argument.
        direct = cls.__subclasses__(cls)
    for child in direct:
        if child in seen:
            continue
        seen.add(child)
        yield child
        for descendant in itersubclasses(child, seen):
            yield descendant
def partition(pred, iterable):
    """Split *iterable* into two lists: (entries where *pred* is false,
    entries where *pred* is true).

    e.g. partition(is_odd, range(10)) --> [0, 2, 4, 6, 8], [1, 3, 5, 7, 9]
    """
    falsy_src, truthy_src = tee(iterable)
    false_entries = list(ifilterfalse(pred, falsy_src))
    true_entries = list(filter(pred, truthy_src))
    return false_entries, true_entries
def quote_string(item):
    """If *item* is a string, return it wrapped in double quotes;
    any other value is returned unchanged.
    """
    # `basestring` exists only on Python 2; fall back to `str` so this
    # helper no longer raises NameError under Python 3.
    try:
        string_types = basestring
    except NameError:
        string_types = str
    if isinstance(item, string_types):
        item = "\"" + item + "\""
    return item
| {
"content_hash": "b1bb94e35c5cbb8e31d467274432e6c6",
"timestamp": "",
"source": "github",
"line_count": 430,
"max_line_length": 87,
"avg_line_length": 32.513953488372096,
"alnum_prop": 0.5508189686002432,
"repo_name": "poojavade/Genomics_Docker",
"id": "549a46143e293b42ce700a2fbb9072539e91694e",
"size": "14003",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/gemini-0.10.0-py2.7.egg/gemini/gemini_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "AGS Script",
"bytes": "457842"
},
{
"name": "Assembly",
"bytes": "10509"
},
{
"name": "C",
"bytes": "1265138"
},
{
"name": "C++",
"bytes": "4734960"
},
{
"name": "CSS",
"bytes": "17332"
},
{
"name": "FORTRAN",
"bytes": "10375"
},
{
"name": "GLSL",
"bytes": "493"
},
{
"name": "Groff",
"bytes": "77173"
},
{
"name": "HTML",
"bytes": "395483"
},
{
"name": "Java",
"bytes": "9223"
},
{
"name": "JavaScript",
"bytes": "783663"
},
{
"name": "Jupyter Notebook",
"bytes": "189877"
},
{
"name": "Lua",
"bytes": "28217"
},
{
"name": "Makefile",
"bytes": "77825"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "Perl",
"bytes": "244796"
},
{
"name": "Python",
"bytes": "54562861"
},
{
"name": "R",
"bytes": "2568"
},
{
"name": "Shell",
"bytes": "40620"
},
{
"name": "Smarty",
"bytes": "21035"
},
{
"name": "TeX",
"bytes": "55310"
}
],
"symlink_target": ""
} |
import unittest
import pywraps2 as s2
class PyWrapS2TestCase(unittest.TestCase):
def testContainsIsWrappedCorrectly(self):
london = s2.S2LatLngRect(s2.S2LatLng.FromDegrees(51.3368602, 0.4931979),
s2.S2LatLng.FromDegrees(51.7323965, 0.1495211))
e14lj = s2.S2LatLngRect(s2.S2LatLng.FromDegrees(51.5213527, -0.0476026),
s2.S2LatLng.FromDegrees(51.5213527, -0.0476026))
self.assertTrue(london.Contains(e14lj))
def testS2CellIdEqualsIsWrappedCorrectly(self):
london = s2.S2LatLng.FromDegrees(51.5001525, -0.1262355)
cell = s2.S2CellId(london)
same_cell = s2.S2CellId(london)
self.assertEqual(cell, same_cell)
def testS2CellIdComparsionIsWrappedCorrectly(self):
london = s2.S2LatLng.FromDegrees(51.5001525, -0.1262355)
cell = s2.S2CellId(london)
self.assertLess(cell, cell.next())
self.assertGreater(cell.next(), cell)
def testS2CellIdGetEdgeNeighborsIsWrappedCorrectly(self):
cell = s2.S2CellId(0x466d319000000000)
expected_neighbors = [s2.S2CellId(0x466d31b000000000),
s2.S2CellId(0x466d317000000000),
s2.S2CellId(0x466d323000000000),
s2.S2CellId(0x466d31f000000000)]
neighbors = cell.GetEdgeNeighbors()
self.assertEqual(neighbors, expected_neighbors)
def testS2CellIdIntersectsIsTrueForOverlap(self):
cell1 = s2.S2CellId(0x89c259c000000000)
cell2 = s2.S2CellId(0x89c2590000000000)
self.assertTrue(cell1.intersects(cell2))
def testS2CellIdIntersectsIsFalseForNonOverlap(self):
cell1 = s2.S2CellId(0x89c259c000000000)
cell2 = s2.S2CellId(0x89e83d0000000000)
self.assertFalse(cell1.intersects(cell2))
def testS2HashingIsWrappedCorrectly(self):
london = s2.S2LatLng.FromDegrees(51.5001525, -0.1262355)
cell = s2.S2CellId(london)
same_cell = s2.S2CellId(london)
self.assertEqual(hash(cell), hash(same_cell))
def testCovererIsWrappedCorrectly(self):
london = s2.S2LatLngRect(s2.S2LatLng.FromDegrees(51.3368602, 0.4931979),
s2.S2LatLng.FromDegrees(51.7323965, 0.1495211))
e14lj = s2.S2LatLngRect(s2.S2LatLng.FromDegrees(51.5213527, -0.0476026),
s2.S2LatLng.FromDegrees(51.5213527, -0.0476026))
coverer = s2.S2RegionCoverer()
coverer.set_max_cells(6)
self.assertEqual(6, coverer.max_cells())
covering = coverer.GetCovering(e14lj)
self.assertLessEqual(len(covering), 6)
for cellid in covering:
self.assertTrue(london.Contains(s2.S2Cell(cellid)))
interior = coverer.GetInteriorCovering(e14lj)
for cellid in interior:
self.assertTrue(london.Contains(s2.S2Cell(cellid)))
def testS2CellUnionIsWrappedCorrectly(self):
cell_union = s2.S2CellUnion()
cell_union.Init([0x466d319000000000, 0x466d31b000000000])
self.assertEqual(cell_union.num_cells(), 2)
trondheim = s2.S2LatLng.FromDegrees(63.431052, 10.395083)
self.assertTrue(cell_union.Contains(s2.S2CellId(trondheim)))
def testS2PolygonIsWrappedCorrectly(self):
london = s2.S2LatLng.FromDegrees(51.5001525, -0.1262355)
polygon = s2.S2Polygon(s2.S2Cell(s2.S2CellId(london)))
self.assertEqual(polygon.num_loops(), 1)
point = london.ToPoint()
self.assertTrue(polygon.Contains(point))
def testS2LoopIsWrappedCorrectly(self):
london = s2.S2LatLng.FromDegrees(51.5001525, -0.1262355)
polygon = s2.S2Polygon(s2.S2Cell(s2.S2CellId(london)))
loop = polygon.loop(0)
self.assertTrue(loop.IsValid())
self.assertEqual(0, loop.depth())
self.assertFalse(loop.is_hole())
self.assertEqual(4, loop.num_vertices())
point = london.ToPoint()
self.assertTrue(loop.Contains(point))
def testS2PolygonCopiesLoopInConstructorBecauseItTakesOwnership(self):
london = s2.S2LatLng.FromDegrees(51.5001525, -0.1262355)
loop = s2.S2Loop(s2.S2Cell(s2.S2CellId(london)))
s2.S2Polygon(loop)
def testS2PolygonInitNestedIsWrappedCorrectly(self):
london = s2.S2LatLng.FromDegrees(51.5001525, -0.1262355)
small_loop = s2.S2Loop(s2.S2Cell(s2.S2CellId(london)))
big_loop = s2.S2Loop(s2.S2Cell(s2.S2CellId(london).parent(1)))
polygon = s2.S2Polygon()
polygon.InitNested([big_loop, small_loop])
def testS2PolygonInitNestedWithIncorrectTypeIsWrappedCorrectly(self):
london = s2.S2LatLng.FromDegrees(51.5001525, -0.1262355)
loop = s2.S2Loop(s2.S2Cell(s2.S2CellId(london)))
polygon = s2.S2Polygon()
with self.assertRaises(TypeError):
polygon.InitNested([loop, s2.S2CellId()])
def testS2PolygonGetAreaIsWrappedCorrectly(self):
# Cell at level 10 containing central London.
london_level_10 = s2.S2CellId(
s2.S2LatLng.FromDegrees(51.5001525, -0.1262355)).parent(10)
polygon = s2.S2Polygon(s2.S2Cell(london_level_10))
# Because S2Cell.ExactArea() isn't swigged, compare S2Polygon.GetArea() with
# S2CellUnion.ExactArea().
cell_union = s2.S2CellUnion()
cell_union.Init([london_level_10.id()])
self.assertAlmostEqual(cell_union.ExactArea(), polygon.GetArea(), places=10)
def testGetS2LatLngVertexIsWrappedCorrectly(self):
london = s2.S2LatLng.FromDegrees(51.5001525, -0.1262355)
polygon = s2.S2Polygon(s2.S2Cell(s2.S2CellId(london)))
loop = polygon.loop(0)
first_vertex = loop.GetS2LatLngVertex(0)
self.assertIsInstance(first_vertex, s2.S2LatLng)
self.assertEqual("51.500152,-0.126235", first_vertex.ToStringInDegrees())
second_vertex = loop.GetS2LatLngVertex(1)
self.assertIsInstance(second_vertex, s2.S2LatLng)
self.assertEqual("51.500153,-0.126235", second_vertex.ToStringInDegrees())
def testS2PolylineInitFromS2LatLngs(self):
e7_10deg = 0x5f5e100
list_ll = []
for lat, lng in [(0, 0), (0, e7_10deg), (e7_10deg, e7_10deg)]:
list_ll.append(s2.S2LatLng.FromE7(lat, lng))
line = s2.S2Polyline()
line.InitFromS2LatLngs(list_ll)
self.assertAlmostEqual(20.0, line.GetLength().degrees())
def testS2PolylineInitFromS2Points(self):
e7_10deg = 0x5f5e100
list_points = []
for lat, lng in [(0, 0), (0, e7_10deg), (e7_10deg, e7_10deg)]:
list_points.append(s2.S2LatLng.FromE7(lat, lng).ToPoint())
line = s2.S2Polyline()
line.InitFromS2Points(list_points)
self.assertAlmostEqual(20.0, line.GetLength().degrees())
def testS2PointsCanBeNormalized(self):
line = s2.S2Polyline()
line.InitFromS2LatLngs([s2.S2LatLng.FromDegrees(37.794484, -122.394871),
s2.S2LatLng.FromDegrees(37.762699, -122.435158)])
self.assertNotAlmostEqual(line.GetCentroid().Norm(), 1.0)
self.assertAlmostEqual(line.GetCentroid().Normalize().Norm(), 1.0)
def testS1AngleComparsionIsWrappedCorrectly(self):
ten_degrees = s2.S1Angle.Degrees(10)
one_hundred_degrees = s2.S1Angle.Degrees(100)
self.assertLess(ten_degrees, one_hundred_degrees)
self.assertGreater(one_hundred_degrees, ten_degrees)
def testS2PolygonIntersectsWithPolyline(self):
london = s2.S2LatLng.FromDegrees(51.5001525, -0.1262355)
polygon = s2.S2Polygon(s2.S2Cell(s2.S2CellId(london).parent(15)))
line = s2.S2Polyline()
line.InitFromS2LatLngs([s2.S2LatLng.FromDegrees(51.5, -0.128),
s2.S2LatLng.FromDegrees(51.5, -0.125)])
intersections = polygon.IntersectWithPolyline(line)
self.assertEqual(1, len(intersections))
def testCrossingSign(self):
a = s2.S2LatLng.FromDegrees(-1, 0).ToPoint()
b = s2.S2LatLng.FromDegrees(1, 0).ToPoint()
c = s2.S2LatLng.FromDegrees(0, -1).ToPoint()
d = s2.S2LatLng.FromDegrees(0, 1).ToPoint()
# SWIG flattens namespaces, so this is just s2.CrossingSign,
# not s2.S2.CrossingSign.
self.assertEqual(1, s2.CrossingSign(a, b, c, d))
def testGetIntersection(self):
a = s2.S2LatLng.FromDegrees(-1, 0).ToPoint()
b = s2.S2LatLng.FromDegrees(1, 0).ToPoint()
c = s2.S2LatLng.FromDegrees(0, -1).ToPoint()
d = s2.S2LatLng.FromDegrees(0, 1).ToPoint()
# SWIG namespace flattening as above.
intersection = s2.GetIntersection(a, b, c, d)
self.assertEqual(
"0.000000,0.000000", s2.S2LatLng(intersection).ToStringInDegrees())
def testS2CellDistance(self):
# Level-0 cell (i.e. face) centered at (0, 0)
cell = s2.S2Cell(s2.S2CellId(0x1000000000000000))
p1 = s2.S2LatLng.FromDegrees(0, 0).ToPoint()
self.assertTrue(cell.Contains(p1))
d1 = cell.GetDistance(p1).ToAngle().degrees()
# Inside, so distance is 0, but boundary distance is not.
self.assertEqual(0.0, d1)
bd1 = cell.GetBoundaryDistance(p1).ToAngle().degrees()
self.assertEqual(45.0, bd1)
p2 = s2.S2LatLng.FromDegrees(0, 90).ToPoint()
self.assertFalse(cell.Contains(p2))
d2 = cell.GetDistance(p2).ToAngle().degrees()
self.assertAlmostEqual(45.0, d2)
bd2 = cell.GetBoundaryDistance(p2).ToAngle().degrees()
# Outside, so distance and boundary distance are the same.
self.assertAlmostEqual(45.0, bd2)
def testS2Rotate(self):
mtv_a = s2.S2LatLng.FromDegrees(37.4402777, -121.9638888).ToPoint()
mtv_b = s2.S2LatLng.FromDegrees(37.3613888, -121.9283333).ToPoint()
angle = s2.S1Angle.Radians(0.039678)
point = s2.Rotate(mtv_a, mtv_b, angle)
self.assertEqual("37.439095,-121.967802",
s2.S2LatLng(point).ToStringInDegrees())
def testS2TurnAngle(self):
mtv_a = s2.S2LatLng.FromDegrees(37.4402777, -121.9638888).ToPoint()
mtv_b = s2.S2LatLng.FromDegrees(37.3613888, -121.9283333).ToPoint()
mtv_c = s2.S2LatLng.FromDegrees(37.3447222, -122.0308333).ToPoint()
angle = s2.TurnAngle(mtv_a, mtv_b, mtv_c)
self.assertAlmostEqual(-1.7132025, angle)
def testEncodeDecode(self):
london = s2.S2LatLng.FromDegrees(51.5001525, -0.1262355)
polygon = s2.S2Polygon(s2.S2Cell(s2.S2CellId(london).parent(15)))
self.assertEqual(polygon.num_loops(), 1)
encoder = s2.Encoder()
polygon.Encode(encoder)
encoded = encoder.buffer()
decoder = s2.Decoder(encoded)
decoded_polygon = s2.S2Polygon()
self.assertTrue(decoded_polygon.Decode(decoder))
self.assertEqual(decoded_polygon.num_loops(), 1)
self.assertTrue(decoded_polygon.Equals(polygon))
def testS2CapRegion(self):
center = s2.S2LatLng.FromDegrees(2.0, 3.0).ToPoint()
cap = s2.S2Cap(center, s2.S1Angle.Degrees(1.0))
inside = s2.S2LatLng.FromDegrees(2.1, 2.9).ToPoint()
outside = s2.S2LatLng.FromDegrees(0.0, 0.0).ToPoint()
self.assertTrue(cap.Contains(inside))
self.assertFalse(cap.Contains(outside))
self.assertTrue(cap.Contains(s2.S2Cell(inside)))
self.assertFalse(cap.Contains(s2.S2Cell(outside)))
self.assertTrue(cap.MayIntersect(s2.S2Cell(inside)))
self.assertFalse(cap.MayIntersect(s2.S2Cell(outside)))
self.assertTrue(cap.ApproxEquals(cap.GetCapBound()))
rect_bound = cap.GetRectBound()
self.assertTrue(rect_bound.Contains(inside))
self.assertFalse(rect_bound.Contains(outside))
def testS2LatLngRectRegion(self):
rect = s2.S2LatLngRect(s2.S2LatLng.FromDegrees(1.0, 2.0),
s2.S2LatLng.FromDegrees(3.0, 4.0))
inside = s2.S2LatLng.FromDegrees(2.0, 3.0).ToPoint()
outside = s2.S2LatLng.FromDegrees(0.0, 0.0).ToPoint()
self.assertTrue(rect.Contains(inside))
self.assertFalse(rect.Contains(outside))
self.assertTrue(rect.Contains(s2.S2Cell(inside)))
self.assertFalse(rect.Contains(s2.S2Cell(outside)))
self.assertTrue(rect.MayIntersect(s2.S2Cell(inside)))
self.assertFalse(rect.MayIntersect(s2.S2Cell(outside)))
cap_bound = rect.GetCapBound()
self.assertTrue(cap_bound.Contains(inside))
self.assertFalse(cap_bound.Contains(outside))
self.assertTrue(rect.ApproxEquals(rect.GetRectBound()))
def testS2CellRegion(self):
cell = s2.S2Cell(s2.S2CellId(s2.S2LatLng.FromDegrees(3.0, 4.0)).parent(8))
inside = s2.S2LatLng.FromDegrees(3.0, 4.0).ToPoint()
outside = s2.S2LatLng.FromDegrees(30.0, 40.0).ToPoint()
self.assertTrue(cell.Contains(inside))
self.assertFalse(cell.Contains(outside))
self.assertTrue(cell.Contains(s2.S2Cell(inside)))
self.assertFalse(cell.Contains(s2.S2Cell(outside)))
self.assertTrue(cell.MayIntersect(s2.S2Cell(inside)))
self.assertFalse(cell.MayIntersect(s2.S2Cell(outside)))
cap_bound = cell.GetCapBound()
self.assertTrue(cap_bound.Contains(inside))
self.assertFalse(cap_bound.Contains(outside))
rect_bound = cell.GetRectBound()
self.assertTrue(rect_bound.Contains(inside))
self.assertFalse(rect_bound.Contains(outside))
def _checkRegion(self, region, inside, outside):
    """Shared assertions for any S2Region implementation.

    Checks point containment, cell containment, cell intersection, and
    that both the cap and rect bounds contain/exclude the sample points.
    Previously this 12-assert sequence was copy-pasted into each of the
    three region tests below.
    """
    self.assertTrue(region.Contains(inside))
    self.assertFalse(region.Contains(outside))
    self.assertTrue(region.Contains(s2.S2Cell(inside)))
    self.assertFalse(region.Contains(s2.S2Cell(outside)))
    self.assertTrue(region.MayIntersect(s2.S2Cell(inside)))
    self.assertFalse(region.MayIntersect(s2.S2Cell(outside)))
    cap_bound = region.GetCapBound()
    self.assertTrue(cap_bound.Contains(inside))
    self.assertFalse(cap_bound.Contains(outside))
    rect_bound = region.GetRectBound()
    self.assertTrue(rect_bound.Contains(inside))
    self.assertFalse(rect_bound.Contains(outside))

def testS2CellUnionRegion(self):
    """An S2CellUnion built from one level-8 cell behaves as an S2Region."""
    cell_id = s2.S2CellId(s2.S2LatLng.FromDegrees(3.0, 4.0)).parent(8)
    cell_union = s2.S2CellUnion()
    cell_union.Init([cell_id.id()])
    inside = s2.S2LatLng.FromDegrees(3.0, 4.0).ToPoint()
    outside = s2.S2LatLng.FromDegrees(30.0, 40.0).ToPoint()
    self._checkRegion(cell_union, inside, outside)

def testS2LoopRegion(self):
    """An S2Loop built from a level-8 cell behaves as an S2Region."""
    cell = s2.S2Cell(s2.S2CellId(s2.S2LatLng.FromDegrees(3.0, 4.0)).parent(8))
    loop = s2.S2Loop(cell)
    inside = s2.S2LatLng.FromDegrees(3.0, 4.0).ToPoint()
    outside = s2.S2LatLng.FromDegrees(30.0, 40.0).ToPoint()
    self._checkRegion(loop, inside, outside)

def testS2PolygonRegion(self):
    """An S2Polygon built from a level-8 cell behaves as an S2Region."""
    cell = s2.S2Cell(s2.S2CellId(s2.S2LatLng.FromDegrees(3.0, 4.0)).parent(8))
    polygon = s2.S2Polygon(cell)
    inside = s2.S2LatLng.FromDegrees(3.0, 4.0).ToPoint()
    outside = s2.S2LatLng.FromDegrees(30.0, 40.0).ToPoint()
    self._checkRegion(polygon, inside, outside)
def testS2PolylineRegion(self):
    """S2Polyline implements the S2Region interface, except that point
    containment is always false for a (zero-area) polyline."""
    polyline = s2.S2Polyline()
    polyline.InitFromS2LatLngs([s2.S2LatLng.FromDegrees(0.0, 0.0),
                                s2.S2LatLng.FromDegrees(1.0, 1.0)])
    on_line = s2.S2LatLng.FromDegrees(0.0, 0.0).ToPoint()
    off_line = s2.S2LatLng.FromDegrees(2.0, 2.0).ToPoint()
    # Contains(S2Point) always returns false.
    self.assertFalse(polyline.Contains(on_line))
    self.assertFalse(
        polyline.Contains(s2.S2Cell(s2.S2LatLng.FromDegrees(0.0, 0.0))))
    # But cells touching the line may intersect it.
    self.assertTrue(
        polyline.MayIntersect(s2.S2Cell(s2.S2LatLng.FromDegrees(0.0, 0.0))))
    self.assertFalse(
        polyline.MayIntersect(s2.S2Cell(s2.S2LatLng.FromDegrees(3.0, 4.0))))
    # Both bounds contain a point on the line and exclude one off it.
    cap_bound = polyline.GetCapBound()
    self.assertTrue(cap_bound.Contains(on_line))
    self.assertFalse(cap_bound.Contains(off_line))
    rect_bound = polyline.GetRectBound()
    self.assertTrue(rect_bound.Contains(on_line))
    self.assertFalse(rect_bound.Contains(off_line))
def testS2CellIdCenterSiTi(self):
    """The (si, ti) center coordinates of a cell end in a 1 followed by
    (30 - level) zeros.

    The original test hand-unrolled the same mask check for six levels;
    the loop below performs the identical assertions.
    """
    cell = s2.S2CellId.FromFacePosLevel(3, 0x12345678, s2.S2CellId.kMaxLevel)
    # Leaf level (30): lowest bit of si/ti must be set.
    face, si, ti = cell.GetCenterSiTi()
    self.assertEqual(3, face)
    self.assertEqual(1 << 0, si & 1)
    self.assertEqual(1 << 0, ti & 1)
    # Ancestors at levels 29, 28, 20, 10 and 0: at level (kMaxLevel - k)
    # the low (k + 1) bits of si/ti must be exactly 1 << k.
    for k in (1, 2, 10, 20, 30):
        face, si, ti = cell.parent(s2.S2CellId.kMaxLevel - k).GetCenterSiTi()
        mask = (1 << (k + 1)) - 1
        self.assertEqual(3, face)
        self.assertEqual(1 << k, si & mask)
        self.assertEqual(1 << k, ti & mask)
def testS2CellIdToFromFaceIJ(self):
    """(face, i, j) round-trips through FromFaceIJ / ToFaceIJOrientation."""
    leaf = s2.S2CellId.FromFaceIJ(3, 1234, 5678)
    face, i, j, _ = leaf.ToFaceIJOrientation()
    self.assertEqual(3, face)
    self.assertEqual(1234, i)
    self.assertEqual(5678, j)
if __name__ == "__main__":
    # Allow running this test module directly, outside a test harness.
    unittest.main()
| {
"content_hash": "b2c549c958dc95ecfde931c8edba347b",
"timestamp": "",
"source": "github",
"line_count": 449,
"max_line_length": 80,
"avg_line_length": 40.16035634743875,
"alnum_prop": 0.6955967169476486,
"repo_name": "graetzer/arangodb",
"id": "1c6f68846c62b9e7f64a6ae1acb38cfa1e62ab0a",
"size": "18635",
"binary": false,
"copies": "3",
"ref": "refs/heads/devel",
"path": "3rdParty/s2geometry/dfefe0c/src/python/pywraps2_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Ada",
"bytes": "89079"
},
{
"name": "Assembly",
"bytes": "391227"
},
{
"name": "Awk",
"bytes": "4272"
},
{
"name": "Batchfile",
"bytes": "63025"
},
{
"name": "C",
"bytes": "7952921"
},
{
"name": "C#",
"bytes": "96431"
},
{
"name": "C++",
"bytes": "274543069"
},
{
"name": "CMake",
"bytes": "646773"
},
{
"name": "CSS",
"bytes": "1054160"
},
{
"name": "Cuda",
"bytes": "52444"
},
{
"name": "DIGITAL Command Language",
"bytes": "259402"
},
{
"name": "Emacs Lisp",
"bytes": "14637"
},
{
"name": "Fortran",
"bytes": "1856"
},
{
"name": "Groovy",
"bytes": "131"
},
{
"name": "HTML",
"bytes": "2215528"
},
{
"name": "Java",
"bytes": "922156"
},
{
"name": "JavaScript",
"bytes": "53300241"
},
{
"name": "LLVM",
"bytes": "24129"
},
{
"name": "Lex",
"bytes": "1231"
},
{
"name": "Lua",
"bytes": "17899"
},
{
"name": "M4",
"bytes": "575204"
},
{
"name": "Makefile",
"bytes": "492694"
},
{
"name": "Max",
"bytes": "36857"
},
{
"name": "Module Management System",
"bytes": "1545"
},
{
"name": "NSIS",
"bytes": "28404"
},
{
"name": "Objective-C",
"bytes": "18435"
},
{
"name": "Objective-C++",
"bytes": "2503"
},
{
"name": "PHP",
"bytes": "107274"
},
{
"name": "Pascal",
"bytes": "150599"
},
{
"name": "Perl",
"bytes": "564374"
},
{
"name": "Perl6",
"bytes": "9918"
},
{
"name": "Python",
"bytes": "4527647"
},
{
"name": "QMake",
"bytes": "16692"
},
{
"name": "R",
"bytes": "5123"
},
{
"name": "Rebol",
"bytes": "354"
},
{
"name": "Roff",
"bytes": "1007604"
},
{
"name": "Ruby",
"bytes": "929950"
},
{
"name": "SAS",
"bytes": "1847"
},
{
"name": "Scheme",
"bytes": "10604"
},
{
"name": "Shell",
"bytes": "424800"
},
{
"name": "Swift",
"bytes": "116"
},
{
"name": "Tcl",
"bytes": "1172"
},
{
"name": "TeX",
"bytes": "32117"
},
{
"name": "Visual Basic",
"bytes": "11568"
},
{
"name": "XSLT",
"bytes": "551977"
},
{
"name": "Yacc",
"bytes": "53072"
}
],
"symlink_target": ""
} |
from tick.base import actual_kwargs
from tick.base.learner import LearnerGLM
from .model_linreg import ModelLinReg
class LinearRegression(LearnerGLM):
    """
    Linear regression learner, with many choices of penalization and
    solvers.

    Parameters
    ----------
    C : `float`, default=1e3
        Level of penalization

    penalty : {'none', 'l1', 'l2', 'elasticnet', 'tv'}, default='l2'
        The penalization to use. Default is ridge penalization

    solver : {'gd', 'agd', 'svrg'}, default='svrg'
        The name of the solver to use

    fit_intercept : `bool`, default=True
        If `True`, include an intercept in the model

    warm_start : `bool`, default=False
        If true, learning will start from the last reached solution

    step : `float`, default=None
        Initial step size used for learning. Used in fista, sgd and svrg
        solvers

    tol : `float`, default=1e-5
        The tolerance of the solver (iterations stop when the stopping
        criterion is below it). By default the solver does ``max_iter``
        iterations

    max_iter : `int`, default=100
        Maximum number of iterations of the solver

    verbose : `bool`, default=False
        If `True`, we verbose things, otherwise the solver does not
        print anything (but records information in history anyway)

    print_every : `int`, default=10
        Print history information when ``n_iter`` (iteration number) is
        a multiple of ``print_every``

    record_every : `int`, default=1
        Record history information when ``n_iter`` (iteration number) is
        a multiple of ``record_every``

    Other Parameters
    ----------------
    elastic_net_ratio : `float`, default=0.95
        Ratio of elastic net mixing parameter with 0 <= ratio <= 1.
        For ratio = 0 this is ridge (L2 squared) regularization
        For ratio = 1 this is lasso (L1) regularization
        For 0 < ratio < 1, the regularization is a linear combination
        of L1 and L2.
        Used in 'elasticnet' penalty

    random_state : `int` seed, `RandomState` instance, or `None` (default)
        The seed that will be used by stochastic solvers. Used in 'sgd',
        'svrg', and 'sdca' solvers

    blocks_start : `numpy.array`, shape=(n_features,), default=None
        The indices of the first column of each binarized feature blocks. It
        corresponds to the ``feature_indices`` property of the
        ``FeaturesBinarizer`` preprocessing.
        Used in 'binarsity' penalty

    blocks_length : `numpy.array`, shape=(n_features,), default=None
        The length of each binarized feature blocks. It corresponds to the
        ``n_values`` property of the ``FeaturesBinarizer`` preprocessing.
        Used in 'binarsity' penalty

    Attributes
    ----------
    weights : `np.array`, shape=(n_features,)
        The learned weights of the model (not including the intercept)

    intercept : `float` or `None`
        The intercept, if ``fit_intercept=True``, otherwise `None`
    """

    _solvers = {'gd': 'GD', 'agd': 'AGD', 'svrg': 'SVRG'}

    _attrinfos = {"_actual_kwargs": {"writable": False}}

    @actual_kwargs
    def __init__(self, fit_intercept=True, penalty='l2', C=1e3, solver='svrg',
                 step=None, tol=1e-5, max_iter=100, verbose=False,
                 warm_start=False, print_every=10, record_every=1,
                 elastic_net_ratio=0.95, random_state=None, blocks_start=None,
                 blocks_length=None):
        # Remember which keyword arguments the caller actually supplied
        # (recorded by the @actual_kwargs decorator).
        self._actual_kwargs = LinearRegression.__init__.actual_kwargs
        LearnerGLM.__init__(
            self, fit_intercept=fit_intercept, penalty=penalty, C=C,
            solver=solver, step=step, tol=tol, max_iter=max_iter,
            verbose=verbose, warm_start=warm_start, print_every=print_every,
            record_every=record_every, elastic_net_ratio=elastic_net_ratio,
            random_state=random_state, blocks_start=blocks_start,
            blocks_length=blocks_length)

    def _construct_model_obj(self, fit_intercept=True):
        # The goodness-of-fit model that the solver optimizes.
        return ModelLinReg(fit_intercept)

    def predict(self, X):
        """Predict values for given samples

        Parameters
        ----------
        X : `np.ndarray` or `scipy.sparse.csr_matrix`, shape=(n_samples, n_features)
            Features matrix to predict for.

        Returns
        -------
        output : `np.array`, shape=(n_samples,)
            Returns predicted values.
        """
        # Guard clause: the model must have been fitted first.
        if not self._fitted:
            raise ValueError("You must call ``fit`` before")
        features = self._safe_array(X, dtype=X.dtype)
        prediction = features.dot(self.weights)
        if self.intercept:
            prediction = prediction + self.intercept
        return prediction

    def score(self, X, y):
        """Returns the coefficient of determination R^2 of the fitted linear
        regression model, computed on the given features matrix and labels.

        Parameters
        ----------
        X : `np.ndarray` or `scipy.sparse.csr_matrix`, shape=(n_samples, n_features)
            Features matrix.

        y : `np.ndarray`, shape = (n_samples,)
            Labels vector.

        Returns
        -------
        score : `float`
            R^2 of self.predict(X) against y
        """
        # Local import so scikit-learn is only required when scoring.
        from sklearn.metrics import r2_score
        return r2_score(y, self.predict(X))
| {
"content_hash": "df3bb6092115b2a6d764d8df29683227",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 84,
"avg_line_length": 36.108108108108105,
"alnum_prop": 0.6087200598802395,
"repo_name": "Dekken/tick",
"id": "68b009472e0092a3002aec4271cb92533fed5a11",
"size": "5369",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tick/linear_model/linear_regression.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "6660"
},
{
"name": "C++",
"bytes": "1181742"
},
{
"name": "CMake",
"bytes": "22073"
},
{
"name": "Dockerfile",
"bytes": "2017"
},
{
"name": "Python",
"bytes": "1450866"
},
{
"name": "Shell",
"bytes": "33446"
}
],
"symlink_target": ""
} |
from .base import SourceTestCase
class TestCase(SourceTestCase):
    """Exercises the 'delimited' (CSV) source generator against the
    chinook sample data."""

    generator = 'delimited'
    output_name = 'chinook_tracks_csv.json'

    def generate(self):
        # Run the delimited client over the sample CSV and return the
        # provenance facts it produces.
        csv_path = self.input_path('chinook_tracks.csv')
        return self.module.Client(uri=csv_path).generate()
| {
"content_hash": "1d93bbaa4ca7d93aa5427299f06abedf",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 52,
"avg_line_length": 26.90909090909091,
"alnum_prop": 0.6756756756756757,
"repo_name": "chop-dbhi/prov-extractor",
"id": "2ad0146390d390d7939dd0c0016c6561716a17ee",
"size": "296",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/sources/delimited.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "PLpgSQL",
"bytes": "1952523"
},
{
"name": "Python",
"bytes": "96517"
},
{
"name": "Shell",
"bytes": "2268"
}
],
"symlink_target": ""
} |
from smv import *
from smv.dqm import *
from smv.iomod import SmvCsvInputFile
from smv.conn import SmvHdfsEmptyConn
from pyspark.sql.functions import col, lit
class D1(SmvCsvStringData):
    """Tiny inline CSV dataset: two rows with a string key and an int value."""

    def schemaStr(self):
        # Schema for the inline rows returned by dataStr().
        return "k:String;v:Integer"

    def dataStr(self):
        return "a,1;b,2"
class T(SmvCsvInputFile):
    """CSV input module reading the file produced by the export-CSV test."""

    def connectionName(self):
        # No named connection is configured; see get_connection() below.
        return None

    def get_connection(self):
        # Empty HDFS connection: the file name below is used as-is.
        return SmvHdfsEmptyConn

    def fileName(self):
        return "./target/python-test-export-csv.csv"

    def csvAttr(self):
        # The exported file carries a header row.
        return self.smvApp.defaultCsvWithHeader()
class X(SmvModule):
    """Ephemeral module that produces a small fixed DataFrame for tests."""

    def isEphemeral(self): return True

    def requiresDS(self): return []

    def run(self, i):
        # The column-level @metadata annotation on "t" is part of the
        # fixture; downstream tests read it back.
        return self.smvApp.createDF("""k:String; t:Integer @metadata={"smvDesc":"the time sequence"}; v:Double""",
                                    "z,1,0.2;z,2,1.4;z,5,2.2;a,1,0.3;")
| {
"content_hash": "770bd0108d54ea062b43f583d8b4d5bd",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 114,
"avg_line_length": 30.964285714285715,
"alnum_prop": 0.6643598615916955,
"repo_name": "TresAmigosSD/SMV",
"id": "83ed74ea64bfe166026a3922504f619284e05d94",
"size": "1427",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/test/python/testDataFrameHelper/stage/modules.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "753"
},
{
"name": "Dockerfile",
"bytes": "4707"
},
{
"name": "HTML",
"bytes": "587"
},
{
"name": "Java",
"bytes": "11519"
},
{
"name": "Jupyter Notebook",
"bytes": "288"
},
{
"name": "Makefile",
"bytes": "5176"
},
{
"name": "Python",
"bytes": "554607"
},
{
"name": "Scala",
"bytes": "473854"
},
{
"name": "Shell",
"bytes": "60010"
}
],
"symlink_target": ""
} |
import os, sys, tempfile, unittest
import lxml.etree as etree
# Root of the ecromedos checkout, derived from this test file's location
# (two directories up from test/ut/).
ECMDS_INSTALL_DIR = os.path.normpath(os.path.join(
    os.path.dirname(os.path.realpath(sys.argv[0])),
    "..", ".."
))

# Fixture directory holding the XML documents and images used by the tests.
ECMDS_TEST_DATA_DIR = os.path.join(
    ECMDS_INSTALL_DIR,
    "test",
    "ut",
    "data",
    "plugin_picture"
)

# Make the in-tree 'lib' package importable before the project imports below.
sys.path.insert(1, ECMDS_INSTALL_DIR + os.sep + 'lib')
from net.ecromedos.error import ECMDSPluginError
import net.ecromedos.plugins.picture as picture
class UTTestPluginPicture(unittest.TestCase):
    """Unit tests for the 'picture' plugin.

    Each test feeds one XML fixture through the plugin for a given output
    target and checks that the expected converted image file is produced.
    The tool-path configuration and the parse/process/flush/unlink
    boilerplate were previously duplicated in every test method.
    """

    @staticmethod
    def _make_config(tmpdir):
        """Plugin configuration shared by all tests: fixed tool paths plus
        a per-test temporary working directory."""
        return {
            "latex_bin": "/usr/bin/latex",
            "dvipng_bin": "/usr/bin/dvipng",
            "convert_bin": "/usr/bin/convert",
            "identify_bin": "/usr/bin/identify",
            "tmp_dir": tmpdir
        }

    def _convert(self, xml_name, target, output_name):
        """Run the plugin on the first <img> of fixture ``xml_name`` for
        the given ``target`` and remove the produced ``output_name``."""
        tree = etree.parse(ECMDS_TEST_DATA_DIR + os.sep + xml_name)
        root = tree.getroot()
        with tempfile.TemporaryDirectory() as tmpdir:
            plugin = picture.getInstance(self._make_config(tmpdir))
            plugin.process(root.find("./img"), target)
            plugin.flush()
        # The converted image lands in the current working directory.
        os.unlink(output_name)

    def test_gracefulFailOnFileNotFound(self):
        """A missing bitmap must raise ECMDSPluginError with a clear message."""
        tree = etree.parse(ECMDS_TEST_DATA_DIR + os.sep + "no_such_img_file.xml")
        root = tree.getroot()
        with tempfile.TemporaryDirectory() as tmpdir:
            plugin = picture.getInstance(self._make_config(tmpdir))
            try:
                plugin.process(root.find("./img"), "xhtml")
            except ECMDSPluginError as e:
                self.assertTrue(e.msg().startswith(
                    "Could not find bitmap file at location"))
            else:
                # Bug fix: the original test silently passed when no
                # exception was raised at all.
                self.fail("expected an ECMDSPluginError to be raised")

    def test_targetPDFLatexEPStoPDF(self):
        self._convert("ecromedos_eps.xml", "pdflatex", "img000001.pdf")

    def test_targetLatexIMGtoEPS(self):
        self._convert("ecromedos_png.xml", "latex", "img000001.eps")

    def test_targetXHTMLSetScreenWidth(self):
        self._convert("ecromedos_png_explicit_width.xml", "xhtml", "img000001.png")

    def test_targetXHTMLIdentifyWidth(self):
        self._convert("ecromedos_png.xml", "xhtml", "img000001.png")

    def test_targetXHTMLEPStoIMG(self):
        self._convert("ecromedos_eps.xml", "xhtml", "img000001.jpg")
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| {
"content_hash": "2475bf5c39259b93e7ea1d72f81cbffe",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 93,
"avg_line_length": 31.561290322580646,
"alnum_prop": 0.5222812755519215,
"repo_name": "tobijk/ecromedos",
"id": "da0ad1b570f78317c20bf3d165320d49885a3e4d",
"size": "4941",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/ut/test_plugin_picture.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10914"
},
{
"name": "Python",
"bytes": "180602"
},
{
"name": "Shell",
"bytes": "1247"
},
{
"name": "XSLT",
"bytes": "267254"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import os
# NOTE(review): this Twisted variant of the test suite is only active when
# USE_TWISTED is set in the environment. In the original layout everything
# below appears to be nested under this guard — indentation was lost in
# this copy; confirm against upstream before relying on it.
if os.environ.get('USE_TWISTED', False):
    from twisted.internet.defer import inlineCallbacks, Deferred, returnValue
    from twisted.internet.defer import succeed, DeferredList
    from twisted.python import log
    from twisted.trial import unittest
    from six import PY3
    from autobahn import util
    from autobahn.twisted.wamp import ApplicationSession
    from autobahn.wamp import message, role, serializer, types
    from autobahn.wamp.exception import ApplicationError, NotAuthorized
    from autobahn.wamp.exception import InvalidUri, ProtocolError

    # Provide a Python-2-style ``long`` alias so ID-type checks can accept
    # both int and long on either major version.
    if PY3:
        long = int
class MockTransport(object):
    """In-memory stand-in for a WAMP transport.

    On construction it attaches the given session handler, immediately
    sends it a Welcome (acting as both broker and dealer), and then fakes
    the router side of the protocol inside send(): each outgoing message
    is answered synchronously with the reply a real router would produce.
    """

    def __init__(self, handler):
        # When True, serialized payloads are printed for debugging.
        self._log = False
        self._handler = handler
        self._serializer = serializer.JsonSerializer()
        # procedure URI -> registration ID
        self._registrations = {}
        # invocation request ID -> originating call request ID
        self._invocations = {}
        #: str -> ID
        self._subscription_topics = {}
        self._handler.onOpen(self)
        self._my_session_id = util.id()
        # Advertise both broker and dealer roles so publish/subscribe and
        # call/register all work against this single mock.
        roles = {u'broker': role.RoleBrokerFeatures(), u'dealer': role.RoleDealerFeatures()}
        msg = message.Welcome(self._my_session_id, roles)
        self._handler.onMessage(msg)
        # Separate session used only for its router-side request-ID generator.
        self._fake_router_session = ApplicationSession()

    def send(self, msg):
        """Fake the router: compute the reply for ``msg`` (if any) and feed
        it straight back into the attached session handler."""
        if self._log:
            payload, isbinary = self._serializer.serialize(msg)
            print("Send: {0}".format(payload))

        reply = None

        if isinstance(msg, message.Publish):
            # Only com.myapp.* topics are authorized; empty URIs are invalid.
            if msg.topic.startswith(u'com.myapp'):
                if msg.acknowledge:
                    reply = message.Published(msg.request, self._fake_router_session._request_id_gen.next())
            elif len(msg.topic) == 0:
                reply = message.Error(message.Publish.MESSAGE_TYPE, msg.request, u'wamp.error.invalid_uri')
            else:
                reply = message.Error(message.Publish.MESSAGE_TYPE, msg.request, u'wamp.error.not_authorized')

        elif isinstance(msg, message.Call):
            # Fixed canned results for procedure1..3; com.myapp.myproc* is
            # routed back to the session's own registration as an Invocation.
            if msg.procedure == u'com.myapp.procedure1':
                reply = message.Result(msg.request, args=[100])
            elif msg.procedure == u'com.myapp.procedure2':
                reply = message.Result(msg.request, args=[1, 2, 3])
            elif msg.procedure == u'com.myapp.procedure3':
                reply = message.Result(msg.request, args=[1, 2, 3], kwargs={u'foo': u'bar', u'baz': 23})
            elif msg.procedure.startswith(u'com.myapp.myproc'):
                registration = self._registrations[msg.procedure]
                request = self._fake_router_session._request_id_gen.next()
                if request in self._invocations:
                    raise ProtocolError("duplicate invocation")
                self._invocations[request] = msg.request
                reply = message.Invocation(
                    request, registration,
                    args=msg.args,
                    kwargs=msg.kwargs,
                    receive_progress=msg.receive_progress,
                )
            else:
                reply = message.Error(message.Call.MESSAGE_TYPE, msg.request, u'wamp.error.no_such_procedure')

        elif isinstance(msg, message.Yield):
            # Map the invocation back to its originating call request.
            if msg.request in self._invocations:
                request = self._invocations[msg.request]
                reply = message.Result(request, args=msg.args, kwargs=msg.kwargs, progress=msg.progress)

        elif isinstance(msg, message.Subscribe):
            # Re-use the same subscription ID for repeated subscribes to
            # the same topic, as a real broker would.
            topic = msg.topic
            if topic in self._subscription_topics:
                reply_id = self._subscription_topics[topic]
            else:
                reply_id = self._fake_router_session._request_id_gen.next()
                self._subscription_topics[topic] = reply_id
            reply = message.Subscribed(msg.request, reply_id)

        elif isinstance(msg, message.Unsubscribe):
            reply = message.Unsubscribed(msg.request)

        elif isinstance(msg, message.Register):
            registration = self._fake_router_session._request_id_gen.next()
            self._registrations[msg.procedure] = registration
            reply = message.Registered(msg.request, registration)

        elif isinstance(msg, message.Unregister):
            reply = message.Unregistered(msg.request)

        elif isinstance(msg, message.Error):
            # since I'm basically a Dealer, I forward on the
            # error, but reply to my own request/invocation
            request = self._invocations[msg.request]
            reply = message.Error(
                message.Call.MESSAGE_TYPE,
                request,
                msg.error,
                args=msg.args,
                kwargs=msg.kwargs,
            )

        if reply:
            if self._log:
                payload, isbinary = self._serializer.serialize(reply)
                print("Receive: {0}".format(payload))
            self._handler.onMessage(reply)

    def isOpen(self):
        # The mock transport is always "connected".
        return True

    def close(self):
        pass

    def abort(self):
        pass
class TestClose(unittest.TestCase):
    """Regression tests around session teardown."""

    def test_server_abort(self):
        # onClose(False) used to raise an exception when this test-case
        # was written; it must be a clean no-op.
        session = ApplicationSession()
        MockTransport(session)
        session.onClose(False)
class TestPublisher(unittest.TestCase):
@inlineCallbacks
def test_publish(self):
    """An unacknowledged publish always resolves to None, for every
    calling convention."""
    session = ApplicationSession()
    MockTransport(session)

    result = yield session.publish(u'com.myapp.topic1')
    self.assertEqual(result, None)

    result = yield session.publish(u'com.myapp.topic1', 1, 2, 3)
    self.assertEqual(result, None)

    result = yield session.publish(u'com.myapp.topic1', 1, 2, 3, foo=23, bar='hello')
    self.assertEqual(result, None)

    result = yield session.publish(u'com.myapp.topic1', options=types.PublishOptions(exclude_me=False))
    self.assertEqual(result, None)

    result = yield session.publish(u'com.myapp.topic1', 1, 2, 3, foo=23, bar='hello', options=types.PublishOptions(exclude_me=False, exclude=[100, 200, 300]))
    self.assertEqual(result, None)
@inlineCallbacks
def test_publish_acknowledged(self):
    """An acknowledged publish resolves to a Publication whose id is an
    integer, for every calling convention."""
    session = ApplicationSession()
    MockTransport(session)

    pub = yield session.publish(u'com.myapp.topic1', options=types.PublishOptions(acknowledge=True))
    self.assertTrue(type(pub.id) in (int, long))

    pub = yield session.publish(u'com.myapp.topic1', 1, 2, 3, options=types.PublishOptions(acknowledge=True))
    self.assertTrue(type(pub.id) in (int, long))

    pub = yield session.publish(u'com.myapp.topic1', 1, 2, 3, foo=23, bar='hello', options=types.PublishOptions(acknowledge=True))
    self.assertTrue(type(pub.id) in (int, long))

    pub = yield session.publish(u'com.myapp.topic1', options=types.PublishOptions(exclude_me=False, acknowledge=True))
    self.assertTrue(type(pub.id) in (int, long))

    pub = yield session.publish(u'com.myapp.topic1', 1, 2, 3, foo=23, bar='hello', options=types.PublishOptions(exclude_me=False, exclude=[100, 200, 300], acknowledge=True))
    self.assertTrue(type(pub.id) in (int, long))
@inlineCallbacks
def test_publish_undefined_exception(self):
    """Router-side errors surface as the generic ApplicationError when no
    specific exception class has been define()d."""
    session = ApplicationSession()
    MockTransport(session)

    opts = types.PublishOptions(acknowledge=True)
    # Unauthorized topic and invalid (empty) URI both map to ApplicationError.
    yield self.assertFailure(session.publish(u'de.myapp.topic1', options=opts), ApplicationError)
    yield self.assertFailure(session.publish(u'', options=opts), ApplicationError)
@inlineCallbacks
def test_publish_defined_exception(self):
    """Once define()d, router-side errors surface as the mapped exception
    class rather than the generic ApplicationError."""
    session = ApplicationSession()
    MockTransport(session)

    opts = types.PublishOptions(acknowledge=True)

    session.define(NotAuthorized)
    yield self.assertFailure(session.publish(u'de.myapp.topic1', options=opts), NotAuthorized)

    session.define(InvalidUri)
    yield self.assertFailure(session.publish(u'', options=opts), InvalidUri)
@inlineCallbacks
def test_call(self):
    """procedure1 returns the fixed value 100 for every calling convention."""
    session = ApplicationSession()
    MockTransport(session)

    outcome = yield session.call(u'com.myapp.procedure1')
    self.assertEqual(outcome, 100)

    outcome = yield session.call(u'com.myapp.procedure1', 1, 2, 3)
    self.assertEqual(outcome, 100)

    outcome = yield session.call(u'com.myapp.procedure1', 1, 2, 3, foo=23, bar='hello')
    self.assertEqual(outcome, 100)

    outcome = yield session.call(u'com.myapp.procedure1', options=types.CallOptions(timeout=10000))
    self.assertEqual(outcome, 100)

    outcome = yield session.call(u'com.myapp.procedure1', 1, 2, 3, foo=23, bar='hello', options=types.CallOptions(timeout=10000))
    self.assertEqual(outcome, 100)
@inlineCallbacks
def test_call_with_complex_result(self):
    """Multi-positional and keyword results come back wrapped in
    types.CallResult."""
    session = ApplicationSession()
    MockTransport(session)

    # procedure2 returns several positional results.
    outcome = yield session.call(u'com.myapp.procedure2')
    self.assertIsInstance(outcome, types.CallResult)
    self.assertEqual(outcome.results, (1, 2, 3))
    self.assertEqual(outcome.kwresults, {})

    # procedure3 additionally returns keyword results.
    outcome = yield session.call(u'com.myapp.procedure3')
    self.assertIsInstance(outcome, types.CallResult)
    self.assertEqual(outcome.results, (1, 2, 3))
    self.assertEqual(outcome.kwresults, {'foo': 'bar', 'baz': 23})
@inlineCallbacks
def test_subscribe(self):
    """subscribe() yields a Subscription carrying an integer id, with and
    without SubscribeOptions."""
    session = ApplicationSession()
    MockTransport(session)

    def on_event(*args, **kwargs):
        print("got event", args, kwargs)

    sub = yield session.subscribe(on_event, u'com.myapp.topic1')
    self.assertTrue(type(sub.id) in (int, long))

    sub = yield session.subscribe(on_event, u'com.myapp.topic1', options=types.SubscribeOptions(match=u'wildcard'))
    self.assertTrue(type(sub.id) in (int, long))
@inlineCallbacks
def test_double_subscribe(self):
    """Two subscriptions to the same topic share one subscription ID, and
    one Event fires both handlers."""
    handler = ApplicationSession()
    MockTransport(handler)

    event0 = Deferred()
    event1 = Deferred()

    subscription0 = yield handler.subscribe(
        lambda: event0.callback(42), u'com.myapp.topic1')
    subscription1 = yield handler.subscribe(
        lambda: event1.callback('foo'), u'com.myapp.topic1')
    # same topic, same ID
    self.assertTrue(subscription0.id == subscription1.id)

    # do a publish (MockTransport fakes the acknowledgement
    # message) and then do an actual publish event. The IDs
    # are the same, so we just do one Event.
    publish = yield handler.publish(
        u'com.myapp.topic1',
        options=types.PublishOptions(acknowledge=True, exclude_me=False),
    )
    handler.onMessage(message.Event(subscription0.id, publish.id))

    # ensure we actually got both callbacks
    self.assertTrue(event0.called, "Missing callback")
    self.assertTrue(event1.called, "Missing callback")

@inlineCallbacks
def test_double_subscribe_single_unsubscribe(self):
    '''
    Make sure we correctly deal with unsubscribing one of our handlers
    from the same topic.
    '''
    handler = ApplicationSession()
    MockTransport(handler)

    # monkey-patch ApplicationSession to ensure we DO NOT get
    # an Unsubscribed message -- since we only unsubscribe
    # from ONE of our handlers for com.myapp.topic1
    def onMessage(msg):
        assert not isinstance(msg, message.Unsubscribed)
        return ApplicationSession.onMessage(handler, msg)
    handler.onMessage = onMessage

    event0 = Deferred()
    event1 = Deferred()

    subscription0 = yield handler.subscribe(
        lambda: event0.callback(42), u'com.myapp.topic1')
    subscription1 = yield handler.subscribe(
        lambda: event1.callback('foo'), u'com.myapp.topic1')
    self.assertTrue(subscription0.id == subscription1.id)
    # Dropping only one of the two handlers must NOT send Unsubscribed
    # (asserted by the monkey-patch above).
    yield subscription1.unsubscribe()

    # do a publish (MockTransport fakes the acknowledgement
    # message) and then do an actual publish event. Note the
    # IDs are the same, so there's only one Event.
    publish = yield handler.publish(
        u'com.myapp.topic1',
        options=types.PublishOptions(acknowledge=True, exclude_me=False),
    )
    handler.onMessage(message.Event(subscription0.id, publish.id))

    # since we unsubscribed the second event handler, we
    # should NOT have called its callback
    self.assertTrue(event0.called, "Missing callback")
    self.assertTrue(not event1.called, "Second callback fired.")

@inlineCallbacks
def test_double_subscribe_double_unsubscribe(self):
    '''
    If we subscribe twice, and unsubscribe twice, we should then get
    an Unsubscribed message.
    '''
    handler = ApplicationSession()
    MockTransport(handler)

    # monkey-patch ApplicationSession to ensure we get our message
    unsubscribed_d = Deferred()
    def onMessage(msg):
        if isinstance(msg, message.Unsubscribed):
            unsubscribed_d.callback(msg)
        return ApplicationSession.onMessage(handler, msg)
    handler.onMessage = onMessage

    event0 = Deferred()
    event1 = Deferred()

    subscription0 = yield handler.subscribe(
        lambda: event0.callback(42), u'com.myapp.topic1')
    subscription1 = yield handler.subscribe(
        lambda: event1.callback('foo'), u'com.myapp.topic1')
    self.assertTrue(subscription0.id == subscription1.id)
    yield subscription0.unsubscribe()
    yield subscription1.unsubscribe()

    # after the second unsubscribe, we should have gotten an
    # Unsubscribed message
    assert unsubscribed_d.called

    # do a publish (MockTransport fakes the acknowledgement
    # message) and then do an actual publish event. Sending
    # the Event should be an error, as we have no
    # subscriptions left.
    publish = yield handler.publish(
        u'com.myapp.topic1',
        options=types.PublishOptions(acknowledge=True, exclude_me=False),
    )
    try:
        handler.onMessage(message.Event(subscription0.id, publish.id))
        self.fail("Expected ProtocolError")
    except ProtocolError:
        pass

    # since we unsubscribed the second event handler, we
    # should NOT have called its callback
    self.assertTrue(not event0.called, "First callback fired.")
    self.assertTrue(not event1.called, "Second callback fired.")

@inlineCallbacks
def test_double_subscribe_errors(self):
    """
    Test various error-conditions when we try to add a second
    subscription-handler (its signature must match any
    existing handlers).
    """
    handler = ApplicationSession()
    MockTransport(handler)

    event0 = Deferred()
    event1 = Deferred()

    def second(*args, **kw):
        # our EventDetails should have been passed as the
        # "boom" kwarg; see "details_arg=" below
        self.assertTrue('boom' in kw)
        self.assertTrue(isinstance(kw['boom'], types.EventDetails))
        event1.callback(args)

    subscription0 = yield handler.subscribe(
        lambda arg: event0.callback(arg), u'com.myapp.topic1')
    subscription1 = yield handler.subscribe(
        second, u'com.myapp.topic1',
        types.SubscribeOptions(details_arg='boom'),
    )
    # same topic, same ID
    self.assertTrue(subscription0.id == subscription1.id)

    # MockTransport gives us the ack reply and then we do our
    # own event message.
    publish = yield handler.publish(
        u'com.myapp.topic1',
        options=types.PublishOptions(acknowledge=True, exclude_me=False),
    )
    # note that the protocol serializer converts all sequences
    # to lists, so we pass "args" as a list, not a tuple on
    # purpose.
    handler.onMessage(
        message.Event(subscription0.id, publish.id, args=['arg0']))

    # each callback should have gotten called, each with its
    # own args (we check the correct kwarg in second() above)
    self.assertTrue(event0.called)
    self.assertTrue(event1.called)
    self.assertEqual(event0.result, 'arg0')
    self.assertEqual(event1.result, ('arg0',))

@inlineCallbacks
def test_publish_callback_exception(self):
    """
    Ensure we handle an exception from the user code.
    """
    handler = ApplicationSession()
    MockTransport(handler)

    error_instance = RuntimeError("we have a problem")
    got_err_d = Deferred()

    # Twisted log observer: fires got_err_d when our specific error
    # instance shows up as a logged failure.
    def observer(kw):
        if kw['isError'] and 'failure' in kw:
            fail = kw['failure']
            fail.trap(RuntimeError)
            if error_instance == fail.value:
                got_err_d.callback(True)
    log.addObserver(observer)

    def boom():
        raise error_instance

    try:
        sub = yield handler.subscribe(boom, u'com.myapp.topic1')
        # MockTransport gives us the ack reply and then we do our
        # own event message
        publish = yield handler.publish(
            u'com.myapp.topic1',
            options=types.PublishOptions(acknowledge=True, exclude_me=False),
        )
        msg = message.Event(sub.id, publish.id)
        handler.onMessage(msg)

        # we know it worked if our observer worked and did
        # .callback on our Deferred above.
        self.assertTrue(got_err_d.called)
        # ...otherwise trial will fail the test anyway
        self.flushLoggedErrors()
    finally:
        log.removeObserver(observer)
@inlineCallbacks
def test_unsubscribe(self):
handler = ApplicationSession()
MockTransport(handler)
def on_event(*args, **kwargs):
print("got event", args, kwargs)
subscription = yield handler.subscribe(on_event, u'com.myapp.topic1')
yield subscription.unsubscribe()
@inlineCallbacks
def test_register(self):
handler = ApplicationSession()
MockTransport(handler)
def on_call(*args, **kwargs):
print("got call", args, kwargs)
registration = yield handler.register(on_call, u'com.myapp.procedure1')
self.assertTrue(type(registration.id) in (int, long))
registration = yield handler.register(on_call, u'com.myapp.procedure1', options=types.RegisterOptions(match=u'prefix'))
self.assertTrue(type(registration.id) in (int, long))
@inlineCallbacks
def test_unregister(self):
handler = ApplicationSession()
MockTransport(handler)
def on_call(*args, **kwargs):
print("got call", args, kwargs)
registration = yield handler.register(on_call, u'com.myapp.procedure1')
yield registration.unregister()
@inlineCallbacks
def test_invoke(self):
handler = ApplicationSession()
MockTransport(handler)
def myproc1():
return 23
yield handler.register(myproc1, u'com.myapp.myproc1')
res = yield handler.call(u'com.myapp.myproc1')
self.assertEqual(res, 23)
@inlineCallbacks
def test_invoke_twice(self):
handler = ApplicationSession()
MockTransport(handler)
def myproc1():
return 23
yield handler.register(myproc1, u'com.myapp.myproc1')
d0 = handler.call(u'com.myapp.myproc1')
d1 = handler.call(u'com.myapp.myproc1')
res = yield DeferredList([d0, d1])
self.assertEqual(res, [(True, 23), (True, 23)])
    @inlineCallbacks
    def test_invoke_request_id_sequences(self):
        """
        make sure each session independently generates sequential IDs
        """
        handler0 = ApplicationSession()
        handler1 = ApplicationSession()
        trans0 = MockTransport(handler0)
        trans1 = MockTransport(handler1)
        # the ID sequences for each session should both start at 0
        # (the register) and then increment for the call()
        def verify_seq_id(orig, msg):
            if isinstance(msg, message.Register):
                self.assertEqual(msg.request, 0)
            elif isinstance(msg, message.Call):
                self.assertEqual(msg.request, 1)
            return orig(msg)
        # Wrap each transport's send() so every outgoing message is
        # checked before being forwarded to the real implementation.
        orig0 = trans0.send
        orig1 = trans1.send
        trans0.send = lambda msg: verify_seq_id(orig0, msg)
        trans1.send = lambda msg: verify_seq_id(orig1, msg)
        def myproc1():
            return 23
        yield handler0.register(myproc1, u'com.myapp.myproc1')
        yield handler1.register(myproc1, u'com.myapp.myproc1')
        d0 = handler0.call(u'com.myapp.myproc1')
        d1 = handler1.call(u'com.myapp.myproc1')
        res = yield DeferredList([d0, d1])
        self.assertEqual(res, [(True, 23), (True, 23)])
    @inlineCallbacks
    def test_invoke_user_raises(self):
        """An exception raised in callee code surfaces to the caller (as a
        RuntimeError here) and the original error is logged to Twisted."""
        handler = ApplicationSession()
        handler.traceback_app = True
        MockTransport(handler)
        name_error = NameError('foo')
        def bing():
            raise name_error
        # see MockTransport, must start with "com.myapp.myproc"
        yield handler.register(bing, u'com.myapp.myproc99')
        try:
            yield handler.call(u'com.myapp.myproc99')
            self.fail("Expected an error")
        except Exception as e:
            # XXX should/could we export all the builtin types?
            # right now, we always get ApplicationError
            # self.assertTrue(isinstance(e, NameError))
            self.assertTrue(isinstance(e, RuntimeError))
            # also, we should have logged the real NameError to
            # Twisted.
            errs = self.flushLoggedErrors()
            self.assertEqual(1, len(errs))
            self.assertEqual(name_error, errs[0].value)
    @inlineCallbacks
    def test_invoke_progressive_result(self):
        """A callee emitting 10 progressive results delivers every one to
        the caller's on_progress callback before the final result (42)."""
        handler = ApplicationSession()
        MockTransport(handler)
        @inlineCallbacks
        def bing(details=None):
            self.assertTrue(details is not None)
            self.assertTrue(details.progress is not None)
            for i in range(10):
                details.progress(i)
                yield succeed(i)
            returnValue(42)
        # One Deferred per expected progressive result, indexed by value.
        progressive = list(map(lambda _: Deferred(), range(10)))
        def progress(arg):
            progressive[arg].callback(arg)
        # see MockTransport, must start with "com.myapp.myproc"
        yield handler.register(
            bing,
            u'com.myapp.myproc2',
            types.RegisterOptions(details_arg='details'),
        )
        res = yield handler.call(
            u'com.myapp.myproc2',
            options=types.CallOptions(on_progress=progress),
        )
        self.assertEqual(42, res)
        # make sure we got *all* our progressive results
        for i in range(10):
            self.assertTrue(progressive[i].called)
            self.assertEqual(i, progressive[i].result)
    @inlineCallbacks
    def test_invoke_progressive_result_error(self):
        """A caller's on_progress callback raising must not break the call:
        the final result still arrives and the error is logged."""
        handler = ApplicationSession()
        MockTransport(handler)
        @inlineCallbacks
        def bing(arg, details=None, key=None):
            self.assertTrue(details is not None)
            self.assertTrue(details.progress is not None)
            self.assertEqual(key, 'word')
            self.assertEqual('arg', arg)
            details.progress('life', something='nothing')
            yield succeed('meaning of')
            returnValue(42)
        got_progress = Deferred()
        progress_error = NameError('foo')
        def progress(arg, something=None):
            # Record the progressive result, then blow up on purpose.
            self.assertEqual('nothing', something)
            got_progress.callback(arg)
            raise progress_error
        # see MockTransport, must start with "com.myapp.myproc"
        yield handler.register(
            bing,
            u'com.myapp.myproc2',
            types.RegisterOptions(details_arg='details'),
        )
        res = yield handler.call(
            u'com.myapp.myproc2',
            'arg',
            options=types.CallOptions(on_progress=progress),
            key='word',
        )
        self.assertEqual(42, res)
        # our progress handler raised an error, but not before
        # recording success.
        self.assertTrue(got_progress.called)
        self.assertEqual('life', got_progress.result)
        # make sure our progress-handler error was logged
        errs = self.flushLoggedErrors()
        self.assertEqual(1, len(errs))
        self.assertEqual(progress_error, errs[0].value)
    @inlineCallbacks
    def test_invoke_progressive_result_no_args(self):
        """Progressive results work when the callee sends no args at all."""
        handler = ApplicationSession()
        MockTransport(handler)
        @inlineCallbacks
        def bing(details=None):
            self.assertTrue(details is not None)
            self.assertTrue(details.progress is not None)
            details.progress()
            yield succeed(True)
            returnValue(42)
        got_progress = Deferred()
        def progress():
            # Fired with a sentinel value just to prove we were called.
            got_progress.callback('intentionally left blank')
        # see MockTransport, must start with "com.myapp.myproc"
        yield handler.register(
            bing,
            u'com.myapp.myproc2',
            types.RegisterOptions(details_arg='details'),
        )
        res = yield handler.call(
            u'com.myapp.myproc2',
            options=types.CallOptions(on_progress=progress),
        )
        self.assertEqual(42, res)
        self.assertTrue(got_progress.called)
    @inlineCallbacks
    def test_invoke_progressive_result_just_kwargs(self):
        """Progressive results work when the callee sends only kwargs."""
        handler = ApplicationSession()
        MockTransport(handler)
        @inlineCallbacks
        def bing(details=None):
            self.assertTrue(details is not None)
            self.assertTrue(details.progress is not None)
            details.progress(key='word')
            yield succeed(True)
            returnValue(42)
        got_progress = Deferred()
        def progress(key=None):
            got_progress.callback(key)
        # see MockTransport, must start with "com.myapp.myproc"
        yield handler.register(
            bing,
            u'com.myapp.myproc2',
            types.RegisterOptions(details_arg='details'),
        )
        res = yield handler.call(
            u'com.myapp.myproc2',
            options=types.CallOptions(on_progress=progress),
        )
        self.assertEqual(42, res)
        self.assertTrue(got_progress.called)
        self.assertEqual('word', got_progress.result)
# ## variant 1: works
# def test_publish1(self):
# d = self.handler.publish(u'de.myapp.topic1')
# self.assertFailure(d, ApplicationError)
# ## variant 2: works
# @inlineCallbacks
# def test_publish2(self):
# yield self.assertFailure(self.handler.publish(u'de.myapp.topic1'), ApplicationError)
# ## variant 3: does NOT work
# @inlineCallbacks
# def test_publish3(self):
# with self.assertRaises(ApplicationError):
# yield self.handler.publish(u'de.myapp.topic1')
| {
"content_hash": "9c3d3ef094b6ff35184983139cceaa8f",
"timestamp": "",
"source": "github",
"line_count": 759,
"max_line_length": 189,
"avg_line_length": 39.361001317523055,
"alnum_prop": 0.570276150627615,
"repo_name": "tomwire/AutobahnPython",
"id": "9cd081afc675426687d153b1663dca12ff825c96",
"size": "31152",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "autobahn/wamp/test/test_protocol.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "3305"
},
{
"name": "Python",
"bytes": "882787"
}
],
"symlink_target": ""
} |
"""Module containing class `CalendarMonth`."""
import functools
@functools.total_ordering
class CalendarMonth:

    """
    A calendar month, with a year and a month.

    A `CalendarMonth` object has integer `year` and `month` attributes.
    `CalendarMonth` objects are immutable, hashable, and totally ordered.

    You can add and subtract integer numbers of months to and from a
    `CalendarMonth`. Subtracting one `CalendarMonth` from another yields
    an integer number of months.

    The `range` method iterates over half-open sequences of
    `CalendarMonth` values, analogous to the builtin `range`.
    """

    @staticmethod
    def from_date(date):
        """Create a `CalendarMonth` from an object with integer `year` and
        `month` attributes, e.g. a `datetime.date`."""
        return CalendarMonth(date.year, date.month)

    @staticmethod
    def range(from_month, to_month):
        """
        Iterate over the months [`from_month`, `to_month`).

        Yields nothing when `to_month <= from_month`. Raises `TypeError`
        for non-`CalendarMonth` arguments.
        """
        # BUGFIX: validate eagerly. The original `range` was itself a
        # generator function, so the TypeError was not raised until the
        # first iteration, silently accepting bad arguments at call time.
        if not isinstance(from_month, CalendarMonth) or \
                not isinstance(to_month, CalendarMonth):
            raise TypeError('Both arguments must be `CalendarMonth` objects.')

        def _iterate():
            month = from_month
            while month < to_month:
                yield month
                month += 1

        return _iterate()

    def __init__(self, year, month):
        """
        Initialize from an integer `year` and integer `month` (1-12).

        Raises:
            TypeError: if `year` or `month` is not an int.
            ValueError: if `month` is outside 1..12.
        """
        if not isinstance(year, int) or not isinstance(month, int):
            raise TypeError('Year and month must both be integers.')
        if month < 1 or month > 12:
            raise ValueError(
                'Specified month {} is not between 1 and 12.'.format(month))
        # Serial month number (months since year 0, January). One integer
        # makes hashing, ordering and month arithmetic trivial.
        self._n = year * 12 + (month - 1)

    @property
    def year(self):
        return self._n // 12

    @property
    def month(self):
        return self._n % 12 + 1

    def __repr__(self):
        return 'CalendarMonth({}, {})'.format(self.year, self.month)

    def __str__(self):
        return '{:4d}-{:02d}'.format(self.year, self.month)

    def __hash__(self):
        return self._n

    def __eq__(self, other):
        if not isinstance(other, CalendarMonth):
            # BUGFIX: was `return False`. Returning NotImplemented lets
            # Python try the reflected operation and keeps `==`/`!=`
            # consistent with ordering comparisons.
            return NotImplemented
        return other._n == self._n

    def __lt__(self, other):
        if not isinstance(other, CalendarMonth):
            # BUGFIX: was `return False`, which made e.g. `cm < 5` quietly
            # False while total_ordering derived `cm > 5` as True.
            # NotImplemented makes mixed-type ordering raise TypeError.
            return NotImplemented
        return self._n < other._n

    def __add__(self, i):
        """Return this month shifted forward by `i` months (int)."""
        if not isinstance(i, int):
            return NotImplemented
        year, month0 = divmod(self._n + i, 12)
        return CalendarMonth(year, month0 + 1)

    def __radd__(self, i):
        return self.__add__(i)

    def __sub__(self, other):
        """Subtract a `CalendarMonth` (yielding an int number of months) or
        an int (yielding a `CalendarMonth`)."""
        if isinstance(other, CalendarMonth):
            return self._n - other._n
        if isinstance(other, int):
            year, month0 = divmod(self._n - other, 12)
            return CalendarMonth(year, month0 + 1)
        # BUGFIX: was `raise NotImplemented`, which itself raises
        # "TypeError: exceptions must derive from BaseException" in Py3.
        # Returning the singleton is the documented protocol.
        return NotImplemented
def _year(n):
return n // 12
def _month(n):
return (n % 12) + 1
| {
"content_hash": "e2be9de774cb4e769c309d178d537e7f",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 78,
"avg_line_length": 24.88235294117647,
"alnum_prop": 0.5254981425194191,
"repo_name": "HaroldMills/Vesper",
"id": "66b253f6784cf72ea001d99ac47b54cb06512b1d",
"size": "2961",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vesper/util/calendar_month.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "92"
},
{
"name": "CSS",
"bytes": "9101"
},
{
"name": "Dockerfile",
"bytes": "1678"
},
{
"name": "HTML",
"bytes": "70614"
},
{
"name": "JavaScript",
"bytes": "410277"
},
{
"name": "Python",
"bytes": "2697554"
},
{
"name": "Shell",
"bytes": "2772"
},
{
"name": "TypeScript",
"bytes": "30001"
}
],
"symlink_target": ""
} |
import os
from pathlib import Path
import re
import tempfile
import pytest
from versionner.cli import execute
from versionner.config import Config
from test.streamcatcher import catch_streams
def bootstrap_env():
    """Create a fresh temp directory, chdir into it and run `ver init`.

    Returns the TemporaryDirectory object; callers must keep a reference
    to it so the directory isn't cleaned up while the tests run.
    """
    # Renamed from `dir`, which shadowed the builtin of the same name.
    tmp_dir = tempfile.TemporaryDirectory()
    os.chdir(tmp_dir.name)
    execute('ver', ['init'])
    return tmp_dir
class TestDefault:
    """End-to-end tests for running `ver` with no subcommand."""
    @pytest.fixture(autouse=True)
    def set_dev(self):
        # Fresh initialized project directory for every test. Keep the
        # TemporaryDirectory object on self so it isn't garbage-collected
        # (and thus removed) mid-test.
        self.dir = bootstrap_env()
        self.root = Path(self.dir.name)
        self.cfg = Config()
    def test_empty(self):
        """Bare `ver` on an initialized project reports the default version."""
        version = self.cfg.default_init_version
        version_file = self.root / self.cfg.version_file
        with catch_streams():
            execute('ver', [])
        assert version_file.is_file(), "%s is not a file (exists: %s)" % (version_file, version_file.exists())
        with version_file.open('r') as fh:
            assert fh.read().strip() == version
    def test_not_initialized(self):
        """Bare `ver` without a version file exits with an error message."""
        version_file = self.root / self.cfg.version_file
        version_file.unlink()
        with catch_streams() as streams, \
                pytest.raises(SystemExit):
            execute('ver', [])
        assert re.search(r'(?ms).*Version file .* doesn\'t exists', streams.err.getvalue())
if __name__ == '__main__':
    # Propagate pytest's exit status; the original discarded it, so
    # running this file as a script always exited 0 even on failures.
    raise SystemExit(pytest.main())
| {
"content_hash": "51bbfc1bb0f76daee239ee5dab8160f9",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 110,
"avg_line_length": 23.436363636363637,
"alnum_prop": 0.6167571761055082,
"repo_name": "mysz/versionner",
"id": "561d7ac24532dcd832785d1184f0003d5e9ca73f",
"size": "1312",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/test_default.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "355"
},
{
"name": "Python",
"bytes": "48644"
}
],
"symlink_target": ""
} |
# Standard Ansible module metadata: schema version, maturity ('preview')
# and which team supports the module ('network').
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'network'}
DOCUMENTATION = """
---
module: eos_command
version_added: "2.1"
author: "Peter Sprygada (@privateip)"
short_description: Run arbitrary commands on an Arista EOS device
description:
- Sends an arbitrary set of commands to an EOS node and returns the results
read from the device. This module includes an
argument that will cause the module to wait for a specific condition
before returning or timing out if the condition is not met.
extends_documentation_fragment: eos
notes:
- Tested against EOS 4.15
options:
commands:
description:
- The commands to send to the remote EOS device over the
configured provider. The resulting output from the command
        is returned. If the I(wait_for) argument is provided, the
        module does not return until the condition is satisfied or
the number of I(retries) has been exceeded.
required: true
wait_for:
description:
- Specifies what to evaluate from the output of the command
and what conditionals to apply. This argument will cause
the task to wait for a particular conditional to be true
before moving forward. If the conditional is not true
by the configured retries, the task fails. See examples.
required: false
default: null
aliases: ['waitfor']
version_added: "2.2"
match:
description:
- The I(match) argument is used in conjunction with the
I(wait_for) argument to specify the match policy. Valid
values are C(all) or C(any). If the value is set to C(all)
then all conditionals in the I(wait_for) must be satisfied. If
the value is set to C(any) then only one of the values must be
satisfied.
required: false
default: all
choices: ['any', 'all']
version_added: "2.2"
retries:
description:
- Specifies the number of retries a command should be tried
before it is considered failed. The command is run on the
target device every retry and evaluated against the I(wait_for)
conditionals.
required: false
default: 10
interval:
description:
- Configures the interval in seconds to wait between retries
of the command. If the command does not pass the specified
        conditional, the interval indicates how long to wait before
trying the command again.
required: false
default: 1
"""
EXAMPLES = """
- name: run show version on remote devices
eos_command:
commands: show version
- name: run show version and check to see if output contains Arista
eos_command:
commands: show version
wait_for: result[0] contains Arista
- name: run multiple commands on remote nodes
eos_command:
commands:
- show version
- show interfaces
- name: run multiple commands and evaluate the output
eos_command:
commands:
- show version
- show interfaces
wait_for:
- result[0] contains Arista
- result[1] contains Loopback0
- name: run commands and specify the output format
eos_command:
commands:
- command: show version
output: json
"""
RETURN = """
stdout:
description: The set of responses from the commands
returned: always apart from low level errors (such as action plugin)
type: list
sample: ['...', '...']
stdout_lines:
description: The value of stdout split into a list
returned: always apart from low level errors (such as action plugin)
type: list
sample: [['...', '...'], ['...'], ['...']]
failed_conditions:
description: The list of conditionals that have failed
returned: failed
type: list
sample: ['...', '...']
"""
import time
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.six import string_types
from ansible.module_utils.netcli import Conditional
from ansible.module_utils.network_common import ComplexList
from ansible.module_utils.eos import run_commands
from ansible.module_utils.eos import eos_argument_spec, check_args
# Allowed keys for dict-style items in `commands`. NOTE(review): this
# constant appears unused in this module, and parse_commands' spec uses
# 'answer' rather than 'response' -- confirm which is canonical.
VALID_KEYS = ['command', 'output', 'prompt', 'response']
def to_lines(stdout):
    """Split each string response into a list of lines; pass non-string
    items (e.g. already-decoded JSON structures) through unchanged."""
    return [str(item).split('\n') if isinstance(item, string_types) else item
            for item in stdout]
def parse_commands(module, warnings):
    """Normalize the `commands` module parameter into command dicts.

    Each item becomes a dict with 'command', 'output', 'prompt' and
    'answer' keys (via ComplexList). In check mode, commands that do not
    start with 'show' are dropped with a warning, since they could
    change device state.
    """
    spec = dict(
        command=dict(key=True),
        output=dict(),
        prompt=dict(),
        answer=dict()
    )
    transform = ComplexList(spec, module)
    commands = transform(module.params['commands'])
    if module.check_mode:
        # Iterate over a copy because items are removed while iterating.
        for item in list(commands):
            if not item['command'].startswith('show'):
                warnings.append(
                    'Only show commands are supported when using check_mode, not '
                    'executing %s' % item['command']
                )
                commands.remove(item)
    return commands
def to_cli(obj):
    """Render a parsed command dict as the CLI string to send, appending
    '| json' when JSON-formatted output was requested."""
    suffix = ' | json' if obj.get('output') == 'json' else ''
    return obj['command'] + suffix
def main():
    """Entry point for module execution.

    Runs the requested commands on the device, optionally re-running
    them until all (or any, depending on `match`) `wait_for`
    conditionals are satisfied, then reports via exit_json/fail_json.
    """
    argument_spec = dict(
        commands=dict(type='list', required=True),

        wait_for=dict(type='list', aliases=['waitfor']),
        match=dict(default='all', choices=['all', 'any']),

        retries=dict(default=10, type='int'),
        interval=dict(default=1, type='int')
    )

    argument_spec.update(eos_argument_spec)

    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)

    result = {'changed': False}

    warnings = list()
    check_args(module, warnings)
    commands = parse_commands(module, warnings)
    if warnings:
        result['warnings'] = warnings

    wait_for = module.params['wait_for'] or list()

    try:
        conditionals = [Conditional(c) for c in wait_for]
    except AttributeError:
        # Conditional() raises AttributeError for malformed expressions.
        exc = get_exception()
        module.fail_json(msg=str(exc))

    retries = module.params['retries']
    interval = module.params['interval']
    match = module.params['match']

    # NOTE(review): if a caller passes retries < 1 the loop never runs
    # and `responses` below would be unbound; behavior relies on the
    # default of 10 -- confirm whether retries should be clamped >= 1.
    while retries > 0:
        responses = run_commands(module, commands)

        # Iterate over a copy: satisfied conditionals are removed in-place.
        for item in list(conditionals):
            if item(responses):
                if match == 'any':
                    # A single match suffices; drop all remaining checks.
                    conditionals = list()
                    break
                conditionals.remove(item)

        if not conditionals:
            break

        time.sleep(interval)
        retries -= 1

    if conditionals:
        failed_conditions = [item.raw for item in conditionals]
        # BUGFIX: corrected grammar of the user-facing error message
        # ("have not be satisfied" -> "have not been satisfied").
        msg = 'One or more conditional statements have not been satisfied'
        module.fail_json(msg=msg, failed_conditions=failed_conditions)

    result.update({
        'changed': False,
        'stdout': responses,
        'stdout_lines': to_lines(responses)
    })

    module.exit_json(**result)
# Run the module only when executed directly by Ansible, not on import.
if __name__ == '__main__':
    main()
| {
"content_hash": "5dd704edf9969f77f2450047fb3b6125",
"timestamp": "",
"source": "github",
"line_count": 236,
"max_line_length": 82,
"avg_line_length": 29.81779661016949,
"alnum_prop": 0.6389086258348728,
"repo_name": "e-gob/plataforma-kioscos-autoatencion",
"id": "52dc321dbee5d1010483aecc186248ffa4deba49",
"size": "7712",
"binary": false,
"copies": "23",
"ref": "refs/heads/master",
"path": "scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/modules/network/eos/eos_command.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "41110"
},
{
"name": "C++",
"bytes": "3804"
},
{
"name": "CSS",
"bytes": "34823"
},
{
"name": "CoffeeScript",
"bytes": "8521"
},
{
"name": "HTML",
"bytes": "61168"
},
{
"name": "JavaScript",
"bytes": "7206"
},
{
"name": "Makefile",
"bytes": "1347"
},
{
"name": "PowerShell",
"bytes": "584344"
},
{
"name": "Python",
"bytes": "25506593"
},
{
"name": "Ruby",
"bytes": "245726"
},
{
"name": "Shell",
"bytes": "5075"
}
],
"symlink_target": ""
} |
import requests
import json
class BadCommandStatus(Exception):
    """Raised when a vmmaster response body reports a failing status."""

    def __init__(self, content):
        # BUGFIX: forward to Exception.__init__ so .args is populated
        # (repr and pickling of the exception work as expected).
        super(BadCommandStatus, self).__init__(content)
        # The offending (already-decoded) response payload.
        self.content = content

    def __str__(self):
        return "Bad executed command status in response content:\n %s" % str(self.content)
class BadStatusCode(Exception):
    """Raised when a vmmaster HTTP response has a non-200 status code."""

    def __init__(self, response):
        # BUGFIX: forward to Exception.__init__ so .args is populated.
        super(BadStatusCode, self).__init__(response)
        # The full HTTP response object, kept for diagnostics.
        self.response = response

    def __str__(self):
        return "Status code is not 200 in:\n code: %s\nheaders: %s\n\ncontent: %s" % (
            self.response.status_code, self.response.headers, self.response.content)
class vmmaster(object):
    """Client for vmmaster-specific extensions of the WebDriver HTTP API.

    Issues vmmaster commands (run script, label) against the same remote
    endpoint and session as the wrapped selenium driver.
    """
    prefix = "/vmmaster"
    _commands = {
        "run_script": ("POST", prefix + "/runScript"),
        "label": ("POST", prefix + "/vmmasterLabel")
    }

    def __init__(self, driver):
        self._driver = driver
        # Target the same remote endpoint and browser session as the driver.
        self.command_executor_url = driver.command_executor._url
        self.session_id = driver.session_id

    def _process_response(self, response):
        """Validate an HTTP response and return its decoded JSON body.

        Raises:
            BadStatusCode: if the HTTP status is not 200.
            BadCommandStatus: if the body's 'status' field is missing or > 0.
        """
        if response.status_code != 200:
            raise BadStatusCode(response)
        data = json.loads(response.content)
        # A missing 'status' defaults to 1 (failure): a reply without an
        # explicit success marker is treated as an error.
        if data.get('status', 1) > 0:
            raise BadCommandStatus(data)
        return data

    def _make_request(self, command, data):
        """POST `data` to the endpoint registered for `command`."""
        # BUGFIX: look the command up before unpacking. The original
        # unpacked `self._commands.get(command, None)` first, so an
        # unknown command crashed with "cannot unpack NoneType" instead
        # of raising the intended error below (which also incorrectly
        # tested `command is None` rather than the lookup result).
        spec = self._commands.get(command)
        if spec is None:
            raise Exception("no such command: %s" % command)
        method, url = spec
        address = "%s/session/%s%s" % (self.command_executor_url, self.session_id, url)
        return self._process_response(requests.request(method, address, data=data))

    def run_script(self, script, command=None):
        """Execute `script` on the vmmaster VM, optionally via `command`."""
        data = {
            "script": script
        }
        if command:
            data.update({"command": command})
        return self._make_request("run_script", json.dumps(data))

    def label(self, label):
        """Attach a `label` marker to the current vmmaster session."""
        data = {
            "label": label
        }
        return self._make_request("label", json.dumps(data))
| {
"content_hash": "7cfc7b8be7d960a21a317aa69a0a13fb",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 90,
"avg_line_length": 31.278688524590162,
"alnum_prop": 0.5964360587002097,
"repo_name": "2gis/vmmaster-client",
"id": "c3c483047b66beb279f82a8307a22a1b6aed8c2f",
"size": "1924",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vmmaster_client/client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2377"
}
],
"symlink_target": ""
} |
import pytest
from anchore_engine.subsys.object_store import S3ObjectStorageDriver
class TestS3ObjectStorageDriver:
    """Unit tests for S3ObjectStorageDriver URI parsing and construction."""
    @pytest.mark.parametrize(
        "param",
        [
            pytest.param(
                {
                    "uri": "s3://bucket/key",
                    "expected": ("bucket", "key"),
                },
                id="success",
            )
        ],
    )
    def test_parse_uri(self, param):
        """_parse_uri splits an s3:// URI into a (bucket, key) tuple."""
        assert (
            S3ObjectStorageDriver({"unittest": True})._parse_uri(param["uri"])
            == param["expected"]
        )

    # Implicitly tests build_key too
    @pytest.mark.parametrize(
        "param",
        [
            pytest.param(
                {
                    "userId": "dakaneye",
                    "bucket": "dakabucket",
                    "key": "dakakey",
                    "expected": "s3://dakabucketname/dakaneye/dakabucket/dakakey",
                },
                id="success",
            )
        ],
    )
    def test_uri_for(self, param):
        """uri_for builds s3://<driver bucket>/<userId>/<bucket>/<key>."""
        assert (
            S3ObjectStorageDriver(
                {"unittest": True, "bucket": "dakabucketname"}
            ).uri_for(param["userId"], param["bucket"], param["key"])
            == param["expected"]
        )
| {
"content_hash": "915b8b30265bf910c4027e5db3f64b01",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 82,
"avg_line_length": 27.434782608695652,
"alnum_prop": 0.4461172741679873,
"repo_name": "anchore/anchore-engine",
"id": "6be1a5bbd7b975bb7b34c4b40f2501f6b0e45035",
"size": "1262",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/anchore_engine/subsys/object_store/drivers/test_s3.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "3889"
},
{
"name": "Dockerfile",
"bytes": "10954"
},
{
"name": "Makefile",
"bytes": "12274"
},
{
"name": "Python",
"bytes": "4529553"
},
{
"name": "Shell",
"bytes": "16598"
}
],
"symlink_target": ""
} |
"""Jumping Tasks."""
from gym_jumping_task.envs.jumping_colors_task import COLORS
from gym_jumping_task.envs.jumping_colors_task import JumpTaskEnvWithColors
from gym_jumping_task.envs.jumping_coordinates_task import JumpTaskEnvWithCoordinates
from gym_jumping_task.envs.jumping_task import JumpTaskEnv
| {
"content_hash": "21a2b581ce5636f4509cf95225b6aa9e",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 85,
"avg_line_length": 50.666666666666664,
"alnum_prop": 0.8388157894736842,
"repo_name": "google-research/jumping-task",
"id": "556eed4e45a65eeac15b29174f0627fda77e5244",
"size": "1454",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gym_jumping_task/envs/__init__.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
import logging
import os
import subprocess as subprocess
import shutil
import sys
import tempfile
from telemetry.core import util
from telemetry.core.chrome import browser_backend
from telemetry.core.chrome import cros_util
class DesktopBrowserBackend(browser_backend.BrowserBackend):
  """The backend for controlling a locally-executed browser instance, on Linux,
  Mac or Windows.
  """
  def __init__(self, options, executable, is_content_shell):
    super(DesktopBrowserBackend, self).__init__(
        is_content_shell=is_content_shell,
        supports_extensions=not is_content_shell, options=options)

    # Initialize fields so that an explosion during init doesn't break in Close.
    self._proc = None
    self._tmpdir = None
    self._tmp_output_file = None

    self._executable = executable
    if not self._executable:
      raise Exception('Cannot create browser, no executable found!')

    if len(options.extensions_to_load) > 0 and is_content_shell:
      raise browser_backend.ExtensionsNotSupportedException(
          'Content shell does not support extensions.')

    # Port for the DevTools remote-debugging endpoint.
    self._port = util.GetAvailableLocalPort()
    self._supports_net_benchmarking = True
    self._LaunchBrowser(options)

    # For old chrome versions, might have to relaunch to have the
    # correct benchmarking switch.
    # NOTE(review): _chrome_branch_number is presumably set during
    # _LaunchBrowser via _PostBrowserStartupInitialization (not shown
    # here) -- confirm.
    if self._chrome_branch_number < 1418:
      self.Close()
      self._supports_net_benchmarking = False
      self._LaunchBrowser(options)

    if self.options.cros_desktop:
      cros_util.NavigateLogin(self)

  def _LaunchBrowser(self, options):
    """Start the browser process and wait until it is responsive.

    stdout/stderr go to a temp file unless options.show_stdout is set.
    On any startup failure the backend is closed before re-raising.
    """
    args = [self._executable]
    args.extend(self.GetBrowserStartupArgs())
    if not options.show_stdout:
      # Unbuffered temp file captures combined stdout+stderr.
      self._tmp_output_file = tempfile.NamedTemporaryFile('w', 0)
      self._proc = subprocess.Popen(
          args, stdout=self._tmp_output_file, stderr=subprocess.STDOUT)
    else:
      self._proc = subprocess.Popen(args)
    try:
      self._WaitForBrowserToComeUp()
      self._PostBrowserStartupInitialization()
    except:
      # Intentionally broad: ensure the child process and temp files are
      # cleaned up before propagating whatever went wrong.
      self.Close()
      raise

  def GetBrowserStartupArgs(self):
    """Build the browser command-line flags (debugging port, profile,
    benchmarking switches, and optional CrOS login flags)."""
    args = super(DesktopBrowserBackend, self).GetBrowserStartupArgs()
    args.append('--remote-debugging-port=%i' % self._port)
    if not self.is_content_shell:
      args.append('--window-size=1280,1024')
    if self._supports_net_benchmarking:
      args.append('--enable-net-benchmarking')
    else:
      args.append('--enable-benchmarking')
    if not self.options.dont_override_profile:
      # Run against a throwaway profile dir (optionally seeded from
      # options.profile_dir).
      self._tmpdir = tempfile.mkdtemp()
      if self.options.profile_dir:
        if self.is_content_shell:
          logging.critical('Profiles cannot be used with content shell')
          sys.exit(1)
        # copytree requires the destination not to exist yet.
        shutil.rmtree(self._tmpdir)
        shutil.copytree(self.options.profile_dir, self._tmpdir)
      args.append('--user-data-dir=%s' % self._tmpdir)
    if self.options.cros_desktop:
      ext_path = os.path.join(os.path.dirname(__file__), 'chromeos_login_ext')
      args.extend(['--login-manager', '--login-profile=user',
                   '--stub-cros', '--login-screen=login',
                   '--auth-ext-path=%s' % ext_path])
    return args

  @property
  def pid(self):
    # None when no child process is running.
    if self._proc:
      return self._proc.pid
    return None

  def IsBrowserRunning(self):
    # poll() is None while the child process is still alive.
    return self._proc.poll() == None

  def GetStandardOutput(self):
    """Return everything the browser wrote to stdout/stderr so far."""
    assert self._tmp_output_file, "Can't get standard output with show_stdout"
    self._tmp_output_file.flush()
    try:
      with open(self._tmp_output_file.name) as f:
        return f.read()
    except IOError:
      return ''

  def __del__(self):
    self.Close()

  def Close(self):
    """Shut down the browser (terminate, then kill) and remove temp files.

    Safe to call multiple times and on partially-initialized instances.
    """
    super(DesktopBrowserBackend, self).Close()
    if self._proc:

      def IsClosed():
        if not self._proc:
          return True
        return self._proc.poll() != None

      # Try to politely shutdown, first.
      self._proc.terminate()
      try:
        util.WaitFor(IsClosed, timeout=1)
        self._proc = None
      except util.TimeoutException:
        pass

      # Kill it.
      if not IsClosed():
        self._proc.kill()
        try:
          util.WaitFor(IsClosed, timeout=5)
          self._proc = None
        except util.TimeoutException:
          self._proc = None
          raise Exception('Could not shutdown the browser.')

    if self._tmpdir and os.path.exists(self._tmpdir):
      shutil.rmtree(self._tmpdir, ignore_errors=True)
      self._tmpdir = None

    if self._tmp_output_file:
      self._tmp_output_file.close()
      self._tmp_output_file = None

  def CreateForwarder(self, *port_pairs):
    # Local browsers need no real port forwarding.
    return DoNothingForwarder(*port_pairs)
class DoNothingForwarder(object):
  """No-op forwarder for locally-running browsers.

  Exposes the same interface as real forwarders but simply reports a
  localhost URL for the first port pair's local port.
  """

  def __init__(self, *port_pairs):
    first_pair = port_pairs[0]
    self._host_port = first_pair.local_port

  @property
  def url(self):
    # Invalid once Close() has been called.
    assert self._host_port
    return 'http://127.0.0.1:%i' % self._host_port

  def Close(self):
    self._host_port = None
| {
"content_hash": "bc289ef8b1a840e73ea14130c6320dcd",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 80,
"avg_line_length": 30.459627329192546,
"alnum_prop": 0.6500815660685155,
"repo_name": "timopulkkinen/BubbleFish",
"id": "4e94d9364ee74c7801de70faefed7ec3cc880dd7",
"size": "5070",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/telemetry/telemetry/core/chrome/desktop_browser_backend.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "853"
},
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "1174304"
},
{
"name": "Awk",
"bytes": "9519"
},
{
"name": "C",
"bytes": "75801820"
},
{
"name": "C#",
"bytes": "1132"
},
{
"name": "C++",
"bytes": "161884021"
},
{
"name": "DOT",
"bytes": "1559"
},
{
"name": "F#",
"bytes": "381"
},
{
"name": "Java",
"bytes": "3531849"
},
{
"name": "JavaScript",
"bytes": "18556005"
},
{
"name": "Logos",
"bytes": "4517"
},
{
"name": "Matlab",
"bytes": "5234"
},
{
"name": "Objective-C",
"bytes": "7254742"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "Perl",
"bytes": "933011"
},
{
"name": "Python",
"bytes": "8808682"
},
{
"name": "R",
"bytes": "262"
},
{
"name": "Ragel in Ruby Host",
"bytes": "3621"
},
{
"name": "Shell",
"bytes": "1537764"
},
{
"name": "Tcl",
"bytes": "277077"
},
{
"name": "XML",
"bytes": "13493"
}
],
"symlink_target": ""
} |
import ast
from typing import (
Any,
Dict,
List,
)
class ArgumentVisitor(ast.NodeVisitor):
    """Collects the argument names declared by a visited function."""

    def __init__(self, *args, **kwargs):
        # type: (List[Any], Dict[str, Any]) -> None
        # https://github.com/python/mypy/issues/5887
        super(ArgumentVisitor, self).__init__(*args, **kwargs)  # type: ignore
        # Argument names in declaration order; star arguments carry a
        # '*' / '**' prefix.
        self.arguments = []  # type: List[str]
        # Simple annotation names, aligned with `arguments`; None when an
        # argument has no simple-name annotation.
        self.types = []  # type: List[str]

    def add_arg_by_name(self, name, arg):
        """Record one argument plus its annotation name (or None)."""
        self.arguments.append(name)
        # getattr covers both "no annotation" and "annotation without a
        # simple .id" (e.g. subscripted types) uniformly.
        self.types.append(getattr(arg.annotation, 'id', None))

    def visit_arguments(self, node):
        # type: (ast.arguments) -> ast.AST
        # posonlyargs only exists on Python 3.8+ AST nodes.
        ordinary = (list(getattr(node, 'posonlyargs', []))
                    + list(node.args)
                    + list(node.kwonlyargs))
        for arg in ordinary:
            self.add_arg_by_name(arg.arg, arg)

        # Handle single-star and double-star arguments.
        if node.vararg is not None:
            self.add_arg_by_name('*' + node.vararg.arg, node.vararg)
        if node.kwarg is not None:
            self.add_arg_by_name('**' + node.kwarg.arg, node.kwarg)
        return self.generic_visit(node)
| {
"content_hash": "f90168fda4ed30b83647085c00df5519",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 78,
"avg_line_length": 30.571428571428573,
"alnum_prop": 0.5740987983978638,
"repo_name": "terrencepreilly/darglint",
"id": "6ea7536a99cd631987cb96eb586dcdb9dd6e546d",
"size": "1498",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "darglint/analysis/argument_visitor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1960"
},
{
"name": "Elm",
"bytes": "25621"
},
{
"name": "HTML",
"bytes": "391"
},
{
"name": "Makefile",
"bytes": "1145"
},
{
"name": "Python",
"bytes": "678190"
}
],
"symlink_target": ""
} |
import unittest
from .storage_test_tables import StorageTestTables
from .storage_test_methods import StorageTestMethods
try:
from builtins import range
except ImportError:
pass
import tempfile
import os
from sqlalchemy import create_engine, MetaData
import time
from flask_prose import Storage
class StorageTest():
    """Mixin that provides a SQLite-on-disk Storage fixture.

    Subclasses combine this with a unittest.TestCase; _create_storage()
    populates `engine`, `storage` and `metadata` attributes.
    """
    def _create_storage(self):
        # NOTE(review): the fixed "temp.db" filename means concurrent
        # test runs would collide -- confirm tests only run serially.
        temp_dir = tempfile.gettempdir()
        self._dbfile = os.path.join(temp_dir, "temp.db")
        self.engine = create_engine('sqlite:///'+self._dbfile)
        self.storage = Storage(self.engine)
        # reflect=True loads existing table definitions from the database.
        self.metadata = MetaData(bind=self.engine, reflect=True)
    def tearDown(self):
        # Remove the on-disk database between tests.
        os.remove(self._dbfile)
        pass
class TestSQLiteStorageTables(StorageTest, StorageTestTables, unittest.TestCase):
    # Runs the shared StorageTestTables suite against the SQLite backend
    # provided by the StorageTest mixin.
    def dummy(self):
        # Placeholder method; the real tests come from StorageTestTables.
        pass
class TestSQLiteStorageMethods(StorageTest, StorageTestMethods, unittest.TestCase):
    # Runs the shared StorageTestMethods suite against the SQLite backend
    # provided by the StorageTest mixin.
    def dummy(self):
        # Placeholder method; the real tests come from StorageTestMethods.
        pass
| {
"content_hash": "3c881f99c1adcc2e493bda316370737f",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 83,
"avg_line_length": 25.2972972972973,
"alnum_prop": 0.7158119658119658,
"repo_name": "slippers/Flask-Prose",
"id": "6044f4849acf88f652f86b38b51bdbef4105eb59",
"size": "936",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_sqlite_storage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "55560"
},
{
"name": "Shell",
"bytes": "1224"
}
],
"symlink_target": ""
} |
from tempest.api.identity import base
from tempest.common.utils import data_utils
from tempest.lib import exceptions as lib_exc
from tempest import test
class RolesNegativeTestJSON(base.BaseIdentityV2AdminTest):
    """Negative tests for the Identity v2 admin roles API.

    Each test asserts that an invalid request -- unauthorized caller,
    missing token, duplicate, or nonexistent resource -- is rejected with
    the expected client error (Forbidden, Unauthorized, BadRequest,
    Conflict or NotFound).
    """
    def _get_role_params(self):
        # Create a throwaway user/tenant/role triple for the
        # role-assignment tests below.
        user = self.setup_test_user()
        tenant = self.tenants_client.show_tenant(user['tenantId'])['tenant']
        role = self.setup_test_role()
        return (user, tenant, role)
    @test.attr(type=['negative'])
    @test.idempotent_id('d5d5f1df-f8ca-4de0-b2ef-259c1cc67025')
    def test_list_roles_by_unauthorized_user(self):
        # Non-administrator user should not be able to list roles
        self.assertRaises(lib_exc.Forbidden,
                          self.non_admin_roles_client.list_roles)
    @test.attr(type=['negative'])
    @test.idempotent_id('11a3c7da-df6c-40c2-abc2-badd682edf9f')
    def test_list_roles_request_without_token(self):
        # Request to list roles without a valid token should fail
        token = self.client.auth_provider.get_token()
        self.client.delete_token(token)
        self.assertRaises(lib_exc.Unauthorized, self.roles_client.list_roles)
        self.client.auth_provider.clear_auth()
    @test.attr(type=['negative'])
    @test.idempotent_id('c0b89e56-accc-4c73-85f8-9c0f866104c1')
    def test_role_create_blank_name(self):
        # Should not be able to create a role with a blank name
        self.assertRaises(lib_exc.BadRequest, self.roles_client.create_role,
                          name='')
    @test.attr(type=['negative'])
    @test.idempotent_id('585c8998-a8a4-4641-a5dd-abef7a8ced00')
    def test_create_role_by_unauthorized_user(self):
        # Non-administrator user should not be able to create role
        role_name = data_utils.rand_name(name='role')
        self.assertRaises(lib_exc.Forbidden,
                          self.non_admin_roles_client.create_role,
                          name=role_name)
    @test.attr(type=['negative'])
    @test.idempotent_id('a7edd17a-e34a-4aab-8bb7-fa6f498645b8')
    def test_create_role_request_without_token(self):
        # Request to create role without a valid token should fail
        token = self.client.auth_provider.get_token()
        self.client.delete_token(token)
        role_name = data_utils.rand_name(name='role')
        self.assertRaises(lib_exc.Unauthorized,
                          self.roles_client.create_role, name=role_name)
        self.client.auth_provider.clear_auth()
    @test.attr(type=['negative'])
    @test.idempotent_id('c0cde2c8-81c1-4bb0-8fe2-cf615a3547a8')
    def test_role_create_duplicate(self):
        # Role names should be unique
        role_name = data_utils.rand_name(name='role-dup')
        body = self.roles_client.create_role(name=role_name)['role']
        role1_id = body.get('id')
        self.addCleanup(self.roles_client.delete_role, role1_id)
        self.assertRaises(lib_exc.Conflict, self.roles_client.create_role,
                          name=role_name)
    @test.attr(type=['negative'])
    @test.idempotent_id('15347635-b5b1-4a87-a280-deb2bd6d865e')
    def test_delete_role_by_unauthorized_user(self):
        # Non-administrator user should not be able to delete role
        role_name = data_utils.rand_name(name='role')
        body = self.roles_client.create_role(name=role_name)['role']
        self.addCleanup(self.roles_client.delete_role, body['id'])
        role_id = body.get('id')
        self.assertRaises(lib_exc.Forbidden,
                          self.non_admin_roles_client.delete_role, role_id)
    @test.attr(type=['negative'])
    @test.idempotent_id('44b60b20-70de-4dac-beaf-a3fc2650a16b')
    def test_delete_role_request_without_token(self):
        # Request to delete role without a valid token should fail
        role_name = data_utils.rand_name(name='role')
        body = self.roles_client.create_role(name=role_name)['role']
        self.addCleanup(self.roles_client.delete_role, body['id'])
        role_id = body.get('id')
        token = self.client.auth_provider.get_token()
        self.client.delete_token(token)
        self.assertRaises(lib_exc.Unauthorized,
                          self.roles_client.delete_role,
                          role_id)
        self.client.auth_provider.clear_auth()
    @test.attr(type=['negative'])
    @test.idempotent_id('38373691-8551-453a-b074-4260ad8298ef')
    def test_delete_role_non_existent(self):
        # Attempt to delete a non existent role should fail
        non_existent_role = data_utils.rand_uuid_hex()
        self.assertRaises(lib_exc.NotFound, self.roles_client.delete_role,
                          non_existent_role)
    @test.attr(type=['negative'])
    @test.idempotent_id('391df5cf-3ec3-46c9-bbe5-5cb58dd4dc41')
    def test_assign_user_role_by_unauthorized_user(self):
        # Non-administrator user should not be authorized to
        # assign a role to user
        (user, tenant, role) = self._get_role_params()
        self.assertRaises(
            lib_exc.Forbidden,
            self.non_admin_roles_client.create_user_role_on_project,
            tenant['id'], user['id'], role['id'])
    @test.attr(type=['negative'])
    @test.idempotent_id('f0d2683c-5603-4aee-95d7-21420e87cfd8')
    def test_assign_user_role_request_without_token(self):
        # Request to assign a role to a user without a valid token
        (user, tenant, role) = self._get_role_params()
        token = self.client.auth_provider.get_token()
        self.client.delete_token(token)
        self.assertRaises(
            lib_exc.Unauthorized,
            self.roles_client.create_user_role_on_project, tenant['id'],
            user['id'], role['id'])
        self.client.auth_provider.clear_auth()
    @test.attr(type=['negative'])
    @test.idempotent_id('99b297f6-2b5d-47c7-97a9-8b6bb4f91042')
    def test_assign_user_role_for_non_existent_role(self):
        # Attempt to assign a non existent role to user should fail
        (user, tenant, role) = self._get_role_params()
        non_existent_role = data_utils.rand_uuid_hex()
        self.assertRaises(lib_exc.NotFound,
                          self.roles_client.create_user_role_on_project,
                          tenant['id'], user['id'], non_existent_role)
    @test.attr(type=['negative'])
    @test.idempotent_id('b2285aaa-9e76-4704-93a9-7a8acd0a6c8f')
    def test_assign_user_role_for_non_existent_tenant(self):
        # Attempt to assign a role on a non existent tenant should fail
        (user, tenant, role) = self._get_role_params()
        non_existent_tenant = data_utils.rand_uuid_hex()
        self.assertRaises(lib_exc.NotFound,
                          self.roles_client.create_user_role_on_project,
                          non_existent_tenant, user['id'], role['id'])
    @test.attr(type=['negative'])
    @test.idempotent_id('5c3132cd-c4c8-4402-b5ea-71eb44e97793')
    def test_assign_duplicate_user_role(self):
        # Duplicate user role should not get assigned
        (user, tenant, role) = self._get_role_params()
        self.roles_client.create_user_role_on_project(tenant['id'],
                                                      user['id'],
                                                      role['id'])
        self.assertRaises(lib_exc.Conflict,
                          self.roles_client.create_user_role_on_project,
                          tenant['id'], user['id'], role['id'])
    @test.attr(type=['negative'])
    @test.idempotent_id('d0537987-0977-448f-a435-904c15de7298')
    def test_remove_user_role_by_unauthorized_user(self):
        # Non-administrator user should not be authorized to
        # remove a user's role
        (user, tenant, role) = self._get_role_params()
        self.roles_client.create_user_role_on_project(tenant['id'],
                                                      user['id'],
                                                      role['id'])
        self.assertRaises(
            lib_exc.Forbidden,
            self.non_admin_roles_client.delete_role_from_user_on_project,
            tenant['id'], user['id'], role['id'])
    @test.attr(type=['negative'])
    @test.idempotent_id('cac81cf4-c1d2-47dc-90d3-f2b7eb572286')
    def test_remove_user_role_request_without_token(self):
        # Request to remove a user's role without a valid token
        (user, tenant, role) = self._get_role_params()
        self.roles_client.create_user_role_on_project(tenant['id'],
                                                      user['id'],
                                                      role['id'])
        token = self.client.auth_provider.get_token()
        self.client.delete_token(token)
        self.assertRaises(lib_exc.Unauthorized,
                          self.roles_client.delete_role_from_user_on_project,
                          tenant['id'], user['id'], role['id'])
        self.client.auth_provider.clear_auth()
    @test.attr(type=['negative'])
    @test.idempotent_id('ab32d759-cd16-41f1-a86e-44405fa9f6d2')
    def test_remove_user_role_non_existent_role(self):
        # Attempt to delete a non existent role from a user should fail
        (user, tenant, role) = self._get_role_params()
        self.roles_client.create_user_role_on_project(tenant['id'],
                                                      user['id'],
                                                      role['id'])
        non_existent_role = data_utils.rand_uuid_hex()
        self.assertRaises(lib_exc.NotFound,
                          self.roles_client.delete_role_from_user_on_project,
                          tenant['id'], user['id'], non_existent_role)
    @test.attr(type=['negative'])
    @test.idempotent_id('67a679ec-03dd-4551-bbfc-d1c93284f023')
    def test_remove_user_role_non_existent_tenant(self):
        # Attempt to remove a role from a non existent tenant should fail
        (user, tenant, role) = self._get_role_params()
        self.roles_client.create_user_role_on_project(tenant['id'],
                                                      user['id'],
                                                      role['id'])
        non_existent_tenant = data_utils.rand_uuid_hex()
        self.assertRaises(lib_exc.NotFound,
                          self.roles_client.delete_role_from_user_on_project,
                          non_existent_tenant, user['id'], role['id'])
    @test.attr(type=['negative'])
    @test.idempotent_id('7391ab4c-06f3-477a-a64a-c8e55ce89837')
    def test_list_user_roles_by_unauthorized_user(self):
        # Non-administrator user should not be authorized to list
        # a user's roles
        (user, tenant, role) = self._get_role_params()
        self.roles_client.create_user_role_on_project(tenant['id'],
                                                      user['id'],
                                                      role['id'])
        self.assertRaises(
            lib_exc.Forbidden,
            self.non_admin_roles_client.list_user_roles_on_project,
            tenant['id'], user['id'])
    @test.attr(type=['negative'])
    @test.idempotent_id('682adfb2-fd5f-4b0a-a9ca-322e9bebb907')
    def test_list_user_roles_request_without_token(self):
        # Request to list user's roles without a valid token should fail
        (user, tenant, role) = self._get_role_params()
        token = self.client.auth_provider.get_token()
        self.client.delete_token(token)
        try:
            self.assertRaises(lib_exc.Unauthorized,
                              self.roles_client.list_user_roles_on_project,
                              tenant['id'],
                              user['id'])
        finally:
            self.client.auth_provider.clear_auth()
| {
"content_hash": "033d0969c4f4af9db6144098a6418f37",
"timestamp": "",
"source": "github",
"line_count": 241,
"max_line_length": 77,
"avg_line_length": 48.684647302904565,
"alnum_prop": 0.5934543594988494,
"repo_name": "sebrandon1/tempest",
"id": "7116913e44f5de0e1c096682afa58aa203385d54",
"size": "12377",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tempest/api/identity/admin/v2/test_roles_negative.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3618834"
},
{
"name": "Shell",
"bytes": "9310"
}
],
"symlink_target": ""
} |
import sys
from setuptools import setup
from setuptools.extension import Extension
if sys.platform.startswith("darwin"):
    # Do something different for mac
    pass

# Read the long description up front so the file handle is closed promptly,
# instead of leaking the handle left open by an inline open(...).read().
with open('README.md', 'r') as readme:
    _long_description = readme.read()

setup(
    name="travis_testing",
    version="0.0.1",
    url='http://github.com/CTPUG/pygame_cffi',
    license='MIT',
    description="A package for testing travis configurations.",
    long_description=_long_description,
    packages=['thing'],
    include_package_data=True,
    scripts=[
    ],
    setup_requires=[
        'cffi>=1.3.0',
    ],
    install_requires=[
        'cffi>=1.3.0',
    ],
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: POSIX',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
)
| {
"content_hash": "050a6136fc5ec76fe1eb871c8374c00e",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 71,
"avg_line_length": 25.743589743589745,
"alnum_prop": 0.601593625498008,
"repo_name": "drnlm/travis-testing",
"id": "9ba33c03945f306d993ae2528bc5673803cd58d8",
"size": "1004",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "484"
},
{
"name": "C++",
"bytes": "154"
},
{
"name": "Makefile",
"bytes": "122"
},
{
"name": "Python",
"bytes": "4225"
}
],
"symlink_target": ""
} |
from django.http import HttpResponseRedirect
from django.conf import settings
from django.utils.deprecation import MiddlewareMixin
from re import compile
# URL patterns that may be visited anonymously: always the login page itself,
# plus any regexes listed in settings.LOGIN_EXEMPT_URLS. Patterns are matched
# against request.path_info with the leading slash stripped (see
# LoginRequiredMiddleware.process_request below).
EXEMPT_URLS = [compile(settings.LOGIN_URL.lstrip('/'))]
if hasattr(settings, 'LOGIN_EXEMPT_URLS'):
    EXEMPT_URLS += [compile(expr) for expr in settings.LOGIN_EXEMPT_URLS]
class LoginRequiredMiddleware(MiddlewareMixin):
    """
    Middleware that requires a user to be authenticated to view any page other
    than LOGIN_URL. Exemptions to this requirement can optionally be specified
    in settings via a list of regular expressions in LOGIN_EXEMPT_URLS (which
    you can copy from your urls.py).
    Requires authentication middleware and template context processors to be
    loaded. You'll get an error if they aren't.
    """
    def process_request(self, request):
        """Redirect anonymous users to LOGIN_URL unless the path is exempt."""
        assert hasattr(request, 'user'), "The Login Required middleware\
 requires authentication middleware to be installed. Edit your\
 MIDDLEWARE_CLASSES setting to insert\
 'django.contrib.auth.middlware.AuthenticationMiddleware'. If that doesn't\
 work, ensure your TEMPLATE_CONTEXT_PROCESSORS setting includes\
 'django.core.context_processors.auth'."
        # ``is_authenticated`` is a property since Django 1.10 (the release
        # that introduced MiddlewareMixin, imported above); calling it raises
        # TypeError on Django >= 3.0, so access it as an attribute.
        if not request.user.is_authenticated:
            path = request.path_info.lstrip('/')
            if not any(m.match(path) for m in EXEMPT_URLS):
                return HttpResponseRedirect(settings.LOGIN_URL)
| {
"content_hash": "95604818e48f709190e012d3b0073b25",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 78,
"avg_line_length": 44.15625,
"alnum_prop": 0.7416843595187544,
"repo_name": "forestdussault/olc_webportalv2",
"id": "57c551840a57262921e1d1e4c977a91b6a96905a",
"size": "1413",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "olc_webportalv2/users/middleware.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8924"
},
{
"name": "HTML",
"bytes": "86876"
},
{
"name": "JavaScript",
"bytes": "839"
},
{
"name": "Python",
"bytes": "153640"
},
{
"name": "Shell",
"bytes": "9850"
}
],
"symlink_target": ""
} |
"""
The :mod:`sklearn.model_selection._split` module includes classes and
functions to split the data based on a preset strategy.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>,
# Gael Varoquaux <gael.varoquaux@normalesup.org>,
# Olivier Grisel <olivier.grisel@ensta.org>
# Raghav RV <rvraghav93@gmail.com>
# License: BSD 3 clause
from collections.abc import Iterable
import warnings
from itertools import chain, combinations
from math import ceil, floor
import numbers
from abc import ABCMeta, abstractmethod
from inspect import signature
import numpy as np
from ..utils import indexable, check_random_state, safe_indexing
from ..utils import _approximate_mode
from ..utils.validation import _num_samples, column_or_1d
from ..utils.validation import check_array
from ..utils.multiclass import type_of_target
from ..utils.fixes import comb
from ..base import _pprint
__all__ = ['BaseCrossValidator',
'KFold',
'GroupKFold',
'LeaveOneGroupOut',
'LeaveOneOut',
'LeavePGroupsOut',
'LeavePOut',
'RepeatedStratifiedKFold',
'RepeatedKFold',
'ShuffleSplit',
'GroupShuffleSplit',
'StratifiedKFold',
'StratifiedShuffleSplit',
'PredefinedSplit',
'train_test_split',
'check_cv']
class BaseCrossValidator(metaclass=ABCMeta):
    """Base class for all cross-validators
    Implementations must define `_iter_test_masks` or `_iter_test_indices`.
    """
    def split(self, X, y=None, groups=None):
        """Generate indices to split data into training and test set.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
            and n_features is the number of features.
        y : array-like, of length n_samples
            The target variable for supervised learning problems.
        groups : array-like, with shape (n_samples,), optional
            Group labels for the samples used while splitting the dataset into
            train/test set.
        Yields
        ------
        train : ndarray
            The training set indices for that split.
        test : ndarray
            The testing set indices for that split.
        """
        X, y, groups = indexable(X, y, groups)
        indices = np.arange(_num_samples(X))
        for test_index in self._iter_test_masks(X, y, groups):
            # The train set is everything not selected by the test mask.
            train_index = indices[np.logical_not(test_index)]
            test_index = indices[test_index]
            yield train_index, test_index
    # Since subclasses must implement either _iter_test_masks or
    # _iter_test_indices, neither can be abstract.
    def _iter_test_masks(self, X=None, y=None, groups=None):
        """Generates boolean masks corresponding to test sets.
        By default, delegates to _iter_test_indices(X, y, groups)
        """
        for test_index in self._iter_test_indices(X, y, groups):
            # Use the builtin ``bool`` dtype: ``np.bool`` was a deprecated
            # alias of it and was removed in NumPy 1.24, where it raises
            # AttributeError.
            test_mask = np.zeros(_num_samples(X), dtype=bool)
            test_mask[test_index] = True
            yield test_mask
    def _iter_test_indices(self, X=None, y=None, groups=None):
        """Generates integer indices corresponding to test sets."""
        raise NotImplementedError
    @abstractmethod
    def get_n_splits(self, X=None, y=None, groups=None):
        """Returns the number of splitting iterations in the cross-validator"""
    def __repr__(self):
        return _build_repr(self)
class LeaveOneOut(BaseCrossValidator):
    """Leave-One-Out cross-validator
    Provides train/test indices to split data in train/test sets. Each
    sample is used once as a test set (singleton) while the remaining
    samples form the training set.
    Note: ``LeaveOneOut()`` is equivalent to ``KFold(n_splits=n)`` and
    ``LeavePOut(p=1)`` where ``n`` is the number of samples.
    Due to the high number of test sets (which is the same as the
    number of samples) this cross-validation method can be very costly.
    For large datasets one should favor :class:`KFold`, :class:`ShuffleSplit`
    or :class:`StratifiedKFold`.
    Read more in the :ref:`User Guide <cross_validation>`.
    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.model_selection import LeaveOneOut
    >>> X = np.array([[1, 2], [3, 4]])
    >>> y = np.array([1, 2])
    >>> loo = LeaveOneOut()
    >>> loo.get_n_splits(X)
    2
    >>> print(loo)
    LeaveOneOut()
    >>> for train_index, test_index in loo.split(X):
    ...     print("TRAIN:", train_index, "TEST:", test_index)
    ...     X_train, X_test = X[train_index], X[test_index]
    ...     y_train, y_test = y[train_index], y[test_index]
    ...     print(X_train, X_test, y_train, y_test)
    TRAIN: [1] TEST: [0]
    [[3 4]] [[1 2]] [2] [1]
    TRAIN: [0] TEST: [1]
    [[1 2]] [[3 4]] [1] [2]
    See also
    --------
    LeaveOneGroupOut
        For splitting the data according to explicit, domain-specific
        stratification of the dataset.
    GroupKFold: K-fold iterator variant with non-overlapping groups.
    """
    def _iter_test_indices(self, X, y=None, groups=None):
        # Every sample index is, in turn, a singleton test set; at least
        # two samples are required for a non-empty training set.
        n_samples = _num_samples(X)
        if n_samples > 1:
            return range(n_samples)
        raise ValueError(
            'Cannot perform LeaveOneOut with n_samples={}.'.format(
                n_samples)
        )
    def get_n_splits(self, X, y=None, groups=None):
        """Returns the number of splitting iterations in the cross-validator
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
            and n_features is the number of features.
        y : object
            Always ignored, exists for compatibility.
        groups : object
            Always ignored, exists for compatibility.
        Returns
        -------
        n_splits : int
            Returns the number of splitting iterations in the cross-validator.
        """
        # One split per sample, so X is mandatory here.
        if X is None:
            raise ValueError("The 'X' parameter should not be None.")
        return _num_samples(X)
class LeavePOut(BaseCrossValidator):
    """Leave-P-Out cross-validator
    Provides train/test indices to split data in train/test sets. This results
    in testing on all distinct samples of size p, while the remaining n - p
    samples form the training set in each iteration.
    Note: ``LeavePOut(p)`` is NOT equivalent to
    ``KFold(n_splits=n_samples // p)`` which creates non-overlapping test sets.
    Due to the high number of iterations which grows combinatorically with the
    number of samples this cross-validation method can be very costly. For
    large datasets one should favor :class:`KFold`, :class:`StratifiedKFold`
    or :class:`ShuffleSplit`.
    Read more in the :ref:`User Guide <cross_validation>`.
    Parameters
    ----------
    p : int
        Size of the test sets. Must be strictly greater than the number of
        samples.
    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.model_selection import LeavePOut
    >>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
    >>> y = np.array([1, 2, 3, 4])
    >>> lpo = LeavePOut(2)
    >>> lpo.get_n_splits(X)
    6
    >>> print(lpo)
    LeavePOut(p=2)
    >>> for train_index, test_index in lpo.split(X):
    ...     print("TRAIN:", train_index, "TEST:", test_index)
    ...     X_train, X_test = X[train_index], X[test_index]
    ...     y_train, y_test = y[train_index], y[test_index]
    TRAIN: [2 3] TEST: [0 1]
    TRAIN: [1 3] TEST: [0 2]
    TRAIN: [1 2] TEST: [0 3]
    TRAIN: [0 3] TEST: [1 2]
    TRAIN: [0 2] TEST: [1 3]
    TRAIN: [0 1] TEST: [2 3]
    """
    def __init__(self, p):
        # Size of each test set.
        self.p = p
    def _iter_test_indices(self, X, y=None, groups=None):
        # Generator: the size check therefore fires lazily, on the first
        # iteration, exactly as it did before.
        n_samples = _num_samples(X)
        if self.p >= n_samples:
            raise ValueError(
                'p={} must be strictly less than the number of '
                'samples={}'.format(self.p, n_samples)
            )
        # Every size-p subset of sample indices becomes one test set.
        yield from map(np.array, combinations(range(n_samples), self.p))
    def get_n_splits(self, X, y=None, groups=None):
        """Returns the number of splitting iterations in the cross-validator
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
            and n_features is the number of features.
        y : object
            Always ignored, exists for compatibility.
        groups : object
            Always ignored, exists for compatibility.
        """
        if X is None:
            raise ValueError("The 'X' parameter should not be None.")
        # Number of splits is C(n_samples, p).
        return int(comb(_num_samples(X), self.p, exact=True))
class _BaseKFold(BaseCrossValidator, metaclass=ABCMeta):
    """Base class for KFold, GroupKFold, and StratifiedKFold"""
    @abstractmethod
    def __init__(self, n_splits, shuffle, random_state):
        # n_splits may arrive as any Integral (e.g. a NumPy integer);
        # reject non-integral values, then normalize to a builtin int.
        if not isinstance(n_splits, numbers.Integral):
            raise ValueError('The number of folds must be of Integral type. '
                             '%s of type %s was passed.'
                             % (n_splits, type(n_splits)))
        n_splits = int(n_splits)
        if n_splits <= 1:
            raise ValueError(
                "k-fold cross-validation requires at least one"
                " train/test split by setting n_splits=2 or more,"
                " got n_splits={0}.".format(n_splits))
        # Strict bool check: truthy values like 1 or "yes" are rejected.
        if not isinstance(shuffle, bool):
            raise TypeError("shuffle must be True or False;"
                            " got {0}".format(shuffle))
        self.n_splits = n_splits
        self.shuffle = shuffle
        self.random_state = random_state
    def split(self, X, y=None, groups=None):
        """Generate indices to split data into training and test set.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
            and n_features is the number of features.
        y : array-like, shape (n_samples,)
            The target variable for supervised learning problems.
        groups : array-like, with shape (n_samples,), optional
            Group labels for the samples used while splitting the dataset into
            train/test set.
        Yields
        ------
        train : ndarray
            The training set indices for that split.
        test : ndarray
            The testing set indices for that split.
        """
        X, y, groups = indexable(X, y, groups)
        n_samples = _num_samples(X)
        # Each fold must contain at least one sample.
        if self.n_splits > n_samples:
            raise ValueError(
                ("Cannot have number of splits n_splits={0} greater"
                 " than the number of samples: n_samples={1}.")
                .format(self.n_splits, n_samples))
        # Index generation itself is delegated to the subclass through
        # BaseCrossValidator.split.
        for train, test in super().split(X, y, groups):
            yield train, test
    def get_n_splits(self, X=None, y=None, groups=None):
        """Returns the number of splitting iterations in the cross-validator
        Parameters
        ----------
        X : object
            Always ignored, exists for compatibility.
        y : object
            Always ignored, exists for compatibility.
        groups : object
            Always ignored, exists for compatibility.
        Returns
        -------
        n_splits : int
            Returns the number of splitting iterations in the cross-validator.
        """
        return self.n_splits
class KFold(_BaseKFold):
    """K-Folds cross-validator
    Provides train/test indices to split data in train/test sets. Split
    dataset into k consecutive folds (without shuffling by default).
    Each fold is then used once as a validation while the k - 1 remaining
    folds form the training set.
    Read more in the :ref:`User Guide <cross_validation>`.
    Parameters
    ----------
    n_splits : int, default=5
        Number of folds. Must be at least 2.
        .. versionchanged:: 0.22
            ``n_splits`` default value changed from 3 to 5.
    shuffle : boolean, optional
        Whether to shuffle the data before splitting into batches.
    random_state : int, RandomState instance or None, optional, default=None
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`. Used when ``shuffle`` == True.
    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.model_selection import KFold
    >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
    >>> y = np.array([1, 2, 3, 4])
    >>> kf = KFold(n_splits=2)
    >>> kf.get_n_splits(X)
    2
    >>> print(kf)
    KFold(n_splits=2, random_state=None, shuffle=False)
    >>> for train_index, test_index in kf.split(X):
    ...     print("TRAIN:", train_index, "TEST:", test_index)
    ...     X_train, X_test = X[train_index], X[test_index]
    ...     y_train, y_test = y[train_index], y[test_index]
    TRAIN: [2 3] TEST: [0 1]
    TRAIN: [0 1] TEST: [2 3]
    Notes
    -----
    The first ``n_samples % n_splits`` folds have size
    ``n_samples // n_splits + 1``, other folds have size
    ``n_samples // n_splits``, where ``n_samples`` is the number of samples.
    Randomized CV splitters may return different results for each call of
    split. You can make the results identical by setting ``random_state``
    to an integer.
    See also
    --------
    StratifiedKFold
        Takes group information into account to avoid building folds with
        imbalanced class distributions (for binary or multiclass
        classification tasks).
    GroupKFold: K-fold iterator variant with non-overlapping groups.
    RepeatedKFold: Repeats K-Fold n times.
    """
    def __init__(self, n_splits=5, shuffle=False,
                 random_state=None):
        super().__init__(n_splits, shuffle, random_state)
    def _iter_test_indices(self, X, y=None, groups=None):
        n_samples = _num_samples(X)
        indices = np.arange(n_samples)
        if self.shuffle:
            check_random_state(self.random_state).shuffle(indices)
        n_splits = self.n_splits
        # Use the builtin ``int`` dtype: ``np.int`` was a deprecated alias
        # of it and was removed in NumPy 1.24, where it raises
        # AttributeError.
        fold_sizes = np.full(n_splits, n_samples // n_splits, dtype=int)
        # The first n_samples % n_splits folds get one extra sample.
        fold_sizes[:n_samples % n_splits] += 1
        current = 0
        for fold_size in fold_sizes:
            start, stop = current, current + fold_size
            yield indices[start:stop]
            current = stop
class GroupKFold(_BaseKFold):
    """K-fold iterator variant with non-overlapping groups.
    The same group will not appear in two different folds (the number of
    distinct groups has to be at least equal to the number of folds).
    The folds are approximately balanced in the sense that the number of
    distinct groups is approximately the same in each fold.
    Parameters
    ----------
    n_splits : int, default=5
        Number of folds. Must be at least 2.
        .. versionchanged:: 0.22
            ``n_splits`` default value changed from 3 to 5.
    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.model_selection import GroupKFold
    >>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
    >>> y = np.array([1, 2, 3, 4])
    >>> groups = np.array([0, 0, 2, 2])
    >>> group_kfold = GroupKFold(n_splits=2)
    >>> group_kfold.get_n_splits(X, y, groups)
    2
    >>> print(group_kfold)
    GroupKFold(n_splits=2)
    >>> for train_index, test_index in group_kfold.split(X, y, groups):
    ...     print("TRAIN:", train_index, "TEST:", test_index)
    ...     X_train, X_test = X[train_index], X[test_index]
    ...     y_train, y_test = y[train_index], y[test_index]
    ...     print(X_train, X_test, y_train, y_test)
    ...
    TRAIN: [0 1] TEST: [2 3]
    [[1 2]
     [3 4]] [[5 6]
     [7 8]] [1 2] [3 4]
    TRAIN: [2 3] TEST: [0 1]
    [[5 6]
     [7 8]] [[1 2]
     [3 4]] [3 4] [1 2]
    See also
    --------
    LeaveOneGroupOut
        For splitting the data according to explicit domain-specific
        stratification of the dataset.
    """
    def __init__(self, n_splits=5):
        # Shuffling is not supported: fold assignment is fully determined
        # by the greedy balancing below.
        super().__init__(n_splits, shuffle=False, random_state=None)
    def _iter_test_indices(self, X, y, groups):
        if groups is None:
            raise ValueError("The 'groups' parameter should not be None.")
        groups = check_array(groups, ensure_2d=False, dtype=None)
        # ``groups`` is re-encoded as integer indices into ``unique_groups``.
        unique_groups, groups = np.unique(groups, return_inverse=True)
        n_groups = len(unique_groups)
        if self.n_splits > n_groups:
            raise ValueError("Cannot have number of splits n_splits=%d greater"
                             " than the number of groups: %d."
                             % (self.n_splits, n_groups))
        # Weight groups by their number of occurrences
        n_samples_per_group = np.bincount(groups)
        # Distribute the most frequent groups first
        indices = np.argsort(n_samples_per_group)[::-1]
        n_samples_per_group = n_samples_per_group[indices]
        # Total weight of each fold
        n_samples_per_fold = np.zeros(self.n_splits)
        # Mapping from group index to fold index
        group_to_fold = np.zeros(len(unique_groups))
        # Distribute samples by adding the largest weight to the lightest fold
        # (greedy balancing: argmin picks the fold with the fewest samples so
        # far; processing groups largest-first keeps the folds balanced).
        for group_index, weight in enumerate(n_samples_per_group):
            lightest_fold = np.argmin(n_samples_per_fold)
            n_samples_per_fold[lightest_fold] += weight
            group_to_fold[indices[group_index]] = lightest_fold
        indices = group_to_fold[groups]
        for f in range(self.n_splits):
            yield np.where(indices == f)[0]
    def split(self, X, y=None, groups=None):
        """Generate indices to split data into training and test set.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
            and n_features is the number of features.
        y : array-like, shape (n_samples,), optional
            The target variable for supervised learning problems.
        groups : array-like, with shape (n_samples,)
            Group labels for the samples used while splitting the dataset into
            train/test set.
        Yields
        ------
        train : ndarray
            The training set indices for that split.
        test : ndarray
            The testing set indices for that split.
        """
        return super().split(X, y, groups)
class StratifiedKFold(_BaseKFold):
    """Stratified K-Folds cross-validator

    Provides train/test indices to split data in train/test sets.

    This cross-validation object is a variation of KFold that returns
    stratified folds. The folds are made by preserving the percentage of
    samples for each class.

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    n_splits : int, default=5
        Number of folds. Must be at least 2.

        .. versionchanged:: 0.22
            ``n_splits`` default value changed from 3 to 5.

    shuffle : boolean, optional
        Whether to shuffle each class's samples before splitting into batches.

    random_state : int, RandomState instance or None, optional, default=None
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`. Used when ``shuffle`` == True.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.model_selection import StratifiedKFold
    >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
    >>> y = np.array([0, 0, 1, 1])
    >>> skf = StratifiedKFold(n_splits=2)
    >>> skf.get_n_splits(X, y)
    2
    >>> print(skf)
    StratifiedKFold(n_splits=2, random_state=None, shuffle=False)
    >>> for train_index, test_index in skf.split(X, y):
    ...     print("TRAIN:", train_index, "TEST:", test_index)
    ...     X_train, X_test = X[train_index], X[test_index]
    ...     y_train, y_test = y[train_index], y[test_index]
    TRAIN: [1 3] TEST: [0 2]
    TRAIN: [0 2] TEST: [1 3]

    Notes
    -----
    Train and test sizes may be different in each fold, with a difference of at
    most ``n_classes``.

    See also
    --------
    RepeatedStratifiedKFold: Repeats Stratified K-Fold n times.
    """

    def __init__(self, n_splits=5, shuffle=False, random_state=None):
        super().__init__(n_splits, shuffle, random_state)

    def _make_test_folds(self, X, y=None):
        """Assign each sample to a test-fold index, preserving class balance.

        Returns an int array of shape (n_samples,) mapping every sample to
        the index of the fold in which it will serve as a test sample.
        """
        rng = check_random_state(self.random_state)
        y = np.asarray(y)
        type_of_target_y = type_of_target(y)
        allowed_target_types = ('binary', 'multiclass')
        if type_of_target_y not in allowed_target_types:
            raise ValueError(
                'Supported target types are: {}. Got {!r} instead.'.format(
                    allowed_target_types, type_of_target_y))

        y = column_or_1d(y)
        n_samples = y.shape[0]
        unique_y, y_inversed = np.unique(y, return_inverse=True)
        y_counts = np.bincount(y_inversed)
        min_groups = np.min(y_counts)
        if np.all(self.n_splits > y_counts):
            raise ValueError("n_splits=%d cannot be greater than the"
                             " number of members in each class."
                             % (self.n_splits))
        if self.n_splits > min_groups:
            warnings.warn(("The least populated class in y has only %d"
                           " members, which is too few. The minimum"
                           " number of members in any class cannot"
                           " be less than n_splits=%d."
                           % (min_groups, self.n_splits)), Warning)

        # pre-assign each sample to a test fold index using individual KFold
        # splitting strategies for each class so as to respect the balance of
        # classes
        # NOTE: Passing the data corresponding to ith class say X[y==class_i]
        # will break when the data is not 100% stratifiable for all classes.
        # So we pass np.zeros(max(c, n_splits)) as data to the KFold
        per_cls_cvs = [
            KFold(self.n_splits, shuffle=self.shuffle,
                  random_state=rng).split(np.zeros(max(count, self.n_splits)))
            for count in y_counts]

        # BUG FIX: ``np.int`` was a deprecated alias of the builtin ``int``
        # and was removed in NumPy 1.24; use the builtin directly.
        test_folds = np.zeros(n_samples, dtype=int)
        for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):
            for cls, (_, test_split) in zip(unique_y, per_cls_splits):
                cls_test_folds = test_folds[y == cls]
                # the test split can be too big because we used
                # KFold(...).split(X[:max(c, n_splits)]) when data is not 100%
                # stratifiable for all the classes
                # (we use a warning instead of raising an exception)
                # If this is the case, let's trim it:
                test_split = test_split[test_split < len(cls_test_folds)]
                cls_test_folds[test_split] = test_fold_indices
                test_folds[y == cls] = cls_test_folds

        return test_folds

    def _iter_test_masks(self, X, y=None, groups=None):
        # One boolean mask per fold, derived from the per-sample fold index.
        test_folds = self._make_test_folds(X, y)
        for i in range(self.n_splits):
            yield test_folds == i

    def split(self, X, y, groups=None):
        """Generate indices to split data into training and test set.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
            and n_features is the number of features.

            Note that providing ``y`` is sufficient to generate the splits and
            hence ``np.zeros(n_samples)`` may be used as a placeholder for
            ``X`` instead of actual training data.

        y : array-like, shape (n_samples,)
            The target variable for supervised learning problems.
            Stratification is done based on the y labels.

        groups : object
            Always ignored, exists for compatibility.

        Yields
        ------
        train : ndarray
            The training set indices for that split.

        test : ndarray
            The testing set indices for that split.

        Notes
        -----
        Randomized CV splitters may return different results for each call of
        split. You can make the results identical by setting ``random_state``
        to an integer.
        """
        y = check_array(y, ensure_2d=False, dtype=None)
        return super().split(X, y, groups)
class TimeSeriesSplit(_BaseKFold):
    """Time Series cross-validator.

    Produces successive train/test splits for samples observed at fixed
    time intervals: in the k-th of the ``n_splits`` splits, the first k
    folds form the training set and fold k+1 is the test set. Test indices
    therefore always come after training indices, so shuffling would be
    inappropriate and is not performed. Unlike standard cross-validation,
    successive training sets are supersets of the earlier ones.

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    n_splits : int, default=5
        Number of splits. Must be at least 2.

        .. versionchanged:: 0.22
            ``n_splits`` default value changed from 3 to 5.

    max_train_size : int, optional
        Maximum size for a single training set.

    Notes
    -----
    The training set has size ``i * n_samples // (n_splits + 1)
    + n_samples % (n_splits + 1)`` in the ``i``th split,
    with a test set of size ``n_samples//(n_splits + 1)``,
    where ``n_samples`` is the number of samples.
    """

    def __init__(self, n_splits=5, max_train_size=None):
        super().__init__(n_splits, shuffle=False, random_state=None)
        self.max_train_size = max_train_size

    def split(self, X, y=None, groups=None):
        """Generate indices to split data into training and test set.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
            and n_features is the number of features.

        y : array-like, shape (n_samples,)
            Always ignored, exists for compatibility.

        groups : array-like, with shape (n_samples,)
            Always ignored, exists for compatibility.

        Yields
        ------
        train : ndarray
            The training set indices for that split.

        test : ndarray
            The testing set indices for that split.
        """
        X, y, groups = indexable(X, y, groups)
        n_samples = _num_samples(X)
        n_folds = self.n_splits + 1
        if n_folds > n_samples:
            raise ValueError(
                ("Cannot have number of folds ={0} greater"
                 " than the number of samples: {1}.").format(n_folds,
                                                             n_samples))
        all_indices = np.arange(n_samples)
        fold_size = n_samples // n_folds
        # The first training block absorbs the remainder samples, so the
        # first test fold starts after fold_size + remainder positions.
        first_test = fold_size + n_samples % n_folds
        max_train = self.max_train_size
        for start in range(first_test, n_samples, fold_size):
            if max_train and max_train < start:
                # Cap the training window to the most recent samples.
                train = all_indices[start - max_train:start]
            else:
                train = all_indices[:start]
            yield train, all_indices[start:start + fold_size]
class LeaveOneGroupOut(BaseCrossValidator):
    """Leave One Group Out cross-validator.

    Each unique value in the user-supplied ``groups`` array defines one
    split: the samples belonging to that group become the test set and all
    remaining samples become the training set. Group labels can encode
    arbitrary domain-specific stratifications of the samples as integers,
    for instance the year of collection, allowing time-based splits.

    Read more in the :ref:`User Guide <cross_validation>`.
    """

    def _iter_test_masks(self, X, y, groups):
        # Yield one boolean test mask per unique group value.
        if groups is None:
            raise ValueError("The 'groups' parameter should not be None.")
        # We make a copy of groups to avoid side-effects during iteration
        groups = check_array(groups, copy=True, ensure_2d=False, dtype=None)
        distinct_groups = np.unique(groups)
        if len(distinct_groups) <= 1:
            raise ValueError(
                "The groups parameter contains fewer than 2 unique groups "
                "(%s). LeaveOneGroupOut expects at least 2." % distinct_groups)
        for group_value in distinct_groups:
            yield groups == group_value

    def get_n_splits(self, X=None, y=None, groups=None):
        """Returns the number of splitting iterations in the cross-validator

        Parameters
        ----------
        X : object
            Always ignored, exists for compatibility.

        y : object
            Always ignored, exists for compatibility.

        groups : array-like, with shape (n_samples,)
            Group labels for the samples used while splitting the dataset into
            train/test set. This 'groups' parameter must always be specified to
            calculate the number of splits, though the other parameters can be
            omitted.

        Returns
        -------
        n_splits : int
            Returns the number of splitting iterations in the cross-validator.
        """
        if groups is None:
            raise ValueError("The 'groups' parameter should not be None.")
        validated = check_array(groups, ensure_2d=False, dtype=None)
        # One split per distinct group value.
        return len(np.unique(validated))

    def split(self, X, y=None, groups=None):
        """Generate indices to split data into training and test set.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
            and n_features is the number of features.

        y : array-like, of length n_samples, optional
            The target variable for supervised learning problems.

        groups : array-like, with shape (n_samples,)
            Group labels for the samples used while splitting the dataset into
            train/test set.

        Yields
        ------
        train : ndarray
            The training set indices for that split.

        test : ndarray
            The testing set indices for that split.
        """
        return super().split(X, y, groups)
class LeavePGroupsOut(BaseCrossValidator):
    """Leave P Group(s) Out cross-validator

    Provides train/test indices to split data according to a third-party
    provided group. This group information can be used to encode arbitrary
    domain specific stratifications of the samples as integers.

    For instance the groups could be the year of collection of the samples
    and thus allow for cross-validation against time-based splits.

    The difference between LeavePGroupsOut and LeaveOneGroupOut is that
    the former builds the test sets with all the samples assigned to
    ``p`` different values of the groups while the latter uses samples
    all assigned the same groups.

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    n_groups : int
        Number of groups (``p``) to leave out in the test split.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.model_selection import LeavePGroupsOut
    >>> X = np.array([[1, 2], [3, 4], [5, 6]])
    >>> y = np.array([1, 2, 1])
    >>> groups = np.array([1, 2, 3])
    >>> lpgo = LeavePGroupsOut(n_groups=2)
    >>> lpgo.get_n_splits(X, y, groups)
    3
    >>> lpgo.get_n_splits(groups=groups)  # 'groups' is always required
    3

    See also
    --------
    GroupKFold: K-fold iterator variant with non-overlapping groups.
    """

    def __init__(self, n_groups):
        self.n_groups = n_groups

    def _iter_test_masks(self, X, y, groups):
        # Yield one boolean test mask per combination of n_groups groups.
        if groups is None:
            raise ValueError("The 'groups' parameter should not be None.")
        groups = check_array(groups, copy=True, ensure_2d=False, dtype=None)
        unique_groups = np.unique(groups)
        if self.n_groups >= len(unique_groups):
            raise ValueError(
                "The groups parameter contains fewer than (or equal to) "
                "n_groups (%d) numbers of unique groups (%s). LeavePGroupsOut "
                "expects that at least n_groups + 1 (%d) unique groups be "
                "present" % (self.n_groups, unique_groups, self.n_groups + 1))
        combi = combinations(range(len(unique_groups)), self.n_groups)
        for indices in combi:
            # BUG FIX: ``np.bool`` was a deprecated alias of the builtin
            # ``bool`` and was removed in NumPy 1.24; use the builtin.
            test_index = np.zeros(_num_samples(X), dtype=bool)
            # Renamed ambiguous loop variable ``l`` (easily confused with
            # the digit 1, PEP 8 / E741) to ``group_label``.
            for group_label in unique_groups[np.array(indices)]:
                test_index[groups == group_label] = True
            yield test_index

    def get_n_splits(self, X=None, y=None, groups=None):
        """Returns the number of splitting iterations in the cross-validator

        Parameters
        ----------
        X : object
            Always ignored, exists for compatibility.

        y : object
            Always ignored, exists for compatibility.

        groups : array-like, with shape (n_samples,)
            Group labels for the samples used while splitting the dataset into
            train/test set. This 'groups' parameter must always be specified to
            calculate the number of splits, though the other parameters can be
            omitted.

        Returns
        -------
        n_splits : int
            Returns the number of splitting iterations in the cross-validator.
        """
        if groups is None:
            raise ValueError("The 'groups' parameter should not be None.")
        groups = check_array(groups, ensure_2d=False, dtype=None)
        # Number of splits is C(n_unique_groups, n_groups).
        return int(comb(len(np.unique(groups)), self.n_groups, exact=True))

    def split(self, X, y=None, groups=None):
        """Generate indices to split data into training and test set.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
            and n_features is the number of features.

        y : array-like, of length n_samples, optional
            The target variable for supervised learning problems.

        groups : array-like, with shape (n_samples,)
            Group labels for the samples used while splitting the dataset into
            train/test set.

        Yields
        ------
        train : ndarray
            The training set indices for that split.

        test : ndarray
            The testing set indices for that split.
        """
        return super().split(X, y, groups)
class _RepeatedSplits(metaclass=ABCMeta):
    """Repeated splits for an arbitrary randomized CV splitter.

    Runs the wrapped cross-validator ``n_repeats`` times, drawing a new
    randomization from a shared random stream for every repetition.

    Parameters
    ----------
    cv : callable
        Cross-validator class.

    n_repeats : int, default=10
        Number of times cross-validator needs to be repeated.

    random_state : int, RandomState instance or None, optional, default=None
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    **cvargs : additional params
        Constructor parameters for cv. Must not contain random_state
        and shuffle.
    """

    def __init__(self, cv, n_repeats=10, random_state=None, **cvargs):
        # Validate the repetition count and forbidden kwargs up front.
        if not isinstance(n_repeats, numbers.Integral):
            raise ValueError("Number of repetitions must be of Integral type.")
        if n_repeats <= 0:
            raise ValueError("Number of repetitions must be greater than 0.")
        if any(key in cvargs for key in ('random_state', 'shuffle')):
            raise ValueError(
                "cvargs must not contain random_state or shuffle.")
        self.cv = cv
        self.n_repeats = n_repeats
        self.random_state = random_state
        self.cvargs = cvargs

    def split(self, X, y=None, groups=None):
        """Generates indices to split data into training and test set.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
            and n_features is the number of features.

        y : array-like, of length n_samples
            The target variable for supervised learning problems.

        groups : array-like, with shape (n_samples,), optional
            Group labels for the samples used while splitting the dataset into
            train/test set.

        Yields
        ------
        train : ndarray
            The training set indices for that split.

        test : ndarray
            The testing set indices for that split.
        """
        rng = check_random_state(self.random_state)
        for _ in range(self.n_repeats):
            # Each repetition instantiates a fresh splitter seeded from the
            # shared random stream, giving a different shuffling every time.
            splitter = self.cv(random_state=rng, shuffle=True, **self.cvargs)
            yield from splitter.split(X, y, groups)

    def get_n_splits(self, X=None, y=None, groups=None):
        """Returns the number of splitting iterations in the cross-validator

        Parameters
        ----------
        X : object
            Always ignored, exists for compatibility.
            ``np.zeros(n_samples)`` may be used as a placeholder.

        y : object
            Always ignored, exists for compatibility.
            ``np.zeros(n_samples)`` may be used as a placeholder.

        groups : array-like, with shape (n_samples,), optional
            Group labels for the samples used while splitting the dataset into
            train/test set.

        Returns
        -------
        n_splits : int
            Returns the number of splitting iterations in the cross-validator.
        """
        rng = check_random_state(self.random_state)
        splitter = self.cv(random_state=rng, shuffle=True, **self.cvargs)
        return splitter.get_n_splits(X, y, groups) * self.n_repeats
class RepeatedKFold(_RepeatedSplits):
    """Repeated K-Fold cross validator.

    Repeats K-Fold n times with different randomization in each repetition.

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    n_splits : int, default=5
        Number of folds. Must be at least 2.

    n_repeats : int, default=10
        Number of times cross-validator needs to be repeated.

    random_state : int, RandomState instance or None, optional, default=None
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Notes
    -----
    Randomized CV splitters may return different results for each call of
    split. You can make the results identical by setting ``random_state``
    to an integer.

    See also
    --------
    RepeatedStratifiedKFold: Repeats Stratified K-Fold n times.
    """

    def __init__(self, n_splits=5, n_repeats=10, random_state=None):
        # Delegate to the generic repeated-splits machinery with KFold as
        # the underlying splitter.
        super().__init__(KFold, n_repeats=n_repeats,
                         random_state=random_state, n_splits=n_splits)
class RepeatedStratifiedKFold(_RepeatedSplits):
    """Repeated Stratified K-Fold cross validator.

    Repeats Stratified K-Fold n times with different randomization in each
    repetition.

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    n_splits : int, default=5
        Number of folds. Must be at least 2.

    n_repeats : int, default=10
        Number of times cross-validator needs to be repeated.

    random_state : None, int or RandomState, default=None
        Random state to be used to generate random state for each
        repetition.

    Notes
    -----
    Randomized CV splitters may return different results for each call of
    split. You can make the results identical by setting ``random_state``
    to an integer.

    See also
    --------
    RepeatedKFold: Repeats K-Fold n times.
    """

    def __init__(self, n_splits=5, n_repeats=10, random_state=None):
        # Delegate to the generic repeated-splits machinery with
        # StratifiedKFold as the underlying splitter.
        super().__init__(StratifiedKFold, n_repeats=n_repeats,
                         random_state=random_state, n_splits=n_splits)
class BaseShuffleSplit(metaclass=ABCMeta):
    """Base class for ShuffleSplit and StratifiedShuffleSplit."""

    def __init__(self, n_splits=10, test_size=None, train_size=None,
                 random_state=None):
        # Store constructor parameters verbatim (scikit-learn convention);
        # validation happens lazily inside _iter_indices.
        self.n_splits = n_splits
        self.test_size = test_size
        self.train_size = train_size
        self.random_state = random_state
        self._default_test_size = 0.1

    def split(self, X, y=None, groups=None):
        """Generate indices to split data into training and test set.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
            and n_features is the number of features.

        y : array-like, shape (n_samples,)
            The target variable for supervised learning problems.

        groups : array-like, with shape (n_samples,), optional
            Group labels for the samples used while splitting the dataset into
            train/test set.

        Yields
        ------
        train : ndarray
            The training set indices for that split.

        test : ndarray
            The testing set indices for that split.

        Notes
        -----
        Randomized CV splitters may return different results for each call of
        split. You can make the results identical by setting ``random_state``
        to an integer.
        """
        X, y, groups = indexable(X, y, groups)
        yield from self._iter_indices(X, y, groups)

    @abstractmethod
    def _iter_indices(self, X, y=None, groups=None):
        """Generate (train, test) indices"""

    def get_n_splits(self, X=None, y=None, groups=None):
        """Returns the number of splitting iterations in the cross-validator

        Parameters
        ----------
        X : object
            Always ignored, exists for compatibility.

        y : object
            Always ignored, exists for compatibility.

        groups : object
            Always ignored, exists for compatibility.

        Returns
        -------
        n_splits : int
            Returns the number of splitting iterations in the cross-validator.
        """
        return self.n_splits

    def __repr__(self):
        return _build_repr(self)
class ShuffleSplit(BaseShuffleSplit):
    """Random permutation cross-validator.

    Yields indices to split data into training and test sets.

    Note: contrary to other cross-validation strategies, random splits
    do not guarantee that all folds will be different, although this is
    still very likely for sizeable datasets.

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    n_splits : int, default 10
        Number of re-shuffling & splitting iterations.

    test_size : float, int, None, default=None
        If float, should be between 0.0 and 1.0 and represent the proportion
        of the dataset to include in the test split. If int, represents the
        absolute number of test samples. If None, the value is set to the
        complement of the train size. If ``train_size`` is also None, it will
        be set to 0.1.

    train_size : float, int, or None, default=None
        If float, should be between 0.0 and 1.0 and represent the
        proportion of the dataset to include in the train split. If
        int, represents the absolute number of train samples. If None,
        the value is automatically set to the complement of the test size.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    """

    def __init__(self, n_splits=10, test_size=None, train_size=None,
                 random_state=None):
        super().__init__(
            n_splits=n_splits,
            test_size=test_size,
            train_size=train_size,
            random_state=random_state)
        self._default_test_size = 0.1

    def _iter_indices(self, X, y=None, groups=None):
        sample_count = _num_samples(X)
        n_train, n_test = _validate_shuffle_split(
            sample_count, self.test_size, self.train_size,
            default_test_size=self._default_test_size)
        rng = check_random_state(self.random_state)
        for _ in range(self.n_splits):
            # Draw a fresh random ordering of all samples; the head becomes
            # the test set and the next n_train entries the training set.
            order = rng.permutation(sample_count)
            yield order[n_test:(n_test + n_train)], order[:n_test]
class GroupShuffleSplit(ShuffleSplit):
    """Shuffle-Group(s)-Out cross-validation iterator.

    Provides randomized train/test indices that split data by a
    third-party provided ``groups`` array: whole groups go to either side
    of each split. This group information can encode arbitrary
    domain-specific stratifications of the samples as integers, e.g. the
    year of collection, allowing time-based splits.

    The difference between LeavePGroupsOut and GroupShuffleSplit is that
    the former generates splits using all subsets of size ``p`` unique
    groups, whereas GroupShuffleSplit generates a user-determined number
    of random test splits, each with a user-determined fraction of unique
    groups. For example, a less computationally intensive alternative to
    ``LeavePGroupsOut(p=10)`` would be
    ``GroupShuffleSplit(test_size=10, n_splits=100)``.

    Note: The parameters ``test_size`` and ``train_size`` refer to groups,
    and not to samples, as in ShuffleSplit.

    Parameters
    ----------
    n_splits : int (default 5)
        Number of re-shuffling & splitting iterations.

    test_size : float, int, None, optional (default=None)
        If float, should be between 0.0 and 1.0 and represent the proportion
        of the dataset to include in the test split. If int, represents the
        absolute number of test groups. If None, the value is set to the
        complement of the train size. If ``train_size`` is also None, it will
        be set to 0.2.

    train_size : float, int, or None, default is None
        If float, should be between 0.0 and 1.0 and represent the
        proportion of the groups to include in the train split. If
        int, represents the absolute number of train groups. If None,
        the value is automatically set to the complement of the test size.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    """

    def __init__(self, n_splits=5, test_size=None, train_size=None,
                 random_state=None):
        super().__init__(
            n_splits=n_splits,
            test_size=test_size,
            train_size=train_size,
            random_state=random_state)
        self._default_test_size = 0.2

    def _iter_indices(self, X, y, groups):
        if groups is None:
            raise ValueError("The 'groups' parameter should not be None.")
        groups = check_array(groups, ensure_2d=False, dtype=None)
        unique_groups, inverse = np.unique(groups, return_inverse=True)
        # Shuffle-split over the *groups* themselves, then map each group
        # position back to the indices of its member samples.
        for train_groups, test_groups in super()._iter_indices(X=unique_groups):
            yield (np.flatnonzero(np.in1d(inverse, train_groups)),
                   np.flatnonzero(np.in1d(inverse, test_groups)))

    def split(self, X, y=None, groups=None):
        """Generate indices to split data into training and test set.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
            and n_features is the number of features.

        y : array-like, shape (n_samples,), optional
            The target variable for supervised learning problems.

        groups : array-like, with shape (n_samples,)
            Group labels for the samples used while splitting the dataset into
            train/test set.

        Yields
        ------
        train : ndarray
            The training set indices for that split.

        test : ndarray
            The testing set indices for that split.

        Notes
        -----
        Randomized CV splitters may return different results for each call of
        split. You can make the results identical by setting ``random_state``
        to an integer.
        """
        return super().split(X, y, groups)
class StratifiedShuffleSplit(BaseShuffleSplit):
    """Stratified ShuffleSplit cross-validator

    Provides train/test indices to split data in train/test sets.

    This cross-validation object is a merge of StratifiedKFold and
    ShuffleSplit, which returns stratified randomized folds. The folds
    are made by preserving the percentage of samples for each class.

    Note: like the ShuffleSplit strategy, stratified random splits
    do not guarantee that all folds will be different, although this is
    still very likely for sizeable datasets.

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    n_splits : int, default 10
        Number of re-shuffling & splitting iterations.

    test_size : float, int, None, optional (default=None)
        If float, should be between 0.0 and 1.0 and represent the proportion
        of the dataset to include in the test split. If int, represents the
        absolute number of test samples. If None, the value is set to the
        complement of the train size. If ``train_size`` is also None, it will
        be set to 0.1.

    train_size : float, int, or None, default is None
        If float, should be between 0.0 and 1.0 and represent the
        proportion of the dataset to include in the train split. If
        int, represents the absolute number of train samples. If None,
        the value is automatically set to the complement of the test size.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.model_selection import StratifiedShuffleSplit
    >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4], [1, 2], [3, 4]])
    >>> y = np.array([0, 0, 0, 1, 1, 1])
    >>> sss = StratifiedShuffleSplit(n_splits=5, test_size=0.5, random_state=0)
    >>> sss.get_n_splits(X, y)
    5
    >>> print(sss)
    StratifiedShuffleSplit(n_splits=5, random_state=0, ...)
    >>> for train_index, test_index in sss.split(X, y):
    ...     print("TRAIN:", train_index, "TEST:", test_index)
    ...     X_train, X_test = X[train_index], X[test_index]
    ...     y_train, y_test = y[train_index], y[test_index]
    TRAIN: [5 2 3] TEST: [4 1 0]
    TRAIN: [5 1 4] TEST: [0 2 3]
    TRAIN: [5 0 2] TEST: [4 3 1]
    TRAIN: [4 1 0] TEST: [2 3 5]
    TRAIN: [0 5 1] TEST: [3 4 2]
    """

    def __init__(self, n_splits=10, test_size=None, train_size=None,
                 random_state=None):
        super().__init__(
            n_splits=n_splits,
            test_size=test_size,
            train_size=train_size,
            random_state=random_state)
        self._default_test_size = 0.1

    def _iter_indices(self, X, y, groups=None):
        # Core stratified sampling loop. NOTE(review): the RNG call order
        # below (two _approximate_mode calls, then one permutation per
        # class, then two final permutations, per split) determines the
        # exact splits produced; do not reorder.
        n_samples = _num_samples(X)
        y = check_array(y, ensure_2d=False, dtype=None)
        n_train, n_test = _validate_shuffle_split(
            n_samples, self.test_size, self.train_size,
            default_test_size=self._default_test_size)
        if y.ndim == 2:
            # for multi-label y, map each distinct row to a string repr
            # using join because str(row) uses an ellipsis if len(row) > 1000
            y = np.array([' '.join(row.astype('str')) for row in y])
        classes, y_indices = np.unique(y, return_inverse=True)
        n_classes = classes.shape[0]
        class_counts = np.bincount(y_indices)
        if np.min(class_counts) < 2:
            raise ValueError("The least populated class in y has only 1"
                             " member, which is too few. The minimum"
                             " number of groups for any class cannot"
                             " be less than 2.")
        # Stratification requires at least one sample of every class on
        # each side of the split.
        if n_train < n_classes:
            raise ValueError('The train_size = %d should be greater or '
                             'equal to the number of classes = %d' %
                             (n_train, n_classes))
        if n_test < n_classes:
            raise ValueError('The test_size = %d should be greater or '
                             'equal to the number of classes = %d' %
                             (n_test, n_classes))
        # Find the sorted list of instances for each class:
        # (np.unique above performs a sort, so code is O(n logn) already)
        class_indices = np.split(np.argsort(y_indices, kind='mergesort'),
                                 np.cumsum(class_counts)[:-1])
        rng = check_random_state(self.random_state)
        for _ in range(self.n_splits):
            # if there are ties in the class-counts, we want
            # to make sure to break them anew in each iteration
            # n_i: per-class number of training samples; t_i: per-class
            # number of test samples, drawn from the remaining counts.
            n_i = _approximate_mode(class_counts, n_train, rng)
            class_counts_remaining = class_counts - n_i
            t_i = _approximate_mode(class_counts_remaining, n_test, rng)
            train = []
            test = []
            for i in range(n_classes):
                permutation = rng.permutation(class_counts[i])
                perm_indices_class_i = class_indices[i].take(permutation,
                                                             mode='clip')
                # First n_i[i] shuffled members of class i go to train,
                # the next t_i[i] to test.
                train.extend(perm_indices_class_i[:n_i[i]])
                test.extend(perm_indices_class_i[n_i[i]:n_i[i] + t_i[i]])
            # Shuffle so the yielded indices are not grouped by class.
            train = rng.permutation(train)
            test = rng.permutation(test)
            yield train, test

    def split(self, X, y, groups=None):
        """Generate indices to split data into training and test set.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
            and n_features is the number of features.

            Note that providing ``y`` is sufficient to generate the splits and
            hence ``np.zeros(n_samples)`` may be used as a placeholder for
            ``X`` instead of actual training data.

        y : array-like, shape (n_samples,)
            The target variable for supervised learning problems.
            Stratification is done based on the y labels.

        groups : object
            Always ignored, exists for compatibility.

        Yields
        ------
        train : ndarray
            The training set indices for that split.

        test : ndarray
            The testing set indices for that split.

        Notes
        -----
        Randomized CV splitters may return different results for each call of
        split. You can make the results identical by setting ``random_state``
        to an integer.
        """
        y = check_array(y, ensure_2d=False, dtype=None)
        return super().split(X, y, groups)
def _validate_shuffle_split(n_samples, test_size, train_size,
default_test_size=None):
"""
Validation helper to check if the test/test sizes are meaningful wrt to the
size of the data (n_samples)
"""
if test_size is None and train_size is None:
test_size = default_test_size
test_size_type = np.asarray(test_size).dtype.kind
train_size_type = np.asarray(train_size).dtype.kind
if (test_size_type == 'i' and (test_size >= n_samples or test_size <= 0)
or test_size_type == 'f' and (test_size <= 0 or test_size >= 1)):
raise ValueError('test_size={0} should be either positive and smaller'
' than the number of samples {1} or a float in the '
'(0, 1) range'.format(test_size, n_samples))
if (train_size_type == 'i' and (train_size >= n_samples or train_size <= 0)
or train_size_type == 'f' and (train_size <= 0 or train_size >= 1)):
raise ValueError('train_size={0} should be either positive and smaller'
' than the number of samples {1} or a float in the '
'(0, 1) range'.format(train_size, n_samples))
if train_size is not None and train_size_type not in ('i', 'f'):
raise ValueError("Invalid value for train_size: {}".format(train_size))
if test_size is not None and test_size_type not in ('i', 'f'):
raise ValueError("Invalid value for test_size: {}".format(test_size))
if (train_size_type == 'f' and test_size_type == 'f' and
train_size + test_size > 1):
raise ValueError(
'The sum of test_size and train_size = {}, should be in the (0, 1)'
' range. Reduce test_size and/or train_size.'
.format(train_size + test_size))
if test_size_type == 'f':
n_test = ceil(test_size * n_samples)
elif test_size_type == 'i':
n_test = float(test_size)
if train_size_type == 'f':
n_train = floor(train_size * n_samples)
elif train_size_type == 'i':
n_train = float(train_size)
if train_size is None:
n_train = n_samples - n_test
elif test_size is None:
n_test = n_samples - n_train
if n_train + n_test > n_samples:
raise ValueError('The sum of train_size and test_size = %d, '
'should be smaller than the number of '
'samples %d. Reduce test_size and/or '
'train_size.' % (n_train + n_test, n_samples))
n_train, n_test = int(n_train), int(n_test)
if n_train == 0:
raise ValueError(
'With n_samples={}, test_size={} and train_size={}, the '
'resulting train set will be empty. Adjust any of the '
'aforementioned parameters.'.format(n_samples, test_size,
train_size)
)
return n_train, n_test
class PredefinedSplit(BaseCrossValidator):
    """Predefined split cross-validator

    Provides train/test indices to split data into train/test sets using a
    predefined scheme specified by the user with the ``test_fold`` parameter.

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    test_fold : array-like, shape (n_samples,)
        The entry ``test_fold[i]`` represents the index of the test set that
        sample ``i`` belongs to. It is possible to exclude sample ``i`` from
        any test set (i.e. include sample ``i`` in every training set) by
        setting ``test_fold[i]`` equal to -1.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.model_selection import PredefinedSplit
    >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
    >>> y = np.array([0, 0, 1, 1])
    >>> test_fold = [0, 1, -1, 1]
    >>> ps = PredefinedSplit(test_fold)
    >>> ps.get_n_splits()
    2
    >>> print(ps)
    PredefinedSplit(test_fold=array([ 0,  1, -1,  1]))
    >>> for train_index, test_index in ps.split():
    ...     print("TRAIN:", train_index, "TEST:", test_index)
    ...     X_train, X_test = X[train_index], X[test_index]
    ...     y_train, y_test = y[train_index], y[test_index]
    TRAIN: [1 2 3] TEST: [0]
    TRAIN: [0 2] TEST: [1 3]
    """

    def __init__(self, test_fold):
        # Use the builtin ``int`` dtype: the ``np.int`` alias was deprecated
        # in NumPy 1.20 and removed in NumPy 1.24, where it raises
        # AttributeError. Behavior is identical on older NumPy versions.
        self.test_fold = np.array(test_fold, dtype=int)
        self.test_fold = column_or_1d(self.test_fold)
        self.unique_folds = np.unique(self.test_fold)
        # -1 marks samples that are excluded from every test set.
        self.unique_folds = self.unique_folds[self.unique_folds != -1]

    def split(self, X=None, y=None, groups=None):
        """Generate indices to split data into training and test set.

        Parameters
        ----------
        X : object
            Always ignored, exists for compatibility.

        y : object
            Always ignored, exists for compatibility.

        groups : object
            Always ignored, exists for compatibility.

        Yields
        ------
        train : ndarray
            The training set indices for that split.

        test : ndarray
            The testing set indices for that split.
        """
        ind = np.arange(len(self.test_fold))
        for test_index in self._iter_test_masks():
            train_index = ind[np.logical_not(test_index)]
            test_index = ind[test_index]
            yield train_index, test_index

    def _iter_test_masks(self):
        """Generates boolean masks corresponding to test sets."""
        for f in self.unique_folds:
            test_index = np.where(self.test_fold == f)[0]
            # ``bool`` instead of the removed ``np.bool`` alias (NumPy 1.24).
            test_mask = np.zeros(len(self.test_fold), dtype=bool)
            test_mask[test_index] = True
            yield test_mask

    def get_n_splits(self, X=None, y=None, groups=None):
        """Returns the number of splitting iterations in the cross-validator

        Parameters
        ----------
        X : object
            Always ignored, exists for compatibility.

        y : object
            Always ignored, exists for compatibility.

        groups : object
            Always ignored, exists for compatibility.

        Returns
        -------
        n_splits : int
            Returns the number of splitting iterations in the cross-validator.
        """
        return len(self.unique_folds)
class _CVIterableWrapper(BaseCrossValidator):
    """Wrapper class for old style cv objects and iterables."""

    def __init__(self, cv):
        # Materialize the iterable once so that repeated calls to split()
        # and get_n_splits() see the same folds.
        self.cv = list(cv)

    def get_n_splits(self, X=None, y=None, groups=None):
        """Returns the number of splitting iterations in the cross-validator.

        Parameters
        ----------
        X : object
            Always ignored, exists for compatibility.

        y : object
            Always ignored, exists for compatibility.

        groups : object
            Always ignored, exists for compatibility.

        Returns
        -------
        n_splits : int
            Returns the number of splitting iterations in the cross-validator.
        """
        return len(self.cv)

    def split(self, X=None, y=None, groups=None):
        """Generate indices to split data into training and test set.

        Parameters
        ----------
        X : object
            Always ignored, exists for compatibility.

        y : object
            Always ignored, exists for compatibility.

        groups : object
            Always ignored, exists for compatibility.

        Yields
        ------
        train : ndarray
            The training set indices for that split.

        test : ndarray
            The testing set indices for that split.
        """
        # Unpack explicitly so malformed (non-pair) entries fail loudly.
        for train_index, test_index in self.cv:
            yield train_index, test_index
def check_cv(cv=5, y=None, classifier=False):
    """Input checker utility for building a cross-validator.

    Parameters
    ----------
    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:

        - None, to use the default 5-fold cross-validation,
        - integer, to specify the number of folds.
        - :term:`CV splitter`,
        - An iterable yielding (train, test) splits as arrays of indices.

        For integer/None inputs, if classifier is True and ``y`` is either
        binary or multiclass, :class:`StratifiedKFold` is used. In all other
        cases, :class:`KFold` is used.

        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.

        .. versionchanged:: 0.22
            ``cv`` default value changed from 3-fold to 5-fold.

    y : array-like, optional
        The target variable for supervised learning problems.

    classifier : boolean, optional, default False
        Whether the task is a classification task, in which case
        stratified KFold will be used.

    Returns
    -------
    checked_cv : a cross-validator instance.
        The return value is a cross-validator which generates the train/test
        splits via the ``split`` method.
    """
    if cv is None:
        cv = 5

    # Integers select a (possibly stratified) KFold with that many folds.
    if isinstance(cv, numbers.Integral):
        if (classifier and y is not None
                and type_of_target(y) in ('binary', 'multiclass')):
            return StratifiedKFold(cv)
        return KFold(cv)

    # New style cv objects are passed through unchanged. The str exclusion
    # matters: plain strings have a ``split`` method too.
    if hasattr(cv, 'split') and not isinstance(cv, str):
        return cv

    # Old style: any non-string iterable of (train, test) pairs.
    if isinstance(cv, Iterable) and not isinstance(cv, str):
        return _CVIterableWrapper(cv)

    raise ValueError("Expected cv as an integer, cross-validation "
                     "object (from sklearn.model_selection) "
                     "or an iterable. Got %s." % cv)
def train_test_split(*arrays, **options):
    """Split arrays or matrices into random train and test subsets.

    Quick utility that wraps input validation and
    ``next(ShuffleSplit().split(X, y))`` and application to input data
    into a single call for splitting (and optionally subsampling) data in a
    oneliner.

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    *arrays : sequence of indexables with same length / shape[0]
        Allowed inputs are lists, numpy arrays, scipy-sparse
        matrices or pandas dataframes.

    test_size : float, int or None, optional (default=None)
        If float, should be between 0.0 and 1.0 and represent the proportion
        of the dataset to include in the test split. If int, represents the
        absolute number of test samples. If None, the value is set to the
        complement of the train size. If ``train_size`` is also None, it will
        be set to 0.25.

    train_size : float, int, or None, (default=None)
        If float, should be between 0.0 and 1.0 and represent the
        proportion of the dataset to include in the train split. If
        int, represents the absolute number of train samples. If None,
        the value is automatically set to the complement of the test size.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    shuffle : boolean, optional (default=True)
        Whether or not to shuffle the data before splitting. If shuffle=False
        then stratify must be None.

    stratify : array-like or None (default=None)
        If not None, data is split in a stratified fashion, using this as
        the class labels.

    Returns
    -------
    splitting : list, length=2 * len(arrays)
        List containing train-test split of inputs: for each input array,
        its train subset followed by its test subset. If the input is
        sparse, the output will be a ``scipy.sparse.csr_matrix``. Else,
        output type is the same as the input type.
    """
    if not arrays:
        raise ValueError("At least one array required as input")

    test_size = options.pop('test_size', None)
    train_size = options.pop('train_size', None)
    random_state = options.pop('random_state', None)
    stratify = options.pop('stratify', None)
    shuffle = options.pop('shuffle', True)
    # Anything left over is an unknown keyword argument.
    if options:
        raise TypeError("Invalid parameters passed: %s" % str(options))

    arrays = indexable(*arrays)

    n_samples = _num_samples(arrays[0])
    n_train, n_test = _validate_shuffle_split(n_samples, test_size, train_size,
                                              default_test_size=0.25)

    if shuffle is False:
        if stratify is not None:
            raise ValueError(
                "Stratified train/test split is not implemented for "
                "shuffle=False")
        # Without shuffling the split is simply a contiguous cut.
        train = np.arange(n_train)
        test = np.arange(n_train, n_train + n_test)
    else:
        if stratify is not None:
            splitter_cls = StratifiedShuffleSplit
        else:
            splitter_cls = ShuffleSplit
        splitter = splitter_cls(test_size=n_test,
                                train_size=n_train,
                                random_state=random_state)
        train, test = next(splitter.split(X=arrays[0], y=stratify))

    # Interleave (train, test) subsets, one pair per input array.
    result = []
    for array in arrays:
        result.append(safe_indexing(array, train))
        result.append(safe_indexing(array, test))
    return result
# Tell nose that train_test_split is not a test.
# (Needed for external libraries that may use nose.)
# Nose collects any callable whose name matches its test pattern;
# setting __test__ = False opts this function out of collection.
train_test_split.__test__ = False
def _build_repr(self):
    """Build an estimator-style repr: ``ClassName(param=value, ...)``."""
    # XXX This is copied from BaseEstimator's get_params
    cls = self.__class__
    init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
    # Ignore varargs, kw and default values and pop self
    init_signature = signature(init)
    # Consider the constructor parameters excluding 'self'
    if init is object.__init__:
        args = []
    else:
        args = sorted([p.name for p in init_signature.parameters.values()
                       if p.name != 'self' and p.kind != p.VAR_KEYWORD])
    class_name = self.__class__.__name__
    params = dict()
    for key in args:
        # We need deprecation warnings to always be on in order to
        # catch deprecated param values.
        # This is set in utils/__init__.py but it gets overwritten
        # when running under python3 somehow.
        warnings.simplefilter("always", DeprecationWarning)
        try:
            with warnings.catch_warnings(record=True) as w:
                value = getattr(self, key, None)
            if len(w) and w[0].category == DeprecationWarning:
                # if the parameter is deprecated, don't show it
                continue
        finally:
            # Remove the filter installed by simplefilter() above so the
            # global warning state is left unchanged.
            warnings.filters.pop(0)
        params[key] = value

    return '%s(%s)' % (class_name, _pprint(params, offset=len(class_name)))
| {
"content_hash": "8727b9cbcefaf0b4b367dd90ad36c962",
"timestamp": "",
"source": "github",
"line_count": 2136,
"max_line_length": 79,
"avg_line_length": 36.16058052434457,
"alnum_prop": 0.5933919393052732,
"repo_name": "chrsrds/scikit-learn",
"id": "730ba12f88ad4e2308aa6fc50d24fddaf811322a",
"size": "77239",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "sklearn/model_selection/_split.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1786"
},
{
"name": "C",
"bytes": "385829"
},
{
"name": "C++",
"bytes": "139482"
},
{
"name": "Makefile",
"bytes": "1388"
},
{
"name": "PowerShell",
"bytes": "13427"
},
{
"name": "Python",
"bytes": "5255814"
},
{
"name": "Shell",
"bytes": "4031"
}
],
"symlink_target": ""
} |
import club_lib as ccl

# User-facing terminal strings.
CLUB_WELCOME_MSG = (
'''
Welcome to the Cotton Club Info Terminal.
Enter commands or -h.
''')
CLUB_CLOSE_MSG = 'Bye!'

print (CLUB_WELCOME_MSG)

# Create club object (Open Club)
cotton_club = ccl.Club('Cotton Club')

# Add random visitors
cotton_club.add_visitors_randoom(10)

# Notify about played music
cotton_club.played_music_notify()

# show club and visitor status
print(cotton_club.show_status())

# input command to update information or status.
input_command = ''
while True:
    # Ctrl-C / Ctrl-D terminate the session cleanly instead of crashing.
    try:
        input_command = input('enter command: ')
    except KeyboardInterrupt:
        break
    except EOFError:
        break
    if input_command == '-h':
        print('''
Commands List:
-s Show information about visitors and play music.
-m Change played music to next track.
-r Random change visitors.
-e Exit terminal.
''')
    elif input_command == '-s':
        print (cotton_club.show_status())
    elif input_command == '-m':
        # Advance to the next track, then show the updated status.
        cotton_club.change_track()
        print (cotton_club.show_status())
    elif input_command == '-r':
        # Replace the visitor list with 10 new random visitors.
        cotton_club.change_visitiors(10)
        print (cotton_club.show_status())
    elif input_command == '-e':
        break

print(CLUB_CLOSE_MSG)
| {
"content_hash": "365626cd3f3596297ce8328082defe3e",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 69,
"avg_line_length": 23.29824561403509,
"alnum_prop": 0.6024096385542169,
"repo_name": "radisvaliullin/test_task_0",
"id": "ff80356852a68335cac6d7bc9233ca60862940d7",
"size": "1601",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "club_run.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "11137"
}
],
"symlink_target": ""
} |
from marshmallow import post_load
from marshmallow_sqlalchemy import field_for
from models import ma, User, Device, Measurement
class UserSchema(ma.Schema):
    """Marshmallow schema (de)serializing ``User`` model instances."""

    # Primary key is read-only: emitted on dump, ignored on load.
    id = field_for(User, 'id', dump_only=True)

    class Meta:
        # Fields to expose
        fields = ('id', 'name', 'username')
        model = User

    @post_load
    def make_user(self, data):
        # Turn the validated payload directly into a model instance.
        return User(**data)
class DeviceSchema(ma.Schema):
    """Marshmallow schema (de)serializing ``Device`` model instances."""

    # Primary key is read-only: emitted on dump, ignored on load.
    id = field_for(Device, 'id', dump_only=True)

    class Meta:
        # Fields to expose
        fields = ('id', 'model', 'manufacturerID')
        model = Device

    @post_load
    def make_device(self, data):
        # Turn the validated payload directly into a model instance.
        return Device(**data)
class MeasurementSchema(ma.Schema):
    """Marshmallow schema (de)serializing ``Measurement`` model instances."""

    # Primary key is read-only: emitted on dump, ignored on load.
    id = field_for(Measurement, 'id', dump_only=True)

    class Meta:
        # Fields to expose
        # NOTE(review): unlike the sibling schemas, no ``fields`` tuple is
        # declared here, so no explicit field whitelist applies — confirm
        # whether that is intentional.
        model = Measurement

    @post_load
    def make_measurement(self, data):
        # Turn the validated payload directly into a model instance.
        return Measurement(**data)
| {
"content_hash": "8e8d1173496872d1278e8d4ba3be2370",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 53,
"avg_line_length": 23.58974358974359,
"alnum_prop": 0.6206521739130435,
"repo_name": "KMSkelton/cgm_flask",
"id": "0d3ab4fa9eb8f307c28999aaf0b29e8a745bff5c",
"size": "920",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "schema.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "562"
},
{
"name": "HTML",
"bytes": "1588"
},
{
"name": "JavaScript",
"bytes": "6721"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "22466"
}
],
"symlink_target": ""
} |
"""
pdbtm.py
Basic parsing for the PDBTM:Protein Data Bank of Transmembrane Proteins. This is currently only used to extract a list of PDB identifiers.
Created by Shane O'Connor 2014
"""
# We use xml.etree.ElementTree at present which runs slowly compared to xml.sax but the code is quicker to read/write.
import io
from lxml import etree
from klab.parsers.xml import fast_iter
import re
class record_iterator(object):
    '''This class is deprecated by PDBTM.get_xml.'''

    def __init__(self, xml_contents):
        # Pull out every raw <pdbtm ...>...</pdbtm> record as one string.
        self.records = re.findall('(<pdbtm\s*.*?</pdbtm>)', xml_contents, re.DOTALL)

    def get(self, pdb_id):
        # Case-insensitive lookup of a record by its ID attribute; returns
        # None when no record matches.
        wanted = pdb_id.upper()
        for record in self.records:
            match = re.match(r'<pdbtm.*?ID="(.*?)".*>', record, re.DOTALL)
            assert(match)
            if match.group(1).upper() == wanted:
                return record
class EarlyOut(Exception):
    """Control-flow exception used to abort fast_iter once the sought
    record has been found (raised in _get_membrane_xml/_get_xml and
    caught by their public wrappers)."""
    pass
class PDBTM(object):
    """Parser for the PDBTM transmembrane-protein XML database."""

    # Namespace-qualified tag names as produced by lxml for the PDBTM schema.
    PDBTM_entry_tag_type = '{http://pdbtm.enzim.hu}pdbtm'
    PDBTM_membrane_tag_type = '{http://pdbtm.enzim.hu}MEMBRANE'
    PDBTM_rawres_tag_type = '{http://pdbtm.enzim.hu}RAWRES'
    PDBTM_tmtype_tag_type = '{http://pdbtm.enzim.hu}TMTYPE'

    non_transmembrane_tmtypes = set(['Soluble', 'No_Protein', 'Nucleotide', 'Virus', 'Pilus', 'Ca_Globular', 'Tm_Part'])
    transmembrane_tmtypes = set(['Tm_Alpha', 'Tm_Beta', 'Tm_Coil', 'Tm_Ca'])

    def __init__(self, xml_contents, restrict_to_transmembrane_proteins = True):
        # NOTE(review): the iterparse calls below wrap xml_contents in
        # io.BytesIO, which requires bytes — confirm callers pass bytes
        # rather than a Python 3 str.
        self.xml_contents = xml_contents.strip()
        # At some point, this tag crept into the PDBTM XML which the parser below cannot handle
        self.xml_contents = self.xml_contents.replace('''<?xml version="1.0"?>''', '')
        self.restrict_to_transmembrane_proteins = restrict_to_transmembrane_proteins

    @staticmethod
    def _get_tm_type(elem):
        """Descend <pdbtm> -> <RAWRES> -> <TMTYPE> and return its text,
        or 'N/A' when the entry carries no TMTYPE."""
        for child in elem:
            if child.tag == PDBTM.PDBTM_rawres_tag_type:
                for gchild in child:
                    if gchild.tag == PDBTM.PDBTM_tmtype_tag_type:
                        return gchild.text.strip()
        return 'N/A'

    def _get_pdb_id(self, elem, **kwargs):
        '''If self.restrict_to_transmembrane_proteins is False then this adds all ids to self.ids. Otherwise, only transmembrane protein ids are added.'''
        id = elem.attrib['ID']
        if self.restrict_to_transmembrane_proteins:
            tmp = elem.attrib['TMP']
            assert(tmp == 'no' or tmp == 'yes' or tmp == 'not')
            if tmp == 'yes':
                # Map the id to its transmembrane type string.
                self.ids[id] = PDBTM._get_tm_type(elem)
        else:
            # Count occurrences so duplicate records can be detected.
            self.ids[id] = self.ids.get(id, 0) + 1

    def get_pdb_ids(self):
        '''Returns the sorted list of PDB IDs from the records.'''
        return sorted(self.get_pdb_id_map().keys())

    def get_pdb_id_map(self):
        ''' Returns a dict mapping PDB IDs to:
                i) their number of associated records, if self.restrict_to_transmembrane_proteins is False;
                ii) the type of transmembrane protein if self.restrict_to_transmembrane_proteins is True.
            At the time of writing this (2014-12-03), there were 106,094 PDB IDs and 106,090 unique IDs.
            These records had duplicate entries: '2amk', '2ar1', '3b4r', '4k5y'.'''
        self.ids = {}
        context = etree.iterparse(io.BytesIO(self.xml_contents), events=('end',), tag=self.PDBTM_entry_tag_type)
        fast_iter(context, self._get_pdb_id)
        return self.ids

    def _get_membrane_xml(self, elem, pdb_id):
        """fast_iter callback: capture the <MEMBRANE> subtree of pdb_id."""
        assert(elem.tag == self.PDBTM_entry_tag_type)
        id = elem.attrib['ID'] or ''
        if id.upper() == pdb_id:
            for child in elem:
                if child.tag == self.PDBTM_membrane_tag_type:
                    self.tmp_string = etree.tostring(child)
                    # Abort the iterparse early; caught by get_membrane_xml.
                    raise EarlyOut()

    def get_membrane_xml(self, pdb_id):
        ''' Returns the <MEMBRANE> tag XML for pdb_id if the tag exists.'''
        self.tmp_string = None
        context = etree.iterparse(io.BytesIO(self.xml_contents), events=('end',), tag=self.PDBTM_entry_tag_type)
        try:
            fast_iter(context, self._get_membrane_xml, pdb_id = pdb_id.upper())
        except EarlyOut: pass
        return self.tmp_string

    def _get_xml(self, elem, pdb_id):
        """fast_iter callback: capture the whole <pdbtm> entry of pdb_id."""
        assert(elem.tag == self.PDBTM_entry_tag_type)
        id = elem.attrib['ID'] or ''
        if id.upper() == pdb_id:
            self.tmp_string = etree.tostring(elem)
            # Abort the iterparse early; caught by get_xml.
            raise EarlyOut()

    def get_xml(self, pdb_id):
        ''' Returns the XML for pdb_id if the tag exists.'''
        self.tmp_string = None
        context = etree.iterparse(io.BytesIO(self.xml_contents), events=('end',), tag=self.PDBTM_entry_tag_type)
        try:
            fast_iter(context, self._get_xml, pdb_id = pdb_id.upper())
        except EarlyOut: pass
        return self.tmp_string
| {
"content_hash": "a6d279c851041ded87e1a4b207edda0c",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 154,
"avg_line_length": 38.854838709677416,
"alnum_prop": 0.6044001660440017,
"repo_name": "Kortemme-Lab/klab",
"id": "fdf92e39ac3c1a4269d6ee1789b50a786fba2c39",
"size": "4854",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "klab/bio/pdbtm.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Perl",
"bytes": "62782"
},
{
"name": "Python",
"bytes": "2074156"
},
{
"name": "R",
"bytes": "4487"
},
{
"name": "Shell",
"bytes": "4382"
},
{
"name": "TeX",
"bytes": "2107"
}
],
"symlink_target": ""
} |
class ICAPResponseHeader (object):
    """Value object holding the parsed parts of an ICAP response header."""

    __slots__ = ['info', 'version', 'code', 'status', 'headers', 'header_string', 'offsets', 'content_length', 'body_complete']

    def __init__ (self, version, code, status, headers, header_string, offsets, content_length, body_complete):
        # Individual status-line components ...
        self.version = version
        self.code = code
        self.status = status
        # ... plus the same three bundled as a convenience tuple.
        self.info = version, code, status
        self.headers = headers
        self.header_string = header_string
        self.offsets = offsets
        self.content_length = content_length
        self.body_complete = body_complete
class ICAPResponseHeaderFactory (object):
    """Factory producing ICAPResponseHeader instances."""

    def __init__ (self, configuration):
        # Stored for parity with other factories; create() does not read it.
        self.configuration = configuration

    def create (self, version, code, status, headers, header_string, offsets, content_length, body_complete):
        """Build a new ICAPResponseHeader from the parsed header parts."""
        return ICAPResponseHeader(version, code, status, headers, header_string, offsets, content_length, body_complete)
| {
"content_hash": "785c803a8b883c065a55b62ea28fd01b",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 124,
"avg_line_length": 39.90909090909091,
"alnum_prop": 0.7300683371298405,
"repo_name": "PrFalken/exaproxy",
"id": "2fab7151d9265f4268a39b884681b4f519af3cf9",
"size": "878",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "lib/exaproxy/icap/header.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "158278"
},
{
"name": "Python",
"bytes": "479454"
},
{
"name": "Shell",
"bytes": "6599"
}
],
"symlink_target": ""
} |
"""Definition of various recurrent neural network cells."""
from __future__ import print_function
import warnings
import functools
from .. import symbol, init, ndarray
from ..base import string_types, numeric_types
def _cells_state_shape(cells):
return sum([c.state_shape for c in cells], [])
def _cells_state_info(cells):
return sum([c.state_info for c in cells], [])
def _cells_begin_state(cells, **kwargs):
return sum([c.begin_state(**kwargs) for c in cells], [])
def _cells_unpack_weights(cells, args):
for cell in cells:
args = cell.unpack_weights(args)
return args
def _cells_pack_weights(cells, args):
for cell in cells:
args = cell.pack_weights(args)
return args
def _normalize_sequence(length, inputs, layout, merge, in_layout=None):
    """Normalize ``inputs`` to either a list of per-step symbols
    (``merge=False``) or a single symbol with a time axis (``merge=True``),
    returning ``(inputs, axis)`` where ``axis`` is the time axis implied
    by ``layout``.
    """
    assert inputs is not None, \
        "unroll(inputs=None) has been deprecated. " \
        "Please create input variables outside unroll."

    # Position of the time dimension 'T' in the requested layout, and in
    # the layout the inputs currently use (defaults to the same).
    axis = layout.find('T')
    in_axis = in_layout.find('T') if in_layout is not None else axis
    if isinstance(inputs, symbol.Symbol):
        if merge is False:
            assert len(inputs.list_outputs()) == 1, \
                "unroll doesn't allow grouped symbol as input. Please convert " \
                "to list with list(inputs) first or let unroll handle splitting."
            # Split along the time axis into ``length`` per-step symbols.
            inputs = list(symbol.split(inputs, axis=in_axis, num_outputs=length,
                                       squeeze_axis=1))
    else:
        assert length is None or len(inputs) == length
        if merge is True:
            # Re-stack the per-step symbols along a new time axis.
            inputs = [symbol.expand_dims(i, axis=axis) for i in inputs]
            inputs = symbol.Concat(*inputs, dim=axis)
            in_axis = axis

    if isinstance(inputs, symbol.Symbol) and axis != in_axis:
        # The merged symbol's time axis differs from the requested one.
        inputs = symbol.swapaxes(inputs, dim0=axis, dim1=in_axis)

    return inputs, axis
class RNNParams(object):
    """Container for holding variables.
    Used by RNN cells for parameter sharing between cells.

    Parameters
    ----------
    prefix : str
        Names of all variables created by this container will
        be prepended with prefix.
    """
    def __init__(self, prefix=''):
        self._prefix = prefix
        self._params = {}

    def get(self, name, **kwargs):
        """Get the variable given a name if one exists or create a new one if missing.

        Parameters
        ----------
        name : str
            name of the variable
        **kwargs :
            more arguments that's passed to symbol.Variable
        """
        full_name = self._prefix + name
        # Lazily create the variable on first request; later calls with the
        # same name return the cached symbol (kwargs only apply on creation).
        param = self._params.get(full_name)
        if param is None:
            param = symbol.Variable(full_name, **kwargs)
            self._params[full_name] = param
        return param
class BaseRNNCell(object):
"""Abstract base class for RNN cells
Parameters
----------
prefix : str, optional
Prefix for names of layers
(this prefix is also used for names of weights if `params` is None
i.e. if `params` are being created and not reused)
params : RNNParams, default None.
Container for weight sharing between cells.
A new RNNParams container is created if `params` is None.
"""
def __init__(self, prefix='', params=None):
if params is None:
params = RNNParams(prefix)
self._own_params = True
else:
self._own_params = False
self._prefix = prefix
self._params = params
self._modified = False
self.reset()
def reset(self):
"""Reset before re-using the cell for another graph."""
self._init_counter = -1
self._counter = -1
if hasattr(self, '_cells'):
for cell in self._cells:
cell.reset()
def __call__(self, inputs, states):
"""Unroll the RNN for one time step.
Parameters
----------
inputs : sym.Variable
input symbol, 2D, batch * num_units
states : list of sym.Variable
RNN state from previous step or the output of begin_state().
Returns
-------
output : Symbol
Symbol corresponding to the output from the RNN when unrolling
for a single time step.
states : nested list of Symbol
The new state of this RNN after this unrolling.
The type of this symbol is same as the output of begin_state().
This can be used as input state to the next time step
of this RNN.
See Also
--------
begin_state: This function can provide the states for the first time step.
unroll: This function unrolls an RNN for a given number of (>=1) time steps.
"""
raise NotImplementedError()
@property
def params(self):
"""Parameters of this cell"""
self._own_params = False
return self._params
@property
def state_info(self):
"""shape and layout information of states"""
raise NotImplementedError()
@property
def state_shape(self):
"""shape(s) of states"""
return [ele['shape'] for ele in self.state_info]
@property
def _gate_names(self):
"""name(s) of gates"""
return ()
def begin_state(self, func=symbol.zeros, **kwargs):
"""Initial state for this cell.
Parameters
----------
func : callable, default symbol.zeros
Function for creating initial state. Can be symbol.zeros,
symbol.uniform, symbol.Variable etc.
Use symbol.Variable if you want to directly
feed input as states.
**kwargs :
more keyword arguments passed to func. For example
mean, std, dtype, etc.
Returns
-------
states : nested list of Symbol
Starting states for the first RNN step.
"""
assert not self._modified, \
"After applying modifier cells (e.g. DropoutCell) the base " \
"cell cannot be called directly. Call the modifier cell instead."
states = []
for info in self.state_info:
self._init_counter += 1
if info is None:
state = func(name='%sbegin_state_%d'%(self._prefix, self._init_counter),
**kwargs)
else:
kwargs.update(info)
state = func(name='%sbegin_state_%d'%(self._prefix, self._init_counter),
**kwargs)
states.append(state)
return states
def unpack_weights(self, args):
"""Unpack fused weight matrices into separate
weight matrices.
For example, say you use a module object `mod` to run a network that has an lstm cell.
In `mod.get_params()[0]`, the lstm parameters are all represented as a single big vector.
`cell.unpack_weights(mod.get_params()[0])` will unpack this vector into a dictionary of
more readable lstm parameters - c, f, i, o gates for i2h (input to hidden) and
h2h (hidden to hidden) weights.
Parameters
----------
args : dict of str -> NDArray
Dictionary containing packed weights.
usually from `Module.get_params()[0]`.
Returns
-------
args : dict of str -> NDArray
Dictionary with unpacked weights associated with
this cell.
See Also
--------
pack_weights: Performs the reverse operation of this function.
"""
args = args.copy()
if not self._gate_names:
return args
h = self._num_hidden
for group_name in ['i2h', 'h2h']:
weight = args.pop('%s%s_weight'%(self._prefix, group_name))
bias = args.pop('%s%s_bias' % (self._prefix, group_name))
for j, gate in enumerate(self._gate_names):
wname = '%s%s%s_weight' % (self._prefix, group_name, gate)
args[wname] = weight[j*h:(j+1)*h].copy()
bname = '%s%s%s_bias' % (self._prefix, group_name, gate)
args[bname] = bias[j*h:(j+1)*h].copy()
return args
def pack_weights(self, args):
"""Pack separate weight matrices into a single packed
weight.
Parameters
----------
args : dict of str -> NDArray
Dictionary containing unpacked weights.
Returns
-------
args : dict of str -> NDArray
Dictionary with packed weights associated with
this cell.
"""
args = args.copy()
if not self._gate_names:
return args
for group_name in ['i2h', 'h2h']:
weight = []
bias = []
for gate in self._gate_names:
wname = '%s%s%s_weight'%(self._prefix, group_name, gate)
weight.append(args.pop(wname))
bname = '%s%s%s_bias'%(self._prefix, group_name, gate)
bias.append(args.pop(bname))
args['%s%s_weight'%(self._prefix, group_name)] = ndarray.concatenate(weight)
args['%s%s_bias'%(self._prefix, group_name)] = ndarray.concatenate(bias)
return args
def unroll(self, length, inputs, begin_state=None, layout='NTC', merge_outputs=None):
"""Unroll an RNN cell across time steps.
Parameters
----------
length : int
Number of steps to unroll.
inputs : Symbol, list of Symbol, or None
If `inputs` is a single Symbol (usually the output
of Embedding symbol), it should have shape
(batch_size, length, ...) if layout == 'NTC',
or (length, batch_size, ...) if layout == 'TNC'.
If `inputs` is a list of symbols (usually output of
previous unroll), they should all have shape
(batch_size, ...).
begin_state : nested list of Symbol, default None
Input states created by `begin_state()`
or output state of another cell.
Created from `begin_state()` if None.
layout : str, optional
`layout` of input symbol. Only used if inputs
is a single Symbol.
merge_outputs : bool, optional
If False, return outputs as a list of Symbols.
If True, concatenate output across time steps
and return a single symbol with shape
(batch_size, length, ...) if layout == 'NTC',
or (length, batch_size, ...) if layout == 'TNC'.
If None, output whatever is faster.
Returns
-------
outputs : list of Symbol or Symbol
Symbol (if `merge_outputs` is True) or list of Symbols
(if `merge_outputs` is False) corresponding to the output from
the RNN from this unrolling.
states : nested list of Symbol
The new state of this RNN after this unrolling.
The type of this symbol is same as the output of begin_state().
"""
self.reset()
inputs, _ = _normalize_sequence(length, inputs, layout, False)
if begin_state is None:
begin_state = self.begin_state()
states = begin_state
outputs = []
for i in range(length):
output, states = self(inputs[i], states)
outputs.append(output)
outputs, _ = _normalize_sequence(length, outputs, layout, merge_outputs)
return outputs, states
#pylint: disable=no-self-use
def _get_activation(self, inputs, activation, **kwargs):
    """Apply *activation* to *inputs*, resolving string names.

    A string is mapped to ``symbol.Activation`` with that ``act_type``;
    anything else is assumed callable and invoked directly.
    """
    if not isinstance(activation, string_types):
        return activation(inputs, **kwargs)
    return symbol.Activation(inputs, act_type=activation, **kwargs)
class RNNCell(BaseRNNCell):
    """Plain (Elman-style) recurrent cell: h' = act(W_i*x + b_i + W_h*h + b_h).

    Parameters
    ----------
    num_hidden : int
        Number of units in the output symbol.
    activation : str or Symbol, default 'tanh'
        Activation type; 'relu' and 'tanh' are the supported string options.
    prefix : str, default 'rnn_'
        Prefix for layer names (and weight names when params is None).
    params : RNNParams, default None
        Container for weight sharing between cells; created when None.
    """
    def __init__(self, num_hidden, activation='tanh', prefix='rnn_', params=None):
        super(RNNCell, self).__init__(prefix=prefix, params=params)
        self._num_hidden = num_hidden
        self._activation = activation
        # Input-to-hidden and hidden-to-hidden affine parameters.
        self._iW = self.params.get('i2h_weight')
        self._iB = self.params.get('i2h_bias')
        self._hW = self.params.get('h2h_weight')
        self._hB = self.params.get('h2h_bias')

    @property
    def state_info(self):
        # Single hidden state of shape (batch, num_hidden); batch inferred (0).
        return [{'shape': (0, self._num_hidden), '__layout__': 'NC'}]

    @property
    def _gate_names(self):
        # A vanilla RNN has one unnamed gate.
        return ('',)

    def __call__(self, inputs, states):
        self._counter += 1
        step_prefix = '%st%d_'%(self._prefix, self._counter)
        in_proj = symbol.FullyConnected(data=inputs, weight=self._iW, bias=self._iB,
                                        num_hidden=self._num_hidden,
                                        name='%si2h'%step_prefix)
        rec_proj = symbol.FullyConnected(data=states[0], weight=self._hW, bias=self._hB,
                                         num_hidden=self._num_hidden,
                                         name='%sh2h'%step_prefix)
        next_h = self._get_activation(in_proj + rec_proj, self._activation,
                                      name='%sout'%step_prefix)
        return next_h, [next_h]
class LSTMCell(BaseRNNCell):
    """Long-Short Term Memory (LSTM) network cell.

    Parameters
    ----------
    num_hidden : int
        Number of units in output symbol.
    prefix : str, default 'lstm_'
        Prefix for name of layers (and name of weight if params is None).
    params : RNNParams, default None
        Container for weight sharing between cells. Created if None.
    forget_bias : float, default 1.0
        Bias added to the forget gate.
        Jozefowicz et al. 2015 recommends setting this to 1.0.
    cublas_algo_verbose : bool, default False
        Log the cuBLAS GEMM algorithm choices.
    cublas_off : bool, default False
        Disable the explicit cuBLAS control path.
    cublas_tensor_core : bool or None, default None
        Whether to request tensor-core math for the GEMMs.
    cublas_algo_fwd, cublas_algo_bwd_data, cublas_algo_bwd_weights : int or None
        Force specific cuBLAS algorithms for the forward / backward-data /
        backward-weights GEMMs (None lets cuBLAS choose).
    cublas_algo_fwd_prec, cublas_algo_bwd_prec : str, default 'None'
        Precision overrides for forward / backward GEMMs.
    """
    def __init__(self, num_hidden, prefix='lstm_', params=None, forget_bias=1.0,
                 cublas_algo_verbose=False, cublas_off=False, cublas_tensor_core=None,
                 cublas_algo_fwd=None, cublas_algo_bwd_data=None, cublas_algo_bwd_weights=None,
                 cublas_algo_fwd_prec='None', cublas_algo_bwd_prec='None'):
        super(LSTMCell, self).__init__(prefix=prefix, params=params)
        self._num_hidden = num_hidden
        # parameters to give detailed control over internal GEMM behavior
        self._cublas_algo_verbose = cublas_algo_verbose
        self._cublas_off = cublas_off
        self._cublas_tensor_core = cublas_tensor_core
        self._cublas_algo_fwd = cublas_algo_fwd
        self._cublas_algo_bwd_data = cublas_algo_bwd_data
        self._cublas_algo_bwd_weights = cublas_algo_bwd_weights
        self._cublas_algo_fwd_prec = cublas_algo_fwd_prec
        self._cublas_algo_bwd_prec = cublas_algo_bwd_prec
        self._iW = self.params.get('i2h_weight')
        self._hW = self.params.get('h2h_weight')
        # we add the forget_bias to i2h_bias, this adds the bias to the forget gate activation
        self._iB = self.params.get('i2h_bias', init=init.LSTMBias(forget_bias=forget_bias))
        self._hB = self.params.get('h2h_bias')

    @property
    def state_info(self):
        # Hidden state and cell state; batch dim (0) inferred at bind time.
        return [{'shape': (0, self._num_hidden), '__layout__': 'NC'},
                {'shape': (0, self._num_hidden), '__layout__': 'NC'}]

    @property
    def _gate_names(self):
        return ['_i', '_f', '_c', '_o']

    def _cublas_kwargs(self):
        """Collect the cuBLAS GEMM control kwargs shared by both projections.

        Factored out so the i2h and h2h FullyConnected calls cannot drift
        apart (previously all eight knobs were duplicated at both call sites).
        """
        return {'cublas_algo_verbose': self._cublas_algo_verbose,
                'cublas_off': self._cublas_off,
                'cublas_tensor_core': self._cublas_tensor_core,
                'cublas_algo_fwd': self._cublas_algo_fwd,
                'cublas_algo_bwd_data': self._cublas_algo_bwd_data,
                'cublas_algo_bwd_weights': self._cublas_algo_bwd_weights,
                'cublas_algo_fwd_prec': self._cublas_algo_fwd_prec,
                'cublas_algo_bwd_prec': self._cublas_algo_bwd_prec}

    def __call__(self, inputs, states):
        self._counter += 1
        name = '%st%d_'%(self._prefix, self._counter)
        # Fused projections: a single GEMM per source computes all four gates.
        i2h = symbol.FullyConnected(data=inputs, weight=self._iW, bias=self._iB,
                                    num_hidden=self._num_hidden*4,
                                    name='%si2h'%name,
                                    **self._cublas_kwargs())
        h2h = symbol.FullyConnected(data=states[0], weight=self._hW, bias=self._hB,
                                    num_hidden=self._num_hidden*4,
                                    name='%sh2h'%name,
                                    **self._cublas_kwargs())
        gates = i2h + h2h
        slice_gates = symbol.SliceChannel(gates, num_outputs=4,
                                          name="%sslice"%name)
        in_gate = symbol.Activation(slice_gates[0], act_type="sigmoid",
                                    name='%si'%name)
        forget_gate = symbol.Activation(slice_gates[1], act_type="sigmoid",
                                        name='%sf'%name)
        in_transform = symbol.Activation(slice_gates[2], act_type="tanh",
                                         name='%sc'%name)
        out_gate = symbol.Activation(slice_gates[3], act_type="sigmoid",
                                     name='%so'%name)
        # c' = f * c + i * g ;  h' = o * tanh(c')
        next_c = symbol._internal._plus(forget_gate * states[1], in_gate * in_transform,
                                        name='%sstate'%name)
        next_h = symbol._internal._mul(out_gate, symbol.Activation(next_c, act_type="tanh"),
                                       name='%sout'%name)
        return next_h, [next_h, next_c]
class GRUCell(BaseRNNCell):
    """Gated Recurrent Unit (GRU) network cell.

    Note: this follows the cuDNN variant of the GRU (a slight modification
    of Cho et al. 2014: the reset gate scales the already-projected
    previous hidden state).

    Parameters
    ----------
    num_hidden : int
        Number of units in the output symbol.
    prefix : str, default 'gru_'
        Prefix for layer names (and weight names when params is None).
    params : RNNParams, default None
        Container for weight sharing between cells; created when None.
    """
    def __init__(self, num_hidden, prefix='gru_', params=None):
        super(GRUCell, self).__init__(prefix=prefix, params=params)
        self._num_hidden = num_hidden
        # Fused input/recurrent projections covering all three gates.
        self._iW = self.params.get("i2h_weight")
        self._iB = self.params.get("i2h_bias")
        self._hW = self.params.get("h2h_weight")
        self._hB = self.params.get("h2h_bias")

    @property
    def state_info(self):
        # Single hidden state of shape (batch, num_hidden).
        return [{'shape': (0, self._num_hidden),
                 '__layout__': 'NC'}]

    @property
    def _gate_names(self):
        return ['_r', '_z', '_o']

    def __call__(self, inputs, states):
        # pylint: disable=too-many-locals
        self._counter += 1
        name = '%st%d_' % (self._prefix, self._counter)
        prev_h = states[0]
        # One FC per source computes all three gates at once; sliced below.
        i2h = symbol.FullyConnected(data=inputs,
                                    weight=self._iW,
                                    bias=self._iB,
                                    num_hidden=self._num_hidden * 3,
                                    name="%s_i2h" % name)
        h2h = symbol.FullyConnected(data=prev_h,
                                    weight=self._hW,
                                    bias=self._hB,
                                    num_hidden=self._num_hidden * 3,
                                    name="%s_h2h" % name)
        i2h_r, i2h_z, i2h = symbol.SliceChannel(i2h, num_outputs=3, name="%s_i2h_slice" % name)
        h2h_r, h2h_z, h2h = symbol.SliceChannel(h2h, num_outputs=3, name="%s_h2h_slice" % name)
        reset_gate = symbol.Activation(i2h_r + h2h_r, act_type="sigmoid",
                                       name="%s_r_act" % name)
        update_gate = symbol.Activation(i2h_z + h2h_z, act_type="sigmoid",
                                        name="%s_z_act" % name)
        # cuDNN formulation: reset gate applied to the projected prev state.
        next_h_tmp = symbol.Activation(i2h + reset_gate * h2h, act_type="tanh",
                                       name="%s_h_act" % name)
        next_h = symbol._internal._plus((1. - update_gate) * next_h_tmp, update_gate * prev_h,
                                        name='%sout' % name)
        return next_h, [next_h]
class FusedRNNCell(BaseRNNCell):
    """Fusing RNN layers across time step into one kernel.

    Improves speed but is less flexible. Currently only
    supported if using cuDNN on GPU.

    Parameters
    ----------
    num_hidden : int
        Number of units in output symbol.
    num_layers : int, default 1
        Number of layers in the cell.
    mode : str, default 'lstm'
        Type of RNN. options are 'rnn_relu', 'rnn_tanh', 'lstm', 'gru'.
    bidirectional : bool, default False
        Whether to use bidirectional unroll. The output dimension size is
        doubled if bidirectional.
    dropout : float, default 0.
        Fraction of the input that gets dropped out during training time.
    get_next_state : bool, default False
        Whether to return the states that can be used as starting states next time.
    forget_bias : float, default 1.0
        Bias added to the forget gate.
        Jozefowicz et al. 2015 recommends setting this to 1.0.
    prefix : str, default '$mode_' such as 'lstm_'
        Prefix for names of layers
        (this prefix is also used for names of weights if `params` is None
        i.e. if `params` are being created and not reused)
    params : RNNParams, default None
        Container for weight sharing between cells. Created if None.
    cudnn_algo_verbose : bool, default False
        Forwarded to the FusedRNN initializer and symbol.RNN to log
        algorithm selection.
    cudnn_algo : int, default -1
        Force a specific cuDNN RNN algorithm (-1 leaves the choice to cuDNN).
    cudnn_tensor_core : bool or None, default None
        Tensor-core request forwarded to the FusedRNN initializer.
    """
    def __init__(self, num_hidden, num_layers=1, mode='lstm', bidirectional=False,
                 dropout=0., get_next_state=False, forget_bias=1.0,
                 prefix=None, params=None,
                 cudnn_algo_verbose=False,
                 cudnn_algo=-1,
                 cudnn_tensor_core=None):
        if prefix is None:
            prefix = '%s_'%mode
        super(FusedRNNCell, self).__init__(prefix=prefix, params=params)
        self._num_hidden = num_hidden
        self._num_layers = num_layers
        self._mode = mode
        self._bidirectional = bidirectional
        self._dropout = dropout
        self._get_next_state = get_next_state
        # Direction tags used when naming sliced (unfused) weights.
        self._directions = ['l', 'r'] if bidirectional else ['l']
        self._cudnn_algo_verbose = cudnn_algo_verbose
        self._cudnn_algo = cudnn_algo
        self._cudnn_tensor_core = cudnn_tensor_core
        # All weights/biases live in one flat parameter blob; the FusedRNN
        # initializer fills it slice by slice.
        initializer = init.FusedRNN(None, num_hidden, num_layers, mode,
                                    bidirectional, forget_bias,
                                    cudnn_algo_verbose, cudnn_algo, cudnn_tensor_core)
        self._parameter = self.params.get('parameters', init=initializer)

    @property
    def state_info(self):
        b = self._bidirectional + 1  # number of directions
        n = (self._mode == 'lstm') + 1  # LSTM carries (h, c); others just h
        return [{'shape': (b*self._num_layers, 0, self._num_hidden), '__layout__': 'LNC'}
                for _ in range(n)]

    @property
    def _gate_names(self):
        return {'rnn_relu': [''],
                'rnn_tanh': [''],
                'lstm': ['_i', '_f', '_c', '_o'],
                'gru': ['_r', '_z', '_o']}[self._mode]

    @property
    def _num_gates(self):
        return len(self._gate_names)

    def _slice_weights(self, arr, li, lh):
        """slice fused rnn weights

        Returns a dict of per-layer/direction/gate views into the flat
        blob ``arr``; ``li`` is the first layer's input size, ``lh`` the
        hidden size.  Layout matches pack order: all weight matrices
        first (layer, direction, gate), then all biases in the same order.
        """
        args = {}
        gate_names = self._gate_names
        directions = self._directions
        b = len(directions)
        p = 0
        for layer in range(self._num_layers):
            for direction in directions:
                for gate in gate_names:
                    name = '%s%s%d_i2h%s_weight'%(self._prefix, direction, layer, gate)
                    if layer > 0:
                        # Deeper layers take the (direction-concatenated)
                        # outputs of the layer below as input.
                        size = b*lh*lh
                        args[name] = arr[p:p+size].reshape((lh, b*lh))
                    else:
                        size = li*lh
                        args[name] = arr[p:p+size].reshape((lh, li))
                    p += size
                for gate in gate_names:
                    name = '%s%s%d_h2h%s_weight'%(self._prefix, direction, layer, gate)
                    size = lh**2
                    args[name] = arr[p:p+size].reshape((lh, lh))
                    p += size
        for layer in range(self._num_layers):
            for direction in directions:
                for gate in gate_names:
                    name = '%s%s%d_i2h%s_bias'%(self._prefix, direction, layer, gate)
                    args[name] = arr[p:p+lh]
                    p += lh
                for gate in gate_names:
                    name = '%s%s%d_h2h%s_bias'%(self._prefix, direction, layer, gate)
                    args[name] = arr[p:p+lh]
                    p += lh
        assert p == arr.size, "Invalid parameters size for FusedRNNCell"
        return args

    def unpack_weights(self, args):
        args = args.copy()
        arr = args.pop(self._parameter.name)
        b = len(self._directions)
        m = self._num_gates
        h = self._num_hidden
        # Invert the total-size formula used in pack_weights to recover the
        # first layer's input dimension.
        num_input = arr.size//b//h//m - (self._num_layers - 1)*(h+b*h+2) - h - 2
        nargs = self._slice_weights(arr, num_input, self._num_hidden)
        # Copy each view so the unpacked arrays do not alias the blob.
        args.update({name: nd.copy() for name, nd in nargs.items()})
        return args

    def pack_weights(self, args):
        args = args.copy()
        b = self._bidirectional + 1
        m = self._num_gates
        c = self._gate_names
        h = self._num_hidden
        # Infer the first layer's input size from the first i2h weight.
        w0 = args['%sl0_i2h%s_weight'%(self._prefix, c[0])]
        num_input = w0.shape[1]
        total = (num_input+h+2)*h*m*b + (self._num_layers-1)*m*h*(h+b*h+2)*b
        arr = ndarray.zeros((total,), ctx=w0.context, dtype=w0.dtype)
        # Write each unfused array into its slice of the flat blob.
        for name, nd in self._slice_weights(arr, num_input, h).items():
            nd[:] = args.pop(name)
        args[self._parameter.name] = arr
        return args

    def __call__(self, inputs, states):
        raise NotImplementedError("FusedRNNCell cannot be stepped. Please use unroll")

    def unroll(self, length, inputs, begin_state=None, layout='NTC', merge_outputs=None):
        """Unroll with a single fused symbol.RNN node (cuDNN kernel)."""
        self.reset()
        inputs, axis = _normalize_sequence(length, inputs, layout, True)
        if axis == 1:
            warnings.warn("NTC layout detected. Consider using "
                          "TNC for FusedRNNCell for faster speed")
            # symbol.RNN expects time-major input; swap to TNC.
            inputs = symbol.swapaxes(inputs, dim1=0, dim2=1)
        else:
            assert axis == 0, "Unsupported layout %s"%layout
        if begin_state is None:
            begin_state = self.begin_state()
        states = begin_state
        if self._mode == 'lstm':
            states = {'state': states[0], 'state_cell': states[1]} # pylint: disable=redefined-variable-type
        else:
            states = {'state': states[0]}
        # NOTE(review): self._cudnn_tensor_core is stored in __init__ but not
        # forwarded to symbol.RNN here -- confirm whether that is intentional.
        rnn = symbol.RNN(data=inputs, parameters=self._parameter,
                         state_size=self._num_hidden, num_layers=self._num_layers,
                         bidirectional=self._bidirectional, p=self._dropout,
                         state_outputs=self._get_next_state,
                         mode=self._mode, name=self._prefix+'rnn',
                         cudnn_algo_verbose=self._cudnn_algo_verbose,
                         cudnn_algo=self._cudnn_algo,
                         **states)
        attr = {'__layout__' : 'LNC'}
        if not self._get_next_state:
            outputs, states = rnn, []
        elif self._mode == 'lstm':
            rnn[1]._set_attr(**attr)
            rnn[2]._set_attr(**attr)
            outputs, states = rnn[0], [rnn[1], rnn[2]]
        else:
            rnn[1]._set_attr(**attr)
            outputs, states = rnn[0], [rnn[1]]
        if axis == 1:
            # Swap back to the caller's NTC layout.
            outputs = symbol.swapaxes(outputs, dim1=0, dim2=1)
        outputs, _ = _normalize_sequence(length, outputs, layout, merge_outputs)
        return outputs, states

    def unfuse(self):
        """Unfuse the fused RNN in to a stack of rnn cells.

        Returns
        -------
        cell : mxnet.rnn.SequentialRNNCell
            unfused cell that can be used for stepping, and can run on CPU.
        """
        stack = SequentialRNNCell()
        get_cell = {'rnn_relu': lambda cell_prefix: RNNCell(self._num_hidden,
                                                            activation='relu',
                                                            prefix=cell_prefix),
                    'rnn_tanh': lambda cell_prefix: RNNCell(self._num_hidden,
                                                            activation='tanh',
                                                            prefix=cell_prefix),
                    'lstm': lambda cell_prefix: LSTMCell(self._num_hidden,
                                                         prefix=cell_prefix),
                    'gru': lambda cell_prefix: GRUCell(self._num_hidden,
                                                       prefix=cell_prefix)}[self._mode]
        for i in range(self._num_layers):
            if self._bidirectional:
                stack.add(BidirectionalCell(
                    get_cell('%sl%d_'%(self._prefix, i)),
                    get_cell('%sr%d_'%(self._prefix, i)),
                    output_prefix='%sbi_l%d_'%(self._prefix, i)))
            else:
                stack.add(get_cell('%sl%d_'%(self._prefix, i)))
            # Match cuDNN semantics: dropout between layers, not after the last.
            if self._dropout > 0 and i != self._num_layers - 1:
                stack.add(DropoutCell(self._dropout, prefix='%s_dropout%d_'%(self._prefix, i)))
        return stack
class SequentialRNNCell(BaseRNNCell):
    """Stack of RNN cells applied one after another at every time step.

    Parameters
    ----------
    params : RNNParams, default None
        Container for weight sharing between cells. Created if None.
    """
    def __init__(self, params=None):
        super(SequentialRNNCell, self).__init__(prefix='', params=params)
        self._override_cell_params = params is not None
        self._cells = []

    def add(self, cell):
        """Append a cell to the stack.

        During unroll, the previous cell's output (or the raw inputs for
        the first cell) is fed into this cell.

        Parameters
        ----------
        cell : BaseRNNCell
            The cell to append.
        """
        self._cells.append(cell)
        if self._override_cell_params:
            assert cell._own_params, \
                "Either specify params for SequentialRNNCell " \
                "or child cells, not both."
            cell.params._params.update(self.params._params)
        self.params._params.update(cell.params._params)

    @property
    def state_info(self):
        return _cells_state_info(self._cells)

    def begin_state(self, **kwargs): # pylint: disable=arguments-differ
        assert not self._modified, \
            "After applying modifier cells (e.g. ZoneoutCell) the base " \
            "cell cannot be called directly. Call the modifier cell instead."
        return _cells_begin_state(self._cells, **kwargs)

    def unpack_weights(self, args):
        return _cells_unpack_weights(self._cells, args)

    def pack_weights(self, args):
        return _cells_pack_weights(self._cells, args)

    def __call__(self, inputs, states):
        self._counter += 1
        flat_states = []
        pos = 0
        for cell in self._cells:
            assert not isinstance(cell, BidirectionalCell)
            num = len(cell.state_info)
            # Carve this cell's slice out of the flattened state list.
            inputs, cell_states = cell(inputs, states[pos:pos + num])
            pos += num
            flat_states.extend(cell_states)
        return inputs, flat_states

    def unroll(self, length, inputs, begin_state=None, layout='NTC', merge_outputs=None):
        self.reset()
        if begin_state is None:
            begin_state = self.begin_state()
        last = len(self._cells) - 1
        pos = 0
        flat_states = []
        for idx, cell in enumerate(self._cells):
            num = len(cell.state_info)
            cell_begin = begin_state[pos:pos + num]
            pos += num
            # Only the final cell honors the caller's merge_outputs choice;
            # intermediate cells pick whatever is fastest.
            inputs, cell_states = cell.unroll(length, inputs=inputs, begin_state=cell_begin,
                                              layout=layout,
                                              merge_outputs=merge_outputs if idx == last
                                              else None)
            flat_states.extend(cell_states)
        return inputs, flat_states
class DropoutCell(BaseRNNCell):
    """Stateless cell that applies dropout to its input.

    Parameters
    ----------
    dropout : float
        Fraction of elements to drop out (1 - fraction retained).
    prefix : str, default 'dropout_'
        Prefix for names of layers
        (this prefix is also used for names of weights if `params` is None
        i.e. if `params` are being created and not reused)
    params : RNNParams, default None
        Container for weight sharing between cells. Created if None.
    """
    def __init__(self, dropout, prefix='dropout_', params=None):
        super(DropoutCell, self).__init__(prefix, params)
        assert isinstance(dropout, numeric_types), "dropout probability must be a number"
        self.dropout = dropout

    @property
    def state_info(self):
        # Stateless: nothing carried between steps.
        return []

    def __call__(self, inputs, states):
        if self.dropout > 0:
            inputs = symbol.Dropout(data=inputs, p=self.dropout)
        return inputs, states

    def unroll(self, length, inputs, begin_state=None, layout='NTC', merge_outputs=None):
        self.reset()
        inputs, _ = _normalize_sequence(length, inputs, layout, merge_outputs)
        # A merged (single-Symbol) sequence can be dropped out in one shot.
        if isinstance(inputs, symbol.Symbol):
            return self(inputs, [])
        # Otherwise fall back to the generic step-by-step unroll.
        return super(DropoutCell, self).unroll(
            length, inputs, begin_state=begin_state, layout=layout,
            merge_outputs=merge_outputs)
class ModifierCell(BaseRNNCell):
    """Base class for cells that wrap and modify another cell.

    A modifier cell takes a base cell and applies a modification to it
    (e.g. zoneout or a residual connection), yielding a new cell.  Once
    wrapped, the base cell must no longer be called directly; use the
    modifier cell instead.
    """
    def __init__(self, base_cell):
        super(ModifierCell, self).__init__()
        base_cell._modified = True
        self.base_cell = base_cell

    @property
    def params(self):
        # Parameters live entirely on the wrapped cell; this wrapper owns none.
        self._own_params = False
        return self.base_cell.params

    @property
    def state_info(self):
        return self.base_cell.state_info

    def begin_state(self, init_sym=symbol.zeros, **kwargs): # pylint: disable=arguments-differ
        assert not self._modified, \
            "After applying modifier cells (e.g. DropoutCell) the base " \
            "cell cannot be called directly. Call the modifier cell instead."
        # Temporarily lift the modified flag so the base cell accepts the call.
        self.base_cell._modified = False
        begin_states = self.base_cell.begin_state(init_sym, **kwargs)
        self.base_cell._modified = True
        return begin_states

    def unpack_weights(self, args):
        return self.base_cell.unpack_weights(args)

    def pack_weights(self, args):
        return self.base_cell.pack_weights(args)

    def __call__(self, inputs, states):
        raise NotImplementedError
class ZoneoutCell(ModifierCell):
    """Apply Zoneout on base cell.

    Parameters
    ----------
    base_cell : BaseRNNCell
        Cell on whose states to perform zoneout.
    zoneout_outputs : float, default 0.
        Fraction of the output that gets dropped out during training time.
    zoneout_states : float, default 0.
        Fraction of the states that gets dropped out during training time.
    """
    def __init__(self, base_cell, zoneout_outputs=0., zoneout_states=0.):
        assert not isinstance(base_cell, FusedRNNCell), \
            "FusedRNNCell doesn't support zoneout. " \
            "Please unfuse first."
        assert not isinstance(base_cell, BidirectionalCell), \
            "BidirectionalCell doesn't support zoneout since it doesn't support step. " \
            "Please add ZoneoutCell to the cells underneath instead."
        # Use getattr: SequentialRNNCell does not define `_bidirectional`,
        # so a direct attribute access raised AttributeError for ANY
        # SequentialRNNCell instead of performing this check.
        assert not isinstance(base_cell, SequentialRNNCell) or \
            not getattr(base_cell, '_bidirectional', False), \
            "Bidirectional SequentialRNNCell doesn't support zoneout. " \
            "Please add ZoneoutCell to the cells underneath instead."
        super(ZoneoutCell, self).__init__(base_cell)
        self.zoneout_outputs = zoneout_outputs
        self.zoneout_states = zoneout_states
        # Previous step's output, needed to "zone out" back to it.
        self.prev_output = None

    def reset(self):
        super(ZoneoutCell, self).reset()
        self.prev_output = None

    def __call__(self, inputs, states):
        cell, p_outputs, p_states = self.base_cell, self.zoneout_outputs, self.zoneout_states
        next_output, next_states = cell(inputs, states)
        # Bernoulli mask: 1 keeps the new value, 0 retains the previous one.
        mask = lambda p, like: symbol.Dropout(symbol.ones_like(like), p=p)
        # At t=0 there is no previous output; a zero symbol broadcasts in.
        prev_output = self.prev_output if self.prev_output is not None else symbol.zeros((0, 0))
        output = (symbol.where(mask(p_outputs, next_output), next_output, prev_output)
                  if p_outputs != 0. else next_output)
        states = ([symbol.where(mask(p_states, new_s), new_s, old_s) for new_s, old_s in
                   zip(next_states, states)] if p_states != 0. else next_states)
        self.prev_output = output
        return output, states
class ResidualCell(ModifierCell):
    """Adds a residual (skip) connection as described in Wu et al, 2016
    (https://arxiv.org/abs/1609.08144): the cell's output is the base
    cell's output plus its input.

    Parameters
    ----------
    base_cell : BaseRNNCell
        Cell on whose outputs to add residual connection.
    """
    def __init__(self, base_cell):
        super(ResidualCell, self).__init__(base_cell)

    def __call__(self, inputs, states):
        output, states = self.base_cell(inputs, states)
        output = symbol.elemwise_add(output, inputs, name="%s_plus_residual" % output.name)
        return output, states

    def unroll(self, length, inputs, begin_state=None, layout='NTC', merge_outputs=None):
        self.reset()
        # Let the base cell unroll; lift its modified flag around the call.
        self.base_cell._modified = False
        outputs, states = self.base_cell.unroll(length, inputs=inputs, begin_state=begin_state,
                                                layout=layout, merge_outputs=merge_outputs)
        self.base_cell._modified = True
        if merge_outputs is None:
            # Mirror whatever form the base cell returned.
            merge_outputs = isinstance(outputs, symbol.Symbol)
        inputs, _ = _normalize_sequence(length, inputs, layout, merge_outputs)
        if merge_outputs:
            outputs = symbol.elemwise_add(outputs, inputs, name="%s_plus_residual" % outputs.name)
        else:
            outputs = [symbol.elemwise_add(out_sym, in_sym,
                                           name="%s_plus_residual" % out_sym.name)
                       for out_sym, in_sym in zip(outputs, inputs)]
        return outputs, states
class BidirectionalCell(BaseRNNCell):
    """Bidirectional RNN cell.

    Parameters
    ----------
    l_cell : BaseRNNCell
        cell for forward unrolling
    r_cell : BaseRNNCell
        cell for backward unrolling
    params : RNNParams, default None.
        Container for weight sharing between cells.
        A new RNNParams container is created if `params` is None.
    output_prefix : str, default 'bi_'
        prefix for name of output
    """
    def __init__(self, l_cell, r_cell, params=None, output_prefix='bi_'):
        super(BidirectionalCell, self).__init__('', params=params)
        self._output_prefix = output_prefix
        self._override_cell_params = params is not None
        if self._override_cell_params:
            assert l_cell._own_params and r_cell._own_params, \
                "Either specify params for BidirectionalCell " \
                "or child cells, not both."
            l_cell.params._params.update(self.params._params)
            r_cell.params._params.update(self.params._params)
        # Keep this cell's parameter dict in sync with both children.
        self.params._params.update(l_cell.params._params)
        self.params._params.update(r_cell.params._params)
        self._cells = [l_cell, r_cell]

    def unpack_weights(self, args):
        return _cells_unpack_weights(self._cells, args)

    def pack_weights(self, args):
        return _cells_pack_weights(self._cells, args)

    def __call__(self, inputs, states):
        raise NotImplementedError("Bidirectional cannot be stepped. Please use unroll")

    @property
    def state_info(self):
        return _cells_state_info(self._cells)

    def begin_state(self, **kwargs): # pylint: disable=arguments-differ
        assert not self._modified, \
            "After applying modifier cells (e.g. DropoutCell) the base " \
            "cell cannot be called directly. Call the modifier cell instead."
        return _cells_begin_state(self._cells, **kwargs)

    def unroll(self, length, inputs, begin_state=None, layout='NTC', merge_outputs=None):
        self.reset()
        inputs, axis = _normalize_sequence(length, inputs, layout, False)
        if begin_state is None:
            begin_state = self.begin_state()
        states = begin_state
        l_cell, r_cell = self._cells
        # Forward pass over the sequence as given; state list is split
        # between the two children by their state_info lengths.
        l_outputs, l_states = l_cell.unroll(length, inputs=inputs,
                                            begin_state=states[:len(l_cell.state_info)],
                                            layout=layout, merge_outputs=merge_outputs)
        # Backward pass over the reversed sequence.
        r_outputs, r_states = r_cell.unroll(length,
                                            inputs=list(reversed(inputs)),
                                            begin_state=states[len(l_cell.state_info):],
                                            layout=layout, merge_outputs=merge_outputs)
        if merge_outputs is None:
            # Merge only if both children already produced merged outputs;
            # otherwise slice any merged child back into a per-step list.
            merge_outputs = (isinstance(l_outputs, symbol.Symbol)
                             and isinstance(r_outputs, symbol.Symbol))
            if not merge_outputs:
                if isinstance(l_outputs, symbol.Symbol):
                    l_outputs = list(symbol.SliceChannel(l_outputs, axis=axis,
                                                         num_outputs=length, squeeze_axis=1))
                if isinstance(r_outputs, symbol.Symbol):
                    r_outputs = list(symbol.SliceChannel(r_outputs, axis=axis,
                                                         num_outputs=length, squeeze_axis=1))
        if merge_outputs:
            l_outputs = [l_outputs]
            # Undo the input reversal so both directions align in time.
            r_outputs = [symbol.reverse(r_outputs, axis=axis)]
        else:
            r_outputs = list(reversed(r_outputs))
        # dim=1+merge_outputs: the feature axis is 2 on merged (N,T,C)
        # outputs and 1 on per-step (N,C) outputs.
        outputs = [symbol.Concat(l_o, r_o, dim=1+merge_outputs,
                                 name=('%sout'%(self._output_prefix) if merge_outputs
                                       else '%st%d'%(self._output_prefix, i)))
                   for i, l_o, r_o in
                   zip(range(len(l_outputs)), l_outputs, r_outputs)]
        if merge_outputs:
            outputs = outputs[0]
        states = [l_states, r_states]
        return outputs, states
class BaseConvRNNCell(BaseRNNCell):
    """Abstract base class for Convolutional RNN cells.

    Handles the shared convolution geometry, parameter creation and
    state-shape inference; subclasses supply the gate logic.
    """
    def __init__(self, input_shape, num_hidden,
                 h2h_kernel, h2h_dilate,
                 i2h_kernel, i2h_stride,
                 i2h_pad, i2h_dilate,
                 i2h_weight_initializer, h2h_weight_initializer,
                 i2h_bias_initializer, h2h_bias_initializer,
                 activation, prefix='', params=None, conv_layout='NCHW'):
        super(BaseConvRNNCell, self).__init__(prefix=prefix, params=params)
        # Convolution setting
        self._h2h_kernel = h2h_kernel
        # Odd kernels keep the state spatially aligned under "same" padding.
        assert (self._h2h_kernel[0] % 2 == 1) and (self._h2h_kernel[1] % 2 == 1), \
            "Only support odd number, get h2h_kernel= %s" % str(h2h_kernel)
        # "Same" padding for the (dilated) state-to-state convolution.
        self._h2h_pad = (h2h_dilate[0] * (h2h_kernel[0] - 1) // 2,
                         h2h_dilate[1] * (h2h_kernel[1] - 1) // 2)
        self._h2h_dilate = h2h_dilate
        self._i2h_kernel = i2h_kernel
        self._i2h_stride = i2h_stride
        self._i2h_pad = i2h_pad
        self._i2h_dilate = i2h_dilate
        self._num_hidden = num_hidden
        self._input_shape = input_shape
        self._conv_layout = conv_layout
        self._activation = activation
        # Infer state shape by shape-inferring a probe convolution over a
        # dummy variable of the declared input_shape.
        data = symbol.Variable('data')
        self._state_shape = symbol.Convolution(data=data,
                                               num_filter=self._num_hidden,
                                               kernel=self._i2h_kernel,
                                               stride=self._i2h_stride,
                                               pad=self._i2h_pad,
                                               dilate=self._i2h_dilate,
                                               layout=conv_layout)
        self._state_shape = self._state_shape.infer_shape(data=input_shape)[1][0]
        # Batch dimension left as 0 so it is inferred at bind time.
        self._state_shape = (0, ) + self._state_shape[1:]
        # Get params
        self._iW = self.params.get('i2h_weight', init=i2h_weight_initializer)
        self._hW = self.params.get('h2h_weight', init=h2h_weight_initializer)
        self._iB = self.params.get('i2h_bias', init=i2h_bias_initializer)
        self._hB = self.params.get('h2h_bias', init=h2h_bias_initializer)

    @property
    def _num_gates(self):
        return len(self._gate_names)

    @property
    def state_info(self):
        # Default: two states as used by ConvLSTM; single-state subclasses
        # (e.g. ConvRNNCell) override this.
        return [{'shape': self._state_shape, '__layout__': self._conv_layout},
                {'shape': self._state_shape, '__layout__': self._conv_layout}]

    def _conv_forward(self, inputs, states, name):
        """Compute the input-to-state and state-to-state convolutions.

        Both produce num_hidden * num_gates channels so subclasses can
        slice out per-gate blocks.
        """
        i2h = symbol.Convolution(name='%si2h'%name,
                                 data=inputs,
                                 num_filter=self._num_hidden*self._num_gates,
                                 kernel=self._i2h_kernel,
                                 stride=self._i2h_stride,
                                 pad=self._i2h_pad,
                                 dilate=self._i2h_dilate,
                                 weight=self._iW,
                                 bias=self._iB,
                                 layout=self._conv_layout)
        h2h = symbol.Convolution(name='%sh2h'%name,
                                 data=states[0],
                                 num_filter=self._num_hidden*self._num_gates,
                                 kernel=self._h2h_kernel,
                                 dilate=self._h2h_dilate,
                                 pad=self._h2h_pad,
                                 stride=(1, 1),
                                 weight=self._hW,
                                 bias=self._hB,
                                 layout=self._conv_layout)
        return i2h, h2h

    def __call__(self, inputs, states):
        raise NotImplementedError("BaseConvRNNCell is abstract class for convolutional RNN")
class ConvRNNCell(BaseConvRNNCell):
    """Convolutional RNN cell: an RNNCell with Convolution replacing
    FullyConnected in both transitions.

    Parameters
    ----------
    input_shape : tuple of int
        Shape of the input at a single timestep.
    num_hidden : int
        Number of filters (units) in the output symbol.
    h2h_kernel : tuple of int, default (3, 3)
        Kernel of the state-to-state convolution.
    h2h_dilate : tuple of int, default (1, 1)
        Dilation of the state-to-state convolution.
    i2h_kernel : tuple of int, default (3, 3)
        Kernel of the input-to-state convolution.
    i2h_stride : tuple of int, default (1, 1)
        Stride of the input-to-state convolution.
    i2h_pad : tuple of int, default (1, 1)
        Padding of the input-to-state convolution.
    i2h_dilate : tuple of int, default (1, 1)
        Dilation of the input-to-state convolution.
    i2h_weight_initializer : str or Initializer
        Initializer for the input-to-state convolution weights.
    h2h_weight_initializer : str or Initializer
        Initializer for the state-to-state convolution weights.
    i2h_bias_initializer : str or Initializer, default 'zeros'
        Initializer for the input-to-state bias.
    h2h_bias_initializer : str or Initializer, default 'zeros'
        Initializer for the state-to-state bias.
    activation : str or Symbol, default leaky ReLU with slope 0.2
        Activation applied to the summed transitions.
    prefix : str, default 'ConvRNN_'
        Prefix for layer names (and weight names when params is None).
    params : RNNParams, default None
        Container for weight sharing between cells; created when None.
    conv_layout : str, default 'NCHW'
        Layout passed to the Convolution operators.
    """
    def __init__(self, input_shape, num_hidden,
                 h2h_kernel=(3, 3), h2h_dilate=(1, 1),
                 i2h_kernel=(3, 3), i2h_stride=(1, 1),
                 i2h_pad=(1, 1), i2h_dilate=(1, 1),
                 i2h_weight_initializer=None, h2h_weight_initializer=None,
                 i2h_bias_initializer='zeros', h2h_bias_initializer='zeros',
                 activation=functools.partial(symbol.LeakyReLU, act_type='leaky', slope=0.2),
                 prefix='ConvRNN_', params=None, conv_layout='NCHW'):
        super(ConvRNNCell, self).__init__(input_shape=input_shape, num_hidden=num_hidden,
                                          h2h_kernel=h2h_kernel, h2h_dilate=h2h_dilate,
                                          i2h_kernel=i2h_kernel, i2h_stride=i2h_stride,
                                          i2h_pad=i2h_pad, i2h_dilate=i2h_dilate,
                                          i2h_weight_initializer=i2h_weight_initializer,
                                          h2h_weight_initializer=h2h_weight_initializer,
                                          i2h_bias_initializer=i2h_bias_initializer,
                                          h2h_bias_initializer=h2h_bias_initializer,
                                          activation=activation, prefix=prefix,
                                          params=params, conv_layout=conv_layout)

    @property
    def _gate_names(self):
        # A plain convolutional RNN has one unnamed gate.
        return ('',)

    def __call__(self, inputs, states):
        self._counter += 1
        step_prefix = '%st%d_'%(self._prefix, self._counter)
        in_conv, rec_conv = self._conv_forward(inputs, states, step_prefix)
        next_h = self._get_activation(in_conv + rec_conv, self._activation,
                                      name='%sout'%step_prefix)
        return next_h, [next_h]

    @property
    def state_info(self):
        # Single hidden state whose shape was inferred in the base class.
        return [{'shape': self._state_shape, '__layout__': self._conv_layout}]
class ConvLSTMCell(BaseConvRNNCell):
    """Convolutional LSTM network cell.

    Reference:
        Xingjian et al. NIPS2015

    Parameters
    ----------
    input_shape : tuple of int
        Shape of input in single timestep.
    num_hidden : int
        Number of units in output symbol.
    h2h_kernel : tuple of int, default (3, 3)
        Kernel of Convolution operator in state-to-state transitions.
    h2h_dilate : tuple of int, default (1, 1)
        Dilation of Convolution operator in state-to-state transitions.
    i2h_kernel : tuple of int, default (3, 3)
        Kernel of Convolution operator in input-to-state transitions.
    i2h_stride : tuple of int, default (1, 1)
        Stride of Convolution operator in input-to-state transitions.
    i2h_pad : tuple of int, default (1, 1)
        Pad of Convolution operator in input-to-state transitions.
    i2h_dilate : tuple of int, default (1, 1)
        Dilation of Convolution operator in input-to-state transitions.
    i2h_weight_initializer : str or Initializer
        Initializer for the input weights matrix, used for the convolution
        transformation of the inputs.
    h2h_weight_initializer : str or Initializer
        Initializer for the recurrent weights matrix, used for the convolution
        transformation of the recurrent state.
    i2h_bias_initializer : str or Initializer, default zeros
        Initializer for the bias vector.
    h2h_bias_initializer : str or Initializer, default zeros
        Initializer for the bias vector.
    activation : str or Symbol,
        default functools.partial(symbol.LeakyReLU, act_type='leaky', slope=0.2)
        Type of activation function.
    prefix : str, default 'ConvLSTM_'
        Prefix for name of layers (and name of weight if params is None).
    params : RNNParams, default None
        Container for weight sharing between cells. Created if None.
    conv_layout : str, default 'NCHW'
        Layout of ConvolutionOp.
    """
    def __init__(self, input_shape, num_hidden,
                 h2h_kernel=(3, 3), h2h_dilate=(1, 1),
                 i2h_kernel=(3, 3), i2h_stride=(1, 1),
                 i2h_pad=(1, 1), i2h_dilate=(1, 1),
                 i2h_weight_initializer=None, h2h_weight_initializer=None,
                 i2h_bias_initializer='zeros', h2h_bias_initializer='zeros',
                 activation=functools.partial(symbol.LeakyReLU, act_type='leaky', slope=0.2),
                 prefix='ConvLSTM_', params=None,
                 conv_layout='NCHW'):
        super(ConvLSTMCell, self).__init__(input_shape=input_shape, num_hidden=num_hidden,
                                           h2h_kernel=h2h_kernel, h2h_dilate=h2h_dilate,
                                           i2h_kernel=i2h_kernel, i2h_stride=i2h_stride,
                                           i2h_pad=i2h_pad, i2h_dilate=i2h_dilate,
                                           i2h_weight_initializer=i2h_weight_initializer,
                                           h2h_weight_initializer=h2h_weight_initializer,
                                           i2h_bias_initializer=i2h_bias_initializer,
                                           h2h_bias_initializer=h2h_bias_initializer,
                                           activation=activation, prefix=prefix,
                                           params=params, conv_layout=conv_layout)

    @property
    def _gate_names(self):
        return ['_i', '_f', '_c', '_o']

    def __call__(self, inputs, states):
        self._counter += 1
        name = '%st%d_'%(self._prefix, self._counter)
        i2h, h2h = self._conv_forward(inputs, states, name)
        gates = i2h + h2h
        # Split the fused 4*num_hidden channels into the four gates along
        # the channel axis of the configured layout.
        slice_gates = symbol.SliceChannel(gates, num_outputs=4, axis=self._conv_layout.find('C'),
                                          name="%sslice"%name)
        in_gate = symbol.Activation(slice_gates[0], act_type="sigmoid",
                                    name='%si'%name)
        forget_gate = symbol.Activation(slice_gates[1], act_type="sigmoid",
                                        name='%sf'%name)
        # Candidate cell values use the configurable activation (default
        # leaky ReLU) rather than tanh as in the dense LSTMCell.
        in_transform = self._get_activation(slice_gates[2], self._activation,
                                            name='%sc'%name)
        out_gate = symbol.Activation(slice_gates[3], act_type="sigmoid",
                                     name='%so'%name)
        # c' = f * c + i * g ;  h' = o * act(c')
        next_c = symbol._internal._plus(forget_gate * states[1], in_gate * in_transform,
                                        name='%sstate'%name)
        next_h = symbol._internal._mul(out_gate, self._get_activation(next_c, self._activation),
                                       name='%sout'%name)
        return next_h, [next_h, next_c]

    @property
    def state_info(self):
        # Hidden state and cell state, both with the inferred conv shape.
        return [{'shape': self._state_shape, '__layout__': self._conv_layout},
                {'shape': self._state_shape, '__layout__': self._conv_layout}]
class ConvGRUCell(BaseConvRNNCell):
    """Convolutional Gated Rectified Unit (GRU) network cell.

    Parameters
    ----------
    input_shape : tuple of int
        Shape of input in single timestep.
    num_hidden : int
        Number of units in output symbol.
    h2h_kernel : tuple of int, default (3, 3)
        Kernel of Convolution operator in state-to-state transitions.
    h2h_dilate : tuple of int, default (1, 1)
        Dilation of Convolution operator in state-to-state transitions.
    i2h_kernel : tuple of int, default (3, 3)
        Kernel of Convolution operator in input-to-state transitions.
    i2h_stride : tuple of int, default (1, 1)
        Stride of Convolution operator in input-to-state transitions.
    i2h_pad : tuple of int, default (1, 1)
        Pad of Convolution operator in input-to-state transitions.
    i2h_dilate : tuple of int, default (1, 1)
        Dilation of Convolution operator in input-to-state transitions.
    i2h_weight_initializer : str or Initializer
        Initializer for the input weights matrix, used for the convolution
        transformation of the inputs.
    h2h_weight_initializer : str or Initializer
        Initializer for the recurrent weights matrix, used for the convolution
        transformation of the recurrent state.
    i2h_bias_initializer : str or Initializer, default zeros
        Initializer for the bias vector.
    h2h_bias_initializer : str or Initializer, default zeros
        Initializer for the bias vector.
    activation : str or Symbol,
        default functools.partial(symbol.LeakyReLU, act_type='leaky', slope=0.2)
        Type of activation function.
    prefix : str, default ``'ConvGRU_'``
        Prefix for name of layers (and name of weight if params is None).
    params : RNNParams, default None
        Container for weight sharing between cells. Created if None.
    conv_layout : str, , default 'NCHW'
        Layout of ConvolutionOp
    """
    def __init__(self, input_shape, num_hidden,
                 h2h_kernel=(3, 3), h2h_dilate=(1, 1),
                 i2h_kernel=(3, 3), i2h_stride=(1, 1),
                 i2h_pad=(1, 1), i2h_dilate=(1, 1),
                 i2h_weight_initializer=None, h2h_weight_initializer=None,
                 i2h_bias_initializer='zeros', h2h_bias_initializer='zeros',
                 activation=functools.partial(symbol.LeakyReLU, act_type='leaky', slope=0.2),
                 prefix='ConvGRU_', params=None, conv_layout='NCHW'):
        super(ConvGRUCell, self).__init__(input_shape=input_shape, num_hidden=num_hidden,
                                          h2h_kernel=h2h_kernel, h2h_dilate=h2h_dilate,
                                          i2h_kernel=i2h_kernel, i2h_stride=i2h_stride,
                                          i2h_pad=i2h_pad, i2h_dilate=i2h_dilate,
                                          i2h_weight_initializer=i2h_weight_initializer,
                                          h2h_weight_initializer=h2h_weight_initializer,
                                          i2h_bias_initializer=i2h_bias_initializer,
                                          h2h_bias_initializer=h2h_bias_initializer,
                                          activation=activation, prefix=prefix,
                                          params=params, conv_layout=conv_layout)

    @property
    def _gate_names(self):
        # Suffixes for the three GRU gates: reset, update, output/candidate.
        return ['_r', '_z', '_o']

    @property
    def state_info(self):
        # Single hidden state shaped like one conv feature map.
        return [{'shape': self._state_shape, '__layout__': self._conv_layout}]

    def __call__(self, inputs, states):
        # One GRU step; ``states`` is [hidden].
        self._counter += 1
        seq_idx = self._counter
        name = '%st%d_' % (self._prefix, seq_idx)
        i2h, h2h = self._conv_forward(inputs, states, name)
        # Split each fused conv into reset, update and candidate parts.
        # NOTE: ``name`` already ends with '_', so these symbol names end
        # up with a double underscore; kept as-is for name compatibility.
        i2h_r, i2h_z, i2h = symbol.SliceChannel(i2h, num_outputs=3, name="%s_i2h_slice" % name)
        h2h_r, h2h_z, h2h = symbol.SliceChannel(h2h, num_outputs=3, name="%s_h2h_slice" % name)
        reset_gate = symbol.Activation(i2h_r + h2h_r, act_type="sigmoid",
                                       name="%s_r_act" % name)
        update_gate = symbol.Activation(i2h_z + h2h_z, act_type="sigmoid",
                                        name="%s_z_act" % name)
        # Candidate state: recurrent contribution is damped by the reset gate.
        next_h_tmp = self._get_activation(i2h + reset_gate * h2h, self._activation,
                                          name="%s_h_act" % name)
        # h_t = (1-z) * candidate + z * h_{t-1}
        next_h = symbol._internal._plus((1. - update_gate) * next_h_tmp, update_gate * states[0],
                                        name='%sout' % name)
        return next_h, [next_h]
| {
"content_hash": "de5c7feaaddd4ad1acc09da3f965758c",
"timestamp": "",
"source": "github",
"line_count": 1452,
"max_line_length": 108,
"avg_line_length": 42.03099173553719,
"alnum_prop": 0.5532779498271314,
"repo_name": "mlperf/training_results_v0.6",
"id": "92cb910e21c51b4e3950eec731554b13bb848e60",
"size": "62006",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Fujitsu/benchmarks/resnet/implementations/mxnet/python/mxnet/rnn/rnn_cell.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1731"
},
{
"name": "Batchfile",
"bytes": "13941"
},
{
"name": "C",
"bytes": "208630"
},
{
"name": "C++",
"bytes": "10999411"
},
{
"name": "CMake",
"bytes": "129712"
},
{
"name": "CSS",
"bytes": "64767"
},
{
"name": "Clojure",
"bytes": "396764"
},
{
"name": "Cuda",
"bytes": "2272433"
},
{
"name": "Dockerfile",
"bytes": "67820"
},
{
"name": "Groovy",
"bytes": "62557"
},
{
"name": "HTML",
"bytes": "19753082"
},
{
"name": "Java",
"bytes": "166294"
},
{
"name": "JavaScript",
"bytes": "71846"
},
{
"name": "Julia",
"bytes": "408765"
},
{
"name": "Jupyter Notebook",
"bytes": "2713169"
},
{
"name": "Lua",
"bytes": "4430"
},
{
"name": "MATLAB",
"bytes": "34903"
},
{
"name": "Makefile",
"bytes": "115694"
},
{
"name": "Perl",
"bytes": "1535873"
},
{
"name": "Perl 6",
"bytes": "7280"
},
{
"name": "PowerShell",
"bytes": "6150"
},
{
"name": "Python",
"bytes": "24905683"
},
{
"name": "R",
"bytes": "351865"
},
{
"name": "Roff",
"bytes": "293052"
},
{
"name": "Scala",
"bytes": "1189019"
},
{
"name": "Shell",
"bytes": "794096"
},
{
"name": "Smalltalk",
"bytes": "3497"
},
{
"name": "TypeScript",
"bytes": "361164"
}
],
"symlink_target": ""
} |
"""
A list of valid values for the 'platform_id' identifier code in FT_CharMapRec
and FT_SfntName structures.
TT_PLATFORM_APPLE_UNICODE
Used by Apple to indicate a Unicode character map and/or name entry. See
TT_APPLE_ID_XXX for corresponding 'encoding_id' values. Note that name
entries in this format are coded as big-endian UCS-2 character codes only.
TT_PLATFORM_MACINTOSH
Used by Apple to indicate a MacOS-specific charmap and/or name entry. See
TT_MAC_ID_XXX for corresponding 'encoding_id' values. Note that most TrueType
fonts contain an Apple roman charmap to be usable on MacOS systems (even if
they contain a Microsoft charmap as well).
TT_PLATFORM_ISO
This value was used to specify ISO/IEC 10646 charmaps. It is however now
deprecated. See TT_ISO_ID_XXX for a list of corresponding 'encoding_id'
values.
TT_PLATFORM_MICROSOFT
Used by Microsoft to indicate Windows-specific charmaps. See TT_MS_ID_XXX for
a list of corresponding 'encoding_id' values. Note that most fonts contain a
Unicode charmap using (TT_PLATFORM_MICROSOFT, TT_MS_ID_UNICODE_CS).
TT_PLATFORM_CUSTOM
Used to indicate application-specific charmaps.
TT_PLATFORM_ADOBE
This value isn't part of any font format specification, but is used by
FreeType to report Adobe-specific charmaps in an FT_CharMapRec structure. See
TT_ADOBE_ID_XXX.
"""
TT_PLATFORMS = {
'TT_PLATFORM_APPLE_UNICODE' : 0,
'TT_PLATFORM_MACINTOSH' : 1,
'TT_PLATFORM_ISO' : 2, # deprecated
'TT_PLATFORM_MICROSOFT' : 3,
'TT_PLATFORM_CUSTOM' : 4,
'TT_PLATFORM_ADOBE' : 7} # artificial
globals().update(TT_PLATFORMS)
| {
"content_hash": "8c05effb6845ec7c28ba298f641347ec",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 79,
"avg_line_length": 30.4,
"alnum_prop": 0.7266746411483254,
"repo_name": "glumpy/glumpy",
"id": "7b3c3974a71b65d6eab14e828b5c8820cb895134",
"size": "2012",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "glumpy/ext/freetype/ft_enums/tt_platforms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "26075"
},
{
"name": "Cython",
"bytes": "660"
},
{
"name": "GLSL",
"bytes": "177965"
},
{
"name": "Makefile",
"bytes": "407"
},
{
"name": "Python",
"bytes": "1320773"
}
],
"symlink_target": ""
} |
"""
WSGI config for nexus_django project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "nexus_django.settings")
application = get_wsgi_application()
| {
"content_hash": "31164aff889758c4695333c28d4b3e90",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 78,
"avg_line_length": 25.125,
"alnum_prop": 0.7711442786069652,
"repo_name": "utarsuno/urbtek",
"id": "cebf5e38d042a7114f0f3d86d6b153150f1bc5ce",
"size": "402",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "nexus_django/nexus_django/wsgi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "390447"
},
{
"name": "HTML",
"bytes": "80203"
},
{
"name": "JavaScript",
"bytes": "298511"
},
{
"name": "Python",
"bytes": "880231"
},
{
"name": "Shell",
"bytes": "22758"
}
],
"symlink_target": ""
} |
"""Wraps toco interface with python lazy loader."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# We need to import pywrap_tensorflow prior to the toco wrapper.
# pylint: disable=invalid-import-order,g-bad-import-order
from tensorflow.python import pywrap_tensorflow # pylint: disable=unused-import
from tensorflow.python import _pywrap_toco_api
# TODO(b/137402359): Remove lazy loading wrapper
def wrapped_toco_convert(model_flags_str, toco_flags_str, input_data_str,
                         debug_info_str, enable_mlir_converter):
  """Wraps TocoConvert with lazy loader.

  Arguments are forwarded positionally to the native TocoConvert binding;
  the flag/data arguments are presumably serialized protos — confirm
  against the C++ signature before changing.
  """
  return _pywrap_toco_api.TocoConvert(
      model_flags_str,
      toco_flags_str,
      input_data_str,
      False,  # extended_return
      debug_info_str,
      enable_mlir_converter)
def wrapped_get_potentially_supported_ops():
  """Wraps TocoGetPotentiallySupportedOps with lazy loader."""
  # Thin pass-through to the native binding; no arguments.
  return _pywrap_toco_api.TocoGetPotentiallySupportedOps()
| {
"content_hash": "ed7aaad32aae8ee7e14e81055b12b346",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 80,
"avg_line_length": 34.724137931034484,
"alnum_prop": 0.7269116186693148,
"repo_name": "renyi533/tensorflow",
"id": "213f31c6fe4f8339ed3f2ed3b69a379df2db1e66",
"size": "1696",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tensorflow/lite/python/wrap_toco.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "31572"
},
{
"name": "Batchfile",
"bytes": "55269"
},
{
"name": "C",
"bytes": "903309"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "82507951"
},
{
"name": "CMake",
"bytes": "6967"
},
{
"name": "Dockerfile",
"bytes": "113964"
},
{
"name": "Go",
"bytes": "1871425"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "988219"
},
{
"name": "Jupyter Notebook",
"bytes": "550861"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "2073744"
},
{
"name": "Makefile",
"bytes": "66796"
},
{
"name": "Objective-C",
"bytes": "116558"
},
{
"name": "Objective-C++",
"bytes": "319021"
},
{
"name": "PHP",
"bytes": "4236"
},
{
"name": "Pascal",
"bytes": "318"
},
{
"name": "Pawn",
"bytes": "20422"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "37811412"
},
{
"name": "RobotFramework",
"bytes": "1779"
},
{
"name": "Roff",
"bytes": "2705"
},
{
"name": "Ruby",
"bytes": "7464"
},
{
"name": "SWIG",
"bytes": "6846"
},
{
"name": "Shell",
"bytes": "696058"
},
{
"name": "Smarty",
"bytes": "35725"
},
{
"name": "Starlark",
"bytes": "3655758"
},
{
"name": "Swift",
"bytes": "62814"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
} |
import os
import sys
try:
import unittest2 as unittest
except ImportError:
import unittest
from paste.deploy import loadapp
from urlmap.test.util import make_app
# Absolute directory of this test module; added to sys.path so the
# 'config:*.ini' files loaded with relative_to=HERE can be resolved.
HERE = os.path.dirname(os.path.abspath(__file__))
sys.path.append(HERE)
class Test(unittest.TestCase):
    """A config that maps the same path twice must raise ValueError."""

    def _broken(self):
        # Load the intentionally broken paste config next to this module.
        return loadapp('config:test_path_twice.ini', relative_to=HERE)

    def test_port_twice(self):
        with self.assertRaises(ValueError):
            self._broken()
| {
"content_hash": "9bbf2ea73027856f994eb6db20357913",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 70,
"avg_line_length": 24.238095238095237,
"alnum_prop": 0.7151277013752456,
"repo_name": "zefciu/urlmap",
"id": "ec0e37147d400035deb05d29dcdd94c8b2fd80a9",
"size": "509",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/urlmap/test/test_exc/test_path_twice.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18258"
}
],
"symlink_target": ""
} |
if __name__ == "__main__":
import sys
sys.path.append('..')
import logging
from Top import Top
log = logging.getLogger(__name__)
class Topology(Top):
    """
    Connectivity matrix stored as a named dictionary of dictionaries:
    ``data[at1][at2]`` holds the accumulated bond order between the two
    atoms.  For NBO topologies, atomic indexing starts from 1!
    """

    def __init__(self, name):
        self.name = name
        self.data = {}

    def __len__(self):
        # Number of atoms that appear as a first index.
        return len(self.data)

    def __repr__(self):
        # The default object repr only showed a memory address, which made
        # the demo's ``print`` output useless.
        return '%s(%r): %r' % (type(self).__name__, self.name, self.data)

    def increaseOrder(self, at1, at2):
        """Increment the recorded bond order between ``at1`` and ``at2``."""
        # setdefault replaces the former manual-init chain and fixes the
        # non-idiomatic ``if not at1 in self.data`` test.
        bonds = self.data.setdefault(at1, {})
        bonds[at2] = bonds.get(at2, 0) + 1
#
#
#
#
#
if __name__ == "__main__":
t = Topology('test')
t.increaseOrder(1,2)
t.increaseOrder(1,2)
t.increaseOrder(2,3)
t.increaseOrder(3,4)
t.increaseOrder(3,4)
print t
| {
"content_hash": "17d5a4293202b40afc5fa4b4a8cabd64",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 61,
"avg_line_length": 19.533333333333335,
"alnum_prop": 0.565415244596132,
"repo_name": "mtthwflst/terse",
"id": "87bce52cf120ac84841ceed90bdeed4aa776675d",
"size": "879",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Containers/Topology.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "282"
},
{
"name": "DIGITAL Command Language",
"bytes": "1364"
},
{
"name": "HTML",
"bytes": "4318"
},
{
"name": "Python",
"bytes": "194740"
}
],
"symlink_target": ""
} |
import scrapy
class TutorialItem(scrapy.Item):
    """Placeholder item generated by ``scrapy startproject``; no fields yet."""
    # define the fields for your item here like:
    # name = scrapy.Field()
    pass
# Container class for the scraped data, using scrapy.Item as the base class.
class DmozItem(scrapy.Item):
    """Container for one scraped entry: its title, URL and description."""
    title = scrapy.Field()
    link = scrapy.Field()
    desc = scrapy.Field()
| {
"content_hash": "b11fe4071af4ccdf7c566aa0fffcbf8e",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 78,
"avg_line_length": 24.846153846153847,
"alnum_prop": 0.6934984520123839,
"repo_name": "sgangi03/big-data-python-class",
"id": "71809610bbac2c9ef424e40cc609d4c4401825c8",
"size": "475",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Lectures/code/tutorial/tutorial/items.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2169"
},
{
"name": "Python",
"bytes": "425674"
}
],
"symlink_target": ""
} |
import collections
import mock
import testscenarios
from ceilometer.central import manager
from ceilometer.objectstore import swift
from ceilometer.tests import base
from keystoneclient import exceptions
from swiftclient import client as swift_client
# Hook that expands the ``scenarios`` lists below into one test per scenario.
load_tests = testscenarios.load_tests_apply_scenarios

# Canned swift account data: (tenant name, account HEAD headers) pairs,
# yielded by fake_iter_accounts instead of querying a real swift endpoint.
ACCOUNTS = [('tenant-000', {'x-account-object-count': 12,
                            'x-account-bytes-used': 321321321,
                            'x-account-container-count': 7,
                            }),
            ('tenant-001', {'x-account-object-count': 34,
                            'x-account-bytes-used': 9898989898,
                            'x-account-container-count': 17,
                            })]
class TestManager(manager.AgentManager):
    """AgentManager whose keystone client is replaced by a MagicMock."""

    def __init__(self):
        super(TestManager, self).__init__()
        self.keystone = mock.MagicMock()
class TestSwiftPollster(base.TestCase):
    """Scenario-driven tests run against every swift object-store pollster."""

    # Define scenarios to run all of the tests against all of the
    # pollsters.
    scenarios = [
        ('storage.objects',
         {'factory': swift.ObjectsPollster}),
        ('storage.objects.size',
         {'factory': swift.ObjectsSizePollster}),
        ('storage.objects.containers',
         {'factory': swift.ObjectsContainersPollster}),
    ]

    @staticmethod
    def fake_ks_service_catalog_url_for(*args, **kwargs):
        # Stand-in for keystone's url_for that simulates a missing endpoint.
        raise exceptions.EndpointNotFound("Fake keystone exception")

    def fake_iter_accounts(self, ksclient, cache):
        # Yield the canned ACCOUNTS instead of querying swift.
        for i in ACCOUNTS:
            yield i

    @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
    def setUp(self):
        super(TestSwiftPollster, self).setUp()
        self.pollster = self.factory()
        self.manager = TestManager()

    def test_iter_accounts_no_cache(self):
        def empty_account_info(obj, ksclient, cache):
            return []
        self.stubs.Set(self.factory, '_get_account_info',
                       empty_account_info)
        cache = {}
        data = list(self.pollster._iter_accounts(mock.Mock(), cache))
        # Even with no accounts, both cache keys must be populated.
        self.assertTrue(self.pollster.CACHE_KEY_TENANT in cache)
        self.assertTrue(self.pollster.CACHE_KEY_HEAD in cache)
        self.assertEqual(data, [])

    def test_iter_accounts_tenants_cached(self):
        # Verify that if there are tenants pre-cached then the account
        # info loop iterates over those instead of asking for the list
        # again.
        ksclient = mock.Mock()
        ksclient.tenants.list.side_effect = AssertionError(
            'should not be called',
        )
        self.stubs.Set(swift_client, 'head_account',
                       ksclient)
        self.stubs.Set(self.factory, '_neaten_url',
                       mock.Mock())
        Tenant = collections.namedtuple('Tenant', 'id')
        cache = {
            self.pollster.CACHE_KEY_TENANT: [Tenant(ACCOUNTS[0][0])],
        }
        data = list(self.pollster._iter_accounts(mock.Mock(), cache))
        self.assertTrue(self.pollster.CACHE_KEY_HEAD in cache)
        self.assertEqual(data[0][0], ACCOUNTS[0][0])

    def test_neaten_url(self):
        # All endpoint spellings must normalize to the same
        # /v1/AUTH_<tenant> form, and the result must be idempotent.
        test_endpoint = 'http://127.0.0.1:8080'
        test_tenant_id = 'a7fd1695fa154486a647e44aa99a1b9b'
        standard_url = test_endpoint + '/v1/' + 'AUTH_' + test_tenant_id
        self.assertEqual(standard_url,
                         swift._Base._neaten_url(test_endpoint,
                                                 test_tenant_id))
        self.assertEqual(standard_url,
                         swift._Base._neaten_url(test_endpoint + '/',
                                                 test_tenant_id))
        self.assertEqual(standard_url,
                         swift._Base._neaten_url(test_endpoint + '/v1',
                                                 test_tenant_id))
        self.assertEqual(standard_url,
                         swift._Base._neaten_url(standard_url,
                                                 test_tenant_id))

    def test_metering(self):
        self.stubs.Set(self.factory, '_iter_accounts',
                       self.fake_iter_accounts)
        samples = list(self.pollster.get_samples(self.manager, {}))
        # One sample per canned account.
        self.assertEqual(len(samples), 2)

    def test_get_meter_names(self):
        self.stubs.Set(self.factory, '_iter_accounts',
                       self.fake_iter_accounts)
        samples = list(self.pollster.get_samples(self.manager, {}))
        # All samples from one pollster share a single meter name.
        self.assertEqual(set([s.name for s in samples]),
                         set([samples[0].name]))

    def test_endpoint_notfound(self):
        self.stubs.Set(self.manager.keystone.service_catalog, 'url_for',
                       self.fake_ks_service_catalog_url_for)
        samples = list(self.pollster.get_samples(self.manager, {}))
        # A missing endpoint must yield zero samples rather than raising.
        self.assertEqual(len(samples), 0)
| {
"content_hash": "14e93dc3de980390d359fe6b2c63323a",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 72,
"avg_line_length": 38.648,
"alnum_prop": 0.5723452701304078,
"repo_name": "citrix-openstack-build/ceilometer",
"id": "e89e260dcdb2700c3997dfd68a06dcc510f06111",
"size": "5534",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/objectstore/test_swift.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "6284"
},
{
"name": "JavaScript",
"bytes": "304636"
},
{
"name": "Python",
"bytes": "1776303"
},
{
"name": "Shell",
"bytes": "1322"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from cloudmesh_base.logger import LOGGER
from cloudmesh.cm_mongo import cm_MongoBase
from cloudmesh_base.ConfigDict import ConfigDict
import os
from pprint import pprint
# ----------------------------------------------------------------------
# SETTING UP A LOGGER
# ----------------------------------------------------------------------
log = LOGGER(__file__)
class cm_pagestatus(cm_MongoBase):
    """
    Holds status information (attribute/value pairs) associated with a web
    page, keyed by user and page URL.
    """

    cm_kind = 'pagestatus'

    def __init__(self):
        self.cm_type = "pagestatus"
        self.connect()

    def delete(self, user, page=None):
        '''
        Deletes the state values associated with a page. If None is
        specified for page, all page state values of the user are deleted.

        :param user: the user for which the state values are recorded
        :type user: string
        :param page: the page base url
        :type page: of the form /uri (string)
        '''
        # NOTE(review): delete filters on "cm_type" while add() stores
        # "cm_kind" -- both are 'pagestatus' here, but verify the schema
        # before renaming either key.
        if page is None:
            self.db_mongo.remove({"cm_type": self.cm_type, "cm_user_id": user})
        else:
            self.db_mongo.remove({"cm_type": self.cm_type,
                                  "page": page,
                                  "cm_user_id": user})

    def add(self, user, page, attribute, value):
        '''
        Adds (or overwrites) the state value for a user and page.

        :param user: the user for which the state value is recorded
        :type user: string
        :param page: the page base url, of the form /uri
        :type page: string
        :param attribute: name of the state attribute
        :type attribute: string
        :param value: the value to store (any mongo-serializable object)
        :type value: object
        '''
        self.update({
            'cm_kind': self.cm_kind,
            'cm_user_id': user,
            'page': page,
            'attribute': attribute
        }, {
            'cm_kind': self.cm_kind,
            'cm_user_id': user,
            'page': page,
            'attribute': attribute,
            'value': value})

    def get(self, user, page, attribute):
        '''
        Gets the state value for a user and a page.

        :param user: the user for which the state value is recorded
        :type user: string
        :param page: the page base url, of the form /uri
        :type page: string
        :param attribute: name of the state attribute
        :type attribute: string
        '''
        # BUG FIX: this previously called ``m.find_one`` -- ``m`` is a
        # module-level variable that only exists in the __main__ demo
        # below, so get() raised NameError from any other caller.
        result = self.find_one({'cm_user_id': user,
                                'page': page,
                                'attribute': attribute})
        # NOTE(review): if no document matches, result is presumably None
        # and this raises TypeError; callers currently rely on a match.
        return result['value']
if __name__ == "__main__":
m = cm_pagestatus()
m.clear()
m.add('gregor', '/hello', 'VMs', '100')
m.add('gregor', '/hello', 'images', '99')
m.add('gregor', '/hello', 'dict', {"a": 1, "b": {"c": 1}})
cursor = m.find({})
for element in cursor:
print('element', element)
print(m.get('gregor', '/hello', 'VMs'))
print(m.get('gregor', '/hello', 'images'))
print(m.get('gregor', '/hello', 'dict'))
pprint(m.get('gregor', '/hello', 'dict')['b'])
| {
"content_hash": "eacae71d87eac25ff4edbfe531327564",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 80,
"avg_line_length": 26.747663551401867,
"alnum_prop": 0.48951781970649894,
"repo_name": "rajpushkar83/cloudmesh",
"id": "4fbfa81699dc7496f72c2f158788a4c7a4250781",
"size": "2862",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cloudmesh/user/cm_pagestatus.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ActionScript",
"bytes": "15982"
},
{
"name": "CSS",
"bytes": "390396"
},
{
"name": "HTML",
"bytes": "4158355"
},
{
"name": "Java",
"bytes": "369"
},
{
"name": "JavaScript",
"bytes": "2803977"
},
{
"name": "Makefile",
"bytes": "7572"
},
{
"name": "PHP",
"bytes": "183557"
},
{
"name": "Python",
"bytes": "1736957"
},
{
"name": "Ruby",
"bytes": "10670"
},
{
"name": "Shell",
"bytes": "32263"
}
],
"symlink_target": ""
} |
""" Store only meta data but no real data (except from store state of nodes) """
import logging
import os
import yaml
from pySPACE.resources.dataset_defs.base import BaseDataset
from pySPACE.tools.filesystem import get_author
class DummyDataset(BaseDataset):
    """ Class to store only meta data of collection

    This class overrides the 'store' method in a way that only the
    collection meta data files are stored.  This type is intended to be
    passed to pySPACE as a result by the NilSinkNode.

    **Parameters**

        :dataset_md:
            The meta data of the current dataset.

            (*optional, default: None*)

    :Author: David Feess (david.feess@dfki.de)
    :Created: 2010/03/30
    """
    def __init__(self, dataset_md=None):
        super(DummyDataset, self).__init__(dataset_md=dataset_md)

    def store(self, result_dir, s_format="None"):
        """Write only the collection meta data into *result_dir*.

        Any storage format other than the pseudo-format "None" is
        rejected, since this dataset deliberately carries no real data.
        """
        # Idiom fix: ``if not s_format == "None"`` -> ``!=``.
        if s_format != "None":
            self._log("The format %s is not supported!" % s_format,
                      level=logging.CRITICAL)
            return
        # Update the meta data before writing it out.
        author = get_author()
        self.update_meta_data({"type": "only output of individual nodes stored",
                               "storage_format": s_format,
                               "author": author,
                               "data_pattern": "no data stored"})
        # Store meta data
        BaseDataset.store_meta_data(result_dir, self.meta_data)
"content_hash": "88965e125465d06afe6f42f1265a8c99",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 89,
"avg_line_length": 33.97674418604651,
"alnum_prop": 0.6036960985626283,
"repo_name": "pyspace/pyspace",
"id": "5536b5da89bf626f577224d9eb50f218b135b4e6",
"size": "1461",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "pySPACE/resources/dataset_defs/dummy.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "11128"
},
{
"name": "C++",
"bytes": "309606"
},
{
"name": "Matlab",
"bytes": "3768"
},
{
"name": "Python",
"bytes": "3160853"
},
{
"name": "QMake",
"bytes": "3217"
},
{
"name": "Shell",
"bytes": "253"
}
],
"symlink_target": ""
} |
from .analysis import *
from .connectivity import *

# Package version string; bump on release.
version = "0.1.4"
| {
"content_hash": "29904b7e7e08795644085d4482b892b9",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 27,
"avg_line_length": 17.75,
"alnum_prop": 0.704225352112676,
"repo_name": "NeuroDataDesign/seelviz",
"id": "e8eb22036705d68f8661db614b335d3e32427311",
"size": "71",
"binary": false,
"copies": "1",
"ref": "refs/heads/gh-pages",
"path": "jon/ipybns/clarityviz/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "700780"
},
{
"name": "HTML",
"bytes": "450426870"
},
{
"name": "JavaScript",
"bytes": "758169"
},
{
"name": "Jupyter Notebook",
"bytes": "627265127"
},
{
"name": "Matlab",
"bytes": "4476"
},
{
"name": "PHP",
"bytes": "1015"
},
{
"name": "Python",
"bytes": "723496"
},
{
"name": "Shell",
"bytes": "178"
}
],
"symlink_target": ""
} |
from telemetry.page import page_test
from measurements import smoothness_controller
class Smoothness(page_test.PageTest):
  """Page test that measures rendering smoothness via SmoothnessController.

  Lifecycle: a fresh controller is created before each navigation, started
  before page actions, stopped after them, and its results harvested in
  ValidateAndMeasurePage.
  """

  def __init__(self):
    super(Smoothness, self).__init__()
    self._smoothness_controller = None

  @classmethod
  def CustomizeBrowserOptions(cls, options):
    # Browser flags required for GPU benchmarking and synthetic touch input.
    options.AppendExtraBrowserArgs('--enable-gpu-benchmarking')
    options.AppendExtraBrowserArgs('--touch-events=enabled')
    options.AppendExtraBrowserArgs('--running-performance-benchmark')

  def WillNavigateToPage(self, page, tab):
    # New controller per page so measurements don't leak across pages.
    self._smoothness_controller = smoothness_controller.SmoothnessController()
    self._smoothness_controller.SetUp(page, tab)

  def WillRunActions(self, page, tab):
    self._smoothness_controller.Start(tab)

  def DidRunActions(self, page, tab):
    self._smoothness_controller.Stop(tab)

  def ValidateAndMeasurePage(self, page, tab, results):
    self._smoothness_controller.AddResults(tab, results)

  def CleanUpAfterPage(self, page, tab):
    # The controller may be None if WillNavigateToPage never ran.
    if self._smoothness_controller:
      self._smoothness_controller.CleanUp(tab)
| {
"content_hash": "e92a08e212d44ba199dfef914f395fcb",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 78,
"avg_line_length": 33,
"alnum_prop": 0.7509469696969697,
"repo_name": "ltilve/chromium",
"id": "04551340286a52c996b2df886b76ae22252b73b1",
"size": "1219",
"binary": false,
"copies": "1",
"ref": "refs/heads/igalia-sidebar",
"path": "tools/perf/measurements/smoothness.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "23829"
},
{
"name": "C",
"bytes": "4118701"
},
{
"name": "C++",
"bytes": "234094836"
},
{
"name": "CSS",
"bytes": "939350"
},
{
"name": "Emacs Lisp",
"bytes": "988"
},
{
"name": "HTML",
"bytes": "28170463"
},
{
"name": "Java",
"bytes": "9881553"
},
{
"name": "JavaScript",
"bytes": "19877257"
},
{
"name": "Makefile",
"bytes": "68017"
},
{
"name": "Objective-C",
"bytes": "1485658"
},
{
"name": "Objective-C++",
"bytes": "8718816"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "PLpgSQL",
"bytes": "177185"
},
{
"name": "Perl",
"bytes": "63937"
},
{
"name": "Protocol Buffer",
"bytes": "460217"
},
{
"name": "Python",
"bytes": "7973452"
},
{
"name": "Shell",
"bytes": "480424"
},
{
"name": "Standard ML",
"bytes": "4965"
},
{
"name": "XSLT",
"bytes": "418"
},
{
"name": "nesC",
"bytes": "18347"
}
],
"symlink_target": ""
} |
"""Helper functions which don't fit anywhere else"""
import ast
import inspect
import os
import re
import hashlib
import warnings
from collections import deque
from contextlib import contextmanager
from importlib import import_module
from pkgutil import iter_modules
from textwrap import dedent
from w3lib.html import replace_entities
from scrapy.utils.datatypes import LocalWeakReferencedCache
from scrapy.utils.python import flatten, to_unicode
from scrapy.item import _BaseItem
from scrapy.utils.deprecate import ScrapyDeprecationWarning
_ITERABLE_SINGLE_VALUES = dict, _BaseItem, str, bytes
def arg_to_iter(arg):
    """Convert an argument to an iterable. The argument can be a None, single
    value, or an iterable.

    Exception: if arg is a dict, [arg] will be returned
    """
    if arg is None:
        return []
    # Dicts, items, str and bytes are iterable but treated as single values.
    if hasattr(arg, '__iter__') and not isinstance(arg, _ITERABLE_SINGLE_VALUES):
        return arg
    return [arg]
def load_object(path):
    """Load an object given its absolute object path, and return it.

    The object can be the import path of a class, function, variable or an
    instance, e.g. 'scrapy.downloadermiddlewares.redirect.RedirectMiddleware'.

    If ``path`` is not a string, but is a callable object, such as a class or
    a function, then return it as is.
    """
    if not isinstance(path, str):
        if callable(path):
            return path
        raise TypeError("Unexpected argument type, expected string "
                        "or object, got: %s" % type(path))

    # Split on the last dot: everything before is the module, after is
    # the attribute name.  An empty separator means there was no dot.
    module, sep, name = path.rpartition('.')
    if not sep:
        raise ValueError(f"Error loading object '{path}': not a full path")

    mod = import_module(module)
    try:
        obj = getattr(mod, name)
    except AttributeError:
        raise NameError(f"Module '{module}' doesn't define any object named '{name}'")
    return obj
def walk_modules(path):
    """Loads a module and all its submodules from the given module path and
    returns them. If *any* module throws an exception while importing, that
    exception is thrown back.

    For example: walk_modules('scrapy.utils')
    """
    root = import_module(path)
    modules = [root]
    # Only packages expose __path__; plain modules have no submodules.
    if hasattr(root, '__path__'):
        for _, subname, is_pkg in iter_modules(root.__path__):
            full_name = '.'.join((path, subname))
            if is_pkg:
                modules.extend(walk_modules(full_name))
            else:
                modules.append(import_module(full_name))
    return modules
def extract_regex(regex, text, encoding='utf-8'):
    """Extract a list of unicode strings from the given text/encoding using the following policies:
    * if the regex contains a named group called "extract" that will be returned
    * if the regex contains multiple numbered groups, all those will be returned (flattened)
    * if the regex doesn't contain any group the entire regex matching is returned
    """
    warnings.warn(
        "scrapy.utils.misc.extract_regex has moved to parsel.utils.extract_regex.",
        ScrapyDeprecationWarning,
        stacklevel=2
    )
    if isinstance(regex, str):
        regex = re.compile(regex, re.UNICODE)

    # The broad except is deliberate: it catches both "no match"
    # (search() returns None -> AttributeError) and "no 'extract' group"
    # (IndexError), falling back to findall() in either case.
    try:
        strings = [regex.search(text).group('extract')]   # named group
    except Exception:
        strings = regex.findall(text)    # full regex or numbered groups
    strings = flatten(strings)

    # Bytes input is decoded with the given encoding before entity
    # replacement; &lt; and &amp; are kept to avoid creating new markup.
    if isinstance(text, str):
        return [replace_entities(s, keep=['lt', 'amp']) for s in strings]
    else:
        return [replace_entities(to_unicode(s, encoding), keep=['lt', 'amp'])
                for s in strings]
def md5sum(file):
    """Calculate the md5 checksum of a file-like object without reading its
    whole content in memory.

    >>> from io import BytesIO
    >>> md5sum(BytesIO(b'file content to hash'))
    '784406af91dd5a54fbb9c84c2236595a'
    """
    digest = hashlib.md5()
    # Stream in fixed-size chunks so arbitrarily large files never need to
    # fit in memory at once.
    chunk = file.read(8096)
    while chunk:
        digest.update(chunk)
        chunk = file.read(8096)
    return digest.hexdigest()
def rel_has_nofollow(rel):
    """Return True if link rel attribute has nofollow type"""
    if rel is None:
        return False
    # rel is a whitespace-separated token list per HTML semantics.
    return 'nofollow' in rel.split()
def create_instance(objcls, settings, crawler, *args, **kwargs):
    """Construct an instance of *objcls*, honouring its ``from_crawler`` or
    ``from_settings`` alternate constructors when available.

    Resolution order:

    1. ``objcls.from_crawler(crawler, ...)`` when *crawler* is given and the
       class defines ``from_crawler``;
    2. ``objcls.from_settings(settings, ...)`` when the class defines it;
    3. plain ``objcls(...)`` otherwise.

    At least one of *settings* and *crawler* must be provided; when
    *settings* is ``None`` it is taken from ``crawler.settings``.

    ``*args`` and ``**kwargs`` are forwarded to whichever constructor is
    used.

    :raises ValueError: if both *settings* and *crawler* are ``None``.
    :raises TypeError: if the chosen constructor returns ``None`` (e.g. an
        incorrectly implemented extension).

    .. versionchanged:: 2.2
       Raises ``TypeError`` if the resulting instance is ``None``.
    """
    if settings is None:
        if crawler is None:
            raise ValueError("Specify at least one of settings and crawler.")
        settings = crawler.settings
    if crawler and hasattr(objcls, 'from_crawler'):
        method_name = 'from_crawler'
        instance = objcls.from_crawler(crawler, *args, **kwargs)
    elif hasattr(objcls, 'from_settings'):
        method_name = 'from_settings'
        instance = objcls.from_settings(settings, *args, **kwargs)
    else:
        method_name = '__new__'
        instance = objcls(*args, **kwargs)
    if instance is None:
        raise TypeError(f"{objcls.__qualname__}.{method_name} returned None")
    return instance
@contextmanager
def set_environ(**kwargs):
    """Temporarily override environment variables for the duration of the
    ``with`` block, fully restoring the previous environment on exit.
    """
    # Remember each variable's prior value (None marks "was unset").
    saved = {name: os.environ.get(name) for name in kwargs}
    os.environ.update(kwargs)
    try:
        yield
    finally:
        for name, previous in saved.items():
            if previous is None:
                del os.environ[name]
            else:
                os.environ[name] = previous
def walk_callable(node):
    """Like ``ast.walk``, but yields only the nodes belonging to the
    outermost function, skipping the bodies of nested functions.
    """
    pending = deque([node])
    seen_function_def = False
    while pending:
        current = pending.popleft()
        if isinstance(current, ast.FunctionDef):
            # The first FunctionDef encountered is the callable itself; any
            # later one is a nested function whose body must be skipped.
            if seen_function_def:
                continue
            seen_function_def = True
        pending.extend(ast.iter_child_nodes(current))
        yield current
# Memoizes analysis results per callable (limit=128 entries); presumably
# weak-referencing so cached callables can still be garbage collected —
# confirm against LocalWeakReferencedCache.
_generator_callbacks_cache = LocalWeakReferencedCache(limit=128)
def is_generator_with_return_value(callable):
    """
    Returns True if a callable is a generator function which includes a
    'return' statement with a value different than None, False otherwise
    """
    if callable in _generator_callbacks_cache:
        return _generator_callbacks_cache[callable]
    def returns_none(return_node):
        value = return_node.value
        # Both a bare "return" (value is None) and an explicit "return None"
        # (NameConstant whose value is None) count as value-less.
        return value is None or isinstance(value, ast.NameConstant) and value.value is None
    if inspect.isgeneratorfunction(callable):
        # dedent() because getsource() of a method keeps its original
        # indentation, which ast.parse() would reject.
        tree = ast.parse(dedent(inspect.getsource(callable)))
        for node in walk_callable(tree):
            if isinstance(node, ast.Return) and not returns_none(node):
                _generator_callbacks_cache[callable] = True
                return _generator_callbacks_cache[callable]
    _generator_callbacks_cache[callable] = False
    return _generator_callbacks_cache[callable]
def warn_on_generator_with_return_value(spider, callable):
    """
    Logs a warning if a callable is a generator function and includes
    a 'return' statement with a value different than None.

    This helper is purely diagnostic, so analysis failures are swallowed:
    ``inspect.getsource()`` raises ``OSError`` when the source is
    unavailable (e.g. interactively defined callables), and ``ast.parse()``
    can raise ``IndentationError``/``SyntaxError`` for source that does not
    survive ``dedent()``. Such failures must not break the caller.

    :param spider: spider instance owning *callable*; only used to build
        the warning message.
    :param callable: the callback/method to inspect.
    """
    try:
        is_gen = is_generator_with_return_value(callable)
    except (OSError, IndentationError, SyntaxError):
        # Could not obtain or parse the callable's source; skip the warning
        # rather than crash a best-effort check.
        return
    if is_gen:
        warnings.warn(
            f'The "{spider.__class__.__name__}.{callable.__name__}" method is '
            'a generator and includes a "return" statement with a value '
            'different than None. This could lead to unexpected behaviour. Please see '
            'https://docs.python.org/3/reference/simple_stmts.html#the-return-statement '
            'for details about the semantics of the "return" statement within generators',
            stacklevel=2,
        )
| {
"content_hash": "a0b08706f11113d5d252e05ffa8235ad",
"timestamp": "",
"source": "github",
"line_count": 253,
"max_line_length": 99,
"avg_line_length": 32.84584980237154,
"alnum_prop": 0.6450060168471721,
"repo_name": "dangra/scrapy",
"id": "081cd33f1aa8280b3e56301136f79ce70d65cb79",
"size": "8310",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "scrapy/utils/misc.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "2790"
},
{
"name": "Python",
"bytes": "1670720"
},
{
"name": "Roff",
"bytes": "2010"
},
{
"name": "Shell",
"bytes": "259"
}
],
"symlink_target": ""
} |
# Sphinx configuration for building the pyaddepar documentation.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))
#sys.path.insert(0, "/pylobnek/pylobnek")
# NOTE(review): hard-coded absolute path — autodoc will only find the
# package in environments (e.g. a build container) where it is mounted at
# /pyaddepar/; confirm this matches the build setup.
sys.path.insert(0, "/pyaddepar/")
# -- General configuration ------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc', 'sphinx.ext.viewcode'
]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'pyaddepar'
copyright = '2017, Lobnek Wealth Management'
author = 'Lobnek Wealth Management'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '3.2'
# The full version, including alpha/beta/rc tags.
release = '3.2.0'
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Output file base name for HTML help builder.
htmlhelp_basename = 'pyaddepar'
| {
"content_hash": "17aa144d26989076a517304b13b967bd",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 79,
"avg_line_length": 26.4,
"alnum_prop": 0.6864801864801865,
"repo_name": "lobnek/pyaddepar",
"id": "a847b8808406e0f46900a06f251503edf63a7157",
"size": "1739",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "source/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "901"
},
{
"name": "Makefile",
"bytes": "1712"
},
{
"name": "Python",
"bytes": "18723"
}
],
"symlink_target": ""
} |
"""Support for IHC switches."""
from homeassistant.components.switch import SwitchDevice
from . import IHC_CONTROLLER, IHC_DATA, IHC_INFO
from .const import CONF_OFF_ID, CONF_ON_ID
from .ihcdevice import IHCDevice
from .util import async_pulse, async_set_bool
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the IHC switch platform.

    Only set up through discovery by the IHC component: each discovery_info
    entry maps an entity name to a device dict carrying the IHC resource id,
    product configuration and owning controller id.
    """
    if discovery_info is None:
        return
    devices = []
    for name, device in discovery_info.items():
        ihc_id = device['ihc_id']
        product_cfg = device['product_cfg']
        product = device['product']
        # Find controller that corresponds with device id
        ctrl_id = device['ctrl_id']
        ihc_key = IHC_DATA.format(ctrl_id)
        info = hass.data[ihc_key][IHC_INFO]
        ihc_controller = hass.data[ihc_key][IHC_CONTROLLER]
        # Optional dedicated on/off resource ids: when present the switch is
        # driven by pulsing these; otherwise the main ihc_id is set as a bool
        # (see IHCSwitch.async_turn_on/off).
        ihc_off_id = product_cfg.get(CONF_OFF_ID)
        ihc_on_id = product_cfg.get(CONF_ON_ID)
        switch = IHCSwitch(ihc_controller, name, ihc_id, ihc_off_id, ihc_on_id,
                           info, product)
        devices.append(switch)
    add_entities(devices)
class IHCSwitch(IHCDevice, SwitchDevice):
    """Representation of an IHC switch."""
    def __init__(self, ihc_controller, name: str, ihc_id: int, ihc_off_id: int,
                 ihc_on_id: int, info: bool, product=None) -> None:
        """Initialize the IHC switch.

        NOTE(review): the ``info`` argument is accepted but not forwarded to
        the IHCDevice base class — verify the base-class signature; ``product``
        may be landing in the wrong positional slot.
        """
        super().__init__(ihc_controller, name, ihc_id, product)
        # Optional dedicated pulse resource ids; falsy means "write the main
        # ihc_id as a boolean" instead.
        self._ihc_off_id = ihc_off_id
        self._ihc_on_id = ihc_on_id
        # Last known state, kept in sync by on_ihc_change() notifications.
        self._state = False
    @property
    def is_on(self):
        """Return true if switch is on."""
        return self._state
    async def async_turn_on(self, **kwargs):
        """Turn the switch on."""
        if self._ihc_on_id:
            await async_pulse(self.hass, self.ihc_controller, self._ihc_on_id)
        else:
            await async_set_bool(self.hass, self.ihc_controller,
                                 self.ihc_id, True)
    async def async_turn_off(self, **kwargs):
        """Turn the device off."""
        if self._ihc_off_id:
            await async_pulse(self.hass, self.ihc_controller, self._ihc_off_id)
        else:
            await async_set_bool(self.hass, self.ihc_controller,
                                 self.ihc_id, False)
    def on_ihc_change(self, ihc_id, value):
        """Handle IHC resource change."""
        # Push model: the controller notifies us, so no polling is needed.
        self._state = value
        self.schedule_update_ha_state()
| {
"content_hash": "27dcde1717ebb23ca2d192e44712bf57",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 79,
"avg_line_length": 36.220588235294116,
"alnum_prop": 0.5935850588712952,
"repo_name": "auduny/home-assistant",
"id": "6d3a72a3b661cdb5f25626c6f3fcaa0b7e59847c",
"size": "2463",
"binary": false,
"copies": "7",
"ref": "refs/heads/dev",
"path": "homeassistant/components/ihc/switch.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1081"
},
{
"name": "HCL",
"bytes": "407"
},
{
"name": "Python",
"bytes": "15129018"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17609"
}
],
"symlink_target": ""
} |
import copy
from abc import ABC, abstractmethod
from collections import defaultdict
from typing import Any, Callable, Dict, Hashable, Union
import torch
from pyro.ops.welford import WelfordCovariance
class StreamingStats(ABC):
    """
    Abstract base class for streamable statistics of trees of tensors.
    Derived classes must implement :meth:`update`, :meth:`merge`, and
    :meth:`get`.
    """
    @abstractmethod
    def update(self, sample) -> None:
        """
        Update state from a single sample.
        This mutates ``self`` and returns nothing. Updates should be
        independent of order, i.e. samples should be exchangeable.
        :param sample: A sample value which is a nested dictionary of
            :class:`torch.Tensor` leaves. This can have arbitrary nesting and
            shape, but assumes shape is constant across calls to
            ``.update()``.
        """
        raise NotImplementedError
    @abstractmethod
    def merge(self, other) -> "StreamingStats":
        """
        Merge two aggregate statistics, e.g. from different MCMC chains.
        This is a pure function: it returns a new :class:`StreamingStats`
        object and does not modify either ``self`` or ``other``.
        :param other: Another streaming stats instance of the same type.
        """
        assert isinstance(other, type(self))
        raise NotImplementedError
    @abstractmethod
    def get(self) -> Any:
        """
        Return the aggregate statistic.
        """
        raise NotImplementedError
class CountStats(StreamingStats):
    """
    Statistic that tracks nothing but the number of samples observed.

    For example::

        >>> stats = CountStats()
        >>> stats.update(torch.randn(3, 3))
        >>> stats.get()
        {'count': 1}
    """
    def __init__(self):
        self.count = 0
        super().__init__()
    def update(self, sample) -> None:
        # The sample's value is irrelevant; only its occurrence is counted.
        self.count += 1
    def merge(self, other: "CountStats") -> "CountStats":
        assert isinstance(other, type(self))
        merged = CountStats()
        merged.count = self.count + other.count
        return merged
    def get(self) -> Dict[str, int]:
        """
        :returns: A dictionary with keys ``count: int``.
        :rtype: dict
        """
        return {"count": self.count}
class StatsOfDict(StreamingStats):
    """
    Statistics of samples that are dictionaries with constant set of keys.
    For example the following are equivalent::
        # Version 1. Hand encode statistics.
        >>> a_stats = CountStats()
        >>> b_stats = CountMeanStats()
        >>> a_stats.update(torch.tensor(0.))
        >>> b_stats.update(torch.tensor([1., 2.]))
        >>> summary = {"a": a_stats.get(), "b": b_stats.get()}
        # Version 2. Collect samples into dictionaries.
        >>> stats = StatsOfDict({"a": CountStats, "b": CountMeanStats})
        >>> stats.update({"a": torch.tensor(0.), "b": torch.tensor([1., 2.])})
        >>> summary = stats.get()
        >>> summary
        {'a': {'count': 1}, 'b': {'count': 1, 'mean': tensor([1., 2.])}}
    :param default: Default type of statistics of values of the dictionary.
        Defaults to the inexpensive :class:`CountStats`.
    :param dict types: Dictionary mapping key to type of statistic that should
        be recorded for values corresponding to that key.
    """
    def __init__(
        self,
        types: Dict[Hashable, Callable[[], StreamingStats]] = {},
        default: Callable[[], StreamingStats] = CountStats,
    ):
        # defaultdict lazily creates a `default` statistic for keys first
        # seen in update(); explicitly typed keys are pre-populated here.
        self.stats: Dict[Hashable, StreamingStats] = defaultdict(default)
        self.stats.update({k: v() for k, v in types.items()})
        super().__init__()
    def update(self, sample: Dict[Hashable, Any]) -> None:
        for k, v in sample.items():
            self.stats[k].update(v)
    def merge(self, other: "StatsOfDict") -> "StatsOfDict":
        assert isinstance(other, type(self))
        # Start from a deep copy of self so keys unique to self are already
        # present; keys unique to other are copied in, and shared keys are
        # merged pairwise. The result is independent of both operands.
        result = copy.deepcopy(self)
        for k in set(self.stats).union(other.stats):
            if k not in self.stats:
                result.stats[k] = copy.deepcopy(other.stats[k])
            elif k in other.stats:
                result.stats[k] = self.stats[k].merge(other.stats[k])
        return result
    def get(self) -> Dict[Hashable, Any]:
        """
        :returns: A dictionary of statistics. The keys of this dictionary are
            the same as the keys of the samples from which this object is
            updated.
        :rtype: dict
        """
        return {k: v.get() for k, v in self.stats.items()}
class StackStats(StreamingStats):
    """
    Statistic that accumulates a stream of tensors and exposes them as one
    stacked tensor.
    """
    def __init__(self):
        self.samples = []
    def update(self, sample: torch.Tensor) -> None:
        assert isinstance(sample, torch.Tensor)
        self.samples.append(sample)
    def merge(self, other: "StackStats") -> "StackStats":
        assert isinstance(other, type(self))
        combined = StackStats()
        combined.samples = [*self.samples, *other.samples]
        return combined
    def get(self) -> Dict[str, Union[int, torch.Tensor]]:
        """
        :returns: A dictionary with keys ``count: int`` and (if any samples
            have been collected) ``samples: torch.Tensor``.
        :rtype: dict
        """
        if not self.samples:
            return {"count": 0}
        return {"count": len(self.samples), "samples": torch.stack(self.samples)}
class CountMeanStats(StreamingStats):
    """
    Statistic that tracks the running count and mean of a single
    :class:`torch.Tensor` stream.
    """
    def __init__(self):
        self.count = 0
        self.mean = 0
        super().__init__()
    def update(self, sample: torch.Tensor) -> None:
        assert isinstance(sample, torch.Tensor)
        self.count += 1
        # Incremental (Welford-style) mean update; += keeps the running
        # mean tensor updated in place.
        delta = sample.detach() - self.mean
        self.mean += delta / self.count
    def merge(self, other: "CountMeanStats") -> "CountMeanStats":
        assert isinstance(other, type(self))
        merged = CountMeanStats()
        merged.count = self.count + other.count
        # max(..., 1) guards against dividing by zero when both are empty.
        weight_self = self.count / max(merged.count, 1)
        weight_other = other.count / max(merged.count, 1)
        merged.mean = weight_self * self.mean + weight_other * other.mean
        return merged
    def get(self) -> Dict[str, Union[int, torch.Tensor]]:
        """
        :returns: A dictionary with keys ``count: int`` and (if any samples
            have been collected) ``mean: torch.Tensor``.
        :rtype: dict
        """
        if self.count == 0:
            return {"count": 0}
        return {"count": self.count, "mean": self.mean}
class CountMeanVarianceStats(StreamingStats):
    """
    Statistic tracking the count, mean, and (diagonal) variance of a single
    :class:`torch.Tensor`.
    """
    def __init__(self):
        # Shape of the first observed sample; fixed thereafter and used to
        # restore the flattened statistics in .get().
        self.shape = None
        self.welford = WelfordCovariance(diagonal=True)
        super().__init__()
    def update(self, sample: torch.Tensor) -> None:
        assert isinstance(sample, torch.Tensor)
        if self.shape is None:
            self.shape = sample.shape
        assert sample.shape == self.shape
        # The Welford accumulator operates on flat vectors.
        self.welford.update(sample.detach().reshape(-1))
    def merge(self, other: "CountMeanVarianceStats") -> "CountMeanVarianceStats":
        assert isinstance(other, type(self))
        # An empty accumulator merges to a copy of the non-empty one.
        if self.shape is None:
            return copy.deepcopy(other)
        if other.shape is None:
            return copy.deepcopy(self)
        result = copy.deepcopy(self)
        res = result.welford
        lhs = self.welford
        rhs = other.welford
        # Parallel combination of two (mean, M2) accumulators: weighted mean
        # plus a cross-term correcting M2 for the difference of the means.
        res.n_samples = lhs.n_samples + rhs.n_samples
        lhs_weight = lhs.n_samples / res.n_samples
        rhs_weight = rhs.n_samples / res.n_samples
        res._mean = lhs_weight * lhs._mean + rhs_weight * rhs._mean
        res._m2 = (
            lhs._m2
            + rhs._m2
            + (lhs.n_samples * rhs.n_samples / res.n_samples)
            * (lhs._mean - rhs._mean) ** 2
        )
        return result
    def get(self) -> Dict[str, Union[int, torch.Tensor]]:
        """
        :returns: A dictionary with keys ``count: int`` and (if any samples
            have been collected) ``mean: torch.Tensor`` and ``variance:
            torch.Tensor``.
        :rtype: dict
        """
        if self.shape is None:
            return {"count": 0}
        count = self.welford.n_samples
        mean = self.welford._mean.reshape(self.shape)
        variance = self.welford.get_covariance(regularize=False).reshape(self.shape)
        return {"count": count, "mean": mean, "variance": variance}
# Explicit public API of this module.
# Note this is ordered logically for sphinx rather than alphabetically.
__all__ = [
    "StreamingStats",
    "StatsOfDict",
    "StackStats",
    "CountStats",
    "CountMeanStats",
    "CountMeanVarianceStats",
]
| {
"content_hash": "1ffcdfcae65acb81b7f12c11218af68c",
"timestamp": "",
"source": "github",
"line_count": 274,
"max_line_length": 84,
"avg_line_length": 32.18248175182482,
"alnum_prop": 0.58777500567022,
"repo_name": "uber/pyro",
"id": "1886b265a11686e87161d30e5e0ff29850dd0bcf",
"size": "8903",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "pyro/ops/streaming.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "6121"
},
{
"name": "CSS",
"bytes": "478"
},
{
"name": "Dockerfile",
"bytes": "1635"
},
{
"name": "Makefile",
"bytes": "6857"
},
{
"name": "Python",
"bytes": "3388193"
},
{
"name": "Shell",
"bytes": "6465"
},
{
"name": "TeX",
"bytes": "3649"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
# pyeq2 is a collection of equations expressed as Python classes
#
# Copyright (C) 2013 James R. Phillips
# 2548 Vera Cruz Drive
# Birmingham, AL 35235 USA
#
# email: zunzun@zunzun.com
#
# License: BSD-style (see LICENSE.txt in main source directory)
import sys, os
if os.path.join(sys.path[0][:sys.path[0].rfind(os.sep)], '..') not in sys.path:
sys.path.append(os.path.join(sys.path[0][:sys.path[0].rfind(os.sep)], '..'))
import pyeq2
import numpy # implicitly required by compiling the userFunctionCodeObject in the method EvaluateCachedData() below
numpy.seterr(all= 'ignore')
import StringIO, parser, types
import pyeq2.Model_2D_BaseClass
class UserDefinedFunction(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
    """2D fitting model whose function is supplied as user text, validated,
    and compiled once into a reusable code object for fast re-evaluation.

    NOTE(review): this code targets Python 2 — ``StringIO``, the ``parser``
    module and ``types.TupleType`` do not exist on Python 3; confirm the
    intended interpreter before modernizing.
    """
    userDefinedFunctionFlag = True
    # Whitelist of names the user's function text may reference; they are
    # resolved against the numpy namespace when the text is compiled.
    # based on http://lybniz2.sourceforge.net/safeeval.html
    functionDictionary = {'Arithmetic Operations': ['power', 'mod'],
                          'Exponents And Logarithms': ['exp', 'log', 'log10', 'log2'],
                          'Trigonometric Functions': ['sin', 'cos', 'tan', 'arcsin', 'arccos', 'arctan', 'hypot', 'arctan2', 'deg2rad', 'rad2deg'],
                          'Hyperbolic Trig Functions':['arcsinh', 'arccosh', 'arctanh', 'sinh', 'cosh', 'tanh'],
                          'Other Special Functions': ['sinc'],
                          'Miscellaneous': ['sqrt', 'square', 'fabs', 'sign']
                          }
    constantsDictionary = {'Constants':['pi', 'e']}
    _baseName = "User Defined Function"
    webReferenceURL = ''
    baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
    autoGenerateOffsetForm = False
    autoGenerateReciprocalForm = False
    autoGenerateInverseForms = False
    autoGenerateGrowthAndDecayForms = False
    def __init__(self, inFittingTarget = 'SSQABS', inExtendedVersionName = 'Default', inUserFunctionString = ''):
        # Parse the user text first so coefficient designators exist before
        # the base class initializes.
        if inUserFunctionString:
            self.ParseAndCompileUserFunctionString(inUserFunctionString)
        pyeq2.Model_2D_BaseClass.Model_2D_BaseClass.__init__(self, inFittingTarget, inExtendedVersionName) # call superclass
    def GetDisplayHTML(self):
        # No closed-form HTML is available for arbitrary user text.
        return 'y = User Defined Function'
    def GetDataCacheFunctions(self):
        # Only the raw X values are needed from the data cache.
        functionList = []
        functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
        return functionList
    def GetTokensFromTupleParsingHelper(self, tup, inList=None):
        """Recursively collect NAME tokens (and any '^' character, so it can
        be rejected later) from a ``parser`` module parse-tree tuple."""
        if inList is None:
            inList = []
        if type(tup) is types.TupleType:
            tupleLength = len(tup)
            if tupleLength > 1 and type(tup[0]) is not types.TupleType:
                if tup[0] == 1:
                    # token id 1 is NAME in the tokenize module
                    inList.append(tup[1])
                if tupleLength == 2: # so a caret character can be trapped later
                    if tup[0] == 33:
                        if tup[1] == '^':
                            inList.append('^')
            for i in tup:
                inList = self.GetTokensFromTupleParsingHelper(i, inList)
        return inList
    def ParseAndCompileUserFunctionString(self, inString):
        """Validate *inString*, derive coefficient designators from the
        unknown names it contains, and compile it into
        ``self.userFunctionCodeObject`` for reuse by CalculateModelPredictions().

        :raises Exception: on empty text, '=' signs, unsupported tokens
            ('^', ln, abs, EXP, LOG), a missing 'X', or no coefficients.
        """
        # shift user functions into numpy namespace at run time, do not import time
        numpySafeTokenList = []
        for key in list(self.functionDictionary.keys()):
            numpySafeTokenList += self.functionDictionary[key]
        for key in list(self.constantsDictionary.keys()):
            numpySafeTokenList += self.constantsDictionary[key]
        # to shift user functions such as "power" into the numpy namespace "numpy.power" for evaluation
        for token in numpySafeTokenList:
            exec(token + ' = numpy.' + token)
        # no blank lines of text, StringIO() allows using file methods on text
        stringToConvert = ''
        rawData = StringIO.StringIO(inString).readlines()
        for line in rawData:
            stripped = line.strip()
            if len(stripped) > 0: # no empty strings
                if stripped[0] != '#': # no comment-only lines
                    stringToConvert += stripped + '\n'
        # convert brackets to parentheses
        stringToConvert = stringToConvert.replace('[', '(').replace(']', ')')
        if stringToConvert == '':
            raise Exception('You must enter some function text for the software to use.')
        if -1 != stringToConvert.find('='):
            raise Exception('Please do not use an equals sign "=" in your text.')
        st = parser.expr(stringToConvert)
        tup = st.totuple()
        tokens = self.GetTokensFromTupleParsingHelper(tup)
        if '^' in tokens:
            raise Exception('The caret symbol "^" is not recognized by the parser, please substitute double asterisks "**" for "^".')
        if 'ln' in tokens:
            raise Exception("The parser uses log() for the natural log function, not ln(). Please use log() in your text.")
        if 'abs' in tokens:
            raise Exception("The parser uses fabs() for the absolute value, not abs(). Please use fabs() in your text.")
        if 'EXP' in tokens:
            raise Exception("The parser uses lower case exp(), not upper case EXP(). Please use lower case exp() in your text.")
        if 'LOG' in tokens:
            raise Exception("The parser uses lower case log(), not upper case LOG(). Please use lower case log() in your text.")
        # test for required reserved tokens
        tokenNames = list(set(tokens) - set(numpySafeTokenList))
        if 'X' not in tokenNames:
            raise Exception('You must use a separate upper case "X" in your function to enter a valid function of X.')
        # every unknown name that is not 'X' becomes a fitted coefficient
        self._coefficientDesignators = sorted(list(set(tokenNames) - set(['X'])))
        if len(self._coefficientDesignators) == 0:
            raise Exception('I could not find any equation parameter or coefficient names, please check the function text')
        # now compile code object using safe tokens with integer conversion
        self.safe_dict = dict([ (k, locals().get(k, None)) for k in numpySafeTokenList ])
        # convert integer use such as (3/2) into floats such as (3.0/2.0)
        st = parser.expr(stringToConvert)
        stList = parser.st2list(st)
        stList = self.RecursivelyConvertIntStringsToFloatStrings(stList)
        st = parser.sequence2st(stList)
        # later evals re-use this compiled code for improved performance in EvaluateCachedData() methods
        self.userFunctionCodeObject = parser.compilest(st)
    def ShouldDataBeRejected(self, inModel):
        # User-defined functions impose no data restrictions.
        return False
    def AreCoefficientsWithinBounds(self, inCoeffs):
        return True # User Defined Functions do not have coefficient bounds
    def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
        # Expose the independent data to the compiled user code.
        self.safe_dict['X'] = inDataCacheDictionary['X']
        if self.GetDimensionality() == 3:
            self.safe_dict['Y'] = inDataCacheDictionary['Y']
        # define coefficient values before calling eval
        for i in range(len(self._coefficientDesignators)):
            self.safe_dict[self._coefficientDesignators[i]] = inCoeffs[i]
        # eval uses previously compiled code for improved performance
        # based on http://lybniz2.sourceforge.net/safeeval._HTML
        try:
            temp = eval(self.userFunctionCodeObject, {"__builtins__":None, 'numpy':numpy}, self.safe_dict)
            return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
        except:
            # A huge constant marks evaluation failure so the solver rejects
            # this coefficient set rather than crashing the fit.
            return numpy.ones(len(inDataCacheDictionary['X'])) * 1.0E300
    def Solve(self, inUserFunctionString = None, inAlgorithmName="Levenberg-Marquardt"):
        if inUserFunctionString:
            self.ParseAndCompileUserFunctionString(inUserFunctionString)
        # starting point
        if len(self.estimatedCoefficients) == 0:
            self.estimatedCoefficients = pyeq2.solverService().SolveUsingDE(self)
        if self.fittingTarget == 'ODR':
            return pyeq2.solverService().SolveUsingODR(self)
        # refine with the selected algorithm, then polish with simplex
        self.estimatedCoefficients = pyeq2.solverService().SolveUsingSelectedAlgorithm(self, inAlgorithmName=inAlgorithmName)
        return pyeq2.solverService().SolveUsingSimplex(self)
| {
"content_hash": "3eab8891d562a13a2947987e077242ac",
"timestamp": "",
"source": "github",
"line_count": 199,
"max_line_length": 148,
"avg_line_length": 42.68341708542714,
"alnum_prop": 0.6317400518012715,
"repo_name": "jamesrp/pyeq2",
"id": "9dabd21f8a9e1fff59190c4650c293a675efdb52",
"size": "8494",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Models_2D/UserDefinedFunction.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "1882602"
},
{
"name": "Shell",
"bytes": "119"
}
],
"symlink_target": ""
} |
from orator.orm import Model
class User(Model):
    # Mass-assignment whitelist: only "name" may be set via bulk attribute
    # assignment (e.g. User.create(name=...)).
    __fillable__ = ["name"]
| {
"content_hash": "0a3ab25e0fc2b298b3215641027b710f",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 28,
"avg_line_length": 13.166666666666666,
"alnum_prop": 0.6329113924050633,
"repo_name": "sdispater/orator",
"id": "75b600d5c9a38404e42cc6d25418dac0dd1c9bc0",
"size": "104",
"binary": false,
"copies": "1",
"ref": "refs/heads/0.9",
"path": "tests/orm/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2170"
},
{
"name": "Python",
"bytes": "1013569"
}
],
"symlink_target": ""
} |
from pyvisdk.thirdparty import Enum
# Health states reported by a host numeric sensor, mirroring the vSphere
# API's HostNumericSensorHealthState enumerated type.
HostNumericSensorHealthState = Enum(
    'green',
    'red',
    'unknown',
    'yellow',
)
| {
"content_hash": "54b21316d5fb4de190c0f143de4365ad",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 36,
"avg_line_length": 14.444444444444445,
"alnum_prop": 0.6461538461538462,
"repo_name": "xuru/pyvisdk",
"id": "14d1b67f80f1275082d1e3c57d62c18c0d0bb8df",
"size": "255",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyvisdk/enums/host_numeric_sensor_health_state.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "369"
},
{
"name": "Python",
"bytes": "3037849"
},
{
"name": "Shell",
"bytes": "4517"
}
],
"symlink_target": ""
} |
import logging
import pytest
from src.piece import Color
from src.game import Game
from src.position import Position, Square
from src.util import to_algebraic, to_numeric
from tst.util import InvalidMoveException
from tst.util import DidNotFindAllLegalMovesException
from tst.standard_starting_position_data import position as position
from tst.standard_starting_position_data import legal_moves
def check_number_opening_moves(pos, expected) -> None:
    """Assert that *pos* has exactly *expected* legal successor positions.

    Fix: the previous implementation discarded the ``pos`` argument and
    rebuilt a position from ``STANDARD_STARTING_POSITION``, a name that is
    never defined or imported in this module, so any call raised NameError.
    """
    assert len(pos.successors()) == expected
def check_legal_moves(pos, square, expected):
    """Assert the legal moves for *square* in *pos* are exactly the algebraic
    destinations in *expected*.

    Note: *expected* is consumed (elements are removed) while checking.
    """
    moves = pos.legal_moves_for_square(square)
    for move in moves:
        algebraic_move = to_algebraic(move.destination)
        try:
            expected.remove(algebraic_move)
        except KeyError:
            # The engine produced a move the test data does not allow.
            raise InvalidMoveException('{} is an invalid move for square {} in position\n{}'.format(
                algebraic_move, square, pos))
    # Anything left in `expected` is a legal move the engine failed to find.
    try:
        assert len(expected) == 0
    except Exception as e:
        raise DidNotFindAllLegalMovesException("Didn't find all the moves... moves: {}, expected: {}".format(
            moves, expected))
def test_white() -> None:
    """Verify legal moves for every listed square with White to move."""
    pos = position['white_playing']
    for square_key, expected_moves in legal_moves['white_playing'].items():
        check_legal_moves(pos, pos[square_key], expected_moves)
def test_black() -> None:
    """Verify legal moves for every listed square with Black to move."""
    pos = position['black_playing']
    for square_key, expected_moves in legal_moves['black_playing'].items():
        check_legal_moves(pos, pos[square_key], expected_moves)
| {
"content_hash": "110b0631c4ab8908d0a99f22ee1e85b7",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 109,
"avg_line_length": 38.142857142857146,
"alnum_prop": 0.6991260923845194,
"repo_name": "mcqueenjordan/chess-engine",
"id": "51dd96f6fbc2be67d1f60ad390f4000e6f3596c3",
"size": "1602",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tst/test_starting.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "36818"
}
],
"symlink_target": ""
} |
"""Component for the Somfy MyLink device supporting the Synergy API."""
import asyncio
import logging
from somfy_mylink_synergy import SomfyMyLinkSynergy
import voluptuous as vol
from homeassistant.components.cover import ENTITY_ID_FORMAT
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import CONF_HOST, CONF_PORT
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import config_validation as cv
from homeassistant.util import slugify
from .const import (
CONF_DEFAULT_REVERSE,
CONF_ENTITY_CONFIG,
CONF_REVERSE,
CONF_REVERSED_TARGET_IDS,
CONF_SYSTEM_ID,
DATA_SOMFY_MYLINK,
DEFAULT_PORT,
DOMAIN,
MYLINK_STATUS,
PLATFORMS,
)
# Legacy keys that may still live in entry.data and are migrated into
# entry.options by _async_import_options_from_data_if_missing().
CONFIG_OPTIONS = (CONF_DEFAULT_REVERSE, CONF_ENTITY_CONFIG)
# Key under which the options-update unsubscribe callback is stored.
UNDO_UPDATE_LISTENER = "undo_update_listener"
_LOGGER = logging.getLogger(__name__)
def validate_entity_config(values):
    """Validate config entry for CONF_ENTITY."""
    per_entity_schema = vol.Schema({vol.Optional(CONF_REVERSE): cv.boolean})
    if not isinstance(values, dict):
        raise vol.Invalid("expected a dictionary")
    validated = {}
    for raw_entity_id, raw_config in values.items():
        # Normalize the key through the entity-id validator and run each
        # value through the per-entity schema.
        validated[cv.entity_id(raw_entity_id)] = per_entity_schema(raw_config)
    return validated
# YAML configuration schema. The whole domain is deprecated in favour of
# config entries (cv.deprecated below); when YAML is present, async_setup()
# imports it into a config entry.
CONFIG_SCHEMA = vol.Schema(
    vol.All(
        cv.deprecated(DOMAIN),
        {
            DOMAIN: vol.Schema(
                {
                    vol.Required(CONF_SYSTEM_ID): cv.string,
                    vol.Required(CONF_HOST): cv.string,
                    vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
                    vol.Optional(CONF_DEFAULT_REVERSE, default=False): cv.boolean,
                    vol.Optional(
                        CONF_ENTITY_CONFIG, default={}
                    ): validate_entity_config,
                }
            )
        },
    ),
    extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass, config):
    """Set up the MyLink platform."""
    conf = config.get(DOMAIN)
    hass.data.setdefault(DOMAIN, {})
    if not conf:
        return True
    # Forward legacy YAML configuration into the config-entry import flow so
    # existing setups keep working after the YAML deprecation.
    hass.async_create_task(
        hass.config_entries.flow.async_init(
            DOMAIN, context={"source": SOURCE_IMPORT}, data=conf
        )
    )
    return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
    """Set up Somfy MyLink from a config entry."""
    # Move legacy keys out of entry.data before reading the connection info.
    _async_import_options_from_data_if_missing(hass, entry)
    config = entry.data
    somfy_mylink = SomfyMyLinkSynergy(
        config[CONF_SYSTEM_ID], config[CONF_HOST], config[CONF_PORT]
    )
    # A timeout is treated as transient: raising ConfigEntryNotReady makes
    # Home Assistant retry the setup later.
    try:
        mylink_status = await somfy_mylink.status_info()
    except asyncio.TimeoutError as ex:
        raise ConfigEntryNotReady(
            "Unable to connect to the Somfy MyLink device, please check your settings"
        ) from ex
    # A device-reported error is permanent for this attempt: fail the entry.
    if not mylink_status or "error" in mylink_status:
        _LOGGER.error(
            "Somfy Mylink failed to setup because of an error: %s",
            mylink_status.get("error", {}).get(
                "message", "Empty response from mylink device"
            ),
        )
        return False
    if "result" not in mylink_status:
        raise ConfigEntryNotReady("The Somfy MyLink device returned an empty result")
    # Translate per-entity "reverse" options into reversed target ids.
    _async_migrate_entity_config(hass, entry, mylink_status)
    undo_listener = entry.add_update_listener(_async_update_listener)
    hass.data[DOMAIN][entry.entry_id] = {
        DATA_SOMFY_MYLINK: somfy_mylink,
        MYLINK_STATUS: mylink_status,
        UNDO_UPDATE_LISTENER: undo_listener,
    }
    for platform in PLATFORMS:
        hass.async_create_task(
            hass.config_entries.async_forward_entry_setup(entry, platform)
        )
    return True
async def _async_update_listener(hass: HomeAssistant, entry: ConfigEntry):
    """Handle options update."""
    # Options changes require a full reload so entities are rebuilt with the
    # new settings.
    await hass.config_entries.async_reload(entry.entry_id)
@callback
def _async_import_options_from_data_if_missing(hass: HomeAssistant, entry: ConfigEntry):
    """Move legacy config-entry data keys into the options dict."""
    options = dict(entry.options)
    data = dict(entry.data)
    moved_any = False
    for key in CONFIG_OPTIONS:
        if key in options or key not in data:
            continue
        options[key] = data.pop(key)
        moved_any = True
    # Only write back when something actually moved, to avoid churn.
    if moved_any:
        hass.config_entries.async_update_entry(entry, data=data, options=options)
@callback
def _async_migrate_entity_config(
    hass: HomeAssistant, entry: ConfigEntry, mylink_status: dict
):
    # Migrate the legacy per-entity config (keyed by entity_id) into the new
    # per-target reversal map. No-op once the legacy key is gone.
    if CONF_ENTITY_CONFIG not in entry.options:
        return
    options = dict(entry.options)
    reversed_target_ids = options[CONF_REVERSED_TARGET_IDS] = {}
    legacy_entry_config = options[CONF_ENTITY_CONFIG]
    default_reverse = options.get(CONF_DEFAULT_REVERSE)
    for cover in mylink_status["result"]:
        # Legacy entries were keyed by the slugified cover-name entity id.
        legacy_entity_id = ENTITY_ID_FORMAT.format(slugify(cover["name"]))
        target_id = cover["targetID"]
        entity_config = legacy_entry_config.get(legacy_entity_id, {})
        if entity_config.get(CONF_REVERSE, default_reverse):
            reversed_target_ids[target_id] = True
    # Drop the now-migrated legacy keys before persisting the options.
    for legacy_key in (CONF_DEFAULT_REVERSE, CONF_ENTITY_CONFIG):
        if legacy_key in options:
            del options[legacy_key]
    hass.config_entries.async_update_entry(entry, data=entry.data, options=options)
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
    """Unload a config entry."""
    results = await asyncio.gather(
        *(
            hass.config_entries.async_forward_entry_unload(entry, platform)
            for platform in PLATFORMS
        )
    )
    unload_ok = all(results)
    # Always detach the options-update listener, even on a failed unload.
    hass.data[DOMAIN][entry.entry_id][UNDO_UPDATE_LISTENER]()
    if unload_ok:
        hass.data[DOMAIN].pop(entry.entry_id)
    return unload_ok
| {
"content_hash": "3e931a4e0853f50dc37edbf27bc44905",
"timestamp": "",
"source": "github",
"line_count": 196,
"max_line_length": 88,
"avg_line_length": 30.331632653061224,
"alnum_prop": 0.6509671993271657,
"repo_name": "adrienbrault/home-assistant",
"id": "40240306dc473f6ba3731c5f41aaad8182f5ade2",
"size": "5945",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/somfy_mylink/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1795"
},
{
"name": "Python",
"bytes": "32021043"
},
{
"name": "Shell",
"bytes": "4900"
}
],
"symlink_target": ""
} |
import io
from setuptools import find_packages, setup
# The package's long description is taken verbatim from the README.
with io.open('README.rst', 'rt', encoding='utf8') as readme_file:
    readme = readme_file.read()
setup(
    name='websitemixer',
    version='1.0.0',
    url='https://websitemixer.com',
    license='MIT',
    maintainer='Brad Derstine',
    maintainer_email='brad@bizzartech.com',
    description='A Python/Flask alternative to WordPress and Drupal',
    long_description=readme,
    packages=find_packages(),
    include_package_data=True,
    zip_safe=False,
    # Runtime dependencies.
    install_requires=['flask'],
    # Extras installed with `pip install websitemixer[test]`.
    extras_require={'test': ['pytest', 'coverage']},
)
| {
"content_hash": "c14c742f812ec6138f3e3e80fc3b7096",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 69,
"avg_line_length": 22.448275862068964,
"alnum_prop": 0.5990783410138248,
"repo_name": "bderstine/WebsiteMixer-App-Base",
"id": "52a50ce3fb3bc7df6be32ac29fc3bec7a161630d",
"size": "651",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2531"
},
{
"name": "HTML",
"bytes": "50502"
},
{
"name": "Python",
"bytes": "55393"
},
{
"name": "Shell",
"bytes": "131"
}
],
"symlink_target": ""
} |
from neutron_lib import context
from neutron_lib import exceptions
from neutron.db import db_base_plugin_v2 as base_plugin
from neutron.db.quota import api as quota_api
from neutron.db.quota import driver
from neutron.objects import quota as quota_obj
from neutron.quota import resource
from neutron.tests import base
from neutron.tests.unit import quota as test_quota
from neutron.tests.unit import testlib_api
# Dotted path of the core plugin used when configuring the test environment.
DB_PLUGIN_KLASS = 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2'
def _count_resource(context, resource, project_id):
    """A fake counting function to determine current used counts"""
    # Quota resources are registered singular; strip a trailing plural 's'.
    if resource[-1] == 's':
        resource = resource[:-1]
    usage = quota_obj.QuotaUsage.get_object_dirty_protected(
        context, resource=resource)
    return usage.in_use if usage else 0
class FakePlugin(base_plugin.NeutronDbPluginV2, driver.DbQuotaDriver):
    """A fake plugin class containing all DB methods.

    Combines the core DB plugin with the DB quota driver so the tests can
    exercise quota CRUD operations through a single object.
    """
class TestResource(object):
    """Minimal stand-in resource exposing the quota-driver interface."""
    def __init__(self, name, default, fake_count=0):
        # The default limit is stored under 'quota' so the read-only
        # 'default' property below can expose it.
        self.name = name
        self.quota = default
        self.fake_count = fake_count
    def count(self, *args, **kwargs):
        """Return the canned usage count, ignoring all arguments."""
        return self.fake_count
    @property
    def default(self):
        """Default quota limit for this resource."""
        return self.quota
class TestTrackedResource(resource.TrackedResource):
    """Describes a test tracked resource for detailed quota checking"""
    def __init__(self, name, model_class, flag=None,
                 plural_name=None):
        # Bug fix: forward plural_name to the parent instead of discarding
        # it (the previous code always passed plural_name=None).
        super(TestTrackedResource, self).__init__(
            name, model_class, flag=flag, plural_name=plural_name)
    @property
    def default(self):
        """The default quota limit, taken from the configured flag."""
        return self.flag
class TestCountableResource(resource.CountableResource):
    """Describes a test countable resource for detailed quota checking"""
    def __init__(self, name, count, flag=-1, plural_name=None):
        # Bug fix: forward plural_name to the parent instead of discarding
        # it (the previous code always passed plural_name=None).
        super(TestCountableResource, self).__init__(
            name, count, flag=flag, plural_name=plural_name)
    @property
    def default(self):
        """The default quota limit, taken from the configured flag."""
        return self.flag
# Shared fixture identifiers used by the quota driver test cases below.
PROJECT = 'prj_test'
RESOURCE = 'res_test'
ALT_RESOURCE = 'res_test_meh'
class TestDbQuotaDriver(testlib_api.SqlTestCase,
                        base.BaseTestCase):
    """Exercise DbQuotaDriver limit CRUD, reservations and limit checks."""
    def setUp(self):
        """Create a fake plugin, an admin context and shared fixtures."""
        super(TestDbQuotaDriver, self).setUp()
        self.plugin = FakePlugin()
        self.context = context.get_admin_context()
        self.setup_coreplugin(core_plugin=DB_PLUGIN_KLASS)
        self.quota_driver = driver.DbQuotaDriver()
        self.project_1, self.project_2 = 'prj_test_1', 'prj_test_2'
        self.resource_1, self.resource_2 = 'res_test_1', 'res_test_2'
        self.projects = (self.project_1, self.project_2)
        self.resources = (self.resource_1, self.resource_2)
    def test_create_quota_limit(self):
        """A created per-project limit overrides the resource default."""
        defaults = {RESOURCE: TestResource(RESOURCE, 4)}
        self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 2)
        quotas = self.plugin.get_project_quotas(self.context, defaults,
                                                PROJECT)
        self.assertEqual(2, quotas[RESOURCE])
    def test_update_quota_limit(self):
        """Updating an existing limit replaces the previous value."""
        defaults = {RESOURCE: TestResource(RESOURCE, 4)}
        self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 2)
        self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 3)
        quotas = self.plugin.get_project_quotas(self.context, defaults,
                                                PROJECT)
        self.assertEqual(3, quotas[RESOURCE])
    def test_delete_project_quota_restores_default_limit(self):
        """Deleting the per-project quota falls back to the default."""
        defaults = {RESOURCE: TestResource(RESOURCE, 4)}
        self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 2)
        self.plugin.delete_project_quota(self.context, PROJECT)
        quotas = self.plugin.get_project_quotas(self.context, defaults,
                                                PROJECT)
        self.assertEqual(4, quotas[RESOURCE])
    def test_get_default_quotas(self):
        """Default quotas ignore any per-project overrides."""
        defaults = {RESOURCE: TestResource(RESOURCE, 4)}
        user_ctx = context.Context(user_id=PROJECT, tenant_id=PROJECT)
        self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 2)
        quotas = self.plugin.get_default_quotas(user_ctx, defaults, PROJECT)
        self.assertEqual(4, quotas[RESOURCE])
    def test_get_project_quotas(self):
        """A project can read its own quota limits."""
        user_ctx = context.Context(user_id=PROJECT, tenant_id=PROJECT)
        self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 2)
        quotas = self.plugin.get_project_quotas(user_ctx, {}, PROJECT)
        self.assertEqual(2, quotas[RESOURCE])
    def test_get_project_quotas_different_project(self):
        """A non-admin context cannot read another project's quotas."""
        user_ctx = context.Context(user_id=PROJECT,
                                   tenant_id='another_project')
        self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 2)
        # It is appropriate to use assertFalse here as the expected return
        # value is an empty dict (the defaults passed in the statement below
        # after the request context)
        self.assertFalse(self.plugin.get_project_quotas(user_ctx, {}, PROJECT))
    def test_get_all_quotas(self):
        """get_all_quotas returns one record per project, merged with defaults."""
        resources = {self.resource_1: TestResource(self.resource_1, 3),
                     self.resource_2: TestResource(self.resource_2, 5)}
        self.plugin.update_quota_limit(self.context, self.project_1,
                                       self.resource_1, 7)
        self.plugin.update_quota_limit(self.context, self.project_2,
                                       self.resource_2, 9)
        quotas = self.plugin.get_all_quotas(self.context, resources)
        # Expect two projects' quotas
        self.assertEqual(2, len(quotas))
        # But not quotas for the same project twice
        self.assertNotEqual(quotas[0]['project_id'], quotas[1]['project_id'])
        # Check the expected limits. The quotas can be in any order.
        for quota in quotas:
            project = quota['project_id']
            self.assertIn(project, (self.project_1, self.project_2))
            if project == self.project_1:
                expected_limit_r1 = 7
                expected_limit_r2 = 5
            if project == self.project_2:
                expected_limit_r1 = 3
                expected_limit_r2 = 9
            self.assertEqual(expected_limit_r1, quota[self.resource_1])
            self.assertEqual(expected_limit_r2, quota[self.resource_2])
    def test_limit_check(self):
        """A request below the limit passes without raising."""
        resources = {RESOURCE: TestResource(RESOURCE, 2)}
        values = {RESOURCE: 1}
        self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 2)
        self.plugin.limit_check(self.context, PROJECT, resources, values)
    def test_limit_check_over_quota(self):
        """A request above the limit raises OverQuota."""
        resources = {RESOURCE: TestResource(RESOURCE, 2)}
        values = {RESOURCE: 3}
        self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 2)
        self.assertRaises(exceptions.OverQuota, self.plugin.limit_check,
                          context.get_admin_context(), PROJECT, resources,
                          values)
    def test_limit_check_equals_to_quota(self):
        """A request exactly at the limit is still allowed."""
        resources = {RESOURCE: TestResource(RESOURCE, 2)}
        values = {RESOURCE: 2}
        self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 2)
        self.plugin.limit_check(self.context, PROJECT, resources, values)
    def test_limit_check_value_lower_than_zero(self):
        """Negative requested amounts are rejected as invalid."""
        resources = {RESOURCE: TestResource(RESOURCE, 2)}
        values = {RESOURCE: -1}
        self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 2)
        self.assertRaises(exceptions.InvalidQuotaValue,
                          self.plugin.limit_check, context.get_admin_context(),
                          PROJECT, resources, values)
    def _test_make_reservation_success(self, quota_driver,
                                       resource_name, deltas):
        """Helper: reserve `deltas` and verify the reservation contents."""
        resources = {resource_name: TestResource(resource_name, 2)}
        self.plugin.update_quota_limit(self.context, PROJECT, resource_name, 2)
        reservation = quota_driver.make_reservation(
            self.context,
            self.context.project_id,
            resources,
            deltas,
            self.plugin)
        self.assertIn(resource_name, reservation.deltas)
        self.assertEqual(deltas[resource_name],
                         reservation.deltas[resource_name])
        self.assertEqual(self.context.project_id,
                         reservation.project_id)
    def test_make_reservation_single_resource(self):
        """A reservation below the limit succeeds."""
        self._test_make_reservation_success(
            self.quota_driver, RESOURCE, {RESOURCE: 1})
    def test_make_reservation_fill_quota(self):
        """A reservation that exactly fills the quota succeeds."""
        self._test_make_reservation_success(
            self.quota_driver, RESOURCE, {RESOURCE: 2})
    def test_make_reservation_multiple_resources(self):
        """One reservation can span several resources at once."""
        resources = {RESOURCE: TestResource(RESOURCE, 2),
                     ALT_RESOURCE: TestResource(ALT_RESOURCE, 2)}
        deltas = {RESOURCE: 1, ALT_RESOURCE: 2}
        self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 2)
        self.plugin.update_quota_limit(self.context, PROJECT, ALT_RESOURCE, 2)
        reservation = self.quota_driver.make_reservation(
            self.context,
            self.context.project_id,
            resources,
            deltas,
            self.plugin)
        self.assertIn(RESOURCE, reservation.deltas)
        self.assertIn(ALT_RESOURCE, reservation.deltas)
        self.assertEqual(1, reservation.deltas[RESOURCE])
        self.assertEqual(2, reservation.deltas[ALT_RESOURCE])
        self.assertEqual(self.context.project_id,
                         reservation.project_id)
    def test_make_reservation_over_quota_fails(self):
        """A reservation exceeding limit + current usage raises OverQuota."""
        resources = {RESOURCE: TestResource(RESOURCE, 2,
                                            fake_count=2)}
        deltas = {RESOURCE: 1}
        self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 2)
        self.assertRaises(exceptions.OverQuota,
                          self.quota_driver.make_reservation,
                          self.context,
                          self.context.project_id,
                          resources,
                          deltas,
                          self.plugin)
    def test_get_detailed_project_quotas_resource(self):
        """Detailed quotas report limit, used and reserved per resource."""
        res = {RESOURCE: TestTrackedResource(RESOURCE, test_quota.MehModel)}
        self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 6)
        self.quota_driver.make_reservation(self.context, PROJECT, res,
                                           {RESOURCE: 1}, self.plugin)
        quota_api.set_quota_usage(self.context, RESOURCE, PROJECT, 2)
        detailed_quota = self.plugin.get_detailed_project_quotas(
            self.context, res, PROJECT)
        self.assertEqual(6, detailed_quota[RESOURCE]['limit'])
        self.assertEqual(2, detailed_quota[RESOURCE]['used'])
        self.assertEqual(1, detailed_quota[RESOURCE]['reserved'])
    def _create_resources(self):
        """Helper: one tracked and one countable resource fixture."""
        return {
            self.resource_1:
                TestTrackedResource(self.resource_1, test_quota.MehModel),
            self.resource_2:
                TestCountableResource(self.resource_2, _count_resource)}
    def test_get_detailed_project_quotas_multiple_resource(self):
        """Detailed quotas are reported per resource for mixed types."""
        resources = self._create_resources()
        self.plugin.update_quota_limit(self.context, self.project_1,
                                       self.resource_1, 6)
        self.plugin.update_quota_limit(self.context, self.project_1,
                                       self.resource_2, 9)
        self.quota_driver.make_reservation(
            self.context, self.project_1, resources,
            {self.resource_1: 1, self.resource_2: 7}, self.plugin)
        quota_api.set_quota_usage(self.context, self.resource_1,
                                  self.project_1, 2)
        quota_api.set_quota_usage(self.context, self.resource_2,
                                  self.project_1, 3)
        detailed_quota = self.plugin.get_detailed_project_quotas(
            self.context, resources, self.project_1)
        self.assertEqual(6, detailed_quota[self.resource_1]['limit'])
        self.assertEqual(1, detailed_quota[self.resource_1]['reserved'])
        self.assertEqual(2, detailed_quota[self.resource_1]['used'])
        self.assertEqual(9, detailed_quota[self.resource_2]['limit'])
        self.assertEqual(7, detailed_quota[self.resource_2]['reserved'])
        self.assertEqual(3, detailed_quota[self.resource_2]['used'])
    def test_quota_limit_check(self):
        """limit_check accounts for both reservations and counted usage."""
        resources = self._create_resources()
        self.plugin.update_quota_limit(self.context, self.project_1,
                                       self.resource_1, 10)
        self.plugin.update_quota_limit(self.context, self.project_1,
                                       self.resource_2, 10)
        reservations = {self.resource_1: 8}
        self.quota_driver.make_reservation(
            self.context, self.project_1, resources, reservations, self.plugin)
        # Force the countable resource to report a usage of 8.
        resources[self.resource_2]._count_func = lambda x, y, z: 8
        self.assertIsNone(self.quota_driver.quota_limit_check(
            self.context, self.project_1, resources, {self.resource_1: 2}))
        self.assertIsNone(self.quota_driver.quota_limit_check(
            self.context, self.project_1, resources, {self.resource_2: 2}))
        self.assertRaises(
            exceptions.OverQuota, self.quota_driver.quota_limit_check,
            self.context, self.project_1, resources, {self.resource_1: 3})
        self.assertRaises(
            exceptions.OverQuota, self.quota_driver.quota_limit_check,
            self.context, self.project_1, resources, {self.resource_2: 3})
        self.assertRaises(
            exceptions.OverQuota, self.quota_driver.quota_limit_check,
            self.context, self.project_1, resources,
            {self.resource_1: 3, self.resource_2: 3})
    def test_quota_limit_check_unlimited(self):
        """A limit of -1 means unlimited: any delta passes."""
        resources = self._create_resources()
        self.plugin.update_quota_limit(self.context, self.project_1,
                                       self.resource_1, -1)
        self.plugin.update_quota_limit(self.context, self.project_1,
                                       self.resource_2, -1)
        reservations = {self.resource_1: 8}
        self.quota_driver.make_reservation(
            self.context, self.project_1, resources, reservations, self.plugin)
        # Force the countable resource to report a usage of 8.
        resources[self.resource_2]._count_func = lambda x, y, z: 8
        self.assertIsNone(self.quota_driver.quota_limit_check(
            self.context, self.project_1, resources, {self.resource_1: 2}))
        self.assertIsNone(self.quota_driver.quota_limit_check(
            self.context, self.project_1, resources, {self.resource_2: 2}))
        self.assertIsNone(self.quota_driver.quota_limit_check(
            self.context, self.project_1, resources,
            {self.resource_1: 10 ** 9, self.resource_2: 10 ** 9}))
    def test_quota_limit_check_untracked_resource(self):
        """Deltas for resources that are not registered are ignored."""
        resources = self._create_resources()
        self.plugin.update_quota_limit(self.context, self.project_1,
                                       self.resource_1, -1)
        self.plugin.update_quota_limit(self.context, self.project_1,
                                       self.resource_2, -1)
        reservations = {self.resource_1: 8}
        self.quota_driver.make_reservation(
            self.context, self.project_1, resources, reservations, self.plugin)
        # Force the countable resource to report a usage of 8.
        resources[self.resource_2]._count_func = lambda x, y, z: 8
        self.assertIsNone(self.quota_driver.quota_limit_check(
            self.context, self.project_1, resources,
            {self.resource_1: 10 ** 9}))
        self.assertIsNone(self.quota_driver.quota_limit_check(
            self.context, self.project_1, resources,
            {self.resource_2: 10 ** 9}))
        self.assertIsNone(self.quota_driver.quota_limit_check(
            self.context, self.project_1, resources, {'untracked': 10 ** 9}))
| {
"content_hash": "4c84f8817998512c43fded2aca38cee0",
"timestamp": "",
"source": "github",
"line_count": 359,
"max_line_length": 79,
"avg_line_length": 44.90807799442897,
"alnum_prop": 0.6193400322540628,
"repo_name": "openstack/neutron",
"id": "8334ae9ffc2a517e6191920c61fa192823937441",
"size": "16713",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "neutron/tests/unit/db/quota/test_driver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jinja",
"bytes": "2773"
},
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "15932611"
},
{
"name": "Ruby",
"bytes": "1257"
},
{
"name": "Shell",
"bytes": "83270"
}
],
"symlink_target": ""
} |
# Standalone runner: continuously poll the radar hardware.
from minimop.hardware.ftRadar import FTRadar
radar = FTRadar()
# Busy polling loop; the process runs until interrupted (e.g. Ctrl-C).
while True:
    radar.update()
| {
"content_hash": "ce99fb4af01d8853b29cf6ab0f3b87c9",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 44,
"avg_line_length": 13.857142857142858,
"alnum_prop": 0.7319587628865979,
"repo_name": "eXpire163/minimop",
"id": "f6d9693526cb09333fbd2a588470ae998c2cc4f5",
"size": "97",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "minimop/run/runRadar.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "32136"
},
{
"name": "Shell",
"bytes": "1631"
}
],
"symlink_target": ""
} |
import os
import re
from urlparse import urlparse
def get_env(src):
    """Parse shell ``export`` statements out of *src*.

    Args:
        src: Text containing lines such as ``export NAME="value"`` or
            ``export NAME=value``.

    Returns:
        A dict mapping each exported variable name to its (unquoted)
        value. Lines that do not match the export pattern are ignored.
    """
    # Raw string: '\s' in a plain literal is an invalid escape sequence
    # (DeprecationWarning, SyntaxWarning on modern Pythons). Compile once
    # instead of re-matching the pattern string per line.
    pattern = re.compile(r'export\s+([^=]+)="?([^"]+)"?')
    env = {}
    for line in src.splitlines():
        match = pattern.match(line)
        if match:
            name, value = match.groups()
            env[name] = value
    return env
def test_celery_not_running(host):
    """Celery must not be running under supervisor on this node.

    `host` is presumably a testinfra fixture -- TODO confirm.
    """
    with host.sudo():
        assert not host.supervisor('celery').is_running
def test_gunicorn_not_running(host):
    """No gunicorn processes may be present on this node."""
    with host.sudo():
        assert host.process.filter(comm='gunicorn') == []
| {
"content_hash": "a4c054d3b13e0056d5d73d2e63e17a4c",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 57,
"avg_line_length": 23.136363636363637,
"alnum_prop": 0.5756385068762279,
"repo_name": "FireCARES/firecares-ansible",
"id": "1628f9293afddaee33ab46b06b906139d3f9368a",
"size": "509",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test-backup-beat-infrastructure.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "728"
},
{
"name": "Python",
"bytes": "108752"
},
{
"name": "Shell",
"bytes": "18127"
}
],
"symlink_target": ""
} |
import os, sys
if __name__ == "__main__":
    # Standard Django manage.py, extended with a project startup hook.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "CourseBuilder.settings")
    from django.core.management import execute_from_command_line
    import CourseBuilder.startup as startup
    # Run one-time project initialization before dispatching the command.
    startup.run()
    execute_from_command_line(sys.argv)
| {
"content_hash": "a4df8690831d916f4b544f7868e0fd45",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 77,
"avg_line_length": 36,
"alnum_prop": 0.71875,
"repo_name": "Darkmer/masterchief",
"id": "fa806a6b9251488d1986a1430dee32441cf2d646",
"size": "310",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "19743"
},
{
"name": "JavaScript",
"bytes": "1514"
},
{
"name": "Python",
"bytes": "1045801"
},
{
"name": "Shell",
"bytes": "3823"
}
],
"symlink_target": ""
} |
import os
import ycm_core
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
flags = [
# NOTE: this static list is only consulted when no compilation database is
# found (see FlagsForFile below).
'-Wall',
'-Wextra',
'-Werror',
# '-Wc++98-compat',
'-Wno-long-long',
'-Wno-variadic-macros',
'-fexceptions',
'-DNDEBUG',
# You 100% do NOT need -DUSE_CLANG_COMPLETER in your flags; only the YCM
# source code needs it.
'-DUSE_CLANG_COMPLETER',
# THIS IS IMPORTANT! Without a "-std=<something>" flag, clang won't know which
# language to use when compiling headers. So it will guess. Badly. So C++
# headers will be compiled as C headers. You don't want that so ALWAYS specify
# a "-std=<something>".
# For a C project, you would set this to something like 'c99' instead of
# 'c++11'.
'-std=c++14',
# ...and the same thing goes for the magic -x option which specifies the
# language that the files to be compiled are written in. This is mostly
# relevant for c++ headers.
# For a C project, you would set this to 'c' instead of 'c++'.
'-x',
'c++',
# c/c++ include path
'-isystem',
'/usr/include',
'-isystem',
'/usr/local/include',
'-isystem',
'/usr/include/c++/4.8',
'-isystem',
'/usr/include/c++/4.8.5',
'-isystem',
'/usr/include/c++/4.9.3',
'-isystem',
'/usr/include/c++/5',
'-isystem',
'/usr/include/c++/5.2.1',
'-isystem',
'/usr/include/c++/5.3.1',
'-isystem',
'/usr/include/c++/5.4.0',
'-isystem',
'/usr/include/c++/6',
'-isystem',
'/usr/include/c++/6.1.0',
# 3rdParty include path
'-isystem',
'/usr/local/3rd-party/boost/include',
'-isystem',
'/usr/local/3rd-party/protobuf/include',
'-isystem',
'./spdlog/include',
# project include path
'-isystem',
'./',
'-isystem',
'./czrpc',
'-isystem',
'./test',
'-isystem',
'./test/proto/code',
'-isystem',
'./test/server',
'-isystem',
'./test/sync_rpc_client',
'-isystem',
'./test/async_rpc_client',
'-isystem',
'./test/pub_client',
'-isystem',
'./test/sub_client',
'-isystem',
'./samples',
'-isystem',
'./samples/proto/code',
#'-isystem',
# This path will only work on OS X, but extra paths that don't exist are not
# harmful
#'/System/Library/Frameworks/Python.framework/Headers',
#'-isystem',
#'../llvm/include',
#'-isystem',
#'../llvm/tools/clang/include',
#'-I',
#'.',
#'-I',
#'./ClangCompleter',
#'-isystem',
#'./tests/gmock/gtest',
#'-isystem',
#'./tests/gmock/gtest/include',
#'-isystem',
#'./tests/gmock',
#'-isystem',
#'./tests/gmock/include',
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# You can get CMake to generate this file for you by adding:
# set( CMAKE_EXPORT_COMPILE_COMMANDS 1 )
# to your CMakeLists.txt file.
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''
# When a compile_commands.json folder is configured above, prefer it over
# the static 'flags' list.
if os.path.exists( compilation_database_folder ):
  database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
  database = None
# Source-file extensions probed when looking up flags for a header file.
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def DirectoryOfThisScript():
  """Return the absolute directory containing this configuration file."""
  script_path = os.path.abspath( __file__ )
  return os.path.dirname( script_path )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
  """Return a copy of *flags* with relative include paths made absolute.

  Path-taking flags (-isystem, -I, -iquote, --sysroot=) may carry their
  argument fused ('-Ifoo') or as the following flag; both forms are
  resolved against *working_directory*. Without a working directory the
  flags are returned unchanged.
  """
  if not working_directory:
    return list( flags )
  path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
  absolute_flags = []
  pending_path = False
  for item in flags:
    updated = item
    if pending_path:
      pending_path = False
      if not item.startswith( '/' ):
        updated = os.path.join( working_directory, item )
    for prefix in path_flags:
      if item == prefix:
        # The path argument arrives as the next flag.
        pending_path = True
        break
      if item.startswith( prefix ):
        # Fused form: rebase the path part that follows the prefix.
        relative_part = item[ len( prefix ): ]
        updated = prefix + os.path.join( working_directory, relative_part )
        break
    if updated:
      absolute_flags.append( updated )
  return absolute_flags
def IsHeaderFile( filename ):
  """True when *filename* has a C/C++ header extension."""
  _, extension = os.path.splitext( filename )
  return extension in ( '.h', '.hxx', '.hpp', '.hh' )
def GetCompilationInfoForFile( filename ):
  """Look up compilation flags for *filename* in the compilation database.

  The compilation_commands.json generated by CMake has no entries for
  header files, so for a header we probe sibling source files (same
  basename, any source extension) and reuse their flags. Returns None
  when no usable entry is found.
  """
  if not IsHeaderFile( filename ):
    return database.GetCompilationInfoForFile( filename )
  basename = os.path.splitext( filename )[ 0 ]
  for extension in SOURCE_EXTENSIONS:
    candidate = basename + extension
    if not os.path.exists( candidate ):
      continue
    compilation_info = database.GetCompilationInfoForFile( candidate )
    if compilation_info.compiler_flags_:
      return compilation_info
  return None
def FlagsForFile( filename, **kwargs ):
  """YCM entry point: produce the final compiler flags for *filename*."""
  if not database:
    # No compilation database: fall back to the static flag list, rebased
    # onto this script's directory.
    return {
      'flags': MakeRelativePathsInFlagsAbsolute( flags,
                                                 DirectoryOfThisScript() ),
      'do_cache': True
    }
  # compilation_info.compiler_flags_ is a "list-like" StringVec object,
  # not a real python list.
  compilation_info = GetCompilationInfoForFile( filename )
  if not compilation_info:
    return None
  final_flags = MakeRelativePathsInFlagsAbsolute(
    compilation_info.compiler_flags_,
    compilation_info.compiler_working_dir_ )
  # NOTE: stripping -stdlib=libc++ is a YouCompleteMe-specific tweak; most
  # projects do not need it.
  try:
    final_flags.remove( '-stdlib=libc++' )
  except ValueError:
    pass
  return {
    'flags': final_flags,
    'do_cache': True
  }
| {
"content_hash": "261d9138246aad2c107ded617dcf162b",
"timestamp": "",
"source": "github",
"line_count": 213,
"max_line_length": 80,
"avg_line_length": 27.64788732394366,
"alnum_prop": 0.6688741721854304,
"repo_name": "chxuan/czrpc",
"id": "858802b020876d13e7aeb2f030a97cbd4695852f",
"size": "7289",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": ".ycm_extra_conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "95379"
},
{
"name": "CMake",
"bytes": "3903"
},
{
"name": "Protocol Buffer",
"bytes": "350"
},
{
"name": "Python",
"bytes": "7289"
},
{
"name": "Shell",
"bytes": "62"
}
],
"symlink_target": ""
} |
"""Support for LiteJet lights."""
import logging
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_TRANSITION,
SUPPORT_BRIGHTNESS,
SUPPORT_TRANSITION,
LightEntity,
)
from .const import CONF_DEFAULT_TRANSITION, DOMAIN
_LOGGER = logging.getLogger(__name__)
ATTR_NUMBER = "number"
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up entry."""
    system = hass.data[DOMAIN]

    def get_entities(system):
        # One light entity per LiteJet load; the load name doubles as the
        # entity name. Runs in the executor because the calls may block.
        return [
            LiteJetLight(config_entry, system, index, system.get_load_name(index))
            for index in system.loads()
        ]

    async_add_entities(await hass.async_add_executor_job(get_entities, system), True)
class LiteJetLight(LightEntity):
    """Representation of a single LiteJet light."""
    def __init__(self, config_entry, lj, i, name):
        """Initialize a LiteJet light."""
        self._config_entry = config_entry
        self._lj = lj
        # Load index on the LiteJet board; also used in the unique id.
        self._index = i
        # Cached brightness on Home Assistant's 0-255 scale; 0 means off.
        self._brightness = 0
        self._name = name
    async def async_added_to_hass(self):
        """Run when this Entity has been added to HA."""
        # Subscribe to LiteJet load events so state changes push updates.
        self._lj.on_load_activated(self._index, self._on_load_changed)
        self._lj.on_load_deactivated(self._index, self._on_load_changed)
    async def async_will_remove_from_hass(self):
        """Entity being removed from hass."""
        self._lj.unsubscribe(self._on_load_changed)
    def _on_load_changed(self):
        """Handle state changes."""
        _LOGGER.debug("Updating due to notification for %s", self._name)
        # force_refresh=True so update() re-reads the level from the system.
        self.schedule_update_ha_state(True)
    @property
    def supported_features(self):
        """Flag supported features."""
        return SUPPORT_BRIGHTNESS | SUPPORT_TRANSITION
    @property
    def name(self):
        """Return the light's name."""
        return self._name
    @property
    def unique_id(self):
        """Return a unique identifier for this light."""
        return f"{self._config_entry.entry_id}_{self._index}"
    @property
    def brightness(self):
        """Return the light's brightness."""
        return self._brightness
    @property
    def is_on(self):
        """Return if the light is on."""
        return self._brightness != 0
    @property
    def should_poll(self):
        """Return that lights do not require polling."""
        # State changes arrive via the load-event subscriptions above.
        return False
    @property
    def extra_state_attributes(self):
        """Return the device state attributes."""
        return {ATTR_NUMBER: self._index}
    def turn_on(self, **kwargs):
        """Turn on the light."""
        # If neither attribute is specified then the simple activate load
        # LiteJet API will use the per-light default brightness and
        # transition values programmed in the LiteJet system.
        if ATTR_BRIGHTNESS not in kwargs and ATTR_TRANSITION not in kwargs:
            self._lj.activate_load(self._index)
            return
        # If either attribute is specified then Home Assistant must
        # control both values.
        default_transition = self._config_entry.options.get(CONF_DEFAULT_TRANSITION, 0)
        transition = kwargs.get(ATTR_TRANSITION, default_transition)
        # Convert HA's 0-255 brightness scale to LiteJet's 0-99 scale.
        brightness = int(kwargs.get(ATTR_BRIGHTNESS, 255) / 255 * 99)
        self._lj.activate_load_at(self._index, brightness, int(transition))
    def turn_off(self, **kwargs):
        """Turn off the light."""
        if ATTR_TRANSITION in kwargs:
            self._lj.activate_load_at(self._index, 0, kwargs[ATTR_TRANSITION])
            return
        # If transition attribute is not specified then the simple
        # deactivate load LiteJet API will use the per-light default
        # transition value programmed in the LiteJet system.
        self._lj.deactivate_load(self._index)
    def update(self):
        """Retrieve the light's brightness from the LiteJet system."""
        # LiteJet reports levels 0-99; convert back to HA's 0-255 scale.
        self._brightness = int(self._lj.get_load_level(self._index) / 99 * 255)
| {
"content_hash": "ca07a4a85685dffb374f004f182e6fec",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 87,
"avg_line_length": 32.008,
"alnum_prop": 0.6323419145213697,
"repo_name": "aronsky/home-assistant",
"id": "172e46c441ac5bcbadd3225168b5452a9d203e29",
"size": "4001",
"binary": false,
"copies": "5",
"ref": "refs/heads/dev",
"path": "homeassistant/components/litejet/light.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2443"
},
{
"name": "Python",
"bytes": "38448521"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
} |
import pytest
import numpy as np
pytest.importorskip("xgboost")
import xgboost as xgb
import vaex.ml.xgboost
import vaex.ml.datasets
# Parameters for the multi-class classification model (iris tests below).
params_multiclass = {
    'learning_rate': 0.1,     # learning rate
    'max_depth': 3,           # max depth of the tree
    'colsample_bytree': 0.8,  # subsample ratio of columns when constructing each tree
    'subsample': 0.8,         # subsample ratio of the training instance
    'reg_lambda': 1,          # L2 regularisation
    'reg_alpha': 0,           # L1 regularisation
    'min_child_weight': 1,    # minimum sum of instance weight (hessian) needed in a child
    'objective': 'multi:softmax',  # learning task objective
    'num_class': 3,           # number of target classes (if classification)
    'random_state': 42,       # fixes the seed, for reproducibility
    'silent': 1,              # silent mode
    'n_jobs': -1              # cpu cores used
    }
# Parameters for the regression model (vaex.example() tests below).
params_reg = {
    'learning_rate': 0.1,     # learning rate
    'max_depth': 3,           # max depth of the tree
    'colsample_bytree': 0.8,  # subsample ratio of columns when constructing each tree
    'subsample': 0.8,         # subsample ratio of the training instance
    'reg_lambda': 1,          # L2 regularisation
    'reg_alpha': 0,           # L1 regularisation
    'min_child_weight': 1,    # minimum sum of instance weight (hessian) needed in a child
    'objective': 'reg:linear',  # learning task objective
    'random_state': 42,       # fixes the seed, for reproducibility
    'silent': 1,              # silent mode
    'n_jobs': -1              # cpu cores used
    }
def test_xgboost():
    """Train/predict on iris and round-trip the model via state transfer."""
    ds = vaex.ml.datasets.load_iris()
    ds_train, ds_test = ds.ml.train_test_split(test_size=0.2, verbose=False)
    features = ['sepal_length', 'sepal_width', 'petal_length', 'petal_width']
    booster = vaex.ml.xgboost.XGBoostModel(num_boost_round=10,
                                           params=params_multiclass,
                                           features=features,
                                           target='class_')
    booster.fit(ds_train)
    class_predict = booster.predict(ds_test)
    assert np.all(ds_test.class_.values == class_predict)
    ds_train = booster.transform(ds_train) # this will add the xgboost_prediction column
    state = ds_train.state_get()
    # Applying the training state to the test set must reproduce predictions.
    ds_test.state_set(state)
    assert np.all(ds_test.class_.values == ds_test.xgboost_prediction.values)
def test_xgboost_numerical_validation():
    """vaex.ml predictions must match vanilla xgboost exactly."""
    ds = vaex.ml.datasets.load_iris()
    features = ['sepal_width', 'petal_length', 'sepal_length', 'petal_width']
    # Vanilla xgboost
    dtrain = xgb.DMatrix(ds[features].values, label=ds.class_.values)
    xgb_bst = xgb.train(params=params_multiclass, dtrain=dtrain, num_boost_round=3)
    xgb_pred = xgb_bst.predict(dtrain)
    # xgboost through vaex
    booster = vaex.ml.xgboost.XGBoostModel(features=features, target='class_', params=params_multiclass, num_boost_round=3)
    booster.fit(ds)
    vaex_pred = booster.predict(ds)
    # Compare the predictions of xgboost vs vaex.ml
    np.testing.assert_equal(vaex_pred, xgb_pred, verbose=True,
                            err_msg='The predictions of vaex.ml.xboost do not match those of pure xgboost')
def test_xgboost_serialize(tmpdir):
    """Round-trip pipelines holding an xgboost model through JSON on disk."""
    ds = vaex.ml.datasets.load_iris()
    features = ['sepal_length', 'sepal_width', 'petal_length', 'petal_width']
    target = 'class_'
    path = str(tmpdir.join('test.json'))

    # Save and reload a pipeline wrapping a freshly created model.
    model = ds.ml.xgboost_model(target=target, features=features, num_boost_round=20, params=params_multiclass, transform=False)
    pipeline = vaex.ml.Pipeline([model])
    pipeline.save(path)
    pipeline.load(path)

    # Same round trip after pushing the model state through get/set.
    model = ds.ml.xgboost_model(target=target, features=features, num_boost_round=20, params=params_multiclass, transform=False)
    model.state_set(model.state_get())
    pipeline = vaex.ml.Pipeline([model])
    pipeline.save(path)
    pipeline.load(path)
def test_xgboost_validation_set():
    """Fit with train/test eval sets and verify the recorded history."""
    # read data
    ds = vaex.example()
    # Train and test split
    train, test = ds.ml.train_test_split(verbose=False)
    # Define the training features
    features = ['vx', 'vy', 'vz', 'Lz', 'L']
    # history of the booster (evaluations of the train and validation sets)
    history = {}
    # instantiate the booster model
    booster = vaex.ml.xgboost.XGBoostModel(features=features, target='E', num_boost_round=10, params=params_reg)
    # fit the booster - including saving the history of the validation sets
    booster.fit(train, evals=[(train, 'train'), (test, 'test')],
                early_stopping_rounds=2, evals_result=history)
    # Early stopping never triggered, so all 10 rounds were kept.
    assert booster.booster.best_ntree_limit == 10
    assert booster.booster.best_iteration == 9
    assert len(history['train']['rmse']) == 10
    assert len(history['test']['rmse']) == 10
def test_xgboost_pipeline():
    """Check predict() and transform() agree through a full vaex pipeline."""
    # read data
    ds = vaex.example()
    # train test split
    train, test = ds.ml.train_test_split(verbose=False)
    # add virtual columns
    train['r'] = np.sqrt(train.x**2 + train.y**2 + train.z**2)
    # Do a pca
    features = ['vx', 'vy', 'vz', 'Lz', 'L']
    pca = train.ml.pca(n_components=3, features=features, transform=False)
    train = pca.transform(train)
    # Do state transfer
    st = train.ml.state_transfer()
    # now the xgboost model thingy
    features = ['r', 'PCA_0', 'PCA_1', 'PCA_2']
    # define the boosting model
    booster = train.ml.xgboost_model(target='E', num_boost_round=10, features=features, params=params_reg, transform=False)
    # Create a pipeline
    pp = vaex.ml.Pipeline([st, booster])
    # Use the pipeline
    pred = pp.predict(test)  # This works
    trans = pp.transform(test)  # This will crash (softly)
    # trans.evaluate('xgboost_prediction') # This is where the problem happens
    np.testing.assert_equal(pred,
                            trans.evaluate('xgboost_prediction'),
                            verbose=True,
                            err_msg='The predictions from the predict and transform method do not match')
| {
"content_hash": "36c4581798fb25301f5479d9d3067210",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 126,
"avg_line_length": 43.56737588652482,
"alnum_prop": 0.6273807585870096,
"repo_name": "maartenbreddels/vaex",
"id": "c490278068d3b15c8a075fd5ed3929953727ea1f",
"size": "6143",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/ml/xgboost_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1888"
},
{
"name": "C++",
"bytes": "81166"
},
{
"name": "CSS",
"bytes": "6604"
},
{
"name": "GLSL",
"bytes": "6204"
},
{
"name": "HTML",
"bytes": "177613"
},
{
"name": "JavaScript",
"bytes": "1489136"
},
{
"name": "Makefile",
"bytes": "432"
},
{
"name": "PHP",
"bytes": "33807"
},
{
"name": "Python",
"bytes": "1893232"
},
{
"name": "Shell",
"bytes": "4639"
}
],
"symlink_target": ""
} |
import logging
import tornado.auth
import tornado.escape
import tornado.ioloop
import tornado.options
import tornado.web
import os.path
import uuid
from tornado.options import define, options
define("port", default=8888, help="run on the given port", type=int)
class Application(tornado.web.Application):
    """Tornado application wiring the chat demo's routes and settings."""

    def __init__(self):
        handlers = [
            (r"/", MainHandler),
            (r"/auth/login", AuthLoginHandler),
            (r"/auth/logout", AuthLogoutHandler),
            (r"/a/message/new", MessageNewHandler),
            (r"/a/message/updates", MessageUpdatesHandler),
        ]
        settings = dict(
            # NOTE(review): hard-coded cookie secret -- acceptable only for a demo
            cookie_secret="43oETzKXQAGaYdkL5gEmGeJJFuYh7EQnp2XdTP1o/Vo=",
            login_url="/auth/login",
            template_path=os.path.join(os.path.dirname(__file__), "templates"),
            static_path=os.path.join(os.path.dirname(__file__), "static"),
            xsrf_cookies=True,
        )
        tornado.web.Application.__init__(self, handlers, **settings)
class BaseHandler(tornado.web.RequestHandler):
    """Shared base handler: resolves the current user from the secure cookie."""

    def get_current_user(self):
        # The cookie holds the JSON-encoded user dict written at login time.
        user_json = self.get_secure_cookie("user")
        return tornado.escape.json_decode(user_json) if user_json else None
class MainHandler(BaseHandler):
    @tornado.web.authenticated
    def get(self):
        # Render the chat page pre-populated with the cached message history.
        self.render("index.html", messages=MessageMixin.cache)
class MessageMixin(object):
    # Class-level shared state: every handler instance sees the same room.
    waiters = []      # parked callbacks of clients long-polling for messages
    cache = []        # most recent messages, oldest first
    cache_size = 200  # maximum number of cached messages

    def wait_for_messages(self, callback, cursor=None):
        """Invoke ``callback`` with messages newer than ``cursor``.

        If newer messages are already cached the callback fires immediately;
        otherwise it is parked until new_messages() delivers the next batch.
        """
        cls = MessageMixin
        if cursor:
            # Scan backwards for the id of the last message the client saw.
            index = 0
            for i in xrange(len(cls.cache)):
                index = len(cls.cache) - i - 1
                if cls.cache[index]["id"] == cursor: break
            recent = cls.cache[index + 1:]
            if recent:
                callback(recent)
                return
        cls.waiters.append(callback)

    def new_messages(self, messages):
        """Deliver ``messages`` to all waiters and append them to the cache."""
        cls = MessageMixin
        logging.info("Sending new message to %r listeners", len(cls.waiters))
        for callback in cls.waiters:
            try:
                callback(messages)
            except:
                # One broken waiter must not prevent delivery to the rest.
                logging.error("Error in waiter callback", exc_info=True)
        cls.waiters = []
        cls.cache.extend(messages)
        # Trim the cache to the configured bound.
        if len(cls.cache) > self.cache_size:
            cls.cache = cls.cache[-self.cache_size:]
class MessageNewHandler(BaseHandler, MessageMixin):
    @tornado.web.authenticated
    def post(self):
        """Create a message from the posted body and broadcast it."""
        message = {
            "id": str(uuid.uuid4()),
            "from": self.current_user["first_name"],
            "body": self.get_argument("body"),
        }
        # Pre-render the HTML snippet once so every listener gets the same markup.
        message["html"] = self.render_string("message.html", message=message)
        if self.get_argument("next", None):
            # Plain (non-AJAX) form post: bounce back to the given page.
            self.redirect(self.get_argument("next"))
        else:
            self.write(message)
        self.new_messages([message])
class MessageUpdatesHandler(BaseHandler, MessageMixin):
    @tornado.web.authenticated
    @tornado.web.asynchronous
    def post(self):
        """Long-poll endpoint: responds once messages newer than cursor exist."""
        cursor = self.get_argument("cursor", None)
        self.wait_for_messages(self.async_callback(self.on_new_messages),
                               cursor=cursor)

    def on_new_messages(self, messages):
        # Closed client connection
        if self.request.connection.stream.closed():
            return
        self.finish(dict(messages=messages))
class AuthLoginHandler(BaseHandler, tornado.auth.GoogleMixin):
    @tornado.web.asynchronous
    def get(self):
        # Second leg of the OpenID dance: Google redirected back to us.
        if self.get_argument("openid.mode", None):
            self.get_authenticated_user(self.async_callback(self._on_auth))
            return
        # First leg: send the user to Google, asking for their name.
        self.authenticate_redirect(ax_attrs=["name"])

    def _on_auth(self, user):
        """Store the authenticated user in a secure cookie and go home."""
        if not user:
            raise tornado.web.HTTPError(500, "Google auth failed")
        self.set_secure_cookie("user", tornado.escape.json_encode(user))
        self.redirect("/")
class AuthLogoutHandler(BaseHandler):
    def get(self):
        # Dropping the cookie is all that is needed to log out.
        self.clear_cookie("user")
        self.write("You are now logged out")
def main():
    """Parse command-line options and run the chat server forever."""
    tornado.options.parse_command_line()
    app = Application()
    app.listen(options.port)
    tornado.ioloop.IOLoop.instance().start()


if __name__ == "__main__":
    main()
| {
"content_hash": "72ffa0b9cec10b5766d042cd8eba0b27",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 79,
"avg_line_length": 30.769784172661872,
"alnum_prop": 0.6053308393733926,
"repo_name": "cloudkick/cast-site",
"id": "b9ad716b21116889f97b023402a3ff049f2339fd",
"size": "4874",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hyde/lib/tornado/demos/chat/chatdemo.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "3085"
},
{
"name": "Emacs Lisp",
"bytes": "349"
},
{
"name": "JavaScript",
"bytes": "23058"
},
{
"name": "Python",
"bytes": "990673"
}
],
"symlink_target": ""
} |
import setuptools # isort:skip # noqa
import distutils # isort:skip # noqa
| {
"content_hash": "d183f760f25013315393cdfb5b2b0992",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 38,
"avg_line_length": 38.5,
"alnum_prop": 0.7402597402597403,
"repo_name": "omry/omegaconf",
"id": "aa0c875c59f1a4fe5aaa5cff742b0e290627105e",
"size": "152",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "build_helpers/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ANTLR",
"bytes": "7876"
},
{
"name": "Gherkin",
"bytes": "203"
},
{
"name": "Python",
"bytes": "1020026"
}
],
"symlink_target": ""
} |
"""
Draws an simple RGB image
- Left-drag pans the plot.
- Mousewheel up and down zooms the plot in and out.
- Pressing "z" brings up the Zoom Box, and you can click-drag a rectangular
region to zoom. If you use a sequence of zoom boxes, pressing alt-left-arrow
and alt-right-arrow moves you forwards and backwards through the "zoom
history".
"""
# Major library imports
from numpy import zeros, uint8
# Enthought library imports
from enable.api import Component, ComponentEditor
from traits.api import HasTraits, Instance
from traitsui.api import Item, Group, View
# Chaco imports
from chaco.api import ArrayPlotData, Plot
from chaco.tools.api import PanTool, ZoomTool
from chaco.tools.image_inspector_tool import ImageInspectorTool, \
ImageInspectorOverlay
#===============================================================================
# # Create the Chaco plot.
#===============================================================================
def _create_plot_component():
    """Build the Chaco Plot showing an RGBA test image with pan/zoom/inspector tools."""
    # Create some RGBA image data
    image = zeros((200,400,4), dtype=uint8)
    image[:,0:40,0] += 255     # Vertical red stripe
    image[0:25,:,1] += 255     # Horizontal green stripe; also yellow square
    image[-80:,-160:,2] += 255 # Blue square
    image[:,:,3] = 255         # fully opaque alpha channel

    # Create a plot data object and give it this data
    pd = ArrayPlotData()
    pd.set_data("imagedata", image)

    # Create the plot; top-left origin keeps the image unflipped
    plot = Plot(pd, default_origin="top left")
    plot.x_axis.orientation = "top"
    img_plot = plot.img_plot("imagedata")[0]

    # Tweak some of the plot properties
    plot.bgcolor = "white"

    # Attach some tools to the plot
    plot.tools.append(PanTool(plot, constrain_key="shift"))
    plot.overlays.append(ZoomTool(component=plot,
                                  tool_mode="box", always_on=False))
    imgtool = ImageInspectorTool(img_plot)
    img_plot.tools.append(imgtool)
    plot.overlays.append(ImageInspectorOverlay(component=img_plot,
                                               image_inspector=imgtool))
    return plot
#===============================================================================
# Attributes to use for the plot view.
size = (600, 600)            # initial window size in pixels
title = "Simple image plot"  # window title
bg_color = "lightgray"       # window background colour
#===============================================================================
# # Demo class that is used by the demo.py application.
#===============================================================================
class Demo(HasTraits):
    """Traits UI wrapper so the demo harness can display the plot component."""
    plot = Instance(Component)

    traits_view = View(
        Group(
            Item('plot', editor=ComponentEditor(size=size,
                                                bgcolor=bg_color),
                 show_label=False),
            orientation = "vertical"),
        resizable=True, title=title
    )

    def _plot_default(self):
        # Lazily build the plot the first time the trait is accessed.
        return _create_plot_component()
# Module-level instance picked up by the demo.py application.
demo = Demo()

if __name__ == "__main__":
    demo.configure_traits()
#--EOF---
| {
"content_hash": "7fb62bb23d37fac5ee3830c845f84ed8",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 80,
"avg_line_length": 34.235955056179776,
"alnum_prop": 0.5428290121430915,
"repo_name": "tommy-u/chaco",
"id": "c6bd340c3590c3d8f6b30ffc011e7b3cd20e5451",
"size": "3069",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "examples/demo/basic/image_plot.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "57089"
},
{
"name": "C++",
"bytes": "9881"
},
{
"name": "Gnuplot",
"bytes": "611"
},
{
"name": "Python",
"bytes": "2475987"
}
],
"symlink_target": ""
} |
class Session:
    """Mutable container for the state of one dialogue session."""

    def __init__(self):
        self.current_dialogue_topic = None  # topic currently under discussion
        self.active_topics = []             # every topic activated so far
        self.reset_flag = False             # whether a reset was requested

    def set_current_dialogue_topic(self, current_dialogue):
        """Remember which topic the dialogue is currently on."""
        self.current_dialogue_topic = current_dialogue

    def get_current_dialogue_topic(self):
        """Return the current topic (None when nothing is active)."""
        return self.current_dialogue_topic

    def add_active_topic(self, topic):
        """Record ``topic`` as active in this session."""
        self.active_topics.append(topic)

    def get_active_topics(self):
        """Return the list of all topics activated so far."""
        return self.active_topics

    def set_reset_flag(self, value):
        """Set (or clear) the pending-reset marker."""
        self.reset_flag = value
| {
"content_hash": "861e37d229615c627ca5718320b47166",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 59,
"avg_line_length": 25.863636363636363,
"alnum_prop": 0.6467486818980668,
"repo_name": "amos-ws17/amos-ws17-proj1",
"id": "8e8a92d65e628167a5c067a3d93b3d3042ee0503",
"size": "569",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "workstreambot/session.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "92392"
},
{
"name": "Shell",
"bytes": "672"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from .base import Filter
import re
# Exception values known to come from browser extensions / injected scripts
# rather than from the application's own JavaScript.  Each literal is
# re.escape()d, so this is a case-insensitive plain-substring match.
EXTENSION_EXC_VALUES = re.compile('|'.join((re.escape(x) for x in (
    # Random plugins/extensions
    'top.GLOBALS',
    # See: http://blog.errorception.com/2012/03/tale-of-unfindable-js-error. html
    'originalCreateNotification',
    'canvas.contentDocument',
    'MyApp_RemoveAllHighlights',
    'http://tt.epicplay.com',
    'Can\'t find variable: ZiteReader',
    'jigsaw is not defined',
    'ComboSearch is not defined',
    'http://loading.retry.widdit.com/',
    'atomicFindClose',
    # Facebook borked
    'fb_xd_fragment',
    # ISP "optimizing" proxy - `Cache-Control: no-transform` seems to
    # reduce this. (thanks @acdha)
    # See http://stackoverflow.com/questions/4113268
    'bmi_SafeAddOnload',
    'EBCallBackMessageReceived',
    # See https://groups.google.com/a/chromium.org/forum/#!topic/chromium-discuss/7VU0_VvC7mE
    '_gCrWeb',
    # See http://toolbar.conduit.com/Developer/HtmlAndGadget/Methods/JSInjection.aspx
    'conduitPage'
))), re.I)
# Source paths (frame abs_path values) known to host third-party/injected
# scripts.  Entries are raw regex fragments joined into alternatives and
# tested with .match(), i.e. anchored at the start of the path.
# BUG FIX: the original was missing a comma after the first pattern, so
# implicit string concatenation fused 'graph\.facebook\.com' and
# 'connect\.facebook\.net...' into one alternative that could never match.
EXTENSION_EXC_SOURCES = re.compile('|'.join((
    # Facebook flakiness
    r'graph\.facebook\.com',
    # Facebook blocked
    r'connect\.facebook\.net\/en_US\/all\.js',
    # Woopra flakiness
    r'eatdifferent\.com\.woopra-ns\.com',
    r'static\.woopra\.com\/js\/woopra\.js',
    # Chrome extensions
    r'^chrome(?:-extension)?:\/\/',
    # Cacaoweb
    r'127\.0\.0\.1:4001\/isrunning',
    # Other
    r'webappstoolbarba\.texthelp\.com\/',
    r'metrics\.itunes\.apple\.com\.edgesuite\.net\/',
)), re.I)
class BrowserExtensionsFilter(Filter):
    """Filters out events that match known browser-extension noise."""

    id = 'browser-extensions'
    name = 'Filter out errors known to be caused by browser extensions'
    description = 'Certain browser extensions will inject inline scripts and are known to cause errors.'

    def get_exception_value(self, data):
        """Return the first exception's value string, or '' if absent/malformed."""
        try:
            return data['sentry.interfaces.Exception']['values'][0]['value']
        except (LookupError, TypeError):
            return ''

    def get_exception_source(self, data):
        """Return the abs_path of the innermost frame, or '' if absent/malformed."""
        try:
            return data['sentry.interfaces.Exception']['values'][0]['stacktrace']['frames'][-1]['abs_path']
        except (LookupError, TypeError):
            return ''

    def test(self, data):
        """
        Test the exception value to determine if it looks like the error is
        caused by a common browser extension.
        """
        # Only JavaScript events can be extension noise.
        if data.get('platform') != 'javascript':
            return False

        exc_value = self.get_exception_value(data)
        if exc_value and EXTENSION_EXC_VALUES.search(exc_value):
            return True

        exc_source = self.get_exception_source(data)
        if exc_source and EXTENSION_EXC_SOURCES.match(exc_source):
            return True

        return False
| {
"content_hash": "92c0dee949294ceea8a36e71c9375029",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 107,
"avg_line_length": 32.80232558139535,
"alnum_prop": 0.6334633108826657,
"repo_name": "zenefits/sentry",
"id": "80e63ad55dba5d3ae8bdb080132089f72a670516",
"size": "2821",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/sentry/filters/browser_extensions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "249557"
},
{
"name": "HTML",
"bytes": "293019"
},
{
"name": "JavaScript",
"bytes": "975797"
},
{
"name": "Lua",
"bytes": "22367"
},
{
"name": "Makefile",
"bytes": "5959"
},
{
"name": "Python",
"bytes": "12550461"
},
{
"name": "Ruby",
"bytes": "4026"
},
{
"name": "Shell",
"bytes": "793"
}
],
"symlink_target": ""
} |
from os import environ
from distutils.core import setup, Extension
# Name and version of the extension to build.
extension_name = 'thrift_nicejson'
extension_version = '1.0'

# Directories to search for include files; the thrift install location is
# taken from the environment so builds can point at a non-system thrift.
include_dirs = ['/usr/include', '../cpp', environ['thrift_INCLUDEDIR']]

# Extra library directories needed at link time.
library_dirs = ['../cpp', '/usr/lib', environ['thrift_LIBDIR']]

# Libraries to link with, including the boost python library.
libraries = ['nicejson', 'boost_python', 'boost_system', 'boost_filesystem', 'thrift']

# Source files for the extension.
source_files = ['thrift_nicejson.cpp']

# Create the extension and add it to the python distribution.
# FIX: the lists and names defined above were previously unused -- setup()
# repeated the same literals inline.  They are now passed through, so there
# is a single place to edit each setting.
setup(
    name=extension_name,
    version=extension_version,
    packages=[
        'thrift_nicejson'
    ],
    package_dir={'thrift_nicejson': 'src'},
    ext_modules=[Extension('thrift_nicejson_binary',
                           source_files,
                           include_dirs=include_dirs,
                           library_dirs=library_dirs,
                           libraries=libraries,
                           extra_compile_args=['-std=gnu++11'],
                           extra_link_args=['-Wl,-rpath,' + environ['thrift_LIBDIR']]
                           )]
)
| {
"content_hash": "b7f6db4f44ae4e61089e4ab43d08a77a",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 96,
"avg_line_length": 39.09756097560975,
"alnum_prop": 0.66126013724267,
"repo_name": "chetmurthy/thrift-nicejson",
"id": "a5b3c05aa605a4c74957ba467f8ed3f884d9a4d5",
"size": "1603",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/lib/py/setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "626489"
},
{
"name": "M4",
"bytes": "45646"
},
{
"name": "Makefile",
"bytes": "554552"
},
{
"name": "OCaml",
"bytes": "40249"
},
{
"name": "Python",
"bytes": "28204"
},
{
"name": "Shell",
"bytes": "731134"
},
{
"name": "Thrift",
"bytes": "1782"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.conf.urls import include, url
from django.core import urlresolvers
from django.utils.html import format_html, format_html_join
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import Permission
from wagtail.wagtailcore import hooks
from wagtail.wagtailadmin.menu import MenuItem
from wagtail.wagtailadmin.site_summary import SummaryItem
from wagtail.wagtaildocs import admin_urls
from wagtail.wagtaildocs.models import Document
from wagtail.wagtaildocs.rich_text import DocumentLinkHandler
@hooks.register('register_admin_urls')
def register_admin_urls():
    """Mount the documents admin URLs under /documents/."""
    return [
        url(r'^documents/', include(admin_urls, namespace='wagtaildocs')),
    ]
class DocumentsMenuItem(MenuItem):
    def is_shown(self, request):
        # Show the menu entry to anyone who may add or edit documents.
        return request.user.has_perm('wagtaildocs.add_document') or request.user.has_perm('wagtaildocs.change_document')
@hooks.register('register_admin_menu_item')
def register_documents_menu_item():
    """Add the 'Documents' entry to the Wagtail admin menu."""
    return DocumentsMenuItem(
        _('Documents'),
        urlresolvers.reverse('wagtaildocs:index'),
        name='documents',
        classnames='icon icon-doc-full-inverse',
        order=400  # position within the admin menu (lower sorts first)
    )
@hooks.register('insert_editor_js')
def editor_js():
    """Inject the document-chooser scripts and hallo plugin into the page editor."""
    js_files = [
        'wagtaildocs/js/hallo-plugins/hallo-wagtaildoclink.js',
        'wagtaildocs/js/document-chooser.js',
    ]
    js_includes = format_html_join(
        '\n', '<script src="{0}{1}"></script>',
        ((settings.STATIC_URL, filename) for filename in js_files)
    )
    # Register the chooser URL and activate the hallo plugin inline.
    return js_includes + format_html(
        """
        <script>
            window.chooserUrls.documentChooser = '{0}';
            registerHalloPlugin('hallowagtaildoclink');
        </script>
        """,
        urlresolvers.reverse('wagtaildocs:chooser')
    )
@hooks.register('register_permissions')
def register_permissions():
    """Expose the document add/change permissions in the group editor."""
    return Permission.objects.filter(content_type__app_label='wagtaildocs',
                                     codename__in=['add_document', 'change_document'])
@hooks.register('register_rich_text_link_handler')
def register_document_link_handler():
    # Rich-text links of linktype="document" are expanded by this handler.
    return ('document', DocumentLinkHandler)
class DocumentsSummaryItem(SummaryItem):
    """Dashboard panel showing the total number of documents."""
    order = 300  # position among the homepage summary panels
    template = 'wagtaildocs/homepage/site_summary_documents.html'

    def get_context(self):
        return {
            'total_docs': Document.objects.count(),
        }
@hooks.register('construct_homepage_summary_items')
def add_documents_summary_item(request, items):
    # Mutates the provided list in place; nothing is returned.
    items.append(DocumentsSummaryItem(request))
| {
"content_hash": "cc52510ccb3ab623db4f9d8b96dd4ec1",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 120,
"avg_line_length": 30.702380952380953,
"alnum_prop": 0.694067468010857,
"repo_name": "serzans/wagtail",
"id": "61e6fd393c13ff2b7b64ee21ca4559a1e0428d3b",
"size": "2579",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wagtail/wagtaildocs/wagtail_hooks.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "152982"
},
{
"name": "HTML",
"bytes": "251781"
},
{
"name": "JavaScript",
"bytes": "92398"
},
{
"name": "Makefile",
"bytes": "548"
},
{
"name": "Python",
"bytes": "1670621"
},
{
"name": "Shell",
"bytes": "7388"
}
],
"symlink_target": ""
} |
'''
Created on Nov 27, 2014
@author: Sarraju
'''
import smtpd, threading, asyncore, socket, os
from datetime import datetime
last_eml = ""
class SMTPSimulationServer(smtpd.SMTPServer):
def process_message(self, peer, mailfrom, rcpttos, data):
self.thread = threading.Thread(self.storeEmail(data))
self.thread.start()
def storeEmail(self, data):
#print 'Email: '+data
try:
file_name = self.getFileName()+'.eml'
f = open('emails/'+file_name,'a+b')
f.write(data)
f.close()
global last_eml
last_eml = file_name
except Exception as e:
print 'log it'+e
def getFileName(self):
fileid = 'FILENAME_GENERATION_ERROR'
try:
fileid = datetime.now().strftime('%d%m%Y%H%M%S%f')
c = 0
while os.path.exists('emails/'+fileid+'.eml') and c != 100:
fileid = str(int(fileid) + 1)
c += 1
if c==100:
return 'FILENAME_GENERATION_ERROR'
except Exception as e:
print e
return 'FILENAME_GENERATION_ERROR'
return fileid
class ServerManager(object):
smtp = None
def start(self):
'''
if not self.canStartServer():
print 'Cannot start Server.'
#return
'''
self.smtp = SMTPSimulationServer(('', 25), None)
self.thread = threading.Thread(target=asyncore.loop,kwargs = {'timeout':1} )
self.thread.start()
print 'Started'
def stop(self):
if self.smtp!=None and self.smtp.accepting:
self.smtp.close()
self.thread.join()
print 'Stopped'
def canStartServer(self):
return self.isPortOpen()
def isPortOpen(self):
result = None
try:
s = socket.socket()
result = s.connect(('', 25))
print 'Result '+result
#s.shutdown(2)
return True
except socket.error, e:
print socket.error
print '========'
print e
return False
| {
"content_hash": "806c365f639baecdba975178daad8634",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 85,
"avg_line_length": 26.043956043956044,
"alnum_prop": 0.47848101265822784,
"repo_name": "SVSagi/simple_smtp_simulator_py",
"id": "854e5cbf5de62ac51532aae63e6dbaf1e466bcdf",
"size": "2370",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "smtp_simulator_1/simulator/smtp_simulator_server.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "14164"
}
],
"symlink_target": ""
} |
import MalmoPython
import os
import sys
import time
import random
sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0) # flush print output immediately
items = {'red_flower':'flower',
'apple':'apple',
'iron_sword':'sword',
'iron_pickaxe':'pickaxe',
'diamond_sword':'sword'
}
obj_id = items.keys()[random.randint(0, len(items)-1)]
mission_xml = '''<?xml version="1.0" encoding="UTF-8" standalone="no" ?>
<Mission xmlns="http://ProjectMalmo.microsoft.com" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<About>
<Summary>Name the first item you see.</Summary>
</About>
<ServerSection>
<ServerInitialConditions>
<Time>
<StartTime>6000</StartTime>
<AllowPassageOfTime>false</AllowPassageOfTime>
</Time>
<Weather>clear</Weather>
<AllowSpawning>false</AllowSpawning>
</ServerInitialConditions>
<ServerHandlers>
<FlatWorldGenerator generatorString="3;7,220*1,5*3,2;3;,biome_1"/>
<DrawingDecorator>
<!-- coordinates for cuboid are inclusive -->
<DrawCuboid x1="0" y1="46" z1="0" x2="7" y2="52" z2="7" type="quartz_block" /> <!-- limits of our arena -->
<DrawCuboid x1="1" y1="47" z1="1" x2="6" y2="51" z2="6" type="air" /> <!-- limits of our arena -->
<DrawCuboid x1="1" y1="50" z1="1" x2="6" y2="49" z2="6" type="glowstone" /> <!-- limits of our arena -->
<DrawItem x="4" y="47" z="2" type="'''+obj_id+'''" />
</DrawingDecorator>
<ServerQuitFromTimeUp timeLimitMs="5000"/>
<ServerQuitWhenAnyAgentFinishes/>
</ServerHandlers>
</ServerSection>
<AgentSection mode="Survival">
<Name>Chatty</Name>
<AgentStart>
<Placement x="3" y="47.0" z="3" pitch="30" yaw="270"/>
</AgentStart>
<AgentHandlers>
<ObservationFromFullStats/>
<VideoProducer want_depth="false">
<Width>640</Width>
<Height>480</Height>
</VideoProducer>
<DiscreteMovementCommands />
<ChatCommands />
<RewardForSendingMatchingChatMessage>
<ChatMatch reward="100.0" regex="'''+items[obj_id]+'''" description="Anything that matches the object."/>
</RewardForSendingMatchingChatMessage>
<RewardForSendingCommand reward="-1"/>
</AgentHandlers>
</AgentSection>
</Mission>
'''
# Create default Malmo objects:
agent_host = MalmoPython.AgentHost()
try:
    agent_host.parse( sys.argv )
except RuntimeError as e:
    print 'ERROR:',e
    print agent_host.getUsage()
    exit(1)
if agent_host.receivedArgument("help"):
    print agent_host.getUsage()
    exit(0)

my_mission = MalmoPython.MissionSpec(mission_xml, True)
my_mission_record = MalmoPython.MissionRecordSpec("chat_reward.tgz")

# Attempt to start a mission (retries work around transient startup failures):
max_retries = 3
for retry in range(max_retries):
    try:
        agent_host.startMission( my_mission, my_mission_record )
        break
    except RuntimeError as e:
        if retry == max_retries - 1:
            print "Error starting mission:",e
            exit(1)
        else:
            time.sleep(2)

# Loop until mission starts:
print "Waiting for the mission to start ",
world_state = agent_host.getWorldState()
while not world_state.has_mission_begun:
    sys.stdout.write(".")
    time.sleep(0.1)
    world_state = agent_host.getWorldState()
    for error in world_state.errors:
        print "Error:",error.text

print
print "Mission running ",

if world_state.is_mission_running:
    time.sleep(0.5)
    # Say the matching word; the RewardForSendingMatchingChatMessage
    # handler in the mission XML scores it.
    print "\nSending action: chat %s" % items[obj_id]
    agent_host.sendCommand("chat %s" % items[obj_id])
    time.sleep(1.5)

# Loop until mission ends:
while world_state.is_mission_running:
    sys.stdout.write(".")
    time.sleep(0.5)
    world_state = agent_host.getWorldState()
    for reward in world_state.rewards:
        if reward.getValue() > 0:
            print "\nReceived reward: %.2f" % reward.getValue()
    for error in world_state.errors:
        print "Error:",error.text

print
print "Mission ended"
# Mission has ended. | {
"content_hash": "c03ec5f788381b5ac3368baa98b91ca9",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 123,
"avg_line_length": 31.16793893129771,
"alnum_prop": 0.6274797942689199,
"repo_name": "tnarik/malmo",
"id": "07301f811dc0ecd7c423f3ab3d8ba49f0fb62564",
"size": "5421",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Malmo/samples/Python_examples/chat_reward.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "2871"
},
{
"name": "C#",
"bytes": "28300"
},
{
"name": "C++",
"bytes": "373607"
},
{
"name": "CMake",
"bytes": "99615"
},
{
"name": "Java",
"bytes": "861745"
},
{
"name": "Lua",
"bytes": "27331"
},
{
"name": "Python",
"bytes": "409584"
},
{
"name": "Shell",
"bytes": "3516"
}
],
"symlink_target": ""
} |
import os
import tempfile
import unittest
import mock
import numpy
import pytest
import six
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer.backends import intel64
from chainer import link
from chainer import links
from chainer import optimizers
from chainer.serializers import npz
from chainer import testing
from chainer.testing import attr
import chainerx
class TestDictionarySerializer(unittest.TestCase):
    """Tests for npz.DictionarySerializer across CPU/GPU/ChainerX arrays."""

    def setUp(self):
        self.serializer = npz.DictionarySerializer({})

        self.data = numpy.random.uniform(-1, 1, (2, 3)).astype(numpy.float32)

    def test_get_item(self):
        # Indexing yields a child serializer scoped under 'x/'.
        child = self.serializer['x']
        self.assertIsInstance(child, npz.DictionarySerializer)
        self.assertEqual(child.path, 'x/')

    def test_get_item_strip_slashes(self):
        child = self.serializer['/x/']
        self.assertEqual(child.path, 'x/')

    def check_serialize(self, data, query):
        # Serializing stores a NumPy copy and returns the original object.
        ret = self.serializer(query, data)
        dset = self.serializer.target['w']

        self.assertIsInstance(dset, numpy.ndarray)
        self.assertEqual(dset.shape, data.shape)
        self.assertEqual(dset.size, data.size)
        self.assertEqual(dset.dtype, data.dtype)

        numpy.testing.assert_array_equal(dset, backend.CpuDevice().send(data))

        self.assertIs(ret, data)

    @attr.chainerx
    def test_serialize_chainerx(self):
        self.check_serialize(chainerx.asarray(self.data), 'w')

    def test_serialize_cpu(self):
        self.check_serialize(self.data, 'w')

    @attr.gpu
    def test_serialize_gpu(self):
        self.check_serialize(cuda.to_gpu(self.data), 'w')

    def test_serialize_cpu_strip_slashes(self):
        self.check_serialize(self.data, '/w')

    @attr.gpu
    def test_serialize_gpu_strip_slashes(self):
        self.check_serialize(cuda.to_gpu(self.data), '/w')

    def test_serialize_scalar(self):
        ret = self.serializer('x', 10)
        dset = self.serializer.target['x']

        # Scalars are stored as 0-d integer arrays.
        self.assertIsInstance(dset, numpy.ndarray)
        self.assertEqual(dset.shape, ())
        self.assertEqual(dset.size, 1)
        self.assertEqual(dset.dtype, int)
        self.assertEqual(dset[()], 10)

        self.assertIs(ret, 10)

    def test_serialize_none(self):
        ret = self.serializer('x', None)
        dset = self.serializer.target['x']

        # None round-trips via a 0-d object array.
        self.assertIsInstance(dset, numpy.ndarray)
        self.assertEqual(dset.shape, ())
        self.assertEqual(dset.dtype, numpy.object)
        self.assertIs(dset[()], None)

        self.assertIs(ret, None)
@testing.parameterize(*testing.product({'compress': [False, True]}))
class TestNpzDeserializer(unittest.TestCase):
def setUp(self):
self.data = numpy.random.uniform(-1, 1, (2, 3)).astype(numpy.float32)
fd, path = tempfile.mkstemp()
os.close(fd)
self.temp_file_path = path
with open(path, 'wb') as f:
savez = numpy.savez_compressed if self.compress else numpy.savez
savez(
f, **{'x/': None, 'y': self.data, 'z': numpy.asarray(10),
'zf32': numpy.array(-2**60, dtype=numpy.float32),
'zi64': numpy.array(-2**60, dtype=numpy.int64),
'w': None})
try:
self.npzfile = numpy.load(path, allow_pickle=True)
except TypeError:
self.npzfile = numpy.load(path)
self.deserializer = npz.NpzDeserializer(self.npzfile)
def tearDown(self):
if hasattr(self, 'npzfile'):
self.npzfile.close()
if hasattr(self, 'temp_file_path'):
os.remove(self.temp_file_path)
def test_get_item(self):
child = self.deserializer['x']
self.assertIsInstance(child, npz.NpzDeserializer)
self.assertEqual(child.path[-2:], 'x/')
def test_get_item_strip_slashes(self):
child = self.deserializer['/x/']
self.assertEqual(child.path, 'x/')
def check_deserialize(self, y, query):
ret = self.deserializer(query, y)
numpy.testing.assert_array_equal(
backend.CpuDevice().send(y), self.data)
self.assertIs(ret, y)
def check_deserialize_by_passing_none(self, y, query):
ret = self.deserializer(query, None)
numpy.testing.assert_array_equal(
backend.CpuDevice().send(ret), self.data)
@attr.chainerx
def test_deserialize_chainerx(self):
y = numpy.empty((2, 3), dtype=numpy.float32)
self.check_deserialize(chainerx.asarray(y), 'y')
@attr.chainerx
@attr.gpu
def test_deserialize_chainerx_non_native(self):
y = numpy.empty((2, 3), dtype=numpy.float32)
self.check_deserialize(chainerx.asarray(y, device='cuda:0'), 'y')
def test_deserialize_cpu(self):
y = numpy.empty((2, 3), dtype=numpy.float32)
self.check_deserialize(y, 'y')
def test_deserialize_by_passing_none_cpu(self):
y = numpy.empty((2, 3), dtype=numpy.float32)
self.check_deserialize_by_passing_none(y, 'y')
@attr.gpu
def test_deserialize_gpu(self):
y = numpy.empty((2, 3), dtype=numpy.float32)
self.check_deserialize(cuda.to_gpu(y), 'y')
@attr.ideep
def test_deserialize_ideep(self):
y = numpy.empty((2, 3), dtype=numpy.float32)
self.check_deserialize(intel64.mdarray(y), 'y')
@attr.gpu
def test_deserialize_by_passing_none_gpu(self):
y = numpy.empty((2, 3), dtype=numpy.float32)
self.check_deserialize_by_passing_none(cuda.to_gpu(y), 'y')
def test_deserialize_cpu_strip_slashes(self):
    """A leading slash in the query key is ignored (CPU)."""
    y = numpy.empty((2, 3), dtype=numpy.float32)
    self.check_deserialize(y, '/y')
@attr.gpu
def test_deserialize_gpu_strip_slashes(self):
    """A leading slash in the query key is ignored (GPU)."""
    y = numpy.empty((2, 3), dtype=numpy.float32)
    self.check_deserialize(cuda.to_gpu(y), '/y')
def test_deserialize_different_dtype_cpu(self):
    """Stored float32 data is cast to the target array's float16 dtype."""
    y = numpy.empty((2, 3), dtype=numpy.float16)
    ret = self.deserializer('y', y)
    numpy.testing.assert_array_equal(y, self.data.astype(numpy.float16))
    self.assertIs(ret, y)
@attr.gpu
def test_deserialize_different_dtype_gpu(self):
    """Dtype conversion also happens when the target array lives on GPU."""
    y = cuda.cupy.empty((2, 3), dtype=numpy.float16)
    ret = self.deserializer('y', y)
    numpy.testing.assert_array_equal(
        y.get(), self.data.astype(numpy.float16))
    self.assertIs(ret, y)
def test_deserialize_scalar(self):
    """A stored scalar replaces the passed-in value entirely."""
    z = 5
    ret = self.deserializer('z', z)
    # 10 is presumably the value saved under 'z' by setUp (the fixture
    # dict is partially outside this view) — TODO confirm against setUp.
    self.assertEqual(ret, 10)
def test_deserialize_int64_to_int(self):
    """A large int64 scalar (-2**60, saved in setUp) loads into a Python int."""
    z = int(5)
    ret = self.deserializer('zi64', z)
    assert ret == -2**60
def test_deserialize_int64_to_uint32(self):
    """Loading a negative int64 into a uint32 target raises TypeError."""
    z = numpy.uint32(5)
    with pytest.raises(TypeError):
        self.deserializer('zi64', z)
def test_deserialize_float32_to_int(self):
    """Loading a float32 scalar into a Python int target raises TypeError."""
    z = int(5)
    with pytest.raises(TypeError):
        self.deserializer('zf32', z)
def test_deserialize_none(self):
    """A value saved as None deserializes back to None."""
    ret = self.deserializer('w', None)
    self.assertIs(ret, None)
def test_deserialize_by_passing_array(self):
    """A stored None wins even when a real array is supplied as target."""
    y = numpy.empty((1,), dtype=numpy.float32)
    ret = self.deserializer('w', y)
    self.assertIs(ret, None)
class TestNpzDeserializerNonStrict(unittest.TestCase):
    """Tests NpzDeserializer(strict=False) against a file lacking some keys."""

    def setUp(self):
        # Save a file containing only 'x'; 'y' is deliberately absent.
        fd, path = tempfile.mkstemp()
        os.close(fd)
        self.temp_file_path = path
        with open(path, 'wb') as f:
            numpy.savez(
                f, **{'x': numpy.asarray(10)})

        self.npzfile = numpy.load(path)
        self.deserializer = npz.NpzDeserializer(self.npzfile, strict=False)

    def tearDown(self):
        if hasattr(self, 'npzfile'):
            self.npzfile.close()
        if hasattr(self, 'temp_file_path'):
            os.remove(self.temp_file_path)

    def test_deserialize_partial(self):
        """A key missing from the file leaves the passed array untouched."""
        y = numpy.empty((2, 3), dtype=numpy.float32)
        ret = self.deserializer('y', y)
        self.assertIs(ret, y)
@testing.parameterize(
    {'ignore_names': 'yy'},
    {'ignore_names': ['yy']},
    {'ignore_names': lambda key: key == 'yy'},
    {'ignore_names': [lambda key: key == 'yy']},
)
class TestNpzDeserializerIgnoreNames(unittest.TestCase):
    """ignore_names (string, list, callable, or list of either) skips keys."""

    def setUp(self):
        fd, path = tempfile.mkstemp()
        os.close(fd)
        self.temp_file_path = path
        with open(path, 'wb') as f:
            numpy.savez(
                f, **{'x': numpy.asarray(10), 'yy': numpy.empty((2, 3))})

        self.npzfile = numpy.load(path)
        self.deserializer = npz.NpzDeserializer(
            self.npzfile, ignore_names=self.ignore_names)

    def tearDown(self):
        if hasattr(self, 'npzfile'):
            self.npzfile.close()
        if hasattr(self, 'temp_file_path'):
            os.remove(self.temp_file_path)

    def test_deserialize_ignore_names(self):
        """An ignored key returns the passed object unmodified."""
        yy = numpy.ones((2, 1), dtype=numpy.float32)
        ret = self.deserializer('yy', yy)
        self.assertIs(ret, yy)
@testing.parameterize(
    {'ignore_names': 'yy'},
    {'ignore_names': ['yy']},
    {'ignore_names': lambda key: key == 'yy'},
    {'ignore_names': [lambda key: key == 'yy']},
)
class TestLoadNpzIgnoreNames(unittest.TestCase):
    """load_npz forwards ignore_names so matching params are not loaded."""

    def setUp(self):
        fd, path = tempfile.mkstemp()
        os.close(fd)
        self.temp_file_path = path
        self.x = numpy.asarray(10, dtype=numpy.float32)
        self.yy = numpy.ones((2, 3), dtype=numpy.float32)
        with open(path, 'wb') as f:
            numpy.savez(
                f, **{'x': self.x, 'yy': self.yy})

    def tearDown(self):
        if hasattr(self, 'temp_file_path'):
            os.remove(self.temp_file_path)

    def test_load_npz_ignore_names(self):
        """'x' is restored from file while the ignored 'yy' keeps its init."""
        chain = link.Chain()
        with chain.init_scope():
            chain.x = chainer.variable.Parameter(shape=())
            chain.yy = chainer.variable.Parameter(shape=(2, 3))
        npz.load_npz(
            self.temp_file_path, chain, ignore_names=self.ignore_names)
        self.assertEqual(chain.x.data, self.x)
        self.assertFalse(numpy.all(chain.yy.data == self.yy))
@testing.parameterize(*testing.product({'file_type': ['filename', 'bytesio']}))
class TestNpzDeserializerNonStrictGroupHierachy(unittest.TestCase):
    """Non-strict deserialization of a nested link hierarchy.

    A parent/child chain is saved; it is then loaded into a target whose
    child holds a differently named layer. With strict=False the unmatched
    child parameters must be left untouched while matching ones are
    restored from the file.
    """

    def setUp(self):
        # Serialize either to a real temp file or an in-memory buffer.
        if self.file_type == 'filename':
            fd, path = tempfile.mkstemp()
            os.close(fd)
            self.file = path
        elif self.file_type == 'bytesio':
            self.file = six.BytesIO()
        else:
            assert False

        # Create and save a link
        child = link.Chain()
        with child.init_scope():
            child.linear = links.Linear(2, 3)
        parent = link.Chain()
        with parent.init_scope():
            parent.linear = links.Linear(3, 2)
            parent.child = child
        npz.save_npz(self.file, parent)
        self.source = parent

        if self.file_type == 'bytesio':
            self.file.seek(0)
        self.npzfile = numpy.load(self.file)
        self.deserializer = npz.NpzDeserializer(self.npzfile, strict=False)

    def tearDown(self):
        if hasattr(self, 'npzfile'):
            self.npzfile.close()
        if self.file_type == 'filename':
            os.remove(self.file)

    def test_deserialize_hierarchy(self):
        # Load into a link whose child has no parameters matching the file.
        child = link.Chain()
        with child.init_scope():
            child.linear2 = links.Linear(2, 3)
        target = link.Chain()
        with target.init_scope():
            target.linear = links.Linear(3, 2)
            target.child = child
        target_child_W = numpy.copy(child.linear2.W.data)
        target_child_b = numpy.copy(child.linear2.b.data)

        self.deserializer.load(target)

        # Matching parent parameters are restored from the file.
        # (The original asserted the W comparison twice by copy-paste;
        # the duplicate has been removed.)
        numpy.testing.assert_array_equal(
            self.source.linear.W.data, target.linear.W.data)
        numpy.testing.assert_array_equal(
            self.source.linear.b.data, target.linear.b.data)
        # Unmatched child parameters keep their initial values.
        numpy.testing.assert_array_equal(
            target.child.linear2.W.data, target_child_W)
        numpy.testing.assert_array_equal(
            target.child.linear2.b.data, target_child_b)
class TestSerialize(unittest.TestCase):
    """npz.serialize must drive obj.serialize once and return a plain dict."""

    def test_serialize(self):
        mocked = mock.MagicMock()
        result = npz.serialize(mocked)

        # serialize() must have been invoked exactly once, receiving a
        # DictionarySerializer, and the collected state must be a dict.
        assert mocked.serialize.call_count == 1
        (received_serializer,), _ = mocked.serialize.call_args
        assert isinstance(received_serializer, npz.DictionarySerializer)
        assert isinstance(result, dict)
@testing.parameterize(
    {'ignore_names': ['linear/W', 'child/linear2/b']},
    {'ignore_names': lambda key: key in ['linear/W', 'child/linear2/b']},
    {'ignore_names': [
        lambda key: key in ['linear/W'],
        lambda key: key in ['child/linear2/b']]},
    {'ignore_names': [
        lambda key: key in ['linear/W'],
        'child/linear2/b']},
)
class TestNpzDeserializerIgnoreNamesGroupHierachy(unittest.TestCase):
    """ignore_names works on nested (slash-separated) keys of a hierarchy."""

    def setUp(self):
        fd, path = tempfile.mkstemp()
        os.close(fd)
        self.temp_file_path = path

        # Save a parent/child chain; both levels contribute parameters.
        child = link.Chain()
        with child.init_scope():
            child.linear2 = links.Linear(2, 3)
        parent = link.Chain()
        with parent.init_scope():
            parent.linear = links.Linear(3, 2)
            parent.child = child
        npz.save_npz(self.temp_file_path, parent)
        self.source = parent

        self.npzfile = numpy.load(path)
        self.deserializer = npz.NpzDeserializer(
            self.npzfile, ignore_names=self.ignore_names)

    def tearDown(self):
        if hasattr(self, 'npzfile'):
            self.npzfile.close()
        if hasattr(self, 'temp_file_path'):
            os.remove(self.temp_file_path)

    def test_deserialize_ignore_names(self):
        """Ignored params keep their values; the rest load from the file."""
        child = link.Chain()
        with child.init_scope():
            child.linear2 = links.Linear(2, 3)
        target = link.Chain()
        with target.init_scope():
            target.linear = links.Linear(3, 2)
            target.child = child
        target_W = numpy.copy(target.linear.W.data)
        target_child_b = numpy.copy(child.linear2.b.data)

        self.deserializer.load(target)

        numpy.testing.assert_array_equal(
            self.source.linear.b.data, target.linear.b.data)
        numpy.testing.assert_array_equal(
            self.source.child.linear2.W.data, target.child.linear2.W.data)
        numpy.testing.assert_array_equal(
            target.linear.W.data, target_W)
        numpy.testing.assert_array_equal(
            target.child.linear2.b.data, target_child_b)
@testing.parameterize(*testing.product({
    'compress': [False, True],
    'file_type': ['filename', 'bytesio'],
}))
class TestSaveNpz(unittest.TestCase):
    """save_npz drives the target's serialize() with a DictionarySerializer."""

    def setUp(self):
        if self.file_type == 'filename':
            fd, path = tempfile.mkstemp()
            os.close(fd)
            self.file = path
        elif self.file_type == 'bytesio':
            self.file = six.BytesIO()
        else:
            assert False

    def tearDown(self):
        if self.file_type == 'filename':
            os.remove(self.file)

    def test_save(self):
        """serialize() is called exactly once with a DictionarySerializer."""
        obj = mock.MagicMock()
        npz.save_npz(self.file, obj, self.compress)

        self.assertEqual(obj.serialize.call_count, 1)
        (serializer,), _ = obj.serialize.call_args
        self.assertIsInstance(serializer, npz.DictionarySerializer)
@testing.parameterize(*testing.product({
    'compress': [False, True],
    'file_type': ['filename', 'bytesio'],
}))
class TestLoadNpz(unittest.TestCase):
    """load_npz behavior: strict-flag propagation and `path` sub-scoping."""

    def setUp(self):
        if self.file_type == 'filename':
            fd, path = tempfile.mkstemp()
            os.close(fd)
            self.file = path
        elif self.file_type == 'bytesio':
            self.file = six.BytesIO()
        else:
            assert False

        # Save a parent/child chain used by the path-scoping tests below.
        child = link.Chain()
        with child.init_scope():
            child.child_linear = links.Linear(2, 3)
        parent = link.Chain()
        with parent.init_scope():
            parent.parent_linear = links.Linear(3, 2)
            parent.child = child
        npz.save_npz(self.file, parent, self.compress)
        if self.file_type == 'bytesio':
            self.file.seek(0)

        self.source_child = child
        self.source_parent = parent

    def tearDown(self):
        if self.file_type == 'filename':
            os.remove(self.file)

    def test_load_with_strict(self):
        """Default load is strict: the deserializer carries strict=True."""
        obj = mock.MagicMock()
        npz.load_npz(self.file, obj)

        self.assertEqual(obj.serialize.call_count, 1)
        (serializer,), _ = obj.serialize.call_args
        self.assertIsInstance(serializer, npz.NpzDeserializer)
        self.assertTrue(serializer.strict)

    def test_load_without_strict(self):
        """strict=False is forwarded to the NpzDeserializer."""
        obj = mock.MagicMock()
        npz.load_npz(self.file, obj, strict=False)

        self.assertEqual(obj.serialize.call_count, 1)
        (serializer,), _ = obj.serialize.call_args
        self.assertFalse(serializer.strict)
        self.assertIsInstance(serializer, npz.NpzDeserializer)

    def test_load_with_path(self):
        """A 'child/' path loads only that sub-hierarchy from the file."""
        target = link.Chain()
        with target.init_scope():
            target.child_linear = links.Linear(2, 3)
        npz.load_npz(self.file, target, 'child/')
        numpy.testing.assert_array_equal(
            self.source_child.child_linear.W.data, target.child_linear.W.data)

    def test_load_without_path(self):
        """An empty path loads from the root of the file."""
        target = link.Chain()
        with target.init_scope():
            target.parent_linear = links.Linear(3, 2)
        npz.load_npz(self.file, target, path='')
        numpy.testing.assert_array_equal(
            self.source_parent.parent_linear.W.data,
            target.parent_linear.W.data)
@testing.parameterize(*testing.product({
    'compress': [False, True],
    'file_type': ['filename', 'bytesio'],
}))
class TestGroupHierachy(unittest.TestCase):
    """Key layout produced when saving nested links and optimizer state.

    A parent chain holding a child chain (each with parameters) is saved
    both via a DictionarySerializer and via save_npz; the resulting npz
    key sets are verified, and loading is checked in strict and non-strict
    modes.
    """

    def setUp(self):
        if self.file_type == 'filename':
            fd, path = tempfile.mkstemp()
            os.close(fd)
            self.file = path
        elif self.file_type == 'bytesio':
            self.file = six.BytesIO()
        else:
            assert False

        child = link.Chain()
        with child.init_scope():
            child.linear = links.Linear(2, 3)
            child.Wc = chainer.Parameter(shape=(2, 3))

        self.parent = link.Chain()
        with self.parent.init_scope():
            self.parent.child = child
            self.parent.Wp = chainer.Parameter(shape=(2, 3))

        self.optimizer = optimizers.AdaDelta()
        self.optimizer.setup(self.parent)

        self.parent.cleargrads()
        self.optimizer.update()  # init all states

        self.savez = numpy.savez_compressed if self.compress else numpy.savez

    def tearDown(self):
        if self.file_type == 'filename':
            os.remove(self.file)

    def _save(self, target, obj, name):
        # Serialize obj into the dict `target` under the given name prefix.
        serializer = npz.DictionarySerializer(target, name)
        serializer.save(obj)

    def _savez(self, file, d):
        # Write the dict of arrays to self.file with the configured savez.
        if self.file_type == 'filename':
            # Fix: the original opened the file without ever closing it,
            # leaking the file handle; use a context manager instead.
            with open(self.file, 'wb') as f:
                self.savez(f, **d)
        elif self.file_type == 'bytesio':
            self.savez(self.file, **d)
            self.file.seek(0)
        else:
            assert False

    def _save_npz(self, file, obj, compress):
        npz.save_npz(file, obj, compress)
        if self.file_type == 'bytesio':
            self.file.seek(0)

    def _check_chain_group(self, npzfile, state, prefix=''):
        # Expected keys for a saved chain: child params plus extra `state`.
        keys = ('child/linear/W',
                'child/linear/b',
                'child/Wc') + state
        self.assertSetEqual(set(npzfile.keys()), {prefix + x for x in keys})

    def _check_optimizer_group(self, npzfile, state, prefix=''):
        # Expected keys for saved AdaDelta state (t/msg/msdx per param).
        keys = ('child/linear/W/t',
                'child/linear/W/msg',
                'child/linear/W/msdx',
                'child/linear/b/t',
                'child/linear/b/msg',
                'child/linear/b/msdx',
                'child/Wc/t',
                'child/Wc/msg',
                'child/Wc/msdx') + state
        self.assertEqual(set(npzfile.keys()),
                         {prefix + x for x in keys})

    def test_save_chain(self):
        """DictionarySerializer with a prefix produces prefixed chain keys."""
        d = {}
        self._save(d, self.parent, 'test/')
        self._savez(self.file, d)
        with numpy.load(self.file) as f:
            self._check_chain_group(f, ('Wp',), 'test/')

    def test_save_optimizer(self):
        """DictionarySerializer with a prefix produces prefixed state keys."""
        d = {}
        self._save(d, self.optimizer, 'test/')
        self._savez(self.file, d)
        with numpy.load(self.file) as npzfile:
            self._check_optimizer_group(
                npzfile, ('Wp/t', 'Wp/msg', 'Wp/msdx', 'epoch', 't'), 'test/')

    def test_save_chain2(self):
        """save_npz produces the same chain keys without a prefix."""
        self._save_npz(self.file, self.parent, self.compress)
        with numpy.load(self.file) as npzfile:
            self._check_chain_group(npzfile, ('Wp',))

    def test_save_optimizer2(self):
        """save_npz produces the same optimizer state keys without a prefix."""
        self._save_npz(self.file, self.optimizer, self.compress)
        with numpy.load(self.file) as npzfile:
            self._check_optimizer_group(
                npzfile, ('Wp/t', 'Wp/msg', 'Wp/msdx', 'epoch', 't'))

    def test_load_optimizer_with_strict(self):
        """A strict round-trip restores every parameter value."""
        for param in self.parent.params():
            param.data.fill(1)
        self._save_npz(self.file, self.parent, self.compress)
        for param in self.parent.params():
            param.data.fill(0)
        npz.load_npz(self.file, self.parent)
        for param in self.parent.params():
            self.assertTrue((param.data == 1).all())

    def test_load_optimizer_without_strict(self):
        """Non-strict load succeeds after a saved param has been removed."""
        for param in self.parent.params():
            param.data.fill(1)
        self._save_npz(self.file, self.parent, self.compress)
        # Remove a param
        del self.parent.child.linear.b
        for param in self.parent.params():
            param.data.fill(0)
        npz.load_npz(self.file, self.parent, strict=False)
        for param in self.parent.params():
            self.assertTrue((param.data == 1).all())
        self.assertFalse(hasattr(self.parent.child.linear, 'b'))
testing.run_module(__name__, __file__)
| {
"content_hash": "ba36a733d4e7b120548d0e3f4b751b46",
"timestamp": "",
"source": "github",
"line_count": 684,
"max_line_length": 79,
"avg_line_length": 32.53216374269006,
"alnum_prop": 0.5904188387560668,
"repo_name": "niboshi/chainer",
"id": "71bf791afdcbbf848f5eab6448aa1d69030c4aea",
"size": "22252",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tests/chainer_tests/serializers_tests/test_npz.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3796"
},
{
"name": "C",
"bytes": "1099"
},
{
"name": "C++",
"bytes": "1685561"
},
{
"name": "CMake",
"bytes": "51563"
},
{
"name": "Cuda",
"bytes": "191182"
},
{
"name": "Dockerfile",
"bytes": "6422"
},
{
"name": "PowerShell",
"bytes": "7197"
},
{
"name": "Python",
"bytes": "6334795"
},
{
"name": "Shell",
"bytes": "47473"
}
],
"symlink_target": ""
} |
import os, sys
from glob import glob
from re import sub
import argparse
def escape(s):
    """Render raw test-script bytes as the body of a C string literal.

    Control characters, quotes, and backslashes are escaped; newlines
    additionally break the literal into adjacent C string fragments so
    the generated source stays readable.
    """
    text = s.decode()
    replacements = {
        "\0": "\\0",
        "\t": "\\t",
        "\n": '\\n"\n"',
        "\r": "\\r",
        "\\": "\\\\",
        '"': '\\"',
    }
    escaped = "".join(replacements.get(ch, ch) for ch in text)
    return '""\n"%s"' % escaped
def chew_filename(t):
    """Map a test path to its generated C function name and description.

    Slashes, dots, and dashes become underscores so the result is a valid
    C identifier; the original path is kept as the human-readable 'desc'.
    """
    func_name = "test_%s_fn" % sub(r"/|\.|-", "_", t)
    return {"func": func_name, "desc": t}
def script_to_map(test_file):
    """Build the template substitution map for one test script.

    Reads ``test_file`` and its expected-output twin ``test_file + '.exp'``
    and returns a dict with the generated C function name ('name') plus the
    escaped script ('script') and expected output ('output') as C string
    literal bodies.
    """
    r = {"name": chew_filename(test_file)["func"]}
    with open(test_file, "rb") as f:
        r["script"] = escape(f.read())
    with open(test_file + ".exp", "rb") as f:
        r["output"] = escape(f.read())
    return r
# C code templates: each generated test function embeds the script source
# and its expected output as string literals and drives upytesthelper.
test_function = (
    "void {name}(void* data) {{\n"
    "    static const char pystr[] = {script};\n"
    "    static const char exp[] = {output};\n"
    '    printf("\\n");\n'
    "    upytest_set_expected_output(exp, sizeof(exp) - 1);\n"
    "    upytest_execute_test(pystr);\n"
    '    printf("result: ");\n'
    "}}"
)
testcase_struct = "struct testcase_t {name}_tests[] = {{\n{body}\n    END_OF_TESTCASES\n}};"
testcase_member = '    {{ "{desc}", {func}, TT_ENABLED_, 0, 0 }},'

testgroup_struct = "struct testgroup_t groups[] = {{\n{body}\n    END_OF_GROUPS\n}};"
testgroup_member = '    {{ "{name}", {name}_tests }},'

## XXX: may be we could have `--without <groups>` argument...

# currently these tests are selected because they pass on qemu-arm
test_dirs = (
    "basics",
    "micropython",
    "misc",
    "extmod",
    "float",
    "inlineasm",
    "qemu-arm",
)  # 'import', 'io',)

# Tests that cannot run (or cannot be checked) under this harness.
exclude_tests = (
    # pattern matching in .exp
    "basics/bytes_compare3.py",
    "extmod/ticks_diff.py",
    "extmod/time_ms_us.py",
    "extmod/uheapq_timeq.py",
    # unicode char issue
    "extmod/ujson_loads.py",
    # doesn't output to python stdout
    "extmod/ure_debug.py",
    "extmod/vfs_basic.py",
    "extmod/vfs_fat_ramdisk.py",
    "extmod/vfs_fat_fileio.py",
    "extmod/vfs_fat_fsusermount.py",
    "extmod/vfs_fat_oldproto.py",
    # rounding issues
    "float/float_divmod.py",
    # requires double precision floating point to work
    "float/float2int_doubleprec_intbig.py",
    "float/float_parse_doubleprec.py",
    # inline asm FP tests (require Cortex-M4)
    "inlineasm/asmfpaddsub.py",
    "inlineasm/asmfpcmp.py",
    "inlineasm/asmfpldrstr.py",
    "inlineasm/asmfpmuldiv.py",
    "inlineasm/asmfpsqrt.py",
    # different filename in output
    "micropython/emg_exc.py",
    "micropython/heapalloc_traceback.py",
    # don't have emergency exception buffer
    "micropython/heapalloc_exc_compressed_emg_exc.py",
    # pattern matching in .exp
    "micropython/meminfo.py",
    # needs sys stdfiles
    "misc/print_exception.py",
    # settrace .exp files are too large
    "misc/sys_settrace_loop.py",
    "misc/sys_settrace_generator.py",
    "misc/sys_settrace_features.py",
    # don't have f-string
    "basics/string_fstring.py",
    "basics/string_fstring_debug.py",
)

output = []
tests = []

argparser = argparse.ArgumentParser(
    description="Convert native MicroPython tests to tinytest/upytesthelper C code"
)
argparser.add_argument("--stdin", action="store_true", help="read list of tests from stdin")
argparser.add_argument("--exclude", action="append", help="exclude test by name")
args = argparser.parse_args()

# Collect the list of test scripts either from the directories above or
# (with --stdin) one path per line from standard input.
if not args.stdin:
    if args.exclude:
        exclude_tests += tuple(args.exclude)
    for group in test_dirs:
        tests += [test for test in glob("{}/*.py".format(group)) if test not in exclude_tests]
else:
    for l in sys.stdin:
        tests.append(l.rstrip())

# Emit one C function per test, then the testcase and group tables.
output.extend([test_function.format(**script_to_map(test)) for test in tests])
testcase_members = [testcase_member.format(**chew_filename(test)) for test in tests]
output.append(testcase_struct.format(name="", body="\n".join(testcase_members)))

testgroup_members = [testgroup_member.format(name=group) for group in [""]]
output.append(testgroup_struct.format(body="\n".join(testgroup_members)))

## XXX: may be we could have `--output <filename>` argument...
# Don't depend on what system locale is set, use utf8 encoding.
sys.stdout.buffer.write("\n\n".join(output).encode("utf8"))
| {
"content_hash": "4cb73e2106df15e3d22ef11b27247d3d",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 94,
"avg_line_length": 31.34328358208955,
"alnum_prop": 0.6230952380952381,
"repo_name": "bvernoux/micropython",
"id": "f1169a34d4bdd76b43ff56021131b3209c76fe08",
"size": "4224",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tools/tinytest-codegen.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "50694"
},
{
"name": "C",
"bytes": "19869126"
},
{
"name": "C++",
"bytes": "2489380"
},
{
"name": "HTML",
"bytes": "84456"
},
{
"name": "Makefile",
"bytes": "49218"
},
{
"name": "Objective-C",
"bytes": "8382"
},
{
"name": "Python",
"bytes": "856777"
},
{
"name": "Shell",
"bytes": "6229"
}
],
"symlink_target": ""
} |
# Sphinx build configuration for the hcn.sitetheme documentation.
# Only values that differ from Sphinx's defaults are set here.

from datetime import datetime

# Sphinx extension modules used by this build.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest']

# Paths (relative to this directory) searched for templates.
templates_path = ['_templates']

# Source file suffix and the root document of the toctree.
source_suffix = '.rst'
master_doc = 'index'

# Project identity; the copyright year tracks the build date.
project = u'hcn.sitetheme'
copyright = u'%s, Christoph Boehner.' % datetime.now().year

# Directories excluded when searching for source files.
exclude_trees = []

# Syntax-highlighting style for code samples.
pygments_style = 'sphinx'

# -- HTML output --------------------------------------------------------------

# Stock theme, static asset path, and HTML help builder base name.
html_theme = 'default'
html_static_path = ['_static']
htmlhelp_basename = 'sitethemedoc'

# -- LaTeX output -------------------------------------------------------------

# (source start file, target name, title, author, documentclass).
latex_documents = [
    ('index',
     'sitetheme.tex',
     u'hcn.sitetheme Documentation',
     u'', 'manual'
     ),
]
| {
"content_hash": "33967b8dc4befc199ac3d29448cfc027",
"timestamp": "",
"source": "github",
"line_count": 165,
"max_line_length": 79,
"avg_line_length": 31.296969696969697,
"alnum_prop": 0.7124322230828815,
"repo_name": "a25kk/hcn",
"id": "e1442ed3cb26d644a6a79f04a681aab375702b4a",
"size": "5999",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12029"
},
{
"name": "Shell",
"bytes": "1467"
}
],
"symlink_target": ""
} |
import sys
import unittest
import Tkinter as tkinter
import ttk
from test.test_support import requires, run_unittest, swap_attr
from test_ttk.support import AbstractTkTest, destroy_default_root
requires('gui')
class LabeledScaleTest(AbstractTkTest, unittest.TestCase):
    """Tests for the ttk.LabeledScale widget (Python 2 / Tkinter)."""

    def tearDown(self):
        self.root.update_idletasks()
        super(LabeledScaleTest, self).tearDown()

    def test_widget_destroy(self):
        """Destroying the widget cleans up its variable and trace callback."""
        # automatically created variable
        x = ttk.LabeledScale(self.root)
        var = x._variable._name
        x.destroy()
        self.assertRaises(tkinter.TclError, x.tk.globalgetvar, var)

        # manually created variable
        myvar = tkinter.DoubleVar(self.root)
        name = myvar._name
        x = ttk.LabeledScale(self.root, variable=myvar)
        x.destroy()
        if self.wantobjects:
            self.assertEqual(x.tk.globalgetvar(name), myvar.get())
        else:
            self.assertEqual(float(x.tk.globalgetvar(name)), myvar.get())
        del myvar
        self.assertRaises(tkinter.TclError, x.tk.globalgetvar, name)

        # checking that the tracing callback is properly removed
        myvar = tkinter.IntVar(self.root)
        # LabeledScale will start tracing myvar
        x = ttk.LabeledScale(self.root, variable=myvar)
        x.destroy()
        # Unless the tracing callback was removed, creating a new
        # LabeledScale with the same var will cause an error now. This
        # happens because the variable will be set to (possibly) a new
        # value which causes the tracing callback to be called and then
        # it tries calling instance attributes not yet defined.
        ttk.LabeledScale(self.root, variable=myvar)
        if hasattr(sys, 'last_type'):
            self.assertNotEqual(sys.last_type, tkinter.TclError)

    def test_initialization_no_master(self):
        """With no master, the widget bootstraps a default root window."""
        # no master passing
        with swap_attr(tkinter, '_default_root', None), \
             swap_attr(tkinter, '_support_default_root', True):
            try:
                x = ttk.LabeledScale()
                self.assertIsNotNone(tkinter._default_root)
                self.assertEqual(x.master, tkinter._default_root)
                self.assertEqual(x.tk, tkinter._default_root.tk)
                x.destroy()
            finally:
                destroy_default_root()

    def test_initialization(self):
        """Constructor arguments: master, from_/variable, compound, kwargs."""
        # master passing
        master = tkinter.Frame(self.root)
        x = ttk.LabeledScale(master)
        self.assertEqual(x.master, master)
        x.destroy()

        # variable initialization/passing
        passed_expected = (('0', 0), (0, 0), (10, 10),
                           (-1, -1), (sys.maxint + 1, sys.maxint + 1))
        if self.wantobjects:
            passed_expected += ((2.5, 2),)
        for pair in passed_expected:
            x = ttk.LabeledScale(self.root, from_=pair[0])
            self.assertEqual(x.value, pair[1])
            x.destroy()
        x = ttk.LabeledScale(self.root, from_='2.5')
        self.assertRaises(ValueError, x._variable.get)
        x.destroy()
        x = ttk.LabeledScale(self.root, from_=None)
        self.assertRaises(ValueError, x._variable.get)
        x.destroy()

        # variable should have its default value set to the from_ value
        myvar = tkinter.DoubleVar(self.root, value=20)
        x = ttk.LabeledScale(self.root, variable=myvar)
        self.assertEqual(x.value, 0)
        x.destroy()
        # check that it is really using a DoubleVar
        x = ttk.LabeledScale(self.root, variable=myvar, from_=0.5)
        self.assertEqual(x.value, 0.5)
        self.assertEqual(x._variable._name, myvar._name)
        x.destroy()

        # widget positionment
        def check_positions(scale, scale_pos, label, label_pos):
            self.assertEqual(scale.pack_info()['side'], scale_pos)
            self.assertEqual(label.place_info()['anchor'], label_pos)
        x = ttk.LabeledScale(self.root, compound='top')
        check_positions(x.scale, 'bottom', x.label, 'n')
        x.destroy()
        x = ttk.LabeledScale(self.root, compound='bottom')
        check_positions(x.scale, 'top', x.label, 's')
        x.destroy()
        # invert default positions
        x = ttk.LabeledScale(self.root, compound='unknown')
        check_positions(x.scale, 'top', x.label, 's')
        x.destroy()
        x = ttk.LabeledScale(self.root)  # take default positions
        check_positions(x.scale, 'bottom', x.label, 'n')
        x.destroy()

        # extra, and invalid, kwargs
        self.assertRaises(tkinter.TclError, ttk.LabeledScale, master, a='b')

    def test_horizontal_range(self):
        """Changing from_/to repositions the slider and its label."""
        lscale = ttk.LabeledScale(self.root, from_=0, to=10)
        lscale.pack()
        lscale.wait_visibility()
        lscale.update()

        linfo_1 = lscale.label.place_info()
        prev_xcoord = lscale.scale.coords()[0]
        self.assertEqual(prev_xcoord, int(linfo_1['x']))
        # change range to: from -5 to 5. This should change the x coord of
        # the scale widget, since 0 is at the middle of the new
        # range.
        lscale.scale.configure(from_=-5, to=5)
        # The following update is needed since the test doesn't use mainloop,
        # at the same time this shouldn't affect test outcome
        lscale.update()
        curr_xcoord = lscale.scale.coords()[0]
        self.assertNotEqual(prev_xcoord, curr_xcoord)
        # the label widget should have been repositioned too
        linfo_2 = lscale.label.place_info()
        self.assertEqual(lscale.label['text'], 0 if self.wantobjects else '0')
        self.assertEqual(curr_xcoord, int(linfo_2['x']))
        # change the range back
        lscale.scale.configure(from_=0, to=10)
        self.assertNotEqual(prev_xcoord, curr_xcoord)
        self.assertEqual(prev_xcoord, int(linfo_1['x']))

        lscale.destroy()

    def test_variable_change(self):
        """Setting .value moves the slider and updates the label text."""
        x = ttk.LabeledScale(self.root)
        x.pack()
        x.wait_visibility()
        x.update()

        curr_xcoord = x.scale.coords()[0]
        newval = x.value + 1
        x.value = newval
        # The following update is needed since the test doesn't use mainloop,
        # at the same time this shouldn't affect test outcome
        x.update()
        self.assertEqual(x.label['text'],
                         newval if self.wantobjects else str(newval))
        self.assertGreater(x.scale.coords()[0], curr_xcoord)
        self.assertEqual(x.scale.coords()[0],
                         int(x.label.place_info()['x']))

        # value outside range
        if self.wantobjects:
            conv = lambda x: x
        else:
            conv = int
        x.value = conv(x.scale['to']) + 1  # no changes shouldn't happen
        x.update()
        self.assertEqual(conv(x.label['text']), newval)
        self.assertEqual(x.scale.coords()[0],
                         int(x.label.place_info()['x']))

        x.destroy()

    def test_resize(self):
        """The label keeps tracking the slider when the window is resized."""
        x = ttk.LabeledScale(self.root)
        x.pack(expand=True, fill='both')
        x.wait_visibility()
        x.update()

        width, height = x.master.winfo_width(), x.master.winfo_height()
        width_new, height_new = width * 2, height * 2

        x.value = 3
        x.update()
        x.master.wm_geometry("%dx%d" % (width_new, height_new))
        self.assertEqual(int(x.label.place_info()['x']),
                         x.scale.coords()[0])

        # Reset geometry
        x.master.wm_geometry("%dx%d" % (width, height))
        x.destroy()
class OptionMenuTest(AbstractTkTest, unittest.TestCase):
    """Tests for the ttk.OptionMenu widget (Python 2 / Tkinter)."""

    def setUp(self):
        super(OptionMenuTest, self).setUp()
        self.textvar = tkinter.StringVar(self.root)

    def tearDown(self):
        del self.textvar
        super(OptionMenuTest, self).tearDown()

    def test_widget_destroy(self):
        """Destroying the widget must not delete a caller-owned variable."""
        var = tkinter.StringVar(self.root)
        optmenu = ttk.OptionMenu(self.root, var)
        name = var._name
        optmenu.update_idletasks()
        optmenu.destroy()
        self.assertEqual(optmenu.tk.globalgetvar(name), var.get())
        del var
        self.assertRaises(tkinter.TclError, optmenu.tk.globalgetvar, name)

    def test_initialization(self):
        """Constructor validates kwargs and wires up menu/textvariable."""
        self.assertRaises(tkinter.TclError,
            ttk.OptionMenu, self.root, self.textvar, invalid='thing')

        optmenu = ttk.OptionMenu(self.root, self.textvar, 'b', 'a', 'b')
        self.assertEqual(optmenu._variable.get(), 'b')

        self.assertTrue(optmenu['menu'])
        self.assertTrue(optmenu['textvariable'])

        optmenu.destroy()

    def test_menu(self):
        """Menu entries, default handling, variable updates and callback."""
        items = ('a', 'b', 'c')
        default = 'a'

        optmenu = ttk.OptionMenu(self.root, self.textvar, default, *items)
        found_default = False
        for i in range(len(items)):
            value = optmenu['menu'].entrycget(i, 'value')
            self.assertEqual(value, items[i])
            if value == default:
                found_default = True
        self.assertTrue(found_default)
        optmenu.destroy()

        # default shouldn't be in menu if it is not part of values
        default = 'd'
        optmenu = ttk.OptionMenu(self.root, self.textvar, default, *items)
        curr = None
        i = 0
        while True:
            last, curr = curr, optmenu['menu'].entryconfigure(i, 'value')
            if last == curr:
                # no more menu entries
                break
            self.assertNotEqual(curr, default)
            i += 1
        self.assertEqual(i, len(items))

        # check that variable is updated correctly
        optmenu.pack()
        optmenu.wait_visibility()
        optmenu['menu'].invoke(0)
        self.assertEqual(optmenu._variable.get(), items[0])

        # changing to an invalid index shouldn't change the variable
        self.assertRaises(tkinter.TclError, optmenu['menu'].invoke, -1)
        self.assertEqual(optmenu._variable.get(), items[0])

        optmenu.destroy()

        # specifying a callback
        success = []
        def cb_test(item):
            self.assertEqual(item, items[1])
            success.append(True)
        optmenu = ttk.OptionMenu(self.root, self.textvar, 'a', command=cb_test,
                                 *items)
        optmenu['menu'].invoke(1)
        if not success:
            self.fail("Menu callback not invoked")

        optmenu.destroy()
# Test classes that create real Tk widgets, exported for the test runner.
tests_gui = (LabeledScaleTest, OptionMenuTest)

if __name__ == "__main__":
    run_unittest(*tests_gui)
| {
"content_hash": "6d887d4b306467de554abaed66c923e0",
"timestamp": "",
"source": "github",
"line_count": 291,
"max_line_length": 79,
"avg_line_length": 36.72852233676976,
"alnum_prop": 0.5811190119760479,
"repo_name": "Jeff-Tian/mybnb",
"id": "9273febbac8df97a7c582d88ae0a705783fe463a",
"size": "10688",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Python27/Lib/lib-tk/test/test_ttk/test_extensions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "455330"
},
{
"name": "Batchfile",
"bytes": "6263"
},
{
"name": "C",
"bytes": "2304983"
},
{
"name": "C#",
"bytes": "8440"
},
{
"name": "C++",
"bytes": "31815"
},
{
"name": "CSS",
"bytes": "30628"
},
{
"name": "Cucumber",
"bytes": "248616"
},
{
"name": "F#",
"bytes": "2310"
},
{
"name": "Forth",
"bytes": "506"
},
{
"name": "GLSL",
"bytes": "1040"
},
{
"name": "Groff",
"bytes": "31983"
},
{
"name": "HTML",
"bytes": "376863"
},
{
"name": "JavaScript",
"bytes": "20239"
},
{
"name": "M4",
"bytes": "67848"
},
{
"name": "Makefile",
"bytes": "142926"
},
{
"name": "Mask",
"bytes": "969"
},
{
"name": "PLSQL",
"bytes": "22886"
},
{
"name": "Python",
"bytes": "19913027"
},
{
"name": "REXX",
"bytes": "3862"
},
{
"name": "Ruby",
"bytes": "14954382"
},
{
"name": "Shell",
"bytes": "366205"
},
{
"name": "Tcl",
"bytes": "2150972"
},
{
"name": "TeX",
"bytes": "230259"
},
{
"name": "Visual Basic",
"bytes": "494"
},
{
"name": "XSLT",
"bytes": "3736"
},
{
"name": "Yacc",
"bytes": "14342"
}
],
"symlink_target": ""
} |
from .core.log import RedisPub
class RedisPubMixin(object):
    """Mixin adding a lazily-constructed RedisPub to a Flask or Celery app."""

    def get_conf(self, name):
        """Read configuration value *name* from the host application.

        Works for both Flask (``app.config``) and Celery (``app.conf``);
        asserts if the mixin is attached to anything else.
        """
        try:
            from flask import Flask
        except ImportError:
            pass
        else:
            if isinstance(self, Flask):
                return self.config[name]
        try:
            from celery import Celery
        except ImportError:
            pass
        else:
            if isinstance(self, Celery):
                return self.conf[name]
        assert False

    @property
    def redispub(self):
        """Per-app RedisPub instance, created on first access."""
        try:
            return self._redispub
        except AttributeError:
            self._redispub = RedisPub(
                bubble=self.get_conf('TORABOT_BUBBLE_LOG')
            )
            return self._redispub
| {
"content_hash": "490c3865d8362ae7f229727e290a92d0",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 58,
"avg_line_length": 23.82758620689655,
"alnum_prop": 0.5311143270622286,
"repo_name": "Answeror/torabot",
"id": "d1d51c399ec7170f38d24e7627e9f1eb7da83322",
"size": "691",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "torabot/app_mixins.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "174712"
},
{
"name": "JavaScript",
"bytes": "2849805"
},
{
"name": "Python",
"bytes": "552234"
},
{
"name": "Shell",
"bytes": "822"
},
{
"name": "TeX",
"bytes": "3381"
},
{
"name": "XSLT",
"bytes": "5063"
}
],
"symlink_target": ""
} |
from functools import partial
from typing import Any, Dict, Optional
from inspect import signature
import re
from django.http import HttpRequest, HttpResponse
from zerver.decorator import api_key_only_webhook_view
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_success
from zerver.lib.webhooks.common import check_send_webhook_message, \
validate_extract_webhook_http_header, UnexpectedWebhookEventType
from zerver.lib.webhooks.git import EMPTY_SHA, \
TOPIC_WITH_PR_OR_ISSUE_INFO_TEMPLATE, \
get_commits_comment_action_message, get_issue_event_message, \
get_pull_request_event_message, get_push_commits_event_message, \
get_push_tag_event_message, get_remove_branch_event_message
from zerver.models import UserProfile
def fixture_to_headers(fixture_name: str) -> Dict[str, Any]:
    """Derive the HTTP headers to replay a test fixture with.

    Build fixtures get no header (two event types share them); every other
    fixture name encodes the GitLab event title before the ``__`` separator,
    e.g. ``push_hook__x.json`` -> ``Push Hook``.
    """
    if fixture_name.startswith("build"):
        return {}  # Since there are 2 possible event types.
    event_part = fixture_name.split("__")[0]
    event_title = event_part.replace("_", " ").title()
    return {"HTTP_X_GITLAB_EVENT": event_title}
def get_push_event_body(payload: Dict[str, Any]) -> str:
    """Route a push payload: branch removal vs. a normal commit push."""
    # GitLab signals branch deletion with an all-zero "after" SHA.
    if payload.get('after') != EMPTY_SHA:
        return get_normal_push_event_body(payload)
    return get_remove_branch_event_body(payload)
def get_normal_push_event_body(payload: Dict[str, Any]) -> str:
    """Render the message for commits pushed to an existing branch."""
    diff_url = u'{}/compare/{}...{}'.format(
        get_repository_homepage(payload),
        payload['before'],
        payload['after']
    )
    formatted_commits = [
        dict(
            name=commit.get('author').get('name'),
            sha=commit.get('id'),
            message=commit.get('message'),
            url=commit.get('url'),
        )
        for commit in payload['commits']
    ]
    return get_push_commits_event_message(
        get_user_name(payload),
        diff_url,
        get_branch_name(payload),
        formatted_commits
    )
def get_remove_branch_event_body(payload: Dict[str, Any]) -> str:
    """Render the message for a branch deletion push."""
    pusher = get_user_name(payload)
    branch = get_branch_name(payload)
    return get_remove_branch_event_message(pusher, branch)
def get_tag_push_event_body(payload: Dict[str, Any]) -> str:
    """Render the message for a tag being pushed or removed."""
    # A missing checkout_sha indicates the tag was deleted.
    if payload.get('checkout_sha'):
        tag_action = "pushed"
    else:
        tag_action = "removed"
    return get_push_tag_event_message(
        get_user_name(payload),
        get_tag_name(payload),
        action=tag_action
    )
def get_issue_created_event_body(payload: Dict[str, Any],
                                 include_title: Optional[bool]=False) -> str:
    """Render the message for a newly opened (possibly confidential) issue."""
    attrs = payload['object_attributes']
    description = attrs.get('description')
    if description is not None:
        # Strip multiline hidden HTML comments left by issue templates.
        description = re.sub('<!--.*?-->', '', description, 0, re.DOTALL).rstrip()
    return get_issue_event_message(
        get_issue_user_name(payload),
        'created',
        get_object_url(payload),
        attrs.get('iid'),
        description,
        get_objects_assignee(payload),
        payload.get('assignees'),
        title=attrs.get('title') if include_title else None
    )
def get_issue_event_body(payload: Dict[str, Any], action: str,
                         include_title: Optional[bool]=False) -> str:
    """Render the short message for an issue transition (close/reopen/update)."""
    attrs = payload['object_attributes']
    return get_issue_event_message(
        get_issue_user_name(payload),
        action,
        get_object_url(payload),
        attrs.get('iid'),
        title=attrs.get('title') if include_title else None
    )
def get_merge_request_updated_event_body(payload: Dict[str, Any],
                                         include_title: Optional[bool]=False) -> str:
    """Dispatch an MR "update" event to the appropriate formatter."""
    # The presence of 'oldrev' means commits were pushed to the MR branch.
    if payload['object_attributes'].get('oldrev'):
        return get_merge_request_event_body(
            payload, "added commit(s) to",
            include_title=include_title
        )
    return get_merge_request_open_or_updated_body(
        payload, "updated",
        include_title=include_title
    )
def get_merge_request_event_body(payload: Dict[str, Any], action: str,
                                 include_title: Optional[bool]=False) -> str:
    """Render the short-form message for a merge-request state change."""
    mr = payload['object_attributes']
    return get_pull_request_event_message(
        get_issue_user_name(payload),
        action,
        mr.get('url'),
        mr.get('iid'),
        type='MR',
        title=mr.get('title') if include_title else None
    )
def get_merge_request_open_or_updated_body(payload: Dict[str, Any], action: str,
                                           include_title: Optional[bool]=False) -> str:
    """Render the long-form MR message (branches, description, assignee)."""
    mr = payload['object_attributes']
    return get_pull_request_event_message(
        get_issue_user_name(payload),
        action,
        mr.get('url'),
        mr.get('iid'),
        mr.get('source_branch'),
        mr.get('target_branch'),
        mr.get('description'),
        get_objects_assignee(payload),
        type='MR',
        title=mr.get('title') if include_title else None
    )
def get_objects_assignee(payload: Dict[str, Any]) -> Optional[str]:
    """Return the display name of the payload's assignee, if any.

    Prefers the singular 'assignee' object; otherwise takes the first entry
    of the 'assignees' list. Returns None when neither is present.
    """
    single = payload.get('assignee')
    if single:
        return single.get('name')
    multiple = payload.get('assignees')
    if multiple:
        for entry in multiple:
            return entry['name']
    return None
def get_commented_commit_event_body(payload: Dict[str, Any]) -> str:
    """Render the message for a comment left on a commit."""
    note = payload['object_attributes']
    commit = payload['commit']
    return get_commits_comment_action_message(
        get_issue_user_name(payload),
        u'[commented]({})'.format(note['url']),
        commit.get('url'),
        commit.get('id'),
        note['note'],
    )
def get_commented_merge_request_event_body(payload: Dict[str, Any],
                                           include_title: Optional[bool]=False) -> str:
    """Render the message for a comment left on a merge request."""
    note = payload['object_attributes']
    mr = payload['merge_request']
    mr_url = u'{}/merge_requests/{}'.format(
        payload['project'].get('web_url'),
        mr.get('iid')
    )
    return get_pull_request_event_message(
        get_issue_user_name(payload),
        u'[commented]({}) on'.format(note['url']),
        mr_url,
        mr.get('iid'),
        message=note['note'],
        type='MR',
        title=mr.get('title') if include_title else None
    )
def get_commented_issue_event_body(payload: Dict[str, Any],
                                   include_title: Optional[bool]=False) -> str:
    """Render the message for a comment left on an issue."""
    note = payload['object_attributes']
    issue = payload['issue']
    issue_url = u'{}/issues/{}'.format(
        payload['project'].get('web_url'),
        issue.get('iid')
    )
    return get_pull_request_event_message(
        get_issue_user_name(payload),
        u'[commented]({}) on'.format(note['url']),
        issue_url,
        issue.get('iid'),
        message=note['note'],
        type='Issue',
        title=issue.get('title') if include_title else None
    )
def get_commented_snippet_event_body(payload: Dict[str, Any],
                                     include_title: Optional[bool]=False) -> str:
    """Render the message for a comment left on a snippet."""
    note = payload['object_attributes']
    snippet = payload['snippet']
    snippet_url = u'{}/snippets/{}'.format(
        payload['project'].get('web_url'),
        snippet.get('id')
    )
    return get_pull_request_event_message(
        get_issue_user_name(payload),
        u'[commented]({}) on'.format(note['url']),
        snippet_url,
        snippet.get('id'),
        message=note['note'],
        type='Snippet',
        title=snippet.get('title') if include_title else None
    )
def get_wiki_page_event_body(payload: Dict[str, Any], action: str) -> str:
    """Render the message for a wiki page being created or updated."""
    attrs = payload['object_attributes']
    return u"{} {} [Wiki Page \"{}\"]({}).".format(
        get_issue_user_name(payload),
        action,
        attrs.get('title'),
        attrs.get('url'),
    )
def get_build_hook_event_body(payload: Dict[str, Any]) -> str:
    """Render the message for a CI build/job status change."""
    # 'created' and 'running' get friendly verbs; anything else is generic.
    verbs = {'created': 'was created', 'running': 'started'}
    build_status = payload.get('build_status')
    action = verbs.get(build_status)
    if action is None:
        action = 'changed status to {}'.format(build_status)
    return u"Build {} from {} stage {}.".format(
        payload.get('build_name'),
        payload.get('build_stage'),
        action
    )
def get_test_event_body(payload: Dict[str, Any]) -> str:
    """Render the confirmation message for GitLab's webhook test ping."""
    repo = get_repo_name(payload)
    return u"Webhook for **{repo}** has been configured successfully! :tada:".format(repo=repo)
def get_pipeline_event_body(payload: Dict[str, Any]) -> str:
    """Render the message for a pipeline status change, listing its builds."""
    verbs = {'pending': 'was created', 'running': 'started'}
    pipeline_status = payload['object_attributes'].get('status')
    action = verbs.get(pipeline_status)
    if action is None:
        action = 'changed status to {}'.format(pipeline_status)
    build_lines = [
        u"* {} - {}".format(build.get('name'), build.get('status'))
        for build in payload['builds']
    ]
    return u"Pipeline {} with build(s):\n{}.".format(action, u"\n".join(build_lines))
def get_repo_name(payload: Dict[str, Any]) -> str:
    """Name of the project this event belongs to."""
    return payload['project']['name']

def get_user_name(payload: Dict[str, Any]) -> str:
    """Display name of the pushing user (push-style payloads)."""
    return payload['user_name']

def get_issue_user_name(payload: Dict[str, Any]) -> str:
    """Display name of the acting user (issue/MR/note payloads)."""
    return payload['user']['name']

def get_repository_homepage(payload: Dict[str, Any]) -> str:
    """Web URL of the repository."""
    return payload['repository']['homepage']

def get_branch_name(payload: Dict[str, Any]) -> str:
    """Branch name extracted from the push ref."""
    ref = payload['ref']
    return ref.replace('refs/heads/', '')

def get_tag_name(payload: Dict[str, Any]) -> str:
    """Tag name extracted from the push ref."""
    ref = payload['ref']
    return ref.replace('refs/tags/', '')

def get_object_url(payload: Dict[str, Any]) -> str:
    """URL of the issue/MR/wiki object the event refers to."""
    return payload['object_attributes']['url']
# Dispatch table: qualified GitLab event name (as produced by get_event())
# -> function that renders the message body.  Handlers that need a fixed
# action string are pre-bound with functools.partial.
EVENT_FUNCTION_MAPPER = {
    'Push Hook': get_push_event_body,
    'Tag Push Hook': get_tag_push_event_body,
    'Test Hook': get_test_event_body,
    'Issue Hook open': get_issue_created_event_body,
    'Issue Hook close': partial(get_issue_event_body, action='closed'),
    'Issue Hook reopen': partial(get_issue_event_body, action='reopened'),
    'Issue Hook update': partial(get_issue_event_body, action='updated'),
    'Confidential Issue Hook open': get_issue_created_event_body,
    'Confidential Issue Hook close': partial(get_issue_event_body, action='closed'),
    'Confidential Issue Hook reopen': partial(get_issue_event_body, action='reopened'),
    'Confidential Issue Hook update': partial(get_issue_event_body, action='updated'),
    'Note Hook Commit': get_commented_commit_event_body,
    'Note Hook MergeRequest': get_commented_merge_request_event_body,
    'Note Hook Issue': get_commented_issue_event_body,
    'Confidential Note Hook Issue': get_commented_issue_event_body,
    'Note Hook Snippet': get_commented_snippet_event_body,
    'Merge Request Hook approved': partial(get_merge_request_event_body, action='approved'),
    'Merge Request Hook open': partial(get_merge_request_open_or_updated_body, action='created'),
    'Merge Request Hook update': get_merge_request_updated_event_body,
    'Merge Request Hook merge': partial(get_merge_request_event_body, action='merged'),
    'Merge Request Hook close': partial(get_merge_request_event_body, action='closed'),
    'Merge Request Hook reopen': partial(get_merge_request_event_body, action='reopened'),
    'Wiki Page Hook create': partial(get_wiki_page_event_body, action='created'),
    'Wiki Page Hook update': partial(get_wiki_page_event_body, action='updated'),
    'Job Hook': get_build_hook_event_body,
    'Build Hook': get_build_hook_event_body,
    'Pipeline Hook': get_pipeline_event_body,
}
@api_key_only_webhook_view("Gitlab")
@has_request_variables
def api_gitlab_webhook(request: HttpRequest, user_profile: UserProfile,
                       payload: Dict[str, Any]=REQ(argument_type='body'),
                       branches: Optional[str]=REQ(default=None),
                       user_specified_topic: Optional[str]=REQ("topic", default=None)) -> HttpResponse:
    """Main GitLab webhook entry point.

    Resolves the event from the X-Gitlab-Event header plus payload, renders
    the message body and topic, and forwards them to the stream/topic chosen
    by check_send_webhook_message.  Returns success even when the event is
    filtered out (e.g. a push to a branch not in `branches`).
    """
    event = get_event(request, payload, branches)
    if event is not None:
        event_body_function = get_body_based_on_event(event)
        # Only pass include_title to body formatters that accept it; a
        # user-specified topic means titles must be embedded in the body.
        if 'include_title' in signature(event_body_function).parameters:
            body = event_body_function(
                payload,
                include_title=user_specified_topic is not None
            )
        else:
            body = event_body_function(payload)
        topic = get_subject_based_on_event(event, payload)
        check_send_webhook_message(request, user_profile, topic, body)
    return json_success()
def get_body_based_on_event(event: str) -> Any:
    """Look up the body-rendering function registered for *event*."""
    handler = EVENT_FUNCTION_MAPPER[event]
    return handler
def get_subject_based_on_event(event: str, payload: Dict[str, Any]) -> str:
    """Derive the Zulip topic for *event* from the payload.

    Push/CI events use "repo / branch"; issue, MR, and snippet events use
    the shared TOPIC_WITH_PR_OR_ISSUE_INFO_TEMPLATE; everything else falls
    back to the bare repository name.
    """
    if event == 'Push Hook':
        return u"{} / {}".format(get_repo_name(payload), get_branch_name(payload))
    if event in ('Job Hook', 'Build Hook'):
        return u"{} / {}".format(payload['repository'].get('name'), get_branch_name(payload))
    if event == 'Pipeline Hook':
        ref = payload['object_attributes'].get('ref').replace('refs/heads/', '')
        return u"{} / {}".format(get_repo_name(payload), ref)
    if event.startswith('Merge Request Hook'):
        attrs = payload['object_attributes']
        return TOPIC_WITH_PR_OR_ISSUE_INFO_TEMPLATE.format(
            repo=get_repo_name(payload),
            type='MR',
            id=attrs.get('iid'),
            title=attrs.get('title')
        )
    if event.startswith('Issue Hook') or event.startswith('Confidential Issue Hook'):
        attrs = payload['object_attributes']
        return TOPIC_WITH_PR_OR_ISSUE_INFO_TEMPLATE.format(
            repo=get_repo_name(payload),
            type='Issue',
            id=attrs.get('iid'),
            title=attrs.get('title')
        )
    if event in ('Note Hook Issue', 'Confidential Note Hook Issue'):
        issue = payload['issue']
        return TOPIC_WITH_PR_OR_ISSUE_INFO_TEMPLATE.format(
            repo=get_repo_name(payload),
            type='Issue',
            id=issue.get('iid'),
            title=issue.get('title')
        )
    if event == 'Note Hook MergeRequest':
        mr = payload['merge_request']
        return TOPIC_WITH_PR_OR_ISSUE_INFO_TEMPLATE.format(
            repo=get_repo_name(payload),
            type='MR',
            id=mr.get('iid'),
            title=mr.get('title')
        )
    if event == 'Note Hook Snippet':
        snippet = payload['snippet']
        return TOPIC_WITH_PR_OR_ISSUE_INFO_TEMPLATE.format(
            repo=get_repo_name(payload),
            type='Snippet',
            id=snippet.get('id'),
            title=snippet.get('title')
        )
    return get_repo_name(payload)
def get_event(request: HttpRequest, payload: Dict[str, Any], branches: Optional[str]) -> Optional[str]:
    """Resolve the qualified event name for this request.

    Returns None when the event should be silently ignored (a push to a
    branch not listed in *branches*); raises UnexpectedWebhookEventType for
    events with no registered handler.
    """
    event = validate_extract_webhook_http_header(request, 'X_GITLAB_EVENT', 'GitLab')
    if event in ['Confidential Issue Hook', 'Issue Hook', 'Merge Request Hook', 'Wiki Page Hook']:
        # These events are further qualified by their action (open/close/...).
        action = payload['object_attributes'].get('action')
        event = "{} {}".format(event, action)
    elif event in ['Confidential Note Hook', 'Note Hook']:
        # Note events are qualified by what was commented on.
        action = payload['object_attributes'].get('noteable_type')
        event = "{} {}".format(event, action)
    elif event == 'Push Hook':
        if branches is not None:
            branch = get_branch_name(payload)
            # Substring membership, matching the original str.find() check.
            if branch not in branches:
                return None
    if event in EVENT_FUNCTION_MAPPER:
        return event
    raise UnexpectedWebhookEventType('GitLab', event)
| {
"content_hash": "f808517d671da2b45673e88ea887490d",
"timestamp": "",
"source": "github",
"line_count": 399,
"max_line_length": 103,
"avg_line_length": 39.899749373433586,
"alnum_prop": 0.6197236180904523,
"repo_name": "tommyip/zulip",
"id": "8dd99cdd94fc92b9fc669203de5bc34689fdfc8d",
"size": "15920",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zerver/webhooks/gitlab/view.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "400301"
},
{
"name": "Dockerfile",
"bytes": "2939"
},
{
"name": "Emacs Lisp",
"bytes": "157"
},
{
"name": "HTML",
"bytes": "718599"
},
{
"name": "JavaScript",
"bytes": "3092201"
},
{
"name": "Perl",
"bytes": "398763"
},
{
"name": "Puppet",
"bytes": "71123"
},
{
"name": "Python",
"bytes": "6889539"
},
{
"name": "Ruby",
"bytes": "6110"
},
{
"name": "Shell",
"bytes": "119898"
},
{
"name": "TypeScript",
"bytes": "14645"
}
],
"symlink_target": ""
} |
import collections
import sys
import unicodedata
counter = collections.Counter()
# Tally every character from stdin, ignoring leading/trailing whitespace.
for line in sys.stdin:
    counter.update(line.strip())
# Most frequent first: char, count, Unicode name (or placeholder), category.
for ch, count in counter.most_common():
    print("{} {} {} {}".format(ch, count, unicodedata.name(ch, "WHOKNOWS"), unicodedata.category(ch)))
| {
"content_hash": "162a70d4b01c80a8c0be26450affa181",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 97,
"avg_line_length": 23.076923076923077,
"alnum_prop": 0.66,
"repo_name": "psmit/kaldi-recipes",
"id": "5bc0c4c9fdbc07cc794bd855f0059ffa1e0649be",
"size": "324",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "common/print_chars.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "5556"
},
{
"name": "Python",
"bytes": "83019"
},
{
"name": "Shell",
"bytes": "367059"
}
],
"symlink_target": ""
} |
from django.db import models
from django.db.models.deletion import ProtectedError
from ..config import HARD_DELETE_NOCASCADE
from ..models import SafeDeleteModel
from .testcase import SafeDeleteTestCase
class NoCascadeModel(SafeDeleteModel):
    # Parent model under test: hard-delete unless related objects exist.
    _safedelete_policy = HARD_DELETE_NOCASCADE
class CascadeChild(models.Model):
    # CASCADE child: its existence forces the parent into a soft delete.
    parent = models.ForeignKey(
        NoCascadeModel,
        on_delete=models.CASCADE
    )
class ProtectedChild(models.Model):
    # PROTECT child: deleting the parent raises ProtectedError.
    parent = models.ForeignKey(
        NoCascadeModel,
        on_delete=models.PROTECT
    )
class NullChild(models.Model):
    # SET_NULL child: does not block a hard delete of the parent.
    parent = models.ForeignKey(
        NoCascadeModel,
        on_delete=models.SET_NULL,
        null=True
    )
class DefaultChild(models.Model):
    # SET_DEFAULT child: falls back to its default (None) on parent delete.
    parent = models.ForeignKey(
        NoCascadeModel,
        on_delete=models.SET_DEFAULT,
        null=True,
        default=None
    )
def get_default():
    """Sentinel callable for models.SET(): always resolves to None."""
    return None
class SetChild(models.Model):
    # SET(callable) child: replaced by get_default()'s value on parent delete.
    parent = models.ForeignKey(
        NoCascadeModel,
        on_delete=models.SET(get_default),
        null=True,
        default=None
    )
class NoCascadeTestCase(SafeDeleteTestCase):
    """Exercise HARD_DELETE_NOCASCADE against each on_delete policy.

    NOTE(review): assertSoftDelete/assertHardDelete come from
    SafeDeleteTestCase — presumably they call delete() and verify
    persistence; confirm against that base class.
    """

    def setUp(self):
        self.instance = NoCascadeModel.objects.create()

    def test_cascade(self):
        # With a CASCADE child present, deletion degrades to a soft delete;
        # once the child is removed, a hard delete succeeds.
        cascade_child = CascadeChild.objects.create(
            parent=self.instance
        )
        self.assertSoftDelete(self.instance)
        cascade_child.delete()
        self.assertHardDelete(self.instance)

    def test_protected(self):
        # A PROTECT child makes deletion raise instead of soft-deleting.
        ProtectedChild.objects.create(
            parent=self.instance
        )
        self.assertRaises(
            ProtectedError,
            self.instance.delete
        )

    def test_null(self):
        # SET_NULL children do not block the hard delete.
        NullChild.objects.create(
            parent=self.instance
        )
        self.assertHardDelete(self.instance)

    def test_default(self):
        # SET_DEFAULT children do not block the hard delete.
        DefaultChild.objects.create(
            parent=self.instance
        )
        self.assertHardDelete(self.instance)

    def test_set(self):
        # SET(callable) children do not block the hard delete.
        SetChild.objects.create(
            parent=self.instance
        )
        self.assertHardDelete(self.instance)
| {
"content_hash": "0869636db9ad6329cfbc4c910bad0ddc",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 55,
"avg_line_length": 22.357894736842105,
"alnum_prop": 0.641713747645951,
"repo_name": "makinacorpus/django-safedelete",
"id": "f1d70531c50ff8683bb905525c837cc83035b604",
"size": "2124",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "safedelete/tests/test_no_cascade.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "90"
},
{
"name": "HTML",
"bytes": "823"
},
{
"name": "Python",
"bytes": "108362"
}
],
"symlink_target": ""
} |
import re
import sqlparse
from sqlparse.tokens import Name
from collections import defaultdict
from .sql_literals.main import get_literals
# Matches any run of whitespace, including across line breaks.
white_space_regex = re.compile("\\s+", re.MULTILINE)

def _compile_regex(keyword):
    """Compile a case-insensitive, whitespace-tolerant whole-word pattern.

    Interior whitespace in multi-word keywords (e.g. "GROUP BY") becomes a
    \\s+ wildcard, and the whole keyword is anchored with word boundaries.
    """
    flexible = white_space_regex.sub(r"\\s+", keyword)
    return re.compile("\\b{}\\b".format(flexible), re.MULTILINE | re.IGNORECASE)
# Literal keyword strings shipped with the package.
keywords = get_literals("keywords")
# Pre-compile one whole-word regex per keyword so per-text counting is cheap.
keyword_regexs = {kw: _compile_regex(kw) for kw in keywords}
class PrevalenceCounter(object):
    """Track how often SQL keywords and identifiers appear in observed text."""

    def __init__(self):
        self.keyword_counts = defaultdict(int)
        self.name_counts = defaultdict(int)

    def update(self, text):
        """Fold both keyword and identifier occurrences from *text*."""
        self.update_keywords(text)
        self.update_names(text)

    def update_names(self, text):
        """Count identifier tokens using sqlparse's lexer."""
        for statement in sqlparse.parse(text):
            for tok in statement.flatten():
                if tok.ttype in Name:
                    self.name_counts[tok.value] += 1

    def clear_names(self):
        """Drop all accumulated identifier counts."""
        self.name_counts = defaultdict(int)

    def update_keywords(self, text):
        """Count keyword occurrences with our own regexes.

        sqlparse is database-agnostic, so keyword detection is done here
        with the precompiled keyword_regexs table.
        """
        for keyword, regex in keyword_regexs.items():
            self.keyword_counts[keyword] += sum(1 for _ in regex.finditer(text))

    def keyword_count(self, keyword):
        return self.keyword_counts[keyword]

    def name_count(self, name):
        return self.name_counts[name]
| {
"content_hash": "de4d48e4b7a8d18aa2e2fbc0fb48cfa1",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 79,
"avg_line_length": 29.78846153846154,
"alnum_prop": 0.6507424144609425,
"repo_name": "dbcli/sqlcomplete",
"id": "d9231d67a83637a816a468152879d89e12a171ff",
"size": "1549",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sqlcomplete/prioritization.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "93621"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
from setuptools.command.install import install as _install
def read(filename):
    """Return the full text of *filename*.

    Used to assemble the long_description from the docs files.  Reads with
    the platform default encoding, matching the original behaviour.
    """
    # The previous try/except NameError fallback was dead code: open() never
    # raises NameError, and the fallback called open() identically.
    with open(filename) as f:
        return f.read()
# PyPI long description: the three docs files joined by blank lines.
long_description = u'\n\n'.join([read('README.rst'),
                                 read('CREDITS.rst'),
                                 read('CHANGES.rst')])
class Install(_install):
    """Custom install command: runs the standard install, then logs."""

    def run(self):
        _install.run(self)
        print("Post Install")
version = '0.3.1.dev0'
setup(
name='saruman',
version=version,
packages=find_packages(),
url='https://github.com/tychota/saruman',
license='MIT',
author='tychota',
author_email='tycho.tatitscheff+saruman@gadz.org',
description='A firewall that leverage AMQP workqueue ! Build by iresam for iresam !',
long_description=long_description,
install_requires=[
'celery==3.1.19',
'plumbum==1.6.1.post0',
'click==6.2',
'colorlog==2.6.0',
'zest.releaser==6.4',
'coverage==4.0.3',
'pyyaml==3.11',
'pygments',
'sphinx_bootstrap_theme'
],
test_suite="tests",
entry_points='''
[console_scripts]
saruman=saruman.__main__:cli
''',
cmdclass={'install': Install},
zip_safe=False,
keywords=['firewall', 'amqp', 'nftables', 'dhcp, reverse-proxy'],
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Environment :: Console',
'Intended Audience :: System Administrators',
'Natural Language :: English',
'Natural Language :: French',
'Operating System :: POSIX :: Linux',
'Topic :: Internet :: Name Service (DNS)',
'Topic :: Internet :: Proxy Servers',
'Topic :: System :: Networking',
'Topic :: System :: Networking :: Firewalls',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
)
| {
"content_hash": "8be6621563c8e3276904f4861e919da8",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 93,
"avg_line_length": 32.391304347826086,
"alnum_prop": 0.5194630872483221,
"repo_name": "tychota/saruman",
"id": "3badbbf77dc3c65c0e418569e911549f76cdcddb",
"size": "2258",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19798"
}
],
"symlink_target": ""
} |
"""Python wrappers for reader Datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import convert
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.util.tf_export import tf_export
# TODO(b/64974358): Increase default buffer size to 256 MB.
_DEFAULT_READER_BUFFER_SIZE_BYTES = 256 * 1024 # 256 KB
@tf_export("data.TextLineDataset")
class TextLineDataset(dataset_ops.Dataset):
  """A `Dataset` comprising lines from one or more text files."""

  def __init__(self, filenames, compression_type=None, buffer_size=None):
    """Creates a `TextLineDataset`.

    Args:
      filenames: A `tf.string` tensor containing one or more filenames.
      compression_type: (Optional.) A `tf.string` scalar evaluating to one of
        `""` (no compression), `"ZLIB"`, or `"GZIP"`.
      buffer_size: (Optional.) A `tf.int64` scalar denoting the number of bytes
        to buffer. A value of 0 results in the default buffering values chosen
        based on the compression type.
    """
    super(TextLineDataset, self).__init__()
    self._filenames = ops.convert_to_tensor(
        filenames, dtype=dtypes.string, name="filenames")
    # Empty string is the kernel's sentinel for "no compression".
    self._compression_type = convert.optional_param_to_tensor(
        "compression_type",
        compression_type,
        argument_default="",
        argument_dtype=dtypes.string)
    # Defaults to _DEFAULT_READER_BUFFER_SIZE_BYTES (256 KB).
    self._buffer_size = convert.optional_param_to_tensor(
        "buffer_size", buffer_size, _DEFAULT_READER_BUFFER_SIZE_BYTES)

  def _as_variant_tensor(self):
    # Materialize the dataset op from the captured tensors.
    return gen_dataset_ops.text_line_dataset(
        self._filenames, self._compression_type, self._buffer_size)

  def _inputs(self):
    # Source dataset: no upstream input datasets.
    return []

  @property
  def output_classes(self):
    return ops.Tensor

  @property
  def output_shapes(self):
    # Each element is one line of text, i.e. a scalar string.
    return tensor_shape.scalar()

  @property
  def output_types(self):
    return dtypes.string
class _TFRecordDataset(dataset_ops.Dataset):
  """A `Dataset` comprising records from one or more TFRecord files.

  Internal single-file implementation; the public `TFRecordDataset` wraps
  this to add multi-file and parallel-read support.
  """

  def __init__(self, filenames, compression_type=None, buffer_size=None):
    """Creates a `TFRecordDataset`.

    Args:
      filenames: A `tf.string` tensor containing one or more filenames.
      compression_type: (Optional.) A `tf.string` scalar evaluating to one of
        `""` (no compression), `"ZLIB"`, or `"GZIP"`.
      buffer_size: (Optional.) A `tf.int64` scalar representing the number of
        bytes in the read buffer. 0 means no buffering.
    """
    super(_TFRecordDataset, self).__init__()
    # Force the type to string even if filenames is an empty list.
    self._filenames = ops.convert_to_tensor(
        filenames, dtypes.string, name="filenames")
    self._compression_type = convert.optional_param_to_tensor(
        "compression_type",
        compression_type,
        argument_default="",
        argument_dtype=dtypes.string)
    # Defaults to _DEFAULT_READER_BUFFER_SIZE_BYTES (256 KB).
    self._buffer_size = convert.optional_param_to_tensor(
        "buffer_size",
        buffer_size,
        argument_default=_DEFAULT_READER_BUFFER_SIZE_BYTES)

  def _as_variant_tensor(self):
    return gen_dataset_ops.tf_record_dataset(
        self._filenames, self._compression_type, self._buffer_size)

  def _inputs(self):
    # Source dataset: no upstream input datasets.
    return []

  @property
  def output_classes(self):
    return ops.Tensor

  @property
  def output_shapes(self):
    # Each element is one serialized record, i.e. a scalar string.
    return tensor_shape.TensorShape([])

  @property
  def output_types(self):
    return dtypes.string
class ParallelInterleaveDataset(dataset_ops.InterleaveDataset):
  """A `Dataset` that maps a function over its input and flattens the result."""

  def __init__(self, input_dataset, map_func, cycle_length, block_length,
               sloppy, buffer_output_elements, prefetch_input_elements):
    """See `tf.data.experimental.parallel_interleave()` for details."""
    super(ParallelInterleaveDataset, self).__init__(input_dataset, map_func,
                                                    cycle_length, block_length)
    # sloppy=True allows non-deterministic element order for throughput.
    self._sloppy = ops.convert_to_tensor(
        sloppy, dtype=dtypes.bool, name="sloppy")
    # Buffering defaults scale with the interleave parameters.
    self._buffer_output_elements = convert.optional_param_to_tensor(
        "buffer_output_elements",
        buffer_output_elements,
        argument_default=2 * block_length)
    self._prefetch_input_elements = convert.optional_param_to_tensor(
        "prefetch_input_elements",
        prefetch_input_elements,
        argument_default=2 * cycle_length)

  def _as_variant_tensor(self):
    # pylint: disable=protected-access
    return gen_dataset_ops.parallel_interleave_dataset(
        self._input_dataset._as_variant_tensor(),
        self._map_func.captured_inputs,
        self._cycle_length,
        self._block_length,
        self._sloppy,
        self._buffer_output_elements,
        self._prefetch_input_elements,
        f=self._map_func,
        **dataset_ops.flat_structure(self))
    # pylint: enable=protected-access

  def _transformation_name(self):
    return "tf.data.experimental.parallel_interleave()"
@tf_export("data.TFRecordDataset")
class TFRecordDataset(dataset_ops.Dataset):
"""A `Dataset` comprising records from one or more TFRecord files."""
def __init__(self, filenames, compression_type=None, buffer_size=None,
num_parallel_reads=None):
"""Creates a `TFRecordDataset` to read for one or more TFRecord files.
NOTE: The `num_parallel_reads` argument can be used to improve performance
when reading from a remote filesystem.
Args:
filenames: A `tf.string` tensor or `tf.data.Dataset` containing one or
more filenames.
compression_type: (Optional.) A `tf.string` scalar evaluating to one of
`""` (no compression), `"ZLIB"`, or `"GZIP"`.
buffer_size: (Optional.) A `tf.int64` scalar representing the number of
bytes in the read buffer. 0 means no buffering.
num_parallel_reads: (Optional.) A `tf.int64` scalar representing the
number of files to read in parallel. Defaults to reading files
sequentially.
Raises:
TypeError: If any argument does not have the expected type.
ValueError: If any argument does not have the expected shape.
"""
super(TFRecordDataset, self).__init__()
if isinstance(filenames, dataset_ops.Dataset):
if filenames.output_types != dtypes.string:
raise TypeError(
"`filenames` must be a `tf.data.Dataset` of `tf.string` elements.")
if not filenames.output_shapes.is_compatible_with(tensor_shape.scalar()):
raise ValueError(
"`filenames` must be a `tf.data.Dataset` of scalar `tf.string` "
"elements.")
else:
filenames = ops.convert_to_tensor(filenames, dtype=dtypes.string)
filenames = array_ops.reshape(filenames, [-1], name="flat_filenames")
filenames = dataset_ops.Dataset.from_tensor_slices(filenames)
self._filenames = filenames
self._compression_type = compression_type
self._buffer_size = buffer_size
self._num_parallel_reads = num_parallel_reads
def read_one_file(filename):
return _TFRecordDataset(filename, compression_type, buffer_size)
if num_parallel_reads is None:
self._impl = filenames.flat_map(read_one_file)
else:
self._impl = ParallelInterleaveDataset(
filenames, read_one_file, cycle_length=num_parallel_reads,
block_length=1, sloppy=False, buffer_output_elements=None,
prefetch_input_elements=None)
def _clone(self,
filenames=None,
compression_type=None,
buffer_size=None,
num_parallel_reads=None):
return TFRecordDataset(filenames or self._filenames,
compression_type or self._compression_type,
buffer_size or self._buffer_size,
num_parallel_reads or self._num_parallel_reads)
def _as_variant_tensor(self):
return self._impl._as_variant_tensor() # pylint: disable=protected-access
def _inputs(self):
return self._impl._inputs() # pylint: disable=protected-access
@property
def output_classes(self):
return self._impl.output_classes
@property
def output_shapes(self):
  # Element shapes mirror the wrapped dataset implementation.
  return self._impl.output_shapes
@property
def output_types(self):
  # Element dtypes mirror the wrapped dataset implementation.
  return self._impl.output_types
@tf_export("data.FixedLengthRecordDataset")
class FixedLengthRecordDataset(dataset_ops.Dataset):
  """A `Dataset` of fixed-length records from one or more binary files."""

  def __init__(self,
               filenames,
               record_bytes,
               header_bytes=None,
               footer_bytes=None,
               buffer_size=None):
    """Creates a `FixedLengthRecordDataset`.

    Args:
      filenames: A `tf.string` tensor containing one or more filenames.
      record_bytes: A `tf.int64` scalar representing the number of bytes in
        each record.
      header_bytes: (Optional.) A `tf.int64` scalar representing the number of
        bytes to skip at the start of a file.
      footer_bytes: (Optional.) A `tf.int64` scalar representing the number of
        bytes to ignore at the end of a file.
      buffer_size: (Optional.) A `tf.int64` scalar representing the number of
        bytes to buffer when reading.
    """
    super(FixedLengthRecordDataset, self).__init__()
    self._filenames = ops.convert_to_tensor(
        filenames, dtype=dtypes.string, name="filenames")
    self._record_bytes = ops.convert_to_tensor(
        record_bytes, dtype=dtypes.int64, name="record_bytes")
    # Optional parameters default to 0 bytes (no header/footer skipping)
    # or the module-level default buffer size, respectively.
    self._header_bytes = convert.optional_param_to_tensor(
        "header_bytes", header_bytes)
    self._footer_bytes = convert.optional_param_to_tensor(
        "footer_bytes", footer_bytes)
    self._buffer_size = convert.optional_param_to_tensor(
        "buffer_size", buffer_size, _DEFAULT_READER_BUFFER_SIZE_BYTES)

  def _as_variant_tensor(self):
    # Build the underlying reader op from the converted scalar tensors.
    return gen_dataset_ops.fixed_length_record_dataset(
        self._filenames, self._header_bytes, self._record_bytes,
        self._footer_bytes, self._buffer_size)

  def _inputs(self):
    # A source dataset: no input datasets.
    return []

  @property
  def output_classes(self):
    # Each element is a single `tf.Tensor`.
    return ops.Tensor

  @property
  def output_shapes(self):
    # Each element is a scalar (one raw record).
    return tensor_shape.scalar()

  @property
  def output_types(self):
    # Records are produced as raw byte strings.
    return dtypes.string
| {
"content_hash": "2e7466fd0511962df7bcf4d176ae28ac",
"timestamp": "",
"source": "github",
"line_count": 289,
"max_line_length": 80,
"avg_line_length": 36.463667820069205,
"alnum_prop": 0.6696716644524577,
"repo_name": "dancingdan/tensorflow",
"id": "d08da6704caf8b6c3bc94b49d0fce6ecb8157a75",
"size": "11227",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "tensorflow/python/data/ops/readers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3325"
},
{
"name": "Batchfile",
"bytes": "10132"
},
{
"name": "C",
"bytes": "339398"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "49741628"
},
{
"name": "CMake",
"bytes": "195409"
},
{
"name": "Dockerfile",
"bytes": "36386"
},
{
"name": "Go",
"bytes": "1254047"
},
{
"name": "HTML",
"bytes": "4681865"
},
{
"name": "Java",
"bytes": "867093"
},
{
"name": "Jupyter Notebook",
"bytes": "2604735"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "58612"
},
{
"name": "Objective-C",
"bytes": "15650"
},
{
"name": "Objective-C++",
"bytes": "99243"
},
{
"name": "PHP",
"bytes": "1357"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "41593453"
},
{
"name": "Ruby",
"bytes": "553"
},
{
"name": "Shell",
"bytes": "476832"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
} |
from .memory_usage import Plugin
| {
"content_hash": "a6e46778c05931e7bb4c337687c5fc86",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 32,
"avg_line_length": 33,
"alnum_prop": 0.8181818181818182,
"repo_name": "camerongray1515/Honours-Project",
"id": "b9bc7fb566ded2abb9ebbefe6f1cd6968a7a59f0",
"size": "33",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "application/prophasis_agent/prophasis_agent/plugin_repo/memory_usage/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "503322"
},
{
"name": "HTML",
"bytes": "1877635"
},
{
"name": "JavaScript",
"bytes": "3021685"
},
{
"name": "Lua",
"bytes": "6257"
},
{
"name": "PHP",
"bytes": "1684"
},
{
"name": "Python",
"bytes": "162224"
},
{
"name": "TeX",
"bytes": "54472"
}
],
"symlink_target": ""
} |
import importlib
import inspect
import functools
from .symtab import *
ATTRIB_COMPONENT = '_pype_component'


def component(func):
    """Mark *func* as exposable as a component in PyPE.

    The marker is stored in a ``_attributes`` dict on the function object
    and later inspected by :func:`is_component`.  The function itself is
    returned unchanged, so this works as a decorator.

    Parameters
    ----------
    func: function
        Function to be marked as compatible for exposing as a component.
    """
    func._attributes = {ATTRIB_COMPONENT: True}
    return func
def is_component(func):
    """Return True if the @component decorator was applied to *func*.

    Fixes two defects in the original: it returned an implicit ``None``
    (instead of ``False``) for callables without ``_attributes``, and it
    raised ``KeyError`` when ``_attributes`` existed but lacked the
    component marker.  Always returns a bool now; callers doing truthiness
    checks are unaffected.

    Parameters
    ----------
    func: function
        Function to be checked.
    """
    if hasattr(func, '_attributes'):
        return func._attributes.get(ATTRIB_COMPONENT, False) is True
    return False
class LibraryImporter(object):
    """Imports a Python module and registers its @component-marked callables.

    ``Symbol`` and ``SymbolType`` come from the wildcard import of
    ``.symtab`` (not visible in this file excerpt) — presumably a symbol
    table abstraction; confirm against symtab.py.
    """

    def __init__(self, modname=None):
        # No module is loaded until import_module() is called (either here
        # via the optional modname, or later by the caller).
        self.mod = None
        if modname is not None:
            self.import_module(modname)

    def import_module(self, modname):
        # Load (or re-load the reference to) the named module.
        self.mod = importlib.import_module(modname)

    def add_symbols(self, symtab):
        """Add the loaded module's components to *symtab* and return it.

        Free functions marked with @component are added as
        ``SymbolType.libraryfunction`` symbols whose ref is the function
        object.  For each class in the module, every @component-marked
        method is added as ``SymbolType.librarymethod`` whose ref is the
        method (not the class).

        Parameters
        ----------
        symtab:
            Symbol table to populate; mutated in place and returned.
        """
        assert self.mod is not None, 'No module specified or loaded'
        for (name,obj) in inspect.getmembers(self.mod):
            if inspect.isroutine(obj) and is_component(obj):
                symtab.addsym( Symbol(name, SymbolType.libraryfunction, obj) )
            elif inspect.isclass(obj):
                for (methodname,method) in inspect.getmembers(obj):
                    if inspect.isroutine(method) and is_component(method):
                        symtab.addsym( Symbol(methodname, SymbolType.librarymethod, method))
        return symtab
| {
"content_hash": "a6e9891936d8dea93b826864f34e9ffd",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 84,
"avg_line_length": 28.757575757575758,
"alnum_prop": 0.6628029504741834,
"repo_name": "cs207-project/TimeSeries",
"id": "d3e063e2b74aea9a7cd5a7a3d20b4e74c0c41177",
"size": "1898",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pype/lib_import.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "351769"
},
{
"name": "C++",
"bytes": "125053"
},
{
"name": "Jupyter Notebook",
"bytes": "145413"
},
{
"name": "Python",
"bytes": "261946"
},
{
"name": "Shell",
"bytes": "1437"
}
],
"symlink_target": ""
} |
#!/usr/bin/env python3
# SPDX-License-Identifier: BSD-2-Clause
import os
import argparse
import itertools
# Makefile-fuzz-generated.am is created from this template.
# %s, %s -> space-separated list of fuzz binaries, per-target stanzas.
MAKEFILE_FUZZ = """# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c) 2018 Intel Corporation
# All rights reserved.
if ENABLE_TCTI_FUZZING
TESTS_FUZZ = %s
%s
endif # ENABLE_TCTI_FUZZING
"""
# Each fuzz target in Makefile-fuzz-generated.am is created from this
# template; interpolated with the SYS function name repeated six times.
MAKEFILE_FUZZ_TARGET = """
noinst_PROGRAMS += test/fuzz/%s.fuzz
test_fuzz_%s_fuzz_CFLAGS = $(FUZZ_CFLAGS)
test_fuzz_%s_fuzz_LDADD = $(FUZZLDADD)
nodist_test_fuzz_%s_fuzz_SOURCES = test/fuzz/main-sys.c \\
test/fuzz/%s.fuzz.c
DISTCLEANFILES += test/fuzz/%s.fuzz.c"""
# Common include definitions needed for fuzzing an SYS call
SYS_TEMPLATE_HEADER = """/* SPDX-License-Identifier: BSD-2-Clause */
/***********************************************************************
* Copyright (c) 2018, Intel Corporation
*
* All rights reserved.
***********************************************************************/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include <inttypes.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <poll.h>
#include <stdarg.h>
#include <setjmp.h>
#include "tss2_mu.h"
#include "tss2_sys.h"
#include "tss2_tcti_device.h"
#include "tss2-tcti/tcti-common.h"
#include "tss2-tcti/tcti-device.h"
#define LOGMODULE fuzz
#include "tss2_tcti.h"
#include "util/log.h"
#include "test.h"
#include "test-options.h"
#include "context-util.h"
#include "tss2-sys/sysapi_util.h"
#include "tcti/tcti-fuzzing.h"
int
test_invoke (
TSS2_SYS_CONTEXT *sysContext)"""
# Template to call a SYS _Complete function which takes no arguments
# (%s -> function name).
SYS_COMPLETE_TEMPLATE_NO_ARGS = (
    SYS_TEMPLATE_HEADER
    + """
{
%s (sysContext);
return EXIT_SUCCESS;
}
"""
)
# Template to call a SYS _Complete function which takes arguments
# (%s -> declarations, function name, call arguments).
SYS_COMPLETE_TEMPLATE_HAS_ARGS = (
    SYS_TEMPLATE_HEADER
    + """
{
%s
%s (
sysContext,
%s
);
return EXIT_SUCCESS;
}
"""
)
# Template to call a SYS _Prepare function (%s/%d -> declarations,
# 2 * arg count, fuzz_fill pairs, function name, call arguments).
SYS_PREPARE_TEMPLATE_HAS_ARGS = (
    SYS_TEMPLATE_HEADER
    + """
{
int ret;
%s
ret = fuzz_fill (
sysContext,
%d,
%s
);
if (ret) {
return ret;
}
%s (
sysContext,
%s
);
return EXIT_SUCCESS;
}
"""
)
def gen_file(function):
    """Generate a C fuzz-target source for one header prototype.

    *function* is the full prototype text (first line through ``);``).
    Returns ``(function_name, file_contents)`` by dispatching to the
    generator matching the function's suffix.

    Raises:
        NotImplementedError: if the name is neither a _Complete nor a
            _Prepare call.
    """
    # The name is the first line with the return type and '(' stripped.
    header_line = function.split("\n")[0]
    function_name = header_line.replace("TSS2_RC", "").replace("(", "").strip()

    # Collect the argument declarations, excluding the sysContext parameter,
    # which every target supplies itself.
    params = function[function.index("(") + 1 : function.index(");")]
    args = [
        param.strip()
        for param in params.split(",")
        if "TSS2_SYS_CONTEXT" not in param
    ]

    # _Prepare and _Complete calls need different generation strategies.
    if "_Complete" in function_name:
        return gen_complete(function, function_name, args)
    if "_Prepare" in function_name:
        return gen_prepare(function, function_name, args)
    raise NotImplementedError("Unknown function type %r" % (function_name,))
def gen_complete(function, function_name, args):
    """Generate the C fuzz target for a SYS _Complete call.

    Returns ``(function_name, contents)``.  With no arguments beyond the
    context, the simple no-args template is used directly.
    """
    if not args:
        return function_name, SYS_COMPLETE_TEMPLATE_NO_ARGS % (function_name)

    # Stack-allocate each argument: drop pointer stars from declarations.
    decl_sep = ";\n" + " " * 4
    declarations = decl_sep.join(a.replace("*", "") for a in args) + ";"

    # Pass pointers to the stack values: '*' in the declaration becomes '&'
    # on the bare variable name.
    call_sep = ",\n" + " " * 8
    call_args = call_sep.join(a.replace("*", "&").split()[-1] for a in args)

    return (
        function_name,
        SYS_COMPLETE_TEMPLATE_HAS_ARGS % (declarations, function_name, call_args),
    )
def gen_prepare(function, function_name, args):
    """Generate the C fuzz target for a SYS _Prepare call.

    Returns ``(function_name, None)`` when the call takes no arguments
    beyond the context — there is nothing to fuzz in that case.
    """
    if not args:
        return function_name, None

    # Zero-initialize every argument on the stack; strip '*' and 'const'
    # so the declarations compile as plain values.
    decl_sep = " = {0};\n" + " " * 4
    declarations = decl_sep.join(
        a.replace("*", "").replace("const", "") for a in args
    ) + " = {0};"

    # Pointer arguments are passed by address of the stack value.
    call_sep = ",\n" + " " * 8
    call_args = call_sep.join(a.replace("*", "&").split()[-1] for a in args)

    # fuzz_fill consumes (sizeof(arg), &arg) pairs — hence 2 * len(args)
    # total variadic arguments in the template.
    fill_args = call_sep.join(
        "sizeof (%s), &%s" % tuple([a.replace("*", "").split()[-1]] * 2)
        for a in args
    )

    return (
        function_name,
        SYS_PREPARE_TEMPLATE_HAS_ARGS
        % (declarations, len(args) * 2, fill_args, function_name, call_args),
    )
def functions_from_include(header):
    """Parse out and yield each _Prepare/_Complete prototype from *header*.

    A prototype starts on a line containing ``_Complete`` or ``_Prepare``
    and ends on the line containing ``);`` (yielded with its trailing
    whitespace stripped); intermediate argument lines are kept verbatim.
    """
    with open(header, "r") as src:
        pending = ""
        for line in src:
            if "_Complete" in line or "_Prepare" in line:
                # Start of a prototype we care about.
                pending = line
            elif pending and ");" in line:
                # Closing parenthesis: emit the accumulated prototype.
                yield pending + line.rstrip()
                pending = ""
            elif pending:
                # Argument line in the middle of the prototype.
                pending += line
def gen_files(header):
    """Yield ``(function_name, contents)`` for each generatable target.

    Prototypes whose generator returns ``None`` contents (no-argument
    _Prepare calls) are skipped.
    """
    for prototype in functions_from_include(header):
        name, contents = gen_file(prototype)
        if contents is not None:
            yield name, contents
def main():
    """Entry point: write the fuzz-target C files and the fuzz Makefile."""
    parser = argparse.ArgumentParser(description="Generate libfuzzer for sys")
    parser.add_argument(
        "--header",
        default="include/tss2/tss2_sys.h",
        help="Header file to look in (default include/tss2/tss2_sys.h)",
    )
    args = parser.parse_args()

    functions = dict(gen_files(args.header))

    # One C source per generated target, placed with the other fuzz tests.
    for name, contents in functions.items():
        target_path = os.path.join("test", "fuzz", name + ".fuzz.c")
        with open(target_path, "w") as target_fd:
            target_fd.write(contents)

    # TESTS_FUZZ value: the list of compiled fuzz binaries.
    files = " \\\n ".join(
        "test/fuzz/%s.fuzz" % (name,) for name in functions
    )
    # One Makefile stanza per target; the template interpolates the
    # function name six times.
    targets = "\n".join(
        MAKEFILE_FUZZ_TARGET % tuple([name] * 6) for name in functions
    )
    with open("Makefile-fuzz-generated.am", "w") as makefile_fd:
        makefile_fd.write(MAKEFILE_FUZZ % (files, targets))
# Allow importing this module (e.g. for testing) without side effects.
if __name__ == "__main__":
    main()
| {
"content_hash": "79a03d18e92ee1ba95b7bfca5a91492a",
"timestamp": "",
"source": "github",
"line_count": 268,
"max_line_length": 86,
"avg_line_length": 30.645522388059703,
"alnum_prop": 0.6134177523438451,
"repo_name": "tpm2-software/tpm2-tss",
"id": "87807d30e87113d9e4adb8dc61d2ff6d2942a850",
"size": "8213",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "script/gen_fuzz.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "7914863"
},
{
"name": "M4",
"bytes": "32808"
},
{
"name": "Makefile",
"bytes": "46138"
},
{
"name": "Python",
"bytes": "18341"
},
{
"name": "Shell",
"bytes": "42103"
}
],
"symlink_target": ""
} |
from kinds import lowercase_first_word
class Token(object):
    """Specification for a token in the TokenSyntax file.

    Attributes:
        name: CamelCase token name.
        kind: lexer kind identifier.
        text: literal spelling, or '' for token classes with no fixed text.
        is_keyword: whether the token is a keyword.
    """

    def __init__(self, name, kind, text=None, is_keyword=False):
        self.name = name
        self.kind = kind
        self.text = text if text is not None else ""
        self.is_keyword = is_keyword

    def swift_kind(self):
        """Return the Swift-side kind name (keywords get a suffix)."""
        base = lowercase_first_word(self.name)
        return base + 'Keyword' if self.is_keyword else base
class Keyword(Token):
    """A keyword token: its kind is always 'kw_' + its literal text."""

    def __init__(self, name, text):
        super(Keyword, self).__init__(
            name, 'kw_' + text, text=text, is_keyword=True)
# The full set of Swift tokens understood by the syntax library.
SYNTAX_TOKENS = [
    # Declaration keywords.
    Keyword('Associatedtype', 'associatedtype'),
    Keyword('Class', 'class'),
    Keyword('Deinit', 'deinit'),
    Keyword('Enum', 'enum'),
    Keyword('Extension', 'extension'),
    Keyword('Func', 'func'),
    Keyword('Import', 'import'),
    Keyword('Init', 'init'),
    Keyword('Inout', 'inout'),
    Keyword('Let', 'let'),
    Keyword('Operator', 'operator'),
    Keyword('Precedencegroup', 'precedencegroup'),
    Keyword('Protocol', 'protocol'),
    Keyword('Struct', 'struct'),
    Keyword('Subscript', 'subscript'),
    Keyword('Typealias', 'typealias'),
    Keyword('Var', 'var'),
    # Access-control and declaration modifiers.
    Keyword('Fileprivate', 'fileprivate'),
    Keyword('Internal', 'internal'),
    Keyword('Private', 'private'),
    Keyword('Public', 'public'),
    Keyword('Static', 'static'),
    # Statement keywords.
    Keyword('Defer', 'defer'),
    Keyword('If', 'if'),
    Keyword('Guard', 'guard'),
    Keyword('Do', 'do'),
    Keyword('Repeat', 'repeat'),
    Keyword('Else', 'else'),
    Keyword('For', 'for'),
    Keyword('In', 'in'),
    Keyword('While', 'while'),
    Keyword('Return', 'return'),
    Keyword('Break', 'break'),
    Keyword('Continue', 'continue'),
    Keyword('Fallthrough', 'fallthrough'),
    Keyword('Switch', 'switch'),
    Keyword('Case', 'case'),
    Keyword('Default', 'default'),
    Keyword('Where', 'where'),
    Keyword('Catch', 'catch'),
    # Expression keywords.
    Keyword('As', 'as'),
    Keyword('Any', 'Any'),
    Keyword('False', 'false'),
    Keyword('Is', 'is'),
    Keyword('Nil', 'nil'),
    Keyword('Rethrows', 'rethrows'),
    Keyword('Super', 'super'),
    Keyword('Self', 'self'),
    Keyword('CapitalSelf', 'Self'),
    Keyword('Throw', 'throw'),
    Keyword('True', 'true'),
    Keyword('Try', 'try'),
    Keyword('Throws', 'throws'),
    # Magic identifier literals.
    Keyword('__FILE__', '__FILE__'),
    Keyword('__LINE__', '__LINE__'),
    Keyword('__COLUMN__', '__COLUMN__'),
    Keyword('__FUNCTION__', '__FUNCTION__'),
    Keyword('__DSO_HANDLE__', '__DSO_HANDLE__'),
    Keyword('Wildcard', '_'),
    # Pound directives and literals (keyword-like tokens).
    Token('PoundAvailable', 'pound_available', text='#available',
          is_keyword=True),
    Token('PoundEndif', 'pound_endif', text='#endif',
          is_keyword=True),
    Token('PoundElse', 'pound_else', text='#else',
          is_keyword=True),
    Token('PoundElseif', 'pound_elseif', text='#elseif',
          is_keyword=True),
    Token('PoundIf', 'pound_if', text='#if',
          is_keyword=True),
    Token('PoundSourceLocation', 'pound_sourceLocation',
          text='#sourceLocation', is_keyword=True),
    Token('PoundFile', 'pound_file', text='#file',
          is_keyword=True),
    Token('PoundLine', 'pound_line', text='#line',
          is_keyword=True),
    Token('PoundColumn', 'pound_column', text='#column',
          is_keyword=True),
    Token('PoundFunction', 'pound_function', text='#function',
          is_keyword=True),
    # Punctuation.
    Token('Arrow', 'arrow', text='->'),
    Token('AtSign', 'at_sign', text='@'),
    Token('Colon', 'colon', text=':'),
    Token('Semicolon', 'semi', text=';'),
    Token('Comma', 'comma', text=','),
    Token('Period', 'period', text='.'),
    Token('Equal', 'equal', text='='),
    Token('PrefixPeriod', 'period_prefix', text='.'),
    Token('LeftParen', 'l_paren', text='('),
    Token('RightParen', 'r_paren', text=')'),
    Token('LeftBrace', 'l_brace', text='{'),
    Token('RightBrace', 'r_brace', text='}'),
    Token('LeftSquareBracket', 'l_square', text='['),
    Token('RightSquareBracket', 'r_square', text=']'),
    Token('LeftAngle', 'l_angle', text='<'),
    Token('RightAngle', 'r_angle', text='>'),
    Token('PrefixAmpersand', 'amp_prefix', text='&'),
    Token('PostfixQuestionMark', 'question_postfix', text='?'),
    Token('InfixQuestionMark', 'question_infix', text='?'),
    Token('ExclamationMark', 'exclaim_postfix', text='!'),
    # Identifier and literal token classes (no fixed text).
    Token('Identifier', 'identifier'),
    Token('DollarIdentifier', 'dollarident'),
    Token('UnspacedBinaryOperator', 'oper_binary_unspaced'),
    Token('SpacedBinaryOperator', 'oper_binary_spaced'),
    Token('PrefixOperator', 'oper_prefix'),
    Token('PostfixOperator', 'oper_postfix'),
    Token('IntegerLiteral', 'integer_literal'),
    Token('FloatingLiteral', 'floating_literal'),
    Token('StringLiteral', 'string_literal'),
    Token('StringInterpolationAnchor', 'string_interpolation_anchor'),
    Token('ContextualKeyword', 'contextual_keyword'),
]
# Lookup from '<Name>Token' syntax-node names to their Token spec.
SYNTAX_TOKEN_MAP = {token.name + 'Token': token for token in SYNTAX_TOKENS}
| {
"content_hash": "6dc1168e3a42e3083c2bed0e86d832de",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 76,
"avg_line_length": 35.479166666666664,
"alnum_prop": 0.5866118614210217,
"repo_name": "frootloops/swift",
"id": "83411eca234e632efe7491dac12f200040a456f0",
"size": "5109",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils/gyb_syntax_support/Token.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "34"
},
{
"name": "C",
"bytes": "71563"
},
{
"name": "C++",
"bytes": "26067180"
},
{
"name": "CMake",
"bytes": "386418"
},
{
"name": "D",
"bytes": "1107"
},
{
"name": "DTrace",
"bytes": "2438"
},
{
"name": "Emacs Lisp",
"bytes": "57055"
},
{
"name": "LLVM",
"bytes": "62046"
},
{
"name": "Makefile",
"bytes": "1841"
},
{
"name": "Objective-C",
"bytes": "333187"
},
{
"name": "Objective-C++",
"bytes": "200829"
},
{
"name": "Perl",
"bytes": "2211"
},
{
"name": "Python",
"bytes": "1018108"
},
{
"name": "Ruby",
"bytes": "2091"
},
{
"name": "Shell",
"bytes": "198717"
},
{
"name": "Swift",
"bytes": "21669370"
},
{
"name": "Vim script",
"bytes": "15610"
}
],
"symlink_target": ""
} |
# Filter a counts file down to the rows whose region appears in a BED file.
import sys  # NOTE: unused, kept to preserve the module's import surface.
import argparse

ap = argparse.ArgumentParser()
ap.add_argument("bedfile", help="File with allowable regions.")
ap.add_argument("countsFile", help="Original counts file.")
ap.add_argument("countsOut", help="Filtered counts file.")
args = ap.parse_args()

# Build the set of allowed "chrom/start/end" keys once; set membership is
# O(1) per counts line.  (The original used a dict with dummy True values
# and never closed its file handles.)
with open(args.bedfile) as bed_file:
    allowed = {"/".join(line.split()[0:3]) for line in bed_file}

# Copy through only counts lines whose first column is an allowed key;
# blank lines (which would previously raise IndexError) are skipped.
with open(args.countsFile) as counts_in, open(args.countsOut, "w") as counts_out:
    for line in counts_in:
        fields = line.split()
        if fields and fields[0] in allowed:
            counts_out.write(line)
| {
"content_hash": "13d87041d156ba048dd6ac814b7701e8",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 69,
"avg_line_length": 28.789473684210527,
"alnum_prop": 0.7020109689213894,
"repo_name": "yunlongliukm/chm1_scripts",
"id": "b51ddb51242e436d4f487214adf19f3031d6e72b",
"size": "570",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Genotyping/FilterCounts.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "50662"
},
{
"name": "Java",
"bytes": "488"
},
{
"name": "Makefile",
"bytes": "15037"
},
{
"name": "Python",
"bytes": "375549"
},
{
"name": "R",
"bytes": "50744"
},
{
"name": "Shell",
"bytes": "22590"
}
],
"symlink_target": ""
} |
"""Validate device conditions."""
from __future__ import annotations
from typing import TYPE_CHECKING, Protocol, cast
import voluptuous as vol
from homeassistant.const import CONF_DOMAIN
from homeassistant.core import HomeAssistant
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.typing import ConfigType
from . import DeviceAutomationType, async_get_device_automation_platform
from .exceptions import InvalidDeviceAutomationConfig
if TYPE_CHECKING:
from homeassistant.helpers import condition
class DeviceAutomationConditionProtocol(Protocol):
    """Define the format of device_condition modules.

    Each module must define either CONDITION_SCHEMA or async_validate_condition_config.
    """

    # Voluptuous schema used to validate config when the module does not
    # provide async_validate_condition_config.
    CONDITION_SCHEMA: vol.Schema

    async def async_validate_condition_config(
        self, hass: HomeAssistant, config: ConfigType
    ) -> ConfigType:
        """Validate config."""
        raise NotImplementedError

    def async_condition_from_config(
        self, hass: HomeAssistant, config: ConfigType
    ) -> condition.ConditionCheckerType:
        """Evaluate state based on configuration."""
        raise NotImplementedError
async def async_validate_condition_config(
    hass: HomeAssistant, config: ConfigType
) -> ConfigType:
    """Validate device condition config.

    Delegates to the integration's own validator when it defines one,
    otherwise applies its CONDITION_SCHEMA.  Any
    InvalidDeviceAutomationConfig raised along the way is surfaced as a
    voluptuous error.
    """
    try:
        validated = cv.DEVICE_CONDITION_SCHEMA(config)
        platform = await async_get_device_automation_platform(
            hass, validated[CONF_DOMAIN], DeviceAutomationType.CONDITION
        )
        if hasattr(platform, "async_validate_condition_config"):
            return await platform.async_validate_condition_config(hass, validated)
        return cast(ConfigType, platform.CONDITION_SCHEMA(validated))
    except InvalidDeviceAutomationConfig as err:
        raise vol.Invalid(str(err) or "Invalid condition configuration") from err
async def async_condition_from_config(
    hass: HomeAssistant, config: ConfigType
) -> condition.ConditionCheckerType:
    """Test a device condition."""
    domain = config[CONF_DOMAIN]
    platform = await async_get_device_automation_platform(
        hass, domain, DeviceAutomationType.CONDITION
    )
    return platform.async_condition_from_config(hass, config)
| {
"content_hash": "efca19fff24b7147ed07d678134316ba",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 87,
"avg_line_length": 34.890625,
"alnum_prop": 0.7375727720555306,
"repo_name": "rohitranjan1991/home-assistant",
"id": "1c226ee8c29296e7a9ee247a50d2a725e31a7bfd",
"size": "2233",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/device_automation/condition.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1017265"
},
{
"name": "Python",
"bytes": "1051086"
},
{
"name": "Shell",
"bytes": "3946"
}
],
"symlink_target": ""
} |
import re
import math
from .base import units
class Length(object):
    """An SVG length: a number with an optional unit, resolved to pixels.

    ``units`` is the unit-to-pixel conversion table imported from .base
    (not visible here); presumably it maps every unit in the regex below,
    including '%'.

    Fixes two defects of the original:
    * Unparseable content left ``_unit = None`` and then indexed
      ``units[None]``, raising KeyError; it is now treated as zero length.
    * Empty content stored ``_computed_value = 0`` (an int), so ``float()``
      on the instance raised TypeError (``__float__`` must return a float).
    """

    def __init__(self, content, mode='x', parent=None):
        """Parse *content* (e.g. "12px", "50%").

        mode: which viewport axis percentages resolve against
              ('x', 'y', or 'xy' for the diagonal).
        parent: object providing a ``viewport`` (width, height) pair,
                required only for percentage lengths.
        """
        if not content:
            self._unit = None
            self._value = 0.0
            self._computed_value = 0.0
            return
        re_number = r'[-+]?(?:\d+(?:\.\d*)?|\.\d+)(?:[eE][-+]?\d+)?'
        re_unit = r'em|ex|px|in|cm|mm|pt|pc|%'
        re_length = r'(?P<value>%s)\s*(?P<unit>%s)*' % (re_number, re_unit)
        match = re.match(re_length, content)
        if not match:
            # Unparseable content: previously fell through to units[None]
            # and raised KeyError; treat as a zero-pixel length instead.
            self._value = 0.0
            self._unit = None
            self._computed_value = 0.0
            return
        self._value = float(match.group("value"))
        self._unit = match.group("unit") or "px"
        scale = 1
        if self._unit == '%':
            # Percentages are relative to the parent's viewport.
            if not parent:
                print("No parent for computing length using percent")
            elif hasattr(parent, 'viewport'):
                w, h = parent.viewport
                if mode == 'x':
                    scale = w
                elif mode == 'y':
                    scale = h
                elif mode == 'xy':
                    scale = math.sqrt(w*w+h*h)/math.sqrt(2.0)
            else:
                print("Parent doesn't have a viewport")
        self._computed_value = self._value * units[self._unit] * scale

    def __float__(self):
        return self._computed_value

    @property
    def value(self):
        # The length resolved to pixels.
        return self._computed_value

    def __repr__(self):
        if self._unit:
            return "%g%s" % (self._value, self._unit)
        else:
            return "%g" % (self._value)
class XLength(Length):
    # A length resolved against the horizontal axis of the parent viewport.
    def __init__(self, content, parent=None):
        Length.__init__(self, content, 'x', parent)
class YLength(Length):
    # A length resolved against the vertical axis of the parent viewport.
    def __init__(self, content, parent=None):
        Length.__init__(self, content, 'y', parent)
class XYLength(Length):
    # A length resolved against the normalized viewport diagonal.
    def __init__(self, content, parent=None):
        Length.__init__(self, content, 'xy', parent)
| {
"content_hash": "b2867463c2104223662622f20833f01f",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 75,
"avg_line_length": 28.434782608695652,
"alnum_prop": 0.48827726809378186,
"repo_name": "glumpy/glumpy",
"id": "5909ea0fe89a0e855352e39462a7130f464168cc",
"size": "2232",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "glumpy/graphics/svg/length.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "26075"
},
{
"name": "Cython",
"bytes": "660"
},
{
"name": "GLSL",
"bytes": "177965"
},
{
"name": "Makefile",
"bytes": "407"
},
{
"name": "Python",
"bytes": "1320773"
}
],
"symlink_target": ""
} |
"""
Talon.One API
Use the Talon.One API to integrate with your application and to manage applications and campaigns: - Use the operations in the [Integration API section](#integration-api) are used to integrate with our platform - Use the operation in the [Management API section](#management-api) to manage applications and campaigns. ## Determining the base URL of the endpoints The API is available at the same hostname as your Campaign Manager deployment. For example, if you are reading this page at `https://mycompany.talon.one/docs/api/`, the URL for the [updateCustomerSession](https://docs.talon.one/integration-api/#operation/updateCustomerSessionV2) endpoint is `https://mycompany.talon.one/v2/customer_sessions/{Id}` # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from talon_one.configuration import Configuration
class CampaignCollectionWithoutPayload(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'id': 'int',
'created': 'datetime',
'account_id': 'int',
'modified': 'datetime',
'description': 'str',
'name': 'str',
'modified_by': 'int',
'created_by': 'int',
'application_id': 'int',
'campaign_id': 'int'
}
attribute_map = {
'id': 'id',
'created': 'created',
'account_id': 'accountId',
'modified': 'modified',
'description': 'description',
'name': 'name',
'modified_by': 'modifiedBy',
'created_by': 'createdBy',
'application_id': 'applicationId',
'campaign_id': 'campaignId'
}
def __init__(self, id=None, created=None, account_id=None, modified=None, description=None, name=None, modified_by=None, created_by=None, application_id=None, campaign_id=None, local_vars_configuration=None):  # noqa: E501
    """CampaignCollectionWithoutPayload - a model defined in OpenAPI"""  # noqa: E501
    # Default client configuration; it controls whether the property
    # setters below perform client-side validation.
    if local_vars_configuration is None:
        local_vars_configuration = Configuration()
    self.local_vars_configuration = local_vars_configuration

    # Backing attributes for the property accessors.
    self._id = None
    self._created = None
    self._account_id = None
    self._modified = None
    self._description = None
    self._name = None
    self._modified_by = None
    self._created_by = None
    self._application_id = None
    self._campaign_id = None
    self.discriminator = None

    # Assignments go through the property setters, which validate.
    # Required fields are assigned unconditionally (their setters reject
    # None); optional fields are assigned only when provided.
    self.id = id
    self.created = created
    self.account_id = account_id
    self.modified = modified
    if description is not None:
        self.description = description
    self.name = name
    if modified_by is not None:
        self.modified_by = modified_by
    self.created_by = created_by
    if application_id is not None:
        self.application_id = application_id
    if campaign_id is not None:
        self.campaign_id = campaign_id
@property
def id(self):
    """Return the internal entity ID.

    Not to be confused with the Integration ID, which is set by the
    integration layer and used in most endpoints.

    :rtype: int
    """
    return self._id

@id.setter
def id(self, id):
    """Assign the internal entity ID; a value is required.

    :raises ValueError: if client-side validation is enabled and `id`
        is None.
    """
    cfg = self.local_vars_configuration
    if cfg.client_side_validation and id is None:
        raise ValueError("Invalid value for `id`, must not be `None`")
    self._id = id
@property
def created(self):
    """Return the creation timestamp of this entity.

    :rtype: datetime
    """
    return self._created

@created.setter
def created(self, created):
    """Assign the creation timestamp; a value is required.

    :raises ValueError: if client-side validation is enabled and
        `created` is None.
    """
    cfg = self.local_vars_configuration
    if cfg.client_side_validation and created is None:
        raise ValueError("Invalid value for `created`, must not be `None`")
    self._created = created
@property
def account_id(self):
    """Return the ID of the account that owns this entity.

    :rtype: int
    """
    return self._account_id

@account_id.setter
def account_id(self, account_id):
    """Assign the owning account ID; a value is required.

    :raises ValueError: if client-side validation is enabled and
        `account_id` is None.
    """
    cfg = self.local_vars_configuration
    if cfg.client_side_validation and account_id is None:
        raise ValueError("Invalid value for `account_id`, must not be `None`")
    self._account_id = account_id
@property
def modified(self):
    """Return the last-modification timestamp of this entity.

    :rtype: datetime
    """
    return self._modified

@modified.setter
def modified(self, modified):
    """Assign the last-modification timestamp; a value is required.

    :raises ValueError: if client-side validation is enabled and
        `modified` is None.
    """
    cfg = self.local_vars_configuration
    if cfg.client_side_validation and modified is None:
        raise ValueError("Invalid value for `modified`, must not be `None`")
    self._modified = modified
@property
def description(self):
    """Return the short description of this collection's purpose.

    :rtype: str
    """
    return self._description

@description.setter
def description(self, description):
    """Assign the description; optional field, no validation applied."""
    self._description = description
@property
def name(self):
"""Gets the name of this CampaignCollectionWithoutPayload. # noqa: E501
The name of this collection. # noqa: E501
:return: The name of this CampaignCollectionWithoutPayload. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this CampaignCollectionWithoutPayload.
The name of this collection. # noqa: E501
:param name: The name of this CampaignCollectionWithoutPayload. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
name is not None and len(name) < 1):
raise ValueError("Invalid value for `name`, length must be greater than or equal to `1`") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
name is not None and not re.search(r'^[^[:cntrl:]\s][^[:cntrl:]]*$', name)): # noqa: E501
raise ValueError(r"Invalid value for `name`, must be a follow pattern or equal to `/^[^[:cntrl:]\s][^[:cntrl:]]*$/`") # noqa: E501
self._name = name
@property
def modified_by(self):
"""Gets the modified_by of this CampaignCollectionWithoutPayload. # noqa: E501
ID of the user who last updated this effect if available. # noqa: E501
:return: The modified_by of this CampaignCollectionWithoutPayload. # noqa: E501
:rtype: int
"""
return self._modified_by
@modified_by.setter
def modified_by(self, modified_by):
"""Sets the modified_by of this CampaignCollectionWithoutPayload.
ID of the user who last updated this effect if available. # noqa: E501
:param modified_by: The modified_by of this CampaignCollectionWithoutPayload. # noqa: E501
:type: int
"""
self._modified_by = modified_by
@property
def created_by(self):
"""Gets the created_by of this CampaignCollectionWithoutPayload. # noqa: E501
ID of the user who created this effect. # noqa: E501
:return: The created_by of this CampaignCollectionWithoutPayload. # noqa: E501
:rtype: int
"""
return self._created_by
@created_by.setter
def created_by(self, created_by):
"""Sets the created_by of this CampaignCollectionWithoutPayload.
ID of the user who created this effect. # noqa: E501
:param created_by: The created_by of this CampaignCollectionWithoutPayload. # noqa: E501
:type: int
"""
if self.local_vars_configuration.client_side_validation and created_by is None: # noqa: E501
raise ValueError("Invalid value for `created_by`, must not be `None`") # noqa: E501
self._created_by = created_by
@property
def application_id(self):
"""Gets the application_id of this CampaignCollectionWithoutPayload. # noqa: E501
The ID of the Application that owns this entity. # noqa: E501
:return: The application_id of this CampaignCollectionWithoutPayload. # noqa: E501
:rtype: int
"""
return self._application_id
@application_id.setter
def application_id(self, application_id):
"""Sets the application_id of this CampaignCollectionWithoutPayload.
The ID of the Application that owns this entity. # noqa: E501
:param application_id: The application_id of this CampaignCollectionWithoutPayload. # noqa: E501
:type: int
"""
self._application_id = application_id
@property
def campaign_id(self):
"""Gets the campaign_id of this CampaignCollectionWithoutPayload. # noqa: E501
The ID of the campaign that owns this entity. # noqa: E501
:return: The campaign_id of this CampaignCollectionWithoutPayload. # noqa: E501
:rtype: int
"""
return self._campaign_id
@campaign_id.setter
def campaign_id(self, campaign_id):
"""Sets the campaign_id of this CampaignCollectionWithoutPayload.
The ID of the campaign that owns this entity. # noqa: E501
:param campaign_id: The campaign_id of this CampaignCollectionWithoutPayload. # noqa: E501
:type: int
"""
self._campaign_id = campaign_id
def to_dict(self):
    """Return the model's properties as a plain dict.

    Nested model instances (anything exposing ``to_dict``) are converted
    recursively, including inside lists and dict values.
    """
    def _convert(value):
        # Recursively unwrap model objects nested in containers.
        if isinstance(value, list):
            return [v.to_dict() if hasattr(v, "to_dict") else v for v in value]
        if hasattr(value, "to_dict"):
            return value.to_dict()
        if isinstance(value, dict):
            return {k: v.to_dict() if hasattr(v, "to_dict") else v
                    for k, v in value.items()}
        return value

    return {attr: _convert(getattr(self, attr)) for attr in self.openapi_types}
def to_str(self):
    """Return the pretty-printed string representation of the model."""
    as_dict = self.to_dict()
    return pprint.pformat(as_dict)
def __repr__(self):
    """Used by `print` and `pprint`; delegates to :meth:`to_str`."""
    return self.to_str()
def __eq__(self, other):
    """Two instances are equal iff they are the same model type and their
    dict representations match."""
    return (isinstance(other, CampaignCollectionWithoutPayload)
            and self.to_dict() == other.to_dict())
def __ne__(self, other):
    """Inverse of :meth:`__eq__` (non-model operands compare unequal)."""
    return not self.__eq__(other)
| {
"content_hash": "2116579e6fc7e8fc90f446c852f3bd3b",
"timestamp": "",
"source": "github",
"line_count": 384,
"max_line_length": 732,
"avg_line_length": 36.3125,
"alnum_prop": 0.6252151462994836,
"repo_name": "talon-one/talon_one.py",
"id": "72b79c649e987b1df89239f7ffa606c9f21fc7b5",
"size": "13961",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "talon_one/models/campaign_collection_without_payload.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "238"
},
{
"name": "Python",
"bytes": "5139586"
},
{
"name": "Shell",
"bytes": "1826"
}
],
"symlink_target": ""
} |
"""
Author: Junhong Chen
"""
import os
import sys
import argparse
import re
#-------------------- Arguments --------------------
#argParser = argparse.ArgumentParser( description="Create a degenerate FASTA from original FASTA and GTF")
#argParser.add_argument('--fasta', help="Reference sequence (FASTA format)")
#argParser.add_argument('--gtf', help="Reference annotation (GTF / GFFv2 format)")
#argParser.add_argument('--output', help="Output reference")
# Codon -> maximally degenerate codon under the standard genetic code: the
# third base is replaced by the IUPAC code covering every synonymous codon
# sharing the first two bases (N = 4-fold, Y/R/H = pyrimidine/purine/3-fold
# degeneracy; ATG and TGG are unique and map to themselves).
degenHash = { 'TTT' : 'TTY', 'TTC' : 'TTY', 'TTA' : 'TTR', 'TTG' : 'TTR',
'CTT' : 'CTN', 'CTC' : 'CTN', 'CTA' : 'CTN', 'CTG' : 'CTN',
'ATT' : 'ATH', 'ATC' : 'ATH', 'ATA' : 'ATH', 'ATG' : 'ATG',
'GTT' : 'GTN', 'GTC' : 'GTN', 'GTA' : 'GTN', 'GTG' : 'GTN',
'TCT' : 'TCN', 'TCC' : 'TCN', 'TCA' : 'TCN', 'TCG' : 'TCN',
'CCT' : 'CCN', 'CCC' : 'CCN', 'CCA' : 'CCN', 'CCG' : 'CCN',
'ACT' : 'ACN', 'ACC' : 'ACN', 'ACA' : 'ACN', 'ACG' : 'ACN',
'GCT' : 'GCN', 'GCC' : 'GCN', 'GCA' : 'GCN', 'GCG' : 'GCN',
'TAT' : 'TAY', 'TAC' : 'TAY', 'TAA' : 'TAR', 'TAG' : 'TAR',
'CAT' : 'CAY', 'CAC' : 'CAY', 'CAA' : 'CAR', 'CAG' : 'CAR',
'AAT' : 'AAY', 'AAC' : 'AAY', 'AAA' : 'AAR', 'AAG' : 'AAR',
'GAT' : 'GAY', 'GAC' : 'GAY', 'GAA' : 'GAR', 'GAG' : 'GAR',
'TGT' : 'TGY', 'TGC' : 'TGY', 'TGA' : 'TGA', 'TGG' : 'TGG',
'CGT' : 'CGN', 'CGC' : 'CGN', 'CGA' : 'CGN', 'CGG' : 'CGN',
'AGT' : 'AGY', 'AGC' : 'AGY', 'AGA' : 'AGR', 'AGG' : 'AGR',
'GGT' : 'GGN', 'GGC' : 'GGN', 'GGA' : 'GGN', 'GGG' : 'GGN',
}
# IUPAC ambiguity code -> the set of concrete nucleotides it stands for,
# spelled as a lexicographically sorted string.
iupacHash = { 'A' : 'A', 'C' : 'C', 'G' : 'G', 'T' : 'T',
'R' : 'AG', 'Y' : 'CT', 'S' : 'CG', 'W' : 'AT',
'K' : 'GT', 'M' : 'AC', 'B' : 'CGT', 'D' : 'AGT',
'H' : 'ACT', 'V' : 'ACG', 'N' : 'ACGT',
}
# Watson-Crick complement extended to IUPAC codes (W/S/N are self-complementary).
complementHash = { 'A' : 'T', 'C' : 'G', 'G' : 'C', 'T' : 'A',
'M' : 'K', 'R' : 'Y', 'W' : 'W', 'S' : 'S',
'Y' : 'R', 'K' : 'M', 'V' : 'B', 'H' : 'D',
'D' : 'H', 'B' : 'V', 'N' : 'N' }
# Reverse lookup: sorted nucleotide set string -> IUPAC code (relies on
# iupacHash values being sorted strings).
iupacHashRev = dict (zip(iupacHash.values(), iupacHash.keys()))
class FastaFramework:
    """FASTA Framework for I/O and Parsing.

    Loads every contig (header + sequence) of a FASTA file into memory and
    maintains, per contig, a "degenerate mask" (``dmask``): one IUPAC
    ambiguity byte per base, refined codon-by-codon from annotated coding
    sequences via :meth:`calculateDMask` and overlaid with :meth:`applyDMask`.
    """

    def __init__(self, passFileName):
        """Parse all headers and corresponding sequences from a FASTA file.

        On success ``self.valid`` is True; if the file cannot be opened an
        error is printed and ``self.valid`` stays False.
        """
        self.valid = False
        self.contigs = []    # entries: {'header': str, 'seq': str, 'dmask': bytearray}
        self.selected = 0    # index of the contig the accessors operate on
        self.fileName = passFileName
        try:
            inFile = open(self.fileName, 'r')
        except OSError:  # narrowed from a bare except: only I/O failures are expected here
            print("Unable to open {0}".format(self.fileName))
            return
        # Iterate through FASTA file
        currentContigs = []
        for currentLine in inFile:
            if currentLine.startswith('>'):
                # Header - close out previous sequence, start new
                if currentContigs:
                    self.contigs[-1]['seq'] = ''.join(currentContigs)
                    self.contigs[-1]['dmask'] = bytearray(len(self.contigs[-1]['seq']))
                    # BUG FIX: reset the accumulator.  It was never cleared
                    # before, so in multi-FASTA files every contig after the
                    # first silently inherited all preceding contigs' sequence.
                    currentContigs = []
                self.contigs.append({'header': '', 'seq': '', 'dmask': ''})
                self.contigs[-1]['header'] = currentLine.rstrip()
            else:
                # Sequence data
                currentContigs.append(currentLine.rstrip())
        # Close out final sequence and file, mark active
        if currentContigs:
            self.contigs[-1]['seq'] = ''.join(currentContigs)
            self.contigs[-1]['dmask'] = bytearray(len(self.contigs[-1]['seq']))
        inFile.close()
        self.valid = True

    def getCount(self):
        """Return the number of contigs parsed from the file."""
        return len(self.contigs)

    def getHeader(self):
        """Return the header of the selected contig, without the leading '>'."""
        return self.contigs[self.selected]['header'][1:]

    def getSequence(self):
        """Return the nucleotide sequence of the selected contig."""
        return self.contigs[self.selected]['seq']

    def checkComplement(self, passSequence, passStep):
        """Complement ``passSequence`` base-by-base when walking the reverse
        strand (``passStep`` != 1); forward-strand input is returned as-is."""
        # Only complement reverse direction
        if passStep == 1:
            return passSequence
        retSequence = ""
        for nucleotide in passSequence:
            retSequence += complementHash[nucleotide]
        return retSequence

    def refineDBase(self, passNew, passExist):
        """Given two IUPAC codes, return the most ambiguous code satisfying both.

        :param passNew: new IUPAC character (length-1 str).
        :param passExist: existing mask value as an int byte; 0 means unset.
        :return: refined IUPAC code as an int byte for the dmask bytearray.
        """
        # Check for existing mask values - if none present, use new nucleotide
        if not passExist:
            return ord(passNew)
        # Expand possible nucleotides of each IUPAC code
        newPossible = set(iupacHash[passNew])
        existPossible = set(iupacHash[chr(passExist)])
        # Refine ambiguity by calculating intersection
        refinePossible = newPossible.intersection(existPossible)
        # Sort lexicographically, then match back to IUPAC code
        refineNuc = ''.join(sorted(refinePossible))
        return ord(iupacHashRev[refineNuc])

    def calculateDMask(self, passStart, passEnd, passStrand, passFrame):
        """Refine the degenerate mask over one CDS.

        :param passStart: 0-based inclusive start index of the CDS.
        :param passEnd: exclusive end index of the CDS.
        :param passStrand: '+' or '-'.
        :param passFrame: reading-frame offset (0, 1 or 2).
        """
        # Generate iteration indices by strand direction
        if passStrand == '+':
            seqRange, step = range(passStart + passFrame, passEnd, 3), 1
        else:
            seqRange, step = range(passEnd - 1 - passFrame, passStart - 1, -3), -1
        # Iterate through each codon in the coding sequence
        for seqIdx in seqRange:
            # Obtain codon; for '-' the slice walks backwards and the
            # complement yields the coding-strand codon.
            codon = self.contigs[self.selected]['seq'][seqIdx:seqIdx + 3 * step:step]
            codon = self.checkComplement(codon, step)
            # Truncated (contig edge) or ambiguous codons are absent from the
            # table; skip them.
            if codon not in degenHash:
                continue
            # Calculate degenerate codon (complement back for '-')
            dCodon = degenHash[codon]
            dCodon = self.checkComplement(dCodon, step)
            # Iterate through each nucleotide in the codon
            for baseIdx in range(0, 3):
                # Calculate refined nucleotide
                dNucleotide = self.refineDBase(
                    dCodon[baseIdx],
                    self.contigs[self.selected]['dmask'][seqIdx + (baseIdx * step)])
                # Update Mask
                self.contigs[self.selected]['dmask'][seqIdx + (baseIdx * step)] = dNucleotide

    def applyDMask(self):
        """Overlay the degenerate mask onto the selected contig's sequence,
        keeping the original base wherever no mask value was computed."""
        for seqIdx in range(0, len(self.contigs[self.selected]['seq'])):
            if not self.contigs[self.selected]['dmask'][seqIdx]:
                self.contigs[self.selected]['dmask'][seqIdx] = ord(self.contigs[self.selected]['seq'][seqIdx])
        self.contigs[self.selected]['seq'] = self.contigs[self.selected]['dmask'].decode("ascii")
# Parse GTF line and return dictionary with relevant information
def getRowData(passLine):
    """Parse one GTF line into a dict keyed by the nine standard columns.

    Returns None for lines without nine tab-separated fields (e.g. comments).
    A '.' frame is normalized to the integer 0.
    """
    columns = ('seqname', 'source', 'feature', 'first', 'last',
               'score', 'strand', 'frame', 'attribute')
    # Nine lazily-matched groups separated by exactly eight tabs.
    pattern = '^' + r'(.*?)\t' * 8 + r'(.*?)$'
    match = re.match(pattern, passLine)
    if not match:
        return None
    row = dict(zip(columns, match.groups()))
    # Fixup default frames
    if row['frame'] == '.':
        row['frame'] = 0
    return row
# Write output for each contig
def writeOutput(passFASTA, passFile):
    """Write every contig of ``passFASTA`` to ``passFile`` in FASTA format,
    wrapping sequence lines at 70 characters.

    Note: mutates ``passFASTA.selected`` while iterating the contigs.
    """
    with open(passFile, 'wb') as outputHandle:
        for contigIdx in range(passFASTA.getCount()):
            passFASTA.selected = contigIdx
            # Header line
            outputHandle.write(bytes(">{0}\n".format(passFASTA.getHeader()), "ascii"))
            # Sequence, wrapped at 70 columns
            sequenceBytes = bytes(passFASTA.getSequence(), "ascii")
            for start in range(0, len(sequenceBytes), 70):
                outputHandle.write(sequenceBytes[start:start + 70] + b"\n")
#--------------------------- Main --------------------------------------
def degenAll(fasta, gtf, output):
    """Compute the degenerate-codon version of ``fasta`` from the CDS
    records in ``gtf`` and write the masked FASTA to ``output``."""
    # Load FASTA reference
    fastaRef = FastaFramework(fasta)
    if not fastaRef.valid:
        exit()
    # Accumulate the degenerate mask over every CDS in the annotation.
    with open(gtf, 'r') as gtfHandle:
        for currentLine in gtfHandle:
            row = getRowData(currentLine)
            # Skip comments and any feature that is not a CDS.
            if not row or row['feature'] != 'CDS':
                continue
            fastaRef.calculateDMask(int(row['first']) - 1, int(row['last']),
                                    row['strand'], int(row['frame']))
    # Apply the degenerate mask and write the result.
    fastaRef.applyDMask()
    writeOutput(fastaRef, output)
if __name__ == "__main__":
    # Walk the directory tree given on the command line; for every FASTA
    # (.fna) file, degenerate it against the matching .gff annotation and
    # write <name>.fd alongside it.
    rootDir = sys.argv[1]
    for dirName, subdirList, fileList in os.walk(rootDir):
        print('Found directory: %s' % dirName)
        for fname in fileList:
            if fname.endswith("fna"):
                stem = fname.split(".")[0]
                degenAll(dirName + "/" + fname,
                         dirName + "/" + stem + ".gff",
                         dirName + "/" + stem + ".fd")
| {
"content_hash": "f2c8199a1dfc19fa368b807b87ce85b0",
"timestamp": "",
"source": "github",
"line_count": 246,
"max_line_length": 136,
"avg_line_length": 40.703252032520325,
"alnum_prop": 0.5234195545790472,
"repo_name": "macmanes-lab/MCBS913",
"id": "73ca0c84c0652de07fa8eb5799b77dee31f6d01f",
"size": "10081",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code/Junhong Chen/degenerateAllFastaInOne.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "548191"
},
{
"name": "C++",
"bytes": "18737"
},
{
"name": "CSS",
"bytes": "17714"
},
{
"name": "Groff",
"bytes": "26156"
},
{
"name": "HTML",
"bytes": "293511"
},
{
"name": "JavaScript",
"bytes": "214635"
},
{
"name": "Makefile",
"bytes": "3162"
},
{
"name": "Perl",
"bytes": "12751"
},
{
"name": "Python",
"bytes": "159654"
},
{
"name": "Shell",
"bytes": "10964"
},
{
"name": "TeX",
"bytes": "49312"
}
],
"symlink_target": ""
} |
import pytest
from spacy.tokens import Doc, Span, DocBin
from spacy.training import Example
from spacy.training.converters.conllu_to_docs import conllu_to_docs
from spacy.lang.en import English
from spacy.kb import KnowledgeBase
from spacy.vocab import Vocab
from spacy.language import Language
from spacy.util import ensure_path, load_model_from_path
import numpy
import pickle
from ..util import make_tempdir
def test_issue4528(en_vocab):
    """Test that user_data is correctly serialized in DocBin."""
    doc = Doc(en_vocab, words=["hello", "world"])
    doc.user_data["foo"] = "bar"
    # Extension attribute values are stored under ("._.", name, start, end)
    # keys in user_data.
    doc.user_data[("._.", "foo", None, None)] = "bar"
    packed = DocBin(store_user_data=True)
    packed.add(doc)
    unpacked = DocBin(store_user_data=True).from_bytes(packed.to_bytes())
    restored = next(unpacked.get_docs(en_vocab))
    assert restored.user_data["foo"] == "bar"
    assert restored.user_data[("._.", "foo", None, None)] == "bar"
@pytest.mark.parametrize(
    "text,words", [("A'B C", ["A", "'", "B", "C"]), ("A-B", ["A-B"])]
)
def test_gold_misaligned(en_tokenizer, text, words):
    # Building an Example from a misaligned gold tokenization must not raise.
    tokenized = en_tokenizer(text)
    Example.from_dict(tokenized, {"words": words})
def test_issue4651_with_phrase_matcher_attr():
    """The EntityRuler's PhraseMatcher must round-trip through from_disk
    when the phrase_matcher_attr argument is specified."""
    text = "Spacy is a python library for nlp"
    patterns = [{"label": "PYTHON_LIB", "pattern": "spacy", "id": "spaCy"}]
    nlp = English()
    ruler = nlp.add_pipe("entity_ruler", config={"phrase_matcher_attr": "LOWER"})
    ruler.add_patterns(patterns)
    expected = [(ent.text, ent.label_, ent.ent_id_) for ent in nlp(text).ents]
    nlp_reloaded = English()
    with make_tempdir() as tmp_dir:
        ruler_path = tmp_dir / "entityruler"
        ruler.to_disk(ruler_path)
        nlp_reloaded.add_pipe("entity_ruler").from_disk(ruler_path)
        observed = [(ent.text, ent.label_, ent.ent_id_)
                    for ent in nlp_reloaded(text).ents]
    assert observed == expected
def test_issue4651_without_phrase_matcher_attr():
    """The EntityRuler's PhraseMatcher must round-trip through from_disk
    when the phrase_matcher_attr argument is not specified."""
    text = "Spacy is a python library for nlp"
    patterns = [{"label": "PYTHON_LIB", "pattern": "spacy", "id": "spaCy"}]
    nlp = English()
    ruler = nlp.add_pipe("entity_ruler")
    ruler.add_patterns(patterns)
    expected = [(ent.text, ent.label_, ent.ent_id_) for ent in nlp(text).ents]
    nlp_reloaded = English()
    with make_tempdir() as tmp_dir:
        ruler_path = tmp_dir / "entityruler"
        ruler.to_disk(ruler_path)
        nlp_reloaded.add_pipe("entity_ruler").from_disk(ruler_path)
        observed = [(ent.text, ent.label_, ent.ent_id_)
                    for ent in nlp_reloaded(text).ents]
    assert observed == expected
def test_issue4665():
    """conllu_to_docs should not raise an exception if the HEAD column
    contains an underscore."""
    input_data = """
1 [ _ PUNCT -LRB- _ _ punct _ _
2 This _ DET DT _ _ det _ _
3 killing _ NOUN NN _ _ nsubj _ _
4 of _ ADP IN _ _ case _ _
5 a _ DET DT _ _ det _ _
6 respected _ ADJ JJ _ _ amod _ _
7 cleric _ NOUN NN _ _ nmod _ _
8 will _ AUX MD _ _ aux _ _
9 be _ AUX VB _ _ aux _ _
10 causing _ VERB VBG _ _ root _ _
11 us _ PRON PRP _ _ iobj _ _
12 trouble _ NOUN NN _ _ dobj _ _
13 for _ ADP IN _ _ case _ _
14 years _ NOUN NNS _ _ nmod _ _
15 to _ PART TO _ _ mark _ _
16 come _ VERB VB _ _ acl _ _
17 . _ PUNCT . _ _ punct _ _
18 ] _ PUNCT -RRB- _ _ punct _ _
"""
    # Only the absence of an exception is being tested here.
    conllu_to_docs(input_data)
def test_issue4674():
    """Setting entities with overlapping identifiers must not mess up KB IO."""
    nlp = English()
    kb = KnowledgeBase(nlp.vocab, entity_vector_length=3)
    # Duplicate entity ids should be collapsed (with a warning) to one entry.
    with pytest.warns(UserWarning):
        kb.set_entities(
            entity_list=["Q1", "Q1"],
            freq_list=[32, 111],
            vector_list=[[0.9, 1.1, 1.01], [1.8, 2.25, 2.01]],
        )
    assert kb.get_size_entities() == 1
    # Round-trip through disk and verify the deduplicated size survives.
    with make_tempdir() as d:
        dir_path = ensure_path(d)
        if not dir_path.exists():
            dir_path.mkdir()
        kb_file = dir_path / "kb"
        kb.to_disk(str(kb_file))
        kb_reloaded = KnowledgeBase(nlp.vocab, entity_vector_length=3)
        kb_reloaded.from_disk(str(kb_file))
    assert kb_reloaded.get_size_entities() == 1
@pytest.mark.skip(reason="API change: disable just disables, new exclude arg")
def test_issue4707():
    """Disabled component names should also be excluded from nlp.from_disk
    by default when loading a model."""
    nlp = English()
    nlp.add_pipe("sentencizer")
    nlp.add_pipe("entity_ruler")
    assert nlp.pipe_names == ["sentencizer", "entity_ruler"]
    excluded = ["tokenizer", "sentencizer"]
    with make_tempdir() as tmpdir:
        nlp.to_disk(tmpdir, exclude=excluded)
        reloaded = load_model_from_path(tmpdir, disable=excluded)
    assert "sentencizer" not in reloaded.pipe_names
    assert "entity_ruler" in reloaded.pipe_names
def test_issue4725_1():
    """Ensure the pickling of the NER goes well."""
    vocab = Vocab(vectors_name="test_vocab_add_vector")
    nlp = English(vocab=vocab)
    ner = nlp.create_pipe("ner", config={"update_with_oracle_cut_size": 111})
    with make_tempdir() as tmp_path:
        pkl_path = tmp_path / "ner.pkl"
        with pkl_path.open("wb") as file_:
            pickle.dump(ner, file_)
        # The config must survive both pickling and unpickling.
        assert ner.cfg["update_with_oracle_cut_size"] == 111
        with pkl_path.open("rb") as file_:
            ner_reloaded = pickle.load(file_)
        assert ner_reloaded.cfg["update_with_oracle_cut_size"] == 111
def test_issue4725_2():
    # Ensures multiprocessing with global vectors runs without hanging or
    # crashing; crashes usually come from the 'spawn' start method (e.g. on
    # Windows) or from pickling the NER (cf test_issue4725_1).
    vocab = Vocab(vectors_name="test_vocab_add_vector")
    data = numpy.ndarray((5, 3), dtype="f")
    data[0] = 1.0
    data[1] = 2.0
    vocab.set_vector("cat", data[0])
    vocab.set_vector("dog", data[1])
    nlp = English(vocab=vocab)
    nlp.add_pipe("ner")
    nlp.initialize()
    # Only completion matters; the outputs themselves are discarded.
    for _ in nlp.pipe(["Kurt is in London."] * 10, batch_size=2, n_process=2):
        pass
def test_issue4849():
    """The EntityRuler must find the same entities with one and with two
    worker processes."""
    nlp = English()
    patterns = [
        {"label": "PERSON", "pattern": "joe biden", "id": "joe-biden"},
        {"label": "PERSON", "pattern": "bernie sanders", "id": "bernie-sanders"},
    ]
    ruler = nlp.add_pipe("entity_ruler", config={"phrase_matcher_attr": "LOWER"})
    ruler.add_patterns(patterns)
    text = """
The left is starting to take aim at Democratic front-runner Joe Biden.
Sen. Bernie Sanders joined in her criticism: "There is no 'middle ground' when it comes to climate policy."
"""
    for n_process in (1, 2):
        count_ents = 0
        for doc in nlp.pipe([text], n_process=n_process):
            count_ents += len([ent for ent in doc.ents if ent.ent_id > 0])
        assert count_ents == 2
@Language.factory("my_pipe")
class CustomPipe:
    """Minimal custom component: annotates each sentence span and the doc
    with a "my_ext" extension derived from the sentence end offsets."""

    def __init__(self, nlp, name="my_pipe"):
        self.name = name
        # Register the extensions once per factory instantiation.
        Span.set_extension("my_ext", getter=self._get_my_ext)
        Doc.set_extension("my_ext", default=None)

    def __call__(self, doc):
        collected = []
        for sent in doc.sents:
            ext_value = self._get_my_ext(sent)
            sent._.set("my_ext", ext_value)
            collected.append(ext_value)
        # Doc-level value is the newline-joined sentence values.
        doc._.set("my_ext", "\n".join(collected))
        return doc

    @staticmethod
    def _get_my_ext(span):
        return str(span.end)
def test_issue4903():
    """Ensure that this runs correctly and doesn't hang or crash on Windows /
    macOS (multiprocessing with a custom stateful component)."""
    nlp = English()
    nlp.add_pipe("sentencizer")
    nlp.add_pipe("my_pipe", after="sentencizer")
    texts = ["I like bananas.", "Do you like them?", "No, I prefer wasabi."]
    docs = list(nlp.pipe(texts, n_process=2))
    assert [doc.text for doc in docs] == texts
def test_issue4924():
    """nlp.evaluate must handle an empty Example without crashing."""
    nlp = Language()
    nlp.evaluate([Example.from_dict(nlp.make_doc(""), {})])
| {
"content_hash": "cc086f25e80852661f1aaec24d82fd20",
"timestamp": "",
"source": "github",
"line_count": 251,
"max_line_length": 111,
"avg_line_length": 35.08764940239044,
"alnum_prop": 0.619393664130805,
"repo_name": "spacy-io/spaCy",
"id": "6dbbc233bb445f474fab3433f07659a8177df3b7",
"size": "8807",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spacy/tests/regression/test_issue4501-5000.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "567025"
},
{
"name": "C++",
"bytes": "12785"
},
{
"name": "CSS",
"bytes": "57480"
},
{
"name": "Groff",
"bytes": "188349"
},
{
"name": "HTML",
"bytes": "582292"
},
{
"name": "JavaScript",
"bytes": "54065"
},
{
"name": "M4",
"bytes": "11398"
},
{
"name": "Makefile",
"bytes": "256492"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "PostScript",
"bytes": "460967"
},
{
"name": "Python",
"bytes": "682585"
},
{
"name": "Shell",
"bytes": "95525"
}
],
"symlink_target": ""
} |
import mock
from oslo_config import cfg
from neutron.agent.linux import dhcp
from neutron.agent.linux import ip_lib
from neutron.common import utils as common_utils
from neutron.conf.agent import common as config
from neutron.conf.agent import dhcp as dhcp_conf
from neutron.conf import common as common_conf
from neutron.tests import base as tests_base
from neutron.tests.common import net_helpers
from neutron.tests.functional import base as functional_base
class TestDhcp(functional_base.BaseSudoTestCase):
    """Functional tests for the DHCP agent's DeviceManager."""

    def setUp(self):
        super(TestDhcp, self).setUp()
        # Build a config object with the interface/DHCP options the device
        # manager expects, backed by a throwaway OVS integration bridge.
        conf = cfg.ConfigOpts()
        config.register_interface_driver_opts_helper(conf)
        config.register_interface_opts(conf)
        conf.register_opts(common_conf.core_opts)
        conf.register_opts(dhcp_conf.DHCP_AGENT_OPTS)
        conf.set_override('interface_driver', 'openvswitch')
        conf.set_override('host', 'foo-host')
        self.conf = conf
        br_int = self.useFixture(net_helpers.OVSBridgeFixture()).bridge
        self.conf.set_override('ovs_integration_bridge', br_int.br_name)

    def test_cleanup_stale_devices(self):
        """setup() must remove stale taps, leaving only the DHCP port's tap."""
        plugin = mock.MagicMock()
        dev_mgr = dhcp.DeviceManager(self.conf, plugin)
        subnet = tests_base.AttributeDict({'id': 'subnet_foo_id',
                                           'enable_dhcp': True,
                                           'ipv6_address_mode': None,
                                           'ipv6_ra_mode': None,
                                           'cidr': '10.0.0.0/24',
                                           'ip_version': 4,
                                           'gateway_ip': '10.0.0.1'})
        network = {
            'id': 'foo_id',
            'tenant_id': 'foo_tenant',
            'namespace': 'qdhcp-foo_id',
            'ports': [],
            'subnets': [subnet]}
        dhcp_port = {
            'id': 'foo_port_id',
            'mac_address': '10:22:33:44:55:67',
            'fixed_ips': [tests_base.AttributeDict(
                {'subnet_id': 'subnet_foo_id', 'ip_address': '10.0.0.1'})]
        }
        plugin.create_dhcp_port.return_value = tests_base.AttributeDict(
            dhcp_port)
        # Plug two interfaces the device manager did not create: from its
        # point of view they are stale and should be cleaned up.
        for port_id, tap_name, mac in (
                ('foo_id2', 'tapfoo_id2', '10:22:33:44:55:68'),
                ('foo_id3', 'tapfoo_id3', '10:22:33:44:55:69')):
            dev_mgr.driver.plug("foo_id", port_id, tap_name, mac,
                                namespace="qdhcp-foo_id")
        ip_wrapper = ip_lib.IPWrapper(namespace="qdhcp-foo_id")
        self.addCleanup(ip_wrapper.netns.delete, 'qdhcp-foo_id')
        self.assertEqual(sorted(["tapfoo_id2", "tapfoo_id3"]),
                         sorted(map(str, ip_wrapper.get_devices())))
        # Setting up dhcp for the network must remove the stale devices.
        dev_mgr.setup(tests_base.AttributeDict(network))
        common_utils.wait_until_true(
            lambda: 1 == len(ip_wrapper.get_devices()),
            timeout=5,
            sleep=0.1,
            exception=RuntimeError("only one non-loopback device must remain"))
        self.assertEqual("tapfoo_port_id", ip_wrapper.get_devices()[0].name)
| {
"content_hash": "680c0d321475b82e07497e09b70b2024",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 79,
"avg_line_length": 44.54666666666667,
"alnum_prop": 0.5267883867105657,
"repo_name": "eayunstack/neutron",
"id": "0fa1fdb871e44463082424c67759417a718f3ef5",
"size": "3952",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "neutron/tests/functional/agent/linux/test_dhcp.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "10593193"
},
{
"name": "Shell",
"bytes": "8804"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a required free-text `text` column to three stats models.

    The 'a' default exists only to back-fill existing rows; it is dropped
    from model state via preserve_default=False.
    """

    dependencies = [
        ('stats', '0004_auto_20171128_1516'),
    ]

    # The same column is added to each of the three models.
    operations = [
        migrations.AddField(
            model_name=model_name,
            name='text',
            field=models.CharField(default='a', max_length=1000),
            preserve_default=False,
        )
        for model_name in ('activitylevel', 'programgoal', 'workactivity')
    ]
| {
"content_hash": "59d013428c1ece86f40237f3c317a4a7",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 65,
"avg_line_length": 27.129032258064516,
"alnum_prop": 0.5576694411414982,
"repo_name": "airportmarc/the416life",
"id": "01f3e07c5589854d2290447c915e8354ee10c9ac",
"size": "914",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/apps/stats/migrations/0005_auto_20171128_1526.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "18"
},
{
"name": "CSS",
"bytes": "430385"
},
{
"name": "HTML",
"bytes": "174632"
},
{
"name": "JavaScript",
"bytes": "224762"
},
{
"name": "Python",
"bytes": "477212"
},
{
"name": "Shell",
"bytes": "4240"
},
{
"name": "Vue",
"bytes": "80363"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.