| text (string, lengths 4 to 1.02M) | meta (dict) |
|---|---|
"""
sentry
~~~~~~
:copyright: (c) 2015 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from .runner import main
main()
|
{
"content_hash": "3e358721e94debf8bd375ec8bbb157a4",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 70,
"avg_line_length": 17.2,
"alnum_prop": 0.6802325581395349,
"repo_name": "mitsuhiko/sentry",
"id": "418225eb7804ce3918e5fa1aa84f9778b160c37b",
"size": "172",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/sentry/__main__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "171113"
},
{
"name": "Python",
"bytes": "877258"
}
],
"symlink_target": ""
}
|
from google.cloud import aiplatform
from google.protobuf import json_format
from google.protobuf.struct_pb2 import Value
def create_training_pipeline_tabular_regression_sample(
project: str,
display_name: str,
dataset_id: str,
model_display_name: str,
target_column: str,
location: str = "us-central1",
api_endpoint: str = "us-central1-aiplatform.googleapis.com",
):
# The AI Platform services require regional API endpoints.
client_options = {"api_endpoint": api_endpoint}
# Initialize client that will be used to create and send requests.
# This client only needs to be created once, and can be reused for multiple requests.
client = aiplatform.gapic.PipelineServiceClient(client_options=client_options)
# set the columns used for training and their data types
transformations = [
{"auto": {"column_name": "STRING_5000unique_NULLABLE"}},
{"auto": {"column_name": "INTEGER_5000unique_NULLABLE"}},
{"auto": {"column_name": "FLOAT_5000unique_NULLABLE"}},
{"auto": {"column_name": "FLOAT_5000unique_REPEATED"}},
{"auto": {"column_name": "NUMERIC_5000unique_NULLABLE"}},
{"auto": {"column_name": "BOOLEAN_2unique_NULLABLE"}},
{
"timestamp": {
"column_name": "TIMESTAMP_1unique_NULLABLE",
"invalid_values_allowed": True,
}
},
{"auto": {"column_name": "DATE_1unique_NULLABLE"}},
{"auto": {"column_name": "TIME_1unique_NULLABLE"}},
{
"timestamp": {
"column_name": "DATETIME_1unique_NULLABLE",
"invalid_values_allowed": True,
}
},
{"auto": {"column_name": "STRUCT_NULLABLE.STRING_5000unique_NULLABLE"}},
{"auto": {"column_name": "STRUCT_NULLABLE.INTEGER_5000unique_NULLABLE"}},
{"auto": {"column_name": "STRUCT_NULLABLE.FLOAT_5000unique_NULLABLE"}},
{"auto": {"column_name": "STRUCT_NULLABLE.FLOAT_5000unique_REQUIRED"}},
{"auto": {"column_name": "STRUCT_NULLABLE.FLOAT_5000unique_REPEATED"}},
{"auto": {"column_name": "STRUCT_NULLABLE.NUMERIC_5000unique_NULLABLE"}},
{"auto": {"column_name": "STRUCT_NULLABLE.BOOLEAN_2unique_NULLABLE"}},
{"auto": {"column_name": "STRUCT_NULLABLE.TIMESTAMP_1unique_NULLABLE"}},
]
training_task_inputs_dict = {
# required inputs
"targetColumn": target_column,
"predictionType": "regression",
"transformations": transformations,
"trainBudgetMilliNodeHours": 8000,
# optional inputs
"disableEarlyStopping": False,
# supported regression optimisation objectives: minimize-rmse,
# minimize-mae, minimize-rmsle
"optimizationObjective": "minimize-rmse",
}
training_task_inputs = json_format.ParseDict(training_task_inputs_dict, Value())
training_pipeline = {
"display_name": display_name,
"training_task_definition": "gs://google-cloud-aiplatform/schema/trainingjob/definition/automl_tabular_1.0.0.yaml",
"training_task_inputs": training_task_inputs,
"input_data_config": {
"dataset_id": dataset_id,
"fraction_split": {
"training_fraction": 0.8,
"validation_fraction": 0.1,
"test_fraction": 0.1,
},
},
"model_to_upload": {"display_name": model_display_name},
}
parent = f"projects/{project}/locations/{location}"
response = client.create_training_pipeline(
parent=parent, training_pipeline=training_pipeline
)
print("response:", response)
# [END aiplatform_create_training_pipeline_tabular_regression_sample]
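# Illustrative usage (not part of the original sample): a minimal, hedged
# sketch of invoking the function above. The project ID, dataset ID, display
# names and target column below are placeholders, not values from the source.
if __name__ == "__main__":
    create_training_pipeline_tabular_regression_sample(
        project="my-gcp-project",                   # hypothetical project ID
        display_name="tabular-regression-pipeline",
        dataset_id="1234567890123456789",           # hypothetical dataset ID
        model_display_name="tabular-regression-model",
        target_column="FLOAT_5000unique_NULLABLE",  # placeholder target column
    )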
|
{
"content_hash": "50603122d3680b5055c8efc6b791d8ac",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 123,
"avg_line_length": 42.793103448275865,
"alnum_prop": 0.6220789685737309,
"repo_name": "sasha-gitg/python-aiplatform",
"id": "02607c2788afc68b295c6cd0d96f16ee267ffb48",
"size": "4371",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "samples/snippets/pipeline_service/create_training_pipeline_tabular_regression_sample.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1700"
},
{
"name": "Python",
"bytes": "11216304"
},
{
"name": "Shell",
"bytes": "30838"
}
],
"symlink_target": ""
}
|
#Created on 2013-8-17
#Copyright 2013 nuoqingyun xuqifeng
import socket
from oslo.config import cfg
CONF = cfg.CONF
def _get_my_ip():
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.connect(('8.8.8.8',80))
(addr, port) = sock.getsockname()
sock.close()
return addr
except socket.error:
return "127.0.0.1"
addr_opts = [
    cfg.StrOpt('my_ip',
               default=_get_my_ip(),
               help="this machine's public address"),
    cfg.StrOpt('host',
               default=socket.gethostname(),
               help="Name of this machine"),
    cfg.BoolOpt('use_ipv6',
                default=False,
                help="Use IPv6"),
]
CONF.register_opts(addr_opts)
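# Illustrative usage (not part of the original module): a minimal sketch of
# reading the options registered above once oslo.config has parsed its
# sources. The empty argument list passed to CONF is just a placeholder.
if __name__ == '__main__':
    CONF([])  # parse with no command-line arguments
    print("my_ip   : %s" % CONF.my_ip)
    print("host    : %s" % CONF.host)
    print("use_ipv6: %s" % CONF.use_ipv6)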
|
{
"content_hash": "a7f8d8b981e3fd7f0abeaee4f35d4811",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 63,
"avg_line_length": 23.939393939393938,
"alnum_prop": 0.5341772151898734,
"repo_name": "homhei/glance",
"id": "8ef1feec2ccc7a95f532d8cbb59e87e3f99a283e",
"size": "872",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "glance/utils/addressconf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "357559"
},
{
"name": "Shell",
"bytes": "341"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('gwasdb', '0011_auto_20170809_0946'),
]
operations = [
migrations.AddField(
model_name='study',
name='create_date',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='study',
name='update_date',
field=models.DateTimeField(blank=True, default=None, null=True),
),
]
|
{
"content_hash": "7890e06340293ff3b39b6c2cd90982d4",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 93,
"avg_line_length": 26.72,
"alnum_prop": 0.6017964071856288,
"repo_name": "1001genomes/AraGWAS",
"id": "9029f734622dd2da649142a21c43721c399f9088",
"size": "741",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aragwas_server/gwasdb/migrations/0012_auto_20170811_1327.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "376"
},
{
"name": "Dockerfile",
"bytes": "1310"
},
{
"name": "HTML",
"bytes": "407"
},
{
"name": "JavaScript",
"bytes": "212102"
},
{
"name": "Python",
"bytes": "218225"
},
{
"name": "Shell",
"bytes": "7351"
},
{
"name": "TypeScript",
"bytes": "20680"
},
{
"name": "Vue",
"bytes": "290140"
}
],
"symlink_target": ""
}
|
from typing import Iterable
from typing import Union
from cleo.formatters.formatter import Formatter
from .output import Output
from .output import Type
from .output import Verbosity
class NullOutput(Output):
@property
def verbosity(self) -> Verbosity:
return Verbosity.QUIET
def is_decorated(self) -> bool:
return False
def decorated(self, decorated: bool = True) -> None:
pass
def supports_utf8(self) -> bool:
return True
def set_verbosity(self, verbosity: Verbosity) -> None:
pass
def is_quiet(self) -> bool:
return True
def is_verbose(self) -> bool:
return False
def is_very_verbose(self) -> bool:
return False
def is_debug(self) -> bool:
return False
def write_line(
self,
messages: Union[str, Iterable[str]],
verbosity: Verbosity = Verbosity.NORMAL,
type: Type = Type.NORMAL,
) -> None:
pass
def write(
self,
messages: Union[str, Iterable[str]],
new_line: bool = False,
verbosity: Verbosity = Verbosity.NORMAL,
type: Type = Type.NORMAL,
) -> None:
pass
def flush(self) -> None:
pass
def _write(self, message: str, new_line: bool = False) -> None:
pass
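# Illustrative usage (not part of the original module): a minimal sketch of
# NullOutput acting as a sink that silently discards whatever is written to
# it, e.g. when exercising code that expects an Output during tests. The
# behaviour shown follows this file; other cleo versions may differ.
if __name__ == "__main__":
    out = NullOutput()
    out.write_line("this message is discarded")
    out.write("so is this", new_line=True)
    assert out.is_quiet()
    assert out.verbosity is Verbosity.QUIET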
|
{
"content_hash": "938261c41d7f0fde23fee3cd3b312bbb",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 67,
"avg_line_length": 21.540983606557376,
"alnum_prop": 0.5974124809741248,
"repo_name": "sdispater/cleo",
"id": "5382514ce65180ed70fde0f1c1e674f9398825cc",
"size": "1314",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cleo/io/outputs/null_output.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "335366"
}
],
"symlink_target": ""
}
|
"""Email templates """
from email.mime.text import MIMEText
from hal.internet.email.utils import get_email_content
class EmailTemplate:
"""Default email template"""
def __init__(self,
recipient,
subject,
content_file,
footer_file, extra_args=None):
"""
:param recipient: Name and surname of email recipient
:param subject: Title of email
:param content_file: Path to file containing email actual content
:param footer_file: Path to file containing email footer (ending)
:param extra_args: Extra arguments and details about recipient
"""
self.recipient = str(recipient).title().strip()
self.email_subject = subject
self.content_file = str(content_file)
self.footer_file = str(footer_file)
self.data = {} if not extra_args else extra_args
def get_email_header(self):
"""Gets email header
:return: Email header
"""
return "<h2>Ciao " + str(self.recipient).title() + "!</h2><br>"
def get_email_footer(self):
"""Gets email footer
:return: Email text (html formatted)
"""
return get_email_content(self.footer_file)
def get_mime_message(self):
"""Gets email MIME message
:return: Email formatted as HTML ready to be sent
"""
message = MIMEText(
"<html>" +
self.get_email_header() +
get_email_content(self.content_file) +
self.get_email_footer() +
"</html>", "html"
)
message["subject"] = self.email_subject
return message
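# Illustrative usage (not part of the original module): a minimal, hedged
# sketch of building a message with the template above and handing it to
# smtplib. The recipient, addresses, file paths and SMTP host are placeholders.
if __name__ == "__main__":
    import smtplib

    template = EmailTemplate(
        recipient="ada lovelace",          # hypothetical recipient name
        subject="Weekly update",
        content_file="/tmp/content.html",  # hypothetical content file
        footer_file="/tmp/footer.html",    # hypothetical footer file
    )
    message = template.get_mime_message()
    message["To"] = "ada@example.com"      # hypothetical address
    message["From"] = "noreply@example.com"
    with smtplib.SMTP("localhost") as server:  # assumes a local SMTP server
        server.send_message(message)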
|
{
"content_hash": "92f589bffca30dbc2814bbfd4e8307d2",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 73,
"avg_line_length": 30.178571428571427,
"alnum_prop": 0.5739644970414202,
"repo_name": "sirfoga/hal",
"id": "fc13d1001097531f4c8a2f6858ad28da68973e5e",
"size": "1715",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "hal/internet/email/templates.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "101879"
}
],
"symlink_target": ""
}
|
import communication_app.models
def get_my_pets(request):
if not request.user:
return []
return communication_app.models.Pet.objects.filter(
user=request.user)
|
{
"content_hash": "29a380d02aff7acafcc0371669f4c62b",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 55,
"avg_line_length": 23.25,
"alnum_prop": 0.6881720430107527,
"repo_name": "petwitter/petwitter",
"id": "0f8160a01ee7685c9e453be4831281f12fdb8799",
"size": "186",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "thesite/communication_app/shortcuts.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "103"
},
{
"name": "HTML",
"bytes": "6362"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Python",
"bytes": "19551"
}
],
"symlink_target": ""
}
|
'''
Created on May 11, 2015
@author: yunli
'''
#### ================================================================
#### ================================================================
#### OpenClos Error Code
#### ================================================================
#### ================================================================
EC_OK = 0
# client side
# validation error at 1000 level
EC_INVALID_CONFIGURATION = 1000
EC_INVALID_REQUEST = 1001
EC_MISSING_MANDATORY_ATTRIBUTE = 1002
EC_INSUFFICIENT_LOOPBACK_IP = 1003
EC_INSUFFICIENT_VLAN_IP = 1004
EC_INSUFFICIENT_INTERCONNECT_IP = 1005
EC_INSUFFICIENT_MANAGEMENT_IP = 1006
EC_CAPACITY_CANNOT_CHANGE = 1007
EC_CAPACITY_MISMATCH = 1008
EC_ENUMERATION_MISMATCH = 1009
EC_INVALID_UPLINK_THRESHOLD = 1010
EC_INVALID_IP_FORMAT = 1011
# "not found" error at 1100 level
EC_POD_NOT_FOUND = 1100
EC_CABLING_PLAN_NOT_FOUND = 1101
EC_DEVICE_CONFIGURATION_NOT_FOUND = 1102
EC_DEVICE_NOT_FOUND = 1103
EC_IMAGE_NOT_FOUND = 1104
# server side
# error at 2000 level
EC_CREATE_POD_FAILED = 2000
EC_UPDATE_POD_FAILED = 2001
EC_DEVICE_CONNECT_FAILED = 2002
EC_DEVICE_RPC_FAILED = 2003
EC_L2_DATA_COLLECTION_FAILED = 2004
EC_L3_DATA_COLLECTION_FAILED = 2005
EC_TWO_STAGE_CONFIGURATION_FAILED = 2006
EC_TRAP_DAEMON_ERROR = 2007
dictErrorCode = {
EC_OK : "Success",
EC_INVALID_CONFIGURATION : "Invalid configuration: %s",
EC_INVALID_REQUEST : "Invalid request: %s",
EC_MISSING_MANDATORY_ATTRIBUTE : "Missing mandatory attribute: %s",
EC_INSUFFICIENT_LOOPBACK_IP : "Insufficient loopback ip: %s",
EC_INSUFFICIENT_VLAN_IP : "Insufficient vlan ip: %s",
EC_INSUFFICIENT_INTERCONNECT_IP : "Insufficient interconnect ip: %s",
EC_INSUFFICIENT_MANAGEMENT_IP : "Insufficient management ip: %s",
EC_CAPACITY_CANNOT_CHANGE : "Capacity cannot be changed: %s",
EC_CAPACITY_MISMATCH : "Device count does not match capacity: %s",
EC_ENUMERATION_MISMATCH : "Invalid enumeration value: %s",
EC_INVALID_UPLINK_THRESHOLD : "Invalid uplink threshold: %s",
EC_INVALID_IP_FORMAT : "Invalid ip format: %s",
EC_POD_NOT_FOUND : "Pod not found: %s",
EC_CABLING_PLAN_NOT_FOUND : "Cabling plan not found: %s",
EC_DEVICE_NOT_FOUND : "Device not found: %s",
EC_DEVICE_CONFIGURATION_NOT_FOUND : "Device configuration not found: %s",
EC_IMAGE_NOT_FOUND : "Image not found: %s",
EC_CREATE_POD_FAILED : "Failed to create pod: %s",
EC_UPDATE_POD_FAILED : "Failed to update pod: %s",
EC_DEVICE_CONNECT_FAILED : "Failed to connect to device: %s",
EC_DEVICE_RPC_FAILED : "Failed to execute RPC command on device: %s",
EC_L2_DATA_COLLECTION_FAILED : "Failed to collect L2 data: %s",
EC_L3_DATA_COLLECTION_FAILED : "Failed to collect L3 data: %s",
EC_TWO_STAGE_CONFIGURATION_FAILED : "Failed to execute two stage configuration: %s",
EC_TRAP_DAEMON_ERROR : "Trap daemon error: %s",
}
def getErrorMessage(errorCode):
assert errorCode in dictErrorCode.keys()
return dictErrorCode[errorCode]
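# Illustrative usage (not part of the original module): the messages above are
# '%s' templates, so callers are expected to interpolate the detail text after
# looking a message up. The pod and device names below are placeholders.
if __name__ == '__main__':
    print(getErrorMessage(EC_POD_NOT_FOUND) % 'pod-123')
    print(getErrorMessage(EC_DEVICE_CONNECT_FAILED) % 'leaf-01')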
|
{
"content_hash": "b3060444733608704906c2df905d095d",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 100,
"avg_line_length": 53.794871794871796,
"alnum_prop": 0.4659199237368923,
"repo_name": "Juniper/OpenClos",
"id": "a2408e7e95a77f1eff9c96557fffdab2dbef3101",
"size": "4196",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "jnpr/openclos/error.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "438927"
},
{
"name": "Shell",
"bytes": "838"
}
],
"symlink_target": ""
}
|
""" Support for toolset definition.
"""
import sys
import feature, property, generators, property_set
import b2.util.set
import bjam
from b2.util import cached, qualify_jam_action, is_iterable_typed, is_iterable
from b2.util.utility import *
from b2.util import bjam_signature, sequence
from b2.manager import get_manager
__re_split_last_segment = re.compile (r'^(.+)\.([^\.])*')
__re_two_ampersands = re.compile ('(&&)')
__re_first_segment = re.compile ('([^.]*).*')
__re_first_group = re.compile (r'[^.]*\.(.*)')
_ignore_toolset_requirements = '--ignore-toolset-requirements' not in sys.argv
# A Flag is a mechanism to set a variable's value conditionally.
# A single toolset flag specifies that when certain
# properties are in the build property set, certain values
# should be appended to some variable.
#
# A flag applies to a specific action in a specific module.
# The list of all flags for a module is stored, and each
# flag further contains the name of the rule it applies
# to.
class Flag:
def __init__(self, variable_name, values, condition, rule = None):
assert isinstance(variable_name, basestring)
assert is_iterable(values) and all(
isinstance(v, (basestring, type(None))) for v in values)
assert is_iterable_typed(condition, property_set.PropertySet)
assert isinstance(rule, (basestring, type(None)))
self.variable_name = variable_name
self.values = values
self.condition = condition
self.rule = rule
def __str__(self):
return("Flag(" + str(self.variable_name) + ", " + str(self.values) +\
", " + str(self.condition) + ", " + str(self.rule) + ")")
def reset ():
""" Clear the module state. This is mainly for testing purposes.
"""
global __module_flags, __flags, __stv
# Mapping from module name to a list of all flags that apply
# to either that module directly, or to any rule in that module.
# Each element of the list is Flag instance.
# So, for module named xxx this might contain flags for 'xxx',
# for 'xxx.compile', for 'xxx.compile.c++', etc.
__module_flags = {}
# Mapping from specific rule or module name to a list of Flag instances
# that apply to that name.
# Say, it might contain flags for 'xxx.compile.c++'. If there are
# entries for module name 'xxx', they are flags for 'xxx' itself,
# not including any rules in that module.
__flags = {}
    # A cache for variable settings. The key is generated from the rule name and the properties.
__stv = {}
reset ()
# FIXME: --ignore-toolset-requirements
def using(toolset_module, *args):
if isinstance(toolset_module, (list, tuple)):
toolset_module = toolset_module[0]
loaded_toolset_module= get_manager().projects().load_module(toolset_module, [os.getcwd()]);
loaded_toolset_module.init(*args)
# FIXME push-checking-for-flags-module ....
# FIXME: investigate existing uses of 'hack-hack' parameter
# in jam code.
@bjam_signature((["rule_or_module", "variable_name", "condition", "*"],
["values", "*"]))
def flags(rule_or_module, variable_name, condition, values = []):
""" Specifies the flags (variables) that must be set on targets under certain
conditions, described by arguments.
rule_or_module: If contains dot, should be a rule name.
The flags will be applied when that rule is
used to set up build actions.
If does not contain dot, should be a module name.
The flags will be applied for all rules in that
module.
If module for rule is different from the calling
module, an error is issued.
variable_name: Variable that should be set on target
condition A condition when this flag should be applied.
Should be set of property sets. If one of
those property sets is contained in build
properties, the flag will be used.
Implied values are not allowed:
"<toolset>gcc" should be used, not just
"gcc". Subfeatures, like in "<toolset>gcc-3.2"
are allowed. If left empty, the flag will
                          always be used.
                          Property sets may use value-less properties
                          ('<a>' vs. '<a>value') to match absent
                          properties. This allows separately matching
                             <architecture>/<address-model>64
                             <architecture>ia64/<address-model>
                          where both features are optional. Without this
                          syntax we'd be forced to define a "default" value.
values: The value to add to variable. If <feature>
is specified, then the value of 'feature'
will be added.
"""
assert isinstance(rule_or_module, basestring)
assert isinstance(variable_name, basestring)
assert is_iterable_typed(condition, basestring)
assert is_iterable(values) and all(isinstance(v, (basestring, type(None))) for v in values)
caller = bjam.caller()
if not '.' in rule_or_module and caller and caller[:-1].startswith("Jamfile"):
# Unqualified rule name, used inside Jamfile. Most likely used with
# 'make' or 'notfile' rules. This prevents setting flags on the entire
# Jamfile module (this will be considered as rule), but who cares?
# Probably, 'flags' rule should be split into 'flags' and
# 'flags-on-module'.
rule_or_module = qualify_jam_action(rule_or_module, caller)
else:
# FIXME: revive checking that we don't set flags for a different
# module unintentionally
pass
if condition and not replace_grist (condition, ''):
# We have condition in the form '<feature>', that is, without
# value. That's a previous syntax:
#
# flags gcc.link RPATH <dll-path> ;
# for compatibility, convert it to
# flags gcc.link RPATH : <dll-path> ;
values = [ condition ]
condition = None
if condition:
transformed = []
for c in condition:
# FIXME: 'split' might be a too raw tool here.
pl = [property.create_from_string(s,False,True) for s in c.split('/')]
pl = feature.expand_subfeatures(pl);
transformed.append(property_set.create(pl))
condition = transformed
property.validate_property_sets(condition)
__add_flag (rule_or_module, variable_name, condition, values)
def set_target_variables (manager, rule_or_module, targets, ps):
"""
"""
assert isinstance(rule_or_module, basestring)
assert is_iterable_typed(targets, basestring)
assert isinstance(ps, property_set.PropertySet)
settings = __set_target_variables_aux(manager, rule_or_module, ps)
if settings:
for s in settings:
for target in targets:
manager.engine ().set_target_variable (target, s [0], s[1], True)
def find_satisfied_condition(conditions, ps):
"""Returns the first element of 'property-sets' which is a subset of
'properties', or an empty list if no such element exists."""
assert is_iterable_typed(conditions, property_set.PropertySet)
assert isinstance(ps, property_set.PropertySet)
for condition in conditions:
found_all = True
for i in condition.all():
if i.value:
found = i.value in ps.get(i.feature)
else:
# Handle value-less properties like '<architecture>' (compare with
# '<architecture>x86').
# If $(i) is a value-less property it should match default
# value of an optional property. See the first line in the
# example below:
#
# property set properties result
# <a> <b>foo <b>foo match
# <a> <b>foo <a>foo <b>foo no match
# <a>foo <b>foo <b>foo no match
# <a>foo <b>foo <a>foo <b>foo match
found = not ps.get(i.feature)
found_all = found_all and found
if found_all:
return condition
return None
def register (toolset):
""" Registers a new toolset.
"""
assert isinstance(toolset, basestring)
feature.extend('toolset', [toolset])
def inherit_generators (toolset, properties, base, generators_to_ignore = []):
assert isinstance(toolset, basestring)
assert is_iterable_typed(properties, basestring)
assert isinstance(base, basestring)
assert is_iterable_typed(generators_to_ignore, basestring)
if not properties:
properties = [replace_grist (toolset, '<toolset>')]
base_generators = generators.generators_for_toolset(base)
for g in base_generators:
id = g.id()
if not id in generators_to_ignore:
# Some generator names have multiple periods in their name, so
# $(id:B=$(toolset)) doesn't generate the right new_id name.
# e.g. if id = gcc.compile.c++, $(id:B=darwin) = darwin.c++,
# which is not what we want. Manually parse the base and suffix
# (if there's a better way to do this, I'd love to see it.)
# See also register in module generators.
(base, suffix) = split_action_id(id)
new_id = toolset + '.' + suffix
generators.register(g.clone(new_id, properties))
def inherit_flags(toolset, base, prohibited_properties = []):
"""Brings all flag definitions from the 'base' toolset into the 'toolset'
toolset. Flag definitions whose conditions make use of properties in
    'prohibited-properties' are ignored. Note that a property is not the same
    thing as a feature: for example, <debug-symbols>on and <debug-symbols>off
    are distinct properties, so blocking one of them does not block the other.
The flag conditions are not altered at all, so if a condition includes a name,
or version of a base toolset, it won't ever match the inheriting toolset. When
such flag settings must be inherited, define a rule in base toolset module and
call it as needed."""
assert isinstance(toolset, basestring)
assert isinstance(base, basestring)
assert is_iterable_typed(prohibited_properties, basestring)
for f in __module_flags.get(base, []):
if not f.condition or b2.util.set.difference(f.condition, prohibited_properties):
match = __re_first_group.match(f.rule)
rule_ = None
if match:
rule_ = match.group(1)
new_rule_or_module = ''
if rule_:
new_rule_or_module = toolset + '.' + rule_
else:
new_rule_or_module = toolset
__add_flag (new_rule_or_module, f.variable_name, f.condition, f.values)
def inherit_rules(toolset, base):
engine = get_manager().engine()
new_actions = {}
for action_name, action in engine.actions.iteritems():
module, id = split_action_id(action_name)
if module == base:
new_action_name = toolset + '.' + id
# make sure not to override any existing actions
# that may have been declared already
if new_action_name not in engine.actions:
new_actions[new_action_name] = action
engine.actions.update(new_actions)
######################################################################################
# Private functions
@cached
def __set_target_variables_aux (manager, rule_or_module, ps):
""" Given a rule name and a property set, returns a list of tuples of
variables names and values, which must be set on targets for that
rule/properties combination.
"""
assert isinstance(rule_or_module, basestring)
assert isinstance(ps, property_set.PropertySet)
result = []
for f in __flags.get(rule_or_module, []):
if not f.condition or find_satisfied_condition (f.condition, ps):
processed = []
for v in f.values:
# The value might be <feature-name> so needs special
# treatment.
processed += __handle_flag_value (manager, v, ps)
for r in processed:
result.append ((f.variable_name, r))
# strip away last dot separated part and recurse.
next = __re_split_last_segment.match(rule_or_module)
if next:
result.extend(__set_target_variables_aux(
manager, next.group(1), ps))
return result
def __handle_flag_value (manager, value, ps):
assert isinstance(value, basestring)
assert isinstance(ps, property_set.PropertySet)
result = []
if get_grist (value):
f = feature.get(value)
values = ps.get(f)
for value in values:
if f.dependency:
# the value of a dependency feature is a target
# and must be actualized
result.append(value.actualize())
elif f.path or f.free:
# Treat features with && in the value
# specially -- each &&-separated element is considered
# separate value. This is needed to handle searched
# libraries, which must be in specific order.
if not __re_two_ampersands.search(value):
result.append(value)
else:
result.extend(value.split ('&&'))
else:
result.append (value)
else:
result.append (value)
return sequence.unique(result, stable=True)
def __add_flag (rule_or_module, variable_name, condition, values):
""" Adds a new flag setting with the specified values.
Does no checking.
"""
assert isinstance(rule_or_module, basestring)
assert isinstance(variable_name, basestring)
assert is_iterable_typed(condition, property_set.PropertySet)
assert is_iterable(values) and all(
isinstance(v, (basestring, type(None))) for v in values)
f = Flag(variable_name, values, condition, rule_or_module)
# Grab the name of the module
m = __re_first_segment.match (rule_or_module)
assert m
module = m.group(1)
__module_flags.setdefault(module, []).append(f)
__flags.setdefault(rule_or_module, []).append(f)
__requirements = []
def requirements():
"""Return the list of global 'toolset requirements'.
Those requirements will be automatically added to the requirements of any main target."""
return __requirements
def add_requirements(requirements):
"""Adds elements to the list of global 'toolset requirements'. The requirements
will be automatically added to the requirements for all main targets, as if
they were specified literally. For best results, all requirements added should
be conditional or indirect conditional."""
assert is_iterable_typed(requirements, basestring)
if _ignore_toolset_requirements:
__requirements.extend(requirements)
# Make toolset 'toolset', defined in a module of the same name,
# inherit from 'base'
# 1. The 'init' rule from 'base' is imported into 'toolset' with full
# name. Another 'init' is called, which forwards to the base one.
# 2. All generators from 'base' are cloned. The ids are adjusted and
# <toolset> property in requires is adjusted too
# 3. All flags are inherited
# 4. All rules are imported.
def inherit(toolset, base):
assert isinstance(toolset, basestring)
assert isinstance(base, basestring)
get_manager().projects().load_module(base, ['.']);
inherit_generators(toolset, [], base)
inherit_flags(toolset, base)
inherit_rules(toolset, base)
|
{
"content_hash": "06dbff6d04f03e6dcbe5d295366ff9e5",
"timestamp": "",
"source": "github",
"line_count": 408,
"max_line_length": 96,
"avg_line_length": 39.23039215686274,
"alnum_prop": 0.6121454454579532,
"repo_name": "keichan100yen/ode-ext",
"id": "cf2b24b2e79a122afc9d4461244d97c1d14e42fc",
"size": "16331",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "boost/tools/build/src/build/toolset.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "309067"
},
{
"name": "Batchfile",
"bytes": "37875"
},
{
"name": "C",
"bytes": "2967570"
},
{
"name": "C#",
"bytes": "40804"
},
{
"name": "C++",
"bytes": "189322982"
},
{
"name": "CMake",
"bytes": "119251"
},
{
"name": "CSS",
"bytes": "456744"
},
{
"name": "Cuda",
"bytes": "52444"
},
{
"name": "DIGITAL Command Language",
"bytes": "6246"
},
{
"name": "Fortran",
"bytes": "1856"
},
{
"name": "Groff",
"bytes": "5189"
},
{
"name": "HTML",
"bytes": "181460055"
},
{
"name": "IDL",
"bytes": "28"
},
{
"name": "JavaScript",
"bytes": "419776"
},
{
"name": "Lex",
"bytes": "1231"
},
{
"name": "M4",
"bytes": "29689"
},
{
"name": "Makefile",
"bytes": "1088024"
},
{
"name": "Max",
"bytes": "36857"
},
{
"name": "Objective-C",
"bytes": "11406"
},
{
"name": "Objective-C++",
"bytes": "630"
},
{
"name": "PHP",
"bytes": "68641"
},
{
"name": "Perl",
"bytes": "36491"
},
{
"name": "Perl6",
"bytes": "2053"
},
{
"name": "Python",
"bytes": "1612978"
},
{
"name": "QML",
"bytes": "593"
},
{
"name": "QMake",
"bytes": "16692"
},
{
"name": "Rebol",
"bytes": "354"
},
{
"name": "Ruby",
"bytes": "5532"
},
{
"name": "Shell",
"bytes": "354720"
},
{
"name": "Tcl",
"bytes": "1172"
},
{
"name": "TeX",
"bytes": "32117"
},
{
"name": "XSLT",
"bytes": "553585"
},
{
"name": "Yacc",
"bytes": "19623"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from setuptools import setup, find_packages
install_requires = [
"Jinja2>=2.8",
"boto>=2.36.0",
"httpretty==0.8.10",
"requests",
"xmltodict",
"six",
"werkzeug",
"pytz",
"python-dateutil",
]
extras_require = {
# No builtin OrderedDict before 2.7
':python_version=="2.6"': ['ordereddict'],
'server': ['flask'],
}
setup(
name='moto',
version='0.4.31',
description='A library that allows your python tests to easily'
' mock out the boto library',
author='Steve Pulec',
author_email='spulec@gmail.com',
url='https://github.com/spulec/moto',
entry_points={
'console_scripts': [
'moto_server = moto.server:main',
],
},
packages=find_packages(exclude=("tests", "tests.*")),
install_requires=install_requires,
extras_require=extras_require,
license="Apache",
test_suite="tests",
classifiers=[
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"License :: OSI Approved :: Apache Software License",
"Topic :: Software Development :: Testing",
],
)
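# Illustrative note (not part of the original file): the 'server' extra above
# pulls in flask for the standalone mock server exposed through the
# moto_server console script declared in entry_points, e.g.
#   pip install moto[server]
#   moto_server ...   # exact arguments depend on the moto version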
|
{
"content_hash": "cc8a1af38eefef21e3f6e4339ac03cf5",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 67,
"avg_line_length": 26.8,
"alnum_prop": 0.5850746268656717,
"repo_name": "silveregg/moto",
"id": "bfd8bbb87d308f110c69958a367cfd8950995296",
"size": "1362",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "230"
},
{
"name": "Python",
"bytes": "2435907"
}
],
"symlink_target": ""
}
|
LB_METHOD_ROUND_ROBIN = 'ROUND_ROBIN'
PROTOCOL_TERMINATED_HTTPS = 'TERMINATED_HTTPS'
PROTOCOL_HTTP = 'HTTP'
PROTOCOL_TCP = 'TCP'
PROTOCOL_HTTPS = 'HTTPS'
# Statuses for a10 objects (certificate bindings)
STATUS_CREATING = 0
STATUS_CREATED = 1
STATUS_DELETING = 2
|
{
"content_hash": "33e3479650c0d035178006f515254ca2",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 49,
"avg_line_length": 26.4,
"alnum_prop": 0.75,
"repo_name": "hthompson6/a10-neutron-lbaas",
"id": "54a911eb19ccd83762247357e178407cc16accad",
"size": "838",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "a10_neutron_lbaas/constants.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1083"
},
{
"name": "Python",
"bytes": "543752"
},
{
"name": "Shell",
"bytes": "6672"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from .common import *
########## IN-MEMORY TEST DATABASE
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": ":test:",
"USER": "",
"PASSWORD": "",
"HOST": "",
"PORT": "",
},
}
PASSWORD_HASHERS = (
'django.contrib.auth.hashers.MD5PasswordHasher',
)
|
{
"content_hash": "6d684ad0a937cf32d3709c09325394db",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 52,
"avg_line_length": 19.63157894736842,
"alnum_prop": 0.5254691689008043,
"repo_name": "stepanovsh/project_template",
"id": "c676e9b06db3c1bb55a57837f56c185898680f53",
"size": "397",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "{{cookiecutter.repo_name}}/config/settings/test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5140"
},
{
"name": "CSS",
"bytes": "778"
},
{
"name": "HTML",
"bytes": "20928"
},
{
"name": "JavaScript",
"bytes": "45"
},
{
"name": "Makefile",
"bytes": "5652"
},
{
"name": "Python",
"bytes": "52732"
},
{
"name": "Ruby",
"bytes": "1378"
},
{
"name": "Shell",
"bytes": "2375"
}
],
"symlink_target": ""
}
|
import re, email, html, sys
html_hack_re = re.compile(r"<!\w{1,8}>")
html_tag_re = re.compile(r"<.*?>")
word_re = re.compile(r"\w(?:[\w']+)\w")
banned_attrs = {"src":None, "href":None, "alt":None,
"id":None, "action":None, "background":None}
def unique(i):
u = {}
for t in i:
if t not in u:
u[t] = None
return u.keys()
def mangle(prefix, list):
return [prefix + x for x in list]
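# Illustrative note (not part of the original module): mangle() prefixes every
# token and unique() de-duplicates while discarding order, e.g.
#   mangle("BODY", ["spam", "ham"])  ->  ["BODYspam", "BODYham"]
#   unique(["a", "b", "a"])          ->  ["a", "b"] (in arbitrary order)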
def tokenize_body(msg, config):
if msg.is_multipart():
rv = []
for m in msg.get_payload():
rv += tokenize(m, config)
return rv
else:
type = msg.get("content-type", "text/plain")
if type.startswith("text/"):
payload = msg.get_payload(decode=True)
if payload:
tokens = []
if type.startswith("text/html"):
try:
(payload, tags) = html.parse(payload)
tags = [(x,y) for (x,y) in tags
if x not in banned_attrs]
tags = [y and "%s=%s" % (x,y) or x for (x,y) in tags]
tokens += mangle("HTML", [x[:251] for x in tags])
except Exception, e:
# print >> sys.stderr, "crap:", e
tokens += ["BUGhtml"]
try:
payload = html_tag_re.sub("", payload)
except:
pass
words = word_re.findall(payload)
tokens += mangle("BODY",
[x for x in words if 3 <= len(x) <= 20])
if len(words) > 1 and config.double:
tokens += mangle("BODY",
["%s %s".lower() % (x, y)
for (x,y) in zip(words[:-1], words[1:])
if 3 <= len(x) <= 20 and 3 <= len(y) <= 20])
for key, body in config.bodies.iteritems():
tokens += body.get_tokens(payload)
return tokens
return []
def tokenize_headers(msg, config):
tokens = []
for key, header in config.headers.iteritems():
tokens += header.get_tokens(msg.get_all(header.header, []))
return tokens
def check_SA_part(msg):
part = msg.get_payload(0).get_payload(decode=True)
return part and part.find("---- Start SpamAssassin results") >= 0
def tokenize(msg, config):
tokens = tokenize_headers(msg, config)
# if the message was rewritten by SA, then slurp out the original
# message and work on that
if (msg.get("content-type", "").startswith("multipart/mixed")
and msg.get("X-Spam-Status", "").startswith("Yes")
and check_SA_part(msg)):
tokens += tokenize(msg.get_payload(1), config)
else:
tokens += tokenize_body(msg, config)
tokens = unique(tokens)
return tokens
if __name__ == "__main__":
import email.Parser, sys, config, util
opt = util.Options()
opt.getopt(sys.argv[1:], "c")
config = config.Config(opt["c"])
parser = email.Parser.Parser()
    msg = parser.parse(open(opt.arg[0]))
toks = tokenize(msg, config)
toks.sort()
print toks
|
{
"content_hash": "adf36a7b60f2cade2d98e842893aa920",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 82,
"avg_line_length": 33.24,
"alnum_prop": 0.47924187725631767,
"repo_name": "mizerlou/hammer",
"id": "fd2f727a7112b5d4efc3b141e9836281f938a97c",
"size": "3324",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hammer/tokens.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "34846"
}
],
"symlink_target": ""
}
|
__author__ = 'mpetyx'
import xlrd
import csv
import json
def csv_from_excel():
wb = xlrd.open_workbook('SampleData.xls')
names = wb.sheet_names()
sh = wb.sheet_by_name(names[0])
your_csv_file = open('your_csv_file.csv', 'wb')
wr = csv.writer(your_csv_file, quoting=csv.QUOTE_ALL)
for rownum in xrange(sh.nrows):
# print sh.row_values(rownum)
try:
wr.writerow(sh.row_values(rownum))
except :
continue
your_csv_file.close()
def json_from_csv():
f = open( 'your_csv_file.csv', 'rU' )
# Change each fieldname to the appropriate field name. I know, so difficult.
reader = csv.DictReader( f)#, fieldnames = ( "fieldname0","fieldname1","fieldname2","fieldname3","fieldname4","fieldname5" ))
    # Parse the CSV into JSON.
    print reader
    # Read the rows once: DictReader is a one-shot iterator, so iterating it
    # here and then again inside json.dumps() would serialize an empty list.
    rows = [ row for row in reader ]
    for row in rows:
        print row
    out = json.dumps( rows )
print "JSON parsed!"
# Save the JSON
f = open( 'your_csv_file.json', 'w')
f.write(out)
print "JSON saved!"
csv_from_excel()
json_from_csv()
|
{
"content_hash": "0f387403a49559e1bf06dacd3839cae7",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 129,
"avg_line_length": 24.377777777777776,
"alnum_prop": 0.6071103008204193,
"repo_name": "Suite5/DataColibri",
"id": "60746f715d3741a92f3106b845869bbca6e10c41",
"size": "1097",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "engage/jsonHandler/ExceltoCsv.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "381"
},
{
"name": "CSS",
"bytes": "944246"
},
{
"name": "HTML",
"bytes": "566711"
},
{
"name": "JavaScript",
"bytes": "1510227"
},
{
"name": "PHP",
"bytes": "972"
},
{
"name": "Python",
"bytes": "1046512"
},
{
"name": "Shell",
"bytes": "79"
}
],
"symlink_target": ""
}
|
##
## Server for TCP octet trickling tests.
##
from twisted.internet import reactor, protocol
class TricklingServerProtocol(protocol.Protocol):
def __init__(self):
pass
def connectionMade(self):
print "client accepted"
self.transport.setTcpNoDelay(True)
self.stats = {}
def connectionLost(self, reason):
print "client lost"
for s in sorted(self.stats):
print "%dx chop of length %d" % (self.stats[s], s)
def dataReceived(self, data):
l = len(data)
self.stats[l] = self.stats.get(l, 0) + 1
#print data
class TricklingServerFactory(protocol.ServerFactory):
protocol = TricklingServerProtocol
def __init__(self):
pass
def startFactory(self):
pass
def stopFactory(self):
pass
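# Illustrative helper (not part of the original script): a minimal client
# sketch that "trickles" a payload one octet at a time to the server below so
# its per-chop statistics can be observed. Host, port, payload and delay are
# placeholders; plain sockets are used here instead of Twisted for brevity.
def trickle_client(host='127.0.0.1', port=9000, payload='hello trickling', delay=0.05):
    import socket
    import time
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
    sock.connect((host, port))
    for i in range(len(payload)):
        sock.send(payload[i:i + 1])  # one octet per send() call
        time.sleep(delay)            # let the server see each chop separately
    sock.close()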
if __name__ == '__main__':
factory = TricklingServerFactory()
reactor.listenTCP(9000, factory)
reactor.run()
|
{
"content_hash": "aa04924ad3452764a60f34dc552b3b2d",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 59,
"avg_line_length": 20.782608695652176,
"alnum_prop": 0.6098326359832636,
"repo_name": "operasoftware/presto-testo",
"id": "2456411db6cdfb0539da4964129b58724b28f6fb",
"size": "1738",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "wpt/websockets/autobahn/oberstet-Autobahn-643d2ee/test/trickling/trickling_server.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "2312"
},
{
"name": "ActionScript",
"bytes": "23470"
},
{
"name": "AutoHotkey",
"bytes": "8832"
},
{
"name": "Batchfile",
"bytes": "5001"
},
{
"name": "C",
"bytes": "116512"
},
{
"name": "C++",
"bytes": "279128"
},
{
"name": "CSS",
"bytes": "208905"
},
{
"name": "Groff",
"bytes": "674"
},
{
"name": "HTML",
"bytes": "106576719"
},
{
"name": "Haxe",
"bytes": "3874"
},
{
"name": "Java",
"bytes": "185827"
},
{
"name": "JavaScript",
"bytes": "22531460"
},
{
"name": "Makefile",
"bytes": "13409"
},
{
"name": "PHP",
"bytes": "524372"
},
{
"name": "POV-Ray SDL",
"bytes": "6542"
},
{
"name": "Perl",
"bytes": "321672"
},
{
"name": "Python",
"bytes": "954636"
},
{
"name": "Ruby",
"bytes": "1006850"
},
{
"name": "Shell",
"bytes": "12140"
},
{
"name": "Smarty",
"bytes": "1860"
},
{
"name": "XSLT",
"bytes": "2567445"
}
],
"symlink_target": ""
}
|
"""
The :ref:`aiomysql` connector for MySQL/MariaDB databases.
"""
import asyncio
import logging
import typing
import aiomysql
import pymysql
from asyncqlio.backends.base import BaseConnector, BaseResultSet, BaseTransaction, DictRow
from asyncqlio.exc import DatabaseException, IntegrityError
logger = logging.getLogger(__name__)
# hijack aiomysql a bit
aiomysql.DictCursor.dict_type = DictRow
class AiomysqlResultSet(BaseResultSet):
"""
Represents a result set returned by the MySQL database.
"""
def __init__(self, cursor: aiomysql.DictCursor):
self.cursor = cursor
self._keys = None
@property
def keys(self):
return self._keys
async def close(self):
return await self.cursor.close()
async def fetch_row(self) -> typing.Dict[typing.Any, typing.Any]:
"""
Fetches the next row in this result set.
"""
row = await self.cursor.fetchone()
if self._keys is None and row is not None:
self._keys = row.keys()
return row
async def fetch_many(self, n: int):
"""
Fetches the next N rows.
"""
return await self.cursor.fetchmany(size=n)
async def fetch_all(self):
"""
Fetches ALL the rows.
"""
return await self.cursor.fetchall()
class AiomysqlTransaction(BaseTransaction):
"""
Represents a transaction for aiomysql.
"""
def __init__(self, connector: 'AiomysqlConnector'):
super().__init__(connector)
#: The current acquired connection for this transaction.
self.connection = None # type: aiomysql.Connection
async def close(self, *, has_error: bool = False):
"""
Closes the current connection.
"""
if has_error:
self.connection.close()
# release it back to the pool so we don't eat all the connections
self.connector.pool.release(self.connection)
async def begin(self):
"""
Begins the current transaction.
"""
self.connection = await self.connector.pool.acquire() # type: aiomysql.Connection
await self.connection.begin()
return self
async def execute(self, sql: str, params=None):
"""
Executes some SQL in the current transaction.
"""
# parse DictCursor in order to get a dict-like cursor back
# this will use the custom DictRow class passed from before
cursor = await self.connection.cursor(cursor=aiomysql.DictCursor)
        # the docs say params must be a list/tuple,
        # but a dict can be passed in as well;
        # we don't fully trust this behaviour, though.
try:
res = await cursor.execute(sql, params)
except pymysql.err.IntegrityError as e:
raise IntegrityError(*e.args)
except (pymysql.err.ProgrammingError, pymysql.err.InternalError) as e:
raise DatabaseException(*e.args)
finally:
await cursor.close()
return res
async def cursor(self, sql: str, params: typing.Union[typing.Mapping, typing.Iterable] = None) \
-> 'AiomysqlResultSet':
"""
Returns a :class:`.AiomysqlResultSet` for the specified SQL.
"""
logger.debug("Executing query {} with params {}".format(sql, params))
cursor = await self.connection.cursor(cursor=aiomysql.DictCursor)
await cursor.execute(sql, params)
return AiomysqlResultSet(cursor)
async def rollback(self, checkpoint: str = None):
"""
Rolls back the current transaction.
:param checkpoint: Ignored.
"""
await self.connection.rollback()
async def commit(self):
"""
Commits the current transaction.
"""
await self.connection.commit()
class AiomysqlConnector(BaseConnector):
"""
A connector that uses the `aiomysql <https://github.com/aio-libs/aiomysql>`_ library.
"""
def __init__(self, dsn):
super().__init__(dsn)
#: The current connection pool for this connector.
self.pool = None # type: aiomysql.Pool
async def connect(self, *, loop: asyncio.AbstractEventLoop = None) -> 'AiomysqlConnector':
"""
Connects this connector.
"""
        # aiomysql doesn't support a nice dsn
port = self.port or 3306
loop = loop or asyncio.get_event_loop()
# XXX: Force SQL mode to be ANSI.
# This means we don't break randomly, because we attempt to use ANSI when possible.
self.params['sql_mode'] = 'ansi'
logger.info("Connecting to MySQL on mysql://{}:{}/{}".format(self.host, port, self.db))
self.pool = await aiomysql.create_pool(host=self.host, user=self.username,
password=self.password, port=port,
db=self.db, loop=loop, **self.params)
return self
async def close(self, forcefully: bool = False):
"""
Closes this connector.
"""
if forcefully:
self.pool.terminate()
else:
self.pool.close()
await self.pool.wait_closed()
def get_transaction(self) -> BaseTransaction:
"""
Gets a new transaction object.
"""
return AiomysqlTransaction(self)
def emit_param(self, name: str) -> str:
if pymysql.paramstyle == "pyformat":
return "%({})s".format(name)
elif pymysql.paramstyle == "named":
return ":{}".format(name)
else:
raise ValueError("Cannot work with paramstyle {}".format(pymysql.paramstyle))
async def get_db_server_version(self):
tr = self.get_transaction()
async with tr:
cur = await tr.cursor("SELECT VERSION();")
row = await cur.fetch_row()
return row[0]
CONNECTOR_TYPE = AiomysqlConnector
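# Illustrative usage (not part of the original module): a minimal, hedged
# sketch of driving the connector above directly. The DSN is a placeholder and
# is assumed to be parsed by BaseConnector into host/port/user/password/db;
# error handling and result typing are omitted for brevity.
async def _example(dsn):
    connector = AiomysqlConnector(dsn)
    await connector.connect()
    transaction = connector.get_transaction()
    await transaction.begin()
    try:
        results = await transaction.cursor("SELECT 1 AS one;")
        row = await results.fetch_row()
        print(row)
        await results.close()
        await transaction.commit()
    finally:
        await transaction.close()
        await connector.close()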
|
{
"content_hash": "1a9aeacf3ef240b96fd03b66fbdc2834",
"timestamp": "",
"source": "github",
"line_count": 195,
"max_line_length": 100,
"avg_line_length": 30.466666666666665,
"alnum_prop": 0.5998990069011951,
"repo_name": "SunDwarf/asyncqlio",
"id": "db07814a718ebc093bdc50e72cd03ad7ad36c16a",
"size": "5941",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "asyncqlio/backends/mysql/aiomysql.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "256060"
}
],
"symlink_target": ""
}
|
"""Run through the python agents tutorial as a user would.
"""
import sys
import unittest
import os
import subprocess
import yaml
from integration.test import geopm_test_launcher
from integration.test import util
@util.skip_unless_do_launch()
@util.skip_unless_stressng()
class TestIntegration_tutorial_python_agents(unittest.TestCase):
@classmethod
def setUpClass(cls):
sys.stdout.write('(' + os.path.basename(__file__).split('.')[0] +
'.' + cls.__name__ + ') ...')
cls._test_name = 'python_agents_tutorial'
cls._skip_launch = not util.do_launch()
cls._script_dir = os.path.dirname(os.path.realpath(__file__))
cls._base_dir = os.path.dirname(os.path.dirname(cls._script_dir))
cls._tutorial_dir = os.path.join(cls._base_dir, 'tutorial', 'python_agents')
cls._readme_path = os.path.join(cls._tutorial_dir, 'README.rst')
cls._expected_stress_ng_timeout_seconds = 5
cls._expected_frequency_limit = 1.5e9
if not cls._skip_launch:
cls.launch()
def tearDown(self):
if sys.exc_info() != (None, None, None):
TestIntegration_tutorial_python_agents._keep_files = True
@classmethod
def launch(cls):
"Run the tutorial scripts"
script_bodies = util.get_scripts_from_readme(cls._readme_path)
cls._initial_frequency_control = geopm_test_launcher.geopmread(
'CPU_FREQUENCY_MAX_CONTROL board 0')
for script_body in script_bodies:
print('Executing:', script_body)
subprocess.check_call(script_body, shell=True, cwd=cls._tutorial_dir)
cls._final_frequency_control = geopm_test_launcher.geopmread(
'CPU_FREQUENCY_MAX_CONTROL board 0')
def test_monitor_report(self):
with open(os.path.join(self._tutorial_dir, 'stress-monitor.report')) as f:
report = yaml.load(f, Loader=yaml.SafeLoader)
self.assertEqual(dict(), report['Policy']['Initial Controls'])
host_data = next(iter(report['Hosts'].values()))
self.assertAlmostEqual(
self._expected_stress_ng_timeout_seconds,
host_data['Application Totals']['runtime (s)'],
places=0)
def test_package_energy_report(self):
with open(os.path.join(self._tutorial_dir, 'stress-package-energy.report')) as f:
report = yaml.load(f, Loader=yaml.SafeLoader)
self.assertEqual(dict(), report['Policy']['Initial Controls'])
host_data = next(iter(report['Hosts'].values()))
self.assertAlmostEqual(
self._expected_stress_ng_timeout_seconds,
host_data['Application Totals']['runtime (s)'],
places=0)
self.assertGreater(
host_data['Application Totals']['CPU_ENERGY@package-0'],
host_data['Application Totals']['CPU_ENERGY@package-1'])
def test_frequency_limit_report(self):
with open(os.path.join(self._tutorial_dir, 'stress-frequency-limit.report')) as f:
report = yaml.load(f, Loader=yaml.SafeLoader)
self.assertAlmostEqual(
self._expected_frequency_limit,
report['Policy']['Initial Controls']['CPU_FREQUENCY_MAX_CONTROL'],
places=0)
host_data = next(iter(report['Hosts'].values()))
self.assertAlmostEqual(
self._expected_stress_ng_timeout_seconds,
host_data['Application Totals']['runtime (s)'],
places=0)
self.assertGreater(
host_data['Application Totals']['CPU_ENERGY@package-0'],
host_data['Application Totals']['CPU_ENERGY@package-1'])
# Test that the frequency control had an effect for the duration of
# the reported application. This loosely tests that the next P-state
# above the request was not achieved.
self.assertLess(
host_data['Application Totals']['frequency (Hz)'],
self._expected_frequency_limit + 1e8)
# Test that the agent correctly applies controls between
# the controller's save/restore events.
self.assertEqual(self._initial_frequency_control, self._final_frequency_control)
if __name__ == '__main__':
# Call do_launch to clear non-pyunit command line option
util.do_launch()
unittest.main()
|
{
"content_hash": "808294be4d476078e7ce12bf9109a65a",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 92,
"avg_line_length": 42.198113207547166,
"alnum_prop": 0.6078694388553544,
"repo_name": "geopm/geopm",
"id": "50111bcc52332b11fa9e407af73fc40e6dcea8bf",
"size": "4589",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "integration/test/test_tutorial_python_agents.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "342266"
},
{
"name": "C++",
"bytes": "3265994"
},
{
"name": "Fortran",
"bytes": "106333"
},
{
"name": "HTML",
"bytes": "1251"
},
{
"name": "M4",
"bytes": "50970"
},
{
"name": "Makefile",
"bytes": "171548"
},
{
"name": "Nasal",
"bytes": "2898"
},
{
"name": "Python",
"bytes": "1180435"
},
{
"name": "Shell",
"bytes": "148018"
}
],
"symlink_target": ""
}
|
"""Control Flow Operations. See the @{python/control_flow_ops} guide.
@@identity
@@tuple
@@group
@@no_op
@@count_up_to
@@cond
@@case
@@while_loop
@@logical_and
@@logical_not
@@logical_or
@@logical_xor
@@equal
@@not_equal
@@less
@@less_equal
@@greater
@@greater_equal
@@where
@@is_finite
@@is_inf
@@is_nan
@@verify_tensor_all_finite
@@check_numerics
@@add_check_numerics_ops
@@Assert
@@Print
"""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.protobuf import control_flow_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_control_flow_ops
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import gen_logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import tensor_array_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import,undefined-variable
from tensorflow.python.ops.gen_control_flow_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
# We override the 'tuple' for a control flow op, so we keep python's
# existing 'tuple' for later use in this module.
_basetuple = tuple
# pylint: disable=protected-access
# Assert and Print are special symbols in python, so we must
# use an upper-case version of them.
def Assert(condition, data, summarize=None, name=None):
"""Asserts that the given condition is true.
If `condition` evaluates to false, print the list of tensors in `data`.
`summarize` determines how many entries of the tensors to print.
NOTE: To ensure that Assert executes, one usually attaches a dependency:
```python
# Ensure maximum element of x is smaller or equal to 1
assert_op = tf.Assert(tf.less_equal(tf.reduce_max(x), 1.), [x])
with tf.control_dependencies([assert_op]):
... code using x ...
```
Args:
condition: The condition to evaluate.
data: The tensors to print out when condition is false.
summarize: Print this many entries of each tensor.
name: A name for this operation (optional).
Returns:
assert_op: An `Operation` that, when executed, raises a
`tf.errors.InvalidArgumentError` if `condition` is not true.
"""
with ops.name_scope(name, "Assert", [condition, data]) as name:
xs = ops.convert_n_to_tensor(data)
if all([x.dtype in {dtypes.string, dtypes.int32} for x in xs]):
# As a simple heuristic, we assume that string and int32 are
      # on host to avoid the need to use cond. If that is not the case,
# we will pay the price copying the tensor to host memory.
return gen_logging_ops._assert(
condition, data, summarize, name="Assert")
else:
condition = ops.convert_to_tensor(condition, name="Condition")
def true_assert():
return gen_logging_ops._assert(
condition, data, summarize, name="Assert")
guarded_assert = cond(
condition, no_op, true_assert, name="AssertGuard")
return guarded_assert.op
def _Identity(data, name=None):
"""Return a tensor with the same shape and contents as the input tensor.
Args:
data: A Tensor.
name: A name for this operation (optional).
Returns:
A Tensor with the same type and value as the input Tensor.
"""
data = ops.internal_convert_to_tensor_or_indexed_slices(data, as_ref=True)
if isinstance(data, ops.Tensor):
if data.dtype._is_ref_dtype: # pylint: disable=protected-access
return gen_array_ops._ref_identity(data, name=name)
else:
return array_ops.identity(data, name=name)
else:
if not isinstance(data, (ops.IndexedSlices, sparse_tensor.SparseTensor)):
raise TypeError("Type %s not supported" % type(data))
values = _Identity(data.values, name=name)
indices = array_ops.identity(data.indices, name="indices")
if isinstance(data, ops.IndexedSlices):
dense_shape = data.dense_shape
if dense_shape is not None:
dense_shape = array_ops.identity(dense_shape, name="dense_shape")
return ops.IndexedSlices(values, indices, dense_shape)
else:
dense_shape = array_ops.identity(data.dense_shape, name="dense_shape")
return sparse_tensor.SparseTensor(indices, values, dense_shape)
def _NextIteration(data, name=None):
data = ops.internal_convert_to_tensor_or_indexed_slices(data, as_ref=True)
if isinstance(data, ops.Tensor):
if data.dtype._is_ref_dtype: # pylint: disable=protected-access
return ref_next_iteration(data, name=name)
else:
return next_iteration(data, name=name)
else:
if not isinstance(data, (ops.IndexedSlices, sparse_tensor.SparseTensor)):
raise TypeError("Type %s not supported" % type(data))
values = _NextIteration(data.values, name=name)
indices = next_iteration(data.indices, name="indices")
if isinstance(data, ops.IndexedSlices):
dense_shape = data.dense_shape
if dense_shape is not None:
dense_shape = next_iteration(dense_shape, name="dense_shape")
return ops.IndexedSlices(values, indices, dense_shape)
else:
dense_shape = next_iteration(data.dense_shape, name="dense_shape")
return sparse_tensor.SparseTensor(indices, values, dense_shape)
def _Enter(data, frame_name, is_constant=False, parallel_iterations=10,
use_ref=True, use_input_shape=True, name=None):
"""Creates or finds a child frame, and makes `data` available to it.
The unique `frame_name` is used by the `Executor` to identify frames. If
`is_constant` is true, `data` is a constant in the child frame; otherwise
it may be changed in the child frame. At most `parallel_iterations`
iterations are run in parallel in the child frame.
Args:
data: The tensor to be made available to the child frame.
frame_name: The name of the child frame.
is_constant: If true, the output is constant within the child frame.
parallel_iterations: The number of iterations allowed to run in parallel.
    use_ref: If true, use ref_enter if data is of ref type.
    use_input_shape: If true, propagate the static shape of `data` to the
      result of the enter op.
    name: A name for this operation (optional).
Returns:
The same tensor as `data`.
"""
data = ops.internal_convert_to_tensor_or_indexed_slices(data, as_ref=True)
if isinstance(data, ops.Tensor):
if data.dtype._is_ref_dtype and use_ref: # pylint: disable=protected-access
result = ref_enter(data, frame_name, is_constant, parallel_iterations,
name=name)
else:
result = enter(data, frame_name, is_constant, parallel_iterations,
name=name)
if use_input_shape:
result.set_shape(data.get_shape())
return result
else:
if not isinstance(data, (ops.IndexedSlices, sparse_tensor.SparseTensor)):
raise TypeError("Type %s not supported" % type(data))
values = _Enter(data.values, frame_name, is_constant,
parallel_iterations=parallel_iterations,
use_input_shape=use_input_shape, name=name)
indices = enter(data.indices, frame_name, is_constant,
parallel_iterations, name="indices")
if use_input_shape:
indices.set_shape(data.indices.get_shape())
if isinstance(data, ops.IndexedSlices):
dense_shape = data.dense_shape
if dense_shape is not None:
dense_shape = enter(dense_shape, frame_name, is_constant,
parallel_iterations, name="dense_shape")
if use_input_shape:
dense_shape.set_shape(data.dense_shape.get_shape())
return ops.IndexedSlices(values, indices, dense_shape)
else:
dense_shape = enter(data.dense_shape, frame_name, is_constant,
parallel_iterations, name="dense_shape")
if use_input_shape:
dense_shape.set_shape(data.dense_shape.get_shape())
return sparse_tensor.SparseTensor(indices, values, dense_shape)
def exit(data, name=None):
"""Exits the current frame to its parent frame.
Exit makes its input `data` available to the parent frame.
Args:
data: The tensor to be made available to the parent frame.
name: A name for this operation (optional).
Returns:
The same tensor as `data`.
"""
data = ops.internal_convert_to_tensor_or_indexed_slices(data, as_ref=True)
if isinstance(data, ops.Tensor):
if data.dtype._is_ref_dtype: # pylint: disable=protected-access
return gen_control_flow_ops._ref_exit(data, name)
else:
return gen_control_flow_ops._exit(data, name)
else:
if not isinstance(data, (ops.IndexedSlices, sparse_tensor.SparseTensor)):
raise TypeError("Type %s not supported" % type(data))
values = exit(data.values, name=name)
indices = gen_control_flow_ops._exit(data.indices, name="indices")
if isinstance(data, ops.IndexedSlices):
dense_shape = data.dense_shape
if dense_shape is not None:
dense_shape = gen_control_flow_ops._exit(dense_shape, name)
return ops.IndexedSlices(values, indices, dense_shape)
else:
dense_shape = gen_control_flow_ops._exit(data.dense_shape, name)
return sparse_tensor.SparseTensor(indices, values, dense_shape)
def switch(data, pred, dtype=None, name=None):
"""Forwards `data` to an output determined by `pred`.
  If `pred` is true, the `data` input is forwarded to `output_true`.
  Otherwise, the data goes to `output_false`.
This op handles `Tensor`s and `IndexedSlices`.
Args:
data: The tensor to be forwarded to the appropriate output.
pred: A scalar that specifies which output port will receive data.
    dtype: Optional element type for the returned tensor. If missing,
      the type is inferred from the type of `data`.
name: A name for this operation (optional).
Returns:
`(output_false, output_true)`: If `pred` is true, data will be forwarded
to `output_true`, otherwise it goes to `output_false`.
"""
with ops.name_scope(name, "Switch", [data, pred]) as name:
data = ops.internal_convert_to_tensor_or_indexed_slices(
data, dtype=dtype, name="data", as_ref=True)
pred = ops.convert_to_tensor(pred, name="pred")
if isinstance(data, ops.Tensor):
return gen_control_flow_ops._switch(data, pred, name=name)
else:
if not isinstance(data, (ops.IndexedSlices, sparse_tensor.SparseTensor)):
raise TypeError("Type %s not supported" % type(data))
val, ind = data.values, data.indices
val_f, val_t = gen_control_flow_ops._switch(val, pred, name=name)
ind_f, ind_t = gen_control_flow_ops._switch(ind, pred, name="indices")
if isinstance(data, ops.IndexedSlices):
dense_shape = data.dense_shape
if dense_shape is not None:
dense_shape_f, dense_shape_t = gen_control_flow_ops._switch(
dense_shape, pred, name="dense_shape")
else:
dense_shape_f, dense_shape_t = None, None
return (ops.IndexedSlices(val_f, ind_f, dense_shape_f),
ops.IndexedSlices(val_t, ind_t, dense_shape_t))
else:
dense_shape = data.dense_shape
dense_shape_f, dense_shape_t = gen_control_flow_ops._switch(
data.dense_shape, pred, name="dense_shape")
return (sparse_tensor.SparseTensor(ind_f, val_f, dense_shape_f),
sparse_tensor.SparseTensor(ind_t, val_t, dense_shape_t))
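# Hedged usage sketch (not part of the library): `switch` returns the pair
# (output_false, output_true) documented above, and only the branch selected
# by `pred` is live at runtime. The helper below is hypothetical; it relies
# only on names already imported in this module (`ops`, `constant_op`).
def _example_switch_usage():
  """Builds a Switch over an IndexedSlices value (illustrative only)."""
  slices = ops.IndexedSlices(
      values=constant_op.constant([[1.0, 2.0]]),
      indices=constant_op.constant([0]))
  pred = constant_op.constant(True)
  out_false, out_true = switch(slices, pred)
  # Because `pred` is True, only `out_true` produces values when executed;
  # `out_false` is a dead branch.
  return out_false, out_true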
def _SwitchRefOrTensor(data, pred, name="Switch"):
"""Forwards `data` to an output determined by `pred`.
  If `pred` is true, the `data` input is forwarded to `output_true`.
  Otherwise, the data goes to `output_false`.
This op handles `Tensor`s and `IndexedSlices`.
Args:
data: The tensor to be forwarded to the appropriate output.
pred: A scalar that specifies which output port will receive data.
name: A name for this operation (optional).
Returns:
    `(output_false, output_true)`: If `pred` is true, data will be forwarded
      to `output_true`, otherwise it goes to `output_false`.
Raises:
TypeError: if data is not a Tensor or IndexedSlices
"""
data = ops.convert_to_tensor_or_indexed_slices(data, name="data")
# NOTE(vrv): ops.colocate_with(data, ignore_existing=True) below
# addresses the following scenario.
#
# Assume you execute Optimizer.apply_gradients() in a branch of a cond().
#
# 1. The update op is created inside a `with ops.colocate(var):` block
#
# 2. Some tensor `data` is captured and a switch is created in a
# `with ops.colocate_with(data):` block.
#
# with ops.colocate_with(var):
# with ops.colocate_with(data):
# op = ...
#
  # var and data may be pinned to different devices, so we want ops
  # created within ops.colocate_with(data) to ignore the existing stack.
with ops.colocate_with(data, ignore_existing=True):
if isinstance(data, ops.Tensor):
if data.dtype._is_ref_dtype: # pylint: disable=protected-access
return ref_switch(data, pred, name=name)
return switch(data, pred, name=name)
def merge(inputs, name=None):
"""Returns the value of an available element of `inputs`.
This op tests each of the tensors in `inputs` in turn to determine if any of
them is available. If it finds an available tensor, it returns it and its
index in `inputs`.
It is an error if more than one tensor in `inputs` is available. If no tensor
in `inputs` is available, the returned tensor and index are not set.
  This op handles both `Tensor`s and `IndexedSlices`. If `inputs` has a mix of
  `Tensor`s and `IndexedSlices`, all inputs are converted to `IndexedSlices`
  before merging.
Args:
inputs: The input tensors, at most one of which is available.
name: A name for this operation (optional).
Returns:
A tuple containing the chosen input tensor and its index in `inputs`.
Raises:
ValueError: If any of the inputs is None, or inputs are IndexedSlices and
some but not all have a dense_shape property.
"""
if any([inp is None for inp in inputs]):
raise ValueError("At least one of the merge inputs is None: %s" % inputs)
with ops.name_scope(name, "Merge", inputs) as name:
inputs = [ops.internal_convert_to_tensor_or_indexed_slices(inp, as_ref=True)
for inp in inputs]
if all([isinstance(v, ops.Tensor) for v in inputs]):
if all([v.dtype._is_ref_dtype for v in inputs]): # pylint: disable=protected-access
return gen_control_flow_ops._ref_merge(inputs, name)
else:
return gen_control_flow_ops._merge(inputs, name)
elif all([isinstance(v, sparse_tensor.SparseTensor) for v in inputs]):
# Only handle the case when all inputs are SparseTensor.
values, _ = merge([inp.values for inp in inputs], name=name)
indices, chosen_index = gen_control_flow_ops._merge(
[inp.indices for inp in inputs], name="indices")
dense_shape, _ = gen_control_flow_ops._merge(
[inp.dense_shape for inp in inputs], name="dense_shape")
return (sparse_tensor.SparseTensor(indices, values, dense_shape),
chosen_index)
else:
# For now convert all the inputs as IndexedSlices.
inputs = math_ops._as_indexed_slices_list(inputs, optimize=False)
values, _ = merge([inp.values for inp in inputs], name=name)
indices, chosen_index = gen_control_flow_ops._merge(
[inp.indices for inp in inputs], name="indices")
if any(inp.dense_shape is not None for inp in inputs):
if any(inp.dense_shape is None for inp in inputs):
raise ValueError("Either all merged IndexedSlices must have a "
"dense_shape, or none must have a dense_shape.")
dense_shape, _ = gen_control_flow_ops._merge(
[inp.dense_shape for inp in inputs], name="dense_shape")
else:
dense_shape = None
return ops.IndexedSlices(values, indices, dense_shape), chosen_index
# pylint: enable=protected-access
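# Hedged sketch (not part of the library): how `switch` and `merge` compose
# into a primitive conditional, which is essentially the dataflow that `cond`
# (defined later in this file) builds for each output. The helper name and
# constants are hypothetical; `math_ops` and `constant_op` are the module
# imports already used elsewhere in this file.
def _example_switch_merge_cond():
  """Builds `pred ? x * 2 : x + 1` out of raw Switch/Merge ops."""
  x = constant_op.constant(3.0)
  pred = constant_op.constant(True)
  # Switch forwards `x` to exactly one of its two outputs.
  x_false, x_true = switch(x, pred)
  true_branch = math_ops.multiply(x_true, 2.0)
  false_branch = math_ops.add(x_false, 1.0)
  # Merge forwards whichever branch was actually computed, plus its index.
  result, _ = merge([false_branch, true_branch])
  return result  # evaluates to 6.0 because `pred` is True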
def _convert_tensorarrays_to_flows(tensors_or_tensor_arrays):
return [ta.flow if isinstance(ta, tensor_array_ops.TensorArray)
else ta
for ta in tensors_or_tensor_arrays]
def _make_tensor_array(ta, t_or_flow):
new_ta = tensor_array_ops.TensorArray(
dtype=ta.dtype, handle=ta.handle, flow=t_or_flow,
infer_shape=ta._infer_shape)
new_ta._element_shape = ta._element_shape # pylint: disable=protected-access
return new_ta
def _convert_flows_to_tensorarrays(tensors_or_tensorarrays, tensors_or_flows):
if len(tensors_or_tensorarrays) != len(tensors_or_flows):
raise ValueError(
"Lengths of original Tensor list and new list do not match: %d vs. %d"
% (len(tensors_or_tensorarrays), len(tensors_or_flows)))
return [
_make_tensor_array(ta, t_or_flow)
if isinstance(ta, tensor_array_ops.TensorArray)
else t_or_flow
for (ta, t_or_flow) in zip(tensors_or_tensorarrays, tensors_or_flows)]
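# Hedged illustration (not part of the library) of the TensorArray <-> flow
# round trip performed by the two helpers above: only the scalar `flow`
# tensor is threaded through the control flow ops, and the TensorArray
# wrapper is rebuilt around it on the way out. The helper name and values
# are hypothetical.
def _example_tensorarray_flow_roundtrip():
  x = constant_op.constant([1.0, 2.0, 3.0])
  ta = tensor_array_ops.TensorArray(dtype=x.dtype, size=3)
  loop_vars = [ta, x]
  flows = _convert_tensorarrays_to_flows(loop_vars)   # [ta.flow, x]
  # ... the flows would normally pass through Enter/Merge/Switch/Exit ...
  restored = _convert_flows_to_tensorarrays(loop_vars, flows)
  return restored  # [TensorArray rebuilt around the flow, x unchanged]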
def _IsLoopConstantEnter(op):
"""Return true iff op is a loop invariant."""
is_enter = (op.type == "Enter" or op.type == "RefEnter")
return is_enter and op.get_attr("is_constant")
def _GetLoopConstantEnter(value):
"""Return the enter op if we can infer `value` to be a loop invariant."""
id_ops = {"Switch", "RefSwitch", "Identity", "RefIdentity"}
op = value.op
while op.type in id_ops:
op = op.inputs[0].op
return op if _IsLoopConstantEnter(op) else None
def _GetOutputContext(op):
"""Return the control flow context for the output of an op."""
ctxt = op._get_control_flow_context()
if IsLoopExit(op):
ctxt = ctxt.outer_context
return ctxt
def _ShapeLessThanOrEqual(shape1, shape2):
if shape2.dims is None:
return True
if shape1.ndims != shape2.ndims:
return False
for dim1, dim2 in zip(shape1.dims, shape2.dims):
if dim2.value is not None and dim1.value != dim2.value:
return False
return True
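# Hedged illustration (not part of the library) of the partial order that
# `_ShapeLessThanOrEqual` implements: shape1 <= shape2 when shape2 is an
# equally or less specific version of shape1, i.e. every dimension shape2
# pins down must match shape1. The helper name is hypothetical.
def _example_shape_less_than_or_equal():
  concrete = tensor_shape.TensorShape([3, 4])
  relaxed = tensor_shape.TensorShape([3, None])
  unknown = tensor_shape.TensorShape(None)
  assert _ShapeLessThanOrEqual(concrete, relaxed)      # [3, 4] fits [3, ?]
  assert _ShapeLessThanOrEqual(concrete, unknown)      # anything fits unknown
  assert not _ShapeLessThanOrEqual(relaxed, concrete)  # [3, ?] may break [3, 4]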
def _SetShapeInvariants(input_vars, enter_vars, shapes):
"""Set the shapes of the tensors in `enter_vars` to `shapes`.
Args:
input_vars: A list of tensors that are inputs to `enter_vars`.
enter_vars: A list of tensors whose shapes will be set.
shapes: A (possibly nested) list of shapes.
Raises:
ValueError: If any tensor in `enter_vars` has a less specific shape
than its corresponding shape in `shapes`.
"""
if shapes is None:
return
flat_shapes = nest.flatten(shapes)
if not all([isinstance(s, tensor_shape.TensorShape) for s in flat_shapes]):
raise ValueError("`shapes` must be a (possibly nested) list of shapes.")
# Check that the shapes of the inputs are less than the shape invariants,
# and set the shapes of `enter_vars` to the shape invariants.
for inp, var, shape in zip(input_vars, enter_vars, flat_shapes):
if isinstance(var, ops.Tensor):
if not _ShapeLessThanOrEqual(inp.get_shape(), shape):
raise ValueError(
"The shape invariant specified for %s is not compatible with "
"the initial shape of the loop variable. It enters the loop "
"with shape %s, but the specified shape invariant is %s."
% (inp.name, inp.get_shape(), shape))
var.set_shape(shape)
else:
if not isinstance(var, (ops.IndexedSlices, sparse_tensor.SparseTensor)):
raise TypeError("Type %s not supported" % type(var))
if isinstance(var, ops.IndexedSlices):
if not _ShapeLessThanOrEqual(inp.values.get_shape(), shape):
raise ValueError(
"The shape invariant specified for %s is not compatible with "
"the initial shape of the values tensor of this IndexedSlices. "
"It enters the loop with shape %s, but the specified shape "
"invariant is %s."
% (inp.values.name, inp.values.get_shape(), shape))
var.values.set_shape(shape)
var.indices.set_shape(tensor_shape.TensorShape([shape[0]]))
if var.dense_shape is not None:
var.dense_shape.set_shape(tensor_shape.TensorShape([shape.ndims]))
else:
if not _ShapeLessThanOrEqual(inp.dense_shape.get_shape(), shape):
raise ValueError(
"The shape invariant specified for %s is not compatible with "
"the initial shape of the shape tensor of this SparseTensor. "
"It enters the loop with shape %s, but the specified shape "
"invariant is %s."
% (inp.dense_shape.name, inp.dense_shape.get_shape(), shape))
var.values.set_shape(tensor_shape.TensorShape([None]))
var.indices.set_shape(tensor_shape.TensorShape([None, shape.ndims]))
var.dense_shape.set_shape(shape)
def _EnforceShapeInvariant(merge_var, next_var):
"""Check if the shapes of the loops variables are invariants.
Args:
merge_vars: The list of tensors representing the initial values of the
loop variables.
next_vars: The list of tensors representing the values of the loop
variables after one loop iteration.
Raises:
ValueError: If any tensor in `merge_vars` has a more specific shape than
its correspnding tensor in `next_var`.
"""
if isinstance(merge_var, ops.Tensor):
m_shape = merge_var.get_shape()
n_shape = next_var.get_shape()
if not _ShapeLessThanOrEqual(n_shape, m_shape):
raise ValueError(
"The shape for %s is not an invariant for the loop. It enters "
"the loop with shape %s, but has shape %s after one iteration. "
"Provide shape invariants using either the `shape_invariants` "
"argument of tf.while_loop or set_shape() on the loop variables."
% (merge_var.name, m_shape, n_shape))
else:
    if not isinstance(merge_var, (ops.IndexedSlices, sparse_tensor.SparseTensor)):
      raise TypeError("Type %s not supported" % type(merge_var))
    if isinstance(merge_var, ops.IndexedSlices):
m_values_shape = merge_var.values.get_shape()
m_indices_shape = merge_var.indices.get_shape()
m_shape_shape = tensor_shape.TensorShape(None)
if merge_var.dense_shape is not None:
m_shape_shape = merge_var.dense_shape.get_shape()
n_values_shape = next_var.values.get_shape()
n_indices_shape = next_var.indices.get_shape()
n_shape_shape = tensor_shape.TensorShape(None)
if next_var.dense_shape is not None:
n_shape_shape = next_var.dense_shape.get_shape()
if (not _ShapeLessThanOrEqual(n_values_shape, m_values_shape) or
not _ShapeLessThanOrEqual(n_indices_shape, m_indices_shape)):
if not _ShapeLessThanOrEqual(n_values_shape, m_values_shape):
raise ValueError(
"The shape for %s is not an invariant for the loop. It enters "
"the loop with shape (%s, %s, %s), but has shape (%s, %s, %s) "
"after one iteration. Provide shape invariants using either the "
"`shape_invariants` argument of tf.while_loop or set_shape() "
"on the loop variables."
% (merge_var.name, m_values_shape, m_indices_shape, m_shape_shape,
n_values_shape, n_indices_shape, n_shape_shape))
else:
m_values_shape = merge_var.values.get_shape()
m_indices_shape = merge_var.indices.get_shape()
m_shape_shape = merge_var.dense_shape.get_shape()
n_values_shape = next_var.values.get_shape()
n_indices_shape = next_var.indices.get_shape()
n_shape_shape = next_var.dense_shape.get_shape()
if (not _ShapeLessThanOrEqual(n_values_shape, m_values_shape) or
not _ShapeLessThanOrEqual(n_indices_shape, m_indices_shape) or
not _ShapeLessThanOrEqual(n_shape_shape, m_shape_shape)):
raise ValueError(
"The shape for %s is not an invariant for the loop. It enters "
"the loop with shape (%s, %s, %s), but has shape (%s, %s, %s) "
"after one iteration. Provide shape invariants using either "
"the `shape_invariants` argument of tf.while_loop or set_shape() "
"on the loop variables."
% (merge_var.name, m_values_shape, m_indices_shape, m_shape_shape,
n_values_shape, n_indices_shape, n_shape_shape))
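# Hedged usage sketch (not part of the library) for the error messages above:
# when a loop variable changes shape across iterations, the caller must pass
# `shape_invariants`. This assumes the module-level `while_loop` defined
# later in this file; the helper name and values are hypothetical.
def _example_shape_invariants():
  i0 = constant_op.constant(0)
  x0 = constant_op.constant([0.0])  # grows by one element every iteration
  def _cond(i, unused_x):
    return math_ops.less(i, 5)
  def _body(i, x):
    return i + 1, array_ops.concat([x, [0.0]], 0)
  # Without the relaxed invariant for `x`, _EnforceShapeInvariant would
  # reject the loop because x's static shape changes from [1] to [2].
  return while_loop(
      _cond, _body, [i0, x0],
      shape_invariants=[i0.get_shape(), tensor_shape.TensorShape([None])])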
def _AddNextAndBackEdge(m, v):
"""Add NextIteration and back edge from v to m."""
if isinstance(m, ops.Tensor):
v = ops.convert_to_tensor(v)
v = _NextIteration(v)
m.op._update_input(1, v) # pylint: disable=protected-access
elif isinstance(m, ops.IndexedSlices):
# pylint: disable=protected-access
v = math_ops._as_indexed_slices(v, optimize=False)
v = _NextIteration(v)
m.values.op._update_input(1, v.values)
m.indices.op._update_input(1, v.indices)
# pylint: enable=protected-access
if m.dense_shape is not None:
if v.dense_shape is None:
raise ValueError("Must have dense shape: %s" % v.name)
m.dense_shape.op._update_input(1, v.dense_shape)
elif isinstance(m, sparse_tensor.SparseTensor):
if not isinstance(v, sparse_tensor.SparseTensor):
raise ValueError("Must be a sparse tensor: %s" % v.name)
v = _NextIteration(v)
# pylint: disable=protected-access
m.values.op._update_input(1, v.values)
m.indices.op._update_input(1, v.indices)
m.dense_shape.op._update_input(1, v.dense_shape)
# pylint: enable=protected-access
else:
raise TypeError("Type %s not supported" % type(m))
return v
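# Hedged illustration (not part of the library) of how the raw frame
# primitives (`enter`, `merge`, `switch`, `exit`) and `_AddNextAndBackEdge`
# fit together; this is a hand-rolled miniature of the dataflow the while
# loop builder assembles. It computes `i = 0; while i < 5: i += 1`. The
# helper name is hypothetical, and `enter`/`loop_cond` are assumed to be the
# module-level op wrappers used elsewhere in this file.
def _example_raw_while_frame():
  n = constant_op.constant(5)
  one = constant_op.constant(1)
  i = constant_op.constant(0)
  enter_n = enter(n, "example_frame", True)       # loop invariant
  enter_one = enter(one, "example_frame", True)   # loop invariant
  enter_i = enter(i, "example_frame", False)      # loop variable
  # The merge's second input is a placeholder until the back edge is added.
  merge_i = merge([enter_i, enter_i])[0]
  cond_op = loop_cond(math_ops.less(merge_i, enter_n))
  switch_i = switch(merge_i, cond_op)             # (exit path, body path)
  body_i = math_ops.add(switch_i[1], enter_one)
  # Wrap body_i in a NextIteration op and patch the merge's back edge.
  _AddNextAndBackEdge(merge_i, body_i)
  return exit(switch_i[0])                        # evaluates to 5 when run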
class GradLoopState(object):
"""The state used for constructing the gradient graph for a while loop.
We create a GradLoopState for each while loop in forward and its
corresponding while loop in backprop. This gives us access to both
the forward and the backprop WhileContexts.
  During the construction of the gradient graph, whenever we detect a
  forward value that is needed for backprop, we create a history accumulator
  and add it to `history_map`. Whenever we backprop a loop switch op (in
  _SwitchGrad), we add the grad merge op in `switch_map`.
"""
def __init__(self, forward_ctxt, outer_grad_state):
# The grad loop state for the outer while loop.
self._outer_grad_state = None
# The while loop context for forward.
self._forward_context = None
# The loop counter added by AddForwardLoopCounter. It is the value
# of the loop counter for the next iteration.
self._forward_index = None
# A sync op for forward.
self._forward_sync = None
# The while loop context for backprop.
self._grad_context = None
# The loop counter added by AddBackPropLoopCounter. It is the value
# of the loop counter for the current iteration.
self._grad_index = None
# A sync op for backprop.
self._grad_sync = None
# Information needed by backprop.
self._history_map = {}
self._switch_map = {}
self._unused_exits = []
self._deferred_exits = []
self._forward_loop_exits = list(forward_ctxt.loop_exits)
self._pending_exits_count = len(forward_ctxt.loop_exits)
self._outer_grad_state = outer_grad_state
if outer_grad_state:
outer_forward_ctxt = outer_grad_state.forward_context
else:
outer_forward_ctxt = forward_ctxt.outer_context
# Add the forward loop counter.
if outer_forward_ctxt: outer_forward_ctxt.Enter()
cnt, forward_index = forward_ctxt.AddForwardLoopCounter(outer_grad_state)
if outer_forward_ctxt: outer_forward_ctxt.Exit()
self._forward_context = forward_ctxt
self._forward_index = forward_index
# Add the backprop WhileContext, and the backprop loop counter.
if outer_grad_state:
# This is a nested loop. Remember the iteration counts for each
# execution of this inner loop.
outer_forward_ctxt.AddName(cnt.name)
history_cnt = outer_grad_state.AddForwardAccumulator(cnt)
outer_grad_ctxt = outer_grad_state.grad_context
outer_grad_ctxt.Enter()
self._grad_context = WhileContext(forward_ctxt.parallel_iterations,
forward_ctxt.back_prop,
forward_ctxt.swap_memory,
forward_ctxt.name,
self)
real_cnt = outer_grad_state.AddBackPropAccumulatedValue(history_cnt, cnt)
self._grad_index = self._grad_context.AddBackPropLoopCounter(
real_cnt, outer_grad_state)
outer_grad_ctxt.Exit()
else:
if outer_forward_ctxt: outer_forward_ctxt.Enter()
self._grad_context = WhileContext(forward_ctxt.parallel_iterations,
forward_ctxt.back_prop,
forward_ctxt.swap_memory,
forward_ctxt.name,
self)
self._grad_index = self._grad_context.AddBackPropLoopCounter(
cnt, outer_grad_state)
if outer_forward_ctxt: outer_forward_ctxt.Exit()
@property
def outer_grad_state(self):
"""The grad loop state for outer loop."""
return self._outer_grad_state
@property
def forward_context(self):
"""The while loop context for forward."""
return self._forward_context
@property
def forward_index(self):
"""The loop index of forward loop."""
return self._forward_index
@property
def forward_sync(self):
"""A control trigger node for synchronization in the forward loop.
One main use is to keep the push ops of a stack executed in the
iteration order.
"""
if self._forward_sync is None:
with ops.control_dependencies(None):
self._forward_sync = control_trigger(name="f_sync")
self._forward_sync._set_control_flow_context(self._forward_context)
self._forward_index.op._add_control_input(self._forward_sync)
return self._forward_sync
@property
def grad_context(self):
"""The corresponding WhileContext for gradient."""
return self._grad_context
@property
def grad_index(self):
"""The loop index of backprop loop."""
return self._grad_index
@property
def grad_sync(self):
"""A control trigger node for synchronization in the grad loop.
One main use is to keep the pop ops of a stack executed in the
iteration order.
"""
if self._grad_sync is None:
with ops.control_dependencies(None):
self._grad_sync = control_trigger(name="b_sync")
self._grad_sync._set_control_flow_context(self._grad_context)
self._grad_index.op._add_control_input(self._grad_sync)
return self._grad_sync
@property
def history_map(self):
"""The map that records all the tensors needed for backprop."""
return self._history_map
@property
def switch_map(self):
"""The map that records all the Switch ops for the while loop."""
return self._switch_map
@property
def unused_exits(self):
"""The list of "unused" exits."""
return self._unused_exits
@property
def deferred_exits(self):
"""The list of "deferred" exits."""
return self._deferred_exits
@property
def forward_loop_exits(self):
"""The list of exits of the forward loop."""
return self._forward_loop_exits
@property
def pending_exits_count(self):
"""The number of exits we expect to see but haven't."""
return self._pending_exits_count
@pending_exits_count.setter
def pending_exits_count(self, cnt):
"""Set the pending count to cnt."""
self._pending_exits_count = cnt
def AddForwardAccumulator(self, value, dead_branch=False):
"""Add an accumulator for each forward tensor that is needed in backprop.
    This is added to the forward loop the first time a tensor in the forward
    loop is used by the backprop gradient computation loop. We create an
    accumulator that accumulates the value of the tensor at each iteration.
    Called in the control flow context where gradients() is called.
The pseudocode is:
```
acc = stack();
while (_pivot) {
acc = stack_push(acc, value);
}
```
We make sure that the stack push op in one iteration is executed before
next iteration. This is achieved by adding a control edge from
`forward_index.op.inputs[0].op` to the push op, and another control
edge from the push op to either `forward_index.op` or `forward_sync`.
Args:
value: The source tensor in forward that is to be accumulated.
dead_branch: True iff the tensor is on a dead branch of a cond.
Returns:
The stack that contains the accumulated history of the tensor.
Raises:
TypeError: For internal errors involving the value condition context.
"""
curr_ctxt = ops.get_default_graph()._get_control_flow_context()
with ops.control_dependencies(None):
if curr_ctxt: curr_ctxt.Enter()
with ops.colocate_with(value):
# pylint: disable=protected-access
acc = gen_data_flow_ops._stack(value.dtype.base_dtype, name="f_acc")
# pylint: enable=protected-access
if curr_ctxt: curr_ctxt.Exit()
# Make acc available in the forward context.
enter_acc = self.forward_context.AddValue(acc)
# Add the stack_push op in the context of value.op.
swap_enabled = self.forward_context.swap_memory
value_ctxt = _GetOutputContext(value.op)
if value_ctxt == self.forward_context:
# value is not nested in the forward context.
self.forward_context.Enter()
push = gen_data_flow_ops._stack_push(
enter_acc, value, swap_memory=swap_enabled)
self.forward_context.Exit()
# Protect stack push and order it before forward_index.
self.forward_index.op._add_control_input(push.op)
else:
# value is in a cond context within the forward context.
if not isinstance(value_ctxt, CondContext):
raise TypeError(
"value_ctxt is not a CondContext: %s" % value_ctxt)
if dead_branch:
# The special case for creating a zero tensor for a dead
# branch of a switch. See ControlFlowState.ZerosLike().
value_ctxt.outer_context.Enter()
push = gen_data_flow_ops._stack_push(
enter_acc, value, swap_memory=swap_enabled)
value_ctxt.outer_context.Exit()
push.op._set_control_flow_context(value_ctxt)
else:
value_ctxt.Enter()
push = gen_data_flow_ops._stack_push(
enter_acc, value, swap_memory=swap_enabled)
value_ctxt.Exit()
# Protect stack push and order it before forward_sync.
self.forward_sync._add_control_input(push.op)
# Order stack push after the successor of forward_index
add_op = self.forward_index.op.inputs[0].op
push.op._add_control_input(add_op)
return acc
def AddBackPropAccumulatedValue(self, history_value, value,
dead_branch=False):
"""Add the getter for an accumulated value in the grad context.
This is added to the backprop loop. Called in the grad context to
get the value of an accumulated value. The stack pop op must be guarded
by the pred of the controlling cond.
Args:
history_value: The history (a stack) of a value.
value: The value that is pushed onto the stack.
dead_branch: True iff the tensor is on a dead branch of a cond.
Returns:
The current value (the top of the stack).
"""
history_ctxt = history_value.op._get_control_flow_context()
# Find the cond context that controls history_value if any.
cond_ctxt = None
value_ctxt = value.op._get_control_flow_context()
while value_ctxt and value_ctxt != history_ctxt:
if isinstance(value_ctxt, CondContext):
cond_ctxt = value_ctxt
break
value_ctxt = value_ctxt.outer_context
with ops.control_dependencies(None):
self.grad_context.Enter()
if cond_ctxt:
# Guard stack pop with a switch if it is controlled by a cond.
grad_state = self
pred = None
while pred is None and grad_state:
pred = grad_state.history_map.get(cond_ctxt.pred.name)
grad_state = grad_state.outer_grad_state
if pred is None:
pred = cond_ctxt.pred
branch = (1 - cond_ctxt.branch) if dead_branch else cond_ctxt.branch
history_value = _SwitchRefOrTensor(history_value, pred)[branch]
pop = gen_data_flow_ops._stack_pop(history_value, value.dtype.base_dtype)
pop.set_shape(value.get_shape())
self.grad_context.Exit()
parallel_iterations = self.grad_context.parallel_iterations
if parallel_iterations > 1:
# All pops are ordered after pivot_for_body and before grad_sync.
self.grad_sync._add_control_input(pop.op)
return pop
def GetRealValue(self, value):
"""Get the real value of `value`.
If backprop "uses" a value produced by forward inference, an accumulator
is added in the forward loop to accumulate its values. We use the
accumulated value. This method must be called in the grad loop context.
`value` must be in forward and needed for backprop.
Args:
value: A tensor to be captured.
Returns:
The same tensor obtained from the saved history.
"""
assert value.op.type not in ["Variable", "VariableV2"]
real_value = self._history_map.get(value.name)
if real_value is None:
cur_value = value
cur_grad_state = self
while True:
enter_op = _GetLoopConstantEnter(cur_value)
if enter_op:
# Special case: cur_value comes from a constant Enter node.
cur_value = enter_op.inputs[0]
cur_grad_state = cur_grad_state.outer_grad_state
if cur_grad_state is None:
# We are now outside all nested loops for this gradient(),
# so `value` is a loop invariant and there is no need to
            # save the history of value. Just make cur_value enter
            # the right control flow context.
real_value = self._grad_context.AddValue(cur_value)
break
else:
# Record the history of this value in forward_ctxt.
# TODO(yuanbyu): Avoid recording constants.
self._grad_context.Exit()
history_value = cur_grad_state.AddForwardAccumulator(cur_value)
self._grad_context.Enter()
break
if real_value is None:
# Add the stack pop op in the grad context.
real_value = cur_grad_state.AddBackPropAccumulatedValue(history_value,
cur_value)
if cur_grad_state != self:
real_value = self._grad_context.AddValue(real_value)
self._history_map[value.name] = real_value
return real_value
def _GetWhileContext(op):
"""Get the WhileContext to which this op belongs."""
ctxt = op._get_control_flow_context()
if ctxt:
ctxt = ctxt.GetWhileContext()
return ctxt
class ControlFlowState(object):
"""Maintain the mapping from the loops to their grad states."""
def __init__(self):
self._map = {} # maps forward loop context to GradLoopState
def GetGradState(self, op, before):
"""Return the grad state for this op if it's in a forward loop context."""
if before and IsLoopExit(op):
forward_ctxt = op._get_control_flow_context()
forward_ctxt = forward_ctxt.outer_context
if forward_ctxt:
forward_ctxt = forward_ctxt.GetWhileContext()
else:
forward_ctxt = _GetWhileContext(op)
if forward_ctxt:
return self._map.get(forward_ctxt)
return None
def ProcessUnusedLoopExits(self, pending_count, to_ops_set):
"""Process all the "unused" loop exits.
The "unused" exits of the loops are added to `unused_exits`. An exit is
unused if its pending_count is 0. If there is an exit with real gradient,
all these deferred exits will enter the backprop loop with zero gradient.
Otherwise, they will enter the backprop loop with None. As an example,
people often write:
```
v1, _ = tf.while_loop(p, b, [x1, x2])
result = gradients(v1, x1)
```
The exit node for x2 is not included by the betweenness analysis. But we
need to backprop x2 if x2 is involved in computing v1.
Args:
pending_count: The number of backprop inputs for every op.
to_ops_set: The set of ops for ys in gradients(ys, xs)
Returns:
The set of unused loop exits that we know at this point we need
to backprop.
"""
loop_exits = []
for _, grad_state in self._map.items():
for y in grad_state.forward_loop_exits:
# pylint: disable=protected-access
if pending_count[y.op._id] == 0:
grad_state.pending_exits_count -= 1
if y.op._id not in to_ops_set:
grad_state.unused_exits.append(y)
if grad_state.pending_exits_count == 0:
loop_exits.extend(grad_state.unused_exits)
# pylint: enable=protected-access
return loop_exits
def EnterGradWhileContext(self, op, before):
"""Enter the WhileContext for gradient computation."""
grad_state = self.GetGradState(op, before)
if grad_state:
grad_state.grad_context.Enter()
def ExitGradWhileContext(self, op, before):
"""Exit the WhileContext for gradient computation."""
grad_state = self.GetGradState(op, before)
if grad_state:
grad_state.grad_context.Exit()
def AddWhileContext(self, op, between_op_list, between_ops):
"""Add the grad state for the while loop that op belongs to.
Note that op is an Exit, and this method must be called in
the control flow context where gradients() is called.
Note that this method modifies `between_op_list` and `between_ops`.
"""
forward_ctxt = _GetWhileContext(op)
grad_state = self._map.get(forward_ctxt)
if grad_state is None:
# This is a new while loop so create a grad state for it.
outer_forward_ctxt = forward_ctxt.outer_context
if outer_forward_ctxt:
outer_forward_ctxt = outer_forward_ctxt.GetWhileContext()
outer_grad_state = None
if outer_forward_ctxt:
outer_grad_state = self._map.get(outer_forward_ctxt)
grad_state = GradLoopState(forward_ctxt, outer_grad_state)
self._map[forward_ctxt] = grad_state
# We need to include all exits of a loop for backprop.
for loop_exit in grad_state.forward_loop_exits:
if not between_ops[loop_exit.op._id]:
between_ops[loop_exit.op._id] = True
between_op_list.append(loop_exit.op)
def ZerosLikeForExit(self, val):
"""Create zeros_like gradient for a loop exit.
If the result of a loop variable is not used but is involved in
computing the result of some needed loop variable, we create a
zero-valued tensor that is fed as gradient for the Exit node of that
loop variable. Note that val.op is an Exit, and this method must be
called in the control flow context where gradients() is called.
Args:
val: The output tensor of an Exit op.
Returns:
      A zero tensor with the same shape as `val`.
"""
val_shape = val.get_shape()
forward_ctxt = val.op._get_control_flow_context()
outer_forward_ctxt = forward_ctxt.outer_context
if outer_forward_ctxt:
outer_forward_ctxt = outer_forward_ctxt.GetWhileContext()
outer_grad_state = None
if outer_forward_ctxt:
outer_grad_state = self._map.get(outer_forward_ctxt)
if outer_grad_state:
# This is a nested loop.
if val_shape.is_fully_defined():
# If the shape is known statically, just create a zero tensor
# with the right shape in the right context.
outer_grad_state.grad_context.Enter()
result = array_ops.zeros(val_shape.dims, val.dtype)
outer_grad_state.grad_context.Exit()
else:
# Only the shape of value is needed for backprop.
forward_ctxt.outer_context.Enter()
shape = array_ops.shape_internal(val, optimize=False)
forward_ctxt.outer_context.Exit()
# Save the shape to a stack.
history_shape = outer_grad_state.AddForwardAccumulator(shape)
# Get the shape back from the stack.
outer_grad_ctxt = outer_grad_state.grad_context
outer_grad_ctxt.Enter()
real_shape = outer_grad_state.AddBackPropAccumulatedValue(
history_shape, shape)
result = array_ops.zeros(real_shape, val.dtype)
outer_grad_ctxt.Exit()
else:
# This is not a nested loop.
if val_shape.is_fully_defined():
# If the shape is known statically, just create a zero tensor
# with the right shape.
result = array_ops.zeros(val_shape.dims, val.dtype)
else:
result = array_ops.zeros_like(val, optimize=False)
return result
def ZerosLike(self, op, index):
"""Create zeros_like for the specified output of an op.
If op is in a while loop that is part of gradients(), this method
must be called in its grad loop context.
Args:
op: A tensorflow operation.
index: the index for a specific output of the op.
Returns:
      A zero tensor with the same shape as `op.outputs[index]`.
"""
if IsLoopSwitch(op): return None
dead_branch = IsSwitch(op)
forward_ctxt = _GetWhileContext(op)
grad_state = self._map.get(forward_ctxt)
if grad_state is None:
# op is not in a while loop that is part of gradients().
return ZerosLikeOutsideLoop(op, index)
op_ctxt = op._get_control_flow_context()
val = ops.convert_to_tensor(op.outputs[index], name="tensor")
shape = val.get_shape()
if shape.is_fully_defined():
# If the shape is known statically, just create a zero tensor with
# the right shape in the grad loop context.
result = constant_op.constant(0, shape=shape.dims, dtype=val.dtype)
if dead_branch:
# op is a cond switch. Guard the zero tensor with a switch.
pred = grad_state.history_map.get(op_ctxt.pred.name)
branch = op_ctxt.branch
result = _SwitchRefOrTensor(result, pred)[1 - branch]
else:
# Unknown shape so keep a history of the shape at runtime.
if dead_branch:
# Need to add a special switch to guard the value.
pred = op_ctxt.pred
branch = op_ctxt.branch
op_ctxt.outer_context.Enter()
val = _SwitchRefOrTensor(op.inputs[0], pred)[1 - branch]
zeros_shape = array_ops.shape_internal(val, optimize=False)
op_ctxt.outer_context.Exit()
val.op._set_control_flow_context(op_ctxt)
zeros_shape.op._set_control_flow_context(op_ctxt)
else:
op_ctxt.Enter()
zeros_shape = array_ops.shape_internal(val, optimize=False)
op_ctxt.Exit()
# Add forward accumulator for shape.
grad_state.grad_context.Exit()
history_zeros_shape = grad_state.AddForwardAccumulator(
zeros_shape, dead_branch=dead_branch)
grad_state.grad_context.Enter()
# Create a zero tensor with the right shape.
shape = grad_state.AddBackPropAccumulatedValue(
history_zeros_shape, zeros_shape, dead_branch)
result = array_ops.zeros(shape, val.dtype)
return result
def PostProcessing(self):
"""Perform postprocessing at the end of gradients().
We have created the gradient graph at this point. So this function
can be used to perform any postprocessing on the gradient graph.
We currently perform the following postprocessing:
1. Patch the gradient graph if the output of a loop variable
doesn't depend on its input.
"""
for _, grad_state in self._map.items():
for _, b_merge in grad_state.switch_map.items():
if b_merge.op.inputs[0] == b_merge.op.inputs[1]:
# The value of this loop variable at iteration i+1 doesn't
# depend on its value at iteration i. So use zeros as the
# gradients for all iterations > 0.
dtype = b_merge.op.inputs[0].dtype
shape = b_merge.op.inputs[0].get_shape()
# pylint: disable=protected-access
if shape.is_fully_defined():
grad_state.grad_context.Enter()
# Create a zeros and use it for iterations > 0.
grad_val = constant_op.constant(0, dtype=dtype, shape=shape)
next_grad_val = _NextIteration(grad_val)
grad_state.grad_context.Exit()
else:
# Create a zeros in the outer grad context.
outer_grad_ctxt = grad_state.grad_context.outer_context
if outer_grad_ctxt: outer_grad_ctxt.Enter()
enter_grad_op = b_merge.op.inputs[0].op
enter_grad = enter_grad_op.inputs[0]
grad_shape = array_ops.shape_internal(enter_grad, optimize=False)
grad_val = array_ops.zeros(grad_shape)
if outer_grad_ctxt: outer_grad_ctxt.Exit()
# Use the zeros for iterations > 0.
grad_state.grad_context.Enter()
next_grad_val = _NextIteration(grad_val)
grad_state.grad_context.Exit()
b_merge.op._update_input(1, next_grad_val)
# pylint: enable=protected-access
def MaybeCreateControlFlowState(between_op_list, between_ops,
colocate_gradients_with_ops):
"""Create the state for all the while loops involved in one gradients().
We create a ControlFlowState when there are while loops involved in
gradients(). In gradients(), control flow logic is only invoked when
the ControlFlowState is not None.
Note that this method modifies `between_op_list` and `between_ops`.
"""
loop_state = None
for op in between_op_list:
if IsLoopExit(op):
if loop_state is None:
loop_state = ControlFlowState()
if colocate_gradients_with_ops:
with ops.colocate_with(op):
loop_state.AddWhileContext(op, between_op_list, between_ops)
else:
loop_state.AddWhileContext(op, between_op_list, between_ops)
return loop_state
def IsSwitch(op):
"""Return true if `op` is a Switch."""
return op.type == "Switch" or op.type == "RefSwitch"
def IsLoopExit(op):
"""Return true if `op` is an Exit."""
return op.type == "Exit" or op.type == "RefExit"
def IsLoopSwitch(op):
"""Return true if `op` is the Switch for a while loop."""
if IsSwitch(op):
ctxt = op._get_control_flow_context()
return ctxt and isinstance(ctxt, WhileContext)
return False
def ZerosLikeOutsideLoop(op, index):
"""Create zeros_like for the specified output of an op."""
val = op.outputs[index]
if not IsSwitch(op):
return array_ops.zeros_like(val, optimize=False)
else:
op_ctxt = op._get_control_flow_context()
pred = op_ctxt.pred
branch = op_ctxt.branch
switch_val = switch(op.inputs[0], pred)[1 - branch]
zeros_shape = array_ops.shape_internal(switch_val, optimize=False)
return array_ops.zeros(zeros_shape, dtype=val.dtype)
class ControlFlowContext(object):
"""The base class for control flow context.
The usage pattern is a sequence of (Enter, Exit) followed by a final
ExitResult.
We maintain the following state for control flow contexts during graph
construction:
1. graph has _control_flow_context: the current context used to
construct new nodes. Changed by ctxt.Enter() and ctxt.Exit()
2. op has _control_flow_context: the context to which the op belongs.
Set at the time the op is created. Immutable.
3. A ControlFlowContext has _outer_context: the context in which this
context is created. Set at the time a context is created. Immutable.
4. A ControlFlowContext has _context_stack.
Pushed and popped by ctxt.Enter() and ctxt.Exit()
"""
def __init__(self, values_def=None, import_scope=None):
self._outer_context = ops.get_default_graph()._get_control_flow_context()
self._context_stack = []
if values_def:
self._init_values_from_proto(values_def,
import_scope=import_scope)
else:
# Values that have been already seen in this context.
self._values = set()
# Values referenced by but external to this context.
self._external_values = {}
def _init_values_from_proto(self, values_def, import_scope=None):
"""Initializes values and external_values from `ValuesDef` protocol buffer.
Args:
values_def: `ValuesDef` protocol buffer.
import_scope: Optional `string`. Name scope to add.
"""
assert isinstance(values_def, control_flow_pb2.ValuesDef)
self._values = set(values_def.values)
g = ops.get_default_graph()
self._external_values = {}
for k, v in values_def.external_values.items():
self._external_values[k] = g.as_graph_element(v)
op_names = set([op.split(":")[0]
for op in self._values - set(self._external_values)])
for op in op_names:
# pylint: disable=protected-access
g.as_graph_element(ops.prepend_name_scope(
op, import_scope))._set_control_flow_context(self)
# pylint: enable=protected-access
@property
def outer_context(self):
"""Return the context containing this context."""
return self._outer_context
@property
def grad_state(self):
raise NotImplementedError("Abstract method")
@property
def back_prop(self):
raise NotImplementedError("Abstract method")
def _to_proto(self, export_scope=None):
"""Converts the values to a `ValuesDef` protocol buffer.
Args:
export_scope: Optional `string`. Name scope to remove.
Returns:
A `ValuesDef` protocol buffer.
"""
values_def = control_flow_pb2.ValuesDef()
values_def.values.extend(
[ops.strip_name_scope(v, export_scope)
for v in sorted(self._values)])
for k, v in self._external_values.items():
values_def.external_values[k] = ops.strip_name_scope(
v.name, export_scope)
return values_def
@staticmethod
def _from_proto(values_def, import_scope=None):
"""Returns a `ControlFlowContext` created from `values_def`."""
return ControlFlowContext(values_def=values_def,
import_scope=import_scope)
def AddName(self, name):
self._values.add(name)
# pylint: disable=protected-access
def Enter(self):
"""Enter this control flow context."""
graph = ops.get_default_graph()
self._context_stack.append(graph._get_control_flow_context())
graph._set_control_flow_context(self)
def Exit(self):
"""Exit this control flow context."""
graph = ops.get_default_graph()
last_context = self._context_stack.pop()
graph._set_control_flow_context(last_context)
def ExitResult(self, result):
"""Make a list of tensors available in the outer context."""
if self._outer_context:
for x in result:
self._outer_context.AddName(x.name)
def GetWhileContext(self):
"""Return the while context containing this context."""
if self._outer_context:
return self._outer_context.GetWhileContext()
return None
def _IsInOuterContext(self, op):
op_ctxt = _GetOutputContext(op)
outer_ctxt = self.outer_context
while outer_ctxt != op_ctxt:
if outer_ctxt is None:
return False
outer_ctxt = outer_ctxt.outer_context
return True
def _RemoveExternalControlEdges(self, op):
"""Remove any external control dependency on this op."""
while_ctxt = self.GetWhileContext()
# A control input of `op` is internal if it is in the same while
# loop context as the enclosing while loop context of self.
if while_ctxt is None:
internal_control_inputs = op.control_inputs
else:
internal_control_inputs = []
for x in op.control_inputs:
ctxt = _GetOutputContext(x)
if ctxt is not None and ctxt.GetWhileContext() == while_ctxt:
internal_control_inputs.append(x)
if len(internal_control_inputs) != len(op.control_inputs):
del op.control_inputs[:]
op._add_control_inputs(internal_control_inputs)
return internal_control_inputs
# pylint: enable=protected-access
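# Hedged sketch (not part of the library) of the Enter()/Exit() protocol
# described in the class docstring above, using the `CondContext` subclass
# defined next: the context is pushed while its branch ops are built,
# ExitResult exposes the branch outputs to the outer context, and Exit pops
# it again. This is a hand-rolled miniature of what `cond` does; the helper
# name and constants are hypothetical.
def _example_context_enter_exit():
  pred = constant_op.constant(True)
  _, p_true = switch(pred, pred)
  pivot = array_ops.identity(p_true, name="example_pivot")
  ctxt = CondContext(pred, pivot, branch=1)
  ctxt.Enter()                     # ops created now belong to `ctxt`
  branch_out = math_ops.add(constant_op.constant(1.0), 1.0)
  ctxt.ExitResult([branch_out])    # make the output visible outside
  ctxt.Exit()
  return branch_out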
class CondContext(ControlFlowContext):
"""The context for the conditional construct."""
def __init__(self, pred=None, pivot=None, branch=None,
name="cond_text", context_def=None, import_scope=None):
"""Creates a `CondContext`.
Args:
pred: The `boolean` tensor for the conditional predicate.
pivot: The predicate tensor in this branch.
branch: 0 or 1 representing this branch.
name: Name of the `CondContext` python object.
context_def: Optional `ContextDef` protocol buffer to initialize the
`CondContext` object from.
import_scope: Optional `string`. Name scope to add. Only used when
        initializing from a protocol buffer.
"""
self._name = ops.get_default_graph().unique_name(name)
if context_def:
self._init_from_proto(context_def, import_scope=import_scope)
else:
# Initializes the default fields.
ControlFlowContext.__init__(self)
self._pred = pred # The boolean tensor for the cond predicate
self._pivot = pivot # The predicate tensor in this branch
self._branch = branch # 0 or 1 representing this branch
# Values considered to have been already seen in this context.
self._values.add(pred.name)
self._values.add(pivot.name)
def _init_from_proto(self, context_def, import_scope=None):
"""Creates a new `CondContext` from protocol buffer.
Args:
context_def: `CondContextDef` protocol buffer.
import_scope: Optional `string`. Name scope to add.
"""
assert isinstance(context_def, control_flow_pb2.CondContextDef)
# Create from context_def.
g = ops.get_default_graph()
self._name = ops.prepend_name_scope(
context_def.context_name, import_scope)
self._pred = g.as_graph_element(ops.prepend_name_scope(
context_def.pred_name, import_scope))
self._pivot = g.as_graph_element(ops.prepend_name_scope(
context_def.pivot_name, import_scope))
self._branch = context_def.branch
super(CondContext, self).__init__(values_def=context_def.values_def,
import_scope=import_scope)
@property
def name(self):
return self._name
@property
def pred(self):
return self._pred
@property
def pivot(self):
return self._pivot
@property
def branch(self):
return self._branch
@property
def grad_state(self):
if self.GetWhileContext():
return self.GetWhileContext().grad_state
return None
@property
def back_prop(self):
if self.GetWhileContext():
      return self.GetWhileContext().back_prop
return False
def GetControlPivot(self):
return self._pivot
def to_proto(self, export_scope=None):
"""Converts a `CondContext` to a `CondContextDef` protocol buffer.
Args:
export_scope: Optional `string`. Name scope to remove.
Returns:
A `CondContextDef` protocol buffer.
"""
if (export_scope is None or
self.name.startswith(export_scope)):
context_def = control_flow_pb2.CondContextDef()
context_def.context_name = ops.strip_name_scope(
self.name, export_scope)
context_def.pred_name = ops.strip_name_scope(
self._pred.name, export_scope)
context_def.pivot_name = ops.strip_name_scope(
self._pivot.name, export_scope)
context_def.branch = self._branch
context_def.values_def.MergeFrom(super(CondContext, self)._to_proto(
export_scope))
return context_def
else:
return None
@staticmethod
def from_proto(context_def, import_scope=None):
"""Returns a `CondContext` object created from `context_def`."""
return CondContext(context_def=context_def,
import_scope=import_scope)
def AddValue(self, val):
"""Add `val` to the current context and its outer context recursively."""
if val.name in self._values:
# Use the real value if it comes from outer context. This is needed in
# particular for nested conds.
result = self._external_values.get(val.name)
result = val if result is None else result
else:
result = val
self._values.add(val.name)
if self._outer_context:
result = self._outer_context.AddValue(val)
self._values.add(result.name)
with ops.control_dependencies(None):
result = _SwitchRefOrTensor(result, self._pred)[self._branch]
result.op.graph.prevent_fetching(result.op)
# pylint: disable=protected-access
result.op._set_control_flow_context(self)
# pylint: enable=protected-access
self._values.add(result.name)
self._external_values[val.name] = result
return result
def AddOp(self, op):
self._AddOpInternal(op)
def _AddOpInternal(self, op):
"""Add `op` to the current context."""
if not op.inputs:
# Remove any external control dependency on this op
self._RemoveExternalControlEdges(op)
# pylint: disable=protected-access
op._add_control_input(self._pivot.op)
# pylint: enable=protected-access
for x in op.outputs:
self._values.add(x.name)
else:
for index in range(len(op.inputs)):
x = op.inputs[index]
real_x = self.AddValue(x)
if real_x != x:
# pylint: disable=protected-access
op._update_input(index, real_x)
# pylint: enable=protected-access
for x in op.outputs:
self._values.add(x.name)
if self._outer_context or not IsLoopExit(op):
op.graph.prevent_fetching(op)
def _ProcessOutputTensor(self, val):
"""Process an output tensor of a conditional branch."""
real_val = val
if val.name not in self._values:
# Handle the special case of lambda: x
self._values.add(val.name)
if self._outer_context:
real_val = self._outer_context.AddValue(val)
self._values.add(real_val.name)
real_val = _SwitchRefOrTensor(real_val, self._pred)[self._branch]
self._external_values[val.name] = real_val
else:
external_val = self._external_values.get(val.name)
if external_val is not None:
real_val = external_val
return real_val
def BuildCondBranch(self, fn):
"""Add the subgraph defined by fn() to the graph."""
r = fn()
original_r = r
result = []
if r is not None:
if not isinstance(r, list) and not isinstance(r, _basetuple):
r = [r]
original_r = [original_r]
r = _convert_tensorarrays_to_flows(r)
for v in r:
real_v = v
if isinstance(v, ops.Operation):
# Use pivot as the proxy for this op.
real_v = with_dependencies([v], self._pivot)
else:
if isinstance(v, (ops.IndexedSlices, sparse_tensor.SparseTensor)):
values = self._ProcessOutputTensor(v.values)
indices = self._ProcessOutputTensor(v.indices)
if isinstance(v, ops.IndexedSlices):
dense_shape = v.dense_shape
if dense_shape is not None:
dense_shape = self._ProcessOutputTensor(dense_shape)
real_v = ops.IndexedSlices(values, indices, dense_shape)
else:
dense_shape = self._ProcessOutputTensor(v.dense_shape)
real_v = sparse_tensor.SparseTensor(indices, values, dense_shape)
else:
real_v = self._ProcessOutputTensor(v)
result.append(real_v)
return original_r, result
def cond(pred, fn1, fn2, name=None):
"""Return either fn1() or fn2() based on the boolean predicate `pred`.
`fn1` and `fn2` both return lists of output tensors. `fn1` and `fn2` must have
the same non-zero number and type of outputs.
Note that the conditional execution applies only to the operations defined in
fn1 and fn2. Consider the following simple program:
```python
z = tf.multiply(a, b)
result = tf.cond(x < y, lambda: tf.add(x, z), lambda: tf.square(y))
```
  If x < y, the `tf.add` operation will be executed and the `tf.square`
  operation will not be executed. Since z is needed for at least one
  branch of the cond, the `tf.multiply` operation is always executed,
  unconditionally.
  Although this behavior is consistent with the dataflow model of TensorFlow,
  it has occasionally surprised some users who expected lazier semantics.
Args:
pred: A scalar determining whether to return the result of `fn1` or `fn2`.
fn1: The callable to be performed if pred is true.
    fn2: The callable to be performed if pred is false.
name: Optional name prefix for the returned tensors.
Returns:
Tensors returned by the call to either `fn1` or `fn2`. If the callables
return a singleton list, the element is extracted from the list.
Raises:
TypeError: if `fn1` or `fn2` is not callable.
ValueError: if `fn1` and `fn2` do not return the same number of tensors, or
return tensors of different types.
Example:
```python
x = tf.constant(2)
y = tf.constant(5)
def f1(): return tf.multiply(x, 17)
def f2(): return tf.add(y, 23)
r = tf.cond(tf.less(x, y), f1, f2)
# r is set to f1().
# Operations in f2 (e.g., tf.add) are not executed.
```
"""
with ops.name_scope(name, "cond", [pred]) as name:
if not callable(fn1):
raise TypeError("fn1 must be callable.")
if not callable(fn2):
raise TypeError("fn2 must be callable.")
# Add the Switch to the graph.
if isinstance(pred, bool):
raise TypeError("pred must not be a Python bool")
p_2, p_1 = switch(pred, pred)
pivot_1 = array_ops.identity(p_1, name="switch_t")
pivot_2 = array_ops.identity(p_2, name="switch_f")
pred = array_ops.identity(pred, name="pred_id")
# Disable the fetching of tensors that are only on one branch of cond.
for tensor in [p_1, p_2, pivot_1, pivot_2, pred]:
tensor.op.graph.prevent_fetching(tensor.op)
# Build the graph for the true branch in a new context.
context_t = CondContext(pred, pivot_1, branch=1)
context_t.Enter()
orig_res, res_t = context_t.BuildCondBranch(fn1)
context_t.ExitResult(res_t)
context_t.Exit()
# Build the graph for the false branch in a new context.
context_f = CondContext(pred, pivot_2, branch=0)
context_f.Enter()
_, res_f = context_f.BuildCondBranch(fn2)
context_f.ExitResult(res_f)
context_f.Exit()
# Add the final merge to the graph.
if len(res_t) != len(res_f):
raise ValueError("fn1 and fn2 must return the same number of results.")
if not res_t:
raise ValueError("fn1 and fn2 must return at least one result.")
for x, y in zip(res_f, res_t):
assert ((isinstance(x, ops.IndexedSlices) and
isinstance(y, ops.IndexedSlices)) or
(isinstance(x, sparse_tensor.SparseTensor) and
isinstance(y, sparse_tensor.SparseTensor)) or
(isinstance(x, ops.Tensor) and isinstance(y, ops.Tensor)))
val_x = x if isinstance(x, ops.Tensor) else x.values
val_y = y if isinstance(y, ops.Tensor) else y.values
if val_x.dtype.base_dtype != val_y.dtype.base_dtype:
raise ValueError("Outputs of fn1 and fn2 must have the same type: "
"%s, %s" % (val_x.dtype.name, val_y.dtype.name))
merges = [merge([x[0], x[1]])[0] for x in zip(res_f, res_t)]
merges = _convert_flows_to_tensorarrays(orig_res, merges)
# Add to collections
ops.add_to_collection(ops.GraphKeys.COND_CONTEXT, context_t)
ops.add_to_collection(ops.GraphKeys.COND_CONTEXT, context_f)
return merges[0] if len(merges) == 1 else merges
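# Hedged example (not part of the library) tying `cond` to the context
# machinery above: the two branch CondContexts registered in the
# COND_CONTEXT collection can be serialized with `to_proto`, e.g. when a
# MetaGraph is exported. The helper name and constants are hypothetical.
def _example_cond_context_to_proto():
  x = constant_op.constant(2)
  y = constant_op.constant(5)
  r = cond(math_ops.less(x, y), lambda: x + 1, lambda: y - 1)
  protos = [ctxt.to_proto()
            for ctxt in ops.get_collection(ops.GraphKeys.COND_CONTEXT)]
  return r, protos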
# TODO(yuanbyu): Consider having a unified notion of context for
# not only conditionals and loops but also control dependency and
# subgraphs.
class WhileContext(ControlFlowContext):
"""The context for the loop construct."""
def __init__(self, parallel_iterations=10, back_prop=True, swap_memory=False,
name="while_context", grad_state=None, context_def=None,
import_scope=None):
""""Creates a `WhileContext`.
Args:
parallel_iterations: The number of iterations allowed to run in parallel.
back_prop: Whether backprop is enabled for this while loop.
swap_memory: Whether GPU-CPU memory swap is enabled for this loop.
name: Optional name prefix for the returned tensors.
grad_state: The gradient loop state.
context_def: Optional `WhileContextDef` protocol buffer to initialize
        the `WhileContext` Python object from.
      import_scope: Optional `string`. Name scope to add. Only used when
        initializing from a protocol buffer.
"""
if context_def:
self._init_from_proto(context_def, import_scope=import_scope)
else:
ControlFlowContext.__init__(self)
self._init_from_args(parallel_iterations, back_prop, swap_memory,
name)
# The gradient loop state.
self._grad_state = grad_state
def _init_from_args(self, parallel_iterations, back_prop, swap_memory,
name):
"""Creates a new `WhileContext` from arguments.
Args:
parallel_iterations: The number of iterations allowed to run in parallel.
back_prop: Whether backprop is enabled for this while loop.
swap_memory: Whether GPU-CPU memory swap is enabled for this loop.
name: Optional name prefix for the returned tensors.
Raises:
ValueError: If `parallel_iterations` has invalid value.
"""
if not isinstance(parallel_iterations, int) or (parallel_iterations <= 0):
raise ValueError("`parallel_iterations` must be a positive integer: "
"%s" % parallel_iterations)
self._name = ops.get_default_graph().unique_name(name)
self._parallel_iterations = parallel_iterations
self._back_prop = back_prop
self._swap_memory = swap_memory
# We use this node to control constants created by the pred lambda.
self._pivot_for_pred = None
# We use this node to control constants created by the body lambda.
self._pivot_for_body = None
# The boolean tensor for loop termination condition. Used in code
# generation for gradient computation
self._pivot = None
# The list of exit tensors for loop variables.
self._loop_exits = []
def _init_from_proto(self, context_def, import_scope=None):
"""Creates a new `WhileContext` from protocol buffer.
Args:
context_def: `WhileContextDef` protocol buffer.
import_scope: Optional `string`. Name scope to add.
"""
assert isinstance(context_def, control_flow_pb2.WhileContextDef)
# Create from context_def.
g = ops.get_default_graph()
self._name = ops.prepend_name_scope(
context_def.context_name, import_scope)
self._parallel_iterations = context_def.parallel_iterations
self._back_prop = context_def.back_prop
self._swap_memory = context_def.swap_memory
self._pivot_for_pred = g.as_graph_element(ops.prepend_name_scope(
context_def.pivot_for_pred_name, import_scope))
# We use this node to control constants created by the body lambda.
self._pivot_for_body = g.as_graph_element(ops.prepend_name_scope(
context_def.pivot_for_body_name, import_scope))
# The boolean tensor for loop termination condition. Used in code
# generation for gradient computation.
self._pivot = g.as_graph_element(
ops.prepend_name_scope(context_def.pivot_name, import_scope))
# The list of exit tensors for loop variables.
self._loop_exits = [g.as_graph_element(
ops.prepend_name_scope(exit_name, import_scope))
for exit_name in context_def.loop_exit_names]
super(WhileContext, self).__init__(values_def=context_def.values_def,
import_scope=import_scope)
@property
def name(self):
return self._name
@property
def parallel_iterations(self):
"""The number of iterations allowed to run in parallel."""
return self._parallel_iterations
@property
def back_prop(self):
"""True iff backprop is enabled for this while loop."""
return self._back_prop
@property
def swap_memory(self):
"""True iff GPU-CPU memory swap is enabled for this while loop."""
return self._swap_memory
@property
def pivot(self):
"""The boolean tensor representing the loop termination condition."""
return self._pivot
@property
def loop_exits(self):
"""The list of exit tensors for loop variables."""
return self._loop_exits
@property
def grad_state(self):
"""The gradient loop state."""
return self._grad_state
def to_proto(self, export_scope=None):
"""Converts a `WhileContext` to a `WhileContextDef` protocol buffer.
Args:
export_scope: Optional `string`. Name scope to remove.
Returns:
A `WhileContextDef` protocol buffer.
"""
if (export_scope is None or
self.name.startswith(export_scope)):
context_def = control_flow_pb2.WhileContextDef()
context_def.context_name = ops.strip_name_scope(
self.name, export_scope)
context_def.parallel_iterations = self._parallel_iterations
context_def.back_prop = self._back_prop
context_def.swap_memory = self._swap_memory
context_def.pivot_for_pred_name = ops.strip_name_scope(
self._pivot_for_pred.name, export_scope)
context_def.pivot_for_body_name = ops.strip_name_scope(
self._pivot_for_body.name, export_scope)
context_def.pivot_name = ops.strip_name_scope(
self._pivot.name, export_scope)
if self._loop_exits:
context_def.loop_exit_names.extend(
[ops.strip_name_scope(l.name, export_scope)
for l in self._loop_exits])
context_def.values_def.MergeFrom(
super(WhileContext, self)._to_proto(
export_scope=export_scope))
return context_def
else:
return None
@staticmethod
def from_proto(context_def, import_scope=None):
"""Returns a `WhileContext` object created from `context_def`.
Args:
context_def: A `WhileContextDef` protocol buffer.
import_scope: Optional `string`. Name scope to add.
Returns:
A `WhileContext` Python object.
"""
return WhileContext(context_def=context_def,
import_scope=import_scope)
def GetWhileContext(self):
return self
def GetControlPivot(self):
if self._pivot_for_body is not None:
return self._pivot_for_body
return self._pivot_for_pred
def AddValue(self, val):
"""Add `val` to the current context and its outer context recursively."""
result = val
if val.name not in self._values:
self._values.add(val.name)
# If we are in a grad context and val is from its forward context,
# use GetRealValue(), which adds the logic to save the history of
# val in forward.
grad_ctxt = ops.get_default_graph()._get_control_flow_context()
if grad_ctxt:
grad_ctxt = grad_ctxt.GetWhileContext()
if grad_ctxt.grad_state:
forward_ctxt = _GetWhileContext(val.op)
if IsLoopExit(val.op):
forward_ctxt = forward_ctxt.outer_context
if forward_ctxt:
forward_ctxt = forward_ctxt.GetWhileContext()
if forward_ctxt == grad_ctxt.grad_state.forward_context:
real_val = grad_ctxt.grad_state.GetRealValue(val)
self._external_values[val.name] = real_val
return real_val
if self._outer_context is not None:
result = self._outer_context.AddValue(val)
# Create an Enter to make `result` known to this loop context.
with ops.control_dependencies(None):
enter = _Enter(result, self._name, is_constant=True,
parallel_iterations=self._parallel_iterations)
# Fix the control inputs and control flow context of these enter ops.
self._FixControlInputsAndContext([enter])
# Add `enter` in this context.
self._values.add(enter.name)
self._external_values[val.name] = enter
result = enter
else:
actual_val = self._external_values.get(val.name)
if actual_val is not None:
result = actual_val
return result
def AddOp(self, op):
"""Add `op` to the current context."""
# For a reduction op, if op is in a grad context and its input is from
# its forward context, moving op to the forward context means we would
# store the tensor after the reduction as opposed to the tensor before
# reduction, and therefore could significantly reduce memory consumption.
# For now, we do this only for a few ops.
if op.type in {"Shape", "Size", "Rank"}:
grad_ctxt = ops.get_default_graph()._get_control_flow_context()
if grad_ctxt:
grad_ctxt = grad_ctxt.GetWhileContext()
if grad_ctxt.grad_state:
op_input_forward_ctxt = _GetWhileContext(op.inputs[0].op)
if op_input_forward_ctxt == grad_ctxt.grad_state.forward_context:
op_input_ctxt = op.inputs[0].op._get_control_flow_context()
op._set_control_flow_context(op_input_ctxt)
op_input_ctxt._AddOpInternal(op)
return
self._AddOpInternal(op)
def _AddOpInternal(self, op):
"""Add `op` to the current context.
In the case that op has only external data inputs, we remove all of its
external control inputs so all its inputs are in the same while loop
    context. This is valid because op now has an Enter input that carries all
    the right control dependencies.
"""
if not op.inputs:
# Remove any external control dependency on this op
control_inputs = self._RemoveExternalControlEdges(op)
# Add a control edge from the control pivot to this op.
if not control_inputs:
# pylint: disable=protected-access
op._add_control_input(self.GetControlPivot().op)
# pylint: enable=protected-access
for x in op.outputs:
self._values.add(x.name)
else:
for index in range(len(op.inputs)):
x = op.inputs[index]
real_x = self.AddValue(x)
if real_x != x:
op._update_input(index, real_x)
# Remove any external control dependency on this op.
self._RemoveExternalControlEdges(op)
# Add a control dependency to prevent loop invariants from
# enabling ops that should not be executed.
self._MaybeAddControlDependency(op)
for x in op.outputs:
self._values.add(x.name)
if self._outer_context or not IsLoopExit(op):
op.graph.prevent_fetching(op)
def _MaybeAddControlDependency(self, op):
"""Add a control input to the op if it only depends on loop invariants."""
def _IsOpFree(op):
if op.control_inputs:
return False
for x in op.inputs:
if not _IsLoopConstantEnter(x.op):
return False
return True
if _IsOpFree(op):
# pylint: disable=protected-access
op._add_control_input(self.GetControlPivot().op)
# pylint: enable=protected-access
def AddForwardLoopCounter(self, outer_grad_state):
"""Adds a loop that counts the number of iterations.
This is added to the forward loop at the time when we start to
create the loop for backprop gradient computation. Called in
the outer context of this forward context.
The pseudocode is:
`n = 0; while (_pivot) { n++; }`
Note that a control dependency is added to `n` to ensure the correct
execution order of stack push ops.
Args:
outer_grad_state: The outer grad state. None if not nested.
Returns:
The number of iterations taken by the forward loop and the loop index.
"""
n = constant_op.constant(0, name="f_count")
if outer_grad_state is not None:
# Force the stack pushes of i-th execution of an inner loop to be ordered
# before the pushes of (i+1)-th execution of the same inner loop.
outer_add_op = outer_grad_state.forward_index.op.inputs[0].op
n.op._add_control_input(outer_add_op) # pylint: disable=protected-access
self.Enter()
self.AddName(n.name)
enter_n = _Enter(n, self._name, is_constant=False,
parallel_iterations=self._parallel_iterations,
name="f_count")
merge_n = merge([enter_n, enter_n])[0]
switch_n = switch(merge_n, self._pivot)
index = math_ops.add(switch_n[1], 1)
next_n = _NextIteration(index)
merge_n.op._update_input(1, next_n)
total_iterations = exit(switch_n[0], name="f_count")
self.loop_exits.append(total_iterations)
self.ExitResult([total_iterations])
self.Exit()
return total_iterations, next_n
def AddBackPropLoopCounter(self, count, outer_grad_state):
"""Add the backprop loop that controls the iterations.
This is added to the backprop loop. It is used to control the loop
termination of the backprop loop. Called in the outer context of
this grad context.
The pseudocode is:
`n = count; while (n >= 1) { n--; }`
Note that a control dependency is added to `final_zero` to ensure the
correct execution order of stack pop ops.
Args:
count: The number of iterations for backprop.
outer_grad_state: The outer grad state. None if not nested.
Returns:
The loop index.
"""
one = constant_op.constant(1, name="b_count")
self.Enter()
self.AddName(count.name)
enter_count = _Enter(count, self._name, is_constant=False,
parallel_iterations=self._parallel_iterations,
name="b_count")
merge_count = merge([enter_count, enter_count])[0]
self._pivot_for_pred = merge_count
pred = math_ops.greater_equal(merge_count, one)
self._pivot = loop_cond(pred, name="b_count")
switch_count = switch(merge_count, self._pivot)
index = math_ops.subtract(switch_count[1], one)
self._pivot_for_body = index
next_count = _NextIteration(index)
merge_count.op._update_input(1, next_count)
final_zero = exit(switch_count[0], name="b_count")
self.loop_exits.append(final_zero)
if outer_grad_state is not None:
# Force the stack pops of i-th execution of an inner loop to be ordered
# before the pops of (i+1)-th execution of the same inner loop.
# pylint: disable=protected-access
outer_grad_state.grad_sync._add_control_input(final_zero.op)
# pylint: enable=protected-access
self.ExitResult([final_zero])
self.Exit()
return next_count
def AddBackPropAccumulator(self, op, grad):
"""Add an accumulation loop for every loop invariant.
This is added to the backprop loop. It is used to accumulate partial
gradients within each loop iteration. Called when in the gradient while
context.
The pseudocode is:
```
acc = 0.0;
while (_pivot) {
acc += grad;
}
```
Args:
op: The Enter op for a loop invariant.
grad: The partial gradient of an iteration for a loop invariant.
Returns:
The gradient for a loop invariant.
"""
self.Exit()
# Create a zeros tensor with the right shape for acc. If we don't
# know the full shape statically, we will have to get the shape
# dynamically from the forward inference. Getting the shape right
# for the zeros is only needed for the base case when the loop exits
# without running any iterations.
shape = grad.get_shape()
if shape.is_fully_defined():
if self.outer_context: self.outer_context.Enter()
acc = constant_op.constant(0, grad.dtype, shape=shape, name="b_acc")
if self.outer_context: self.outer_context.Exit()
else:
value = op.inputs[0]
if (isinstance(self.outer_context, WhileContext) and
self.outer_context.grad_state is not None):
# We are in a nested while loop.
forward_ctxt = self.grad_state.forward_context
forward_ctxt.outer_context.Enter()
zeros_shape = array_ops.shape_internal(value, optimize=False)
forward_ctxt.outer_context.Exit()
outer_grad_state = self.grad_state.outer_grad_state
history_zeros_shape = outer_grad_state.AddForwardAccumulator(
zeros_shape)
self.outer_context.Enter()
real_shape = outer_grad_state.AddBackPropAccumulatedValue(
history_zeros_shape, zeros_shape)
acc = array_ops.zeros(real_shape, grad.dtype)
self.outer_context.Exit()
else:
if self.outer_context: self.outer_context.Enter()
zeros_shape = array_ops.shape_internal(value, optimize=False)
acc = array_ops.zeros(zeros_shape, grad.dtype)
if self.outer_context: self.outer_context.Exit()
acc._shape = grad.get_shape() # pylint: disable=protected-access
self.Enter()
self.AddName(acc.name)
enter_acc = _Enter(acc, self._name, is_constant=False,
parallel_iterations=self._parallel_iterations,
name="b_acc")
merge_acc = merge([enter_acc, enter_acc], name="b_acc")[0]
switch_acc_false, switch_acc_true = switch(merge_acc, self._pivot)
add_acc = math_ops.add(switch_acc_true, grad)
next_acc = _NextIteration(add_acc)
merge_acc.op._update_input(1, next_acc) # pylint: disable=protected-access
acc_result = exit(switch_acc_false, name="b_acc")
self.loop_exits.append(acc_result)
self.ExitResult([acc_result])
return acc_result
def AddBackPropIndexedSlicesAccumulator(self, op, grad):
"""This is used for accumulating gradients that are IndexedSlices.
    This is essentially the equivalent of AddBackPropAccumulator but optimized
for things like updating embeddings from within a while loop.
Args:
op: The Enter op for a loop invariant.
grad: The partial gradients represented as an IndexedSlices.
Returns:
The accumulated IndexedSlices gradient of the loop invariant.
"""
values = grad.values
indices = grad.indices
dense_shape = grad.dense_shape
self.Exit()
if self.outer_context: self.outer_context.Enter()
if values.get_shape().is_fully_defined():
values_shape = tensor_shape.TensorShape(
[tensor_shape.Dimension(1)] + values.get_shape().dims[1:])
if self.outer_context: self.outer_context.Enter()
values_acc = constant_op.constant(0, values.dtype, shape=values_shape,
name="b_acc")
if self.outer_context: self.outer_context.Exit()
else:
values_shape = array_ops.shape_internal(op.inputs[0], optimize=False)[1:]
values_shape = array_ops.concat([[1], values_shape], 0)
values_acc = array_ops.zeros(values_shape, dtype=values.dtype)
indices_acc = constant_op.constant([0], indices.dtype)
shape_acc = None
if dense_shape is not None:
if dense_shape.get_shape().is_fully_defined():
if self.outer_context: self.outer_context.Enter()
shape_acc = constant_op.constant(0, dense_shape.dtype,
shape=dense_shape.get_shape())
if self.outer_context: self.outer_context.Exit()
else:
shape_acc = array_ops.zeros_like(
array_ops.shape_internal(op.inputs[0], optimize=False),
optimize=False)
if self.outer_context: self.outer_context.Exit()
self.Enter()
self.AddName(values_acc.name)
self.AddName(indices_acc.name)
init_acc = [indices_acc, values_acc]
if shape_acc is not None:
self.AddName(shape_acc.name)
init_acc.append(shape_acc)
enter_acc = [_Enter(x, self._name, is_constant=False,
parallel_iterations=self._parallel_iterations,
name="b_acc") for x in init_acc]
merge_acc = [merge([x, x], name="b_acc")[0] for x in enter_acc]
switch_acc = [switch(x, self._pivot) for x in merge_acc]
# The actual accumulation.
acc_indexed_slices = [
array_ops.concat([xa[1], xv], 0)
for xa, xv in zip(switch_acc[:2], [indices, values])
]
if shape_acc is not None:
# For the shape we just keep the maximum
acc_indexed_slices.append(
math_ops.maximum(dense_shape, switch_acc[2][1]))
next_acc = [_NextIteration(x) for x in acc_indexed_slices]
for xm, xn in zip(merge_acc, next_acc):
xm.op._update_input(1, xn) # pylint: disable=protected-access
acc_exits = [exit(x[0], name="b_acc") for x in switch_acc]
self.loop_exits.extend(acc_exits)
self.ExitResult(acc_exits)
return ops.IndexedSlices(
indices=acc_exits[0], values=acc_exits[1],
dense_shape=acc_exits[2] if shape_acc is not None else None)
def _InitializeValues(self, values):
"""Makes the values known to this context."""
self._values = set()
for x in values:
if isinstance(x, ops.Tensor):
self._values.add(x.name)
else:
self._values.add(x.values.name)
self._values.add(x.indices.name)
if isinstance(x, ops.IndexedSlices):
dense_shape = x.dense_shape
elif isinstance(x, sparse_tensor.SparseTensor):
dense_shape = x.dense_shape
else:
raise TypeError("Type %s not supported" % type(x))
if dense_shape is not None:
self._values.add(dense_shape.name)
def _BuildLoop(self, pred, body, original_loop_vars, loop_vars,
shape_invariants):
"""Core: Add the loop termination condition and body to the graph."""
flat_loop_vars = nest.flatten(original_loop_vars)
# Let the context know the loop variables so the loop variables
# would be added in the outer contexts properly.
self._InitializeValues(loop_vars)
real_vars = loop_vars
if self._outer_context:
real_vars = [self._outer_context.AddValue(x) for x in loop_vars]
with ops.control_dependencies(None):
enter_vars = [_Enter(x, self._name, is_constant=False,
parallel_iterations=self._parallel_iterations,
use_input_shape=(shape_invariants is None))
for x in real_vars]
if self._outer_context:
control_pivot = self._outer_context.GetControlPivot().op
for var in enter_vars:
if _IsLoopConstantEnter(var.op.inputs[0].op):
# pylint: disable=protected-access
var.op._add_control_input(control_pivot)
# pylint: enable=protected-access
_SetShapeInvariants(real_vars, enter_vars, shape_invariants)
# Fix the control inputs and control flow context of these enter ops.
self._FixControlInputsAndContext(enter_vars)
self._InitializeValues(enter_vars)
merge_vars = [merge([x, x])[0] for x in enter_vars]
self._pivot_for_pred = merge_vars[0]
# Build the graph for pred.
merge_vars_with_tensor_arrays = (
_convert_flows_to_tensorarrays(flat_loop_vars, merge_vars))
packed_vars = nest.pack_sequence_as(
structure=original_loop_vars,
flat_sequence=merge_vars_with_tensor_arrays)
c = ops.convert_to_tensor(pred(*packed_vars))
self._pivot = loop_cond(c, name="LoopCond")
switch_vars = [_SwitchRefOrTensor(x, self._pivot) for x in merge_vars]
# Build the graph for body.
vars_for_body = [_Identity(x[1]) for x in switch_vars]
self._pivot_for_body = vars_for_body[0]
# Convert TensorArray flow variables inside the context back into
# their associated TensorArrays for calling the body.
vars_for_body_with_tensor_arrays = (
_convert_flows_to_tensorarrays(flat_loop_vars, vars_for_body))
packed_vars_for_body = nest.pack_sequence_as(
structure=original_loop_vars,
flat_sequence=vars_for_body_with_tensor_arrays)
body_result = body(*packed_vars_for_body)
if not nest.is_sequence(body_result):
body_result = [body_result]
# Compare the structure types of input and output of body.
# For backwards compatibility, the first layer is forced to a list
# during this comparison, because inputs are typically lists and
# outputs of the body are typically tuples.
nest.assert_same_structure(list(packed_vars_for_body), list(body_result))
# Store body_result to keep track of TensorArrays returned by body
original_body_result = body_result
# Convert TensorArrays returned by body into their flow variables
flat_result = nest.flatten(body_result)
result = _convert_tensorarrays_to_flows(flat_result)
result = ops.convert_n_to_tensor_or_indexed_slices(result)
# Add NextIteration and the back edges to complete the loop.
if len(merge_vars) != len(result):
raise ValueError("Number of inputs and outputs of body must match "
"loop_vars: %d, %d" % (len(merge_vars), len(result)))
next_vars = []
for m, v in zip(merge_vars, result):
next_vars.append(_AddNextAndBackEdge(m, v))
# Add the exit ops.
exit_vars = [exit(x[0]) for x in switch_vars]
self._loop_exits = exit_vars
# Make sure the shapes of loop outputs are correct.
for m_var, n_var in zip(merge_vars, next_vars):
if isinstance(m_var, ops.Tensor):
_EnforceShapeInvariant(m_var, n_var)
# Exit the loop.
self.ExitResult(exit_vars)
return original_body_result, exit_vars
def BuildLoop(self, pred, body, loop_vars, shape_invariants):
"""Add the loop termination condition and body to the graph."""
# Keep original_loop_vars to identify which are TensorArrays
original_loop_vars = loop_vars
flat_loop_vars = nest.flatten(loop_vars)
# Convert TensorArrays to their flow variables
loop_vars = _convert_tensorarrays_to_flows(flat_loop_vars)
loop_vars = ops.convert_n_to_tensor_or_indexed_slices(loop_vars)
try:
self.Enter()
original_body_result, exit_vars = self._BuildLoop(
pred, body, original_loop_vars, loop_vars, shape_invariants)
finally:
self.Exit()
flat_result = nest.flatten(original_body_result)
# Convert TensorArray flow variables outside the context back into
# their associated TensorArrays for returning to caller.
exit_vars_with_tensor_arrays = (
_convert_flows_to_tensorarrays(flat_result, exit_vars))
packed_exit_vars = nest.pack_sequence_as(
structure=original_body_result,
flat_sequence=exit_vars_with_tensor_arrays)
return (packed_exit_vars[0] if len(exit_vars) == 1
else packed_exit_vars)
def _FixControlInputsAndContext(self, enters):
graph = ops.get_default_graph()
# pylint: disable=protected-access
for e in enters:
if isinstance(e, ops.Tensor):
xs = [e]
else:
if not isinstance(e, (ops.IndexedSlices, sparse_tensor.SparseTensor)):
raise TypeError("Type %s not supported" % type(e))
xs = [e.values, e.indices]
shape = e.dense_shape
if shape is not None:
xs.append(shape)
for x in xs:
inp_op = x.op.inputs[0]
control_inputs = graph._control_dependencies_for_inputs([inp_op])
outer_control_inputs = [op for op in control_inputs
if self._IsInOuterContext(op)]
x.op._set_control_flow_context(self)
x.op._add_control_inputs(outer_control_inputs)
graph._record_op_seen_by_control_dependencies(x.op)
# pylint: enable=protected-access
def while_loop(cond, body, loop_vars, shape_invariants=None,
parallel_iterations=10, back_prop=True, swap_memory=False,
name=None):
"""Repeat `body` while the condition `cond` is true.
`cond` is a callable returning a boolean scalar tensor. `body` is a callable
returning a (possibly nested) tuple, namedtuple or list of tensors of the same
arity (length and structure) and types as `loop_vars`. `loop_vars` is a
(possibly nested) tuple, namedtuple or list of tensors that is passed to both
`cond` and `body`. `cond` and `body` both take as many arguments as there are
`loop_vars`.
While `cond` evaluates to true, `body` is executed.
In addition to regular Tensors or IndexedSlices, the body may accept and
return TensorArray objects. The flows of the TensorArray objects will
be appropriately forwarded between loops and during gradient calculations.
For correctness, `tf.while_loop()` strictly enforces shape invariants for
the loop variables. A shape invariant is a (possibly partial) shape that
is unchanged across the iterations of the loop. An error will be raised
if the shape of a loop variable after an iteration is determined to be more
general than or incompatible with its shape invariant. For example, a shape
of [11, None] is more general than a shape of [11, 17], and [11, 21] is not
compatible with [11, 17]. By default (if the argument `shape_invariants` is
not specified), it is assumed that the initial shape of each tensor in
`loop_vars` is the same in every iteration. The `shape_invariants` argument
allows the caller to specify a less specific shape invariant for each loop
variable, which is needed if the shape varies between iterations. The
@{tf.Tensor.set_shape}
function may also be used in the `body` function to indicate that
the output loop variable has a particular shape. The shape invariant for
SparseTensor and IndexedSlices are treated specially as follows:
a) If a loop variable is a SparseTensor, the shape invariant must be
TensorShape([r]) where r is the rank of the dense tensor represented
by the sparse tensor. It means the shapes of the three tensors of the
SparseTensor are ([None], [None, r], [r]). NOTE: The shape invariant here
is the shape of the SparseTensor.dense_shape property. It must be the shape of
a vector.
b) If a loop variable is an IndexedSlices, the shape invariant must be
a shape invariant of the values tensor of the IndexedSlices. It means
the shapes of the three tensors of the IndexedSlices are (shape, [shape[0]],
[shape.ndims]).
`while_loop` implements non-strict semantics, enabling multiple iterations
to run in parallel. The maximum number of parallel iterations can be
controlled by `parallel_iterations`, which gives users some control over
memory consumption and execution order. For correct programs, `while_loop`
should return the same result for any parallel_iterations > 0.
For training, TensorFlow remembers the tensors that are produced in the
forward inference but needed in back propagation. These tensors can be a
main source of memory consumption and often cause OOM problems when training
on GPUs. When the flag swap_memory is true, we swap out these tensors from
GPU to CPU. This for example allows us to train RNN models with very long
sequences and large batches.
Args:
cond: A callable that represents the termination condition of the loop.
body: A callable that represents the loop body.
loop_vars: A (possibly nested) tuple, namedtuple or list of numpy array,
`Tensor`, and `TensorArray` objects.
shape_invariants: The shape invariants for the loop variables.
parallel_iterations: The number of iterations allowed to run in parallel.
It must be a positive integer.
back_prop: Whether backprop is enabled for this while loop.
swap_memory: Whether GPU-CPU memory swap is enabled for this loop.
name: Optional name prefix for the returned tensors.
Returns:
The output tensors for the loop variables after the loop. When the length
    of `loop_vars` is 1 this is a Tensor, TensorArray or IndexedSlices and when
the length of `loop_vars` is greater than 1 it returns a list.
Raises:
TypeError: if `cond` or `body` is not callable.
ValueError: if `loop_vars` is empty.
Example:
```python
i = tf.constant(0)
c = lambda i: tf.less(i, 10)
b = lambda i: tf.add(i, 1)
r = tf.while_loop(c, b, [i])
```
Example with nesting and a namedtuple:
```python
import collections
Pair = collections.namedtuple('Pair', 'j, k')
ijk_0 = (tf.constant(0), Pair(tf.constant(1), tf.constant(2)))
c = lambda i, p: i < 10
b = lambda i, p: (i + 1, Pair((p.j + p.k), (p.j - p.k)))
ijk_final = tf.while_loop(c, b, ijk_0)
```
Example using shape_invariants:
```python
i0 = tf.constant(0)
m0 = tf.ones([2, 2])
c = lambda i, m: i < 10
b = lambda i, m: [i+1, tf.concat([m, m], axis=0)]
tf.while_loop(
c, b, loop_vars=[i0, m0],
shape_invariants=[i0.get_shape(), tf.TensorShape([None, 2])])
```
"""
with ops.name_scope(name, "while", loop_vars) as name:
if not loop_vars:
raise ValueError("No loop variables provided")
if not callable(cond):
raise TypeError("cond must be callable.")
if not callable(body):
raise TypeError("body must be callable.")
if parallel_iterations < 1:
raise TypeError("parallel_iterations must be a positive integer.")
if shape_invariants is not None:
nest.assert_same_structure(loop_vars, shape_invariants)
context = WhileContext(parallel_iterations, back_prop, swap_memory, name)
ops.add_to_collection(ops.GraphKeys.WHILE_CONTEXT, context)
result = context.BuildLoop(cond, body, loop_vars, shape_invariants)
return result
def _AsTensorList(x, p):
"""Return x as a list of Tensors or IndexedSlices.
For entries of `x` that are Operations, this returns an Identity of `p`
with a dependency on the operation.
Args:
x: A Tensor/IndexedSlices/Operation or a list or tuple of them.
p: A Tensor to return for entries in `x` that are Operations.
Returns:
A list of Tensors or IndexedSlices.
"""
if not isinstance(x, (list, _basetuple)):
x = [x]
l = []
for v in x:
if isinstance(v, ops.Operation):
v = with_dependencies([v], p)
v = ops.convert_to_tensor_or_indexed_slices(v)
if isinstance(v, ops.Tensor):
l.append(array_ops.identity(v))
else:
l.append(ops.IndexedSlices(array_ops.identity(v.values),
array_ops.identity(v.indices)))
return l
def _CheckResults(a, b):
assert len(a) == len(b), (
"Values returned by a() and b() must have the same length.")
for x, y in zip(a, b):
assert x.dtype == y.dtype, (
"Values returned by a() [%s] and b() [%s] must have "
"the same type: %s, %s." %
(x.name, y.name, x.dtype.name, y.dtype.name))
def with_dependencies(dependencies, output_tensor, name=None):
"""Produces the content of `output_tensor` only after `dependencies`.
In some cases, a user may want the output of an operation to be
consumed externally only after some other dependencies have run
  first. This function returns `output_tensor`, but only after all
  operations in `dependencies` have run. Note that the control dependency is
  attached to the returned tensor only; if `output_tensor` is consumed through
  some other path, there is no guarantee that it will be evaluated after the
  `dependencies` have run.
See also `tuple` and `group`.
Args:
dependencies: Iterable of operations to run before this op finishes.
output_tensor: A `Tensor` or `IndexedSlices` that will be returned.
name: (Optional) A name for this operation.
Returns:
Same as `output_tensor`.
Raises:
TypeError: if `output_tensor` is not a `Tensor` or `IndexedSlices`.
"""
with ops.name_scope(name, "control_dependency",
list(dependencies) + [output_tensor]) as name:
with ops.colocate_with(output_tensor):
with ops.control_dependencies(dependencies):
output_tensor = ops.convert_to_tensor_or_indexed_slices(output_tensor)
if isinstance(output_tensor, ops.Tensor):
return _Identity(output_tensor, name=name)
else:
return ops.IndexedSlices(_Identity(output_tensor.values, name=name),
output_tensor.indices,
output_tensor.dense_shape)
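# A minimal usage sketch for `with_dependencies`, assuming the standard
# constant_op / math_ops modules imported above; the `_example_*`, `x` and
# `update` names below are illustrative only.
def _example_with_dependencies():
  """Builds a tiny graph where the result is gated on another computation."""
  x = constant_op.constant(1.0, name="example_x")
  update = math_ops.add(x, 1.0, name="example_update")
  # `result` has the same value as `x`, but fetching it forces `update`
  # to be computed first.
  result = with_dependencies([update], x)
  return result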
def _GroupControlDeps(dev, deps, name=None):
with ops.control_dependencies(deps):
if dev is None:
return no_op(name=name)
else:
with ops.device(dev):
return no_op(name=name)
# TODO(touts): Accept "inputs" as a list.
def group(*inputs, **kwargs):
"""Create an op that groups multiple operations.
  When this op finishes, all ops in `inputs` have finished. This op has no
output.
See also `tuple` and `with_dependencies`.
Args:
*inputs: Zero or more tensors to group.
**kwargs: Optional parameters to pass when constructing the NodeDef.
name: A name for this operation (optional).
Returns:
An Operation that executes all its inputs.
Raises:
ValueError: If an unknown keyword argument is provided.
"""
name = kwargs.pop("name", None)
if kwargs:
raise ValueError("Unknown keyword arguments: " + ", ".join(kwargs.keys()))
with ops.name_scope(name, "group_deps", inputs) as name:
# Grouping no inputs means do nothing
if not inputs:
return no_op(name=name)
# Sorts *inputs according to their devices.
ops_on_device = {} # device -> operations specified on the device.
for inp in inputs:
dev = inp.device
if dev in ops_on_device:
ops_on_device[dev].append(inp)
else:
ops_on_device[dev] = [inp]
if len(ops_on_device) == 1:
# 1-level tree. The root node is the returned NoOp node.
(dev, deps), = ops_on_device.items()
return _GroupControlDeps(dev, deps, name=name)
# 2-level tree. The root node is the returned NoOp node.
# deps contains 1 NoOp node for each device.
deps = []
def device_key(dev):
"""A sort key that allows None to be compared to strings."""
return "" if dev is None else dev
for dev in sorted(six.iterkeys(ops_on_device), key=device_key):
deps.append(_GroupControlDeps(dev, ops_on_device[dev]))
with ops.control_dependencies(deps):
return no_op(name=name)
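# A minimal usage sketch for `group`, assuming constant_op / math_ops are
# available as imported above; all names below are illustrative only.
def _example_group():
  """Bundles two independent computations behind a single NoOp."""
  a = math_ops.add(constant_op.constant(1), constant_op.constant(2))
  b = math_ops.multiply(constant_op.constant(3), constant_op.constant(4))
  # Running `joined` guarantees that both `a` and `b` have been computed,
  # but returns no value itself.
  joined = group(a, b, name="example_group")
  return joined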
def tuple(tensors, name=None, control_inputs=None):
"""Group tensors together.
This creates a tuple of tensors with the same values as the `tensors`
argument, except that the value of each tensor is only returned after the
values of all tensors have been computed.
`control_inputs` contains additional ops that have to finish before this op
finishes, but whose outputs are not returned.
This can be used as a "join" mechanism for parallel computations: all the
argument tensors can be computed in parallel, but the values of any tensor
returned by `tuple` are only available after all the parallel computations
are done.
See also `group` and `with_dependencies`.
Args:
tensors: A list of `Tensor`s or `IndexedSlices`, some entries can be `None`.
name: (optional) A name to use as a `name_scope` for the operation.
control_inputs: List of additional ops to finish before returning.
Returns:
Same as `tensors`.
Raises:
ValueError: If `tensors` does not contain any `Tensor` or `IndexedSlices`.
TypeError: If `control_inputs` is not a list of `Operation` or `Tensor`
objects.
"""
with ops.name_scope(name, "tuple", tensors) as name:
gating_ops = [t.op for t in tensors if t is not None]
if control_inputs:
for c in control_inputs:
if isinstance(c, ops.Tensor):
c = c.op
elif not isinstance(c, ops.Operation):
raise TypeError("Control input must be Operation or Tensor: %s" % c)
gating_ops.append(c)
# Note that in order to ensure ordering in the pbtxt, we must take care to
# ensure the order here.
gating_ops = sorted(set(gating_ops), key=lambda op: op._id) # Uniquify ops.
if not gating_ops:
raise ValueError("Must have at least one Tensor: %s" % tensors)
gate = group(*gating_ops)
tpl = []
for t in tensors:
if t is not None:
tpl.append(with_dependencies([gate], t))
else:
tpl.append(None)
return tpl
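# A minimal usage sketch for `tuple`, assuming constant_op / math_ops are
# available as imported above; all names below are illustrative only.
def _example_tuple():
  """Joins two tensors so that neither is returned before both are computed."""
  a = math_ops.add(constant_op.constant(1.0), constant_op.constant(2.0))
  b = math_ops.add(constant_op.constant(3.0), constant_op.constant(4.0))
  # Both joined tensors become available only after `a` and `b` have run,
  # which makes this a simple "join" point for parallel branches.
  joined_a, joined_b = tuple([a, b])
  return joined_a, joined_b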
def case(pred_fn_pairs, default, exclusive=False, name="case"):
"""Create a case operation.
The `pred_fn_pairs` parameter is a dict or list of pairs of size N.
Each pair contains a boolean scalar tensor and a python callable that
creates the tensors to be returned if the boolean evaluates to True.
`default` is a callable generating a list of tensors. All the callables
in `pred_fn_pairs` as well as `default` should return the same number
and types of tensors.
If `exclusive==True`, all predicates are evaluated, and an exception is
thrown if more than one of the predicates evaluates to `True`.
  If `exclusive==False`, execution stops at the first predicate which
evaluates to True, and the tensors generated by the corresponding function
are returned immediately. If none of the predicates evaluate to True, this
operation returns the tensors generated by `default`.
Example 1:
Pseudocode:
```
if (x < y) return 17;
else return 23;
```
Expressions:
```
f1 = lambda: tf.constant(17)
f2 = lambda: tf.constant(23)
r = case([(tf.less(x, y), f1)], default=f2)
```
Example 2:
Pseudocode:
```
if (x < y && x > z) raise OpError("Only one predicate may evaluate true");
if (x < y) return 17;
else if (x > z) return 23;
else return -1;
```
Expressions:
```
x = tf.constant(0)
y = tf.constant(1)
z = tf.constant(2)
def f1(): return tf.constant(17)
def f2(): return tf.constant(23)
def f3(): return tf.constant(-1)
r = case({tf.less(x, y): f1, tf.greater(x, z): f2},
default=f3, exclusive=True)
```
Args:
pred_fn_pairs: Dict or list of pairs of a boolean scalar tensor and a
callable which returns a list of tensors.
default: A callable that returns a list of tensors.
exclusive: True iff at most one predicate is allowed to evaluate to `True`.
name: A name for this operation (optional).
Returns:
The tensors returned by the first pair whose predicate evaluated to True, or
those returned by `default` if none does.
Raises:
TypeError: If `pred_fn_pairs` is not a list/dictionary.
TypeError: If `pred_fn_pairs` is a list but does not contain 2-tuples.
TypeError: If `fns[i]` is not callable for any i, or `default` is not
callable.
"""
pfp = pred_fn_pairs # For readability
if not (isinstance(pfp, list) or isinstance(pfp, _basetuple)
or isinstance(pfp, dict)):
raise TypeError("fns must be a list, tuple, or dict")
if isinstance(pfp, dict):
pfp = pfp.items()
if not exclusive:
logging.warn("%s: Provided dictionary of predicate/fn pairs, but "
"exclusive=False. Order of conditional tests is "
"not guaranteed.", name)
for tup in pfp:
if not isinstance(tup, _basetuple) or len(tup) != 2:
raise TypeError("Each entry in pred_fn_pairs must be a 2-tuple")
pred, fn = tup
if pred.dtype != dtypes.bool:
raise TypeError("pred must be of type bool: %s", pred.name)
if not callable(fn):
raise TypeError("fn for pred %s must be callable." % pred.name)
if not callable(default):
raise TypeError("default must be callable.")
preds, fns = map(list, zip(*pfp))
with ops.name_scope(name, "case", [preds]):
if not preds:
return default()
not_preds = []
for i, p in enumerate(preds):
with ops.name_scope("not_%d" % i):
not_preds.append(math_ops.logical_not(p))
and_not_preds = [constant_op.constant(True, name="always_true")]
for i, notp in enumerate(not_preds):
with ops.name_scope("and_not_%d" % i):
and_not_preds.append(math_ops.logical_and(and_not_preds[-1], notp))
# preds = [p1, p2, p3]
# fns = [f1, f2, f3]
# not_preds = [~p1, ~p2, ~p3]
# and_not_preds = [True, ~p1, ~p1 & ~p2, ~p1 & ~p2 & ~p3]
# case_preds = [p1,
# p2 & ~p1,
# p3 & ~p2 & ~p1,
# ~p3 & ~p2 & ~p1]
case_preds = []
for i, (p, and_not_p_prev) in enumerate(zip(preds, and_not_preds[:-1])):
with ops.name_scope("case_%d" % i):
case_preds.append(math_ops.logical_and(p, and_not_p_prev))
with ops.name_scope("case_none_are_true"):
case_preds.append(and_not_preds[-1])
# Create an empty tensor, or list, with the right type and shape
with ops.name_scope("case_create_empty"):
dummy_value = default()
def _correct_empty(v):
if isinstance(v, ops.Operation):
return no_op()
elif v.dtype == dtypes.string:
return array_ops.constant("")
else:
return array_ops.constant(v.dtype.as_numpy_dtype())
if isinstance(dummy_value, collections.Sequence):
dummy_type = type(dummy_value)
empty = lambda: dummy_type(_correct_empty(v) for v in dummy_value)
else:
empty = lambda: _correct_empty(dummy_value)
# case_sequence = [
# cond(~p3 & ~p2 & ~p1, default, empty),
# cond(p3 & ~p2 & ~p1, f3, lambda: case_sequence[0]),
# cond(p2 & ~p1, f2, lambda: case_sequence[1]),
# cond(p1, f1, lambda: case_sequence[2])
# ]
#
# And the return value will be case_sequence[-1]
def _build_case():
all_fns = [fn for fn in fns]
all_fns.append(default)
prev_case = None
for i, (cp, fn) in enumerate(list(zip(case_preds, all_fns))[::-1]):
prev_case = cond(
cp, fn,
empty if i == 0 else lambda: prev_case,
name="If_%d" % i)
return prev_case
if exclusive:
preds_c = array_ops.stack(preds, name="preds_c")
num_true_conditions = math_ops.reduce_sum(
math_ops.cast(preds_c, dtypes.int32), name="num_true_conds")
at_most_one_true_condition = math_ops.less(
num_true_conditions, constant_op.constant(2, name="two_true_conds"))
error_msg = [
("More than one condition evaluated as True but "
"exclusive=True. Conditions: (%s), Values:"
% ", ".join([p.name for p in preds])),
preds_c]
with ops.control_dependencies([
Assert(condition=at_most_one_true_condition,
data=error_msg, summarize=len(preds))]):
case_seq = _build_case()
else:
case_seq = _build_case()
return case_seq
ops.register_proto_function(ops.GraphKeys.COND_CONTEXT,
proto_type=control_flow_pb2.CondContextDef,
to_proto=CondContext.to_proto,
from_proto=CondContext.from_proto)
ops.register_proto_function(ops.GraphKeys.WHILE_CONTEXT,
proto_type=control_flow_pb2.WhileContextDef,
to_proto=WhileContext.to_proto,
from_proto=WhileContext.from_proto)
|
{
"content_hash": "a8286d47e193230689ed8466e1b33ba6",
"timestamp": "",
"source": "github",
"line_count": 2973,
"max_line_length": 90,
"avg_line_length": 38.76589303733603,
"alnum_prop": 0.6573391987922014,
"repo_name": "jjas0nn/solvem",
"id": "9a1b38dd28d785cedefdfc0b9c599e38e085d626",
"size": "115941",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/lib/python2.7/site-packages/tensorflow/python/ops/control_flow_ops.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "67"
},
{
"name": "C",
"bytes": "309086"
},
{
"name": "C++",
"bytes": "10234032"
},
{
"name": "CMake",
"bytes": "307"
},
{
"name": "CSS",
"bytes": "1891"
},
{
"name": "Fortran",
"bytes": "6361"
},
{
"name": "HTML",
"bytes": "2989"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "Python",
"bytes": "18261384"
},
{
"name": "Shell",
"bytes": "3246"
}
],
"symlink_target": ""
}
|
from msrest.serialization import Model
class ValidateResponse(Model):
"""Describes the result of resource validation.
:param status: Result of validation.
:type status: str
:param error: Error details for the case when validation fails.
:type error: ~azure.mgmt.web.models.ValidateResponseError
"""
_attribute_map = {
'status': {'key': 'status', 'type': 'str'},
'error': {'key': 'error', 'type': 'ValidateResponseError'},
}
def __init__(self, status=None, error=None):
self.status = status
self.error = error
|
{
"content_hash": "ed5daed1c3b4e7c68f2ed227acc09aaa",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 67,
"avg_line_length": 29.1,
"alnum_prop": 0.6374570446735395,
"repo_name": "AutorestCI/azure-sdk-for-python",
"id": "1ca74d062b7f34b6944c3cdae7a32f91ee579183",
"size": "1056",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-web/azure/mgmt/web/models/validate_response.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34619070"
}
],
"symlink_target": ""
}
|
from controller import WORKER_SCRIPT
from launch_instance import launch
from upload_download_s3 import upload_to_s3, download_from_s3, remove_s3_bucket
from utils import timestring
from boto import sqs, ec2, s3
from boto import Config
import json
from time import sleep, time
import os
# Assuming that the testing system has the AWS config already set.
try:
proc_name = "aws_ska_test_worker_" + timestring()
region = "us-west-2"
# Read in credentials
config = Config()
config.load_credential_file(os.path.expanduser("~/.aws/credentials"))
info = config.items("default")[2:]
key = info[0][1]
secret = info[1][1]
# Create a test file and upload to S3
if not os.path.exists("tests/test.txt"):
test_string = "ALLGLORYTOTHEHYPNOTOAD"
with open("tests/test.txt", "w") as f:
f.write(test_string)
print("Uploading to S3")
upload_to_s3(proc_name, "tests/test.txt", key_prefix="data/",
aws_access={"aws_access_key_id": key,
"aws_secret_access_key": secret},
create_bucket=True)
# Create an SQS queue and message for the worker
queue = sqs.connect_to_region(region).create_queue(proc_name)
mess = {}
mess["proc_name"] = proc_name
mess["bucket"] = proc_name
mess['key_name'] = "data/test.txt"
mess['command'] = ["ls /home/ubuntu/data", "ls -la /home/ubuntu/data/"]
mess['parameters'] = ""
mess = queue.new_message(body=json.dumps(mess))
queue.write(mess)
print("Launching instance")
# Launch an instance with the worker script
user_data = WORKER_SCRIPT \
% {"USER": "ubuntu",
"QUEUE_NAME": proc_name,
"REGION": region,
"KEY": key,
"SECRET": secret,
"RESP_QUEUE_NAME": proc_name + "_response",
"CUSTOM_LINES": ''}
inst = launch(key_name=None, region=region, image_id="ami-b7fc75d7",
user_data=user_data)
# sleep 1 min
t0 = time()
while time() < t0 + 60:
update = inst.update()
        print(update)
if update in [u"stopping", u"stopped"]:
print("Instance shutting down.")
break
sleep(5)
else:
print("Reached time limit. Terminating after 1 min.")
inst.terminate()
print("Checking for response message.")
resp_queue = sqs.connect_to_region(region).create_queue(proc_name + "_response")
if resp_queue.count() > 0:
mess = resp_queue.read(10)
content = json.loads(mess.get_body())
print("Saving content.")
with open("tests/test_response.txt", "w") as f:
json.dump(content, f)
else:
print("No message received!")
# Now download the output file
download_from_s3("data_products/*", proc_name,
aws_access={"aws_access_key_id": key,
"aws_secret_access_key": secret},
output_dir="tests/")
except Exception as e:
print("Failed with :")
print(e)
queue.delete()
resp_queue.delete()
remove_s3_bucket(proc_name, s3.connection.S3Connection())
|
{
"content_hash": "b06b4b5b6fd6665ff27983ad0edff7bc",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 84,
"avg_line_length": 30.057142857142857,
"alnum_prop": 0.594106463878327,
"repo_name": "Astroua/aws_controller",
"id": "6024d96ebc00bd02665ba6f1ed3ff71d76c491dc",
"size": "3157",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test_worker.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "27533"
},
{
"name": "Shell",
"bytes": "2562"
}
],
"symlink_target": ""
}
|
import inspect
from datetime import datetime
from django.db import models
from django.test import SimpleTestCase
from dimagi.ext.couchdbkit import Document
from dimagi.utils.couch.migration import (
SyncCouchToSQLMixin,
SyncSQLToCouchMixin,
)
class ModelAttrEqualityHelper(SimpleTestCase):
"""
Helper class to test the equality of couch and a SQL models during a couch to sql migration.
Update `couch_only_attrs` and `sql_only_attrs` as per requirements
"""
class DummySQLModel(models.Model, SyncSQLToCouchMixin):
pass
class DummyCouchModel(Document, SyncCouchToSQLMixin):
pass
couch_only_attrs = set()
sql_only_attrs = set()
@classmethod
def _get_user_defined_attrs(cls, model_cls, dummy_model):
model_attrs = dir(dummy_model)
return {item[0]
for item in inspect.getmembers(model_cls)
if item[0] not in model_attrs}
@classmethod
def get_sql_attrs(cls, model_cls):
return cls._get_user_defined_attrs(model_cls, cls.DummySQLModel)
@classmethod
def get_cleaned_couch_attrs(cls, couch_model_cls):
couch_attrs = cls._get_user_defined_attrs(couch_model_cls, cls.DummyCouchModel)
extra_attrs = cls.couch_only_attrs
new_attrs = cls.sql_only_attrs
return (couch_attrs - extra_attrs).union(new_attrs)
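    # A sketch of how a migration test might use this helper; `MyCouchDoc`
    # and `MySQLModel` are hypothetical models, so the example is left as a
    # comment:
    #
    #   class TestMyModelParity(ModelAttrEqualityHelper):
    #       couch_only_attrs = {'legacy_field'}
    #       sql_only_attrs = {'id'}
    #
    #       def test_attr_parity(self):
    #           couch_attrs = self.get_cleaned_couch_attrs(MyCouchDoc)
    #           sql_attrs = self.get_sql_attrs(MySQLModel)
    #           self.assertEqual(couch_attrs, sql_attrs)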
def is_monday():
return datetime.utcnow().isoweekday() == 1
|
{
"content_hash": "c29e395a9d48254d5cd0678d96bc09eb",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 96,
"avg_line_length": 29.306122448979593,
"alnum_prop": 0.6866295264623955,
"repo_name": "dimagi/commcare-hq",
"id": "7de9af2c122e788579df0325a23a06a4add91ae3",
"size": "1436",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/apps/cleanup/tests/util.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "82928"
},
{
"name": "Dockerfile",
"bytes": "2341"
},
{
"name": "HTML",
"bytes": "2589268"
},
{
"name": "JavaScript",
"bytes": "5889543"
},
{
"name": "Jinja",
"bytes": "3693"
},
{
"name": "Less",
"bytes": "176180"
},
{
"name": "Makefile",
"bytes": "1622"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PLpgSQL",
"bytes": "66704"
},
{
"name": "Python",
"bytes": "21779773"
},
{
"name": "Roff",
"bytes": "150"
},
{
"name": "Shell",
"bytes": "67473"
}
],
"symlink_target": ""
}
|
"""
Module used to define H5 queries.
"""
from __future__ import print_function
from __future__ import absolute_import
import tables
import numpy as np
def get_data(h5file, **kwargs):
"""
Get raw data from an h5file meeting given criteria.
Parameters
----------
h5file: string or open pytables file
The path to the h5file or open file handle.
**kwargs
Optional search modifiers. (e.g. min_r=0, max_mz=100, precursor_MZ=1).
Use verbose=True for displaying query messages.
Returns
-------
out : dictionary
Dictionary with arrays for 'i', 'mz', and 'rt' values meeting criteria.
"""
is_h5_file = False
if not isinstance(h5file, tables.File):
is_h5_file = True
h5file = tables.open_file(h5file)
# Select the ms_level
ms_level = kwargs.get('ms_level', None)
if ms_level is None:
ms1 = h5file.root.ms1_neg.nrows + h5file.root.ms1_pos.nrows
ms2 = h5file.root.ms2_neg.nrows + h5file.root.ms2_pos.nrows
ms_level = 1 if ms1 > ms2 else 2
# Select the polarity
polarity = kwargs.get('polarity', None)
if polarity is None:
if ms_level == 1:
polarity = h5file.root.ms1_pos.nrows > h5file.root.ms1_neg.nrows
else:
polarity = h5file.root.ms2_pos.nrows > h5file.root.ms2_neg.nrows
# Select the table
if ms_level == 1:
name = 'ms1_pos' if polarity else 'ms1_neg'
else:
name = 'ms2_pos' if polarity else 'ms2_neg'
if 'rt' in kwargs or 'min_rt' in kwargs or 'max_rt' in kwargs:
data_table = h5file.get_node('/' + name)
else:
try:
data_table = h5file.get_node('/' + name + '_mz')
except Exception:
# Fall back on original node.
data_table = h5file.get_node('/' + name)
# Get the selected entries
queries = []
for name in ['rt', 'mz', 'precursor_MZ', 'precursor_intensity',
'collision_energy']:
if 'min_%s' % name in kwargs:
queries.append('(%s >= %s)' % (name, kwargs['min_%s' % name]))
if 'max_%s' % name in kwargs:
queries.append('(%s <= %s)' % (name, kwargs['max_%s' % name]))
if name in kwargs:
queries.append('(%s == %s)' % (name, kwargs[name]))
query = ' & '.join(queries)
if kwargs.get('verbose', None):
print('Querying: %s from %s' % (query, data_table._v_name))
if not query:
data = data_table.read()
else:
data = data_table.read_where(query)
if not data.size:
raise ValueError('No data found matching criteria')
if kwargs.get('verbose', None):
print('Query complete')
# close the file if opened
if is_h5_file:
h5file.close()
return data
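# A minimal usage sketch for `get_data`, combining a few of the optional
# search modifiers; "sample.h5" is a hypothetical file path.
def _example_get_data(path="sample.h5"):
    """Reads MS1 rows with 100 <= mz <= 200 and rt <= 5 from an LCMS HDF file."""
    data = get_data(path, ms_level=1, min_mz=100, max_mz=200, max_rt=5,
                    verbose=True)
    return data['mz'], data['i'], data['rt']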
def get_chromatogram(h5file, min_mz, max_mz, aggregator=np.sum, **kwargs):
"""
Get Chromatogram data - RT vs. intensity aggregation
Parameters
----------
h5file: string or open pytables file
The path to the h5file or open file handle.
min_mz : float
Minimum m/z value.
max_mz : float
Maximum m/z value.
ms_level : int
MS Level.
polarity: int
Plus proton (1) or Minus proton (0).
aggregator: function
Function to aggregate the intensity data. Defaults to np.sum,
producing an XIC. For Base Peak Chromatogram, use np.max.
**kwargs
Optional search modifiers. (e.g. precursor_MZ=1,
min_collision_energy=4)
Returns
-------
out : tuple of arrays
(rt_vals, i_vals) arrays in the desired range.
"""
data = get_data(h5file, min_mz=min_mz, max_mz=max_mz, **kwargs)
if data is None:
return [], []
rt = np.unique(data['rt'])
    if len(rt) == 1:
return [rt], [np.sum(data['i'])]
if aggregator == np.sum:
d = np.diff(rt) / 2
edges = np.hstack([rt[0] - d[0], rt[0:-1] + d, rt[-1] + d[-1]])
i, _ = np.histogram(data['rt'], bins=edges, weights=data['i'])
else:
i = []
for val in rt:
indices = np.argwhere(data['rt'] == val)
i.append(aggregator(np.take(data['i'], indices)))
i = np.array(i)
return rt, i
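# A minimal sketch showing how the aggregator changes the chromatogram type:
# np.sum gives an extracted-ion chromatogram (XIC) while np.max gives a
# base-peak chromatogram (BPC). "sample.h5" and the m/z window below are
# hypothetical.
def _example_chromatograms(path="sample.h5", min_mz=300.0, max_mz=301.0):
    """Returns (rt, intensity) traces for an XIC and a BPC over one window."""
    rt_xic, xic = get_chromatogram(path, min_mz, max_mz, aggregator=np.sum)
    rt_bpc, bpc = get_chromatogram(path, min_mz, max_mz, aggregator=np.max)
    return (rt_xic, xic), (rt_bpc, bpc)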
def get_heatmap(h5file, mz_bins, **kwargs):
"""
Get a HeatMap of RT vs MZ.
Parameters
----------
h5file: string or open pytables file
The path to the h5file or open file handle.
mz_steps : int or array-like
Bins to use for the mz axis.
**kwargs
Optional search modifiers. (e.g. precursor_MZ=1,
min_collision_energy=4)
Returns
-------
out : dict
Dictionary containing: 'arr', 'rt_bins', 'mz_bins'.
"""
data = get_data(h5file, **kwargs)
if data is None:
return None
rt_values = np.unique(data['rt'])
rt_bins = np.hstack((rt_values, rt_values[-1] + 1))
arr, mz_bins, _ = np.histogram2d(data['mz'], data['rt'],
weights=data['i'],
bins=(mz_bins, rt_bins))
mz_centroid = (np.sum(np.multiply(np.sum(arr, axis=1), mz_bins[:-1]))
/ np.sum(arr))
return dict(arr=arr, rt_bins=rt_values, mz_bins=mz_bins,
mz_centroid=mz_centroid)
def get_spectrogram(h5file, min_rt, max_rt, bins=2000, **kwargs):
"""
Get cumulative I vs MZ in RT Range (spectrogram)
Parameters
----------
h5file : table file handle
Handle to an open tables file.
min_rt : float
Minimum retention time.
max_rt : float
Maximum retention time.
bins : int or array-like
Desired bins for the histogram.
**kwargs
Optional search modifiers. (e.g. precursor_MZ=1,
min_collision_energy=4)
Returns
-------
out : tuple of arrays
(mz_vals, i_vals) arrays in the desired range.
"""
data = get_data(h5file, min_rt=min_rt, max_rt=max_rt, **kwargs)
if data is None:
return [], []
i, mz = np.histogram(data['mz'], bins=bins, weights=data['i'])
# center the bins
mz = (mz[:-1] + mz[1:]) / 2
return mz, i
def get_info(h5file):
"""Get info about an LCMS HDF file
Parameters
----------
h5file: string or open pytables file
The path to the h5file or open file handle.
Returns
-------
out : dict
Number of rows for all of the tables in the file.
"""
is_h5_file = False
if not isinstance(h5file, tables.File):
is_h5_file = True
h5file = tables.open_file(h5file)
info = dict()
for table_name in ['ms1_neg', 'ms1_pos', 'ms2_neg', 'ms2_pos']:
table = h5file.get_node('/%s' % table_name)
data = dict()
data['nrows'] = table.nrows
if not table.nrows:
info[table_name] = data
continue
data['min_mz'] = table.col('mz').min()
data['max_mz'] = table.col('mz').max()
data['min_rt'] = table.col('rt').min()
data['max_rt'] = table.col('rt').max()
info[table_name] = data
# close the file if opened
if is_h5_file:
h5file.close()
return info
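# A small sketch using `get_info` as a sanity check before querying;
# "sample.h5" is a hypothetical file path.
def _example_print_info(path="sample.h5"):
    """Prints row counts and m/z / rt ranges for every table in the file."""
    for table_name, table_info in sorted(get_info(path).items()):
        print('%s: %s' % (table_name, table_info))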
if __name__ == '__main__': # pragma: no cover
from os.path import expandvars
import sys
# sys.path.insert(0,'/global/homes/b/bpb/metatlas/metatlas/helpers/' )
sys.path.insert(0, expandvars('$HOME/dev/metatlas'))
import argparse
import os
import matplotlib.pyplot as plt
from metatlas import plot_chromatogram, plot_spectrogram, plot_heatmap
desc = "Query and plot MZML data from HDF files"
parser = argparse.ArgumentParser(description=desc)
parser.add_argument("-x", "--xic", action="store_true",
help="Get and plot XIC")
parser.add_argument("-s", "--spectrogram", action="store_true",
help="Get and plot Spectrogram")
parser.add_argument("--heatmap", action="store_true",
help="Get and plot Heatmap")
parser.add_argument('input_file', help="Input HDF file",
action='store')
args = parser.parse_args()
fname = args.input_file
fid = tables.open_file(fname)
basename = os.path.splitext(fname)[0]
if args.xic:
x, y = get_chromatogram(fid, 0, 1000)
plot_chromatogram(x, y, title=basename)
if args.spectrogram:
x, y = get_spectrogram(fid, 1, 5)
plot_spectrogram(x, y)
if args.heatmap:
data = get_heatmap(fid, 1000)
plot_heatmap(data['arr'], data['rt_bins'], data['mz_bins'])
# close the file
fid.close()
plt.show()
|
{
"content_hash": "14ba9342ab46b9dd5d5705093ffb9524",
"timestamp": "",
"source": "github",
"line_count": 298,
"max_line_length": 79,
"avg_line_length": 29.063758389261746,
"alnum_prop": 0.5632144094215449,
"repo_name": "metabolite-atlas/metatlas",
"id": "9a2255e1800334de219a8b6587c7052d975812fb",
"size": "8661",
"binary": false,
"copies": "1",
"ref": "refs/heads/oo_mads",
"path": "metatlas/io/h5_query.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "121821"
},
{
"name": "Makefile",
"bytes": "1333"
},
{
"name": "Python",
"bytes": "167786"
},
{
"name": "R",
"bytes": "78"
},
{
"name": "Shell",
"bytes": "2327"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('job_board', '0013_siteconfig_twitter'),
]
operations = [
migrations.RemoveField(
model_name='siteconfig',
name='twitter',
),
migrations.AddField(
model_name='siteconfig',
name='twitter_access_token',
field=models.CharField(blank=True, max_length=100),
),
migrations.AddField(
model_name='siteconfig',
name='twitter_access_token_secret',
field=models.CharField(blank=True, max_length=100),
),
migrations.AddField(
model_name='siteconfig',
name='twitter_consumer_key',
field=models.CharField(blank=True, max_length=100),
),
migrations.AddField(
model_name='siteconfig',
name='twitter_consumer_secret',
field=models.CharField(blank=True, max_length=100),
),
migrations.AddField(
model_name='siteconfig',
name='twitter_user',
field=models.CharField(blank=True, help_text="Your site's Twitter username, fill in to have a Follow icon appear on select pages", max_length=15),
),
]
|
{
"content_hash": "3b9db828f0fad95c91a0123d007df7f6",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 158,
"avg_line_length": 31.952380952380953,
"alnum_prop": 0.5774962742175856,
"repo_name": "wfhio/tramcar",
"id": "2f78acc7d1095a239abca59a22feebbc63903a8b",
"size": "1415",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "job_board/migrations/0014_auto_20161123_0242.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "260"
},
{
"name": "HTML",
"bytes": "29392"
},
{
"name": "Python",
"bytes": "112378"
},
{
"name": "Ruby",
"bytes": "3313"
}
],
"symlink_target": ""
}
|
"""
WSGI config for blog project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "blog.settings")
application = get_wsgi_application()
application = DjangoWhiteNoise(application)
# import django.core.handlers.wsgi
# application = django.core.handlers.wsgi.WSGIHandler()
|
{
"content_hash": "cf9543cbd14860fb6f2291f26e1ef1d1",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 78,
"avg_line_length": 25.818181818181817,
"alnum_prop": 0.7869718309859155,
"repo_name": "divyamodi128/My-Story-The-Blogging-App",
"id": "faeb489d586ec40657dcf2ac14bc1b494181c09f",
"size": "568",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/blog/wsgi.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "54168"
},
{
"name": "HTML",
"bytes": "15641"
},
{
"name": "JavaScript",
"bytes": "335787"
},
{
"name": "Python",
"bytes": "54349"
}
],
"symlink_target": ""
}
|
import unittest
from expressy.quotes import split_quoted, process_unquoted
class SplitQuotedTest(unittest.TestCase):
def test_trivial(self):
self.assertEqual(split_quoted(""), [""])
self.assertEqual(split_quoted("a"), ["a"])
def test_splits(self):
self.assertEqual(split_quoted("''"), ["", "", ""])
self.assertEqual(split_quoted(
r"I say, 'hello,' and I don\'t mean, 'hi'."),
["I say, ", "hello,", " and I don\\'t mean, ", "hi", "."])
def test_failure(self):
with self.assertRaises(ValueError):
print(split_quoted(r"'"))
split_quoted(r"\'")
with self.assertRaises(ValueError):
split_quoted(r"\''")
split_quoted(r"''")
with self.assertRaises(ValueError):
split_quoted(r"a ' b")
split_quoted(r"a \' b")
with self.assertRaises(ValueError):
split_quoted(r"a \' b '")
split_quoted(r"a' b ' c")
class ProcessUnquotedTest(unittest.TestCase):
def process(self, s):
def sub(x):
return x and ' '.join(['X', x, 'X'])
return process_unquoted(s, sub)
def test_trivial(self):
self.assertEqual(self.process(""), "")
def test_simple(self):
self.assertEqual(self.process("hello"), "X hello X")
def test_quotes(self):
self.assertEqual(
self.process(r"I say, 'hello,' and I don\'t mean, 'hi'."),
"X I say, X'hello,'X and I don\\'t mean, X'hi'X . X")
|
{
"content_hash": "52c208cb45cc822b7d0e34a2de46f2bc",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 70,
"avg_line_length": 32.255319148936174,
"alnum_prop": 0.5507915567282322,
"repo_name": "timedata-org/expressy",
"id": "ee6a6d9fa05102034cdd2020beab26459f9d3d9b",
"size": "1516",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/quotes_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "27046"
},
{
"name": "Shell",
"bytes": "247"
}
],
"symlink_target": ""
}
|
"""
The-O-Kay-Blog utility functions
:Copyright: (c) 2009 Victor Goh <victorgoh@gmail.com>,
All rights reserved.
:license: BSD, see LICENSE for more details.
"""
import re
import logging
import blog.config as config
from kay.utils import render_to_string
# Some helper methods
def slugify(s):
return re.sub('[^a-zA-Z0-9-]+', '-', s).strip('-')
def format_post_path(post, num):
slug = slugify(post.title)
if num > 0:
slug += "-" + str(num)
return config.post_path_format % {
'slug': slug,
'year': post.published.year,
'month': post.published.month,
'day': post.published.day,
}
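# Illustrative example (the real value of config.post_path_format lives in blog.config
# and is not shown here; '/%(year)d/%(month)d/%(slug)s' is only an assumed format):
#   format_post_path(post, 0) for a post titled 'Hello World!' published on 2009-05-01
#   would yield '/2009/5/Hello-World'; passing num=1 appends '-1' to the slug.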
def render_template(template, template_vals):
"""
render_to_string returns a unicode string, the rendered template needs to
be a string to be stored in BlobProperty
"""
return render_to_string(template, template_vals)
def truncate_html_words(s, num):
"""
Truncates html to a certain number of words (not counting tags and
comments). Closes opened tags if they were correctly closed in the given
html.
"""
#s = force_unicode(s)
length = int(num)
if length <= 0:
return u''
html4_singlets = ('br', 'col', 'link', 'base', 'img', 'param', 'area', 'hr', 'input')
# Set up regular expressions
re_words = re.compile(r'&.*?;|<.*?>|(\w[\w-]*)', re.U)
re_tag = re.compile(r'<(/)?([^ ]+?)(?: (/)| .*?)?>')
# Count non-HTML words and keep note of open tags
pos = 0
ellipsis_pos = 0
words = 0
open_tags = []
while words <= length:
m = re_words.search(s, pos)
if not m:
# Checked through whole string
break
pos = m.end(0)
if m.group(1):
# It's an actual non-HTML word
words += 1
if words == length:
ellipsis_pos = pos
continue
# Check for tag
tag = re_tag.match(m.group(0))
if not tag or ellipsis_pos:
# Don't worry about non tags or tags after our truncate point
continue
closing_tag, tagname, self_closing = tag.groups()
tagname = tagname.lower() # Element names are always case-insensitive
if self_closing or tagname in html4_singlets:
pass
elif closing_tag:
# Check for match in open tags list
try:
i = open_tags.index(tagname)
except ValueError:
pass
else:
# SGML: An end tag closes, back to the matching start tag, all unclosed intervening start tags with omitted end tags
open_tags = open_tags[i+1:]
else:
# Add it to the start of the open tags list
open_tags.insert(0, tagname)
if words <= length:
# Don't try to close tags if we don't need to truncate
return s
out = s[:ellipsis_pos] + ' ...'
# Close any tags still open
for tag in open_tags:
out += '</%s>' % tag
# Return string
return out
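# Illustrative example (input string is assumed, not from the original source):
#   truncate_html_words('<p>one two three four</p>', 2)
# returns '<p>one two ...</p>' -- the still-open <p> tag is closed after the ellipsis.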
# Jinja2 Filter for datetime formatting
def datetimeformat(value, format='%H:%M / %d-%m-%Y'):
return value.strftime(format)
|
{
"content_hash": "52cea70adc0bb94ce8aef1156a7f06a0",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 132,
"avg_line_length": 31.603960396039604,
"alnum_prop": 0.5667293233082706,
"repo_name": "calvinchengx/O-Kay-Blog-wih-Kay-0.10.0",
"id": "e9af98a81b77fe199a5a28b85ef194fe94a245c5",
"size": "3216",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blog/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import gzip
import os
import pysam
from TestUtils import TABIX_DATADIR
FN_COMPRESSED = "example.bed.gz"
FN_UNCOMPRESSED = "example.bed"
FN_LARGE_COMPRESSED = "example_large.bed.gz"
FN_LARGE_UNCOMPRESSED = "example_large.bed"
def read_python_compressed(fn):
'''iterate through with python.'''
with gzip.open(fn, mode="r") as f:
return len([x.split(b"\t") for x in f])
def read_python_uncompressed(fn):
with open(fn) as f:
return len([x.split("\t") for x in f])
def fetch_plain(fn):
with pysam.Tabixfile(fn) as f:
return len(list(f.fetch()))
def fetch_parsed(fn):
with pysam.Tabixfile(fn) as f:
return len(list(f.fetch(parser=pysam.asBed())))
def iterate_generic_compressed(fn):
with gzip.open(fn) as f:
return len(list(pysam.tabix_generic_iterator(f, parser=pysam.asBed())))
def iterate_generic_uncompressed(fn):
with open(fn) as f:
return len(list(pysam.tabix_generic_iterator(f, parser=pysam.asBed())))
def iterate_parsed_compressed(fn):
with gzip.open(fn) as f:
return len(list(pysam.tabix_iterator(f, parser=pysam.asBed())))
def iterate_parsed_uncompressed(fn):
with open(fn) as f:
return len(list(pysam.tabix_iterator(f, parser=pysam.asBed())))
def iterate_file_compressed(fn):
with gzip.open(fn) as f:
return len(list(pysam.tabix_file_iterator(f, parser=pysam.asBed())))
def iterate_file_uncompressed(fn):
with open(fn) as f:
return len(list(pysam.tabix_file_iterator(f, parser=pysam.asBed())))
def test_read_python_compressed(benchmark):
result = benchmark(read_python_compressed,
os.path.join(TABIX_DATADIR, FN_COMPRESSED))
assert result == 164
def test_read_python_uncompressed(benchmark):
result = benchmark(read_python_uncompressed,
os.path.join(TABIX_DATADIR, FN_UNCOMPRESSED))
assert result == 164
def test_fetch_plain(benchmark):
result = benchmark(fetch_plain, os.path.join(TABIX_DATADIR, FN_COMPRESSED))
assert result == 164
def test_fetch_parsed(benchmark):
result = benchmark(fetch_parsed, os.path.join(
TABIX_DATADIR, FN_COMPRESSED))
assert result == 164
def test_iterate_generic_compressed(benchmark):
result = benchmark(iterate_generic_compressed,
os.path.join(TABIX_DATADIR, FN_COMPRESSED))
assert result == 164
def test_iterate_generic_uncompressed(benchmark):
result = benchmark(iterate_generic_uncompressed,
os.path.join(TABIX_DATADIR, FN_UNCOMPRESSED))
assert result == 164
def test_iterate_parsed_compressed(benchmark):
result = benchmark(iterate_parsed_compressed,
os.path.join(TABIX_DATADIR, FN_COMPRESSED))
assert result == 164
def test_iterate_parsed_uncompressed(benchmark):
result = benchmark(iterate_parsed_uncompressed,
os.path.join(TABIX_DATADIR, FN_UNCOMPRESSED))
assert result == 164
def test_iterate_file_compressed(benchmark):
result = benchmark(iterate_file_compressed,
os.path.join(TABIX_DATADIR, FN_COMPRESSED))
assert result == 164
def test_iterate_file_uncompressed(benchmark):
result = benchmark(iterate_file_uncompressed,
os.path.join(TABIX_DATADIR, FN_UNCOMPRESSED))
assert result == 164
def test_read_python_large_compressed(benchmark):
result = benchmark(read_python_compressed, os.path.join(
TABIX_DATADIR, FN_LARGE_COMPRESSED))
assert result == 100000
def test_read_python_large_uncompressed(benchmark):
result = benchmark(read_python_uncompressed, os.path.join(
TABIX_DATADIR, FN_LARGE_UNCOMPRESSED))
assert result == 100000
def test_fetch_plain_large(benchmark):
result = benchmark(fetch_plain, os.path.join(
TABIX_DATADIR, FN_LARGE_COMPRESSED))
assert result == 100000
def test_fetch_parsed_large(benchmark):
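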
result = benchmark(fetch_parsed, os.path.join(
TABIX_DATADIR, FN_LARGE_COMPRESSED))
assert result == 100000
def test_iterate_generic_large_compressed(benchmark):
result = benchmark(iterate_generic_compressed, os.path.join(
TABIX_DATADIR, FN_LARGE_COMPRESSED))
assert result == 100000
def test_iterate_generic_large_uncompressed(benchmark):
result = benchmark(iterate_generic_uncompressed, os.path.join(
TABIX_DATADIR, FN_LARGE_UNCOMPRESSED))
assert result == 100000
def test_iterate_parsed_large_compressed(benchmark):
result = benchmark(iterate_parsed_compressed, os.path.join(
TABIX_DATADIR, FN_LARGE_COMPRESSED))
assert result == 100000
def test_iterate_parsed_large_uncompressed(benchmark):
result = benchmark(iterate_parsed_uncompressed, os.path.join(
TABIX_DATADIR, FN_LARGE_UNCOMPRESSED))
assert result == 100000
def test_iterate_file_large_compressed(benchmark):
result = benchmark(iterate_file_compressed, os.path.join(
TABIX_DATADIR, FN_LARGE_COMPRESSED))
assert result == 100000
def test_iterate_file_large_uncompressed(benchmark):
result = benchmark(iterate_file_uncompressed, os.path.join(
TABIX_DATADIR, FN_LARGE_UNCOMPRESSED))
assert result == 100000
|
{
"content_hash": "73b63482b36ce7903cbfb163278947b5",
"timestamp": "",
"source": "github",
"line_count": 180,
"max_line_length": 79,
"avg_line_length": 29.083333333333332,
"alnum_prop": 0.686341929321872,
"repo_name": "pysam-developers/pysam",
"id": "ce7077de2cfe2f8fc173fdd85b3d00d48272e64a",
"size": "5235",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/tabix_bench.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9696525"
},
{
"name": "Cython",
"bytes": "683859"
},
{
"name": "Dockerfile",
"bytes": "306"
},
{
"name": "M4",
"bytes": "44877"
},
{
"name": "Makefile",
"bytes": "63438"
},
{
"name": "Perl",
"bytes": "1822"
},
{
"name": "Python",
"bytes": "437130"
},
{
"name": "Shell",
"bytes": "20787"
}
],
"symlink_target": ""
}
|
import functools
from django import http
from django.core.exceptions import PermissionDenied
from django.shortcuts import get_object_or_404
import commonware.log
from addons.models import Addon
log = commonware.log.getLogger('mkt.purchase')
def addon_view(f, qs=Addon.objects.all):
@functools.wraps(f)
def wrapper(request, addon_id=None, app_slug=None, *args, **kw):
"""Provides an addon given either an addon_id or app_slug."""
assert addon_id or app_slug, 'Must provide addon_id or app_slug'
get = lambda **kw: get_object_or_404(qs(), **kw)
if addon_id and addon_id.isdigit():
addon = get(id=addon_id)
# Don't get in an infinite loop if addon.slug.isdigit().
if addon.slug != addon_id:
url = request.path.replace(addon_id, addon.slug, 1)
if request.GET:
url += '?' + request.GET.urlencode()
return http.HttpResponsePermanentRedirect(url)
elif addon_id:
addon = get(slug=addon_id)
elif app_slug:
addon = get(app_slug=app_slug)
return f(request, addon, *args, **kw)
return wrapper
def addon_view_factory(qs):
# Don't evaluate qs or the locale will get stuck on whatever the server
# starts with. The addon_view() decorator will call qs with no arguments
# before doing anything, so lambdas are ok.
# GOOD: Addon.objects.valid
# GOOD: lambda: Addon.objects.valid().filter(type=1)
# BAD: Addon.objects.valid()
return functools.partial(addon_view, qs=qs)
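# Illustrative usage sketch (the view name below is assumed, not from this codebase):
#   app_view = addon_view_factory(qs=Addon.objects.valid)
#
#   @app_view
#   @has_purchased
#   def purchase_receipt(request, addon):
#       ...
# addon_view resolves the addon lazily per request, then has_purchased checks it.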
def has_purchased(f):
"""
If the addon is premium, require a purchase.
Must be called after addon_view decorator.
"""
@functools.wraps(f)
def wrapper(request, addon, *args, **kw):
if addon.is_premium() and not addon.has_purchased(request.amo_user):
log.info('Not purchased: %d' % addon.pk)
raise PermissionDenied
return f(request, addon, *args, **kw)
return wrapper
def can_become_premium(f):
"""Check that the addon can become premium."""
@functools.wraps(f)
def wrapper(request, addon_id, addon, *args, **kw):
if not addon.can_become_premium():
log.info('Cannot become premium: %d' % addon.pk)
raise PermissionDenied
return f(request, addon_id, addon, *args, **kw)
return wrapper
|
{
"content_hash": "08407ada78410567d0a5040e368084b5",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 76,
"avg_line_length": 35.205882352941174,
"alnum_prop": 0.6299081035923141,
"repo_name": "jinankjain/zamboni",
"id": "1d4607271e14416130af43b056ff9b8fbbe22957",
"size": "2394",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "apps/addons/decorators.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from inspect import getargspec
import sys
from werkzeug.utils import import_string
def configure_extension(name, **kwargs):
configurator = import_string(name)
args = getargspec(configurator).args
configurator(**{key: val for key, val in kwargs.items() if key in args})
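# Illustrative example (hypothetical extension name, not part of Quokka itself): an
# entry 'myext.configure' in CORE_EXTENSIONS pointing at
#   def configure(app): ...
# receives only the keyword arguments its signature accepts (here just `app`);
# extras such as `admin` are silently dropped by the filtering above.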
def configure_extensions(app, admin=None):
"""Configure extensions provided in config file"""
sys.path.insert(0, './modules')
extensions = app.config.get(
'CORE_EXTENSIONS', []
) + app.config.get(
'EXTRA_EXTENSIONS', []
)
for configurator_name in extensions:
configure_extension(configurator_name, app=app, admin=admin)
return app
|
{
"content_hash": "8c56f940c886816a93636e80d8ebcff2",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 76,
"avg_line_length": 29.454545454545453,
"alnum_prop": 0.6805555555555556,
"repo_name": "abnerpc/quokka",
"id": "4e155e8b05b4ebe313aa267d0e89066f6591ee32",
"size": "664",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "quokka/core/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "104"
},
{
"name": "CSS",
"bytes": "32332"
},
{
"name": "HTML",
"bytes": "119354"
},
{
"name": "JavaScript",
"bytes": "494398"
},
{
"name": "Makefile",
"bytes": "503"
},
{
"name": "Python",
"bytes": "199573"
},
{
"name": "Shell",
"bytes": "12305"
}
],
"symlink_target": ""
}
|
import unittest
import tempfile
import io
import os
from unittest.mock import patch
from helper_util import relpath, do_long_tests
from ppci.cli.hexutil import hexutil
def new_temp_file(suffix):
""" Generate a new temporary filename """
handle, filename = tempfile.mkstemp(suffix=suffix)
os.close(handle)
return filename
@unittest.skipUnless(do_long_tests('any'), 'skipping slow tests')
class HexutilTestCase(unittest.TestCase):
@patch('sys.stdout', new_callable=io.StringIO)
def test_hexutil_help(self, mock_stdout):
""" Check hexutil help message """
with self.assertRaises(SystemExit) as cm:
hexutil(['-h'])
self.assertEqual(0, cm.exception.code)
self.assertIn('info,new,merge', mock_stdout.getvalue())
@patch('sys.stderr', new_callable=io.StringIO)
def test_hexutil_address_format(self, mock_stderr):
file1 = new_temp_file('.hex')
datafile = relpath('..', 'examples', 'build.xml')
with self.assertRaises(SystemExit) as cm:
hexutil(['new', file1, '10000000', datafile])
self.assertEqual(2, cm.exception.code)
self.assertIn('argument address', mock_stderr.getvalue())
@patch('sys.stdout', new_callable=io.StringIO)
def test_hexutil_no_command(self, mock_stdout):
""" No command given """
with self.assertRaises(SystemExit) as cm:
hexutil([])
self.assertNotEqual(0, cm.exception.code)
@patch('sys.stdout', new_callable=io.StringIO)
def test_hexutil_merge(self, mock_stdout):
""" Create three hexfiles and manipulate those """
file1 = new_temp_file('file1.hex')
file2 = new_temp_file('file2.hex')
file3 = new_temp_file('file3.hex')
datafile = relpath('..', 'docs', 'logo', 'logo.png')
hexutil(['new', file1, '0x10000000', datafile])
hexutil(['new', file2, '0x20000000', datafile])
hexutil(['merge', file1, file2, file3])
hexutil(['info', file3])
self.assertIn("Hexfile containing 2832 bytes", mock_stdout.getvalue())
@patch('sys.stdout', new_callable=io.StringIO)
def test_hexutil_info(self, mock_stdout):
file1 = new_temp_file('file1.hex')
datafile = relpath('..', 'docs', 'logo', 'logo.png')
hexutil(['new', file1, '0x10000000', datafile])
hexutil(['info', file1])
self.assertIn("Hexfile containing 1416 bytes", mock_stdout.getvalue())
if __name__ == '__main__':
unittest.main(verbosity=2)
|
{
"content_hash": "404b2860e3966e06bf323eefeb845c5b",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 78,
"avg_line_length": 37.53731343283582,
"alnum_prop": 0.6381709741550696,
"repo_name": "windelbouwman/ppci-mirror",
"id": "c8a0c618d957991f7b1ce086f2ac3f6ebfaedc1d",
"size": "2515",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_hexutil.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Assembly",
"bytes": "94"
},
{
"name": "Brainfuck",
"bytes": "5867"
},
{
"name": "C",
"bytes": "229265"
},
{
"name": "C++",
"bytes": "1257"
},
{
"name": "Coq",
"bytes": "98028"
},
{
"name": "HTML",
"bytes": "363"
},
{
"name": "JavaScript",
"bytes": "2165"
},
{
"name": "LLVM",
"bytes": "11206"
},
{
"name": "Python",
"bytes": "2991165"
},
{
"name": "Shell",
"bytes": "960"
},
{
"name": "Verilog",
"bytes": "9363"
}
],
"symlink_target": ""
}
|
'''
Richards
'''
# based on a Java version:
# Based on original version written in BCPL by Dr Martin Richards
# in 1981 at Cambridge University Computer Laboratory, England
# and a C++ version derived from a Smalltalk version written by
# L Peter Deutsch.
# Java version: Copyright (C) 1995 Sun Microsystems, Inc.
# Translation from C++, Mario Wolczko
# Outer loop added by Alex Jacoby
from time import clock
# Task IDs
I_IDLE = 1
I_WORK = 2
I_HANDLERA = 3
I_HANDLERB = 4
I_DEVA = 5
I_DEVB = 6
# Packet types
K_DEV = 1000
K_WORK = 1001
# Packet
macro( BUFSIZE=4 )
with stack:
class Packet(object):
def __init__(self,link:Packet, ident:int, kind:int):
self.link = addr(link)
self.ident = ident
self.kind = kind
let self.datum:int = 0
let self.data : [BUFSIZE]int
def append_to(self, lst:Packet) ->Packet:
self.link = None
if lst is None:
return self
else:
p = lst
next = p.link
while next is not None:
p = next
next = p.link
p.link = self
return lst
# Task Records
class TaskRec(object):
pass
class DeviceTaskRec(TaskRec):
def __init__(self):
let self.pending : Packet = None
class IdleTaskRec(TaskRec):
def __init__(self):
let self.control : int = 1
let self.count : int = 10000
class HandlerTaskRec(TaskRec):
def __init__(self):
let self.work_in : Packet = None
let self.device_in : Packet = None
def workInAdd(self, p:Packet) ->Packet:
## error: taking addr of temp##self.work_in = addr(p.append_to(self.work_in))
a = p.append_to(self.work_in)
self.work_in = addr(a)
return self.work_in
def deviceInAdd(self, p:Packet) ->Packet:
a = p.append_to(self.device_in)
self.device_in = addr(a)
return self.device_in
class WorkerTaskRec(TaskRec):
def __init__(self):
let self.destination : int = I_HANDLERA
let self.count : int = 0
# Task
class TaskState(object):
def __init__(self):
let self.packet_pending:bool = True
let self.task_waiting:bool = False
let self.task_holding:bool = False
def packetPending(self) -> self:
self.packet_pending = True
self.task_waiting = False
self.task_holding = False
return self
def waiting(self) -> self:
self.packet_pending = False
self.task_waiting = True
self.task_holding = False
return self
def running(self) -> self:
self.packet_pending = False
self.task_waiting = False
self.task_holding = False
return self
def waitingWithPacket(self) -> self:
self.packet_pending = True
self.task_waiting = True
self.task_holding = False
return self
def isPacketPending(self) -> bool:
return self.packet_pending
def isTaskWaiting(self) -> bool:
return self.task_waiting
def isTaskHolding(self) -> bool:
return self.task_holding
def isTaskHoldingOrWaiting(self) -> bool:
return self.task_holding or (not self.packet_pending and self.task_waiting)
def isWaitingWithPacket(self) -> bool:
return self.packet_pending and self.task_waiting and not self.task_holding
tracing = False
layout = 0
def trace(a:string):
global layout
layout -= 1
if layout <= 0:
print()
layout = 50
print(a)
class Task(TaskState):
# note: r:TaskRec is the super class, TODO cast to its subclass.
def __init__(self,ident:int, priority:int, input:Packet, initialState:TaskState, handle:TaskRec):
print 'init new Task...'
let self.link : Task = taskWorkArea.taskList
self.ident = ident
self.priority = priority
self.input = addr(input)
let self.packet_pending : bool = initialState.isPacketPending()
let self.task_waiting : bool = initialState.isTaskWaiting()
let self.task_holding : bool = initialState.isTaskHolding()
print 'setting handle'
self.handle = addr(handle) ## generic - some subclass
print 'setting taskList'
#taskWorkArea.taskList = self as Task
#taskWorkArea.taskList[...] = self as Task ## compiles but segfaults at runtime
#ptr = addr(self) ## error: lvalue required as unary ‘&’ operand
ptr = addr(self[...])
taskWorkArea.taskList = self as 'Task*'
print 'setting taskTab'
taskWorkArea.taskTab[ident] = self as Task
print 'init OK'
def addPacket(self,p:Packet, old:Task) -> Task:
if self.input is None:
self.input = addr(p)
self.packet_pending = True
if self.priority > old.priority:
return self as Task
else:
p.append_to(self.input)
return old
@abstractmethod
def fn(self, pkt:Packet, r:TaskRec) -> self:
return self
def runTask(self) -> Task:
let msg : Packet = None
if self.isWaitingWithPacket():
msg = self.input
self.input = msg.link
if self.input is None:
self.running()
else:
self.packetPending()
return self.fn(msg,self.handle)
def waitTask(self) -> self:
self.task_waiting = True
return self
def hold(self) -> Task:
taskWorkArea.holdCount += 1
self.task_holding = True
return self.link
def release(self,i:int) -> Task:
t = self.findtcb(i)
t.task_holding = False
if t.priority > self.priority:
return t
else:
return self as Task
def qpkt(self, pkt:Packet) -> Task:
t = self.findtcb(pkt.ident)
taskWorkArea.qpktCount += 1
pkt.link = None
pkt.ident = self.ident
#return t.addPacket(pkt,self)
return t.addPacket(
pkt,
self as Task
)
def findtcb(self,id:int) -> Task:
t = taskWorkArea.taskTab[id]
if t is None:
print('Exception in findtcb')
return t
# DeviceTask
class DeviceTask(Task):
def __init__(self,i:int, p:int, w:Packet, s:TaskState, r:TaskRec):
Task.__init__(self,i,p,w,s,r)
#######def fn(self,pkt:*Packet, d:TaskRec) -> self:
def fn(self,pkt:Packet, d:DeviceTaskRec) -> Task:
if pkt is None:
pkt = d.pending
if pkt is None:
return self.waitTask()
else:
d.pending = None
return self.qpkt(pkt)
else:
d.pending = addr(pkt)
return self.hold()
class HandlerTask(Task):
def __init__(self,i:int, p:int, w:Packet, s:TaskState, r:TaskRec):
Task.__init__(self,i,p,w,s,r)
def fn(self, pkt:Packet, h:HandlerTaskRec) -> Task:
if pkt is not None:
if pkt.kind == K_WORK:
h.workInAdd(pkt)
else:
h.deviceInAdd(pkt)
work = h.work_in
if work is None:
return self.waitTask()
count = work->datum
if count >= BUFSIZE:
h.work_in = work->link
return self.qpkt(work)
dev = h.device_in
if dev is None:
return self.waitTask()
h.device_in = dev->link
dev->datum = work->data[count]
work->datum = count + 1
return self.qpkt(dev)
# IdleTask
class IdleTask(Task):
def __init__(self,i:int, p:int, w:Packet, s:TaskState, r:TaskRec):
Task.__init__(self,i,0,None,s,r)
def fn(self,pkt:Packet, ir:TaskRec) -> Task:
ptr = addr(ir)
i = ptr as IdleTaskRec
i.count -= 1
if i.count == 0:
return self.hold()
elif i.control & 1 == 0:
i.control //= 2
return self.release(I_DEVA)
else:
i.control = i.control//2 ^ 0xd008
return self.release(I_DEVB)
# WorkTask
A = ord('A')
class WorkTask(Task):
def __init__(self,i:int, p:int, w:Packet, s:TaskState, r:TaskRec):
Task.__init__(self,i,p,w,s,r)
def fn(self,pkt:Packet, worker:TaskRec) -> Task:
w = addr(worker) as WorkerTaskRec
if pkt is None:
return self.waitTask()
dest = 0
if w.destination == I_HANDLERA:
dest = I_HANDLERB
else:
dest = I_HANDLERA
w.destination = dest
pkt.ident = dest
pkt.datum = 0
for i in range(BUFSIZE):
w.count += 1
if w.count > 26:
w.count = 1
pkt.data[i] = A + w.count - 1
return self.qpkt(pkt)
class TaskWorkArea(object):
def __init__(self, taskTab:[10]Task ):
self.taskTab[:] = taskTab
let self.taskList : Task = None
let self.holdCount:int = 0
let self.qpktCount:int = 0
#global_tasks = []Task(None for i in range(10))
#global_tasks = []Task()
let global_tasks: [10]Task
taskWorkArea = TaskWorkArea(global_tasks)
def schedule():
print 'schedule...'
t = taskWorkArea.taskList
while t is not None:
#pkt = None
#if tracing:
# print("tcb =",t.ident)
print t->ident
if t->isTaskHoldingOrWaiting():
print 'holding.', t
t = t->link
else:
###########if tracing: trace(chr(ord("0")+t.ident))
#print 'running.', t
#t[...] = t->runTask()
res = t->runTask()
if res is None:
print 'res is null'
t = None
break
else:
ptr = addr(res)
#print 'ptr to res:', ptr ## BUG: always prints same address
t = ptr
class Richards(object):
def run(self, iterations:int) ->bool:
for i in range(iterations):
taskWorkArea.holdCount = 0
taskWorkArea.qpktCount = 0
#IdleTask(I_IDLE, 1, 10000, TaskState().running(), IdleTaskRec()) ##??
#IdleTask(I_IDLE, 1, None, TaskState().running(), IdleTaskRec())
tsi = TaskState()
IdleTask(I_IDLE, 1, None, tsi.running(), IdleTaskRec())
wkq = Packet(None, 0, K_WORK)
wkq = Packet(wkq , 0, K_WORK)
tsw = TaskState()
WorkTask(I_WORK, 1000, wkq, tsw.waitingWithPacket(), WorkerTaskRec())
wkq = Packet(None, I_DEVA, K_DEV)
wkq = Packet(wkq , I_DEVA, K_DEV)
wkq = Packet(wkq , I_DEVA, K_DEV)
tsh = TaskState()
HandlerTask(I_HANDLERA, 2000, wkq, tsh.waitingWithPacket(), HandlerTaskRec())
wkq = Packet(None, I_DEVB, K_DEV)
wkq = Packet(wkq , I_DEVB, K_DEV)
wkq = Packet(wkq , I_DEVB, K_DEV)
tsh2 = TaskState()
HandlerTask(I_HANDLERB, 3000, wkq, tsh2.waitingWithPacket(), HandlerTaskRec())
wkq = None;
tsd1 = TaskState()
tsd2 = TaskState()
DeviceTask(I_DEVA, 4000, wkq, tsd1.waiting(), DeviceTaskRec());
DeviceTask(I_DEVB, 5000, wkq, tsd2.waiting(), DeviceTaskRec());
schedule()
if taskWorkArea.holdCount == 9297 and taskWorkArea.qpktCount == 23246:
pass
else:
return False
return True
def entry_point(iterations:int) ->double:
r = Richards()
startTime = clock()
result = r.run(iterations)
if not result:
print('#ERROR incorrect results!')
return clock() - startTime
def main():
print global_tasks
for ptr in global_tasks:
assert ptr is None
print 'starting benchmark...'
iterations=10
#print("#Richards benchmark (Python) starting. iterations="+str(iterations))
total_s = entry_point(iterations)
#print("#Total time for %s iterations: %s secs" %(iterations,total_s))
s = total_s / iterations
#print("#Average seconds per iteration:", s)
print(s)
|
{
"content_hash": "cf639fcc4a9514b5fed3e4aff2ab721f",
"timestamp": "",
"source": "github",
"line_count": 456,
"max_line_length": 99,
"avg_line_length": 23.06578947368421,
"alnum_prop": 0.6402357862711542,
"repo_name": "secureosv/pythia",
"id": "4a678845c4da590a5a118d6d2ae980f72eb65a72",
"size": "10522",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "regtests/bench/richards-typed-stack.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "23667"
},
{
"name": "HTML",
"bytes": "44433"
},
{
"name": "Perl",
"bytes": "66040"
},
{
"name": "Python",
"bytes": "464271"
},
{
"name": "Shell",
"bytes": "1274"
}
],
"symlink_target": ""
}
|
import m5
from m5.objects import *
# both traffic generator and communication monitor are only available
# if we have protobuf support, so potentially skip this test
require_sim_object("TrafficGen")
require_sim_object("CommMonitor")
# even if this is only a traffic generator, call it cpu to make sure
# the scripts are happy
cpu = TrafficGen(config_file = "tests/quick/se/70.tgen/tgen-simple-mem.cfg")
# system simulated
system = System(cpu = cpu, physmem = SimpleMemory(),
membus = NoncoherentXBar(width = 16),
clk_domain = SrcClockDomain(clock = '1GHz',
voltage_domain =
VoltageDomain()))
# add a communication monitor, and also trace all the packets and
# calculate and verify stack distance
system.monitor = CommMonitor(trace_file = "monitor.ptrc.gz",
trace_enable = True,
stack_dist_calc = StackDistCalc(verify = True))
# connect the traffic generator to the bus via a communication monitor
system.cpu.port = system.monitor.slave
system.monitor.master = system.membus.slave
# connect the system port even if it is not used in this example
system.system_port = system.membus.slave
# connect memory to the membus
system.physmem.port = system.membus.master
# -----------------------
# run simulation
# -----------------------
root = Root(full_system = False, system = system)
root.system.mem_mode = 'timing'
|
{
"content_hash": "8b8185dc5fe26a00bed86a2e602b6be8",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 76,
"avg_line_length": 36.53658536585366,
"alnum_prop": 0.6475300400534045,
"repo_name": "lokeshjindal15/pd-gem5",
"id": "be700ac7af8daa29e028fa2f218d5ce0e8fe0264",
"size": "3597",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/configs/tgen-simple-mem.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "10138943"
},
{
"name": "Awk",
"bytes": "19269"
},
{
"name": "C",
"bytes": "469972635"
},
{
"name": "C++",
"bytes": "18163034"
},
{
"name": "CMake",
"bytes": "2202"
},
{
"name": "Clojure",
"bytes": "333"
},
{
"name": "Emacs Lisp",
"bytes": "1969"
},
{
"name": "Groff",
"bytes": "63956"
},
{
"name": "HTML",
"bytes": "136898"
},
{
"name": "Hack",
"bytes": "2489"
},
{
"name": "Java",
"bytes": "3096"
},
{
"name": "Jupyter Notebook",
"bytes": "1231954"
},
{
"name": "Lex",
"bytes": "59257"
},
{
"name": "M4",
"bytes": "52982"
},
{
"name": "Makefile",
"bytes": "1453704"
},
{
"name": "Objective-C",
"bytes": "1315749"
},
{
"name": "Perl",
"bytes": "716374"
},
{
"name": "Perl6",
"bytes": "3727"
},
{
"name": "Protocol Buffer",
"bytes": "3246"
},
{
"name": "Python",
"bytes": "4102365"
},
{
"name": "Scilab",
"bytes": "21433"
},
{
"name": "Shell",
"bytes": "512873"
},
{
"name": "SourcePawn",
"bytes": "4687"
},
{
"name": "UnrealScript",
"bytes": "10556"
},
{
"name": "Visual Basic",
"bytes": "2884"
},
{
"name": "XS",
"bytes": "1239"
},
{
"name": "Yacc",
"bytes": "121715"
}
],
"symlink_target": ""
}
|
import boto3
import json
print('Loading function')
dynamo = boto3.client('dynamodb')
VOUCHES_PER_POINT = 3
def respond(err, res=None):
return {
'statusCode': '400' if err else '200',
'body': json.dumps(str(err) if err else res),
'headers': {
'Content-Type': 'application/json',
'Access-Control-Allow-Origin': '*'
}
}
def lambda_handler(event, context):
operation = event['httpMethod']
if operation != 'GET':
return respond(ValueError('Unsupported method "{}"'.format(operation)))
payload = event['queryStringParameters']
itemResult = dynamo.get_item(TableName=payload['TableName'], Key={'userId': {'S': payload['userId']}})
if 'Item' not in itemResult:
return respond(ValueError('UserId not found'))
# Existing data
item = itemResult['Item']
userPoints = int(item['points']['N'])
userUnderVote = int(item['underVote']['N'])
userVouchers = item['vouchers']['SS'] if 'vouchers' in item else []
# Request parameters
voteDirection = -1 if 'direction' in payload and payload['direction'] == 'down' else 1
isVouching = True if 'vouching' in payload and payload['vouching'] == '1' else False
# Current user ID
sub = event['requestContext']['authorizer']['claims']['sub']
# Initiate vote / vouch vote process
updateExpr = ''
attrValues = {}
if not isVouching:
# Cancel if any vouching is in progress
if userUnderVote != 0:
            return respond('Deja se voteaza')  # 'A vote is already in progress'
# Can't drop below 0
if voteDirection == -1 and userPoints == 0:
            return respond('Nicio bila de sters')  # 'No point to remove'
# Can't get more than 3 points
if voteDirection == 1 and userPoints == 3:
            return respond('Are deja 3 bile, hai sa eliminam din ele inainte de altceva')  # 'Already has 3 points; remove some before adding more'
# Start the vouching process & add me to the vouchers list
(updateExpr, attrValues) = start_vote(sub, voteDirection)
else:
# Cancel if there are no vouches
if userUnderVote == 0:
            return respond('Nu se mai voteaza')  # 'No vote is in progress anymore'
# Clear underVote and vouchers if there is a pending point in the opposite direction
existingVoteDirection = userUnderVote / abs(userUnderVote)
if existingVoteDirection != voteDirection:
(updateExpr, attrValues) = empty_vote()
else:
# Can not vote twice
if sub in userVouchers:
                return respond('Deja ai votat')  # 'You have already voted'
# If there are enough vouches, update the points
if abs(userUnderVote + voteDirection) >= VOUCHES_PER_POINT:
(updateExpr, attrValues) = update_points(newPoints=userPoints + voteDirection)
# Otherwise update the underVote and add me to vouchers
else:
(updateExpr, attrValues) = vote_point(sub, voteDirection)
# Execute query
if updateExpr != '':
dynamo.update_item(
TableName=payload['TableName'],
Key={'userId': {'S': payload['userId']}},
UpdateExpression=updateExpr,
ExpressionAttributeValues=attrValues
)
return respond(None, 'OK')
def empty_vote():
updateExpr = 'SET underVote = :zero REMOVE vouchers'
attrValues = {
':zero': {'N': '0'}
}
return updateExpr, attrValues
def start_vote(sub, voteDirection):
updateExpr = 'SET underVote = :direction, vouchers = :sub'
attrValues = {
':direction': {'N': str(voteDirection)},
':sub': {'SS': [sub]}
}
return updateExpr, attrValues
def vote_point(sub, voteDirection):
updateExpr = 'ADD underVote :voteDirection, vouchers :sub'
attrValues = {
':voteDirection': {'N': str(voteDirection)},
':sub': {'SS': [sub]}
}
return updateExpr, attrValues
def update_points(newPoints):
updateExpr = 'SET points = :points, underVote = :zero REMOVE vouchers'
attrValues = {
':points': {'N': str(newPoints)},
':zero': {'N': '0'}
}
return updateExpr, attrValues
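# Illustrative flow (values are assumed, not read from a real table): with
# VOUCHES_PER_POINT = 3, a user already at underVote = 2 who receives a third 'up'
# vouch from a new voter reaches the threshold, so update_points(newPoints=points + 1)
# issues 'SET points = :points, underVote = :zero REMOVE vouchers'; an 'up' vouch
# while underVote is negative instead clears the pending vote via empty_vote().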
|
{
"content_hash": "1f3018a2e522e1cf2e742727a01e951a",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 106,
"avg_line_length": 31.49230769230769,
"alnum_prop": 0.6072300928187592,
"repo_name": "nicolaes/apt-points",
"id": "67cf6065aab33fd22cda287807ee53b7fc74e8ea",
"size": "4094",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "lambda/movePoint.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "118867"
},
{
"name": "HTML",
"bytes": "16535"
},
{
"name": "JavaScript",
"bytes": "7817"
},
{
"name": "Python",
"bytes": "6126"
},
{
"name": "Shell",
"bytes": "11041"
},
{
"name": "TypeScript",
"bytes": "68116"
}
],
"symlink_target": ""
}
|
"""Utility module for optimization."""
import copy
from typing import Any, Dict, Iterable, List, Sequence
# Amount of space to leave between product data and appended attributes
_SEPARATOR_LENGTH = len(' ')
def optimization_exclusion_specified(entry: Dict[str, Any],
optimizer_parameter: str) -> bool:
"""Returns true if the optimizer exclusion attribute was set and matches the given optimizer parameter."""
return (entry.get('excludeOptimizers') and
isinstance(entry.get('excludeOptimizers'), list) and
optimizer_parameter in entry.get('excludeOptimizers'))
def cut_list_to_limit_list_length(target_list: Sequence[Any],
max_length: int) -> Sequence[Any]:
"""Cuts a list to the max length, returning the result."""
return target_list[:max_length]
def cut_list_to_limit_concatenated_str_length(
target_list: Sequence[str], separator: str,
max_total_str_length: int) -> List[str]:
"""Removes the last items from the list to limit the length of concatenated string of the items.
For example, when target_list = ['Hello', 'Shoptimizer'] and separator = ',',
the concatenated string is 'Hello,Shoptimizer' and the length of the string is
17. If max_total_str_length = 10, the length exceeds the maximum. So, this
function removes the last item and returns transformed list: ['Hello'].
Args:
target_list: A list to be cut.
separator: Characters used as separator when concatenating the items in the
list.
max_total_str_length: The maximum length of the concatenated string.
Returns:
A list cut from target_list.
"""
output_list = list(copy.deepcopy(target_list))
# Concatenated string length > max str length.
while len(separator.join(output_list)) > max_total_str_length:
output_list.pop()
return output_list
def cut_list_elements_over_max_length(target_list: Sequence[str],
max_length: int) -> List[str]:
"""Removes elements from a list that are over a certain length."""
return [element for element in target_list if len(element) <= max_length]
def append_keywords_to_field(target_field: str, keywords: Sequence[str],
chars_to_preserve: int, max_length: int) -> str:
"""Appends keywords to the target field.
If necessary, this function removes the final characters of the target field
up to chars_to_preserve to make room for keywords. If a keyword is already in
the target field, it is not appended.
Args:
target_field: The field to be transformed.
keywords: keywords that are appended to the target field.
chars_to_preserve: The number of original chars to preserve from the start
of the string to make sure these chars are not removed when appending
keywords. E.g., use to preserve original title.
    max_length: The maximum length of the field.
Returns:
The target field with keywords appended to the back.
"""
lowercase_target_field = target_field.lower()
# An ellipsis and space will be appended before keywords are appended, so
# subtract the length of 2 separators.
space_left_to_append_keywords = (max_length - chars_to_preserve) - (
_SEPARATOR_LENGTH * 2)
keywords_text = _get_keywords_text(keywords, lowercase_target_field,
space_left_to_append_keywords)
if not keywords_text:
return target_field
# Checks if enough whitespace available, and if so appends keywords using
# whitespace.
if max_length - len(target_field) >= len(keywords_text):
field_with_keywords_appended = target_field.strip() + keywords_text
else:
# If not enough whitespace is available, trims some chars to make space
# to append the keywords text.
insert_pos = _calculate_keywords_insert_pos(target_field, keywords_text,
max_length, chars_to_preserve)
field_with_keywords_appended = target_field[:insert_pos].strip(
) + keywords_text
return field_with_keywords_appended.strip()
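# Illustrative example (inputs are assumed, not from the original source):
#   append_keywords_to_field('Nike Shoes', ['red', 'Nike'],
#                            chars_to_preserve=10, max_length=25)
# returns 'Nike Shoes… red' -- 'red' fits and is appended after an ellipsis, while
# 'Nike' is skipped because it already appears in the target field.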
def _get_keywords_text(keywords: Sequence[str], lowercase_target_field: str,
space_left_to_append_keywords: int) -> str:
"""Generates a string of keywords to be appended to the target field.
Args:
keywords: keywords to be appended to the target field.
lowercase_target_field: The field to append keywords to.
space_left_to_append_keywords: The space left in the target field to append
keywords to.
Returns:
A string consisting of an ellipsis, space, and space-separated keywords,
or an empty string if no keywords could be appended.
"""
keywords_not_in_field = []
for keyword in keywords:
enough_space_to_append_keyword = (
len(keyword) + _SEPARATOR_LENGTH <= space_left_to_append_keywords)
keyword_not_in_field = (
len(keyword) > 1 and keyword.lower() not in lowercase_target_field)
if enough_space_to_append_keyword and (keyword_not_in_field or
len(keyword) < 2):
keywords_not_in_field.append(keyword)
space_left_to_append_keywords -= len(keyword) + _SEPARATOR_LENGTH
if space_left_to_append_keywords <= 0:
break
if not keywords_not_in_field:
return ''
elif lowercase_target_field:
return '… ' + ' '.join(keywords_not_in_field)
else:
return ' '.join(keywords_not_in_field)
def _calculate_keywords_insert_pos(target_field: str, keywords_text: str,
max_length: int,
chars_to_preserve: int) -> int:
"""Calculates the position to insert the keywords text in the target field.
Args:
target_field: The field to be transformed.
keywords_text: A string containing a list of keywords to append to the
target field.
max_length: The max length of the target field.
chars_to_preserve: The number of chars to preserve in the target field.
Returns:
The position in the target field to insert the keywords text.
"""
# Calculates the num of chars the target field and keywords text overflows by.
# E.g. | Title (10) | Desc (20) | Keywords text (10) | = 40 chars
# If max_length is 30 chars, then the overflow is 10 chars.
# Therefore, the keywords text should be inserted 10 chars to the left
# of the target field.
overflowing_chars = (len(target_field) + len(keywords_text)) - max_length
insert_pos = len(target_field) - overflowing_chars
# Truncating a digit (1.5, 600, etc.) can lead to inaccurate product
# information, so we need to decrement the insert position until we hit a
# non-digit character, or the start of chars_to_preserve.
# (chars_to_preserve usually represents the original product title.)
while (target_field[insert_pos] == '.' or
target_field[insert_pos].isdigit()) and insert_pos > chars_to_preserve:
insert_pos -= 1
return insert_pos
def is_particular_google_product_category(given_category: str,
category_keywords: Iterable[str],
category_ids: Iterable[str]) -> bool:
"""Checks if a given google_product_category value is in a set of particular google_product_category values.
Args:
given_category: google_product_category value to be checked.
category_keywords: Keywords that are in the target google_product_category.
category_ids: Target google_product_category IDs.
Returns:
Whether a given value is the particular google_product_category or not.
"""
for category_word in category_keywords:
if category_word in given_category:
return True
if given_category in category_ids:
return True
return False
|
{
"content_hash": "9b9d44d12e406b1fff1cccd06c11362a",
"timestamp": "",
"source": "github",
"line_count": 192,
"max_line_length": 110,
"avg_line_length": 40.651041666666664,
"alnum_prop": 0.6757206918641896,
"repo_name": "google/shoptimizer",
"id": "4a01def2fcce75a792e2864f45c843ad0e791959",
"size": "8398",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "shoptimizer_api/util/optimization_util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1390"
},
{
"name": "Python",
"bytes": "558972"
},
{
"name": "Shell",
"bytes": "4697"
}
],
"symlink_target": ""
}
|
from highton.models import HightonModel
from highton.highton_constants import HightonConstants
from highton import fields
from highton import call_mixins
class Task(
HightonModel,
call_mixins.CreateCallMixin,
call_mixins.DetailCallMixin,
call_mixins.DeleteTagCallMixin,
call_mixins.UpdateCallMixin,
):
"""
:ivar id: fields.IntegerField(name=HightonConstants.ID)
:ivar recording_id: fields.IntegerField(name=HightonConstants.RECORDING_ID)
:ivar subject_id: fields.IntegerField(name=HightonConstants.SUBJECT_ID)
:ivar subject_type: fields.StringField(name=HightonConstants.SUBJECT_TYPE)
:ivar subject_name: fields.StringField(name=HightonConstants.SUBJECT_NAME)
:ivar category_id: fields.IntegerField(name=HightonConstants.CATEGORY_ID, required=True)
:ivar body: fields.StringField(name=HightonConstants.BODY, required=True)
:ivar frame: fields.StringField(name=HightonConstants.FRAME, required=True)
:ivar due_at: fields.DatetimeField(name=HightonConstants.DUE_AT, required=True)
:ivar alert_at: fields.DatetimeField(name=HightonConstants.ALERT_AT)
:ivar created_at: fields.DatetimeField(name=HightonConstants.CREATED_AT)
:ivar author_id: fields.IntegerField(name=HightonConstants.AUTHOR_ID)
:ivar updated_at: fields.DatetimeField(name=HightonConstants.UPDATED_AT)
:ivar public: fields.BooleanField(name=HightonConstants.PUBLIC)
:ivar recurring_period: fields.StringField(name=HightonConstants.RECURRING_PERIOD)
:ivar anchor_type: fields.IntegerField(name=HightonConstants.ANCHOR_TYPE)
:ivar done_at: fields.DatetimeField(name=HightonConstants.DONE_AT)
:ivar owner_id: fields.IntegerField(name=HightonConstants.OWNER_ID)
"""
TAG_NAME = HightonConstants.TASK
ENDPOINT = HightonConstants.TASKS
def __init__(self, **kwargs):
self.recording_id = fields.IntegerField(name=HightonConstants.RECORDING_ID)
self.subject_id = fields.IntegerField(name=HightonConstants.SUBJECT_ID)
self.subject_type = fields.StringField(name=HightonConstants.SUBJECT_TYPE)
self.subject_name = fields.StringField(name=HightonConstants.SUBJECT_NAME)
self.category_id = fields.IntegerField(name=HightonConstants.CATEGORY_ID, required=True)
self.body = fields.StringField(name=HightonConstants.BODY, required=True)
self.frame = fields.StringField(name=HightonConstants.FRAME, required=True)
self.due_at = fields.DatetimeField(name=HightonConstants.DUE_AT, required=True)
self.alert_at = fields.DatetimeField(name=HightonConstants.ALERT_AT)
self.created_at = fields.DatetimeField(name=HightonConstants.CREATED_AT)
self.author_id = fields.IntegerField(name=HightonConstants.AUTHOR_ID)
self.updated_at = fields.DatetimeField(name=HightonConstants.UPDATED_AT)
self.public = fields.BooleanField(name=HightonConstants.PUBLIC)
self.recurring_period = fields.StringField(name=HightonConstants.RECURRING_PERIOD)
self.anchor_type = fields.IntegerField(name=HightonConstants.ANCHOR_TYPE)
self.done_at = fields.DatetimeField(name=HightonConstants.DONE_AT)
self.owner_id = fields.IntegerField(name=HightonConstants.OWNER_ID)
super().__init__(**kwargs)
def complete(self):
"""
Complete current task
:return:
:rtype: requests.models.Response
"""
return self._post_request(
data='',
endpoint=self.ENDPOINT + '/' + str(self.id) + '/complete'
)
@classmethod
def list_upcoming(cls):
"""
Returns a collection of upcoming tasks (tasks that have not yet been completed,
regardless of whether they’re overdue) for the authenticated user
:return:
:rtype: list
"""
return fields.ListField(name=cls.ENDPOINT, init_class=cls).decode(
cls.element_from_string(
cls._get_request(endpoint=cls.ENDPOINT + '/upcoming').text
)
)
@classmethod
def list_assigned(cls):
"""
Returns a collection of upcoming tasks (tasks that have not yet been completed,
regardless of whether they’re overdue) that were created by the authenticated user,
but assigned to somebody else.
:return:
:rtype: list
"""
return fields.ListField(name=cls.ENDPOINT, init_class=cls).decode(
cls.element_from_string(
cls._get_request(endpoint=cls.ENDPOINT + '/assigned').text
)
)
@classmethod
def list_completed(cls):
"""
Returns a collection of completed tasks.
:return:
:rtype: list
"""
return fields.ListField(name=cls.ENDPOINT, init_class=cls).decode(
cls.element_from_string(
cls._get_request(endpoint=cls.ENDPOINT + '/completed').text
)
)
@classmethod
def list_today(cls):
"""
Returns a collection of uncompleted tasks due for the rest of today for the authenticated user.
:return:
:rtype: list
"""
return fields.ListField(name=cls.ENDPOINT, init_class=cls).decode(
cls.element_from_string(
cls._get_request(endpoint=cls.ENDPOINT + '/today').text
)
)
@classmethod
def list_all(cls):
"""
Returns a collection of all tasks visible to the current user.
:return:
:rtype: list
"""
return fields.ListField(name=cls.ENDPOINT, init_class=cls).decode(
cls.element_from_string(
cls._get_request(endpoint=cls.ENDPOINT + '/all').text
)
)
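# Illustrative usage sketch (argument values are assumed; the create/complete call
# pattern follows the mixins listed on the class, not documented here):
#   task = Task(category_id=1, body='Call the customer', frame='today', due_at=some_datetime)
#   task.complete()
#   upcoming = Task.list_upcoming()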
|
{
"content_hash": "90b9aaeeba118980a0bfde93c6579092",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 103,
"avg_line_length": 40.645390070921984,
"alnum_prop": 0.6728319664979934,
"repo_name": "seibert-media/Highton",
"id": "b810ce3ec13e8acc300b2828a207b22b8a686603",
"size": "5735",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "highton/models/task.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "93025"
},
{
"name": "Shell",
"bytes": "78"
}
],
"symlink_target": ""
}
|
from tkinter import Tk,Frame,Label,Entry,StringVar,Button,Checkbutton,IntVar,Canvas,Text,Toplevel
from tkinter import LEFT,RIGHT,BOTH,NW,BOTTOM,TOP,X,Y,END
from PIL import Image,ImageTk,ImageDraw
from sys import argv
from time import time
from numpy import log10,array,apply_along_axis,hstack,vstack,lib,where,delete as npdelete
from tkinter.messagebox import askquestion,showerror
from tkinter.filedialog import asksaveasfilename
class DataExtraction:
format_string = '{:.5g}'
@staticmethod
def nfmt(n):
return [DataExtraction.format_string]*n
@staticmethod
def logscale(value,p0,p1):
return 10**((value-p0)/(p1-p0))
@staticmethod
def rolling_window(a, shape):
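        # Returns a strided view of `a` holding every (shape[0] x shape[1]) window of
        # the image array, so findShape() can compare the selected template against
        # all positions at once without copying data.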
s = ((a.shape[0] - shape[0] + 1,) + (a.shape[1] - shape[1] + 1,) +
(1,) + shape + (a.shape[2],))
strides = a.strides + a.strides
return lib.stride_tricks.as_strided(a, shape=s, strides=strides)
@staticmethod
def inside(r,x,y):
return r[2] >= x >= r[0] and r[3] > y > r[1]
def __init__(self,image,width = 800,height=600):
self.root = Tk()
self.root.wm_title('Plot Data Extraction Tool')
self.root.bind("<Key-Escape>",self._quit)
self.root.bind("<Key-Return>",lambda event: self.fix())
self.root.protocol('WM_DELETE_WINDOW',self._quit)
self.root.resizable(0,0)
self.main = Frame(self.root)
self.main.pack()
self.circles = array((),dtype='int16').reshape(0,4)
self.filename = image
self.shapex = -1
self.im = Image.open(image).convert('RGBA')
self.original = self.im.copy()
self.pim = ImageTk.PhotoImage(self.im)
self.canvas = Canvas(self.main,cursor='plus',width=self.im.width,height=self.im.height)
self.canvas.create_image(0,0,image=self.pim,anchor=NW)
self.canvas.pack(side=LEFT)
self.canvas.bind("<Motion>",self.getxy)
self.canvas.bind("<ButtonPress-1>",self.setxy)
self.canvas.bind("<ButtonRelease-1>",self.setShape)
self.gui = Frame(self.main)
self.gui.pack(fill=BOTH,expand=True)
Label(self.gui,text="Pix-Phys - click to reset").pack()
self.xy = Frame(self.gui)
self.xy.pack()
self.coords = []
self.fixed = False
for i,coord in enumerate(('x0','x1','y0','y1')):
v = StringVar()
self.coords.append({
'pix': Label(self.xy,text=coord),
'ent': Entry(self.xy,width=5),
'lab': Label(self.xy,textvariable=v,width=11,borderwidth=2,
relief='solid'),
})
self.coords[-1]['pix'].grid(row=i>1,column=3*(i%2)+0)
self.coords[-1]['ent'].grid(row=i>1,column=3*(i%2)+1)
self.coords[-1]['lab'].grid(row=i>1,column=3*(i%2)+2)
self.coords[-1]['lab'].set = False
self.coords[-1]['lab'].var = v
self.coords[-1]['lab'].bind("<Button-1>",self.resetxy)
self.xlog = IntVar()
self.ylog = IntVar()
Checkbutton(self.xy,variable=self.xlog,text='x log').grid(row=0,column=6)
Checkbutton(self.xy,variable=self.ylog,text='y log').grid(row=1,column=6)
bf = Frame(self.gui)
bf.pack(fill=X)
self.buttonFix = Button(bf,text="Fix Scale",command=self.fix,bg='grey')
self.buttonFix.pack(side=LEFT,fill=X,expand=True)
c = Button(bf,text="Find shape",command=self.findShape,bg='grey',anchor='w')
c.pack(side=RIGHT,fill=X,expand=True)
c.pack_propagate(False)
vcmd = (c.register(lambda val: not len(val) or (len(val) <=2 and val.isdigit())) ,'%P')
self.precision = Entry(c,width=2,vcmd=vcmd,validate='key')
self.precision.pack(side=RIGHT)
self.precision.insert(0,"1")
self.position = StringVar()
Label(c,text="uncertain=0.",bg='white').pack(side=RIGHT)
f = Frame(self.gui)
f.pack(fill=X)
Label(f,text="Plot Coordinates").pack(side=LEFT)
Label(f,textvariable=self.position,borderwidth=2,relief='sunken').pack(
side=RIGHT,expand=True,fill=X)
c = Frame(self.gui)
c.pack(side=TOP,fill=BOTH,expand=True)
self.writerFrame = Frame(c)
self.writerFrame.pack(side=LEFT,fill=BOTH,expand=True)
self.writerFrame.pack_propagate(False)
self.writer = Text(self.writerFrame)
self.writer.pack()
c = Frame(c)
c.pack(side=RIGHT,fill=Y)
self.pop = Button(c,text="Pop" ,command=self.pop)
self.pop.pack(expand=True,fill=BOTH)
Button(c,text="Clear",command=self.clear).pack(expand=True,fill=BOTH)
Button(c,text="Save",command=self.save).pack(expand=True,fill=BOTH)
c = Canvas(self.gui,bg='grey')
c.pack(side=BOTTOM,fill=X)
self.root.update()
w,h=c.winfo_width(),c.winfo_height()
self.zoom = Image.new("RGBA",(w,h),color="white")
self.pzoom=ImageTk.PhotoImage(self.zoom)
c.create_image(w//2,h//2,image=self.pzoom,anchor='center')
length = 20
c.create_line(w//2-length,h//2,w//2+length,h//2,width=2,fill='blue')
c.create_line(w//2,h//2-length,w//2,h//2+length,width=2,fill='blue')
#For windows
self.root.focus_force()
self.root.mainloop()
def clear(self):
self.writer.delete(1.0,END)
self.im = Image.open(self.filename).convert('RGBA')
self.pim.paste(self.im)
def save(self):
f = asksaveasfilename()
if not f: return
with open(f,'w') as fd:
fd.write(self.writer.get(1.0,END))
def unpop(self,event = None):
w = Text(self.writerFrame)
w.pack()
w.insert(END,self.writer.get(1.0,END))
self.writer = w
self.top.destroy()
self.pop.configure(state='normal')
def pop(self):
self.pop.configure(state='disabled')
self.top = Toplevel(self.root)
self.top.wm_title("Right click to save")
w = Text(self.top)
w.pack()
w.insert(END,self.writer.get(1.0,END))
self.writer.destroy()
self.writer = w
self.top.bind("<Key-Escape>",self.unpop)
self.top.protocol('WM_DELETE_WINDOW',self.unpop)
def addCircle(self,draw,x,y,w,h):
draw.ellipse((x,y,x+w,y+h),outline='red')
def processShape(self,x0,y0,x1,y1,draw):
draw.ellipse((x0,y0,x1,y1),outline='red')
if self.fixed:
self.writer.insert(END,(' , '.join(self.nfmt(2))+'\n').format(*self.pixToPlot((x0+x1)/2,(y0+y1)/2)))
def findShape(self):
try: x0,y0,x1,y1 = self.shape
except AttributeError: return
if x0 == x1 or y0 == y1: return
self.im = Image.open(self.filename).convert('RGBA')
draw = ImageDraw.Draw(self.im)
shape = array(self.im)[y0:y1,x0:x1]
a,b,c = shape.shape
windows = self.rolling_window(array(self.im),shape.shape[:2])
target = float("0."+self.precision.get())*a*b*c*255
result = vstack(where((windows-shape).sum(axis=(2,3,4,5))<target))
apply_along_axis(lambda r: self.processShape(r[1],r[0],r[1]+b,r[0]+a,draw),0,result)
self.pim.paste(self.im)
def fix(self):
pixels = []
points = []
for coord in self.coords:
value = coord['ent'].get()
if not coord['lab'].set or not len(value):
showerror("Can't fix yet!","Make sure all pixels and plot values are set first!")
self.fixed = False
self.buttonFix.config(state='normal')
return
try: points.append(float(value))
except ValueError:
showerror("Can't fix yet!","Non-float value in entry, "+value+"!")
self.fixed = False
self.buttonFix.config(state='normal')
return
pixels.append(eval(coord['lab'].var.get()))
self.xscale = (points[1] - points[0])/((pixels[1][0]-pixels[0][0])**2 +
(pixels[1][1]-pixels[0][1])**2)
self.yscale = (points[3] - points[2])/((pixels[3][0]-pixels[2][0])**2 +
(pixels[3][1]-pixels[2][1])**2)
self.px0,self.px1,self.py0,self.py1 = points
self.x0,self.x1,self.y0,self.y1 = pixels
self.xx = pixels[1][0]-pixels[0][0]
self.xy = pixels[1][1]-pixels[0][1]
self.yx = pixels[3][0]-pixels[2][0]
self.yy = pixels[3][1]-pixels[2][1]
self.fixed = True
self.buttonFix.config(state='disabled')
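    # pixToPlot projects a pixel onto each calibration axis: the dot product of
    # (pixel - axis origin) with the axis pixel vector, scaled by
    # delta_value / |axis vector|^2 (xscale / yscale above), gives the coordinate in
    # plot units; logarithmic axes are handled separately inside pixToPlot.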
def pixToPlot(self,x,y):
if not self.fixed:
showerror("Can't calculate xy!","Mapping not fixed yet!")
px = x - self.x0[0]
py = y - self.x0[1]
X=(px*self.xx+py*self.xy)*self.xscale+self.px0
px = x - self.y0[0]
py = y - self.y0[1]
Y=(px*self.yx+py*self.yy)*self.yscale+self.py0
try:
if self.xlog.get():
X = self.px0*self.logscale(X,self.px0,self.px1)
if self.ylog.get():
Y = self.py0*self.logscale(Y,self.py0,self.py1)
except ZeroDivisionError:
showerror("Invalid range!","0 or negative value in logarithmic scale!")
self.fixed = False
self.buttonFix.config(state='normal')
self.position.set("")
return
return X,Y
def removeShapes(self,rows):
apply_along_axis(lambda r: self.im.paste(self.original.crop(r),r),1,self.circles[rows])
        self.circles = npdelete(self.circles, rows, axis=0)
self.pim.paste(self.im)
def drawCircle(self,x0,y0,x1,y1,clear = False,save = True):
if clear:
#self.im.paste(self.oldim.copy().crop((x0-1,y0-1,x1+1,y1+1)),(x0,y0))
self.im = self.oldim.copy()
draw = ImageDraw.Draw(self.im)
draw.ellipse((x0,y0,x1,y1),outline='red')
self.pim.paste(self.im)
self.shape = (x0,y0,x1,y1)
if save:
width = 1
self.circles = vstack((self.circles,(x0-width,y0-width,x1+width,y1+width)))
def initShape(self,x,y):
self.shapex = x
self.shapey = y
self.oldim = self.im.copy()
self.drawCircle(x,y,x,y,True,False)
def setShape(self,event):
self.shapex = -1
if time()-self.time < 0.3:
if not self.fixed:
for coord in self.coords:
if not coord['lab'].set:
coord['lab'].set = True
coord['lab'].configure(relief='ridge')
break
else:
x,y = event.x,event.y
try:
rows = apply_along_axis(lambda r: self.inside(r,x,y),1,self.circles)
except IndexError: rows = array(())
if rows.any():
self.removeShapes(where(rows))
else:
width = 5
self.writer.insert(END,(' , '.join(self.nfmt(2))+"\n").format(*self.pixToPlot(x,y)))
self.drawCircle(x-width,y-width,x+width,y+width)
self.getZoom(x,y)
return
self.im = self.oldim.copy()
def getZoom(self,x,y):
try:
h = self.zoom.height
w = self.zoom.width
except AttributeError: return
white = array([255,255,255,255])
subimg = array(self.im)[max(y-h//2,0):min(y+h//2,self.im.height),max(x-w//2,0):min(x+w//2,self.im.width)]
if w//2 > x:
leftpad = white.repeat((w//2-x)*subimg.shape[0]).reshape(subimg.shape[0],-1,4)
subimg = hstack((leftpad,subimg))
if w//2+x > self.im.width:
rightpad = white.repeat((w//2+x-self.im.width)*subimg.shape[0]).reshape(subimg.shape[0],-1,4)
subimg = hstack((subimg,rightpad))
if h//2 > y:
toppad = white.repeat(((h//2-y)*subimg.shape[1])).reshape(-1,subimg.shape[1],4)
subimg = vstack((toppad,subimg))
if h//2+y > self.im.height:
bottpad = white.repeat((h//2+y-self.im.height)*subimg.shape[1]).reshape(-1,subimg.shape[1],4)
subimg = vstack((subimg,bottpad))
color = tuple(map(tuple,subimg.reshape(-1,4)))
self.zoom.putdata(color)
self.pzoom.paste(self.zoom.transform((self.zoom.width,self.zoom.height),Image.EXTENT,
(0.25*self.zoom.width,0.25*self.zoom.height,0.75*self.zoom.width,0.75*self.zoom.height)))
def getxy(self,event):
x,y = event.x,event.y
if self.shapex > -1:
dx,dy = abs(self.shapex-x),abs(self.shapey-y)
self.drawCircle(x-dx,y-dy,x+dx,y+dy,True,False)
elif not self.fixed:
for coord in self.coords:
if not coord['lab'].set:
coord['lab'].var.set(str((x,y)))
else:
self.position.set(("x = "+self.format_string+" , y = "+self.format_string).format(*self.pixToPlot(x,y)))
self.getZoom(x,y)
def setxy(self,event):
self.initShape(event.x,event.y)
self.time = time()
def resetxy(self,event):
event.widget.configure(relief='solid')
event.widget.set = False
self.fixed = False
self.buttonFix.config(state='normal')
def _quit(self, event = None):
if askquestion("Exit","Sure?") == 'no': return
self.root.quit()
self.root.destroy()
if __name__ == "__main__":
try:
DataExtraction(argv[1])
except IndexError:
print("-E- Image filename missing!")
|
{
"content_hash": "af7e65d3fae1b81213fd1d9231928f1c",
"timestamp": "",
"source": "github",
"line_count": 353,
"max_line_length": 116,
"avg_line_length": 39.671388101983005,
"alnum_prop": 0.5444158811768066,
"repo_name": "uperetz/AstroTools",
"id": "a0fa9b20a55add4152741ed7342ddc23066344d0",
"size": "14027",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "data_extract.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "271375"
},
{
"name": "Shell",
"bytes": "4009"
}
],
"symlink_target": ""
}
|
import unittest
import numpy
import scipy
from mockredis import MockRedis as Redis
import mongomock
from future.builtins import range
from future.builtins import zip
from nearpy.storage import MemoryStorage, RedisStorage, MongoStorage
class StorageTest(unittest.TestCase):
"""
Base class for storage tests.
"""
def setUp(self):
self.storage.clean_all_buckets()
numpy.random.seed(4)
def check_store_vector(self, x):
bucket_key = '23749283743928748'
x_data = ['one', 'two', 'three']
self.storage.store_vector('testHash', bucket_key, x, x_data)
bucket = self.storage.get_bucket('testHash', bucket_key)
self.assertEqual(len(bucket), 1)
y, y_data = bucket[0]
self.assertEqual(type(y), type(x))
self.assertEqual(y.shape, x.shape)
self.assertEqual(max(abs(y - x)), 0)
self.assertEqual(y_data, x_data)
self.storage.clean_all_buckets()
self.assertEqual(self.storage.get_bucket('testHash', bucket_key), [])
def check_store_many_vectors(self, xs):
num_vector = len(xs)
bucket_keys = list(map(str,
list(range(10000000,
10000000 + num_vector))))
x_data = list(range(0, num_vector))
self.storage.store_many_vectors('testHash', bucket_keys, xs, x_data)
for bucket_key, x, data in zip(bucket_keys, xs, x_data):
bucket = self.storage.get_bucket('testHash', bucket_key)
self.assertEqual(len(bucket), 1)
y, y_data = bucket[0]
self.assertEqual(type(y), type(x))
self.assertEqual(y.shape, x.shape)
self.assertEqual(max(abs(y - x)), 0)
self.assertEqual(y_data, data)
self.storage.clean_all_buckets()
self.assertEqual(self.storage.get_bucket('testHash', bucket_key), [])
def check_get_all_bucket_keys(self):
x, x_data = numpy.ones(100), "data"
hash_config = [
("firstHash", ["1", "2", "3", "4"]),
("secondHash", ["10", "20", "3", "4", "50"]),
]
for hash_name, bucket_keys in hash_config:
for bucket_key in bucket_keys:
self.storage.store_vector(hash_name, bucket_key, x, x_data)
for hash_name, bucket_keys in hash_config:
self.assertSequenceEqual(
sorted(self.storage.get_all_bucket_keys(hash_name)),
sorted(bucket_keys)
)
def check_delete_vector(self, x):
        hash_name, bucket_name = "testHash", "testBucket"
samples = list(range(10))
for sample in samples:
self.storage.store_vector(hash_name, bucket_name, x, sample)
def get_bucket_items():
return [data for v, data
in self.storage.get_bucket(hash_name, bucket_name)]
self.assertEqual(get_bucket_items(), samples)
deleted_sample = 4
self.storage.delete_vector(hash_name, [bucket_name], deleted_sample)
samples.remove(deleted_sample)
self.assertEqual(get_bucket_items(), samples)
class MemoryStorageTest(StorageTest):
def setUp(self):
self.storage = MemoryStorage()
super(MemoryStorageTest, self).setUp()
def test_store_vector(self):
x = numpy.random.randn(100, 1)
self.check_store_vector(x)
def test_store_sparse_vector(self):
x = scipy.sparse.rand(100, 1, density=0.1)
self.check_store_vector(x)
def test_get_all_bucket_keys(self):
self.check_get_all_bucket_keys()
def test_delete_vector(self):
self.check_delete_vector(numpy.ones(100))
class RedisStorageTest(StorageTest):
def setUp(self):
self.storage = RedisStorage(Redis())
super(RedisStorageTest, self).setUp()
def test_store_vector(self):
x = numpy.random.randn(100, 1).ravel()
self.check_store_vector(x)
def test_store_sparse_vector(self):
x = scipy.sparse.rand(100, 1, density=0.1)
self.check_store_vector(x)
def test_get_all_bucket_keys(self):
self.check_get_all_bucket_keys()
def test_delete_vector(self):
self.check_delete_vector(numpy.ones(100))
def test_store_zero(self):
x = numpy.ones(100)
        hash_name, bucket_name = "testHash", "testBucket"
self.storage.store_vector(hash_name, bucket_name, x, 0)
bucket = self.storage.get_bucket(hash_name, bucket_name)
_, data = bucket[0]
self.assertEqual(data, 0)
def test_store_many_vectors(self):
x = numpy.random.randn(100, 10)
self.check_store_many_vectors(x)
class MongoStorageTest(StorageTest):
def setUp(self):
self.storage = MongoStorage(mongomock.MongoClient().db.collection)
super(MongoStorageTest, self).setUp()
def test_store_vector(self):
x = numpy.random.randn(100, 1).ravel()
self.check_store_vector(x)
def test_store_sparse_vector(self):
x = scipy.sparse.rand(100, 1, density=0.1)
self.check_store_vector(x)
def test_get_all_bucket_keys(self):
self.check_get_all_bucket_keys()
def test_delete_vector(self):
self.check_delete_vector(numpy.ones(100))
def test_store_zero(self):
x = numpy.ones(100)
        hash_name, bucket_name = "testHash", "testBucket"
self.storage.store_vector(hash_name, bucket_name, x, 0)
bucket = self.storage.get_bucket(hash_name, bucket_name)
_, data = bucket[0]
self.assertEqual(data, 0)
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "7ca7cf434eff78f023fd7a7feee1bdda",
"timestamp": "",
"source": "github",
"line_count": 168,
"max_line_length": 77,
"avg_line_length": 33.5,
"alnum_prop": 0.6051883439943141,
"repo_name": "pixelogik/NearPy",
"id": "8f5750d1923fd2cf1294a62e9e1e61a49c79c45a",
"size": "6749",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/storage_tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "198332"
}
],
"symlink_target": ""
}
|
import sys
import traceback
import logging
from __init__ import *
def main(argv):
try:
command = command_manager.parse_command_args(argv)
root_logger = logging.getLogger()
handler = logging.StreamHandler()
root_logger.addHandler(handler)
if command_manager.verbose:
root_logger.setLevel(logging.DEBUG)
handler.setLevel(logging.DEBUG)
else:
root_logger.setLevel(logging.INFO)
handler.setLevel(logging.INFO)
rc = command.run()
return rc
except ValidationError:
(exc_type, exc_value, exc_traceback) = sys.exc_info()
traceback.print_exception(exc_type, exc_value, None, file=sys.stderr)
if command.get_results_file():
write_json(FatalErrorResults(ARCHIVE_VALIDATION_ERROR_RC,
exc_type, exc_value,
exc_traceback).to_json(),
command.get_results_file())
return ARCHIVE_VALIDATION_ERROR_RC
    except SystemExit as c:
sys.exit(c)
except:
(exc_type, exc_value, exc_traceback) = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback, file=sys.stderr)
if command.get_results_file():
write_json(FatalErrorResults(UNEXPECTED_EXC_RC,
exc_type, exc_value,
exc_traceback).to_json(),
command.get_results_file())
return UNEXPECTED_EXC_RC
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
{
"content_hash": "3c7e16307e829dfb510f93ba8da73b11",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 86,
"avg_line_length": 37.34090909090909,
"alnum_prop": 0.5556908094948265,
"repo_name": "quaddra/engage",
"id": "67d33c0680d106b80b397a54f36d8c0e4a297536",
"size": "1747",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python_pkg/engage/drivers/genforma/engage_django_sdk/packager/__main__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "13559"
},
{
"name": "Makefile",
"bytes": "8662"
},
{
"name": "OCaml",
"bytes": "193014"
},
{
"name": "Python",
"bytes": "1425458"
},
{
"name": "Shell",
"bytes": "18171"
}
],
"symlink_target": ""
}
|
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.todo',
'sphinx.ext.mathjax',
]
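# A minimal sketch of how further built-in extensions could be enabled here if
# this project ever needed them (neither is required by this configuration):
#
#     extensions += ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
#
# autodoc pulls API documentation from docstrings; viewcode links rendered
# pages to highlighted source.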
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'ohmu'
copyright = u'2014, DeLesley Hutchins'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'ohmudoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'ohmu.tex', u'ohmu Documentation',
u'DeLesley Hutchins', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'ohmu', u'ohmu Documentation',
[u'DeLesley Hutchins'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'ohmu', u'ohmu Documentation',
u'DeLesley Hutchins', 'ohmu', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
{
"content_hash": "88d89ae9a3ff3bcd3c20e6ebdbc80c01",
"timestamp": "",
"source": "github",
"line_count": 247,
"max_line_length": 79,
"avg_line_length": 31.425101214574898,
"alnum_prop": 0.7039422829167741,
"repo_name": "google/ohmu",
"id": "504211a209330c3295c25d16d4fc50da450d9542",
"size": "8179",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "6729"
},
{
"name": "C++",
"bytes": "751111"
},
{
"name": "CMake",
"bytes": "9762"
},
{
"name": "Shell",
"bytes": "5333"
}
],
"symlink_target": ""
}
|
from flask import Flask
from datetime import datetime
app = Flask(__name__)
@app.route("/")
def hello():
return "Hello World! " + str(datetime.utcnow())
|
{
"content_hash": "15221697b4c909e800e8c54a96a678c6",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 51,
"avg_line_length": 17.77777777777778,
"alnum_prop": 0.66875,
"repo_name": "filemakergarage/zeroclient",
"id": "1777baaa1b74e12195c36c7fc5c5d3dbbe8c984f",
"size": "189",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/local/test_flask.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "163404"
},
{
"name": "Shell",
"bytes": "5072"
}
],
"symlink_target": ""
}
|
from django.views import generic
from . import forms, models
class BookCreateView(generic.CreateView):
model = models.Book
form_class = forms.BookForm
success_url = "/"
|
{
"content_hash": "4be50398f34af43e7febcb8e4b8d628b",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 41,
"avg_line_length": 20.444444444444443,
"alnum_prop": 0.717391304347826,
"repo_name": "applegrew/django-select2",
"id": "4bd094285da8cffff0a3499c5443927d2edb41f3",
"size": "184",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example/example/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "486"
},
{
"name": "JavaScript",
"bytes": "1783"
},
{
"name": "Python",
"bytes": "70231"
}
],
"symlink_target": ""
}
|
__all__ = (
'Mock',
'MagicMock',
'patch',
'sentinel',
'DEFAULT',
'ANY',
'call',
'create_autospec',
'FILTER_DIR',
'NonCallableMock',
'NonCallableMagicMock',
'mock_open',
'PropertyMock',
'seal',
)
__version__ = '1.0'
import inspect
import pprint
import sys
import builtins
from types import ModuleType
from functools import wraps, partial
_builtins = {name for name in dir(builtins) if not name.startswith('_')}
BaseExceptions = (BaseException,)
if 'java' in sys.platform:
# jython
import java
BaseExceptions = (BaseException, java.lang.Throwable)
FILTER_DIR = True
# Workaround for issue #12370
# Without this, the __class__ properties wouldn't be set correctly
_safe_super = super
def _is_instance_mock(obj):
# can't use isinstance on Mock objects because they override __class__
# The base class for all mocks is NonCallableMock
return issubclass(type(obj), NonCallableMock)
def _is_exception(obj):
return (
isinstance(obj, BaseExceptions) or
isinstance(obj, type) and issubclass(obj, BaseExceptions)
)
def _get_signature_object(func, as_instance, eat_self):
"""
Given an arbitrary, possibly callable object, try to create a suitable
signature object.
Return a (reduced func, signature) tuple, or None.
"""
if isinstance(func, type) and not as_instance:
# If it's a type and should be modelled as a type, use __init__.
try:
func = func.__init__
except AttributeError:
return None
# Skip the `self` argument in __init__
eat_self = True
elif not isinstance(func, FunctionTypes):
# If we really want to model an instance of the passed type,
# __call__ should be looked up, not __init__.
try:
func = func.__call__
except AttributeError:
return None
if eat_self:
sig_func = partial(func, None)
else:
sig_func = func
try:
return func, inspect.signature(sig_func)
except ValueError:
# Certain callable types are not supported by inspect.signature()
return None
def _check_signature(func, mock, skipfirst, instance=False):
sig = _get_signature_object(func, instance, skipfirst)
if sig is None:
return
func, sig = sig
def checksig(_mock_self, *args, **kwargs):
sig.bind(*args, **kwargs)
_copy_func_details(func, checksig)
type(mock)._mock_check_sig = checksig
def _copy_func_details(func, funcopy):
# we explicitly don't copy func.__dict__ into this copy as it would
# expose original attributes that should be mocked
for attribute in (
'__name__', '__doc__', '__text_signature__',
'__module__', '__defaults__', '__kwdefaults__',
):
try:
setattr(funcopy, attribute, getattr(func, attribute))
except AttributeError:
pass
def _callable(obj):
if isinstance(obj, type):
return True
if getattr(obj, '__call__', None) is not None:
return True
return False
def _is_list(obj):
# checks for list or tuples
# XXXX badly named!
return type(obj) in (list, tuple)
def _instance_callable(obj):
"""Given an object, return True if the object is callable.
For classes, return True if instances would be callable."""
if not isinstance(obj, type):
# already an instance
return getattr(obj, '__call__', None) is not None
# *could* be broken by a class overriding __mro__ or __dict__ via
# a metaclass
for base in (obj,) + obj.__mro__:
if base.__dict__.get('__call__') is not None:
return True
return False
def _set_signature(mock, original, instance=False):
# creates a function with signature (*args, **kwargs) that delegates to a
# mock. It still does signature checking by calling a lambda with the same
# signature as the original.
if not _callable(original):
return
skipfirst = isinstance(original, type)
result = _get_signature_object(original, instance, skipfirst)
if result is None:
return mock
func, sig = result
def checksig(*args, **kwargs):
sig.bind(*args, **kwargs)
_copy_func_details(func, checksig)
name = original.__name__
if not name.isidentifier():
name = 'funcopy'
context = {'_checksig_': checksig, 'mock': mock}
src = """def %s(*args, **kwargs):
_checksig_(*args, **kwargs)
return mock(*args, **kwargs)""" % name
exec (src, context)
funcopy = context[name]
_setup_func(funcopy, mock)
return funcopy
def _setup_func(funcopy, mock):
funcopy.mock = mock
# can't use isinstance with mocks
if not _is_instance_mock(mock):
return
def assert_called_with(*args, **kwargs):
return mock.assert_called_with(*args, **kwargs)
def assert_called(*args, **kwargs):
return mock.assert_called(*args, **kwargs)
def assert_not_called(*args, **kwargs):
return mock.assert_not_called(*args, **kwargs)
def assert_called_once(*args, **kwargs):
return mock.assert_called_once(*args, **kwargs)
def assert_called_once_with(*args, **kwargs):
return mock.assert_called_once_with(*args, **kwargs)
def assert_has_calls(*args, **kwargs):
return mock.assert_has_calls(*args, **kwargs)
def assert_any_call(*args, **kwargs):
return mock.assert_any_call(*args, **kwargs)
def reset_mock():
funcopy.method_calls = _CallList()
funcopy.mock_calls = _CallList()
mock.reset_mock()
ret = funcopy.return_value
        if _is_instance_mock(ret) and ret is not mock:
ret.reset_mock()
funcopy.called = False
funcopy.call_count = 0
funcopy.call_args = None
funcopy.call_args_list = _CallList()
funcopy.method_calls = _CallList()
funcopy.mock_calls = _CallList()
funcopy.return_value = mock.return_value
funcopy.side_effect = mock.side_effect
funcopy._mock_children = mock._mock_children
funcopy.assert_called_with = assert_called_with
funcopy.assert_called_once_with = assert_called_once_with
funcopy.assert_has_calls = assert_has_calls
funcopy.assert_any_call = assert_any_call
funcopy.reset_mock = reset_mock
funcopy.assert_called = assert_called
funcopy.assert_not_called = assert_not_called
funcopy.assert_called_once = assert_called_once
mock._mock_delegate = funcopy
def _is_magic(name):
return '__%s__' % name[2:-2] == name
class _SentinelObject(object):
"A unique, named, sentinel object."
def __init__(self, name):
self.name = name
def __repr__(self):
return 'sentinel.%s' % self.name
def __reduce__(self):
return 'sentinel.%s' % self.name
class _Sentinel(object):
"""Access attributes to return a named object, usable as a sentinel."""
def __init__(self):
self._sentinels = {}
def __getattr__(self, name):
if name == '__bases__':
# Without this help(unittest.mock) raises an exception
raise AttributeError
return self._sentinels.setdefault(name, _SentinelObject(name))
def __reduce__(self):
return 'sentinel'
sentinel = _Sentinel()
DEFAULT = sentinel.DEFAULT
_missing = sentinel.MISSING
_deleted = sentinel.DELETED
def _copy(value):
if type(value) in (dict, list, tuple, set):
return type(value)(value)
return value
_allowed_names = {
'return_value', '_mock_return_value', 'side_effect',
'_mock_side_effect', '_mock_parent', '_mock_new_parent',
'_mock_name', '_mock_new_name'
}
def _delegating_property(name):
_allowed_names.add(name)
_the_name = '_mock_' + name
def _get(self, name=name, _the_name=_the_name):
sig = self._mock_delegate
if sig is None:
return getattr(self, _the_name)
return getattr(sig, name)
def _set(self, value, name=name, _the_name=_the_name):
sig = self._mock_delegate
if sig is None:
self.__dict__[_the_name] = value
else:
setattr(sig, name, value)
return property(_get, _set)
class _CallList(list):
def __contains__(self, value):
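        # A list argument is treated as a candidate sub-sequence: return True
        # if it occurs contiguously anywhere in this call list (assert_has_calls
        # relies on this); any other value falls back to plain list membership.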
if not isinstance(value, list):
return list.__contains__(self, value)
len_value = len(value)
len_self = len(self)
if len_value > len_self:
return False
for i in range(0, len_self - len_value + 1):
sub_list = self[i:i+len_value]
if sub_list == value:
return True
return False
def __repr__(self):
return pprint.pformat(list(self))
def _check_and_set_parent(parent, value, name, new_name):
if not _is_instance_mock(value):
return False
if ((value._mock_name or value._mock_new_name) or
(value._mock_parent is not None) or
(value._mock_new_parent is not None)):
return False
_parent = parent
while _parent is not None:
# setting a mock (value) as a child or return value of itself
# should not modify the mock
if _parent is value:
return False
_parent = _parent._mock_new_parent
if new_name:
value._mock_new_parent = parent
value._mock_new_name = new_name
if name:
value._mock_parent = parent
value._mock_name = name
return True
# Internal class to identify if we wrapped an iterator object or not.
class _MockIter(object):
def __init__(self, obj):
self.obj = iter(obj)
def __iter__(self):
return self
def __next__(self):
return next(self.obj)
class Base(object):
_mock_return_value = DEFAULT
_mock_side_effect = None
def __init__(self, *args, **kwargs):
pass
class NonCallableMock(Base):
"""A non-callable version of `Mock`"""
def __new__(cls, *args, **kw):
# every instance has its own class
# so we can create magic methods on the
# class without stomping on other mocks
new = type(cls.__name__, (cls,), {'__doc__': cls.__doc__})
instance = object.__new__(new)
return instance
def __init__(
self, spec=None, wraps=None, name=None, spec_set=None,
parent=None, _spec_state=None, _new_name='', _new_parent=None,
_spec_as_instance=False, _eat_self=None, unsafe=False, **kwargs
):
if _new_parent is None:
_new_parent = parent
__dict__ = self.__dict__
__dict__['_mock_parent'] = parent
__dict__['_mock_name'] = name
__dict__['_mock_new_name'] = _new_name
__dict__['_mock_new_parent'] = _new_parent
__dict__['_mock_sealed'] = False
if spec_set is not None:
spec = spec_set
spec_set = True
if _eat_self is None:
_eat_self = parent is not None
self._mock_add_spec(spec, spec_set, _spec_as_instance, _eat_self)
__dict__['_mock_children'] = {}
__dict__['_mock_wraps'] = wraps
__dict__['_mock_delegate'] = None
__dict__['_mock_called'] = False
__dict__['_mock_call_args'] = None
__dict__['_mock_call_count'] = 0
__dict__['_mock_call_args_list'] = _CallList()
__dict__['_mock_mock_calls'] = _CallList()
__dict__['method_calls'] = _CallList()
__dict__['_mock_unsafe'] = unsafe
if kwargs:
self.configure_mock(**kwargs)
_safe_super(NonCallableMock, self).__init__(
spec, wraps, name, spec_set, parent,
_spec_state
)
def attach_mock(self, mock, attribute):
"""
Attach a mock as an attribute of this one, replacing its name and
parent. Calls to the attached mock will be recorded in the
`method_calls` and `mock_calls` attributes of this one."""
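        # Minimal sketch of the behaviour described above:
        #
        #     parent = Mock()
        #     child = Mock(return_value=None)
        #     parent.attach_mock(child, 'child')
        #     parent.child(1)
        #     parent.mock_calls      # [call.child(1)]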
mock._mock_parent = None
mock._mock_new_parent = None
mock._mock_name = ''
mock._mock_new_name = None
setattr(self, attribute, mock)
def mock_add_spec(self, spec, spec_set=False):
"""Add a spec to a mock. `spec` can either be an object or a
list of strings. Only attributes on the `spec` can be fetched as
attributes from the mock.
If `spec_set` is True then only attributes on the spec can be set."""
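        # Minimal sketch, using a list-of-strings spec:
        #
        #     m = Mock()
        #     m.mock_add_spec(['foo'])
        #     m.foo                  # allowed
        #     m.bar                  # raises AttributeError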
self._mock_add_spec(spec, spec_set)
def _mock_add_spec(self, spec, spec_set, _spec_as_instance=False,
_eat_self=False):
_spec_class = None
_spec_signature = None
if spec is not None and not _is_list(spec):
if isinstance(spec, type):
_spec_class = spec
else:
_spec_class = _get_class(spec)
res = _get_signature_object(spec,
_spec_as_instance, _eat_self)
_spec_signature = res and res[1]
spec = dir(spec)
__dict__ = self.__dict__
__dict__['_spec_class'] = _spec_class
__dict__['_spec_set'] = spec_set
__dict__['_spec_signature'] = _spec_signature
__dict__['_mock_methods'] = spec
def __get_return_value(self):
ret = self._mock_return_value
if self._mock_delegate is not None:
ret = self._mock_delegate.return_value
if ret is DEFAULT:
ret = self._get_child_mock(
_new_parent=self, _new_name='()'
)
self.return_value = ret
return ret
def __set_return_value(self, value):
if self._mock_delegate is not None:
self._mock_delegate.return_value = value
else:
self._mock_return_value = value
_check_and_set_parent(self, value, None, '()')
__return_value_doc = "The value to be returned when the mock is called."
return_value = property(__get_return_value, __set_return_value,
__return_value_doc)
@property
def __class__(self):
if self._spec_class is None:
return type(self)
return self._spec_class
called = _delegating_property('called')
call_count = _delegating_property('call_count')
call_args = _delegating_property('call_args')
call_args_list = _delegating_property('call_args_list')
mock_calls = _delegating_property('mock_calls')
def __get_side_effect(self):
delegated = self._mock_delegate
if delegated is None:
return self._mock_side_effect
sf = delegated.side_effect
if (sf is not None and not callable(sf)
and not isinstance(sf, _MockIter) and not _is_exception(sf)):
sf = _MockIter(sf)
delegated.side_effect = sf
return sf
def __set_side_effect(self, value):
value = _try_iter(value)
delegated = self._mock_delegate
if delegated is None:
self._mock_side_effect = value
else:
delegated.side_effect = value
side_effect = property(__get_side_effect, __set_side_effect)
    def reset_mock(self, visited=None, *, return_value=False, side_effect=False):
"Restore the mock object to its initial state."
if visited is None:
visited = []
if id(self) in visited:
return
visited.append(id(self))
self.called = False
self.call_args = None
self.call_count = 0
self.mock_calls = _CallList()
self.call_args_list = _CallList()
self.method_calls = _CallList()
if return_value:
self._mock_return_value = DEFAULT
if side_effect:
self._mock_side_effect = None
for child in self._mock_children.values():
if isinstance(child, _SpecState):
continue
child.reset_mock(visited)
ret = self._mock_return_value
if _is_instance_mock(ret) and ret is not self:
ret.reset_mock(visited)
def configure_mock(self, **kwargs):
"""Set attributes on the mock through keyword arguments.
Attributes plus return values and side effects can be set on child
mocks using standard dot notation and unpacking a dictionary in the
method call:
>>> attrs = {'method.return_value': 3, 'other.side_effect': KeyError}
>>> mock.configure_mock(**attrs)"""
for arg, val in sorted(kwargs.items(),
# we sort on the number of dots so that
# attributes are set before we set attributes on
# attributes
key=lambda entry: entry[0].count('.')):
args = arg.split('.')
final = args.pop()
obj = self
for entry in args:
obj = getattr(obj, entry)
setattr(obj, final, val)
def __getattr__(self, name):
if name in {'_mock_methods', '_mock_unsafe'}:
raise AttributeError(name)
elif self._mock_methods is not None:
if name not in self._mock_methods or name in _all_magics:
raise AttributeError("Mock object has no attribute %r" % name)
elif _is_magic(name):
raise AttributeError(name)
if not self._mock_unsafe:
if name.startswith(('assert', 'assret')):
raise AttributeError(name)
result = self._mock_children.get(name)
if result is _deleted:
raise AttributeError(name)
elif result is None:
wraps = None
if self._mock_wraps is not None:
# XXXX should we get the attribute without triggering code
# execution?
wraps = getattr(self._mock_wraps, name)
result = self._get_child_mock(
parent=self, name=name, wraps=wraps, _new_name=name,
_new_parent=self
)
self._mock_children[name] = result
elif isinstance(result, _SpecState):
result = create_autospec(
result.spec, result.spec_set, result.instance,
result.parent, result.name
)
self._mock_children[name] = result
return result
def _extract_mock_name(self):
_name_list = [self._mock_new_name]
_parent = self._mock_new_parent
last = self
dot = '.'
if _name_list == ['()']:
dot = ''
seen = set()
while _parent is not None:
last = _parent
_name_list.append(_parent._mock_new_name + dot)
dot = '.'
if _parent._mock_new_name == '()':
dot = ''
_parent = _parent._mock_new_parent
# use ids here so as not to call __hash__ on the mocks
if id(_parent) in seen:
break
seen.add(id(_parent))
_name_list = list(reversed(_name_list))
_first = last._mock_name or 'mock'
if len(_name_list) > 1:
if _name_list[1] not in ('()', '().'):
_first += '.'
_name_list[0] = _first
return ''.join(_name_list)
def __repr__(self):
name = self._extract_mock_name()
name_string = ''
if name not in ('mock', 'mock.'):
name_string = ' name=%r' % name
spec_string = ''
if self._spec_class is not None:
spec_string = ' spec=%r'
if self._spec_set:
spec_string = ' spec_set=%r'
spec_string = spec_string % self._spec_class.__name__
return "<%s%s%s id='%s'>" % (
type(self).__name__,
name_string,
spec_string,
id(self)
)
def __dir__(self):
"""Filter the output of `dir(mock)` to only useful members."""
if not FILTER_DIR:
return object.__dir__(self)
extras = self._mock_methods or []
from_type = dir(type(self))
from_dict = list(self.__dict__)
from_type = [e for e in from_type if not e.startswith('_')]
from_dict = [e for e in from_dict if not e.startswith('_') or
_is_magic(e)]
return sorted(set(extras + from_type + from_dict +
list(self._mock_children)))
def __setattr__(self, name, value):
if name in _allowed_names:
# property setters go through here
return object.__setattr__(self, name, value)
elif (self._spec_set and self._mock_methods is not None and
name not in self._mock_methods and
name not in self.__dict__):
raise AttributeError("Mock object has no attribute '%s'" % name)
elif name in _unsupported_magics:
msg = 'Attempting to set unsupported magic method %r.' % name
raise AttributeError(msg)
elif name in _all_magics:
if self._mock_methods is not None and name not in self._mock_methods:
raise AttributeError("Mock object has no attribute '%s'" % name)
if not _is_instance_mock(value):
setattr(type(self), name, _get_method(name, value))
original = value
value = lambda *args, **kw: original(self, *args, **kw)
else:
# only set _new_name and not name so that mock_calls is tracked
# but not method calls
_check_and_set_parent(self, value, None, name)
setattr(type(self), name, value)
self._mock_children[name] = value
elif name == '__class__':
self._spec_class = value
return
else:
if _check_and_set_parent(self, value, name, name):
self._mock_children[name] = value
if self._mock_sealed and not hasattr(self, name):
mock_name = f'{self._extract_mock_name()}.{name}'
raise AttributeError(f'Cannot set {mock_name}')
return object.__setattr__(self, name, value)
def __delattr__(self, name):
if name in _all_magics and name in type(self).__dict__:
delattr(type(self), name)
if name not in self.__dict__:
# for magic methods that are still MagicProxy objects and
# not set on the instance itself
return
if name in self.__dict__:
object.__delattr__(self, name)
obj = self._mock_children.get(name, _missing)
if obj is _deleted:
raise AttributeError(name)
if obj is not _missing:
del self._mock_children[name]
self._mock_children[name] = _deleted
def _format_mock_call_signature(self, args, kwargs):
name = self._mock_name or 'mock'
return _format_call_signature(name, args, kwargs)
def _format_mock_failure_message(self, args, kwargs):
message = 'Expected call: %s\nActual call: %s'
expected_string = self._format_mock_call_signature(args, kwargs)
call_args = self.call_args
if len(call_args) == 3:
call_args = call_args[1:]
actual_string = self._format_mock_call_signature(*call_args)
return message % (expected_string, actual_string)
def _call_matcher(self, _call):
"""
Given a call (or simply an (args, kwargs) tuple), return a
comparison key suitable for matching with other calls.
This is a best effort method which relies on the spec's signature,
if available, or falls back on the arguments themselves.
"""
sig = self._spec_signature
if sig is not None:
if len(_call) == 2:
name = ''
args, kwargs = _call
else:
name, args, kwargs = _call
try:
return name, sig.bind(*args, **kwargs)
except TypeError as e:
return e.with_traceback(None)
else:
return _call
def assert_not_called(_mock_self):
"""assert that the mock was never called.
"""
self = _mock_self
if self.call_count != 0:
msg = ("Expected '%s' to not have been called. Called %s times." %
(self._mock_name or 'mock', self.call_count))
raise AssertionError(msg)
def assert_called(_mock_self):
"""assert that the mock was called at least once
"""
self = _mock_self
if self.call_count == 0:
msg = ("Expected '%s' to have been called." %
self._mock_name or 'mock')
raise AssertionError(msg)
def assert_called_once(_mock_self):
"""assert that the mock was called only once.
"""
self = _mock_self
if not self.call_count == 1:
msg = ("Expected '%s' to have been called once. Called %s times." %
(self._mock_name or 'mock', self.call_count))
raise AssertionError(msg)
def assert_called_with(_mock_self, *args, **kwargs):
"""assert that the mock was called with the specified arguments.
Raises an AssertionError if the args and keyword args passed in are
different to the last call to the mock."""
self = _mock_self
if self.call_args is None:
expected = self._format_mock_call_signature(args, kwargs)
raise AssertionError('Expected call: %s\nNot called' % (expected,))
def _error_message():
msg = self._format_mock_failure_message(args, kwargs)
return msg
expected = self._call_matcher((args, kwargs))
actual = self._call_matcher(self.call_args)
if expected != actual:
cause = expected if isinstance(expected, Exception) else None
raise AssertionError(_error_message()) from cause
def assert_called_once_with(_mock_self, *args, **kwargs):
"""assert that the mock was called exactly once and that that call was
with the specified arguments."""
self = _mock_self
if not self.call_count == 1:
msg = ("Expected '%s' to be called once. Called %s times." %
(self._mock_name or 'mock', self.call_count))
raise AssertionError(msg)
return self.assert_called_with(*args, **kwargs)
def assert_has_calls(self, calls, any_order=False):
"""assert the mock has been called with the specified calls.
The `mock_calls` list is checked for the calls.
If `any_order` is False (the default) then the calls must be
sequential. There can be extra calls before or after the
specified calls.
If `any_order` is True then the calls can be in any order, but
they must all appear in `mock_calls`."""
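        # Minimal sketch of both modes:
        #
        #     m = Mock()
        #     m(1); m(2); m(3)
        #     m.assert_has_calls([call(1), call(2)])                   # passes
        #     m.assert_has_calls([call(3), call(1)], any_order=True)   # passes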
expected = [self._call_matcher(c) for c in calls]
cause = expected if isinstance(expected, Exception) else None
all_calls = _CallList(self._call_matcher(c) for c in self.mock_calls)
if not any_order:
if expected not in all_calls:
raise AssertionError(
'Calls not found.\nExpected: %r\n'
'Actual: %r' % (_CallList(calls), self.mock_calls)
) from cause
return
all_calls = list(all_calls)
not_found = []
for kall in expected:
try:
all_calls.remove(kall)
except ValueError:
not_found.append(kall)
if not_found:
raise AssertionError(
'%r not all found in call list' % (tuple(not_found),)
) from cause
def assert_any_call(self, *args, **kwargs):
"""assert the mock has been called with the specified arguments.
The assert passes if the mock has *ever* been called, unlike
`assert_called_with` and `assert_called_once_with` that only pass if
the call is the most recent one."""
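        # Minimal sketch of the difference from assert_called_with:
        #
        #     m = Mock()
        #     m(1); m(2)
        #     m.assert_any_call(1)      # passes: (1) happened at some point
        #     m.assert_called_with(1)   # would fail: the most recent call is (2)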
expected = self._call_matcher((args, kwargs))
actual = [self._call_matcher(c) for c in self.call_args_list]
if expected not in actual:
cause = expected if isinstance(expected, Exception) else None
expected_string = self._format_mock_call_signature(args, kwargs)
raise AssertionError(
'%s call not found' % expected_string
) from cause
def _get_child_mock(self, **kw):
"""Create the child mocks for attributes and return value.
By default child mocks will be the same type as the parent.
Subclasses of Mock may want to override this to customize the way
child mocks are made.
For non-callable mocks the callable variant will be used (rather than
any custom subclass)."""
_type = type(self)
if not issubclass(_type, CallableMixin):
if issubclass(_type, NonCallableMagicMock):
klass = MagicMock
            elif issubclass(_type, NonCallableMock):
klass = Mock
else:
klass = _type.__mro__[1]
if self._mock_sealed:
attribute = "." + kw["name"] if "name" in kw else "()"
mock_name = self._extract_mock_name() + attribute
raise AttributeError(mock_name)
return klass(**kw)
def _try_iter(obj):
if obj is None:
return obj
if _is_exception(obj):
return obj
if _callable(obj):
return obj
try:
return iter(obj)
except TypeError:
# XXXX backwards compatibility
# but this will blow up on first call - so maybe we should fail early?
return obj
class CallableMixin(Base):
def __init__(self, spec=None, side_effect=None, return_value=DEFAULT,
wraps=None, name=None, spec_set=None, parent=None,
_spec_state=None, _new_name='', _new_parent=None, **kwargs):
self.__dict__['_mock_return_value'] = return_value
_safe_super(CallableMixin, self).__init__(
spec, wraps, name, spec_set, parent,
_spec_state, _new_name, _new_parent, **kwargs
)
self.side_effect = side_effect
def _mock_check_sig(self, *args, **kwargs):
# stub method that can be replaced with one with a specific signature
pass
def __call__(_mock_self, *args, **kwargs):
# can't use self in-case a function / method we are mocking uses self
# in the signature
_mock_self._mock_check_sig(*args, **kwargs)
return _mock_self._mock_call(*args, **kwargs)
def _mock_call(_mock_self, *args, **kwargs):
self = _mock_self
self.called = True
self.call_count += 1
_new_name = self._mock_new_name
_new_parent = self._mock_new_parent
_call = _Call((args, kwargs), two=True)
self.call_args = _call
self.call_args_list.append(_call)
self.mock_calls.append(_Call(('', args, kwargs)))
seen = set()
skip_next_dot = _new_name == '()'
do_method_calls = self._mock_parent is not None
name = self._mock_name
while _new_parent is not None:
this_mock_call = _Call((_new_name, args, kwargs))
if _new_parent._mock_new_name:
dot = '.'
if skip_next_dot:
dot = ''
skip_next_dot = False
if _new_parent._mock_new_name == '()':
skip_next_dot = True
_new_name = _new_parent._mock_new_name + dot + _new_name
if do_method_calls:
if _new_name == name:
this_method_call = this_mock_call
else:
this_method_call = _Call((name, args, kwargs))
_new_parent.method_calls.append(this_method_call)
do_method_calls = _new_parent._mock_parent is not None
if do_method_calls:
name = _new_parent._mock_name + '.' + name
_new_parent.mock_calls.append(this_mock_call)
_new_parent = _new_parent._mock_new_parent
# use ids here so as not to call __hash__ on the mocks
_new_parent_id = id(_new_parent)
if _new_parent_id in seen:
break
seen.add(_new_parent_id)
ret_val = DEFAULT
effect = self.side_effect
if effect is not None:
if _is_exception(effect):
raise effect
if not _callable(effect):
result = next(effect)
if _is_exception(result):
raise result
if result is DEFAULT:
result = self.return_value
return result
ret_val = effect(*args, **kwargs)
if (self._mock_wraps is not None and
self._mock_return_value is DEFAULT):
return self._mock_wraps(*args, **kwargs)
if ret_val is DEFAULT:
ret_val = self.return_value
return ret_val
class Mock(CallableMixin, NonCallableMock):
"""
Create a new `Mock` object. `Mock` takes several optional arguments
that specify the behaviour of the Mock object:
* `spec`: This can be either a list of strings or an existing object (a
class or instance) that acts as the specification for the mock object. If
you pass in an object then a list of strings is formed by calling dir on
the object (excluding unsupported magic attributes and methods). Accessing
any attribute not in this list will raise an `AttributeError`.
If `spec` is an object (rather than a list of strings) then
`mock.__class__` returns the class of the spec object. This allows mocks
to pass `isinstance` tests.
* `spec_set`: A stricter variant of `spec`. If used, attempting to *set*
or get an attribute on the mock that isn't on the object passed as
`spec_set` will raise an `AttributeError`.
* `side_effect`: A function to be called whenever the Mock is called. See
the `side_effect` attribute. Useful for raising exceptions or
dynamically changing return values. The function is called with the same
arguments as the mock, and unless it returns `DEFAULT`, the return
value of this function is used as the return value.
If `side_effect` is an iterable then each call to the mock will return
the next value from the iterable. If any of the members of the iterable
are exceptions they will be raised instead of returned.
* `return_value`: The value returned when the mock is called. By default
this is a new Mock (created on first access). See the
`return_value` attribute.
* `wraps`: Item for the mock object to wrap. If `wraps` is not None then
calling the Mock will pass the call through to the wrapped object
(returning the real result). Attribute access on the mock will return a
Mock object that wraps the corresponding attribute of the wrapped object
(so attempting to access an attribute that doesn't exist will raise an
`AttributeError`).
If the mock has an explicit `return_value` set then calls are not passed
to the wrapped object and the `return_value` is returned instead.
* `name`: If the mock has a name then it will be used in the repr of the
mock. This can be useful for debugging. The name is propagated to child
mocks.
Mocks can also be called with arbitrary keyword arguments. These will be
used to set attributes on the mock after it is created.
"""
def _dot_lookup(thing, comp, import_path):
try:
return getattr(thing, comp)
except AttributeError:
__import__(import_path)
return getattr(thing, comp)
def _importer(target):
components = target.split('.')
import_path = components.pop(0)
thing = __import__(import_path)
for comp in components:
import_path += ".%s" % comp
thing = _dot_lookup(thing, comp, import_path)
return thing
def _is_started(patcher):
# XXXX horrible
return hasattr(patcher, 'is_local')
class _patch(object):
attribute_name = None
_active_patches = []
def __init__(
self, getter, attribute, new, spec, create,
spec_set, autospec, new_callable, kwargs
):
if new_callable is not None:
if new is not DEFAULT:
raise ValueError(
"Cannot use 'new' and 'new_callable' together"
)
if autospec is not None:
raise ValueError(
"Cannot use 'autospec' and 'new_callable' together"
)
self.getter = getter
self.attribute = attribute
self.new = new
self.new_callable = new_callable
self.spec = spec
self.create = create
self.has_local = False
self.spec_set = spec_set
self.autospec = autospec
self.kwargs = kwargs
self.additional_patchers = []
def copy(self):
patcher = _patch(
self.getter, self.attribute, self.new, self.spec,
self.create, self.spec_set,
self.autospec, self.new_callable, self.kwargs
)
patcher.attribute_name = self.attribute_name
patcher.additional_patchers = [
p.copy() for p in self.additional_patchers
]
return patcher
def __call__(self, func):
if isinstance(func, type):
return self.decorate_class(func)
return self.decorate_callable(func)
def decorate_class(self, klass):
for attr in dir(klass):
if not attr.startswith(patch.TEST_PREFIX):
continue
attr_value = getattr(klass, attr)
if not hasattr(attr_value, "__call__"):
continue
patcher = self.copy()
setattr(klass, attr, patcher(attr_value))
return klass
def decorate_callable(self, func):
if hasattr(func, 'patchings'):
func.patchings.append(self)
return func
@wraps(func)
def patched(*args, **keywargs):
extra_args = []
entered_patchers = []
exc_info = tuple()
try:
for patching in patched.patchings:
arg = patching.__enter__()
entered_patchers.append(patching)
if patching.attribute_name is not None:
keywargs.update(arg)
elif patching.new is DEFAULT:
extra_args.append(arg)
args += tuple(extra_args)
return func(*args, **keywargs)
except:
if (patching not in entered_patchers and
_is_started(patching)):
# the patcher may have been started, but an exception
# raised whilst entering one of its additional_patchers
entered_patchers.append(patching)
# Pass the exception to __exit__
exc_info = sys.exc_info()
# re-raise the exception
raise
finally:
for patching in reversed(entered_patchers):
patching.__exit__(*exc_info)
patched.patchings = [self]
return patched
def get_original(self):
target = self.getter()
name = self.attribute
original = DEFAULT
local = False
try:
original = target.__dict__[name]
except (AttributeError, KeyError):
original = getattr(target, name, DEFAULT)
else:
local = True
if name in _builtins and isinstance(target, ModuleType):
self.create = True
if not self.create and original is DEFAULT:
raise AttributeError(
"%s does not have the attribute %r" % (target, name)
)
return original, local
def __enter__(self):
"""Perform the patch."""
new, spec, spec_set = self.new, self.spec, self.spec_set
autospec, kwargs = self.autospec, self.kwargs
new_callable = self.new_callable
self.target = self.getter()
# normalise False to None
if spec is False:
spec = None
if spec_set is False:
spec_set = None
if autospec is False:
autospec = None
if spec is not None and autospec is not None:
raise TypeError("Can't specify spec and autospec")
if ((spec is not None or autospec is not None) and
spec_set not in (True, None)):
raise TypeError("Can't provide explicit spec_set *and* spec or autospec")
original, local = self.get_original()
if new is DEFAULT and autospec is None:
inherit = False
if spec is True:
# set spec to the object we are replacing
spec = original
if spec_set is True:
spec_set = original
spec = None
elif spec is not None:
if spec_set is True:
spec_set = spec
spec = None
elif spec_set is True:
spec_set = original
if spec is not None or spec_set is not None:
if original is DEFAULT:
raise TypeError("Can't use 'spec' with create=True")
if isinstance(original, type):
# If we're patching out a class and there is a spec
inherit = True
Klass = MagicMock
_kwargs = {}
if new_callable is not None:
Klass = new_callable
elif spec is not None or spec_set is not None:
this_spec = spec
if spec_set is not None:
this_spec = spec_set
if _is_list(this_spec):
not_callable = '__call__' not in this_spec
else:
not_callable = not callable(this_spec)
if not_callable:
Klass = NonCallableMagicMock
if spec is not None:
_kwargs['spec'] = spec
if spec_set is not None:
_kwargs['spec_set'] = spec_set
# add a name to mocks
if (isinstance(Klass, type) and
issubclass(Klass, NonCallableMock) and self.attribute):
_kwargs['name'] = self.attribute
_kwargs.update(kwargs)
new = Klass(**_kwargs)
if inherit and _is_instance_mock(new):
# we can only tell if the instance should be callable if the
# spec is not a list
this_spec = spec
if spec_set is not None:
this_spec = spec_set
if (not _is_list(this_spec) and not
_instance_callable(this_spec)):
Klass = NonCallableMagicMock
_kwargs.pop('name')
new.return_value = Klass(_new_parent=new, _new_name='()',
**_kwargs)
elif autospec is not None:
# spec is ignored, new *must* be default, spec_set is treated
# as a boolean. Should we check spec is not None and that spec_set
# is a bool?
if new is not DEFAULT:
raise TypeError(
"autospec creates the mock for you. Can't specify "
"autospec and new."
)
if original is DEFAULT:
raise TypeError("Can't use 'autospec' with create=True")
spec_set = bool(spec_set)
if autospec is True:
autospec = original
new = create_autospec(autospec, spec_set=spec_set,
_name=self.attribute, **kwargs)
elif kwargs:
# can't set keyword args when we aren't creating the mock
# XXXX If new is a Mock we could call new.configure_mock(**kwargs)
raise TypeError("Can't pass kwargs to a mock we aren't creating")
new_attr = new
self.temp_original = original
self.is_local = local
setattr(self.target, self.attribute, new_attr)
if self.attribute_name is not None:
extra_args = {}
if self.new is DEFAULT:
extra_args[self.attribute_name] = new
for patching in self.additional_patchers:
arg = patching.__enter__()
if patching.new is DEFAULT:
extra_args.update(arg)
return extra_args
return new
def __exit__(self, *exc_info):
"""Undo the patch."""
if not _is_started(self):
raise RuntimeError('stop called on unstarted patcher')
if self.is_local and self.temp_original is not DEFAULT:
setattr(self.target, self.attribute, self.temp_original)
else:
delattr(self.target, self.attribute)
if not self.create and (not hasattr(self.target, self.attribute) or
self.attribute in ('__doc__', '__module__',
'__defaults__', '__annotations__',
'__kwdefaults__')):
# needed for proxy objects like django settings
setattr(self.target, self.attribute, self.temp_original)
del self.temp_original
del self.is_local
del self.target
for patcher in reversed(self.additional_patchers):
if _is_started(patcher):
patcher.__exit__(*exc_info)
def start(self):
"""Activate a patch, returning any created mock."""
result = self.__enter__()
self._active_patches.append(self)
return result
def stop(self):
"""Stop an active patch."""
try:
self._active_patches.remove(self)
except ValueError:
# If the patch hasn't been started this will fail
pass
return self.__exit__()
def _get_target(target):
try:
target, attribute = target.rsplit('.', 1)
except (TypeError, ValueError):
raise TypeError("Need a valid target to patch. You supplied: %r" %
(target,))
getter = lambda: _importer(target)
return getter, attribute
def _patch_object(
target, attribute, new=DEFAULT, spec=None,
create=False, spec_set=None, autospec=None,
new_callable=None, **kwargs
):
"""
patch the named member (`attribute`) on an object (`target`) with a mock
object.
`patch.object` can be used as a decorator, class decorator or a context
manager. Arguments `new`, `spec`, `create`, `spec_set`,
`autospec` and `new_callable` have the same meaning as for `patch`. Like
`patch`, `patch.object` takes arbitrary keyword arguments for configuring
the mock object it creates.
When used as a class decorator `patch.object` honours `patch.TEST_PREFIX`
for choosing which methods to wrap.
"""
getter = lambda: target
return _patch(
getter, attribute, new, spec, create,
spec_set, autospec, new_callable, kwargs
)
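# A minimal usage sketch; SomeClass and method are placeholder names:
#
#     with patch.object(SomeClass, 'method', return_value=42) as mocked:
#         assert SomeClass().method() == 42
#         mocked.assert_called_once_with()
#
# The original attribute is restored when the with block exits.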
def _patch_multiple(target, spec=None, create=False, spec_set=None,
autospec=None, new_callable=None, **kwargs):
"""Perform multiple patches in a single call. It takes the object to be
patched (either as an object or a string to fetch the object by importing)
and keyword arguments for the patches::
with patch.multiple(settings, FIRST_PATCH='one', SECOND_PATCH='two'):
...
Use `DEFAULT` as the value if you want `patch.multiple` to create
mocks for you. In this case the created mocks are passed into a decorated
function by keyword, and a dictionary is returned when `patch.multiple` is
used as a context manager.
`patch.multiple` can be used as a decorator, class decorator or a context
manager. The arguments `spec`, `spec_set`, `create`,
`autospec` and `new_callable` have the same meaning as for `patch`. These
arguments will be applied to *all* patches done by `patch.multiple`.
When used as a class decorator `patch.multiple` honours `patch.TEST_PREFIX`
for choosing which methods to wrap.
"""
if type(target) is str:
getter = lambda: _importer(target)
else:
getter = lambda: target
if not kwargs:
raise ValueError(
'Must supply at least one keyword argument with patch.multiple'
)
# need to wrap in a list for python 3, where items is a view
items = list(kwargs.items())
attribute, new = items[0]
patcher = _patch(
getter, attribute, new, spec, create, spec_set,
autospec, new_callable, {}
)
patcher.attribute_name = attribute
for attribute, new in items[1:]:
this_patcher = _patch(
getter, attribute, new, spec, create, spec_set,
autospec, new_callable, {}
)
this_patcher.attribute_name = attribute
patcher.additional_patchers.append(this_patcher)
return patcher
def patch(
target, new=DEFAULT, spec=None, create=False,
spec_set=None, autospec=None, new_callable=None, **kwargs
):
"""
`patch` acts as a function decorator, class decorator or a context
manager. Inside the body of the function or with statement, the `target`
is patched with a `new` object. When the function/with statement exits
the patch is undone.
If `new` is omitted, then the target is replaced with a
`MagicMock`. If `patch` is used as a decorator and `new` is
omitted, the created mock is passed in as an extra argument to the
decorated function. If `patch` is used as a context manager the created
mock is returned by the context manager.
`target` should be a string in the form `'package.module.ClassName'`. The
`target` is imported and the specified object replaced with the `new`
object, so the `target` must be importable from the environment you are
calling `patch` from. The target is imported when the decorated function
is executed, not at decoration time.
The `spec` and `spec_set` keyword arguments are passed to the `MagicMock`
if patch is creating one for you.
In addition you can pass `spec=True` or `spec_set=True`, which causes
patch to pass in the object being mocked as the spec/spec_set object.
`new_callable` allows you to specify a different class, or callable object,
that will be called to create the `new` object. By default `MagicMock` is
used.
A more powerful form of `spec` is `autospec`. If you set `autospec=True`
then the mock will be created with a spec from the object being replaced.
All attributes of the mock will also have the spec of the corresponding
attribute of the object being replaced. Methods and functions being
mocked will have their arguments checked and will raise a `TypeError` if
they are called with the wrong signature. For mocks replacing a class,
their return value (the 'instance') will have the same spec as the class.
Instead of `autospec=True` you can pass `autospec=some_object` to use an
arbitrary object as the spec instead of the one being replaced.
By default `patch` will fail to replace attributes that don't exist. If
you pass in `create=True`, and the attribute doesn't exist, patch will
create the attribute for you when the patched function is called, and
delete it again afterwards. This is useful for writing tests against
attributes that your production code creates at runtime. It is off by
default because it can be dangerous. With it switched on you can write
passing tests against APIs that don't actually exist!
Patch can be used as a `TestCase` class decorator. It works by
decorating each test method in the class. This reduces the boilerplate
code when your test methods share a common patchings set. `patch` finds
tests by looking for method names that start with `patch.TEST_PREFIX`.
By default this is `test`, which matches the way `unittest` finds tests.
You can specify an alternative prefix by setting `patch.TEST_PREFIX`.
Patch can be used as a context manager, with the with statement. Here the
patching applies to the indented block after the with statement. If you
use "as" then the patched object will be bound to the name after the
"as"; very useful if `patch` is creating a mock object for you.
`patch` takes arbitrary keyword arguments. These will be passed to
the `Mock` (or `new_callable`) on construction.
`patch.dict(...)`, `patch.multiple(...)` and `patch.object(...)` are
available for alternate use-cases.
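    A minimal sketch of typical use (the dotted path below is a hypothetical
    example target)::
        with patch('package.module.ClassName') as MockClass:
            MockClass.return_value.method.return_value = 3
            ...
        # the real ClassName is restored once the block exits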
"""
getter, attribute = _get_target(target)
return _patch(
getter, attribute, new, spec, create,
spec_set, autospec, new_callable, kwargs
)
class _patch_dict(object):
"""
Patch a dictionary, or dictionary like object, and restore the dictionary
to its original state after the test.
`in_dict` can be a dictionary or a mapping like container. If it is a
mapping then it must at least support getting, setting and deleting items
plus iterating over keys.
`in_dict` can also be a string specifying the name of the dictionary, which
will then be fetched by importing it.
`values` can be a dictionary of values to set in the dictionary. `values`
can also be an iterable of `(key, value)` pairs.
If `clear` is True then the dictionary will be cleared before the new
values are set.
`patch.dict` can also be called with arbitrary keyword arguments to set
values in the dictionary::
with patch.dict('sys.modules', mymodule=Mock(), other_module=Mock()):
...
`patch.dict` can be used as a context manager, decorator or class
decorator. When used as a class decorator `patch.dict` honours
`patch.TEST_PREFIX` for choosing which methods to wrap.
"""
def __init__(self, in_dict, values=(), clear=False, **kwargs):
if isinstance(in_dict, str):
in_dict = _importer(in_dict)
self.in_dict = in_dict
# support any argument supported by dict(...) constructor
self.values = dict(values)
self.values.update(kwargs)
self.clear = clear
self._original = None
def __call__(self, f):
if isinstance(f, type):
return self.decorate_class(f)
@wraps(f)
def _inner(*args, **kw):
self._patch_dict()
try:
return f(*args, **kw)
finally:
self._unpatch_dict()
return _inner
def decorate_class(self, klass):
for attr in dir(klass):
attr_value = getattr(klass, attr)
if (attr.startswith(patch.TEST_PREFIX) and
hasattr(attr_value, "__call__")):
decorator = _patch_dict(self.in_dict, self.values, self.clear)
decorated = decorator(attr_value)
setattr(klass, attr, decorated)
return klass
def __enter__(self):
"""Patch the dict."""
self._patch_dict()
def _patch_dict(self):
values = self.values
in_dict = self.in_dict
clear = self.clear
try:
original = in_dict.copy()
except AttributeError:
# dict like object with no copy method
# must support iteration over keys
original = {}
for key in in_dict:
original[key] = in_dict[key]
self._original = original
if clear:
_clear_dict(in_dict)
try:
in_dict.update(values)
except AttributeError:
# dict like object with no update method
for key in values:
in_dict[key] = values[key]
def _unpatch_dict(self):
in_dict = self.in_dict
original = self._original
_clear_dict(in_dict)
try:
in_dict.update(original)
except AttributeError:
for key in original:
in_dict[key] = original[key]
def __exit__(self, *args):
"""Unpatch the dict."""
self._unpatch_dict()
return False
start = __enter__
stop = __exit__
def _clear_dict(in_dict):
try:
in_dict.clear()
except AttributeError:
keys = list(in_dict)
for key in keys:
del in_dict[key]
def _patch_stopall():
"""Stop all active patches. LIFO to unroll nested patches."""
for patch in reversed(_patch._active_patches):
patch.stop()
patch.object = _patch_object
patch.dict = _patch_dict
patch.multiple = _patch_multiple
patch.stopall = _patch_stopall
patch.TEST_PREFIX = 'test'
magic_methods = (
"lt le gt ge eq ne "
"getitem setitem delitem "
"len contains iter "
"hash str sizeof "
"enter exit "
# we added divmod and rdivmod here instead of numerics
# because there is no idivmod
"divmod rdivmod neg pos abs invert "
"complex int float index "
"trunc floor ceil "
"bool next "
)
numerics = (
"add sub mul matmul div floordiv mod lshift rshift and xor or pow truediv"
)
inplace = ' '.join('i%s' % n for n in numerics.split())
right = ' '.join('r%s' % n for n in numerics.split())
# not including __prepare__, __instancecheck__, __subclasscheck__
# (as they are metaclass methods)
# __del__ is not supported at all as it causes problems if it exists
_non_defaults = {
'__get__', '__set__', '__delete__', '__reversed__', '__missing__',
'__reduce__', '__reduce_ex__', '__getinitargs__', '__getnewargs__',
'__getstate__', '__setstate__', '__getformat__', '__setformat__',
'__repr__', '__dir__', '__subclasses__', '__format__',
'__getnewargs_ex__',
}
def _get_method(name, func):
"Turns a callable object (like a mock) into a real function"
def method(self, *args, **kw):
return func(self, *args, **kw)
method.__name__ = name
return method
_magics = {
'__%s__' % method for method in
' '.join([magic_methods, numerics, inplace, right]).split()
}
_all_magics = _magics | _non_defaults
_unsupported_magics = {
'__getattr__', '__setattr__',
    '__init__', '__new__', '__prepare__',
'__instancecheck__', '__subclasscheck__',
'__del__'
}
_calculate_return_value = {
'__hash__': lambda self: object.__hash__(self),
'__str__': lambda self: object.__str__(self),
'__sizeof__': lambda self: object.__sizeof__(self),
}
_return_values = {
'__lt__': NotImplemented,
'__gt__': NotImplemented,
'__le__': NotImplemented,
'__ge__': NotImplemented,
'__int__': 1,
'__contains__': False,
'__len__': 0,
'__exit__': False,
'__complex__': 1j,
'__float__': 1.0,
'__bool__': True,
'__index__': 1,
}
def _get_eq(self):
def __eq__(other):
ret_val = self.__eq__._mock_return_value
if ret_val is not DEFAULT:
return ret_val
if self is other:
return True
return NotImplemented
return __eq__
def _get_ne(self):
def __ne__(other):
        ret_val = self.__ne__._mock_return_value
        if ret_val is not DEFAULT:
            return ret_val
if self is other:
return False
return NotImplemented
return __ne__
def _get_iter(self):
def __iter__():
ret_val = self.__iter__._mock_return_value
if ret_val is DEFAULT:
return iter([])
# if ret_val was already an iterator, then calling iter on it should
# return the iterator unchanged
return iter(ret_val)
return __iter__
_side_effect_methods = {
'__eq__': _get_eq,
'__ne__': _get_ne,
'__iter__': _get_iter,
}
def _set_return_value(mock, method, name):
fixed = _return_values.get(name, DEFAULT)
if fixed is not DEFAULT:
method.return_value = fixed
return
    return_calculator = _calculate_return_value.get(name)
    if return_calculator is not None:
        try:
            return_value = return_calculator(mock)
except AttributeError:
# XXXX why do we return AttributeError here?
# set it as a side_effect instead?
return_value = AttributeError(name)
method.return_value = return_value
return
side_effector = _side_effect_methods.get(name)
if side_effector is not None:
method.side_effect = side_effector(mock)
class MagicMixin(object):
def __init__(self, *args, **kw):
self._mock_set_magics() # make magic work for kwargs in init
_safe_super(MagicMixin, self).__init__(*args, **kw)
self._mock_set_magics() # fix magic broken by upper level init
def _mock_set_magics(self):
these_magics = _magics
if getattr(self, "_mock_methods", None) is not None:
these_magics = _magics.intersection(self._mock_methods)
        remove_magics = _magics - these_magics
for entry in remove_magics:
if entry in type(self).__dict__:
# remove unneeded magic methods
delattr(self, entry)
# don't overwrite existing attributes if called a second time
these_magics = these_magics - set(type(self).__dict__)
_type = type(self)
for entry in these_magics:
setattr(_type, entry, MagicProxy(entry, self))
class NonCallableMagicMock(MagicMixin, NonCallableMock):
"""A version of `MagicMock` that isn't callable."""
def mock_add_spec(self, spec, spec_set=False):
"""Add a spec to a mock. `spec` can either be an object or a
list of strings. Only attributes on the `spec` can be fetched as
attributes from the mock.
If `spec_set` is True then only attributes on the spec can be set."""
self._mock_add_spec(spec, spec_set)
self._mock_set_magics()
class MagicMock(MagicMixin, Mock):
"""
MagicMock is a subclass of Mock with default implementations
of most of the magic methods. You can use MagicMock without having to
configure the magic methods yourself.
If you use the `spec` or `spec_set` arguments then *only* magic
methods that exist in the spec will be created.
Attributes and the return value of a `MagicMock` will also be `MagicMocks`.
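    A small illustrative sketch::
        mock = MagicMock()
        mock.__len__.return_value = 3
        len(mock)    # -> 3
        int(mock)    # -> 1, the preconfigured default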
"""
def mock_add_spec(self, spec, spec_set=False):
"""Add a spec to a mock. `spec` can either be an object or a
list of strings. Only attributes on the `spec` can be fetched as
attributes from the mock.
If `spec_set` is True then only attributes on the spec can be set."""
self._mock_add_spec(spec, spec_set)
self._mock_set_magics()
class MagicProxy(object):
def __init__(self, name, parent):
self.name = name
self.parent = parent
def __call__(self, *args, **kwargs):
m = self.create_mock()
return m(*args, **kwargs)
def create_mock(self):
entry = self.name
parent = self.parent
m = parent._get_child_mock(name=entry, _new_name=entry,
_new_parent=parent)
setattr(parent, entry, m)
_set_return_value(parent, m, entry)
return m
def __get__(self, obj, _type=None):
return self.create_mock()
class _ANY(object):
"A helper object that compares equal to everything."
def __eq__(self, other):
return True
def __ne__(self, other):
return False
def __repr__(self):
return '<ANY>'
ANY = _ANY()
def _format_call_signature(name, args, kwargs):
message = '%s(%%s)' % name
formatted_args = ''
args_string = ', '.join([repr(arg) for arg in args])
kwargs_string = ', '.join([
'%s=%r' % (key, value) for key, value in sorted(kwargs.items())
])
if args_string:
formatted_args = args_string
if kwargs_string:
if formatted_args:
formatted_args += ', '
formatted_args += kwargs_string
return message % formatted_args
class _Call(tuple):
"""
A tuple for holding the results of a call to a mock, either in the form
`(args, kwargs)` or `(name, args, kwargs)`.
If args or kwargs are empty then a call tuple will compare equal to
a tuple without those values. This makes comparisons less verbose::
_Call(('name', (), {})) == ('name',)
_Call(('name', (1,), {})) == ('name', (1,))
_Call(((), {'a': 'b'})) == ({'a': 'b'},)
The `_Call` object provides a useful shortcut for comparing with call::
_Call(((1, 2), {'a': 3})) == call(1, 2, a=3)
_Call(('foo', (1, 2), {'a': 3})) == call.foo(1, 2, a=3)
If the _Call has no name then it will match any name.
"""
def __new__(cls, value=(), name='', parent=None, two=False,
from_kall=True):
args = ()
kwargs = {}
_len = len(value)
if _len == 3:
name, args, kwargs = value
elif _len == 2:
first, second = value
if isinstance(first, str):
name = first
if isinstance(second, tuple):
args = second
else:
kwargs = second
else:
args, kwargs = first, second
elif _len == 1:
value, = value
if isinstance(value, str):
name = value
elif isinstance(value, tuple):
args = value
else:
kwargs = value
if two:
return tuple.__new__(cls, (args, kwargs))
return tuple.__new__(cls, (name, args, kwargs))
def __init__(self, value=(), name=None, parent=None, two=False,
from_kall=True):
self.name = name
self.parent = parent
self.from_kall = from_kall
def __eq__(self, other):
if other is ANY:
return True
try:
len_other = len(other)
except TypeError:
return False
self_name = ''
if len(self) == 2:
self_args, self_kwargs = self
else:
self_name, self_args, self_kwargs = self
other_name = ''
if len_other == 0:
other_args, other_kwargs = (), {}
elif len_other == 3:
other_name, other_args, other_kwargs = other
elif len_other == 1:
value, = other
if isinstance(value, tuple):
other_args = value
other_kwargs = {}
elif isinstance(value, str):
other_name = value
other_args, other_kwargs = (), {}
else:
other_args = ()
other_kwargs = value
elif len_other == 2:
# could be (name, args) or (name, kwargs) or (args, kwargs)
first, second = other
if isinstance(first, str):
other_name = first
if isinstance(second, tuple):
other_args, other_kwargs = second, {}
else:
other_args, other_kwargs = (), second
else:
other_args, other_kwargs = first, second
else:
return False
if self_name and other_name != self_name:
return False
# this order is important for ANY to work!
return (other_args, other_kwargs) == (self_args, self_kwargs)
__ne__ = object.__ne__
def __call__(self, *args, **kwargs):
if self.name is None:
return _Call(('', args, kwargs), name='()')
name = self.name + '()'
return _Call((self.name, args, kwargs), name=name, parent=self)
def __getattr__(self, attr):
if self.name is None:
return _Call(name=attr, from_kall=False)
name = '%s.%s' % (self.name, attr)
return _Call(name=name, parent=self, from_kall=False)
def count(self, *args, **kwargs):
return self.__getattr__('count')(*args, **kwargs)
def index(self, *args, **kwargs):
return self.__getattr__('index')(*args, **kwargs)
def __repr__(self):
if not self.from_kall:
name = self.name or 'call'
if name.startswith('()'):
name = 'call%s' % name
return name
if len(self) == 2:
name = 'call'
args, kwargs = self
else:
name, args, kwargs = self
if not name:
name = 'call'
elif not name.startswith('()'):
name = 'call.%s' % name
else:
name = 'call%s' % name
return _format_call_signature(name, args, kwargs)
def call_list(self):
"""For a call object that represents multiple calls, `call_list`
returns a list of all the intermediate calls as well as the
final call."""
vals = []
thing = self
while thing is not None:
if thing.from_kall:
vals.append(thing)
thing = thing.parent
return _CallList(reversed(vals))
call = _Call(from_kall=False)
def create_autospec(spec, spec_set=False, instance=False, _parent=None,
_name=None, **kwargs):
"""Create a mock object using another object as a spec. Attributes on the
mock will use the corresponding attribute on the `spec` object as their
spec.
    Functions or methods being mocked will have their arguments checked
    to ensure that they are called with the correct signature.
If `spec_set` is True then attempting to set attributes that don't exist
on the spec object will raise an `AttributeError`.
If a class is used as a spec then the return value of the mock (the
instance of the class) will have the same spec. You can use a class as the
spec for an instance object by passing `instance=True`. The returned mock
will only be callable if instances of the mock are callable.
`create_autospec` also takes arbitrary keyword arguments that are passed to
the constructor of the created mock."""
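    # Illustrative sketch, not part of the implementation (`SomeClass` is a
    # hypothetical spec object):
    #     mock_cls = create_autospec(SomeClass)
    #     instance = mock_cls()   # has the spec of a SomeClass instance
    #     instance.method(1)      # argument counts checked against the real method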
if _is_list(spec):
# can't pass a list instance to the mock constructor as it will be
# interpreted as a list of strings
spec = type(spec)
is_type = isinstance(spec, type)
_kwargs = {'spec': spec}
if spec_set:
_kwargs = {'spec_set': spec}
elif spec is None:
# None we mock with a normal mock without a spec
_kwargs = {}
if _kwargs and instance:
_kwargs['_spec_as_instance'] = True
_kwargs.update(kwargs)
Klass = MagicMock
if inspect.isdatadescriptor(spec):
# descriptors don't have a spec
# because we don't know what type they return
_kwargs = {}
elif not _callable(spec):
Klass = NonCallableMagicMock
elif is_type and instance and not _instance_callable(spec):
Klass = NonCallableMagicMock
_name = _kwargs.pop('name', _name)
_new_name = _name
if _parent is None:
# for a top level object no _new_name should be set
_new_name = ''
mock = Klass(parent=_parent, _new_parent=_parent, _new_name=_new_name,
name=_name, **_kwargs)
if isinstance(spec, FunctionTypes):
# should only happen at the top level because we don't
# recurse for functions
mock = _set_signature(mock, spec)
else:
_check_signature(spec, mock, is_type, instance)
if _parent is not None and not instance:
_parent._mock_children[_name] = mock
if is_type and not instance and 'return_value' not in kwargs:
mock.return_value = create_autospec(spec, spec_set, instance=True,
_name='()', _parent=mock)
for entry in dir(spec):
if _is_magic(entry):
# MagicMock already does the useful magic methods for us
continue
# XXXX do we need a better way of getting attributes without
# triggering code execution (?) Probably not - we need the actual
# object to mock it so we would rather trigger a property than mock
# the property descriptor. Likewise we want to mock out dynamically
# provided attributes.
# XXXX what about attributes that raise exceptions other than
# AttributeError on being fetched?
# we could be resilient against it, or catch and propagate the
# exception when the attribute is fetched from the mock
try:
original = getattr(spec, entry)
except AttributeError:
continue
kwargs = {'spec': original}
if spec_set:
kwargs = {'spec_set': original}
if not isinstance(original, FunctionTypes):
new = _SpecState(original, spec_set, mock, entry, instance)
mock._mock_children[entry] = new
else:
parent = mock
if isinstance(spec, FunctionTypes):
parent = mock.mock
skipfirst = _must_skip(spec, entry, is_type)
kwargs['_eat_self'] = skipfirst
new = MagicMock(parent=parent, name=entry, _new_name=entry,
_new_parent=parent,
**kwargs)
mock._mock_children[entry] = new
_check_signature(original, new, skipfirst=skipfirst)
# so functions created with _set_signature become instance attributes,
# *plus* their underlying mock exists in _mock_children of the parent
# mock. Adding to _mock_children may be unnecessary where we are also
# setting as an instance attribute?
if isinstance(new, FunctionTypes):
setattr(mock, entry, new)
return mock
def _must_skip(spec, entry, is_type):
"""
Return whether we should skip the first argument on spec's `entry`
attribute.
"""
if not isinstance(spec, type):
if entry in getattr(spec, '__dict__', {}):
# instance attribute - shouldn't skip
return False
spec = spec.__class__
for klass in spec.__mro__:
result = klass.__dict__.get(entry, DEFAULT)
if result is DEFAULT:
continue
if isinstance(result, (staticmethod, classmethod)):
return False
elif isinstance(getattr(result, '__get__', None), MethodWrapperTypes):
# Normal method => skip if looked up on type
# (if looked up on instance, self is already skipped)
return is_type
else:
return False
# shouldn't get here unless function is a dynamically provided attribute
# XXXX untested behaviour
return is_type
def _get_class(obj):
try:
return obj.__class__
except AttributeError:
# it is possible for objects to have no __class__
return type(obj)
class _SpecState(object):
def __init__(self, spec, spec_set=False, parent=None,
name=None, ids=None, instance=False):
self.spec = spec
self.ids = ids
self.spec_set = spec_set
self.parent = parent
self.instance = instance
self.name = name
FunctionTypes = (
# python function
type(create_autospec),
# instance method
type(ANY.__eq__),
)
MethodWrapperTypes = (
type(ANY.__eq__.__get__),
)
file_spec = None
def _iterate_read_data(read_data):
# Helper for mock_open:
# Retrieve lines from read_data via a generator so that separate calls to
# readline, read, and readlines are properly interleaved
sep = b'\n' if isinstance(read_data, bytes) else '\n'
data_as_list = [l + sep for l in read_data.split(sep)]
if data_as_list[-1] == sep:
# If the last line ended in a newline, the list comprehension will have an
# extra entry that's just a newline. Remove this.
data_as_list = data_as_list[:-1]
else:
        # If there wasn't an extra newline by itself, then the file being
        # emulated doesn't end its last line with a newline, so remove the
        # newline that the naive list comprehension above added
data_as_list[-1] = data_as_list[-1][:-1]
for line in data_as_list:
yield line
def mock_open(mock=None, read_data=''):
"""
A helper function to create a mock to replace the use of `open`. It works
for `open` called directly or used as a context manager.
The `mock` argument is the mock object to configure. If `None` (the
default) then a `MagicMock` will be created for you, with the API limited
to methods or attributes available on standard file handles.
    `read_data` is a string for the `read`, `readline`, and `readlines` methods
    of the file handle to return. This is an empty string by default.
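    A minimal sketch of typical use::
        m = mock_open(read_data='some text')
        with m('any-path') as handle:
            assert handle.read() == 'some text'
        m.assert_called_once_with('any-path')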
"""
def _readlines_side_effect(*args, **kwargs):
if handle.readlines.return_value is not None:
return handle.readlines.return_value
return list(_state[0])
def _read_side_effect(*args, **kwargs):
if handle.read.return_value is not None:
return handle.read.return_value
return type(read_data)().join(_state[0])
def _readline_side_effect():
if handle.readline.return_value is not None:
while True:
yield handle.readline.return_value
for line in _state[0]:
yield line
while True:
yield type(read_data)()
global file_spec
if file_spec is None:
import _io
file_spec = list(set(dir(_io.TextIOWrapper)).union(set(dir(_io.BytesIO))))
if mock is None:
mock = MagicMock(name='open', spec=open)
handle = MagicMock(spec=file_spec)
handle.__enter__.return_value = handle
_state = [_iterate_read_data(read_data), None]
handle.write.return_value = None
handle.read.return_value = None
handle.readline.return_value = None
handle.readlines.return_value = None
handle.read.side_effect = _read_side_effect
_state[1] = _readline_side_effect()
handle.readline.side_effect = _state[1]
handle.readlines.side_effect = _readlines_side_effect
def reset_data(*args, **kwargs):
_state[0] = _iterate_read_data(read_data)
if handle.readline.side_effect == _state[1]:
# Only reset the side effect if the user hasn't overridden it.
_state[1] = _readline_side_effect()
handle.readline.side_effect = _state[1]
return DEFAULT
mock.side_effect = reset_data
mock.return_value = handle
return mock
class PropertyMock(Mock):
"""
A mock intended to be used as a property, or other descriptor, on a class.
`PropertyMock` provides `__get__` and `__set__` methods so you can specify
a return value when it is fetched.
Fetching a `PropertyMock` instance from an object calls the mock, with
no args. Setting it calls the mock with the value being set.
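    A minimal sketch (note the mock is attached to the *type*, not the
    instance)::
        class Thing(object):
            pass
        thing = Thing()
        type(thing).value = PropertyMock(return_value=3)
        assert thing.value == 3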
"""
def _get_child_mock(self, **kwargs):
return MagicMock(**kwargs)
def __get__(self, obj, obj_type):
return self()
def __set__(self, obj, val):
self(val)
def seal(mock):
"""Disable the automatic generation of "submocks"
Given an input Mock, seals it to ensure no further mocks will be generated
when accessing an attribute that was not already defined.
Submocks are defined as all mocks which were created DIRECTLY from the
parent. If a mock is assigned to an attribute of an existing mock,
it is not considered a submock.
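    A minimal sketch::
        mock = Mock()
        mock.submock.attribute = 1
        seal(mock)
        mock.submock.attribute    # still available - defined before sealing
        mock.new_attribute        # raises AttributeError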
"""
mock._mock_sealed = True
for attr in dir(mock):
try:
m = getattr(mock, attr)
except AttributeError:
continue
if not isinstance(m, NonCallableMock):
continue
if m._mock_new_parent is mock:
seal(m)
|
{
"content_hash": "d7991db34322b9fd214c8456f4dfef4c",
"timestamp": "",
"source": "github",
"line_count": 2436,
"max_line_length": 85,
"avg_line_length": 33.024220032840724,
"alnum_prop": 0.575447188832399,
"repo_name": "Microsoft/PTVS",
"id": "382696d6c7e9c6d9542207b369962a4848d3e2a6",
"size": "80614",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "Python/Product/Miniconda/Miniconda3-x64/Lib/unittest/mock.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ASP",
"bytes": "109"
},
{
"name": "Batchfile",
"bytes": "10898"
},
{
"name": "C",
"bytes": "23236"
},
{
"name": "C#",
"bytes": "12235396"
},
{
"name": "C++",
"bytes": "212001"
},
{
"name": "CSS",
"bytes": "7025"
},
{
"name": "HTML",
"bytes": "34251"
},
{
"name": "JavaScript",
"bytes": "87257"
},
{
"name": "PowerShell",
"bytes": "44322"
},
{
"name": "Python",
"bytes": "847130"
},
{
"name": "Rich Text Format",
"bytes": "260880"
},
{
"name": "Smarty",
"bytes": "8156"
},
{
"name": "Tcl",
"bytes": "24968"
}
],
"symlink_target": ""
}
|
import numpy as np
import scipy
import matplotlib.pyplot as plt
def plot_residuals(turnstile_weather, predictions):
'''
Using the same methods that we used to plot a histogram of entries
per hour for our data, why don't you make a histogram of the residuals
(that is, the difference between the original hourly entry data and the predicted values).
Based on this residual histogram, do you have any insight into how our model
performed? Reading a bit on this webpage might be useful:
http://www.itl.nist.gov/div898/handbook/pri/section2/pri24.htm
'''
plt.figure()
(turnstile_weather['ENTRIESn_hourly'] - predictions).hist()
return plt
|
{
"content_hash": "d28be5fd44a799c90922c7cc1d04a051",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 94,
"avg_line_length": 36.1578947368421,
"alnum_prop": 0.7263464337700145,
"repo_name": "KellyChan/Python",
"id": "a873dfef6bf06a475753d79e36e38c5753ceec12",
"size": "687",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "python/data_science/NYC/analysis4_plot_residuals.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "1186772"
},
{
"name": "Batchfile",
"bytes": "79181"
},
{
"name": "C",
"bytes": "36468971"
},
{
"name": "C++",
"bytes": "397352"
},
{
"name": "CSS",
"bytes": "9853"
},
{
"name": "Common Lisp",
"bytes": "24481"
},
{
"name": "DIGITAL Command Language",
"bytes": "52804"
},
{
"name": "Groff",
"bytes": "492261"
},
{
"name": "HTML",
"bytes": "414186"
},
{
"name": "Inno Setup",
"bytes": "18796"
},
{
"name": "JavaScript",
"bytes": "20910"
},
{
"name": "Makefile",
"bytes": "208458"
},
{
"name": "Objective-C",
"bytes": "66324"
},
{
"name": "PLSQL",
"bytes": "45772"
},
{
"name": "PostScript",
"bytes": "13803"
},
{
"name": "PowerShell",
"bytes": "1420"
},
{
"name": "Prolog",
"bytes": "1114"
},
{
"name": "Python",
"bytes": "48074049"
},
{
"name": "R",
"bytes": "7906"
},
{
"name": "Shell",
"bytes": "865630"
},
{
"name": "TeX",
"bytes": "646204"
},
{
"name": "VimL",
"bytes": "9546"
},
{
"name": "Visual Basic",
"bytes": "962"
}
],
"symlink_target": ""
}
|
from __future__ import division
import commonfate as cf
import numpy as np
import pytest
import itertools
import operator
@pytest.fixture(params=[0.5])
def length(rate, request):
return request.param * rate
@pytest.fixture
def signal(channels, length):
return np.random.random((length, channels))
@pytest.fixture(params=[2])
def framelength(request):
return request.param * 512
@pytest.fixture(params=[2])
def hopsize(framelength, request):
return framelength // request.param
@pytest.fixture(params=itertools.combinations((30, 20, 10), 2))
# create many different shapes of dimension 2
def W(request):
return request.param
@pytest.fixture(params=[2])
def mhop(W, request):
d = (request.param, request.param)
return tuple(map(operator.floordiv, W, d))
@pytest.fixture(params=[16000, 22050])
def rate(request):
return request.param
def test_reconstruction(
channels, signal, framelength, hopsize, W, mhop, opt_einsum
):
"""
Test if transform-inverse identity holds for the tensor case
"""
components = cf.decompose.process(
signal,
nb_iter=50,
nb_components=2,
n_fft=framelength,
n_hop=hopsize,
cft_patch=W,
cft_hop=mhop,
)
# testing shapes
assert np.sum(components, axis=0).shape == signal.shape
# testing reconstruction error
error = np.sqrt(np.mean((np.sum(components, axis=0) - signal) ** 2))
assert error < 1e-8
|
{
"content_hash": "2a32d0065080b0f7b4bd2c32556b5962",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 72,
"avg_line_length": 21.880597014925375,
"alnum_prop": 0.6787175989085948,
"repo_name": "aliutkus/commonfate",
"id": "e5f1ab89f35280cd32690d5686ae1e5f185ec4f1",
"size": "1466",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_decompose.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "24584"
}
],
"symlink_target": ""
}
|
"""JSONwriter
Writes events to JSON files, segmenting on max size"""
import os, pathlib, shutil
import logging
import time
from datetime import datetime
from . import json_quick
from . import merge_test
TEMP_DIRECTORY = "/tmp/synth_json_writer/" # We build each file in a temporary directory, then move when it's finished (so that anyone watching the destination directory doesn't ever encounter partially-written files)
DEFAULT_DIRECTORY = "../synth_logs/"
DEFAULT_MAX_EVENTS_PER_FILE = 100000 # FYI 100,000 messages is max JSON file size that DP can ingest (if that's where you end-up putting these files)
class Stream():
"""Write properties into JSON files, splitting by max size.
If you access .files_written property then call close() first"""
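    # Hedged usage sketch (values are illustrative; "$ts" is the epoch-seconds
    # timestamp this class expects in every event):
    #     stream = Stream("events_", max_events_per_file=1000)
    #     stream.write_event({"$ts": time.time(), "temperature": 21.5})
    #     stream.close()
    #     print(stream.files_written)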
def __init__(self, filename, directory = DEFAULT_DIRECTORY, file_mode="wt",
max_events_per_file = DEFAULT_MAX_EVENTS_PER_FILE, merge = False,
ts_prefix = False, messages_prefix = False):
pathlib.Path(TEMP_DIRECTORY).mkdir(exist_ok=True) # Ensure temp directory exists
self.target_directory = directory
self.filename_root = filename
self.file_mode = file_mode
self.max_events_per_file = max_events_per_file
self.merge = merge
self.ts_prefix = ts_prefix
self.messages_prefix = messages_prefix
self.file = None
self.filename = None
self.files_written = []
self.file_count = 1
self.last_event = {} # Used to merge messages
self.first_timestamp = None
def _write_event(self, properties):
self.check_next_file()
jprops = properties.copy()
if self.first_timestamp is None:
self.first_timestamp = jprops["$ts"]
jprops["$ts"] = int(jprops["$ts"] * 1000) # Convert timestamp to ms as that's what DP uses internally in JSON files
s = json_quick.dumps(jprops)
if self.events_in_this_file > 0:
s = ",\n" + s
self.file.write(s)
def write_event(self, properties):
if not self.merge:
self._write_event(properties)
return
if len(self.last_event) == 0:
self.last_event = properties
return
if merge_test.ok(self.last_event, properties):
self.last_event.update(properties)
else:
self._write_event(self.last_event)
self.last_event = properties
def move_to_next_file(self):
"""Move to next json file"""
if self.file is not None:
self._close()
self.filename = self.filename_root + "%05d" % self.file_count + ".json"
logging.info("Starting new logfile " + self.filename)
self.file = open(TEMP_DIRECTORY + self.filename, self.file_mode) # No-longer unbuffered as Python3 doesn't support that on text files
self.file.write("[\n")
self.events_in_this_file = 0
def check_next_file(self):
"""Check if time to move to next json file"""
if self.file is None:
self.move_to_next_file()
return
self.events_in_this_file += 1
if self.events_in_this_file >= self.max_events_per_file:
self.move_to_next_file()
return
def _close(self):
if self.file is not None:
# logging.info("Closing JSON file")
self.file.write("\n]\n")
self.file.close()
if self.ts_prefix:
dt = datetime.fromtimestamp(self.first_timestamp)
prefix = dt.strftime("%Y-%m-%dT%H-%M-%S_")
else:
prefix = ""
if self.messages_prefix:
prefix += "%010d" % self.events_in_this_file + "_"
src = TEMP_DIRECTORY + self.filename
            shutil.copy(src, self.target_directory + prefix + self.filename) # os.rename() fails if they're on different drives
            os.remove(src)
            self.files_written.append(self.target_directory + prefix + self.filename)
self.file = None
self.filename = None
self.first_timestamp = None
self.file_count += 1
def close(self):
if len(self.last_event) != 0:
self._write_event(self.last_event)
self._close()
|
{
"content_hash": "4ec5662d7879c025630563f7277a523b",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 217,
"avg_line_length": 38.4375,
"alnum_prop": 0.5951219512195122,
"repo_name": "DevicePilot/synth",
"id": "e91a03766d22491202365f050c0468a54cf924e4",
"size": "4305",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "synth/common/json_writer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "605"
},
{
"name": "Python",
"bytes": "628644"
},
{
"name": "Shell",
"bytes": "212"
}
],
"symlink_target": ""
}
|
from google.appengine.ext import ndb
from UserModel import UserModel
class LikeModel(ndb.Model):
"""Like Model"""
palette_id = ndb.StringProperty(required=True)
added_by = ndb.UserProperty()
added_by_id = ndb.StringProperty(required=True)
timestamp = ndb.DateTimeProperty(auto_now_add=True)
@staticmethod
def format(like):
if like is None or like.timestamp is None:
return False
return {
'id': like.key.id(),
'palette_id': like.palette_id,
'added_by': UserModel.format(like.added_by),
'timestamp': like.timestamp.isoformat()
}
|
{
"content_hash": "724b57a0b8886642c404759155669896",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 56,
"avg_line_length": 30.523809523809526,
"alnum_prop": 0.6271450858034321,
"repo_name": "rsyvarth/Schemify",
"id": "511186d8af4a8b37b47f4eacc3a47b8b3cc38ad6",
"size": "643",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "application/models/LikeModel.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "33676"
},
{
"name": "HTML",
"bytes": "28230"
},
{
"name": "JavaScript",
"bytes": "310517"
},
{
"name": "Python",
"bytes": "2055770"
}
],
"symlink_target": ""
}
|
"""
Copyright (c) 2011, 2012, Regents of the University of California
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
OF THE POSSIBILITY OF SUCH DAMAGE.
"""
"""
Generic driver for XML sources. Uses an XSLT transform to transform
the data into sMAP-XML.
@author Stephen Dawson-Haggerty <stevedh@eecs.berkeley.edu>
"""
from twisted.internet import defer, threads
from twisted.python import log
import time
from lxml import etree
import urllib2
import urlparse
from smap import util, core
from smap.driver import FetchDriver
from smap.contrib import dtutil
class XMLDriver(FetchDriver):
"""Driver for generic XML documents. They are expected to be in
sMAP-XML format, which is basically the smap definitions directly
mapped into XML. To make this easier, you can apply an XSLT
transformation to your document before processing with this
module.
Parameters:
Xslt: path of XSLT stylesheet used to transform document.
Optional if the source is already in sMAP-XML
    Timeformat = python strptime string used to parse the time in the
document. XSLTv1 doesn't have much in the way of time processing
and anyways it's a pain to use.
"""
def setup(self, opts):
FetchDriver.setup(self, opts) # set up the getter
self.xslt = opts.get('Xslt', None) # transformation to be applied
self.timefmt = opts.get("Timeformat", None)
self.timezone = opts.get("Timezone", 'UTC')
self.ignore_time = opts.get('IgnoreTimestamps', False)
if self.xslt:
with open(self.xslt, "r") as fp:
self.xslt = etree.XSLT(etree.XML(fp.read()))
def parse_val(self, ts, val):
if ts['Properties']['ReadingType'] == 'long':
return int(val)
elif ts['Properties']['ReadingType'] == 'double':
return float(val)
else:
return val
def parse_time(self, ts, val):
if self.timefmt == None:
return int(val)
else:
return dtutil.dt2ts(dtutil.strptime_tz(val, self.timefmt,
self.timezone))
def make_jsonts(self, xmlts):
"""Transform a sMAP-XML Properties and Metadata section into json
"""
ts = {
'Properties': {'UnitofMeasure': ''},
'Metadata': {},
'Readings': []
}
for c in xmlts.getchildren():
if c.tag == 'Properties':
for p in c.getchildren():
if p.text != None: ts['Properties'][p.tag] = p.text
elif c.tag == 'Metadata':
for cat in c.getchildren():
for field in cat.getchildren():
f = ts['Metadata'].get(cat.tag, {})
f[field.tag] = field.text
ts['Metadata'][cat.tag] = f
if not 'Timezone' in ts['Properties']:
ts['Properties']['Timezone'] = self.timezone
return ts
def process(self, data):
# maybe transform to smap-xml
data = etree.XML(data)
if self.xslt:
data = self.xslt(data)
for xmlts in data.getroot().getchildren():
if not 'path' in xmlts.attrib:
log.err("skipping timeseries: no path attribute")
continue
# maybe make/add a new timeseries if we haven't seen this one before
path = xmlts.attrib['path']
ts = self.get_timeseries(path)
if not ts:
ts = self.make_jsonts(xmlts)
ts['uuid'] = self.uuid(path)
ts = core.Timeseries(ts, None)
self.add_timeseries(path, ts)
for xmlts in data.getroot().getchildren():
if not 'path' in xmlts.attrib:
continue
# add all of the readings
path = xmlts.attrib['path']
for r in xmlts.find('Readings').getchildren():
try:
if not self.ignore_time:
rtime = self.parse_time(ts, r.find("Timestamp").text)
else:
rtime = time.time()
rval = self.parse_val(ts, r.find("Value").text)
except (ValueError, TypeError), e:
log.err()
continue
self._add(path, rtime, rval)
|
{
"content_hash": "906fa61d123f4dce92b8635712671482",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 80,
"avg_line_length": 38.689655172413794,
"alnum_prop": 0.6085561497326203,
"repo_name": "SoftwareDefinedBuildings/smap",
"id": "3006650ee8bebb5c3f37c11dc03069a3fbb2f2f9",
"size": "5610",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "python/smap/drivers/xslt.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "325117"
},
{
"name": "HTML",
"bytes": "9642"
},
{
"name": "Java",
"bytes": "47918"
},
{
"name": "Lua",
"bytes": "9058"
},
{
"name": "Makefile",
"bytes": "5715"
},
{
"name": "Python",
"bytes": "1641521"
},
{
"name": "R",
"bytes": "23461"
},
{
"name": "Shell",
"bytes": "1273"
},
{
"name": "TeX",
"bytes": "40212"
},
{
"name": "XSLT",
"bytes": "5081"
}
],
"symlink_target": ""
}
|
import gzip
import os
import pathlib
import sys
from functools import partial
from time import sleep
import cloudpickle
import pytest
from fsspec.compression import compr
from fsspec.core import open_files
from fsspec.implementations.local import LocalFileSystem
from tlz import concat, valmap
from dask import compute
from dask.bytes.core import read_bytes
from dask.bytes.utils import compress
from dask.utils import filetexts
compute = partial(compute, scheduler="sync")
files = {
".test.accounts.1.json": (
b'{"amount": 100, "name": "Alice"}\n'
b'{"amount": 200, "name": "Bob"}\n'
b'{"amount": 300, "name": "Charlie"}\n'
b'{"amount": 400, "name": "Dennis"}\n'
),
".test.accounts.2.json": (
b'{"amount": 500, "name": "Alice"}\n'
b'{"amount": 600, "name": "Bob"}\n'
b'{"amount": 700, "name": "Charlie"}\n'
b'{"amount": 800, "name": "Dennis"}\n'
),
}
csv_files = {
".test.fakedata.1.csv": (b"a,b\n" b"1,2\n"),
".test.fakedata.2.csv": (b"a,b\n" b"3,4\n"),
"subdir/.test.fakedata.2.csv": (b"a,b\n" b"5,6\n"),
}
def to_uri(path):
return pathlib.Path(os.path.abspath(path)).as_uri()
def test_unordered_urlpath_errors():
# Unordered urlpath argument
with pytest.raises(TypeError):
read_bytes(
{
"sets/are.csv",
"unordered/so/they.csv",
"should/not/be.csv",
"allowed.csv",
}
)
def test_read_bytes():
with filetexts(files, mode="b"):
sample, values = read_bytes(".test.accounts.*")
assert isinstance(sample, bytes)
assert sample[:5] == files[sorted(files)[0]][:5]
assert sample.endswith(b"\n")
assert isinstance(values, (list, tuple))
assert isinstance(values[0], (list, tuple))
assert hasattr(values[0][0], "dask")
assert sum(map(len, values)) >= len(files)
results = compute(*concat(values))
assert set(results) == set(files.values())
def test_read_bytes_sample_delimiter():
with filetexts(files, mode="b"):
sample, values = read_bytes(".test.accounts.*", sample=80, delimiter=b"\n")
assert sample.endswith(b"\n")
sample, values = read_bytes(".test.accounts.1.json", sample=80, delimiter=b"\n")
assert sample.endswith(b"\n")
sample, values = read_bytes(".test.accounts.1.json", sample=2, delimiter=b"\n")
assert sample.endswith(b"\n")
def test_parse_sample_bytes():
with filetexts(files, mode="b"):
sample, values = read_bytes(".test.accounts.*", sample="40 B")
assert len(sample) == 40
def test_read_bytes_no_sample():
with filetexts(files, mode="b"):
sample, _ = read_bytes(".test.accounts.1.json", sample=False)
assert sample is False
def test_read_bytes_blocksize_none():
with filetexts(files, mode="b"):
sample, values = read_bytes(".test.accounts.*", blocksize=None)
assert sum(map(len, values)) == len(files)
@pytest.mark.parametrize("blocksize", [5.0, "5 B"])
def test_read_bytes_blocksize_types(blocksize):
with filetexts(files, mode="b"):
sample, vals = read_bytes(".test.account*", blocksize=blocksize)
results = compute(*concat(vals))
ourlines = b"".join(results).split(b"\n")
testlines = b"".join(files.values()).split(b"\n")
assert set(ourlines) == set(testlines)
def test_read_bytes_blocksize_float_errs():
with filetexts(files, mode="b"):
with pytest.raises(TypeError):
read_bytes(".test.account*", blocksize=5.5)
def test_read_bytes_include_path():
with filetexts(files, mode="b"):
_, _, paths = read_bytes(".test.accounts.*", include_path=True)
assert {os.path.split(path)[1] for path in paths} == files.keys()
def test_with_urls():
with filetexts(files, mode="b"):
# OS-independent file:// URI with glob *
url = to_uri(".test.accounts.") + "*"
sample, values = read_bytes(url, blocksize=None)
assert sum(map(len, values)) == len(files)
@pytest.mark.skipif(sys.platform == "win32", reason="pathlib and moto clash on windows")
def test_with_paths():
with filetexts(files, mode="b"):
url = pathlib.Path("./.test.accounts.*")
sample, values = read_bytes(url, blocksize=None)
assert sum(map(len, values)) == len(files)
with pytest.raises(OSError):
# relative path doesn't work
url = pathlib.Path("file://.test.accounts.*")
read_bytes(url, blocksize=None)
def test_read_bytes_block():
with filetexts(files, mode="b"):
for bs in [5, 15, 45, 1500]:
sample, vals = read_bytes(".test.account*", blocksize=bs)
assert list(map(len, vals)) == [
max((len(v) // bs), 1) for v in files.values()
]
results = compute(*concat(vals))
assert sum(len(r) for r in results) == sum(len(v) for v in files.values())
ourlines = b"".join(results).split(b"\n")
testlines = b"".join(files.values()).split(b"\n")
assert set(ourlines) == set(testlines)
def test_read_bytes_delimited():
with filetexts(files, mode="b"):
for bs in [5, 15, 45, "1.5 kB"]:
_, values = read_bytes(".test.accounts*", blocksize=bs, delimiter=b"\n")
_, values2 = read_bytes(".test.accounts*", blocksize=bs, delimiter=b"foo")
assert [a.key for a in concat(values)] != [b.key for b in concat(values2)]
results = compute(*concat(values))
res = [r for r in results if r]
assert all(r.endswith(b"\n") for r in res)
ourlines = b"".join(res).split(b"\n")
testlines = b"".join(files[k] for k in sorted(files)).split(b"\n")
assert ourlines == testlines
# delimiter not at the end
d = b"}"
_, values = read_bytes(".test.accounts*", blocksize=bs, delimiter=d)
results = compute(*concat(values))
res = [r for r in results if r]
# All should end in } except EOF
assert sum(r.endswith(b"}") for r in res) == len(res) - 2
ours = b"".join(res)
test = b"".join(files[v] for v in sorted(files))
assert ours == test
fmt_bs = [(fmt, None) for fmt in compr] + [(fmt, 10) for fmt in compr] # type: ignore
@pytest.mark.parametrize("fmt,blocksize", fmt_bs)
def test_compression(fmt, blocksize):
if fmt not in compress:
pytest.skip("compression function not provided")
files2 = valmap(compress[fmt], files)
with filetexts(files2, mode="b"):
if fmt and blocksize:
with pytest.raises(ValueError):
read_bytes(
".test.accounts.*.json",
blocksize=blocksize,
delimiter=b"\n",
compression=fmt,
)
return
sample, values = read_bytes(
".test.accounts.*.json",
blocksize=blocksize,
delimiter=b"\n",
compression=fmt,
)
assert sample[:5] == files[sorted(files)[0]][:5]
assert sample.endswith(b"\n")
results = compute(*concat(values))
assert b"".join(results) == b"".join([files[k] for k in sorted(files)])
def test_open_files():
with filetexts(files, mode="b"):
myfiles = open_files(".test.accounts.*")
assert len(myfiles) == len(files)
for lazy_file, data_file in zip(myfiles, sorted(files)):
with lazy_file as f:
x = f.read()
assert x == files[data_file]
@pytest.mark.parametrize("encoding", ["utf-8", "ascii"])
def test_open_files_text_mode(encoding):
with filetexts(files, mode="b"):
myfiles = open_files(".test.accounts.*", mode="rt", encoding=encoding)
assert len(myfiles) == len(files)
data = []
for file in myfiles:
with file as f:
data.append(f.read())
assert list(data) == [files[k].decode(encoding) for k in sorted(files)]
@pytest.mark.parametrize("mode", ["rt", "rb"])
@pytest.mark.parametrize("fmt", list(compr))
def test_open_files_compression(mode, fmt):
if fmt not in compress:
pytest.skip("compression function not provided")
files2 = valmap(compress[fmt], files)
with filetexts(files2, mode="b"):
myfiles = open_files(".test.accounts.*", mode=mode, compression=fmt)
data = []
for file in myfiles:
with file as f:
data.append(f.read())
sol = [files[k] for k in sorted(files)]
if mode == "rt":
sol = [b.decode() for b in sol]
assert list(data) == sol
def test_bad_compression():
with filetexts(files, mode="b"):
for func in [read_bytes, open_files]:
with pytest.raises(ValueError):
sample, values = func(".test.accounts.*", compression="not-found")
def test_not_found():
fn = "not-a-file"
with pytest.raises((FileNotFoundError, OSError), match=fn):
read_bytes(fn)
@pytest.mark.slow
def test_names():
with filetexts(files, mode="b"):
_, a = read_bytes(".test.accounts.*")
_, b = read_bytes(".test.accounts.*")
a = list(concat(a))
b = list(concat(b))
assert [aa._key for aa in a] == [bb._key for bb in b]
sleep(1)
for fn in files:
with open(fn, "ab") as f:
f.write(b"x")
_, c = read_bytes(".test.accounts.*")
c = list(concat(c))
assert [aa._key for aa in a] != [cc._key for cc in c]
@pytest.mark.parametrize("compression_opener", [(None, open), ("gzip", gzip.open)])
def test_open_files_write(tmpdir, compression_opener):
compression, opener = compression_opener
tmpdir = str(tmpdir)
files = open_files(tmpdir, num=2, mode="wb", compression=compression)
assert len(files) == 2
assert {f.mode for f in files} == {"wb"}
for fil in files:
with fil as f:
f.write(b"000")
files = sorted(os.listdir(tmpdir))
assert files == ["0.part", "1.part"]
with opener(os.path.join(tmpdir, files[0]), "rb") as f:
d = f.read()
assert d == b"000"
def test_pickability_of_lazy_files(tmpdir):
tmpdir = str(tmpdir)
with filetexts(files, mode="b"):
myfiles = open_files(".test.accounts.*")
myfiles2 = cloudpickle.loads(cloudpickle.dumps(myfiles))
for f, f2 in zip(myfiles, myfiles2):
assert f.path == f2.path
assert type(f.fs) == type(f2.fs)
with f as f_open, f2 as f2_open:
assert f_open.read() == f2_open.read()
def test_py2_local_bytes(tmpdir):
fn = str(tmpdir / "myfile.txt.gz")
with gzip.open(fn, mode="wb") as f:
f.write(b"hello\nworld")
files = open_files(fn, compression="gzip", mode="rt")
with files[0] as f:
assert all(isinstance(line, str) for line in f)
def test_abs_paths(tmpdir):
tmpdir = str(tmpdir)
here = os.getcwd()
os.chdir(tmpdir)
with open("tmp", "w") as f:
f.write("hi")
out = LocalFileSystem().glob("*")
assert len(out) == 1
assert "/" in out[0]
assert "tmp" in out[0]
fs = LocalFileSystem()
os.chdir(here)
with fs.open(out[0], "r") as f:
res = f.read()
assert res == "hi"
|
{
"content_hash": "0daf6f418c344d39ca3a99997b68eae8",
"timestamp": "",
"source": "github",
"line_count": 353,
"max_line_length": 88,
"avg_line_length": 32.45609065155807,
"alnum_prop": 0.5734485467399842,
"repo_name": "blaze/dask",
"id": "2c2804ee43a3cf0167e545579c81b5a884d5c547",
"size": "11457",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "dask/bytes/tests/test_local.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "51"
},
{
"name": "Python",
"bytes": "1033404"
},
{
"name": "Shell",
"bytes": "36"
}
],
"symlink_target": ""
}
|
from docutils.languages import get_language as get_language
from .log import log
def get_language_silent(lang):
"""Docutils get_language() with patches for older versions."""
try:
return get_language(lang)
except TypeError as err: # Docutils 0.8.1
if 'get_language() takes exactly 2 arguments' in str(err):
class SilentReporter(object):
def warning(self, msg):
pass
return get_language(lang, SilentReporter())
raise # re-raise any other TypeError
except ImportError: # Docutils < 0.8
return get_language('en')
def get_language_available(lang):
"""Docutils get_language() also returning the available language."""
module = get_language_silent(lang)
docutils_lang = module.__name__.rsplit('.', 1)[-1]
if docutils_lang == 'en' and docutils_lang != lang and '_' in lang:
module = get_language_silent(lang.split('_', 1)[0])
docutils_lang = module.__name__.rsplit('.', 1)[-1]
if docutils_lang != lang:
warn = (
docutils_lang.split('_', 1)[0] == lang.split('_', 1)[0]
and log.info
or log.warning
)
warn(
"Language '%s' not supported by Docutils,"
" using '%s' instead." % (lang, docutils_lang)
)
if docutils_lang == 'en' and lang.split('_', 1)[0] != 'en':
lang = 'en_US'
return lang, docutils_lang, module
|
{
"content_hash": "681319378dff108fa30aa0e52809602b",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 72,
"avg_line_length": 34.976190476190474,
"alnum_prop": 0.572498298162015,
"repo_name": "rst2pdf/rst2pdf",
"id": "68fcaa2d0f70967b05ef730461c8395336e4add5",
"size": "1532",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "rst2pdf/languages.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "485883"
},
{
"name": "Shell",
"bytes": "2089"
}
],
"symlink_target": ""
}
|
from django.db.models import Max, Min
from models import Flag, DiskFlag, Disk, FlagRange, IOStat
from django.core.exceptions import ObjectDoesNotExist
def generate_disk_flag(disk,flag,value,time):
range_match = None
for level in FlagRange.WARNING_LEVEL_CHOICES:
try:
range = flag.ranges.get(level=level[0])
if range.minimum and range.maximum:
if value >= range.minimum and value <= range.maximum:
range_match = range
else:
if range.minimum and value >= range.minimum:
range_match = range
elif range.maximum and value <= range.maximum:
range_match = range
except:
pass
if range_match is not None:
try:
obj = DiskFlag.objects.get(disk=disk,flag=flag)
obj.flag_range = range_match
if (obj.bad_value == 'low' and value <= obj.worst_value) or (obj.bad_value == 'high' and value >= obj.worst_value):
obj.worst_value = value
obj.worst_time = time
obj.flag_range = range_match
obj.value = value
obj.time = time
obj.save()
except ObjectDoesNotExist, e:
obj = DiskFlag.objects.create(disk=disk,flag=flag,flag_range=range_match,value=value,time=time,worst_value=value,worst_time=time)
except Exception, e:
print e
#NOT USED: Generate DiskFlags based on the all time min/max from IOStat (need to add more logic for smartctl)
#Leaving here for code reference
def generate_flags(flags):
from django.db.models import Max, Min
for flag in flags:
aggregate_func = Max if flag.bad_value == 'high' else Min
DiskFlag.objects.filter(flag=flag).delete()
if flag.iostat_attr:
#For each level from minor to severe, see if any disks match the criteria
for level in FlagRange.WARNING_LEVEL_CHOICES:
query= {'name':flag.iostat_attr}
try:
range = flag.ranges.get(level=level[0])
if range.minimum:
query['value__gte'] = range.minimum
if range.maximum:
query['value__lte'] = range.maximum
for result in IOStat.objects.filter(**query).values('disk').annotate(value=(aggregate_func('value'))):
try:
obj = DiskFlag.objects.get(disk_id=result['disk'],flag=flag)
                            obj.value = result['value']
obj.save()
except:
obj = DiskFlag.objects.create(disk_id=result['disk'],flag=flag,flag_range=range,value=result['value'])
except Exception, e:
print level[0]
print e
|
{
"content_hash": "1949918128bfe2b04c0ac4b2f10ea149",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 141,
"avg_line_length": 46.703125,
"alnum_prop": 0.5369688859150218,
"repo_name": "amschaal/maestor",
"id": "c735591383fe9b6ce7d034431c8de0009d1f869b",
"size": "2989",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "maestor/flags.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "22920"
},
{
"name": "JavaScript",
"bytes": "4085"
},
{
"name": "Python",
"bytes": "47992"
}
],
"symlink_target": ""
}
|
import pytest
from integration_tests import AgentTestCase
from integration_tests.tests.usage_collector_base import TestUsageCollectorBase
pytestmark = pytest.mark.group_usage_collector
class TestUsageCollectorWithAgent(AgentTestCase, TestUsageCollectorBase):
def setUp(self):
super(AgentTestCase, self).setUp()
self.clean_timestamps()
def tearDown(self):
super(AgentTestCase, self).tearDown()
self.clean_usage_collector_log()
def test_collector_scripts_with_agent(self):
messages = [
"Uptime script finished running",
"Usage script finished running",
"'customer_id': 'MockCustomer'",
"'node_instances_count': 1",
"'compute_count': 1",
"'agents_count': 1",
"'premium_edition': True"
]
self.run_scripts_with_deployment("dsl/agent_tests/with_agent.yaml",
messages)
|
{
"content_hash": "9275454803acbc04e38edc706e913e3a",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 79,
"avg_line_length": 33.13793103448276,
"alnum_prop": 0.6243496357960457,
"repo_name": "cloudify-cosmo/cloudify-manager",
"id": "4a6054d7c538ba8a4cbfa2879db8fb2332a4e327",
"size": "1603",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/integration_tests/tests/agent_tests/test_usage_collector.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Clojure",
"bytes": "4067"
},
{
"name": "Dockerfile",
"bytes": "3843"
},
{
"name": "HTML",
"bytes": "320"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "PLpgSQL",
"bytes": "119062"
},
{
"name": "Python",
"bytes": "3825971"
},
{
"name": "Shell",
"bytes": "49121"
}
],
"symlink_target": ""
}
|
"""WSGI tools for use with gate."""
import errno
import os
import signal
import time
import mimetools
from itertools import chain
from StringIO import StringIO
import eventlet
import eventlet.debug
from eventlet import greenio, GreenPool, sleep, wsgi, listen
from paste.deploy import loadapp, appconfig
from eventlet.green import socket, ssl
from urllib import unquote
from gate.common import utils
from gate.common.swob import Request
from gate.common.utils import capture_stdio, disable_fallocate, \
drop_privileges, get_logger, NullLogger, config_true_value, \
get_hub
def monkey_patch_mimetools():
"""
mimetools.Message defaults content-type to "text/plain"
This changes it to default to None, so we can detect missing headers.
"""
orig_parsetype = mimetools.Message.parsetype
def parsetype(self):
if not self.typeheader:
self.type = None
self.maintype = None
self.subtype = None
self.plisttext = ''
else:
orig_parsetype(self)
mimetools.Message.parsetype = parsetype
def get_socket(conf, default_port=8080):
"""Bind socket to bind ip:port in conf
:param conf: Configuration dict to read settings from
:param default_port: port to use if not specified in conf
:returns : a socket object as returned from socket.listen or
ssl.wrap_socket if conf specifies cert_file
"""
bind_addr = (conf.get('bind_ip', '0.0.0.0'),
int(conf.get('bind_port', default_port)))
address_family = [addr[0] for addr in socket.getaddrinfo(
bind_addr[0], bind_addr[1], socket.AF_UNSPEC, socket.SOCK_STREAM)
if addr[0] in (socket.AF_INET, socket.AF_INET6)][0]
sock = None
bind_timeout = int(conf.get('bind_timeout', 30))
retry_until = time.time() + bind_timeout
warn_ssl = False
while not sock and time.time() < retry_until:
try:
sock = listen(bind_addr, backlog=int(conf.get('backlog', 4096)),
family=address_family)
if 'cert_file' in conf:
warn_ssl = True
sock = ssl.wrap_socket(sock, certfile=conf['cert_file'],
keyfile=conf['key_file'])
except socket.error, err:
if err.args[0] != errno.EADDRINUSE:
raise
sleep(0.1)
if not sock:
raise Exception(_('Could not bind to %s:%s '
'after trying for %s seconds') % (
bind_addr[0], bind_addr[1], bind_timeout))
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# in my experience, sockets can hang around forever without keepalive
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
if hasattr(socket, 'TCP_KEEPIDLE'):
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, 600)
if warn_ssl:
ssl_warning_message = 'WARNING: SSL should only be enabled for ' \
'testing purposes. Use external SSL ' \
'termination for a production deployment.'
get_logger(conf).warning(ssl_warning_message)
print _(ssl_warning_message)
return sock
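# Illustrative only (not part of this module): the shape of the conf dict that
# get_socket() consumes. Any key left out falls back to the defaults shown in
# the function above; cert_file/key_file are optional and enable SSL wrapping.
# The file paths below are made-up values.
#
#   conf = {'bind_ip': '0.0.0.0', 'bind_port': '8080',
#           'backlog': '4096', 'bind_timeout': '30',
#           'cert_file': '/etc/gate/cert.pem',
#           'key_file': '/etc/gate/key.pem'}
#   sock = get_socket(conf, default_port=8080)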
# TODO: pull pieces of this out to test
def run_wsgi(conf_file, app_section, *args, **kwargs):
"""
Runs the server using the specified number of workers.
:param conf_file: Path to paste.deploy style configuration file
:param app_section: App name from conf file to load config from
"""
# Load configuration, Set logger and Load request processor
try:
(app, conf, logger, log_name) = \
init_request_processor(conf_file, app_section, *args, **kwargs)
except ConfigFileError, e:
print e
return
# bind to address and port
sock = get_socket(conf, default_port=kwargs.get('default_port', 8080))
# remaining tasks should not require elevated privileges
#drop_privileges(conf.get('user', 'gate'))
# set utils.FALLOCATE_RESERVE if desired
reserve = int(conf.get('fallocate_reserve', 0))
if reserve > 0:
utils.FALLOCATE_RESERVE = reserve
# redirect errors to logger and close stdio
#capture_stdio(logger)
def run_server():
wsgi.HttpProtocol.default_request_version = "HTTP/1.0"
# Turn off logging requests by the underlying WSGI software.
wsgi.HttpProtocol.log_request = lambda *a: None
# Redirect logging other messages by the underlying WSGI software.
wsgi.HttpProtocol.log_message = \
lambda s, f, *a: logger.error('ERROR WSGI: ' + f % a)
wsgi.WRITE_TIMEOUT = int(conf.get('client_timeout') or 60)
eventlet.hubs.use_hub(get_hub())
eventlet.patcher.monkey_patch(all=False, socket=True)
eventlet_debug = config_true_value(conf.get('eventlet_debug', 'no'))
eventlet.debug.hub_exceptions(eventlet_debug)
app = loadapp('config:%s' % conf_file,
global_conf={'log_name': log_name})
pool = GreenPool(size=1024)
try:
wsgi.server(sock, app, NullLogger(), custom_pool=pool)
except socket.error, err:
if err[0] != errno.EINVAL:
raise
pool.waitall()
worker_count = int(conf.get('workers', '1'))
# Useful for profiling [no forks].
if worker_count == 0:
run_server()
return
def kill_children(*args):
"""Kills the entire process group."""
logger.error('SIGTERM received')
signal.signal(signal.SIGTERM, signal.SIG_IGN)
running[0] = False
os.killpg(0, signal.SIGTERM)
def hup(*args):
"""Shuts down the server, but allows running requests to complete"""
logger.error('SIGHUP received')
signal.signal(signal.SIGHUP, signal.SIG_IGN)
running[0] = False
running = [True]
signal.signal(signal.SIGTERM, kill_children)
signal.signal(signal.SIGHUP, hup)
children = []
while running[0]:
while len(children) < worker_count:
pid = os.fork()
if pid == 0:
signal.signal(signal.SIGHUP, signal.SIG_DFL)
signal.signal(signal.SIGTERM, signal.SIG_DFL)
run_server()
logger.notice('Child %d exiting normally' % os.getpid())
return
else:
logger.notice('Started child %s' % pid)
children.append(pid)
try:
pid, status = os.wait()
if os.WIFEXITED(status) or os.WIFSIGNALED(status):
logger.error('Removing dead child %s' % pid)
children.remove(pid)
except OSError, err:
if err.errno not in (errno.EINTR, errno.ECHILD):
raise
except KeyboardInterrupt:
logger.notice('User quit')
break
greenio.shutdown_safe(sock)
sock.close()
logger.notice('Exited')
class ConfigFileError(Exception):
pass
def init_request_processor(conf_file, app_section, *args, **kwargs):
"""
Loads common settings from conf
Sets the logger
Loads the request processor
:param conf_file: Path to paste.deploy style configuration file
:param app_section: App name from conf file to load config from
:returns: the loaded application entry point
:raises ConfigFileError: Exception is raised for config file error
"""
try:
conf = appconfig('config:%s' % conf_file, name=app_section)
except Exception, e:
raise ConfigFileError("Error trying to load config %s: %s" %
(conf_file, e))
# pre-configure logger
log_name = conf.get('log_name', app_section)
if 'logger' in kwargs:
logger = kwargs.pop('logger')
else:
logger = get_logger(conf, log_name,
log_to_console=kwargs.pop('verbose', False),
log_route='wsgi')
# disable fallocate if desired
if config_true_value(conf.get('disable_fallocate', 'no')):
disable_fallocate()
monkey_patch_mimetools()
app = loadapp('config:%s' % conf_file, global_conf={'log_name': log_name})
return (app, conf, logger, log_name)
class WSGIContext(object):
"""
This class provides a means to provide context (scope) for a middleware
filter to have access to the wsgi start_response results like the request
status and headers.
"""
def __init__(self, wsgi_app):
self.app = wsgi_app
def _start_response(self, status, headers, exc_info=None):
"""
Saves response info without sending it to the remote client.
Uses the same semantics as the usual WSGI start_response.
"""
self._response_status = status
self._response_headers = headers
self._response_exc_info = exc_info
def _app_call(self, env):
"""
Ensures start_response has been called before returning.
"""
self._response_status = None
self._response_headers = None
self._response_exc_info = None
resp = self.app(env, self._start_response)
# if start_response has been called, just return the iter
if self._response_status is not None:
return resp
resp = iter(resp)
try:
first_chunk = resp.next()
except StopIteration:
return iter([])
else: # We got a first_chunk
return chain([first_chunk], resp)
def _get_status_int(self):
"""
Returns the HTTP status int from the last called self._start_response
result.
"""
return int(self._response_status.split(' ', 1)[0])
def _response_header_value(self, key):
"Returns str of value for given header key or None"
for h_key, val in self._response_headers:
if h_key.lower() == key.lower():
return val
return None
def make_pre_authed_request(env, method=None, path=None, body=None,
headers=None, agent='Gate', gate_source=None):
"""
Makes a new swob.Request based on the current env but with the
parameters specified. Note that this request will be preauthorized.
:param env: The WSGI environment to base the new request on.
:param method: HTTP method of new request; default is from
the original env.
:param path: HTTP path of new request; default is from the
original env. path should be compatible with what you
would send to Request.blank. path should be quoted and it
can include a query string. for example:
'/a%20space?unicode_str%E8%AA%9E=y%20es'
:param body: HTTP body of new request; empty by default.
:param headers: Extra HTTP headers of new request; None by
default.
:param agent: The HTTP user agent to use; default 'Gate'. You
can put %(orig)s in the agent to have it replaced
with the original env's HTTP_USER_AGENT, such as
'%(orig)s StaticWeb'. You also set agent to None to
use the original env's HTTP_USER_AGENT or '' to
have no HTTP_USER_AGENT.
:param gate_source: Used to mark the request as originating out of
middleware. Will be logged in proxy logs.
:returns: Fresh swob.Request object.
"""
query_string = None
if path and '?' in path:
path, query_string = path.split('?', 1)
newenv = make_pre_authed_env(env, method, path=unquote(path), agent=agent,
query_string=query_string,
gate_source=gate_source)
if not headers:
headers = {}
if body:
return Request.blank(path, environ=newenv, body=body, headers=headers)
else:
return Request.blank(path, environ=newenv, headers=headers)
def make_pre_authed_env(env, method=None, path=None, agent='Gate',
query_string=None, gate_source=None):
"""
Returns a new fresh WSGI environment with escalated privileges to
do backend checks, listings, etc. that the remote user wouldn't
be able to accomplish directly.
:param env: The WSGI environment to base the new environment on.
:param method: The new REQUEST_METHOD or None to use the
original.
:param path: The new path_info or none to use the original. path
should NOT be quoted. When building a url, a Webob
Request (in accordance with wsgi spec) will quote
env['PATH_INFO']. url += quote(environ['PATH_INFO'])
:param query_string: The new query_string or none to use the original.
When building a url, a Webob Request will append
the query string directly to the url.
url += '?' + env['QUERY_STRING']
:param agent: The HTTP user agent to use; default 'Gate'. You
can put %(orig)s in the agent to have it replaced
with the original env's HTTP_USER_AGENT, such as
'%(orig)s StaticWeb'. You also set agent to None to
use the original env's HTTP_USER_AGENT or '' to
have no HTTP_USER_AGENT.
:param gate_source: Used to mark the request as originating out of
middleware. Will be logged in proxy logs.
:returns: Fresh WSGI environment.
"""
newenv = {}
for name in ('eventlet.posthooks', 'HTTP_USER_AGENT', 'HTTP_HOST',
'PATH_INFO', 'QUERY_STRING', 'REMOTE_USER', 'REQUEST_METHOD',
'SCRIPT_NAME', 'SERVER_NAME', 'SERVER_PORT',
'SERVER_PROTOCOL', 'gate.cache', 'gate.source',
'gate.trans_id'):
if name in env:
newenv[name] = env[name]
if method:
newenv['REQUEST_METHOD'] = method
if path:
newenv['PATH_INFO'] = path
newenv['SCRIPT_NAME'] = ''
if query_string is not None:
newenv['QUERY_STRING'] = query_string
if agent:
newenv['HTTP_USER_AGENT'] = (
agent % {'orig': env.get('HTTP_USER_AGENT', '')}).strip()
elif agent == '' and 'HTTP_USER_AGENT' in newenv:
del newenv['HTTP_USER_AGENT']
if gate_source:
newenv['gate.source'] = gate_source
newenv['gate.authorize'] = lambda req: None
newenv['gate.authorize_override'] = True
newenv['REMOTE_USER'] = '.wsgi.pre_authed'
newenv['wsgi.input'] = StringIO('')
if 'SCRIPT_NAME' not in newenv:
newenv['SCRIPT_NAME'] = ''
return newenv
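# The WSGIContext and make_pre_authed_request helpers above are easiest to see
# together in a small middleware. The sketch below is illustrative only: the
# filter name, the added header and the commented-out '/internal-check' path
# are assumptions, not part of this module.
class StatusEchoMiddleware(WSGIContext):
    """Example filter that records the status of the wrapped app's response."""

    def __call__(self, env, start_response):
        # Run the wrapped app; WSGIContext captures its start_response args.
        app_iter = self._app_call(env)
        status_int = self._get_status_int()
        # A pre-authorized backend request could be issued here, e.g.:
        #   req = make_pre_authed_request(env, method='GET',
        #                                 path='/internal-check',
        #                                 agent='%(orig)s StatusEcho')
        #   resp = req.get_response(self.app)
        self._response_headers.append(('X-Upstream-Status', str(status_int)))
        start_response(self._response_status, self._response_headers,
                       self._response_exc_info)
        return app_iter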
|
{
"content_hash": "fd4f654d06af5a3708f7e0006a1d47f1",
"timestamp": "",
"source": "github",
"line_count": 382,
"max_line_length": 78,
"avg_line_length": 38.62303664921466,
"alnum_prop": 0.6000406669377796,
"repo_name": "vindeka/gate",
"id": "f8c8e0c0cb9f5c9325fd11f8d26d1301f8f25c80",
"size": "15337",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gate/common/wsgi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "325743"
}
],
"symlink_target": ""
}
|
from . import auth, config, crud, hgrid, hooks, widget, repos # noqa
|
{
"content_hash": "dde1d1c6686f66ff18efa455861b59b5",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 69,
"avg_line_length": 69,
"alnum_prop": 0.7101449275362319,
"repo_name": "AndrewSallans/osf.io",
"id": "2855280de2e9a200b50b96bb75b7dfcfd4b0fb41",
"size": "69",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "website/addons/github/views/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "70439"
},
{
"name": "JavaScript",
"bytes": "2555546"
},
{
"name": "Python",
"bytes": "2159449"
}
],
"symlink_target": ""
}
|
import os
import six
def flatten_list(node, prefix=None):
for v in node:
for kid in flatten_dict(v, prefix):
yield kid
def flatten_dict(node, prefix=None):
"""
    Iterate over flattened (name, value) env var pairs derived
    from the supplied config node.
"""
for k, v in six.iteritems(node):
if prefix:
k = '%s_%s' % (prefix, k)
# We have a value we can stringify
if not isinstance(v, (dict, list)):
yield (k, os.path.expandvars(str(v)))
else:
for kid in flatten(v, prefix=k):
yield kid
def flatten(node, prefix=None):
flat_func = flatten_dict
if isinstance(node, list):
flat_func = flatten_list
for kid in flat_func(node, prefix):
yield kid
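# A minimal usage sketch (assumed, not part of the module): flatten() walks a
# nested config structure and yields (name, value) pairs, joining nested keys
# with underscores and expanding any $VAR references in string values.
if __name__ == '__main__':
    conf = {'db': {'host': 'localhost', 'port': 5432},
            'paths': [{'data': '$HOME/data'}]}
    # Expected output (order follows dict iteration):
    #   db_host=localhost
    #   db_port=5432
    #   paths_data=<expansion of $HOME>/data
    for name, value in flatten(conf):
        print('%s=%s' % (name, value))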
|
{
"content_hash": "621339f1ff2330f47f5cfd9e53ef6846",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 60,
"avg_line_length": 23.676470588235293,
"alnum_prop": 0.5726708074534161,
"repo_name": "ionrock/withenv",
"id": "42fc9bf9708cf53cc8430750520ae948579d2f46",
"size": "805",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "withenv/flatten.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1928"
},
{
"name": "Python",
"bytes": "17172"
}
],
"symlink_target": ""
}
|
import time
def main(request, response):
key = request.GET.first(b"key")
if request.method == u"POST":
# Received result data from target page
request.server.stash.put(key, request.body, u'/scroll-to-text-fragment/')
return u"ok"
else:
# Request for result data from test page
value = request.server.stash.take(key, u'/scroll-to-text-fragment/')
return value
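# Round-trip sketch (illustrative, not part of the handler): the target page
# POSTs its result under a key, and the test page later GETs the same key to
# read it back; stash.take() removes the value, so each result is returned once.
# With the standard library the two requests look roughly like:
#
#   from urllib.request import urlopen
#   url = 'http://<wpt-server>/scroll-to-text-fragment/stash.py?key=abc'
#   urlopen(url, data=b'result-payload')   # POST: stores the body, returns "ok"
#   urlopen(url).read()                    # GET: returns b'result-payload'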
|
{
"content_hash": "d9f602d2e66700432db60dead84c84f3",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 81,
"avg_line_length": 32.38461538461539,
"alnum_prop": 0.6389548693586699,
"repo_name": "nwjs/chromium.src",
"id": "f66f32ad8dbcdd34dd4ff61d5983e889a741e007",
"size": "421",
"binary": false,
"copies": "23",
"ref": "refs/heads/nw70",
"path": "third_party/blink/web_tests/external/wpt/scroll-to-text-fragment/stash.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
"""
WSGI config for core project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "core.settings.prod")
activate_this = os.path.join("/home/fassster/virtualenvs/prod", 'bin', 'activate_this.py')
with open(activate_this) as file_:
exec(file_.read(), dict(__file__=activate_this))
application = get_wsgi_application()
|
{
"content_hash": "9c16cf9aed8f7f7e886e73dc866eb135",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 90,
"avg_line_length": 28.55,
"alnum_prop": 0.7425569176882661,
"repo_name": "hadrianpaulo/project_deathstar",
"id": "fe65937b48c559a3a4bd17d7b601f3cb2b0cc622",
"size": "571",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/core/wsgi.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4814"
},
{
"name": "HTML",
"bytes": "17143607"
},
{
"name": "JavaScript",
"bytes": "1648"
},
{
"name": "Jupyter Notebook",
"bytes": "1524677"
},
{
"name": "Python",
"bytes": "44793"
},
{
"name": "Vue",
"bytes": "56575"
}
],
"symlink_target": ""
}
|
"""
VirtualBox Driver Modules
"""
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import importutils
from ironic.common import boot_devices
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common.i18n import _LE
from ironic.common.i18n import _LW
from ironic.common import states
from ironic.common import utils
from ironic.conductor import task_manager
from ironic.drivers import base
pyremotevbox = importutils.try_import('pyremotevbox')
if pyremotevbox:
from pyremotevbox import exception as virtualbox_exc
from pyremotevbox import vbox as virtualbox
IRONIC_TO_VIRTUALBOX_DEVICE_MAPPING = {
boot_devices.PXE: 'Network',
boot_devices.DISK: 'HardDisk',
boot_devices.CDROM: 'DVD',
}
VIRTUALBOX_TO_IRONIC_DEVICE_MAPPING = {
v: k for k, v in IRONIC_TO_VIRTUALBOX_DEVICE_MAPPING.items()}
VIRTUALBOX_TO_IRONIC_POWER_MAPPING = {
'PoweredOff': states.POWER_OFF,
'Running': states.POWER_ON,
'Error': states.ERROR
}
opts = [
cfg.PortOpt('port',
default=18083,
help=_('Port on which VirtualBox web service is listening.')),
]
CONF = cfg.CONF
CONF.register_opts(opts, group='virtualbox')
LOG = logging.getLogger(__name__)
REQUIRED_PROPERTIES = {
'virtualbox_vmname': _("Name of the VM in VirtualBox. Required."),
'virtualbox_host': _("IP address or hostname of the VirtualBox host. "
"Required.")
}
OPTIONAL_PROPERTIES = {
'virtualbox_username': _("Username for the VirtualBox host. "
"Default value is ''. Optional."),
'virtualbox_password': _("Password for 'virtualbox_username'. "
"Default value is ''. Optional."),
'virtualbox_port': _("Port on which VirtualBox web service is listening. "
"Optional."),
}
COMMON_PROPERTIES = REQUIRED_PROPERTIES.copy()
COMMON_PROPERTIES.update(OPTIONAL_PROPERTIES)
def _strip_virtualbox_from_param_name(param_name):
if param_name.startswith('virtualbox_'):
return param_name[11:]
else:
return param_name
def _parse_driver_info(node):
"""Gets the driver specific node driver info.
This method validates whether the 'driver_info' property of the
supplied node contains the required information for this driver.
:param node: an Ironic Node object.
:returns: a dict containing information from driver_info (or where
applicable, config values).
:raises: MissingParameterValue, if some required parameter(s) are missing
in the node's driver_info.
:raises: InvalidParameterValue, if some parameter(s) have invalid value(s)
in the node's driver_info.
"""
info = node.driver_info
d_info = {}
missing_params = []
for param in REQUIRED_PROPERTIES:
try:
d_info_param_name = _strip_virtualbox_from_param_name(param)
d_info[d_info_param_name] = info[param]
except KeyError:
missing_params.append(param)
if missing_params:
msg = (_("The following parameters are missing in driver_info: %s") %
', '.join(missing_params))
raise exception.MissingParameterValue(msg)
for param in OPTIONAL_PROPERTIES:
if param in info:
d_info_param_name = _strip_virtualbox_from_param_name(param)
d_info[d_info_param_name] = info[param]
port = d_info.get('port', CONF.virtualbox.port)
d_info['port'] = utils.validate_network_port(port, 'virtualbox_port')
return d_info
def _run_virtualbox_method(node, ironic_method, vm_object_method,
*call_args, **call_kwargs):
"""Runs a method of pyremotevbox.vbox.VirtualMachine
This runs a method from pyremotevbox.vbox.VirtualMachine.
The VirtualMachine method to be invoked and the argument(s) to be
passed to it are to be provided.
:param node: an Ironic Node object.
:param ironic_method: the Ironic method which called
'_run_virtualbox_method'. This is used for logging only.
:param vm_object_method: The method on the VirtualMachine object
to be called.
:param call_args: The args to be passed to 'vm_object_method'.
:param call_kwargs: The kwargs to be passed to the 'vm_object_method'.
:returns: The value returned by 'vm_object_method'
:raises: VirtualBoxOperationFailed, if execution of 'vm_object_method'
failed.
:raises: InvalidParameterValue,
- if 'vm_object_method' is not a valid 'VirtualMachine' method.
- if some parameter(s) have invalid value(s) in the node's driver_info.
:raises: MissingParameterValue, if some required parameter(s) are missing
in the node's driver_info.
:raises: pyremotevbox.exception.VmInWrongPowerState, if operation cannot
be performed when vm is in the current power state.
"""
driver_info = _parse_driver_info(node)
try:
host = virtualbox.VirtualBoxHost(**driver_info)
vm_object = host.find_vm(driver_info['vmname'])
except virtualbox_exc.PyRemoteVBoxException as exc:
LOG.error(_LE("Failed while creating a VirtualMachine object for "
"node %(node_id)s. Error: %(error)s."),
{'node_id': node.uuid, 'error': exc})
raise exception.VirtualBoxOperationFailed(operation=vm_object_method,
error=exc)
try:
func = getattr(vm_object, vm_object_method)
except AttributeError:
error_msg = _("Invalid VirtualMachine method '%s' passed "
"to '_run_virtualbox_method'.")
raise exception.InvalidParameterValue(error_msg % vm_object_method)
try:
return func(*call_args, **call_kwargs)
except virtualbox_exc.PyRemoteVBoxException as exc:
error_msg = _LE("'%(ironic_method)s' failed for node %(node_id)s with "
"error: %(error)s.")
LOG.error(error_msg, {'ironic_method': ironic_method,
'node_id': node.uuid,
'error': exc})
raise exception.VirtualBoxOperationFailed(operation=vm_object_method,
error=exc)
class VirtualBoxPower(base.PowerInterface):
def get_properties(self):
return COMMON_PROPERTIES
def validate(self, task):
"""Check if node.driver_info contains the required credentials.
:param task: a TaskManager instance.
:raises: MissingParameterValue, if some required parameter(s) are
missing in the node's driver_info.
:raises: InvalidParameterValue, if some parameter(s) have invalid
value(s) in the node's driver_info.
"""
_parse_driver_info(task.node)
def _apply_boot_device(self, task):
"""Get the target boot device and apply on the baremetal machine .
:param task: a TaskManager instance.
"""
driver_internal_info = task.node.driver_internal_info
device = driver_internal_info.pop('vbox_target_boot_device', None)
if device is not None:
task.driver.management.set_boot_device(task, device)
task.node.driver_internal_info = driver_internal_info
task.node.save()
def get_power_state(self, task):
"""Gets the current power state.
:param task: a TaskManager instance.
:returns: one of :mod:`ironic.common.states`
:raises: MissingParameterValue, if some required parameter(s) are
missing in the node's driver_info.
:raises: InvalidParameterValue, if some parameter(s) have invalid
value(s) in the node's driver_info.
:raises: VirtualBoxOperationFailed, if error encountered from
VirtualBox operation.
"""
power_status = _run_virtualbox_method(task.node, 'get_power_state',
'get_power_status')
try:
return VIRTUALBOX_TO_IRONIC_POWER_MAPPING[power_status]
except KeyError:
msg = _LE("VirtualBox returned unknown state '%(state)s' for "
"node %(node)s")
LOG.error(msg, {'state': power_status, 'node': task.node.uuid})
return states.ERROR
@task_manager.require_exclusive_lock
def set_power_state(self, task, target_state):
"""Turn the current power state on or off.
:param task: a TaskManager instance.
:param target_state: The desired power state POWER_ON,POWER_OFF or
REBOOT from :mod:`ironic.common.states`.
:raises: MissingParameterValue, if some required parameter(s) are
missing in the node's driver_info.
:raises: InvalidParameterValue, if some parameter(s) have invalid
value(s) in the node's driver_info OR if an invalid power state
was specified.
:raises: VirtualBoxOperationFailed, if error encountered from
VirtualBox operation.
"""
        # Set the boot device before powering on to cover the case where the
        # user shuts the machine down without calling the power off method
        # here (for instance, a soft power off from inside the OS).
if target_state == states.POWER_OFF:
_run_virtualbox_method(task.node, 'set_power_state', 'stop')
self._apply_boot_device(task)
elif target_state == states.POWER_ON:
self._apply_boot_device(task)
_run_virtualbox_method(task.node, 'set_power_state', 'start')
elif target_state == states.REBOOT:
self.reboot(task)
else:
msg = _("'set_power_state' called with invalid power "
"state '%s'") % target_state
raise exception.InvalidParameterValue(msg)
@task_manager.require_exclusive_lock
def reboot(self, task):
"""Reboot the node.
:param task: a TaskManager instance.
:raises: MissingParameterValue, if some required parameter(s) are
missing in the node's driver_info.
:raises: InvalidParameterValue, if some parameter(s) have invalid
value(s) in the node's driver_info.
:raises: VirtualBoxOperationFailed, if error encountered from
VirtualBox operation.
"""
_run_virtualbox_method(task.node, 'reboot', 'stop')
self._apply_boot_device(task)
_run_virtualbox_method(task.node, 'reboot', 'start')
class VirtualBoxManagement(base.ManagementInterface):
def get_properties(self):
return COMMON_PROPERTIES
def validate(self, task):
"""Check that 'driver_info' contains required credentials.
Validates whether the 'driver_info' property of the supplied
task's node contains the required credentials information.
:param task: a task from TaskManager.
:raises: MissingParameterValue, if some required parameter(s) are
missing in the node's driver_info.
:raises: InvalidParameterValue, if some parameter(s) have invalid
value(s) in the node's driver_info.
"""
_parse_driver_info(task.node)
def get_supported_boot_devices(self, task):
"""Get a list of the supported boot devices.
:param task: a task from TaskManager.
:returns: A list with the supported boot devices defined
in :mod:`ironic.common.boot_devices`.
"""
return list(IRONIC_TO_VIRTUALBOX_DEVICE_MAPPING.keys())
def _get_boot_device_from_hardware(self, task):
boot_dev = _run_virtualbox_method(task.node,
'get_boot_device', 'get_boot_device')
ironic_boot_dev = VIRTUALBOX_TO_IRONIC_DEVICE_MAPPING.get(boot_dev)
persistent = True
if not ironic_boot_dev:
persistent = None
msg = _LW("VirtualBox returned unknown boot "
"device '%(device)s' for node %(node)s")
LOG.warning(msg, {'device': boot_dev, 'node': task.node.uuid})
return (ironic_boot_dev, persistent)
def get_boot_device(self, task):
"""Get the current boot device for a node.
:param task: a task from TaskManager.
:returns: a dictionary containing:
'boot_device': one of the ironic.common.boot_devices or None
'persistent': True if boot device is persistent, False otherwise
:raises: MissingParameterValue, if some required parameter(s) are
missing in the node's driver_info.
:raises: InvalidParameterValue, if some parameter(s) have invalid
value(s) in the node's driver_info.
:raises: VirtualBoxOperationFailed, if error encountered from
VirtualBox operation.
"""
if task.driver.power.get_power_state(task) == states.POWER_OFF:
ironic_boot_dev, persistent = \
self._get_boot_device_from_hardware(task)
else:
ironic_boot_dev = task.node. \
driver_internal_info.get('vbox_target_boot_device')
if ironic_boot_dev is not None:
msg = _LW("As ironic node %(node)s is"
" powered on, we will set to boot"
" from %(device)s before next boot.")
LOG.warning(msg, {'node': task.node.uuid,
'device': ironic_boot_dev})
persistent = True
else:
# Maybe the vbox_target_boot_device is cleaned
ironic_boot_dev, persistent = \
self._get_boot_device_from_hardware(task)
return {'boot_device': ironic_boot_dev, 'persistent': persistent}
@task_manager.require_exclusive_lock
def set_boot_device(self, task, device, persistent=False):
"""Set the boot device for a node.
:param task: a task from TaskManager.
:param device: ironic.common.boot_devices
        :param persistent: This argument is ignored as VirtualBox supports
                           only persistent boot devices.
:raises: MissingParameterValue, if some required parameter(s) are
missing in the node's driver_info.
:raises: InvalidParameterValue, if some parameter(s) have invalid
value(s) in the node's driver_info.
:raises: VirtualBoxOperationFailed, if error encountered from
VirtualBox operation.
"""
# NOTE(rameshg87): VirtualBox has only persistent boot devices.
try:
boot_dev = IRONIC_TO_VIRTUALBOX_DEVICE_MAPPING[device]
except KeyError:
raise exception.InvalidParameterValue(
_("Invalid boot device %s specified.") % device)
if task.driver.power.get_power_state(task) == states.POWER_OFF:
_run_virtualbox_method(task.node, 'set_boot_device',
'set_boot_device', boot_dev)
else:
            LOG.warning(_LW('Node %(node_uuid)s: VirtualBox does not support '
                            'setting the boot device while the VM is powered '
                            'on; booting from %(device)s will be set on the '
                            'next reboot.'),
                        {'node_uuid': task.node.uuid, 'device': device})
            # Store the target boot device in case the end user shuts the
            # bare metal machine off via the Nova API.
boot_device_now = self.get_boot_device(task)['boot_device']
if device != boot_device_now:
driver_internal_info = task.node.driver_internal_info
driver_internal_info['vbox_target_boot_device'] = device
task.node.driver_internal_info = driver_internal_info
task.node.save()
def get_sensors_data(self, task):
"""Get sensors data.
:param task: a TaskManager instance.
:raises: FailedToGetSensorData when getting the sensor data fails.
:raises: FailedToParseSensorData when parsing sensor data fails.
:returns: returns a consistent format dict of sensor data grouped by
sensor type, which can be processed by Ceilometer.
"""
raise NotImplementedError()
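# Illustrative only: the shape of driver_info that _parse_driver_info()
# expects on a node; the host, credentials and VM name below are made-up
# values, not defaults of this driver.
#
#   node.driver_info = {
#       'virtualbox_vmname': 'baremetal-vm-1',   # required
#       'virtualbox_host': '10.0.0.5',           # required
#       'virtualbox_username': 'admin',          # optional
#       'virtualbox_password': 'secret',         # optional
#       'virtualbox_port': 18083,                # optional; defaults to CONF.virtualbox.port
#   }
#
# _parse_driver_info() strips the 'virtualbox_' prefix and validates the port,
# returning {'vmname': ..., 'host': ..., 'username': ..., 'password': ...,
# 'port': 18083}.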
|
{
"content_hash": "bfa92a6d3bdc965cdf0ab90dc20f50be",
"timestamp": "",
"source": "github",
"line_count": 391,
"max_line_length": 79,
"avg_line_length": 41.710997442455245,
"alnum_prop": 0.6223557544913851,
"repo_name": "devananda/ironic",
"id": "b58319d3c6e12b754531c2642fce6985b81c5666",
"size": "16855",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ironic/drivers/modules/virtualbox.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "3354566"
}
],
"symlink_target": ""
}
|
from sys import version_info
if version_info >= (2,6,0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_csgraph', [dirname(__file__)])
except ImportError:
import _csgraph
return _csgraph
if fp is not None:
try:
_mod = imp.load_module('_csgraph', fp, pathname, description)
finally:
fp.close()
return _mod
_csgraph = swig_import_helper()
del swig_import_helper
else:
import _csgraph
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
if (name == "thisown"): return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name,None)
if method: return method(self,value)
if (not static) or hasattr(self,name):
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
if (name == "thisown"): return self.this.own()
method = class_type.__swig_getmethods__.get(name,None)
if method: return method(self)
raise AttributeError(name)
def _swig_repr(self):
try: strthis = "proxy of " + self.this.__repr__()
except: strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object : pass
_newclass = 0
def cs_graph_components(*args):
"""cs_graph_components(int n_nod, int Ap, int Aj, int flag) -> int"""
return _csgraph.cs_graph_components(*args)
|
{
"content_hash": "96e2f5f4ffe40389cfbf84bf20c7a27b",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 90,
"avg_line_length": 30.686567164179106,
"alnum_prop": 0.6021400778210116,
"repo_name": "ominux/scikit-learn",
"id": "4efad4c6b66d68759719827dd5025bcc345e912c",
"size": "2338",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "sklearn/utils/sparsetools/csgraph.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "455969"
},
{
"name": "C++",
"bytes": "240380"
},
{
"name": "Makefile",
"bytes": "1411"
},
{
"name": "Python",
"bytes": "2064853"
},
{
"name": "Shell",
"bytes": "486"
}
],
"symlink_target": ""
}
|
"""Test the lander.ext.parser.discovery module."""
from lander.ext.parser._discovery import ParsingPlugins
from lander.parsers.article import ArticleParser
def test_discovery() -> None:
plugins = ParsingPlugins.load_plugins()
assert "article" in plugins.names
assert plugins["article"] == ArticleParser
|
{
"content_hash": "aeb14fd9eda5e290fbe9f87fc62b4f02",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 55,
"avg_line_length": 29,
"alnum_prop": 0.7523510971786834,
"repo_name": "lsst-sqre/lander",
"id": "98d69f551df8721973e9e78d2e3983b8888e4221",
"size": "319",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/ext_parser_discovery_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "2183"
},
{
"name": "Jinja",
"bytes": "4630"
},
{
"name": "Makefile",
"bytes": "527"
},
{
"name": "Python",
"bytes": "117079"
},
{
"name": "TeX",
"bytes": "32126"
}
],
"symlink_target": ""
}
|
# Copyright 2015 Dejan D. M. Milosavljevic
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [ 'Options', 'Settings', 'Atom', 'Environment', 'Config', 'Translator', 'Custom', 'component' ]
__name__ = 'nucleotide'
__author__ = 'I'
__developer__ = 'I'
__status__ = 'production'
__version__ = '0.0.0.0'
__date__ = '20:21 Friday, 02 October, 2015'
__revision__ = ''
__build__ = ''
__buildsys__ = ''
from .config import *
from .translator import *
from .klass import *
from .custom import *
from .atom import *
from .options import *
from .catalog import *
from .settings import *
from .environment import *
#from .main import *
|
{
"content_hash": "7a1d5ad9cf03890ac60ff249600279dd",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 111,
"avg_line_length": 28.866666666666667,
"alnum_prop": 0.6058506543494996,
"repo_name": "dmilos/nucleotide",
"id": "c446e396e424b6994df9340bc4c274742dc93773",
"size": "1323",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/nucleotide/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3225"
},
{
"name": "C++",
"bytes": "7281"
},
{
"name": "Python",
"bytes": "250438"
},
{
"name": "Shell",
"bytes": "973"
}
],
"symlink_target": ""
}
|
"""
Firewall configuration plugin for AIO.
"""
import gettext
from otopi import plugin, util
from ovirt_engine_setup import constants as osetupcons
from ovirt_engine_setup.engine import constants as oenginecons
def _(m):
return gettext.dgettext(message=m, domain='ovirt-engine-setup')
@util.export
class Plugin(plugin.PluginBase):
"""
Firewall configuration plugin for AIO
"""
def __init__(self, context):
super(Plugin, self).__init__(context=context)
@plugin.event(
stage=plugin.Stages.STAGE_CUSTOMIZATION,
after=(
osetupcons.Stages.NET_FIREWALL_MANAGER_AVAILABLE,
oenginecons.Stages.AIO_CONFIG_AVAILABLE,
),
# must be run before firewall_manager plugin
condition=lambda self: self.environment[oenginecons.AIOEnv.CONFIGURE],
# must be always enabled to create examples
)
def _configuration(self):
self.environment[osetupcons.NetEnv.FIREWALLD_SERVICES].append(
{
'name': 'ovirt-aio',
'directory': 'aio'
}
)
# vim: expandtab tabstop=4 shiftwidth=4
|
{
"content_hash": "dfe1eae487a867d1f5e5b026ca8667d6",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 78,
"avg_line_length": 25.377777777777776,
"alnum_prop": 0.6453590192644484,
"repo_name": "eayun/ovirt-engine",
"id": "bc974a29009731652254afd8012c857127b31d86",
"size": "1777",
"binary": false,
"copies": "6",
"ref": "refs/heads/eayunos-4.2",
"path": "packaging/setup/plugins/ovirt-engine-setup/ovirt-engine/all-in-one/firewall.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "69586"
},
{
"name": "HTML",
"bytes": "16218"
},
{
"name": "Java",
"bytes": "35074047"
},
{
"name": "JavaScript",
"bytes": "69948"
},
{
"name": "Makefile",
"bytes": "24723"
},
{
"name": "PLSQL",
"bytes": "1101"
},
{
"name": "PLpgSQL",
"bytes": "796728"
},
{
"name": "Python",
"bytes": "970860"
},
{
"name": "Roff",
"bytes": "10764"
},
{
"name": "Shell",
"bytes": "163853"
},
{
"name": "XSLT",
"bytes": "54683"
}
],
"symlink_target": ""
}
|
class XMLRPCHandler():
pass
|
{
"content_hash": "10506b7c3df0f86c56d19ef25aface72",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 22,
"avg_line_length": 15,
"alnum_prop": 0.7333333333333333,
"repo_name": "briansan/rams",
"id": "4ae6a274d0f1b2c8b64e407ed081d5436265ed80",
"size": "131",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "RAMS/comm/XMLRPCHandler.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "24617"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('catalogue', '0005_auto_20170306_0419'),
]
operations = [
migrations.AddField(
model_name='post',
name='member_Name',
field=models.CharField(default=b'Nill', editable=False, max_length=140, null=True),
),
]
|
{
"content_hash": "ce6772780ac85b51ed15ad46c87a6418",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 95,
"avg_line_length": 23.666666666666668,
"alnum_prop": 0.607981220657277,
"repo_name": "R-Wolf/CFD_A_library",
"id": "76d9000a17fa80395abb728b9cc24033f746109d",
"size": "499",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "catalogue/migrations/0006_post_member_name.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "20896"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Python",
"bytes": "48870"
}
],
"symlink_target": ""
}
|
import re
text = 'This is some text -- with punctuation.'
pattern = 'is'
print('Text :', text)
print('Pattern :', pattern)
m = re.search(pattern, text)
print('Search :', m)
s = re.fullmatch(pattern, text)
print('Full match :', s)
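# Expected behaviour: re.search() finds the first occurrence of 'is' (inside
# 'This', at span (2, 4)), while re.fullmatch() returns None because the
# pattern must match the entire string.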
|
{
"content_hash": "7343d9d03f0605bd8cbba3c3ccbe6d6c",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 47,
"avg_line_length": 19,
"alnum_prop": 0.6153846153846154,
"repo_name": "jasonwee/asus-rt-n14uhp-mrtg",
"id": "ec2074dcc483e68b959fce635cf6e03e0462833d",
"size": "247",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/lesson_text/re_fullmatch.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "45876"
},
{
"name": "HTML",
"bytes": "107072"
},
{
"name": "JavaScript",
"bytes": "161335"
},
{
"name": "Python",
"bytes": "6923750"
},
{
"name": "Shell",
"bytes": "7616"
}
],
"symlink_target": ""
}
|
import unittest
from mlabns.db import model
from mlabns.db import client_signature_fetcher
from mlabns.db import sliver_tool_fetcher
from mlabns.util import lookup_query
from mlabns.util import message
from mlabns.util import resolver
import mock
def _createSliverTool(tool_id,
site_id=None,
status_ipv4=None,
status_ipv6=None,
latitude=None,
longitude=None,
country=None):
tool = model.SliverTool()
tool.tool_id = tool_id
tool.site_id = site_id
tool.status_ipv4 = status_ipv4
tool.status_ipv6 = status_ipv6
tool.latitude = latitude
tool.longitude = longitude
tool.country = country
return tool
_TOOL_ID = 'valid_tool_id'
class ResolverTestCaseBase(unittest.TestCase):
"""Base class for common assertions among all ResolverTest classes."""
def assertQueryResultSingleTool(self, query, mock_fetch_results,
result_tool_expected,
tool_properties_expected):
"""Assert that the resolver result matches expected values.
Assert that calling resolver.answer_query returns a list with a single
tool and that resolver fetched tools from the db using the correct
criteria.
Args:
query: LookupQuery instance based on the client's query.
mock_fetch_results: Mock results from querying the db.
result_tool_expected: The expected winning tool that the resolver
returns.
tool_properties_expected: Expected tool properties that resolver
used to retrieve tools from the db.
"""
mock_fetch = sliver_tool_fetcher.SliverToolFetcher().fetch
mock_fetch.return_value = mock_fetch_results
query_results_expected = [result_tool_expected]
query_results_actual = self.resolver.answer_query(query)
self.assertSequenceEqual(query_results_expected, query_results_actual)
mock_fetch.assert_called_with(tool_properties_expected)
def assertQueryResultMultiTool(self, query, mock_fetch_results,
query_results_expected,
tool_properties_expected):
"""Assert that the resolver result matches expected values.
Assert that calling resolver.answer_query returns a list with multiple
tools and that resolver fetched tools from the db using the correct
criteria.
Args:
query: LookupQuery instance based on the client's query.
mock_fetch_results: Mock results from querying the db.
query_results_expected: The expected winning tools that the resolver
returns.
tool_properties_expected: Expected tool properties that resolver
used to retrieve tools from the db.
"""
mock_fetch = sliver_tool_fetcher.SliverToolFetcher().fetch
mock_fetch.return_value = mock_fetch_results
query_results_actual = self.resolver.answer_query(query)
self.assertSetEqual(
set(query_results_expected), set(query_results_actual))
mock_fetch.assert_called_with(tool_properties_expected)
def assertQueryResultSingleToolWithRandomChoice(
self, query, mock_fetch_results, filtered_tool_candidates,
tool_properties_expected):
"""Assert that the resolver result matches expected values.
Assert that calling resolver.answer_query finds a list of tool
candidates to return and then randomly selects a single tool as the
winner. Also asserts that the resolver fetched tools from the db using
the correct criteria.
Args:
query: LookupQuery instance based on the client's query.
mock_fetch_results: Mock results from querying the db.
filtered_tool_candidates: The expected candidate tools from which
the resolver will randomly pick a winner.
tool_properties_expected: Expected tool properties that resolver
used to retrieve tools from the db.
"""
mock_fetch = sliver_tool_fetcher.SliverToolFetcher().fetch
mock_fetch.return_value = mock_fetch_results
# Mock out random behavior to allow deterministic test results
with mock.patch('random.choice') as mock_random:
random_winner_index = 0
mock_random.side_effect = lambda x: x[random_winner_index]
query_results_expected = [
filtered_tool_candidates[random_winner_index]
]
query_results_actual = self.resolver.answer_query(query)
self.assertSequenceEqual(query_results_expected,
query_results_actual)
# Make sure that the random selection was between the expected
# candidate tools, after any filtering
self.assertSequenceEqual(filtered_tool_candidates,
mock_random.call_args[0][0])
mock_fetch.assert_called_with(tool_properties_expected)
def assertQueryResultMultiToolWithRandomSample(
self, query, mock_fetch_results, filtered_tool_candidates,
sample_size, tool_properties_expected):
"""Assert that the resolver result matches expected values.
Assert that calling resolver.answer_query finds a list of tool
candidates to return and then randomly selects a single tool as the
winner. Also asserts that the resolver fetched tools from the db using
the correct criteria.
Args:
query: LookupQuery instance based on the client's query.
mock_fetch_results: Mock results from querying the db.
filtered_tool_candidates: The expected candidate tools from which
the resolver will randomly pick a winner.
sample_size: The number of randomly selected elements expected in
the final result.
tool_properties_expected: Expected tool properties that resolver
used to retrieve tools from the db.
"""
mock_fetch = sliver_tool_fetcher.SliverToolFetcher().fetch
mock_fetch.return_value = mock_fetch_results
# Mock out random behavior to allow deterministic test results
with mock.patch('random.sample') as mock_random:
# Make random.sample yield the k last elements of the set
mock_random.side_effect = lambda x, k: x[-k:]
query_results_expected = filtered_tool_candidates[-sample_size:]
query_results_actual = self.resolver.answer_query(query)
self.assertSequenceEqual(query_results_expected,
query_results_actual)
# Make sure that the random selection was between the expected
# candidate tools, after any filtering
self.assertSequenceEqual(filtered_tool_candidates,
mock_random.call_args[0][0])
mock_fetch.assert_called_with(tool_properties_expected)
def assertQueryResultWithRandomShuffle(self, query, mock_fetch_results,
query_results_expected,
tool_properties_expected):
"""Assert that the resolver result matches expected values.
Assert that calling resolver.answer_query finds a list of tool
candidates to return and then randomly selects a subset of those tools
as the winners. Also asserts that the resolver fetched tools from the db
using the correct criteria.
Args:
query: LookupQuery instance based on the client's query.
mock_fetch_results: Mock results from querying the db.
query_results_expected: Expected results from calling
resolver.answer_query().
tool_properties_expected: Expected tool properties that resolver
used to retrieve tools from the db.
"""
mock_fetch = sliver_tool_fetcher.SliverToolFetcher().fetch
mock_fetch.return_value = mock_fetch_results
# Mock out random behavior to allow deterministic test results
with mock.patch('random.shuffle') as mock_shuffle:
# Change the random shuffle to a deterministic list reverse
mock_shuffle.side_effect = lambda x: x.reverse()
query_results_actual = self.resolver.answer_query(query)
self.assertSetEqual(
set(query_results_expected), set(query_results_actual))
mock_fetch.assert_called_with(tool_properties_expected)
class AllResolverTestCase(ResolverTestCaseBase):
def setUp(self):
sliver_tool_fetcher_patch = mock.patch.object(sliver_tool_fetcher,
'SliverToolFetcher',
autospec=True)
self.addCleanup(sliver_tool_fetcher_patch.stop)
sliver_tool_fetcher_patch.start()
self.resolver = resolver.AllResolver()
def testAnswerQueryWhenMatchingToolsExist(self):
query = lookup_query.LookupQuery()
query.tool_id = _TOOL_ID
mock_fetched_tools = [_createSliverTool(_TOOL_ID),
_createSliverTool(_TOOL_ID)]
# AllResolver should not do any additional filtering on the tools it
# fetched.
query_results_expected = mock_fetched_tools
# Make sure the resolver is fetching only online tools that match the
# specified tool ID.
tool_properties_expected = sliver_tool_fetcher.ToolProperties(
tool_id=_TOOL_ID, status=message.STATUS_ONLINE)
self.assertQueryResultMultiTool(query, mock_fetched_tools,
query_results_expected,
tool_properties_expected)
def testAnswerQueryWhenMatchingToolsExistAndQuerySpecifiesAf(self):
"""Resolver should take into account address family when specified."""
query = lookup_query.LookupQuery()
query.tool_id = _TOOL_ID
query.tool_address_family = message.ADDRESS_FAMILY_IPv6
mock_fetched_tools = [_createSliverTool(_TOOL_ID),
_createSliverTool(_TOOL_ID)]
# AllResolver should not do any additional filtering on the tools it
# fetched.
query_results_expected = mock_fetched_tools
# Make sure the resolver is fetching only tools with IPv6 interface
# online that match the specified tool ID.
tool_properties_expected = sliver_tool_fetcher.ToolProperties(
tool_id=_TOOL_ID,
address_family=message.ADDRESS_FAMILY_IPv6,
status=message.STATUS_ONLINE)
self.assertQueryResultMultiTool(query, mock_fetched_tools,
query_results_expected,
tool_properties_expected)
def testAnswerQueryWhenNoToolsMatchToolId(self):
tool_id = 'non_existent_tool'
query = lookup_query.LookupQuery()
query.tool_id = tool_id
# Simulate no matching tools
sliver_tool_fetcher.SliverToolFetcher().fetch.return_value = []
query_results = self.resolver.answer_query(query)
# Result should be None when there are no matches.
self.assertIsNone(query_results)
class GeoResolverTestCase(ResolverTestCaseBase):
def setUp(self):
sliver_tool_fetcher_patch = mock.patch.object(sliver_tool_fetcher,
'SliverToolFetcher',
autospec=True)
self.addCleanup(sliver_tool_fetcher_patch.stop)
sliver_tool_fetcher_patch.start()
self.resolver = resolver.GeoResolver()
client_signature_fetcher_patch = mock.patch.object(
client_signature_fetcher,
'ClientSignatureFetcher',
autospec=True)
self.addCleanup(client_signature_fetcher_patch.stop)
client_signature_fetcher_patch.start()
def testAnswerQueryWhenSingleToolIsClosest(self):
"""When a single tool is closest, return that tool."""
query = lookup_query.LookupQuery()
query.tool_id = _TOOL_ID
query.latitude = 0.0
query.longitude = 0.0
close_tool = _createSliverTool(_TOOL_ID,
site_id='abc01',
latitude=1.0,
longitude=1.0)
far_tool = _createSliverTool(_TOOL_ID,
site_id='cba01',
latitude=5.0,
longitude=5.0)
# Make sure the resolver is fetching only online tools that match the
# specified tool ID.
tool_properties_expected = sliver_tool_fetcher.ToolProperties(
tool_id=_TOOL_ID, status=message.STATUS_ONLINE)
mock_fetched_tools = [close_tool, far_tool]
client_signature_fetcher.ClientSignatureFetcher(
).fetch.return_value = 1.0
self.assertQueryResultSingleTool(query, mock_fetched_tools, close_tool,
tool_properties_expected)
def testAnswerQueryWhenSingleToolIsClosestAndQuerySpecifiesAf(self):
query = lookup_query.LookupQuery()
query.tool_id = _TOOL_ID
query.latitude = 0.0
query.longitude = 0.0
query.tool_address_family = message.ADDRESS_FAMILY_IPv4
close_tool = _createSliverTool(_TOOL_ID,
site_id='abc01',
latitude=1.0,
longitude=1.0)
far_tool = _createSliverTool(_TOOL_ID,
site_id='cba01',
latitude=5.0,
longitude=5.0)
# Make sure the resolver is fetching only online tools that match the
# specified tool ID.
tool_properties_expected = sliver_tool_fetcher.ToolProperties(
tool_id=_TOOL_ID,
address_family=message.ADDRESS_FAMILY_IPv4,
status=message.STATUS_ONLINE)
mock_fetched_tools = [close_tool, far_tool]
client_signature_fetcher.ClientSignatureFetcher(
).fetch.return_value = 1.0
self.assertQueryResultSingleTool(query, mock_fetched_tools, close_tool,
tool_properties_expected)
def testAnswerQueryWhenMultipleToolsAreEquallyClose(self):
"""When multiple tools are equally closest, randomly select one."""
query = lookup_query.LookupQuery()
query.tool_id = _TOOL_ID
query.latitude = 0.0
query.longitude = 0.0
equidistant_tools = (_createSliverTool(_TOOL_ID,
site_id='aaa01',
latitude=1.0,
longitude=5.0),
_createSliverTool(_TOOL_ID,
site_id='bbb01',
latitude=5.0,
longitude=1.0))
mock_fetched_tools = [
_createSliverTool(_TOOL_ID,
site_id='ccc01',
latitude=10.0,
longitude=10.0),
_createSliverTool(_TOOL_ID,
site_id='ddd01',
latitude=20.0,
longitude=20.0)
]
mock_fetched_tools.extend(equidistant_tools)
query_results_expected = [equidistant_tools[-1]]
tool_properties_expected = sliver_tool_fetcher.ToolProperties(
tool_id=_TOOL_ID, status=message.STATUS_ONLINE)
client_signature_fetcher.ClientSignatureFetcher(
).fetch.return_value = 1.0
self.assertQueryResultWithRandomShuffle(query, mock_fetched_tools,
query_results_expected,
tool_properties_expected)
def testAnswerQueryWhenNoToolsMatchToolId(self):
tool_id = 'non_existent_tool'
query = lookup_query.LookupQuery()
query.tool_id = tool_id
# Simulate no matching tools
sliver_tool_fetcher.SliverToolFetcher().fetch.return_value = []
# Result should be None when there are no matches.
self.assertIsNone(self.resolver.answer_query(query))
def testAnswerQueryReturnsRandomToolWhenQueryIsMissingLatLon(self):
# TODO(mtlynch): This behavior is confusing because it is inconsistent
# with the other resolvers that return None when required attributes are
# missing from the query. Change so that all are consistent.
query = lookup_query.LookupQuery()
query.tool_id = _TOOL_ID
mock_fetched_tools = [
_createSliverTool(
_TOOL_ID, site_id='abc01', latitude=1.0, longitude=1.0),
_createSliverTool(
_TOOL_ID, site_id='cba01', latitude=5.0, longitude=5.0)
] # yapf: disable
# When lat/lon is missing, resolver performs no additional filtering
# after fetch
filtered_tools_expected = mock_fetched_tools
tool_properties_expected = sliver_tool_fetcher.ToolProperties(
tool_id=_TOOL_ID, status=message.STATUS_ONLINE)
self.assertQueryResultMultiToolWithRandomSample(
query, mock_fetched_tools, filtered_tools_expected, 1,
tool_properties_expected)
class GeoResolverWithOptionsTestCase(ResolverTestCaseBase):
def setUp(self):
sliver_tool_fetcher_patch = mock.patch.object(sliver_tool_fetcher,
'SliverToolFetcher',
autospec=True)
self.addCleanup(sliver_tool_fetcher_patch.stop)
sliver_tool_fetcher_patch.start()
self.resolver = resolver.GeoResolverWithOptions()
# Allow full diff output on test failures
self.maxDiff = None
client_signature_fetcher_patch = mock.patch.object(
client_signature_fetcher,
'ClientSignatureFetcher',
autospec=True)
self.addCleanup(client_signature_fetcher_patch.stop)
client_signature_fetcher_patch.start()
def testAnswerQueryWhenFourToolsAreEquallyClosest(self):
"""When exactly four tools tie for closest, return those four."""
query = lookup_query.LookupQuery()
query.tool_id = _TOOL_ID
query.latitude = 0.0
query.longitude = 0.0
mock_fetched_tools = [
_createSliverTool(
_TOOL_ID, site_id='abc01', latitude=1.0, longitude=1.0),
_createSliverTool(
_TOOL_ID, site_id='abc02', latitude=1.0, longitude=1.0),
_createSliverTool(
_TOOL_ID, site_id='abc03', latitude=1.0, longitude=1.0),
_createSliverTool(
_TOOL_ID, site_id='abc04', latitude=1.0, longitude=1.0),
_createSliverTool(
_TOOL_ID, site_id='cba01', latitude=5.0, longitude=5.0)
] # yapf: disable
# Result should be the four closest tools
query_results_expected = mock_fetched_tools[:4]
# Make sure the resolver is fetching only online tools that match the
# specified tool ID.
tool_properties_expected = sliver_tool_fetcher.ToolProperties(
tool_id=_TOOL_ID, status=message.STATUS_ONLINE)
client_signature_fetcher.ClientSignatureFetcher(
).fetch.return_value = 1.0
self.assertQueryResultMultiTool(query, mock_fetched_tools,
query_results_expected,
tool_properties_expected)
def testAnswerQueryWhenMoreThanFourToolsAreEquallyClosest(self):
"""When more than four tools tie for closest, randomly select four."""
query = lookup_query.LookupQuery()
query.tool_id = _TOOL_ID
query.latitude = 0.0
query.longitude = 0.0
mock_fetched_tools = [
_createSliverTool(
_TOOL_ID, site_id='abc01', latitude=1.0, longitude=1.0),
_createSliverTool(
_TOOL_ID, site_id='abc02', latitude=1.0, longitude=1.0),
_createSliverTool(
_TOOL_ID, site_id='abc03', latitude=1.0, longitude=1.0),
_createSliverTool(
_TOOL_ID, site_id='abc04', latitude=1.0, longitude=1.0),
_createSliverTool(
_TOOL_ID, site_id='abc05', latitude=1.0, longitude=1.0),
_createSliverTool(
_TOOL_ID, site_id='abc06', latitude=1.0, longitude=1.0),
_createSliverTool(
_TOOL_ID, site_id='cba01', latitude=5.0, longitude=5.0)
] # yapf: disable
# The mock shuffle reverses the list, so we expect items 2...6 in
# reverse order.
query_results_expected = mock_fetched_tools[-2:-6:-1]
# Make sure the resolver is fetching only online tools that match the
# specified tool ID.
tool_properties_expected = sliver_tool_fetcher.ToolProperties(
tool_id=_TOOL_ID, status=message.STATUS_ONLINE)
client_signature_fetcher.ClientSignatureFetcher(
).fetch.return_value = 1.0
self.assertQueryResultWithRandomShuffle(query, mock_fetched_tools,
query_results_expected,
tool_properties_expected)
def testAnswerQueryWhenMoreThanFourToolsFromDifferentSitesAreEquallyClosest(
self):
"""When more than four tools tie for closest, randomly select four."""
query = lookup_query.LookupQuery()
query.tool_id = _TOOL_ID
query.latitude = 0.0
query.longitude = 0.0
mock_fetched_tools = [
_createSliverTool(
_TOOL_ID, site_id='aaa01', latitude=1.0, longitude=1.0),
_createSliverTool(
_TOOL_ID, site_id='bbb01', latitude=-1.0, longitude=-1.0),
_createSliverTool(
_TOOL_ID, site_id='ccc01', latitude=-1.0, longitude=1.0),
_createSliverTool(
_TOOL_ID, site_id='ddd01', latitude=1.0, longitude=-1.0),
_createSliverTool(
_TOOL_ID, site_id='eee01', latitude=-1.0, longitude=-1.0),
_createSliverTool(
_TOOL_ID, site_id='fff01', latitude=-1.0, longitude=1.0),
_createSliverTool(
_TOOL_ID, site_id='ggg01', latitude=5.0, longitude=5.0)
] # yapf: disable
# The mock shuffle reverses the list, so we expect items 2...6 in
# reverse order.
query_results_expected = mock_fetched_tools[-2:-6:-1]
# Make sure the resolver is fetching only online tools that match the
# specified tool ID.
tool_properties_expected = sliver_tool_fetcher.ToolProperties(
tool_id=_TOOL_ID, status=message.STATUS_ONLINE)
client_signature_fetcher.ClientSignatureFetcher(
).fetch.return_value = 1.0
self.assertQueryResultWithRandomShuffle(query, mock_fetched_tools,
query_results_expected,
tool_properties_expected)
def testAnswerQueryWhenFewerThanFourToolsMatch(self):
"""When fewer than four tools match, return whatever matches."""
query = lookup_query.LookupQuery()
query.tool_id = _TOOL_ID
query.latitude = 0.0
query.longitude = 0.0
mock_fetched_tools = [
_createSliverTool(
_TOOL_ID, site_id='abc01', latitude=1.0, longitude=1.0),
_createSliverTool(
_TOOL_ID, site_id='abc02', latitude=1.0, longitude=1.0),
_createSliverTool(
_TOOL_ID, site_id='cba01', latitude=5.0, longitude=5.0)
] # yapf: disable
query_results_expected = mock_fetched_tools
# Make sure the resolver is fetching only online tools that match the
# specified tool ID.
tool_properties_expected = sliver_tool_fetcher.ToolProperties(
tool_id=_TOOL_ID, status=message.STATUS_ONLINE)
client_signature_fetcher.ClientSignatureFetcher(
).fetch.return_value = 1.0
self.assertQueryResultMultiTool(query, mock_fetched_tools,
query_results_expected,
tool_properties_expected)
def testAnswerQueryWhenNoToolsMatchToolId(self):
tool_id = 'non_existent_tool'
query = lookup_query.LookupQuery()
query.tool_id = tool_id
# Simulate no matching tools
sliver_tool_fetcher.SliverToolFetcher().fetch.return_value = []
client_signature_fetcher.ClientSignatureFetcher(
).fetch.return_value = 1.0
# Result should be None when there are no matches.
self.assertIsNone(self.resolver.answer_query(query))
def testAnswerQueryReturnsRandomSubsetWhenQueryIsMissingLatLon(self):
"""When lat/lon is missing, expect a random subset of tools."""
# TODO(mtlynch): This behavior is confusing because it is inconsistent
# with the other resolvers that return None when required attributes are
# missing from the query. Change so that all are consistent.
query = lookup_query.LookupQuery()
query.tool_id = _TOOL_ID
mock_fetched_tools = [
_createSliverTool(
_TOOL_ID, site_id='abc01', latitude=1.0, longitude=1.0),
_createSliverTool(
_TOOL_ID, site_id='abc02', latitude=1.0, longitude=1.0),
_createSliverTool(
_TOOL_ID, site_id='abc03', latitude=1.0, longitude=1.0),
_createSliverTool(
_TOOL_ID, site_id='abc04', latitude=1.0, longitude=1.0),
_createSliverTool(
_TOOL_ID, site_id='cba01', latitude=5.0, longitude=5.0)
] # yapf: disable
# When lat/lon is missing, resolver performs no additional filtering
# after fetch
filtered_tools_expected = mock_fetched_tools
tool_properties_expected = sliver_tool_fetcher.ToolProperties(
tool_id=_TOOL_ID, status=message.STATUS_ONLINE)
self.assertQueryResultMultiToolWithRandomSample(
query, mock_fetched_tools, filtered_tools_expected, 4,
tool_properties_expected)
def testAnswerQueryReturnsRandomSubsetWhenQueryIsMissingLatLonLowCandidates(
self):
"""When lat/lon is missing, expect a random subset of tools.
If the number of matching candidates is lower than the number of tools
requested, return all the matching candidates.
"""
query = lookup_query.LookupQuery()
query.tool_id = _TOOL_ID
mock_fetched_tools = [
_createSliverTool(
_TOOL_ID, site_id='abc01', latitude=1.0, longitude=1.0),
_createSliverTool(
_TOOL_ID, site_id='abc02', latitude=1.0, longitude=1.0)
] # yapf: disable
# When lat/lon is missing, resolver performs no additional filtering
# after fetch
filtered_tools_expected = mock_fetched_tools
tool_properties_expected = sliver_tool_fetcher.ToolProperties(
tool_id=_TOOL_ID, status=message.STATUS_ONLINE)
# Normally we expect a random sample of 4, but there are only 2
# candidates in the set
self.assertQueryResultMultiToolWithRandomSample(
query, mock_fetched_tools, filtered_tools_expected, 2,
tool_properties_expected)
class RandomResolverTestCase(ResolverTestCaseBase):
def setUp(self):
sliver_tool_fetcher_patch = mock.patch.object(sliver_tool_fetcher,
'SliverToolFetcher',
autospec=True)
self.addCleanup(sliver_tool_fetcher_patch.stop)
sliver_tool_fetcher_patch.start()
self.resolver = resolver.RandomResolver()
def testAnswerQueryChoosesRandomlyAmongOnlineTools(self):
query = lookup_query.LookupQuery()
query.tool_id = _TOOL_ID
query.tool_address_family = message.ADDRESS_FAMILY_IPv6
mock_fetched_tools = (_createSliverTool(_TOOL_ID,
site_id='aaa01'),
_createSliverTool(_TOOL_ID,
site_id='bbb01'),
_createSliverTool(_TOOL_ID,
site_id='ccc01'),
_createSliverTool(_TOOL_ID,
site_id='ddd01'))
# Random resolver performs no additional filtering after the fetch.
filtered_tools_expected = mock_fetched_tools
# Make sure the resolver is fetching only online tools that match the
# specified tool ID.
tool_properties_expected = sliver_tool_fetcher.ToolProperties(
tool_id=_TOOL_ID,
address_family=message.ADDRESS_FAMILY_IPv6,
status=message.STATUS_ONLINE)
self.assertQueryResultSingleToolWithRandomChoice(
query, mock_fetched_tools, filtered_tools_expected,
tool_properties_expected)
def testAnswerQueryWhenNoToolsMatchToolId(self):
tool_id = 'non_existent_tool'
query = lookup_query.LookupQuery()
query.tool_id = tool_id
# Simulate no matching tools
sliver_tool_fetcher.SliverToolFetcher().fetch.return_value = []
# Result should be None when there are no matches.
self.assertIsNone(self.resolver.answer_query(query))
class MetroResolverTestCase(ResolverTestCaseBase):
def setUp(self):
sliver_tool_fetcher_patch = mock.patch.object(sliver_tool_fetcher,
'SliverToolFetcher',
autospec=True)
self.addCleanup(sliver_tool_fetcher_patch.stop)
sliver_tool_fetcher_patch.start()
self.resolver = resolver.MetroResolver()
def testAnswerReturnsNoneWhenMetroIsNotSpecified(self):
query = lookup_query.LookupQuery()
query.tool_id = _TOOL_ID
# query omits country attribute
candidate_tools = (_createSliverTool(_TOOL_ID),
_createSliverTool(_TOOL_ID))
mock_fetch = sliver_tool_fetcher.SliverToolFetcher().fetch
mock_fetch.return_value = candidate_tools
query_results = self.resolver.answer_query(query)
# Result should be None when there are no matches.
self.assertIsNone(query_results)
def testAnswerQueryChoosesRandomlyAmongToolsInMetro(self):
query = lookup_query.LookupQuery()
query.tool_id = _TOOL_ID
query.metro = 'aaa'
query.tool_address_family = message.ADDRESS_FAMILY_IPv4
mock_fetched_tools = (_createSliverTool(_TOOL_ID,
site_id='aaa01'),
_createSliverTool(_TOOL_ID,
site_id='aaa02'),
_createSliverTool(_TOOL_ID,
site_id='aaa03'))
filtered_tools_expected = mock_fetched_tools
# Make sure the resolver is fetching only online tools that match the
# specified tool ID in the specified metro.
tool_properties_expected = sliver_tool_fetcher.ToolProperties(
tool_id=_TOOL_ID,
status=message.STATUS_ONLINE,
address_family=message.ADDRESS_FAMILY_IPv4,
metro=query.metro)
self.assertQueryResultSingleToolWithRandomChoice(
query, mock_fetched_tools, filtered_tools_expected,
tool_properties_expected)
class CountryResolverTestCase(ResolverTestCaseBase):
def setUp(self):
sliver_tool_fetcher_patch = mock.patch.object(sliver_tool_fetcher,
'SliverToolFetcher',
autospec=True)
self.addCleanup(sliver_tool_fetcher_patch.stop)
sliver_tool_fetcher_patch.start()
self.resolver = resolver.CountryResolver()
def testAnswerReturnsNoneWhenCountryIsNotSpecified(self):
query = lookup_query.LookupQuery()
query.tool_id = _TOOL_ID
# query omits country attribute
candidate_tools = (_createSliverTool(_TOOL_ID),
_createSliverTool(_TOOL_ID))
mock_fetch = sliver_tool_fetcher.SliverToolFetcher().fetch
mock_fetch.return_value = candidate_tools
query_results = self.resolver.answer_query(query)
# Result should be None when there are no matches.
self.assertIsNone(query_results)
def testAnswerQueryChoosesRandomlyAmongToolsInCountry(self):
country = 'valid_country'
query = lookup_query.LookupQuery()
query.tool_id = _TOOL_ID
query.tool_address_family = message.ADDRESS_FAMILY_IPv4
query.country = country
mock_fetched_tools = (_createSliverTool(_TOOL_ID,
site_id='aaa01'),
_createSliverTool(_TOOL_ID,
site_id='bbb01'),
_createSliverTool(_TOOL_ID,
site_id='ccc01'))
filtered_tools_expected = mock_fetched_tools
# Make sure the resolver is fetching only online tools that match the
# specified tool ID in the specified country.
tool_properties_expected = sliver_tool_fetcher.ToolProperties(
tool_id=_TOOL_ID,
status=message.STATUS_ONLINE,
address_family=message.ADDRESS_FAMILY_IPv4,
country=country)
self.assertQueryResultSingleToolWithRandomChoice(
query, mock_fetched_tools, filtered_tools_expected,
tool_properties_expected)
class ResolverTestCase(unittest.TestCase):
def testNewResolver(self):
self.assertIsInstance(
resolver.new_resolver(message.POLICY_GEO), resolver.GeoResolver)
self.assertIsInstance(
resolver.new_resolver(message.POLICY_METRO), resolver.MetroResolver)
self.assertIsInstance(
resolver.new_resolver(message.POLICY_RANDOM),
resolver.RandomResolver)
self.assertIsInstance(
resolver.new_resolver(message.POLICY_COUNTRY),
resolver.CountryResolver)
self.assertIsInstance(
resolver.new_resolver('unrecognized_policy'),
resolver.RandomResolver)
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "8327e3bfe5ac39403243b05d41e26d4b",
"timestamp": "",
"source": "github",
"line_count": 833,
"max_line_length": 80,
"avg_line_length": 42.71788715486195,
"alnum_prop": 0.59375,
"repo_name": "m-lab/mlab-ns",
"id": "fac9f1ee4cce6c4678623d0e57b296aa33e2a63c",
"size": "35584",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "server/mlabns/tests/test_resolver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "14542"
},
{
"name": "Dockerfile",
"bytes": "877"
},
{
"name": "HTML",
"bytes": "36040"
},
{
"name": "JavaScript",
"bytes": "59657"
},
{
"name": "Python",
"bytes": "1475831"
},
{
"name": "Shell",
"bytes": "747"
}
],
"symlink_target": ""
}
|
import sys
import os
from eventlet.support import greenlets as greenlet
from eventlet import patcher
try:
# try and import pkg_resources ...
import pkg_resources
except ImportError:
# ... but do not depend on it
pkg_resources = None
__all__ = ["use_hub", "get_hub", "get_default_hub", "trampoline"]
threading = patcher.original('threading')
_threadlocal = threading.local()
def get_default_hub():
"""Select the default hub implementation based on what multiplexing
libraries are installed. The order that the hubs are tried is:
* epoll
* kqueue
* poll
* select
It won't automatically select the pyevent hub, because it's not
python-thread-safe.
.. include:: ../doc/common.txt
.. note :: |internal|
"""
# pyevent hub disabled for now because it is not thread-safe
# try:
# import eventlet.hubs.pyevent
# return eventlet.hubs.pyevent
# except:
# pass
select = patcher.original('select')
try:
import eventlet.hubs.epolls
return eventlet.hubs.epolls
except ImportError:
try:
import eventlet.hubs.kqueue
return eventlet.hubs.kqueue
except ImportError:
if hasattr(select, 'poll'):
import eventlet.hubs.poll
return eventlet.hubs.poll
else:
import eventlet.hubs.selects
return eventlet.hubs.selects
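# --- Hedged usage sketch (added for illustration; not part of eventlet) ---
# Shows how the fallback chain documented above can be inspected.  The module
# name printed depends on the platform: epolls on most Linux hosts, kqueue on
# BSD/macOS, otherwise poll or selects.
def _example_show_default_hub():
    hub_module = get_default_hub()
    print(hub_module.__name__)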
def use_hub(mod=None):
"""Use the module *mod*, containing a class called Hub, as the
event hub. Usually not required; the default hub is usually fine.
Mod can be an actual module, a string, or None. If *mod* is a module,
    it uses it directly. If *mod* is a string that contains '.' or ':',
    use_hub imports the hub using the 'package.subpackage.module:Class'
    convention. Otherwise it looks for a matching setuptools entry point in
    the 'eventlet.hubs' group, and finally falls back to importing
    `eventlet.hubs.<mod>` and using that as the hub module. If *mod* is None,
use_hub uses the default hub. Only call use_hub during application
initialization, because it resets the hub's state and any existing
timers or listeners will never be resumed.
"""
if mod is None:
mod = os.environ.get('EVENTLET_HUB', None)
if mod is None:
mod = get_default_hub()
if hasattr(_threadlocal, 'hub'):
del _threadlocal.hub
if isinstance(mod, str):
assert mod.strip(), "Need to specify a hub"
if '.' in mod or ':' in mod:
modulename, _, classname = mod.strip().partition(':')
mod = __import__(modulename, globals(), locals(), [classname])
if classname:
mod = getattr(mod, classname)
else:
found = False
if pkg_resources is not None:
for entry in pkg_resources.iter_entry_points(
group='eventlet.hubs', name=mod):
mod, found = entry.load(), True
break
if not found:
mod = __import__(
'eventlet.hubs.' + mod, globals(), locals(), ['Hub'])
if hasattr(mod, 'Hub'):
_threadlocal.Hub = mod.Hub
else:
_threadlocal.Hub = mod
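# --- Hedged usage sketch (added for illustration; not part of eventlet) ---
# The accepted forms of *mod* described in the docstring above.  'selects' is
# used only because that hub exists everywhere; real code would call exactly
# one of these, once, at application start-up.
def _example_pick_hub():
    use_hub('selects')                    # bare name -> eventlet.hubs.selects
    use_hub('eventlet.hubs.selects:Hub')  # 'package.module:Class' form
    use_hub(None)                         # EVENTLET_HUB env var or the default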
def get_hub():
"""Get the current event hub singleton object.
.. note :: |internal|
"""
try:
hub = _threadlocal.hub
except AttributeError:
try:
_threadlocal.Hub
except AttributeError:
use_hub()
hub = _threadlocal.hub = _threadlocal.Hub()
return hub
from eventlet import timeout
def trampoline(fd, read=None, write=None, timeout=None,
timeout_exc=timeout.Timeout):
"""Suspend the current coroutine until the given socket object or file
descriptor is ready to *read*, ready to *write*, or the specified
*timeout* elapses, depending on arguments specified.
To wait for *fd* to be ready to read, pass *read* ``=True``; ready to
write, pass *write* ``=True``. To specify a timeout, pass the *timeout*
argument in seconds.
If the specified *timeout* elapses before the socket is ready to read or
write, *timeout_exc* will be raised instead of ``trampoline()``
returning normally.
.. note :: |internal|
"""
t = None
hub = get_hub()
current = greenlet.getcurrent()
assert hub.greenlet is not current, 'do not call blocking functions from the mainloop'
assert not (
read and write), 'not allowed to trampoline for reading and writing'
try:
fileno = fd.fileno()
except AttributeError:
fileno = fd
if timeout is not None:
t = hub.schedule_call_global(timeout, current.throw, timeout_exc)
try:
if read:
listener = hub.add(hub.READ, fileno, current.switch)
elif write:
listener = hub.add(hub.WRITE, fileno, current.switch)
try:
return hub.switch()
finally:
hub.remove(listener)
finally:
if t is not None:
t.cancel()
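# --- Hedged usage sketch (added for illustration; not part of eventlet) ---
# Waits until a socket is readable, giving up after two seconds.  The socket
# argument is a stand-in: any object with a fileno() method, or a raw file
# descriptor, works.
def _example_wait_readable(sock):
    trampoline(sock, read=True, timeout=2.0)  # raises timeout.Timeout on expiry
    return sock.recv(4096)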
|
{
"content_hash": "dedbff64c2027145ecd154d57d677c40",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 90,
"avg_line_length": 32.0625,
"alnum_prop": 0.6076023391812866,
"repo_name": "neumerance/cloudloon2",
"id": "9b4c2b7e203b5d0c59b2a80d6026058e33ffe3c7",
"size": "5130",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": ".venv/lib/python2.7/site-packages/eventlet/hubs/__init__.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "178040"
},
{
"name": "JavaScript",
"bytes": "460971"
},
{
"name": "Perl",
"bytes": "1954"
},
{
"name": "Python",
"bytes": "3227734"
},
{
"name": "Ruby",
"bytes": "76"
},
{
"name": "Shell",
"bytes": "14108"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
from django.contrib.admin import register
from .models import MailChimpList
@register(MailChimpList)
class MailChimpListModelAdmin(admin.ModelAdmin):
list_display = ('slug', 'enabled', 'mailchimp_list_id')
|
{
"content_hash": "fb0e0f7f3657d9f65b3988cc65ff0aba",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 59,
"avg_line_length": 27.333333333333332,
"alnum_prop": 0.7886178861788617,
"repo_name": "sussexstudent/falmer",
"id": "cbeceec1619d7adf4869a95dde90f66925d15ceb",
"size": "246",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "falmer/newsletters/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2142"
},
{
"name": "Dockerfile",
"bytes": "1035"
},
{
"name": "HTML",
"bytes": "8269"
},
{
"name": "JavaScript",
"bytes": "817"
},
{
"name": "Python",
"bytes": "513792"
},
{
"name": "Shell",
"bytes": "8120"
}
],
"symlink_target": ""
}
|
def fibonacci(limit):
a, b, c = 0, 1, 1
series = ['1']
for i in range(1, limit):
c = a + b
a, b = b, c
series.append(str(c))
return series
def main(limit: ('upper limit for calculations', 'positional', None, int)):
series = ', '.join(fibonacci(limit))
print(f'The first {limit} terms are: {series}')
if __name__ == '__main__':
try:
import plac
plac.call(main)
except KeyboardInterrupt:
print('\nGoodbye!')
|
{
"content_hash": "e86024610d3fd5d6eed0d9a07115c6f9",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 75,
"avg_line_length": 22.318181818181817,
"alnum_prop": 0.5376782077393075,
"repo_name": "alexgurrola/induction",
"id": "c52a30f569540a9e6ca1f79081565a373cffde38",
"size": "491",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "basic/fibonacci.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5946"
}
],
"symlink_target": ""
}
|
import operator
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_dtype_equal
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays.string_arrow import ArrowStringArray, ArrowStringDtype
skip_if_no_pyarrow = td.skip_if_no("pyarrow", min_version="1.0.0")
@pytest.fixture(
params=[
# pandas\tests\arrays\string_\test_string.py:16: error: List item 1 has
# incompatible type "ParameterSet"; expected
# "Sequence[Collection[object]]" [list-item]
"string",
pytest.param(
"arrow_string", marks=skip_if_no_pyarrow
), # type:ignore[list-item]
]
)
def dtype(request):
return request.param
@pytest.fixture
def dtype_object(dtype):
if dtype == "string":
return pd.StringDtype
else:
return ArrowStringDtype
@pytest.fixture(
params=[
pd.arrays.StringArray,
pytest.param(ArrowStringArray, marks=skip_if_no_pyarrow),
]
)
def cls(request):
return request.param
def test_repr(dtype, request):
if dtype == "arrow_string":
reason = (
"AssertionError: assert ' A\n0 a\n1 None\n2 b' "
"== ' A\n0 a\n1 <NA>\n2 b'"
)
mark = pytest.mark.xfail(reason=reason)
request.node.add_marker(mark)
df = pd.DataFrame({"A": pd.array(["a", pd.NA, "b"], dtype=dtype)})
expected = " A\n0 a\n1 <NA>\n2 b"
assert repr(df) == expected
expected = "0 a\n1 <NA>\n2 b\nName: A, dtype: string"
assert repr(df.A) == expected
expected = "<StringArray>\n['a', <NA>, 'b']\nLength: 3, dtype: string"
assert repr(df.A.array) == expected
def test_none_to_nan(cls):
a = cls._from_sequence(["a", None, "b"])
assert a[1] is not None
assert a[1] is pd.NA
def test_setitem_validates(cls):
arr = cls._from_sequence(["a", "b"])
if cls is pd.arrays.StringArray:
msg = "Cannot set non-string value '10' into a StringArray."
else:
msg = "Scalar must be NA or str"
with pytest.raises(ValueError, match=msg):
arr[0] = 10
if cls is pd.arrays.StringArray:
msg = "Must provide strings."
else:
msg = "Scalar must be NA or str"
with pytest.raises(ValueError, match=msg):
arr[:] = np.array([1, 2])
def test_setitem_with_scalar_string(dtype):
# is_float_dtype considers some strings, like 'd', to be floats
# which can cause issues.
arr = pd.array(["a", "c"], dtype=dtype)
arr[0] = "d"
expected = pd.array(["d", "c"], dtype=dtype)
tm.assert_extension_array_equal(arr, expected)
@pytest.mark.parametrize(
"input, method",
[
(["a", "b", "c"], operator.methodcaller("capitalize")),
(["a", "b", "c"], operator.methodcaller("capitalize")),
(["a b", "a bc. de"], operator.methodcaller("capitalize")),
],
)
def test_string_methods(input, method, dtype, request):
if dtype == "arrow_string":
reason = "AttributeError: 'ArrowStringDtype' object has no attribute 'base'"
mark = pytest.mark.xfail(reason=reason)
request.node.add_marker(mark)
a = pd.Series(input, dtype=dtype)
b = pd.Series(input, dtype="object")
result = method(a.str)
expected = method(b.str)
assert result.dtype.name == dtype
tm.assert_series_equal(result.astype(object), expected)
def test_astype_roundtrip(dtype, request):
if dtype == "arrow_string":
reason = "ValueError: Could not convert object to NumPy datetime"
mark = pytest.mark.xfail(reason=reason, raises=ValueError)
request.node.add_marker(mark)
else:
mark = pytest.mark.xfail(
reason="GH#36153 casting from StringArray to dt64 fails", raises=ValueError
)
request.node.add_marker(mark)
ser = pd.Series(pd.date_range("2000", periods=12))
ser[0] = None
casted = ser.astype(dtype)
assert is_dtype_equal(casted.dtype, dtype)
result = casted.astype("datetime64[ns]")
tm.assert_series_equal(result, ser)
def test_add(dtype, request):
if dtype == "arrow_string":
reason = (
"TypeError: unsupported operand type(s) for +: 'ArrowStringArray' and "
"'ArrowStringArray'"
)
mark = pytest.mark.xfail(reason=reason)
request.node.add_marker(mark)
a = pd.Series(["a", "b", "c", None, None], dtype=dtype)
b = pd.Series(["x", "y", None, "z", None], dtype=dtype)
result = a + b
expected = pd.Series(["ax", "by", None, None, None], dtype=dtype)
tm.assert_series_equal(result, expected)
result = a.add(b)
tm.assert_series_equal(result, expected)
result = a.radd(b)
expected = pd.Series(["xa", "yb", None, None, None], dtype=dtype)
tm.assert_series_equal(result, expected)
result = a.add(b, fill_value="-")
expected = pd.Series(["ax", "by", "c-", "-z", None], dtype=dtype)
tm.assert_series_equal(result, expected)
def test_add_2d(dtype, request):
if dtype == "arrow_string":
reason = "Failed: DID NOT RAISE <class 'ValueError'>"
mark = pytest.mark.xfail(reason=reason)
request.node.add_marker(mark)
a = pd.array(["a", "b", "c"], dtype=dtype)
b = np.array([["a", "b", "c"]], dtype=object)
with pytest.raises(ValueError, match="3 != 1"):
a + b
s = pd.Series(a)
with pytest.raises(ValueError, match="3 != 1"):
s + b
def test_add_sequence(dtype, request):
if dtype == "arrow_string":
reason = (
"TypeError: unsupported operand type(s) for +: 'ArrowStringArray' "
"and 'list'"
)
mark = pytest.mark.xfail(reason=reason)
request.node.add_marker(mark)
a = pd.array(["a", "b", None, None], dtype=dtype)
other = ["x", None, "y", None]
result = a + other
expected = pd.array(["ax", None, None, None], dtype=dtype)
tm.assert_extension_array_equal(result, expected)
result = other + a
expected = pd.array(["xa", None, None, None], dtype=dtype)
tm.assert_extension_array_equal(result, expected)
def test_mul(dtype, request):
if dtype == "arrow_string":
reason = (
"TypeError: unsupported operand type(s) for *: 'ArrowStringArray' and 'int'"
)
mark = pytest.mark.xfail(reason=reason)
request.node.add_marker(mark)
a = pd.array(["a", "b", None], dtype=dtype)
result = a * 2
expected = pd.array(["aa", "bb", None], dtype=dtype)
tm.assert_extension_array_equal(result, expected)
result = 2 * a
tm.assert_extension_array_equal(result, expected)
@pytest.mark.xfail(reason="GH-28527")
def test_add_strings(dtype):
array = pd.array(["a", "b", "c", "d"], dtype=dtype)
df = pd.DataFrame([["t", "u", "v", "w"]])
assert array.__add__(df) is NotImplemented
result = array + df
expected = pd.DataFrame([["at", "bu", "cv", "dw"]]).astype(dtype)
tm.assert_frame_equal(result, expected)
result = df + array
expected = pd.DataFrame([["ta", "ub", "vc", "wd"]]).astype(dtype)
tm.assert_frame_equal(result, expected)
@pytest.mark.xfail(reason="GH-28527")
def test_add_frame(dtype):
array = pd.array(["a", "b", np.nan, np.nan], dtype=dtype)
df = pd.DataFrame([["x", np.nan, "y", np.nan]])
assert array.__add__(df) is NotImplemented
result = array + df
expected = pd.DataFrame([["ax", np.nan, np.nan, np.nan]]).astype(dtype)
tm.assert_frame_equal(result, expected)
result = df + array
expected = pd.DataFrame([["xa", np.nan, np.nan, np.nan]]).astype(dtype)
tm.assert_frame_equal(result, expected)
def test_comparison_methods_scalar(all_compare_operators, dtype):
op_name = all_compare_operators
a = pd.array(["a", None, "c"], dtype=dtype)
other = "a"
result = getattr(a, op_name)(other)
expected = np.array([getattr(item, op_name)(other) for item in a], dtype=object)
expected = pd.array(expected, dtype="boolean")
tm.assert_extension_array_equal(result, expected)
def test_comparison_methods_scalar_pd_na(all_compare_operators, dtype):
op_name = all_compare_operators
a = pd.array(["a", None, "c"], dtype=dtype)
result = getattr(a, op_name)(pd.NA)
expected = pd.array([None, None, None], dtype="boolean")
tm.assert_extension_array_equal(result, expected)
def test_comparison_methods_scalar_not_string(all_compare_operators, dtype, request):
if all_compare_operators not in ["__eq__", "__ne__"]:
reason = "comparison op not supported between instances of 'str' and 'int'"
mark = pytest.mark.xfail(reason=reason)
request.node.add_marker(mark)
op_name = all_compare_operators
a = pd.array(["a", None, "c"], dtype=dtype)
other = 42
result = getattr(a, op_name)(other)
expected_data = {"__eq__": [False, None, False], "__ne__": [True, None, True]}[
op_name
]
expected = pd.array(expected_data, dtype="boolean")
tm.assert_extension_array_equal(result, expected)
def test_comparison_methods_array(all_compare_operators, dtype, request):
if dtype == "arrow_string":
if all_compare_operators in ["__eq__", "__ne__"]:
reason = "NotImplementedError: Neither scalar nor ArrowStringArray"
else:
reason = "AssertionError: left is not an ExtensionArray"
mark = pytest.mark.xfail(reason=reason)
request.node.add_marker(mark)
op_name = all_compare_operators
a = pd.array(["a", None, "c"], dtype=dtype)
other = [None, None, "c"]
result = getattr(a, op_name)(other)
expected = np.empty_like(a, dtype="object")
expected[-1] = getattr(other[-1], op_name)(a[-1])
expected = pd.array(expected, dtype="boolean")
tm.assert_extension_array_equal(result, expected)
result = getattr(a, op_name)(pd.NA)
expected = pd.array([None, None, None], dtype="boolean")
tm.assert_extension_array_equal(result, expected)
def test_constructor_raises(cls):
if cls is pd.arrays.StringArray:
msg = "StringArray requires a sequence of strings or pandas.NA"
else:
msg = "Unsupported type '<class 'numpy.ndarray'>' for ArrowStringArray"
with pytest.raises(ValueError, match=msg):
cls(np.array(["a", "b"], dtype="S1"))
with pytest.raises(ValueError, match=msg):
cls(np.array([]))
with pytest.raises(ValueError, match=msg):
cls(np.array(["a", np.nan], dtype=object))
with pytest.raises(ValueError, match=msg):
cls(np.array(["a", None], dtype=object))
with pytest.raises(ValueError, match=msg):
cls(np.array(["a", pd.NaT], dtype=object))
@pytest.mark.parametrize("copy", [True, False])
def test_from_sequence_no_mutate(copy, cls, request):
if cls is ArrowStringArray and copy is False:
reason = "AssertionError: numpy array are different"
mark = pytest.mark.xfail(reason=reason)
request.node.add_marker(mark)
nan_arr = np.array(["a", np.nan], dtype=object)
na_arr = np.array(["a", pd.NA], dtype=object)
result = cls._from_sequence(nan_arr, copy=copy)
if cls is ArrowStringArray:
import pyarrow as pa
expected = cls(pa.array(na_arr, type=pa.string(), from_pandas=True))
else:
expected = cls(na_arr)
tm.assert_extension_array_equal(result, expected)
expected = nan_arr if copy else na_arr
tm.assert_numpy_array_equal(nan_arr, expected)
def test_astype_int(dtype, request):
if dtype == "arrow_string":
reason = "TypeError: Cannot interpret 'Int64Dtype()' as a data type"
mark = pytest.mark.xfail(reason=reason)
request.node.add_marker(mark)
arr = pd.array(["1", pd.NA, "3"], dtype=dtype)
result = arr.astype("Int64")
expected = pd.array([1, pd.NA, 3], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
def test_astype_float(any_float_allowed_nullable_dtype):
# Don't compare arrays (37974)
ser = pd.Series(["1.1", pd.NA, "3.3"], dtype="string")
result = ser.astype(any_float_allowed_nullable_dtype)
expected = pd.Series([1.1, np.nan, 3.3], dtype=any_float_allowed_nullable_dtype)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.xfail(reason="Not implemented StringArray.sum")
def test_reduce(skipna, dtype):
arr = pd.Series(["a", "b", "c"], dtype=dtype)
result = arr.sum(skipna=skipna)
assert result == "abc"
@pytest.mark.parametrize("method", ["min", "max"])
@pytest.mark.parametrize("skipna", [True, False])
def test_min_max(method, skipna, dtype, request):
if dtype == "arrow_string":
reason = "AttributeError: 'ArrowStringArray' object has no attribute 'max'"
mark = pytest.mark.xfail(reason=reason)
request.node.add_marker(mark)
arr = pd.Series(["a", "b", "c", None], dtype=dtype)
result = getattr(arr, method)(skipna=skipna)
if skipna:
expected = "a" if method == "min" else "c"
assert result == expected
else:
assert result is pd.NA
@pytest.mark.parametrize("method", ["min", "max"])
@pytest.mark.parametrize("box", [pd.Series, pd.array])
def test_min_max_numpy(method, box, dtype, request):
if dtype == "arrow_string":
if box is pd.array:
reason = (
"TypeError: '<=' not supported between instances of 'str' and "
"'NoneType'"
)
else:
reason = "AttributeError: 'ArrowStringArray' object has no attribute 'max'"
mark = pytest.mark.xfail(reason=reason)
request.node.add_marker(mark)
arr = box(["a", "b", "c", None], dtype=dtype)
result = getattr(np, method)(arr)
expected = "a" if method == "min" else "c"
assert result == expected
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.xfail(reason="Not implemented StringArray.sum")
def test_reduce_missing(skipna, dtype):
arr = pd.Series([None, "a", None, "b", "c", None], dtype=dtype)
result = arr.sum(skipna=skipna)
if skipna:
assert result == "abc"
else:
assert pd.isna(result)
def test_fillna_args():
# GH 37987
arr = pd.array(["a", pd.NA], dtype="string")
res = arr.fillna(value="b")
expected = pd.array(["a", "b"], dtype="string")
tm.assert_extension_array_equal(res, expected)
res = arr.fillna(value=np.str_("b"))
expected = pd.array(["a", "b"], dtype="string")
tm.assert_extension_array_equal(res, expected)
msg = "Cannot set non-string value '1' into a StringArray."
with pytest.raises(ValueError, match=msg):
arr.fillna(value=1)
@td.skip_if_no("pyarrow", min_version="0.15.0")
def test_arrow_array(dtype):
# protocol added in 0.15.0
import pyarrow as pa
data = pd.array(["a", "b", "c"], dtype=dtype)
arr = pa.array(data)
expected = pa.array(list(data), type=pa.string(), from_pandas=True)
if dtype == "arrow_string":
expected = pa.chunked_array(expected)
assert arr.equals(expected)
@td.skip_if_no("pyarrow", min_version="0.15.1.dev")
def test_arrow_roundtrip(dtype, dtype_object):
# roundtrip possible from arrow 1.0.0
import pyarrow as pa
data = pd.array(["a", "b", None], dtype=dtype)
df = pd.DataFrame({"a": data})
table = pa.table(df)
assert table.field("a").type == "string"
result = table.to_pandas()
assert isinstance(result["a"].dtype, dtype_object)
tm.assert_frame_equal(result, df)
# ensure the missing value is represented by NA and not np.nan or None
assert result.loc[2, "a"] is pd.NA
def test_value_counts_na(dtype, request):
if dtype == "arrow_string":
reason = "TypeError: boolean value of NA is ambiguous"
mark = pytest.mark.xfail(reason=reason)
request.node.add_marker(mark)
arr = pd.array(["a", "b", "a", pd.NA], dtype=dtype)
result = arr.value_counts(dropna=False)
expected = pd.Series([2, 1, 1], index=["a", pd.NA, "b"], dtype="Int64")
tm.assert_series_equal(result, expected)
result = arr.value_counts(dropna=True)
expected = pd.Series([2, 1], index=["a", "b"], dtype="Int64")
tm.assert_series_equal(result, expected)
def test_value_counts_with_normalize(dtype, request):
if dtype == "arrow_string":
reason = "TypeError: boolean value of NA is ambiguous"
mark = pytest.mark.xfail(reason=reason)
request.node.add_marker(mark)
s = pd.Series(["a", "b", "a", pd.NA], dtype=dtype)
result = s.value_counts(normalize=True)
expected = pd.Series([2, 1], index=["a", "b"], dtype="Float64") / 3
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"values, expected",
[
(["a", "b", "c"], np.array([False, False, False])),
(["a", "b", None], np.array([False, False, True])),
],
)
def test_use_inf_as_na(values, expected, dtype):
# https://github.com/pandas-dev/pandas/issues/33655
values = pd.array(values, dtype=dtype)
with pd.option_context("mode.use_inf_as_na", True):
result = values.isna()
tm.assert_numpy_array_equal(result, expected)
result = pd.Series(values).isna()
expected = pd.Series(expected)
tm.assert_series_equal(result, expected)
result = pd.DataFrame(values).isna()
expected = pd.DataFrame(expected)
tm.assert_frame_equal(result, expected)
def test_memory_usage(dtype, request):
# GH 33963
if dtype == "arrow_string":
pytest.skip("not applicable")
series = pd.Series(["a", "b", "c"], dtype=dtype)
assert 0 < series.nbytes <= series.memory_usage() < series.memory_usage(deep=True)
@pytest.mark.parametrize("float_dtype", [np.float16, np.float32, np.float64])
def test_astype_from_float_dtype(float_dtype, dtype):
# https://github.com/pandas-dev/pandas/issues/36451
s = pd.Series([0.1], dtype=float_dtype)
result = s.astype(dtype)
expected = pd.Series(["0.1"], dtype=dtype)
tm.assert_series_equal(result, expected)
def test_to_numpy_returns_pdna_default(dtype):
arr = pd.array(["a", pd.NA, "b"], dtype=dtype)
result = np.array(arr)
expected = np.array(["a", pd.NA, "b"], dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_to_numpy_na_value(dtype, nulls_fixture):
na_value = nulls_fixture
arr = pd.array(["a", pd.NA, "b"], dtype=dtype)
result = arr.to_numpy(na_value=na_value)
expected = np.array(["a", na_value, "b"], dtype=object)
tm.assert_numpy_array_equal(result, expected)
|
{
"content_hash": "224c1b80659d29320333a404f928962b",
"timestamp": "",
"source": "github",
"line_count": 575,
"max_line_length": 88,
"avg_line_length": 32.37913043478261,
"alnum_prop": 0.6211193468686218,
"repo_name": "jreback/pandas",
"id": "53659292135037cf6dab31d1352016883e6dec5f",
"size": "18618",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pandas/tests/arrays/string_/test_string.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4879"
},
{
"name": "C",
"bytes": "406353"
},
{
"name": "C++",
"bytes": "17193"
},
{
"name": "HTML",
"bytes": "606963"
},
{
"name": "Makefile",
"bytes": "529"
},
{
"name": "Python",
"bytes": "14930989"
},
{
"name": "Shell",
"bytes": "29317"
},
{
"name": "Smarty",
"bytes": "2040"
}
],
"symlink_target": ""
}
|
import jsondiff
from pytest_regressions.data_regression import DataRegressionFixture
import gdsfactory as gf
from gdsfactory.cross_section import cross_section
gdspath = gf.PATH.gdsdir / "mzi2x2.gds"
def test_read_gds_hash2() -> gf.Component:
c = gf.import_gds(gdspath)
h = "bfc84eea02b07fa0e167223e0fdedc8e8b085026"
assert c.hash_geometry() == h, f"h = {c.hash_geometry()!r}"
return c
def test_read_gds_with_settings2(data_regression: DataRegressionFixture) -> None:
c = gf.import_gds(gdspath)
data_regression.check(c.to_dict())
def test_read_gds_equivalent2() -> None:
"""Ensures we can load it from GDS + YAML and get the same component
settings."""
splitter = gf.components.mmi1x2(cross_section=cross_section)
c1 = gf.components.mzi(splitter=splitter, cross_section=cross_section)
c2 = gf.import_gds(gdspath)
d1 = c1.to_dict()
d2 = c2.to_dict()
    # we change the name, so there are no cache conflicts
# d1.pop("name")
# d2.pop("name")
# d1.pop("ports")
# d2.pop("ports")
# c1.pprint()
# c2.pprint()
d = jsondiff.diff(d1, d2)
# from pprint import pprint
# pprint(d1)
# pprint(d2)
# pprint(d)
assert len(d) == 0, d
def test_mix_cells_from_gds_and_from_function2() -> None:
"""Ensures not duplicated cell names.
when cells loaded from GDS and have the same name as a function with
@cell decorator
"""
c = gf.Component("test_mix_cells_from_gds_and_from_function")
c << gf.components.mzi()
c << gf.import_gds(gdspath)
c.write_gds()
c.show(show_ports=True)
def _write() -> None:
splitter = gf.components.mmi1x2(cross_section=cross_section)
c1 = gf.components.mzi(splitter=splitter, cross_section=cross_section)
c1.write_gds_with_metadata(gdspath=gdspath)
c1.show()
if __name__ == "__main__":
_write()
# test_read_gds_equivalent2()
c = test_read_gds_hash2()
# c.show(show_ports=True)
# test_mix_cells_from_gds_and_from_function2()
# test_read_gds_with_settings2()
c1 = gf.components.mzi()
c2 = gf.import_gds(gdspath)
d1 = c1.to_dict()
d2 = c2.to_dict()
d = jsondiff.diff(d1, d2)
print(d)
|
{
"content_hash": "edf27fdaf0942f6cdc6acea565df1416",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 81,
"avg_line_length": 25.697674418604652,
"alnum_prop": 0.648868778280543,
"repo_name": "gdsfactory/gdsfactory",
"id": "4fc67b27632f0bf30809f8159062037e4bc4f087",
"size": "2239",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gdsfactory/tests/test_import_gds_with_hierarchy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "605"
},
{
"name": "Dockerfile",
"bytes": "31"
},
{
"name": "Makefile",
"bytes": "4572"
},
{
"name": "Python",
"bytes": "2471982"
},
{
"name": "Shell",
"bytes": "671"
},
{
"name": "XS",
"bytes": "10045"
}
],
"symlink_target": ""
}
|
from biicode.server.model.user import User
from biicode.server.model.block import Block
from biicode.common.model.content import ContentDeserializer
from biicode.common.model.cells import CellDeserializer
from biicode.common.model.id import ID
from biicode.server.store.mongo_store import MongoStore
from biicode.server.store.generic_server_store import GenericServerStore
from biicode.common.model.brl.brl_block import BRLBlock
from biicode.server.exception import BiiPendingTransactionException
from biicode.server.model.permissions.element_permissions import ElementPermissions
from biicode.server.model.payment.user_subscription import UserSubscription
class MongoServerStore(MongoStore, GenericServerStore):
deserializer = {
GenericServerStore.PUBLISHED_CELL_ST: CellDeserializer(ID),
GenericServerStore.PUBLISHED_CONTENT_ST: ContentDeserializer(ID),
GenericServerStore.BLOCK_ST: Block,
GenericServerStore.USER_ST: User,
GenericServerStore.COUNTERS_ST: None,
GenericServerStore.BLOCK_PERMISSIONS_ST: ElementPermissions,
GenericServerStore.USER_SUBSCRIPTION_ST: UserSubscription
}
def __init__(self, connection, databasename=None):
'''
        connection: MongoClient, can be obtained from MongoStore.makeConnection
'''
MongoStore.__init__(self, connection, databasename)
def read_user_by_email(self, email):
'''Reads user by email'''
dbcol = self.db[GenericServerStore.USER_ST]
doc = dbcol.find_one({User.SERIAL_EMAIL: email})
return User.deserialize(doc) if doc else None
def read_user_by_oauth_token(self, provider, token):
'''Reads user by github or google token'''
cols = {"google": User.SERIAL_OAUTH_GOOGLE_TOKEN,
"github": User.SERIAL_OAUTH_GITHUB_TOKEN}
dbcol = self.db[GenericServerStore.USER_ST]
doc = dbcol.find_one({cols[provider]: token})
return User.deserialize(doc) if doc else None
def read_user_subscription_by_customer_id(self, customer_id):
dbcol = self.db[GenericServerStore.USER_SUBSCRIPTION_ST]
doc = dbcol.find_one({UserSubscription.SERIAL_CUSTOMER_ID_KEY: customer_id})
return UserSubscription.deserialize(doc) if doc else None
def exists_user_id_ignoring_case(self, brl_user):
'''Check if user already exists with a case insensitive pattern'''
import re
dbcol = self.db[GenericServerStore.USER_ST]
doc = dbcol.find_one({User.SERIAL_ID_KEY: re.compile('^' + re.escape(brl_user) + '$',
re.IGNORECASE)})
return User.deserialize(doc) if doc else None
def generate_user_id(self):
counters = self.db["counters"]
updated = counters.find_and_modify(query={'_id': 'users'},
update={"$inc": {'seq': 1}},
upsert=True,
new=True)
return ID((updated['seq'], ))
def getDeserializer(self, collection):
'''Mapping our collections'''
return MongoServerStore.deserializer[collection]
def getDeserializerMulti(self, collection): # For read_multi keys
'''Mapping our collections'''
return {
GenericServerStore.PUBLISHED_CELL_ST: ID,
GenericServerStore.PUBLISHED_CONTENT_ST: ID,
GenericServerStore.BLOCK_ST: BRLBlock,
GenericServerStore.BLOCK_PERMISSIONS_ST: BRLBlock,
}[collection]
def check_transaction(self, brl_hive):
dbcol = self.db[MongoStore.HIVE_TRANSACTIONS]
transaction = dbcol.find_one({'_id': str(brl_hive)})
if transaction is not None:
raise BiiPendingTransactionException("Cannot read hive %s, try again later"
% transaction)
############ Get content size ################
def read_content_sizes(self, content_ids):
dbcol = self.db[GenericServerStore.PUBLISHED_CONTENT_ST]
ids = [a.serialize() for a in content_ids]
projection = {"l.sz": 1}
cursor = dbcol.find({"_id": {"$in": ids}}, projection)
result = {ID.deserialize(doc["_id"]): doc["l"]["sz"] for doc in cursor}
return result
########### Get published blocks info ############
def read_published_blocks_info(self):
"""Gets the blocks brl's with the last publish date in a tuple.
The method returns a generator, blocks can be a lot and we don't want it all in memory
"""
dbcol = self.db[GenericServerStore.BLOCK_ST]
brls = dbcol.find({}, {"_id": 1}) # Iterator in results
        for ret_brl_block in brls: # It's an iterator too
brl_block = BRLBlock(ret_brl_block["_id"])
the_block = self.read_block(brl_block)
last_delta = the_block.last_delta
if last_delta:
last_pub_date = last_delta.datetime
yield (brl_block, last_pub_date)
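# --- Hedged usage sketch (added for illustration; not part of bii-server) ---
# Assumes a pymongo connection object named `client`, obtained elsewhere
# (for example via MongoStore.makeConnection, as noted in __init__).
def _example_list_publish_dates(client):
    store = MongoServerStore(client)
    for brl_block, last_pub_date in store.read_published_blocks_info():
        print('%s %s' % (brl_block, last_pub_date))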
|
{
"content_hash": "477da4f90737120a4bf02b3736753611",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 94,
"avg_line_length": 46.37614678899082,
"alnum_prop": 0.6354104846686449,
"repo_name": "bowlofstew/bii-server",
"id": "95ef8bd4b605e2aa508a409290b30135e82360bc",
"size": "5055",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "store/mongo_server_store.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "400214"
}
],
"symlink_target": ""
}
|
from RobotAppEyes import RobotAppEyes
from version import VERSION
__version__ = VERSION
class RobotAppEyes(RobotAppEyes):
"""
    Robot-AppEyes is a visual verification library for Robot Framework that leverages
the Eyes-Selenium and Selenium2 libraries.
"""
ROBOT_LIBRARY_SCOPE = 'GLOBAL'
|
{
"content_hash": "d761920a13c2ffec358f76fd9e8d4975",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 84,
"avg_line_length": 25.5,
"alnum_prop": 0.7483660130718954,
"repo_name": "NaviNet/Robot-AppEyes",
"id": "44e687861bda88462ba5b8110294d5e2f05e103c",
"size": "921",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "RobotAppEyes/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "30419"
}
],
"symlink_target": ""
}
|
import datetime
import unittest
import mock
from touchdown.core.datetime import parse_datetime, utc
class TestDateTime(unittest.TestCase):
def test_5m_ago(self):
with mock.patch("touchdown.core.datetime.datetime") as mock_dt:
mock_dt.utcnow.return_value = datetime.datetime(
2015, 5, 25, 14, 23, 46, 890132
)
mock_dt.side_effect = datetime.datetime.now()
self.assertEqual(
parse_datetime("5m ago"),
datetime.datetime(2015, 5, 25, 14, 18, 46, 890132, tzinfo=utc),
)
|
{
"content_hash": "18a815de7e9238840f87d47c486c6fa4",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 79,
"avg_line_length": 29.5,
"alnum_prop": 0.6,
"repo_name": "yaybu/touchdown",
"id": "6c68736c8cb1ca73524ec98f64da6884e9839ae2",
"size": "1168",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "touchdown/tests/test_core_datetime.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "760"
},
{
"name": "Python",
"bytes": "1047173"
}
],
"symlink_target": ""
}
|
import os
import sys
import time
from source.k3d_directives.plot import K3D_Plot
import shutil
import json
sys.path.insert(0, os.path.abspath('../..'))
# -- Project information -----------------------------------------------------
project = 'K3D-jupyter'
author = u'Artur Trzęsiok, Marcin Kostur, Tomasz Gandor, Thomas Mattone'
copyright = time.strftime(
'%Y') + ' ' + author
# The full version, including alpha/beta/rc tags
here = os.path.dirname(__file__)
repo = os.path.join(here, '..', '..')
_version_py = os.path.join(repo, 'package.json')
version_ns = {}
with open(_version_py) as f:
version = json.load(f)["version"]
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'sphinx_copybutton',
'sphinx.ext.napoleon'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The root document.
root_doc = 'index'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'pydata_sphinx_theme'
html_theme_options = {
"show_prev_next": False,
"google_analytics_id": 'UA-141840477-1',
"icon_links": [
{
"name": "GitHub",
"url": "https://github.com/K3D-tools/K3D-jupyter",
"icon": "fab fa-github",
"type": "fontawesome",
},
{
"name": "PyPi",
"url": "https://pypi.org/project/k3d/",
"icon": "fas fa-box-open",
"type": "fontawesome",
},
{
"name": "Conda",
"url": "https://anaconda.org/conda-forge/k3d",
"icon": "fas fa-circle-notch",
"type": "fontawesome",
}
]
}
html_sidebars = {
"index": ["search-field", "sidebar_index"],
"gallery/*": []
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
def setup(app):
shutil.copy('./../js/dist/standalone.js', './source/_static/standalone.js')
shutil.copy('./../node_modules/requirejs/require.js', './source/_static/require.js')
try:
app.add_css_file('style.css')
app.add_javascript('require.js')
app.add_javascript('standalone.js?k3d')
except AttributeError:
app.add_css_file('style.css')
app.add_js_file('require.js')
app.add_js_file('standalone.js?k3d')
app.add_directive('k3d_plot', K3D_Plot)
|
{
"content_hash": "c13e1b152680bc6ba4ad85734c36d1e9",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 88,
"avg_line_length": 29.52212389380531,
"alnum_prop": 0.5974220623501199,
"repo_name": "K3D-tools/K3D-jupyter",
"id": "819b004536d97263044ceca29faa20f4377e55c5",
"size": "3890",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "docs/source/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "164"
},
{
"name": "CSS",
"bytes": "1326"
},
{
"name": "GLSL",
"bytes": "33792"
},
{
"name": "HTML",
"bytes": "8112"
},
{
"name": "JavaScript",
"bytes": "599147"
},
{
"name": "Jupyter Notebook",
"bytes": "5311"
},
{
"name": "Python",
"bytes": "1949685"
},
{
"name": "Shell",
"bytes": "268"
}
],
"symlink_target": ""
}
|
import pandas as pd
import numpy as np
import SimpleRegression as SR
sales = None
train_data = None
test_data = None
# load data from csv files
def loaddata():
global sales, train_data, test_data
train_data = pd.read_csv('../Datafiles/kc_house_train_data.csv')
test_data = pd.read_csv('../Datafiles/kc_house_test_data.csv')
def main():
loaddata()
feature_sqft_living = np.array(train_data['sqft_living'])
feature_bedrooms = np.array(train_data['bedrooms'])
outputs = np.array(train_data['price'])
x = [1,2,2,3,3,4,5,6,6,6,8,10]
y = [-890,-1411,-1560,-2220,-2091,-2878,-3537,-3268,-3920,-4163,-5471,-5157]
intercept, slope = SR.simple_linear_regression(np.array(x), np.array(y))
print intercept, slope
print SR.get_regression_predictions(1, slope = slope, intercept = intercept)
intercept, slope = SR.simple_linear_regression(feature_sqft_living, outputs)
# predict 2650 sqft
pred_2650sqft = SR.get_regression_predictions(2650, intercept, slope)
print pred_2650sqft
# RSS of train data
RSS_train_data = SR.get_residual_sum_of_squares(feature_sqft_living, outputs, intercept, slope)
print RSS_train_data
    # house price is $800000 and its square feet
sqrtft_800000 = SR.inverse_regression_predictions(800000, slope = slope , intercept = intercept)
print sqrtft_800000
if __name__ == '__main__':
main()
|
{
"content_hash": "3410c1da6dcea3d144868c5fd72fb783",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 100,
"avg_line_length": 30.47826086956522,
"alnum_prop": 0.68188302425107,
"repo_name": "nonlining/SimpleML",
"id": "41b6a5c8464ae3c77a09e755c6832b2dcc0823ec",
"size": "1732",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Regression/test_SimpleRegression.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "46673"
}
],
"symlink_target": ""
}
|
from concurrent.futures import ThreadPoolExecutor
from rx.core import Scheduler
from .newthreadscheduler import NewThreadScheduler
class ThreadPoolScheduler(NewThreadScheduler):
"""A scheduler that schedules work via the thread pool."""
class ThreadPoolThread:
"""Wraps a concurrent future as a thread."""
def __init__(self, executor, run):
self.run = run
self.future = None
self.executor = executor
def start(self):
self.future = self.executor.submit(self.run)
def cancel(self):
self.future.cancel()
def __init__(self, max_workers=None):
super(ThreadPoolScheduler, self).__init__(self.thread_factory)
self.executor = ThreadPoolExecutor(max_workers=max_workers)
def thread_factory(self, target, *args):
return self.ThreadPoolThread(self.executor, target)
Scheduler.thread_pool = thread_pool_scheduler = ThreadPoolScheduler()
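# --- Hedged usage sketch (added for illustration; not part of RxPY) ---
# Drives the pool-backed worker through thread_factory(), which is exactly
# what the scheduler does internally when it needs a new "thread".
def _example_submit_work():
    def work():
        return 6 * 7  # any callable will do
    pool_scheduler = ThreadPoolScheduler(max_workers=2)
    worker = pool_scheduler.thread_factory(work)
    worker.start()  # submits work() to the underlying ThreadPoolExecutor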
|
{
"content_hash": "63d40127352ec35c13726fe8e71e9532",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 70,
"avg_line_length": 30.25,
"alnum_prop": 0.6714876033057852,
"repo_name": "ChemiKhazi/Sprytile",
"id": "0ac08b3f5e365aadd3b9f11ee4f258f6f3cf4085",
"size": "968",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "rx/concurrency/threadpoolscheduler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "659287"
}
],
"symlink_target": ""
}
|
"""
Example of spatial graph
"""
import geoplotlib
from geoplotlib.utils import read_csv
data = read_csv('./data/flights.csv')
geoplotlib.graph(data,
src_lat='lat_departure',
src_lon='lon_departure',
dest_lat='lat_arrival',
dest_lon='lon_arrival',
color='hot_r',
alpha=16,
linewidth=2)
geoplotlib.show()
|
{
"content_hash": "8d0909b7fc3b514584a6ee40ef04d58d",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 41,
"avg_line_length": 25,
"alnum_prop": 0.5247058823529411,
"repo_name": "lucciano/geoplotlib",
"id": "61574d69d92aef926d2b9114eb3f3964a5b73dbd",
"size": "425",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/graph-flights.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "94730"
}
],
"symlink_target": ""
}
|
"""## Functions for copying elements from one graph to another.
These functions allow for recursive copying of elements (ops and variables)
from one graph to another. The copied elements are initialized inside a
user-specified scope in the other graph. There are separate functions to
copy ops and variables.
There is also a function to retrieve the copied version of an op from the
first graph inside a scope in the second graph.
@@copy_op_to_graph
@@copy_variable_to_graph
@@get_copied_op
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from copy import deepcopy
from tensorflow.python.ops.variables import Variable
from tensorflow.python.client.session import Session
from tensorflow.python.framework import ops
__all__ = ["copy_op_to_graph", "copy_variable_to_graph", "get_copied_op"]
def copy_variable_to_graph(org_instance, to_graph, scope=""):
"""Given a `Variable` instance from one `Graph`, initializes and returns
a copy of it from another `Graph`, under the specified scope
(default `""`).
Args:
org_instance: A `Variable` from some `Graph`.
to_graph: The `Graph` to copy the `Variable` to.
scope: A scope for the new `Variable` (default `""`).
Returns:
The copied `Variable` from `to_graph`.
Raises:
TypeError: If `org_instance` is not a `Variable`.
"""
if not isinstance(org_instance, Variable):
raise TypeError(str(org_instance) + " is not a Variable")
#The name of the new variable
if scope != "":
new_name = (scope + '/' +
org_instance.name[:org_instance.name.index(':')])
else:
new_name = org_instance.name[:org_instance.name.index(':')]
#Get the collections that the new instance needs to be added to.
#The new collections will also be a part of the given scope,
#except the special ones required for variable initialization and
#training.
collections = []
for name, collection in org_instance.graph._collections.items():
if org_instance in collection:
if (name == ops.GraphKeys.GLOBAL_VARIABLES or
name == ops.GraphKeys.TRAINABLE_VARIABLES or
scope == ''):
collections.append(name)
else:
collections.append(scope + '/' + name)
  #See if it's trainable.
trainable = (org_instance in org_instance.graph.get_collection(
ops.GraphKeys.TRAINABLE_VARIABLES))
#Get the initial value
with org_instance.graph.as_default():
temp_session = Session()
init_value = temp_session.run(org_instance.initialized_value())
#Initialize the new variable
with to_graph.as_default():
new_var = Variable(init_value,
trainable,
name=new_name,
collections=collections,
validate_shape=False)
return new_var
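#--- Hedged usage sketch (added for illustration; not part of this module) ---
#Copies one variable into a scoped namespace of a second graph.  The graphs
#and the variable below are placeholders created purely for demonstration.
def _example_copy_variable():
  graph_a = ops.Graph()
  graph_b = ops.Graph()
  with graph_a.as_default():
    weights = Variable([1.0, 2.0], name='weights')
  #The copy lives in graph_b under the name 'copied/weights'.
  return copy_variable_to_graph(weights, graph_b, scope='copied')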
def copy_op_to_graph(org_instance, to_graph, variables,
scope=""):
"""Returns a copy of an operation from another Graph under a specified scope.
Given an `Operation` `org_instance` from one `Graph`,
initializes and returns a copy of it from another `Graph`,
under the specified scope (default `""`).
The copying is done recursively, so any `Operation` whose output
is required to evaluate the `org_instance`, is also copied (unless
already done).
Since `Variable` instances are copied separately, those required
to evaluate `org_instance` must be provided as input.
Args:
org_instance: An `Operation` from some `Graph`. Could be a
`Placeholder` as well.
to_graph: The `Graph` to copy `org_instance` to.
    variables: An iterable of the already-copied `Variable` instances
      (living in `to_graph`) that are required to evaluate `org_instance`.
scope: A scope for the new `Variable` (default `""`).
Returns:
The copied `Operation` from `to_graph`.
Raises:
TypeError: If `org_instance` is not an `Operation` or `Tensor`.
"""
#The name of the new instance
if scope != '':
new_name = scope + '/' + org_instance.name
else:
new_name = org_instance.name
#Extract names of variables
copied_variables = dict((x.name, x) for x in variables)
#If a variable by the new name already exists, return the
  #corresponding tensor that will act as an input
if new_name in copied_variables:
return to_graph.get_tensor_by_name(
copied_variables[new_name].name)
#If an instance of the same name exists, return appropriately
try:
already_present = to_graph.as_graph_element(new_name,
allow_tensor=True,
allow_operation=True)
return already_present
except:
pass
#Get the collections that the new instance needs to be added to.
#The new collections will also be a part of the given scope.
collections = []
for name, collection in org_instance.graph._collections.items():
if org_instance in collection:
if scope == '':
collections.append(name)
else:
collections.append(scope + '/' + name)
#Take action based on the class of the instance
if isinstance(org_instance, ops.Tensor):
#If its a Tensor, it is one of the outputs of the underlying
#op. Therefore, copy the op itself and return the appropriate
#output.
op = org_instance.op
new_op = copy_op_to_graph(op, to_graph, variables, scope)
output_index = op.outputs.index(org_instance)
new_tensor = new_op.outputs[output_index]
#Add to collections if any
for collection in collections:
to_graph.add_to_collection(collection, new_tensor)
return new_tensor
elif isinstance(org_instance, ops.Operation):
op = org_instance
#If it has an original_op parameter, copy it
if op._original_op is not None:
new_original_op = copy_op_to_graph(op._original_op, to_graph,
variables, scope)
else:
new_original_op = None
#If it has control inputs, call this function recursively on each.
new_control_inputs = [copy_op_to_graph(x, to_graph, variables,
scope)
for x in op.control_inputs]
#If it has inputs, call this function recursively on each.
new_inputs = [copy_op_to_graph(x, to_graph, variables,
scope)
for x in op.inputs]
#Make a new node_def based on that of the original.
#An instance of tensorflow.core.framework.node_def_pb2.NodeDef, it
#stores String-based info such as name, device and type of the op.
#Unique to every Operation instance.
new_node_def = deepcopy(op._node_def)
#Change the name
new_node_def.name = new_name
#Copy the other inputs needed for initialization
output_types = op._output_types[:]
input_types = op._input_types[:]
#Make a copy of the op_def too.
    #It's unique to every _type_ of Operation.
op_def = deepcopy(op._op_def)
#Initialize a new Operation instance
new_op = ops.Operation(new_node_def,
to_graph,
new_inputs,
output_types,
new_control_inputs,
input_types,
new_original_op,
op_def)
#Use Graph's hidden methods to add the op
to_graph._record_op_seen_by_control_dependencies(new_op)
for device_function in reversed(to_graph._device_function_stack):
new_op._set_device(device_function(new_op))
return new_op
else:
raise TypeError("Could not copy instance: " + str(org_instance))
def get_copied_op(org_instance, graph, scope=""):
"""Given an `Operation` instance from some `Graph`, returns
its namesake from `graph`, under the specified scope
(default `""`).
If a copy of `org_instance` is present in `graph` under the given
`scope`, it will be returned.
Args:
org_instance: An `Operation` from some `Graph`.
    graph: The `Graph` to be searched for a copy of `org_instance`.
scope: The scope `org_instance` is present in.
Returns:
The `Operation` copy from `graph`.
"""
#The name of the copied instance
if scope != '':
new_name = scope + '/' + org_instance.name
else:
new_name = org_instance.name
return graph.as_graph_element(new_name, allow_tensor=True,
allow_operation=True)
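#--- Hedged usage sketch (added for illustration; not part of this module) ---
#Copies an op (and everything it depends on) into a second graph, after first
#copying the variable it uses, then retrieves the copy by name.  All graphs,
#variables and scope names here are invented for the example.
def _example_copy_op_and_lookup():
  graph_a = ops.Graph()
  graph_b = ops.Graph()
  with graph_a.as_default():
    v = Variable(3.0, name='v')
    doubled = v * 2.0  #a Tensor whose producing op we want to copy
  copied_v = copy_variable_to_graph(v, graph_b, scope='copied')
  copied_doubled = copy_op_to_graph(doubled, graph_b, [copied_v], scope='copied')
  #The same copy can later be looked up by its namesake:
  same_op = get_copied_op(doubled.op, graph_b, scope='copied')
  return copied_doubled, same_op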
|
{
"content_hash": "8e901b99ead94e7d7c268872f8f7ec00",
"timestamp": "",
"source": "github",
"line_count": 248,
"max_line_length": 79,
"avg_line_length": 33.84274193548387,
"alnum_prop": 0.6457762421065173,
"repo_name": "laszlocsomor/tensorflow",
"id": "d060eda0a74010db10d9506b2a1c2345b2731709",
"size": "9082",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/copy_graph/python/util/copy_elements.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "8535"
},
{
"name": "C",
"bytes": "314362"
},
{
"name": "C++",
"bytes": "34295651"
},
{
"name": "CMake",
"bytes": "211937"
},
{
"name": "Go",
"bytes": "1012495"
},
{
"name": "Java",
"bytes": "533607"
},
{
"name": "Jupyter Notebook",
"bytes": "1940884"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "44807"
},
{
"name": "Objective-C",
"bytes": "12460"
},
{
"name": "Objective-C++",
"bytes": "94483"
},
{
"name": "PHP",
"bytes": "1429"
},
{
"name": "Perl",
"bytes": "6186"
},
{
"name": "Perl 6",
"bytes": "1360"
},
{
"name": "PureBasic",
"bytes": "24932"
},
{
"name": "Python",
"bytes": "30060071"
},
{
"name": "Ruby",
"bytes": "547"
},
{
"name": "Shell",
"bytes": "402121"
}
],
"symlink_target": ""
}
|
"""deck2pdf utilities
"""
import os
import hashlib
def gen_md5(filepath):
"""Return MD5 hex digest from file
:param filepath: target file path
:type filepath: str
:return: md5 digest (hex)
:rtype: str
"""
if not os.path.exists(filepath):
        raise Exception('File not found: {}'.format(filepath))
hash_ = hashlib.md5()
with open(filepath, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_.update(chunk)
return hash_.hexdigest()
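# Hedged usage note (illustrative, not part of the original module): gen_md5
# streams the file in 4096-byte chunks, so large files are hashed without
# loading them fully into memory. The path below is hypothetical:
#
#   digest = gen_md5('slides/output.pdf')
#   print(digest)  # 32-character hexadecimal string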
|
{
"content_hash": "a3fd16148698161409f2c6564fe1f273",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 53,
"avg_line_length": 22.476190476190474,
"alnum_prop": 0.6080508474576272,
"repo_name": "attakei/slide2pdf",
"id": "abf17057d88a63374064802ad3787295730d4476",
"size": "494",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "deck2pdf/libs.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "14398"
},
{
"name": "HTML",
"bytes": "123"
},
{
"name": "Makefile",
"bytes": "15176"
},
{
"name": "Python",
"bytes": "44166"
}
],
"symlink_target": ""
}
|
from pyrep.errors import PyRepError
from pyrep.backend import sim
from typing import Any
class Signal(object):
"""Signals can be seen as global variables.
Four types of signals are currently supported:
integer-, floating-, double-, and string-type signals.
Signals can be defined, redefined, read and cleared.
"""
def __init__(self, name):
self._name = name
def set(self, value) -> None:
"""Sets the value of this signal.
:param value: The value of the signal.
"""
pass
def get(self) -> Any:
"""Gets the value of this signal.
        :raises PyRepError: if the signal does not exist.
:return: The value of the signal.
"""
pass
def clear(self) -> int:
"""Clears the value of this signal.
:return: The number of signals cleared. Either 0 or 1.
"""
pass
def _check_signal(self, value: int, type_name: str) -> None:
if value == 0:
raise PyRepError('Signal %s of type %s does not exist.' % (
self._name, type_name))
class IntegerSignal(Signal):
"""An integer-type signal."""
def set(self, value) -> None:
sim.simSetIntegerSignal(self._name, value)
def get(self) -> int:
ret, value = sim.simGetIntegerSignal(self._name)
self._check_signal(ret, 'int')
return value
def clear(self) -> int:
return sim.simClearIntegerSignal(self._name)
class FloatSignal(Signal):
"""An float-type signal."""
def set(self, value) -> None:
sim.simSetFloatSignal(self._name, value)
def get(self) -> float:
ret, value = sim.simGetFloatSignal(self._name)
self._check_signal(ret, 'float')
return value
def clear(self) -> int:
return sim.simClearFloatSignal(self._name)
class DoubleSignal(Signal):
"""An double-type signal."""
def set(self, value) -> None:
sim.simSetDoubleSignal(self._name, value)
def get(self) -> float:
ret, value = sim.simGetDoubleSignal(self._name)
self._check_signal(ret, 'double')
return value
def clear(self) -> int:
return sim.simClearDoubleSignal(self._name)
class StringSignal(Signal):
"""An string-type signal."""
def set(self, value) -> None:
sim.simSetStringSignal(self._name, value)
def get(self) -> str:
ret, value = sim.simGetStringSignal(self._name)
self._check_signal(ret, 'string')
return value
def clear(self) -> int:
return sim.simClearStringSignal(self._name)
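# Hedged usage sketch (illustrative, not part of the original module); it
# assumes a running simulation so the sim.* calls above can succeed, and the
# signal name is hypothetical:
#
#   counter = IntegerSignal('my_counter')
#   counter.set(3)
#   value = counter.get()      # 3, or raises PyRepError if undefined
#   cleared = counter.clear()  # number of signals cleared, 0 or 1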
|
{
"content_hash": "00fd269d62ec61ba371f874148814286",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 71,
"avg_line_length": 25.46078431372549,
"alnum_prop": 0.6037735849056604,
"repo_name": "stepjam/PyRep",
"id": "c1513af427023d395880eee081c47e0e371396f0",
"size": "2597",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyrep/misc/signals.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "56307"
},
{
"name": "Lua",
"bytes": "16854"
},
{
"name": "Python",
"bytes": "428818"
}
],
"symlink_target": ""
}
|
import codecs
import struct
import discord
from bs4 import BeautifulSoup
from cogs.guildwars2.utils.db import prepare_search
from discord import app_commands
from discord.app_commands import Choice
class MiscMixin:
@app_commands.command()
@app_commands.describe(
language="The language of the wiki to search on. Optional. "
"Defaults to English.",
search_text="The text to search the wiki for. Example: Lion's Arch")
@app_commands.choices(language=[
Choice(name=p.title(), value=p) for p in ["en", "fr", "es", "de"]
])
async def wiki(
self,
interaction: discord.Interaction,
search_text: str, # TODO autocomplete
language: str = "en"):
"""Search the Guild wars 2 wiki"""
if len(search_text) > 300:
return await interaction.response.send_message("Search too long",
ephemeral=True)
await interaction.response.defer()
wiki = {
"en": "https://wiki.guildwars2.com",
"de": "https://wiki-de.guildwars2.com",
"fr": "https://wiki-fr.guildwars2.com",
"es": "https://wiki-es.guildwars2.com"
}
search_url = {
"en": "{}/index.php?title=Special%3ASearch&search={}",
"de": "{}/index.php?search={}&title=Spezial%3ASuche&",
"fr": "{}/index.php?search={}&title=Spécial%3ARecherche",
"es": "{}/index.php?title=Especial%3ABuscar&search={}"
}
url = (search_url[language].format(wiki[language], search_text))
headers = {"User-Agent": "TybaltBot/v2"}
        # Overzealous filtering on the wiki's side led to the bot's IP being blocked.
        # It seems to be a common issue, based on https://wiki.guildwars2.com/wiki/Guild_Wars_2_Wiki:Reporting_wiki_bugs#Forbidden_403
        # Based on the information there, the wiki added an exemption for requests carrying this User-Agent header.
        # It is a little dirty, but it doesn't really change anything in the end.
        # The only thing being checked is this user-agent string, and
        # given the lack of any other verification, I don't think it's anything too bad.
        # That being said, if anyone takes issue with this, I will contact the wiki
        # and get an exemption for GW2Bot too.
async with self.session.get(url, headers=headers) as r:
if r.history: # Redirected
embed = await self.search_results_embed(interaction,
"Wiki",
exact_match=r)
return await interaction.followup.send(embed=embed)
else:
results = await r.text()
soup = BeautifulSoup(results, 'html.parser')
posts = soup.find_all(
"div", {"class": "mw-search-result-heading"})[:5]
if not posts:
return await interaction.followup.send(
"No results for your search")
embed = await self.search_results_embed(interaction,
"Wiki",
posts,
base_url=wiki[language])
await interaction.followup.send(embed=embed)
async def search_results_embed(self,
ctx,
site,
posts=None,
*,
base_url="",
exact_match=None):
if exact_match:
soup = BeautifulSoup(await exact_match.text(), 'html.parser')
embed = discord.Embed(title=soup.title.get_text(),
color=await self.get_embed_color(ctx),
url=str(exact_match.url))
return embed
embed = discord.Embed(title="{} search results".format(site),
description="Closest matches",
color=await self.get_embed_color(ctx))
for post in posts:
post = post.a
url = base_url + post['href']
url = url.replace(")", "\\)")
embed.add_field(name=post["title"],
value="[Click here]({})".format(url),
inline=False)
return embed
async def chatcode_item_autocomplete(self,
interaction: discord.Interaction,
current: str):
if not current:
return []
query = prepare_search(current)
query = {
"name": query,
}
items = await self.db.items.find(query).to_list(25)
return [Choice(name=it["name"], value=str(it["_id"])) for it in items]
async def chatcode_skin_autocomplete(self,
interaction: discord.Interaction,
current: str):
if not current:
return []
query = prepare_search(current)
query = {
"name": query,
}
items = await self.db.skins.find(query).to_list(25)
return [Choice(name=it["name"], value=str(it["_id"])) for it in items]
async def chatcode_upgrade_autocomplete(self,
interaction: discord.Interaction,
current: str):
if not current:
return []
query = prepare_search(current)
query = {"name": query, "type": "UpgradeComponent"}
items = await self.db.items.find(query).to_list(25)
return [Choice(name=it["name"], value=str(it["_id"])) for it in items]
@app_commands.command()
@app_commands.describe(
item="Base item name for the chat code. Example: Banana",
quantity="Item quantity, ranging from 1 to 255.",
skin="Skin name to apply on the item.",
upgrade_1="Name of the upgrade in the first slot. "
"Example: +1 Agony Infusion",
upgrade_2="Name of the upgrade in the second slot. "
"Example: Superior rune of Generosity")
@app_commands.autocomplete(item=chatcode_item_autocomplete,
skin=chatcode_skin_autocomplete,
upgrade_1=chatcode_upgrade_autocomplete,
upgrade_2=chatcode_upgrade_autocomplete)
async def chatcode(
self,
interaction: discord.Interaction,
item: str,
quantity: int,
skin: str = None,
upgrade_1: str = None,
upgrade_2: str = None,
):
"""Generate a chat code"""
if not 1 <= quantity <= 255:
return await interaction.response.send_message(
"Invalid quantity. Quantity can be a number between 1 and 255",
ephemeral=True)
try:
item = int(item)
skin = int(skin) if skin else None
upgrade_1 = int(upgrade_1) if upgrade_1 else None
upgrade_2 = int(upgrade_2) if upgrade_2 else None
except ValueError:
return await interaction.response.send_message("Invalid value",
ephemeral=True)
upgrades = []
if upgrade_1:
upgrades.append(upgrade_1)
if upgrade_2:
upgrades.append(upgrade_2)
chat_code = self.generate_chat_code(item, quantity, skin, upgrades)
output = "Here's your chatcode. No refunds. ```\n{}```".format(
chat_code)
await interaction.response.send_message(output, ephemeral=True)
def generate_chat_code(self, item_id, count, skin_id, upgrades):
def little_endian(_id):
return [int(x) for x in struct.pack("<i", _id)]
def upgrade_flag():
skin = 0
first = 0
second = 0
if skin_id:
skin = 128
if len(upgrades) == 1:
first = 64
if len(upgrades) == 2:
second = 32
return skin | first | second
link = [2, count]
link.extend(little_endian(item_id))
link = link[:5]
link.append(upgrade_flag())
for x in filter(None, (skin_id, *upgrades)):
link.extend(little_endian(x))
link.append(0)
output = codecs.encode(bytes(link), 'base64').decode('utf-8')
return "[&{}]".format(output.strip())
|
{
"content_hash": "402d2db9a94a43cf078f8a7e07b333a6",
"timestamp": "",
"source": "github",
"line_count": 202,
"max_line_length": 131,
"avg_line_length": 43.34158415841584,
"alnum_prop": 0.5150199885779555,
"repo_name": "Maselkov/GW2Bot",
"id": "760e357019d215cd18b4385e483e9d929fc57e37",
"size": "8756",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "guildwars2/misc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "392867"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import argparse
import json
import os
import sys
import urlparse
from hooks import install
from paste import fileapp
from paste import httpserver
import webapp2
from webapp2 import Route, RedirectHandler
from dashboard_build import dashboard_dev_server_config
from tracing_build import tracing_dev_server_config
from netlog_viewer_build import netlog_viewer_dev_server_config
_MAIN_HTML = """<html><body>
<h1>Run Unit Tests</h1>
<ul>
%s
</ul>
<h1>Quick links</h1>
<ul>
%s
</ul>
</body></html>
"""
_QUICK_LINKS = [
('Trace File Viewer',
'/tracing_examples/trace_viewer.html'),
('Metrics debugger',
'/tracing_examples/metrics_debugger.html'),
]
_LINK_ITEM = '<li><a href="%s">%s</a></li>'
def _GetFilesIn(basedir):
data_files = []
for dirpath, dirnames, filenames in os.walk(basedir, followlinks=True):
new_dirnames = [d for d in dirnames if not d.startswith('.')]
del dirnames[:]
dirnames += new_dirnames
for f in filenames:
if f.startswith('.'):
continue
if f == 'README.md':
continue
full_f = os.path.join(dirpath, f)
rel_f = os.path.relpath(full_f, basedir)
data_files.append(rel_f)
data_files.sort()
return data_files
def _RelPathToUnixPath(p):
return p.replace(os.sep, '/')
class TestResultHandler(webapp2.RequestHandler):
def post(self, *args, **kwargs): # pylint: disable=unused-argument
msg = self.request.body
ostream = sys.stdout if 'PASSED' in msg else sys.stderr
ostream.write(msg + '\n')
return self.response.write('')
class TestsCompletedHandler(webapp2.RequestHandler):
def post(self, *args, **kwargs): # pylint: disable=unused-argument
msg = self.request.body
sys.stdout.write(msg + '\n')
exit_code = 0 if 'ALL_PASSED' in msg else 1
if hasattr(self.app.server, 'please_exit'):
self.app.server.please_exit(exit_code)
return self.response.write('')
class TestsErrorHandler(webapp2.RequestHandler):
def post(self, *args, **kwargs):
del args, kwargs
msg = self.request.body
sys.stderr.write(msg + '\n')
exit_code = 1
if hasattr(self.app.server, 'please_exit'):
self.app.server.please_exit(exit_code)
return self.response.write('')
class DirectoryListingHandler(webapp2.RequestHandler):
def get(self, *args, **kwargs): # pylint: disable=unused-argument
source_path = kwargs.pop('_source_path', None)
mapped_path = kwargs.pop('_mapped_path', None)
assert mapped_path.endswith('/')
data_files_relative_to_top = _GetFilesIn(source_path)
data_files = [mapped_path + x
for x in data_files_relative_to_top]
files_as_json = json.dumps(data_files)
self.response.content_type = 'application/json'
return self.response.write(files_as_json)
class FileAppWithGZipHandling(fileapp.FileApp):
def guess_type(self):
content_type, content_encoding = \
super(FileAppWithGZipHandling, self).guess_type()
if not self.filename.endswith('.gz'):
return content_type, content_encoding
# By default, FileApp serves gzip files as their underlying type with
# Content-Encoding of gzip. That causes them to show up on the client
# decompressed. That ends up being surprising to our xhr.html system.
return None, None
class SourcePathsHandler(webapp2.RequestHandler):
def get(self, *args, **kwargs): # pylint: disable=unused-argument
source_paths = kwargs.pop('_source_paths', [])
path = self.request.path
    # This is how we do it. It's... strange, but it's what we've done since
    # the dawn of time. Aka 4 years ago, lol.
for mapped_path in source_paths:
rel = os.path.relpath(path, '/')
candidate = os.path.join(mapped_path, rel)
if os.path.exists(candidate):
app = FileAppWithGZipHandling(candidate)
app.cache_control(no_cache=True)
return app
self.abort(404)
@staticmethod
def GetServingPathForAbsFilename(source_paths, filename):
if not os.path.isabs(filename):
raise Exception('filename must be an absolute path')
for mapped_path in source_paths:
if not filename.startswith(mapped_path):
continue
rel = os.path.relpath(filename, mapped_path)
unix_rel = _RelPathToUnixPath(rel)
return unix_rel
return None
class SimpleDirectoryHandler(webapp2.RequestHandler):
def get(self, *args, **kwargs): # pylint: disable=unused-argument
top_path = os.path.abspath(kwargs.pop('_top_path', None))
if not top_path.endswith(os.path.sep):
top_path += os.path.sep
joined_path = os.path.abspath(
os.path.join(top_path, kwargs.pop('rest_of_path')))
if not joined_path.startswith(top_path):
self.response.set_status(403)
return
app = FileAppWithGZipHandling(joined_path)
app.cache_control(no_cache=True)
return app
class TestOverviewHandler(webapp2.RequestHandler):
def get(self, *args, **kwargs): # pylint: disable=unused-argument
test_links = []
for name, path in kwargs.pop('pds').iteritems():
test_links.append(_LINK_ITEM % (path, name))
quick_links = []
for name, path in _QUICK_LINKS:
quick_links.append(_LINK_ITEM % (path, name))
self.response.out.write(_MAIN_HTML % ('\n'.join(test_links),
'\n'.join(quick_links)))
class DevServerApp(webapp2.WSGIApplication):
def __init__(self, pds, args):
super(DevServerApp, self).__init__(debug=True)
self.pds = pds
self._server = None
self._all_source_paths = []
self._all_mapped_test_data_paths = []
self._InitFromArgs(args)
@property
def server(self):
return self._server
@server.setter
def server(self, server):
self._server = server
def _InitFromArgs(self, args):
default_tests = dict((pd.GetName(), pd.GetRunUnitTestsUrl())
for pd in self.pds)
routes = [
Route('/tests.html', TestOverviewHandler,
defaults={'pds': default_tests}),
Route('', RedirectHandler, defaults={'_uri': '/tests.html'}),
Route('/', RedirectHandler, defaults={'_uri': '/tests.html'}),
]
for pd in self.pds:
routes += pd.GetRoutes(args)
routes += [
Route('/%s/notify_test_result' % pd.GetName(),
TestResultHandler),
Route('/%s/notify_tests_completed' % pd.GetName(),
TestsCompletedHandler),
Route('/%s/notify_test_error' % pd.GetName(),
TestsErrorHandler)
]
for pd in self.pds:
# Test data system.
for mapped_path, source_path in pd.GetTestDataPaths(args):
self._all_mapped_test_data_paths.append((mapped_path, source_path))
routes.append(Route('%s__file_list__' % mapped_path,
DirectoryListingHandler,
defaults={
'_source_path': source_path,
'_mapped_path': mapped_path
}))
routes.append(Route('%s<rest_of_path:.+>' % mapped_path,
SimpleDirectoryHandler,
defaults={'_top_path': source_path}))
    # This must go last, because it's a catch-all.
    #
    # It's funky that we have to add in the root path. The long-term fix is to
    # stop with the crazy multi-source-pathing thing.
for pd in self.pds:
self._all_source_paths += pd.GetSourcePaths(args)
routes.append(
Route('/<:.+>', SourcePathsHandler,
defaults={'_source_paths': self._all_source_paths}))
for route in routes:
self.router.add(route)
def GetAbsFilenameForHref(self, href):
for source_path in self._all_source_paths:
full_source_path = os.path.abspath(source_path)
expanded_href_path = os.path.abspath(os.path.join(full_source_path,
href.lstrip('/')))
if (os.path.exists(expanded_href_path) and
os.path.commonprefix([full_source_path,
expanded_href_path]) == full_source_path):
return expanded_href_path
return None
def GetURLForAbsFilename(self, filename):
assert self.server is not None
for mapped_path, source_path in self._all_mapped_test_data_paths:
if not filename.startswith(source_path):
continue
rel = os.path.relpath(filename, source_path)
unix_rel = _RelPathToUnixPath(rel)
url = urlparse.urljoin(mapped_path, unix_rel)
return url
path = SourcePathsHandler.GetServingPathForAbsFilename(
self._all_source_paths, filename)
if path is None:
return None
return urlparse.urljoin('/', path)
def _AddPleaseExitMixinToServer(server):
# Shutting down httpserver gracefully and yielding a return code requires
# a bit of mixin code.
exit_code_attempt = []
def PleaseExit(exit_code):
if len(exit_code_attempt) > 0:
return
exit_code_attempt.append(exit_code)
server.running = False
real_serve_forever = server.serve_forever
def ServeForever():
try:
real_serve_forever()
except KeyboardInterrupt:
# allow CTRL+C to shutdown
return 255
print("Exiting dev server")
if len(exit_code_attempt) == 1:
return exit_code_attempt[0]
    # serve_forever returned for some reason other than a call to
    # please_exit.
return 0
server.please_exit = PleaseExit
server.serve_forever = ServeForever
def _AddCommandLineArguments(pds, argv):
parser = argparse.ArgumentParser(description='Run development server')
parser.add_argument(
'--no-install-hooks', dest='install_hooks', action='store_false')
parser.add_argument('-p', '--port', default=8003, type=int)
for pd in pds:
g = parser.add_argument_group(pd.GetName())
pd.AddOptionstToArgParseGroup(g)
args = parser.parse_args(args=argv[1:])
return args
def Main(argv):
pds = [
dashboard_dev_server_config.DashboardDevServerConfig(),
tracing_dev_server_config.TracingDevServerConfig(),
netlog_viewer_dev_server_config.NetlogViewerDevServerConfig(),
]
args = _AddCommandLineArguments(pds, argv)
if args.install_hooks:
install.InstallHooks()
app = DevServerApp(pds, args=args)
server = httpserver.serve(app, host='127.0.0.1', port=args.port,
start_loop=False, daemon_threads=True)
_AddPleaseExitMixinToServer(server)
# pylint: disable=no-member
server.urlbase = 'http://127.0.0.1:%i' % server.server_port
app.server = server
sys.stderr.write('Now running on %s\n' % server.urlbase)
return server.serve_forever()
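# Hedged usage note (illustrative): when Main(sys.argv) is invoked, the server
# listens on 127.0.0.1:<port> and serves /tests.html. A test page ends the
# process by POSTing a body containing ALL_PASSED (exit code 0) or anything
# else (exit code 1) to /<project>/notify_tests_completed, which reaches
# TestsCompletedHandler above and calls server.please_exit().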
|
{
"content_hash": "e3960aeff33734ed49c0d2415b908072",
"timestamp": "",
"source": "github",
"line_count": 334,
"max_line_length": 77,
"avg_line_length": 32.02395209580838,
"alnum_prop": 0.6454749439042633,
"repo_name": "endlessm/chromium-browser",
"id": "d109f2c8f8cd4d3bd61bfe306bdea7faebc817f3",
"size": "10863",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "third_party/catapult/catapult_build/dev_server.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import glob
import os
import platform
import subprocess
import sys
from setuptools import setup, Command, Extension
from setuptools.command.test import test as TestCommand
def define_extensions(file_ext):
return [Extension("lightfm.lightfm_fast",
['lightfm/lightfm_fast%s' % file_ext],
extra_link_args=["-fopenmp"],
extra_compile_args=['-fopenmp',
'-march=native',
'-ffast-math'])]
def set_gcc():
"""
Try to find and use GCC on OSX for OpenMP support.
"""
if 'darwin' in platform.platform().lower():
gcc_binaries = sorted(glob.glob('/usr/local/bin/gcc-*'))
if gcc_binaries:
_, gcc = os.path.split(gcc_binaries[-1])
os.environ["CC"] = gcc
else:
raise Exception('No GCC available. Install gcc from Homebrew '
'using brew install gcc.')
class Cythonize(Command):
"""
Compile the extension .pyx files.
"""
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
import Cython
from Cython.Build import cythonize
cythonize(define_extensions('.pyx'))
class Clean(Command):
"""
Clean build files.
"""
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
pth = os.path.dirname(os.path.abspath(__file__))
subprocess.call(['rm', '-rf', os.path.join(pth, 'build')])
subprocess.call(['rm', '-rf', os.path.join(pth, 'lightfm.egg-info')])
subprocess.call(['find', pth, '-name', 'lightfm*.pyc', '-type', 'f', '-delete'])
subprocess.call(['rm', os.path.join(pth, 'lightfm', 'lightfm_fast.so')])
class PyTest(TestCommand):
user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = []
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
# import here, cause outside the eggs aren't loaded
import pytest
errno = pytest.main(self.pytest_args)
sys.exit(errno)
set_gcc()
setup(
name='lightfm',
version='1.4',
description='LightFM recommendation model',
url='https://github.com/lyst/lightfm',
download_url='https://github.com/lyst/lightfm/tarball/1.4',
packages=['lightfm'],
install_requires=['numpy'],
tests_require=['pytest', 'requests', 'scikit-learn', 'scipy'],
cmdclass={'test': PyTest, 'cythonize': Cythonize, 'clean': Clean},
author='Lyst Ltd (Maciej Kula)',
author_email='data@ly.st',
license='MIT',
classifiers=['Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Topic :: Scientific/Engineering :: Artificial Intelligence'],
ext_modules=define_extensions('.c')
)
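# Hedged usage note (illustrative): with the cmdclass mapping above, the
# custom commands are invoked as
#   python setup.py cythonize   # regenerate lightfm_fast.c from the .pyx source
#   python setup.py clean       # remove build/ and other generated artifacts
#   python setup.py test        # run the pytest suite via the PyTest command
# On OSX, set_gcc() must find a Homebrew gcc (e.g. /usr/local/bin/gcc-5, a
# hypothetical version) because the extension is compiled with -fopenmp.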
|
{
"content_hash": "59e9b66c5ce29fabc2c081923b273cc5",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 88,
"avg_line_length": 25.942622950819672,
"alnum_prop": 0.5769352290679305,
"repo_name": "shaunstanislaus/lightfm",
"id": "fc49366a79eb0ebbb5d228f8da4be132685d458c",
"size": "3165",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "84999"
}
],
"symlink_target": ""
}
|
import threading
import time
class Box(object):
lock = threading.RLock()
def __init__(self):
self.total_items = 0
def execute(self,n):
Box.lock.acquire()
self.total_items += n
Box.lock.release()
def add(self):
Box.lock.acquire()
self.execute(1)
Box.lock.release()
def remove(self):
Box.lock.acquire()
self.execute(-1)
Box.lock.release()
## These two functions run in separate
## threads and call the Box's methods
def adder(box,items):
while items > 0:
print ("adding 1 item in the box\n")
box.add()
time.sleep(5)
items -= 1
def remover(box,items):
while items > 0:
print ("removing 1 item in the box")
box.remove()
time.sleep(5)
items -= 1
## the main program builds some
## threads and makes sure it works
if __name__ == "__main__":
items = 5
print ("putting %s items in the box " % items)
box = Box()
t1 = threading.Thread(target=adder,args=(box,items))
t2 = threading.Thread(target=remover,args=(box,items))
t1.start()
t2.start()
t1.join()
t2.join()
print ("%s items still remain in the box " % box.total_items)
|
{
"content_hash": "a2224477afae9928f6c04751e0a521fa",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 65,
"avg_line_length": 24.19607843137255,
"alnum_prop": 0.5745542949756888,
"repo_name": "IdiosyncraticDragon/Reading-Notes",
"id": "7130a7a44fb2155484fc188a8ede8da8cef3e33a",
"size": "1234",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python Parallel Programming Cookbook_Code/Chapter 2/rlock_management.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "69018"
}
],
"symlink_target": ""
}
|
from horizon.test.settings import * # noqa: F403,H303
from openstack_dashboard.test.settings import * # noqa: F403,H303
# Update the dashboards with magnum_ui
import magnum_ui.enabled
import openstack_dashboard.enabled
from openstack_dashboard.utils import settings
# pop these keys to avoid log warnings about deprecation
# update_dashboards will populate them anyway
HORIZON_CONFIG.pop('dashboards', None)
HORIZON_CONFIG.pop('default_dashboard', None)
settings.update_dashboards(
[
magnum_ui.enabled,
openstack_dashboard.enabled,
],
HORIZON_CONFIG,
INSTALLED_APPS
)
# Ensure any duplicate apps are removed after the update_dashboards call
INSTALLED_APPS = list(set(INSTALLED_APPS))
|
{
"content_hash": "9ebae4930429f2623d0557eda00b7d4c",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 72,
"avg_line_length": 30.083333333333332,
"alnum_prop": 0.7617728531855956,
"repo_name": "openstack/magnum-ui",
"id": "37d1997ef0e4db66ed9cb8d89a8b41f29080230b",
"size": "1396",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "magnum_ui/test/settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "18914"
},
{
"name": "JavaScript",
"bytes": "276068"
},
{
"name": "Python",
"bytes": "70448"
},
{
"name": "SCSS",
"bytes": "786"
},
{
"name": "Shell",
"bytes": "2202"
}
],
"symlink_target": ""
}
|
r"""
==================================
Constants (:mod:`scipy.constants`)
==================================
.. currentmodule:: scipy.constants
Physical and mathematical constants and units.
Mathematical constants
======================
================ =================================================================
``pi`` Pi
``golden`` Golden ratio
``golden_ratio`` Golden ratio
================ =================================================================
Physical constants
==================
=========================== =================================================================
``c`` speed of light in vacuum
``speed_of_light`` speed of light in vacuum
``mu_0`` the magnetic constant :math:`\mu_0`
``epsilon_0`` the electric constant (vacuum permittivity), :math:`\epsilon_0`
``h`` the Planck constant :math:`h`
``Planck`` the Planck constant :math:`h`
``hbar`` :math:`\hbar = h/(2\pi)`
``G`` Newtonian constant of gravitation
``gravitational_constant`` Newtonian constant of gravitation
``g`` standard acceleration of gravity
``e`` elementary charge
``elementary_charge`` elementary charge
``R`` molar gas constant
``gas_constant`` molar gas constant
``alpha`` fine-structure constant
``fine_structure`` fine-structure constant
``N_A`` Avogadro constant
``Avogadro`` Avogadro constant
``k`` Boltzmann constant
``Boltzmann`` Boltzmann constant
``sigma`` Stefan-Boltzmann constant :math:`\sigma`
``Stefan_Boltzmann`` Stefan-Boltzmann constant :math:`\sigma`
``Wien`` Wien displacement law constant
``Rydberg`` Rydberg constant
``m_e`` electron mass
``electron_mass`` electron mass
``m_p`` proton mass
``proton_mass`` proton mass
``m_n`` neutron mass
``neutron_mass`` neutron mass
=========================== =================================================================
Constants database
------------------
In addition to the above variables, :mod:`scipy.constants` also contains the
2014 CODATA recommended values [CODATA2014]_ database containing more physical
constants.
.. autosummary::
:toctree: generated/
value -- Value in physical_constants indexed by key
unit -- Unit in physical_constants indexed by key
precision -- Relative precision in physical_constants indexed by key
find -- Return list of physical_constant keys with a given string
ConstantWarning -- Constant sought not in newest CODATA data set
.. data:: physical_constants
Dictionary of physical constants, of the format
``physical_constants[name] = (value, unit, uncertainty)``.
Available constants:
====================================================================== ====
%(constant_names)s
====================================================================== ====
Units
=====
SI prefixes
-----------
============ =================================================================
``yotta`` :math:`10^{24}`
``zetta`` :math:`10^{21}`
``exa`` :math:`10^{18}`
``peta`` :math:`10^{15}`
``tera`` :math:`10^{12}`
``giga`` :math:`10^{9}`
``mega`` :math:`10^{6}`
``kilo`` :math:`10^{3}`
``hecto`` :math:`10^{2}`
``deka`` :math:`10^{1}`
``deci`` :math:`10^{-1}`
``centi`` :math:`10^{-2}`
``milli`` :math:`10^{-3}`
``micro`` :math:`10^{-6}`
``nano`` :math:`10^{-9}`
``pico`` :math:`10^{-12}`
``femto`` :math:`10^{-15}`
``atto`` :math:`10^{-18}`
``zepto`` :math:`10^{-21}`
============ =================================================================
Binary prefixes
---------------
============ =================================================================
``kibi`` :math:`2^{10}`
``mebi`` :math:`2^{20}`
``gibi`` :math:`2^{30}`
``tebi`` :math:`2^{40}`
``pebi`` :math:`2^{50}`
``exbi`` :math:`2^{60}`
``zebi`` :math:`2^{70}`
``yobi`` :math:`2^{80}`
============ =================================================================
Mass
----
================= ============================================================
``gram`` :math:`10^{-3}` kg
``metric_ton`` :math:`10^{3}` kg
``grain`` one grain in kg
``lb``            one pound (avoirdupois) in kg
``pound``         one pound (avoirdupois) in kg
``blob`` one inch version of a slug in kg (added in 1.0.0)
``slinch`` one inch version of a slug in kg (added in 1.0.0)
``slug`` one slug in kg (added in 1.0.0)
``oz`` one ounce in kg
``ounce`` one ounce in kg
``stone`` one stone in kg
``long_ton`` one long ton in kg
``short_ton`` one short ton in kg
``troy_ounce`` one Troy ounce in kg
``troy_pound`` one Troy pound in kg
``carat`` one carat in kg
``m_u`` atomic mass constant (in kg)
``u`` atomic mass constant (in kg)
``atomic_mass`` atomic mass constant (in kg)
================= ============================================================
Angle
-----
================= ============================================================
``degree`` degree in radians
``arcmin`` arc minute in radians
``arcminute`` arc minute in radians
``arcsec`` arc second in radians
``arcsecond`` arc second in radians
================= ============================================================
Time
----
================= ============================================================
``minute`` one minute in seconds
``hour`` one hour in seconds
``day`` one day in seconds
``week`` one week in seconds
``year`` one year (365 days) in seconds
``Julian_year`` one Julian year (365.25 days) in seconds
================= ============================================================
Length
------
===================== ============================================================
``inch`` one inch in meters
``foot`` one foot in meters
``yard`` one yard in meters
``mile`` one mile in meters
``mil`` one mil in meters
``pt`` one point in meters
``point`` one point in meters
``survey_foot`` one survey foot in meters
``survey_mile`` one survey mile in meters
``nautical_mile`` one nautical mile in meters
``fermi`` one Fermi in meters
``angstrom`` one Angstrom in meters
``micron`` one micron in meters
``au`` one astronomical unit in meters
``astronomical_unit`` one astronomical unit in meters
``light_year`` one light year in meters
``parsec`` one parsec in meters
===================== ============================================================
Pressure
--------
================= ============================================================
``atm`` standard atmosphere in pascals
``atmosphere`` standard atmosphere in pascals
``bar`` one bar in pascals
``torr`` one torr (mmHg) in pascals
``mmHg`` one torr (mmHg) in pascals
``psi`` one psi in pascals
================= ============================================================
Area
----
================= ============================================================
``hectare`` one hectare in square meters
``acre`` one acre in square meters
================= ============================================================
Volume
------
=================== ========================================================
``liter`` one liter in cubic meters
``litre`` one liter in cubic meters
``gallon`` one gallon (US) in cubic meters
``gallon_US`` one gallon (US) in cubic meters
``gallon_imp`` one gallon (UK) in cubic meters
``fluid_ounce`` one fluid ounce (US) in cubic meters
``fluid_ounce_US`` one fluid ounce (US) in cubic meters
``fluid_ounce_imp`` one fluid ounce (UK) in cubic meters
``bbl`` one barrel in cubic meters
``barrel`` one barrel in cubic meters
=================== ========================================================
Speed
-----
================== ==========================================================
``kmh`` kilometers per hour in meters per second
``mph`` miles per hour in meters per second
``mach`` one Mach (approx., at 15 C, 1 atm) in meters per second
``speed_of_sound`` one Mach (approx., at 15 C, 1 atm) in meters per second
``knot`` one knot in meters per second
================== ==========================================================
Temperature
-----------
===================== =======================================================
``zero_Celsius`` zero of Celsius scale in Kelvin
``degree_Fahrenheit`` one Fahrenheit (only differences) in Kelvins
===================== =======================================================
.. autosummary::
:toctree: generated/
convert_temperature
Energy
------
==================== =======================================================
``eV`` one electron volt in Joules
``electron_volt`` one electron volt in Joules
``calorie`` one calorie (thermochemical) in Joules
``calorie_th`` one calorie (thermochemical) in Joules
``calorie_IT`` one calorie (International Steam Table calorie, 1956) in Joules
``erg`` one erg in Joules
``Btu`` one British thermal unit (International Steam Table) in Joules
``Btu_IT`` one British thermal unit (International Steam Table) in Joules
``Btu_th`` one British thermal unit (thermochemical) in Joules
``ton_TNT`` one ton of TNT in Joules
==================== =======================================================
Power
-----
==================== =======================================================
``hp`` one horsepower in watts
``horsepower`` one horsepower in watts
==================== =======================================================
Force
-----
==================== =======================================================
``dyn`` one dyne in newtons
``dyne`` one dyne in newtons
``lbf`` one pound force in newtons
``pound_force`` one pound force in newtons
``kgf`` one kilogram force in newtons
``kilogram_force`` one kilogram force in newtons
==================== =======================================================
Optics
------
.. autosummary::
:toctree: generated/
lambda2nu
nu2lambda
References
==========
.. [CODATA2014] CODATA Recommended Values of the Fundamental
Physical Constants 2014.
https://physics.nist.gov/cuu/Constants/
"""
from __future__ import division, print_function, absolute_import
# Modules contributed by BasSw (wegwerp@gmail.com)
from .codata import *
from .constants import *
from .codata import _obsolete_constants
_constant_names = [(_k.lower(), _k, _v)
for _k, _v in physical_constants.items()
if _k not in _obsolete_constants]
_constant_names = "\n".join(["``%s``%s %s %s" % (_x[1], " "*(66-len(_x[1])),
_x[2][0], _x[2][1])
for _x in sorted(_constant_names)])
if __doc__ is not None:
__doc__ = __doc__ % dict(constant_names=_constant_names)
del _constant_names
__all__ = [s for s in dir() if not s.startswith('_')]
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
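# Hedged usage sketch (illustrative): the CODATA database documented above can
# be queried by key, for example
#   from scipy import constants
#   constants.value('electron mass')       # value in kg
#   constants.unit('electron mass')        # 'kg'
#   constants.precision('electron mass')   # relative uncertainty
#   constants.find('Boltzmann')            # keys containing 'Boltzmann'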
|
{
"content_hash": "2c09221126f80717035b698356db78c0",
"timestamp": "",
"source": "github",
"line_count": 340,
"max_line_length": 94,
"avg_line_length": 35.86470588235294,
"alnum_prop": 0.4261112022306052,
"repo_name": "lhilt/scipy",
"id": "b1010cb1698c729df2738026e0283b457b33f869",
"size": "12194",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "scipy/constants/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4396416"
},
{
"name": "C++",
"bytes": "643592"
},
{
"name": "Fortran",
"bytes": "5368331"
},
{
"name": "MATLAB",
"bytes": "4346"
},
{
"name": "Makefile",
"bytes": "778"
},
{
"name": "Python",
"bytes": "12378541"
},
{
"name": "Shell",
"bytes": "538"
},
{
"name": "TeX",
"bytes": "52106"
}
],
"symlink_target": ""
}
|
import platform as sysinfo
import base64 as encoder
import json
nl = '\n'
EI = sysinfo.platform() + nl + sysinfo.machine() + nl + sysinfo.processor() + nl + sysinfo.system() + ' ' + sysinfo.version()
file = open(".sysinfo", "w+")
file.write(EI)
file.close()
print (encoder.b64encode(bytes(EI, "utf-8"), altchars=None))
|
{
"content_hash": "5f00894ca9d95e1a950d7652063d27e4",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 125,
"avg_line_length": 24.846153846153847,
"alnum_prop": 0.6718266253869969,
"repo_name": "L1b3r4t0r/CodeWisard-string-tools",
"id": "0574eb7a09db339443af8d36d2f4ff3257fc3649",
"size": "435",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "diagnose.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "13742"
},
{
"name": "Python",
"bytes": "435"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
# Import Salt Libs
from salt.states import win_dism as dism
# Import Salt Testing Libs
from salttesting import TestCase
from salttesting.helpers import ensure_in_syspath
from salttesting.mock import (
MagicMock,
patch
)
ensure_in_syspath('../../')
dism.__salt__ = {}
dism.__opts__ = {}
class WinDismTestCase(TestCase):
def test_capability_installed(self):
'''
Test capability installed state
'''
expected = {
'comment': "Installed Capa2",
'changes': {'capability': {'new': 'Capa2'},
'retcode': 0},
'name': 'Capa2',
'result': True}
mock_installed = MagicMock(
side_effect=[['Capa1'], ['Capa1', 'Capa2']])
mock_add = MagicMock(
return_value={'retcode': 0})
with patch.dict(
dism.__salt__, {'dism.installed_capabilities': mock_installed,
'dism.add_capability': mock_add}):
with patch.dict(dism.__opts__, {'test': False}):
out = dism.capability_installed('Capa2', 'somewhere', True)
mock_installed.assert_called_with()
mock_add.assert_called_once_with(
'Capa2', 'somewhere', True, None, False)
self.assertEqual(out, expected)
def test_capability_installed_failure(self):
'''
Test installing a capability which fails with DISM
'''
expected = {
'comment': "Failed to install Capa2: Failed",
'changes': {},
'name': 'Capa2',
'result': False}
mock_installed = MagicMock(
side_effect=[['Capa1'], ['Capa1']])
mock_add = MagicMock(
return_value={'retcode': 67, 'stdout': 'Failed'})
with patch.dict(
dism.__salt__, {'dism.installed_capabilities': mock_installed,
'dism.add_capability': mock_add}):
with patch.dict(dism.__opts__, {'test': False}):
out = dism.capability_installed('Capa2', 'somewhere', True)
mock_installed.assert_called_with()
mock_add.assert_called_once_with(
'Capa2', 'somewhere', True, None, False)
self.assertEqual(out, expected)
def test_capability_installed_installed(self):
'''
Test installing a capability already installed
'''
expected = {
'comment': "The capability Capa2 is already installed",
'changes': {},
'name': 'Capa2',
'result': True}
mock_installed = MagicMock(
return_value=["Capa1", "Capa2"])
mock_add = MagicMock()
with patch.dict(
dism.__salt__, {'dism.installed_capabilities': mock_installed,
'dism.add_capability': mock_add}):
out = dism.capability_installed('Capa2', 'somewhere', True)
mock_installed.assert_called_once_with()
assert not mock_add.called
self.assertEqual(out, expected)
def test_capability_removed(self):
'''
Test capability removed state
'''
expected = {
'comment': "Removed Capa2",
'changes': {'capability': {'old': 'Capa2'},
'retcode': 0},
'name': 'Capa2',
'result': True}
mock_removed = MagicMock(
side_effect=[['Capa1', 'Capa2'], ['Capa1']])
mock_remove = MagicMock(
return_value={'retcode': 0})
with patch.dict(
dism.__salt__, {'dism.installed_capabilities': mock_removed,
'dism.remove_capability': mock_remove}):
with patch.dict(dism.__opts__, {'test': False}):
out = dism.capability_removed('Capa2')
mock_removed.assert_called_with()
mock_remove.assert_called_once_with('Capa2', None, False)
self.assertEqual(out, expected)
def test_capability_removed_failure(self):
'''
Test removing a capability which fails with DISM
'''
expected = {
'comment': "Failed to remove Capa2: Failed",
'changes': {},
'name': 'Capa2',
'result': False}
mock_removed = MagicMock(
side_effect=[['Capa1', 'Capa2'], ['Capa1', 'Capa2']])
mock_remove = MagicMock(
return_value={'retcode': 67, 'stdout': 'Failed'})
with patch.dict(
dism.__salt__, {'dism.installed_capabilities': mock_removed,
'dism.remove_capability': mock_remove}):
with patch.dict(dism.__opts__, {'test': False}):
out = dism.capability_removed('Capa2')
mock_removed.assert_called_with()
mock_remove.assert_called_once_with(
'Capa2', None, False)
self.assertEqual(out, expected)
def test_capability_removed_removed(self):
'''
Test removing a capability already removed
'''
expected = {
'comment': "The capability Capa2 is already removed",
'changes': {},
'name': 'Capa2',
'result': True}
mock_removed = MagicMock(
return_value=["Capa1"])
mock_remove = MagicMock()
with patch.dict(
dism.__salt__, {'dism.installed_capabilities': mock_removed,
'dism.add_capability': mock_remove}):
out = dism.capability_removed('Capa2', 'somewhere', True)
mock_removed.assert_called_once_with()
assert not mock_remove.called
self.assertEqual(out, expected)
def test_feature_installed(self):
'''
Test installing a feature with DISM
'''
expected = {
'comment': "Installed Feat2",
'changes': {'feature': {'new': 'Feat2'},
'retcode': 0},
'name': 'Feat2',
'result': True}
mock_installed = MagicMock(
side_effect=[['Feat1'], ['Feat1', 'Feat2']])
mock_add = MagicMock(
return_value={'retcode': 0})
with patch.dict(
dism.__salt__, {'dism.installed_features': mock_installed,
'dism.add_feature': mock_add}):
with patch.dict(dism.__opts__, {'test': False}):
out = dism.feature_installed('Feat2')
mock_installed.assert_called_with()
mock_add.assert_called_once_with(
'Feat2', None, None, False, False, None, False)
self.assertEqual(out, expected)
def test_feature_installed_failure(self):
'''
Test installing a feature which fails with DISM
'''
expected = {
'comment': "Failed to install Feat2: Failed",
'changes': {},
'name': 'Feat2',
'result': False}
mock_installed = MagicMock(
side_effect=[['Feat1'], ['Feat1']])
mock_add = MagicMock(
return_value={'retcode': 67, 'stdout': 'Failed'})
with patch.dict(
dism.__salt__, {'dism.installed_features': mock_installed,
'dism.add_feature': mock_add}):
with patch.dict(dism.__opts__, {'test': False}):
out = dism.feature_installed('Feat2')
mock_installed.assert_called_with()
mock_add.assert_called_once_with(
'Feat2', None, None, False, False, None, False)
self.assertEqual(out, expected)
def test_feature_installed_installed(self):
'''
Test installing a feature already installed
'''
expected = {
'comment': "The feature Feat1 is already installed",
'changes': {},
'name': 'Feat1',
'result': True}
mock_installed = MagicMock(
side_effect=[['Feat1', 'Feat2'], ['Feat1', 'Feat2']])
mock_add = MagicMock()
with patch.dict(
dism.__salt__, {'dism.installed_features': mock_installed,
'dism.add_feature': mock_add}):
out = dism.feature_installed('Feat1')
mock_installed.assert_called_once_with()
assert not mock_add.called
self.assertEqual(out, expected)
def test_feature_removed(self):
'''
Test removing a feature with DISM
'''
expected = {
'comment': "Removed Feat2",
'changes': {'feature': {'old': 'Feat2'},
'retcode': 0},
'name': 'Feat2',
'result': True}
mock_removed = MagicMock(
side_effect=[['Feat1', 'Feat2'], ['Feat1']])
mock_remove = MagicMock(
return_value={'retcode': 0})
with patch.dict(
dism.__salt__, {'dism.installed_features': mock_removed,
'dism.remove_feature': mock_remove}):
with patch.dict(dism.__opts__, {'test': False}):
out = dism.feature_removed('Feat2')
mock_removed.assert_called_with()
mock_remove.assert_called_once_with(
'Feat2', False, None, False)
self.assertEqual(out, expected)
def test_feature_removed_failure(self):
'''
Test removing a feature which fails with DISM
'''
expected = {
'comment': "Failed to remove Feat2: Failed",
'changes': {},
'name': 'Feat2',
'result': False}
mock_removed = MagicMock(
side_effect=[['Feat1', 'Feat2'], ['Feat1', 'Feat2']])
mock_remove = MagicMock(
return_value={'retcode': 67, 'stdout': 'Failed'})
with patch.dict(
dism.__salt__, {'dism.installed_features': mock_removed,
'dism.remove_feature': mock_remove}):
with patch.dict(dism.__opts__, {'test': False}):
out = dism.feature_removed('Feat2')
mock_removed.assert_called_with()
mock_remove.assert_called_once_with(
'Feat2', False, None, False)
self.assertEqual(out, expected)
def test_feature_removed_removed(self):
'''
Test removing a feature already removed
'''
expected = {
'comment': "The feature Feat2 is already removed",
'changes': {},
'name': 'Feat2',
'result': True}
mock_removed = MagicMock(
side_effect=[['Feat1'], ['Feat1']])
mock_remove = MagicMock()
with patch.dict(
dism.__salt__, {'dism.installed_features': mock_removed,
'dism.remove_feature': mock_remove}):
out = dism.feature_removed('Feat2')
mock_removed.assert_called_once_with()
assert not mock_remove.called
self.assertEqual(out, expected)
def test_package_installed(self):
'''
Test installing a package with DISM
'''
expected = {
'comment': "Installed Pack2",
'changes': {'package': {'new': 'Pack2'},
'retcode': 0},
'name': 'Pack2',
'result': True}
mock_installed = MagicMock(
side_effect=[['Pack1'], ['Pack1', 'Pack2']])
mock_add = MagicMock(
return_value={'retcode': 0})
mock_info = MagicMock(
return_value={'Package Identity': 'Pack2'})
with patch.dict(
dism.__salt__, {'dism.installed_packages': mock_installed,
'dism.add_package': mock_add,
'dism.package_info': mock_info}):
with patch.dict(dism.__opts__, {'test': False}):
out = dism.package_installed('Pack2')
mock_installed.assert_called_with()
mock_add.assert_called_once_with(
'Pack2', False, False, None, False)
self.assertEqual(out, expected)
def test_package_installed_failure(self):
'''
Test installing a package which fails with DISM
'''
expected = {
'comment': "Failed to install Pack2: Failed",
'changes': {},
'name': 'Pack2',
'result': False}
mock_installed = MagicMock(
side_effect=[['Pack1'], ['Pack1']])
mock_add = MagicMock(
return_value={'retcode': 67, 'stdout': 'Failed'})
mock_info = MagicMock(
return_value={'Package Identity': 'Pack2'})
with patch.dict(
dism.__salt__, {'dism.installed_packages': mock_installed,
'dism.add_package': mock_add,
'dism.package_info': mock_info}):
with patch.dict(dism.__opts__, {'test': False}):
out = dism.package_installed('Pack2')
mock_installed.assert_called_with()
mock_add.assert_called_once_with(
'Pack2', False, False, None, False)
self.assertEqual(out, expected)
def test_package_installed_installed(self):
'''
Test installing a package already installed
'''
expected = {
'comment': "The package Pack2 is already installed: Pack2",
'changes': {},
'name': 'Pack2',
'result': True}
mock_installed = MagicMock(
side_effect=[['Pack1', 'Pack2'], ['Pack1', 'Pack2']])
mock_add = MagicMock()
mock_info = MagicMock(
return_value={'Package Identity': 'Pack2'})
with patch.dict(
dism.__salt__, {'dism.installed_packages': mock_installed,
'dism.add_package': mock_add,
'dism.package_info': mock_info}):
out = dism.package_installed('Pack2')
mock_installed.assert_called_once_with()
assert not mock_add.called
self.assertEqual(out, expected)
def test_package_removed(self):
'''
Test removing a package with DISM
'''
expected = {
'comment': "Removed Pack2",
'changes': {'package': {'old': 'Pack2'},
'retcode': 0},
'name': 'Pack2',
'result': True}
mock_removed = MagicMock(
side_effect=[['Pack1', 'Pack2'], ['Pack1']])
mock_remove = MagicMock(
return_value={'retcode': 0})
mock_info = MagicMock(
return_value={'Package Identity': 'Pack2'})
with patch.dict(
dism.__salt__, {'dism.installed_packages': mock_removed,
'dism.remove_package': mock_remove,
'dism.package_info': mock_info}):
with patch.dict(dism.__opts__, {'test': False}):
out = dism.package_removed('Pack2')
mock_removed.assert_called_with()
mock_remove.assert_called_once_with(
'Pack2', None, False)
self.assertEqual(out, expected)
def test_package_removed_failure(self):
'''
Test removing a package which fails with DISM
'''
expected = {
'comment': "Failed to remove Pack2: Failed",
'changes': {},
'name': 'Pack2',
'result': False}
mock_removed = MagicMock(
side_effect=[['Pack1', 'Pack2'], ['Pack1', 'Pack2']])
mock_remove = MagicMock(
return_value={'retcode': 67, 'stdout': 'Failed'})
mock_info = MagicMock(
return_value={'Package Identity': 'Pack2'})
with patch.dict(
dism.__salt__, {'dism.installed_packages': mock_removed,
'dism.remove_package': mock_remove,
'dism.package_info': mock_info}):
with patch.dict(dism.__opts__, {'test': False}):
out = dism.package_removed('Pack2')
mock_removed.assert_called_with()
mock_remove.assert_called_once_with(
'Pack2', None, False)
self.assertEqual(out, expected)
def test_package_removed_removed(self):
'''
Test removing a package already removed
'''
expected = {
'comment': "The package Pack2 is already removed",
'changes': {},
'name': 'Pack2',
'result': True}
mock_removed = MagicMock(
side_effect=[['Pack1'], ['Pack1']])
mock_remove = MagicMock()
mock_info = MagicMock(
return_value={'Package Identity': 'Pack2'})
with patch.dict(
dism.__salt__, {'dism.installed_packages': mock_removed,
'dism.remove_package': mock_remove,
'dism.package_info': mock_info}):
out = dism.package_removed('Pack2')
mock_removed.assert_called_once_with()
assert not mock_remove.called
self.assertEqual(out, expected)
if __name__ == '__main__':
from integration import run_tests
run_tests(WinDismTestCase, needs_daemon=False)
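# Hedged summary (drawn only from the mocked calls above): a successful run of
# dism.capability_installed('Capa2', 'somewhere', True) invokes
# dism.add_capability('Capa2', 'somewhere', True, None, False) exactly once and
# reports changes={'capability': {'new': 'Capa2'}, 'retcode': 0}; the feature
# and package states follow the same install/remove pattern.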
|
{
"content_hash": "6b504507efe19c9d4e23859af9b8fe97",
"timestamp": "",
"source": "github",
"line_count": 516,
"max_line_length": 75,
"avg_line_length": 34.14147286821706,
"alnum_prop": 0.5015609922234205,
"repo_name": "stephane-martin/salt-debian-packaging",
"id": "9d2811ff10fcb367f0d5f63c6c11fb3ff5109b1e",
"size": "17663",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "salt-2016.3.3/tests/unit/states/win_dism_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "13798"
},
{
"name": "C",
"bytes": "986"
},
{
"name": "Groff",
"bytes": "13634346"
},
{
"name": "HTML",
"bytes": "39558"
},
{
"name": "Makefile",
"bytes": "20902"
},
{
"name": "NSIS",
"bytes": "22316"
},
{
"name": "PowerShell",
"bytes": "38719"
},
{
"name": "Python",
"bytes": "40857506"
},
{
"name": "SaltStack",
"bytes": "58278"
},
{
"name": "Scheme",
"bytes": "1790"
},
{
"name": "Shell",
"bytes": "829927"
},
{
"name": "Tcl",
"bytes": "6532"
},
{
"name": "TeX",
"bytes": "11632"
}
],
"symlink_target": ""
}
|
"""A notebook manager that uses the local file system for storage.
Authors:
* Brian Granger
* Zach Sailer
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import io
import os
import glob
import shutil
from tornado import web
from .nbmanager import NotebookManager
from IPython.nbformat import current
from IPython.utils.io import atomic_writing
from IPython.utils.traitlets import Unicode, Bool, TraitError
from IPython.utils.py3compat import getcwd
from IPython.utils import tz
from IPython.html.utils import is_hidden, to_os_path
def sort_key(item):
"""Case-insensitive sorting."""
return item['name'].lower()
#-----------------------------------------------------------------------------
# Classes
#-----------------------------------------------------------------------------
class FileNotebookManager(NotebookManager):
save_script = Bool(False, config=True,
help="""Automatically create a Python script when saving the notebook.
For easier use of import, %run and %load across notebooks, a
<notebook-name>.py script will be created next to any
<notebook-name>.ipynb on each save. This can also be set with the
short `--script` flag.
"""
)
notebook_dir = Unicode(getcwd(), config=True)
def _notebook_dir_changed(self, name, old, new):
"""Do a bit of validation of the notebook dir."""
if not os.path.isabs(new):
# If we receive a non-absolute path, make it absolute.
self.notebook_dir = os.path.abspath(new)
return
if not os.path.exists(new) or not os.path.isdir(new):
raise TraitError("notebook dir %r is not a directory" % new)
checkpoint_dir = Unicode('.ipynb_checkpoints', config=True,
help="""The directory name in which to keep notebook checkpoints
This is a path relative to the notebook's own directory.
By default, it is .ipynb_checkpoints
"""
)
def _copy(self, src, dest):
"""copy src to dest
like shutil.copy2, but log errors in copystat
"""
shutil.copyfile(src, dest)
try:
shutil.copystat(src, dest)
except OSError as e:
self.log.debug("copystat on %s failed", dest, exc_info=True)
def get_notebook_names(self, path=''):
"""List all notebook names in the notebook dir and path."""
path = path.strip('/')
if not os.path.isdir(self._get_os_path(path=path)):
raise web.HTTPError(404, 'Directory not found: ' + path)
names = glob.glob(self._get_os_path('*'+self.filename_ext, path))
names = [os.path.basename(name)
for name in names]
return names
def path_exists(self, path):
"""Does the API-style path (directory) actually exist?
Parameters
----------
path : string
The path to check. This is an API path (`/` separated,
relative to base notebook-dir).
Returns
-------
exists : bool
Whether the path is indeed a directory.
"""
path = path.strip('/')
os_path = self._get_os_path(path=path)
return os.path.isdir(os_path)
def is_hidden(self, path):
"""Does the API style path correspond to a hidden directory or file?
Parameters
----------
path : string
The path to check. This is an API path (`/` separated,
relative to base notebook-dir).
Returns
-------
exists : bool
Whether the path is hidden.
"""
path = path.strip('/')
os_path = self._get_os_path(path=path)
return is_hidden(os_path, self.notebook_dir)
def _get_os_path(self, name=None, path=''):
"""Given a notebook name and a URL path, return its file system
path.
Parameters
----------
name : string
The name of a notebook file with the .ipynb extension
path : string
The relative URL path (with '/' as separator) to the named
notebook.
Returns
-------
path : string
A file system path that combines notebook_dir (location where
server started), the relative path, and the filename with the
            current operating system's path separator.
"""
if name is not None:
path = path + '/' + name
return to_os_path(path, self.notebook_dir)
def notebook_exists(self, name, path=''):
"""Returns a True if the notebook exists. Else, returns False.
Parameters
----------
name : string
The name of the notebook you are checking.
path : string
The relative path to the notebook (with '/' as separator)
Returns
-------
bool
"""
path = path.strip('/')
nbpath = self._get_os_path(name, path=path)
return os.path.isfile(nbpath)
# TODO: Remove this after we create the contents web service and directories are
# no longer listed by the notebook web service.
def list_dirs(self, path):
"""List the directories for a given API style path."""
path = path.strip('/')
os_path = self._get_os_path('', path)
if not os.path.isdir(os_path):
raise web.HTTPError(404, u'directory does not exist: %r' % os_path)
elif is_hidden(os_path, self.notebook_dir):
self.log.info("Refusing to serve hidden directory, via 404 Error")
raise web.HTTPError(404, u'directory does not exist: %r' % os_path)
dir_names = os.listdir(os_path)
dirs = []
for name in dir_names:
os_path = self._get_os_path(name, path)
if os.path.isdir(os_path) and not is_hidden(os_path, self.notebook_dir)\
and self.should_list(name):
                try:
                    model = self.get_dir_model(name, path)
                except IOError:
                    # skip entries that vanish or cannot be read between
                    # listdir() and get_dir_model()
                    continue
                dirs.append(model)
dirs = sorted(dirs, key=sort_key)
return dirs
# TODO: Remove this after we create the contents web service and directories are
# no longer listed by the notebook web service.
def get_dir_model(self, name, path=''):
"""Get the directory model given a directory name and its API style path"""
path = path.strip('/')
os_path = self._get_os_path(name, path)
if not os.path.isdir(os_path):
raise IOError('directory does not exist: %r' % os_path)
info = os.stat(os_path)
last_modified = tz.utcfromtimestamp(info.st_mtime)
created = tz.utcfromtimestamp(info.st_ctime)
# Create the notebook model.
        model = {}
model['name'] = name
model['path'] = path
model['last_modified'] = last_modified
model['created'] = created
model['type'] = 'directory'
return model
def list_notebooks(self, path):
"""Returns a list of dictionaries that are the standard model
for all notebooks in the relative 'path'.
Parameters
----------
path : str
the URL path that describes the relative path for the
listed notebooks
Returns
-------
notebooks : list of dicts
a list of the notebook models without 'content'
"""
path = path.strip('/')
notebook_names = self.get_notebook_names(path)
notebooks = [self.get_notebook(name, path, content=False)
for name in notebook_names if self.should_list(name)]
notebooks = sorted(notebooks, key=sort_key)
return notebooks
def get_notebook(self, name, path='', content=True):
""" Takes a path and name for a notebook and returns its model
Parameters
----------
name : str
the name of the notebook
path : str
the URL path that describes the relative path for
the notebook
Returns
-------
model : dict
the notebook model. If contents=True, returns the 'contents'
dict in the model as well.
"""
path = path.strip('/')
if not self.notebook_exists(name=name, path=path):
raise web.HTTPError(404, u'Notebook does not exist: %s' % name)
os_path = self._get_os_path(name, path)
info = os.stat(os_path)
last_modified = tz.utcfromtimestamp(info.st_mtime)
created = tz.utcfromtimestamp(info.st_ctime)
# Create the notebook model.
        model = {}
model['name'] = name
model['path'] = path
model['last_modified'] = last_modified
model['created'] = created
model['type'] = 'notebook'
if content:
with io.open(os_path, 'r', encoding='utf-8') as f:
try:
nb = current.read(f, u'json')
except Exception as e:
raise web.HTTPError(400, u"Unreadable Notebook: %s %s" % (os_path, e))
self.mark_trusted_cells(nb, name, path)
model['content'] = nb
return model
def save_notebook(self, model, name='', path=''):
"""Save the notebook model and return the model with no content."""
path = path.strip('/')
if 'content' not in model:
raise web.HTTPError(400, u'No notebook JSON data provided')
# One checkpoint should always exist
if self.notebook_exists(name, path) and not self.list_checkpoints(name, path):
self.create_checkpoint(name, path)
new_path = model.get('path', path).strip('/')
new_name = model.get('name', name)
if path != new_path or name != new_name:
self.rename_notebook(name, path, new_name, new_path)
# Save the notebook file
os_path = self._get_os_path(new_name, new_path)
nb = current.to_notebook_json(model['content'])
self.check_and_sign(nb, new_name, new_path)
if 'name' in nb['metadata']:
nb['metadata']['name'] = u''
try:
self.log.debug("Autosaving notebook %s", os_path)
with atomic_writing(os_path, encoding='utf-8') as f:
current.write(nb, f, u'json')
except Exception as e:
raise web.HTTPError(400, u'Unexpected error while autosaving notebook: %s %s' % (os_path, e))
# Save .py script as well
if self.save_script:
py_path = os.path.splitext(os_path)[0] + '.py'
self.log.debug("Writing script %s", py_path)
try:
with atomic_writing(py_path, encoding='utf-8') as f:
current.write(nb, f, u'py')
except Exception as e:
raise web.HTTPError(400, u'Unexpected error while saving notebook as script: %s %s' % (py_path, e))
model = self.get_notebook(new_name, new_path, content=False)
return model
def update_notebook(self, model, name, path=''):
"""Update the notebook's path and/or name"""
path = path.strip('/')
new_name = model.get('name', name)
new_path = model.get('path', path).strip('/')
if path != new_path or name != new_name:
self.rename_notebook(name, path, new_name, new_path)
model = self.get_notebook(new_name, new_path, content=False)
return model
def delete_notebook(self, name, path=''):
"""Delete notebook by name and path."""
path = path.strip('/')
os_path = self._get_os_path(name, path)
if not os.path.isfile(os_path):
raise web.HTTPError(404, u'Notebook does not exist: %s' % os_path)
# clear checkpoints
for checkpoint in self.list_checkpoints(name, path):
checkpoint_id = checkpoint['id']
cp_path = self.get_checkpoint_path(checkpoint_id, name, path)
if os.path.isfile(cp_path):
self.log.debug("Unlinking checkpoint %s", cp_path)
os.unlink(cp_path)
self.log.debug("Unlinking notebook %s", os_path)
os.unlink(os_path)
def rename_notebook(self, old_name, old_path, new_name, new_path):
"""Rename a notebook."""
old_path = old_path.strip('/')
new_path = new_path.strip('/')
if new_name == old_name and new_path == old_path:
return
new_os_path = self._get_os_path(new_name, new_path)
old_os_path = self._get_os_path(old_name, old_path)
# Should we proceed with the move?
if os.path.isfile(new_os_path):
raise web.HTTPError(409, u'Notebook with name already exists: %s' % new_os_path)
if self.save_script:
old_py_path = os.path.splitext(old_os_path)[0] + '.py'
new_py_path = os.path.splitext(new_os_path)[0] + '.py'
if os.path.isfile(new_py_path):
raise web.HTTPError(409, u'Python script with name already exists: %s' % new_py_path)
# Move the notebook file
try:
shutil.move(old_os_path, new_os_path)
except Exception as e:
raise web.HTTPError(500, u'Unknown error renaming notebook: %s %s' % (old_os_path, e))
# Move the checkpoints
old_checkpoints = self.list_checkpoints(old_name, old_path)
for cp in old_checkpoints:
checkpoint_id = cp['id']
old_cp_path = self.get_checkpoint_path(checkpoint_id, old_name, old_path)
new_cp_path = self.get_checkpoint_path(checkpoint_id, new_name, new_path)
if os.path.isfile(old_cp_path):
self.log.debug("Renaming checkpoint %s -> %s", old_cp_path, new_cp_path)
shutil.move(old_cp_path, new_cp_path)
# Move the .py script
if self.save_script:
shutil.move(old_py_path, new_py_path)
# Checkpoint-related utilities
def get_checkpoint_path(self, checkpoint_id, name, path=''):
"""find the path to a checkpoint"""
path = path.strip('/')
basename, _ = os.path.splitext(name)
filename = u"{name}-{checkpoint_id}{ext}".format(
name=basename,
checkpoint_id=checkpoint_id,
ext=self.filename_ext,
)
os_path = self._get_os_path(path=path)
cp_dir = os.path.join(os_path, self.checkpoint_dir)
if not os.path.exists(cp_dir):
os.mkdir(cp_dir)
cp_path = os.path.join(cp_dir, filename)
return cp_path
def get_checkpoint_model(self, checkpoint_id, name, path=''):
"""construct the info dict for a given checkpoint"""
path = path.strip('/')
cp_path = self.get_checkpoint_path(checkpoint_id, name, path)
stats = os.stat(cp_path)
last_modified = tz.utcfromtimestamp(stats.st_mtime)
info = dict(
id = checkpoint_id,
last_modified = last_modified,
)
return info
# public checkpoint API
def create_checkpoint(self, name, path=''):
"""Create a checkpoint from the current state of a notebook"""
path = path.strip('/')
nb_path = self._get_os_path(name, path)
# only the one checkpoint ID:
checkpoint_id = u"checkpoint"
cp_path = self.get_checkpoint_path(checkpoint_id, name, path)
self.log.debug("creating checkpoint for notebook %s", name)
self._copy(nb_path, cp_path)
# return the checkpoint info
return self.get_checkpoint_model(checkpoint_id, name, path)
def list_checkpoints(self, name, path=''):
"""list the checkpoints for a given notebook
This notebook manager currently only supports one checkpoint per notebook.
"""
path = path.strip('/')
checkpoint_id = "checkpoint"
os_path = self.get_checkpoint_path(checkpoint_id, name, path)
if not os.path.exists(os_path):
return []
else:
return [self.get_checkpoint_model(checkpoint_id, name, path)]
def restore_checkpoint(self, checkpoint_id, name, path=''):
"""restore a notebook to a checkpointed state"""
path = path.strip('/')
self.log.info("restoring Notebook %s from checkpoint %s", name, checkpoint_id)
nb_path = self._get_os_path(name, path)
cp_path = self.get_checkpoint_path(checkpoint_id, name, path)
if not os.path.isfile(cp_path):
self.log.debug("checkpoint file does not exist: %s", cp_path)
raise web.HTTPError(404,
u'Notebook checkpoint does not exist: %s-%s' % (name, checkpoint_id)
)
# ensure notebook is readable (never restore from an unreadable notebook)
with io.open(cp_path, 'r', encoding='utf-8') as f:
current.read(f, u'json')
self._copy(cp_path, nb_path)
self.log.debug("copying %s -> %s", cp_path, nb_path)
def delete_checkpoint(self, checkpoint_id, name, path=''):
"""delete a notebook's checkpoint"""
path = path.strip('/')
cp_path = self.get_checkpoint_path(checkpoint_id, name, path)
if not os.path.isfile(cp_path):
raise web.HTTPError(404,
                u'Notebook checkpoint does not exist: %s/%s-%s' % (path, name, checkpoint_id)
)
self.log.debug("unlinking %s", cp_path)
os.unlink(cp_path)
def info_string(self):
return "Serving notebooks from local directory: %s" % self.notebook_dir
|
{
"content_hash": "764c483be03ecc5f21af3a0ac5971942",
"timestamp": "",
"source": "github",
"line_count": 481,
"max_line_length": 115,
"avg_line_length": 38.002079002079,
"alnum_prop": 0.5568685376661743,
"repo_name": "omni5cience/django-inlineformfield",
"id": "c9d4a32a0c43dd5af1efd0b3ab84063b7f8f4370",
"size": "18279",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": ".tox/py27/lib/python2.7/site-packages/IPython/html/services/notebooks/filenbmanager.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "43622"
},
{
"name": "Groff",
"bytes": "3667"
},
{
"name": "HTML",
"bytes": "108126"
},
{
"name": "JavaScript",
"bytes": "853457"
},
{
"name": "Python",
"bytes": "10506732"
},
{
"name": "Shell",
"bytes": "3801"
},
{
"name": "Smarty",
"bytes": "21023"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, unicode_literals
import unittest
from django.core.files.base import ContentFile
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.test.utils import override_settings
from django.utils.six import b
from wagtail.tests.utils import WagtailTestUtils
from wagtail.wagtaildocs import models
@override_settings(_WAGTAILSEARCH_FORCE_AUTO_UPDATE=['elasticsearch'])
class TestIssue613(TestCase, WagtailTestUtils):
def get_elasticsearch_backend(self):
from django.conf import settings
from wagtail.wagtailsearch.backends import get_search_backend
backend_path = 'wagtail.wagtailsearch.backends.elasticsearch'
# Search WAGTAILSEARCH_BACKENDS for an entry that uses the given backend path
for backend_name, backend_conf in settings.WAGTAILSEARCH_BACKENDS.items():
if backend_conf['BACKEND'] == backend_path:
return get_search_backend(backend_name)
else:
# no conf entry found - skip tests for this backend
raise unittest.SkipTest("No WAGTAILSEARCH_BACKENDS entry for the backend %s" % backend_path)
def setUp(self):
self.search_backend = self.get_elasticsearch_backend()
self.login()
def add_document(self, **params):
# Build a fake file
fake_file = ContentFile(b("A boring example document"))
fake_file.name = 'test.txt'
# Submit
post_data = {
'title': "Test document",
'file': fake_file,
}
post_data.update(params)
response = self.client.post(reverse('wagtaildocs:add'), post_data)
# User should be redirected back to the index
self.assertRedirects(response, reverse('wagtaildocs:index'))
# Document should be created
doc = models.Document.objects.filter(title=post_data['title'])
self.assertTrue(doc.exists())
return doc.first()
def edit_document(self, **params):
# Build a fake file
fake_file = ContentFile(b("A boring example document"))
fake_file.name = 'test.txt'
# Create a document without tags to edit
document = models.Document.objects.create(title="Test document", file=fake_file)
# Build another fake file
another_fake_file = ContentFile(b("A boring example document"))
another_fake_file.name = 'test.txt'
# Submit
post_data = {
'title': "Test document changed!",
'file': another_fake_file,
}
post_data.update(params)
response = self.client.post(reverse('wagtaildocs:edit', args=(document.id,)), post_data)
# User should be redirected back to the index
self.assertRedirects(response, reverse('wagtaildocs:index'))
# Document should be changed
doc = models.Document.objects.filter(title=post_data['title'])
self.assertTrue(doc.exists())
return doc.first()
def test_issue_613_on_add(self):
# Reset the search index
self.search_backend.reset_index()
self.search_backend.add_type(models.Document)
# Add a document with some tags
document = self.add_document(tags="hello")
self.search_backend.refresh_index()
# Search for it by tag
results = self.search_backend.search("hello", models.Document)
# Check
self.assertEqual(len(results), 1)
self.assertEqual(results[0].id, document.id)
def test_issue_613_on_edit(self):
# Reset the search index
self.search_backend.reset_index()
self.search_backend.add_type(models.Document)
# Add a document with some tags
document = self.edit_document(tags="hello")
self.search_backend.refresh_index()
# Search for it by tag
results = self.search_backend.search("hello", models.Document)
# Check
self.assertEqual(len(results), 1)
self.assertEqual(results[0].id, document.id)
|
{
"content_hash": "9609d01115498e3a7b8624f9346dddd2",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 104,
"avg_line_length": 35.39473684210526,
"alnum_prop": 0.6510532837670384,
"repo_name": "iansprice/wagtail",
"id": "715b3452090d892b5c2bc80923fd6c2b72fcffa7",
"size": "4035",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wagtail/wagtaildocs/tests/test_search.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "166081"
},
{
"name": "HTML",
"bytes": "325248"
},
{
"name": "JavaScript",
"bytes": "177341"
},
{
"name": "Makefile",
"bytes": "720"
},
{
"name": "Python",
"bytes": "3102671"
},
{
"name": "Shell",
"bytes": "7871"
}
],
"symlink_target": ""
}
|
from tastypie.contrib.contenttypes.fields import GenericForeignKeyField
from tastypie.resources import ModelResource
from content_gfk.models import Note, Quote, Definition, Rating
class DefinitionResource(ModelResource):
class Meta:
resource_name = 'definitions'
queryset = Definition.objects.all()
class NoteResource(ModelResource):
class Meta:
resource_name = 'notes'
queryset = Note.objects.all()
class QuoteResource(ModelResource):
class Meta:
resource_name = 'quotes'
queryset = Quote.objects.all()
class RatingResource(ModelResource):
content_object = GenericForeignKeyField({
Note: NoteResource,
Quote: QuoteResource
}, 'content_object')
class Meta:
resource_name = 'ratings'
queryset = Rating.objects.all()
|
{
"content_hash": "3dd18a035fc1832b9f6261e3af4b29b1",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 71,
"avg_line_length": 25.12121212121212,
"alnum_prop": 0.6960193003618818,
"repo_name": "11craft/django-tastypie",
"id": "47c3c49dbb93f7f10ead5aed5d10b20dec3769e9",
"size": "829",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/content_gfk/api/resources.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
"""
The Plaid API
The Plaid REST API. Please see https://plaid.com/docs/api for more details. # noqa: E501
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from plaid.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
class DocumentRiskSignalInstitutionMetadata(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = True
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'item_id': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'item_id': 'item_id', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, item_id, *args, **kwargs): # noqa: E501
"""DocumentRiskSignalInstitutionMetadata - a model defined in OpenAPI
Args:
item_id (str): The `item_id` of the Item associated with this webhook, warning, or error
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.item_id = item_id
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
|
{
"content_hash": "c26b935e6fd251aabb60855718f01e2c",
"timestamp": "",
"source": "github",
"line_count": 173,
"max_line_length": 110,
"avg_line_length": 39.54913294797688,
"alnum_prop": 0.5594855305466238,
"repo_name": "plaid/plaid-python",
"id": "e2dc6586dfc86c0eebdaf5b41a28caaa685250bf",
"size": "6842",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plaid/model/document_risk_signal_institution_metadata.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "323"
},
{
"name": "Makefile",
"bytes": "622"
},
{
"name": "Mustache",
"bytes": "125163"
},
{
"name": "Python",
"bytes": "9342874"
}
],
"symlink_target": ""
}
|
"""This module is used on every CLI operation"""
import os
import sys
import inspect
import imp
from symautomata.alphabet import createalphabet
sys.path.insert(1, imp.find_module('lightbulb')[1]+'/core/modules')
moduleNames = [name[:-3] for name in os.listdir(imp.find_module('lightbulb')[1]+'/core/modules')
if name.endswith(".py")]
def options_as_dictionary(meta):
"""
Transforms a list of tuples to a dictionary
Args:
meta (list): A list ot option tuples
Retuns:
dict: A dictionary of options key - values
"""
module_config = {}
for seltuple in meta:
module_config.update({seltuple[0]: seltuple[1]})
return module_config
def importmodule(name):
try:
try:
mod = __import__(name)
except:
modfile, pathname, description = imp.find_module(name)
mod = imp.load_module(name, modfile, pathname, description)
components = name.split('.')
for comp in components[1:]:
mod = getattr(mod, comp)
except ImportError as error:
print error.__class__.__name__ + ": " + error.message
return mod
def create_object(object_type, object_type_configuration, handler, handler_configuration):
object_mod = importmodule('lightbulb.core.modules.' + object_type.lower())
object_class = getattr(object_mod, object_type)
handler_mod = importmodule('lightbulb.core.utils.' + handler.lower())
handler_class = getattr(handler_mod, handler)
class Module(object_class):
def setup(self, configuration):
super(Module,self).setup(configuration)
self.handler = handler_class(handler_configuration)
return Module
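# Illustrative sketch (not from the original file): create_object() builds a new
# class that subclasses the requested module and attaches an instance of the
# requested handler during setup(). The module/handler names and option tuples
# below are placeholders and are not guaranteed to exist in the framework.
#
#   module_meta = [("ALPHABET", createalphabet()), ("SEED_FILE", "seed.txt")]
#   handler_meta = [("HOST", "127.0.0.1"), ("PORT", "8080")]
#   Module = create_object('GOFA', options_as_dictionary(module_meta),
#                          'HTTPHandler', options_as_dictionary(handler_meta))
#   instance = Module()                                   # behaves like the module class
#   instance.setup(options_as_dictionary(module_meta))    # also wires instance.handler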
|
{
"content_hash": "66f38291e5619beff1a8cfbd4ef83285",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 96,
"avg_line_length": 34.63265306122449,
"alnum_prop": 0.6529169121979964,
"repo_name": "lightbulb-framework/lightbulb-framework",
"id": "20632be18c94983af28e17338246a08ea9d9a3a8",
"size": "1697",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lightbulb/core/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "46517"
},
{
"name": "Makefile",
"bytes": "149"
},
{
"name": "Python",
"bytes": "790078"
},
{
"name": "Shell",
"bytes": "4829"
},
{
"name": "Yacc",
"bytes": "267605"
}
],
"symlink_target": ""
}
|
"""Test for nrf24l01 module. Portable between MicroPython targets."""
import usys
import ustruct as struct
import utime
from machine import Pin, SPI
from nrf24l01 import NRF24L01
from micropython import const
# Slave pause between receiving data and checking for further packets.
_RX_POLL_DELAY = const(15)
# Slave pauses an additional _SLAVE_SEND_DELAY ms after receiving data and before
# transmitting to allow the (remote) master time to get into receive mode. The
# master may be a slow device. Value tested with Pyboard, ESP32 and ESP8266.
_SLAVE_SEND_DELAY = const(10)
if usys.platform == "pyboard":
cfg = {"spi": 2, "miso": "Y7", "mosi": "Y8", "sck": "Y6", "csn": "Y5", "ce": "Y4"}
elif usys.platform == "esp8266": # Hardware SPI
cfg = {"spi": 1, "miso": 12, "mosi": 13, "sck": 14, "csn": 4, "ce": 5}
elif usys.platform == "esp32": # Software SPI
cfg = {"spi": -1, "miso": 32, "mosi": 33, "sck": 25, "csn": 26, "ce": 27}
else:
raise ValueError("Unsupported platform {}".format(usys.platform))
# Addresses are in little-endian format. They correspond to big-endian
# 0xf0f0f0f0e1, 0xf0f0f0f0d2
pipes = (b"\xe1\xf0\xf0\xf0\xf0", b"\xd2\xf0\xf0\xf0\xf0")
def master():
csn = Pin(cfg["csn"], mode=Pin.OUT, value=1)
ce = Pin(cfg["ce"], mode=Pin.OUT, value=0)
if cfg["spi"] == -1:
spi = SPI(-1, sck=Pin(cfg["sck"]), mosi=Pin(cfg["mosi"]), miso=Pin(cfg["miso"]))
nrf = NRF24L01(spi, csn, ce, payload_size=8)
else:
nrf = NRF24L01(SPI(cfg["spi"]), csn, ce, payload_size=8)
nrf.open_tx_pipe(pipes[0])
nrf.open_rx_pipe(1, pipes[1])
nrf.start_listening()
num_needed = 16
num_successes = 0
num_failures = 0
led_state = 0
print("NRF24L01 master mode, sending %d packets..." % num_needed)
while num_successes < num_needed and num_failures < num_needed:
# stop listening and send packet
nrf.stop_listening()
millis = utime.ticks_ms()
led_state = max(1, (led_state << 1) & 0x0F)
print("sending:", millis, led_state)
try:
nrf.send(struct.pack("ii", millis, led_state))
except OSError:
pass
# start listening again
nrf.start_listening()
# wait for response, with 250ms timeout
start_time = utime.ticks_ms()
timeout = False
while not nrf.any() and not timeout:
if utime.ticks_diff(utime.ticks_ms(), start_time) > 250:
timeout = True
if timeout:
print("failed, response timed out")
num_failures += 1
else:
# recv packet
(got_millis,) = struct.unpack("i", nrf.recv())
# print response and round-trip delay
print(
"got response:",
got_millis,
"(delay",
utime.ticks_diff(utime.ticks_ms(), got_millis),
"ms)",
)
num_successes += 1
# delay then loop
utime.sleep_ms(250)
print("master finished sending; successes=%d, failures=%d" % (num_successes, num_failures))
def slave():
csn = Pin(cfg["csn"], mode=Pin.OUT, value=1)
ce = Pin(cfg["ce"], mode=Pin.OUT, value=0)
if cfg["spi"] == -1:
spi = SPI(-1, sck=Pin(cfg["sck"]), mosi=Pin(cfg["mosi"]), miso=Pin(cfg["miso"]))
nrf = NRF24L01(spi, csn, ce, payload_size=8)
else:
nrf = NRF24L01(SPI(cfg["spi"]), csn, ce, payload_size=8)
nrf.open_tx_pipe(pipes[1])
nrf.open_rx_pipe(1, pipes[0])
nrf.start_listening()
print("NRF24L01 slave mode, waiting for packets... (ctrl-C to stop)")
while True:
if nrf.any():
while nrf.any():
buf = nrf.recv()
millis, led_state = struct.unpack("ii", buf)
print("received:", millis, led_state)
for led in leds:
if led_state & 1:
led.on()
else:
led.off()
led_state >>= 1
utime.sleep_ms(_RX_POLL_DELAY)
# Give master time to get into receive mode.
utime.sleep_ms(_SLAVE_SEND_DELAY)
nrf.stop_listening()
try:
nrf.send(struct.pack("i", millis))
except OSError:
pass
print("sent response")
nrf.start_listening()
try:
import pyb
leds = [pyb.LED(i + 1) for i in range(4)]
except:
leds = []
print("NRF24L01 test module loaded")
print("NRF24L01 pinout for test:")
print(" CE on", cfg["ce"])
print(" CSN on", cfg["csn"])
print(" SCK on", cfg["sck"])
print(" MISO on", cfg["miso"])
print(" MOSI on", cfg["mosi"])
print("run nrf24l01test.slave() on slave, then nrf24l01test.master() on master")
|
{
"content_hash": "55a4013981ffc8eab432e3dec36b9c94",
"timestamp": "",
"source": "github",
"line_count": 150,
"max_line_length": 95,
"avg_line_length": 32.24,
"alnum_prop": 0.5583126550868487,
"repo_name": "henriknelson/micropython",
"id": "56bdb6e26eb9c1277783c1410dbc6a5d99f9903c",
"size": "4836",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "drivers/nrf24l01/nrf24l01test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "106243"
},
{
"name": "C",
"bytes": "12081118"
},
{
"name": "C++",
"bytes": "570652"
},
{
"name": "CMake",
"bytes": "800"
},
{
"name": "HTML",
"bytes": "84456"
},
{
"name": "Makefile",
"bytes": "105674"
},
{
"name": "Objective-C",
"bytes": "7450"
},
{
"name": "Python",
"bytes": "847456"
},
{
"name": "Shell",
"bytes": "13882"
}
],
"symlink_target": ""
}
|
import numpy as np
from sklearn import cross_validation
from sklearn import tree
from sklearn.neighbors import KNeighborsClassifier
from data_processing_ml import *
def ten_fold_CV(classifier, x_train, y_train):
""" This function takes three arguments:
        1) classifier, a just-initialized classifier that we want to train
2) x_train, numpy array containing training data
3) y_train, numpy array containing label for training data
    In this function, we want to do a ten-fold cross-validation with
    the input classifier and training data.
    This function returns the average train score and average test score
    over the ten folds.
"""
# variables to keep track
total_train_score = 0
total_test_score = 0
# cross validated 10 times
for i in range(10):
# split the training data to cross_validation data
# the test_size is 0.1 since this is a 10-fold cross_validation
cv_data_train, cv_data_test, cv_target_train, cv_target_test = \
cross_validation.train_test_split(x_train, y_train, test_size=0.1) # random_state=0
# fit the value and get the scores
classifier.fit(cv_data_train, cv_target_train)
train_score = classifier.score(cv_data_train,cv_target_train)
test_score = classifier.score(cv_data_test,cv_target_test)
# add both scores to total
total_train_score += train_score
total_test_score += test_score
# calculate the average score
average_train = total_train_score / float(10)
average_test = total_test_score / float(10)
return average_train, average_test
def dtree_tuning(x_train, y_train, num=20):
""" This function takes in three arguments:
1) x_train, numpy array containing training data
2) y_train, numpy array containing label for training data
3) num, a default input integer representing the number of depth
we will test. The default number is 20.
In this function, we want to do ten-fold cross-validation using a
    dtree classifier with different depths, and choose the depth that produces
    the best test accuracy.
    This function prints the training and testing scores for each depth and
    returns the max_depth value that achieves the best testing accuracy.
"""
# keep track of final results
final = []
print("Currently cross-validating a Decision Tree Classfier...")
print("The results are in shown in format: (training_score, testing_score, max-depth)")
for i in range(num):
# get max_depth and build the classifier
max_depth = i+1
dtree = tree.DecisionTreeClassifier(max_depth=max_depth)
# cross-validation
average_train,average_test = ten_fold_CV(dtree, x_train, y_train)
# get result and report the result
result = (average_train,average_test,max_depth)
print(result)
final.append(result)
# sort result with testing score, report the best-performing max_depth
L = sorted(final, key=lambda x: x[1], reverse = True)
max_tree = L[0]
print("The max_depth perform the best is", max_tree[2], "with testing score:", max_tree[1], "\n")
return max_tree[2]
def knn_tuning(x_train, y_train, num=20):
""" This function takes in three arguments:
1) x_train, numpy array containing training data
2) y_train, numpy array containing label for training data
3) num, a default input integer representing the number of different
knn clasifiers we will test. The default number is 20.
In this function, we want to do ten-fold cross-validation using a
    knn classifier with different numbers of neighbors, and choose the
    number of neighbors that produces the best test accuracy.
    This function prints the training and testing scores for each classifier and
    returns the number of neighbors (k) that achieves the best testing accuracy.
"""
# keep track of final results
final = []
print("Currently cross-validating a KNN Classfier...")
print("The result in shown in format: (training_score, testing_score, k-value)")
for i in range(num):
# get k-value and build the classifier
k = 2*(i+1)-1
knn = KNeighborsClassifier(n_neighbors=k)
# run cross_validation
train, test = ten_fold_CV(knn, x_train, y_train)
# get result and report the result
result = (train,test,k)
print(result)
final.append(result)
# sort result with testing score, report the best-performing k-value
L = sorted(final, key=lambda x: x[1], reverse = True)
max_knn = L[0]
print("The k-value perform the best is", max_knn[2], "with testing score:", max_knn[1], "\n")
return max_knn[2]
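# Illustrative usage sketch (not part of the original module). x_train and
# y_train are assumed to be numpy arrays prepared elsewhere, e.g. by helpers
# from data_processing_ml; the exact loader is an assumption for this example.
#
#   best_depth = dtree_tuning(x_train, y_train, num=10)
#   best_k = knn_tuning(x_train, y_train, num=10)
#   final_tree = tree.DecisionTreeClassifier(max_depth=best_depth)
#   final_tree.fit(x_train, y_train)
#   final_knn = KNeighborsClassifier(n_neighbors=best_k)
#   final_knn.fit(x_train, y_train)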
|
{
"content_hash": "88a2efe57dad979b2bd0cdd9ae524d18",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 101,
"avg_line_length": 36.36842105263158,
"alnum_prop": 0.6570188133140377,
"repo_name": "maggieli96/35-Final-Project",
"id": "9f777979ea5b1d148d21dbfd7be5a5be94ba27c2",
"size": "4837",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Machine Learning/training_tuning.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25113"
}
],
"symlink_target": ""
}
|
"""Helper functions for model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import REDACTED.transformer_lingvo.lingvo.compat as tf
from REDACTED.transformer_lingvo.lingvo.core import py_utils
import six
from six.moves import range
def ComputeSplits(batch_size, num_splits):
"""Creates a tensor of size num_splits of number of values per split.
Assigns each split floor(batch_size/num_splits) and round-robins
the remainder (if any) to each split.
Example::
batch_size: [5]
num_splits: 3
returns: [2, 2, 1]
Args:
batch_size: tensor of rank 0, size of tensor to be split
num_splits: number of splits to split tensor into
Returns:
tensor of length num_splits containing sizes of each split
"""
values = tf.tile(
tf.div([batch_size], num_splits), tf.constant([num_splits],
dtype=tf.int32))
mods = tf.tile(tf.constant([1]), tf.math.floormod([batch_size], num_splits))
zeros = tf.tile(
tf.constant([0]), tf.subtract(tf.shape(values), tf.shape(mods)))
mods = tf.concat([mods, zeros], 0)
ret = tf.add(values, mods)
# for some reason TF erases shape information if num_splits is 1
if num_splits == 1:
ret.set_shape([1])
return ret
def SplitTensors(xs, num_splits):
"""Splits tensors in `xs` evenly into num_splits along the 1st dimenion.
Args:
xs: A tuple of tensors. Each tensor's 1st dimension is the same size.
num_splits: A python integer.
Returns:
A tuple of lists of tensors, num elements in the tuple = len(xs).
i-th element in each list corresponds to i-th split of each tensor in xs
along the first dimension of each tensor.
"""
# assert first dim of all tensors in xs is equal
batch_dims = [tf.shape(x)[0] for x in xs]
all_batch_dims = tf.stack(batch_dims)
all_batch_dims = py_utils.with_dependencies([
py_utils.assert_equal(
all_batch_dims,
tf.shape(xs[0])[0],
message='first dim of tensors in xs must match'),
py_utils.assert_greater_equal(
tf.shape(xs[0])[0],
num_splits,
message='first dim of tensors in xs must be greater than num_splits')
], all_batch_dims)
splits = ComputeSplits(tf.shape(xs[0])[0], num_splits)
# add the above assertion into the compute graph
splits = py_utils.with_dependencies([all_batch_dims], splits)
split_xs = [tf.split(axis=0, num_or_size_splits=splits, value=x) for x in xs]
return split_xs
def SplitDictOfTensors(t_dict, num_splits):
"""Splits tensors in `t_dict` evenly into `num_splits` along the 1st dimenion.
Args:
t_dict: A dictionary of tensors. Each tensor's 1st dimension is the same
size.
num_splits: A python integer.
Returns:
A list of dictionaries of tensors, num elements in the list = num_splits
i-th dictionary in the list corresponds to i-th split of each tensor
along the first dimension of each tensor for each key in the original dict.
"""
keys = []
values = []
for k, v in sorted(six.iteritems(t_dict)):
keys.append(k)
values.append(v)
splits = SplitTensors(tuple(values), num_splits)
assert all(len(lst) == len(splits[0]) for lst in splits)
ret_list = []
for s in range(num_splits):
d = {}
for k in range(len(splits)):
d[keys[k]] = splits[k][s]
ret_list.append(d)
return ret_list
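# Illustrative sketch (not part of the original module). Splitting a dict of
# two tensors whose leading dimension is 5 into 3 pieces follows the
# round-robin rule in ComputeSplits, i.e. split sizes [2, 2, 1]:
#
#   batch = {'ids': tf.zeros([5, 7]), 'labels': tf.zeros([5])}
#   shards = SplitDictOfTensors(batch, 3)
#   # shards is a list of 3 dicts with the same keys as `batch`;
#   # shards[0]['ids'] has shape [2, 7] and shards[2]['ids'] has shape [1, 7].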
|
{
"content_hash": "b23c2a1adb9d3f46f47b8b70096ca77e",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 80,
"avg_line_length": 30.767857142857142,
"alnum_prop": 0.6642484039466048,
"repo_name": "mlperf/training_results_v0.7",
"id": "e37de1c27e55738f1c8c97b1b599987f1d2eed44",
"size": "4163",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Google/benchmarks/transformer/implementations/transformer-research-TF-tpu-v4-512/lingvo/core/input_generator_helper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1731"
},
{
"name": "Awk",
"bytes": "14530"
},
{
"name": "Batchfile",
"bytes": "13130"
},
{
"name": "C",
"bytes": "172914"
},
{
"name": "C++",
"bytes": "13037795"
},
{
"name": "CMake",
"bytes": "113458"
},
{
"name": "CSS",
"bytes": "70255"
},
{
"name": "Clojure",
"bytes": "622652"
},
{
"name": "Cuda",
"bytes": "1974745"
},
{
"name": "Dockerfile",
"bytes": "149523"
},
{
"name": "Groovy",
"bytes": "160449"
},
{
"name": "HTML",
"bytes": "171537"
},
{
"name": "Java",
"bytes": "189275"
},
{
"name": "JavaScript",
"bytes": "98224"
},
{
"name": "Julia",
"bytes": "430755"
},
{
"name": "Jupyter Notebook",
"bytes": "11091342"
},
{
"name": "Lua",
"bytes": "17720"
},
{
"name": "MATLAB",
"bytes": "34903"
},
{
"name": "Makefile",
"bytes": "215967"
},
{
"name": "Perl",
"bytes": "1551186"
},
{
"name": "PowerShell",
"bytes": "13906"
},
{
"name": "Python",
"bytes": "36943114"
},
{
"name": "R",
"bytes": "134921"
},
{
"name": "Raku",
"bytes": "7280"
},
{
"name": "Ruby",
"bytes": "4930"
},
{
"name": "SWIG",
"bytes": "140111"
},
{
"name": "Scala",
"bytes": "1304960"
},
{
"name": "Shell",
"bytes": "1312832"
},
{
"name": "Smalltalk",
"bytes": "3497"
},
{
"name": "Starlark",
"bytes": "69877"
},
{
"name": "TypeScript",
"bytes": "243012"
}
],
"symlink_target": ""
}
|
import sys
import numpy as np
from itertools import izip
BINARYMATFILENAME = sys.argv[1]
MATERNALFASTAFILENAMELISTFILENAME = sys.argv[2]
PATERNALFASTAFILENAMELISTFILENAME = sys.argv[3]
OUTPUTSEQUENCESMATFILENAMEPREFIX = sys.argv[4] # Should not end with .
OUTPUTSEQUENCESPATFILENAMEPREFIX = sys.argv[5] # Should not end with .
OUTPUTLABELSFILENAMEPREFIX = sys.argv[6] # Should not end with .
INDIVIDUALSLIST = []
for i in range(7, len(sys.argv)):
INDIVIDUALSLIST.append(sys.argv[i])
def readFileNameList(fileNameListFileName):
# Read a list of strings from a file
# ASSUMES THAT INDIVIDUAL NAMES ARE IN THE FILE NAMES
fileNameListFile = open(fileNameListFileName)
fileNameList = fileNameListFile.readlines()
fileNameListFile.close()
fileList = []
for individual in INDIVIDUALSLIST:
fileNameIndividual = filter(lambda x: individual in x, fileNameList)
assert(len(fileNameIndividual) == 1)
fileList.append(open(fileNameIndividual[0].strip()))
return fileList
def openOutputFiles():
# Open the output files
outputSequencesMatFileList = []
outputSequencesPatFileList = []
outputLabelsFileList = []
for individual in INDIVIDUALSLIST:
# Create output maternal sequence, paternal sequence, and label files for each individual
outputSequencesMatFile = open(OUTPUTSEQUENCESMATFILENAMEPREFIX + "." + individual + ".txt", 'w+')
outputSequencesPatFile = open(OUTPUTSEQUENCESPATFILENAMEPREFIX + "." + individual + ".txt", 'w+')
outputLabelsFile = open(OUTPUTLABELSFILENAMEPREFIX + "." + individual + ".txt", 'w+')
outputSequencesMatFileList.append(outputSequencesMatFile)
outputSequencesPatFileList.append(outputSequencesPatFile)
outputLabelsFileList.append(outputLabelsFile)
return [outputSequencesMatFileList, outputSequencesPatFileList, outputLabelsFileList]
def getNextSequences(fastaFileList):
# Get the next sequences from a fasta file list
nextSequences = []
for fastaFile in fastaFileList:
# Iterate through the fasta files and get the next sequences from each
fastaFile.readline()
nextSequences.append(fastaFile.readline().strip().upper())
return nextSequences
def recordSequences(sequenceList, fileList):
# Record a list of sequences to a list of files
for sequence, f in izip(sequenceList, fileList):
# Iterate through the sequences and record each
f.write(sequence + "\n")
def recordLabels(labelsList, outputLabelsFileList):
# Record a list of labels to a list of files
for label, outputLabelsFile in izip(labelsList, outputLabelsFileList):
# Iterate through the labels and record each
outputLabelsFile.write(str(label) + "\n")
def getAgreeAndDisagreeFastasWithSNPs():
# Get the maternal and paternal sequences and their corresponding labels for each individual
# Also remove sequences that are the same across individuals with different labels
# ASSUMES THAT THE MATERNAL AND PATERNAL FASTA FILES ARE IN THE SAME ORDER AS THEIR CORRESPONDING INDIVIDUALS
# ASSUMES THAT EACH ROW IN THE BINARY MATRIX CORRESPONDS TO THE SAME ROWS IN THE MATERNAL AND PATERNAL FASTA FILES
binaryMat = np.genfromtxt(BINARYMATFILENAME, dtype = np.int8, names = True, usecols = INDIVIDUALSLIST)
maternalFastaFileList = readFileNameList(MATERNALFASTAFILENAMELISTFILENAME)
paternalFastaFileList = readFileNameList(PATERNALFASTAFILENAMELISTFILENAME)
[outputSequencesMatFileList, outputSequencesPatFileList, outputLabelsFileList] = openOutputFiles()
for binaryRow in binaryMat:
# Iterate through the rows of the binary matrix and record each sequence in the appropriate set
maternalSequences = getNextSequences(maternalFastaFileList)
paternalSequences = getNextSequences(paternalFastaFileList)
binaryList = [x for x in binaryRow]
binaryArray = np.array(binaryList)
if 0 not in binaryArray:
# The current peak is present in everyone, so record it
recordSequences(maternalSequences, outputSequencesMatFileList)
recordSequences(paternalSequences, outputSequencesPatFileList)
recordLabels(binaryArray, outputLabelsFileList)
elif 1 not in binaryArray:
# None of the selected individuals have the peak, so skip it
continue
else:
# The current peak is not present in everyone, so record it only if there is some sequence difference between at least one individual with the peak and at least one individual without it
oneIndexes = np.nonzero(binaryArray)[0]
zeroIndexes = np.setdiff1d(range(0, len(INDIVIDUALSLIST)), oneIndexes)
disagreementFound = False
for oi in oneIndexes:
# Iterate through the individuals with a peak and compare their sequences to those without
for zi in zeroIndexes:
# Iterate through the individuals without a peak and compare their sequences
if maternalSequences[oi] == maternalSequences[zi]:
# The maternal sequences are the same, so see if the paternal sequences are
if paternalSequences[oi] != paternalSequences[zi]:
# A disagreement has been found, so record the sequences and stop
recordSequences(maternalSequences, outputSequencesMatFileList)
recordSequences(paternalSequences, outputSequencesPatFileList)
recordLabels(binaryArray, outputLabelsFileList)
disagreementFound = True
break
elif maternalSequences[oi] == paternalSequences[zi]:
# The maternal sequence is the same as the paternal sequence, so see if the paternal sequence of the individual with a peak is the same as the maternal sequence of the individual without a peak
if paternalSequences[oi] != maternalSequences[zi]:
# A disagreement has been found, so record the sequences and stop
recordSequences(maternalSequences, outputSequencesMatFileList)
recordSequences(paternalSequences, outputSequencesPatFileList)
recordLabels(binaryArray, outputLabelsFileList)
disagreementFound = True
break
else:
# A disagreement has been found, so record the sequences and stop
recordSequences(maternalSequences, outputSequencesMatFileList)
recordSequences(paternalSequences, outputSequencesPatFileList)
recordLabels(binaryArray, outputLabelsFileList)
disagreementFound = True
break
if disagreementFound:
# A disagreement has been found, so stop
break
for individualFileList in izip(maternalFastaFileList, paternalFastaFileList, outputSequencesMatFileList, outputSequencesPatFileList, outputLabelsFileList):
# Iterate through the individuals and close all of their input and output files
for individualFile in individualFileList:
# Iterate through the files for the current individual and close each
individualFile.close()
if __name__=="__main__":
getAgreeAndDisagreeFastasWithSNPs()
|
{
"content_hash": "c3a05d78a94cc72706f6d87fcfc035dc",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 199,
"avg_line_length": 50.76335877862596,
"alnum_prop": 0.783609022556391,
"repo_name": "imk1/IMKTFBindingCode",
"id": "b55a4b07e72168dea51646a307a065629c551187",
"size": "6650",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "getAgreeAndDisagreeFastasWithSNPs.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1156919"
},
{
"name": "R",
"bytes": "22835"
},
{
"name": "Shell",
"bytes": "416606"
}
],
"symlink_target": ""
}
|
import numpy as np
from robosuite.models.objects import PrimitiveObject
from robosuite.utils.mjcf_utils import get_size
class CapsuleObject(PrimitiveObject):
"""
A capsule object.
Args:
size (2-tuple of float): (radius, half-length) size parameters for this capsule object
"""
def __init__(
self,
name,
size=None,
size_max=None,
size_min=None,
density=None,
friction=None,
rgba=None,
solref=None,
solimp=None,
material=None,
joints="default",
obj_type="all",
duplicate_collision_geoms=True,
):
size = get_size(size, size_max, size_min, [0.07, 0.07], [0.03, 0.03])
super().__init__(
name=name,
size=size,
rgba=rgba,
density=density,
friction=friction,
solref=solref,
solimp=solimp,
material=material,
joints=joints,
obj_type=obj_type,
duplicate_collision_geoms=duplicate_collision_geoms,
)
def sanity_check(self):
"""
Checks to make sure inputted size is of correct length
Raises:
AssertionError: [Invalid size length]
"""
assert len(self.size) == 2, "capsule size should have length 2"
def _get_object_subtree(self):
return self._get_object_subtree_(ob_type="capsule")
@property
def bottom_offset(self):
return np.array([0, 0, -1 * (self.size[0] + self.size[1])])
@property
def top_offset(self):
return np.array([0, 0, (self.size[0] + self.size[1])])
@property
def horizontal_radius(self):
return self.size[0]
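# Illustrative sketch (not part of the original module). With size=(r, h) the
# capsule extends r + h along its axis on each side of the centre, which is
# what bottom_offset/top_offset report; the object name below is arbitrary.
#
#   cap = CapsuleObject(name="demo_capsule", size=(0.03, 0.05))
#   cap.top_offset         # -> array([0.  , 0.  , 0.08])
#   cap.bottom_offset      # -> array([ 0.  ,  0.  , -0.08])
#   cap.horizontal_radius  # -> 0.03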
|
{
"content_hash": "bc838cdee23044a3003a55af15d6f44a",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 94,
"avg_line_length": 25.544117647058822,
"alnum_prop": 0.5561312607944733,
"repo_name": "ARISE-Initiative/robosuite",
"id": "4aec85713c51b4485371077746130b6b2d56aea4",
"size": "1737",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "robosuite/models/objects/primitive/capsule.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CMake",
"bytes": "552"
},
{
"name": "Python",
"bytes": "1197777"
}
],
"symlink_target": ""
}
|
"""
Defines methods for generating features with Kaldi.
"""
from os import path,remove
from shutil import copy2
from string import strip
from subprocess import Popen
from tempfile import NamedTemporaryFile
from skip.util import (KaldiObject, _randFilename, _getCachedObject,
_cacheObject, _refreshRequired, KaldiError)
def makeMfccFeats(directory, config, wavscp, segmentsfile, samplefreq,
useenergy, framelength, frameshift, numceps, applycmvn, normvars,
utt2spk, spk2utt, deltaorder):
Mfccdir = path.join(directory, "mfcc_feats")
(feats, idxFile) = _getCachedObject(Mfccdir, " ".join(["{0}:{1}".format(k,v) for k,v in locals().iteritems()]))
# check wave files to make sure they are up to date
wavsOld = False
try:
mtimes = feats.wav_times
for wavFile in mtimes.keys():
if int(path.getmtime(wavFile)) > mtimes[wavFile]:
wavsOld = True
break
except AttributeError:
pass
# check file modification time to see if a refresh is required
origNames = []
copyNames = []
try:
origNames.append(wavscp)
copyNames.append(feats.wavscp)
except AttributeError:
copyNames.append(None)
if segmentsfile:
try:
origNames.append(segmentsfile)
copyNames.append(feats.segmentsfile)
except AttributeError:
copyNames.append(None)
if utt2spk and spk2utt:
try:
origNames.append(utt2spk)
copyNames.append(feats.utt2spk)
except AttributeError:
copyNames.append(None)
try:
origNames.append(spk2utt)
copyNames.append(feats.spk2utt)
except AttributeError:
copyNames.append(None)
if not _refreshRequired(zip(origNames, copyNames)) and not wavsOld:
return feats
feats.filename = path.join(Mfccdir, _randFilename("feats-", ".ark"))
feats.wavscp = path.join(Mfccdir, _randFilename("wav-", ".scp"))
copy2(wavscp, feats.wavscp)
if segmentsfile:
feats.segmentsfile = path.join(Mfccdir, _randFilename("seg-", ".txt"))
copy2(segmentsfile, feats.segmentsfile)
feats.wav_times = {}
with open(feats.wavscp, "r") as wavsIn:
for line in wavsIn:
if strip(line):
if "|" not in line: # ignore commands in the table
fname = strip(line[line.index(" "):])
feats.wav_times[fname] = int(path.getmtime(fname))
if utt2spk and spk2utt:
feats.utt2spk = path.join(Mfccdir, _randFilename("utt2spk-", ".ark"))
copy2(utt2spk, feats.utt2spk)
feats.spk2utt = path.join(Mfccdir, _randFilename("spk2utt-", ".ark"))
copy2(spk2utt, feats.spk2utt)
tmp = NamedTemporaryFile(suffix=".ark", delete=False)
wavSegmentsFile = tmp.name
tmp.close()
tmp = NamedTemporaryFile(suffix=".ark", delete=False)
rawfeatsFile = tmp.name
tmp.close()
# prepare commands
rawfeatsDest = "\"ark:{0}\"".format(rawfeatsFile)
deltaStr = ""
if deltaorder > 0:
deltaStr = "ark:- | {0} --delta-order={1} ark:-".format(config.adddeltas, deltaorder)
if not applycmvn:
rawfeatsDest = "{0} \"ark:{1}\"".format(deltaStr, feats.filename)
elif not applycmvn:
rawfeatsDest = "\"ark:{0}\"".format(feats.filename)
if segmentsfile:
sourceSpecifier = "ark:{0}".format(wavSegmentsFile)
makeSegmentsCmd = "{0} \"scp:{1}\" \"{2}\" \"ark:{3}\"".format(config.extractsegments,
feats.wavscp, feats.segmentsfile, wavSegmentsFile)
else:
sourceSpecifier = "scp:{0}".format(feats.wavscp)
makeRawFeatCmd = "{0} --sample-frequency={1} --use-energy={2} \
--frame-length={3} --frame-shift={4} --num-ceps={5} \
\"{6}\" {7}".format(config.computemfccfeats,
samplefreq, str(useenergy).lower(), framelength, frameshift,
numceps, sourceSpecifier, rawfeatsDest)
if applycmvn:
spk2uttStr = ""
utt2spkStr = ""
if utt2spk and spk2utt:
spk2uttStr = "--spk2utt=\"ark:{0}\"".format(spk2utt)
utt2spkStr = "--utt2spk=\"ark:{0}\"".format(utt2spk)
applyCmvnCmd = "{0} {1} \"ark:{2}\" ark:- | {3} --norm-vars={4} \
{5} ark:- \"ark:{2}\" {6} \"ark:{7}\"".format(config.computecmvnstats,
spk2uttStr, rawfeatsFile, config.applycmvn, str(normvars).lower(),
utt2spkStr, deltaStr, feats.filename)
# compute and return the features
logFile = open(path.join(Mfccdir, _randFilename(suffix=".log")), "w")
try:
if segmentsfile:
segmentProc = Popen(makeSegmentsCmd, stderr=logFile, shell=True)
segmentProc.communicate()
retCode = segmentProc.poll()
if retCode:
raise KaldiError(logFile.name)
featProc = Popen(makeRawFeatCmd, stderr=logFile, shell=True)
featProc.communicate()
retCode = featProc.poll()
if retCode:
raise KaldiError(logFile.name)
if applycmvn:
featProc = Popen(applyCmvnCmd, stderr=logFile, shell=True)
featProc.communicate()
retCode = featProc.poll()
if retCode:
raise KaldiError(logFile.name)
finally:
logFile.close()
remove(rawfeatsFile)
return _cacheObject(feats, idxFile)
def segmentFeats(directory, config, featsfile, segfile, framerate):
segDir = path.join(directory, "feat_segments")
(feats, idxFile) = _getCachedObject(segDir, " ".join(["{0}:{1}".format(k,v) for k,v in locals().iteritems()]))
refreshRequired = False
try:
if int(path.getmtime(featsfile)) > feats.featsfile_time:
refreshRequired = True
except AttributeError:
refreshRequired = True
try:
if int(path.getmtime(segfile)) > feats.segfile_time:
refreshRequired = True
except AttributeError:
refreshRequired = True
if not refreshRequired:
return feats
feats.featsfile_time = int(path.getmtime(featsfile))
feats.segfile_time = int(path.getmtime(segfile))
feats.filename = path.join(segDir, _randFilename("feats-", ".ark"))
segFeatsCmd = "{0} --frame-rate={1} \"ark:{2}\" \"{3}\" \
\"ark:{4}\"".format(config.extractfeaturesegments, framerate,
featsfile, segfile, feats.filename)
# segment and return the features
logFile = open(path.join(segDir, _randFilename(suffix=".log")), "w")
try:
featProc = Popen(segFeatsCmd, stderr=logFile, shell=True)
featProc.communicate()
retCode = featProc.poll()
if retCode:
raise KaldiError(logFile.name)
finally:
logFile.close()
return _cacheObject(feats, idxFile)
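# Illustrative usage sketch (not part of the original module). The call below
# assumes `cfg` is the package's config object exposing the Kaldi binary paths
# referenced above (computemfccfeats, applycmvn, adddeltas, extractsegments);
# the file names and parameter values are placeholders.
#
#   feats = makeMfccFeats("work", cfg, "wav.scp", None, samplefreq=16000,
#                         useenergy=False, framelength=25, frameshift=10,
#                         numceps=13, applycmvn=True, normvars=False,
#                         utt2spk="utt2spk", spk2utt="spk2utt", deltaorder=2)
#   # feats.filename then points at the cached .ark holding the final features.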
|
{
"content_hash": "0e5c010e571f4f9bf3b6404f46220ddb",
"timestamp": "",
"source": "github",
"line_count": 221,
"max_line_length": 113,
"avg_line_length": 28.384615384615383,
"alnum_prop": 0.670492587278814,
"repo_name": "mrfalcone/skip",
"id": "04d0d4c8a7d5d68af5829de6dfb86f2c04e4a112",
"size": "6913",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kaldi/feat.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "75934"
}
],
"symlink_target": ""
}
|
''' analyzeData provides data analysis and storage through the DataSet class for the Geiger-Counter lab
in Columbia's W3081 Intermediate Physics Laboratory Course'''
import scipy as sp
import scipy.io.wavfile as wav
import pickle
import csv
# All times must be in seconds
class DataSet:
''' The DataSet class contains methods and members for storing count data, as well as calculating useful values
for the Geiger-Counter lab.'''
    def __init__ (self, time = None, rate = 96000, fLength = 0):
        ''' Default constructor:
        times contains a list of floats of the starting times for each count.
        maxTimeResolution contains the number of samples per second.
        fileLength contains the length of the file in seconds.
        intervals contains the interval between each count (but is empty until getInterval() is called).
        countRates contains the count rate between each interval, as defined when calling getCountRate(). countRates is empty until getCountRate() is called.'''
        # Defines lists of floats for times. A mutable default argument is avoided
        # so that separate DataSet instances never share the same list object.
        self.times = time if time is not None else []
# maxTimeResolution is a float that defines the binWidth by denoting the number of samples per second.
self.maxTimeResolution = rate
# fileLength describes how long the file is in seconds
self.fileLength = fLength
# Define empty interval and countRate list, which will be filled by interval below
self.intervals = []
self.countRates = []
@classmethod
def readWaveFile(cls, filename):
        ''' Effectively an overloaded constructor for reading in data from a wave file. It returns the
        sample rate and the raw (16 bit signed) sample data; thresholding against THRESHOLD is done
        later by fromWaveData().'''
(rate, data) = wav.read(filename)
return (rate, data)
@classmethod
def fromWaveData(cls, i, level, aboveThreshold, inTimes, rate, THRESHOLD = 15000):
        ''' Effectively an overloaded constructor for processing data from an already open wave file.
        THRESHOLD is the level above which a waveform can be considered a count. Its range can be from 0 to 32767,
constrained by output values from the (16 bit signed) wave file. This method must be called in a loop,
as is done in the GUI, to process the entire file.'''
# Displays a progress meter in the console.
if i % rate == 0:
print "Analyzed " + str(i/rate) + " seconds"
# If the level is below the threshold, ignore it. Otherwise, if it is above the threshold, save the start
# time of the level.
if level <= THRESHOLD:
aboveThreshold = 0
elif aboveThreshold == 1:
return aboveThreshold
else:
aboveThreshold = 1
inTimes.append(float(i)/float(rate))
return aboveThreshold
@classmethod
def fromSavedFile(cls, filename = "dataSet.bin"):
        ''' Effectively an overloaded constructor for importing previously analyzed data. This method imports
        the DataSet object data from a previously analyzed wave file. This object must have been saved by
the pickle module. If the structure of DataSet changes, then this import will likely fail.'''
fIn = open(filename, "rb")
newDataSet = pickle.load(fIn)
fIn.close()
return newDataSet
@classmethod
def getDeadTime(cls, firstSample, secondSample, combinedSample, sampleRate = 1):
        ''' Calculates the dead time of a Geiger-Counter. This method calculates the dead time from two
        different samples, along with a sample recorded with both sources in the Geiger-Counter at the
        same time. The first three inputs to this function must be DataSet objects, while sampleRate must
        be a number, in seconds, in the same form as would be passed to getCountRate().
        This is intentionally not exposed through the GUI. It must be called through the python interpreter,
        and it will take some work. However, this is supposed to be a student calculation, so it should just
        be performed by the student, outside of this code.'''
        # Dead time is defined as \tau = (n1 + n2 - n12)/(2*n1*n2), where each n is a mean count rate.
        # getCountRate() returns a (rates, binEdges) tuple, so take the rates array and average it.
        (n1, _) = firstSample.getCountRate(sampleRate)
        (n2, _) = secondSample.getCountRate(sampleRate)
        (n12, _) = combinedSample.getCountRate(sampleRate)
        n1 = n1.mean()
        n2 = n2.mean()
        n12 = n12.mean()
        deadTime = (n1 + n2 - n12)/(2*n1*n2)
        return deadTime
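    # Example (interpreter session sketch; the filenames are hypothetical) of the dead time calculation
    # described above, using three previously saved data sets:
    #
    #   source1 = DataSet.fromSavedFile("source1.bin")
    #   source2 = DataSet.fromSavedFile("source2.bin")
    #   both = DataSet.fromSavedFile("bothSources.bin")
    #   tau = DataSet.getDeadTime(source1, source2, both, sampleRate = 10)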
def getCountRate(self, sampleSize = 1):
''' Begins with a list (numpy array) of the beginning times of each count. The input, sampleSize,
is in seconds. It returns the rates in counts / second'''
numBins = int(self.fileLength / sampleSize)
# Ignore all times after the last full bin
maxTime = numBins * sampleSize
(rates, binEdges) = sp.histogram(self.times, numBins, (0, maxTime))
        # Returns the count rate in counts / second, rather than counts / bin. sampleSize is cast to
        # float so that an integer bin width does not truncate the rates to integers.
        self.countRates = rates/float(sampleSize)
return (self.countRates, binEdges)
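    # Example (sketch; assumes a populated DataSet called dataSet): count rates in 10 second bins.
    # binEdges has one more entry than rates, as returned by scipy's histogram.
    #
    #   (rates, binEdges) = dataSet.getCountRate(sampleSize = 10)
    #   meanRate = rates.mean()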
def getInterval(self):
''' Calculates the interval between each count, and returns a list with those intervals '''
self.intervals = []
# len(self.times)-1 because the interval list will by definition be shorter than times by 1
for i in range(0,len(self.times)-1):
self.intervals.append(self.times[i+1] - self.times[i])
return self.intervals
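    # Example (sketch; assumes a populated DataSet called dataSet): the mean interval between counts
    # should be roughly the reciprocal of the mean count rate.
    #
    #   intervals = dataSet.getInterval()
    #   meanInterval = sum(intervals)/len(intervals)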
def getTotalCounts(self):
        ''' Returns the total number of counts in the sample. To compare this number to another recording,
        the length of the recordings must be the same (or scaled to account for the different lengths of the
        recordings). '''
return len(self.times)
def save(self, filename = "dataSet.bin"):
''' Saves the DataSet object to a file, set by the input. This object is saved using the pickle module.'''
fOut = open(filename, "wb")
pickle.dump(self, fOut)
fOut.close()
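    # Example (sketch, hypothetical filename) of a save/load round trip using save() and fromSavedFile():
    #
    #   dataSet.save("run1.bin")
    #   restored = DataSet.fromSavedFile("run1.bin")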
def exportCSV(self, filename = "startTimes.csv"):
        ''' Exports the beginning time of each count. This would be for using the data in other programs.
        If the data is to be used with this code, then use save(). There is a header in the first line,
        followed by a count start time on each line. The file ends with a newline.'''
        # Keep a handle to the underlying file so it can be closed explicitly once the rows are written.
        outFile = open(filename, "wb")
        fOut = csv.writer(outFile)
        fOut.writerow(["Beginning time for each count"])
        for time in self.times:
            fOut.writerow([time])
        outFile.close()
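    # Example (sketch, hypothetical filename) of reading the exported start times back in another program
    # with the standard csv module; the first row is the header written above.
    #
    #   import csv
    #   reader = csv.reader(open("startTimes.csv", "rb"))
    #   header = reader.next()
    #   startTimes = [float(row[0]) for row in reader]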
def rebin(self, newBinWidth):
        ''' Rebins data for some arbitrary multiple of the maxTimeResolution. A new object is returned.
        This function is deprecated, as any arbitrary count rate can now be calculated in getCountRate().
        Note that it still refers to self.counts and to an older form of the constructor, neither of which
        exists in the current DataSet layout, so it is kept for reference only.'''
# Determine the factor by which to scale the bins
rebinningFactor = newBinWidth/self.maxTimeResolution
# Define new variables to use in the rebinning
newCounts = []
newTimes = []
# Determine range for i by subtracting the overflow from the length of times.
# This does truncate the data, but it is not a full bin, so it doesn't make sense
# to count as a bin.
overflow = len(self.times) % rebinningFactor
        upperLimit = (len(self.times) - overflow) / rebinningFactor
for i in range(0, upperLimit):
# newTimes are determined by the index times the newBinWidth
newTimes.append(newBinWidth*i)
# newCounts is the sum of all of the counts in all of the old bins that are
# consolidated in the new bin. The bounds are determined by newBinWidth*i and
# newBinWidth*(i+1). Further, this number is divided by a rebinningFactor
# to ensure that the return result is counts per second.
newCount = sum( self.counts[ newBinWidth*i : newBinWidth*(i+1) ] )
newCounts.append( newCount / rebinningFactor)
# Create a new object to return, so we can keep the original data
newDataSet = DataSet(newCounts, newTimes)
return newDataSet
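    # Rather than calling the deprecated rebin(), the same effect (counts per second in wider bins) can be
    # obtained directly from getCountRate(); for example, for 5 second bins on a populated DataSet:
    #
    #   (rates, binEdges) = dataSet.getCountRate(sampleSize = 5)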
|
{
"content_hash": "4868a39333d2e8a32d0afa96239a7206",
"timestamp": "",
"source": "github",
"line_count": 170,
"max_line_length": 164,
"avg_line_length": 49.805882352941175,
"alnum_prop": 0.6479272469587811,
"repo_name": "samkohn/Geiger-Counter",
"id": "899aba66b1766a9b199344b9442f01829ec0501d",
"size": "8489",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "analyzeData.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "27063"
}
],
"symlink_target": ""
}
|