| code (string, 2-1.05M chars) | repo_name (string, 5-104 chars) | path (string, 4-251 chars) | language (1 class) | license (15 classes) | size (int32, 2-1.05M) |
|---|---|---|---|---|---|
#!/usr/bin/env python3
#
# Copyright 2019 Ryan Peck
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup
from os import path
import ipgroup
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, "README.md"), encoding="utf-8") as f:
long_description = f.read()
setup(
name="ipgroup",
maintainer="Ryan Peck",
maintainer_email="ryan@rypeck.com",
version=ipgroup.__version__,
url="https://github.com/RyPeck/python-ipgroup",
license="Apache License, Version 2.0",
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Intended Audience :: Information Technology",
"Intended Audience :: System Administrators",
"Intended Audience :: Telecommunications Industry",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Topic :: Internet",
"Topic :: Internet :: Log Analysis",
"Topic :: Software Development :: Libraries",
"Topic :: System :: Networking",
"Topic :: Utilities",
],
description="Functions to gather info on a group of IPv4 or IPv6 Networks",
py_modules=["ipgroup"],
long_description=long_description,
long_description_content_type="text/markdown",
)
| RyPeck/python-ipgroup | setup.py | Python | apache-2.0 | 2,027 |
#!/usr/bin/env python
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2014-2019 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Helper functions for change management """
import collections
from datetime import datetime
import json
import logging
import shlex
from aquilon.aqdb.model import (
AddressAlias,
AddressAssignment,
Alias,
Archetype,
ArchetypeResource,
ARecord,
Building,
BundleResource,
Bunker,
Campus,
Chassis,
City,
Cluster,
ClusterLifecycle,
ClusterResource,
ComputeCluster,
ConsoleServer,
Continent,
Country,
Desk,
DnsDomain,
DnsEnvironment,
Domain,
EsxCluster,
Fqdn,
Grn,
GrnResource,
HardwareEntity,
HardwareFeature,
Host,
HostClusterMember,
HostEnvironment,
HostFeature,
HostLifecycle,
HostResource,
Hub,
Interface,
InterfaceFeature,
Location,
Machine,
MetaCluster,
NetGroupWhiteList,
Network,
NetworkCompartment,
NetworkDevice,
NetworkEnvironment,
OperatingSystem,
Organization,
Personality,
PersonalityResource,
PersonalityStage,
Rack,
Realm,
Resource,
ResourceGroup,
Role,
Room,
ServiceAddress,
ServiceInstance,
StorageCluster,
User,
)
from aquilon.aqdb.model.host_environment import Development, UAT, QA, Legacy, Production, Infra
from aquilon.config import Config
from aquilon.exceptions_ import ArgumentError
from aquilon.exceptions_ import AuthorizationException, InternalError, AquilonError
from aquilon.worker.dbwrappers.user_principal import get_or_create_user_principal
from aquilon.worker.processes import run_command
from sqlalchemy.orm import contains_eager, load_only, aliased
from sqlalchemy.orm.session import object_session
from sqlalchemy.orm.query import Query
cm_logger = logging.getLogger('change_management')
class ChangeManagement(object):
"""
Class that calculates the impacted environments, and the number of objects
in them, for the impacted target
Command to be called for individual targets:
host, cluster, feature, service instance, personality stage,
os type, arch type, domain, host environment
Calculate target grn (eonid) - TBD
Prepare input for aqd_checkedm
Call aqd_checkedm
"""
config = Config()
extra_options = ""
handlers = {}
lifecycle_status_edm_check = ['ready'] # Crash and burn: 'build', 'rebuild',
# 'decommissioned', 'blind', 'install', 'reinstall', 'almostready', 'failed'
def __init__(self, session, user, justification, reason, logger, command,
cm_check=False, **arguments):
self.command = command
self.cm_check = cm_check
self.justification = justification
self.reason = reason
self.logger = logger
self.requestid = arguments.get('requestid', '')
self.dict_of_impacted_envs = {}
self.impacted_objects = {}
self.impacted_eonids = set()
self.eonid = 6980 # to be calculated for each target
self.enforce_validation = False
self.check_enabled = self.config.getboolean("change_management", "enable")
if self.config.has_option("change_management", "extra_options"):
self.extra_options = self.config.get("change_management", "extra_options")
dbuser = get_or_create_user_principal(session, user, commitoncreate=True)
self.username = dbuser.name
self.role_name = dbuser.role.name
def consider(self, target_obj, enforce_validation=False):
"""
Entry-point validation method; chooses the right validation handler based on
the object class and the self.handlers dict
Args:
target_obj: queryset or single db model object
enforce_validation: True or False
Returns: None or raises InternalError/AuthorizationException
"""
if enforce_validation:
self.enforce_validation = enforce_validation
if not self.check_enabled:
self.logger.debug('Change management is disabled. Exiting validate.')
return
self.logger.debug('Determine if the input object is a queryset or a single object')
if not target_obj:
self.logger.debug('Given object is None. Nothing to validate.')
return
# If given object is query use it for validation
# to optimize validation of large amount of data
if isinstance(target_obj, Query):
if target_obj.count() == 0:
self.logger.debug('No impacted targets exiting')
return
self._call_handler_method(target_obj.first(), queryset=target_obj)
# If the given Query has been evaluated with .all() it is an instance of collections.Iterable,
# so validate each item in the list separately
elif isinstance(target_obj, collections.Iterable):
for obj in target_obj:
self._call_handler_method(obj)
else:
self._call_handler_method(target_obj)
self.logger.debug('Call aqd_checkedm with metadata')
def _get_in_scope_objects_as_text(self):
if not self.impacted_objects:
return '\n\t - no affected objects in-scope for change ' \
'management found -'
in_scope_list = '\n'.join('\t{}'.format(o)
for k in sorted(self.impacted_objects)
for o in sorted(self.impacted_objects[k]))
return in_scope_list
def _call_handler_method(self, obj, queryset=None):
env_calculate_method = self.handlers.get(obj.__class__, None)
if not env_calculate_method:
raise InternalError('Change management calculate impact fail. Target class unknown.')
self.logger.debug('Calculate impacted environments and target status')
if queryset:
env_calculate_method(self, queryset)
else:
env_calculate_method(self, obj)
def validate(self):
"""Perform change management validation, or return in-scope objects.
Method calls adq_checkedm cmd tool with target resources metadata
to calculate if change management validation is required.
If required, justification validation will happen. If EDM calls
enabled, the ticket will be checked in EDM.
If self.cm_check True, this method immediately finishes by raising
ArgumentError to pass the list of objects in-scope for change
management to the client.
Returns: None, or raises AuthorizationException, or ArgumentError
"""
if self.cm_check:
raise ArgumentError(
'aborting upon user request (option --cm_check used). Please '
'find the list of in-scope objects you have requested below:\n'
'{}\n'.format(self._get_in_scope_objects_as_text()))
if not self.check_enabled:
self.logger.debug('Change management is disabled. Exiting validate.')
return
# Clean final impacted env list
self.logger.debug('Prepare impacted envs to call EDM')
for env, build_status_list in self.dict_of_impacted_envs.items():
self.dict_of_impacted_envs[env] = list(set(build_status_list))
# Prepare aqd_checkedm input dict
cm_extra_options = shlex.split(self.extra_options)
cmd = ["aqd_checkedm"] + cm_extra_options
metadata = {"ticket": self.justification,
"reason": self.reason,
"requestor": self.username,
"requestor_role": self.role_name,
"command": self.command,
"impacted_envs": self.dict_of_impacted_envs,
"eonid": self.eonid,
"enforce_validation": self.enforce_validation,
}
cmd.extend(["--metadata", json.dumps(metadata)])
out = run_command(cmd)
try:
out_dict = json.loads(out)
except Exception as err:
raise AquilonError("Invalid response received for the "
"change management check. {}".format(str(err)))
# Log Change Management validation results
self.log_change_management_validation(metadata, cm_extra_options, out_dict)
self.logger.info("Change Management validation "
"finished. Status: {}. {}".format(out_dict.get("Status"),
out_dict.get("Reason")))
if out_dict.get("Status") == 'Permitted':
self.logger.client_info("Approval Warning: "
"{}".format(out_dict.get("Reason")))
elif out_dict.get("Status") != 'Approved':
raise AuthorizationException(out_dict.get("Reason"))
def log_change_management_validation(self, metadata, cm_extra_options, out_dict):
if '--edm-instance' in cm_extra_options:
edm_ins = cm_extra_options[cm_extra_options.index('--edm-instance') + 1]
else:
edm_ins = 'prod'
if '--mode' in cm_extra_options:
mode = cm_extra_options[cm_extra_options.index('--mode') + 1]
else:
mode = 'enforce'
if '--disable_edm' in cm_extra_options:
disable_edm = 'Yes'
else:
disable_edm = 'No'
log_dict = {"edm_instance": edm_ins, "mode": mode, "disable_edm": disable_edm, "request_id": str(self.requestid)}
date_time = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S +0000')
log_dict['timestamp'] = date_time
log_dict.update(metadata)
log_dict.update(out_dict)
if log_dict['impacted_envs'].get('prod') and 'ready' in log_dict['impacted_envs'].get('prod'):
log_dict['prod_ready_env_impact'] = 'Yes'
else:
log_dict['prod_ready_env_impact'] = 'No'
log_dict['impacted_eonids'] = list(self.impacted_eonids)
cm_logger.info(json.dumps(log_dict))
def validate_default(self, obj):
pass
def validate_branch(self, obj):
"""
Method to be used when we do not need to calculate the impacted environment.
Used with enforce_validation for some models, e.g. Domain, Sandbox.
If enforce_validation is set, do not perform extra database queries
to get hosts and clusters impacted
Returns:
"""
if obj.requires_change_manager:
self.enforce_validation = True
def validate_prod_personality(self, personality):
session = object_session(personality)
if personality.is_cluster:
q = session.query(Cluster)
q = q.filter(Cluster.personality_stage.has(
PersonalityStage.personality == personality))
q = q.join(ClusterLifecycle)
else:
q = session.query(Host)
q = q.filter(Host.personality_stage.has(
PersonalityStage.personality == personality))
q = q.join(HostLifecycle)
q = q.options(contains_eager('status'))
q = q.join(PersonalityStage, Personality, HostEnvironment)
q = q.options(contains_eager('personality_stage.personality.host_environment'))
if isinstance(q.first(), Cluster):
for cluster in q.all():
self.validate_cluster(cluster)
else:
for host in q.all():
self.validate_host(host)
def validate_prod_personality_stage(self, personality_stage):
session = object_session(personality_stage)
if personality_stage.personality.is_cluster:
q = session.query(Cluster)
q = q.filter_by(personality_stage=personality_stage)
q = q.join(ClusterLifecycle)
else:
q = session.query(Host)
q = q.filter_by(personality_stage=personality_stage)
q = q.join(HostLifecycle)
q = q.options(contains_eager('status'))
q = q.join(PersonalityStage, Personality, HostEnvironment)
q = q.options(contains_eager('personality_stage.personality.host_environment'))
if isinstance(q.first(), Cluster):
for cluster in q.all():
self.validate_cluster(cluster)
else:
for host in q.all():
self.validate_host(host)
def validate_cluster(self, cluster):
"""
Validate the impacted cluster and its hosts, so that CM is still enabled
when the cluster env is infra but its hosts are prod/ready
Args:
cluster: single Cluster
Returns: None
"""
self._store_impacted_object_information(cluster)
# Validate only the impacted cluster
self.dict_of_impacted_envs.setdefault(
cluster.personality_stage.personality.host_environment.name, []).append(cluster.status.name)
# Also validate cluster hosts
for host in cluster.hosts:
self.validate_host(host)
if hasattr(cluster, 'members'):
for cluster_member in cluster.members:
self.validate_cluster(cluster_member)
# To do: Do we want to check if cluster is assigned
# to metacluster and if yes validate metacluster and its clusters/hosts?
def validate_host(self, host):
"""
Validate given single host
Args:
host: a single host
Returns: None
"""
self._store_impacted_object_information(host)
self.dict_of_impacted_envs.setdefault(
host.personality_stage.personality.host_environment.name,
[]).append(host.status.name)
def validate_hardware_entity(self, hwentities_or_hwentity):
"""
Validate a hardware entity queryset or a single hardware entity object
Args:
hwentities_or_hwentity: queryset or a single subclass of HardwareEntity
Returns: None
"""
# Check whether one machine can ever have multiple hosts assigned -
# VMs seem to be handled separately?
if isinstance(hwentities_or_hwentity, HardwareEntity):
if hwentities_or_hwentity.host:
self.validate_host(hwentities_or_hwentity.host)
else:
hwentities_or_hwentity = hwentities_or_hwentity.join(Host).options(contains_eager('host'))
for hwentity in hwentities_or_hwentity.all():
self.validate_host(hwentity.host)
def validate_location(self, location):
session = object_session(location)
location_children = session.query(Location).get(location.id).children
loc_ids = [loc.id for loc in location_children] + [location.id]
# ORA-01795: maximum number of expressions in a list is 1000
for i in range(0, len(loc_ids), 1000):
chunk_loc_ids = loc_ids[i:i + 1000]
q = session.query(Host).join(
HardwareEntity,
Host.hardware_entity_id == HardwareEntity.id).join(
Location,
HardwareEntity.location_id == Location.id).filter(
Location.id.in_(chunk_loc_ids))
q1 = session.query(Cluster).join(
Location,
Cluster.location_constraint_id == Location.id).filter(
Location.id.in_(chunk_loc_ids))
q = q.reset_joinpoint()
q = q.join(HostLifecycle).options(contains_eager('status'))
q = q.join(PersonalityStage, Personality).join(
HostEnvironment).options(
contains_eager(
'personality_stage.personality.host_environment'))
for host in q.all():
self.validate_host(host)
q1 = q1.reset_joinpoint()
q1 = q1.join(ClusterLifecycle).options(contains_eager('status'))
q1 = q1.join(PersonalityStage, Personality).join(
HostEnvironment).options(
contains_eager(
'personality_stage.personality.host_environment'))
for cluster in q1.all():
self.validate_cluster(cluster)
def validate_prod_network(self, network_or_networks):
"""
Validate queryset or single network object
Args:
network_or_networks: queryset or single network object
Returns: None
"""
CR = aliased(ClusterResource)
HR = aliased(HostResource)
S = aliased(ServiceAddress)
RG = aliased(ResourceGroup)
BR = aliased(BundleResource)
if isinstance(network_or_networks, Network):
session = object_session(network_or_networks)
# Filter Service addresses mapped to the clusters directly
q2 = session.query(Cluster).join(CR).join(Resource). \
join(ServiceAddress).join(ARecord).join(Network).filter(Network.id == network_or_networks.id)
# Filter Service addresses mapped to the cluster via resourcegroups
q5 = session.query(Cluster).join(CR)
q5 = q5.outerjoin((RG, RG.holder_id == CR.id),
(BR, BR.resourcegroup_id == RG.id),
(S, S.holder_id == BR.id))
q5 = q5.join(ARecord).join(Network).filter(Network.id == network_or_networks.id)
# Filter IP Addresses assigned to the hosts
q3 = session.query(Host).join(HardwareEntity).join(Interface, aliased=True). \
join(AddressAssignment, from_joinpoint=True).join(Network). \
filter(Network.id == network_or_networks.id)
# Filter Service addresses mapped to the hosts directly
q4 = session.query(Host).join(HardwareEntity).join(HostResource).join(Resource). \
join(ServiceAddress).join(ARecord).join(Network).filter(Network.id == network_or_networks.id)
# Filter Service addresses mapped to the host via resourcegroups
q6 = session.query(Host).join(HR)
q6 = q6.outerjoin((RG, RG.holder_id == HR.id),
(BR, BR.resourcegroup_id == RG.id),
(S, S.holder_id == BR.id))
q6 = q6.join(ARecord).join(Network).filter(Network.id == network_or_networks.id)
else:
session = object_session(network_or_networks.first())
network_sub_q = network_or_networks.options(load_only("id")).subquery()
# Filter Service addresses mapped to the clusters directly
q2 = session.query(Cluster).join(ClusterResource).join(Resource). \
join(ServiceAddress).join(ARecord).join(Network).filter(Network.id.in_(network_sub_q))
# Filter Service addresses mapped to the cluster via resourcegroups
q5 = session.query(Cluster).join(CR)
q5 = q5.outerjoin((RG, RG.holder_id == CR.id),
(BR, BR.resourcegroup_id == RG.id),
(S, S.holder_id == BR.id))
q5 = q5.join(ARecord).join(Network).filter(Network.id.in_(network_sub_q))
# Filter IP Addresses assigned to the hosts
q3 = session.query(Host).join(HardwareEntity).join(Interface, aliased=True). \
join(AddressAssignment, from_joinpoint=True).join(Network). \
filter(Network.id.in_(network_sub_q))
# Filter Service addresses mapped to the hosts directly
q4 = session.query(Host).join(HardwareEntity).join(HostResource).join(Resource). \
join(ServiceAddress).join(ARecord).join(Network).filter(Network.id.in_(network_sub_q))
# Filter Service addresses mapped to the host via resourcegroups
q6 = session.query(Host).join(HR)
q6 = q6.outerjoin((RG, RG.holder_id == HR.id),
(BR, BR.resourcegroup_id == RG.id),
(S, S.holder_id == BR.id))
q6 = q6.join(ARecord).join(Network).filter(Network.id.in_(network_sub_q))
# Validate clusters
for q in [q2, q5]:
q = q.reset_joinpoint()
q = q.join(ClusterLifecycle).options(contains_eager('status'))
q = q.join(PersonalityStage, Personality).join(HostEnvironment).options(
contains_eager('personality_stage.personality.host_environment'))
for cluster in q.all():
self.validate_cluster(cluster)
# Validate hosts
for q in [q3, q4, q6]:
q = q.reset_joinpoint()
q = q.join(HostLifecycle).options(contains_eager('status'))
q = q.join(PersonalityStage, Personality).join(HostEnvironment).options(
contains_eager('personality_stage.personality.host_environment'))
for host in q.all():
self.validate_host(host)
def validate_fqdn(self, dbfqdn):
# Check full depth of fqdn aliases or address_alias!
def dig_to_real_target(dbfqdn):
fqdns_to_test = [dbfqdn]
fqdns_tested = list()
final_target = dbfqdn
while fqdns_to_test:
to_test_now = []
for db_fqdn in fqdns_to_test:
to_test_now.extend(db_fqdn.dns_records)
fqdns_tested.append(db_fqdn)
fqdns_to_test = []
for rec in to_test_now:
if rec in fqdns_tested:
raise Exception("There might be a loop!!! Failing fast instead.")
if isinstance(rec, AddressAlias):
fqdns_to_test.append(rec.target)
elif isinstance(rec, Alias):
fqdns_to_test.append(rec.target)
else:
final_target = rec.fqdn
fqdns_to_test = list(set(fqdns_to_test))
return final_target
fqdn = dig_to_real_target(dbfqdn)
CR = aliased(ClusterResource)
HR = aliased(HostResource)
S = aliased(ServiceAddress)
RG = aliased(ResourceGroup)
BR = aliased(BundleResource)
session = object_session(fqdn)
ip_subquery = session.query(ARecord).filter(ARecord.fqdn_id == fqdn.id)
ip_subquery = [i.ip for i in ip_subquery]
# Filter Service addresses mapped to the clusters directly
q2 = session.query(Cluster).join(CR).join(Resource). \
join(ServiceAddress).join(ARecord).filter(ARecord.fqdn_id == fqdn.id)
# Filter Service addresses mapped to the cluster via resourcegroups
q5 = session.query(Cluster).join(CR)
q5 = q5.outerjoin((RG, RG.holder_id == CR.id),
(BR, BR.resourcegroup_id == RG.id),
(S, S.holder_id == BR.id))
q5 = q5.join(ARecord).filter(ARecord.fqdn_id == fqdn.id)
# Filter IP Addresses assigned to the hosts
q3 = session.query(Host).join(HardwareEntity).join(Interface, aliased=True). \
join(AddressAssignment).filter(AddressAssignment.ip.in_(ip_subquery))
# Filter Service addresses mapped to the hosts directly
q4 = session.query(Host).join(HardwareEntity).join(HostResource).join(Resource). \
join(ServiceAddress).join(ARecord).filter(ARecord.fqdn_id == fqdn.id)
# Filter Service addresses mapped to the host via resourcegroups
q6 = session.query(Host).join(HR)
q6 = q6.outerjoin((RG, RG.holder_id == HR.id),
(BR, BR.resourcegroup_id == RG.id),
(S, S.holder_id == BR.id))
q6 = q6.join(ARecord).filter(ARecord.fqdn_id == fqdn.id)
# Validate clusters
for q in [q2, q5]:
q = q.reset_joinpoint()
q = q.join(ClusterLifecycle).options(contains_eager('status'))
q = q.join(PersonalityStage, Personality).join(HostEnvironment).options(contains_eager('personality_stage.personality.host_environment'))
for cluster in q.all():
self.validate_cluster(cluster)
# Validate hosts
for q in [q3, q4, q6]:
q = q.reset_joinpoint()
q = q.join(HostLifecycle).options(contains_eager('status'))
q = q.join(PersonalityStage, Personality).join(HostEnvironment).options(contains_eager('personality_stage.personality.host_environment'))
for host in q.all():
self.validate_host(host)
def validate_chassis(self, chassis):
"""
Validate if given chassis object has hosts in slots
Args:
chassis: single chassis object
Returns: None
"""
for slot in chassis.machine_slots:
if slot.machine and slot.machine.host:
self.validate_host(slot.machine.host)
for slot in chassis.network_device_slots:
if slot.network_device and slot.network_device.host:
self.validate_host(slot.network_device.host)
def validate_console_server(self, console_server):
"""
Validate if given console_server object has hosts in any port
Args:
console_server: single console_server object
Returns: None
"""
for port in console_server.ports:
dbhw_ent = console_server.ports[port].client
if dbhw_ent and dbhw_ent.host:
self.validate_host(dbhw_ent.host)
def validate_resource_holder(self, resource_holder):
"""Validate a resource holder
Validate if given resource holder has hosts, through direct link,
cluster, personality, or a specified host environment
Args:
resource_holder: a single resource_holder object
Returns: None
"""
if getattr(resource_holder, 'host_environment', None):
self.validate_host_environment(resource_holder.host_environment)
return
dbobj = resource_holder.toplevel_holder_object
if isinstance(dbobj, Cluster):
self.validate_cluster(dbobj)
elif isinstance(dbobj, Host):
self.validate_host(dbobj)
elif isinstance(dbobj, Personality):
self.validate_prod_personality(dbobj)
def validate_host_environment(self, host_environment):
if host_environment.name == 'prod':
self.enforce_validation = True
def validate_prod_archetype(self, archtype):
session = object_session(archtype)
if archtype.cluster_type:
q = session.query(Cluster)
q = q.join(ClusterLifecycle)
else:
q = session.query(Host)
q = q.join(HostLifecycle)
q = q.options(contains_eager('status'))
q = q.join(PersonalityStage, Personality)
q = q.filter_by(archetype=archtype)
q = q.join(HostEnvironment)
q = q.options(contains_eager('personality_stage.personality.host_environment'))
if isinstance(q.first(), Cluster):
for cluster in q.all():
self.validate_cluster(cluster)
else:
for host in q.all():
self.validate_host(host)
def validate_prod_os(self, ostype):
session = object_session(ostype)
q = session.query(Host)
q = q.filter_by(operating_system=ostype)
q = q.join(HostLifecycle)
q = q.options(contains_eager('status'))
q = q.join(PersonalityStage, Personality, HostEnvironment)
q = q.options(contains_eager('personality_stage.personality.host_environment'))
for host in q.all():
self.validate_host(host)
def validate_prod_service_instance(self, service_instance):
session = object_session(service_instance)
q1 = session.query(Cluster)
q1 = q1.filter(Cluster.services_used.contains(service_instance))
q1 = q1.join(ClusterLifecycle)
q1 = q1.options(contains_eager('status'))
q1 = q1.join(PersonalityStage, Personality, HostEnvironment)
q1 = q1.options(contains_eager('personality_stage.personality.host_environment'))
for cluster in q1.all():
self.validate_cluster(cluster)
q2 = session.query(Host)
q2 = q2.filter(Host.services_used.contains(service_instance))
q2 = q2.join(HostLifecycle)
q2 = q2.options(contains_eager('status'))
q2 = q2.join(PersonalityStage, Personality, HostEnvironment)
q2 = q2.options(contains_eager('personality_stage.personality.host_environment'))
for host in q2.all():
self.validate_host(host)
def validate_prod_feature(self, feature):
session = object_session(feature)
q1 = session.query(Cluster)
q1 = q1.join(ClusterLifecycle)
q1 = q1.options(contains_eager('status'))
q1 = q1.join(PersonalityStage)
q1 = q1.join(PersonalityStage.features)
q1 = q1.filter_by(feature=feature)
q1 = q1.join(Personality, HostEnvironment)
q1 = q1.options(contains_eager('personality_stage.personality.host_environment'))
for cluster in q1.all():
self.validate_cluster(cluster)
q2 = session.query(Host)
q2 = q2.join(PersonalityStage)
q2 = q2.join(PersonalityStage.features)
q2 = q2.filter_by(feature=feature)
q2 = q2.join(Personality, HostEnvironment)
q2 = q2.options(contains_eager('personality_stage.personality.host_environment'))
for host in q2.all():
self.validate_host(host)
def _store_impacted_object_information(self, an_object):
# noinspection PyStringFormat
self.impacted_objects.setdefault(
'{0:c}'.format(an_object), set()).add('{0:l}'.format(an_object))
if hasattr(an_object, 'effective_owner_grn'):
eonid = an_object.effective_owner_grn.eon_id
self.impacted_eonids.add(eonid)
ChangeManagement.handlers[Cluster] = ChangeManagement.validate_cluster
ChangeManagement.handlers[ComputeCluster] = ChangeManagement.validate_cluster
ChangeManagement.handlers[StorageCluster] = ChangeManagement.validate_cluster
ChangeManagement.handlers[EsxCluster] = ChangeManagement.validate_cluster
ChangeManagement.handlers[HostClusterMember] = ChangeManagement.validate_cluster
ChangeManagement.handlers[MetaCluster] = ChangeManagement.validate_cluster
ChangeManagement.handlers[Personality] = ChangeManagement.validate_prod_personality
ChangeManagement.handlers[PersonalityStage] = ChangeManagement.validate_prod_personality_stage
ChangeManagement.handlers[InterfaceFeature] = ChangeManagement.validate_prod_feature
ChangeManagement.handlers[HardwareFeature] = ChangeManagement.validate_prod_feature
ChangeManagement.handlers[HostFeature] = ChangeManagement.validate_prod_feature
ChangeManagement.handlers[ServiceInstance] = ChangeManagement.validate_prod_service_instance
ChangeManagement.handlers[OperatingSystem] = ChangeManagement.validate_prod_os
ChangeManagement.handlers[Archetype] = ChangeManagement.validate_prod_archetype
ChangeManagement.handlers[Development] = ChangeManagement.validate_host_environment
ChangeManagement.handlers[UAT] = ChangeManagement.validate_host_environment
ChangeManagement.handlers[QA] = ChangeManagement.validate_host_environment
ChangeManagement.handlers[Legacy] = ChangeManagement.validate_host_environment
ChangeManagement.handlers[Production] = ChangeManagement.validate_host_environment
ChangeManagement.handlers[Infra] = ChangeManagement.validate_host_environment
ChangeManagement.handlers[Domain] = ChangeManagement.validate_branch
ChangeManagement.handlers[Host] = ChangeManagement.validate_host
ChangeManagement.handlers[Machine] = ChangeManagement.validate_hardware_entity
# Removing this as the HardwareEntity is too general, we have validate_hardware_entity, validate_host and validate_chassis
# ChangeManagement.handlers[HardwareEntity] = ChangeManagement.validate_hardware_entity
ChangeManagement.handlers[NetworkDevice] = ChangeManagement.validate_hardware_entity
ChangeManagement.handlers[Network] = ChangeManagement.validate_prod_network
ChangeManagement.handlers[Chassis] = ChangeManagement.validate_chassis
ChangeManagement.handlers[ConsoleServer] = ChangeManagement.validate_console_server
ChangeManagement.handlers[Rack] = ChangeManagement.validate_location
ChangeManagement.handlers[Organization] = ChangeManagement.validate_location
ChangeManagement.handlers[Hub] = ChangeManagement.validate_location
ChangeManagement.handlers[Continent] = ChangeManagement.validate_location
ChangeManagement.handlers[Country] = ChangeManagement.validate_location
ChangeManagement.handlers[Campus] = ChangeManagement.validate_location
ChangeManagement.handlers[City] = ChangeManagement.validate_location
ChangeManagement.handlers[Building] = ChangeManagement.validate_location
ChangeManagement.handlers[Room] = ChangeManagement.validate_location
ChangeManagement.handlers[Bunker] = ChangeManagement.validate_location
ChangeManagement.handlers[Desk] = ChangeManagement.validate_location
ChangeManagement.handlers[BundleResource] = ChangeManagement.validate_resource_holder
ChangeManagement.handlers[ClusterResource] = ChangeManagement.validate_resource_holder
ChangeManagement.handlers[HostResource] = ChangeManagement.validate_resource_holder
ChangeManagement.handlers[PersonalityResource] = \
ChangeManagement.validate_resource_holder
ChangeManagement.handlers[ArchetypeResource] = \
ChangeManagement.validate_resource_holder
ChangeManagement.handlers[GrnResource] = \
ChangeManagement.validate_resource_holder
ChangeManagement.handlers[Fqdn] = ChangeManagement.validate_fqdn
ChangeManagement.handlers[DnsDomain] = ChangeManagement.validate_default
ChangeManagement.handlers[DnsEnvironment] = ChangeManagement.validate_default
ChangeManagement.handlers[NetworkCompartment] = ChangeManagement.validate_default
ChangeManagement.handlers[NetworkEnvironment] = ChangeManagement.validate_default
ChangeManagement.handlers[NetGroupWhiteList] = ChangeManagement.validate_default
ChangeManagement.handlers[Grn] = ChangeManagement.validate_default
ChangeManagement.handlers[User] = ChangeManagement.validate_default
ChangeManagement.handlers[Realm] = ChangeManagement.validate_default
ChangeManagement.handlers[Role] = ChangeManagement.validate_default
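A minimal usage sketch of the class above, for orientation only: the session, logger, and target host object are assumed to be supplied by a broker command and are not defined in this module, and the justification/reason strings are placeholders.
# Illustrative sketch - not part of the original module.
# `session`, `logger` and `dbhost` are assumed to come from the calling broker command.
cm = ChangeManagement(session, "jdoe", "tcm=1234567", "emergency change", logger, command="reconfigure")
cm.consider(dbhost)  # dispatched to validate_host() via the ChangeManagement.handlers map
cm.validate()        # builds the metadata, runs aqd_checkedm, raises AuthorizationException if not approved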
| quattor/aquilon | lib/aquilon/worker/dbwrappers/change_management.py | Python | apache-2.0 | 34,999 |
# Copyright (c) 2014 eBay Software Foundation
# Copyright 2015 HP Software, LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django import http
from mox3.mox import IsA # noqa
from openstack_dashboard import api
from troveclient import common
from trove_dashboard import api as trove_api
from trove_dashboard.test import helpers as test
INDEX_URL = reverse('horizon:project:database_clusters:index')
LAUNCH_URL = reverse('horizon:project:database_clusters:launch')
DETAILS_URL = reverse('horizon:project:database_clusters:detail', args=['id'])
ADD_SHARD_VIEWNAME = 'horizon:project:database_clusters:add_shard'
RESET_PASSWORD_VIEWNAME = 'horizon:project:database_clusters:reset_password'
class ClustersTests(test.TestCase):
@test.create_stubs({trove_api.trove: ('cluster_list',
'flavor_list')})
def test_index(self):
clusters = common.Paginated(self.trove_clusters.list())
trove_api.trove.cluster_list(IsA(http.HttpRequest), marker=None)\
.AndReturn(clusters)
trove_api.trove.flavor_list(IsA(http.HttpRequest))\
.AndReturn(self.flavors.list())
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, 'project/database_clusters/index.html')
@test.create_stubs({trove_api.trove: ('cluster_list',
'flavor_list')})
def test_index_flavor_exception(self):
clusters = common.Paginated(self.trove_clusters.list())
trove_api.trove.cluster_list(IsA(http.HttpRequest), marker=None)\
.AndReturn(clusters)
trove_api.trove.flavor_list(IsA(http.HttpRequest))\
.AndRaise(self.exceptions.trove)
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, 'project/database_clusters/index.html')
self.assertMessageCount(res, error=1)
@test.create_stubs({trove_api.trove: ('cluster_list',)})
def test_index_list_exception(self):
trove_api.trove.cluster_list(IsA(http.HttpRequest), marker=None)\
.AndRaise(self.exceptions.trove)
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, 'project/database_clusters/index.html')
self.assertMessageCount(res, error=1)
@test.create_stubs({trove_api.trove: ('cluster_list',
'flavor_list')})
def test_index_pagination(self):
clusters = self.trove_clusters.list()
last_record = clusters[0]
clusters = common.Paginated(clusters, next_marker="foo")
trove_api.trove.cluster_list(IsA(http.HttpRequest), marker=None)\
.AndReturn(clusters)
trove_api.trove.flavor_list(IsA(http.HttpRequest))\
.AndReturn(self.flavors.list())
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, 'project/database_clusters/index.html')
self.assertContains(
res, 'marker=' + last_record.id)
@test.create_stubs({trove_api.trove: ('cluster_list',
'flavor_list')})
def test_index_flavor_list_exception(self):
clusters = common.Paginated(self.trove_clusters.list())
trove_api.trove.cluster_list(IsA(http.HttpRequest), marker=None)\
.AndReturn(clusters)
trove_api.trove.flavor_list(IsA(http.HttpRequest))\
.AndRaise(self.exceptions.trove)
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, 'project/database_clusters/index.html')
self.assertMessageCount(res, error=1)
@test.create_stubs({trove_api.trove: ('datastore_flavors',
'datastore_list',
'datastore_version_list'),
api.base: ['is_service_enabled']})
def test_launch_cluster(self):
api.base.is_service_enabled(IsA(http.HttpRequest), 'network')\
.AndReturn(False)
trove_api.trove.datastore_flavors(IsA(http.HttpRequest),
'mongodb', '2.6')\
.AndReturn(self.flavors.list())
trove_api.trove.datastore_list(IsA(http.HttpRequest))\
.AndReturn(self.datastores.list())
trove_api.trove.datastore_version_list(IsA(http.HttpRequest),
IsA(str))\
.AndReturn(self.datastore_versions.list())
self.mox.ReplayAll()
res = self.client.get(LAUNCH_URL)
self.assertTemplateUsed(res, 'project/database_clusters/launch.html')
@test.create_stubs({trove_api.trove: ['datastore_flavors',
'cluster_create',
'datastore_list',
'datastore_version_list'],
api.base: ['is_service_enabled']})
def test_create_simple_cluster(self):
api.base.is_service_enabled(IsA(http.HttpRequest), 'network')\
.AndReturn(False)
trove_api.trove.datastore_flavors(IsA(http.HttpRequest),
'mongodb', '2.6')\
.AndReturn(self.flavors.list())
trove_api.trove.datastore_list(IsA(http.HttpRequest))\
.AndReturn(self.datastores.list())
trove_api.trove.datastore_version_list(IsA(http.HttpRequest), IsA(str))\
.AndReturn(self.datastore_versions.list())
cluster_name = u'MyCluster'
cluster_volume = 1
cluster_flavor = u'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
cluster_instances = 3
cluster_datastore = u'mongodb'
cluster_datastore_version = u'2.6'
cluster_network = u''
trove_api.trove.cluster_create(
IsA(http.HttpRequest),
cluster_name,
cluster_volume,
cluster_flavor,
cluster_instances,
datastore=cluster_datastore,
datastore_version=cluster_datastore_version,
nics=cluster_network,
root_password=None).AndReturn(self.trove_clusters.first())
self.mox.ReplayAll()
post = {
'name': cluster_name,
'volume': cluster_volume,
'num_instances': cluster_instances,
'num_shards': 1,
'num_instances_per_shards': cluster_instances,
'datastore': cluster_datastore + u'-' + cluster_datastore_version,
'mongodb_flavor': cluster_flavor,
'network': cluster_network
}
res = self.client.post(LAUNCH_URL, post)
self.assertNoFormErrors(res)
self.assertMessageCount(success=1)
@test.create_stubs({trove_api.trove: ['datastore_flavors',
'cluster_create',
'datastore_list',
'datastore_version_list'],
api.neutron: ['network_list_for_tenant'],
api.base: ['is_service_enabled']})
def test_create_simple_cluster_neutron(self):
api.base.is_service_enabled(IsA(http.HttpRequest), 'network')\
.AndReturn(True)
api.neutron.network_list_for_tenant(IsA(http.HttpRequest), '1')\
.AndReturn(self.networks.list())
trove_api.trove.datastore_flavors(IsA(http.HttpRequest),
'mongodb', '2.6')\
.AndReturn(self.flavors.list())
trove_api.trove.datastore_list(IsA(http.HttpRequest))\
.AndReturn(self.datastores.list())
trove_api.trove.datastore_version_list(IsA(http.HttpRequest), IsA(str))\
.AndReturn(self.datastore_versions.list())
cluster_name = u'MyCluster'
cluster_volume = 1
cluster_flavor = u'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
cluster_instances = 3
cluster_datastore = u'mongodb'
cluster_datastore_version = u'2.6'
cluster_network = u'82288d84-e0a5-42ac-95be-e6af08727e42'
trove_api.trove.cluster_create(
IsA(http.HttpRequest),
cluster_name,
cluster_volume,
cluster_flavor,
cluster_instances,
datastore=cluster_datastore,
datastore_version=cluster_datastore_version,
nics=cluster_network,
root_password=None).AndReturn(self.trove_clusters.first())
self.mox.ReplayAll()
post = {
'name': cluster_name,
'volume': cluster_volume,
'num_instances': cluster_instances,
'num_shards': 1,
'num_instances_per_shards': cluster_instances,
'datastore': cluster_datastore + u'-' + cluster_datastore_version,
'mongodb_flavor': cluster_flavor,
'network': cluster_network
}
res = self.client.post(LAUNCH_URL, post)
self.assertNoFormErrors(res)
self.assertMessageCount(success=1)
@test.create_stubs({trove_api.trove: ['datastore_flavors',
'cluster_create',
'datastore_list',
'datastore_version_list'],
api.neutron: ['network_list_for_tenant']})
def test_create_simple_cluster_exception(self):
api.neutron.network_list_for_tenant(IsA(http.HttpRequest), '1')\
.AndReturn(self.networks.list())
trove_api.trove.datastore_flavors(IsA(http.HttpRequest),
'mongodb', '2.6')\
.AndReturn(self.flavors.list())
trove_api.trove.datastore_list(IsA(http.HttpRequest))\
.AndReturn(self.datastores.list())
trove_api.trove.datastore_version_list(IsA(http.HttpRequest), IsA(str))\
.AndReturn(self.datastore_versions.list())
cluster_name = u'MyCluster'
cluster_volume = 1
cluster_flavor = u'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
cluster_instances = 3
cluster_datastore = u'mongodb'
cluster_datastore_version = u'2.6'
cluster_network = u'82288d84-e0a5-42ac-95be-e6af08727e42'
trove_api.trove.cluster_create(
IsA(http.HttpRequest),
cluster_name,
cluster_volume,
cluster_flavor,
cluster_instances,
datastore=cluster_datastore,
datastore_version=cluster_datastore_version,
nics=cluster_network,
root_password=None).AndReturn(self.trove_clusters.first())
self.mox.ReplayAll()
post = {
'name': cluster_name,
'volume': cluster_volume,
'num_instances': cluster_instances,
'num_shards': 1,
'num_instances_per_shards': cluster_instances,
'datastore': cluster_datastore + u'-' + cluster_datastore_version,
'mongodb_flavor': cluster_flavor,
'network': cluster_network
}
res = self.client.post(LAUNCH_URL, post)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({trove_api.trove: ('cluster_get',
'instance_get',
'flavor_get',)})
def test_details(self):
cluster = self.trove_clusters.first()
trove_api.trove.cluster_get(IsA(http.HttpRequest), cluster.id)\
.MultipleTimes().AndReturn(cluster)
trove_api.trove.instance_get(IsA(http.HttpRequest), IsA(str))\
.MultipleTimes().AndReturn(self.databases.first())
trove_api.trove.flavor_get(IsA(http.HttpRequest), IsA(str))\
.MultipleTimes().AndReturn(self.flavors.first())
self.mox.ReplayAll()
details_url = reverse('horizon:project:database_clusters:detail',
args=[cluster.id])
res = self.client.get(details_url)
self.assertTemplateUsed(res, 'horizon/common/_detail.html')
self.assertContains(res, cluster.ip[0])
| dklyle/trove-dashboard | trove_dashboard/content/database_clusters/tests.py | Python | apache-2.0 | 12,810 |
from __future__ import unicode_literals
import json
from moto.core import BaseBackend
from .parsing import ResourceMap, OutputMap
from .utils import generate_stack_id
class FakeStack(object):
def __init__(self, stack_id, name, template):
self.stack_id = stack_id
self.name = name
self.template = template
self.status = 'CREATE_COMPLETE'
template_dict = json.loads(self.template)
self.description = template_dict.get('Description')
self.resource_map = ResourceMap(stack_id, name, template_dict)
self.resource_map.create()
self.output_map = OutputMap(self.resource_map, template_dict)
self.output_map.create()
@property
def stack_resources(self):
return self.resource_map.values()
@property
def stack_outputs(self):
return self.output_map.values()
class CloudFormationBackend(BaseBackend):
def __init__(self):
self.stacks = {}
self.deleted_stacks = {}
def create_stack(self, name, template):
stack_id = generate_stack_id(name)
new_stack = FakeStack(stack_id=stack_id, name=name, template=template)
self.stacks[stack_id] = new_stack
return new_stack
def describe_stacks(self, name_or_stack_id):
stacks = self.stacks.values()
if name_or_stack_id:
for stack in stacks:
if stack.name == name_or_stack_id or stack.stack_id == name_or_stack_id:
return [stack]
deleted_stacks = self.deleted_stacks.values()
for stack in deleted_stacks:
if stack.stack_id == name_or_stack_id:
return [stack]
else:
return stacks
def list_stacks(self):
return self.stacks.values()
def get_stack(self, name_or_stack_id):
if name_or_stack_id in self.stacks:
# Lookup by stack id
return self.stacks.get(name_or_stack_id)
else:
# Lookup by stack name
return [stack for stack in self.stacks.values() if stack.name == name_or_stack_id][0]
# def update_stack(self, name, template):
# stack = self.get_stack(name)
# stack.template = template
# return stack
def delete_stack(self, name_or_stack_id):
if name_or_stack_id in self.stacks:
# Delete by stack id
stack = self.stacks.pop(name_or_stack_id, None)
stack.status = 'DELETE_COMPLETE'
self.deleted_stacks[stack.stack_id] = stack
return stack  # already removed from self.stacks above; return the deleted stack
else:
# Delete by stack name
stack_to_delete = [stack for stack in self.stacks.values() if stack.name == name_or_stack_id][0]
self.delete_stack(stack_to_delete.stack_id)
cloudformation_backend = CloudFormationBackend()
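A short usage sketch of the backend above, assuming a template with an empty Resources section is acceptable to ResourceMap (which lives in the parsing module and is not shown here):
# Illustrative sketch - not part of the original module.
import json

template = json.dumps({"Description": "demo stack", "Resources": {}})
stack = cloudformation_backend.create_stack(name="demo", template=template)
assert cloudformation_backend.describe_stacks(stack.stack_id) == [stack]
cloudformation_backend.delete_stack("demo")   # delete by name resolves the stack id, then deletes
assert stack.status == 'DELETE_COMPLETE'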
| djmitche/moto | moto/cloudformation/models.py | Python | apache-2.0 | 2,874 |
# Copyright 2017 DataCentred Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pecan
import pecan.decorators
from sentinel.api.controllers import base
from sentinel.decorators import supported_queries, mutate_arguments
from sentinel.scope import Scope
class NetworkV2QuotasController(base.BaseController):
service = u'network'
resource = u'quota'
collection = u'quotas'
def __init__(self):
self._custom_actions = {
'default': ['GET'],
}
@pecan.expose('json')
@pecan.decorators.accept_noncanonical
@supported_queries()
def get_all(self):
quotas = Scope.filter(self.network.list_quotas())
return self.format_collection(quotas, links=False)
@pecan.expose('json')
@mutate_arguments('identity.projects')
def get(self, project):
quota = self.network.show_quota(project.id)
return self.format_resource(quota)
@pecan.expose('json')
@mutate_arguments('identity.projects')
def put(self, project):
quota = self.network.update_quota(project.id, body=pecan.request.json)
return self.format_resource(quota)
@pecan.expose('json')
@mutate_arguments('identity.projects')
def delete(self, project):
self.network.delete_quota(project.id)
pecan.response.status = 204
@pecan.expose('json')
@mutate_arguments('identity.projects')
def default(self, project):
quota = self.network.show_quota_default(project.id)
return self.format_resource(quota)
# vi: ts=4 et:
| spjmurray/openstack-sentinel | sentinel/api/controllers/network/v2/quotas.py | Python | apache-2.0 | 2,073 |
"""
"""
import numpy
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams  # used for the default theano_rng below
from pythonDnn.layers.logistic_sgd import LogisticRegression
from pythonDnn.layers.mlp import HiddenLayer
from pythonDnn.layers.rbm import RBM, GBRBM
from pythonDnn.models import nnet
class DBN(nnet):
"""Deep Belief Network
A deep belief network is obtained by stacking several RBMs on top of each
other. The hidden layer of the RBM at layer `i` becomes the input of the
RBM at layer `i+1`. The first layer RBM gets as input the input of the
network, and the hidden layer of the last RBM represents the output. When
used for classification, the DBN is treated as a MLP, by adding a logistic
regression layer on top.
"""
def __init__(self, numpy_rng, theano_rng=None, n_ins=784,
hidden_layers_sizes=[500, 500], n_outs=10,
first_layer_gb = True,pretrainedLayers=None,activation=T.nnet.sigmoid):
"""This class is made to support a variable number of layers.
:type numpy_rng: numpy.random.RandomState
:param numpy_rng: numpy random number generator used to draw initial
weights
:type theano_rng: theano.tensor.shared_randomstreams.RandomStreams
:param theano_rng: Theano random generator; if None is given one is
generated based on a seed drawn from `rng`
:type n_ins: int
:param n_ins: dimension of the input to the DBN
:type hidden_layers_sizes: list of ints
:param hidden_layers_sizes: intermediate layers size, must contain
at least one value
:type n_outs: int
:param n_outs: dimension of the output of the network
:type first_layer_gb: bool
:param first_layer_gb: whether the first layer is Gaussian-Bernoulli or
Bernoulli-Bernoulli
"""
super(DBN, self).__init__()
self.layers = []
self.rbm_layers = []
self.n_layers = len(hidden_layers_sizes)
if pretrainedLayers is None:
self.nPreTrainLayers = self.n_layers  # a bare n_layers is undefined in this scope
else :
self.nPreTrainLayers = pretrainedLayers
assert self.n_layers > 0
if not theano_rng:
theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
# allocate symbolic variables for the data
self.x = T.matrix('x') # the data is presented as rasterized images
self.y = T.ivector('y') # the labels are presented as 1D vector
# of [int] labels
# The DBN is an MLP, for which all weights of intermediate
# layers are shared with a different RBM. We will first
# construct the DBN as a deep multilayer perceptron, and when
# constructing each sigmoidal layer we also construct an RBM
# that shares weights with that layer. During pretraining we
# will train these RBMs (which will lead to changing the
# weights of the MLP as well) During finetuning we will finish
# training the DBN by doing stochastic gradient descent on the
# MLP.
for i in xrange(self.n_layers):
# construct the sigmoidal layer
# the size of the input is either the number of hidden
# units of the layer below or the input size if we are on
# the first layer
# the input to this layer is either the activation of the
# hidden layer below or the input of the DBN if you are on
# the first layer
if i == 0:
input_size = n_ins
layer_input = self.x
else:
input_size = hidden_layers_sizes[i - 1]
layer_input = self.layers[-1].output
sigmoid_layer = HiddenLayer(rng=numpy_rng,
input=layer_input,
n_in=input_size,
n_out=hidden_layers_sizes[i],
activation=activation)
# add the layer to our list of layers
self.layers.append(sigmoid_layer)
# the parameters of the sigmoid_layers are parameters of the DBN.
# The visible biases in the RBM are parameters of those RBMs,
# but not of the DBN.
self.params.extend(sigmoid_layer.params)
self.delta_params.extend(sigmoid_layer.delta_params)
# Construct an RBM that shared weights with this layer
# the first layer could be Gaussian-Bernoulli RBM
# other layers are Bernoulli-Bernoulli RBMs
if i == 0 and first_layer_gb:
rbm_layer = GBRBM(numpy_rng=numpy_rng,
theano_rng=theano_rng,
input=layer_input,
n_visible=input_size,
n_hidden=hidden_layers_sizes[i],
W=sigmoid_layer.W,
hbias=sigmoid_layer.b,
activation=activation)
else:
rbm_layer = RBM(numpy_rng=numpy_rng,
theano_rng=theano_rng,
input=layer_input,
n_visible=input_size,
n_hidden=hidden_layers_sizes[i],
W=sigmoid_layer.W,
hbias=sigmoid_layer.b,
activation=activation)
self.rbm_layers.append(rbm_layer)
# We now need to add a logistic layer on top of the MLP
self.logLayer = LogisticRegression(
input=self.layers[-1].output,
n_in=hidden_layers_sizes[-1],
n_out=n_outs)
self.layers.append(self.logLayer)
self.params.extend(self.logLayer.params)
self.delta_params.extend(self.logLayer.delta_params)
# compute the cost for second phase of training, defined as the
# negative log likelihood of the logistic regression (output) layer
self.finetune_cost = self.logLayer.negative_log_likelihood(self.y)
# compute the gradients with respect to the model parameters
# symbolic variable that points to the number of errors made on the
# minibatch given by self.x and self.y
self.errors = self.logLayer.errors(self.y)
self.output = self.logLayer.prediction()
self.features = self.layers[-2].output
self.features_dim = self.layers[-2].n_out
def pretraining_functions(self, train_set_x, batch_size, weight_cost):
'''Generates a list of functions, for performing one step of
gradient descent at a given layer. The function will require
as input the minibatch index, and to train an RBM you just
need to iterate, calling the corresponding function on all
minibatch indexes.
:type train_set_x: theano.tensor.TensorType
:param train_set_x: Shared var. that contains all datapoints used
for training the RBM
:type batch_size: int
:param batch_size: size of a [mini]batch
:param weight_cost: weight cost
'''
# index to a [mini]batch
index = T.lscalar('index') # index to a minibatch
momentum = T.scalar('momentum')
learning_rate = T.scalar('lr') # learning rate to use
# number of batches
n_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
# beginning of a batch, given `index`
batch_begin = index * batch_size
# ending of a batch given `index`
batch_end = batch_begin + batch_size
pretrain_fns = []
for rbm in self.rbm_layers:
# get the cost and the updates list
# using CD-k here (persistent=None, k=1) for training each RBM.
r_cost, fe_cost, updates = rbm.get_cost_updates(batch_size, learning_rate,
momentum, weight_cost)
# compile the theano function
fn = theano.function(inputs=[index,
theano.Param(learning_rate, default=0.0001),
theano.Param(momentum, default=0.5)],
outputs= [r_cost, fe_cost],
updates=updates,
givens={self.x: train_set_x[batch_begin:batch_end]})
# append function to the list of functions
pretrain_fns.append(fn)
return pretrain_fns
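A construction sketch for the class above, illustrative only: the seed, layer sizes, and the shared training matrix are placeholders chosen to match the constructor and pretraining_functions signatures (Python 2 / Theano era code).
# Illustrative sketch - not part of the original module.
numpy_rng = numpy.random.RandomState(123)
dbn = DBN(numpy_rng=numpy_rng, n_ins=784, hidden_layers_sizes=[500, 500], n_outs=10)
# train_set_x is assumed to be a theano shared variable holding the training matrix
train_set_x = theano.shared(numpy.zeros((1000, 784), dtype=theano.config.floatX))
pretrain_fns = dbn.pretraining_functions(train_set_x=train_set_x, batch_size=10, weight_cost=0.0002)
# pretrain_fns[i](index, lr=..., momentum=...) performs one CD update on RBM layer i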
| IITM-DONLAB/python-dnn | src/pythonDnn/models/dbn.py | Python | apache-2.0 | 8,684 |
# Python Substrate Interface Library
#
# Copyright 2018-2020 Stichting Polkascan (Polkascan Foundation).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
def version_tuple(version_string: str) -> tuple:
"""
Converts a basic version string to a tuple that can be compared
Parameters
----------
version_string
Returns
-------
tuple
"""
if re.search(r'[^\.0-9]', version_string):
raise ValueError('version_string can only contain numeric characters')
return tuple(int(v) for v in version_string.split('.'))
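A brief usage note for the helper above (not part of the vendored module): the returned tuples compare element-wise, which is what makes '1.10.0' sort after '1.9.5'.
# Illustrative examples - not part of the original module.
assert version_tuple('1.2.3') == (1, 2, 3)
assert version_tuple('1.10.0') > version_tuple('1.9.5')   # tuple comparison, unlike plain string comparison
# version_tuple('1.2rc1') raises ValueError: only digits and dots are accepted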
| vertexproject/synapse | synapse/vendor/substrateinterface/utils/__init__.py | Python | apache-2.0 | 1,064 |
import time
import wiringpi2 as wiringpi
#use Broadcom pin numbers
wiringpi.wiringPiSetupGpio()
LED_PIN = 25
# setup pin as an output
wiringpi.pinMode(LED_PIN, 1)
while True:
# enable LED
wiringpi.digitalWrite(LED_PIN, 1)
# wait 1 second
time.sleep(1)
# disable LED
wiringpi.digitalWrite(LED_PIN, 0)
# wait 1 second
time.sleep(1)
# cleanup (note: unreachable, since the blink loop above never exits)
wiringpi.pinMode(LED_PIN, 0)
| lukaszo/rpitips-examples | wiringpi/led.py | Python | apache-2.0 | 392 |
# coding: utf-8
"""
Cloudbreak API
Cloudbreak is a powerful left surf that breaks over a coral reef, a mile off southwest the island of Tavarua, Fiji. Cloudbreak is a cloud agnostic Hadoop as a Service API. Abstracts the provisioning and ease management and monitoring of on-demand clusters. SequenceIQ's Cloudbreak is a RESTful application development platform with the goal of helping developers to build solutions for deploying Hadoop YARN clusters in different environments. Once it is deployed in your favourite servlet container it exposes a REST API allowing to span up Hadoop clusters of arbitary sizes and cloud providers. Provisioning Hadoop has never been easier. Cloudbreak is built on the foundation of cloud providers API (Amazon AWS, Microsoft Azure, Google Cloud Platform, Openstack), Apache Ambari, Docker lightweight containers, Swarm and Consul. For further product documentation follow the link: <a href=\"http://hortonworks.com/apache/cloudbreak/\">http://hortonworks.com/apache/cloudbreak/</a>
OpenAPI spec version: 2.9.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class PlatformVmtypesResponse(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'vm_types': 'dict(str, VirtualMachinesResponse)'
}
attribute_map = {
'vm_types': 'vmTypes'
}
def __init__(self, vm_types=None):
"""
PlatformVmtypesResponse - a model defined in Swagger
"""
self._vm_types = None
if vm_types is not None:
self.vm_types = vm_types
@property
def vm_types(self):
"""
Gets the vm_types of this PlatformVmtypesResponse.
:return: The vm_types of this PlatformVmtypesResponse.
:rtype: dict(str, VirtualMachinesResponse)
"""
return self._vm_types
@vm_types.setter
def vm_types(self, vm_types):
"""
Sets the vm_types of this PlatformVmtypesResponse.
:param vm_types: The vm_types of this PlatformVmtypesResponse.
:type: dict(str, VirtualMachinesResponse)
"""
self._vm_types = vm_types
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, PlatformVmtypesResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
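A small usage note for the generated model above (illustrative only): to_dict() walks swagger_types, so nested response objects would be serialised recursively.
# Illustrative example - not part of the generated module.
resp = PlatformVmtypesResponse(vm_types={})
assert resp.to_dict() == {'vm_types': {}}
assert resp == PlatformVmtypesResponse(vm_types={})   # __eq__ compares the instances' __dict__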
| Chaffelson/whoville | whoville/cloudbreak/models/platform_vmtypes_response.py | Python | apache-2.0 | 4,040 |
"""
Copyright 2015 Matthew D. Ball (M4Numbers)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__author__ = 'Matthew Ball'
import imgurpython
class ImgurCrawler:
__client = 0
def __init__(self, client, secret, pin):
conn = imgurpython.ImgurClient(client, secret)
auth_url = conn.get_auth_url('pin')
creds = conn.authorize(pin, 'pin')
conn.set_user_auth(creds['access_token'], creds['refresh_token'])
self.__client = conn
def request_profile(self, name):
ret = self.__client.get_account(name)
return ret
|
M4Numbers/Walkers
|
walkers/ImgurCrawler.py
|
Python
|
apache-2.0
| 1,092
|
i = int(input("The first number: "))
j = int(input("The second number: "))
k = int(input("The third number: "))
l = int(input("The fourth number: "))
pairs = 0
if i == j or i == k or i == l:
pairs += 1
if j == k or j == l:
pairs += 1
if k == l:
pairs += 1
if pairs >= 2:
print("Two pairs")
else:
print("Not two pairs")
|
JoachimVandersmissen/CodingSolutions
|
python/PythonForEveryone/chapter3/8.py
|
Python
|
apache-2.0
| 339
|
#!/usr/bin/env python
# Copyright (c) 2015 by Ken Guyton. All Rights Reserved.
"""Create a random grid and compute the solution."""
from __future__ import print_function
import argparse
import walk_grid
import random
MAX_GRID_SIZE = 20
MAX_FOOD = 200
MAX_ROOM_FOOD = 10
def get_args():
"""Parse command line arguments."""
parser = argparse.ArgumentParser()
parser.add_argument('--grid_size', default=11, type=int,
help='The size of each side of the grid.')
parser.add_argument('--food', default=150, type=int,
help='Amount of food.')
return parser.parse_args()
def create_grid(grid_size):
"""Randomly create a grid."""
grid = []
for j in range(grid_size):
grid.append([])
for unused_i in range(grid_size):
grid[j].append(random.randrange(MAX_ROOM_FOOD + 1))
grid[0][0] = 0
return grid
def print_grid(grid):
"""Print out a grid."""
for row in grid:
for num in row:
print(' {0:2d}'.format(num), end='')
print()
def main():
"""Compute the answer for the given grid and food amounts."""
opts = get_args()
if opts.food > MAX_FOOD:
print('WARNING, you have exceeded the MAX FOOD size: {0}'.format(MAX_FOOD))
if opts.grid_size > MAX_GRID_SIZE:
print('WARNING, you have exceeded the MAX GRID SIZE: {0}'.format(
MAX_GRID_SIZE))
print('Food: {0}.'.format(opts.food))
print('Grid size: {0}.'.format(opts.grid_size))
grid = create_grid(opts.grid_size)
print('\nThe Grid...\n')
print_grid(grid)
print()
least_left, steps = walk_grid.answer_and_steps(grid, opts.food)
print('\nResult: {0} with steps {1}.'.format(least_left, steps))
if __name__ == '__main__':
main()
|
kmggh/python-walk-grid
|
spec_solution.py
|
Python
|
artistic-2.0
| 1,718
|
"""
Last.fm support for Django-Social-Auth.
An application must first be registered on Last.fm, and the settings LASTFM_API_KEY
and LASTFM_SECRET must be defined with their corresponding values.
"""
from hashlib import md5
from re import sub
from urllib import urlencode
from urllib2 import urlopen
from django.conf import settings
from django.contrib.auth import authenticate
from django.utils import simplejson
from social_auth.backends import BaseAuth, SocialAuthBackend, USERNAME
LASTFM_API_SERVER = 'https://ws.audioscrobbler.com/2.0/'
LASTFM_AUTHORIZATION_URL = 'https://www.last.fm/api/auth/'
class LastfmBackend(SocialAuthBackend):
"""Last.fm authentication backend."""
name = "lastfm"
EXTRA_DATA = [('id', 'id'), ]
def get_user_id(self, details, response):
"""Get unique User id from response"""
return response['id']
def get_user_details(self, response):
"""Return user details from Last.fm account"""
full_name = response['realname'].strip()
if len(full_name.split(' ')) > 1:
last_name = full_name.split(' ')[-1].strip()
first_name = full_name.replace(last_name, '').strip()
else:
first_name = full_name
last_name = ''
data = {
USERNAME: response.get('name', ''),
'email': '',
'fullname': full_name,
'first_name': first_name,
'last_name': last_name
}
return data
def extra_data(self, user, uid, response, details):
data = {'access_token': response.get('access_token', '')}
name = self.name.replace('-', '_').upper()
names = (self.EXTRA_DATA or []) + getattr(settings, name + '_EXTRA_DATA', [])
data.update((alias, response.get(name)) for name, alias in names)
return data
class LastfmAuth(BaseAuth):
"""Last.fm authentication mechanism."""
AUTH_BACKEND = LastfmBackend
SETTINGS_KEY_NAME = 'LASTFM_API_KEY'
SETTINGS_SECRET_NAME = 'LASTFM_SECRET'
def auth_url(self):
"""Return authorization redirect url."""
key = self.api_key()
callback = self.request.build_absolute_uri(self.redirect)
callback = sub(r'^https', u'http', callback)
query = urlencode({'api_key': key, 'cb': callback})
return '%s?%s' % (LASTFM_AUTHORIZATION_URL, query)
def auth_complete(self, *args, **kwargs):
"""Return user from authenticate."""
token = self.data.get('token')
if not token:
raise ValueError('No token returned')
username, access_token = self.access_token(token)
data = self.user_data(username)
if data is not None:
data['access_token'] = access_token
kwargs.update({'response': data, self.AUTH_BACKEND.name: True})
return authenticate(*args, **kwargs)
def access_token(self, token):
"""Get the Last.fm session/access token via auth.getSession."""
data = {
'method': 'auth.getSession',
'api_key': self.api_key(),
'token': token,
'api_sig': self.method_signature('auth.getSession', token),
'format': 'json',
}
query = urlencode(data)
url = '%s?%s' % (LASTFM_API_SERVER, query)
try:
response = urlopen(url).read()
session = simplejson.loads(response)['session']
access_token = session['key']
username = session['name']
except Exception:
access_token = ''
username = ''
return (username, access_token)
def user_data(self, username):
"""Request user data."""
data = {
'method': 'user.getinfo',
'api_key': self.api_key(),
'user': username,
'format': 'json',
}
query = urlencode(data)
url = '%s?%s' % (LASTFM_API_SERVER, query)
try:
response = urlopen(url).read()
user_data = simplejson.loads(response)['user']
except Exception:
user_data = None
return user_data
def method_signature(self, method, token):
"""Generate method signature for API calls."""
data = {
'key': self.api_key(),
'secret': self.secret_key(),
'method': method,
'token': token,
}
key = 'api_key%(key)smethod%(method)stoken%(token)s%(secret)s' % data
return md5(key).hexdigest()
@classmethod
def enabled(cls):
"""Enable only if settings are defined."""
return bool(cls.api_key() and cls.secret_key())
@classmethod
def api_key(cls):
return getattr(settings, cls.SETTINGS_KEY_NAME, '')
@classmethod
def secret_key(cls):
return getattr(settings, cls.SETTINGS_SECRET_NAME, '')
# Backend definition
BACKENDS = {
'lastfm': LastfmAuth,
}
|
mlavin/django-lastfm-auth
|
lastfm_auth/backend.py
|
Python
|
bsd-2-clause
| 4,889
|
#!/usr/bin/python
from __future__ import print_function
import os
import sys
from os.path import join, dirname
from setuptools import Extension, find_packages, setup
from distutils.command import build_ext as _build_ext
try:
# we use Cython if possible, then resort to pre-built intermediate files
# noinspection PyPackageRequirements
from Cython.Distutils import build_ext
except ImportError:
build_ext = None
# change directory to this module path
try:
this_file = __file__
except NameError:
this_file = sys.argv[0]
this_file = os.path.abspath(this_file)
if os.path.dirname(this_file):
os.chdir(os.path.dirname(this_file))
script_dir = os.getcwd()
def readme(fname):
"""Read text out of a file in the same directory as setup.py.
"""
return open(join(dirname(__file__), fname), 'rt').read()
if build_ext:
avf_module = Extension(
'pyavfcam',
['src/avf.pyx',
'src/modules/avf.m',
'src/modules/camera_frame.m',
'src/modules/utils.m',
'src/modules/avf_impl.m',
],
extra_link_args=['-framework', 'AVFoundation',
'-framework', 'Foundation',
],
extra_compile_args=['-ObjC++', '-std=c++11', '-stdlib=libc++','-mmacosx-version-min=10.7'],
language="c++",
)
else:
avf_module = Extension(
'pyavfcam',
['src/avf.cpp',
'src/modules/avf.m',
'src/modules/camera_frame.m',
'src/modules/utils.m',
'src/modules/avf_impl.m',
],
extra_link_args=['-framework', 'AVFoundation',
'-framework', 'Foundation',
],
extra_compile_args=['-ObjC++', '-std=c++11', '-stdlib=libc++'],
language="c++",
)
# noinspection PyPep8Naming
class build_ext(_build_ext.build_ext):
def run(self):
print("""
--> Cython is not installed. Can not compile .pyx files. <--
If the pre-built sources did not work you'll have to do it yourself
and run this command again,
if you want to recompile your .pyx files.
`pip install cython` should suffice.
------------------------------------------------------------
""")
assert os.path.exists(
os.path.join(script_dir, 'src', 'avf.cpp')), \
'Source file not found!'
return _build_ext.build_ext.run(self)
setup(
name="pyavfcam",
version="0.0.1",
author="dashesy",
author_email="dashesy@gmail.com",
url='https://github.com/dashesy/pyavfcam',
description="Simple camera video capture in OSX using AVFoundation",
long_description=readme('README.md'),
packages=find_packages(),
license="BSD",
cmdclass={
'build_ext': build_ext,
},
classifiers=[
'Intended Audience :: Developers',
'Operating System :: MacOS :: MacOS X',
"License :: OSI Approved :: BSD License",
"Programming Language :: Objective C++",
"Programming Language :: Cython",
"Programming Language :: Python",
'Topic :: Software Development',
],
ext_modules=[
avf_module
]
)
|
dashesy/pyavfcam
|
setup.py
|
Python
|
bsd-2-clause
| 3,259
|
# -*- coding: utf-8 -*-
from glyph import Glyph, glyphs
from context import mergeSubPolys
from kerning import kernGlyphs
from punctuation import spaceGlyph
class LineBreak(object):
def __init__(self, leading):
super(LineBreak, self).__init__()
self.leading = leading
class TextBox(object):
def __init__(self, box, parent):
super(TextBox, self).__init__()
self.x = 0.0
self.y = 0.0
self.tracking = 0
self.size = 100
self.weight = 3.0
self.width = int(parent.attrib["width"])
self.serif = True
self.leading = None
# Pull in integer properties
for prop in ["x", "y", "width"]:
if prop in box.attrib:
setattr(self, prop, int(box.attrib[prop]))
# Pull in float properties
for prop in ["tracking", "size", "leading", "weight"]:
if prop in box.attrib:
setattr(self, prop, float(box.attrib[prop]))
if self.leading == None:
self.leading = self.size / 2.0
self.glyphs = []
self.addXMLChunk(box)
def addXMLChunk(self, chunk, weight=None, italic=False, capHeight=None,
color=None, serif=None, tracking=None, leading=None):
if chunk.text:
self.addTextChunk(chunk.text, weight=weight, italic=italic,
capHeight=capHeight, color=color, serif=serif,
tracking=tracking, leading=leading)
for el in chunk:
newWeight = weight
newItalic = italic
newCapHeight = capHeight
newColor = color
newSerif = serif
newTracking = tracking
newLeading = leading
if el.tag == "u":
newWeight = 0.5
elif el.tag == "l":
newWeight = 2.0
elif el.tag == "m":
newWeight = 3.0
elif el.tag == "b":
newWeight = 5.0
elif el.tag == "h":
newWeight = 7.0
elif el.tag == "i":
newItalic = True
elif el.tag == "leading":
newLeading = float(el.attrib["px"])
elif el.tag == "tracking":
newTracking = float(el.attrib["px"])
elif el.tag == "size":
newCapHeight = float(el.attrib["px"])
if newLeading == None:
newLeading = newCapHeight / 2.0
elif el.tag == "br":
self.addTextChunk("\n", capHeight=capHeight, leading=newLeading,
stripNewline=False)
elif el.tag == "color":
newColor = (float(el.attrib["r"]),
float(el.attrib["g"]),
float(el.attrib["b"]),
float(el.attrib["a"]))
elif el.tag == "sans":
newSerif = False
elif el.tag == "serif":
newSerif = True
self.addXMLChunk(el, weight=newWeight, italic=newItalic,
capHeight=newCapHeight, color=newColor,
serif=newSerif, tracking=newTracking,
leading=newLeading)
if el.tail:
self.addTextChunk(el.tail, weight=weight, italic=italic,
capHeight=capHeight, color=color,
serif=serif, tracking=tracking,
leading=leading)
def addTextChunk(self, text, weight=None, italic=False, capHeight=None,
stripNewline=True, color=None, serif=None, tracking=None,
leading=None):
for i in range(len(text)):
a = text[i]
if weight == None:
weight = self.weight
if capHeight == None:
capHeight = self.size
if color == None:
color = (0.0, 0.0, 0.0, 1.0)
if serif == None:
serif = self.serif
if tracking == None:
tracking = self.tracking
if leading == None:
leading = self.leading
#if a == " ":
# self.glyphs += [" "]
# continue
if a == "\n":
if not stripNewline:
self.glyphs += [LineBreak(leading)]
continue
glyph = glyphs[a](x=0, y=0, capHeight=capHeight)
glyph.w = (weight * (glyph.capHeight() / 100.0))
glyph.slanted = italic
glyph.color = color
glyph.willBeSerifed = serif
glyph.tracking = tracking
self.glyphs += [glyph]
def layoutGlyphs(self):
allGlyphs = []
wordGlyphs = []
xloc = self.x
yloc = self.y
metrics = Glyph(0, 0, capHeight=self.size)
for i in range(len(self.glyphs)):
a = self.glyphs[i]
if i + 1 < len(self.glyphs):
b = self.glyphs[i + 1]
else:
b = None
if isinstance(a, LineBreak):
if b and not isinstance(b, LineBreak):
bGlyphBounds = mergeSubPolys([b]).bounds
b.x = self.x
xloc = b.x - bGlyphBounds[0]
else:
xloc = self.x
yloc += metrics.capHeight() + a.leading
allGlyphs += wordGlyphs
wordGlyphs = []
continue
glyphBounds = mergeSubPolys([a]).bounds
a.x = xloc
a.y = yloc
xShift = glyphBounds[2] - glyphBounds[0]
if isinstance(b, Glyph):
xShift += (kernGlyphs(a.char, b.char, a.weight(),
capHeight=a.capHeight()) + a.tracking)
if a.outlined:
xShift += (a.capHeight() / 15.0)
xloc += xShift
if isinstance(a, spaceGlyph):
if len(wordGlyphs):
if xloc > self.width:
xloc = self.x
yloc += metrics.capHeight() + self.leading
allGlyphs += wordGlyphs
wordGlyphs = []
else:
wordGlyphs += [a]
allGlyphs += wordGlyphs
for g in allGlyphs:
g.serifed = g.willBeSerifed
return allGlyphs
|
hortont424/phiface
|
phiface/text.py
|
Python
|
bsd-2-clause
| 6,558
|
import glob
import os
import unicodedata
from string import punctuation
from nltk import word_tokenize, SnowballStemmer
from nltk.corpus import stopwords, PlaintextCorpusReader
class StemmingController:
def __init__(self):
ADDITIONAL_STOPWORDS = ['%', '?', '¿', 'please', 'your', 'flash', 'plugin', 'Tags', 'MÁS', 'EN', '.+MÁS',
'+Tags', '...', ',', '.', '[', ']', '"', '(',
')', '…', 'el', 'la', 'los', 'uno', 'una', '-', ':', '``', "''"]
self.ALL_STOPWORDS = set(stopwords.words('spanish') + ADDITIONAL_STOPWORDS)
self.DELIMITER = '\\'
def stemming_text(self, words):
stemmer = SnowballStemmer("spanish")
final_text = []
for word in words:
final_text.append(stemmer.stem(word))
return final_text
def stemming_files(self, source_folder, destination_folder):
if not os.path.exists(destination_folder):
os.makedirs(destination_folder)
corpus_news = PlaintextCorpusReader(source_folder, '.*\.txt')
for file in corpus_news.fileids():
file_name = os.path.basename(os.path.normpath(file))
words = corpus_news.words(file)
stemmed_content = self.stemming_text(words)
with open(destination_folder + "/" + file_name, 'w', encoding='utf8') as modified:
modified.write(' '.join(stemmed_content))
|
gcvalderrama/Palantir
|
worker/StemmingController.py
|
Python
|
bsd-2-clause
| 1,443
|
'''Autogenerated by get_gl_extensions script, do not edit!'''
from OpenGL import platform as _p
from OpenGL.GL import glget
EXTENSION_NAME = 'GL_SGIS_texture_lod'
_p.unpack_constants( """GL_TEXTURE_MIN_LOD_SGIS 0x813A
GL_TEXTURE_MAX_LOD_SGIS 0x813B
GL_TEXTURE_BASE_LEVEL_SGIS 0x813C
GL_TEXTURE_MAX_LEVEL_SGIS 0x813D""", globals())
def glInitTextureLodSGIS():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( EXTENSION_NAME )
|
frederica07/Dragon_Programming_Process
|
PyOpenGL-3.0.2/OpenGL/raw/GL/SGIS/texture_lod.py
|
Python
|
bsd-2-clause
| 522
|
import logging
from ob2.util.hooks import register_job
logging.info("Hello world!")
@register_job("hw0")
def hw0_job_handler(repo, commit_hash):
return "You get full credit!", 10.0
|
octobear2/ob2
|
config/functions.py
|
Python
|
bsd-2-clause
| 188
|
import time
import socket
import struct
import urllib.parse
import select
from . import pac_server
from . import global_var as g
from .socket_wrap import SocketWrap
import utils
from .smart_route import handle_ip_proxy, handle_domain_proxy, netloc_to_host_port
from xlog import getLogger
xlog = getLogger("smart_router")
SO_ORIGINAL_DST = 80
class ProxyServer():
handle_num = 0
def __init__(self, sock, client, args):
self.conn = sock
self.rfile = self.conn.makefile("rb", 0)
self.wfile = self.conn.makefile("wb", 0)
self.client_address = client
self.read_buffer = b""
self.buffer_start = 0
self.support_redirect = True
def try_redirect(self):
if not self.support_redirect:
return False
try:
dst = self.conn.getsockopt(socket.SOL_IP, SO_ORIGINAL_DST, 16)
except:
self.support_redirect = False
return False
try:
dst_port, srv_ip = struct.unpack("!2xH4s8x", dst)
ip_str = socket.inet_ntoa(srv_ip)
if dst_port != g.config.proxy_port and not utils.is_private_ip(ip_str):
xlog.debug("Redirect to:%s:%d from:%s", ip_str, dst_port, self.client_address)
handle_ip_proxy(self.conn, ip_str, dst_port, self.client_address)
return True
else:
return False
except Exception as e:
xlog.exception("redirect except:%r", e)
return True
def handle(self):
self.__class__.handle_num += 1
if self.try_redirect():
return
sockets = [self.conn]
try:
r, w, e = select.select(sockets, [], [])
socks_version = self.conn.recv(1, socket.MSG_PEEK)
if not socks_version:
return
if socks_version == b"\x04":
self.socks4_handler()
elif socks_version == b"\x05":
self.socks5_handler()
elif socks_version == b"C":
self.https_handler()
elif socks_version in [b"G", b"P", b"D", b"O", b"H", b"T"]:
self.http_handler()
else:
xlog.warn("socks version:%s[%s] not supported", socks_version, utils.str2hex(socks_version))
return
except socket.error as e:
xlog.warn('socks handler read error:%r', e)
except Exception as e:
xlog.exception("any err:%r", e)
def read_null_end_line(self):
sock = self.conn
sock.setblocking(0)
try:
while True:
n1 = self.read_buffer.find(b"\x00", self.buffer_start)
if n1 > -1:
line = self.read_buffer[self.buffer_start:n1]
self.buffer_start = n1 + 1
return line
try:
data = sock.recv(8192)
except socket.error as e:
# logging.exception("e:%r", e)
if e.errno in [2, 11, 10035]:
time.sleep(0.01)
continue
else:
raise e
self.read_buffer += data
finally:
sock.setblocking(1)
def read_crlf_line(self):
sock = self.conn
sock.setblocking(0)
try:
while True:
n1 = self.read_buffer.find(b"\r\n", self.buffer_start)
if n1 > -1:
line = self.read_buffer[self.buffer_start:n1]
self.buffer_start = n1 + 2
return line
try:
data = sock.recv(8192)
except socket.error as e:
# logging.exception("e:%r", e)
if e.errno in [2, 11, 10035]:
time.sleep(0.01)
continue
else:
raise e
self.read_buffer += data
finally:
sock.setblocking(1)
def read_headers(self):
sock = self.conn
sock.setblocking(0)
try:
while True:
if self.read_buffer[self.buffer_start:] == b"\r\n":
self.buffer_start += 2
return ""
n1 = self.read_buffer.find(b"\r\n\r\n", self.buffer_start)
if n1 > -1:
block = self.read_buffer[self.buffer_start:n1]
self.buffer_start = n1 + 4
return block
try:
data = sock.recv(8192)
except socket.error as e:
# logging.exception("e:%r", e)
if e.errno in [2, 11, 10035]:
time.sleep(0.01)
continue
else:
raise e
self.read_buffer += data
finally:
sock.setblocking(1)
def read_bytes(self, size):
sock = self.conn
sock.setblocking(1)
try:
while True:
left = len(self.read_buffer) - self.buffer_start
if left >= size:
break
need = size - left
try:
data = sock.recv(need)
except socket.error as e:
# logging.exception("e:%r", e)
if e.errno in [2, 11, 10035]:
time.sleep(0.01)
continue
else:
raise e
if len(data):
self.read_buffer += data
else:
raise socket.error("recv fail")
finally:
sock.setblocking(1)
data = self.read_buffer[self.buffer_start:self.buffer_start + size]
self.buffer_start += size
return data
def socks4_handler(self):
# Socks4 or Socks4a
sock = self.conn
socks_version = ord(self.read_bytes(1))
cmd = ord(self.read_bytes(1))
if cmd != 1:
xlog.warn("Socks4 cmd:%d not supported", cmd)
return
data = self.read_bytes(6)
port = struct.unpack(">H", data[0:2])[0]
addr_pack = data[2:6]
if addr_pack[0:3] == b'\x00\x00\x00' and addr_pack[3:4] != b'\x00':
domain_mode = True
else:
ip = socket.inet_ntoa(addr_pack)
domain_mode = False
user_id = self.read_null_end_line()
if len(user_id):
xlog.debug("Socks4 user_id:%s", user_id)
if domain_mode:
addr = self.read_null_end_line()
else:
addr = ip
reply = b"\x00\x5a" + addr_pack + struct.pack(">H", port)
sock.send(reply)
# xlog.debug("Socks4:%r to %s:%d", self.client_address, addr, port)
handle_ip_proxy(sock, addr, port, self.client_address)
def socks5_handler(self):
sock = self.conn
socks_version = ord(self.read_bytes(1))
auth_mode_num = ord(self.read_bytes(1))
data = self.read_bytes(auth_mode_num)
sock.send(b"\x05\x00") # socks version 5, no auth needed.
try:
data = self.read_bytes(4)
except Exception as e:
xlog.debug("socks5 auth num:%d, list:%s", auth_mode_num, utils.str2hex(data))
xlog.warn("socks5 protocol error:%r", e)
return
socks_version = ord(data[0:1])
if socks_version != 5:
xlog.warn("request version:%d error", socks_version)
return
command = ord(data[1:2])
if command != 1: # 1. Tcp connect
xlog.warn("request not supported command mode:%d", command)
sock.send(b"\x05\x07\x00\x01") # Command not supported
return
addrtype_pack = data[3:4]
addrtype = ord(addrtype_pack)
if addrtype == 1: # IPv4
addr_pack = self.read_bytes(4)
addr = socket.inet_ntoa(addr_pack)
elif addrtype == 3: # Domain name
domain_len_pack = self.read_bytes(1)[0:1]
domain_len = ord(domain_len_pack)
domain = self.read_bytes(domain_len)
addr_pack = domain_len_pack + domain
addr = domain
elif addrtype == 4: # IPv6
addr_pack = self.read_bytes(16)
addr = socket.inet_ntop(socket.AF_INET6, addr_pack)
else:
xlog.warn("request address type unknown:%d", addrtype)
sock.send(b"\x05\x07\x00\x01") # Command not supported
return
port = struct.unpack('>H', self.rfile.read(2))[0]
# xlog.debug("socks5 %r connect to %s:%d", self.client_address, addr, port)
reply = b"\x05\x00\x00" + addrtype_pack + addr_pack + struct.pack(">H", port)
sock.send(reply)
if addrtype in [1, 4]:
handle_ip_proxy(sock, addr, port, self.client_address)
else:
handle_domain_proxy(sock, addr, port, self.client_address)
def https_handler(self):
line = self.read_crlf_line()
words = line.split()
if len(words) == 3:
command, path, version = words
elif len(words) == 2:
command, path = words
version = b"HTTP/1.1"
else:
xlog.warn("https req line fail:%s", line)
return
if command != b"CONNECT":
xlog.warn("https req line fail:%s", line)
return
host, _, port = path.rpartition(b':')
port = int(port)
header_block = self.read_headers()
sock = self.conn
# xlog.debug("https %r connect to %s:%d", self.client_address, host, port)
sock.send(b'HTTP/1.1 200 OK\r\n\r\n')
handle_domain_proxy(sock, host, port, self.client_address)
def http_handler(self):
req_data = self.conn.recv(65537, socket.MSG_PEEK)
rp = req_data.split(b"\r\n")
req_line = rp[0]
words = req_line.split()
if len(words) == 3:
method, url, http_version = words
elif len(words) == 2:
method, url = words
http_version = b"HTTP/1.1"
else:
xlog.warn("http req line fail:%s", req_line)
return
if url.lower().startswith(b"http://"):
o = urllib.parse.urlparse(url)
host, port = netloc_to_host_port(o.netloc)
url_prex_len = url[7:].find(b"/")
if url_prex_len >= 0:
url_prex_len += 7
path = url[url_prex_len:]
else:
url_prex_len = len(url)
path = b"/"
else:
# not proxy request, should be PAC
xlog.debug("PAC %s %s from:%s", method, url, self.client_address)
handler = pac_server.PacHandler(self.conn, self.client_address, None, xlog)
return handler.handle()
sock = SocketWrap(self.conn, self.client_address[0], self.client_address[1])
sock.replace_pattern = [url[:url_prex_len], ""]
xlog.debug("http %r connect to %s:%d %s %s", self.client_address, host, port, method, path)
handle_domain_proxy(sock, host, port, self.client_address)
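# For reference, handle() above dispatches on the first peeked byte (MSG_PEEK):
# b"\x04" -> SOCKS4/SOCKS4a, b"\x05" -> SOCKS5, b"C" -> HTTPS CONNECT, and
# b"G"/b"P"/b"D"/b"O"/b"H"/b"T" -> plain HTTP verbs (GET, POST/PUT/PATCH, DELETE,
# OPTIONS, HEAD, TRACE); anything else is logged as an unsupported SOCKS version.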
|
xyuanmu/XX-Net
|
code/default/smart_router/local/proxy_handler.py
|
Python
|
bsd-2-clause
| 11,386
|
#!/usr/bin/env python
# Put your app specific configs here
consumer_key = ""
consumer_secret = ""
|
honza/clitwi
|
config.py
|
Python
|
bsd-2-clause
| 100
|
#!/usr/bin/env python
import os
import sys
from django.core.management import execute_from_command_line
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dispensa.settings.dev")
execute_from_command_line(sys.argv)
|
evonove/dispensa-website
|
django-dispensa/manage.py
|
Python
|
bsd-2-clause
| 251
|
from django.conf.urls import url
from django.contrib.auth.decorators import login_required
from . import views
urlpatterns = [
url(r'^$', views.InventoryView.as_view(), name='inventory'),
url(r'^(?P<node_type>\w+)/(?P<node_id>[0-9]+)/$',
login_required(views.NodesView.as_view()), name='nodes'),
url(r'^(?P<node_type>\w+)/(?P<node_id>[0-9]+)/variable/(?P<action>\w+)/$',
login_required(views.VariablesView.as_view()), name='variables'),
url(r'^(?P<node_type>\w+)/(?P<node_id>[0-9]+)/(?P<relation>\w+)/$',
login_required(views.RelationsView.as_view()), name='relations'),
]
|
ptonini/battuta-manager
|
battuta/inventory/urls.py
|
Python
|
bsd-2-clause
| 618
|
###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2022, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('array_formula01.xlsx')
self.ignore_files = ['xl/calcChain.xml',
'[Content_Types].xml',
'xl/_rels/workbook.xml.rels']
def test_create_file(self):
"""Test the creation of an XlsxWriter file with an array formula."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.write('B1', 0)
worksheet.write('B2', 0)
worksheet.write('B3', 0)
worksheet.write('C1', 0)
worksheet.write('C2', 0)
worksheet.write('C3', 0)
worksheet.write_array_formula(0, 0, 2, 0, '{=SUM(B1:C1*B2:C2)}', None, 0)
workbook.close()
self.assertExcelEqual()
def test_create_file_A1(self):
"""
Test the creation of an XlsxWriter file with an array formula
and A1 Notation.
"""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.write('B1', 0)
worksheet.write('B2', 0)
worksheet.write('B3', 0)
worksheet.write('C1', 0)
worksheet.write('C2', 0)
worksheet.write('C3', 0)
worksheet.write_array_formula('A1:A3', '{=SUM(B1:C1*B2:C2)}', None, 0)
workbook.close()
self.assertExcelEqual()
def test_create_file_kwargs(self):
"""
Test the creation of an XlsxWriter file with an array formula
and keyword args
"""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.write('B1', 0)
worksheet.write('B2', 0)
worksheet.write('B3', 0)
worksheet.write('C1', 0)
worksheet.write('C2', 0)
worksheet.write('C3', 0)
worksheet.write_array_formula(first_row=0, first_col=0,
last_row=2, last_col=0,
formula='{=SUM(B1:C1*B2:C2)}')
workbook.close()
self.assertExcelEqual()
|
jmcnamara/XlsxWriter
|
xlsxwriter/test/comparison/test_array_formula01.py
|
Python
|
bsd-2-clause
| 2,513
|
import logging
from django.conf import settings
from django.conf.urls import patterns, url
from django.db.models import Q
from django.http import Http404
from django.views.decorators.cache import never_cache
from guardian.shortcuts import get_objects_for_user
from preserialize.serialize import serialize
from serrano.resources.base import ThrottledResource
from varify import api
from vdw.samples.models import CohortVariant
from vdw.assessments.models import Assessment, Pathogenicity, \
AssessmentCategory
from vdw.variants.models import Variant
log = logging.getLogger(__name__)
GENOME_VERSION = getattr(settings, 'VDW_GENOME_VERSION', None)
try:
from solvebio.contrib.django_solvebio import SolveBio
from solvebio import SolveError, Filter
except ImportError:
SolveBio = None
log.warning('Could not import SolveBio')
class VariantResource(ThrottledResource):
model = Variant
template = api.templates.Variant
# Check if the object exists. The cache should not be relied on since
# it is not deleted or invalidated when objects are deleted
def is_not_found(self, request, response, pk):
return not self.model.objects.filter(pk=pk).exists()
@classmethod
@api.cache_resource
def get(self, request, pk):
related = ['type', 'chr']
try:
variant = self.model.objects.select_related(*related).get(pk=pk)
except self.model.DoesNotExist:
raise Http404
data = serialize(variant, **self.template)
# Roll up unique set of genes and effects for this variant since
# this is quite important
genes = set()
effects = set()
# Compile the HGVS_c values for the SolveBio query
hgvs_c_values = set()
for eff in data['effects']:
effects.add(eff['type'])
if eff.get('transcript') and eff['transcript'].get('gene'):
if eff['transcript']['gene']:
genes.add(eff['transcript']['gene']['symbol'])
if eff['transcript'].get('transcript') and eff.get('hgvs_c'):
hgvs_c_values.add('{0}:{1}'.format(
eff['transcript']['transcript'], eff['hgvs_c']))
data['unique_genes'] = sorted(genes)
data['unique_effects'] = sorted(effects)
# Augment resource with cohort-related details (e.g. allele
# frequencies)
perms = Q(cohort__user=None, cohort__published=True) | \
Q(cohort__user=request.user)
projects = get_objects_for_user(request.user, 'samples.view_project')
cohort_variants = CohortVariant.objects\
.filter(perms, variant=variant, cohort__project__in=projects)\
.order_by('-cohort__order', 'cohort__name').distinct()
cohort_list = []
for cv in cohort_variants:
cohort_data = serialize(cv, **api.templates.CohortVariant)
# Find all the samples within this cohort that also contain
# this variant. If a sample is in the cohort but doesn't contain
# this variant then there is no benefit to including it.
samples_with_variant = cv.cohort.samples.filter(
results__variant=cv.variant)
cohort_data['samples'] = serialize(samples_with_variant,
**api.templates.SimpleSample)
cohort_list.append(cohort_data)
data['cohorts'] = cohort_list
if SolveBio and SolveBio.is_enabled():
data['solvebio'] = {}
# ClinVar integration -- use position and HGVS
filters = Filter(chromosome=variant.chr.value,
start__lte=variant.pos,
stop__gte=variant.pos)
if hgvs_c_values:
filters = filters | Filter(hgvs_c__in=list(hgvs_c_values))
# TODO: add another clinvar query for reported gene-wide variants
# if genes:
# filters = filters | Filter(gene_symbol__in=list(genes))
try:
# Query ClinVar by its alias, return 10 results/page
# TODO: client-side pagination
q = SolveBio.get_dataset('clinvar').query(
limit=10, # limit to 10 results (single page)
filters=filters)
# Send the first page of results to the client
data['solvebio']['clinvar'] = {
'results': q.results,
'total': q.total
}
except SolveError as e:
log.exception('SolveBio ClinVar query failed: {0}'.format(e))
data['genome_version'] = GENOME_VERSION
return data
class VariantAssessmentMetricsResource(ThrottledResource):
model = Variant
def is_not_found(self, request, response, pk):
return not self.model.objects.filter(pk=pk).exists()
def get(self, request, pk):
categories = AssessmentCategory.objects.all()
pathogenicities = Pathogenicity.objects.all()
assessments = Assessment.objects.select_related('sample_result') \
.filter(sample_result__variant=pk)
data = {
'num_assessments': 0
}
num_assessments = len(assessments)
# Easier to check for 0 assessments here than checking for a divide by
# 0 situation in every loop iteration.
if num_assessments > 0:
data['num_assessments'] = num_assessments
# Create the pathogenic summary excluding pathogenicities with
# no calls associated with them.
data['pathogenicities'] = []
for p in pathogenicities:
filter_results = assessments.filter(pathogenicity=p.id)
if filter_results.exists():
assessment_data = self.get_assessment_data(
filter_results, num_assessments, request.user.id)
assessment_data['name'] = p.name
data['pathogenicities'].append(assessment_data)
# Create the assessment category summary excluding categories with
# no calls associated with them.
data['categories'] = []
for c in categories:
filter_results = assessments.filter(assessment_category=c.id)
if filter_results.exists():
assessment_data = self.get_assessment_data(
filter_results, num_assessments, request.user.id)
assessment_data['name'] = c.name
data['categories'].append(assessment_data)
# Get the list of all the projects the user has access to. We will
# use this later to make sure that we don't expose the assessment
# details made via samples this user doesn't have rights to view.
user_project_ids = \
get_objects_for_user(request.user, 'samples.view_project') \
.values_list('pk', flat=True)
data['assessments'] = []
for a in assessments:
a_data = {}
a_data['id'] = a.pk
a_data['pathogenicity'] = a.pathogenicity.name
a_data['category'] = getattr(a.assessment_category, 'name', '')
a_data['sanger'] = 'Yes' if a.sanger_requested else 'No'
a_data['mother_result'] = a.mother_result.name
a_data['father_result'] = a.father_result.name
a_data['sample'] = {
'id': a.sample_result.sample.id,
'name': a.sample_result.sample.name,
}
a_data['user'] = {
'username': a.user.username,
'email': a.user.email,
}
if a.sample_result.sample.project.id in user_project_ids:
a_data['details'] = a.evidence_details
data['assessments'].append(a_data)
return data
def get_assessment_data(self, queryset, total_count, user_id):
"""
Calculates and sets the following data for the supplied queryset:
data = {
'count': <the number of items in the queryset>
'percentage': <percentage of total_count queryset represents>
'is_user_call': <true if user made this call, false otherwise>
'users': <set of all users who made this call>
}
"""
# We need to convert the usernames to strings here because the JSON
# encoder will choke when serializing this data if the usernames are
# unicode as they are when we get them back from the distinct call.
users = [{'username': str(username), 'email': email}
for username, email
in queryset.values_list('user__username', 'user__email')
.distinct()]
count = queryset.count()
is_user_call = queryset.filter(user=user_id).exists()
return {
'count': count,
'percentage': count / float(total_count) * 100.0,
'is_user_call': is_user_call,
'users': users,
}
variant_resource = never_cache(VariantResource())
variant_metrics_resource = never_cache(VariantAssessmentMetricsResource())
urlpatterns = patterns(
'',
url(r'^(?P<pk>\d+)/$', variant_resource, name='variant'),
url(r'^(?P<pk>\d+)/assessment-metrics/$', variant_metrics_resource,
name='variant_assessment_metrics'),
)
|
chop-dbhi/varify
|
varify/variants/resources.py
|
Python
|
bsd-2-clause
| 9,609
|
# Copyright (c) 2020, Matt Layman
"""
nose-tap is a reporting plugin for nose that outputs
`Test Anything Protocol (TAP) <http://testanything.org/>`_ data.
TAP is a line based test protocol for recording test data in a standard way.
Follow development on `GitHub <https://github.com/python-tap/nose-tap>`_.
Developer documentation is on
`Read the Docs <https://tappy.readthedocs.io/>`_.
"""
from setuptools import Command, find_packages, setup
import nose_tap
class ReleaseCommand(Command):
description = "generate distribution release artifacts"
user_options = []
def initialize_options(self):
"""Initialize options.
This method overrides a required abstract method.
"""
def finalize_options(self):
"""Finalize options.
This method overrides a required abstract method.
"""
def run(self):
"""Generate the distribution release artifacts.
The custom command is used to ensure that compiling
po to mo is not skipped.
"""
self.run_command("compile_catalog")
self.run_command("sdist")
self.run_command("bdist_wheel")
if __name__ == "__main__":
with open("docs/releases.rst", "r") as f:
releases = f.read()
long_description = __doc__ + "\n\n" + releases
setup(
name="nose-tap",
version=nose_tap.__version__,
url="https://github.com/python-tap/nose-tap",
license="BSD",
author="Matt Layman",
author_email="matthewlayman@gmail.com",
description="Test Anything Protocol (TAP) reporting plugin for nose",
long_description=long_description,
packages=find_packages(),
entry_points={"nose.plugins.0.10": ["tap = nose_tap.plugin:TAP"]},
include_package_data=True,
zip_safe=False,
platforms="any",
install_requires=["nose", "tap.py"],
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Software Development :: Testing",
],
keywords=["TAP", "unittest", "nose"],
cmdclass={"release": ReleaseCommand},
)
|
python-tap/nose-tap
|
setup.py
|
Python
|
bsd-2-clause
| 2,573
|
# Copyright (c) 2010, Sancho McCann
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import glob
import os
import random
import unittest
from naive_bayes_nearest_neighbor import caltech_util
from sift import sift_descriptors_pb2
from sift import sift_util
class TestCaltechHelperFunctions(unittest.TestCase):
def tearDown(self):
if os.path.exists('/tmp/seminar.sift'):
os.remove('/tmp/seminar.sift')
def test_do_extraction_produces_output(self):
""" Tests working case do_extraction """
image_path = os.path.abspath('../test_images/seminar.pgm')
destination_dir = '/tmp/'
parameters = sift_descriptors_pb2.ExtractionParameters()
caltech_util.do_extraction((image_path, destination_dir,
parameters.SerializeToString()))
expected_output_path = os.path.join(destination_dir, 'seminar.sift')
self.assertTrue(os.path.exists(expected_output_path))
self.assertTrue(os.path.getsize(expected_output_path) > 0)
def test_do_extraction_doesnt_extract_when_already_done(self):
image_path = os.path.abspath('../test_images/seminar.pgm')
destination_dir = '/tmp/'
expected_output_path = os.path.join(destination_dir, 'seminar.sift')
parameters = sift_descriptors_pb2.ExtractionParameters()
caltech_util.do_extraction((image_path, destination_dir,
parameters.SerializeToString()))
first_creation_time_check = os.path.getmtime(expected_output_path)
caltech_util.do_extraction((image_path, destination_dir,
parameters.SerializeToString()))
second_creation_time_check = os.path.getmtime(expected_output_path)
self.assertEqual(first_creation_time_check, second_creation_time_check)
def test_do_extraction_does_extract_when_params_dont_match_existing(self):
image_path = os.path.abspath('../test_images/seminar.pgm')
destination_dir = '/tmp/'
expected_output_path = os.path.join(destination_dir, 'seminar.sift')
parameters = sift_descriptors_pb2.ExtractionParameters()
caltech_util.do_extraction((image_path, destination_dir,
parameters.SerializeToString()))
first_creation_time_check = os.path.getmtime(expected_output_path)
parameters.multiscale = False
caltech_util.do_extraction((image_path, destination_dir,
parameters.SerializeToString()))
second_creation_time_check = os.path.getmtime(expected_output_path)
self.assertNotEqual(first_creation_time_check,
second_creation_time_check)
def test_extraction_output_matches_requested_default_parameters(self):
""" Tests that the extraction has been performed with the requested
parameters.
"""
image_path = os.path.abspath('../test_images/seminar.pgm')
destination_dir = '/tmp'
# Tests the default settings.
parameters = sift_descriptors_pb2.ExtractionParameters()
caltech_util.do_extraction((image_path, destination_dir,
parameters.SerializeToString()))
expected_output_path = os.path.join(destination_dir, 'seminar.sift')
result_params = \
sift_util.get_extraction_parameters(expected_output_path)
self.assertEqual(result_params.rotation_invariance, False)
self.assertEqual(result_params.normalization_threshold, 0)
self.assertEqual(result_params.discard_unnormalized, False)
self.assertEqual(result_params.multiscale, True)
self.assertEqual(result_params.percentage, 1)
self.assertEqual(result_params.minimum_radius, 0)
self.assertEqual(result_params.fractional_xy, False)
self.assertEqual(result_params.top_left_x, 0)
self.assertEqual(result_params.top_left_y, 0)
self.assertEqual(result_params.bottom_right_x, 2147483647)
self.assertEqual(result_params.bottom_right_y, 2147483647)
self.assertEqual(result_params.implementation,
sift_descriptors_pb2.ExtractionParameters.VLFEAT)
self.assertEqual(result_params.grid_method,
sift_descriptors_pb2.ExtractionParameters.FIXED_3X3)
self.assertEqual(result_params.smoothed, True)
self.assertAlmostEqual(result_params.first_level_smoothing, 0.6666666,
places=4)
def test_extraction_output_matches_requested_parameters_nondefault(self):
""" Tests that the extraction has been performed with the requested
parameters.
"""
image_path = os.path.abspath('../test_images/seminar.pgm')
destination_dir = '/tmp'
# Tests the default settings + multiscale = False
parameters = sift_descriptors_pb2.ExtractionParameters()
parameters.multiscale = False
parameters.fractional_xy = True
parameters.percentage = 0.5
parameters.normalization_threshold = 0.5
parameters.minimum_radius = 16
parameters.discard_unnormalized = True
parameters.first_level_smoothing = 0.5
parameters.grid_method = \
sift_descriptors_pb2.ExtractionParameters.SCALED_BIN_WIDTH
caltech_util.do_extraction((image_path, destination_dir,
parameters.SerializeToString()))
expected_output_path = os.path.join(destination_dir, 'seminar.sift')
result_params = \
sift_util.get_extraction_parameters(expected_output_path)
self.assertEqual(result_params.rotation_invariance, False)
self.assertEqual(result_params.normalization_threshold, 0.5)
self.assertEqual(result_params.discard_unnormalized, True)
self.assertEqual(result_params.multiscale, False)
self.assertEqual(result_params.percentage, 0.5)
self.assertEqual(result_params.minimum_radius, 16)
self.assertEqual(result_params.fractional_xy, True)
self.assertEqual(result_params.top_left_x, 0)
self.assertEqual(result_params.top_left_y, 0)
self.assertEqual(result_params.bottom_right_x, 2147483647)
self.assertEqual(result_params.bottom_right_y, 2147483647)
self.assertEqual(result_params.grid_method,
sift_descriptors_pb2.ExtractionParameters.SCALED_BIN_WIDTH)
self.assertEqual(result_params.implementation,
sift_descriptors_pb2.ExtractionParameters.VLFEAT)
self.assertEqual(result_params.smoothed, True)
self.assertAlmostEqual(result_params.first_level_smoothing, 0.5,
places=4)
def test_do_extraction_no_image(self):
""" Tests image missing do_extraction """
image_path = os.path.abspath('../test_images/missing.pgm')
destination_dir = '/tmp/'
parameters = sift_descriptors_pb2.ExtractionParameters()
self.assertRaises(caltech_util.ImageNotFoundError,
caltech_util.do_extraction,
(image_path, destination_dir,
parameters.SerializeToString()))
expected_output_path = os.path.join(destination_dir, 'missing.sift')
self.assertFalse(os.path.exists(expected_output_path))
if __name__ == '__main__':
unittest.main()
|
sanchom/sjm
|
naive_bayes_nearest_neighbor/caltech_util_test.py
|
Python
|
bsd-2-clause
| 8,679
|
# -*- coding: UTF-8 -*-
# Copyright 2011-2018 Luc Saffre
#
# License: BSD (see file COPYING for details)
from __future__ import unicode_literals
import datetime
from dateutil.easter import easter
from lino.api import dd, _
from lino.utils.format_date import fds
from .utils import day_and_month
class Weekdays(dd.ChoiceList):
verbose_name = _("Weekday")
add = Weekdays.add_item
add('1', _('Monday'), 'monday')
add('2', _('Tuesday'), 'tuesday')
add('3', _('Wednesday'), 'wednesday')
add('4', _('Thursday'), 'thursday')
add('5', _('Friday'), 'friday')
add('6', _('Saturday'), 'saturday')
add('7', _('Sunday'), 'sunday')
WORKDAYS = frozenset([
Weekdays.get_by_name(k)
for k in 'monday tuesday wednesday thursday friday'.split()])
class DurationUnit(dd.Choice):
def add_duration(unit, orig, value):
if orig is None:
return None
if unit.value == 's':
return orig + datetime.timedelta(seconds=value)
if unit.value == 'm':
return orig + datetime.timedelta(minutes=value)
if unit.value == 'h':
return orig + datetime.timedelta(hours=value)
if unit.value == 'D':
return orig + datetime.timedelta(days=value)
if unit.value == 'W':
return orig + datetime.timedelta(days=value * 7)
day = orig.day
while True:
year = orig.year
try:
if unit.value == 'M':
m = orig.month + value
while m > 12:
m -= 12
year += 1
while m < 1:
m += 12
year -= 1
return orig.replace(month=m, day=day, year=year)
if unit.value == 'Y':
return orig.replace(year=orig.year + value, day=day)
if unit.value == 'E':
offset = orig - easter(year)
return easter(year+value) + offset
raise Exception("Invalid DurationUnit %s" % unit)
except ValueError:
if day > 28:
day -= 1
else:
raise
def get_date_formatter(self):
if self.value in 'YEM':
return fds
return day_and_month
class DurationUnits(dd.ChoiceList):
verbose_name = _("Duration Unit")
item_class = DurationUnit
add = DurationUnits.add_item
add('s', _('seconds'), 'seconds')
add('m', _('minutes'), 'minutes')
add('h', _('hours'), 'hours')
add('D', _('days'), 'days')
add('W', _('weeks'), 'weeks')
add('M', _('months'), 'months')
add('Y', _('years'), 'years')
class Recurrencies(dd.ChoiceList):
verbose_name = _("Recurrency")
item_class = DurationUnit
add = Recurrencies.add_item
add('O', _('once'), 'once')
add('D', _('daily'), 'daily')
add('W', _('weekly'), 'weekly')
add('M', _('monthly'), 'monthly')
add('Y', _('yearly'), 'yearly')
add('P', _('per weekday'), 'per_weekday') # deprecated
add('E', _('Relative to Easter'), 'easter')
def amonthago():
return DurationUnits.months.add_duration(dd.today(), -1)
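# Illustrative add_duration behaviour (dates chosen arbitrarily): month and year
# arithmetic clamps the day when the target month is shorter, and 'E' moves a date
# to the one with the same offset from Easter in the shifted year.
#
#   DurationUnits.months.add_duration(datetime.date(2018, 1, 31), 1)
#   # -> datetime.date(2018, 2, 28)
#   DurationUnits.years.add_duration(datetime.date(2016, 2, 29), 1)
#   # -> datetime.date(2017, 2, 28)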
class AccessClasses(dd.ChoiceList):
verbose_name = _("Access Class")
add = AccessClasses.add_item
add('10', _('Private'), 'private')
add('20', _('Show busy'), 'show_busy')
add('30', _('Public'), 'public')
class PlannerColumns(dd.ChoiceList):
verbose_name = _("Planner column")
add = PlannerColumns.add_item
add('10', _('External'), 'external')
add('20', _('Internal'), 'internal')
|
khchine5/xl
|
lino_xl/lib/cal/choicelists.py
|
Python
|
bsd-2-clause
| 3,543
|
"""
Module for handling redis IO
"""
import redis
import hashlib
from flickipedia.config import log, settings
__author__ = 'Ryan Faulkner'
__date__ = "2014-04-01"
def hmac(key):
""" Use an hmac to generate a hash key """
return hashlib.md5(key + settings.__secret_key__).hexdigest()
def _decode_list(data):
""" Decodes list elements """
rv = []
for item in data:
if isinstance(item, unicode):
item = item.encode('utf-8')
elif isinstance(item, list):
item = _decode_list(item)
elif isinstance(item, dict):
item = _decode_dict(item)
rv.append(item)
return rv
def _decode_dict(data):
"""
Decodes dict elements.
'object_hook' for json.loads (e.g. obj =
json.loads(s, object_hook=_decode_dict))
"""
rv = {}
for key, value in data.iteritems():
if isinstance(key, unicode):
key = key.encode('utf-8')
if isinstance(value, unicode):
value = value.encode('utf-8')
elif isinstance(value, list):
value = _decode_list(value)
elif isinstance(value, dict):
value = _decode_dict(value)
rv[key] = value
return rv
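# Illustrative use of the decoder hooks (Python 2 semantics, matching this module):
#
#   import json
#   obj = json.loads('{"title": "Flickipedia"}', object_hook=_decode_dict)
#   # keys and string values come back as UTF-8 byte strings rather than unicode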
class DataIORedis(object):
""" Class implementing data IO for Redis. """
DEFAULT_HOST = 'localhost'
DEFAULT_PORT = 6379
DEFAULT_DB = 0
__instance = None
def __new__(cls, *args, **kwargs):
""" This class is Singleton, return only one instance """
if not cls.__instance:
cls.__instance = super(DataIORedis, cls).__new__(cls, *args,
**kwargs)
return cls.__instance
def __init__(self, **kwargs):
super(DataIORedis, self).__init__(**kwargs)
self.setconfig(**kwargs)
def setconfig(self, **kwargs):
""" Sets the instance config """
self.host = kwargs['host'] if kwargs.has_key('host') else \
self.DEFAULT_HOST
self.port = kwargs['port'] if kwargs.has_key('port') else \
self.DEFAULT_PORT
self.db = kwargs['db'] if kwargs.has_key('db') else self.DEFAULT_DB
def connect(self):
self.conn = redis.Redis(host=self.host, port=self.port, db=self.db)
def write(self, key, value):
if self.conn:
try:
return self.conn.set(key, value)
except KeyError as e:
log.error('Missing param -> {0}'.format(e.message))
return False
else:
log.error('No redis connection.')
return False
def read(self, key):
if self.conn:
try:
return self.conn.get(key)
except KeyError as e:
log.error('Missing param -> {0}'.format(e.message))
return False
else:
log.error('No redis connection.')
return False
def delete(self, **kwargs):
if self.conn:
try:
return self.conn.delete(kwargs['key'])
except KeyError as e:
log.error('Missing param -> {0}'.format(e.message))
return False
else:
log.error('No redis connection.')
return False
|
rfaulkner/Flickipedia
|
flickipedia/redisio.py
|
Python
|
bsd-2-clause
| 3,307
|
import dbf
import os.path
THRIFT_DIR_PATH = os.path.join(
os.path.dirname(__file__), "..", "src", "pastpy", "models"
)
class RecordField:
THRIFT_IMPORTS_BY_TYPE = {
"date.Date": 'include "thryft/native/date.thrift"',
"date_time.DateTime": 'include "thryft/native/date_time.thrift"',
"decimal.Decimal": 'include "thryft/native/decimal.thrift"',
}
# DBF type is C for char, et al.
# http://dbfread.readthedocs.io/en/latest/field_types.html
THRIFT_TYPE_BY_DBF_TYPE = {
# C text unicode string
"C": "string",
# D date datetime.date or None
"D": "date.Date",
# L logical True, False or None
"L": "bool",
# M memo unicode string (memo), byte string (picture or object) or None
"M": "string",
# N numeric int, float or None
"N": "decimal.Decimal",
# T time datetime.datetime
"T": "date_time.DateTime",
}
def __init__(self, dbf_field_info, name):
self.__dbf_field_info = dbf_field_info
self.__name = name
dbf_type = chr(dbf_field_info.field_type)
if dbf_type == "N":
if dbf_field_info.decimal > 0:
thrift_type = "decimal.Decimal"
else:
thrift_type = "i32"
else:
thrift_type = self.THRIFT_TYPE_BY_DBF_TYPE[dbf_type]
self.__thrift_type = thrift_type
def __eq__(self, other):
if self.name != other.name:
return False
if self.thrift_type != other.thrift_type:
return False
return True
@property
def name(self):
return self.__name
def thrift_import(self):
return self.THRIFT_IMPORTS_BY_TYPE.get(self.thrift_type, None)
def thrift_repr(self):
dbf_type = [chr(self.__dbf_field_info.field_type)]
if self.__dbf_field_info.length > 0:
dbf_type.append(str(self.__dbf_field_info.length))
if self.__dbf_field_info.decimal > 0:
dbf_type.append(str(self.__dbf_field_info.decimal))
dbf_type = ",".join(dbf_type)
name = self.name
thrift_type = self.thrift_type
return (
"""\
// %(dbf_type)s
optional %(thrift_type)s %(name)s;"""
% locals()
)
@property
def thrift_type(self):
return self.__thrift_type
def generate_record_thrift(union_dbf_file_paths, thrift_file_name):
fields_by_name = {}
thrift_imports = []
for dbf_file_path in union_dbf_file_paths:
with dbf.Table(dbf_file_path) as table:
for field_name in table.field_names:
field = RecordField(
dbf_field_info=table.field_info(field_name), name=field_name
)
if field_name in fields_by_name:
assert field == fields_by_name[field_name]
fields_by_name[field_name] = field
thrift_import = field.thrift_import()
if thrift_import is not None and thrift_import not in thrift_imports:
thrift_imports.append(thrift_import)
field_thrift_reprs = []
for field_name in sorted(fields_by_name.keys()):
field_thrift_reprs.append(fields_by_name[field_name].thrift_repr())
field_thrift_reprs = "\n\n".join(field_thrift_reprs)
thrift_file_path = os.path.join(THRIFT_DIR_PATH, thrift_file_name)
thrift_imports = "\n".join(sorted(thrift_imports))
struct_name = "".join(
part.capitalize() for part in os.path.splitext(thrift_file_name)[0].split("_")
)
with open(thrift_file_path, "w+b") as thrift_file:
thrift_file.write(
(
"""\
namespace * pastpy.models
%(thrift_imports)s
struct %(struct_name)s {
%(field_thrift_reprs)s
}
"""
% locals()
)
.replace("\r\n", "\n")
.encode("ascii")
)
print("wrote", thrift_file_path)
assert __name__ == "__main__"
generate_record_thrift(
("C:\\pp5eval\\Data\\OBJECTS.DBF", "C:\\pp5Reports\\PPSdata.dbf",),
"objects_dbf_record.thrift",
)
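# For reference, the emitted Thrift file has this overall shape (the struct name is
# derived from the output file name; the field name and DBF annotation below are
# placeholders, since they depend on the input tables):
#
#   namespace * pastpy.models
#   include "thryft/native/date.thrift"
#   struct ObjectsDbfRecord {
#       // C,50
#       optional string objname;
#   }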
|
minorg/pastpy
|
devbin/generate_record_thrift.py
|
Python
|
bsd-2-clause
| 4,135
|
#!/usr/bin/env python
"""
@package mi.dataset.parser.test.test_dosta_ln_wfp_sio_mule
@file marine-integrations/mi/dataset/parser/test/test_dosta_ln_wfp_sio_mule.py
@author Christopher Fortin
@brief Test code for a dosta_ln_wfp_sio_mule data parser
"""
import os
import ntplib, struct
from nose.plugins.attrib import attr
from mi.core.exceptions import SampleException, UnexpectedDataException
from mi.core.log import get_logger ; log = get_logger()
from mi.dataset.test.test_parser import ParserUnitTestCase
from mi.dataset.dataset_driver import DataSetDriverConfigKeys
from mi.dataset.parser.sio_mule_common import StateKey
from mi.dataset.parser.dosta_ln_wfp_sio_mule import DostaLnWfpSioMuleParser
from mi.dataset.parser.dosta_ln_wfp_sio_mule import DostaLnWfpSioMuleParserDataParticle
from mi.idk.config import Config
RESOURCE_PATH = os.path.join(Config().base_dir(), 'mi',
'dataset', 'driver', 'dosta_ln',
'wfp_sio_mule', 'resource')
@attr('UNIT', group='mi')
class DostaLnWfpSioParserUnitTestCase(ParserUnitTestCase):
def state_callback(self, state):
""" Call back method to watch what comes in via the position callback """
self.state_callback_value = state
def pub_callback(self, pub):
""" Call back method to watch what comes in via the publish callback """
self.publish_callback_value = pub
def exception_callback(self, exception):
""" Call back method to watch what comes in via the exception callback """
self.exception_callback_value = exception
def setUp(self):
ParserUnitTestCase.setUp(self)
self.config = {
DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.dosta_ln_wfp_sio_mule',
DataSetDriverConfigKeys.PARTICLE_CLASS: 'DostaLnWfpSioMuleParserDataParticle'
}
# First 'WE' SIO header in node58p1.dat, first record.
self.timestamp_1a = self.timestamp_to_ntp('Q\xf2W.') # The record timestamp should be 2986504401
log.debug("Converted timestamp 1a: %s",self.timestamp_1a)
self.particle_1a = DostaLnWfpSioMuleParserDataParticle(b'Q\xf2W.\x00\x00\x00\x00A9Y' \
'\xb4\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x009\x00e\x02:', internal_timestamp = self.timestamp_1a)
# First 'WE' SIO header in node58p1.dat, second record.
self.timestamp_1b = self.timestamp_to_ntp('Q\xf2Xq')
log.debug("Converted timestamp 1b: %s",self.timestamp_1b)
self.particle_1b = DostaLnWfpSioMuleParserDataParticle(b'Q\xf2XqB\x8f\x83DA5\x1e\xb8D' \
'\xfd\x85qB\x82\x83\x12?\xf9\xba^\x009\x00d\x028', internal_timestamp = self.timestamp_1b)
# First 'WE' SIO header in node58p1.dat, third record.
self.timestamp_1c = self.timestamp_to_ntp('Q\xf2Z\xd3')
log.debug("Converted timestamp 1c: %s",self.timestamp_1c)
self.particle_1c = DostaLnWfpSioMuleParserDataParticle(b'Q\xf2Z\xd3B\x84\x06GA2\x9a\xd4E' \
'\t\xd3\xd7B\x9b\xdc)?\xec\xac\x08\x00:\x00d\x027', internal_timestamp = self.timestamp_1c)
# Second 'WE' SIO header in node58p1.dat, first record.
self.timestamp_2a = self.timestamp_to_ntp('Q\xf2\x8fn')
log.debug("Converted timestamp 2a: %s",self.timestamp_2a)
self.particle_2a = DostaLnWfpSioMuleParserDataParticle(b'Q\xf2\x8fn\x00\x00\x00\x00A7\xd5f' \
'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x008\x00a\x02=', internal_timestamp = self.timestamp_2a)
# Last 'WE' SIO header in node58p1.dat, last record (when reading 12).
self.timestamp_1l = self.timestamp_to_ntp('Q\xf2\x99q')
log.debug("Converted timestamp 1l: %s",self.timestamp_1l)
self.particle_1l = DostaLnWfpSioMuleParserDataParticle(b'Q\xf2\x99qC"\t\xceA/\x9alEM\x07\\C' \
'\x07\xd7\n?\xc3\x95\x81\x007\x00_\x02;', internal_timestamp = self.timestamp_1l)
# Last 'WE' SIO header in node58p1.dat[0:300000], second to last record.
self.timestamp_1k = self.timestamp_to_ntp('Q\xf2\x981')
log.debug("Converted timestamp 1k: %s",self.timestamp_1k)
self.particle_1k = DostaLnWfpSioMuleParserDataParticle(b'Q\xf2\x981C\x10\xe5kA/\xe4&EG\x8c\x00C' \
'\x04\xc2\x8f?\xc4\xfd\xf4\x006\x00_\x02;', internal_timestamp = self.timestamp_1k)
# Last record of second 'WE' SIO header, the last record when pulling 5000 bytes.
self.timestamp_m = self.timestamp_to_ntp('Q\xf2\xa5\xc9')
log.debug("Converted timestamp m2: %s",self.timestamp_m)
self.state_callback_value = None
self.publish_callback_value = None
self.exception_callback_value = None
def assert_result(self, result, in_process_data, unprocessed_data, particle):
self.assertEqual(result, [particle])
self.assert_state(in_process_data, unprocessed_data)
self.assert_(isinstance(self.publish_callback_value, list))
self.assertEqual(self.publish_callback_value[0], particle)
def assert_state(self, in_process_data, unprocessed_data):
self.assertEqual(self.parser._state[StateKey.IN_PROCESS_DATA], in_process_data)
self.assertEqual(self.parser._state[StateKey.UNPROCESSED_DATA], unprocessed_data)
self.assertEqual(self.state_callback_value[StateKey.IN_PROCESS_DATA], in_process_data)
self.assertEqual(self.state_callback_value[StateKey.UNPROCESSED_DATA], unprocessed_data)
def timestamp_to_ntp(self, hex_timestamp):
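        # Unpack the 4-byte big-endian seconds value and shift it onto the NTP
        # epoch (ntplib adds the 1900-to-1970 offset to the POSIX time).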
fields = struct.unpack('>I', hex_timestamp)
timestamp = float(fields[0])
return ntplib.system_to_ntp_time(timestamp)
def test_simple(self):
"""
Read test data from the file and pull out data particles one at a time.
Assert that the results are those we expected.
"""
log.debug('------------------------------------------------------Starting test_simple')
self.stream_handle = open(os.path.join(RESOURCE_PATH,
'node58p1.dat'))
# NOTE: using the unprocessed data state of 0,5000 limits the file to reading
# just 5000 bytes, so even though the file is longer it only reads the first
# 5000
self.state = {StateKey.UNPROCESSED_DATA:[[0, 5000]],
StateKey.IN_PROCESS_DATA:[],
StateKey.FILE_SIZE: 1939566}
self.parser = DostaLnWfpSioMuleParser(self.config, self.state, self.stream_handle,
self.state_callback, self.pub_callback, self.exception_callback)
result = self.parser.get_records(1)
log.debug("IN_PROCESS_DATA: %s", self.parser._state[StateKey.IN_PROCESS_DATA])
log.debug("Unprocessed: %s", self.parser._state[StateKey.UNPROCESSED_DATA])
# An extra byte exists between SIO headers([4058:4059] and [7423,7424])
self.assert_result(result, [[2818,2982,3,1], [4059,4673,18,0]],
[[2818,2982], [4058,5000]], self.particle_1a)
result = self.parser.get_records(1)
self.assert_result(result, [[2818,2982,3,2], [4059,4673,18,0]],
[[2818,2982], [4058,5000]], self.particle_1b)
result = self.parser.get_records(1)
self.assert_result(result, [[4059,4673,18,0]],
[[4058,5000]], self.particle_1c)
result = self.parser.get_records(1)
self.assert_result(result, [[4059,4673,18,1]],
[[4058,5000]], self.particle_2a)
self.stream_handle.close()
def test_get_many(self):
"""
Read test data from the file and pull out multiple data particles at one time.
Assert that the results are those we expected.
"""
log.debug('--------------------------------------------------------Starting test_get_many')
self.state = {StateKey.UNPROCESSED_DATA:[[0, 5000]],
StateKey.IN_PROCESS_DATA:[],
StateKey.FILE_SIZE: 1939566}
self.stream_handle = open(os.path.join(RESOURCE_PATH,
'node58p1.dat'))
self.parser = DostaLnWfpSioMuleParser(self.config, self.state, self.stream_handle,
self.state_callback, self.pub_callback, self.exception_callback)
result = self.parser.get_records(4)
self.assertEqual(result,
[self.particle_1a, self.particle_1b, self.particle_1c, self.particle_2a])
self.assertEqual(self.publish_callback_value[0], self.particle_1a)
self.assertEqual(self.publish_callback_value[1], self.particle_1b)
self.assertEqual(self.publish_callback_value[2], self.particle_1c)
self.assertEqual(self.publish_callback_value[3], self.particle_2a)
self.assert_state([[4059,4673,18,1]],[[4058,5000]])
self.stream_handle.close()
def test_long_stream(self):
"""
Test a long stream
"""
log.debug('---------------------------------------------------------Starting test_long_stream')
self.stream_handle = open(os.path.join(RESOURCE_PATH,
'node58p1.dat'))
self.stream_handle.seek(0)
self.state = {StateKey.UNPROCESSED_DATA:[[0, 5000]],
StateKey.IN_PROCESS_DATA:[],
StateKey.FILE_SIZE: 1939566}
self.parser = DostaLnWfpSioMuleParser(self.config, self.state, self.stream_handle,
self.state_callback, self.pub_callback, self.exception_callback)
result = self.parser.get_records(12)
self.assertEqual(result[0], self.particle_1a)
self.assertEqual(result[1], self.particle_1b)
self.assertEqual(result[2], self.particle_1c)
self.assertEqual(result[-2], self.particle_1k)
self.assertEqual(result[-1], self.particle_1l)
self.assertEqual(self.publish_callback_value[-2], self.particle_1k)
self.assertEqual(self.publish_callback_value[-1], self.particle_1l)
self.assert_state([[4059,4673,18,9]],[[4058,5000]])
self.stream_handle.close()
def test_mid_state_start(self):
"""
Test starting the parser in a state in the middle of processing
"""
log.debug('-----------------------------------------------------------Starting test_mid_state_start')
new_state = {StateKey.IN_PROCESS_DATA:[],
StateKey.UNPROCESSED_DATA:[[2818,2982]],
StateKey.FILE_SIZE: 1939566}
self.stream_handle = open(os.path.join(RESOURCE_PATH, 'node58p1.dat'))
self.parser = DostaLnWfpSioMuleParser(self.config, new_state, self.stream_handle,
self.state_callback, self.pub_callback, self.exception_callback)
result = self.parser.get_records(1)
self.assert_result(result, [[2818,2982,3,1]],
[[2818,2982]], self.particle_1a)
result = self.parser.get_records(1)
self.assert_result(result, [[2818,2982,3,2]],
[[2818,2982]], self.particle_1b)
result = self.parser.get_records(1)
self.assert_result(result, [], [], self.particle_1c)
self.stream_handle.close()
def test_bad_data(self):
"""
Ensure that the bad record ( in this case a currupted status message ) causes a sample exception
"""
self.stream_handle = open(os.path.join(RESOURCE_PATH, 'node58p1_BADFLAGS.dat'))
self.state = {StateKey.UNPROCESSED_DATA:[[0, 5000]],
StateKey.IN_PROCESS_DATA:[],
StateKey.FILE_SIZE: 1939566}
log.debug('-------------------------------------------------------------Starting test_bad_data')
self.parser = DostaLnWfpSioMuleParser(self.config, self.state, self.stream_handle,
self.state_callback, self.pub_callback, self.exception_callback)
result = self.parser.get_records(1)
self.assert_(isinstance(self.exception_callback_value, UnexpectedDataException))
def test_in_process_start(self):
"""
test starting a parser with a state in the middle of processing
"""
log.debug('-------------------------------------------------------------Starting test_in_process_start')
#[2818:2982] contains the first WE SIO header
new_state = {StateKey.IN_PROCESS_DATA:[[2818,2982,3,0], [4059,4673,18,0]],
StateKey.UNPROCESSED_DATA:[[2818,2982], [4058,5000]],
StateKey.FILE_SIZE: 1939566}
self.stream_handle = open(os.path.join(RESOURCE_PATH, 'node58p1.dat'))
self.parser = DostaLnWfpSioMuleParser(self.config, new_state, self.stream_handle,
self.state_callback, self.pub_callback, self.exception_callback)
result = self.parser.get_records(1)
self.assert_result(result, [[2818,2982,3,1], [4059,4673,18,0]],
[[2818,2982], [4058,5000]], self.particle_1a)
result = self.parser.get_records(2)
self.assertEqual(result[0], self.particle_1b)
self.assertEqual(result[1], self.particle_1c)
self.assert_state([[4059,4673,18,0]], [[4058,5000]])
self.assertEqual(self.publish_callback_value[-1], self.particle_1c)
result = self.parser.get_records(1)
self.assert_result(result, [[4059,4673,18,1]],
[[4058,5000]], self.particle_2a)
self.stream_handle.close()
def test_set_state(self):
"""
Test changing to a new state after initializing the parser and
reading data, as if new data has been found and the state has
changed
"""
log.debug('-------------------------------------------------Starting test_set_state')
self.state = {StateKey.UNPROCESSED_DATA:[[4059, 4673]],
StateKey.IN_PROCESS_DATA:[],
StateKey.FILE_SIZE: 1939566}
new_state = {StateKey.UNPROCESSED_DATA:[[2818, 2982], [4058, 4059], [4673, 5000]],
StateKey.IN_PROCESS_DATA:[[2818, 2982, 3, 0]],
StateKey.FILE_SIZE: 1939566}
self.stream_handle = open(os.path.join(RESOURCE_PATH,
'node58p1.dat'))
self.parser = DostaLnWfpSioMuleParser(self.config, self.state, self.stream_handle,
self.state_callback, self.pub_callback, self.exception_callback)
# only 18 left in file at this point. Drain them, and make sure the next fetch fails
result = self.parser.get_records(17)
self.assert_state([[4059, 4673, 18, 17]],[[4059, 4673]])
result = self.parser.get_records(1)
result = self.parser.get_records(1)
self.assertEqual(result, [])
self.parser.set_state(new_state)
result = self.parser.get_records(1)
self.assert_result(result,
[[2818, 2982, 3, 1]],
[[2818, 2982], [4058, 4059], [4673, 5000]],
self.particle_1a)
self.stream_handle.close()
def test_update(self):
"""
Test a file which has had a section of data replaced by 0s, as if a block of data has not been received yet,
then using the returned state make a new parser with the test data that has the 0s filled in
"""
log.debug('------------------------------------------------------Starting test_update')
self.state = {StateKey.UNPROCESSED_DATA:[[0, 5000]],
StateKey.IN_PROCESS_DATA:[],
StateKey.FILE_SIZE: 1939566}
# this file has first block of WE data replaced by 0s
self.stream_handle = open(os.path.join(RESOURCE_PATH,
'node58p1_1stWE0d.dat'))
self.parser = DostaLnWfpSioMuleParser(self.config, self.state, self.stream_handle,
self.state_callback, self.pub_callback, self.exception_callback)
result = self.parser.get_records(1)
self.assert_result(result,
[[4059,4673,18,1]],
[[2818, 2982], [4058, 5000]],
self.particle_2a)
self.stream_handle.close()
next_state = self.parser._state
self.stream_handle = open(os.path.join(RESOURCE_PATH,
'node58p1.dat'))
self.parser = DostaLnWfpSioMuleParser(self.config, next_state, self.stream_handle,
self.state_callback, self.pub_callback, self.exception_callback)
# first get the old 'in process' records
        # Once those are done, the unprocessed data will be checked
# there are 18 valid records in the second WE chunk. We read one above, now we need
# to drain the remaining 17 to trigger the reparsing of the earlier block
for kk in range(0, 17):
result = self.parser.get_records(1)
# so now, the next fetch should find the now-replaced earlier data
result = self.parser.get_records(1)
self.assert_result(result,
[[2818, 2982, 3, 1]],
[[2818, 2982], [4058, 4059], [4673, 5000]],
self.particle_1a)
        # this should be the first of the newly filled-in particles from the first WE block
result = self.parser.get_records(1)
self.assert_result(result,
[[2818, 2982, 3, 2]],
[[2818, 2982], [4058, 4059], [4673, 5000]],
self.particle_1b)
self.stream_handle.close()
def test_bad_e_record(self):
"""
Ensure that the bad record causes a sample exception. The file 'bad_e_record.dat'
includes a record containing one byte less than the expected 30 for the
flord_l_wfp_sio_mule. The 'Number of Data Bytes' and the 'CRC Checksum' values in the
SIO Mule header have been modified accordingly.
"""
self.stream_handle = open(os.path.join(RESOURCE_PATH, 'bad_e_record.dat'))
self.state = {StateKey.UNPROCESSED_DATA:[[0, 5000]],
StateKey.IN_PROCESS_DATA:[], StateKey.FILE_SIZE:[]}
self.parser = DostaLnWfpSioMuleParser(self.config, self.state, self.stream_handle,
self.state_callback, self.pub_callback, self.exception_callback)
result = self.parser.get_records(1)
self.assert_(isinstance(self.exception_callback_value, UnexpectedDataException))
|
ooici/marine-integrations
|
mi/dataset/parser/test/test_dosta_ln_wfp_sio_mule.py
|
Python
|
bsd-2-clause
| 18,792
|
# -*- mode: python; coding: utf-8; -*-
__author__ = "Kenny Meyer"
__email__ = "knny.myer@gmail.com"
from django.conf.urls.defaults import *
import views
urlpatterns = patterns('',
url(r'^$',
views.list_flashcards,
name = 'list_flashcards'),
url(r'^practice/$',
views.practice_flashcards,
name = 'practice_flashcards'),
url(r'^practice/(?P<mode>\w+)/$',
views.practice_flashcards,
name = 'practice_flashcards'),
url(r'^rating/$',
views.process_rating,
name = 'process_rating'),
url(r'^create/$',
views.create_flashcard,
name = 'create_flashcard'),
url(r'^(\d+)/$',
views.show_details_about,
name = 'show_details'),
url(r'^(\d+)/edit/$',
views.edit_flashcard,
name = 'edit_flashcard'),
url(r'^(\d+)/delete/$',
views.delete_flashcard,
name = 'delete_flashcard'),
)
|
kennym/django-flashcard
|
src/flashcard/urls.py
|
Python
|
bsd-2-clause
| 927
|
#!/usr/bin/env python
""" statistic
# ----
# License: BSD
# ----
# 0.1: init version - 2016.6 - by Nick Qian
"""
# Debug verbosity switches (assumed defaults; set to True for verbose output).
VERBOSE_B = VERBOSE_C = VERBOSE_D = VERBOSE_E = False
def statistic(bags):
"""example:
Niu 1-9: 1:98598, 2:100122, 3:100394, 4:101250, 5:100785, 6:100239, 7:100176, 8:100327, 9:100417
Niu 10: 97692 <NiuNiu> 10%
Niu 11-12: 22550, 22528 <DuiZi> <JinNiu> 2.25%
Niu 13-15: 7455, 7515, 7620 <BaoZi> <ManNiu> <ShunZi> 0.75%
    rW_avg = (100000*(2+3+4+5+6+7+8+9+10) + 22540*(11+12) + 7500*(13+14+15)) / 1,000,000
           = (5,400,000 + 518,420 + 315,000) / 1,000,000 = 6.23342%
rL_avg = rW_avg
"""
count_1, count_2, count_3, count_4, count_5 = 0,0,0,0,0
count_6, count_7, count_8, count_9, count_0 = 0,0,0,0,0
niuSum = []
nius = []
BaoZi = []
ManNiu = []
ShunZi = []
DuiZi = []
JinNiu = []
for threeWei in bags:
if (threeWei[-4:] == '1.11') or (threeWei[-4:] == '2.22') or (threeWei[-4:] == '3.33'):
if VERBOSE_B:
print ("Info*: Baozi:", threeWei)
BaoZi.append(threeWei)
if (threeWei[-4:] == '1.00') or (threeWei[-4:] == '2.00') or (threeWei[-4:] == '3.00'):
if VERBOSE_C:
print ("Info*: ManNiu:", threeWei)
ManNiu.append(threeWei)
        if (threeWei[-4:] == '1.23') or (threeWei[-4:] == '2.34') or (threeWei[-4:] == '3.45'):
            if VERBOSE_C:
                print ("Info*: ShunZi:", threeWei)
            ShunZi.append(threeWei)
        if (threeWei[-4] == '0') and (threeWei[-2] == threeWei[-1]):
            #print ("Info*: DuiZi:", threeWei)
            DuiZi.append(threeWei)
        if (threeWei[-4] == '0') and (threeWei[-1] == '0'):
            #print ("Info*: JinNiu:", threeWei)
            JinNiu.append(threeWei)
for threeInt in bags:
niuSum.append( int(threeInt[-4]) + int(threeInt[-1]) + int(threeInt[-2]) )
#print ("NiuSum is:", niuSum)
for sumrlst in niuSum:
sumrlstString = str(sumrlst)
nius.append(int(sumrlstString[-1]))
#print ("Niu are:", nius)
    for i in range(0, len(nius)):
        if nius[i] == 1:
            count_1 += 1
        if nius[i] == 2:
            count_2 += 1
        if nius[i] == 3:
            count_3 += 1
        if nius[i] == 4:
            count_4 += 1
        if nius[i] == 5:
            count_5 += 1
        if nius[i] == 6:
            count_6 += 1
        if nius[i] == 7:
            count_7 += 1
        if nius[i] == 8:
            count_8 += 1
        if nius[i] == 9:
            count_9 += 1
        if nius[i] == 0:
            count_0 += 1
print ("Info: Freq Sum [0->9]:", count_0, count_1, count_2, count_3, count_4,
count_5, count_6, count_7, count_8, count_9)
print ("Info: Freq <BaoZi> <ManNiu> <ShunZi> <DuiZi> <JinNiu>:", len(BaoZi), len(ManNiu), len(ShunZi), len(DuiZi), len(JinNiu) )
def statistic_r(bags): #rW & rL
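    # Pair the bags as (my bag, dealer bag) and score each with computeOneBag
    # (assumed to be defined elsewhere); rW is the mean winning niu over hands I
    # win, rL the mean (negative) dealer niu over hands I lose.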
my_rW = []
my_rL = []
rW = 0
rL = 0
    for n in range(0, len(bags) // 2):
mybag = bags[n*2]
my_niu = computeOneBag(mybag)
dealerbag = bags[n*2+1]
dealer_niu = computeOneBag(dealerbag)
if VERBOSE_E:
print("--my bag:%s, dealer bag:%s |my niu:%d, dealer niu:%d"%(mybag, dealerbag, my_niu, dealer_niu))
if my_niu > dealer_niu:
if VERBOSE_D:
print("-->No.%d: I win : %d" %(n, my_niu) )
my_rW.append(my_niu)
#dealer_rL.append[my_niu]
elif my_niu < dealer_niu:
if VERBOSE_D:
print("-->No.%d: I lost: %d, " %(n, (0-dealer_niu)) )
my_rL.append(0-dealer_niu)
rW = (float(sum(my_rW))) / len(my_rW)
rL = (float(sum(my_rL))) / len(my_rL)
if VERBOSE_D:
print ("Info: statistic_r: my_rW is:", my_rW, "my_rL is:", my_rL)
print ("->statistic_r: rW is:",rW, "rL is:",rL)
|
NickQian/pyWager
|
statistic.py
|
Python
|
bsd-2-clause
| 4,094
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from kaarmebot import KaarmeBotApp
from kaarmebot import predicates as p
import example_plugin
app_conf = {
'servers': {
'some_server': {
'address': ('someserver.org', 6667),
'real_name': 'Sir Bot McBotsworth, Esq.',
'nick': 'BotName',
'username': 'botname',
'channels': ('#something',)
}
}
}
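# Each binding pairs a message predicate with a plugin handler name: 'echo' for
# PRIVMSGs whose body matches '.*: .*' (e.g. 'nick: message') and 'utube' for
# PRIVMSGs containing youtube.com/watch or youtu.be links.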
plugin_bindings = [
(p.All(p.PrivMsg, p.BodyRegEx('.*: .*')), 'echo'),
(p.All(p.PrivMsg,
p.BodyRegEx('.*https?://[.\w]*youtube\.com/watch\?[^\s]+.*')),
'utube'),
(p.All(p.PrivMsg,
p.BodyRegEx('.*https?://youtu\.be/[^\s]+.*')),
'utube'),
]
if __name__ == '__main__':
app = KaarmeBotApp(app_conf)
app.scan(example_plugin)
for predicate, name in plugin_bindings:
app.add_binding(predicate, name)
app.start()
|
jkpl/kaarmebot
|
example.py
|
Python
|
bsd-2-clause
| 900
|
# -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant le paramètre 'commande' de la commande 'chgroupe'."""
from primaires.interpreteur.masque.parametre import Parametre
class PrmCommande(Parametre):
"""Commande 'groupe commande'.
"""
def __init__(self):
"""Constructeur du paramètre"""
Parametre.__init__(self, "commande", "command")
self.schema = "<chemin_commande> <groupe_existant>"
self.aide_courte = "change une commande de groupe"
self.aide_longue = \
"Utilisez cette commande pour changer une commande de " \
"groupe. Si par exemple vous voulez rendre l'écriture de " \
"mudmails accessibles aux joueurs, déplacez la commande " \
"|cmd|messages|ff| dans le groupe |tit|joueur|ff| grâce à la " \
"commande : %chgroupe% %chgroupe:commande% |cmd|messages " \
"joueur|ff|. Il est préférable, quand vous ajoutez " \
"une nouvelle commande au MUD, de la placer d'office " \
"dans un groupe essentiel (|tit|pnj|ff|, |tit|joueur|ff| " \
"ou |tit|administrateur|ff|). Une fois que la commande " \
"a bien été ajoutée, vous pourrez la déplacer dans " \
"le groupe final de destination. " \
"Enfin, sachez qu'en déplaçant une commande, toutes ses " \
"sous-commandes seront déplacées dans le même groupe. Pour "\
"évitez cela, mettez un point (|cmd|.|ff|) après le nom de " \
"votre commande. Si vous faites %chgroupe% %chgroupe:commande% " \
"|cmd|messages. joueur|ff|, la commande |cmd|mail|ff| " \
"sera déplacée mais aucun de ses paramètres. " \
"Libre à vous de les transférer ensuite un à un pour " \
"n'autoriser que certains paramètres aux " \
"joueurs, tout en en laissant certains accessibles qu'aux " \
"administrateurs."
def interpreter(self, personnage, dic_masques):
"""Interprétation du paramètre"""
chemins = dic_masques["chemin_commande"].chemins
nom_groupe = dic_masques["groupe_existant"].nom_groupe
for chemin in chemins:
type(self).importeur.interpreteur.groupes.changer_groupe_commande(
chemin, nom_groupe)
nb_mod = len(chemins)
s = ""
if nb_mod > 1:
s = "s"
personnage << "{0} commande{s} déplacée{s} dans le groupe {1}.".format(
nb_mod, nom_groupe, s=s)
|
vlegoff/tsunami
|
src/primaires/joueur/commandes/chgroupe/commande.py
|
Python
|
bsd-3-clause
| 4,114
|
import unittest
from maskgen.external.api import *
from tests.test_support import TestSupport
import os
import numpy as np
import random
from maskgen.maskgen_loader import MaskGenLoader
import sys
class TestExternalAPI(TestSupport):
loader = MaskGenLoader()
def setUp(self):
self.loader.load()
def test_pull(self):
token = self.loader.get_key('apitoken')
url = self.loader.get_key('apiurl')
params = {}
params['width'] = 2180
params['media_type'] = 'video'
name = findAndDownloadImage(token,url,params,'.',prefix='videos')
self.assertTrue(name is not None)
os.remove(name)
if __name__ == '__main__':
unittest.main()
|
rwgdrummer/maskgen
|
tests/external/test_external_api.py
|
Python
|
bsd-3-clause
| 711
|
""" Tests for glm function in glm module
This checks the glm function with the procedure in the "Basic linear
modeling" exercise from Day 14.
Run at the project directory with:
nosetests code/utils/tests/test_glm.py
"""
# Loading modules.
import numpy as np
import nibabel as nib
import os
import sys
from numpy.testing import assert_almost_equal, assert_array_equal, assert_equal
# Add path to functions to the system path.
sys.path.append(os.path.join(os.path.dirname(__file__), "../functions/"))
# Load our visualization functions.
from Image_Visualizing import present_3d, make_mask
def test_present():
# Read in the image data.
data = np.arange(100000)
data = data.reshape((100,100,10))
full=present_3d(data)
assert full.shape == (400,300)
def test_mask():
# example from http://www.jarrodmillman.com/rcsds/lectures/glm_intro.html
# it should be pointed out that hypothesis just looks at simple linear regression
data = np.arange(1000000)
data = data.reshape((100,100,100))
mask1 = np.ones((100,100,100))
mask2 = np.zeros((100,100,100))
mask3 = np.ones((200,200,100))
assert_equal(make_mask(data, mask1), data)
assert_equal(make_mask(data,mask2), mask2)
assert_equal(make_mask(data,mask3,fit=True).shape, data.shape)
    x = False
    try:
        make_mask(data, mask3, fit=False)
    except ValueError:
        x = True
    assert x is True
|
reychil/project-alpha-1
|
code/utils/tests/test_imaging.py
|
Python
|
bsd-3-clause
| 1,461
|
import os
import numpy as np
if __name__ == '__main__':
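    # Write a 'classes' file mapping each style directory name (spaces replaced
    # with underscores) to an integer class label.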
HOME = os.environ['HOME']
dataset_path = os.path.join(HOME, 'CaffeProjects/data/style')
source_path = os.path.join(HOME, 'PycharmProjects/Dataset/wikipainting/style')
f = open('classes', 'w')
images_path = os.path.join(source_path)
print len(os.listdir(images_path))
for cls, cls_name in enumerate(os.listdir(images_path)):
cls_name_replaced = cls_name.replace(' ', '_')
f.write(cls_name_replaced + ' ' + str(cls) + '\n')
f.close()
|
cs-chan/fuzzyDCN
|
prune_caffe/data/wikiart/style/gen_cls_file.py
|
Python
|
bsd-3-clause
| 541
|
# Imports from Django
from django.conf.urls.defaults import *
from django.contrib.sites.models import Site
# Imports from brubeck
from brubeck.podcasts.models import Channel, Episode
urlpatterns = patterns('django.views.generic.list_detail',
url(r'^episodes/(?P<object_id>\d+)/$', 'object_detail', {'queryset': Episode.objects.all()}, name='podcasts-episode-detail'),
(r'^episodes/$', 'object_list', {'queryset': Episode.objects.all(), 'extra_context': {'page_title': 'All episodes'}}),
(r'^archived/$', 'object_list', {'queryset': Channel.old.all(), 'extra_context': {'page_title': 'Archived channels'}}),
# url(r'^(?P<slug>[-\w]+)/$', 'object_detail', {'queryset': Channel.objects.all(), 'template_name': 'podcasts/episode_list.html'}, name='podcasts-channel-detail'),
# (r'^$', 'object_list', {'queryset': Channel.current.all(), 'extra_context': {'page_title': 'Current channels'}})
)
urlpatterns += patterns('',
(r'^calendar/(?P<year>\d{4})/(?P<month>\w{1,2})/(?P<day>\w{1,2})/$', 'brubeck.podcasts.views.calendar_day_view'),
(r'^calendar/(?P<year>\d{4})/(?P<month>\w{1,2})/$', 'brubeck.podcasts.views.calendar_view'),
(r'^calendar/(?P<year>\d{4})/$', 'brubeck.podcasts.views.calendar_view'),
(r'^calendar/$', 'brubeck.podcasts.views.calendar_view'),
(r'^(?P<channel_slug>[-\w]+)/calendar/(?P<year>\d{4})/(?P<month>\w{1,2})/(?P<day>\w{1,2})/$', 'brubeck.podcasts.views.calendar_day_view'),
(r'^(?P<channel_slug>[-\w]+)/calendar/(?P<year>\d{4})/(?P<month>\w{1,2})/$', 'brubeck.podcasts.views.calendar_view'),
(r'^(?P<channel_slug>[-\w]+)/calendar/(?P<year>\d{4})/$', 'brubeck.podcasts.views.calendar_view'),
(r'^(?P<channel_slug>[-\w]+)/calendar/$', 'brubeck.podcasts.views.calendar_view'),
(r'^(?P<slug>[-\w]+)/$', 'brubeck.podcasts.views.archive_view'),
)
# urlpatterns += patterns('django.views.generic.list_detail',
# (r'^$', 'object_list', {'queryset': Channel.current.all(), 'extra_context': {'page_title': 'Current channels'}}),
# )
# urlpatterns += patterns('django.views.generic.list_detail',
# url(r'^(?P<slug>[-\w]+)/$', 'object_detail', {'queryset': Channel.objects.all(), 'template_name': 'podcasts/episode_list.html'}, name='podcasts-channel-detail'),
# )
urlpatterns += patterns('',
(r'^$', 'brubeck.podcasts.views.list_view'),
)
|
albatrossandco/brubeck_cms
|
brubeck/podcasts/urls.py
|
Python
|
bsd-3-clause
| 2,320
|
import pytest
from unittest.mock import MagicMock
from torrt.toolbox import *
from torrt.utils import BotObjectsRegistry
@pytest.fixture(scope='function', autouse=True)
def clear_bot_registry():
"""HACK: clears all registered bots before each test.
otherwise test order matters
"""
BotObjectsRegistry._items.clear()
@pytest.fixture(scope='function')
def mock_telegram_plugin(monkeypatch):
"""MagicMock'ing everything under `telegram_bot` that's `python-telegram`-related"""
for target in (
'telegram',
'ReplyKeyboardMarkup', 'ReplyKeyboardRemove', 'InlineKeyboardMarkup', 'InlineKeyboardButton', 'Update',
'Filters', 'Updater', 'ConversationHandler', 'CommandHandler', 'MessageHandler', 'CallbackContext', 'CallbackQueryHandler'
):
monkeypatch.setattr(f'torrt.bots.telegram_bot.{target}', MagicMock(), raising=False)
# yield
def test_register_unregister_torrent(mock_config):
hashstr = '1234567890'
register_torrent(hashstr, url='https://exmaple.com/here/')
assert hashstr in mock_config['torrents']
unregister_torrent(hashstr)
assert not mock_config['torrents']
def test_set_walk_interval(mock_config):
set_walk_interval(16)
assert mock_config['walk_interval_hours'] == 16
def test_configure_logging():
configure_logging(show_logger_names=True)
def test_bots(mock_config, monkeypatch, mock_telegram_plugin):
monkeypatch.setattr('torrt.bots.telegram_bot.TelegramBot.test_configuration', lambda *args: True)
bot = configure_bot('telegram', {'token': 'xxx'})
bot.register()
assert mock_config['bots']['telegram']
run_bots(['telegram'])
remove_bot('telegram')
assert not mock_config['bots']
def test_no_bots_to_run_exists():
with pytest.raises(SystemExit) as excinfo:
run_bots(['stub'])
assert excinfo.value.code == 1
def test_telegram_without_plugin_raises_exception(monkeypatch):
    monkeypatch.setattr('torrt.bots.telegram_bot.telegram', None)
with pytest.raises(BotRegistrationFailed) as excinfo:
configure_bot('telegram', {'token': 'xxx'})
assert 'python-telegram-bot' in str(excinfo.value)
def test_bot_configured_but_unregistered(monkeypatch, mock_telegram_plugin):
    # configure the 'mocked' bot just to get its configuration into the config
    monkeypatch.setattr('torrt.bots.telegram_bot.TelegramBot.test_configuration', lambda *args: True)
    configure_bot('telegram', {'token': 'xxx'})
    # make the bot unspawnable by forcibly removing the required plugin mock
    monkeypatch.setattr('torrt.bots.telegram_bot.telegram', None)
    # check that the user gets a warning about a configured but unregistered bot
    mocked_logger = MagicMock()
    with monkeypatch.context() as m:
        m.setattr('torrt.toolbox.LOGGER', mocked_logger)
        init_object_registries()
    mocked_logger.warn.assert_called_once()
    assert 'bot is configured, but failed to register' in mocked_logger.warn.call_args[0][0]
def test_notifiers(mock_config, monkeypatch):
monkeypatch.setattr('torrt.notifiers.telegram.TelegramNotifier.test_configuration', lambda *args: True)
configure_notifier('telegram', {'token': 'xxx', 'chat_id': 'xxx'})
assert mock_config['notifiers']['telegram']
remove_notifier('telegram')
assert not mock_config['notifiers']
|
idlesign/torrt
|
tests/test_toolbox.py
|
Python
|
bsd-3-clause
| 3,339
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from unittest import TestCase, main
import numpy as np
import numpy.testing as npt
from skbio.sequence import IUPACSequence
from skbio.util import classproperty
class ExampleIUPACSequence(IUPACSequence):
@classproperty
def degenerate_map(cls):
return {"X": set("AB"), "Y": set("BC"), "Z": set("AC")}
@classproperty
def nondegenerate_chars(cls):
return set("ABC")
class ExampleMotifsTester(ExampleIUPACSequence):
@property
def _motifs(self):
        # These aren't really motifs, just a way to exercise the code paths
return {
"name1": lambda x, _, __: str(x),
"name2": lambda x, _, __: len(x)
}
class TestIUPACSequence(TestCase):
def test_instantiation_with_no_implementation(self):
class IUPACSequenceSubclassNoImplementation(IUPACSequence):
pass
with self.assertRaises(TypeError) as cm:
IUPACSequenceSubclassNoImplementation()
self.assertIn("abstract class", str(cm.exception))
self.assertIn("nondegenerate_chars", str(cm.exception))
self.assertIn("degenerate_map", str(cm.exception))
def test_init_default_parameters(self):
seq = ExampleIUPACSequence('.-ABCXYZ')
npt.assert_equal(seq.values, np.array('.-ABCXYZ', dtype='c'))
self.assertEqual(seq.id, "")
self.assertEqual(seq.description, "")
self.assertIsNone(seq.quality)
def test_init_nondefault_parameters(self):
seq = ExampleIUPACSequence('.-ABCXYZ', id='foo', description='bar baz',
quality=range(8))
npt.assert_equal(seq.values, np.array('.-ABCXYZ', dtype='c'))
self.assertEqual(seq.id, 'foo')
self.assertEqual(seq.description, 'bar baz')
npt.assert_equal(seq.quality, np.array(range(8), dtype='int'))
def test_init_valid_empty_sequence(self):
# just make sure we can instantiate an empty sequence regardless of
# `validate` and `case_insensitive` parameters. more extensive tests
# are performed in Sequence base class unit tests
for validate in (True, False):
for case_insensitive in (True, False):
seq = ExampleIUPACSequence('', validate=validate,
case_insensitive=case_insensitive)
self.assertEqual(seq, ExampleIUPACSequence(''))
def test_init_valid_single_character_sequence(self):
for validate in (True, False):
for case_insensitive in (True, False):
seq = ExampleIUPACSequence('C', validate=validate,
case_insensitive=case_insensitive)
self.assertEqual(seq, ExampleIUPACSequence('C'))
def test_init_valid_multiple_character_sequence(self):
for validate in (True, False):
for case_insensitive in (True, False):
seq = ExampleIUPACSequence('BAACB.XYY-AZ', validate=validate,
case_insensitive=case_insensitive)
self.assertEqual(seq, ExampleIUPACSequence('BAACB.XYY-AZ'))
def test_init_validate_parameter_single_character(self):
seq = 'w'
with self.assertRaisesRegexp(ValueError, "character.*'w'"):
ExampleIUPACSequence(seq)
# test that we can instantiate an invalid sequence. we don't guarantee
# anything working beyond instantiation
ExampleIUPACSequence(seq, validate=False)
def test_init_validate_parameter_multiple_characters(self):
# mix of valid and invalid characters with repeats and lowercased
# alphabet characters
seq = 'CBCBBbawCbbwBXYZ-.x'
with self.assertRaisesRegexp(ValueError, "\['a', 'b', 'w', 'x'\]"):
ExampleIUPACSequence(seq)
ExampleIUPACSequence(seq, validate=False)
def test_init_case_insensitive_lowercase(self):
s = 'cbcbbbazcbbzbxyz-.x'
with self.assertRaisesRegexp(ValueError,
"\['a', 'b', 'c', 'x', 'y', 'z'\]"):
ExampleIUPACSequence(s)
seq = ExampleIUPACSequence(s, case_insensitive=True)
self.assertEqual(seq, ExampleIUPACSequence('CBCBBBAZCBBZBXYZ-.X'))
def test_init_case_insensitive_mixed_case(self):
s = 'CBCBBbazCbbzBXYZ-.x'
with self.assertRaisesRegexp(ValueError, "\['a', 'b', 'x', 'z'\]"):
ExampleIUPACSequence(s)
seq = ExampleIUPACSequence(s, case_insensitive=True)
self.assertEqual(seq, ExampleIUPACSequence('CBCBBBAZCBBZBXYZ-.X'))
def test_init_case_insensitive_no_validation(self):
s = 'car'
with self.assertRaisesRegexp(ValueError, "\['a', 'c', 'r'\]"):
ExampleIUPACSequence(s)
with self.assertRaisesRegexp(ValueError, "character.*'R'"):
ExampleIUPACSequence(s, case_insensitive=True)
ExampleIUPACSequence(s, case_insensitive=True, validate=False)
def test_init_case_insensitive_byte_ownership(self):
bytes = np.array([97, 98, 97], dtype=np.uint8)
with self.assertRaisesRegexp(ValueError, "\['a', 'b'\]"):
ExampleIUPACSequence(bytes)
seq = ExampleIUPACSequence(bytes, case_insensitive=True)
self.assertEqual(seq, ExampleIUPACSequence('ABA'))
# should not share the same memory
self.assertIsNot(seq._bytes, bytes)
# we should have copied `bytes` before modifying in place to convert to
# upper. make sure `bytes` hasn't been mutated
npt.assert_equal(bytes, np.array([97, 98, 97], dtype=np.uint8))
def test_degenerate_chars(self):
expected = set("XYZ")
self.assertIs(type(ExampleIUPACSequence.degenerate_chars), set)
self.assertEqual(ExampleIUPACSequence.degenerate_chars, expected)
ExampleIUPACSequence.degenerate_chars.add("W")
self.assertEqual(ExampleIUPACSequence.degenerate_chars, expected)
self.assertEqual(ExampleIUPACSequence('').degenerate_chars, expected)
with self.assertRaises(AttributeError):
ExampleIUPACSequence('').degenerate_chars = set("BAR")
def test_nondegenerate_chars(self):
expected = set("ABC")
self.assertEqual(ExampleIUPACSequence.nondegenerate_chars, expected)
ExampleIUPACSequence.degenerate_chars.add("D")
self.assertEqual(ExampleIUPACSequence.nondegenerate_chars, expected)
self.assertEqual(ExampleIUPACSequence('').nondegenerate_chars,
expected)
with self.assertRaises(AttributeError):
ExampleIUPACSequence('').nondegenerate_chars = set("BAR")
def test_gap_chars(self):
expected = set(".-")
self.assertIs(type(ExampleIUPACSequence.gap_chars), set)
self.assertEqual(ExampleIUPACSequence.gap_chars, expected)
ExampleIUPACSequence.gap_chars.add("_")
self.assertEqual(ExampleIUPACSequence.gap_chars, expected)
self.assertEqual(ExampleIUPACSequence('').gap_chars, expected)
with self.assertRaises(AttributeError):
ExampleIUPACSequence('').gap_chars = set("_ =")
def test_alphabet(self):
expected = set("ABC.-XYZ")
self.assertIs(type(ExampleIUPACSequence.alphabet), set)
self.assertEqual(ExampleIUPACSequence.alphabet, expected)
ExampleIUPACSequence.alphabet.add("DEF")
self.assertEqual(ExampleIUPACSequence.alphabet, expected)
self.assertEqual(ExampleIUPACSequence('').alphabet, expected)
with self.assertRaises(AttributeError):
ExampleIUPACSequence('').alphabet = set("ABCDEFG.-WXYZ")
def test_degenerate_map(self):
expected = {"X": set("AB"), "Y": set("BC"), "Z": set("AC")}
self.assertEqual(ExampleIUPACSequence.degenerate_map, expected)
ExampleIUPACSequence.degenerate_map['W'] = set("ABC")
ExampleIUPACSequence.degenerate_map['X'] = set("CA")
self.assertEqual(ExampleIUPACSequence.degenerate_map, expected)
self.assertEqual(ExampleIUPACSequence('').degenerate_map, expected)
with self.assertRaises(AttributeError):
ExampleIUPACSequence('').degenerate_map = {'W': "ABC"}
def test_gaps(self):
self.assertIs(type(ExampleIUPACSequence("").gaps()), np.ndarray)
self.assertIs(ExampleIUPACSequence("").gaps().dtype, np.dtype('bool'))
npt.assert_equal(ExampleIUPACSequence("ABCXBZYABC").gaps(),
np.zeros(10).astype(bool))
npt.assert_equal(ExampleIUPACSequence(".-.-.").gaps(),
np.ones(5).astype(bool))
npt.assert_equal(ExampleIUPACSequence("A.B-C.X-Y.").gaps(),
np.array([0, 1] * 5, dtype=bool))
npt.assert_equal(ExampleIUPACSequence("AB.AC.XY-").gaps(),
np.array([0, 0, 1] * 3, dtype=bool))
npt.assert_equal(ExampleIUPACSequence("A.BC.-").gaps(),
np.array([0, 1, 0, 0, 1, 1], dtype=bool))
def test_has_gaps(self):
self.assertIs(type(ExampleIUPACSequence("").has_gaps()), bool)
self.assertIs(type(ExampleIUPACSequence("-").has_gaps()), bool)
self.assertFalse(ExampleIUPACSequence("").has_gaps())
self.assertFalse(ExampleIUPACSequence("ABCXYZ").has_gaps())
self.assertTrue(ExampleIUPACSequence("-").has_gaps())
self.assertTrue(ExampleIUPACSequence("ABCXYZ-").has_gaps())
def test_degenerates(self):
self.assertIs(type(ExampleIUPACSequence("").degenerates()), np.ndarray)
self.assertIs(ExampleIUPACSequence("").degenerates().dtype,
np.dtype('bool'))
npt.assert_equal(ExampleIUPACSequence("ABCBC-.AB.").degenerates(),
np.zeros(10).astype(bool))
npt.assert_equal(ExampleIUPACSequence("ZYZYZ").degenerates(),
np.ones(5).astype(bool))
npt.assert_equal(ExampleIUPACSequence("AX.Y-ZBXCZ").degenerates(),
np.array([0, 1] * 5, dtype=bool))
npt.assert_equal(ExampleIUPACSequence("ABXACY.-Z").degenerates(),
np.array([0, 0, 1] * 3, dtype=bool))
npt.assert_equal(ExampleIUPACSequence("AZBCXY").degenerates(),
np.array([0, 1, 0, 0, 1, 1], dtype=bool))
def test_has_degenerates(self):
self.assertIs(type(ExampleIUPACSequence("").has_degenerates()), bool)
self.assertIs(type(ExampleIUPACSequence("X").has_degenerates()), bool)
self.assertFalse(ExampleIUPACSequence("").has_degenerates())
self.assertFalse(ExampleIUPACSequence("A-.BC").has_degenerates())
self.assertTrue(ExampleIUPACSequence("Z").has_degenerates())
self.assertTrue(ExampleIUPACSequence("ABC.XYZ-").has_degenerates())
def test_nondegenerates(self):
self.assertIs(type(ExampleIUPACSequence("").nondegenerates()),
np.ndarray)
self.assertIs(ExampleIUPACSequence("").nondegenerates().dtype,
np.dtype('bool'))
npt.assert_equal(ExampleIUPACSequence("XYZYZ-.XY.").nondegenerates(),
np.zeros(10).astype(bool))
npt.assert_equal(ExampleIUPACSequence("ABABA").nondegenerates(),
np.ones(5).astype(bool))
npt.assert_equal(ExampleIUPACSequence("XA.B-AZCXA").nondegenerates(),
np.array([0, 1] * 5, dtype=bool))
npt.assert_equal(ExampleIUPACSequence("XXAZZB.-C").nondegenerates(),
np.array([0, 0, 1] * 3, dtype=bool))
npt.assert_equal(ExampleIUPACSequence("YB.-AC").nondegenerates(),
np.array([0, 1, 0, 0, 1, 1], dtype=bool))
def test_has_nondegenerates(self):
self.assertIs(type(ExampleIUPACSequence("").has_nondegenerates()),
bool)
self.assertIs(type(ExampleIUPACSequence("A").has_nondegenerates()),
bool)
self.assertFalse(ExampleIUPACSequence("").has_nondegenerates())
self.assertFalse(ExampleIUPACSequence("X-.YZ").has_nondegenerates())
self.assertTrue(ExampleIUPACSequence("C").has_nondegenerates())
self.assertTrue(ExampleIUPACSequence(".XYZ-ABC").has_nondegenerates())
def test_degap(self):
kw = {
'id': 'some_id',
'description': 'some description',
}
self.assertEquals(ExampleIUPACSequence("", quality=[], **kw).degap(),
ExampleIUPACSequence("", quality=[], **kw))
self.assertEquals(ExampleIUPACSequence("ABCXYZ", quality=np.arange(6),
**kw).degap(),
ExampleIUPACSequence("ABCXYZ", quality=np.arange(6),
**kw))
self.assertEquals(ExampleIUPACSequence("ABC-XYZ", quality=np.arange(7),
**kw).degap(),
ExampleIUPACSequence("ABCXYZ",
quality=[0, 1, 2, 4, 5, 6],
**kw))
self.assertEquals(ExampleIUPACSequence(".-ABC-XYZ.",
quality=np.arange(10), **kw
).degap(),
ExampleIUPACSequence("ABCXYZ",
quality=[2, 3, 4, 6, 7, 8],
**kw))
self.assertEquals(ExampleIUPACSequence("---.-.-.-.-.",
quality=np.arange(12), **kw
).degap(),
ExampleIUPACSequence("", quality=[], **kw))
def test_expand_degenerates_no_degens(self):
seq = ExampleIUPACSequence("ABCABCABC")
self.assertEqual(list(seq.expand_degenerates()), [seq])
def test_expand_degenerates_all_degens(self):
exp = [ExampleIUPACSequence('ABA'), ExampleIUPACSequence('ABC'),
ExampleIUPACSequence('ACA'), ExampleIUPACSequence('ACC'),
ExampleIUPACSequence('BBA'), ExampleIUPACSequence('BBC'),
ExampleIUPACSequence('BCA'), ExampleIUPACSequence('BCC')]
# Sort based on sequence string, as order is not guaranteed.
obs = sorted(ExampleIUPACSequence('XYZ').expand_degenerates(), key=str)
self.assertEqual(obs, exp)
def test_expand_degenerates_with_metadata(self):
kw = {
"quality": np.arange(3),
"id": "some_id",
"description": "some description"
}
exp = [ExampleIUPACSequence('ABA', **kw),
ExampleIUPACSequence('ABC', **kw),
ExampleIUPACSequence('BBA', **kw),
ExampleIUPACSequence('BBC', **kw)]
obs = sorted(ExampleIUPACSequence('XBZ', **kw).expand_degenerates(),
key=str)
self.assertEqual(obs, exp)
def test_find_motifs_no_motif(self):
seq = ExampleMotifsTester("ABCABCABC")
with self.assertRaises(ValueError) as cm:
seq.find_motifs("doesn't-exist")
self.assertIn("doesn't-exist", str(cm.exception))
seq = ExampleIUPACSequence("ABCABCABC")
with self.assertRaises(ValueError) as cm:
seq.find_motifs("doesn't-exist")
self.assertIn("doesn't-exist", str(cm.exception))
def test_find_motifs(self):
seq = ExampleMotifsTester("ABC")
self.assertEqual(seq.find_motifs("name1"), "ABC")
self.assertEqual(seq.find_motifs("name2"), 3)
if __name__ == "__main__":
main()
|
jensreeder/scikit-bio
|
skbio/sequence/tests/test_iupac_sequence.py
|
Python
|
bsd-3-clause
| 16,144
|
import os
from textwrap import dedent
from nipype import Workflow, Node, Function
from traits.api import TraitError
import pytest
from .. import frontend
class TestFrontend(object):
@pytest.fixture
def lyman_dir(self, execdir):
lyman_dir = execdir.mkdir("lyman")
os.environ["LYMAN_DIR"] = str(lyman_dir)
scans = dedent("""
subj01:
sess01:
exp_alpha: [run01, run02]
sess02:
exp_alpha: [run01]
exp_beta: [run01, run02]
subj02:
sess01:
exp_beta: [run01, run03]
""")
project = dedent("""
data_dir = "../datums"
voxel_size = (2.5, 2.5, 2.5)
""")
experiment = dedent("""
tr = .72
smooth_fwhm = 2.5
""")
model = dedent("""
smooth_fwhm = 4
contrasts = [("a-b", ["a", "b"], [1, -1])]
""")
model_bad = dedent("""
contrasts = ["a-b", "b-a"]
""")
with open(lyman_dir.join("scans.yaml"), "w") as fid:
fid.write(scans)
with open(lyman_dir.join("project.py"), "w") as fid:
fid.write(project)
with open(lyman_dir.join("exp_alpha.py"), "w") as fid:
fid.write(experiment)
with open(lyman_dir.join("exp_alpha-model_a.py"), "w") as fid:
fid.write(model)
with open(lyman_dir.join("exp_alpha-model_b.py"), "w") as fid:
fid.write(model_bad)
yield lyman_dir
lyman_dir.remove()
def test_info(self, lyman_dir, execdir):
info = frontend.info()
assert info.data_dir == execdir.join("datums")
assert info.scan_info == {
"subj01": {"sess01": {"exp_alpha": ["run01", "run02"]},
"sess02": {"exp_alpha": ["run01"],
"exp_beta": ["run01", "run02"]}},
"subj02": {"sess01": {"exp_beta": ["run01", "run03"]}},
}
model_traits = frontend.ModelInfo().trait_get()
assert info.trait_get(*model_traits.keys()) == model_traits
info = frontend.info("exp_alpha")
assert info.tr == .72
assert info.smooth_fwhm == 2.5
info = frontend.info("exp_alpha", "model_a")
assert info.smooth_fwhm == 4
assert info.contrasts == [("a-b", ["a", "b"], [1, -1])]
with pytest.raises(TraitError):
frontend.info("exp_alpha", "model_b")
with pytest.raises(RuntimeError):
frontend.info(model="model_b")
lyman_dir_new = execdir.join("lyman2")
lyman_dir.move(lyman_dir_new)
info = frontend.info(lyman_dir=str(lyman_dir_new))
assert info.voxel_size == (2.5, 2.5, 2.5)
lyman_dir_new.move(execdir.join("lyman"))
del os.environ["LYMAN_DIR"]
info = frontend.info()
def test_subjects(self, lyman_dir, execdir):
subjects = frontend.subjects()
assert subjects == ["subj01", "subj02"]
subjects = frontend.subjects("subj01")
assert subjects == ["subj01"]
subjects = frontend.subjects(["subj01"])
assert subjects == ["subj01"]
subjects = frontend.subjects(["subj01", "subj02"])
assert subjects == ["subj01", "subj02"]
subj_file = lyman_dir.join("group_one.txt")
with open(subj_file, "w") as fid:
fid.write("subj01")
subjects = frontend.subjects("group_one")
assert subjects == ["subj01"]
subjects = frontend.subjects(["group_one"])
assert subjects == ["subj01"]
with pytest.raises(RuntimeError):
frontend.subjects(["subj01", "subj03"])
with pytest.raises(RuntimeError):
frontend.subjects(["subj01", "subj02"], ["sess01", "sess02"])
with pytest.raises(RuntimeError):
frontend.subjects(["subj02"], ["sess01", "sess02"])
del os.environ["LYMAN_DIR"]
subjects = frontend.subjects()
assert subjects == []
def test_execute(self, lyman_dir, execdir):
info = frontend.info(lyman_dir=lyman_dir)
def f(x):
return x ** 2
assert f(2) == 4
n1 = Node(Function("x", "y", f), "n1")
n2 = Node(Function("x", "y", f), "n2")
wf = Workflow("test", base_dir=info.cache_dir)
wf.connect(n1, "y", n2, "x")
wf.inputs.n1.x = 2
cache_dir = execdir.join("cache").join("test")
class args(object):
graph = False
n_procs = 1
debug = False
clear_cache = True
execute = True
frontend.execute(wf, args, info)
assert not cache_dir.exists()
args.debug = True
frontend.execute(wf, args, info)
assert cache_dir.exists()
args.debug = False
info.remove_cache = False
frontend.execute(wf, args, info)
assert cache_dir.exists()
args.execute = False
res = frontend.execute(wf, args, info)
assert res is None
args.execute = True
fname = str(execdir.join("graph").join("workflow.dot"))
args.graph = fname
res = frontend.execute(wf, args, info)
assert res == fname[:-4] + ".svg"
args.graph = True
args.stage = "preproc"
res = frontend.execute(wf, args, info)
assert res == cache_dir.join("preproc.svg")
def test_load_info_from_module(self, execdir):
lyman_dir = execdir.mkdir("lyman")
# Write a Python module to test import from disk
module_text = dedent("""
foo = "a"
bar = 3
buz = [1, 2, 3]
""")
module_fname = lyman_dir.join("test.py")
with open(module_fname, "w") as fid:
fid.write(module_text)
expected = dict(foo="a", bar=3, buz=[1, 2, 3])
module_vars = frontend.load_info_from_module("test", lyman_dir)
assert module_vars == expected
# Remove the file to test import from memory
os.remove(module_fname)
module_vars = frontend.load_info_from_module("test", lyman_dir)
assert module_vars == expected
def test_check_extra_vars(self):
with pytest.raises(RuntimeError):
module_vars = {"not_a_valid_trait": True}
frontend.check_extra_vars(module_vars, frontend.ProjectInfo)
|
mwaskom/lyman
|
lyman/tests/test_frontend.py
|
Python
|
bsd-3-clause
| 6,368
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
from django.shortcuts import render
from django.http import JsonResponse
from django.conf import settings
from author.models import author
from subprocess import Popen, PIPE, check_call
import paramiko
import datetime
import os
def gotoReaderPages(request, pageName):
    # When the reader is logged in, return the requested reader template; otherwise fall back to the login page.
if "readerId" in request.session:
return 'reader/' + pageName + '.html'
else:
return 'reader/login.html'
def getUserName(request):
if "readerId" not in request.session:
return 'reader/login.html'
return request.session["userName"]
def readerIndex(request):
return render(request, gotoReaderPages(request, "readerIndex"), {"userName": getUserName(request)})
def booksRecorded(request):
return render(request, gotoReaderPages(request, "booksRecorded"), {"userName": getUserName(request)})
def readerSetting(request):
return render(request, gotoReaderPages(request, "readerSetting"), {"userName": getUserName(request)})
def getEnableAuthorStatus(request):
    # Initialise the response dict up front so the except handler below can always populate it.
    data = {}
    try:
        if "readerId" in request.session:
            readerId = request.session["readerId"]
            data['status'] = 'success'
isAuthor = False
authorStatus = ""
if author.isExist(readerId):
isAuthor = True
request.session["isAuthor"] = True
                # authorStatus may be "active", "inactive" or "banned"
authorStatus = author.getStatus(readerId)
if authorStatus:
request.session["authorStatus"] = authorStatus
else:
request.session["authorStatus"] = ""
data["isAuthor"] = isAuthor
data["authorStatus"] = authorStatus
return JsonResponse(data)
else:
data['status'] = 'fail'
return render(request, 'reader/login.html')
except Exception as e:
data['status'] = 'fail'
return JsonResponse(data)
def modifyAuthorStatus(request):
status = ""
if request.POST:
status = request.POST['status']
if "readerId" in request.session:
data = {}
try:
readerId = request.session["readerId"]
data['status'] = 'success'
            # check whether the reader is already an author
if author.isExist(readerId):
# when reader is a author
request.session["isAuthor"] = True
data['isAuthor'] = True
authorStatus = author.getStatus(readerId)
# check author status
if authorStatus == "active":
                    # disable the author function when the reader is registered and the status is "active"
# step: modify the value of author's status in database
result = author.modifyStatus(readerId, status)
if result:
request.session["authorStatus"] = status
data['authorStatus'] = status #"inactive"
else:
data['authorStatus'] = request.session["authorStatus"]
elif authorStatus == "inactive":
# modify database
if author.modifyStatus(readerId, "active"):
data['authorStatus'] = "active"
data['message'] = ""
request.session["authorStatus"] = "active"
request.session["authorId"] = str(author.getId(readerId))
else:
data['status'] = "fail"
data['authorStatus'] = "inactive"
data['message'] = "啓用作者功能失敗!請刷新後重新嘗試或聯繫網站管理員!"
else:
data['status'] = "fail"
data['authorStatus'] = authorStatus
data['message'] = "讀取作者狀態異常!"
request.session["authorStatus"] = authorStatus
else:
# when reader is not a author
# step1: add data to table "author" in database
if author.addAuthor(readerId):
data['status'] = "success"
data['isAuthor'] = True
data['authorStatus'] = "active"
data['message'] = ""
request.session["isAuthor"] = True
request.session["authorStatus"] = "active"
else:
# This needs to check and delete error data in database.
data['status'] = "fail"
data['isAuthor'] = False
data['authorStatus'] = "inactive"
data['message'] = "啓用作者功能失敗!添加資料到數據庫中失敗!請刷新後重新嘗試或聯繫網站管理員!"
request.session["isAuthor"] = False
request.session["authorStatus"] = "inactive"
except Exception as e:
data['status'] = 'fail'
data['message'] = str(e)
return JsonResponse(data)
else:
return render(request, 'reader/login.html')
def activeAuthorFunction(request, readerId):
    # Enable the author function when the reader is registered and the status is "inactive".
    # step 1: mkdir in git server
    # step 2: modify the value of author's status in database
    result = author.modifyStatus(readerId, "active")
    if result:
        # return status, authorStatus, message
        return "success", "active", ""
    else:
        # return status, authorStatus, message
        return "fail", "inactive", "failed to modify author status"
|
0lidaxiang/WeArt
|
reader/view/readerManageView.py
|
Python
|
bsd-3-clause
| 5,816
|
title = "Page d'accueil"
|
dbaty/soho
|
docs/_tutorial/4-i18n/src/fr/index.html.meta.py
|
Python
|
bsd-3-clause
| 25
|
from __future__ import print_function
import matplotlib.pyplot as plt
from neatsociety.ifnn import IFNeuron
n = IFNeuron()
times = []
currents = []
potentials = []
fired = []
for i in range(1000):
times.append(1.0 * i)
n.current = 0.0 if i < 100 or i > 800 else 16.0
currents.append(n.current)
n.advance()
potentials.append(n.potential)
fired.append(1.0 if n.has_fired else 0.0)
plt.subplot(3, 1, 1)
plt.title("IFNN model")
plt.ylabel("Input current")
plt.ylim(0, 20)
plt.grid()
plt.plot(times, currents, "b-", label="current")
plt.subplot(3, 1, 2)
plt.ylabel("Potential")
plt.grid()
plt.plot(times, potentials, "g-", label="potential")
plt.subplot(3, 1, 3)
plt.ylabel("Fired")
plt.xlabel("Time (msec)")
plt.grid()
plt.plot(times, fired, "r-", label="fired")
plt.show()
plt.close()
|
machinebrains/neat-python
|
examples/visualize/ifnn_visualize.py
|
Python
|
bsd-3-clause
| 816
|
from __future__ import annotations
import asyncio
import bisect
import builtins
import errno
import heapq
import logging
import os
import random
import sys
import threading
import warnings
import weakref
from collections import defaultdict, deque
from collections.abc import (
Callable,
Collection,
Container,
Iterable,
Iterator,
Mapping,
MutableMapping,
)
from concurrent.futures import Executor
from contextlib import suppress
from datetime import timedelta
from inspect import isawaitable
from pickle import PicklingError
from typing import TYPE_CHECKING, Any, ClassVar, Literal, NamedTuple, TypedDict, cast
from tlz import first, keymap, merge, pluck # noqa: F401
from tornado.ioloop import IOLoop, PeriodicCallback
import dask
from dask.core import istask
from dask.system import CPU_COUNT
from dask.utils import (
apply,
format_bytes,
funcname,
parse_bytes,
parse_timedelta,
stringify,
typename,
)
from . import comm, preloading, profile, system, utils
from .batched import BatchedSend
from .comm import Comm, connect, get_address_host
from .comm.addressing import address_from_user_args, parse_address
from .comm.utils import OFFLOAD_THRESHOLD
from .compatibility import to_thread
from .core import (
CommClosedError,
Status,
coerce_to_address,
error_message,
pingpong,
send_recv,
)
from .diagnostics import nvml
from .diagnostics.plugin import _get_plugin_name
from .diskutils import WorkDir, WorkSpace
from .http import get_handlers
from .metrics import time
from .node import ServerNode
from .proctitle import setproctitle
from .protocol import pickle, to_serialize
from .pubsub import PubSubWorkerExtension
from .security import Security
from .shuffle import ShuffleWorkerExtension
from .sizeof import safe_sizeof as sizeof
from .threadpoolexecutor import ThreadPoolExecutor
from .threadpoolexecutor import secede as tpe_secede
from .utils import (
LRU,
TimeoutError,
_maybe_complex,
get_ip,
has_arg,
import_file,
in_async_call,
iscoroutinefunction,
json_load_robust,
key_split,
log_errors,
offload,
parse_ports,
recursive_to_dict,
silence_logging,
thread_state,
warn_on_duration,
)
from .utils_comm import gather_from_workers, pack_data, retry_operation
from .utils_perf import ThrottledGC, disable_gc_diagnosis, enable_gc_diagnosis
from .versions import get_versions
if TYPE_CHECKING:
from typing_extensions import TypeAlias
from .actor import Actor
from .client import Client
from .diagnostics.plugin import WorkerPlugin
from .nanny import Nanny
# {TaskState -> finish: str | (finish: str, *args)}
Recs: TypeAlias = "dict[TaskState, str | tuple]"
Smsgs: TypeAlias = "list[dict[str, Any]]"
logger = logging.getLogger(__name__)
LOG_PDB = dask.config.get("distributed.admin.pdb-on-err")
no_value = "--no-value-sentinel--"
# TaskState.state subsets
PROCESSING = {
"waiting",
"ready",
"constrained",
"executing",
"long-running",
"cancelled",
"resumed",
}
READY = {"ready", "constrained"}
DEFAULT_EXTENSIONS: list[type] = [PubSubWorkerExtension, ShuffleWorkerExtension]
DEFAULT_METRICS: dict[str, Callable[[Worker], Any]] = {}
DEFAULT_STARTUP_INFORMATION: dict[str, Callable[[Worker], Any]] = {}
DEFAULT_DATA_SIZE = parse_bytes(
dask.config.get("distributed.scheduler.default-data-size")
)
class SerializedTask(NamedTuple):
function: Callable
args: tuple
kwargs: dict[str, Any]
task: object # distributed.scheduler.TaskState.run_spec
class StartStop(TypedDict, total=False):
action: str
start: float
stop: float
source: str # optional
class InvalidTransition(Exception):
pass
class TaskState:
"""Holds volatile state relating to an individual Dask task
* **dependencies**: ``set(TaskState instances)``
The data needed by this key to run
* **dependents**: ``set(TaskState instances)``
The keys that use this dependency.
* **duration**: ``float``
        Expected duration of the task
* **priority**: ``tuple``
The priority this task given by the scheduler. Determines run order.
* **state**: ``str``
The current state of the task. One of ["waiting", "ready", "executing",
"fetch", "memory", "flight", "long-running", "rescheduled", "error"]
* **who_has**: ``set(worker)``
Workers that we believe have this data
* **coming_from**: ``str``
The worker that current task data is coming from if task is in flight
* **waiting_for_data**: ``set(keys of dependencies)``
A dynamic version of dependencies. All dependencies that we still don't
have for a particular key.
* **resource_restrictions**: ``{str: number}``
Abstract resources required to run a task
* **exception**: ``str``
The exception caused by running a task if it erred
* **traceback**: ``str``
        The traceback from running a task if it erred
* **type**: ``type``
The type of a particular piece of data
* **suspicious_count**: ``int``
The number of times a dependency has not been where we expected it
* **startstops**: ``[{startstop}]``
Log of transfer, load, and compute times for a task
* **start_time**: ``float``
Time at which task begins running
* **stop_time**: ``float``
Time at which task finishes running
* **metadata**: ``dict``
Metadata related to task. Stored metadata should be msgpack
serializable (e.g. int, string, list, dict).
* **nbytes**: ``int``
The size of a particular piece of data
* **annotations**: ``dict``
Task annotations
Parameters
----------
key: str
run_spec: SerializedTask
A named tuple containing the ``function``, ``args``, ``kwargs`` and
``task`` associated with this `TaskState` instance. This defaults to
``None`` and can remain empty if it is a dependency that this worker
will receive from another worker.
"""
key: str
run_spec: SerializedTask | None
dependencies: set[TaskState]
dependents: set[TaskState]
duration: float | None
priority: tuple[int, ...] | None
state: str
who_has: set[str]
coming_from: str | None
waiting_for_data: set[TaskState]
waiters: set[TaskState]
resource_restrictions: dict[str, float]
exception: Exception | None
exception_text: str | None
traceback: object | None
traceback_text: str | None
type: type | None
suspicious_count: int
startstops: list[StartStop]
start_time: float | None
stop_time: float | None
metadata: dict
nbytes: float | None
annotations: dict | None
done: bool
_previous: str | None
_next: str | None
def __init__(self, key: str, run_spec: SerializedTask | None = None):
assert key is not None
self.key = key
self.run_spec = run_spec
self.dependencies = set()
self.dependents = set()
self.duration = None
self.priority = None
self.state = "released"
self.who_has = set()
self.coming_from = None
self.waiting_for_data = set()
self.waiters = set()
self.resource_restrictions = {}
self.exception = None
self.exception_text = ""
self.traceback = None
self.traceback_text = ""
self.type = None
self.suspicious_count = 0
self.startstops = []
self.start_time = None
self.stop_time = None
self.metadata = {}
self.nbytes = None
self.annotations = None
self.done = False
self._previous = None
self._next = None
def __repr__(self) -> str:
return f"<TaskState {self.key!r} {self.state}>"
def get_nbytes(self) -> int:
nbytes = self.nbytes
return nbytes if nbytes is not None else DEFAULT_DATA_SIZE
def _to_dict_no_nest(self, *, exclude: Container[str] = ()) -> dict:
"""Dictionary representation for debugging purposes.
Not type stable and not intended for roundtrips.
See also
--------
Client.dump_cluster_state
distributed.utils.recursive_to_dict
Notes
-----
This class uses ``_to_dict_no_nest`` instead of ``_to_dict``.
When a task references another task, just print the task repr. All tasks
should neatly appear under Worker.tasks. This also prevents a RecursionError
during particularly heavy loads, which have been observed to happen whenever
there's an acyclic dependency chain of ~200+ tasks.
"""
return recursive_to_dict(self, exclude=exclude, members=True)
def is_protected(self) -> bool:
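        # A task is "protected" while it, or any task that depends on it, is in a
        # PROCESSING state on this worker; protected replicas are not dropped by
        # handle_remove_replicas below.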
return self.state in PROCESSING or any(
dep_ts.state in PROCESSING for dep_ts in self.dependents
)
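# Illustrative sketch (hypothetical helper, not part of the worker API and not
# referenced elsewhere in this module): how the volatile TaskState attributes
# documented above are typically read when debugging a worker.
def _example_describe_task(ts: TaskState) -> dict:
    """Summarise a task's volatile state for ad-hoc debugging (illustrative only)."""
    return {
        "key": ts.key,
        "state": ts.state,
        "nbytes": ts.get_nbytes(),  # falls back to DEFAULT_DATA_SIZE when unknown
        "n_dependencies": len(ts.dependencies),
        "n_dependents": len(ts.dependents),
        "protected": ts.is_protected(),  # see is_protected() above
    }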
class UniqueTaskHeap(Collection):
"""A heap of TaskState objects ordered by TaskState.priority
Ties are broken by string comparison of the key. Keys are guaranteed to be
unique. Iterating over this object returns the elements in priority order.
"""
def __init__(self, collection: Collection[TaskState] = ()):
self._known = {ts.key for ts in collection}
self._heap = [(ts.priority, ts.key, ts) for ts in collection]
heapq.heapify(self._heap)
def push(self, ts: TaskState) -> None:
"""Add a new TaskState instance to the heap. If the key is already
known, no object is added.
Note: This does not update the priority / heap order in case priority
changes.
"""
assert isinstance(ts, TaskState)
if ts.key not in self._known:
heapq.heappush(self._heap, (ts.priority, ts.key, ts))
self._known.add(ts.key)
def pop(self) -> TaskState:
"""Pop the task with highest priority from the heap."""
_, key, ts = heapq.heappop(self._heap)
self._known.remove(key)
return ts
def peek(self) -> TaskState:
"""Get the highest priority TaskState without removing it from the heap"""
return self._heap[0][2]
def __contains__(self, x: object) -> bool:
if isinstance(x, TaskState):
x = x.key
return x in self._known
def __iter__(self) -> Iterator[TaskState]:
return (ts for _, _, ts in sorted(self._heap))
def __len__(self) -> int:
return len(self._known)
def __repr__(self) -> str:
return f"<{type(self).__name__}: {len(self)} items>"
class Worker(ServerNode):
"""Worker node in a Dask distributed cluster
Workers perform two functions:
1. **Serve data** from a local dictionary
2. **Perform computation** on that data and on data from peers
Workers keep the scheduler informed of their data and use that scheduler to
gather data from other workers when necessary to perform a computation.
You can start a worker with the ``dask-worker`` command line application::
$ dask-worker scheduler-ip:port
Use the ``--help`` flag to see more options::
$ dask-worker --help
    The rest of this docstring is about the internal state that the worker uses
to manage and track internal computations.
**State**
**Informational State**
These attributes don't change significantly during execution.
* **nthreads:** ``int``:
Number of nthreads used by this worker process
* **executors:** ``dict[str, concurrent.futures.Executor]``:
Executors used to perform computation. Always contains the default
executor.
* **local_directory:** ``path``:
Path on local machine to store temporary files
* **scheduler:** ``rpc``:
Location of scheduler. See ``.ip/.port`` attributes.
* **name:** ``string``:
Alias
* **services:** ``{str: Server}``:
Auxiliary web servers running on this worker
* **service_ports:** ``{str: port}``:
* **total_out_connections**: ``int``
The maximum number of concurrent outgoing requests for data
* **total_in_connections**: ``int``
The maximum number of concurrent incoming requests for data
* **comm_threshold_bytes**: ``int``
As long as the total number of bytes in flight is below this threshold
        we will not limit the number of outgoing connections for a single task's
dependency fetch.
* **batched_stream**: ``BatchedSend``
A batched stream along which we communicate to the scheduler
* **log**: ``[(message)]``
A structured and queryable log. See ``Worker.story``
**Volatile State**
These attributes track the progress of tasks that this worker is trying to
complete. In the descriptions below a ``key`` is the name of a task that
we want to compute and ``dep`` is the name of a piece of dependent data
that we want to collect from others.
* **tasks**: ``{key: TaskState}``
The tasks currently executing on this worker (and any dependencies of those tasks)
* **data:** ``{key: object}``:
        Prefer using the **host** attribute instead of this, unless
        memory_limit and at least one of memory_target_fraction or
        memory_spill_fraction are defined; in that case this attribute
        is a zict.Buffer, from which information on the LRU cache can be queried.
* **data.memory:** ``{key: object}``:
Dictionary mapping keys to actual values stored in memory. Only
available if condition for **data** being a zict.Buffer is met.
* **data.disk:** ``{key: object}``:
Dictionary mapping keys to actual values stored on disk. Only
available if condition for **data** being a zict.Buffer is met.
* **data_needed**: UniqueTaskHeap
The tasks which still require data in order to execute, prioritized as a heap
* **ready**: [keys]
Keys that are ready to run. Stored in a LIFO stack
* **constrained**: [keys]
Keys for which we have the data to run, but are waiting on abstract
resources like GPUs. Stored in a FIFO deque
* **executing_count**: ``int``
A count of tasks currently executing on this worker
* **executed_count**: int
A number of tasks that this worker has run in its lifetime
* **long_running**: {keys}
A set of keys of tasks that are running and have started their own
long-running clients.
* **has_what**: ``{worker: {deps}}``
The data that we care about that we think a worker has
* **pending_data_per_worker**: ``{worker: UniqueTaskHeap}``
The data on each worker that we still want, prioritized as a heap
* **in_flight_tasks**: ``int``
A count of the number of tasks that are coming to us in current
peer-to-peer connections
* **in_flight_workers**: ``{worker: {task}}``
The workers from which we are currently gathering data and the
dependencies we expect from those connections
* **comm_bytes**: ``int``
The total number of bytes in flight
* **threads**: ``{key: int}``
The ID of the thread on which the task ran
* **active_threads**: ``{int: key}``
The keys currently running on active threads
* **waiting_for_data_count**: ``int``
A count of how many tasks are currently waiting for data
* **generation**: ``int``
Counter that decreases every time the compute-task handler is invoked by the
Scheduler. It is appended to TaskState.priority and acts as a tie-breaker
between tasks that have the same priority on the Scheduler, determining a
last-in-first-out order between them.
Parameters
----------
scheduler_ip: str, optional
scheduler_port: int, optional
scheduler_file: str, optional
ip: str, optional
data: MutableMapping, type, None
        The object to use for storage; builds a disk-backed LRU dict by default
nthreads: int, optional
loop: tornado.ioloop.IOLoop
local_directory: str, optional
Directory where we place local resources
name: str, optional
memory_limit: int, float, string
Number of bytes of memory that this worker should use.
Set to zero for no limit. Set to 'auto' to calculate
as system.MEMORY_LIMIT * min(1, nthreads / total_cores)
Use strings or numbers like 5GB or 5e9
memory_target_fraction: float or False
Fraction of memory to try to stay beneath
(default: read from config key distributed.worker.memory.target)
    memory_spill_fraction: float or False
Fraction of memory at which we start spilling to disk
(default: read from config key distributed.worker.memory.spill)
memory_pause_fraction: float or False
Fraction of memory at which we stop running new tasks
(default: read from config key distributed.worker.memory.pause)
max_spill: int, string or False
Limit of number of bytes to be spilled on disk.
(default: read from config key distributed.worker.memory.max-spill)
executor: concurrent.futures.Executor, dict[str, concurrent.futures.Executor], "offload"
The executor(s) to use. Depending on the type, it has the following meanings:
- Executor instance: The default executor.
- Dict[str, Executor]: mapping names to Executor instances. If the
"default" key isn't in the dict, a "default" executor will be created
using ``ThreadPoolExecutor(nthreads)``.
            - Str: The string "offload", which refers to the same thread pool used for
offloading communications. This results in the same thread being used
for deserialization and computation.
resources: dict
Resources that this worker has like ``{'GPU': 2}``
nanny: str
Address on which to contact nanny, if it exists
lifetime: str
Amount of time like "1 hour" after which we gracefully shut down the worker.
This defaults to None, meaning no explicit shutdown time.
lifetime_stagger: str
Amount of time like "5 minutes" to stagger the lifetime value
The actual lifetime will be selected uniformly at random between
lifetime +/- lifetime_stagger
lifetime_restart: bool
Whether or not to restart a worker after it has reached its lifetime
Default False
kwargs: optional
Additional parameters to ServerNode constructor
Examples
--------
Use the command line to start a worker::
$ dask-scheduler
Start scheduler at 127.0.0.1:8786
$ dask-worker 127.0.0.1:8786
Start worker at: 127.0.0.1:1234
Registered with scheduler at: 127.0.0.1:8786
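    A rough sketch of starting a worker from Python (the keyword arguments shown
    here are assumptions for illustration; see the Parameters section above)::

        async with Worker("tcp://127.0.0.1:8786", nthreads=2) as worker:
            ...  # serve data and run tasks until the worker is closed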
See Also
--------
distributed.scheduler.Scheduler
distributed.nanny.Nanny
"""
_instances: ClassVar[weakref.WeakSet[Worker]] = weakref.WeakSet()
_initialized_clients: ClassVar[weakref.WeakSet[Client]] = weakref.WeakSet()
tasks: dict[str, TaskState]
waiting_for_data_count: int
    has_what: defaultdict[str, set[str]]  # {worker address: {ts.key, ...}}
pending_data_per_worker: defaultdict[str, UniqueTaskHeap]
nanny: Nanny | None
_lock: threading.Lock
data_needed: UniqueTaskHeap
in_flight_workers: dict[str, set[str]] # {worker address: {ts.key, ...}}
total_out_connections: int
total_in_connections: int
comm_threshold_bytes: int
comm_nbytes: int
_missing_dep_flight: set[TaskState]
threads: dict[str, int] # {ts.key: thread ID}
active_threads_lock: threading.Lock
active_threads: dict[int, str] # {thread ID: ts.key}
active_keys: set[str]
profile_keys: defaultdict[str, dict[str, Any]]
profile_keys_history: deque[tuple[float, dict[str, dict[str, Any]]]]
profile_recent: dict[str, Any]
profile_history: deque[tuple[float, dict[str, Any]]]
generation: int
ready: list[tuple[tuple[int, ...], str]] # heapq [(priority, key), ...]
constrained: deque[str]
_executing: set[TaskState]
_in_flight_tasks: set[TaskState]
executed_count: int
long_running: set[str]
log: deque[tuple] # [(..., stimulus_id: str | None, timestamp: float), ...]
incoming_transfer_log: deque[dict[str, Any]]
outgoing_transfer_log: deque[dict[str, Any]]
target_message_size: int
validate: bool
_transitions_table: dict[tuple[str, str], Callable]
_transition_counter: int
incoming_count: int
outgoing_count: int
outgoing_current_count: int
repetitively_busy: int
bandwidth: float
latency: float
profile_cycle_interval: float
workspace: WorkSpace
_workdir: WorkDir
local_directory: str
_client: Client | None
bandwidth_workers: defaultdict[str, tuple[float, int]]
bandwidth_types: defaultdict[type, tuple[float, int]]
preloads: list[preloading.Preload]
contact_address: str | None
_start_port: int | None
_start_host: str | None
_interface: str | None
_protocol: str
_dashboard_address: str | None
_dashboard: bool
_http_prefix: str
nthreads: int
total_resources: dict[str, float]
available_resources: dict[str, float]
death_timeout: float | None
lifetime: float | None
lifetime_stagger: float | None
lifetime_restart: bool
extensions: dict
security: Security
connection_args: dict[str, Any]
memory_limit: int | None
memory_target_fraction: float | Literal[False]
memory_spill_fraction: float | Literal[False]
memory_pause_fraction: float | Literal[False]
max_spill: int | Literal[False]
data: MutableMapping[str, Any] # {task key: task payload}
actors: dict[str, Actor | None]
loop: IOLoop
reconnect: bool
executors: dict[str, Executor]
batched_stream: BatchedSend
name: Any
scheduler_delay: float
stream_comms: dict[str, BatchedSend]
heartbeat_interval: float
heartbeat_active: bool
_ipython_kernel: Any | None = None
services: dict[str, Any] = {}
service_specs: dict[str, Any]
metrics: dict[str, Callable[[Worker], Any]]
startup_information: dict[str, Callable[[Worker], Any]]
low_level_profiler: bool
scheduler: Any
execution_state: dict[str, Any]
memory_monitor_interval: float | None
_memory_monitoring: bool
_throttled_gc: ThrottledGC
plugins: dict[str, WorkerPlugin]
_pending_plugins: tuple[WorkerPlugin, ...]
def __init__(
self,
scheduler_ip: str | None = None,
scheduler_port: int | None = None,
*,
scheduler_file: str | None = None,
nthreads: int | None = None,
loop: IOLoop | None = None,
local_dir: None = None, # Deprecated, use local_directory instead
local_directory: str | None = None,
services: dict | None = None,
name: Any | None = None,
reconnect: bool = True,
memory_limit: str | float = "auto",
executor: Executor | dict[str, Executor] | Literal["offload"] | None = None,
resources: dict[str, float] | None = None,
silence_logs: int | None = None,
death_timeout: Any | None = None,
preload: list[str] | None = None,
preload_argv: list[str] | list[list[str]] | None = None,
security: Security | dict[str, Any] | None = None,
contact_address: str | None = None,
heartbeat_interval: Any = "1s",
memory_monitor_interval: Any = "200ms",
memory_target_fraction: float | Literal[False] | None = None,
memory_spill_fraction: float | Literal[False] | None = None,
memory_pause_fraction: float | Literal[False] | None = None,
max_spill: float | str | Literal[False] | None = None,
extensions: list[type] | None = None,
metrics: Mapping[str, Callable[[Worker], Any]] = DEFAULT_METRICS,
startup_information: Mapping[
str, Callable[[Worker], Any]
] = DEFAULT_STARTUP_INFORMATION,
data: (
MutableMapping[str, Any] # pre-initialised
| Callable[[], MutableMapping[str, Any]] # constructor
| tuple[
Callable[..., MutableMapping[str, Any]], dict[str, Any]
] # (constructor, kwargs to constructor)
            | None  # create internally
) = None,
interface: str | None = None,
host: str | None = None,
port: int | None = None,
protocol: str | None = None,
dashboard_address: str | None = None,
dashboard: bool = False,
http_prefix: str = "/",
nanny: Nanny | None = None,
plugins: tuple[WorkerPlugin, ...] = (),
low_level_profiler: bool | None = None,
validate: bool | None = None,
profile_cycle_interval=None,
lifetime: Any | None = None,
lifetime_stagger: Any | None = None,
lifetime_restart: bool | None = None,
**kwargs,
):
self.tasks = {}
self.waiting_for_data_count = 0
self.has_what = defaultdict(set)
self.pending_data_per_worker = defaultdict(UniqueTaskHeap)
self.nanny = nanny
self._lock = threading.Lock()
self.data_needed = UniqueTaskHeap()
self.in_flight_workers = {}
self.total_out_connections = dask.config.get(
"distributed.worker.connections.outgoing"
)
self.total_in_connections = dask.config.get(
"distributed.worker.connections.incoming"
)
self.comm_threshold_bytes = int(10e6)
self.comm_nbytes = 0
self._missing_dep_flight = set()
self.threads = {}
self.active_threads_lock = threading.Lock()
self.active_threads = {}
self.active_keys = set()
self.profile_keys = defaultdict(profile.create)
self.profile_keys_history = deque(maxlen=3600)
self.profile_recent = profile.create()
self.profile_history = deque(maxlen=3600)
self.generation = 0
self.ready = []
self.constrained = deque()
self._executing = set()
self._in_flight_tasks = set()
self.executed_count = 0
self.long_running = set()
self.target_message_size = int(50e6) # 50 MB
self.log = deque(maxlen=100000)
if validate is None:
validate = dask.config.get("distributed.scheduler.validate")
self.validate = validate
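        # Map of (current state, requested finish state) -> transition method.
        # State pairs that are not listed here are treated as invalid
        # worker-side transitions (see InvalidTransition above).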
self._transitions_table = {
("cancelled", "resumed"): self.transition_cancelled_resumed,
("cancelled", "fetch"): self.transition_cancelled_fetch,
("cancelled", "released"): self.transition_cancelled_released,
("cancelled", "waiting"): self.transition_cancelled_waiting,
("cancelled", "forgotten"): self.transition_cancelled_forgotten,
("cancelled", "memory"): self.transition_cancelled_memory,
("cancelled", "error"): self.transition_cancelled_error,
("resumed", "memory"): self.transition_generic_memory,
("resumed", "error"): self.transition_generic_error,
("resumed", "released"): self.transition_generic_released,
("resumed", "waiting"): self.transition_resumed_waiting,
("resumed", "fetch"): self.transition_resumed_fetch,
("resumed", "missing"): self.transition_resumed_missing,
("constrained", "executing"): self.transition_constrained_executing,
("constrained", "released"): self.transition_generic_released,
("error", "released"): self.transition_generic_released,
("executing", "error"): self.transition_executing_error,
("executing", "long-running"): self.transition_executing_long_running,
("executing", "memory"): self.transition_executing_memory,
("executing", "released"): self.transition_executing_released,
("executing", "rescheduled"): self.transition_executing_rescheduled,
("fetch", "flight"): self.transition_fetch_flight,
("fetch", "released"): self.transition_generic_released,
("flight", "error"): self.transition_flight_error,
("flight", "fetch"): self.transition_flight_fetch,
("flight", "memory"): self.transition_flight_memory,
("flight", "missing"): self.transition_flight_missing,
("flight", "released"): self.transition_flight_released,
("long-running", "error"): self.transition_generic_error,
("long-running", "memory"): self.transition_long_running_memory,
("long-running", "rescheduled"): self.transition_executing_rescheduled,
("long-running", "released"): self.transition_executing_released,
("memory", "released"): self.transition_memory_released,
("missing", "fetch"): self.transition_missing_fetch,
("missing", "released"): self.transition_missing_released,
("missing", "error"): self.transition_generic_error,
("ready", "error"): self.transition_generic_error,
("ready", "executing"): self.transition_ready_executing,
("ready", "released"): self.transition_generic_released,
("released", "error"): self.transition_generic_error,
("released", "fetch"): self.transition_released_fetch,
("released", "forgotten"): self.transition_released_forgotten,
("released", "memory"): self.transition_released_memory,
("released", "waiting"): self.transition_released_waiting,
("waiting", "constrained"): self.transition_waiting_constrained,
("waiting", "ready"): self.transition_waiting_ready,
("waiting", "released"): self.transition_generic_released,
}
self._transition_counter = 0
self.incoming_transfer_log = deque(maxlen=100000)
self.incoming_count = 0
self.outgoing_transfer_log = deque(maxlen=100000)
self.outgoing_count = 0
self.outgoing_current_count = 0
self.repetitively_busy = 0
self.bandwidth = parse_bytes(dask.config.get("distributed.scheduler.bandwidth"))
self.bandwidth_workers = defaultdict(
lambda: (0, 0)
) # bw/count recent transfers
self.bandwidth_types = defaultdict(lambda: (0, 0)) # bw/count recent transfers
self.latency = 0.001
self._client = None
if profile_cycle_interval is None:
profile_cycle_interval = dask.config.get("distributed.worker.profile.cycle")
profile_cycle_interval = parse_timedelta(profile_cycle_interval, default="ms")
assert profile_cycle_interval
self._setup_logging(logger)
if local_dir is not None:
warnings.warn("The local_dir keyword has moved to local_directory")
local_directory = local_dir
if not local_directory:
local_directory = dask.config.get("temporary-directory") or os.getcwd()
os.makedirs(local_directory, exist_ok=True)
local_directory = os.path.join(local_directory, "dask-worker-space")
with warn_on_duration(
"1s",
"Creating scratch directories is taking a surprisingly long time. "
"This is often due to running workers on a network file system. "
"Consider specifying a local-directory to point workers to write "
"scratch data to a local disk.",
):
self._workspace = WorkSpace(os.path.abspath(local_directory))
self._workdir = self._workspace.new_work_dir(prefix="worker-")
self.local_directory = self._workdir.dir_path
if not preload:
preload = dask.config.get("distributed.worker.preload")
if not preload_argv:
preload_argv = dask.config.get("distributed.worker.preload-argv")
assert preload is not None
assert preload_argv is not None
self.preloads = preloading.process_preloads(
self, preload, preload_argv, file_dir=self.local_directory
)
if scheduler_file:
cfg = json_load_robust(scheduler_file)
scheduler_addr = cfg["address"]
elif scheduler_ip is None and dask.config.get("scheduler-address", None):
scheduler_addr = dask.config.get("scheduler-address")
elif scheduler_port is None:
scheduler_addr = coerce_to_address(scheduler_ip)
else:
scheduler_addr = coerce_to_address((scheduler_ip, scheduler_port))
self.contact_address = contact_address
if protocol is None:
protocol_address = scheduler_addr.split("://")
if len(protocol_address) == 2:
protocol = protocol_address[0]
assert protocol
self._start_port = port
self._start_host = host
if host:
# Helpful error message if IPv6 specified incorrectly
_, host_address = parse_address(host)
if host_address.count(":") > 1 and not host_address.startswith("["):
raise ValueError(
"Host address with IPv6 must be bracketed like '[::1]'; "
f"got {host_address}"
)
self._interface = interface
self._protocol = protocol
self.nthreads = nthreads or CPU_COUNT
if resources is None:
resources = dask.config.get("distributed.worker.resources", None)
assert isinstance(resources, dict)
self.total_resources = resources or {}
self.available_resources = (resources or {}).copy()
self.death_timeout = parse_timedelta(death_timeout)
self.extensions = {}
if silence_logs:
silence_logging(level=silence_logs)
if isinstance(security, dict):
security = Security(**security)
self.security = security or Security()
assert isinstance(self.security, Security)
self.connection_args = self.security.get_connection_args("worker")
self.memory_limit = parse_memory_limit(memory_limit, self.nthreads)
self.memory_target_fraction = (
memory_target_fraction
if memory_target_fraction is not None
else dask.config.get("distributed.worker.memory.target")
)
self.memory_spill_fraction = (
memory_spill_fraction
if memory_spill_fraction is not None
else dask.config.get("distributed.worker.memory.spill")
)
self.memory_pause_fraction = (
memory_pause_fraction
if memory_pause_fraction is not None
else dask.config.get("distributed.worker.memory.pause")
)
if max_spill is None:
max_spill = dask.config.get("distributed.worker.memory.max-spill")
self.max_spill = False if max_spill is False else parse_bytes(max_spill)
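        # Resolve the ``data`` argument: an explicit mapping, constructor, or
        # (constructor, kwargs) tuple wins; otherwise use a disk-backed
        # SpillBuffer when a memory limit plus a target or spill fraction are
        # configured, and fall back to a plain in-memory dict.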
if isinstance(data, MutableMapping):
self.data = data
elif callable(data):
self.data = data()
elif isinstance(data, tuple):
self.data = data[0](**data[1])
elif self.memory_limit and (
self.memory_target_fraction or self.memory_spill_fraction
):
from .spill import SpillBuffer
if self.memory_target_fraction:
target = int(
self.memory_limit
* (self.memory_target_fraction or self.memory_spill_fraction)
)
else:
target = sys.maxsize
self.data = SpillBuffer(
os.path.join(self.local_directory, "storage"),
target=target,
max_spill=self.max_spill,
)
else:
self.data = {}
self.actors = {}
self.loop = loop or IOLoop.current()
self.reconnect = reconnect
# Common executors always available
self.executors = {
"offload": utils._offload_executor,
"actor": ThreadPoolExecutor(1, thread_name_prefix="Dask-Actor-Threads"),
}
if nvml.device_get_count() > 0:
self.executors["gpu"] = ThreadPoolExecutor(
1, thread_name_prefix="Dask-GPU-Threads"
)
# Find the default executor
if executor == "offload":
self.executors["default"] = self.executors["offload"]
elif isinstance(executor, dict):
self.executors.update(executor)
elif executor is not None:
self.executors["default"] = executor
if "default" not in self.executors:
self.executors["default"] = ThreadPoolExecutor(
self.nthreads, thread_name_prefix="Dask-Default-Threads"
)
self.batched_stream = BatchedSend(interval="2ms", loop=self.loop)
self.name = name
self.scheduler_delay = 0
self.stream_comms = {}
self.heartbeat_active = False
self._ipython_kernel = None
if self.local_directory not in sys.path:
sys.path.insert(0, self.local_directory)
self.services = {}
self.service_specs = services or {}
self._dashboard_address = dashboard_address
self._dashboard = dashboard
self._http_prefix = http_prefix
self.metrics = dict(metrics) if metrics else {}
self.startup_information = (
dict(startup_information) if startup_information else {}
)
if low_level_profiler is None:
low_level_profiler = dask.config.get("distributed.worker.profile.low-level")
self.low_level_profiler = low_level_profiler
handlers = {
"gather": self.gather,
"run": self.run,
"run_coroutine": self.run_coroutine,
"get_data": self.get_data,
"update_data": self.update_data,
"free_keys": self.handle_free_keys,
"terminate": self.close,
"ping": pingpong,
"upload_file": self.upload_file,
"start_ipython": self.start_ipython,
"call_stack": self.get_call_stack,
"profile": self.get_profile,
"profile_metadata": self.get_profile_metadata,
"get_logs": self.get_logs,
"keys": self.keys,
"versions": self.versions,
"actor_execute": self.actor_execute,
"actor_attribute": self.actor_attribute,
"plugin-add": self.plugin_add,
"plugin-remove": self.plugin_remove,
"get_monitor_info": self.get_monitor_info,
}
stream_handlers = {
"close": self.close,
"cancel-compute": self.handle_cancel_compute,
"acquire-replicas": self.handle_acquire_replicas,
"compute-task": self.handle_compute_task,
"free-keys": self.handle_free_keys,
"remove-replicas": self.handle_remove_replicas,
"steal-request": self.handle_steal_request,
"worker-status-change": self.handle_worker_status_change,
}
super().__init__(
handlers=handlers,
stream_handlers=stream_handlers,
io_loop=self.loop,
connection_args=self.connection_args,
**kwargs,
)
self.scheduler = self.rpc(scheduler_addr)
self.execution_state = {
"scheduler": self.scheduler.address,
"ioloop": self.loop,
"worker": self,
}
self.heartbeat_interval = parse_timedelta(heartbeat_interval, default="ms")
pc = PeriodicCallback(self.heartbeat, self.heartbeat_interval * 1000)
self.periodic_callbacks["heartbeat"] = pc
pc = PeriodicCallback(
lambda: self.batched_stream.send({"op": "keep-alive"}), 60000
)
self.periodic_callbacks["keep-alive"] = pc
# FIXME annotations: https://github.com/tornadoweb/tornado/issues/3117
pc = PeriodicCallback(self.find_missing, 1000) # type: ignore
self.periodic_callbacks["find-missing"] = pc
self._address = contact_address
self.memory_monitor_interval = parse_timedelta(
memory_monitor_interval, default="ms"
)
self._memory_monitoring = False
if self.memory_limit:
assert self.memory_monitor_interval is not None
pc = PeriodicCallback(
self.memory_monitor, # type: ignore
self.memory_monitor_interval * 1000,
)
self.periodic_callbacks["memory"] = pc
if extensions is None:
extensions = DEFAULT_EXTENSIONS
for ext in extensions:
ext(self)
self._throttled_gc = ThrottledGC(logger=logger)
setproctitle("dask-worker [not started]")
profile_trigger_interval = parse_timedelta(
dask.config.get("distributed.worker.profile.interval"), default="ms"
)
pc = PeriodicCallback(self.trigger_profile, profile_trigger_interval * 1000)
self.periodic_callbacks["profile"] = pc
pc = PeriodicCallback(self.cycle_profile, profile_cycle_interval * 1000)
self.periodic_callbacks["profile-cycle"] = pc
self.plugins = {}
self._pending_plugins = plugins
if lifetime is None:
lifetime = dask.config.get("distributed.worker.lifetime.duration")
self.lifetime = parse_timedelta(lifetime)
if lifetime_stagger is None:
lifetime_stagger = dask.config.get("distributed.worker.lifetime.stagger")
lifetime_stagger = parse_timedelta(lifetime_stagger)
if lifetime_restart is None:
lifetime_restart = dask.config.get("distributed.worker.lifetime.restart")
self.lifetime_restart = lifetime_restart
if self.lifetime:
self.lifetime += (random.random() * 2 - 1) * lifetime_stagger
self.io_loop.call_later(self.lifetime, self.close_gracefully)
Worker._instances.add(self)
##################
# Administrative #
##################
def __repr__(self):
name = f", name: {self.name}" if self.name != self.address else ""
return (
f"<{self.__class__.__name__} {self.address!r}{name}, "
f"status: {self.status.name}, "
f"stored: {len(self.data)}, "
f"running: {self.executing_count}/{self.nthreads}, "
f"ready: {len(self.ready)}, "
f"comm: {self.in_flight_tasks}, "
f"waiting: {self.waiting_for_data_count}>"
)
@property
def logs(self):
return self._deque_handler.deque
def log_event(self, topic, msg):
self.batched_stream.send(
{
"op": "log-event",
"topic": topic,
"msg": msg,
}
)
@property
def executing_count(self) -> int:
return len(self._executing)
@property
def in_flight_tasks(self) -> int:
return len(self._in_flight_tasks)
@property
def worker_address(self):
"""For API compatibility with Nanny"""
return self.address
@property
def local_dir(self):
"""For API compatibility with Nanny"""
warnings.warn(
"The local_dir attribute has moved to local_directory", stacklevel=2
)
return self.local_directory
@property
def executor(self):
return self.executors["default"]
@ServerNode.status.setter # type: ignore
def status(self, value):
"""Override Server.status to notify the Scheduler of status changes"""
ServerNode.status.__set__(self, value)
self._send_worker_status_change()
def _send_worker_status_change(self) -> None:
if (
self.batched_stream
and self.batched_stream.comm
and not self.batched_stream.comm.closed()
):
self.batched_stream.send(
{"op": "worker-status-change", "status": self._status.name}
)
elif self._status != Status.closed:
self.loop.call_later(0.05, self._send_worker_status_change)
async def get_metrics(self) -> dict:
try:
spilled_memory, spilled_disk = self.data.spilled_total # type: ignore
except AttributeError:
# spilling is disabled
spilled_memory, spilled_disk = 0, 0
out = dict(
executing=self.executing_count,
in_memory=len(self.data),
ready=len(self.ready),
in_flight=self.in_flight_tasks,
bandwidth={
"total": self.bandwidth,
"workers": dict(self.bandwidth_workers),
"types": keymap(typename, self.bandwidth_types),
},
spilled_nbytes={
"memory": spilled_memory,
"disk": spilled_disk,
},
)
out.update(self.monitor.recent())
for k, metric in self.metrics.items():
try:
result = metric(self)
if isawaitable(result):
result = await result
# In case of collision, prefer core metrics
out.setdefault(k, result)
except Exception: # TODO: log error once
pass
return out
async def get_startup_information(self):
result = {}
for k, f in self.startup_information.items():
try:
v = f(self)
if isawaitable(v):
v = await v
result[k] = v
except Exception: # TODO: log error once
pass
return result
def identity(self):
return {
"type": type(self).__name__,
"id": self.id,
"scheduler": self.scheduler.address,
"nthreads": self.nthreads,
"memory_limit": self.memory_limit,
}
def _to_dict(
self, comm: Comm | None = None, *, exclude: Container[str] = ()
) -> dict:
"""Dictionary representation for debugging purposes.
Not type stable and not intended for roundtrips.
See also
--------
Worker.identity
Client.dump_cluster_state
distributed.utils.recursive_to_dict
"""
info = super()._to_dict(exclude=exclude)
extra = {
"status": self.status,
"ready": self.ready,
"constrained": self.constrained,
"data_needed": list(self.data_needed),
"pending_data_per_worker": {
w: list(v) for w, v in self.pending_data_per_worker.items()
},
"long_running": self.long_running,
"executing_count": self.executing_count,
"in_flight_tasks": self.in_flight_tasks,
"in_flight_workers": self.in_flight_workers,
"log": self.log,
"tasks": self.tasks,
"memory_limit": self.memory_limit,
"memory_target_fraction": self.memory_target_fraction,
"memory_spill_fraction": self.memory_spill_fraction,
"memory_pause_fraction": self.memory_pause_fraction,
"logs": self.get_logs(),
"config": dask.config.config,
"incoming_transfer_log": self.incoming_transfer_log,
"outgoing_transfer_log": self.outgoing_transfer_log,
}
info.update(extra)
info = {k: v for k, v in info.items() if k not in exclude}
return recursive_to_dict(info, exclude=exclude)
#####################
# External Services #
#####################
async def _register_with_scheduler(self):
self.periodic_callbacks["keep-alive"].stop()
self.periodic_callbacks["heartbeat"].stop()
start = time()
if self.contact_address is None:
self.contact_address = self.address
logger.info("-" * 49)
while True:
try:
_start = time()
comm = await connect(self.scheduler.address, **self.connection_args)
comm.name = "Worker->Scheduler"
comm._server = weakref.ref(self)
await comm.write(
dict(
op="register-worker",
reply=False,
address=self.contact_address,
status=self.status.name,
keys=list(self.data),
nthreads=self.nthreads,
name=self.name,
nbytes={
ts.key: ts.get_nbytes()
for ts in self.tasks.values()
                            # Only tasks that are in memory have a meaningful
                            # nbytes here; otherwise we would simply be sending
                            # the default value
},
types={k: typename(v) for k, v in self.data.items()},
now=time(),
resources=self.total_resources,
memory_limit=self.memory_limit,
local_directory=self.local_directory,
services=self.service_ports,
nanny=self.nanny,
pid=os.getpid(),
versions=get_versions(),
metrics=await self.get_metrics(),
extra=await self.get_startup_information(),
),
serializers=["msgpack"],
)
future = comm.read(deserializers=["msgpack"])
response = await future
if response.get("warning"):
logger.warning(response["warning"])
_end = time()
middle = (_start + _end) / 2
self._update_latency(_end - start)
self.scheduler_delay = response["time"] - middle
self.status = Status.running
break
except OSError:
logger.info("Waiting to connect to: %26s", self.scheduler.address)
await asyncio.sleep(0.1)
except TimeoutError: # pragma: no cover
logger.info("Timed out when connecting to scheduler")
if response["status"] != "OK":
raise ValueError(f"Unexpected response from register: {response!r}")
else:
await asyncio.gather(
*(
self.plugin_add(name=name, plugin=plugin)
for name, plugin in response["worker-plugins"].items()
)
)
logger.info(" Registered to: %26s", self.scheduler.address)
logger.info("-" * 49)
self.batched_stream.start(comm)
self.periodic_callbacks["keep-alive"].start()
self.periodic_callbacks["heartbeat"].start()
self.loop.add_callback(self.handle_scheduler, comm)
def _update_latency(self, latency):
self.latency = latency * 0.05 + self.latency * 0.95
if self.digests is not None:
self.digests["latency"].add(latency)
async def heartbeat(self):
if self.heartbeat_active:
logger.debug("Heartbeat skipped: channel busy")
return
self.heartbeat_active = True
logger.debug("Heartbeat: %s", self.address)
try:
start = time()
response = await retry_operation(
self.scheduler.heartbeat_worker,
address=self.contact_address,
now=start,
metrics=await self.get_metrics(),
executing={
key: start - self.tasks[key].start_time
for key in self.active_keys
if key in self.tasks
},
)
end = time()
middle = (start + end) / 2
self._update_latency(end - start)
if response["status"] == "missing":
# If running, wait up to 0.5s and then re-register self.
# Otherwise just exit.
start = time()
while self.status in Status.ANY_RUNNING and time() < start + 0.5:
await asyncio.sleep(0.01)
if self.status in Status.ANY_RUNNING:
await self._register_with_scheduler()
return
self.scheduler_delay = response["time"] - middle
self.periodic_callbacks["heartbeat"].callback_time = (
response["heartbeat-interval"] * 1000
)
self.bandwidth_workers.clear()
self.bandwidth_types.clear()
except CommClosedError:
logger.warning("Heartbeat to scheduler failed", exc_info=True)
if not self.reconnect:
await self.close(report=False)
except OSError as e:
# Scheduler is gone. Respect distributed.comm.timeouts.connect
if "Timed out trying to connect" in str(e):
await self.close(report=False)
else:
raise e
finally:
self.heartbeat_active = False
async def handle_scheduler(self, comm):
try:
await self.handle_stream(
comm, every_cycle=[self.ensure_communicating, self.ensure_computing]
)
except Exception as e:
logger.exception(e)
raise
finally:
if self.reconnect and self.status in Status.ANY_RUNNING:
logger.info("Connection to scheduler broken. Reconnecting...")
self.loop.add_callback(self.heartbeat)
else:
await self.close(report=False)
def start_ipython(self, comm):
"""Start an IPython kernel
Returns Jupyter connection info dictionary.
"""
from ._ipython_utils import start_ipython
if self._ipython_kernel is None:
self._ipython_kernel = start_ipython(
ip=self.ip, ns={"worker": self}, log=logger
)
return self._ipython_kernel.get_connection_info()
async def upload_file(self, comm, filename=None, data=None, load=True):
out_filename = os.path.join(self.local_directory, filename)
def func(data):
if isinstance(data, str):
data = data.encode()
with open(out_filename, "wb") as f:
f.write(data)
f.flush()
return data
if len(data) < 10000:
data = func(data)
else:
data = await offload(func, data)
if load:
try:
import_file(out_filename)
cache_loads.data.clear()
except Exception as e:
logger.exception(e)
raise e
return {"status": "OK", "nbytes": len(data)}
def keys(self) -> list[str]:
return list(self.data)
async def gather(self, who_has: dict[str, list[str]]) -> dict[str, Any]:
who_has = {
k: [coerce_to_address(addr) for addr in v]
for k, v in who_has.items()
if k not in self.data
}
result, missing_keys, missing_workers = await gather_from_workers(
who_has, rpc=self.rpc, who=self.address
)
self.update_data(data=result, report=False)
if missing_keys:
logger.warning(
"Could not find data: %s on workers: %s (who_has: %s)",
missing_keys,
missing_workers,
who_has,
)
return {"status": "partial-fail", "keys": missing_keys}
else:
return {"status": "OK"}
def get_monitor_info(
self, recent: bool = False, start: float = 0
) -> dict[str, Any]:
result = dict(
range_query=(
self.monitor.recent()
if recent
else self.monitor.range_query(start=start)
),
count=self.monitor.count,
last_time=self.monitor.last_time,
)
if nvml.device_get_count() > 0:
result["gpu_name"] = self.monitor.gpu_name
result["gpu_memory_total"] = self.monitor.gpu_memory_total
return result
#############
# Lifecycle #
#############
async def start(self):
if self.status and self.status in (
Status.closed,
Status.closing,
Status.closing_gracefully,
):
return
assert self.status is Status.undefined, self.status
await super().start()
enable_gc_diagnosis()
ports = parse_ports(self._start_port)
for port in ports:
start_address = address_from_user_args(
host=self._start_host,
port=port,
interface=self._interface,
protocol=self._protocol,
security=self.security,
)
kwargs = self.security.get_listen_args("worker")
if self._protocol in ("tcp", "tls"):
kwargs = kwargs.copy()
kwargs["default_host"] = get_ip(
get_address_host(self.scheduler.address)
)
try:
await self.listen(start_address, **kwargs)
except OSError as e:
if len(ports) > 1 and e.errno == errno.EADDRINUSE:
continue
else:
raise
else:
self._start_address = start_address
break
else:
raise ValueError(
f"Could not start Worker on host {self._start_host}"
f"with port {self._start_port}"
)
# Start HTTP server associated with this Worker node
routes = get_handlers(
server=self,
modules=dask.config.get("distributed.worker.http.routes"),
prefix=self._http_prefix,
)
self.start_http_server(routes, self._dashboard_address)
if self._dashboard:
try:
import distributed.dashboard.worker
except ImportError:
logger.debug("To start diagnostics web server please install Bokeh")
else:
distributed.dashboard.worker.connect(
self.http_application,
self.http_server,
self,
prefix=self._http_prefix,
)
self.ip = get_address_host(self.address)
if self.name is None:
self.name = self.address
for preload in self.preloads:
await preload.start()
# Services listen on all addresses
# Note Nanny is not a "real" service, just some metadata
# passed in service_ports...
self.start_services(self.ip)
try:
listening_address = "%s%s:%d" % (self.listener.prefix, self.ip, self.port)
except Exception:
listening_address = f"{self.listener.prefix}{self.ip}"
logger.info(" Start worker at: %26s", self.address)
logger.info(" Listening to: %26s", listening_address)
for k, v in self.service_ports.items():
logger.info(" {:>16} at: {:>26}".format(k, self.ip + ":" + str(v)))
logger.info("Waiting to connect to: %26s", self.scheduler.address)
logger.info("-" * 49)
logger.info(" Threads: %26d", self.nthreads)
if self.memory_limit:
logger.info(" Memory: %26s", format_bytes(self.memory_limit))
logger.info(" Local Directory: %26s", self.local_directory)
setproctitle("dask-worker [%s]" % self.address)
plugins_msgs = await asyncio.gather(
*(
self.plugin_add(plugin=plugin, catch_errors=False)
for plugin in self._pending_plugins
),
return_exceptions=True,
)
plugins_exceptions = [msg for msg in plugins_msgs if isinstance(msg, Exception)]
if len(plugins_exceptions) >= 1:
if len(plugins_exceptions) > 1:
logger.error(
"Multiple plugin exceptions raised. All exceptions will be logged, the first is raised."
)
for exc in plugins_exceptions:
logger.error(repr(exc))
raise plugins_exceptions[0]
self._pending_plugins = ()
await self._register_with_scheduler()
self.start_periodic_callbacks()
return self
def _close(self, *args, **kwargs):
warnings.warn("Worker._close has moved to Worker.close", stacklevel=2)
return self.close(*args, **kwargs)
async def close(
self, report=True, timeout=30, nanny=True, executor_wait=True, safe=False
):
with log_errors():
if self.status in (Status.closed, Status.closing):
await self.finished()
return
self.reconnect = False
disable_gc_diagnosis()
try:
logger.info("Stopping worker at %s", self.address)
except ValueError: # address not available if already closed
logger.info("Stopping worker")
if self.status not in Status.ANY_RUNNING:
logger.info("Closed worker has not yet started: %s", self.status)
self.status = Status.closing
for preload in self.preloads:
await preload.teardown()
if nanny and self.nanny:
with self.rpc(self.nanny) as r:
await r.close_gracefully()
setproctitle("dask-worker [closing]")
teardowns = [
plugin.teardown(self)
for plugin in self.plugins.values()
if hasattr(plugin, "teardown")
]
await asyncio.gather(*(td for td in teardowns if isawaitable(td)))
for pc in self.periodic_callbacks.values():
pc.stop()
if self._client:
# If this worker is the last one alive, clean up the worker
# initialized clients
if not any(
w
for w in Worker._instances
if w != self and w.status in Status.ANY_RUNNING
):
for c in Worker._initialized_clients:
                        # Regardless of what the client was initialized with,
                        # we'll require the result as a future. This is
                        # necessary since the heuristics for detecting an
                        # asynchronous context are not reliable and we might
                        # deadlock here.
c._asynchronous = True
if c.asynchronous:
await c.close()
else:
                            # There is still a chance that, even though we tell
                            # the client to be async, it will itself decide
                            # otherwise
c.close()
with suppress(EnvironmentError, TimeoutError):
if report and self.contact_address is not None:
await asyncio.wait_for(
self.scheduler.unregister(
address=self.contact_address, safe=safe
),
timeout,
)
await self.scheduler.close_rpc()
self._workdir.release()
self.stop_services()
# Give some time for a UCX scheduler to complete closing endpoints
# before closing self.batched_stream, otherwise the local endpoint
            # may be closed too early and errors may be raised on the scheduler
            # when trying to send the closing message.
if self._protocol == "ucx": # pragma: no cover
await asyncio.sleep(0.2)
if (
self.batched_stream
and self.batched_stream.comm
and not self.batched_stream.comm.closed()
):
self.batched_stream.send({"op": "close-stream"})
if self.batched_stream:
with suppress(TimeoutError):
await self.batched_stream.close(timedelta(seconds=timeout))
for executor in self.executors.values():
if executor is utils._offload_executor:
continue # Never shutdown the offload executor
def _close():
if isinstance(executor, ThreadPoolExecutor):
executor._work_queue.queue.clear()
executor.shutdown(wait=executor_wait, timeout=timeout)
else:
executor.shutdown(wait=executor_wait)
# Waiting for the shutdown can block the event loop causing
# weird deadlocks particularly if the task that is executing in
# the thread is waiting for a server reply, e.g. when using
# worker clients, semaphores, etc.
await to_thread(_close)
self.stop()
await self.rpc.close()
self.status = Status.closed
await super().close()
setproctitle("dask-worker [closed]")
return "OK"
async def close_gracefully(self, restart=None):
"""Gracefully shut down a worker
This first informs the scheduler that we're shutting down, and asks it
to move our data elsewhere. Afterwards, we close as normal
"""
if self.status in (Status.closing, Status.closing_gracefully):
await self.finished()
if self.status == Status.closed:
return
if restart is None:
restart = self.lifetime_restart
logger.info("Closing worker gracefully: %s", self.address)
# Wait for all tasks to leave the worker and don't accept any new ones.
# Scheduler.retire_workers will set the status to closing_gracefully and push it
# back to this worker.
await self.scheduler.retire_workers(
workers=[self.address], close_workers=False, remove=False
)
await self.close(safe=True, nanny=not restart)
async def terminate(self, report: bool = True, **kwargs) -> str:
await self.close(report=report, **kwargs)
return "OK"
async def wait_until_closed(self):
warnings.warn("wait_until_closed has moved to finished()")
await self.finished()
assert self.status == Status.closed
################
# Worker Peers #
################
def send_to_worker(self, address, msg):
if address not in self.stream_comms:
bcomm = BatchedSend(interval="1ms", loop=self.loop)
self.stream_comms[address] = bcomm
async def batched_send_connect():
comm = await connect(
address, **self.connection_args # TODO, serialization
)
comm.name = "Worker->Worker"
await comm.write({"op": "connection_stream"})
bcomm.start(comm)
self.loop.add_callback(batched_send_connect)
self.stream_comms[address].send(msg)
async def get_data(
self, comm, keys=None, who=None, serializers=None, max_connections=None
):
start = time()
if max_connections is None:
max_connections = self.total_in_connections
# Allow same-host connections more liberally
if (
max_connections
and comm
and get_address_host(comm.peer_address) == get_address_host(self.address)
):
max_connections = max_connections * 2
if self.status == Status.paused:
max_connections = 1
throttle_msg = " Throttling outgoing connections because worker is paused."
else:
throttle_msg = ""
if (
max_connections is not False
and self.outgoing_current_count >= max_connections
):
logger.debug(
"Worker %s has too many open connections to respond to data request "
"from %s (%d/%d).%s",
self.address,
who,
self.outgoing_current_count,
max_connections,
throttle_msg,
)
return {"status": "busy"}
self.outgoing_current_count += 1
data = {k: self.data[k] for k in keys if k in self.data}
if len(data) < len(keys):
for k in set(keys) - set(data):
if k in self.actors:
from .actor import Actor
data[k] = Actor(type(self.actors[k]), self.address, k, worker=self)
msg = {"status": "OK", "data": {k: to_serialize(v) for k, v in data.items()}}
nbytes = {k: self.tasks[k].nbytes for k in data if k in self.tasks}
stop = time()
if self.digests is not None:
self.digests["get-data-load-duration"].add(stop - start)
start = time()
try:
compressed = await comm.write(msg, serializers=serializers)
response = await comm.read(deserializers=serializers)
assert response == "OK", response
except OSError:
logger.exception(
"failed during get data with %s -> %s", self.address, who, exc_info=True
)
comm.abort()
raise
finally:
self.outgoing_current_count -= 1
stop = time()
if self.digests is not None:
self.digests["get-data-send-duration"].add(stop - start)
total_bytes = sum(filter(None, nbytes.values()))
self.outgoing_count += 1
duration = (stop - start) or 0.5 # windows
self.outgoing_transfer_log.append(
{
"start": start + self.scheduler_delay,
"stop": stop + self.scheduler_delay,
"middle": (start + stop) / 2,
"duration": duration,
"who": who,
"keys": nbytes,
"total": total_bytes,
"compressed": compressed,
"bandwidth": total_bytes / duration,
}
)
return Status.dont_reply
###################
# Local Execution #
###################
def update_data(
self,
data: dict[str, object],
report: bool = True,
        stimulus_id: str | None = None,
) -> dict[str, Any]:
if stimulus_id is None:
stimulus_id = f"update-data-{time()}"
recommendations: Recs = {}
scheduler_messages = []
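        # Scattered data: keys already known to this worker are transitioned to
        # memory with the provided value; brand-new keys are registered and put
        # straight into memory.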
for key, value in data.items():
try:
ts = self.tasks[key]
recommendations[ts] = ("memory", value)
except KeyError:
self.tasks[key] = ts = TaskState(key)
try:
recs = self._put_key_in_memory(ts, value, stimulus_id=stimulus_id)
except Exception as e:
msg = error_message(e)
recommendations = {ts: tuple(msg.values())}
else:
recommendations.update(recs)
self.log.append((key, "receive-from-scatter", stimulus_id, time()))
if report:
scheduler_messages.append(
{"op": "add-keys", "keys": list(data), "stimulus_id": stimulus_id}
)
self.transitions(recommendations, stimulus_id=stimulus_id)
for msg in scheduler_messages:
self.batched_stream.send(msg)
return {"nbytes": {k: sizeof(v) for k, v in data.items()}, "status": "OK"}
def handle_free_keys(self, keys: list[str], stimulus_id: str) -> None:
"""
Handler to be called by the scheduler.
The given keys are no longer referred to and required by the scheduler.
The worker is now allowed to release the key, if applicable.
This does not guarantee that the memory is released since the worker may
        still decide to hold on to the data and task because it is required by an
upstream dependency.
"""
self.log.append(("free-keys", keys, stimulus_id, time()))
recommendations: Recs = {}
for key in keys:
ts = self.tasks.get(key)
if ts:
recommendations[ts] = "released"
self.transitions(recommendations, stimulus_id=stimulus_id)
def handle_remove_replicas(self, keys: list[str], stimulus_id: str) -> str:
"""Stream handler notifying the worker that it might be holding unreferenced,
superfluous data.
This should not actually happen during ordinary operations and is only intended
to correct any erroneous state. An example where this is necessary is if a
worker fetches data for a downstream task but that task is released before the
data arrives. In this case, the scheduler will notify the worker that it may be
        holding this unnecessary data, if the worker hasn't already released the
        data itself.
        This handler does not guarantee that the task or the data will actually be
        released; it only asks the worker to release the data on a best-effort
        basis. This protects from race conditions where the given keys may
already have been rescheduled for compute in which case the compute
would win and this handler is ignored.
For stronger guarantees, see handler free_keys
"""
self.log.append(("remove-replicas", keys, stimulus_id, time()))
recommendations: Recs = {}
rejected = []
for key in keys:
ts = self.tasks.get(key)
if ts is None or ts.state != "memory":
continue
if not ts.is_protected():
self.log.append(
(ts.key, "remove-replica-confirmed", stimulus_id, time())
)
recommendations[ts] = "released"
else:
rejected.append(key)
if rejected:
self.log.append(("remove-replica-rejected", rejected, stimulus_id, time()))
self.batched_stream.send(
{"op": "add-keys", "keys": rejected, "stimulus_id": stimulus_id}
)
self.transitions(recommendations, stimulus_id=stimulus_id)
return "OK"
async def set_resources(self, **resources) -> None:
for r, quantity in resources.items():
if r in self.total_resources:
self.available_resources[r] += quantity - self.total_resources[r]
else:
self.available_resources[r] = quantity
self.total_resources[r] = quantity
await retry_operation(
self.scheduler.set_resources,
resources=self.total_resources,
worker=self.contact_address,
)
###################
# Task Management #
###################
def handle_cancel_compute(self, key: str, stimulus_id: str) -> None:
"""
Cancel a task on a best effort basis. This is only possible while a task
is in state `waiting` or `ready`.
Nothing will happen otherwise.
"""
ts = self.tasks.get(key)
if ts and ts.state in READY | {"waiting"}:
self.log.append((key, "cancel-compute", stimulus_id, time()))
# All possible dependents of TS should not be in state Processing on
# scheduler side and therefore should not be assigned to a worker,
# yet.
assert not ts.dependents
self.transition(ts, "released", stimulus_id=stimulus_id)
def handle_acquire_replicas(
self,
*,
keys: Collection[str],
who_has: dict[str, Collection[str]],
stimulus_id: str,
) -> None:
recommendations: Recs = {}
for key in keys:
ts = self.ensure_task_exists(
key=key,
# Transfer this data after all dependency tasks of computations with
# default or explicitly high (>0) user priority and before all
# computations with low priority (<0). Note that the priority= parameter
# of compute() is multiplied by -1 before it reaches TaskState.priority.
priority=(1,),
stimulus_id=stimulus_id,
)
if ts.state != "memory":
recommendations[ts] = "fetch"
self.update_who_has(who_has)
self.transitions(recommendations, stimulus_id=stimulus_id)
def ensure_task_exists(
self, key: str, *, priority: tuple[int, ...], stimulus_id: str
) -> TaskState:
try:
ts = self.tasks[key]
logger.debug("Data task %s already known (stimulus_id=%s)", ts, stimulus_id)
except KeyError:
self.tasks[key] = ts = TaskState(key)
if not ts.priority:
assert priority
ts.priority = priority
self.log.append((key, "ensure-task-exists", ts.state, stimulus_id, time()))
return ts
def handle_compute_task(
self,
*,
key: str,
who_has: dict[str, Collection[str]],
priority: tuple[int, ...],
duration: float,
function=None,
args=None,
kwargs=None,
task=no_value, # distributed.scheduler.TaskState.run_spec
nbytes: dict[str, int] | None = None,
resource_restrictions: dict[str, float] | None = None,
actor: bool = False,
annotations: dict | None = None,
stimulus_id: str,
) -> None:
self.log.append((key, "compute-task", stimulus_id, time()))
try:
ts = self.tasks[key]
logger.debug(
"Asked to compute an already known task %s",
{"task": ts, "stimulus_id": stimulus_id},
)
except KeyError:
self.tasks[key] = ts = TaskState(key)
ts.run_spec = SerializedTask(function, args, kwargs, task)
assert isinstance(priority, tuple)
priority = priority + (self.generation,)
self.generation -= 1
if actor:
self.actors[ts.key] = None
ts.exception = None
ts.traceback = None
ts.exception_text = ""
ts.traceback_text = ""
ts.priority = priority
ts.duration = duration
if resource_restrictions:
ts.resource_restrictions = resource_restrictions
ts.annotations = annotations
recommendations: Recs = {}
scheduler_msgs: Smsgs = []
for dependency in who_has:
dep_ts = self.ensure_task_exists(
key=dependency,
priority=priority,
stimulus_id=stimulus_id,
)
# link up to child / parents
ts.dependencies.add(dep_ts)
dep_ts.dependents.add(ts)
if ts.state in READY | {"executing", "waiting", "resumed"}:
pass
elif ts.state == "memory":
recommendations[ts] = "memory"
scheduler_msgs.append(self._get_task_finished_msg(ts))
elif ts.state in {
"released",
"fetch",
"flight",
"missing",
"cancelled",
"error",
}:
recommendations[ts] = "waiting"
else: # pragma: no cover
raise RuntimeError(f"Unexpected task state encountered {ts} {stimulus_id}")
for msg in scheduler_msgs:
self.batched_stream.send(msg)
self.update_who_has(who_has)
self.transitions(recommendations, stimulus_id=stimulus_id)
if nbytes is not None:
for key, value in nbytes.items():
self.tasks[key].nbytes = value
def transition_missing_fetch(
self, ts: TaskState, *, stimulus_id: str
) -> tuple[Recs, Smsgs]:
if self.validate:
assert ts.state == "missing"
assert ts.priority is not None
self._missing_dep_flight.discard(ts)
ts.state = "fetch"
ts.done = False
self.data_needed.push(ts)
return {}, []
def transition_missing_released(
self, ts: TaskState, *, stimulus_id: str
) -> tuple[Recs, Smsgs]:
self._missing_dep_flight.discard(ts)
recs, smsgs = self.transition_generic_released(ts, stimulus_id=stimulus_id)
assert ts.key in self.tasks
return recs, smsgs
def transition_flight_missing(
self, ts: TaskState, *, stimulus_id: str
) -> tuple[Recs, Smsgs]:
assert ts.done
ts.state = "missing"
self._missing_dep_flight.add(ts)
ts.done = False
return {}, []
def transition_released_fetch(
self, ts: TaskState, *, stimulus_id: str
) -> tuple[Recs, Smsgs]:
if self.validate:
assert ts.state == "released"
assert ts.priority is not None
for w in ts.who_has:
self.pending_data_per_worker[w].push(ts)
ts.state = "fetch"
ts.done = False
self.data_needed.push(ts)
return {}, []
def transition_generic_released(
self, ts: TaskState, *, stimulus_id: str
) -> tuple[Recs, Smsgs]:
self.release_key(ts.key, stimulus_id=stimulus_id)
recs: Recs = {}
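        # Recursively release dependencies that nobody is waiting for anymore
        # and that are neither in memory nor about to run; forget this task
        # entirely if it has no dependents left.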
for dependency in ts.dependencies:
if (
not dependency.waiters
and dependency.state not in READY | PROCESSING | {"memory"}
):
recs[dependency] = "released"
if not ts.dependents:
recs[ts] = "forgotten"
return recs, []
def transition_released_waiting(
self, ts: TaskState, *, stimulus_id: str
) -> tuple[Recs, Smsgs]:
if self.validate:
assert ts.state == "released"
assert all(d.key in self.tasks for d in ts.dependencies)
recommendations: Recs = {}
ts.waiting_for_data.clear()
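        # Rebuild waiting_for_data: every dependency that is not yet in memory
        # must be fetched (or computed) before this task can become ready.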
for dep_ts in ts.dependencies:
if not dep_ts.state == "memory":
ts.waiting_for_data.add(dep_ts)
dep_ts.waiters.add(ts)
if dep_ts.state not in {"fetch", "flight"}:
recommendations[dep_ts] = "fetch"
if ts.waiting_for_data:
self.waiting_for_data_count += 1
elif ts.resource_restrictions:
recommendations[ts] = "constrained"
else:
recommendations[ts] = "ready"
ts.state = "waiting"
return recommendations, []
def transition_fetch_flight(
self, ts: TaskState, worker, *, stimulus_id: str
) -> tuple[Recs, Smsgs]:
if self.validate:
assert ts.state == "fetch"
assert ts.who_has
ts.done = False
ts.state = "flight"
ts.coming_from = worker
self._in_flight_tasks.add(ts)
return {}, []
def transition_memory_released(
self, ts: TaskState, *, stimulus_id: str
) -> tuple[Recs, Smsgs]:
recs, smsgs = self.transition_generic_released(ts, stimulus_id=stimulus_id)
smsgs.append({"op": "release-worker-data", "key": ts.key})
return recs, smsgs
def transition_waiting_constrained(
self, ts: TaskState, *, stimulus_id: str
) -> tuple[Recs, Smsgs]:
if self.validate:
assert ts.state == "waiting"
assert not ts.waiting_for_data
assert all(
dep.key in self.data or dep.key in self.actors
for dep in ts.dependencies
)
assert all(dep.state == "memory" for dep in ts.dependencies)
assert ts.key not in self.ready
ts.state = "constrained"
self.constrained.append(ts.key)
return {}, []
def transition_long_running_rescheduled(
self, ts: TaskState, *, stimulus_id: str
) -> tuple[Recs, Smsgs]:
recs: Recs = {ts: "released"}
smsgs = [{"op": "reschedule", "key": ts.key, "worker": self.address}]
return recs, smsgs
def transition_executing_rescheduled(
self, ts: TaskState, *, stimulus_id: str
) -> tuple[Recs, Smsgs]:
for resource, quantity in ts.resource_restrictions.items():
self.available_resources[resource] += quantity
self._executing.discard(ts)
recs: Recs = {ts: "released"}
smsgs: Smsgs = [{"op": "reschedule", "key": ts.key, "worker": self.address}]
return recs, smsgs
def transition_waiting_ready(
self, ts: TaskState, *, stimulus_id: str
) -> tuple[Recs, Smsgs]:
if self.validate:
assert ts.state == "waiting"
assert ts.key not in self.ready
assert not ts.waiting_for_data
for dep in ts.dependencies:
assert dep.key in self.data or dep.key in self.actors
assert dep.state == "memory"
ts.state = "ready"
assert ts.priority is not None
heapq.heappush(self.ready, (ts.priority, ts.key))
return {}, []
def transition_cancelled_error(
self,
ts: TaskState,
exception,
traceback,
exception_text,
traceback_text,
*,
stimulus_id: str,
) -> tuple[Recs, Smsgs]:
recs: Recs = {}
smsgs: Smsgs = []
if ts._previous == "executing":
recs, smsgs = self.transition_executing_error(
ts,
exception,
traceback,
exception_text,
traceback_text,
stimulus_id=stimulus_id,
)
elif ts._previous == "flight":
recs, smsgs = self.transition_flight_error(
ts,
exception,
traceback,
exception_text,
traceback_text,
stimulus_id=stimulus_id,
)
if ts._next:
recs[ts] = ts._next
return recs, smsgs
def transition_generic_error(
self,
ts: TaskState,
exception,
traceback,
exception_text,
traceback_text,
*,
stimulus_id: str,
) -> tuple[Recs, Smsgs]:
ts.exception = exception
ts.traceback = traceback
ts.exception_text = exception_text
ts.traceback_text = traceback_text
ts.state = "error"
smsg = {
"op": "task-erred",
"status": "error",
"key": ts.key,
"thread": self.threads.get(ts.key),
"exception": ts.exception,
"traceback": ts.traceback,
"exception_text": ts.exception_text,
"traceback_text": ts.traceback_text,
}
if ts.startstops:
smsg["startstops"] = ts.startstops
return {}, [smsg]
def transition_executing_error(
self,
ts: TaskState,
exception,
traceback,
exception_text,
traceback_text,
*,
stimulus_id: str,
) -> tuple[Recs, Smsgs]:
for resource, quantity in ts.resource_restrictions.items():
self.available_resources[resource] += quantity
self._executing.discard(ts)
return self.transition_generic_error(
ts,
exception,
traceback,
exception_text,
traceback_text,
stimulus_id=stimulus_id,
)
def _transition_from_resumed(
self, ts: TaskState, finish: str, *, stimulus_id: str
) -> tuple[Recs, Smsgs]:
"""`resumed` is an intermediate degenerate state which splits further up
        into one of two states depending on what the last signal / next state
        is intended to be. There are only two viable choices: either the task
        must be fetched from another worker, `resumed(fetch)`, or it must be
        computed on this worker, `resumed(waiting)`.
The only viable state transitions ending up here are
flight -> cancelled -> resumed(waiting)
or
executing -> cancelled -> resumed(fetch)
depending on the origin. Equally, only `fetch`, `waiting` or `released`
are allowed output states.
See also `transition_resumed_waiting`
"""
recs: Recs = {}
smsgs: Smsgs = []
if ts.done:
next_state = ts._next
# if the next state is already intended to be waiting or if the
# coro/thread is still running (ts.done==False), this is a noop
if ts._next != finish:
recs, smsgs = self.transition_generic_released(
ts, stimulus_id=stimulus_id
)
assert next_state
recs[ts] = next_state
else:
ts._next = finish
return recs, smsgs
def transition_resumed_fetch(
self, ts: TaskState, *, stimulus_id: str
) -> tuple[Recs, Smsgs]:
"""
See Worker._transition_from_resumed
"""
return self._transition_from_resumed(ts, "fetch", stimulus_id=stimulus_id)
def transition_resumed_missing(
self, ts: TaskState, *, stimulus_id: str
) -> tuple[Recs, Smsgs]:
"""
See Worker._transition_from_resumed
"""
return self._transition_from_resumed(ts, "missing", stimulus_id=stimulus_id)
def transition_resumed_waiting(self, ts: TaskState, *, stimulus_id: str):
"""
See Worker._transition_from_resumed
"""
return self._transition_from_resumed(ts, "waiting", stimulus_id=stimulus_id)
def transition_cancelled_fetch(
self, ts: TaskState, *, stimulus_id: str
) -> tuple[Recs, Smsgs]:
if ts.done:
return {ts: "released"}, []
elif ts._previous == "flight":
ts.state = ts._previous
return {}, []
else:
assert ts._previous == "executing"
return {ts: ("resumed", "fetch")}, []
def transition_cancelled_resumed(
self, ts: TaskState, next: str, *, stimulus_id: str
) -> tuple[Recs, Smsgs]:
ts._next = next
ts.state = "resumed"
return {}, []
def transition_cancelled_waiting(
self, ts: TaskState, *, stimulus_id: str
) -> tuple[Recs, Smsgs]:
if ts.done:
return {ts: "released"}, []
elif ts._previous == "executing":
ts.state = ts._previous
return {}, []
else:
assert ts._previous == "flight"
return {ts: ("resumed", "waiting")}, []
def transition_cancelled_forgotten(
self, ts: TaskState, *, stimulus_id: str
) -> tuple[Recs, Smsgs]:
ts._next = "forgotten"
if not ts.done:
return {}, []
return {ts: "released"}, []
def transition_cancelled_released(
self, ts: TaskState, *, stimulus_id: str
) -> tuple[Recs, Smsgs]:
if not ts.done:
ts._next = "released"
return {}, []
next_state = ts._next
assert next_state
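        # The task finished (or failed) while it was cancelled: free any
        # resources it may still hold, release it, and then follow up with the
        # state it was originally headed to.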
self._executing.discard(ts)
self._in_flight_tasks.discard(ts)
for resource, quantity in ts.resource_restrictions.items():
self.available_resources[resource] += quantity
recs, smsgs = self.transition_generic_released(ts, stimulus_id=stimulus_id)
if next_state != "released":
recs[ts] = next_state
return recs, smsgs
def transition_executing_released(
self, ts: TaskState, *, stimulus_id: str
) -> tuple[Recs, Smsgs]:
ts._previous = ts.state
ts._next = "released"
# See https://github.com/dask/distributed/pull/5046#discussion_r685093940
ts.state = "cancelled"
ts.done = False
return {}, []
def transition_long_running_memory(
self, ts: TaskState, value=no_value, *, stimulus_id: str
) -> tuple[Recs, Smsgs]:
self.executed_count += 1
return self.transition_generic_memory(ts, value=value, stimulus_id=stimulus_id)
def transition_generic_memory(
self, ts: TaskState, value=no_value, *, stimulus_id: str
) -> tuple[Recs, Smsgs]:
if value is no_value and ts.key not in self.data:
raise RuntimeError(
f"Tried to transition task {ts} to `memory` without data available"
)
if ts.resource_restrictions is not None:
for resource, quantity in ts.resource_restrictions.items():
self.available_resources[resource] += quantity
self._executing.discard(ts)
self._in_flight_tasks.discard(ts)
ts.coming_from = None
try:
recs = self._put_key_in_memory(ts, value, stimulus_id=stimulus_id)
except Exception as e:
msg = error_message(e)
recs = {ts: tuple(msg.values())}
return recs, []
assert ts.key in self.data or ts.key in self.actors
smsgs = [self._get_task_finished_msg(ts)]
return recs, smsgs
def transition_executing_memory(
self, ts: TaskState, value=no_value, *, stimulus_id: str
) -> tuple[Recs, Smsgs]:
if self.validate:
assert ts.state == "executing" or ts.key in self.long_running
assert not ts.waiting_for_data
assert ts.key not in self.ready
self._executing.discard(ts)
self.executed_count += 1
return self.transition_generic_memory(ts, value=value, stimulus_id=stimulus_id)
def transition_constrained_executing(
self, ts: TaskState, *, stimulus_id: str
) -> tuple[Recs, Smsgs]:
if self.validate:
assert not ts.waiting_for_data
assert ts.key not in self.data
assert ts.state in READY
assert ts.key not in self.ready
for dep in ts.dependencies:
assert dep.key in self.data or dep.key in self.actors
for resource, quantity in ts.resource_restrictions.items():
self.available_resources[resource] -= quantity
ts.state = "executing"
self._executing.add(ts)
self.loop.add_callback(self.execute, ts.key, stimulus_id=stimulus_id)
return {}, []
def transition_ready_executing(
self, ts: TaskState, *, stimulus_id: str
) -> tuple[Recs, Smsgs]:
if self.validate:
assert not ts.waiting_for_data
assert ts.key not in self.data
assert ts.state in READY
assert ts.key not in self.ready
assert all(
dep.key in self.data or dep.key in self.actors
for dep in ts.dependencies
)
ts.state = "executing"
self._executing.add(ts)
self.loop.add_callback(self.execute, ts.key, stimulus_id=stimulus_id)
return {}, []
def transition_flight_fetch(
self, ts: TaskState, *, stimulus_id: str
) -> tuple[Recs, Smsgs]:
# If this transition is called after the flight coroutine has finished,
# we can reset the task and transition to fetch again. If it is not yet
# finished, this should be a no-op
if not ts.done:
return {}, []
recommendations: Recs = {}
ts.state = "fetch"
ts.coming_from = None
ts.done = False
if not ts.who_has:
recommendations[ts] = "missing"
else:
self.data_needed.push(ts)
for w in ts.who_has:
self.pending_data_per_worker[w].push(ts)
return recommendations, []
def transition_flight_error(
self,
ts: TaskState,
exception,
traceback,
exception_text,
traceback_text,
*,
stimulus_id: str,
) -> tuple[Recs, Smsgs]:
self._in_flight_tasks.discard(ts)
ts.coming_from = None
return self.transition_generic_error(
ts,
exception,
traceback,
exception_text,
traceback_text,
stimulus_id=stimulus_id,
)
def transition_flight_released(
self, ts: TaskState, *, stimulus_id: str
) -> tuple[Recs, Smsgs]:
if ts.done:
# FIXME: Is this even possible? Would an assert instead be more
# sensible?
return self.transition_generic_released(ts, stimulus_id=stimulus_id)
else:
ts._previous = "flight"
ts._next = "released"
# See https://github.com/dask/distributed/pull/5046#discussion_r685093940
ts.state = "cancelled"
return {}, []
def transition_cancelled_memory(
self, ts: TaskState, value, *, stimulus_id: str
) -> tuple[Recs, Smsgs]:
assert ts._next
return {ts: ts._next}, []
def transition_executing_long_running(
self, ts: TaskState, compute_duration, *, stimulus_id: str
) -> tuple[Recs, Smsgs]:
ts.state = "long-running"
self._executing.discard(ts)
self.long_running.add(ts.key)
smsgs = [
{
"op": "long-running",
"key": ts.key,
"compute_duration": compute_duration,
}
]
self.io_loop.add_callback(self.ensure_computing)
return {}, smsgs
def transition_released_memory(
self, ts: TaskState, value, *, stimulus_id: str
) -> tuple[Recs, Smsgs]:
recs: Recs = {}
try:
recs = self._put_key_in_memory(ts, value, stimulus_id=stimulus_id)
except Exception as e:
msg = error_message(e)
recs[ts] = (
"error",
msg["exception"],
msg["traceback"],
msg["exception_text"],
msg["traceback_text"],
)
return recs, []
smsgs = [{"op": "add-keys", "keys": [ts.key], "stimulus_id": stimulus_id}]
return recs, smsgs
def transition_flight_memory(
self, ts: TaskState, value, *, stimulus_id: str
) -> tuple[Recs, Smsgs]:
self._in_flight_tasks.discard(ts)
ts.coming_from = None
recs: Recs = {}
try:
recs = self._put_key_in_memory(ts, value, stimulus_id=stimulus_id)
except Exception as e:
msg = error_message(e)
recs[ts] = (
"error",
msg["exception"],
msg["traceback"],
msg["exception_text"],
msg["traceback_text"],
)
return recs, []
smsgs = [{"op": "add-keys", "keys": [ts.key], "stimulus_id": stimulus_id}]
return recs, smsgs
def transition_released_forgotten(
self, ts: TaskState, *, stimulus_id: str
) -> tuple[Recs, Smsgs]:
recommendations: Recs = {}
# Dependents _should_ be released by the scheduler before this
if self.validate:
assert not any(d.state != "forgotten" for d in ts.dependents)
for dep in ts.dependencies:
dep.dependents.discard(ts)
if dep.state == "released" and not dep.dependents:
recommendations[dep] = "forgotten"
# Mark state as forgotten in case it is still referenced
ts.state = "forgotten"
self.tasks.pop(ts.key, None)
return recommendations, []
def _transition(
self, ts: TaskState, finish: str | tuple, *args, stimulus_id: str, **kwargs
) -> tuple[Recs, Smsgs]:
if isinstance(finish, tuple):
# the concatenated transition path might need to access the tuple
assert not args
finish, *args = finish # type: ignore
if ts is None or ts.state == finish:
return {}, []
start = ts.state
func = self._transitions_table.get((start, cast(str, finish)))
if func is not None:
self._transition_counter += 1
recs, smsgs = func(ts, *args, stimulus_id=stimulus_id, **kwargs)
self._notify_plugins("transition", ts.key, start, finish, **kwargs)
elif "released" not in (start, finish):
# start -> "released" -> finish
try:
recs, smsgs = self._transition(ts, "released", stimulus_id=stimulus_id)
v = recs.get(ts, (finish, *args))
v_state: str
v_args: list | tuple
if isinstance(v, tuple):
v_state, *v_args = v
else:
v_state, v_args = v, ()
b_recs, b_smsgs = self._transition(
ts, v_state, *v_args, stimulus_id=stimulus_id
)
recs.update(b_recs)
smsgs += b_smsgs
except InvalidTransition:
raise InvalidTransition(
f"Impossible transition from {start} to {finish} for {ts.key}"
) from None
else:
raise InvalidTransition(
f"Impossible transition from {start} to {finish} for {ts.key}"
)
self.log.append(
(
# key
ts.key,
# initial
start,
# recommended
finish,
# final
ts.state,
# new recommendations
{ts.key: new for ts, new in recs.items()},
stimulus_id,
time(),
)
)
return recs, smsgs
def transition(
self, ts: TaskState, finish: str, *, stimulus_id: str, **kwargs
) -> None:
"""Transition a key from its current state to the finish state
        Any scheduler messages produced by the transition are sent over the
        batched stream, and follow-up recommendations are processed immediately
        via :meth:`transitions`; nothing is returned.
        See Also
        --------
        Worker.transitions: transitive version of this function
"""
recs, smsgs = self._transition(ts, finish, stimulus_id=stimulus_id, **kwargs)
for msg in smsgs:
self.batched_stream.send(msg)
self.transitions(recs, stimulus_id=stimulus_id)
def transitions(self, recommendations: Recs, *, stimulus_id: str) -> None:
"""Process transitions until none are left
This includes feedback from previous transitions and continues until we
reach a steady state
"""
smsgs = []
remaining_recs = recommendations.copy()
tasks = set()
while remaining_recs:
ts, finish = remaining_recs.popitem()
tasks.add(ts)
a_recs, a_smsgs = self._transition(ts, finish, stimulus_id=stimulus_id)
remaining_recs.update(a_recs)
smsgs += a_smsgs
if self.validate:
# Full state validation is very expensive
for ts in tasks:
self.validate_task(ts)
if not self.batched_stream.closed():
for msg in smsgs:
self.batched_stream.send(msg)
else:
logger.debug(
"BatchedSend closed while transitioning tasks. %d tasks not sent.",
len(smsgs),
)
def maybe_transition_long_running(
self, ts: TaskState, *, stimulus_id: str, compute_duration=None
):
if ts.state == "executing":
self.transition(
ts,
"long-running",
compute_duration=compute_duration,
stimulus_id=stimulus_id,
)
assert ts.state == "long-running"
def stateof(self, key: str) -> dict[str, Any]:
ts = self.tasks[key]
return {
"executing": ts.state == "executing",
"waiting_for_data": bool(ts.waiting_for_data),
"heap": key in pluck(1, self.ready),
"data": key in self.data,
}
def story(self, *keys_or_tasks: str | TaskState) -> list[tuple]:
keys = [e.key if isinstance(e, TaskState) else e for e in keys_or_tasks]
return [
msg
for msg in self.log
if any(key in msg for key in keys)
or any(
key in c
for key in keys
for c in msg
if isinstance(c, (tuple, list, set))
)
]
def ensure_communicating(self) -> None:
stimulus_id = f"ensure-communicating-{time()}"
skipped_worker_in_flight = []
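        # Tasks whose holders are all busy with an in-flight transfer are
        # parked here and pushed back onto data_needed after the loop.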
while self.data_needed and (
len(self.in_flight_workers) < self.total_out_connections
or self.comm_nbytes < self.comm_threshold_bytes
):
logger.debug(
"Ensure communicating. Pending: %d. Connections: %d/%d",
len(self.data_needed),
len(self.in_flight_workers),
self.total_out_connections,
)
ts = self.data_needed.pop()
if ts.state != "fetch":
continue
workers = [w for w in ts.who_has if w not in self.in_flight_workers]
if not workers:
assert ts.priority is not None
skipped_worker_in_flight.append(ts)
continue
host = get_address_host(self.address)
local = [w for w in workers if get_address_host(w) == host]
if local:
worker = random.choice(local)
else:
worker = random.choice(list(workers))
assert worker != self.address
to_gather, total_nbytes = self.select_keys_for_gather(worker, ts.key)
self.log.append(
("gather-dependencies", worker, to_gather, stimulus_id, time())
)
self.comm_nbytes += total_nbytes
self.in_flight_workers[worker] = to_gather
recommendations: Recs = {
self.tasks[d]: ("flight", worker) for d in to_gather
}
self.transitions(recommendations, stimulus_id=stimulus_id)
self.loop.add_callback(
self.gather_dep,
worker=worker,
to_gather=to_gather,
total_nbytes=total_nbytes,
stimulus_id=stimulus_id,
)
for el in skipped_worker_in_flight:
self.data_needed.push(el)
def _get_task_finished_msg(self, ts: TaskState) -> dict[str, Any]:
if ts.key not in self.data and ts.key not in self.actors:
raise RuntimeError(f"Task {ts} not ready")
typ = ts.type
if ts.nbytes is None or typ is None:
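            # Size and type were not recorded yet; derive them lazily from the
            # stored value (regular data or actor).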
try:
value = self.data[ts.key]
except KeyError:
value = self.actors[ts.key]
ts.nbytes = sizeof(value)
typ = ts.type = type(value)
del value
try:
typ_serialized = dumps_function(typ)
except PicklingError:
# Some types fail pickling (example: _thread.lock objects),
# send their name as a best effort.
typ_serialized = pickle.dumps(typ.__name__, protocol=4)
d = {
"op": "task-finished",
"status": "OK",
"key": ts.key,
"nbytes": ts.nbytes,
"thread": self.threads.get(ts.key),
"type": typ_serialized,
"typename": typename(typ),
"metadata": ts.metadata,
}
if ts.startstops:
d["startstops"] = ts.startstops
return d
def _put_key_in_memory(self, ts: TaskState, value, *, stimulus_id: str) -> Recs:
"""
Put a key into memory and set data related task state attributes.
On success, generate recommendations for dependents.
        This method does not generate any scheduler messages since it cannot
        distinguish whether the caller has to send an `add-keys` or a
        `task-finished` signal. The caller is required to generate that message
        on success.
        Raises
        ------
        TypeError:
            If the data is put into the in-memory buffer and an exception
            occurs while spilling to disk, that exception is propagated. The
            caller has to handle it, since most callers generate scheduler
            messages on success (see above) and need to signal that this was
            not successful.
            This can only trigger if spill to disk is enabled and the task is
            not an actor.
"""
if ts.key in self.data:
ts.state = "memory"
return {}
recommendations: Recs = {}
if ts.key in self.actors:
self.actors[ts.key] = value
else:
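            # Store through the data buffer (which may spill to disk) and time
            # the write so that slow disk writes show up in startstops.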
start = time()
self.data[ts.key] = value
stop = time()
if stop - start > 0.020:
ts.startstops.append(
{"action": "disk-write", "start": start, "stop": stop}
)
ts.state = "memory"
if ts.nbytes is None:
ts.nbytes = sizeof(value)
ts.type = type(value)
for dep in ts.dependents:
dep.waiting_for_data.discard(ts)
if not dep.waiting_for_data and dep.state == "waiting":
self.waiting_for_data_count -= 1
recommendations[dep] = "ready"
self.log.append((ts.key, "put-in-memory", stimulus_id, time()))
return recommendations
def select_keys_for_gather(self, worker, dep):
assert isinstance(dep, str)
deps = {dep}
total_bytes = self.tasks[dep].get_nbytes()
L = self.pending_data_per_worker[worker]
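        # Opportunistically batch more pending keys from the same worker into
        # this transfer, up to target_message_size.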
while L:
ts = L.pop()
if ts.state != "fetch":
continue
if total_bytes + ts.get_nbytes() > self.target_message_size:
break
deps.add(ts.key)
total_bytes += ts.get_nbytes()
return deps, total_bytes
@property
def total_comm_bytes(self):
warnings.warn(
"The attribute `Worker.total_comm_bytes` has been renamed to `comm_threshold_bytes`. "
"Future versions will only support the new name.",
FutureWarning,
)
return self.comm_threshold_bytes
def _filter_deps_for_fetch(
self, to_gather_keys: Iterable[str]
) -> tuple[set[str], set[str], TaskState | None]:
"""Filter a list of keys before scheduling coroutines to fetch data from workers.
Returns
-------
in_flight_keys:
The subset of keys in to_gather_keys in state `flight` or `resumed`
cancelled_keys:
The subset of tasks in to_gather_keys in state `cancelled` or `memory`
cause:
The task to attach startstops of this transfer to
"""
in_flight_tasks: set[TaskState] = set()
cancelled_keys: set[str] = set()
for key in to_gather_keys:
ts = self.tasks.get(key)
if ts is None:
continue
# At this point, a task has been transitioned fetch->flight
# flight is only allowed to be transitioned into
# {memory, resumed, cancelled}
# resumed and cancelled will block any further transition until this
# coro has been finished
if ts.state in ("flight", "resumed"):
in_flight_tasks.add(ts)
            # If the key is already in memory, the fetch should not happen;
            # this is signalled via cancelled_keys.
elif ts.state in {"cancelled", "memory"}:
cancelled_keys.add(key)
else:
raise RuntimeError(
f"Task {ts.key} found in illegal state {ts.state}. "
"Only states `flight`, `resumed` and `cancelled` possible."
)
        # For diagnostics we want to attach the transfer to a single task. This
        # task is typically the next to be executed but, since we're fetching
        # tasks for potentially many dependents, an exact match is not possible.
        # If there are no dependents, this is a pure replica fetch.
cause = None
for ts in in_flight_tasks:
if ts.dependents:
cause = next(iter(ts.dependents))
break
else:
cause = ts
in_flight_keys = {ts.key for ts in in_flight_tasks}
return in_flight_keys, cancelled_keys, cause
def _update_metrics_received_data(
self, start: float, stop: float, data: dict, cause: TaskState, worker: str
) -> None:
total_bytes = sum(self.tasks[key].get_nbytes() for key in data)
cause.startstops.append(
{
"action": "transfer",
"start": start + self.scheduler_delay,
"stop": stop + self.scheduler_delay,
"source": worker,
}
)
duration = (stop - start) or 0.010
bandwidth = total_bytes / duration
self.incoming_transfer_log.append(
{
"start": start + self.scheduler_delay,
"stop": stop + self.scheduler_delay,
"middle": (start + stop) / 2.0 + self.scheduler_delay,
"duration": duration,
"keys": {key: self.tasks[key].nbytes for key in data},
"total": total_bytes,
"bandwidth": bandwidth,
"who": worker,
}
)
if total_bytes > 1_000_000:
self.bandwidth = self.bandwidth * 0.95 + bandwidth * 0.05
bw, cnt = self.bandwidth_workers[worker]
self.bandwidth_workers[worker] = (bw + bandwidth, cnt + 1)
types = set(map(type, data.values()))
if len(types) == 1:
[typ] = types
bw, cnt = self.bandwidth_types[typ]
self.bandwidth_types[typ] = (bw + bandwidth, cnt + 1)
if self.digests is not None:
self.digests["transfer-bandwidth"].add(total_bytes / duration)
self.digests["transfer-duration"].add(duration)
self.counters["transfer-count"].add(len(data))
self.incoming_count += 1
async def gather_dep(
self,
worker: str,
to_gather: Iterable[str],
total_nbytes: int,
*,
stimulus_id: str,
) -> None:
"""Gather dependencies for a task from a worker who has them
Parameters
----------
worker : str
Address of worker to gather dependencies from
        to_gather : list
            Keys of dependencies to gather from the worker -- this is not
            necessarily the full list of a task's dependencies, as some of them
            may already be present on this worker.
total_nbytes : int
Total number of bytes for all the dependencies in to_gather combined
"""
if self.status not in Status.ANY_RUNNING: # type: ignore
return
recommendations: Recs = {}
with log_errors():
response = {}
to_gather_keys: set[str] = set()
cancelled_keys: set[str] = set()
try:
to_gather_keys, cancelled_keys, cause = self._filter_deps_for_fetch(
to_gather
)
if not to_gather_keys:
self.log.append(
("nothing-to-gather", worker, to_gather, stimulus_id, time())
)
return
assert cause
# Keep namespace clean since this func is long and has many
# dep*, *ts* variables
del to_gather
self.log.append(
("request-dep", worker, to_gather_keys, stimulus_id, time())
)
logger.debug(
"Request %d keys for task %s from %s",
len(to_gather_keys),
cause,
worker,
)
start = time()
response = await get_data_from_worker(
self.rpc, to_gather_keys, worker, who=self.address
)
stop = time()
if response["status"] == "busy":
return
self._update_metrics_received_data(
start=start,
stop=stop,
data=response["data"],
cause=cause,
worker=worker,
)
self.log.append(
("receive-dep", worker, set(response["data"]), stimulus_id, time())
)
except OSError:
logger.exception("Worker stream died during communication: %s", worker)
has_what = self.has_what.pop(worker)
self.pending_data_per_worker.pop(worker)
self.log.append(
("receive-dep-failed", worker, has_what, stimulus_id, time())
)
for d in has_what:
ts = self.tasks[d]
ts.who_has.remove(worker)
except Exception as e:
logger.exception(e)
if self.batched_stream and LOG_PDB:
import pdb
pdb.set_trace()
msg = error_message(e)
for k in self.in_flight_workers[worker]:
ts = self.tasks[k]
recommendations[ts] = tuple(msg.values())
raise
finally:
self.comm_nbytes -= total_nbytes
busy = response.get("status", "") == "busy"
data = response.get("data", {})
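                # Decide the follow-up for every key that was requested from
                # this worker: cancelled keys are released or re-fetched,
                # received keys go to memory, a busy response triggers a retry,
                # and anything else is reported to the scheduler as missing.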
if busy:
self.log.append(
("busy-gather", worker, to_gather_keys, stimulus_id, time())
)
for d in self.in_flight_workers.pop(worker):
ts = self.tasks[d]
ts.done = True
if d in cancelled_keys:
if ts.state == "cancelled":
recommendations[ts] = "released"
else:
recommendations[ts] = "fetch"
elif d in data:
recommendations[ts] = ("memory", data[d])
elif busy:
recommendations[ts] = "fetch"
elif ts not in recommendations:
ts.who_has.discard(worker)
self.has_what[worker].discard(ts.key)
self.log.append((d, "missing-dep", stimulus_id, time()))
self.batched_stream.send(
{"op": "missing-data", "errant_worker": worker, "key": d}
)
recommendations[ts] = "fetch" if ts.who_has else "missing"
del data, response
self.transitions(recommendations, stimulus_id=stimulus_id)
self.ensure_computing()
if not busy:
self.repetitively_busy = 0
else:
# Exponential backoff to avoid hammering scheduler/worker
self.repetitively_busy += 1
await asyncio.sleep(0.100 * 1.5**self.repetitively_busy)
await self.query_who_has(*to_gather_keys)
self.ensure_communicating()
async def find_missing(self) -> None:
with log_errors():
if not self._missing_dep_flight:
return
try:
if self.validate:
for ts in self._missing_dep_flight:
assert not ts.who_has
stimulus_id = f"find-missing-{time()}"
who_has = await retry_operation(
self.scheduler.who_has,
keys=[ts.key for ts in self._missing_dep_flight],
)
who_has = {k: v for k, v in who_has.items() if v}
self.update_who_has(who_has)
recommendations: Recs = {}
for ts in self._missing_dep_flight:
if ts.who_has:
recommendations[ts] = "fetch"
self.transitions(recommendations, stimulus_id=stimulus_id)
finally:
# This is quite arbitrary but the heartbeat has scaling implemented
self.periodic_callbacks[
"find-missing"
].callback_time = self.periodic_callbacks["heartbeat"].callback_time
self.ensure_communicating()
self.ensure_computing()
async def query_who_has(self, *deps: str) -> dict[str, Collection[str]]:
with log_errors():
who_has = await retry_operation(self.scheduler.who_has, keys=deps)
self.update_who_has(who_has)
return who_has
def update_who_has(self, who_has: dict[str, Collection[str]]) -> None:
try:
for dep, workers in who_has.items():
if not workers:
continue
if dep in self.tasks:
dep_ts = self.tasks[dep]
if self.address in workers and self.tasks[dep].state != "memory":
logger.debug(
"Scheduler claims worker %s holds data for task %s which is not true.",
self.name,
dep,
)
# Do not mutate the input dict. That's rude
workers = set(workers) - {self.address}
dep_ts.who_has.update(workers)
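                    # Keep the reverse mapping (has_what) and the per-worker
                    # fetch queues in sync with who_has.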
for worker in workers:
self.has_what[worker].add(dep)
self.pending_data_per_worker[worker].push(dep_ts)
except Exception as e: # pragma: no cover
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def handle_steal_request(self, key: str, stimulus_id: str) -> None:
        # There may be a race condition between stealing and releasing a task.
        # In this case the entry has already been removed from self.tasks, and
        # the `None` state will be registered as `already-computing` on the
        # other end.
ts = self.tasks.get(key)
state = ts.state if ts is not None else None
response = {
"op": "steal-response",
"key": key,
"state": state,
"stimulus_id": stimulus_id,
}
self.batched_stream.send(response)
if state in READY | {"waiting"}:
assert ts
            # If the task is marked as "constrained", it has not yet claimed
            # any entries from `available_resources`; that happens in
            # `transition_constrained_executing`.
self.transition(ts, "released", stimulus_id=stimulus_id)
def handle_worker_status_change(self, status: str) -> None:
new_status = Status.lookup[status] # type: ignore
if (
new_status == Status.closing_gracefully
and self._status not in Status.ANY_RUNNING # type: ignore
):
logger.error(
"Invalid Worker.status transition: %s -> %s", self._status, new_status
)
# Reiterate the current status to the scheduler to restore sync
self._send_worker_status_change()
else:
# Update status and send confirmation to the Scheduler (see status.setter)
self.status = new_status
def release_key(
self,
key: str,
cause: TaskState | None = None,
report: bool = True,
*,
stimulus_id: str,
) -> None:
try:
if self.validate:
assert not isinstance(key, TaskState)
ts = self.tasks[key]
# needed for legacy notification support
state_before = ts.state
ts.state = "released"
logger.debug(
"Release key %s",
{"key": key, "cause": cause, "stimulus_id": stimulus_id},
)
if cause:
self.log.append(
(key, "release-key", {"cause": cause}, stimulus_id, time())
)
else:
self.log.append((key, "release-key", stimulus_id, time()))
if key in self.data:
try:
del self.data[key]
except FileNotFoundError:
                    logger.error(
                        "Tried to delete %s but no file found", key, exc_info=True
                    )
if key in self.actors:
del self.actors[key]
for worker in ts.who_has:
self.has_what[worker].discard(ts.key)
ts.who_has.clear()
if key in self.threads:
del self.threads[key]
if ts.resource_restrictions is not None:
if ts.state == "executing":
for resource, quantity in ts.resource_restrictions.items():
self.available_resources[resource] += quantity
for d in ts.dependencies:
ts.waiting_for_data.discard(d)
d.waiters.discard(ts)
ts.waiting_for_data.clear()
ts.nbytes = None
ts._previous = None
ts._next = None
ts.done = False
self._executing.discard(ts)
self._in_flight_tasks.discard(ts)
self._notify_plugins(
"release_key", key, state_before, cause, stimulus_id, report
)
except CommClosedError:
# Batched stream send might raise if it was already closed
pass
except Exception as e: # pragma: no cover
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
################
# Execute Task #
################
def run(self, comm, function, args=(), wait=True, kwargs=None):
return run(self, comm, function=function, args=args, kwargs=kwargs, wait=wait)
def run_coroutine(self, comm, function, args=(), kwargs=None, wait=True):
return run(self, comm, function=function, args=args, kwargs=kwargs, wait=wait)
async def plugin_add(
self,
plugin: WorkerPlugin | bytes,
name: str | None = None,
catch_errors: bool = True,
) -> dict[str, Any]:
with log_errors(pdb=False):
if isinstance(plugin, bytes):
# Note: historically we have accepted duck-typed classes that don't
# inherit from WorkerPlugin. Don't do `assert isinstance`.
plugin = cast("WorkerPlugin", pickle.loads(plugin))
if name is None:
name = _get_plugin_name(plugin)
assert name
if name in self.plugins:
await self.plugin_remove(name=name)
self.plugins[name] = plugin
            logger.info("Starting Worker plugin %s", name)
if hasattr(plugin, "setup"):
try:
result = plugin.setup(worker=self)
if isawaitable(result):
result = await result
except Exception as e:
if not catch_errors:
raise
msg = error_message(e)
return msg
return {"status": "OK"}
async def plugin_remove(self, name: str) -> dict[str, Any]:
with log_errors(pdb=False):
logger.info(f"Removing Worker plugin {name}")
try:
plugin = self.plugins.pop(name)
if hasattr(plugin, "teardown"):
result = plugin.teardown(worker=self)
if isawaitable(result):
result = await result
except Exception as e:
msg = error_message(e)
return msg
return {"status": "OK"}
async def actor_execute(
self,
actor=None,
function=None,
args=(),
kwargs: dict | None = None,
) -> dict[str, Any]:
kwargs = kwargs or {}
separate_thread = kwargs.pop("separate_thread", True)
key = actor
actor = self.actors[key]
func = getattr(actor, function)
name = key_split(key) + "." + function
try:
if iscoroutinefunction(func):
result = await func(*args, **kwargs)
elif separate_thread:
result = await self.loop.run_in_executor(
self.executors["actor"],
apply_function_actor,
func,
args,
kwargs,
self.execution_state,
name,
self.active_threads,
self.active_threads_lock,
)
else:
result = func(*args, **kwargs)
return {"status": "OK", "result": to_serialize(result)}
except Exception as ex:
return {"status": "error", "exception": to_serialize(ex)}
def actor_attribute(self, actor=None, attribute=None) -> dict[str, Any]:
try:
value = getattr(self.actors[actor], attribute)
return {"status": "OK", "result": to_serialize(value)}
except Exception as ex:
return {"status": "error", "exception": to_serialize(ex)}
def meets_resource_constraints(self, key: str) -> bool:
ts = self.tasks[key]
if not ts.resource_restrictions:
return True
for resource, needed in ts.resource_restrictions.items():
if self.available_resources[resource] < needed:
return False
return True
async def _maybe_deserialize_task(
self, ts: TaskState, *, stimulus_id: str
) -> tuple[Callable, tuple, dict[str, Any]] | None:
if ts.run_spec is None:
return None
try:
start = time()
# Offload deserializing large tasks
if sizeof(ts.run_spec) > OFFLOAD_THRESHOLD:
function, args, kwargs = await offload(_deserialize, *ts.run_spec)
else:
function, args, kwargs = _deserialize(*ts.run_spec)
stop = time()
if stop - start > 0.010:
ts.startstops.append(
{"action": "deserialize", "start": start, "stop": stop}
)
return function, args, kwargs
except Exception as e:
logger.error("Could not deserialize task", exc_info=True)
self.log.append((ts.key, "deserialize-error", stimulus_id, time()))
emsg = error_message(e)
emsg.pop("status")
self.transition(
ts,
"error",
**emsg,
stimulus_id=stimulus_id,
)
raise
def ensure_computing(self) -> None:
if self.status in (Status.paused, Status.closing_gracefully):
return
try:
stimulus_id = f"ensure-computing-{time()}"
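            # First start constrained tasks whose resource requirements can be
            # met, then pull plain ready tasks off the heap while free threads
            # remain.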
while self.constrained and self.executing_count < self.nthreads:
key = self.constrained[0]
ts = self.tasks.get(key, None)
if ts is None or ts.state != "constrained":
self.constrained.popleft()
continue
if self.meets_resource_constraints(key):
self.constrained.popleft()
self.transition(ts, "executing", stimulus_id=stimulus_id)
else:
break
while self.ready and self.executing_count < self.nthreads:
priority, key = heapq.heappop(self.ready)
ts = self.tasks.get(key)
if ts is None:
                    # It is possible for tasks to be released while still
                    # remaining on `ready`: the scheduler might have re-routed
                    # the task to a new worker and told this worker to release
                    # it. If the task has "disappeared", just continue through
                    # the heap.
continue
elif ts.key in self.data:
self.transition(ts, "memory", stimulus_id=stimulus_id)
elif ts.state in READY:
self.transition(ts, "executing", stimulus_id=stimulus_id)
except Exception as e: # pragma: no cover
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
async def execute(self, key: str, *, stimulus_id: str) -> None:
if self.status in {Status.closing, Status.closed, Status.closing_gracefully}:
return
if key not in self.tasks:
return
ts = self.tasks[key]
try:
if ts.state == "cancelled":
                # This might happen if the key was cancelled after execution
                # was scheduled but before it started
logger.debug(
"Trying to execute task %s which is not in executing state anymore",
ts,
)
ts.done = True
self.transition(ts, "released", stimulus_id=stimulus_id)
return
if self.validate:
assert not ts.waiting_for_data
assert ts.state == "executing"
assert ts.run_spec is not None
function, args, kwargs = await self._maybe_deserialize_task( # type: ignore
ts, stimulus_id=stimulus_id
)
args2, kwargs2 = self._prepare_args_for_execution(ts, args, kwargs)
if ts.annotations is not None and "executor" in ts.annotations:
executor = ts.annotations["executor"]
else:
executor = "default"
assert executor in self.executors
assert key == ts.key
self.active_keys.add(ts.key)
result: dict
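            # Dispatch to the matching execution path: coroutine functions run
            # directly on the event loop, thread pool executors use
            # apply_function (which tracks per-thread state), and any other
            # executor gets the simplified wrapper.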
try:
e = self.executors[executor]
ts.start_time = time()
if iscoroutinefunction(function):
result = await apply_function_async(
function,
args2,
kwargs2,
self.scheduler_delay,
)
elif "ThreadPoolExecutor" in str(type(e)):
result = await self.loop.run_in_executor(
e,
apply_function,
function,
args2,
kwargs2,
self.execution_state,
ts.key,
self.active_threads,
self.active_threads_lock,
self.scheduler_delay,
)
else:
result = await self.loop.run_in_executor(
e,
apply_function_simple,
function,
args2,
kwargs2,
self.scheduler_delay,
)
finally:
self.active_keys.discard(ts.key)
key = ts.key
            # key *must* still be in tasks. Releasing it directly is forbidden
            # without going through the cancelled state
ts = self.tasks.get(key) # type: ignore
assert ts, self.story(key)
ts.done = True
result["key"] = ts.key
value = result.pop("result", None)
ts.startstops.append(
{"action": "compute", "start": result["start"], "stop": result["stop"]}
)
self.threads[ts.key] = result["thread"]
recommendations: Recs = {}
if result["op"] == "task-finished":
ts.nbytes = result["nbytes"]
ts.type = result["type"]
recommendations[ts] = ("memory", value)
if self.digests is not None:
self.digests["task-duration"].add(result["stop"] - result["start"])
elif isinstance(result.pop("actual-exception"), Reschedule):
recommendations[ts] = "rescheduled"
else:
logger.warning(
"Compute Failed\n"
"Function: %s\n"
"args: %s\n"
"kwargs: %s\n"
"Exception: %r\n",
str(funcname(function))[:1000],
convert_args_to_str(args2, max_len=1000),
convert_kwargs_to_str(kwargs2, max_len=1000),
result["exception_text"],
)
recommendations[ts] = (
"error",
result["exception"],
result["traceback"],
result["exception_text"],
result["traceback_text"],
)
self.transitions(recommendations, stimulus_id=stimulus_id)
logger.debug("Send compute response to scheduler: %s, %s", ts.key, result)
if self.validate:
assert ts.state != "executing"
assert not ts.waiting_for_data
except Exception as exc:
assert ts
logger.error(
"Exception during execution of task %s.", ts.key, exc_info=True
)
emsg = error_message(exc)
emsg.pop("status")
self.transition(
ts,
"error",
**emsg,
stimulus_id=stimulus_id,
)
finally:
self.ensure_computing()
self.ensure_communicating()
def _prepare_args_for_execution(
self, ts: TaskState, args: tuple, kwargs: dict[str, Any]
) -> tuple[tuple, dict[str, Any]]:
start = time()
data = {}
for dep in ts.dependencies:
k = dep.key
try:
data[k] = self.data[k]
except KeyError:
from .actor import Actor # TODO: create local actor
data[k] = Actor(type(self.actors[k]), self.address, k, self)
args2 = pack_data(args, data, key_types=(bytes, str))
kwargs2 = pack_data(kwargs, data, key_types=(bytes, str))
stop = time()
if stop - start > 0.005:
ts.startstops.append({"action": "disk-read", "start": start, "stop": stop})
if self.digests is not None:
self.digests["disk-load-duration"].add(stop - start)
return args2, kwargs2
##################
# Administrative #
##################
async def memory_monitor(self) -> None:
"""Track this process's memory usage and act accordingly
If we rise above 70% memory use, start dumping data to disk.
If we rise above 80% memory use, stop execution of new tasks
"""
if self._memory_monitoring:
return
self._memory_monitoring = True
assert self.memory_limit
total = 0
memory = self.monitor.get_process_memory()
frac = memory / self.memory_limit
def check_pause(memory):
frac = memory / self.memory_limit
# Pause worker threads if above 80% memory use
if self.memory_pause_fraction and frac > self.memory_pause_fraction:
# Try to free some memory while in paused state
self._throttled_gc.collect()
if self.status == Status.running:
logger.warning(
"Worker is at %d%% memory usage. Pausing worker. "
"Process memory: %s -- Worker memory limit: %s",
int(frac * 100),
format_bytes(memory),
format_bytes(self.memory_limit)
if self.memory_limit is not None
else "None",
)
self.status = Status.paused
elif self.status == Status.paused:
logger.warning(
"Worker is at %d%% memory usage. Resuming worker. "
"Process memory: %s -- Worker memory limit: %s",
int(frac * 100),
format_bytes(memory),
format_bytes(self.memory_limit)
if self.memory_limit is not None
else "None",
)
self.status = Status.running
self.ensure_computing()
self.ensure_communicating()
check_pause(memory)
# Dump data to disk if above 70%
if self.memory_spill_fraction and frac > self.memory_spill_fraction:
from .spill import SpillBuffer
assert isinstance(self.data, SpillBuffer)
logger.debug(
"Worker is at %.0f%% memory usage. Start spilling data to disk.",
frac * 100,
)
            # Implement a hysteresis cycle where spilling starts at the spill
            # threshold and stops at the target threshold. Note that here the
            # target threshold refers to process memory, whereas normally it
            # refers to reported managed memory (e.g. the output of sizeof()).
            # If target=False, hysteresis is disabled.
target = self.memory_limit * (
self.memory_target_fraction or self.memory_spill_fraction
)
count = 0
need = memory - target
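            # Evict one key at a time until process memory drops below the
            # target, yielding to the event loop between evictions and
            # re-checking whether the worker needs to pause.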
while memory > target:
if not self.data.fast:
logger.warning(
"Unmanaged memory use is high. This may indicate a memory leak "
"or the memory may not be released to the OS; see "
"https://distributed.dask.org/en/latest/worker.html#memtrim "
"for more information. "
"-- Unmanaged memory: %s -- Worker memory limit: %s",
format_bytes(memory),
format_bytes(self.memory_limit),
)
break
weight = self.data.evict()
if weight == -1:
# Failed to evict:
# disk full, spill size limit exceeded, or pickle error
break
total += weight
count += 1
await asyncio.sleep(0)
memory = self.monitor.get_process_memory()
if total > need and memory > target:
# Issue a GC to ensure that the evicted data is actually
# freed from memory and taken into account by the monitor
# before trying to evict even more data.
self._throttled_gc.collect()
memory = self.monitor.get_process_memory()
check_pause(memory)
if count:
logger.debug(
"Moved %d tasks worth %s to disk",
count,
format_bytes(total),
)
self._memory_monitoring = False
def cycle_profile(self) -> None:
now = time() + self.scheduler_delay
prof, self.profile_recent = self.profile_recent, profile.create()
self.profile_history.append((now, prof))
self.profile_keys_history.append((now, dict(self.profile_keys)))
self.profile_keys.clear()
def trigger_profile(self) -> None:
"""
Get a frame from all actively computing threads
Merge these frames into existing profile counts
"""
if not self.active_threads: # hope that this is thread-atomic?
return
start = time()
with self.active_threads_lock:
active_threads = self.active_threads.copy()
frames = sys._current_frames()
frames = {ident: frames[ident] for ident in active_threads}
llframes = {}
if self.low_level_profiler:
llframes = {ident: profile.ll_get_stack(ident) for ident in active_threads}
for ident, frame in frames.items():
if frame is not None:
key = key_split(active_threads[ident])
llframe = llframes.get(ident)
state = profile.process(
frame, True, self.profile_recent, stop="distributed/worker.py"
)
profile.llprocess(llframe, None, state)
profile.process(
frame, True, self.profile_keys[key], stop="distributed/worker.py"
)
stop = time()
if self.digests is not None:
self.digests["profile-duration"].add(stop - start)
async def get_profile(
self,
start=None,
stop=None,
key=None,
server: bool = False,
):
now = time() + self.scheduler_delay
if server:
history = self.io_loop.profile
elif key is None:
history = self.profile_history
else:
history = [(t, d[key]) for t, d in self.profile_keys_history if key in d]
if start is None:
istart = 0
else:
istart = bisect.bisect_left(history, (start,))
if stop is None:
istop = None
else:
istop = bisect.bisect_right(history, (stop,)) + 1
if istop >= len(history):
istop = None # include end
if istart == 0 and istop is None:
history = list(history)
else:
iistop = len(history) if istop is None else istop
history = [history[i] for i in range(istart, iistop)]
prof = profile.merge(*pluck(1, history))
if not history:
return profile.create()
if istop is None and (start is None or start < now):
if key is None:
recent = self.profile_recent
else:
recent = self.profile_keys[key]
prof = profile.merge(prof, recent)
return prof
async def get_profile_metadata(
self, start: float = 0, stop: float | None = None
) -> dict[str, Any]:
add_recent = stop is None
now = time() + self.scheduler_delay
stop = stop or now
result = {
"counts": [
(t, d["count"]) for t, d in self.profile_history if start < t < stop
],
"keys": [
(t, {k: d["count"] for k, d in v.items()})
for t, v in self.profile_keys_history
if start < t < stop
],
}
if add_recent:
result["counts"].append((now, self.profile_recent["count"]))
result["keys"].append(
(now, {k: v["count"] for k, v in self.profile_keys.items()})
)
return result
def get_call_stack(self, keys: Collection[str] | None = None) -> dict[str, Any]:
with self.active_threads_lock:
sys_frames = sys._current_frames()
frames = {key: sys_frames[tid] for tid, key in self.active_threads.items()}
if keys is not None:
frames = {key: frames[key] for key in keys if key in frames}
return {key: profile.call_stack(frame) for key, frame in frames.items()}
def _notify_plugins(self, method_name, *args, **kwargs):
for name, plugin in self.plugins.items():
if hasattr(plugin, method_name):
if method_name == "release_key":
warnings.warn(
"The `WorkerPlugin.release_key` hook is deprecated and will be "
"removed in a future version. A similar event can now be "
"caught by filtering for a `finish=='released'` event in the "
"`WorkerPlugin.transition` hook.",
FutureWarning,
)
try:
getattr(plugin, method_name)(*args, **kwargs)
except Exception:
logger.info(
"Plugin '%s' failed with exception", name, exc_info=True
)
##############
# Validation #
##############
def validate_task_memory(self, ts):
assert ts.key in self.data or ts.key in self.actors
assert isinstance(ts.nbytes, int)
assert not ts.waiting_for_data
assert ts.key not in self.ready
assert ts.state == "memory"
def validate_task_executing(self, ts):
assert ts.state == "executing"
assert ts.run_spec is not None
assert ts.key not in self.data
assert not ts.waiting_for_data
for dep in ts.dependencies:
assert dep.state == "memory", self.story(dep)
assert dep.key in self.data or dep.key in self.actors
def validate_task_ready(self, ts):
assert ts.key in pluck(1, self.ready)
assert ts.key not in self.data
assert ts.state != "executing"
assert not ts.done
assert not ts.waiting_for_data
assert all(
dep.key in self.data or dep.key in self.actors for dep in ts.dependencies
)
def validate_task_waiting(self, ts):
assert ts.key not in self.data
assert ts.state == "waiting"
assert not ts.done
if ts.dependencies and ts.run_spec:
assert not all(dep.key in self.data for dep in ts.dependencies)
def validate_task_flight(self, ts):
assert ts.key not in self.data
assert ts in self._in_flight_tasks
assert not any(dep.key in self.ready for dep in ts.dependents)
assert ts.coming_from
assert ts.coming_from in self.in_flight_workers
assert ts.key in self.in_flight_workers[ts.coming_from]
def validate_task_fetch(self, ts):
assert ts.key not in self.data
assert self.address not in ts.who_has
assert not ts.done
assert ts in self.data_needed
assert ts.who_has
for w in ts.who_has:
assert ts.key in self.has_what[w]
assert ts in self.pending_data_per_worker[w]
def validate_task_missing(self, ts):
assert ts.key not in self.data
assert not ts.who_has
assert not ts.done
assert not any(ts.key in has_what for has_what in self.has_what.values())
assert ts in self._missing_dep_flight
def validate_task_cancelled(self, ts):
assert ts.key not in self.data
assert ts._previous
assert ts._next
def validate_task_resumed(self, ts):
assert ts.key not in self.data
assert ts._next
assert ts._previous
def validate_task_released(self, ts):
assert ts.key not in self.data
assert not ts._next
assert not ts._previous
assert ts not in self._executing
assert ts not in self._in_flight_tasks
assert ts not in self._missing_dep_flight
assert not any(ts.key in has_what for has_what in self.has_what.values())
assert not ts.waiting_for_data
assert not ts.done
assert not ts.exception
assert not ts.traceback
def validate_task(self, ts):
try:
if ts.key in self.tasks:
assert self.tasks[ts.key] == ts
if ts.state == "memory":
self.validate_task_memory(ts)
elif ts.state == "waiting":
self.validate_task_waiting(ts)
elif ts.state == "missing":
self.validate_task_missing(ts)
elif ts.state == "cancelled":
self.validate_task_cancelled(ts)
elif ts.state == "resumed":
self.validate_task_resumed(ts)
elif ts.state == "ready":
self.validate_task_ready(ts)
elif ts.state == "executing":
self.validate_task_executing(ts)
elif ts.state == "flight":
self.validate_task_flight(ts)
elif ts.state == "fetch":
self.validate_task_fetch(ts)
elif ts.state == "released":
self.validate_task_released(ts)
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise AssertionError(
f"Invalid TaskState encountered for {ts!r}.\nStory:\n{self.story(ts)}\n"
) from e
def validate_state(self):
if self.status not in Status.ANY_RUNNING:
return
try:
assert self.executing_count >= 0
waiting_for_data_count = 0
for ts in self.tasks.values():
assert ts.state is not None
# check that worker has task
for worker in ts.who_has:
assert ts.key in self.has_what[worker]
# check that deps have a set state and that dependency<->dependent links
# are there
for dep in ts.dependencies:
                    # self.tasks used to be a plain dict and this check was
                    # originally that the key was in `task_state`. The key may
                    # already have been popped out of `self.tasks` while the
                    # dependency is still in `memory` before GC grabs it...?
                    # Might need better bookkeeping.
assert dep.state is not None
assert ts in dep.dependents, ts
if ts.waiting_for_data:
waiting_for_data_count += 1
for ts_wait in ts.waiting_for_data:
assert ts_wait.key in self.tasks
assert (
ts_wait.state
in READY | {"executing", "flight", "fetch", "missing"}
or ts_wait in self._missing_dep_flight
or ts_wait.who_has.issubset(self.in_flight_workers)
), (ts, ts_wait, self.story(ts), self.story(ts_wait))
assert self.waiting_for_data_count == waiting_for_data_count
for worker, keys in self.has_what.items():
for k in keys:
assert worker in self.tasks[k].who_has
for ts in self.tasks.values():
self.validate_task(ts)
except Exception as e:
self.loop.add_callback(self.close)
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
#######################################
# Worker Clients (advanced workloads) #
#######################################
@property
def client(self) -> Client:
with self._lock:
if self._client:
return self._client
else:
return self._get_client()
def _get_client(self, timeout: float | None = None) -> Client:
"""Get local client attached to this worker
If no such client exists, create one
See Also
--------
get_client
"""
if timeout is None:
timeout = dask.config.get("distributed.comm.timeouts.connect")
timeout = parse_timedelta(timeout, "s")
try:
from .client import default_client
client = default_client()
except ValueError: # no clients found, need to make a new one
pass
else:
# must be lazy import otherwise cyclic import
from distributed.deploy.cluster import Cluster
if (
client.scheduler
and client.scheduler.address == self.scheduler.address
# The below conditions should only happen in case a second
                # cluster is alive, e.g. if a submitted task spawned its own
# LocalCluster, see gh4565
or (
isinstance(client._start_arg, str)
and client._start_arg == self.scheduler.address
or isinstance(client._start_arg, Cluster)
and client._start_arg.scheduler_address == self.scheduler.address
)
):
self._client = client
if not self._client:
from .client import Client
asynchronous = in_async_call(self.loop)
self._client = Client(
self.scheduler,
loop=self.loop,
security=self.security,
set_as_default=True,
asynchronous=asynchronous,
direct_to_workers=True,
name="worker",
timeout=timeout,
)
Worker._initialized_clients.add(self._client)
if not asynchronous:
assert self._client.status == "running"
return self._client
def get_current_task(self) -> str:
"""Get the key of the task we are currently running
This only makes sense to run within a task
Examples
--------
>>> from dask.distributed import get_worker
>>> def f():
... return get_worker().get_current_task()
>>> future = client.submit(f) # doctest: +SKIP
>>> future.result() # doctest: +SKIP
'f-1234'
See Also
--------
get_worker
"""
return self.active_threads[threading.get_ident()]
def get_worker() -> Worker:
"""Get the worker currently running this task
Examples
--------
>>> def f():
... worker = get_worker() # The worker on which this task is running
... return worker.address
>>> future = client.submit(f) # doctest: +SKIP
>>> future.result() # doctest: +SKIP
'tcp://127.0.0.1:47373'
See Also
--------
get_client
worker_client
"""
try:
return thread_state.execution_state["worker"]
except AttributeError:
try:
return first(
w
for w in Worker._instances
if w.status in Status.ANY_RUNNING # type: ignore
)
except StopIteration:
raise ValueError("No workers found")
def get_client(address=None, timeout=None, resolve_address=True) -> Client:
"""Get a client while within a task.
This client connects to the same scheduler to which the worker is connected
Parameters
----------
address : str, optional
The address of the scheduler to connect to. Defaults to the scheduler
the worker is connected to.
timeout : int or str
Timeout (in seconds) for getting the Client. Defaults to the
``distributed.comm.timeouts.connect`` configuration value.
resolve_address : bool, default True
Whether to resolve `address` to its canonical form.
Returns
-------
Client
Examples
--------
>>> def f():
... client = get_client(timeout="10s")
... futures = client.map(lambda x: x + 1, range(10)) # spawn many tasks
... results = client.gather(futures)
... return sum(results)
>>> future = client.submit(f) # doctest: +SKIP
>>> future.result() # doctest: +SKIP
55
See Also
--------
get_worker
worker_client
secede
"""
if timeout is None:
timeout = dask.config.get("distributed.comm.timeouts.connect")
timeout = parse_timedelta(timeout, "s")
if address and resolve_address:
address = comm.resolve_address(address)
try:
worker = get_worker()
except ValueError: # could not find worker
pass
else:
if not address or worker.scheduler.address == address:
return worker._get_client(timeout=timeout)
from .client import Client
try:
client = Client.current() # TODO: assumes the same scheduler
except ValueError:
client = None
if client and (not address or client.scheduler.address == address):
return client
elif address:
return Client(address, timeout=timeout)
else:
raise ValueError("No global client found and no address provided")
def secede():
"""
Have this task secede from the worker's thread pool
This opens up a new scheduling slot and a new thread for a new task. This
enables the client to schedule tasks on this node, which is
especially useful while waiting for other jobs to finish (e.g., with
``client.gather``).
Examples
--------
>>> def mytask(x):
... # do some work
... client = get_client()
... futures = client.map(...) # do some remote work
... secede() # while that work happens, remove ourself from the pool
... return client.gather(futures) # return gathered results
See Also
--------
get_client
get_worker
"""
worker = get_worker()
tpe_secede() # have this thread secede from the thread pool
duration = time() - thread_state.start_time
worker.loop.add_callback(
worker.maybe_transition_long_running,
worker.tasks[thread_state.key],
compute_duration=duration,
stimulus_id=f"secede-{thread_state.key}-{time()}",
)
class Reschedule(Exception):
"""Reschedule this task
Raising this exception will stop the current execution of the task and ask
the scheduler to reschedule this task, possibly on a different machine.
This does not guarantee that the task will move onto a different machine.
The scheduler will proceed through its normal heuristics to determine the
optimal machine to accept this task. The machine will likely change if the
load across the cluster has significantly changed since first scheduling
the task.
"""
def parse_memory_limit(memory_limit, nthreads, total_cores=CPU_COUNT) -> int | None:
if memory_limit is None:
return None
if memory_limit == "auto":
memory_limit = int(system.MEMORY_LIMIT * min(1, nthreads / total_cores))
with suppress(ValueError, TypeError):
memory_limit = float(memory_limit)
if isinstance(memory_limit, float) and memory_limit <= 1:
memory_limit = int(memory_limit * system.MEMORY_LIMIT)
if isinstance(memory_limit, str):
memory_limit = parse_bytes(memory_limit)
else:
memory_limit = int(memory_limit)
return min(memory_limit, system.MEMORY_LIMIT)
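# Illustrative sketch (not part of the original distributed/worker.py): the
# accepted spellings of a memory limit and what parse_memory_limit above turns
# them into.  The comments describe the general rule; the actual byte counts
# depend on system.MEMORY_LIMIT of the host this runs on.
def _example_parse_memory_limit():
    examples = [
        None,     # -> None (no limit enforced)
        "auto",   # -> MEMORY_LIMIT * min(1, nthreads / total_cores)
        0.5,      # -> half of MEMORY_LIMIT (fractions <= 1 are relative)
        "4 GiB",  # -> parse_bytes("4 GiB"), capped at MEMORY_LIMIT
        2e9,      # -> 2_000_000_000 bytes, capped at MEMORY_LIMIT
    ]
    return [parse_memory_limit(limit, nthreads=4) for limit in examples]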
async def get_data_from_worker(
rpc,
keys,
worker,
who=None,
max_connections=None,
serializers=None,
deserializers=None,
):
"""Get keys from worker
The worker has a two step handshake to acknowledge when data has been fully
delivered. This function implements that handshake.
See Also
--------
Worker.get_data
Worker.gather_dep
utils_comm.gather_data_from_workers
"""
if serializers is None:
serializers = rpc.serializers
if deserializers is None:
deserializers = rpc.deserializers
async def _get_data():
comm = await rpc.connect(worker)
comm.name = "Ephemeral Worker->Worker for gather"
try:
response = await send_recv(
comm,
serializers=serializers,
deserializers=deserializers,
op="get_data",
keys=keys,
who=who,
max_connections=max_connections,
)
try:
status = response["status"]
except KeyError: # pragma: no cover
raise ValueError("Unexpected response", response)
else:
if status == "OK":
await comm.write("OK")
return response
finally:
rpc.reuse(worker, comm)
return await retry_operation(_get_data, operation="get_data_from_worker")
job_counter = [0]
cache_loads = LRU(maxsize=100)
def loads_function(bytes_object):
"""Load a function from bytes, cache bytes"""
if len(bytes_object) < 100000:
try:
result = cache_loads[bytes_object]
except KeyError:
result = pickle.loads(bytes_object)
cache_loads[bytes_object] = result
return result
return pickle.loads(bytes_object)
def _deserialize(function=None, args=None, kwargs=None, task=no_value):
"""Deserialize task inputs and regularize to func, args, kwargs"""
if function is not None:
function = loads_function(function)
if args and isinstance(args, bytes):
args = pickle.loads(args)
if kwargs and isinstance(kwargs, bytes):
kwargs = pickle.loads(kwargs)
if task is not no_value:
assert not function and not args and not kwargs
function = execute_task
args = (task,)
return function, args or (), kwargs or {}
def execute_task(task):
"""Evaluate a nested task
>>> inc = lambda x: x + 1
>>> execute_task((inc, 1))
2
>>> execute_task((sum, [1, 2, (inc, 3)]))
7
"""
if istask(task):
func, args = task[0], task[1:]
return func(*map(execute_task, args))
elif isinstance(task, list):
return list(map(execute_task, task))
else:
return task
cache_dumps = LRU(maxsize=100)
_cache_lock = threading.Lock()
def dumps_function(func) -> bytes:
"""Dump a function to bytes, cache functions"""
try:
with _cache_lock:
result = cache_dumps[func]
except KeyError:
result = pickle.dumps(func, protocol=4)
if len(result) < 100000:
with _cache_lock:
cache_dumps[func] = result
except TypeError: # Unhashable function
result = pickle.dumps(func, protocol=4)
return result
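# Illustrative sketch (not part of the original distributed/worker.py):
# dumps_function and loads_function round-trip a callable through pickled
# bytes, and repeated calls are served from the cache_dumps / cache_loads LRUs.
def _example_function_roundtrip():
    import operator
    payload = dumps_function(operator.add)  # bytes, cached keyed by the function
    func = loads_function(payload)          # callable again, cached keyed by the bytes
    assert func(2, 3) == 5
    # A second dump of the same function hits cache_dumps and returns the
    # identical bytes object.
    assert dumps_function(operator.add) is payload
    return func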
def dumps_task(task):
"""Serialize a dask task
Returns a dict of bytestrings that can each be loaded with ``loads``
Examples
--------
Either returns a task as a function, args, kwargs dict
>>> from operator import add
>>> dumps_task((add, 1)) # doctest: +SKIP
{'function': b'\x80\x04\x95\x00\x8c\t_operator\x94\x8c\x03add\x94\x93\x94.'
'args': b'\x80\x04\x95\x07\x00\x00\x00K\x01K\x02\x86\x94.'}
Or as a single task blob if it can't easily decompose the result. This
happens either if the task is highly nested, or if it isn't a task at all
>>> dumps_task(1) # doctest: +SKIP
{'task': b'\x80\x04\x95\x03\x00\x00\x00\x00\x00\x00\x00K\x01.'}
"""
if istask(task):
if task[0] is apply and not any(map(_maybe_complex, task[2:])):
d = {"function": dumps_function(task[1]), "args": warn_dumps(task[2])}
if len(task) == 4:
d["kwargs"] = warn_dumps(task[3])
return d
elif not any(map(_maybe_complex, task[1:])):
return {"function": dumps_function(task[0]), "args": warn_dumps(task[1:])}
return to_serialize(task)
_warn_dumps_warned = [False]
def warn_dumps(obj, dumps=pickle.dumps, limit=1e6):
"""Dump an object to bytes, warn if those bytes are large"""
b = dumps(obj, protocol=4)
if not _warn_dumps_warned[0] and len(b) > limit:
_warn_dumps_warned[0] = True
s = str(obj)
if len(s) > 70:
s = s[:50] + " ... " + s[-15:]
warnings.warn(
"Large object of size %s detected in task graph: \n"
" %s\n"
"Consider scattering large objects ahead of time\n"
"with client.scatter to reduce scheduler burden and \n"
"keep data on workers\n\n"
" future = client.submit(func, big_data) # bad\n\n"
" big_future = client.scatter(big_data) # good\n"
" future = client.submit(func, big_future) # good"
% (format_bytes(len(b)), s)
)
return b
def apply_function(
function,
args,
kwargs,
execution_state,
key,
active_threads,
active_threads_lock,
time_delay,
):
"""Run a function, collect information
Returns
-------
msg: dictionary with status, result/error, timings, etc..
"""
ident = threading.get_ident()
with active_threads_lock:
active_threads[ident] = key
thread_state.start_time = time()
thread_state.execution_state = execution_state
thread_state.key = key
msg = apply_function_simple(function, args, kwargs, time_delay)
with active_threads_lock:
del active_threads[ident]
return msg
def apply_function_simple(
function,
args,
kwargs,
time_delay,
):
"""Run a function, collect information
Returns
-------
msg: dictionary with status, result/error, timings, etc..
"""
ident = threading.get_ident()
start = time()
try:
result = function(*args, **kwargs)
except Exception as e:
msg = error_message(e)
msg["op"] = "task-erred"
msg["actual-exception"] = e
else:
msg = {
"op": "task-finished",
"status": "OK",
"result": result,
"nbytes": sizeof(result),
"type": type(result) if result is not None else None,
}
finally:
end = time()
msg["start"] = start + time_delay
msg["stop"] = end + time_delay
msg["thread"] = ident
return msg
async def apply_function_async(
function,
args,
kwargs,
time_delay,
):
"""Run a function, collect information
Returns
-------
msg: dictionary with status, result/error, timings, etc..
"""
ident = threading.get_ident()
start = time()
try:
result = await function(*args, **kwargs)
except Exception as e:
msg = error_message(e)
msg["op"] = "task-erred"
msg["actual-exception"] = e
else:
msg = {
"op": "task-finished",
"status": "OK",
"result": result,
"nbytes": sizeof(result),
"type": type(result) if result is not None else None,
}
finally:
end = time()
msg["start"] = start + time_delay
msg["stop"] = end + time_delay
msg["thread"] = ident
return msg
def apply_function_actor(
function, args, kwargs, execution_state, key, active_threads, active_threads_lock
):
"""Run a function, collect information
Returns
-------
msg: dictionary with status, result/error, timings, etc..
"""
ident = threading.get_ident()
with active_threads_lock:
active_threads[ident] = key
thread_state.execution_state = execution_state
thread_state.key = key
thread_state.actor = True
result = function(*args, **kwargs)
with active_threads_lock:
del active_threads[ident]
return result
def get_msg_safe_str(msg):
"""Make a worker msg, which contains args and kwargs, safe to cast to str:
allowing for some arguments to raise exceptions during conversion and
ignoring them.
"""
class Repr:
def __init__(self, f, val):
self._f = f
self._val = val
def __repr__(self):
return self._f(self._val)
msg = msg.copy()
if "args" in msg:
msg["args"] = Repr(convert_args_to_str, msg["args"])
if "kwargs" in msg:
msg["kwargs"] = Repr(convert_kwargs_to_str, msg["kwargs"])
return msg
def convert_args_to_str(args, max_len: int | None = None) -> str:
"""Convert args to a string, allowing for some arguments to raise
exceptions during conversion and ignoring them.
"""
length = 0
strs = ["" for i in range(len(args))]
for i, arg in enumerate(args):
try:
sarg = repr(arg)
except Exception:
sarg = "< could not convert arg to str >"
strs[i] = sarg
length += len(sarg) + 2
if max_len is not None and length > max_len:
return "({}".format(", ".join(strs[: i + 1]))[:max_len]
else:
return "({})".format(", ".join(strs))
def convert_kwargs_to_str(kwargs: dict, max_len: int | None = None) -> str:
"""Convert kwargs to a string, allowing for some arguments to raise
exceptions during conversion and ignoring them.
"""
length = 0
strs = ["" for i in range(len(kwargs))]
for i, (argname, arg) in enumerate(kwargs.items()):
try:
sarg = repr(arg)
except Exception:
sarg = "< could not convert arg to str >"
skwarg = repr(argname) + ": " + sarg
strs[i] = skwarg
length += len(skwarg) + 2
if max_len is not None and length > max_len:
return "{{{}".format(", ".join(strs[: i + 1]))[:max_len]
else:
return "{{{}}}".format(", ".join(strs))
async def run(server, comm, function, args=(), kwargs=None, is_coro=None, wait=True):
kwargs = kwargs or {}
function = pickle.loads(function)
if is_coro is None:
is_coro = iscoroutinefunction(function)
else:
warnings.warn(
"The is_coro= parameter is deprecated. "
"We now automatically detect coroutines/async functions"
)
assert wait or is_coro, "Combination not supported"
if args:
args = pickle.loads(args)
if kwargs:
kwargs = pickle.loads(kwargs)
if has_arg(function, "dask_worker"):
kwargs["dask_worker"] = server
if has_arg(function, "dask_scheduler"):
kwargs["dask_scheduler"] = server
logger.info("Run out-of-band function %r", funcname(function))
try:
if not is_coro:
result = function(*args, **kwargs)
else:
if wait:
result = await function(*args, **kwargs)
else:
server.loop.add_callback(function, *args, **kwargs)
result = None
except Exception as e:
logger.warning(
"Run Failed\nFunction: %s\nargs: %s\nkwargs: %s\n",
str(funcname(function))[:1000],
convert_args_to_str(args, max_len=1000),
convert_kwargs_to_str(kwargs, max_len=1000),
exc_info=True,
)
response = error_message(e)
else:
response = {"status": "OK", "result": to_serialize(result)}
return response
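# Illustrative sketch (not part of the original distributed/worker.py): the
# kind of out-of-band function that `run` above special-cases.  Because the
# signature names a `dask_worker` parameter, has_arg() is true and the hosting
# Worker is injected as a keyword argument before the call; `dask_scheduler`
# is handled the same way on the scheduler side.
def _example_out_of_band(dask_worker=None):
    # Return something small and serializable about the hosting worker.
    return getattr(dask_worker, "address", None)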
_global_workers = Worker._instances
try:
if nvml.device_get_count() < 1:
raise RuntimeError
except (Exception, RuntimeError):
pass
else:
async def gpu_metric(worker):
result = await offload(nvml.real_time)
return result
DEFAULT_METRICS["gpu"] = gpu_metric
def gpu_startup(worker):
return nvml.one_time()
DEFAULT_STARTUP_INFORMATION["gpu"] = gpu_startup
def print(*args, **kwargs):
"""Dask print function
This prints both wherever this function is run, and also in the user's
client session
"""
try:
worker = get_worker()
except ValueError:
pass
else:
msg = {
"args": tuple(stringify(arg) for arg in args),
"kwargs": {k: stringify(v) for k, v in kwargs.items()},
}
worker.log_event("print", msg)
builtins.print(*args, **kwargs)
def warn(*args, **kwargs):
"""Dask warn function
This raises a warning both wherever this function is run, and also
in the user's client session
"""
try:
worker = get_worker()
except ValueError: # pragma: no cover
pass
else:
worker.log_event("warn", {"args": args, "kwargs": kwargs})
warnings.warn(*args, **kwargs)
|
dask/distributed
|
distributed/worker.py
|
Python
|
bsd-3-clause
| 177,089
|
import os
from PIL import Image
from django.template import Library
register = Library()
def thumbnail(file, size='104x104', noimage=''):
# defining the size
x, y = [int(x) for x in size.split('x')]
# defining the filename and the miniature filename
try:
filehead, filetail = os.path.split(file.path)
except:
return ''
basename, format = os.path.splitext(filetail)
#quick fix for format
if format.lower() =='.gif':
return (filehead + '/' + filetail).replace(MEDIA_ROOT, MEDIA_URL)
miniature = basename + '_' + size + format
filename = file.path
miniature_filename = os.path.join(filehead, miniature)
filehead, filetail = os.path.split(file.url)
miniature_url = filehead + '/' + miniature
#fail on missing large file (bad import)
try:
if os.path.exists(miniature_filename) and os.path.getmtime(filename)>os.path.getmtime(miniature_filename):
os.unlink(miniature_filename)
except:
return ''
# if the image wasn't already resized, resize it
if not os.path.exists(miniature_filename):
try:
image = Image.open(filename)
except:
return ''
image.thumbnail([x, y], Image.ANTIALIAS)
        image = image.convert('RGB')  # keep the converted copy; convert() returns a new Image
try:
image.save(miniature_filename, image.format, quality=90, optimize=1)
except:
try:
image.save(miniature_filename, image.format, quality=90)
except:
return noimage
return miniature_url
register.filter(thumbnail)
from django.conf import settings
MEDIA_ROOT, MEDIA_URL = settings.MEDIA_ROOT, settings.MEDIA_URL
def thumbnail_crop(file, size='104x104', noimage=''):
# defining the size
x, y = [int(x) for x in size.split('x')]
# defining the filename and the miniature filename
try:
filehead, filetail = os.path.split(file.path)
except:
return '' # '/media/img/noimage.jpg'
basename, format = os.path.splitext(filetail)
#quick fix for format
if format.lower() =='.gif':
return (filehead + '/' + filetail).replace(MEDIA_ROOT, MEDIA_URL)
miniature = basename + '_' + size + format
filename = file.path
miniature_filename = os.path.join(filehead, miniature)
filehead, filetail = os.path.split(file.url)
miniature_url = filehead + '/' + miniature
try:
if os.path.exists(miniature_filename) and os.path.exists(filename) and os.path.getmtime(filename)>os.path.getmtime(miniature_filename):
os.unlink(miniature_filename)
except:
return ''
# if the image wasn't already resized, resize it
if not os.path.exists(miniature_filename):
try:
image = Image.open(filename)
except:
return '(minfail %s) ' % filename #noimage
src_width, src_height = image.size
src_ratio = float(src_width) / float(src_height)
dst_width, dst_height = x, y
dst_ratio = float(dst_width) / float(dst_height)
if dst_ratio < src_ratio:
crop_height = src_height
crop_width = crop_height * dst_ratio
x_offset = int(float(src_width - crop_width) / 2)
y_offset = 0
else:
crop_width = src_width
crop_height = crop_width / dst_ratio
x_offset = 0
y_offset = int(float(src_height - crop_height) / 3)
try:
image = image.crop((x_offset, y_offset, x_offset+int(crop_width), y_offset+int(crop_height)))
image = image.resize((dst_width, dst_height), Image.ANTIALIAS)
            image = image.convert('RGB')  # keep the converted copy; convert() returns a new Image
except:
pass
try:
image.save(miniature_filename, image.format, quality=90, optimize=1)
except:
try:
image.save(miniature_filename, image.format, quality=90)
except:
return '' #'/media/img/noimage.jpg'
return miniature_url
register.filter(thumbnail_crop)
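# Illustrative sketch (not part of the original templatetag module): the
# miniature filename scheme both filters above produce for a hypothetical
# upload, plus the usual template invocation (`article.image` is a made-up
# field name):
#
#   {% load huski_thumbnail %}
#   <img src="{{ article.image|thumbnail:"200x150" }}">
def _example_miniature_name(path="/media/photos/cat.jpg", size="104x104"):
    filehead, filetail = os.path.split(path)
    basename, ext = os.path.splitext(filetail)
    # -> "/media/photos/cat_104x104.jpg"
    return os.path.join(filehead, basename + "_" + size + ext)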
|
mjbrownie/ascet_filer_teaser
|
ascet_filer_teaser/templatetags/huski_thumbnail.py
|
Python
|
bsd-3-clause
| 4,022
|
# -*- coding: utf-8 -*-
import time
import pytest
from selenium.webdriver import ActionChains
from django.db.models import get_model
from fancypages.test import factories
from fancypages.test.fixtures import admin_user # noqa
FancyPage = get_model('fancypages', 'FancyPage')
@pytest.mark.browser
def test_can_move_block_from_one_container_to_another(live_server, browser,
admin_user):
page = factories.FancyPageFactory(node__name='Home')
main_container = page.containers.all()[0]
layout = factories.TwoColumnLayoutBlockFactory(container=main_container)
browser.visit(live_server.url + page.get_absolute_url())
right = layout.containers.get(name='right-container')
left = layout.containers.get(name='left-container')
moving_block = factories.TextBlockFactory(container=right)
factories.TextBlockFactory(container=right)
factories.TextBlockFactory(container=left)
browser.visit(live_server.url + page.get_absolute_url())
browser.find_by_css('#editor-handle').first.click()
source = browser.find_by_css(
'#block-{} div.move'.format(moving_block.uuid)).first
chain = ActionChains(browser.driver)
chain.drag_and_drop_by_offset(source._element, -600, 200).perform()
time.sleep(5)
assert right.blocks.count() == left.blocks.count() == 1
assert main_container.blocks.count() == 2
main_block_ids = [b.uuid for b in main_container.blocks.all()]
assert main_block_ids == [layout.uuid, moving_block.uuid]
|
tangentlabs/django-fancypages
|
tests/browser/test_moving_blocks.py
|
Python
|
bsd-3-clause
| 1,547
|
__author__="cooke"
__date__ ="$01-Mar-2012 11:17:43$"
|
agcooke/ExperimentControl
|
experimentcontrol/test/__init__.py
|
Python
|
bsd-3-clause
| 53
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'MYPROJECT'
copyright = '2019, MYSELF'
author = 'MYSELF'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = ''
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'MYPROJECTdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'MYPROJECT.tex', 'MYPROJECT Documentation',
'MYSELF', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'myproject', 'MYPROJECT Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'MYPROJECT', 'MYPROJECT Documentation',
author, 'MYPROJECT', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# == NDD DOCKER SPHINX - OVERRIDE ============================================
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.graphviz',
'sphinx.ext.ifconfig',
# 'sphinx.ext.imgmath',
'sphinx.ext.mathjax',
'sphinx.ext.todo',
'sphinx-prompt',
'sphinxcontrib.actdiag',
'sphinxcontrib.blockdiag',
'sphinxcontrib.excel_table',
# 'sphinxcontrib.googleanalytics',
# 'sphinxcontrib.googlechart',
# 'sphinxcontrib.googlemaps',
'sphinxcontrib.nwdiag',
'sphinxcontrib.packetdiag',
'sphinxcontrib.plantuml',
'sphinxcontrib.rackdiag',
'sphinxcontrib.seqdiag',
]
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# If true, 'todo' and 'todoList' produce output, else they produce nothing.
todo_include_todos = True
# -- Markdown ----------------------------------------------------------------
# http://www.sphinx-doc.org/en/stable/usage/markdown.html
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
source_suffix = ['.rst', '.md']
from recommonmark.parser import CommonMarkParser
source_parsers = {
'.md': CommonMarkParser,
}
# -- Pseudo extensions -------------------------------------------------------
# Uncomment to enable Git parsing
# TODO: extract in a Sphinx plugin
#
# Must be defined somewhere
# html_context = {}
#
# import os.path
# source_directory = os.path.dirname(os.path.realpath(__file__))
# python_directory = os.path.join(source_directory, '_python')
# exec(open(os.path.join(python_directory, 'sphinx-git.py'), 'rb').read())
# -- Generator properties ----------------------------------------------------
# The Docker tag of the image that generated this project
ddidier_sphinxdoc_image_tag = '0123456789ABCDEF'
# The Git tag of the image that generated this project
# This is the most recent tag if the image is 'latest'
# Hopefully it will be (manually) updated while releasing...
ddidier_sphinxdoc_git_tag = '1.8.5-2'
|
zCFD/zCFD-docker
|
sphinx-doc/tests/test-init/expected/source/conf.py
|
Python
|
bsd-3-clause
| 7,345
|
# -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant le type cape."""
from .vetement import Vetement
class Cape(Vetement):
"""Type d'objet: cape.
"""
nom_type = "cape"
empilable_sur = ["vêtement", "armure"]
protection_froid = 2
def __init__(self, cle=""):
Vetement.__init__(self, cle)
self.emplacement = "dos"
self.positions = (1, )
|
vlegoff/tsunami
|
src/primaires/objet/types/cape.py
|
Python
|
bsd-3-clause
| 1,924
|
from .base import call_method_or_dispatch, create_registerer
sym_decision_function_dispatcher = {}
sym_decision_function = call_method_or_dispatch('sym_decision_function', sym_decision_function_dispatcher)
register_sym_decision_function = create_registerer(sym_decision_function_dispatcher, 'register_sym_decision_function')
|
jcrudy/sklearntools
|
sklearntools/sym/sym_decision_function.py
|
Python
|
bsd-3-clause
| 328
|
'''
Created on 9 jan. 2013
@author: sander
'''
from bitstring import ConstBitStream, BitStream, Bits
from ipaddress import IPv4Address
from pylisp.packet.ip import protocol_registry
from pylisp.packet.ip.protocol import Protocol
from pylisp.utils import checksum
import math
import numbers
class IPv4Packet(Protocol):
'''
Minimal IPv4 implementation to use in LISP Encapsulated Control Messages.
Options are not interpreted.
'''
header_type = 4
version = 4
def __init__(self, tos=0, identification=0, dont_fragment=False,
more_fragments=False, fragment_offset=0, ttl=0, protocol=0,
source=None, destination=None, options='', payload='',
next_header=None):
'''
Constructor
'''
# Call superclass
super(IPv4Packet, self).__init__(next_header=next_header or protocol,
payload=payload)
# Next-header and protocol can't conflict. Protocol is the official
# name, but next_header is used for compatibility with the other
# headers. They use the same name/number space anyway.
if next_header is not None and protocol != 0 \
and next_header != protocol:
raise ValueError("Conflicting next_header and protocol given")
# Set defaults
self.tos = tos
self.identification = identification
self.dont_fragment = dont_fragment
self.more_fragments = more_fragments
self.fragment_offset = fragment_offset
self.ttl = ttl
self.source = source
self.destination = destination
self.options = options
# Protocol is an alias for next-header
@property
def protocol(self):
return self.next_header
@protocol.setter
def protocol(self, protocol):
self.next_header = protocol
def is_fragmented(self):
return self.more_fragments or self.fragment_offset != 0
def get_final_payload(self):
return (self.protocol, self.payload)
def sanitize(self):
'''
Check if the current settings conform to the RFC and fix where possible
'''
# Let the parent do its stuff
super(IPv4Packet, self).sanitize()
# Check the version
if self.version != 4:
raise ValueError("Protocol version must be 4")
# Treat type-of-service as an 8-bit unsigned integer. Future versions
# of this code may implement methods to treat it as DSCP+ECN
if not isinstance(self.tos, numbers.Integral) \
or self.tos < 0 \
or self.tos >= 2 ** 8:
raise ValueError('Invalid type of service')
# Identification: An identifying value assigned by the sender to aid in
# assembling the fragments of a datagram.
if not isinstance(self.identification, numbers.Integral) \
or self.identification < 0 \
or self.identification >= 2 ** 16:
raise ValueError('Invalid fragment identification')
# An internet datagram can be marked "don't fragment." Any internet
# datagram so marked is not to be internet fragmented under any
# circumstances. If internet datagram marked don't fragment cannot be
# delivered to its destination without fragmenting it, it is to be
# discarded instead.
if not isinstance(self.dont_fragment, bool):
raise ValueError("Don't fragment flag must be a boolean")
# The More Fragments flag bit (MF) is set if the datagram is not the
# last fragment. The Fragment Offset field identifies the fragment
# location, relative to the beginning of the original unfragmented
# datagram. Fragments are counted in units of 8 octets. The
        # fragmentation strategy is designed so that an unfragmented datagram
# has all zero fragmentation information (MF = 0, fragment offset =
# 0). If an internet datagram is fragmented, its data portion must be
# broken on 8 octet boundaries.
if not isinstance(self.more_fragments, bool):
raise ValueError('More fragments flag must be a boolean')
# Fragment offset: This field indicates where in the datagram this
# fragment belongs. The fragment offset is measured in units of 8
# octets (64 bits). The first fragment has offset zero.
if not isinstance(self.fragment_offset, numbers.Integral) \
or self.fragment_offset < 0 \
or self.fragment_offset >= 2 ** 13:
raise ValueError('Invalid fragment offset')
# Check for don't-fragment combined with a fragment offset
if self.dont_fragment and self.fragment_offset > 0:
raise ValueError("A packet marked don't fragment can't have "
"a fragment-offset")
# Check that the TTL is correct
if not isinstance(self.ttl, numbers.Integral) \
or self.ttl < 0 \
or self.ttl >= 2 ** 8:
raise ValueError('Invalid TTL')
# Check the source and destination addresses
if not isinstance(self.source, IPv4Address):
raise ValueError('Source address must be IPv4')
if not isinstance(self.destination, IPv4Address):
raise ValueError('Destination address must be IPv4')
@classmethod
def from_bytes(cls, bitstream, decode_payload=True):
'''
Parse the given packet and update properties accordingly
'''
packet = cls()
# Convert to ConstBitStream (if not already provided)
if not isinstance(bitstream, ConstBitStream):
if isinstance(bitstream, Bits):
bitstream = ConstBitStream(auto=bitstream)
else:
bitstream = ConstBitStream(bytes=bitstream)
# Read the version
version = bitstream.read('uint:4')
if version != packet.version:
raise ValueError('Provided bytes do not contain an IPv4 packet')
# Read the header length
ihl = bitstream.read('uint:4')
if ihl < 5:
raise ValueError('Invalid IPv4 header length')
# Now that we know the length of the header we store it to be able
# to easily recalculate the header checksum later
remaining_header_bits = (ihl * 32) - 8
header = (BitStream('uint:4=4, uint:4=%d' % ihl) +
bitstream.peek(remaining_header_bits))
# Read the type of service
packet.tos = bitstream.read('uint:8')
# Read the total length
total_length = bitstream.read('uint:16')
if total_length < ihl * 4:
raise ValueError('Total length is shorter than the header')
# Read the identification
packet.identification = bitstream.read('uint:16')
# Read the flags
(reserved,
packet.dont_fragment,
packet.more_fragments) = bitstream.readlist('3*bool')
if reserved:
raise ValueError('Reserved flag must be 0')
# Read the fragment offset
packet.fragment_offset = bitstream.read('uint:13')
# Read the TTL
packet.ttl = bitstream.read('uint:8')
# Read the protocol number
packet.protocol = bitstream.read('uint:8')
# Read the header checksum
header_checksum = bitstream.read('uint:16')
# Set the checksum bits in the header to 0 and re-calculate
header[80:96] = BitStream(16)
my_checksum = checksum.ones_complement(header.bytes)
if my_checksum != header_checksum:
raise ValueError('Header checksum does not match')
# Read the source and destination addresses
packet.source = IPv4Address(bitstream.read('uint:32'))
packet.destination = IPv4Address(bitstream.read('uint:32'))
# Read the options
option_len = (ihl - 5) * 4
packet.options = bitstream.read('bytes:%d' % option_len)
# And the rest is payload
payload_bytes = (total_length) - (ihl * 4)
packet.payload = bitstream.read('bytes:%d' % payload_bytes)
if decode_payload:
payload_class = protocol_registry.get_type_class(packet.protocol)
if payload_class:
packet.payload = payload_class.from_bytes(packet.payload)
# There should be no remaining bits
if bitstream.pos != bitstream.len:
raise ValueError('Bits remaining after processing packet')
# Verify that the properties make sense
packet.sanitize()
return packet
def to_bytes(self):
'''
Create bytes from properties
'''
# Verify that the properties make sense
self.sanitize()
# Write the version
bitstream = BitStream('uint:4=%d' % self.version)
# Write the header length
options_len = math.ceil(len(self.options) / 4.0)
bitstream += BitStream('uint:4=%d' % (5 + options_len))
# Write the type of service
bitstream += BitStream('uint:8=%d' % self.tos)
        # Write the total length: the whole datagram, i.e. the 20 byte base
        # header plus the (padded) options plus the payload
        payload_bytes = bytes(self.payload)
        total_length = 20 + int(options_len) * 4 + len(payload_bytes)
        bitstream += BitStream('uint:16=%d' % total_length)
# Write the identification
bitstream += BitStream('uint:16=%d' % self.identification)
# Write the flags
bitstream += BitStream('bool=False, bool=%d, '
'bool=%d' % (self.dont_fragment,
self.more_fragments))
# Write the fragment offset
bitstream += BitStream('uint:13=%d' % self.fragment_offset)
# Write the TTL
bitstream += BitStream('uint:8=%d' % self.ttl)
# Write the protocol number
bitstream += BitStream('uint:8=%d' % self.protocol)
# Write the header checksum as 0 for now, we calculate it later
bitstream += BitStream('uint:16=0')
# Write the source and destination addresses
bitstream += BitStream('uint:32=%d, '
'uint:32=%d' % (int(self.source),
int(self.destination)))
# Add the options
bitstream += BitStream(bytes=self.options)
padding_len = (4 - (len(self.options) % 4)) % 4
bitstream += BitStream(padding_len * 8)
# Calculate the header checksum and fill it in
my_checksum = checksum.ones_complement(bitstream.bytes)
bitstream[80:96] = BitStream('uint:16=%d' % my_checksum)
return bitstream.bytes + payload_bytes
# Register this header type
protocol_registry.register_type_class(IPv4Packet)
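# Illustrative sketch (not part of pylisp): the ones' complement header
# checksum that from_bytes/to_bytes above delegate to checksum.ones_complement.
# This assumes that helper implements the RFC 791 algorithm (sum the header as
# big-endian 16-bit words with end-around carry, then complement); the real
# pylisp helper may differ in detail.
def _example_ones_complement(data):
    if len(data) % 2:
        data += b"\x00"
    total = 0
    for i in range(0, len(data), 2):
        total += (data[i] << 8) | data[i + 1]
        total = (total & 0xFFFF) + (total >> 16)  # fold the carry back in
    return (~total) & 0xFFFF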
|
steffann/pylisp
|
pylisp/packet/ip/ipv4.py
|
Python
|
bsd-3-clause
| 10,727
|
def extractManaTankMagus(item):
"""
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or 'preview' in item['title'].lower():
return None
if 'Mana Tank Magus' in item['tags']:
return buildReleaseMessageWithType(item, 'Mana Tank Magus', vol, chp, frag=frag, postfix=postfix, tl_type='oel')
return False
|
fake-name/ReadableWebProxy
|
WebMirror/management/rss_parser_funcs/feed_parse_extractManaTankMagus.py
|
Python
|
bsd-3-clause
| 360
|
# -*- coding: utf-8 -*-
'''Custom validators used by RoodKamer'''
from wtforms.validators import StopValidation
from isbnlib import to_isbn13, is_isbn10, is_isbn13
class ValidateISBN(object):
"""
    Validates that the input is a valid ISBN: either the 10 digit form used
    prior to 2007, or the 13 digit form used on or after January 1st, 2007.
"""
field_flags = ('required', )
def __init__(self, message=None):
self.message = message
def __call__(self, form, field):
if len(field.data) == 10:
if is_isbn10(field.data):
field.data = to_isbn13(field.data)
field.raw_data[0] = field.data
return
else:
message = self.__get_error_message("Not a valid ISBN-10.",
field)
elif len(field.data) == 13:
if is_isbn13(field.data):
return
else:
message = self.__get_error_message("Not a valid ISBN-13.",
field)
else:
message = self.__get_error_message(
'The ISBN must be either 10 or 13 digits.',
field
)
field.errors[:] = []
raise StopValidation(message)
def __get_error_message(self, msg, field):
if self.message is None:
return field.gettext(msg)
else:
return self.message
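# Illustrative sketch (not part of roodkamer): attaching ValidateISBN to a
# WTForms field.  `_ExampleBookForm` and its `isbn` field are made-up names
# (WTForms 2-style imports assumed); only the validator comes from this module.
from wtforms import Form, StringField
class _ExampleBookForm(Form):
    isbn = StringField('ISBN', validators=[ValidateISBN()])
def _example_validate_isbn():
    form = _ExampleBookForm(data={'isbn': '9780306406157'})  # a valid ISBN-13
    # A malformed ISBN would be recorded on form.errors and return False here.
    return form.validate()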
|
brotherjack/Rood-Kamer
|
roodkamer/validators.py
|
Python
|
bsd-3-clause
| 1,509
|
# Copyright (c) 2016, the GPyOpt Authors
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
import GPy
from .base import BOModel
##
## TODO: not fully tested yet.
##
class WarpedGPModel(BOModel):
analytical_gradient_prediction = False
def __init__(self, kernel=None, noise_var=None, exact_feval=False, optimizer='bfgs', max_iters=1000,
optimize_restarts=5, warping_function=None, warping_terms=3, verbose=False):
self.kernel = kernel
self.noise_var = noise_var
self.exact_feval = exact_feval
self.optimize_restarts = optimize_restarts
self.optimizer = optimizer
self.max_iters = max_iters
self.verbose = verbose
self.warping_function = warping_function
self.warping_terms = warping_terms
self.model = None
def _create_model(self, X, Y):
# --- define kernel
self.input_dim = X.shape[1]
if self.kernel is None:
self.kernel = GPy.kern.Matern32(self.input_dim, variance=1.) #+ GPy.kern.Bias(self.input_dim)
# --- define model
noise_var = Y.var()*0.01 if self.noise_var is None else self.noise_var
self.model = GPy.models.WarpedGP(X, Y, kernel=self.kernel, warping_function=self.warping_function, warping_terms=self.warping_terms )
# --- restrict variance if exact evaluations of the objective
if self.exact_feval:
self.model.Gaussian_noise.constrain_fixed(1e-6, warning=False)
else:
self.model.Gaussian_noise.constrain_positive(warning=False)
def updateModel(self, X_all, Y_all, X_new, Y_new):
if self.model is None:
self._create_model(X_all, Y_all)
else:
self.model.set_XY(X_all, Y_all)
self.model.optimize(optimizer = self.optimizer, messages=self.verbose, max_iters=self.max_iters)
def predict(self, X):
if X.ndim==1: X = X[None,:]
m, v = self.model.predict(X)
v = np.clip(v, 1e-10, np.inf)
return m, np.sqrt(v)
def get_fmin(self):
return self.model.predict(self.model.X)[0].min()
|
SheffieldML/GPyOpt
|
GPyOpt/models/warpedgpmodel.py
|
Python
|
bsd-3-clause
| 2,206
|
import sys, os
try:
import sphinxtogithub
optional_extensions = ['sphinxtogithub']
except ImportError:
optional_extensions = []
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.autosummary'] + optional_extensions
master_doc = 'index'
project = u'gevent-utils'
copyright = u'2011, Travis Cline'
version = '0.0.2'
release = '0.0.2'
exclude_patterns = []
add_module_names = True
pygments_style = 'sphinx'
html_show_sourcelink = False
html_show_sphinx = False
htmlhelp_basename = 'gevent-utilsdoc'
latex_documents = [
('index', 'gevent-utils.tex', u'gevent-utils Documentation',
u'Travis Cline', 'manual'),
]
man_pages = [
('index', 'gevent-utils', u'gevent-utils Documentation',
[u'Travis Cline'], 1)
]
|
tmc/gevent-utils
|
docs/conf.py
|
Python
|
bsd-3-clause
| 722
|
#!/usr/bin/python
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Cook up tarballs/zipfile for distribution."""
import os
import re
import shutil
import stat
import subprocess
import sys
def RemoveIfExists(filename):
try:
os.remove(filename)
except OSError:
pass
def WindowsRemoveReadOnly(top):
"""Only on windows remove the read only attribute from a directory tree.
Arguments:
top: directory to change.
"""
if not sys.platform == 'win32':
return
for root, dirs, files in os.walk(top, topdown=False):
for name in files:
os.chmod(os.path.join(root, name), stat.S_IWRITE)
for name in dirs:
os.chmod(os.path.join(root, name), stat.S_IWRITE)
def FindSVNRevision(path):
try:
info = subprocess.Popen(['svn', 'info'], shell=True,
stdout=subprocess.PIPE,
cwd=path).communicate()[0]
except IOError:
return 'UnknownRev'
m = re.search('Revision: ([0-9]+)', info)
if m:
return m.group(1)
else:
return 'UnknownRev'
def DeleteAllMatching(in_dir, regex):
"""Delete all files matching a particular regex.
Arguments:
in_dir: directory to sweep.
regex: file/directory expression to match.
"""
for root, dirs, files in os.walk(in_dir, topdown=False):
for name in files:
if regex.match(name):
os.remove(os.path.join(root, name))
for name in dirs:
if regex.match(name):
shutil.rmtree(os.path.join(root, name))
def PickOuterName(build_dir):
platform_name = {
'win32': 'windows',
'cygwin': 'windows',
'darwin': 'mac',
'linux': 'linux',
'linux2': 'linux',
}[sys.platform]
return 'nacl_%s_%s' % (platform_name, FindSVNRevision(build_dir))
def CookTarball(tgz_name, build_mode):
"""Cook up a distribution tarball.
Arguments:
tgz_name: base name of the tar/zip to create.
build_mode: --mode parameter for scons
"""
# Pick where to construct the tarball.
if sys.platform == 'win32':
tmp_dir = 'c:/nacl_tarball'
else:
tmp_dir = '/tmp/nacl_tarball'
# Find the root of this client.
build_dir = os.path.abspath(os.path.dirname(__file__))
src_dir = os.path.abspath(os.path.join(build_dir, '..', '..'))
# Drop old working area.
WindowsRemoveReadOnly(tmp_dir)
shutil.rmtree(tmp_dir, ignore_errors=True)
# Create working area directory.
os.mkdir(tmp_dir)
# Pick the root directory name in the destination.
# TODO(bradnelson): consider switching this to use the following,
# assuming this doesn't complicate the docs.
# dst_dir = os.path.join(tmp_dir, PickOuterName(build_dir))
dst_dir = os.path.join(tmp_dir, 'build')
# Copy over everything.
shutil.copytree(src_dir, dst_dir)
WindowsRemoveReadOnly(dst_dir)
# Drop old tarballs/zips.
DeleteAllMatching(os.path.join(dst_dir, 'native_client', 'build'),
re.compile(r'.*\.tgz$'))
DeleteAllMatching(os.path.join(dst_dir, 'native_client', 'build'),
re.compile(r'.*\.zip$'))
# Drop .svn files.
DeleteAllMatching(dst_dir, re.compile(r'^\.svn$'))
# Drop gyp stuff.
shutil.rmtree(os.path.join(dst_dir, 'sconsbuild'), ignore_errors=True)
shutil.rmtree(os.path.join(dst_dir, 'xcodebuild'), ignore_errors=True)
for flavor in ['Debug', 'Release']:
shutil.rmtree(os.path.join(dst_dir, 'native_client',
'build', flavor), ignore_errors=True)
# Drop scons outputs.
shutil.rmtree(os.path.join(dst_dir, 'native_client', 'scons-out'),
ignore_errors=True)
# Drop tools BUILD intermediate output.
shutil.rmtree(os.path.join(dst_dir, 'native_client', 'tools', 'BUILD'),
ignore_errors=True)
# Drop any toolchain present.
shutil.rmtree(os.path.join(dst_dir, 'native_client', 'src',
'third_party', 'nacl_sdk'),
ignore_errors=True)
shutil.rmtree(os.path.join(dst_dir, 'native_client', 'compiler'),
ignore_errors=True)
shutil.rmtree(os.path.join(dst_dir, 'native_client', 'toolchain'),
ignore_errors=True)
# Pick scons version.
if sys.platform == 'win32':
scons = os.path.join(dst_dir, 'native_client', 'scons.bat')
else:
scons = os.path.join(dst_dir, 'native_client', 'scons')
# Pick doxygen version.
doxy_version = {
'win32': 'win',
'cygwin': 'win',
'darwin': 'osx',
'linux': 'linux',
'linux2': 'linux',
}[sys.platform]
doxy_path = os.path.normpath('../third_party/doxygen/%s/doxygen' %
doxy_version)
# Build the tools.
tool_platform = {
'win32': 'win_x86',
'cygwin': 'win_x86',
'darwin': 'mac_x86',
'linux': 'linux_x86',
'linux2': 'linux_x86',
}[sys.platform]
cmd = ('PATH=$PATH:/cygdrive/c/cygwin/bin '
'MAKEINFO=`pwd`/makeinfo_dummy '
'make '
'SDKLOC=`pwd`/../toolchain/%(tool_platform)s/sdk '
'HAMMER=scons') % {'tool_platform': tool_platform}
if sys.platform == 'win32':
cmd = "c:\\cygwin\\bin\\bash -c '%s'" % cmd
ret = subprocess.call(cmd, shell=True,
cwd=os.path.join(dst_dir, 'native_client', 'tools'))
if ret:
return ret
# Drop tools BUILD intermediate output.
shutil.rmtree(os.path.join(dst_dir, 'native_client', 'tools', 'BUILD'),
ignore_errors=True)
# Build the desired version.
ret = subprocess.call([scons,
'--mode='+build_mode,
'--verbose',
'DOXYGEN=%s' % doxy_path],
cwd=os.path.join(dst_dir, 'native_client'))
if ret:
return ret
# Drop items only needed for toolchain build.
shutil.rmtree(os.path.join(dst_dir, 'third_party', 'binutils'),
ignore_errors=True)
shutil.rmtree(os.path.join(dst_dir, 'third_party', 'newlib'),
ignore_errors=True)
shutil.rmtree(os.path.join(dst_dir, 'third_party', 'gcc'), ignore_errors=True)
shutil.rmtree(os.path.join(dst_dir, 'third_party', 'gdb'), ignore_errors=True)
# Drop doxygen if present.
shutil.rmtree(os.path.join(dst_dir, 'third_party', 'doxygen'),
ignore_errors=True)
shutil.rmtree(os.path.join(dst_dir, 'doxygen.DEPS'), ignore_errors=True)
# Pare back scons-out.
for ext in [re.compile(r'.*\.o$'),
re.compile(r'.*\.obj$', re.I),
re.compile(r'.*\.a$'),
re.compile(r'.*\.lib$', re.I),
re.compile(r'.*\.sconsign.*')]:
DeleteAllMatching(os.path.join(dst_dir, 'native_client', 'scons-out'), ext)
# Zip/tar it up.
if sys.platform in ['win32', 'cygwin']:
out_file = os.path.abspath(tgz_name + '.zip')
RemoveIfExists(out_file)
ret = subprocess.call(['zip', '-vr', out_file, '.'],
cwd=os.path.join(tmp_dir))
if ret:
return ret
else:
out_file = os.path.abspath(tgz_name + '.tgz')
ret = subprocess.call(['tar', 'cvfz', out_file, './'],
cwd=os.path.join(tmp_dir))
if ret:
return ret
# Success.
return 0
def main(argv):
if len(argv) != 3:
print 'USAGE: ./cook_tarball.sh <tgz_name> <build_mode>'
return 1
return CookTarball(tgz_name=argv[1], build_mode=argv[2])
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
eseidel/native_client_patches
|
build/cook_tarball.py
|
Python
|
bsd-3-clause
| 8,837
|
# -*- coding: utf-8 -*-
from rawdisk.filesystems.volume import Volume
class AppleBootVolume(Volume):
"""Structure for Apple_Boot volume
"""
def __init__(self):
self.fd = None
def load(self, filename, offset):
"""Will eventually load information for Apple_Boot volume.
Not yet implemented"""
try:
self.offset = offset
# self.fd = open(filename, 'rb')
# self.fd.close()
except IOError as e:
print(e)
def dump_volume(self):
print("TODO")
def __str__(self):
return "Type: Apple_Boot, Offset: 0x%X" % (
self.offset
)
|
dariusbakunas/rawdisk
|
rawdisk/plugins/filesystems/apple_boot/apple_boot_volume.py
|
Python
|
bsd-3-clause
| 667
|
"""Calculation of density of states."""
# Copyright (C) 2011 Atsushi Togo
# All rights reserved.
#
# This file is part of phonopy.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the phonopy project nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import sys
import warnings
import numpy as np
from phonopy.phonon.mesh import Mesh
from phonopy.phonon.tetrahedron_mesh import TetrahedronMesh
from phonopy.structure.tetrahedron_method import TetrahedronMethod
class NormalDistribution:
"""Class to represent normal distribution."""
def __init__(self, sigma):
"""Init method."""
self._sigma = sigma
def calc(self, x):
"""Return normal distribution."""
return (
1.0
/ np.sqrt(2 * np.pi)
/ self._sigma
* np.exp(-(x ** 2) / 2.0 / self._sigma ** 2)
)
class CauchyDistribution:
"""Class to represent Cauchy distribution."""
def __init__(self, gamma):
"""Init method."""
self._gamma = gamma
def calc(self, x):
"""Return Cauchy distribution."""
return self._gamma / np.pi / (x ** 2 + self._gamma ** 2)
class Dos:
"""Base class to calculate density of states."""
def __init__(self, mesh_object: Mesh, sigma=None, use_tetrahedron_method=False):
"""Init method."""
self._mesh_object = mesh_object
self._frequencies = mesh_object.frequencies
self._weights = mesh_object.weights
self._tetrahedron_mesh = None
if use_tetrahedron_method and sigma is None:
self._tetrahedron_mesh = TetrahedronMesh(
mesh_object.dynamical_matrix.primitive,
self._frequencies,
mesh_object.mesh_numbers,
np.array(mesh_object.grid_address, dtype="int_"),
np.array(mesh_object.grid_mapping_table, dtype="int_"),
mesh_object.ir_grid_points,
)
self._frequency_points = None
self._sigma = sigma
self.set_draw_area()
self.set_smearing_function("Normal")
@property
def frequency_points(self):
"""Return frequency points."""
return self._frequency_points
def set_smearing_function(self, function_name):
"""Set function form for smearing method.
Parameters
----------
function_name : str
'Normal': smearing is done by normal distribution.
'Cauchy': smearing is done by Cauchy distribution.
"""
if function_name == "Cauchy":
self._smearing_function = CauchyDistribution(self._sigma)
else:
self._smearing_function = NormalDistribution(self._sigma)
def set_sigma(self, sigma):
"""Set sigma."""
self._sigma = sigma
def set_draw_area(self, freq_min=None, freq_max=None, freq_pitch=None):
"""Set frequency points."""
f_min = self._frequencies.min()
f_max = self._frequencies.max()
if self._sigma is None:
self._sigma = (f_max - f_min) / 100.0
if freq_min is None:
f_min -= self._sigma * 10
else:
f_min = freq_min
if freq_max is None:
f_max += self._sigma * 10
else:
f_max = freq_max
if freq_pitch is None:
f_delta = (f_max - f_min) / 200.0
else:
f_delta = freq_pitch
self._frequency_points = np.arange(f_min, f_max + f_delta * 0.1, f_delta)
class TotalDos(Dos):
"""Class to calculate total DOS."""
def __init__(self, mesh_object: Mesh, sigma=None, use_tetrahedron_method=False):
"""Init method."""
super().__init__(
mesh_object,
sigma=sigma,
use_tetrahedron_method=use_tetrahedron_method,
)
self._dos = None
self._freq_Debye = None
self._Debye_fit_coef = None
self._openmp_thm = True
def run(self):
"""Calculate total DOS."""
if self._tetrahedron_mesh is None:
self._dos = np.array(
[self._get_density_of_states_at_freq(f) for f in self._frequency_points]
)
else:
if self._openmp_thm:
self._run_tetrahedron_method_dos()
else:
self._dos = np.zeros_like(self._frequency_points)
thm = self._tetrahedron_mesh
thm.set(value="I", frequency_points=self._frequency_points)
for i, iw in enumerate(thm):
self._dos += np.sum(iw * self._weights[i], axis=1)
@property
def dos(self):
"""Return total DOS."""
return self._dos
def get_dos(self):
"""Return frequency points and total DOS.
Returns
-------
tuple
(frequency_points, total_dos)
"""
warnings.warn(
"TotalDos.get_dos() is deprecated. "
"Use frequency_points and dos attributes instead.",
DeprecationWarning,
)
return self._frequency_points, self._dos
def get_Debye_frequency(self):
"""Return a kind of Debye frequency."""
return self._freq_Debye
def set_Debye_frequency(self, num_atoms, freq_max_fit=None):
"""Calculate a kind of Debye frequency."""
try:
from scipy.optimize import curve_fit
except ImportError:
print("You need to install python-scipy.")
sys.exit(1)
def Debye_dos(freq, a):
return a * freq ** 2
freq_min = self._frequency_points.min()
freq_max = self._frequency_points.max()
if freq_max_fit is None:
N_fit = int(len(self._frequency_points) / 4.0) # Hard coded
else:
N_fit = int(
freq_max_fit / (freq_max - freq_min) * len(self._frequency_points)
)
popt, pcov = curve_fit(
Debye_dos, self._frequency_points[0:N_fit], self._dos[0:N_fit]
)
a2 = popt[0]
self._freq_Debye = (3 * 3 * num_atoms / a2) ** (1.0 / 3)
self._Debye_fit_coef = a2
def plot(self, ax, xlabel=None, ylabel=None, draw_grid=True, flip_xy=False):
"""Plot total DOS."""
if flip_xy:
_xlabel = "Density of states"
_ylabel = "Frequency"
else:
_xlabel = "Frequency"
_ylabel = "Density of states"
if xlabel is not None:
_xlabel = xlabel
if ylabel is not None:
_ylabel = ylabel
plot_total_dos(
ax,
self._frequency_points,
self._dos,
freq_Debye=self._freq_Debye,
Debye_fit_coef=self._Debye_fit_coef,
xlabel=_xlabel,
ylabel=_ylabel,
draw_grid=draw_grid,
flip_xy=flip_xy,
)
def write(self, filename="total_dos.dat"):
"""Write total DOS to total_dos.dat."""
if self._tetrahedron_mesh is None:
comment = "Sigma = %f" % self._sigma
else:
comment = "Tetrahedron method"
write_total_dos(
self._frequency_points, self._dos, comment=comment, filename=filename
)
def _run_tetrahedron_method_dos(self):
mesh_numbers = self._mesh_object.mesh_numbers
cell = self._mesh_object.dynamical_matrix.primitive
reciprocal_lattice = np.linalg.inv(cell.cell)
tm = TetrahedronMethod(reciprocal_lattice, mesh=mesh_numbers)
self._dos = run_tetrahedron_method_dos(
mesh_numbers,
self._frequency_points,
self._frequencies,
self._mesh_object.grid_address,
self._mesh_object.grid_mapping_table,
tm.get_tetrahedra(),
)
def _get_density_of_states_at_freq(self, f):
return np.sum(
np.dot(self._weights, self._smearing_function.calc(self._frequencies - f))
) / np.sum(self._weights)
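# A minimal usage sketch (editor's illustration, not part of the original
# module); `mesh` is assumed to be a phonopy Mesh object that has already
# been run, and the frequency window is set by the draw-area method whose
# tail appears further above:
#
#     total_dos = TotalDos(mesh, sigma=0.1)
#     total_dos.set_draw_area()  # assumed name of the frequency-window setter
#     total_dos.run()
#     total_dos.write("total_dos.dat")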
class ProjectedDos(Dos):
"""Class to calculate projected DOS."""
def __init__(
self,
mesh_object: Mesh,
sigma=None,
use_tetrahedron_method=False,
direction=None,
xyz_projection=False,
):
"""Init method."""
super().__init__(
mesh_object,
sigma=sigma,
use_tetrahedron_method=use_tetrahedron_method,
)
self._eigenvectors = self._mesh_object.eigenvectors
self._projected_dos = None
if xyz_projection:
self._eigvecs2 = np.abs(self._eigenvectors) ** 2
else:
num_atom = self._frequencies.shape[1] // 3
i_x = np.arange(num_atom, dtype="int") * 3
i_y = np.arange(num_atom, dtype="int") * 3 + 1
i_z = np.arange(num_atom, dtype="int") * 3 + 2
if direction is None:
self._eigvecs2 = np.abs(self._eigenvectors[:, i_x, :]) ** 2
self._eigvecs2 += np.abs(self._eigenvectors[:, i_y, :]) ** 2
self._eigvecs2 += np.abs(self._eigenvectors[:, i_z, :]) ** 2
else:
d = np.array(direction, dtype="double")
d /= np.linalg.norm(direction)
proj_eigvecs = self._eigenvectors[:, i_x, :] * d[0]
proj_eigvecs += self._eigenvectors[:, i_y, :] * d[1]
proj_eigvecs += self._eigenvectors[:, i_z, :] * d[2]
self._eigvecs2 = np.abs(proj_eigvecs) ** 2
self._openmp_thm = True
@property
def partial_dos(self):
"""Return partial DOS."""
warnings.warn(
"PartialDos.partial_dos attribute is deprecated. "
"Use projected_dos attribute instead.",
DeprecationWarning,
)
return self._projected_dos
@property
def projected_dos(self):
"""Return projected DOS."""
return self._projected_dos
def run(self):
"""Calculate projected DOS."""
if self._tetrahedron_mesh is None:
self._run_smearing_method()
else:
if self._openmp_thm:
self._run_tetrahedron_method_dos()
else:
self._run_tetrahedron_method()
def get_partial_dos(self):
"""Return partial DOS.
Returns
-------
tuple
frequency_points: Sampling frequencies
projected_dos: [atom_index, frequency_points_index]
"""
warnings.warn(
"ProjectedDos.get_partial_dos() is deprecated. "
"Use frequency_points and projected_dos attributes instead.",
DeprecationWarning,
)
return self._frequency_points, self._projected_dos
def plot(
self,
ax,
indices=None,
legend=None,
xlabel=None,
ylabel=None,
draw_grid=True,
flip_xy=False,
):
"""Plot projected DOS."""
if flip_xy:
_xlabel = "Partial density of states"
_ylabel = "Frequency"
else:
_xlabel = "Frequency"
_ylabel = "Partial density of states"
if xlabel is not None:
_xlabel = xlabel
if ylabel is not None:
_ylabel = ylabel
plot_projected_dos(
ax,
self._frequency_points,
self._projected_dos,
indices=indices,
legend=legend,
xlabel=_xlabel,
ylabel=_ylabel,
draw_grid=draw_grid,
flip_xy=flip_xy,
)
def write(self, filename="projected_dos.dat"):
"""Write projected DOS to projected_dos.dat."""
if self._tetrahedron_mesh is None:
comment = "Sigma = %f" % self._sigma
else:
comment = "Tetrahedron method"
write_projected_dos(
self._frequency_points,
self._projected_dos,
comment=comment,
filename=filename,
)
def _run_smearing_method(self):
num_pdos = self._eigvecs2.shape[1]
num_freqs = len(self._frequency_points)
self._projected_dos = np.zeros((num_pdos, num_freqs), dtype="double")
weights = self._weights / float(np.sum(self._weights))
for i, freq in enumerate(self._frequency_points):
amplitudes = self._smearing_function.calc(self._frequencies - freq)
for j in range(self._projected_dos.shape[0]):
self._projected_dos[j, i] = np.dot(
weights, self._eigvecs2[:, j, :] * amplitudes
).sum()
def _run_tetrahedron_method(self):
num_pdos = self._eigvecs2.shape[1]
num_freqs = len(self._frequency_points)
self._projected_dos = np.zeros((num_pdos, num_freqs), dtype="double")
thm = self._tetrahedron_mesh
thm.set(value="I", frequency_points=self._frequency_points)
for i, iw in enumerate(thm):
w = self._weights[i]
self._projected_dos += np.dot(iw * w, self._eigvecs2[i].T).T
def _run_tetrahedron_method_dos(self):
mesh_numbers = self._mesh_object.mesh_numbers
cell = self._mesh_object.dynamical_matrix.primitive
reciprocal_lattice = np.linalg.inv(cell.cell)
tm = TetrahedronMethod(reciprocal_lattice, mesh=mesh_numbers)
pdos = run_tetrahedron_method_dos(
mesh_numbers,
self._frequency_points,
self._frequencies,
self._mesh_object.grid_address,
self._mesh_object.grid_mapping_table,
tm.get_tetrahedra(),
coef=self._eigvecs2,
)
self._projected_dos = pdos.T
class PartialDos(ProjectedDos):
"""Class to calculate partial DOS."""
def __init__(
self,
mesh_object: Mesh,
sigma=None,
use_tetrahedron_method=False,
direction=None,
xyz_projection=False,
):
"""Init method."""
warnings.warn(
"PartialDos class is deprecated. Use ProjectedDOS instead.",
DeprecationWarning,
)
super().__init__(
mesh_object,
sigma=sigma,
use_tetrahedron_method=use_tetrahedron_method,
direction=direction,
xyz_projection=xyz_projection,
)
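# A minimal usage sketch (editor's illustration): ProjectedDos requires a Mesh
# run with eigenvectors; the optional `direction` projects the eigenvectors
# onto a Cartesian direction before squaring:
#
#     pdos = ProjectedDos(mesh, sigma=0.1, direction=[0, 0, 1])
#     pdos.set_draw_area()  # assumed frequency-window setter, as for TotalDos
#     pdos.run()
#     pdos.write("projected_dos.dat")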
def get_pdos_indices(symmetry):
"""Return atomic indieces grouped by symmetry."""
mapping = symmetry.get_map_atoms()
return [list(np.where(mapping == i)[0]) for i in symmetry.get_independent_atoms()]
def write_total_dos(
frequency_points, total_dos, comment=None, filename="total_dos.dat"
):
"""Write total_dos.dat."""
with open(filename, "w") as fp:
if comment is not None:
fp.write("# %s\n" % comment)
for freq, dos in zip(frequency_points, total_dos):
fp.write("%20.10f%20.10f\n" % (freq, dos))
def write_partial_dos(
frequency_points, partial_dos, comment=None, filename="partial_dos.dat"
):
"""Write partial_dos.dat."""
warnings.warn(
"write_partial_dos() is deprecated. Use write_projected_dos() instead.",
DeprecationWarning,
)
write_projected_dos(
frequency_points, partial_dos, comment=comment, filename=filename
)
def write_projected_dos(
frequency_points, projected_dos, comment=None, filename="projected_dos.dat"
):
"""Write projected_dos.dat."""
with open(filename, "w") as fp:
if comment is not None:
fp.write("# %s\n" % comment)
for freq, pdos in zip(frequency_points, projected_dos.T):
fp.write("%20.10f" % freq)
fp.write(("%20.10f" * len(pdos)) % tuple(pdos))
fp.write("\n")
def plot_total_dos(
ax,
frequency_points,
total_dos,
freq_Debye=None,
Debye_fit_coef=None,
xlabel=None,
ylabel=None,
draw_grid=True,
flip_xy=False,
):
"""Plot total DOS."""
ax.xaxis.set_ticks_position("both")
ax.yaxis.set_ticks_position("both")
ax.xaxis.set_tick_params(which="both", direction="in")
ax.yaxis.set_tick_params(which="both", direction="in")
if freq_Debye is not None:
freq_pitch = frequency_points[1] - frequency_points[0]
num_points = int(freq_Debye / freq_pitch)
freqs = np.linspace(0, freq_Debye, num_points + 1)
if flip_xy:
ax.plot(total_dos, frequency_points, "r-", linewidth=1)
if freq_Debye:
ax.plot(
np.append(Debye_fit_coef * freqs ** 2, 0),
np.append(freqs, freq_Debye),
"b-",
linewidth=1,
)
else:
ax.plot(frequency_points, total_dos, "r-", linewidth=1)
if freq_Debye:
ax.plot(
np.append(freqs, freq_Debye),
np.append(Debye_fit_coef * freqs ** 2, 0),
"b-",
linewidth=1,
)
if xlabel:
ax.set_xlabel(xlabel)
if ylabel:
ax.set_ylabel(ylabel)
ax.grid(draw_grid)
def plot_partial_dos(
ax,
frequency_points,
partial_dos,
indices=None,
legend=None,
xlabel=None,
ylabel=None,
draw_grid=True,
flip_xy=False,
):
"""Plot partial DOS."""
warnings.warn(
"plot_partial_dos() is deprecated. Use plot_projected_dos() instead.",
DeprecationWarning,
)
plot_projected_dos(
ax,
frequency_points,
partial_dos,
indices=indices,
legend=legend,
xlabel=xlabel,
ylabel=ylabel,
draw_grid=draw_grid,
flip_xy=flip_xy,
)
def plot_projected_dos(
ax,
frequency_points,
projected_dos,
indices=None,
legend=None,
xlabel=None,
ylabel=None,
draw_grid=True,
flip_xy=False,
):
"""Plot projected DOS."""
ax.xaxis.set_ticks_position("both")
ax.yaxis.set_ticks_position("both")
ax.xaxis.set_tick_params(which="both", direction="in")
ax.yaxis.set_tick_params(which="both", direction="in")
plots = []
num_pdos = len(projected_dos)
if indices is None:
indices = []
for i in range(num_pdos):
indices.append([i])
for set_for_sum in indices:
pdos_sum = np.zeros_like(frequency_points)
for i in set_for_sum:
if i > num_pdos - 1:
print("Index number '%d' is specified," % (i + 1))
print("but it is not allowed to be larger than the number of " "atoms.")
raise ValueError
if i < 0:
print(
"Index number '%d' is specified, but it must be "
"positive." % (i + 1)
)
raise ValueError
pdos_sum += projected_dos[i]
if flip_xy:
plots.append(ax.plot(pdos_sum, frequency_points, linewidth=1))
else:
plots.append(ax.plot(frequency_points, pdos_sum, linewidth=1))
if legend is not None:
ax.legend(legend)
if xlabel:
ax.set_xlabel(xlabel)
if ylabel:
ax.set_ylabel(ylabel)
ax.grid(draw_grid)
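# Editor's sketch of driving the plotting helpers above with matplotlib
# (matplotlib itself is not imported by this module):
#
#     import matplotlib.pyplot as plt
#     fig, ax = plt.subplots()
#     plot_total_dos(ax, frequency_points, total_dos,
#                    xlabel="Frequency", ylabel="Density of states")
#     plt.show()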
def run_tetrahedron_method_dos(
mesh,
frequency_points,
frequencies,
grid_address,
grid_mapping_table,
relative_grid_address,
coef=None,
):
"""Return (P)DOS calculated by tetrahedron method in C."""
try:
import phonopy._phonopy as phonoc
except ImportError:
import sys
print("Phonopy C-extension has to be built properly.")
sys.exit(1)
if coef is None:
_coef = np.ones((frequencies.shape[0], 1, frequencies.shape[1]), dtype="double")
else:
_coef = np.array(coef, dtype="double", order="C")
arr_shape = frequencies.shape + (len(frequency_points), _coef.shape[1])
dos = np.zeros(arr_shape, dtype="double")
phonoc.tetrahedron_method_dos(
dos,
np.array(mesh, dtype="int_"),
frequency_points,
frequencies,
_coef,
np.array(grid_address, dtype="int_", order="C"),
np.array(grid_mapping_table, dtype="int_", order="C"),
relative_grid_address,
)
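    # dos has been accumulated per (q-point, band, frequency point, projector);
    # summing over q-points and bands and dividing by the number of grid points
    # yields the (projected) DOS on the requested frequency points.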
if coef is None:
return dos[:, :, :, 0].sum(axis=0).sum(axis=0) / np.prod(mesh)
else:
return dos.sum(axis=0).sum(axis=0) / np.prod(mesh)
| atztogo/phonopy | phonopy/phonon/dos.py | Python | bsd-3-clause | 21,665 |
import mock
import pytest
from olympia import amo
from olympia.access.models import Group, GroupUser
from olympia.amo.tests import TestCase, req_factory_factory
from olympia.addons.models import Addon, AddonUser
from olympia.users.models import UserProfile
from .acl import (action_allowed, check_addon_ownership, check_ownership,
check_addons_reviewer, check_personas_reviewer,
check_unlisted_addons_reviewer, is_editor, match_rules)
pytestmark = pytest.mark.django_db
def test_match_rules():
"""
Unit tests for the match_rules method.
"""
rules = (
'*:*',
'Editors:*,Admin:EditAnyAddon,Admin:flagged,Admin:addons,'
'Admin:EditAnyCollection',
'Tests:*,Admin:serverstatus,Admin:users',
'Admin:EditAnyAddon,Admin:EditAnyLocale,Editors:*,'
'Admin:lists,Admin:applications,Admin:addons,Localizers:*',
'Admin:EditAnyAddon',
'Admin:ViewAnyStats,Admin:ViewAnyCollectionStats',
'Admin:ViewAnyStats',
'Editors:*,Admin:features',
'Admin:Statistics',
'Admin:Features,Editors:*',
'Admin:%',
'Admin:*',
'Admin:Foo',
'Admin:Bar',
)
for rule in rules:
assert match_rules(rule, 'Admin', '%'), "%s != Admin:%%" % rule
rules = (
'Doctors:*',
'Stats:View',
'CollectionStats:View',
'Addons:Review',
'Personas:Review',
'Locales:Edit',
'Locale.de:Edit',
'Reviews:Edit',
'None:None',
)
for rule in rules:
assert not match_rules(rule, 'Admin', '%'), \
"%s == Admin:%% and shouldn't" % rule
def test_anonymous_user():
fake_request = req_factory_factory('/')
assert not action_allowed(fake_request, amo.permissions.ADMIN)
class ACLTestCase(TestCase):
"""Test some basic ACLs by going to various locked pages on AMO."""
fixtures = ['access/login.json']
def test_admin_login_anon(self):
# Login form for anonymous user on the admin page.
url = '/en-US/admin/'
self.assertLoginRedirects(self.client.get(url), to=url)
class TestHasPerm(TestCase):
fixtures = ['base/users', 'base/addon_3615']
def setUp(self):
super(TestHasPerm, self).setUp()
assert self.client.login(email='del@icio.us')
self.user = UserProfile.objects.get(email='del@icio.us')
self.addon = Addon.objects.get(id=3615)
self.au = AddonUser.objects.get(addon=self.addon, user=self.user)
assert self.au.role == amo.AUTHOR_ROLE_OWNER
self.request = self.fake_request_with_user(self.user)
def fake_request_with_user(self, user):
request = mock.Mock()
request.user = user
request.user.is_authenticated = mock.Mock(return_value=True)
return request
def login_admin(self):
assert self.client.login(email='admin@mozilla.com')
return UserProfile.objects.get(email='admin@mozilla.com')
def test_anonymous(self):
self.request.user.is_authenticated.return_value = False
self.client.logout()
assert not check_addon_ownership(self.request, self.addon)
def test_admin(self):
self.request = self.fake_request_with_user(self.login_admin())
assert check_addon_ownership(self.request, self.addon)
assert check_addon_ownership(self.request, self.addon, admin=True)
assert not check_addon_ownership(self.request, self.addon, admin=False)
def test_require_author(self):
assert check_ownership(self.request, self.addon, require_author=True)
def test_require_author_when_admin(self):
self.request = self.fake_request_with_user(self.login_admin())
assert check_ownership(self.request, self.addon, require_author=False)
assert not check_ownership(self.request, self.addon,
require_author=True)
def test_disabled(self):
self.addon.update(status=amo.STATUS_DISABLED)
assert not check_addon_ownership(self.request, self.addon)
self.test_admin()
def test_deleted(self):
self.addon.update(status=amo.STATUS_DELETED)
assert not check_addon_ownership(self.request, self.addon)
self.request.user = self.login_admin()
assert not check_addon_ownership(self.request, self.addon)
def test_ignore_disabled(self):
self.addon.update(status=amo.STATUS_DISABLED)
assert check_addon_ownership(self.request, self.addon,
ignore_disabled=True)
def test_owner(self):
assert check_addon_ownership(self.request, self.addon)
self.au.role = amo.AUTHOR_ROLE_DEV
self.au.save()
assert not check_addon_ownership(self.request, self.addon)
self.au.role = amo.AUTHOR_ROLE_VIEWER
self.au.save()
assert not check_addon_ownership(self.request, self.addon)
self.au.role = amo.AUTHOR_ROLE_SUPPORT
self.au.save()
assert not check_addon_ownership(self.request, self.addon)
def test_dev(self):
assert check_addon_ownership(self.request, self.addon, dev=True)
self.au.role = amo.AUTHOR_ROLE_DEV
self.au.save()
assert check_addon_ownership(self.request, self.addon, dev=True)
self.au.role = amo.AUTHOR_ROLE_VIEWER
self.au.save()
assert not check_addon_ownership(self.request, self.addon, dev=True)
self.au.role = amo.AUTHOR_ROLE_SUPPORT
self.au.save()
assert not check_addon_ownership(self.request, self.addon, dev=True)
def test_viewer(self):
assert check_addon_ownership(self.request, self.addon, viewer=True)
self.au.role = amo.AUTHOR_ROLE_DEV
self.au.save()
assert check_addon_ownership(self.request, self.addon, viewer=True)
self.au.role = amo.AUTHOR_ROLE_VIEWER
self.au.save()
assert check_addon_ownership(self.request, self.addon, viewer=True)
self.au.role = amo.AUTHOR_ROLE_SUPPORT
self.au.save()
assert check_addon_ownership(self.request, self.addon, viewer=True)
def test_support(self):
assert check_addon_ownership(self.request, self.addon, viewer=True)
self.au.role = amo.AUTHOR_ROLE_DEV
self.au.save()
assert not check_addon_ownership(self.request, self.addon,
support=True)
self.au.role = amo.AUTHOR_ROLE_VIEWER
self.au.save()
assert not check_addon_ownership(self.request, self.addon,
support=True)
self.au.role = amo.AUTHOR_ROLE_SUPPORT
self.au.save()
assert check_addon_ownership(self.request, self.addon, support=True)
def test_add_and_remove_group(self):
group = Group.objects.create(name='A Test Group', rules='Test:Group')
group_user = GroupUser.objects.create(group=group, user=self.user)
assert self.user.groups_list == [group]
# The groups_list property already existed. Make sure delete works.
group_user.delete()
assert self.user.groups_list == []
group_user = GroupUser.objects.create(group=group, user=self.user)
assert self.user.groups_list == [group]
del self.user.groups_list
# The groups_list didn't exist. Make sure delete works.
group_user.delete()
assert self.user.groups_list == []
class TestCheckReviewer(TestCase):
fixtures = ['base/addon_3615', 'addons/persona']
def setUp(self):
super(TestCheckReviewer, self).setUp()
self.user = UserProfile.objects.get()
self.persona = Addon.objects.get(pk=15663)
self.addon = Addon.objects.get(pk=3615)
def test_no_perm(self):
req = req_factory_factory('noop', user=self.user)
assert not check_addons_reviewer(req)
assert not check_unlisted_addons_reviewer(req)
assert not check_personas_reviewer(req)
def test_perm_addons(self):
self.grant_permission(self.user, 'Addons:Review')
req = req_factory_factory('noop', user=self.user)
assert check_addons_reviewer(req)
assert not check_unlisted_addons_reviewer(req)
assert not check_personas_reviewer(req)
def test_perm_themes(self):
self.grant_permission(self.user, 'Personas:Review')
req = req_factory_factory('noop', user=self.user)
assert not check_addons_reviewer(req)
assert not check_unlisted_addons_reviewer(req)
assert check_personas_reviewer(req)
def test_perm_unlisted_addons(self):
self.grant_permission(self.user, 'Addons:ReviewUnlisted')
req = req_factory_factory('noop', user=self.user)
assert not check_addons_reviewer(req)
assert check_unlisted_addons_reviewer(req)
assert not check_personas_reviewer(req)
def test_is_editor_for_addon_reviewer(self):
"""An addon editor is also a persona editor."""
self.grant_permission(self.user, 'Addons:Review')
req = req_factory_factory('noop', user=self.user)
assert is_editor(req, self.persona)
assert is_editor(req, self.addon)
def test_is_editor_for_persona_reviewer(self):
self.grant_permission(self.user, 'Personas:Review')
req = req_factory_factory('noop', user=self.user)
assert is_editor(req, self.persona)
assert not is_editor(req, self.addon)
| harikishen/addons-server | src/olympia/access/tests.py | Python | bsd-3-clause | 9,484 |
#!/usr/bin/env python
#
# Copyright (c) 2015 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# Hongjuan, Wang<hongjuanx.wang@intel.com>
import os
import sys
import shutil
import commands
from optparse import OptionParser
import setup_ios
SCRIPT_PATH = os.path.realpath(__file__)
ConstPath = os.path.dirname(SCRIPT_PATH)
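# Editorial note: the destination passed to run() is an xcodebuild
# "-destination" specifier supplied by setup_ios.dest, e.g. something like
# 'platform=iOS Simulator,name=iPhone 6,OS=9.0'.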
def run(dest=None):
try:
#print 'dest', dest
if dest:
runstatus = commands.getstatusoutput("xcodebuild test -project %s/crosswalk-ios/XWalkView/XWalkView.xcodeproj/ " \
"-scheme XWalkViewTests -destination '%s'" % (ConstPath, dest))
print runstatus[1]
if runstatus[0] == 0:
print "Test done"
else:
print "Test failed"
else:
print "Please input option the destination"
except Exception,e:
print Exception, "Run the unit test XWalkView error: ", e
sys.exit(1)
def init():
try:
setup_ios.main()
try:
shutil.rmtree(ConstPath + "/mobileSpec-crosswalk")
except:
os.system("rm -rf " + ConstPath + "/mobileSpec-crosswalk &>/dev/null")
#print traceback.print_exc()
run(setup_ios.dest)
except Exception,e:
print("Get wrong options: %s, exit ..." % e)
sys.exit(1)
if __name__ == '__main__':
init()
| pk-sam/crosswalk-test-suite | embeddingapi/embedding-api-ios-tests/embeddingapi/xwalkview.py | Python | bsd-3-clause | 2,795 |
'''
------------------------------------------
Red9 Studio Pack: Maya Pipeline Solutions
Author: Mark Jackson
email: rednineinfo@gmail.com
Red9 blog : http://red9-consultancy.blogspot.co.uk/
MarkJ blog: http://markj3d.blogspot.co.uk
------------------------------------------
This is the heart of the Red9 StudioPack's boot sequence, managing folder structures,
dependencies and menuItems.
######### THIS SHOULD NOT REQUIRE ANY OF THE RED9.core modules ##########
'''
#from Red9.startup import language_packs
__author__ = 'Mark Jackson'
__buildVersionID__ = 2.0
installedVersion= False
import sys
import os
import imp
import maya.cmds as cmds
import maya.mel as mel
from functools import partial
import logging
logging.basicConfig()
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
'''
Maya Version Mapping History:
====================================
Release -version -api python -qt prefs release extra info
-----------------------------------------------------------------------------------------
2008 . 2008 . ?????? . 2.5.1 na . 2008 . 2007-09-01
2009 . 2009 . ?????? . 2.5.1 na . 2009 . 2008-10-01
2010 . 2010 . 201000 . 2.6.1 na . 2010 . 2009-08-01
2011 Hotfix2 . 2011 . 201102 . 2.6.4 4.5.3 . 2011 .
2011 SAP . 2011 . 201104 . 2.6.4 4.5.3 . 2011.5 . 2010-09-29 . 2011 binary compliant
2012 . 2012 . 201200 . 2.6.4 4.7.1 . 2012 . 2011-04-01
2012 SP1 . 2012 . ?????? . 2.6.4 4.7.1 . 2012 .
2012 SAP1 . 2012 . ?????? . 2.6.4 4.7.1 . 2012 . 2012-01-26
2012 SP2 . 2012 . 201217 . 2.6.4 4.7.1 . 2012 .
2013 SP1 . 2013 . 201301 . 2.6.4 4.7.1 . 2013 . 2012-07-00
2013 SP2 . 2013 . 201303 . 2.6.4 4.7.1 . 2013 . 2013-01-00
2013 EXT . 2013 . 201350? . 2.6.4 4.7.1 . 2013.5 . 2012-09-25 . 2013 binary incompatible
2013 EXT2 . 2013 . 201355 . 2.6.4 4.7.1 . 2013.5 . 2013-01-22 . 2013 binary incompatible
2014 . 2014 . 201400 . 2.6.4 4.8.2 . 2014 . 2013-04-10
2015 . 2015 . 201500 . 2.7 4.8.5 . 2015 . 2014-04-15
2016 . 2016 . 201600 . 2.7 4.8.5 . 2016 . 2015-04-15
------------------------------------------------------------------------------------------
'''
#=========================================================================================
# LANGUAGE MAPPING -----------------------------------------------------------------------
#=========================================================================================
#global LANGUAGE_MAP
import language_packs.language_english
LANGUAGE_MAP = language_packs.language_english
def get_language_maps():
languages=[]
language_path = os.path.join(os.path.dirname(__file__),'language_packs')
packs = os.listdir(language_path)
for p in packs:
if p.startswith('language_') and p.endswith('.py'):
languages.append(p.split('.py')[0])
return languages
def set_language(language='language_english', *args):
global LANGUAGE_MAP
language_path = os.path.join(os.path.dirname(__file__),'language_packs')
packs = get_language_maps()
if language in packs:
print 'Red9 : Importing Language Map : %s' % language
LANGUAGE_MAP = imp.load_source('language', os.path.join(language_path, language+'.py'))
set_language()
#=========================================================================================
# MAYA DATA -----------------------------------------------------------------------------
#=========================================================================================
MAYA_INTERNAL_DATA = {} # cached Maya internal vars for speed
def mayaFullSpecs():
print 'Maya version : ', mayaVersion()
print 'Maya API version: ', mayaVersionRelease()
print 'QT build: ', mayaVersionQT()
print 'Prefs folder: ',mayaPrefs()
print 'OS build: ', osBuild()
print MAYA_INTERNAL_DATA
def mayaVersion():
#need to manage this better and use the API version,
#eg: 2013.5 returns 2013
if 'version' in MAYA_INTERNAL_DATA and MAYA_INTERNAL_DATA['version']:
return MAYA_INTERNAL_DATA['version']
else:
MAYA_INTERNAL_DATA['version'] = mel.eval('getApplicationVersionAsFloat')
return MAYA_INTERNAL_DATA['version']
def mayaVersionRelease():
if 'api' in MAYA_INTERNAL_DATA and MAYA_INTERNAL_DATA['api']:
return MAYA_INTERNAL_DATA['api']
else:
MAYA_INTERNAL_DATA['api'] = cmds.about(api=True)
return MAYA_INTERNAL_DATA['api']
def mayaVersionQT():
try:
if 'qt' in MAYA_INTERNAL_DATA and MAYA_INTERNAL_DATA['qt']:
return MAYA_INTERNAL_DATA['qt']
else:
MAYA_INTERNAL_DATA['qt'] = cmds.about(qt=True)
return MAYA_INTERNAL_DATA['qt']
except:
pass
def mayaPrefs():
'''
Root of Maya prefs folder
'''
if 'prefs' in MAYA_INTERNAL_DATA and MAYA_INTERNAL_DATA['prefs']:
return MAYA_INTERNAL_DATA['prefs']
else:
MAYA_INTERNAL_DATA['prefs'] = os.path.dirname(cmds.about(env=True))
return MAYA_INTERNAL_DATA['prefs']
def mayaUpAxis(setAxis=None):
import maya.OpenMaya as OpenMaya
if setAxis:
if setAxis.lower()=='y':
OpenMaya.MGlobal.setYAxisUp()
if setAxis.lower()=='z':
OpenMaya.MGlobal.setZAxisUp()
else:
vect=OpenMaya.MGlobal.upAxis()
if vect.z:
return 'z'
if vect.y:
return 'y'
def mayaIsBatch():
return cmds.about(batch=True)
def osBuild():
build = cmds.about(os=True)
if build == 'win64':
return 64
elif build == 'win32':
return 32
def getCurrentFPS():
'''
returns the current frames per second as a number, rather than a useless string
'''
fpsDict = {"game": 15.0, "film": 24.0, "pal": 25.0, "ntsc": 30.0, "show": 48.0, "palf": 50.0, "ntscf": 60.0}
return fpsDict[cmds.currentUnit(q=True, fullName=True, time=True)]
#=========================================================================================
# MENU SETUPS ----------------------------------------------------------------------------
#=========================================================================================
def menuSetup(parent='MayaWindow'):
#if exists remove all items, means we can update on the fly by restarting the Red9 pack
if cmds.menu('redNineMenuItemRoot', exists=True):
cmds.deleteUI('redNineMenuItemRoot')
log.info("Rebuilding Existing RedNine Menu")
# parent is an existing window with an existing menuBar?
if cmds.window(parent, exists=True):
if not cmds.window(parent, q=True, menuBar=True):
raise StandardError('given parent for Red9 Menu has no menuBarlayout %s' % parent)
else:
cmds.menu('redNineMenuItemRoot', l="RedNine", p=parent, tearOff=True, allowOptionBoxes=True)
log.info('new Red9 Menu added to current window : %s' % parent)
# parent is a menuBar?
elif cmds.menuBarLayout(parent, exists=True):
cmds.menu('redNineMenuItemRoot', l='RedNine', p=parent, tearOff=True, allowOptionBoxes=True)
log.info('New Red9 Sound Menu added to current windows menuBar : %s' % parent)
# parent is an existing menu?
elif cmds.menu(parent, exists=True):
cmds.menuItem('redNineMenuItemRoot', l='RedNine', sm=True, p=parent)
log.info('new Red9 subMenu added to current Menu : %s' % parent)
else:
raise StandardError('given parent for Red9 Menu is invalid %s' % parent)
try:
cmds.menuItem('redNineProRootItem',
l='PRO : PACK', sm=True, p='redNineMenuItemRoot', tearOff=True,i='red9.jpg')
# Holder Menus for Client code
if get_client_modules():
cmds.menuItem(divider=True,p='redNineMenuItemRoot')
for client in get_client_modules():
cmds.menuItem('redNineClient%sItem' % client,
l='CLIENT : %s' % client, sm=True, p='redNineMenuItemRoot', tearOff=True, i='red9.jpg')
cmds.menuItem(divider=True,p='redNineMenuItemRoot')
#Add the main Menu items
cmds.menuItem('redNineAnimItem',
l=LANGUAGE_MAP._MainMenus_.animation_toolkit,
ann=LANGUAGE_MAP._MainMenus_.animation_toolkit_ann,
p='redNineMenuItemRoot', echoCommand=True,
c="import Red9.core.Red9_AnimationUtils as r9Anim;r9Anim.AnimationUI.show()")
cmds.menuItem('redNineSnapItem',
l=LANGUAGE_MAP._MainMenus_.simple_snap,
ann=LANGUAGE_MAP._MainMenus_.simple_snap_ann,
p='redNineMenuItemRoot', echoCommand=True,
c="import Red9.core.Red9_AnimationUtils as r9Anim;r9Anim.AnimFunctions.snap()")
cmds.menuItem('redNineSearchItem',
l=LANGUAGE_MAP._MainMenus_.searchui,
ann=LANGUAGE_MAP._MainMenus_.searchui_ann,
p='redNineMenuItemRoot', echoCommand=True,
c="import Red9.core.Red9_CoreUtils as r9Core;r9Core.FilterNode_UI.show()")
cmds.menuItem('redNineLockChnsItem',
l=LANGUAGE_MAP._MainMenus_.lockchannels,
ann=LANGUAGE_MAP._MainMenus_.lockchannels_ann,
p='redNineMenuItemRoot', echoCommand=True,
c="import Red9.core.Red9_CoreUtils as r9Core;r9Core.LockChannels.UI.show()")
cmds.menuItem('redNineMetaUIItem',
l=LANGUAGE_MAP._MainMenus_.metanodeui,
ann=LANGUAGE_MAP._MainMenus_.metanodeui_ann,
p='redNineMenuItemRoot', echoCommand=True,
c="import Red9.core.Red9_Meta as r9Meta;r9Meta.MClassNodeUI.show()")
cmds.menuItem('redNineReporterUIItem',
l=LANGUAGE_MAP._MainMenus_.scene_reviewer,
ann=LANGUAGE_MAP._MainMenus_.scene_reviewer_ann,
p='redNineMenuItemRoot', echoCommand=True,
c="import Red9.core.Red9_Tools as r9Tools;r9Tools.SceneReviewerUI.show()")
cmds.menuItem('redNineMoCapItem',
l=LANGUAGE_MAP._MainMenus_.mouse_mocap,
ann=LANGUAGE_MAP._MainMenus_.mouse_mocap_ann,
p='redNineMenuItemRoot', echoCommand=True,
c="import Red9.core.Red9_Tools as r9Tools;r9Tools.RecordAttrs.show()")
cmds.menuItem('redNineRandomizerItem',
l=LANGUAGE_MAP._MainMenus_.randomize_keyframes,
ann=LANGUAGE_MAP._MainMenus_.randomize_keyframes_ann,
p='redNineMenuItemRoot', echoCommand=True,
c="import Red9.core.Red9_AnimationUtils as r9Anim;r9Anim.RandomizeKeys.showOptions()")
cmds.menuItem('redNineFilterCurvesItem',
l=LANGUAGE_MAP._MainMenus_.interactive_curve_filter,
ann=LANGUAGE_MAP._MainMenus_.interactive_curve_filter_ann,
p='redNineMenuItemRoot', echoCommand=True,
c="import Red9.core.Red9_AnimationUtils as r9Anim;r9Anim.FilterCurves.show()")
cmds.menuItem('redNineMirrorUIItem',
l=LANGUAGE_MAP._MainMenus_.mirror_setup,
ann=LANGUAGE_MAP._MainMenus_.mirror_setup_ann,
p='redNineMenuItemRoot', echoCommand=True,
c="import Red9.core.Red9_AnimationUtils as r9Anim;r9Anim.MirrorSetup().show()")
cmds.menuItem('redNineCameraTrackItem',
l='CameraTracker',sm=True,p='redNineMenuItemRoot')
cmds.menuItem('redNineCamerTrackFixedItem',
l=LANGUAGE_MAP._MainMenus_.camera_tracker_pan,
ann=LANGUAGE_MAP._MainMenus_.camera_tracker_pan_ann,
p='redNineCameraTrackItem', echoCommand=True,
c="from Red9.core.Red9_AnimationUtils import CameraTracker as camTrack;camTrack.cameraTrackView(fixed=True)")
if not mayaVersion()<=2009:
cmds.menuItem(optionBox=True,
ann=LANGUAGE_MAP._MainMenus_.tracker_tighness_ann,
p='redNineCameraTrackItem', echoCommand=True,
c="from Red9.core.Red9_AnimationUtils import CameraTracker as camTrack;camTrack(fixed=True)._showUI()")
cmds.menuItem('redNineCamerTrackFreeItem',
l=LANGUAGE_MAP._MainMenus_.camera_tracker_track,
ann=LANGUAGE_MAP._MainMenus_.camera_tracker_track_ann,
p='redNineCameraTrackItem', echoCommand=True,
c="from Red9.core.Red9_AnimationUtils import CameraTracker as camTrack;camTrack.cameraTrackView(fixed=False)")
if not mayaVersion()<=2009:
cmds.menuItem(optionBox=True,
ann=LANGUAGE_MAP._MainMenus_.tracker_tighness_ann,
p='redNineCameraTrackItem', echoCommand=True,
c="from Red9.core.Red9_AnimationUtils import CameraTracker as camTrack;camTrack(fixed=False)._showUI()")
cmds.menuItem(divider=True,p='redNineMenuItemRoot')
cmds.menuItem('redNineAnimBndItem',
l=LANGUAGE_MAP._MainMenus_.animation_binder,
ann=LANGUAGE_MAP._MainMenus_.animation_binder_ann,
p='redNineMenuItemRoot', echoCommand=True,
c="import Red9.core.AnimationBinder as animBnd;animBnd.AnimBinderUI()._UI()")
cmds.menuItem(divider=True,p='redNineMenuItemRoot')
cmds.menuItem('redNineHomepageItem',
l=LANGUAGE_MAP._MainMenus_.red9_homepage,
ann=LANGUAGE_MAP._MainMenus_.red9_homepage_ann,
p='redNineMenuItemRoot', echoCommand=True,
c="Red9.setup.red9_website_home()")
cmds.menuItem('redNineBlogItem',
l=LANGUAGE_MAP._MainMenus_.red9_blog,
ann=LANGUAGE_MAP._MainMenus_.red9_blog_ann,
p='redNineMenuItemRoot', echoCommand=True,
c="Red9.setup.red9_blog()")
cmds.menuItem('redNineVimeoItem',
l=LANGUAGE_MAP._MainMenus_.red9_vimeo,
ann=LANGUAGE_MAP._MainMenus_.red9_vimeo_ann,
p='redNineMenuItemRoot', echoCommand=True,
c="Red9.setup.red9_vimeo()")
cmds.menuItem('redNineFacebookItem',
l=LANGUAGE_MAP._MainMenus_.red9_facebook,
ann=LANGUAGE_MAP._MainMenus_.red9_facebook_ann,
p='redNineMenuItemRoot', echoCommand=True,
c="Red9.setup.red9_facebook()")
cmds.menuItem('redNineAPIDocItem',
l=LANGUAGE_MAP._MainMenus_.red9_api_docs,
ann=LANGUAGE_MAP._MainMenus_.red9_api_docs_ann,
p='redNineMenuItemRoot', echoCommand=True,
c="Red9.setup.red9_apidocs()")
cmds.menuItem(l=LANGUAGE_MAP._MainMenus_.red9_details,
c='Red9.setup.red9ContactInfo()',p='redNineMenuItemRoot')
cmds.menuItem(divider=True,p='redNineMenuItemRoot')
cmds.menuItem('redNineDebuggerItem', l=LANGUAGE_MAP._MainMenus_.red9_debugger,sm=True,p='redNineMenuItemRoot')
cmds.menuItem('redNineLostAnimItem', p='redNineDebuggerItem',
l=LANGUAGE_MAP._MainMenus_.reconnect_anim,
ann=LANGUAGE_MAP._MainMenus_.reconnect_anim_ann,
echoCommand=True, c="import Red9.core.Red9_AnimationUtils as r9Anim;r9Anim.ReconnectAnimData().show()")
cmds.menuItem('redNineOpenCrashItem', p='redNineDebuggerItem',
l=LANGUAGE_MAP._MainMenus_.open_last_crash,
ann=LANGUAGE_MAP._MainMenus_.open_last_crash_ann,
echoCommand=True, c="import Red9.core.Red9_General as r9General;r9General.os_openCrashFile()")
cmds.menuItem(divider=True,p='redNineDebuggerItem')
cmds.menuItem('redNineDebugItem',
l=LANGUAGE_MAP._MainMenus_.systems_debug,
ann=LANGUAGE_MAP._MainMenus_.systems_debug_ann,
echoCommand=True, c="Red9.core._setlogginglevel_debug()")
cmds.menuItem('redNineInfoItem',
l=LANGUAGE_MAP._MainMenus_.systems_info,
ann=LANGUAGE_MAP._MainMenus_.systems_info_ann,
echoCommand=True, c="Red9.core._setlogginglevel_info()")
cmds.menuItem(divider=True,p='redNineDebuggerItem')
cmds.menuItem(l=LANGUAGE_MAP._MainMenus_.individual_debug, sm=True, p='redNineDebuggerItem')
cmds.menuItem(l=LANGUAGE_MAP._MainMenus_.debug+" : r9Core",
ann=LANGUAGE_MAP._MainMenus_.individual_debug_ann,
echoCommand=True, c="Red9.core._setlogginglevel_debug('r9Core')")
cmds.menuItem(l=LANGUAGE_MAP._MainMenus_.debug+" : r9Meta",
ann=LANGUAGE_MAP._MainMenus_.individual_debug_ann,
echoCommand=True, c="Red9.core._setlogginglevel_debug('r9Meta')")
cmds.menuItem(l=LANGUAGE_MAP._MainMenus_.debug+" : r9Anim",
ann=LANGUAGE_MAP._MainMenus_.individual_debug_ann,
echoCommand=True, c="Red9.core._setlogginglevel_debug('r9Anim')")
cmds.menuItem(l=LANGUAGE_MAP._MainMenus_.debug+" : r9Tools",
ann=LANGUAGE_MAP._MainMenus_.individual_debug_ann,
echoCommand=True, c="Red9.core._setlogginglevel_debug('r9Tools')")
cmds.menuItem(l=LANGUAGE_MAP._MainMenus_.debug+" : r9Pose",
ann=LANGUAGE_MAP._MainMenus_.individual_debug_ann,
echoCommand=True, c="Red9.core._setlogginglevel_debug('r9Pose')")
cmds.menuItem(l=LANGUAGE_MAP._MainMenus_.debug+" : r9General",
ann=LANGUAGE_MAP._MainMenus_.individual_debug_ann,
echoCommand=True, c="Red9.core._setlogginglevel_debug('r9General')")
cmds.menuItem(l=LANGUAGE_MAP._MainMenus_.debug+" : r9Audio",
ann=LANGUAGE_MAP._MainMenus_.individual_debug_ann,
echoCommand=True, c="Red9.core._setlogginglevel_debug('r9Audio')")
cmds.menuItem(l=LANGUAGE_MAP._MainMenus_.individual_info,sm=True,p='redNineDebuggerItem')
cmds.menuItem(l=LANGUAGE_MAP._MainMenus_.info+" : r9Core",
ann=LANGUAGE_MAP._MainMenus_.individual_info_ann,
echoCommand=True, c="Red9.core._setlogginglevel_info('r9Core')")
cmds.menuItem(l=LANGUAGE_MAP._MainMenus_.info+" : r9Meta",
ann=LANGUAGE_MAP._MainMenus_.individual_info_ann,
echoCommand=True, c="Red9.core._setlogginglevel_info('r9Meta')")
cmds.menuItem(l=LANGUAGE_MAP._MainMenus_.info+" : r9Anim",
ann=LANGUAGE_MAP._MainMenus_.individual_info_ann,
echoCommand=True, c="Red9.core._setlogginglevel_info('r9Anim')")
cmds.menuItem(l=LANGUAGE_MAP._MainMenus_.info+" : r9Tools",
ann=LANGUAGE_MAP._MainMenus_.individual_info_ann,
echoCommand=True, c="Red9.core._setlogginglevel_info('r9Tools')")
cmds.menuItem(l=LANGUAGE_MAP._MainMenus_.info+" : r9Pose",
ann=LANGUAGE_MAP._MainMenus_.individual_info_ann,
echoCommand=True, c="Red9.core._setlogginglevel_info('r9Pose')")
cmds.menuItem(l=LANGUAGE_MAP._MainMenus_.info+" : r9General",
ann=LANGUAGE_MAP._MainMenus_.individual_info_ann,
echoCommand=True, c="Red9.core._setlogginglevel_info('r9General')")
cmds.menuItem(l=LANGUAGE_MAP._MainMenus_.info+" : r9Audio",
ann=LANGUAGE_MAP._MainMenus_.individual_info_ann,
echoCommand=True, c="Red9.core._setlogginglevel_info('r9Audio')")
cmds.menuItem(divider=True,p='redNineDebuggerItem')
cmds.menuItem('redNineReloadItem',l=LANGUAGE_MAP._MainMenus_.systems_reload, p='redNineDebuggerItem',
ann=LANGUAGE_MAP._MainMenus_.systems_reload_ann,
echoCommand=True, c=reload_Red9) # "Red9.core._reload()")
cmds.menuItem(divider=True,p='redNineDebuggerItem')
for language in get_language_maps():
cmds.menuItem(l=LANGUAGE_MAP._MainMenus_.language+" : %s" % language, c=partial(set_language,language),p='redNineDebuggerItem')
except:
raise StandardError('Unable to parent Red9 Menu to given parent %s' % parent)
def addToMayaMenus():
try:
# fileMenu additions
if not cmds.menuItem('redNineOpenFolderItem',q=True,ex=True):
mainFileMenu=mel.eval("string $f=$gMainFileMenu")
if not cmds.menu(mainFileMenu, q=True, ni=True):
mel.eval('buildFileMenu()')
cmds.menuItem(divider=True,p=mainFileMenu)
cmds.menuItem('redNineCopyPathItem',
l=LANGUAGE_MAP._MainMenus_.copy_to_clipboard,
ann=LANGUAGE_MAP._MainMenus_.copy_to_clipboard_ann,
p=mainFileMenu,
echoCommand=True,
c="import maya.cmds as cmds;import Red9.core.Red9_General as r9General;r9General.Clipboard.setText(cmds.file(q=True,sn=True))")
cmds.menuItem('redNineOpenFolderItem',
l=LANGUAGE_MAP._MainMenus_.open_in_explorer,
ann=LANGUAGE_MAP._MainMenus_.open_in_explorer_ann,
p=mainFileMenu,
echoCommand=True,
c="import maya.cmds as cmds;import Red9.core.Red9_General as r9General;r9General.os_OpenFileDirectory(cmds.file(q=True,sn=True))")
# timeSlider additions
if not cmds.menuItem('redNineTimeSliderCollapseItem',q=True,ex=True):
            if mayaVersion() >= 2011:
mel.eval('updateTimeSliderMenu TimeSliderMenu')
TimeSliderMenu='TimeSliderMenu'
cmds.menuItem(divider=True, p=TimeSliderMenu)
cmds.menuItem(subMenu=True, label=LANGUAGE_MAP._MainMenus_.range_submenu, p=TimeSliderMenu)
cmds.menuItem(label=LANGUAGE_MAP._MainMenus_.selectkeys_timerange,
ann=LANGUAGE_MAP._MainMenus_.selectkeys_timerange_ann,
c='import Red9.core.Red9_AnimationUtils as r9Anim;r9Anim.selectKeysByRange()')
cmds.menuItem(label=LANGUAGE_MAP._MainMenus_.setrangetoo,
ann=LANGUAGE_MAP._MainMenus_.setrangetoo_ann,
c='import Red9.core.Red9_AnimationUtils as r9Anim;r9Anim.setTimeRangeToo()')
cmds.menuItem(divider=True, p=TimeSliderMenu)
cmds.menuItem('redNineTimeSliderCollapseItem', label=LANGUAGE_MAP._MainMenus_.collapse_time,
ann=LANGUAGE_MAP._MainMenus_.collapse_time_ann,
c='import Red9.core.Red9_CoreUtils as r9Core;r9Core.timeOffset_collapseUI()',
p=TimeSliderMenu)
cmds.menuItem(subMenu=True, label=LANGUAGE_MAP._MainMenus_.insert_padding, p=TimeSliderMenu)
cmds.menuItem(label=LANGUAGE_MAP._MainMenus_.pad_selected,
ann=LANGUAGE_MAP._MainMenus_.pad_selected_ann,
c='import Red9.core.Red9_CoreUtils as r9Core;r9Core.timeOffset_addPadding(scene=False)')
cmds.menuItem(label=LANGUAGE_MAP._MainMenus_.pad_full_scene,
ann=LANGUAGE_MAP._MainMenus_.pad_full_scene_ann,
c='import Red9.core.Red9_CoreUtils as r9Core;r9Core.timeOffset_addPadding(scene=True)')
else:
log.debug('Red9 Timeslider menus already built')
except:
log.debug('gMainFileMenu not found >> catch for unitTesting')
def addAudioMenu(parent=None, rootMenu='redNineTraxRoot'):
'''
Red9 Sound Menu setup
'''
print 'AudioMenu: given parent : ',parent
if not parent:
cmds.menu(rootMenu, l=LANGUAGE_MAP._MainMenus_.sound_red9_sound, tearOff=True, allowOptionBoxes=True)
print 'New r9Sound Menu added - no specific parent given so adding to whatever menu is currently being built!'
else:
# parent is a window containing a menuBar?
if cmds.window(parent, exists=True):
if not cmds.window(parent, q=True, menuBar=True):
raise StandardError('given parent for Red9 Sound Menu has no menuBarlayout %s' % parent)
else:
cmds.menu(rootMenu, l=LANGUAGE_MAP._MainMenus_.sound_red9_sound, p=parent, tearOff=True, allowOptionBoxes=True)
log.info('New Red9 Sound Menu added to current windows menuBar : %s' % parent)
# parent is a menuBar?
elif cmds.menuBarLayout(parent, exists=True):
cmds.menu(rootMenu, l=LANGUAGE_MAP._MainMenus_.sound_red9_sound, p=parent, tearOff=True, allowOptionBoxes=True)
log.info('New Red9 Sound Menu added to current windows menuBar : %s' % parent)
# parent is a menu already?
elif cmds.menu(parent, exists=True):
cmds.menuItem(rootMenu, l=LANGUAGE_MAP._MainMenus_.sound_red9_sound, sm=True, p=parent, allowOptionBoxes=True)
log.info('New Red9 Sound subMenu added to current Menu : %s' % parent)
else:
raise StandardError('given parent for Red9 Sound Menu is invalid %s' % parent)
# if not parent:
# print 'new r9Sound Menu added'
# cmds.menu(rootMenu, l=LANGUAGE_MAP._MainMenus_.sound_red9_sound, tearOff=True, allowOptionBoxes=True)
# else:
# print 'new r9Sound Menu added to parent menu', parent
# cmds.menu(rootMenu, l=LANGUAGE_MAP._MainMenus_.sound_red9_sound, tearOff=True, allowOptionBoxes=True, parent=parent)
cmds.menuItem(l=LANGUAGE_MAP._MainMenus_.sound_offset_manager, p=rootMenu,
ann=LANGUAGE_MAP._MainMenus_.sound_offset_manager_ann,
c="import Red9.core.Red9_Audio as r9Audio;r9Audio.AudioToolsWrap().show()")
cmds.menuItem(d=True)
cmds.menuItem(l=LANGUAGE_MAP._MainMenus_.sound_activate_selected_audio, p=rootMenu,
ann=LANGUAGE_MAP._MainMenus_.sound_activate_selected_audio_ann,
c="import Red9.core.Red9_Audio as r9Audio;r9Audio.AudioHandler().setActive()")
cmds.menuItem(l=LANGUAGE_MAP._MainMenus_.sound_set_timeline_to_selected, p=rootMenu,
ann=LANGUAGE_MAP._MainMenus_.sound_set_timeline_to_selected_ann,
c="import Red9.core.Red9_Audio as r9Audio;r9Audio.AudioHandler().setTimelineToAudio()")
cmds.menuItem(l=LANGUAGE_MAP._MainMenus_.sound_focus_on_selected, p=rootMenu,
ann=LANGUAGE_MAP._MainMenus_.sound_focus_on_selected_ann,
c="import Red9.core.Red9_Audio as r9Audio;r9Audio.AudioHandler().setTimelineToAudio();r9Audio.AudioHandler().setActive()")
cmds.menuItem(d=True)
cmds.menuItem(l=LANGUAGE_MAP._MainMenus_.sound_mute_selected, p=rootMenu,
c="import Red9.core.Red9_Audio as r9Audio;r9Audio.AudioHandler().muteSelected(True)")
cmds.menuItem(l=LANGUAGE_MAP._MainMenus_.sound_unmute_selected, p=rootMenu,
c="import Red9.core.Red9_Audio as r9Audio;r9Audio.AudioHandler().muteSelected(False)")
cmds.menuItem(l=LANGUAGE_MAP._MainMenus_.sound_lock_selected, p=rootMenu,
ann=LANGUAGE_MAP._MainMenus_.sound_lock_selected_ann,
c="import Red9.core.Red9_Audio as r9Audio;r9Audio.AudioHandler().lockTimeInputs(True)")
cmds.menuItem(l=LANGUAGE_MAP._MainMenus_.sound_unlock_selected, p=rootMenu,
ann=LANGUAGE_MAP._MainMenus_.sound_unlock_selected_ann,
c="import Red9.core.Red9_Audio as r9Audio;r9Audio.AudioHandler().lockTimeInputs(False)")
cmds.menuItem(l=LANGUAGE_MAP._MainMenus_.sound_delete_selected, p=rootMenu,
ann=LANGUAGE_MAP._MainMenus_.sound_delete_selected_ann,
c="import Red9.core.Red9_Audio as r9Audio;r9Audio.AudioHandler().deleteSelected()")
cmds.menuItem(l=LANGUAGE_MAP._MainMenus_.sound_format_soundnode_name, p=rootMenu,
ann=LANGUAGE_MAP._MainMenus_.sound_format_soundnode_name_ann,
c="import Red9.core.Red9_Audio as r9Audio;r9Audio.AudioHandler().formatNodes_to_Path()")
cmds.menuItem(d=True)
cmds.menuItem(l=LANGUAGE_MAP._MainMenus_.sound_combine_audio, p=rootMenu,
ann=LANGUAGE_MAP._MainMenus_.sound_combine_audio_ann,
c="import Red9.core.Red9_Audio as r9Audio;r9Audio.combineAudio()")
cmds.menuItem(d=True)
cmds.menuItem(l=LANGUAGE_MAP._MainMenus_.sound_open_audio_path, p=rootMenu,
ann=LANGUAGE_MAP._MainMenus_.sound_open_audio_path_ann,
c="import Red9.core.Red9_Audio as r9Audio;r9Audio.AudioNode().openAudioPath()")
cmds.menuItem(l=LANGUAGE_MAP._MainMenus_.sound_inspect_wav, p=rootMenu,
ann=LANGUAGE_MAP._MainMenus_.sound_inspect_wav_ann,
c="import Red9.core.Red9_Audio as r9Audio;r9Audio.inspect_wav()")
#=========================================================================================
# GENERAL RED9 DATA ----------------------------------------------------------------------
#=========================================================================================
def red9ButtonBGC(colour):
'''
Generic setting for the main button colours in the UI's
'''
if colour==1 or colour=='green':
#return [0.6, 0.9, 0.65]
return [0.6, 1, 0.6]
elif colour==2 or colour=='grey':
return [0.5, 0.5, 0.5]
elif colour==3 or colour=='red':
return [1,0.3,0.3]
def red9ContactInfo(*args):
import Red9.core.Red9_General as r9General # lazy load
result=cmds.confirmDialog(title='Red9_StudioPack : build %f' % red9_getVersion(),
message=("Author: Mark Jackson\r\r"+
"Technical Animation Director\r\r"+
"Contact me at info@red9Consultancy.com for more information\r\r"+
"thanks for trying the toolset. If you have any\r"+
"suggestions or bugs please let me know!"),
button=['Red9Consultancy.com','ChangeLog','Close'],messageAlign='center')
if result == 'ChangeLog':
r9General.os_OpenFile(os.path.join(red9ModulePath(),'changeLog.txt'))
if result =='Red9Consultancy.com':
r9General.os_OpenFile('http://red9consultancy.com/')
def red9Presets():
'''
get the default presets dir for all filterSettings.cfg files
'''
return os.path.join(red9ModulePath(), 'presets')
def red9Presets_get():
'''
generic extraction of all cfg presets from the default location above
'''
try:
configs=[p for p in os.listdir(red9Presets()) if p.endswith('.cfg')]
configs.sort()
return configs
except:
log.debug('failed to retrieve the presets')
return []
def red9ModulePath():
'''
Returns the Main path to the Red9 root module folder
'''
return os.path.join(os.path.dirname(os.path.dirname(__file__)),'')
def red9MayaNativePath():
'''
Returns the MayaVersioned Hacked script path if valid and found
'''
_version=int(mayaVersion())
path=os.path.join(red9ModulePath(),'startup','maya_native','maya_%s' % str(_version))
if os.path.exists(path):
return path
else:
log.info('Red9MayaHacked Folder not found for this build of Maya : %s' % path)
def red9_help(*args):
'''
open up the Red9 help docs
'''
import Red9.core.Red9_General as r9General # lazy load
helpFile=os.path.join(red9ModulePath(),'docs',r'Red9-StudioTools Help.pdf')
r9General.os_OpenFile(helpFile)
def red9_blog(*args):
'''
open up the Red9 Blog
'''
import Red9.core.Red9_General as r9General # lazy load
r9General.os_OpenFile('http://red9-consultancy.blogspot.com/')
def red9_website_home(*args):
'''
open up the Red9 Consultancy homepage
'''
import Red9.core.Red9_General as r9General # lazy load
r9General.os_OpenFile('http://red9consultancy.com/')
def red9_facebook(*args):
'''
open up the Red9 Facebook Page
'''
import Red9.core.Red9_General as r9General # lazy load
r9General.os_OpenFile('http://www.facebook.com/Red9StudioPack/')
def red9_vimeo(*args):
'''
open up the Red9 Vimeo Channel
'''
import Red9.core.Red9_General as r9General # lazy load
r9General.os_OpenFile('https://vimeo.com/user9491246')
def red9_apidocs(*args):
'''
    open up the Red9 API docs
'''
import Red9.core.Red9_General as r9General # lazy load
apidocs=os.path.join(red9ModulePath(),'docs', 'html', 'index.html')
r9General.os_OpenFile(apidocs)
def red9_getVersion():
return __buildVersionID__
def red9_getAuthor():
return __author__
def get_pro_pack(*args):
try:
#new pro_pack build calls
import Red9.pro_pack.r9pro as r9pro
r9pro.r9import('r9wtools')
import r9wtools
r9wtools.MailRegistration().show()
except:
#legacy
import Red9.core.Red9_General as r9General # lazy load
result=cmds.confirmDialog(title='Red9_StudioPack : build %f' % red9_getVersion(),
message=("Red9_ProPack Not Installed!\r\r"+
"Contact info@red9consultancy.com for more information"),
button=['Red9Consultancy.com','Get_Pro','Close'],messageAlign='center')
if result == 'Get_Pro':
log.warning('Red9 ProPack systems not yet available - watch this space!')
if result =='Red9Consultancy.com':
r9General.os_OpenFile('http://red9consultancy.com/')
#=========================================================================================
# BOOT FUNCTIONS -------------------------------------------------------------------------
#=========================================================================================
def addScriptsPath(path):
'''
Add additional folders to the ScriptPath
'''
scriptsPath=os.environ.get('MAYA_SCRIPT_PATH')
if os.path.exists(path):
if not path in scriptsPath:
log.info('Adding To Script Paths : %s' % path)
os.environ['MAYA_SCRIPT_PATH']+='%s%s' % (os.pathsep,path)
else:
log.info('Red9 Script Path already setup : %s' % path)
else:
log.debug('Given Script Path is invalid : %s' % path)
def addPluginPath():
'''
Make sure the plugin path has been added. If run as a module
this will have already been added
'''
path=os.path.join(red9ModulePath(),'plug-ins')
plugPaths=os.environ.get('MAYA_PLUG_IN_PATH')
if not path in plugPaths:
log.info('Adding Red9 Plug-ins to Plugin Paths : %s' % path)
os.environ['MAYA_PLUG_IN_PATH']+='%s%s' % (os.pathsep,path)
else:
log.info('Red9 Plug-in Path already setup')
def addIconsPath():
'''
Make sure the icons path has been added. If run as a module
this will have already been added
'''
path=os.path.join(red9ModulePath(),'icons')
iconsPath=os.environ.get('XBMLANGPATH')
if not path in iconsPath:
log.info('Adding Red9 Icons To XBM Paths : %s' % path)
os.environ['XBMLANGPATH']+='%s%s' % (os.pathsep,path)
else:
log.info('Red9 Icons Path already setup')
def addPythonPackages():
'''
Add the packages folder which is where any external modules
will be stored
'''
red9Packages=os.path.join(red9ModulePath(),'packages')
if not red9Packages in sys.path:
log.info('Adding Red9Packages To Python Paths : %s' % red9Packages)
sys.path.append(red9Packages)
else:
log.info('Red9Packages Path already setup : %s' % red9Packages)
# PySide Management for pre 2014 x64 builds
if mayaVersion()<2014.0 and os.path.exists(os.path.join(red9Packages, 'PySide')):
pysidePath=os.path.join(red9Packages, 'PySide')
if mayaVersion()==2012.0:
pysidePath=os.path.join(pysidePath, 'PySide_2012_x64')
elif mayaVersion()==2013.0:
pysidePath=os.path.join(pysidePath, 'PySide_2013_x64')
if os.path.exists(pysidePath) and not pysidePath in sys.path:
sys.path.append(pysidePath)
log.info('Adding Red9Packages:PySide To Python Paths : %s' % pysidePath)
def sourceMelFolderContents(path):
'''
source all mel files in a given folder
'''
for script in [f for f in os.listdir(path) if f.lower().endswith('.mel')]:
log.info('Sourcing mel script : %s' % script)
mel.eval('source %s' % script)
#=========================================================================================
# PRO PACK ------------------------------------------------------------------------------
#=========================================================================================
PRO_PACK_STUBS=None
def pro_pack_path():
return os.path.join(red9ModulePath(),'pro_pack')
def has_pro_pack():
'''
    Return True if the Red9 Pro_Pack is available and activated for the current user
'''
if os.path.exists(pro_pack_path()):
try:
#new pro_pack call
import Red9.pro_pack.r9pro as r9pro
status=r9pro.checkr9user()
if status and not issubclass(type(status),str):
return True
else:
return False
except:
#we have the pro-pack folder so assume we're running legacy build (Dambusters support)
return True
else:
return False
class ProPack_UIError(Exception):
'''
    custom exception so we can catch it; raising it launches the
    'get ProPack' UI
'''
def __init__(self, *args):
get_pro_pack()
class ProPack_Error(Exception):
'''
custom exception so we can catch it. This is an in-function
error
'''
def __init__(self, *args):
super(ProPack_Error, self).__init__('ProPack missing from setup!')
class pro_pack_missing_stub(object):
'''
    Exception raised when the Pro_Pack is missing
    and the stubs are called
'''
def __init__(self):
raise ProPack_UIError()
#=========================================================================================
# RED9 PRODUCTION MODULES ----------------------------------------------------------------
#=========================================================================================
def has_internal_systems():
'''
Red9 Consultancy internal modules only
'''
if os.path.exists(internal_module_path()):
return True
def internal_module_path():
return os.path.join(os.path.dirname(os.path.dirname(red9ModulePath())),'Red9_Internals')
#=========================================================================================
# CLIENT MODULES -------------------------------------------------------------------------
#=========================================================================================
def client_core_path():
return os.path.join(os.path.dirname(os.path.dirname(red9ModulePath())),'Red9_ClientCore')
def has_client_modules():
'''
    Red9 Client Modules are bespoke code distributed to clients,
    tightly integrated with our ProPack core
'''
if os.path.exists(client_core_path()):
return True
def get_client_modules():
'''
get all client modules ready for the boot sequence
#TODO: link this up with a management setup so we can determine
which client to boot if we have multiple client repositories in the system.
'''
clients=[]
if has_client_modules():
for f in os.listdir(client_core_path()):
if os.path.isdir(os.path.join(client_core_path(), f)):
if not f.startswith('.'):
clients.append(f)
return clients
def boot_client_projects():
'''
Boot all Client modules found in the Red9_ClientCore dir
'''
for client in get_client_modules():
log.info('Booting Client Module : %s' % client)
cmds.evalDeferred("import Red9_ClientCore.%s" % client, lp=True) # Unresolved Import
#=========================================================================================
# BOOT CALL ------------------------------------------------------------------------------
#=========================================================================================
def start(Menu=True, MayaUIHooks=True, MayaOverloads=True, parentMenu='MayaWindow'):
'''
Main entry point for the StudioPack
@param Menu: Add the Red9 Menu to the Maya Main Menus
    @param MayaUIHooks: Add the Red9 hooks to Maya native UIs
@param MayaOverloads: run the Maya native script hacks for Red9 - integrates into native Maya ui's
'''
log.info('Red9 StudioPack v%s : author: %s' % (red9_getVersion(), red9_getAuthor()))
log.info('Red9 StudioPack Setup Calls :: Booting from >> %s' % red9ModulePath())
#check for current builds
# currentBuild=False
# try:
# currentBuild = mel.eval('$temp=$buildInstalled')
# except:
# print 'Red9 : version not found'
#
# if currentBuild:
# print 'Red9 : StudioPack already found : v', currentBuild
# if currentBuild<=red9_getVersion():
# print 'Red9 StudioPack Start Aborted : v%f is already installed' % currentBuild
# return
# else:
# print 'Red9 : no version currently loaded'
#Ensure the Plug-in and Icon paths are up
addPluginPath()
addIconsPath()
#Need to add a Mel Folder to the scripts path
addScriptsPath(os.path.join(red9ModulePath(),'core'))
#Add the Packages folder
addPythonPackages()
if not cmds.about(batch=True):
if Menu:
try:
menuSetup(parent=parentMenu)
except:
log.debug('Red9 main menu Build Failed!')
if MayaUIHooks:
#Source Maya Hacked Mel files
hacked=red9MayaNativePath()
if hacked and MayaOverloads:
addScriptsPath(os.path.join(red9ModulePath(),'startup','maya_native'))
addScriptsPath(hacked)
try:
mel.eval('source Red9_MelCore')
sourceMelFolderContents(hacked)
except StandardError, error:
log.info(error)
#Add custom items to standard built Maya menus
addToMayaMenus()
log.info('Red9 StudioPack Complete!')
# Rearrangement of the Boot core systems to better structure the boot sequence
# Boot main Red9.core
cmds.evalDeferred("import Red9.core", lp=True)
# Boot the Pro_Pack
if has_pro_pack():
cmds.evalDeferred("import Red9.pro_pack", lp=True) # Unresolved Import
# Boot the Red9_Internal systems
if has_internal_systems():
cmds.evalDeferred("import Red9_Internals", lp=True) # Unresolved Import
# Boot Client Codebases
if has_client_modules():
boot_client_projects()
#cmds.evalDeferred("import Red9_ClientCore", lp=True) # Unresolved Import
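# A minimal boot sketch (editor's illustration; the exact import path depends
# on how the Red9 package exposes this module, typically from a userSetup.py):
#
#     import Red9.startup.setup as r9Setup
#     r9Setup.start(Menu=True, parentMenu='MayaWindow')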
def reload_Red9(*args):
#global LANGUAGE_MAP
#reload(LANGUAGE_MAP)
import Red9.core
Red9.core._reload()
if has_pro_pack():
import Red9.pro_pack.core
Red9.pro_pack.core._reload()
PRO_PACK_STUBS=pro_pack_missing_stub
| Free3Dee/Red9_StudioPack | startup/setup.py | Python | bsd-3-clause | 44,595 |
#!/usr/bin/env python
import csv
import sys
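# Usage: python make_aa_table.py <input.csv> <output.csv>
# Reads the angle-fit CSV given as the first argument and writes a compact
# table of centre angle, +/- delta angles and the two areas to the second.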
filename = sys.argv[1]
filename_out = sys.argv[2]
with open(filename, 'rb') as fd:
reader = csv.reader(fd)
header = reader.next()
rows = [row for row in reader]
with open(filename_out, 'w') as fd:
fd.write('dataset, -delta angle, centre angle, +delta angle,'
' -area, +area\n')
writer = csv.writer(fd)
for row in rows:
xs = [float(ii) for ii in row[1:8]]
xm = xs[2]
rr = [row[0], '%+.2f' % (xs[1] - xm), '%.2f' % xm,
'%+.2f' % (xs[3] - xm), '%.2e' % xs[5], '%.2e' % xs[6]]
writer.writerow(rr)
| rc/dist_mixtures | make_aa_table.py | Python | bsd-3-clause | 624 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Contact.content_type_label'
db.add_column(u'contacts_contact', 'content_type_label',
self.gf('django.db.models.fields.CharField')(default='', max_length=255, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Contact.content_type_label'
db.delete_column(u'contacts_contact', 'content_type_label')
models = {
u'contacts.contact': {
'Meta': {'object_name': 'Contact'},
'comments': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'content_type_set_for_contact'", 'to': u"orm['contenttypes.ContentType']"}),
'content_type_label': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_pk': ('django.db.models.fields.TextField', [], {}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contacts.ContactType']"}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'contacts.contacttype': {
'Meta': {'object_name': 'ContactType'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['sites.Site']", 'symmetrical': 'False'})
},
u'contacts.contacttypeicon': {
'Meta': {'unique_together': "(('type', 'context'),)", 'object_name': 'ContactTypeIcon'},
'context': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'icon': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'icons'", 'to': u"orm['contacts.ContactType']"})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'sites.site': {
'Meta': {'ordering': "(u'domain',)", 'object_name': 'Site', 'db_table': "u'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['contacts']
|
suselrd/django-allcontacts
|
contacts/migrations/0002_auto__add_field_contact_content_type_label.py
|
Python
|
bsd-3-clause
| 3,604
|
"""functions relating to Jobma"""
import logging
from urllib.parse import urljoin
from django.conf import settings
from django.urls import reverse
from requests import Session
from profiles.api import get_first_and_last_names
log = logging.getLogger(__name__)
def get_jobma_client():
"""
Get an authenticated client for use with Jobma APIs
Returns:
Session: A Jobma session object
"""
session = Session()
session.headers["Authorization"] = f"Bearer {settings.JOBMA_ACCESS_TOKEN}"
session.headers[
"User-Agent"
] = f"BootcampEcommerceBot/{settings.VERSION} ({settings.SITE_BASE_URL})"
return session
def create_interview_in_jobma(interview):
"""
Create a new interview on Jobma
Args:
interview (Interview): An interview object
"""
client = get_jobma_client()
url = urljoin(settings.JOBMA_BASE_URL, "interviews")
job = interview.job
first_name, last_name = get_first_and_last_names(interview.applicant)
response = client.post(
url,
json={
"interview_template_id": str(job.interview_template_id),
"job_id": str(job.job_id),
"job_code": job.job_code,
"job_title": job.job_title,
"callback_url": urljoin(
settings.SITE_BASE_URL,
reverse("jobma-webhook", kwargs={"pk": interview.id}),
),
"candidate": {
"first_name": first_name,
"last_name": last_name,
"phone": "",
"email": interview.applicant.email,
},
},
)
response.raise_for_status()
result = response.json()
interview_link = result.get("interview_link")
if interview_link is not None:
interview.interview_url = interview_link
else:
log.error("Interview link not found in payload - %s", result)
interview_token = result.get("interview_token")
if interview_token is not None:
interview.interview_token = interview_token
interview.save_and_log(None)
return interview_link
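# Illustrative usage sketch (assumptions, not part of the original module): here
# "interview" stands for an Interview model instance with .job and .applicant set,
# and notify_applicant is a hypothetical helper.
#
#     link = create_interview_in_jobma(interview)
#     if link:
#         notify_applicant(interview.applicant, link)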
|
mitodl/bootcamp-ecommerce
|
jobma/api.py
|
Python
|
bsd-3-clause
| 2,105
|
#!/usr/bin/env python
#
# Copyright (c) 2011 Ivan Zakrevsky
# Licensed under the terms of the BSD License (see LICENSE.txt)
import os.path
from setuptools import setup, find_packages
import metadata
app_name = metadata.name
version = metadata.version
setup(
name = app_name,
version = version,
packages = find_packages(),
author = "Ivan Zakrevsky",
author_email = "ivzak@yandex.ru",
description = "Extension of django-modeltranslation.",
long_description=open(os.path.join(os.path.dirname(__file__), 'README')).read(),
license = "BSD License",
keywords = "django i18n multilingual translation",
classifiers = [
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Internationalization',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Localization',
],
url = "https://bitbucket.org/emacsway/{0}".format(app_name),
)
|
emacsway/django-modeltranslation-ext
|
setup.py
|
Python
|
bsd-3-clause
| 1,312
|
__author__ = 'oddBit'
import json
settings_json = json.dumps([
{'type': 'title',
'title': 'game settings'},
{'type': 'bool',
'title': 'hardcore mode',
'desc': 'adds enemies and a portal to the map',
'section': 'game',
'key': 'hardcoreOption'},
{'type': 'title',
'title': 'sound settings'},
{'type': 'bool',
'title': 'play music',
'desc': '',
'section': 'sound',
'key': 'musicOption'},
{'type': 'bool',
'title': 'play game sounds',
'desc': '',
'section': 'sound',
'key': 'soundsOption'}
])
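# Illustrative sketch (assumed usage, not part of the original file): settings_json
# is typically handed to a Kivy settings panel from an App subclass, e.g.
#
#     def build_settings(self, settings):
#         settings.add_json_panel('hexTap', self.config, data=settings_json)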
|
oddbitdev/hexTap
|
SettingsJson.py
|
Python
|
bsd-3-clause
| 580
|
import py
from ..base import BaseTopazTest
class TestLocalPropagation(BaseTopazTest):
def test_simple(self, space):
w_res = space.execute("""
require "libdeltablue"
string, number = "0", 0
always predicate: -> { string == number.to_s },
methods: -> {[ string <-> { number.to_s },
number <-> { string.to_i } ]}
$res = [string, number]
string = "23"
$res << string << number
number = 7
$res << string << number
return $res
""")
assert self.unwrap(space, w_res) == ["0", 0, "23", 23, "7", 7]
def test_interface_constraint(self, space):
w_res = space.execute("""
require "libdeltablue"
to_i_able = nil
always predicate: -> { to_i_able.respond_to? :to_i },
methods: -> {[ to_i_able <-> { "" } ]}
$res = []
$res << to_i_able
to_i_able = 100
$res << to_i_able
return $res
""")
assert self.unwrap(space, w_res) == ["", 100]
def test_class_constraint_raises(self, space):
with self.raises(space, "RuntimeError", "Failed to enforce a required constraint"):
space.execute("""
require "libdeltablue"
string = nil
always predicate: -> { string.is_a? String },
methods: -> {[ string <-> { "" } ]}
string = 10
""")
def test_non_required_class_constraint_doesnt_raise(self, space):
space.execute("""
require "libdeltablue"
string = nil
always predicate: -> { string.is_a? String },
methods: -> {[ string <-> { "" } ]},
priority: :medium
string = 10
# Fails if this raises
""")
def test_simple_alt_syntax(self, space):
w_res = space.execute("""
require "libdeltablue"
string, number = "0", 0
always predicate: -> { string == number.to_s } do
[string <-> { number.to_s },
number <-> { string.to_i }]
end
$res = [string, number]
string = "23"
$res << string << number
number = 7
$res << string << number
return $res
""")
assert self.unwrap(space, w_res) == ["0", 0, "23", 23, "7", 7]
def test_arithmetic(self, space):
w_res = space.execute("""
require "libdeltablue"
$res = []
x, y, z = 0, 0, 0
always predicate: -> { x + y == z } do
[x <-> { z - y },
y <-> { z - x },
z <-> { x + y }]
end
$res << [x, y, z]
x = 20
$res << [x, y, z]
z = 100
$res << [x, y, z]
return $res
""")
assert self.unwrap(space, w_res) == [
[0, 0, 0],
[20, -20, 0],
[20, 80, 100]
]
|
babelsberg/babelsberg-r
|
tests/constraints/test_local_propagation.py
|
Python
|
bsd-3-clause
| 2,923
|
__author__ = "Jens Thomas & Felix Simkovic"
__date__ = "10 June 2019"
__version__ = "1.0"
import argparse
import os
from ample.modelling.multimer_definitions import MULTIMER_MODES
class BoolAction(argparse.Action):
"""Class to set a boolean value either form a string or just from the use of the command-line flag"""
def __call__(self, parser, namespace, values, option_string=None):
if values is None:
# values = self.default
values = True # if nothing specified supplying the flag sets the variable to True
if values in ['0', 'f', 'F', 'false', 'False', False]:
values = False
elif values in ['1', 't', 'T', 'true', 'True', True]:
values = True
else:
raise argparse.ArgumentError(self, 'Unrecognised True/False value: {0}'.format(values))
setattr(namespace, self.dest, values)
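# Minimal sketch of BoolAction behaviour (added for illustration, not original code):
#
#     p = argparse.ArgumentParser()
#     p.add_argument('-debug', action=BoolAction, nargs='?', metavar='True/False')
#     p.parse_args(['-debug']).debug            # True  (bare flag)
#     p.parse_args(['-debug', 'False']).debug   # False (explicit token)
#     p.parse_args([]).debug                    # None  (flag absent, default kept)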
class FilePathAction(argparse.Action):
"""Class to handle paths to files or directories.
AMPLE changes directory into a work directory so relative paths to files don't work.
We set absolute paths here.
"""
def __call__(self, parser, namespace, values, option_string=None):
if isinstance(values, str):
values = os.path.abspath(values)
setattr(namespace, self.dest, values)
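# Minimal sketch of FilePathAction behaviour (added for illustration, not original code):
#
#     p = argparse.ArgumentParser()
#     p.add_argument('-mtz', action=FilePathAction)
#     p.parse_args(['-mtz', 'data/input.mtz']).mtz   # expanded to an absolute path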
def add_core_options(parser=None):
"""Function to add any arguments required by all runtypes"""
if parser is None:
parser = argparse.ArgumentParser()
parser.add_argument('-config_file', action=FilePathAction, help="user configuration file")
parser.add_argument('-debug', action=BoolAction, nargs='?', metavar='True/False', help=argparse.SUPPRESS)
parser.add_argument(
'-nproc',
type=int,
help="Number of processors [1]. For local, serial runs the jobs will be split across nproc processors. For cluster submission, this should be the number of processors on a node.",
)
parser.add_argument(
'-work_dir',
action=FilePathAction,
help='Path to the directory where the job will run (will be created if it doesn\'t exist)',
)
return parser
def add_cluster_submit_options(parser=None):
"""Add the options for submission to a cluster queuing system"""
if parser is None:
parser = argparse.ArgumentParser()
submit_group = parser.add_argument_group('Cluster queue submission options')
submit_group.add_argument(
'-submit_array', action=BoolAction, nargs='?', metavar='True/False', help='Submit SGE jobs as array jobs'
)
submit_group.add_argument(
'-submit_cluster',
action=BoolAction,
nargs='?',
metavar='True/False',
help='Submit jobs to a cluster - need to set -submit_qtype flag to specify the batch queue system.',
)
submit_group.add_argument(
'-submit_max_array',
type=int,
help='The maximum number of jobs to run concurrently with SGE array job submission',
)
submit_group.add_argument(
'-submit_num_array_jobs', type=int, help='The number of jobs to run concurrently with SGE array job submission'
)
submit_group.add_argument(
'-submit_pe_lsf', help='Cluster submission: string to set number of processors for LSF queueing system'
)
submit_group.add_argument(
'-submit_pe_sge', help='Cluster submission: string to set number of processors for SGE queueing system'
)
submit_group.add_argument('-submit_queue', help='The queue to submit to on the cluster.')
submit_group.add_argument('-submit_qtype', help='Cluster submission queue type - currently support SGE and LSF')
return parser
def add_general_options(parser=None):
from ample.util import version
if parser is None:
parser = argparse.ArgumentParser()
add_core_options(parser)
parser.add_argument(
'-alignment_file',
action=FilePathAction,
help='Alignment file in fasta format. For homologues the first line of each sequence must be the pdb file name',
)
parser.add_argument(
'-allow_his_tag',
action=BoolAction,
nargs='?',
metavar='True/False',
help='Allow HIS tags in the input sequence',
)
parser.add_argument(
'-blast_dir',
action=FilePathAction,
help='Directory where ncbi blast is installed (binaries in expected in bin subdirectory)',
)
parser.add_argument(
'-classic_mode',
metavar='True/False',
help='Preset options to run the original AMPLE clustering/truncation options (1 cluster, 3 subclustering radii, 3 sidechains)',
)
parser.add_argument(
'-ccp4i2_xml',
action=FilePathAction,
help='Path to CCP4I2 XML file - if not None indicates we are running under CCP4I2',
)
parser.add_argument(
'-coiled_coil',
action=BoolAction,
nargs='?',
metavar='True/False',
help='Turn on Coiled-Coil mode for solving Coiled-Coil structures',
)
parser.add_argument(
'-devel_mode', metavar='devel_mode', help='Preset options to run in development mode - takes longer'
)
parser.add_argument('-dry_run', metavar='True/False', help='Check if input files and supplied options are valid.')
parser.add_argument(
'-early_terminate',
action=BoolAction,
nargs='?',
metavar='True/False',
help='Stop the run as soon as a success has been found.',
)
parser.add_argument('-ensembles', help='Path to directory containing existing ensembles')
parser.add_argument('-fasta', action=FilePathAction, help='protein fasta file. (required)')
parser.add_argument('-fast_protein_cluster_exe', help='path to fast_protein_cluster executable')
parser.add_argument('-F', metavar='flag for F', help='Flag for F column in the MTZ file')
parser.add_argument('-FREE', metavar='flag for FREE', help='Flag for FREE column in the MTZ file')
parser.add_argument(
'-ideal_helices',
action=BoolAction,
nargs='?',
metavar='True/False',
help='Use ideal polyalanine helices to solve structure (8 helices: from 5-40 residues)',
)
parser.add_argument(
'-improve_template', metavar='improve_template', help='Path to a template to improve - NMR, homolog'
)
parser.add_argument('-LGA', metavar='path_to_LGA dir', help=argparse.SUPPRESS)
parser.add_argument(
'-make_models',
action=BoolAction,
nargs='?',
metavar='True/False',
help='Run ROSETTA modelling; set to False to import pre-made models (required if making models locally; default: True)',
)
parser.add_argument('-max_array_jobs', help='Maximum number of array jobs to run')
parser.add_argument(
'-models',
metavar='models',
help='Path to a folder of PDB decoys, or a tarred and gzipped/bziped, or zipped collection of decoys',
)
parser.add_argument(
'-mr_sequence',
action=FilePathAction,
help="sequence file for crystal content (if different from what's given by -fasta)",
)
parser.add_argument('-mtz', action=FilePathAction, metavar='MTZ in', help='The MTZ file with the reflection data.')
parser.add_argument('-name', metavar='job_name', help='4-letter identifier for job [ampl]')
parser.add_argument(
'-native_pdb',
action=FilePathAction,
metavar='native_pdb',
help='Path to the crystal structure PDB for benchmarking.',
)
parser.add_argument(
'-native_mtz',
action=FilePathAction,
metavar='native_pdb',
help='Path to the native MTZ containing FC and PHIC calculated phases for benchmarking.',
)
parser.add_argument('-nmr_model_in', action=FilePathAction, metavar='nmr_model_in', help='PDB with NMR models')
parser.add_argument('-nmr_process', type=int, help='number of times to process the NMR models')
parser.add_argument(
'-nmr_remodel', action=BoolAction, nargs='?', metavar='True/False', help='Remodel the NMR structures'
)
parser.add_argument(
'-nmr_remodel_fasta',
action=FilePathAction,
help='The FASTA sequence to be used for remodelling the NMR ensemble if different from the default FASTA sequence',
)
parser.add_argument(
'-purge',
metavar='purge_level',
type=int,
choices=[0, 1, 2],
help='Delete intermediate files and failed MRBUMP results: 0 - None, 1 - Some, 2 - All possible',
)
parser.add_argument('-psipred_ss2', metavar='PSIPRED_FILE', help='Psipred secondary structure prediction file')
parser.add_argument(
'-quick_mode',
action=BoolAction,
nargs='?',
metavar='True/False',
help='Preset options to run quickly, but less thoroughly',
)
parser.add_argument('-restart_pkl', help='Rerun a job using the pickled ample dictionary')
parser.add_argument(
'-run_dir',
action=FilePathAction,
metavar='run_directory',
help='Directory where the AMPLE work directory will be created [current dir]',
default=os.getcwd(),
)
parser.add_argument(
'-rvapi_document', action=FilePathAction, help='Path to an existing rvapi document (for running under jscofe)'
)
parser.add_argument('-scwrl_exe', metavar='path to scwrl', help='Path to Scwrl4 executable')
parser.add_argument(
'-show_gui', action=BoolAction, nargs='?', metavar='True/False', help='Pop up and display a stand-alone GUI'
)
parser.add_argument('-single_model', help='Single structure model to be used to create ensembles')
parser.add_argument(
'-sf_cif', action=FilePathAction, help='Path to a structure factor CIF file (instead of MTZ file)'
)
parser.add_argument('-SIGF', help='Flag for SIGF column in the MTZ file')
parser.add_argument('-top_model_only', metavar='True/False', help='Only process the top model in each ensemble')
parser.add_argument('--version', action='version', version='%(prog)s {0}'.format(version.__version__))
parser.add_argument(
'-webserver_uri', help='URI of the webserver directory - also indicates we are running as a webserver'
)
return parser
def add_contact_options(parser=None):
"""Contact prediction related options"""
if parser is None:
parser = argparse.ArgumentParser()
contact_group = parser.add_argument_group("Contact Restraints Options")
contact_group.add_argument(
'-bbcontacts_file', action=FilePathAction, help='Additional bbcontacts file. Requires normal contactfile'
)
contact_group.add_argument(
'-bbcontacts_format', help='Residue contact file format. For available formats refer to the AMPLE documentation'
)
contact_group.add_argument('-contact_file', action=FilePathAction, help='Residue contact file')
contact_group.add_argument(
'-contact_format', help='Residue contact file format. For available formats refer to the AMPLE documentation'
)
contact_group.add_argument(
'-disulfide_constraints_file',
action=FilePathAction,
help='Disulfide residue constraints for ab initio modelling',
)
contact_group.add_argument(
'-distance_to_neighbour', type=int, help="Min. distance between residue pairs for contact (default=5)"
)
contact_group.add_argument(
'-energy_function', help='Rosetta energy function for contact restraint conversion (default=FADE)'
)
contact_group.add_argument(
'-native_cutoff', type=float, help='Distance cutoff for reference contacts in native structure (default=8A)'
)
contact_group.add_argument(
'--no-contact-prediction', action=BoolAction, default=False, help="Do not predict contacts"
)
contact_group.add_argument(
'-restraints_factor',
type=float,
help='Factor (* Sequence length) determining number of contact restraints to use (default=1.0)',
)
contact_group.add_argument(
'-restraints_file', action=FilePathAction, help='Residue restraints for ab initio modelling'
)
contact_group.add_argument(
'-restraints_weight', type=float, help="Additional energy weighting of restraints in Rosetta"
)
contact_group.add_argument(
'-subselect_mode',
help="Long-range decoy satisfaction subselection mode - one of [{0}]".format(
" | ".join(["linear", "scaled", "cutoff"])
),
)
return parser
def add_mr_options(parser=None):
if parser is None:
parser = argparse.ArgumentParser()
mr_group = parser.add_argument_group('MRBUMP/Molecular Replacement Options')
mr_group.add_argument('-arpwarp_cycles', type=int, help='The number of ArpWarp cycles to run')
mr_group.add_argument('-buccaneer_cycles', type=int, help='The number of Buccaneer rebuilding cycles to run')
mr_group.add_argument(
'-do_mr', action=BoolAction, nargs='?', metavar='True/False', help='Run or skip the Molecular Replacement step'
)
mr_group.add_argument('-domain_termini_distance', help='distance between termini for insert domains')
mr_group.add_argument('-existing_mr_solution', action=FilePathAction, help='Existing MR solution to give to MRBUMP')
mr_group.add_argument(
'-early_terminate_SHELXE_CC', type=float, help='SHELXE_CC criteria for when a job has succeeded'
)
mr_group.add_argument(
'-early_terminate_SHELXE_ACL', type=int, help='SHELXE_ACL criteria for when a job has succeeded'
)
mr_group.add_argument(
'-molrep_only',
action=BoolAction,
nargs='?',
metavar='True/False',
help='Only use Molrep for Molecular Replacement step in MRBUMP',
)
mr_group.add_argument(
'-mrbump_dir', action=FilePathAction, help='Path to a directory of MRBUMP jobs (see restart_pkl)'
)
mr_group.add_argument(
'-mr_keys',
nargs='+',
action='append',
help='Additional keywords for MRBUMP - are passed through without editing',
)
mr_group.add_argument(
'-mr_sg_all',
metavar='True/False',
help='Try all possible space groups in PHASER Molecular Replacement step in MRBUMP',
)
mr_group.add_argument(
'-nmasu',
type=int,
help='Manually specify the number of molecules in the asymmetric unit - sets the NMASu MRBUMP flag',
)
mr_group.add_argument(
'-phaser_kill',
metavar='phaser_kill',
type=int,
help='Time in minutes after which phaser will be killed (0 to leave running)',
)
mr_group.add_argument(
'-phaser_only',
action=BoolAction,
nargs='?',
metavar='True/False',
help='Only use Phaser for Molecular Replacement step in MRBUMP',
)
mr_group.add_argument('-phaser_rms', metavar='phaser_rms', help='RMS value for phaser')
mr_group.add_argument(
'-refine_rebuild_arpwarp',
metavar='True/False',
help='True to use ARPWARP to rebuild the REFMAC-refined MR result.',
)
mr_group.add_argument(
'-refine_rebuild_buccaneer',
metavar='True/False',
help='True to use Buccaneer to rebuild the REFMAC-refined MR result.',
)
mr_group.add_argument('-shelx_cycles', help='The number of SHELXE cycles to run when rebuilding.')
mr_group.add_argument('-shelxe_exe', metavar='path to shelxe executable', help='Path to the SHELXE executable')
mr_group.add_argument('-shelxe_max_resolution', help='Maximum permitted resolution for rebuilding with SHELXE')
mr_group.add_argument(
'-shelxe_rebuild',
action=BoolAction,
nargs='?',
metavar='True/False',
help='Rebuild SHELXE traced pdb with buccaneer and arpwarp',
)
mr_group.add_argument(
'-shelxe_rebuild_arpwarp',
action=BoolAction,
nargs='?',
metavar='True/False',
help='Rebuild SHELXE traced pdb with arpwarp',
)
mr_group.add_argument(
'-shelxe_rebuild_buccaneer',
action=BoolAction,
nargs='?',
metavar='True/False',
help='Rebuild SHELXE traced pdb with buccaneer',
)
mr_group.add_argument(
'-use_scwrl',
action=BoolAction,
nargs='?',
metavar='True/False',
help='Remodel sidechains of the decoy models using Scwrl4',
)
mr_group.add_argument('-use_shelxe', action=BoolAction, nargs='?', metavar='True/False', help='True to use SHELXE')
return parser
def add_rosetta_options(parser=None):
if parser is None:
parser = argparse.ArgumentParser()
rosetta_group = parser.add_argument_group('ROSETTA Modelling Options')
rosetta_group.add_argument(
'-all_atom',
action=BoolAction,
nargs='?',
metavar='True/False',
help="Do all-atom Rosetta modelling (adds \"-return_full_atom true\" to rosetta arguments",
)
rosetta_group.add_argument(
'-frags_3mers', action=FilePathAction, help='Path to file with pre-existing Rosetta 3mer fragments'
)
rosetta_group.add_argument(
'-frags_9mers', action=FilePathAction, help='Path to file with pre-existing Rosetta 9mer fragments'
)
rosetta_group.add_argument(
'-make_frags',
action=BoolAction,
nargs='?',
metavar='True/False',
help='set True to generate Rosetta 3mers and 9mers locally, False to import fragments',
)
rosetta_group.add_argument(
'-multimer_modelling', help='Generate multimeric models. Accepted values: {}'.format(MULTIMER_MODES)
)
rosetta_group.add_argument(
'-nmodels', metavar='number of models', type=int, help='number of models to make (default: 1000)'
)
rosetta_group.add_argument('-nr', metavar='nr', help='Path to the NR non-redundant sequence database')
rosetta_group.add_argument(
'-rg_reweight',
metavar='radius of gyration reweight',
type=float,
help='Set the Rosetta -rg_reweight flag to specify the radius of gyration reweight.',
)
rosetta_group.add_argument(
'-rosetta_executable', action=FilePathAction, help='Path to ROSETTA executable for modelling'
)
rosetta_group.add_argument('-rosetta_db', action=FilePathAction, help='Path to the Rosetta database directory')
rosetta_group.add_argument('-rosetta_dir', action=FilePathAction, help='The Rosetta install directory')
rosetta_group.add_argument(
'-rosetta_fragments_exe', action=FilePathAction, help='Location of the Rosetta make_fragments.pl script'
)
rosetta_group.add_argument(
'-rosetta_flagsfile', action=FilePathAction, help='Location of file with Rosetta modelling commands'
)
rosetta_group.add_argument('-rosetta_version', type=float, help='The version number of Rosetta')
rosetta_group.add_argument(
'-transmembrane',
action=BoolAction,
nargs='?',
metavar='True/False',
help='Do Rosetta modelling for transmembrane proteins (Ovchinnikov protocol)',
)
rosetta_group.add_argument(
'-transmembrane_old',
action=BoolAction,
nargs='?',
metavar='True/False',
help='Do Rosetta modelling for transmembrane proteins (Yarov-Yarovoy protocol)',
)
rosetta_group.add_argument(
'-transmembrane_octopusfile', action=FilePathAction, help='Octopus transmembrane topology prediction file'
)
rosetta_group.add_argument(
'-transmembrane_spanfile', action=FilePathAction, help='Span file for modelling transmembrane proteins'
)
rosetta_group.add_argument(
'-transmembrane_lipofile', action=FilePathAction, help='Lips4 file for modelling transmembrane proteins'
)
rosetta_group.add_argument(
'-use_homs',
action=BoolAction,
nargs='?',
metavar='True/False',
help="Select ROSETTA fragments from homologous models",
)
return parser
def add_ensembler_options(parser=None):
# --------------------------------------------------------------------------------------------- #
# sphinx-argparse ignores Mock imports and thus cannot find iotbx.pdb when generating the docs. #
try:
from ample.ensembler.constants import ALLOWED_SIDE_CHAIN_TREATMENTS, SPICKER_RMSD, SPICKER_TM
from ample.ensembler.truncation_util import TRUNCATION_METHODS
except ImportError:
allowed_side_chain_treatments = ['polyala', 'reliable', 'allatom', 'unmod']
truncation_methods = ['percent']
SPICKER_RMSD = 'spicker'
SPICKER_TM = 'spicker_tm'
else:
allowed_side_chain_treatments = ALLOWED_SIDE_CHAIN_TREATMENTS[:]
truncation_methods = [t.value for t in TRUNCATION_METHODS]
if parser is None:
parser = argparse.ArgumentParser()
ensembler_group = parser.add_argument_group('Ensemble Options')
ensembler_group.add_argument(
'-cluster_dir', action=FilePathAction, help='Path to directory of pre-clustered models to import'
)
ensembler_group.add_argument(
'-cluster_method',
help='How to cluster the models for ensembling. Options: ' + '|'.join([SPICKER_RMSD, SPICKER_TM]),
)
ensembler_group.add_argument('-ensembler_timeout', type=int, help='Time in seconds before timing out ensembling')
ensembler_group.add_argument(
'-gesamt_exe', action=FilePathAction, metavar='gesamt_exe', help='Path to the gesamt executable'
)
ensembler_group.add_argument(
'-homologs',
action=BoolAction,
nargs='?',
metavar='True/False',
help='Generate ensembles from homolog models (requires -alignment_file)',
)
ensembler_group.add_argument(
'-homolog_aligner',
metavar='homolog_aligner',
help='Program to use for structural alignment of homologs (gesamt|mustang)',
)
ensembler_group.add_argument('-ensemble_max_models', help='Maximum number of models permitted in an ensemble')
ensembler_group.add_argument(
'-mustang_exe', action=FilePathAction, metavar='mustang_exe', help='Path to the mustang executable'
)
ensembler_group.add_argument(
'-num_clusters', type=int, help='The number of Spicker clusters of the original decoys that will be sampled [1]'
)
ensembler_group.add_argument('-percent', metavar='percent_truncation', help='percent interval for truncation')
ensembler_group.add_argument(
'-percent_fixed_intervals', nargs='+', type=int, help='list of integer percentage intervals for truncation'
)
ensembler_group.add_argument('-score_matrix', action=FilePathAction, help='Path to score matrix for spicker')
ensembler_group.add_argument(
'-score_matrix_file_list',
action=FilePathAction,
help='File with list of ordered model names for the score_matrix',
)
ensembler_group.add_argument(
'-side_chain_treatments',
type=str,
nargs='+',
help='The side chain treatments to use. Options: ' + '|'.join(allowed_side_chain_treatments),
)
ensembler_group.add_argument('-spicker_exe', action=FilePathAction, help='Path to spicker executable')
ensembler_group.add_argument(
'-subcluster_radius_thresholds',
type=float,
nargs='+',
help='The radii to use for subclustering the truncated ensembles',
)
ensembler_group.add_argument('-subcluster_program', help='Program for subclustering models [gesamt]')
ensembler_group.add_argument(
'-theseus_exe', action=FilePathAction, metavar='Theseus exe', help='Path to theseus executable'
)
ensembler_group.add_argument(
'-thin_clusters',
action=BoolAction,
nargs='?',
metavar='True/False',
help='Create ensembles from 10 clusters with 1 + 3A subclustering and polyAlanine sidechains',
)
ensembler_group.add_argument(
'-truncation_method', help='How to truncate the models for ensembling: ' + '|'.join(truncation_methods)
)
ensembler_group.add_argument('-truncation_pruning', help='Whether to remove isolated residues (single)')
ensembler_group.add_argument(
'-truncation_scorefile',
action=FilePathAction,
help="CSV file containing per residue scores - COLUMN ONE MUST BE RESIDUE INDEX STARTING FROM 1",
)
ensembler_group.add_argument(
'-truncation_scorefile_header', nargs='+', help="column headers to be used to create ensembles"
)
return parser
def process_command_line(args=None, contacts=True, modelling=True, mol_rep=True):
"""Process the command-line for the main AMPLE program.
:args: optional argument that can hold the command-line arguments if we
have been called from within python for testing
"""
parser = argparse.ArgumentParser(
description="AMPLE: Ab initio Modelling of Proteins for moLEcular replacement", prefix_chars="-"
)
add_general_options(parser)
add_cluster_submit_options(parser)
add_ensembler_options(parser)
if contacts:
add_contact_options(parser)
if mol_rep:
add_mr_options(parser)
if modelling:
add_rosetta_options(parser)
return vars(parser.parse_args(args))
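# Illustrative usage sketch (added, not part of the original module); the file names
# are placeholders:
#
#     amopt = process_command_line(args=['-fasta', 'toxd.fasta', '-mtz', 'toxd.mtz'])
#     amopt['fasta'], amopt['mtz']   # absolute paths, courtesy of FilePathAction
#     amopt['run_dir']               # defaults to the current working directory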
|
linucks/ample
|
ample/util/argparse_util.py
|
Python
|
bsd-3-clause
| 25,356
|
#! /usr/bin/env python
#
# Copyright (C) 2016 Rich Lewis <rl403@cam.ac.uk>
# License: 3-clause BSD
"""
## skchem.cross_validation
Module implementing cross validation routines useful for chemical data.
"""
from .similarity_threshold import SimThresholdSplit
__all__ = [
'SimThresholdSplit'
]
|
richlewis42/scikit-chem
|
skchem/cross_validation/__init__.py
|
Python
|
bsd-3-clause
| 300
|
#coding=utf-8
import time
class ConfsModel:
def __init__(self):
self.reload()
def reload(self, datum = None):
self._cache = {}
if datum is not None:
ret = datum.result('select conf_name, conf_vals from confs')
if ret:
for row in ret:
self._cache[row['conf_name']] = row['conf_vals']
def obtain(self, datum, name):
if name in self._cache:
return self._cache[name]
ret = datum.single('select conf_vals from confs where conf_name = ?', (name, ))
if ret:
self._cache[name] = ret['conf_vals']
else:
self._cache[name] = None
return self._cache[name]
def exists(self, datum, name):
ret = datum.single('select 1 from confs where conf_name = ?', (name, ))
return bool(ret)
def upsert(self, datum, name, vals):
datum.affect('replace into confs (conf_name, conf_vals, conf_ctms) values (?, ?, ?)', (name, vals, int(time.time()),))
self._cache[name] = vals
def delete(self, datum, name):
datum.affect('delete from confs where conf_name = ?', (name, ))
self._cache[name] = None
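# Illustrative sketch (assumed usage, not part of the original file): "datum" is the
# project's DB helper exposing result()/single()/affect() as used above.
#
#     confs = ConfsModel()
#     confs.upsert(datum, 'site_title', 'LuoKr')   # writes through and caches
#     confs.obtain(datum, 'site_title')            # -> 'LuoKr', served from the cache
#     confs.delete(datum, 'site_title')            # row removed, cache entry set to None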
|
finron/luokr.com
|
www.luokr.com/app/model/confs.py
|
Python
|
bsd-3-clause
| 1,204
|
import json
import os
import time
import phonenumbers
import psycopg2
from ndoh_hub.constants import LANGUAGES
def get_addresses(addresses):
addresses = addresses.get("msisdn") or {}
result = []
for addr, details in addresses.items():
try:
p = phonenumbers.parse(addr, "ZA")
assert phonenumbers.is_possible_number(p)
assert phonenumbers.is_valid_number(p)
p = phonenumbers.format_number(p, phonenumbers.PhoneNumberFormat.E164)
except Exception:
continue
if details.get("default"):
return [p]
if not details.get("optedout"):
result.append(p)
return result
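# Illustrative sketch (added, not in the original script), assuming both numbers pass
# the phonenumbers checks above: a "default" address wins outright, opted-out
# addresses are skipped, anything else is collected.
#
#     get_addresses({"msisdn": {"+27831110000": {"optedout": True},
#                               "+27831112222": {"default": True}}})
#     # -> ["+27831112222"]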
def process_identity(identities, id, details, failed_msgs_count):
details = details or {}
addresses = get_addresses(details.get("addresses", {}))
if not addresses or "redacted" in addresses:
return
identities[id] = {
"msisdns": addresses,
"failed_msgs_count": failed_msgs_count,
"uuid": id,
}
for k in [
"operator_id",
"passport_no",
"passport_origin",
"consent",
"sa_id_no",
"mom_given_name",
"mom_family_name",
"faccode",
"id_type",
]:
if details.get(k):
identities[id][k] = details[k]
language = (
details.get("lang_code")
or details.get("language")
or details.get("preferred_language")
)
if language and language in LANGUAGES:
identities[id]["language"] = language.rstrip("_ZA")
pmtct_risk = details.get("pmtct", {}).get("risk_status", None)
if pmtct_risk:
identities[id]["pmtct_risk"] = pmtct_risk
dob = details.get("mom_dob") or details.get("dob")
if dob:
identities[id]["mom_dob"] = dob
def process_optout(identities, id, created, reason):
if not identities.get(id):
return
created = created.isoformat()
timestamp = identities[id].get("optout_timestamp")
if timestamp and timestamp > created:
return
identities[id]["optout_timestamp"] = created
identities[id]["optout_reason"] = reason
def process_registration(identities, id, data):
if not identities.get(id):
return
for k in [
"edd",
"faccode",
"id_type",
"mom_dob",
"mom_given_name",
"mom_family_name",
"msisdn_device",
"passport_no",
"passport_origin",
"sa_id_no",
"consent",
]:
if data.get(k):
if not identities[id].get(k):
identities[id][k] = data[k]
if data.get("baby_dob"):
if not identities[id].get("baby_dobs"):
identities[id]["baby_dobs"] = [data["baby_dob"]]
else:
identities[id]["baby_dobs"].append(data["baby_dob"])
uuid_device = data.get("uuid_device") or data.get("operator_id")
if uuid_device and not identities[id].get("msisdn_device"):
try:
identities[id]["msisdn_device"] = identities[uuid_device]["msisdns"][0]
except Exception:
pass
if data.get("language") and not identities[id].get("language"):
if data["language"] in LANGUAGES:
identities[id]["language"] = data["language"].rstrip("_ZA")
def process_change(identities, id, action, data, created):
if not identities.get(id):
return
created = created.isoformat()
if "optout" in action:
timestamp = identities[id].get("optout_timestamp")
if timestamp and timestamp > created:
return
identities[id]["optout_timestamp"] = created
if data.get("reason"):
identities[id]["optout_reason"] = data["reason"]
elif action == "baby_switch":
baby_dobs = identities[id].get("baby_dobs")
if not baby_dobs:
identities[id]["baby_dobs"] = [created]
else:
identities[id]["baby_dobs"].append(created)
def process_subscription(identities, id, name, created_at):
if not identities.get(id):
return
created_at = created_at.isoformat()
if "whatsapp" in name:
identities[id]["channel"] = "WhatsApp"
else:
if not identities[id].get("channel"):
identities[id]["channel"] = "SMS"
if "pmtct" in name:
identities[id]["pmtct_messaging"] = "TRUE"
elif "loss" in name:
identities[id]["optout_reason"] = name.split(".")[0].split("_")[-1]
identities[id]["optout_timestamp"] = created_at
identities[id]["loss_messaging"] = "TRUE"
elif (
"momconnect_prebirth.patient" in name
or "momconnect_prebirth.hw_partial" in name
):
identities[id]["public_messaging"] = "TRUE"
identities[id]["public_registration_date"] = created_at
elif "momconnect_prebirth.hw_full" in name:
identities[id]["prebirth_messaging"] = name[-1]
elif "momconnect_postbirth.hw_full" in name:
identities[id]["postbirth_messaging"] = "TRUE"
else:
return
def merge_dicts(d1, d2):
for k, v in d2.items():
if type(v) == list:
d1[k] = d1.get(k, []) + v
else:
d1[k] = v
return d1
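# Worked example (added for illustration, not in the original script): list values
# are concatenated, everything else is overwritten by d2.
#
#     merge_dicts({"baby_dobs": ["2019-01-01"], "language": "eng"},
#                 {"baby_dobs": ["2020-02-02"], "language": "zul"})
#     # -> {"baby_dobs": ["2019-01-01", "2020-02-02"], "language": "zul"}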
def deduplicate_msisdns(identities):
msisdns: dict = {}
total = 0
start, d_print = time.time(), time.time()
for identity in identities.values():
for msisdn in identity.pop("msisdns"):
msisdns[msisdn] = merge_dicts(
msisdns.get(msisdn, {"msisdn": msisdn}), identity
)
if time.time() - d_print > 1:
print(
f"\rProcessed {total} msisdns at {total/(time.time() - start):.0f}/s",
end="",
)
d_print = time.time()
total += 1
print(f"\nProcessed {total} msisdns in {time.time() - start:.0f}s")
return msisdns
if __name__ == "__main__":
identities: dict = {}
conn = psycopg2.connect(
dbname="identitystore",
user="identitystore",
password=os.environ["IDENTITY_PASS"],
host="localhost",
port=7000,
)
cursor = conn.cursor("identity_store_identities")
print("Processing identities...")
cursor.execute(
"""
SELECT
id, details, failed_message_count
FROM
identities_identity
"""
)
total = 0
start, d_print = time.time(), time.time()
for (id, details, failed_msgs_count) in cursor:
process_identity(identities, id, details, failed_msgs_count)
if time.time() - d_print > 1:
print(
f"\rProcessed {total} identities at "
f"{total/(time.time() - start):.0f}/s",
end="",
)
d_print = time.time()
total += 1
print(f"\nProcessed {total} identities in {time.time() - start:.0f}s")
print("Processing opt outs...")
cursor = conn.cursor("identity_store_optouts")
cursor.execute(
"""
SELECT
identity_id, created_at, reason
FROM
identities_optout
"""
)
total = 0
start, d_print = time.time(), time.time()
for (id, created, reason) in cursor:
process_optout(identities, id, created, reason)
if time.time() - d_print > 1:
print(
f"\rProcessed {total} optouts at {total/(time.time() - start):.0f}/s",
end="",
)
d_print = time.time()
total += 1
print(f"\nProcessed {total} optouts in {time.time() - start:.0f}s")
print("Processing Registrations...")
conn = psycopg2.connect(
dbname="hub",
user="hub",
password=os.environ["HUB_PASS"],
host="localhost",
port=7000,
)
cursor = conn.cursor("hub_registrations")
cursor.execute(
"""
SELECT
registrant_id, data
FROM
registrations_registration
WHERE
validated=true
ORDER BY
created_at ASC
"""
)
total = 0
start, d_print = time.time(), time.time()
for (id, data) in cursor:
process_registration(identities, id, data)
if time.time() - d_print > 1:
print(
f"\rProcessed {total} registrations at "
f"{total/(time.time() - start):.0f}/s",
end="",
)
d_print = time.time()
total += 1
print(f"\nProcessed {total} registrations in {time.time() - start:.0f}s")
print("Processing Changes...")
cursor = conn.cursor("hub_changes")
cursor.execute(
"""
SELECT
registrant_id, action, data, created_at
FROM
changes_change
WHERE
validated=true
ORDER BY
created_at ASC
"""
)
total = 0
start, d_print = time.time(), time.time()
for (id, action, data, created) in cursor:
process_change(identities, id, action, data, created)
if time.time() - d_print > 1:
print(
f"\rProcessed {total} changes at {total/(time.time() - start):.0f}/s",
end="",
)
d_print = time.time()
total += 1
print(f"\nProcessed {total} changes in {time.time() - start:.0f}s")
print("Processing subscriptions...")
conn = psycopg2.connect(
dbname="stage_based_messaging",
user="stage_based_messaging",
password=os.environ["STAGE_PASS"],
host="localhost",
port=7000,
)
cursor = conn.cursor("stage_subscriptions")
cursor.execute(
"""
SELECT
subscription.identity, messageset.short_name, subscription.created_at
FROM
subscriptions_subscription as subscription
JOIN
contentstore_messageset as messageset
ON
subscription.messageset_id = messageset.id
WHERE
subscription.active=true and
subscription.completed=false and
subscription.process_status=0
"""
)
total = 0
start, d_print = time.time(), time.time()
for (id, name, created) in cursor:
process_subscription(identities, id, name, created)
if time.time() - d_print > 1:
print(
f"\rProcessed {total} subscriptions at "
f"{total/(time.time() - start):.0f}/s",
end="",
)
d_print = time.time()
total += 1
print(f"\nProcessed {total} subscriptions in {time.time() - start:.0f}s")
print("Deduplicating msisdns")
identities = deduplicate_msisdns(identities)
print("Writing results to file..")
start = time.time()
with open("results.json", "w") as f:
for i in identities.values():
f.write(json.dumps(i))
f.write("\n")
print(f"Wrote results to file in {time.time() - start:.0f}s")
|
praekeltfoundation/ndoh-hub
|
scripts/migrate_to_rapidpro/collect_information.py
|
Python
|
bsd-3-clause
| 10,809
|
# -*- coding: utf-8 -*-
from pytest import raises
from watson.filters import abc
class TestFilterBase(object):
def test_call_error(self):
with raises(TypeError):
abc.Filter()
|
watsonpy/watson-filters
|
tests/watson/filters/test_abc.py
|
Python
|
bsd-3-clause
| 202
|
from typing import Any, List, Union
from apistar import app
from apistar.pipelines import ArgName
class Settings(dict):
@classmethod
def build(cls, app: app.App):
return cls(app.settings)
def get(self, indexes: Union[str, List[str]], default: Any=None) -> Any:
if isinstance(indexes, str):
return super().get(indexes, default)
value = self
for index in indexes:
if not isinstance(value, dict):
return default
value = value.get(index, default)
return value
class Setting(object):
def __new__(cls, *args):
assert len(args) == 1
return args[0]
@classmethod
def build(cls, arg_name: ArgName, settings: Settings):
return settings.get(arg_name)
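# Illustrative sketch (added, not part of the original module): Settings supports
# nested lookups via a list of keys, and Setting simply returns the wrapped value.
#
#     s = Settings({'DATABASES': {'default': {'NAME': 'app'}}})
#     s.get(['DATABASES', 'default', 'NAME'])      # 'app'
#     s.get(['DATABASES', 'missing'], 'fallback')  # 'fallback'
#     Setting('value')                             # 'value'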
|
thimslugga/apistar
|
apistar/settings.py
|
Python
|
bsd-3-clause
| 787
|
import numpy as np
import scipy.sparse as sp
from scipy import linalg, optimize, sparse
import scipy
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_raise_message
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils import compute_class_weight
from sklearn.linear_model.logistic import (
LogisticRegression,
logistic_regression_path, LogisticRegressionCV,
_logistic_loss_and_grad, _logistic_grad_hess,
_multinomial_grad_hess, _logistic_loss,
)
from sklearn.model_selection import StratifiedKFold
from sklearn.datasets import load_iris, make_classification
from sklearn.metrics import log_loss
X = [[-1, 0], [0, 1], [1, 1]]
X_sp = sp.csr_matrix(X)
Y1 = [0, 1, 1]
Y2 = [2, 1, 0]
iris = load_iris()
sp_version = tuple([int(s) for s in scipy.__version__.split('.')])
def check_predictions(clf, X, y):
"""Check that the model is able to fit the classification data"""
n_samples = len(y)
classes = np.unique(y)
n_classes = classes.shape[0]
predicted = clf.fit(X, y).predict(X)
assert_array_equal(clf.classes_, classes)
assert_equal(predicted.shape, (n_samples,))
assert_array_equal(predicted, y)
probabilities = clf.predict_proba(X)
assert_equal(probabilities.shape, (n_samples, n_classes))
assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples))
assert_array_equal(probabilities.argmax(axis=1), y)
def test_predict_2_classes():
# Simple sanity check on a 2 classes dataset
# Make sure it predicts the correct result on simple datasets.
check_predictions(LogisticRegression(random_state=0), X, Y1)
check_predictions(LogisticRegression(random_state=0), X_sp, Y1)
check_predictions(LogisticRegression(C=100, random_state=0), X, Y1)
check_predictions(LogisticRegression(C=100, random_state=0), X_sp, Y1)
check_predictions(LogisticRegression(fit_intercept=False,
random_state=0), X, Y1)
check_predictions(LogisticRegression(fit_intercept=False,
random_state=0), X_sp, Y1)
def test_error():
# Test for appropriate exception on errors
msg = "Penalty term must be positive"
assert_raise_message(ValueError, msg,
LogisticRegression(C=-1).fit, X, Y1)
assert_raise_message(ValueError, msg,
LogisticRegression(C="test").fit, X, Y1)
for LR in [LogisticRegression, LogisticRegressionCV]:
msg = "Tolerance for stopping criteria must be positive"
assert_raise_message(ValueError, msg, LR(tol=-1).fit, X, Y1)
assert_raise_message(ValueError, msg, LR(tol="test").fit, X, Y1)
msg = "Maximum number of iteration must be positive"
assert_raise_message(ValueError, msg, LR(max_iter=-1).fit, X, Y1)
assert_raise_message(ValueError, msg, LR(max_iter="test").fit, X, Y1)
def test_predict_3_classes():
check_predictions(LogisticRegression(C=10), X, Y2)
check_predictions(LogisticRegression(C=10), X_sp, Y2)
def test_predict_iris():
# Test logistic regression with the iris dataset
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
# Test that both multinomial and OvR solvers handle
# multiclass data correctly and give good accuracy
# score (>0.95) for the training data.
for clf in [LogisticRegression(C=len(iris.data)),
LogisticRegression(C=len(iris.data), solver='lbfgs',
multi_class='multinomial'),
LogisticRegression(C=len(iris.data), solver='newton-cg',
multi_class='multinomial'),
LogisticRegression(C=len(iris.data), solver='sag', tol=1e-2,
multi_class='ovr', random_state=42)]:
clf.fit(iris.data, target)
assert_array_equal(np.unique(target), clf.classes_)
pred = clf.predict(iris.data)
assert_greater(np.mean(pred == target), .95)
probabilities = clf.predict_proba(iris.data)
assert_array_almost_equal(probabilities.sum(axis=1),
np.ones(n_samples))
pred = iris.target_names[probabilities.argmax(axis=1)]
assert_greater(np.mean(pred == target), .95)
def test_multinomial_validation():
for solver in ['lbfgs', 'newton-cg']:
lr = LogisticRegression(C=-1, solver=solver, multi_class='multinomial')
assert_raises(ValueError, lr.fit, [[0, 1], [1, 0]], [0, 1])
def test_check_solver_option():
X, y = iris.data, iris.target
for LR in [LogisticRegression, LogisticRegressionCV]:
msg = ("Logistic Regression supports only liblinear, newton-cg, lbfgs"
" and sag solvers, got wrong_name")
lr = LR(solver="wrong_name")
assert_raise_message(ValueError, msg, lr.fit, X, y)
msg = "multi_class should be either multinomial or ovr, got wrong_name"
lr = LR(solver='newton-cg', multi_class="wrong_name")
assert_raise_message(ValueError, msg, lr.fit, X, y)
# all solver except 'newton-cg' and 'lfbgs'
for solver in ['liblinear', 'sag']:
msg = ("Solver %s does not support a multinomial backend." %
solver)
lr = LR(solver=solver, multi_class='multinomial')
assert_raise_message(ValueError, msg, lr.fit, X, y)
# all solvers except 'liblinear'
for solver in ['newton-cg', 'lbfgs', 'sag']:
msg = ("Solver %s supports only l2 penalties, got l1 penalty." %
solver)
lr = LR(solver=solver, penalty='l1')
assert_raise_message(ValueError, msg, lr.fit, X, y)
msg = ("Solver %s supports only dual=False, got dual=True" %
solver)
lr = LR(solver=solver, dual=True)
assert_raise_message(ValueError, msg, lr.fit, X, y)
def test_multinomial_binary():
# Test multinomial LR on a binary problem.
target = (iris.target > 0).astype(np.intp)
target = np.array(["setosa", "not-setosa"])[target]
for solver in ['lbfgs', 'newton-cg']:
clf = LogisticRegression(solver=solver, multi_class='multinomial')
clf.fit(iris.data, target)
assert_equal(clf.coef_.shape, (1, iris.data.shape[1]))
assert_equal(clf.intercept_.shape, (1,))
assert_array_equal(clf.predict(iris.data), target)
mlr = LogisticRegression(solver=solver, multi_class='multinomial',
fit_intercept=False)
mlr.fit(iris.data, target)
pred = clf.classes_[np.argmax(clf.predict_log_proba(iris.data),
axis=1)]
assert_greater(np.mean(pred == target), .9)
def test_sparsify():
# Test sparsify and densify members.
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
clf = LogisticRegression(random_state=0).fit(iris.data, target)
pred_d_d = clf.decision_function(iris.data)
clf.sparsify()
assert_true(sp.issparse(clf.coef_))
pred_s_d = clf.decision_function(iris.data)
sp_data = sp.coo_matrix(iris.data)
pred_s_s = clf.decision_function(sp_data)
clf.densify()
pred_d_s = clf.decision_function(sp_data)
assert_array_almost_equal(pred_d_d, pred_s_d)
assert_array_almost_equal(pred_d_d, pred_s_s)
assert_array_almost_equal(pred_d_d, pred_d_s)
def test_inconsistent_input():
# Test that an exception is raised on inconsistent input
rng = np.random.RandomState(0)
X_ = rng.random_sample((5, 10))
y_ = np.ones(X_.shape[0])
y_[0] = 0
clf = LogisticRegression(random_state=0)
# Wrong dimensions for training data
y_wrong = y_[:-1]
assert_raises(ValueError, clf.fit, X, y_wrong)
# Wrong dimensions for test data
assert_raises(ValueError, clf.fit(X_, y_).predict,
rng.random_sample((3, 12)))
def test_write_parameters():
# Test that we can write to coef_ and intercept_
clf = LogisticRegression(random_state=0)
clf.fit(X, Y1)
clf.coef_[:] = 0
clf.intercept_[:] = 0
assert_array_almost_equal(clf.decision_function(X), 0)
@raises(ValueError)
def test_nan():
# Test proper NaN handling.
# Regression test for Issue #252: fit used to go into an infinite loop.
Xnan = np.array(X, dtype=np.float64)
Xnan[0, 1] = np.nan
LogisticRegression(random_state=0).fit(Xnan, Y1)
def test_consistency_path():
# Test that the path algorithm is consistent
rng = np.random.RandomState(0)
X = np.concatenate((rng.randn(100, 2) + [1, 1], rng.randn(100, 2)))
y = [1] * 100 + [-1] * 100
Cs = np.logspace(0, 4, 10)
f = ignore_warnings
# can't test with fit_intercept=True since LIBLINEAR
# penalizes the intercept
for solver in ('lbfgs', 'newton-cg', 'liblinear', 'sag'):
coefs, Cs, _ = f(logistic_regression_path)(
X, y, Cs=Cs, fit_intercept=False, tol=1e-5, solver=solver,
random_state=0)
for i, C in enumerate(Cs):
lr = LogisticRegression(C=C, fit_intercept=False, tol=1e-5,
random_state=0)
lr.fit(X, y)
lr_coef = lr.coef_.ravel()
assert_array_almost_equal(lr_coef, coefs[i], decimal=4,
err_msg="with solver = %s" % solver)
# test for fit_intercept=True
for solver in ('lbfgs', 'newton-cg', 'liblinear', 'sag'):
Cs = [1e3]
coefs, Cs, _ = f(logistic_regression_path)(
X, y, Cs=Cs, fit_intercept=True, tol=1e-6, solver=solver,
intercept_scaling=10000., random_state=0)
lr = LogisticRegression(C=Cs[0], fit_intercept=True, tol=1e-4,
intercept_scaling=10000., random_state=0)
lr.fit(X, y)
lr_coef = np.concatenate([lr.coef_.ravel(), lr.intercept_])
assert_array_almost_equal(lr_coef, coefs[0], decimal=4,
err_msg="with solver = %s" % solver)
def test_liblinear_dual_random_state():
# random_state is relevant for liblinear solver only if dual=True
X, y = make_classification(n_samples=20)
lr1 = LogisticRegression(random_state=0, dual=True, max_iter=1, tol=1e-15)
lr1.fit(X, y)
lr2 = LogisticRegression(random_state=0, dual=True, max_iter=1, tol=1e-15)
lr2.fit(X, y)
lr3 = LogisticRegression(random_state=8, dual=True, max_iter=1, tol=1e-15)
lr3.fit(X, y)
# same result for same random state
assert_array_almost_equal(lr1.coef_, lr2.coef_)
# different results for different random states
msg = "Arrays are not almost equal to 6 decimals"
assert_raise_message(AssertionError, msg,
assert_array_almost_equal, lr1.coef_, lr3.coef_)
def test_logistic_loss_and_grad():
X_ref, y = make_classification(n_samples=20)
n_features = X_ref.shape[1]
X_sp = X_ref.copy()
X_sp[X_sp < .1] = 0
X_sp = sp.csr_matrix(X_sp)
for X in (X_ref, X_sp):
w = np.zeros(n_features)
# First check that our derivation of the grad is correct
loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
approx_grad = optimize.approx_fprime(
w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
)
assert_array_almost_equal(grad, approx_grad, decimal=2)
# Second check that our intercept implementation is good
w = np.zeros(n_features + 1)
loss_interp, grad_interp = _logistic_loss_and_grad(
w, X, y, alpha=1.
)
assert_array_almost_equal(loss, loss_interp)
approx_grad = optimize.approx_fprime(
w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
)
assert_array_almost_equal(grad_interp, approx_grad, decimal=2)
def test_logistic_grad_hess():
rng = np.random.RandomState(0)
n_samples, n_features = 50, 5
X_ref = rng.randn(n_samples, n_features)
y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
X_ref -= X_ref.mean()
X_ref /= X_ref.std()
X_sp = X_ref.copy()
X_sp[X_sp < .1] = 0
X_sp = sp.csr_matrix(X_sp)
for X in (X_ref, X_sp):
w = .1 * np.ones(n_features)
# First check that _logistic_grad_hess is consistent
# with _logistic_loss_and_grad
loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
grad_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)
assert_array_almost_equal(grad, grad_2)
# Now check our hessian along the second direction of the grad
vector = np.zeros_like(grad)
vector[1] = 1
hess_col = hess(vector)
# Computation of the Hessian is particularly fragile to numerical
# errors when doing simple finite differences. Here we compute the
# grad along a path in the direction of the vector and then use a
# least-square regression to estimate the slope
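# (Added note, not in the original test: the least-squares fit below estimates the
# Hessian-vector product hess(v) = d/dt grad(w + t*v) at t = 0, i.e. the slope of
# the gradient along direction v, which is less noisy than a single finite difference.)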
e = 1e-3
d_x = np.linspace(-e, e, 30)
d_grad = np.array([
_logistic_loss_and_grad(w + t * vector, X, y, alpha=1.)[1]
for t in d_x
])
d_grad -= d_grad.mean(axis=0)
approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
assert_array_almost_equal(approx_hess_col, hess_col, decimal=3)
# Second check that our intercept implementation is good
w = np.zeros(n_features + 1)
loss_interp, grad_interp = _logistic_loss_and_grad(w, X, y, alpha=1.)
loss_interp_2 = _logistic_loss(w, X, y, alpha=1.)
grad_interp_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)
assert_array_almost_equal(loss_interp, loss_interp_2)
assert_array_almost_equal(grad_interp, grad_interp_2)
def test_logistic_cv():
# test for LogisticRegressionCV object
n_samples, n_features = 50, 5
rng = np.random.RandomState(0)
X_ref = rng.randn(n_samples, n_features)
y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
X_ref -= X_ref.mean()
X_ref /= X_ref.std()
lr_cv = LogisticRegressionCV(Cs=[1.], fit_intercept=False,
solver='liblinear')
lr_cv.fit(X_ref, y)
lr = LogisticRegression(C=1., fit_intercept=False)
lr.fit(X_ref, y)
assert_array_almost_equal(lr.coef_, lr_cv.coef_)
assert_array_equal(lr_cv.coef_.shape, (1, n_features))
assert_array_equal(lr_cv.classes_, [-1, 1])
assert_equal(len(lr_cv.classes_), 2)
coefs_paths = np.asarray(list(lr_cv.coefs_paths_.values()))
assert_array_equal(coefs_paths.shape, (1, 3, 1, n_features))
assert_array_equal(lr_cv.Cs_.shape, (1, ))
scores = np.asarray(list(lr_cv.scores_.values()))
assert_array_equal(scores.shape, (1, 3, 1))
def test_logistic_cv_sparse():
X, y = make_classification(n_samples=50, n_features=5,
random_state=0)
X[X < 1.0] = 0.0
csr = sp.csr_matrix(X)
clf = LogisticRegressionCV(fit_intercept=True)
clf.fit(X, y)
clfs = LogisticRegressionCV(fit_intercept=True)
clfs.fit(csr, y)
assert_array_almost_equal(clfs.coef_, clf.coef_)
assert_array_almost_equal(clfs.intercept_, clf.intercept_)
assert_equal(clfs.C_, clf.C_)
def test_intercept_logistic_helper():
n_samples, n_features = 10, 5
X, y = make_classification(n_samples=n_samples, n_features=n_features,
random_state=0)
# Fit intercept case.
alpha = 1.
w = np.ones(n_features + 1)
grad_interp, hess_interp = _logistic_grad_hess(w, X, y, alpha)
loss_interp = _logistic_loss(w, X, y, alpha)
# Do not fit intercept. This can be considered equivalent to adding
# a feature vector of ones, i.e. a column of ones.
X_ = np.hstack((X, np.ones(10)[:, np.newaxis]))
grad, hess = _logistic_grad_hess(w, X_, y, alpha)
loss = _logistic_loss(w, X_, y, alpha)
# In the fit_intercept=False case, the feature vector of ones is
# penalized. This should be taken care of.
assert_almost_equal(loss_interp + 0.5 * (w[-1] ** 2), loss)
# Check gradient.
assert_array_almost_equal(grad_interp[:n_features], grad[:n_features])
assert_almost_equal(grad_interp[-1] + alpha * w[-1], grad[-1])
rng = np.random.RandomState(0)
grad = rng.rand(n_features + 1)
hess_interp = hess_interp(grad)
hess = hess(grad)
assert_array_almost_equal(hess_interp[:n_features], hess[:n_features])
assert_almost_equal(hess_interp[-1] + alpha * grad[-1], hess[-1])
def test_ovr_multinomial_iris():
# Test that OvR and multinomial are correct using the iris dataset.
train, target = iris.data, iris.target
n_samples, n_features = train.shape
    # The cv indices from stratified kfold (where stratification is done based
    # on the fine-grained iris classes, i.e., before the classes 0 and 1 are
    # conflated) are used for both clf and clf1.
cv = StratifiedKFold(3)
precomputed_folds = list(cv.split(train, target))
# Train clf on the original dataset where classes 0 and 1 are separated
clf = LogisticRegressionCV(cv=precomputed_folds)
clf.fit(train, target)
    # Conflate classes 0 and 1 and train clf1 on this modified dataset
clf1 = LogisticRegressionCV(cv=precomputed_folds)
target_copy = target.copy()
target_copy[target_copy == 0] = 1
clf1.fit(train, target_copy)
    # Ensure that what OvR learns for class 2 is the same regardless of
    # whether classes 0 and 1 are separated or not
assert_array_almost_equal(clf.scores_[2], clf1.scores_[2])
assert_array_almost_equal(clf.intercept_[2:], clf1.intercept_)
assert_array_almost_equal(clf.coef_[2][np.newaxis, :], clf1.coef_)
# Test the shape of various attributes.
assert_equal(clf.coef_.shape, (3, n_features))
assert_array_equal(clf.classes_, [0, 1, 2])
coefs_paths = np.asarray(list(clf.coefs_paths_.values()))
assert_array_almost_equal(coefs_paths.shape, (3, 3, 10, n_features + 1))
assert_equal(clf.Cs_.shape, (10, ))
scores = np.asarray(list(clf.scores_.values()))
assert_equal(scores.shape, (3, 3, 10))
# Test that for the iris data multinomial gives a better accuracy than OvR
for solver in ['lbfgs', 'newton-cg']:
clf_multi = LogisticRegressionCV(
solver=solver, multi_class='multinomial', max_iter=15
)
clf_multi.fit(train, target)
multi_score = clf_multi.score(train, target)
ovr_score = clf.score(train, target)
assert_greater(multi_score, ovr_score)
# Test attributes of LogisticRegressionCV
assert_equal(clf.coef_.shape, clf_multi.coef_.shape)
assert_array_equal(clf_multi.classes_, [0, 1, 2])
coefs_paths = np.asarray(list(clf_multi.coefs_paths_.values()))
assert_array_almost_equal(coefs_paths.shape, (3, 3, 10,
n_features + 1))
assert_equal(clf_multi.Cs_.shape, (10, ))
scores = np.asarray(list(clf_multi.scores_.values()))
assert_equal(scores.shape, (3, 3, 10))
def test_logistic_regression_solvers():
X, y = make_classification(n_features=10, n_informative=5, random_state=0)
ncg = LogisticRegression(solver='newton-cg', fit_intercept=False)
lbf = LogisticRegression(solver='lbfgs', fit_intercept=False)
lib = LogisticRegression(fit_intercept=False)
sag = LogisticRegression(solver='sag', fit_intercept=False,
random_state=42)
ncg.fit(X, y)
lbf.fit(X, y)
sag.fit(X, y)
lib.fit(X, y)
assert_array_almost_equal(ncg.coef_, lib.coef_, decimal=3)
assert_array_almost_equal(lib.coef_, lbf.coef_, decimal=3)
assert_array_almost_equal(ncg.coef_, lbf.coef_, decimal=3)
assert_array_almost_equal(sag.coef_, lib.coef_, decimal=3)
assert_array_almost_equal(sag.coef_, ncg.coef_, decimal=3)
assert_array_almost_equal(sag.coef_, lbf.coef_, decimal=3)
def test_logistic_regression_solvers_multiclass():
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
n_classes=3, random_state=0)
tol = 1e-6
ncg = LogisticRegression(solver='newton-cg', fit_intercept=False, tol=tol)
lbf = LogisticRegression(solver='lbfgs', fit_intercept=False, tol=tol)
lib = LogisticRegression(fit_intercept=False, tol=tol)
sag = LogisticRegression(solver='sag', fit_intercept=False, tol=tol,
max_iter=1000, random_state=42)
ncg.fit(X, y)
lbf.fit(X, y)
sag.fit(X, y)
lib.fit(X, y)
assert_array_almost_equal(ncg.coef_, lib.coef_, decimal=4)
assert_array_almost_equal(lib.coef_, lbf.coef_, decimal=4)
assert_array_almost_equal(ncg.coef_, lbf.coef_, decimal=4)
assert_array_almost_equal(sag.coef_, lib.coef_, decimal=4)
assert_array_almost_equal(sag.coef_, ncg.coef_, decimal=4)
assert_array_almost_equal(sag.coef_, lbf.coef_, decimal=4)
def test_logistic_regressioncv_class_weights():
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
n_classes=3, random_state=0)
msg = ("In LogisticRegressionCV the liblinear solver cannot handle "
"multiclass with class_weight of type dict. Use the lbfgs, "
"newton-cg or sag solvers or set class_weight='balanced'")
clf_lib = LogisticRegressionCV(class_weight={0: 0.1, 1: 0.2},
solver='liblinear')
assert_raise_message(ValueError, msg, clf_lib.fit, X, y)
y_ = y.copy()
y_[y == 2] = 1
clf_lib.fit(X, y_)
assert_array_equal(clf_lib.classes_, [0, 1])
# Test for class_weight=balanced
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
random_state=0)
clf_lbf = LogisticRegressionCV(solver='lbfgs', fit_intercept=False,
class_weight='balanced')
clf_lbf.fit(X, y)
clf_lib = LogisticRegressionCV(solver='liblinear', fit_intercept=False,
class_weight='balanced')
clf_lib.fit(X, y)
clf_sag = LogisticRegressionCV(solver='sag', fit_intercept=False,
class_weight='balanced', max_iter=2000)
clf_sag.fit(X, y)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4)
assert_array_almost_equal(clf_sag.coef_, clf_lbf.coef_, decimal=4)
assert_array_almost_equal(clf_lib.coef_, clf_sag.coef_, decimal=4)
def test_logistic_regression_sample_weights():
X, y = make_classification(n_samples=20, n_features=5, n_informative=3,
n_classes=2, random_state=0)
sample_weight = np.ones(y.shape[0])
sample_weight[y == 1] = 2
for LR in [LogisticRegression, LogisticRegressionCV]:
# Test that passing sample_weight as ones is the same as
# not passing them at all (default None)
for solver in ['lbfgs', 'liblinear']:
clf_sw_none = LR(solver=solver, fit_intercept=False)
clf_sw_none.fit(X, y)
clf_sw_ones = LR(solver=solver, fit_intercept=False)
clf_sw_ones.fit(X, y, sample_weight=np.ones(y.shape[0]))
assert_array_almost_equal(
clf_sw_none.coef_, clf_sw_ones.coef_, decimal=4)
# Test that sample weights work the same with the lbfgs,
# newton-cg, and 'sag' solvers
clf_sw_lbfgs = LR(solver='lbfgs', fit_intercept=False)
clf_sw_lbfgs.fit(X, y, sample_weight=y + 1)
clf_sw_n = LR(solver='newton-cg', fit_intercept=False)
clf_sw_n.fit(X, y, sample_weight=y + 1)
clf_sw_sag = LR(solver='sag', fit_intercept=False,
max_iter=2000, tol=1e-7)
clf_sw_sag.fit(X, y, sample_weight=y + 1)
clf_sw_liblinear = LR(solver='liblinear', fit_intercept=False,
max_iter=50, tol=1e-7)
clf_sw_liblinear.fit(X, y, sample_weight=y + 1)
assert_array_almost_equal(
clf_sw_lbfgs.coef_, clf_sw_n.coef_, decimal=4)
assert_array_almost_equal(
clf_sw_lbfgs.coef_, clf_sw_sag.coef_, decimal=4)
assert_array_almost_equal(
clf_sw_lbfgs.coef_, clf_sw_liblinear.coef_, decimal=4)
        # Test that passing class_weight as {0: 1, 1: 2} is the same as
        # passing class_weight = {0: 1, 1: 1} but adjusting sample weights
        # to be 2 for all instances of class 1
for solver in ['lbfgs', 'liblinear']:
clf_cw_12 = LR(solver=solver, fit_intercept=False,
class_weight={0: 1, 1: 2})
clf_cw_12.fit(X, y)
clf_sw_12 = LR(solver=solver, fit_intercept=False)
clf_sw_12.fit(X, y, sample_weight=sample_weight)
assert_array_almost_equal(
clf_cw_12.coef_, clf_sw_12.coef_, decimal=4)
    # Test the above for l1 penalty and l2 penalty with dual=True,
# since the patched liblinear code is different.
clf_cw = LogisticRegression(
solver="liblinear", fit_intercept=False, class_weight={0: 1, 1: 2},
penalty="l1")
clf_cw.fit(X, y)
clf_sw = LogisticRegression(
solver="liblinear", fit_intercept=False, penalty="l1")
clf_sw.fit(X, y, sample_weight)
assert_array_almost_equal(clf_cw.coef_, clf_sw.coef_, decimal=4)
clf_cw = LogisticRegression(
solver="liblinear", fit_intercept=False, class_weight={0: 1, 1: 2},
penalty="l2", dual=True)
clf_cw.fit(X, y)
clf_sw = LogisticRegression(
solver="liblinear", fit_intercept=False, penalty="l2", dual=True)
clf_sw.fit(X, y, sample_weight)
assert_array_almost_equal(clf_cw.coef_, clf_sw.coef_, decimal=4)
def _compute_class_weight_dictionary(y):
# helper for returning a dictionary instead of an array
classes = np.unique(y)
class_weight = compute_class_weight("balanced", classes, y)
class_weight_dict = dict(zip(classes, class_weight))
return class_weight_dict
def test_logistic_regression_class_weights():
# Multinomial case: remove 90% of class 0
X = iris.data[45:, :]
y = iris.target[45:]
solvers = ("lbfgs", "newton-cg")
class_weight_dict = _compute_class_weight_dictionary(y)
for solver in solvers:
clf1 = LogisticRegression(solver=solver, multi_class="multinomial",
class_weight="balanced")
clf2 = LogisticRegression(solver=solver, multi_class="multinomial",
class_weight=class_weight_dict)
clf1.fit(X, y)
clf2.fit(X, y)
assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=6)
# Binary case: remove 90% of class 0 and 100% of class 2
X = iris.data[45:100, :]
y = iris.target[45:100]
solvers = ("lbfgs", "newton-cg", "liblinear")
class_weight_dict = _compute_class_weight_dictionary(y)
for solver in solvers:
clf1 = LogisticRegression(solver=solver, multi_class="ovr",
class_weight="balanced")
clf2 = LogisticRegression(solver=solver, multi_class="ovr",
class_weight=class_weight_dict)
clf1.fit(X, y)
clf2.fit(X, y)
assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=6)
def test_multinomial_logistic_regression_with_classweight_auto():
X, y = iris.data, iris.target
model = LogisticRegression(multi_class='multinomial',
class_weight='auto', solver='lbfgs')
# 'auto' is deprecated and will be removed in 0.19
assert_warns_message(DeprecationWarning,
"class_weight='auto' heuristic is deprecated",
model.fit, X, y)
def test_logistic_regression_convergence_warnings():
# Test that warnings are raised if model does not converge
X, y = make_classification(n_samples=20, n_features=20)
clf_lib = LogisticRegression(solver='liblinear', max_iter=2, verbose=1)
assert_warns(ConvergenceWarning, clf_lib.fit, X, y)
assert_equal(clf_lib.n_iter_, 2)
def test_logistic_regression_multinomial():
# Tests for the multinomial option in logistic regression
# Some basic attributes of Logistic Regression
n_samples, n_features, n_classes = 50, 20, 3
X, y = make_classification(n_samples=n_samples,
n_features=n_features,
n_informative=10,
n_classes=n_classes, random_state=0)
clf_int = LogisticRegression(solver='lbfgs', multi_class='multinomial')
clf_int.fit(X, y)
assert_array_equal(clf_int.coef_.shape, (n_classes, n_features))
clf_wint = LogisticRegression(solver='lbfgs', multi_class='multinomial',
fit_intercept=False)
clf_wint.fit(X, y)
assert_array_equal(clf_wint.coef_.shape, (n_classes, n_features))
# Similar tests for newton-cg solver option
clf_ncg_int = LogisticRegression(solver='newton-cg',
multi_class='multinomial')
clf_ncg_int.fit(X, y)
assert_array_equal(clf_ncg_int.coef_.shape, (n_classes, n_features))
clf_ncg_wint = LogisticRegression(solver='newton-cg', fit_intercept=False,
multi_class='multinomial')
clf_ncg_wint.fit(X, y)
assert_array_equal(clf_ncg_wint.coef_.shape, (n_classes, n_features))
# Compare solutions between lbfgs and newton-cg
assert_almost_equal(clf_int.coef_, clf_ncg_int.coef_, decimal=3)
assert_almost_equal(clf_wint.coef_, clf_ncg_wint.coef_, decimal=3)
assert_almost_equal(clf_int.intercept_, clf_ncg_int.intercept_, decimal=3)
    # Test that the path gives almost the same results. However, since in this
    # case we take the average of the coefs after fitting across all the
    # folds, they need not be exactly the same.
for solver in ['lbfgs', 'newton-cg']:
clf_path = LogisticRegressionCV(solver=solver,
multi_class='multinomial', Cs=[1.])
clf_path.fit(X, y)
assert_array_almost_equal(clf_path.coef_, clf_int.coef_, decimal=3)
assert_almost_equal(clf_path.intercept_, clf_int.intercept_, decimal=3)
def test_multinomial_grad_hess():
rng = np.random.RandomState(0)
n_samples, n_features, n_classes = 100, 5, 3
X = rng.randn(n_samples, n_features)
w = rng.rand(n_classes, n_features)
Y = np.zeros((n_samples, n_classes))
ind = np.argmax(np.dot(X, w.T), axis=1)
Y[range(0, n_samples), ind] = 1
w = w.ravel()
sample_weights = np.ones(X.shape[0])
grad, hessp = _multinomial_grad_hess(w, X, Y, alpha=1.,
sample_weight=sample_weights)
# extract first column of hessian matrix
vec = np.zeros(n_features * n_classes)
vec[0] = 1
hess_col = hessp(vec)
# Estimate hessian using least squares as done in
# test_logistic_grad_hess
e = 1e-3
d_x = np.linspace(-e, e, 30)
d_grad = np.array([
_multinomial_grad_hess(w + t * vec, X, Y, alpha=1.,
sample_weight=sample_weights)[0]
for t in d_x
])
d_grad -= d_grad.mean(axis=0)
approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
assert_array_almost_equal(hess_col, approx_hess_col)
def test_liblinear_decision_function_zero():
# Test negative prediction when decision_function values are zero.
# Liblinear predicts the positive class when decision_function values
# are zero. This is a test to verify that we do not do the same.
# See Issue: https://github.com/scikit-learn/scikit-learn/issues/3600
# and the PR https://github.com/scikit-learn/scikit-learn/pull/3623
X, y = make_classification(n_samples=5, n_features=5)
clf = LogisticRegression(fit_intercept=False)
clf.fit(X, y)
# Dummy data such that the decision function becomes zero.
X = np.zeros((5, 5))
assert_array_equal(clf.predict(X), np.zeros(5))
def test_liblinear_logregcv_sparse():
# Test LogRegCV with solver='liblinear' works for sparse matrices
X, y = make_classification(n_samples=10, n_features=5)
clf = LogisticRegressionCV(solver='liblinear')
clf.fit(sparse.csr_matrix(X), y)
def test_logreg_intercept_scaling():
# Test that the right error message is thrown when intercept_scaling <= 0
for i in [-1, 0]:
clf = LogisticRegression(intercept_scaling=i)
msg = ('Intercept scaling is %r but needs to be greater than 0.'
' To disable fitting an intercept,'
' set fit_intercept=False.' % clf.intercept_scaling)
assert_raise_message(ValueError, msg, clf.fit, X, Y1)
def test_logreg_intercept_scaling_zero():
# Test that intercept_scaling is ignored when fit_intercept is False
clf = LogisticRegression(fit_intercept=False)
clf.fit(X, Y1)
assert_equal(clf.intercept_, 0.)
def test_logreg_cv_penalty():
# Test that the correct penalty is passed to the final fit.
X, y = make_classification(n_samples=50, n_features=20, random_state=0)
lr_cv = LogisticRegressionCV(penalty="l1", Cs=[1.0], solver='liblinear')
lr_cv.fit(X, y)
lr = LogisticRegression(penalty="l1", C=1.0, solver='liblinear')
lr.fit(X, y)
assert_equal(np.count_nonzero(lr_cv.coef_), np.count_nonzero(lr.coef_))
def test_logreg_predict_proba_multinomial():
X, y = make_classification(n_samples=10, n_features=20, random_state=0,
n_classes=3, n_informative=10)
    # Predicted probabilities using the true-entropy loss should give a
    # smaller loss than those using the ovr method.
clf_multi = LogisticRegression(multi_class="multinomial", solver="lbfgs")
clf_multi.fit(X, y)
clf_multi_loss = log_loss(y, clf_multi.predict_proba(X))
clf_ovr = LogisticRegression(multi_class="ovr", solver="lbfgs")
clf_ovr.fit(X, y)
clf_ovr_loss = log_loss(y, clf_ovr.predict_proba(X))
assert_greater(clf_ovr_loss, clf_multi_loss)
    # Predicted probabilities using the soft-max function should give a
    # smaller loss than those using the logistic function.
clf_multi_loss = log_loss(y, clf_multi.predict_proba(X))
clf_wrong_loss = log_loss(y, clf_multi._predict_proba_lr(X))
assert_greater(clf_wrong_loss, clf_multi_loss)
@ignore_warnings
def test_max_iter():
    # Test that the maximum number of iterations is reached
X, y_bin = iris.data, iris.target.copy()
y_bin[y_bin == 2] = 0
solvers = ['newton-cg', 'liblinear', 'sag']
# old scipy doesn't have maxiter
if sp_version >= (0, 12):
solvers.append('lbfgs')
for max_iter in range(1, 5):
for solver in solvers:
lr = LogisticRegression(max_iter=max_iter, tol=1e-15,
random_state=0, solver=solver)
lr.fit(X, y_bin)
assert_equal(lr.n_iter_[0], max_iter)
def test_n_iter():
# Test that self.n_iter_ has the correct format.
X, y = iris.data, iris.target
y_bin = y.copy()
y_bin[y_bin == 2] = 0
n_Cs = 4
n_cv_fold = 2
for solver in ['newton-cg', 'liblinear', 'sag', 'lbfgs']:
# OvR case
n_classes = 1 if solver == 'liblinear' else np.unique(y).shape[0]
clf = LogisticRegression(tol=1e-2, multi_class='ovr',
solver=solver, C=1.,
random_state=42, max_iter=100)
clf.fit(X, y)
assert_equal(clf.n_iter_.shape, (n_classes,))
n_classes = np.unique(y).shape[0]
clf = LogisticRegressionCV(tol=1e-2, multi_class='ovr',
solver=solver, Cs=n_Cs, cv=n_cv_fold,
random_state=42, max_iter=100)
clf.fit(X, y)
assert_equal(clf.n_iter_.shape, (n_classes, n_cv_fold, n_Cs))
clf.fit(X, y_bin)
assert_equal(clf.n_iter_.shape, (1, n_cv_fold, n_Cs))
# multinomial case
n_classes = 1
        if solver in ('liblinear', 'sag'):
            continue
clf = LogisticRegression(tol=1e-2, multi_class='multinomial',
solver=solver, C=1.,
random_state=42, max_iter=100)
clf.fit(X, y)
assert_equal(clf.n_iter_.shape, (n_classes,))
clf = LogisticRegressionCV(tol=1e-2, multi_class='multinomial',
solver=solver, Cs=n_Cs, cv=n_cv_fold,
random_state=42, max_iter=100)
clf.fit(X, y)
assert_equal(clf.n_iter_.shape, (n_classes, n_cv_fold, n_Cs))
clf.fit(X, y_bin)
assert_equal(clf.n_iter_.shape, (1, n_cv_fold, n_Cs))
@ignore_warnings
def test_warm_start():
    # A 1-iteration second fit on the same data should give almost the same
    # result with warm starting, and a quite different result without warm
    # starting. Warm starting does not work with the liblinear solver.
X, y = iris.data, iris.target
solvers = ['newton-cg', 'sag']
# old scipy doesn't have maxiter
if sp_version >= (0, 12):
solvers.append('lbfgs')
for warm_start in [True, False]:
for fit_intercept in [True, False]:
for solver in solvers:
for multi_class in ['ovr', 'multinomial']:
if solver == 'sag' and multi_class == 'multinomial':
break
clf = LogisticRegression(tol=1e-4, multi_class=multi_class,
warm_start=warm_start,
solver=solver,
random_state=42, max_iter=100,
fit_intercept=fit_intercept)
clf.fit(X, y)
coef_1 = clf.coef_
clf.max_iter = 1
with ignore_warnings():
clf.fit(X, y)
cum_diff = np.sum(np.abs(coef_1 - clf.coef_))
msg = ("Warm starting issue with %s solver in %s mode "
"with fit_intercept=%s and warm_start=%s"
% (solver, multi_class, str(fit_intercept),
str(warm_start)))
if warm_start:
assert_greater(2.0, cum_diff, msg)
else:
assert_greater(cum_diff, 2.0, msg)
|
jmschrei/scikit-learn
|
sklearn/linear_model/tests/test_logistic.py
|
Python
|
bsd-3-clause
| 39,145
|
"""
Flask-CouchDBKit
----------------
Flask extension that provides integration with CouchDBKit.
Links
`````
* `documentation <http://packages.python.org/Flask-CouchDBKit>`_
* `development version
<http://github.com/sirn/flask-couchdbkit/zipball/master#egg=Flask-CouchDBKit-dev>`_
"""
from setuptools import setup
def run_tests():
import os, sys
sys.path.append(os.path.join(os.path.dirname(__file__), 'tests'))
from test_couchdbkit import suite
return suite()
setup(
name='Flask-CouchDBKit',
version='0.3.5',
url='http://code.grid.in.th/',
license='BSD',
author='Kridsada Thanabulpong',
author_email='sirn@ogsite.net',
description='Flask extension that provides integration with CouchDBKit.',
long_description=__doc__,
py_modules=['flask_couchdbkit'],
zip_safe=False,
platforms='any',
install_requires=[
'Flask',
'restkit>=3.0.2',
'CouchDBKit',
],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
],
test_suite='__main__.run_tests'
)
|
sirn/flask-couchdbkit
|
setup.py
|
Python
|
bsd-3-clause
| 1,410
|
""" Test functions for stats module
WRITTEN BY LOUIS LUANGKESORN <lluang@yahoo.com> FOR THE STATS MODULE
BASED ON WILKINSON'S STATISTICS QUIZ
http://www.stanford.edu/~clint/bench/wilk.txt
Additional tests by a host of SciPy developers.
"""
from __future__ import division, print_function, absolute_import
import sys
import warnings
from collections import namedtuple
from numpy.testing import (TestCase, assert_, assert_equal,
assert_almost_equal, assert_array_almost_equal,
assert_array_equal, assert_approx_equal,
assert_raises, run_module_suite, assert_allclose,
dec)
import numpy.ma.testutils as mat
from numpy import array, arange, float32, float64, power
import numpy as np
import scipy.stats as stats
""" Numbers in docstrings beginning with 'W' refer to the section numbers
and headings found in the STATISTICS QUIZ of Leland Wilkinson. These are
considered to be essential functionality. True testing and
evaluation of a statistics package requires use of the
NIST Statistical test data. See McCullough (1999), Assessing The Reliability
of Statistical Software for a test methodology and its
implementation in testing SAS, SPSS, and S-Plus
"""
# Datasets
# These data sets are from the nasty.dat sets used by Wilkinson
# For completeness, I should write the relevant tests and count them as failures
# Somewhat acceptable, since this is still beta software. It would count as a
# good target for 1.0 status
X = array([1,2,3,4,5,6,7,8,9], float)
ZERO = array([0,0,0,0,0,0,0,0,0], float)
BIG = array([99999991,99999992,99999993,99999994,99999995,99999996,99999997,
99999998,99999999], float)
LITTLE = array([0.99999991,0.99999992,0.99999993,0.99999994,0.99999995,0.99999996,
0.99999997,0.99999998,0.99999999], float)
HUGE = array([1e+12,2e+12,3e+12,4e+12,5e+12,6e+12,7e+12,8e+12,9e+12], float)
TINY = array([1e-12,2e-12,3e-12,4e-12,5e-12,6e-12,7e-12,8e-12,9e-12], float)
ROUND = array([0.5,1.5,2.5,3.5,4.5,5.5,6.5,7.5,8.5], float)
class TestTrimmedStats(TestCase):
# TODO: write these tests to handle missing values properly
dprec = np.finfo(np.float64).precision
def test_tmean(self):
y = stats.tmean(X, (2, 8), (True, True))
assert_approx_equal(y, 5.0, significant=self.dprec)
y1 = stats.tmean(X, limits=(2, 8), inclusive=(False, False))
y2 = stats.tmean(X, limits=None)
assert_approx_equal(y1, y2, significant=self.dprec)
def test_tvar(self):
y = stats.tvar(X, limits=(2, 8), inclusive=(True, True))
assert_approx_equal(y, 4.6666666666666661, significant=self.dprec)
y = stats.tvar(X, limits=None)
assert_approx_equal(y, X.var(ddof=1), significant=self.dprec)
def test_tstd(self):
y = stats.tstd(X, (2, 8), (True, True))
assert_approx_equal(y, 2.1602468994692865, significant=self.dprec)
y = stats.tstd(X, limits=None)
assert_approx_equal(y, X.std(ddof=1), significant=self.dprec)
def test_tmin(self):
x = np.arange(10)
assert_equal(stats.tmin(x), 0)
assert_equal(stats.tmin(x, lowerlimit=0), 0)
assert_equal(stats.tmin(x, lowerlimit=0, inclusive=False), 1)
x = x.reshape((5, 2))
assert_equal(stats.tmin(x, lowerlimit=0, inclusive=False), [2, 1])
assert_equal(stats.tmin(x, axis=1), [0, 2, 4, 6, 8])
assert_equal(stats.tmin(x, axis=None), 0)
def test_tmax(self):
x = np.arange(10)
assert_equal(stats.tmax(x), 9)
assert_equal(stats.tmax(x, upperlimit=9),9)
assert_equal(stats.tmax(x, upperlimit=9, inclusive=False), 8)
x = x.reshape((5, 2))
assert_equal(stats.tmax(x, upperlimit=9, inclusive=False), [8, 7])
assert_equal(stats.tmax(x, axis=1), [1, 3, 5, 7, 9])
assert_equal(stats.tmax(x, axis=None), 9)
def test_tsem(self):
y = stats.tsem(X, limits=(3, 8), inclusive=(False, True))
y_ref = np.array([4, 5, 6, 7, 8])
assert_approx_equal(y, y_ref.std(ddof=1) / np.sqrt(y_ref.size),
significant=self.dprec)
assert_approx_equal(stats.tsem(X, limits=[-1, 10]),
stats.tsem(X, limits=None),
significant=self.dprec)
class TestNanFunc(TestCase):
def __init__(self, *args, **kw):
TestCase.__init__(self, *args, **kw)
self.X = X.copy()
self.Xall = X.copy()
self.Xall[:] = np.nan
self.Xsome = X.copy()
self.Xsomet = X.copy()
self.Xsome[0] = np.nan
self.Xsomet = self.Xsomet[1:]
def test_nanmean_none(self):
# Check nanmean when no values are nan.
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
m = stats.nanmean(X)
assert_approx_equal(m, X[4])
def test_nanmean_some(self):
        # Check nanmean when only some values are nan.
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
m = stats.nanmean(self.Xsome)
assert_approx_equal(m, 5.5)
def test_nanmean_all(self):
# Check nanmean when all values are nan.
with warnings.catch_warnings():
warns = (DeprecationWarning, RuntimeWarning)
warnings.simplefilter('ignore', warns)
with np.errstate(invalid='ignore'):
m = stats.nanmean(self.Xall)
assert_(np.isnan(m))
def test_nanstd_none(self):
# Check nanstd when no values are nan.
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
s = stats.nanstd(self.X)
assert_approx_equal(s, np.std(self.X, ddof=1))
def test_nanstd_some(self):
        # Check nanstd when only some values are nan.
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
s = stats.nanstd(self.Xsome)
assert_approx_equal(s, np.std(self.Xsomet, ddof=1))
def test_nanstd_all(self):
# Check nanstd when all values are nan.
with warnings.catch_warnings():
warns = (DeprecationWarning, RuntimeWarning)
warnings.simplefilter('ignore', warns)
with np.errstate(invalid='ignore'):
s = stats.nanstd(self.Xall)
assert_(np.isnan(s))
def test_nanstd_bias_kw(self):
s = stats.nanstd(self.X, bias=True)
assert_approx_equal(s, np.std(self.X, ddof=0))
def test_nanstd_negative_axis(self):
x = np.array([1, 2, 3])
res = stats.nanstd(x, -1)
assert_equal(res, 1)
def test_nanmedian_none(self):
# Check nanmedian when no values are nan.
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
m = stats.nanmedian(self.X)
assert_approx_equal(m, np.median(self.X))
def test_nanmedian_axis(self):
# Check nanmedian with axis
X = self.X.reshape(3,3)
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
m = stats.nanmedian(X, axis=0)
assert_equal(m, np.median(X, axis=0))
m = stats.nanmedian(X, axis=1)
assert_equal(m, np.median(X, axis=1))
def test_nanmedian_some(self):
        # Check nanmedian when only some values are nan.
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
m = stats.nanmedian(self.Xsome)
assert_approx_equal(m, np.median(self.Xsomet))
def test_nanmedian_all(self):
# Check nanmedian when all values are nan.
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
m = stats.nanmedian(self.Xall)
assert_(np.isnan(m))
assert_equal(len(w), 2) # Deprecation & RuntimeWarning
assert_(issubclass(w[1].category, RuntimeWarning))
def test_nanmedian_all_axis(self):
# Check nanmedian when all values are nan.
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
m = stats.nanmedian(self.Xall.reshape(3,3), axis=1)
assert_(np.isnan(m).all())
assert_equal(len(w), 4)
assert_(issubclass(w[-1].category, RuntimeWarning))
def test_nanmedian_scalars(self):
# Check nanmedian for scalar inputs. See ticket #1098.
assert_equal(stats.nanmedian(1), np.median(1))
assert_equal(stats.nanmedian(True), np.median(True))
assert_equal(stats.nanmedian(np.array(1)), np.median(np.array(1)))
assert_equal(stats.nanmedian(np.nan), np.median(np.nan))
class TestCorrPearsonr(TestCase):
""" W.II.D. Compute a correlation matrix on all the variables.
        All the correlations, except for ZERO and MISS, should be exactly 1.
        ZERO and MISS should have undefined or missing correlations with the
        other variables.  The same should go for SPEARMAN correlations, if
your program has them.
"""
def test_pXX(self):
y = stats.pearsonr(X,X)
r = y[0]
assert_approx_equal(r,1.0)
def test_pXBIG(self):
y = stats.pearsonr(X,BIG)
r = y[0]
assert_approx_equal(r,1.0)
def test_pXLITTLE(self):
y = stats.pearsonr(X,LITTLE)
r = y[0]
assert_approx_equal(r,1.0)
def test_pXHUGE(self):
y = stats.pearsonr(X,HUGE)
r = y[0]
assert_approx_equal(r,1.0)
def test_pXTINY(self):
y = stats.pearsonr(X,TINY)
r = y[0]
assert_approx_equal(r,1.0)
def test_pXROUND(self):
y = stats.pearsonr(X,ROUND)
r = y[0]
assert_approx_equal(r,1.0)
def test_pBIGBIG(self):
y = stats.pearsonr(BIG,BIG)
r = y[0]
assert_approx_equal(r,1.0)
def test_pBIGLITTLE(self):
y = stats.pearsonr(BIG,LITTLE)
r = y[0]
assert_approx_equal(r,1.0)
def test_pBIGHUGE(self):
y = stats.pearsonr(BIG,HUGE)
r = y[0]
assert_approx_equal(r,1.0)
def test_pBIGTINY(self):
y = stats.pearsonr(BIG,TINY)
r = y[0]
assert_approx_equal(r,1.0)
def test_pBIGROUND(self):
y = stats.pearsonr(BIG,ROUND)
r = y[0]
assert_approx_equal(r,1.0)
def test_pLITTLELITTLE(self):
y = stats.pearsonr(LITTLE,LITTLE)
r = y[0]
assert_approx_equal(r,1.0)
def test_pLITTLEHUGE(self):
y = stats.pearsonr(LITTLE,HUGE)
r = y[0]
assert_approx_equal(r,1.0)
def test_pLITTLETINY(self):
y = stats.pearsonr(LITTLE,TINY)
r = y[0]
assert_approx_equal(r,1.0)
def test_pLITTLEROUND(self):
y = stats.pearsonr(LITTLE,ROUND)
r = y[0]
assert_approx_equal(r,1.0)
def test_pHUGEHUGE(self):
y = stats.pearsonr(HUGE,HUGE)
r = y[0]
assert_approx_equal(r,1.0)
def test_pHUGETINY(self):
y = stats.pearsonr(HUGE,TINY)
r = y[0]
assert_approx_equal(r,1.0)
def test_pHUGEROUND(self):
y = stats.pearsonr(HUGE,ROUND)
r = y[0]
assert_approx_equal(r,1.0)
def test_pTINYTINY(self):
y = stats.pearsonr(TINY,TINY)
r = y[0]
assert_approx_equal(r,1.0)
def test_pTINYROUND(self):
y = stats.pearsonr(TINY,ROUND)
r = y[0]
assert_approx_equal(r,1.0)
def test_pROUNDROUND(self):
y = stats.pearsonr(ROUND,ROUND)
r = y[0]
assert_approx_equal(r,1.0)
def test_r_exactly_pos1(self):
a = arange(3.0)
b = a
r, prob = stats.pearsonr(a,b)
assert_equal(r, 1.0)
assert_equal(prob, 0.0)
def test_r_exactly_neg1(self):
a = arange(3.0)
b = -a
r, prob = stats.pearsonr(a,b)
assert_equal(r, -1.0)
assert_equal(prob, 0.0)
def test_basic(self):
# A basic test, with a correlation coefficient
# that is not 1 or -1.
a = array([-1, 0, 1])
b = array([0, 0, 3])
r, prob = stats.pearsonr(a, b)
assert_approx_equal(r, np.sqrt(3)/2)
assert_approx_equal(prob, 1.0/3)
class TestFisherExact(TestCase):
"""Some tests to show that fisher_exact() works correctly.
Note that in SciPy 0.9.0 this was not working well for large numbers due to
inaccuracy of the hypergeom distribution (see #1218). Fixed now.
Also note that R and Scipy have different argument formats for their
hypergeometric distribution functions.
R:
> phyper(18999, 99000, 110000, 39000, lower.tail = FALSE)
[1] 1.701815e-09
"""
def test_basic(self):
fisher_exact = stats.fisher_exact
res = fisher_exact([[14500, 20000], [30000, 40000]])[1]
assert_approx_equal(res, 0.01106, significant=4)
res = fisher_exact([[100, 2], [1000, 5]])[1]
assert_approx_equal(res, 0.1301, significant=4)
res = fisher_exact([[2, 7], [8, 2]])[1]
assert_approx_equal(res, 0.0230141, significant=6)
res = fisher_exact([[5, 1], [10, 10]])[1]
assert_approx_equal(res, 0.1973244, significant=6)
res = fisher_exact([[5, 15], [20, 20]])[1]
assert_approx_equal(res, 0.0958044, significant=6)
res = fisher_exact([[5, 16], [20, 25]])[1]
assert_approx_equal(res, 0.1725862, significant=6)
res = fisher_exact([[10, 5], [10, 1]])[1]
assert_approx_equal(res, 0.1973244, significant=6)
res = fisher_exact([[5, 0], [1, 4]])[1]
assert_approx_equal(res, 0.04761904, significant=6)
res = fisher_exact([[0, 1], [3, 2]])[1]
assert_approx_equal(res, 1.0)
res = fisher_exact([[0, 2], [6, 4]])[1]
assert_approx_equal(res, 0.4545454545)
res = fisher_exact([[2, 7], [8, 2]])
assert_approx_equal(res[1], 0.0230141, significant=6)
assert_approx_equal(res[0], 4.0 / 56)
def test_precise(self):
# results from R
#
# R defines oddsratio differently (see Notes section of fisher_exact
# docstring), so those will not match. We leave them in anyway, in
# case they will be useful later on. We test only the p-value.
tablist = [
([[100, 2], [1000, 5]], (2.505583993422285e-001, 1.300759363430016e-001)),
([[2, 7], [8, 2]], (8.586235135736206e-002, 2.301413756522114e-002)),
([[5, 1], [10, 10]], (4.725646047336584e+000, 1.973244147157190e-001)),
([[5, 15], [20, 20]], (3.394396617440852e-001, 9.580440012477637e-002)),
([[5, 16], [20, 25]], (3.960558326183334e-001, 1.725864953812994e-001)),
([[10, 5], [10, 1]], (2.116112781158483e-001, 1.973244147157190e-001)),
([[10, 5], [10, 0]], (0.000000000000000e+000, 6.126482213438734e-002)),
([[5, 0], [1, 4]], (np.inf, 4.761904761904762e-002)),
([[0, 5], [1, 4]], (0.000000000000000e+000, 1.000000000000000e+000)),
([[5, 1], [0, 4]], (np.inf, 4.761904761904758e-002)),
([[0, 1], [3, 2]], (0.000000000000000e+000, 1.000000000000000e+000))
]
for table, res_r in tablist:
res = stats.fisher_exact(np.asarray(table))
np.testing.assert_almost_equal(res[1], res_r[1], decimal=11,
verbose=True)
@dec.slow
def test_large_numbers(self):
# Test with some large numbers. Regression test for #1401
pvals = [5.56e-11, 2.666e-11, 1.363e-11] # from R
for pval, num in zip(pvals, [75, 76, 77]):
res = stats.fisher_exact([[17704, 496], [1065, num]])[1]
assert_approx_equal(res, pval, significant=4)
res = stats.fisher_exact([[18000, 80000], [20000, 90000]])[1]
assert_approx_equal(res, 0.2751, significant=4)
def test_raises(self):
# test we raise an error for wrong shape of input.
assert_raises(ValueError, stats.fisher_exact,
np.arange(6).reshape(2, 3))
def test_row_or_col_zero(self):
tables = ([[0, 0], [5, 10]],
[[5, 10], [0, 0]],
[[0, 5], [0, 10]],
[[5, 0], [10, 0]])
for table in tables:
oddsratio, pval = stats.fisher_exact(table)
assert_equal(pval, 1.0)
assert_equal(oddsratio, np.nan)
def test_less_greater(self):
tables = (
# Some tables to compare with R:
[[2, 7], [8, 2]],
[[200, 7], [8, 300]],
[[28, 21], [6, 1957]],
[[190, 800], [200, 900]],
# Some tables with simple exact values
# (includes regression test for ticket #1568):
[[0, 2], [3, 0]],
[[1, 1], [2, 1]],
[[2, 0], [1, 2]],
[[0, 1], [2, 3]],
[[1, 0], [1, 4]],
)
pvals = (
# from R:
[0.018521725952066501, 0.9990149169715733],
[1.0, 2.0056578803889148e-122],
[1.0, 5.7284374608319831e-44],
[0.7416227, 0.2959826],
# Exact:
[0.1, 1.0],
[0.7, 0.9],
[1.0, 0.3],
[2./3, 1.0],
[1.0, 1./3],
)
for table, pval in zip(tables, pvals):
res = []
res.append(stats.fisher_exact(table, alternative="less")[1])
res.append(stats.fisher_exact(table, alternative="greater")[1])
assert_allclose(res, pval, atol=0, rtol=1e-7)
def test_gh3014(self):
# check if issue #3014 has been fixed.
        # before, this would have raised a ValueError
odds, pvalue = stats.fisher_exact([[1, 2], [9, 84419233]])
class TestCorrSpearmanr(TestCase):
""" W.II.D. Compute a correlation matrix on all the variables.
        All the correlations, except for ZERO and MISS, should be exactly 1.
        ZERO and MISS should have undefined or missing correlations with the
        other variables.  The same should go for SPEARMAN correlations, if
your program has them.
"""
def test_sXX(self):
y = stats.spearmanr(X,X)
r = y[0]
assert_approx_equal(r,1.0)
def test_sXBIG(self):
y = stats.spearmanr(X,BIG)
r = y[0]
assert_approx_equal(r,1.0)
def test_sXLITTLE(self):
y = stats.spearmanr(X,LITTLE)
r = y[0]
assert_approx_equal(r,1.0)
def test_sXHUGE(self):
y = stats.spearmanr(X,HUGE)
r = y[0]
assert_approx_equal(r,1.0)
def test_sXTINY(self):
y = stats.spearmanr(X,TINY)
r = y[0]
assert_approx_equal(r,1.0)
def test_sXROUND(self):
y = stats.spearmanr(X,ROUND)
r = y[0]
assert_approx_equal(r,1.0)
def test_sBIGBIG(self):
y = stats.spearmanr(BIG,BIG)
r = y[0]
assert_approx_equal(r,1.0)
def test_sBIGLITTLE(self):
y = stats.spearmanr(BIG,LITTLE)
r = y[0]
assert_approx_equal(r,1.0)
def test_sBIGHUGE(self):
y = stats.spearmanr(BIG,HUGE)
r = y[0]
assert_approx_equal(r,1.0)
def test_sBIGTINY(self):
y = stats.spearmanr(BIG,TINY)
r = y[0]
assert_approx_equal(r,1.0)
def test_sBIGROUND(self):
y = stats.spearmanr(BIG,ROUND)
r = y[0]
assert_approx_equal(r,1.0)
def test_sLITTLELITTLE(self):
y = stats.spearmanr(LITTLE,LITTLE)
r = y[0]
assert_approx_equal(r,1.0)
def test_sLITTLEHUGE(self):
y = stats.spearmanr(LITTLE,HUGE)
r = y[0]
assert_approx_equal(r,1.0)
def test_sLITTLETINY(self):
y = stats.spearmanr(LITTLE,TINY)
r = y[0]
assert_approx_equal(r,1.0)
def test_sLITTLEROUND(self):
y = stats.spearmanr(LITTLE,ROUND)
r = y[0]
assert_approx_equal(r,1.0)
def test_sHUGEHUGE(self):
y = stats.spearmanr(HUGE,HUGE)
r = y[0]
assert_approx_equal(r,1.0)
def test_sHUGETINY(self):
y = stats.spearmanr(HUGE,TINY)
r = y[0]
assert_approx_equal(r,1.0)
def test_sHUGEROUND(self):
y = stats.spearmanr(HUGE,ROUND)
r = y[0]
assert_approx_equal(r,1.0)
def test_sTINYTINY(self):
y = stats.spearmanr(TINY,TINY)
r = y[0]
assert_approx_equal(r,1.0)
def test_sTINYROUND(self):
y = stats.spearmanr(TINY,ROUND)
r = y[0]
assert_approx_equal(r,1.0)
def test_sROUNDROUND(self):
y = stats.spearmanr(ROUND,ROUND)
r = y[0]
assert_approx_equal(r,1.0)
class TestCorrSpearmanrTies(TestCase):
"""Some tests of tie-handling by the spearmanr function."""
def test_tie1(self):
# Data
x = [1.0, 2.0, 3.0, 4.0]
y = [1.0, 2.0, 2.0, 3.0]
# Ranks of the data, with tie-handling.
xr = [1.0, 2.0, 3.0, 4.0]
yr = [1.0, 2.5, 2.5, 4.0]
# Result of spearmanr should be the same as applying
# pearsonr to the ranks.
sr = stats.spearmanr(x, y)
pr = stats.pearsonr(xr, yr)
assert_almost_equal(sr, pr)
# W.II.E. Tabulate X against X, using BIG as a case weight. The values
# should appear on the diagonal and the total should be 899999955.
# If the table cannot hold these values, forget about working with
# census data. You can also tabulate HUGE against TINY. There is no
# reason a tabulation program should not be able to distinguish
# different values regardless of their magnitude.
# I need to figure out how to do this one.
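# A possible sketch of the W.II.E tabulation above, using a weighted 2-D
# histogram (illustrative only and not wired into the test suite; the choice
# of 9 bins and the exact-equality expectations are assumptions):
def _sketch_weighted_crosstab():
    # One bin per distinct value of X, with BIG supplying the case weights.
    table, xedges, yedges = np.histogram2d(X, X, bins=9, weights=BIG)
    # All of the weight should land on the diagonal of the table...
    assert_array_almost_equal(np.diag(table), BIG)
    # ...and the weighted total should be 899999955, the sum of BIG.
    assert_almost_equal(table.sum(), 899999955)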
def test_kendalltau():
# with some ties
x1 = [12, 2, 1, 12, 2]
x2 = [1, 4, 7, 1, 0]
expected = (-0.47140452079103173, 0.24821309157521476)
res = stats.kendalltau(x1, x2)
assert_approx_equal(res[0], expected[0])
assert_approx_equal(res[1], expected[1])
# with only ties in one or both inputs
assert_equal(stats.kendalltau([2,2,2], [2,2,2]), (np.nan, np.nan))
assert_equal(stats.kendalltau([2,0,2], [2,2,2]), (np.nan, np.nan))
assert_equal(stats.kendalltau([2,2,2], [2,0,2]), (np.nan, np.nan))
# empty arrays provided as input
assert_equal(stats.kendalltau([], []), (np.nan, np.nan))
# check two different sort methods
assert_approx_equal(stats.kendalltau(x1, x2, initial_lexsort=False)[1],
stats.kendalltau(x1, x2, initial_lexsort=True)[1])
# and with larger arrays
np.random.seed(7546)
x = np.array([np.random.normal(loc=1, scale=1, size=500),
np.random.normal(loc=1, scale=1, size=500)])
corr = [[1.0, 0.3],
[0.3, 1.0]]
x = np.dot(np.linalg.cholesky(corr), x)
expected = (0.19291382765531062, 1.1337108207276285e-10)
res = stats.kendalltau(x[0], x[1])
assert_approx_equal(res[0], expected[0])
assert_approx_equal(res[1], expected[1])
# and do we get a tau of 1 for identical inputs?
assert_approx_equal(stats.kendalltau([1,1,2], [1,1,2])[0], 1.0)
class TestRegression(TestCase):
def test_linregressBIGX(self):
# W.II.F. Regress BIG on X.
# The constant should be 99999990 and the regression coefficient should be 1.
y = stats.linregress(X,BIG)
intercept = y[1]
r = y[2]
assert_almost_equal(intercept,99999990)
assert_almost_equal(r,1.0)
def test_regressXX(self):
# W.IV.B. Regress X on X.
# The constant should be exactly 0 and the regression coefficient should be 1.
# This is a perfectly valid regression. The program should not complain.
y = stats.linregress(X,X)
intercept = y[1]
r = y[2]
assert_almost_equal(intercept,0.0)
assert_almost_equal(r,1.0)
# W.IV.C. Regress X on BIG and LITTLE (two predictors). The program
# should tell you that this model is "singular" because BIG and
# LITTLE are linear combinations of each other. Cryptic error
# messages are unacceptable here. Singularity is the most
# fundamental regression error.
# Need to figure out how to handle multiple linear regression. Not obvious
def test_regressZEROX(self):
# W.IV.D. Regress ZERO on X.
# The program should inform you that ZERO has no variance or it should
# go ahead and compute the regression and report a correlation and
# total sum of squares of exactly 0.
y = stats.linregress(X,ZERO)
intercept = y[1]
r = y[2]
assert_almost_equal(intercept,0.0)
assert_almost_equal(r,0.0)
def test_regress_simple(self):
# Regress a line with sinusoidal noise.
x = np.linspace(0, 100, 100)
y = 0.2 * np.linspace(0, 100, 100) + 10
y += np.sin(np.linspace(0, 20, 100))
res = stats.linregress(x, y)
assert_almost_equal(res[4], 2.3957814497838803e-3)
def test_regress_simple_onearg_rows(self):
        # Regress a line with sinusoidal noise, passing a single input of shape (2, N).
x = np.linspace(0, 100, 100)
y = 0.2 * np.linspace(0, 100, 100) + 10
y += np.sin(np.linspace(0, 20, 100))
rows = np.vstack((x, y))
res = stats.linregress(rows)
assert_almost_equal(res[4], 2.3957814497838803e-3)
def test_regress_simple_onearg_cols(self):
x = np.linspace(0, 100, 100)
y = 0.2 * np.linspace(0, 100, 100) + 10
y += np.sin(np.linspace(0, 20, 100))
cols = np.hstack((np.expand_dims(x, 1), np.expand_dims(y, 1)))
res = stats.linregress(cols)
assert_almost_equal(res[4], 2.3957814497838803e-3)
def test_regress_shape_error(self):
# Check that a single input argument to linregress with wrong shape
# results in a ValueError.
assert_raises(ValueError, stats.linregress, np.ones((3, 3)))
def test_linregress(self):
# compared with multivariate ols with pinv
x = np.arange(11)
y = np.arange(5,16)
        y[[1, -2]] -= 1
        y[[0, -1]] += 1
res = (1.0, 5.0, 0.98229948625750, 7.45259691e-008, 0.063564172616372733)
assert_array_almost_equal(stats.linregress(x,y),res,decimal=14)
def test_regress_simple_negative_cor(self):
        # If the slope of the regression is negative, the factor R tends to -1, not 1.
        # Sometimes rounding errors make it < -1, leading to stderr being NaN.
a, n = 1e-71, 100000
x = np.linspace(a, 2 * a, n)
y = np.linspace(2 * a, a, n)
stats.linregress(x, y)
res = stats.linregress(x, y)
assert_(res[2] >= -1) # propagated numerical errors were not corrected
assert_almost_equal(res[2], -1) # perfect negative correlation case
assert_(not np.isnan(res[4])) # stderr should stay finite
def test_theilslopes():
# Basic slope test.
slope, intercept, lower, upper = stats.theilslopes([0,1,1])
assert_almost_equal(slope, 0.5)
assert_almost_equal(intercept, 0.5)
# Test of confidence intervals.
x = [1, 2, 3, 4, 10, 12, 18]
y = [9, 15, 19, 20, 45, 55, 78]
slope, intercept, lower, upper = stats.theilslopes(y, x, 0.07)
assert_almost_equal(slope, 4)
assert_almost_equal(upper, 4.38, decimal=2)
assert_almost_equal(lower, 3.71, decimal=2)
class TestHistogram(TestCase):
# Tests that histogram works as it should, and keeps old behaviour
#
# what is untested:
# - multidimensional arrays (since 'a' is ravel'd as the first line in the method)
# - very large arrays
# - Nans, Infs, empty and otherwise bad inputs
# sample arrays to test the histogram with
low_values = np.array([0.2, 0.3, 0.4, 0.5, 0.5, 0.6, 0.7, 0.8, 0.9, 1.1, 1.2],
dtype=float) # 11 values
high_range = np.array([2, 3, 4, 2, 21, 32, 78, 95, 65, 66, 66, 66, 66, 4],
dtype=float) # 14 values
low_range = np.array([2, 3, 3, 2, 3, 2.4, 2.1, 3.1, 2.9, 2.6, 2.7, 2.8, 2.2, 2.001],
dtype=float) # 14 values
few_values = np.array([2.0, 3.0, -1.0, 0.0], dtype=float) # 4 values
def test_simple(self):
# Tests that each of the tests works as expected with default params
#
# basic tests, with expected results (no weighting)
# results taken from the previous (slower) version of histogram
basic_tests = ((self.low_values, (np.array([1., 1., 1., 2., 2.,
1., 1., 0., 1., 1.]),
0.14444444444444446, 0.11111111111111112, 0)),
(self.high_range, (np.array([5., 0., 1., 1., 0.,
0., 5., 1., 0., 1.]),
-3.1666666666666661, 10.333333333333332, 0)),
(self.low_range, (np.array([3., 1., 1., 1., 0., 1.,
1., 2., 3., 1.]),
1.9388888888888889, 0.12222222222222223, 0)),
(self.few_values, (np.array([1., 0., 1., 0., 0., 0.,
0., 1., 0., 1.]),
-1.2222222222222223, 0.44444444444444448, 0)),
)
for inputs, expected_results in basic_tests:
given_results = stats.histogram(inputs)
assert_array_almost_equal(expected_results[0], given_results[0],
decimal=2)
for i in range(1, 4):
assert_almost_equal(expected_results[i], given_results[i],
decimal=2)
def test_weighting(self):
# Tests that weights give expected histograms
# basic tests, with expected results, given a set of weights
# weights used (first n are used for each test, where n is len of array) (14 values)
weights = np.array([1., 3., 4.5, 0.1, -1.0, 0.0, 0.3, 7.0, 103.2, 2, 40, 0, 0, 1])
# results taken from the numpy version of histogram
basic_tests = ((self.low_values, (np.array([4.0, 0.0, 4.5, -0.9, 0.0,
0.3,110.2, 0.0, 0.0, 42.0]),
0.2, 0.1, 0)),
(self.high_range, (np.array([9.6, 0., -1., 0., 0.,
0.,145.2, 0., 0.3, 7.]),
2.0, 9.3, 0)),
(self.low_range, (np.array([2.4, 0., 0., 0., 0.,
2., 40., 0., 103.2, 13.5]),
2.0, 0.11, 0)),
(self.few_values, (np.array([4.5, 0., 0.1, 0., 0., 0.,
0., 1., 0., 3.]),
-1., 0.4, 0)),
)
for inputs, expected_results in basic_tests:
# use the first lot of weights for test
# default limits given to reproduce output of numpy's test better
given_results = stats.histogram(inputs, defaultlimits=(inputs.min(),
inputs.max()),
weights=weights[:len(inputs)])
assert_array_almost_equal(expected_results[0], given_results[0],
decimal=2)
for i in range(1, 4):
assert_almost_equal(expected_results[i], given_results[i],
decimal=2)
def test_reduced_bins(self):
# Tests that reducing the number of bins produces expected results
# basic tests, with expected results (no weighting),
# except number of bins is halved to 5
# results taken from the previous (slower) version of histogram
basic_tests = ((self.low_values, (np.array([2., 3., 3., 1., 2.]),
0.075000000000000011, 0.25, 0)),
(self.high_range, (np.array([5., 2., 0., 6., 1.]),
-9.625, 23.25, 0)),
(self.low_range, (np.array([4., 2., 1., 3., 4.]),
1.8625, 0.27500000000000002, 0)),
(self.few_values, (np.array([1., 1., 0., 1., 1.]),
-1.5, 1.0, 0)),
)
for inputs, expected_results in basic_tests:
given_results = stats.histogram(inputs, numbins=5)
assert_array_almost_equal(expected_results[0], given_results[0],
decimal=2)
for i in range(1, 4):
assert_almost_equal(expected_results[i], given_results[i],
decimal=2)
def test_increased_bins(self):
# Tests that increasing the number of bins produces expected results
# basic tests, with expected results (no weighting),
        # except number of bins is doubled to 20
# results taken from the previous (slower) version of histogram
basic_tests = ((self.low_values, (np.array([1., 0., 1., 0., 1.,
0., 2., 0., 1., 0.,
1., 1., 0., 1., 0.,
0., 0., 1., 0., 1.]),
0.1736842105263158, 0.052631578947368418, 0)),
(self.high_range, (np.array([5., 0., 0., 0., 1.,
0., 1., 0., 0., 0.,
0., 0., 0., 5., 0.,
0., 1., 0., 0., 1.]),
-0.44736842105263142, 4.8947368421052628, 0)),
(self.low_range, (np.array([3., 0., 1., 1., 0., 0.,
0., 1., 0., 0., 1., 0.,
1., 0., 1., 0., 1., 3.,
0., 1.]),
1.9710526315789474, 0.057894736842105263, 0)),
(self.few_values, (np.array([1., 0., 0., 0., 0., 1.,
0., 0., 0., 0., 0., 0.,
0., 0., 1., 0., 0., 0.,
0., 1.]),
-1.1052631578947367, 0.21052631578947367, 0)),
)
for inputs, expected_results in basic_tests:
given_results = stats.histogram(inputs, numbins=20)
assert_array_almost_equal(expected_results[0], given_results[0],
decimal=2)
for i in range(1, 4):
assert_almost_equal(expected_results[i], given_results[i],
decimal=2)
def test_cumfreq():
x = [1, 4, 2, 1, 3, 1]
cumfreqs, lowlim, binsize, extrapoints = stats.cumfreq(x, numbins=4)
assert_array_almost_equal(cumfreqs, np.array([3., 4., 5., 6.]))
cumfreqs, lowlim, binsize, extrapoints = stats.cumfreq(x, numbins=4,
defaultreallimits=(1.5, 5))
assert_(extrapoints == 3)
def test_relfreq():
a = np.array([1, 4, 2, 1, 3, 1])
relfreqs, lowlim, binsize, extrapoints = stats.relfreq(a, numbins=4)
assert_array_almost_equal(relfreqs,
array([0.5, 0.16666667, 0.16666667, 0.16666667]))
# check array_like input is accepted
relfreqs2, lowlim, binsize, extrapoints = stats.relfreq([1, 4, 2, 1, 3, 1],
numbins=4)
assert_array_almost_equal(relfreqs, relfreqs2)
class TestGMean(TestCase):
def test_1D_list(self):
a = (1,2,3,4)
actual = stats.gmean(a)
desired = power(1*2*3*4,1./4.)
assert_almost_equal(actual, desired,decimal=14)
desired1 = stats.gmean(a,axis=-1)
assert_almost_equal(actual, desired1, decimal=14)
def test_1D_array(self):
a = array((1,2,3,4), float32)
actual = stats.gmean(a)
desired = power(1*2*3*4,1./4.)
assert_almost_equal(actual, desired, decimal=7)
desired1 = stats.gmean(a,axis=-1)
assert_almost_equal(actual, desired1, decimal=7)
def test_2D_array_default(self):
a = array(((1,2,3,4),
(1,2,3,4),
(1,2,3,4)))
actual = stats.gmean(a)
desired = array((1,2,3,4))
assert_array_almost_equal(actual, desired, decimal=14)
desired1 = stats.gmean(a,axis=0)
assert_array_almost_equal(actual, desired1, decimal=14)
def test_2D_array_dim1(self):
a = array(((1,2,3,4),
(1,2,3,4),
(1,2,3,4)))
actual = stats.gmean(a, axis=1)
v = power(1*2*3*4,1./4.)
desired = array((v,v,v))
assert_array_almost_equal(actual, desired, decimal=14)
def test_large_values(self):
a = array([1e100, 1e200, 1e300])
actual = stats.gmean(a)
assert_approx_equal(actual, 1e200, significant=14)
class TestHMean(TestCase):
def test_1D_list(self):
a = (1,2,3,4)
actual = stats.hmean(a)
desired = 4. / (1./1 + 1./2 + 1./3 + 1./4)
assert_almost_equal(actual, desired, decimal=14)
desired1 = stats.hmean(array(a),axis=-1)
assert_almost_equal(actual, desired1, decimal=14)
def test_1D_array(self):
a = array((1,2,3,4), float64)
actual = stats.hmean(a)
desired = 4. / (1./1 + 1./2 + 1./3 + 1./4)
assert_almost_equal(actual, desired, decimal=14)
desired1 = stats.hmean(a,axis=-1)
assert_almost_equal(actual, desired1, decimal=14)
def test_2D_array_default(self):
a = array(((1,2,3,4),
(1,2,3,4),
(1,2,3,4)))
actual = stats.hmean(a)
desired = array((1.,2.,3.,4.))
assert_array_almost_equal(actual, desired, decimal=14)
actual1 = stats.hmean(a,axis=0)
assert_array_almost_equal(actual1, desired, decimal=14)
def test_2D_array_dim1(self):
a = array(((1,2,3,4),
(1,2,3,4),
(1,2,3,4)))
v = 4. / (1./1 + 1./2 + 1./3 + 1./4)
desired1 = array((v,v,v))
actual1 = stats.hmean(a, axis=1)
assert_array_almost_equal(actual1, desired1, decimal=14)
class TestScoreatpercentile(TestCase):
def setUp(self):
self.a1 = [3, 4, 5, 10, -3, -5, 6]
self.a2 = [3, -6, -2, 8, 7, 4, 2, 1]
self.a3 = [3., 4, 5, 10, -3, -5, -6, 7.0]
def test_basic(self):
x = arange(8) * 0.5
assert_equal(stats.scoreatpercentile(x, 0), 0.)
assert_equal(stats.scoreatpercentile(x, 100), 3.5)
assert_equal(stats.scoreatpercentile(x, 50), 1.75)
def test_fraction(self):
scoreatperc = stats.scoreatpercentile
# Test defaults
assert_equal(scoreatperc(list(range(10)), 50), 4.5)
assert_equal(scoreatperc(list(range(10)), 50, (2,7)), 4.5)
assert_equal(scoreatperc(list(range(100)), 50, limit=(1, 8)), 4.5)
assert_equal(scoreatperc(np.array([1, 10,100]), 50, (10,100)), 55)
assert_equal(scoreatperc(np.array([1, 10,100]), 50, (1,10)), 5.5)
# explicitly specify interpolation_method 'fraction' (the default)
assert_equal(scoreatperc(list(range(10)), 50, interpolation_method='fraction'),
4.5)
assert_equal(scoreatperc(list(range(10)), 50, limit=(2, 7),
interpolation_method='fraction'),
4.5)
assert_equal(scoreatperc(list(range(100)), 50, limit=(1, 8),
interpolation_method='fraction'),
4.5)
assert_equal(scoreatperc(np.array([1, 10,100]), 50, (10, 100),
interpolation_method='fraction'),
55)
assert_equal(scoreatperc(np.array([1, 10,100]), 50, (1,10),
interpolation_method='fraction'),
5.5)
def test_lower_higher(self):
scoreatperc = stats.scoreatpercentile
# interpolation_method 'lower'/'higher'
assert_equal(scoreatperc(list(range(10)), 50,
interpolation_method='lower'), 4)
assert_equal(scoreatperc(list(range(10)), 50,
interpolation_method='higher'), 5)
assert_equal(scoreatperc(list(range(10)), 50, (2,7),
interpolation_method='lower'), 4)
assert_equal(scoreatperc(list(range(10)), 50, limit=(2,7),
interpolation_method='higher'), 5)
assert_equal(scoreatperc(list(range(100)), 50, (1,8),
interpolation_method='lower'), 4)
assert_equal(scoreatperc(list(range(100)), 50, (1,8),
interpolation_method='higher'), 5)
assert_equal(scoreatperc(np.array([1, 10, 100]), 50, (10, 100),
interpolation_method='lower'), 10)
assert_equal(scoreatperc(np.array([1, 10, 100]), 50, limit=(10, 100),
interpolation_method='higher'), 100)
assert_equal(scoreatperc(np.array([1, 10, 100]), 50, (1, 10),
interpolation_method='lower'), 1)
assert_equal(scoreatperc(np.array([1, 10, 100]), 50, limit=(1, 10),
interpolation_method='higher'), 10)
def test_sequence_per(self):
x = arange(8) * 0.5
expected = np.array([0, 3.5, 1.75])
res = stats.scoreatpercentile(x, [0, 100, 50])
assert_allclose(res, expected)
assert_(isinstance(res, np.ndarray))
# Test with ndarray. Regression test for gh-2861
assert_allclose(stats.scoreatpercentile(x, np.array([0, 100, 50])),
expected)
# Also test combination of 2-D array, axis not None and array-like per
res2 = stats.scoreatpercentile(np.arange(12).reshape((3,4)),
np.array([0, 1, 100, 100]), axis=1)
expected2 = array([[0, 4, 8],
[0.03, 4.03, 8.03],
[3, 7, 11],
[3, 7, 11]])
assert_allclose(res2, expected2)
def test_axis(self):
scoreatperc = stats.scoreatpercentile
x = arange(12).reshape(3, 4)
assert_equal(scoreatperc(x, (25, 50, 100)), [2.75, 5.5, 11.0])
r0 = [[2, 3, 4, 5], [4, 5, 6, 7], [8, 9, 10, 11]]
assert_equal(scoreatperc(x, (25, 50, 100), axis=0), r0)
r1 = [[0.75, 4.75, 8.75], [1.5, 5.5, 9.5], [3, 7, 11]]
assert_equal(scoreatperc(x, (25, 50, 100), axis=1), r1)
x = array([[1, 1, 1],
[1, 1, 1],
[4, 4, 3],
[1, 1, 1],
[1, 1, 1]])
score = stats.scoreatpercentile(x, 50)
assert_equal(score.shape, ())
assert_equal(score, 1.0)
score = stats.scoreatpercentile(x, 50, axis=0)
assert_equal(score.shape, (3,))
assert_equal(score, [1, 1, 1])
def test_exception(self):
assert_raises(ValueError, stats.scoreatpercentile, [1, 2], 56,
interpolation_method='foobar')
assert_raises(ValueError, stats.scoreatpercentile, [1], 101)
assert_raises(ValueError, stats.scoreatpercentile, [1], -1)
def test_empty(self):
assert_equal(stats.scoreatpercentile([], 50), np.nan)
assert_equal(stats.scoreatpercentile(np.array([[], []]), 50), np.nan)
assert_equal(stats.scoreatpercentile([], [50, 99]), [np.nan, np.nan])
class TestItemfreq(object):
a = [5, 7, 1, 2, 1, 5, 7] * 10
b = [1, 2, 5, 7]
def test_numeric_types(self):
# Check itemfreq works for all dtypes (adapted from np.unique tests)
def _check_itemfreq(dt):
a = np.array(self.a, dt)
v = stats.itemfreq(a)
assert_array_equal(v[:, 0], [1, 2, 5, 7])
assert_array_equal(v[:, 1], np.array([20, 10, 20, 20], dtype=dt))
dtypes = [np.int32, np.int64, np.float32, np.float64,
np.complex64, np.complex128]
for dt in dtypes:
yield _check_itemfreq, dt
def test_object_arrays(self):
a, b = self.a, self.b
dt = 'O'
aa = np.empty(len(a), dt)
aa[:] = a
bb = np.empty(len(b), dt)
bb[:] = b
v = stats.itemfreq(aa)
assert_array_equal(v[:, 0], bb)
def test_structured_arrays(self):
a, b = self.a, self.b
dt = [('', 'i'), ('', 'i')]
aa = np.array(list(zip(a, a)), dt)
bb = np.array(list(zip(b, b)), dt)
v = stats.itemfreq(aa)
        # Arrays don't compare equal because v[:, 0] is an object array
assert_equal(tuple(v[2, 0]), tuple(bb[2]))
class TestMode(TestCase):
def test_empty(self):
vals, counts = stats.mode([])
assert_equal(vals, np.array([]))
assert_equal(counts, np.array([]))
def test_basic(self):
data1 = [3, 5, 1, 10, 23, 3, 2, 6, 8, 6, 10, 6]
vals = stats.mode(data1)
assert_equal(vals[0][0], 6)
assert_equal(vals[1][0], 3)
def test_axes(self):
data1 = [10, 10, 30, 40]
data2 = [10, 10, 10, 10]
data3 = [20, 10, 20, 20]
data4 = [30, 30, 30, 30]
data5 = [40, 30, 30, 30]
arr = np.array([data1, data2, data3, data4, data5])
vals = stats.mode(arr, axis=None)
assert_equal(vals[0], np.array([30]))
assert_equal(vals[1], np.array([8]))
vals = stats.mode(arr, axis=0)
assert_equal(vals[0], np.array([[10, 10, 30, 30]]))
assert_equal(vals[1], np.array([[2, 3, 3, 2]]))
vals = stats.mode(arr, axis=1)
assert_equal(vals[0], np.array([[10], [10], [20], [30], [30]]))
assert_equal(vals[1], np.array([[2], [4], [3], [4], [3]]))
def test_strings(self):
data1 = ['rain', 'showers', 'showers']
vals = stats.mode(data1)
assert_equal(vals[0][0], 'showers')
assert_equal(vals[1][0], 2)
@dec.knownfailureif(sys.version_info > (3,), 'numpy github issue 641')
def test_mixed_objects(self):
objects = [10, True, np.nan, 'hello', 10]
arr = np.empty((5,), dtype=object)
arr[:] = objects
vals = stats.mode(arr)
assert_equal(vals[0][0], 10)
assert_equal(vals[1][0], 2)
def test_objects(self):
"""Python objects must be sortable (le + eq) and have ne defined
for np.unique to work. hash is for set.
"""
class Point(object):
def __init__(self, x):
self.x = x
def __eq__(self, other):
return self.x == other.x
def __ne__(self, other):
return self.x != other.x
def __lt__(self, other):
return self.x < other.x
def __hash__(self):
return hash(self.x)
points = [Point(x) for x in [1, 2, 3, 4, 3, 2, 2, 2]]
arr = np.empty((8,), dtype=object)
arr[:] = points
assert len(set(points)) == 4
assert_equal(np.unique(arr).shape, (4,))
vals = stats.mode(arr)
assert_equal(vals[0][0], Point(2))
assert_equal(vals[1][0], 4)
class TestVariability(TestCase):
testcase = [1,2,3,4]
def test_signaltonoise(self):
# This is not in R, so used:
# mean(testcase, axis=0) / (sqrt(var(testcase) * 3/4))
# y = stats.signaltonoise(self.shoes[0])
# assert_approx_equal(y,4.5709967)
y = stats.signaltonoise(self.testcase)
assert_approx_equal(y,2.236067977)
def test_sem(self):
# This is not in R, so used:
# sqrt(var(testcase)*3/4)/sqrt(3)
# y = stats.sem(self.shoes[0])
# assert_approx_equal(y,0.775177399)
y = stats.sem(self.testcase)
assert_approx_equal(y, 0.6454972244)
n = len(self.testcase)
assert_allclose(stats.sem(self.testcase, ddof=0) * np.sqrt(n/(n-2)),
stats.sem(self.testcase, ddof=2))
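        # Worked check (illustrative): std(ddof=1) = sqrt(5/3) ~= 1.2910, so
        # sem = 1.2910/sqrt(4) ~= 0.6455.  The ddof identity above holds because
        # sem(ddof=d) = sqrt(sum((x - mean)**2)/(n - d))/sqrt(n).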
def test_zmap(self):
# not in R, so tested by using:
# (testcase[i] - mean(testcase, axis=0)) / sqrt(var(testcase) * 3/4)
y = stats.zmap(self.testcase,self.testcase)
desired = ([-1.3416407864999, -0.44721359549996, 0.44721359549996, 1.3416407864999])
assert_array_almost_equal(desired,y,decimal=12)
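        # Reading aid: zmap(scores, compare) standardizes `scores` with the mean
        # and std (ddof=0 by default) of `compare`; zscore(a), tested further
        # below, is the special case where compare is a itself.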
def test_zmap_axis(self):
# Test use of 'axis' keyword in zmap.
x = np.array([[0.0, 0.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 2.0],
[2.0, 0.0, 2.0, 0.0]])
t1 = 1.0/np.sqrt(2.0/3)
t2 = np.sqrt(3.)/3
t3 = np.sqrt(2.)
z0 = stats.zmap(x, x, axis=0)
z1 = stats.zmap(x, x, axis=1)
z0_expected = [[-t1, -t3/2, -t3/2, 0.0],
[0.0, t3, -t3/2, t1],
[t1, -t3/2, t3, -t1]]
z1_expected = [[-1.0, -1.0, 1.0, 1.0],
[-t2, -t2, -t2, np.sqrt(3.)],
[1.0, -1.0, 1.0, -1.0]]
assert_array_almost_equal(z0, z0_expected)
assert_array_almost_equal(z1, z1_expected)
def test_zmap_ddof(self):
# Test use of 'ddof' keyword in zmap.
x = np.array([[0.0, 0.0, 1.0, 1.0],
[0.0, 1.0, 2.0, 3.0]])
z = stats.zmap(x, x, axis=1, ddof=1)
z0_expected = np.array([-0.5, -0.5, 0.5, 0.5])/(1.0/np.sqrt(3))
z1_expected = np.array([-1.5, -0.5, 0.5, 1.5])/(np.sqrt(5./3))
assert_array_almost_equal(z[0], z0_expected)
assert_array_almost_equal(z[1], z1_expected)
def test_zscore(self):
# not in R, so tested by using:
# (testcase[i] - mean(testcase, axis=0)) / sqrt(var(testcase) * 3/4)
y = stats.zscore(self.testcase)
desired = ([-1.3416407864999, -0.44721359549996, 0.44721359549996, 1.3416407864999])
assert_array_almost_equal(desired,y,decimal=12)
def test_zscore_axis(self):
# Test use of 'axis' keyword in zscore.
x = np.array([[0.0, 0.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 2.0],
[2.0, 0.0, 2.0, 0.0]])
t1 = 1.0/np.sqrt(2.0/3)
t2 = np.sqrt(3.)/3
t3 = np.sqrt(2.)
z0 = stats.zscore(x, axis=0)
z1 = stats.zscore(x, axis=1)
z0_expected = [[-t1, -t3/2, -t3/2, 0.0],
[0.0, t3, -t3/2, t1],
[t1, -t3/2, t3, -t1]]
z1_expected = [[-1.0, -1.0, 1.0, 1.0],
[-t2, -t2, -t2, np.sqrt(3.)],
[1.0, -1.0, 1.0, -1.0]]
assert_array_almost_equal(z0, z0_expected)
assert_array_almost_equal(z1, z1_expected)
def test_zscore_ddof(self):
# Test use of 'ddof' keyword in zscore.
x = np.array([[0.0, 0.0, 1.0, 1.0],
[0.0, 1.0, 2.0, 3.0]])
z = stats.zscore(x, axis=1, ddof=1)
z0_expected = np.array([-0.5, -0.5, 0.5, 0.5])/(1.0/np.sqrt(3))
z1_expected = np.array([-1.5, -0.5, 0.5, 1.5])/(np.sqrt(5./3))
assert_array_almost_equal(z[0], z0_expected)
assert_array_almost_equal(z[1], z1_expected)
class TestMoments(TestCase):
"""
Comparison numbers are found using R v.1.5.1
note that length(testcase) = 4
testmathworks comes from documentation for the
Statistics Toolbox for Matlab and can be found at both
http://www.mathworks.com/access/helpdesk/help/toolbox/stats/kurtosis.shtml
http://www.mathworks.com/access/helpdesk/help/toolbox/stats/skewness.shtml
Note that both test cases came from here.
"""
testcase = [1,2,3,4]
np.random.seed(1234)
testcase_moment_accuracy = np.random.rand(42)
testmathworks = [1.165, 0.6268, 0.0751, 0.3516, -0.6965]
def test_moment(self):
# mean((testcase-mean(testcase))**power,axis=0),axis=0))**power))
y = stats.moment(self.testcase,1)
assert_approx_equal(y,0.0,10)
y = stats.moment(self.testcase,2)
assert_approx_equal(y,1.25)
y = stats.moment(self.testcase,3)
assert_approx_equal(y,0.0)
y = stats.moment(self.testcase,4)
assert_approx_equal(y,2.5625)
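        # Worked check (illustrative): the k-th central moment is
        # mean((x - x.mean())**k).  For [1, 2, 3, 4] the deviations are +-1.5 and
        # +-0.5, so m2 = (2*2.25 + 2*0.25)/4 = 1.25 and
        # m4 = (2*5.0625 + 2*0.0625)/4 = 2.5625, matching the assertions above.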
def test_variation(self):
        # variation = std (population, ddof=0) / mean
y = stats.variation(self.testcase)
assert_approx_equal(y,0.44721359549996, 10)
def test_skewness(self):
# sum((testmathworks-mean(testmathworks,axis=0))**3,axis=0) /
# ((sqrt(var(testmathworks)*4/5))**3)/5
y = stats.skew(self.testmathworks)
assert_approx_equal(y,-0.29322304336607,10)
y = stats.skew(self.testmathworks,bias=0)
assert_approx_equal(y,-0.437111105023940,10)
y = stats.skew(self.testcase)
assert_approx_equal(y,0.0,10)
def test_skewness_scalar(self):
# `skew` must return a scalar for 1-dim input
assert_equal(stats.skew(arange(10)), 0.0)
def test_kurtosis(self):
# sum((testcase-mean(testcase,axis=0))**4,axis=0)/((sqrt(var(testcase)*3/4))**4)/4
# sum((test2-mean(testmathworks,axis=0))**4,axis=0)/((sqrt(var(testmathworks)*4/5))**4)/5
# Set flags for axis = 0 and
        # fisher=0 (Pearson's definition of kurtosis, for compatibility with Matlab)
y = stats.kurtosis(self.testmathworks,0,fisher=0,bias=1)
assert_approx_equal(y, 2.1658856802973,10)
        # Note that MATLAB has confusing docs for the following case:
        # kurtosis(x, 0) gives an unbiased estimate of Pearson's kurtosis,
        # kurtosis(x) gives a biased estimate of Fisher's kurtosis (Pearson minus 3),
        # while the MATLAB docs imply that both should give Fisher's.
y = stats.kurtosis(self.testmathworks,fisher=0,bias=0)
assert_approx_equal(y, 3.663542721189047,10)
y = stats.kurtosis(self.testcase,0,0)
assert_approx_equal(y,1.64)
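        # Worked check (illustrative): Pearson's (biased) kurtosis is m4/m2**2;
        # for [1, 2, 3, 4] that is 2.5625/1.25**2 == 1.64, matching the last assertion.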
def test_kurtosis_array_scalar(self):
assert_equal(type(stats.kurtosis([1,2,3])), float)
def test_moment_accuracy(self):
# 'moment' must have a small enough error compared to the slower
# but very accurate numpy.power() implementation.
tc_no_mean = self.testcase_moment_accuracy - \
np.mean(self.testcase_moment_accuracy)
assert_allclose(np.power(tc_no_mean, 42).mean(),
stats.moment(self.testcase_moment_accuracy, 42))
class TestThreshold(TestCase):
def test_basic(self):
a = [-1,2,3,4,5,-1,-2]
assert_array_equal(stats.threshold(a),a)
assert_array_equal(stats.threshold(a,3,None,0),
[0,0,3,4,5,0,0])
assert_array_equal(stats.threshold(a,None,3,0),
[-1,2,3,0,0,-1,-2])
assert_array_equal(stats.threshold(a,2,4,0),
[0,2,3,4,0,0,0])
class TestStudentTest(TestCase):
X1 = np.array([-1, 0, 1])
X2 = np.array([0, 1, 2])
T1_0 = 0
P1_0 = 1
T1_1 = -1.732051
P1_1 = 0.2254033
T1_2 = -3.464102
P1_2 = 0.0741799
T2_0 = 1.732051
P2_0 = 0.2254033
def test_onesample(self):
t, p = stats.ttest_1samp(self.X1, 0)
assert_array_almost_equal(t, self.T1_0)
assert_array_almost_equal(p, self.P1_0)
t, p = stats.ttest_1samp(self.X2, 0)
assert_array_almost_equal(t, self.T2_0)
assert_array_almost_equal(p, self.P2_0)
t, p = stats.ttest_1samp(self.X1, 1)
assert_array_almost_equal(t, self.T1_1)
assert_array_almost_equal(p, self.P1_1)
t, p = stats.ttest_1samp(self.X1, 2)
assert_array_almost_equal(t, self.T1_2)
assert_array_almost_equal(p, self.P1_2)
def test_percentileofscore():
pcos = stats.percentileofscore
assert_equal(pcos([1,2,3,4,5,6,7,8,9,10],4), 40.0)
for (kind, result) in [('mean', 35.0),
('strict', 30.0),
('weak', 40.0)]:
yield assert_equal, pcos(np.arange(10) + 1,
4, kind=kind), \
result
# multiple - 2
for (kind, result) in [('rank', 45.0),
('strict', 30.0),
('weak', 50.0),
('mean', 40.0)]:
yield assert_equal, pcos([1,2,3,4,4,5,6,7,8,9],
4, kind=kind), \
result
# multiple - 3
assert_equal(pcos([1,2,3,4,4,4,5,6,7,8], 4), 50.0)
for (kind, result) in [('rank', 50.0),
('mean', 45.0),
('strict', 30.0),
('weak', 60.0)]:
yield assert_equal, pcos([1,2,3,4,4,4,5,6,7,8],
4, kind=kind), \
result
# missing
for kind in ('rank', 'mean', 'strict', 'weak'):
yield assert_equal, pcos([1,2,3,5,6,7,8,9,10,11],
4, kind=kind), \
30
# larger numbers
for (kind, result) in [('mean', 35.0),
('strict', 30.0),
('weak', 40.0)]:
yield assert_equal, \
pcos([10, 20, 30, 40, 50, 60, 70, 80, 90, 100], 40,
kind=kind), result
for (kind, result) in [('mean', 45.0),
('strict', 30.0),
('weak', 60.0)]:
yield assert_equal, \
pcos([10, 20, 30, 40, 40, 40, 50, 60, 70, 80],
40, kind=kind), result
for kind in ('rank', 'mean', 'strict', 'weak'):
yield assert_equal, \
pcos([10, 20, 30, 50, 60, 70, 80, 90, 100, 110],
40, kind=kind), 30.0
# boundaries
for (kind, result) in [('rank', 10.0),
('mean', 5.0),
('strict', 0.0),
('weak', 10.0)]:
yield assert_equal, \
pcos([10, 20, 30, 50, 60, 70, 80, 90, 100, 110],
10, kind=kind), result
for (kind, result) in [('rank', 100.0),
('mean', 95.0),
('strict', 90.0),
('weak', 100.0)]:
yield assert_equal, \
pcos([10, 20, 30, 50, 60, 70, 80, 90, 100, 110],
110, kind=kind), result
# out of bounds
for (kind, score, result) in [('rank', 200, 100.0),
('mean', 200, 100.0),
('mean', 0, 0.0)]:
yield assert_equal, \
pcos([10, 20, 30, 50, 60, 70, 80, 90, 100, 110],
score, kind=kind), result
assert_raises(ValueError, pcos, [1, 2, 3, 3, 4], 3, kind='unrecognized')
PowerDivCase = namedtuple('Case', ['f_obs', 'f_exp', 'ddof', 'axis',
'chi2', # Pearson's
'log', # G-test (log-likelihood)
'mod_log', # Modified log-likelihood
'cr', # Cressie-Read (lambda=2/3)
])
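# Reading aid: the Cressie-Read power divergence family is usually written as
#     2/(lambda*(lambda + 1)) * sum(f_obs*((f_obs/f_exp)**lambda - 1)),
# with lambda=1 giving Pearson's chi-square, lambda->0 the log-likelihood ratio G,
# lambda->-1 the modified log-likelihood, and lambda=2/3 the Cressie-Read statistic.
# That is why the `cr` entries below divide by 5/9: 2/((2/3)*(5/3)) == 9/5.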
# The details of the first two elements in power_div_1d_cases are used
# in a test in TestPowerDivergence. Check that code before making
# any changes here.
power_div_1d_cases = [
# Use the default f_exp.
PowerDivCase(f_obs=[4, 8, 12, 8], f_exp=None, ddof=0, axis=None,
chi2=4,
log=2*(4*np.log(4/8) + 12*np.log(12/8)),
mod_log=2*(8*np.log(8/4) + 8*np.log(8/12)),
cr=(4*((4/8)**(2/3) - 1) + 12*((12/8)**(2/3) - 1))/(5/9)),
# Give a non-uniform f_exp.
PowerDivCase(f_obs=[4, 8, 12, 8], f_exp=[2, 16, 12, 2], ddof=0, axis=None,
chi2=24,
log=2*(4*np.log(4/2) + 8*np.log(8/16) + 8*np.log(8/2)),
mod_log=2*(2*np.log(2/4) + 16*np.log(16/8) + 2*np.log(2/8)),
cr=(4*((4/2)**(2/3) - 1) + 8*((8/16)**(2/3) - 1) +
8*((8/2)**(2/3) - 1))/(5/9)),
# f_exp is a scalar.
PowerDivCase(f_obs=[4, 8, 12, 8], f_exp=8, ddof=0, axis=None,
chi2=4,
log=2*(4*np.log(4/8) + 12*np.log(12/8)),
mod_log=2*(8*np.log(8/4) + 8*np.log(8/12)),
cr=(4*((4/8)**(2/3) - 1) + 12*((12/8)**(2/3) - 1))/(5/9)),
# f_exp equal to f_obs.
PowerDivCase(f_obs=[3, 5, 7, 9], f_exp=[3, 5, 7, 9], ddof=0, axis=0,
chi2=0, log=0, mod_log=0, cr=0),
]
power_div_empty_cases = [
# Shape is (0,)--a data set with length 0. The computed
# test statistic should be 0.
PowerDivCase(f_obs=[],
f_exp=None, ddof=0, axis=0,
chi2=0, log=0, mod_log=0, cr=0),
# Shape is (0, 3). This is 3 data sets, but each data set has
# length 0, so the computed test statistic should be [0, 0, 0].
PowerDivCase(f_obs=np.array([[],[],[]]).T,
f_exp=None, ddof=0, axis=0,
chi2=[0, 0, 0],
log=[0, 0, 0],
mod_log=[0, 0, 0],
cr=[0, 0, 0]),
# Shape is (3, 0). This represents an empty collection of
# data sets in which each data set has length 3. The test
# statistic should be an empty array.
PowerDivCase(f_obs=np.array([[],[],[]]),
f_exp=None, ddof=0, axis=0,
chi2=[],
log=[],
mod_log=[],
cr=[]),
]
class TestPowerDivergence(object):
def check_power_divergence(self, f_obs, f_exp, ddof, axis, lambda_,
expected_stat):
f_obs = np.asarray(f_obs)
if axis is None:
num_obs = f_obs.size
else:
b = np.broadcast(f_obs, f_exp)
num_obs = b.shape[axis]
stat, p = stats.power_divergence(f_obs=f_obs, f_exp=f_exp, ddof=ddof,
axis=axis, lambda_=lambda_)
assert_allclose(stat, expected_stat)
if lambda_ == 1 or lambda_ == "pearson":
# Also test stats.chisquare.
stat, p = stats.chisquare(f_obs=f_obs, f_exp=f_exp, ddof=ddof,
axis=axis)
assert_allclose(stat, expected_stat)
ddof = np.asarray(ddof)
expected_p = stats.chisqprob(expected_stat, num_obs - 1 - ddof)
assert_allclose(p, expected_p)
def test_basic(self):
for case in power_div_1d_cases:
yield (self.check_power_divergence,
case.f_obs, case.f_exp, case.ddof, case.axis,
None, case.chi2)
yield (self.check_power_divergence,
case.f_obs, case.f_exp, case.ddof, case.axis,
"pearson", case.chi2)
yield (self.check_power_divergence,
case.f_obs, case.f_exp, case.ddof, case.axis,
1, case.chi2)
yield (self.check_power_divergence,
case.f_obs, case.f_exp, case.ddof, case.axis,
"log-likelihood", case.log)
yield (self.check_power_divergence,
case.f_obs, case.f_exp, case.ddof, case.axis,
"mod-log-likelihood", case.mod_log)
yield (self.check_power_divergence,
case.f_obs, case.f_exp, case.ddof, case.axis,
"cressie-read", case.cr)
yield (self.check_power_divergence,
case.f_obs, case.f_exp, case.ddof, case.axis,
2/3, case.cr)
def test_basic_masked(self):
for case in power_div_1d_cases:
mobs = np.ma.array(case.f_obs)
yield (self.check_power_divergence,
mobs, case.f_exp, case.ddof, case.axis,
None, case.chi2)
yield (self.check_power_divergence,
mobs, case.f_exp, case.ddof, case.axis,
"pearson", case.chi2)
yield (self.check_power_divergence,
mobs, case.f_exp, case.ddof, case.axis,
1, case.chi2)
yield (self.check_power_divergence,
mobs, case.f_exp, case.ddof, case.axis,
"log-likelihood", case.log)
yield (self.check_power_divergence,
mobs, case.f_exp, case.ddof, case.axis,
"mod-log-likelihood", case.mod_log)
yield (self.check_power_divergence,
mobs, case.f_exp, case.ddof, case.axis,
"cressie-read", case.cr)
yield (self.check_power_divergence,
mobs, case.f_exp, case.ddof, case.axis,
2/3, case.cr)
def test_axis(self):
case0 = power_div_1d_cases[0]
case1 = power_div_1d_cases[1]
f_obs = np.vstack((case0.f_obs, case1.f_obs))
f_exp = np.vstack((np.ones_like(case0.f_obs)*np.mean(case0.f_obs),
case1.f_exp))
# Check the four computational code paths in power_divergence
# using a 2D array with axis=1.
yield (self.check_power_divergence,
f_obs, f_exp, 0, 1,
"pearson", [case0.chi2, case1.chi2])
yield (self.check_power_divergence,
f_obs, f_exp, 0, 1,
"log-likelihood", [case0.log, case1.log])
yield (self.check_power_divergence,
f_obs, f_exp, 0, 1,
"mod-log-likelihood", [case0.mod_log, case1.mod_log])
yield (self.check_power_divergence,
f_obs, f_exp, 0, 1,
"cressie-read", [case0.cr, case1.cr])
# Reshape case0.f_obs to shape (2,2), and use axis=None.
# The result should be the same.
yield (self.check_power_divergence,
np.array(case0.f_obs).reshape(2, 2), None, 0, None,
"pearson", case0.chi2)
def test_ddof_broadcasting(self):
# Test that ddof broadcasts correctly.
# ddof does not affect the test statistic. It is broadcast
# with the computed test statistic for the computation of
# the p value.
case0 = power_div_1d_cases[0]
case1 = power_div_1d_cases[1]
# Create 4x2 arrays of observed and expected frequencies.
f_obs = np.vstack((case0.f_obs, case1.f_obs)).T
f_exp = np.vstack((np.ones_like(case0.f_obs)*np.mean(case0.f_obs),
case1.f_exp)).T
expected_chi2 = [case0.chi2, case1.chi2]
# ddof has shape (2, 1). This is broadcast with the computed
# statistic, so p will have shape (2,2).
ddof = np.array([[0], [1]])
stat, p = stats.power_divergence(f_obs, f_exp, ddof=ddof)
assert_allclose(stat, expected_chi2)
# Compute the p values separately, passing in scalars for ddof.
stat0, p0 = stats.power_divergence(f_obs, f_exp, ddof=ddof[0,0])
stat1, p1 = stats.power_divergence(f_obs, f_exp, ddof=ddof[1,0])
assert_array_equal(p, np.vstack((p0, p1)))
def test_empty_cases(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
for case in power_div_empty_cases:
yield (self.check_power_divergence,
case.f_obs, case.f_exp, case.ddof, case.axis,
"pearson", case.chi2)
yield (self.check_power_divergence,
case.f_obs, case.f_exp, case.ddof, case.axis,
"log-likelihood", case.log)
yield (self.check_power_divergence,
case.f_obs, case.f_exp, case.ddof, case.axis,
"mod-log-likelihood", case.mod_log)
yield (self.check_power_divergence,
case.f_obs, case.f_exp, case.ddof, case.axis,
"cressie-read", case.cr)
def test_chisquare_masked_arrays():
# Test masked arrays.
obs = np.array([[8, 8, 16, 32, -1], [-1, -1, 3, 4, 5]]).T
mask = np.array([[0, 0, 0, 0, 1], [1, 1, 0, 0, 0]]).T
mobs = np.ma.masked_array(obs, mask)
expected_chisq = np.array([24.0, 0.5])
expected_g = np.array([2*(2*8*np.log(0.5) + 32*np.log(2.0)),
2*(3*np.log(0.75) + 5*np.log(1.25))])
chisq, p = stats.chisquare(mobs)
mat.assert_array_equal(chisq, expected_chisq)
mat.assert_array_almost_equal(p, stats.chisqprob(expected_chisq,
mobs.count(axis=0) - 1))
g, p = stats.power_divergence(mobs, lambda_='log-likelihood')
mat.assert_array_almost_equal(g, expected_g, decimal=15)
mat.assert_array_almost_equal(p, stats.chisqprob(expected_g,
mobs.count(axis=0) - 1))
chisq, p = stats.chisquare(mobs.T, axis=1)
mat.assert_array_equal(chisq, expected_chisq)
mat.assert_array_almost_equal(p,
stats.chisqprob(expected_chisq,
mobs.T.count(axis=1) - 1))
g, p = stats.power_divergence(mobs.T, axis=1, lambda_="log-likelihood")
mat.assert_array_almost_equal(g, expected_g, decimal=15)
mat.assert_array_almost_equal(p, stats.chisqprob(expected_g,
mobs.count(axis=0) - 1))
obs1 = np.ma.array([3, 5, 6, 99, 10], mask=[0, 0, 0, 1, 0])
exp1 = np.ma.array([2, 4, 8, 10, 99], mask=[0, 0, 0, 0, 1])
chi2, p = stats.chisquare(obs1, f_exp=exp1)
# Because of the mask at index 3 of obs1 and at index 4 of exp1,
# only the first three elements are included in the calculation
# of the statistic.
mat.assert_array_equal(chi2, 1/2 + 1/4 + 4/8)
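    # Worked check (illustrative): the surviving (unmasked) pairs are (3, 2),
    # (5, 4) and (6, 8), giving (3-2)**2/2 + (5-4)**2/4 + (6-8)**2/8 = 1.25.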
# When axis=None, the two values should have type np.float64.
chisq, p = stats.chisquare(np.ma.array([1,2,3]), axis=None)
assert_(isinstance(chisq, np.float64))
assert_(isinstance(p, np.float64))
assert_equal(chisq, 1.0)
assert_almost_equal(p, stats.chisqprob(1.0, 2))
# Empty arrays:
# A data set with length 0 returns a masked scalar.
with np.errstate(invalid='ignore'):
chisq, p = stats.chisquare(np.ma.array([]))
assert_(isinstance(chisq, np.ma.MaskedArray))
assert_equal(chisq.shape, ())
assert_(chisq.mask)
empty3 = np.ma.array([[],[],[]])
# empty3 is a collection of 0 data sets (whose lengths would be 3, if
# there were any), so the return value is an array with length 0.
chisq, p = stats.chisquare(empty3)
assert_(isinstance(chisq, np.ma.MaskedArray))
mat.assert_array_equal(chisq, [])
# empty3.T is an array containing 3 data sets, each with length 0,
# so an array of size (3,) is returned, with all values masked.
with np.errstate(invalid='ignore'):
chisq, p = stats.chisquare(empty3.T)
assert_(isinstance(chisq, np.ma.MaskedArray))
assert_equal(chisq.shape, (3,))
assert_(np.all(chisq.mask))
def test_power_divergence_against_cressie_read_data():
# Test stats.power_divergence against tables 4 and 5 from
    # Cressie and Read, "Multinomial Goodness-of-Fit Tests",
# J. R. Statist. Soc. B (1984), Vol 46, No. 3, pp. 440-464.
# This tests the calculation for several values of lambda.
# `table4` holds just the second and third columns from Table 4.
table4 = np.array([
# observed, expected,
15, 15.171,
11, 13.952,
14, 12.831,
17, 11.800,
5, 10.852,
11, 9.9796,
10, 9.1777,
4, 8.4402,
8, 7.7620,
10, 7.1383,
7, 6.5647,
9, 6.0371,
11, 5.5520,
3, 5.1059,
6, 4.6956,
1, 4.3183,
1, 3.9713,
4, 3.6522,
]).reshape(-1, 2)
table5 = np.array([
# lambda, statistic
-10.0, 72.2e3,
-5.0, 28.9e1,
-3.0, 65.6,
-2.0, 40.6,
-1.5, 34.0,
-1.0, 29.5,
-0.5, 26.5,
0.0, 24.6,
0.5, 23.4,
0.67, 23.1,
1.0, 22.7,
1.5, 22.6,
2.0, 22.9,
3.0, 24.8,
5.0, 35.5,
10.0, 21.4e1,
]).reshape(-1, 2)
for lambda_, expected_stat in table5:
stat, p = stats.power_divergence(table4[:,0], table4[:,1],
lambda_=lambda_)
assert_allclose(stat, expected_stat, rtol=5e-3)
def test_friedmanchisquare():
# see ticket:113
# verified with matlab and R
# From Demsar "Statistical Comparisons of Classifiers over Multiple Data Sets"
# 2006, Xf=9.28 (no tie handling, tie corrected Xf >=9.28)
x1 = [array([0.763, 0.599, 0.954, 0.628, 0.882, 0.936, 0.661, 0.583,
0.775, 1.0, 0.94, 0.619, 0.972, 0.957]),
array([0.768, 0.591, 0.971, 0.661, 0.888, 0.931, 0.668, 0.583,
0.838, 1.0, 0.962, 0.666, 0.981, 0.978]),
array([0.771, 0.590, 0.968, 0.654, 0.886, 0.916, 0.609, 0.563,
0.866, 1.0, 0.965, 0.614, 0.9751, 0.946]),
array([0.798, 0.569, 0.967, 0.657, 0.898, 0.931, 0.685, 0.625,
0.875, 1.0, 0.962, 0.669, 0.975, 0.970])]
# From "Bioestadistica para las ciencias de la salud" Xf=18.95 p<0.001:
x2 = [array([4,3,5,3,5,3,2,5,4,4,4,3]),
array([2,2,1,2,3,1,2,3,2,1,1,3]),
array([2,4,3,3,4,3,3,4,4,1,2,1]),
array([3,5,4,3,4,4,3,3,3,4,4,4])]
    # From Jerrold H. Zar, "Biostatistical Analysis" (example 12.6), Xf=10.68, 0.005 < p < 0.01:
    # The probability for this example is inexact because the chi-square
    # approximation of the Friedman statistic is used.
x3 = [array([7.0,9.9,8.5,5.1,10.3]),
array([5.3,5.7,4.7,3.5,7.7]),
array([4.9,7.6,5.5,2.8,8.4]),
array([8.8,8.9,8.1,3.3,9.1])]
assert_array_almost_equal(stats.friedmanchisquare(x1[0],x1[1],x1[2],x1[3]),
(10.2283464566929, 0.0167215803284414))
assert_array_almost_equal(stats.friedmanchisquare(x2[0],x2[1],x2[2],x2[3]),
(18.9428571428571, 0.000280938375189499))
assert_array_almost_equal(stats.friedmanchisquare(x3[0],x3[1],x3[2],x3[3]),
(10.68, 0.0135882729582176))
np.testing.assert_raises(ValueError, stats.friedmanchisquare,x3[0],x3[1])
# test using mstats
assert_array_almost_equal(stats.mstats.friedmanchisquare(x1[0],x1[1],x1[2],x1[3]),
(10.2283464566929, 0.0167215803284414))
# the following fails
# assert_array_almost_equal(stats.mstats.friedmanchisquare(x2[0],x2[1],x2[2],x2[3]),
# (18.9428571428571, 0.000280938375189499))
assert_array_almost_equal(stats.mstats.friedmanchisquare(x3[0],x3[1],x3[2],x3[3]),
(10.68, 0.0135882729582176))
np.testing.assert_raises(ValueError,stats.mstats.friedmanchisquare,x3[0],x3[1])
def test_kstest():
# from numpy.testing import assert_almost_equal
# comparing with values from R
x = np.linspace(-1,1,9)
D,p = stats.kstest(x,'norm')
assert_almost_equal(D, 0.15865525393145705, 12)
assert_almost_equal(p, 0.95164069201518386, 1)
x = np.linspace(-15,15,9)
D,p = stats.kstest(x,'norm')
assert_almost_equal(D, 0.44435602715924361, 15)
assert_almost_equal(p, 0.038850140086788665, 8)
    # the following tests rely on deterministically replicated rvs
np.random.seed(987654321)
x = stats.norm.rvs(loc=0.2, size=100)
D,p = stats.kstest(x, 'norm', mode='asymp')
assert_almost_equal(D, 0.12464329735846891, 15)
assert_almost_equal(p, 0.089444888711820769, 15)
assert_almost_equal(np.array(stats.kstest(x, 'norm', mode='asymp')),
np.array((0.12464329735846891, 0.089444888711820769)), 15)
assert_almost_equal(np.array(stats.kstest(x,'norm', alternative='less')),
np.array((0.12464329735846891, 0.040989164077641749)), 15)
# this 'greater' test fails with precision of decimal=14
assert_almost_equal(np.array(stats.kstest(x,'norm', alternative='greater')),
np.array((0.0072115233216310994, 0.98531158590396228)), 12)
# missing: no test that uses *args
def test_ks_2samp():
# exact small sample solution
data1 = np.array([1.0,2.0])
data2 = np.array([1.0,2.0,3.0])
assert_almost_equal(np.array(stats.ks_2samp(data1+0.01,data2)),
np.array((0.33333333333333337, 0.99062316386915694)))
assert_almost_equal(np.array(stats.ks_2samp(data1-0.01,data2)),
np.array((0.66666666666666674, 0.42490954988801982)))
# these can also be verified graphically
assert_almost_equal(
np.array(stats.ks_2samp(np.linspace(1,100,100),
np.linspace(1,100,100)+2+0.1)),
np.array((0.030000000000000027, 0.99999999996005062)))
assert_almost_equal(
np.array(stats.ks_2samp(np.linspace(1,100,100),
np.linspace(1,100,100)+2-0.1)),
np.array((0.020000000000000018, 0.99999999999999933)))
# these are just regression tests
assert_almost_equal(
np.array(stats.ks_2samp(np.linspace(1,100,100),
np.linspace(1,100,110)+20.1)),
np.array((0.21090909090909091, 0.015880386730710221)))
assert_almost_equal(
np.array(stats.ks_2samp(np.linspace(1,100,100),
np.linspace(1,100,110)+20-0.1)),
np.array((0.20818181818181825, 0.017981441789762638)))
def test_ttest_rel():
# regression test
tr,pr = 0.81248591389165692, 0.41846234511362157
tpr = ([tr,-tr],[pr,pr])
rvs1 = np.linspace(1,100,100)
rvs2 = np.linspace(1.01,99.989,100)
rvs1_2D = np.array([np.linspace(1,100,100), np.linspace(1.01,99.989,100)])
rvs2_2D = np.array([np.linspace(1.01,99.989,100), np.linspace(1,100,100)])
t,p = stats.ttest_rel(rvs1, rvs2, axis=0)
assert_array_almost_equal([t,p],(tr,pr))
t,p = stats.ttest_rel(rvs1_2D.T, rvs2_2D.T, axis=0)
assert_array_almost_equal([t,p],tpr)
t,p = stats.ttest_rel(rvs1_2D, rvs2_2D, axis=1)
assert_array_almost_equal([t,p],tpr)
# test on 3 dimensions
rvs1_3D = np.dstack([rvs1_2D,rvs1_2D,rvs1_2D])
rvs2_3D = np.dstack([rvs2_2D,rvs2_2D,rvs2_2D])
t,p = stats.ttest_rel(rvs1_3D, rvs2_3D, axis=1)
assert_array_almost_equal(np.abs(t), tr)
assert_array_almost_equal(np.abs(p), pr)
assert_equal(t.shape, (2, 3))
t,p = stats.ttest_rel(np.rollaxis(rvs1_3D,2), np.rollaxis(rvs2_3D,2), axis=2)
assert_array_almost_equal(np.abs(t), tr)
assert_array_almost_equal(np.abs(p), pr)
assert_equal(t.shape, (3, 2))
olderr = np.seterr(all='ignore')
try:
# test zero division problem
t,p = stats.ttest_rel([0,0,0],[1,1,1])
assert_equal((np.abs(t),p), (np.inf, 0))
assert_equal(stats.ttest_rel([0,0,0], [0,0,0]), (np.nan, np.nan))
        # check that a nan in the input array results in a nan output
anan = np.array([[1,np.nan],[-1,1]])
assert_equal(stats.ttest_ind(anan, np.zeros((2,2))),([0, np.nan], [1,np.nan]))
finally:
np.seterr(**olderr)
    # test that an incorrect input shape raises an error
x = np.arange(24)
assert_raises(ValueError, stats.ttest_rel, x.reshape((8, 3)),
x.reshape((2, 3, 4)))
def _desc_stats(x1, x2, axis=0):
def _stats(x, axis=0):
x = np.asarray(x)
mu = np.mean(x, axis=axis)
std = np.std(x, axis=axis, ddof=1)
nobs = x.shape[axis]
return mu, std, nobs
return _stats(x1, axis) + _stats(x2, axis)
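# Reading aid: _stats returns (mean, std(ddof=1), nobs) per sample, so the tuple
# from _desc_stats(x1, x2) unpacks directly into
# stats.ttest_ind_from_stats(mean1, std1, nobs1, mean2, std2, nobs2, ...).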
def test_ttest_perm():
# Test on horizontal dimension
N = 20
np.random.seed(0)
    a = np.vstack((np.arange(3*N//4), np.random.random(3*N//4)))
    b = np.vstack((np.arange(N//4) + 100, np.random.random(N//4)))
p_t_stats, pvalues = stats.ttest_ind(a, b, axis=1, equal_var=False)
np_t_stats, pvalues = stats.ttest_ind(a, b, axis=1, equal_var=False,
permutations=1000,
random_state=0)
assert_array_almost_equal(p_t_stats, np_t_stats, 5)
assert_array_almost_equal(pvalues, array([0.000999, 0.69031]))
# Test on vertical dimension
N = 20
np.random.seed(0)
    a = np.vstack((np.arange(3*N//4), np.random.random(3*N//4))).transpose()
    b = np.vstack((np.arange(N//4) + 100, np.random.random(N//4))).transpose()
p_t_stats, pvalues = stats.ttest_ind(a, b, axis=0, equal_var=False)
np_t_stats, pvalues = stats.ttest_ind(a, b, axis=0, equal_var=False,
permutations=1000,
random_state=0)
assert_array_almost_equal(p_t_stats, np_t_stats, 5)
assert_array_almost_equal(pvalues, array([0.000999, 0.69031]))
# Test on 1 dimensional case
N = 20
np.random.seed(0)
a = np.arange((3*N)/4)
b = np.arange(N/4) + 100
p_t_stats, pvalues = stats.ttest_ind(a, b, equal_var=False)
np_t_stats, pvalues = stats.ttest_ind(a, b, equal_var=False,
permutations=1000,
random_state=0)
assert_array_almost_equal(p_t_stats, np_t_stats, 5)
assert_array_almost_equal(pvalues, array([0.000999]))
# Test just arrays
N = 20
np.random.seed(0)
a = range(int((3*N)/4))
b = range(100,int(N/4)+100)
p_t_stats, pvalues = stats.ttest_ind(a, b, equal_var=False)
np_t_stats, pvalues = stats.ttest_ind(a, b, equal_var=False,
permutations=1000,
random_state=0)
assert_array_almost_equal(p_t_stats, np_t_stats, 5)
assert_array_almost_equal(pvalues, array([0.000999]))
# Test equal variance
N = 20
np.random.seed(0)
a = np.arange(N/2)
b = np.arange(N/2) + 100
p_t_stats, pvalues = stats.ttest_ind(a, b, equal_var=True)
np_t_stats, pvalues = stats.ttest_ind(a, b, equal_var=True,
permutations=1000,
random_state=0)
assert_array_almost_equal(p_t_stats, np_t_stats, 5)
assert_array_almost_equal(pvalues, array([0.000999]))
# Test out random seed
N = 20
    a = np.vstack((np.arange(3*N//4), np.random.random(3*N//4)))
    b = np.vstack((np.arange(N//4) + 100, np.random.random(N//4)))
p_t_stats, pvalues = stats.ttest_ind(a, b, axis=1, equal_var=False)
np_t_stats, pvalues = stats.ttest_ind(a, b, axis=1, equal_var=False,
permutations=1000,
random_state=np.random.RandomState(seed=0))
assert_array_almost_equal(p_t_stats, np_t_stats, 5)
assert_array_almost_equal(pvalues, array([0.000999, 0.69031]))
# Test out different array dimensions
np.random.seed(0)
rvs1 = stats.norm.rvs(loc=5, scale=10, size=500)
rvs5 = stats.norm.rvs(loc=8, scale=20, size=100)
np_t_stats, pvalues = stats.ttest_ind(rvs1.reshape((100, 5)),
rvs5, permutations=1000)
assert_array_almost_equal(np_t_stats,
np.array([0.012733, 0.393926, 0.208261,
0.050528, 1.111482]))
assert_array_almost_equal(pvalues,
np.array([0.988012, 0.686314, 0.81019,
0.963037, 0.25974]))
def test_ttest_ind_permutations():
# Test on horizontal dimension
N = 20
np.random.seed(0)
a = np.vstack((np.arange(3*N//4), np.random.random(3*N//4)))
b = np.vstack((np.arange(N//4) + 100, np.random.random(N//4)))
p_t_stats, pvalues = stats.ttest_ind(a, b, axis=1, equal_var=False)
np_t_stats, pvalues = stats.ttest_ind(a, b, axis=1, equal_var=False,
permutations=1000,
random_state=0)
assert_array_almost_equal(p_t_stats, np_t_stats, 5)
assert_array_almost_equal(pvalues, array([0.000999, 0.69031]))
# Test on vertical dimension
N = 20
np.random.seed(0)
a = np.vstack((np.arange((3*N)//4), np.random.random(3*N//4))).transpose()
b = np.vstack((np.arange(N//4) + 100, np.random.random(N//4))).transpose()
p_t_stats, pvalues = stats.ttest_ind(a, b, axis=0, equal_var=False)
np_t_stats, pvalues = stats.ttest_ind(a, b, axis=0, equal_var=False,
permutations=1000,
random_state=0)
assert_array_almost_equal(p_t_stats, np_t_stats, 5)
assert_array_almost_equal(pvalues, array([0.000999, 0.69031]))
# Test on 1 dimensional case
N = 20
np.random.seed(0)
a = np.arange(3*N//4)
b = np.arange(N//4) + 100
p_t_stats, pvalues = stats.ttest_ind(a, b, equal_var=False)
np_t_stats, pvalues = stats.ttest_ind(a, b, equal_var=False,
permutations=1000,
random_state=0)
assert_array_almost_equal(p_t_stats, np_t_stats, 5)
assert_array_almost_equal(pvalues, 0.000999)
# Test just arrays
N = 20
np.random.seed(0)
a = range(3*N//4)
b = range(100, N//4 + 100)
p_t_stats, pvalues = stats.ttest_ind(a, b, equal_var=False)
np_t_stats, pvalues = stats.ttest_ind(a, b, equal_var=False,
permutations=1000,
random_state=0)
assert_array_almost_equal(p_t_stats, np_t_stats, 5)
assert_array_almost_equal(pvalues, 0.000999)
# Test equal variance
np.random.seed(0)
a = np.arange(10)
b = np.arange(10) + 100
p_t_stats, pvalues = stats.ttest_ind(a, b, equal_var=True)
np_t_stats, pvalues = stats.ttest_ind(a, b, equal_var=True,
permutations=1000,
random_state=0)
assert_array_almost_equal(p_t_stats, np_t_stats, 5)
assert_array_almost_equal(pvalues, 0.000999)
# Test out random seed
N = 20
a = np.vstack((np.arange(3*N//4), np.random.random(3*N//4)))
b = np.vstack((np.arange(N//4) + 100, np.random.random(N//4)))
p_t_stats, pvalues = stats.ttest_ind(a, b, axis=1, equal_var=False)
np_t_stats, pvalues = stats.ttest_ind(a, b, axis=1, equal_var=False,
permutations=1000,
random_state=np.random.RandomState(seed=0))
assert_array_almost_equal(p_t_stats, np_t_stats, 5)
assert_array_almost_equal(pvalues, array([0.000999, 0.69031]))
def test_ttest_ind():
# regression test
tr = 1.0912746897927283
pr = 0.27647818616351882
tpr = ([tr,-tr],[pr,pr])
rvs2 = np.linspace(1,100,100)
rvs1 = np.linspace(5,105,100)
rvs1_2D = np.array([rvs1, rvs2])
rvs2_2D = np.array([rvs2, rvs1])
t,p = stats.ttest_ind(rvs1, rvs2, axis=0)
assert_array_almost_equal([t,p],(tr,pr))
# test from_stats API
assert_array_almost_equal(stats.ttest_ind_from_stats(*_desc_stats(rvs1,
rvs2)),
[t, p])
t,p = stats.ttest_ind(rvs1_2D.T, rvs2_2D.T, axis=0)
assert_array_almost_equal([t,p],tpr)
args = _desc_stats(rvs1_2D.T, rvs2_2D.T)
assert_array_almost_equal(stats.ttest_ind_from_stats(*args),
[t, p])
t,p = stats.ttest_ind(rvs1_2D, rvs2_2D, axis=1)
assert_array_almost_equal([t,p],tpr)
args = _desc_stats(rvs1_2D, rvs2_2D, axis=1)
assert_array_almost_equal(stats.ttest_ind_from_stats(*args),
[t, p])
# test on 3 dimensions
rvs1_3D = np.dstack([rvs1_2D,rvs1_2D,rvs1_2D])
rvs2_3D = np.dstack([rvs2_2D,rvs2_2D,rvs2_2D])
t,p = stats.ttest_ind(rvs1_3D, rvs2_3D, axis=1)
assert_almost_equal(np.abs(t), np.abs(tr))
assert_array_almost_equal(np.abs(p), pr)
assert_equal(t.shape, (2, 3))
t,p = stats.ttest_ind(np.rollaxis(rvs1_3D,2), np.rollaxis(rvs2_3D,2), axis=2)
assert_array_almost_equal(np.abs(t), np.abs(tr))
assert_array_almost_equal(np.abs(p), pr)
assert_equal(t.shape, (3, 2))
olderr = np.seterr(all='ignore')
try:
# test zero division problem
t,p = stats.ttest_ind([0,0,0],[1,1,1])
assert_equal((np.abs(t),p), (np.inf, 0))
assert_equal(stats.ttest_ind([0,0,0], [0,0,0]), (np.nan, np.nan))
        # check that a nan in the input array results in a nan output
anan = np.array([[1,np.nan],[-1,1]])
assert_equal(stats.ttest_ind(anan, np.zeros((2,2))),([0, np.nan], [1,np.nan]))
finally:
np.seterr(**olderr)
def test_ttest_ind_with_uneq_var():
# check vs. R
a = (1, 2, 3)
b = (1.1, 2.9, 4.2)
pr = 0.53619490753126731
tr = -0.68649512735572582
t, p = stats.ttest_ind(a, b, equal_var=False)
assert_array_almost_equal([t,p], [tr, pr])
# test from desc stats API
assert_array_almost_equal(stats.ttest_ind_from_stats(*_desc_stats(a, b),
equal_var=False),
[t, p])
a = (1, 2, 3, 4)
pr = 0.84354139131608286
tr = -0.2108663315950719
t, p = stats.ttest_ind(a, b, equal_var=False)
assert_array_almost_equal([t,p], [tr, pr])
assert_array_almost_equal(stats.ttest_ind_from_stats(*_desc_stats(a, b),
equal_var=False),
[t, p])
# regression test
tr = 1.0912746897927283
tr_uneq_n = 0.66745638708050492
pr = 0.27647831993021388
pr_uneq_n = 0.50873585065616544
tpr = ([tr,-tr],[pr,pr])
rvs3 = np.linspace(1,100, 25)
rvs2 = np.linspace(1,100,100)
rvs1 = np.linspace(5,105,100)
rvs1_2D = np.array([rvs1, rvs2])
rvs2_2D = np.array([rvs2, rvs1])
t,p = stats.ttest_ind(rvs1, rvs2, axis=0, equal_var=False)
assert_array_almost_equal([t,p],(tr,pr))
assert_array_almost_equal(stats.ttest_ind_from_stats(*_desc_stats(rvs1,
rvs2),
equal_var=False),
(t, p))
t,p = stats.ttest_ind(rvs1, rvs3, axis=0, equal_var=False)
assert_array_almost_equal([t,p], (tr_uneq_n, pr_uneq_n))
assert_array_almost_equal(stats.ttest_ind_from_stats(*_desc_stats(rvs1,
rvs3),
equal_var=False),
(t, p))
t,p = stats.ttest_ind(rvs1_2D.T, rvs2_2D.T, axis=0, equal_var=False)
assert_array_almost_equal([t,p],tpr)
args = _desc_stats(rvs1_2D.T, rvs2_2D.T)
assert_array_almost_equal(stats.ttest_ind_from_stats(*args,
equal_var=False),
(t, p))
t,p = stats.ttest_ind(rvs1_2D, rvs2_2D, axis=1, equal_var=False)
assert_array_almost_equal([t,p],tpr)
args = _desc_stats(rvs1_2D, rvs2_2D, axis=1)
assert_array_almost_equal(stats.ttest_ind_from_stats(*args,
equal_var=False),
(t, p))
# test on 3 dimensions
rvs1_3D = np.dstack([rvs1_2D,rvs1_2D,rvs1_2D])
rvs2_3D = np.dstack([rvs2_2D,rvs2_2D,rvs2_2D])
t,p = stats.ttest_ind(rvs1_3D, rvs2_3D, axis=1, equal_var=False)
assert_almost_equal(np.abs(t), np.abs(tr))
assert_array_almost_equal(np.abs(p), pr)
assert_equal(t.shape, (2, 3))
args = _desc_stats(rvs1_3D, rvs2_3D, axis=1)
t, p = stats.ttest_ind_from_stats(*args, equal_var=False)
assert_almost_equal(np.abs(t), np.abs(tr))
assert_array_almost_equal(np.abs(p), pr)
assert_equal(t.shape, (2, 3))
t,p = stats.ttest_ind(np.rollaxis(rvs1_3D,2), np.rollaxis(rvs2_3D,2),
axis=2, equal_var=False)
assert_array_almost_equal(np.abs(t), np.abs(tr))
assert_array_almost_equal(np.abs(p), pr)
assert_equal(t.shape, (3, 2))
args = _desc_stats(np.rollaxis(rvs1_3D, 2),
np.rollaxis(rvs2_3D, 2), axis=2)
t, p = stats.ttest_ind_from_stats(*args, equal_var=False)
assert_array_almost_equal(np.abs(t), np.abs(tr))
assert_array_almost_equal(np.abs(p), pr)
assert_equal(t.shape, (3, 2))
olderr = np.seterr(all='ignore')
try:
# test zero division problem
t,p = stats.ttest_ind([0,0,0],[1,1,1], equal_var=False)
assert_equal((np.abs(t),p), (np.inf, 0))
assert_equal(stats.ttest_ind([0,0,0], [0,0,0], equal_var=False), (np.nan, np.nan))
        # check that a nan in the input array results in a nan output
anan = np.array([[1,np.nan],[-1,1]])
assert_equal(stats.ttest_ind(anan, np.zeros((2,2)), equal_var=False),
([0, np.nan], [1,np.nan]))
finally:
np.seterr(**olderr)
def test_ttest_1samp_new():
n1, n2, n3 = (10,15,20)
rvn1 = stats.norm.rvs(loc=5,scale=10,size=(n1,n2,n3))
# check multidimensional array and correct axis handling
    # a deterministic rvn1 (as in test_ttest_rel) would be better
t1,p1 = stats.ttest_1samp(rvn1[:,:,:], np.ones((n2,n3)),axis=0)
t2,p2 = stats.ttest_1samp(rvn1[:,:,:], 1,axis=0)
t3,p3 = stats.ttest_1samp(rvn1[:,0,0], 1)
assert_array_almost_equal(t1,t2, decimal=14)
assert_almost_equal(t1[0,0],t3, decimal=14)
assert_equal(t1.shape, (n2,n3))
t1,p1 = stats.ttest_1samp(rvn1[:,:,:], np.ones((n1,n3)),axis=1)
t2,p2 = stats.ttest_1samp(rvn1[:,:,:], 1,axis=1)
t3,p3 = stats.ttest_1samp(rvn1[0,:,0], 1)
assert_array_almost_equal(t1,t2, decimal=14)
assert_almost_equal(t1[0,0],t3, decimal=14)
assert_equal(t1.shape, (n1,n3))
t1,p1 = stats.ttest_1samp(rvn1[:,:,:], np.ones((n1,n2)),axis=2)
t2,p2 = stats.ttest_1samp(rvn1[:,:,:], 1,axis=2)
t3,p3 = stats.ttest_1samp(rvn1[0,0,:], 1)
assert_array_almost_equal(t1,t2, decimal=14)
assert_almost_equal(t1[0,0],t3, decimal=14)
assert_equal(t1.shape, (n1,n2))
olderr = np.seterr(all='ignore')
try:
# test zero division problem
t,p = stats.ttest_1samp([0,0,0], 1)
assert_equal((np.abs(t),p), (np.inf, 0))
assert_equal(stats.ttest_1samp([0,0,0], 0), (np.nan, np.nan))
        # check that a nan in the input array results in a nan output
anan = np.array([[1,np.nan],[-1,1]])
assert_equal(stats.ttest_1samp(anan, 0),([0, np.nan], [1,np.nan]))
finally:
np.seterr(**olderr)
class TestDescribe(TestCase):
def test_describe_numbers(self):
x = np.vstack((np.ones((3,4)), 2 * np.ones((2,4))))
nc, mmc = (5, ([1., 1., 1., 1.], [2., 2., 2., 2.]))
mc = np.array([1.4, 1.4, 1.4, 1.4])
vc = np.array([0.3, 0.3, 0.3, 0.3])
skc = [0.40824829046386357] * 4
kurtc = [-1.833333333333333] * 4
n, mm, m, v, sk, kurt = stats.describe(x)
assert_equal(n, nc)
assert_equal(mm, mmc)
assert_equal(m, mc)
assert_equal(v, vc)
# not sure about precision with sk, skc
assert_array_almost_equal(sk, skc, decimal=13)
assert_array_almost_equal(kurt, kurtc, decimal=13)
n, mm, m, v, sk, kurt = stats.describe(x.T, axis=1)
assert_equal(n, nc)
assert_equal(mm, mmc)
assert_equal(m, mc)
assert_equal(v, vc)
# not sure about precision with sk, skc
assert_array_almost_equal(sk, skc, decimal=13)
assert_array_almost_equal(kurt, kurtc, decimal=13)
def test_describe_result_attributes(self):
actual = stats.describe(np.arange(5))
attributes = ('nobs', 'minmax', 'mean', 'variance', 'skewness',
'kurtosis')
for i, attr in enumerate(attributes):
assert_equal(actual[i], getattr(actual, attr))
def test_describe_typename(self):
actual = stats.describe(np.arange(5))
assert_equal(str(actual)[:8], 'Describe')
def test_normalitytests():
# numbers verified with R: dagoTest in package fBasics
st_normal, st_skew, st_kurt = (3.92371918, 1.98078826, -0.01403734)
pv_normal, pv_skew, pv_kurt = (0.14059673, 0.04761502, 0.98880019)
x = np.array((-2,-1,0,1,2,3)*4)**2
yield assert_array_almost_equal, stats.normaltest(x), (st_normal, pv_normal)
yield assert_array_almost_equal, stats.skewtest(x), (st_skew, pv_skew)
yield assert_array_almost_equal, stats.kurtosistest(x), (st_kurt, pv_kurt)
# Test axis=None (equal to axis=0 for 1-D input)
yield (assert_array_almost_equal, stats.normaltest(x, axis=None),
(st_normal, pv_normal))
yield (assert_array_almost_equal, stats.skewtest(x, axis=None),
(st_skew, pv_skew))
yield (assert_array_almost_equal, stats.kurtosistest(x, axis=None),
(st_kurt, pv_kurt))
class TestJarqueBera(TestCase):
def test_jarque_bera_stats(self):
np.random.seed(987654321)
x = np.random.normal(0, 1, 100000)
y = np.random.chisquare(10000, 100000)
z = np.random.rayleigh(1, 100000)
assert_(stats.jarque_bera(x)[1] > stats.jarque_bera(y)[1])
assert_(stats.jarque_bera(x)[1] > stats.jarque_bera(z)[1])
assert_(stats.jarque_bera(y)[1] > stats.jarque_bera(z)[1])
def test_jarque_bera_array_like(self):
np.random.seed(987654321)
x = np.random.normal(0, 1, 100000)
JB1, p1 = stats.jarque_bera(list(x))
JB2, p2 = stats.jarque_bera(tuple(x))
JB3, p3 = stats.jarque_bera(x.reshape(2, 50000))
assert_(JB1 == JB2 == JB3)
assert_(p1 == p2 == p3)
def test_jarque_bera_size(self):
assert_raises(ValueError, stats.jarque_bera, [])
def test_skewtest_too_few_samples():
# Regression test for ticket #1492.
# skewtest requires at least 8 samples; 7 should raise a ValueError.
x = np.arange(7.0)
assert_raises(ValueError, stats.skewtest, x)
def test_kurtosistest_too_few_samples():
# Regression test for ticket #1425.
# kurtosistest requires at least 5 samples; 4 should raise a ValueError.
x = np.arange(4.0)
assert_raises(ValueError, stats.kurtosistest, x)
def test_mannwhitneyu():
x = np.array([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 2., 1., 1., 2., 1., 1.,
2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 3., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1.])
y = np.array([1., 1., 1., 1., 1., 1., 1., 2., 1., 2., 1., 1., 1.,
1., 2., 1., 1., 1., 2., 1., 1., 1., 1., 1., 2., 1., 1., 3., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1., 2., 1., 1., 1., 1.,
1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 2., 1., 1., 1., 1., 1., 2., 2., 1., 1., 2., 1., 1., 2.,
1., 2., 1., 1., 1., 1., 2., 2., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 2., 2., 2., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 2., 1., 1., 2., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 2., 1., 1., 1., 1., 1.,
1.])
# p-value verified with matlab and R to 5 significant digits
assert_array_almost_equal(stats.stats.mannwhitneyu(x,y),
(16980.5, 2.8214327656317373e-005), decimal=12)
def test_pointbiserial():
# same as mstats test except for the nan
# Test data: http://support.sas.com/ctx/samples/index.jsp?sid=490&tab=output
x = [1,0,1,1,1,1,0,1,0,0,0,1,1,0,0,0,1,1,1,0,0,0,0,0,0,0,0,1,0,
0,0,0,0,1]
y = [14.8,13.8,12.4,10.1,7.1,6.1,5.8,4.6,4.3,3.5,3.3,3.2,3.0,
2.8,2.8,2.5,2.4,2.3,2.1,1.7,1.7,1.5,1.3,1.3,1.2,1.2,1.1,
0.8,0.7,0.6,0.5,0.2,0.2,0.1]
assert_almost_equal(stats.pointbiserialr(x, y)[0], 0.36149, 5)
def test_obrientransform():
# A couple tests calculated by hand.
x1 = np.array([0, 2, 4])
t1 = stats.obrientransform(x1)
expected = [7, -2, 7]
assert_allclose(t1[0], expected)
x2 = np.array([0, 3, 6, 9])
t2 = stats.obrientransform(x2)
expected = np.array([30, 0, 0, 30])
assert_allclose(t2[0], expected)
# Test two arguments.
a, b = stats.obrientransform(x1, x2)
assert_equal(a, t1[0])
assert_equal(b, t2[0])
# Test three arguments.
a, b, c = stats.obrientransform(x1, x2, x1)
assert_equal(a, t1[0])
assert_equal(b, t2[0])
assert_equal(c, t1[0])
# This is a regression test to check np.var replacement.
# The author of this test didn't separately verify the numbers.
x1 = np.arange(5)
result = np.array(
[[5.41666667, 1.04166667, -0.41666667, 1.04166667, 5.41666667],
[21.66666667, 4.16666667, -1.66666667, 4.16666667, 21.66666667]])
assert_array_almost_equal(stats.obrientransform(x1, 2*x1), result, decimal=8)
# Example from "O'Brien Test for Homogeneity of Variance"
# by Herve Abdi.
values = range(5, 11)
reps = np.array([5, 11, 9, 3, 2, 2])
data = np.repeat(values, reps)
transformed_values = np.array([3.1828, 0.5591, 0.0344,
1.6086, 5.2817, 11.0538])
expected = np.repeat(transformed_values, reps)
result = stats.obrientransform(data)
assert_array_almost_equal(result[0], expected, decimal=4)
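    # Reading aid: the transform is commonly written as
    #     r = ((n - 1.5)*n*(x - xbar)**2 - 0.5*v*(n - 1)) / ((n - 1)*(n - 2)),
    # with v the sample variance.  For x1 = [0, 2, 4] (n=3, xbar=2, v=4) this gives
    # ((1.5*3*4 - 4)/2, (0 - 4)/2, (1.5*3*4 - 4)/2) = (7, -2, 7), matching `expected`.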
class HarMeanTestCase:
def test_1dlist(self):
# Test a 1d list
a = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
b = 34.1417152147
self.do(a, b)
def test_1darray(self):
# Test a 1d array
a = np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100])
b = 34.1417152147
self.do(a, b)
def test_1dma(self):
# Test a 1d masked array
a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100])
b = 34.1417152147
self.do(a, b)
def test_1dmavalue(self):
# Test a 1d masked array with a masked value
a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100],
mask=[0,0,0,0,0,0,0,0,0,1])
b = 31.8137186141
self.do(a, b)
# Note the next tests use axis=None as default, not axis=0
def test_2dlist(self):
# Test a 2d list
a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
b = 38.6696271841
self.do(a, b)
def test_2darray(self):
# Test a 2d array
a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
b = 38.6696271841
self.do(np.array(a), b)
def test_2dma(self):
# Test a 2d masked array
a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
b = 38.6696271841
self.do(np.ma.array(a), b)
def test_2daxis0(self):
# Test a 2d list with axis=0
a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
b = np.array([22.88135593, 39.13043478, 52.90076336, 65.45454545])
self.do(a, b, axis=0)
def test_2daxis1(self):
# Test a 2d list with axis=1
a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
b = np.array([19.2, 63.03939962, 103.80078637])
self.do(a, b, axis=1)
    def test_2dmatrixaxis0(self):
# Test a 2d list with axis=0
a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
b = np.matrix([[22.88135593, 39.13043478, 52.90076336, 65.45454545]])
self.do(np.matrix(a), b, axis=0)
def test_2dmatrixaxis1(self):
# Test a 2d list with axis=1
a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
b = np.matrix([[19.2, 63.03939962, 103.80078637]]).T
self.do(np.matrix(a), b, axis=1)
class TestHarMean(HarMeanTestCase, TestCase):
def do(self, a, b, axis=None, dtype=None):
x = stats.hmean(a, axis=axis, dtype=dtype)
assert_almost_equal(b, x)
assert_equal(x.dtype, dtype)
class GeoMeanTestCase:
def test_1dlist(self):
# Test a 1d list
a = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
b = 45.2872868812
self.do(a, b)
def test_1darray(self):
# Test a 1d array
a = np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100])
b = 45.2872868812
self.do(a, b)
def test_1dma(self):
# Test a 1d masked array
a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100])
b = 45.2872868812
self.do(a, b)
def test_1dmavalue(self):
# Test a 1d masked array with a masked value
a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100], mask=[0,0,0,0,0,0,0,0,0,1])
b = 41.4716627439
self.do(a, b)
# Note the next tests use axis=None as default, not axis=0
def test_2dlist(self):
# Test a 2d list
a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
b = 52.8885199
self.do(a, b)
def test_2darray(self):
# Test a 2d array
a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
b = 52.8885199
self.do(np.array(a), b)
def test_2dma(self):
# Test a 2d masked array
a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
b = 52.8885199
self.do(np.ma.array(a), b)
def test_2daxis0(self):
# Test a 2d list with axis=0
a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
b = np.array([35.56893304, 49.32424149, 61.3579244, 72.68482371])
self.do(a, b, axis=0)
def test_2daxis1(self):
# Test a 2d list with axis=1
a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
b = np.array([22.13363839, 64.02171746, 104.40086817])
self.do(a, b, axis=1)
    def test_2dmatrixaxis0(self):
# Test a 2d list with axis=0
a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
b = np.matrix([[35.56893304, 49.32424149, 61.3579244, 72.68482371]])
self.do(np.matrix(a), b, axis=0)
def test_2dmatrixaxis1(self):
# Test a 2d list with axis=1
a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
b = np.matrix([[22.13363839, 64.02171746, 104.40086817]]).T
self.do(np.matrix(a), b, axis=1)
def test_1dlist0(self):
# Test a 1d list with zero element
a = [10, 20, 30, 40, 50, 60, 70, 80, 90, 0]
b = 0.0 # due to exp(-inf)=0
olderr = np.seterr(all='ignore')
try:
self.do(a, b)
finally:
np.seterr(**olderr)
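        # Reading aid: gmean is computed as exp(mean(log(a))), so a zero in a
        # plain array drives the result to exp(-inf) == 0.  For the np.ma inputs
        # below, np.ma.log masks non-positive entries, so only the nine valid
        # elements contribute (hence 41.4716...).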
def test_1darray0(self):
# Test a 1d array with zero element
a = np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 0])
b = 0.0 # due to exp(-inf)=0
olderr = np.seterr(all='ignore')
try:
self.do(a, b)
finally:
np.seterr(**olderr)
def test_1dma0(self):
# Test a 1d masked array with zero element
a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 0])
b = 41.4716627439
olderr = np.seterr(all='ignore')
try:
self.do(a, b)
finally:
np.seterr(**olderr)
def test_1dmainf(self):
# Test a 1d masked array with negative element
a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, -1])
b = 41.4716627439
olderr = np.seterr(all='ignore')
try:
self.do(a, b)
finally:
np.seterr(**olderr)
class TestGeoMean(GeoMeanTestCase, TestCase):
def do(self, a, b, axis=None, dtype=None):
# Note this doesn't test when axis is not specified
x = stats.gmean(a, axis=axis, dtype=dtype)
assert_almost_equal(b, x)
assert_equal(x.dtype, dtype)
def test_binomtest():
# precision tests compared to R for ticket:986
pp = np.concatenate((np.linspace(0.1,0.2,5), np.linspace(0.45,0.65,5),
np.linspace(0.85,0.95,5)))
n = 501
x = 450
results = [0.0, 0.0, 1.0159969301994141e-304,
2.9752418572150531e-275, 7.7668382922535275e-250,
2.3381250925167094e-099, 7.8284591587323951e-081,
9.9155947819961383e-065, 2.8729390725176308e-050,
1.7175066298388421e-037, 0.0021070691951093692,
0.12044570587262322, 0.88154763174802508, 0.027120993063129286,
2.6102587134694721e-006]
for p, res in zip(pp,results):
assert_approx_equal(stats.binom_test(x, n, p), res,
                            significant=12, err_msg='fail for p=%f' % p)
assert_approx_equal(stats.binom_test(50,100,0.1), 5.8320387857343647e-024,
                        significant=12, err_msg='fail for p=0.1')
def test_binomtest2():
# test added for issue #2384
res2 = [
[1.0, 1.0],
[0.5,1.0,0.5],
[0.25,1.00,1.00,0.25],
[0.125,0.625,1.000,0.625,0.125],
[0.0625,0.3750,1.0000,1.0000,0.3750,0.0625],
[0.03125,0.21875,0.68750,1.00000,0.68750,0.21875,0.03125],
[0.015625,0.125000,0.453125,1.000000,1.000000,0.453125,0.125000,0.015625],
[0.0078125,0.0703125,0.2890625,0.7265625,1.0000000,0.7265625,0.2890625,
0.0703125,0.0078125],
[0.00390625,0.03906250,0.17968750,0.50781250,1.00000000,1.00000000,
0.50781250,0.17968750,0.03906250,0.00390625],
[0.001953125,0.021484375,0.109375000,0.343750000,0.753906250,1.000000000,
0.753906250,0.343750000,0.109375000,0.021484375,0.001953125]
]
for k in range(1, 11):
res1 = [stats.binom_test(v, k, 0.5) for v in range(k + 1)]
assert_almost_equal(res1, res2[k-1], decimal=10)
def test_binomtest3():
# test added for issue #2384
# test when x == n*p and neighbors
res3 = [stats.binom_test(v, v*k, 1./k) for v in range(1, 11)
for k in range(2, 11)]
assert_equal(res3, np.ones(len(res3), int))
#> bt=c()
#> for(i in as.single(1:10)){for(k in as.single(2:10)){bt = c(bt, binom.test(i-1, k*i,(1/k))$p.value); print(c(i+1, k*i,(1/k)))}}
binom_testm1 = np.array([
0.5, 0.5555555555555556, 0.578125, 0.5904000000000003,
0.5981224279835393, 0.603430543396034, 0.607304096221924,
0.610255656871054, 0.612579511000001, 0.625, 0.670781893004115,
0.68853759765625, 0.6980101120000006, 0.703906431368616,
0.70793209416498, 0.7108561134173507, 0.713076544331419,
0.714820192935702, 0.6875, 0.7268709038256367, 0.7418963909149174,
0.74986110468096, 0.7548015520398076, 0.7581671424768577,
0.760607984787832, 0.762459425024199, 0.7639120677676575, 0.7265625,
0.761553963657302, 0.774800934828818, 0.7818005980538996,
0.78613491480358, 0.789084353140195, 0.7912217659828884,
0.79284214559524, 0.794112956558801, 0.75390625, 0.7856929451142176,
0.7976688481430754, 0.8039848974727624, 0.807891868948366,
0.8105487660137676, 0.812473307174702, 0.8139318233591120,
0.815075399104785, 0.7744140625, 0.8037322594985427,
0.814742863657656, 0.8205425178645808, 0.8241275984172285,
0.8265645374416, 0.8283292196088257, 0.829666291102775,
0.8307144686362666, 0.7905273437499996, 0.8178712053954738,
0.828116983756619, 0.833508948940494, 0.8368403871552892,
0.839104213210105, 0.840743186196171, 0.84198481438049,
0.8429580531563676, 0.803619384765625, 0.829338573944648,
0.8389591907548646, 0.84401876783902, 0.84714369697889,
0.8492667010581667, 0.850803474598719, 0.851967542858308,
0.8528799045949524, 0.8145294189453126, 0.838881732845347,
0.847979024541911, 0.852760894015685, 0.8557134656773457,
0.8577190131799202, 0.85917058278431, 0.860270010472127,
0.861131648404582, 0.823802947998047, 0.846984756807511,
0.855635653643743, 0.860180994825685, 0.86298688573253,
0.864892525675245, 0.866271647085603, 0.867316125625004,
0.8681346531755114
])
# > bt=c()
# > for(i in as.single(1:10)){for(k in as.single(2:10)){bt = c(bt, binom.test(i+1, k*i,(1/k))$p.value); print(c(i+1, k*i,(1/k)))}}
binom_testp1 = np.array([
0.5, 0.259259259259259, 0.26171875, 0.26272, 0.2632244513031551,
0.2635138663069203, 0.2636951804161073, 0.2638162407564354,
0.2639010709000002, 0.625, 0.4074074074074074, 0.42156982421875,
0.4295746560000003, 0.43473045988554, 0.4383309503172684,
0.4409884859402103, 0.4430309389962837, 0.444649849401104, 0.6875,
0.4927602499618962, 0.5096031427383425, 0.5189636628480,
0.5249280070771274, 0.5290623300865124, 0.5320974248125793,
0.5344204730474308, 0.536255847400756, 0.7265625, 0.5496019313526808,
0.5669248746708034, 0.576436455045805, 0.5824538812831795,
0.5866053321547824, 0.589642781414643, 0.5919618019300193,
0.593790427805202, 0.75390625, 0.590868349763505, 0.607983393277209,
0.617303847446822, 0.623172512167948, 0.627208862156123,
0.6301556891501057, 0.632401894928977, 0.6341708982290303,
0.7744140625, 0.622562037497196, 0.639236102912278, 0.648263335014579,
0.65392850011132, 0.657816519817211, 0.660650782947676,
0.662808780346311, 0.6645068560246006, 0.7905273437499996,
0.6478843304312477, 0.6640468318879372, 0.6727589686071775,
0.6782129857784873, 0.681950188903695, 0.684671508668418,
0.686741824999918, 0.688369886732168, 0.803619384765625,
0.668716055304315, 0.684360013879534, 0.6927642396829181,
0.6980155964704895, 0.701609591890657, 0.7042244320992127,
0.7062125081341817, 0.707775152962577, 0.8145294189453126,
0.686243374488305, 0.7013873696358975, 0.709501223328243,
0.714563595144314, 0.718024953392931, 0.7205416252126137,
0.722454130389843, 0.723956813292035, 0.823802947998047,
0.701255953767043, 0.715928221686075, 0.723772209289768,
0.7286603031173616, 0.7319999279787631, 0.7344267920995765,
0.736270323773157, 0.737718376096348
])
res4_p1 = [stats.binom_test(v+1, v*k, 1./k) for v in range(1, 11)
for k in range(2, 11)]
res4_m1 = [stats.binom_test(v-1, v*k, 1./k) for v in range(1, 11)
for k in range(2, 11)]
assert_almost_equal(res4_p1, binom_testp1, decimal=13)
assert_almost_equal(res4_m1, binom_testm1, decimal=13)
class TestTrim(object):
# test trim functions
def test_trim1(self):
a = np.arange(11)
assert_equal(stats.trim1(a, 0.1), np.arange(10))
assert_equal(stats.trim1(a, 0.2), np.arange(9))
assert_equal(stats.trim1(a, 0.2, tail='left'), np.arange(2,11))
assert_equal(stats.trim1(a, 3/11., tail='left'), np.arange(3,11))
def test_trimboth(self):
a = np.arange(11)
assert_equal(stats.trimboth(a, 3/11.), np.arange(3,8))
assert_equal(stats.trimboth(a, 0.2), np.array([2, 3, 4, 5, 6, 7, 8]))
assert_equal(stats.trimboth(np.arange(24).reshape(6,4), 0.2),
np.arange(4,20).reshape(4,4))
assert_equal(stats.trimboth(np.arange(24).reshape(4,6).T, 2/6.),
np.array([[2, 8, 14, 20],[3, 9, 15, 21]]))
assert_raises(ValueError, stats.trimboth,
np.arange(24).reshape(4,6).T, 4/6.)
def test_trim_mean(self):
# don't use pre-sorted arrays
a = np.array([4, 8, 2, 0, 9, 5, 10, 1, 7, 3, 6])
idx = np.array([3, 5, 0, 1, 2, 4])
a2 = np.arange(24).reshape(6, 4)[idx, :]
a3 = np.arange(24).reshape(6, 4, order='F')[idx, :]
assert_equal(stats.trim_mean(a3, 2/6.),
np.array([2.5, 8.5, 14.5, 20.5]))
assert_equal(stats.trim_mean(a2, 2/6.),
np.array([10., 11., 12., 13.]))
idx4 = np.array([1, 0, 3, 2])
a4 = np.arange(24).reshape(4, 6)[idx4, :]
assert_equal(stats.trim_mean(a4, 2/6.),
np.array([9., 10., 11., 12., 13., 14.]))
# shuffled arange(24) as array_like
a = [7, 11, 12, 21, 16, 6, 22, 1, 5, 0, 18, 10, 17, 9, 19, 15, 23,
20, 2, 14, 4, 13, 8, 3]
assert_equal(stats.trim_mean(a, 2/6.), 11.5)
assert_equal(stats.trim_mean([5,4,3,1,2,0], 2/6.), 2.5)
# check axis argument
np.random.seed(1234)
a = np.random.randint(20, size=(5, 6, 4, 7))
for axis in [0, 1, 2, 3, -1]:
res1 = stats.trim_mean(a, 2/6., axis=axis)
res2 = stats.trim_mean(np.rollaxis(a, axis), 2/6.)
assert_equal(res1, res2)
res1 = stats.trim_mean(a, 2/6., axis=None)
res2 = stats.trim_mean(a.ravel(), 2/6.)
assert_equal(res1, res2)
assert_raises(ValueError, stats.trim_mean, a, 0.6)
class TestSigmaClip(object):
def test_sigmaclip1(self):
a = np.concatenate((np.linspace(9.5,10.5,31),np.linspace(0,20,5)))
fact = 4 # default
c, low, upp = stats.sigmaclip(a)
assert_(c.min() > low)
assert_(c.max() < upp)
assert_equal(low, c.mean() - fact*c.std())
assert_equal(upp, c.mean() + fact*c.std())
assert_equal(c.size, a.size)
def test_sigmaclip2(self):
a = np.concatenate((np.linspace(9.5,10.5,31),np.linspace(0,20,5)))
fact = 1.5
c, low, upp = stats.sigmaclip(a, fact, fact)
assert_(c.min() > low)
assert_(c.max() < upp)
assert_equal(low, c.mean() - fact*c.std())
assert_equal(upp, c.mean() + fact*c.std())
assert_equal(c.size, 4)
assert_equal(a.size, 36) # check original array unchanged
def test_sigmaclip3(self):
a = np.concatenate((np.linspace(9.5,10.5,11),np.linspace(-100,-50,3)))
fact = 1.8
c, low, upp = stats.sigmaclip(a, fact, fact)
assert_(c.min() > low)
assert_(c.max() < upp)
assert_equal(low, c.mean() - fact*c.std())
assert_equal(upp, c.mean() + fact*c.std())
assert_equal(c, np.linspace(9.5,10.5,11))
class TestFOneWay(TestCase):
def test_trivial(self):
# A trivial test of stats.f_oneway, with F=0.
F, p = stats.f_oneway([0,2], [0,2])
assert_equal(F, 0.0)
def test_basic(self):
# Despite being a floating point calculation, this data should
# result in F being exactly 2.0.
F, p = stats.f_oneway([0,2], [2,4])
assert_equal(F, 2.0)
def test_large_integer_array(self):
a = np.array([655, 788], dtype=np.uint16)
b = np.array([789, 772], dtype=np.uint16)
F, p = stats.f_oneway(a, b)
assert_almost_equal(F, 0.77450216931805538)
class TestKruskal(TestCase):
def test_simple(self):
x = [1]
y = [2]
h, p = stats.kruskal(x, y)
assert_equal(h, 1.0)
assert_approx_equal(p, stats.chisqprob(h, 1))
h, p = stats.kruskal(np.array(x), np.array(y))
assert_equal(h, 1.0)
assert_approx_equal(p, stats.chisqprob(h, 1))
def test_basic(self):
x = [1, 3, 5, 7, 9]
y = [2, 4, 6, 8, 10]
h, p = stats.kruskal(x, y)
assert_approx_equal(h, 3./11, significant=10)
assert_approx_equal(p, stats.chisqprob(3./11, 1))
h, p = stats.kruskal(np.array(x), np.array(y))
assert_approx_equal(h, 3./11, significant=10)
assert_approx_equal(p, stats.chisqprob(3./11, 1))
def test_simple_tie(self):
x = [1]
y = [1, 2]
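        # Pooled ranks: the two 1's tie at average rank 1.5 and the 2 gets rank 3,
        # so the group mean ranks are 1.5 (x) and 2.25 (y).  With N = 3 the
        # 12/(N*(N+1)) factor is 1, giving H_uncorr = 1*1.5**2 + 2*2.25**2 - 3*(N+1),
        # and the tie correction is 1 - (t**3 - t)/(N**3 - N) = 1 - 6/24 = 0.75.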
h_uncorr = 1.5**2 + 2*2.25**2 - 12
corr = 0.75
expected = h_uncorr / corr # 0.5
h, p = stats.kruskal(x, y)
# Since the expression is simple and the exact answer is 0.5, it
# should be safe to use assert_equal().
assert_equal(h, expected)
def test_another_tie(self):
x = [1, 1, 1, 2]
y = [2, 2, 2, 2]
h_uncorr = (12. / 8. / 9.) * 4 * (3**2 + 6**2) - 3 * 9
corr = 1 - float(3**3 - 3 + 5**3 - 5) / (8**3 - 8)
expected = h_uncorr / corr
h, p = stats.kruskal(x, y)
assert_approx_equal(h, expected)
def test_three_groups(self):
# A test of stats.kruskal with three groups, with ties.
x = [1, 1, 1]
y = [2, 2, 2]
z = [2, 2]
h_uncorr = (12. / 8. / 9.) * (3*2**2 + 3*6**2 + 2*6**2) - 3 * 9 # 5.0
corr = 1 - float(3**3 - 3 + 5**3 - 5) / (8**3 - 8)
expected = h_uncorr / corr # 7.0
h, p = stats.kruskal(x, y, z)
assert_approx_equal(h, expected)
assert_approx_equal(p, stats.chisqprob(h, 2))
class TestCombinePvalues(TestCase):
def test_fisher(self):
        # Example of Fisher's method for combining p-values; see
        # https://en.wikipedia.org/wiki/Fisher%27s_method
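        # Fisher's statistic is -2*sum(log(p_i)), chi-squared with 2k dof:
        # -2*(log(.01) + log(.2) + log(.3)) ~= 14.84 on 6 dof gives p ~= 0.0216.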
xsq, p = stats.combine_pvalues([.01, .2, .3], method='fisher')
assert_approx_equal(p, 0.02156, significant=4)
def test_stouffer(self):
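        # Stouffer's method: Z = sum(norm.isf(p_i)) / sqrt(k); here
        # Z ~= (2.326 + 0.842 + 0.524) / sqrt(3) ~= 2.13, one-sided p ~= 0.0165.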
Z, p = stats.combine_pvalues([.01, .2, .3], method='stouffer')
assert_approx_equal(p, 0.01651, significant=4)
def test_stouffer2(self):
Z, p = stats.combine_pvalues([.5, .5, .5], method='stouffer')
assert_approx_equal(p, 0.5, significant=4)
def test_weighted_stouffer(self):
Z, p = stats.combine_pvalues([.01, .2, .3], method='stouffer',
weights=np.ones(3))
assert_approx_equal(p, 0.01651, significant=4)
def test_weighted_stouffer2(self):
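        # Weighted Stouffer: Z = sum(w_i * z_i) / sqrt(sum(w_i**2)); the larger
        # weights on the weaker p-values raise the combined p to roughly 0.146.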
Z, p = stats.combine_pvalues([.01, .2, .3], method='stouffer',
weights=np.array((1, 4, 9)))
assert_approx_equal(p, 0.1464, significant=4)
if __name__ == "__main__":
run_module_suite()
|
mortonjt/scipy
|
scipy/stats/tests/test_stats.py
|
Python
|
bsd-3-clause
| 124,000
|
from features_to_hdf5 import features_to_hdf5
from videos_to_hdf5 import *
|
EderSantana/seya
|
seya/preprocessing/__init__.py
|
Python
|
bsd-3-clause
| 75
|
""" Utilities for working with Images and common neuroimaging spaces
Images are very general things, and don't know anything about the kinds of
spaces they refer to via their coordinate map.
There are a set of common neuroimaging spaces. When we create neuroimaging
Images, we want to place them in neuroimaging spaces, and return information
about common neuroimaging spaces.
We do this by putting information about neuroimaging spaces in functions and
variables in the ``nipy.core.reference.spaces`` module, and in this module.
This keeps the specific neuroimaging spaces out of our Image object.
>>> from nipy.core.api import Image, vox2mni, rollimg, xyz_affine, as_xyz_image
Make a standard 4D xyzt image in MNI space.
First the data and affine:
>>> data = np.arange(24).reshape((1,2,3,4))
>>> affine = np.diag([2,3,4,1]).astype(float)
We can add the TR (==2.0) to make the full 5x5 affine we need
>>> img = Image(data, vox2mni(affine, 2.0))
>>> img.affine
array([[ 2., 0., 0., 0., 0.],
[ 0., 3., 0., 0., 0.],
[ 0., 0., 4., 0., 0.],
[ 0., 0., 0., 2., 0.],
[ 0., 0., 0., 0., 1.]])
In this case the neuroimaging 'xyz_affine' is just the 4x4 from the 5x5 in the image
>>> xyz_affine(img)
array([[ 2., 0., 0., 0.],
[ 0., 3., 0., 0.],
[ 0., 0., 4., 0.],
[ 0., 0., 0., 1.]])
However, if we roll time first in the image array, we can no longer get an
xyz_affine that makes sense in relation to the voxel data:
>>> img_t0 = rollimg(img, 't')
>>> xyz_affine(img_t0) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
AxesError: First 3 input axes must correspond to X, Y, Z
But we can fix this:
>>> img_t0_affable = as_xyz_image(img_t0)
>>> xyz_affine(img_t0_affable)
array([[ 2., 0., 0., 0.],
[ 0., 3., 0., 0.],
[ 0., 0., 4., 0.],
[ 0., 0., 0., 1.]])
It also works with nibabel images, which can only have xyz_affines:
>>> import nibabel as nib
>>> nimg = nib.Nifti1Image(data, affine)
>>> xyz_affine(nimg)
array([[ 2., 0., 0., 0.],
[ 0., 3., 0., 0.],
[ 0., 0., 4., 0.],
[ 0., 0., 0., 1.]])
"""
import sys
import numpy as np
from ...fixes.nibabel import io_orientation
from ...io import nibcompat
from ..image.image import Image
from ..reference import spaces as rsp
from ..reference.coordinate_map import AffineTransform
def xyz_affine(img, name2xyz=None):
""" Return xyz affine from image `img` if possible, or raise error
Parameters
----------
img : ``Image`` instance or nibabel image
        It has a ``coordmap`` attribute, an ``affine`` attribute or a ``get_affine`` method
name2xyz : None or mapping
Object such that ``name2xyz[ax_name]`` returns 'x', or 'y' or 'z' or
raises a KeyError for a str ``ax_name``. None means use module default.
Not used for nibabel `img` input.
Returns
-------
xyz_aff : (4,4) array
voxel to X, Y, Z affine mapping
Raises
------
SpaceTypeError : if `img` does not have an affine coordinate map
AxesError : if not all of x, y, z recognized in `img` ``coordmap`` range
AffineError : if axes dropped from the affine contribute to x, y, z
coordinates
Examples
--------
>>> from nipy.core.api import vox2mni, Image
>>> arr = np.arange(24).reshape((2,3,4,1)).astype(float)
>>> img = Image(arr, vox2mni(np.diag([2,3,4,5,1])))
>>> img.coordmap
AffineTransform(
function_domain=CoordinateSystem(coord_names=('i', 'j', 'k', 'l'), name='voxels', coord_dtype=float64),
function_range=CoordinateSystem(coord_names=('mni-x=L->R', 'mni-y=P->A', 'mni-z=I->S', 't'), name='mni', coord_dtype=float64),
affine=array([[ 2., 0., 0., 0., 0.],
[ 0., 3., 0., 0., 0.],
[ 0., 0., 4., 0., 0.],
[ 0., 0., 0., 5., 0.],
[ 0., 0., 0., 0., 1.]])
)
>>> xyz_affine(img)
array([[ 2., 0., 0., 0.],
[ 0., 3., 0., 0.],
[ 0., 0., 4., 0.],
[ 0., 0., 0., 1.]])
Nibabel images always have xyz affines
>>> import nibabel as nib
>>> nimg = nib.Nifti1Image(arr, np.diag([2,3,4,1]))
>>> xyz_affine(nimg)
array([[ 2., 0., 0., 0.],
[ 0., 3., 0., 0.],
[ 0., 0., 4., 0.],
[ 0., 0., 0., 1.]])
"""
if hasattr(img, 'coordmap'): # nipy image
return rsp.xyz_affine(img.coordmap, name2xyz)
return nibcompat.get_affine(img)
def is_xyz_affable(img, name2xyz=None):
""" Return True if the image `img` has an xyz affine
Parameters
----------
img : ``Image`` or nibabel ``SpatialImage``
If ``Image`` test ``img.coordmap``. If a nibabel image, return True
name2xyz : None or mapping
Object such that ``name2xyz[ax_name]`` returns 'x', or 'y' or 'z' or
raises a KeyError for a str ``ax_name``. None means use module default.
Not used for nibabel `img` input.
Returns
-------
tf : bool
True if `img` has an xyz affine, False otherwise
Examples
--------
>>> from nipy.core.api import vox2mni, Image, rollimg
>>> arr = np.arange(24).reshape((2,3,4,1))
>>> img = Image(arr, vox2mni(np.diag([2,3,4,5,1])))
>>> img.coordmap
AffineTransform(
function_domain=CoordinateSystem(coord_names=('i', 'j', 'k', 'l'), name='voxels', coord_dtype=float64),
function_range=CoordinateSystem(coord_names=('mni-x=L->R', 'mni-y=P->A', 'mni-z=I->S', 't'), name='mni', coord_dtype=float64),
affine=array([[ 2., 0., 0., 0., 0.],
[ 0., 3., 0., 0., 0.],
[ 0., 0., 4., 0., 0.],
[ 0., 0., 0., 5., 0.],
[ 0., 0., 0., 0., 1.]])
)
>>> is_xyz_affable(img)
True
>>> time0_img = rollimg(img, 't')
>>> time0_img.coordmap
AffineTransform(
function_domain=CoordinateSystem(coord_names=('l', 'i', 'j', 'k'), name='voxels', coord_dtype=float64),
function_range=CoordinateSystem(coord_names=('mni-x=L->R', 'mni-y=P->A', 'mni-z=I->S', 't'), name='mni', coord_dtype=float64),
affine=array([[ 0., 2., 0., 0., 0.],
[ 0., 0., 3., 0., 0.],
[ 0., 0., 0., 4., 0.],
[ 5., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 1.]])
)
>>> is_xyz_affable(time0_img)
False
Nibabel images always have xyz affines
>>> import nibabel as nib
>>> nimg = nib.Nifti1Image(arr, np.diag([2,3,4,1]))
>>> is_xyz_affable(nimg)
True
"""
try:
xyz_affine(img, name2xyz)
except rsp.SpaceError:
return False
return True
def as_xyz_image(img, name2xyz=None):
""" Return version of `img` that has a valid xyz affine, or raise error
Parameters
----------
img : ``Image`` instance or nibabel image
It has a ``coordmap`` attribute (``Image``) or a ``get_affine`` method
(nibabel image object)
name2xyz : None or mapping
Object such that ``name2xyz[ax_name]`` returns 'x', or 'y' or 'z' or
raises a KeyError for a str ``ax_name``. None means use module default.
Not used for nibabel `img` input.
Returns
-------
reo_img : ``Image`` instance or nibabel image
Returns image of same type as `img` input. If necessary, `reo_img` has
its data and coordmap changed to allow it to return an xyz affine. If
`img` is already xyz affable we return the input unchanged (``img is
reo_img``).
Raises
------
SpaceTypeError : if `img` does not have an affine coordinate map
AxesError : if not all of x, y, z recognized in `img` ``coordmap`` range
AffineError : if axes dropped from the affine contribute to x, y, z
coordinates
"""
try:
aff = xyz_affine(img, name2xyz)
except (rsp.AxesError, rsp.AffineError):
pass
else:
return img
cmap = img.coordmap
order = rsp.xyz_order(cmap.function_range, name2xyz)
# Reorder reference to canonical order
reo_img = img.reordered_reference(order)
# Which input axes correspond?
ornt = io_orientation(reo_img.coordmap.affine)
current_in_order = ornt[:,0]
# Set nan to inf to make np.argsort work for old numpy versions
current_in_order[np.isnan(current_in_order)] = np.inf
# Do we have the first three axes somewhere?
if not set((0,1,2)).issubset(current_in_order):
raise rsp.AxesError("One of x, y or z outputs missing a "
"corresponding input axis")
desired_input_order = np.argsort(current_in_order)
reo_img = reo_img.reordered_axes(list(desired_input_order))
try:
aff = xyz_affine(reo_img, name2xyz)
except rsp.SpaceError:
# Python 2.5 / 3 compatibility
e = sys.exc_info()[1]
raise e.__class__("Could not reorder so xyz coordinates did not "
"depend on the other axis coordinates: " +
str(e))
return reo_img
def make_xyz_image(data, xyz_affine, world, metadata=None):
""" Create 3D+ image embedded in space named in `world`
Parameters
----------
data : object
Object returning array from ``np.asarray(obj)``, and having ``shape``
attribute. Should have at least 3 dimensions (``len(shape) >= 3``), and
these three first 3 dimensions should be spatial
xyz_affine : (4, 4) array-like or tuple
if (4, 4) array-like (the usual case), then an affine relating spatial
dimensions in data (dimensions 0:3) to mm in XYZ space given in `world`.
If a tuple, then contains two values: the (4, 4) array-like, and a
sequence of scalings for the dimensions greater than 3. See examples.
world : str or XYZSpace or CoordSysMaker or CoordinateSystem
World 3D space to which affine refers. See ``spaces.get_world_cs()``
metadata : None or mapping, optional
metadata for created image. Defaults to None, giving empty metadata.
Returns
-------
img : Image
image containing `data`, with coordmap constructed from `affine` and
`world`, and with default voxel input coordinates. If the data has more
than 3 dimensions, and you didn't specify the added zooms with a tuple
`xyz_affine` parameter, the coordmap affine gets filled out with extra
ones on the diagonal to give an (N+1, N+1) affine, with ``N =
len(data.shape)``
Examples
--------
>>> data = np.arange(24).reshape((2, 3, 4))
>>> aff = np.diag([4, 5, 6, 1])
>>> img = make_xyz_image(data, aff, 'mni')
>>> img
Image(
data=array([[[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]],
<BLANKLINE>
[[12, 13, 14, 15],
[16, 17, 18, 19],
[20, 21, 22, 23]]]),
coordmap=AffineTransform(
function_domain=CoordinateSystem(coord_names=('i', 'j', 'k'), name='voxels', coord_dtype=float64),
function_range=CoordinateSystem(coord_names=('mni-x=L->R', 'mni-y=P->A', 'mni-z=I->S'), name='mni', coord_dtype=float64),
affine=array([[ 4., 0., 0., 0.],
[ 0., 5., 0., 0.],
[ 0., 0., 6., 0.],
[ 0., 0., 0., 1.]])
))
Now make data 4D; we just add 1. to the diagonal for the new dimension
>>> data4 = data[..., None]
>>> img = make_xyz_image(data4, aff, 'mni')
>>> img.coordmap
AffineTransform(
function_domain=CoordinateSystem(coord_names=('i', 'j', 'k', 'l'), name='voxels', coord_dtype=float64),
function_range=CoordinateSystem(coord_names=('mni-x=L->R', 'mni-y=P->A', 'mni-z=I->S', 't'), name='mni', coord_dtype=float64),
affine=array([[ 4., 0., 0., 0., 0.],
[ 0., 5., 0., 0., 0.],
[ 0., 0., 6., 0., 0.],
[ 0., 0., 0., 1., 0.],
[ 0., 0., 0., 0., 1.]])
)
We can pass in a scalar or tuple to specify scaling for the extra dimension
>>> img = make_xyz_image(data4, (aff, 2.0), 'mni')
>>> img.coordmap.affine
array([[ 4., 0., 0., 0., 0.],
[ 0., 5., 0., 0., 0.],
[ 0., 0., 6., 0., 0.],
[ 0., 0., 0., 2., 0.],
[ 0., 0., 0., 0., 1.]])
>>> data5 = data4[..., None]
>>> img = make_xyz_image(data5, (aff, (2.0, 3.0)), 'mni')
>>> img.coordmap.affine
array([[ 4., 0., 0., 0., 0., 0.],
[ 0., 5., 0., 0., 0., 0.],
[ 0., 0., 6., 0., 0., 0.],
[ 0., 0., 0., 2., 0., 0.],
[ 0., 0., 0., 0., 3., 0.],
[ 0., 0., 0., 0., 0., 1.]])
"""
N = len(data.shape)
if N < 3:
raise ValueError('Need data with at least 3 dimensions')
if type(xyz_affine) is tuple:
xyz_affine, added_zooms = xyz_affine
# Could be scalar added zooms
try:
len(added_zooms)
except TypeError:
added_zooms = (added_zooms,)
if len(added_zooms) != (N - 3):
raise ValueError('Wrong number of added zooms')
else:
added_zooms = (1,) * (N - 3)
xyz_affine = np.asarray(xyz_affine)
if not xyz_affine.shape == (4, 4):
raise ValueError("Expecting 4 x 4 affine")
# Make coordinate map
world_cm = rsp.get_world_cs(world, N)
voxel_cm = rsp.voxel_csm(N)
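    # Embed the 4x4 xyz affine in an (N+1, N+1) affine: the spatial block and
    # translation are copied over, while the extra non-spatial axes get
    # added_zooms on the diagonal and no cross terms.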
if N > 3:
affine = np.diag((1., 1, 1) + added_zooms + (1,))
affine[:3, :3] = xyz_affine[:3, :3]
affine[:3, -1] = xyz_affine[:3, 3]
else:
affine = xyz_affine
cmap = AffineTransform(voxel_cm, world_cm, affine)
return Image(data, cmap, metadata)
|
arokem/nipy
|
nipy/core/image/image_spaces.py
|
Python
|
bsd-3-clause
| 14,025
|
import json
import os
import shutil
import tempfile
from django.conf import settings
from django.test.utils import override_settings
import mock
import pytest
from nose.tools import eq_
from PIL import Image
import amo
import amo.tests
from addons.models import Addon
from amo.helpers import user_media_path
from amo.tests.test_helpers import get_image_path
from devhub import tasks
from files.models import FileUpload
pytestmark = pytest.mark.django_db
def test_resize_icon_shrink():
""" Image should be shrunk so that the longest side is 32px. """
resize_size = 32
final_size = (32, 12)
_uploader(resize_size, final_size)
def test_resize_icon_enlarge():
""" Image stays the same, since the new size is bigger than both sides. """
resize_size = 350
final_size = (339, 128)
_uploader(resize_size, final_size)
def test_resize_icon_same():
""" Image stays the same, since the new size is the same. """
resize_size = 339
final_size = (339, 128)
_uploader(resize_size, final_size)
def test_resize_icon_list():
""" Resize multiple images at once. """
resize_size = [32, 339, 350]
final_size = [(32, 12), (339, 128), (339, 128)]
_uploader(resize_size, final_size)
def _uploader(resize_size, final_size):
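    # Helper: copy the fixture image to a temporary file, resize it to one
    # size or a list of sizes with tasks.resize_icon, and check the resulting
    # dimensions.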
img = get_image_path('mozilla.png')
original_size = (339, 128)
    src = tempfile.NamedTemporaryFile(mode='w+b', suffix=".png",
delete=False)
# resize_icon removes the original
shutil.copyfile(img, src.name)
src_image = Image.open(src.name)
eq_(src_image.size, original_size)
if isinstance(final_size, list):
uploadto = user_media_path('addon_icons')
try:
os.makedirs(uploadto)
except OSError:
pass
for rsize, fsize in zip(resize_size, final_size):
dest_name = os.path.join(uploadto, '1234')
tasks.resize_icon(src.name, dest_name, resize_size, locally=True)
dest_image = Image.open(open('%s-%s.png' % (dest_name, rsize)))
eq_(dest_image.size, fsize)
if os.path.exists(dest_image.filename):
os.remove(dest_image.filename)
assert not os.path.exists(dest_image.filename)
shutil.rmtree(uploadto)
else:
dest = tempfile.mktemp(suffix='.png')
tasks.resize_icon(src.name, dest, resize_size, locally=True)
dest_image = Image.open(dest)
eq_(dest_image.size, final_size)
assert not os.path.exists(src.name)
class TestValidator(amo.tests.TestCase):
mock_sign_addon_warning = (
'{"warnings": 1, "messages": [{"context": null, "editors_only": '
'false, "description": "Add-ons which are already signed will be '
're-signed when published on AMO. This will replace any existing '
'signatures on the add-on.", "column": null, "type": "warning", '
'"id": ["testcases_content", "signed_xpi"], "file": "", '
'"tier": 2, "for_appversions": null, "message": "Package already '
'signed", "uid": "87326f8f699f447e90b3d5a66a78513e", "line": '
'null, "compatibility_type": null}]}')
def setUp(self):
super(TestValidator, self).setUp()
self.upload = FileUpload.objects.create()
assert not self.upload.valid
def get_upload(self):
return FileUpload.objects.get(pk=self.upload.pk)
@mock.patch('devhub.tasks.run_validator')
def test_pass_validation(self, _mock):
_mock.return_value = '{"errors": 0}'
tasks.validator(self.upload.pk)
assert self.get_upload().valid
@mock.patch('devhub.tasks.run_validator')
def test_fail_validation(self, _mock):
_mock.return_value = '{"errors": 2}'
tasks.validator(self.upload.pk)
assert not self.get_upload().valid
@mock.patch('devhub.tasks.run_validator')
def test_validation_error(self, _mock):
_mock.side_effect = Exception
eq_(self.upload.task_error, None)
with self.assertRaises(Exception):
tasks.validator(self.upload.pk)
error = self.get_upload().task_error
assert error.startswith('Traceback (most recent call last)'), error
@override_settings(SIGNING_SERVER='http://full',
PRELIMINARY_SIGNING_SERVER='http://prelim')
@mock.patch('devhub.tasks.run_validator')
def test_validation_signing_warning(self, _mock):
"""If we sign addons, warn on signed addon submission."""
_mock.return_value = self.mock_sign_addon_warning
tasks.validator(self.upload.pk)
validation = json.loads(self.get_upload().validation)
assert validation['warnings'] == 1
assert len(validation['messages']) == 1
@override_settings(SIGNING_SERVER='', PRELIMINARY_SIGNING_SERVER='')
@mock.patch('devhub.tasks.run_validator')
def test_validation_no_signing_warning(self, _mock):
"""If we're not signing addon don't warn on signed addon submission."""
_mock.return_value = self.mock_sign_addon_warning
tasks.validator(self.upload.pk)
validation = json.loads(self.get_upload().validation)
assert validation['warnings'] == 0
assert len(validation['messages']) == 0
@mock.patch('devhub.tasks.run_validator')
def test_annotate_passed_auto_validation(self, _mock):
"""Set passed_auto_validation on reception of the results."""
result = {'signing_summary': {'trivial': 1, 'low': 0, 'medium': 0,
'high': 0}}
_mock.return_value = json.dumps(result)
eq_(self.upload.task_error, None)
tasks.validator(self.upload.pk)
validation = json.loads(self.get_upload().validation)
assert validation['passed_auto_validation']
result['signing_summary']['low'] = 1
_mock.return_value = json.dumps(result)
eq_(self.upload.task_error, None)
tasks.validator(self.upload.pk)
validation = json.loads(self.get_upload().validation)
assert not validation['passed_auto_validation']
@mock.patch('devhub.tasks.run_validator')
def test_annotate_passed_auto_validation_bogus_result(self, _mock):
"""Don't set passed_auto_validation, don't fail if results is bogus."""
_mock.return_value = ''
eq_(self.upload.task_error, None)
tasks.validator(self.upload.pk)
assert self.get_upload().validation == ''
class TestFlagBinary(amo.tests.TestCase):
fixtures = ['base/addon_3615']
def setUp(self):
super(TestFlagBinary, self).setUp()
self.addon = Addon.objects.get(pk=3615)
@mock.patch('devhub.tasks.run_validator')
def test_flag_binary(self, _mock):
_mock.return_value = ('{"metadata":{"contains_binary_extension": 1, '
'"contains_binary_content": 0}}')
tasks.flag_binary([self.addon.pk])
eq_(Addon.objects.get(pk=self.addon.pk).binary, True)
_mock.return_value = ('{"metadata":{"contains_binary_extension": 0, '
'"contains_binary_content": 1}}')
tasks.flag_binary([self.addon.pk])
eq_(Addon.objects.get(pk=self.addon.pk).binary, True)
@mock.patch('devhub.tasks.run_validator')
def test_flag_not_binary(self, _mock):
_mock.return_value = ('{"metadata":{"contains_binary_extension": 0, '
'"contains_binary_content": 0}}')
tasks.flag_binary([self.addon.pk])
eq_(Addon.objects.get(pk=self.addon.pk).binary, False)
@mock.patch('devhub.tasks.run_validator')
def test_flag_error(self, _mock):
_mock.side_effect = RuntimeError()
tasks.flag_binary([self.addon.pk])
eq_(Addon.objects.get(pk=self.addon.pk).binary, False)
@mock.patch('devhub.tasks.send_html_mail_jinja')
def test_send_welcome_email(send_html_mail_jinja_mock):
tasks.send_welcome_email(3615, ['del@icio.us'], {'omg': 'yes'})
send_html_mail_jinja_mock.assert_called_with(
'Mozilla Add-ons: Thanks for submitting a Firefox Add-on!',
'devhub/email/submission.html',
'devhub/email/submission.txt',
{'omg': 'yes'},
recipient_list=['del@icio.us'],
from_email=settings.NOBODY_EMAIL,
use_blacklist=False,
perm_setting='individual_contact',
headers={'Reply-To': settings.EDITORS_EMAIL})
|
Joergen/olympia
|
apps/devhub/tests/test_tasks.py
|
Python
|
bsd-3-clause
| 8,425
|
from __future__ import absolute_import
import os.path
import pytest
import subprocess
from django.conf import settings
from raven.versioning import fetch_git_sha, fetch_package_version
from raven.utils import six
def has_git_requirements():
return os.path.exists(os.path.join(settings.PROJECT_ROOT, '.git', 'refs', 'heads', 'master'))
@pytest.mark.skipif('not has_git_requirements()')
def test_fetch_git_sha():
result = fetch_git_sha(settings.PROJECT_ROOT)
assert result is not None
assert len(result) == 40
assert isinstance(result, six.string_types)
assert result == subprocess.check_output(
'git rev-parse --verify HEAD', shell=True, cwd=settings.PROJECT_ROOT
).strip()
def test_fetch_package_version():
result = fetch_package_version('raven')
assert result is not None
assert isinstance(result, six.string_types)
|
ronaldevers/raven-python
|
tests/versioning/tests.py
|
Python
|
bsd-3-clause
| 874
|
from io import StringIO
from .. import *
from bfg9000 import path
from bfg9000 import safe_str
from bfg9000.shell.syntax import *
class my_safe_str(safe_str.safe_string):
pass
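# In these tests Syntax.variable writes strings verbatim, while Syntax.shell
# quotes anything the shell would otherwise expand; safe_str.literal values
# bypass quoting in both modes.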
class TestWriteString(TestCase):
def test_variable(self):
out = Writer(StringIO())
out.write('foo', Syntax.variable)
out.write('$bar', Syntax.variable)
self.assertEqual(out.stream.getvalue(), 'foo$bar')
def test_shell(self):
out = Writer(StringIO())
out.write('foo', Syntax.shell)
out.write('$bar', Syntax.shell)
self.assertEqual(out.stream.getvalue(), "foo'$bar'")
class TestWriteLiteral(TestCase):
def test_variable(self):
out = Writer(StringIO())
out.write(safe_str.literal('$foo'), Syntax.variable)
self.assertEqual(out.stream.getvalue(), '$foo')
def test_shell(self):
out = Writer(StringIO())
out.write(safe_str.literal('$foo'), Syntax.shell)
self.assertEqual(out.stream.getvalue(), '$foo')
class TestWriteJbos(TestCase):
def test_variable(self):
out = Writer(StringIO())
s = safe_str.jbos('$foo', safe_str.literal('bar'))
out.write(s, Syntax.variable)
self.assertEqual(out.stream.getvalue(), '$foobar')
def test_shell(self):
out = Writer(StringIO())
s = safe_str.jbos('$foo', safe_str.literal('bar'))
out.write(s, Syntax.shell)
self.assertEqual(out.stream.getvalue(), "'$foo'bar")
class TestWritePath(PathTestCase):
def test_variable(self):
out = Writer(StringIO())
out.write(self.Path('foo', path.InstallRoot.bindir), Syntax.variable)
self.assertEqual(out.stream.getvalue(),
self.ospath.join('${bindir}', 'foo'))
def test_shell(self):
out = Writer(StringIO())
out.write(self.Path('foo', path.InstallRoot.bindir), Syntax.shell)
self.assertEqual(out.stream.getvalue(),
"'" + self.ospath.join('${bindir}', 'foo') + "'")
class TestWriteInvalid(TestCase):
def test_invalid(self):
out = Writer(StringIO())
with self.assertRaises(TypeError):
out.write(my_safe_str(), Syntax.variable)
class TestWriteEach(TestCase):
def test_basic(self):
out = Writer(StringIO())
out.write_each(['foo', 'bar'], Syntax.variable)
self.assertEqual(out.stream.getvalue(), 'foo bar')
def test_delims(self):
out = Writer(StringIO())
out.write_each(['foo', 'bar'], Syntax.variable, ',', '[', ']')
self.assertEqual(out.stream.getvalue(), '[foo,bar]')
class TestVariable(TestCase):
def test_equality(self):
self.assertTrue(Variable('foo') == Variable('foo'))
self.assertFalse(Variable('foo') != Variable('foo'))
self.assertFalse(Variable('foo') == Variable('bar'))
self.assertTrue(Variable('foo') != Variable('bar'))
def test_concat_str(self):
self.assertEqual(Variable('foo') + 'bar', safe_str.jbos(
safe_str.literal('${foo}'), 'bar'
))
self.assertEqual('foo' + Variable('bar'), safe_str.jbos(
'foo', safe_str.literal('${bar}')
))
def test_concat_path(self):
self.assertEqual(Variable('foo') + path.Path('bar'), safe_str.jbos(
safe_str.literal('${foo}'), path.Path('bar')
))
self.assertEqual(path.Path('foo') + Variable('bar'), safe_str.jbos(
path.Path('foo'), safe_str.literal('${bar}')
))
def test_concat_var(self):
self.assertEqual(Variable('foo') + Variable('bar'), safe_str.jbos(
safe_str.literal('${foo}'), safe_str.literal('${bar}')
))
def test_hash(self):
self.assertEqual(hash(Variable('foo')), hash(Variable('foo')))
|
jimporter/bfg9000
|
test/unit/shell/test_syntax.py
|
Python
|
bsd-3-clause
| 3,803
|
################################################################################
# Copyright (c) 2011-2021, National Research Foundation (SARAO)
#
# Licensed under the BSD 3-Clause License (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy
# of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
"""Data accessor class for HDF5 files produced by Fringe Finder correlator."""
import logging
import pathlib
import re
import h5py
import katpoint
import numpy as np
from .categorical import CategoricalData
from .concatdata import ConcatenatedLazyIndexer
from .dataset import (DEFAULT_SENSOR_PROPS, DEFAULT_VIRTUAL_SENSORS,
BrokenFile, DataSet, Subarray, WrongVersion,
_robust_target)
from .lazy_indexer import LazyIndexer, LazyTransform
from .sensordata import RecordSensorGetter, SensorCache, to_str
from .spectral_window import SpectralWindow
logger = logging.getLogger(__name__)
def _labels_to_state(scan_label, compscan_label):
"""Use scan and compscan labels to derive basic state of antenna."""
if not scan_label or scan_label == 'slew':
return 'slew'
if scan_label == 'cal':
return 'track'
return 'track' if compscan_label == 'track' else 'scan'
SENSOR_PROPS = dict(DEFAULT_SENSOR_PROPS)
SENSOR_ALIASES = {
'nd_coupler': 'rfe3_rfe15_noise_coupler_on',
'nd_pin': 'rfe3_rfe15_noise_pin_on',
}
def _calc_azel(cache, name, ant):
"""Calculate virtual (az, el) sensors from actual ones in sensor cache."""
base_name = 'pos_actual_scan_azim' if name.endswith('az') else 'pos_actual_scan_elev'
real_sensor = f'Antennas/{ant}/{base_name}'
cache[name] = sensor_data = katpoint.deg2rad(cache.get(real_sensor))
return sensor_data
VIRTUAL_SENSORS = dict(DEFAULT_VIRTUAL_SENSORS)
VIRTUAL_SENSORS.update({'Antennas/{ant}/az': _calc_azel, 'Antennas/{ant}/el': _calc_azel})
# -------------------------------------------------------------------------------------------------
# -- CLASS : H5DataV1
# -------------------------------------------------------------------------------------------------
class H5DataV1(DataSet):
"""Load HDF5 format version 1 file produced by Fringe Finder correlator.
For more information on attributes, see the :class:`DataSet` docstring.
Parameters
----------
filename : string
Name of HDF5 file
ref_ant : string, optional
Name of reference antenna, used to partition data set into scans
(default is first antenna in use)
time_offset : float, optional
Offset to add to all correlator timestamps, in seconds
mode : string, optional
HDF5 file opening mode (e.g. 'r+' to open file in write mode)
kwargs : dict, optional
Extra keyword arguments, typically meant for other formats and ignored
Attributes
----------
file : :class:`h5py.File` object
Underlying HDF5 file, exposed via :mod:`h5py` interface
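
    Examples
    --------
    A minimal usage sketch (the filename here is hypothetical); in practice
    files are usually opened via ``katdal.open``, which dispatches to this
    class for version 1 files::

        d = H5DataV1('1234567890.h5')
        print(d.start_time, d.end_time)
        vis = d.vis[:10]   # first 10 dumps, loaded into memory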
"""
def __init__(self, filename, ref_ant='', time_offset=0.0, mode='r', **kwargs):
# The closest thing to a capture block ID is the Unix timestamp in the original filename
# There is only one (unnamed) output stream, so leave off the stream name
cbid = pathlib.Path(filename).stem
DataSet.__init__(self, cbid, ref_ant, time_offset, url=filename)
# Load file
self.file, self.version = H5DataV1._open(filename, mode)
f = self.file
# Load main HDF5 groups
ants_group, corr_group, data_group = f['Antennas'], f['Correlator'], f['Scans']
# Get observation script parameters, with defaults
self.observer = self.obs_params['observer'] = to_str(f.attrs.get('observer', ''))
self.description = self.obs_params['description'] = to_str(f.attrs.get('description', ''))
self.experiment_id = self.obs_params['experiment_id'] = to_str(f.attrs.get('experiment_id', ''))
# Collect all groups below data group that fit the description of a scan group
scan_groups = []
def register_scan_group(name, obj):
"""A scan group is defined as a group named 'Scan*' with non-empty timestamps and data."""
if isinstance(obj, h5py.Group) and name.split('/')[-1].startswith('Scan') and \
'data' in obj and 'timestamps' in obj and len(obj['timestamps']) > 0:
scan_groups.append(obj)
data_group.visititems(register_scan_group)
# Sort scan groups in chronological order via 'decorate-sort-undecorate' (DSU) idiom
decorated_scan_groups = [(s['timestamps'][0], s) for s in scan_groups]
decorated_scan_groups.sort()
self._scan_groups = [s[-1] for s in decorated_scan_groups]
# ------ Extract timestamps ------
self.dump_period = 1.0 / corr_group.attrs['dump_rate_hz']
self._segments = np.cumsum([0] + [len(s['timestamps']) for s in self._scan_groups])
num_dumps = self._segments[-1]
        self._time_keep = np.ones(num_dumps, dtype=bool)
data_timestamps = self.timestamps
if data_timestamps[0] < 1e9:
logger.warning("File '%s' has invalid first correlator timestamp (%f)", filename, data_timestamps[0])
# Estimate timestamps by assuming they are uniformly spaced (much quicker than loading them from file).
# This is useful for the purpose of segmenting data set, where accurate timestamps are not that crucial.
# The real timestamps are still loaded when the user explicitly asks for them.
# Do quick test for uniform spacing of timestamps (necessary but not sufficient).
if abs((data_timestamps[-1] - data_timestamps[0]) / self.dump_period + 1 - num_dumps) < 0.01:
# Estimate the timestamps as being uniformly spaced
data_timestamps = data_timestamps[0] + self.dump_period * np.arange(num_dumps)
else:
# Load the real timestamps instead and warn the user, as this is anomalous
data_timestamps = data_timestamps[:]
expected_dumps = (data_timestamps[-1] - data_timestamps[0]) / self.dump_period + 1
logger.warning(("Irregular timestamps detected in file '%s':"
"expected %.3f dumps based on dump period and start/end times, got %d instead") %
(filename, expected_dumps, num_dumps))
self.start_time = katpoint.Timestamp(data_timestamps[0] - 0.5 * self.dump_period)
self.end_time = katpoint.Timestamp(data_timestamps[-1] + 0.5 * self.dump_period)
# ------ Extract sensors ------
# Populate sensor cache with all HDF5 datasets below antennas group that fit the description of a sensor
cache = {}
def register_sensor(name, obj):
if isinstance(obj, h5py.Dataset) and obj.shape != () and \
obj.dtype.names == ('timestamp', 'value', 'status'):
# Assume sensor dataset name is AntennaN/Sensors/dataset and rename it to Antennas/{ant}/dataset
ant_name = to_str(obj.parent.parent.attrs['description']).split(',')[0]
dataset_name = name.split('/')[-1]
standardised_name = f"Antennas/{ant_name}/{dataset_name}"
cache[standardised_name] = RecordSensorGetter(obj, standardised_name)
ants_group.visititems(register_sensor)
# Use estimated data timestamps for now, to speed up data segmentation
# This will linearly interpolate pointing coordinates to correlator data timestamps (on access)
# As long as azimuth is in natural antenna coordinates, no special angle interpolation required
self.sensor = SensorCache(cache, data_timestamps, self.dump_period, keep=self._time_keep,
props=SENSOR_PROPS, virtual=VIRTUAL_SENSORS, aliases=SENSOR_ALIASES)
# ------ Extract subarrays ------
ants = [katpoint.Antenna(to_str(ants_group[group].attrs['description'])) for group in ants_group]
self.ref_ant = ants[0].name if not ref_ant else ref_ant
# Map from (old-style) DBE input label (e.g. '0x') to the new antenna-based input label (e.g. 'ant1h')
input_label = {to_str(ants_group[group]['H'].attrs['dbe_input']): ant.name + 'h'
for ant, group in zip(ants, ants_group.keys()) if 'H' in ants_group[group]}
input_label.update({to_str(ants_group[group]['V'].attrs['dbe_input']): ant.name + 'v'
for ant, group in zip(ants, ants_group.keys()) if 'V' in ants_group[group]})
# Split DBE input product string into its separate inputs
split_product = re.compile(r'(\d+[xy])(\d+[xy])')
# Iterate over map from correlation product index to DBE input product string and convert
# the latter to pairs of input labels (this assumes that the corrprod indices are sorted)
corrprods = []
for corrind, product in corr_group['input_map']:
product = to_str(product)
match = split_product.match(product)
if match is None:
raise BrokenFile(f"Unknown DBE input product '{product}' in input map (expected e.g. '0x1y')")
corrprods.append(tuple([input_label[inp] for inp in match.groups()]))
data_cp_len = len(self._scan_groups[0]['data'].dtype)
if len(corrprods) != data_cp_len:
raise BrokenFile(f'Number of baseline labels received from correlator ({len(corrprods)}) '
f'differs from number of baselines in data ({data_cp_len})')
self.subarrays = [Subarray(ants, corrprods)]
self.sensor['Observation/subarray'] = CategoricalData(self.subarrays, [0, len(data_timestamps)])
self.sensor['Observation/subarray_index'] = CategoricalData([0], [0, len(data_timestamps)])
# Store antenna objects in sensor cache too, for use in virtual sensor calculations
for ant in ants:
self.sensor[f'Antennas/{ant.name}/antenna'] = CategoricalData([ant], [0, len(data_timestamps)])
# Extract array reference from first antenna (first 5 fields of description)
array_ant_fields = ['array'] + ants[0].description.split(',')[1:5]
array_ant = katpoint.Antenna(','.join(array_ant_fields))
self.sensor['Antennas/array/antenna'] = CategoricalData([array_ant], [0, len(data_timestamps)])
# ------ Extract spectral windows / frequencies ------
centre_freq = corr_group.attrs['center_frequency_hz']
num_chans = corr_group.attrs['num_freq_channels']
data_num_chans = self._scan_groups[0]['data'].shape[1]
if num_chans != data_num_chans:
raise BrokenFile(f'Number of channels received from correlator ({num_chans}) '
f'differs from number of channels in data ({data_num_chans})')
channel_width = corr_group.attrs['channel_bandwidth_hz']
self.spectral_windows = [SpectralWindow(centre_freq, channel_width, num_chans, 'poco')]
self.sensor['Observation/spw'] = CategoricalData(self.spectral_windows, [0, len(data_timestamps)])
self.sensor['Observation/spw_index'] = CategoricalData([0], [0, len(data_timestamps)])
# ------ Extract scans / compound scans / targets ------
# Fringe Finder augment does not store antenna activity sensors - use scan + compscan labels as a guess
scan_labels = [to_str(s.attrs.get('label', '')) for s in self._scan_groups]
compscan_labels = [to_str(s.parent.attrs.get('label', '')) for s in self._scan_groups]
scan_states = [_labels_to_state(s, cs) for s, cs in zip(scan_labels, compscan_labels)]
# The scans are already partitioned into groups - use corresponding segments as start events
self.sensor['Observation/scan_state'] = CategoricalData(scan_states, self._segments)
self.sensor['Observation/scan_index'] = CategoricalData(list(range(len(scan_states))), self._segments)
# Group scans together based on compscan group name and have one label per compound scan
compscan = CategoricalData([s.parent.name for s in self._scan_groups], self._segments)
compscan.remove_repeats()
label = CategoricalData(compscan_labels, self._segments)
label.align(compscan.events)
self.sensor['Observation/label'] = label
self.sensor['Observation/compscan_index'] = CategoricalData(list(range(len(label))), label.events)
# Extract targets from compscan groups, replacing empty or bad descriptions with dummy target
target = CategoricalData([_robust_target(to_str(s.parent.attrs.get('target', '')))
for s in self._scan_groups], self._segments)
target.align(compscan.events)
self.sensor['Observation/target'] = target
self.sensor['Observation/target_index'] = CategoricalData(target.indices, target.events)
# Set up catalogue containing all targets in file, with reference antenna as default antenna
self.catalogue.add(target.unique_values)
self.catalogue.antenna = self.sensor[f'Antennas/{self.ref_ant}/antenna'][0]
# Ensure that each target flux model spans all frequencies in data set if possible
self._fix_flux_freq_range()
# Restore original (slow) timestamps so that subsequent sensors (e.g. pointing) will have accurate values
self.sensor.timestamps = self.timestamps
# Apply default selection and initialise all members that depend on selection in the process
self.select(spw=0, subarray=0)
@staticmethod
def _open(filename, mode='r'):
"""Open file and do basic version and augmentation sanity check."""
f = h5py.File(filename, mode)
version = to_str(f.attrs.get('version', '1.x'))
if not version.startswith('1.'):
raise WrongVersion(f"Attempting to load version '{version}' file with version 1 loader")
if 'augment' not in f.attrs:
raise BrokenFile('HDF5 file not augmented - please run '
'augment4.py (provided by k7augment package)')
return f, version
@staticmethod
def _get_ants(filename):
"""Quick look function to get the list of antennas in a data file.
This is intended to be called without creating a full katdal object.
Parameters
----------
filename : string
Data file name
Returns
-------
antennas : list of :class:'katpoint.Antenna' objects
"""
f, version = H5DataV1._open(filename)
ants_group = f['Antennas']
antennas = [katpoint.Antenna(to_str(ants_group[group].attrs['description']))
for group in ants_group]
return antennas
@staticmethod
def _get_targets(filename):
"""Quick look function to get the list of targets in a data file.
This is intended to be called without creating a full katdal object.
Parameters
----------
filename : string
Data file name
Returns
-------
targets : :class:'katpoint.Catalogue' object
All targets in file
"""
f, version = H5DataV1._open(filename)
compound_scans = f['Scans']
all_target_strings = [to_str(compound_scans[group].attrs['target'])
for group in compound_scans]
return katpoint.Catalogue(np.unique(all_target_strings))
@property
def timestamps(self):
"""Visibility timestamps in UTC seconds since Unix epoch.
The timestamps are returned as an array indexer of float64, shape (*T*,),
with one timestamp per integration aligned with the integration
*midpoint*. To get the data array itself from the indexer `x`, do `x[:]`
or perform any other form of indexing on it.
"""
indexers = []
# Avoid storing reference to self in extract_time closure below, as this hinders garbage collection
dump_period, time_offset = self.dump_period, self.time_offset
# Convert from millisecs to secs since Unix epoch, and be sure to use float64 to preserve digits
extract_time = LazyTransform('extract_time',
lambda t, keep: np.float64(t) / 1000. + 0.5 * dump_period + time_offset,
dtype=np.float64)
for n, s in enumerate(self._scan_groups):
indexers.append(LazyIndexer(s['timestamps'], keep=self._time_keep[self._segments[n]:self._segments[n + 1]]))
return ConcatenatedLazyIndexer(indexers, transforms=[extract_time])
def _vis_indexers(self):
"""Create list of indexers to access visibilities across scans.
Fringe Finder has a weird vis data structure: each scan data group is
a recarray with shape (T, F) and fields '0'...'11' indicating the
correlation products. The per-scan LazyIndexers therefore only do the
time + frequency indexing, leaving corrprod indexing to the final
transform. This returns a list of per-scan visibility indexers based
on the current data selection.
"""
# Avoid storing reference to self in transform closure below, as this hinders garbage collection
corrprod_keep = self._corrprod_keep
# Apply both first-stage and second-stage corrprod indexing in the transform
def index_corrprod(tf, keep):
# Ensure that keep tuple has length of 3 (truncate or pad with blanket slices as necessary)
keep = keep[:3] + (slice(None),) * (3 - len(keep))
# Final indexing ensures that returned data are always 3-dimensional (i.e. keep singleton dimensions)
force_3dim = tuple((np.newaxis if np.isscalar(dim_keep) else slice(None)) for dim_keep in keep)
# Conjugate the data to correct for the lower sideband downconversion
return np.dstack([tf[str(corrind)][force_3dim[:2]].conjugate() for corrind in
np.nonzero(corrprod_keep)[0]])[:, :, keep[2]][:, :, force_3dim[2]]
extract_vis = LazyTransform('extract_vis_v1', index_corrprod,
lambda shape: (shape[0], shape[1], corrprod_keep.sum()), np.complex64)
indexers = []
for n, s in enumerate(self._scan_groups):
indexers.append(LazyIndexer(s['data'], keep=(self._time_keep[self._segments[n]:self._segments[n + 1]],
self._freq_keep),
transforms=[extract_vis]))
return indexers
@property
def vis(self):
r"""Complex visibility data as a function of time, frequency and baseline.
The visibility data are returned as an array indexer of complex64, shape
(*T*, *F*, *B*), with time along the first dimension, frequency along the
second dimension and correlation product ("baseline") index along the
third dimension. The returned array always has all three dimensions,
even for scalar (single) values. The number of integrations *T* matches
the length of :meth:`timestamps`, the number of frequency channels *F*
matches the length of :meth:`freqs` and the number of correlation
products *B* matches the length of :meth:`corr_products`. To get the
data array itself from the indexer `x`, do `x[:]` or perform any other
form of indexing on it. Only then will data be loaded into memory.
        The sign convention of the imaginary part is consistent with an
        electric field of :math:`e^{i(\omega t - kz)}`, i.e. phase that
        increases with time.
"""
return ConcatenatedLazyIndexer(self._vis_indexers())
@property
def weights(self):
"""Visibility weights as a function of time, frequency and baseline.
The weights data are returned as an array indexer of float32, shape
(*T*, *F*, *B*), with time along the first dimension, frequency along the
second dimension and correlation product ("baseline") index along the
third dimension. The number of integrations *T* matches the length of
:meth:`timestamps`, the number of frequency channels *F* matches the
length of :meth:`freqs` and the number of correlation products *B*
matches the length of :meth:`corr_products`. To get the data array
itself from the indexer `x`, do `x[:]` or perform any other form of
indexing on it. Only then will data be loaded into memory.
"""
# Tell the user that there are no weights in the h5 file
logger.warning("No weights in HDF5 v1 data files, returning array of unity weights")
ones = LazyTransform('ones', lambda data, keep: np.ones_like(data, dtype=np.float32), dtype=np.float32)
return ConcatenatedLazyIndexer(self._vis_indexers(), transforms=[ones])
@property
def flags(self):
"""Flags as a function of time, frequency and baseline.
The flags data are returned as an array indexer of bool, shape
(*T*, *F*, *B*), with time along the first dimension, frequency along the
second dimension and correlation product ("baseline") index along the
third dimension. The number of integrations *T* matches the length of
:meth:`timestamps`, the number of frequency channels *F* matches the
length of :meth:`freqs` and the number of correlation products *B*
matches the length of :meth:`corr_products`. To get the data array
itself from the indexer `x`, do `x[:]` or perform any other form of
indexing on it. Only then will data be loaded into memory.
"""
# Tell the user that there are no flags in the h5 file
logger.warning("No flags in HDF5 v1 data files, returning array of zero flags")
        falses = LazyTransform('falses', lambda data, keep: np.zeros_like(data, dtype=bool), dtype=bool)
return ConcatenatedLazyIndexer(self._vis_indexers(), transforms=[falses])
@property
def temperature(self):
"""Air temperature in degrees Celsius."""
return self.sensor[f'Antennas/{self.ref_ant}/enviro_air_temperature']
@property
def pressure(self):
"""Barometric pressure in millibars."""
return self.sensor[f'Antennas/{self.ref_ant}/enviro_air_pressure']
@property
def humidity(self):
"""Relative humidity as a percentage."""
return self.sensor[f'Antennas/{self.ref_ant}/enviro_air_relative_humidity']
@property
def wind_speed(self):
"""Wind speed in metres per second."""
return self.sensor[f'Antennas/{self.ref_ant}/enviro_wind_speed']
@property
def wind_direction(self):
"""Wind direction as an azimuth angle in degrees."""
return self.sensor[f'Antennas/{self.ref_ant}/enviro_wind_direction']
|
ska-sa/katdal
|
katdal/h5datav1.py
|
Python
|
bsd-3-clause
| 23,433
|
'''Unit test package for module "tws.helper._hook_currenttime".'''
__copyright__ = "Copyright (c) 2009 Kevin J Bluck"
__version__ = "$Id$"
import unittest
import tws
from tws.helper import HookCurrentTime
class test_helper_HookCurrentTime(unittest.TestCase):
'''Test type "tws.helper.HookCurrentTime"'''
def setUp(self):
self.wrapper = tws.EWrapper()
def test_init(self):
self.assertTrue(HookCurrentTime(tws.EWrapper()))
def test_hook(self):
self.assertFalse(self.wrapper.__dict__.get("currentTime", None))
self.assertFalse(self.wrapper.__dict__.get("get_current_time", None))
HookCurrentTime(self.wrapper)
self.assertTrue(self.wrapper.__dict__.get("currentTime", None))
self.assertTrue(self.wrapper.__dict__.get("get_current_time", None))
|
kbluck/pytws
|
test_tws/test_helper/test_hook_currenttime.py
|
Python
|
bsd-3-clause
| 821
|
def extractWwwLiterarynerdsCom(item):
'''
Parser for 'www.literarynerds.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
fake-name/ReadableWebProxy
|
WebMirror/management/rss_parser_funcs/feed_parse_extractWwwLiterarynerdsCom.py
|
Python
|
bsd-3-clause
| 554
|
from __future__ import absolute_import
from rest_framework.response import Response
from sentry import features
from sentry.api.base import DocSection
from sentry.api.bases.organization import OrganizationEndpoint
from sentry.api.paginator import OffsetPaginator
from sentry.api.serializers import serialize
from sentry.constants import ObjectStatus
from sentry.models import Repository
from sentry.plugins import bindings
class OrganizationRepositoriesEndpoint(OrganizationEndpoint):
doc_section = DocSection.ORGANIZATIONS
def has_feature(self, request, organization):
return features.has(
'organizations:repos',
organization=organization,
actor=request.user,
)
def get(self, request, organization):
"""
List an Organization's Repositories
```````````````````````````````````
Return a list of version control repositories for a given organization.
:pparam string organization_slug: the organization short name
:auth: required
"""
if not self.has_feature(request, organization):
return self.respond({
'error_type': 'unavailable_feature',
'detail': ['You do not have that feature enabled']
}, status=403)
queryset = Repository.objects.filter(
organization_id=organization.id,
)
status = request.GET.get('status', 'active')
if status == 'active':
queryset = queryset.filter(
status=ObjectStatus.VISIBLE,
)
elif status == 'deleted':
queryset = queryset.exclude(
status=ObjectStatus.VISIBLE,
)
elif status:
queryset = queryset.none()
return self.paginate(
request=request,
queryset=queryset,
order_by='name',
on_results=lambda x: serialize(x, request.user),
paginator_cls=OffsetPaginator,
)
def post(self, request, organization):
if not request.user.is_authenticated():
return Response(status=401)
if not self.has_feature(request, organization):
return self.respond({
'error_type': 'unavailable_feature',
'detail': ['You do not have that feature enabled']
}, status=403)
provider_id = request.DATA.get('provider')
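        # Provider ids prefixed with 'integrations:' are backed by the newer
        # integration framework and resolved via a separate binding registry;
        # everything else falls through to the legacy plugin providers below.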
if features.has('organizations:internal-catchall', organization, actor=request.user):
if provider_id is not None and provider_id.startswith('integrations:'):
try:
provider_cls = bindings.get('integration-repository.provider').get(provider_id)
except KeyError:
return Response(
{
'error_type': 'validation',
}, status=400
)
provider = provider_cls(id=provider_id)
return provider.dispatch(request, organization)
try:
provider_cls = bindings.get('repository.provider').get(provider_id)
except KeyError:
return Response(
{
'error_type': 'validation',
}, status=400
)
provider = provider_cls(id=provider_id)
return provider.dispatch(request, organization)
|
looker/sentry
|
src/sentry/api/endpoints/organization_repositories.py
|
Python
|
bsd-3-clause
| 3,426
|
# -*- coding: utf-8 -*-
import pytest
from schematics.models import Model
from schematics.types import *
from schematics.types.compound import *
from schematics.exceptions import *
from schematics.undefined import Undefined
def autofail(value, context):
raise ValidationError("Fubar!", info=99)
class M(Model):
intfield = IntType(max_value=2)
reqfield = StringType(required=True)
matrixfield = ListType(ListType(IntType(max_value=2)))
listfield = ListType(IntType(), max_size=3, validators=[autofail])
modelfield = ModelType('M')
def get_input_dict():
inputdict = {
'intfield': '1',
'reqfield': 'foo',
'listfield': [],
'modelfield': {
'reqfield': 'bar',
'listfield': [1, 2, 3, 4],
'modelfield': {
'intfield': '3',
'matrixfield': [[0, 1, 0, 1], [1, 2, 3, 4], ['1', '0', '1', '0']],
'listfield': None,
'modelfield': {
'intfield': '0',
'reqfield': 'foo',
'listfield': None}}}}
return inputdict
def get_input_instance(input_init):
inputinstance = M(init=input_init)
inputinstance.intfield = '1'
inputinstance.reqfield = 'foo'
inputinstance.listfield = []
inputinstance.modelfield = M(init=input_init)
inputinstance.modelfield.reqfield = 'bar'
inputinstance.modelfield.listfield = [1, 2, 3, 4]
inputinstance.modelfield.modelfield = M(init=input_init)
inputinstance.modelfield.modelfield.intfield = '3'
inputinstance.modelfield.modelfield.matrixfield = [[0, 1, 0, 1], [1, 2, 3, 4], ['1', '0', '1', '0']]
inputinstance.modelfield.modelfield.listfield = None
inputinstance.modelfield.modelfield.modelfield = M(init=input_init)
inputinstance.modelfield.modelfield.modelfield.intfield = '0'
inputinstance.modelfield.modelfield.modelfield.reqfield = 'foo'
inputinstance.modelfield.modelfield.modelfield.listfield = None
return inputinstance
@pytest.fixture
def input(input_instance, input_init):
if input_instance:
return get_input_instance(input_init)
else:
return get_input_dict()
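# Parametrization columns: input_instance -- feed the test a model instance
# instead of a plain dict; input_init -- the init flag used when building that
# instance; init -- the init flag passed to the model under test; missing_obj --
# the expected placeholder for fields that were never set (None vs Undefined).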
@pytest.mark.parametrize('input_instance, input_init, init, missing_obj',
[( False, None, True, None),
( False, None, False, Undefined),
( True, False, True, None),
( True, False, False, Undefined),
( True, True, True, None),
( True, True, False, None)])
def test_conversion(input, init, missing_obj):
m = M(input, init=init)
assert type(m.intfield) is int
assert type(m.modelfield.modelfield.intfield) is int
assert type(m.modelfield.modelfield.matrixfield[2][3]) is int
assert type(m.listfield) is list
assert type(m.modelfield) is M
assert type(m.modelfield.modelfield) is M
assert type(m.modelfield.modelfield.modelfield) is M
assert type(m.modelfield.listfield) is list
assert type(m.modelfield.modelfield.matrixfield) is list
assert type(m.modelfield.modelfield.matrixfield[2]) is list
assert m._data['listfield'] == []
assert m.modelfield._data['intfield'] is missing_obj
assert m.modelfield.modelfield._data['listfield'] is None
assert m.modelfield.modelfield._data['reqfield'] is missing_obj
@pytest.mark.parametrize('partial', (True, False))
@pytest.mark.parametrize('import_, two_pass, input_instance, input_init, init, missing_obj',
[( True, False, False, None, True, None),
( True, False, False, None, False, Undefined),
( True, False, True, False, True, None),
( True, False, True, False, False, Undefined),
( True, False, True, True, True, None),
( True, False, True, True, False, None),
( True, True, False, None, True, None),
( True, True, False, None, False, Undefined),
( True, True, True, False, True, None),
( True, True, True, False, False, Undefined),
( True, True, True, True, True, None),
( True, True, True, True, False, None),
( False, None, True, False, True, None),
( False, None, True, False, False, Undefined),
( False, None, True, True, True, None),
( False, None, True, True, False, None)])
def test_conversion_with_validation(input, init, missing_obj, import_, two_pass, partial):
if missing_obj is None:
partial_data = {
'intfield': 1,
'reqfield': u'foo',
'matrixfield': None,
'modelfield': {
'intfield': None,
'reqfield': u'bar',
'matrixfield': None,
'modelfield': {
'reqfield': None,
'listfield': None,
'modelfield': M({
'intfield': 0,
'reqfield': u'foo',
'listfield': None})}}}
else:
partial_data = {
'intfield': 1,
'reqfield': u'foo',
'modelfield': {
'reqfield': u'bar',
'modelfield': {
'listfield': None,
'modelfield': M({
'intfield': 0,
'reqfield': u'foo',
'listfield': None}, init=False)}}}
with pytest.raises(DataError) as excinfo:
if import_:
if two_pass:
m = M(input, init=init)
m.validate(partial=partial)
else:
M(input, init=init, partial=partial, validate=True)
else:
input.validate(init_values=init, partial=partial)
messages = excinfo.value.messages
err_list = messages.pop('listfield')
assert err_list.pop().type == ValidationError
assert err_list == []
err_list = messages['modelfield'].pop('listfield')
assert err_list.pop().type == ValidationError
assert err_list.pop().type == ValidationError
assert err_list == []
err_list = messages['modelfield']['modelfield'].pop('intfield')
err_msg = err_list.pop()
assert err_list == []
assert err_msg.type == ValidationError
if not partial:
err_list = messages['modelfield']['modelfield'].pop('reqfield')
err_msg = err_list.pop()
assert err_list == []
assert err_msg.type == ConversionError
if missing_obj is None:
partial_data['modelfield']['modelfield'].pop('reqfield')
err_dict = messages['modelfield']['modelfield'].pop('matrixfield')
sub_err_dict = err_dict.pop(1)
err_list_1 = sub_err_dict.pop(2)
err_list_2 = sub_err_dict.pop(3)
assert err_list_1.pop().type == ValidationError
assert err_list_2.pop().type == ValidationError
assert err_list_1 == err_list_2 == []
assert err_dict == sub_err_dict == {}
assert messages['modelfield'].pop('modelfield') == {}
assert messages.pop('modelfield') == {}
assert messages == {}
assert excinfo.value.partial_data == partial_data
|
mlyundin/schematics
|
tests/test_conversion.py
|
Python
|
bsd-3-clause
| 7,830
|
# coding: utf-8
"""
Environmental Exposures API
Environmental Exposures API
OpenAPI spec version: 1.0.0
Contact: stealey@renci.org
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
from setuptools import setup, find_packages
NAME = "swagger_client"
VERSION = "1.0.0"
# To install the library, run the following
#
# python setup.py install
#
# prerequisite: setuptools
# http://pypi.python.org/pypi/setuptools
REQUIRES = ["urllib3 >= 1.15", "six >= 1.10", "certifi", "python-dateutil"]
setup(
name=NAME,
version=VERSION,
description="Environmental Exposures API",
author_email="stealey@renci.org",
url="",
keywords=["Swagger", "Environmental Exposures API"],
install_requires=REQUIRES,
packages=find_packages(),
include_package_data=True,
long_description="""\
Environmental Exposures API
"""
)
|
ResearchSoftwareInstitute/greendatatranslator
|
src/greentranslator/python-client/setup.py
|
Python
|
bsd-3-clause
| 1,462
|
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['None'] , ['PolyTrend'] , ['BestCycle'] , ['LSTM'] );
|
antoinecarme/pyaf
|
tests/model_control/detailed/transf_None/model_control_one_enabled_None_PolyTrend_BestCycle_LSTM.py
|
Python
|
bsd-3-clause
| 148
|
"""Talks forms."""
from wtforms.ext.sqlalchemy.fields import QuerySelectField
from flask_wtf import Form
from wtforms.validators import Optional
from wtforms_alchemy import model_form_factory
from pygotham.talks.models import Duration, Talk
__all__ = ('TalkSubmissionForm',)
ModelForm = model_form_factory(Form)
def duration_query_factory():
"""Return available :class:`~pygotha.models.Duration` instances."""
return Duration.query.filter(Duration.inactive == False)
class TalkSubmissionForm(ModelForm):
"""Form for editing :class:`~pygotham.models.Talk` instances."""
class Meta:
model = Talk
exclude = ('status', 'type')
field_args = {
'name': {'label': 'Title'},
'description': {
'label': 'Description',
'description': (
'If your talk is accepted this will be made public. It '
'should be one paragraph.'
),
},
'level': {'label': 'Experience Level'},
'duration': {'label': 'Duration'},
'abstract': {
'label': 'Abstract',
'description': (
'Detailed overview. Will be made public if your talk is '
'accepted.'
),
},
'outline': {
'label': 'Outline',
'description': (
'Sections and key points of the talk meant to give the '
'program committee an overview.'
),
},
'additional_requirements': {
'label': 'Additional Notes',
'description': (
"Any other information you'd like the program committee "
"to know, e.g., additional context and resources, "
"previous speaking experiences, etc. This will not be "
"shared publicly."
),
},
'recording_release': {
'label': 'Recording Release',
'validators': (Optional(),),
},
}
duration = QuerySelectField(query_factory=duration_query_factory)
|
djds23/pygotham-1
|
pygotham/talks/forms.py
|
Python
|
bsd-3-clause
| 2,225
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations # isort:skip
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Module under test
import bokeh.command.subcommands as sc # isort:skip
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
def test_all() -> None:
assert hasattr(sc, 'all')
assert type(sc.all) is list
def test_all_types() -> None:
from bokeh.command.subcommand import Subcommand
assert all(issubclass(x, Subcommand) for x in sc.all)
def test_all_count() -> None:
from os import listdir
from os.path import dirname
files = listdir(dirname(sc.__file__))
pyfiles = [x for x in files if x.endswith(".py")]
# the -2 accounts for __init__.py and file_output.py
assert len(sc.all) == len(pyfiles) - 2
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
bokeh/bokeh
|
tests/unit/bokeh/command/subcommands/test___init___subcommands.py
|
Python
|
bsd-3-clause
| 2,184
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from corehq.sql_db.operations import RawSQLMigration
migrator = RawSQLMigration(('custom', 'icds_reports', 'migrations', 'sql_templates'))
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrator.get_migration('create_tables_and_views.sql'),
migrator.get_migration('create_functions.sql'),
]
|
qedsoftware/commcare-hq
|
custom/icds_reports/migrations/0001_initial.py
|
Python
|
bsd-3-clause
| 452
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('orders', '0006_auto_20150223_1523'),
]
operations = [
migrations.AlterField(
model_name='mealdefaultmeal',
name='meal',
field=models.ForeignKey(related_name=b'meals', to='orders.MealDefault'),
),
]
|
delphcf/sis
|
sis/orders/migrations/0007_auto_20150224_1134.py
|
Python
|
bsd-3-clause
| 442
|
from __future__ import annotations
from dials.algorithms.refinement.restraints.restraints_parameterisation import (
RestraintsParameterisation,
)
__all__ = ["RestraintsParameterisation"]
|
dials/dials
|
algorithms/refinement/restraints/__init__.py
|
Python
|
bsd-3-clause
| 193
|
from sklearn import linear_model as lm_
from dask_ml import linear_model as lm
from dask_ml.utils import assert_estimator_equal
class TestStochasticGradientClassifier(object):
def test_basic(self, single_chunk_classification):
X, y = single_chunk_classification
a = lm.PartialSGDClassifier(classes=[0, 1], random_state=0,
max_iter=1000, tol=1e-3)
b = lm_.SGDClassifier(random_state=0, max_iter=1000, tol=1e-3)
a.fit(X, y)
b.partial_fit(X, y, classes=[0, 1])
assert_estimator_equal(a, b, exclude='loss_function_')
class TestStochasticGradientRegressor(object):
def test_basic(self, single_chunk_regression):
X, y = single_chunk_regression
a = lm.PartialSGDRegressor(random_state=0,
max_iter=1000, tol=1e-3)
b = lm_.SGDRegressor(random_state=0, max_iter=1000, tol=1e-3)
a.fit(X, y)
b.partial_fit(X, y)
assert_estimator_equal(a, b)
|
daniel-severo/dask-ml
|
tests/linear_model/test_stochastic_gradient.py
|
Python
|
bsd-3-clause
| 1,014
|
#!/usr/bin/env python
from setuptools import setup, find_packages
try:
README = open('README.rst').read()
except:
README = None
try:
REQUIREMENTS = open('requirements.txt').read()
except:
REQUIREMENTS = None
setup(
name = 'django-legacymigrations',
version = "0.1",
description = 'Continuous legacy database migrations using Django.',
long_description = README,
install_requires = REQUIREMENTS,
author = '1%CLUB',
author_email = 'devteam@1procentclub.nl',
url = 'https://github.com/onepercentclub/django-legacymigrations/',
packages = find_packages(),
include_package_data = True,
classifiers = ['Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Utilities'],
)
|
onepercentclub/django-legacymigrations
|
setup.py
|
Python
|
bsd-3-clause
| 1,058
|
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import six.moves.urllib.parse # pylint: disable=import-error
from core import benchmark_finders
from core import benchmark_utils
from telemetry.story import story_filter
_SHARD_MAP_DIR = os.path.join(os.path.dirname(__file__), 'shard_maps')
_ALL_BENCHMARKS_BY_NAMES = dict(
(b.Name(), b) for b in benchmark_finders.GetAllBenchmarks())
OFFICIAL_BENCHMARKS = frozenset(
b for b in benchmark_finders.GetOfficialBenchmarks()
if not b.Name().startswith('UNSCHEDULED_'))
CONTRIB_BENCHMARKS = frozenset(benchmark_finders.GetContribBenchmarks())
ALL_SCHEDULEABLE_BENCHMARKS = OFFICIAL_BENCHMARKS | CONTRIB_BENCHMARKS
GTEST_STORY_NAME = '_gtest_'
def _IsPlatformSupported(benchmark, platform):
supported = benchmark.GetSupportedPlatformNames(benchmark.SUPPORTED_PLATFORMS)
return 'all' in supported or platform in supported
class PerfPlatform(object):
def __init__(self,
name,
description,
benchmark_configs,
num_shards,
platform_os,
is_fyi=False,
is_calibration=False,
run_reference_build=False,
executables=None):
benchmark_configs = benchmark_configs.Frozenset()
self._name = name
self._description = description
self._platform_os = platform_os
    # For sorting, ignore case and "segments" in the bot name.
self._sort_key = name.lower().replace('-', ' ')
self._is_fyi = is_fyi
self._is_calibration = is_calibration
self.run_reference_build = run_reference_build
self.executables = executables or frozenset()
assert num_shards
self._num_shards = num_shards
# pylint: disable=redefined-outer-name
self._benchmark_configs = frozenset([
b for b in benchmark_configs if
_IsPlatformSupported(b.benchmark, self._platform_os)])
# pylint: enable=redefined-outer-name
benchmark_names = [config.name for config in self._benchmark_configs]
assert len(set(benchmark_names)) == len(benchmark_names), (
'Make sure that a benchmark does not appear twice.')
base_file_name = name.replace(' ', '_').lower()
self._timing_file_path = os.path.join(
_SHARD_MAP_DIR, 'timing_data', base_file_name + '_timing.json')
self.shards_map_file_name = base_file_name + '_map.json'
self._shards_map_file_path = os.path.join(
_SHARD_MAP_DIR, self.shards_map_file_name)
def __lt__(self, other):
if not isinstance(other, type(self)):
return NotImplemented
# pylint: disable=protected-access
return self._sort_key < other._sort_key
@property
def num_shards(self):
return self._num_shards
@property
def shards_map_file_path(self):
return self._shards_map_file_path
@property
def timing_file_path(self):
return self._timing_file_path
@property
def name(self):
return self._name
@property
def description(self):
return self._description
@property
def platform(self):
return self._platform_os
@property
def benchmarks_to_run(self):
# TODO(crbug.com/965158): Deprecate this in favor of benchmark_configs
    # as part of a change to make sharding scripts accommodate abridged
# benchmarks.
return frozenset({b.benchmark for b in self._benchmark_configs})
@property
def benchmark_configs(self):
return self._benchmark_configs
@property
def is_fyi(self):
return self._is_fyi
@property
def is_calibration(self):
return self._is_calibration
@property
def is_official(self):
return not self._is_fyi and not self.is_calibration
@property
def builder_url(self):
return ('https://ci.chromium.org/p/chrome/builders/ci/%s' %
six.moves.urllib.parse.quote(self._name))
class BenchmarkConfig(object):
def __init__(self, benchmark, abridged):
"""A configuration for a benchmark that helps decide how to shard it.
Args:
benchmark: the benchmark.Benchmark object.
abridged: True if the benchmark should be abridged so fewer stories
are run, and False if the whole benchmark should be run.
"""
self.benchmark = benchmark
self.abridged = abridged
self._stories = None
self.is_telemetry = True
@property
def name(self):
return self.benchmark.Name()
@property
def repeat(self):
return self.benchmark.options.get('pageset_repeat', 1)
@property
def stories(self):
    if self._stories is not None:
return self._stories
else:
story_set = benchmark_utils.GetBenchmarkStorySet(self.benchmark())
abridged_story_set_tag = (
story_set.GetAbridgedStorySetTagFilter() if self.abridged else None)
story_filter_obj = story_filter.StoryFilter(
abridged_story_set_tag=abridged_story_set_tag)
stories = story_filter_obj.FilterStories(story_set)
self._stories = [story.name for story in stories]
return self._stories
class ExecutableConfig(object):
def __init__(self, name, path=None, flags=None, estimated_runtime=60):
self.name = name
self.path = path or name
self.flags = flags or []
self.estimated_runtime = estimated_runtime
self.abridged = False
self.stories = [GTEST_STORY_NAME]
self.is_telemetry = False
self.repeat = 1
class PerfSuite(object):
def __init__(self, configs):
self._configs = dict()
self.Add(configs)
def Frozenset(self):
return frozenset(self._configs.values())
def Add(self, configs):
if isinstance(configs, PerfSuite):
configs = configs.Frozenset()
for config in configs:
if isinstance(config, str):
config = _GetBenchmarkConfig(config)
if config.name in self._configs:
raise ValueError('Cannot have duplicate benchmarks/executables.')
self._configs[config.name] = config
return self
def Remove(self, configs):
for config in configs:
name = config
if isinstance(config, PerfSuite):
name = config.name
del self._configs[name]
return self
def Abridge(self, config_names):
for name in config_names:
del self._configs[name]
self._configs[name] = _GetBenchmarkConfig(
name, abridged=True)
return self
# Global |benchmarks| is a convenient way to keep BenchmarkConfig objects
# unique, which allows us to use set subtraction below.
benchmarks = {b.Name(): {True: BenchmarkConfig(b, abridged=True),
False: BenchmarkConfig(b, abridged=False)}
for b in ALL_SCHEDULEABLE_BENCHMARKS}
def _GetBenchmarkConfig(benchmark_name, abridged=False):
return benchmarks[benchmark_name][abridged]
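# Illustrative note added for clarity (not part of the original file): both
# abridged variants of each BenchmarkConfig are created exactly once in
# |benchmarks|, so repeated _GetBenchmarkConfig() calls return the same object
# and the frozenset arithmetic below can rely on object identity.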
OFFICIAL_BENCHMARK_CONFIGS = PerfSuite(
[_GetBenchmarkConfig(b.Name()) for b in OFFICIAL_BENCHMARKS])
# power.mobile requires special hardware.
# only run blink_perf.sanitizer-api on linux-perf.
# speedometer2-chrome-health is only for use with the Chrome Health pipeline
OFFICIAL_BENCHMARK_CONFIGS = OFFICIAL_BENCHMARK_CONFIGS.Remove([
'power.mobile',
'blink_perf.sanitizer-api',
'speedometer2-chrome-health',
])
# TODO(crbug.com/965158): Remove OFFICIAL_BENCHMARK_NAMES once sharding
# scripts are no longer using it.
OFFICIAL_BENCHMARK_NAMES = frozenset(
b.name for b in OFFICIAL_BENCHMARK_CONFIGS.Frozenset())
# TODO(crbug.com/1030840): Stop using these 'OFFICIAL_EXCEPT' suites and instead
# define each benchmarking config separately as is already done for many of the
# suites below.
_OFFICIAL_EXCEPT_DISPLAY_LOCKING = PerfSuite(OFFICIAL_BENCHMARK_CONFIGS).Remove(
['blink_perf.display_locking'])
_OFFICIAL_EXCEPT_JETSTREAM2 = PerfSuite(OFFICIAL_BENCHMARK_CONFIGS).Remove(
['jetstream2'])
_OFFICIAL_EXCEPT_DISPLAY_LOCKING_JETSTREAM2 = PerfSuite(
OFFICIAL_BENCHMARK_CONFIGS).Remove(
['blink_perf.display_locking', 'jetstream2'])
def _base_perftests(estimated_runtime=270):
return ExecutableConfig(
'base_perftests',
flags=['--test-launcher-jobs=1', '--test-launcher-retry-limit=0'],
estimated_runtime=estimated_runtime)
def _components_perftests(estimated_runtime=110):
return ExecutableConfig('components_perftests',
flags=[
'--xvfb',
],
estimated_runtime=estimated_runtime)
def _dawn_perf_tests(estimated_runtime=270):
return ExecutableConfig(
'dawn_perf_tests',
flags=['--test-launcher-jobs=1', '--test-launcher-retry-limit=0'],
estimated_runtime=estimated_runtime)
def _gpu_perftests(estimated_runtime=60):
return ExecutableConfig('gpu_perftests', estimated_runtime=estimated_runtime)
def _load_library_perf_tests(estimated_runtime=3):
return ExecutableConfig('load_library_perf_tests',
estimated_runtime=estimated_runtime)
def _performance_browser_tests(estimated_runtime=67):
return ExecutableConfig(
'performance_browser_tests',
path='browser_tests',
flags=[
'--full-performance-run',
'--test-launcher-jobs=1',
'--test-launcher-retry-limit=0',
# Allow the full performance runs to take up to 60 seconds (rather
# than the default of 30 for normal CQ browser test runs).
'--ui-test-action-timeout=60000',
'--ui-test-action-max-timeout=60000',
'--test-launcher-timeout=60000',
'--gtest_filter=*/TabCapturePerformanceTest.*:'
'*/CastV2PerformanceTest.*',
],
estimated_runtime=estimated_runtime)
def _tracing_perftests(estimated_runtime=50):
return ExecutableConfig('tracing_perftests',
estimated_runtime=estimated_runtime)
def _views_perftests(estimated_runtime=7):
return ExecutableConfig('views_perftests',
flags=['--xvfb'],
estimated_runtime=estimated_runtime)
_CHROME_HEALTH_BENCHMARK_CONFIGS_DESKTOP = PerfSuite([
_GetBenchmarkConfig('system_health.common_desktop')
])
_LINUX_BENCHMARK_CONFIGS = PerfSuite(OFFICIAL_BENCHMARK_CONFIGS).Remove([
'blink_perf.display_locking',
'v8.runtime_stats.top_25',
]).Add([
'blink_perf.sanitizer-api',
])
_LINUX_EXECUTABLE_CONFIGS = frozenset([
# TODO(crbug.com/811766): Add views_perftests.
_base_perftests(200),
_load_library_perf_tests(),
_performance_browser_tests(165),
_tracing_perftests(5),
])
_MAC_HIGH_END_BENCHMARK_CONFIGS = PerfSuite(OFFICIAL_BENCHMARK_CONFIGS).Remove([
'blink_perf.display_locking',
'v8.runtime_stats.top_25',
])
_MAC_HIGH_END_EXECUTABLE_CONFIGS = frozenset([
_base_perftests(300),
_dawn_perf_tests(330),
_performance_browser_tests(190),
_views_perftests(),
])
_MAC_LOW_END_BENCHMARK_CONFIGS = PerfSuite(OFFICIAL_BENCHMARK_CONFIGS).Remove([
'jetstream2',
'v8.runtime_stats.top_25',
])
_MAC_LOW_END_EXECUTABLE_CONFIGS = frozenset([
_load_library_perf_tests(),
_performance_browser_tests(210),
])
_MAC_M1_MINI_2020_BENCHMARK_CONFIGS = PerfSuite(
OFFICIAL_BENCHMARK_CONFIGS).Remove([
'blink_perf.display_locking',
'v8.runtime_stats.top_25',
])
_MAC_M1_MINI_2020_EXECUTABLE_CONFIGS = frozenset([
_base_perftests(300),
_dawn_perf_tests(330),
_performance_browser_tests(190),
_views_perftests(),
])
_WIN_10_BENCHMARK_CONFIGS = PerfSuite(OFFICIAL_BENCHMARK_CONFIGS).Remove([
'blink_perf.display_locking',
'v8.runtime_stats.top_25',
])
_WIN_10_EXECUTABLE_CONFIGS = frozenset([
_base_perftests(200),
_components_perftests(125),
_dawn_perf_tests(600),
_views_perftests(),
])
_WIN_10_LOW_END_BENCHMARK_CONFIGS = PerfSuite(
OFFICIAL_BENCHMARK_CONFIGS).Remove([
'blink_perf.display_locking',
])
_WIN_10_LOW_END_HP_CANDIDATE_BENCHMARK_CONFIGS = PerfSuite([
_GetBenchmarkConfig('v8.browsing_desktop'),
_GetBenchmarkConfig('rendering.desktop', abridged=True),
])
_WIN_10_AMD_BENCHMARK_CONFIGS = PerfSuite([
_GetBenchmarkConfig('jetstream'),
_GetBenchmarkConfig('jetstream2'),
_GetBenchmarkConfig('kraken'),
_GetBenchmarkConfig('octane'),
_GetBenchmarkConfig('system_health.common_desktop'),
])
_WIN_10_AMD_LAPTOP_BENCHMARK_CONFIGS = PerfSuite([
_GetBenchmarkConfig('jetstream'),
_GetBenchmarkConfig('jetstream2'),
_GetBenchmarkConfig('kraken'),
_GetBenchmarkConfig('octane'),
])
_WIN_7_BENCHMARK_CONFIGS = PerfSuite([
'loading.desktop',
]).Abridge([
'loading.desktop',
])
_WIN_7_GPU_BENCHMARK_CONFIGS = PerfSuite(['rendering.desktop']).Abridge(
['rendering.desktop'])
_ANDROID_GO_BENCHMARK_CONFIGS = PerfSuite([
_GetBenchmarkConfig('system_health.memory_mobile'),
_GetBenchmarkConfig('system_health.common_mobile'),
_GetBenchmarkConfig('startup.mobile'),
_GetBenchmarkConfig('system_health.webview_startup'),
_GetBenchmarkConfig('v8.browsing_mobile'),
_GetBenchmarkConfig('speedometer'),
_GetBenchmarkConfig('speedometer2')])
_ANDROID_GO_WEBVIEW_BENCHMARK_CONFIGS = _ANDROID_GO_BENCHMARK_CONFIGS
# Note that Nexus 5 bot capacity is very low, so we must severely limit
# the benchmarks that run on it and abridge the larger ones. See
# crbug.com/1030840 for details.
_ANDROID_NEXUS_5_BENCHMARK_CONFIGS = PerfSuite([
'loading.mobile',
'startup.mobile',
'system_health.common_mobile',
'system_health.webview_startup',
]).Abridge(['loading.mobile', 'startup.mobile', 'system_health.common_mobile'])
_ANDROID_NEXUS_5_EXECUTABLE_CONFIGS = frozenset([
_components_perftests(100),
_gpu_perftests(45),
_tracing_perftests(55),
])
_ANDROID_NEXUS_5X_WEBVIEW_BENCHMARK_CONFIGS = PerfSuite(
OFFICIAL_BENCHMARK_CONFIGS).Remove([
'blink_perf.display_locking',
'jetstream2',
'system_health.weblayer_startup',
'v8.browsing_mobile-future',
])
_ANDROID_PIXEL2_BENCHMARK_CONFIGS = PerfSuite(
_OFFICIAL_EXCEPT_DISPLAY_LOCKING).Remove(['system_health.weblayer_startup'])
_ANDROID_PIXEL2_EXECUTABLE_CONFIGS = frozenset([
_components_perftests(60),
])
_ANDROID_PIXEL2_WEBVIEW_BENCHMARK_CONFIGS = PerfSuite(
OFFICIAL_BENCHMARK_CONFIGS).Remove([
'blink_perf.display_locking',
'jetstream2',
'system_health.weblayer_startup',
'v8.browsing_mobile-future',
])
_ANDROID_PIXEL2_WEBLAYER_BENCHMARK_CONFIGS = PerfSuite([
_GetBenchmarkConfig('system_health.common_mobile', True),
_GetBenchmarkConfig('system_health.memory_mobile', True),
_GetBenchmarkConfig('startup.mobile'),
_GetBenchmarkConfig('system_health.weblayer_startup')
])
_ANDROID_PIXEL4_BENCHMARK_CONFIGS = PerfSuite(
_OFFICIAL_EXCEPT_DISPLAY_LOCKING).Remove(['system_health.weblayer_startup'])
_ANDROID_PIXEL4_EXECUTABLE_CONFIGS = frozenset([
_components_perftests(60),
])
_ANDROID_PIXEL4_WEBVIEW_BENCHMARK_CONFIGS = PerfSuite(
OFFICIAL_BENCHMARK_CONFIGS).Remove([
'blink_perf.display_locking',
'jetstream2',
'system_health.weblayer_startup',
'v8.browsing_mobile-future',
])
_ANDROID_PIXEL4_WEBLAYER_BENCHMARK_CONFIGS = PerfSuite([
_GetBenchmarkConfig('system_health.common_mobile', True),
_GetBenchmarkConfig('system_health.memory_mobile', True),
_GetBenchmarkConfig('startup.mobile'),
_GetBenchmarkConfig('system_health.weblayer_startup')
])
_ANDROID_PIXEL4A_POWER_BENCHMARK_CONFIGS = PerfSuite([
_GetBenchmarkConfig('power.mobile'),
_GetBenchmarkConfig('system_health.scroll_jank_mobile')
])
_ANDROID_NEXUS5X_FYI_BENCHMARK_CONFIGS = PerfSuite(
[_GetBenchmarkConfig('system_health.scroll_jank_mobile')])
_ANDROID_PIXEL2_AAB_FYI_BENCHMARK_CONFIGS = PerfSuite(
[_GetBenchmarkConfig('startup.mobile')])
_ANDROID_PIXEL2_FYI_BENCHMARK_CONFIGS = PerfSuite([
_GetBenchmarkConfig('v8.browsing_mobile'),
_GetBenchmarkConfig('system_health.memory_mobile'),
_GetBenchmarkConfig('system_health.common_mobile'),
_GetBenchmarkConfig('startup.mobile'),
_GetBenchmarkConfig('speedometer2'),
_GetBenchmarkConfig('rendering.mobile'),
_GetBenchmarkConfig('octane'),
_GetBenchmarkConfig('jetstream'),
_GetBenchmarkConfig('system_health.scroll_jank_mobile')
])
_CHROMEOS_KEVIN_FYI_BENCHMARK_CONFIGS = PerfSuite([
_GetBenchmarkConfig('rendering.desktop')])
_LACROS_EVE_BENCHMARK_CONFIGS = PerfSuite(OFFICIAL_BENCHMARK_CONFIGS).Remove([
'blink_perf.display_locking',
'v8.runtime_stats.top_25',
])
_LINUX_PERF_FYI_BENCHMARK_CONFIGS = PerfSuite([
_GetBenchmarkConfig('power.desktop'),
_GetBenchmarkConfig('rendering.desktop'),
_GetBenchmarkConfig('system_health.common_desktop')
])
_FUCHSIA_PERF_FYI_BENCHMARK_CONFIGS = PerfSuite([
_GetBenchmarkConfig('system_health.memory_desktop'),
_GetBenchmarkConfig('media.mobile')
])
_LINUX_PERF_CALIBRATION_BENCHMARK_CONFIGS = PerfSuite([
_GetBenchmarkConfig('speedometer2'),
_GetBenchmarkConfig('blink_perf.shadow_dom'),
_GetBenchmarkConfig('system_health.common_desktop'),
])
_ANDROID_PIXEL2_PERF_CALIBRATION_BENCHMARK_CONFIGS = PerfSuite([
_GetBenchmarkConfig('system_health.common_mobile'),
_GetBenchmarkConfig('system_health.memory_mobile'),
])
# Linux
LINUX = PerfPlatform(
'linux-perf',
'Ubuntu-18.04, 8 core, NVIDIA Quadro P400',
_LINUX_BENCHMARK_CONFIGS,
26,
'linux',
executables=_LINUX_EXECUTABLE_CONFIGS)
LINUX_REL = PerfPlatform(
'linux-perf-rel',
'Ubuntu-18.04, 8 core, NVIDIA Quadro P400',
_CHROME_HEALTH_BENCHMARK_CONFIGS_DESKTOP,
2,
'linux',
executables=_LINUX_EXECUTABLE_CONFIGS)
# Mac
MAC_HIGH_END = PerfPlatform(
'mac-10_13_laptop_high_end-perf',
'MacBook Pro, Core i7 2.8 GHz, 16GB RAM, 256GB SSD, Radeon 55',
_MAC_HIGH_END_BENCHMARK_CONFIGS,
26,
'mac',
executables=_MAC_HIGH_END_EXECUTABLE_CONFIGS)
MAC_LOW_END = PerfPlatform(
'mac-10_12_laptop_low_end-perf',
'MacBook Air, Core i5 1.8 GHz, 8GB RAM, 128GB SSD, HD Graphics',
_MAC_LOW_END_BENCHMARK_CONFIGS,
26,
'mac',
executables=_MAC_LOW_END_EXECUTABLE_CONFIGS)
MAC_M1_MINI_2020 = PerfPlatform(
'mac-m1_mini_2020-perf',
'Mac M1 Mini 2020',
_MAC_M1_MINI_2020_BENCHMARK_CONFIGS,
26,
'mac',
executables=_MAC_M1_MINI_2020_EXECUTABLE_CONFIGS)
# Win
WIN_10_LOW_END = PerfPlatform(
'win-10_laptop_low_end-perf',
    'Low end Windows 10 HP laptops. HD Graphics 5500, x86-64-i3-5005U, '
'SSD, 4GB RAM.',
_WIN_10_LOW_END_BENCHMARK_CONFIGS,
# TODO(crbug.com/998161): Increase the number of shards once you
# have enough test data to make a shard map and when more devices
# are added to the data center.
46,
'win')
WIN_10 = PerfPlatform(
'win-10-perf',
'Windows Intel HD 630 towers, Core i7-7700 3.6 GHz, 16GB RAM,'
' Intel Kaby Lake HD Graphics 630', _WIN_10_BENCHMARK_CONFIGS,
26, 'win', executables=_WIN_10_EXECUTABLE_CONFIGS)
WIN_10_AMD = PerfPlatform('win-10_amd-perf', 'Windows AMD chipset',
_WIN_10_AMD_BENCHMARK_CONFIGS, 1, 'win')
WIN_10_AMD_LAPTOP = PerfPlatform('win-10_amd_laptop-perf',
'Windows 10 Laptop with AMD chipset.',
_WIN_10_AMD_LAPTOP_BENCHMARK_CONFIGS, 2, 'win')
WIN_7 = PerfPlatform('Win 7 Perf', 'N/A', _WIN_7_BENCHMARK_CONFIGS, 2, 'win')
WIN_7_GPU = PerfPlatform('Win 7 Nvidia GPU Perf', 'N/A',
_WIN_7_GPU_BENCHMARK_CONFIGS, 3, 'win')
# Android
ANDROID_GO = PerfPlatform(
'android-go-perf', 'Android O (gobo)', _ANDROID_GO_BENCHMARK_CONFIGS,
19, 'android')
ANDROID_GO_WEBVIEW = PerfPlatform('android-go_webview-perf',
'Android OPM1.171019.021 (gobo)',
_ANDROID_GO_WEBVIEW_BENCHMARK_CONFIGS, 13,
'android')
ANDROID_NEXUS_5 = PerfPlatform('Android Nexus5 Perf',
'Android KOT49H',
_ANDROID_NEXUS_5_BENCHMARK_CONFIGS,
10,
'android',
executables=_ANDROID_NEXUS_5_EXECUTABLE_CONFIGS)
ANDROID_NEXUS_5X_WEBVIEW = PerfPlatform(
'Android Nexus5X WebView Perf', 'Android AOSP MOB30K',
_ANDROID_NEXUS_5X_WEBVIEW_BENCHMARK_CONFIGS, 16, 'android')
ANDROID_PIXEL2 = PerfPlatform('android-pixel2-perf',
'Android OPM1.171019.021',
_ANDROID_PIXEL2_BENCHMARK_CONFIGS,
28,
'android',
executables=_ANDROID_PIXEL2_EXECUTABLE_CONFIGS)
ANDROID_PIXEL2_WEBVIEW = PerfPlatform(
'android-pixel2_webview-perf', 'Android OPM1.171019.021',
_ANDROID_PIXEL2_WEBVIEW_BENCHMARK_CONFIGS, 21, 'android')
ANDROID_PIXEL2_WEBLAYER = PerfPlatform(
'android-pixel2_weblayer-perf', 'Android OPM1.171019.021',
_ANDROID_PIXEL2_WEBLAYER_BENCHMARK_CONFIGS, 4, 'android')
ANDROID_PIXEL4 = PerfPlatform('android-pixel4-perf',
'Android R',
_ANDROID_PIXEL4_BENCHMARK_CONFIGS,
28,
'android',
executables=_ANDROID_PIXEL4_EXECUTABLE_CONFIGS)
ANDROID_PIXEL4_WEBVIEW = PerfPlatform(
'android-pixel4_webview-perf', 'Android R',
_ANDROID_PIXEL4_WEBVIEW_BENCHMARK_CONFIGS, 21, 'android')
ANDROID_PIXEL4_WEBLAYER = PerfPlatform(
'android-pixel4_weblayer-perf', 'Android R',
_ANDROID_PIXEL4_WEBLAYER_BENCHMARK_CONFIGS, 4, 'android')
ANDROID_PIXEL4A_POWER = PerfPlatform('android-pixel4a_power-perf',
'Android QD4A.200102.001.A1',
_ANDROID_PIXEL4A_POWER_BENCHMARK_CONFIGS,
1, 'android')
# Cros/Lacros
LACROS_EVE_PERF = PerfPlatform('lacros-eve-perf', '',
_LACROS_EVE_BENCHMARK_CONFIGS, 8, 'chromeos')
# FYI bots
WIN_10_LOW_END_HP_CANDIDATE = PerfPlatform(
'win-10_laptop_low_end-perf_HP-Candidate', 'HP 15-BS121NR Laptop Candidate',
_WIN_10_LOW_END_HP_CANDIDATE_BENCHMARK_CONFIGS,
1, 'win', is_fyi=True)
ANDROID_NEXUS5X_PERF_FYI = PerfPlatform('android-nexus5x-perf-fyi',
'Android MMB29Q',
_ANDROID_NEXUS5X_FYI_BENCHMARK_CONFIGS,
2,
'android',
is_fyi=True)
ANDROID_PIXEL2_PERF_AAB_FYI = PerfPlatform(
'android-pixel2-perf-aab-fyi',
'Android OPM1.171019.021',
_ANDROID_PIXEL2_AAB_FYI_BENCHMARK_CONFIGS,
1,
'android',
is_fyi=True)
ANDROID_PIXEL2_PERF_FYI = PerfPlatform('android-pixel2-perf-fyi',
'Android OPM1.171019.021',
_ANDROID_PIXEL2_FYI_BENCHMARK_CONFIGS,
4,
'android',
is_fyi=True)
CHROMEOS_KEVIN_PERF_FYI = PerfPlatform('chromeos-kevin-perf-fyi',
'',
_CHROMEOS_KEVIN_FYI_BENCHMARK_CONFIGS,
4,
'chromeos',
is_fyi=True)
LINUX_PERF_FYI = PerfPlatform('linux-perf-fyi',
'',
_LINUX_PERF_FYI_BENCHMARK_CONFIGS,
1,
'linux',
is_fyi=True)
FUCHSIA_PERF_FYI = PerfPlatform('fuchsia-perf-fyi',
'',
_FUCHSIA_PERF_FYI_BENCHMARK_CONFIGS,
3,
'fuchsia',
is_fyi=True)
# Calibration bots
LINUX_PERF_CALIBRATION = PerfPlatform(
'linux-perf-calibration',
'Ubuntu-18.04, 8 core, NVIDIA Quadro P400',
_LINUX_BENCHMARK_CONFIGS,
28,
'linux',
executables=_LINUX_EXECUTABLE_CONFIGS,
is_calibration=True)
ANDROID_PIXEL2_PERF_CALIBRATION = PerfPlatform(
'android-pixel2-perf-calibration',
'Android OPM1.171019.021',
_ANDROID_PIXEL2_BENCHMARK_CONFIGS,
42,
'android',
executables=_ANDROID_PIXEL2_EXECUTABLE_CONFIGS,
is_calibration=True)
ALL_PLATFORMS = {
p for p in locals().values() if isinstance(p, PerfPlatform)
}
PLATFORMS_BY_NAME = {p.name: p for p in ALL_PLATFORMS}
FYI_PLATFORMS = {
p for p in ALL_PLATFORMS if p.is_fyi
}
CALIBRATION_PLATFORMS = {p for p in ALL_PLATFORMS if p.is_calibration}
OFFICIAL_PLATFORMS = {p for p in ALL_PLATFORMS if p.is_official}
ALL_PLATFORM_NAMES = {
p.name for p in ALL_PLATFORMS
}
OFFICIAL_PLATFORM_NAMES = {
p.name for p in OFFICIAL_PLATFORMS
}
def find_bot_platform(builder_name):
for bot_platform in ALL_PLATFORMS:
if bot_platform.name == builder_name:
return bot_platform
|
nwjs/chromium.src
|
tools/perf/core/bot_platforms.py
|
Python
|
bsd-3-clause
| 25,195
|
# -*- coding: utf-8 -*-
"""Basic regex lexer implementation"""
# :copyright: (c) 2009 - 2012 Thom Neale and individual contributors,
# All rights reserved.
# :license: BSD (3 Clause), see LICENSE for more details.
from __future__ import absolute_import
import logging.config
from rexlex import config
VERSION = (0, 0, 2, '')
__version__ = '.'.join(str(p) for p in VERSION[0:3]) + ''.join(VERSION[3:])
__author__ = 'Thom Neale'
__contact__ = 'twneale@gmail.com'
__homepage__ = 'http://github.com/twneale/rexlex'
__docformat__ = 'restructuredtext'
__all__ = [
'Lexer', 'Token', 'include', 'bygroups', 'Rule',
'ScannerLexer', 'IncompleteLex',
'TRACE', 'TRACE_RESULT', 'TRACE_META', 'TRACE_STATE',
'TRACE_RULE', '__version__']
# Configure logging.
logging.config.dictConfig(config.LOGGING_CONFIG)
# Import custom log levels.
from rexlex.log_config import (
REXLEX_TRACE_RESULT as TRACE_RESULT,
REXLEX_TRACE_META as TRACE_META,
REXLEX_TRACE_STATE as TRACE_STATE,
REXLEX_TRACE_RULE as TRACE_RULE,
REXLEX_TRACE as TRACE,
)
# Import lexer.
from rexlex.lexer.lexer import Lexer
from rexlex.lexer.tokentype import Token
from rexlex.lexer.utils import include, bygroups, Rule
from rexlex.lexer.exceptions import IncompleteLex
# Import Scanner.
from rexlex.scanner.scanner import ScannerLexer
|
twneale/rexlex
|
rexlex/__init__.py
|
Python
|
bsd-3-clause
| 1,346
|
# mapper/sync.py
# Copyright (C) 2005, 2006, 2007, 2008 Michael Bayer mike_mp@zzzcomputing.com
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Contains the ClauseSynchronizer class, which is used to map
attributes between two objects in a manner corresponding to a SQL
clause that compares column values.
"""
from sqlalchemy import schema, exceptions, util
from sqlalchemy.sql import visitors, operators
from sqlalchemy import logging
from sqlalchemy.orm import util as mapperutil
ONETOMANY = 0
MANYTOONE = 1
MANYTOMANY = 2
class ClauseSynchronizer(object):
"""Given a SQL clause, usually a series of one or more binary
expressions between columns, and a set of 'source' and
'destination' mappers, compiles a set of SyncRules corresponding
to that information.
The ClauseSynchronizer can then be executed given a set of
parent/child objects or destination dictionary, which will iterate
through each of its SyncRules and execute them. Each SyncRule
will copy the value of a single attribute from the parent to the
child, corresponding to the pair of columns in a particular binary
expression, using the source and destination mappers to map those
two columns to object attributes within parent and child.
"""
def __init__(self, parent_mapper, child_mapper, direction):
self.parent_mapper = parent_mapper
self.child_mapper = child_mapper
self.direction = direction
self.syncrules = []
def compile(self, sqlclause, foreign_keys=None, issecondary=None):
def compile_binary(binary):
"""Assemble a SyncRule given a single binary condition."""
if binary.operator != operators.eq or not isinstance(binary.left, schema.Column) or not isinstance(binary.right, schema.Column):
return
source_column = None
dest_column = None
if foreign_keys is None:
if binary.left.table == binary.right.table:
raise exceptions.ArgumentError("need foreign_keys argument for self-referential sync")
if binary.left in util.Set([f.column for f in binary.right.foreign_keys]):
dest_column = binary.right
source_column = binary.left
elif binary.right in util.Set([f.column for f in binary.left.foreign_keys]):
dest_column = binary.left
source_column = binary.right
else:
if binary.left in foreign_keys:
source_column = binary.right
dest_column = binary.left
elif binary.right in foreign_keys:
source_column = binary.left
dest_column = binary.right
if source_column and dest_column:
if self.direction == ONETOMANY:
self.syncrules.append(SyncRule(self.parent_mapper, source_column, dest_column, dest_mapper=self.child_mapper))
elif self.direction == MANYTOONE:
self.syncrules.append(SyncRule(self.child_mapper, source_column, dest_column, dest_mapper=self.parent_mapper))
else:
if not issecondary:
self.syncrules.append(SyncRule(self.parent_mapper, source_column, dest_column, dest_mapper=self.child_mapper, issecondary=issecondary))
else:
self.syncrules.append(SyncRule(self.child_mapper, source_column, dest_column, dest_mapper=self.parent_mapper, issecondary=issecondary))
rules_added = len(self.syncrules)
visitors.traverse(sqlclause, visit_binary=compile_binary)
if len(self.syncrules) == rules_added:
raise exceptions.ArgumentError("No syncrules generated for join criterion " + str(sqlclause))
def dest_columns(self):
return [r.dest_column for r in self.syncrules if r.dest_column is not None]
def update(self, dest, parent, child, old_prefix):
for rule in self.syncrules:
rule.update(dest, parent, child, old_prefix)
def execute(self, source, dest, obj=None, child=None, clearkeys=None):
for rule in self.syncrules:
rule.execute(source, dest, obj, child, clearkeys)
def source_changes(self, uowcommit, source):
for rule in self.syncrules:
if rule.source_changes(uowcommit, source):
return True
else:
return False
class SyncRule(object):
"""An instruction indicating how to populate the objects on each
side of a relationship.
E.g. if table1 column A is joined against table2 column
B, and we are a one-to-many from table1 to table2, a syncrule
would say *take the A attribute from object1 and assign it to the
B attribute on object2*.
"""
def __init__(self, source_mapper, source_column, dest_column, dest_mapper=None, issecondary=None):
self.source_mapper = source_mapper
self.source_column = source_column
self.issecondary = issecondary
self.dest_mapper = dest_mapper
self.dest_column = dest_column
#print "SyncRule", source_mapper, source_column, dest_column, dest_mapper
def dest_primary_key(self):
# late-evaluating boolean since some syncs are created
# before the mapper has assembled pks
try:
return self._dest_primary_key
except AttributeError:
self._dest_primary_key = self.dest_mapper is not None and self.dest_column in self.dest_mapper._pks_by_table[self.dest_column.table] and not self.dest_mapper.allow_null_pks
return self._dest_primary_key
def _raise_col_to_prop(self, isdest):
if isdest:
raise exceptions.UnmappedColumnError("Can't execute sync rule for destination column '%s'; mapper '%s' does not map this column. Try using an explicit `foreign_keys` collection which does not include this column (or use a viewonly=True relation)." % (self.dest_column, self.dest_mapper))
else:
raise exceptions.UnmappedColumnError("Can't execute sync rule for source column '%s'; mapper '%s' does not map this column. Try using an explicit `foreign_keys` collection which does not include destination column '%s' (or use a viewonly=True relation)." % (self.source_column, self.source_mapper, self.dest_column))
def source_changes(self, uowcommit, source):
try:
prop = self.source_mapper._get_col_to_prop(self.source_column)
except exceptions.UnmappedColumnError:
self._raise_col_to_prop(False)
(added, unchanged, deleted) = uowcommit.get_attribute_history(source, prop.key, passive=True)
return bool(added and deleted)
def update(self, dest, parent, child, old_prefix):
if self.issecondary is False:
source = parent
elif self.issecondary is True:
source = child
try:
oldvalue = self.source_mapper._get_committed_attr_by_column(source.obj(), self.source_column)
value = self.source_mapper._get_state_attr_by_column(source, self.source_column)
except exceptions.UnmappedColumnError:
self._raise_col_to_prop(False)
dest[self.dest_column.key] = value
dest[old_prefix + self.dest_column.key] = oldvalue
def execute(self, source, dest, parent, child, clearkeys):
# TODO: break the "dictionary" case into a separate method like 'update' above,
# reduce conditionals
if source is None:
if self.issecondary is False:
source = parent
elif self.issecondary is True:
source = child
if clearkeys or source is None:
value = None
clearkeys = True
else:
try:
value = self.source_mapper._get_state_attr_by_column(source, self.source_column)
except exceptions.UnmappedColumnError:
self._raise_col_to_prop(False)
if isinstance(dest, dict):
dest[self.dest_column.key] = value
else:
if clearkeys and self.dest_primary_key():
raise exceptions.AssertionError("Dependency rule tried to blank-out primary key column '%s' on instance '%s'" % (str(self.dest_column), mapperutil.state_str(dest)))
if logging.is_debug_enabled(self.logger):
self.logger.debug("execute() instances: %s(%s)->%s(%s) ('%s')" % (mapperutil.state_str(source), str(self.source_column), mapperutil.state_str(dest), str(self.dest_column), value))
try:
self.dest_mapper._set_state_attr_by_column(dest, self.dest_column, value)
except exceptions.UnmappedColumnError:
self._raise_col_to_prop(True)
SyncRule.logger = logging.class_logger(SyncRule)
|
santisiri/popego
|
envs/ALPHA-POPEGO/lib/python2.5/site-packages/SQLAlchemy-0.4.3-py2.5.egg/sqlalchemy/orm/sync.py
|
Python
|
bsd-3-clause
| 9,024
|
'''
Created on Dec 16, 2011
@author: t4aalton
'''
from socialDevices.action import Action, actionbody, actionprecondition
from socialDevices.deviceInterfaces.talkingDevice import TalkingDevice
from socialDevices.device import Device
import socialDevices.misc as misc
class DialogTest(Action):
def __init__(self, d1, d2):
self.d1 = d1
self.d2 = d2
@actionprecondition
def precondition(self, d1, d2):
return misc.proximity([d1, d2]) and d1.talkingDevice.isWilling() and d2.talkingDevice.isWilling() and d1.talkingDevice.isSilent() and \
d1.hasInterface(TalkingDevice) and d2.hasInterface(TalkingDevice)
@actionbody
def body(self, d1, d2):
mtalk = misc.Mtalk()
conversation = mtalk.getConversation(nbrOfPeople=2)
devices = [d1, d2]
for line in conversation:
devices[line[0]].talkingDevice.say(line[1])
|
socialdevices/manager
|
core/fixtures/talkingDevices/invalid_files/dialog_action_exists.py
|
Python
|
bsd-3-clause
| 928
|