hexsha (stringlengths 40..40) | size (int64 3..1.03M) | ext (stringclasses 10 values) | lang (stringclasses 1 value) | max_stars_repo_path (stringlengths 3..972) | max_stars_repo_name (stringlengths 6..130) | max_stars_repo_head_hexsha (stringlengths 40..78) | max_stars_repo_licenses (listlengths 1..10) | max_stars_count (int64 1..191k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24..24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24..24 ⌀) | max_issues_repo_path (stringlengths 3..972) | max_issues_repo_name (stringlengths 6..130) | max_issues_repo_head_hexsha (stringlengths 40..78) | max_issues_repo_licenses (listlengths 1..10) | max_issues_count (int64 1..116k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24..24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24..24 ⌀) | max_forks_repo_path (stringlengths 3..972) | max_forks_repo_name (stringlengths 6..130) | max_forks_repo_head_hexsha (stringlengths 40..78) | max_forks_repo_licenses (listlengths 1..10) | max_forks_count (int64 1..105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24..24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24..24 ⌀) | content (stringlengths 3..1.03M) | avg_line_length (float64 1.13..941k) | max_line_length (int64 2..941k) | alphanum_fraction (float64 0..1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
720badc8342a8777d1052bba1e6430c44f634e25 | 1,809 | py | Python | aita/auth.py | ze-lin/AITA | 0f2fe4e630c37fcc566a54621880b78ec67eefa6 | ["MIT"] | null | null | null | aita/auth.py | ze-lin/AITA | 0f2fe4e630c37fcc566a54621880b78ec67eefa6 | ["MIT"] | null | null | null | aita/auth.py | ze-lin/AITA | 0f2fe4e630c37fcc566a54621880b78ec67eefa6 | ["MIT"] | 1 | 2020-12-29T19:45:28.000Z | 2020-12-29T19:45:28.000Z |
import datetime, base64, functools
from flask import Blueprint, g, request, session, jsonify, render_template
from aita.db import get_collection
bp = Blueprint('auth', __name__, url_prefix='/auth')
@bp.route('/signup', methods=['POST'])
def sign_up():
MEMBER = get_collection('member')
result = MEMBER.find_one({'usr': request.form['usr']})
if result:
return 'Taken'
else:
image = request.files['file']
document = {
'usr': request.form['usr'],
'pwd': request.form['pwd'],
'role': request.form['role'],
'date': str(datetime.date.today()),
'pic': base64.encodebytes(image.read())
}
MEMBER.insert_one(document)
return 'Success!'
@bp.route('/signin', methods=['POST'])
def sign_in():
result_filter = {
'usr': request.form['usr'],
'pwd': request.form['pwd']
}
MEMBER = get_collection('member')
result = MEMBER.find_one(result_filter)
if result:
session.clear()  # Flask's session keeps per-user data in a signed client-side cookie; clear any stale state before logging in
session['usr'] = result['usr']
return jsonify(status=True, role=result['role'])
else:
return jsonify(status=False)
@bp.before_app_request
def load_logged_in_user():
usr_id = session.get('usr')
if usr_id is None:
g.usr = None
else:
MEMBER = get_collection('member')
result = MEMBER.find_one({'usr': usr_id})
result.pop('pwd')
g.usr = result
@bp.route('/logout', methods=['GET'])
def logout():
session.clear()
return 'Success!'
def login_required(view):
@functools.wraps(view)
def wrapped_view(**kwargs):
if g.usr is None:
return 'Invalid User'
return view(**kwargs)
return wrapped_view
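# Hypothetical usage sketch (not part of the original file): `login_required` is
# defined above but never applied here. In another blueprint it would typically
# wrap a view like the one below, relying on `load_logged_in_user` having
# populated `g.usr` from the session cookie:
#
#   @bp.route('/me', methods=['GET'])
#   @login_required
#   def me():
#       return jsonify(usr=g.usr['usr'], role=g.usr['role'])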
| 24.780822 | 78 | 0.595909 |
4c0809bfbc5bbe4cc151050f60a215fb3ff072dd | 4,940 | py | Python | library/k8s_v1_component_status_list.py | ansible/ansible-kubernetes-modules- | b5c7a85de6173c2f6141f19a130ff37b1fdafbf6 | ["Apache-2.0"] | 91 | 2017-03-23T03:46:43.000Z | 2021-06-03T18:30:03.000Z | library/k8s_v1_component_status_list.py | ansible/ansible-kubernetes-modules- | b5c7a85de6173c2f6141f19a130ff37b1fdafbf6 | ["Apache-2.0"] | 28 | 2017-06-02T18:21:13.000Z | 2020-01-29T22:33:05.000Z | library/k8s_v1_component_status_list.py | ansible/ansible-kubernetes-modules- | b5c7a85de6173c2f6141f19a130ff37b1fdafbf6 | ["Apache-2.0"] | 40 | 2017-03-23T03:46:45.000Z | 2022-02-01T14:29:21.000Z |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from ansible.module_utils.k8s_common import KubernetesAnsibleModule, KubernetesAnsibleException
DOCUMENTATION = '''
module: k8s_v1_component_status_list
short_description: Kubernetes ComponentStatusList
description:
- Retrieve a list of component_status. List operations provide a snapshot read of
the underlying objects, returning a resource_version representing a consistent version
of the listed objects.
version_added: 2.3.0
author: OpenShift (@openshift)
options:
api_key:
description:
- Token used to connect to the API.
cert_file:
description:
- Path to a certificate used to authenticate with the API.
type: path
context:
description:
- The name of a context found in the Kubernetes config file.
debug:
description:
- Enable debug output from the OpenShift helper. Logging info is written to KubeObjHelper.log
default: false
type: bool
force:
description:
- If set to C(True), and I(state) is C(present), an existing object will be updated,
and lists will be replaced rather than merged.
default: false
type: bool
host:
description:
- Provide a URL for accessing the Kubernetes API.
key_file:
description:
- Path to a key file used to authenticate with the API.
type: path
kubeconfig:
description:
- Path to an existing Kubernetes config file. If not provided, and no other connection
options are provided, the openshift client will attempt to load the default
configuration file from I(~/.kube/config.json).
type: path
password:
description:
- Provide a password for connecting to the API. Use in conjunction with I(username).
ssl_ca_cert:
description:
- Path to a CA certificate used to authenticate with the API.
type: path
username:
description:
- Provide a username for connecting to the API.
verify_ssl:
description:
- Whether or not to verify the API server's SSL certificates.
type: bool
requirements:
- kubernetes == 4.0.0
'''
EXAMPLES = '''
'''
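# Hypothetical invocation sketch (EXAMPLES is empty in this file; the task below
# uses only options documented above and is not taken from the upstream module):
#
#   - name: List Kubernetes component statuses
#     k8s_v1_component_status_list:
#       kubeconfig: ~/.kube/config
#     register: component_status_result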
RETURN = '''
api_version:
description: Requested API version
type: string
component_status_list:
type: complex
returned: on success
contains:
api_version:
description:
- APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
type: str
items:
description:
- List of ComponentStatus objects.
type: list
contains:
api_version:
description:
- APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value,
and may reject unrecognized values.
type: str
conditions:
description:
- List of component conditions observed
type: list
contains:
error:
description:
- Condition error code for a component. For example, a health check
error code.
type: str
message:
description:
- Message about the condition for a component. For example, information
about a health check.
type: str
status:
description:
- 'Status of the condition for a component. Valid values for "Healthy":
"True", "False", or "Unknown".'
type: str
type:
description:
- 'Type of condition for a component. Valid value: "Healthy"'
type: str
kind:
description:
- Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated. In CamelCase.
type: str
metadata:
description:
- Standard object's metadata.
type: complex
kind:
description:
- Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to. Cannot
be updated. In CamelCase.
type: str
metadata:
description:
- Standard list metadata.
type: complex
'''
def main():
try:
module = KubernetesAnsibleModule('component_status_list', 'v1')
except KubernetesAnsibleException as exc:
# The helper failed to init, so there is no module object. All we can do is raise the error.
raise Exception(exc.message)
try:
module.execute_module()
except KubernetesAnsibleException as exc:
module.fail_json(msg="Module failed!", error=str(exc))
if __name__ == '__main__':
main()
| 31.265823 | 100 | 0.652632 |
6a434cb0184d36c03e7163fa656314a95b70e8c3 | 5,183 | py | Python | backend/models.py | telemahos/decaf | b891f937b176bcf0edb0b0d99b8fd1c87eac1996 | ["Apache-2.0"] | null | null | null | backend/models.py | telemahos/decaf | b891f937b176bcf0edb0b0d99b8fd1c87eac1996 | ["Apache-2.0"] | null | null | null | backend/models.py | telemahos/decaf | b891f937b176bcf0edb0b0d99b8fd1c87eac1996 | ["Apache-2.0"] | null | null | null |
from sqlalchemy import Column, Boolean, Integer, String, ForeignKey, Date, Float
from sqlalchemy.orm import relationship
from .database import Base
class Notes(Base):
__tablename__ = 'notes'
id = Column(Integer, primary_key=True, index=True)
date = Column(Date, index=True)
title = Column(String, index=True)
body = Column(String, index=True)
tags = Column(String, index=True)
active = Column(Boolean, default=False)
author_id = Column(Integer, ForeignKey("users.id"))
class Blog(Base):
__tablename__ = 'blogs'
id = Column(Integer, primary_key=True, index=True)
date = Column(Date, index=True)
title = Column(String, index=True)
body = Column(String, index=True)
tags = Column(String, index=True)
active = Column(Boolean, default=False)
author_id = Column(Integer, ForeignKey("users.id"))
owner = relationship("User", back_populates="the_blogs")
class User(Base):
__tablename__ = 'users'
id = Column(Integer, primary_key=True, index=True)
name = Column(String, index=True)
email = Column(String, index=True)
password = Column(String)
the_blogs = relationship("Blog", back_populates="owner")
class Income_old(Base):
__tablename__ = 'income_old'
id = Column(Integer, primary_key=True, index=True)
date = Column(Date, index=True)
z_count = Column(Integer)
early_income = Column(Float)
late_income = Column(Float)
notes = Column(String)
class Outcome(Base):
__tablename__ = 'outcome'
id = Column(Integer, primary_key=True, index=True)
date = Column(Date, index=True)
description = Column(String, index=True)
invoice_number = Column(String)
cost = Column(Float)
extra_cost = Column(Float)
tax_perc = Column(Integer)
tax_perc2 = Column(Integer)
supplier_id = Column(Integer)
staff_id = Column(Integer)
fixed_cost_id = Column(Integer)
# supplier_id = Column(Integer, ForeignKey("suppliers.id"))
# staff_id = Column(Integer, ForeignKey("staff.id"))
is_variable_cost = Column(Boolean(), default=False)
is_fix_cost = Column(Boolean(), default=False)
is_purchase_cost = Column(Boolean(), default=False)
is_salary_cost = Column(Boolean(), default=False)
is_insurance_cost = Column(Boolean(), default=False)
is_misc_cost = Column(Boolean(), default=False)
payment_way = Column(String)
is_paid = Column(Boolean(), default=False)
outcome_notes = Column(String)
# the_supplier = relationship("Suppliers", back_populates="the_outcome")
# the_staff = relationship("Staff", back_populates="the_outcome")
class OutcomeDetails(Base):
__tablename__ = 'outcome_details'
id = Column(Integer, primary_key=True, index=True)
# date = Column(Date, index=True)
outcome_id = Column(Integer, ForeignKey("outcome.id"), index=True, nullable=False)
product_name = Column(String)
product_description = Column(String)
price_per = Column(Float)
amount = Column(Integer)
tax = Column(Integer)
notes = Column(String)
class FixedCost(Base):
__tablename__ = 'fixed_cost'
id = Column(Integer, primary_key=True, index=True)
name = Column(String)
class Suppliers(Base):
__tablename__ = 'suppliers'
id = Column(Integer, primary_key=True, index=True)
company_name = Column(String)
responsible = Column(String)
address = Column(String)
telephone = Column(String)
email = Column(String)
payment_way = Column(String)
notes = Column(String)
# the_outcome = relationship("Outcome", back_populates="the_supplier")
class Staff(Base):
__tablename__ = 'staff'
id = Column(Integer, primary_key=True, index=True)
name = Column(String)
position = Column(String)
active = Column(Boolean(), default=False)
daily_salary = Column(Float)
insurance = Column(Float)
notes = Column(String)
# the_outcome = relationship("Outcome", back_populates="the_staff")
# income = relationship("Income", back_populates="staff")
class Shift(Base):
__tablename__ = 'shift'
id = Column(Integer, primary_key=True, index=True)
service_id_1 = Column(Integer)
barman_id_1 = Column(Integer)
service_id_2 = Column(Integer)
barman_id_2 = Column(Integer)
the_income = relationship("Income", back_populates="the_shift")
class Income(Base):
__tablename__ = 'income'
id = Column(Integer, primary_key=True, index=True)
date = Column(Date, index=True)
service_income_1 = Column(Float)
service_income_2 = Column(Float)
bar_income_1 = Column(Float)
bar_income_2 = Column(Float)
pos = Column(Float, default=0.0)
z_count = Column(Integer, default=0)
vat = Column(Float, default=0.0)
waitress_1 = Column(String)
waitress_2 = Column(String)
barman_1 = Column(String)
barman_2 = Column(String)
notes = Column(String)
shift_id = Column(Integer, ForeignKey('shift.id'), nullable=False)
# staff_id = Column('staff_id', Integer(), ForeignKey('staff.id'), nullable=False)
# staff = relationship("Staff", back_populates="income")
the_shift = relationship("Shift", back_populates="the_income")
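# Hypothetical usage sketch (not part of the original file): these models assume
# the sibling `database` module also exposes a SQLAlchemy `engine` and a session
# factory such as `SessionLocal`, which is a common layout but is not shown here.
#
#   from .database import engine, SessionLocal
#   Base.metadata.create_all(bind=engine)   # create all tables declared above
#   db = SessionLocal()
#   db.add(Staff(name="Alice", position="service", daily_salary=60.0, active=True))
#   db.commit()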
| 35.258503 | 86 | 0.693807 |
93ef979cf96b2d1ec5a23232a6f8671378363248 | 35,277 | py | Python | nova/conductor/manager.py | larsbutler/nova | fb190f30a911658d8b0c4deaf43cbb8c9e35b672 | ["Apache-2.0"] | null | null | null | nova/conductor/manager.py | larsbutler/nova | fb190f30a911658d8b0c4deaf43cbb8c9e35b672 | ["Apache-2.0"] | null | null | null | nova/conductor/manager.py | larsbutler/nova | fb190f30a911658d8b0c4deaf43cbb8c9e35b672 | ["Apache-2.0"] | null | null | null |
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Handles database requests from other nova services."""
import copy
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_utils import excutils
import six
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute.utils import wrap_instance_event
from nova.compute import vm_states
from nova.conductor.tasks import live_migrate
from nova.conductor.tasks import migrate
from nova.db import base
from nova import exception
from nova.i18n import _, _LE, _LI, _LW
from nova import image
from nova import manager
from nova import network
from nova import objects
from nova.objects import base as nova_object
from nova import rpc
from nova.scheduler import client as scheduler_client
from nova.scheduler import utils as scheduler_utils
from nova import servicegroup
from nova import utils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class ConductorManager(manager.Manager):
"""Mission: Conduct things.
The methods in the base API for nova-conductor are various proxy operations
performed on behalf of the nova-compute service running on compute nodes.
Compute nodes are not allowed to directly access the database, so this set
of methods allows them to get specific work done without locally accessing
the database.
The nova-conductor service also exposes an API in the 'compute_task'
namespace. See the ComputeTaskManager class for details.
"""
target = messaging.Target(version='3.0')
def __init__(self, *args, **kwargs):
super(ConductorManager, self).__init__(service_name='conductor',
*args, **kwargs)
self.compute_task_mgr = ComputeTaskManager()
self.additional_endpoints.append(self.compute_task_mgr)
# NOTE(hanlind): This can be removed in version 4.0 of the RPC API
def provider_fw_rule_get_all(self, context):
# NOTE(hanlind): Simulate an empty db result for compat reasons.
return []
def _object_dispatch(self, target, method, args, kwargs):
"""Dispatch a call to an object method.
This ensures that object methods get called and any exception
that is raised gets wrapped in an ExpectedException for forwarding
back to the caller (without spamming the conductor logs).
"""
try:
# NOTE(danms): Keep the getattr inside the try block since
# a missing method is really a client problem
return getattr(target, method)(*args, **kwargs)
except Exception:
raise messaging.ExpectedException()
def object_class_action_versions(self, context, objname, objmethod,
object_versions, args, kwargs):
objclass = nova_object.NovaObject.obj_class_from_name(
objname, object_versions[objname])
args = tuple([context] + list(args))
result = self._object_dispatch(objclass, objmethod, args, kwargs)
# NOTE(danms): The RPC layer will convert to primitives for us,
# but in this case, we need to honor the version the client is
# asking for, so we do it before returning here.
return (result.obj_to_primitive(
target_version=object_versions[objname],
version_manifest=object_versions)
if isinstance(result, nova_object.NovaObject) else result)
def object_action(self, context, objinst, objmethod, args, kwargs):
"""Perform an action on an object."""
oldobj = objinst.obj_clone()
result = self._object_dispatch(objinst, objmethod, args, kwargs)
updates = dict()
# NOTE(danms): Diff the object with the one passed to us and
# generate a list of changes to forward back
for name, field in objinst.fields.items():
if not objinst.obj_attr_is_set(name):
# Avoid demand-loading anything
continue
if (not oldobj.obj_attr_is_set(name) or
getattr(oldobj, name) != getattr(objinst, name)):
updates[name] = field.to_primitive(objinst, name,
getattr(objinst, name))
# This is safe since a field named this would conflict with the
# method anyway
updates['obj_what_changed'] = objinst.obj_what_changed()
return updates, result
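# Illustrative note (added commentary, not in the original source): if the
# dispatched call changed, e.g., an Instance's 'task_state', the returned
# 'updates' dict would look like
#   {'task_state': <new primitive value>, 'obj_what_changed': {'task_state'}}
# so the RPC caller can apply the same field changes to its local copy.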
def object_backport_versions(self, context, objinst, object_versions):
target = object_versions[objinst.obj_name()]
LOG.debug('Backporting %(obj)s to %(ver)s with versions %(manifest)s',
{'obj': objinst.obj_name(),
'ver': target,
'manifest': ','.join(
['%s=%s' % (name, ver)
for name, ver in object_versions.items()])})
return objinst.obj_to_primitive(target_version=target,
version_manifest=object_versions)
def reset(self):
objects.Service.clear_min_version_cache()
class ComputeTaskManager(base.Base):
"""Namespace for compute methods.
This class presents an rpc API for nova-conductor under the 'compute_task'
namespace. The methods here are compute operations that are invoked
by the API service. These methods see the operation to completion, which
may involve coordinating activities on multiple compute nodes.
"""
target = messaging.Target(namespace='compute_task', version='1.15')
def __init__(self):
super(ComputeTaskManager, self).__init__()
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
self.image_api = image.API()
self.network_api = network.API()
self.servicegroup_api = servicegroup.API()
self.scheduler_client = scheduler_client.SchedulerClient()
self.notifier = rpc.get_notifier('compute', CONF.host)
def reset(self):
LOG.info(_LI('Reloading compute RPC API'))
compute_rpcapi.LAST_VERSION = None
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
# TODO(tdurakov): remove `live` parameter here on compute task api RPC
# version bump to 2.x
@messaging.expected_exceptions(
exception.NoValidHost,
exception.ComputeServiceUnavailable,
exception.ComputeHostNotFound,
exception.InvalidHypervisorType,
exception.InvalidCPUInfo,
exception.UnableToMigrateToSelf,
exception.DestinationHypervisorTooOld,
exception.InvalidLocalStorage,
exception.InvalidSharedStorage,
exception.HypervisorUnavailable,
exception.InstanceInvalidState,
exception.MigrationPreCheckError,
exception.MigrationPreCheckClientException,
exception.LiveMigrationWithOldNovaNotSupported,
exception.UnsupportedPolicyException)
@wrap_instance_event(prefix='conductor')
def migrate_server(self, context, instance, scheduler_hint, live, rebuild,
flavor, block_migration, disk_over_commit, reservations=None,
clean_shutdown=True, request_spec=None):
if instance and not isinstance(instance, nova_object.NovaObject):
# NOTE(danms): Until v2 of the RPC API, we need to tolerate
# old-world instance objects here
attrs = ['metadata', 'system_metadata', 'info_cache',
'security_groups']
instance = objects.Instance._from_db_object(
context, objects.Instance(), instance,
expected_attrs=attrs)
# NOTE: Remove this when we drop support for v1 of the RPC API
if flavor and not isinstance(flavor, objects.Flavor):
# Code downstream may expect extra_specs to be populated since it
# is receiving an object, so lookup the flavor to ensure this.
flavor = objects.Flavor.get_by_id(context, flavor['id'])
if live and not rebuild and not flavor:
self._live_migrate(context, instance, scheduler_hint,
block_migration, disk_over_commit, request_spec)
elif not live and not rebuild and flavor:
instance_uuid = instance.uuid
with compute_utils.EventReporter(context, 'cold_migrate',
instance_uuid):
self._cold_migrate(context, instance, flavor,
scheduler_hint['filter_properties'],
reservations, clean_shutdown, request_spec)
else:
raise NotImplementedError()
def _cold_migrate(self, context, instance, flavor, filter_properties,
reservations, clean_shutdown, request_spec):
image = utils.get_image_from_system_metadata(
instance.system_metadata)
# NOTE(sbauza): If a reschedule occurs when prep_resize(), then
# it only provides filter_properties legacy dict back to the
# conductor with no RequestSpec part of the payload.
if not request_spec:
# Make sure we hydrate a new RequestSpec object with the new flavor
# and not the nested one from the instance
request_spec = objects.RequestSpec.from_components(
context, instance.uuid, image,
flavor, instance.numa_topology, instance.pci_requests,
filter_properties, None, instance.availability_zone)
else:
# NOTE(sbauza): Resizes mean a new flavor, so we need to update the
# original RequestSpec object to make sure the scheduler verifies
# the right one and not the original flavor
request_spec.flavor = flavor
task = self._build_cold_migrate_task(context, instance, flavor,
request_spec,
reservations, clean_shutdown)
# TODO(sbauza): Provide directly the RequestSpec object once
# _set_vm_state_and_notify() accepts it
legacy_spec = request_spec.to_legacy_request_spec_dict()
try:
task.execute()
except exception.NoValidHost as ex:
vm_state = instance.vm_state
if not vm_state:
vm_state = vm_states.ACTIVE
updates = {'vm_state': vm_state, 'task_state': None}
self._set_vm_state_and_notify(context, instance.uuid,
'migrate_server',
updates, ex, legacy_spec)
# if the flavor IDs match, it's migrate; otherwise resize
if flavor.id == instance.instance_type_id:
msg = _("No valid host found for cold migrate")
else:
msg = _("No valid host found for resize")
raise exception.NoValidHost(reason=msg)
except exception.UnsupportedPolicyException as ex:
with excutils.save_and_reraise_exception():
vm_state = instance.vm_state
if not vm_state:
vm_state = vm_states.ACTIVE
updates = {'vm_state': vm_state, 'task_state': None}
self._set_vm_state_and_notify(context, instance.uuid,
'migrate_server',
updates, ex, legacy_spec)
except Exception as ex:
with excutils.save_and_reraise_exception():
updates = {'vm_state': instance.vm_state,
'task_state': None}
self._set_vm_state_and_notify(context, instance.uuid,
'migrate_server',
updates, ex, legacy_spec)
# NOTE(sbauza): Make sure we persist the new flavor in case we had
# a successful scheduler call if and only if nothing bad happened
if request_spec.obj_what_changed():
request_spec.save()
def _set_vm_state_and_notify(self, context, instance_uuid, method, updates,
ex, request_spec):
scheduler_utils.set_vm_state_and_notify(
context, instance_uuid, 'compute_task', method, updates,
ex, request_spec)
def _cleanup_allocated_networks(
self, context, instance, requested_networks):
try:
# If we were told not to allocate networks let's save ourselves
# the trouble of calling the network API.
if not (requested_networks and requested_networks.no_allocate):
self.network_api.deallocate_for_instance(
context, instance, requested_networks=requested_networks)
except Exception:
msg = _LE('Failed to deallocate networks')
LOG.exception(msg, instance=instance)
return
instance.system_metadata['network_allocated'] = 'False'
try:
instance.save()
except exception.InstanceNotFound:
# NOTE: It's possible that we're cleaning up the networks
# because the instance was deleted. If that's the case then this
# exception will be raised by instance.save()
pass
@wrap_instance_event(prefix='conductor')
def live_migrate_instance(self, context, instance, scheduler_hint,
block_migration, disk_over_commit, request_spec):
self._live_migrate(context, instance, scheduler_hint,
block_migration, disk_over_commit, request_spec)
def _live_migrate(self, context, instance, scheduler_hint,
block_migration, disk_over_commit, request_spec):
destination = scheduler_hint.get("host")
def _set_vm_state(context, instance, ex, vm_state=None,
task_state=None):
request_spec = {'instance_properties': {
'uuid': instance.uuid, },
}
scheduler_utils.set_vm_state_and_notify(context,
instance.uuid,
'compute_task', 'migrate_server',
dict(vm_state=vm_state,
task_state=task_state,
expected_task_state=task_states.MIGRATING,),
ex, request_spec)
migration = objects.Migration(context=context.elevated())
migration.dest_compute = destination
migration.status = 'accepted'
migration.instance_uuid = instance.uuid
migration.source_compute = instance.host
migration.migration_type = 'live-migration'
if instance.obj_attr_is_set('flavor'):
migration.old_instance_type_id = instance.flavor.id
migration.new_instance_type_id = instance.flavor.id
else:
migration.old_instance_type_id = instance.instance_type_id
migration.new_instance_type_id = instance.instance_type_id
migration.create()
task = self._build_live_migrate_task(context, instance, destination,
block_migration, disk_over_commit,
migration, request_spec)
try:
task.execute()
except (exception.NoValidHost,
exception.ComputeHostNotFound,
exception.ComputeServiceUnavailable,
exception.InvalidHypervisorType,
exception.InvalidCPUInfo,
exception.UnableToMigrateToSelf,
exception.DestinationHypervisorTooOld,
exception.InvalidLocalStorage,
exception.InvalidSharedStorage,
exception.HypervisorUnavailable,
exception.InstanceInvalidState,
exception.MigrationPreCheckError,
exception.MigrationPreCheckClientException,
exception.LiveMigrationWithOldNovaNotSupported,
exception.MigrationSchedulerRPCError) as ex:
with excutils.save_and_reraise_exception():
# TODO(johngarbutt) - eventually need instance actions here
_set_vm_state(context, instance, ex, instance.vm_state)
migration.status = 'error'
migration.save()
except Exception as ex:
LOG.error(_LE('Migration of instance %(instance_id)s to host'
' %(dest)s unexpectedly failed.'),
{'instance_id': instance.uuid, 'dest': destination},
exc_info=True)
_set_vm_state(context, instance, ex, vm_states.ERROR,
instance.task_state)
migration.status = 'error'
migration.save()
raise exception.MigrationError(reason=six.text_type(ex))
def _build_live_migrate_task(self, context, instance, destination,
block_migration, disk_over_commit, migration,
request_spec=None):
return live_migrate.LiveMigrationTask(context, instance,
destination, block_migration,
disk_over_commit, migration,
self.compute_rpcapi,
self.servicegroup_api,
self.scheduler_client,
request_spec)
def _build_cold_migrate_task(self, context, instance, flavor,
request_spec, reservations,
clean_shutdown):
return migrate.MigrationTask(context, instance, flavor,
request_spec,
reservations, clean_shutdown,
self.compute_rpcapi,
self.scheduler_client)
def _destroy_build_request(self, context, instance):
# The BuildRequest needs to be stored until the instance is mapped to
# an instance table. At that point it will never be used again and
# should be deleted.
try:
build_request = objects.BuildRequest.get_by_instance_uuid(context,
instance.uuid)
# TODO(alaski): Sync API updates of the build_request to the
# instance before it is destroyed. Right now only locked_by can
# be updated before this is destroyed.
build_request.destroy()
except exception.BuildRequestNotFound:
with excutils.save_and_reraise_exception() as exc_ctxt:
service_version = objects.Service.get_minimum_version(
context, 'nova-api')
if service_version >= 12:
# A BuildRequest was created during the boot process, the
# NotFound exception indicates a delete happened which
# should abort the boot.
pass
else:
LOG.debug('BuildRequest not found for instance %(uuid)s, '
'likely due to an older nova-api service '
'running.', {'uuid': instance.uuid})
exc_ctxt.reraise = False
return
def _populate_instance_mapping(self, context, instance, host):
try:
inst_mapping = objects.InstanceMapping.get_by_instance_uuid(
context, instance.uuid)
except exception.InstanceMappingNotFound:
# NOTE(alaski): If nova-api is up to date this exception should
# never be hit. But during an upgrade it's possible that an old
# nova-api didn't create an instance_mapping during this boot
# request.
LOG.debug('Instance was not mapped to a cell, likely due '
'to an older nova-api service running.',
instance=instance)
return None
else:
try:
host_mapping = objects.HostMapping.get_by_host(context,
host['host'])
except exception.HostMappingNotFound:
# NOTE(alaski): For now this exception means that a
# deployment has not migrated to cellsv2 and we should
# remove the instance_mapping that has been created.
# Eventually this will indicate a failure to properly map a
# host to a cell and we may want to reschedule.
inst_mapping.destroy()
return None
else:
inst_mapping.cell_mapping = host_mapping.cell_mapping
inst_mapping.save()
return inst_mapping
def build_instances(self, context, instances, image, filter_properties,
admin_password, injected_files, requested_networks,
security_groups, block_device_mapping=None, legacy_bdm=True):
# TODO(ndipanov): Remove block_device_mapping and legacy_bdm in version
# 2.0 of the RPC API.
# TODO(danms): Remove this in version 2.0 of the RPC API
if (requested_networks and
not isinstance(requested_networks,
objects.NetworkRequestList)):
requested_networks = objects.NetworkRequestList.from_tuples(
requested_networks)
# TODO(melwitt): Remove this in version 2.0 of the RPC API
flavor = filter_properties.get('instance_type')
if flavor and not isinstance(flavor, objects.Flavor):
# Code downstream may expect extra_specs to be populated since it
# is receiving an object, so lookup the flavor to ensure this.
flavor = objects.Flavor.get_by_id(context, flavor['id'])
filter_properties = dict(filter_properties, instance_type=flavor)
request_spec = {}
try:
# check retry policy. Rather ugly use of instances[0]...
# but if we've exceeded max retries... then we really only
# have a single instance.
scheduler_utils.populate_retry(
filter_properties, instances[0].uuid)
request_spec = scheduler_utils.build_request_spec(
context, image, instances)
hosts = self._schedule_instances(
context, request_spec, filter_properties)
except Exception as exc:
updates = {'vm_state': vm_states.ERROR, 'task_state': None}
for instance in instances:
self._set_vm_state_and_notify(
context, instance.uuid, 'build_instances', updates,
exc, request_spec)
self._cleanup_allocated_networks(
context, instance, requested_networks)
return
for (instance, host) in six.moves.zip(instances, hosts):
try:
instance.refresh()
except (exception.InstanceNotFound,
exception.InstanceInfoCacheNotFound):
LOG.debug('Instance deleted during build', instance=instance)
continue
local_filter_props = copy.deepcopy(filter_properties)
scheduler_utils.populate_filter_properties(local_filter_props,
host)
# The block_device_mapping passed from the api doesn't contain
# instance specific information
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
inst_mapping = self._populate_instance_mapping(context, instance,
host)
try:
self._destroy_build_request(context, instance)
except exception.BuildRequestNotFound:
# This indicates an instance delete has been requested in the
# API. Stop the build, cleanup the instance_mapping and
# potentially the block_device_mappings
# TODO(alaski): Handle block_device_mapping cleanup
if inst_mapping:
inst_mapping.destroy()
return
self.compute_rpcapi.build_and_run_instance(context,
instance=instance, host=host['host'], image=image,
request_spec=request_spec,
filter_properties=local_filter_props,
admin_password=admin_password,
injected_files=injected_files,
requested_networks=requested_networks,
security_groups=security_groups,
block_device_mapping=bdms, node=host['nodename'],
limits=host['limits'])
def _schedule_instances(self, context, request_spec, filter_properties):
scheduler_utils.setup_instance_group(context, request_spec,
filter_properties)
# TODO(sbauza): Hydrate here the object until we modify the
# scheduler.utils methods to directly use the RequestSpec object
spec_obj = objects.RequestSpec.from_primitives(
context, request_spec, filter_properties)
hosts = self.scheduler_client.select_destinations(context, spec_obj)
return hosts
def unshelve_instance(self, context, instance, request_spec=None):
sys_meta = instance.system_metadata
def safe_image_show(ctx, image_id):
if image_id:
return self.image_api.get(ctx, image_id, show_deleted=False)
else:
raise exception.ImageNotFound(image_id='')
if instance.vm_state == vm_states.SHELVED:
instance.task_state = task_states.POWERING_ON
instance.save(expected_task_state=task_states.UNSHELVING)
self.compute_rpcapi.start_instance(context, instance)
elif instance.vm_state == vm_states.SHELVED_OFFLOADED:
image = None
image_id = sys_meta.get('shelved_image_id')
# No need to check for image if image_id is None as
# "shelved_image_id" key is not set for volume backed
# instance during the shelve process
if image_id:
with compute_utils.EventReporter(
context, 'get_image_info', instance.uuid):
try:
image = safe_image_show(context, image_id)
except exception.ImageNotFound:
instance.vm_state = vm_states.ERROR
instance.save()
reason = _('Unshelve attempted but the image %s '
'cannot be found.') % image_id
LOG.error(reason, instance=instance)
raise exception.UnshelveException(
instance_id=instance.uuid, reason=reason)
try:
with compute_utils.EventReporter(context, 'schedule_instances',
instance.uuid):
if not request_spec:
# NOTE(sbauza): We were unable to find an original
# RequestSpec object - probably because the instance is
# old. We need to mock that the old way
filter_properties = {}
request_spec = scheduler_utils.build_request_spec(
context, image, [instance])
else:
# NOTE(sbauza): Force_hosts/nodes needs to be reset
# if we want to make sure that the next destination
# is not forced to be the original host
request_spec.reset_forced_destinations()
# TODO(sbauza): Provide directly the RequestSpec object
# when _schedule_instances(),
# populate_filter_properties and populate_retry()
# accept it
filter_properties = request_spec.\
to_legacy_filter_properties_dict()
request_spec = request_spec.\
to_legacy_request_spec_dict()
scheduler_utils.populate_retry(filter_properties,
instance.uuid)
hosts = self._schedule_instances(
context, request_spec, filter_properties)
host_state = hosts[0]
scheduler_utils.populate_filter_properties(
filter_properties, host_state)
(host, node) = (host_state['host'], host_state['nodename'])
self.compute_rpcapi.unshelve_instance(
context, instance, host, image=image,
filter_properties=filter_properties, node=node)
except (exception.NoValidHost,
exception.UnsupportedPolicyException):
instance.task_state = None
instance.save()
LOG.warning(_LW("No valid host found for unshelve instance"),
instance=instance)
return
except Exception:
with excutils.save_and_reraise_exception():
instance.task_state = None
instance.save()
LOG.error(_LE("Unshelve attempted but an error "
"has occurred"), instance=instance)
else:
LOG.error(_LE('Unshelve attempted but vm_state not SHELVED or '
'SHELVED_OFFLOADED'), instance=instance)
instance.vm_state = vm_states.ERROR
instance.save()
return
def rebuild_instance(self, context, instance, orig_image_ref, image_ref,
injected_files, new_pass, orig_sys_metadata,
bdms, recreate, on_shared_storage,
preserve_ephemeral=False, host=None,
request_spec=None):
with compute_utils.EventReporter(context, 'rebuild_server',
instance.uuid):
node = limits = None
if not host:
if not request_spec:
# NOTE(sbauza): We were unable to find an original
# RequestSpec object - probably because the instance is old
# We need to mock that the old way
filter_properties = {'ignore_hosts': [instance.host]}
request_spec = scheduler_utils.build_request_spec(
context, image_ref, [instance])
else:
# NOTE(sbauza): Augment the RequestSpec object by excluding
# the source host so the scheduler does not pick it again
request_spec.ignore_hosts = request_spec.ignore_hosts or []
request_spec.ignore_hosts.append(instance.host)
# NOTE(sbauza): Force_hosts/nodes needs to be reset
# if we want to make sure that the next destination
# is not forced to be the original host
request_spec.reset_forced_destinations()
# TODO(sbauza): Provide directly the RequestSpec object
# when _schedule_instances() and _set_vm_state_and_notify()
# accept it
filter_properties = request_spec.\
to_legacy_filter_properties_dict()
request_spec = request_spec.to_legacy_request_spec_dict()
try:
hosts = self._schedule_instances(
context, request_spec, filter_properties)
host_dict = hosts.pop(0)
host, node, limits = (host_dict['host'],
host_dict['nodename'],
host_dict['limits'])
except exception.NoValidHost as ex:
with excutils.save_and_reraise_exception():
self._set_vm_state_and_notify(context, instance.uuid,
'rebuild_server',
{'vm_state': instance.vm_state,
'task_state': None}, ex, request_spec)
LOG.warning(_LW("No valid host found for rebuild"),
instance=instance)
except exception.UnsupportedPolicyException as ex:
with excutils.save_and_reraise_exception():
self._set_vm_state_and_notify(context, instance.uuid,
'rebuild_server',
{'vm_state': instance.vm_state,
'task_state': None}, ex, request_spec)
LOG.warning(_LW("Server with unsupported policy "
"cannot be rebuilt"),
instance=instance)
try:
migration = objects.Migration.get_by_instance_and_status(
context, instance.uuid, 'accepted')
except exception.MigrationNotFoundByStatus:
LOG.debug("No migration record for the rebuild/evacuate "
"request.", instance=instance)
migration = None
compute_utils.notify_about_instance_usage(
self.notifier, context, instance, "rebuild.scheduled")
self.compute_rpcapi.rebuild_instance(context,
instance=instance,
new_pass=new_pass,
injected_files=injected_files,
image_ref=image_ref,
orig_image_ref=orig_image_ref,
orig_sys_metadata=orig_sys_metadata,
bdms=bdms,
recreate=recreate,
on_shared_storage=on_shared_storage,
preserve_ephemeral=preserve_ephemeral,
migration=migration,
host=host, node=node, limits=limits)
| 49.407563 | 79 | 0.587011 |
8efdb885a680cab0421fed10a80c0e09050debcf | 260 | py | Python | manage.py | benstreb/daily-interesting | bac71fa54a1eb4e139d38af4e22fe38a69fee2cb | ["MIT"] | null | null | null | manage.py | benstreb/daily-interesting | bac71fa54a1eb4e139d38af4e22fe38a69fee2cb | ["MIT"] | null | null | null | manage.py | benstreb/daily-interesting | bac71fa54a1eb4e139d38af4e22fe38a69fee2cb | ["MIT"] | null | null | null |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "daily_interesting.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
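# Typical invocation sketch (generic Django usage, not specific to this repo):
#   python manage.py migrate
#   python manage.py runserver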
| 23.636364 | 81 | 0.780769 |
5be420ba88ad39038f57b0e6cf80034293984036 | 27,310 | py | Python | examples/slim/nets/resnet_v1_test.py | MohammadChalaki/morph-net | 83addae677195b4beba34a321ccf9c0cd55d5b62 | ["Apache-2.0"] | 1,061 | 2019-03-12T20:35:41.000Z | 2022-02-08T00:00:33.000Z | examples/slim/nets/resnet_v1_test.py | MohammadChalaki/morph-net | 83addae677195b4beba34a321ccf9c0cd55d5b62 | ["Apache-2.0"] | 67 | 2019-03-16T18:50:59.000Z | 2021-09-03T13:28:42.000Z | examples/slim/nets/resnet_v1_test.py | MohammadChalaki/morph-net | 83addae677195b4beba34a321ccf9c0cd55d5b62 | ["Apache-2.0"] | 168 | 2019-03-23T09:36:39.000Z | 2022-01-16T12:19:33.000Z |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.nets.resnet_v1."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from nets import resnet_utils
from nets import resnet_v1
import numpy as np
from six.moves import range
from six.moves import zip
import tensorflow as tf
slim = tf.contrib.slim
def create_test_input(batch_size, height, width, channels):
"""Create test input tensor.
Args:
batch_size: The number of images per batch or `None` if unknown.
height: The height of each image or `None` if unknown.
width: The width of each image or `None` if unknown.
channels: The number of channels per image or `None` if unknown.
Returns:
Either a placeholder `Tensor` of dimension
[batch_size, height, width, channels] if any of the inputs are `None` or a
constant `Tensor` with the mesh grid values along the spatial dimensions.
"""
if None in [batch_size, height, width, channels]:
return tf.placeholder(tf.float32, (batch_size, height, width, channels))
else:
return tf.to_float(
np.tile(
np.reshape(
np.reshape(np.arange(height), [height, 1]) +
np.reshape(np.arange(width), [1, width]),
[1, height, width, 1]),
[batch_size, 1, 1, channels]))
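# Worked example (illustrative, not in the original file): for
# create_test_input(1, 2, 2, 1) the constant branch produces the mesh grid
#   [[0, 1],
#    [1, 2]]
# (pixel value = row index + column index), reshaped and tiled to shape
# [1, 2, 2, 1].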
class ResnetUtilsTest(tf.test.TestCase):
def testSubsampleThreeByThree(self):
x = tf.reshape(tf.to_float(tf.range(9)), [1, 3, 3, 1])
x = resnet_utils.subsample(x, 2)
expected = tf.reshape(tf.constant([0, 2, 6, 8]), [1, 2, 2, 1])
with self.test_session():
self.assertAllClose(x.eval(), expected.eval())
def testSubsampleFourByFour(self):
x = tf.reshape(tf.to_float(tf.range(16)), [1, 4, 4, 1])
x = resnet_utils.subsample(x, 2)
expected = tf.reshape(tf.constant([0, 2, 8, 10]), [1, 2, 2, 1])
with self.test_session():
self.assertAllClose(x.eval(), expected.eval())
def testConv2DSameEven(self):
n, n2 = 4, 2
# Input image.
x = create_test_input(1, n, n, 1)
# Convolution kernel.
w = create_test_input(1, 3, 3, 1)
w = tf.reshape(w, [3, 3, 1, 1])
tf.get_variable('Conv/weights', initializer=w)
tf.get_variable('Conv/biases', initializer=tf.zeros([1]))
tf.get_variable_scope().reuse_variables()
y1 = slim.conv2d(x, 1, [3, 3], stride=1, scope='Conv')
y1_expected = tf.to_float([[14, 28, 43, 26],
[28, 48, 66, 37],
[43, 66, 84, 46],
[26, 37, 46, 22]])
y1_expected = tf.reshape(y1_expected, [1, n, n, 1])
y2 = resnet_utils.subsample(y1, 2)
y2_expected = tf.to_float([[14, 43],
[43, 84]])
y2_expected = tf.reshape(y2_expected, [1, n2, n2, 1])
y3 = resnet_utils.conv2d_same(x, 1, 3, stride=2, scope='Conv')
y3_expected = y2_expected
y4 = slim.conv2d(x, 1, [3, 3], stride=2, scope='Conv')
y4_expected = tf.to_float([[48, 37],
[37, 22]])
y4_expected = tf.reshape(y4_expected, [1, n2, n2, 1])
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
self.assertAllClose(y1.eval(), y1_expected.eval())
self.assertAllClose(y2.eval(), y2_expected.eval())
self.assertAllClose(y3.eval(), y3_expected.eval())
self.assertAllClose(y4.eval(), y4_expected.eval())
def testConv2DSameOdd(self):
n, n2 = 5, 3
# Input image.
x = create_test_input(1, n, n, 1)
# Convolution kernel.
w = create_test_input(1, 3, 3, 1)
w = tf.reshape(w, [3, 3, 1, 1])
tf.get_variable('Conv/weights', initializer=w)
tf.get_variable('Conv/biases', initializer=tf.zeros([1]))
tf.get_variable_scope().reuse_variables()
y1 = slim.conv2d(x, 1, [3, 3], stride=1, scope='Conv')
y1_expected = tf.to_float([[14, 28, 43, 58, 34],
[28, 48, 66, 84, 46],
[43, 66, 84, 102, 55],
[58, 84, 102, 120, 64],
[34, 46, 55, 64, 30]])
y1_expected = tf.reshape(y1_expected, [1, n, n, 1])
y2 = resnet_utils.subsample(y1, 2)
y2_expected = tf.to_float([[14, 43, 34],
[43, 84, 55],
[34, 55, 30]])
y2_expected = tf.reshape(y2_expected, [1, n2, n2, 1])
y3 = resnet_utils.conv2d_same(x, 1, 3, stride=2, scope='Conv')
y3_expected = y2_expected
y4 = slim.conv2d(x, 1, [3, 3], stride=2, scope='Conv')
y4_expected = y2_expected
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
self.assertAllClose(y1.eval(), y1_expected.eval())
self.assertAllClose(y2.eval(), y2_expected.eval())
self.assertAllClose(y3.eval(), y3_expected.eval())
self.assertAllClose(y4.eval(), y4_expected.eval())
def _resnet_plain(self, inputs, blocks, output_stride=None, scope=None):
"""A plain ResNet without extra layers before or after the ResNet blocks."""
with tf.variable_scope(scope, values=[inputs]):
with slim.arg_scope([slim.conv2d], outputs_collections='end_points'):
net = resnet_utils.stack_blocks_dense(inputs, blocks, output_stride)
end_points = slim.utils.convert_collection_to_dict('end_points')
return net, end_points
def testEndPointsV1(self):
"""Test the end points of a tiny v1 bottleneck network."""
blocks = [
resnet_v1.resnet_v1_block(
'block1', base_depth=1, num_units=2, stride=2),
resnet_v1.resnet_v1_block(
'block2', base_depth=2, num_units=2, stride=1),
]
inputs = create_test_input(2, 32, 16, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
_, end_points = self._resnet_plain(inputs, blocks, scope='tiny')
expected = [
'tiny/block1/unit_1/bottleneck_v1/shortcut',
'tiny/block1/unit_1/bottleneck_v1/conv1',
'tiny/block1/unit_1/bottleneck_v1/conv2',
'tiny/block1/unit_1/bottleneck_v1/conv3',
'tiny/block1/unit_2/bottleneck_v1/conv1',
'tiny/block1/unit_2/bottleneck_v1/conv2',
'tiny/block1/unit_2/bottleneck_v1/conv3',
'tiny/block2/unit_1/bottleneck_v1/shortcut',
'tiny/block2/unit_1/bottleneck_v1/conv1',
'tiny/block2/unit_1/bottleneck_v1/conv2',
'tiny/block2/unit_1/bottleneck_v1/conv3',
'tiny/block2/unit_2/bottleneck_v1/conv1',
'tiny/block2/unit_2/bottleneck_v1/conv2',
'tiny/block2/unit_2/bottleneck_v1/conv3']
self.assertItemsEqual(expected, list(end_points.keys()))
def _stack_blocks_nondense(self, net, blocks):
"""A simplified ResNet Block stacker without output stride control."""
for block in blocks:
with tf.variable_scope(block.scope, 'block', [net]):
for i, unit in enumerate(block.args):
with tf.variable_scope('unit_%d' % (i + 1), values=[net]):
net = block.unit_fn(net, rate=1, **unit)
return net
def testAtrousValuesBottleneck(self):
"""Verify the values of dense feature extraction by atrous convolution.
Make sure that dense feature extraction by stack_blocks_dense() followed by
subsampling gives identical results to feature extraction at the nominal
network output stride using the simple self._stack_blocks_nondense() above.
"""
block = resnet_v1.resnet_v1_block
blocks = [
block('block1', base_depth=1, num_units=2, stride=2),
block('block2', base_depth=2, num_units=2, stride=2),
block('block3', base_depth=4, num_units=2, stride=2),
block('block4', base_depth=8, num_units=2, stride=1),
]
nominal_stride = 8
# Test both odd and even input dimensions.
height = 30
width = 31
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
with slim.arg_scope([slim.batch_norm], is_training=False):
for output_stride in [1, 2, 4, 8, None]:
with tf.Graph().as_default():
with self.test_session() as sess:
tf.set_random_seed(0)
inputs = create_test_input(1, height, width, 3)
# Dense feature extraction followed by subsampling.
output = resnet_utils.stack_blocks_dense(inputs,
blocks,
output_stride)
if output_stride is None:
factor = 1
else:
factor = nominal_stride // output_stride
output = resnet_utils.subsample(output, factor)
# Make the two networks use the same weights.
tf.get_variable_scope().reuse_variables()
# Feature extraction at the nominal network rate.
expected = self._stack_blocks_nondense(inputs, blocks)
sess.run(tf.global_variables_initializer())
output, expected = sess.run([output, expected])
self.assertAllClose(output, expected, atol=1e-4, rtol=1e-4)
def testStridingLastUnitVsSubsampleBlockEnd(self):
"""Compares subsampling at the block's last unit or block's end.
Makes sure that the final output is the same when we use a stride at the
last unit of a block vs. we subsample activations at the end of a block.
"""
block = resnet_v1.resnet_v1_block
blocks = [
block('block1', base_depth=1, num_units=2, stride=2),
block('block2', base_depth=2, num_units=2, stride=2),
block('block3', base_depth=4, num_units=2, stride=2),
block('block4', base_depth=8, num_units=2, stride=1),
]
# Test both odd and even input dimensions.
height = 30
width = 31
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
with slim.arg_scope([slim.batch_norm], is_training=False):
for output_stride in [1, 2, 4, 8, None]:
with tf.Graph().as_default():
with self.test_session() as sess:
tf.set_random_seed(0)
inputs = create_test_input(1, height, width, 3)
# Subsampling at the last unit of the block.
output = resnet_utils.stack_blocks_dense(
inputs, blocks, output_stride,
store_non_strided_activations=False,
outputs_collections='output')
output_end_points = slim.utils.convert_collection_to_dict(
'output')
# Make the two networks use the same weights.
tf.get_variable_scope().reuse_variables()
# Subsample activations at the end of the blocks.
expected = resnet_utils.stack_blocks_dense(
inputs, blocks, output_stride,
store_non_strided_activations=True,
outputs_collections='expected')
expected_end_points = slim.utils.convert_collection_to_dict(
'expected')
sess.run(tf.global_variables_initializer())
# Make sure that the final output is the same.
output, expected = sess.run([output, expected])
self.assertAllClose(output, expected, atol=1e-4, rtol=1e-4)
# Make sure that intermediate block activations in
# output_end_points are subsampled versions of the corresponding
# ones in expected_end_points.
for i, block in enumerate(blocks[:-1:]):
output = output_end_points[block.scope]
expected = expected_end_points[block.scope]
atrous_activated = (output_stride is not None and
2 ** i >= output_stride)
if not atrous_activated:
expected = resnet_utils.subsample(expected, 2)
output, expected = sess.run([output, expected])
self.assertAllClose(output, expected, atol=1e-4, rtol=1e-4)
class ResnetCompleteNetworkTest(tf.test.TestCase):
"""Tests with complete small ResNet v1 networks."""
def _resnet_small(self,
inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
include_root_block=True,
spatial_squeeze=True,
reuse=None,
scope='resnet_v1_small'):
"""A shallow and thin ResNet v1 for faster tests."""
block = resnet_v1.resnet_v1_block
blocks = [
block('block1', base_depth=1, num_units=3, stride=2),
block('block2', base_depth=2, num_units=3, stride=2),
block('block3', base_depth=4, num_units=3, stride=2),
block('block4', base_depth=8, num_units=2, stride=1),
]
return resnet_v1.resnet_v1(inputs, blocks, num_classes,
is_training=is_training,
global_pool=global_pool,
output_stride=output_stride,
include_root_block=include_root_block,
spatial_squeeze=spatial_squeeze,
reuse=reuse,
scope=scope)
def testClassificationEndPoints(self):
global_pool = True
num_classes = 10
inputs = create_test_input(2, 224, 224, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
logits, end_points = self._resnet_small(inputs, num_classes,
global_pool=global_pool,
spatial_squeeze=False,
scope='resnet')
self.assertTrue(logits.op.name.startswith('resnet/logits'))
self.assertListEqual(logits.get_shape().as_list(), [2, 1, 1, num_classes])
self.assertTrue('predictions' in end_points)
self.assertListEqual(end_points['predictions'].get_shape().as_list(),
[2, 1, 1, num_classes])
self.assertTrue('global_pool' in end_points)
self.assertListEqual(end_points['global_pool'].get_shape().as_list(),
[2, 1, 1, 32])
def testClassificationEndPointsWithNoBatchNormArgscope(self):
global_pool = True
num_classes = 10
inputs = create_test_input(2, 224, 224, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
logits, end_points = self._resnet_small(inputs, num_classes,
global_pool=global_pool,
spatial_squeeze=False,
is_training=None,
scope='resnet')
self.assertTrue(logits.op.name.startswith('resnet/logits'))
self.assertListEqual(logits.get_shape().as_list(), [2, 1, 1, num_classes])
self.assertTrue('predictions' in end_points)
self.assertListEqual(end_points['predictions'].get_shape().as_list(),
[2, 1, 1, num_classes])
self.assertTrue('global_pool' in end_points)
self.assertListEqual(end_points['global_pool'].get_shape().as_list(),
[2, 1, 1, 32])
def testEndpointNames(self):
# Like ResnetUtilsTest.testEndPointsV1(), but for the public API.
global_pool = True
num_classes = 10
inputs = create_test_input(2, 224, 224, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
_, end_points = self._resnet_small(inputs, num_classes,
global_pool=global_pool,
scope='resnet')
expected = ['resnet/conv1']
for block in range(1, 5):
for unit in range(1, 4 if block < 4 else 3):
for conv in range(1, 4):
expected.append('resnet/block%d/unit_%d/bottleneck_v1/conv%d' %
(block, unit, conv))
expected.append('resnet/block%d/unit_%d/bottleneck_v1' % (block, unit))
expected.append('resnet/block%d/unit_1/bottleneck_v1/shortcut' % block)
expected.append('resnet/block%d' % block)
expected.extend(['global_pool', 'resnet/logits', 'resnet/spatial_squeeze',
'predictions'])
self.assertItemsEqual(list(end_points.keys()), expected)
def testClassificationShapes(self):
global_pool = True
num_classes = 10
inputs = create_test_input(2, 224, 224, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
_, end_points = self._resnet_small(inputs, num_classes,
global_pool=global_pool,
scope='resnet')
endpoint_to_shape = {
'resnet/block1': [2, 28, 28, 4],
'resnet/block2': [2, 14, 14, 8],
'resnet/block3': [2, 7, 7, 16],
'resnet/block4': [2, 7, 7, 32]}
for endpoint in endpoint_to_shape:
shape = endpoint_to_shape[endpoint]
self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
def testFullyConvolutionalEndpointShapes(self):
global_pool = False
num_classes = 10
inputs = create_test_input(2, 321, 321, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
_, end_points = self._resnet_small(inputs, num_classes,
global_pool=global_pool,
spatial_squeeze=False,
scope='resnet')
endpoint_to_shape = {
'resnet/block1': [2, 41, 41, 4],
'resnet/block2': [2, 21, 21, 8],
'resnet/block3': [2, 11, 11, 16],
'resnet/block4': [2, 11, 11, 32]}
for endpoint in endpoint_to_shape:
shape = endpoint_to_shape[endpoint]
self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
def testRootlessFullyConvolutionalEndpointShapes(self):
global_pool = False
num_classes = 10
inputs = create_test_input(2, 128, 128, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
_, end_points = self._resnet_small(inputs, num_classes,
global_pool=global_pool,
include_root_block=False,
spatial_squeeze=False,
scope='resnet')
endpoint_to_shape = {
'resnet/block1': [2, 64, 64, 4],
'resnet/block2': [2, 32, 32, 8],
'resnet/block3': [2, 16, 16, 16],
'resnet/block4': [2, 16, 16, 32]}
for endpoint in endpoint_to_shape:
shape = endpoint_to_shape[endpoint]
self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
def testAtrousFullyConvolutionalEndpointShapes(self):
global_pool = False
num_classes = 10
output_stride = 8
inputs = create_test_input(2, 321, 321, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
_, end_points = self._resnet_small(inputs,
num_classes,
global_pool=global_pool,
output_stride=output_stride,
spatial_squeeze=False,
scope='resnet')
endpoint_to_shape = {
'resnet/block1': [2, 41, 41, 4],
'resnet/block2': [2, 41, 41, 8],
'resnet/block3': [2, 41, 41, 16],
'resnet/block4': [2, 41, 41, 32]}
for endpoint in endpoint_to_shape:
shape = endpoint_to_shape[endpoint]
self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
def testAtrousFullyConvolutionalValues(self):
"""Verify dense feature extraction with atrous convolution."""
nominal_stride = 32
for output_stride in [4, 8, 16, 32, None]:
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
with tf.Graph().as_default():
with self.test_session() as sess:
tf.set_random_seed(0)
inputs = create_test_input(2, 81, 81, 3)
# Dense feature extraction followed by subsampling.
output, _ = self._resnet_small(inputs, None, is_training=False,
global_pool=False,
output_stride=output_stride)
if output_stride is None:
factor = 1
else:
factor = nominal_stride // output_stride
output = resnet_utils.subsample(output, factor)
# Make the two networks use the same weights.
tf.get_variable_scope().reuse_variables()
# Feature extraction at the nominal network rate.
expected, _ = self._resnet_small(inputs, None, is_training=False,
global_pool=False)
sess.run(tf.global_variables_initializer())
self.assertAllClose(output.eval(), expected.eval(),
atol=1e-4, rtol=1e-4)
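  # With nominal_stride = 32, e.g. output_stride = 8 gives factor = 32 // 8 = 4:
  # the dense (atrous) output is subsampled by that factor before being compared
  # against the output of the normally strided network.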
def testUnknownBatchSize(self):
batch = 2
height, width = 65, 65
global_pool = True
num_classes = 10
inputs = create_test_input(None, height, width, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
logits, _ = self._resnet_small(inputs, num_classes,
global_pool=global_pool,
spatial_squeeze=False,
scope='resnet')
self.assertTrue(logits.op.name.startswith('resnet/logits'))
self.assertListEqual(logits.get_shape().as_list(),
[None, 1, 1, num_classes])
images = create_test_input(batch, height, width, 3)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(logits, {inputs: images.eval()})
self.assertEqual(output.shape, (batch, 1, 1, num_classes))
def testFullyConvolutionalUnknownHeightWidth(self):
batch = 2
height, width = 65, 65
global_pool = False
inputs = create_test_input(batch, None, None, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
output, _ = self._resnet_small(inputs, None, global_pool=global_pool)
self.assertListEqual(output.get_shape().as_list(),
[batch, None, None, 32])
images = create_test_input(batch, height, width, 3)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(output, {inputs: images.eval()})
self.assertEqual(output.shape, (batch, 3, 3, 32))
def testAtrousFullyConvolutionalUnknownHeightWidth(self):
batch = 2
height, width = 65, 65
global_pool = False
output_stride = 8
inputs = create_test_input(batch, None, None, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
output, _ = self._resnet_small(inputs,
None,
global_pool=global_pool,
output_stride=output_stride)
self.assertListEqual(output.get_shape().as_list(),
[batch, None, None, 32])
images = create_test_input(batch, height, width, 3)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(output, {inputs: images.eval()})
self.assertEqual(output.shape, (batch, 9, 9, 32))
def testDepthMultiplier(self):
resnets = [
resnet_v1.resnet_v1_50, resnet_v1.resnet_v1_101,
resnet_v1.resnet_v1_152, resnet_v1.resnet_v1_200
]
resnet_names = [
'resnet_v1_50', 'resnet_v1_101', 'resnet_v1_152', 'resnet_v1_200'
]
for resnet, resnet_name in zip(resnets, resnet_names):
depth_multiplier = 0.25
global_pool = True
num_classes = 10
inputs = create_test_input(2, 224, 224, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
scope_base = resnet_name + '_base'
_, end_points_base = resnet(
inputs,
num_classes,
global_pool=global_pool,
min_base_depth=1,
scope=scope_base)
scope_test = resnet_name + '_test'
_, end_points_test = resnet(
inputs,
num_classes,
global_pool=global_pool,
min_base_depth=1,
depth_multiplier=depth_multiplier,
scope=scope_test)
for block in ['block1', 'block2', 'block3', 'block4']:
block_name_base = scope_base + '/' + block
block_name_test = scope_test + '/' + block
self.assertTrue(block_name_base in end_points_base)
self.assertTrue(block_name_test in end_points_test)
self.assertEqual(
len(end_points_base[block_name_base].get_shape().as_list()), 4)
self.assertEqual(
len(end_points_test[block_name_test].get_shape().as_list()), 4)
self.assertListEqual(
end_points_base[block_name_base].get_shape().as_list()[:3],
end_points_test[block_name_test].get_shape().as_list()[:3])
self.assertEqual(
int(depth_multiplier *
end_points_base[block_name_base].get_shape().as_list()[3]),
end_points_test[block_name_test].get_shape().as_list()[3])
def testMinBaseDepth(self):
resnets = [
resnet_v1.resnet_v1_50, resnet_v1.resnet_v1_101,
resnet_v1.resnet_v1_152, resnet_v1.resnet_v1_200
]
resnet_names = [
'resnet_v1_50', 'resnet_v1_101', 'resnet_v1_152', 'resnet_v1_200'
]
for resnet, resnet_name in zip(resnets, resnet_names):
min_base_depth = 5
global_pool = True
num_classes = 10
inputs = create_test_input(2, 224, 224, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
_, end_points = resnet(
inputs,
num_classes,
global_pool=global_pool,
min_base_depth=min_base_depth,
depth_multiplier=0,
scope=resnet_name)
for block in ['block1', 'block2', 'block3', 'block4']:
block_name = resnet_name + '/' + block
self.assertTrue(block_name in end_points)
self.assertEqual(
len(end_points[block_name].get_shape().as_list()), 4)
# The output depth is 4 times base_depth.
depth_expected = min_base_depth * 4
self.assertEqual(
end_points[block_name].get_shape().as_list()[3], depth_expected)
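def _expected_bottleneck_depth(base_depth, depth_multiplier=1.0, min_base_depth=8):
  """Sketch of the depth arithmetic exercised by the two tests above, assuming
  the conventional slim depth function max(int(base_depth * depth_multiplier),
  min_base_depth); a bottleneck unit then outputs 4x that many channels."""
  return max(int(base_depth * depth_multiplier), min_base_depth) * 4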
if __name__ == '__main__':
tf.test.main()
| 43.14376
| 80
| 0.598352
|
4ec77e6966e59b21687824e61f31b4a8a63461da
| 17,509
|
py
|
Python
|
sysinv/sysinv/sysinv/sysinv/api/controllers/v1/pci_device.py
|
riddopic/config
|
59f027fc42e2b34d9609c5fd6ec956b46a480235
|
[
"Apache-2.0"
] | 10
|
2020-02-07T18:57:44.000Z
|
2021-09-11T10:29:34.000Z
|
sysinv/sysinv/sysinv/sysinv/api/controllers/v1/pci_device.py
|
riddopic/config
|
59f027fc42e2b34d9609c5fd6ec956b46a480235
|
[
"Apache-2.0"
] | 1
|
2021-01-14T12:01:55.000Z
|
2021-01-14T12:01:55.000Z
|
sysinv/sysinv/sysinv/sysinv/api/controllers/v1/pci_device.py
|
riddopic/config
|
59f027fc42e2b34d9609c5fd6ec956b46a480235
|
[
"Apache-2.0"
] | 10
|
2020-10-13T08:37:46.000Z
|
2022-02-09T00:21:25.000Z
|
# Copyright (c) 2015-2021 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import jsonpatch
import pecan
from pecan import rest
import wsme
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan
from ast import literal_eval
from oslo_log import log
from sysinv._i18n import _
from sysinv.api.controllers.v1 import base
from sysinv.api.controllers.v1 import collection
from sysinv.api.controllers.v1 import link
from sysinv.api.controllers.v1 import types
from sysinv.api.controllers.v1 import utils
from sysinv.common import constants
from sysinv.common import device as dconstants
from sysinv.common import exception
from sysinv.common import utils as cutils
from sysinv import objects
LOG = log.getLogger(__name__)
class PCIDevicePatchType(types.JsonPatchType):
@staticmethod
def mandatory_attrs():
return []
class PCIDevice(base.APIBase):
"""API representation of an PCI device
This class enforces type checking and value constraints, and converts
between the internal object model and the API representation of an
Pci Device .
"""
uuid = types.uuid
"Unique UUID for this device"
type = wtypes.text
"Represent the type of device"
name = wtypes.text
"Represent the name of the device. Unique per host"
pciaddr = wtypes.text
"Represent the pci address of the device"
pclass_id = wtypes.text
"Represent the numerical pci class of the device"
pvendor_id = wtypes.text
"Represent the numerical pci vendor of the device"
pdevice_id = wtypes.text
"Represent the numerical pci device of the device"
pclass = wtypes.text
"Represent the pci class description of the device"
pvendor = wtypes.text
"Represent the pci vendor description of the device"
pdevice = wtypes.text
"Represent the pci device description of the device"
psvendor = wtypes.text
"Represent the pci svendor of the device"
psdevice = wtypes.text
"Represent the pci sdevice of the device"
numa_node = int
"Represent the numa node or zone sdevice of the device"
sriov_totalvfs = int
"The total number of available SR-IOV VFs"
sriov_numvfs = int
"The number of configured SR-IOV VFs"
sriov_vfs_pci_address = wtypes.text
"The PCI Addresses of the VFs"
sriov_vf_driver = wtypes.text
"The driver of configured SR-IOV VFs"
sriov_vf_pdevice_id = wtypes.text
"The SR-IOV VF PCI device id for this device"
driver = wtypes.text
"The kernel driver for this device"
extra_info = wtypes.text
"Extra information for this device"
host_id = int
"Represent the host_id the device belongs to"
host_uuid = types.uuid
"Represent the UUID of the host the device belongs to"
enabled = types.boolean
"Represent the enabled status of the device"
bmc_build_version = wtypes.text
"Represent the BMC build version of the fpga device"
bmc_fw_version = wtypes.text
"Represent the BMC firmware version of the fpga device"
retimer_a_version = wtypes.text
"Represent the retimer A version of the fpga device"
retimer_b_version = wtypes.text
"Represent the retimer B version of the fpga device"
root_key = wtypes.text
"Represent the root key of the fpga device"
revoked_key_ids = wtypes.text
"Represent the key revocation ids of the fpga device"
boot_page = wtypes.text
"Represent the boot page of the fpga device"
bitstream_id = wtypes.text
"Represent the bitstream id of the fpga device"
links = [link.Link]
"Represent a list containing a self link and associated device links"
def __init__(self, **kwargs):
self.fields = list(objects.pci_device.fields.keys())
for k in self.fields:
setattr(self, k, kwargs.get(k))
@classmethod
def convert_with_links(cls, rpc_device, expand=True):
device = PCIDevice(**rpc_device.as_dict())
if not expand:
device.unset_fields_except(['uuid', 'host_id',
'name', 'pciaddr', 'pclass_id',
'pvendor_id', 'pdevice_id', 'pclass',
'pvendor', 'pdevice', 'psvendor',
'psdevice', 'numa_node',
'sriov_totalvfs', 'sriov_numvfs',
'sriov_vfs_pci_address',
'sriov_vf_driver',
'sriov_vf_pdevice_id', 'driver',
'host_uuid', 'enabled',
'bmc_build_version', 'bmc_fw_version',
'retimer_a_version', 'retimer_b_version',
'root_key', 'revoked_key_ids',
'boot_page', 'bitstream_id',
'created_at', 'updated_at',
'extra_info'])
# do not expose the id attribute
device.host_id = wtypes.Unset
device.node_id = wtypes.Unset
# if not FPGA device, hide these attributes
if device.pclass_id != dconstants.PCI_DEVICE_CLASS_FPGA:
device.bmc_build_version = wtypes.Unset
device.bmc_fw_version = wtypes.Unset
device.retimer_a_version = wtypes.Unset
device.retimer_b_version = wtypes.Unset
device.root_key = wtypes.Unset
device.revoked_key_ids = wtypes.Unset
device.boot_page = wtypes.Unset
device.bitstream_id = wtypes.Unset
device.links = [link.Link.make_link('self', pecan.request.host_url,
'pci_devices', device.uuid),
link.Link.make_link('bookmark',
pecan.request.host_url,
'pci_devices', device.uuid,
bookmark=True)
]
return device
class PCIDeviceCollection(collection.Collection):
"""API representation of a collection of PciDevice objects."""
pci_devices = [PCIDevice]
"A list containing PciDevice objects"
def __init__(self, **kwargs):
self._type = 'pci_devices'
@classmethod
def convert_with_links(cls, rpc_devices, limit, url=None,
expand=False, **kwargs):
collection = PCIDeviceCollection()
collection.pci_devices = [PCIDevice.convert_with_links(d, expand)
for d in rpc_devices]
collection.next = collection.get_next(limit, url=url, **kwargs)
return collection
LOCK_NAME = 'PCIDeviceController'
class PCIDeviceController(rest.RestController):
"""REST controller for PciDevices."""
_custom_actions = {
'detail': ['GET'],
}
def __init__(self, from_ihosts=False):
self._from_ihosts = from_ihosts
def _get_pci_devices_collection(self, uuid, marker, limit, sort_key,
sort_dir, expand=False, resource_url=None):
if self._from_ihosts and not uuid:
raise exception.InvalidParameterValue(_(
"Host id not specified."))
limit = utils.validate_limit(limit)
sort_dir = utils.validate_sort_dir(sort_dir)
marker_obj = None
if marker:
marker_obj = objects.pci_device.get_by_uuid(
pecan.request.context,
marker)
if self._from_ihosts:
devices = pecan.request.dbapi.pci_device_get_by_host(
uuid, limit,
marker_obj,
sort_key=sort_key,
sort_dir=sort_dir)
else:
if uuid:
devices = pecan.request.dbapi.pci_device_get_by_host(
uuid, limit,
marker_obj,
sort_key=sort_key,
sort_dir=sort_dir)
else:
devices = pecan.request.dbapi.pci_device_get_list(
limit, marker_obj,
sort_key=sort_key,
sort_dir=sort_dir)
return PCIDeviceCollection.convert_with_links(devices, limit,
url=resource_url,
expand=expand,
sort_key=sort_key,
sort_dir=sort_dir)
@wsme_pecan.wsexpose(PCIDeviceCollection, types.uuid, types.uuid,
int, wtypes.text, wtypes.text)
def get_all(self, uuid=None, marker=None, limit=None,
sort_key='id', sort_dir='asc'):
"""Retrieve a list of devices."""
return self._get_pci_devices_collection(uuid,
marker, limit, sort_key, sort_dir)
@wsme_pecan.wsexpose(PCIDeviceCollection, types.uuid, types.uuid, int,
wtypes.text, wtypes.text)
def detail(self, uuid=None, marker=None, limit=None,
sort_key='id', sort_dir='asc'):
"""Retrieve a list of devices with detail."""
# NOTE: /detail should only work against collections
parent = pecan.request.path.split('/')[:-1][-1]
if parent != "pci_devices":
raise exception.HTTPNotFound
expand = True
resource_url = '/'.join(['pci_devices', 'detail'])
return self._get_pci_devices_collection(uuid, marker, limit, sort_key,
sort_dir, expand, resource_url)
@wsme_pecan.wsexpose(PCIDevice, types.uuid)
def get_one(self, device_uuid):
"""Retrieve information about the given device."""
if self._from_ihosts:
raise exception.OperationNotPermitted
rpc_device = objects.pci_device.get_by_uuid(
pecan.request.context, device_uuid)
return PCIDevice.convert_with_links(rpc_device)
@cutils.synchronized(LOCK_NAME)
@wsme.validate(types.uuid, [PCIDevicePatchType])
@wsme_pecan.wsexpose(PCIDevice, types.uuid,
body=[PCIDevicePatchType])
def patch(self, device_uuid, patch):
"""Update an existing device."""
if self._from_ihosts:
raise exception.OperationNotPermitted
rpc_device = objects.pci_device.get_by_uuid(
pecan.request.context, device_uuid)
# replace host_uuid with corresponding host_id
patch_obj = jsonpatch.JsonPatch(patch)
for p in patch_obj:
if p['path'] == '/host_uuid':
p['path'] = '/host_id'
host = objects.host.get_by_uuid(pecan.request.context,
p['value'])
p['value'] = host.id
try:
device = PCIDevice(**jsonpatch.apply_patch(rpc_device.as_dict(),
patch_obj))
except utils.JSONPATCH_EXCEPTIONS as e:
raise exception.PatchError(patch=patch, reason=e)
# Semantic checks
host = pecan.request.dbapi.ihost_get(device.host_id)
_check_host(host)
sriov_update = _check_device_sriov(device.as_dict(), host)
# Update fields that have changed
for field in objects.pci_device.fields:
value = getattr(device, field)
if rpc_device[field] != value:
_check_field(field)
if (field in ['sriov_vf_driver', 'driver'] and
value == 'none'):
rpc_device[field] = None
else:
rpc_device[field] = getattr(device, field)
if field == 'sriov_numvfs':
# Save desired number of VFs in extra_info since
# sriov_numvfs may get overwritten by concurrent inventory report
expected_numvfs = {'expected_numvfs': rpc_device[field]}
if not rpc_device['extra_info']:
rpc_device['extra_info'] = str(expected_numvfs)
else:
extra_info = literal_eval(rpc_device['extra_info'])
extra_info.update(expected_numvfs)
rpc_device['extra_info'] = str(extra_info)
rpc_device.save()
if sriov_update:
pecan.request.rpcapi.update_sriov_config(
pecan.request.context, host['uuid'])
return PCIDevice.convert_with_links(rpc_device)
def _check_host(host):
if utils.is_aio_simplex_host_unlocked(host):
raise wsme.exc.ClientSideError(_('Host must be locked.'))
elif host.administrative != constants.ADMIN_LOCKED and not \
utils.is_host_simplex_controller(host):
raise wsme.exc.ClientSideError(_('Host must be locked.'))
if constants.WORKER not in host.subfunctions:
raise wsme.exc.ClientSideError(_('Can only modify worker node cores.'))
def _check_field(field):
if field not in ["enabled", "name", "driver", "sriov_numvfs", "sriov_vf_driver", "extra_info"]:
raise wsme.exc.ClientSideError(_('Modifying %s attribute restricted') % field)
def _check_device_sriov(device, host):
sriov_update = False
if (device['pdevice_id'] in dconstants.SRIOV_ENABLED_FEC_DEVICE_IDS and
host.invprovision != constants.PROVISIONED):
raise wsme.exc.ClientSideError(_("Cannot configure device %s "
"until host %s is unlocked for the first time." %
(device['uuid'], host.hostname)))
if (device['pdevice_id'] not in dconstants.SRIOV_ENABLED_FEC_DEVICE_IDS and
'sriov_numvfs' in device.keys() and device['sriov_numvfs']):
raise wsme.exc.ClientSideError(_("The number of SR-IOV VFs is specified "
"but the device is not supported for SR-IOV"))
if (device['pdevice_id'] not in dconstants.SRIOV_ENABLED_FEC_DEVICE_IDS and
'sriov_vf_driver' in device.keys() and device['sriov_vf_driver']):
raise wsme.exc.ClientSideError(_("The SR-IOV VF driver is specified "
"but the device is not supported for SR-IOV"))
if device['pdevice_id'] not in dconstants.SRIOV_ENABLED_FEC_DEVICE_IDS:
return sriov_update
if 'sriov_numvfs' not in device.keys():
raise wsme.exc.ClientSideError(_("The number of SR-IOV VFs must be specified"))
else:
if ('sriov_vf_driver' in device.keys() and device['sriov_vf_driver'] and
device['sriov_vf_driver'] != dconstants.FPGA_INTEL_5GNR_FEC_DRIVER_NONE and
device['sriov_numvfs'] is None):
raise wsme.exc.ClientSideError(_("Value for number of SR-IOV VFs must be specified."))
if device['sriov_numvfs'] and device['sriov_numvfs'] < 0:
raise wsme.exc.ClientSideError(_("Value for number of SR-IOV VFs must be >= 0."))
if ('sriov_vf_driver' in device.keys() and device['sriov_vf_driver'] and
device['sriov_vf_driver'] != dconstants.FPGA_INTEL_5GNR_FEC_DRIVER_NONE and
device['sriov_numvfs'] == 0):
raise wsme.exc.ClientSideError(_("Value for number of SR-IOV VFs must be > 0."))
if 'sriov_totalvfs' in device.keys():
if not device['sriov_totalvfs']:
raise wsme.exc.ClientSideError(_("SR-IOV cannot be configured on this interface"))
if device['sriov_numvfs'] and device['sriov_numvfs'] > device['sriov_totalvfs']:
raise wsme.exc.ClientSideError(_(
"The device supports a maximum of %s VFs" % device['sriov_totalvfs']))
if 'sriov_vf_driver' not in device.keys():
raise wsme.exc.ClientSideError(_("The SR-IOV VF driver must be specified"))
else:
if (device['sriov_vf_driver'] is not None and
device['pdevice_id'] in
dconstants.SRIOV_ENABLED_FEC_DEVICE_IDS and
device['sriov_vf_driver'] not in
dconstants.FPGA_INTEL_5GNR_FEC_VF_VALID_DRIVERS):
msg = (_("Value for SR-IOV VF driver must be one of "
"{}").format(', '.join(dconstants.FPGA_INTEL_5GNR_FEC_VF_VALID_DRIVERS)))
raise wsme.exc.ClientSideError(msg)
if ('driver' in device.keys() and device['driver'] and
device['pdevice_id'] in dconstants.SRIOV_ENABLED_FEC_DEVICE_IDS and
device['driver'] not in
dconstants.FPGA_INTEL_5GNR_FEC_PF_VALID_DRIVERS):
msg = (_("Value for SR-IOV PF driver must be one of "
"{}").format(', '.join(dconstants.FPGA_INTEL_5GNR_FEC_PF_VALID_DRIVERS)))
raise wsme.exc.ClientSideError(msg)
sriov_update = True
return sriov_update
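def _example_sriov_patch():
    """Illustrative sketch of an RFC 6902 patch body accepted by the patch()
    handler above; the VF count and driver value are placeholders and still go
    through the _check_device_sriov() validation."""
    return [
        {'op': 'replace', 'path': '/sriov_numvfs', 'value': 4},
        {'op': 'replace', 'path': '/sriov_vf_driver', 'value': 'vfio'},
    ]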
| 39.613122
| 99
| 0.585642
|
ba0aef25b83ff2be9ff3420c49e1946139569ad8
| 1,057
|
py
|
Python
|
bloodhound_dashboard/bhdashboard/tests/widgets/__init__.py
|
beebopkim/bloodhound
|
1a1d3c88309136229f4b64d9e75e0441a4c590e4
|
[
"Apache-2.0"
] | 84
|
2015-01-07T03:42:53.000Z
|
2022-01-10T11:57:30.000Z
|
bloodhound_dashboard/bhdashboard/tests/widgets/__init__.py
|
beebopkim/bloodhound
|
1a1d3c88309136229f4b64d9e75e0441a4c590e4
|
[
"Apache-2.0"
] | 1
|
2021-11-04T12:52:03.000Z
|
2021-11-04T12:52:03.000Z
|
bloodhound_dashboard/bhdashboard/tests/widgets/__init__.py
|
isabella232/bloodhound-1
|
c3e31294e68af99d4e040e64fbdf52394344df9e
|
[
"Apache-2.0"
] | 35
|
2015-01-06T11:30:27.000Z
|
2021-11-10T16:34:52.000Z
|
# -*- coding: UTF-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from bhdashboard.tests.widgets import timeline
def suite():
suite = unittest.TestSuite()
suite.addTest(timeline.suite())
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| 32.030303
| 63
| 0.741722
|
76af767e1c3fc5c5b10da38a714b4be10c297e7f
| 712
|
py
|
Python
|
astropy/wcs/wcsapi/tests/test_low_level_api.py
|
PriyankaH21/astropy
|
159fb9637ce4acdc60329d20517ed3dc7ba79581
|
[
"BSD-3-Clause"
] | null | null | null |
astropy/wcs/wcsapi/tests/test_low_level_api.py
|
PriyankaH21/astropy
|
159fb9637ce4acdc60329d20517ed3dc7ba79581
|
[
"BSD-3-Clause"
] | 1
|
2018-11-14T14:18:55.000Z
|
2020-01-21T10:36:05.000Z
|
astropy/wcs/wcsapi/tests/test_low_level_api.py
|
PriyankaH21/astropy
|
159fb9637ce4acdc60329d20517ed3dc7ba79581
|
[
"BSD-3-Clause"
] | null | null | null |
from pytest import raises
from ..low_level_api import validate_physical_types
def test_validate_physical_types():
# Check valid cases
validate_physical_types(['pos.eq.ra', 'pos.eq.ra'])
validate_physical_types(['spect.dopplerVeloc.radio', 'custom:spam'])
validate_physical_types(['time', None])
# Make sure validation is case sensitive
with raises(ValueError) as exc:
validate_physical_types(['pos.eq.ra', 'Pos.eq.dec'])
assert exc.value.args[0] == 'Invalid physical type: Pos.eq.dec'
# Make sure nonsense types are picked up
with raises(ValueError) as exc:
validate_physical_types(['spam'])
assert exc.value.args[0] == 'Invalid physical type: spam'
| 32.363636
| 72
| 0.70927
|
a344fe938d109d99ec93b24717421e2b356a87bc
| 399
|
py
|
Python
|
utils/hardware_controller/wrappers.py
|
skal1ozz/AI-WordOfWarcraft-Bot
|
1d9659e76d1107909d6ca27b573d0dd83a25fce4
|
[
"Apache-2.0"
] | null | null | null |
utils/hardware_controller/wrappers.py
|
skal1ozz/AI-WordOfWarcraft-Bot
|
1d9659e76d1107909d6ca27b573d0dd83a25fce4
|
[
"Apache-2.0"
] | null | null | null |
utils/hardware_controller/wrappers.py
|
skal1ozz/AI-WordOfWarcraft-Bot
|
1d9659e76d1107909d6ca27b573d0dd83a25fce4
|
[
"Apache-2.0"
] | null | null | null |
from .abstract_backend import AbstractBackend
def wrapper_backend_required(fn):
def wr(self, *args, **kwargs):
backend = getattr(self, 'backend', None)
if not isinstance(backend, AbstractBackend):
            raise AttributeError('Backend should be a child of '
                                 'AbstractBackend')
return fn(self, *args, **kwargs)
return wr
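# Minimal, hypothetical usage sketch: any controller owning a `backend` attribute
# can guard its methods with the decorator above.
class _ExampleController:
    def __init__(self, backend):
        self.backend = backend
    @wrapper_backend_required
    def press_key(self, key):
        # Delegation is illustrative only; the real backend API is defined by
        # the concrete AbstractBackend subclasses.
        return self.backend.press_key(key)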
| 33.25
| 64
| 0.626566
|
b9f43c5a9fbd87f85add6feeb941295158bdae9e
| 3,162
|
py
|
Python
|
main/utilities_v2/infrastructure_as_code/build_spec_to_cloud.py
|
emerginganalytics/cyberarena
|
311d179a30017285571f65752eaa91b78c7097aa
|
[
"MIT"
] | 2
|
2022-01-24T20:15:08.000Z
|
2022-01-24T20:15:25.000Z
|
main/utilities_v2/infrastructure_as_code/build_spec_to_cloud.py
|
emerginganalytics/cyberarena
|
311d179a30017285571f65752eaa91b78c7097aa
|
[
"MIT"
] | 26
|
2021-12-23T19:37:27.000Z
|
2022-03-28T04:03:41.000Z
|
main/utilities_v2/infrastructure_as_code/build_spec_to_cloud.py
|
emerginganalytics/cyberarena
|
311d179a30017285571f65752eaa91b78c7097aa
|
[
"MIT"
] | null | null | null |
"""
Parses yaml according to version 2 of the Cyber Arena specification and stores a build structure in the cloud datastore.
We use marshmallow to perform serializer validation only. We define the required schemas in WorkoutComputeSchema,
WorkoutContainerSchema, ArenaSchema, and perhaps more. New fields in the yaml should be accounted for in the schema
validation.
"""
from datetime import datetime
from marshmallow import ValidationError
from google.cloud import logging_v2
import random
import string
from utilities_v2.globals import BuildConstants, DatastoreKeyTypes, PubSub
from utilities_v2.infrastructure_as_code.schema import FixedArenaSchema, FixedArenaWorkoutSchema
from utilities_v2.gcp.cloud_env import CloudEnv
from utilities_v2.gcp.datastore_manager import DataStoreManager
from utilities_v2.gcp.pubsub_manager import PubSubManager
__author__ = "Philip Huff"
__copyright__ = "Copyright 2022, UA Little Rock, Emerging Analytics Center"
__credits__ = ["Philip Huff"]
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Philip Huff"
__email__ = "pdhuff@ualr.edu"
__status__ = "Testing"
class BuildSpecToCloud:
def __init__(self, cyber_arena_spec, debug=False):
"""
Prepares the build of workouts based on a YAML specification by storing the information in the
cloud datastore.
:@param cyber_arena_spec: The specification for building the Cyber Arena
:@param debug: Whether to publish to cloud functions or debug the build operations.
"""
self.env = CloudEnv()
log_client = logging_v2.Client()
log_client.setup_logging()
if 'build_type' not in cyber_arena_spec:
            raise ValidationError('build_type is missing from the specification')
cyber_arena_spec['creation_timestamp'] = datetime.utcnow().isoformat()
self.pubsub_manager = PubSubManager(topic=PubSub.Topics.CYBER_ARENA)
self.build_type = cyber_arena_spec['build_type']
if self.build_type == BuildConstants.BuildType.FIXED_ARENA.value:
self.build_id = cyber_arena_spec['id']
self.cyber_arena_spec = FixedArenaSchema().load(cyber_arena_spec)
self.datastore_manager = DataStoreManager(key_type=DatastoreKeyTypes.FIXED_ARENA, key_id=self.build_id)
self.action = PubSub.BuildActions.FIXED_ARENA.value
elif self.build_type == BuildConstants.BuildType.FIXED_ARENA_WORKOUT.value:
self.build_id = ''.join(random.choice(string.ascii_lowercase) for j in range(10))
cyber_arena_spec['id'] = self.build_id
self.cyber_arena_spec = FixedArenaWorkoutSchema().load(cyber_arena_spec)
self.datastore_manager = DataStoreManager(key_type=DatastoreKeyTypes.FIXED_ARENA_WORKOUT,
key_id=self.build_id)
self.action = PubSub.BuildActions.FIXED_ARENA_WORKOUT.value
self.debug = debug
def commit(self):
self.datastore_manager.put(self.cyber_arena_spec)
if not self.debug:
self.pubsub_manager.msg(handler=PubSub.Handlers.BUILD, action=self.action,
fixed_arena_workout_id=self.build_id)
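def _example_fixed_arena_spec():
    """Sketch of the kind of dict handed to BuildSpecToCloud for a fixed-arena
    build; only the two fields inspected directly in __init__ are shown, and the
    real FixedArenaSchema will require additional fields."""
    return {
        'id': 'example-fixed-arena',
        'build_type': BuildConstants.BuildType.FIXED_ARENA.value,
    }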
| 47.19403
| 120
| 0.728969
|
511d9c6371ab4661901f93033472cbc5d8de1ab6
| 13,232
|
py
|
Python
|
SNPLIB/snplib.py
|
VasLem/SNPLIB
|
aea1cb943a7db22faa592a53cf1132561ce50c4e
|
[
"BSD-3-Clause"
] | 2
|
2019-11-21T04:55:13.000Z
|
2021-10-05T18:01:23.000Z
|
SNPLIB/snplib.py
|
VasLem/SNPLIB
|
aea1cb943a7db22faa592a53cf1132561ce50c4e
|
[
"BSD-3-Clause"
] | null | null | null |
SNPLIB/snplib.py
|
VasLem/SNPLIB
|
aea1cb943a7db22faa592a53cf1132561ce50c4e
|
[
"BSD-3-Clause"
] | 1
|
2022-02-17T17:20:24.000Z
|
2022-02-17T17:20:24.000Z
|
"""SNPLIB class Module
This module contains the definition of SNPLIB class and some supportive methods.
Example
-------
Notes
-----
"""
import math
import re
import numpy as np
import numpy.linalg as npl
import pandas as pd
from copy import deepcopy
from scipy.stats import t, chi2, f
from multiprocessing import cpu_count
import _SNPLIB as lib
chr_dict = {
'X': 23,
'Y': 24,
'XY': 25,
'MT': 26,
}
def convert_chr(chr_c):
if chr_c.isdigit():
return int(chr_c)
else:
return chr_dict[chr_c.upper()]
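# For example, convert_chr('7') -> 7, while convert_chr('mt') -> 26 because
# non-numeric chromosome codes are upper-cased and looked up in chr_dict.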
def CalcIBSConnection(src_geno, dest_geno, num_threads):
return lib.CalcIBSConnection(src_geno.GENO, dest_geno.GENO, src_geno.nSamples, dest_geno.nSamples, num_threads)
def UpdateAf(aaf, num_pops, num_generations, effective_sample_size):
return lib.UpdateAf(aaf, num_pops, num_generations, effective_sample_size).T
class SNPLIB:
"""The SNPLIB class stores the genotype data in plink binary format and provides methods analyzing genotype data
Attributes
----------
nThreads : int
Number of threads used for computation
SNPs : :obj:`pandas.DataFrame`, optional
`pandas.DataFrame` of SNPs' information imported from plink .bim file
Samples : :obj:`pandas.DataFrame`, optional
`pandas.DataFrame` of Samples' information imported from plink .fam file
"""
def __init__(self, nThreads=cpu_count()//2):
"""Constructor function
Parameters
----------
nThreads : int, optional
Number of threads used for computation
"""
self.nThreads = nThreads
self.SNPs = []
self.Samples = []
def importPLINKDATA(self, bfile):
"""Import plink binary fileset
Parameters
----------
bfile : str
The name of plink binary fileset
"""
filename = bfile + '.bim'
self.SNPs = pd.read_table(
bfile+'.bim', sep=None, names=['CHR', 'RSID', 'Cm', 'POS', 'ALT', 'REF'], engine='python')
self.Samples = pd.read_table(bfile+'.fam', sep=None,
names=['FID', 'IID', 'PID', 'MID', 'Sex', 'Pheno'], engine='python')
self.nSNPs = self.SNPs.shape[0]
self.nSamples = self.Samples.shape[0]
filename = bfile + '.bed'
num_bytes = math.ceil(self.nSamples / 4.0)
GENO = np.fromfile(filename, dtype=np.uint8, count=-1)
GENO = GENO[3:]
self.GENO = np.reshape(GENO, (num_bytes, - 1), order='F')
# Simulations
def GenerateIndividuals(self, af):
"""Simulate the genotypes according to the individual allele frequencies
Parameters
----------
af : ndarray
A `ndarray` matrix contains the individual allele frequencies, with shape of ``(num_samples, num_snps)``
"""
self.nSamples = af.shape[0]
self.nSNPs = af.shape[1]
self.GENO = lib.GenerateIndividuals(af)
def GenerateAdmixedIndividuals(self, af, num_samples):
self.nSamples = num_samples
self.nSNPs = af.shape[1]
self.GENO = lib.GenerateAdmixedIndividuals(af, num_samples)
def GeneratePairwiseSiblings(self, parent_obj):
self.nSamples = parent_obj.nSamples
self.nSNPs = parent_obj.nSNPs
self.GENO = lib.GeneratePairwiseSiblings(
parent_obj.GENO, self.nSamples//2)
# Data manage
def UnpackGeno(self):
"""Unpack the plink binary format into a double matrix
Returns
-------
`ndarray`
A `ndarray` matrix contains the individual genotypes
"""
return lib.UnpackGeno(self.GENO, self.nSamples)
def Keep(self, index):
"""Keep the individuals listed in the ``index``, same as the plink option --keep
Returns
-------
`SNPLIB`
A `SNPLIB` object with the individuals in ``index``
"""
result = SNPLIB(self.nThreads)
result.nSamples = len(index)
result.nSNPs = deepcopy(self.nSNPs)
result.GENO = lib.Keep(self.GENO, self.nSamples, index)
return result
def Extract(self, index):
result = SNPLIB(self.nThreads)
result.nSamples = deepcopy(self.nSamples)
result.nSNPs = len(index)
result.GENO = deepcopy(self.GENO[:, index])
return result
# Statistics
def CalcAlleleFrequencies(self):
return lib.CalcAlleleFrequencies(self.GENO, self.nSamples)
def CalcMissing(self):
return lib.CalcMissing(self.GENO, self.nSamples)
def CalcAdjustedAF(self, covariates):
return lib.CalcAdjustedAF(self.GENO, covariates, self.nThreads)
def CalcAdjustedMAF(self, covariates):
return lib.CalcAdjustedMAF(self.GENO, covariates, self.nThreads)
# Relationships
def CalcAdjustedGRM(self, covariates):
matrix, gcta_diag = lib.CalcAdjustedGRM(
self.GENO, covariates, self.nThreads)
return matrix, gcta_diag
def CalcAdmixedGRM(self, pop_af, pop):
matrix, gcta_diag = lib.CalcAdmixedGRM(
self.GENO, pop_af, pop, self.nThreads)
return matrix, gcta_diag
def CalcGRMMatrix(self):
af = self.CalcAlleleFrequencies()
matrix = lib.CalcGRMMatrix(self.GENO, af, self.nSamples, self.nThreads)
gcta_diag = lib.CalcGCTADiagonal(
self.GENO, af, self.nSamples, self.nThreads)
return matrix, gcta_diag
def CalcIBSMatrix(self):
return lib.CalcIBSMatrix(self.GENO, self.nSamples, self.nThreads)
def CalcKINGMatrix(self):
return lib.CalcKINGMatrix(self.GENO, self.nSamples, self.nThreads)
def CalcUGRMMatrix(self):
return lib.CalcUGRMMatrix(self.GENO, self.nSamples, self.nThreads)
def FindUnrelated(self, threshold=0.044):
king = self.CalcKINGMatrix()
return lib.FindUnrelatedGroup(king, threshold)
# Ancestry Estimation
def CalcPCAScores(self, nComponents):
grm, _ = self.CalcGRMMatrix()
w, V = npl.eig(grm)
ind = np.argsort(-w)
return V[:, ind[:nComponents]]
def CalcPCALoadingsExact(self, nComponents):
af = self.CalcAlleleFrequencies()
A = lib.UnpackGRMGeno(self.GENO, af, self.nSamples)
U, s, _ = npl.svd(A.T, full_matrices=False)
S = np.diag(s[:nComponents])
U = U[:, :nComponents]
return npl.solve(S, U.T)
def CalcPCALoadingsApprox(self, nComponents, nParts=10):
af = self.CalcAlleleFrequencies()
grm = lib.CalcGRMMatrix(self.GENO, af, self.nSamples, self.nThreads)
L = 2*nComponents
I = 10
G = np.zeros((self.nSamples, L*(I+1)), dtype='double', order='F')
G[:, :L] = np.random.randn(self.nSamples, L)
for i in range(1, I):
G[:, i*L: (i+1)*L] = grm@G[:, (i-1)*L:i*L]
H = np.zeros((self.nSNPs, L*(I+1)), dtype='double', order='F')
nSNPsPart = math.ceil(self.nSNPs/nParts)
for i in range(nParts-1):
A = lib.UnpackGRMGeno(
self.GENO[:, i*nSNPsPart:(i+1)*nSNPsPart], af[i*nSNPsPart:(i+1)*nSNPsPart], self.nSamples)
H[i*nSNPsPart:(i+1)*nSNPsPart, :] = A.T@G
A = lib.UnpackGRMGeno(
self.GENO[:, (nParts-1)*nSNPsPart:], af[(nParts-1)*nSNPsPart:], self.nSamples)
H[(nParts-1)*nSNPsPart:, :] = A.T@G
Q, _ = npl.qr(H)
T = np.zeros((self.nSamples, L*(I+1)), dtype='double', order='F')
for i in range(nParts-1):
A = lib.UnpackGRMGeno(
self.GENO[:, i*nSNPsPart:(i+1)*nSNPsPart], af[i*nSNPsPart:(i+1)*nSNPsPart], self.nSamples)
T = T + A@Q[i*nSNPsPart:(i+1)*nSNPsPart, :]
A = lib.UnpackGRMGeno(
self.GENO[:, (nParts-1)*nSNPsPart:], af[(nParts-1)*nSNPsPart:], self.nSamples)
T = T+A@Q[(nParts-1)*nSNPsPart:, :]
_, S, W = npl.svd(T, full_matrices=False)
U = Q@W.T
S = S[:nComponents]
S = np.diag(S)
U = U[:, :nComponents]
return npl.solve(S, U.T)
def ProjectPCA(self, ref_obj, loadings, nParts=10):
nSNPsPart = math.ceil(self.nSNPs/nParts)
af = ref_obj.CalcAlleleFrequencies()
nComponents = loadings.shape[0]
scores = np.zeros((nComponents, self.nSamples))
for i in range(nParts-1):
A = lib.UnpackGRMGeno(
self.GENO[:, i*nSNPsPart:(i+1)*nSNPsPart], af[i*nSNPsPart:(i+1)*nSNPsPart], self.nSamples)
scores = scores + loadings[:, i*nSNPsPart:(i+1)*nSNPsPart]@A.T
A = lib.UnpackGRMGeno(
self.GENO[:, (nParts-1)*nSNPsPart:], af[nParts*nSNPsPart:], self.nSamples)
scores = scores + loadings[:, (nParts-1)*nSNPsPart:]@A.T
return scores.T
def CalcPCAiRScores(self, nComponents, is_exact=False):
ind = self.FindUnrelated()
Unrelated = self.Keep(ind)
if is_exact:
loadings = Unrelated.CalcPCALoadingsExact(nComponents)
else:
loadings = Unrelated.CalcPCALoadingsApprox(nComponents)
return self.ProjectPCA(Unrelated, loadings)
def CalcSUGIBSScores(self, nComponents):
ibs = self.CalcIBSMatrix()
d = np.sum(ibs, axis=0)
ugrm = self.CalcUGRMMatrix()
D = np.diag(d**-0.5)
I = D@ugrm@D
w, V = npl.eig(I)
ind = np.argsort(-w)
return D@V[:, ind[1:nComponents+1]]
def CalcSUGIBSLoadingsExact(self, nComponents):
ibs = self.CalcIBSMatrix()
d = np.sum(ibs, axis=0)
D = np.diag(d**-0.5)
A = lib.UnpackUGeno(self.GENO, self.nSamples)
A = D@A
U, s, _ = npl.svd(A.T, full_matrices=False)
S = np.diag(s[1:nComponents+1])
U = U[:, 1:nComponents+1]
return npl.solve(S, U.T)
def CalcSUGIBSLoadingsApprox(self, nComponents, nParts=10):
L = 2*(nComponents+1)
I = 10
G = np.zeros((self.nSamples, L*(I+1)), dtype='double', order='F')
G[:, :L] = np.random.randn(self.nSamples, L)
ibs = self.CalcIBSMatrix()
ugrm = self.CalcUGRMMatrix()
d = np.sum(ibs, axis=0)
D = np.diag(d**-0.5)
ugrm = D@ugrm@D
for i in range(1, I):
G[:, i*L: (i+1)*L] = ugrm@G[:, (i-1)*L:i*L]
H = np.zeros((self.nSNPs, L*(I+1)), dtype='double', order='F')
nSNPsPart = math.ceil(self.nSNPs/nParts)
for i in range(nParts-1):
A = lib.UnpackUGeno(
self.GENO[:, i*nSNPsPart:(i+1)*nSNPsPart], self.nSamples)
H[i*nSNPsPart:(i+1)*nSNPsPart, :] = A.T@D@G
A = lib.UnpackUGeno(self.GENO[:, (nParts-1)*nSNPsPart:], self.nSamples)
H[(nParts-1)*nSNPsPart:, :] = A.T@D@G
Q, _ = npl.qr(H)
T = np.zeros((self.nSamples, L*(I+1)), dtype='double', order='F')
for i in range(nParts-1):
A = lib.UnpackUGeno(
self.GENO[:, i*nSNPsPart:(i+1)*nSNPsPart], self.nSamples)
T = T + D@A@Q[i*nSNPsPart:(i+1)*nSNPsPart, :]
A = lib.UnpackUGeno(self.GENO[:, (nParts-1)*nSNPsPart:], self.nSamples)
T = T+D@A@Q[(nParts-1)*nSNPsPart:, :]
_, S, W = npl.svd(T, full_matrices=False)
U = Q@W.T
S = S[1:nComponents+1]
S = np.diag(S)
U = U[:, 1:nComponents+1]
return npl.solve(S, U.T)
def ProjectSUGIBS(self, ref_obj, loadings, nParts=10):
nSNPsPart = math.ceil(self.nSNPs/nParts)
nComponents = loadings.shape[0]
scores = np.zeros((nComponents, self.nSamples))
for i in range(nParts-1):
A = lib.UnpackUGeno(
self.GENO[:, i*nSNPsPart:(i+1)*nSNPsPart], self.nSamples)
scores = scores + loadings[:, i*nSNPsPart:(i+1)*nSNPsPart]@A.T
A = lib.UnpackUGeno(
self.GENO[:, (nParts-1)*nSNPsPart:], self.nSamples)
scores = scores + loadings[:, (nParts-1)*nSNPsPart:]@A.T
connect = CalcIBSConnection(ref_obj, self, self.nThreads)
D = np.diag(connect**-1)
scores = scores@D
return scores.T
def CalcSUGIBSiRScores(self, nComponents, is_exact=False):
ind = self.FindUnrelated()
Unrelated = self.Keep(ind)
if is_exact:
loadings = Unrelated.CalcSUGIBSLoadingsExact(nComponents)
else:
loadings = Unrelated.CalcSUGIBSLoadingsApprox(nComponents)
return self.ProjectSUGIBS(Unrelated, loadings)
# GWAS
def CalcLinearRegressionGWAS(self, trait, covariates):
betas, stats = lib.CalcLinearRegressionGWAS(
self.GENO, covariates, trait, self.nThreads)
df = len(trait)-covariates.shape[1]
pvalues = t.cdf(stats, df=df)
return betas, pvalues
def CalcCCAGWAS(self, trait):
Y = trait - trait.mean(axis=0)
betas, rho2 = lib.CalcCCAGWAS(self.GENO, Y, self.nThreads)
nDims = trait.shape[1]
t = (nDims**2-4)/(nDims**2+1-5)
Lambda = 1.0 - rho2
t = np.sqrt(t)
w = self.nSamples - (nDims+4)/2
df1 = nDims
df2 = w*t-nDims/2+1
Lambda = np.power(Lambda, 1.0/t)
F = (1-Lambda)/Lambda*df2/df1
pvalues = f.cdf(F, dfn=df2, dfd=df1)
return betas, rho2, pvalues
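def _example_usage():
    """Minimal usage sketch (the module docstring's Example section is empty);
    it assumes a plink binary fileset named "mydata" (mydata.bed/.bim/.fam)
    exists in the working directory."""
    snps = SNPLIB(nThreads=4)
    snps.importPLINKDATA('mydata')
    af = snps.CalcAlleleFrequencies()  # per-SNP allele frequencies
    scores = snps.CalcPCAScores(10)    # scores on the top 10 GRM eigenvectors
    return af, scores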
| 35.098143
| 117
| 0.593183
|
cfa2898c4fb053e99a06848a322a8c26f5397eb4
| 10,398
|
py
|
Python
|
watchman/integration/eden/test_eden_pathgen.py
|
chadaustin/watchman
|
b01a841f3eca5ebaf5ce4a62e1937c95851f450e
|
[
"MIT"
] | 9,308
|
2015-01-03T13:33:47.000Z
|
2022-03-31T06:45:26.000Z
|
watchman/integration/eden/test_eden_pathgen.py
|
00mjk/watchman
|
eb22a60e74f04065e24eb51ba1dd1342d66f2ad6
|
[
"MIT"
] | 878
|
2015-01-07T15:28:33.000Z
|
2022-03-30T09:30:10.000Z
|
watchman/integration/eden/test_eden_pathgen.py
|
00mjk/watchman
|
eb22a60e74f04065e24eb51ba1dd1342d66f2ad6
|
[
"MIT"
] | 932
|
2015-01-05T08:25:00.000Z
|
2022-03-25T11:11:42.000Z
|
# vim:ts=4:sw=4:et:
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# no unicode literals
from __future__ import absolute_import, division, print_function
import os
import pywatchman
import WatchmanEdenTestCase
def populate(repo):
# We ignore ".hg" here just so some of the tests that list files don't have to
# explicitly filter out the contents of this directory. However, in most situations
# the .hg directory normally should not be ignored.
repo.write_file(".watchmanconfig", '{"ignore_dirs":[".buckd", ".hg"]}')
repo.write_file("hello", "hola\n")
repo.write_file("adir/file", "foo!\n")
repo.write_file("bdir/test.sh", "#!/bin/bash\necho test\n", mode=0o755)
repo.write_file("bdir/noexec.sh", "#!/bin/bash\necho test\n")
repo.write_file("b*ir/star", "star")
repo.write_file("b\\*ir/foo", "foo")
repo.write_file("cdir/sub/file", "")
repo.symlink("slink", "hello")
repo.commit("initial commit.")
class TestEdenPathGenerator(WatchmanEdenTestCase.WatchmanEdenTestCase):
def test_defer_mtime(self):
root = self.makeEdenMount(populate)
res = self.watchmanCommand("watch", root)
self.assertEqual("eden", res["watcher"])
# Ensure that the mtime field is loaded for rendering.
# The expression doesn't use timestamps but the field list does.
res = self.watchmanCommand(
"query", root, {"glob": ["bdir/noexec.sh"], "fields": ["name", "mtime"]}
)
print(res)
self.assertEqual(res["files"][0]["name"], "bdir/noexec.sh")
self.assertGreater(res["files"][0]["mtime"], 0)
def test_eden_readlink(self):
root = self.makeEdenMount(populate)
res = self.watchmanCommand("watch", root)
self.assertEqual("eden", res["watcher"])
res = self.watchmanCommand(
"query",
root,
{
"expression": ["allof", ["type", "l"], ["not", ["dirname", ".eden"]]],
"fields": ["name", "symlink_target"],
},
)
print(res)
self.assertEqual(res["files"][0], {"name": "slink", "symlink_target": "hello"})
def test_non_existent_file(self):
root = self.makeEdenMount(populate)
res = self.watchmanCommand("watch", root)
self.assertEqual("eden", res["watcher"])
clock = self.watchmanCommand("clock", root)["clock"]
# Create the file that we want to remove
self.touchRelative(root, "111")
# We need to observe this file prior to deletion, otherwise
# eden will optimize it out of the results after the delete
res = self.watchmanCommand(
"query", root, {"since": clock, "fields": ["name", "mode"]}
)
clock = res["clock"]
os.unlink(os.path.join(root, "111"))
res = self.watchmanCommand(
"query", root, {"since": clock, "fields": ["name", "mode"]}
)
# Clunky piecemeal checks here because the `mode` value is set
# to something, even though it is deleted, but we cannot portably
# test the values from python land, so we want to check that it
# is non-zero
files = res["files"]
self.assertEqual(len(files), 1)
f = files[0]
self.assertEqual(f["name"], "111")
self.assertGreater(f["mode"], 0)
def test_eden_watch(self):
root = self.makeEdenMount(populate)
# make sure this exists; we should not observe it in any of the results
# that we get back from watchman because it is listed in the ignore_dirs
# config section.
os.mkdir(os.path.join(root, ".buckd"))
res = self.watchmanCommand("watch", root)
self.assertEqual("eden", res["watcher"])
self.assertFileList(
root,
self.eden_dir_entries
+ [
".eden",
".watchmanconfig",
"adir",
"adir/file",
"bdir",
"bdir/noexec.sh",
"bdir/test.sh",
"b*ir",
"b*ir/star",
"b\\*ir",
"b\\*ir/foo",
"cdir",
"cdir/sub",
"cdir/sub/file",
"hello",
"slink",
],
)
res = self.watchmanCommand(
"query", root, {"expression": ["type", "f"], "fields": ["name"]}
)
self.assertFileListsEqual(
res["files"],
[
".watchmanconfig",
"adir/file",
"bdir/noexec.sh",
"bdir/test.sh",
"b*ir/star",
"b\\*ir/foo",
"cdir/sub/file",
"hello",
],
)
res = self.watchmanCommand(
"query", root, {"expression": ["type", "l"], "fields": ["name"]}
)
self.assertFileListsEqual(res["files"], self.eden_dir_entries + ["slink"])
res = self.watchmanCommand(
"query",
root,
{"expression": ["type", "f"], "relative_root": "bdir", "fields": ["name"]},
)
self.assertFileListsEqual(res["files"], ["noexec.sh", "test.sh"])
res = self.watchmanCommand(
"query",
root,
{"expression": ["type", "f"], "fields": ["name"], "glob": ["*.sh"]},
)
self.assertFileListsEqual([], res["files"])
res = self.watchmanCommand(
"query",
root,
{
"expression": ["type", "f"],
"fields": ["name"],
"relative_root": "bdir",
"glob": ["*.sh"],
},
)
self.assertFileListsEqual(res["files"], ["noexec.sh", "test.sh"])
res = self.watchmanCommand(
"query",
root,
{"expression": ["type", "f"], "fields": ["name"], "glob": ["**/*.sh"]},
)
self.assertFileListsEqual(res["files"], ["bdir/noexec.sh", "bdir/test.sh"])
# glob_includedotfiles should be False by default.
res = self.watchmanCommand(
"query", root, {"fields": ["name"], "glob": ["**/root"]}
)
self.assertFileListsEqual(res["files"], [])
# Verify glob_includedotfiles=True is honored in Eden.
res = self.watchmanCommand(
"query",
root,
{"fields": ["name"], "glob": ["**/root"], "glob_includedotfiles": True},
)
self.assertFileListsEqual(res["files"], [".eden/root"])
res = self.watchmanCommand("query", root, {"path": [""], "fields": ["name"]})
self.assertFileListsEqual(
res["files"],
self.eden_dir_entries
+ [
".eden",
".watchmanconfig",
"adir",
"adir/file",
"b*ir",
"b*ir/star",
"bdir",
"bdir/noexec.sh",
"bdir/test.sh",
"b\\*ir",
"b\\*ir/foo",
"cdir",
"cdir/sub",
"cdir/sub/file",
"hello",
"slink",
],
)
res = self.watchmanCommand(
"query", root, {"path": [{"path": "bdir", "depth": 0}], "fields": ["name"]}
)
self.assertFileListsEqual(res["files"], ["bdir/noexec.sh", "bdir/test.sh"])
with self.assertRaises(pywatchman.CommandError) as ctx:
self.watchmanCommand(
"query",
root,
{"path": [{"path": "bdir", "depth": 1}], "fields": ["name"]},
)
self.assertIn("only supports depth", str(ctx.exception))
res = self.watchmanCommand(
"query", root, {"path": [""], "relative_root": "bdir", "fields": ["name"]}
)
self.assertFileListsEqual(res["files"], ["noexec.sh", "test.sh"])
# Don't wildcard match a name with a * in it
res = self.watchmanCommand(
"query", root, {"path": [{"path": "b*ir", "depth": 0}], "fields": ["name"]}
)
self.assertFileListsEqual(res["files"], ["b*ir/star"])
# Check that the globbing stuff does the right thing
# with a backslash literal here. Unfortunately, watchman
        # has a slight blind spot with such a path; we're normalizing
# backslash to a forward slash in the name of portability...
res = self.watchmanCommand(
"query",
root,
{"path": [{"path": "b\\*ir", "depth": 0}], "fields": ["name"]},
)
# ... so the path that gets encoded in the query is
# "b/*ir" and that gets expanded to "b/\*ir/*" when this
# is mapped to a glob and passed to eden. This same
# path query won't yield the correct set of matches
# in the non-eden case either.
self.assertFileListsEqual(res["files"], [])
res = self.watchmanCommand(
"query",
root,
{"expression": ["type", "f"], "fields": ["name"], "suffix": ["sh", "js"]},
)
self.assertFileListsEqual(res["files"], ["bdir/noexec.sh", "bdir/test.sh"])
res = self.watchmanCommand(
"query",
root,
{"expression": ["type", "f"], "fields": ["name"], "suffix": ["s*"]},
)
# With overlapping glob patterns in the same generator, Watchman should
# not return duplicate results.
res = self.watchmanCommand(
"query", root, {"fields": ["name"], "glob": ["bdir/*.sh", "bdir/test*"]}
)
files = res["files"]
self.assertFileListsEqual(
files,
["bdir/noexec.sh", "bdir/test.sh"],
"Overlapping patterns should yield no duplicates",
)
# edenfs had a bug where a globFiles request like ["foo/*", "foo/*/*"] would
# effectively ignore the second pattern. Ensure that bug has been fixed.
res = self.watchmanCommand(
"query", root, {"fields": ["name"], "glob": ["cdir/*", "cdir/*/*"]}
)
self.assertFileListsEqual(res["files"], ["cdir/sub", "cdir/sub/file"])
| 35.855172
| 88
| 0.513464
|
4ecf99c3ba68768287b6fb5dda2748645e03c941
| 2,488
|
py
|
Python
|
utils/deploy/geoapp/makelocalcfg.py
|
d3m-purdue/geoapp
|
9d848688b988ec43cb0fe4f11650bbeb7779220e
|
[
"Apache-2.0"
] | null | null | null |
utils/deploy/geoapp/makelocalcfg.py
|
d3m-purdue/geoapp
|
9d848688b988ec43cb0fe4f11650bbeb7779220e
|
[
"Apache-2.0"
] | null | null | null |
utils/deploy/geoapp/makelocalcfg.py
|
d3m-purdue/geoapp
|
9d848688b988ec43cb0fe4f11650bbeb7779220e
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
rootPath = os.environ['KWDEMO_KEY']
data = {
'LIMITED': os.environ.get('LIMITED_DATA', '') == 'true',
'LOCAL': os.environ.get('LIMITED_DATA', '') == 'local',
'FULL': True
}
for key in data:
if data[key] and key != 'FULL':
data['FULL'] = False
cfg = """
[global]
server.socket_port: 8080
tools.proxy.on: True
[database]
uri: "mongodb://%HOSTIP%:27017/%ROOTPATH%"
[server]
# Set to "production" or "development"
mode: "production"
api_root: "../api/v1"
static_root: "girder/static"
[resources]
# The activityLog is where the Draper logging receiver is located. If this
# optional module is not included, this parameter is irrelevant
%FULL%activityLogURI: "http://10.1.93.208"
%LIMITED%activityLogURI:
%LOCAL%activityLogURI:
# Each entry in this section is an available database. The order is by lowest
# "order" value, then alphabetically for ties. Each entry consists of {"name":
# (name shown to the user), "class": (internal database class, such as
# TaxiViaPostgres), "params": (database specific parameters)}
[taxidata]
%FULL%postgresfullg: {"order": 0, "name": "Postgres Full w/ Green", "class": "TaxiViaPostgresSeconds", "params": {"db": "taxifullg", "host": "%HOSTIP%", "user": "taxi", "password": "taxi#1"}}
%LIMITED%postgresfullg:
%LOCAL%postgresfullg: {"order": 0, "name": "Postgres Full w/ Green", "class": "TaxiViaPostgresSeconds", "params": {"db": "taxifullg", "host": "parakon", "user": "taxi", "password": "taxi#1"}}
%FULL%postgres12:
%LIMITED%postgres12: {"order": 2, "name": "Postgres 1/12 Shuffled", "class": "TaxiViaPostgres", "params": {"db": "taxi12r", "host": "%HOSTIP%", "user": "taxi", "password": "taxi#1"}}
%LOCAL%postgres12:
postgresfull:
mongofull:
mongo12r:
mongo12:
mongo:
tangelo:
[instagramdata]
%FULL%postgres: {"order": 0, "name": "Instagram and Twitter", "class": "InstagramViaPostgres", "params": {"db": "instatwitter", "host": "%HOSTIP%", "user": "taxi", "password": "taxi#1"}}
%LIMITED%postgres:
%LOCAL%postgres: {"order": 0, "name": "Instagram and Twitter", "class": "InstagramViaPostgres", "params": {"db": "instatwitter", "host": "parakon", "user": "taxi", "password": "taxi#1"}}
"""
hostip = os.popen("netstat -nr | grep '^0\.0\.0\.0' | awk '{print $2}'").read()
cfg = cfg.replace('%HOSTIP%', hostip.strip()).strip()
cfg = cfg.replace('%ROOTPATH%', rootPath)
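# Lines in the cfg template are tagged with %FULL%, %LIMITED% or %LOCAL%; the loop
# below removes the tag for the active deployment flavour and turns it into a
# leading '#' for the inactive flavours, so those lines end up commented out.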
for key in data:
cfg = cfg.replace('%' + key + '%', '' if data[key] else '#')
print(cfg)
| 35.042254
| 191
| 0.65836
|
26cd1b2e58c7eb6ac59ef37eb4b1ce490f4bb9ec
| 6,977
|
py
|
Python
|
assignment/assignment3/python/task1/task1_dev.py
|
maple1eaf/data_mining_inf553
|
fba0c19f46aac5882e103dbe53155e7128a9290f
|
[
"MIT"
] | 1
|
2021-05-04T05:17:57.000Z
|
2021-05-04T05:17:57.000Z
|
assignment/assignment3/python/task1/task1_dev.py
|
maple1eaf/data_mining_inf553
|
fba0c19f46aac5882e103dbe53155e7128a9290f
|
[
"MIT"
] | null | null | null |
assignment/assignment3/python/task1/task1_dev.py
|
maple1eaf/data_mining_inf553
|
fba0c19f46aac5882e103dbe53155e7128a9290f
|
[
"MIT"
] | 1
|
2021-09-24T08:17:23.000Z
|
2021-09-24T08:17:23.000Z
|
"""
spark-submit task1.py <input_file> <output_file>
spark-submit task1_dev.py "file:///Users/markduan/duan/USC_course/USC_APDS/INF553/homework/hw3/data/train_review.json" "./output/task1_dev_output.json"
"""
import sys
import time
import json
import random
from pyspark import SparkConf, SparkContext
# for run on vocareum
# import os
# os.environ['PYSPARK_PYTHON'] = '/usr/local/bin/python3.6'
# os.environ['PYSPARK_DRIVER_PYTHON'] = '/usr/local/bin/python3.6'
NUM_HASHS = 30
LARGE_NUMBER = 999999
NUM_BANDS = NUM_HASHS
JACCARD_SIMILARITY_THRESHOLD = 0.05 # >=
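# With NUM_BANDS == NUM_HASHS each LSH band holds r = NUM_HASHS // NUM_BANDS = 1
# signature row, so two businesses become candidates as soon as a single min-hash
# value collides; that favours recall, which suits the low 0.05 threshold, and
# false positives are removed later by the exact Jaccard check.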
input_file_path = sys.argv[1]
output_file_path = sys.argv[2]
class ProjectionTable():
"""
    Key and value domains form a one-to-one projection.
"""
def __init__(self, values):
"""
values - iterable
iv - index-value pair
"""
self.values = values
self.length = len(values)
self.indice = list(range(self.length))
self.iv = list(enumerate(values))
def getIndex(self, value):
for i, v in self.iv:
if v == value:
return i
return None
def getValue(self, index):
for i, v in self.iv:
if i == index:
return v
return None
def isPrime(num):
if num == 1:
return False
for divider in range(2, num):
if num % divider == 0:
return False
return True
def smallestPrime(num):
while(1):
num = num + 1
if isPrime(num):
return num
def generateHashs(n, m):
"""
generate a list of hash functions
:n - number of hash functions we want to generate
:m - number of attributes
"""
random.seed(12)
ab = []
for i in range(2*n):
n1 = random.randint(1,m)
if n1 not in ab:
ab.append(n1)
a_list = ab[:n]
b_list = ab[n:]
m_new = smallestPrime(m)
print('ab list length:', len(ab))
print('the prime number m_new:', m_new)
def generateHash(i):
a = a_list[i]
b = b_list[i]
def hashfunc(x):
return (a * hash(x) + b) % m_new
return hashfunc
return [generateHash(i) for i in range(n)]
def generateHashForLSH(r):
random.seed(8)
l_ = []
for i in range(r):
a = random.randint(1, 10000)
if a not in l_:
l_.append(a)
def h(l):
# l - list of integers
l_len = len(l)
sum_ = 0
for i in range(l_len):
sum_ = sum_ + l_[i] * l[i]
return sum_ % 500000
return h
def updateSignature(s_cur, s_new, length):
for i in range(length):
if s_new[i] < s_cur[i]:
s_cur[i] = s_new[i]
def minHash(x, hashs):
b_id = x[0]
u_ids = x[1]
signature = [LARGE_NUMBER] * NUM_HASHS
for u in u_ids:
s_ = [hashs[i](u) for i in range(NUM_HASHS)]
updateSignature(signature, s_, NUM_HASHS)
res = (b_id, signature)
return res
# def minHash2(x, hashs):
# b_id = x[0]
# u_ids = x[1]
# signature = []
# for hafunc in hashs:
# cur_ = LARGE_NUMBER
# for u in u_ids:
# hash_value = hafunc(u)
# if hash_value < cur_:
# cur_ = hash_value
# signature.append(cur_)
# return(b_id, signature)
def LSH(x, b, r, hash_lsh):
b_id = x[0]
signature = x[1]
ress = []
for i in range(b):
v_hash = hash_lsh(signature[i*r:(i+1)*r])
res = ((v_hash, i), b_id)
ress.append(res)
return ress
def generatePairs(l):
l_len = len(l)
res_l = [(l[i],l[j]) for i in range(l_len) for j in range(i+1, l_len)]
return res_l
def jaccardSimilarity(l1, l2):
# items in l1 are unique to each other, so does l2
l1_len = len(l1)
l2_len = len(l2)
intersect_set = set(l1).intersection(set(l2))
inter_len = len(intersect_set)
union_len = l1_len + l2_len - inter_len
js = inter_len / union_len
return js
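# Worked example: jaccardSimilarity([1, 2, 3, 4], [3, 4, 5]) has an intersection
# of size 2 and a union of size 5, i.e. 0.4, which passes the 0.05 threshold used
# below to confirm candidate pairs.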
def getData(sc):
"""
:return data - [(b_index, u_index), ...]
"""
raw_data = sc.textFile(input_file_path) \
.map(json.loads) \
.map(lambda r: (r['business_id'], r['user_id'])) \
.distinct() \
.cache()
# count how many distinct business_id
b_id_len = raw_data.map(lambda x: x[0]).distinct().count()
    # count how many distinct user_id
u_id_len = raw_data.map(lambda x: x[1]).distinct().count()
# data - [(b_id, u_id), ...]
data = raw_data.groupByKey().cache()
return data, b_id_len, u_id_len
def process(data, hashs, b, r, hash_lsh):
candidates = data.map(lambda x: minHash(x, hashs)) \
.flatMap(lambda x: LSH(x, b, r, hash_lsh)) \
.groupByKey() \
.filter(lambda x: len(x[1]) > 1) \
.map(lambda x: list(x[1])) \
.flatMap(lambda x: generatePairs(x)) \
.distinct() \
.collect()
t_candi = time.time()
print('done candidates. time:%fs.' % (t_candi - t_a))
# print('candidates:', candidates)
print('length of candidates:', len(candidates))
return candidates
def process3(data, candidates):
# no spark
t_p4 = time.time()
data_groupby_bid = data.map(lambda x: (x[0], list(x[1]))).collect()
b_dict = {}
for (k,v) in data_groupby_bid:
b_dict[k] = v
res_valid_pairs = []
for (b1, b2) in candidates:
js_ = jaccardSimilarity(b_dict[b1], b_dict[b2])
if js_ >= JACCARD_SIMILARITY_THRESHOLD:
res_valid_pairs.append(((b1, b2), js_))
print('---number of valid pairs:---', len(res_valid_pairs))
t_p5 = time.time()
print('---done count jaccard similarity. time:%fs.---' % (t_p5 - t_p4))
return res_valid_pairs
def process4(true_positives):
with open(output_file_path, 'w', encoding='utf-8') as fp:
for pair in true_positives:
b1 = pair[0][0]
b2 = pair[0][1]
sim = pair[1]
r_json = json.dumps({'b1': b1, 'b2': b2, 'sim': sim})
fp.write(r_json)
fp.write('\n')
if __name__ == "__main__":
t_a = time.time()
# define spark env
conf = SparkConf() \
.setAppName("task1") \
.setMaster("local[*]")
sc = SparkContext(conf=conf)
# step 1: get data and the rename tables
data, b_id_len, u_id_len = getData(sc)
# generate a list of hash functions for Min-Hash
hashs = generateHashs(NUM_HASHS, u_id_len)
b = NUM_BANDS
r = int(NUM_HASHS / NUM_BANDS)
hash_for_lsh = generateHashForLSH(r)
# step 2: implement Min-Hash to transfer user_id to signature
candidates = process(data, hashs, b, r, hash_for_lsh)
# step 4: verify candidates using Jaccard similarity
true_positives = process3(data, candidates)
# step 5: output
process4(true_positives)
# with open(output_file_path, 'w', encoding='utf-8') as fp:
# json.dump(js_pairs, fp)
t_b = time.time()
print('time consume: %fs' % (t_b-t_a))
| 26.033582
| 151
| 0.578902
|
8a5628670ca908b9dfde19eaedbbd15b104e061f
| 4,752
|
py
|
Python
|
conpaas-services/src/conpaas/core/clouds/base.py
|
bopopescu/conpaas-1
|
cea3c02f499a729464697de7cf98c2041febc0ab
|
[
"BSD-3-Clause"
] | 1
|
2015-08-03T03:57:06.000Z
|
2015-08-03T03:57:06.000Z
|
conpaas-services/src/conpaas/core/clouds/base.py
|
bopopescu/conpaas-1
|
cea3c02f499a729464697de7cf98c2041febc0ab
|
[
"BSD-3-Clause"
] | null | null | null |
conpaas-services/src/conpaas/core/clouds/base.py
|
bopopescu/conpaas-1
|
cea3c02f499a729464697de7cf98c2041febc0ab
|
[
"BSD-3-Clause"
] | 2
|
2017-05-27T09:07:53.000Z
|
2020-07-26T03:15:55.000Z
|
# -*- coding: utf-8 -*-
"""
conpaas.core.clouds.base
========================
ConPaaS core: cloud-independent IaaS code.
:copyright: (C) 2010-2013 by Contrail Consortium.
"""
from conpaas.core.node import ServiceNode
from conpaas.core.log import create_logger
class Cloud:
''' Abstract Cloud '''
def __init__(self, cloud_name):
#TODO: it shouldn't be cloud_name == config file section name
self.cloud_name = cloud_name
self.connected = False
self.driver = None
self.cx_template = None
self.cx = None
self.logger = create_logger(__name__)
def get_cloud_name(self):
return self.cloud_name
def _check_cloud_params(self, iaas_config, cloud_params=[]):
"""Check for missing or empty parameters in iaas_config"""
error_template = '%s config param %s for %s'
for field in cloud_params:
if not iaas_config.has_option(self.cloud_name, field):
raise Exception('Missing ' + error_template % (
self.get_cloud_type(), field, self.cloud_name
))
if iaas_config.get(self.cloud_name, field) == '':
raise Exception('Empty ' + error_template % (
self.get_cloud_type(), field, self.cloud_name
))
return None
def _connect(self):
'''
_connect is the method used to set the driver and connect to the cloud
'''
raise NotImplementedError(
'_connect not implemented for this cloud driver')
def get_cloud_type(self):
raise NotImplementedError(
'get_cloud_type not implemented for this cloud driver')
def get_context_template(self):
return self.cx_template
def set_context_template(self, cx):
"""
Set the context template (i.e. without replacing anything in it)
"""
self.cx_template = cx
self.cx = cx
def config(self, config_params, context):
raise NotImplementedError(
'config not implemented for this cloud driver')
def list_vms(self, has_private_ip=True):
'''
lists the service nodes in the cloud instances
@return vms: List[ServiceNode]
'''
self.logger.debug('list_vms(has_private_ip=%s)' % has_private_ip)
if self.connected is False:
self._connect()
return [serviceNode for serviceNode in
self._create_service_nodes(self.driver.list_nodes())]
def new_instances(self, count, name='conpaas', inst_type=None):
raise NotImplementedError(
'new_instances not implemented for this cloud driver')
def _create_service_nodes(self, instances, has_private_ip=True):
'''
creates a list of ServiceNode
@param instances: List of nodes provide by the driver or a single node
@type instances: L{libcloud.compute.Node} or C{libcloud.compute.Node}
@param has_private_ip: some instances only need the public ip
@type has_private_ip: C{bool}
'''
if type(instances) is list:
return [ self.__create_one_service_node(node, has_private_ip)
for node in instances ]
return self.__create_one_service_node(instances, has_private_ip)
def __create_one_service_node(self, instance, has_private_ip=True):
'''
creates a single ServiceNode
@param instance: node provided by the driver
@type instance: C{libcloud.compute.Node}
@param has_private_ip: some instances only need the public ip
@type has_private_ip: C{bool}
'''
ip, private_ip = self.__get_ips(instance, has_private_ip)
return ServiceNode(instance.id, ip, private_ip, self.cloud_name)
def __get_ips(self, instance, has_private_ip):
if instance.public_ips:
ip = instance.public_ips[0]
else:
ip = ''
if has_private_ip:
if instance.private_ips:
private_ip = instance.private_ips[0]
else:
private_ip = ''
else:
private_ip = ip
if hasattr(ip, 'address'):
ip = ip.address
if hasattr(private_ip, 'address'):
private_ip = private_ip.address
return ip, private_ip
def kill_instance(self, node):
'''Kill a VM instance.
@param node: A ServiceNode instance, where node.id is the
vm_id
'''
self.logger.debug('kill_instance(node=%s)' % node)
if self.connected is False:
self._connect()
return self.driver.destroy_node(node.as_libcloud_node())
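# Illustrative only: a minimal concrete driver sketch showing which hooks a
# Cloud subclass is expected to provide. The class below is hypothetical and not
# part of ConPaaS; real drivers live in the sibling modules of this package.
class _ExampleDummyCloud(Cloud):

    def get_cloud_type(self):
        return 'dummy'

    def _connect(self):
        # a real driver would instantiate a libcloud driver here
        self.driver = None
        self.connected = True

    def config(self, config_params=None, context=None):
        # accept the context template verbatim; nothing to substitute
        self.set_context_template(context or '')

    def new_instances(self, count, name='conpaas', inst_type=None):
        raise NotImplementedError('the dummy driver cannot start VMs')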
| 30.857143
| 79
| 0.608375
|
3f6dba3bcd5c4ebc2983a38da5d37c7a236f7bd0
| 20,531
|
py
|
Python
|
servers/unity/src/win/tango_pygame.py
|
arpitgogia/mars_city
|
30cacd80487a8c2354bbc15b4fad211ed1cb4f9d
|
[
"BSD-2-Clause-FreeBSD"
] | 25
|
2016-07-20T04:49:14.000Z
|
2021-08-25T09:05:04.000Z
|
servers/unity/src/win/tango_pygame.py
|
arpitgogia/mars_city
|
30cacd80487a8c2354bbc15b4fad211ed1cb4f9d
|
[
"BSD-2-Clause-FreeBSD"
] | 16
|
2016-12-27T08:30:27.000Z
|
2018-06-18T08:51:44.000Z
|
servers/unity/src/win/tango_pygame.py
|
arpitgogia/mars_city
|
30cacd80487a8c2354bbc15b4fad211ed1cb4f9d
|
[
"BSD-2-Clause-FreeBSD"
] | 49
|
2016-07-20T13:08:27.000Z
|
2020-06-02T18:26:12.000Z
|
import argparse
import ctypes
import sys
import PyTango
import numpy
import pygame
from PyTango.server import Device, DeviceMeta, attribute
from pykinect2 import *
from pykinect2 import PyKinectRuntime
from pykinect2 import PyKinectV2
import _ctypes
import threading
import time
KINECT_FPS = 30
if sys.hexversion >= 0x03000000:
pass
else:
pass
# colors for drawing different bodies
SKELETON_COLORS = [pygame.color.THECOLORS["red"],
pygame.color.THECOLORS["blue"],
pygame.color.THECOLORS["green"],
pygame.color.THECOLORS["orange"],
pygame.color.THECOLORS["purple"],
pygame.color.THECOLORS["yellow"],
pygame.color.THECOLORS["violet"]]
class PyTracker(Device):
"""
PyTango class which is responsible for sending skeleton array.
"""
__metaclass__ = DeviceMeta
POLLING = 30
# the Tracker object, from which get skeletal data
skletonobj = None
def init_device(self):
Device.init_device(self)
print "init tango"
self.info_stream('In Python init_device method')
self.set_state(PyTango.DevState.ON)
self.skleton = PyTango.IMAGE
def set_skleton(self, skletonobj):
self.skletonobj = skletonobj
# TODO!
# skleton = attribute(label="skleton",
# dtype=[[numpy.float64,
# PyTango.IMAGE,
# PyTango.READ, 100, 100], ],
# polling_period=POLLING,
# max_dim_x=1024,
# max_dim_y=1024)
skleton = attribute(label="skleton",
dtype=PyTango.IMAGE,
polling_period=POLLING,
max_dim_x=1024,
max_dim_y=1024)
# skleton = attribute(label="skleton",
# dtype=[[bytearray,numpy.int32],],
# polling_period=POLLING,
# max_dim_x=1024,
# max_dim_y=1024)
def aquire_skelton_status(self):
if self.skletonobj is None:
return False
else:
return True
def read_skleton(self):
# print self.aquire_skelton_status()
if self.aquire_skelton_status():
try:
# print "inside try statement"
# print self.skletonobj
self.bodies = self.skletonobj.get_bodies()
# print self.bodies
# print "after bodies"
                for i in range(0, self.skletonobj._kinect.max_body_count):  # the Kinect runtime lives on the Tracker object
body = self.bodies.bodies[i]
if not body.is_tracked:
continue
joints = body.joints
# print joints
# convert joint coordinates to color space
joint_points = self.skletonobj._kinect.body_joints_to_color_space(joints)
self.coord_array = self.skletonobj.save_body_coodrinates(joints, joint_points)
print self.coord_array
self.push_change_event("skleton", self.coord_array)
except:
print("error reading skleton")
return self.coord_array
else:
print("no skeleton found")
class Tracker:
"""
    This class has all the functions required to get data from the Kinect, draw it on a
    Pygame window, and start the Tango server.
"""
device_name = None
def get_bodies(self):
"""
Gets skeletons from kinect
:return skeleton array:
"""
if self._bodies is not None:
return self._bodies
def set_tracker_in_device(self):
util = PyTango.Util.instance()
device = util.get_device_by_name("C3/unity/" + self.device_name)
device.set_skleton(self)
def start_tango(self):
"""
Starts the tango server. args should be equal to filename
:return:
"""
PyTango.server.run((PyTracker,),
post_init_callback=self.set_tracker_in_device,
args=['tango_pygame', self.device_name])
def draw_body_bone(self, joints, jointPoints, color, joint0, joint1):
"""
        Takes a start and an end joint and draws a straight bone between them.
:param joints: address of joints
:param jointPoints: joints in colour space
:param color: colors to be used to plot
:param joint0: starting joint
:param joint1: Ending Joint
:return:
"""
joint0State = joints[joint0].TrackingState
joint1State = joints[joint1].TrackingState
# both joints are not tracked
if (joint0State == PyKinectV2.TrackingState_NotTracked) or (joint1State == PyKinectV2.TrackingState_NotTracked):
return
# both joints are not *really* tracked
if (joint0State == PyKinectV2.TrackingState_Inferred) and (joint1State == PyKinectV2.TrackingState_Inferred):
return
# ok, at least one is good
start = (jointPoints[joint0].x, jointPoints[joint0].y)
end = (jointPoints[joint1].x, jointPoints[joint1].y)
try:
# print start, " ", end
pygame.draw.line(self._frame_surface, color, start, end, 8)
except: # need to catch it due to possible invalid positions (with inf)
pass
def skleton2numpyarray(self, skleton, strlength):
"""
Converts a dictionary(skeleton) to Numpy array
:param skleton: Input dictionary data type
:param strlength: length of the string for value of the key
:return: numpy array
"""
names = ['id', 'data']
len1 = "S" + str(strlength) # string length accepted
formats = ['S9', len1]
dtype = dict(names=names, formats=formats)
array = numpy.array(skleton.items(), dtype=dtype)
return array
def get_coordinates(self, joints, jointPoints, color, start, end):
"""
:param joints: Joint address
:param jointPoints: joint in colour space
:param color: color of skeleton
:param start: start joint
:param end: end joint
        :return: list containing the coordinate tuples of the start and end joints
"""
final_coordinates = []
joint0State = joints[start].TrackingState
joint1State = joints[end].TrackingState
# both joints are not tracked
if (joint0State == PyKinectV2.TrackingState_NotTracked) or (joint1State == PyKinectV2.TrackingState_NotTracked):
return
# both joints are not *really* tracked
if (joint0State == PyKinectV2.TrackingState_Inferred) and (joint1State == PyKinectV2.TrackingState_Inferred):
return
# ok, at least one is good
startcordinates = (jointPoints[start].x, jointPoints[start].y)
endcoordinates = (jointPoints[end].x, jointPoints[end].y)
try:
# print start, " ", end
final_coordinates.append(startcordinates)
final_coordinates.append(endcoordinates)
return final_coordinates
except: # need to catch it due to possible invalid positions (with inf)
pass
def save_body_coodrinates(self, joints, jointPoints):
# Torso
torso = []
torso.append(self.get_coordinates(joints, jointPoints, PyKinectV2.JointType_Head, PyKinectV2.JointType_Neck))
torso.append(
self.get_coordinates(joints, jointPoints, PyKinectV2.JointType_Neck, PyKinectV2.JointType_SpineShoulder))
torso.append(self.get_coordinates(joints, jointPoints, PyKinectV2.JointType_SpineShoulder,
PyKinectV2.JointType_SpineMid))
torso.append(
self.get_coordinates(joints, jointPoints, PyKinectV2.JointType_SpineMid, PyKinectV2.JointType_SpineBase))
torso.append(self.get_coordinates(joints, jointPoints, PyKinectV2.JointType_SpineShoulder,
PyKinectV2.JointType_ShoulderRight))
torso.append(self.get_coordinates(joints, jointPoints, PyKinectV2.JointType_SpineShoulder,
PyKinectV2.JointType_ShoulderLeft))
torso.append(
self.get_coordinates(joints, jointPoints, PyKinectV2.JointType_SpineBase, PyKinectV2.JointType_HipRight))
torso.append(
self.get_coordinates(joints, jointPoints, PyKinectV2.JointType_SpineBase, PyKinectV2.JointType_HipLeft))
# Right Arm
right_arm = []
right_arm.append(self.get_coordinates(joints, jointPoints, PyKinectV2.JointType_ShoulderRight,
PyKinectV2.JointType_ElbowRight))
right_arm.append(self.get_coordinates(joints, jointPoints, PyKinectV2.JointType_ElbowRight,
PyKinectV2.JointType_WristRight))
right_arm.append(self.get_coordinates(joints, jointPoints, PyKinectV2.JointType_WristRight,
PyKinectV2.JointType_HandRight))
right_arm.append(self.get_coordinates(joints, jointPoints, PyKinectV2.JointType_HandRight,
PyKinectV2.JointType_HandTipRight))
right_arm.append(self.get_coordinates(joints, jointPoints, PyKinectV2.JointType_WristRight,
PyKinectV2.JointType_ThumbRight))
# Left Arm
left_arm = []
left_arm.append(self.get_coordinates(joints, jointPoints, PyKinectV2.JointType_ShoulderLeft,
PyKinectV2.JointType_ElbowLeft))
left_arm.append(
self.get_coordinates(joints, jointPoints, PyKinectV2.JointType_ElbowLeft,
PyKinectV2.JointType_WristLeft))
left_arm.append(
self.get_coordinates(joints, jointPoints, PyKinectV2.JointType_WristLeft, PyKinectV2.JointType_HandLeft))
left_arm.append(self.get_coordinates(joints, jointPoints, PyKinectV2.JointType_HandLeft,
PyKinectV2.JointType_HandTipLeft))
left_arm.append(
self.get_coordinates(joints, jointPoints, PyKinectV2.JointType_WristLeft,
PyKinectV2.JointType_ThumbLeft))
# Right Leg
right_leg = []
right_leg.append(
self.get_coordinates(joints, jointPoints, PyKinectV2.JointType_HipRight, PyKinectV2.JointType_KneeRight))
right_leg.append(self.get_coordinates(joints, jointPoints, PyKinectV2.JointType_KneeRight,
PyKinectV2.JointType_AnkleRight))
right_leg.append(self.get_coordinates(joints, jointPoints, PyKinectV2.JointType_AnkleRight,
PyKinectV2.JointType_FootRight))
# Left Leg
left_leg = []
left_leg.append(
self.get_coordinates(joints, jointPoints, PyKinectV2.JointType_HipLeft, PyKinectV2.JointType_KneeLeft))
left_leg.append(
self.get_coordinates(joints, jointPoints, PyKinectV2.JointType_KneeLeft, PyKinectV2.JointType_AnkleLeft))
left_leg.append(
self.get_coordinates(joints, jointPoints, PyKinectV2.JointType_AnkleLeft, PyKinectV2.JointType_FootLeft))
skeleton = {"torso": str(torso), "right arm": str(right_arm), "left arm": str(left_arm),
"right leg": str(right_leg),
"left leg": str(left_leg)}
strlength = len(torso) + len(right_arm) + len(left_arm) + len(right_leg) + len(left_leg)
skeletonnd = self.skleton2numpyarray(skeleton, strlength)
return skeletonnd
def draw_body(self, joints, jointPoints, color):
"""
:param joints: Joints
:param jointPoints: joint point in color space
:param color: color of skeleton
:return: none
"""
# Torso
self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_Head, PyKinectV2.JointType_Neck)
self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_Neck, PyKinectV2.JointType_SpineShoulder)
self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_SpineShoulder,
PyKinectV2.JointType_SpineMid)
self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_SpineMid, PyKinectV2.JointType_SpineBase)
self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_SpineShoulder,
PyKinectV2.JointType_ShoulderRight)
self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_SpineShoulder,
PyKinectV2.JointType_ShoulderLeft)
self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_SpineBase, PyKinectV2.JointType_HipRight)
self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_SpineBase, PyKinectV2.JointType_HipLeft)
# Right Arm
self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_ShoulderRight,
PyKinectV2.JointType_ElbowRight)
self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_ElbowRight,
PyKinectV2.JointType_WristRight)
self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_WristRight, PyKinectV2.JointType_HandRight)
self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_HandRight,
PyKinectV2.JointType_HandTipRight)
self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_WristRight,
PyKinectV2.JointType_ThumbRight)
# Left Arm
self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_ShoulderLeft,
PyKinectV2.JointType_ElbowLeft)
self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_ElbowLeft, PyKinectV2.JointType_WristLeft)
self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_WristLeft, PyKinectV2.JointType_HandLeft)
self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_HandLeft, PyKinectV2.JointType_HandTipLeft)
self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_WristLeft, PyKinectV2.JointType_ThumbLeft)
# Right Leg
self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_HipRight, PyKinectV2.JointType_KneeRight)
self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_KneeRight, PyKinectV2.JointType_AnkleRight)
self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_AnkleRight, PyKinectV2.JointType_FootRight)
# Left Leg
self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_HipLeft, PyKinectV2.JointType_KneeLeft)
self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_KneeLeft, PyKinectV2.JointType_AnkleLeft)
self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_AnkleLeft, PyKinectV2.JointType_FootLeft)
def draw_color_frame(self, frame, target_surface):
"""
        Utility function that copies the Kinect color frame into the pygame surface buffer.
:param frame:
:param target_surface:
:return:
"""
target_surface.lock()
address = self._kinect.surface_as_array(target_surface.get_buffer())
ctypes.memmove(address, frame.ctypes.data, frame.size)
del address
target_surface.unlock()
def __init__(self, device_name):
self.device_name = device_name
main_thread = threading.Thread(target=self.run())
main_thread.daemon = True
main_thread.run()
time.sleep(0.1)
print "starting tango"
self.start_tango()
def run(self):
pygame.init()
self._clock = pygame.time.Clock()
# Set the width and height of the screen [width, height]
self._infoObject = pygame.display.Info()
self._screen = pygame.display.set_mode((self._infoObject.current_w >> 1, self._infoObject.current_h >> 1),
pygame.HWSURFACE | pygame.DOUBLEBUF | pygame.RESIZABLE, 32)
pygame.display.set_caption("Kinect for Windows v2 Body Game")
self._done = False
self._clock = pygame.time.Clock()
# Kinect runtime object, we want only color and body frames
self._kinect = PyKinectRuntime.PyKinectRuntime(
PyKinectV2.FrameSourceTypes_Color | PyKinectV2.FrameSourceTypes_Body)
# back buffer surface for getting Kinect color frames, 32bit color, width and height equal to the Kinect color frame size
self._frame_surface = pygame.Surface(
(self._kinect.color_frame_desc.Width, self._kinect.color_frame_desc.Height), 0, 32)
# here we will store skeleton data
self._bodies = None
# -------- Main Program Loop -----------
while not self._done:
# --- Main event loop
for event in pygame.event.get(): # User did something
if event.type == pygame.QUIT: # If user clicked close
self._done = True # Flag that we are done so we exit this loop
elif event.type == pygame.VIDEORESIZE: # window resized
self._screen = pygame.display.set_mode(event.dict['size'],
pygame.HWSURFACE | pygame.DOUBLEBUF | pygame.RESIZABLE, 32)
            # --- Woohoo! We've got a color frame! Let's fill our back buffer surface with the frame's data
if self._kinect.has_new_color_frame():
frame = self._kinect.get_last_color_frame()
self.draw_color_frame(frame, self._frame_surface)
frame = None
# We have a body frame, so can get skeletons
if self._kinect.has_new_body_frame():
self._bodies = self._kinect.get_last_body_frame()
# --- draw skeletons to _frame_surface
if self._bodies is not None:
for i in range(0, self._kinect.max_body_count):
body = self._bodies.bodies[i]
if not body.is_tracked:
continue
joints = body.joints
# convert joint coordinates to color space
joint_points = self._kinect.body_joints_to_color_space(joints)
self.draw_body(joints, joint_points, SKELETON_COLORS[i])
# self.save_body_coodrinates(joints,joint_points)
h_to_w = float(self._frame_surface.get_height()) / self._frame_surface.get_width()
target_height = int(h_to_w * self._screen.get_width())
surface_to_draw = pygame.transform.scale(self._frame_surface, (self._screen.get_width(), target_height))
self._screen.blit(surface_to_draw, (0, 0))
surface_to_draw = None
pygame.display.update()
# --- Go ahead and update the screen with what we've drawn.
pygame.display.flip()
# --- Limit to 60 frames per second
self._clock.tick(60)
# Close our Kinect sensor, close the window and quit.
self._kinect.close()
pygame.quit()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Enter the device name.')
parser.add_argument('device',
choices=['eras1'],
help='the device where this data will be published'
)
# # parse arguments
try:
args = parser.parse_args()
except:
pass
else:
print args.device, "hello"
t = Tracker(args.device)
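# Client-side sketch (illustrative): once the server above is running and the
# device "C3/unity/eras1" is registered in the Tango database, the published
# skeleton attribute can be read with a plain DeviceProxy:
#
#   import PyTango
#   proxy = PyTango.DeviceProxy("C3/unity/eras1")
#   skeleton = proxy.read_attribute("skleton").value
#
# The attribute name "skleton" matches the attribute declared in PyTracker.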
| 45.222467
| 130
| 0.612683
|
a9bb58fc9d1e7ccb23d15e8200e8d09450955908
| 8,290
|
py
|
Python
|
torchvision/models/shufflenetv2.py
|
ZJUGuoShuai/vision
|
a9940fe4b2b63bd82a2f853616e00fd0bd112f9a
|
[
"BSD-3-Clause"
] | 3
|
2021-10-30T10:13:40.000Z
|
2021-12-12T10:26:14.000Z
|
torchvision/models/shufflenetv2.py
|
ZJUGuoShuai/vision
|
a9940fe4b2b63bd82a2f853616e00fd0bd112f9a
|
[
"BSD-3-Clause"
] | null | null | null |
torchvision/models/shufflenetv2.py
|
ZJUGuoShuai/vision
|
a9940fe4b2b63bd82a2f853616e00fd0bd112f9a
|
[
"BSD-3-Clause"
] | 1
|
2020-01-10T12:50:14.000Z
|
2020-01-10T12:50:14.000Z
|
from typing import Callable, Any, List
import torch
import torch.nn as nn
from torch import Tensor
from .._internally_replaced_utils import load_state_dict_from_url
__all__ = ["ShuffleNetV2", "shufflenet_v2_x0_5", "shufflenet_v2_x1_0", "shufflenet_v2_x1_5", "shufflenet_v2_x2_0"]
model_urls = {
"shufflenetv2_x0.5": "https://download.pytorch.org/models/shufflenetv2_x0.5-f707e7126e.pth",
"shufflenetv2_x1.0": "https://download.pytorch.org/models/shufflenetv2_x1-5666bf0f80.pth",
"shufflenetv2_x1.5": None,
"shufflenetv2_x2.0": None,
}
def channel_shuffle(x: Tensor, groups: int) -> Tensor:
batchsize, num_channels, height, width = x.size()
channels_per_group = num_channels // groups
# reshape
x = x.view(batchsize, groups, channels_per_group, height, width)
x = torch.transpose(x, 1, 2).contiguous()
# flatten
x = x.view(batchsize, -1, height, width)
return x
class InvertedResidual(nn.Module):
def __init__(self, inp: int, oup: int, stride: int) -> None:
super(InvertedResidual, self).__init__()
if not (1 <= stride <= 3):
raise ValueError("illegal stride value")
self.stride = stride
branch_features = oup // 2
assert (self.stride != 1) or (inp == branch_features << 1)
if self.stride > 1:
self.branch1 = nn.Sequential(
self.depthwise_conv(inp, inp, kernel_size=3, stride=self.stride, padding=1),
nn.BatchNorm2d(inp),
nn.Conv2d(inp, branch_features, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(branch_features),
nn.ReLU(inplace=True),
)
else:
self.branch1 = nn.Sequential()
self.branch2 = nn.Sequential(
nn.Conv2d(
inp if (self.stride > 1) else branch_features,
branch_features,
kernel_size=1,
stride=1,
padding=0,
bias=False,
),
nn.BatchNorm2d(branch_features),
nn.ReLU(inplace=True),
self.depthwise_conv(branch_features, branch_features, kernel_size=3, stride=self.stride, padding=1),
nn.BatchNorm2d(branch_features),
nn.Conv2d(branch_features, branch_features, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(branch_features),
nn.ReLU(inplace=True),
)
@staticmethod
def depthwise_conv(
i: int, o: int, kernel_size: int, stride: int = 1, padding: int = 0, bias: bool = False
) -> nn.Conv2d:
return nn.Conv2d(i, o, kernel_size, stride, padding, bias=bias, groups=i)
def forward(self, x: Tensor) -> Tensor:
if self.stride == 1:
x1, x2 = x.chunk(2, dim=1)
out = torch.cat((x1, self.branch2(x2)), dim=1)
else:
out = torch.cat((self.branch1(x), self.branch2(x)), dim=1)
out = channel_shuffle(out, 2)
return out
class ShuffleNetV2(nn.Module):
def __init__(
self,
stages_repeats: List[int],
stages_out_channels: List[int],
num_classes: int = 1000,
inverted_residual: Callable[..., nn.Module] = InvertedResidual,
) -> None:
super(ShuffleNetV2, self).__init__()
if len(stages_repeats) != 3:
raise ValueError("expected stages_repeats as list of 3 positive ints")
if len(stages_out_channels) != 5:
raise ValueError("expected stages_out_channels as list of 5 positive ints")
self._stage_out_channels = stages_out_channels
input_channels = 3
output_channels = self._stage_out_channels[0]
self.conv1 = nn.Sequential(
nn.Conv2d(input_channels, output_channels, 3, 2, 1, bias=False),
nn.BatchNorm2d(output_channels),
nn.ReLU(inplace=True),
)
input_channels = output_channels
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
# Static annotations for mypy
self.stage2: nn.Sequential
self.stage3: nn.Sequential
self.stage4: nn.Sequential
stage_names = ["stage{}".format(i) for i in [2, 3, 4]]
for name, repeats, output_channels in zip(stage_names, stages_repeats, self._stage_out_channels[1:]):
seq = [inverted_residual(input_channels, output_channels, 2)]
for i in range(repeats - 1):
seq.append(inverted_residual(output_channels, output_channels, 1))
setattr(self, name, nn.Sequential(*seq))
input_channels = output_channels
output_channels = self._stage_out_channels[-1]
self.conv5 = nn.Sequential(
nn.Conv2d(input_channels, output_channels, 1, 1, 0, bias=False),
nn.BatchNorm2d(output_channels),
nn.ReLU(inplace=True),
)
self.fc = nn.Linear(output_channels, num_classes)
def _forward_impl(self, x: Tensor) -> Tensor:
# See note [TorchScript super()]
x = self.conv1(x)
x = self.maxpool(x)
x = self.stage2(x)
x = self.stage3(x)
x = self.stage4(x)
x = self.conv5(x)
x = x.mean([2, 3]) # globalpool
x = self.fc(x)
return x
def forward(self, x: Tensor) -> Tensor:
return self._forward_impl(x)
def _shufflenetv2(arch: str, pretrained: bool, progress: bool, *args: Any, **kwargs: Any) -> ShuffleNetV2:
model = ShuffleNetV2(*args, **kwargs)
if pretrained:
model_url = model_urls[arch]
if model_url is None:
raise NotImplementedError("pretrained {} is not supported as of now".format(arch))
else:
state_dict = load_state_dict_from_url(model_url, progress=progress)
model.load_state_dict(state_dict)
return model
def shufflenet_v2_x0_5(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ShuffleNetV2:
"""
Constructs a ShuffleNetV2 with 0.5x output channels, as described in
`"ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design"
<https://arxiv.org/abs/1807.11164>`_.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _shufflenetv2("shufflenetv2_x0.5", pretrained, progress, [4, 8, 4], [24, 48, 96, 192, 1024], **kwargs)
def shufflenet_v2_x1_0(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ShuffleNetV2:
"""
Constructs a ShuffleNetV2 with 1.0x output channels, as described in
`"ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design"
<https://arxiv.org/abs/1807.11164>`_.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _shufflenetv2("shufflenetv2_x1.0", pretrained, progress, [4, 8, 4], [24, 116, 232, 464, 1024], **kwargs)
def shufflenet_v2_x1_5(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ShuffleNetV2:
"""
Constructs a ShuffleNetV2 with 1.5x output channels, as described in
`"ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design"
<https://arxiv.org/abs/1807.11164>`_.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _shufflenetv2("shufflenetv2_x1.5", pretrained, progress, [4, 8, 4], [24, 176, 352, 704, 1024], **kwargs)
def shufflenet_v2_x2_0(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ShuffleNetV2:
"""
Constructs a ShuffleNetV2 with 2.0x output channels, as described in
`"ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design"
<https://arxiv.org/abs/1807.11164>`_.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _shufflenetv2("shufflenetv2_x2.0", pretrained, progress, [4, 8, 4], [24, 244, 488, 976, 2048], **kwargs)
| 37.511312
| 115
| 0.634741
|
55b6688029abd4e4424759b411ebdffcf832abb5
| 2,189
|
py
|
Python
|
test.py
|
e-rus/bumps
|
080ff80f939f3edf54a1fdc425e3f333d42ee8c4
|
[
"MIT"
] | null | null | null |
test.py
|
e-rus/bumps
|
080ff80f939f3edf54a1fdc425e3f333d42ee8c4
|
[
"MIT"
] | null | null | null |
test.py
|
e-rus/bumps
|
080ff80f939f3edf54a1fdc425e3f333d42ee8c4
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""
Run tests for bumps.
Usage:
./test.py
- run all tests
./test.py --with-coverage
- run all tests with coverage report
"""
import os
import sys
import subprocess
from glob import glob
import nose
from distutils.util import get_platform
platform = '.%s-%s' % (get_platform(), sys.version[:3])
# Make sure that we have a private version of mplconfig
mplconfig = os.path.join(os.getcwd(), '.mplconfig')
os.environ['MPLCONFIGDIR'] = mplconfig
if not os.path.exists(mplconfig):
os.mkdir(mplconfig)
import matplotlib
matplotlib.use('Agg')
def addpath(path):
"""
Add a directory to the python path environment, and to the PYTHONPATH
environment variable for subprocesses.
"""
path = os.path.abspath(path)
if 'PYTHONPATH' in os.environ:
PYTHONPATH = path + os.pathsep + os.environ['PYTHONPATH']
else:
PYTHONPATH = path
os.environ['PYTHONPATH'] = PYTHONPATH
sys.path.insert(0, path)
sys.dont_write_bytecode = True
sys.stderr = sys.stdout # Doctest doesn't see sys.stderr
#import numpy as np; np.seterr(all='raise')
# Check that we are running from the root.
root = os.path.abspath(os.getcwd())
assert os.path.exists(
os.path.join(root, 'bumps', 'cli.py')), "Not in bumps root"
addpath(root)
# Set the nosetest args
nose_args = ['-v', '--all-modules',
'-m(^_?test_|_test$|^test$)',
'--with-doctest', '--doctest-extension=.rst',
'--doctest-options=+ELLIPSIS,+NORMALIZE_WHITESPACE',
'--cover-package=bumps',
'-e.*amqp_map.*',
]
# exclude gui subdirectory if wx is not available
try:
import wx
except ImportError:
nose_args.append('-egui')
nose_args += sys.argv[1:] # allow coverage arguments
# Add targets
nose_args += [os.path.join(root,'bumps')]
nose_args += glob('doc/g*/*.rst')
nose_args += glob('doc/_examples/*/*.rst')
print("nosetests " + " ".join(nose_args))
if not nose.run(argv=nose_args):
sys.exit(1)
# Run the command line version of bumps which should display help text.
# for p in ['bin/bumps']:
# ret = subprocess.call((sys.executable, p), shell=False)
# if ret != 0: sys.exit()
| 25.453488
| 73
| 0.658748
|
b8f8619f73414e37cb8dc6621819db6b442dcfa3
| 18,296
|
py
|
Python
|
mvlearn/semi_supervised/ctclassifier.py
|
Gauravsinghal09/mvlearn
|
81092120595fadfc3d1f624d0a772594d8bb1578
|
[
"Apache-2.0"
] | 1
|
2020-12-29T15:41:29.000Z
|
2020-12-29T15:41:29.000Z
|
mvlearn/semi_supervised/ctclassifier.py
|
Gauravsinghal09/mvlearn
|
81092120595fadfc3d1f624d0a772594d8bb1578
|
[
"Apache-2.0"
] | null | null | null |
mvlearn/semi_supervised/ctclassifier.py
|
Gauravsinghal09/mvlearn
|
81092120595fadfc3d1f624d0a772594d8bb1578
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 NeuroData (http://neurodata.io)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Implements multi-view co-training classification for 2-view data.
from .base import BaseCoTrainEstimator
import numpy as np
from sklearn.naive_bayes import GaussianNB
from ..utils.utils import check_Xs, check_Xs_y_nan_allowed
class CTClassifier(BaseCoTrainEstimator):
r"""
This class implements the co-training classifier for supervised and
semi-supervised learning with the framework as described in [#1CTC]_.
The best use case is when the 2 views of input data are sufficiently
distinct and independent as detailed in [#1CTC]_. However, this can
also be successful when a single matrix of input data is given as
both views and two estimators are chosen which are quite different.
[#2CTC]_. See the examples below.
In the semi-supervised case, performance can vary greatly, so using
a separate validation set or cross validation procedure is
recommended to ensure the classifier has fit well.
Parameters
----------
estimator1 : classifier object, (default=sklearn GaussianNB)
The classifier object which will be trained on view 1 of the data.
This classifier should support the predict_proba() function so that
classification probabilities can be computed and co-training can be
performed effectively.
estimator2 : classifier object, (default=sklearn GaussianNB)
The classifier object which will be trained on view 2 of the data.
Does not need to be of the same type as ``estimator1``, but should
support predict_proba().
p : int, optional (default=None)
The number of positive classifications from the unlabeled_pool
training set which will be given a positive "label". If None, the
default is the floor of the ratio of positive to negative examples
in the labeled training data (at least 1). If only one of ``p`` or
``n`` is not None, the other will be set to be the same. When the
labels are 0 or 1, positive is defined as 1, and in general, positive
is the larger label.
n : int, optional (default=None)
The number of negative classifications from the unlabeled_pool
training set which will be given a negative "label". If None, the
default is the floor of the ratio of positive to negative examples
in the labeled training data (at least 1). If only one of ``p`` or
``n`` is not None, the other will be set to be the same. When the
labels are 0 or 1, negative is defined as 0, and in general, negative
is the smaller label.
unlabeled_pool_size : int, optional (default=75)
The number of unlabeled_pool samples which will be kept in a
separate pool for classification and selection by the updated
classifier at each training iteration.
num_iter : int, optional (default=50)
The maximum number of training iterations to run.
random_state : int (default=None)
The starting random seed for fit() and class operations, passed to
numpy.random.seed().
Attributes
----------
estimator1_ : classifier object
The classifier used on view 1.
estimator2_ : classifier object
The classifier used on view 2.
class_name_: string
The name of the class.
p_ : int, optional (default=None)
The number of positive classifications from the unlabeled_pool
training set which will be given a positive "label" each round.
n_ : int, optional (default=None)
The number of negative classifications from the unlabeled_pool
training set which will be given a negative "label" each round.
classes_ : array-like of shape (n_classes,)
Unique class labels.
Examples
--------
>>> # Supervised learning of single-view data with 2 distinct estimators
>>> from mvlearn.semi_supervised import CTClassifier
>>> from mvlearn.datasets import load_UCImultifeature
>>> import numpy as np
>>> from sklearn.ensemble import RandomForestClassifier
>>> from sklearn.naive_bayes import GaussianNB
>>> from sklearn.model_selection import train_test_split
>>> data, labels = load_UCImultifeature(select_labeled=[0,1])
>>> X1 = data[0] # Only using the first view
>>> X1_train, X1_test, l_train, l_test = train_test_split(X1, labels)
>>> # Supervised learning with a single view of data and 2 estimator types
>>> estimator1 = GaussianNB()
>>> estimator2 = RandomForestClassifier()
>>> ctc = CTClassifier(estimator1, estimator2, random_state=1)
>>> # Use the same matrix for each view
>>> ctc = ctc.fit([X1_train, X1_train], l_train)
>>> preds = ctc.predict([X1_test, X1_test])
>>> print("Accuracy: ", sum(preds==l_test) / len(preds))
Accuracy: 0.97
Notes
-----
Multi-view co-training is most helpful for tasks in semi-supervised
learning where each view offers unique information not seen in the
other. As is shown in the example notebooks for using this algorithm,
multi-view co-training can provide good classification results even
    when the number of unlabeled samples far exceeds the number of labeled
samples. This classifier uses 2 classifiers which work individually
on each view but which share information and thus result in improved
performance over looking at the views completely separately or even
when concatenating the views to get more features in a single-view
setting. The classifier can be initialized with or without the
classifiers desired for each view being specified, but if the
classifier for a certain view is specified, then it must support a
predict_proba() method in order to give a sense of the most likely labels
for different examples. This is because the algorithm must be able to
determine which of the training samples it is most confident about during
training epochs. The algorithm, as first proposed by Blum and Mitchell,
is described in detail below.
*Algorithm*
Given:
* a set *L* of labeled training samples (with 2 views)
* a set *U* of unlabeled samples (with 2 views)
Create a pool *U'* of examples by choosing *u* examples at random
from *U*
Loop for *k* iterations
* Use *L* to train a classifier *h1* (``estimator1``) that considers
only the view 1 portion of the data (i.e. Xs[0])
* Use *L* to train a classifier *h2* (``estimator2``) that considers
only the view 2 portion of the data (i.e. Xs[1])
* Allow *h1* to label *p* (``self.p_``) positive and *n* (``self.n_``)
negative samples from view 1 of *U'*
* Allow *h2* to label *p* positive and *n* negative samples
from view 2 of *U'*
* Add these self-labeled samples to *L*
* Randomly take 2*p* + 2*n* samples from *U* to replenish *U'*
References
----------
.. [#1CTC] Blum, A., & Mitchell, T. (1998, July). Combining labeled and
unlabeled_pool data with co-training. In Proceedings of the
eleventh annual conference on Computational learning theory
(pp. 92-100). ACM.
.. [#2CTC] Goldman, Sally, and Yan Zhou. "Enhancing supervised
learning with unlabeled data." ICML. 2000.
"""
def __init__(
self,
estimator1=None,
estimator2=None,
p=None,
n=None,
unlabeled_pool_size=75,
num_iter=50,
random_state=None
):
# initialize a BaseCTEstimator object
super().__init__(estimator1, estimator2, random_state)
# if not given, set classifiers as gaussian naive bayes estimators
if self.estimator1_ is None:
self.estimator1_ = GaussianNB()
if self.estimator2_ is None:
self.estimator2_ = GaussianNB()
# If only 1 of p or n is not None, set them equal
if (p is not None and n is None):
n = p
self.p_, self.n_ = p, n
elif (p is None and n is not None):
p = n
self.p_, self.n_ = p, n
else:
self.p_, self.n_ = p, n
self.n_views = 2 # only 2 view learning supported currently
self.class_name_ = "CTClassifier"
self.unlabeled_pool_size = unlabeled_pool_size
self.num_iter = num_iter
self._check_params()
def _check_params(self):
r"""
Checks that cotraining parameters are valid. Throws AttributeError
if estimators are invalid. Throws ValueError if any other parameters
are not valid. The checks performed are:
- estimator1 and estimator2 have predict_proba methods
- p and n are both positive
- unlabeled_pool_size is positive
- num_iter is positive
"""
# verify that estimator1 and estimator2 have predict_proba
if (not hasattr(self.estimator1_, 'predict_proba') or
not hasattr(self.estimator2_, 'predict_proba')):
raise AttributeError("Co-training classifier must be initialized "
"with classifiers supporting "
"predict_proba().")
if (self.p_ is not None and self.p_ <= 0) or (self.n_ is not None and
self.n_ <= 0):
raise ValueError("Both p and n must be positive.")
if self.unlabeled_pool_size <= 0:
raise ValueError("unlabeled_pool_size must be positive.")
if self.num_iter <= 0:
raise ValueError("num_iter must be positive.")
def fit(
self,
Xs,
y
):
r"""
Fit the classifier object to the data in Xs, y.
Parameters
----------
Xs : list of array-likes or numpy.ndarray
- Xs length: n_views
- Xs[i] shape: (n_samples, n_features_i)
A list of the different views of data to train on.
y : array, shape (n_samples,)
The labels of the training data. Unlabeled_pool examples should
have label np.nan.
Returns
-------
self : returns an instance of self
"""
# verify Xs and y
Xs, y = check_Xs_y_nan_allowed(Xs,
y,
multiview=True,
enforce_views=self.n_views,
max_classes=2, min_classes=1)
y = np.array(y)
if self.random_state is not None:
np.random.seed(self.random_state)
self.classes_ = list(set(y[~np.isnan(y)]))
self.n_classes = len(self.classes_)
# extract the multiple views given
X1 = Xs[0]
X2 = Xs[1]
# if don't have 2 classes of labeled data, then just fit and return,
# since can't do any iterations of cotraining
if self.n_classes > 1:
# if both p & n are none, set as ratio of one class to the other
if (self.p_ is None and self.n_ is None):
num_class_n = sum(1 for y_n in y if y_n == self.classes_[0])
num_class_p = sum(1 for y_p in y if y_p == self.classes_[1])
p_over_n_ratio = num_class_p // num_class_n
if p_over_n_ratio > 1:
self.p_, self.n_ = p_over_n_ratio, 1
else:
self.n_, self.p_ = num_class_n // num_class_p, 1
# the full set of unlabeled samples
U = [i for i, y_i in enumerate(y) if np.isnan(y_i)]
# shuffle unlabeled_pool data for easy random access
np.random.shuffle(U)
# the small pool of unlabled samples to draw from in training
unlabeled_pool = U[-min(len(U), self.unlabeled_pool_size):]
# the labeled samples
L = [i for i, y_i in enumerate(y) if ~np.isnan(y_i)]
# remove the pool from overall unlabeled data
U = U[:-len(unlabeled_pool)]
# number of rounds of co-training
it = 0
# machine epsilon
eps = np.finfo(float).eps
while it < self.num_iter and U:
it += 1
# fit each model to its respective view
self.estimator1_.fit(X1[L], y[L])
self.estimator2_.fit(X2[L], y[L])
# predict log probability for greater spread in confidence
y1_prob = np.log(self.estimator1_.
predict_proba(X1[unlabeled_pool]) + eps)
y2_prob = np.log(self.estimator2_.
predict_proba(X2[unlabeled_pool]) + eps)
n, p = [], []
accurate_guesses_estimator1 = 0
accurate_guesses_estimator2 = 0
wrong_guesses_estimator1 = 0
wrong_guesses_estimator2 = 0
# take the most confident labeled examples from the
# unlabeled pool in each category and put them in L
for i in (y1_prob[:, 0].argsort())[-self.n_:]:
if y1_prob[i, 0] > np.log(0.5):
n.append(i)
for i in (y1_prob[:, 1].argsort())[-self.p_:]:
if y1_prob[i, 1] > np.log(0.5):
p.append(i)
for i in (y2_prob[:, 0].argsort())[-self.n_:]:
if y2_prob[i, 0] > np.log(0.5):
n.append(i)
for i in (y2_prob[:, 1].argsort())[-self.p_:]:
if y2_prob[i, 1] > np.log(0.5):
p.append(i)
# create new labels for new additions to the labeled group
y[[unlabeled_pool[x] for x in n]] = self.classes_[0]
y[[unlabeled_pool[x] for x in p]] = self.classes_[1]
L.extend([unlabeled_pool[x] for x in p])
L.extend([unlabeled_pool[x] for x in n])
# remove newly labeled samples from unlabeled_pool
unlabeled_pool = [elem for elem in unlabeled_pool
if not (elem in p or elem in n)]
# add new elements to unlabeled_pool
add_counter = 0
num_to_add = len(p) + len(n)
while add_counter != num_to_add and U:
add_counter += 1
unlabeled_pool.append(U.pop())
# if only had 1 class in the labeled examples
else:
# the labeled sample indices
L = [i for i, y_i in enumerate(y) if ~np.isnan(y_i)]
# fit the overall model on fully "labeled" data
self.estimator1_.fit(X1[L], y[L])
self.estimator2_.fit(X2[L], y[L])
return self
def predict(self, Xs):
r"""
Predict the classes of the examples in the two input views.
Parameters
----------
Xs : list of array-likes or numpy.ndarray
- Xs length: n_views
- Xs[i] shape: (n_samples, n_features_i)
A list of the different views of data to predict.
Returns
-------
y_pred : array-like (n_samples,)
The predicted class of each input example. If the two classifiers
don't agree, pick the one with the highest predicted probability
from predict_proba()
"""
Xs = check_Xs(Xs,
multiview=True,
enforce_views=self.n_views)
X1 = Xs[0]
X2 = Xs[1]
# predict each view independently
y1 = self.estimator1_.predict(X1)
y2 = self.estimator2_.predict(X2)
# initialize
y_pred = np.zeros(X1.shape[0],)
num_disagree = 0
num_agree = 0
# predict samples based on trained classifiers
for i, (y1_i, y2_i) in enumerate(zip(y1, y2)):
# if classifiers agree, use their prediction
if y1_i == y2_i:
y_pred[i] = y1_i
# if classifiers don't agree, take the more confident
else:
y1_probs = self.estimator1_.predict_proba([X1[i]])[0]
y2_probs = self.estimator2_.predict_proba([X2[i]])[0]
sum_y_probs = [prob1 + prob2 for (prob1, prob2) in
zip(y1_probs, y2_probs)]
max_sum_prob = max(sum_y_probs)
y_pred[i] = self.classes_[sum_y_probs.index(max_sum_prob)]
return y_pred
def predict_proba(self, Xs):
r"""
Predict the probability of each example belonging to a each class.
Parameters
----------
Xs : list of array-likes or numpy.ndarray
- Xs length: n_views
- Xs[i] shape: (n_samples, n_features_i)
A list of the different views of data to predict.
Returns
-------
y_proba : array-like (n_samples, n_classes)
The probability of each sample being in each class.
"""
Xs = check_Xs(Xs,
multiview=True,
enforce_views=self.n_views)
X1 = Xs[0]
X2 = Xs[1]
y_proba = np.full((X1.shape[0], self.n_classes), -1)
# predict each probability independently
y1_proba = self.estimator1_.predict_proba(X1)
y2_proba = self.estimator2_.predict_proba(X2)
# return the average probability for the sample
return (y1_proba + y2_proba) * .5
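# Semi-supervised usage sketch (illustrative, mirroring the docstring example):
# unlabeled samples are marked with np.nan before fitting.
#
#   from mvlearn.datasets import load_UCImultifeature
#   data, labels = load_UCImultifeature(select_labeled=[0, 1])
#   labels = labels.astype(float)
#   labels[150:] = np.nan              # treat most samples as unlabeled
#   ctc = CTClassifier(random_state=1).fit([data[0], data[1]], labels)
#   preds = ctc.predict([data[0], data[1]])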
| 39.346237
| 78
| 0.595267
|
b97a7c6a2fddbc915914b6b5f474484b092b1931
| 4,176
|
py
|
Python
|
home_portal/kookboek/database.py
|
Runner-42/HomePortal
|
011bc6752e3212c151401139dbfe8d46ead985ad
|
[
"MIT"
] | null | null | null |
home_portal/kookboek/database.py
|
Runner-42/HomePortal
|
011bc6752e3212c151401139dbfe8d46ead985ad
|
[
"MIT"
] | null | null | null |
home_portal/kookboek/database.py
|
Runner-42/HomePortal
|
011bc6752e3212c151401139dbfe8d46ead985ad
|
[
"MIT"
] | null | null | null |
'''Database Models for the Kookboek application'''
from home_portal.extensions import db_kookboek as db
class RecipesIngredients(db.Model):
'''
    The RecipesIngredients class defines the attributes
required to create a many-to-many relationship between recipes
and ingredients.
It also contains the amount and unit information for the required ingredients
related to the recipe.
'''
__bind_key__ = 'kookboek_db'
__tablename__ = 'RecipesIngredients'
recipe_id = db.Column(
db.Integer,
db.ForeignKey('Recipes.id'),
primary_key=True
)
ingredient_id = db.Column(
db.Integer,
db.ForeignKey('Ingredients.id'),
primary_key=True
)
amount = db.Column(
db.Numeric,
nullable=False
)
unit = db.Column(
db.String(64),
unique=False,
nullable=False
)
unit_description = db.Column(
db.Text(),
unique=False,
nullable=True
)
class Recipe(db.Model):
'''
The recipe class defines the attributes of the
Recipes table
'''
__bind_key__ = 'kookboek_db'
__tablename__ = 'Recipes'
id = db.Column(
db.Integer,
primary_key=True
)
name = db.Column(
db.String(128),
unique=True,
nullable=False
)
category = db.Column(
db.String(64),
nullable=False
)
preparation = db.Column(
db.Text(),
nullable=True,
unique=False
)
picture = db.Column(
db.LargeBinary,
nullable=True
)
ingredients = db.relationship('Ingredient',
secondary=lambda: RecipesIngredients.__table__,
backref='Recipe')
def __repr__(self):
return "testeke {} {}".format(self.id, self.name)
@property
def get_recipe_id(self):
if self.id:
return self.id
else:
return 0
@property
def get_ingredients_list(self):
'''
Function returns the ingredients related to a recipe
        Recipe information is provided via its ID
'''
ingredients = Ingredient.query.join(
RecipesIngredients,
RecipesIngredients.recipe_id == self.id)\
.filter(RecipesIngredients.ingredient_id == Ingredient.id)\
.all()
return ingredients
class Unit(db.Model):
'''
The Unit class defines the attributes of the unit
table.
'''
__bind_key__ = 'kookboek_db'
__tablename__ = 'Units'
id = db.Column(
db.Integer,
primary_key=True
)
name = db.Column(
db.String(64),
unique=True,
nullable=False
)
description = db.Column(
db.Text(),
unique=False,
nullable=True
)
class Category(db.Model):
'''
The Category class defines the attributes of the category
table.
'''
__bind_key__ = 'kookboek_db'
__tablename__ = 'Categories'
id = db.Column(
db.Integer,
primary_key=True
)
name = db.Column(
db.String(64),
unique=True,
nullable=False
)
description = db.Column(
db.Text(),
unique=False,
nullable=True
)
class Ingredient(db.Model):
'''
The Ingredient class defines the attributes of the ingredient
table.
'''
__bind_key__ = 'kookboek_db'
__tablename__ = 'Ingredients'
id = db.Column(
db.Integer,
primary_key=True
)
name = db.Column(
db.String(64),
unique=True,
nullable=False
)
picture = db.Column(
db.LargeBinary,
nullable=True
)
default_unit = db.Column(
db.String(32),
nullable=False
)
unit_description = db.Column(
db.String(128),
nullable=True
)
default_amount = db.Column(
db.Numeric,
nullable=True
)
recipes = db.relationship('Recipe',
secondary=lambda: RecipesIngredients.__table__,
backref='Ingredient'
)
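# Usage sketch (illustrative; assumes an active Flask application context and the
# usual Flask-SQLAlchemy session exposed on `db`):
#
#   flour = Ingredient(name='flour', default_unit='g', default_amount=500)
#   bread = Recipe(name='bread', category='bakery', preparation='...')
#   db.session.add_all([flour, bread])
#   db.session.commit()
#   link = RecipesIngredients(recipe_id=bread.id, ingredient_id=flour.id,
#                             amount=500, unit='g')
#   db.session.add(link)
#   db.session.commit()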
| 23.2
| 81
| 0.564416
|
40bc299cc3be7154fb9e60a77b6996d238e91266
| 3,530
|
py
|
Python
|
__init__.py
|
slashx57/ipx800
|
b6ee9b6f316afb24a3d236bca0d7f4ac6101e10c
|
[
"MIT"
] | null | null | null |
__init__.py
|
slashx57/ipx800
|
b6ee9b6f316afb24a3d236bca0d7f4ac6101e10c
|
[
"MIT"
] | 1
|
2020-10-27T06:19:58.000Z
|
2020-10-27T12:37:41.000Z
|
__init__.py
|
slashx57/ipx800
|
b6ee9b6f316afb24a3d236bca0d7f4ac6101e10c
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import sys
import logging
import voluptuous as vol
from threading import Lock
# Import the device class from the component that you want to support
from homeassistant.const import CONF_HOST, CONF_API_KEY, CONF_PORT, CONF_USERNAME, CONF_PASSWORD
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.discovery import load_platform
from pyipx800.pyipx800 import pyipx800
from pyipx800.pyipxInput import Input
from pyipx800.pyipxRelay import Relay
from pyipx800.pyipxAnalog import Analog
from pyipx800.pyipxCounter import Counter
from pyipx800.pyipxVirtuals import VirtualInput, VirtualOutput, VirtualAnalog
DOMAIN = "ipx800"
DEFAULT_PORT = 80
# Home Assistant depends on 3rd party packages for API specific code.
REQUIREMENTS = ['requests', 'requests-xml']
_LOGGER = logging.getLogger(__name__)
# Schema to validate the user's configuration
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Required(CONF_API_KEY): cv.string,
vol.Optional(CONF_USERNAME): cv.string,
vol.Optional(CONF_PASSWORD): cv.string
})
},extra=vol.ALLOW_EXTRA)
def setup(hass, config):
"""Set up is called when Home Assistant is loading our component."""
#from pyipx800 import pyipx800
_LOGGER.debug("Entering setup")
# Assign configuration variables. The configuration check takes care they are
# present.
conf = config[DOMAIN]
host = conf.get(CONF_HOST)
port = conf.get(CONF_PORT)
apikey = conf.get(CONF_API_KEY)
username = conf.get(CONF_USERNAME)
password = conf.get(CONF_PASSWORD)
hass.data[DOMAIN] = IpxData(host, port, apikey)
try:
hass.data[DOMAIN].update()
except:
_LOGGER.error("Update error %s", str(sys.exc_info()[0]))
return False
#load_platform(hass, 'sensor', DOMAIN, {}, config)
#load_platform(hass, 'light', DOMAIN, {}, config)
_LOGGER.debug("Exiting setup")
    # Return boolean to indicate that initialization was successful.
return True
class IpxData:
"""Stores the data """
def __init__(self, host, port, apikey):
self.mutex = Lock()
self._ipx = None
self._host = host
self._port = port
self._apikey = apikey
self.inputs = None
self.relays = None
self.analogs = None
self.counters = None
self.virt_inputs = None
self.virt_outputs = None
self.virt_analogs = None
def update(self):
with self.mutex:
if self._ipx == None:
# Setup connection with IPX800
self._ipx = pyipx800(self._host, self._port, self._apikey)
self._ipx.configure()
self.counters = self._ipx.counters
_LOGGER.debug("counters found:"+str(len(self.counters)))
self.relays = self._ipx.relays
_LOGGER.debug("relays found:"+str(len(self.relays)))
self.analogs = self._ipx.analogs
_LOGGER.debug("analogs found:"+str(len(self.analogs)))
self.inputs = self._ipx.inputs
_LOGGER.debug("inputs found:"+str(len(self.inputs)))
self.virt_inputs = self._ipx.virt_inputs
_LOGGER.debug("virt_inputs found:"+str(len(self.virt_inputs)))
self.virt_outputs = self._ipx.virt_outputs
_LOGGER.debug("virt_outputs found:"+str(len(self.virt_outputs)))
self.virt_analogs = self._ipx.virt_analogs
_LOGGER.debug("virt_analogs found:"+str(len(self.virt_analogs)))
return self._ipx
| 30.695652
| 96
| 0.6983
|
dce594e56d0b644ca22dae7c7059b28d282ec44c
| 155
|
py
|
Python
|
bin/django-admin.py
|
Ao99/try-django
|
645d965c25752e6d0ca1f400c5ee1b719f564802
|
[
"MIT"
] | null | null | null |
bin/django-admin.py
|
Ao99/try-django
|
645d965c25752e6d0ca1f400c5ee1b719f564802
|
[
"MIT"
] | null | null | null |
bin/django-admin.py
|
Ao99/try-django
|
645d965c25752e6d0ca1f400c5ee1b719f564802
|
[
"MIT"
] | null | null | null |
#!/home/ubuntu/environment/TryDjango/bin/python3
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| 25.833333
| 48
| 0.793548
|
83b7e601b7e570eeae9c23f307ddbcc27b2e015c
| 172
|
py
|
Python
|
aiorpc/__init__.py
|
brglng/aiorpc
|
575a898e54e61cd73ec5cf2b48348e70cfaa5b41
|
[
"WTFPL"
] | 66
|
2016-10-17T19:16:44.000Z
|
2022-02-26T01:10:06.000Z
|
aiorpc/__init__.py
|
webclinic017/aiorpc
|
a46929d70f17a6a98ee8f071012656f57bcd073b
|
[
"WTFPL"
] | 25
|
2018-05-13T03:14:43.000Z
|
2022-03-03T03:29:04.000Z
|
aiorpc/__init__.py
|
webclinic017/aiorpc
|
a46929d70f17a6a98ee8f071012656f57bcd073b
|
[
"WTFPL"
] | 20
|
2017-09-13T17:04:21.000Z
|
2022-02-03T12:26:25.000Z
|
from aiorpc.client import RPCClient
from aiorpc.server import *
__all__ = ['RPCClient', 'RPCServer', 'register', 'msgpack_init', 'set_timeout', 'serve', 'register_class']
| 34.4
| 106
| 0.744186
|
34775be6ea07f6e8a518f900f4e55f7c07504765
| 7,274
|
py
|
Python
|
fuji_server/models/formal_metadata.py
|
FAIRsFAIR/fuji
|
71b771ec29b4a4405720b87a32631d05f4543a7b
|
[
"MIT"
] | null | null | null |
fuji_server/models/formal_metadata.py
|
FAIRsFAIR/fuji
|
71b771ec29b4a4405720b87a32631d05f4543a7b
|
[
"MIT"
] | null | null | null |
fuji_server/models/formal_metadata.py
|
FAIRsFAIR/fuji
|
71b771ec29b4a4405720b87a32631d05f4543a7b
|
[
"MIT"
] | null | null | null |
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from fuji_server.models.base_model_ import Model
from fuji_server.models.debug import Debug # noqa: F401,E501
from fuji_server.models.fair_result_common import FAIRResultCommon # noqa: F401,E501
from fuji_server.models.fair_result_common_score import FAIRResultCommonScore # noqa: F401,E501
from fuji_server.models.formal_metadata_output import FormalMetadataOutput # noqa: F401,E501
from fuji_server import util
class FormalMetadata(Model):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, id: int=None, metric_identifier: str=None, metric_name: str=None, test_status: str='fail', score: FAIRResultCommonScore=None, output: FormalMetadataOutput=None, test_debug: Debug=None): # noqa: E501
"""FormalMetadata - a model defined in Swagger
:param id: The id of this FormalMetadata. # noqa: E501
:type id: int
:param metric_identifier: The metric_identifier of this FormalMetadata. # noqa: E501
:type metric_identifier: str
:param metric_name: The metric_name of this FormalMetadata. # noqa: E501
:type metric_name: str
:param test_status: The test_status of this FormalMetadata. # noqa: E501
:type test_status: str
:param score: The score of this FormalMetadata. # noqa: E501
:type score: FAIRResultCommonScore
:param output: The output of this FormalMetadata. # noqa: E501
:type output: FormalMetadataOutput
:param test_debug: The test_debug of this FormalMetadata. # noqa: E501
:type test_debug: Debug
"""
self.swagger_types = {
'id': int,
'metric_identifier': str,
'metric_name': str,
'test_status': str,
'score': FAIRResultCommonScore,
'output': FormalMetadataOutput,
'test_debug': Debug
}
self.attribute_map = {
'id': 'id',
'metric_identifier': 'metric_identifier',
'metric_name': 'metric_name',
'test_status': 'test_status',
'score': 'score',
'output': 'output',
'test_debug': 'test_debug'
}
self._id = id
self._metric_identifier = metric_identifier
self._metric_name = metric_name
self._test_status = test_status
self._score = score
self._output = output
self._test_debug = test_debug
@classmethod
def from_dict(cls, dikt) -> 'FormalMetadata':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The FormalMetadata of this FormalMetadata. # noqa: E501
:rtype: FormalMetadata
"""
return util.deserialize_model(dikt, cls)
@property
def id(self) -> int:
"""Gets the id of this FormalMetadata.
:return: The id of this FormalMetadata.
:rtype: int
"""
return self._id
@id.setter
def id(self, id: int):
"""Sets the id of this FormalMetadata.
:param id: The id of this FormalMetadata.
:type id: int
"""
if id is None:
raise ValueError("Invalid value for `id`, must not be `None`") # noqa: E501
self._id = id
@property
def metric_identifier(self) -> str:
"""Gets the metric_identifier of this FormalMetadata.
:return: The metric_identifier of this FormalMetadata.
:rtype: str
"""
return self._metric_identifier
@metric_identifier.setter
def metric_identifier(self, metric_identifier: str):
"""Sets the metric_identifier of this FormalMetadata.
:param metric_identifier: The metric_identifier of this FormalMetadata.
:type metric_identifier: str
"""
if metric_identifier is None:
raise ValueError("Invalid value for `metric_identifier`, must not be `None`") # noqa: E501
self._metric_identifier = metric_identifier
@property
def metric_name(self) -> str:
"""Gets the metric_name of this FormalMetadata.
:return: The metric_name of this FormalMetadata.
:rtype: str
"""
return self._metric_name
@metric_name.setter
def metric_name(self, metric_name: str):
"""Sets the metric_name of this FormalMetadata.
:param metric_name: The metric_name of this FormalMetadata.
:type metric_name: str
"""
if metric_name is None:
raise ValueError("Invalid value for `metric_name`, must not be `None`") # noqa: E501
self._metric_name = metric_name
@property
def test_status(self) -> str:
"""Gets the test_status of this FormalMetadata.
:return: The test_status of this FormalMetadata.
:rtype: str
"""
return self._test_status
@test_status.setter
def test_status(self, test_status: str):
"""Sets the test_status of this FormalMetadata.
:param test_status: The test_status of this FormalMetadata.
:type test_status: str
"""
allowed_values = ["pass", "fail", "indeterminate"] # noqa: E501
if test_status not in allowed_values:
raise ValueError(
"Invalid value for `test_status` ({0}), must be one of {1}"
.format(test_status, allowed_values)
)
self._test_status = test_status
@property
def score(self) -> FAIRResultCommonScore:
"""Gets the score of this FormalMetadata.
:return: The score of this FormalMetadata.
:rtype: FAIRResultCommonScore
"""
return self._score
@score.setter
def score(self, score: FAIRResultCommonScore):
"""Sets the score of this FormalMetadata.
:param score: The score of this FormalMetadata.
:type score: FAIRResultCommonScore
"""
if score is None:
raise ValueError("Invalid value for `score`, must not be `None`") # noqa: E501
self._score = score
@property
def output(self) -> FormalMetadataOutput:
"""Gets the output of this FormalMetadata.
:return: The output of this FormalMetadata.
:rtype: FormalMetadataOutput
"""
return self._output
@output.setter
def output(self, output: FormalMetadataOutput):
"""Sets the output of this FormalMetadata.
:param output: The output of this FormalMetadata.
:type output: FormalMetadataOutput
"""
self._output = output
@property
def test_debug(self) -> Debug:
"""Gets the test_debug of this FormalMetadata.
:return: The test_debug of this FormalMetadata.
:rtype: Debug
"""
return self._test_debug
@test_debug.setter
def test_debug(self, test_debug: Debug):
"""Sets the test_debug of this FormalMetadata.
:param test_debug: The test_debug of this FormalMetadata.
:type test_debug: Debug
"""
self._test_debug = test_debug
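# Illustrative usage sketch (not part of the generated module): instances are
# normally deserialized from plain dicts via the generated from_dict() helper.
# The field values below are placeholders, not real FUJI metric data.
#
#   fm = FormalMetadata.from_dict({
#       'id': 1,
#       'metric_identifier': 'EXAMPLE-METRIC-ID',
#       'metric_name': 'Example metric name',
#       'test_status': 'pass',
#   })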
| 30.691983
| 222
| 0.631152
|
e6ab7102e55dec6cf4044bb3191474f552bd6bb6
| 11,559
|
py
|
Python
|
solvers/prepare.py
|
brandhsn/pysat
|
95f2dd9760091bf3453ba6c0c7c12172cffefbb7
|
[
"MIT"
] | null | null | null |
solvers/prepare.py
|
brandhsn/pysat
|
95f2dd9760091bf3453ba6c0c7c12172cffefbb7
|
[
"MIT"
] | null | null | null |
solvers/prepare.py
|
brandhsn/pysat
|
95f2dd9760091bf3453ba6c0c7c12172cffefbb7
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#-*- coding:utf-8 -*-
##
## prepare.py
##
## Created on: Jan 23, 2018
## Author: Alexey S. Ignatiev
## E-mail: aignatiev@ciencias.ulisboa.pt
##
#
#==============================================================================
from __future__ import print_function
import datetime
import os
import shutil
import sys
import tarfile
import zipfile
try: # Python 2
from urllib import urlopen
except ImportError: # Python 3
from urllib.request import urlopen
#
#==============================================================================
sources = {
'glucose30': (
'http://www.labri.fr/perso/lsimon/downloads/softwares/glucose-3.0.tgz',
'solvers/glucose30.tar.gz'
),
'glucose41': (
'http://www.labri.fr/perso/lsimon/downloads/softwares/glucose-syrup-4.1.tgz',
'solvers/glucose41.tar.gz'
),
'lingeling': (
'http://fmv.jku.at/lingeling/lingeling-bbc-9230380-160707-druplig-009.tar.gz',
'solvers/lingeling.tar.gz'
),
'maplechrono': (
'http://sat2018.forsyte.tuwien.ac.at/solvers/main_and_glucose_hack/MapleLCMDistChronoBT.zip',
'solvers/maplechrono.zip'
),
'maplecm': (
'http://sat2018.forsyte.tuwien.ac.at/solvers/main_and_glucose_hack/Maple_CM.zip',
'solvers/maplecm.zip'
),
'maplesat': (
'https://sites.google.com/a/gsd.uwaterloo.ca/maplesat/MapleCOMSPS_pure_LRB.zip',
'solvers/maplesat.zip'
),
'minicard': (
'https://github.com/liffiton/minicard/archive/v1.2.tar.gz',
'http://reason.di.fc.ul.pt/~aign/storage/mirror/minicard-v1.2.tar.gz',
'solvers/minicard.tar.gz'
),
'minisat22': (
'http://minisat.se/downloads/minisat-2.2.0.tar.gz',
'solvers/minisat22.tar.gz'
),
'minisatgh': (
'https://github.com/niklasso/minisat/archive/master.zip',
'http://reason.di.fc.ul.pt/~aign/storage/mirror/minisatgh-master.zip',
'solvers/minisatgh.zip'
)
}
#
#==============================================================================
to_extract = {
'glucose30': [],
'glucose41': [],
'lingeling': ['druplig-009.zip', 'lingeling-bbc-9230380-160707.tar.gz'],
'maplechrono': [],
'maplecm': [],
'maplesat': [],
'minicard': [],
'minisat22': [],
'minisatgh': []
}
#
#==============================================================================
to_move = {
'glucose30': [],
'glucose41': [],
'lingeling': [
('druplig-009/druplig.c', 'druplig.c'),
('druplig-009/druplig.h', 'druplig.h'),
('lingeling-bbc-9230380-160707/lglconst.h', 'lglconst.h'),
('lingeling-bbc-9230380-160707/lglib.c', 'lglib.c'),
('lingeling-bbc-9230380-160707/lglib.h', 'lglib.h'),
('lingeling-bbc-9230380-160707/lgloptl.h', 'lgloptl.h'),
('lingeling-bbc-9230380-160707/lglopts.c', 'lglopts.c'),
('lingeling-bbc-9230380-160707/lglopts.h', 'lglopts.h')
],
'maplechrono': [
('sources/core', 'core'),
('sources/mtl', 'mtl'),
('sources/utils', 'utils')
],
'maplecm': [
('sources/core', 'core'),
('sources/mtl', 'mtl'),
('sources/utils', 'utils')
],
'maplesat': [],
'minicard': [
('core', '_core'),
('minicard', 'core')
],
'minisat22': [],
'minisatgh': [
('minisat/core', 'core'),
('minisat/mtl', 'mtl'),
('minisat/utils', 'utils')
]
}
#
#==============================================================================
to_remove = {
'glucose30': [
'core/Dimacs.h',
'core/Main.cc',
'core/Makefile',
'mtl/config.mk',
'mtl/template.mk',
'simp',
'utils/Makefile'
],
'glucose41': [
'._Changelog',
'._LICENCE',
'._README',
'._core',
'._mtl',
'._parallel',
'._simp',
'._utils',
'Changelog',
'core/._BoundedQueue.h',
'core/._Constants.h',
'core/._Dimacs.h',
'core/._Makefile',
'core/._Solver.cc',
'core/._Solver.h',
'core/._SolverStats.h',
'core/._SolverTypes.h',
'core/Dimacs.h',
'core/Makefile',
'LICENCE',
'README',
'mtl/._Alg.h',
'mtl/._Alloc.h',
'mtl/._Clone.h',
'mtl/._Heap.h',
'mtl/._IntTypes.h',
'mtl/._Map.h',
'mtl/._Queue.h',
'mtl/._Sort.h',
'mtl/._Vec.h',
'mtl/._VecThreads.h',
'mtl/._XAlloc.h',
'mtl/._config.mk',
'mtl/._template.mk',
'mtl/config.mk',
'mtl/template.mk',
'simp',
'parallel',
'utils/._Makefile',
'utils/._Options.cc',
'utils/._Options.h',
'utils/._ParseUtils.h',
'utils/._System.cc',
'utils/._System.h',
'utils/Makefile'
],
'lingeling': [
'druplig-009',
'druplig-009.zip',
'lingeling-bbc-9230380-160707',
'lingeling-bbc-9230380-160707.tar.gz',
'extract-and-compile.sh',
'.tar.gz',
'README'
],
'maplechrono': [
'bin',
'core/Dimacs.h',
'core/Main.cc',
'core/Makefile',
'mtl/config.mk',
'mtl/template.mk',
'utils/Makefile',
'sources',
'starexec_build'
],
'maplecm': [
'__MACOSX',
'bin',
'core/Dimacs.h',
'core/Main.cc',
'core/Makefile',
'mtl/config.mk',
'mtl/template.mk',
'utils/Makefile',
'utils/Options.o',
'utils/System.o',
'sources',
'starexec_build'
],
'maplesat': [
'core/Dimacs.h',
'core/Main.cc',
'core/Makefile',
'doc',
'mtl/config.mk',
'mtl/template.mk',
'simp',
'utils/Makefile',
'LICENSE',
'README'
],
'minicard': [
'_core',
'encodings',
'minicard_encodings',
'minicard_simp_encodings',
'tests',
'core/Dimacs.h',
'core/Main.cc',
'core/Makefile',
'core/opb.h',
'mtl/config.mk',
'mtl/template.mk',
'utils/Makefile',
'LICENSE',
'README',
'.gitignore'
],
'minisat22': [
'core/Dimacs.h',
'core/Main.cc',
'core/Makefile',
'doc',
'mtl/config.mk',
'mtl/template.mk',
'simp',
'utils/Makefile',
'LICENSE',
'README'
],
'minisatgh': [
'core/Dimacs.h',
'core/Main.cc',
'doc',
'minisat',
'CMakeLists.txt',
'LICENSE',
'Makefile',
'README',
'.gitignore'
]
}
#
#==============================================================================
def do(to_install):
"""
Prepare all solvers specified in the command line.
"""
for solver in to_install:
print('preparing {0}'.format(solver))
download_archive(sources[solver])
extract_archive(sources[solver][-1], solver)
adapt_files(solver)
patch_solver(solver)
compile_solver(solver)
#
#==============================================================================
def download_archive(sources):
"""
Downloads an archive and saves locally (taken from PySMT).
"""
# last element is expected to be the local archive name
save_to = sources[-1]
# not downloading the file again if it exists
if os.path.exists(save_to):
print('not downloading {0} since it exists locally'.format(save_to))
return
# try all possible sources one by one
for url in sources[:-1]:
# make five attempts per source
for i in range(5):
# first attempt to get a response
response = urlopen(url)
# handling redirections
u = urlopen(response.geturl())
meta = u.info()
if meta.get('Content-Length') and len(meta.get('Content-Length')) > 0:
filesz = int(meta.get('Content-Length'))
if os.path.exists(save_to) and os.path.getsize(save_to) == filesz:
print('not downloading {0} since it exists locally'.format(save_to))
return
print('downloading: {0} ({1} bytes)...'.format(save_to, filesz), end=' ')
with open(save_to, 'wb') as fp:
block_sz = 8192
while True:
buff = u.read(block_sz)
if not buff:
break
fp.write(buff)
print('done')
break
else:
continue
break # successfully got the file
else:
assert 0, 'something went wrong -- cannot download {0}'.format(save_to)
#
#==============================================================================
def extract_archive(archive, solver, put_inside = False):
"""
Unzips/untars a previously downloaded archive file.
"""
print('extracting {0}'.format(archive))
root = os.path.join('solvers', solver if put_inside else '')
if archive.endswith('.tar.gz'):
if os.path.exists(archive[:-7]):
shutil.rmtree(archive[:-7])
tfile = tarfile.open(archive, 'r:gz')
tfile.extractall(root)
# normally, directory should be the first name
# but glucose4.1 has some garbage in the archive
for name in tfile.getnames():
if not name.startswith('./.'):
directory = name
break
elif archive.endswith('.zip'):
if os.path.exists(archive[:-4]):
shutil.rmtree(archive[:-4])
myzip = zipfile.ZipFile(archive, 'r')
myzip.extractall(root)
directory = myzip.namelist()[0]
directory = directory.rstrip('/').split('/')[0]
myzip.close()
if not put_inside:
if os.path.exists(os.path.join('solvers', solver)):
shutil.rmtree(os.path.join('solvers', solver))
shutil.move(os.path.join('solvers', directory), os.path.join('solvers', solver))
#
#==============================================================================
def adapt_files(solver):
"""
Rename and remove files whenever necessary.
"""
print("adapting {0}'s files".format(solver))
root = os.path.join('solvers', solver)
for arch in to_extract[solver]:
arch = os.path.join(root, arch)
extract_archive(arch, solver, put_inside=True)
for fnames in to_move[solver]:
old = os.path.join(root, fnames[0])
new = os.path.join(root, fnames[1])
os.rename(old, new)
for f in to_remove[solver]:
f = os.path.join(root, f)
if os.path.isdir(f):
shutil.rmtree(f)
else:
os.remove(f)
#
#==============================================================================
def patch_solver(solver):
"""
Applies a patch to a given solver.
"""
print('patching {0}'.format(solver))
os.system('patch -p0 < solvers/patches/{0}.patch'.format(solver))
#
#==============================================================================
def compile_solver(solver):
"""
Compiles a given solver as a library.
"""
print('compiling {0}'.format(solver))
os.system('cd solvers/{0} && make && cd ../..'.format(solver))
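#
#==============================================================================
if __name__ == '__main__':
    # Minimal driver sketch (an assumption -- the original entry point is not
    # part of this excerpt): prepare every solver named on the command line
    # that is known to the `sources` table above.
    do([name for name in sys.argv[1:] if name in sources])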
| 27.197647
| 101
| 0.487758
|
e1fd4c9ff52e3287262a64e2b108208e3ae67c18
| 158
|
py
|
Python
|
engine/src/hopeit/server/runtime.py
|
pcanto-hopeit/hopeit.engine
|
c17b0438e56940a4d1b2f071cca90ae8b6f70629
|
[
"Apache-2.0"
] | 15
|
2020-07-09T17:41:14.000Z
|
2021-10-04T20:13:08.000Z
|
engine/src/hopeit/server/runtime.py
|
pcanto-hopeit/hopeit.engine
|
c17b0438e56940a4d1b2f071cca90ae8b6f70629
|
[
"Apache-2.0"
] | 48
|
2020-07-10T15:16:17.000Z
|
2022-03-03T19:46:46.000Z
|
engine/src/hopeit/server/runtime.py
|
pcanto-hopeit/hopeit.engine
|
c17b0438e56940a4d1b2f071cca90ae8b6f70629
|
[
"Apache-2.0"
] | 3
|
2020-07-08T20:12:58.000Z
|
2021-01-10T15:57:21.000Z
|
"""
Server runtime: module that keeps a reference to the current runtime hopeit engine Server object.
"""
from hopeit.server.engine import Server
server = Server()
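# Usage sketch (assumption, not part of the original module): other engine
# modules are expected to import this shared singleton instead of constructing
# their own Server instance, e.g.
#   from hopeit.server.runtime import server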
| 19.75
| 89
| 0.765823
|
4a6e4556d2f19e3257d547aa7ffc1b38afd823e3
| 20,742
|
py
|
Python
|
code_python/3-CameraCalibration/main.py
|
khanfarhan10/Computer-Vision-2
|
9ed58481e48d95f43464fb34d4da74ee0184bd14
|
[
"MIT"
] | 1
|
2022-01-05T23:11:51.000Z
|
2022-01-05T23:11:51.000Z
|
code_python/3-CameraCalibration/main.py
|
khanfarhan10/Computer-Vision-2
|
9ed58481e48d95f43464fb34d4da74ee0184bd14
|
[
"MIT"
] | null | null | null |
code_python/3-CameraCalibration/main.py
|
khanfarhan10/Computer-Vision-2
|
9ed58481e48d95f43464fb34d4da74ee0184bd14
|
[
"MIT"
] | 2
|
2021-08-14T13:03:12.000Z
|
2021-10-12T08:48:18.000Z
|
# region Imports
# ------------------------------------------------------------------------------
# Imports
# ------------------------------------------------------------------------------
import os
from typing import Union
from datetime import datetime
from pyrealsense2 import pyrealsense2 as rs
from cv2 import cv2 as cv
import numpy as np
# endregion
# region Real Sense Functions
# ------------------------------------------------------------------------------
# Real Sense Functions
# ------------------------------------------------------------------------------
def rs_config_color_pipeline(config_rs: rs.config,
stream_type_rs: rs.stream = rs.stream.color,
width: int = 848,
height: int = 480,
format_rs: rs.format = rs.format.rgb8,
fps: int = 15):
"""
Configs the pipeline to enable the color stream
example:
pipeline = rs.pipeline(ctx=rs.context())
config = rs.config()
config = rs_config_color_pipeline(config_rs=config)
pipeline.start(config=config)
@param config_rs: real sense configuration
@type config_rs: rs.config
@param stream_type_rs: Sets the stream type, default is rs.stream.color
@type stream_type_rs: rs.stream
@param width: The width of the stream in pixels, default is 848
@type width: int
@param height: The height of the stream in pixels, default is 480
@type height: int
@param format_rs: real sense stream format, default is rs.format.rgb8
@type format_rs: rs.format
@param fps: The fps of the stream, default is 15
@type fps: int
@return: The configuration file already configured
@rtype: rs.config
"""
# Configure the pipeline to stream the color stream
config_rs.enable_stream(stream_type_rs,
width,
height,
format_rs,
fps)
return config_rs
def rs_config_IR_pipeline(config_rs: rs.config,
stream_type_rs: rs.stream = rs.stream.infrared,
width: int = 848,
height: int = 480,
format_rs: rs.format = rs.format.y8,
fps: int = 15):
"""
Configs the pipeline to enable the infrared (IR) left and right stream
example:
pipeline = rs.pipeline(ctx=rs.context())
config = rs.config()
config = rs_config_IR_pipeline(config_rs=config)
pipeline.start(config=config)
@param config_rs: real sense configuration
@type config_rs: rs.config
@param stream_type_rs: Sets the stream type, default is rs.stream.infrared
@type stream_type_rs: rs.stream
@param width: The width of the stream in pixels, default is 848
@type width: int
@param height: The height of the stream in pixels, default is 480
@type height: int
@param format_rs: real sense stream format, default is rs.format.y8
@type format_rs: rs.format
@param fps: The fps of the stream, default is 15
@type fps: int
@return: The configuration file already configured
@rtype: rs.config
"""
# https://github.com/IntelRealSense/librealsense/issues/1140
    # Configure the pipeline to stream the IR streams, one config for each camera
config_rs.enable_stream(stream_type_rs,
1,
width,
height,
format_rs,
fps)
config_rs.enable_stream(stream_type_rs,
2,
width,
height,
format_rs,
fps)
return config_rs
def rs_config_depth_pipeline(config_rs: rs.config,
stream_type_rs: rs.stream = rs.stream.depth,
width: int = 848,
height: int = 480,
format_rs: rs.format = rs.format.z16,
fps: int = 15):
"""
Configs the pipeline to enable the depth stream
example:
pipeline = rs.pipeline(ctx=rs.context())
config = rs.config()
config = rs_config_depth_pipeline(config_rs=config)
pipeline.start(config=config)
@param config_rs: real sense configuration
@type config_rs: rs.config
@param stream_type_rs: Sets the stream type, default is rs.stream.depth
@type stream_type_rs: rs.stream
@param width: The width of the stream in pixels, default is 848
@type width: int
@param height: The height of the stream in pixels, default is 480
@type height: int
@param format_rs: real sense stream format, default is rs.format.z16
@type format_rs: rs.format
@param fps: The fps of the stream, default is 15
@type fps: int
@return: The configuration file already configured
@rtype: rs.config
"""
# https://github.com/IntelRealSense/librealsense/issues/1140
    # Configure the pipeline to stream the depth stream
config_rs.enable_stream(stream_type_rs,
width,
height,
format_rs,
fps)
return config_rs
def get_extrinsics(src: rs.video_stream_profile, dst: rs.video_stream_profile):
"""
Returns R, T transform from src to dst.
@param src: The source
@type src: rs.video_stream_profile
@param dst: The destiny
@type dst: rs.video_stream_profile
@return: Rotation and Transform Matrix
@rtype: np.ndarray, np.ndarray
"""
_extrinsics = src.get_extrinsics_to(dst)
R = np.reshape(_extrinsics.rotation, [3, 3]).T
T = np.array(_extrinsics.translation)
return R, T
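# Usage sketch (illustrative; mirrors how this helper is called from
# get_intrinsics_extrinsics below):
#   R, T = get_extrinsics(color_profile, infrared1_profile)
# where both arguments are rs.video_stream_profile objects; R is the 3x3
# rotation matrix and T the translation vector between the two sensors.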
def unpack_profile(pipeline_rs: rs.pipeline):
"""
    The keys of the returned dictionary are the names of the streams, i.e.
    Depth, Infrared 1, Infrared 2 and Color.
example:
unpacked_profiles = unpack_profile(pipeline_rs=pipeline)
@param pipeline_rs: Pipeline where profiles need to be extracted. Extracted
features are: Type, Video Profile, Stream Profile and Unique ID.
@type pipeline_rs: rs.pipeline_profile
@return: Dictionary in which the key is the profile name and the values
are the extracted properties: Type, Video Profile, Stream Profile and
Unique ID.
@rtype: dict
"""
profiles = pipeline_rs.get_active_profile()
unpacked = {}
for profile in profiles.get_streams():
_key = profile.stream_name()
values = {
"Type": profile.stream_type(),
"Video Profile": profile.as_video_stream_profile(),
"Stream Profile": profile.as_stream_profile(),
"UID": profile.unique_id(),
}
unpacked[_key] = values
return unpacked
def get_intrinsics_extrinsics(pipeline_rs: rs.pipeline):
"""
Gets the intrinsics and extrinsics parameters of the available streams
Intrinsics parameters are from every profile available.
Extrinsics parameters can only be from Color to Infrared 1 or Infrared 2
to Infrared 1.
example:
intrinsics, extrinsics = get_intrinsics_extrinsics(pipeline_rs=pipeline)
@param pipeline_rs: The pipeline to extract the streams/profiles
@type pipeline_rs: rs.pipeline
@return: A dictionary with intrinsics parameters and another dictionary
with extrinsics parameters
@rtype: dict, dict
"""
unpacked_profiles = unpack_profile(pipeline_rs=pipeline_rs)
_intrinsics = {}
for _key, _value in unpacked_profiles.items():
_intrinsics[_key] = _value.get("Video Profile").get_intrinsics()
_extrinsics = {}
if unpacked_profiles.__contains__("Infrared 1"):
if unpacked_profiles.__contains__("Color"):
_extrinsics["Color -> Infrared 1"] = get_extrinsics(
unpacked_profiles.get("Color").get("Video Profile"),
unpacked_profiles.get("Infrared 1").get("Video Profile"))
if unpacked_profiles.__contains__("Infrared 2"):
_extrinsics["Infrared 2 -> Infrared 1"] = get_extrinsics(
unpacked_profiles.get("Infrared 2").get("Video Profile"),
unpacked_profiles.get("Infrared 1").get("Video Profile"))
return _intrinsics, _extrinsics
# endregion
# region Utility Functions
# ------------------------------------------------------------------------------
# Utility Functions
# ------------------------------------------------------------------------------
def reject_outliers_2(data: np.ndarray, m: float = 2.):
"""
    Removes outliers from a numpy array, returning only the inlier values. Based on:
https://stackoverflow.com/questions/11686720/is-there-a-numpy-builtin-to-reject-outliers-from-a-list
"""
d = np.abs(data - np.median(data))
mdev = np.median(d)
s = d / (mdev if mdev else 1.)
return data[s < m]
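# Worked example (illustrative, not from the original file): with the default
# m=2 only values within two median absolute deviations of the median are
# kept, e.g.
#   reject_outliers_2(np.array([1.0, 1.1, 0.9, 10.0]))  ->  array([1. , 1.1, 0.9])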
# endregion
# ------------------------------------------------------------------------------
# Main
# ------------------------------------------------------------------------------
path = os.path.join("..", "..", "data",
"CV_D435_20201104_160738_RGB_calibration.bag")
timestamp = "_" + datetime.now().strftime('%Y%m%d_%H%M%S')
save_path = os.path.join(f"calib_params{timestamp}")
# Number of corners required to compute the calibration matrix
_MIN_CORNERS = 40
# Sets the length of the chessboard square
square_size = 2.5 # Length of the square (2.5)
units = 0.01 # Units of square_size (cm)
# Distance between left and right IR cameras in meters. Cameras are
# assumed to be parallel to each other. We are assuming no distortion for
# all cameras
baseline = 0.05 # m
# Number of Inner Corners of the chessboard pattern
chess_rows = 6
chess_cols = 9
# Creates a Real Sense Pipeline Object
pipeline = rs.pipeline(ctx=rs.context())
# Create a config object
config = rs.config()
# Tell config that we will use a recorded device from file to be used by
# the pipeline through playback (comment this line if you want to use a
# real camera).
config.enable_device_from_file(file_name=path, repeat_playback=True)
config = rs_config_color_pipeline(config_rs=config)
try:
# Starts the pipeline with the configuration done previously
pipeline.start(config=config)
except RuntimeError as err:
print(err)
raise RuntimeError("Make sure the config streams exists in the device!")
# Create colorizer object to apply to depth frames
colorizer = rs.colorizer()
# Get intrinsics and extrinsics parameters from the multiple profiles of
# the pipeline
intrinsics, extrinsics = get_intrinsics_extrinsics(pipeline_rs=pipeline)
# patternSize = (points_per_row, points_per_column)
chess_size = (chess_rows, chess_cols)
# Stores all corner points that where found on the image
image_points = []
# Creates a list with the real world object(chessboard pattern) coordinates
obj_point = np.zeros((chess_rows * chess_cols, 3), dtype=np.float32)
obj_point[:, :2] = np.mgrid[
0:chess_rows * square_size:square_size,
0:chess_cols * square_size:square_size
].T.reshape(-1, 2)
obj_point = obj_point * units
# Used to store all the real world points of the chessboard pattern
obj_points = []
# Window to show the stream
cv.namedWindow("Color Stream", cv.WINDOW_AUTOSIZE)
# FLAG (Don't touch)
first_run = True
trigger_pressed = False
# Main cycle/loop
while True:
# Read key and waits 1ms
key = cv.waitKey(1)
# Wait for new frames and grabs the frameset
frameset = pipeline.wait_for_frames()
# Get RGB Camera frame
rs_color_rgb = cv.cvtColor(
np.asanyarray(frameset.get_color_frame().get_data()),
cv.COLOR_BGR2RGB
)
rs_color_gray = cv.cvtColor(
np.asanyarray(frameset.get_color_frame().get_data()),
cv.COLOR_BGR2GRAY
)
# Gather image information on the first run
if first_run:
# Image dimensions
_h_, _w_, _c_ = rs_color_rgb.shape[:3]
# Resized image dimensions
_h_rsz_, _w_rsz_ = cv.resize(
src=rs_color_rgb,
dsize=None,
fx=1 / _MIN_CORNERS,
fy=1 / _MIN_CORNERS).shape[:2]
# Creates the space to hold the images
image_corners = np.zeros((_h_rsz_, _w_, _c_))
image_corners[:, :, :] = [65, 65, 65] # Gray
first_run = False
# Creates a division bar and stacks it to the images
div = np.zeros((4, _w_, _c_)) # NOQA
div[:, :, :] = [100, 100, 65] # Dark Cyan Blue
image_bar = np.vstack((div, image_corners)) # NOQA
if not trigger_pressed:
_show = np.copy(rs_color_rgb)
_show = cv.putText(img=_show,
text="PRESS <SPACE> TO CAPTURE",
org=(3, 26),
fontFace=cv.FONT_HERSHEY_SIMPLEX,
fontScale=1,
color=(0, 0, 255),
thickness=3,
bottomLeftOrigin=False)
else:
_show = rs_color_rgb
# Render image in opencv window
cv.imshow("Color Stream", np.uint8(np.vstack((_show, image_bar))))
# If SPACE is pressed
if key == 32:
# Find the chessboard inner corners
ret_val, corners = cv.findChessboardCorners(image=rs_color_gray,
patternSize=chess_size)
trigger_pressed = True
if ret_val:
# Adds the real world coordinates to the array that stores the real
# world coordinates.
obj_points.append(obj_point)
"""
corners = cv.cornerSubPix(image=rs_color_gray,
corners=corners,
winSize=(11, 11),
zeroZone=(-1, -1),
criteria=(cv.TERM_CRITERIA_EPS +
cv.TERM_CRITERIA_MAX_ITER,
30, 0.001))
# """
# Adds the image point to the array.
image_points.append(corners) # NOQA - Supressed
# warnings on the current line. Used to prevent "Name corners can
# be undefined"
# Resizes the image to display it.
_img_resized = cv.resize(
src=cv.drawChessboardCorners(image=rs_color_rgb,
patternSize=chess_size,
corners=corners,
patternWasFound=ret_val),
dsize=None,
fx=1 / _MIN_CORNERS,
fy=1 / _MIN_CORNERS)
# Stacks the resized image of the corners to the previous images
image_corners = np.uint8(np.hstack((image_corners, _img_resized)))
# Removes the oldest image.
image_corners = image_corners[:, _w_rsz_:] # NOQA
# If the array of the image points have more than the minimum
# images required for computation...
if len(image_points) >= _MIN_CORNERS:
# Removes the first entry, meaning the oldest one, of the
# image_points and object_points
obj_points.pop(0)
image_points.pop(0)
_, cam_mat, dist_coef, rot_vec, trans_vec = cv.calibrateCamera(
objectPoints=obj_points,
imagePoints=image_points,
imageSize=(rs_color_rgb.shape[1], rs_color_rgb.shape[0]),
cameraMatrix=None,
distCoeffs=None)
rot_vec = np.array(rot_vec)
"""
rotX_avg = np.sum(rot_vec[:, 0, :]) / _MIN_CORNERS
rotY_avg = np.sum(rot_vec[:, 1, :]) / _MIN_CORNERS
rotZ_avg = np.sum(rot_vec[:, 2, :]) / _MIN_CORNERS
# """
_inliers = reject_outliers_2(rot_vec[:, 0, :], m=2)
rotX_avg = np.sum(_inliers) / len(_inliers)
_inliers = reject_outliers_2(rot_vec[:, 1, :], m=2)
rotY_avg = np.sum(_inliers) / len(_inliers)
_inliers = reject_outliers_2(rot_vec[:, 2, :], m=2)
rotZ_avg = np.sum(_inliers) / len(_inliers)
rot_vec_avg = np.vstack((rotX_avg, rotY_avg, rotZ_avg))
rot_mat = cv.Rodrigues(rot_vec_avg)[0]
trans_vec = np.array(trans_vec)
"""
transX_avg = np.sum(trans_vec[:, 0, :]) / _MIN_CORNERS
transY_avg = np.sum(trans_vec[:, 1, :]) / _MIN_CORNERS
transZ_avg = np.sum(trans_vec[:, 2, :]) / _MIN_CORNERS
# """
_inliers = reject_outliers_2(trans_vec[:, 0, :], m=2)
transX_avg = np.sum(_inliers) / len(_inliers)
_inliers = reject_outliers_2(trans_vec[:, 1, :], m=2)
transY_avg = np.sum(_inliers) / len(_inliers)
_inliers = reject_outliers_2(trans_vec[:, 2, :], m=2)
transZ_avg = np.sum(_inliers) / len(_inliers)
trans_vec_avg = np.vstack((transX_avg, transY_avg, transZ_avg))
print("\n----------------------------------------")
print("\tIntrinsics Matrix")
print(np.round(cam_mat, 2))
print("----------------------------------------")
print("\tExtrinsic Matrix")
print(np.round(np.hstack((trans_vec_avg, rot_mat)), 2))
print("----------------------------------------\n")
error_sum = 0
for i in range(len(obj_points)):
image_points_reprojected, _ = cv.projectPoints(
objectPoints=obj_points[i],
rvec=rot_vec[i],
tvec=trans_vec[i],
cameraMatrix=cam_mat,
distCoeffs=dist_coef
)
error = cv.norm(
src1=image_points[i],
src2=image_points_reprojected, # NOQA
normType=cv.NORM_L2
) / len(image_points_reprojected)
error_sum += error
avg_error = error_sum / len(obj_points)
print(f"Error: {avg_error}")
new_cam_mat, ROI = cv.getOptimalNewCameraMatrix(
cameraMatrix=cam_mat,
distCoeffs=dist_coef,
imageSize=(_w_, _h_), # NOQA
alpha=1)
img_undistorted = cv.undistort(src=rs_color_rgb,
cameraMatrix=cam_mat,
distCoeffs=dist_coef,
dst=None,
newCameraMatrix=new_cam_mat)
rs_color_rgb = cv.putText(img=rs_color_rgb,
text="PRESS <ESC> TO EXIT",
org=(3, 26),
fontFace=cv.FONT_HERSHEY_SIMPLEX,
fontScale=1,
color=(0, 0, 255),
thickness=3,
bottomLeftOrigin=False)
rs_color_rgb = cv.resize(src=rs_color_rgb,
dsize=None,
fx=0.5,
fy=0.5)
img_undistorted = cv.resize(src=img_undistorted,
dsize=None,
fx=0.5,
fy=0.5)
cv.namedWindow("Original - Undistorted", cv.WINDOW_AUTOSIZE)
cv.imshow("Original - Undistorted",
np.hstack((rs_color_rgb, img_undistorted)))
# if pressed ESCAPE exit program
if key == 27:
cv.destroyAllWindows()
break
try:
np.savez(save_path, cam_mat, dist_coef, # NOQA
rvec=rot_vec_avg, tvec=trans_vec_avg, # NOQA
avg_error=avg_error) # NOQA
except NameError:
pass
| 37.989011
| 104
| 0.543631
|
6765eb37bdbb881a951bde4ad228361e913469b1
| 8,040
|
py
|
Python
|
docs/conf.py
|
hassanobeid1994/tr_b_causal_2020
|
1ffaeb7dcefccf5e1f24c459e9a2f140b2a052a5
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
hassanobeid1994/tr_b_causal_2020
|
1ffaeb7dcefccf5e1f24c459e9a2f140b2a052a5
|
[
"MIT"
] | 89
|
2020-02-10T02:52:11.000Z
|
2020-06-23T03:50:27.000Z
|
docs/conf.py
|
hassan-obeid/tr_b_causal_2020
|
1ffaeb7dcefccf5e1f24c459e9a2f140b2a052a5
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Causal_Inference_Transportation documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "Causal_Inference_Transportation"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "0.1"
# The full version, including alpha/beta/rc tags.
release = "0.1"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "default"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = "ctr_b_causal_2020doc"
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
(
"index",
"ctr_b_causal_2020.tex",
"Causal_Inference_Transportation Documentation",
"hassan",
"manual",
),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
"index",
"ctr_b_causal_2020",
"Causal_Inference_Transportation Documentation",
["hassan"],
1,
)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
"index",
"ctr_b_causal_2020",
"Causal_Inference_Transportation Documentation",
"hassan",
"Causal_Inference_Transportation",
"causal graphs and transportatability",
"Miscellaneous",
),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
| 31.284047
| 84
| 0.701617
|
074279a742c1ca6ba48653ebaadc8507f7df174a
| 190
|
py
|
Python
|
test/test_cli.py
|
GochoMugo/remindme
|
6cf2f94ce07ead754f1ee5976a7e7d7cbfa1a2e4
|
[
"MIT"
] | 17
|
2015-05-02T22:58:07.000Z
|
2017-04-17T06:33:43.000Z
|
test/test_cli.py
|
GochoMugo/remindme
|
6cf2f94ce07ead754f1ee5976a7e7d7cbfa1a2e4
|
[
"MIT"
] | 8
|
2015-02-14T16:22:27.000Z
|
2016-10-26T13:15:19.000Z
|
test/test_cli.py
|
GochoMugo/remindme
|
6cf2f94ce07ead754f1ee5976a7e7d7cbfa1a2e4
|
[
"MIT"
] | 2
|
2016-02-26T10:47:56.000Z
|
2019-10-09T05:49:51.000Z
|
'''
Tests against remindme's command-line runner
'''
import unittest
from remindme import cli
class Test_Cli(unittest.TestCase):
'''Tests against the Command-line Runner.'''
pass
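    # Placeholder check below is an illustrative addition, not part of the
    # original suite.
    def test_cli_module_loads(self):
        '''The cli module imported above should at least be a loadable
        module object.'''
        self.assertTrue(hasattr(cli, '__name__'))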
| 15.833333
| 48
| 0.726316
|
17a66209bd2103b0fa62088d384d34cc7813ef49
| 105
|
py
|
Python
|
PrototypeSystem/BackEnd/application/settings/prob.py
|
SEVulDet/SEVulDet
|
df56196067b249d9550cabd0315413c35c8ff420
|
[
"MIT"
] | 1
|
2022-03-15T00:44:57.000Z
|
2022-03-15T00:44:57.000Z
|
PrototypeSystem/BackEnd/application/settings/prob.py
|
SEVulDet/SEVulDet
|
df56196067b249d9550cabd0315413c35c8ff420
|
[
"MIT"
] | null | null | null |
PrototypeSystem/BackEnd/application/settings/prob.py
|
SEVulDet/SEVulDet
|
df56196067b249d9550cabd0315413c35c8ff420
|
[
"MIT"
] | null | null | null |
'''Production deployment configuration file.'''
from . import Config
class ProductionConfig(Config):
    '''Configuration used in production mode.'''
DEBUG = False
| 17.5
| 31
| 0.657143
|
a5c6e7f9a4b968b181604e312d5be8cd90fa3dbf
| 30,539
|
py
|
Python
|
dibs/server.py
|
phette23/dibs
|
85dd630a95a36ded0acd87acb973139a5fd88742
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
dibs/server.py
|
phette23/dibs
|
85dd630a95a36ded0acd87acb973139a5fd88742
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
dibs/server.py
|
phette23/dibs
|
85dd630a95a36ded0acd87acb973139a5fd88742
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
'''
server.py: DIBS server definition.
Copyright
---------
Copyright (c) 2021 by the California Institute of Technology. This code
is open-source software released under a 3-clause BSD license. Please see the
file "LICENSE" for more information.
'''
from beaker.middleware import SessionMiddleware
import bottle
from bottle import Bottle, HTTPResponse, static_file, template
from bottle import request, response, redirect, route, get, post, error
from datetime import datetime, timedelta
from decouple import config
import functools
from humanize import naturaldelta
import json
import os
from os.path import realpath, dirname, join
from peewee import *
import sys
import threading
from topi import Tind
from .database import Item, Loan, Recent
from .date_utils import human_datetime
from .email import send_email
from .people import Person, check_password, person_from_session
from .roles import role_to_redirect, has_required_role
if __debug__:
from sidetrack import log, logr, set_debug
# General configuration and initialization.
# .............................................................................
# Begin by creating a Bottle object on which we will define routes. At the end
# of this file, we will replace this object with the final exported application.
dibs = Bottle()
# Tell Bottle where to find templates. This is necessary for both the Bottle
# template() command to work and also to get %include to work inside our .tpl
# template files. Rather surprisingly, the only way to tell Bottle where to
# find the templates is to set this Bottle package-level variable.
bottle.TEMPLATE_PATH.append(join(realpath(dirname(__file__)), 'templates'))
# Cooling-off period after a loan ends, before user can borrow same title again.
_RELOAN_WAIT_TIME = timedelta(minutes = int(config('RELOAN_WAIT_TIME') or 30))
# Where we send users to give feedback.
_FEEDBACK_URL = config('FEEDBACK_URL') or '/'
# The next constant is used to configure Beaker sessions. This is used at
# the very end of this file in the call to SessionMiddleware.
_SESSION_CONFIG = {
# Use simple in-memory session handling. Ultimately we will only need
# sessions for the admin pages, and we won't have many users.
'session.type' : 'memory',
# Save session data automatically, without requiring us to call save().
'session.auto' : True,
# Session cookies should be accessible only to the browser, not JavaScript.
'session.httponly' : True,
# Clear sessions when the user restarts their browser.
'session.cookie_expires' : True,
# The name of the session cookie.
'session.key' : config('COOKIE_NAME') or 'dibs',
# Seconds until the session is invalidated.
'session.timeout' : config('SESSION_TIMEOUT', cast = int) or 604800,
}
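# Illustrative sketch only (the actual wrapping happens at the very end of the
# original file, outside this excerpt): Beaker consumes the settings above by
# wrapping the Bottle app as WSGI middleware, roughly
#   dibs_app = SessionMiddleware(dibs, _SESSION_CONFIG)
# where the name dibs_app is hypothetical.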
# General-purpose utilities used later.
# .............................................................................
def page(name, **kargs):
'''Create a page using template "name" with some standard variables set.'''
# Bottle is unusual in providing global objects like 'request'.
session = request.environ['beaker.session']
logged_in = bool(session.get('user', None))
staff_user = has_required_role(person_from_session(session), 'library')
return template(name, base_url = dibs.base_url, logged_in = logged_in,
staff_user = staff_user, feedback_url = _FEEDBACK_URL, **kargs)
# Bottle hooks -- functions that are run every time a route is invoked.
# .............................................................................
@dibs.hook('before_request')
def expired_loan_removing_wrapper():
'''Clean up expired loans.'''
for loan in Loan.select():
if datetime.now() >= loan.endtime:
barcode = loan.item.barcode
if __debug__: log(f'loan for {barcode} by {loan.user} expired')
Recent.create(item = loan.item, user = loan.user,
nexttime = loan.endtime + timedelta(minutes = 1))
loan.delete_instance()
for recent in Recent.select():
if datetime.now() >= recent.nexttime:
barcode = recent.item.barcode
if __debug__: log(f'expiring recent record for {barcode} by {recent.user}')
recent.delete_instance()
# Decorators -- functions that are run selectively on certain routes.
# .............................................................................
def barcode_verified(func):
'''Check if the given barcode (passed as keyword argument) exists.'''
@functools.wraps(func)
def barcode_verification_wrapper(*args, **kwargs):
if 'barcode' in kwargs:
barcode = kwargs['barcode']
if not Item.get_or_none(Item.barcode == barcode):
if __debug__: log(f'there is no item with barcode {barcode}')
return page('error', summary = 'no such barcode',
message = f'There is no item with barcode {barcode}.')
return func(*args, **kwargs)
return barcode_verification_wrapper
def authenticated(func):
'''Check if the user is authenticated and redirect to /login if not.'''
@functools.wraps(func)
def authentication_check_wrapper(*args, **kwargs):
if request.method == 'HEAD':
# A Beaker session is not present when we get a HEAD. Unsure if
# that's expected or just a Bottle or Beaker behavior. We can't
# proceed with the request, but it's not an error either. I
# haven't found a better alternative than simply returning nothing.
if __debug__: log(f'returning empty HEAD on {request.path}')
return
session = request.environ['beaker.session']
if not session.get('user', None):
if __debug__: log(f'user not found in session object')
redirect(f'{dibs.base_url}/login')
else:
if __debug__: log(f'user is authenticated: {session["user"]}')
return func(*args, **kwargs)
return authentication_check_wrapper
# Administrative interface endpoints.
# .............................................................................
# NOTE: there are three approaches for integrating SSO. First is always
# require SSO before showing anything (not terribly useful here).
# Second use existing end points (e.g. /login, /logout) this supports
# everyone as SSO or not at all, third would be to support both
# SSO via its own end points and allow the app based authentication
# end points to remain for users who are defined in the system only.
# This can be helpful in the case of admin users or service accounts.
@dibs.get('/login')
def show_login_page():
# NOTE: If SSO is implemented this should redirect to the
# SSO end point with a return to /login on success.
if __debug__: log('get /login invoked')
return page('login')
@dibs.post('/login')
def login():
'''Handle performing the login action from the login page.'''
# NOTE: If SSO is implemented this end point will handle the
# successful login case applying role rules if necessary.
email = request.forms.get('email').strip()
password = request.forms.get('password')
if __debug__: log(f'post /login invoked by {email}')
# get our person obj from people.db for demo purposes
user = (Person.get_or_none(Person.uname == email))
if user != None:
if check_password(password, user.secret) == False:
if __debug__: log(f'wrong password -- rejecting {email}')
return page('login')
else:
if __debug__: log(f'creating session for {email}')
session = request.environ['beaker.session']
session['user'] = email
p = role_to_redirect(user.role)
if __debug__: log(f'redirecting to "{p}"')
redirect(f'{dibs.base_url}/{p}')
return
else:
if __debug__: log(f'wrong password -- rejecting {email}')
return page('login')
@dibs.post('/logout')
def logout():
'''Handle the logout action from the navbar menu on every page.'''
session = request.environ['beaker.session']
if not session.get('user', None):
if __debug__: log(f'post /logout invoked by unauthenticated user')
return
user = session['user']
if __debug__: log(f'post /logout invoked by {user}')
del session['user']
redirect(f'{dibs.base_url}/login')
@dibs.get('/list')
@authenticated
def list_items():
'''Display the list of known items.'''
person = person_from_session(request.environ['beaker.session'])
if has_required_role(person, 'library') == False:
redirect(f'{dibs.base_url}/notallowed')
return
if __debug__: log('get /list invoked')
return page('list', items = Item.select())
@dibs.get('/manage')
@authenticated
def manage_items():
    '''Display the page for managing the known items.'''
person = person_from_session(request.environ['beaker.session'])
if has_required_role(person, 'library') == False:
redirect(f'{dibs.base_url}/notallowed')
return
if __debug__: log('get /manage invoked')
return page('manage', items = Item.select())
@dibs.get('/add')
@authenticated
def add():
'''Display the page to add new items.'''
person = person_from_session(request.environ['beaker.session'])
if has_required_role(person, 'library') == False:
redirect(f'{dibs.base_url}/notallowed')
return
if __debug__: log('get /add invoked')
return page('edit', action = 'add', item = None)
@dibs.get('/edit/<barcode:int>')
@barcode_verified
@authenticated
def edit(barcode):
'''Display the page to add new items.'''
person = person_from_session(request.environ['beaker.session'])
if has_required_role(person, 'library') == False:
redirect(f'{dibs.base_url}/notallowed')
return
if __debug__: log(f'get /edit invoked on {barcode}')
return page('edit', action = 'edit', item = Item.get(Item.barcode == barcode))
@dibs.post('/update/add')
@dibs.post('/update/edit')
@authenticated
def update_item():
'''Handle http post request to add a new item from the add-new-item page.'''
person = person_from_session(request.environ['beaker.session'])
if has_required_role(person, 'library') == False:
redirect(f'{dibs.base_url}/notallowed')
return
if __debug__: log(f'post {request.path} invoked')
if 'cancel' in request.POST:
if __debug__: log(f'user clicked Cancel button')
redirect(f'{dibs.base_url}/list')
return
# The HTML form validates the data types, but the POST might come from
# elsewhere, so we always need to sanity-check the values.
barcode = request.forms.get('barcode').strip()
if not barcode.isdigit():
return page('error', summary = 'invalid barcode',
message = f'{barcode} is not a valid barcode')
duration = request.forms.get('duration').strip()
if not duration.isdigit() or int(duration) <= 0:
return page('error', summary = 'invalid duration',
message = f'Duration must be a positive number')
num_copies = request.forms.get('num_copies').strip()
if not num_copies.isdigit() or int(num_copies) <= 0:
return page('error', summary = 'invalid copy number',
message = f'# of copies must be a positive number')
# Our current approach only uses items with barcodes that exist in TIND.
# If that ever changes, the following needs to change too.
tind = Tind('https://caltech.tind.io')
try:
rec = tind.item(barcode = barcode).parent
except:
if __debug__: log(f'could not find {barcode} in TIND')
return page('error', summary = 'no such barcode',
message = f'There is no item with barcode {barcode}.')
return
item = Item.get_or_none(Item.barcode == barcode)
if '/update/add' in request.path:
if item:
if __debug__: log(f'{barcode} already exists in the database')
return page('error', summary = 'duplicate entry',
                        message = f'An item with barcode {barcode} already exists.')
if __debug__: log(f'adding {barcode}, title {rec.title}')
Item.create(barcode = barcode, title = rec.title, author = rec.author,
tind_id = rec.tind_id, year = rec.year,
edition = rec.edition, thumbnail = rec.thumbnail_url,
num_copies = num_copies, duration = duration)
else:
if not item:
if __debug__: log(f'there is no item with barcode {barcode}')
return page('error', summary = 'no such barcode',
message = f'There is no item with barcode {barcode}.')
if __debug__: log(f'updating {barcode} from {rec}')
#FIXME: Need to validate these values.
item.barcode = barcode
item.num_copies = num_copies
item.duration = duration
        # NOTE: Since we don't have these fields in the edit form, we don't
        # do anything with them.
#for field in ['title', 'author', 'year', 'edition', 'tind_id', 'thumbnail']:
# setattr(item, field, getattr(rec, field, ''))
# NOTE: We only update the specific editable fields.
item.save(only=[Item.barcode, Item.num_copies, Item.duration])
redirect(f'{dibs.base_url}/list')
@dibs.post('/ready')
@barcode_verified
@authenticated
def toggle_ready():
'''Set the ready-to-loan field.'''
barcode = request.POST.barcode.strip()
ready = (request.POST.ready.strip() == 'True')
if __debug__: log(f'post /ready invoked on barcode {barcode}')
item = Item.get(Item.barcode == barcode)
# The status we get is the availability status as it currently shown,
# meaning the user's action is to change the status.
item.ready = not ready
#NOTE: We only save the ready value we toggled.
item.save(only=[Item.ready])
if __debug__: log(f'readiness of {barcode} is now {item.ready}')
# If the readiness state is changed after the item is let out for loans,
# then there may be outstanding loans right now. Delete them.
    if list(Loan.select().where(Loan.item == item)):
if __debug__: log(f'loans for {barcode} have been deleted')
Loan.delete().where(Loan.item == item).execute()
redirect(f'{dibs.base_url}/list')
@dibs.post('/remove')
@barcode_verified
@authenticated
def remove_item():
'''Handle http post request to remove an item from the list page.'''
person = person_from_session(request.environ['beaker.session'])
if has_required_role(person, 'library') == False:
redirect(f'{dibs.base_url}/notallowed')
return
barcode = request.POST.barcode.strip()
if __debug__: log(f'post /remove invoked on barcode {barcode}')
item = Item.get(Item.barcode == barcode)
item.ready = False
# Don't forget to delete any loans involving this item.
    if list(Loan.select().where(Loan.item == item)):
Loan.delete().where(Loan.item == item).execute()
Item.delete().where(Item.barcode == barcode).execute()
redirect(f'{dibs.base_url}/manage')
# User endpoints.
# .............................................................................
@dibs.get('/')
@dibs.get('/<name:re:(info|welcome|about|thankyou)>')
def general_page(name = '/'):
'''Display the welcome page.'''
if __debug__: log(f'get {name} invoked')
if name == 'about':
return page('about')
elif name == 'thankyou':
return page('thankyou')
else:
return page('info', reloan_wait_time = naturaldelta(_RELOAN_WAIT_TIME))
#FIXME: We need an item status which returns a JSON object
# so the item page can update itself without reloading the whole page.
@dibs.get('/item-status/<barcode:int>')
@authenticated
def item_status(barcode):
'''Returns an item summary status as a JSON string'''
user = request.environ['beaker.session'].get('user')
if __debug__: log(f'get /item-status invoked on barcode {barcode} and {user}')
obj = {
'barcode': barcode,
'ready': False,
'available': False,
'explanation': '',
'endtime' : None,
'base_url': dibs.base_url
}
item = Item.get_or_none(Item.barcode == barcode)
if (item != None) and (user != None):
obj['ready'] = item.ready
user_loans = list(Loan.select().where(Loan.user == user))
recent_history = list(Recent.select().where(Recent.item == item))
endtime = None
# First check if the user has recently loaned out this same item.
if any(loan for loan in recent_history if loan.user == user):
if __debug__: log(f'{user} recently borrowed {barcode}')
recent = next(loan for loan in recent_history if loan.user == user)
endtime = recent.nexttime
obj['available'] = False
obj['explanation'] = 'It is too soon after the last time you borrowed this book.'
elif any(user_loans):
# The user has a current loan. If it's for this title, redirect them
# to the viewer; if it's for another title, block the loan button.
if user_loans[0].item == item:
if __debug__: log(f'{user} already has {barcode}; redirecting to uv')
obj['explanation'] = 'You currently have borrowed this book.'
else:
if __debug__: log(f'{user} already has a loan on something else')
obj['available'] = False
endtime = user_loans[0].endtime
loaned_item = user_loans[0].item
obj['explanation'] = ('You have another item on loan'
+ f' ("{loaned_item.title}" by {loaned_item.author})'
+ ' and it has not yet been returned.')
else:
if __debug__: log(f'{user} is allowed to borrow {barcode}')
loans = list(Loan.select().where(Loan.item == item))
obj['available'] = item.ready and (len(loans) < item.num_copies)
if item.ready and not obj['available']:
endtime = min(loan.endtime for loan in loans)
obj['explanation'] = 'All available copies are currently on loan.'
elif not item.ready:
endtime = None
obj['explanation'] = 'This item is not currently available through DIBS.'
else:
# It's available and they can have it.
endtime = None
obj['explanation'] = ''
if endtime != None:
obj['endtime'] = human_datetime(endtime)
else:
            obj['endtime'] = None
return json.dumps(obj)
@dibs.get('/item/<barcode:int>')
@barcode_verified
@authenticated
def show_item_info(barcode):
'''Display information about the given item.'''
user = request.environ['beaker.session'].get('user')
if __debug__: log(f'get /item invoked on barcode {barcode} by {user}')
item = Item.get(Item.barcode == barcode)
user_loans = list(Loan.select().where(Loan.user == user))
recent_history = list(Recent.select().where(Recent.item == item))
# First check if the user has recently loaned out this same item.
if any(loan for loan in recent_history if loan.user == user):
if __debug__: log(f'{user} recently borrowed {barcode}')
recent = next(loan for loan in recent_history if loan.user == user)
endtime = recent.nexttime
available = False
explanation = 'It is too soon after the last time you borrowed this book.'
elif any(user_loans):
# The user has a current loan. If it's for this title, redirect them
# to the viewer; if it's for another title, block the loan button.
if user_loans[0].item == item:
if __debug__: log(f'{user} already has {barcode}; redirecting to uv')
redirect(f'{dibs.base_url}/view/{barcode}')
return
else:
if __debug__: log(f'{user} already has a loan on something else')
available = False
endtime = user_loans[0].endtime
loaned_item = user_loans[0].item
explanation = ('You have another item on loan'
+ f' ("{loaned_item.title}" by {loaned_item.author})'
+ ' and it has not yet been returned.')
else:
if __debug__: log(f'{user} is allowed to borrow {barcode}')
loans = list(Loan.select().where(Loan.item == item))
available = item.ready and (len(loans) < item.num_copies)
if item.ready and not available:
endtime = min(loan.endtime for loan in loans)
explanation = 'All available copies are currently on loan.'
elif not item.ready:
endtime = None
explanation = 'This item is not currently available through DIBS.'
else:
# It's available and they can have it.
endtime = datetime.now()
explanation = None
return page('item', item = item, available = available,
endtime = human_datetime(endtime), explanation = explanation)
# Lock object used around some code to prevent concurrent modification.
_THREAD_LOCK = threading.Lock()
@dibs.post('/loan')
@barcode_verified
@authenticated
def loan_item():
'''Handle http post request to loan out an item, from the item info page.'''
user = request.environ['beaker.session'].get('user')
barcode = request.POST.barcode.strip()
if __debug__: log(f'post /loan invoked on barcode {barcode} by {user}')
item = Item.get(Item.barcode == barcode)
if not item.ready:
# Normally we shouldn't see a loan request through our form in this
# case, so either staff has changed the status after item was made
# available or someone got here accidentally (or deliberately).
if __debug__: log(f'{barcode} is not ready for loans')
redirect(f'{dibs.base_url}/view/{barcode}')
return
# The default Bottle dev web server is single-thread, so we won't run into
# the problem of 2 users simultaneously clicking on the loan button. Other
# servers are multithreaded, and there's a risk that the time it takes us
# to look through the loans introduces a window of time when another user
# might click on the same loan button and cause another loan request to be
# initiated before the 1st finishes. So, lock this block of code.
with _THREAD_LOCK:
if any(Loan.select().where(Loan.user == user)):
if __debug__: log(f'{user} already has a loan on something else')
return page('error', summary = 'only one loan at a time',
message = ('Our policy currently prevents users from '
'borrowing more than one item at a time.'))
loans = list(Loan.select().where(Loan.item == item))
if any(loan.user for loan in loans if user == loan.user):
# Shouldn't be able to reach this point b/c the item page shouldn't
# make a loan available for this user & item combo. But if
# something weird happens (e.g., double posting), we might.
if __debug__: log(f'{user} already has a copy of {barcode} loaned out')
if __debug__: log(f'redirecting {user} to /view for {barcode}')
redirect(f'{dibs.base_url}/view/{barcode}')
return
if len(loans) >= item.num_copies:
# This shouldn't be possible, but catch it anyway.
if __debug__: log(f'# loans {len(loans)} >= num_copies for {barcode} ')
redirect(f'{dibs.base_url}/item/{barcode}')
return
recent_history = list(Recent.select().where(Recent.item == item))
if any(loan for loan in recent_history if loan.user == user):
if __debug__: log(f'{user} recently borrowed {barcode}')
recent = next(loan for loan in recent_history if loan.user == user)
return page('error', summary = 'too soon',
message = ('We ask that you wait at least '
f'{naturaldelta(_RELOAN_WAIT_TIME)} before '
'requesting the same item again. Please try '
f'after {human_datetime(recent.nexttime)}'))
# OK, the user is allowed to loan out this item.
start = datetime.now()
end = start + timedelta(hours = item.duration)
if __debug__: log(f'creating new loan for {barcode} for {user}')
Loan.create(item = item, user = user, started = start, endtime = end)
send_email(user, item, start, end, dibs.base_url)
redirect(f'{dibs.base_url}/view/{barcode}')
@dibs.post('/return')
@barcode_verified
@authenticated
def end_loan():
'''Handle http post request to return the given item early.'''
barcode = request.forms.get('barcode').strip()
user = request.environ['beaker.session'].get('user')
if __debug__: log(f'get /return invoked on barcode {barcode} by {user}')
loans = list(Loan.select().join(Item).where(Loan.item.barcode == barcode))
user_loans = [loan for loan in loans if user == loan.user]
if len(user_loans) > 1:
# Internal error -- users should not have more than one loan of an
# item. Right now, we simply log it and move on.
if __debug__: log(f'error: more than one loan for {barcode} by {user}')
elif user_loans:
# Normal case: user has loaned a copy of item. Delete the record and
# add a new Recent loan record.
if __debug__: log(f'deleting loan record for {barcode} by {user}')
user_loans[0].delete_instance()
Recent.create(item = Item.get(Item.barcode == barcode), user = user,
nexttime = datetime.now() + _RELOAN_WAIT_TIME)
else:
# User does not have this item loaned out. Ignore the request.
if __debug__: log(f'{user} does not have {barcode} loaned out')
redirect(f'{dibs.base_url}/thankyou')
@dibs.get('/view/<barcode:int>')
@barcode_verified
@authenticated
def send_item_to_viewer(barcode):
'''Redirect to the viewer.'''
user = request.environ['beaker.session'].get('user')
if __debug__: log(f'get /view invoked on barcode {barcode} by {user}')
loans = list(Loan.select().join(Item).where(Loan.item.barcode == barcode))
user_loans = [loan for loan in loans if user == loan.user]
if user_loans:
if __debug__: log(f'redirecting to viewer for {barcode} for {user}')
return page('uv', barcode = barcode,
endtime = human_datetime(user_loans[0].endtime),
reloan_wait_time = naturaldelta(_RELOAN_WAIT_TIME))
else:
if __debug__: log(f'{user} does not have {barcode} loaned out')
redirect(f'{dibs.base_url}/item/{barcode}')
@dibs.get('/manifests/<barcode:int>')
@barcode_verified
@authenticated
def return_manifest(barcode):
'''Return the manifest file for a given item.'''
user = request.environ['beaker.session'].get('user')
if __debug__: log(f'get /manifests/{barcode} invoked by {user}')
loans = list(Loan.select().join(Item).where(Loan.item.barcode == barcode))
if any(loan.user for loan in loans if user == loan.user):
if __debug__: log(f'returning manifest file for {barcode} for {user}')
return static_file(f'{barcode}-manifest.json', root = 'manifests')
else:
if __debug__: log(f'{user} does not have {barcode} loaned out')
redirect(f'{dibs.base_url}/notallowed')
return
# Universal viewer interface.
# .............................................................................
# The uv subdirectory contains generic html and css. We serve them as static
# files to anyone; they don't need to be controlled. The multiple routes
# are because the UV files themselves reference different paths.
@dibs.route('/view/uv/<filepath:path>')
@dibs.route('/viewer/uv/<filepath:path>')
def serve_uv_files(filepath):
if __debug__: log(f'serving static uv file /viewer/uv/{filepath}')
return static_file(filepath, root = 'viewer/uv')
# The uv subdirectory contains generic html and css. Serve as static files.
@dibs.route('/viewer/<filepath:path>')
def serve_viewer_files(filepath):
if __debug__: log(f'serving static uv file /viewer/{filepath}')
return static_file(filepath, root = 'viewer')
# Error pages.
# .............................................................................
# Note: the Bottle session plugin does not seem to supply session arg to @error.
@dibs.get('/notallowed')
@dibs.post('/notallowed')
def not_allowed():
if __debug__: log(f'serving /notallowed')
return page('error', summary = 'access error',
message = ('The requested method does not exist or you do not '
                           'have permission to access the requested item.'))
@error(404)
def error404(error):
if __debug__: log(f'error404 called with {error}')
return page('404', code = error.status_code, message = error.body)
@error(405)
def error405(error):
if __debug__: log(f'error405 called with {error}')
return page('error', summary = 'method not allowed',
message = ('The requested method does not exist or you do not '
                           'have permission to perform the action.'))
# Miscellaneous static pages.
# .............................................................................
@dibs.get('/favicon.ico')
def favicon():
'''Return the favicon.'''
if __debug__: log(f'returning favicon')
return static_file('favicon.ico', root = 'dibs/static')
@dibs.get('/static/<filename:re:[-a-zA-Z0-9]+.(html|jpg|svg|css|js)>')
def included_file(filename):
'''Return a static file used with %include in a template.'''
if __debug__: log(f'returning included file {filename}')
return static_file(filename, root = 'dibs/static')
# Main exported application.
# .............................................................................
# In the file above, we defined a Bottle application and its routes. Now we
# take that application definition and hand it to a middleware layer for
# session handling (using Beaker). The new "dibs" constitutes the final
# application that is invoked by the WSGI server via ../adapter.wsgi.
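# (Sketch, exact module path assumed.) The matching adapter.wsgi can then simply import
# the wrapped application defined below, e.g.:  from <this module> import dibs as application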
dibs = SessionMiddleware(dibs, _SESSION_CONFIG)
| 43.073343
| 93
| 0.62949
|
b87e692a19a88e824687aad7cdb6f012442f4772
| 6,526
|
py
|
Python
|
misc/mysqlproxy.py
|
pzure/CyMySQL
|
a6133a150f58f6752a80a73c74cd3277a6dedcd8
|
[
"MIT"
] | 1
|
2018-06-24T07:29:15.000Z
|
2018-06-24T07:29:15.000Z
|
misc/mysqlproxy.py
|
pzure/CyMySQL
|
a6133a150f58f6752a80a73c74cd3277a6dedcd8
|
[
"MIT"
] | null | null | null |
misc/mysqlproxy.py
|
pzure/CyMySQL
|
a6133a150f58f6752a80a73c74cd3277a6dedcd8
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
##############################################################################
#The MIT License (MIT)
#
#Copyright (c) 2016 Hajime Nakagami
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
##############################################################################
import sys
import time
import socket
import binascii
import select
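# Read exactly n bytes from the socket, looping until the full amount has arrived.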
def recv_from_socket(sock, n):
    received = b''
    while n:
        bs = sock.recv(n)
        received += bs
        n -= len(bs)
    return received
def recv_mysql_packet(sock):
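    # MySQL packets start with a 4-byte header: a 3-byte little-endian payload length plus a sequence id byte.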
head = recv_from_socket(sock, 4)
n = int.from_bytes(head[:3], byteorder='little')
return head + recv_from_socket(sock, n)
def to_ascii(s):
r = ''
for c in s:
r += chr(c) if (c >= 32 and c < 128) else '.'
return r
def print_command_type(code):
r = {
0x01: 'COM_QUIT',
0x03: 'COM_QUERY',
}.get(code, '')
print("%-12s" % (r), end='')
return r
def print_response_type(code):
r = {
0x00: 'OK',
0xFE: 'EOF',
0xFF: 'Error',
}.get(code, '')
print("%-12s" % (r), end='')
return r
def proxy_wire(server_name, server_port, listen_host, listen_port):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind((listen_host, listen_port))
sock.listen(1)
client_sock, addr = sock.accept()
server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_sock.connect((server_name, server_port))
# http://dev.mysql.com/doc/internals/en/connection-phase-packets.html
# initial packet
server_data = recv_mysql_packet(server_sock)
client_sock.send(server_data)
print('S->C initial packets', binascii.b2a_hex(server_data).decode('ascii'))
r = print_response_type(server_data[4])
print(' [' + to_ascii(server_data) + ']')
# initial response (authentication)
client_data = recv_mysql_packet(client_sock)
server_sock.send(client_data)
print('C->S initial response', binascii.b2a_hex(client_data).decode('ascii'))
print_command_type(client_data[4])
print(' [' + to_ascii(client_data) + ']')
# auth result
server_data = recv_mysql_packet(server_sock)
client_sock.send(server_data)
print('S->C auth result', binascii.b2a_hex(server_data).decode('ascii'))
r = print_response_type(server_data[4])
print(' [' + to_ascii(server_data) + ']')
# http://dev.mysql.com/doc/internals/en/packet-OK_Packet.html
# payload first byte eq 0.
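    # caching_sha2_password: 0x01 0x03 signals fast-auth success, 0x01 0x04 requests full authentication.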
if server_data[4:6] == b'\x01\x03':
print("fast auth")
server_data = recv_mysql_packet(server_sock)
client_sock.send(server_data)
print('S->C auth result', binascii.b2a_hex(server_data).decode('ascii'))
if server_data[4:6] == b'\x01\x04':
print("full auth")
client_data = recv_mysql_packet(client_sock)
server_sock.send(client_data)
print('C->S', binascii.b2a_hex(client_data).decode('ascii'))
server_data = recv_mysql_packet(server_sock)
client_sock.send(server_data)
print('S->C', binascii.b2a_hex(server_data).decode('ascii'))
client_data = recv_mysql_packet(client_sock)
server_sock.send(client_data)
print('C->S', binascii.b2a_hex(client_data).decode('ascii'))
server_data = recv_mysql_packet(server_sock)
client_sock.send(server_data)
print('S->C auth result', binascii.b2a_hex(server_data).decode('ascii'))
else:
assert server_data[4] == 0
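    # Command relay loop: forward client commands until COM_QUIT; for each COM_QUERY,
    # pass back the server response, then the column definitions and result rows.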
while True:
client_data = recv_mysql_packet(client_sock)
server_sock.send(client_data)
print('C->S', binascii.b2a_hex(client_data).decode('ascii'))
print_command_type(client_data[4])
print(' [' + to_ascii(client_data) + ']')
if client_data[4] == 0x01: # COM_QUIT
break
assert client_data[4] == 0x03 # COM_QUERY
server_data = recv_mysql_packet(server_sock)
client_sock.send(server_data)
print('S->C', binascii.b2a_hex(server_data).decode('ascii'))
r = print_response_type(server_data[4])
print(' [' + to_ascii(server_data) + ']')
if r:
continue
print('[Column definition]')
while True:
server_data = recv_mysql_packet(server_sock)
client_sock.send(server_data)
print('S->C', binascii.b2a_hex(server_data).decode('ascii'))
r = print_response_type(server_data[4])
print(' [' + to_ascii(server_data) + ']')
if r:
break
print('[Result Rows]')
while True:
server_data = recv_mysql_packet(server_sock)
client_sock.send(server_data)
print('S->C', binascii.b2a_hex(server_data).decode('ascii'))
r = print_response_type(server_data[4])
print(' [' + to_ascii(server_data) + ']')
if r:
break
if __name__ == '__main__':
if len(sys.argv) < 3:
print('Usage : ' + sys.argv[0] + ' server[:port] [listen_host:]listen_port')
sys.exit()
server = sys.argv[1].split(':')
server_name = server[0]
if len(server) == 1:
server_port = 3306
else:
server_port = int(server[1])
listen = sys.argv[2].split(':')
if len(listen) == 1:
listen_host = 'localhost'
listen_port = int(listen[0])
else:
listen_host = listen[0]
listen_port = int(listen[1])
proxy_wire(server_name, server_port, listen_host, listen_port)
| 35.086022
| 84
| 0.631168
|
9dadca4e49d91502b41182da4677f0146db48fcf
| 2,095
|
py
|
Python
|
loading_scripts/frames_to_redis.py
|
cltl/LongTailAnnotation
|
3c82873451f870ef1c2481a52f0fb881a7ce8631
|
[
"Apache-2.0"
] | 2
|
2019-03-14T08:33:24.000Z
|
2021-03-04T14:12:50.000Z
|
loading_scripts/frames_to_redis.py
|
cltl/LongTailAnnotation
|
3c82873451f870ef1c2481a52f0fb881a7ce8631
|
[
"Apache-2.0"
] | null | null | null |
loading_scripts/frames_to_redis.py
|
cltl/LongTailAnnotation
|
3c82873451f870ef1c2481a52f0fb881a7ce8631
|
[
"Apache-2.0"
] | null | null | null |
import json
import redis
import pickle
import sys
import pandas as pd
from collections import defaultdict
fle = 'data/all'
pool = redis.ConnectionPool(host='localhost', port=6379, db=0)
r = redis.Redis(connection_pool=pool)
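# Build an index from incident id to the set of question ids whose answers reference it (subtasks 1-3).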
def build_inc2que():
my_index = defaultdict(set)
subtasks=['1','2','3']
for s in subtasks:
with open('../test_data2/%s_answers.json' % s, 'r') as infile:
answers=json.load(infile)
for q_id in answers.keys():
incs = answers[q_id]["answer_docs"].keys()
for i in incs:
my_index[i].add(q_id)
return my_index
def any_empty_names(participants):
for p in participants:
if 'Name' not in p or p['Name']=='':
return True
return False
def count_suspects(participants):
c=0
for p in participants:
if p['Type'].strip()=='Subject-Suspect':
c+=1
return c
inc2que = build_inc2que()
new_inc2que = {}
allf=pd.read_pickle(fle)
bad_docs_num=0
empty_part=0
too_many_part=0
MAX_PARTICIPANTS=10
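# Keep only incidents that have an answer mapping, between 2 and 4 documents,
# at most MAX_PARTICIPANTS participants, and no unnamed participants.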
for index, row in allf.iterrows():
incident_id = row['incident_uri']
if incident_id not in inc2que.keys(): continue
docs=list(row['hashed_ids'].values())
if len(docs)<2 or len(docs)>4:
bad_docs_num+=1
continue
if len(row['participants'])>MAX_PARTICIPANTS:
too_many_part+=1
continue
if any_empty_names(row['participants']):
empty_part+=1
continue
# suspects=count_suspects(row['participants'])
# if suspects>1:
# print(incident_id, suspects)
new_inc2que[incident_id]=inc2que[incident_id]
dockey = 'incdoc:%s' % incident_id
# r.set(dockey, json.dumps(docs))
strkey = 'incstr:%s' % incident_id
rval = row.to_json()
# r.set(strkey, json.dumps(rval))
print(len(new_inc2que), bad_docs_num, too_many_part, empty_part)
for k,v in new_inc2que.items():
quekey = 'incque:%s' % k
#print(k, v)
# r.set(quekey, json.dumps(list(v)))
with open('../new_inc2que.bin', 'wb') as outfile:
pickle.dump(new_inc2que, outfile)
| 26.1875
| 70
| 0.639141
|
bc7ee7fb500d08114f80eda3dd3c4e7fc1830729
| 1,545
|
py
|
Python
|
example/auto-xp-candy.py
|
nagata-yoshiteru/Switch-Fightstick
|
66685ff57c09444675e24c37455107eb529ce98a
|
[
"MIT"
] | 1
|
2020-04-07T05:12:47.000Z
|
2020-04-07T05:12:47.000Z
|
example/auto-xp-candy.py
|
nagata-yoshiteru/Switch-Fightstick
|
66685ff57c09444675e24c37455107eb529ce98a
|
[
"MIT"
] | null | null | null |
example/auto-xp-candy.py
|
nagata-yoshiteru/Switch-Fightstick
|
66685ff57c09444675e24c37455107eb529ce98a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import argparse
import serial
from time import sleep
parser = argparse.ArgumentParser()
parser.add_argument('--port', default='COM4')
args = parser.parse_args()
ser = serial.Serial(args.port, 9600)
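# Send one controller command over serial, hold it for `duration` seconds, then release.
# On serial errors, keep reconnecting and retry the same command.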
def send(msg, duration=0):
global ser
try:
ser.write(f'{msg}\r\n'.encode('utf-8'))
print(msg.replace('Button ', '').replace('HAT ', '').replace('LY MIN', '△ ').replace('LY MAX', '▽ ').replace('LX MIN', '◁').replace('LX MAX', '▷'), end=' ', flush=True)
sleep(duration)
ser.write(b'RELEASE\r\n')
except serial.serialutil.SerialException:
while True:
print("Reconnecting... ", end=' ', flush=True)
try:
sleep(0.4)
ser = serial.Serial(args.port, 9600)
print("Success.")
sleep(0.1)
send(msg, duration)
break
except:
                print("Failed. Retrying...")
try:
while True:
send('Button A', 0.1)
sleep(3)
send('LY MAX', 0.1)
sleep(1.2)
send('Button A', 0.1)
sleep(1.2)
send('Button A', 0.1)
sleep(1.2)
send('Button A', 0.1)
sleep(1.2)
send('Button A', 0.1)
sleep(3)
send('Button B', 0.1)
sleep(3)
send('Button B', 0.1)
sleep(1.2)
send('LY MAX', 0.1)
sleep(1.2)
send('Button L', 0.1)
sleep(1.2)
print(' ')
except KeyboardInterrupt:
send('RELEASE')
ser.close()
| 27.105263
| 176
| 0.500324
|
515a94697c11b5b0b9813fd897f55fdb75eb130c
| 10,445
|
py
|
Python
|
msgraph-cli-extensions/v1_0/usersactions_v1_0/azext_usersactions_v1_0/vendored_sdks/usersactions/operations/_users_onenote_section_groups_sections_parent_notebook_sections_operations.py
|
thewahome/msgraph-cli
|
33127d9efa23a0e5f5303c93242fbdbb73348671
|
[
"MIT"
] | null | null | null |
msgraph-cli-extensions/v1_0/usersactions_v1_0/azext_usersactions_v1_0/vendored_sdks/usersactions/operations/_users_onenote_section_groups_sections_parent_notebook_sections_operations.py
|
thewahome/msgraph-cli
|
33127d9efa23a0e5f5303c93242fbdbb73348671
|
[
"MIT"
] | null | null | null |
msgraph-cli-extensions/v1_0/usersactions_v1_0/azext_usersactions_v1_0/vendored_sdks/usersactions/operations/_users_onenote_section_groups_sections_parent_notebook_sections_operations.py
|
thewahome/msgraph-cli
|
33127d9efa23a0e5f5303c93242fbdbb73348671
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class UsersOnenoteSectionGroupsSectionsParentNotebookSectionsOperations(object):
"""UsersOnenoteSectionGroupsSectionsParentNotebookSectionsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~users_actions.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def copy_to_notebook(
self,
user_id, # type: str
section_group_id, # type: str
onenote_section_id, # type: str
onenote_section_id1, # type: str
body, # type: "models.PathsI6Vxt9UsersUserIdOnenoteSectiongroupsSectiongroupIdSectionsOnenotesectionIdParentnotebookSectionsOnenotesectionId1MicrosoftGraphCopytonotebookPostRequestbodyContentApplicationJsonSchema"
**kwargs # type: Any
):
# type: (...) -> "models.MicrosoftGraphOnenoteOperation"
"""Invoke action copyToNotebook.
Invoke action copyToNotebook.
:param user_id: key: id of user.
:type user_id: str
:param section_group_id: key: id of sectionGroup.
:type section_group_id: str
:param onenote_section_id: key: id of onenoteSection.
:type onenote_section_id: str
:param onenote_section_id1: key: id of onenoteSection.
:type onenote_section_id1: str
:param body: Action parameters.
:type body: ~users_actions.models.PathsI6Vxt9UsersUserIdOnenoteSectiongroupsSectiongroupIdSectionsOnenotesectionIdParentnotebookSectionsOnenotesectionId1MicrosoftGraphCopytonotebookPostRequestbodyContentApplicationJsonSchema
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphOnenoteOperation, or the result of cls(response)
:rtype: ~users_actions.models.MicrosoftGraphOnenoteOperation
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphOnenoteOperation"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.copy_to_notebook.metadata['url'] # type: ignore
path_format_arguments = {
'user-id': self._serialize.url("user_id", user_id, 'str'),
'sectionGroup-id': self._serialize.url("section_group_id", section_group_id, 'str'),
'onenoteSection-id': self._serialize.url("onenote_section_id", onenote_section_id, 'str'),
'onenoteSection-id1': self._serialize.url("onenote_section_id1", onenote_section_id1, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'PathsI6Vxt9UsersUserIdOnenoteSectiongroupsSectiongroupIdSectionsOnenotesectionIdParentnotebookSectionsOnenotesectionId1MicrosoftGraphCopytonotebookPostRequestbodyContentApplicationJsonSchema')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphOnenoteOperation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
copy_to_notebook.metadata = {'url': '/users/{user-id}/onenote/sectionGroups/{sectionGroup-id}/sections/{onenoteSection-id}/parentNotebook/sections/{onenoteSection-id1}/microsoft.graph.copyToNotebook'} # type: ignore
def copy_to_section_group(
self,
user_id, # type: str
section_group_id, # type: str
onenote_section_id, # type: str
onenote_section_id1, # type: str
body, # type: "models.PathsDoh0LaUsersUserIdOnenoteSectiongroupsSectiongroupIdSectionsOnenotesectionIdParentnotebookSectionsOnenotesectionId1MicrosoftGraphCopytosectiongroupPostRequestbodyContentApplicationJsonSchema"
**kwargs # type: Any
):
# type: (...) -> "models.MicrosoftGraphOnenoteOperation"
"""Invoke action copyToSectionGroup.
Invoke action copyToSectionGroup.
:param user_id: key: id of user.
:type user_id: str
:param section_group_id: key: id of sectionGroup.
:type section_group_id: str
:param onenote_section_id: key: id of onenoteSection.
:type onenote_section_id: str
:param onenote_section_id1: key: id of onenoteSection.
:type onenote_section_id1: str
:param body: Action parameters.
:type body: ~users_actions.models.PathsDoh0LaUsersUserIdOnenoteSectiongroupsSectiongroupIdSectionsOnenotesectionIdParentnotebookSectionsOnenotesectionId1MicrosoftGraphCopytosectiongroupPostRequestbodyContentApplicationJsonSchema
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphOnenoteOperation, or the result of cls(response)
:rtype: ~users_actions.models.MicrosoftGraphOnenoteOperation
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphOnenoteOperation"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.copy_to_section_group.metadata['url'] # type: ignore
path_format_arguments = {
'user-id': self._serialize.url("user_id", user_id, 'str'),
'sectionGroup-id': self._serialize.url("section_group_id", section_group_id, 'str'),
'onenoteSection-id': self._serialize.url("onenote_section_id", onenote_section_id, 'str'),
'onenoteSection-id1': self._serialize.url("onenote_section_id1", onenote_section_id1, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'PathsDoh0LaUsersUserIdOnenoteSectiongroupsSectiongroupIdSectionsOnenotesectionIdParentnotebookSectionsOnenotesectionId1MicrosoftGraphCopytosectiongroupPostRequestbodyContentApplicationJsonSchema')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphOnenoteOperation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
copy_to_section_group.metadata = {'url': '/users/{user-id}/onenote/sectionGroups/{sectionGroup-id}/sections/{onenoteSection-id}/parentNotebook/sections/{onenoteSection-id1}/microsoft.graph.copyToSectionGroup'} # type: ignore
| 53.290816
| 247
| 0.713451
|
4c9ef2f0373d75071f70783cddb2ed019259f174
| 961
|
py
|
Python
|
tremana/analysis/metrics.py
|
s-weigand/tremana
|
98a8a546c79ce4f248b3955da21374edfdd61dee
|
[
"Apache-2.0"
] | 1
|
2022-03-07T02:52:25.000Z
|
2022-03-07T02:52:25.000Z
|
tremana/analysis/metrics.py
|
s-weigand/tremana
|
98a8a546c79ce4f248b3955da21374edfdd61dee
|
[
"Apache-2.0"
] | 9
|
2021-04-26T07:08:27.000Z
|
2022-03-28T07:23:31.000Z
|
tremana/analysis/metrics.py
|
s-weigand/tremana
|
98a8a546c79ce4f248b3955da21374edfdd61dee
|
[
"Apache-2.0"
] | null | null | null |
"""Module containing metrics to be calculated on tremor accelerometry data or their FFT."""
import numpy as np
import pandas as pd
def center_of_mass(fft_spectra: pd.DataFrame) -> pd.DataFrame:
r"""Calculate the center of mass of FFT spectra.
.. math::
H_{cm} = \dfrac{1}{N-1} \dfrac{\sum\limits_{i=1}^{N}((i-1)X_i)}{\sum\limits_{i=1}^{N}X_i}
Parameters
----------
fft_spectra : pd.DataFrame
Dataframe with each column being a FFT spectrum.
Returns
-------
pd.DataFrame
Dataframe with the center of mass in the with columns names same as the spectra.
"""
results = {}
N = fft_spectra.shape[0]
weights = np.arange(0, N)
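    # Weights 0..N-1 correspond to the (i-1) factor in the formula above.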
for column in fft_spectra.columns:
sorted_spectrum = fft_spectra[column].sort_values(ascending=False)
results[column] = 1 / (N - 1) * sorted_spectrum.dot(weights).sum() / sorted_spectrum.sum()
return pd.DataFrame(results, index=["H_cm"])
| 30.03125
| 98
| 0.648283
|
1011d173207d775882d348d00a438bdb465b58e3
| 2,762
|
py
|
Python
|
tests/integration/mongodb/test_parties.py
|
vegaprotocol/social-media-verification
|
5e2d23ff5781136de1692c4dc46b4bb02e588e65
|
[
"MIT"
] | null | null | null |
tests/integration/mongodb/test_parties.py
|
vegaprotocol/social-media-verification
|
5e2d23ff5781136de1692c4dc46b4bb02e588e65
|
[
"MIT"
] | 23
|
2021-03-31T10:19:54.000Z
|
2021-10-29T23:55:34.000Z
|
tests/integration/mongodb/test_parties.py
|
vegaprotocol/social-media-verification
|
5e2d23ff5781136de1692c4dc46b4bb02e588e65
|
[
"MIT"
] | null | null | null |
import pytest
from freezegun import freeze_time
from datetime import datetime, timezone, timedelta
from tools import setup_parties_collection
from services.smv_storage import SMVStorage
START_TIME = datetime(2021, 9, 13, 10, 34, 20, 1000, timezone.utc)
START_TIME_EPOCH = int(START_TIME.timestamp())
PUB_KEY = "cc3a5912aba19291b070457f54652bb49b1b3a86ef0537e5224dbdc4e83b2102"
TWITTER_ID = 18237215432962
TWITTER_HANDLE = "my_twt_handle"
@pytest.mark.skipif_no_mongodb
def test_insert(smv_storage: SMVStorage):
setup_parties_collection(
smv_storage,
[],
)
# Before
assert smv_storage.get_parties() == []
# note:
# - each call to get date-time will tick global time by 15seconds
# - tz_offset - make sure it works with a random timezone
with freeze_time(START_TIME, tz_offset=-10, auto_tick_seconds=15):
# Insert
smv_storage.upsert_verified_party(
pub_key=PUB_KEY,
user_id=TWITTER_ID,
screen_name=TWITTER_HANDLE,
)
# validate
parties = smv_storage.get_parties()
assert len(parties) == 1
party = parties[0]
assert party["twitter_handle"] == TWITTER_HANDLE
assert party["party_id"] == PUB_KEY
assert party["twitter_user_id"] == TWITTER_ID
assert party["created"] == START_TIME_EPOCH
assert party["last_modified"] == START_TIME_EPOCH
@pytest.mark.skipif_no_mongodb
def test_dates_after_update(smv_storage: SMVStorage):
setup_parties_collection(
smv_storage,
[],
)
#
# Create
#
# note:
# - each call to get date-time will tick global time by 15seconds
# - tz_offset - make sure it works with a random timezone
with freeze_time(START_TIME, tz_offset=-10, auto_tick_seconds=15):
smv_storage.upsert_verified_party(
pub_key=PUB_KEY,
user_id=TWITTER_ID,
screen_name=TWITTER_HANDLE,
)
parties = smv_storage.get_parties()
assert len(parties) == 1
party = parties[0]
assert party["created"] == START_TIME_EPOCH
assert party["last_modified"] == START_TIME_EPOCH
#
# Update
#
# note:
# - each call to get date-time will tick global time by 15seconds
# - tz_offset - make sure it works with a random timezone
with freeze_time(
START_TIME + timedelta(seconds=11), tz_offset=-10, auto_tick_seconds=15
):
smv_storage.upsert_verified_party(
pub_key=PUB_KEY,
user_id=TWITTER_ID,
screen_name=TWITTER_HANDLE,
)
parties = smv_storage.get_parties()
assert len(parties) == 1
party = parties[0]
assert party["created"] == START_TIME_EPOCH
assert party["last_modified"] == START_TIME_EPOCH + 11
| 29.073684
| 79
| 0.678856
|
80476526f7debf041ec2ef845aae6e9249da336a
| 4,055
|
py
|
Python
|
pypy/module/__builtin__/app_inspect.py
|
nanjekyejoannah/pypy
|
e80079fe13c29eda7b2a6b4cd4557051f975a2d9
|
[
"Apache-2.0",
"OpenSSL"
] | 333
|
2015-08-08T18:03:38.000Z
|
2022-03-22T18:13:12.000Z
|
pypy/module/__builtin__/app_inspect.py
|
nanjekyejoannah/pypy
|
e80079fe13c29eda7b2a6b4cd4557051f975a2d9
|
[
"Apache-2.0",
"OpenSSL"
] | 7
|
2020-02-16T16:49:05.000Z
|
2021-11-26T09:00:56.000Z
|
pypy/module/__builtin__/app_inspect.py
|
nanjekyejoannah/pypy
|
e80079fe13c29eda7b2a6b4cd4557051f975a2d9
|
[
"Apache-2.0",
"OpenSSL"
] | 55
|
2015-08-16T02:41:30.000Z
|
2022-03-20T20:33:35.000Z
|
"""
Plain Python definition of the builtin functions related to run-time
program introspection.
"""
import sys
from __pypy__ import lookup_special
def _caller_locals():
return sys._getframe(0).f_locals
def vars(*obj):
"""Return a dictionary of all the attributes currently bound in obj. If
called with no argument, return the variables bound in local scope."""
if len(obj) == 0:
return _caller_locals()
elif len(obj) != 1:
raise TypeError("vars() takes at most 1 argument.")
try:
return obj[0].__dict__
except AttributeError:
raise TypeError("vars() argument must have __dict__ attribute")
# These are defined in the types module, but we cannot always import it.
# virtualenv when run with -S for instance. Instead, copy the code to create
# the needed types to be checked.
class types(object):
class _C:
def _m(self): pass
ModuleType = type(sys)
ClassType = type(_C)
TypeType = type
_x = _C()
InstanceType = type(_x)
def dir(*args):
"""dir([object]) -> list of strings
Return an alphabetized list of names comprising (some of) the attributes
of the given object, and of attributes reachable from it:
No argument: the names in the current scope.
Module object: the module attributes.
Type or class object: its attributes, and recursively the attributes of
its bases.
Otherwise: its attributes, its class's attributes, and recursively the
attributes of its class's base classes.
"""
if len(args) > 1:
raise TypeError("dir expected at most 1 arguments, got %d" % len(args))
if len(args) == 0:
local_names = _caller_locals().keys() # 2 stackframes away
if not isinstance(local_names, list):
raise TypeError("expected locals().keys() to be a list")
local_names.sort()
return local_names
# import types
obj = args[0]
if isinstance(obj, types.InstanceType):
dir_meth = getattr(obj, '__dir__', None)
else:
dir_meth = lookup_special(obj, '__dir__')
if dir_meth is not None:
names = dir_meth()
if not isinstance(names, list):
raise TypeError("__dir__() must return a list, not %r" % (
type(names),))
names.sort()
return names
# From here, this is python2-specific since in python3
# everything has a __dir__
elif isinstance(obj, types.ModuleType):
try:
return sorted(obj.__dict__)
except AttributeError:
return []
elif isinstance(obj, (types.TypeType, types.ClassType)):
# Don't look at __class__, as metaclass methods would be confusing.
return sorted(_classdir(obj))
else:
names = set()
ns = getattr(obj, '__dict__', None)
if isinstance(ns, dict):
names.update(ns)
klass = getattr(obj, '__class__', None)
if klass is not None:
names.update(_classdir(klass))
## Comment from object.c:
## /* Merge in __members__ and __methods__ (if any).
## XXX Would like this to go away someday; for now, it's
## XXX needed to get at im_self etc of method objects. */
for attr in '__members__', '__methods__':
l = getattr(obj, attr, None)
if not isinstance(l, list):
continue
names.extend(item for item in l if isinstance(item, str))
return sorted(names)
def _classdir(klass):
"""Return a set of the accessible attributes of class/type klass.
This includes all attributes of klass and all of the base classes
recursively.
"""
names = set()
ns = getattr(klass, '__dict__', None)
if ns is not None:
names.update(ns)
bases = getattr(klass, '__bases__', None)
if bases is not None:
# Note that since we are only interested in the keys, the order
# we merge classes is unimportant
for base in bases:
names.update(_classdir(base))
return names
| 33.512397
| 79
| 0.631813
|
1e5f7509809284edb7aa6a8c63753d15a7aa6a66
| 4,110
|
py
|
Python
|
TRMM/trmm_embrace_diurnal_mean_bit_above_western_ghats.py
|
peterwilletts24/Python-Scripts
|
975d6b2e2923cbde40d2760eb9574acee2e10388
|
[
"MIT"
] | 4
|
2017-05-24T09:14:14.000Z
|
2019-01-02T19:20:38.000Z
|
TRMM/trmm_embrace_diurnal_mean_bit_above_western_ghats.py
|
peterwilletts24/Python-Scripts
|
975d6b2e2923cbde40d2760eb9574acee2e10388
|
[
"MIT"
] | null | null | null |
TRMM/trmm_embrace_diurnal_mean_bit_above_western_ghats.py
|
peterwilletts24/Python-Scripts
|
975d6b2e2923cbde40d2760eb9574acee2e10388
|
[
"MIT"
] | 3
|
2017-05-24T09:14:15.000Z
|
2020-09-28T08:32:02.000Z
|
import cPickle as pickle
import numpy as np
from collections import defaultdict
from netCDF4 import Dataset
from scipy.interpolate import griddata
lon_high = 71
lon_low = 67
lat_high= 28
lat_low=20
#lon_high = 116
#lon_low = 30.5
#lat_high= 40
#lat_low=-11.25
pcp_dom, longitude_dom, latitude_dom, time_dom, time_hour = pickle.load(open('/nfs/a90/eepdw/Data/Saved_data/TRMM/trmm_emb_time_update_large.p', 'rb'))
# Load land sea mask. TRMM land sea mask is in % of water coverage so 100% is all water
nc = Dataset('/nfs/a90/eepdw/Data/Observations/Satellite/TRMM/TMPA_mask.nc')
# Regrid lsm to data grid (offset b 0.125 degrees
lsm_lons, lsm_lats = np.meshgrid(nc.variables['lon'][:],nc.variables['lat'][:])
lons_data, lats_data = np.meshgrid(longitude_dom[0], latitude_dom[0])
lsm_regrid = griddata((lsm_lats.flatten(), lsm_lons.flatten()), nc.variables['landseamask'][:].flatten(), (lats_data,lons_data), method='linear')
# Get min and max index positions for latitude and longitude - FOR LSM
# Get min and max index positions for latitude and longitude - FOR PCP
la_index_pcp = np.where((latitude_dom[0]<=lat_high) & (latitude_dom[0] >= lat_low))
lo_index_pcp = np.where((longitude_dom[0]<=lon_high) & (longitude_dom[0] >= lon_low))
la_i_max_pcp = np.max(la_index_pcp)
la_i_min_pcp = np.min(la_index_pcp)
lo_i_max_pcp = np.max(lo_index_pcp)
lo_i_min_pcp = np.min(lo_index_pcp)
print la_i_min_pcp,la_i_max_pcp, lo_i_min_pcp,lo_i_max_pcp
pcp_dom_2 = pcp_dom[:,la_i_min_pcp:la_i_max_pcp, lo_i_min_pcp:lo_i_max_pcp]
lsm= lsm_regrid[la_i_min_pcp:la_i_max_pcp, lo_i_min_pcp:lo_i_max_pcp]
print pcp_dom.shape
print pcp_dom_2.shape
print lsm.shape
####################################################
# Calculate mean for every time in the date range for entire area
mean_of_each_time = pcp_dom_2.reshape((pcp_dom_2.shape[0], -1)).mean(axis=1)
print pcp_dom_2.reshape(pcp_dom_2.shape[0], -1).shape
mean_and_hour=zip(mean_of_each_time,time_hour)
# OCEAN - Calculate mean for every time in the date range
lsm_weights=lsm/100
print pcp_dom_2.reshape(pcp_dom_2.shape[0], -1).shape
print lsm_weights.flatten().shape
mean_oc = np.ma.average(pcp_dom_2.reshape(pcp_dom_2.shape[0], -1), axis=1, weights=lsm_weights.flatten())
oc_mean_and_hour=zip(mean_oc,time_hour)
# LAND - Calculate mean for every time in the date range
lsm_weights=1-(lsm/100)
mean_la = np.ma.average(pcp_dom_2.reshape(pcp_dom_2.shape[0], -1), weights=lsm_weights.flatten(), axis=1)
la_mean_and_hour=zip(mean_la,time_hour)
#####################################################
# Sort into time of day #################
# Total
i = defaultdict(list)
for v,k in mean_and_hour:
i[k.strip()].append(v)
mean=[]
hour=[]
# Average for each time of day
for q,a in enumerate(i.items()):
#print a[1]
if a[1]:
mean.append(np.mean(a[1]))
hour.append(a[0])
print mean
print hour
# Land
i = defaultdict(list)
for v,k in la_mean_and_hour:
i[k.strip()].append(v)
mean_l=[]
hour_l=[]
# Average for each time of day
for q,a in enumerate(i.items()):
#print a[1]
if a[1]:
mean_l.append(np.mean(a[1]))
hour_l.append(a[0])
print mean_l
print hour_l
# Ocean
i = defaultdict(list)
for v,k in oc_mean_and_hour:
i[k.strip()].append(v)
mean_o=[]
hour_o=[]
# Average for each time of day
for q,a in enumerate(i.items()):
#print a[1]
if a[1]:
mean_o.append(np.mean(a[1]))
hour_o.append(a[0])
print mean_o
print hour_o
# Save
np.savez("/nfs/a90/eepdw/Data/Observations/Satellite/TRMM/Diurnal/total_trmm_diurnal_average_lat_%s_%s_lon_%s_%s_bit_above_western_ghats" % (lat_low,lat_high, lon_low, lon_high), mean=mean, hour=hour )
np.savez("/nfs/a90/eepdw/Data/Observations/Satellite/TRMM/Diurnal/sea_trmm_diurnal_average_lat_%s_%s_lon_%s_%s_bit_above_western_ghats" % (lat_low,lat_high, lon_low, lon_high), mean=mean_o, hour=hour_o )
np.savez("/nfs/a90/eepdw/Data/Observations/Satellite/TRMM/Diurnal/land_trmm_diurnal_average_lat_%s_%s_lon_%s_%s_bit_above_western_ghats" % (lat_low,lat_high, lon_low, lon_high), mean=mean_l, hour=hour_l )
| 27.4
| 204
| 0.717518
|
1cba168dc84ddb4447727536b922d1d547f11964
| 9,033
|
py
|
Python
|
pineboolib/application/database/pnsqlsavepoint.py
|
deavid/pineboo
|
acc96ab6d5b8bb182990af6dea4bf0986af15549
|
[
"MIT"
] | 2
|
2015-09-19T16:54:49.000Z
|
2016-09-12T08:06:29.000Z
|
pineboolib/application/database/pnsqlsavepoint.py
|
deavid/pineboo
|
acc96ab6d5b8bb182990af6dea4bf0986af15549
|
[
"MIT"
] | 1
|
2017-08-14T17:07:14.000Z
|
2017-08-15T00:22:47.000Z
|
pineboolib/application/database/pnsqlsavepoint.py
|
deavid/pineboo
|
acc96ab6d5b8bb182990af6dea4bf0986af15549
|
[
"MIT"
] | 9
|
2015-01-15T18:15:42.000Z
|
2019-05-05T18:53:00.000Z
|
# -*- coding: utf-8 -*-
"""
Module for PNSqlSavePoint class.
"""
from pineboolib.core import decorators
from typing import Any, List, Optional, TYPE_CHECKING
if TYPE_CHECKING:
from .pnsqlcursor import PNSqlCursor
from .pnbuffer import PNBuffer
class OpInfo:
"""
OpInfo Class.
Information about an operation.
The information of an operation is; the primary key,
operation performed (0 = insert, 1 = edit, 2 = delete),
buffer with the contents of the record affected by the operation,
position of the current cursor record,
cursor order, cursor filter, cursor name (from the table),
associated cursor.
"""
primaryKey: str
op: int
buffer: "PNBuffer"
at: int
sort: str
filter: str
name: str
cursor: "PNSqlCursor"
autoDelete_: bool
def __init__(self, *args, **kwargs) -> None:
"""Initialize a virtual save point."""
if len(args) > 0:
self.opInfo(*args)
self.setAutoDelete(False)
def opInfo(
self, pK: str, o: Any, b: "PNBuffer", a: int, s: str, f: str, n: str, c: "PNSqlCursor"
) -> None:
"""
Save initialization values.
@param pK. primaryKey.
@param o. option (1,2,3)
@param b. PNBuffer
@param a. cursor postition.
@param s. sort.
@param f. filter.
@param n. cursor name.
@param c. cursor object.
"""
self.primaryKey = pK
self.op = o
self.buffer = b
self.at = a
self.sort = s
self.filter = f
self.name = n
self.cursor = c
def setAutoDelete(self, b: bool) -> None:
"""I specify if I do autoDelete when closing."""
self.autoDelete_ = b
class PNSqlSavePoint:
"""
PNSqlSavePoint Class.
Safeguard point of a set of basic operations about cursors (insert, edit and delete).
Through this class you can save a group of basic operations
about cursors (insert, edit and delete).
Undo a safeguard point, means that all operations
stored are canceled by performing the necessary actions so that
They have no effect.
For proper operation you must keep the buffer's (QSqlRecord)
with the content of the records to be modified or modified by an operation,
indicating the name of the primary key and the cursor to which it belongs.
"""
"""
    Stack for storing information about the operations.
"""
opInfos: List[OpInfo] = []
"""
    Savepoint identifier.
"""
id_: int
countRefSavePoint = 0
def __init__(self, _id=None) -> None:
"""
Initialize the safeguard point.
@param id SavePoint identifier.
"""
self.opInfos.append(OpInfo())
self.opInfos[0].setAutoDelete(True)
self.id_ = _id
self.countRefSavePoint = self.countRefSavePoint + 1
def __del__(self) -> None:
"""Process when the savePoint point is destroyed."""
if self.opInfos:
self.opInfos = []
self.countRefSavePoint = self.countRefSavePoint - 1
def setId(self, id_: int) -> None:
"""
Set the SavePoint identifier.
@param id_. Identifier
"""
self.id_ = id_
def id(self) -> int:
"""
Return the identifier.
@return identifier.
"""
return self.id_
def clear(self) -> None:
"""
Clean the safeguard point.
All stored operations are deleted, therefore, after invoke this method can no longer be undone.
"""
self.opInfos.clear()
@decorators.BetaImplementation
def undo(self) -> None:
"""
Undo the SavePoint.
"""
while self.opInfos:
opInf = self.opInfos.pop()
if opInf.op == 0:
self.undoInsert(opInf)
if opInf.op == 1:
self.undoEdit(opInf)
if opInf.op == 2:
self.undoDel(opInf)
del opInf
self.clear()
@decorators.BetaImplementation
def saveInsert(
self, primaryKey: str, buffer: Optional["PNBuffer"], cursor: Optional["PNSqlCursor"]
) -> None:
"""
Save the buffer with the contents of the inserted record.
@param primaryKey Name of the field that is primary key.
@param buffer buffer with the contents of the record.
@param cursor Cursor associated.
"""
if not cursor or not buffer:
return
self.opInfos.append(
OpInfo(
primaryKey,
0,
buffer,
cursor.at(),
cursor.sort(),
cursor.filter(),
cursor.name,
cursor,
)
)
def saveEdit(
self, primaryKey: str, buffer: Optional["PNBuffer"], cursor: Optional["PNSqlCursor"]
) -> None:
"""
Save the buffer with the contents of the record to be edited.
@param primaryKey Name of the field that is primary key.
@param buffer buffer with the contents of the record.
@param cursor Cursor associated.
"""
if not cursor or not buffer:
return
self.opInfos.append(
OpInfo(
primaryKey,
1,
buffer,
cursor.at(),
cursor.sort(),
cursor.filter(),
cursor.name,
cursor,
)
)
@decorators.BetaImplementation
def saveDel(
self, primaryKey: str, buffer: Optional["PNBuffer"], cursor: Optional["PNSqlCursor"]
) -> None:
"""
Save the buffer with the contents of the record to be deleted.
@param primaryKey Name of the field that is primary key.
@param buffer buffer with the contents of the record.
@param cursor Cursor associated.
"""
if not cursor or not buffer:
return
self.opInfos.append(
OpInfo(
primaryKey,
2,
buffer,
cursor.at(),
cursor.sort(),
cursor.filter(),
cursor.name,
cursor,
)
)
@decorators.BetaImplementation
def undoInsert(self, opInf: OpInfo) -> None:
"""
Undo an insert operation.
@param opInf Operation information.
"""
cursor_ = opInf.cursor
owner = False
if not cursor_:
from . import pnsqlcursor
cursor_ = pnsqlcursor.PNSqlCursor(opInf.name)
cursor_.setForwardOnly(True)
owner = True
if not cursor_:
return
if opInf.buffer.indexField(opInf.primaryKey) and not opInf.buffer.isNull(opInf.primaryKey):
valuePrimaryKey = str(
opInf.buffer.value(opInf.primaryKey)
) # FIXME: (deavid) plz add notes on what needs to be fixed here.
ok = cursor_.select(opInf.primaryKey + "='" + valuePrimaryKey + "'")
if ok and cursor_.next():
cursor_.primeDelete()
if not owner:
cursor_.select(opInf.filter, opInf.sort)
cursor_.seek(opInf.at)
@decorators.BetaImplementation
def undoEdit(self, opInf: OpInfo) -> None:
"""
Undo an edit operation.
@param opInf Operation information.
"""
cursor_ = opInf.cursor
owner = False
if not cursor_:
from . import pnsqlcursor
cursor_ = pnsqlcursor.PNSqlCursor(opInf.name)
cursor_.setForwardOnly(True)
owner = True
if not cursor_:
return
valuePrimaryKey = str(opInf.buffer.value(opInf.primaryKey))
ok = cursor_.select(opInf.primaryKey + "='" + valuePrimaryKey + "'")
if ok and cursor_.next():
# buf = cursor_.primeUpdate()
# buf = opInf.buffer
cursor_.primeUpdate()
cursor_.update()
if not owner:
cursor_.select(opInf.filter, opInf.sort)
cursor_.seek(opInf.at)
else:
del cursor_
@decorators.BetaImplementation
def undoDel(self, opInf: OpInfo) -> None:
"""
Undo an delete operation.
@param opInf Operation information.
"""
cursor_ = opInf.cursor
owner = False
if not cursor_:
from . import pnsqlcursor
cursor_ = pnsqlcursor.PNSqlCursor(opInf.name)
cursor_.setForwardOnly(True)
owner = True
if not cursor_:
return
# buf = cursor_.primeInsert()
# buf = opInf.buffer
cursor_.primeInsert()
cursor_.insert()
if not owner:
cursor_.select(opInf.filter, opInf.sort)
cursor_.seek(opInf.at)
else:
del cursor_
| 26.335277
| 103
| 0.552972
|
0897edd60213c7028faf51cf5dad4953202a7b41
| 1,535
|
py
|
Python
|
py2neo/movies/models.py
|
srlabUsask/py2neo
|
80d3cf1ab0b4cfb03b7824fd7a407b33c95a1e8f
|
[
"Apache-2.0"
] | null | null | null |
py2neo/movies/models.py
|
srlabUsask/py2neo
|
80d3cf1ab0b4cfb03b7824fd7a407b33c95a1e8f
|
[
"Apache-2.0"
] | null | null | null |
py2neo/movies/models.py
|
srlabUsask/py2neo
|
80d3cf1ab0b4cfb03b7824fd7a407b33c95a1e8f
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright 2011-2020, Nigel Small
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from py2neo.ogm import GraphObject, Property, RelatedTo, RelatedFrom
class Movie(GraphObject):
__primarykey__ = "title"
title = Property()
tagline = Property()
released = Property()
actors = RelatedFrom("Person", "ACTED_IN")
directors = RelatedFrom("Person", "DIRECTED")
producers = RelatedFrom("Person", "PRODUCED")
writers = RelatedFrom("Person", "WROTE")
reviewers = RelatedFrom("Person", "REVIEWED")
def __lt__(self, other):
return self.title < other.title
class Person(GraphObject):
__primarykey__ = "name"
name = Property()
born = Property()
acted_in = RelatedTo(Movie)
directed = RelatedTo(Movie)
produced = RelatedTo(Movie)
wrote = RelatedTo(Movie)
reviewed = RelatedTo(Movie)
def __init__(self, name=None):
self.name = name
def __lt__(self, other):
return self.name < other.name
| 27.410714
| 74
| 0.69316
|
6f47c01fcbf94689d771ef38297c286c8c1dc500
| 5,665
|
py
|
Python
|
test/functional/combine_logs.py
|
AzusNodes/AZUS
|
758d33a9d8d967080e1eeca6947886412523d5cc
|
[
"MIT"
] | 1
|
2021-05-03T13:39:25.000Z
|
2021-05-03T13:39:25.000Z
|
test/functional/combine_logs.py
|
AzusNodes/AZUS
|
758d33a9d8d967080e1eeca6947886412523d5cc
|
[
"MIT"
] | null | null | null |
test/functional/combine_logs.py
|
AzusNodes/AZUS
|
758d33a9d8d967080e1eeca6947886412523d5cc
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""Combine logs from multiple bitcoin nodes as well as the test_framework log.
This streams the combined log output to stdout. Use combine_logs.py > outputfile
to write to an outputfile.
If no argument is provided, the most recent test directory will be used."""
import argparse
from collections import defaultdict, namedtuple
import heapq
import itertools
import os
import re
import sys
import tempfile
# N.B.: don't import any local modules here - this script must remain executable
# without the parent module installed.
# Should match same symbol in `test_framework.test_framework`.
TMPDIR_PREFIX = "azus_func_test_"
# Matches on the date format at the start of the log event
TIMESTAMP_PATTERN = re.compile(r"^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d{6}")
LogEvent = namedtuple('LogEvent', ['timestamp', 'source', 'event'])
def main():
"""Main function. Parses args, reads the log files and renders them as text or html."""
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
'testdir', nargs='?', default='',
help=('temporary test directory to combine logs from. '
'Defaults to the most recent'))
parser.add_argument('-c', '--color', dest='color', action='store_true', help='outputs the combined log with events colored by source (requires posix terminal colors. Use less -r for viewing)')
parser.add_argument('--html', dest='html', action='store_true', help='outputs the combined log as html. Requires jinja2. pip install jinja2')
args = parser.parse_args()
if args.color and os.name != 'posix':
print("Color output requires posix terminal colors.")
sys.exit(1)
if args.html and args.color:
print("Only one out of --color or --html should be specified")
sys.exit(1)
testdir = args.testdir or find_latest_test_dir()
if not args.testdir:
print("Opening latest test directory: {}".format(testdir), file=sys.stderr)
log_events = read_logs(testdir)
print_logs(log_events, color=args.color, html=args.html)
def read_logs(tmp_dir):
"""Reads log files.
Delegates to generator function get_log_events() to provide individual log events
for each of the input log files."""
files = [("test", "%s/test_framework.log" % tmp_dir)]
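    # Add one node debug.log per numbered node directory, stopping at the first missing one.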
for i in itertools.count():
logfile = "{}/node{}/regtest/debug.log".format(tmp_dir, i)
if not os.path.isfile(logfile):
break
files.append(("node%d" % i, logfile))
return heapq.merge(*[get_log_events(source, f) for source, f in files])
def find_latest_test_dir():
"""Returns the latest tmpfile test directory prefix."""
tmpdir = tempfile.gettempdir()
def join_tmp(basename):
return os.path.join(tmpdir, basename)
def is_valid_test_tmpdir(basename):
fullpath = join_tmp(basename)
return (
os.path.isdir(fullpath)
and basename.startswith(TMPDIR_PREFIX)
and os.access(fullpath, os.R_OK)
)
testdir_paths = [
join_tmp(name) for name in os.listdir(tmpdir) if is_valid_test_tmpdir(name)
]
return max(testdir_paths, key=os.path.getmtime) if testdir_paths else None
def get_log_events(source, logfile):
"""Generator function that returns individual log events.
Log events may be split over multiple lines. We use the timestamp
regex match as the marker for a new log event."""
try:
with open(logfile, 'r') as infile:
event = ''
timestamp = ''
for line in infile:
# skip blank lines
if line == '\n':
continue
# if this line has a timestamp, it's the start of a new log event.
time_match = TIMESTAMP_PATTERN.match(line)
if time_match:
if event:
yield LogEvent(timestamp=timestamp, source=source, event=event.rstrip())
event = line
timestamp = time_match.group()
# if it doesn't have a timestamp, it's a continuation line of the previous log.
else:
event += "\n" + line
# Flush the final event
yield LogEvent(timestamp=timestamp, source=source, event=event.rstrip())
except FileNotFoundError:
print("File %s could not be opened. Continuing without it." % logfile, file=sys.stderr)
def print_logs(log_events, color=False, html=False):
"""Renders the iterator of log events into text or html."""
if not html:
colors = defaultdict(lambda: '')
if color:
colors["test"] = "\033[0;36m" # CYAN
colors["node0"] = "\033[0;34m" # BLUE
colors["node1"] = "\033[0;32m" # GREEN
colors["node2"] = "\033[0;31m" # RED
colors["node3"] = "\033[0;33m" # YELLOW
colors["reset"] = "\033[0m" # Reset font color
for event in log_events:
print("{0} {1: <5} {2} {3}".format(colors[event.source.rstrip()], event.source, event.event, colors["reset"]))
else:
try:
import jinja2
except ImportError:
print("jinja2 not found. Try `pip install jinja2`")
sys.exit(1)
print(jinja2.Environment(loader=jinja2.FileSystemLoader('./'))
.get_template('combined_log_template.html')
.render(title="Combined Logs from testcase", log_events=[event._asdict() for event in log_events]))
if __name__ == '__main__':
main()
| 37.269737
| 196
| 0.630009
|
bb55ef547b3e5531bbccb478ef7ba69f7dff279f
| 48,638
|
py
|
Python
|
filebeat/tests/system/test_registrar.py
|
tetianakravchenko/beats
|
6aec024e0ab8239791be20885d6d3c58697d18cd
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2022-01-17T17:31:41.000Z
|
2022-01-17T17:31:41.000Z
|
filebeat/tests/system/test_registrar.py
|
tetianakravchenko/beats
|
6aec024e0ab8239791be20885d6d3c58697d18cd
|
[
"ECL-2.0",
"Apache-2.0"
] | 26
|
2021-11-04T11:17:36.000Z
|
2022-02-16T11:55:30.000Z
|
filebeat/tests/system/test_registrar.py
|
tetianakravchenko/beats
|
6aec024e0ab8239791be20885d6d3c58697d18cd
|
[
"ECL-2.0",
"Apache-2.0"
] | 2
|
2019-08-05T07:42:54.000Z
|
2020-09-02T14:23:19.000Z
|
#!/usr/bin/env python3
"""Test the registrar"""
import os
import platform
import re
import shutil
import stat
import time
import unittest
from filebeat import BaseTest
# Additional tests: to be implemented
# * Check if registrar file can be configured, set config param
# * Check "updating" of registrar file
# * Check what happens when registrar file is deleted
class Test(BaseTest):
"""Test class"""
def test_registrar_file_content(self):
"""Check if registrar file is created correctly and content is as expected
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*"
)
os.mkdir(self.working_dir + "/log/")
# Use \n as line terminator on all platforms per docs.
line = "hello world\n"
line_len = len(line) - 1 + len(os.linesep)
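# Text-mode writes translate '\n' to os.linesep, so each line occupies
# len(line) - 1 payload characters plus the platform line separator on disk.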
iterations = 5
testfile_path = self.working_dir + "/log/test.log"
testfile = open(testfile_path, 'w')
testfile.write(iterations * line)
testfile.close()
filebeat = self.start_beat()
count = self.log_contains_count("Registry file updated")
self.wait_until(
lambda: self.output_has(lines=5),
max_timeout=15)
# Make sure the "Registry file updated" message appears at least one more time
self.wait_until(
lambda: self.log_contains_count("Registry file updated") > count,
max_timeout=10)
# wait until the registry file exists. Needed to avoid a race between
# the logging and actual writing the file. Seems to happen on Windows.
self.wait_until(self.has_registry, max_timeout=1)
filebeat.check_kill_and_wait()
# Check that a single file exists in the registry.
data = self.get_registry()
assert len(data) == 1
logfile_abs_path = os.path.abspath(testfile_path)
record = self.get_registry_entry_by_path(logfile_abs_path)
self.assertEqual(logfile_abs_path, record.get('source'))
self.assertEqual(iterations * line_len, record.get('offset'))
self.assertTrue("FileStateOS" in record)
self.assertTrue("meta" not in record)
file_state_os = record["FileStateOS"]
if os.name == "nt":
# Windows checks
# TODO: Check for IdxHi, IdxLo, Vol in FileStateOS on Windows.
self.assertEqual(len(file_state_os), 3)
elif platform.system() == "SunOS":
stat = os.stat(logfile_abs_path)
self.assertEqual(file_state_os["inode"], stat.st_ino)
# Python does not return the same st_dev value as Golang or the
# command line stat tool so just check that it's present.
self.assertTrue("device" in file_state_os)
else:
stat = os.stat(logfile_abs_path)
self.assertEqual(stat.st_ino, file_state_os.get('inode'))
self.assertEqual(stat.st_dev, file_state_os.get('device'))
def test_registrar_files(self):
"""
Check that multiple files are put into registrar file
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*"
)
os.mkdir(self.working_dir + "/log/")
testfile_path1 = self.working_dir + "/log/test1.log"
testfile_path2 = self.working_dir + "/log/test2.log"
file1 = open(testfile_path1, 'w')
file2 = open(testfile_path2, 'w')
iterations = 5
for _ in range(0, iterations):
file1.write("hello world") # 11 chars
file1.write("\n") # 1 char
file2.write("goodbye world") # 11 chars
file2.write("\n") # 1 char
file1.close()
file2.close()
filebeat = self.start_beat()
self.wait_until(
lambda: self.output_has(lines=10),
max_timeout=15)
# wait until the registry file exists. Needed to avoid a race between
# the logging and actual writing the file. Seems to happen on Windows.
self.wait_until(self.has_registry, max_timeout=1)
filebeat.check_kill_and_wait()
# Check that the registry file exists
data = self.get_registry()
# Check that 2 files are part of the registrar file
assert len(data) == 2
def test_custom_registry_file_location(self):
"""
Check that when a custom registry file is used, the path
is created automatically.
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*",
registry_home="a/b/c/registry",
)
os.mkdir(self.working_dir + "/log/")
testfile_path = self.working_dir + "/log/test.log"
with open(testfile_path, 'w') as testfile:
testfile.write("hello world\n")
filebeat = self.start_beat()
self.wait_until(
lambda: self.output_has(lines=1),
max_timeout=15)
# wait until the registry file exists. Needed to avoid a race between
# the logging and actual writing the file. Seems to happen on Windows.
self.wait_until(
lambda: self.has_registry("a/b/c/registry/filebeat"),
max_timeout=1)
filebeat.check_kill_and_wait()
assert self.has_registry("a/b/c/registry/filebeat")
def test_registry_file_default_permissions(self):
"""
Test that filebeat default registry permission is set
"""
if os.name == "nt":
# This test is currently skipped on windows because file permission
# configuration isn't implemented on Windows yet
raise unittest.SkipTest
registry_home = "a/b/c/registry"
registry_path = os.path.join(registry_home, "filebeat")
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*",
registry_home=registry_home,
)
os.mkdir(self.working_dir + "/log/")
testfile_path = self.working_dir + "/log/test.log"
with open(testfile_path, 'w') as testfile:
testfile.write("hello world\n")
filebeat = self.start_beat()
self.wait_until(
lambda: self.output_has(lines=1),
max_timeout=15)
# wait until the registry file exists. Needed to avoid a race between
# the logging and actual writing the file. Seems to happen on Windows.
self.wait_until(
lambda: self.has_registry(registry_path),
max_timeout=1)
filebeat.check_kill_and_wait()
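# file_permissions() is assumed to return the mode as an octal string such as
# '0o600' (inferred from the comparison below).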
self.assertEqual(self.file_permissions(os.path.join(registry_path, "log.json")), "0o600")
def test_registry_file_custom_permissions(self):
"""
Test that filebeat registry permission is set as per configuration
"""
if os.name == "nt":
# This test is currently skipped on windows because file permission
# configuration isn't implemented on Windows yet
raise unittest.SkipTest
registry_home = "a/b/c/registry"
registry_path = os.path.join(registry_home, "filebeat")
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*",
registry_home=registry_home,
registry_file_permissions=0o640,
)
os.mkdir(self.working_dir + "/log/")
testfile_path = self.working_dir + "/log/test.log"
with open(testfile_path, 'w') as testfile:
testfile.write("hello world\n")
filebeat = self.start_beat()
self.wait_until(
lambda: self.output_has(lines=1),
max_timeout=15)
# wait until the registry file exists. Needed to avoid a race between
# the logging and actual writing the file. Seems to happen on Windows.
self.wait_until(
lambda: self.has_registry(registry_path),
max_timeout=1)
filebeat.check_kill_and_wait()
self.assertEqual(self.file_permissions(os.path.join(registry_path, "log.json")), "0o640")
def test_registry_file_update_permissions(self):
"""
Test that filebeat registry permission is updated along with configuration
"""
if os.name == "nt":
# This test is currently skipped on windows because file permission
# configuration isn't implemented on Windows yet
raise unittest.SkipTest
registry_home = "a/b/c/registry_x"
registry_path = os.path.join(registry_home, "filebeat")
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*",
registry_home=registry_home,
)
os.mkdir(self.working_dir + "/log/")
testfile_path = self.working_dir + "/log/test.log"
with open(testfile_path, 'w') as testfile:
testfile.write("hello world\n")
filebeat = self.start_beat()
self.wait_until(
lambda: self.output_has(lines=1),
max_timeout=15)
# wait until the registry file exists. Needed to avoid a race between
# the logging and actual writing the file. Seems to happen on Windows.
self.wait_until(
lambda: self.has_registry(registry_path),
max_timeout=1)
filebeat.check_kill_and_wait()
self.assertEqual(self.file_permissions(os.path.join(registry_path, "log.json")), "0o600")
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*",
registry_home="a/b/c/registry_x",
registry_file_permissions=0o640
)
filebeat = self.start_beat()
self.wait_until(
lambda: self.output_has(lines=1),
max_timeout=15)
# wait until the registry file exists. Needed to avoid a race between
# the logging and actual writing the file. Seems to happen on Windows.
self.wait_until(
lambda: self.has_registry(registry_path),
max_timeout=1)
# Wait a moment to make sure registry is completely written
time.sleep(1)
filebeat.check_kill_and_wait()
self.assertEqual(self.file_permissions(os.path.join(registry_path, "log.json")), "0o640")
@unittest.skipIf(platform.system() == 'Darwin' or os.name == 'nt',
'Flaky test: https://github.com/elastic/beats/issues/26378')
def test_rotating_file(self):
"""
Checks that the registry is properly updated after a file is rotated
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*",
close_inactive="1s"
)
os.mkdir(self.working_dir + "/log/")
testfile_path = self.working_dir + "/log/test.log"
filebeat = self.start_beat()
with open(testfile_path, 'w') as testfile:
testfile.write("offset 9\n")
self.wait_until(lambda: self.output_has(lines=1),
max_timeout=10)
testfilerenamed = self.working_dir + "/log/test.1.log"
os.rename(testfile_path, testfilerenamed)
with open(testfile_path, 'w') as testfile:
testfile.write("offset 10\n")
self.wait_until(lambda: self.output_has(lines=2),
max_timeout=10)
# Wait until rotation is detected
self.wait_until(
lambda: self.log_contains(
"Updating state for renamed file"),
max_timeout=10)
time.sleep(1)
filebeat.check_kill_and_wait()
# Check that the registry file exists
data = self.get_registry()
# Make sure the offsets are correctly set
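# "offset 9\n" is 9 bytes and "offset 10\n" is 10 bytes on Unix-like systems;
# Windows text mode adds one byte per line for the carriage return.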
if os.name == "nt":
assert self.get_registry_entry_by_path(os.path.abspath(testfile_path))["offset"] == 11
assert self.get_registry_entry_by_path(os.path.abspath(testfilerenamed))["offset"] == 10
else:
assert self.get_registry_entry_by_path(os.path.abspath(testfile_path))["offset"] == 10
assert self.get_registry_entry_by_path(os.path.abspath(testfilerenamed))["offset"] == 9
# Check that 2 files are part of the registrar file
assert len(data) == 2
def test_data_path(self):
"""
Checks that the registry file is written in a custom data path.
"""
self.render_config_template(
path=self.working_dir + "/test.log",
path_data=self.working_dir + "/datapath",
skip_registry_config=True,
)
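# path_data redirects filebeat's data directory to ./datapath; skip_registry_config
# (assumed to leave the registry path at its default) means the registry should end
# up under that custom data path, which the final assertion verifies.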
with open(self.working_dir + "/test.log", "w") as testfile:
testfile.write("test message\n")
filebeat = self.start_beat()
self.wait_until(lambda: self.output_has(lines=1))
filebeat.check_kill_and_wait()
assert self.has_registry(data_path=self.working_dir + "/datapath")
def test_rotating_file_inode(self):
"""
Check that inodes are properly written during file rotation
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/input*",
scan_frequency="1s",
close_inactive="1s",
clean_removed="false",
)
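# clean_removed is disabled so states of rotated-away and deleted files remain in
# the registry, which the final length assertion relies on.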
if os.name == "nt":
raise unittest.SkipTest
os.mkdir(self.working_dir + "/log/")
testfile_path = self.working_dir + "/log/input"
filebeat = self.start_beat()
with open(testfile_path, 'w') as testfile:
testfile.write("entry1\n")
self.wait_until(
lambda: self.output_has(lines=1),
max_timeout=10)
# Wait until rotation is detected
self.wait_until(
lambda: self.log_contains_count(
"Registry file updated. 1 active states") >= 1,
max_timeout=10)
data = self.get_registry()
assert os.stat(testfile_path).st_ino == self.get_registry_entry_by_path(
os.path.abspath(testfile_path))["FileStateOS"]["inode"]
testfilerenamed1 = self.working_dir + "/log/input.1"
os.rename(testfile_path, testfilerenamed1)
with open(testfile_path, 'w') as testfile:
testfile.write("entry2\n")
self.wait_until(
lambda: self.output_has(lines=2),
max_timeout=10)
# Wait until rotation is detected
self.wait_until(
lambda: self.log_contains_count(
"Updating state for renamed file") == 1,
max_timeout=10)
time.sleep(1)
data = self.get_registry()
assert os.stat(testfile_path).st_ino == self.get_registry_entry_by_path(
os.path.abspath(testfile_path))["FileStateOS"]["inode"]
assert os.stat(testfilerenamed1).st_ino == self.get_registry_entry_by_path(
os.path.abspath(testfilerenamed1))["FileStateOS"]["inode"]
# Rotate log file, create a new empty one and remove it afterwards
testfilerenamed2 = self.working_dir + "/log/input.2"
os.rename(testfilerenamed1, testfilerenamed2)
os.rename(testfile_path, testfilerenamed1)
with open(testfile_path, 'w') as testfile:
testfile.write("")
os.remove(testfilerenamed2)
with open(testfile_path, 'w') as testfile:
testfile.write("entry3\n")
self.wait_until(
lambda: self.output_has(lines=3),
max_timeout=10)
filebeat.check_kill_and_wait()
data = self.get_registry()
# Compare file inodes and the one in the registry
assert os.stat(testfile_path).st_ino == self.get_registry_entry_by_path(
os.path.abspath(testfile_path))["FileStateOS"]["inode"]
assert os.stat(testfilerenamed1).st_ino == self.get_registry_entry_by_path(
os.path.abspath(testfilerenamed1))["FileStateOS"]["inode"]
# Check that 3 files are part of the registrar file: the current file, the rotated
# one, and the removed one whose state is kept because clean_removed is disabled
assert len(data) == 3, "Expected 3 files but got: %s" % data
def test_restart_continue(self):
"""
Check that file reading continues after restart
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/input*",
scan_frequency="1s"
)
if os.name == "nt":
raise unittest.SkipTest
os.mkdir(self.working_dir + "/log/")
testfile_path = self.working_dir + "/log/input"
filebeat = self.start_beat()
with open(testfile_path, 'w') as testfile:
testfile.write("entry1\n")
self.wait_until(
lambda: self.output_has(lines=1),
max_timeout=10)
# Wait a moment to make sure registry is completely written
time.sleep(1)
assert os.stat(testfile_path).st_ino == self.get_registry_entry_by_path(
os.path.abspath(testfile_path))["FileStateOS"]["inode"]
filebeat.check_kill_and_wait()
# Store first registry file
registry_file = "registry/filebeat/log.json"
shutil.copyfile(
self.working_dir + "/" + registry_file,
self.working_dir + "/registry.first",
)
# Append file
with open(testfile_path, 'a') as testfile:
testfile.write("entry2\n")
filebeat = self.start_beat(output="filebeat2.log")
# Output file was rotated
self.wait_until(
lambda: self.output_has(lines=1, output_file="output/filebeat-" + self.today + ".ndjson"),
max_timeout=10)
self.wait_until(
lambda: self.output_has(lines=1, output_file="output/filebeat-" + self.today + "-1.ndjson"),
max_timeout=10)
filebeat.check_kill_and_wait()
data = self.get_registry()
# Compare file inodes and the one in the registry
assert os.stat(testfile_path).st_ino == self.get_registry_entry_by_path(
os.path.abspath(testfile_path))["FileStateOS"]["inode"]
# Check that exactly 1 file is part of the registrar file
assert len(data) == 1
output = self.read_output(output_file="output/filebeat-" + self.today + "-1.ndjson")
# Check that the rotated output file contains only the newly appended entry
assert len(output) == 1
assert output[0]["message"] == "entry2"
def test_rotating_file_with_restart(self):
"""
Check that inodes are properly written during file rotation and restart
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/input*",
scan_frequency="1s",
close_inactive="1s",
clean_removed="false"
)
if os.name == "nt":
raise unittest.SkipTest
os.mkdir(self.working_dir + "/log/")
testfile_path = self.working_dir + "/log/input"
filebeat = self.start_beat()
with open(testfile_path, 'w') as testfile:
testfile.write("entry1\n")
self.wait_until(
lambda: self.output_has(lines=1),
max_timeout=10)
# Wait a moment to make sure registry is completely written
time.sleep(1)
data = self.get_registry()
assert os.stat(testfile_path).st_ino == self.get_registry_entry_by_path(
os.path.abspath(testfile_path))["FileStateOS"]["inode"]
testfilerenamed1 = self.working_dir + "/log/input.1"
os.rename(testfile_path, testfilerenamed1)
with open(testfile_path, 'w') as testfile:
testfile.write("entry2\n")
self.wait_until(
lambda: self.output_has(lines=2),
max_timeout=10)
# Wait until rotation is detected
self.wait_until(
lambda: self.log_contains(
"Updating state for renamed file"),
max_timeout=10)
# Wait a moment to make sure registry is completely written
time.sleep(1)
data = self.get_registry()
assert os.stat(testfile_path).st_ino == self.get_registry_entry_by_path(
os.path.abspath(testfile_path))["FileStateOS"]["inode"]
assert os.stat(testfilerenamed1).st_ino == self.get_registry_entry_by_path(
os.path.abspath(testfilerenamed1))["FileStateOS"]["inode"]
filebeat.check_kill_and_wait()
# Store first registry file
registry_file = "registry/filebeat/log.json"
shutil.copyfile(
self.working_dir + "/" + registry_file,
self.working_dir + "/registry.first",
)
# Rotate log file, create a new empty one and remove it afterwards
testfilerenamed2 = self.working_dir + "/log/input.2"
os.rename(testfilerenamed1, testfilerenamed2)
os.rename(testfile_path, testfilerenamed1)
with open(testfile_path, 'w') as testfile:
testfile.write("")
os.remove(testfilerenamed2)
with open(testfile_path, 'w') as testfile:
testfile.write("entry3\n")
filebeat = self.start_beat()
# Output file was rotated
self.wait_until(
lambda: self.output_has(lines=2),
max_timeout=10)
self.wait_until(
lambda: self.output_has(lines=1, output_file="output/filebeat-" + self.today + "-1.ndjson"),
max_timeout=10)
filebeat.check_kill_and_wait()
data = self.get_registry()
# Compare file inodes and the one in the registry
assert os.stat(testfile_path).st_ino == self.get_registry_entry_by_path(
os.path.abspath(testfile_path))["FileStateOS"]["inode"]
assert os.stat(testfilerenamed1).st_ino == self.get_registry_entry_by_path(
os.path.abspath(testfilerenamed1))["FileStateOS"]["inode"]
# Check that 3 files are part of the registrar file: the current file, the rotated
# one, and the removed one whose state is kept because clean_removed is disabled
assert len(data) == 3
def test_state_after_rotation(self):
"""
Checks that the state is written correctly after rotation
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/input*",
ignore_older="2m",
scan_frequency="1s",
close_inactive="1s"
)
os.mkdir(self.working_dir + "/log/")
testfile_path1 = self.working_dir + "/log/input"
testfile_path2 = self.working_dir + "/log/input.1"
testfile_path3 = self.working_dir + "/log/input.2"
with open(testfile_path1, 'w') as testfile:
testfile.write("entry10\n")
with open(testfile_path2, 'w') as testfile:
testfile.write("entry0\n")
filebeat = self.start_beat()
self.wait_until(
lambda: self.output_has(lines=2),
max_timeout=10)
# Wait a moment to make sure file exists
time.sleep(1)
self.get_registry()
# Check that offsets are correct
if os.name == "nt":
# Under Windows the offset is +1 because of the additional newline char
assert self.get_registry_entry_by_path(os.path.abspath(testfile_path1))["offset"] == 9
assert self.get_registry_entry_by_path(os.path.abspath(testfile_path2))["offset"] == 8
else:
assert self.get_registry_entry_by_path(os.path.abspath(testfile_path1))["offset"] == 8
assert self.get_registry_entry_by_path(os.path.abspath(testfile_path2))["offset"] == 7
# Rotate files and remove old one
os.rename(testfile_path2, testfile_path3)
os.rename(testfile_path1, testfile_path2)
with open(testfile_path1, 'w') as testfile1:
testfile1.write("entry200\n")
# Remove the file afterwards to make sure no inode reuse happens
os.remove(testfile_path3)
# Now wait until rotation is detected
self.wait_until(
lambda: self.log_contains(
"Updating state for renamed file"),
max_timeout=10)
self.wait_until(
lambda: self.log_contains_count(
"Registry file updated. 2 active states.") >= 1,
max_timeout=15)
time.sleep(1)
filebeat.kill_and_wait()
# Check that offsets are correct
if os.name == "nt":
# Under Windows the offset is +1 because of the additional newline char
assert self.get_registry_entry_by_path(os.path.abspath(testfile_path1))["offset"] == 10
assert self.get_registry_entry_by_path(os.path.abspath(testfile_path2))["offset"] == 9
else:
assert self.get_registry_entry_by_path(os.path.abspath(testfile_path1))["offset"] == 9
assert self.get_registry_entry_by_path(os.path.abspath(testfile_path2))["offset"] == 8
def test_state_after_rotation_ignore_older(self):
"""
Checks that the state is written correctly after rotation and ignore older
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/input*",
ignore_older="2m",
scan_frequency="1s",
close_inactive="1s"
)
os.mkdir(self.working_dir + "/log/")
testfile_path1 = self.working_dir + "/log/input"
testfile_path2 = self.working_dir + "/log/input.1"
testfile_path3 = self.working_dir + "/log/input.2"
with open(testfile_path1, 'w') as testfile1:
testfile1.write("entry10\n")
with open(testfile_path2, 'w') as testfile2:
testfile2.write("entry0\n")
# Change modification time so file extends ignore_older
yesterday = time.time() - 3600 * 24
os.utime(testfile_path2, (yesterday, yesterday))
filebeat = self.start_beat()
self.wait_until(
lambda: self.output_has(lines=1),
max_timeout=10)
# Wait a moment to make sure file exists
time.sleep(1)
self.get_registry()
# Check that offsets are correct
if os.name == "nt":
# Under Windows the offset is +1 because of the additional newline char
assert self.get_registry_entry_by_path(os.path.abspath(testfile_path1))["offset"] == 9
else:
assert self.get_registry_entry_by_path(os.path.abspath(testfile_path1))["offset"] == 8
# Rotate files and remove old one
os.rename(testfile_path2, testfile_path3)
os.rename(testfile_path1, testfile_path2)
with open(testfile_path1, 'w') as testfile1:
testfile1.write("entry200\n")
# Remove the file afterwards to make sure no inode reuse happens
os.remove(testfile_path3)
# Now wait until rotation is detected
self.wait_until(
lambda: self.log_contains(
"Updating state for renamed file"),
max_timeout=10)
self.wait_until(
lambda: self.log_contains_count(
"Registry file updated. 2 active states.") >= 1,
max_timeout=15)
# Wait a moment to make sure registry is completely written
time.sleep(1)
filebeat.kill_and_wait()
# Check that offsets are correct
if os.name == "nt":
# Under Windows the offset is +1 because of the additional newline char
assert self.get_registry_entry_by_path(os.path.abspath(testfile_path1))["offset"] == 10
assert self.get_registry_entry_by_path(os.path.abspath(testfile_path2))["offset"] == 9
else:
assert self.get_registry_entry_by_path(os.path.abspath(testfile_path1))["offset"] == 9
assert self.get_registry_entry_by_path(os.path.abspath(testfile_path2))["offset"] == 8
@unittest.skipIf(os.name == 'nt' or platform.system() == "Darwin",
'flaky test https://github.com/elastic/beats/issues/8102')
def test_clean_inactive(self):
"""
Checks that states are properly removed after clean_inactive
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/input*",
clean_inactive="3s",
ignore_older="2s",
close_inactive="0.2s",
scan_frequency="0.1s"
)
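# Note: filebeat requires clean_inactive to be greater than ignore_older plus
# scan_frequency, which these values satisfy (3s > 2s + 0.1s).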
file1 = "input1"
file2 = "input2"
file3 = "input3"
self.input_logs.write(file1, "first file\n")
self.input_logs.write(file2, "second file\n")
filebeat = self.start_beat()
self.wait_until(lambda: self.output_has(lines=2), max_timeout=10)
# Wait until registry file is created
self.wait_until(lambda: self.registry.exists(), max_timeout=15)
assert self.registry.count() == 2
# Wait until states are removed from inputs
self.wait_until(self.logs.nextCheck("State removed for", count=2), max_timeout=15)
# Write new file to make sure registrar is flushed again
self.input_logs.write(file3, "third file\n")
self.wait_until(lambda: self.output_has(lines=3), max_timeout=30)
# Wait until state of new file is removed
self.wait_until(self.logs.nextCheck("State removed for"), max_timeout=15)
filebeat.check_kill_and_wait()
# Check that the first two files were removed from the registry
data = self.registry.load()
assert len(data) == 1, "Expected a single file but got: %s" % data
# Make sure the last file in the registry is the correct one and has the correct offset
assert data[0]["offset"] == self.input_logs.size(file3)
@unittest.skipIf(os.name == 'nt', 'flaky test https://github.com/elastic/beats/issues/7690')
def test_clean_removed(self):
"""
Checks that the state is removed for files which were removed
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/input*",
scan_frequency="0.1s",
clean_removed=True,
close_removed=True
)
file1 = "input1"
file2 = "input2"
self.input_logs.write(file1, "file to be removed\n")
self.input_logs.write(file2, "2\n")
filebeat = self.start_beat()
self.wait_until(lambda: self.output_has(lines=2), max_timeout=10)
# Wait until registry file is created
self.wait_until(self.registry.exists)
# Wait until registry is updated
self.wait_until(lambda: self.registry.count() == 2)
self.input_logs.remove(file1)
# Wait until states are removed from inputs
self.wait_until(self.logs.check("Remove state for file as file removed"))
# Add one more line to make sure registry is written
self.input_logs.append(file2, "make sure registry is written\n")
self.wait_until(lambda: self.output_has(lines=3), max_timeout=10)
# Make sure all states are cleaned up
self.wait_until(lambda: self.registry.count() == 1)
filebeat.check_kill_and_wait()
# Make sure the last file in the registry is the correct one and has the correct offset
data = self.registry.load()
assert data[0]["offset"] == self.input_logs.size(file2)
@unittest.skipIf(os.name == 'nt', 'flaky test https://github.com/elastic/beats/issues/10606')
def test_clean_removed_with_clean_inactive(self):
"""
Checks that the state is removed for files which were removed
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/input*",
scan_frequency="0.1s",
clean_removed=True,
clean_inactive="60s",
ignore_older="15s",
close_removed=True
)
file1 = "input1"
file2 = "input2"
contents2 = [
"2\n",
"make sure registry is written\n",
]
self.input_logs.write(file1, "file to be removed\n")
self.input_logs.write(file2, contents2[0])
filebeat = self.start_beat()
self.wait_until(lambda: self.output_has(lines=2), max_timeout=10)
# Wait until registry file is created
self.wait_until(
lambda: len(self.get_registry()) == 2, max_timeout=10)
self.input_logs.remove(file1)
# Wait until states are removed from inputs
self.wait_until(self.logs.nextCheck("Remove state for file as file removed"))
# Add one more line to make sure registry is written
self.input_logs.append(file2, contents2[1])
self.wait_until(lambda: self.output_has(lines=3))
# wait until next gc and until registry file has been updated
self.wait_until(self.logs.check("Before: 1, After: 1, Pending: 1"))
self.wait_until(
lambda: len(self.get_registry()) == 1, max_timeout=10)
filebeat.check_kill_and_wait()
# Check that the removed file was cleaned from the registry
data = self.registry.load()
assert len(data) == 1
# Make sure the last file in the registry is the correct one and has the correct offset
assert data[0]["offset"] == self.input_logs.size(file2)
def test_restart_state(self):
"""
Make sure that states are rewritten correctly on restart and cleaned
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*",
close_inactive="200ms",
ignore_older="2000ms",
)
init_files = ["test" + str(i) + ".log" for i in range(3)]
restart_files = ["test" + str(i + 3) + ".log" for i in range(1)]
for name in init_files:
self.input_logs.write(name, "Hello World\n")
filebeat = self.start_beat()
# Wait until the files are ignored because of ignore_older
self.wait_until(
self.logs.check("Ignore file because ignore_older"),
max_timeout=10)
filebeat.check_kill_and_wait()
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*",
close_inactive="200ms",
ignore_older="2000ms",
clean_inactive="3s",
)
filebeat = self.start_beat()
logs = self.log_access()
# Write additional file
for name in restart_files:
self.input_logs.write(name, "Hello World\n")
# Make sure all 4 states are persisted
self.wait_until(logs.nextCheck("input states cleaned up. Before: 4, After: 4"))
# Wait until registry file is cleaned
self.wait_until(logs.nextCheck("input states cleaned up. Before: 0, After: 0"))
filebeat.check_kill_and_wait()
def test_restart_state_reset(self):
"""
Test that ttl is reset after restart when no input covers the existing state
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*",
clean_inactive="10s",
ignore_older="5s"
)
os.mkdir(self.working_dir + "/log/")
testfile_path = self.working_dir + "/log/test.log"
with open(testfile_path, 'w') as testfile:
testfile.write("Hello World\n")
filebeat = self.start_beat()
# Wait until state written
self.wait_until(
lambda: self.output_has(lines=1),
max_timeout=30)
filebeat.check_kill_and_wait()
# Check that ttl > 0 was set because of clean_inactive
data = self.get_registry()
assert len(data) == 1
assert data[0]["ttl"] > 0
# New config file which does not match the existing state
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/test2.log",
clean_inactive="10s",
ignore_older="5s",
)
filebeat = self.start_beat(output="filebeat2.log")
# Wait until inputs are started
self.wait_until(
lambda: self.log_contains_count(
"Starting input", logfile="filebeat2.log") >= 1,
max_timeout=10)
filebeat.check_kill_and_wait()
# Check that ttl was reset correctly
data = self.get_registry()
assert len(data) == 1
assert data[0]["ttl"] == -2
def test_restart_state_reset_ttl(self):
"""
Test that ttl is reset after restart if clean_inactive changes
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/test.log",
clean_inactive="20s",
ignore_older="15s"
)
os.mkdir(self.working_dir + "/log/")
testfile_path = self.working_dir + "/log/test.log"
with open(testfile_path, 'w') as testfile:
testfile.write("Hello World\n")
filebeat = self.start_beat()
# Wait until state written
self.wait_until(
lambda: self.output_has(lines=1),
max_timeout=30)
self.wait_until(lambda: self.registry.count() == 1, max_timeout=10)
filebeat.check_kill_and_wait()
# Check that ttl > 0 was set because of clean_inactive
data = self.get_registry()
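# ttl is persisted in nanoseconds, so clean_inactive=20s shows up as 20 * 10^9.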
assert data[0]["ttl"] == 20 * 1000 * 1000 * 1000
# New config file which does not match the existing clean_inactive
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/test.log",
clean_inactive="40s",
ignore_older="20s",
)
filebeat = self.start_beat(output="filebeat2.log")
# Wait until new state is written
self.wait_until(
lambda: self.log_contains("Registry file updated",
logfile="filebeat2.log"), max_timeout=10)
filebeat.check_kill_and_wait()
# Check that ttl was reset correctly
data = self.get_registry()
assert len(data) == 1
assert data[0]["ttl"] == 40 * 1000 * 1000 * 1000
def test_restart_state_reset_ttl_with_space(self):
"""
Test that ttl is reset after restart if clean_inactive changes
This time it is tested with a space in the filename to see if everything is loaded as
expected
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/test file.log",
clean_inactive="20s",
ignore_older="15s"
)
os.mkdir(self.working_dir + "/log/")
testfile_path = self.working_dir + "/log/test file.log"
with open(testfile_path, 'w') as testfile:
testfile.write("Hello World\n")
filebeat = self.start_beat()
# Wait until state written
self.wait_until(
lambda: self.output_has(lines=1),
max_timeout=30)
self.wait_until(lambda: self.registry.count() == 1)
filebeat.check_kill_and_wait()
# Check that ttl > 0 was set because of clean_inactive
data = self.get_registry()
assert data[0]["ttl"] == 20 * 1000 * 1000 * 1000
# New config file with a different clean_inactive
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/test file.log",
clean_inactive="40s",
ignore_older="5s",
)
filebeat = self.start_beat(output="filebeat2.log")
# Wait until new state is written
self.wait_until(
lambda: self.log_contains("Registry file updated",
logfile="filebeat2.log"), max_timeout=10)
filebeat.check_kill_and_wait()
# Check that ttl was reset correctly
data = self.get_registry()
assert len(data) == 1
assert data[0]["ttl"] == 40 * 1000 * 1000 * 1000
def test_restart_state_reset_ttl_no_clean_inactive(self):
"""
Test that ttl is reset after restart if clean_inactive is disabled
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/test.log",
clean_inactive="10s",
ignore_older="5s"
)
os.mkdir(self.working_dir + "/log/")
testfile_path = self.working_dir + "/log/test.log"
with open(testfile_path, 'w') as testfile:
testfile.write("Hello World\n")
filebeat = self.start_beat()
# Wait until state written
self.wait_until(
lambda: self.output_has(lines=1),
max_timeout=30)
filebeat.check_kill_and_wait()
# Check that ttl > 0 was set because of clean_inactive
data = self.get_registry()
assert len(data) == 1
assert data[0]["ttl"] == 10 * 1000 * 1000 * 1000
# New config without clean_inactive
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/test.log",
)
filebeat = self.start_beat(output="filebeat2.log")
# Wait until the registry file has been rewritten
self.wait_until(
lambda: self.log_contains("Registry file updated",
logfile="filebeat2.log"), max_timeout=10)
filebeat.check_kill_and_wait()
# Check that ttl was reset correctly
data = self.get_registry()
assert len(data) == 1
assert data[0]["ttl"] == -1
def test_ignore_older_state(self):
"""
Check that state is also persisted for files falling under ignore_older on startup
without a previous state
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*",
close_inactive="1s",
ignore_older="1s",
)
os.mkdir(self.working_dir + "/log/")
testfile_path1 = self.working_dir + "/log/test.log"
with open(testfile_path1, 'w') as testfile1:
testfile1.write("Hello World\n")
time.sleep(1)
filebeat = self.start_beat()
# Make sure file falls under ignore_older
self.wait_until(
lambda: self.log_contains("Ignore file because ignore_older reached"),
max_timeout=10)
# Make sure state is loaded for file
self.wait_until(
lambda: self.log_contains("Before: 1, After: 1"),
max_timeout=10)
# Make sure state is written
self.wait_until(
lambda: len(self.get_registry()) == 1, max_timeout=10)
filebeat.check_kill_and_wait()
data = self.get_registry()
assert len(data) == 1
# Check that offset is set to the end of the file
assert data[0]["offset"] == os.path.getsize(testfile_path1)
@unittest.skipIf(platform.system() == 'Darwin', 'Flaky test: https://github.com/elastic/beats/issues/22407')
def test_ignore_older_state_clean_inactive(self):
"""
Check that state for ignore_older is not persisted when falling under clean_inactive
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*",
close_inactive="1s",
clean_inactive="2s",
ignore_older="1s",
)
os.mkdir(self.working_dir + "/log/")
testfile_path1 = self.working_dir + "/log/test.log"
with open(testfile_path1, 'w') as testfile1:
testfile1.write("Hello World\n")
time.sleep(2)
filebeat = self.start_beat()
# Make sure file falls under ignore_older
self.wait_until(
lambda: self.log_contains("Ignore file because ignore_older reached"),
max_timeout=10)
self.wait_until(
lambda: self.log_contains(
"Do not write state for ignore_older because clean_inactive reached"),
max_timeout=10)
# Make sure state is loaded for file
self.wait_until(
lambda: self.log_contains("Before: 0, After: 0"),
max_timeout=10)
# Make sure state is written
self.wait_until(
lambda: len(self.get_registry()) == 0, max_timeout=10)
filebeat.check_kill_and_wait()
data = self.get_registry()
assert len(data) == 0
def test_registrar_files_with_input_level_processors(self):
"""
Check that multiple files are put into registrar file with drop event processor
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*",
input_processors=[{
"drop_event": {},
}]
)
os.mkdir(self.working_dir + "/log/")
testfile_path1 = self.working_dir + "/log/test1.log"
testfile_path2 = self.working_dir + "/log/test2.log"
file1 = open(testfile_path1, 'w')
file2 = open(testfile_path2, 'w')
iterations = 5
for _ in range(0, iterations):
file1.write("hello world") # 11 chars
file1.write("\n") # 1 char
file2.write("goodbye world") # 11 chars
file2.write("\n") # 1 char
file1.close()
file2.close()
filebeat = self.start_beat()
# wait until the registry file exists. Needed to avoid a race between
# the logging and actual writing the file. Seems to happen on Windows.
self.wait_until(
self.has_registry,
max_timeout=10)
# Wait a moment to make sure registry is completely written
time.sleep(2)
filebeat.check_kill_and_wait()
# Check that the registry file exists
data = self.get_registry()
# Check that 2 files are part of the registrar file
assert len(data) == 2
logfile_abs_path = os.path.abspath(testfile_path1)
record = self.get_registry_entry_by_path(logfile_abs_path)
self.assertEqual(logfile_abs_path, record.get('source'))
self.assertEqual(iterations * (len("hello world") + len(os.linesep)), record.get('offset'))
self.assertTrue("FileStateOS" in record)
file_state_os = record["FileStateOS"]
if os.name == "nt":
# Windows checks
# TODO: Check for IdxHi, IdxLo, Vol in FileStateOS on Windows.
self.assertEqual(len(file_state_os), 3)
elif platform.system() == "SunOS":
stat = os.stat(logfile_abs_path)
self.assertEqual(file_state_os["inode"], stat.st_ino)
# Python does not return the same st_dev value as Golang or the
# command line stat tool so just check that it's present.
self.assertTrue("device" in file_state_os)
else:
stat = os.stat(logfile_abs_path)
self.assertEqual(stat.st_ino, file_state_os.get('inode'))
self.assertEqual(stat.st_dev, file_state_os.get('device'))
def test_registrar_meta(self):
"""
Check that multiple entries for the same file are on the registry when they have
different meta
"""
self.render_config_template(
type='container',
input_raw='''
paths: {path}
stream: stdout
- type: container
paths: {path}
stream: stderr
'''.format(path=os.path.abspath(self.working_dir) + "/log/*/*.log")
)
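# Two container inputs read the same path but filter on different streams, so the
# registry is expected to hold one state per (source, meta) combination.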
os.mkdir(self.working_dir + "/log/")
os.mkdir(self.working_dir + "/log/container_id")
testfile_path1 = self.working_dir + "/log/container_id/test.log"
with open(testfile_path1, 'w') as f:
for i in range(0, 10):
f.write('{"log":"hello\\n","stream":"stdout","time":"2018-04-13T13:39:57.924216596Z"}\n')
f.write('{"log":"hello\\n","stream":"stderr","time":"2018-04-13T13:39:57.924216596Z"}\n')
filebeat = self.start_beat()
self.wait_until(
lambda: self.output_has(lines=20),
max_timeout=15)
# wait until the registry file exists. Needed to avoid a race between
# the logging and actual writing the file. Seems to happen on Windows.
self.wait_until(self.has_registry, max_timeout=1)
filebeat.check_kill_and_wait()
# Check registry contains 2 entries with meta
data = self.get_registry()
assert len(data) == 2
assert data[0]["source"] == data[1]["source"]
assert data[0]["meta"]["stream"] in ("stdout", "stderr")
assert data[1]["meta"]["stream"] in ("stdout", "stderr")
assert data[0]["meta"]["stream"] != data[1]["meta"]["stream"]
| 35.067051
| 112
| 0.608886
|
7e79f83009a80ea072e910c9f0ed15f6768fb670
| 3,123
|
py
|
Python
|
tests/functional/test_inc_push.py
|
kyocum/disdat-luigi
|
bd6a9733c053ebb114151e47ea18062f34b64000
|
[
"Apache-2.0"
] | null | null | null |
tests/functional/test_inc_push.py
|
kyocum/disdat-luigi
|
bd6a9733c053ebb114151e47ea18062f34b64000
|
[
"Apache-2.0"
] | null | null | null |
tests/functional/test_inc_push.py
|
kyocum/disdat-luigi
|
bd6a9733c053ebb114151e47ea18062f34b64000
|
[
"Apache-2.0"
] | null | null | null |
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Test Incremental Push
Use API to create a bundle with some files
push to remote context
author: Kenneth Yocum
"""
import boto3
import luigi
import moto
import pytest
from disdatluigi.pipe import PipeTask
import disdat.api as api
import disdatluigi.api as dlapi
from tests.functional.common import TEST_CONTEXT
TEST_REMOTE = '__test_remote_context__'
TEST_BUCKET = 'test-bucket'
TEST_BUCKET_URL = "s3://{}".format(TEST_BUCKET)
class APush(PipeTask):
def pipe_requires(self):
self.set_bundle_name('a')
def pipe_run(self):
target = self.create_output_file('a.txt')
with target.open('w') as output:
output.write('Hi!')
return {'file': [target.path]}
class BPush(PipeTask):
n = luigi.IntParameter()
def pipe_requires(self):
self.set_bundle_name('b')
self.add_dependency('a', APush, params={})
def pipe_run(self, a):
target = self.create_output_file('b.txt')
a_path = a['file'][0]
with open(a_path) as f:
print(f.read())
with target.open('w') as output:
output.write(str(self.n))
return {'file': [target.path]}
class CPush(PipeTask):
n = luigi.IntParameter(default=2)
def pipe_requires(self):
self.set_bundle_name('c')
self.add_dependency('b', BPush, params={'n': self.n})
def pipe_run(self, b=None):
# Barf!
raise Exception
@moto.mock_s3
def test_add_with_treat_as_bundle():
api.delete_context(TEST_CONTEXT)
api.context(context_name=TEST_CONTEXT)
# Setup moto s3 resources
s3_client = boto3.client('s3')
s3_resource = boto3.resource('s3')
s3_resource.create_bucket(Bucket=TEST_BUCKET)
# Make sure bucket is empty
objects = s3_client.list_objects(Bucket=TEST_BUCKET)
assert 'Contents' not in objects, 'Bucket should be empty'
# Bind remote context
api.remote(TEST_CONTEXT, TEST_REMOTE, TEST_BUCKET_URL)
# Try to run the pipeline - should fail
try:
# Run test pipeline
dlapi.apply(TEST_CONTEXT, CPush, incremental_push=True)
except Exception:
pass
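# The failure raised by CPush is expected; with incremental_push=True the upstream
# bundles produced by APush and BPush should already have been pushed to the remote
# before the pipeline aborts.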
# Get objects from remote
objects = s3_client.list_objects(Bucket=TEST_BUCKET)
keys = [o['Key'] for o in objects['Contents']]
keys = [key.split('/')[-1] for key in keys]
# Make sure files exist in S3
for output_file in ['a.txt', 'b.txt']:
assert output_file in keys, 'Pipeline should have pushed file'
api.delete_context(TEST_CONTEXT)
if __name__ == '__main__':
pytest.main([__file__])
| 26.025
| 74
| 0.679795
|
d6801dc35e6011c2ee6b771c7c451c87014d8446
| 3,435
|
py
|
Python
|
test/functional/wallet_zapwallettxes.py
|
limitstory/pyeongtaekcoin
|
c77889d1ec25759b67fab17180f17eb8f96bbaa1
|
[
"MIT"
] | null | null | null |
test/functional/wallet_zapwallettxes.py
|
limitstory/pyeongtaekcoin
|
c77889d1ec25759b67fab17180f17eb8f96bbaa1
|
[
"MIT"
] | null | null | null |
test/functional/wallet_zapwallettxes.py
|
limitstory/pyeongtaekcoin
|
c77889d1ec25759b67fab17180f17eb8f96bbaa1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014-2018 The Pyeongtaekcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the zapwallettxes functionality.
- start two pyeongtaekcoind nodes
- create two transactions on node 0 - one is confirmed and one is unconfirmed.
- restart node 0 and verify that both the confirmed and the unconfirmed
transactions are still available.
- restart node 0 with zapwallettxes and persistmempool, and verify that both
the confirmed and the unconfirmed transactions are still available.
- restart node 0 with just zapwallettxes and verify that the confirmed
transactions are still available, but that the unconfirmed transaction has
been zapped.
"""
from test_framework.test_framework import PyeongtaekcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
wait_until,
)
class ZapWalletTXesTest (PyeongtaekcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
self.log.info("Mining blocks...")
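# Node 0 mines a single block; node 1 then mines 100 more so that node 0's
# coinbase matures and can be spent below.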
self.nodes[0].generate(1)
self.sync_all()
self.nodes[1].generate(100)
self.sync_all()
# This transaction will be confirmed
txid1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 10)
self.nodes[0].generate(1)
self.sync_all()
# This transaction will not be confirmed
txid2 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 20)
# Confirmed and unconfirmed transactions are now in the wallet.
assert_equal(self.nodes[0].gettransaction(txid1)['txid'], txid1)
assert_equal(self.nodes[0].gettransaction(txid2)['txid'], txid2)
# Stop-start node0. Both confirmed and unconfirmed transactions remain in the wallet.
self.stop_node(0)
self.start_node(0)
assert_equal(self.nodes[0].gettransaction(txid1)['txid'], txid1)
assert_equal(self.nodes[0].gettransaction(txid2)['txid'], txid2)
# Stop node0 and restart with zapwallettxes and persistmempool. The unconfirmed
# transaction is zapped from the wallet, but is re-added when the mempool is reloaded.
self.stop_node(0)
self.start_node(0, ["-persistmempool=1", "-zapwallettxes=2"])
wait_until(lambda: self.nodes[0].getmempoolinfo()['size'] == 1, timeout=3)
self.nodes[0].syncwithvalidationinterfacequeue() # Flush mempool to wallet
assert_equal(self.nodes[0].gettransaction(txid1)['txid'], txid1)
assert_equal(self.nodes[0].gettransaction(txid2)['txid'], txid2)
# Stop node0 and restart with zapwallettxes, but not persistmempool.
# The unconfirmed transaction is zapped and is no longer in the wallet.
self.stop_node(0)
self.start_node(0, ["-zapwallettxes=2"])
# tx1 is still available because it was confirmed
assert_equal(self.nodes[0].gettransaction(txid1)['txid'], txid1)
# This will raise an exception because the unconfirmed transaction has been zapped
assert_raises_rpc_error(-5, 'Invalid or non-wallet transaction id', self.nodes[0].gettransaction, txid2)
if __name__ == '__main__':
ZapWalletTXesTest().main()
| 41.385542
| 112
| 0.710335
|
3e926540f2fcc774497cc0e11061b74ccac7798d
| 17,472
|
py
|
Python
|
tests/model/fvcom_test.py
|
noaa-ocs-modeling/thyme
|
98f036bd49f8f3bcfc13c0593cd887224d971ac5
|
[
"BSD-2-Clause"
] | 5
|
2019-07-09T15:18:52.000Z
|
2020-06-03T02:57:50.000Z
|
tests/model/fvcom_test.py
|
noaa-ocs-modeling/thyme
|
98f036bd49f8f3bcfc13c0593cd887224d971ac5
|
[
"BSD-2-Clause"
] | null | null | null |
tests/model/fvcom_test.py
|
noaa-ocs-modeling/thyme
|
98f036bd49f8f3bcfc13c0593cd887224d971ac5
|
[
"BSD-2-Clause"
] | 2
|
2019-10-10T09:54:47.000Z
|
2020-05-27T19:11:12.000Z
|
from collections import namedtuple
import numpy
import pytest
from thyme.model.fvcom import node_to_centroid
from thyme.model.fvcom import vertical_interpolation
VerticalValues = namedtuple(
'VerticalValues',
['u',
'v',
'h',
'zeta',
'siglay_centroid',
'num_nele',
'num_siglay',
'time_index',
'target_depth_default',
'target_depth_surface',
'target_depth_deep',
'expected_u_target_depth_default',
'expected_v_target_depth_default',
'expected_u_target_depth_surface',
'expected_v_target_depth_surface',
'expected_u_target_depth_deep',
'expected_v_target_depth_deep'])
@pytest.fixture
def vertical_values():
time_index = 0
num_nele = 7
num_siglay = 20
target_depth_default = 4.5
target_depth_surface = 0
target_depth_deep = 15
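# Assumed units: siglay values are fractional depths from roughly 0 (surface) to
# -1 (bottom), and the target depths are metres below the free surface.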
u = numpy.array(
[[
[-0.7951693, 0.1987104, 0.20282207, 0.22636837, 0.22338444, 0.22267905, 0.25851583],
[-0.79512405, 0.17984135, 0.18718982, 0.20845266, 0.20881361, 0.2097686, 0.24099022],
[-0.79499704, 0.11546519, 0.11235027, 0.12845743, 0.14677778, 0.17036788, 0.14718369],
[-0.7947852, -0.04345991, -0.0362343, -0.01943639, 0.03145187, 0.05482485, 0.01756552],
[-0.794485, -0.11175916, -0.10817654, -0.07648413, -0.05387694, -0.02363495, -0.07206099],
[-0.79409266, -0.15002064, -0.1458673, -0.11296615, -0.09673776, -0.07272214, -0.11500881],
[-0.7936041, -0.18279588, -0.17216434, -0.15122096, -0.13439055, -0.12422991, -0.15305918],
[-0.7930144, -0.22257489, -0.21475703, -0.19943172, -0.19317457, -0.18883257, -0.19185503],
[-0.7923182, -0.26589555, -0.26381484, -0.2563959, -0.2623669, -0.2418433, -0.22976391],
[-0.7915093, -0.29618508, -0.3025776, -0.30066454, -0.30951524, -0.2785821, -0.2603346],
[-0.62858844, -0.31844422, -0.3287656, -0.32818496, -0.34010696, -0.2991507, -0.28074494],
[-0.59509104, -0.3200005, -0.3282557, -0.33371437, -0.3424295, -0.3079574, -0.2808675],
[-0.5648844, -0.28674564, -0.29246253, -0.3045539, -0.3127165, -0.2805328, -0.26253417],
[-0.53362864, -0.2308568, -0.23583505, -0.25023454, -0.25649628, -0.2430087, -0.21772045],
[-0.49928024, -0.1515698, -0.17267239, -0.1813446, -0.19186072, -0.19516239, -0.16393802],
[-0.45988485, -0.13119768, -0.10612836, -0.11343177, -0.12980227, -0.1302748, -0.12355448],
[-0.41265842, -0.11103977, -0.09538705, -0.09509068, -0.11300715, -0.10742138, -0.10589793],
[-0.35249037, -0.09441119, -0.0877323, -0.08854523, -0.10422827, -0.09832109, -0.098252],
[-0.26714894, -0.08462795, -0.0790456, -0.07945603, -0.09364957, -0.08832049, -0.08927543],
[-0.10820818, -0.06875627, -0.06421904, -0.06397585, -0.07394486, -0.07045165, -0.07252091]
]])
v = numpy.array(
[[
[-0.6447422, 0.04272371, 0.03626542, 0.03629172, 0.02365562, 0.04447182, 0.02940587],
[-0.64464253, 0.04550566, 0.03864311, 0.03937801, 0.02540983, 0.04566612, 0.03169739],
[-0.6444702, 0.05464792, 0.05194416, 0.05265207, 0.03223358, 0.04813668, 0.0406685],
[-0.6442224, 0.05445727, 0.05883322, 0.04983669, 0.0348991, 0.03746793, 0.04016836],
[-0.64389604, 0.00415152, -0.00149629, -0.00077556, 0.01316911, 0.00495433, 0.01126673],
[-0.6434879, -0.04056849, -0.0399819, -0.02377908, -0.01824965, -0.0271148, -0.02322751],
[-0.64299417, -0.05177833, -0.05065423, -0.04175215, -0.03915307, -0.05457062, -0.0499136],
[-0.6424107, -0.0646683, -0.0626291, -0.05882293, -0.06086947, -0.07759117, -0.06596889],
[-0.64173275, -0.06379095, -0.06311549, -0.06392502, -0.07486805, -0.08554274, -0.06285658],
[-0.6409548, -0.05030349, -0.05021876, -0.05602735, -0.06358989, -0.07058521, -0.04798502],
[-0.48587024, -0.02668872, -0.02443161, -0.03338933, -0.03849523, -0.04692423, -0.03141288],
[-0.45516193, -0.0020759, 0.00556405, -0.00915105, -0.01343074, -0.02884087, -0.01940906],
[-0.4284155, 0.0134382, 0.01965301, 0.00118551, -0.00354341, -0.0235792, -0.01681828],
[-0.40160605, 0.00625094, 0.0087254, -0.00298712, -0.01015626, -0.03078334, -0.02404925],
[-0.3729973, -0.02794126, -0.02092287, -0.02809138, -0.03298125, -0.04707557, -0.04252515],
[-0.34107724, -0.02949184, -0.03453156, -0.03266618, -0.04048001, -0.05651411, -0.03880842],
[-0.30380374, -0.02897151, -0.0317935, -0.02676859, -0.03367185, -0.05099317, -0.03234524],
[-0.2575078, -0.02654107, -0.02930111, -0.0243634, -0.03028261, -0.04743995, -0.02939028],
[-0.19346614, -0.02417713, -0.0263748, -0.02134923, -0.02650077, -0.04303462, -0.02615478],
[-0.07716001, -0.01982844, -0.02149794, -0.01682694, -0.02036272, -0.03471823, -0.02087618]
]])
h = numpy.array(
[
1.372000018755595, 1.91399347, 4.6028045, 3.29994912, 30.32372402, 9.3237240, 12.19253056
])
zeta = numpy.array(
[
-1.1558676163355508, -0.1443424, -0.14408446, -0.15902775, -0.16805193, -0.16938841, -0.16457143,
])
siglay_centroid = numpy.array(
[
[-0.025, -0.02500004, -0.02500004, -0.02500004, -0.02500004, -0.02500004, -0.02500004],
[-0.075, -0.07500005, -0.07500005, -0.07500005, -0.07500005, -0.07500005, -0.07500005],
[-0.125, -0.12500003, -0.12500003, -0.12500003, -0.12500003, -0.12500003, -0.12500003],
[-0.17500001, -0.17500001, -0.17500001, -0.17500001, -0.17500001, -0.17500001, -0.17500001],
[-0.225, -0.22500002, -0.22500002, -0.22500002, -0.22500002, -0.22500002, -0.22500002],
[-0.275, -0.27500007, -0.27500007, -0.27500007, -0.27500007, -0.27500007, -0.27500007],
[-0.325, -0.32500005, -0.32500005, -0.32500005, -0.32500005, -0.32500005, -0.32500005],
[-0.375, -0.37500003, -0.37500003, -0.37500003, -0.37500003, -0.37500003, -0.37500003],
[-0.425, -0.42500004, -0.42500004, -0.42500004, -0.42500004, -0.42500004, -0.42500004],
[-0.475, -0.47500002, -0.47500002, -0.47500002, -0.47500002, -0.47500002, -0.47500002],
[-0.525, -0.52500004, -0.52500004, -0.52500004, -0.52500004, -0.52500004, -0.52500004],
[-0.57500005, -0.57500005, -0.57500005, -0.57500005, -0.57500005, -0.57500005, -0.57500005],
[-0.625, -0.6250001, -0.6250001, -0.6250001, -0.6250001, -0.6250001, -0.6250001],
[-0.67499995, -0.6750001, -0.6750001, -0.6750001, -0.6750001, -0.6750001, -0.6750001],
[-0.725, -0.725, -0.725, -0.725, -0.725, -0.725, -0.725],
[-0.775, -0.7750001, -0.7750001, -0.7750001, -0.7750001, -0.7750001, -0.7750001],
[-0.82500005, -0.82500005, -0.82500005, -0.82500005, -0.82500005, -0.82500005, -0.82500005],
[-0.875, -0.87500006, -0.87500006, -0.87500006, -0.87500006, -0.87500006, -0.87500006],
[-0.92499995, -0.9250001, -0.9250001, -0.9250001, -0.9250001, -0.9250001, -0.9250001],
[-0.975, -0.975, -0.975, -0.975, -0.975, -0.975, -0.975]
])
expected_u_target_depth_default = numpy.array(
[
-0.7100488057968346, -0.30731464, -0.31567158, -0.31442473, 0.09090091, -0.28539867, -0.19117865
])
expected_v_target_depth_default = numpy.array(
[
-0.5634124710903746, -0.03849612, -0.0373252, -0.04470835, 0.03352506, -0.06274381, -0.06568897
])
expected_u_target_depth_surface = numpy.array(
[
-0.79519192, 0.20814494, 0.21063821, 0.23532624, 0.23066987, 0.22913428, 0.26727865
])
expected_v_target_depth_surface = numpy.array(
[
-0.64479204, 0.04133273, 0.03507657, 0.03474857, 0.02277851, 0.04387467, 0.02826011
])
expected_u_target_depth_deep = numpy.array(
[
-0.71004887, -0.30731464, -0.31567158, -0.31442473, -0.32323185, -0.28886639, -0.27053976
])
expected_v_target_depth_deep = numpy.array(
[
-0.56341252, -0.03849612, -0.0373252, -0.04470835, -0.05233803, -0.05875473, -0.03969896
])
return VerticalValues(u, v, h, zeta, siglay_centroid, num_nele, num_siglay, time_index, target_depth_default,
target_depth_surface, target_depth_deep, expected_u_target_depth_default,
expected_v_target_depth_default, expected_u_target_depth_surface,
expected_v_target_depth_surface, expected_u_target_depth_deep, expected_v_target_depth_deep)
def test_vertical_interpolation(vertical_values):
"""Test vertical interpolation."""
u_target_depth, v_target_depth = vertical_interpolation(vertical_values.u, vertical_values.v, vertical_values.h,
vertical_values.zeta, vertical_values.siglay_centroid,
vertical_values.num_nele, vertical_values.num_siglay,
vertical_values.time_index,
vertical_values.target_depth_default)
# print(f"u_target_depth: {u_target_depth}")
# print(f"v_target_depth: {v_target_depth}")
assert numpy.allclose(u_target_depth, vertical_values.expected_u_target_depth_default)
assert numpy.allclose(v_target_depth, vertical_values.expected_v_target_depth_default)
def test_vertical_interpolation_at_surface(vertical_values):
"""Test vertical interpolation."""
u_target_depth, v_target_depth = vertical_interpolation(vertical_values.u, vertical_values.v, vertical_values.h,
vertical_values.zeta, vertical_values.siglay_centroid,
vertical_values.num_nele, vertical_values.num_siglay,
vertical_values.time_index,
vertical_values.target_depth_surface)
# print(f"u_target_depth_surface: {u_target_depth}")
# print(f"v_target_depth_surface: {v_target_depth}")
assert numpy.allclose(u_target_depth, vertical_values.expected_u_target_depth_surface)
assert numpy.allclose(v_target_depth, vertical_values.expected_v_target_depth_surface)
def test_vertical_interpolation_deep(vertical_values):
"""Test vertical interpolation."""
u_target_depth, v_target_depth = vertical_interpolation(vertical_values.u, vertical_values.v, vertical_values.h,
vertical_values.zeta, vertical_values.siglay_centroid,
vertical_values.num_nele, vertical_values.num_siglay,
vertical_values.time_index,
vertical_values.target_depth_deep)
# print(f"u_target_depth_deep: {u_target_depth}")
# print(f"v_target_depth_deep: {v_target_depth}")
assert numpy.allclose(u_target_depth, vertical_values.expected_u_target_depth_deep)
assert numpy.allclose(v_target_depth, vertical_values.expected_v_target_depth_deep)
NodeToCentroidValues = namedtuple(
'NodeToCentroidValues',
['zeta',
'h',
'lat_node',
'lon_node',
'lat_centroid',
'lon_centroid',
'time_index',
'expected_h_centroid',
'expected_zeta_centroid'])
@pytest.fixture
def node_to_centroid_values():
zeta = numpy.array(
[
[-0.146551, -0.146383, -0.145782, -0.146226, -0.145688, -0.145071, -0.146047, -0.145581, -0.145099,
-0.144439, -0.145729, -0.145431, -0.144983, -0.144627, -0.144069, -0.145415, -0.145071, -0.144579,
-0.144391, -0.143806, -0.144057,
-0.169269, -0.169959, -0.170395, -0.170738, -0.166229, -0.166729, -0.167032, -0.162118, -0.162876,
-0.163448, -0.163812, -0.158493, -0.159403, -0.159949, -0.160389
]
])
h = numpy.array(
[
1.6438, 1.8724, 1.875, 2.0302, 1.9946, 2.0446, 2.1169, 2.1307, 2.1457, 2.2134, 2.1768, 2.178, 2.2341,
2.2623, 2.331, 2.1949, 2.2047, 2.2817, 2.3537, 2.434, 2.404,
31.461, 31.874, 32.37, 32.361, 30.565, 31.089, 31.196, 29.231, 29.033, 28.687, 28.761, 26.053, 25.788,
26.267, 26.835
])
lat_centroid = numpy.array(
[
30.260946, 30.262287, 30.260996, 30.259796, 30.26362, 30.262392, 30.261309, 30.260002, 30.258938, 30.264635,
30.263693, 30.262672, 30.261517, 30.260561, 30.259289, 30.258574, 30.264799, 30.264191, 30.263008,
30.262444, 30.261324, 30.260527, 30.259325, 30.259058, 30.260839, 30.259773,
29.969425, 29.963093, 29.967611, 29.961212, 29.96579, 29.959322, 29.980326, 29.985025,
29.978645, 29.983408, 29.976954, 29.981771, 29.996161, 30.001049, 29.994678, 29.999624,
29.993172, 29.998173
])
lon_centroid = numpy.array(
[
-88.126221, -88.124329, -88.124817, -88.123871, -88.122406, -88.122925, -88.12204, -88.122498, -88.12149,
-88.120361, -88.120972, -88.120026, -88.120575, -88.119843, -88.1203, -88.119263, -88.118958, -88.118195,
-88.118652, -88.117645, -88.117676, -88.118591, -88.118286, -88.117096, -88.116516, -88.116272,
-88.264221, -88.25528, -88.245392, -88.236328, -88.226379, -88.217194, -88.265015,
-88.256958, -88.246277, -88.238129, -88.227295, -88.219116, -88.260498, -88.253448,
-88.241791, -88.23468, -88.222839, -88.215698
])
lat_node = numpy.array(
[
30.260969, 30.262211, 30.259659, 30.263531, 30.26112, 30.258608, 30.264805, 30.262526, 30.260281, 30.257925,
30.265354, 30.263748, 30.261744, 30.259661, 30.258137, 30.265295, 30.263531, 30.262054, 30.260176, 30.25886,
30.260286,
29.958637, 29.956696, 29.954746, 29.952793, 29.973944, 29.972195, 29.970428, 29.991339, 29.989796,
29.988235, 29.986652, 30.007345, 30.006006, 30.004629, 30.003235
])
lon_node = numpy.array(
[
-88.127625, -88.125763, -88.125275, -88.12384, -88.123413, -88.122894, -88.121857, -88.12149, -88.121185,
-88.120422, -88.119629, -88.119568, -88.119049, -88.119293, -88.118073, -88.117676, -88.11734, -88.116516,
-88.117462, -88.115784, -88.11557,
-88.265167, -88.246277, -88.227234, -88.207947, -88.254395, -88.235504, -88.2164, -88.267578,
-88.248901, -88.230011, -88.210937, -88.265045, -88.246429, -88.2276, -88.208527
])
time_index = 0
expected_h_centroid = numpy.array(
[
1.79706003, 1.96634453, 1.91399347, 1.97068808, 2.09215354, 2.05125885, 2.08959921, 2.06159611, 2.13525152,
2.15697391, 2.14185806, 2.1812977, 2.17015265, 2.2140166, 2.20713333, 2.26888473, 2.18323025, 2.19253056,
2.20559465, 2.23989205, 2.28985895, 2.28394075, 2.31515434, 2.37337259, 2.3464489, 2.39725858,
30.74697778, 31.29994912, 31.1760438, 31.77788929, 31.55164158, 31.9756924, 30.00374208, 29.60978718,
30.22881187, 29.6028045, 30.32372402, 29.54809818, 28.10520577, 26.95770733, 27.83656214, 26.91390017,
27.90491454, 27.28733678
])
expected_zeta_centroid = numpy.array(
[
-0.1462386, -0.14609691, -0.14595087, -0.1455159, -0.14595329, -0.14583367, -0.14545806, -0.14528592,
-0.14486764, -0.14573735, -0.1456864, -0.14532972, -0.14522106, -0.14490293, -0.14472167, -0.14437832,
-0.14552505, -0.14530566, -0.14516168, -0.14487977, -0.14465093, -0.14466511, -0.1443643, -0.14408686,
-0.1443424, -0.14408446,
-0.16695492, -0.16848552, -0.16763914, -0.16902775, -0.16805193, -0.16938841, -0.16457143, -0.16374129,
-0.16527793, -0.16435127, -0.16573603, -0.16476413, -0.16116232, -0.16025768, -0.16190908, -0.16093319,
-0.16240301, -0.16138305
])
return NodeToCentroidValues(zeta, h, lat_node, lon_node, lat_centroid, lon_centroid, time_index,
expected_h_centroid, expected_zeta_centroid)
def test_node_to_centroid(node_to_centroid_values):
"""Test horizontal interpolation from nodes to centroids(elements)."""
h_centroid, zeta_centroid = node_to_centroid(node_to_centroid_values.zeta, node_to_centroid_values.h,
node_to_centroid_values.lon_node, node_to_centroid_values.lat_node,
node_to_centroid_values.lon_centroid,
node_to_centroid_values.lat_centroid,
node_to_centroid_values.time_index)
# print(f"h_centroid: {h_centroid}")
# print(f"zeta_centroid: {zeta_centroid}")
assert numpy.allclose(h_centroid, node_to_centroid_values.expected_h_centroid)
assert numpy.allclose(zeta_centroid, node_to_centroid_values.expected_zeta_centroid)
| 53.431193
| 120
| 0.602278
|
23673b1284e13b6568c8357ec84e688351f1cc6c
| 3,157
|
py
|
Python
|
scripts/ms-gui.py
|
btobab/minesweeper
|
0baec6b1535374a436fd878187e556780972de15
|
[
"MIT"
] | null | null | null |
scripts/ms-gui.py
|
btobab/minesweeper
|
0baec6b1535374a436fd878187e556780972de15
|
[
"MIT"
] | null | null | null |
scripts/ms-gui.py
|
btobab/minesweeper
|
0baec6b1535374a436fd878187e556780972de15
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""GUI for Mine Sweeper.
Author: Yuhuang Hu
Email : duguyue100@gmail.com
"""
# from __future__ import print_function
import argparse
try:
from PyQt4 import QtGui, QtCore
    from PyQt4.QtGui import QWidget, QApplication, QGridLayout
except ImportError:
from PyQt5 import QtCore
from PyQt5.QtWidgets import QWidget, QApplication, QGridLayout
import sys
sys.path.append("..")
from minesweeper.msgame import MSGame
from minesweeper import gui
def ms_game_main(board_width, board_height, num_mines, port, ip_add):
"""Main function for Mine Sweeper Game.
Parameters
----------
board_width : int
the width of the board (> 0)
board_height : int
the height of the board (> 0)
num_mines : int
the number of mines, cannot be larger than
(board_width x board_height)
port : int
UDP port number, default is 5678
ip_add : string
the ip address for receiving the command,
default is localhost.
"""
ms_game = MSGame(board_width, board_height, num_mines,
port=port, ip_add=ip_add)
ms_app = QApplication([])
ms_window = QWidget()
ms_window.setAutoFillBackground(True)
ms_window.setWindowTitle("Mine Sweeper")
ms_layout = QGridLayout()
ms_window.setLayout(ms_layout)
fun_wg = gui.ControlWidget()
grid_wg = gui.GameWidget(ms_game, fun_wg)
remote_thread = gui.RemoteControlThread()
def update_grid_remote(move_msg):
"""Update grid from remote control."""
if grid_wg.ms_game.game_status == 2:
grid_wg.ms_game.play_move_msg(str(move_msg))
grid_wg.update_grid()
remote_thread.transfer.connect(update_grid_remote)
def reset_button_state():
"""Reset button state."""
grid_wg.reset_game()
fun_wg.reset_button.clicked.connect(reset_button_state)
ms_layout.addWidget(fun_wg, 0, 0)
ms_layout.addWidget(grid_wg, 1, 0)
remote_thread.control_start(grid_wg.ms_game)
ms_window.show()
ms_app.exec_()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Mine Sweeper Minesweeper \
with interfaces for \
Reinforcement Learning \
by Yuhuang Hu")
parser.add_argument("--board-width", type=int,
default=20,
help="width of the board.")
parser.add_argument("--board-height", type=int,
default=20,
help="height of the board.")
parser.add_argument("--num-mines", type=int,
default=40,
help="number of mines.")
parser.add_argument("--port", type=int,
default=5678,
help="The port for TCP connection.")
parser.add_argument("--ip-add", type=str,
default="127.0.0.1",
help="The IP address for TCP connection.")
args = parser.parse_args()
ms_game_main(**vars(args))
| 30.95098
| 76
| 0.604371
|
6d79d41c5854a6d242be3d6bb3b5c731a151d80d
| 19
|
py
|
Python
|
__init__.py
|
spacecoalmen/asteroid_scraper
|
eea0c1700479e62d08eb60ead547c9ce5e37d7ba
|
[
"MIT"
] | null | null | null |
__init__.py
|
spacecoalmen/asteroid_scraper
|
eea0c1700479e62d08eb60ead547c9ce5e37d7ba
|
[
"MIT"
] | null | null | null |
__init__.py
|
spacecoalmen/asteroid_scraper
|
eea0c1700479e62d08eb60ead547c9ce5e37d7ba
|
[
"MIT"
] | null | null | null |
__author__ = 'gas'
| 9.5
| 18
| 0.684211
|
af1db83b3edecd1d33a04a34bc5e6c68c79f8391
| 254
|
py
|
Python
|
manage.py
|
jessamynsmith/quotations
|
b2a9b70190756fa261840faea181860b166e253f
|
[
"MIT"
] | 2
|
2015-05-01T19:44:41.000Z
|
2015-07-17T13:52:46.000Z
|
manage.py
|
jessamynsmith/quotations
|
b2a9b70190756fa261840faea181860b166e253f
|
[
"MIT"
] | 13
|
2019-10-18T17:06:52.000Z
|
2022-02-10T07:37:30.000Z
|
manage.py
|
jessamynsmith/quotations
|
b2a9b70190756fa261840faea181860b166e253f
|
[
"MIT"
] | 3
|
2015-05-06T15:38:30.000Z
|
2015-07-26T21:12:32.000Z
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "underquoted.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| 23.090909
| 75
| 0.775591
|
294648a88995f06643dba79c995c4aa91693ab41
| 469
|
py
|
Python
|
aether/gallery/views.py
|
katajakasa/aetherguild4
|
a7e294f0cff11e2508751f1013e6648fdc56bb94
|
[
"MIT"
] | null | null | null |
aether/gallery/views.py
|
katajakasa/aetherguild4
|
a7e294f0cff11e2508751f1013e6648fdc56bb94
|
[
"MIT"
] | 1
|
2021-06-10T17:36:11.000Z
|
2021-06-10T17:36:11.000Z
|
aether/gallery/views.py
|
katajakasa/aetherguild4
|
a7e294f0cff11e2508751f1013e6648fdc56bb94
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from django.core.paginator import Paginator
from django.views.decorators.cache import never_cache
from .models import GalleryGroup
from aether.utils.misc import get_page
@never_cache
def gallery_index(request):
paginator = Paginator(GalleryGroup.objects.order_by('-created_at').all(), 5)
page = get_page(request)
return render(request, 'gallery/galleries.html', {
'galleries': paginator.get_page(page),
})
| 29.3125
| 80
| 0.763326
|
7ced7654419825afbac8cdee8b4960f93470ad0d
| 148
|
py
|
Python
|
while_with_break.py
|
agolla0440/my_python_workbook
|
3595037467e89e0950d0c356b2ba087e574e2bd9
|
[
"Apache-2.0"
] | null | null | null |
while_with_break.py
|
agolla0440/my_python_workbook
|
3595037467e89e0950d0c356b2ba087e574e2bd9
|
[
"Apache-2.0"
] | null | null | null |
while_with_break.py
|
agolla0440/my_python_workbook
|
3595037467e89e0950d0c356b2ba087e574e2bd9
|
[
"Apache-2.0"
] | null | null | null |
i = 1
while i < 6:
# print(i)
if i == 2:
i += 1
continue
if i == 5:
break
else:
print(i)
i += 1
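# Expected output: 1, 3, 4 (printing 2 is skipped via `continue`; the loop breaks once i == 5).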
| 12.333333
| 16
| 0.337838
|
31317ebb52ea98eaf6c931c267eb860198105e22
| 991
|
py
|
Python
|
examples/basic/midiout.py
|
Czaki/python-rtmidi
|
eb16ab3268b29b94cd2baa6bfc777f5cf5f908ba
|
[
"MIT"
] | 260
|
2015-06-25T06:44:31.000Z
|
2022-02-28T15:44:29.000Z
|
examples/basic/midiout.py
|
rwreynolds/python-rtmidi
|
04ba3a69da0437b21ca3bb6f359c8ebaf06f3bd2
|
[
"MIT"
] | 99
|
2016-02-10T22:19:23.000Z
|
2022-03-31T06:22:15.000Z
|
examples/basic/midiout.py
|
rwreynolds/python-rtmidi
|
04ba3a69da0437b21ca3bb6f359c8ebaf06f3bd2
|
[
"MIT"
] | 70
|
2015-06-25T06:57:01.000Z
|
2022-03-02T05:25:28.000Z
|
#!/usr/bin/env python
#
# midiout.py
#
"""Show how to open an output port and send MIDI events."""
from __future__ import print_function
import logging
import sys
import time
from rtmidi.midiutil import open_midioutput
from rtmidi.midiconstants import NOTE_OFF, NOTE_ON
log = logging.getLogger('midiout')
logging.basicConfig(level=logging.DEBUG)
# Prompts user for MIDI input port, unless a valid port number or name
# is given as the first argument on the command line.
# API backend defaults to ALSA on Linux.
port = sys.argv[1] if len(sys.argv) > 1 else None
try:
midiout, port_name = open_midioutput(port)
except (EOFError, KeyboardInterrupt):
sys.exit()
note_on = [NOTE_ON, 60, 112] # channel 1, middle C, velocity 112
note_off = [NOTE_OFF, 60, 0]
with midiout:
print("Sending NoteOn event.")
midiout.send_message(note_on)
time.sleep(1)
print("Sending NoteOff event.")
midiout.send_message(note_off)
time.sleep(0.1)
del midiout
print("Exit.")
| 23.046512
| 70
| 0.733602
|
e369e53119fa1bf7d97223640a58c642b73a1c32
| 180
|
py
|
Python
|
config.py
|
SnoozeTime/kucoin-trade-report
|
c71be0ad327908abfbbeddf8fbf75f442c63e5af
|
[
"WTFPL"
] | 2
|
2018-04-16T01:46:35.000Z
|
2021-11-11T21:30:30.000Z
|
config.py
|
SnoozeTime/kucoin-trade-report
|
c71be0ad327908abfbbeddf8fbf75f442c63e5af
|
[
"WTFPL"
] | 1
|
2021-06-01T21:47:23.000Z
|
2021-06-01T21:47:23.000Z
|
config.py
|
SnoozeTime/kucoin-trade-report
|
c71be0ad327908abfbbeddf8fbf75f442c63e5af
|
[
"WTFPL"
] | null | null | null |
import os
# Either add KUCOIN_API and KUCOIN_SECRET as environment variable, or write them directly here.
KEY = os.getenv("KUCOIN_API", "")
SECRET = os.getenv("KUCOIN_SECRET", "")
| 36
| 95
| 0.75
|
9004c1c26b9635e6e17a2d299ca6437399fcb06d
| 2,694
|
py
|
Python
|
caffe2/python/operator_test/decay_adagrad_test.py
|
Hacky-DH/pytorch
|
80dc4be615854570aa39a7e36495897d8a040ecc
|
[
"Intel"
] | 60,067
|
2017-01-18T17:21:31.000Z
|
2022-03-31T21:37:45.000Z
|
caffe2/python/operator_test/decay_adagrad_test.py
|
Hacky-DH/pytorch
|
80dc4be615854570aa39a7e36495897d8a040ecc
|
[
"Intel"
] | 66,955
|
2017-01-18T17:21:38.000Z
|
2022-03-31T23:56:11.000Z
|
caffe2/python/operator_test/decay_adagrad_test.py
|
Hacky-DH/pytorch
|
80dc4be615854570aa39a7e36495897d8a040ecc
|
[
"Intel"
] | 19,210
|
2017-01-18T17:45:04.000Z
|
2022-03-31T23:51:56.000Z
|
import functools
from hypothesis import given
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
class TestDecayAdagrad(hu.HypothesisTestCase):
@staticmethod
def ref_decay_adagrad(param, mom1, mom2, grad, LR, ITER,
beta1, beta2, epsilon, weight_decay, bias_correction_first, output_grad=False):
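        # Reference DecayAdagrad update, checked against the C++ operator below:
        # an Adam-style first moment, an Adagrad-style (never decayed) second
        # moment, optional bias correction of the first moment only, and
        # decoupled weight decay added to the update direction.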
t = ITER + 1
mom1_out = (beta1 * mom1) + (1 - beta1) * grad
mom2_out = mom2 + np.square(grad)
if bias_correction_first:
c = 1 - np.power(beta1, t)
else:
c = 1.0
grad_out = mom1_out / c / (np.sqrt(mom2_out) + epsilon) + weight_decay * param
param_out = param + LR * grad_out
return param_out, mom1_out, mom2_out
@given(inputs=hu.tensors(n=4),
ITER=st.integers(min_value=0, max_value=10000),
LR=st.floats(min_value=0.01, max_value=0.99,
allow_nan=False, allow_infinity=False),
beta1=st.floats(min_value=0.01, max_value=0.99,
allow_nan=False, allow_infinity=False),
beta2=st.floats(min_value=0.01, max_value=0.99,
allow_nan=False, allow_infinity=False),
epsilon=st.floats(min_value=0.01, max_value=0.99,
allow_nan=False, allow_infinity=False),
weight_decay=st.floats(min_value=0.01, max_value=0.99,
allow_nan=False, allow_infinity=False),
**hu.gcs_cpu_only)
def test_decay_adagrad(self, inputs, ITER, LR, beta1, beta2, epsilon, weight_decay, gc, dc):
bias_correction_first = True
param, mom1, mom2, grad = inputs
mom2 = np.abs(mom2)
ITER = np.array([ITER], dtype=np.int64)
LR = np.array([LR], dtype=np.float32)
op = core.CreateOperator(
"DecayAdagrad",
["param", "mom1", "mom2", "grad", "lr", "iter"],
["output_param", "output_mom1", "output_mom2"],
beta1=beta1, beta2=beta2, epsilon=epsilon, weight_decay=weight_decay, bias_correction_first=bias_correction_first)
# Iter lives on the CPU
input_device_options = {'iter': hu.cpu_do}
self.assertReferenceChecks(
gc, op,
[param, mom1, mom2, grad, LR, ITER],
functools.partial(
self.ref_decay_adagrad,
beta1=beta1, beta2=beta2, epsilon=epsilon, weight_decay=weight_decay, bias_correction_first=bias_correction_first),
input_device_options=input_device_options)
if __name__ == "__main__":
import unittest
unittest.main()
| 39.043478
| 131
| 0.612843
|
6ae23ac19b0ec7dfc760086f1f8546debc834f4b
| 4,796
|
py
|
Python
|
10 On-policy control with approximation/Mountain Car/mountaincar.py
|
c-boe/Reinforcement-learning
|
a8cfcf3ff022a31e6f21fe3497c5557443778258
|
[
"MIT"
] | 1
|
2021-04-20T00:43:53.000Z
|
2021-04-20T00:43:53.000Z
|
12 Eligibility Traces/Mountain Car/mountaincar.py
|
c-boe/Reinforcement-learning
|
a8cfcf3ff022a31e6f21fe3497c5557443778258
|
[
"MIT"
] | null | null | null |
12 Eligibility Traces/Mountain Car/mountaincar.py
|
c-boe/Reinforcement-learning
|
a8cfcf3ff022a31e6f21fe3497c5557443778258
|
[
"MIT"
] | null | null | null |
"""
Implementation of mountaincar environment for example 10.1 of "Reinforcement
learning" by Sutton and Barto
"""
import numpy as np
import matplotlib.pyplot as plt
class MountainCar():
"""Mountain car environment from example 10.1 of Sutton and Barto's
"Reinforcement Learning" """
def __init__(self):
self.x_bound_l = -1.2
self.x_bound_u = 0.5
self.v_bound_l = -0.07
self.v_bound_u = 0.07
self.x_init_l = -0.6
self.x_init_u = -0.4
def reset(self):
"""
Initialize position and velocity of mountain car
Returns
-------
state : list [position, velocity]
state at beginning of every episode
"""
x = np.random.uniform(low=self.x_init_l, high=self.x_init_u, size=1)
v = 0
state = [x, v]
return state
def step(self, state, action):
"""
Take action in current state
Parameters
----------
state : list [current position, current velocity]
state before taking action
action : int
action taken by agent
Returns
-------
next_state : list [next position, next velocity]
state after taking action
reward : int
negative reward for every step
done : bool
episode finished or not.
final_state : list [final position, final velocity]
state at the end of the episode
"""
done = False
final_state = []
x, v = state
throttle = self.action(action)
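        # Mountain-car dynamics from Sutton & Barto, Example 10.1:
        #   v' = bound(v + 0.001*throttle - 0.0025*cos(3*x)),  x' = bound(x + v')
        # with the velocity reset to zero when the car hits the left wall.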
next_v = self.__bound_v(v + 0.001*throttle - 0.0025*np.cos(3*x))
next_x = self.__bound_x(x + next_v)
if next_x == self.x_bound_l:
next_v = 0
next_state = np.array([next_x, next_v])
if next_x == self.x_bound_u:
done = True
final_state = [next_x, next_v]
reward = -1
return next_state, reward, done, final_state
def __bound_v(self, v):
"""
Apply velocity boundaries
"""
if v < self.v_bound_l:
v = self.v_bound_l
elif v > self.v_bound_u:
v = self.v_bound_u
return v
def __bound_x(self, x):
"""
Apply positional boundaries
"""
if x < self.x_bound_l:
x = self.x_bound_l
elif x > self.x_bound_u:
x = self.x_bound_u
return x
def action(self, action):
"""
Parameters
----------
action : int
action taken by agent
Returns
-------
throttle : int
reverse, forward or zero throttle
"""
if action == 0:
throttle = -1 #reverse
elif action == 1:
throttle = 0
elif action == 2:
            throttle = 1 # forward
return throttle
def render(self, x):
"""
Plot mountain car position x at every step of episode
Parameters
----------
x : int
x coordinate of mountain car
Returns
-------
None.
"""
x = np.array([x])
y = np.cos(2*np.pi*(x/2 + 0.75))
x_mountain = np.linspace(self.x_bound_l, self.x_bound_u,100)
y_mountain = np.cos(2*np.pi*(x_mountain/2 + 0.75))
plt.figure("Mountain car")
plt.clf()
plt.plot(x_mountain, y_mountain)
plt.title("Mountain car")
if len(x) == 1:
plt.plot(x, y,'o')
plt.xlim((self.x_bound_l, self.x_bound_u))
plt.ylim((-1, 1))
plt.pause(0.01)
else:
nr_steps = len(x)
for step in range(nr_steps):
plt.clf()
plt.plot(x[step], y[step],'o')
plt.xlim((self.x_bound_l, self.x_bound_u))
plt.ylim((-1, 1))
plt.pause(0.1)
def plot_step_per_ep(self, episode, steps):
"""
Plot number of steps per episode of mountain car as function of episode
Parameters
----------
episode : int
current episode
steps : int
number of steps until episode is done
Returns
-------
None.
"""
plt.figure("Steps per episode")
plt.plot(episode, steps,'o')
plt.yscale("log")
plt.pause(0.001)
plt.xlabel("Episode")
plt.ylabel("Steps per episode (log scale)")
| 24.222222
| 79
| 0.4804
|
d6e8cef0a969e3df068ff5dfa3a19d1acec67b32
| 989
|
py
|
Python
|
app/app/urls.py
|
Omar2B/recipe-app-api
|
8031bf1e1f0050bac6ad60ceff9615365f1a5720
|
[
"MIT"
] | null | null | null |
app/app/urls.py
|
Omar2B/recipe-app-api
|
8031bf1e1f0050bac6ad60ceff9615365f1a5720
|
[
"MIT"
] | 6
|
2021-03-19T01:47:22.000Z
|
2021-09-08T01:54:59.000Z
|
app/app/urls.py
|
Omar2B/recipe-app-api
|
8031bf1e1f0050bac6ad60ceff9615365f1a5720
|
[
"MIT"
] | null | null | null |
"""app URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('api/user/', include('user.urls')),
path('api/recipe/', include('recipe.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 36.62963
| 77
| 0.715875
|
3e05fc0b3653e4bbdbe636b73a7db05321e3074f
| 1,390
|
py
|
Python
|
SLACKBOT/Api/config.py
|
edoi777/tokenGL
|
0a96c9a75213374527867031fa2ee5c1dd74e96a
|
[
"MIT"
] | null | null | null |
SLACKBOT/Api/config.py
|
edoi777/tokenGL
|
0a96c9a75213374527867031fa2ee5c1dd74e96a
|
[
"MIT"
] | null | null | null |
SLACKBOT/Api/config.py
|
edoi777/tokenGL
|
0a96c9a75213374527867031fa2ee5c1dd74e96a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from tcr.ttypes import ApplicationType
import re
class Config():
LINE_HOST_DOMAIN = 'https://gd2.line.naver.jp'
LINE_OBS_DOMAIN = 'https://obs-sg.line-apps.com'
LINE_TIMELINE_API = 'https://gd2.line.naver.jp/mh/api'
LINE_TIMELINE_MH = 'https://gd2.line.naver.jp/mh'
LINE_LOGIN_QUERY_PATH = '/api/v4p/rs'
LINE_AUTH_QUERY_PATH = '/api/v4/TalkService.do'
LINE_API_QUERY_PATH_FIR = '/S4'
LINE_POLL_QUERY_PATH_FIR = '/P4'
LINE_CALL_QUERY_PATH = '/V4'
LINE_CERTIFICATE_PATH = '/Q'
LINE_CHAN_QUERY_PATH = '/CH4'
LINE_SQUARE_QUERY_PATH = '/SQS1'
CHANNEL_ID = {
'LINE_TIMELINE': '1341209950',
'LINE_WEBTOON': '1401600689',
'LINE_TODAY': '1518712866',
'LINE_STORE': '1376922440',
'LINE_MUSIC': '1381425814',
'LINE_SERVICES': '1459630796'
}
APP_TYPE = ApplicationType._VALUES_TO_NAMES[400]
APP_VER = '7.18.1'
CARRIER = '51089, 1-0'
SYSTEM_NAME = 'SlackBOT'
SYSTEM_VER = '11.2.5'
IP_ADDR = '8.8.8.8'
EMAIL_REGEX = re.compile(r"[^@]+@[^@]+\.[^@]+")
def __init__(self):
self.APP_NAME = '%s\t%s\t%s\t%s' % (self.APP_TYPE, self.APP_VER, self.SYSTEM_NAME, self.SYSTEM_VER)
self.USER_AGENT = 'SlackBOT/%s' % self.APP_VER
| 33.902439
| 107
| 0.584892
|
bc35e02e6a58135193f75176bc85a487b2edb75c
| 3,099
|
py
|
Python
|
pandas/tests/plotting/frame/test_frame_groupby.py
|
CJL89/pandas
|
6210077d32a9e9675526ea896e6d1f9189629d4a
|
[
"BSD-3-Clause"
] | 28,899
|
2016-10-13T03:32:12.000Z
|
2022-03-31T21:39:05.000Z
|
pandas/tests/plotting/frame/test_frame_groupby.py
|
CJL89/pandas
|
6210077d32a9e9675526ea896e6d1f9189629d4a
|
[
"BSD-3-Clause"
] | 31,004
|
2016-10-12T23:22:27.000Z
|
2022-03-31T23:17:38.000Z
|
pandas/tests/plotting/frame/test_frame_groupby.py
|
CJL89/pandas
|
6210077d32a9e9675526ea896e6d1f9189629d4a
|
[
"BSD-3-Clause"
] | 15,149
|
2016-10-13T03:21:31.000Z
|
2022-03-31T18:46:47.000Z
|
""" Test cases for DataFrame.plot """
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas import DataFrame
import pandas._testing as tm
from pandas.tests.plotting.common import TestPlotBase
pytestmark = pytest.mark.slow
@td.skip_if_no_mpl
class TestDataFramePlotsGroupby(TestPlotBase):
def setup_method(self, method):
TestPlotBase.setup_method(self, method)
import matplotlib as mpl
mpl.rcdefaults()
self.tdf = tm.makeTimeDataFrame()
self.hexbin_df = DataFrame(
{
"A": np.random.uniform(size=20),
"B": np.random.uniform(size=20),
"C": np.arange(20) + np.random.uniform(size=20),
}
)
def _assert_ytickslabels_visibility(self, axes, expected):
for ax, exp in zip(axes, expected):
self._check_visible(ax.get_yticklabels(), visible=exp)
def _assert_xtickslabels_visibility(self, axes, expected):
for ax, exp in zip(axes, expected):
self._check_visible(ax.get_xticklabels(), visible=exp)
@pytest.mark.parametrize(
"kwargs, expected",
[
# behavior without keyword
({}, [True, False, True, False]),
# set sharey=True should be identical
({"sharey": True}, [True, False, True, False]),
# sharey=False, all yticklabels should be visible
({"sharey": False}, [True, True, True, True]),
],
)
def test_groupby_boxplot_sharey(self, kwargs, expected):
# https://github.com/pandas-dev/pandas/issues/20968
# sharey can now be switched check whether the right
# pair of axes is turned on or off
df = DataFrame(
{
"a": [-1.43, -0.15, -3.70, -1.43, -0.14],
"b": [0.56, 0.84, 0.29, 0.56, 0.85],
"c": [0, 1, 2, 3, 1],
},
index=[0, 1, 2, 3, 4],
)
axes = df.groupby("c").boxplot(**kwargs)
self._assert_ytickslabels_visibility(axes, expected)
@pytest.mark.parametrize(
"kwargs, expected",
[
# behavior without keyword
({}, [True, True, True, True]),
# set sharex=False should be identical
({"sharex": False}, [True, True, True, True]),
# sharex=True, xticklabels should be visible
# only for bottom plots
({"sharex": True}, [False, False, True, True]),
],
)
def test_groupby_boxplot_sharex(self, kwargs, expected):
# https://github.com/pandas-dev/pandas/issues/20968
# sharex can now be switched check whether the right
# pair of axes is turned on or off
df = DataFrame(
{
"a": [-1.43, -0.15, -3.70, -1.43, -0.14],
"b": [0.56, 0.84, 0.29, 0.56, 0.85],
"c": [0, 1, 2, 3, 1],
},
index=[0, 1, 2, 3, 4],
)
axes = df.groupby("c").boxplot(**kwargs)
self._assert_xtickslabels_visibility(axes, expected)
| 33.322581
| 66
| 0.552436
|
03b9f4a1284b7165c1012042f8d132e3a257c65b
| 1,015
|
py
|
Python
|
client_code/utils/__init__.py
|
hugetim/anvil-extras
|
ca83f6ada5149514c2affbe1ab081a4ca677c7e0
|
[
"MIT"
] | null | null | null |
client_code/utils/__init__.py
|
hugetim/anvil-extras
|
ca83f6ada5149514c2affbe1ab081a4ca677c7e0
|
[
"MIT"
] | null | null | null |
client_code/utils/__init__.py
|
hugetim/anvil-extras
|
ca83f6ada5149514c2affbe1ab081a4ca677c7e0
|
[
"MIT"
] | null | null | null |
# SPDX-License-Identifier: MIT
#
# Copyright (c) 2021 The Anvil Extras project team members listed at
# https://github.com/anvilistas/anvil-extras/graphs/contributors
#
# This software is published at https://github.com/anvilistas/anvil-extras
from functools import cache
__version__ = "1.7.1"
def __dir__():
return ["auto_refreshing", "wait_for_writeback", "timed", "BindingRefreshDict"]
@cache
def __getattr__(name):
    # todo use dynamic imports but __import__ is not yet supported in skulpt
if name == "auto_refreshing":
from ._auto_refreshing import auto_refreshing
return auto_refreshing
elif name == "timed":
from ._timed import timed
return timed
elif name == "wait_for_writeback":
from ._writeback_waiter import wait_for_writeback
return wait_for_writeback
elif name == "BindingRefreshDict":
from ._auto_refreshing import BindingRefreshDict
return BindingRefreshDict
else:
raise AttributeError(name)
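# A module-level __getattr__ (PEP 562) combined with functools.cache gives lazy,
# memoised imports: the first access to e.g. `utils.timed` imports ._timed, and
# subsequent lookups return the cached attribute without re-importing.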
| 26.710526
| 83
| 0.715271
|
fb8850eb046f6cd0ae3d874216809a8c18ddd654
| 9,359
|
py
|
Python
|
CartPole_HSPGA.py
|
unc-optimization/ProxHSPGA
|
8860ac7acccffcc46eb8e89c2ad4249488c69ad5
|
[
"MIT"
] | null | null | null |
CartPole_HSPGA.py
|
unc-optimization/ProxHSPGA
|
8860ac7acccffcc46eb8e89c2ad4249488c69ad5
|
[
"MIT"
] | null | null | null |
CartPole_HSPGA.py
|
unc-optimization/ProxHSPGA
|
8860ac7acccffcc46eb8e89c2ad4249488c69ad5
|
[
"MIT"
] | 3
|
2020-03-10T19:08:01.000Z
|
2022-01-05T01:49:32.000Z
|
"""@package CartPole_HSPGA
This package implements the HSPGA algorithm for CartPole-v0 environment.
Copyright (c) 2020 Nhan H. Pham, Department of Statistics and Operations Research, University of North Carolina at Chapel Hill
Copyright (c) 2020 Lam M. Nguyen, IBM Research, Thomas J. Watson Research Center
Yorktown Heights
Copyright (c) 2020 Dzung T. Phan, IBM Research, Thomas J. Watson Research Center
Yorktown Heights
Copyright (c) 2020 Phuong Ha Nguyen, Department of Electrical and Computer Engineering, University of Connecticut
Copyright (c) 2020 Marten van Dijk, Department of Electrical and Computer Engineering, University of Connecticut
Copyright (c) 2020 Quoc Tran-Dinh, Department of Statistics and Operations Research, University of North Carolina at Chapel Hill
All rights reserved.
If you found this helpful and are using it within our software please cite the following publication:
* N. H. Pham, L. M. Nguyen, D. T. Phan, P. H. Nguyen, M. van Dijk and Q. Tran-Dinh, **A Hybrid Stochastic Policy Gradient Algorithm for Reinforcement Learning**, The 23rd International Conference on Artificial Intelligence and Statistics (AISTATS 2020), Palermo, Italy, 2020.
"""
from rllab.envs.gym_env import GymEnv
from rllab.policies.categorical_mlp_policy import CategoricalMLPPolicy
import numpy as np
import theano
import theano.tensor as TT
from rllab.sampler import parallel_sampler
from lasagne.updates import adam
import pandas as pd
import random
import os
from utils.utils import *
# whether to load existing policy
load_policy=True
# snapshot batchsize
snap_bs = 20
# effective length of a trajectory
traj_length = 200
# minibatch size in the inner loop
m_bs = 3
# number of trajectories for evaluation
num_eval_traj = 50
# discount factor
discount = 0.99
# stepsizes
learning_rate = 5e-3
beta = 0.99
# number of inner iterations
max_inner = 3
# total number of trajectories
max_num_traj = 5000
# initialize environment
env = GymEnv("CartPole-v0")
# initialize a neural network policy with a single hidden layer of 8 hidden units
policy = CategoricalMLPPolicy(env.spec, hidden_sizes=(8,))
prev_policy = CategoricalMLPPolicy(env.spec, hidden_sizes=(8,))
# policy.distribution returns a distribution object under rllab.distributions. It contains many utilities for computing
# distribution-related quantities, given the computed dist_info_vars. Below we use dist.log_likelihood_sym to compute
# the symbolic log-likelihood. For this example, the corresponding distribution is an instance of the class
# rllab.distributions.DiagonalGaussian
dist = policy.distribution
prev_dist = prev_policy.distribution
# create placeholders
observations_var = env.observation_space.new_tensor_variable(
'observations',
# It should have 1 extra dimension since we want to represent a list of observations
extra_dims=1
)
actions_var = env.action_space.new_tensor_variable(
'actions',
extra_dims=1
)
d_rewards_var = TT.vector('d_rewards')
importance_weights_var = TT.vector('importance_weight')
# policy.dist_info_sym returns a dictionary, whose values are symbolic expressions for quantities related to the
# distribution of the actions. For a Gaussian policy, it contains the mean and (log) standard deviation.
dist_info_vars = policy.dist_info_sym(observations_var)
prev_dist_info_vars = prev_policy.dist_info_sym(observations_var)
params = policy.get_params(trainable=True)
prev_params = prev_policy.get_params(trainable=True)
importance_weights = dist.likelihood_ratio_sym(actions_var,dist_info_vars,prev_dist_info_vars)
# create surrogate losses
surr_on1 = TT.mean(- dist.log_likelihood_sym(actions_var,dist_info_vars)*d_rewards_var)
surr_on2 = TT.mean(prev_dist.log_likelihood_sym(actions_var,prev_dist_info_vars)*d_rewards_var*importance_weights_var)
grad = theano.grad(surr_on1, params)
grad_diff = [sum(x) for x in zip(theano.grad(surr_on1,params),theano.grad(surr_on2,prev_params))]
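# surr_on1 is the usual on-policy surrogate (negative log-likelihood weighted by the
# discounted rewards); surr_on2 re-weights the previous policy's log-likelihood by the
# rewards and importance weights. grad_diff sums their gradients and is the correction
# term consumed by compute_hybrid_spg_est() in the inner loop below.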
print("Parameters shapes")
for i in range(len(params)):
print(params[i].shape.eval())
eval_grad1 = TT.matrix('eval_grad0',dtype=params[0].dtype)
eval_grad2 = TT.vector('eval_grad1',dtype=params[1].dtype)
eval_grad3 = TT.matrix('eval_grad3',dtype=params[2].dtype)
eval_grad4 = TT.vector('eval_grad4',dtype=params[3].dtype)
f_compute_grad = theano.function(
inputs = [observations_var, actions_var, d_rewards_var],
outputs = grad,
allow_input_downcast=True
)
f_update = theano.function(
inputs = [eval_grad1, eval_grad2, eval_grad3, eval_grad4],
outputs = None,
updates = adam([eval_grad1, eval_grad2, eval_grad3, eval_grad4], params, learning_rate=learning_rate)
)
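# Note: adam() is fed the externally supplied gradient estimates (eval_grad1..4)
# rather than a Theano loss, so f_update simply applies one Adam step with whatever
# policy-gradient estimate the caller passes in.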
f_importance_weights = theano.function(
inputs = [observations_var, actions_var],
outputs = importance_weights,
allow_input_downcast=True
)
f_compute_grad_diff = theano.function(
inputs=[observations_var, actions_var, d_rewards_var, importance_weights_var],
outputs=grad_diff,
allow_input_downcast=True
)
# log directory
log_dir = "log_file/Cartpole_Discrete/HSPGA" + "_lr" + str(learning_rate)
# check if directory exists, if not, create directory
if not os.path.exists( log_dir ):
os.makedirs( log_dir )
# setup parallel sampler
parallel_sampler.populate_task(env, policy)
parallel_sampler.initialize(8)
# initialize log Data Frame
avg_return_data = pd.DataFrame()
# loop for 10 runs
for k in range(10):
print("Run #{}".format(k))
# load policy
file_name = 'cartpole_policy' + '.txt'
if load_policy:
policy.set_param_values(np.loadtxt('save_model/' + file_name), trainable=True)
else:
np.savetxt("save_model/" + file_name,policy.get_param_values(trainable=True))
load_policy = True
# intial setup
avg_return = list()
eps_list = []
max_rewards = -np.inf
num_traj = 0
# loop till done
while num_traj <= max_num_traj:
# sample snapshot batch of trajectories
paths = parallel_sampler.sample_paths_on_trajectories(policy.get_param_values(),snap_bs,traj_length,show_bar=False)
paths = paths[:snap_bs]
# extract information
observations, actions, d_rewards = extract_path(paths, discount)
# compute policy gradient
v_est = compute_snapshot_grad_est(f_compute_grad, observations, actions, d_rewards)
# perform update
f_update(v_est[0],v_est[1],v_est[2],v_est[3])
# sample trajectories for evaluating current policy
tmp_paths = parallel_sampler.sample_paths_on_trajectories(policy.get_param_values(),num_eval_traj,show_bar=False)
avg_return.append(np.mean([sum(p["rewards"]) for p in tmp_paths]))
eps_list.append(num_traj)
print(str(num_traj)+' Average Return:', avg_return[-1])
# update best policy
if avg_return[-1] > max_rewards:
max_rewards = avg_return[-1]
best_policy_ = policy.get_param_values(trainable=True)
# update number of trajectories sampled
num_traj += snap_bs
# inner loop
for _ in range(max_inner):
# sample trajectories
sub_paths = parallel_sampler.sample_paths_on_trajectories(policy.get_param_values(),2*m_bs,traj_length,show_bar=False)
# update number of trajectories sampled
num_traj += 2*m_bs
sub_paths_1 = sub_paths[0:m_bs-1]
sub_paths_2 = sub_paths[m_bs:2*m_bs-1]
# extract information
sub_observations_1, sub_actions_1, sub_d_rewards_1 = extract_path(sub_paths_1, discount)
sub_observations_2, sub_actions_2, sub_d_rewards_2 = extract_path(sub_paths_2, discount)
path_info_1 = {
'obs': sub_observations_1,
'acts': sub_actions_1,
'rws': sub_d_rewards_1,
}
path_info_2 = {
'obs': sub_observations_2,
'acts': sub_actions_2,
'rws': sub_d_rewards_2,
}
# compute Hybrid SPG estimator
v_est = compute_hybrid_spg_est(f_compute_grad,f_compute_grad_diff,f_importance_weights,path_info_1,path_info_2,beta,v_est)
# perform update
prev_policy.set_param_values(policy.get_param_values(trainable=True), trainable=True)
f_update(v_est[0],v_est[1],v_est[2],v_est[3])
# check if we are done
if num_traj >= max_num_traj:
tmp_paths = parallel_sampler.sample_paths_on_trajectories(policy.get_param_values(),num_eval_traj,show_bar=False)
avg_return.append(np.mean([sum(p["rewards"]) for p in tmp_paths]))
eps_list.append(num_traj)
print(str(num_traj)+' Average Return:', avg_return[-1])
break
# log data
if k==0:
avg_return_data["Episodes"]=eps_list
avg_return_data["MeanRewards_"+str(k)]=avg_return
avg_return_df = pd.DataFrame()
avg_return_df["Episodes"]=eps_list
avg_return_df["MeanRewards"]=avg_return
avg_return_df.to_csv(os.path.join(log_dir,"avg_return_" + str(k) + ".csv"), index=False)
np.savetxt(os.path.join(log_dir,"final_policy_"+str(k) + ".txt"),policy.get_param_values(trainable=True))
print(avg_return_data)
avg_return_data.to_csv(os.path.join(log_dir,"avg_return_total.csv"),index=False)
| 34.921642
| 275
| 0.726573
|
bce15b3a324020907ab870e60a049109e42b132e
| 8,566
|
py
|
Python
|
plugins/tcpsubnet.py
|
whoISstar/lmp
|
d16c80d377eb479133eb4441b1a76d99013f5876
|
[
"Apache-2.0"
] | null | null | null |
plugins/tcpsubnet.py
|
whoISstar/lmp
|
d16c80d377eb479133eb4441b1a76d99013f5876
|
[
"Apache-2.0"
] | 1
|
2021-11-12T07:53:06.000Z
|
2021-11-12T07:53:06.000Z
|
plugins/tcpsubnet.py
|
dxlearn/lmp
|
2bf9b3e38a72a263bb6b02410a6e7a37fa1855f5
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# @lint-avoid-python-3-compatibility-imports
#
# tcpsubnet Summarize TCP bytes sent to different subnets.
# For Linux, uses BCC, eBPF. Embedded C.
#
# USAGE: tcpsubnet [-h] [-v] [-J] [-f FORMAT] [-i INTERVAL] [subnets]
#
# This uses dynamic tracing of kernel functions, and will need to be updated
# to match kernel changes.
#
# This is an adaptation of tcptop, written by Brendan Gregg.
#
# WARNING: This traces all send at the TCP level, and while it
# summarizes data in-kernel to reduce overhead, there may still be some
# overhead at high TCP send/receive rates (eg, ~13% of one CPU at 100k TCP
# events/sec. This is not the same as packet rate: funccount can be used to
# count the kprobes below to find out the TCP rate). Test in a lab environment
# first. If your send rate is low (eg, <1k/sec) then the overhead is
# expected to be negligible.
#
# Copyright 2017 Rodrigo Manyari
# Licensed under the Apache License, Version 2.0 (the "License")
#
# 03-Oct-2017 Rodrigo Manyari Created this based on tcptop.
# 13-Feb-2018 Rodrigo Manyari Fix pep8 errors, some refactoring.
# 05-Mar-2018 Rodrigo Manyari Add date time to output.
import argparse
import json
import logging
import struct
import socket
from bcc import BPF
from datetime import datetime as dt
from time import sleep
# for influxdb
from init_db import influx_client
from db_modules import write2db
from const import DatabaseType
# arguments
examples = """examples:
./tcpsubnet # Trace TCP sent to the default subnets:
# 127.0.0.1/32,10.0.0.0/8,172.16.0.0/12,
# 192.168.0.0/16,0.0.0.0/0
./tcpsubnet -f K # Trace TCP sent to the default subnets
# aggregated in KBytes.
./tcpsubnet 10.80.0.0/24 # Trace TCP sent to 10.80.0.0/24 only
./tcpsubnet -J # Format the output in JSON.
"""
default_subnets = "127.0.0.1/32,10.0.0.0/8," \
"172.16.0.0/12,192.168.0.0/16,0.0.0.0/0"
parser = argparse.ArgumentParser(
description="Summarize TCP send and aggregate by subnet",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=examples)
parser.add_argument("subnets", help="comma separated list of subnets",
type=str, nargs="?", default=default_subnets)
parser.add_argument("-v", "--verbose", action="store_true",
help="output debug statements")
parser.add_argument("-J", "--json", action="store_true",
help="format output in JSON")
parser.add_argument("--ebpf", action="store_true",
help=argparse.SUPPRESS)
parser.add_argument("-f", "--format", default="B",
help="[bkmBKM] format to report: bits, Kbits, Mbits, bytes, " +
"KBytes, MBytes (default B)", choices=["b", "k", "m", "B", "K", "M"])
parser.add_argument("-i", "--interval", default=1, type=int,
help="output interval, in seconds (default 1)")
args = parser.parse_args()
level = logging.INFO
if args.verbose:
level = logging.DEBUG
logging.basicConfig(level=level)
logging.debug("Starting with the following args:")
logging.debug(args)
# args checking
if int(args.interval) <= 0:
logging.error("Invalid interval, must be > 0. Exiting.")
exit(1)
else:
args.interval = int(args.interval)
# map of supported formats
formats = {
"b": lambda x: (x * 8),
"k": lambda x: ((x * 8) / 1024),
"m": lambda x: ((x * 8) / pow(1024, 2)),
"B": lambda x: x,
"K": lambda x: x / 1024,
"M": lambda x: x / pow(1024, 2)
}
# Let's swap the string with the actual numeric value
# once here so we don't have to do it on every interval
formatFn = formats[args.format]
# define the basic structure of the BPF program
bpf_text = """
#include <uapi/linux/ptrace.h>
#include <net/sock.h>
#include <bcc/proto.h>
struct index_key_t {
u32 index;
};
BPF_HASH(ipv4_send_bytes, struct index_key_t);
int kprobe__tcp_sendmsg(struct pt_regs *ctx, struct sock *sk,
struct msghdr *msg, size_t size)
{
u16 family = sk->__sk_common.skc_family;
if (family == AF_INET) {
u32 dst = sk->__sk_common.skc_daddr;
unsigned categorized = 0;
__SUBNETS__
}
return 0;
}
"""
# data structure from template
class lmp_data(object):
def __init__(self,a,b,c,d):
self.time = a
self.glob = b
self.subnet = c
self.throughput = d
data_struct = {"measurement":'tcpsubnet',
"time":[],
"tags":['glob',],
"fields":['time','subnet','throughput']}
# Takes in a mask and returns the integer equivalent
# e.g.
# mask_to_int(8) returns 4278190080
def mask_to_int(n):
return ((1 << n) - 1) << (32 - n)
# Takes in a list of subnets and returns a list
# of tuple-3 containing:
# - The subnet info at index 0
# - The addr portion as an int at index 1
# - The mask portion as an int at index 2
#
# e.g.
# parse_subnets([10.10.0.0/24]) returns
# [
# ['10.10.0.0/24', 168427520, 4294967040],
# ]
def parse_subnets(subnets):
m = []
for s in subnets:
parts = s.split("/")
if len(parts) != 2:
msg = "Subnet [%s] is invalid, please refer to the examples." % s
raise ValueError(msg)
netaddr_int = 0
mask_int = 0
try:
netaddr_int = struct.unpack("!I", socket.inet_aton(parts[0]))[0]
except:
msg = ("Invalid net address in subnet [%s], " +
"please refer to the examples.") % s
raise ValueError(msg)
try:
mask_int = int(parts[1])
except:
msg = "Invalid mask in subnet [%s]. Mask must be an int" % s
raise ValueError(msg)
if mask_int < 0 or mask_int > 32:
msg = ("Invalid mask in subnet [%s]. Must be an " +
"int between 0 and 32.") % s
raise ValueError(msg)
mask_int = mask_to_int(int(parts[1]))
m.append([s, netaddr_int, mask_int])
return m
def generate_bpf_subnets(subnets):
template = """
if (!categorized && (__NET_ADDR__ & __NET_MASK__) ==
(dst & __NET_MASK__)) {
struct index_key_t key = {.index = __POS__};
ipv4_send_bytes.increment(key, size);
categorized = 1;
}
"""
bpf = ''
for i, s in enumerate(subnets):
branch = template
branch = branch.replace("__NET_ADDR__", str(socket.htonl(s[1])))
branch = branch.replace("__NET_MASK__", str(socket.htonl(s[2])))
branch = branch.replace("__POS__", str(i))
bpf += branch
return bpf
subnets = []
if args.subnets:
subnets = args.subnets.split(",")
subnets = parse_subnets(subnets)
logging.debug("Packets are going to be categorized in the following subnets:")
logging.debug(subnets)
bpf_subnets = generate_bpf_subnets(subnets)
# initialize BPF
bpf_text = bpf_text.replace("__SUBNETS__", bpf_subnets)
logging.debug("Done preprocessing the BPF program, " +
"this is what will actually get executed:")
logging.debug(bpf_text)
if args.ebpf:
print(bpf_text)
exit()
b = BPF(text=bpf_text)
ipv4_send_bytes = b["ipv4_send_bytes"]
# if not args.json:
# print("Tracing... Output every %d secs. Hit Ctrl-C to end" % args.interval)
# output
exiting = 0
while (1):
try:
sleep(args.interval)
except KeyboardInterrupt:
exiting = 1
# IPv4: build dict of all seen keys
keys = ipv4_send_bytes
for k, v in ipv4_send_bytes.items():
if k not in keys:
keys[k] = v
# to hold json data
data = {}
# output
# now = dt.now()
# data['date'] = now.strftime('%x')
# data['time'] = now.strftime('%X')
# data['entries'] = {}
# if not args.json:
# print(now.strftime('%x %X'))
for k, v in reversed(sorted(keys.items(), key=lambda keys: keys[1].value)):
send_bytes = 0
if k in ipv4_send_bytes:
send_bytes = int(ipv4_send_bytes[k].value)
subnet = subnets[k.index][0]
send = formatFn(send_bytes)
# if args.json:
# data['entries'][subnet] = send
# else:
# print("%-21s %6d" % (subnet, send))
# write to influxdb
test_data = lmp_data(dt.now().isoformat(),'glob',subnet, send)
#print(test_data)
write2db(data_struct, test_data, influx_client, DatabaseType.INFLUXDB.value)
if args.json:
print(json.dumps(data))
ipv4_send_bytes.clear()
if exiting:
exit(0)
| 30.05614
| 84
| 0.618842
|
1a29e8021f43c52bd2436ef9b804215ae2571660
| 107
|
py
|
Python
|
test_project/tests/admin.py
|
epanchee/django-autocomplete-light
|
ebc00e78456aed51cf943890d899a75a6b56400d
|
[
"MIT"
] | 1
|
2020-09-18T21:35:06.000Z
|
2020-09-18T21:35:06.000Z
|
test_project/tests/admin.py
|
epanchee/django-autocomplete-light
|
ebc00e78456aed51cf943890d899a75a6b56400d
|
[
"MIT"
] | null | null | null |
test_project/tests/admin.py
|
epanchee/django-autocomplete-light
|
ebc00e78456aed51cf943890d899a75a6b56400d
|
[
"MIT"
] | 1
|
2020-09-18T21:35:08.000Z
|
2020-09-18T21:35:08.000Z
|
from django.contrib import admin
from django.contrib.auth.models import User
admin.site.unregister(User)
| 17.833333
| 43
| 0.82243
|
12967185a9bff3ff51bfb781411bb007ec0ec512
| 229
|
py
|
Python
|
rhea/system/memmap/__init__.py
|
mngr0/rhea
|
9ad8d193f7f78f1d192af438568d45fb5a398c8c
|
[
"MIT"
] | null | null | null |
rhea/system/memmap/__init__.py
|
mngr0/rhea
|
9ad8d193f7f78f1d192af438568d45fb5a398c8c
|
[
"MIT"
] | null | null | null |
rhea/system/memmap/__init__.py
|
mngr0/rhea
|
9ad8d193f7f78f1d192af438568d45fb5a398c8c
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
# generic memory-space object
from .memspace import MemorySpace
# register file objects
from .regfile import RegisterBits
from .regfile import Register
from .regfile import RegisterFile
| 19.083333
| 38
| 0.829694
|
ac6d95a06ca58025d8e0b4c59463f6fa7b7ac887
| 3,124
|
py
|
Python
|
sympy/printing/python.py
|
bigfooted/sympy
|
1fb2490fa2fa9b476da450f02a25b03c1dc07cf0
|
[
"BSD-3-Clause"
] | 603
|
2020-12-23T13:49:32.000Z
|
2022-03-31T23:38:03.000Z
|
sympy/printing/python.py
|
bigfooted/sympy
|
1fb2490fa2fa9b476da450f02a25b03c1dc07cf0
|
[
"BSD-3-Clause"
] | 387
|
2020-12-15T14:54:04.000Z
|
2022-03-31T07:00:21.000Z
|
sympy/printing/python.py
|
bigfooted/sympy
|
1fb2490fa2fa9b476da450f02a25b03c1dc07cf0
|
[
"BSD-3-Clause"
] | 35
|
2021-03-26T03:12:04.000Z
|
2022-03-23T10:15:10.000Z
|
import keyword as kw
import sympy
from .repr import ReprPrinter
from .str import StrPrinter
# A list of classes that should be printed using StrPrinter
STRPRINT = ("Add", "Infinity", "Integer", "Mul", "NegativeInfinity",
"Pow", "Zero")
class PythonPrinter(ReprPrinter, StrPrinter):
"""A printer which converts an expression into its Python interpretation."""
def __init__(self, settings=None):
super().__init__(settings)
self.symbols = []
self.functions = []
# Create print methods for classes that should use StrPrinter instead
# of ReprPrinter.
for name in STRPRINT:
f_name = "_print_%s" % name
f = getattr(StrPrinter, f_name)
setattr(PythonPrinter, f_name, f)
def _print_Function(self, expr):
func = expr.func.__name__
if not hasattr(sympy, func) and not func in self.functions:
self.functions.append(func)
return StrPrinter._print_Function(self, expr)
    # procedure (!) for defining symbols which have to be defined in print_python()
def _print_Symbol(self, expr):
symbol = self._str(expr)
if symbol not in self.symbols:
self.symbols.append(symbol)
return StrPrinter._print_Symbol(self, expr)
def _print_module(self, expr):
raise ValueError('Modules in the expression are unacceptable')
def python(expr, **settings):
"""Return Python interpretation of passed expression
(can be passed to the exec() function without any modifications)"""
printer = PythonPrinter(settings)
exprp = printer.doprint(expr)
result = ''
# Returning found symbols and functions
renamings = {}
for symbolname in printer.symbols:
newsymbolname = symbolname
# Escape symbol names that are reserved python keywords
if kw.iskeyword(newsymbolname):
while True:
newsymbolname += "_"
if (newsymbolname not in printer.symbols and
newsymbolname not in printer.functions):
renamings[sympy.Symbol(
symbolname)] = sympy.Symbol(newsymbolname)
break
result += newsymbolname + ' = Symbol(\'' + symbolname + '\')\n'
for functionname in printer.functions:
newfunctionname = functionname
# Escape function names that are reserved python keywords
if kw.iskeyword(newfunctionname):
while True:
newfunctionname += "_"
if (newfunctionname not in printer.symbols and
newfunctionname not in printer.functions):
renamings[sympy.Function(
functionname)] = sympy.Function(newfunctionname)
break
result += newfunctionname + ' = Function(\'' + functionname + '\')\n'
if renamings:
exprp = expr.subs(renamings)
result += 'e = ' + printer._str(exprp)
return result
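# Rough usage sketch (illustrative only): python(Symbol('x') + Symbol('y')) returns a
# string along the lines of "x = Symbol('x')\ny = Symbol('y')\ne = x + y", which can
# be passed to exec() to rebuild the expression as `e`.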
def print_python(expr, **settings):
"""Print output of python() function"""
print(python(expr, **settings))
| 35.5
| 80
| 0.620038
|
748d7141f121f1071e4788807d20fc1fe277aa5a
| 7,906
|
py
|
Python
|
sdk/lusid/api/scopes_api.py
|
finbourne/lusid-sdk-python
|
d238c5c661908639dab57d026966630448bfb0d6
|
[
"MIT"
] | 6
|
2018-06-19T15:50:17.000Z
|
2022-03-26T22:53:16.000Z
|
sdk/lusid/api/scopes_api.py
|
finbourne/lusid-sdk-python-preview
|
6b1ae2c634f4b9a816e070470e9c8e6e06eae0ee
|
[
"MIT"
] | 98
|
2020-04-15T06:05:43.000Z
|
2022-03-01T10:25:25.000Z
|
sdk/lusid/api/scopes_api.py
|
finbourne/lusid-sdk-python-preview
|
6b1ae2c634f4b9a816e070470e9c8e6e06eae0ee
|
[
"MIT"
] | 9
|
2019-09-30T11:19:25.000Z
|
2021-11-17T19:49:59.000Z
|
# coding: utf-8
"""
LUSID API
FINBOURNE Technology # noqa: E501
The version of the OpenAPI document: 0.11.3648
Contact: info@finbourne.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from lusid.api_client import ApiClient
from lusid.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class ScopesApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def list_scopes(self, **kwargs): # noqa: E501
"""[EARLY ACCESS] ListScopes: List Scopes # noqa: E501
List all the scopes that contain data. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_scopes(async_req=True)
>>> result = thread.get()
:param filter: Expression to filter the result set. For example, to filter on the Scope, use \"scope eq 'string'\" Read more about filtering results from LUSID here https://support.lusid.com/filtering-results-from-lusid.
:type filter: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: ResourceListOfScopeDefinition
"""
kwargs['_return_http_data_only'] = True
return self.list_scopes_with_http_info(**kwargs) # noqa: E501
def list_scopes_with_http_info(self, **kwargs): # noqa: E501
"""[EARLY ACCESS] ListScopes: List Scopes # noqa: E501
List all the scopes that contain data. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_scopes_with_http_info(async_req=True)
>>> result = thread.get()
:param filter: Expression to filter the result set. For example, to filter on the Scope, use \"scope eq 'string'\" Read more about filtering results from LUSID here https://support.lusid.com/filtering-results-from-lusid.
:type filter: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without head status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for an a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(ResourceListOfScopeDefinition, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'filter'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method list_scopes" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
if self.api_client.client_side_validation and ('filter' in local_var_params and # noqa: E501
len(local_var_params['filter']) > 2147483647): # noqa: E501
raise ApiValueError("Invalid value for parameter `filter` when calling `list_scopes`, length must be less than or equal to `2147483647`") # noqa: E501
if self.api_client.client_side_validation and ('filter' in local_var_params and # noqa: E501
len(local_var_params['filter']) < 0): # noqa: E501
raise ApiValueError("Invalid value for parameter `filter` when calling `list_scopes`, length must be greater than or equal to `0`") # noqa: E501
if self.api_client.client_side_validation and 'filter' in local_var_params and not re.search(r'^[\s\S]*$', local_var_params['filter']): # noqa: E501
raise ApiValueError("Invalid value for parameter `filter` when calling `list_scopes`, must conform to the pattern `/^[\s\S]*$/`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'filter' in local_var_params and local_var_params['filter'] is not None: # noqa: E501
query_params.append(('filter', local_var_params['filter'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['text/plain', 'application/json', 'text/json']) # noqa: E501
header_params['Accept-Encoding'] = "gzip, deflate, br"
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
response_types_map = {
200: "ResourceListOfScopeDefinition",
400: "LusidValidationProblemDetails",
}
return self.api_client.call_api(
'/api/scopes', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_types_map=response_types_map,
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
| 43.679558
| 254
| 0.605742
|
25038f40daf1252fd827d5ec357322e0d21a070f
| 184,026
|
py
|
Python
|
numpy/ma/tests/test_core.py
|
schaefed/numpy
|
6d5a74261d497f9989c823626ec955efe88348ae
|
[
"BSD-3-Clause"
] | null | null | null |
numpy/ma/tests/test_core.py
|
schaefed/numpy
|
6d5a74261d497f9989c823626ec955efe88348ae
|
[
"BSD-3-Clause"
] | null | null | null |
numpy/ma/tests/test_core.py
|
schaefed/numpy
|
6d5a74261d497f9989c823626ec955efe88348ae
|
[
"BSD-3-Clause"
] | 1
|
2019-08-14T08:08:41.000Z
|
2019-08-14T08:08:41.000Z
|
# pylint: disable-msg=W0400,W0511,W0611,W0612,W0614,R0201,E1102
"""Tests suite for MaskedArray & subclassing.
:author: Pierre Gerard-Marchant
:contact: pierregm_at_uga_dot_edu
"""
from __future__ import division, absolute_import, print_function
__author__ = "Pierre GF Gerard-Marchant"
import warnings
import pickle
import operator
import itertools
from functools import reduce
import numpy as np
import numpy.ma.core
import numpy.core.fromnumeric as fromnumeric
import numpy.core.umath as umath
from numpy.testing import (
run_module_suite, assert_raises, assert_warns, suppress_warnings
)
from numpy import ndarray
from numpy.compat import asbytes, asbytes_nested
from numpy.ma.testutils import (
assert_, assert_array_equal, assert_equal, assert_almost_equal,
assert_equal_records, fail_if_equal, assert_not_equal,
assert_mask_equal
)
from numpy.ma.core import (
MAError, MaskError, MaskType, MaskedArray, abs, absolute, add, all,
allclose, allequal, alltrue, angle, anom, arange, arccos, arccosh, arctan2,
arcsin, arctan, argsort, array, asarray, choose, concatenate,
conjugate, cos, cosh, count, default_fill_value, diag, divide, empty,
empty_like, equal, exp, flatten_mask, filled, fix_invalid,
flatten_structured_array, fromflex, getmask, getmaskarray, greater,
greater_equal, identity, inner, isMaskedArray, less, less_equal, log,
log10, make_mask, make_mask_descr, mask_or, masked, masked_array,
masked_equal, masked_greater, masked_greater_equal, masked_inside,
masked_less, masked_less_equal, masked_not_equal, masked_outside,
masked_print_option, masked_values, masked_where, max, maximum,
maximum_fill_value, min, minimum, minimum_fill_value, mod, multiply,
mvoid, nomask, not_equal, ones, outer, power, product, put, putmask,
ravel, repeat, reshape, resize, shape, sin, sinh, sometrue, sort, sqrt,
subtract, sum, take, tan, tanh, transpose, where, zeros,
)
pi = np.pi
suppress_copy_mask_on_assignment = suppress_warnings()
suppress_copy_mask_on_assignment.filter(
numpy.ma.core.MaskedArrayFutureWarning,
"setting an item on a masked array which has a shared mask will not copy")
class TestMaskedArray(object):
# Base test class for MaskedArrays.
def setup(self):
# Base data definition.
x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
a10 = 10.
m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
xm = masked_array(x, mask=m1)
ym = masked_array(y, mask=m2)
z = np.array([-.5, 0., .5, .8])
zm = masked_array(z, mask=[0, 1, 0, 0])
xf = np.where(m1, 1e+20, x)
xm.set_fill_value(1e+20)
self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf)
def test_basicattributes(self):
# Tests some basic array attributes.
a = array([1, 3, 2])
b = array([1, 3, 2], mask=[1, 0, 1])
assert_equal(a.ndim, 1)
assert_equal(b.ndim, 1)
assert_equal(a.size, 3)
assert_equal(b.size, 3)
assert_equal(a.shape, (3,))
assert_equal(b.shape, (3,))
def test_basic0d(self):
# Checks masking a scalar
x = masked_array(0)
assert_equal(str(x), '0')
x = masked_array(0, mask=True)
assert_equal(str(x), str(masked_print_option))
x = masked_array(0, mask=False)
assert_equal(str(x), '0')
x = array(0, mask=1)
assert_(x.filled().dtype is x._data.dtype)
def test_basic1d(self):
# Test of basic array creation and properties in 1 dimension.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
assert_(not isMaskedArray(x))
assert_(isMaskedArray(xm))
assert_((xm - ym).filled(0).any())
fail_if_equal(xm.mask.astype(int), ym.mask.astype(int))
s = x.shape
assert_equal(np.shape(xm), s)
assert_equal(xm.shape, s)
assert_equal(xm.dtype, x.dtype)
assert_equal(zm.dtype, z.dtype)
assert_equal(xm.size, reduce(lambda x, y:x * y, s))
assert_equal(count(xm), len(m1) - reduce(lambda x, y:x + y, m1))
assert_array_equal(xm, xf)
assert_array_equal(filled(xm, 1.e20), xf)
assert_array_equal(x, xm)
def test_basic2d(self):
# Test of basic array creation and properties in 2 dimensions.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
for s in [(4, 3), (6, 2)]:
x.shape = s
y.shape = s
xm.shape = s
ym.shape = s
xf.shape = s
assert_(not isMaskedArray(x))
assert_(isMaskedArray(xm))
assert_equal(shape(xm), s)
assert_equal(xm.shape, s)
assert_equal(xm.size, reduce(lambda x, y:x * y, s))
assert_equal(count(xm), len(m1) - reduce(lambda x, y:x + y, m1))
assert_equal(xm, xf)
assert_equal(filled(xm, 1.e20), xf)
assert_equal(x, xm)
def test_concatenate_basic(self):
# Tests concatenations.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
# basic concatenation
assert_equal(np.concatenate((x, y)), concatenate((xm, ym)))
assert_equal(np.concatenate((x, y)), concatenate((x, y)))
assert_equal(np.concatenate((x, y)), concatenate((xm, y)))
assert_equal(np.concatenate((x, y, x)), concatenate((x, ym, x)))
def test_concatenate_alongaxis(self):
# Tests concatenations.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
# Concatenation along an axis
s = (3, 4)
x.shape = y.shape = xm.shape = ym.shape = s
assert_equal(xm.mask, np.reshape(m1, s))
assert_equal(ym.mask, np.reshape(m2, s))
xmym = concatenate((xm, ym), 1)
assert_equal(np.concatenate((x, y), 1), xmym)
assert_equal(np.concatenate((xm.mask, ym.mask), 1), xmym._mask)
x = zeros(2)
y = array(ones(2), mask=[False, True])
z = concatenate((x, y))
assert_array_equal(z, [0, 0, 1, 1])
assert_array_equal(z.mask, [False, False, False, True])
z = concatenate((y, x))
assert_array_equal(z, [1, 1, 0, 0])
assert_array_equal(z.mask, [False, True, False, False])
def test_concatenate_flexible(self):
# Tests the concatenation on flexible arrays.
data = masked_array(list(zip(np.random.rand(10),
np.arange(10))),
dtype=[('a', float), ('b', int)])
test = concatenate([data[:5], data[5:]])
assert_equal_records(test, data)
def test_creation_ndmin(self):
# Check the use of ndmin
x = array([1, 2, 3], mask=[1, 0, 0], ndmin=2)
assert_equal(x.shape, (1, 3))
assert_equal(x._data, [[1, 2, 3]])
assert_equal(x._mask, [[1, 0, 0]])
def test_creation_ndmin_from_maskedarray(self):
# Make sure we're not losing the original mask w/ ndmin
x = array([1, 2, 3])
x[-1] = masked
xx = array(x, ndmin=2, dtype=float)
assert_equal(x.shape, x._mask.shape)
assert_equal(xx.shape, xx._mask.shape)
def test_creation_maskcreation(self):
# Tests how masks are initialized at the creation of Maskedarrays.
data = arange(24, dtype=float)
data[[3, 6, 15]] = masked
dma_1 = MaskedArray(data)
assert_equal(dma_1.mask, data.mask)
dma_2 = MaskedArray(dma_1)
assert_equal(dma_2.mask, dma_1.mask)
dma_3 = MaskedArray(dma_1, mask=[1, 0, 0, 0] * 6)
fail_if_equal(dma_3.mask, dma_1.mask)
x = array([1, 2, 3], mask=True)
assert_equal(x._mask, [True, True, True])
x = array([1, 2, 3], mask=False)
assert_equal(x._mask, [False, False, False])
y = array([1, 2, 3], mask=x._mask, copy=False)
assert_(np.may_share_memory(x.mask, y.mask))
y = array([1, 2, 3], mask=x._mask, copy=True)
assert_(not np.may_share_memory(x.mask, y.mask))
def test_creation_with_list_of_maskedarrays(self):
# Tests creating a masked array from a list of masked arrays.
x = array(np.arange(5), mask=[1, 0, 0, 0, 0])
data = array((x, x[::-1]))
assert_equal(data, [[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]])
assert_equal(data._mask, [[1, 0, 0, 0, 0], [0, 0, 0, 0, 1]])
x.mask = nomask
data = array((x, x[::-1]))
assert_equal(data, [[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]])
assert_(data.mask is nomask)
def test_creation_from_ndarray_with_padding(self):
x = np.array([('A', 0)], dtype={'names':['f0','f1'],
'formats':['S4','i8'],
'offsets':[0,8]})
data = array(x) # used to fail due to 'V' padding field in x.dtype.descr
def test_asarray(self):
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
xm.fill_value = -9999
xm._hardmask = True
xmm = asarray(xm)
assert_equal(xmm._data, xm._data)
assert_equal(xmm._mask, xm._mask)
assert_equal(xmm.fill_value, xm.fill_value)
assert_equal(xmm._hardmask, xm._hardmask)
def test_asarray_default_order(self):
# See Issue #6646
m = np.eye(3).T
assert_(not m.flags.c_contiguous)
new_m = asarray(m)
assert_(new_m.flags.c_contiguous)
def test_asarray_enforce_order(self):
# See Issue #6646
m = np.eye(3).T
assert_(not m.flags.c_contiguous)
new_m = asarray(m, order='C')
assert_(new_m.flags.c_contiguous)
def test_fix_invalid(self):
# Checks fix_invalid.
with np.errstate(invalid='ignore'):
data = masked_array([np.nan, 0., 1.], mask=[0, 0, 1])
data_fixed = fix_invalid(data)
assert_equal(data_fixed._data, [data.fill_value, 0., 1.])
assert_equal(data_fixed._mask, [1., 0., 1.])
def test_maskedelement(self):
# Test of masked element
x = arange(6)
x[1] = masked
assert_(str(masked) == '--')
assert_(x[1] is masked)
assert_equal(filled(x[1], 0), 0)
def test_set_element_as_object(self):
# Tests setting elements with object
a = empty(1, dtype=object)
x = (1, 2, 3, 4, 5)
a[0] = x
assert_equal(a[0], x)
assert_(a[0] is x)
import datetime
dt = datetime.datetime.now()
a[0] = dt
assert_(a[0] is dt)
def test_indexing(self):
# Tests conversions and indexing
x1 = np.array([1, 2, 4, 3])
x2 = array(x1, mask=[1, 0, 0, 0])
x3 = array(x1, mask=[0, 1, 0, 1])
x4 = array(x1)
# test conversion to strings
str(x2) # raises?
repr(x2) # raises?
assert_equal(np.sort(x1), sort(x2, endwith=False))
# tests of indexing
assert_(type(x2[1]) is type(x1[1]))
assert_(x1[1] == x2[1])
assert_(x2[0] is masked)
assert_equal(x1[2], x2[2])
assert_equal(x1[2:5], x2[2:5])
assert_equal(x1[:], x2[:])
assert_equal(x1[1:], x3[1:])
x1[2] = 9
x2[2] = 9
assert_equal(x1, x2)
x1[1:3] = 99
x2[1:3] = 99
assert_equal(x1, x2)
x2[1] = masked
assert_equal(x1, x2)
x2[1:3] = masked
assert_equal(x1, x2)
x2[:] = x1
x2[1] = masked
assert_(allequal(getmask(x2), array([0, 1, 0, 0])))
x3[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0])
assert_(allequal(getmask(x3), array([0, 1, 1, 0])))
x4[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0])
assert_(allequal(getmask(x4), array([0, 1, 1, 0])))
assert_(allequal(x4, array([1, 2, 3, 4])))
x1 = np.arange(5) * 1.0
x2 = masked_values(x1, 3.0)
assert_equal(x1, x2)
assert_(allequal(array([0, 0, 0, 1, 0], MaskType), x2.mask))
assert_equal(3.0, x2.fill_value)
x1 = array([1, 'hello', 2, 3], object)
x2 = np.array([1, 'hello', 2, 3], object)
s1 = x1[1]
s2 = x2[1]
assert_equal(type(s2), str)
assert_equal(type(s1), str)
assert_equal(s1, s2)
assert_(x1[1:1].shape == (0,))
def test_matrix_indexing(self):
# Tests conversions and indexing
x1 = np.matrix([[1, 2, 3], [4, 3, 2]])
x2 = array(x1, mask=[[1, 0, 0], [0, 1, 0]])
x3 = array(x1, mask=[[0, 1, 0], [1, 0, 0]])
x4 = array(x1)
# test conversion to strings
str(x2) # raises?
repr(x2) # raises?
# tests of indexing
assert_(type(x2[1, 0]) is type(x1[1, 0]))
assert_(x1[1, 0] == x2[1, 0])
assert_(x2[1, 1] is masked)
assert_equal(x1[0, 2], x2[0, 2])
assert_equal(x1[0, 1:], x2[0, 1:])
assert_equal(x1[:, 2], x2[:, 2])
assert_equal(x1[:], x2[:])
assert_equal(x1[1:], x3[1:])
x1[0, 2] = 9
x2[0, 2] = 9
assert_equal(x1, x2)
x1[0, 1:] = 99
x2[0, 1:] = 99
assert_equal(x1, x2)
x2[0, 1] = masked
assert_equal(x1, x2)
x2[0, 1:] = masked
assert_equal(x1, x2)
x2[0, :] = x1[0, :]
x2[0, 1] = masked
assert_(allequal(getmask(x2), np.array([[0, 1, 0], [0, 1, 0]])))
x3[1, :] = masked_array([1, 2, 3], [1, 1, 0])
assert_(allequal(getmask(x3)[1], array([1, 1, 0])))
assert_(allequal(getmask(x3[1]), array([1, 1, 0])))
x4[1, :] = masked_array([1, 2, 3], [1, 1, 0])
assert_(allequal(getmask(x4[1]), array([1, 1, 0])))
assert_(allequal(x4[1], array([1, 2, 3])))
x1 = np.matrix(np.arange(5) * 1.0)
x2 = masked_values(x1, 3.0)
assert_equal(x1, x2)
assert_(allequal(array([0, 0, 0, 1, 0], MaskType), x2.mask))
assert_equal(3.0, x2.fill_value)
@suppress_copy_mask_on_assignment
def test_copy(self):
# Tests of some subtle points of copying and sizing.
n = [0, 0, 1, 0, 0]
m = make_mask(n)
m2 = make_mask(m)
assert_(m is m2)
m3 = make_mask(m, copy=1)
assert_(m is not m3)
x1 = np.arange(5)
y1 = array(x1, mask=m)
assert_equal(y1._data.__array_interface__, x1.__array_interface__)
assert_(allequal(x1, y1.data))
assert_equal(y1._mask.__array_interface__, m.__array_interface__)
y1a = array(y1)
assert_(y1a._data.__array_interface__ ==
y1._data.__array_interface__)
assert_(y1a.mask is y1.mask)
y2 = array(x1, mask=m3)
assert_(y2._data.__array_interface__ == x1.__array_interface__)
assert_(y2._mask.__array_interface__ == m3.__array_interface__)
assert_(y2[2] is masked)
y2[2] = 9
assert_(y2[2] is not masked)
assert_(y2._mask.__array_interface__ == m3.__array_interface__)
assert_(allequal(y2.mask, 0))
y2a = array(x1, mask=m, copy=1)
assert_(y2a._data.__array_interface__ != x1.__array_interface__)
#assert_( y2a.mask is not m)
assert_(y2a._mask.__array_interface__ != m.__array_interface__)
assert_(y2a[2] is masked)
y2a[2] = 9
assert_(y2a[2] is not masked)
#assert_( y2a.mask is not m)
assert_(y2a._mask.__array_interface__ != m.__array_interface__)
assert_(allequal(y2a.mask, 0))
y3 = array(x1 * 1.0, mask=m)
assert_(filled(y3).dtype is (x1 * 1.0).dtype)
x4 = arange(4)
x4[2] = masked
y4 = resize(x4, (8,))
assert_equal(concatenate([x4, x4]), y4)
assert_equal(getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0])
y5 = repeat(x4, (2, 2, 2, 2), axis=0)
assert_equal(y5, [0, 0, 1, 1, 2, 2, 3, 3])
y6 = repeat(x4, 2, axis=0)
assert_equal(y5, y6)
y7 = x4.repeat((2, 2, 2, 2), axis=0)
assert_equal(y5, y7)
y8 = x4.repeat(2, 0)
assert_equal(y5, y8)
y9 = x4.copy()
assert_equal(y9._data, x4._data)
assert_equal(y9._mask, x4._mask)
x = masked_array([1, 2, 3], mask=[0, 1, 0])
# Copy is False by default
y = masked_array(x)
assert_equal(y._data.ctypes.data, x._data.ctypes.data)
assert_equal(y._mask.ctypes.data, x._mask.ctypes.data)
y = masked_array(x, copy=True)
assert_not_equal(y._data.ctypes.data, x._data.ctypes.data)
assert_not_equal(y._mask.ctypes.data, x._mask.ctypes.data)
def test_copy_on_python_builtins(self):
# Tests copy works on python builtins (issue#8019)
assert_(isMaskedArray(np.ma.copy([1,2,3])))
assert_(isMaskedArray(np.ma.copy((1,2,3))))
def test_copy_immutable(self):
# Tests that the copy method is immutable, GitHub issue #5247
a = np.ma.array([1, 2, 3])
b = np.ma.array([4, 5, 6])
a_copy_method = a.copy
b.copy
assert_equal(a_copy_method(), [1, 2, 3])
def test_deepcopy(self):
from copy import deepcopy
a = array([0, 1, 2], mask=[False, True, False])
copied = deepcopy(a)
assert_equal(copied.mask, a.mask)
assert_not_equal(id(a._mask), id(copied._mask))
copied[1] = 1
assert_equal(copied.mask, [0, 0, 0])
assert_equal(a.mask, [0, 1, 0])
copied = deepcopy(a)
assert_equal(copied.mask, a.mask)
copied.mask[1] = False
assert_equal(copied.mask, [0, 0, 0])
assert_equal(a.mask, [0, 1, 0])
def test_str_repr(self):
a = array([0, 1, 2], mask=[False, True, False])
assert_equal(str(a), '[0 -- 2]')
assert_equal(repr(a), 'masked_array(data = [0 -- 2],\n'
' mask = [False True False],\n'
' fill_value = 999999)\n')
a = np.ma.arange(2000)
a[1:50] = np.ma.masked
assert_equal(
repr(a),
'masked_array(data = [0 -- -- ..., 1997 1998 1999],\n'
' mask = [False True True ..., False False False],\n'
' fill_value = 999999)\n'
)
def test_pickling(self):
# Tests pickling
for dtype in (int, float, str, object):
a = arange(10).astype(dtype)
a.fill_value = 999
masks = ([0, 0, 0, 1, 0, 1, 0, 1, 0, 1], # partially masked
True, # Fully masked
False) # Fully unmasked
for mask in masks:
a.mask = mask
a_pickled = pickle.loads(a.dumps())
assert_equal(a_pickled._mask, a._mask)
assert_equal(a_pickled._data, a._data)
if dtype in (object, int):
assert_equal(a_pickled.fill_value, 999)
else:
assert_equal(a_pickled.fill_value, dtype(999))
assert_array_equal(a_pickled.mask, mask)
def test_pickling_subbaseclass(self):
# Test pickling w/ a subclass of ndarray
a = array(np.matrix(list(range(10))), mask=[1, 0, 1, 0, 0] * 2)
a_pickled = pickle.loads(a.dumps())
assert_equal(a_pickled._mask, a._mask)
assert_equal(a_pickled, a)
assert_(isinstance(a_pickled._data, np.matrix))
def test_pickling_maskedconstant(self):
# Test pickling MaskedConstant
mc = np.ma.masked
mc_pickled = pickle.loads(mc.dumps())
assert_equal(mc_pickled._baseclass, mc._baseclass)
assert_equal(mc_pickled._mask, mc._mask)
assert_equal(mc_pickled._data, mc._data)
def test_pickling_wstructured(self):
# Tests pickling w/ structured array
a = array([(1, 1.), (2, 2.)], mask=[(0, 0), (0, 1)],
dtype=[('a', int), ('b', float)])
a_pickled = pickle.loads(a.dumps())
assert_equal(a_pickled._mask, a._mask)
assert_equal(a_pickled, a)
def test_pickling_keepalignment(self):
# Tests pickling w/ F_CONTIGUOUS arrays
a = arange(10)
a.shape = (-1, 2)
b = a.T
test = pickle.loads(pickle.dumps(b))
assert_equal(test, b)
def test_single_element_subscript(self):
# Tests single element subscripts of Maskedarrays.
a = array([1, 3, 2])
b = array([1, 3, 2], mask=[1, 0, 1])
assert_equal(a[0].shape, ())
assert_equal(b[0].shape, ())
assert_equal(b[1].shape, ())
def test_topython(self):
# Tests some communication issues with Python.
assert_equal(1, int(array(1)))
assert_equal(1.0, float(array(1)))
assert_equal(1, int(array([[[1]]])))
assert_equal(1.0, float(array([[1]])))
assert_raises(TypeError, float, array([1, 1]))
with suppress_warnings() as sup:
sup.filter(UserWarning, 'Warning: converting a masked element')
assert_(np.isnan(float(array([1], mask=[1]))))
a = array([1, 2, 3], mask=[1, 0, 0])
assert_raises(TypeError, lambda: float(a))
assert_equal(float(a[-1]), 3.)
assert_(np.isnan(float(a[0])))
assert_raises(TypeError, int, a)
assert_equal(int(a[-1]), 3)
assert_raises(MAError, lambda:int(a[0]))
def test_oddfeatures_1(self):
# Test of other odd features
x = arange(20)
x = x.reshape(4, 5)
x.flat[5] = 12
assert_(x[1, 0] == 12)
z = x + 10j * x
assert_equal(z.real, x)
assert_equal(z.imag, 10 * x)
assert_equal((z * conjugate(z)).real, 101 * x * x)
z.imag[...] = 0.0
x = arange(10)
x[3] = masked
assert_(str(x[3]) == str(masked))
c = x >= 8
assert_(count(where(c, masked, masked)) == 0)
assert_(shape(where(c, masked, masked)) == c.shape)
z = masked_where(c, x)
assert_(z.dtype is x.dtype)
assert_(z[3] is masked)
assert_(z[4] is not masked)
assert_(z[7] is not masked)
assert_(z[8] is masked)
assert_(z[9] is masked)
assert_equal(x, z)
def test_oddfeatures_2(self):
# Tests some more features.
x = array([1., 2., 3., 4., 5.])
c = array([1, 1, 1, 0, 0])
x[2] = masked
z = where(c, x, -x)
assert_equal(z, [1., 2., 0., -4., -5])
c[0] = masked
z = where(c, x, -x)
assert_equal(z, [1., 2., 0., -4., -5])
assert_(z[0] is masked)
assert_(z[1] is not masked)
assert_(z[2] is masked)
@suppress_copy_mask_on_assignment
def test_oddfeatures_3(self):
# Tests some generic features
atest = array([10], mask=True)
btest = array([20])
idx = atest.mask
atest[idx] = btest[idx]
assert_equal(atest, [20])
def test_filled_with_object_dtype(self):
a = np.ma.masked_all(1, dtype='O')
assert_equal(a.filled('x')[0], 'x')
def test_filled_with_flexible_dtype(self):
# Test filled w/ flexible dtype
flexi = array([(1, 1, 1)],
dtype=[('i', int), ('s', '|S8'), ('f', float)])
flexi[0] = masked
assert_equal(flexi.filled(),
np.array([(default_fill_value(0),
default_fill_value('0'),
default_fill_value(0.),)], dtype=flexi.dtype))
flexi[0] = masked
assert_equal(flexi.filled(1),
np.array([(1, '1', 1.)], dtype=flexi.dtype))
def test_filled_with_mvoid(self):
# Test filled w/ mvoid
ndtype = [('a', int), ('b', float)]
a = mvoid((1, 2.), mask=[(0, 1)], dtype=ndtype)
# Filled using default
test = a.filled()
assert_equal(tuple(test), (1, default_fill_value(1.)))
# Explicit fill_value
test = a.filled((-1, -1))
assert_equal(tuple(test), (1, -1))
# Using predefined filling values
a.fill_value = (-999, -999)
assert_equal(tuple(a.filled()), (1, -999))
def test_filled_with_nested_dtype(self):
# Test filled w/ nested dtype
ndtype = [('A', int), ('B', [('BA', int), ('BB', int)])]
a = array([(1, (1, 1)), (2, (2, 2))],
mask=[(0, (1, 0)), (0, (0, 1))], dtype=ndtype)
test = a.filled(0)
control = np.array([(1, (0, 1)), (2, (2, 0))], dtype=ndtype)
assert_equal(test, control)
test = a['B'].filled(0)
control = np.array([(0, 1), (2, 0)], dtype=a['B'].dtype)
assert_equal(test, control)
# test if mask gets set correctly (see #6760)
Z = numpy.ma.zeros(2, numpy.dtype([("A", "(2,2)i1,(2,2)i1", (2,2))]))
assert_equal(Z.data.dtype, numpy.dtype([('A', [('f0', 'i1', (2, 2)),
('f1', 'i1', (2, 2))], (2, 2))]))
assert_equal(Z.mask.dtype, numpy.dtype([('A', [('f0', '?', (2, 2)),
('f1', '?', (2, 2))], (2, 2))]))
def test_filled_with_f_order(self):
# Test filled w/ F-contiguous array
a = array(np.array([(0, 1, 2), (4, 5, 6)], order='F'),
mask=np.array([(0, 0, 1), (1, 0, 0)], order='F'),
order='F') # this is currently ignored
assert_(a.flags['F_CONTIGUOUS'])
assert_(a.filled(0).flags['F_CONTIGUOUS'])
def test_optinfo_propagation(self):
# Checks that _optinfo dictionary isn't back-propagated
x = array([1, 2, 3, ], dtype=float)
x._optinfo['info'] = '???'
y = x.copy()
assert_equal(y._optinfo['info'], '???')
y._optinfo['info'] = '!!!'
assert_equal(x._optinfo['info'], '???')
def test_optinfo_forward_propagation(self):
a = array([1,2,2,4])
a._optinfo["key"] = "value"
assert_equal(a._optinfo["key"], (a == 2)._optinfo["key"])
assert_equal(a._optinfo["key"], (a != 2)._optinfo["key"])
assert_equal(a._optinfo["key"], (a > 2)._optinfo["key"])
assert_equal(a._optinfo["key"], (a >= 2)._optinfo["key"])
assert_equal(a._optinfo["key"], (a <= 2)._optinfo["key"])
assert_equal(a._optinfo["key"], (a + 2)._optinfo["key"])
assert_equal(a._optinfo["key"], (a - 2)._optinfo["key"])
assert_equal(a._optinfo["key"], (a * 2)._optinfo["key"])
assert_equal(a._optinfo["key"], (a / 2)._optinfo["key"])
assert_equal(a._optinfo["key"], a[:2]._optinfo["key"])
assert_equal(a._optinfo["key"], a[[0,0,2]]._optinfo["key"])
assert_equal(a._optinfo["key"], np.exp(a)._optinfo["key"])
assert_equal(a._optinfo["key"], np.abs(a)._optinfo["key"])
assert_equal(a._optinfo["key"], array(a, copy=True)._optinfo["key"])
assert_equal(a._optinfo["key"], np.zeros_like(a)._optinfo["key"])
def test_fancy_printoptions(self):
# Test printing a masked array w/ fancy dtype.
fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
test = array([(1, (2, 3.0)), (4, (5, 6.0))],
mask=[(1, (0, 1)), (0, (1, 0))],
dtype=fancydtype)
control = "[(--, (2, --)) (4, (--, 6.0))]"
assert_equal(str(test), control)
# Test 0-d array with multi-dimensional dtype
t_2d0 = masked_array(data = (0, [[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]],
0.0),
mask = (False, [[True, False, True],
[False, False, True]],
False),
dtype = "int, (2,3)float, float")
control = "(0, [[--, 0.0, --], [0.0, 0.0, --]], 0.0)"
assert_equal(str(t_2d0), control)
def test_flatten_structured_array(self):
# Test flatten_structured_array on arrays
# On ndarray
ndtype = [('a', int), ('b', float)]
a = np.array([(1, 1), (2, 2)], dtype=ndtype)
test = flatten_structured_array(a)
control = np.array([[1., 1.], [2., 2.]], dtype=float)
assert_equal(test, control)
assert_equal(test.dtype, control.dtype)
# On masked_array
a = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype)
test = flatten_structured_array(a)
control = array([[1., 1.], [2., 2.]],
mask=[[0, 1], [1, 0]], dtype=float)
assert_equal(test, control)
assert_equal(test.dtype, control.dtype)
assert_equal(test.mask, control.mask)
# On masked array with nested structure
ndtype = [('a', int), ('b', [('ba', int), ('bb', float)])]
a = array([(1, (1, 1.1)), (2, (2, 2.2))],
mask=[(0, (1, 0)), (1, (0, 1))], dtype=ndtype)
test = flatten_structured_array(a)
control = array([[1., 1., 1.1], [2., 2., 2.2]],
mask=[[0, 1, 0], [1, 0, 1]], dtype=float)
assert_equal(test, control)
assert_equal(test.dtype, control.dtype)
assert_equal(test.mask, control.mask)
# Keeping the initial shape
ndtype = [('a', int), ('b', float)]
a = np.array([[(1, 1), ], [(2, 2), ]], dtype=ndtype)
test = flatten_structured_array(a)
control = np.array([[[1., 1.], ], [[2., 2.], ]], dtype=float)
assert_equal(test, control)
assert_equal(test.dtype, control.dtype)
def test_void0d(self):
# Test creating a mvoid object
ndtype = [('a', int), ('b', int)]
a = np.array([(1, 2,)], dtype=ndtype)[0]
f = mvoid(a)
assert_(isinstance(f, mvoid))
a = masked_array([(1, 2)], mask=[(1, 0)], dtype=ndtype)[0]
assert_(isinstance(a, mvoid))
a = masked_array([(1, 2), (1, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype)
f = mvoid(a._data[0], a._mask[0])
assert_(isinstance(f, mvoid))
def test_mvoid_getitem(self):
# Test mvoid.__getitem__
ndtype = [('a', int), ('b', int)]
a = masked_array([(1, 2,), (3, 4)], mask=[(0, 0), (1, 0)],
dtype=ndtype)
# w/o mask
f = a[0]
assert_(isinstance(f, mvoid))
assert_equal((f[0], f['a']), (1, 1))
assert_equal(f['b'], 2)
# w/ mask
f = a[1]
assert_(isinstance(f, mvoid))
assert_(f[0] is masked)
assert_(f['a'] is masked)
assert_equal(f[1], 4)
# exotic dtype
A = masked_array(data=[([0,1],)],
mask=[([True, False],)],
dtype=[("A", ">i2", (2,))])
assert_equal(A[0]["A"], A["A"][0])
assert_equal(A[0]["A"], masked_array(data=[0, 1],
mask=[True, False], dtype=">i2"))
def test_mvoid_iter(self):
# Test iteration on __getitem__
ndtype = [('a', int), ('b', int)]
a = masked_array([(1, 2,), (3, 4)], mask=[(0, 0), (1, 0)],
dtype=ndtype)
# w/o mask
assert_equal(list(a[0]), [1, 2])
# w/ mask
assert_equal(list(a[1]), [masked, 4])
def test_mvoid_print(self):
# Test printing a mvoid
mx = array([(1, 1), (2, 2)], dtype=[('a', int), ('b', int)])
assert_equal(str(mx[0]), "(1, 1)")
mx['b'][0] = masked
ini_display = masked_print_option._display
masked_print_option.set_display("-X-")
try:
assert_equal(str(mx[0]), "(1, -X-)")
assert_equal(repr(mx[0]), "(1, -X-)")
finally:
masked_print_option.set_display(ini_display)
# also check if there are object datatypes (see gh-7493)
mx = array([(1,), (2,)], dtype=[('a', 'O')])
assert_equal(str(mx[0]), "(1,)")
def test_mvoid_multidim_print(self):
# regression test for gh-6019
t_ma = masked_array(data = [([1, 2, 3],)],
mask = [([False, True, False],)],
fill_value = ([999999, 999999, 999999],),
dtype = [('a', '<i4', (3,))])
assert_(str(t_ma[0]) == "([1, --, 3],)")
assert_(repr(t_ma[0]) == "([1, --, 3],)")
# additional tests with structured arrays
t_2d = masked_array(data = [([[1, 2], [3,4]],)],
mask = [([[False, True], [True, False]],)],
dtype = [('a', '<i4', (2,2))])
assert_(str(t_2d[0]) == "([[1, --], [--, 4]],)")
assert_(repr(t_2d[0]) == "([[1, --], [--, 4]],)")
t_0d = masked_array(data = [(1,2)],
mask = [(True,False)],
dtype = [('a', '<i4'), ('b', '<i4')])
assert_(str(t_0d[0]) == "(--, 2)")
assert_(repr(t_0d[0]) == "(--, 2)")
t_2d = masked_array(data = [([[1, 2], [3,4]], 1)],
mask = [([[False, True], [True, False]], False)],
dtype = [('a', '<i4', (2,2)), ('b', float)])
assert_(str(t_2d[0]) == "([[1, --], [--, 4]], 1.0)")
assert_(repr(t_2d[0]) == "([[1, --], [--, 4]], 1.0)")
t_ne = masked_array(data=[(1, (1, 1))],
mask=[(True, (True, False))],
dtype = [('a', '<i4'), ('b', 'i4,i4')])
assert_(str(t_ne[0]) == "(--, (--, 1))")
assert_(repr(t_ne[0]) == "(--, (--, 1))")
def test_object_with_array(self):
mx1 = masked_array([1.], mask=[True])
mx2 = masked_array([1., 2.])
mx = masked_array([mx1, mx2], mask=[False, True])
assert_(mx[0] is mx1)
assert_(mx[1] is not mx2)
assert_(np.all(mx[1].data == mx2.data))
assert_(np.all(mx[1].mask))
# check that we return a view.
mx[1].data[0] = 0.
assert_(mx2[0] == 0.)
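# Arithmetic on masked arrays combines the operand masks with logical OR, and the
# "domained" operations (divide, log, sqrt, ...) additionally mask results whose
# inputs fall outside the operation's domain.  A minimal sketch of both rules
# (the _-prefixed names are throwaway illustrations):
_a = masked_array([1., 2., 4.], mask=[0, 1, 0])
_b = masked_array([2., 2., 0.], mask=[0, 0, 0])
assert_equal((_a + _b).mask, [0, 1, 0])        # OR of the operand masks
assert_equal((_a / _b).mask, [0, 1, 1])        # division by zero is masked as well
del _a, _b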
class TestMaskedArrayArithmetic(object):
    # Tests arithmetic operations on MaskedArrays.
def setup(self):
# Base data definition.
x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
a10 = 10.
m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
xm = masked_array(x, mask=m1)
ym = masked_array(y, mask=m2)
z = np.array([-.5, 0., .5, .8])
zm = masked_array(z, mask=[0, 1, 0, 0])
xf = np.where(m1, 1e+20, x)
xm.set_fill_value(1e+20)
self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf)
self.err_status = np.geterr()
np.seterr(divide='ignore', invalid='ignore')
def teardown(self):
np.seterr(**self.err_status)
def test_basic_arithmetic(self):
# Test of basic arithmetic.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
a2d = array([[1, 2], [0, 4]])
a2dm = masked_array(a2d, [[0, 0], [1, 0]])
assert_equal(a2d * a2d, a2d * a2dm)
assert_equal(a2d + a2d, a2d + a2dm)
assert_equal(a2d - a2d, a2d - a2dm)
for s in [(12,), (4, 3), (2, 6)]:
x = x.reshape(s)
y = y.reshape(s)
xm = xm.reshape(s)
ym = ym.reshape(s)
xf = xf.reshape(s)
assert_equal(-x, -xm)
assert_equal(x + y, xm + ym)
assert_equal(x - y, xm - ym)
assert_equal(x * y, xm * ym)
assert_equal(x / y, xm / ym)
assert_equal(a10 + y, a10 + ym)
assert_equal(a10 - y, a10 - ym)
assert_equal(a10 * y, a10 * ym)
assert_equal(a10 / y, a10 / ym)
assert_equal(x + a10, xm + a10)
assert_equal(x - a10, xm - a10)
assert_equal(x * a10, xm * a10)
assert_equal(x / a10, xm / a10)
assert_equal(x ** 2, xm ** 2)
assert_equal(abs(x) ** 2.5, abs(xm) ** 2.5)
assert_equal(x ** y, xm ** ym)
assert_equal(np.add(x, y), add(xm, ym))
assert_equal(np.subtract(x, y), subtract(xm, ym))
assert_equal(np.multiply(x, y), multiply(xm, ym))
assert_equal(np.divide(x, y), divide(xm, ym))
def test_divide_on_different_shapes(self):
x = arange(6, dtype=float)
x.shape = (2, 3)
y = arange(3, dtype=float)
z = x / y
assert_equal(z, [[-1., 1., 1.], [-1., 4., 2.5]])
assert_equal(z.mask, [[1, 0, 0], [1, 0, 0]])
z = x / y[None,:]
assert_equal(z, [[-1., 1., 1.], [-1., 4., 2.5]])
assert_equal(z.mask, [[1, 0, 0], [1, 0, 0]])
y = arange(2, dtype=float)
z = x / y[:, None]
assert_equal(z, [[-1., -1., -1.], [3., 4., 5.]])
assert_equal(z.mask, [[1, 1, 1], [0, 0, 0]])
def test_mixed_arithmetic(self):
# Tests mixed arithmetics.
na = np.array([1])
ma = array([1])
assert_(isinstance(na + ma, MaskedArray))
assert_(isinstance(ma + na, MaskedArray))
def test_limits_arithmetic(self):
tiny = np.finfo(float).tiny
a = array([tiny, 1. / tiny, 0.])
assert_equal(getmaskarray(a / 2), [0, 0, 0])
assert_equal(getmaskarray(2 / a), [1, 0, 1])
def test_masked_singleton_arithmetic(self):
# Tests some scalar arithmetics on MaskedArrays.
# Masked singleton should remain masked no matter what
xm = array(0, mask=1)
assert_((1 / array(0)).mask)
assert_((1 + xm).mask)
assert_((-xm).mask)
assert_(maximum(xm, xm).mask)
assert_(minimum(xm, xm).mask)
def test_masked_singleton_equality(self):
# Tests (in)equality on masked singleton
a = array([1, 2, 3], mask=[1, 1, 0])
assert_((a[0] == 0) is masked)
assert_((a[0] != 0) is masked)
assert_equal((a[-1] == 0), False)
assert_equal((a[-1] != 0), True)
def test_arithmetic_with_masked_singleton(self):
# Checks that there's no collapsing to masked
x = masked_array([1, 2])
y = x * masked
assert_equal(y.shape, x.shape)
assert_equal(y._mask, [True, True])
y = x[0] * masked
assert_(y is masked)
y = x + masked
assert_equal(y.shape, x.shape)
assert_equal(y._mask, [True, True])
def test_arithmetic_with_masked_singleton_on_1d_singleton(self):
# Check that we're not losing the shape of a singleton
x = masked_array([1, ])
y = x + masked
assert_equal(y.shape, x.shape)
assert_equal(y.mask, [True, ])
def test_scalar_arithmetic(self):
x = array(0, mask=0)
assert_equal(x.filled().ctypes.data, x.ctypes.data)
# Make sure we don't lose the shape in some circumstances
xm = array((0, 0)) / 0.
assert_equal(xm.shape, (2,))
assert_equal(xm.mask, [1, 1])
def test_basic_ufuncs(self):
# Test various functions such as sin, cos.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
assert_equal(np.cos(x), cos(xm))
assert_equal(np.cosh(x), cosh(xm))
assert_equal(np.sin(x), sin(xm))
assert_equal(np.sinh(x), sinh(xm))
assert_equal(np.tan(x), tan(xm))
assert_equal(np.tanh(x), tanh(xm))
assert_equal(np.sqrt(abs(x)), sqrt(xm))
assert_equal(np.log(abs(x)), log(xm))
assert_equal(np.log10(abs(x)), log10(xm))
assert_equal(np.exp(x), exp(xm))
assert_equal(np.arcsin(z), arcsin(zm))
assert_equal(np.arccos(z), arccos(zm))
assert_equal(np.arctan(z), arctan(zm))
assert_equal(np.arctan2(x, y), arctan2(xm, ym))
assert_equal(np.absolute(x), absolute(xm))
assert_equal(np.angle(x + 1j*y), angle(xm + 1j*ym))
assert_equal(np.angle(x + 1j*y, deg=True), angle(xm + 1j*ym, deg=True))
assert_equal(np.equal(x, y), equal(xm, ym))
assert_equal(np.not_equal(x, y), not_equal(xm, ym))
assert_equal(np.less(x, y), less(xm, ym))
assert_equal(np.greater(x, y), greater(xm, ym))
assert_equal(np.less_equal(x, y), less_equal(xm, ym))
assert_equal(np.greater_equal(x, y), greater_equal(xm, ym))
assert_equal(np.conjugate(x), conjugate(xm))
def test_count_func(self):
# Tests count
assert_equal(1, count(1))
assert_equal(0, array(1, mask=[1]))
ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0])
res = count(ott)
assert_(res.dtype.type is np.intp)
assert_equal(3, res)
ott = ott.reshape((2, 2))
res = count(ott)
assert_(res.dtype.type is np.intp)
assert_equal(3, res)
res = count(ott, 0)
assert_(isinstance(res, ndarray))
assert_equal([1, 2], res)
assert_(getmask(res) is nomask)
ott = array([0., 1., 2., 3.])
res = count(ott, 0)
assert_(isinstance(res, ndarray))
assert_(res.dtype.type is np.intp)
assert_raises(np.AxisError, ott.count, axis=1)
def test_count_on_python_builtins(self):
# Tests count works on python builtins (issue#8019)
assert_equal(3, count([1,2,3]))
assert_equal(2, count((1,2)))
def test_minmax_func(self):
# Tests minimum and maximum.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
        # the builtin max/min don't work on shaped arrays, so compare on raveled data
xr = np.ravel(x)
xmr = ravel(xm)
# following are true because of careful selection of data
assert_equal(max(xr), maximum.reduce(xmr))
assert_equal(min(xr), minimum.reduce(xmr))
assert_equal(minimum([1, 2, 3], [4, 0, 9]), [1, 0, 3])
assert_equal(maximum([1, 2, 3], [4, 0, 9]), [4, 2, 9])
x = arange(5)
y = arange(5) - 2
x[3] = masked
y[0] = masked
assert_equal(minimum(x, y), where(less(x, y), x, y))
assert_equal(maximum(x, y), where(greater(x, y), x, y))
assert_(minimum.reduce(x) == 0)
assert_(maximum.reduce(x) == 4)
x = arange(4).reshape(2, 2)
x[-1, -1] = masked
assert_equal(maximum.reduce(x, axis=None), 2)
def test_minimummaximum_func(self):
a = np.ones((2, 2))
aminimum = minimum(a, a)
assert_(isinstance(aminimum, MaskedArray))
assert_equal(aminimum, np.minimum(a, a))
aminimum = minimum.outer(a, a)
assert_(isinstance(aminimum, MaskedArray))
assert_equal(aminimum, np.minimum.outer(a, a))
amaximum = maximum(a, a)
assert_(isinstance(amaximum, MaskedArray))
assert_equal(amaximum, np.maximum(a, a))
amaximum = maximum.outer(a, a)
assert_(isinstance(amaximum, MaskedArray))
assert_equal(amaximum, np.maximum.outer(a, a))
def test_minmax_reduce(self):
# Test np.min/maximum.reduce on array w/ full False mask
a = array([1, 2, 3], mask=[False, False, False])
b = np.maximum.reduce(a)
assert_equal(b, 3)
def test_minmax_funcs_with_output(self):
# Tests the min/max functions with explicit outputs
mask = np.random.rand(12).round()
xm = array(np.random.uniform(0, 10, 12), mask=mask)
xm.shape = (3, 4)
for funcname in ('min', 'max'):
# Initialize
npfunc = getattr(np, funcname)
mafunc = getattr(numpy.ma.core, funcname)
# Use the np version
nout = np.empty((4,), dtype=int)
try:
result = npfunc(xm, axis=0, out=nout)
except MaskError:
pass
nout = np.empty((4,), dtype=float)
result = npfunc(xm, axis=0, out=nout)
assert_(result is nout)
# Use the ma version
nout.fill(-999)
result = mafunc(xm, axis=0, out=nout)
assert_(result is nout)
def test_minmax_methods(self):
# Additional tests on max/min
(_, _, _, _, _, xm, _, _, _, _) = self.d
xm.shape = (xm.size,)
assert_equal(xm.max(), 10)
assert_(xm[0].max() is masked)
assert_(xm[0].max(0) is masked)
assert_(xm[0].max(-1) is masked)
assert_equal(xm.min(), -10.)
assert_(xm[0].min() is masked)
assert_(xm[0].min(0) is masked)
assert_(xm[0].min(-1) is masked)
assert_equal(xm.ptp(), 20.)
assert_(xm[0].ptp() is masked)
assert_(xm[0].ptp(0) is masked)
assert_(xm[0].ptp(-1) is masked)
x = array([1, 2, 3], mask=True)
assert_(x.min() is masked)
assert_(x.max() is masked)
assert_(x.ptp() is masked)
def test_addsumprod(self):
# Tests add, sum, product.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
assert_equal(np.add.reduce(x), add.reduce(x))
assert_equal(np.add.accumulate(x), add.accumulate(x))
assert_equal(4, sum(array(4), axis=0))
assert_equal(4, sum(array(4), axis=0))
assert_equal(np.sum(x, axis=0), sum(x, axis=0))
assert_equal(np.sum(filled(xm, 0), axis=0), sum(xm, axis=0))
assert_equal(np.sum(x, 0), sum(x, 0))
assert_equal(np.product(x, axis=0), product(x, axis=0))
assert_equal(np.product(x, 0), product(x, 0))
assert_equal(np.product(filled(xm, 1), axis=0), product(xm, axis=0))
s = (3, 4)
x.shape = y.shape = xm.shape = ym.shape = s
if len(s) > 1:
assert_equal(np.concatenate((x, y), 1), concatenate((xm, ym), 1))
assert_equal(np.add.reduce(x, 1), add.reduce(x, 1))
assert_equal(np.sum(x, 1), sum(x, 1))
assert_equal(np.product(x, 1), product(x, 1))
def test_binops_d2D(self):
# Test binary operations on 2D data
a = array([[1.], [2.], [3.]], mask=[[False], [True], [True]])
b = array([[2., 3.], [4., 5.], [6., 7.]])
test = a * b
control = array([[2., 3.], [2., 2.], [3., 3.]],
mask=[[0, 0], [1, 1], [1, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
test = b * a
control = array([[2., 3.], [4., 5.], [6., 7.]],
mask=[[0, 0], [1, 1], [1, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
a = array([[1.], [2.], [3.]])
b = array([[2., 3.], [4., 5.], [6., 7.]],
mask=[[0, 0], [0, 0], [0, 1]])
test = a * b
control = array([[2, 3], [8, 10], [18, 3]],
mask=[[0, 0], [0, 0], [0, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
test = b * a
control = array([[2, 3], [8, 10], [18, 7]],
mask=[[0, 0], [0, 0], [0, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
def test_domained_binops_d2D(self):
# Test domained binary operations on 2D data
a = array([[1.], [2.], [3.]], mask=[[False], [True], [True]])
b = array([[2., 3.], [4., 5.], [6., 7.]])
test = a / b
control = array([[1. / 2., 1. / 3.], [2., 2.], [3., 3.]],
mask=[[0, 0], [1, 1], [1, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
test = b / a
control = array([[2. / 1., 3. / 1.], [4., 5.], [6., 7.]],
mask=[[0, 0], [1, 1], [1, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
a = array([[1.], [2.], [3.]])
b = array([[2., 3.], [4., 5.], [6., 7.]],
mask=[[0, 0], [0, 0], [0, 1]])
test = a / b
control = array([[1. / 2, 1. / 3], [2. / 4, 2. / 5], [3. / 6, 3]],
mask=[[0, 0], [0, 0], [0, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
test = b / a
control = array([[2 / 1., 3 / 1.], [4 / 2., 5 / 2.], [6 / 3., 7]],
mask=[[0, 0], [0, 0], [0, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
def test_noshrinking(self):
# Check that we don't shrink a mask when not wanted
# Binary operations
a = masked_array([1., 2., 3.], mask=[False, False, False],
shrink=False)
b = a + 1
assert_equal(b.mask, [0, 0, 0])
# In place binary operation
a += 1
assert_equal(a.mask, [0, 0, 0])
# Domained binary operation
b = a / 1.
assert_equal(b.mask, [0, 0, 0])
# In place binary operation
a /= 1.
assert_equal(a.mask, [0, 0, 0])
def test_ufunc_nomask(self):
        # check the case where ufuncs should set the mask to False
m = np.ma.array([1])
# check we don't get array([False], dtype=bool)
assert_equal(np.true_divide(m, 5).mask.shape, ())
    def test_noshrink_on_creation(self):
# Check that the mask is not shrunk on array creation when not wanted
a = np.ma.masked_values([1., 2.5, 3.1], 1.5, shrink=False)
assert_equal(a.mask, [0, 0, 0])
def test_mod(self):
# Tests mod
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
assert_equal(mod(x, y), mod(xm, ym))
test = mod(ym, xm)
assert_equal(test, np.mod(ym, xm))
assert_equal(test.mask, mask_or(xm.mask, ym.mask))
test = mod(xm, ym)
assert_equal(test, np.mod(xm, ym))
assert_equal(test.mask, mask_or(mask_or(xm.mask, ym.mask), (ym == 0)))
def test_TakeTransposeInnerOuter(self):
# Test of take, transpose, inner, outer products
x = arange(24)
y = np.arange(24)
x[5:6] = masked
x = x.reshape(2, 3, 4)
y = y.reshape(2, 3, 4)
assert_equal(np.transpose(y, (2, 0, 1)), transpose(x, (2, 0, 1)))
assert_equal(np.take(y, (2, 0, 1), 1), take(x, (2, 0, 1), 1))
assert_equal(np.inner(filled(x, 0), filled(y, 0)),
inner(x, y))
assert_equal(np.outer(filled(x, 0), filled(y, 0)),
outer(x, y))
y = array(['abc', 1, 'def', 2, 3], object)
y[2] = masked
t = take(y, [0, 3, 4])
assert_(t[0] == 'abc')
assert_(t[1] == 2)
assert_(t[2] == 3)
def test_imag_real(self):
# Check complex
xx = array([1 + 10j, 20 + 2j], mask=[1, 0])
assert_equal(xx.imag, [10, 2])
assert_equal(xx.imag.filled(), [1e+20, 2])
assert_equal(xx.imag.dtype, xx._data.imag.dtype)
assert_equal(xx.real, [1, 20])
assert_equal(xx.real.filled(), [1e+20, 20])
assert_equal(xx.real.dtype, xx._data.real.dtype)
def test_methods_with_output(self):
xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4)
xm[:, 0] = xm[0] = xm[-1, -1] = masked
funclist = ('sum', 'prod', 'var', 'std', 'max', 'min', 'ptp', 'mean',)
for funcname in funclist:
npfunc = getattr(np, funcname)
xmmeth = getattr(xm, funcname)
# A ndarray as explicit input
output = np.empty(4, dtype=float)
output.fill(-9999)
result = npfunc(xm, axis=0, out=output)
# ... the result should be the given output
assert_(result is output)
assert_equal(result, xmmeth(axis=0, out=output))
output = empty(4, dtype=int)
result = xmmeth(axis=0, out=output)
assert_(result is output)
assert_(output[0] is masked)
def test_count_mean_with_matrix(self):
m = np.ma.array(np.matrix([[1,2],[3,4]]), mask=np.zeros((2,2)))
assert_equal(m.count(axis=0).shape, (1,2))
assert_equal(m.count(axis=1).shape, (2,1))
        # make sure broadcasting inside mean and var works
assert_equal(m.mean(axis=0), [[2., 3.]])
assert_equal(m.mean(axis=1), [[1.5], [3.5]])
def test_eq_on_structured(self):
# Test the equality of structured arrays
ndtype = [('A', int), ('B', int)]
a = array([(1, 1), (2, 2)], mask=[(0, 1), (0, 0)], dtype=ndtype)
test = (a == a)
assert_equal(test.data, [True, True])
assert_equal(test.mask, [False, False])
test = (a == a[0])
assert_equal(test.data, [True, False])
assert_equal(test.mask, [False, False])
b = array([(1, 1), (2, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype)
test = (a == b)
assert_equal(test.data, [False, True])
assert_equal(test.mask, [True, False])
test = (a[0] == b)
assert_equal(test.data, [False, False])
assert_equal(test.mask, [True, False])
b = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype)
test = (a == b)
assert_equal(test.data, [True, True])
assert_equal(test.mask, [False, False])
# complicated dtype, 2-dimensional array.
ndtype = [('A', int), ('B', [('BA', int), ('BB', int)])]
a = array([[(1, (1, 1)), (2, (2, 2))],
[(3, (3, 3)), (4, (4, 4))]],
mask=[[(0, (1, 0)), (0, (0, 1))],
[(1, (0, 0)), (1, (1, 1))]], dtype=ndtype)
test = (a[0, 0] == a)
assert_equal(test.data, [[True, False], [False, False]])
assert_equal(test.mask, [[False, False], [False, True]])
def test_ne_on_structured(self):
# Test the equality of structured arrays
ndtype = [('A', int), ('B', int)]
a = array([(1, 1), (2, 2)], mask=[(0, 1), (0, 0)], dtype=ndtype)
test = (a != a)
assert_equal(test.data, [False, False])
assert_equal(test.mask, [False, False])
test = (a != a[0])
assert_equal(test.data, [False, True])
assert_equal(test.mask, [False, False])
b = array([(1, 1), (2, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype)
test = (a != b)
assert_equal(test.data, [True, False])
assert_equal(test.mask, [True, False])
test = (a[0] != b)
assert_equal(test.data, [True, True])
assert_equal(test.mask, [True, False])
b = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype)
test = (a != b)
assert_equal(test.data, [False, False])
assert_equal(test.mask, [False, False])
# complicated dtype, 2-dimensional array.
ndtype = [('A', int), ('B', [('BA', int), ('BB', int)])]
a = array([[(1, (1, 1)), (2, (2, 2))],
[(3, (3, 3)), (4, (4, 4))]],
mask=[[(0, (1, 0)), (0, (0, 1))],
[(1, (0, 0)), (1, (1, 1))]], dtype=ndtype)
test = (a[0, 0] != a)
assert_equal(test.data, [[False, True], [True, True]])
assert_equal(test.mask, [[False, False], [False, True]])
def test_eq_ne_structured_extra(self):
# ensure simple examples are symmetric and make sense.
# from https://github.com/numpy/numpy/pull/8590#discussion_r101126465
dt = np.dtype('i4,i4')
for m1 in (mvoid((1, 2), mask=(0, 0), dtype=dt),
mvoid((1, 2), mask=(0, 1), dtype=dt),
mvoid((1, 2), mask=(1, 0), dtype=dt),
mvoid((1, 2), mask=(1, 1), dtype=dt)):
ma1 = m1.view(MaskedArray)
r1 = ma1.view('2i4')
for m2 in (np.array((1, 1), dtype=dt),
mvoid((1, 1), dtype=dt),
mvoid((1, 0), mask=(0, 1), dtype=dt),
mvoid((3, 2), mask=(0, 1), dtype=dt)):
ma2 = m2.view(MaskedArray)
r2 = ma2.view('2i4')
eq_expected = (r1 == r2).all()
assert_equal(m1 == m2, eq_expected)
assert_equal(m2 == m1, eq_expected)
assert_equal(ma1 == m2, eq_expected)
assert_equal(m1 == ma2, eq_expected)
assert_equal(ma1 == ma2, eq_expected)
# Also check it is the same if we do it element by element.
el_by_el = [m1[name] == m2[name] for name in dt.names]
assert_equal(array(el_by_el, dtype=bool).all(), eq_expected)
ne_expected = (r1 != r2).any()
assert_equal(m1 != m2, ne_expected)
assert_equal(m2 != m1, ne_expected)
assert_equal(ma1 != m2, ne_expected)
assert_equal(m1 != ma2, ne_expected)
assert_equal(ma1 != ma2, ne_expected)
el_by_el = [m1[name] != m2[name] for name in dt.names]
assert_equal(array(el_by_el, dtype=bool).any(), ne_expected)
def test_eq_with_None(self):
# Really, comparisons with None should not be done, but check them
# anyway. Note that pep8 will flag these tests.
# Deprecation is in place for arrays, and when it happens this
# test will fail (and have to be changed accordingly).
# With partial mask
with suppress_warnings() as sup:
sup.filter(FutureWarning, "Comparison to `None`")
a = array([None, 1], mask=[0, 1])
assert_equal(a == None, array([True, False], mask=[0, 1]))
assert_equal(a.data == None, [True, False])
assert_equal(a != None, array([False, True], mask=[0, 1]))
# With nomask
a = array([None, 1], mask=False)
assert_equal(a == None, [True, False])
assert_equal(a != None, [False, True])
# With complete mask
a = array([None, 2], mask=True)
assert_equal(a == None, array([False, True], mask=True))
assert_equal(a != None, array([True, False], mask=True))
# Fully masked, even comparison to None should return "masked"
a = masked
assert_equal(a == None, masked)
def test_eq_with_scalar(self):
a = array(1)
assert_equal(a == 1, True)
assert_equal(a == 0, False)
assert_equal(a != 1, False)
assert_equal(a != 0, True)
b = array(1, mask=True)
assert_equal(b == 0, masked)
assert_equal(b == 1, masked)
assert_equal(b != 0, masked)
assert_equal(b != 1, masked)
def test_eq_different_dimensions(self):
m1 = array([1, 1], mask=[0, 1])
# test comparison with both masked and regular arrays.
for m2 in (array([[0, 1], [1, 2]]),
np.array([[0, 1], [1, 2]])):
test = (m1 == m2)
assert_equal(test.data, [[False, False],
[True, False]])
assert_equal(test.mask, [[False, True],
[False, True]])
def test_numpyarithmetics(self):
# Check that the mask is not back-propagated when using numpy functions
a = masked_array([-1, 0, 1, 2, 3], mask=[0, 0, 0, 0, 1])
control = masked_array([np.nan, np.nan, 0, np.log(2), -1],
mask=[1, 1, 0, 0, 1])
test = log(a)
assert_equal(test, control)
assert_equal(test.mask, control.mask)
assert_equal(a.mask, [0, 0, 0, 0, 1])
test = np.log(a)
assert_equal(test, control)
assert_equal(test.mask, control.mask)
assert_equal(a.mask, [0, 0, 0, 0, 1])
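# The attribute tests below revolve around two flags: keep_mask (whether an explicit
# mask= argument is OR-ed with the mask already carried by the source array) and
# hard_mask (whether assigning to a masked entry is ignored instead of unmasking it).
# A minimal sketch of the hard/soft distinction (the _-prefixed names are throwaway
# illustrations):
_hard = array([1, 2, 3], mask=[0, 1, 0], hard_mask=True)
_hard[1] = 99
assert_equal(_hard._data[1], 2)                # hard mask: data left untouched
_soft = array([1, 2, 3], mask=[0, 1, 0], hard_mask=False)
_soft[1] = 99
assert_equal(_soft._data[1], 99)               # soft mask: value written, entry unmasked
del _hard, _soft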
class TestMaskedArrayAttributes(object):
def test_keepmask(self):
# Tests the keep mask flag
x = masked_array([1, 2, 3], mask=[1, 0, 0])
mx = masked_array(x)
assert_equal(mx.mask, x.mask)
mx = masked_array(x, mask=[0, 1, 0], keep_mask=False)
assert_equal(mx.mask, [0, 1, 0])
mx = masked_array(x, mask=[0, 1, 0], keep_mask=True)
assert_equal(mx.mask, [1, 1, 0])
        # keep_mask defaults to True
mx = masked_array(x, mask=[0, 1, 0])
assert_equal(mx.mask, [1, 1, 0])
def test_hardmask(self):
# Test hard_mask
d = arange(5)
n = [0, 0, 0, 1, 1]
m = make_mask(n)
xh = array(d, mask=m, hard_mask=True)
        # We need to copy, to avoid updating d in xh!
xs = array(d, mask=m, hard_mask=False, copy=True)
xh[[1, 4]] = [10, 40]
xs[[1, 4]] = [10, 40]
assert_equal(xh._data, [0, 10, 2, 3, 4])
assert_equal(xs._data, [0, 10, 2, 3, 40])
assert_equal(xs.mask, [0, 0, 0, 1, 0])
assert_(xh._hardmask)
assert_(not xs._hardmask)
xh[1:4] = [10, 20, 30]
xs[1:4] = [10, 20, 30]
assert_equal(xh._data, [0, 10, 20, 3, 4])
assert_equal(xs._data, [0, 10, 20, 30, 40])
assert_equal(xs.mask, nomask)
xh[0] = masked
xs[0] = masked
assert_equal(xh.mask, [1, 0, 0, 1, 1])
assert_equal(xs.mask, [1, 0, 0, 0, 0])
xh[:] = 1
xs[:] = 1
assert_equal(xh._data, [0, 1, 1, 3, 4])
assert_equal(xs._data, [1, 1, 1, 1, 1])
assert_equal(xh.mask, [1, 0, 0, 1, 1])
assert_equal(xs.mask, nomask)
# Switch to soft mask
xh.soften_mask()
xh[:] = arange(5)
assert_equal(xh._data, [0, 1, 2, 3, 4])
assert_equal(xh.mask, nomask)
# Switch back to hard mask
xh.harden_mask()
xh[xh < 3] = masked
assert_equal(xh._data, [0, 1, 2, 3, 4])
assert_equal(xh._mask, [1, 1, 1, 0, 0])
xh[filled(xh > 1, False)] = 5
assert_equal(xh._data, [0, 1, 2, 5, 5])
assert_equal(xh._mask, [1, 1, 1, 0, 0])
xh = array([[1, 2], [3, 4]], mask=[[1, 0], [0, 0]], hard_mask=True)
xh[0] = 0
assert_equal(xh._data, [[1, 0], [3, 4]])
assert_equal(xh._mask, [[1, 0], [0, 0]])
xh[-1, -1] = 5
assert_equal(xh._data, [[1, 0], [3, 5]])
assert_equal(xh._mask, [[1, 0], [0, 0]])
xh[filled(xh < 5, False)] = 2
assert_equal(xh._data, [[1, 2], [2, 5]])
assert_equal(xh._mask, [[1, 0], [0, 0]])
def test_hardmask_again(self):
# Another test of hardmask
d = arange(5)
n = [0, 0, 0, 1, 1]
m = make_mask(n)
xh = array(d, mask=m, hard_mask=True)
xh[4:5] = 999
xh[0:1] = 999
assert_equal(xh._data, [999, 1, 2, 3, 4])
def test_hardmask_oncemore_yay(self):
# OK, yet another test of hardmask
        # Make sure that harden_mask/soften_mask/unshare_mask return self
a = array([1, 2, 3], mask=[1, 0, 0])
b = a.harden_mask()
assert_equal(a, b)
b[0] = 0
assert_equal(a, b)
assert_equal(b, array([1, 2, 3], mask=[1, 0, 0]))
a = b.soften_mask()
a[0] = 0
assert_equal(a, b)
assert_equal(b, array([0, 2, 3], mask=[0, 0, 0]))
def test_smallmask(self):
# Checks the behaviour of _smallmask
a = arange(10)
a[1] = masked
a[1] = 1
assert_equal(a._mask, nomask)
a = arange(10)
a._smallmask = False
a[1] = masked
a[1] = 1
assert_equal(a._mask, zeros(10))
def test_shrink_mask(self):
# Tests .shrink_mask()
a = array([1, 2, 3], mask=[0, 0, 0])
b = a.shrink_mask()
assert_equal(a, b)
assert_equal(a.mask, nomask)
def test_flat(self):
# Test that flat can return all types of items [#4585, #4615]
# test simple access
test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1])
assert_equal(test.flat[1], 2)
assert_equal(test.flat[2], masked)
assert_(np.all(test.flat[0:2] == test[0, 0:2]))
# Test flat on masked_matrices
test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1])
test.flat = masked_array([3, 2, 1], mask=[1, 0, 0])
control = masked_array(np.matrix([[3, 2, 1]]), mask=[1, 0, 0])
assert_equal(test, control)
# Test setting
test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1])
testflat = test.flat
testflat[:] = testflat[[2, 1, 0]]
assert_equal(test, control)
testflat[0] = 9
assert_equal(test[0, 0], 9)
# test 2-D record array
# ... on structured array w/ masked records
x = array([[(1, 1.1, 'one'), (2, 2.2, 'two'), (3, 3.3, 'thr')],
[(4, 4.4, 'fou'), (5, 5.5, 'fiv'), (6, 6.6, 'six')]],
dtype=[('a', int), ('b', float), ('c', '|S8')])
x['a'][0, 1] = masked
x['b'][1, 0] = masked
x['c'][0, 2] = masked
x[-1, -1] = masked
xflat = x.flat
assert_equal(xflat[0], x[0, 0])
assert_equal(xflat[1], x[0, 1])
assert_equal(xflat[2], x[0, 2])
assert_equal(xflat[:3], x[0])
assert_equal(xflat[3], x[1, 0])
assert_equal(xflat[4], x[1, 1])
assert_equal(xflat[5], x[1, 2])
assert_equal(xflat[3:], x[1])
assert_equal(xflat[-1], x[-1, -1])
i = 0
j = 0
for xf in xflat:
assert_equal(xf, x[j, i])
i += 1
if i >= x.shape[-1]:
i = 0
j += 1
# test that matrices keep the correct shape (#4615)
a = masked_array(np.matrix(np.eye(2)), mask=0)
b = a.flat
b01 = b[:2]
assert_equal(b01.data, array([[1., 0.]]))
assert_equal(b01.mask, array([[False, False]]))
def test_assign_dtype(self):
# check that the mask's dtype is updated when dtype is changed
a = np.zeros(4, dtype='f4,i4')
m = np.ma.array(a)
m.dtype = np.dtype('f4')
repr(m) # raises?
assert_equal(m.dtype, np.dtype('f4'))
# check that dtype changes that change shape of mask too much
# are not allowed
def assign():
m = np.ma.array(a)
m.dtype = np.dtype('f8')
assert_raises(ValueError, assign)
b = a.view(dtype='f4', type=np.ma.MaskedArray) # raises?
assert_equal(b.dtype, np.dtype('f4'))
# check that nomask is preserved
a = np.zeros(4, dtype='f4')
m = np.ma.array(a)
m.dtype = np.dtype('f4,i4')
assert_equal(m.dtype, np.dtype('f4,i4'))
assert_equal(m._mask, np.ma.nomask)
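# fill_value is the scalar substituted for masked entries by filled(); when it is not
# set explicitly it falls back to a per-dtype default (999999 for integers, 1e+20 for
# floats, 'N/A' for strings).  A minimal sketch (the _-prefixed names are throwaway
# illustrations):
_filled_demo = masked_array([1., 2., 3.], mask=[0, 1, 0], fill_value=-1.)
assert_equal(_filled_demo.filled(), [1., -1., 3.])
assert_equal(masked_array([1, 2], mask=[0, 1]).filled()[1], default_fill_value(0))
del _filled_demo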
class TestFillingValues(object):
def test_check_on_scalar(self):
# Test _check_fill_value set to valid and invalid values
_check_fill_value = np.ma.core._check_fill_value
fval = _check_fill_value(0, int)
assert_equal(fval, 0)
fval = _check_fill_value(None, int)
assert_equal(fval, default_fill_value(0))
fval = _check_fill_value(0, "|S3")
assert_equal(fval, b"0")
fval = _check_fill_value(None, "|S3")
assert_equal(fval, default_fill_value(b"camelot!"))
assert_raises(TypeError, _check_fill_value, 1e+20, int)
assert_raises(TypeError, _check_fill_value, 'stuff', int)
def test_check_on_fields(self):
# Tests _check_fill_value with records
_check_fill_value = np.ma.core._check_fill_value
ndtype = [('a', int), ('b', float), ('c', "|S3")]
# A check on a list should return a single record
fval = _check_fill_value([-999, -12345678.9, "???"], ndtype)
assert_(isinstance(fval, ndarray))
assert_equal(fval.item(), [-999, -12345678.9, b"???"])
# A check on None should output the defaults
fval = _check_fill_value(None, ndtype)
assert_(isinstance(fval, ndarray))
assert_equal(fval.item(), [default_fill_value(0),
default_fill_value(0.),
asbytes(default_fill_value("0"))])
#.....Using a structured type as fill_value should work
fill_val = np.array((-999, -12345678.9, "???"), dtype=ndtype)
fval = _check_fill_value(fill_val, ndtype)
assert_(isinstance(fval, ndarray))
assert_equal(fval.item(), [-999, -12345678.9, b"???"])
#.....Using a flexible type w/ a different type shouldn't matter
# BEHAVIOR in 1.5 and earlier: match structured types by position
#fill_val = np.array((-999, -12345678.9, "???"),
# dtype=[("A", int), ("B", float), ("C", "|S3")])
# BEHAVIOR in 1.6 and later: match structured types by name
fill_val = np.array(("???", -999, -12345678.9),
dtype=[("c", "|S3"), ("a", int), ("b", float), ])
# suppress deprecation warning in 1.12 (remove in 1.13)
with assert_warns(FutureWarning):
fval = _check_fill_value(fill_val, ndtype)
assert_(isinstance(fval, ndarray))
assert_equal(fval.item(), [-999, -12345678.9, b"???"])
#.....Using an object-array shouldn't matter either
fill_val = np.ndarray(shape=(1,), dtype=object)
fill_val[0] = (-999, -12345678.9, b"???")
fval = _check_fill_value(fill_val, object)
assert_(isinstance(fval, ndarray))
assert_equal(fval.item(), [-999, -12345678.9, b"???"])
# NOTE: This test was never run properly as "fill_value" rather than
# "fill_val" was assigned. Written properly, it fails.
#fill_val = np.array((-999, -12345678.9, "???"))
#fval = _check_fill_value(fill_val, ndtype)
#assert_(isinstance(fval, ndarray))
#assert_equal(fval.item(), [-999, -12345678.9, b"???"])
#.....One-field-only flexible type should work as well
ndtype = [("a", int)]
fval = _check_fill_value(-999999999, ndtype)
assert_(isinstance(fval, ndarray))
assert_equal(fval.item(), (-999999999,))
def test_fillvalue_conversion(self):
# Tests the behavior of fill_value during conversion
# We had a tailored comment to make sure special attributes are
# properly dealt with
a = array([b'3', b'4', b'5'])
a._optinfo.update({'comment':"updated!"})
b = array(a, dtype=int)
assert_equal(b._data, [3, 4, 5])
assert_equal(b.fill_value, default_fill_value(0))
b = array(a, dtype=float)
assert_equal(b._data, [3, 4, 5])
assert_equal(b.fill_value, default_fill_value(0.))
b = a.astype(int)
assert_equal(b._data, [3, 4, 5])
assert_equal(b.fill_value, default_fill_value(0))
assert_equal(b._optinfo['comment'], "updated!")
b = a.astype([('a', '|S3')])
assert_equal(b['a']._data, a._data)
assert_equal(b['a'].fill_value, a.fill_value)
def test_default_fill_value(self):
# check all calling conventions
f1 = default_fill_value(1.)
f2 = default_fill_value(np.array(1.))
f3 = default_fill_value(np.array(1.).dtype)
assert_equal(f1, f2)
assert_equal(f1, f3)
def test_default_fill_value_structured(self):
fields = array([(1, 1, 1)],
dtype=[('i', int), ('s', '|S8'), ('f', float)])
f1 = default_fill_value(fields)
f2 = default_fill_value(fields.dtype)
expected = np.array((default_fill_value(0),
default_fill_value('0'),
default_fill_value(0.)), dtype=fields.dtype)
assert_equal(f1, expected)
assert_equal(f2, expected)
def test_default_fill_value_void(self):
dt = np.dtype([('v', 'V7')])
f = default_fill_value(dt)
assert_equal(f['v'], np.array(default_fill_value(dt['v']), dt['v']))
def test_fillvalue(self):
# Yet more fun with the fill_value
data = masked_array([1, 2, 3], fill_value=-999)
series = data[[0, 2, 1]]
assert_equal(series._fill_value, data._fill_value)
mtype = [('f', float), ('s', '|S3')]
x = array([(1, 'a'), (2, 'b'), (pi, 'pi')], dtype=mtype)
x.fill_value = 999
assert_equal(x.fill_value.item(), [999., b'999'])
assert_equal(x['f'].fill_value, 999)
assert_equal(x['s'].fill_value, b'999')
x.fill_value = (9, '???')
assert_equal(x.fill_value.item(), (9, b'???'))
assert_equal(x['f'].fill_value, 9)
assert_equal(x['s'].fill_value, b'???')
x = array([1, 2, 3.1])
x.fill_value = 999
assert_equal(np.asarray(x.fill_value).dtype, float)
assert_equal(x.fill_value, 999.)
assert_equal(x._fill_value, np.array(999.))
def test_fillvalue_exotic_dtype(self):
# Tests yet more exotic flexible dtypes
_check_fill_value = np.ma.core._check_fill_value
ndtype = [('i', int), ('s', '|S8'), ('f', float)]
control = np.array((default_fill_value(0),
default_fill_value('0'),
default_fill_value(0.),),
dtype=ndtype)
assert_equal(_check_fill_value(None, ndtype), control)
# The shape shouldn't matter
ndtype = [('f0', float, (2, 2))]
control = np.array((default_fill_value(0.),),
dtype=[('f0', float)]).astype(ndtype)
assert_equal(_check_fill_value(None, ndtype), control)
control = np.array((0,), dtype=[('f0', float)]).astype(ndtype)
assert_equal(_check_fill_value(0, ndtype), control)
ndtype = np.dtype("int, (2,3)float, float")
control = np.array((default_fill_value(0),
default_fill_value(0.),
default_fill_value(0.),),
dtype="int, float, float").astype(ndtype)
test = _check_fill_value(None, ndtype)
assert_equal(test, control)
control = np.array((0, 0, 0), dtype="int, float, float").astype(ndtype)
assert_equal(_check_fill_value(0, ndtype), control)
# but when indexing, fill value should become scalar not tuple
# See issue #6723
M = masked_array(control)
assert_equal(M["f1"].fill_value.ndim, 0)
def test_fillvalue_datetime_timedelta(self):
# Test default fillvalue for datetime64 and timedelta64 types.
# See issue #4476, this would return '?' which would cause errors
# elsewhere
for timecode in ("as", "fs", "ps", "ns", "us", "ms", "s", "m",
"h", "D", "W", "M", "Y"):
control = numpy.datetime64("NaT", timecode)
test = default_fill_value(numpy.dtype("<M8[" + timecode + "]"))
np.testing.assert_equal(test, control)
control = numpy.timedelta64("NaT", timecode)
test = default_fill_value(numpy.dtype("<m8[" + timecode + "]"))
np.testing.assert_equal(test, control)
def test_extremum_fill_value(self):
# Tests extremum fill values for flexible type.
a = array([(1, (2, 3)), (4, (5, 6))],
dtype=[('A', int), ('B', [('BA', int), ('BB', int)])])
test = a.fill_value
assert_equal(test.dtype, a.dtype)
assert_equal(test['A'], default_fill_value(a['A']))
assert_equal(test['B']['BA'], default_fill_value(a['B']['BA']))
assert_equal(test['B']['BB'], default_fill_value(a['B']['BB']))
test = minimum_fill_value(a)
assert_equal(test.dtype, a.dtype)
assert_equal(test[0], minimum_fill_value(a['A']))
assert_equal(test[1][0], minimum_fill_value(a['B']['BA']))
assert_equal(test[1][1], minimum_fill_value(a['B']['BB']))
assert_equal(test[1], minimum_fill_value(a['B']))
test = maximum_fill_value(a)
assert_equal(test.dtype, a.dtype)
assert_equal(test[0], maximum_fill_value(a['A']))
assert_equal(test[1][0], maximum_fill_value(a['B']['BA']))
assert_equal(test[1][1], maximum_fill_value(a['B']['BB']))
assert_equal(test[1], maximum_fill_value(a['B']))
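        # Note: minimum_fill_value returns the largest representable value
        # for each field (and maximum_fill_value the smallest), so masked
        # entries can never win a min/max reduction.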
def test_extremum_fill_value_subdtype(self):
a = array(([2, 3, 4],), dtype=[('value', np.int8, 3)])
test = minimum_fill_value(a)
assert_equal(test.dtype, a.dtype)
assert_equal(test[0], np.full(3, minimum_fill_value(a['value'])))
test = maximum_fill_value(a)
assert_equal(test.dtype, a.dtype)
assert_equal(test[0], np.full(3, maximum_fill_value(a['value'])))
def test_fillvalue_individual_fields(self):
# Test setting fill_value on individual fields
ndtype = [('a', int), ('b', int)]
# Explicit fill_value
a = array(list(zip([1, 2, 3], [4, 5, 6])),
fill_value=(-999, -999), dtype=ndtype)
aa = a['a']
aa.set_fill_value(10)
assert_equal(aa._fill_value, np.array(10))
assert_equal(tuple(a.fill_value), (10, -999))
a.fill_value['b'] = -10
assert_equal(tuple(a.fill_value), (10, -10))
# Implicit fill_value
t = array(list(zip([1, 2, 3], [4, 5, 6])), dtype=ndtype)
tt = t['a']
tt.set_fill_value(10)
assert_equal(tt._fill_value, np.array(10))
assert_equal(tuple(t.fill_value), (10, default_fill_value(0)))
def test_fillvalue_implicit_structured_array(self):
# Check that fill_value is always defined for structured arrays
ndtype = ('b', float)
adtype = ('a', float)
a = array([(1.,), (2.,)], mask=[(False,), (False,)],
fill_value=(np.nan,), dtype=np.dtype([adtype]))
b = empty(a.shape, dtype=[adtype, ndtype])
b['a'] = a['a']
b['a'].set_fill_value(a['a'].fill_value)
f = b._fill_value[()]
assert_(np.isnan(f[0]))
assert_equal(f[-1], default_fill_value(1.))
def test_fillvalue_as_arguments(self):
# Test adding a fill_value parameter to empty/ones/zeros
a = empty(3, fill_value=999.)
assert_equal(a.fill_value, 999.)
a = ones(3, fill_value=999., dtype=float)
assert_equal(a.fill_value, 999.)
a = zeros(3, fill_value=0., dtype=complex)
assert_equal(a.fill_value, 0.)
a = identity(3, fill_value=0., dtype=complex)
assert_equal(a.fill_value, 0.)
def test_shape_argument(self):
        # Test that shape can be provided as an argument
# GH issue 6106
a = empty(shape=(3, ))
assert_equal(a.shape, (3, ))
a = ones(shape=(3, ), dtype=float)
assert_equal(a.shape, (3, ))
a = zeros(shape=(3, ), dtype=complex)
assert_equal(a.shape, (3, ))
def test_fillvalue_in_view(self):
# Test the behavior of fill_value in view
# Create initial masked array
x = array([1, 2, 3], fill_value=1, dtype=np.int64)
# Check that fill_value is preserved by default
y = x.view()
assert_(y.fill_value == 1)
# Check that fill_value is preserved if dtype is specified and the
# dtype is an ndarray sub-class and has a _fill_value attribute
y = x.view(MaskedArray)
assert_(y.fill_value == 1)
        # Check that fill_value is preserved if type is specified and the
        # type is an ndarray sub-class and has a _fill_value attribute (by
        # default, the first argument is dtype, not type)
y = x.view(type=MaskedArray)
assert_(y.fill_value == 1)
# Check that code does not crash if passed an ndarray sub-class that
# does not have a _fill_value attribute
y = x.view(np.ndarray)
y = x.view(type=np.ndarray)
# Check that fill_value can be overridden with view
y = x.view(MaskedArray, fill_value=2)
assert_(y.fill_value == 2)
# Check that fill_value can be overridden with view (using type=)
y = x.view(type=MaskedArray, fill_value=2)
assert_(y.fill_value == 2)
# Check that fill_value gets reset if passed a dtype but not a
# fill_value. This is because even though in some cases one can safely
# cast the fill_value, e.g. if taking an int64 view of an int32 array,
# in other cases, this cannot be done (e.g. int32 view of an int64
# array with a large fill_value).
y = x.view(dtype=np.int32)
assert_(y.fill_value == 999999)
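        # 999999 is simply the default integer fill value (see
        # default_fill_value): the dtype-only view resets fill_value instead
        # of trying to cast the old one.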
def test_fillvalue_bytes_or_str(self):
# Test whether fill values work as expected for structured dtypes
# containing bytes or str. See issue #7259.
a = empty(shape=(3, ), dtype="(2)3S,(2)3U")
assert_equal(a["f0"].fill_value, default_fill_value(b"spam"))
assert_equal(a["f1"].fill_value, default_fill_value("eggs"))
class TestUfuncs(object):
# Test class for the application of ufuncs on MaskedArrays.
def setup(self):
# Base data definition.
self.d = (array([1.0, 0, -1, pi / 2] * 2, mask=[0, 1] + [0] * 6),
array([1.0, 0, -1, pi / 2] * 2, mask=[1, 0] + [0] * 6),)
self.err_status = np.geterr()
np.seterr(divide='ignore', invalid='ignore')
def teardown(self):
np.seterr(**self.err_status)
def test_testUfuncRegression(self):
# Tests new ufuncs on MaskedArrays.
for f in ['sqrt', 'log', 'log10', 'exp', 'conjugate',
'sin', 'cos', 'tan',
'arcsin', 'arccos', 'arctan',
'sinh', 'cosh', 'tanh',
'arcsinh',
'arccosh',
'arctanh',
'absolute', 'fabs', 'negative',
'floor', 'ceil',
'logical_not',
'add', 'subtract', 'multiply',
'divide', 'true_divide', 'floor_divide',
'remainder', 'fmod', 'hypot', 'arctan2',
'equal', 'not_equal', 'less_equal', 'greater_equal',
'less', 'greater',
'logical_and', 'logical_or', 'logical_xor',
]:
try:
uf = getattr(umath, f)
except AttributeError:
uf = getattr(fromnumeric, f)
mf = getattr(numpy.ma.core, f)
args = self.d[:uf.nin]
ur = uf(*args)
mr = mf(*args)
assert_equal(ur.filled(0), mr.filled(0), f)
assert_mask_equal(ur.mask, mr.mask, err_msg=f)
def test_reduce(self):
# Tests reduce on MaskedArrays.
a = self.d[0]
assert_(not alltrue(a, axis=0))
assert_(sometrue(a, axis=0))
assert_equal(sum(a[:3], axis=0), 0)
assert_equal(product(a, axis=0), 0)
assert_equal(add.reduce(a), pi)
def test_minmax(self):
# Tests extrema on MaskedArrays.
a = arange(1, 13).reshape(3, 4)
amask = masked_where(a < 5, a)
assert_equal(amask.max(), a.max())
assert_equal(amask.min(), 5)
assert_equal(amask.max(0), a.max(0))
assert_equal(amask.min(0), [5, 6, 7, 8])
assert_(amask.max(1)[0].mask)
assert_(amask.min(1)[0].mask)
def test_ndarray_mask(self):
# Check that the mask of the result is a ndarray (not a MaskedArray...)
a = masked_array([-1, 0, 1, 2, 3], mask=[0, 0, 0, 0, 1])
test = np.sqrt(a)
control = masked_array([-1, 0, 1, np.sqrt(2), -1],
mask=[1, 0, 0, 0, 1])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
assert_(not isinstance(test.mask, MaskedArray))
def test_treatment_of_NotImplemented(self):
# Check that NotImplemented is returned at appropriate places
a = masked_array([1., 2.], mask=[1, 0])
assert_raises(TypeError, operator.mul, a, "abc")
assert_raises(TypeError, operator.truediv, a, "abc")
class MyClass(object):
__array_priority__ = a.__array_priority__ + 1
def __mul__(self, other):
return "My mul"
def __rmul__(self, other):
return "My rmul"
me = MyClass()
assert_(me * a == "My mul")
assert_(a * me == "My rmul")
# and that __array_priority__ is respected
class MyClass2(object):
__array_priority__ = 100
def __mul__(self, other):
return "Me2mul"
def __rmul__(self, other):
return "Me2rmul"
def __rdiv__(self, other):
return "Me2rdiv"
__rtruediv__ = __rdiv__
me_too = MyClass2()
assert_(a.__mul__(me_too) is NotImplemented)
assert_(all(multiply.outer(a, me_too) == "Me2rmul"))
assert_(a.__truediv__(me_too) is NotImplemented)
assert_(me_too * a == "Me2mul")
assert_(a * me_too == "Me2rmul")
assert_(a / me_too == "Me2rdiv")
def test_no_masked_nan_warnings(self):
# check that a nan in masked position does not
# cause ufunc warnings
m = np.ma.array([0.5, np.nan], mask=[0,1])
with warnings.catch_warnings():
warnings.filterwarnings("error")
# test unary and binary ufuncs
exp(m)
add(m, 1)
m > 0
# test different unary domains
sqrt(m)
log(m)
tan(m)
arcsin(m)
arccos(m)
arccosh(m)
# test binary domains
divide(m, 2)
# also check that allclose uses ma ufuncs, to avoid warning
allclose(m, 0.5)
class TestMaskedArrayInPlaceArithmetics(object):
# Test MaskedArray Arithmetics
def setup(self):
x = arange(10)
y = arange(10)
xm = arange(10)
xm[2] = masked
self.intdata = (x, y, xm)
self.floatdata = (x.astype(float), y.astype(float), xm.astype(float))
self.othertypes = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
self.othertypes = [np.dtype(_).type for _ in self.othertypes]
self.uint8data = (
x.astype(np.uint8),
y.astype(np.uint8),
xm.astype(np.uint8)
)
def test_inplace_addition_scalar(self):
# Test of inplace additions
(x, y, xm) = self.intdata
xm[2] = masked
x += 1
assert_equal(x, y + 1)
xm += 1
assert_equal(xm, y + 1)
(x, _, xm) = self.floatdata
id1 = x.data.ctypes._data
x += 1.
assert_(id1 == x.data.ctypes._data)
assert_equal(x, y + 1.)
def test_inplace_addition_array(self):
# Test of inplace additions
(x, y, xm) = self.intdata
m = xm.mask
a = arange(10, dtype=np.int16)
a[-1] = masked
x += a
xm += a
assert_equal(x, y + a)
assert_equal(xm, y + a)
assert_equal(xm.mask, mask_or(m, a.mask))
def test_inplace_subtraction_scalar(self):
# Test of inplace subtractions
(x, y, xm) = self.intdata
x -= 1
assert_equal(x, y - 1)
xm -= 1
assert_equal(xm, y - 1)
def test_inplace_subtraction_array(self):
# Test of inplace subtractions
(x, y, xm) = self.floatdata
m = xm.mask
a = arange(10, dtype=float)
a[-1] = masked
x -= a
xm -= a
assert_equal(x, y - a)
assert_equal(xm, y - a)
assert_equal(xm.mask, mask_or(m, a.mask))
def test_inplace_multiplication_scalar(self):
# Test of inplace multiplication
(x, y, xm) = self.floatdata
x *= 2.0
assert_equal(x, y * 2)
xm *= 2.0
assert_equal(xm, y * 2)
def test_inplace_multiplication_array(self):
# Test of inplace multiplication
(x, y, xm) = self.floatdata
m = xm.mask
a = arange(10, dtype=float)
a[-1] = masked
x *= a
xm *= a
assert_equal(x, y * a)
assert_equal(xm, y * a)
assert_equal(xm.mask, mask_or(m, a.mask))
def test_inplace_division_scalar_int(self):
# Test of inplace division
(x, y, xm) = self.intdata
x = arange(10) * 2
xm = arange(10) * 2
xm[2] = masked
x //= 2
assert_equal(x, y)
xm //= 2
assert_equal(xm, y)
def test_inplace_division_scalar_float(self):
# Test of inplace division
(x, y, xm) = self.floatdata
x /= 2.0
assert_equal(x, y / 2.0)
xm /= arange(10)
assert_equal(xm, ones((10,)))
def test_inplace_division_array_float(self):
# Test of inplace division
(x, y, xm) = self.floatdata
m = xm.mask
a = arange(10, dtype=float)
a[-1] = masked
x /= a
xm /= a
assert_equal(x, y / a)
assert_equal(xm, y / a)
assert_equal(xm.mask, mask_or(mask_or(m, a.mask), (a == 0)))
def test_inplace_division_misc(self):
x = [1., 1., 1., -2., pi / 2., 4., 5., -10., 10., 1., 2., 3.]
y = [5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]
m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
xm = masked_array(x, mask=m1)
ym = masked_array(y, mask=m2)
z = xm / ym
assert_equal(z._mask, [1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1])
assert_equal(z._data,
[1., 1., 1., -1., -pi / 2., 4., 5., 1., 1., 1., 2., 3.])
xm = xm.copy()
xm /= ym
assert_equal(xm._mask, [1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1])
assert_equal(z._data,
[1., 1., 1., -1., -pi / 2., 4., 5., 1., 1., 1., 2., 3.])
def test_datafriendly_add(self):
# Test keeping data w/ (inplace) addition
x = array([1, 2, 3], mask=[0, 0, 1])
# Test add w/ scalar
xx = x + 1
assert_equal(xx.data, [2, 3, 3])
assert_equal(xx.mask, [0, 0, 1])
# Test iadd w/ scalar
x += 1
assert_equal(x.data, [2, 3, 3])
assert_equal(x.mask, [0, 0, 1])
# Test add w/ array
x = array([1, 2, 3], mask=[0, 0, 1])
xx = x + array([1, 2, 3], mask=[1, 0, 0])
assert_equal(xx.data, [1, 4, 3])
assert_equal(xx.mask, [1, 0, 1])
# Test iadd w/ array
x = array([1, 2, 3], mask=[0, 0, 1])
x += array([1, 2, 3], mask=[1, 0, 0])
assert_equal(x.data, [1, 4, 3])
assert_equal(x.mask, [1, 0, 1])
def test_datafriendly_sub(self):
# Test keeping data w/ (inplace) subtraction
# Test sub w/ scalar
x = array([1, 2, 3], mask=[0, 0, 1])
xx = x - 1
assert_equal(xx.data, [0, 1, 3])
assert_equal(xx.mask, [0, 0, 1])
# Test isub w/ scalar
x = array([1, 2, 3], mask=[0, 0, 1])
x -= 1
assert_equal(x.data, [0, 1, 3])
assert_equal(x.mask, [0, 0, 1])
# Test sub w/ array
x = array([1, 2, 3], mask=[0, 0, 1])
xx = x - array([1, 2, 3], mask=[1, 0, 0])
assert_equal(xx.data, [1, 0, 3])
assert_equal(xx.mask, [1, 0, 1])
# Test isub w/ array
x = array([1, 2, 3], mask=[0, 0, 1])
x -= array([1, 2, 3], mask=[1, 0, 0])
assert_equal(x.data, [1, 0, 3])
assert_equal(x.mask, [1, 0, 1])
def test_datafriendly_mul(self):
# Test keeping data w/ (inplace) multiplication
# Test mul w/ scalar
x = array([1, 2, 3], mask=[0, 0, 1])
xx = x * 2
assert_equal(xx.data, [2, 4, 3])
assert_equal(xx.mask, [0, 0, 1])
# Test imul w/ scalar
x = array([1, 2, 3], mask=[0, 0, 1])
x *= 2
assert_equal(x.data, [2, 4, 3])
assert_equal(x.mask, [0, 0, 1])
# Test mul w/ array
x = array([1, 2, 3], mask=[0, 0, 1])
xx = x * array([10, 20, 30], mask=[1, 0, 0])
assert_equal(xx.data, [1, 40, 3])
assert_equal(xx.mask, [1, 0, 1])
# Test imul w/ array
x = array([1, 2, 3], mask=[0, 0, 1])
x *= array([10, 20, 30], mask=[1, 0, 0])
assert_equal(x.data, [1, 40, 3])
assert_equal(x.mask, [1, 0, 1])
def test_datafriendly_div(self):
# Test keeping data w/ (inplace) division
# Test div on scalar
x = array([1, 2, 3], mask=[0, 0, 1])
xx = x / 2.
assert_equal(xx.data, [1 / 2., 2 / 2., 3])
assert_equal(xx.mask, [0, 0, 1])
# Test idiv on scalar
x = array([1., 2., 3.], mask=[0, 0, 1])
x /= 2.
assert_equal(x.data, [1 / 2., 2 / 2., 3])
assert_equal(x.mask, [0, 0, 1])
# Test div on array
x = array([1., 2., 3.], mask=[0, 0, 1])
xx = x / array([10., 20., 30.], mask=[1, 0, 0])
assert_equal(xx.data, [1., 2. / 20., 3.])
assert_equal(xx.mask, [1, 0, 1])
# Test idiv on array
x = array([1., 2., 3.], mask=[0, 0, 1])
x /= array([10., 20., 30.], mask=[1, 0, 0])
assert_equal(x.data, [1., 2 / 20., 3.])
assert_equal(x.mask, [1, 0, 1])
def test_datafriendly_pow(self):
# Test keeping data w/ (inplace) power
# Test pow on scalar
x = array([1., 2., 3.], mask=[0, 0, 1])
xx = x ** 2.5
assert_equal(xx.data, [1., 2. ** 2.5, 3.])
assert_equal(xx.mask, [0, 0, 1])
# Test ipow on scalar
x **= 2.5
assert_equal(x.data, [1., 2. ** 2.5, 3])
assert_equal(x.mask, [0, 0, 1])
def test_datafriendly_add_arrays(self):
a = array([[1, 1], [3, 3]])
b = array([1, 1], mask=[0, 0])
a += b
assert_equal(a, [[2, 2], [4, 4]])
if a.mask is not nomask:
assert_equal(a.mask, [[0, 0], [0, 0]])
a = array([[1, 1], [3, 3]])
b = array([1, 1], mask=[0, 1])
a += b
assert_equal(a, [[2, 2], [4, 4]])
assert_equal(a.mask, [[0, 1], [0, 1]])
def test_datafriendly_sub_arrays(self):
a = array([[1, 1], [3, 3]])
b = array([1, 1], mask=[0, 0])
a -= b
assert_equal(a, [[0, 0], [2, 2]])
if a.mask is not nomask:
assert_equal(a.mask, [[0, 0], [0, 0]])
a = array([[1, 1], [3, 3]])
b = array([1, 1], mask=[0, 1])
a -= b
assert_equal(a, [[0, 0], [2, 2]])
assert_equal(a.mask, [[0, 1], [0, 1]])
def test_datafriendly_mul_arrays(self):
a = array([[1, 1], [3, 3]])
b = array([1, 1], mask=[0, 0])
a *= b
assert_equal(a, [[1, 1], [3, 3]])
if a.mask is not nomask:
assert_equal(a.mask, [[0, 0], [0, 0]])
a = array([[1, 1], [3, 3]])
b = array([1, 1], mask=[0, 1])
a *= b
assert_equal(a, [[1, 1], [3, 3]])
assert_equal(a.mask, [[0, 1], [0, 1]])
def test_inplace_addition_scalar_type(self):
# Test of inplace additions
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
xm[2] = masked
x += t(1)
assert_equal(x, y + t(1))
xm += t(1)
assert_equal(xm, y + t(1))
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_addition_array_type(self):
# Test of inplace additions
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
m = xm.mask
a = arange(10, dtype=t)
a[-1] = masked
x += a
xm += a
assert_equal(x, y + a)
assert_equal(xm, y + a)
assert_equal(xm.mask, mask_or(m, a.mask))
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_subtraction_scalar_type(self):
# Test of inplace subtractions
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
x -= t(1)
assert_equal(x, y - t(1))
xm -= t(1)
assert_equal(xm, y - t(1))
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_subtraction_array_type(self):
# Test of inplace subtractions
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
m = xm.mask
a = arange(10, dtype=t)
a[-1] = masked
x -= a
xm -= a
assert_equal(x, y - a)
assert_equal(xm, y - a)
assert_equal(xm.mask, mask_or(m, a.mask))
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_multiplication_scalar_type(self):
# Test of inplace multiplication
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
x *= t(2)
assert_equal(x, y * t(2))
xm *= t(2)
assert_equal(xm, y * t(2))
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_multiplication_array_type(self):
# Test of inplace multiplication
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
m = xm.mask
a = arange(10, dtype=t)
a[-1] = masked
x *= a
xm *= a
assert_equal(x, y * a)
assert_equal(xm, y * a)
assert_equal(xm.mask, mask_or(m, a.mask))
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_floor_division_scalar_type(self):
# Test of inplace division
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
x = arange(10, dtype=t) * t(2)
xm = arange(10, dtype=t) * t(2)
xm[2] = masked
x //= t(2)
xm //= t(2)
assert_equal(x, y)
assert_equal(xm, y)
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_floor_division_array_type(self):
# Test of inplace division
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
m = xm.mask
a = arange(10, dtype=t)
a[-1] = masked
x //= a
xm //= a
assert_equal(x, y // a)
assert_equal(xm, y // a)
assert_equal(
xm.mask,
mask_or(mask_or(m, a.mask), (a == t(0)))
)
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_division_scalar_type(self):
# Test of inplace division
for t in self.othertypes:
with suppress_warnings() as sup:
sup.record(UserWarning)
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
x = arange(10, dtype=t) * t(2)
xm = arange(10, dtype=t) * t(2)
xm[2] = masked
# May get a DeprecationWarning or a TypeError.
#
# This is a consequence of the fact that this is true divide
# and will require casting to float for calculation and
# casting back to the original type. This will only be raised
# with integers. Whether it is an error or warning is only
# dependent on how stringent the casting rules are.
#
# Will handle the same way.
try:
x /= t(2)
assert_equal(x, y)
except (DeprecationWarning, TypeError) as e:
warnings.warn(str(e), stacklevel=1)
try:
xm /= t(2)
assert_equal(xm, y)
except (DeprecationWarning, TypeError) as e:
warnings.warn(str(e), stacklevel=1)
if issubclass(t, np.integer):
assert_equal(len(sup.log), 2, "Failed on type=%s." % t)
else:
assert_equal(len(sup.log), 0, "Failed on type=%s." % t)
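                # (Two log entries are expected for integer types because
                # both in-place divisions above fall into the except branch
                # and re-emit a recorded warning.)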
def test_inplace_division_array_type(self):
# Test of inplace division
for t in self.othertypes:
with suppress_warnings() as sup:
sup.record(UserWarning)
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
m = xm.mask
a = arange(10, dtype=t)
a[-1] = masked
# May get a DeprecationWarning or a TypeError.
#
# This is a consequence of the fact that this is true divide
# and will require casting to float for calculation and
# casting back to the original type. This will only be raised
# with integers. Whether it is an error or warning is only
# dependent on how stringent the casting rules are.
#
# Will handle the same way.
try:
x /= a
assert_equal(x, y / a)
except (DeprecationWarning, TypeError) as e:
warnings.warn(str(e), stacklevel=1)
try:
xm /= a
assert_equal(xm, y / a)
assert_equal(
xm.mask,
mask_or(mask_or(m, a.mask), (a == t(0)))
)
except (DeprecationWarning, TypeError) as e:
warnings.warn(str(e), stacklevel=1)
if issubclass(t, np.integer):
assert_equal(len(sup.log), 2, "Failed on type=%s." % t)
else:
assert_equal(len(sup.log), 0, "Failed on type=%s." % t)
def test_inplace_pow_type(self):
# Test keeping data w/ (inplace) power
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
# Test pow on scalar
x = array([1, 2, 3], mask=[0, 0, 1], dtype=t)
xx = x ** t(2)
xx_r = array([1, 2 ** 2, 3], mask=[0, 0, 1], dtype=t)
assert_equal(xx.data, xx_r.data)
assert_equal(xx.mask, xx_r.mask)
# Test ipow on scalar
x **= t(2)
assert_equal(x.data, xx_r.data)
assert_equal(x.mask, xx_r.mask)
assert_equal(len(w), 0, "Failed on type=%s." % t)
class TestMaskedArrayMethods(object):
# Test class for miscellaneous MaskedArrays methods.
def setup(self):
# Base data definition.
x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,
8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
6.04, 9.63, 7.712, 3.382, 4.489, 6.479,
7.189, 9.645, 5.395, 4.961, 9.894, 2.893,
7.357, 9.828, 6.272, 3.758, 6.693, 0.993])
X = x.reshape(6, 6)
XX = x.reshape(3, 2, 2, 3)
m = np.array([0, 1, 0, 1, 0, 0,
1, 0, 1, 1, 0, 1,
0, 0, 0, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 0, 0,
0, 0, 1, 0, 1, 0])
mx = array(data=x, mask=m)
mX = array(data=X, mask=m.reshape(X.shape))
mXX = array(data=XX, mask=m.reshape(XX.shape))
m2 = np.array([1, 1, 0, 1, 0, 0,
1, 1, 1, 1, 0, 1,
0, 0, 1, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 1, 0,
0, 0, 1, 0, 1, 1])
m2x = array(data=x, mask=m2)
m2X = array(data=X, mask=m2.reshape(X.shape))
m2XX = array(data=XX, mask=m2.reshape(XX.shape))
self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX)
def test_generic_methods(self):
# Tests some MaskedArray methods.
a = array([1, 3, 2])
assert_equal(a.any(), a._data.any())
assert_equal(a.all(), a._data.all())
assert_equal(a.argmax(), a._data.argmax())
assert_equal(a.argmin(), a._data.argmin())
assert_equal(a.choose(0, 1, 2, 3, 4), a._data.choose(0, 1, 2, 3, 4))
assert_equal(a.compress([1, 0, 1]), a._data.compress([1, 0, 1]))
assert_equal(a.conj(), a._data.conj())
assert_equal(a.conjugate(), a._data.conjugate())
m = array([[1, 2], [3, 4]])
assert_equal(m.diagonal(), m._data.diagonal())
assert_equal(a.sum(), a._data.sum())
assert_equal(a.take([1, 2]), a._data.take([1, 2]))
assert_equal(m.transpose(), m._data.transpose())
def test_allclose(self):
# Tests allclose on arrays
a = np.random.rand(10)
b = a + np.random.rand(10) * 1e-8
assert_(allclose(a, b))
# Test allclose w/ infs
a[0] = np.inf
assert_(not allclose(a, b))
b[0] = np.inf
assert_(allclose(a, b))
# Test allclose w/ masked
a = masked_array(a)
a[-1] = masked
assert_(allclose(a, b, masked_equal=True))
assert_(not allclose(a, b, masked_equal=False))
# Test comparison w/ scalar
a *= 1e-8
a[0] = 0
assert_(allclose(a, 0, masked_equal=True))
# Test that the function works for MIN_INT integer typed arrays
a = masked_array([np.iinfo(np.int_).min], dtype=np.int_)
assert_(allclose(a, a))
def test_allany(self):
# Checks the any/all methods/functions.
x = np.array([[0.13, 0.26, 0.90],
[0.28, 0.33, 0.63],
[0.31, 0.87, 0.70]])
m = np.array([[True, False, False],
[False, False, False],
[True, True, False]], dtype=np.bool_)
mx = masked_array(x, mask=m)
mxbig = (mx > 0.5)
mxsmall = (mx < 0.5)
assert_(not mxbig.all())
assert_(mxbig.any())
assert_equal(mxbig.all(0), [False, False, True])
assert_equal(mxbig.all(1), [False, False, True])
assert_equal(mxbig.any(0), [False, False, True])
assert_equal(mxbig.any(1), [True, True, True])
assert_(not mxsmall.all())
assert_(mxsmall.any())
assert_equal(mxsmall.all(0), [True, True, False])
assert_equal(mxsmall.all(1), [False, False, False])
assert_equal(mxsmall.any(0), [True, True, False])
assert_equal(mxsmall.any(1), [True, True, False])
def test_allany_onmatrices(self):
x = np.array([[0.13, 0.26, 0.90],
[0.28, 0.33, 0.63],
[0.31, 0.87, 0.70]])
X = np.matrix(x)
m = np.array([[True, False, False],
[False, False, False],
[True, True, False]], dtype=np.bool_)
mX = masked_array(X, mask=m)
mXbig = (mX > 0.5)
mXsmall = (mX < 0.5)
assert_(not mXbig.all())
assert_(mXbig.any())
assert_equal(mXbig.all(0), np.matrix([False, False, True]))
assert_equal(mXbig.all(1), np.matrix([False, False, True]).T)
assert_equal(mXbig.any(0), np.matrix([False, False, True]))
assert_equal(mXbig.any(1), np.matrix([True, True, True]).T)
assert_(not mXsmall.all())
assert_(mXsmall.any())
assert_equal(mXsmall.all(0), np.matrix([True, True, False]))
assert_equal(mXsmall.all(1), np.matrix([False, False, False]).T)
assert_equal(mXsmall.any(0), np.matrix([True, True, False]))
assert_equal(mXsmall.any(1), np.matrix([True, True, False]).T)
def test_allany_oddities(self):
# Some fun with all and any
store = empty((), dtype=bool)
full = array([1, 2, 3], mask=True)
assert_(full.all() is masked)
full.all(out=store)
assert_(store)
assert_(store._mask, True)
assert_(store is not masked)
store = empty((), dtype=bool)
assert_(full.any() is masked)
full.any(out=store)
assert_(not store)
assert_(store._mask, True)
assert_(store is not masked)
def test_argmax_argmin(self):
# Tests argmin & argmax on MaskedArrays.
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
assert_equal(mx.argmin(), 35)
assert_equal(mX.argmin(), 35)
assert_equal(m2x.argmin(), 4)
assert_equal(m2X.argmin(), 4)
assert_equal(mx.argmax(), 28)
assert_equal(mX.argmax(), 28)
assert_equal(m2x.argmax(), 31)
assert_equal(m2X.argmax(), 31)
assert_equal(mX.argmin(0), [2, 2, 2, 5, 0, 5])
assert_equal(m2X.argmin(0), [2, 2, 4, 5, 0, 4])
assert_equal(mX.argmax(0), [0, 5, 0, 5, 4, 0])
assert_equal(m2X.argmax(0), [5, 5, 0, 5, 1, 0])
assert_equal(mX.argmin(1), [4, 1, 0, 0, 5, 5, ])
assert_equal(m2X.argmin(1), [4, 4, 0, 0, 5, 3])
assert_equal(mX.argmax(1), [2, 4, 1, 1, 4, 1])
assert_equal(m2X.argmax(1), [2, 4, 1, 1, 1, 1])
def test_clip(self):
# Tests clip on MaskedArrays.
x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,
8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
6.04, 9.63, 7.712, 3.382, 4.489, 6.479,
7.189, 9.645, 5.395, 4.961, 9.894, 2.893,
7.357, 9.828, 6.272, 3.758, 6.693, 0.993])
m = np.array([0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1,
0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0])
mx = array(x, mask=m)
clipped = mx.clip(2, 8)
assert_equal(clipped.mask, mx.mask)
assert_equal(clipped._data, x.clip(2, 8))
assert_equal(clipped._data, mx._data.clip(2, 8))
def test_compress(self):
# test compress
a = masked_array([1., 2., 3., 4., 5.], fill_value=9999)
condition = (a > 1.5) & (a < 3.5)
assert_equal(a.compress(condition), [2., 3.])
a[[2, 3]] = masked
b = a.compress(condition)
assert_equal(b._data, [2., 3.])
assert_equal(b._mask, [0, 1])
assert_equal(b.fill_value, 9999)
assert_equal(b, a[condition])
condition = (a < 4.)
b = a.compress(condition)
assert_equal(b._data, [1., 2., 3.])
assert_equal(b._mask, [0, 0, 1])
assert_equal(b.fill_value, 9999)
assert_equal(b, a[condition])
a = masked_array([[10, 20, 30], [40, 50, 60]],
mask=[[0, 0, 1], [1, 0, 0]])
b = a.compress(a.ravel() >= 22)
assert_equal(b._data, [30, 40, 50, 60])
assert_equal(b._mask, [1, 1, 0, 0])
x = np.array([3, 1, 2])
b = a.compress(x >= 2, axis=1)
assert_equal(b._data, [[10, 30], [40, 60]])
assert_equal(b._mask, [[0, 1], [1, 0]])
def test_compressed(self):
# Tests compressed
a = array([1, 2, 3, 4], mask=[0, 0, 0, 0])
b = a.compressed()
assert_equal(b, a)
a[0] = masked
b = a.compressed()
assert_equal(b, [2, 3, 4])
a = array(np.matrix([1, 2, 3, 4]), mask=[0, 0, 0, 0])
b = a.compressed()
assert_equal(b, a)
assert_(isinstance(b, np.matrix))
a[0, 0] = masked
b = a.compressed()
assert_equal(b, [[2, 3, 4]])
def test_empty(self):
# Tests empty/like
datatype = [('a', int), ('b', float), ('c', '|S8')]
a = masked_array([(1, 1.1, '1.1'), (2, 2.2, '2.2'), (3, 3.3, '3.3')],
dtype=datatype)
assert_equal(len(a.fill_value.item()), len(datatype))
b = empty_like(a)
assert_equal(b.shape, a.shape)
assert_equal(b.fill_value, a.fill_value)
b = empty(len(a), dtype=datatype)
assert_equal(b.shape, a.shape)
assert_equal(b.fill_value, a.fill_value)
# check empty_like mask handling
a = masked_array([1, 2, 3], mask=[False, True, False])
b = empty_like(a)
assert_(not np.may_share_memory(a.mask, b.mask))
b = a.view(masked_array)
assert_(np.may_share_memory(a.mask, b.mask))
@suppress_copy_mask_on_assignment
def test_put(self):
# Tests put.
d = arange(5)
n = [0, 0, 0, 1, 1]
m = make_mask(n)
x = array(d, mask=m)
assert_(x[3] is masked)
assert_(x[4] is masked)
x[[1, 4]] = [10, 40]
assert_(x[3] is masked)
assert_(x[4] is not masked)
assert_equal(x, [0, 10, 2, -1, 40])
x = masked_array(arange(10), mask=[1, 0, 0, 0, 0] * 2)
i = [0, 2, 4, 6]
x.put(i, [6, 4, 2, 0])
assert_equal(x, asarray([6, 1, 4, 3, 2, 5, 0, 7, 8, 9, ]))
assert_equal(x.mask, [0, 0, 0, 0, 0, 1, 0, 0, 0, 0])
x.put(i, masked_array([0, 2, 4, 6], [1, 0, 1, 0]))
assert_array_equal(x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, ])
assert_equal(x.mask, [1, 0, 0, 0, 1, 1, 0, 0, 0, 0])
x = masked_array(arange(10), mask=[1, 0, 0, 0, 0] * 2)
put(x, i, [6, 4, 2, 0])
assert_equal(x, asarray([6, 1, 4, 3, 2, 5, 0, 7, 8, 9, ]))
assert_equal(x.mask, [0, 0, 0, 0, 0, 1, 0, 0, 0, 0])
put(x, i, masked_array([0, 2, 4, 6], [1, 0, 1, 0]))
assert_array_equal(x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, ])
assert_equal(x.mask, [1, 0, 0, 0, 1, 1, 0, 0, 0, 0])
def test_put_nomask(self):
# GitHub issue 6425
x = zeros(10)
z = array([3., -1.], mask=[False, True])
x.put([1, 2], z)
assert_(x[0] is not masked)
assert_equal(x[0], 0)
assert_(x[1] is not masked)
assert_equal(x[1], 3)
assert_(x[2] is masked)
assert_(x[3] is not masked)
assert_equal(x[3], 0)
def test_put_hardmask(self):
# Tests put on hardmask
d = arange(5)
n = [0, 0, 0, 1, 1]
m = make_mask(n)
xh = array(d + 1, mask=m, hard_mask=True, copy=True)
xh.put([4, 2, 0, 1, 3], [1, 2, 3, 4, 5])
assert_equal(xh._data, [3, 4, 2, 4, 5])
def test_putmask(self):
x = arange(6) + 1
mx = array(x, mask=[0, 0, 0, 1, 1, 1])
mask = [0, 0, 1, 0, 0, 1]
# w/o mask, w/o masked values
xx = x.copy()
putmask(xx, mask, 99)
assert_equal(xx, [1, 2, 99, 4, 5, 99])
# w/ mask, w/o masked values
mxx = mx.copy()
putmask(mxx, mask, 99)
assert_equal(mxx._data, [1, 2, 99, 4, 5, 99])
assert_equal(mxx._mask, [0, 0, 0, 1, 1, 0])
# w/o mask, w/ masked values
values = array([10, 20, 30, 40, 50, 60], mask=[1, 1, 1, 0, 0, 0])
xx = x.copy()
putmask(xx, mask, values)
assert_equal(xx._data, [1, 2, 30, 4, 5, 60])
assert_equal(xx._mask, [0, 0, 1, 0, 0, 0])
# w/ mask, w/ masked values
mxx = mx.copy()
putmask(mxx, mask, values)
assert_equal(mxx._data, [1, 2, 30, 4, 5, 60])
assert_equal(mxx._mask, [0, 0, 1, 1, 1, 0])
# w/ mask, w/ masked values + hardmask
mxx = mx.copy()
mxx.harden_mask()
putmask(mxx, mask, values)
assert_equal(mxx, [1, 2, 30, 4, 5, 60])
def test_ravel(self):
# Tests ravel
a = array([[1, 2, 3, 4, 5]], mask=[[0, 1, 0, 0, 0]])
aravel = a.ravel()
assert_equal(aravel._mask.shape, aravel.shape)
a = array([0, 0], mask=[1, 1])
aravel = a.ravel()
assert_equal(aravel._mask.shape, a.shape)
a = array(np.matrix([1, 2, 3, 4, 5]), mask=[[0, 1, 0, 0, 0]])
aravel = a.ravel()
assert_equal(aravel.shape, (1, 5))
assert_equal(aravel._mask.shape, a.shape)
        # Checks that the shrink option (historically `small_mask`) is preserved
a = array([1, 2, 3, 4], mask=[0, 0, 0, 0], shrink=False)
assert_equal(a.ravel()._mask, [0, 0, 0, 0])
# Test that the fill_value is preserved
a.fill_value = -99
a.shape = (2, 2)
ar = a.ravel()
assert_equal(ar._mask, [0, 0, 0, 0])
assert_equal(ar._data, [1, 2, 3, 4])
assert_equal(ar.fill_value, -99)
# Test index ordering
assert_equal(a.ravel(order='C'), [1, 2, 3, 4])
assert_equal(a.ravel(order='F'), [1, 3, 2, 4])
def test_reshape(self):
# Tests reshape
x = arange(4)
x[0] = masked
y = x.reshape(2, 2)
assert_equal(y.shape, (2, 2,))
assert_equal(y._mask.shape, (2, 2,))
assert_equal(x.shape, (4,))
assert_equal(x._mask.shape, (4,))
def test_sort(self):
# Test sort
x = array([1, 4, 2, 3], mask=[0, 1, 0, 0], dtype=np.uint8)
sortedx = sort(x)
assert_equal(sortedx._data, [1, 2, 3, 4])
assert_equal(sortedx._mask, [0, 0, 0, 1])
sortedx = sort(x, endwith=False)
assert_equal(sortedx._data, [4, 1, 2, 3])
assert_equal(sortedx._mask, [1, 0, 0, 0])
x.sort()
assert_equal(x._data, [1, 2, 3, 4])
assert_equal(x._mask, [0, 0, 0, 1])
x = array([1, 4, 2, 3], mask=[0, 1, 0, 0], dtype=np.uint8)
x.sort(endwith=False)
assert_equal(x._data, [4, 1, 2, 3])
assert_equal(x._mask, [1, 0, 0, 0])
x = [1, 4, 2, 3]
sortedx = sort(x)
        assert_(not isinstance(sortedx, MaskedArray))
x = array([0, 1, -1, -2, 2], mask=nomask, dtype=np.int8)
sortedx = sort(x, endwith=False)
assert_equal(sortedx._data, [-2, -1, 0, 1, 2])
x = array([0, 1, -1, -2, 2], mask=[0, 1, 0, 0, 1], dtype=np.int8)
sortedx = sort(x, endwith=False)
assert_equal(sortedx._data, [1, 2, -2, -1, 0])
assert_equal(sortedx._mask, [1, 1, 0, 0, 0])
def test_argsort_matches_sort(self):
x = array([1, 4, 2, 3], mask=[0, 1, 0, 0], dtype=np.uint8)
for kwargs in [dict(),
dict(endwith=True),
dict(endwith=False),
dict(fill_value=2),
dict(fill_value=2, endwith=True),
dict(fill_value=2, endwith=False)]:
sortedx = sort(x, **kwargs)
argsortedx = x[argsort(x, **kwargs)]
assert_equal(sortedx._data, argsortedx._data)
assert_equal(sortedx._mask, argsortedx._mask)
def test_sort_2d(self):
# Check sort of 2D array.
# 2D array w/o mask
a = masked_array([[8, 4, 1], [2, 0, 9]])
a.sort(0)
assert_equal(a, [[2, 0, 1], [8, 4, 9]])
a = masked_array([[8, 4, 1], [2, 0, 9]])
a.sort(1)
assert_equal(a, [[1, 4, 8], [0, 2, 9]])
# 2D array w/mask
a = masked_array([[8, 4, 1], [2, 0, 9]], mask=[[1, 0, 0], [0, 0, 1]])
a.sort(0)
assert_equal(a, [[2, 0, 1], [8, 4, 9]])
assert_equal(a._mask, [[0, 0, 0], [1, 0, 1]])
a = masked_array([[8, 4, 1], [2, 0, 9]], mask=[[1, 0, 0], [0, 0, 1]])
a.sort(1)
assert_equal(a, [[1, 4, 8], [0, 2, 9]])
assert_equal(a._mask, [[0, 0, 1], [0, 0, 1]])
# 3D
a = masked_array([[[7, 8, 9], [4, 5, 6], [1, 2, 3]],
[[1, 2, 3], [7, 8, 9], [4, 5, 6]],
[[7, 8, 9], [1, 2, 3], [4, 5, 6]],
[[4, 5, 6], [1, 2, 3], [7, 8, 9]]])
a[a % 4 == 0] = masked
am = a.copy()
an = a.filled(99)
am.sort(0)
an.sort(0)
assert_equal(am, an)
am = a.copy()
an = a.filled(99)
am.sort(1)
an.sort(1)
assert_equal(am, an)
am = a.copy()
an = a.filled(99)
am.sort(2)
an.sort(2)
assert_equal(am, an)
def test_sort_flexible(self):
# Test sort on structured dtype.
a = array(
data=[(3, 3), (3, 2), (2, 2), (2, 1), (1, 0), (1, 1), (1, 2)],
mask=[(0, 0), (0, 1), (0, 0), (0, 0), (1, 0), (0, 0), (0, 0)],
dtype=[('A', int), ('B', int)])
mask_last = array(
data=[(1, 1), (1, 2), (2, 1), (2, 2), (3, 3), (3, 2), (1, 0)],
mask=[(0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 1), (1, 0)],
dtype=[('A', int), ('B', int)])
mask_first = array(
data=[(1, 0), (1, 1), (1, 2), (2, 1), (2, 2), (3, 2), (3, 3)],
mask=[(1, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 1), (0, 0)],
dtype=[('A', int), ('B', int)])
test = sort(a)
assert_equal(test, mask_last)
assert_equal(test.mask, mask_last.mask)
test = sort(a, endwith=False)
assert_equal(test, mask_first)
assert_equal(test.mask, mask_first.mask)
# Test sort on dtype with subarray (gh-8069)
dt = np.dtype([('v', int, 2)])
a = a.view(dt)
mask_last = mask_last.view(dt)
mask_first = mask_first.view(dt)
test = sort(a)
assert_equal(test, mask_last)
assert_equal(test.mask, mask_last.mask)
test = sort(a, endwith=False)
assert_equal(test, mask_first)
assert_equal(test.mask, mask_first.mask)
def test_argsort(self):
# Test argsort
a = array([1, 5, 2, 4, 3], mask=[1, 0, 0, 1, 0])
assert_equal(np.argsort(a), argsort(a))
def test_squeeze(self):
# Check squeeze
data = masked_array([[1, 2, 3]])
assert_equal(data.squeeze(), [1, 2, 3])
data = masked_array([[1, 2, 3]], mask=[[1, 1, 1]])
assert_equal(data.squeeze(), [1, 2, 3])
assert_equal(data.squeeze()._mask, [1, 1, 1])
data = masked_array([[1]], mask=True)
assert_(data.squeeze() is masked)
def test_swapaxes(self):
# Tests swapaxes on MaskedArrays.
x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,
8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
6.04, 9.63, 7.712, 3.382, 4.489, 6.479,
7.189, 9.645, 5.395, 4.961, 9.894, 2.893,
7.357, 9.828, 6.272, 3.758, 6.693, 0.993])
m = np.array([0, 1, 0, 1, 0, 0,
1, 0, 1, 1, 0, 1,
0, 0, 0, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 0, 0,
0, 0, 1, 0, 1, 0])
mX = array(x, mask=m).reshape(6, 6)
mXX = mX.reshape(3, 2, 2, 3)
mXswapped = mX.swapaxes(0, 1)
assert_equal(mXswapped[-1], mX[:, -1])
mXXswapped = mXX.swapaxes(0, 2)
assert_equal(mXXswapped.shape, (2, 2, 3, 3))
def test_take(self):
# Tests take
x = masked_array([10, 20, 30, 40], [0, 1, 0, 1])
assert_equal(x.take([0, 0, 3]), masked_array([10, 10, 40], [0, 0, 1]))
assert_equal(x.take([0, 0, 3]), x[[0, 0, 3]])
assert_equal(x.take([[0, 1], [0, 1]]),
masked_array([[10, 20], [10, 20]], [[0, 1], [0, 1]]))
        # assert_equal crashes when passed np.ma.masked
assert_(x[1] is np.ma.masked)
assert_(x.take(1) is np.ma.masked)
x = array([[10, 20, 30], [40, 50, 60]], mask=[[0, 0, 1], [1, 0, 0, ]])
assert_equal(x.take([0, 2], axis=1),
array([[10, 30], [40, 60]], mask=[[0, 1], [1, 0]]))
assert_equal(take(x, [0, 2], axis=1),
array([[10, 30], [40, 60]], mask=[[0, 1], [1, 0]]))
def test_take_masked_indices(self):
# Test take w/ masked indices
a = np.array((40, 18, 37, 9, 22))
indices = np.arange(3)[None,:] + np.arange(5)[:, None]
mindices = array(indices, mask=(indices >= len(a)))
# No mask
test = take(a, mindices, mode='clip')
ctrl = array([[40, 18, 37],
[18, 37, 9],
[37, 9, 22],
[9, 22, 22],
[22, 22, 22]])
assert_equal(test, ctrl)
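        # (mode='clip' clips out-of-range indices to the last valid
        # position, hence the repeated trailing 22s in the control above.)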
# Masked indices
test = take(a, mindices)
ctrl = array([[40, 18, 37],
[18, 37, 9],
[37, 9, 22],
[9, 22, 40],
[22, 40, 40]])
ctrl[3, 2] = ctrl[4, 1] = ctrl[4, 2] = masked
assert_equal(test, ctrl)
assert_equal(test.mask, ctrl.mask)
# Masked input + masked indices
a = array((40, 18, 37, 9, 22), mask=(0, 1, 0, 0, 0))
test = take(a, mindices)
ctrl[0, 1] = ctrl[1, 0] = masked
assert_equal(test, ctrl)
assert_equal(test.mask, ctrl.mask)
def test_tolist(self):
        # Tests tolist
# ... on 1D
x = array(np.arange(12))
x[[1, -2]] = masked
xlist = x.tolist()
assert_(xlist[1] is None)
assert_(xlist[-2] is None)
# ... on 2D
x.shape = (3, 4)
xlist = x.tolist()
ctrl = [[0, None, 2, 3], [4, 5, 6, 7], [8, 9, None, 11]]
assert_equal(xlist[0], [0, None, 2, 3])
assert_equal(xlist[1], [4, 5, 6, 7])
assert_equal(xlist[2], [8, 9, None, 11])
assert_equal(xlist, ctrl)
# ... on structured array w/ masked records
x = array(list(zip([1, 2, 3],
[1.1, 2.2, 3.3],
['one', 'two', 'thr'])),
dtype=[('a', int), ('b', float), ('c', '|S8')])
x[-1] = masked
assert_equal(x.tolist(),
[(1, 1.1, b'one'),
(2, 2.2, b'two'),
(None, None, None)])
# ... on structured array w/ masked fields
a = array([(1, 2,), (3, 4)], mask=[(0, 1), (0, 0)],
dtype=[('a', int), ('b', int)])
test = a.tolist()
assert_equal(test, [[1, None], [3, 4]])
# ... on mvoid
a = a[0]
test = a.tolist()
assert_equal(test, [1, None])
def test_tolist_specialcase(self):
# Test mvoid.tolist: make sure we return a standard Python object
a = array([(0, 1), (2, 3)], dtype=[('a', int), ('b', int)])
# w/o mask: each entry is a np.void whose elements are standard Python
for entry in a:
for item in entry.tolist():
assert_(not isinstance(item, np.generic))
# w/ mask: each entry is a ma.void whose elements should be
# standard Python
a.mask[0] = (0, 1)
for entry in a:
for item in entry.tolist():
assert_(not isinstance(item, np.generic))
def test_toflex(self):
# Test the conversion to records
data = arange(10)
record = data.toflex()
assert_equal(record['_data'], data._data)
assert_equal(record['_mask'], data._mask)
data[[0, 1, 2, -1]] = masked
record = data.toflex()
assert_equal(record['_data'], data._data)
assert_equal(record['_mask'], data._mask)
ndtype = [('i', int), ('s', '|S3'), ('f', float)]
data = array([(i, s, f) for (i, s, f) in zip(np.arange(10),
'ABCDEFGHIJKLM',
np.random.rand(10))],
dtype=ndtype)
data[[0, 1, 2, -1]] = masked
record = data.toflex()
assert_equal(record['_data'], data._data)
assert_equal(record['_mask'], data._mask)
ndtype = np.dtype("int, (2,3)float, float")
data = array([(i, f, ff) for (i, f, ff) in zip(np.arange(10),
np.random.rand(10),
np.random.rand(10))],
dtype=ndtype)
data[[0, 1, 2, -1]] = masked
record = data.toflex()
assert_equal_records(record['_data'], data._data)
assert_equal_records(record['_mask'], data._mask)
def test_fromflex(self):
# Test the reconstruction of a masked_array from a record
a = array([1, 2, 3])
test = fromflex(a.toflex())
assert_equal(test, a)
assert_equal(test.mask, a.mask)
a = array([1, 2, 3], mask=[0, 0, 1])
test = fromflex(a.toflex())
assert_equal(test, a)
assert_equal(test.mask, a.mask)
a = array([(1, 1.), (2, 2.), (3, 3.)], mask=[(1, 0), (0, 0), (0, 1)],
dtype=[('A', int), ('B', float)])
test = fromflex(a.toflex())
assert_equal(test, a)
assert_equal(test.data, a.data)
def test_arraymethod(self):
# Test a _arraymethod w/ n argument
marray = masked_array([[1, 2, 3, 4, 5]], mask=[0, 0, 1, 0, 0])
control = masked_array([[1], [2], [3], [4], [5]],
mask=[0, 0, 1, 0, 0])
assert_equal(marray.T, control)
assert_equal(marray.transpose(), control)
assert_equal(MaskedArray.cumsum(marray.T, 0), control.cumsum(0))
class TestMaskedArrayMathMethods(object):
def setup(self):
# Base data definition.
x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,
8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
6.04, 9.63, 7.712, 3.382, 4.489, 6.479,
7.189, 9.645, 5.395, 4.961, 9.894, 2.893,
7.357, 9.828, 6.272, 3.758, 6.693, 0.993])
X = x.reshape(6, 6)
XX = x.reshape(3, 2, 2, 3)
m = np.array([0, 1, 0, 1, 0, 0,
1, 0, 1, 1, 0, 1,
0, 0, 0, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 0, 0,
0, 0, 1, 0, 1, 0])
mx = array(data=x, mask=m)
mX = array(data=X, mask=m.reshape(X.shape))
mXX = array(data=XX, mask=m.reshape(XX.shape))
m2 = np.array([1, 1, 0, 1, 0, 0,
1, 1, 1, 1, 0, 1,
0, 0, 1, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 1, 0,
0, 0, 1, 0, 1, 1])
m2x = array(data=x, mask=m2)
m2X = array(data=X, mask=m2.reshape(X.shape))
m2XX = array(data=XX, mask=m2.reshape(XX.shape))
self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX)
def test_cumsumprod(self):
# Tests cumsum & cumprod on MaskedArrays.
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
mXcp = mX.cumsum(0)
assert_equal(mXcp._data, mX.filled(0).cumsum(0))
mXcp = mX.cumsum(1)
assert_equal(mXcp._data, mX.filled(0).cumsum(1))
mXcp = mX.cumprod(0)
assert_equal(mXcp._data, mX.filled(1).cumprod(0))
mXcp = mX.cumprod(1)
assert_equal(mXcp._data, mX.filled(1).cumprod(1))
def test_cumsumprod_with_output(self):
# Tests cumsum/cumprod w/ output
xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4)
xm[:, 0] = xm[0] = xm[-1, -1] = masked
for funcname in ('cumsum', 'cumprod'):
npfunc = getattr(np, funcname)
xmmeth = getattr(xm, funcname)
# A ndarray as explicit input
output = np.empty((3, 4), dtype=float)
output.fill(-9999)
result = npfunc(xm, axis=0, out=output)
# ... the result should be the given output
assert_(result is output)
assert_equal(result, xmmeth(axis=0, out=output))
output = empty((3, 4), dtype=int)
result = xmmeth(axis=0, out=output)
assert_(result is output)
def test_ptp(self):
# Tests ptp on MaskedArrays.
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
(n, m) = X.shape
assert_equal(mx.ptp(), mx.compressed().ptp())
rows = np.zeros(n, float)
cols = np.zeros(m, float)
for k in range(m):
cols[k] = mX[:, k].compressed().ptp()
for k in range(n):
rows[k] = mX[k].compressed().ptp()
assert_equal(mX.ptp(0), cols)
assert_equal(mX.ptp(1), rows)
def test_add_object(self):
x = masked_array(['a', 'b'], mask=[1, 0], dtype=object)
y = x + 'x'
assert_equal(y[1], 'bx')
assert_(y.mask[0])
def test_sum_object(self):
# Test sum on object dtype
a = masked_array([1, 2, 3], mask=[1, 0, 0], dtype=object)
assert_equal(a.sum(), 5)
a = masked_array([[1, 2, 3], [4, 5, 6]], dtype=object)
assert_equal(a.sum(axis=0), [5, 7, 9])
def test_prod_object(self):
# Test prod on object dtype
a = masked_array([1, 2, 3], mask=[1, 0, 0], dtype=object)
assert_equal(a.prod(), 2 * 3)
a = masked_array([[1, 2, 3], [4, 5, 6]], dtype=object)
assert_equal(a.prod(axis=0), [4, 10, 18])
def test_meananom_object(self):
# Test mean/anom on object dtype
a = masked_array([1, 2, 3], dtype=object)
assert_equal(a.mean(), 2)
assert_equal(a.anom(), [-1, 0, 1])
def test_trace(self):
# Tests trace on MaskedArrays.
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
mXdiag = mX.diagonal()
assert_equal(mX.trace(), mX.diagonal().compressed().sum())
assert_almost_equal(mX.trace(),
X.trace() - sum(mXdiag.mask * X.diagonal(),
axis=0))
assert_equal(np.trace(mX), mX.trace())
def test_dot(self):
# Tests dot on MaskedArrays.
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
fx = mx.filled(0)
r = mx.dot(mx)
assert_almost_equal(r.filled(0), fx.dot(fx))
assert_(r.mask is nomask)
fX = mX.filled(0)
r = mX.dot(mX)
assert_almost_equal(r.filled(0), fX.dot(fX))
assert_(r.mask[1,3])
r1 = empty_like(r)
mX.dot(mX, out=r1)
assert_almost_equal(r, r1)
mYY = mXX.swapaxes(-1, -2)
fXX, fYY = mXX.filled(0), mYY.filled(0)
r = mXX.dot(mYY)
assert_almost_equal(r.filled(0), fXX.dot(fYY))
r1 = empty_like(r)
mXX.dot(mYY, out=r1)
assert_almost_equal(r, r1)
def test_dot_shape_mismatch(self):
# regression test
x = masked_array([[1,2],[3,4]], mask=[[0,1],[0,0]])
y = masked_array([[1,2],[3,4]], mask=[[0,1],[0,0]])
z = masked_array([[0,1],[3,3]])
x.dot(y, out=z)
assert_almost_equal(z.filled(0), [[1, 0], [15, 16]])
assert_almost_equal(z.mask, [[0, 1], [0, 0]])
def test_varmean_nomask(self):
# gh-5769
foo = array([1,2,3,4], dtype='f8')
bar = array([1,2,3,4], dtype='f8')
assert_equal(type(foo.mean()), np.float64)
assert_equal(type(foo.var()), np.float64)
assert((foo.mean() == bar.mean()) is np.bool_(True))
# check array type is preserved and out works
foo = array(np.arange(16).reshape((4,4)), dtype='f8')
bar = empty(4, dtype='f4')
assert_equal(type(foo.mean(axis=1)), MaskedArray)
assert_equal(type(foo.var(axis=1)), MaskedArray)
assert_(foo.mean(axis=1, out=bar) is bar)
assert_(foo.var(axis=1, out=bar) is bar)
def test_varstd(self):
# Tests var & std on MaskedArrays.
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
assert_almost_equal(mX.var(axis=None), mX.compressed().var())
assert_almost_equal(mX.std(axis=None), mX.compressed().std())
assert_almost_equal(mX.std(axis=None, ddof=1),
mX.compressed().std(ddof=1))
assert_almost_equal(mX.var(axis=None, ddof=1),
mX.compressed().var(ddof=1))
assert_equal(mXX.var(axis=3).shape, XX.var(axis=3).shape)
assert_equal(mX.var().shape, X.var().shape)
(mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1))
assert_almost_equal(mX.var(axis=None, ddof=2),
mX.compressed().var(ddof=2))
assert_almost_equal(mX.std(axis=None, ddof=2),
mX.compressed().std(ddof=2))
for k in range(6):
assert_almost_equal(mXvar1[k], mX[k].compressed().var())
assert_almost_equal(mXvar0[k], mX[:, k].compressed().var())
assert_almost_equal(np.sqrt(mXvar0[k]),
mX[:, k].compressed().std())
@suppress_copy_mask_on_assignment
def test_varstd_specialcases(self):
# Test a special case for var
nout = np.array(-1, dtype=float)
mout = array(-1, dtype=float)
x = array(arange(10), mask=True)
for methodname in ('var', 'std'):
method = getattr(x, methodname)
assert_(method() is masked)
assert_(method(0) is masked)
assert_(method(-1) is masked)
# Using a masked array as explicit output
method(out=mout)
assert_(mout is not masked)
assert_equal(mout.mask, True)
# Using a ndarray as explicit output
method(out=nout)
assert_(np.isnan(nout))
x = array(arange(10), mask=True)
x[-1] = 9
for methodname in ('var', 'std'):
method = getattr(x, methodname)
assert_(method(ddof=1) is masked)
assert_(method(0, ddof=1) is masked)
assert_(method(-1, ddof=1) is masked)
# Using a masked array as explicit output
method(out=mout, ddof=1)
assert_(mout is not masked)
assert_equal(mout.mask, True)
# Using a ndarray as explicit output
method(out=nout, ddof=1)
assert_(np.isnan(nout))
def test_varstd_ddof(self):
a = array([[1, 1, 0], [1, 1, 0]], mask=[[0, 0, 1], [0, 0, 1]])
test = a.std(axis=0, ddof=0)
assert_equal(test.filled(0), [0, 0, 0])
assert_equal(test.mask, [0, 0, 1])
test = a.std(axis=0, ddof=1)
assert_equal(test.filled(0), [0, 0, 0])
assert_equal(test.mask, [0, 0, 1])
test = a.std(axis=0, ddof=2)
assert_equal(test.filled(0), [0, 0, 0])
assert_equal(test.mask, [1, 1, 1])
def test_diag(self):
# Test diag
x = arange(9).reshape((3, 3))
x[1, 1] = masked
out = np.diag(x)
assert_equal(out, [0, 4, 8])
out = diag(x)
assert_equal(out, [0, 4, 8])
assert_equal(out.mask, [0, 1, 0])
out = diag(out)
control = array([[0, 0, 0], [0, 4, 0], [0, 0, 8]],
mask=[[0, 0, 0], [0, 1, 0], [0, 0, 0]])
assert_equal(out, control)
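        # Note: diag of the 1-D diagonal above rebuilds a 2-D array whose
        # off-diagonal entries are unmasked zeros, which is exactly what
        # `control` spells out.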
def test_axis_methods_nomask(self):
# Test the combination nomask & methods w/ axis
a = array([[1, 2, 3], [4, 5, 6]])
assert_equal(a.sum(0), [5, 7, 9])
assert_equal(a.sum(-1), [6, 15])
assert_equal(a.sum(1), [6, 15])
assert_equal(a.prod(0), [4, 10, 18])
assert_equal(a.prod(-1), [6, 120])
assert_equal(a.prod(1), [6, 120])
assert_equal(a.min(0), [1, 2, 3])
assert_equal(a.min(-1), [1, 4])
assert_equal(a.min(1), [1, 4])
assert_equal(a.max(0), [4, 5, 6])
assert_equal(a.max(-1), [3, 6])
assert_equal(a.max(1), [3, 6])
class TestMaskedArrayMathMethodsComplex(object):
# Test class for miscellaneous MaskedArrays methods.
def setup(self):
# Base data definition.
x = np.array([8.375j, 7.545j, 8.828j, 8.5j, 1.757j, 5.928,
8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
6.04, 9.63, 7.712, 3.382, 4.489, 6.479j,
7.189j, 9.645, 5.395, 4.961, 9.894, 2.893,
7.357, 9.828, 6.272, 3.758, 6.693, 0.993j])
X = x.reshape(6, 6)
XX = x.reshape(3, 2, 2, 3)
m = np.array([0, 1, 0, 1, 0, 0,
1, 0, 1, 1, 0, 1,
0, 0, 0, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 0, 0,
0, 0, 1, 0, 1, 0])
mx = array(data=x, mask=m)
mX = array(data=X, mask=m.reshape(X.shape))
mXX = array(data=XX, mask=m.reshape(XX.shape))
m2 = np.array([1, 1, 0, 1, 0, 0,
1, 1, 1, 1, 0, 1,
0, 0, 1, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 1, 0,
0, 0, 1, 0, 1, 1])
m2x = array(data=x, mask=m2)
m2X = array(data=X, mask=m2.reshape(X.shape))
m2XX = array(data=XX, mask=m2.reshape(XX.shape))
self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX)
def test_varstd(self):
# Tests var & std on MaskedArrays.
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
assert_almost_equal(mX.var(axis=None), mX.compressed().var())
assert_almost_equal(mX.std(axis=None), mX.compressed().std())
assert_equal(mXX.var(axis=3).shape, XX.var(axis=3).shape)
assert_equal(mX.var().shape, X.var().shape)
(mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1))
assert_almost_equal(mX.var(axis=None, ddof=2),
mX.compressed().var(ddof=2))
assert_almost_equal(mX.std(axis=None, ddof=2),
mX.compressed().std(ddof=2))
for k in range(6):
assert_almost_equal(mXvar1[k], mX[k].compressed().var())
assert_almost_equal(mXvar0[k], mX[:, k].compressed().var())
assert_almost_equal(np.sqrt(mXvar0[k]),
mX[:, k].compressed().std())
class TestMaskedArrayFunctions(object):
# Test class for miscellaneous functions.
def setup(self):
x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
xm = masked_array(x, mask=m1)
ym = masked_array(y, mask=m2)
xm.set_fill_value(1e+20)
self.info = (xm, ym)
def test_masked_where_bool(self):
x = [1, 2]
y = masked_where(False, x)
assert_equal(y, [1, 2])
assert_equal(y[1], 2)
def test_masked_equal_wlist(self):
x = [1, 2, 3]
mx = masked_equal(x, 3)
assert_equal(mx, x)
assert_equal(mx._mask, [0, 0, 1])
mx = masked_not_equal(x, 3)
assert_equal(mx, x)
assert_equal(mx._mask, [1, 1, 0])
def test_masked_equal_fill_value(self):
x = [1, 2, 3]
mx = masked_equal(x, 3)
assert_equal(mx._mask, [0, 0, 1])
assert_equal(mx.fill_value, 3)
def test_masked_where_condition(self):
# Tests masking functions.
x = array([1., 2., 3., 4., 5.])
x[2] = masked
assert_equal(masked_where(greater(x, 2), x), masked_greater(x, 2))
assert_equal(masked_where(greater_equal(x, 2), x),
masked_greater_equal(x, 2))
assert_equal(masked_where(less(x, 2), x), masked_less(x, 2))
assert_equal(masked_where(less_equal(x, 2), x),
masked_less_equal(x, 2))
assert_equal(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2))
assert_equal(masked_where(equal(x, 2), x), masked_equal(x, 2))
assert_equal(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2))
assert_equal(masked_where([1, 1, 0, 0, 0], [1, 2, 3, 4, 5]),
[99, 99, 3, 4, 5])
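        # (assert_equal on masked arrays ignores masked positions, so the
        # sentinel values such as 99 above are arbitrary placeholders.)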
def test_masked_where_oddities(self):
# Tests some generic features.
atest = ones((10, 10, 10), dtype=float)
btest = zeros(atest.shape, MaskType)
ctest = masked_where(btest, atest)
assert_equal(atest, ctest)
def test_masked_where_shape_constraint(self):
a = arange(10)
try:
test = masked_equal(1, a)
except IndexError:
pass
else:
raise AssertionError("Should have failed...")
test = masked_equal(a, 1)
assert_equal(test.mask, [0, 1, 0, 0, 0, 0, 0, 0, 0, 0])
def test_masked_where_structured(self):
# test that masked_where on a structured array sets a structured
# mask (see issue #2972)
a = np.zeros(10, dtype=[("A", "<f2"), ("B", "<f4")])
am = np.ma.masked_where(a["A"] < 5, a)
assert_equal(am.mask.dtype.names, am.dtype.names)
assert_equal(am["A"],
np.ma.masked_array(np.zeros(10), np.ones(10)))
def test_masked_otherfunctions(self):
assert_equal(masked_inside(list(range(5)), 1, 3),
[0, 199, 199, 199, 4])
assert_equal(masked_outside(list(range(5)), 1, 3), [199, 1, 2, 3, 199])
assert_equal(masked_inside(array(list(range(5)),
mask=[1, 0, 0, 0, 0]), 1, 3).mask,
[1, 1, 1, 1, 0])
assert_equal(masked_outside(array(list(range(5)),
mask=[0, 1, 0, 0, 0]), 1, 3).mask,
[1, 1, 0, 0, 1])
assert_equal(masked_equal(array(list(range(5)),
mask=[1, 0, 0, 0, 0]), 2).mask,
[1, 0, 1, 0, 0])
assert_equal(masked_not_equal(array([2, 2, 1, 2, 1],
mask=[1, 0, 0, 0, 0]), 2).mask,
[1, 0, 1, 0, 1])
def test_round(self):
a = array([1.23456, 2.34567, 3.45678, 4.56789, 5.67890],
mask=[0, 1, 0, 0, 0])
assert_equal(a.round(), [1., 2., 3., 5., 6.])
assert_equal(a.round(1), [1.2, 2.3, 3.5, 4.6, 5.7])
assert_equal(a.round(3), [1.235, 2.346, 3.457, 4.568, 5.679])
b = empty_like(a)
a.round(out=b)
assert_equal(b, [1., 2., 3., 5., 6.])
x = array([1., 2., 3., 4., 5.])
c = array([1, 1, 1, 0, 0])
x[2] = masked
z = where(c, x, -x)
assert_equal(z, [1., 2., 0., -4., -5])
c[0] = masked
z = where(c, x, -x)
assert_equal(z, [1., 2., 0., -4., -5])
assert_(z[0] is masked)
assert_(z[1] is not masked)
assert_(z[2] is masked)
def test_round_with_output(self):
# Testing round with an explicit output
xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4)
xm[:, 0] = xm[0] = xm[-1, -1] = masked
# A ndarray as explicit input
output = np.empty((3, 4), dtype=float)
output.fill(-9999)
result = np.round(xm, decimals=2, out=output)
# ... the result should be the given output
assert_(result is output)
assert_equal(result, xm.round(decimals=2, out=output))
output = empty((3, 4), dtype=float)
result = xm.round(decimals=2, out=output)
assert_(result is output)
def test_round_with_scalar(self):
# Testing round with scalar/zero dimension input
# GH issue 2244
a = array(1.1, mask=[False])
assert_equal(a.round(), 1)
a = array(1.1, mask=[True])
assert_(a.round() is masked)
a = array(1.1, mask=[False])
output = np.empty(1, dtype=float)
output.fill(-9999)
a.round(out=output)
assert_equal(output, 1)
a = array(1.1, mask=[False])
output = array(-9999., mask=[True])
a.round(out=output)
assert_equal(output[()], 1)
a = array(1.1, mask=[True])
output = array(-9999., mask=[False])
a.round(out=output)
assert_(output[()] is masked)
def test_identity(self):
a = identity(5)
assert_(isinstance(a, MaskedArray))
assert_equal(a, np.identity(5))
def test_power(self):
x = -1.1
assert_almost_equal(power(x, 2.), 1.21)
assert_(power(x, masked) is masked)
x = array([-1.1, -1.1, 1.1, 1.1, 0.])
b = array([0.5, 2., 0.5, 2., -1.], mask=[0, 0, 0, 0, 1])
y = power(x, b)
assert_almost_equal(y, [0, 1.21, 1.04880884817, 1.21, 0.])
assert_equal(y._mask, [1, 0, 0, 0, 1])
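        # Position 0 is masked because (-1.1) ** 0.5 lies outside the real
        # power domain; position 4 is masked because the exponent itself is
        # masked there.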
b.mask = nomask
y = power(x, b)
assert_equal(y._mask, [1, 0, 0, 0, 1])
z = x ** b
assert_equal(z._mask, y._mask)
assert_almost_equal(z, y)
assert_almost_equal(z._data, y._data)
x **= b
assert_equal(x._mask, y._mask)
assert_almost_equal(x, y)
assert_almost_equal(x._data, y._data)
def test_power_with_broadcasting(self):
# Test power w/ broadcasting
a2 = np.array([[1., 2., 3.], [4., 5., 6.]])
a2m = array(a2, mask=[[1, 0, 0], [0, 0, 1]])
b1 = np.array([2, 4, 3])
b2 = np.array([b1, b1])
b2m = array(b2, mask=[[0, 1, 0], [0, 1, 0]])
ctrl = array([[1 ** 2, 2 ** 4, 3 ** 3], [4 ** 2, 5 ** 4, 6 ** 3]],
mask=[[1, 1, 0], [0, 1, 1]])
# No broadcasting, base & exp w/ mask
test = a2m ** b2m
assert_equal(test, ctrl)
assert_equal(test.mask, ctrl.mask)
# No broadcasting, base w/ mask, exp w/o mask
test = a2m ** b2
assert_equal(test, ctrl)
assert_equal(test.mask, a2m.mask)
# No broadcasting, base w/o mask, exp w/ mask
test = a2 ** b2m
assert_equal(test, ctrl)
assert_equal(test.mask, b2m.mask)
ctrl = array([[2 ** 2, 4 ** 4, 3 ** 3], [2 ** 2, 4 ** 4, 3 ** 3]],
mask=[[0, 1, 0], [0, 1, 0]])
test = b1 ** b2m
assert_equal(test, ctrl)
assert_equal(test.mask, ctrl.mask)
test = b2m ** b1
assert_equal(test, ctrl)
assert_equal(test.mask, ctrl.mask)
def test_where(self):
# Test the where function
x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
xm = masked_array(x, mask=m1)
ym = masked_array(y, mask=m2)
xm.set_fill_value(1e+20)
d = where(xm > 2, xm, -9)
assert_equal(d, [-9., -9., -9., -9., -9., 4.,
-9., -9., 10., -9., -9., 3.])
assert_equal(d._mask, xm._mask)
d = where(xm > 2, -9, ym)
assert_equal(d, [5., 0., 3., 2., -1., -9.,
-9., -10., -9., 1., 0., -9.])
assert_equal(d._mask, [1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0])
d = where(xm > 2, xm, masked)
assert_equal(d, [-9., -9., -9., -9., -9., 4.,
-9., -9., 10., -9., -9., 3.])
tmp = xm._mask.copy()
tmp[(xm <= 2).filled(True)] = True
assert_equal(d._mask, tmp)
ixm = xm.astype(int)
d = where(ixm > 2, ixm, masked)
assert_equal(d, [-9, -9, -9, -9, -9, 4, -9, -9, 10, -9, -9, 3])
assert_equal(d.dtype, ixm.dtype)
def test_where_object(self):
a = np.array(None)
b = masked_array(None)
r = b.copy()
assert_equal(np.ma.where(True, a, a), r)
assert_equal(np.ma.where(True, b, b), r)
def test_where_with_masked_choice(self):
x = arange(10)
x[3] = masked
c = x >= 8
# Set False to masked
z = where(c, x, masked)
assert_(z.dtype is x.dtype)
assert_(z[3] is masked)
assert_(z[4] is masked)
assert_(z[7] is masked)
assert_(z[8] is not masked)
assert_(z[9] is not masked)
assert_equal(x, z)
# Set True to masked
z = where(c, masked, x)
assert_(z.dtype is x.dtype)
assert_(z[3] is masked)
assert_(z[4] is not masked)
assert_(z[7] is not masked)
assert_(z[8] is masked)
assert_(z[9] is masked)
def test_where_with_masked_condition(self):
x = array([1., 2., 3., 4., 5.])
c = array([1, 1, 1, 0, 0])
x[2] = masked
z = where(c, x, -x)
assert_equal(z, [1., 2., 0., -4., -5])
c[0] = masked
z = where(c, x, -x)
assert_equal(z, [1., 2., 0., -4., -5])
assert_(z[0] is masked)
assert_(z[1] is not masked)
assert_(z[2] is masked)
x = arange(1, 6)
x[-1] = masked
y = arange(1, 6) * 10
y[2] = masked
c = array([1, 1, 1, 0, 0], mask=[1, 0, 0, 0, 0])
cm = c.filled(1)
z = where(c, x, y)
zm = where(cm, x, y)
assert_equal(z, zm)
assert_(getmask(zm) is nomask)
assert_equal(zm, [1, 2, 3, 40, 50])
z = where(c, masked, 1)
assert_equal(z, [99, 99, 99, 1, 1])
z = where(c, 1, masked)
assert_equal(z, [99, 1, 1, 99, 99])
def test_where_type(self):
# Test the type conservation with where
x = np.arange(4, dtype=np.int32)
y = np.arange(4, dtype=np.float32) * 2.2
test = where(x > 1.5, y, x).dtype
control = np.find_common_type([np.int32, np.float32], [])
assert_equal(test, control)
def test_where_broadcast(self):
# Issue 8599
x = np.arange(9).reshape(3, 3)
y = np.zeros(3)
core = np.where([1, 0, 1], x, y)
ma = where([1, 0, 1], x, y)
assert_equal(core, ma)
assert_equal(core.dtype, ma.dtype)
def test_where_structured(self):
# Issue 8600
dt = np.dtype([('a', int), ('b', int)])
x = np.array([(1, 2), (3, 4), (5, 6)], dtype=dt)
y = np.array((10, 20), dtype=dt)
core = np.where([0, 1, 1], x, y)
ma = np.where([0, 1, 1], x, y)
assert_equal(core, ma)
assert_equal(core.dtype, ma.dtype)
def test_where_structured_masked(self):
dt = np.dtype([('a', int), ('b', int)])
x = np.array([(1, 2), (3, 4), (5, 6)], dtype=dt)
ma = where([0, 1, 1], x, masked)
expected = masked_where([1, 0, 0], x)
assert_equal(ma.dtype, expected.dtype)
assert_equal(ma, expected)
assert_equal(ma.mask, expected.mask)
def test_choose(self):
# Test choose
choices = [[0, 1, 2, 3], [10, 11, 12, 13],
[20, 21, 22, 23], [30, 31, 32, 33]]
chosen = choose([2, 3, 1, 0], choices)
assert_equal(chosen, array([20, 31, 12, 3]))
chosen = choose([2, 4, 1, 0], choices, mode='clip')
assert_equal(chosen, array([20, 31, 12, 3]))
chosen = choose([2, 4, 1, 0], choices, mode='wrap')
assert_equal(chosen, array([20, 1, 12, 3]))
# Check with some masked indices
indices_ = array([2, 4, 1, 0], mask=[1, 0, 0, 1])
chosen = choose(indices_, choices, mode='wrap')
assert_equal(chosen, array([99, 1, 12, 99]))
assert_equal(chosen.mask, [1, 0, 0, 1])
# Check with some masked choices
choices = array(choices, mask=[[0, 0, 0, 1], [1, 1, 0, 1],
[1, 0, 0, 0], [0, 0, 0, 0]])
indices_ = [2, 3, 1, 0]
chosen = choose(indices_, choices, mode='wrap')
assert_equal(chosen, array([20, 31, 12, 3]))
assert_equal(chosen.mask, [1, 0, 0, 1])
def test_choose_with_out(self):
# Test choose with an explicit out keyword
choices = [[0, 1, 2, 3], [10, 11, 12, 13],
[20, 21, 22, 23], [30, 31, 32, 33]]
store = empty(4, dtype=int)
chosen = choose([2, 3, 1, 0], choices, out=store)
assert_equal(store, array([20, 31, 12, 3]))
assert_(store is chosen)
# Check with some masked indices + out
store = empty(4, dtype=int)
indices_ = array([2, 3, 1, 0], mask=[1, 0, 0, 1])
chosen = choose(indices_, choices, mode='wrap', out=store)
assert_equal(store, array([99, 31, 12, 99]))
assert_equal(store.mask, [1, 0, 0, 1])
        # Check with some masked choices + out in a plain ndarray
choices = array(choices, mask=[[0, 0, 0, 1], [1, 1, 0, 1],
[1, 0, 0, 0], [0, 0, 0, 0]])
indices_ = [2, 3, 1, 0]
store = empty(4, dtype=int).view(ndarray)
chosen = choose(indices_, choices, mode='wrap', out=store)
assert_equal(store, array([999999, 31, 12, 999999]))
def test_reshape(self):
a = arange(10)
a[0] = masked
# Try the default
b = a.reshape((5, 2))
assert_equal(b.shape, (5, 2))
assert_(b.flags['C'])
# Try w/ arguments as list instead of tuple
b = a.reshape(5, 2)
assert_equal(b.shape, (5, 2))
assert_(b.flags['C'])
# Try w/ order
b = a.reshape((5, 2), order='F')
assert_equal(b.shape, (5, 2))
assert_(b.flags['F'])
# Try w/ order
b = a.reshape(5, 2, order='F')
assert_equal(b.shape, (5, 2))
assert_(b.flags['F'])
c = np.reshape(a, (2, 5))
assert_(isinstance(c, MaskedArray))
assert_equal(c.shape, (2, 5))
assert_(c[0, 0] is masked)
assert_(c.flags['C'])
def test_make_mask_descr(self):
# Flexible
ntype = [('a', float), ('b', float)]
test = make_mask_descr(ntype)
assert_equal(test, [('a', bool), ('b', bool)])
assert_(test is make_mask_descr(test))
# Standard w/ shape
ntype = (float, 2)
test = make_mask_descr(ntype)
assert_equal(test, (bool, 2))
assert_(test is make_mask_descr(test))
# Standard standard
ntype = float
test = make_mask_descr(ntype)
assert_equal(test, np.dtype(bool))
assert_(test is make_mask_descr(test))
# Nested
ntype = [('a', float), ('b', [('ba', float), ('bb', float)])]
test = make_mask_descr(ntype)
control = np.dtype([('a', 'b1'), ('b', [('ba', 'b1'), ('bb', 'b1')])])
assert_equal(test, control)
assert_(test is make_mask_descr(test))
# Named+ shape
ntype = [('a', (float, 2))]
test = make_mask_descr(ntype)
assert_equal(test, np.dtype([('a', (bool, 2))]))
assert_(test is make_mask_descr(test))
# 2 names
ntype = [(('A', 'a'), float)]
test = make_mask_descr(ntype)
assert_equal(test, np.dtype([(('A', 'a'), bool)]))
assert_(test is make_mask_descr(test))
# nested boolean types should preserve identity
base_type = np.dtype([('a', int, 3)])
base_mtype = make_mask_descr(base_type)
sub_type = np.dtype([('a', int), ('b', base_mtype)])
test = make_mask_descr(sub_type)
assert_equal(test, np.dtype([('a', bool), ('b', [('a', bool, 3)])]))
assert_(test.fields['b'][0] is base_mtype)
def test_make_mask(self):
# Test make_mask
# w/ a list as an input
mask = [0, 1]
test = make_mask(mask)
assert_equal(test.dtype, MaskType)
assert_equal(test, [0, 1])
# w/ a ndarray as an input
mask = np.array([0, 1], dtype=bool)
test = make_mask(mask)
assert_equal(test.dtype, MaskType)
assert_equal(test, [0, 1])
# w/ a flexible-type ndarray as an input - use default
mdtype = [('a', bool), ('b', bool)]
mask = np.array([(0, 0), (0, 1)], dtype=mdtype)
test = make_mask(mask)
assert_equal(test.dtype, MaskType)
assert_equal(test, [1, 1])
# w/ a flexible-type ndarray as an input - use input dtype
mdtype = [('a', bool), ('b', bool)]
mask = np.array([(0, 0), (0, 1)], dtype=mdtype)
test = make_mask(mask, dtype=mask.dtype)
assert_equal(test.dtype, mdtype)
assert_equal(test, mask)
# w/ a flexible-type ndarray as an input - use input dtype
mdtype = [('a', float), ('b', float)]
bdtype = [('a', bool), ('b', bool)]
mask = np.array([(0, 0), (0, 1)], dtype=mdtype)
test = make_mask(mask, dtype=mask.dtype)
assert_equal(test.dtype, bdtype)
assert_equal(test, np.array([(0, 0), (0, 1)], dtype=bdtype))
# Ensure this also works for void
mask = np.array((False, True), dtype='?,?')[()]
assert_(isinstance(mask, np.void))
test = make_mask(mask, dtype=mask.dtype)
assert_equal(test, mask)
assert_(test is not mask)
mask = np.array((0, 1), dtype='i4,i4')[()]
test2 = make_mask(mask, dtype=mask.dtype)
assert_equal(test2, test)
# test that nomask is returned when m is nomask.
bools = [True, False]
dtypes = [MaskType, float]
msgformat = 'copy=%s, shrink=%s, dtype=%s'
for cpy, shr, dt in itertools.product(bools, bools, dtypes):
res = make_mask(nomask, copy=cpy, shrink=shr, dtype=dt)
assert_(res is nomask, msgformat % (cpy, shr, dt))
def test_mask_or(self):
# Initialize
mtype = [('a', bool), ('b', bool)]
mask = np.array([(0, 0), (0, 1), (1, 0), (0, 0)], dtype=mtype)
# Test using nomask as input
test = mask_or(mask, nomask)
assert_equal(test, mask)
test = mask_or(nomask, mask)
assert_equal(test, mask)
# Using False as input
test = mask_or(mask, False)
assert_equal(test, mask)
# Using another array w / the same dtype
other = np.array([(0, 1), (0, 1), (0, 1), (0, 1)], dtype=mtype)
test = mask_or(mask, other)
control = np.array([(0, 1), (0, 1), (1, 1), (0, 1)], dtype=mtype)
assert_equal(test, control)
# Using another array w / a different dtype
othertype = [('A', bool), ('B', bool)]
other = np.array([(0, 1), (0, 1), (0, 1), (0, 1)], dtype=othertype)
try:
test = mask_or(mask, other)
except ValueError:
pass
# Using nested arrays
dtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])]
amask = np.array([(0, (1, 0)), (0, (1, 0))], dtype=dtype)
bmask = np.array([(1, (0, 1)), (0, (0, 0))], dtype=dtype)
cntrl = np.array([(1, (1, 1)), (0, (1, 0))], dtype=dtype)
assert_equal(mask_or(amask, bmask), cntrl)
def test_flatten_mask(self):
# Tests flatten mask
# Standard dtype
mask = np.array([0, 0, 1], dtype=bool)
assert_equal(flatten_mask(mask), mask)
# Flexible dtype
mask = np.array([(0, 0), (0, 1)], dtype=[('a', bool), ('b', bool)])
test = flatten_mask(mask)
control = np.array([0, 0, 0, 1], dtype=bool)
assert_equal(test, control)
mdtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])]
data = [(0, (0, 0)), (0, (0, 1))]
mask = np.array(data, dtype=mdtype)
test = flatten_mask(mask)
control = np.array([0, 0, 0, 0, 0, 1], dtype=bool)
assert_equal(test, control)
def test_on_ndarray(self):
# Test functions on ndarrays
a = np.array([1, 2, 3, 4])
m = array(a, mask=False)
test = anom(a)
assert_equal(test, m.anom())
test = reshape(a, (2, 2))
assert_equal(test, m.reshape(2, 2))
def test_compress(self):
# Test compress function on ndarray and masked array
# Address Github #2495.
arr = np.arange(8)
arr.shape = 4, 2
cond = np.array([True, False, True, True])
control = arr[[0, 2, 3]]
test = np.ma.compress(cond, arr, axis=0)
assert_equal(test, control)
marr = np.ma.array(arr)
test = np.ma.compress(cond, marr, axis=0)
assert_equal(test, control)
def test_compressed(self):
# Test ma.compressed function.
# Address gh-4026
a = np.ma.array([1, 2])
test = np.ma.compressed(a)
assert_(type(test) is np.ndarray)
# Test case when input data is ndarray subclass
class A(np.ndarray):
pass
a = np.ma.array(A(shape=0))
test = np.ma.compressed(a)
assert_(type(test) is A)
# Test that compress flattens
test = np.ma.compressed([[1],[2]])
assert_equal(test.ndim, 1)
test = np.ma.compressed([[[[[1]]]]])
assert_equal(test.ndim, 1)
# Test case when input is MaskedArray subclass
class M(MaskedArray):
pass
test = np.ma.compressed(M(shape=(0,1,2)))
assert_equal(test.ndim, 1)
# with .compressed() overridden
class M(MaskedArray):
def compressed(self):
return 42
test = np.ma.compressed(M(shape=(0,1,2)))
assert_equal(test, 42)
def test_convolve(self):
a = masked_equal(np.arange(5), 2)
b = np.array([1, 1])
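        # propagate_mask=True (the default) masks every output element that used a masked input;
        # propagate_mask=False fills masked inputs with 0 before convolving.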
test = np.ma.convolve(a, b)
assert_equal(test, masked_equal([0, 1, -1, -1, 7, 4], -1))
test = np.ma.convolve(a, b, propagate_mask=False)
assert_equal(test, masked_equal([0, 1, 1, 3, 7, 4], -1))
test = np.ma.convolve([1, 1], [1, 1, 1])
assert_equal(test, masked_equal([1, 2, 2, 1], -1))
a = [1, 1]
b = masked_equal([1, -1, -1, 1], -1)
test = np.ma.convolve(a, b, propagate_mask=False)
assert_equal(test, masked_equal([1, 1, -1, 1, 1], -1))
test = np.ma.convolve(a, b, propagate_mask=True)
assert_equal(test, masked_equal([-1, -1, -1, -1, -1], -1))
class TestMaskedFields(object):
def setup(self):
ilist = [1, 2, 3, 4, 5]
flist = [1.1, 2.2, 3.3, 4.4, 5.5]
slist = ['one', 'two', 'three', 'four', 'five']
ddtype = [('a', int), ('b', float), ('c', '|S8')]
mdtype = [('a', bool), ('b', bool), ('c', bool)]
mask = [0, 1, 0, 0, 1]
base = array(list(zip(ilist, flist, slist)), mask=mask, dtype=ddtype)
self.data = dict(base=base, mask=mask, ddtype=ddtype, mdtype=mdtype)
def test_set_records_masks(self):
base = self.data['base']
mdtype = self.data['mdtype']
# Set w/ nomask or masked
base.mask = nomask
assert_equal_records(base._mask, np.zeros(base.shape, dtype=mdtype))
base.mask = masked
assert_equal_records(base._mask, np.ones(base.shape, dtype=mdtype))
# Set w/ simple boolean
base.mask = False
assert_equal_records(base._mask, np.zeros(base.shape, dtype=mdtype))
base.mask = True
assert_equal_records(base._mask, np.ones(base.shape, dtype=mdtype))
# Set w/ list
base.mask = [0, 0, 0, 1, 1]
assert_equal_records(base._mask,
np.array([(x, x, x) for x in [0, 0, 0, 1, 1]],
dtype=mdtype))
def test_set_record_element(self):
        # Check setting an element of a record
base = self.data['base']
(base_a, base_b, base_c) = (base['a'], base['b'], base['c'])
base[0] = (pi, pi, 'pi')
assert_equal(base_a.dtype, int)
assert_equal(base_a._data, [3, 2, 3, 4, 5])
assert_equal(base_b.dtype, float)
assert_equal(base_b._data, [pi, 2.2, 3.3, 4.4, 5.5])
assert_equal(base_c.dtype, '|S8')
assert_equal(base_c._data,
[b'pi', b'two', b'three', b'four', b'five'])
def test_set_record_slice(self):
base = self.data['base']
(base_a, base_b, base_c) = (base['a'], base['b'], base['c'])
base[:3] = (pi, pi, 'pi')
assert_equal(base_a.dtype, int)
assert_equal(base_a._data, [3, 3, 3, 4, 5])
assert_equal(base_b.dtype, float)
assert_equal(base_b._data, [pi, pi, pi, 4.4, 5.5])
assert_equal(base_c.dtype, '|S8')
assert_equal(base_c._data,
[b'pi', b'pi', b'pi', b'four', b'five'])
def test_mask_element(self):
"Check record access"
base = self.data['base']
base[0] = masked
for n in ('a', 'b', 'c'):
assert_equal(base[n].mask, [1, 1, 0, 0, 1])
assert_equal(base[n]._data, base._data[n])
def test_getmaskarray(self):
# Test getmaskarray on flexible dtype
ndtype = [('a', int), ('b', float)]
test = empty(3, dtype=ndtype)
assert_equal(getmaskarray(test),
np.array([(0, 0), (0, 0), (0, 0)],
dtype=[('a', '|b1'), ('b', '|b1')]))
test[:] = masked
assert_equal(getmaskarray(test),
np.array([(1, 1), (1, 1), (1, 1)],
dtype=[('a', '|b1'), ('b', '|b1')]))
def test_view(self):
# Test view w/ flexible dtype
iterator = list(zip(np.arange(10), np.random.rand(10)))
data = np.array(iterator)
a = array(iterator, dtype=[('a', float), ('b', float)])
a.mask[0] = (1, 0)
controlmask = np.array([1] + 19 * [0], dtype=bool)
# Transform globally to simple dtype
test = a.view(float)
assert_equal(test, data.ravel())
assert_equal(test.mask, controlmask)
        # Transform globally to a compound dtype
test = a.view((float, 2))
assert_equal(test, data)
assert_equal(test.mask, controlmask.reshape(-1, 2))
test = a.view((float, 2), np.matrix)
assert_equal(test, data)
assert_(isinstance(test, np.matrix))
def test_getitem(self):
ndtype = [('a', float), ('b', float)]
a = array(list(zip(np.random.rand(10), np.arange(10))), dtype=ndtype)
a.mask = np.array(list(zip([0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 1, 0])),
dtype=[('a', bool), ('b', bool)])
def _test_index(i):
assert_equal(type(a[i]), mvoid)
assert_equal_records(a[i]._data, a._data[i])
assert_equal_records(a[i]._mask, a._mask[i])
assert_equal(type(a[i, ...]), MaskedArray)
assert_equal_records(a[i,...]._data, a._data[i,...])
assert_equal_records(a[i,...]._mask, a._mask[i,...])
_test_index(1) # No mask
_test_index(0) # One element masked
_test_index(-2) # All element masked
def test_setitem(self):
# Issue 4866: check that one can set individual items in [record][col]
# and [col][record] order
ndtype = np.dtype([('a', float), ('b', int)])
ma = np.ma.MaskedArray([(1.0, 1), (2.0, 2)], dtype=ndtype)
ma['a'][1] = 3.0
assert_equal(ma['a'], np.array([1.0, 3.0]))
ma[1]['a'] = 4.0
assert_equal(ma['a'], np.array([1.0, 4.0]))
# Issue 2403
mdtype = np.dtype([('a', bool), ('b', bool)])
# soft mask
control = np.array([(False, True), (True, True)], dtype=mdtype)
a = np.ma.masked_all((2,), dtype=ndtype)
a['a'][0] = 2
assert_equal(a.mask, control)
a = np.ma.masked_all((2,), dtype=ndtype)
a[0]['a'] = 2
assert_equal(a.mask, control)
# hard mask
control = np.array([(True, True), (True, True)], dtype=mdtype)
a = np.ma.masked_all((2,), dtype=ndtype)
a.harden_mask()
a['a'][0] = 2
assert_equal(a.mask, control)
a = np.ma.masked_all((2,), dtype=ndtype)
a.harden_mask()
a[0]['a'] = 2
assert_equal(a.mask, control)
def test_setitem_scalar(self):
# 8510
mask_0d = np.ma.masked_array(1, mask=True)
arr = np.ma.arange(3)
arr[0] = mask_0d
assert_array_equal(arr.mask, [True, False, False])
def test_element_len(self):
# check that len() works for mvoid (Github issue #576)
for rec in self.data['base']:
assert_equal(len(rec), len(self.data['ddtype']))
class TestMaskedObjectArray(object):
def test_getitem(self):
arr = np.ma.array([None, None])
for dt in [float, object]:
a0 = np.eye(2).astype(dt)
a1 = np.eye(3).astype(dt)
arr[0] = a0
arr[1] = a1
assert_(arr[0] is a0)
assert_(arr[1] is a1)
assert_(isinstance(arr[0,...], MaskedArray))
assert_(isinstance(arr[1,...], MaskedArray))
assert_(arr[0,...][()] is a0)
assert_(arr[1,...][()] is a1)
arr[0] = np.ma.masked
assert_(arr[1] is a1)
assert_(isinstance(arr[0,...], MaskedArray))
assert_(isinstance(arr[1,...], MaskedArray))
assert_equal(arr[0,...].mask, True)
assert_(arr[1,...][()] is a1)
# gh-5962 - object arrays of arrays do something special
assert_equal(arr[0].data, a0)
assert_equal(arr[0].mask, True)
assert_equal(arr[0,...][()].data, a0)
assert_equal(arr[0,...][()].mask, True)
def test_nested_ma(self):
arr = np.ma.array([None, None])
# set the first object to be an unmasked masked constant. A little fiddly
arr[0,...] = np.array([np.ma.masked], object)[0,...]
# check the above line did what we were aiming for
assert_(arr.data[0] is np.ma.masked)
# test that getitem returned the value by identity
assert_(arr[0] is np.ma.masked)
# now mask the masked value!
arr[0] = np.ma.masked
assert_(arr[0] is np.ma.masked)
class TestMaskedView(object):
def setup(self):
iterator = list(zip(np.arange(10), np.random.rand(10)))
data = np.array(iterator)
a = array(iterator, dtype=[('a', float), ('b', float)])
a.mask[0] = (1, 0)
controlmask = np.array([1] + 19 * [0], dtype=bool)
self.data = (data, a, controlmask)
def test_view_to_nothing(self):
(data, a, controlmask) = self.data
test = a.view()
assert_(isinstance(test, MaskedArray))
assert_equal(test._data, a._data)
assert_equal(test._mask, a._mask)
def test_view_to_type(self):
(data, a, controlmask) = self.data
test = a.view(np.ndarray)
assert_(not isinstance(test, MaskedArray))
assert_equal(test, a._data)
assert_equal_records(test, data.view(a.dtype).squeeze())
def test_view_to_simple_dtype(self):
(data, a, controlmask) = self.data
# View globally
test = a.view(float)
assert_(isinstance(test, MaskedArray))
assert_equal(test, data.ravel())
assert_equal(test.mask, controlmask)
def test_view_to_flexible_dtype(self):
(data, a, controlmask) = self.data
test = a.view([('A', float), ('B', float)])
assert_equal(test.mask.dtype.names, ('A', 'B'))
assert_equal(test['A'], a['a'])
assert_equal(test['B'], a['b'])
test = a[0].view([('A', float), ('B', float)])
assert_(isinstance(test, MaskedArray))
assert_equal(test.mask.dtype.names, ('A', 'B'))
assert_equal(test['A'], a['a'][0])
assert_equal(test['B'], a['b'][0])
test = a[-1].view([('A', float), ('B', float)])
assert_(isinstance(test, MaskedArray))
assert_equal(test.dtype.names, ('A', 'B'))
assert_equal(test['A'], a['a'][-1])
assert_equal(test['B'], a['b'][-1])
def test_view_to_subdtype(self):
(data, a, controlmask) = self.data
# View globally
test = a.view((float, 2))
assert_(isinstance(test, MaskedArray))
assert_equal(test, data)
assert_equal(test.mask, controlmask.reshape(-1, 2))
# View on 1 masked element
test = a[0].view((float, 2))
assert_(isinstance(test, MaskedArray))
assert_equal(test, data[0])
assert_equal(test.mask, (1, 0))
# View on 1 unmasked element
test = a[-1].view((float, 2))
assert_(isinstance(test, MaskedArray))
assert_equal(test, data[-1])
def test_view_to_dtype_and_type(self):
(data, a, controlmask) = self.data
test = a.view((float, 2), np.matrix)
assert_equal(test, data)
assert_(isinstance(test, np.matrix))
assert_(not isinstance(test, MaskedArray))
class TestOptionalArgs(object):
def test_ndarrayfuncs(self):
# test axis arg behaves the same as ndarray (including multiple axes)
d = np.arange(24.0).reshape((2,3,4))
m = np.zeros(24, dtype=bool).reshape((2,3,4))
# mask out last element of last dimension
m[:,:,-1] = True
a = np.ma.array(d, mask=m)
def testaxis(f, a, d):
numpy_f = numpy.__getattribute__(f)
ma_f = np.ma.__getattribute__(f)
# test axis arg
assert_equal(ma_f(a, axis=1)[...,:-1], numpy_f(d[...,:-1], axis=1))
assert_equal(ma_f(a, axis=(0,1))[...,:-1],
numpy_f(d[...,:-1], axis=(0,1)))
def testkeepdims(f, a, d):
numpy_f = numpy.__getattribute__(f)
ma_f = np.ma.__getattribute__(f)
# test keepdims arg
assert_equal(ma_f(a, keepdims=True).shape,
numpy_f(d, keepdims=True).shape)
assert_equal(ma_f(a, keepdims=False).shape,
numpy_f(d, keepdims=False).shape)
# test both at once
assert_equal(ma_f(a, axis=1, keepdims=True)[...,:-1],
numpy_f(d[...,:-1], axis=1, keepdims=True))
assert_equal(ma_f(a, axis=(0,1), keepdims=True)[...,:-1],
numpy_f(d[...,:-1], axis=(0,1), keepdims=True))
for f in ['sum', 'prod', 'mean', 'var', 'std']:
testaxis(f, a, d)
testkeepdims(f, a, d)
for f in ['min', 'max']:
testaxis(f, a, d)
d = (np.arange(24).reshape((2,3,4))%2 == 0)
a = np.ma.array(d, mask=m)
for f in ['all', 'any']:
testaxis(f, a, d)
testkeepdims(f, a, d)
def test_count(self):
# test np.ma.count specially
d = np.arange(24.0).reshape((2,3,4))
m = np.zeros(24, dtype=bool).reshape((2,3,4))
m[:,0,:] = True
a = np.ma.array(d, mask=m)
assert_equal(count(a), 16)
assert_equal(count(a, axis=1), 2*ones((2,4)))
assert_equal(count(a, axis=(0,1)), 4*ones((4,)))
assert_equal(count(a, keepdims=True), 16*ones((1,1,1)))
assert_equal(count(a, axis=1, keepdims=True), 2*ones((2,1,4)))
assert_equal(count(a, axis=(0,1), keepdims=True), 4*ones((1,1,4)))
assert_equal(count(a, axis=-2), 2*ones((2,4)))
assert_raises(ValueError, count, a, axis=(1,1))
assert_raises(np.AxisError, count, a, axis=3)
# check the 'nomask' path
a = np.ma.array(d, mask=nomask)
assert_equal(count(a), 24)
assert_equal(count(a, axis=1), 3*ones((2,4)))
assert_equal(count(a, axis=(0,1)), 6*ones((4,)))
assert_equal(count(a, keepdims=True), 24*ones((1,1,1)))
assert_equal(np.ndim(count(a, keepdims=True)), 3)
assert_equal(count(a, axis=1, keepdims=True), 3*ones((2,1,4)))
assert_equal(count(a, axis=(0,1), keepdims=True), 6*ones((1,1,4)))
assert_equal(count(a, axis=-2), 3*ones((2,4)))
assert_raises(ValueError, count, a, axis=(1,1))
assert_raises(np.AxisError, count, a, axis=3)
# check the 'masked' singleton
assert_equal(count(np.ma.masked), 0)
# check 0-d arrays do not allow axis > 0
assert_raises(np.AxisError, count, np.ma.array(1), axis=1)
class TestMaskedConstant(object):
def _do_add_test(self, add):
# sanity check
assert_(add(np.ma.masked, 1) is np.ma.masked)
# now try with a vector
vector = np.array([1, 2, 3])
result = add(np.ma.masked, vector)
# lots of things could go wrong here
assert_(result is not np.ma.masked)
assert_(not isinstance(result, np.ma.core.MaskedConstant))
assert_equal(result.shape, vector.shape)
assert_equal(np.ma.getmask(result), np.ones(vector.shape, dtype=bool))
def test_ufunc(self):
self._do_add_test(np.add)
def test_operator(self):
self._do_add_test(lambda a, b: a + b)
def test_ctor(self):
m = np.ma.array(np.ma.masked)
# most importantly, we do not want to create a new MaskedConstant
# instance
assert_(not isinstance(m, np.ma.core.MaskedConstant))
assert_(m is not np.ma.masked)
def test_masked_array():
a = np.ma.array([0, 1, 2, 3], mask=[0, 0, 1, 0])
assert_equal(np.argwhere(a), [[1], [3]])
def test_append_masked_array():
a = np.ma.masked_equal([1,2,3], value=2)
b = np.ma.masked_equal([4,3,2], value=2)
result = np.ma.append(a, b)
expected_data = [1, 2, 3, 4, 3, 2]
expected_mask = [False, True, False, False, False, True]
assert_array_equal(result.data, expected_data)
assert_array_equal(result.mask, expected_mask)
a = np.ma.masked_all((2,2))
b = np.ma.ones((3,1))
result = np.ma.append(a, b)
expected_data = [1] * 3
expected_mask = [True] * 4 + [False] * 3
assert_array_equal(result.data[-3], expected_data)
assert_array_equal(result.mask, expected_mask)
result = np.ma.append(a, b, axis=None)
assert_array_equal(result.data[-3], expected_data)
assert_array_equal(result.mask, expected_mask)
def test_append_masked_array_along_axis():
a = np.ma.masked_equal([1,2,3], value=2)
b = np.ma.masked_values([[4, 5, 6], [7, 8, 9]], 7)
# When `axis` is specified, `values` must have the correct shape.
assert_raises(ValueError, np.ma.append, a, b, axis=0)
result = np.ma.append(a[np.newaxis,:], b, axis=0)
expected = np.ma.arange(1, 10)
expected[[1, 6]] = np.ma.masked
expected = expected.reshape((3,3))
assert_array_equal(result.data, expected.data)
assert_array_equal(result.mask, expected.mask)
def test_default_fill_value_complex():
# regression test for Python 3, where 'unicode' was not defined
assert_(default_fill_value(1 + 1j) == 1.e20 + 0.0j)
def test_ufunc_with_output():
# check that giving an output argument always returns that output.
# Regression test for gh-8416.
x = array([1., 2., 3.], mask=[0, 0, 1])
y = np.add(x, 1., out=x)
assert_(y is x)
def test_astype():
descr = [('v', int, 3), ('x', [('y', float)])]
x = array(([1, 2, 3], (1.0,)), dtype=descr)
assert_equal(x, x.astype(descr))
###############################################################################
if __name__ == "__main__":
run_module_suite()
| 38.124301
| 81
| 0.514324
|
973ffe8f46131dcce1847617099087c3b5f18237
| 1,907
|
py
|
Python
|
data/diabetes/tpot/fold8/pipeline.py
|
luisferreira97/autoautoml
|
501d2de8b2153748b57e5c8cb247058c587ce29c
|
[
"MIT"
] | 7
|
2020-05-15T23:10:26.000Z
|
2022-01-21T10:36:50.000Z
|
data/diabetes/tpot/fold8/pipeline.py
|
luisferreira97/autoautoml
|
501d2de8b2153748b57e5c8cb247058c587ce29c
|
[
"MIT"
] | 13
|
2020-11-13T18:47:51.000Z
|
2021-07-24T10:04:57.000Z
|
data/diabetes/tpot/fold8/pipeline.py
|
luisferreira97/autoautoml
|
501d2de8b2153748b57e5c8cb247058c587ce29c
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import (SelectPercentile, VarianceThreshold,
f_classif)
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures
from tpot.builtins import StackingEstimator
from tpot.export_utils import set_param_recursive
# NOTE: Make sure that the outcome column is labeled 'target' in the data file
tpot_data = pd.read_csv("PATH/TO/DATA/FILE",
sep="COLUMN_SEPARATOR", dtype=np.float64)
features = tpot_data.drop("target", axis=1)
training_features, testing_features, training_target, testing_target = train_test_split(
features, tpot_data["target"], random_state=42
)
# Average CV score on the training set was: 0.8601459527631278
exported_pipeline = make_pipeline(
PolynomialFeatures(degree=2, include_bias=False, interaction_only=False),
SelectPercentile(score_func=f_classif, percentile=53),
VarianceThreshold(threshold=0.005),
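    # TPOT's StackingEstimator fits the wrapped SGDClassifier and appends its predictions
    # as synthetic features for the downstream RandomForestClassifier.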
StackingEstimator(
estimator=SGDClassifier(
alpha=0.0,
eta0=0.01,
fit_intercept=True,
l1_ratio=0.75,
learning_rate="constant",
loss="squared_hinge",
penalty="elasticnet",
power_t=50.0,
)
),
RandomForestClassifier(
bootstrap=True,
criterion="entropy",
max_features=0.55,
min_samples_leaf=1,
min_samples_split=11,
n_estimators=100,
),
)
# Fix random state for all the steps in exported pipeline
set_param_recursive(exported_pipeline.steps, "random_state", 42)
exported_pipeline.fit(training_features, training_target)
results = exported_pipeline.predict(testing_features)
| 36.673077
| 88
| 0.718406
|
c656421b86cef86c44486c7b55c66e9b54ee4af0
| 845
|
py
|
Python
|
simulation/bin/collapse_utr_bed.py
|
LuChenLab/SCAPE
|
49c4063f1ec3ac2d72f935b61de4a18c66db754d
|
[
"MIT"
] | 3
|
2022-03-15T05:22:29.000Z
|
2022-03-21T18:32:04.000Z
|
simulation/bin/collapse_utr_bed.py
|
LuChenLab/SCAPE
|
49c4063f1ec3ac2d72f935b61de4a18c66db754d
|
[
"MIT"
] | 3
|
2022-02-20T04:43:18.000Z
|
2022-03-19T12:19:56.000Z
|
simulation/bin/collapse_utr_bed.py
|
LuChenLab/SCAPE
|
49c4063f1ec3ac2d72f935b61de4a18c66db754d
|
[
"MIT"
] | 1
|
2022-03-21T18:32:15.000Z
|
2022-03-21T18:32:15.000Z
|
import sys
from collections import defaultdict
def main(args):
bedin, bedout = args
out_dic = defaultdict(list)
with open(bedin) as fh, open(bedout, 'w') as fo:
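        # First pass: group the BED lines by gene id (the last tab-separated column).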
for line in fh:
gid = line.strip().split('\t')[-1]
out_dic[gid].extend([line])
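        # Second pass: collapse each gene's records to a single min-start/max-end span
        # and rewrite every original line with the collapsed coordinates prepended.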
for gid, detail in out_dic.items():
detail = list(map(lambda x: x.strip().split('\t'), detail))
detail_ = list(map(lambda x: ','.join(x), zip(*detail)))
chrom, st, en = detail_[:3]
strand = detail_[-2].split(',')[0]
chrom = chrom.split(',')[0]
if strand == '+' or strand == '-':
st = str(min(map(lambda x: int(x), st.split(','))))
en = str(max(map(lambda x: int(x), en.split(','))))
else:
                raise ValueError(f'Unrecognized strand {strand}')
for i in detail:
fo.write('\t'.join([chrom, st, en, strand] + i) + '\n')
if __name__ == '__main__':
main(sys.argv[1:])
| 28.166667
| 62
| 0.592899
|
ff717dbd2769c74a2caa4ff19ef057520a27d1eb
| 30,776
|
py
|
Python
|
src/transformers/models/auto/auto_factory.py
|
holazzer/transformers
|
53191d75ecca21c028077b3227f9ac47379e4690
|
[
"Apache-2.0"
] | 28
|
2021-09-15T01:25:00.000Z
|
2022-03-01T20:21:28.000Z
|
src/transformers/models/auto/auto_factory.py
|
holazzer/transformers
|
53191d75ecca21c028077b3227f9ac47379e4690
|
[
"Apache-2.0"
] | 1
|
2021-08-09T01:51:17.000Z
|
2021-08-09T01:51:17.000Z
|
src/transformers/models/auto/auto_factory.py
|
holazzer/transformers
|
53191d75ecca21c028077b3227f9ac47379e4690
|
[
"Apache-2.0"
] | 1
|
2021-12-02T05:20:55.000Z
|
2021-12-02T05:20:55.000Z
|
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Factory function to build auto-model classes."""
import importlib
from collections import OrderedDict
from ...configuration_utils import PretrainedConfig
from ...file_utils import copy_func
from ...utils import logging
from .configuration_auto import AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings
logger = logging.get_logger(__name__)
CLASS_DOCSTRING = """
This is a generic model class that will be instantiated as one of the model classes of the library when created
with the :meth:`~transformers.BaseAutoModelClass.from_pretrained` class method or the
:meth:`~transformers.BaseAutoModelClass.from_config` class method.
This class cannot be instantiated directly using ``__init__()`` (throws an error).
"""
FROM_CONFIG_DOCSTRING = """
Instantiates one of the model classes of the library from a configuration.
Note:
Loading a model from its configuration file does **not** load the model weights. It only affects the
model's configuration. Use :meth:`~transformers.BaseAutoModelClass.from_pretrained` to load the model
weights.
Args:
config (:class:`~transformers.PretrainedConfig`):
The model class to instantiate is selected based on the configuration class:
List options
Examples::
>>> from transformers import AutoConfig, BaseAutoModelClass
>>> # Download configuration from huggingface.co and cache.
>>> config = AutoConfig.from_pretrained('checkpoint_placeholder')
>>> model = BaseAutoModelClass.from_config(config)
"""
FROM_PRETRAINED_TORCH_DOCSTRING = """
Instantiate one of the model classes of the library from a pretrained model.
The model class to instantiate is selected based on the :obj:`model_type` property of the config object (either
passed as an argument or loaded from :obj:`pretrained_model_name_or_path` if possible), or when it's missing,
by falling back to using pattern matching on :obj:`pretrained_model_name_or_path`:
List options
The model is set in evaluation mode by default using ``model.eval()`` (so for instance, dropout modules are
deactivated). To train the model, you should first set it back in training mode with ``model.train()``
Args:
pretrained_model_name_or_path (:obj:`str` or :obj:`os.PathLike`):
Can be either:
- A string, the `model id` of a pretrained model hosted inside a model repo on huggingface.co.
Valid model ids can be located at the root-level, like ``bert-base-uncased``, or namespaced under
a user or organization name, like ``dbmdz/bert-base-german-cased``.
- A path to a `directory` containing model weights saved using
:func:`~transformers.PreTrainedModel.save_pretrained`, e.g., ``./my_model_directory/``.
- A path or url to a `tensorflow index checkpoint file` (e.g, ``./tf_model/model.ckpt.index``). In
this case, ``from_tf`` should be set to :obj:`True` and a configuration object should be provided
as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in
a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
model_args (additional positional arguments, `optional`):
Will be passed along to the underlying model ``__init__()`` method.
config (:class:`~transformers.PretrainedConfig`, `optional`):
Configuration for the model to use instead of an automatically loaded configuration. Configuration can
be automatically loaded when:
- The model is a model provided by the library (loaded with the `model id` string of a pretrained
model).
- The model was saved using :meth:`~transformers.PreTrainedModel.save_pretrained` and is reloaded
by supplying the save directory.
- The model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a
configuration JSON file named `config.json` is found in the directory.
state_dict (`Dict[str, torch.Tensor]`, `optional`):
A state dictionary to use instead of a state dictionary loaded from saved weights file.
This option can be used if you want to create a model from a pretrained configuration but load your own
weights. In this case though, you should check if using
:func:`~transformers.PreTrainedModel.save_pretrained` and
:func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.
cache_dir (:obj:`str` or :obj:`os.PathLike`, `optional`):
Path to a directory in which a downloaded pretrained model configuration should be cached if the
standard cache should not be used.
from_tf (:obj:`bool`, `optional`, defaults to :obj:`False`):
Load the model weights from a TensorFlow checkpoint save file (see docstring of
``pretrained_model_name_or_path`` argument).
force_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to force the (re-)download of the model weights and configuration files, overriding the
cached versions if they exist.
resume_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to delete incompletely received files. Will attempt to resume the download if such a
file exists.
        proxies (:obj:`Dict[str, str]`, `optional`):
A dictionary of proxy servers to use by protocol or endpoint, e.g., :obj:`{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
output_loading_info(:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
local_files_only(:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to only look at local files (e.g., not try downloading the model).
revision(:obj:`str`, `optional`, defaults to :obj:`"main"`):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so ``revision`` can be any
identifier allowed by git.
kwargs (additional keyword arguments, `optional`):
Can be used to update the configuration object (after it being loaded) and initiate the model (e.g.,
:obj:`output_attentions=True`). Behaves differently depending on whether a ``config`` is provided or
automatically loaded:
- If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the
underlying model's ``__init__`` method (we assume all relevant updates to the configuration have
already been done)
- If a configuration is not provided, ``kwargs`` will be first passed to the configuration class
initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of
``kwargs`` that corresponds to a configuration attribute will be used to override said attribute
with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration
attribute will be passed to the underlying model's ``__init__`` function.
Examples::
>>> from transformers import AutoConfig, BaseAutoModelClass
>>> # Download model and configuration from huggingface.co and cache.
>>> model = BaseAutoModelClass.from_pretrained('checkpoint_placeholder')
>>> # Update configuration during loading
>>> model = BaseAutoModelClass.from_pretrained('checkpoint_placeholder', output_attentions=True)
>>> model.config.output_attentions
True
>>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
>>> config = AutoConfig.from_pretrained('./tf_model/shortcut_placeholder_tf_model_config.json')
>>> model = BaseAutoModelClass.from_pretrained('./tf_model/shortcut_placeholder_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
FROM_PRETRAINED_TF_DOCSTRING = """
Instantiate one of the model classes of the library from a pretrained model.
The model class to instantiate is selected based on the :obj:`model_type` property of the config object (either
passed as an argument or loaded from :obj:`pretrained_model_name_or_path` if possible), or when it's missing,
by falling back to using pattern matching on :obj:`pretrained_model_name_or_path`:
List options
Args:
pretrained_model_name_or_path (:obj:`str` or :obj:`os.PathLike`):
Can be either:
- A string, the `model id` of a pretrained model hosted inside a model repo on huggingface.co.
Valid model ids can be located at the root-level, like ``bert-base-uncased``, or namespaced under
a user or organization name, like ``dbmdz/bert-base-german-cased``.
- A path to a `directory` containing model weights saved using
:func:`~transformers.PreTrainedModel.save_pretrained`, e.g., ``./my_model_directory/``.
- A path or url to a `PyTorch state_dict save file` (e.g, ``./pt_model/pytorch_model.bin``). In
this case, ``from_pt`` should be set to :obj:`True` and a configuration object should be provided
as ``config`` argument. This loading path is slower than converting the PyTorch model in a
TensorFlow model using the provided conversion scripts and loading the TensorFlow model
afterwards.
model_args (additional positional arguments, `optional`):
Will be passed along to the underlying model ``__init__()`` method.
config (:class:`~transformers.PretrainedConfig`, `optional`):
Configuration for the model to use instead of an automatically loaded configuration. Configuration can
be automatically loaded when:
- The model is a model provided by the library (loaded with the `model id` string of a pretrained
model).
- The model was saved using :meth:`~transformers.PreTrainedModel.save_pretrained` and is reloaded
by supplying the save directory.
- The model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a
configuration JSON file named `config.json` is found in the directory.
cache_dir (:obj:`str` or :obj:`os.PathLike`, `optional`):
Path to a directory in which a downloaded pretrained model configuration should be cached if the
standard cache should not be used.
from_pt (:obj:`bool`, `optional`, defaults to :obj:`False`):
Load the model weights from a PyTorch checkpoint save file (see docstring of
``pretrained_model_name_or_path`` argument).
force_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to force the (re-)download of the model weights and configuration files, overriding the
cached versions if they exist.
resume_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to delete incompletely received files. Will attempt to resume the download if such a
file exists.
        proxies (:obj:`Dict[str, str]`, `optional`):
A dictionary of proxy servers to use by protocol or endpoint, e.g., :obj:`{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
output_loading_info(:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
local_files_only(:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to only look at local files (e.g., not try downloading the model).
revision(:obj:`str`, `optional`, defaults to :obj:`"main"`):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so ``revision`` can be any
identifier allowed by git.
kwargs (additional keyword arguments, `optional`):
Can be used to update the configuration object (after it being loaded) and initiate the model (e.g.,
:obj:`output_attentions=True`). Behaves differently depending on whether a ``config`` is provided or
automatically loaded:
- If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the
underlying model's ``__init__`` method (we assume all relevant updates to the configuration have
already been done)
- If a configuration is not provided, ``kwargs`` will be first passed to the configuration class
initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of
``kwargs`` that corresponds to a configuration attribute will be used to override said attribute
with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration
attribute will be passed to the underlying model's ``__init__`` function.
Examples::
>>> from transformers import AutoConfig, BaseAutoModelClass
>>> # Download model and configuration from huggingface.co and cache.
>>> model = BaseAutoModelClass.from_pretrained('checkpoint_placeholder')
>>> # Update configuration during loading
>>> model = BaseAutoModelClass.from_pretrained('checkpoint_placeholder', output_attentions=True)
>>> model.config.output_attentions
True
>>> # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower)
>>> config = AutoConfig.from_pretrained('./pt_model/shortcut_placeholder_pt_model_config.json')
>>> model = BaseAutoModelClass.from_pretrained('./pt_model/shortcut_placeholder_pytorch_model.bin', from_pt=True, config=config)
"""
FROM_PRETRAINED_FLAX_DOCSTRING = """
Instantiate one of the model classes of the library from a pretrained model.
The model class to instantiate is selected based on the :obj:`model_type` property of the config object (either
passed as an argument or loaded from :obj:`pretrained_model_name_or_path` if possible), or when it's missing,
by falling back to using pattern matching on :obj:`pretrained_model_name_or_path`:
List options
Args:
pretrained_model_name_or_path (:obj:`str` or :obj:`os.PathLike`):
Can be either:
- A string, the `model id` of a pretrained model hosted inside a model repo on huggingface.co.
Valid model ids can be located at the root-level, like ``bert-base-uncased``, or namespaced under
a user or organization name, like ``dbmdz/bert-base-german-cased``.
- A path to a `directory` containing model weights saved using
:func:`~transformers.PreTrainedModel.save_pretrained`, e.g., ``./my_model_directory/``.
- A path or url to a `PyTorch state_dict save file` (e.g, ``./pt_model/pytorch_model.bin``). In
this case, ``from_pt`` should be set to :obj:`True` and a configuration object should be provided
as ``config`` argument. This loading path is slower than converting the PyTorch model in a
TensorFlow model using the provided conversion scripts and loading the TensorFlow model
afterwards.
model_args (additional positional arguments, `optional`):
Will be passed along to the underlying model ``__init__()`` method.
config (:class:`~transformers.PretrainedConfig`, `optional`):
Configuration for the model to use instead of an automatically loaded configuration. Configuration can
be automatically loaded when:
- The model is a model provided by the library (loaded with the `model id` string of a pretrained
model).
- The model was saved using :meth:`~transformers.PreTrainedModel.save_pretrained` and is reloaded
by supplying the save directory.
- The model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a
configuration JSON file named `config.json` is found in the directory.
cache_dir (:obj:`str` or :obj:`os.PathLike`, `optional`):
Path to a directory in which a downloaded pretrained model configuration should be cached if the
standard cache should not be used.
from_pt (:obj:`bool`, `optional`, defaults to :obj:`False`):
Load the model weights from a PyTorch checkpoint save file (see docstring of
``pretrained_model_name_or_path`` argument).
force_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to force the (re-)download of the model weights and configuration files, overriding the
cached versions if they exist.
resume_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to delete incompletely received files. Will attempt to resume the download if such a
file exists.
        proxies (:obj:`Dict[str, str]`, `optional`):
A dictionary of proxy servers to use by protocol or endpoint, e.g., :obj:`{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
output_loading_info(:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
local_files_only(:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to only look at local files (e.g., not try downloading the model).
revision(:obj:`str`, `optional`, defaults to :obj:`"main"`):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so ``revision`` can be any
identifier allowed by git.
kwargs (additional keyword arguments, `optional`):
Can be used to update the configuration object (after it being loaded) and initiate the model (e.g.,
:obj:`output_attentions=True`). Behaves differently depending on whether a ``config`` is provided or
automatically loaded:
- If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the
underlying model's ``__init__`` method (we assume all relevant updates to the configuration have
already been done)
- If a configuration is not provided, ``kwargs`` will be first passed to the configuration class
initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of
``kwargs`` that corresponds to a configuration attribute will be used to override said attribute
with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration
attribute will be passed to the underlying model's ``__init__`` function.
Examples::
>>> from transformers import AutoConfig, BaseAutoModelClass
>>> # Download model and configuration from huggingface.co and cache.
>>> model = BaseAutoModelClass.from_pretrained('checkpoint_placeholder')
>>> # Update configuration during loading
>>> model = BaseAutoModelClass.from_pretrained('checkpoint_placeholder', output_attentions=True)
>>> model.config.output_attentions
True
>>> # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower)
>>> config = AutoConfig.from_pretrained('./pt_model/shortcut_placeholder_pt_model_config.json')
>>> model = BaseAutoModelClass.from_pretrained('./pt_model/shortcut_placeholder_pytorch_model.bin', from_pt=True, config=config)
"""
def _get_model_class(config, model_mapping):
supported_models = model_mapping[type(config)]
if not isinstance(supported_models, (list, tuple)):
return supported_models
name_to_model = {model.__name__: model for model in supported_models}
architectures = getattr(config, "architectures", [])
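    # Prefer an exact match against the config's declared architectures, also accepting
    # the TF-/Flax-prefixed variants of the same class name.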
for arch in architectures:
if arch in name_to_model:
return name_to_model[arch]
elif f"TF{arch}" in name_to_model:
return name_to_model[f"TF{arch}"]
elif f"Flax{arch}" in name_to_model:
return name_to_model[f"Flax{arch}"]
    # If no architecture is set in the config, or none of them matches the supported models,
    # the first element of the tuple is the default.
return supported_models[0]
class _BaseAutoModelClass:
# Base class for auto models.
_model_mapping = None
def __init__(self, *args, **kwargs):
raise EnvironmentError(
f"{self.__class__.__name__} is designed to be instantiated "
f"using the `{self.__class__.__name__}.from_pretrained(pretrained_model_name_or_path)` or "
f"`{self.__class__.__name__}.from_config(config)` methods."
)
@classmethod
def from_config(cls, config, **kwargs):
if type(config) in cls._model_mapping.keys():
model_class = _get_model_class(config, cls._model_mapping)
return model_class._from_config(config, **kwargs)
raise ValueError(
f"Unrecognized configuration class {config.__class__} for this kind of AutoModel: {cls.__name__}.\n"
f"Model type should be one of {', '.join(c.__name__ for c in cls._model_mapping.keys())}."
)
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
config = kwargs.pop("config", None)
kwargs["_from_auto"] = True
if not isinstance(config, PretrainedConfig):
config, kwargs = AutoConfig.from_pretrained(
pretrained_model_name_or_path, return_unused_kwargs=True, **kwargs
)
if type(config) in cls._model_mapping.keys():
model_class = _get_model_class(config, cls._model_mapping)
return model_class.from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **kwargs)
raise ValueError(
f"Unrecognized configuration class {config.__class__} for this kind of AutoModel: {cls.__name__}.\n"
f"Model type should be one of {', '.join(c.__name__ for c in cls._model_mapping.keys())}."
)
def insert_head_doc(docstring, head_doc=""):
if len(head_doc) > 0:
return docstring.replace(
"one of the model classes of the library ",
f"one of the model classes of the library (with a {head_doc} head) ",
)
return docstring.replace(
"one of the model classes of the library ", "one of the base model classes of the library "
)
def auto_class_update(cls, checkpoint_for_example="bert-base-cased", head_doc=""):
# Create a new class with the right name from the base class
model_mapping = cls._model_mapping
name = cls.__name__
class_docstring = insert_head_doc(CLASS_DOCSTRING, head_doc=head_doc)
cls.__doc__ = class_docstring.replace("BaseAutoModelClass", name)
# Now we need to copy and re-register `from_config` and `from_pretrained` as class methods otherwise we can't
# have a specific docstrings for them.
from_config = copy_func(_BaseAutoModelClass.from_config)
from_config_docstring = insert_head_doc(FROM_CONFIG_DOCSTRING, head_doc=head_doc)
from_config_docstring = from_config_docstring.replace("BaseAutoModelClass", name)
from_config_docstring = from_config_docstring.replace("checkpoint_placeholder", checkpoint_for_example)
from_config.__doc__ = from_config_docstring
from_config = replace_list_option_in_docstrings(model_mapping._model_mapping, use_model_types=False)(from_config)
cls.from_config = classmethod(from_config)
if name.startswith("TF"):
from_pretrained_docstring = FROM_PRETRAINED_TF_DOCSTRING
elif name.startswith("Flax"):
from_pretrained_docstring = FROM_PRETRAINED_FLAX_DOCSTRING
else:
from_pretrained_docstring = FROM_PRETRAINED_TORCH_DOCSTRING
from_pretrained = copy_func(_BaseAutoModelClass.from_pretrained)
from_pretrained_docstring = insert_head_doc(from_pretrained_docstring, head_doc=head_doc)
from_pretrained_docstring = from_pretrained_docstring.replace("BaseAutoModelClass", name)
from_pretrained_docstring = from_pretrained_docstring.replace("checkpoint_placeholder", checkpoint_for_example)
shortcut = checkpoint_for_example.split("/")[-1].split("-")[0]
from_pretrained_docstring = from_pretrained_docstring.replace("shortcut_placeholder", shortcut)
from_pretrained.__doc__ = from_pretrained_docstring
from_pretrained = replace_list_option_in_docstrings(model_mapping._model_mapping)(from_pretrained)
cls.from_pretrained = classmethod(from_pretrained)
return cls
def get_values(model_mapping):
result = []
for model in model_mapping.values():
if isinstance(model, (list, tuple)):
result += list(model)
else:
result.append(model)
return result
def getattribute_from_module(module, attr):
if attr is None:
return None
if isinstance(attr, tuple):
return tuple(getattribute_from_module(module, a) for a in attr)
if hasattr(module, attr):
return getattr(module, attr)
# Some of the mappings have entries model_type -> object of another model type. In that case we try to grab the
# object at the top level.
transformers_module = importlib.import_module("transformers")
return getattribute_from_module(transformers_module, attr)
class _LazyAutoMapping(OrderedDict):
"""
" A mapping config to object (model or tokenizer for instance) that will load keys and values when it is accessed.
Args:
- config_mapping: The map model type to config class
- model_mapping: The map model type to model (or tokenizer) class
"""
def __init__(self, config_mapping, model_mapping):
self._config_mapping = config_mapping
self._reverse_config_mapping = {v: k for k, v in config_mapping.items()}
self._model_mapping = model_mapping
self._modules = {}
def __getitem__(self, key):
model_type = self._reverse_config_mapping[key.__name__]
if model_type not in self._model_mapping:
raise KeyError(key)
model_name = self._model_mapping[model_type]
return self._load_attr_from_module(model_type, model_name)
def _load_attr_from_module(self, model_type, attr):
module_name = model_type_to_module_name(model_type)
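        # Import the per-model-type submodule lazily and cache it for subsequent lookups.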
if module_name not in self._modules:
self._modules[module_name] = importlib.import_module(f".{module_name}", "transformers.models")
return getattribute_from_module(self._modules[module_name], attr)
def keys(self):
return [
self._load_attr_from_module(key, name)
for key, name in self._config_mapping.items()
if key in self._model_mapping.keys()
]
def get(self, key, default):
try:
return self.__getitem__(key)
except KeyError:
return default
def __bool__(self):
return bool(self.keys())
def values(self):
return [
self._load_attr_from_module(key, name)
for key, name in self._model_mapping.items()
if key in self._config_mapping.keys()
]
def items(self):
return [
(
self._load_attr_from_module(key, self._config_mapping[key]),
self._load_attr_from_module(key, self._model_mapping[key]),
)
for key in self._model_mapping.keys()
if key in self._config_mapping.keys()
]
def __iter__(self):
return iter(self.keys())
def __contains__(self, item):
if not hasattr(item, "__name__") or item.__name__ not in self._reverse_config_mapping:
return False
model_type = self._reverse_config_mapping[item.__name__]
return model_type in self._model_mapping
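# --- Illustrative usage sketch (not part of the original module) ---
# Shows how a _LazyAutoMapping defers module imports until a key is looked up.
# The "bert"/BertConfig/BertModel names are assumptions picked for the example,
# not mappings defined in this file.
def _lazy_auto_mapping_example():
    from transformers import BertConfig

    example_mapping = _LazyAutoMapping(
        OrderedDict([("bert", "BertConfig")]),
        OrderedDict([("bert", "BertModel")]),
    )
    # transformers.models.bert is only imported when this lookup happens.
    return example_mapping[BertConfig]  # -> transformers.models.bert.BertModel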
| 57.525234
| 147
| 0.65824
|
3457fd67ec44990abe89ab83cfdb62ab7e2b32cb
| 10,530
|
py
|
Python
|
GameFiles/Generator.py
|
ConnerGallimore/Project-Run
|
1a65f238412eafd675064b888d365c80a4516fa5
|
[
"MIT"
] | 1
|
2021-06-08T21:22:42.000Z
|
2021-06-08T21:22:42.000Z
|
GameFiles/Generator.py
|
ConnerGallimore/Project-Run
|
1a65f238412eafd675064b888d365c80a4516fa5
|
[
"MIT"
] | null | null | null |
GameFiles/Generator.py
|
ConnerGallimore/Project-Run
|
1a65f238412eafd675064b888d365c80a4516fa5
|
[
"MIT"
] | 2
|
2021-06-14T20:41:30.000Z
|
2021-06-24T19:18:48.000Z
|
from ast import Param
import pygame
from GameFiles.Bug import *
from GameFiles.Platform import *
from GameFiles.Currency import *
"""Functions listed here are intended to generate non-player sprites."""
def coin_gen(coins, sprites, screen_width, screen_height):
"""Generate the coin obstacles for the game."""
# generate 20 coins
for i in range(20):
coin = Currency(screen_width, screen_height)
# add coin to lists
sprites.add(coin)
coins.add(coin)
def plat_gen(platforms, sprites, screen_width, screen_height):
"""Generate the platform obstacles for the game."""
# generate 4 platforms
for i in range(4):
platform = Platform(screen_width, screen_height)
# add platforms to lists
platforms.add(platform)
sprites.add(platform)
def bug_gen(bugs, obstacles, sprites, screen_width, screen_height):
"""Generate the bug obstacles for the game."""
# generate 6 bugs
for i in range(6):
bug = Bug(screen_width, screen_height)
# add bugs to lists
bugs.add(bug)
obstacles.add(bug)
sprites.add(bug)
def change_coins(coins, index):
"""Change the locations of the coins."""
x = 0
y = 0
count = 0
# special set that occurs when the end condition is met.
if index == -1:
for coin in coins:
coin.relocate(0,0)
coin.invisible = True
coin.touched = False
# 1st set
if index == 0:
for coin in coins:
if count < 6:
coin.relocate(coin.width + 45*x, coin.height - 120)
count += 1
coin.invisible = False
elif 6 <= count < 9:
y += 1
coin.relocate(coin.width + 45*x, (coin.height - 120) - 30*y)
count += 1
coin.invisible = False
elif 9 <= count < 12:
y -= 1
coin.relocate(coin.width + 45*x, (coin.height - 150) - 30*y)
count += 1
coin.invisible = False
elif count < 18:
coin.relocate(coin.width + 45*x, coin.height - 120)
count += 1
coin.invisible = False
else:
coin.relocate(0,0)
coin.invisible = True
x += 1
coin.touched = False
# 2nd set
if index == 1:
for coin in coins:
if count < 4:
coin.relocate((coin.width + 460) + 35*x, (coin.height - 280) + 30*count)
count += 1
coin.invisible = False
else:
count = 0
x += 1
coin.relocate(0,0)
coin.invisible = True
coin.touched = False
if index == 2:
for coin in coins:
if count < 2:
if y < 6:
coin.relocate((coin.height + 370) + 35*x, (coin.height - 190) + 30*count)
elif y < 12:
coin.relocate((coin.height + 600) + 35*x, (coin.height - 190) + 30*count)
count += 1
y += 1
coin.invisible = False
else:
count = 0
x += 1
coin.invisible = True
coin.touched = False
if index == 3:
for coin in coins:
if count < 8:
coin.relocate(700 + 35*x, 350)
coin.invisible = False
elif count < 10:
coin.relocate(700 + 35*x, 350)
coin.invisible = True
elif count < 18:
coin.relocate(700 + 35*x, 275)
coin.invisible = False
else:
coin.relocate(0, 0)
coin.invisible = True
count += 1
x += 1
coin.touched = False
if index == 4:
for coin in coins:
if count < 5:
coin.relocate(700 + 35*x, 420)
coin.invisible = False
count += 1
elif count < 9:
coin.relocate(0,0)
coin.invisible = True
count += 1
elif count == 9:
coin.relocate(0, 0)
count = 0
coin.invisible = True
x += 1
coin.touched = False
if index == 5:
for coin in coins:
if count < 3:
coin.relocate(670 + 35*x, 275 + 30*count)
count += 1
coin.invisible = False
else:
count = 0
x += 1
coin.relocate(0,0)
coin.invisible = True
coin.touched = False
def change_platforms(platforms, index):
"""Change the locations of the platforms."""
# used to change positions of certain platforms
x = 0
y = 0
count = 0
# special set that occurs when the end condition is met.
if index == -1:
for platform in platforms:
if isinstance(platform, Platform):
platform.relocate(0,0)
platform.invisible = True
if index == 0:
for platform in platforms:
if isinstance(platform, Platform):
if y % 2 == 0:
platform.relocate(platform.width+ 250*x, platform.height - 80)
platform.invisible = False
else:
platform.invisible = True
x += 1
y += 1
platform.invisible = False
if index == 1:
for platform in platforms:
if isinstance(platform, Platform):
if count < 1:
platform.relocate(platform.width + 60, platform.height - 80)
x += 1
y += 1
platform.invisible = False
if index == 2:
for platform in platforms:
if isinstance(platform, Platform):
platform.relocate(0,0)
platform.invisible = True
if index == 3:
for platform in platforms:
if isinstance(platform, Platform):
if count < 2:
platform.relocate(700 + 350*x, 400 - 75*y)
platform.invisible = False
else:
platform.relocate(0, 0)
platform.invisible = True
x += 1
y += 1
count += 1
if index == 4:
for platform in platforms:
if isinstance(platform, Platform):
platform.relocate(0,0)
platform.invisible = True
if index == 5:
for platform in platforms:
if isinstance(platform, Platform):
platform.relocate(0,0)
platform.invisible = True
def change_bugs(bugs, index):
"""Change the locations of the bugs."""
# used to change positions of certain bugs
x = 0
y = 0
count = 0
# special set that occurs when the end condition is met.
if index == -1:
for bug in bugs:
bug.relocate(0,0)
bug.invisible = True
bug.touched = False
if index == 0:
for bug in bugs:
if count < 1:
bug.relocate(bug.width + 345 + 250*x, (bug.height - 145) - 250*y)
bug.invisible = False
count += 1
elif count < 2:
bug.relocate(bug.width + 395, bug.height - 80)
bug.invisible = False
count += 1
else:
bug.relocate(0,0)
bug.invisible = True
bug.touched = False
x += 1
y += 1
if index == 1:
for bug in bugs:
if count < 1:
bug.relocate(bug.width + 50*x, bug.height - 70)
count += 1
bug.invisible = False
elif count < 2:
bug.relocate(bug.width + 385, bug.height - 130)
count += 1
bug.invisible = False
elif count < 5:
bug.relocate((bug.width + 510) + 50*x, bug.height - 50)
bug.invisible = False
count += 1
else:
bug.relocate(0,0)
bug.invisible = True
bug.touched = False
x += 1
y += 1
if index == 2:
for bug in bugs:
if count < 3:
bug.relocate(1000 + 45*x, 420)
bug.invisible = False
count += 1
else:
bug.relocate(1200 + 45*x, 420)
bug.invisible = False
x += 1
bug.touched = False
if index == 3:
for bug in bugs:
if count < 1:
bug.relocate(720, 430)
bug.invisible = False
elif count < 2:
bug.relocate(1000, 330)
bug.invisible = False
elif count < 3:
bug.relocate(1345, 275)
bug.invisible = False
else:
bug.relocate(0,0)
bug.invisible = True
count += 1
bug.touched = False
if index == 4:
for bug in bugs:
if count < 3:
bug.relocate(915 + 40*x, 420)
bug.invisible = False
count += 1
x += 1
else:
bug.relocate(1150 + 40*x, 420)
bug.invisible = False
x += 1
bug.touched = False
if index == 5:
for bug in bugs:
if count < 2:
bug.relocate(700, 420 - 45*y)
count += 1
y += 1
bug.invisible = False
elif count < 4:
bug.relocate(700, 240 - 45*x)
count += 1
y = 0
x += 1
bug.invisible = False
else:
bug.relocate(855, 330 - 45*y)
y += 1
bug.invisible = False
bug.touched = False
| 25.373494
| 93
| 0.439411
|
d1f01811f8688f69e74366e63bb44e1c45178106
| 1,755
|
py
|
Python
|
authentik/outposts/controllers/k8s/utils.py
|
BeryJu/passbook
|
350f0d836580f4411524614f361a76c4f27b8a2d
|
[
"MIT"
] | 15
|
2020-01-05T09:09:57.000Z
|
2020-11-28T05:27:39.000Z
|
authentik/outposts/controllers/k8s/utils.py
|
BeryJu/passbook
|
350f0d836580f4411524614f361a76c4f27b8a2d
|
[
"MIT"
] | 302
|
2020-01-21T08:03:59.000Z
|
2020-12-04T05:04:57.000Z
|
authentik/outposts/controllers/k8s/utils.py
|
BeryJu/passbook
|
350f0d836580f4411524614f361a76c4f27b8a2d
|
[
"MIT"
] | 3
|
2020-03-04T08:21:59.000Z
|
2020-08-01T20:37:18.000Z
|
"""k8s utils"""
from pathlib import Path
from kubernetes.client.models.v1_container_port import V1ContainerPort
from kubernetes.client.models.v1_service_port import V1ServicePort
from kubernetes.config.incluster_config import SERVICE_TOKEN_FILENAME
from authentik.outposts.controllers.k8s.triggers import NeedsRecreate
def get_namespace() -> str:
"""Get the namespace if we're running in a pod, otherwise default to default"""
path = Path(SERVICE_TOKEN_FILENAME.replace("token", "namespace"))
if path.exists():
with open(path, "r", encoding="utf8") as _namespace_file:
return _namespace_file.read()
return "default"
def compare_port(
current: V1ServicePort | V1ContainerPort, reference: V1ServicePort | V1ContainerPort
) -> bool:
"""Compare a single port"""
if current.name != reference.name:
return False
if current.protocol != reference.protocol:
return False
if isinstance(current, V1ServicePort) and isinstance(reference, V1ServicePort):
# We only care about the target port
if current.target_port != reference.target_port:
return False
if isinstance(current, V1ContainerPort) and isinstance(reference, V1ContainerPort):
# We only care about the target port
if current.container_port != reference.container_port:
return False
return True
def compare_ports(
current: list[V1ServicePort | V1ContainerPort], reference: list[V1ServicePort | V1ContainerPort]
):
"""Compare ports of a list"""
if len(current) != len(reference):
raise NeedsRecreate()
for port in reference:
if not any(compare_port(port, current_port) for current_port in current):
raise NeedsRecreate()
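# --- Illustrative sketch (not part of the original module) ---
# Minimal example of the helpers above; the port numbers are placeholders.
# The two lists have equal length, but the target ports differ, so no current
# port matches the reference one and compare_ports() raises NeedsRecreate.
def _compare_ports_example():
    current = [V1ServicePort(name="http", port=9000, protocol="TCP", target_port=9000)]
    reference = [V1ServicePort(name="http", port=9000, protocol="TCP", target_port=9443)]
    compare_ports(current, reference)  # raises NeedsRecreate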
| 36.5625
| 100
| 0.71567
|
2bfddc7e206a757a3b2fd3e79bb0ee01853d1c11
| 213
|
py
|
Python
|
boot.py
|
jfcherng/Sublime-VisualizeZeroWidthChars
|
9469b01a13c0c8bfde17ca9627d08ba0ee757d43
|
[
"MIT"
] | 2
|
2021-03-27T05:55:22.000Z
|
2021-04-01T13:40:24.000Z
|
boot.py
|
jfcherng-sublime/ST-VisualizeZeroWidthChars
|
9469b01a13c0c8bfde17ca9627d08ba0ee757d43
|
[
"MIT"
] | 1
|
2019-08-25T20:08:05.000Z
|
2019-08-26T07:02:54.000Z
|
boot.py
|
jfcherng/Sublime-VisualizeZeroWidthChars
|
9469b01a13c0c8bfde17ca9627d08ba0ee757d43
|
[
"MIT"
] | null | null | null |
from .plugin import set_up, tear_down
# main plugin classes
from .plugin.sublime_text.VisualizeZeroWidthChars import *
def plugin_loaded() -> None:
set_up()
def plugin_unloaded() -> None:
tear_down()
| 16.384615
| 58
| 0.732394
|
9f33c01ff3f6e61346080a60b314d9f8c5894ece
| 2,808
|
py
|
Python
|
hw/scripts/gen_config.py
|
ausbin/vortex
|
592a8400e2f3323dce398ab36d435752e959e033
|
[
"BSD-3-Clause"
] | 9
|
2021-04-19T02:07:14.000Z
|
2021-12-13T07:00:37.000Z
|
hw/scripts/gen_config.py
|
ausbin/vortex
|
592a8400e2f3323dce398ab36d435752e959e033
|
[
"BSD-3-Clause"
] | null | null | null |
hw/scripts/gen_config.py
|
ausbin/vortex
|
592a8400e2f3323dce398ab36d435752e959e033
|
[
"BSD-3-Clause"
] | 4
|
2021-04-08T22:12:45.000Z
|
2022-03-25T00:39:01.000Z
|
#!/usr/bin/env python3
# coding=utf-8
from __future__ import print_function
import os
import os.path as path
import re
import argparse
from datetime import datetime
script_dir = path.dirname(path.realpath(__file__))
defines = {}
for k, v in os.environ.items():
if k.upper().startswith('V_'):
defines[k[2:]] = v
print('Custom params:', ', '.join(['='.join(x) for x in defines.items()]))
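# Illustrative invocation (an assumption, not taken from this repository's build
# scripts): every environment variable prefixed with V_ becomes an override, so
#   V_NUM_CORES=4 ./gen_config.py --outv VX_user_config.vh --outc VX_user_config.h
# would emit "`define NUM_CORES 4" in the Verilog header and
# "#define NUM_CORES 4" in the C header.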
parser = argparse.ArgumentParser()
parser.add_argument('--outc', default='none', help='Output C header')
parser.add_argument('--outv', default='none', help='Output Verilog header')
args = parser.parse_args()
if args.outc == 'none' and args.outv == 'none':
print('Warning: not emitting any files. Specify arguments')
if args.outv != 'none':
with open(args.outv, 'w') as f:
print('''
// auto-generated by gen_config.py. DO NOT EDIT
// Generated at {date}
`ifndef VX_USER_CONFIG
`define VX_USER_CONFIG
'''[1:].format(date=datetime.now()), file=f)
for k, v in defines.items():
print('`define {} {}'.format(k, v), file=f)
print('\n`endif', file=f)
if args.outc != 'none':
with open(args.outc, 'w') as f:
print('''
// auto-generated by gen_config.py. DO NOT EDIT
// Generated at {date}
#ifndef VX_USER_CONFIG
#define VX_USER_CONFIG
'''[1:].format(date=datetime.now()), file=f)
for k, v in defines.items():
print('#define {} {}'.format(k, v), file=f)
print('\n#endif', file=f)
translation_rules = [
# preprocessor directives
(re.compile(r'^\s*`include .*$'), r''),
(re.compile(r'`ifdef'), r'#ifdef'),
(re.compile(r'`ifndef'), r'#ifndef'),
(re.compile(r'`elif'), r'#elif'),
(re.compile(r'`else'), r'#else'),
(re.compile(r'`define'), r'#define'),
(re.compile(r'`endif'), r'#endif'),
# macro expansion
(re.compile(r"`([A-Za-z_][$_0-9A-Za-z]*)"), r'\1'),
# literals
(re.compile(r"\d+'d(\d+)"), r'\1'),
(re.compile(r"\d+'b([01]+)"), r'0b\1'),
(re.compile(r"\d+'h([\da-fA-F]+)"), r'0x\1')
]
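# Illustrative example of the rules above (the input line is an assumption):
#   `define NUM_WARPS 32'd4   ->   #define NUM_WARPS 4
# The `define rule rewrites the directive and the sized decimal literal rule
# drops the 32'd width prefix.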
if args.outc != 'none':
with open(args.outc, 'a') as f:
print('''
// auto-generated by gen_config.py. DO NOT EDIT
// Generated at {date}
// Translated from VX_config.vh:
'''[1:].format(date=datetime.now()), file=f)
with open(path.join(script_dir, '../rtl/VX_config.vh'), 'r') as r:
lineno = 0
for line in r:
for pat, repl in translation_rules:
match = pat.search(line)
if match:
line = re.sub(pat, repl, line)
#print("*** match @" + str(lineno) + ": " + match.group() + " => " + line)
f.write(line)
lineno = lineno + 1
print('''
'''[1:], file=f)
| 27.80198
| 98
| 0.558048
|
686318af8b6e3f2347106a329f11cb755711e6ce
| 17,390
|
py
|
Python
|
testui/support/appium_driver.py
|
ty-hob/Py-TestUI
|
08fa460deafd15f970f39edce1c5060f581449bc
|
[
"Apache-2.0"
] | 3
|
2020-07-10T06:50:49.000Z
|
2022-02-18T02:14:09.000Z
|
testui/support/appium_driver.py
|
ty-hob/Py-TestUI
|
08fa460deafd15f970f39edce1c5060f581449bc
|
[
"Apache-2.0"
] | 13
|
2021-02-26T07:49:36.000Z
|
2022-03-29T11:32:19.000Z
|
testui/support/appium_driver.py
|
ty-hob/Py-TestUI
|
08fa460deafd15f970f39edce1c5060f581449bc
|
[
"Apache-2.0"
] | 3
|
2021-03-23T18:38:21.000Z
|
2021-09-16T14:57:57.000Z
|
import atexit
import os
import subprocess
import threading
from pathlib import Path
from time import sleep
from ppadb.client import Client as AdbClient
from appium.webdriver import Remote
from appium.webdriver.webdriver import WebDriver
from selenium import webdriver
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from webdriver_manager import chrome
from testui.support import logger
from testui.support.api_support import get_chrome_version
from testui.support.testui_driver import TestUIDriver
from testui.support.configuration import Configuration
class NewDriver:
__configuration = Configuration()
def __init__(self):
self.browser = False
self.__driver: WebDriver = None
self.__app_path = None
self.udid = None
self.__appium_url = None
self.__remote_url = None
self.__browser_name = 'chrome'
self.device_name = 'Device'
self.appium_port = 4723
self.__version = None
self.__platform_name = 'Android'
self.__app_package = None
self.__app_activity = None
self.__automation_name = None
self.logger_name = None
self.__full_reset = False
self.__debug = False
self.soft_assert = False
self.__auto_accept_alerts = True
self.process = None
self.file_name = None
self.__appium_log_file = 'appium-stdout.log'
self.__chromedriverArgs = ['relaxed security']
self.__desired_capabilities = {}
self.__chrome_options = {}
# Possible loggers str: behave, pytest, None
def set_logger(self, logger_name='pytest'):
self.logger_name = logger_name
return self
def set_appium_log_file(self, file='appium-stdout.log'):
self.__appium_log_file = file
return self
def set_browser(self, browser):
self.__browser_name = browser
return self
def set_remote_url(self, url):
self.__remote_url = url
return self
def set_soft_assert(self, soft_assert: bool):
self.soft_assert = soft_assert
return self
def set_appium_port(self, port: int):
self.appium_port = port
return self
def set_full_reset(self, full_reset: bool):
self.__full_reset = full_reset
return self
def set_appium_url(self, appium_url: str):
self.__appium_url = appium_url
return self
def set_extra_caps(self, caps=None):
if caps is None:
caps = {}
for cap in caps:
self.__desired_capabilities[cap] = caps[cap]
return self
def set_app_path(self, path: str):
self.__app_path = path
if os.path.isabs(self.__app_path):
return self
else:
root_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
self.__app_path = os.path.join(root_dir, path)
logger.log(self.__app_path)
return self
def set_udid(self, udid: str):
self.udid = udid
return self
def set_udid_if_exists(self, udid: str, number=None):
self.udid = check_device_exist(udid)
if self.udid is None:
self.udid = get_device_udid(number)
return self
def set_connected_device(self, number: int):
self.udid = get_device_udid(number)
return self
def set_device_name(self, device_name: str):
self.device_name = device_name
return self
def set_version(self, version: str):
self.__version = version
return self
def set_grant_permissions(self, permissions: bool):
self.__auto_accept_alerts = permissions
return self
def set_app_package_activity(self, app_package: str, app_activity: str):
self.__app_package = app_package
self.__app_activity = app_activity
return self
def get_driver(self):
driver = self.__driver
return driver
@property
def configuration(self) -> Configuration:
return self.__configuration
def get_testui_driver(self) -> TestUIDriver:
return TestUIDriver(self)
def set_chrome_driver(self, version=''):
mobile_version = version
if version == '':
if self.udid is None:
self.udid = get_device_udid(0)
mobile_version = check_chrome_version(self.udid)
chrome_driver = chrome.ChromeDriverManager(version=mobile_version).install()
logger.log(f'Driver installed in {chrome_driver}', True)
self.__desired_capabilities['chromedriverExecutable'] = chrome_driver
return self
def set_screenshot_path(self, screenshot_path: str):
self.__configuration.screenshot_path = screenshot_path
return self
def set_save_screenshot_on_fail(self, save_screenshot_on_fail: bool):
self.__configuration.save_full_stacktrace = save_screenshot_on_fail
return self
def set_save_full_stacktrace(self, save_full_stacktrace: bool):
self.__configuration.save_full_stacktrace = save_full_stacktrace
return self
# Available platforms: Android, iOS
def set_platform(self, platform):
self.__platform_name = platform
return self
def __set_common_caps(self):
self.__desired_capabilities['adbExecTimeout'] = 30000
self.__desired_capabilities['platformName'] = self.__platform_name
self.__desired_capabilities['automationName'] = self.__automation_name
self.__desired_capabilities['deviceName'] = self.device_name
if self.__full_reset:
self.__desired_capabilities['enforceAppInstall'] = True
else:
self.__desired_capabilities['noReset'] = True
if self.__version is not None:
self.__desired_capabilities['platformVersion'] = self.__version
if self.udid is not None:
self.__desired_capabilities['udid'] = self.udid
def __set_android_caps(self):
if self.__automation_name is None:
self.__automation_name = 'UiAutomator2'
self.__desired_capabilities['chromeOptions'] = {'w3c': False}
# TODO: this capability is not being passed to the executable. Tried this:
# https://github.com/appium/appium/blob/master/docs/en/writing-running-appium/caps.md
self.__desired_capabilities['chromedriverArgs'] = self.__chromedriverArgs
self.__desired_capabilities['chromeDriverPort'] = self.appium_port - 4723 + 8100
self.__desired_capabilities['systemPort'] = self.appium_port - 4723 + 8200
if self.__app_path is None and self.__app_package is None:
self.__desired_capabilities['browserName'] = "chrome"
self.browser = True
if self.__app_package is not None:
self.__desired_capabilities['appPackage'] = self.__app_package
self.__desired_capabilities['appActivity'] = self.__app_activity
if self.__app_path is not None:
self.__desired_capabilities['app'] = self.__app_path
self.__desired_capabilities['androidInstallPath'] = self.__app_path
def __set_ios_caps(self):
if self.__automation_name is None:
self.__automation_name = 'XCUITest'
if self.__app_path is None and self.__app_package is None:
self.__desired_capabilities['browserName'] = "safari"
self.browser = True
if self.__app_path is not None:
self.__desired_capabilities['app'] = self.__app_path
if self.__version is None:
self.__desired_capabilities['platformVersion'] = '13.2'
def __set_selenium_caps(self):
self.__desired_capabilities['browserName'] = self.__browser_name
def set_appium_driver(self) -> TestUIDriver:
if self.__platform_name.lower() == 'android':
self.__set_android_caps()
else:
self.__set_ios_caps()
self.__set_common_caps()
self.__driver, self.process, self.file_name = start_driver(
self.__desired_capabilities, self.__appium_url, self.__debug,
self.appium_port, self.udid, self.__appium_log_file
)
return self.get_testui_driver()
def set_selenium_driver(self, chrome_options=None, firefox_options=None) -> TestUIDriver:
self.__set_selenium_caps()
self.__driver = start_selenium_driver(
self.__desired_capabilities, self.__remote_url,
self.__debug, self.__browser_name, chrome_options, firefox_options)
return self.get_testui_driver()
def set_driver(self, driver) -> TestUIDriver:
self.__set_selenium_caps()
self.__driver = driver
return self.get_testui_driver()
def start_driver(desired_caps, url, debug, port, udid, log_file):
lock = threading.Lock()
lock.acquire()
logger.log("setting capabilities: " + desired_caps.__str__())
logger.log("starting appium driver...")
process = None
if desired_caps['platformName'].lower().__contains__('android'):
url, desired_caps, process, file = __local_run(url, desired_caps, port, udid, log_file)
else:
url, desired_caps, file = __local_run_ios(url, desired_caps, port, udid, log_file)
err = None
for x in range(2):
try:
driver = Remote(url, desired_caps)
atexit.register(__quit_driver, driver, debug)
logger.log(f"appium running on {url}. \n")
lock.release()
return driver, process, file
except Exception as error:
err = error
lock.release()
raise err
def start_selenium_driver(desired_caps, url=None, debug=None, browser=None, chrome_options=None, firefox_options=None) -> WebDriver:
options = chrome_options
if firefox_options is not None:
options = firefox_options
if options is not None:
logger.log("setting options: " + options.to_capabilities().__str__())
logger.log("setting capabilities: " + desired_caps.__str__())
logger.log(f"starting selenium {browser.lower()} driver...")
err = None
for x in range(2):
try:
if url is not None:
logger.log(f"selenium running on {url}. \n")
driver = webdriver.Remote(url, desired_caps, options=options)
else:
if browser.lower() == 'chrome':
driver = webdriver.Chrome(desired_capabilities=desired_caps, options=options)
elif browser.lower() == 'firefox':
import geckodriver_autoinstaller
try:
geckodriver_autoinstaller.install()
except Exception as error:
logger.log_warn("Could not retrieve geckodriver: " + error.__str__())
if "marionette" not in desired_caps:
desired_caps["marionette"] = True
driver = webdriver.Firefox(firefox_options=options, desired_capabilities=desired_caps)
elif browser.lower() == 'safari':
driver = webdriver.Safari(desired_capabilities=desired_caps)
elif browser.lower() == 'edge':
driver = webdriver.Edge(capabilities=desired_caps)
elif browser.lower() == 'ie':
driver = webdriver.Ie(capabilities=desired_caps)
elif browser.lower() == 'opera':
driver = webdriver.Opera(desired_capabilities=desired_caps)
elif browser.lower() == 'phantomjs':
driver = webdriver.PhantomJS(desired_capabilities=desired_caps)
else:
raise Exception(f"Invalid browser '{browser}'. Please choose one from: chrome,firefox,safari,edge,"
f"ie,opera,phantomjs")
atexit.register(__quit_driver, driver, debug)
return driver
except Exception as error:
err = error
raise err
def __local_run(url, desired_caps, use_port, udid, log_file):
if url is None:
port = use_port
bport = use_port + 1
device = 0
if os.getenv('PYTEST_XDIST_WORKER') is not None:
device += int(os.getenv('PYTEST_XDIST_WORKER').split("w")[1])
port += int(os.getenv('PYTEST_XDIST_WORKER').split("w")[1]) * 2
desired_caps['chromeDriverPort'] = 8200 + int(os.getenv('PYTEST_XDIST_WORKER').split("w")[1])
desired_caps['systemPort'] = 8300 + int(os.getenv('PYTEST_XDIST_WORKER').split("w")[1])
bport += int(os.getenv('PYTEST_XDIST_WORKER').split("w")[1]) * 2
logger.log(f"running: appium -p {port.__str__()} -bp {bport.__str__()}")
if udid is None:
desired_caps = __set_android_device(desired_caps, device)
logger.log(f'setting device for automation: {desired_caps["udid"]}')
root_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + '/'
Path(root_dir + "appium_logs").mkdir(parents=True, exist_ok=True)
file_path: str
if log_file == 'appium-stdout.log':
file = f'appium_logs/testui-{udid}-' + log_file
else:
file = f'appium_logs/{log_file}'
with open(root_dir + file, 'wb') as out:
process = subprocess.Popen(
['appium', '-p', port.__str__(), '-bp', bport.__str__()],
stdout=out, stderr=subprocess.STDOUT
)
atexit.register(process.kill)
file_path = root_dir + file
while True:
sleep(0.5)
out = open(file_path)
text = out.read()
if text.__contains__("already be in use") or text.__contains__("listener started"):
out.close()
break
out.close()
return f"http://localhost:{port.__str__()}/wd/hub", desired_caps, process, file_path
return url, desired_caps, None, None  # keep the four-value shape expected by the caller
def __local_run_ios(url, desired_caps, use_port, udid, log_file):
process = None
if url is None:
port = use_port + 100
device = 0
if os.getenv('PYTEST_XDIST_WORKER') is not None:
device += int(os.getenv('PYTEST_XDIST_WORKER').split("w")[1])
port += int(os.getenv('PYTEST_XDIST_WORKER').split("w")[1]) * 2
desired_caps['chromeDriverPort'] = 8200 + int(os.getenv('PYTEST_XDIST_WORKER').split("w")[1])
desired_caps['systemPort'] = 8300 + int(os.getenv('PYTEST_XDIST_WORKER').split("w")[1])
logger.log(f"running: appium -p {port.__str__()}")
root_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + '/'
Path(root_dir + "appium_logs").mkdir(parents=True, exist_ok=True)
file_path: str
if log_file == 'appium-stdout.log':
file = f'appium_logs/testui-{udid}-' + log_file
else:
file = f'appium_logs/{log_file}'
with open(root_dir + file, 'wb') as out:
process = subprocess.Popen(
['appium', '-p', port.__str__()],
stdout=out, stderr=subprocess.STDOUT
)
atexit.register(process.kill)
file_path = root_dir + file
if udid is None:
desired_caps = __set_ios_device(desired_caps, device)
while True:
sleep(0.5)
out = open(file_path)
text = out.read()
if text.__contains__("already be in use") or text.__contains__("listener started"):
out.close()
break
out.close()
return f"http://localhost:{port.__str__()}/wd/hub", desired_caps, file_path
return url, desired_caps, process
def __set_android_device(desired_caps, number: int):
desired_caps['udid'] = get_device_udid(number)
return desired_caps
def __set_ios_device(desired_caps, number: int):
# TODO
return desired_caps
def get_device_udid(number: int):
client = AdbClient(host="127.0.0.1", port=5037)
devices = client.devices()
if len(devices) == 0:
raise Exception("There are 0 devices connected to the computer!")
if len(devices) > number:
return devices[number].get_serial_no()
else:
new_number = number - (number // len(devices)) * len(devices)
logger.log_warn(f'You chose device number {number + 1} but there are only {len(devices)} connected. '
f'Will use device number {new_number + 1} instead', jump_line=True)
return devices[new_number].get_serial_no()
def check_device_exist(udid):
client = AdbClient(host="127.0.0.1", port=5037)
devices = client.devices()
for device in devices:
if device.get_serial_no() == udid:
return udid
return None
def check_chrome_version(udid):
output = subprocess.Popen(['adb', '-s', udid, 'shell', 'dumpsys', 'package', 'com.android.chrome',
'|', 'grep', 'versionName'], stdout=subprocess.PIPE)
response = output.communicate()
if response.__str__().__contains__('versionName='):
return get_chrome_version(response.__str__().split('versionName=')[1].split('.')[0])
def __quit_driver(driver, debug):
try:
driver.quit()
except Exception as err:
if debug:
logger.log_debug(f"appium was probably closed {err}. \n")
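# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of the fluent builder API above, assuming a connected
# Android device and a locally installed Appium server; the package and
# activity names are placeholders, not values from this project.
#
#   testui_driver = (
#       NewDriver()
#       .set_logger('pytest')
#       .set_platform('Android')
#       .set_connected_device(0)
#       .set_app_package_activity('com.example.app', '.MainActivity')
#       .set_appium_driver()
#   )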
| 39.343891
| 132
| 0.635078
|
8e36864d3e1657a4ddc428a2be560e60809ef8ca
| 16,642
|
py
|
Python
|
src/sage/combinat/rigged_configurations/rc_crystal.py
|
vbraun/sage
|
07d6c37d18811e2b377a9689790a7c5e24da16ba
|
[
"BSL-1.0"
] | 3
|
2016-06-19T14:48:31.000Z
|
2022-01-28T08:46:01.000Z
|
src/sage/combinat/rigged_configurations/rc_crystal.py
|
vbraun/sage
|
07d6c37d18811e2b377a9689790a7c5e24da16ba
|
[
"BSL-1.0"
] | null | null | null |
src/sage/combinat/rigged_configurations/rc_crystal.py
|
vbraun/sage
|
07d6c37d18811e2b377a9689790a7c5e24da16ba
|
[
"BSL-1.0"
] | 7
|
2021-11-08T10:01:59.000Z
|
2022-03-03T11:25:52.000Z
|
r"""
Crystal of Rigged Configurations
AUTHORS:
- Travis Scrimshaw (2010-09-26): Initial version
We only consider the highest weight crystal structure, not the
Kirillov-Reshetikhin structure, and we extend this to symmetrizable types.
"""
#*****************************************************************************
# Copyright (C) 2013 Travis Scrimshaw <tscrim at ucdavis.edu>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# http://www.gnu.org/licenses/
#*****************************************************************************
from sage.misc.lazy_attribute import lazy_attribute
from sage.structure.unique_representation import UniqueRepresentation
from sage.structure.parent import Parent
from sage.categories.highest_weight_crystals import HighestWeightCrystals
from sage.categories.regular_crystals import RegularCrystals
from sage.categories.classical_crystals import ClassicalCrystals
from sage.categories.infinite_enumerated_sets import InfiniteEnumeratedSets
from sage.combinat.root_system.cartan_type import CartanType
from sage.combinat.rigged_configurations.rigged_configurations import RiggedConfigurations
from sage.combinat.rigged_configurations.rigged_configuration_element import (
RiggedConfigurationElement, RCHighestWeightElement, RCHWNonSimplyLacedElement)
from sage.combinat.rigged_configurations.rigged_partition import RiggedPartition
# Note on implementation, this class is used for simply-laced types only
class CrystalOfRiggedConfigurations(UniqueRepresentation, Parent):
r"""
A highest weight crystal of rigged configurations.
The crystal structure for finite simply-laced types is given
in [CrysStructSchilling06]_. These were then shown to be the crystal
operators in all finite types in [SS2015]_, all simply-laced and
a large class of foldings of simply-laced types in [SS2015II]_,
and all symmetrizable types (uniformly) in [SS2017]_.
INPUT:
- ``cartan_type`` -- (optional) a Cartan type or a Cartan type
given as a folding
- ``wt`` -- the highest weight vector in the weight lattice
EXAMPLES:
For simplicity, we display the rigged configurations horizontally::
sage: RiggedConfigurations.options.display='horizontal'
We start with a simply-laced finite type::
sage: La = RootSystem(['A', 2]).weight_lattice().fundamental_weights()
sage: RC = crystals.RiggedConfigurations(La[1] + La[2])
sage: mg = RC.highest_weight_vector()
sage: mg.f_string([1,2])
0[ ]0 0[ ]-1
sage: mg.f_string([1,2,2])
0[ ]0 -2[ ][ ]-2
sage: mg.f_string([1,2,2,2])
sage: mg.f_string([2,1,1,2])
-1[ ][ ]-1 -1[ ][ ]-1
sage: RC.cardinality()
8
sage: T = crystals.Tableaux(['A', 2], shape=[2,1])
sage: RC.digraph().is_isomorphic(T.digraph(), edge_labels=True)
True
We construct a non-simply-laced affine type::
sage: La = RootSystem(['C', 3]).weight_lattice().fundamental_weights()
sage: RC = crystals.RiggedConfigurations(La[2])
sage: mg = RC.highest_weight_vector()
sage: mg.f_string([2,3])
(/) 1[ ]1 -1[ ]-1
sage: T = crystals.Tableaux(['C', 3], shape=[1,1])
sage: RC.digraph().is_isomorphic(T.digraph(), edge_labels=True)
True
We can construct rigged configurations using a diagram folding of
a simply-laced type. This yields an equivalent but distinct crystal::
sage: vct = CartanType(['C', 3]).as_folding()
sage: RC = crystals.RiggedConfigurations(vct, La[2])
sage: mg = RC.highest_weight_vector()
sage: mg.f_string([2,3])
(/) 0[ ]0 -1[ ]-1
sage: T = crystals.Tableaux(['C', 3], shape=[1,1])
sage: RC.digraph().is_isomorphic(T.digraph(), edge_labels=True)
True
We reset the global options::
sage: RiggedConfigurations.options._reset()
REFERENCES:
- [SS2015]_
- [SS2015II]_
- [SS2017]_
"""
@staticmethod
def __classcall_private__(cls, cartan_type, wt=None, WLR=None):
r"""
Normalize the input arguments to ensure unique representation.
EXAMPLES::
sage: La = RootSystem(['A', 2]).weight_lattice().fundamental_weights()
sage: RC = crystals.RiggedConfigurations(La[1])
sage: RC2 = crystals.RiggedConfigurations(['A', 2], La[1])
sage: RC3 = crystals.RiggedConfigurations(['A', 2], La[1], La[1].parent())
sage: RC is RC2 and RC2 is RC3
True
sage: La = RootSystem(['A',2,1]).weight_lattice().fundamental_weights()
sage: LaE = RootSystem(['A',2,1]).weight_lattice(extended=True).fundamental_weights()
sage: RC = crystals.RiggedConfigurations(La[1])
sage: RCE = crystals.RiggedConfigurations(LaE[1])
sage: RC is RCE
False
"""
from sage.combinat.root_system.type_folded import CartanTypeFolded
if wt is None:
wt = cartan_type
cartan_type = wt.parent().cartan_type()
else:
if not isinstance(cartan_type, CartanTypeFolded):
cartan_type = CartanType(cartan_type)
if WLR is None:
WLR = wt.parent()
else:
wt = WLR(wt)
if isinstance(cartan_type, CartanTypeFolded):
return CrystalOfNonSimplyLacedRC(cartan_type, wt, WLR)
return super(CrystalOfRiggedConfigurations, cls).__classcall__(cls, wt, WLR=WLR)
def __init__(self, wt, WLR):
r"""
Initialize ``self``.
EXAMPLES::
sage: La = RootSystem(['A', 2]).weight_lattice().fundamental_weights()
sage: RC = crystals.RiggedConfigurations(La[1] + La[2])
sage: TestSuite(RC).run()
sage: La = RootSystem(['A', 2, 1]).weight_lattice().fundamental_weights()
sage: RC = crystals.RiggedConfigurations(La[0])
sage: TestSuite(RC).run() # long time
"""
self._cartan_type = WLR.cartan_type()
self._wt = wt
self._rc_index = self._cartan_type.index_set()
self._rc_index_inverse = {i: ii for ii,i in enumerate(self._rc_index)}
# We store the Cartan matrix for the vacancy number calculations for speed
self._cartan_matrix = self._cartan_type.cartan_matrix()
if self._cartan_type.is_finite():
category = ClassicalCrystals()
else:
category = (RegularCrystals(), HighestWeightCrystals(), InfiniteEnumeratedSets())
Parent.__init__(self, category=category)
n = self._cartan_type.rank() #== len(self._cartan_type.index_set())
self.module_generators = (self.element_class( self, partition_list=[[] for i in range(n)] ),)
options = RiggedConfigurations.options
def _repr_(self):
"""
Return a string representation of ``self``.
EXAMPLES::
sage: La = RootSystem(['A', 3]).weight_lattice().fundamental_weights()
sage: crystals.RiggedConfigurations(La[1])
Crystal of rigged configurations of type ['A', 3] and weight Lambda[1]
"""
return "Crystal of rigged configurations of type {0} and weight {1}".format(
self._cartan_type, self._wt)
def _element_constructor_(self, *lst, **options):
"""
Construct a ``RiggedConfigurationElement``.
Typically the user should not call this method since it does not check
if it is an actual configuration in the crystal. Instead the user
should use the iterator.
EXAMPLES::
sage: La = RootSystem(['A', 2]).weight_lattice().fundamental_weights()
sage: RC = crystals.RiggedConfigurations(La[1] + La[2])
sage: RC(partition_list=[[1],[1]], rigging_list=[[0],[-1]])
<BLANKLINE>
0[ ]0
<BLANKLINE>
0[ ]-1
<BLANKLINE>
sage: RC(partition_list=[[1],[2]])
<BLANKLINE>
0[ ]0
<BLANKLINE>
-2[ ][ ]-2
<BLANKLINE>
TESTS:
Check that :trac:`17054` is fixed::
sage: La = RootSystem(['A', 2]).weight_lattice().fundamental_weights()
sage: RC = crystals.RiggedConfigurations(4*La[1] + 4*La[2])
sage: B = crystals.infinity.RiggedConfigurations(['A',2])
sage: x = B.an_element().f_string([2,2,1,1,2,1,2,1])
sage: ascii_art(x)
-4[ ][ ][ ][ ]-4 -4[ ][ ][ ][ ]0
sage: ascii_art(RC(x.nu()))
0[ ][ ][ ][ ]-4 0[ ][ ][ ][ ]0
sage: x == B.an_element().f_string([2,2,1,1,2,1,2,1])
True
"""
if isinstance(lst[0], (list, tuple)):
lst = lst[0]
if isinstance(lst[0], RiggedPartition):
lst = [p._clone() for p in lst] # Make a deep copy
elif isinstance(lst[0], RiggedConfigurationElement):
lst = [p._clone() for p in lst[0]] # Make a deep copy
return self.element_class(self, list(lst), **options)
def _calc_vacancy_number(self, partitions, a, i, **options):
r"""
Calculate the vacancy number `p_i^{(a)}(\nu)` in ``self``.
This assumes that `\gamma_a = 1` for all `a` and
`(\alpha_a | \alpha_b ) = A_{ab}`.
INPUT:
- ``partitions`` -- the list of rigged partitions we are using
- ``a`` -- the rigged partition index
- ``i`` -- the row length
TESTS::
sage: La = RootSystem(['A', 2]).weight_lattice().fundamental_weights()
sage: RC = crystals.RiggedConfigurations(La[1] + La[2])
sage: elt = RC(partition_list=[[1],[2]])
sage: RC._calc_vacancy_number(elt.nu(), 1, 2)
-2
"""
vac_num = self._wt[self.index_set()[a]]
for b in range(self._cartan_matrix.ncols()):
val = self._cartan_matrix[a,b]
if val:
vac_num -= val * partitions[b].get_num_cells_to_column(i)
return vac_num
def weight_lattice_realization(self):
"""
Return the weight lattice realization used to express the weights
of elements in ``self``.
EXAMPLES::
sage: La = RootSystem(['A', 2, 1]).weight_lattice(extended=True).fundamental_weights()
sage: RC = crystals.RiggedConfigurations(La[0])
sage: RC.weight_lattice_realization()
Extended weight lattice of the Root system of type ['A', 2, 1]
"""
return self._wt.parent()
Element = RCHighestWeightElement
class CrystalOfNonSimplyLacedRC(CrystalOfRiggedConfigurations):
"""
Highest weight crystal of rigged configurations in non-simply-laced type.
"""
def __init__(self, vct, wt, WLR):
"""
Initialize ``self``.
EXAMPLES::
sage: La = RootSystem(['C', 3]).weight_lattice().fundamental_weights()
sage: RC = crystals.RiggedConfigurations(La[1])
sage: TestSuite(RC).run()
"""
self._folded_ct = vct
CrystalOfRiggedConfigurations.__init__(self, wt, WLR)
@lazy_attribute
def virtual(self):
"""
Return the corresponding virtual crystal.
EXAMPLES::
sage: La = RootSystem(['C', 2, 1]).weight_lattice().fundamental_weights()
sage: vct = CartanType(['C', 2, 1]).as_folding()
sage: RC = crystals.RiggedConfigurations(vct, La[0])
sage: RC
Crystal of rigged configurations of type ['C', 2, 1] and weight Lambda[0]
sage: RC.virtual
Crystal of rigged configurations of type ['A', 3, 1] and weight 2*Lambda[0]
"""
P = self._folded_ct._folding.root_system().weight_lattice()
gamma = self._folded_ct.scaling_factors()
sigma = self._folded_ct.folding_orbit()
vwt = P.sum_of_terms((b, gamma[a]*c) for a,c in self._wt for b in sigma[a])
return CrystalOfRiggedConfigurations(vwt)
def _calc_vacancy_number(self, partitions, a, i, **options):
r"""
Calculate the vacancy number `p_i^{(a)}(\nu)` in ``self``.
INPUT:
- ``partitions`` -- the list of rigged partitions we are using
- ``a`` -- the rigged partition index
- ``i`` -- the row length
TESTS::
sage: La = RootSystem(['C', 3]).weight_lattice().fundamental_weights()
sage: vct = CartanType(['C', 3]).as_folding()
sage: RC = crystals.RiggedConfigurations(vct, La[2])
sage: elt = RC(partition_list=[[], [1], [1]])
sage: RC._calc_vacancy_number(elt.nu(), 1, 1)
0
sage: RC._calc_vacancy_number(elt.nu(), 2, 1)
-1
"""
I = self.index_set()
ia = I[a]
vac_num = self._wt[ia]
gamma = self._folded_ct.scaling_factors()
g = gamma[ia]
for b in range(self._cartan_matrix.ncols()):
ib = I[b]
q = partitions[b].get_num_cells_to_column(g*i, gamma[ib])
vac_num -= self._cartan_matrix[a,b] * q / gamma[ib]
return vac_num
def to_virtual(self, rc):
"""
Convert ``rc`` into a rigged configuration in the virtual crystal.
INPUT:
- ``rc`` -- a rigged configuration element
EXAMPLES::
sage: La = RootSystem(['C', 3]).weight_lattice().fundamental_weights()
sage: vct = CartanType(['C', 3]).as_folding()
sage: RC = crystals.RiggedConfigurations(vct, La[2])
sage: elt = RC(partition_list=[[], [1], [1]]); elt
<BLANKLINE>
(/)
<BLANKLINE>
0[ ]0
<BLANKLINE>
-1[ ]-1
<BLANKLINE>
sage: RC.to_virtual(elt)
<BLANKLINE>
(/)
<BLANKLINE>
0[ ]0
<BLANKLINE>
-2[ ][ ]-2
<BLANKLINE>
0[ ]0
<BLANKLINE>
(/)
<BLANKLINE>
"""
gamma = [int(_) for _ in self._folded_ct.scaling_factors()]
sigma = self._folded_ct._orbit
n = self._folded_ct._folding.rank()
vindex = self._folded_ct._folding.index_set()
partitions = [None] * n
riggings = [None] * n
for a, rp in enumerate(rc):
for i in sigma[a]:
k = vindex.index(i)
partitions[k] = [row_len*gamma[a] for row_len in rp._list]
riggings[k] = [rig_val*gamma[a] for rig_val in rp.rigging]
return self.virtual.element_class(self.virtual, partition_list=partitions,
rigging_list=riggings)
def from_virtual(self, vrc):
"""
Convert ``vrc`` in the virtual crystal into a rigged configuration of
the original Cartan type.
INPUT:
- ``vrc`` -- a virtual rigged configuration
EXAMPLES::
sage: La = RootSystem(['C', 3]).weight_lattice().fundamental_weights()
sage: vct = CartanType(['C', 3]).as_folding()
sage: RC = crystals.RiggedConfigurations(vct, La[2])
sage: elt = RC(partition_list=[[0], [1], [1]])
sage: elt == RC.from_virtual(RC.to_virtual(elt))
True
"""
gamma = list(self._folded_ct.scaling_factors()) #map(int, self._folded_ct.scaling_factors())
sigma = self._folded_ct._orbit
n = self._cartan_type.rank()
partitions = [None] * n
riggings = [None] * n
vac_nums = [None] * n
vindex = self._folded_ct._folding.index_set()
for a in range(n):
index = vindex.index(sigma[a][0])
partitions[a] = [row_len // gamma[a] for row_len in vrc[index]._list]
riggings[a] = [rig_val / gamma[a] for rig_val in vrc[index].rigging]
return self.element_class(self, partition_list=partitions, rigging_list=riggings)
Element = RCHWNonSimplyLacedElement
# deprecations from trac:18555
from sage.misc.superseded import deprecated_function_alias
CrystalOfRiggedConfigurations.global_options = deprecated_function_alias(18555, CrystalOfRiggedConfigurations.options)
| 36.575824
| 118
| 0.591636
|
3a22cae9527978ab063610ba622a4ee004e4edf4
| 13,426
|
py
|
Python
|
nitro/resource/config/lb/lbvserver_transformpolicy_binding.py
|
HanseMerkur/nitro-python
|
d03eb11f492a35a2a8b2a140322fbce22d25a8f7
|
[
"Apache-2.0"
] | 2
|
2020-08-24T18:04:22.000Z
|
2020-08-24T18:04:47.000Z
|
nitro/resource/config/lb/lbvserver_transformpolicy_binding.py
|
HanseMerkur/nitro-python
|
d03eb11f492a35a2a8b2a140322fbce22d25a8f7
|
[
"Apache-2.0"
] | null | null | null |
nitro/resource/config/lb/lbvserver_transformpolicy_binding.py
|
HanseMerkur/nitro-python
|
d03eb11f492a35a2a8b2a140322fbce22d25a8f7
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nitro.resource.base.base_resource import base_resource
from nitro.resource.base.base_resource import base_response
from nitro.service.options import options
from nitro.exception.nitro_exception import nitro_exception
from nitro.util.nitro_util import nitro_util
class lbvserver_transformpolicy_binding(base_resource) :
"""Binding class showing the transformpolicy that can be bound to lbvserver."""
def __init__(self) :
self._policyname = ""
self._priority = 0
self._gotopriorityexpression = ""
self._bindpoint = ""
self._invoke = False
self._labeltype = ""
self._labelname = ""
self._name = ""
self.___count = 0
@property
def priority(self) :
"""Priority."""
try :
return self._priority
except Exception as e:
raise e
@priority.setter
def priority(self, priority) :
"""Priority.
:param priority:
"""
try :
self._priority = priority
except Exception as e:
raise e
@property
def gotopriorityexpression(self) :
"""Expression specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE."""
try :
return self._gotopriorityexpression
except Exception as e:
raise e
@gotopriorityexpression.setter
def gotopriorityexpression(self, gotopriorityexpression) :
"""Expression specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE.
:param gotopriorityexpression:
"""
try :
self._gotopriorityexpression = gotopriorityexpression
except Exception as e:
raise e
@property
def policyname(self) :
"""Name of the policy bound to the LB vserver."""
try :
return self._policyname
except Exception as e:
raise e
@policyname.setter
def policyname(self, policyname) :
"""Name of the policy bound to the LB vserver.
:param policyname:
"""
try :
self._policyname = policyname
except Exception as e:
raise e
@property
def name(self) :
"""Name for the virtual server. Must begin with an ASCII alphanumeric or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at sign (@), equal sign (=), and hyphen (-) characters. Can be changed after the virtual server is created.
CLI Users: If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my vserver" or 'my vserver'). .<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
"""Name for the virtual server. Must begin with an ASCII alphanumeric or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at sign (@), equal sign (=), and hyphen (-) characters. Can be changed after the virtual server is created.
CLI Users: If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my vserver" or 'my vserver'). .<br/>Minimum length = 1
:param name:
"""
try :
self._name = name
except Exception as e:
raise e
@property
def bindpoint(self) :
"""The bindpoint to which the policy is bound.<br/>Possible values = REQUEST, RESPONSE."""
try :
return self._bindpoint
except Exception as e:
raise e
@bindpoint.setter
def bindpoint(self, bindpoint) :
"""The bindpoint to which the policy is bound.<br/>Possible values = REQUEST, RESPONSE
:param bindpoint:
"""
try :
self._bindpoint = bindpoint
except Exception as e:
raise e
@property
def labeltype(self) :
"""The invocation type.<br/>Possible values = reqvserver, resvserver, policylabel."""
try :
return self._labeltype
except Exception as e:
raise e
@labeltype.setter
def labeltype(self, labeltype) :
"""The invocation type.<br/>Possible values = reqvserver, resvserver, policylabel
:param labeltype:
"""
try :
self._labeltype = labeltype
except Exception as e:
raise e
@property
def labelname(self) :
"""Name of the label invoked."""
try :
return self._labelname
except Exception as e:
raise e
@labelname.setter
def labelname(self, labelname) :
"""Name of the label invoked.
:param labelname:
"""
try :
self._labelname = labelname
except Exception as e:
raise e
@property
def invoke(self) :
"""Invoke policies bound to a virtual server or policy label."""
try :
return self._invoke
except Exception as e:
raise e
@invoke.setter
def invoke(self, invoke) :
"""Invoke policies bound to a virtual server or policy label.
:param invoke:
"""
try :
self._invoke = invoke
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
"""converts nitro response into object and returns the object array in case of get request.
:param service:
:param response:
"""
try :
result = service.payload_formatter.string_to_resource(lbvserver_transformpolicy_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.lbvserver_transformpolicy_binding
except Exception as e :
raise e
def _get_object_name(self) :
"""Returns the value of object identifier argument"""
try :
if self.name is not None :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
"""
:param client:
:param resource:
"""
try :
if resource and type(resource) is not list :
updateresource = lbvserver_transformpolicy_binding()
updateresource.name = resource.name
updateresource.policyname = resource.policyname
updateresource.priority = resource.priority
updateresource.gotopriorityexpression = resource.gotopriorityexpression
updateresource.bindpoint = resource.bindpoint
updateresource.invoke = resource.invoke
updateresource.labeltype = resource.labeltype
updateresource.labelname = resource.labelname
return updateresource.update_resource(client)
else :
if resource and len(resource) > 0 :
updateresources = [lbvserver_transformpolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].name = resource[i].name
updateresources[i].policyname = resource[i].policyname
updateresources[i].priority = resource[i].priority
updateresources[i].gotopriorityexpression = resource[i].gotopriorityexpression
updateresources[i].bindpoint = resource[i].bindpoint
updateresources[i].invoke = resource[i].invoke
updateresources[i].labeltype = resource[i].labeltype
updateresources[i].labelname = resource[i].labelname
return cls.update_bulk_request(client, updateresources)
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
"""
:param client:
:param resource:
"""
try :
if resource and type(resource) is not list :
deleteresource = lbvserver_transformpolicy_binding()
deleteresource.name = resource.name
deleteresource.policyname = resource.policyname
deleteresource.bindpoint = resource.bindpoint
deleteresource.priority = resource.priority
return deleteresource.delete_resource(client)
else :
if resource and len(resource) > 0 :
deleteresources = [lbvserver_transformpolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i].name
deleteresources[i].policyname = resource[i].policyname
deleteresources[i].bindpoint = resource[i].bindpoint
deleteresources[i].priority = resource[i].priority
return cls.delete_bulk_request(client, deleteresources)
except Exception as e :
raise e
@classmethod
def get(cls, service, name) :
"""Use this API to fetch lbvserver_transformpolicy_binding resources.
:param service:
:param name:
"""
try :
obj = lbvserver_transformpolicy_binding()
obj.name = name
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, name, filter_) :
"""Use this API to fetch filtered set of lbvserver_transformpolicy_binding resources.
Filter string should be in JSON format, e.g. "port:80,servicetype:HTTP".
:param service:
:param name:
:param filter_:
"""
try :
obj = lbvserver_transformpolicy_binding()
obj.name = name
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, name) :
"""Use this API to count lbvserver_transformpolicy_binding resources configued on NetScaler.
:param service:
:param name:
"""
try :
obj = lbvserver_transformpolicy_binding()
obj.name = name
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, name, filter_) :
"""Use this API to count the filtered set of lbvserver_transformpolicy_binding resources.
Filter string should be in JSON format, e.g. "port:80,servicetype:HTTP".
:param service:
:param name:
:param filter_:
"""
try :
obj = lbvserver_transformpolicy_binding()
obj.name = name
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class Bindpoint:
""" """
REQUEST = "REQUEST"
RESPONSE = "RESPONSE"
class Labeltype:
""" """
reqvserver = "reqvserver"
resvserver = "resvserver"
policylabel = "policylabel"
class lbvserver_transformpolicy_binding_response(base_response) :
""" """
def __init__(self, length=1) :
self.lbvserver_transformpolicy_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.lbvserver_transformpolicy_binding = [lbvserver_transformpolicy_binding() for _ in range(length)]
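# --- Illustrative usage sketch (not part of the generated module) ---
# Fetching and counting the transform-policy bindings of a virtual server,
# assuming an authenticated nitro_service instance named `client` and a
# virtual server called "lb_vip" (both placeholders).
#
#   bindings = lbvserver_transformpolicy_binding.get(client, "lb_vip")
#   total = lbvserver_transformpolicy_binding.count(client, "lb_vip")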
| 33.733668
| 308
| 0.591986
|
ef5c9936d6df45f2d6f051c3dfbc25a2fcaddc55
| 4,351
|
py
|
Python
|
python/dazl/_gen/com/daml/ledger/api/v1/command_submission_service_pb2.py
|
digital-asset/dazl-client
|
5d54edaea26d7704cc8d73e5945b37ed2806265b
|
[
"Apache-2.0"
] | 8
|
2019-09-08T09:41:03.000Z
|
2022-02-19T12:54:30.000Z
|
python/dazl/_gen/com/daml/ledger/api/v1/command_submission_service_pb2.py
|
digital-asset/dazl-client
|
5d54edaea26d7704cc8d73e5945b37ed2806265b
|
[
"Apache-2.0"
] | 55
|
2019-05-30T23:00:31.000Z
|
2022-01-24T01:51:32.000Z
|
python/dazl/_gen/com/daml/ledger/api/v1/command_submission_service_pb2.py
|
digital-asset/dazl-client
|
5d54edaea26d7704cc8d73e5945b37ed2806265b
|
[
"Apache-2.0"
] | 9
|
2019-06-30T18:15:27.000Z
|
2021-12-03T10:15:27.000Z
|
# Copyright (c) 2017-2021 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
# fmt: off
# isort: skip_file
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: com/daml/ledger/api/v1/command_submission_service.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from . import commands_pb2 as com_dot_daml_dot_ledger_dot_api_dot_v1_dot_commands__pb2
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='com/daml/ledger/api/v1/command_submission_service.proto',
package='com.daml.ledger.api.v1',
syntax='proto3',
serialized_options=b'\n\026com.daml.ledger.api.v1B\"CommandSubmissionServiceOuterClassZOgithub.com/digital-asset/dazl-client/go/v7/pkg/generated/com/daml/ledger/api/v1\252\002\026Com.Daml.Ledger.Api.V1',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n7com/daml/ledger/api/v1/command_submission_service.proto\x12\x16\x63om.daml.ledger.api.v1\x1a%com/daml/ledger/api/v1/commands.proto\x1a\x1bgoogle/protobuf/empty.proto\"M\n\rSubmitRequest\x12<\n\x08\x63ommands\x18\x01 \x01(\x0b\x32 .com.daml.ledger.api.v1.CommandsR\x08\x63ommands2c\n\x18\x43ommandSubmissionService\x12G\n\x06Submit\x12%.com.daml.ledger.api.v1.SubmitRequest\x1a\x16.google.protobuf.EmptyB\xa6\x01\n\x16\x63om.daml.ledger.api.v1B\"CommandSubmissionServiceOuterClassZOgithub.com/digital-asset/dazl-client/go/v7/pkg/generated/com/daml/ledger/api/v1\xaa\x02\x16\x43om.Daml.Ledger.Api.V1b\x06proto3'
,
dependencies=[com_dot_daml_dot_ledger_dot_api_dot_v1_dot_commands__pb2.DESCRIPTOR,google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,])
_SUBMITREQUEST = _descriptor.Descriptor(
name='SubmitRequest',
full_name='com.daml.ledger.api.v1.SubmitRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='commands', full_name='com.daml.ledger.api.v1.SubmitRequest.commands', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='commands', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=151,
serialized_end=228,
)
_SUBMITREQUEST.fields_by_name['commands'].message_type = com_dot_daml_dot_ledger_dot_api_dot_v1_dot_commands__pb2._COMMANDS
DESCRIPTOR.message_types_by_name['SubmitRequest'] = _SUBMITREQUEST
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
SubmitRequest = _reflection.GeneratedProtocolMessageType('SubmitRequest', (_message.Message,), {
'DESCRIPTOR' : _SUBMITREQUEST,
'__module__' : 'com.daml.ledger.api.v1.command_submission_service_pb2'
# @@protoc_insertion_point(class_scope:com.daml.ledger.api.v1.SubmitRequest)
})
_sym_db.RegisterMessage(SubmitRequest)
DESCRIPTOR._options = None
_COMMANDSUBMISSIONSERVICE = _descriptor.ServiceDescriptor(
name='CommandSubmissionService',
full_name='com.daml.ledger.api.v1.CommandSubmissionService',
file=DESCRIPTOR,
index=0,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=230,
serialized_end=329,
methods=[
_descriptor.MethodDescriptor(
name='Submit',
full_name='com.daml.ledger.api.v1.CommandSubmissionService.Submit',
index=0,
containing_service=None,
input_type=_SUBMITREQUEST,
output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_COMMANDSUBMISSIONSERVICE)
DESCRIPTOR.services_by_name['CommandSubmissionService'] = _COMMANDSUBMISSIONSERVICE
# @@protoc_insertion_point(module_scope)
| 41.04717
| 630
| 0.800965
|
597e5efc6b3515a3eac06f4cc85a53396734a11a
| 2,154
|
py
|
Python
|
dia 09 FastAPI/site-packages/prospector/postfilter.py
|
RamonNicolas/100-Dias-com-Python
|
b4678b5981415b385528c5fe67c7bb02c8d2f4ca
|
[
"MIT"
] | null | null | null |
dia 09 FastAPI/site-packages/prospector/postfilter.py
|
RamonNicolas/100-Dias-com-Python
|
b4678b5981415b385528c5fe67c7bb02c8d2f4ca
|
[
"MIT"
] | null | null | null |
dia 09 FastAPI/site-packages/prospector/postfilter.py
|
RamonNicolas/100-Dias-com-Python
|
b4678b5981415b385528c5fe67c7bb02c8d2f4ca
|
[
"MIT"
] | null | null | null |
import os
from prospector.suppression import get_suppressions
def filter_messages(relative_filepaths, root, messages):
"""
This method post-processes all messages output by all tools, in order to filter
out any based on the overall output.
The main aim currently is to use information about messages suppressed by
pylint due to inline comments, and use that to suppress messages from other
tools representing the same problem.
For example:
import banana # pylint:disable=unused-import
In this situation, pylint will not warn about an unused import as there is
inline configuration to disable the warning. Pyflakes will still raise that
error, however, because it does not understand pylint disabling messages.
This method uses the information about suppressed messages from pylint to
squash the unwanted redundant error from pyflakes and frosted.
"""
paths_to_ignore, lines_to_ignore, messages_to_ignore = get_suppressions(relative_filepaths, root, messages)
filtered = []
for message in messages:
# first get rid of the pylint informational messages
relative_message_path = os.path.relpath(message.location.path)
if message.source == "pylint" and message.code in (
"suppressed-message",
"file-ignored",
):
continue
# some files are skipped entirely by messages
if relative_message_path in paths_to_ignore:
continue
# some lines are skipped entirely by messages
if relative_message_path in lines_to_ignore:
if message.location.line in lines_to_ignore[relative_message_path]:
continue
# and some lines have only certain messages explicitly ignored
if relative_message_path in messages_to_ignore:
if message.location.line in messages_to_ignore[relative_message_path]:
if message.code in messages_to_ignore[relative_message_path][message.location.line]:
continue
# otherwise this message was not filtered
filtered.append(message)
return filtered
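# --- Illustrative usage (not part of the original module) ---
# A minimal sketch, assuming duck-typed message objects exposing the same
# attributes filter_messages reads above (source, code, location.path,
# location.line); depending on the prospector version, real Message instances
# may be required. The path and error code below are made up.
if __name__ == "__main__":
    from types import SimpleNamespace

    fake_message = SimpleNamespace(
        source="pyflakes",
        code="F401",
        location=SimpleNamespace(path="example/module.py", line=1),
    )
    # With no pylint suppressions recorded, the message passes straight through.
    print(filter_messages(["example/module.py"], ".", [fake_message]))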
| 37.789474
| 111
| 0.708914
|
0c3aea2e1c3210fdcdf6c48eb2eee848fab50884
| 1,447
|
py
|
Python
|
python/nltk_service_pb2_grpc.py
|
Cartmanishere/grpc-python-golang
|
a07b0509964fe6d4427254e73bc5875abb9cfabc
|
[
"MIT"
] | 4
|
2019-11-01T11:48:03.000Z
|
2021-03-23T17:52:02.000Z
|
python/nltk_service_pb2_grpc.py
|
Cartmanishere/grpc-python-golang
|
a07b0509964fe6d4427254e73bc5875abb9cfabc
|
[
"MIT"
] | null | null | null |
python/nltk_service_pb2_grpc.py
|
Cartmanishere/grpc-python-golang
|
a07b0509964fe6d4427254e73bc5875abb9cfabc
|
[
"MIT"
] | 5
|
2019-11-27T05:38:05.000Z
|
2020-07-22T16:59:10.000Z
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
import nltk_service_pb2 as nltk__service__pb2
class KeywordServiceStub(object):
# missing associated documentation comment in .proto file
pass
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetKeywords = channel.unary_unary(
'/KeywordService/GetKeywords',
request_serializer=nltk__service__pb2.Request.SerializeToString,
response_deserializer=nltk__service__pb2.Response.FromString,
)
class KeywordServiceServicer(object):
# missing associated documentation comment in .proto file
pass
def GetKeywords(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_KeywordServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetKeywords': grpc.unary_unary_rpc_method_handler(
servicer.GetKeywords,
request_deserializer=nltk__service__pb2.Request.FromString,
response_serializer=nltk__service__pb2.Response.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'KeywordService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
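# --- Illustrative client usage (not part of the generated file) ---
# A minimal sketch, assuming a KeywordService server is listening on
# localhost:50051 and that nltk_service_pb2.Request carries a `text` field;
# both the address and the field name are assumptions, not taken from this file.
if __name__ == '__main__':
    with grpc.insecure_channel('localhost:50051') as channel:
        stub = KeywordServiceStub(channel)
        reply = stub.GetKeywords(nltk__service__pb2.Request(text='gRPC keeps services language-agnostic'))
        print(reply)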
| 30.787234
| 76
| 0.755356
|
f747a292c1faff5cac56ac5fe3ddced9c9b86c5d
| 4,284
|
py
|
Python
|
Chatbot_Model/Info_Extraction/Entity_Extraction/data.py
|
chenpocufa/Chatbot_CN
|
5e13c129c159143610f4dfc99478d401dd5777e6
|
[
"Apache-2.0"
] | 1
|
2019-08-02T06:09:34.000Z
|
2019-08-02T06:09:34.000Z
|
Chatbot_Model/Info_Extraction/Entity_Extraction/data.py
|
yuxuan2015/Chatbot_CN
|
1adf1c01d3eced5f0644102bdec9be22705b6f3f
|
[
"Apache-2.0"
] | null | null | null |
Chatbot_Model/Info_Extraction/Entity_Extraction/data.py
|
yuxuan2015/Chatbot_CN
|
1adf1c01d3eced5f0644102bdec9be22705b6f3f
|
[
"Apache-2.0"
] | 1
|
2019-06-10T00:36:17.000Z
|
2019-06-10T00:36:17.000Z
|
#-*- coding:utf-8 _*-
"""
@author:charlesXu
@file: data.py
@desc: Build character and word vectors (vocabulary construction)
@time: 2018/08/08
"""
import sys, pickle, os, random
import numpy as np
import pdb
## tags, BIO tagging scheme: label-to-id map
tag2label = {"O": 0,
"B-PER": 1, "I-PER": 2,
"B-LOC": 3, "I-LOC": 4,
"B-ORG": 5, "I-ORG": 6,
"B-TIM": 7, "I-TIM": 8 # 时间标签
}
def read_corpus(corpus_path):
"""
read corpus and return the list of samples
:param corpus_path:
:return: data
"""
data = []
with open(corpus_path, encoding='utf-8') as fr:
lines = fr.readlines()
sent_, tag_ = [], []
for line in lines:
if line != '\n':
[char, label] = line.strip().split()
sent_.append(char)
tag_.append(label)
else:
data.append((sent_, tag_))
sent_, tag_ = [], []
return data
def vocab_build(vocab_path, corpus_path, min_count):
"""
:param vocab_path:
:param corpus_path:
:param min_count:
:return:
"""
data = read_corpus(corpus_path)
word2id = {}
for sent_, tag_ in data:
for word in sent_:
if word.isdigit():
word = '<NUM>'
elif ('\u0041' <= word <='\u005a') or ('\u0061' <= word <='\u007a'):
word = '<ENG>'
if word not in word2id:
word2id[word] = [len(word2id)+1, 1]
else:
word2id[word][1] += 1
low_freq_words = []
for word, [word_id, word_freq] in word2id.items():
if word_freq < min_count and word != '<NUM>' and word != '<ENG>':
low_freq_words.append(word)
for word in low_freq_words:
del word2id[word]
new_id = 1
for word in word2id.keys():
word2id[word] = new_id
new_id += 1
word2id['<UNK>'] = new_id
word2id['<PAD>'] = 0
print(len(word2id))
with open(vocab_path, 'wb') as fw:
pickle.dump(word2id, fw)
def sentence2id(sent, word2id):
"""
:param sent:
:param word2id:
:return:
"""
sentence_id = []
for word in sent:
if word.isdigit():
word = '<NUM>'
elif ('\u0041' <= word <= '\u005a') or ('\u0061' <= word <= '\u007a'):
word = '<ENG>'
if word not in word2id:
word = '<UNK>'
sentence_id.append(word2id[word])
return sentence_id
def read_dictionary(vocab_path):
"""
:param vocab_path:
:return:
"""
vocab_path = os.path.join(vocab_path)
with open(vocab_path, 'rb') as fr:
word2id = pickle.load(fr)
print('vocab_size:', len(word2id))
return word2id
def random_embedding(vocab, embedding_dim):
"""
:param vocab:
:param embedding_dim:
:return:
"""
embedding_mat = np.random.uniform(-0.25, 0.25, (len(vocab), embedding_dim))
embedding_mat = np.float32(embedding_mat)
return embedding_mat
def pad_sequences(sequences, pad_mark=0):
"""
:param sequences:
:param pad_mark:
:return:
"""
max_len = max(map(lambda x : len(x), sequences))
seq_list, seq_len_list = [], []
for seq in sequences:
seq = list(seq)
seq_ = seq[:max_len] + [pad_mark] * max(max_len - len(seq), 0)
seq_list.append(seq_)
seq_len_list.append(min(len(seq), max_len))
return seq_list, seq_len_list
def batch_yield(data, batch_size, vocab, tag2label, shuffle=False):
"""
:param data:
:param batch_size:
:param vocab:
    :param tag2label: mapping from label strings to integer ids
:param shuffle:
:return:
"""
if shuffle:
        random.shuffle(data)  # shuffle the data before every epoch
seqs, labels = [], []
for (sent_, tag_) in data:
sent_ = sentence2id(sent_, vocab)
label_ = [tag2label[tag] for tag in tag_]
if len(seqs) == batch_size:
yield seqs, labels
seqs, labels = [], []
seqs.append(sent_)
labels.append(label_)
if len(seqs) != 0:
yield seqs, labels
# Build word2id.pkl
# vocab_path = 'D:\project\Chatbot_CN\Chatbot_Model\Info_Extraction\Entity_Extraction\data\word2id_tim.pkl'
# corpus_path = 'D:\project\Chatbot_CN\Chatbot_Data\Info_Extraction\\train_data_tim'
# min = 5
# vocab_build(vocab_path, corpus_path, min)
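# --- Illustrative usage (not part of the original script) ---
# A minimal sketch with a made-up toy vocabulary and two tagged sentences,
# showing how batch_yield and pad_sequences fit together; real runs would load
# the vocabulary with read_dictionary() and the corpus with read_corpus().
if __name__ == '__main__':
    toy_vocab = {'<PAD>': 0, '我': 1, '在': 2, '北': 3, '京': 4,
                 '<NUM>': 5, '<ENG>': 6, '<UNK>': 7}
    toy_data = [(['我', '在', '北', '京'], ['O', 'O', 'B-LOC', 'I-LOC']),
                (['我'], ['O'])]
    for seqs, labels in batch_yield(toy_data, batch_size=2, vocab=toy_vocab,
                                    tag2label=tag2label, shuffle=False):
        padded, lengths = pad_sequences(seqs, pad_mark=0)
        print(padded, lengths, labels)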
| 23.8
| 107
| 0.556256
|
2166f30e71f6bd4a6376817e28ff4af3628abab5
| 2,162
|
py
|
Python
|
env/Lib/site-packages/jeepney/io/tests/test_asyncio.py
|
iamswayam/DRF-API-Logger
|
4acd2ee709df98c2e22d6bd0a97bf9191a0d8e4e
|
[
"MIT"
] | 1
|
2020-10-20T12:55:21.000Z
|
2020-10-20T12:55:21.000Z
|
env/Lib/site-packages/jeepney/io/tests/test_asyncio.py
|
iamswayam/DRF-API-Logger
|
4acd2ee709df98c2e22d6bd0a97bf9191a0d8e4e
|
[
"MIT"
] | 29
|
2021-08-17T19:09:23.000Z
|
2022-03-29T19:08:23.000Z
|
env/Lib/site-packages/jeepney/io/tests/test_asyncio.py
|
iamswayam/DRF-API-Logger
|
4acd2ee709df98c2e22d6bd0a97bf9191a0d8e4e
|
[
"MIT"
] | 2
|
2021-05-23T16:46:31.000Z
|
2021-05-26T23:51:09.000Z
|
import asyncio
import pytest
from jeepney import DBusAddress, new_method_call
from jeepney.bus_messages import message_bus, MatchRule
from jeepney.io.asyncio import (
open_dbus_connection, open_dbus_router, Proxy
)
from .utils import have_session_bus
pytestmark = [
pytest.mark.asyncio,
pytest.mark.skipif(
not have_session_bus, reason="Tests require DBus session bus"
),
]
bus_peer = DBusAddress(
bus_name='org.freedesktop.DBus',
object_path='/org/freedesktop/DBus',
interface='org.freedesktop.DBus.Peer'
)
@pytest.fixture()
async def connection():
conn = await open_dbus_connection(bus='SESSION')
yield conn
await conn.close()
async def test_connect(connection):
assert connection.unique_name.startswith(':')
@pytest.fixture()
async def router():
async with open_dbus_router(bus='SESSION') as router:
yield router
async def test_send_and_get_reply(router):
ping_call = new_method_call(bus_peer, 'Ping')
reply = await asyncio.wait_for(
router.send_and_get_reply(ping_call), timeout=5
)
assert reply.body == ()
async def test_proxy(router):
proxy = Proxy(message_bus, router)
name = "io.gitlab.takluyver.jeepney.examples.Server"
res = await proxy.RequestName(name)
assert res in {(1,), (2,)} # 1: got the name, 2: queued
has_owner, = await proxy.NameHasOwner(name)
assert has_owner is True
async def test_filter(router):
bus = Proxy(message_bus, router)
name = "io.gitlab.takluyver.jeepney.tests.asyncio_test_filter"
match_rule = MatchRule(
type="signal",
sender=message_bus.bus_name,
interface=message_bus.interface,
member="NameOwnerChanged",
path=message_bus.object_path,
)
match_rule.add_arg_condition(0, name)
# Ask the message bus to subscribe us to this signal
await bus.AddMatch(match_rule)
with router.filter(match_rule) as queue:
res, = await bus.RequestName(name)
assert res == 1 # 1: got the name
signal_msg = await asyncio.wait_for(queue.get(), timeout=2.0)
assert signal_msg.body == (name, '', router.unique_name)
| 27.717949
| 69
| 0.701665
|
1654942c3ce882d710d0e804cfffdfa574d488a5
| 128
|
py
|
Python
|
__init__.py
|
Mem-Tech/meme-voice-assistant
|
9ec4c6bcd7decb5593095fa53646bc8ff2810ce7
|
[
"MIT"
] | 1
|
2021-03-05T21:36:14.000Z
|
2021-03-05T21:36:14.000Z
|
__init__.py
|
Mem-Tech/meme-voice-assistant
|
9ec4c6bcd7decb5593095fa53646bc8ff2810ce7
|
[
"MIT"
] | 2
|
2021-04-30T22:09:03.000Z
|
2021-04-30T22:11:19.000Z
|
__init__.py
|
Mem-Tech/meme-voice-assistant
|
9ec4c6bcd7decb5593095fa53646bc8ff2810ce7
|
[
"MIT"
] | null | null | null |
from .util import KEYWORDS
from .util import KEYWORD_FILE_PATHS
from .util import LIBRARY_PATH
from .util import MODEL_FILE_PATH
| 32
| 36
| 0.851563
|
49a317b0295df5a38461bfe7b496daf6b5be3808
| 742
|
py
|
Python
|
question_bank/populating-next-right-pointers-in-each-node-ii/populating-next-right-pointers-in-each-node-ii.py
|
yatengLG/leetcode-python
|
5d48aecb578c86d69835368fad3d9cc21961c226
|
[
"Apache-2.0"
] | 9
|
2020-08-12T10:01:00.000Z
|
2022-01-05T04:37:48.000Z
|
question_bank/populating-next-right-pointers-in-each-node-ii/populating-next-right-pointers-in-each-node-ii.py
|
yatengLG/leetcode-python
|
5d48aecb578c86d69835368fad3d9cc21961c226
|
[
"Apache-2.0"
] | 1
|
2021-02-16T10:19:31.000Z
|
2021-02-16T10:19:31.000Z
|
question_bank/populating-next-right-pointers-in-each-node-ii/populating-next-right-pointers-in-each-node-ii.py
|
yatengLG/leetcode-python
|
5d48aecb578c86d69835368fad3d9cc21961c226
|
[
"Apache-2.0"
] | 4
|
2020-08-12T10:13:31.000Z
|
2021-11-05T01:26:58.000Z
|
# -*- coding: utf-8 -*-
# @Author : LG
"""
Runtime: 60 ms, faster than 81.56% of all Python3 submissions
Memory usage: 14.5 MB, better than 54.12% of all Python3 submissions
Approach:
First record the nodes of each level in left-to-right order,
then fix up the next pointers.
"""
class Solution:
def connect(self, root: 'Node') -> 'Node':
record = {}
        def find(root, h):  # traverse the tree and record the nodes of each level
if root:
if h in record:
record[h].append(root)
else:
record[h] = [root]
                find(root.left, h+1)  # visit left before right so each level is stored left-to-right
find(root.right, h+1)
find(root, 1)
        for h, ns in record.items():  # link the nodes within each level
for i in range(len(ns)-1):
ns[i].next = ns[i+1]
            ns[-1].next = None  # the last node of each level has no right neighbour
return root
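# --- Illustrative usage (not part of the original submission) ---
# LeetCode supplies the Node class; a minimal stand-in is defined here only so
# the sketch runs on its own. Tree: 1 -> (2, 3), 2 -> (4, .), 3 -> (., 7).
if __name__ == '__main__':
    class Node:
        def __init__(self, val=0, left=None, right=None, next=None):
            self.val, self.left, self.right, self.next = val, left, right, next

    root = Node(1, Node(2, Node(4)), Node(3, None, Node(7)))
    Solution().connect(root)
    print(root.left.left.next.val)  # 4.next now points to 7 on the third level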
| 25.586207
| 51
| 0.463612
|
4b020a0db2cde3115d4c68fdace8bf9d66478795
| 34
|
py
|
Python
|
lemora.py
|
javaarchive/Lemora
|
25414cae68eb744fe1dba589000acb6b45b8f1a0
|
[
"MIT"
] | null | null | null |
lemora.py
|
javaarchive/Lemora
|
25414cae68eb744fe1dba589000acb6b45b8f1a0
|
[
"MIT"
] | null | null | null |
lemora.py
|
javaarchive/Lemora
|
25414cae68eb744fe1dba589000acb6b45b8f1a0
|
[
"MIT"
] | null | null | null |
import SQLiteKV
import EnhancedDB
| 11.333333
| 17
| 0.882353
|
3710a872cbc5ae9bb631f603819d7857aa148476
| 3,894
|
py
|
Python
|
python_scripts/create_essentialgenes_list.py
|
Gregory94/LaanLab-SATAY-DataAnalysis
|
276cb96d42dfcf4bed16aaaf0786519d96831ed0
|
[
"Apache-2.0"
] | 2
|
2020-04-01T14:54:34.000Z
|
2020-09-03T11:11:40.000Z
|
python_scripts/create_essentialgenes_list.py
|
Gregory94/LaanLab-SATAY-DataAnalysis
|
276cb96d42dfcf4bed16aaaf0786519d96831ed0
|
[
"Apache-2.0"
] | 39
|
2020-03-22T08:59:11.000Z
|
2021-03-23T16:34:25.000Z
|
python_scripts/create_essentialgenes_list.py
|
leilaicruz/LaanLab-SATAY-DataAnalysis
|
276cb96d42dfcf4bed16aaaf0786519d96831ed0
|
[
"Apache-2.0"
] | 2
|
2021-03-31T12:45:44.000Z
|
2021-05-20T08:25:34.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 6 13:41:44 2020
@author: gregoryvanbeek
This Python code creates a .txt file that includes all genes annotated as essential in Saccharomyces cerevisiae.
It gets this information from multiple .txt files and combines all information in one file.
It checks whether a gene is already present in the file (either with the same name or an alias).
It stores the gene names using the oln naming convention.
"""
import os, sys
file_dirname = os.path.dirname(os.path.abspath('__file__'))
sys.path.insert(1,os.path.join(file_dirname,'python_modules'))
from gene_names import gene_aliases
#%%INPUT
file_list = [os.path.join(file_dirname, '..','data_files','Cerevisiae_EssentialGenes_List_1.txt'),
os.path.join(file_dirname, '..','data_files','Cerevisiae_EssentialGenes_List_2.txt')]
#%%
def create_essentialgenes_list(inputfiles_list = None):
'''
This function requires as input a list of paths to files containing essential genes.
Multiple files can be present in this list.
The input files have to have the following layout:
    - Three header lines (can be empty or contain any text)
    - Each subsequent line should contain one gene name in either oln or designation naming convention.
    This function depends on the following custom-made modules:
- gene_names.py (requires the file Yeast_Protein_Names.txt)
The output will be a text file containing all uniquely found genes in all input files given.
The file will be stored at the same location of the first file of the input list with the name 'Cerevisiae_AllEssentialGenes_List.txt'.
In this file each line contains one gene and it has a single header line containing all the filenames that were used to create this file.
'''
if inputfiles_list == None:
raise ValueError('Input list containing one or more paths is missing.')
else:
files = inputfiles_list
path = os.path.dirname(files[0])
filename_list = []
for file in files:
filename_list.append(os.path.basename(file))
del (inputfiles_list, file)
#%%
all_genes_list = []
for file in files:#ASSUMES THREE HEADER LINES
filename = os.path.basename(file)
with open(file) as f:
lines = f.readlines()
print('Number of genes found in %s: %i' % (filename, (len(lines)-3)))
for line in lines[3:]:
all_genes_list.append(line.strip('\n'))
del (file, f, lines, line)
#%%
gene_aliases_dict = gene_aliases(os.path.join(file_dirname,'..','data_files','Yeast_Protein_Names.txt'))[0]
# r"C:\Users\gregoryvanbeek\Documents\GitHub\LaanLab-SATAY-DataAnalysis\Python_scripts\Data_Files\Yeast_Protein_Names.txt")[0]
#%%
all_genes_oln_list = []
for gene in all_genes_list:
if gene in gene_aliases_dict:
all_genes_oln_list.append(gene)
else:
for key, val in gene_aliases_dict.items():
if gene in val:
all_genes_oln_list.append(key)
break
del (gene, all_genes_list, key, val, gene_aliases_dict)
#%%
unique_genes_list = list(set(all_genes_oln_list))
unique_genes_list.sort()
print('Number of unique essential genes found : %i' % len(unique_genes_list))
del (all_genes_oln_list)
#%%
save_filename = r'Cerevisiae_AllEssentialGenes_List.txt'
save_file = os.path.join(path, save_filename)
print('Creating text file with all unique genes at %s' % save_file)
with open(save_file, 'w') as f:
f.write('All essential genes found in lists:' + str(filename_list) + '\n')
for gene in unique_genes_list:
f.write(gene + '\n')
del (gene)
#%%
if __name__ == '__main__':
create_essentialgenes_list(file_list)
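# --- Illustrative input layout (not part of the original script) ---
# Each file in file_list is expected to look like the made-up example below:
# three header lines followed by one gene name per line, in either oln or
# designation naming convention.
#
#   Essential genes, source X        <- header line 1
#   downloaded 2020-08-06            <- header line 2
#                                    <- header line 3 (may be empty)
#   YAL001C
#   CDC24
#   YBR160W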
| 30.904762
| 141
| 0.681561
|
5bc5d96a5d628d55c5fbc4fd0d6f4c71ff2d248f
| 7,631
|
py
|
Python
|
GDAL_RapidEye_NDVI.py
|
leandromet/Geoprocessamento---Geoprocessing
|
21e72dc6ac51d958a5570c35ea9db6f976c30a03
|
[
"MIT"
] | 2
|
2016-05-29T14:46:54.000Z
|
2022-03-31T13:05:52.000Z
|
GDAL_RapidEye_NDVI.py
|
leandromet/Geoprocessamento---Geoprocessing
|
21e72dc6ac51d958a5570c35ea9db6f976c30a03
|
[
"MIT"
] | null | null | null |
GDAL_RapidEye_NDVI.py
|
leandromet/Geoprocessamento---Geoprocessing
|
21e72dc6ac51d958a5570c35ea9db6f976c30a03
|
[
"MIT"
] | 1
|
2016-06-22T12:18:41.000Z
|
2016-06-22T12:18:41.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
#-------------------------------------------------------------------------------
# Name: RapidEyeNDVIfromCutRegions
# Purpose:     Calculates the NDVI of a defined region based on the centroids of features from a shapefile.
#
# Author: leandro.biondo@florestal.gov.br
#
# Created: 29/12/2014
# Copyright: (c) leandro.biondo 2015
# Licence: GPL
#-------------------------------------------------------------------------------
"""
import os
import sys
import numpy as np
from osgeo import gdal
from osgeo import ogr
import glob
def calculate_ndvi ( red_filename, nir_filename ):
"""
A function to calculate the Normalised Difference Vegetation Index
    from red and near infrared reflectances. The reflectance data ought to
    be present in two different files, specified by the variables
`red_filename` and `nir_filename`. The file format ought to be
recognised by GDAL
"""
g_red = gdal.Open ( red_filename )
red = g_red.ReadAsArray()
g_nir = gdal.Open ( nir_filename )
nir = g_nir.ReadAsArray()
if ( g_red.RasterXSize != g_nir.RasterXSize ) or \
( g_red.RasterYSize != g_nir.RasterYSize ):
print "ERROR: Input datasets do't match!"
print "\t Red data shape is %dx%d" % ( red.shape )
print "\t NIR data shape is %dx%d" % ( nir.shape )
sys.exit ( -1 )
passer = np.logical_and ( red > 1, nir > 1 )
ndvi = np.where ( passer, (1.*nir - 1.*red ) / ( 1.*nir + 1.*red ), -999 )
return ndvi
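# --- Illustrative sketch (not part of the original script) ---
# A tiny, GDAL-free demonstration of the masking and formula used by
# calculate_ndvi() above; the reflectance values below are made up and the
# helper is never called by the script itself.
def _ndvi_demo():
    red = np.array([[10., 0.], [30., 40.]])
    nir = np.array([[50., 0.], [60., 20.]])
    passer = np.logical_and(red > 1, nir > 1)
    # Pixels where either band is <= 1 get the -999 no-data value.
    return np.where(passer, (1. * nir - 1. * red) / (1. * nir + 1. * red), -999)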
def save_raster ( output_name, raster_data, dataset, driver="GTiff" ):
"""
A function to save a 1-band raster using GDAL to the file indicated
by ``output_name``. It requires a GDAL-accesible dataset to collect
the projection and geotransform.
"""
# Open the reference dataset
g_input = gdal.Open ( dataset )
# Get the Geotransform vector
geo_transform = g_input.GetGeoTransform ()
x_size = g_input.RasterXSize # Raster xsize
y_size = g_input.RasterYSize # Raster ysize
srs = g_input.GetProjectionRef () # Projection
# Need a driver object. By default, we use GeoTIFF
if driver == "GTiff":
driver = gdal.GetDriverByName ( driver )
dataset_out = driver.Create ( output_name, x_size, y_size, 1, \
gdal.GDT_Float32, ['TFW=YES', \
'COMPRESS=LZW', 'TILED=YES'] )
else:
driver = gdal.GetDriverByName ( driver )
dataset_out = driver.Create ( output_name, x_size, y_size, 1, \
gdal.GDT_Float32 )
dataset_out.SetGeoTransform ( geo_transform )
dataset_out.SetProjection ( srs )
dataset_out.GetRasterBand ( 1 ).WriteArray ( \
raster_data.astype(np.float32) )
dataset_out.GetRasterBand ( 1 ).SetNoDataValue ( float(-999) )
dataset_out = None
def prep_cut_call (caminhoi, imagemtif, ptcenter, NameT):
# "Funcao que prepara um TIF e calcula o NDVI para uma area relacionada um ponto central"
    # Clipping window around the central point
redcorte = (map(sum,zip(ptcenter,(-150,-150))), map(sum,zip(ptcenter,(150,150))))
print 'redcorte', redcorte
    # Clip the image
os.system("gdalwarp -overwrite -te "+str(redcorte[0][0])+" "+str(redcorte[0][1])+" "\
+str(redcorte[1][0])+" "+str(redcorte[1][1])+\
" %s %sRD_cut.tif"%(imagemtif, caminhoi))
os.system("gdal_translate -b 3 %sRD_cut.tif %sred2_cut.tif" %(caminhoi, caminhoi))
os.system("gdal_translate -b 5 %sRD_cut.tif %snir2_cut.tif" %(caminhoi, caminhoi))
    # NDVI of the clipped RED/NIR bands
c_ndvi = calculate_ndvi ( "%sred2_cut.tif"%caminhoi, "%snir2_cut.tif"%caminhoi)
save_raster ( "%sndvi2_cutdes.tif"%(caminhoi), c_ndvi,\
"%sred2_cut.tif"%caminhoi, "GTiff" )
    # Statistics of the result
src_ds = gdal.Open("%sndvi2_cutdes.tif"%(caminhoi))
srcband = src_ds.GetRasterBand(1)
stats = srcband.GetStatistics(0,1)
M_ndvi = srcband.ReadAsArray().astype(np.float)
print M_ndvi, (stats[0], stats[1], stats[2], stats[3] ), np.histogram(M_ndvi, bins=[-10,0.0,0.2,0.4,0.6,0.8,1.0,10])
return (stats[0], stats[1], stats[2], stats[3] ), np.histogram(M_ndvi, bins=[-10,0.0,0.2,0.4,0.6,0.8,1.0,10])
if __name__ == "__main__":
caminhoi="//home//leandro//NDVI_UTM//s19//"
    # Feature file whose centroids will be used
shapefile = "//home//leandro//NDVI_UTM//s19//grid_comp_S19.shp"
driver = ogr.GetDriverByName("ESRI Shapefile")
dataSource = driver.Open(shapefile, True)
layer = dataSource.GetLayer()
    # Create fields for the statistics outputs
layer.CreateField(ogr.FieldDefn("ndvi_med", ogr.OFTReal),False)
layer.CreateField(ogr.FieldDefn("ndvi_desvP", ogr.OFTReal),False)
layer.CreateField(ogr.FieldDefn("ndvi_neg", ogr.OFTReal),False)
layer.CreateField(ogr.FieldDefn("ndvi_0p0", ogr.OFTReal),False)
layer.CreateField(ogr.FieldDefn("ndvi_0p2", ogr.OFTReal),False)
layer.CreateField(ogr.FieldDefn("ndvi_0p4", ogr.OFTReal),False)
layer.CreateField(ogr.FieldDefn("ndvi_0p6", ogr.OFTReal),False)
layer.CreateField(ogr.FieldDefn("ndvi_0p8", ogr.OFTReal),False)
layer.CreateField(ogr.FieldDefn("ndvi_1p0", ogr.OFTReal),False)
#exit(0)
    # Feature file representing the image grid
shapefile2 = "//home//leandro//NDVI_UTM//s19//rd_comp_S19.shp"
dataSource2 = driver.Open(shapefile2, 0)
layer2 = dataSource2.GetLayer()
c5 = 0
for feature2 in layer2:
c = 0
dentro = 0
geom2 = feature2.GetGeometryRef()
TileId = int(feature2.GetField("TILE_ID"))
for feature in layer:
geom = feature.GetGeometryRef()
ptcenter = (geom.GetX(), geom.GetY())
# print ptcenter
if geom.Intersects(geom2):
dentro+=1
print "dentro",dentro
# print ptcenter
                # Loop that finds all TIF files to be processed
samef=0
for infile in glob.glob(r'/home/leandro/geodados/imagens/RapidEye/brasil/*/fuso_19s/*/*%s*.tif'%TileId):
#Ignora arquivos "browse."
print infile
if infile.find("browse.") == -1:
#Ignora arquivos "udm."
if infile.find("udm.") == -1:
print "c5=",c5
samef+=1
imagemtif = infile
NameT = "_"+str(TileId)+"_"+str(dentro)+"_"+str(samef)
Statistica, ( Stat2, Bins ) = prep_cut_call (caminhoi, imagemtif, ptcenter, NameT)
print caminhoi, imagemtif, ptcenter, NameT, TileId, "(Min, Max, Mean, StDv)", Statistica, Stat2, Bins
feature.SetField("ndvi_med", round(Statistica[2], 3))
feature.SetField("ndvi_DesvP", round(Statistica[3], 3))
feature.SetField("ndvi_neg", int(Stat2[0]))
feature.SetField("ndvi_0p0", int(Stat2[1]))
feature.SetField("ndvi_0p2", int(Stat2[2]))
feature.SetField("ndvi_0p4", int(Stat2[3]))
feature.SetField("ndvi_0p6", int(Stat2[4]))
feature.SetField("ndvi_0p8", int(Stat2[5]))
feature.SetField("ndvi_1p0", int(Stat2[6]))
layer.SetFeature(feature)
c5+=1
#if c5==5:
# exit(0)
c+=1
feature.Destroy()
layer.ResetReading()
print "Pontos: %i sendo %i contidas e %i nao" %( c, dentro, (c-dentro))
if dentro > 0:
print TileId
layer=None
layer2=None
dataSource=None
dataSource2=None
| 39.133333
| 125
| 0.614205
|
0d521455abf779fed1fe67706e333f40bb5a43dc
| 38,189
|
py
|
Python
|
test/vpp_papi_provider.py
|
akanouras/vpp
|
a55a9fc239b5e47487a9489aa5dba9d229502d7e
|
[
"Apache-2.0"
] | 751
|
2017-07-13T06:16:46.000Z
|
2022-03-30T09:14:35.000Z
|
test/vpp_papi_provider.py
|
akanouras/vpp
|
a55a9fc239b5e47487a9489aa5dba9d229502d7e
|
[
"Apache-2.0"
] | 15
|
2018-03-19T15:20:07.000Z
|
2022-03-18T19:48:21.000Z
|
test/vpp_papi_provider.py
|
akanouras/vpp
|
a55a9fc239b5e47487a9489aa5dba9d229502d7e
|
[
"Apache-2.0"
] | 479
|
2017-07-13T06:17:26.000Z
|
2022-03-31T18:20:43.000Z
|
# NB NB NB NB NB NB NB NB NB NB NB
#
# NOTE: The API binary wrappers in this file are in the process of being
# deprecated. DO NOT ADD NEW WRAPPERS HERE. Call the functions using
# named arguments directly instead.
#
import os
import time
from collections import deque
import queue
from six import moves, iteritems
from vpp_papi import VPPApiClient, mac_pton
from hook import Hook
from vpp_ip_route import MPLS_IETF_MAX_LABEL, MPLS_LABEL_INVALID
#
# Dictionary keyed on message name to override default values for
# named parameters
#
defaultmapping = {
'acl_interface_add_del': {'is_add': 1, 'is_input': 1},
'bd_ip_mac_add_del': {'is_add': 1, },
'bfd_udp_add': {'is_authenticated': False, 'bfd_key_id': None,
'conf_key_id': None},
'bfd_udp_auth_activate': {'bfd_key_id': None, 'conf_key_id': None,
'is_delayed': False},
'bier_disp_entry_add_del': {'next_hop_rpf_id': -1, 'next_hop_is_ip4': 1,
'is_add': 1, },
'bier_disp_table_add_del': {'is_add': 1, },
'bier_imp_add': {'is_add': 1, },
'bier_route_add_del': {'is_add': 1, },
'bier_table_add_del': {'is_add': 1, },
'bridge_domain_add_del': {'flood': 1, 'uu_flood': 1, 'forward': 1,
'learn': 1, 'is_add': 1, },
'bvi_delete': {},
'geneve_add_del_tunnel': {'mcast_sw_if_index': 4294967295, 'is_add': 1,
'decap_next_index': 4294967295, },
'input_acl_set_interface': {'ip4_table_index': 4294967295,
'ip6_table_index': 4294967295,
'l2_table_index': 4294967295, },
'ip6_add_del_address_using_prefix': {'is_add': 1, },
'ip6nd_send_router_solicitation': {'irt': 1, 'mrt': 120, },
'ip_add_del_route': {'next_hop_sw_if_index': 4294967295,
'next_hop_weight': 1, 'next_hop_via_label': 1048576,
'classify_table_index': 4294967295, 'is_add': 1, },
'ip_mroute_add_del': {'is_add': 1, },
'ip_neighbor_add_del': {'is_add': 1, },
'ipsec_interface_add_del_spd': {'is_add': 1, },
'ipsec_spd_add_del': {'is_add': 1, },
'ipsec_spd_dump': {'sa_id': 4294967295, },
'ipsec_spd_entry_add_del': {'local_port_stop': 65535,
'remote_port_stop': 65535, 'priority': 100,
'is_outbound': 1,
'is_add': 1, },
'ipsec_tunnel_if_add_del': {'is_add': 1, 'anti_replay': 1, },
'l2_emulation': {'enable': 1, },
'l2fib_add_del': {'is_add': 1, },
'lisp_add_del_adjacency': {'is_add': 1, },
'lisp_add_del_local_eid': {'is_add': 1, },
'lisp_add_del_locator': {'priority': 1, 'weight': 1, 'is_add': 1, },
'lisp_add_del_locator_set': {'is_add': 1, },
'lisp_add_del_remote_mapping': {'is_add': 1, },
'macip_acl_interface_add_del': {'is_add': 1, },
'mpls_ip_bind_unbind': {'is_ip4': 1, 'is_bind': 1, },
'mpls_route_add_del': {'mr_next_hop_sw_if_index': 4294967295,
'mr_next_hop_weight': 1,
'mr_next_hop_via_label': 1048576,
'mr_is_add': 1,
'mr_classify_table_index': 4294967295, },
'mpls_table_add_del': {'is_add': 1, },
'mpls_tunnel_add_del': {'next_hop_sw_if_index': 4294967295,
'next_hop_weight': 1,
'next_hop_via_label': 1048576,
'is_add': 1, },
'output_acl_set_interface': {'ip4_table_index': 4294967295,
'ip6_table_index': 4294967295,
'l2_table_index': 4294967295, },
'pppoe_add_del_session': {'is_add': 1, },
'policer_add_del': {'is_add': 1, 'conform_action': {'type': 1}, },
'set_ipfix_exporter': {'collector_port': 4739, },
'sr_policy_add': {'weight': 1, 'is_encap': 1, },
'sw_interface_add_del_address': {'is_add': 1, },
'sw_interface_ip6nd_ra_prefix': {'val_lifetime': 4294967295,
'pref_lifetime': 4294967295, },
'sw_interface_set_ip_directed_broadcast': {'enable': 1, },
'sw_interface_set_l2_bridge': {'enable': 1, },
'sw_interface_set_mpls_enable': {'enable': 1, },
'sw_interface_set_mtu': {'mtu': [0, 0, 0, 0], },
'sw_interface_set_unnumbered': {'is_add': 1, },
'sw_interface_span_enable_disable': {'state': 1, },
'vxlan_add_del_tunnel': {'mcast_sw_if_index': 4294967295, 'is_add': 1,
'decap_next_index': 4294967295,
'instance': 4294967295, },
'want_bfd_events': {'enable_disable': 1, },
'want_igmp_events': {'enable': 1, },
'want_interface_events': {'enable_disable': 1, },
'want_l2_macs_events': {'enable_disable': 1, 'pid': os.getpid(), },
'want_l2_macs_events2': {'enable_disable': 1, 'pid': os.getpid(), },
}
def as_fn_signature(d):
return ", ".join(f"{k}={v}" for k, v in d.items())
class CliFailedCommandError(Exception):
""" cli command failed."""
class CliSyntaxError(Exception):
""" cli command had a syntax error."""
class UnexpectedApiReturnValueError(Exception):
""" exception raised when the API return value is unexpected """
pass
class VppPapiProvider(object):
"""VPP-api provider using vpp-papi
@property hook: hook object providing before and after api/cli hooks
"""
_zero, _negative = range(2)
def __init__(self, name, test_class, read_timeout):
self.hook = Hook(test_class)
self.name = name
self.test_class = test_class
self._expect_api_retval = self._zero
self._expect_stack = []
# install_dir is a class attribute. We need to set it before
# calling the constructor.
VPPApiClient.apidir = os.getenv('VPP_INSTALL_PATH')
self.vpp = VPPApiClient(logger=test_class.logger,
read_timeout=read_timeout,
use_socket=True,
server_address=test_class.get_api_sock_path())
self._events = queue.Queue()
def __enter__(self):
return self
def assert_negative_api_retval(self):
""" Expect API failure - used with with, e.g.::
with self.vapi.assert_negative_api_retval():
self.vapi.<api call expected to fail>
..
"""
self._expect_stack.append(self._expect_api_retval)
self._expect_api_retval = self._negative
return self
def assert_zero_api_retval(self):
""" Expect API success - used with with, e.g.::
            with self.vapi.assert_zero_api_retval():
self.vapi.<api call expected to succeed>
:note: this is useful only inside another with block
as success is the default expected value
"""
self._expect_stack.append(self._expect_api_retval)
self._expect_api_retval = self._zero
return self
def __exit__(self, exc_type, exc_value, traceback):
self._expect_api_retval = self._expect_stack.pop()
def register_hook(self, hook):
"""Replace hook registration with new hook
:param hook:
"""
self.hook = hook
def collect_events(self):
""" Collect all events from the internal queue and clear the queue. """
result = []
while True:
try:
e = self._events.get(block=False)
result.append(e)
except queue.Empty:
return result
return result
def wait_for_event(self, timeout, name=None):
""" Wait for and return next event. """
if name:
self.test_class.logger.debug("Expecting event '%s' within %ss",
name, timeout)
else:
self.test_class.logger.debug("Expecting event within %ss",
timeout)
try:
e = self._events.get(timeout=timeout)
except queue.Empty:
raise Exception("Event did not occur within timeout")
msgname = type(e).__name__
if name and msgname != name:
raise Exception("Unexpected event received: %s, expected: %s"
% msgname)
self.test_class.logger.debug("Returning event %s:%s" % (name, e))
return e
def __call__(self, name, event):
""" Enqueue event in the internal event queue. """
self.test_class.logger.debug("New event: %s: %s" % (name, event))
self._events.put(event)
def factory(self, name, apifn):
def f(*a, **ka):
fields = apifn._func.msg.fields
# add positional and kw arguments
d = ka
for i, o in enumerate(fields[3:]):
try:
d[o] = a[i]
except BaseException:
break
# Default override
if name in defaultmapping:
for k, v in iteritems(defaultmapping[name]):
if k in d:
continue
d[k] = v
return self.api(apifn, d)
return f
def __getattribute__(self, name):
try:
method = super(VppPapiProvider, self).__getattribute__(name)
except AttributeError:
method = self.factory(name, getattr(self.papi, name))
# lazily load the method so we don't need to call factory
# again for this name.
setattr(self, name, method)
return method
def connect(self):
"""Connect the API to VPP"""
# This might be called before VPP is prepared to listen to the socket
retries = 0
while not os.path.exists(self.test_class.get_api_sock_path()):
time.sleep(0.5)
retries += 1
if retries > 120:
break
self.vpp.connect(self.name[:63])
self.papi = self.vpp.api
self.vpp.register_event_callback(self)
def disconnect(self):
"""Disconnect the API from VPP"""
self.vpp.disconnect()
def api(self, api_fn, api_args, expected_retval=0):
""" Call API function and check it's return value.
Call the appropriate hooks before and after the API call
:param api_fn: API function to call
:param api_args: tuple of API function arguments
:param expected_retval: Expected return value (Default value = 0)
:returns: reply from the API
"""
self.hook.before_api(api_fn.__name__, api_args)
reply = api_fn(**api_args)
if self._expect_api_retval == self._negative:
if hasattr(reply, 'retval') and reply.retval >= 0:
msg = "%s(%s) passed unexpectedly: expected negative " \
"return value instead of %d in %s" % \
(api_fn.__name__, as_fn_signature(api_args),
reply.retval,
moves.reprlib.repr(reply))
self.test_class.logger.info(msg)
raise UnexpectedApiReturnValueError(msg)
elif self._expect_api_retval == self._zero:
if hasattr(reply, 'retval') and reply.retval != expected_retval:
msg = "%s(%s) failed, expected %d return value instead " \
"of %d in %s" % (api_fn.__name__,
as_fn_signature(api_args),
expected_retval, reply.retval,
repr(reply))
self.test_class.logger.info(msg)
raise UnexpectedApiReturnValueError(msg)
else:
raise Exception("Internal error, unexpected value for "
"self._expect_api_retval %s" %
self._expect_api_retval)
self.hook.after_api(api_fn.__name__, api_args)
return reply
def cli_return_response(self, cli):
""" Execute a CLI, calling the before/after hooks appropriately.
Return the reply without examining it
:param cli: CLI to execute
:returns: response object
"""
self.hook.before_cli(cli)
cli += '\n'
r = self.papi.cli_inband(cmd=cli)
self.hook.after_cli(cli)
return r
def cli(self, cli):
""" Execute a CLI, calling the before/after hooks appropriately.
:param cli: CLI to execute
:returns: CLI output
"""
r = self.cli_return_response(cli)
if r.retval == -156:
raise CliSyntaxError(r.reply)
if r.retval != 0:
raise CliFailedCommandError(r.reply)
if hasattr(r, 'reply'):
return r.reply
def ppcli(self, cli):
""" Helper method to print CLI command in case of info logging level.
:param cli: CLI to execute
:returns: CLI output
"""
return cli + "\n" + self.cli(cli)
def ip6nd_send_router_solicitation(self, sw_if_index, irt=1, mrt=120,
mrc=0, mrd=0):
return self.api(self.papi.ip6nd_send_router_solicitation,
{'irt': irt,
'mrt': mrt,
'mrc': mrc,
'mrd': mrd,
'sw_if_index': sw_if_index})
def want_interface_events(self, enable_disable=1):
return self.api(self.papi.want_interface_events,
{'enable_disable': enable_disable,
'pid': os.getpid(), })
def sw_interface_set_mac_address(self, sw_if_index, mac):
return self.api(self.papi.sw_interface_set_mac_address,
{'sw_if_index': sw_if_index,
'mac_address': mac})
def p2p_ethernet_add(self, sw_if_index, remote_mac, subif_id):
"""Create p2p ethernet subinterface
:param sw_if_index: main (parent) interface
:param remote_mac: client (remote) mac address
"""
return self.api(
self.papi.p2p_ethernet_add,
{'parent_if_index': sw_if_index,
'remote_mac': remote_mac,
'subif_id': subif_id})
def p2p_ethernet_del(self, sw_if_index, remote_mac):
"""Delete p2p ethernet subinterface
:param sw_if_index: main (parent) interface
:param remote_mac: client (remote) mac address
"""
return self.api(
self.papi.p2p_ethernet_del,
{'parent_if_index': sw_if_index,
'remote_mac': remote_mac})
def create_vlan_subif(self, sw_if_index, vlan):
"""
:param vlan:
:param sw_if_index:
"""
return self.api(self.papi.create_vlan_subif,
{'sw_if_index': sw_if_index,
'vlan_id': vlan})
def create_loopback(self, mac=''):
"""
:param mac: (Optional)
"""
return self.api(self.papi.create_loopback,
{'mac_address': mac})
def ip_route_dump(self, table_id, is_ip6=False):
return self.api(self.papi.ip_route_dump,
{'table': {
'table_id': table_id,
'is_ip6': is_ip6
}})
def ip_route_v2_dump(self, table_id, is_ip6=False, src=0):
return self.api(self.papi.ip_route_v2_dump,
{
'src': src,
'table': {
'table_id': table_id,
'is_ip6': is_ip6
}
})
def ip_neighbor_add_del(self,
sw_if_index,
mac_address,
ip_address,
is_add=1,
flags=0):
""" Add neighbor MAC to IPv4 or IPv6 address.
:param sw_if_index:
:param mac_address:
        :param ip_address:
:param is_add: (Default value = 1)
:param flags: (Default value = 0/NONE)
"""
return self.api(
self.papi.ip_neighbor_add_del,
{
'is_add': is_add,
'neighbor': {
'sw_if_index': sw_if_index,
'flags': flags,
'mac_address': mac_address,
'ip_address': ip_address
}
}
)
def udp_encap_add(self,
src_ip,
dst_ip,
src_port,
dst_port,
table_id=0):
""" Add a GRE tunnel
:param src_ip:
:param dst_ip:
:param src_port:
:param dst_port:
        :param table_id: (Default value = 0)
"""
return self.api(
self.papi.udp_encap_add,
{
'udp_encap': {
'src_ip': src_ip,
'dst_ip': dst_ip,
'src_port': src_port,
'dst_port': dst_port,
'table_id': table_id
}
})
def udp_encap_del(self, id):
return self.api(self.papi.udp_encap_del, {'id': id})
def udp_encap_dump(self):
return self.api(self.papi.udp_encap_dump, {})
def want_udp_encap_stats(self, enable=1):
return self.api(self.papi.want_udp_encap_stats,
{'enable': enable,
'pid': os.getpid()})
def mpls_route_dump(self, table_id):
return self.api(self.papi.mpls_route_dump,
{'table': {
'mt_table_id': table_id
}})
def mpls_table_dump(self):
return self.api(self.papi.mpls_table_dump, {})
def mpls_table_add_del(
self,
table_id,
is_add=1):
"""
:param table_id
:param is_add: (Default value = 1)
"""
return self.api(
self.papi.mpls_table_add_del,
{'mt_table':
{
'mt_table_id': table_id,
},
'mt_is_add': is_add})
def mpls_route_add_del(self,
table_id,
label,
eos,
eos_proto,
is_multicast,
paths,
is_add,
is_multipath):
""" MPLS Route add/del """
return self.api(
self.papi.mpls_route_add_del,
{'mr_route':
{
'mr_table_id': table_id,
'mr_label': label,
'mr_eos': eos,
'mr_eos_proto': eos_proto,
'mr_is_multicast': is_multicast,
'mr_n_paths': len(paths),
'mr_paths': paths,
},
'mr_is_add': is_add,
'mr_is_multipath': is_multipath})
def mpls_ip_bind_unbind(
self,
label,
prefix,
table_id=0,
ip_table_id=0,
is_bind=1):
"""
"""
return self.api(
self.papi.mpls_ip_bind_unbind,
{'mb_mpls_table_id': table_id,
'mb_label': label,
'mb_ip_table_id': ip_table_id,
'mb_is_bind': is_bind,
'mb_prefix': prefix})
def mpls_tunnel_add_del(
self,
tun_sw_if_index,
paths,
is_add=1,
l2_only=0,
is_multicast=0):
"""
"""
return self.api(
self.papi.mpls_tunnel_add_del,
{'mt_is_add': is_add,
'mt_tunnel':
{
'mt_sw_if_index': tun_sw_if_index,
'mt_l2_only': l2_only,
'mt_is_multicast': is_multicast,
'mt_n_paths': len(paths),
'mt_paths': paths,
}})
def input_acl_set_interface(
self,
is_add,
sw_if_index,
ip4_table_index=0xFFFFFFFF,
ip6_table_index=0xFFFFFFFF,
l2_table_index=0xFFFFFFFF):
"""
:param is_add:
:param sw_if_index:
:param ip4_table_index: (Default value = 0xFFFFFFFF)
:param ip6_table_index: (Default value = 0xFFFFFFFF)
:param l2_table_index: (Default value = 0xFFFFFFFF)
"""
return self.api(
self.papi.input_acl_set_interface,
{'sw_if_index': sw_if_index,
'ip4_table_index': ip4_table_index,
'ip6_table_index': ip6_table_index,
'l2_table_index': l2_table_index,
'is_add': is_add})
def output_acl_set_interface(
self,
is_add,
sw_if_index,
ip4_table_index=0xFFFFFFFF,
ip6_table_index=0xFFFFFFFF,
l2_table_index=0xFFFFFFFF):
"""
:param is_add:
:param sw_if_index:
:param ip4_table_index: (Default value = 0xFFFFFFFF)
:param ip6_table_index: (Default value = 0xFFFFFFFF)
:param l2_table_index: (Default value = 0xFFFFFFFF)
"""
return self.api(
self.papi.output_acl_set_interface,
{'sw_if_index': sw_if_index,
'ip4_table_index': ip4_table_index,
'ip6_table_index': ip6_table_index,
'l2_table_index': l2_table_index,
'is_add': is_add})
def set_ipfix_exporter(
self,
collector_address,
src_address,
path_mtu,
template_interval,
vrf_id=0,
collector_port=4739,
udp_checksum=0):
return self.api(
self.papi.set_ipfix_exporter,
{
'collector_address': collector_address,
'collector_port': collector_port,
'src_address': src_address,
'vrf_id': vrf_id,
'path_mtu': path_mtu,
'template_interval': template_interval,
'udp_checksum': udp_checksum,
})
def mfib_signal_dump(self):
return self.api(self.papi.mfib_signal_dump, {})
def ip_mroute_dump(self, table_id, is_ip6=False):
return self.api(self.papi.ip_mroute_dump,
{'table': {
'table_id': table_id,
'is_ip6': is_ip6
}})
def vxlan_gbp_tunnel_dump(self, sw_if_index=0xffffffff):
return self.api(self.papi.vxlan_gbp_tunnel_dump,
{'sw_if_index': sw_if_index})
def pppoe_add_del_session(
self,
client_ip,
client_mac,
session_id=0,
is_add=1,
decap_vrf_id=0):
"""
:param is_add: (Default value = 1)
:param is_ipv6: (Default value = 0)
:param client_ip:
:param session_id: (Default value = 0)
:param client_mac:
:param decap_vrf_id: (Default value = 0)
"""
return self.api(self.papi.pppoe_add_del_session,
{'is_add': is_add,
'session_id': session_id,
'client_ip': client_ip,
'decap_vrf_id': decap_vrf_id,
'client_mac': client_mac})
def sr_mpls_policy_add(self, bsid, weight, type, segments):
return self.api(self.papi.sr_mpls_policy_add,
{'bsid': bsid,
'weight': weight,
'is_spray': type,
'n_segments': len(segments),
'segments': segments})
def sr_mpls_policy_del(self, bsid):
return self.api(self.papi.sr_mpls_policy_del,
{'bsid': bsid})
def bier_table_add_del(self,
bti,
mpls_label,
is_add=1):
""" BIER Table add/del """
return self.api(
self.papi.bier_table_add_del,
{'bt_tbl_id': {"bt_set": bti.set_id,
"bt_sub_domain": bti.sub_domain_id,
"bt_hdr_len_id": bti.hdr_len_id},
'bt_label': mpls_label,
'bt_is_add': is_add})
def bier_table_dump(self):
return self.api(self.papi.bier_table_dump, {})
def bier_route_add_del(self,
bti,
bp,
paths,
is_add=1,
is_replace=0):
""" BIER Route add/del """
return self.api(
self.papi.bier_route_add_del,
{
'br_route': {
'br_tbl_id': {"bt_set": bti.set_id,
"bt_sub_domain": bti.sub_domain_id,
"bt_hdr_len_id": bti.hdr_len_id},
'br_bp': bp,
'br_n_paths': len(paths),
'br_paths': paths,
},
'br_is_add': is_add,
'br_is_replace': is_replace
})
def bier_route_dump(self, bti):
return self.api(
self.papi.bier_route_dump,
{'br_tbl_id': {"bt_set": bti.set_id,
"bt_sub_domain": bti.sub_domain_id,
"bt_hdr_len_id": bti.hdr_len_id}})
def bier_imp_add(self,
bti,
src,
ibytes,
is_add=1):
""" BIER Imposition Add """
return self.api(
self.papi.bier_imp_add,
{'bi_tbl_id': {"bt_set": bti.set_id,
"bt_sub_domain": bti.sub_domain_id,
"bt_hdr_len_id": bti.hdr_len_id},
'bi_src': src,
'bi_n_bytes': len(ibytes),
'bi_bytes': ibytes})
def bier_imp_del(self, bi_index):
""" BIER Imposition del """
return self.api(
self.papi.bier_imp_del,
{'bi_index': bi_index})
def bier_imp_dump(self):
return self.api(self.papi.bier_imp_dump, {})
def bier_disp_table_add_del(self,
bdti,
is_add=1):
""" BIER Disposition Table add/del """
return self.api(
self.papi.bier_disp_table_add_del,
{'bdt_tbl_id': bdti,
'bdt_is_add': is_add})
def bier_disp_table_dump(self):
return self.api(self.papi.bier_disp_table_dump, {})
def bier_disp_entry_add_del(self,
bdti,
bp,
payload_proto,
next_hop_afi,
next_hop,
next_hop_tbl_id=0,
next_hop_rpf_id=~0,
next_hop_is_ip4=1,
is_add=1):
""" BIER Route add/del """
lstack = []
while (len(lstack) < 16):
lstack.append({})
return self.api(
self.papi.bier_disp_entry_add_del,
{'bde_tbl_id': bdti,
'bde_bp': bp,
'bde_payload_proto': payload_proto,
'bde_n_paths': 1,
'bde_paths': [{'table_id': next_hop_tbl_id,
'rpf_id': next_hop_rpf_id,
'n_labels': 0,
'label_stack': lstack}],
'bde_is_add': is_add})
def bier_disp_entry_dump(self, bdti):
return self.api(
self.papi.bier_disp_entry_dump,
{'bde_tbl_id': bdti})
def ipsec_spd_add_del(self, spd_id, is_add=1):
""" SPD add/del - Wrapper to add or del ipsec SPD
Sample CLI : 'ipsec spd add 1'
:param spd_id - SPD ID to be created in the vpp . mandatory
:param is_add - create (1) or delete(0) SPD (Default 1 - add) .
optional
:returns: reply from the API
"""
return self.api(
self.papi.ipsec_spd_add_del, {
'spd_id': spd_id, 'is_add': is_add})
def ipsec_spds_dump(self):
return self.api(self.papi.ipsec_spds_dump, {})
def ipsec_interface_add_del_spd(self, spd_id, sw_if_index, is_add=1):
""" IPSEC interface SPD add/del - \
Wrapper to associate/disassociate SPD to interface in VPP
Sample CLI : 'set interface ipsec spd GigabitEthernet0/6/0 1'
:param spd_id - SPD ID to associate with the interface . mandatory
:param sw_if_index - Interface Index which needs to ipsec \
association mandatory
:param is_add - add(1) or del(0) association with interface \
(Default 1 - add) . optional
:returns: reply from the API
"""
return self.api(
self.papi.ipsec_interface_add_del_spd,
{'spd_id': spd_id, 'sw_if_index': sw_if_index, 'is_add': is_add})
def ipsec_spd_interface_dump(self, spd_index=None):
return self.api(self.papi.ipsec_spd_interface_dump,
{'spd_index': spd_index if spd_index else 0,
'spd_index_valid': 1 if spd_index else 0})
def ipsec_spd_entry_add_del(self,
spd_id,
sa_id,
local_address_start,
local_address_stop,
remote_address_start,
remote_address_stop,
local_port_start=0,
local_port_stop=65535,
remote_port_start=0,
remote_port_stop=65535,
protocol=0,
policy=0,
priority=100,
is_outbound=1,
is_add=1,
is_ipv6=0,
is_ip_any=0):
""" IPSEC policy SPD add/del -
Wrapper to configure ipsec SPD policy entries in VPP
:param spd_id: SPD ID for the policy
:param local_address_start: local-ip-range start address
:param local_address_stop: local-ip-range stop address
:param remote_address_start: remote-ip-range start address
:param remote_address_stop: remote-ip-range stop address
:param local_port_start: (Default value = 0)
:param local_port_stop: (Default value = 65535)
:param remote_port_start: (Default value = 0)
:param remote_port_stop: (Default value = 65535)
:param protocol: Any(0), AH(51) & ESP(50) protocol (Default value = 0)
:param sa_id: Security Association ID for mapping it to SPD
:param policy: bypass(0), discard(1), resolve(2) or protect(3) action
(Default value = 0)
:param priority: value for the spd action (Default value = 100)
:param is_outbound: flag for inbound(0) or outbound(1)
(Default value = 1)
:param is_add: (Default value = 1)
"""
return self.api(
self.papi.ipsec_spd_entry_add_del,
{
'is_add': is_add,
'entry':
{
'spd_id': spd_id,
'sa_id': sa_id,
'local_address_start': local_address_start,
'local_address_stop': local_address_stop,
'remote_address_start': remote_address_start,
'remote_address_stop': remote_address_stop,
'local_port_start': local_port_start,
'local_port_stop': local_port_stop,
'remote_port_start': remote_port_start,
'remote_port_stop': remote_port_stop,
'protocol': protocol,
'policy': policy,
'priority': priority,
'is_outbound': is_outbound,
}
})
def ipsec_spd_dump(self, spd_id, sa_id=0xffffffff):
return self.api(self.papi.ipsec_spd_dump,
{'spd_id': spd_id,
'sa_id': sa_id})
def ipsec_tunnel_if_add_del(self, local_ip, remote_ip, local_spi,
remote_spi, crypto_alg, local_crypto_key,
remote_crypto_key, integ_alg, local_integ_key,
remote_integ_key, is_add=1, esn=0, salt=0,
anti_replay=1, renumber=0,
udp_encap=0, show_instance=0xffffffff):
return self.api(
self.papi.ipsec_tunnel_if_add_del,
{
'local_ip': local_ip,
'remote_ip': remote_ip,
'local_spi': local_spi,
'remote_spi': remote_spi,
'crypto_alg': crypto_alg,
'local_crypto_key_len': len(local_crypto_key),
'local_crypto_key': local_crypto_key,
'remote_crypto_key_len': len(remote_crypto_key),
'remote_crypto_key': remote_crypto_key,
'integ_alg': integ_alg,
'local_integ_key_len': len(local_integ_key),
'local_integ_key': local_integ_key,
'remote_integ_key_len': len(remote_integ_key),
'remote_integ_key': remote_integ_key,
'is_add': is_add,
'esn': esn,
'anti_replay': anti_replay,
'renumber': renumber,
'show_instance': show_instance,
'udp_encap': udp_encap,
'salt': salt
})
def ipsec_select_backend(self, protocol, index):
return self.api(self.papi.ipsec_select_backend,
{'protocol': protocol, 'index': index})
def ipsec_backend_dump(self):
return self.api(self.papi.ipsec_backend_dump, {})
def punt_socket_register(self, reg, pathname,
header_version=1):
""" Register punt socket """
return self.api(self.papi.punt_socket_register,
{'header_version': header_version,
'punt': reg,
'pathname': pathname})
def punt_socket_deregister(self, reg):
""" Unregister punt socket """
return self.api(self.papi.punt_socket_deregister,
{'punt': reg})
def gbp_endpoint_dump(self):
""" GBP endpoint Dump """
return self.api(self.papi.gbp_endpoint_dump, {})
def gbp_recirc_dump(self):
""" GBP recirc Dump """
return self.api(self.papi.gbp_recirc_dump, {})
def gbp_ext_itf_dump(self):
""" GBP recirc Dump """
return self.api(self.papi.gbp_ext_itf_dump, {})
def gbp_subnet_dump(self):
""" GBP Subnet Dump """
return self.api(self.papi.gbp_subnet_dump, {})
def gbp_contract_dump(self):
""" GBP contract Dump """
return self.api(self.papi.gbp_contract_dump, {})
def gbp_vxlan_tunnel_dump(self):
""" GBP VXLAN tunnel add/del """
return self.api(self.papi.gbp_vxlan_tunnel_dump, {})
def igmp_enable_disable(self, sw_if_index, enable, host):
""" Enable/disable IGMP on a given interface """
return self.api(self.papi.igmp_enable_disable,
{'enable': enable,
'mode': host,
'sw_if_index': sw_if_index})
def igmp_proxy_device_add_del(self, vrf_id, sw_if_index, add):
""" Add/del IGMP proxy device """
return self.api(self.papi.igmp_proxy_device_add_del,
{'vrf_id': vrf_id, 'sw_if_index': sw_if_index,
'add': add})
def igmp_proxy_device_add_del_interface(self, vrf_id, sw_if_index, add):
""" Add/del interface to/from IGMP proxy device """
return self.api(self.papi.igmp_proxy_device_add_del_interface,
{'vrf_id': vrf_id, 'sw_if_index': sw_if_index,
'add': add})
def igmp_listen(self, filter, sw_if_index, saddrs, gaddr):
""" Listen for new (S,G) on specified interface
        :param filter: include/exclude filter mode
        :param sw_if_index: interface sw index
        :param saddrs: list of source ip4 addrs
:param gaddr: group ip4 addr
"""
return self.api(self.papi.igmp_listen,
{
'group':
{
'filter': filter,
'sw_if_index': sw_if_index,
'n_srcs': len(saddrs),
'saddrs': saddrs,
'gaddr': gaddr
}
})
def igmp_clear_interface(self, sw_if_index):
""" Remove all (S,G)s from specified interface
doesn't send IGMP report!
"""
return self.api(
self.papi.igmp_clear_interface, {
'sw_if_index': sw_if_index})
def want_igmp_events(self, enable=1):
return self.api(self.papi.want_igmp_events, {'enable': enable,
'pid': os.getpid()})
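# --- Illustrative usage (not part of the original file) ---
# A comment-only sketch of how a test case typically drives this provider; the
# method names match the code above, while everything else (names, indices,
# MAC bytes) is made up and the surrounding test framework is assumed to exist.
#
#   vapi = VppPapiProvider("test", test_class, read_timeout=14)
#   vapi.connect()
#   vapi.cli("show interface")                  # raises CliFailedCommandError on failure
#   with vapi.assert_negative_api_retval():
#       vapi.sw_interface_set_mac_address(0xdeadbeef, b"\x00" * 6)
#   vapi.disconnect()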
| 36.826422
| 79
| 0.513394
|
83268a3562d671fa42d9905ea25c04cab6227db2
| 16,779
|
py
|
Python
|
myven/lib/python3.8/site-packages/ansible/modules/cloud/centurylink/clc_group.py
|
baltham/dne-dna-code
|
4a13309a790a670d2f07e635c9264a0c29976c6a
|
[
"MIT"
] | 1
|
2021-04-02T08:08:39.000Z
|
2021-04-02T08:08:39.000Z
|
myven/lib/python3.8/site-packages/ansible/modules/cloud/centurylink/clc_group.py
|
baltham/dne-dna-code
|
4a13309a790a670d2f07e635c9264a0c29976c6a
|
[
"MIT"
] | null | null | null |
myven/lib/python3.8/site-packages/ansible/modules/cloud/centurylink/clc_group.py
|
baltham/dne-dna-code
|
4a13309a790a670d2f07e635c9264a0c29976c6a
|
[
"MIT"
] | 1
|
2020-05-03T01:13:16.000Z
|
2020-05-03T01:13:16.000Z
|
#!/usr/bin/python
#
# Copyright (c) 2015 CenturyLink
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: clc_group
short_description: Create/delete Server Groups at Centurylink Cloud
description:
    - Create or delete Server Groups at CenturyLink Cloud
version_added: "2.0"
options:
name:
description:
- The name of the Server Group
required: True
description:
description:
- A description of the Server Group
required: False
parent:
description:
- The parent group of the server group. If parent is not provided, it creates the group at top level.
required: False
location:
description:
- Datacenter to create the group in. If location is not provided, the group gets created in the default datacenter
associated with the account
required: False
state:
description:
- Whether to create or delete the group
default: present
choices: ['present', 'absent']
wait:
description:
- Whether to wait for the tasks to finish before returning.
choices: [ True, False ]
default: True
required: False
requirements:
- python = 2.7
- requests >= 2.5.0
- clc-sdk
author: "CLC Runner (@clc-runner)"
notes:
    - To use this module, it is required to set the below environment variables which enable access to the
Centurylink Cloud
- CLC_V2_API_USERNAME, the account login id for the centurylink cloud
- CLC_V2_API_PASSWORD, the account password for the centurylink cloud
- Alternatively, the module accepts the API token and account alias. The API token can be generated using the
CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
- CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
- CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
- Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
'''
EXAMPLES = '''
# Create a Server Group
---
- name: Create Server Group
hosts: localhost
gather_facts: False
connection: local
tasks:
- name: Create / Verify a Server Group at CenturyLink Cloud
clc_group:
name: My Cool Server Group
parent: Default Group
state: present
register: clc
- name: debug
debug:
var: clc
# Delete a Server Group
---
- name: Delete Server Group
hosts: localhost
gather_facts: False
connection: local
tasks:
- name: Delete / Verify Absent a Server Group at CenturyLink Cloud
clc_group:
name: My Cool Server Group
parent: Default Group
state: absent
register: clc
- name: debug
debug:
var: clc
'''
RETURN = '''
group:
description: The group information
returned: success
type: dict
sample:
{
"changeInfo":{
"createdBy":"service.wfad",
"createdDate":"2015-07-29T18:52:47Z",
"modifiedBy":"service.wfad",
"modifiedDate":"2015-07-29T18:52:47Z"
},
"customFields":[
],
"description":"test group",
"groups":[
],
"id":"bb5f12a3c6044ae4ad0a03e73ae12cd1",
"links":[
{
"href":"/v2/groups/wfad",
"rel":"createGroup",
"verbs":[
"POST"
]
},
{
"href":"/v2/servers/wfad",
"rel":"createServer",
"verbs":[
"POST"
]
},
{
"href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1",
"rel":"self",
"verbs":[
"GET",
"PATCH",
"DELETE"
]
},
{
"href":"/v2/groups/wfad/086ac1dfe0b6411989e8d1b77c4065f0",
"id":"086ac1dfe0b6411989e8d1b77c4065f0",
"rel":"parentGroup"
},
{
"href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/defaults",
"rel":"defaults",
"verbs":[
"GET",
"POST"
]
},
{
"href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/billing",
"rel":"billing"
},
{
"href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/archive",
"rel":"archiveGroupAction"
},
{
"href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/statistics",
"rel":"statistics"
},
{
"href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/upcomingScheduledActivities",
"rel":"upcomingScheduledActivities"
},
{
"href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/horizontalAutoscalePolicy",
"rel":"horizontalAutoscalePolicyMapping",
"verbs":[
"GET",
"PUT",
"DELETE"
]
},
{
"href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/scheduledActivities",
"rel":"scheduledActivities",
"verbs":[
"GET",
"POST"
]
}
],
"locationId":"UC1",
"name":"test group",
"status":"active",
"type":"default"
}
'''
__version__ = '${version}'
import os
from distutils.version import LooseVersion
try:
import requests
except ImportError:
REQUESTS_FOUND = False
else:
REQUESTS_FOUND = True
#
# Requires the clc-python-sdk.
# sudo pip install clc-sdk
#
try:
import clc as clc_sdk
from clc import CLCException
except ImportError:
CLC_FOUND = False
clc_sdk = None
else:
CLC_FOUND = True
from ansible.module_utils.basic import AnsibleModule
class ClcGroup(object):
clc = None
root_group = None
def __init__(self, module):
"""
Construct module
"""
self.clc = clc_sdk
self.module = module
self.group_dict = {}
if not CLC_FOUND:
self.module.fail_json(
msg='clc-python-sdk required for this module')
if not REQUESTS_FOUND:
self.module.fail_json(
msg='requests library is required for this module')
if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
self.module.fail_json(
msg='requests library version should be >= 2.5.0')
self._set_user_agent(self.clc)
def process_request(self):
"""
Execute the main code path, and handle the request
:return: none
"""
location = self.module.params.get('location')
group_name = self.module.params.get('name')
parent_name = self.module.params.get('parent')
group_description = self.module.params.get('description')
state = self.module.params.get('state')
self._set_clc_credentials_from_env()
self.group_dict = self._get_group_tree_for_datacenter(
datacenter=location)
if state == "absent":
changed, group, requests = self._ensure_group_is_absent(
group_name=group_name, parent_name=parent_name)
if requests:
self._wait_for_requests_to_complete(requests)
else:
changed, group = self._ensure_group_is_present(
group_name=group_name, parent_name=parent_name, group_description=group_description)
try:
group = group.data
except AttributeError:
group = group_name
self.module.exit_json(changed=changed, group=group)
@staticmethod
def _define_module_argument_spec():
"""
Define the argument spec for the ansible module
:return: argument spec dictionary
"""
argument_spec = dict(
name=dict(required=True),
description=dict(default=None),
parent=dict(default=None),
location=dict(default=None),
state=dict(default='present', choices=['present', 'absent']),
wait=dict(type='bool', default=True))
return argument_spec
def _set_clc_credentials_from_env(self):
"""
Set the CLC Credentials on the sdk by reading environment variables
:return: none
"""
env = os.environ
v2_api_token = env.get('CLC_V2_API_TOKEN', False)
v2_api_username = env.get('CLC_V2_API_USERNAME', False)
v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
clc_alias = env.get('CLC_ACCT_ALIAS', False)
api_url = env.get('CLC_V2_API_URL', False)
if api_url:
self.clc.defaults.ENDPOINT_URL_V2 = api_url
if v2_api_token and clc_alias:
self.clc._LOGIN_TOKEN_V2 = v2_api_token
self.clc._V2_ENABLED = True
self.clc.ALIAS = clc_alias
elif v2_api_username and v2_api_passwd:
self.clc.v2.SetCredentials(
api_username=v2_api_username,
api_passwd=v2_api_passwd)
else:
return self.module.fail_json(
msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
"environment variables")
def _ensure_group_is_absent(self, group_name, parent_name):
"""
Ensure that group_name is absent by deleting it if necessary
:param group_name: string - the name of the clc server group to delete
:param parent_name: string - the name of the parent group for group_name
:return: changed, group
"""
changed = False
group = []
results = []
if self._group_exists(group_name=group_name, parent_name=parent_name):
if not self.module.check_mode:
group.append(group_name)
result = self._delete_group(group_name)
results.append(result)
changed = True
return changed, group, results
def _delete_group(self, group_name):
"""
Delete the provided server group
:param group_name: string - the server group to delete
:return: none
"""
response = None
group, parent = self.group_dict.get(group_name)
try:
response = group.Delete()
except CLCException as ex:
self.module.fail_json(msg='Failed to delete group :{0}. {1}'.format(
group_name, ex.response_text
))
return response
def _ensure_group_is_present(
self,
group_name,
parent_name,
group_description):
"""
Checks to see if a server group exists, creates it if it doesn't.
:param group_name: the name of the group to validate/create
:param parent_name: the name of the parent group for group_name
:param group_description: a short description of the server group (used when creating)
:return: (changed, group) -
changed: Boolean- whether a change was made,
group: A clc group object for the group
"""
if not self.root_group:
raise AssertionError("Implementation Error: Root Group not set")
parent = parent_name if parent_name is not None else self.root_group.name
description = group_description
changed = False
group = group_name
parent_exists = self._group_exists(group_name=parent, parent_name=None)
child_exists = self._group_exists(
group_name=group_name,
parent_name=parent)
if parent_exists and child_exists:
group, parent = self.group_dict[group_name]
changed = False
elif parent_exists and not child_exists:
if not self.module.check_mode:
group = self._create_group(
group=group,
parent=parent,
description=description)
changed = True
else:
self.module.fail_json(
msg="parent group: " +
parent +
" does not exist")
return changed, group
def _create_group(self, group, parent, description):
"""
Create the provided server group
:param group: clc_sdk.Group - the group to create
:param parent: clc_sdk.Parent - the parent group for {group}
:param description: string - a text description of the group
:return: clc_sdk.Group - the created group
"""
response = None
(parent, grandparent) = self.group_dict[parent]
try:
response = parent.Create(name=group, description=description)
except CLCException as ex:
self.module.fail_json(msg='Failed to create group :{0}. {1}'.format(
group, ex.response_text))
return response
def _group_exists(self, group_name, parent_name):
"""
Check to see if a group exists
:param group_name: string - the group to check
:param parent_name: string - the parent of group_name
:return: boolean - whether the group exists
"""
result = False
if group_name in self.group_dict:
(group, parent) = self.group_dict[group_name]
if parent_name is None or parent_name == parent.name:
result = True
return result
def _get_group_tree_for_datacenter(self, datacenter=None):
"""
Walk the tree of groups for a datacenter
:param datacenter: string - the datacenter to walk (ex: 'UC1')
:return: a dictionary of groups and parents
"""
self.root_group = self.clc.v2.Datacenter(
location=datacenter).RootGroup()
return self._walk_groups_recursive(
parent_group=None,
child_group=self.root_group)
def _walk_groups_recursive(self, parent_group, child_group):
"""
Walk a parent-child tree of groups, starting with the provided child group
:param parent_group: clc_sdk.Group - the parent group to start the walk
:param child_group: clc_sdk.Group - the child group to start the walk
:return: a dictionary of groups and parents
"""
result = {str(child_group): (child_group, parent_group)}
groups = child_group.Subgroups().groups
if len(groups) > 0:
for group in groups:
if group.type != 'default':
continue
result.update(self._walk_groups_recursive(child_group, group))
return result
def _wait_for_requests_to_complete(self, requests_lst):
"""
Waits until the CLC requests are complete if the wait argument is True
:param requests_lst: The list of CLC request objects
:return: none
"""
if not self.module.params['wait']:
return
for request in requests_lst:
request.WaitUntilComplete()
for request_details in request.requests:
if request_details.Status() != 'succeeded':
self.module.fail_json(
msg='Unable to process group request')
@staticmethod
def _set_user_agent(clc):
if hasattr(clc, 'SetRequestsSession'):
agent_string = "ClcAnsibleModule/" + __version__
ses = requests.Session()
ses.headers.update({"Api-Client": agent_string})
ses.headers['User-Agent'] += " " + agent_string
clc.SetRequestsSession(ses)
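# Illustrative sketch (not part of the original module): the group tree returned by
# ClcGroup._get_group_tree_for_datacenter() maps each group name to a (group, parent)
# tuple, with None as the parent of the root group. A hypothetical helper that prints
# that structure might look like this.
def example_print_group_tree(group_dict):
    for name, (_group, parent) in group_dict.items():
        print(name, '->', parent.name if parent else '(root)')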
def main():
"""
The main function. Instantiates the module and calls process_request.
:return: none
"""
module = AnsibleModule(
argument_spec=ClcGroup._define_module_argument_spec(),
supports_check_mode=True)
clc_group = ClcGroup(module)
clc_group.process_request()
if __name__ == '__main__':
main()
| 32.707602
| 120
| 0.581799
|
2f8426b3618682e3773a391edbc46b450b5d5969
| 10,295
|
py
|
Python
|
pytest_subprocessor/__init__.py
|
AnyVisionltd/automation-infra
|
e94c10224b0711160c9fc361045b8f2cfc9c4ca8
|
[
"MIT"
] | 6
|
2021-03-10T14:02:42.000Z
|
2021-12-08T20:17:21.000Z
|
pytest_subprocessor/__init__.py
|
solganik/automation-infra
|
66379f7366eaa52f412a9150a018ea17ddcdf59b
|
[
"MIT"
] | 5
|
2021-05-10T18:00:07.000Z
|
2022-03-12T00:36:54.000Z
|
pytest_subprocessor/__init__.py
|
solganik/automation-infra
|
66379f7366eaa52f412a9150a018ea17ddcdf59b
|
[
"MIT"
] | 5
|
2021-03-10T14:02:11.000Z
|
2021-07-16T20:58:13.000Z
|
import copy
import queue
import threading
from plumbum import local
import sys
import uuid
from concurrent.futures.thread import ThreadPoolExecutor
from datetime import datetime
import json
import logging
import os
import subprocess
from concurrent import futures
from json import JSONDecodeError
import pytest
from _pytest.outcomes import Exit
from _pytest.runner import CallInfo
from infra.utils.plugin_logging import InfraFormatter
from .worker import Worker
SESSION_ID_ENV_VAR = "HABERTEST_SESSION_ID"
ITEM_ID_ENV_VAR = "HABERTEST_ITEM_ID"
SERIALIZED_REPORT_LOCATION = '/tmp/habertest_infra_reports'
@pytest.hookimpl(tryfirst=True)
def pytest_cmdline_parse(pluginmanager, args):
now = datetime.now().strftime("%Y_%m_%d__%H%M_%S")
if not any(['--logs-dir' in arg for arg in args]):
args.append(f'--logs-dir=logs/{now}')
if not any(['--html' in arg for arg in args]):
args.extend([f'--html=logs/{now}/report.html', '--self-contained-html'])
def pytest_addoption(parser):
group = parser.getgroup("pytest_subprocessor")
group.addoption("--num-parallel", type=int, default=1,
help="number of resourcess to provision and run tests against in parallel")
group.addoption("--logs-dir", action="store", default=f'logs/{datetime.now().strftime("%Y_%m_%d__%H%M_%S")}', help="custom directory to store logs in")
group.addoption("--sf", dest="secondary_flags", action="append", default=[],
help='flags to pass to the secondary pytest call (after provisioning).'
'Can be passed individually like --sf=-flag1 --sf=--flag2 or with escaped " marks like '
'--sf=\\"--flag1 value1 --flag2\\"')
def pytest_addhooks(pluginmanager):
from pytest_subprocessor import hooks
pluginmanager.add_hookspecs(hooks)
@pytest.hookimpl(tryfirst=True)
def pytest_configure(config):
config.option.assertmode = 'rewrite'
configure_logging(config)
def configure_logging(config):
session_logs_dir = config.getoption("--logs-dir")
os.makedirs(session_logs_dir, exist_ok=True)
config.option.logger_logsdir = session_logs_dir
main_process_logs_dir = f'{config.option.logger_logsdir}/infra'
os.makedirs(main_process_logs_dir, exist_ok=True)
root_logger = logging.getLogger()
root_logger.setLevel(logging.DEBUG)
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.INFO)
console_handler.setFormatter(InfraFormatter())
root_logger.addHandler(console_handler)
debug_file_handler = logging.FileHandler(f'{main_process_logs_dir}/debug.log', mode='w')
debug_file_handler.setLevel(logging.DEBUG)
debug_file_handler.setFormatter(InfraFormatter())
root_logger.addHandler(debug_file_handler)
info_file_handler = logging.FileHandler(f'{main_process_logs_dir}/info.log', mode='w')
info_file_handler.setLevel(logging.INFO)
info_file_handler.setFormatter(InfraFormatter())
root_logger.addHandler(info_file_handler)
def pytest_sessionstart(session):
session.tests_queue = queue.Queue()
def pytest_exception_interact(node, call, report):
logging.error(call)
os._exit(666)
@pytest.hookimpl(tryfirst=True)
def pytest_runtestloop(session):
"""
This is the (interesting part of the) pytest implementation of runtest_loop:
├── pytest_runtestloop
│ └── pytest_runtest_protocol
│ ├── pytest_runtest_logstart
│ ├── pytest_runtest_setup
│ │ └── pytest_fixture_setup
│ ├── pytest_runtest_makereport
│ ├── pytest_runtest_logreport
│ │ └── pytest_report_teststatus
│ ├── pytest_runtest_call
│ │ └── pytest_pyfunc_call
│ ├── pytest_runtest_teardown
│ │ └── pytest_fixture_post_finalizer
│ └── pytest_runtest_logfinish
    In this plugin, we implement our own version of runtest_loop which runs before the pytest default implementation
    (tryfirst=True), and which builds and calls pytest commands for the gathered tests in a subprocess.
    After the subprocess finishes, the pytest default implementation hook is called, which triggers the runtest_protocol
    hook, which we implement further down in this module.
"""
if session.testsfailed and not session.config.option.continue_on_collection_errors:
raise session.Interrupted("%d errors during collection" % session.testsfailed)
if session.config.option.collectonly:
return True
workers = list()
for i in range(session.config.option.num_parallel):
worker = Worker(session)
workers.append(worker)
session.config.hook.pytest_build_items_iter(session=session, workers=workers)
for worker in workers:
logging.debug(f"starting worker {worker.id}")
worker.start()
for fut in futures.as_completed([worker.completion for worker in workers]):
logging.debug("waiting for futures to complete")
try:
fut.result()
for i, item in enumerate(fut.worker.handled_items):
nextitem = fut.worker.handled_items[i + 1] if i + 1 < len(fut.worker.handled_items) else None
session.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem)
except:
raise
if session.shouldfail:
raise session.Failed(session.shouldfail)
if session.shouldstop:
raise session.Interrupted(session.shouldstop)
    # TODO: add another pytest command which only runs post-mortem hooks
    # At this point the tests have run and written their serialized reports to disk under /tmp/habertest...
# using pytest_report_from_serializable
return True
@pytest.hookimpl(tryfirst=True)
def pytest_runtest_protocol(item, nextitem):
"""
This hook is a firstResult hook, which means it stops calling other hooks when 1 hook implementation returns a
    value. We return True at the end of this implementation, and we run first (tryfirst=True), therefore pytest's
    implementation of this hook WILL NOT BE TRIGGERED.
    In pytest's implementation, the hook calls the 2 log hooks (logstart and logfinish) which we call here as well,
    but in the middle it (basically) calls call_and_report for each item.
    We have already run the test items in our runtest_loop implemented above in this module, so the items already
    ran and all we need to do is collect the serialized reports.
REMINDER: the tests have already run on a subprocess in the runtest_loop.
"""
item.ihook.pytest_runtest_logstart(nodeid=item.nodeid, location=item.location)
# here all we really need to do is collect the reports already written to disk by the child
# which ran the actual tests..
run_fictitious_testprotocol(item)
item.ihook.pytest_runtest_logfinish(nodeid=item.nodeid, location=item.location)
return True
def run_fictitious_testprotocol(item):
"""
REMINDER: Tests have already run on subprocess. Here we just need to convince the current pytest process that
the tests have already run and to collect their reports.
"""
call = CallInfo.from_call(
lambda: True, when="setup", reraise=(Exit,)
)
item.ihook.pytest_runtest_makereport(item=item, call=call)
call = CallInfo.from_call(
lambda: True, when="call", reraise=(Exit,)
)
item.ihook.pytest_runtest_makereport(item=item, call=call)
call = CallInfo.from_call(
lambda: True, when="teardown", reraise=(Exit,)
)
item.ihook.pytest_runtest_makereport(item=item, call=call)
@pytest.hookimpl(tryfirst=True)
def pytest_runtest_makereport(item, call):
"""
    REMINDER: We already invoked the tests in a subprocess. After the tests ran, they serialized their reports onto
    the disk, so here we just need to deserialize the existing reports and attach them to the existing "test run".
"""
report = report_from_disk(item, call)
if not report:
return None
    # After getting the report, we need to log it and report the teststatus so that the current pytest session
    # will acknowledge that the tests ran.
    # We need to call these hooks because they are called by pytest's implementation of runtest_protocol, which we
    # overrode, such that if we don't call the hooks ourselves no one will.
item.ihook.pytest_runtest_logreport(report=report)
item.ihook.pytest_report_teststatus(report=report, config=item.config)
return report
def report_from_disk(item, call):
"""The tests have been run via pytest subprocess, which writes serialized reports to the disk.
    All that's left to do is read from the disk and deserialize :) """
report_file = serialized_path(item, call) # f'{SERIALIZED_REPORT_LOCATION}/{item.nodeid.replace("/", "-").replace(":", "..")}.{call.when}.report'
if not os.path.exists(report_file):
        # This probably means the subprocess test run froze, timed out, or died unexpectedly:
        logging.error(f"report: {report_file} doesn't exist")
item.teardown()
return
with open(report_file, 'r') as f:
report_ser = json.load(f)
report = item.config.hook.pytest_report_from_serializable(data=report_ser, config=item.config)
os.remove(report_file)
return report
def serialized_path(item, call):
item_id = getattr(item, 'id', None) or item.config.option.item_id
return f'{SERIALIZED_REPORT_LOCATION}/{item_id}.{call.when}.report'
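# Illustrative sketch (not part of this module): the child pytest process is expected to
# persist each report so that report_from_disk() above can find it. A child-side plugin
# might serialize and write a report roughly like this; the function name is hypothetical
# and item_id is assumed to arrive via the --item-id option / ITEM_ID_ENV_VAR.
def example_child_side_write_report(report, config, item_id):
    os.makedirs(SERIALIZED_REPORT_LOCATION, exist_ok=True)
    data = config.hook.pytest_report_to_serializable(config=config, report=report)
    with open(f'{SERIALIZED_REPORT_LOCATION}/{item_id}.{report.when}.report', 'w') as f:
        json.dump(data, f)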
@pytest.hookimpl(trylast=True)
def pytest_build_items_iter(session, workers):
logging.debug("subprocessor building items_iter. This is the trivial case and shouldn't usually happen..")
session.tests_queue.queue = queue.deque(session.items)
@pytest.hookimpl(trylast=True)
def pytest_get_next_item(session, worker):
logging.debug("trying to get_next_item via trivial implementation")
try:
item = session.tests_queue.get(block=False)
logging.debug(f"trivial implementation returning item {os.path.split(item.nodeid)[1]}.. "
f"This shouldn't usually happen")
return item
except queue.Empty:
return None
@pytest.hookimpl(tryfirst=True)
def pytest_keyboard_interrupt(excinfo):
logging.info("exiting from ctrl+c")
os._exit(9)
| 39.293893
| 155
| 0.71763
|
deeaa5004302a879ac7a5a538c4202e680342ce4
| 6,735
|
py
|
Python
|
models/convolution_lstm.py
|
yaorong0921/Driver-Intention-Prediction
|
baeb3e15dc75f113cbf03d58cb0dc66bd6cc8b39
|
[
"MIT"
] | 10
|
2020-08-19T11:24:48.000Z
|
2022-03-06T11:24:41.000Z
|
models/convolution_lstm.py
|
Harlan728/Driver-Intention-Prediction
|
ec1d435895ab234b2fff5aef7668577773122354
|
[
"MIT"
] | 3
|
2021-03-16T03:15:39.000Z
|
2022-02-23T22:22:00.000Z
|
models/convolution_lstm.py
|
Harlan728/Driver-Intention-Prediction
|
ec1d435895ab234b2fff5aef7668577773122354
|
[
"MIT"
] | 3
|
2021-03-07T20:46:23.000Z
|
2022-03-28T14:10:48.000Z
|
import torch
import torch.nn as nn
from torch.autograd import Variable
class ConvLSTMCell(nn.Module):
def __init__(self, input_channels, hidden_channels, kernel_size):
super(ConvLSTMCell, self).__init__()
assert hidden_channels % 2 == 0
self.input_channels = input_channels
self.hidden_channels = hidden_channels
self.kernel_size = kernel_size
self.num_features = 4
self.padding = int((kernel_size - 1) / 2)
self.Wxi = nn.Conv2d(self.input_channels, self.hidden_channels, self.kernel_size, 1, self.padding, bias=True)
self.Whi = nn.Conv2d(self.hidden_channels, self.hidden_channels, self.kernel_size, 1, self.padding, bias=False)
self.Wxf = nn.Conv2d(self.input_channels, self.hidden_channels, self.kernel_size, 1, self.padding, bias=True)
self.Whf = nn.Conv2d(self.hidden_channels, self.hidden_channels, self.kernel_size, 1, self.padding, bias=False)
self.Wxc = nn.Conv2d(self.input_channels, self.hidden_channels, self.kernel_size, 1, self.padding, bias=True)
self.Whc = nn.Conv2d(self.hidden_channels, self.hidden_channels, self.kernel_size, 1, self.padding, bias=False)
self.Wxo = nn.Conv2d(self.input_channels, self.hidden_channels, self.kernel_size, 1, self.padding, bias=True)
self.Who = nn.Conv2d(self.hidden_channels, self.hidden_channels, self.kernel_size, 1, self.padding, bias=False)
self.Wci = None
self.Wcf = None
self.Wco = None
    def forward(self, x, h, c):
        # input and forget gates, with peephole connections to the previous cell state
        ci = torch.sigmoid(self.Wxi(x) + self.Whi(h) + c * self.Wci)
        cf = torch.sigmoid(self.Wxf(x) + self.Whf(h) + c * self.Wcf)
        # new cell state: forget-gated old state plus input-gated candidate
        cc = cf * c + ci * torch.tanh(self.Wxc(x) + self.Whc(h))
        # output gate peeks at the new cell state; hidden state is its gated tanh
        co = torch.sigmoid(self.Wxo(x) + self.Who(h) + cc * self.Wco)
        ch = co * torch.tanh(cc)
        return ch, cc
def init_hidden(self, batch_size, hidden, shape):
if self.Wci is None:
self.Wci = Variable(torch.zeros(1, hidden, shape[0], shape[1])).cuda()
self.Wcf = Variable(torch.zeros(1, hidden, shape[0], shape[1])).cuda()
self.Wco = Variable(torch.zeros(1, hidden, shape[0], shape[1])).cuda()
else:
assert shape[0] == self.Wci.size()[2], 'Input Height Mismatched!'
assert shape[1] == self.Wci.size()[3], 'Input Width Mismatched!'
return (Variable(torch.zeros(batch_size, hidden, shape[0], shape[1])).cuda(),
Variable(torch.zeros(batch_size, hidden, shape[0], shape[1])).cuda())
class ConvLSTM(nn.Module):
    # input_channels corresponds to the first input feature map
    # hidden_channels is a list with the hidden channel size of each successive ConvLSTM layer
def __init__(self, input_channels, hidden_channels, kernel_size, step=1, effective_step=[1]):
super(ConvLSTM, self).__init__()
self.input_channels = [input_channels] + hidden_channels
self.hidden_channels = hidden_channels
self.kernel_size = kernel_size
self.num_layers = len(hidden_channels)
self.step = step
self.effective_step = effective_step
self._all_layers = []
for i in range(self.num_layers):
name = 'cell{}'.format(i)
cell = ConvLSTMCell(self.input_channels[i], self.hidden_channels[i], self.kernel_size)
setattr(self, name, cell)
self._all_layers.append(cell)
def forward(self, input):
internal_state = []
outputs = []
for step in range(self.step):
x = input[step]
for i in range(self.num_layers):
# all cells are initialized in the first step
name = 'cell{}'.format(i)
if step == 0:
bsize, _, height, width = x.size()
(h, c) = getattr(self, name).init_hidden(batch_size=bsize, hidden=self.hidden_channels[i],
shape=(height, width))
internal_state.append((h, c))
# do forward
(h, c) = internal_state[i]
x, new_c = getattr(self, name)(x, h, c)
internal_state[i] = (x, new_c)
# only record effective steps
if step in self.effective_step:
outputs.append(x)
return outputs, (x, new_c)
class Decoder(nn.Module):
def __init__(self, num_step, num_channel):
super(Decoder, self).__init__()
self._all_layers = []
self.num_step = num_step
self.num_channel = num_channel
for i in range(self.num_step):
name = 'conv{}'.format(i)
conv = nn.Conv2d(self.num_channel, 3, 1, stride=1, padding=0)
setattr(self, name, conv)
self._all_layers.append(conv)
def forward(self, input):
output = []
for i in range(self.num_step):
name = 'conv{}'.format(i)
y = getattr(self, name)(input[i])
output.append(y)
return output
class Encoder(nn.Module):
def __init__(self, hidden_channels, sample_size, sample_duration):
super(Encoder, self).__init__()
self.convlstm = ConvLSTM(input_channels=3, hidden_channels=hidden_channels, kernel_size=3, step=sample_duration,
effective_step=[sample_duration-1])
################## W/o output decoder
self.conv2 = nn.Conv2d(32, 3, 1, stride=1, padding=0)
################## With output decoder
# self.decoder = Decoder(sample_duration, 32)
def forward(self, x):
b,t,c,h,w = x.size()
x = x.permute(1,0,2,3,4)
output_convlstm, _ = self.convlstm(x)
# x = self.decoder(output_convlstm)
x = self.conv2(output_convlstm[0])
return x
def test():
#if __name__ == '__main__':
# gradient check
convlstm = ConvLSTM(input_channels=48, hidden_channels=[128, 64, 64, 32, 32], kernel_size=3, step=5,
effective_step=[2,4]).cuda()
loss_fn = torch.nn.MSELoss()
input = Variable(torch.randn(1, 48, 64, 64)).cuda()
target = Variable(torch.randn(1, 32, 64, 64)).double().cuda()
output = convlstm(input)
output = output[0][0].double()
res = torch.autograd.gradcheck(loss_fn, (output, target), eps=1e-6, raise_exception=True)
print(res)
def test_convlstm():
"""Constructs a convlstm model.
"""
model = encoder(hidden_channels=[128, 64, 64, 32], sample_size=[112,112], sample_duration=4).cuda()
input = Variable(torch.randn(20, 3, 4, 112, 112)).cuda()
output = model(input)
print(output.size())
def encoder(**kwargs):
"""Constructs a ResNet-101 model.
"""
model = Encoder(**kwargs)
return model
#if __name__ == '__main__':
# test_convlstm()
| 40.329341
| 120
| 0.610245
|
b04bfb7d75c07ec32bfa969a3d0c5ca5465509f6
| 2,741
|
py
|
Python
|
venv/lib/python3.6/site-packages/ansible_collections/f5networks/f5_modules/tests/unit/mock/procenv.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 1
|
2020-01-22T13:11:23.000Z
|
2020-01-22T13:11:23.000Z
|
venv/lib/python3.6/site-packages/ansible_collections/f5networks/f5_modules/tests/unit/mock/procenv.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 12
|
2020-02-21T07:24:52.000Z
|
2020-04-14T09:54:32.000Z
|
venv/lib/python3.6/site-packages/ansible_collections/f5networks/f5_modules/tests/unit/mock/procenv.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | null | null | null |
# (c) 2016, Matt Davis <mdavis@ansible.com>
# (c) 2016, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
import json
from contextlib import contextmanager
from io import BytesIO, StringIO
from ansible_collections.f5networks.f5_modules.tests.unit.compat import unittest
from ansible.module_utils.six import PY3
from ansible.module_utils._text import to_bytes
@contextmanager
def swap_stdin_and_argv(stdin_data='', argv_data=tuple()):
"""
context manager that temporarily masks the test runner's values for stdin and argv
"""
real_stdin = sys.stdin
real_argv = sys.argv
if PY3:
fake_stream = StringIO(stdin_data)
fake_stream.buffer = BytesIO(to_bytes(stdin_data))
else:
fake_stream = BytesIO(to_bytes(stdin_data))
try:
sys.stdin = fake_stream
sys.argv = argv_data
yield
finally:
sys.stdin = real_stdin
sys.argv = real_argv
@contextmanager
def swap_stdout():
"""
context manager that temporarily replaces stdout for tests that need to verify output
"""
old_stdout = sys.stdout
if PY3:
fake_stream = StringIO()
else:
fake_stream = BytesIO()
try:
sys.stdout = fake_stream
yield fake_stream
finally:
sys.stdout = old_stdout
class ModuleTestCase(unittest.TestCase):
def setUp(self, module_args=None):
if module_args is None:
module_args = {'_ansible_remote_tmp': '/tmp', '_ansible_keep_remote_files': False}
args = json.dumps(dict(ANSIBLE_MODULE_ARGS=module_args))
# unittest doesn't have a clean place to use a context manager, so we have to enter/exit manually
self.stdin_swap = swap_stdin_and_argv(stdin_data=args)
self.stdin_swap.__enter__()
def tearDown(self):
# unittest doesn't have a clean place to use a context manager, so we have to enter/exit manually
self.stdin_swap.__exit__(None, None, None)
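# Illustrative sketch (not part of the original file): how swap_stdout can be used in a
# test to capture printed output. The test class and method names are hypothetical.
class ExampleStdoutCaptureTest(unittest.TestCase):
    def test_print_is_captured(self):
        with swap_stdout() as fake_out:
            print("hello")
        captured = fake_out.getvalue()
        if not PY3:
            captured = captured.decode('utf-8')
        self.assertIn("hello", captured)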
| 30.120879
| 105
| 0.711054
|
32fa649c1f69c9f03c3aac81b7234c6d26939624
| 9,426
|
py
|
Python
|
ansible_navigator/ui_framework/colorize.py
|
NilashishC/ansible-navigator
|
8f6e546ea29308786b89248ac778b7af15f50c4b
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
ansible_navigator/ui_framework/colorize.py
|
NilashishC/ansible-navigator
|
8f6e546ea29308786b89248ac778b7af15f50c4b
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
ansible_navigator/ui_framework/colorize.py
|
NilashishC/ansible-navigator
|
8f6e546ea29308786b89248ac778b7af15f50c4b
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
""" Tokenize and color text
"""
import json
import logging
import os
import re
import colorsys
import curses
import functools
from itertools import chain
from ..tm_tokenize.grammars import Grammars
from ..tm_tokenize.tokenize import tokenize
from .curses_defs import CursesLine
from .curses_defs import CursesLinePart
CURSES_STYLES = {
0: None,
1: getattr(curses, "A_BOLD", None),
2: getattr(curses, "A_DIM", None),
3: getattr(curses, "A_ITALIC", None),
4: getattr(curses, "A_UNDERLINE", None),
5: getattr(curses, "A_BLINK", None),
6: getattr(curses, "A_BLINK", None),
7: getattr(curses, "A_REVERSE", None),
8: getattr(curses, "A_INVIS", None),
}
THEME = "dark_vs.json"
class ColorSchema:
"""Simple holer for the schema (theme)"""
# pylint: disable=too-few-public-methods
def __init__(self, schema):
"""start
:param schema: The color scheme, theme to use
:type schema: dict
"""
self._schema = schema
@functools.lru_cache(maxsize=None)
def get_color(self, scope):
"""Get a color from the schema, from most specific to least
:param scope: The scope, aka format
:type scope: str
:return: the color in rgb format or None
:rtype: tuple or None
"""
for name in reversed(scope):
for parts in range(0, len(name.split("."))):
prop = name.split()[-1].rsplit(".", parts)[0]
color = next(
(tc for tc in self._schema["tokenColors"] if prop in to_list(tc["scope"])), None
)
if color:
foreground = color.get("settings", {}).get("foreground", None)
return hex_to_rgb(foreground)
return None
class Colorize:
"""Functionality for coloring"""
# pylint: disable=too-few-public-methods
def __init__(self, share_dir):
self._logger = logging.getLogger(__name__)
self._schema = None
self._theme_dir = os.path.join(share_dir, "themes")
self._grammar_dir = os.path.join(share_dir, "grammar")
self._grammars = Grammars(self._grammar_dir)
self._load()
def _load(self):
with open(os.path.join(self._theme_dir, THEME)) as data_file:
self._schema = ColorSchema(json.load(data_file))
@functools.lru_cache(maxsize=100)
def render(self, doc, scope):
"""render some text into columns and colors
:param doc: The thing to tokenize and color
:type doc: str
:param scope: The scope, aka the format of the string
:type scope: str
:return: A list of lines, each a list of dicts
:rtype: list
"""
if scope == "source.ansi":
return [ansi_to_curses(l) for l in doc.splitlines()]
try:
compiler = self._grammars.compiler_for_scope(scope)
except KeyError:
compiler = None
if compiler:
state = compiler.root_state
lines = []
for line_idx, line in enumerate(doc.splitlines()):
first_line = line_idx == 0
state, regions = tokenize(compiler, state, line, first_line)
lines.append((regions, line))
return columns_and_colors(lines, self._schema)
res = [[{"column": 0, "chars": l, "color": None}] for l in doc.splitlines()]
return res
def to_list(thing):
"""convert something to a list if necessary
:param thing: Maybe a list?
:type thing: str or list
:return: listified thing
:rtype: list
"""
if not isinstance(thing, list):
return [thing]
return thing
def hex_to_rgb(value):
"""Convert a hex value to RGB
:param value: the hex color
:type value: string
:returns: rgb tuple
:rtype: tuple
"""
if value:
value = value.lstrip("#")
lenv = len(value)
return tuple(int(value[i : i + lenv // 3], 16) for i in range(0, lenv, lenv // 3))
return None
def hex_to_rgb_curses(value):
"""Convert a hex color to RGB scaled to 1000
b/c that's what curses needs
    :param value: a hex color string
    :type value: str
:return: The colors scaled to 1000
:rtype: tuple
"""
scale = lambda x: int(x * 1000 / 255)
red, green, blue = hex_to_rgb(value)
return (scale(red), scale(green), scale(blue))
def rgb_to_ansi(red: int, green: int, blue: int, colors: int) -> int:
"""Convert an RGB color to an terminal color
:param red: the red component
:type red: int
:param green: the green component
:type green: int
:param blue: the blue component
:type blue: int
    :param colors: The number of colors supported by the terminal
:type colors: int
"""
# https://github.com/Qix-/color-convert/blob/master/conversions.js
if colors == 256:
if red == green and green == blue:
if red < 8:
ansi = 16
if red > 248:
ansi = 231
ansi = round(((red - 8) / 247) * 24) + 232
else:
ansi = (
16
+ (36 * round(red / 255 * 5))
+ (6 * round(green / 255 * 5))
+ round(blue / 255 * 5)
)
elif colors == 16:
value = colorsys.rgb_to_hsv(red, green, blue)[2]
value = round(value / 50)
if value == 0:
ansi = 30
else:
ansi = (round(blue / 255) << 2) | (round(green / 255) << 1) | round(red / 255)
if value == 2:
ansi += 8
else: # colors == 8, sorry
ansi = (round(blue / 255) << 2) | (round(green / 255) << 1) | round(red / 255)
return ansi
def columns_and_colors(lines, schema):
"""Convert to colors and columns
    :param lines: A list of regions (line parts) and the line
    :type lines: list of lines, each a ([regions], line)
    :param schema: An instance of the ColorSchema
:type schema: ColorSchema
"""
result = []
for line in lines:
column = 0
char_dicts = [{"chars": c, "color": None} for c in line[1]]
for region in line[0]:
color = schema.get_color(region.scope)
if color:
for idx in range(region.start, region.end):
char_dicts[idx]["color"] = color
if char_dicts:
grouped = [char_dicts.pop(0)]
while char_dicts:
entry = char_dicts.pop(0)
if entry["color"] == grouped[-1]["color"]:
grouped[-1]["chars"] += entry["chars"]
else:
grouped.append(entry)
result.append(grouped)
else:
result.append([{"chars": line[1], "color": None}])
for line in result:
column = 0
for chunk in line:
chunk["column"] = column
column += len(chunk["chars"])
return result
def ansi_to_curses(line: str) -> CursesLine:
# pylint: disable=too-many-branches
# pylint: disable=too-many-locals
"""Convert ansible color codes to curses colors
:param line: A string with ansi colors
:type line: string
:return: A list of str tuples [(x, s, c), (x, s, c)...]
:rtype: list
"""
printable = []
ansi_regex = re.compile(r"(\x1b\[[\d;]*m)")
color_regex = re.compile(
r"""(?x)
\x1b\[ # Control Sequence Introducer
(?P<fg_action>(38;5|39);)? # optional FG color action
(?P<_bg_action>(48;5|49);)? # optional BG color action
(?P<one>\d+) # required, one number
(;(?P<two>\d+))? # optional 2nd number
m
"""
)
parts = ansi_regex.split(line)
colno = 0
color = 0
style = 0
while parts:
part = parts.pop(0)
if part:
match = color_regex.match(part)
if match:
cap = match.groupdict()
one = cap["one"]
two = cap["two"]
if cap["fg_action"] == "39;":
pass # default color
elif one == "0" and two is None:
pass # default color
elif cap["fg_action"] == "38;5;":
color = curses.color_pair(int(one) % curses.COLORS)
if two:
style = CURSES_STYLES.get(int(two), None) or 0
elif not cap["fg_action"]:
ansi_16 = list(chain(range(30, 38), range(90, 98)))
if two is None:
color = ansi_16.index(int(one)) if int(one) in ansi_16 else int(one)
color = curses.color_pair(color % curses.COLORS)
else:
color = ansi_16.index(int(two)) if int(two) in ansi_16 else int(two)
color = curses.color_pair(color % curses.COLORS)
style = CURSES_STYLES.get(int(one), None) or 0
else:
curses_line = CursesLinePart(
column=colno, string=part, color=color, decoration=style
)
printable.append(curses_line)
colno += len(part)
color = 0
style = 0
return tuple(printable)
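# Illustrative sketch (not part of the original module): converting a hex theme color to
# a terminal color index with the helpers above. The sample value is arbitrary.
if __name__ == "__main__":
    sample_rgb = hex_to_rgb("#ff8800")            # -> (255, 136, 0)
    print(rgb_to_ansi(*sample_rgb, colors=256))   # nearest 256-color palette index
    print(rgb_to_ansi(*sample_rgb, colors=16))    # coarse 16-color approximation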
| 31.525084
| 100
| 0.53745
|
b6390ba1ac14add6f86e4c46067c63e9c74a075a
| 493
|
py
|
Python
|
utils/csv_reader.py
|
dsc-jnec/mailmerge-pyth
|
4fa6f896008cfa28ef0e29b0b9b4e8481b5841e9
|
[
"MIT"
] | 1
|
2020-12-09T10:23:24.000Z
|
2020-12-09T10:23:24.000Z
|
utils/csv_reader.py
|
dsc-jnec/mailmerge-pyth
|
4fa6f896008cfa28ef0e29b0b9b4e8481b5841e9
|
[
"MIT"
] | 3
|
2020-11-19T06:57:23.000Z
|
2020-11-21T13:41:31.000Z
|
utils/csv_reader.py
|
dsc-jnec/mailmerge-pyth
|
4fa6f896008cfa28ef0e29b0b9b4e8481b5841e9
|
[
"MIT"
] | null | null | null |
import csv
res={}
def csvread(filename):
    """Read a CSV file into the module-level dict res: header name -> list of column values."""
    with open(filename, 'r') as file:
        csv_file = csv.reader(file)
        f = 0  # 0 until the header row has been consumed
        for row in csv_file:
            if f == 0:
                # first row is the header: create an empty list per column
                for i in row:
                    res[i] = []
                f += 1
            else:
                # data rows: append each cell to its column's list, in header order
                j = 0
                for key, value in res.items():
                    value.append(row[j])
                    j += 1
    return res
res = csvread("sample.csv")
for i in res:
print(i,res[i])
| 21.434783
| 46
| 0.409736
|
cc0398182a0e1c70be76cab4f3d7dc82367fd04e
| 3,401
|
py
|
Python
|
python/tvm/rpc/tornado_util.py
|
mostafaelhoushi/tvm
|
ae21eddf5f13ffa82d514e8311c87f38bcac559a
|
[
"Apache-2.0"
] | 1
|
2021-03-07T15:30:16.000Z
|
2021-03-07T15:30:16.000Z
|
python/tvm/rpc/tornado_util.py
|
mostafaelhoushi/tvm
|
ae21eddf5f13ffa82d514e8311c87f38bcac559a
|
[
"Apache-2.0"
] | null | null | null |
python/tvm/rpc/tornado_util.py
|
mostafaelhoushi/tvm
|
ae21eddf5f13ffa82d514e8311c87f38bcac559a
|
[
"Apache-2.0"
] | 1
|
2020-02-09T10:42:31.000Z
|
2020-02-09T10:42:31.000Z
|
"""Utilities used in tornado."""
import socket
import errno
from tornado import ioloop
class TCPHandler(object):
"""TCP socket handler backed tornado event loop.
Parameters
----------
sock : Socket
The TCP socket, will set it to non-blocking mode.
"""
def __init__(self, sock):
self._sock = sock
self._ioloop = ioloop.IOLoop.current()
self._sock.setblocking(0)
self._pending_write = []
self._signal_close = False
def _event_handler(_, events):
self._event_handler(events)
self._ioloop.add_handler(
self._sock.fileno(), _event_handler,
self._ioloop.READ | self._ioloop.ERROR)
def signal_close(self):
"""Signal the handler to close.
        The handler will be closed after the existing
        pending messages are sent to the peer.
"""
if not self._pending_write:
self.close()
else:
self._signal_close = True
def close(self):
"""Close the socket"""
if self._sock is not None:
try:
self._ioloop.remove_handler(self._sock.fileno())
self._sock.close()
except socket.error:
pass
self._sock = None
self.on_close()
def write_message(self, message, binary=True):
assert binary
if self._sock is None:
raise IOError("socket is already closed")
self._pending_write.append(message)
self._update_write()
def _event_handler(self, events):
"""centeral event handler"""
if (events & self._ioloop.ERROR) or (events & self._ioloop.READ):
if self._update_read() and (events & self._ioloop.WRITE):
self._update_write()
elif events & self._ioloop.WRITE:
self._update_write()
def _update_write(self):
"""Update the state on write"""
while self._pending_write:
try:
msg = self._pending_write[0]
if self._sock is None:
return
nsend = self._sock.send(msg)
if nsend != len(msg):
self._pending_write[0] = msg[nsend:]
else:
self._pending_write.pop(0)
except socket.error as err:
if err.args[0] in (errno.EAGAIN, errno.EWOULDBLOCK):
break
else:
self.on_error(err)
if self._pending_write:
self._ioloop.update_handler(
self._sock.fileno(), self._ioloop.READ | self._ioloop.ERROR | self._ioloop.WRITE)
else:
if self._signal_close:
self.close()
else:
self._ioloop.update_handler(
self._sock.fileno(), self._ioloop.READ | self._ioloop.ERROR)
def _update_read(self):
"""Update state when there is read event"""
try:
msg = bytes(self._sock.recv(4096))
if msg:
self.on_message(msg)
return True
# normal close, remote is closed
self.close()
except socket.error as err:
if err.args[0] in (errno.EAGAIN, errno.EWOULDBLOCK):
pass
else:
self.on_error(err)
return False
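# Illustrative sketch (not part of the original module): TCPHandler calls on_message,
# on_error and on_close on itself, so a concrete handler is expected to subclass it and
# provide those callbacks. EchoHandler is a hypothetical example that echoes each
# received payload back to the peer.
class EchoHandler(TCPHandler):
    """Echo every received message back to the sender."""
    def on_message(self, message):
        # message is the raw bytes read from the socket
        self.write_message(message, binary=True)
    def on_error(self, err):
        # drop the connection on any socket error
        self.close()
    def on_close(self):
        # nothing to clean up in this minimal example
        pass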
| 32.084906
| 97
| 0.539547
|
8a898ba7aa8f29a4187d26cc93a0131abe98a036
| 403
|
py
|
Python
|
wave2.0/division2B/hw2/E_diplomas_in_folders.py
|
stanislav-kudriavtsev/Yandex-Algorithms-Training
|
0ad882e04847f6c2a973716a419befb21aa1df20
|
[
"CC0-1.0"
] | null | null | null |
wave2.0/division2B/hw2/E_diplomas_in_folders.py
|
stanislav-kudriavtsev/Yandex-Algorithms-Training
|
0ad882e04847f6c2a973716a419befb21aa1df20
|
[
"CC0-1.0"
] | null | null | null |
wave2.0/division2B/hw2/E_diplomas_in_folders.py
|
stanislav-kudriavtsev/Yandex-Algorithms-Training
|
0ad882e04847f6c2a973716a419befb21aa1df20
|
[
"CC0-1.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""https://contest.yandex.ru/contest/28738/problems/E/"""
# pylint: disable=invalid-name
if __name__ == "__main__":
folders_total = int(input())
folders = tuple(map(int, input().split()))
summ, maxx = 0, folders[0]
for folder in folders:
summ += folder
if folder > maxx:
maxx = folder
print(summ - maxx)
| 22.388889
| 57
| 0.590571
|
f72ef0d45883357b7389e4970e9395830f94792a
| 80
|
py
|
Python
|
run.py
|
5x/ds-ants-geopy-extended
|
6017b5da444cc33bde47f0c7cf2cf06a640a354c
|
[
"MIT"
] | null | null | null |
run.py
|
5x/ds-ants-geopy-extended
|
6017b5da444cc33bde47f0c7cf2cf06a640a354c
|
[
"MIT"
] | null | null | null |
run.py
|
5x/ds-ants-geopy-extended
|
6017b5da444cc33bde47f0c7cf2cf06a640a354c
|
[
"MIT"
] | 1
|
2019-12-06T10:20:08.000Z
|
2019-12-06T10:20:08.000Z
|
from ants.ants import demonstrate
if __name__ == '__main__':
demonstrate()
| 16
| 33
| 0.725
|
3093c94958966ef44830ce87bd26e8ddd31e291d
| 308
|
py
|
Python
|
2017/02day/sol.py
|
zagura/aoc-2017
|
bfd38fb6fbe4211017a306d218b32ecff741e006
|
[
"MIT"
] | 2
|
2018-12-09T16:00:09.000Z
|
2018-12-09T17:56:15.000Z
|
2017/02day/sol.py
|
zagura/aoc-2017
|
bfd38fb6fbe4211017a306d218b32ecff741e006
|
[
"MIT"
] | null | null | null |
2017/02day/sol.py
|
zagura/aoc-2017
|
bfd38fb6fbe4211017a306d218b32ecff741e006
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
import sys
lines = sys.stdin.readlines()
#print("Read lines finished!")
fullsum = 0
for l in lines:
vals = l.strip().split('\t')
# print(vals)
vals2 = list(map(lambda x: int(x), vals))
# print(vals2)
diff = max(vals2) - min(vals2)
fullsum += diff
print(fullsum)
| 16.210526
| 45
| 0.616883
|
f23857ab9b6ba46fcd36d92f03c30c76aa51d610
| 918
|
py
|
Python
|
backupCynanBot.py
|
Omnigazer/CynanBotCommon
|
9055a63ffcc5bfb2f95f6934f8fd981088a11d9b
|
[
"Unlicense"
] | 2
|
2021-02-27T16:45:19.000Z
|
2021-05-21T15:57:02.000Z
|
backupCynanBot.py
|
Omnigazer/CynanBotCommon
|
9055a63ffcc5bfb2f95f6934f8fd981088a11d9b
|
[
"Unlicense"
] | 1
|
2021-03-03T14:05:16.000Z
|
2021-03-03T14:05:16.000Z
|
backupCynanBot.py
|
Omnigazer/CynanBotCommon
|
9055a63ffcc5bfb2f95f6934f8fd981088a11d9b
|
[
"Unlicense"
] | 3
|
2021-02-27T16:54:55.000Z
|
2021-05-06T14:12:32.000Z
|
import os
import shutil
import sys
from shutil import SameFileError
def find_files(src):
relevant_files = []
for root, dirs, files in os.walk(src):
relevant_files.extend(os.path.join(root, f) for f in files
if f.endswith(".json") or f.endswith(".sqlite"))
return relevant_files
def copy_files(dest, file_list):
for file in file_list:
dir = os.path.join(dest, os.path.dirname(file))
os.makedirs(dir, exist_ok = True)
try:
shutil.copy2(file, dir)
except SameFileError as e:
print(f'Encountered crazy copy error with file \"{file}\" in dir \"{dir}\"')
def main():
args = sys.argv[1:]
if not args:
print("./backupCynanBot.py <src> <dest>")
sys.exit(1)
file_list = find_files(args[0])
copy_files(args[1], file_list)
if __name__ == '__main__':
main()
| 24.810811
| 89
| 0.592593
|
d1fe94ffbd0404de860e527cb144606693004b52
| 834
|
py
|
Python
|
dcmanager/tests/unit/test_dcmanager.py
|
starlingx-staging/stx-kingbird
|
9869ad4640e76384fa14f031a59134cd439929a8
|
[
"Apache-2.0"
] | null | null | null |
dcmanager/tests/unit/test_dcmanager.py
|
starlingx-staging/stx-kingbird
|
9869ad4640e76384fa14f031a59134cd439929a8
|
[
"Apache-2.0"
] | null | null | null |
dcmanager/tests/unit/test_dcmanager.py
|
starlingx-staging/stx-kingbird
|
9869ad4640e76384fa14f031a59134cd439929a8
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2017 Wind River Systems, Inc.
#
"""
test_dcmanager
----------------------------------
Tests for `dcmanager` module.
"""
from dcmanager.tests import base
class TestDCManager(base.DCManagerTestCase):
def test_something(self):
pass
| 26.903226
| 75
| 0.705036
|